Diffstat (limited to 'fs')
-rw-r--r--fs/9p/acl.c1
-rw-r--r--fs/9p/v9fs.c540
-rw-r--r--fs/9p/v9fs.h9
-rw-r--r--fs/9p/vfs_addr.c16
-rw-r--r--fs/9p/vfs_dentry.c51
-rw-r--r--fs/9p/vfs_file.c41
-rw-r--r--fs/9p/vfs_inode.c46
-rw-r--r--fs/9p/vfs_inode_dotl.c29
-rw-r--r--fs/9p/vfs_super.c142
-rw-r--r--fs/Kconfig17
-rw-r--r--fs/Kconfig.binfmt9
-rw-r--r--fs/Makefile8
-rw-r--r--fs/adfs/file.c2
-rw-r--r--fs/adfs/inode.c9
-rw-r--r--fs/adfs/super.c2
-rw-r--r--fs/affs/affs.h2
-rw-r--r--fs/affs/file.c37
-rw-r--r--fs/affs/inode.c2
-rw-r--r--fs/affs/namei.c8
-rw-r--r--fs/affs/super.c4
-rw-r--r--fs/afs/Kconfig1
-rw-r--r--fs/afs/Makefile2
-rw-r--r--fs/afs/addr_list.c50
-rw-r--r--fs/afs/addr_prefs.c8
-rw-r--r--fs/afs/afs.h2
-rw-r--r--fs/afs/afs_vl.h1
-rw-r--r--fs/afs/callback.c8
-rw-r--r--fs/afs/cell.c561
-rw-r--r--fs/afs/cm_security.c340
-rw-r--r--fs/afs/cmservice.c82
-rw-r--r--fs/afs/dir.c1087
-rw-r--r--fs/afs/dir_edit.c405
-rw-r--r--fs/afs/dir_search.c227
-rw-r--r--fs/afs/dir_silly.c17
-rw-r--r--fs/afs/dynroot.c489
-rw-r--r--fs/afs/file.c272
-rw-r--r--fs/afs/fs_operation.c113
-rw-r--r--fs/afs/fs_probe.c34
-rw-r--r--fs/afs/fsclient.c66
-rw-r--r--fs/afs/inode.c150
-rw-r--r--fs/afs/internal.h293
-rw-r--r--fs/afs/main.c23
-rw-r--r--fs/afs/misc.c28
-rw-r--r--fs/afs/mntpt.c30
-rw-r--r--fs/afs/proc.c26
-rw-r--r--fs/afs/protocol_yfs.h3
-rw-r--r--fs/afs/rotate.c21
-rw-r--r--fs/afs/rxrpc.c91
-rw-r--r--fs/afs/security.c49
-rw-r--r--fs/afs/server.c607
-rw-r--r--fs/afs/server_list.c6
-rw-r--r--fs/afs/super.c33
-rw-r--r--fs/afs/validation.c31
-rw-r--r--fs/afs/vl_alias.c14
-rw-r--r--fs/afs/vl_rotate.c2
-rw-r--r--fs/afs/vlclient.c3
-rw-r--r--fs/afs/volume.c15
-rw-r--r--fs/afs/write.c32
-rw-r--r--fs/afs/xdr_fs.h2
-rw-r--r--fs/afs/yfsclient.c303
-rw-r--r--fs/aio.c21
-rw-r--r--fs/anon_inodes.c95
-rw-r--r--fs/attr.c56
-rw-r--r--fs/autofs/autofs_i.h7
-rw-r--r--fs/autofs/dev-ioctl.c59
-rw-r--r--fs/autofs/inode.c5
-rw-r--r--fs/autofs/root.c33
-rw-r--r--fs/backing-file.c159
-rw-r--r--fs/bad_inode.c6
-rw-r--r--fs/bcachefs/Kconfig105
-rw-r--r--fs/bcachefs/Makefile101
-rw-r--r--fs/bcachefs/acl.c450
-rw-r--r--fs/bcachefs/acl.h60
-rw-r--r--fs/bcachefs/alloc_background.c2553
-rw-r--r--fs/bcachefs/alloc_background.h357
-rw-r--r--fs/bcachefs/alloc_background_format.h95
-rw-r--r--fs/bcachefs/alloc_foreground.c1818
-rw-r--r--fs/bcachefs/alloc_foreground.h242
-rw-r--r--fs/bcachefs/alloc_types.h134
-rw-r--r--fs/bcachefs/backpointers.c1068
-rw-r--r--fs/bcachefs/backpointers.h180
-rw-r--r--fs/bcachefs/bbpos.h37
-rw-r--r--fs/bcachefs/bbpos_types.h18
-rw-r--r--fs/bcachefs/bcachefs.h1254
-rw-r--r--fs/bcachefs/bcachefs_format.h1465
-rw-r--r--fs/bcachefs/bcachefs_ioctl.h446
-rw-r--r--fs/bcachefs/bkey.c1117
-rw-r--r--fs/bcachefs/bkey.h612
-rw-r--r--fs/bcachefs/bkey_buf.h61
-rw-r--r--fs/bcachefs/bkey_cmp.h129
-rw-r--r--fs/bcachefs/bkey_methods.c480
-rw-r--r--fs/bcachefs/bkey_methods.h138
-rw-r--r--fs/bcachefs/bkey_sort.c214
-rw-r--r--fs/bcachefs/bkey_sort.h54
-rw-r--r--fs/bcachefs/bkey_types.h213
-rw-r--r--fs/bcachefs/bset.c1570
-rw-r--r--fs/bcachefs/bset.h544
-rw-r--r--fs/bcachefs/btree_cache.c1491
-rw-r--r--fs/bcachefs/btree_cache.h148
-rw-r--r--fs/bcachefs/btree_gc.c1345
-rw-r--r--fs/bcachefs/btree_gc.h87
-rw-r--r--fs/bcachefs/btree_gc_types.h34
-rw-r--r--fs/bcachefs/btree_io.c2355
-rw-r--r--fs/bcachefs/btree_io.h223
-rw-r--r--fs/bcachefs/btree_iter.c3499
-rw-r--r--fs/bcachefs/btree_iter.h940
-rw-r--r--fs/bcachefs/btree_journal_iter.c635
-rw-r--r--fs/bcachefs/btree_journal_iter.h92
-rw-r--r--fs/bcachefs/btree_key_cache.c813
-rw-r--r--fs/bcachefs/btree_key_cache.h60
-rw-r--r--fs/bcachefs/btree_key_cache_types.h34
-rw-r--r--fs/bcachefs/btree_locking.c887
-rw-r--r--fs/bcachefs/btree_locking.h446
-rw-r--r--fs/bcachefs/btree_node_scan.c549
-rw-r--r--fs/bcachefs/btree_node_scan.h11
-rw-r--r--fs/bcachefs/btree_node_scan_types.h32
-rw-r--r--fs/bcachefs/btree_trans_commit.c1157
-rw-r--r--fs/bcachefs/btree_types.h865
-rw-r--r--fs/bcachefs/btree_update.c903
-rw-r--r--fs/bcachefs/btree_update.h363
-rw-r--r--fs/bcachefs/btree_update_interior.c2716
-rw-r--r--fs/bcachefs/btree_update_interior.h346
-rw-r--r--fs/bcachefs/btree_write_buffer.c847
-rw-r--r--fs/bcachefs/btree_write_buffer.h106
-rw-r--r--fs/bcachefs/btree_write_buffer_types.h59
-rw-r--r--fs/bcachefs/buckets.c1327
-rw-r--r--fs/bcachefs/buckets.h417
-rw-r--r--fs/bcachefs/buckets_types.h68
-rw-r--r--fs/bcachefs/buckets_waiting_for_journal.c175
-rw-r--r--fs/bcachefs/buckets_waiting_for_journal.h15
-rw-r--r--fs/bcachefs/buckets_waiting_for_journal_types.h23
-rw-r--r--fs/bcachefs/chardev.c1024
-rw-r--r--fs/bcachefs/chardev.h31
-rw-r--r--fs/bcachefs/checksum.c822
-rw-r--r--fs/bcachefs/checksum.h237
-rw-r--r--fs/bcachefs/clock.c192
-rw-r--r--fs/bcachefs/clock.h28
-rw-r--r--fs/bcachefs/clock_types.h38
-rw-r--r--fs/bcachefs/compress.c728
-rw-r--r--fs/bcachefs/compress.h73
-rw-r--r--fs/bcachefs/darray.c38
-rw-r--r--fs/bcachefs/darray.h103
-rw-r--r--fs/bcachefs/data_update.c763
-rw-r--r--fs/bcachefs/data_update.h55
-rw-r--r--fs/bcachefs/debug.c951
-rw-r--r--fs/bcachefs/debug.h32
-rw-r--r--fs/bcachefs/dirent.c574
-rw-r--r--fs/bcachefs/dirent.h82
-rw-r--r--fs/bcachefs/dirent_format.h42
-rw-r--r--fs/bcachefs/disk_accounting.c976
-rw-r--r--fs/bcachefs/disk_accounting.h224
-rw-r--r--fs/bcachefs/disk_accounting_format.h167
-rw-r--r--fs/bcachefs/disk_accounting_types.h19
-rw-r--r--fs/bcachefs/disk_groups.c616
-rw-r--r--fs/bcachefs/disk_groups.h111
-rw-r--r--fs/bcachefs/disk_groups_format.h21
-rw-r--r--fs/bcachefs/disk_groups_types.h18
-rw-r--r--fs/bcachefs/ec.c2496
-rw-r--r--fs/bcachefs/ec.h272
-rw-r--r--fs/bcachefs/ec_format.h26
-rw-r--r--fs/bcachefs/ec_types.h42
-rw-r--r--fs/bcachefs/errcode.c71
-rw-r--r--fs/bcachefs/errcode.h310
-rw-r--r--fs/bcachefs/error.c485
-rw-r--r--fs/bcachefs/error.h255
-rw-r--r--fs/bcachefs/extent_update.c173
-rw-r--r--fs/bcachefs/extent_update.h12
-rw-r--r--fs/bcachefs/extents.c1673
-rw-r--r--fs/bcachefs/extents.h766
-rw-r--r--fs/bcachefs/extents_format.h295
-rw-r--r--fs/bcachefs/extents_types.h40
-rw-r--r--fs/bcachefs/eytzinger.c305
-rw-r--r--fs/bcachefs/eytzinger.h319
-rw-r--r--fs/bcachefs/fifo.h127
-rw-r--r--fs/bcachefs/fs-common.c550
-rw-r--r--fs/bcachefs/fs-common.h45
-rw-r--r--fs/bcachefs/fs-io-buffered.c1081
-rw-r--r--fs/bcachefs/fs-io-buffered.h27
-rw-r--r--fs/bcachefs/fs-io-direct.c690
-rw-r--r--fs/bcachefs/fs-io-direct.h16
-rw-r--r--fs/bcachefs/fs-io-pagecache.c823
-rw-r--r--fs/bcachefs/fs-io-pagecache.h176
-rw-r--r--fs/bcachefs/fs-io.c1028
-rw-r--r--fs/bcachefs/fs-io.h184
-rw-r--r--fs/bcachefs/fs-ioctl.c634
-rw-r--r--fs/bcachefs/fs-ioctl.h81
-rw-r--r--fs/bcachefs/fs.c2392
-rw-r--r--fs/bcachefs/fs.h214
-rw-r--r--fs/bcachefs/fsck.c3196
-rw-r--r--fs/bcachefs/fsck.h17
-rw-r--r--fs/bcachefs/inode.c1407
-rw-r--r--fs/bcachefs/inode.h268
-rw-r--r--fs/bcachefs/inode_format.h167
-rw-r--r--fs/bcachefs/io_misc.c540
-rw-r--r--fs/bcachefs/io_misc.h34
-rw-r--r--fs/bcachefs/io_read.c1266
-rw-r--r--fs/bcachefs/io_read.h158
-rw-r--r--fs/bcachefs/io_write.c1689
-rw-r--r--fs/bcachefs/io_write.h109
-rw-r--r--fs/bcachefs/io_write_types.h97
-rw-r--r--fs/bcachefs/journal.c1583
-rw-r--r--fs/bcachefs/journal.h449
-rw-r--r--fs/bcachefs/journal_io.c2081
-rw-r--r--fs/bcachefs/journal_io.h93
-rw-r--r--fs/bcachefs/journal_reclaim.c917
-rw-r--r--fs/bcachefs/journal_reclaim.h81
-rw-r--r--fs/bcachefs/journal_sb.c232
-rw-r--r--fs/bcachefs/journal_sb.h24
-rw-r--r--fs/bcachefs/journal_seq_blacklist.c255
-rw-r--r--fs/bcachefs/journal_seq_blacklist.h22
-rw-r--r--fs/bcachefs/journal_seq_blacklist_format.h15
-rw-r--r--fs/bcachefs/journal_types.h345
-rw-r--r--fs/bcachefs/keylist.c50
-rw-r--r--fs/bcachefs/keylist.h72
-rw-r--r--fs/bcachefs/keylist_types.h16
-rw-r--r--fs/bcachefs/logged_ops.c118
-rw-r--r--fs/bcachefs/logged_ops.h20
-rw-r--r--fs/bcachefs/logged_ops_format.h30
-rw-r--r--fs/bcachefs/lru.c202
-rw-r--r--fs/bcachefs/lru.h56
-rw-r--r--fs/bcachefs/lru_format.h25
-rw-r--r--fs/bcachefs/mean_and_variance.c173
-rw-r--r--fs/bcachefs/mean_and_variance.h203
-rw-r--r--fs/bcachefs/mean_and_variance_test.c221
-rw-r--r--fs/bcachefs/migrate.c174
-rw-r--r--fs/bcachefs/migrate.h7
-rw-r--r--fs/bcachefs/move.c1181
-rw-r--r--fs/bcachefs/move.h155
-rw-r--r--fs/bcachefs/move_types.h36
-rw-r--r--fs/bcachefs/movinggc.c449
-rw-r--r--fs/bcachefs/movinggc.h12
-rw-r--r--fs/bcachefs/nocow_locking.c144
-rw-r--r--fs/bcachefs/nocow_locking.h50
-rw-r--r--fs/bcachefs/nocow_locking_types.h20
-rw-r--r--fs/bcachefs/opts.c734
-rw-r--r--fs/bcachefs/opts.h637
-rw-r--r--fs/bcachefs/printbuf.c509
-rw-r--r--fs/bcachefs/printbuf.h282
-rw-r--r--fs/bcachefs/quota.c892
-rw-r--r--fs/bcachefs/quota.h73
-rw-r--r--fs/bcachefs/quota_format.h47
-rw-r--r--fs/bcachefs/quota_types.h43
-rw-r--r--fs/bcachefs/rcu_pending.c650
-rw-r--r--fs/bcachefs/rcu_pending.h27
-rw-r--r--fs/bcachefs/rebalance.c490
-rw-r--r--fs/bcachefs/rebalance.h27
-rw-r--r--fs/bcachefs/rebalance_types.h37
-rw-r--r--fs/bcachefs/recovery.c1144
-rw-r--r--fs/bcachefs/recovery.h12
-rw-r--r--fs/bcachefs/recovery_passes.c264
-rw-r--r--fs/bcachefs/recovery_passes.h17
-rw-r--r--fs/bcachefs/recovery_passes_types.h74
-rw-r--r--fs/bcachefs/reflink.c593
-rw-r--r--fs/bcachefs/reflink.h79
-rw-r--r--fs/bcachefs/reflink_format.h33
-rw-r--r--fs/bcachefs/replicas.c919
-rw-r--r--fs/bcachefs/replicas.h83
-rw-r--r--fs/bcachefs/replicas_format.h36
-rw-r--r--fs/bcachefs/replicas_types.h11
-rw-r--r--fs/bcachefs/sb-clean.c336
-rw-r--r--fs/bcachefs/sb-clean.h16
-rw-r--r--fs/bcachefs/sb-counters.c99
-rw-r--r--fs/bcachefs/sb-counters.h16
-rw-r--r--fs/bcachefs/sb-counters_format.h98
-rw-r--r--fs/bcachefs/sb-downgrade.c416
-rw-r--r--fs/bcachefs/sb-downgrade.h12
-rw-r--r--fs/bcachefs/sb-downgrade_format.h17
-rw-r--r--fs/bcachefs/sb-errors.c176
-rw-r--r--fs/bcachefs/sb-errors.h21
-rw-r--r--fs/bcachefs/sb-errors_format.h328
-rw-r--r--fs/bcachefs/sb-errors_types.h15
-rw-r--r--fs/bcachefs/sb-members.c532
-rw-r--r--fs/bcachefs/sb-members.h367
-rw-r--r--fs/bcachefs/sb-members_format.h121
-rw-r--r--fs/bcachefs/sb-members_types.h21
-rw-r--r--fs/bcachefs/seqmutex.h45
-rw-r--r--fs/bcachefs/siphash.c173
-rw-r--r--fs/bcachefs/siphash.h87
-rw-r--r--fs/bcachefs/six.c873
-rw-r--r--fs/bcachefs/six.h386
-rw-r--r--fs/bcachefs/snapshot.c1815
-rw-r--r--fs/bcachefs/snapshot.h265
-rw-r--r--fs/bcachefs/snapshot_format.h36
-rw-r--r--fs/bcachefs/str_hash.h396
-rw-r--r--fs/bcachefs/subvolume.c691
-rw-r--r--fs/bcachefs/subvolume.h92
-rw-r--r--fs/bcachefs/subvolume_format.h35
-rw-r--r--fs/bcachefs/subvolume_types.h38
-rw-r--r--fs/bcachefs/super-io.c1418
-rw-r--r--fs/bcachefs/super-io.h104
-rw-r--r--fs/bcachefs/super.c2148
-rw-r--r--fs/bcachefs/super.h54
-rw-r--r--fs/bcachefs/super_types.h29
-rw-r--r--fs/bcachefs/sysfs.c893
-rw-r--r--fs/bcachefs/sysfs.h48
-rw-r--r--fs/bcachefs/tests.c887
-rw-r--r--fs/bcachefs/tests.h15
-rw-r--r--fs/bcachefs/thread_with_file.c492
-rw-r--r--fs/bcachefs/thread_with_file.h81
-rw-r--r--fs/bcachefs/thread_with_file_types.h20
-rw-r--r--fs/bcachefs/time_stats.c179
-rw-r--r--fs/bcachefs/time_stats.h160
-rw-r--r--fs/bcachefs/trace.c18
-rw-r--r--fs/bcachefs/trace.h1905
-rw-r--r--fs/bcachefs/two_state_shared_lock.c8
-rw-r--r--fs/bcachefs/two_state_shared_lock.h58
-rw-r--r--fs/bcachefs/util.c887
-rw-r--r--fs/bcachefs/util.h699
-rw-r--r--fs/bcachefs/varint.c129
-rw-r--r--fs/bcachefs/varint.h11
-rw-r--r--fs/bcachefs/vstructs.h63
-rw-r--r--fs/bcachefs/xattr.c638
-rw-r--r--fs/bcachefs/xattr.h49
-rw-r--r--fs/bcachefs/xattr_format.h19
-rw-r--r--fs/befs/linuxvfs.c2
-rw-r--r--fs/bfs/file.c9
-rw-r--r--fs/bfs/inode.c51
-rw-r--r--fs/binfmt_elf.c248
-rw-r--r--fs/binfmt_elf_fdpic.c28
-rw-r--r--fs/binfmt_flat.c2
-rw-r--r--fs/binfmt_misc.c126
-rw-r--r--fs/bpf_fs_kfuncs.c261
-rw-r--r--fs/btrfs/Kconfig46
-rw-r--r--fs/btrfs/Makefile4
-rw-r--r--fs/btrfs/accessors.c164
-rw-r--r--fs/btrfs/accessors.h39
-rw-r--r--fs/btrfs/acl.c25
-rw-r--r--fs/btrfs/acl.h2
-rw-r--r--fs/btrfs/async-thread.c20
-rw-r--r--fs/btrfs/backref.c292
-rw-r--r--fs/btrfs/backref.h50
-rw-r--r--fs/btrfs/bio.c447
-rw-r--r--fs/btrfs/bio.h42
-rw-r--r--fs/btrfs/block-group.c571
-rw-r--r--fs/btrfs/block-group.h22
-rw-r--r--fs/btrfs/block-rsv.c35
-rw-r--r--fs/btrfs/block-rsv.h1
-rw-r--r--fs/btrfs/btrfs_inode.h64
-rw-r--r--fs/btrfs/compression.c410
-rw-r--r--fs/btrfs/compression.h92
-rw-r--r--fs/btrfs/ctree.c659
-rw-r--r--fs/btrfs/ctree.h105
-rw-r--r--fs/btrfs/defrag.c294
-rw-r--r--fs/btrfs/defrag.h4
-rw-r--r--fs/btrfs/delalloc-space.c55
-rw-r--r--fs/btrfs/delalloc-space.h4
-rw-r--r--fs/btrfs/delayed-inode.c511
-rw-r--r--fs/btrfs/delayed-inode.h109
-rw-r--r--fs/btrfs/delayed-ref.c150
-rw-r--r--fs/btrfs/delayed-ref.h19
-rw-r--r--fs/btrfs/dev-replace.c90
-rw-r--r--fs/btrfs/dev-replace.h2
-rw-r--r--fs/btrfs/dir-item.c34
-rw-r--r--fs/btrfs/dir-item.h3
-rw-r--r--fs/btrfs/direct-io.c117
-rw-r--r--fs/btrfs/direct-io.h2
-rw-r--r--fs/btrfs/discard.c53
-rw-r--r--fs/btrfs/discard.h1
-rw-r--r--fs/btrfs/disk-io.c564
-rw-r--r--fs/btrfs/disk-io.h14
-rw-r--r--fs/btrfs/export.c61
-rw-r--r--fs/btrfs/extent-io-tree.c530
-rw-r--r--fs/btrfs/extent-io-tree.h166
-rw-r--r--fs/btrfs/extent-tree.c827
-rw-r--r--fs/btrfs/extent-tree.h48
-rw-r--r--fs/btrfs/extent_io.c2147
-rw-r--r--fs/btrfs/extent_io.h46
-rw-r--r--fs/btrfs/extent_map.c282
-rw-r--r--fs/btrfs/extent_map.h50
-rw-r--r--fs/btrfs/fiemap.c13
-rw-r--r--fs/btrfs/file-item.c209
-rw-r--r--fs/btrfs/file-item.h10
-rw-r--r--fs/btrfs/file.c1117
-rw-r--r--fs/btrfs/file.h2
-rw-r--r--fs/btrfs/free-space-cache.c152
-rw-r--r--fs/btrfs/free-space-tree.c627
-rw-r--r--fs/btrfs/free-space-tree.h52
-rw-r--r--fs/btrfs/fs.c179
-rw-r--r--fs/btrfs/fs.h154
-rw-r--r--fs/btrfs/inode-item.c77
-rw-r--r--fs/btrfs/inode-item.h11
-rw-r--r--fs/btrfs/inode.c2729
-rw-r--r--fs/btrfs/ioctl.c992
-rw-r--r--fs/btrfs/ioctl.h11
-rw-r--r--fs/btrfs/locking.c11
-rw-r--r--fs/btrfs/locking.h19
-rw-r--r--fs/btrfs/lzo.c98
-rw-r--r--fs/btrfs/messages.c2
-rw-r--r--fs/btrfs/messages.h188
-rw-r--r--fs/btrfs/misc.h89
-rw-r--r--fs/btrfs/ordered-data.c184
-rw-r--r--fs/btrfs/ordered-data.h9
-rw-r--r--fs/btrfs/print-tree.c268
-rw-r--r--fs/btrfs/print-tree.h2
-rw-r--r--fs/btrfs/props.c66
-rw-r--r--fs/btrfs/props.h8
-rw-r--r--fs/btrfs/qgroup.c695
-rw-r--r--fs/btrfs/qgroup.h3
-rw-r--r--fs/btrfs/raid-stripe-tree.c174
-rw-r--r--fs/btrfs/raid-stripe-tree.h1
-rw-r--r--fs/btrfs/raid56.c969
-rw-r--r--fs/btrfs/raid56.h107
-rw-r--r--fs/btrfs/rcu-string.h58
-rw-r--r--fs/btrfs/ref-verify.c159
-rw-r--r--fs/btrfs/ref-verify.h8
-rw-r--r--fs/btrfs/reflink.c155
-rw-r--r--fs/btrfs/relocation.c835
-rw-r--r--fs/btrfs/relocation.h3
-rw-r--r--fs/btrfs/root-tree.c72
-rw-r--r--fs/btrfs/scrub.c874
-rw-r--r--fs/btrfs/scrub.h2
-rw-r--r--fs/btrfs/send.c1237
-rw-r--r--fs/btrfs/send.h4
-rw-r--r--fs/btrfs/space-info.c707
-rw-r--r--fs/btrfs/space-info.h71
-rw-r--r--fs/btrfs/subpage.c481
-rw-r--r--fs/btrfs/subpage.h97
-rw-r--r--fs/btrfs/super.c533
-rw-r--r--fs/btrfs/sysfs.c363
-rw-r--r--fs/btrfs/sysfs.h10
-rw-r--r--fs/btrfs/tests/btrfs-tests.c50
-rw-r--r--fs/btrfs/tests/btrfs-tests.h6
-rw-r--r--fs/btrfs/tests/delayed-refs-tests.c1016
-rw-r--r--fs/btrfs/tests/extent-io-tests.c94
-rw-r--r--fs/btrfs/tests/extent-map-tests.c111
-rw-r--r--fs/btrfs/tests/free-space-tree-tests.c93
-rw-r--r--fs/btrfs/tests/inode-tests.c107
-rw-r--r--fs/btrfs/tests/qgroup-tests.c16
-rw-r--r--fs/btrfs/tests/raid-stripe-tree-tests.c661
-rw-r--r--fs/btrfs/transaction.c259
-rw-r--r--fs/btrfs/transaction.h22
-rw-r--r--fs/btrfs/tree-checker.c225
-rw-r--r--fs/btrfs/tree-checker.h7
-rw-r--r--fs/btrfs/tree-log.c2948
-rw-r--r--fs/btrfs/tree-log.h8
-rw-r--r--fs/btrfs/tree-mod-log.c81
-rw-r--r--fs/btrfs/ulist.c59
-rw-r--r--fs/btrfs/uuid-tree.c122
-rw-r--r--fs/btrfs/verity.c48
-rw-r--r--fs/btrfs/volumes.c1107
-rw-r--r--fs/btrfs/volumes.h88
-rw-r--r--fs/btrfs/xattr.c51
-rw-r--r--fs/btrfs/xattr.h2
-rw-r--r--fs/btrfs/zlib.c170
-rw-r--r--fs/btrfs/zoned.c627
-rw-r--r--fs/btrfs/zoned.h26
-rw-r--r--fs/btrfs/zstd.c259
-rw-r--r--fs/buffer.c226
-rw-r--r--fs/cachefiles/daemon.c14
-rw-r--r--fs/cachefiles/error_inject.c2
-rw-r--r--fs/cachefiles/interface.c11
-rw-r--r--fs/cachefiles/internal.h4
-rw-r--r--fs/cachefiles/io.c22
-rw-r--r--fs/cachefiles/key.c3
-rw-r--r--fs/cachefiles/namei.c114
-rw-r--r--fs/cachefiles/ondemand.c11
-rw-r--r--fs/cachefiles/security.c6
-rw-r--r--fs/cachefiles/volume.c9
-rw-r--r--fs/cachefiles/xattr.c9
-rw-r--r--fs/ceph/Kconfig2
-rw-r--r--fs/ceph/addr.c1315
-rw-r--r--fs/ceph/cache.c2
-rw-r--r--fs/ceph/caps.c18
-rw-r--r--fs/ceph/crypto.c159
-rw-r--r--fs/ceph/crypto.h34
-rw-r--r--fs/ceph/debugfs.c16
-rw-r--r--fs/ceph/dir.c107
-rw-r--r--fs/ceph/export.c21
-rw-r--r--fs/ceph/file.c164
-rw-r--r--fs/ceph/inode.c215
-rw-r--r--fs/ceph/io.c100
-rw-r--r--fs/ceph/io.h8
-rw-r--r--fs/ceph/ioctl.c17
-rw-r--r--fs/ceph/locks.c5
-rw-r--r--fs/ceph/mds_client.c252
-rw-r--r--fs/ceph/mds_client.h23
-rw-r--r--fs/ceph/mdsmap.c14
-rw-r--r--fs/ceph/quota.c2
-rw-r--r--fs/ceph/super.c39
-rw-r--r--fs/ceph/super.h25
-rw-r--r--fs/ceph/xattr.c18
-rw-r--r--fs/coda/cnode.c4
-rw-r--r--fs/coda/dir.c29
-rw-r--r--fs/coda/file.c6
-rw-r--r--fs/coda/inode.c2
-rw-r--r--fs/coda/sysctl.c2
-rw-r--r--fs/configfs/Kconfig1
-rw-r--r--fs/configfs/dir.c28
-rw-r--r--fs/configfs/file.c2
-rw-r--r--fs/configfs/inode.c3
-rw-r--r--fs/configfs/item.c2
-rw-r--r--fs/configfs/mount.c7
-rw-r--r--fs/configfs/symlink.c33
-rw-r--r--fs/coredump.c1081
-rw-r--r--fs/cramfs/inode.c18
-rw-r--r--fs/crypto/Kconfig20
-rw-r--r--fs/crypto/bio.c13
-rw-r--r--fs/crypto/crypto.c84
-rw-r--r--fs/crypto/fname.c192
-rw-r--r--fs/crypto/fscrypt_private.h128
-rw-r--r--fs/crypto/hkdf.c142
-rw-r--r--fs/crypto/hooks.c6
-rw-r--r--fs/crypto/inline_crypt.c58
-rw-r--r--fs/crypto/keyring.c155
-rw-r--r--fs/crypto/keysetup.c196
-rw-r--r--fs/crypto/keysetup_v1.c59
-rw-r--r--fs/crypto/policy.c15
-rw-r--r--fs/d_path.c8
-rw-r--r--fs/dax.c573
-rw-r--r--fs/dcache.c534
-rw-r--r--fs/debugfs/file.c284
-rw-r--r--fs/debugfs/inode.c327
-rw-r--r--fs/debugfs/internal.h69
-rw-r--r--fs/devpts/inode.c312
-rw-r--r--fs/direct-io.c10
-rw-r--r--fs/dlm/Kconfig1
-rw-r--r--fs/dlm/config.c69
-rw-r--r--fs/dlm/config.h4
-rw-r--r--fs/dlm/lock.c52
-rw-r--r--fs/dlm/lockspace.c48
-rw-r--r--fs/dlm/lowcomms.c24
-rw-r--r--fs/dlm/main.c2
-rw-r--r--fs/dlm/member.c27
-rw-r--r--fs/dlm/recover.c2
-rw-r--r--fs/dlm/user.c6
-rw-r--r--fs/drop_caches.c25
-rw-r--r--fs/ecryptfs/Kconfig2
-rw-r--r--fs/ecryptfs/crypto.c90
-rw-r--r--fs/ecryptfs/dentry.c32
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h40
-rw-r--r--fs/ecryptfs/file.c17
-rw-r--r--fs/ecryptfs/inode.c221
-rw-r--r--fs/ecryptfs/keystore.c65
-rw-r--r--fs/ecryptfs/main.c36
-rw-r--r--fs/ecryptfs/mmap.c10
-rw-r--r--fs/ecryptfs/super.c6
-rw-r--r--fs/efivarfs/file.c59
-rw-r--r--fs/efivarfs/inode.c71
-rw-r--r--fs/efivarfs/internal.h27
-rw-r--r--fs/efivarfs/super.c249
-rw-r--r--fs/efivarfs/vars.c183
-rw-r--r--fs/efs/inode.c2
-rw-r--r--fs/erofs/Kconfig48
-rw-r--r--fs/erofs/Makefile1
-rw-r--r--fs/erofs/compress.h47
-rw-r--r--fs/erofs/data.c283
-rw-r--r--fs/erofs/decompressor.c241
-rw-r--r--fs/erofs/decompressor_crypto.c182
-rw-r--r--fs/erofs/decompressor_deflate.c55
-rw-r--r--fs/erofs/decompressor_lzma.c34
-rw-r--r--fs/erofs/decompressor_zstd.c43
-rw-r--r--fs/erofs/dir.c36
-rw-r--r--fs/erofs/erofs_fs.h215
-rw-r--r--fs/erofs/fileio.c45
-rw-r--r--fs/erofs/fscache.c23
-rw-r--r--fs/erofs/inode.c180
-rw-r--r--fs/erofs/internal.h115
-rw-r--r--fs/erofs/namei.c2
-rw-r--r--fs/erofs/super.c328
-rw-r--r--fs/erofs/sysfs.c73
-rw-r--r--fs/erofs/xattr.c85
-rw-r--r--fs/erofs/xattr.h3
-rw-r--r--fs/erofs/zdata.c522
-rw-r--r--fs/erofs/zmap.c573
-rw-r--r--fs/erofs/zutil.c13
-rw-r--r--fs/eventfd.c30
-rw-r--r--fs/eventpoll.c359
-rw-r--r--fs/exec.c280
-rw-r--r--fs/exfat/balloc.c129
-rw-r--r--fs/exfat/dir.c180
-rw-r--r--fs/exfat/exfat_fs.h15
-rw-r--r--fs/exfat/exfat_raw.h6
-rw-r--r--fs/exfat/fatent.c75
-rw-r--r--fs/exfat/file.c119
-rw-r--r--fs/exfat/inode.c160
-rw-r--r--fs/exfat/namei.c47
-rw-r--r--fs/exfat/nls.c6
-rw-r--r--fs/exfat/super.c171
-rw-r--r--fs/exportfs/expfs.c11
-rw-r--r--fs/ext2/dir.c2
-rw-r--r--fs/ext2/ext2.h5
-rw-r--r--fs/ext2/file.c12
-rw-r--r--fs/ext2/inode.c25
-rw-r--r--fs/ext2/ioctl.c4
-rw-r--r--fs/ext2/namei.c9
-rw-r--r--fs/ext2/super.c596
-rw-r--r--fs/ext4/Kconfig30
-rw-r--r--fs/ext4/balloc.c8
-rw-r--r--fs/ext4/bitmap.c16
-rw-r--r--fs/ext4/block_validity.c5
-rw-r--r--fs/ext4/crypto.c2
-rw-r--r--fs/ext4/dir.c15
-rw-r--r--fs/ext4/ext4.h366
-rw-r--r--fs/ext4/ext4_extents.h7
-rw-r--r--fs/ext4/ext4_jbd2.c23
-rw-r--r--fs/ext4/ext4_jbd2.h117
-rw-r--r--fs/ext4/extents.c782
-rw-r--r--fs/ext4/extents_status.c67
-rw-r--r--fs/ext4/extents_status.h2
-rw-r--r--fs/ext4/fast_commit.c491
-rw-r--r--fs/ext4/fast_commit.h3
-rw-r--r--fs/ext4/file.c61
-rw-r--r--fs/ext4/fsmap.c37
-rw-r--r--fs/ext4/fsync.c12
-rw-r--r--fs/ext4/hash.c4
-rw-r--r--fs/ext4/ialloc.c24
-rw-r--r--fs/ext4/indirect.c6
-rw-r--r--fs/ext4/inline.c313
-rw-r--r--fs/ext4/inode.c1405
-rw-r--r--fs/ext4/ioctl.c359
-rw-r--r--fs/ext4/mballoc-test.c7
-rw-r--r--fs/ext4/mballoc.c1122
-rw-r--r--fs/ext4/mballoc.h9
-rw-r--r--fs/ext4/mmp.c22
-rw-r--r--fs/ext4/move_extent.c788
-rw-r--r--fs/ext4/namei.c232
-rw-r--r--fs/ext4/orphan.c47
-rw-r--r--fs/ext4/page-io.c83
-rw-r--r--fs/ext4/readpage.c35
-rw-r--r--fs/ext4/resize.c6
-rw-r--r--fs/ext4/super.c495
-rw-r--r--fs/ext4/sysfs.c10
-rw-r--r--fs/ext4/verity.c4
-rw-r--r--fs/ext4/xattr.c86
-rw-r--r--fs/ext4/xattr.h10
-rw-r--r--fs/f2fs/Kconfig3
-rw-r--r--fs/f2fs/acl.c34
-rw-r--r--fs/f2fs/acl.h10
-rw-r--r--fs/f2fs/checkpoint.c376
-rw-r--r--fs/f2fs/compress.c361
-rw-r--r--fs/f2fs/data.c732
-rw-r--r--fs/f2fs/debug.c53
-rw-r--r--fs/f2fs/dir.c301
-rw-r--r--fs/f2fs/extent_cache.c36
-rw-r--r--fs/f2fs/f2fs.h800
-rw-r--r--fs/f2fs/file.c524
-rw-r--r--fs/f2fs/gc.c410
-rw-r--r--fs/f2fs/gc.h7
-rw-r--r--fs/f2fs/inline.c323
-rw-r--r--fs/f2fs/inode.c223
-rw-r--r--fs/f2fs/namei.c195
-rw-r--r--fs/f2fs/node.c1139
-rw-r--r--fs/f2fs/node.h91
-rw-r--r--fs/f2fs/recovery.c269
-rw-r--r--fs/f2fs/segment.c611
-rw-r--r--fs/f2fs/segment.h209
-rw-r--r--fs/f2fs/shrinker.c99
-rw-r--r--fs/f2fs/super.c2694
-rw-r--r--fs/f2fs/sysfs.c359
-rw-r--r--fs/f2fs/verity.c4
-rw-r--r--fs/f2fs/xattr.c148
-rw-r--r--fs/f2fs/xattr.h30
-rw-r--r--fs/fat/dir.c7
-rw-r--r--fs/fat/fatent.c2
-rw-r--r--fs/fat/file.c2
-rw-r--r--fs/fat/inode.c25
-rw-r--r--fs/fat/misc.c6
-rw-r--r--fs/fat/namei_msdos.c10
-rw-r--r--fs/fat/namei_vfat.c31
-rw-r--r--fs/fcntl.c27
-rw-r--r--fs/fhandle.c189
-rw-r--r--fs/file.c224
-rw-r--r--fs/file_attr.c490
-rw-r--r--fs/file_table.c123
-rw-r--r--fs/filesystems.c14
-rw-r--r--fs/freevxfs/vxfs_inode.c2
-rw-r--r--fs/fs-writeback.c367
-rw-r--r--fs/fs_context.c25
-rw-r--r--fs/fs_dirent.c (renamed from fs/fs_types.c)2
-rw-r--r--fs/fs_parser.c58
-rw-r--r--fs/fs_struct.c42
-rw-r--r--fs/fsopen.c72
-rw-r--r--fs/fuse/Kconfig15
-rw-r--r--fs/fuse/Makefile6
-rw-r--r--fs/fuse/backing.c179
-rw-r--r--fs/fuse/control.c58
-rw-r--r--fs/fuse/cuse.c3
-rw-r--r--fs/fuse/dax.c44
-rw-r--r--fs/fuse/dev.c692
-rw-r--r--fs/fuse/dev_uring.c1373
-rw-r--r--fs/fuse/dev_uring_i.h211
-rw-r--r--fs/fuse/dir.c484
-rw-r--r--fs/fuse/file.c1101
-rw-r--r--fs/fuse/fuse_dev_i.h79
-rw-r--r--fs/fuse/fuse_i.h204
-rw-r--r--fs/fuse/inode.c214
-rw-r--r--fs/fuse/ioctl.c4
-rw-r--r--fs/fuse/iomode.c3
-rw-r--r--fs/fuse/passthrough.c162
-rw-r--r--fs/fuse/readdir.c40
-rw-r--r--fs/fuse/sysctl.c26
-rw-r--r--fs/fuse/trace.c13
-rw-r--r--fs/fuse/virtio_fs.c29
-rw-r--r--fs/fuse/xattr.c7
-rw-r--r--fs/gfs2/Kconfig1
-rw-r--r--fs/gfs2/aops.c110
-rw-r--r--fs/gfs2/aops.h3
-rw-r--r--fs/gfs2/bmap.c58
-rw-r--r--fs/gfs2/bmap.h1
-rw-r--r--fs/gfs2/dentry.c31
-rw-r--r--fs/gfs2/dir.c6
-rw-r--r--fs/gfs2/file.c43
-rw-r--r--fs/gfs2/glock.c506
-rw-r--r--fs/gfs2/glock.h26
-rw-r--r--fs/gfs2/glops.c117
-rw-r--r--fs/gfs2/incore.h41
-rw-r--r--fs/gfs2/inode.c150
-rw-r--r--fs/gfs2/inode.h12
-rw-r--r--fs/gfs2/lock_dlm.c177
-rw-r--r--fs/gfs2/log.c66
-rw-r--r--fs/gfs2/log.h11
-rw-r--r--fs/gfs2/lops.c103
-rw-r--r--fs/gfs2/lops.h2
-rw-r--r--fs/gfs2/main.c6
-rw-r--r--fs/gfs2/meta_io.c40
-rw-r--r--fs/gfs2/meta_io.h4
-rw-r--r--fs/gfs2/ops_fstype.c124
-rw-r--r--fs/gfs2/quota.c71
-rw-r--r--fs/gfs2/quota.h4
-rw-r--r--fs/gfs2/recovery.c36
-rw-r--r--fs/gfs2/recovery.h2
-rw-r--r--fs/gfs2/super.c183
-rw-r--r--fs/gfs2/super.h1
-rw-r--r--fs/gfs2/sys.c69
-rw-r--r--fs/gfs2/trace_gfs2.h10
-rw-r--r--fs/gfs2/trans.c55
-rw-r--r--fs/gfs2/trans.h2
-rw-r--r--fs/gfs2/util.c373
-rw-r--r--fs/gfs2/util.h92
-rw-r--r--fs/gfs2/xattr.c11
-rw-r--r--fs/gfs2/xattr.h2
-rw-r--r--fs/hfs/.kunitconfig7
-rw-r--r--fs/hfs/Kconfig15
-rw-r--r--fs/hfs/Makefile2
-rw-r--r--fs/hfs/bfind.c17
-rw-r--r--fs/hfs/bitmap.c4
-rw-r--r--fs/hfs/bnode.c159
-rw-r--r--fs/hfs/brec.c37
-rw-r--r--fs/hfs/btree.c63
-rw-r--r--fs/hfs/btree.h113
-rw-r--r--fs/hfs/catalog.c129
-rw-r--r--fs/hfs/dir.c10
-rw-r--r--fs/hfs/extent.c21
-rw-r--r--fs/hfs/hfs.h269
-rw-r--r--fs/hfs/hfs_fs.h129
-rw-r--r--fs/hfs/inode.c37
-rw-r--r--fs/hfs/mdb.c20
-rw-r--r--fs/hfs/string.c5
-rw-r--r--fs/hfs/string_test.c133
-rw-r--r--fs/hfs/super.c10
-rw-r--r--fs/hfs/sysdep.c3
-rw-r--r--fs/hfsplus/.kunitconfig8
-rw-r--r--fs/hfsplus/Kconfig15
-rw-r--r--fs/hfsplus/Makefile3
-rw-r--r--fs/hfsplus/attributes.c8
-rw-r--r--fs/hfsplus/bfind.c14
-rw-r--r--fs/hfsplus/bitmap.c10
-rw-r--r--fs/hfsplus/bnode.c141
-rw-r--r--fs/hfsplus/brec.c12
-rw-r--r--fs/hfsplus/btree.c12
-rw-r--r--fs/hfsplus/catalog.c6
-rw-r--r--fs/hfsplus/dir.c15
-rw-r--r--fs/hfsplus/extents.c30
-rw-r--r--fs/hfsplus/hfsplus_fs.h122
-rw-r--r--fs/hfsplus/hfsplus_raw.h394
-rw-r--r--fs/hfsplus/inode.c56
-rw-r--r--fs/hfsplus/options.c1
-rw-r--r--fs/hfsplus/super.c132
-rw-r--r--fs/hfsplus/unicode.c63
-rw-r--r--fs/hfsplus/unicode_test.c1579
-rw-r--r--fs/hfsplus/wrapper.c46
-rw-r--r--fs/hfsplus/xattr.c38
-rw-r--r--fs/hostfs/hostfs.h36
-rw-r--r--fs/hostfs/hostfs_kern.c149
-rw-r--r--fs/hostfs/hostfs_user.c59
-rw-r--r--fs/hpfs/anode.c43
-rw-r--r--fs/hpfs/dir.c2
-rw-r--r--fs/hpfs/ea.c2
-rw-r--r--fs/hpfs/file.c24
-rw-r--r--fs/hpfs/hpfs.h44
-rw-r--r--fs/hpfs/inode.c4
-rw-r--r--fs/hpfs/map.c8
-rw-r--r--fs/hpfs/namei.c28
-rw-r--r--fs/hpfs/super.c11
-rw-r--r--fs/hugetlbfs/inode.c172
-rw-r--r--fs/init.c30
-rw-r--r--fs/inode.c560
-rw-r--r--fs/internal.h34
-rw-r--r--fs/ioctl.c337
-rw-r--r--fs/iomap/Makefile10
-rw-r--r--fs/iomap/bio.c88
-rw-r--r--fs/iomap/buffered-io.c1363
-rw-r--r--fs/iomap/direct-io.c437
-rw-r--r--fs/iomap/fiemap.c24
-rw-r--r--fs/iomap/internal.h21
-rw-r--r--fs/iomap/ioend.c432
-rw-r--r--fs/iomap/iter.c96
-rw-r--r--fs/iomap/seek.c20
-rw-r--r--fs/iomap/swapfile.c11
-rw-r--r--fs/iomap/trace.c1
-rw-r--r--fs/iomap/trace.h47
-rw-r--r--fs/isofs/compress.c12
-rw-r--r--fs/isofs/dir.c3
-rw-r--r--fs/isofs/export.c2
-rw-r--r--fs/isofs/inode.c25
-rw-r--r--fs/isofs/isofs.h4
-rw-r--r--fs/isofs/rock.c40
-rw-r--r--fs/isofs/rock.h6
-rw-r--r--fs/isofs/util.c49
-rw-r--r--fs/jbd2/Kconfig2
-rw-r--r--fs/jbd2/checkpoint.c5
-rw-r--r--fs/jbd2/commit.c20
-rw-r--r--fs/jbd2/journal.c126
-rw-r--r--fs/jbd2/recovery.c90
-rw-r--r--fs/jbd2/revoke.c38
-rw-r--r--fs/jbd2/transaction.c65
-rw-r--r--fs/jffs2/compr_rtime.c2
-rw-r--r--fs/jffs2/dir.c18
-rw-r--r--fs/jffs2/erase.c4
-rw-r--r--fs/jffs2/file.c34
-rw-r--r--fs/jffs2/fs.c4
-rw-r--r--fs/jffs2/scan.c4
-rw-r--r--fs/jffs2/summary.c7
-rw-r--r--fs/jffs2/wbuf.c2
-rw-r--r--fs/jfs/file.c9
-rw-r--r--fs/jfs/inode.c30
-rw-r--r--fs/jfs/ioctl.c4
-rw-r--r--fs/jfs/jfs_discard.c3
-rw-r--r--fs/jfs/jfs_dmap.c53
-rw-r--r--fs/jfs/jfs_dtree.c25
-rw-r--r--fs/jfs/jfs_extent.c10
-rw-r--r--fs/jfs/jfs_imap.c17
-rw-r--r--fs/jfs/jfs_incore.h6
-rw-r--r--fs/jfs/jfs_inode.h4
-rw-r--r--fs/jfs/jfs_logmgr.c1
-rw-r--r--fs/jfs/jfs_metapage.c114
-rw-r--r--fs/jfs/jfs_mount.c10
-rw-r--r--fs/jfs/jfs_txnmgr.c11
-rw-r--r--fs/jfs/jfs_xtree.c142
-rw-r--r--fs/jfs/namei.c11
-rw-r--r--fs/jfs/super.c8
-rw-r--r--fs/jfs/xattr.c15
-rw-r--r--fs/kernfs/dir.c254
-rw-r--r--fs/kernfs/file.c67
-rw-r--r--fs/kernfs/inode.c72
-rw-r--r--fs/kernfs/kernfs-internal.h45
-rw-r--r--fs/kernfs/mount.c64
-rw-r--r--fs/kernfs/symlink.c30
-rw-r--r--fs/libfs.c390
-rw-r--r--fs/lockd/Makefile2
-rw-r--r--fs/lockd/netlink.c45
-rw-r--r--fs/lockd/netlink.h20
-rw-r--r--fs/lockd/netns.h3
-rw-r--r--fs/lockd/svc.c139
-rw-r--r--fs/lockd/svclock.c14
-rw-r--r--fs/lockd/svcshare.c6
-rw-r--r--fs/locks.c113
-rw-r--r--fs/minix/dir.c2
-rw-r--r--fs/minix/file.c2
-rw-r--r--fs/minix/inode.c33
-rw-r--r--fs/minix/minix.h9
-rw-r--r--fs/minix/namei.c47
-rw-r--r--fs/mnt_idmapping.c51
-rw-r--r--fs/mount.h154
-rw-r--r--fs/mpage.c76
-rw-r--r--fs/namei.c1755
-rw-r--r--fs/namespace.c3080
-rw-r--r--fs/netfs/Makefile5
-rw-r--r--fs/netfs/buffered_read.c327
-rw-r--r--fs/netfs/buffered_write.c47
-rw-r--r--fs/netfs/direct_read.c101
-rw-r--r--fs/netfs/direct_write.c52
-rw-r--r--fs/netfs/fscache_cache.c2
-rw-r--r--fs/netfs/fscache_cookie.c2
-rw-r--r--fs/netfs/fscache_io.c10
-rw-r--r--fs/netfs/internal.h102
-rw-r--r--fs/netfs/main.c17
-rw-r--r--fs/netfs/misc.c405
-rw-r--r--fs/netfs/objects.c97
-rw-r--r--fs/netfs/read_collect.c707
-rw-r--r--fs/netfs/read_pgpriv2.c208
-rw-r--r--fs/netfs/read_retry.c225
-rw-r--r--fs/netfs/read_single.c195
-rw-r--r--fs/netfs/rolling_buffer.c222
-rw-r--r--fs/netfs/stats.c13
-rw-r--r--fs/netfs/write_collect.c387
-rw-r--r--fs/netfs/write_issue.c277
-rw-r--r--fs/netfs/write_retry.c230
-rw-r--r--fs/nfs/Kconfig5
-rw-r--r--fs/nfs/blocklayout/blocklayout.c12
-rw-r--r--fs/nfs/blocklayout/dev.c13
-rw-r--r--fs/nfs/blocklayout/extent_tree.c104
-rw-r--r--fs/nfs/blocklayout/rpc_pipefs.c53
-rw-r--r--fs/nfs/callback.c14
-rw-r--r--fs/nfs/callback_proc.c2
-rw-r--r--fs/nfs/callback_xdr.c1
-rw-r--r--fs/nfs/client.c83
-rw-r--r--fs/nfs/delegation.c242
-rw-r--r--fs/nfs/delegation.h4
-rw-r--r--fs/nfs/dir.c136
-rw-r--r--fs/nfs/direct.c26
-rw-r--r--fs/nfs/export.c14
-rw-r--r--fs/nfs/file.c93
-rw-r--r--fs/nfs/filelayout/filelayout.c10
-rw-r--r--fs/nfs/filelayout/filelayoutdev.c16
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c1028
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.h65
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c129
-rw-r--r--fs/nfs/fs_context.c116
-rw-r--r--fs/nfs/fscache.c16
-rw-r--r--fs/nfs/fscache.h3
-rw-r--r--fs/nfs/inode.c184
-rw-r--r--fs/nfs/internal.h58
-rw-r--r--fs/nfs/io.c13
-rw-r--r--fs/nfs/localio.c629
-rw-r--r--fs/nfs/mount_clnt.c68
-rw-r--r--fs/nfs/namespace.c8
-rw-r--r--fs/nfs/netns.h6
-rw-r--r--fs/nfs/nfs2xdr.c2
-rw-r--r--fs/nfs/nfs3acl.c2
-rw-r--r--fs/nfs/nfs3client.c16
-rw-r--r--fs/nfs/nfs3proc.c82
-rw-r--r--fs/nfs/nfs3xdr.c2
-rw-r--r--fs/nfs/nfs42.h1
-rw-r--r--fs/nfs/nfs42proc.c260
-rw-r--r--fs/nfs/nfs42xdr.c156
-rw-r--r--fs/nfs/nfs4_fs.h8
-rw-r--r--fs/nfs/nfs4client.c205
-rw-r--r--fs/nfs/nfs4file.c42
-rw-r--r--fs/nfs/nfs4getroot.c14
-rw-r--r--fs/nfs/nfs4idmap.c21
-rw-r--r--fs/nfs/nfs4proc.c357
-rw-r--r--fs/nfs/nfs4renewd.c2
-rw-r--r--fs/nfs/nfs4session.h4
-rw-r--r--fs/nfs/nfs4state.c18
-rw-r--r--fs/nfs/nfs4super.c44
-rw-r--r--fs/nfs/nfs4sysctl.c2
-rw-r--r--fs/nfs/nfs4trace.c2
-rw-r--r--fs/nfs/nfs4trace.h213
-rw-r--r--fs/nfs/nfs4xdr.c48
-rw-r--r--fs/nfs/nfstrace.h259
-rw-r--r--fs/nfs/pagelist.c14
-rw-r--r--fs/nfs/pnfs.c96
-rw-r--r--fs/nfs/pnfs.h4
-rw-r--r--fs/nfs/pnfs_nfs.c107
-rw-r--r--fs/nfs/proc.c18
-rw-r--r--fs/nfs/read.c3
-rw-r--r--fs/nfs/super.c26
-rw-r--r--fs/nfs/symlink.c20
-rw-r--r--fs/nfs/sysctl.c2
-rw-r--r--fs/nfs/sysfs.c117
-rw-r--r--fs/nfs/unlink.c11
-rw-r--r--fs/nfs/write.c190
-rw-r--r--fs/nfs_common/Makefile3
-rw-r--r--fs/nfs_common/common.c89
-rw-r--r--fs/nfs_common/localio_trace.c10
-rw-r--r--fs/nfs_common/localio_trace.h56
-rw-r--r--fs/nfs_common/nfsacl.c8
-rw-r--r--fs/nfs_common/nfslocalio.c297
-rw-r--r--fs/nfsd/Kconfig21
-rw-r--r--fs/nfsd/Makefile17
-rw-r--r--fs/nfsd/auth.c3
-rw-r--r--fs/nfsd/blocklayout.c192
-rw-r--r--fs/nfsd/blocklayoutxdr.c207
-rw-r--r--fs/nfsd/blocklayoutxdr.h22
-rw-r--r--fs/nfsd/debugfs.c143
-rw-r--r--fs/nfsd/export.c127
-rw-r--r--fs/nfsd/export.h11
-rw-r--r--fs/nfsd/filecache.c259
-rw-r--r--fs/nfsd/filecache.h15
-rw-r--r--fs/nfsd/flexfilelayout.c12
-rw-r--r--fs/nfsd/flexfilelayoutxdr.c3
-rw-r--r--fs/nfsd/localio.c82
-rw-r--r--fs/nfsd/lockd.c15
-rw-r--r--fs/nfsd/netlink.c1
-rw-r--r--fs/nfsd/netlink.h1
-rw-r--r--fs/nfsd/netns.h18
-rw-r--r--fs/nfsd/nfs2acl.c2
-rw-r--r--fs/nfsd/nfs3acl.c2
-rw-r--r--fs/nfsd/nfs3proc.c84
-rw-r--r--fs/nfsd/nfs3xdr.c4
-rw-r--r--fs/nfsd/nfs4callback.c348
-rw-r--r--fs/nfsd/nfs4layouts.c12
-rw-r--r--fs/nfsd/nfs4proc.c252
-rw-r--r--fs/nfsd/nfs4recover.c378
-rw-r--r--fs/nfsd/nfs4state.c1065
-rw-r--r--fs/nfsd/nfs4xdr.c463
-rw-r--r--fs/nfsd/nfs4xdr_gen.c256
-rw-r--r--fs/nfsd/nfs4xdr_gen.h25
-rw-r--r--fs/nfsd/nfscache.c15
-rw-r--r--fs/nfsd/nfsctl.c343
-rw-r--r--fs/nfsd/nfsd.h74
-rw-r--r--fs/nfsd/nfsfh.c75
-rw-r--r--fs/nfsd/nfsfh.h71
-rw-r--r--fs/nfsd/nfsproc.c65
-rw-r--r--fs/nfsd/nfssvc.c120
-rw-r--r--fs/nfsd/nfsxdr.c4
-rw-r--r--fs/nfsd/pnfs.h5
-rw-r--r--fs/nfsd/state.h101
-rw-r--r--fs/nfsd/stats.c4
-rw-r--r--fs/nfsd/stats.h2
-rw-r--r--fs/nfsd/trace.h420
-rw-r--r--fs/nfsd/vfs.c697
-rw-r--r--fs/nfsd/vfs.h47
-rw-r--r--fs/nfsd/xdr4.h69
-rw-r--r--fs/nfsd/xdr4cb.h15
-rw-r--r--fs/nilfs2/alloc.c67
-rw-r--r--fs/nilfs2/alloc.h2
-rw-r--r--fs/nilfs2/bmap.c124
-rw-r--r--fs/nilfs2/btnode.c4
-rw-r--r--fs/nilfs2/btree.c11
-rw-r--r--fs/nilfs2/cpfile.c71
-rw-r--r--fs/nilfs2/dat.c47
-rw-r--r--fs/nilfs2/dir.c17
-rw-r--r--fs/nilfs2/direct.c3
-rw-r--r--fs/nilfs2/file.c8
-rw-r--r--fs/nilfs2/gcinode.c26
-rw-r--r--fs/nilfs2/ifile.c39
-rw-r--r--fs/nilfs2/inode.c60
-rw-r--r--fs/nilfs2/ioctl.c275
-rw-r--r--fs/nilfs2/mdt.c65
-rw-r--r--fs/nilfs2/namei.c52
-rw-r--r--fs/nilfs2/nilfs.h10
-rw-r--r--fs/nilfs2/page.c41
-rw-r--r--fs/nilfs2/recovery.c65
-rw-r--r--fs/nilfs2/segbuf.c12
-rw-r--r--fs/nilfs2/segment.c93
-rw-r--r--fs/nilfs2/segment.h1
-rw-r--r--fs/nilfs2/sufile.c114
-rw-r--r--fs/nilfs2/sufile.h22
-rw-r--r--fs/nilfs2/super.c10
-rw-r--r--fs/nilfs2/sysfs.c4
-rw-r--r--fs/nilfs2/sysfs.h8
-rw-r--r--fs/nilfs2/the_nilfs.c29
-rw-r--r--fs/nls/nls_base.c27
-rw-r--r--fs/notify/dnotify/dnotify.c10
-rw-r--r--fs/notify/fanotify/fanotify.c78
-rw-r--r--fs/notify/fanotify/fanotify.h44
-rw-r--r--fs/notify/fanotify/fanotify_user.c441
-rw-r--r--fs/notify/fdinfo.c15
-rw-r--r--fs/notify/fsnotify.c161
-rw-r--r--fs/notify/fsnotify.h11
-rw-r--r--fs/notify/inotify/inotify_fsnotify.c4
-rw-r--r--fs/notify/inotify/inotify_user.c2
-rw-r--r--fs/notify/mark.c18
-rw-r--r--fs/nsfs.c389
-rw-r--r--fs/ntfs3/attrib.c174
-rw-r--r--fs/ntfs3/bitmap.c1
-rw-r--r--fs/ntfs3/dir.c9
-rw-r--r--fs/ntfs3/file.c267
-rw-r--r--fs/ntfs3/frecord.c439
-rw-r--r--fs/ntfs3/fslog.c32
-rw-r--r--fs/ntfs3/fsntfs.c172
-rw-r--r--fs/ntfs3/index.c31
-rw-r--r--fs/ntfs3/inode.c185
-rw-r--r--fs/ntfs3/namei.c38
-rw-r--r--fs/ntfs3/ntfs.h5
-rw-r--r--fs/ntfs3/ntfs_fs.h93
-rw-r--r--fs/ntfs3/record.c81
-rw-r--r--fs/ntfs3/run.c27
-rw-r--r--fs/ntfs3/super.c161
-rw-r--r--fs/ntfs3/xattr.c40
-rw-r--r--fs/ocfs2/acl.c1
-rw-r--r--fs/ocfs2/alloc.c165
-rw-r--r--fs/ocfs2/alloc.h8
-rw-r--r--fs/ocfs2/aops.c361
-rw-r--r--fs/ocfs2/aops.h17
-rw-r--r--fs/ocfs2/cluster/heartbeat.c28
-rw-r--r--fs/ocfs2/cluster/masklog.h2
-rw-r--r--fs/ocfs2/cluster/quorum.c6
-rw-r--r--fs/ocfs2/cluster/tcp.c21
-rw-r--r--fs/ocfs2/dcache.c14
-rw-r--r--fs/ocfs2/dir.c75
-rw-r--r--fs/ocfs2/dlm/dlmapi.h2
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c9
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c3
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c23
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c16
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c46
-rw-r--r--fs/ocfs2/dlmglue.c34
-rw-r--r--fs/ocfs2/dlmglue.h6
-rw-r--r--fs/ocfs2/extent_map.c20
-rw-r--r--fs/ocfs2/file.c14
-rw-r--r--fs/ocfs2/filecheck.c2
-rw-r--r--fs/ocfs2/inode.c177
-rw-r--r--fs/ocfs2/inode.h1
-rw-r--r--fs/ocfs2/ioctl.c24
-rw-r--r--fs/ocfs2/ioctl.h4
-rw-r--r--fs/ocfs2/journal.c95
-rw-r--r--fs/ocfs2/journal.h1
-rw-r--r--fs/ocfs2/localalloc.c27
-rw-r--r--fs/ocfs2/mmap.c23
-rw-r--r--fs/ocfs2/mmap.h2
-rw-r--r--fs/ocfs2/move_extents.c54
-rw-r--r--fs/ocfs2/namei.c32
-rw-r--r--fs/ocfs2/ocfs2.h17
-rw-r--r--fs/ocfs2/ocfs2_fs.h32
-rw-r--r--fs/ocfs2/ocfs2_ioctl.h2
-rw-r--r--fs/ocfs2/ocfs2_lockid.h2
-rw-r--r--fs/ocfs2/ocfs2_trace.h22
-rw-r--r--fs/ocfs2/quota_global.c9
-rw-r--r--fs/ocfs2/quota_local.c12
-rw-r--r--fs/ocfs2/refcounttree.c50
-rw-r--r--fs/ocfs2/reservations.h4
-rw-r--r--fs/ocfs2/stack_o2cb.c2
-rw-r--r--fs/ocfs2/stack_user.c18
-rw-r--r--fs/ocfs2/stackglue.c5
-rw-r--r--fs/ocfs2/stackglue.h2
-rw-r--r--fs/ocfs2/suballoc.c38
-rw-r--r--fs/ocfs2/suballoc.h1
-rw-r--r--fs/ocfs2/super.c600
-rw-r--r--fs/ocfs2/symlink.c16
-rw-r--r--fs/ocfs2/sysfile.c12
-rw-r--r--fs/ocfs2/xattr.c14
-rw-r--r--fs/omfs/dir.c6
-rw-r--r--fs/omfs/file.c9
-rw-r--r--fs/omfs/inode.c179
-rw-r--r--fs/open.c192
-rw-r--r--fs/openpromfs/inode.c2
-rw-r--r--fs/orangefs/dcache.c22
-rw-r--r--fs/orangefs/file.c14
-rw-r--r--fs/orangefs/inode.c184
-rw-r--r--fs/orangefs/namei.c18
-rw-r--r--fs/orangefs/orangefs-bufmap.c25
-rw-r--r--fs/orangefs/orangefs-bufmap.h3
-rw-r--r--fs/orangefs/orangefs-debug.h43
-rw-r--r--fs/orangefs/orangefs-debugfs.c82
-rw-r--r--fs/orangefs/orangefs-kernel.h10
-rw-r--r--fs/orangefs/orangefs-mod.c3
-rw-r--r--fs/orangefs/orangefs-sysfs.c28
-rw-r--r--fs/orangefs/orangefs-utils.c6
-rw-r--r--fs/orangefs/super.c193
-rw-r--r--fs/orangefs/xattr.c12
-rw-r--r--fs/overlayfs/copy_up.c201
-rw-r--r--fs/overlayfs/dir.c762
-rw-r--r--fs/overlayfs/export.c55
-rw-r--r--fs/overlayfs/file.c104
-rw-r--r--fs/overlayfs/inode.c137
-rw-r--r--fs/overlayfs/namei.c504
-rw-r--r--fs/overlayfs/overlayfs.h144
-rw-r--r--fs/overlayfs/ovl_entry.h4
-rw-r--r--fs/overlayfs/params.c86
-rw-r--r--fs/overlayfs/params.h1
-rw-r--r--fs/overlayfs/readdir.c313
-rw-r--r--fs/overlayfs/super.c260
-rw-r--r--fs/overlayfs/util.c85
-rw-r--r--fs/overlayfs/xattrs.c35
-rw-r--r--fs/pidfs.c861
-rw-r--r--fs/pipe.c283
-rw-r--r--fs/pnode.c749
-rw-r--r--fs/pnode.h32
-rw-r--r--fs/posix_acl.c8
-rw-r--r--fs/proc/Kconfig19
-rw-r--r--fs/proc/array.c55
-rw-r--r--fs/proc/base.c134
-rw-r--r--fs/proc/fd.c14
-rw-r--r--fs/proc/generic.c57
-rw-r--r--fs/proc/inode.c33
-rw-r--r--fs/proc/internal.h81
-rw-r--r--fs/proc/kcore.c91
-rw-r--r--fs/proc/meminfo.c8
-rw-r--r--fs/proc/namespaces.c11
-rw-r--r--fs/proc/page.c206
-rw-r--r--fs/proc/proc_sysctl.c28
-rw-r--r--fs/proc/root.c122
-rw-r--r--fs/proc/self.c10
-rw-r--r--fs/proc/task_mmu.c714
-rw-r--r--fs/proc/task_nommu.c14
-rw-r--r--fs/proc/thread_self.c11
-rw-r--r--fs/proc/vmcore.c349
-rw-r--r--fs/proc_namespace.c12
-rw-r--r--fs/pstore/blk.c4
-rw-r--r--fs/pstore/inode.c125
-rw-r--r--fs/pstore/internal.h4
-rw-r--r--fs/pstore/platform.c13
-rw-r--r--fs/pstore/ram.c4
-rw-r--r--fs/pstore/zone.c26
-rw-r--r--fs/qnx4/inode.c2
-rw-r--r--fs/qnx6/inode.c13
-rw-r--r--fs/quota/dquot.c18
-rw-r--r--fs/ramfs/file-mmu.c4
-rw-r--r--fs/ramfs/file-nommu.c12
-rw-r--r--fs/ramfs/inode.c17
-rw-r--r--fs/read_write.c35
-rw-r--r--fs/readdir.c47
-rw-r--r--fs/resctrl/Kconfig39
-rw-r--r--fs/resctrl/Makefile6
-rw-r--r--fs/resctrl/ctrlmondata.c959
-rw-r--r--fs/resctrl/internal.h495
-rw-r--r--fs/resctrl/monitor.c1811
-rw-r--r--fs/resctrl/monitor_trace.h33
-rw-r--r--fs/resctrl/pseudo_lock.c1099
-rw-r--r--fs/resctrl/rdtgroup.c4584
-rw-r--r--fs/romfs/mmap-nommu.c6
-rw-r--r--fs/romfs/super.c2
-rw-r--r--fs/select.c22
-rw-r--r--fs/signalfd.c30
-rw-r--r--fs/smb/client/Kconfig9
-rw-r--r--fs/smb/client/Makefile2
-rw-r--r--fs/smb/client/asn1.c2
-rw-r--r--fs/smb/client/cached_dir.c199
-rw-r--r--fs/smb/client/cached_dir.h31
-rw-r--r--fs/smb/client/cifs_debug.c268
-rw-r--r--fs/smb/client/cifs_debug.h6
-rw-r--r--fs/smb/client/cifs_fs_sb.h1
-rw-r--r--fs/smb/client/cifs_ioctl.h2
-rw-r--r--fs/smb/client/cifs_spnego.c68
-rw-r--r--fs/smb/client/cifs_spnego.h2
-rw-r--r--fs/smb/client/cifs_swn.c20
-rw-r--r--fs/smb/client/cifs_unicode.c3
-rw-r--r--fs/smb/client/cifs_unicode.h3
-rw-r--r--fs/smb/client/cifsacl.c95
-rw-r--r--fs/smb/client/cifsencrypt.c535
-rw-r--r--fs/smb/client/cifsfs.c157
-rw-r--r--fs/smb/client/cifsfs.h13
-rw-r--r--fs/smb/client/cifsglob.h458
-rw-r--r--fs/smb/client/cifspdu.h610
-rw-r--r--fs/smb/client/cifsproto.h254
-rw-r--r--fs/smb/client/cifssmb.c1279
-rw-r--r--fs/smb/client/cifstransport.c263
-rw-r--r--fs/smb/client/compress.c94
-rw-r--r--fs/smb/client/compress.h19
-rw-r--r--fs/smb/client/connect.c823
-rw-r--r--fs/smb/client/dfs.c94
-rw-r--r--fs/smb/client/dfs.h51
-rw-r--r--fs/smb/client/dfs_cache.c98
-rw-r--r--fs/smb/client/dir.c126
-rw-r--r--fs/smb/client/dns_resolve.c108
-rw-r--r--fs/smb/client/dns_resolve.h23
-rw-r--r--fs/smb/client/file.c247
-rw-r--r--fs/smb/client/fs_context.c329
-rw-r--r--fs/smb/client/fs_context.h94
-rw-r--r--fs/smb/client/inode.c506
-rw-r--r--fs/smb/client/ioctl.c2
-rw-r--r--fs/smb/client/link.c104
-rw-r--r--fs/smb/client/misc.c153
-rw-r--r--fs/smb/client/namespace.c23
-rw-r--r--fs/smb/client/netmisc.c25
-rw-r--r--fs/smb/client/nterr.c9
-rw-r--r--fs/smb/client/nterr.h1
-rw-r--r--fs/smb/client/ntlmssp.h8
-rw-r--r--fs/smb/client/readdir.c138
-rw-r--r--fs/smb/client/reparse.c711
-rw-r--r--fs/smb/client/reparse.h41
-rw-r--r--fs/smb/client/rfc1002pdu.h14
-rw-r--r--fs/smb/client/sess.c192
-rw-r--r--fs/smb/client/smb1ops.c558
-rw-r--r--fs/smb/client/smb2file.c80
-rw-r--r--fs/smb/client/smb2glob.h4
-rw-r--r--fs/smb/client/smb2inode.c611
-rw-r--r--fs/smb/client/smb2maperror.c56
-rw-r--r--fs/smb/client/smb2misc.c84
-rw-r--r--fs/smb/client/smb2ops.c774
-rw-r--r--fs/smb/client/smb2pdu.c680
-rw-r--r--fs/smb/client/smb2pdu.h110
-rw-r--r--fs/smb/client/smb2proto.h46
-rw-r--r--fs/smb/client/smb2transport.c251
-rw-r--r--fs/smb/client/smbdirect.c2287
-rw-r--r--fs/smb/client/smbdirect.h259
-rw-r--r--fs/smb/client/trace.c2
-rw-r--r--fs/smb/client/trace.h280
-rw-r--r--fs/smb/client/transport.c803
-rw-r--r--fs/smb/client/xattr.c52
-rw-r--r--fs/smb/common/Makefile1
-rw-r--r--fs/smb/common/arc4.h23
-rw-r--r--fs/smb/common/cifs_arc4.c75
-rw-r--r--fs/smb/common/fscc.h174
-rw-r--r--fs/smb/common/smb2pdu.h329
-rw-r--r--fs/smb/common/smb2status.h5
-rw-r--r--fs/smb/common/smbacl.h11
-rw-r--r--fs/smb/common/smbdirect/smbdirect.h44
-rw-r--r--fs/smb/common/smbdirect/smbdirect_pdu.h55
-rw-r--r--fs/smb/common/smbdirect/smbdirect_socket.h547
-rw-r--r--fs/smb/common/smbfsctl.h3
-rw-r--r--fs/smb/common/smbglob.h71
-rw-r--r--fs/smb/server/Kconfig10
-rw-r--r--fs/smb/server/auth.c451
-rw-r--r--fs/smb/server/auth.h12
-rw-r--r--fs/smb/server/connection.c72
-rw-r--r--fs/smb/server/connection.h38
-rw-r--r--fs/smb/server/crypto_ctx.c32
-rw-r--r--fs/smb/server/crypto_ctx.h19
-rw-r--r--fs/smb/server/ksmbd_netlink.h6
-rw-r--r--fs/smb/server/ksmbd_work.c5
-rw-r--r--fs/smb/server/ksmbd_work.h1
-rw-r--r--fs/smb/server/mgmt/share_config.c2
-rw-r--r--fs/smb/server/mgmt/tree_connect.c18
-rw-r--r--fs/smb/server/mgmt/tree_connect.h1
-rw-r--r--fs/smb/server/mgmt/user_session.c80
-rw-r--r--fs/smb/server/mgmt/user_session.h3
-rw-r--r--fs/smb/server/misc.c15
-rw-r--r--fs/smb/server/oplock.c109
-rw-r--r--fs/smb/server/oplock.h2
-rw-r--r--fs/smb/server/server.c31
-rw-r--r--fs/smb/server/server.h3
-rw-r--r--fs/smb/server/smb2misc.c2
-rw-r--r--fs/smb/server/smb2ops.c38
-rw-r--r--fs/smb/server/smb2pdu.c709
-rw-r--r--fs/smb/server/smb2pdu.h124
-rw-r--r--fs/smb/server/smb_common.c16
-rw-r--r--fs/smb/server/smb_common.h286
-rw-r--r--fs/smb/server/smbacl.c71
-rw-r--r--fs/smb/server/smbacl.h2
-rw-r--r--fs/smb/server/transport_ipc.c78
-rw-r--r--fs/smb/server/transport_ipc.h2
-rw-r--r--fs/smb/server/transport_rdma.c2242
-rw-r--r--fs/smb/server/transport_rdma.h49
-rw-r--r--fs/smb/server/transport_tcp.c239
-rw-r--r--fs/smb/server/transport_tcp.h2
-rw-r--r--fs/smb/server/vfs.c445
-rw-r--r--fs/smb/server/vfs.h17
-rw-r--r--fs/smb/server/vfs_cache.c129
-rw-r--r--fs/smb/server/vfs_cache.h3
-rw-r--r--fs/splice.c69
-rw-r--r--fs/squashfs/Kconfig27
-rw-r--r--fs/squashfs/block.c57
-rw-r--r--fs/squashfs/cache.c12
-rw-r--r--fs/squashfs/file.c234
-rw-r--r--fs/squashfs/file_cache.c6
-rw-r--r--fs/squashfs/file_direct.c11
-rw-r--r--fs/squashfs/inode.c41
-rw-r--r--fs/squashfs/squashfs.h14
-rw-r--r--fs/squashfs/squashfs_fs.h1
-rw-r--r--fs/squashfs/squashfs_fs_i.h2
-rw-r--r--fs/squashfs/super.c30
-rw-r--r--fs/stack.c4
-rw-r--r--fs/stat.c80
-rw-r--r--fs/super.c486
-rw-r--r--fs/sync.c19
-rw-r--r--fs/sysctls.c2
-rw-r--r--fs/sysfs/dir.c2
-rw-r--r--fs/sysfs/file.c58
-rw-r--r--fs/sysfs/group.c42
-rw-r--r--fs/sysv/Kconfig38
-rw-r--r--fs/sysv/Makefile9
-rw-r--r--fs/sysv/balloc.c240
-rw-r--r--fs/sysv/dir.c378
-rw-r--r--fs/sysv/file.c59
-rw-r--r--fs/sysv/ialloc.c235
-rw-r--r--fs/sysv/inode.c354
-rw-r--r--fs/sysv/itree.c511
-rw-r--r--fs/sysv/namei.c280
-rw-r--r--fs/sysv/super.c595
-rw-r--r--fs/sysv/sysv.h245
-rw-r--r--fs/timerfd.c34
-rw-r--r--fs/tracefs/event_inode.c7
-rw-r--r--fs/tracefs/inode.c54
-rw-r--r--fs/ubifs/compress.c243
-rw-r--r--fs/ubifs/crypto.c4
-rw-r--r--fs/ubifs/debug.c23
-rw-r--r--fs/ubifs/dir.c10
-rw-r--r--fs/ubifs/file.c110
-rw-r--r--fs/ubifs/io.c16
-rw-r--r--fs/ubifs/ioctl.c4
-rw-r--r--fs/ubifs/journal.c13
-rw-r--r--fs/ubifs/lpt.c12
-rw-r--r--fs/ubifs/lpt_commit.c1
-rw-r--r--fs/ubifs/recovery.c4
-rw-r--r--fs/ubifs/super.c6
-rw-r--r--fs/ubifs/tnc_misc.c9
-rw-r--r--fs/ubifs/ubifs.h36
-rw-r--r--fs/udf/file.c2
-rw-r--r--fs/udf/inode.c45
-rw-r--r--fs/udf/namei.c28
-rw-r--r--fs/udf/super.c13
-rw-r--r--fs/udf/truncate.c2
-rw-r--r--fs/ufs/dir.c2
-rw-r--r--fs/ufs/file.c2
-rw-r--r--fs/ufs/inode.c18
-rw-r--r--fs/ufs/namei.c8
-rw-r--r--fs/ufs/super.c307
-rw-r--r--fs/ufs/ufs.h9
-rw-r--r--fs/unicode/Kconfig5
-rw-r--r--fs/unicode/Makefile2
-rw-r--r--fs/unicode/mkutf8data.c70
-rw-r--r--fs/unicode/tests/.kunitconfig3
-rw-r--r--fs/unicode/tests/utf8_kunit.c (renamed from fs/unicode/utf8-selftest.c)153
-rw-r--r--fs/unicode/utf8-norm.c2
-rw-r--r--fs/unicode/utf8data.c_shipped6703
-rw-r--r--fs/userfaultfd.c325
-rw-r--r--fs/utimes.c5
-rw-r--r--fs/vboxsf/Kconfig2
-rw-r--r--fs/vboxsf/dir.c36
-rw-r--r--fs/vboxsf/file.c60
-rw-r--r--fs/vboxsf/super.c4
-rw-r--r--fs/verity/Kconfig10
-rw-r--r--fs/verity/enable.c23
-rw-r--r--fs/verity/fsverity_private.h33
-rw-r--r--fs/verity/hash_algs.c195
-rw-r--r--fs/verity/init.c2
-rw-r--r--fs/verity/measure.c1
-rw-r--r--fs/verity/open.c60
-rw-r--r--fs/verity/read_metadata.c1
-rw-r--r--fs/verity/verify.c181
-rw-r--r--fs/xattr.c43
-rw-r--r--fs/xfs/Kconfig36
-rw-r--r--fs/xfs/Makefile13
-rw-r--r--fs/xfs/libxfs/xfs_ag.c2
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.c10
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c54
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.c52
-rw-r--r--fs/xfs/libxfs/xfs_attr.c4
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c25
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c7
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c401
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h13
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c143
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.h3
-rw-r--r--fs/xfs/libxfs/xfs_btree.c479
-rw-r--r--fs/xfs/libxfs/xfs_btree.h71
-rw-r--r--fs/xfs/libxfs/xfs_btree_mem.c1
-rw-r--r--fs/xfs/libxfs/xfs_btree_staging.c10
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c8
-rw-r--r--fs/xfs/libxfs/xfs_defer.h2
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c11
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h1
-rw-r--r--fs/xfs/libxfs/xfs_errortag.h118
-rw-r--r--fs/xfs/libxfs/xfs_exchmaps.c8
-rw-r--r--fs/xfs/libxfs/xfs_format.h73
-rw-r--r--fs/xfs/libxfs/xfs_fs.h24
-rw-r--r--fs/xfs/libxfs/xfs_group.c17
-rw-r--r--fs/xfs/libxfs/xfs_group.h40
-rw-r--r--fs/xfs/libxfs/xfs_health.h6
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c57
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c28
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c92
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c204
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h6
-rw-r--r--fs/xfs/libxfs/xfs_inode_util.c12
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h203
-rw-r--r--fs/xfs/libxfs/xfs_log_recover.h10
-rw-r--r--fs/xfs/libxfs/xfs_log_rlimit.c4
-rw-r--r--fs/xfs/libxfs/xfs_metadir.c4
-rw-r--r--fs/xfs/libxfs/xfs_metafile.c270
-rw-r--r--fs/xfs/libxfs/xfs_metafile.h13
-rw-r--r--fs/xfs/libxfs/xfs_ondisk.h14
-rw-r--r--fs/xfs/libxfs/xfs_quota_defs.h4
-rw-r--r--fs/xfs/libxfs/xfs_refcount.c287
-rw-r--r--fs/xfs/libxfs/xfs_refcount.h23
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.c18
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c180
-rw-r--r--fs/xfs/libxfs/xfs_rmap.h12
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.c67
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c15
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.h9
-rw-r--r--fs/xfs/libxfs/xfs_rtgroup.c103
-rw-r--r--fs/xfs/libxfs/xfs_rtgroup.h118
-rw-r--r--fs/xfs/libxfs/xfs_rtrefcount_btree.c757
-rw-r--r--fs/xfs/libxfs/xfs_rtrefcount_btree.h189
-rw-r--r--fs/xfs/libxfs/xfs_rtrmap_btree.c1033
-rw-r--r--fs/xfs/libxfs/xfs_rtrmap_btree.h212
-rw-r--r--fs/xfs/libxfs/xfs_sb.c122
-rw-r--r--fs/xfs/libxfs/xfs_shared.h21
-rw-r--r--fs/xfs/libxfs/xfs_symlink_remote.c4
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c362
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.h25
-rw-r--r--fs/xfs/libxfs/xfs_trans_space.h13
-rw-r--r--fs/xfs/libxfs/xfs_types.h35
-rw-r--r--fs/xfs/libxfs/xfs_zones.c187
-rw-r--r--fs/xfs/libxfs/xfs_zones.h42
-rw-r--r--fs/xfs/scrub/agheader.c79
-rw-r--r--fs/xfs/scrub/agheader_repair.c8
-rw-r--r--fs/xfs/scrub/alloc_repair.c5
-rw-r--r--fs/xfs/scrub/bmap.c130
-rw-r--r--fs/xfs/scrub/bmap_repair.c148
-rw-r--r--fs/xfs/scrub/btree.c2
-rw-r--r--fs/xfs/scrub/common.c179
-rw-r--r--fs/xfs/scrub/common.h33
-rw-r--r--fs/xfs/scrub/cow_repair.c182
-rw-r--r--fs/xfs/scrub/dir_repair.c8
-rw-r--r--fs/xfs/scrub/fscounters.c31
-rw-r--r--fs/xfs/scrub/fscounters_repair.c12
-rw-r--r--fs/xfs/scrub/health.c59
-rw-r--r--fs/xfs/scrub/ialloc.c4
-rw-r--r--fs/xfs/scrub/inode.c48
-rw-r--r--fs/xfs/scrub/inode_repair.c214
-rw-r--r--fs/xfs/scrub/metapath.c86
-rw-r--r--fs/xfs/scrub/newbt.c53
-rw-r--r--fs/xfs/scrub/newbt.h1
-rw-r--r--fs/xfs/scrub/nlinks.c42
-rw-r--r--fs/xfs/scrub/nlinks_repair.c4
-rw-r--r--fs/xfs/scrub/orphanage.c21
-rw-r--r--fs/xfs/scrub/parent.c2
-rw-r--r--fs/xfs/scrub/parent_repair.c12
-rw-r--r--fs/xfs/scrub/quota.c16
-rw-r--r--fs/xfs/scrub/quota_repair.c20
-rw-r--r--fs/xfs/scrub/quotacheck.c15
-rw-r--r--fs/xfs/scrub/quotacheck_repair.c21
-rw-r--r--fs/xfs/scrub/rcbag_btree.c38
-rw-r--r--fs/xfs/scrub/reap.c893
-rw-r--r--fs/xfs/scrub/reap.h9
-rw-r--r--fs/xfs/scrub/refcount.c4
-rw-r--r--fs/xfs/scrub/refcount_repair.c6
-rw-r--r--fs/xfs/scrub/repair.c242
-rw-r--r--fs/xfs/scrub/repair.h47
-rw-r--r--fs/xfs/scrub/rgb_bitmap.h37
-rw-r--r--fs/xfs/scrub/rgsuper.c6
-rw-r--r--fs/xfs/scrub/rmap_repair.c105
-rw-r--r--fs/xfs/scrub/rtb_bitmap.h37
-rw-r--r--fs/xfs/scrub/rtbitmap.c86
-rw-r--r--fs/xfs/scrub/rtbitmap.h55
-rw-r--r--fs/xfs/scrub/rtbitmap_repair.c451
-rw-r--r--fs/xfs/scrub/rtrefcount.c661
-rw-r--r--fs/xfs/scrub/rtrefcount_repair.c761
-rw-r--r--fs/xfs/scrub/rtrmap.c323
-rw-r--r--fs/xfs/scrub/rtrmap_repair.c981
-rw-r--r--fs/xfs/scrub/rtsummary.c17
-rw-r--r--fs/xfs/scrub/rtsummary_repair.c3
-rw-r--r--fs/xfs/scrub/scrub.c39
-rw-r--r--fs/xfs/scrub/scrub.h34
-rw-r--r--fs/xfs/scrub/stats.c2
-rw-r--r--fs/xfs/scrub/symlink_repair.c7
-rw-r--r--fs/xfs/scrub/tempexch.h2
-rw-r--r--fs/xfs/scrub/tempfile.c43
-rw-r--r--fs/xfs/scrub/trace.c2
-rw-r--r--fs/xfs/scrub/trace.h331
-rw-r--r--fs/xfs/scrub/xfarray.c2
-rw-r--r--fs/xfs/xfs_aops.c399
-rw-r--r--fs/xfs/xfs_aops.h3
-rw-r--r--fs/xfs/xfs_attr_inactive.c5
-rw-r--r--fs/xfs/xfs_attr_item.c150
-rw-r--r--fs/xfs/xfs_attr_item.h8
-rw-r--r--fs/xfs/xfs_attr_list.c3
-rw-r--r--fs/xfs/xfs_bio_io.c30
-rw-r--r--fs/xfs/xfs_bmap_item.c28
-rw-r--r--fs/xfs/xfs_bmap_item.h3
-rw-r--r--fs/xfs/xfs_bmap_util.c36
-rw-r--r--fs/xfs/xfs_bmap_util.h12
-rw-r--r--fs/xfs/xfs_buf.c1402
-rw-r--r--fs/xfs/xfs_buf.h62
-rw-r--r--fs/xfs/xfs_buf_item.c434
-rw-r--r--fs/xfs/xfs_buf_item.h13
-rw-r--r--fs/xfs/xfs_buf_item_recover.c75
-rw-r--r--fs/xfs/xfs_buf_mem.c45
-rw-r--r--fs/xfs/xfs_buf_mem.h6
-rw-r--r--fs/xfs/xfs_discard.c67
-rw-r--r--fs/xfs/xfs_dquot.c362
-rw-r--r--fs/xfs/xfs_dquot.h31
-rw-r--r--fs/xfs/xfs_dquot_item.c57
-rw-r--r--fs/xfs/xfs_dquot_item.h7
-rw-r--r--fs/xfs/xfs_dquot_item_recover.c20
-rw-r--r--fs/xfs/xfs_drain.c20
-rw-r--r--fs/xfs/xfs_drain.h7
-rw-r--r--fs/xfs/xfs_error.c213
-rw-r--r--fs/xfs/xfs_error.h47
-rw-r--r--fs/xfs/xfs_exchmaps_item.c8
-rw-r--r--fs/xfs/xfs_exchrange.c88
-rw-r--r--fs/xfs/xfs_extent_busy.c2
-rw-r--r--fs/xfs/xfs_extent_busy.h8
-rw-r--r--fs/xfs/xfs_extfree_item.c104
-rw-r--r--fs/xfs/xfs_extfree_item.h7
-rw-r--r--fs/xfs/xfs_file.c577
-rw-r--r--fs/xfs/xfs_filestream.c15
-rw-r--r--fs/xfs/xfs_fsmap.c330
-rw-r--r--fs/xfs/xfs_fsops.c75
-rw-r--r--fs/xfs/xfs_fsops.h3
-rw-r--r--fs/xfs/xfs_globals.c4
-rw-r--r--fs/xfs/xfs_handle.c56
-rw-r--r--fs/xfs/xfs_health.c6
-rw-r--r--fs/xfs/xfs_icache.c64
-rw-r--r--fs/xfs/xfs_icreate_item.c2
-rw-r--r--fs/xfs/xfs_inode.c197
-rw-r--r--fs/xfs/xfs_inode.h35
-rw-r--r--fs/xfs/xfs_inode_item.c149
-rw-r--r--fs/xfs/xfs_inode_item.h14
-rw-r--r--fs/xfs/xfs_inode_item_recover.c75
-rw-r--r--fs/xfs/xfs_ioctl.c83
-rw-r--r--fs/xfs/xfs_ioctl.h4
-rw-r--r--fs/xfs/xfs_iomap.c847
-rw-r--r--fs/xfs/xfs_iomap.h9
-rw-r--r--fs/xfs/xfs_iops.c178
-rw-r--r--fs/xfs/xfs_iops.h3
-rw-r--r--fs/xfs/xfs_itable.c26
-rw-r--r--fs/xfs/xfs_itable.h10
-rw-r--r--fs/xfs/xfs_iwalk.c11
-rw-r--r--fs/xfs/xfs_linux.h2
-rw-r--r--fs/xfs/xfs_log.c303
-rw-r--r--fs/xfs/xfs_log.h53
-rw-r--r--fs/xfs/xfs_log_cil.c90
-rw-r--r--fs/xfs/xfs_log_priv.h55
-rw-r--r--fs/xfs/xfs_log_recover.c101
-rw-r--r--fs/xfs/xfs_message.c20
-rw-r--r--fs/xfs/xfs_message.h5
-rw-r--r--fs/xfs/xfs_mount.c418
-rw-r--r--fs/xfs/xfs_mount.h212
-rw-r--r--fs/xfs/xfs_mru_cache.c35
-rw-r--r--fs/xfs/xfs_notify_failure.c241
-rw-r--r--fs/xfs/xfs_notify_failure.h11
-rw-r--r--fs/xfs/xfs_pnfs.c2
-rw-r--r--fs/xfs/xfs_qm.c329
-rw-r--r--fs/xfs/xfs_qm.h3
-rw-r--r--fs/xfs/xfs_qm_bhv.c85
-rw-r--r--fs/xfs/xfs_qm_syscalls.c23
-rw-r--r--fs/xfs/xfs_quota.h10
-rw-r--r--fs/xfs/xfs_quotaops.c2
-rw-r--r--fs/xfs/xfs_refcount_item.c266
-rw-r--r--fs/xfs/xfs_refcount_item.h3
-rw-r--r--fs/xfs/xfs_reflink.c470
-rw-r--r--fs/xfs/xfs_reflink.h12
-rw-r--r--fs/xfs/xfs_rmap_item.c242
-rw-r--r--fs/xfs/xfs_rmap_item.h3
-rw-r--r--fs/xfs/xfs_rtalloc.c298
-rw-r--r--fs/xfs/xfs_rtalloc.h15
-rw-r--r--fs/xfs/xfs_stats.c5
-rw-r--r--fs/xfs/xfs_stats.h3
-rw-r--r--fs/xfs/xfs_super.c557
-rw-r--r--fs/xfs/xfs_super.h1
-rw-r--r--fs/xfs/xfs_sysctl.c31
-rw-r--r--fs/xfs/xfs_sysctl.h5
-rw-r--r--fs/xfs/xfs_sysfs.c115
-rw-r--r--fs/xfs/xfs_sysfs.h5
-rw-r--r--fs/xfs/xfs_trace.c2
-rw-r--r--fs/xfs/xfs_trace.h664
-rw-r--r--fs/xfs/xfs_trans.c296
-rw-r--r--fs/xfs/xfs_trans.h5
-rw-r--r--fs/xfs/xfs_trans_ail.c50
-rw-r--r--fs/xfs/xfs_trans_buf.c8
-rw-r--r--fs/xfs/xfs_trans_dquot.c55
-rw-r--r--fs/xfs/xfs_trans_priv.h28
-rw-r--r--fs/xfs/xfs_xattr.c2
-rw-r--r--fs/xfs/xfs_zone_alloc.c1328
-rw-r--r--fs/xfs/xfs_zone_alloc.h70
-rw-r--r--fs/xfs/xfs_zone_gc.c1214
-rw-r--r--fs/xfs/xfs_zone_info.c105
-rw-r--r--fs/xfs/xfs_zone_priv.h122
-rw-r--r--fs/xfs/xfs_zone_space_resv.c262
-rw-r--r--fs/zonefs/file.c59
-rw-r--r--fs/zonefs/super.c43
1628 files changed, 125543 insertions, 179605 deletions
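The fs/9p portion of the diff below converts mount-option handling from the legacy match_table_t parser to the VFS fs_context API: every option is declared once in a struct fs_parameter_spec table and decoded through fs_parse() from the filesystem's ->parse_param() hook. For orientation, here is a minimal sketch of that pattern under stated assumptions — the example_* names, the two options, and the private-context layout are illustrative placeholders, not the real v9fs table shown in the diff.

#include <linux/fs_context.h>
#include <linux/fs_parser.h>

/* Illustrative option tokens; the real v9fs enum is in the diff below. */
enum { Opt_debug, Opt_noxattr };

static const struct fs_parameter_spec example_param_spec[] = {
	fsparam_u32hex("debug",   Opt_debug),	/* debug=0x... */
	fsparam_flag  ("noxattr", Opt_noxattr),	/* bare flag   */
	{}
};

/* Hypothetical per-mount state hung off fc->fs_private. */
struct example_fs_context {
	unsigned int	debug;
	bool		noxattr;
};

static int example_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct example_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	/* Match the key against the spec table and convert its value. */
	opt = fs_parse(fc, example_param_spec, param, &result);
	if (opt < 0)
		return opt;	/* -ENOPARAM for unknown keys, unless the fs ignores them */

	switch (opt) {
	case Opt_debug:
		ctx->debug = result.uint_32;	/* parsed by fsparam_u32hex */
		break;
	case Opt_noxattr:
		ctx->noxattr = true;		/* flag: presence is enough */
		break;
	}
	return 0;
}

As the 9p hunks show, a filesystem may deliberately treat -ENOPARAM as success to keep ignoring unknown mount options for backward compatibility, while still handling "source" itself.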
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index eed551d8555f..633da5e37299 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/fs.h>
+#include <linux/fs_struct.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <linux/slab.h>
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 77e9c4387c1d..057487efaaeb 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -13,7 +13,8 @@
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/cred.h>
-#include <linux/parser.h>
+#include <linux/fs_parser.h>
+#include <linux/fs_context.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <net/9p/9p.h>
@@ -33,6 +34,10 @@ struct kmem_cache *v9fs_inode_cache;
*/
enum {
+ /* Mount-point source, we need to handle this explicitly because
+ * the code below accepts unknown args and the vfs layer only handles
+ * source if we rejected it as EINVAL */
+ Opt_source,
/* Options that take integer arguments */
Opt_debug, Opt_dfltuid, Opt_dfltgid, Opt_afid,
/* String options */
@@ -43,27 +48,71 @@ enum {
Opt_access, Opt_posixacl,
/* Lock timeout option */
Opt_locktimeout,
- /* Error token */
- Opt_err
+
+ /* Client options */
+ Opt_msize, Opt_trans, Opt_legacy, Opt_version,
+
+ /* fd transport options */
+ /* Options that take integer arguments */
+ Opt_rfdno, Opt_wfdno,
+ /* Options that take no arguments */
+
+ /* rdma transport options */
+ /* Options that take integer arguments */
+ Opt_rq_depth, Opt_sq_depth, Opt_timeout,
+
+ /* Options for both fd and rdma transports */
+ Opt_port, Opt_privport,
+};
+
+static const struct constant_table p9_versions[] = {
+ { "9p2000", p9_proto_legacy },
+ { "9p2000.u", p9_proto_2000u },
+ { "9p2000.L", p9_proto_2000L },
+ {}
};
-static const match_table_t tokens = {
- {Opt_debug, "debug=%x"},
- {Opt_dfltuid, "dfltuid=%u"},
- {Opt_dfltgid, "dfltgid=%u"},
- {Opt_afid, "afid=%u"},
- {Opt_uname, "uname=%s"},
- {Opt_remotename, "aname=%s"},
- {Opt_nodevmap, "nodevmap"},
- {Opt_noxattr, "noxattr"},
- {Opt_directio, "directio"},
- {Opt_ignoreqv, "ignoreqv"},
- {Opt_cache, "cache=%s"},
- {Opt_cachetag, "cachetag=%s"},
- {Opt_access, "access=%s"},
- {Opt_posixacl, "posixacl"},
- {Opt_locktimeout, "locktimeout=%u"},
- {Opt_err, NULL}
+/*
+ * This structure contains all parameters used for the core code,
+ * the client, and all the transports.
+ */
+const struct fs_parameter_spec v9fs_param_spec[] = {
+ fsparam_string ("source", Opt_source),
+ fsparam_u32hex ("debug", Opt_debug),
+ fsparam_uid ("dfltuid", Opt_dfltuid),
+ fsparam_gid ("dfltgid", Opt_dfltgid),
+ fsparam_u32 ("afid", Opt_afid),
+ fsparam_string ("uname", Opt_uname),
+ fsparam_string ("aname", Opt_remotename),
+ fsparam_flag ("nodevmap", Opt_nodevmap),
+ fsparam_flag ("noxattr", Opt_noxattr),
+ fsparam_flag ("directio", Opt_directio),
+ fsparam_flag ("ignoreqv", Opt_ignoreqv),
+ fsparam_string ("cache", Opt_cache),
+ fsparam_string ("cachetag", Opt_cachetag),
+ fsparam_string ("access", Opt_access),
+ fsparam_flag ("posixacl", Opt_posixacl),
+ fsparam_u32 ("locktimeout", Opt_locktimeout),
+
+ /* client options */
+ fsparam_u32 ("msize", Opt_msize),
+ fsparam_flag ("noextend", Opt_legacy),
+ fsparam_string ("trans", Opt_trans),
+ fsparam_enum ("version", Opt_version, p9_versions),
+
+ /* fd transport options */
+ fsparam_u32 ("rfdno", Opt_rfdno),
+ fsparam_u32 ("wfdno", Opt_wfdno),
+
+ /* rdma transport options */
+ fsparam_u32 ("sq", Opt_sq_depth),
+ fsparam_u32 ("rq", Opt_rq_depth),
+ fsparam_u32 ("timeout", Opt_timeout),
+
+ /* fd and rdma transport options */
+ fsparam_u32 ("port", Opt_port),
+ fsparam_flag ("privport", Opt_privport),
+ {}
};
/* Interpret mount options for cache mode */
@@ -101,7 +150,7 @@ int v9fs_show_options(struct seq_file *m, struct dentry *root)
struct v9fs_session_info *v9ses = root->d_sb->s_fs_info;
if (v9ses->debug)
- seq_printf(m, ",debug=%x", v9ses->debug);
+ seq_printf(m, ",debug=%#x", v9ses->debug);
if (!uid_eq(v9ses->dfltuid, V9FS_DEFUID))
seq_printf(m, ",dfltuid=%u",
from_kuid_munged(&init_user_ns, v9ses->dfltuid));
@@ -117,7 +166,7 @@ int v9fs_show_options(struct seq_file *m, struct dentry *root)
if (v9ses->nodev)
seq_puts(m, ",nodevmap");
if (v9ses->cache)
- seq_printf(m, ",cache=%x", v9ses->cache);
+ seq_printf(m, ",cache=%#x", v9ses->cache);
#ifdef CONFIG_9P_FSCACHE
if (v9ses->cachetag && (v9ses->cache & CACHE_FSCACHE))
seq_printf(m, ",cachetag=%s", v9ses->cachetag);
@@ -153,267 +202,254 @@ int v9fs_show_options(struct seq_file *m, struct dentry *root)
}
/**
- * v9fs_parse_options - parse mount options into session structure
- * @v9ses: existing v9fs session information
- * @opts: The mount option string
+ * v9fs_parse_param - parse a mount option into the filesystem context
+ * @fc: the filesystem context
+ * @param: the parameter to parse
*
* Return 0 upon success, -ERRNO upon failure.
*/
-
-static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
+int v9fs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- char *options, *tmp_options;
- substring_t args[MAX_OPT_ARGS];
- char *p;
- int option = 0;
+ struct v9fs_context *ctx = fc->fs_private;
+ struct fs_parse_result result;
char *s;
- int ret = 0;
-
- /* setup defaults */
- v9ses->afid = ~0;
- v9ses->debug = 0;
- v9ses->cache = CACHE_NONE;
-#ifdef CONFIG_9P_FSCACHE
- v9ses->cachetag = NULL;
-#endif
- v9ses->session_lock_timeout = P9_LOCK_TIMEOUT;
-
- if (!opts)
- return 0;
+ int r;
+ int opt;
+ struct p9_client_opts *clnt = &ctx->client_opts;
+ struct p9_fd_opts *fd_opts = &ctx->fd_opts;
+ struct p9_rdma_opts *rdma_opts = &ctx->rdma_opts;
+ struct p9_session_opts *session_opts = &ctx->session_opts;
+
+ opt = fs_parse(fc, v9fs_param_spec, param, &result);
+ if (opt < 0) {
+ /*
+ * We might like to report bad mount options here, but
+ * traditionally 9p has ignored unknown mount options
+ */
+ if (opt == -ENOPARAM)
+ return 0;
- tmp_options = kstrdup(opts, GFP_KERNEL);
- if (!tmp_options) {
- ret = -ENOMEM;
- goto fail_option_alloc;
+ return opt;
}
- options = tmp_options;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token, r;
-
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_debug:
- r = match_int(&args[0], &option);
- if (r < 0) {
- p9_debug(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
- ret = r;
- } else {
- v9ses->debug = option;
+
+ switch (opt) {
+ case Opt_source:
+ if (fc->source) {
+ pr_info("p9: multiple sources not supported\n");
+ return -EINVAL;
+ }
+ fc->source = param->string;
+ param->string = NULL;
+ break;
+ case Opt_debug:
+ session_opts->debug = result.uint_32;
#ifdef CONFIG_NET_9P_DEBUG
- p9_debug_level = option;
+ p9_debug_level = result.uint_32;
#endif
- }
- break;
-
- case Opt_dfltuid:
- r = match_int(&args[0], &option);
- if (r < 0) {
- p9_debug(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
- ret = r;
- continue;
- }
- v9ses->dfltuid = make_kuid(current_user_ns(), option);
- if (!uid_valid(v9ses->dfltuid)) {
- p9_debug(P9_DEBUG_ERROR,
- "uid field, but not a uid?\n");
- ret = -EINVAL;
- }
- break;
- case Opt_dfltgid:
- r = match_int(&args[0], &option);
- if (r < 0) {
- p9_debug(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
- ret = r;
- continue;
- }
- v9ses->dfltgid = make_kgid(current_user_ns(), option);
- if (!gid_valid(v9ses->dfltgid)) {
- p9_debug(P9_DEBUG_ERROR,
- "gid field, but not a gid?\n");
- ret = -EINVAL;
- }
- break;
- case Opt_afid:
- r = match_int(&args[0], &option);
- if (r < 0) {
- p9_debug(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
- ret = r;
- } else {
- v9ses->afid = option;
- }
- break;
- case Opt_uname:
- kfree(v9ses->uname);
- v9ses->uname = match_strdup(&args[0]);
- if (!v9ses->uname) {
- ret = -ENOMEM;
- goto free_and_return;
- }
- break;
- case Opt_remotename:
- kfree(v9ses->aname);
- v9ses->aname = match_strdup(&args[0]);
- if (!v9ses->aname) {
- ret = -ENOMEM;
- goto free_and_return;
- }
- break;
- case Opt_nodevmap:
- v9ses->nodev = 1;
- break;
- case Opt_noxattr:
- v9ses->flags |= V9FS_NO_XATTR;
- break;
- case Opt_directio:
- v9ses->flags |= V9FS_DIRECT_IO;
- break;
- case Opt_ignoreqv:
- v9ses->flags |= V9FS_IGNORE_QV;
- break;
- case Opt_cachetag:
+ break;
+
+ case Opt_dfltuid:
+ session_opts->dfltuid = result.uid;
+ break;
+ case Opt_dfltgid:
+ session_opts->dfltgid = result.gid;
+ break;
+ case Opt_afid:
+ session_opts->afid = result.uint_32;
+ break;
+ case Opt_uname:
+ kfree(session_opts->uname);
+ session_opts->uname = param->string;
+ param->string = NULL;
+ break;
+ case Opt_remotename:
+ kfree(session_opts->aname);
+ session_opts->aname = param->string;
+ param->string = NULL;
+ break;
+ case Opt_nodevmap:
+ session_opts->nodev = 1;
+ break;
+ case Opt_noxattr:
+ session_opts->flags |= V9FS_NO_XATTR;
+ break;
+ case Opt_directio:
+ session_opts->flags |= V9FS_DIRECT_IO;
+ break;
+ case Opt_ignoreqv:
+ session_opts->flags |= V9FS_IGNORE_QV;
+ break;
+ case Opt_cachetag:
#ifdef CONFIG_9P_FSCACHE
- kfree(v9ses->cachetag);
- v9ses->cachetag = match_strdup(&args[0]);
- if (!v9ses->cachetag) {
- ret = -ENOMEM;
- goto free_and_return;
- }
+ kfree(session_opts->cachetag);
+ session_opts->cachetag = param->string;
+ param->string = NULL;
#endif
- break;
- case Opt_cache:
- s = match_strdup(&args[0]);
- if (!s) {
- ret = -ENOMEM;
- p9_debug(P9_DEBUG_ERROR,
- "problem allocating copy of cache arg\n");
- goto free_and_return;
- }
- r = get_cache_mode(s);
- if (r < 0)
- ret = r;
- else
- v9ses->cache = r;
-
- kfree(s);
- break;
-
- case Opt_access:
- s = match_strdup(&args[0]);
- if (!s) {
- ret = -ENOMEM;
- p9_debug(P9_DEBUG_ERROR,
- "problem allocating copy of access arg\n");
- goto free_and_return;
+ break;
+ case Opt_cache:
+ r = get_cache_mode(param->string);
+ if (r < 0)
+ return r;
+ session_opts->cache = r;
+ break;
+ case Opt_access:
+ s = param->string;
+ session_opts->flags &= ~V9FS_ACCESS_MASK;
+ if (strcmp(s, "user") == 0) {
+ session_opts->flags |= V9FS_ACCESS_USER;
+ } else if (strcmp(s, "any") == 0) {
+ session_opts->flags |= V9FS_ACCESS_ANY;
+ } else if (strcmp(s, "client") == 0) {
+ session_opts->flags |= V9FS_ACCESS_CLIENT;
+ } else {
+ uid_t uid;
+
+ session_opts->flags |= V9FS_ACCESS_SINGLE;
+ r = kstrtouint(s, 10, &uid);
+ if (r) {
+ pr_info("Unknown access argument %s: %d\n",
+ param->string, r);
+ return r;
}
-
- v9ses->flags &= ~V9FS_ACCESS_MASK;
- if (strcmp(s, "user") == 0)
- v9ses->flags |= V9FS_ACCESS_USER;
- else if (strcmp(s, "any") == 0)
- v9ses->flags |= V9FS_ACCESS_ANY;
- else if (strcmp(s, "client") == 0) {
- v9ses->flags |= V9FS_ACCESS_CLIENT;
- } else {
- uid_t uid;
-
- v9ses->flags |= V9FS_ACCESS_SINGLE;
- r = kstrtouint(s, 10, &uid);
- if (r) {
- ret = r;
- pr_info("Unknown access argument %s: %d\n",
- s, r);
- kfree(s);
- continue;
- }
- v9ses->uid = make_kuid(current_user_ns(), uid);
- if (!uid_valid(v9ses->uid)) {
- ret = -EINVAL;
- pr_info("Unknown uid %s\n", s);
- }
+ session_opts->uid = make_kuid(current_user_ns(), uid);
+ if (!uid_valid(session_opts->uid)) {
+ pr_info("Unknown uid %s\n", s);
+ return -EINVAL;
}
+ }
+ break;
- kfree(s);
- break;
-
- case Opt_posixacl:
+ case Opt_posixacl:
#ifdef CONFIG_9P_FS_POSIX_ACL
- v9ses->flags |= V9FS_POSIX_ACL;
+ session_opts->flags |= V9FS_POSIX_ACL;
#else
- p9_debug(P9_DEBUG_ERROR,
- "Not defined CONFIG_9P_FS_POSIX_ACL. Ignoring posixacl option\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "Not defined CONFIG_9P_FS_POSIX_ACL. Ignoring posixacl option\n");
#endif
- break;
-
- case Opt_locktimeout:
- r = match_int(&args[0], &option);
- if (r < 0) {
- p9_debug(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
- ret = r;
- continue;
- }
- if (option < 1) {
- p9_debug(P9_DEBUG_ERROR,
- "locktimeout must be a greater than zero integer.\n");
- ret = -EINVAL;
- continue;
- }
- v9ses->session_lock_timeout = (long)option * HZ;
- break;
+ break;
+
+ case Opt_locktimeout:
+ if (result.uint_32 < 1) {
+ p9_debug(P9_DEBUG_ERROR,
+ "locktimeout must be a greater than zero integer.\n");
+ return -EINVAL;
+ }
+ session_opts->session_lock_timeout = (long)result.uint_32 * HZ;
+ break;
- default:
- continue;
+ /* Options for client */
+ case Opt_msize:
+ if (result.uint_32 < 4096) {
+ p9_debug(P9_DEBUG_ERROR, "msize should be at least 4k\n");
+ return -EINVAL;
+ }
+ if (result.uint_32 > INT_MAX) {
+ p9_debug(P9_DEBUG_ERROR, "msize too big\n");
+ return -EINVAL;
}
+ clnt->msize = result.uint_32;
+ break;
+ case Opt_trans:
+ v9fs_put_trans(clnt->trans_mod);
+ clnt->trans_mod = v9fs_get_trans_by_name(param->string);
+ if (!clnt->trans_mod) {
+ pr_info("Could not find request transport: %s\n",
+ param->string);
+ return -EINVAL;
+ }
+ break;
+ case Opt_legacy:
+ clnt->proto_version = p9_proto_legacy;
+ break;
+ case Opt_version:
+ clnt->proto_version = result.uint_32;
+ p9_debug(P9_DEBUG_9P, "Protocol version: %s\n", param->string);
+ break;
+ /* Options for fd transport */
+ case Opt_rfdno:
+ fd_opts->rfd = result.uint_32;
+ break;
+ case Opt_wfdno:
+ fd_opts->wfd = result.uint_32;
+ break;
+ /* Options for rdma transport */
+ case Opt_sq_depth:
+ rdma_opts->sq_depth = result.uint_32;
+ break;
+ case Opt_rq_depth:
+ rdma_opts->rq_depth = result.uint_32;
+ break;
+ case Opt_timeout:
+ rdma_opts->timeout = result.uint_32;
+ break;
+ /* Options for both fd and rdma transports */
+ case Opt_port:
+ fd_opts->port = result.uint_32;
+ rdma_opts->port = result.uint_32;
+ break;
+ case Opt_privport:
+ fd_opts->privport = true;
+ rdma_opts->privport = true;
+ break;
}
-free_and_return:
- kfree(tmp_options);
-fail_option_alloc:
- return ret;
+ return 0;
+}
+
+static void v9fs_apply_options(struct v9fs_session_info *v9ses,
+ struct fs_context *fc)
+{
+ struct v9fs_context *ctx = fc->fs_private;
+
+ v9ses->debug = ctx->session_opts.debug;
+ v9ses->dfltuid = ctx->session_opts.dfltuid;
+ v9ses->dfltgid = ctx->session_opts.dfltgid;
+ v9ses->afid = ctx->session_opts.afid;
+ v9ses->uname = ctx->session_opts.uname;
+ ctx->session_opts.uname = NULL;
+ v9ses->aname = ctx->session_opts.aname;
+ ctx->session_opts.aname = NULL;
+ v9ses->nodev = ctx->session_opts.nodev;
+ /*
+ * Note that we must |= flags here as session_init already
+ * set basic flags. This adds in flags from parsed options.
+ */
+ v9ses->flags |= ctx->session_opts.flags;
+#ifdef CONFIG_9P_FSCACHE
+ v9ses->cachetag = ctx->session_opts.cachetag;
+ ctx->session_opts.cachetag = NULL;
+#endif
+ v9ses->cache = ctx->session_opts.cache;
+ v9ses->uid = ctx->session_opts.uid;
+ v9ses->session_lock_timeout = ctx->session_opts.session_lock_timeout;
}
/**
* v9fs_session_init - initialize session
* @v9ses: session information structure
- * @dev_name: device being mounted
- * @data: options
+ * @fc: the filesystem mount context
*
*/
struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
- const char *dev_name, char *data)
+ struct fs_context *fc)
{
struct p9_fid *fid;
int rc = -ENOMEM;
- v9ses->uname = kstrdup(V9FS_DEFUSER, GFP_KERNEL);
- if (!v9ses->uname)
- goto err_names;
-
- v9ses->aname = kstrdup(V9FS_DEFANAME, GFP_KERNEL);
- if (!v9ses->aname)
- goto err_names;
init_rwsem(&v9ses->rename_sem);
- v9ses->uid = INVALID_UID;
- v9ses->dfltuid = V9FS_DEFUID;
- v9ses->dfltgid = V9FS_DEFGID;
-
- v9ses->clnt = p9_client_create(dev_name, data);
+ v9ses->clnt = p9_client_create(fc);
if (IS_ERR(v9ses->clnt)) {
rc = PTR_ERR(v9ses->clnt);
p9_debug(P9_DEBUG_ERROR, "problem initializing 9p client\n");
goto err_names;
}
+ /*
+ * Initialize flags on the real v9ses. v9fs_apply_options below
+ * will |= the additional flags from parsed options.
+ */
v9ses->flags = V9FS_ACCESS_USER;
if (p9_is_proto_dotl(v9ses->clnt)) {
@@ -423,9 +459,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
v9ses->flags |= V9FS_PROTO_2000U;
}
- rc = v9fs_parse_options(v9ses, data);
- if (rc < 0)
- goto err_clnt;
+ v9fs_apply_options(v9ses, fc);
v9ses->maxdata = v9ses->clnt->msize - P9_IOHDRSZ;
@@ -438,8 +472,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
v9ses->flags &= ~V9FS_ACCESS_MASK;
v9ses->flags |= V9FS_ACCESS_USER;
}
- /*FIXME !! */
- /* for legacy mode, fall back to V9FS_ACCESS_ANY */
+ /* FIXME: for legacy mode, fall back to V9FS_ACCESS_ANY */
if (!(v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses)) &&
((v9ses->flags&V9FS_ACCESS_MASK) == V9FS_ACCESS_USER)) {
@@ -450,7 +483,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
if (!v9fs_proto_dotl(v9ses) ||
!((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_CLIENT)) {
/*
- * We support ACL checks on clinet only if the protocol is
+ * We support ACL checks on client only if the protocol is
* 9P2000.L and access is V9FS_ACCESS_CLIENT.
*/
v9ses->flags &= ~V9FS_ACL_MASK;
@@ -472,7 +505,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
#ifdef CONFIG_9P_FSCACHE
/* register the session for caching */
if (v9ses->cache & CACHE_FSCACHE) {
- rc = v9fs_cache_session_get_cookie(v9ses, dev_name);
+ rc = v9fs_cache_session_get_cookie(v9ses, fc->source);
if (rc < 0)
goto err_clnt;
}
@@ -561,7 +594,7 @@ static ssize_t caches_show(struct kobject *kobj,
spin_lock(&v9fs_sessionlist_lock);
list_for_each_entry(v9ses, &v9fs_sessionlist, slist) {
if (v9ses->cachetag) {
- n = snprintf(buf, limit, "%s\n", v9ses->cachetag);
+ n = snprintf(buf + count, limit, "%s\n", v9ses->cachetag);
if (n < 0) {
count = n;
break;
@@ -597,13 +630,16 @@ static const struct attribute_group v9fs_attr_group = {
static int __init v9fs_sysfs_init(void)
{
+ int ret;
+
v9fs_kobj = kobject_create_and_add("9p", fs_kobj);
if (!v9fs_kobj)
return -ENOMEM;
- if (sysfs_create_group(v9fs_kobj, &v9fs_attr_group)) {
+ ret = sysfs_create_group(v9fs_kobj, &v9fs_attr_group);
+ if (ret) {
kobject_put(v9fs_kobj);
- return -ENOMEM;
+ return ret;
}
return 0;
@@ -669,7 +705,7 @@ static int __init init_v9fs(void)
int err;
pr_info("Installing v9fs 9p2000 file system support\n");
- /* TODO: Setup list of registered trasnport modules */
+ /* TODO: Setup list of registered transport modules */
err = v9fs_init_inode_cache();
if (err < 0) {
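
The hunks above replace 9p's hand-rolled match_table_t parsing with the generic fs_parser machinery: a constant fs_parameter_spec table drives fs_parse(), which converts the value and returns the matched option index in one step. A minimal sketch of that pattern, using hypothetical myfs_* names, Opt_mode/Opt_ro options and a private myfs_fs_context (none of which are part of this patch):

#include <linux/fs_context.h>
#include <linux/fs_parser.h>

enum { Opt_mode, Opt_ro };                      /* hypothetical options */

struct myfs_fs_context {                        /* hypothetical per-mount context */
        umode_t mode;
        bool ro;
};

static const struct fs_parameter_spec myfs_param_spec[] = {
        fsparam_u32oct("mode", Opt_mode),       /* e.g. mode=0755 */
        fsparam_flag  ("ro",   Opt_ro),         /* bare flag, no argument */
        {}
};

static int myfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
        struct myfs_fs_context *ctx = fc->fs_private;
        struct fs_parse_result result;
        int opt;

        opt = fs_parse(fc, myfs_param_spec, param, &result);
        if (opt < 0)
                return opt;                     /* -ENOPARAM for keys not in the table */

        switch (opt) {
        case Opt_mode:
                ctx->mode = result.uint_32;     /* value already converted by fs_parse() */
                break;
        case Opt_ro:
                ctx->ro = true;
                break;
        }
        return 0;
}

Unlike 9p above, which deliberately swallows -ENOPARAM so unknown options keep being ignored, most filesystems simply return the error and let the VFS reject the mount.
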
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 698c43dd5dc8..6a12445d3858 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -10,6 +10,9 @@
#include <linux/backing-dev.h>
#include <linux/netfs.h>
+#include <linux/fs_parser.h>
+#include <net/9p/client.h>
+#include <net/9p/transport.h>
/**
* enum p9_session_flags - option flags for each 9P session
@@ -163,11 +166,13 @@ static inline struct fscache_volume *v9fs_session_cache(struct v9fs_session_info
#endif
}
+extern const struct fs_parameter_spec v9fs_param_spec[];
+extern int v9fs_parse_param(struct fs_context *fc, struct fs_parameter *param);
extern int v9fs_show_options(struct seq_file *m, struct dentry *root);
struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
- const char *dev_name, char *data);
+ struct fs_context *fc);
extern void v9fs_session_close(struct v9fs_session_info *v9ses);
extern void v9fs_session_cancel(struct v9fs_session_info *v9ses);
extern void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses);
@@ -202,7 +207,7 @@ static inline struct v9fs_session_info *v9fs_inode2v9ses(struct inode *inode)
return inode->i_sb->s_fs_info;
}
-static inline struct v9fs_session_info *v9fs_dentry2v9ses(struct dentry *dentry)
+static inline struct v9fs_session_info *v9fs_dentry2v9ses(const struct dentry *dentry)
{
return dentry->d_sb->s_fs_info;
}
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 819c75233235..862164181bac 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -57,7 +57,9 @@ static void v9fs_issue_write(struct netfs_io_subrequest *subreq)
int err, len;
len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
- netfs_write_subrequest_terminated(subreq, len ?: err, false);
+ if (len > 0)
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ netfs_write_subrequest_terminated(subreq, len ?: err);
}
/**
@@ -75,15 +77,18 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
/* if we just extended the file size, any portion not in
* cache won't be on server and is zeroes */
- if (subreq->rreq->origin != NETFS_DIO_READ)
+ if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
+ subreq->rreq->origin != NETFS_DIO_READ)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
if (pos + total >= i_size_read(rreq->inode))
__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
-
- if (!err)
+ if (!err && total) {
subreq->transferred += total;
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ }
- netfs_read_subreq_terminated(subreq, err, false);
+ subreq->error = err;
+ netfs_read_subreq_terminated(subreq);
}
/**
@@ -160,4 +165,5 @@ const struct address_space_operations v9fs_addr_operations = {
.invalidate_folio = netfs_invalidate_folio,
.direct_IO = noop_direct_IO,
.writepages = netfs_writepages,
+ .migrate_folio = filemap_migrate_folio,
};
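
For orientation, the netfs completion convention used in the read path above is: account any progress on the subrequest, set status bits such as NETFS_SREQ_HIT_EOF and NETFS_SREQ_MADE_PROGRESS, store the error in subreq->error, and finish with netfs_read_subreq_terminated(). A condensed sketch of that shape, where transport_read() is a hypothetical stand-in for the wire I/O (p9_client_read() in the 9p case):

static void myfs_issue_read(struct netfs_io_subrequest *subreq)
{
        loff_t pos = subreq->start + subreq->transferred;
        size_t total;
        int err = 0;

        /* Hypothetical helper: returns bytes read, sets *err on failure. */
        total = transport_read(subreq, pos, &subreq->io_iter, &err);

        if (pos + total >= i_size_read(subreq->rreq->inode))
                __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);

        if (!err && total) {
                subreq->transferred += total;
                __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
        }

        subreq->error = err;
        netfs_read_subreq_terminated(subreq);
}
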
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index 01338d4c2d9e..c5bf74d547e8 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -61,7 +61,7 @@ static void v9fs_dentry_release(struct dentry *dentry)
p9_fid_put(hlist_entry(p, struct p9_fid, dlist));
}
-static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+static int __v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
{
struct p9_fid *fid;
struct inode *inode;
@@ -80,8 +80,13 @@ static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
struct v9fs_session_info *v9ses;
fid = v9fs_fid_lookup(dentry);
- if (IS_ERR(fid))
+ if (IS_ERR(fid)) {
+ p9_debug(
+ P9_DEBUG_VFS,
+ "v9fs_fid_lookup: dentry = %pd (%p), got error %pe\n",
+ dentry, dentry, fid);
return PTR_ERR(fid);
+ }
v9ses = v9fs_inode2v9ses(inode);
if (v9fs_proto_dotl(v9ses))
@@ -90,23 +95,57 @@ static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
retval = v9fs_refresh_inode(fid, inode);
p9_fid_put(fid);
- if (retval == -ENOENT)
+ if (retval == -ENOENT) {
+ p9_debug(P9_DEBUG_VFS, "dentry: %pd (%p) invalidated due to ENOENT\n",
+ dentry, dentry);
return 0;
- if (retval < 0)
+ }
+ if (v9inode->cache_validity & V9FS_INO_INVALID_ATTR) {
+ p9_debug(P9_DEBUG_VFS, "dentry: %pd (%p) invalidated due to type change\n",
+ dentry, dentry);
+ return 0;
+ }
+ if (retval < 0) {
+ p9_debug(P9_DEBUG_VFS,
+ "refresh inode: dentry = %pd (%p), got error %pe\n",
+ dentry, dentry, ERR_PTR(retval));
return retval;
+ }
}
out_valid:
+ p9_debug(P9_DEBUG_VFS, "dentry: %pd (%p) is valid\n", dentry, dentry);
return 1;
}
+static int v9fs_lookup_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
+{
+ return __v9fs_lookup_revalidate(dentry, flags);
+}
+
+static bool v9fs_dentry_unalias_trylock(const struct dentry *dentry)
+{
+ struct v9fs_session_info *v9ses = v9fs_dentry2v9ses(dentry);
+ return down_write_trylock(&v9ses->rename_sem);
+}
+
+static void v9fs_dentry_unalias_unlock(const struct dentry *dentry)
+{
+ struct v9fs_session_info *v9ses = v9fs_dentry2v9ses(dentry);
+ up_write(&v9ses->rename_sem);
+}
+
const struct dentry_operations v9fs_cached_dentry_operations = {
.d_revalidate = v9fs_lookup_revalidate,
- .d_weak_revalidate = v9fs_lookup_revalidate,
+ .d_weak_revalidate = __v9fs_lookup_revalidate,
.d_delete = v9fs_cached_dentry_delete,
.d_release = v9fs_dentry_release,
+ .d_unalias_trylock = v9fs_dentry_unalias_trylock,
+ .d_unalias_unlock = v9fs_dentry_unalias_unlock,
};
const struct dentry_operations v9fs_dentry_operations = {
- .d_delete = always_delete_dentry,
.d_release = v9fs_dentry_release,
+ .d_unalias_trylock = v9fs_dentry_unalias_trylock,
+ .d_unalias_unlock = v9fs_dentry_unalias_unlock,
};
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 348cc90bf9c5..6f3880208587 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -43,14 +43,18 @@ int v9fs_file_open(struct inode *inode, struct file *file)
struct v9fs_session_info *v9ses;
struct p9_fid *fid;
int omode;
+ int o_append;
p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
v9ses = v9fs_inode2v9ses(inode);
- if (v9fs_proto_dotl(v9ses))
+ if (v9fs_proto_dotl(v9ses)) {
omode = v9fs_open_to_dotl_flags(file->f_flags);
- else
+ o_append = P9_DOTL_APPEND;
+ } else {
omode = v9fs_uflags2omode(file->f_flags,
v9fs_proto_dotu(v9ses));
+ o_append = P9_OAPPEND;
+ }
fid = file->private_data;
if (!fid) {
fid = v9fs_fid_clone(file_dentry(file));
@@ -58,9 +62,10 @@ int v9fs_file_open(struct inode *inode, struct file *file)
return PTR_ERR(fid);
if ((v9ses->cache & CACHE_WRITEBACK) && (omode & P9_OWRITE)) {
- int writeback_omode = (omode & ~P9_OWRITE) | P9_ORDWR;
+ int writeback_omode = (omode & ~(P9_OWRITE | o_append)) | P9_ORDWR;
p9_debug(P9_DEBUG_CACHE, "write-only file with writeback enabled, try opening O_RDWR\n");
+
err = p9_client_open(fid, writeback_omode);
if (err < 0) {
p9_debug(P9_DEBUG_CACHE, "could not open O_RDWR, disabling caches\n");
@@ -454,9 +459,10 @@ int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
}
static int
-v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
+v9fs_file_mmap_prepare(struct vm_area_desc *desc)
{
int retval;
+ struct file *filp = desc->file;
struct inode *inode = file_inode(filp);
struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
@@ -464,12 +470,12 @@ v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
if (!(v9ses->cache & CACHE_WRITEBACK)) {
p9_debug(P9_DEBUG_CACHE, "(read-only mmap mode)");
- return generic_file_readonly_mmap(filp, vma);
+ return generic_file_readonly_mmap_prepare(desc);
}
- retval = generic_file_mmap(filp, vma);
+ retval = generic_file_mmap_prepare(desc);
if (!retval)
- vma->vm_ops = &v9fs_mmap_file_vm_ops;
+ desc->vm_ops = &v9fs_mmap_file_vm_ops;
return retval;
}
@@ -482,24 +488,15 @@ v9fs_vm_page_mkwrite(struct vm_fault *vmf)
static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
{
- struct inode *inode;
-
- struct writeback_control wbc = {
- .nr_to_write = LONG_MAX,
- .sync_mode = WB_SYNC_ALL,
- .range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
- /* absolute end, byte at end included */
- .range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
- (vma->vm_end - vma->vm_start - 1),
- };
-
if (!(vma->vm_flags & VM_SHARED))
return;
p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
- inode = file_inode(vma->vm_file);
- filemap_fdatawrite_wbc(inode->i_mapping, &wbc);
+ filemap_fdatawrite_range(file_inode(vma->vm_file)->i_mapping,
+ (loff_t)vma->vm_pgoff * PAGE_SIZE,
+ (loff_t)vma->vm_pgoff * PAGE_SIZE +
+ (vma->vm_end - vma->vm_start - 1));
}
static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
@@ -516,7 +513,7 @@ const struct file_operations v9fs_file_operations = {
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
- .mmap = generic_file_readonly_mmap,
+ .mmap_prepare = generic_file_readonly_mmap_prepare,
.splice_read = v9fs_file_splice_read,
.splice_write = iter_file_splice_write,
.fsync = v9fs_file_fsync,
@@ -531,7 +528,7 @@ const struct file_operations v9fs_file_operations_dotl = {
.release = v9fs_dir_release,
.lock = v9fs_file_lock_dotl,
.flock = v9fs_file_flock_dotl,
- .mmap = v9fs_file_mmap,
+ .mmap_prepare = v9fs_file_mmap_prepare,
.splice_read = v9fs_file_splice_read,
.splice_write = iter_file_splice_write,
.fsync = v9fs_file_fsync_dotl,
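
The .mmap to .mmap_prepare switch above replaces the (file, vma) argument pair with a single struct vm_area_desc that describes the mapping before the VMA exists; the hook stages its changes (such as vm_ops) in the descriptor. A minimal sketch of a converted hook, with myfs_* as placeholder names:

static int myfs_file_mmap_prepare(struct vm_area_desc *desc)
{
        int ret;

        /* desc->file replaces the old 'filp' argument; the VMA itself
         * does not exist yet, so attributes are staged in the desc. */
        ret = generic_file_mmap_prepare(desc);
        if (!ret)
                desc->vm_ops = &myfs_file_vm_ops;       /* placeholder vm_operations */
        return ret;
}

static const struct file_operations myfs_file_operations = {
        .mmap_prepare   = myfs_file_mmap_prepare,
        /* ... */
};
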
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 3e68521f4e2f..97abe65bf7c1 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -422,7 +422,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
inode = iget5_locked(sb, QID2INO(qid), test, v9fs_set_inode, st);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
/*
* initialize the inode with the stat info
@@ -669,8 +669,8 @@ v9fs_vfs_create(struct mnt_idmap *idmap, struct inode *dir,
*
*/
-static int v9fs_vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *v9fs_vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int err;
u32 perm;
@@ -692,8 +692,7 @@ static int v9fs_vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
if (fid)
p9_fid_put(fid);
-
- return err;
+ return ERR_PTR(err);
}
/**
@@ -769,44 +768,40 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
struct v9fs_inode __maybe_unused *v9inode;
struct v9fs_session_info *v9ses;
struct p9_fid *fid;
- struct dentry *res = NULL;
struct inode *inode;
int p9_omode;
if (d_in_lookup(dentry)) {
- res = v9fs_vfs_lookup(dir, dentry, 0);
- if (IS_ERR(res))
- return PTR_ERR(res);
-
- if (res)
- dentry = res;
+ struct dentry *res = v9fs_vfs_lookup(dir, dentry, 0);
+ if (res || d_really_is_positive(dentry))
+ return finish_no_open(file, res);
}
/* Only creates */
- if (!(flags & O_CREAT) || d_really_is_positive(dentry))
- return finish_no_open(file, res);
+ if (!(flags & O_CREAT))
+ return finish_no_open(file, NULL);
v9ses = v9fs_inode2v9ses(dir);
perm = unixmode2p9mode(v9ses, mode);
p9_omode = v9fs_uflags2omode(flags, v9fs_proto_dotu(v9ses));
if ((v9ses->cache & CACHE_WRITEBACK) && (p9_omode & P9_OWRITE)) {
- p9_omode = (p9_omode & ~P9_OWRITE) | P9_ORDWR;
+ p9_omode = (p9_omode & ~(P9_OWRITE | P9_OAPPEND)) | P9_ORDWR;
p9_debug(P9_DEBUG_CACHE,
"write-only file with writeback enabled, creating w/ O_RDWR\n");
}
fid = v9fs_create(v9ses, dir, dentry, NULL, perm, p9_omode);
- if (IS_ERR(fid)) {
- err = PTR_ERR(fid);
- goto error;
- }
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
v9fs_invalidate_inode_attr(dir);
inode = d_inode(dentry);
v9inode = V9FS_I(inode);
err = finish_open(file, dentry, generic_file_open);
- if (err)
- goto error;
+ if (unlikely(err)) {
+ p9_fid_put(fid);
+ return err;
+ }
file->private_data = fid;
#ifdef CONFIG_9P_FSCACHE
@@ -819,13 +814,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
v9fs_open_fid_add(inode, &fid);
file->f_mode |= FMODE_CREATED;
-out:
- dput(res);
- return err;
-
-error:
- p9_fid_put(fid);
- goto out;
+ return 0;
}
/**
@@ -1404,4 +1393,3 @@ static const struct inode_operations v9fs_symlink_inode_operations = {
.getattr = v9fs_vfs_getattr,
.setattr = v9fs_vfs_setattr,
};
-
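
The ->mkdir() prototype change above turns the int return into a struct dentry *: errors come back as ERR_PTR(-E...), while NULL (or ERR_PTR(0)) means the passed-in dentry was instantiated as-is, the pattern both 9p and affs follow in this patch. A condensed sketch of the new shape, where myfs_create_dir_on_backend() is a hypothetical helper:

static struct dentry *myfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
                                 struct dentry *dentry, umode_t mode)
{
        int err;

        err = myfs_create_dir_on_backend(dir, dentry, mode);   /* hypothetical */
        if (err)
                return ERR_PTR(err);

        /* NULL tells the VFS the original dentry was used as-is. */
        return NULL;
}
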
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 143ac03b7425..643e759eacb2 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -112,7 +112,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
inode = iget5_locked(sb, QID2INO(qid), test, v9fs_set_inode_dotl, st);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
/*
* initialize the inode with the stat info
@@ -238,20 +238,16 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
struct p9_fid *dfid = NULL, *ofid = NULL;
struct v9fs_session_info *v9ses;
struct posix_acl *pacl = NULL, *dacl = NULL;
- struct dentry *res = NULL;
if (d_in_lookup(dentry)) {
- res = v9fs_vfs_lookup(dir, dentry, 0);
- if (IS_ERR(res))
- return PTR_ERR(res);
-
- if (res)
- dentry = res;
+ struct dentry *res = v9fs_vfs_lookup(dir, dentry, 0);
+ if (res || d_really_is_positive(dentry))
+ return finish_no_open(file, res);
}
/* Only creates */
- if (!(flags & O_CREAT) || d_really_is_positive(dentry))
- return finish_no_open(file, res);
+ if (!(flags & O_CREAT))
+ return finish_no_open(file, NULL);
v9ses = v9fs_inode2v9ses(dir);
@@ -286,7 +282,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
}
if ((v9ses->cache & CACHE_WRITEBACK) && (p9_omode & P9_OWRITE)) {
- p9_omode = (p9_omode & ~P9_OWRITE) | P9_ORDWR;
+ p9_omode = (p9_omode & ~(P9_OWRITE | P9_DOTL_APPEND)) | P9_ORDWR;
p9_debug(P9_DEBUG_CACHE,
"write-only file with writeback enabled, creating w/ O_RDWR\n");
}
@@ -337,7 +333,6 @@ out:
p9_fid_put(ofid);
p9_fid_put(fid);
v9fs_put_acl(dacl, pacl);
- dput(res);
return err;
}
@@ -350,9 +345,9 @@ out:
*
*/
-static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
- struct inode *dir, struct dentry *dentry,
- umode_t omode)
+static struct dentry *v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
+ struct inode *dir, struct dentry *dentry,
+ umode_t omode)
{
int err;
struct v9fs_session_info *v9ses;
@@ -407,8 +402,8 @@ static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
err);
goto error;
}
- v9fs_fid_add(dentry, &fid);
v9fs_set_create_acl(inode, fid, dacl, pacl);
+ v9fs_fid_add(dentry, &fid);
d_instantiate(dentry, inode);
err = 0;
inc_nlink(dir);
@@ -417,7 +412,7 @@ error:
p9_fid_put(fid);
v9fs_put_acl(dacl, pacl);
p9_fid_put(dfid);
- return err;
+ return ERR_PTR(err);
}
static int
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 489db161abc9..315336de6f02 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -19,6 +19,7 @@
#include <linux/statfs.h>
#include <linux/magic.h>
#include <linux/fscache.h>
+#include <linux/fs_context.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
@@ -30,32 +31,10 @@
static const struct super_operations v9fs_super_ops, v9fs_super_ops_dotl;
-/**
- * v9fs_set_super - set the superblock
- * @s: super block
- * @data: file system specific data
- *
- */
-
-static int v9fs_set_super(struct super_block *s, void *data)
-{
- s->s_fs_info = data;
- return set_anon_super(s, data);
-}
-
-/**
- * v9fs_fill_super - populate superblock with info
- * @sb: superblock
- * @v9ses: session information
- * @flags: flags propagated from v9fs_mount()
- *
- */
-
-static int
-v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
- int flags)
+static int v9fs_fill_super(struct super_block *sb)
{
int ret;
+ struct v9fs_session_info *v9ses = sb->s_fs_info;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize_bits = fls(v9ses->maxdata - 1);
@@ -95,16 +74,12 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
}
/**
- * v9fs_mount - mount a superblock
- * @fs_type: file system type
- * @flags: mount flags
- * @dev_name: device name that was mounted
- * @data: mount options
+ * v9fs_get_tree - create the mountable root and superblock
+ * @fc: the filesystem context
*
*/
-static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static int v9fs_get_tree(struct fs_context *fc)
{
struct super_block *sb = NULL;
struct inode *inode = NULL;
@@ -117,27 +92,30 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
v9ses = kzalloc(sizeof(struct v9fs_session_info), GFP_KERNEL);
if (!v9ses)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- fid = v9fs_session_init(v9ses, dev_name, data);
+ fid = v9fs_session_init(v9ses, fc);
if (IS_ERR(fid)) {
retval = PTR_ERR(fid);
goto free_session;
}
- sb = sget(fs_type, NULL, v9fs_set_super, flags, v9ses);
+ fc->s_fs_info = v9ses;
+ sb = sget_fc(fc, NULL, set_anon_super_fc);
if (IS_ERR(sb)) {
retval = PTR_ERR(sb);
goto clunk_fid;
}
- retval = v9fs_fill_super(sb, v9ses, flags);
+ retval = v9fs_fill_super(sb);
if (retval)
goto release_sb;
- if (v9ses->cache & (CACHE_META|CACHE_LOOSE))
- sb->s_d_op = &v9fs_cached_dentry_operations;
- else
- sb->s_d_op = &v9fs_dentry_operations;
+ if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) {
+ set_default_d_op(sb, &v9fs_cached_dentry_operations);
+ } else {
+ set_default_d_op(sb, &v9fs_dentry_operations);
+ sb->s_d_flags |= DCACHE_DONTCACHE;
+ }
inode = v9fs_get_new_inode_from_fid(v9ses, fid, sb);
if (IS_ERR(inode)) {
@@ -157,14 +135,15 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
v9fs_fid_add(root, &fid);
p9_debug(P9_DEBUG_VFS, " simple set mount, return 0\n");
- return dget(sb->s_root);
+ fc->root = dget(sb->s_root);
+ return 0;
clunk_fid:
p9_fid_put(fid);
v9fs_session_close(v9ses);
free_session:
kfree(v9ses);
- return ERR_PTR(retval);
+ return retval;
release_sb:
/*
@@ -175,7 +154,7 @@ release_sb:
*/
p9_fid_put(fid);
deactivate_locked_super(sb);
- return ERR_PTR(retval);
+ return retval;
}
/**
@@ -250,7 +229,7 @@ static int v9fs_drop_inode(struct inode *inode)
v9ses = v9fs_inode2v9ses(inode);
if (v9ses->cache & (CACHE_META|CACHE_LOOSE))
- return generic_drop_inode(inode);
+ return inode_generic_drop(inode);
/*
* in case of non cached mode always drop the
* inode because we want the inode attribute
@@ -301,11 +280,86 @@ static const struct super_operations v9fs_super_ops_dotl = {
.write_inode = v9fs_write_inode_dotl,
};
+static void v9fs_free_fc(struct fs_context *fc)
+{
+ struct v9fs_context *ctx = fc->fs_private;
+
+ if (!ctx)
+ return;
+
+ /* These should be NULL by now but guard against leaks */
+ kfree(ctx->session_opts.uname);
+ kfree(ctx->session_opts.aname);
+#ifdef CONFIG_9P_FSCACHE
+ kfree(ctx->session_opts.cachetag);
+#endif
+ if (ctx->client_opts.trans_mod)
+ v9fs_put_trans(ctx->client_opts.trans_mod);
+ kfree(ctx);
+}
+
+static const struct fs_context_operations v9fs_context_ops = {
+ .parse_param = v9fs_parse_param,
+ .get_tree = v9fs_get_tree,
+ .free = v9fs_free_fc,
+};
+
+static int v9fs_init_fs_context(struct fs_context *fc)
+{
+ struct v9fs_context *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ /* initialize core options */
+ ctx->session_opts.afid = ~0;
+ ctx->session_opts.cache = CACHE_NONE;
+ ctx->session_opts.session_lock_timeout = P9_LOCK_TIMEOUT;
+ ctx->session_opts.uname = kstrdup(V9FS_DEFUSER, GFP_KERNEL);
+ if (!ctx->session_opts.uname)
+ goto error;
+
+ ctx->session_opts.aname = kstrdup(V9FS_DEFANAME, GFP_KERNEL);
+ if (!ctx->session_opts.aname)
+ goto error;
+
+ ctx->session_opts.uid = INVALID_UID;
+ ctx->session_opts.dfltuid = V9FS_DEFUID;
+ ctx->session_opts.dfltgid = V9FS_DEFGID;
+
+ /* initialize client options */
+ ctx->client_opts.proto_version = p9_proto_2000L;
+ ctx->client_opts.msize = DEFAULT_MSIZE;
+
+ /* initialize fd transport options */
+ ctx->fd_opts.port = P9_FD_PORT;
+ ctx->fd_opts.rfd = ~0;
+ ctx->fd_opts.wfd = ~0;
+ ctx->fd_opts.privport = false;
+
+ /* initialize rdma transport options */
+ ctx->rdma_opts.port = P9_RDMA_PORT;
+ ctx->rdma_opts.sq_depth = P9_RDMA_SQ_DEPTH;
+ ctx->rdma_opts.rq_depth = P9_RDMA_RQ_DEPTH;
+ ctx->rdma_opts.timeout = P9_RDMA_TIMEOUT;
+ ctx->rdma_opts.privport = false;
+
+ fc->ops = &v9fs_context_ops;
+ fc->fs_private = ctx;
+
+ return 0;
+error:
+ fc->need_free = 1;
+ return -ENOMEM;
+}
+
struct file_system_type v9fs_fs_type = {
.name = "9p",
- .mount = v9fs_mount,
.kill_sb = v9fs_kill_super,
.owner = THIS_MODULE,
.fs_flags = FS_RENAME_DOES_D_MOVE,
+ .init_fs_context = v9fs_init_fs_context,
+ .parameters = v9fs_param_spec,
};
MODULE_ALIAS_FS("9p");
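
Tying the new mount API pieces together: init_fs_context allocates the private context and installs the fs_context_operations, parse_param fills it in, get_tree builds the superblock, and free releases whatever parsing accumulated. A skeletal sketch of that wiring, continuing the hypothetical myfs example from the earlier sketch (myfs_get_tree() and myfs_free_fc() are assumed helpers, not shown):

static const struct fs_context_operations myfs_context_ops = {
        .parse_param    = myfs_parse_param,
        .get_tree       = myfs_get_tree,
        .free           = myfs_free_fc,
};

static int myfs_init_fs_context(struct fs_context *fc)
{
        struct myfs_fs_context *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        /* Set defaults here, before any parameters are parsed. */
        fc->fs_private = ctx;
        fc->ops = &myfs_context_ops;
        return 0;
}

static struct file_system_type myfs_fs_type = {
        .name                   = "myfs",
        .owner                  = THIS_MODULE,
        .init_fs_context        = myfs_init_fs_context,
        .parameters             = myfs_param_spec,
        .kill_sb                = kill_anon_super,
};
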
diff --git a/fs/Kconfig b/fs/Kconfig
index 64d420e3c475..0bfdaecaa877 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -51,7 +51,6 @@ source "fs/ocfs2/Kconfig"
source "fs/btrfs/Kconfig"
source "fs/nilfs2/Kconfig"
source "fs/f2fs/Kconfig"
-source "fs/bcachefs/Kconfig"
source "fs/zonefs/Kconfig"
endif # BLOCK
@@ -59,7 +58,7 @@ endif # BLOCK
config FS_DAX
bool "File system based Direct Access (DAX) support"
depends on MMU
- depends on ZONE_DEVICE || FS_DAX_LIMITED
+ depends on ZONE_DEVICE
select FS_IOMAP
select DAX
help
@@ -95,13 +94,6 @@ config FS_DAX_PMD
depends on ZONE_DEVICE
depends on TRANSPARENT_HUGEPAGE
-# Selected by DAX drivers that do not expect filesystem DAX to support
-# get_user_pages() of DAX mappings. I.e. "limited" indicates no support
-# for fork() of processes with MAP_SHARED mappings or support for
-# direct-I/O to a DAX mapping.
-config FS_DAX_LIMITED
- bool
-
# Posix ACL utility routines
#
# Note: Posix ACLs can be implemented without these helpers. Never use
@@ -256,8 +248,7 @@ config ARCH_SUPPORTS_HUGETLBFS
menuconfig HUGETLBFS
bool "HugeTLB file system support"
- depends on X86 || SPARC64 || ARCH_SUPPORTS_HUGETLBFS || BROKEN
- depends on (SYSFS || SYSCTL)
+ depends on ARCH_SUPPORTS_HUGETLBFS
select MEMFD_CREATE
select PADATA if SMP
help
@@ -286,6 +277,7 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
def_bool HUGETLB_PAGE
depends on ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
depends on SPARSEMEM_VMEMMAP
+ select SPARSEMEM_VMEMMAP_PREINIT if ARCH_WANT_HUGETLB_VMEMMAP_PREINIT
config HUGETLB_PMD_PAGE_TABLE_SHARING
def_bool HUGETLB_PAGE
@@ -334,9 +326,9 @@ source "fs/omfs/Kconfig"
source "fs/hpfs/Kconfig"
source "fs/qnx4/Kconfig"
source "fs/qnx6/Kconfig"
+source "fs/resctrl/Kconfig"
source "fs/romfs/Kconfig"
source "fs/pstore/Kconfig"
-source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
source "fs/erofs/Kconfig"
source "fs/vboxsf/Kconfig"
@@ -368,6 +360,7 @@ config GRACE_PERIOD
config LOCKD
tristate
depends on FILE_LOCKING
+ select CRC32
select GRACE_PERIOD
config LOCKD_V4
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index bd2f530e5740..1949e25c7741 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -184,4 +184,13 @@ config EXEC_KUNIT_TEST
This builds the exec KUnit tests, which tests boundary conditions
of various aspects of the exec internals.
+config ARCH_HAS_ELF_CORE_EFLAGS
+ bool
+ depends on BINFMT_ELF && ELF_CORE
+ default n
+ help
+ Select this option if the architecture makes use of the e_flags
+ field in the ELF header to store ABI or other architecture-specific
+ information that should be preserved in core dumps.
+
endmenu
diff --git a/fs/Makefile b/fs/Makefile
index 15df0a923d3a..a04274a3c854 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -14,8 +14,9 @@ obj-y := open.o read_write.o file_table.o super.o \
seq_file.o xattr.o libfs.o fs-writeback.o \
pnode.o splice.o sync.o utimes.o d_path.o \
stack.o fs_struct.o statfs.o fs_pin.o nsfs.o \
- fs_types.o fs_context.o fs_parser.o fsopen.o init.o \
- kernel_read_file.o mnt_idmapping.o remap_range.o pidfs.o
+ fs_dirent.o fs_context.o fs_parser.o fsopen.o init.o \
+ kernel_read_file.o mnt_idmapping.o remap_range.o pidfs.o \
+ file_attr.o
obj-$(CONFIG_BUFFER_HEAD) += buffer.o mpage.o
obj-$(CONFIG_PROC_FS) += proc_namespace.o
@@ -87,7 +88,6 @@ obj-$(CONFIG_NFSD) += nfsd/
obj-$(CONFIG_LOCKD) += lockd/
obj-$(CONFIG_NLS) += nls/
obj-y += unicode/
-obj-$(CONFIG_SYSV_FS) += sysv/
obj-$(CONFIG_SMBFS) += smb/
obj-$(CONFIG_HPFS_FS) += hpfs/
obj-$(CONFIG_NTFS3_FS) += ntfs3/
@@ -121,7 +121,6 @@ obj-$(CONFIG_OCFS2_FS) += ocfs2/
obj-$(CONFIG_BTRFS_FS) += btrfs/
obj-$(CONFIG_GFS2_FS) += gfs2/
obj-$(CONFIG_F2FS_FS) += f2fs/
-obj-$(CONFIG_BCACHEFS_FS) += bcachefs/
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
obj-$(CONFIG_EFIVAR_FS) += efivarfs/
@@ -129,3 +128,4 @@ obj-$(CONFIG_EROFS_FS) += erofs/
obj-$(CONFIG_VBOXSF_FS) += vboxsf/
obj-$(CONFIG_ZONEFS_FS) += zonefs/
obj-$(CONFIG_BPF_LSM) += bpf_fs_kfuncs.o
+obj-$(CONFIG_RESCTRL_FS) += resctrl/
diff --git a/fs/adfs/file.c b/fs/adfs/file.c
index ee80718aaeec..cd13165fd904 100644
--- a/fs/adfs/file.c
+++ b/fs/adfs/file.c
@@ -25,7 +25,7 @@
const struct file_operations adfs_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
- .mmap = generic_file_mmap,
+ .mmap_prepare = generic_file_mmap_prepare,
.fsync = generic_file_fsync,
.write_iter = generic_file_write_iter,
.splice_read = filemap_splice_read,
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 21527189e430..6830f8bc8d4e 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -53,13 +53,14 @@ static void adfs_write_failed(struct address_space *mapping, loff_t to)
truncate_pagecache(inode, inode->i_size);
}
-static int adfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct folio **foliop, void **fsdata)
+static int adfs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
int ret;
- ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
+ ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
adfs_get_block,
&ADFS_I(mapping->host)->mmu_private);
if (unlikely(ret))
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 017c48a80203..fdccdbbfc213 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -397,7 +397,7 @@ static int adfs_fill_super(struct super_block *sb, struct fs_context *fc)
if (asb->s_ftsuffix)
asb->s_namelen += 4;
- sb->s_d_op = &adfs_dentry_operations;
+ set_default_d_op(sb, &adfs_dentry_operations);
root = adfs_iget(sb, &root_obj);
sb->s_root = d_make_root(root);
if (!sb->s_root) {
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index e8c2c4535cb3..ac4e9a02910b 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -168,7 +168,7 @@ extern struct dentry *affs_lookup(struct inode *dir, struct dentry *dentry, unsi
extern int affs_unlink(struct inode *dir, struct dentry *dentry);
extern int affs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool);
-extern int affs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+extern struct dentry *affs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode);
extern int affs_rmdir(struct inode *dir, struct dentry *dentry);
extern int affs_link(struct dentry *olddentry, struct inode *dir,
diff --git a/fs/affs/file.c b/fs/affs/file.c
index a5a861dd5223..765c3443663e 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -415,13 +415,14 @@ affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
return ret;
}
-static int affs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct folio **foliop, void **fsdata)
+static int affs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
int ret;
- ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
+ ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
affs_get_block,
&AFFS_I(mapping->host)->mmu_private);
if (unlikely(ret))
@@ -430,14 +431,15 @@ static int affs_write_begin(struct file *file, struct address_space *mapping,
return ret;
}
-static int affs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int copied,
+static int affs_write_end(const struct kiocb *iocb,
+ struct address_space *mapping, loff_t pos,
+ unsigned int len, unsigned int copied,
struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
int ret;
- ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
+ ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
/* Clear Archived bit on file writes, as AmigaOS would do */
if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
@@ -596,7 +598,7 @@ affs_extent_file_ofs(struct inode *inode, u32 newsize)
BUG_ON(tmp > bsize);
AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
- AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
+ AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
affs_fix_checksum(sb, bh);
bh->b_state &= ~(1UL << BH_New);
@@ -645,7 +647,8 @@ static int affs_read_folio_ofs(struct file *file, struct folio *folio)
return err;
}
-static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
+static int affs_write_begin_ofs(const struct kiocb *iocb,
+ struct address_space *mapping,
loff_t pos, unsigned len,
struct folio **foliop, void **fsdata)
{
@@ -684,9 +687,10 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
return err;
}
-static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct folio *folio, void *fsdata)
+static int affs_write_end_ofs(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
struct super_block *sb = inode->i_sb;
@@ -724,7 +728,8 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
tmp = min(bsize - boff, to - from);
BUG_ON(boff + tmp > bsize || tmp > bsize);
memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
- be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
+ AFFS_DATA_HEAD(bh)->size = cpu_to_be32(
+ max(boff + tmp, be32_to_cpu(AFFS_DATA_HEAD(bh)->size)));
affs_fix_checksum(sb, bh);
mark_buffer_dirty_inode(bh, inode);
written += tmp;
@@ -746,7 +751,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
if (buffer_new(bh)) {
AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
- AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
+ AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
AFFS_DATA_HEAD(bh)->next = 0;
bh->b_state &= ~(1UL << BH_New);
@@ -780,7 +785,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
if (buffer_new(bh)) {
AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
- AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
+ AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
AFFS_DATA_HEAD(bh)->next = 0;
bh->b_state &= ~(1UL << BH_New);
@@ -998,7 +1003,7 @@ const struct file_operations affs_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
+ .mmap_prepare = generic_file_mmap_prepare,
.open = affs_file_open,
.release = affs_file_release,
.fsync = affs_file_fsync,
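
The adfs and affs hunks above both track the ->write_begin()/->write_end() signature change, where the struct file argument becomes a const struct kiocb * that is simply threaded through to the generic helpers. A minimal sketch of a converted pair for a cont_write_begin()-based filesystem (myfs_get_block, MYFS_I() and mmu_private are placeholders mirroring the pattern above):

static int myfs_write_begin(const struct kiocb *iocb,
                            struct address_space *mapping,
                            loff_t pos, unsigned len,
                            struct folio **foliop, void **fsdata)
{
        return cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
                                myfs_get_block,
                                &MYFS_I(mapping->host)->mmu_private);
}

static int myfs_write_end(const struct kiocb *iocb,
                          struct address_space *mapping, loff_t pos,
                          unsigned int len, unsigned int copied,
                          struct folio *folio, void *fsdata)
{
        return generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
}
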
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 0210df8d3500..0bfc7d151dcd 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -29,7 +29,7 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
pr_debug("affs_iget(%lu)\n", inode->i_ino);
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 8c154490a2d6..f883be50db12 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -273,7 +273,7 @@ affs_create(struct mnt_idmap *idmap, struct inode *dir,
return 0;
}
-int
+struct dentry *
affs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode)
{
@@ -285,7 +285,7 @@ affs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
inode = affs_new_inode(dir);
if (!inode)
- return -ENOSPC;
+ return ERR_PTR(-ENOSPC);
inode->i_mode = S_IFDIR | mode;
affs_mode_to_prot(inode);
@@ -298,9 +298,9 @@ affs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
clear_nlink(inode);
mark_inode_dirty(inode);
iput(inode);
- return error;
+ return ERR_PTR(error);
}
- return 0;
+ return NULL;
}
int
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 2fa40337776d..44f8aa883100 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -500,9 +500,9 @@ got_root:
return PTR_ERR(root_inode);
if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_INTL))
- sb->s_d_op = &affs_intl_dentry_operations;
+ set_default_d_op(sb, &affs_intl_dentry_operations);
else
- sb->s_d_op = &affs_dentry_operations;
+ set_default_d_op(sb, &affs_dentry_operations);
sb->s_root = d_make_root(root_inode);
if (!sb->s_root) {
diff --git a/fs/afs/Kconfig b/fs/afs/Kconfig
index fc8ba9142f2f..682bd8ec2c10 100644
--- a/fs/afs/Kconfig
+++ b/fs/afs/Kconfig
@@ -5,6 +5,7 @@ config AFS_FS
select AF_RXRPC
select DNS_RESOLVER
select NETFS_SUPPORT
+ select CRYPTO_KRB5
help
If you say Y here, you will get an experimental Andrew File System
driver. It currently only supports unsecured read-only AFS access.
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
index dcdc0f1bb76f..b49b8fe682f3 100644
--- a/fs/afs/Makefile
+++ b/fs/afs/Makefile
@@ -8,9 +8,11 @@ kafs-y := \
addr_prefs.o \
callback.o \
cell.o \
+ cm_security.o \
cmservice.o \
dir.o \
dir_edit.o \
+ dir_search.o \
dir_silly.o \
dynroot.o \
file.o \
diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c
index 6d42f85c6be5..e941da5b6dd9 100644
--- a/fs/afs/addr_list.c
+++ b/fs/afs/addr_list.c
@@ -362,3 +362,53 @@ int afs_merge_fs_addr6(struct afs_net *net, struct afs_addr_list *alist,
alist->nr_addrs++;
return 0;
}
+
+/*
+ * Set the app data on the rxrpc peers an address list points to
+ */
+void afs_set_peer_appdata(struct afs_server *server,
+ struct afs_addr_list *old_alist,
+ struct afs_addr_list *new_alist)
+{
+ unsigned long data = (unsigned long)server;
+ int n = 0, o = 0;
+
+ if (!old_alist) {
+ /* New server. Just set all. */
+ for (; n < new_alist->nr_addrs; n++)
+ rxrpc_kernel_set_peer_data(new_alist->addrs[n].peer, data);
+ return;
+ }
+ if (!new_alist) {
+ /* Dead server. Just remove all. */
+ for (; o < old_alist->nr_addrs; o++)
+ rxrpc_kernel_set_peer_data(old_alist->addrs[o].peer, 0);
+ return;
+ }
+
+ /* Walk through the two lists simultaneously, setting new peers and
+ * clearing old ones. The two lists are ordered by pointer to peer
+ * record.
+ */
+ while (n < new_alist->nr_addrs && o < old_alist->nr_addrs) {
+ struct rxrpc_peer *pn = new_alist->addrs[n].peer;
+ struct rxrpc_peer *po = old_alist->addrs[o].peer;
+
+ if (pn == po) {
+ /* In both lists: nothing to change, but advance both
+ * cursors so the walk makes progress. */
+ n++;
+ o++;
+ continue;
+ }
+ if (pn < po) {
+ rxrpc_kernel_set_peer_data(pn, data);
+ n++;
+ } else {
+ rxrpc_kernel_set_peer_data(po, 0);
+ o++;
+ }
+ }
+
+ if (n < new_alist->nr_addrs)
+ for (; n < new_alist->nr_addrs; n++)
+ rxrpc_kernel_set_peer_data(new_alist->addrs[n].peer, data);
+ if (o < old_alist->nr_addrs)
+ for (; o < old_alist->nr_addrs; o++)
+ rxrpc_kernel_set_peer_data(old_alist->addrs[o].peer, 0);
+}
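
afs_set_peer_appdata() above is a two-pointer merge over two arrays sorted by peer pointer: entries only in the new list get the server data set, entries only in the old list get it cleared, and entries present in both need no change but must still advance both cursors for the walk to make progress. A generic sketch of that merge, independent of the rxrpc types:

static void merge_tag_lists(void **new_items, int n_new,
                            void **old_items, int n_old,
                            void (*tag)(void *item, bool present))
{
        int n = 0, o = 0;

        while (n < n_new && o < n_old) {
                if (new_items[n] == old_items[o]) {
                        n++;                            /* in both lists: no change */
                        o++;
                } else if (new_items[n] < old_items[o]) {
                        tag(new_items[n++], true);      /* only in the new list */
                } else {
                        tag(old_items[o++], false);     /* only in the old list */
                }
        }
        while (n < n_new)
                tag(new_items[n++], true);
        while (o < n_old)
                tag(old_items[o++], false);
}
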
diff --git a/fs/afs/addr_prefs.c b/fs/afs/addr_prefs.c
index a189ff8a5034..133736412c3d 100644
--- a/fs/afs/addr_prefs.c
+++ b/fs/afs/addr_prefs.c
@@ -48,7 +48,7 @@ static int afs_split_string(char **pbuf, char *strv[], unsigned int maxstrv)
strv[count++] = p;
/* Skip over word */
- while (!isspace(*p))
+ while (!isspace(*p) && *p)
p++;
if (!*p)
break;
@@ -413,8 +413,10 @@ int afs_proc_addr_prefs_write(struct file *file, char *buf, size_t size)
do {
argc = afs_split_string(&buf, argv, ARRAY_SIZE(argv));
- if (argc < 0)
- return argc;
+ if (argc < 0) {
+ ret = argc;
+ goto done;
+ }
if (argc < 2)
goto inval;
diff --git a/fs/afs/afs.h b/fs/afs/afs.h
index b488072aee87..ec3db00bd081 100644
--- a/fs/afs/afs.h
+++ b/fs/afs/afs.h
@@ -10,7 +10,7 @@
#include <linux/in.h>
-#define AFS_MAXCELLNAME 256 /* Maximum length of a cell name */
+#define AFS_MAXCELLNAME 253 /* Maximum length of a cell name (DNS limited) */
#define AFS_MAXVOLNAME 64 /* Maximum length of a volume name */
#define AFS_MAXNSERVERS 8 /* Maximum servers in a basic volume record */
#define AFS_NMAXNSERVERS 13 /* Maximum servers in a N/U-class volume record */
diff --git a/fs/afs/afs_vl.h b/fs/afs/afs_vl.h
index a06296c8827d..b835e25a2c02 100644
--- a/fs/afs/afs_vl.h
+++ b/fs/afs/afs_vl.h
@@ -13,6 +13,7 @@
#define AFS_VL_PORT 7003 /* volume location service port */
#define VL_SERVICE 52 /* RxRPC service ID for the Volume Location service */
#define YFS_VL_SERVICE 2503 /* Service ID for AuriStor upgraded VL service */
+#define YFS_VL_MAXCELLNAME 256 /* Maximum length of a cell name in YFS protocol */
enum AFSVL_Operations {
VLGETENTRYBYID = 503, /* AFS Get VLDB entry by ID */
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 99b2c8172021..894d2bad6b6c 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -41,8 +41,8 @@ static void afs_volume_init_callback(struct afs_volume *volume)
list_for_each_entry(vnode, &volume->open_mmaps, cb_mmap_link) {
if (vnode->cb_v_check != atomic_read(&volume->cb_v_break)) {
- atomic64_set(&vnode->cb_expires_at, AFS_NO_CB_PROMISE);
- queue_work(system_unbound_wq, &vnode->cb_work);
+ afs_clear_cb_promise(vnode, afs_cb_promise_clear_vol_init_cb);
+ queue_work(system_dfl_wq, &vnode->cb_work);
}
}
@@ -79,7 +79,7 @@ void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reas
_enter("");
clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
- if (atomic64_xchg(&vnode->cb_expires_at, AFS_NO_CB_PROMISE) != AFS_NO_CB_PROMISE) {
+ if (afs_clear_cb_promise(vnode, afs_cb_promise_clear_cb_break)) {
vnode->cb_break++;
vnode->cb_v_check = atomic_read(&vnode->volume->cb_v_break);
afs_clear_permits(vnode);
@@ -90,7 +90,7 @@ void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reas
if (reason != afs_cb_break_for_deleted &&
vnode->status.type == AFS_FTYPE_FILE &&
atomic_read(&vnode->cb_nr_mmap))
- queue_work(system_unbound_wq, &vnode->cb_work);
+ queue_work(system_dfl_wq, &vnode->cb_work);
trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true);
} else {
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index caa09875f520..71c10a05cebe 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -20,8 +20,9 @@ static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
static atomic_t cell_debug_id;
-static void afs_queue_cell_manager(struct afs_net *);
-static void afs_manage_cell_work(struct work_struct *);
+static void afs_cell_timer(struct timer_list *timer);
+static void afs_destroy_cell_work(struct work_struct *work);
+static void afs_manage_cell_work(struct work_struct *work);
static void afs_dec_cells_outstanding(struct afs_net *net)
{
@@ -29,19 +30,11 @@ static void afs_dec_cells_outstanding(struct afs_net *net)
wake_up_var(&net->cells_outstanding);
}
-/*
- * Set the cell timer to fire after a given delay, assuming it's not already
- * set for an earlier time.
- */
-static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
+static void afs_set_cell_state(struct afs_cell *cell, enum afs_cell_state state)
{
- if (net->live) {
- atomic_inc(&net->cells_outstanding);
- if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
- afs_dec_cells_outstanding(net);
- } else {
- afs_queue_cell_manager(net);
- }
+ smp_store_release(&cell->state, state); /* Commit cell changes before state */
+ smp_wmb(); /* Set cell state before task state */
+ wake_up_var(&cell->state);
}
/*
@@ -64,7 +57,8 @@ static struct afs_cell *afs_find_cell_locked(struct afs_net *net,
return ERR_PTR(-ENAMETOOLONG);
if (!name) {
- cell = net->ws_cell;
+ cell = rcu_dereference_protected(net->ws_cell,
+ lockdep_is_held(&net->cells_lock));
if (!cell)
return ERR_PTR(-EDESTADDRREQ);
goto found;
@@ -115,7 +109,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
const char *name, unsigned int namelen,
const char *addresses)
{
- struct afs_vlserver_list *vllist;
+ struct afs_vlserver_list *vllist = NULL;
struct afs_cell *cell;
int i, ret;
@@ -146,27 +140,37 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
return ERR_PTR(-ENOMEM);
}
- cell->name = kmalloc(namelen + 1, GFP_KERNEL);
+ /* Allocate the cell name and the key name in one go. */
+ cell->name = kmalloc(1 + namelen + 1 +
+ 4 + namelen + 1, GFP_KERNEL);
if (!cell->name) {
kfree(cell);
return ERR_PTR(-ENOMEM);
}
- cell->net = net;
+ cell->name[0] = '.';
+ cell->name++;
cell->name_len = namelen;
for (i = 0; i < namelen; i++)
cell->name[i] = tolower(name[i]);
- cell->name[i] = 0;
+ cell->name[i++] = 0;
+ cell->key_desc = cell->name + i;
+ memcpy(cell->key_desc, "afs@", 4);
+ memcpy(cell->key_desc + 4, cell->name, cell->name_len + 1);
+
+ cell->net = net;
refcount_set(&cell->ref, 1);
atomic_set(&cell->active, 0);
+ INIT_WORK(&cell->destroyer, afs_destroy_cell_work);
INIT_WORK(&cell->manager, afs_manage_cell_work);
+ timer_setup(&cell->management_timer, afs_cell_timer, 0);
init_rwsem(&cell->vs_lock);
cell->volumes = RB_ROOT;
INIT_HLIST_HEAD(&cell->proc_volumes);
seqlock_init(&cell->volume_lock);
cell->fs_servers = RB_ROOT;
- seqlock_init(&cell->fs_lock);
+ init_rwsem(&cell->fs_lock);
rwlock_init(&cell->vl_servers_lock);
cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);
@@ -179,6 +183,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
VL_SERVICE, AFS_VL_PORT);
if (IS_ERR(vllist)) {
ret = PTR_ERR(vllist);
+ vllist = NULL;
goto parse_failed;
}
@@ -201,7 +206,13 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
cell->dns_status = vllist->status;
smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
atomic_inc(&net->cells_outstanding);
+ ret = idr_alloc_cyclic(&net->cells_dyn_ino, cell,
+ 2, INT_MAX / 2, GFP_KERNEL);
+ if (ret < 0)
+ goto error;
+ cell->dynroot_ino = ret;
cell->debug_id = atomic_inc_return(&cell_debug_id);
+
trace_afs_cell(cell->debug_id, 1, 0, afs_cell_trace_alloc);
_leave(" = %p", cell);
@@ -211,7 +222,8 @@ parse_failed:
if (ret == -EINVAL)
printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
- kfree(cell->name);
+ afs_put_vlserverlist(cell->net, vllist);
+ kfree(cell->name - 1);
kfree(cell);
_leave(" = %d", ret);
return ERR_PTR(ret);
@@ -223,7 +235,8 @@ error:
* @name: The name of the cell.
* @namesz: The strlen of the cell name.
* @vllist: A colon/comma separated list of numeric IP addresses or NULL.
- * @excl: T if an error should be given if the cell name already exists.
+ * @reason: The reason we're doing the lookup.
+ * @trace: The tracepoint reason to log if the lookup is successful.
*
* Look up a cell record by name and query the DNS for VL server addresses if
* needed. Note that the actual DNS query is punted off to the manager thread
@@ -232,19 +245,27 @@ error:
*/
struct afs_cell *afs_lookup_cell(struct afs_net *net,
const char *name, unsigned int namesz,
- const char *vllist, bool excl)
+ const char *vllist,
+ enum afs_lookup_cell_for reason,
+ enum afs_cell_trace trace)
{
struct afs_cell *cell, *candidate, *cursor;
struct rb_node *parent, **pp;
enum afs_cell_state state;
int ret, n;
- _enter("%s,%s", name, vllist);
+ _enter("%s,%s,%u", name, vllist, reason);
- if (!excl) {
- cell = afs_find_cell(net, name, namesz, afs_cell_trace_use_lookup);
- if (!IS_ERR(cell))
+ if (reason != AFS_LOOKUP_CELL_PRELOAD) {
+ cell = afs_find_cell(net, name, namesz, trace);
+ if (!IS_ERR(cell)) {
+ if (reason == AFS_LOOKUP_CELL_DYNROOT)
+ goto no_wait;
+ if (cell->state == AFS_CELL_SETTING_UP ||
+ cell->state == AFS_CELL_UNLOOKED)
+ goto lookup_cell;
goto wait_for_cell;
+ }
}
/* Assume we're probably going to create a cell and preallocate and
@@ -285,29 +306,74 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
cell = candidate;
candidate = NULL;
- atomic_set(&cell->active, 2);
- trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 2, afs_cell_trace_insert);
+ afs_use_cell(cell, trace);
rb_link_node_rcu(&cell->net_node, parent, pp);
rb_insert_color(&cell->net_node, &net->cells);
up_write(&net->cells_lock);
- afs_queue_cell(cell, afs_cell_trace_get_queue_new);
+lookup_cell:
+ if (reason != AFS_LOOKUP_CELL_PRELOAD &&
+ reason != AFS_LOOKUP_CELL_ROOTCELL) {
+ set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags);
+ afs_queue_cell(cell, afs_cell_trace_queue_new);
+ }
wait_for_cell:
- trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), atomic_read(&cell->active),
- afs_cell_trace_wait);
- _debug("wait_for_cell");
- wait_var_event(&cell->state,
- ({
- state = smp_load_acquire(&cell->state); /* vs error */
- state == AFS_CELL_ACTIVE || state == AFS_CELL_REMOVED;
- }));
+ state = smp_load_acquire(&cell->state); /* vs error */
+ switch (state) {
+ case AFS_CELL_ACTIVE:
+ case AFS_CELL_DEAD:
+ break;
+ case AFS_CELL_UNLOOKED:
+ default:
+ if (reason == AFS_LOOKUP_CELL_PRELOAD ||
+ reason == AFS_LOOKUP_CELL_ROOTCELL)
+ break;
+ _debug("wait_for_cell");
+ afs_see_cell(cell, afs_cell_trace_wait);
+ wait_var_event(&cell->state,
+ ({
+ state = smp_load_acquire(&cell->state); /* vs error */
+ state == AFS_CELL_ACTIVE || state == AFS_CELL_DEAD;
+ }));
+ _debug("waited_for_cell %d %d", cell->state, cell->error);
+ }
+no_wait:
/* Check the state obtained from the wait check. */
- if (state == AFS_CELL_REMOVED) {
+ state = smp_load_acquire(&cell->state); /* vs error */
+ if (state == AFS_CELL_DEAD) {
ret = cell->error;
goto error;
}
+ if (state == AFS_CELL_ACTIVE) {
+ switch (cell->dns_status) {
+ case DNS_LOOKUP_NOT_DONE:
+ if (cell->dns_source == DNS_RECORD_FROM_CONFIG) {
+ ret = 0;
+ break;
+ }
+ fallthrough;
+ default:
+ ret = -EIO;
+ goto error;
+ case DNS_LOOKUP_GOOD:
+ case DNS_LOOKUP_GOOD_WITH_BAD:
+ ret = 0;
+ break;
+ case DNS_LOOKUP_GOT_NOT_FOUND:
+ ret = -ENOENT;
+ goto error;
+ case DNS_LOOKUP_BAD:
+ ret = -EREMOTEIO;
+ goto error;
+ case DNS_LOOKUP_GOT_LOCAL_FAILURE:
+ case DNS_LOOKUP_GOT_TEMP_FAILURE:
+ case DNS_LOOKUP_GOT_NS_FAILURE:
+ ret = -EDESTADDRREQ;
+ goto error;
+ }
+ }
_leave(" = %p [cell]", cell);
return cell;
@@ -315,10 +381,10 @@ wait_for_cell:
cell_already_exists:
_debug("cell exists");
cell = cursor;
- if (excl) {
+ if (reason == AFS_LOOKUP_CELL_PRELOAD) {
ret = -EEXIST;
} else {
- afs_use_cell(cursor, afs_cell_trace_use_lookup);
+ afs_use_cell(cursor, trace);
ret = 0;
}
up_write(&net->cells_lock);
@@ -328,7 +394,7 @@ cell_already_exists:
goto wait_for_cell;
goto error_noput;
error:
- afs_unuse_cell(net, cell, afs_cell_trace_unuse_lookup);
+ afs_unuse_cell(cell, afs_cell_trace_unuse_lookup_error);
error_noput:
_leave(" = %d [error]", ret);
return ERR_PTR(ret);
@@ -365,8 +431,18 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
len = cp - rootcell;
}
- /* allocate a cell record for the root cell */
- new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
+ if (len == 0 || !rootcell[0] || rootcell[0] == '.' || rootcell[len - 1] == '.')
+ return -EINVAL;
+ if (memchr(rootcell, '/', len))
+ return -EINVAL;
+ cp = strstr(rootcell, "..");
+ if (cp && cp < rootcell + len)
+ return -EINVAL;
+
+ /* allocate a cell record for the root/workstation cell */
+ new_root = afs_lookup_cell(net, rootcell, len, vllist,
+ AFS_LOOKUP_CELL_ROOTCELL,
+ afs_cell_trace_use_lookup_ws);
if (IS_ERR(new_root)) {
_leave(" = %ld", PTR_ERR(new_root));
return PTR_ERR(new_root);
@@ -377,12 +453,11 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
/* install the new cell */
down_write(&net->cells_lock);
- afs_see_cell(new_root, afs_cell_trace_see_ws);
- old_root = net->ws_cell;
- net->ws_cell = new_root;
+ old_root = rcu_replace_pointer(net->ws_cell, new_root,
+ lockdep_is_held(&net->cells_lock));
up_write(&net->cells_lock);
- afs_unuse_cell(net, old_root, afs_cell_trace_unuse_ws);
+ afs_unuse_cell(old_root, afs_cell_trace_unuse_ws);
_leave(" = 0");
return 0;
}
@@ -500,39 +575,24 @@ static void afs_cell_destroy(struct rcu_head *rcu)
trace_afs_cell(cell->debug_id, r, atomic_read(&cell->active), afs_cell_trace_free);
afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
- afs_unuse_cell(net, cell->alias_of, afs_cell_trace_unuse_alias);
+ afs_unuse_cell(cell->alias_of, afs_cell_trace_unuse_alias);
key_put(cell->anonymous_key);
- kfree(cell->name);
+ idr_remove(&net->cells_dyn_ino, cell->dynroot_ino);
+ kfree(cell->name - 1);
kfree(cell);
afs_dec_cells_outstanding(net);
_leave(" [destroyed]");
}
-/*
- * Queue the cell manager.
- */
-static void afs_queue_cell_manager(struct afs_net *net)
-{
- int outstanding = atomic_inc_return(&net->cells_outstanding);
-
- _enter("%d", outstanding);
-
- if (!queue_work(afs_wq, &net->cells_manager))
- afs_dec_cells_outstanding(net);
-}
-
-/*
- * Cell management timer. We have an increment on cells_outstanding that we
- * need to pass along to the work item.
- */
-void afs_cells_timer(struct timer_list *timer)
+static void afs_destroy_cell_work(struct work_struct *work)
{
- struct afs_net *net = container_of(timer, struct afs_net, cells_timer);
+ struct afs_cell *cell = container_of(work, struct afs_cell, destroyer);
- _enter("");
- if (!queue_work(afs_wq, &net->cells_manager))
- afs_dec_cells_outstanding(net);
+ afs_see_cell(cell, afs_cell_trace_destroy);
+ timer_delete_sync(&cell->management_timer);
+ cancel_work_sync(&cell->manager);
+ call_rcu(&cell->rcu, afs_cell_destroy);
}
/*
@@ -564,7 +624,7 @@ void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason)
if (zero) {
a = atomic_read(&cell->active);
WARN(a != 0, "Cell active count %u > 0\n", a);
- call_rcu(&cell->rcu, afs_cell_destroy);
+ WARN_ON(!queue_work(afs_wq, &cell->destroyer));
}
}
}
@@ -576,10 +636,9 @@ struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
int r, a;
- r = refcount_read(&cell->ref);
- WARN_ON(r == 0);
+ __refcount_inc(&cell->ref, &r);
a = atomic_inc_return(&cell->active);
- trace_afs_cell(cell->debug_id, r, a, reason);
+ trace_afs_cell(cell->debug_id, r + 1, a, reason);
return cell;
}
@@ -587,10 +646,11 @@ struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
* Record a cell becoming less active. When the active counter reaches 1, it
* is scheduled for destruction, but may get reactivated.
*/
-void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_trace reason)
+void afs_unuse_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
unsigned int debug_id;
time64_t now, expire_delay;
+ bool zero;
int r, a;
if (!cell)
@@ -605,13 +665,15 @@ void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_tr
expire_delay = afs_cell_gc_delay;
debug_id = cell->debug_id;
- r = refcount_read(&cell->ref);
a = atomic_dec_return(&cell->active);
- trace_afs_cell(debug_id, r, a, reason);
- WARN_ON(a == 0);
- if (a == 1)
+ if (!a)
/* 'cell' may now be garbage collected. */
- afs_set_cell_timer(net, expire_delay);
+ afs_set_cell_timer(cell, expire_delay);
+
+ zero = __refcount_dec_and_test(&cell->ref, &r);
+ trace_afs_cell(debug_id, r - 1, a, reason);
+ if (zero)
+ WARN_ON(!queue_work(afs_wq, &cell->destroyer));
}
/*
@@ -631,36 +693,27 @@ void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason)
*/
void afs_queue_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
- afs_get_cell(cell, reason);
- if (!queue_work(afs_wq, &cell->manager))
- afs_put_cell(cell, afs_cell_trace_put_queue_fail);
+ queue_work(afs_wq, &cell->manager);
}
/*
- * Allocate a key to use as a placeholder for anonymous user security.
+ * Cell-specific management timer.
*/
-static int afs_alloc_anon_key(struct afs_cell *cell)
+static void afs_cell_timer(struct timer_list *timer)
{
- struct key *key;
- char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;
-
- /* Create a key to represent an anonymous user. */
- memcpy(keyname, "afs@", 4);
- dp = keyname + 4;
- cp = cell->name;
- do {
- *dp++ = tolower(*cp);
- } while (*cp++);
+ struct afs_cell *cell = container_of(timer, struct afs_cell, management_timer);
- key = rxrpc_get_null_key(keyname);
- if (IS_ERR(key))
- return PTR_ERR(key);
-
- cell->anonymous_key = key;
+ afs_see_cell(cell, afs_cell_trace_see_mgmt_timer);
+ if (refcount_read(&cell->ref) > 0 && cell->net->live)
+ queue_work(afs_wq, &cell->manager);
+}
- _debug("anon key %p{%x}",
- cell->anonymous_key, key_serial(cell->anonymous_key));
- return 0;
+/*
+ * Set/reduce the cell timer.
+ */
+void afs_set_cell_timer(struct afs_cell *cell, unsigned int delay_secs)
+{
+ timer_reduce(&cell->management_timer, jiffies + delay_secs * HZ);
}
/*
@@ -672,12 +725,6 @@ static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
struct afs_cell *pcell;
int ret;
- if (!cell->anonymous_key) {
- ret = afs_alloc_anon_key(cell);
- if (ret < 0)
- return ret;
- }
-
ret = afs_proc_cell_setup(cell);
if (ret < 0)
return ret;
@@ -695,7 +742,6 @@ static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
if (cell->proc_link.next)
cell->proc_link.next->pprev = &cell->proc_link.next;
- afs_dynroot_mkdir(net, cell);
mutex_unlock(&net->proc_cells_lock);
return 0;
}
@@ -710,242 +756,167 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
afs_proc_cell_remove(cell);
mutex_lock(&net->proc_cells_lock);
- hlist_del_rcu(&cell->proc_link);
- afs_dynroot_rmdir(net, cell);
+ if (!hlist_unhashed(&cell->proc_link))
+ hlist_del_rcu(&cell->proc_link);
mutex_unlock(&net->proc_cells_lock);
_leave("");
}
+static bool afs_has_cell_expired(struct afs_cell *cell, time64_t *_next_manage)
+{
+ const struct afs_vlserver_list *vllist;
+ time64_t expire_at = cell->last_inactive;
+ time64_t now = ktime_get_real_seconds();
+
+ if (atomic_read(&cell->active))
+ return false;
+ if (!cell->net->live)
+ return true;
+
+ vllist = rcu_dereference_protected(cell->vl_servers, true);
+ if (vllist && vllist->nr_servers > 0)
+ expire_at += afs_cell_gc_delay;
+
+ if (expire_at <= now)
+ return true;
+ if (expire_at < *_next_manage)
+ *_next_manage = expire_at;
+ return false;
+}
+
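As a purely illustrative timeline for afs_has_cell_expired() above, taking afs_cell_gc_delay to be 600 seconds (the real value is a module setting defined elsewhere in cell.c and not shown in this diff):

/* The cell's active count dropped to zero at t = 1000 (last_inactive)
 * and it still has VL servers, so expire_at = 1000 + 600 = 1600:
 *
 *   now <  1600       -> not expired; *_next_manage is clamped to 1600
 *                        so the manager re-checks at that point
 *   now >= 1600       -> expired; afs_manage_cell() takes remove_cell
 *   !cell->net->live  -> treated as expired immediately
 */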
/*
* Manage a cell record, initialising and destroying it, maintaining its DNS
* records.
*/
-static void afs_manage_cell(struct afs_cell *cell)
+static bool afs_manage_cell(struct afs_cell *cell)
{
struct afs_net *net = cell->net;
- int ret, active;
+ time64_t next_manage = TIME64_MAX;
+ int ret;
_enter("%s", cell->name);
-again:
_debug("state %u", cell->state);
switch (cell->state) {
- case AFS_CELL_INACTIVE:
- case AFS_CELL_FAILED:
- down_write(&net->cells_lock);
- active = 1;
- if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) {
- rb_erase(&cell->net_node, &net->cells);
- trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 0,
- afs_cell_trace_unuse_delete);
- smp_store_release(&cell->state, AFS_CELL_REMOVED);
- }
- up_write(&net->cells_lock);
- if (cell->state == AFS_CELL_REMOVED) {
- wake_up_var(&cell->state);
- goto final_destruction;
- }
- if (cell->state == AFS_CELL_FAILED)
- goto done;
- smp_store_release(&cell->state, AFS_CELL_UNSET);
- wake_up_var(&cell->state);
- goto again;
-
- case AFS_CELL_UNSET:
- smp_store_release(&cell->state, AFS_CELL_ACTIVATING);
- wake_up_var(&cell->state);
- goto again;
-
- case AFS_CELL_ACTIVATING:
- ret = afs_activate_cell(net, cell);
- if (ret < 0)
- goto activation_failed;
+ case AFS_CELL_SETTING_UP:
+ goto set_up_cell;
+ case AFS_CELL_UNLOOKED:
+ case AFS_CELL_ACTIVE:
+ goto cell_is_active;
+ case AFS_CELL_REMOVING:
+ WARN_ON_ONCE(1);
+ return false;
+ case AFS_CELL_DEAD:
+ return false;
+ default:
+ _debug("bad state %u", cell->state);
+ WARN_ON_ONCE(1); /* Unhandled state */
+ return false;
+ }
- smp_store_release(&cell->state, AFS_CELL_ACTIVE);
- wake_up_var(&cell->state);
- goto again;
+set_up_cell:
+ ret = afs_activate_cell(net, cell);
+ if (ret < 0) {
+ cell->error = ret;
+ goto remove_cell;
+ }
- case AFS_CELL_ACTIVE:
- if (atomic_read(&cell->active) > 1) {
- if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
- ret = afs_update_cell(cell);
- if (ret < 0)
- cell->error = ret;
- }
- goto done;
- }
- smp_store_release(&cell->state, AFS_CELL_DEACTIVATING);
- wake_up_var(&cell->state);
- goto again;
+ afs_set_cell_state(cell, AFS_CELL_UNLOOKED);
- case AFS_CELL_DEACTIVATING:
- if (atomic_read(&cell->active) > 1)
- goto reverse_deactivation;
- afs_deactivate_cell(net, cell);
- smp_store_release(&cell->state, AFS_CELL_INACTIVE);
- wake_up_var(&cell->state);
- goto again;
+cell_is_active:
+ if (afs_has_cell_expired(cell, &next_manage))
+ goto remove_cell;
- case AFS_CELL_REMOVED:
- goto done;
+ if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
+ ret = afs_update_cell(cell);
+ if (ret < 0)
+ cell->error = ret;
+ if (cell->state == AFS_CELL_UNLOOKED)
+ afs_set_cell_state(cell, AFS_CELL_ACTIVE);
+ }
- default:
- break;
+ if (next_manage < TIME64_MAX && cell->net->live) {
+ time64_t now = ktime_get_real_seconds();
+
+ if (next_manage - now <= 0)
+ afs_queue_cell(cell, afs_cell_trace_queue_again);
+ else
+ afs_set_cell_timer(cell, next_manage - now);
}
- _debug("bad state %u", cell->state);
- BUG(); /* Unhandled state */
+ _leave(" [done %u]", cell->state);
+ return false;
-activation_failed:
- cell->error = ret;
- afs_deactivate_cell(net, cell);
+remove_cell:
+ down_write(&net->cells_lock);
- smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */
- wake_up_var(&cell->state);
- goto again;
+ if (atomic_read(&cell->active)) {
+ up_write(&net->cells_lock);
+ goto cell_is_active;
+ }
-reverse_deactivation:
- smp_store_release(&cell->state, AFS_CELL_ACTIVE);
- wake_up_var(&cell->state);
- _leave(" [deact->act]");
- return;
+ /* Make sure that the expiring server records are going to see the fact
+ * that the cell is kaput.
+ */
+ afs_set_cell_state(cell, AFS_CELL_REMOVING);
-done:
- _leave(" [done %u]", cell->state);
- return;
+ afs_deactivate_cell(net, cell);
+ afs_purge_servers(cell);
+
+ rb_erase(&cell->net_node, &net->cells);
+ afs_see_cell(cell, afs_cell_trace_unuse_delete);
+ up_write(&net->cells_lock);
-final_destruction:
/* The root volume is pinning the cell */
afs_put_volume(cell->root_volume, afs_volume_trace_put_cell_root);
cell->root_volume = NULL;
- afs_put_cell(cell, afs_cell_trace_put_destroy);
+
+ afs_set_cell_state(cell, AFS_CELL_DEAD);
+ return true;
}
static void afs_manage_cell_work(struct work_struct *work)
{
struct afs_cell *cell = container_of(work, struct afs_cell, manager);
+ bool final_put;
- afs_manage_cell(cell);
- afs_put_cell(cell, afs_cell_trace_put_queue_work);
+ afs_see_cell(cell, afs_cell_trace_manage);
+ final_put = afs_manage_cell(cell);
+ afs_see_cell(cell, afs_cell_trace_managed);
+ if (final_put)
+ afs_put_cell(cell, afs_cell_trace_put_final);
}
/*
- * Manage the records of cells known to a network namespace. This includes
- * updating the DNS records and garbage collecting unused cells that were
- * automatically added.
- *
- * Note that constructed cell records may only be removed from net->cells by
- * this work item, so it is safe for this work item to stash a cursor pointing
- * into the tree and then return to caller (provided it skips cells that are
- * still under construction).
- *
- * Note also that we were given an increment on net->cells_outstanding by
- * whoever queued us that we need to deal with before returning.
+ * Purge in-memory cell database.
*/
-void afs_manage_cells(struct work_struct *work)
+void afs_cell_purge(struct afs_net *net)
{
- struct afs_net *net = container_of(work, struct afs_net, cells_manager);
+ struct afs_cell *ws;
struct rb_node *cursor;
- time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
- bool purging = !net->live;
_enter("");
- /* Trawl the cell database looking for cells that have expired from
- * lack of use and cells whose DNS results have expired and dispatch
- * their managers.
- */
- down_read(&net->cells_lock);
+ down_write(&net->cells_lock);
+ ws = rcu_replace_pointer(net->ws_cell, NULL,
+ lockdep_is_held(&net->cells_lock));
+ up_write(&net->cells_lock);
+ afs_unuse_cell(ws, afs_cell_trace_unuse_ws);
+ _debug("kick cells");
+ down_read(&net->cells_lock);
for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
- struct afs_cell *cell =
- rb_entry(cursor, struct afs_cell, net_node);
- unsigned active;
- bool sched_cell = false;
-
- active = atomic_read(&cell->active);
- trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
- active, afs_cell_trace_manage);
-
- ASSERTCMP(active, >=, 1);
-
- if (purging) {
- if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags)) {
- active = atomic_dec_return(&cell->active);
- trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
- active, afs_cell_trace_unuse_pin);
- }
- }
+ struct afs_cell *cell = rb_entry(cursor, struct afs_cell, net_node);
- if (active == 1) {
- struct afs_vlserver_list *vllist;
- time64_t expire_at = cell->last_inactive;
-
- read_lock(&cell->vl_servers_lock);
- vllist = rcu_dereference_protected(
- cell->vl_servers,
- lockdep_is_held(&cell->vl_servers_lock));
- if (vllist->nr_servers > 0)
- expire_at += afs_cell_gc_delay;
- read_unlock(&cell->vl_servers_lock);
- if (purging || expire_at <= now)
- sched_cell = true;
- else if (expire_at < next_manage)
- next_manage = expire_at;
- }
+ afs_see_cell(cell, afs_cell_trace_purge);
- if (!purging) {
- if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags))
- sched_cell = true;
- }
+ if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
+ afs_unuse_cell(cell, afs_cell_trace_unuse_pin);
- if (sched_cell)
- afs_queue_cell(cell, afs_cell_trace_get_queue_manage);
+ afs_queue_cell(cell, afs_cell_trace_queue_purge);
}
-
up_read(&net->cells_lock);
- /* Update the timer on the way out. We have to pass an increment on
- * cells_outstanding in the namespace that we are in to the timer or
- * the work scheduler.
- */
- if (!purging && next_manage < TIME64_MAX) {
- now = ktime_get_real_seconds();
-
- if (next_manage - now <= 0) {
- if (queue_work(afs_wq, &net->cells_manager))
- atomic_inc(&net->cells_outstanding);
- } else {
- afs_set_cell_timer(net, next_manage - now);
- }
- }
-
- afs_dec_cells_outstanding(net);
- _leave(" [%d]", atomic_read(&net->cells_outstanding));
-}
-
-/*
- * Purge in-memory cell database.
- */
-void afs_cell_purge(struct afs_net *net)
-{
- struct afs_cell *ws;
-
- _enter("");
-
- down_write(&net->cells_lock);
- ws = net->ws_cell;
- net->ws_cell = NULL;
- up_write(&net->cells_lock);
- afs_unuse_cell(net, ws, afs_cell_trace_unuse_ws);
-
- _debug("del timer");
- if (del_timer_sync(&net->cells_timer))
- atomic_dec(&net->cells_outstanding);
-
- _debug("kick mgr");
- afs_queue_cell_manager(net);
-
_debug("wait");
wait_var_event(&net->cells_outstanding,
!atomic_read(&net->cells_outstanding));
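Taken together, the cell.c changes above replace the shared cell-manager work item with a per-cell state machine. The sketch below only reflects the states and transitions visible in this diff; the authoritative enum lives in fs/afs/internal.h, which is not part of this excerpt, so the ordering and comments here are inferred:

enum afs_cell_state {
	AFS_CELL_SETTING_UP,	/* initial setup (procfs etc.) in progress */
	AFS_CELL_UNLOOKED,	/* usable, but no DNS lookup performed yet */
	AFS_CELL_ACTIVE,	/* VL server list looked up */
	AFS_CELL_REMOVING,	/* being torn down by the manager */
	AFS_CELL_DEAD,		/* removed from net->cells; waiters read cell->error */
};

/* Transitions, published via afs_set_cell_state():
 *
 *   SETTING_UP --afs_activate_cell() ok---> UNLOOKED
 *   UNLOOKED ----DNS lookup completes-----> ACTIVE
 *   UNLOOKED/ACTIVE --unused and expired--> REMOVING --> DEAD
 *
 * Waiters in afs_lookup_cell() only block until ACTIVE or DEAD is reached.
 */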
diff --git a/fs/afs/cm_security.c b/fs/afs/cm_security.c
new file mode 100644
index 000000000000..edcbd249d202
--- /dev/null
+++ b/fs/afs/cm_security.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Cache manager security.
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/slab.h>
+#include <crypto/krb5.h>
+#include "internal.h"
+#include "afs_cm.h"
+#include "afs_fs.h"
+#include "protocol_yfs.h"
+#define RXRPC_TRACE_ONLY_DEFINE_ENUMS
+#include <trace/events/rxrpc.h>
+
+#define RXGK_SERVER_ENC_TOKEN 1036U // 0x40c
+#define xdr_round_up(x) (round_up((x), sizeof(__be32)))
+#define xdr_len_object(x) (4 + round_up((x), sizeof(__be32)))
+
+#ifdef CONFIG_RXGK
+static int afs_create_yfs_cm_token(struct sk_buff *challenge,
+ struct afs_server *server);
+#endif
+
+/*
+ * Respond to an RxGK challenge, adding appdata.
+ */
+static int afs_respond_to_challenge(struct sk_buff *challenge)
+{
+#ifdef CONFIG_RXGK
+ struct krb5_buffer appdata = {};
+ struct afs_server *server;
+#endif
+ struct rxrpc_peer *peer;
+ unsigned long peer_data;
+ u16 service_id;
+ u8 security_index;
+
+ rxrpc_kernel_query_challenge(challenge, &peer, &peer_data,
+ &service_id, &security_index);
+
+ _enter("%u,%u", service_id, security_index);
+
+ switch (service_id) {
+ /* We don't send CM_SERVICE RPCs, so don't expect a challenge
+ * therefrom.
+ */
+ case FS_SERVICE:
+ case VL_SERVICE:
+ case YFS_FS_SERVICE:
+ case YFS_VL_SERVICE:
+ break;
+ default:
+ pr_warn("Can't respond to unknown challenge %u:%u\n",
+ service_id, security_index);
+ return rxrpc_kernel_reject_challenge(challenge, RX_USER_ABORT, -EPROTO,
+ afs_abort_unsupported_sec_class);
+ }
+
+ switch (security_index) {
+#ifdef CONFIG_RXKAD
+ case RXRPC_SECURITY_RXKAD:
+ return rxkad_kernel_respond_to_challenge(challenge);
+#endif
+
+#ifdef CONFIG_RXGK
+ case RXRPC_SECURITY_RXGK:
+ return rxgk_kernel_respond_to_challenge(challenge, &appdata);
+
+ case RXRPC_SECURITY_YFS_RXGK:
+ switch (service_id) {
+ case FS_SERVICE:
+ case YFS_FS_SERVICE:
+ server = (struct afs_server *)peer_data;
+ if (!server->cm_rxgk_appdata.data) {
+ mutex_lock(&server->cm_token_lock);
+ if (!server->cm_rxgk_appdata.data)
+ afs_create_yfs_cm_token(challenge, server);
+ mutex_unlock(&server->cm_token_lock);
+ }
+ if (server->cm_rxgk_appdata.data)
+ appdata = server->cm_rxgk_appdata;
+ break;
+ }
+ return rxgk_kernel_respond_to_challenge(challenge, &appdata);
+#endif
+
+ default:
+ return rxrpc_kernel_reject_challenge(challenge, RX_USER_ABORT, -EPROTO,
+ afs_abort_unsupported_sec_class);
+ }
+}
+
+/*
+ * Process the OOB message queue, processing challenge packets.
+ */
+void afs_process_oob_queue(struct work_struct *work)
+{
+ struct afs_net *net = container_of(work, struct afs_net, rx_oob_work);
+ struct sk_buff *oob;
+ enum rxrpc_oob_type type;
+
+ while ((oob = rxrpc_kernel_dequeue_oob(net->socket, &type))) {
+ switch (type) {
+ case RXRPC_OOB_CHALLENGE:
+ afs_respond_to_challenge(oob);
+ break;
+ }
+ rxrpc_kernel_free_oob(oob);
+ }
+}
+
+#ifdef CONFIG_RXGK
+/*
+ * Create a securities keyring for the cache manager and attach a key to it for
+ * the RxGK tokens we want to use to secure the callback connection back from
+ * the fileserver.
+ */
+int afs_create_token_key(struct afs_net *net, struct socket *socket)
+{
+ const struct krb5_enctype *krb5;
+ struct key *ring;
+ key_ref_t key;
+ char K0[32], *desc;
+ int ret;
+
+ ring = keyring_alloc("kafs",
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
+ KEY_POS_SEARCH | KEY_POS_WRITE |
+ KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH,
+ KEY_ALLOC_NOT_IN_QUOTA,
+ NULL, NULL);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+
+ ret = rxrpc_sock_set_security_keyring(socket->sk, ring);
+ if (ret < 0)
+ goto out;
+
+ ret = -ENOPKG;
+ krb5 = crypto_krb5_find_enctype(KRB5_ENCTYPE_AES128_CTS_HMAC_SHA1_96);
+ if (!krb5)
+ goto out;
+
+ if (WARN_ON_ONCE(krb5->key_len > sizeof(K0)))
+ goto out;
+
+ ret = -ENOMEM;
+ desc = kasprintf(GFP_KERNEL, "%u:%u:%u:%u",
+ YFS_CM_SERVICE, RXRPC_SECURITY_YFS_RXGK, 1, krb5->etype);
+ if (!desc)
+ goto out;
+
+ wait_for_random_bytes();
+ get_random_bytes(K0, krb5->key_len);
+
+ key = key_create(make_key_ref(ring, true),
+ "rxrpc_s", desc,
+ K0, krb5->key_len,
+ KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH | KEY_USR_VIEW,
+ KEY_ALLOC_NOT_IN_QUOTA);
+ kfree(desc);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto out;
+ }
+
+ net->fs_cm_token_key = key_ref_to_ptr(key);
+ ret = 0;
+out:
+ key_put(ring);
+ return ret;
+}
+
+/*
+ * Create a YFS RxGK GSS token to use as a ticket to the specified fileserver.
+ */
+static int afs_create_yfs_cm_token(struct sk_buff *challenge,
+ struct afs_server *server)
+{
+ const struct krb5_enctype *conn_krb5, *token_krb5;
+ const struct krb5_buffer *token_key;
+ struct crypto_aead *aead;
+ struct scatterlist sg;
+ struct afs_net *net = server->cell->net;
+ const struct key *key = net->fs_cm_token_key;
+ size_t keysize, uuidsize, authsize, toksize, encsize, contsize, adatasize, offset;
+ __be32 caps[1] = {
+ [0] = htonl(AFS_CAP_ERROR_TRANSLATION),
+ };
+ __be32 *xdr;
+ void *appdata, *K0, *encbase;
+ u32 enctype;
+ int ret;
+
+ if (!key)
+ return -ENOKEY;
+
+ /* Assume that the fileserver is happy to use the same encoding type as
+ * we were told to use by the token obtained by the user.
+ */
+ enctype = rxgk_kernel_query_challenge(challenge);
+
+ conn_krb5 = crypto_krb5_find_enctype(enctype);
+ if (!conn_krb5)
+ return -ENOPKG;
+ token_krb5 = key->payload.data[0];
+ token_key = (const struct krb5_buffer *)&key->payload.data[2];
+
+ /* struct rxgk_key {
+ * afs_uint32 enctype;
+ * opaque key<>;
+ * };
+ */
+ keysize = 4 + xdr_len_object(conn_krb5->key_len);
+
+ /* struct RXGK_AuthName {
+ * afs_int32 kind;
+ * opaque data<AUTHDATAMAX>;
+ * opaque display<AUTHPRINTABLEMAX>;
+ * };
+ */
+ uuidsize = sizeof(server->uuid);
+ authsize = 4 + xdr_len_object(uuidsize) + xdr_len_object(0);
+
+ /* struct RXGK_Token {
+ * rxgk_key K0;
+ * RXGK_Level level;
+ * rxgkTime starttime;
+ * afs_int32 lifetime;
+ * afs_int32 bytelife;
+ * rxgkTime expirationtime;
+ * struct RXGK_AuthName identities<>;
+ * };
+ */
+ toksize = keysize + 8 + 4 + 4 + 8 + xdr_len_object(authsize);
+
+ offset = 0;
+ encsize = crypto_krb5_how_much_buffer(token_krb5, KRB5_ENCRYPT_MODE, toksize, &offset);
+
+ /* struct RXGK_TokenContainer {
+ * afs_int32 kvno;
+ * afs_int32 enctype;
+ * opaque encrypted_token<>;
+ * };
+ */
+ contsize = 4 + 4 + xdr_len_object(encsize);
+
+ /* struct YFSAppData {
+ * opr_uuid initiatorUuid;
+ * opr_uuid acceptorUuid;
+ * Capabilities caps;
+ * afs_int32 enctype;
+ * opaque callbackKey<>;
+ * opaque callbackToken<>;
+ * };
+ */
+ adatasize = 16 + 16 +
+ xdr_len_object(sizeof(caps)) +
+ 4 +
+ xdr_len_object(conn_krb5->key_len) +
+ xdr_len_object(contsize);
+
+ ret = -ENOMEM;
+ appdata = kzalloc(adatasize, GFP_KERNEL);
+ if (!appdata)
+ goto out;
+ xdr = appdata;
+
+ memcpy(xdr, &net->uuid, 16); /* appdata.initiatorUuid */
+ xdr += 16 / 4;
+ memcpy(xdr, &server->uuid, 16); /* appdata.acceptorUuid */
+ xdr += 16 / 4;
+ *xdr++ = htonl(ARRAY_SIZE(caps)); /* appdata.caps.len */
+ memcpy(xdr, &caps, sizeof(caps)); /* appdata.caps */
+ xdr += ARRAY_SIZE(caps);
+ *xdr++ = htonl(conn_krb5->etype); /* appdata.enctype */
+
+ *xdr++ = htonl(conn_krb5->key_len); /* appdata.callbackKey.len */
+ K0 = xdr;
+ get_random_bytes(K0, conn_krb5->key_len); /* appdata.callbackKey.data */
+ xdr += xdr_round_up(conn_krb5->key_len) / 4;
+
+ *xdr++ = htonl(contsize); /* appdata.callbackToken.len */
+ *xdr++ = htonl(1); /* cont.kvno */
+ *xdr++ = htonl(token_krb5->etype); /* cont.enctype */
+ *xdr++ = htonl(encsize); /* cont.encrypted_token.len */
+
+ encbase = xdr;
+ xdr += offset / 4;
+ *xdr++ = htonl(conn_krb5->etype); /* token.K0.enctype */
+ *xdr++ = htonl(conn_krb5->key_len); /* token.K0.key.len */
+ memcpy(xdr, K0, conn_krb5->key_len); /* token.K0.key.data */
+ xdr += xdr_round_up(conn_krb5->key_len) / 4;
+
+ *xdr++ = htonl(RXRPC_SECURITY_ENCRYPT); /* token.level */
+ *xdr++ = htonl(0); /* token.starttime */
+ *xdr++ = htonl(0); /* " */
+ *xdr++ = htonl(0); /* token.lifetime */
+ *xdr++ = htonl(0); /* token.bytelife */
+ *xdr++ = htonl(0); /* token.expirationtime */
+ *xdr++ = htonl(0); /* " */
+ *xdr++ = htonl(1); /* token.identities.count */
+ *xdr++ = htonl(0); /* token.identities[0].kind */
+ *xdr++ = htonl(uuidsize); /* token.identities[0].data.len */
+ memcpy(xdr, &server->uuid, uuidsize);
+ xdr += xdr_round_up(uuidsize) / 4;
+ *xdr++ = htonl(0); /* token.identities[0].display.len */
+
+ xdr = encbase + xdr_round_up(encsize);
+
+ if ((unsigned long)xdr - (unsigned long)appdata != adatasize)
+ pr_err("Appdata size incorrect %lx != %zx\n",
+ (unsigned long)xdr - (unsigned long)appdata, adatasize);
+
+ aead = crypto_krb5_prepare_encryption(token_krb5, token_key, RXGK_SERVER_ENC_TOKEN,
+ GFP_KERNEL);
+ if (IS_ERR(aead)) {
+ ret = PTR_ERR(aead);
+ goto out_token;
+ }
+
+ sg_init_one(&sg, encbase, encsize);
+ ret = crypto_krb5_encrypt(token_krb5, aead, &sg, 1, encsize, offset, toksize, false);
+ if (ret < 0)
+ goto out_aead;
+
+ server->cm_rxgk_appdata.len = adatasize;
+ server->cm_rxgk_appdata.data = appdata;
+ appdata = NULL;
+
+out_aead:
+ crypto_free_aead(aead);
+out_token:
+ kfree(appdata);
+out:
+ return ret;
+}
+#endif /* CONFIG_RXGK */
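As a quick sanity check on the XDR size arithmetic in afs_create_yfs_cm_token(), here is a worked example; the 16-byte AES128-CTS session key and the 16-byte afs UUID are assumptions for illustration rather than values taken from this diff:

/* xdr_len_object(n) = 4-byte length word + n rounded up to 4 bytes.
 *
 *   keysize  = 4 + xdr_len_object(16)                     = 4 + 20   = 24
 *   authsize = 4 + xdr_len_object(16) + xdr_len_object(0) = 4 + 20 + 4 = 28
 *   toksize  = 24 + 8 + 4 + 4 + 8 + xdr_len_object(28)    = 48 + 32  = 80
 *
 * encsize then adds the enctype's confounder and checksum on top of
 * toksize (via crypto_krb5_how_much_buffer()), and contsize and
 * adatasize follow mechanically:
 *
 *   contsize  = 4 + 4 + xdr_len_object(encsize)
 *   adatasize = 16 + 16 + xdr_len_object(4) + 4
 *               + xdr_len_object(16) + xdr_len_object(contsize)
 *             = 64 + xdr_len_object(contsize)
 */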
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 99a3f20bc786..1a906805a9e3 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -139,49 +139,6 @@ bool afs_cm_incoming_call(struct afs_call *call)
}
/*
- * Find the server record by peer address and record a probe to the cache
- * manager from a server.
- */
-static int afs_find_cm_server_by_peer(struct afs_call *call)
-{
- struct sockaddr_rxrpc srx;
- struct afs_server *server;
- struct rxrpc_peer *peer;
-
- peer = rxrpc_kernel_get_call_peer(call->net->socket, call->rxcall);
-
- server = afs_find_server(call->net, peer);
- if (!server) {
- trace_afs_cm_no_server(call, &srx);
- return 0;
- }
-
- call->server = server;
- return 0;
-}
-
-/*
- * Find the server record by server UUID and record a probe to the cache
- * manager from a server.
- */
-static int afs_find_cm_server_by_uuid(struct afs_call *call,
- struct afs_uuid *uuid)
-{
- struct afs_server *server;
-
- rcu_read_lock();
- server = afs_find_server_by_uuid(call->net, call->request);
- rcu_read_unlock();
- if (!server) {
- trace_afs_cm_no_server_u(call, call->request);
- return 0;
- }
-
- call->server = server;
- return 0;
-}
-
-/*
* Clean up a cache manager call.
*/
static void afs_cm_destructor(struct afs_call *call)
@@ -322,10 +279,7 @@ static int afs_deliver_cb_callback(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
-
- /* we'll need the file server record as that tells us which set of
- * vnodes to operate upon */
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
/*
@@ -349,18 +303,10 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work)
*/
static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
{
- int ret;
-
_enter("");
afs_extract_discard(call, 0);
- ret = afs_extract_data(call, false);
- if (ret < 0)
- return ret;
-
- /* we'll need the file server record as that tells us which set of
- * vnodes to operate upon */
- return afs_find_cm_server_by_peer(call);
+ return afs_extract_data(call, false);
}
/*
@@ -373,8 +319,6 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
__be32 *b;
int ret;
- _enter("");
-
_enter("{%u}", call->unmarshall);
switch (call->unmarshall) {
@@ -421,9 +365,13 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- /* we'll need the file server record as that tells us which set of
- * vnodes to operate upon */
- return afs_find_cm_server_by_uuid(call, call->request);
+ if (memcmp(call->request, &call->server->_uuid, sizeof(call->server->_uuid)) != 0) {
+ pr_notice("Callback UUID does not match fileserver UUID\n");
+ trace_afs_cm_no_server_u(call, call->request);
+ return 0;
+ }
+
+ return 0;
}
/*
@@ -455,7 +403,7 @@ static int afs_deliver_cb_probe(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
/*
@@ -533,7 +481,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
/*
@@ -593,7 +541,7 @@ static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
/*
@@ -667,9 +615,5 @@ static int afs_deliver_yfs_cb_callback(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
-
- /* We'll need the file server record as that tells us which set of
- * vnodes to operate upon.
- */
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index ada363af5aab..f4e9e12373ac 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -13,6 +13,7 @@
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/iversion.h>
+#include <linux/iov_iter.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"
#include "afs_fs.h"
@@ -22,7 +23,8 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags);
static int afs_dir_open(struct inode *inode, struct file *file);
static int afs_readdir(struct file *file, struct dir_context *ctx);
-static int afs_d_revalidate(struct dentry *dentry, unsigned int flags);
+static int afs_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags);
static int afs_d_delete(const struct dentry *dentry);
static void afs_d_iput(struct dentry *dentry, struct inode *inode);
static bool afs_lookup_one_filldir(struct dir_context *ctx, const char *name, int nlen,
@@ -31,8 +33,8 @@ static bool afs_lookup_filldir(struct dir_context *ctx, const char *name, int nl
loff_t fpos, u64 ino, unsigned dtype);
static int afs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl);
-static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode);
+static struct dentry *afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode);
static int afs_rmdir(struct inode *dir, struct dentry *dentry);
static int afs_unlink(struct inode *dir, struct dentry *dentry);
static int afs_link(struct dentry *from, struct inode *dir,
@@ -42,15 +44,6 @@ static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir,
static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags);
-static bool afs_dir_release_folio(struct folio *folio, gfp_t gfp_flags);
-static void afs_dir_invalidate_folio(struct folio *folio, size_t offset,
- size_t length);
-
-static bool afs_dir_dirty_folio(struct address_space *mapping,
- struct folio *folio)
-{
- BUG(); /* This should never happen. */
-}
const struct file_operations afs_dir_file_operations = {
.open = afs_dir_open,
@@ -75,10 +68,7 @@ const struct inode_operations afs_dir_inode_operations = {
};
const struct address_space_operations afs_dir_aops = {
- .dirty_folio = afs_dir_dirty_folio,
- .release_folio = afs_dir_release_folio,
- .invalidate_folio = afs_dir_invalidate_folio,
- .migrate_folio = filemap_migrate_folio,
+ .writepages = afs_single_writepages,
};
const struct dentry_operations afs_fs_dentry_operations = {
@@ -99,152 +89,124 @@ struct afs_lookup_one_cookie {
struct afs_lookup_cookie {
struct dir_context ctx;
struct qstr name;
- bool found;
- bool one_only;
unsigned short nr_fids;
struct afs_fid fids[50];
};
+static void afs_dir_unuse_cookie(struct afs_vnode *dvnode, int ret)
+{
+ if (ret == 0) {
+ struct afs_vnode_cache_aux aux;
+ loff_t i_size = i_size_read(&dvnode->netfs.inode);
+
+ afs_set_cache_aux(dvnode, &aux);
+ fscache_unuse_cookie(afs_vnode_cache(dvnode), &aux, &i_size);
+ } else {
+ fscache_unuse_cookie(afs_vnode_cache(dvnode), NULL, NULL);
+ }
+}
+
/*
- * Drop the refs that we're holding on the folios we were reading into. We've
- * got refs on the first nr_pages pages.
+ * Iterate through a kmapped directory segment, dumping a summary of
+ * the contents.
*/
-static void afs_dir_read_cleanup(struct afs_read *req)
+static size_t afs_dir_dump_step(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2)
{
- struct address_space *mapping = req->vnode->netfs.inode.i_mapping;
- struct folio *folio;
- pgoff_t last = req->nr_pages - 1;
+ do {
+ union afs_xdr_dir_block *block = iter_base;
- XA_STATE(xas, &mapping->i_pages, 0);
+ pr_warn("[%05zx] %32phN\n", progress, block);
+ iter_base += AFS_DIR_BLOCK_SIZE;
+ progress += AFS_DIR_BLOCK_SIZE;
+ len -= AFS_DIR_BLOCK_SIZE;
+ } while (len > 0);
- if (unlikely(!req->nr_pages))
- return;
+ return len;
+}
- rcu_read_lock();
- xas_for_each(&xas, folio, last) {
- if (xas_retry(&xas, folio))
- continue;
- BUG_ON(xa_is_value(folio));
- ASSERTCMP(folio->mapping, ==, mapping);
+/*
+ * Dump the contents of a directory.
+ */
+static void afs_dir_dump(struct afs_vnode *dvnode)
+{
+ struct iov_iter iter;
+ unsigned long long i_size = i_size_read(&dvnode->netfs.inode);
- folio_put(folio);
- }
+ pr_warn("DIR %llx:%llx is=%llx\n",
+ dvnode->fid.vid, dvnode->fid.vnode, i_size);
- rcu_read_unlock();
+ iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0, i_size);
+ iterate_folioq(&iter, iov_iter_count(&iter), NULL, NULL,
+ afs_dir_dump_step);
}
/*
* check that a directory folio is valid
*/
-static bool afs_dir_check_folio(struct afs_vnode *dvnode, struct folio *folio,
- loff_t i_size)
+static bool afs_dir_check_block(struct afs_vnode *dvnode, size_t progress,
+ union afs_xdr_dir_block *block)
{
- union afs_xdr_dir_block *block;
- size_t offset, size;
- loff_t pos;
+ if (block->hdr.magic != AFS_DIR_MAGIC) {
+ pr_warn("%s(%lx): [%zx] bad magic %04x\n",
+ __func__, dvnode->netfs.inode.i_ino,
+ progress, ntohs(block->hdr.magic));
+ trace_afs_dir_check_failed(dvnode, progress);
+ trace_afs_file_error(dvnode, -EIO, afs_file_error_dir_bad_magic);
+ return false;
+ }
- /* Determine how many magic numbers there should be in this folio, but
- * we must take care because the directory may change size under us.
+ /* Make sure each block is NUL terminated so we can reasonably
+ * use string functions on it. The filenames in the folio
+ * *should* be NUL-terminated anyway.
*/
- pos = folio_pos(folio);
- if (i_size <= pos)
- goto checked;
-
- size = min_t(loff_t, folio_size(folio), i_size - pos);
- for (offset = 0; offset < size; offset += sizeof(*block)) {
- block = kmap_local_folio(folio, offset);
- if (block->hdr.magic != AFS_DIR_MAGIC) {
- printk("kAFS: %s(%lx): [%llx] bad magic %zx/%zx is %04hx\n",
- __func__, dvnode->netfs.inode.i_ino,
- pos, offset, size, ntohs(block->hdr.magic));
- trace_afs_dir_check_failed(dvnode, pos + offset, i_size);
- kunmap_local(block);
- trace_afs_file_error(dvnode, -EIO, afs_file_error_dir_bad_magic);
- goto error;
- }
-
- /* Make sure each block is NUL terminated so we can reasonably
- * use string functions on it. The filenames in the folio
- * *should* be NUL-terminated anyway.
- */
- ((u8 *)block)[AFS_DIR_BLOCK_SIZE - 1] = 0;
-
- kunmap_local(block);
- }
-checked:
+ ((u8 *)block)[AFS_DIR_BLOCK_SIZE - 1] = 0;
afs_stat_v(dvnode, n_read_dir);
return true;
-
-error:
- return false;
}
/*
- * Dump the contents of a directory.
+ * Iterate through a kmapped directory segment, checking the content.
*/
-static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req)
+static size_t afs_dir_check_step(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2)
{
- union afs_xdr_dir_block *block;
- struct address_space *mapping = dvnode->netfs.inode.i_mapping;
- struct folio *folio;
- pgoff_t last = req->nr_pages - 1;
- size_t offset, size;
-
- XA_STATE(xas, &mapping->i_pages, 0);
-
- pr_warn("DIR %llx:%llx f=%llx l=%llx al=%llx\n",
- dvnode->fid.vid, dvnode->fid.vnode,
- req->file_size, req->len, req->actual_len);
- pr_warn("DIR %llx %x %zx %zx\n",
- req->pos, req->nr_pages,
- req->iter->iov_offset, iov_iter_count(req->iter));
-
- xas_for_each(&xas, folio, last) {
- if (xas_retry(&xas, folio))
- continue;
+ struct afs_vnode *dvnode = priv;
- BUG_ON(folio->mapping != mapping);
+ if (WARN_ON_ONCE(progress % AFS_DIR_BLOCK_SIZE ||
+ len % AFS_DIR_BLOCK_SIZE))
+ return len;
- size = min_t(loff_t, folio_size(folio), req->actual_len - folio_pos(folio));
- for (offset = 0; offset < size; offset += sizeof(*block)) {
- block = kmap_local_folio(folio, offset);
- pr_warn("[%02lx] %32phN\n", folio->index + offset, block);
- kunmap_local(block);
- }
- }
+ do {
+ if (!afs_dir_check_block(dvnode, progress, iter_base))
+ break;
+ iter_base += AFS_DIR_BLOCK_SIZE;
+ len -= AFS_DIR_BLOCK_SIZE;
+ } while (len > 0);
+
+ return len;
}
/*
- * Check all the blocks in a directory. All the folios are held pinned.
+ * Check all the blocks in a directory.
*/
-static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req)
+static int afs_dir_check(struct afs_vnode *dvnode)
{
- struct address_space *mapping = dvnode->netfs.inode.i_mapping;
- struct folio *folio;
- pgoff_t last = req->nr_pages - 1;
- int ret = 0;
+ struct iov_iter iter;
+ unsigned long long i_size = i_size_read(&dvnode->netfs.inode);
+ size_t checked = 0;
- XA_STATE(xas, &mapping->i_pages, 0);
-
- if (unlikely(!req->nr_pages))
+ if (unlikely(!i_size))
return 0;
- rcu_read_lock();
- xas_for_each(&xas, folio, last) {
- if (xas_retry(&xas, folio))
- continue;
-
- BUG_ON(folio->mapping != mapping);
-
- if (!afs_dir_check_folio(dvnode, folio, req->actual_len)) {
- afs_dir_dump(dvnode, req);
- ret = -EIO;
- break;
- }
+ iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0, i_size);
+ checked = iterate_folioq(&iter, iov_iter_count(&iter), dvnode, NULL,
+ afs_dir_check_step);
+ if (checked != i_size) {
+ afs_dir_dump(dvnode);
+ return -EIO;
}
-
- rcu_read_unlock();
- return ret;
+ return 0;
}
/*
@@ -264,134 +226,140 @@ static int afs_dir_open(struct inode *inode, struct file *file)
}
/*
- * Read the directory into the pagecache in one go, scrubbing the previous
- * contents. The list of folios is returned, pinning them so that they don't
- * get reclaimed during the iteration.
+ * Read a file in a single download.
*/
-static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
- __acquires(&dvnode->validate_lock)
+static ssize_t afs_do_read_single(struct afs_vnode *dvnode, struct file *file)
{
- struct address_space *mapping = dvnode->netfs.inode.i_mapping;
- struct afs_read *req;
+ struct iov_iter iter;
+ ssize_t ret;
loff_t i_size;
- int nr_pages, i;
- int ret;
- loff_t remote_size = 0;
-
- _enter("");
-
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return ERR_PTR(-ENOMEM);
-
- refcount_set(&req->usage, 1);
- req->vnode = dvnode;
- req->key = key_get(key);
- req->cleanup = afs_dir_read_cleanup;
+ bool is_dir = (S_ISDIR(dvnode->netfs.inode.i_mode) &&
+ !test_bit(AFS_VNODE_MOUNTPOINT, &dvnode->flags));
-expand:
i_size = i_size_read(&dvnode->netfs.inode);
- if (i_size < remote_size)
- i_size = remote_size;
- if (i_size < 2048) {
- ret = afs_bad(dvnode, afs_file_error_dir_small);
- goto error;
- }
- if (i_size > 2048 * 1024) {
- trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big);
- ret = -EFBIG;
- goto error;
+ if (is_dir) {
+ if (i_size < AFS_DIR_BLOCK_SIZE)
+ return afs_bad(dvnode, afs_file_error_dir_small);
+ if (i_size > AFS_DIR_BLOCK_SIZE * 1024) {
+ trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big);
+ return -EFBIG;
+ }
+ } else {
+ if (i_size > AFSPATHMAX) {
+ trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big);
+ return -EFBIG;
+ }
}
- _enter("%llu", i_size);
+ /* Expand the storage. TODO: Shrink the storage too. */
+ if (dvnode->directory_size < i_size) {
+ size_t cur_size = dvnode->directory_size;
- nr_pages = (i_size + PAGE_SIZE - 1) / PAGE_SIZE;
+ ret = netfs_alloc_folioq_buffer(NULL,
+ &dvnode->directory, &cur_size, i_size,
+ mapping_gfp_mask(dvnode->netfs.inode.i_mapping));
+ dvnode->directory_size = cur_size;
+ if (ret < 0)
+ return ret;
+ }
- req->actual_len = i_size; /* May change */
- req->len = nr_pages * PAGE_SIZE; /* We can ask for more than there is */
- req->data_version = dvnode->status.data_version; /* May change */
- iov_iter_xarray(&req->def_iter, ITER_DEST, &dvnode->netfs.inode.i_mapping->i_pages,
- 0, i_size);
- req->iter = &req->def_iter;
+ iov_iter_folio_queue(&iter, ITER_DEST, dvnode->directory, 0, 0, dvnode->directory_size);
- /* Fill in any gaps that we might find where the memory reclaimer has
- * been at work and pin all the folios. If there are any gaps, we will
- * need to reread the entire directory contents.
+ /* AFS requires us to perform the read of a directory synchronously as
+ * a single unit to avoid issues with the directory contents being
+ * changed between reads.
*/
- i = req->nr_pages;
- while (i < nr_pages) {
- struct folio *folio;
-
- folio = filemap_get_folio(mapping, i);
- if (IS_ERR(folio)) {
- if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
- afs_stat_v(dvnode, n_inval);
- folio = __filemap_get_folio(mapping,
- i, FGP_LOCK | FGP_CREAT,
- mapping->gfp_mask);
- if (IS_ERR(folio)) {
- ret = PTR_ERR(folio);
- goto error;
- }
- folio_attach_private(folio, (void *)1);
- folio_unlock(folio);
+ ret = netfs_read_single(&dvnode->netfs.inode, file, &iter);
+ if (ret >= 0) {
+ i_size = i_size_read(&dvnode->netfs.inode);
+ if (i_size > ret) {
+ /* The content has grown, so we need to expand the
+ * buffer.
+ */
+ ret = -ESTALE;
+ } else if (is_dir) {
+ int ret2 = afs_dir_check(dvnode);
+
+ if (ret2 < 0)
+ ret = ret2;
+ } else if (i_size < folioq_folio_size(dvnode->directory, 0)) {
+ /* NUL-terminate a symlink. */
+ char *symlink = kmap_local_folio(folioq_folio(dvnode->directory, 0), 0);
+
+ symlink[i_size] = 0;
+ kunmap_local(symlink);
}
-
- req->nr_pages += folio_nr_pages(folio);
- i += folio_nr_pages(folio);
}
- /* If we're going to reload, we need to lock all the pages to prevent
- * races.
- */
+ return ret;
+}
+
+ssize_t afs_read_single(struct afs_vnode *dvnode, struct file *file)
+{
+ ssize_t ret;
+
+ fscache_use_cookie(afs_vnode_cache(dvnode), false);
+ ret = afs_do_read_single(dvnode, file);
+ fscache_unuse_cookie(afs_vnode_cache(dvnode), NULL, NULL);
+ return ret;
+}
+
+/*
+ * Read the directory into a folio_queue buffer in one go, scrubbing the
+ * previous contents. We return -ESTALE if the caller needs to call us again.
+ */
+ssize_t afs_read_dir(struct afs_vnode *dvnode, struct file *file)
+ __acquires(&dvnode->validate_lock)
+{
+ ssize_t ret;
+ loff_t i_size;
+
+ i_size = i_size_read(&dvnode->netfs.inode);
+
ret = -ERESTARTSYS;
if (down_read_killable(&dvnode->validate_lock) < 0)
goto error;
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
- goto success;
+ /* We only need to reread the data if it became invalid - or if we
+ * haven't read it yet.
+ */
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+ test_bit(AFS_VNODE_DIR_READ, &dvnode->flags)) {
+ ret = i_size;
+ goto valid;
+ }
up_read(&dvnode->validate_lock);
if (down_write_killable(&dvnode->validate_lock) < 0)
goto error;
- if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
- trace_afs_reload_dir(dvnode);
- ret = afs_fetch_data(dvnode, req);
- if (ret < 0)
- goto error_unlock;
-
- task_io_account_read(PAGE_SIZE * req->nr_pages);
-
- if (req->len < req->file_size) {
- /* The content has grown, so we need to expand the
- * buffer.
- */
- up_write(&dvnode->validate_lock);
- remote_size = req->file_size;
- goto expand;
- }
+ if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+ afs_invalidate_cache(dvnode, 0);
- /* Validate the data we just read. */
- ret = afs_dir_check(dvnode, req);
+ if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) ||
+ !test_bit(AFS_VNODE_DIR_READ, &dvnode->flags)) {
+ trace_afs_reload_dir(dvnode);
+ ret = afs_read_single(dvnode, file);
if (ret < 0)
goto error_unlock;
// TODO: Trim excess pages
set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags);
+ set_bit(AFS_VNODE_DIR_READ, &dvnode->flags);
+ } else {
+ ret = i_size;
}
downgrade_write(&dvnode->validate_lock);
-success:
- return req;
+valid:
+ return ret;
error_unlock:
up_write(&dvnode->validate_lock);
error:
- afs_put_read(req);
- _leave(" = %d", ret);
- return ERR_PTR(ret);
+ _leave(" = %zd", ret);
+ return ret;
}
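afs_read_dir() hands back the validate_lock held for read (per the __acquires annotation), so a caller is expected to follow roughly the pattern that afs_dir_iterate() uses further down in this file:

	ret = afs_read_dir(dvnode, file);
	if (ret >= 0) {
		/* ... walk dvnode->directory under the read lock ... */
		up_read(&dvnode->validate_lock);
	}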
/*
@@ -399,79 +367,69 @@ error:
*/
static int afs_dir_iterate_block(struct afs_vnode *dvnode,
struct dir_context *ctx,
- union afs_xdr_dir_block *block,
- unsigned blkoff)
+ union afs_xdr_dir_block *block)
{
union afs_xdr_dirent *dire;
- unsigned offset, next, curr, nr_slots;
+ unsigned int blknum, base, hdr, pos, next, nr_slots;
size_t nlen;
int tmp;
- _enter("%llx,%x", ctx->pos, blkoff);
+ blknum = ctx->pos / AFS_DIR_BLOCK_SIZE;
+ base = blknum * AFS_DIR_SLOTS_PER_BLOCK;
+ hdr = (blknum == 0 ? AFS_DIR_RESV_BLOCKS0 : AFS_DIR_RESV_BLOCKS);
+ pos = DIV_ROUND_UP(ctx->pos, AFS_DIR_DIRENT_SIZE) - base;
- curr = (ctx->pos - blkoff) / sizeof(union afs_xdr_dirent);
+ _enter("%llx,%x", ctx->pos, blknum);
/* walk through the block, an entry at a time */
- for (offset = (blkoff == 0 ? AFS_DIR_RESV_BLOCKS0 : AFS_DIR_RESV_BLOCKS);
- offset < AFS_DIR_SLOTS_PER_BLOCK;
- offset = next
- ) {
+ for (unsigned int slot = hdr; slot < AFS_DIR_SLOTS_PER_BLOCK; slot = next) {
/* skip entries marked unused in the bitmap */
- if (!(block->hdr.bitmap[offset / 8] &
- (1 << (offset % 8)))) {
- _debug("ENT[%zu.%u]: unused",
- blkoff / sizeof(union afs_xdr_dir_block), offset);
- next = offset + 1;
- if (offset >= curr)
- ctx->pos = blkoff +
- next * sizeof(union afs_xdr_dirent);
+ if (!(block->hdr.bitmap[slot / 8] &
+ (1 << (slot % 8)))) {
+ _debug("ENT[%x]: Unused", base + slot);
+ next = slot + 1;
+ if (next >= pos)
+ ctx->pos = (base + next) * sizeof(union afs_xdr_dirent);
continue;
}
/* got a valid entry */
- dire = &block->dirents[offset];
+ dire = &block->dirents[slot];
nlen = strnlen(dire->u.name,
- sizeof(*block) -
- offset * sizeof(union afs_xdr_dirent));
+ (unsigned long)(block + 1) - (unsigned long)dire->u.name - 1);
if (nlen > AFSNAMEMAX - 1) {
- _debug("ENT[%zu]: name too long (len %u/%zu)",
- blkoff / sizeof(union afs_xdr_dir_block),
- offset, nlen);
+ _debug("ENT[%x]: Name too long (len %zx)",
+ base + slot, nlen);
return afs_bad(dvnode, afs_file_error_dir_name_too_long);
}
- _debug("ENT[%zu.%u]: %s %zu \"%s\"",
- blkoff / sizeof(union afs_xdr_dir_block), offset,
- (offset < curr ? "skip" : "fill"),
+ _debug("ENT[%x]: %s %zx \"%s\"",
+ base + slot, (slot < pos ? "skip" : "fill"),
nlen, dire->u.name);
nr_slots = afs_dir_calc_slots(nlen);
- next = offset + nr_slots;
+ next = slot + nr_slots;
if (next > AFS_DIR_SLOTS_PER_BLOCK) {
- _debug("ENT[%zu.%u]:"
- " %u extends beyond end dir block"
- " (len %zu)",
- blkoff / sizeof(union afs_xdr_dir_block),
- offset, next, nlen);
+ _debug("ENT[%x]: extends beyond end dir block (len %zx)",
+ base + slot, nlen);
return afs_bad(dvnode, afs_file_error_dir_over_end);
}
/* Check that the name-extension dirents are all allocated */
for (tmp = 1; tmp < nr_slots; tmp++) {
- unsigned int ix = offset + tmp;
- if (!(block->hdr.bitmap[ix / 8] & (1 << (ix % 8)))) {
- _debug("ENT[%zu.u]:"
- " %u unmarked extension (%u/%u)",
- blkoff / sizeof(union afs_xdr_dir_block),
- offset, tmp, nr_slots);
+ unsigned int xslot = slot + tmp;
+
+ if (!(block->hdr.bitmap[xslot / 8] & (1 << (xslot % 8)))) {
+ _debug("ENT[%x]: Unmarked extension (%x/%x)",
+ base + slot, tmp, nr_slots);
return afs_bad(dvnode, afs_file_error_dir_unmarked_ext);
}
}
/* skip if starts before the current position */
- if (offset < curr) {
- if (next > curr)
- ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
+ if (slot < pos) {
+ if (next > pos)
+ ctx->pos = (base + next) * sizeof(union afs_xdr_dirent);
continue;
}
@@ -485,75 +443,110 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
return 0;
}
- ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
+ ctx->pos = (base + next) * sizeof(union afs_xdr_dirent);
}
_leave(" = 1 [more]");
return 1;
}
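As a worked example of the position arithmetic at the top of afs_dir_iterate_block() (block size 2048 and dirent size 32, per the BUILD_BUG_ON()s further down):

/* ctx->pos = 2080:
 *
 *   blknum = 2080 / 2048                      = 1
 *   base   = 1 * AFS_DIR_SLOTS_PER_BLOCK (64) = 64
 *   pos    = DIV_ROUND_UP(2080, 32) - 64      = 65 - 64 = 1
 *
 * so the walk resumes at slot 1 of block 1 (slot 0 holds the block
 * header), and ctx->pos advances in units of
 * (base + next) * sizeof(union afs_xdr_dirent) as entries are emitted.
 */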
+struct afs_dir_iteration_ctx {
+ struct dir_context *dir_ctx;
+ int error;
+};
+
/*
- * iterate through the data blob that lists the contents of an AFS directory
+ * Iterate through a kmapped directory segment.
*/
-static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
- struct key *key, afs_dataversion_t *_dir_version)
+static size_t afs_dir_iterate_step(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2)
{
- struct afs_vnode *dvnode = AFS_FS_I(dir);
- union afs_xdr_dir_block *dblock;
- struct afs_read *req;
- struct folio *folio;
- unsigned offset, size;
+ struct afs_dir_iteration_ctx *ctx = priv2;
+ struct afs_vnode *dvnode = priv;
int ret;
- _enter("{%lu},%u,,", dir->i_ino, (unsigned)ctx->pos);
-
- if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) {
- _leave(" = -ESTALE");
- return -ESTALE;
+ if (WARN_ON_ONCE(progress % AFS_DIR_BLOCK_SIZE ||
+ len % AFS_DIR_BLOCK_SIZE)) {
+ pr_err("Mis-iteration prog=%zx len=%zx\n",
+ progress % AFS_DIR_BLOCK_SIZE,
+ len % AFS_DIR_BLOCK_SIZE);
+ return len;
}
- req = afs_read_dir(dvnode, key);
- if (IS_ERR(req))
- return PTR_ERR(req);
- *_dir_version = req->data_version;
+ do {
+ ret = afs_dir_iterate_block(dvnode, ctx->dir_ctx, iter_base);
+ if (ret != 1)
+ break;
- /* round the file position up to the next entry boundary */
- ctx->pos += sizeof(union afs_xdr_dirent) - 1;
- ctx->pos &= ~(sizeof(union afs_xdr_dirent) - 1);
+ ctx->dir_ctx->pos = round_up(ctx->dir_ctx->pos, AFS_DIR_BLOCK_SIZE);
+ iter_base += AFS_DIR_BLOCK_SIZE;
+ len -= AFS_DIR_BLOCK_SIZE;
+ } while (len > 0);
- /* walk through the blocks in sequence */
- ret = 0;
- while (ctx->pos < req->actual_len) {
- /* Fetch the appropriate folio from the directory and re-add it
- * to the LRU. We have all the pages pinned with an extra ref.
- */
- folio = __filemap_get_folio(dir->i_mapping, ctx->pos / PAGE_SIZE,
- FGP_ACCESSED, 0);
- if (IS_ERR(folio)) {
- ret = afs_bad(dvnode, afs_file_error_dir_missing_page);
- break;
- }
+ return len;
+}
- offset = round_down(ctx->pos, sizeof(*dblock)) - folio_pos(folio);
- size = min_t(loff_t, folio_size(folio),
- req->actual_len - folio_pos(folio));
+/*
+ * Iterate through the directory folios.
+ */
+static int afs_dir_iterate_contents(struct inode *dir, struct dir_context *dir_ctx)
+{
+ struct afs_dir_iteration_ctx ctx = { .dir_ctx = dir_ctx };
+ struct afs_vnode *dvnode = AFS_FS_I(dir);
+ struct iov_iter iter;
+ unsigned long long i_size = i_size_read(dir);
- do {
- dblock = kmap_local_folio(folio, offset);
- ret = afs_dir_iterate_block(dvnode, ctx, dblock,
- folio_pos(folio) + offset);
- kunmap_local(dblock);
- if (ret != 1)
- goto out;
+ /* Round the file position up to the next entry boundary */
+ dir_ctx->pos = round_up(dir_ctx->pos, sizeof(union afs_xdr_dirent));
- } while (offset += sizeof(*dblock), offset < size);
+ if (i_size <= 0 || dir_ctx->pos >= i_size)
+ return 0;
- ret = 0;
- }
+ iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0, i_size);
+ iov_iter_advance(&iter, round_down(dir_ctx->pos, AFS_DIR_BLOCK_SIZE));
+
+ iterate_folioq(&iter, iov_iter_count(&iter), dvnode, &ctx,
+ afs_dir_iterate_step);
+
+ if (ctx.error == -ESTALE)
+ afs_invalidate_dir(dvnode, afs_dir_invalid_iter_stale);
+ return ctx.error;
+}
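
For readers unfamiliar with iterate_folioq(), the control flow of afs_dir_iterate_step()/afs_dir_iterate_contents() amounts to stepping over the directory image one 2048-byte AFS block at a time until the per-block parser says stop. A minimal userspace sketch of that flow, with a flat buffer standing in for the folio queue and invented helper names:

#include <stddef.h>

#define AFS_DIR_BLOCK_SIZE 2048

typedef int (*block_parser_t)(const void *block, void *ctx);

/* Walk a flat directory image block by block.  The parser returns 1 to
 * continue, 0 when the caller's buffer is full, or a negative error; the
 * amount of unconsumed data is handed back, much as iterate_folioq() expects.
 */
static size_t iterate_blocks(const void *image, size_t len, void *ctx,
			     block_parser_t parse_block)
{
	if (len % AFS_DIR_BLOCK_SIZE)	/* mirrors the WARN_ON_ONCE() check */
		return len;

	do {
		if (parse_block(image, ctx) != 1)
			break;
		image = (const char *)image + AFS_DIR_BLOCK_SIZE;
		len -= AFS_DIR_BLOCK_SIZE;
	} while (len > 0);

	return len;
}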
+
+/*
+ * iterate through the data blob that lists the contents of an AFS directory
+ */
+static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
+ struct file *file, afs_dataversion_t *_dir_version)
+{
+ struct afs_vnode *dvnode = AFS_FS_I(dir);
+ int retry_limit = 100;
+ int ret;
+
+ _enter("{%lu},%llx,,", dir->i_ino, ctx->pos);
+
+ do {
+ if (--retry_limit < 0) {
+ pr_warn("afs_read_dir(): Too many retries\n");
+ ret = -ESTALE;
+ break;
+ }
+ ret = afs_read_dir(dvnode, file);
+ if (ret < 0) {
+ if (ret != -ESTALE)
+ break;
+ if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) {
+ ret = -ESTALE;
+ break;
+ }
+ continue;
+ }
+ *_dir_version = inode_peek_iversion_raw(dir);
+
+ ret = afs_dir_iterate_contents(dir, ctx);
+ up_read(&dvnode->validate_lock);
+ } while (ret == -ESTALE);
-out:
- up_read(&dvnode->validate_lock);
- afs_put_read(req);
_leave(" = %d", ret);
return ret;
}
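
The loop above re-fetches the directory whenever the walk trips over an invalidated copy. A hedged userspace model of that retry policy, with refresh/walk callbacks standing in for afs_read_dir() and afs_dir_iterate_contents() and no locking:

#include <errno.h>
#include <stdio.h>

/* Re-fetch and re-walk while the walk reports -ESTALE, but give up after a
 * fixed number of attempts so a directory that keeps changing underneath the
 * reader cannot livelock it.
 */
static int iterate_with_retry(int (*refresh)(void *), int (*walk)(void *),
			      void *dir)
{
	int retry_limit = 100;
	int ret;

	do {
		if (--retry_limit < 0) {
			fprintf(stderr, "Too many retries\n");
			return -ESTALE;
		}
		ret = refresh(dir);		/* cf. afs_read_dir() */
		if (ret < 0) {
			if (ret != -ESTALE)
				return ret;
			continue;		/* invalidated: fetch again */
		}
		ret = walk(dir);		/* cf. afs_dir_iterate_contents() */
	} while (ret == -ESTALE);

	return ret;
}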
@@ -565,8 +558,7 @@ static int afs_readdir(struct file *file, struct dir_context *ctx)
{
afs_dataversion_t dir_version;
- return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file),
- &dir_version);
+ return afs_dir_iterate(file_inode(file), ctx, file, &dir_version);
}
/*
@@ -606,22 +598,22 @@ static bool afs_lookup_one_filldir(struct dir_context *ctx, const char *name,
* Do a lookup of a single name in a directory
* - just returns the FID the dentry name maps to if found
*/
-static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
- struct afs_fid *fid, struct key *key,
+static int afs_do_lookup_one(struct inode *dir, const struct qstr *name,
+ struct afs_fid *fid,
afs_dataversion_t *_dir_version)
{
struct afs_super_info *as = dir->i_sb->s_fs_info;
struct afs_lookup_one_cookie cookie = {
.ctx.actor = afs_lookup_one_filldir,
- .name = dentry->d_name,
+ .name = *name,
.fid.vid = as->volume->vid
};
int ret;
- _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
+ _enter("{%lu},{%.*s},", dir->i_ino, name->len, name->name);
/* search the directory */
- ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version);
+ ret = afs_dir_iterate(dir, &cookie.ctx, NULL, _dir_version);
if (ret < 0) {
_leave(" = %d [iter]", ret);
return ret;
@@ -656,19 +648,10 @@ static bool afs_lookup_filldir(struct dir_context *ctx, const char *name,
BUILD_BUG_ON(sizeof(union afs_xdr_dir_block) != 2048);
BUILD_BUG_ON(sizeof(union afs_xdr_dirent) != 32);
- if (cookie->found) {
- if (cookie->nr_fids < 50) {
- cookie->fids[cookie->nr_fids].vnode = ino;
- cookie->fids[cookie->nr_fids].unique = dtype;
- cookie->nr_fids++;
- }
- } else if (cookie->name.len == nlen &&
- memcmp(cookie->name.name, name, nlen) == 0) {
- cookie->fids[1].vnode = ino;
- cookie->fids[1].unique = dtype;
- cookie->found = 1;
- if (cookie->one_only)
- return false;
+ if (cookie->nr_fids < 50) {
+ cookie->fids[cookie->nr_fids].vnode = ino;
+ cookie->fids[cookie->nr_fids].unique = dtype;
+ cookie->nr_fids++;
}
return cookie->nr_fids < 50;
@@ -788,8 +771,7 @@ static bool afs_server_supports_ibulk(struct afs_vnode *dvnode)
* files in one go and create inodes for them. The inode of the file we were
* asked for is returned.
*/
-static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
- struct key *key)
+static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry)
{
struct afs_lookup_cookie *cookie;
struct afs_vnode_param *vp;
@@ -797,6 +779,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
struct inode *inode = NULL, *ti;
afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version);
+ bool supports_ibulk, isnew;
long ret;
int i;
@@ -813,19 +796,19 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
cookie->nr_fids = 2; /* slot 1 is saved for the fid we actually want
* and slot 0 for the directory */
- if (!afs_server_supports_ibulk(dvnode))
- cookie->one_only = true;
-
- /* search the directory */
- ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version);
+ /* Search the directory for the named entry using the hash table... */
+ ret = afs_dir_search(dvnode, &dentry->d_name, &cookie->fids[1], &data_version);
if (ret < 0)
goto out;
- dentry->d_fsdata = (void *)(unsigned long)data_version;
+ supports_ibulk = afs_server_supports_ibulk(dvnode);
+ if (supports_ibulk) {
+ /* ...then scan linearly from that point for entries to lookup-ahead. */
+ cookie->ctx.pos = (ret + 1) * AFS_DIR_DIRENT_SIZE;
+ afs_dir_iterate(dir, &cookie->ctx, NULL, &data_version);
+ }
- ret = -ENOENT;
- if (!cookie->found)
- goto out;
+ dentry->d_fsdata = (void *)(unsigned long)data_version;
/* Check to see if we already have an inode for the primary fid. */
inode = ilookup5(dir->i_sb, cookie->fids[1].vnode,
@@ -867,7 +850,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
* callback counters.
*/
ti = ilookup5_nowait(dir->i_sb, vp->fid.vnode,
- afs_ilookup5_test_by_fid, &vp->fid);
+ afs_ilookup5_test_by_fid, &vp->fid, &isnew);
if (!IS_ERR_OR_NULL(ti)) {
vnode = AFS_FS_I(ti);
vp->dv_before = vnode->status.data_version;
@@ -884,7 +867,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
* the whole operation.
*/
afs_op_set_error(op, -ENOTSUPP);
- if (!cookie->one_only) {
+ if (supports_ibulk) {
op->ops = &afs_inline_bulk_status_operation;
afs_begin_vnode_operation(op);
afs_wait_for_operation(op);
@@ -926,8 +909,7 @@ out:
/*
* Look up an entry in a directory with @sys substitution.
*/
-static struct dentry *afs_lookup_atsys(struct inode *dir, struct dentry *dentry,
- struct key *key)
+static struct dentry *afs_lookup_atsys(struct inode *dir, struct dentry *dentry)
{
struct afs_sysnames *subs;
struct afs_net *net = afs_i2net(dir);
@@ -961,7 +943,7 @@ static struct dentry *afs_lookup_atsys(struct inode *dir, struct dentry *dentry,
}
strcpy(p, name);
- ret = lookup_one_len(buf, dentry->d_parent, len);
+ ret = lookup_noperm(&QSTR(buf), dentry->d_parent);
if (IS_ERR(ret) || d_is_positive(ret))
goto out_s;
dput(ret);
@@ -975,7 +957,6 @@ out_s:
afs_put_sysnames(subs);
kfree(buf);
out_p:
- key_put(key);
return ret;
}
@@ -989,7 +970,6 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
struct afs_fid fid = {};
struct inode *inode;
struct dentry *d;
- struct key *key;
int ret;
_enter("{%llx:%llu},%p{%pd},",
@@ -1007,15 +987,9 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
return ERR_PTR(-ESTALE);
}
- key = afs_request_key(dvnode->volume->cell);
- if (IS_ERR(key)) {
- _leave(" = %ld [key]", PTR_ERR(key));
- return ERR_CAST(key);
- }
-
- ret = afs_validate(dvnode, key);
+ ret = afs_validate(dvnode, NULL);
if (ret < 0) {
- key_put(key);
+ afs_dir_unuse_cookie(dvnode, ret);
_leave(" = %d [val]", ret);
return ERR_PTR(ret);
}
@@ -1025,15 +999,13 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
dentry->d_name.name[dentry->d_name.len - 3] == 's' &&
dentry->d_name.name[dentry->d_name.len - 2] == 'y' &&
dentry->d_name.name[dentry->d_name.len - 1] == 's')
- return afs_lookup_atsys(dir, dentry, key);
+ return afs_lookup_atsys(dir, dentry);
afs_stat_v(dvnode, n_lookup);
- inode = afs_do_lookup(dir, dentry, key);
- key_put(key);
+ inode = afs_do_lookup(dir, dentry);
if (inode == ERR_PTR(-ENOENT))
- inode = afs_try_auto_mntpt(dentry, dir);
-
- if (!IS_ERR_OR_NULL(inode))
+ inode = NULL;
+ else if (!IS_ERR_OR_NULL(inode))
fid = AFS_FS_I(inode)->fid;
_debug("splice %p", dentry->d_inode);
@@ -1051,21 +1023,12 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
/*
* Check the validity of a dentry under RCU conditions.
*/
-static int afs_d_revalidate_rcu(struct dentry *dentry)
+static int afs_d_revalidate_rcu(struct afs_vnode *dvnode, struct dentry *dentry)
{
- struct afs_vnode *dvnode;
- struct dentry *parent;
- struct inode *dir;
long dir_version, de_version;
_enter("%p", dentry);
- /* Check the parent directory is still valid first. */
- parent = READ_ONCE(dentry->d_parent);
- dir = d_inode_rcu(parent);
- if (!dir)
- return -ECHILD;
- dvnode = AFS_FS_I(dir);
if (test_bit(AFS_VNODE_DELETED, &dvnode->flags))
return -ECHILD;
@@ -1093,11 +1056,11 @@ static int afs_d_revalidate_rcu(struct dentry *dentry)
* - NOTE! the hit can be a negative hit too, so we can't assume we have an
* inode
*/
-static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
+static int afs_d_revalidate(struct inode *parent_dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
- struct afs_vnode *vnode, *dir;
+ struct afs_vnode *vnode, *dir = AFS_FS_I(parent_dir);
struct afs_fid fid;
- struct dentry *parent;
struct inode *inode;
struct key *key;
afs_dataversion_t dir_version, invalid_before;
@@ -1105,7 +1068,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
int ret;
if (flags & LOOKUP_RCU)
- return afs_d_revalidate_rcu(dentry);
+ return afs_d_revalidate_rcu(dir, dentry);
if (d_really_is_positive(dentry)) {
vnode = AFS_FS_I(d_inode(dentry));
@@ -1120,14 +1083,9 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
if (IS_ERR(key))
key = NULL;
- /* Hold the parent dentry so we can peer at it */
- parent = dget_parent(dentry);
- dir = AFS_FS_I(d_inode(parent));
-
/* validate the parent directory */
ret = afs_validate(dir, key);
if (ret == -ERESTARTSYS) {
- dput(parent);
key_put(key);
return ret;
}
@@ -1155,7 +1113,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
afs_stat_v(dir, n_reval);
/* search the directory for this vnode */
- ret = afs_do_lookup_one(&dir->netfs.inode, dentry, &fid, key, &dir_version);
+ ret = afs_do_lookup_one(&dir->netfs.inode, name, &fid, &dir_version);
switch (ret) {
case 0:
/* the filename maps to something */
@@ -1199,22 +1157,19 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
goto out_valid;
default:
- _debug("failed to iterate dir %pd: %d",
- parent, ret);
+ _debug("failed to iterate parent %pd2: %d", dentry, ret);
goto not_found;
}
out_valid:
dentry->d_fsdata = (void *)(unsigned long)dir_version;
out_valid_noupdate:
- dput(parent);
key_put(key);
_leave(" = 1 [valid]");
return 1;
not_found:
_debug("dropping dentry %pd2", dentry);
- dput(parent);
key_put(key);
_leave(" = 0 [bad]");
@@ -1282,6 +1237,7 @@ void afs_check_for_remote_deletion(struct afs_operation *op)
*/
static void afs_vnode_new_inode(struct afs_operation *op)
{
+ struct afs_vnode_param *dvp = &op->file[0];
struct afs_vnode_param *vp = &op->file[1];
struct afs_vnode *vnode;
struct inode *inode;
@@ -1301,6 +1257,10 @@ static void afs_vnode_new_inode(struct afs_operation *op)
vnode = AFS_FS_I(inode);
set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
+ if (S_ISDIR(inode->i_mode))
+ afs_mkdir_init_dir(vnode, dvp->vnode);
+ else if (S_ISLNK(inode->i_mode))
+ afs_init_new_symlink(vnode, op);
if (!afs_op_error(op))
afs_cache_permit(vnode, op->key, vnode->cb_break, &vp->scb);
d_instantiate(op->dentry, inode);
@@ -1317,18 +1277,21 @@ static void afs_create_success(struct afs_operation *op)
static void afs_create_edit_dir(struct afs_operation *op)
{
+ struct netfs_cache_resources cres = {};
struct afs_vnode_param *dvp = &op->file[0];
struct afs_vnode_param *vp = &op->file[1];
struct afs_vnode *dvnode = dvp->vnode;
_enter("op=%08x", op->debug_id);
+ fscache_begin_write_operation(&cres, afs_vnode_cache(dvnode));
down_write(&dvnode->validate_lock);
if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
dvnode->status.data_version == dvp->dv_before + dvp->dv_delta)
afs_edit_dir_add(dvnode, &op->dentry->d_name, &vp->fid,
op->create.reason);
up_write(&dvnode->validate_lock);
+ fscache_end_operation(&cres);
}
static void afs_create_put(struct afs_operation *op)
@@ -1351,11 +1314,12 @@ static const struct afs_operation_ops afs_mkdir_operation = {
/*
* create a directory on an AFS filesystem
*/
-static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
+ int ret;
_enter("{%llx:%llu},{%pd},%ho",
dvnode->fid.vid, dvnode->fid.vnode, dentry, mode);
@@ -1363,9 +1327,11 @@ static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
op = afs_alloc_operation(NULL, dvnode->volume);
if (IS_ERR(op)) {
d_drop(dentry);
- return PTR_ERR(op);
+ return ERR_CAST(op);
}
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
afs_op_set_vnode(op, 0, dvnode);
op->file[0].dv_delta = 1;
op->file[0].modification = true;
@@ -1375,7 +1341,9 @@ static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
op->create.reason = afs_edit_dir_for_mkdir;
op->mtime = current_time(dir);
op->ops = &afs_mkdir_operation;
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ERR_PTR(ret);
}
/*
@@ -1388,8 +1356,8 @@ static void afs_dir_remove_subdir(struct dentry *dentry)
clear_nlink(&vnode->netfs.inode);
set_bit(AFS_VNODE_DELETED, &vnode->flags);
- atomic64_set(&vnode->cb_expires_at, AFS_NO_CB_PROMISE);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ afs_clear_cb_promise(vnode, afs_cb_promise_clear_rmdir);
+ afs_invalidate_dir(vnode, afs_dir_invalid_subdir_removed);
}
}
@@ -1403,18 +1371,21 @@ static void afs_rmdir_success(struct afs_operation *op)
static void afs_rmdir_edit_dir(struct afs_operation *op)
{
+ struct netfs_cache_resources cres = {};
struct afs_vnode_param *dvp = &op->file[0];
struct afs_vnode *dvnode = dvp->vnode;
_enter("op=%08x", op->debug_id);
afs_dir_remove_subdir(op->dentry);
+ fscache_begin_write_operation(&cres, afs_vnode_cache(dvnode));
down_write(&dvnode->validate_lock);
if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
dvnode->status.data_version == dvp->dv_before + dvp->dv_delta)
afs_edit_dir_remove(dvnode, &op->dentry->d_name,
afs_edit_dir_for_rmdir);
up_write(&dvnode->validate_lock);
+ fscache_end_operation(&cres);
}
static void afs_rmdir_put(struct afs_operation *op)
@@ -1449,6 +1420,8 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
if (IS_ERR(op))
return PTR_ERR(op);
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
afs_op_set_vnode(op, 0, dvnode);
op->file[0].dv_delta = 1;
op->file[0].modification = true;
@@ -1472,10 +1445,18 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
op->file[1].vnode = vnode;
}
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+
+ /* Not all systems that can host afs servers have ENOTEMPTY. */
+ if (ret == -EEXIST)
+ ret = -ENOTEMPTY;
+out:
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ret;
error:
- return afs_put_operation(op);
+ ret = afs_put_operation(op);
+ goto out;
}
/*
@@ -1538,16 +1519,19 @@ static void afs_unlink_success(struct afs_operation *op)
static void afs_unlink_edit_dir(struct afs_operation *op)
{
+ struct netfs_cache_resources cres = {};
struct afs_vnode_param *dvp = &op->file[0];
struct afs_vnode *dvnode = dvp->vnode;
_enter("op=%08x", op->debug_id);
+ fscache_begin_write_operation(&cres, afs_vnode_cache(dvnode));
down_write(&dvnode->validate_lock);
if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
dvnode->status.data_version == dvp->dv_before + dvp->dv_delta)
afs_edit_dir_remove(dvnode, &op->dentry->d_name,
afs_edit_dir_for_unlink);
up_write(&dvnode->validate_lock);
+ fscache_end_operation(&cres);
}
static void afs_unlink_put(struct afs_operation *op)
@@ -1586,6 +1570,8 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
if (IS_ERR(op))
return PTR_ERR(op);
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
afs_op_set_vnode(op, 0, dvnode);
op->file[0].dv_delta = 1;
op->file[0].modification = true;
@@ -1632,10 +1618,10 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
afs_wait_for_operation(op);
}
- return afs_put_operation(op);
-
error:
- return afs_put_operation(op);
+ ret = afs_put_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ret;
}
static const struct afs_operation_ops afs_create_operation = {
@@ -1669,6 +1655,8 @@ static int afs_create(struct mnt_idmap *idmap, struct inode *dir,
goto error;
}
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
afs_op_set_vnode(op, 0, dvnode);
op->file[0].dv_delta = 1;
op->file[0].modification = true;
@@ -1679,7 +1667,9 @@ static int afs_create(struct mnt_idmap *idmap, struct inode *dir,
op->create.reason = afs_edit_dir_for_create;
op->mtime = current_time(dir);
op->ops = &afs_create_operation;
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ret;
error:
d_drop(dentry);
@@ -1744,6 +1734,8 @@ static int afs_link(struct dentry *from, struct inode *dir,
goto error;
}
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
ret = afs_validate(vnode, op->key);
if (ret < 0)
goto error_op;
@@ -1759,10 +1751,13 @@ static int afs_link(struct dentry *from, struct inode *dir,
op->dentry_2 = from;
op->ops = &afs_link_operation;
op->create.reason = afs_edit_dir_for_link;
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ret;
error_op:
afs_put_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
error:
d_drop(dentry);
_leave(" = %d", ret);
@@ -1806,6 +1801,8 @@ static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir,
goto error;
}
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
afs_op_set_vnode(op, 0, dvnode);
op->file[0].dv_delta = 1;
@@ -1814,7 +1811,9 @@ static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir,
op->create.reason = afs_edit_dir_for_symlink;
op->create.symlink = content;
op->mtime = current_time(dir);
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ret;
error:
d_drop(dentry);
@@ -1824,7 +1823,8 @@ error:
static void afs_rename_success(struct afs_operation *op)
{
- struct afs_vnode *vnode = AFS_FS_I(d_inode(op->dentry));
+ struct afs_vnode *vnode = op->more_files[0].vnode;
+ struct afs_vnode *new_vnode = op->more_files[1].vnode;
_enter("op=%08x", op->debug_id);
@@ -1835,26 +1835,46 @@ static void afs_rename_success(struct afs_operation *op)
op->ctime = op->file[1].scb.status.mtime_client;
afs_vnode_commit_status(op, &op->file[1]);
}
+ if (op->more_files[0].scb.have_status)
+ afs_vnode_commit_status(op, &op->more_files[0]);
+ if (op->more_files[1].scb.have_status)
+ afs_vnode_commit_status(op, &op->more_files[1]);
/* If we're moving a subdir between dirs, we need to update
* its DV counter too as the ".." will be altered.
*/
- if (S_ISDIR(vnode->netfs.inode.i_mode) &&
- op->file[0].vnode != op->file[1].vnode) {
- u64 new_dv;
+ if (op->file[0].vnode != op->file[1].vnode) {
+ if (S_ISDIR(vnode->netfs.inode.i_mode)) {
+ u64 new_dv;
- write_seqlock(&vnode->cb_lock);
+ write_seqlock(&vnode->cb_lock);
- new_dv = vnode->status.data_version + 1;
- vnode->status.data_version = new_dv;
- inode_set_iversion_raw(&vnode->netfs.inode, new_dv);
+ new_dv = vnode->status.data_version + 1;
+ trace_afs_set_dv(vnode, new_dv);
+ vnode->status.data_version = new_dv;
+ inode_set_iversion_raw(&vnode->netfs.inode, new_dv);
- write_sequnlock(&vnode->cb_lock);
+ write_sequnlock(&vnode->cb_lock);
+ }
+
+ if ((op->rename.rename_flags & RENAME_EXCHANGE) &&
+ S_ISDIR(new_vnode->netfs.inode.i_mode)) {
+ u64 new_dv;
+
+ write_seqlock(&new_vnode->cb_lock);
+
+ new_dv = new_vnode->status.data_version + 1;
+ new_vnode->status.data_version = new_dv;
+ inode_set_iversion_raw(&new_vnode->netfs.inode, new_dv);
+
+ write_sequnlock(&new_vnode->cb_lock);
+ }
}
}
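
The rule the success handler applies can be stated compactly: a cross-directory move rewrites the moved directory's "..", so its data version must be bumped locally, and RENAME_EXCHANGE does the same for the other object if it is also a directory. A small illustrative model in plain C, without the cb_lock/seqlock handling:

#include <stdbool.h>
#include <stdint.h>

struct vnode_model {
	bool		is_dir;
	uint64_t	data_version;
};

/* Bump cached data versions after a successful rename: a directory moved
 * between parents gets its ".." rewritten by the server, and with
 * RENAME_EXCHANGE the displaced object may need the same treatment.
 */
static void rename_bump_dv(struct vnode_model *moved, struct vnode_model *other,
			   bool cross_directory, bool exchange)
{
	if (!cross_directory)
		return;
	if (moved->is_dir)
		moved->data_version++;
	if (exchange && other && other->is_dir)
		other->data_version++;
}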
static void afs_rename_edit_dir(struct afs_operation *op)
{
+ struct netfs_cache_resources orig_cres = {}, new_cres = {};
struct afs_vnode_param *orig_dvp = &op->file[0];
struct afs_vnode_param *new_dvp = &op->file[1];
struct afs_vnode *orig_dvnode = orig_dvp->vnode;
@@ -1871,6 +1891,10 @@ static void afs_rename_edit_dir(struct afs_operation *op)
op->rename.rehash = NULL;
}
+ fscache_begin_write_operation(&orig_cres, afs_vnode_cache(orig_dvnode));
+ if (new_dvnode != orig_dvnode)
+ fscache_begin_write_operation(&new_cres, afs_vnode_cache(new_dvnode));
+
down_write(&orig_dvnode->validate_lock);
if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) &&
orig_dvnode->status.data_version == orig_dvp->dv_before + orig_dvp->dv_delta)
@@ -1895,8 +1919,8 @@ static void afs_rename_edit_dir(struct afs_operation *op)
if (S_ISDIR(vnode->netfs.inode.i_mode) &&
new_dvnode != orig_dvnode &&
test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
- afs_edit_dir_update_dotdot(vnode, new_dvnode,
- afs_edit_dir_for_rename_sub);
+ afs_edit_dir_update(vnode, &dotdot_name, new_dvnode,
+ afs_edit_dir_for_rename_sub);
new_inode = d_inode(new_dentry);
if (new_inode) {
@@ -1910,9 +1934,6 @@ static void afs_rename_edit_dir(struct afs_operation *op)
/* Now we can update d_fsdata on the dentries to reflect their
* new parent's data_version.
- *
- * Note that if we ever implement RENAME_EXCHANGE, we'll have
- * to update both dentries with opposing dir versions.
*/
afs_update_dentry_version(op, new_dvp, op->dentry);
afs_update_dentry_version(op, new_dvp, op->dentry_2);
@@ -1920,6 +1941,70 @@ static void afs_rename_edit_dir(struct afs_operation *op)
d_move(old_dentry, new_dentry);
up_write(&new_dvnode->validate_lock);
+ fscache_end_operation(&orig_cres);
+ if (new_dvnode != orig_dvnode)
+ fscache_end_operation(&new_cres);
+}
+
+static void afs_rename_exchange_edit_dir(struct afs_operation *op)
+{
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ struct afs_vnode *orig_dvnode = orig_dvp->vnode;
+ struct afs_vnode *new_dvnode = new_dvp->vnode;
+ struct afs_vnode *old_vnode = op->more_files[0].vnode;
+ struct afs_vnode *new_vnode = op->more_files[1].vnode;
+ struct dentry *old_dentry = op->dentry;
+ struct dentry *new_dentry = op->dentry_2;
+
+ _enter("op=%08x", op->debug_id);
+
+ if (new_dvnode == orig_dvnode) {
+ down_write(&orig_dvnode->validate_lock);
+ if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) &&
+ orig_dvnode->status.data_version == orig_dvp->dv_before + orig_dvp->dv_delta) {
+ afs_edit_dir_update(orig_dvnode, &old_dentry->d_name,
+ new_vnode, afs_edit_dir_for_rename_0);
+ afs_edit_dir_update(orig_dvnode, &new_dentry->d_name,
+ old_vnode, afs_edit_dir_for_rename_1);
+ }
+
+ d_exchange(old_dentry, new_dentry);
+ up_write(&orig_dvnode->validate_lock);
+ } else {
+ down_write(&orig_dvnode->validate_lock);
+ if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) &&
+ orig_dvnode->status.data_version == orig_dvp->dv_before + orig_dvp->dv_delta)
+ afs_edit_dir_update(orig_dvnode, &old_dentry->d_name,
+ new_vnode, afs_edit_dir_for_rename_0);
+
+ up_write(&orig_dvnode->validate_lock);
+ down_write(&new_dvnode->validate_lock);
+
+ if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags) &&
+ new_dvnode->status.data_version == new_dvp->dv_before + new_dvp->dv_delta)
+ afs_edit_dir_update(new_dvnode, &new_dentry->d_name,
+ old_vnode, afs_edit_dir_for_rename_1);
+
+ if (S_ISDIR(old_vnode->netfs.inode.i_mode) &&
+ test_bit(AFS_VNODE_DIR_VALID, &old_vnode->flags))
+ afs_edit_dir_update(old_vnode, &dotdot_name, new_dvnode,
+ afs_edit_dir_for_rename_sub);
+
+ if (S_ISDIR(new_vnode->netfs.inode.i_mode) &&
+ test_bit(AFS_VNODE_DIR_VALID, &new_vnode->flags))
+ afs_edit_dir_update(new_vnode, &dotdot_name, orig_dvnode,
+ afs_edit_dir_for_rename_sub);
+
+ /* Now we can update d_fsdata on the dentries to reflect their
+ * new parents' data_version.
+ */
+ afs_update_dentry_version(op, new_dvp, old_dentry);
+ afs_update_dentry_version(op, orig_dvp, new_dentry);
+
+ d_exchange(old_dentry, new_dentry);
+ up_write(&new_dvnode->validate_lock);
+ }
}
static void afs_rename_put(struct afs_operation *op)
@@ -1940,6 +2025,32 @@ static const struct afs_operation_ops afs_rename_operation = {
.put = afs_rename_put,
};
+#if 0 /* Autoswitched in yfs_fs_rename_replace(). */
+static const struct afs_operation_ops afs_rename_replace_operation = {
+ .issue_afs_rpc = NULL,
+ .issue_yfs_rpc = yfs_fs_rename_replace,
+ .success = afs_rename_success,
+ .edit_dir = afs_rename_edit_dir,
+ .put = afs_rename_put,
+};
+#endif
+
+static const struct afs_operation_ops afs_rename_noreplace_operation = {
+ .issue_afs_rpc = NULL,
+ .issue_yfs_rpc = yfs_fs_rename_noreplace,
+ .success = afs_rename_success,
+ .edit_dir = afs_rename_edit_dir,
+ .put = afs_rename_put,
+};
+
+static const struct afs_operation_ops afs_rename_exchange_operation = {
+ .issue_afs_rpc = NULL,
+ .issue_yfs_rpc = yfs_fs_rename_exchange,
+ .success = afs_rename_success,
+ .edit_dir = afs_rename_exchange_edit_dir,
+ .put = afs_rename_put,
+};
+
/*
* rename a file in an AFS filesystem and/or move it between directories
*/
@@ -1948,10 +2059,10 @@ static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *new_dentry, unsigned int flags)
{
struct afs_operation *op;
- struct afs_vnode *orig_dvnode, *new_dvnode, *vnode;
+ struct afs_vnode *orig_dvnode, *new_dvnode, *vnode, *new_vnode = NULL;
int ret;
- if (flags)
+ if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
return -EINVAL;
/* Don't allow silly-rename files be moved around. */
@@ -1961,6 +2072,8 @@ static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
vnode = AFS_FS_I(d_inode(old_dentry));
orig_dvnode = AFS_FS_I(old_dir);
new_dvnode = AFS_FS_I(new_dir);
+ if (d_is_positive(new_dentry))
+ new_vnode = AFS_FS_I(d_inode(new_dentry));
_enter("{%llx:%llu},{%llx:%llu},{%llx:%llu},{%pd}",
orig_dvnode->fid.vid, orig_dvnode->fid.vnode,
@@ -1972,11 +2085,20 @@ static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (IS_ERR(op))
return PTR_ERR(op);
+ fscache_use_cookie(afs_vnode_cache(orig_dvnode), true);
+ if (new_dvnode != orig_dvnode)
+ fscache_use_cookie(afs_vnode_cache(new_dvnode), true);
+
ret = afs_validate(vnode, op->key);
afs_op_set_error(op, ret);
if (ret < 0)
goto error;
+ ret = -ENOMEM;
+ op->more_files = kvcalloc(2, sizeof(struct afs_vnode_param), GFP_KERNEL);
+ if (!op->more_files)
+ goto error;
+
afs_op_set_vnode(op, 0, orig_dvnode);
afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
op->file[0].dv_delta = 1;
@@ -1985,46 +2107,63 @@ static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
op->file[1].modification = true;
op->file[0].update_ctime = true;
op->file[1].update_ctime = true;
+ op->more_files[0].vnode = vnode;
+ op->more_files[0].speculative = true;
+ op->more_files[1].vnode = new_vnode;
+ op->more_files[1].speculative = true;
+ op->nr_files = 4;
op->dentry = old_dentry;
op->dentry_2 = new_dentry;
+ op->rename.rename_flags = flags;
op->rename.new_negative = d_is_negative(new_dentry);
- op->ops = &afs_rename_operation;
- /* For non-directories, check whether the target is busy and if so,
- * make a copy of the dentry and then do a silly-rename. If the
- * silly-rename succeeds, the copied dentry is hashed and becomes the
- * new target.
- */
- if (d_is_positive(new_dentry) && !d_is_dir(new_dentry)) {
- /* To prevent any new references to the target during the
- * rename, we unhash the dentry in advance.
+ if (flags & RENAME_NOREPLACE) {
+ op->ops = &afs_rename_noreplace_operation;
+ } else if (flags & RENAME_EXCHANGE) {
+ op->ops = &afs_rename_exchange_operation;
+ d_drop(new_dentry);
+ } else {
+ /* If we might displace the target, we might need to do silly
+ * rename.
*/
- if (!d_unhashed(new_dentry)) {
- d_drop(new_dentry);
- op->rename.rehash = new_dentry;
- }
+ op->ops = &afs_rename_operation;
- if (d_count(new_dentry) > 2) {
- /* copy the target dentry's name */
- op->rename.tmp = d_alloc(new_dentry->d_parent,
- &new_dentry->d_name);
- if (!op->rename.tmp) {
- afs_op_nomem(op);
- goto error;
+ /* For non-directories, check whether the target is busy and if
+ * so, make a copy of the dentry and then do a silly-rename.
+ * If the silly-rename succeeds, the copied dentry is hashed
+ * and becomes the new target.
+ */
+ if (d_is_positive(new_dentry) && !d_is_dir(new_dentry)) {
+ /* To prevent any new references to the target during
+ * the rename, we unhash the dentry in advance.
+ */
+ if (!d_unhashed(new_dentry)) {
+ d_drop(new_dentry);
+ op->rename.rehash = new_dentry;
}
- ret = afs_sillyrename(new_dvnode,
- AFS_FS_I(d_inode(new_dentry)),
- new_dentry, op->key);
- if (ret) {
- afs_op_set_error(op, ret);
- goto error;
+ if (d_count(new_dentry) > 2) {
+ /* copy the target dentry's name */
+ op->rename.tmp = d_alloc(new_dentry->d_parent,
+ &new_dentry->d_name);
+ if (!op->rename.tmp) {
+ afs_op_nomem(op);
+ goto error;
+ }
+
+ ret = afs_sillyrename(new_dvnode,
+ AFS_FS_I(d_inode(new_dentry)),
+ new_dentry, op->key);
+ if (ret) {
+ afs_op_set_error(op, ret);
+ goto error;
+ }
+
+ op->dentry_2 = op->rename.tmp;
+ op->rename.rehash = NULL;
+ op->rename.new_negative = true;
}
-
- op->dentry_2 = op->rename.tmp;
- op->rename.rehash = NULL;
- op->rename.new_negative = true;
}
}
@@ -2039,47 +2178,45 @@ static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
*/
d_drop(old_dentry);
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+ if (ret == -ENOTSUPP)
+ ret = -EINVAL;
+out:
+ afs_dir_unuse_cookie(orig_dvnode, ret);
+ if (new_dvnode != orig_dvnode)
+ afs_dir_unuse_cookie(new_dvnode, ret);
+ return ret;
error:
- return afs_put_operation(op);
-}
-
-/*
- * Release a directory folio and clean up its private state if it's not busy
- * - return true if the folio can now be released, false if not
- */
-static bool afs_dir_release_folio(struct folio *folio, gfp_t gfp_flags)
-{
- struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
-
- _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio->index);
-
- folio_detach_private(folio);
-
- /* The directory will need reloading. */
- if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
- afs_stat_v(dvnode, n_relpg);
- return true;
+ ret = afs_put_operation(op);
+ goto out;
}
/*
- * Invalidate part or all of a folio.
+ * Write the file contents to the cache as a single blob.
*/
-static void afs_dir_invalidate_folio(struct folio *folio, size_t offset,
- size_t length)
+int afs_single_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
-
- _enter("{%lu},%zu,%zu", folio->index, offset, length);
-
- BUG_ON(!folio_test_locked(folio));
+ struct afs_vnode *dvnode = AFS_FS_I(mapping->host);
+ struct iov_iter iter;
+ bool is_dir = (S_ISDIR(dvnode->netfs.inode.i_mode) &&
+ !test_bit(AFS_VNODE_MOUNTPOINT, &dvnode->flags));
+ int ret = 0;
- /* The directory will need reloading. */
- if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
- afs_stat_v(dvnode, n_inval);
+ /* Need to lock to prevent the folio queue and folios from being thrown
+ * away.
+ */
+ down_read(&dvnode->validate_lock);
+
+ if (is_dir ?
+ test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) :
+ atomic64_read(&dvnode->cb_expires_at) != AFS_NO_CB_PROMISE) {
+ iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0,
+ i_size_read(&dvnode->netfs.inode));
+ ret = netfs_writeback_single(mapping, wbc, &iter);
+ }
- /* we clean up only if the entire folio is being invalidated */
- if (offset == 0 && length == folio_size(folio))
- folio_detach_private(folio);
+ up_read(&dvnode->validate_lock);
+ return ret;
}
diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
index fe223fb78111..fd3aa9f97ce6 100644
--- a/fs/afs/dir_edit.c
+++ b/fs/afs/dir_edit.c
@@ -10,6 +10,7 @@
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/iversion.h>
+#include <linux/folio_queue.h>
#include "internal.h"
#include "xdr_fs.h"
@@ -105,23 +106,57 @@ static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
}
/*
- * Get a new directory folio.
+ * Get a specific block, extending the directory storage to cover it as needed.
*/
-static struct folio *afs_dir_get_folio(struct afs_vnode *vnode, pgoff_t index)
+static union afs_xdr_dir_block *afs_dir_get_block(struct afs_dir_iter *iter, size_t block)
{
- struct address_space *mapping = vnode->netfs.inode.i_mapping;
+ struct folio_queue *fq;
+ struct afs_vnode *dvnode = iter->dvnode;
struct folio *folio;
+ size_t blpos = block * AFS_DIR_BLOCK_SIZE;
+ size_t blend = (block + 1) * AFS_DIR_BLOCK_SIZE, fpos = iter->fpos;
+ int ret;
+
+ if (dvnode->directory_size < blend) {
+ size_t cur_size = dvnode->directory_size;
+
+ ret = netfs_alloc_folioq_buffer(
+ NULL, &dvnode->directory, &cur_size, blend,
+ mapping_gfp_mask(dvnode->netfs.inode.i_mapping));
+ dvnode->directory_size = cur_size;
+ if (ret < 0)
+ goto fail;
+ }
- folio = __filemap_get_folio(mapping, index,
- FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
- mapping->gfp_mask);
- if (IS_ERR(folio)) {
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
- return NULL;
+ fq = iter->fq;
+ if (!fq)
+ fq = dvnode->directory;
+
+ /* Search the folio queue for the folio containing the block... */
+ for (; fq; fq = fq->next) {
+ for (int s = iter->fq_slot; s < folioq_count(fq); s++) {
+ size_t fsize = folioq_folio_size(fq, s);
+
+ if (blend <= fpos + fsize) {
+ /* ... and then return the mapped block. */
+ folio = folioq_folio(fq, s);
+ if (WARN_ON_ONCE(folio_pos(folio) != fpos))
+ goto fail;
+ iter->fq = fq;
+ iter->fq_slot = s;
+ iter->fpos = fpos;
+ return kmap_local_folio(folio, blpos - fpos);
+ }
+ fpos += fsize;
+ }
+ iter->fq_slot = 0;
}
- if (!folio_test_private(folio))
- folio_attach_private(folio, (void *)1);
- return folio;
+
+fail:
+ iter->fq = NULL;
+ iter->fq_slot = 0;
+ afs_invalidate_dir(dvnode, afs_dir_invalid_edit_get_block);
+ return NULL;
}
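
Locating a block in the folio queue is a linear scan for the segment whose byte range covers the block. A simplified userspace sketch, with one folio per list node and invented stand-in struct and field names:

#include <stddef.h>

#define AFS_DIR_BLOCK_SIZE 2048

struct segment {			/* stand-in for one folio in the queue */
	struct segment	*next;
	size_t		size;		/* a multiple of the block size */
	unsigned char	*data;
};

/* Return a pointer to directory block 'block' within the segment list, or
 * NULL if the buffer doesn't extend that far (the kernel then invalidates
 * the directory and forces a re-download).
 */
static unsigned char *find_block(struct segment *seg, size_t block)
{
	size_t blpos = block * AFS_DIR_BLOCK_SIZE;
	size_t blend = blpos + AFS_DIR_BLOCK_SIZE;
	size_t fpos = 0;

	for (; seg; seg = seg->next) {
		if (blend <= fpos + seg->size)
			return seg->data + (blpos - fpos);
		fpos += seg->size;
	}
	return NULL;
}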
/*
@@ -204,14 +239,13 @@ static void afs_edit_init_block(union afs_xdr_dir_block *meta,
* The caller must hold the inode locked.
*/
void afs_edit_dir_add(struct afs_vnode *vnode,
- struct qstr *name, struct afs_fid *new_fid,
+ const struct qstr *name, struct afs_fid *new_fid,
enum afs_edit_dir_reason why)
{
union afs_xdr_dir_block *meta, *block;
union afs_xdr_dirent *de;
- struct folio *folio0, *folio;
- unsigned int need_slots, nr_blocks, b;
- pgoff_t index;
+ struct afs_dir_iter iter = { .dvnode = vnode };
+ unsigned int nr_blocks, b, entry;
loff_t i_size;
int slot;
@@ -220,20 +254,17 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
i_size = i_size_read(&vnode->netfs.inode);
if (i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS ||
(i_size & (AFS_DIR_BLOCK_SIZE - 1))) {
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_add_bad_size);
return;
}
- folio0 = afs_dir_get_folio(vnode, 0);
- if (!folio0) {
- _leave(" [fgp]");
+ meta = afs_dir_get_block(&iter, 0);
+ if (!meta)
return;
- }
/* Work out how many slots we're going to need. */
- need_slots = afs_dir_calc_slots(name->len);
+ iter.nr_slots = afs_dir_calc_slots(name->len);
- meta = kmap_local_folio(folio0, 0);
if (i_size == 0)
goto new_directory;
nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
@@ -245,22 +276,21 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
/* If the directory extended into a new folio, then we need to
* tack a new folio on the end.
*/
- index = b / AFS_DIR_BLOCKS_PER_PAGE;
if (nr_blocks >= AFS_DIR_MAX_BLOCKS)
- goto error;
- if (index >= folio_nr_pages(folio0)) {
- folio = afs_dir_get_folio(vnode, index);
- if (!folio)
- goto error;
- } else {
- folio = folio0;
- }
+ goto error_too_many_blocks;
- block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_pos(folio));
+ /* Lower dir blocks have a counter in the header we can check. */
+ if (b < AFS_DIR_BLOCKS_WITH_CTR &&
+ meta->meta.alloc_ctrs[b] < iter.nr_slots)
+ continue;
+
+ block = afs_dir_get_block(&iter, b);
+ if (!block)
+ goto error;
/* Abandon the edit if we got a callback break. */
if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
- goto invalidated;
+ goto already_invalidated;
_debug("block %u: %2u %3u %u",
b,
@@ -275,31 +305,23 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE);
}
- /* Only lower dir blocks have a counter in the header. */
- if (b >= AFS_DIR_BLOCKS_WITH_CTR ||
- meta->meta.alloc_ctrs[b] >= need_slots) {
- /* We need to try and find one or more consecutive
- * slots to hold the entry.
- */
- slot = afs_find_contig_bits(block, need_slots);
- if (slot >= 0) {
- _debug("slot %u", slot);
- goto found_space;
- }
+ /* We need to try and find one or more consecutive slots to
+ * hold the entry.
+ */
+ slot = afs_find_contig_bits(block, iter.nr_slots);
+ if (slot >= 0) {
+ _debug("slot %u", slot);
+ goto found_space;
}
kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
}
/* There are no spare slots of sufficient size, yet the operation
* succeeded. Download the directory again.
*/
trace_afs_edit_dir(vnode, why, afs_edit_dir_create_nospc, 0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_add_no_slots);
goto out_unmap;
new_directory:
@@ -307,8 +329,7 @@ new_directory:
i_size = AFS_DIR_BLOCK_SIZE;
afs_set_i_size(vnode, i_size);
slot = AFS_DIR_RESV_BLOCKS0;
- folio = folio0;
- block = kmap_local_folio(folio, 0);
+ block = afs_dir_get_block(&iter, 0);
nr_blocks = 1;
b = 0;
@@ -326,41 +347,39 @@ found_space:
de->u.name[name->len] = 0;
/* Adjust the bitmap. */
- afs_set_contig_bits(block, slot, need_slots);
- kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
+ afs_set_contig_bits(block, slot, iter.nr_slots);
/* Adjust the allocation counter. */
if (b < AFS_DIR_BLOCKS_WITH_CTR)
- meta->meta.alloc_ctrs[b] -= need_slots;
+ meta->meta.alloc_ctrs[b] -= iter.nr_slots;
+
+ /* Adjust the hash chain. */
+ entry = b * AFS_DIR_SLOTS_PER_BLOCK + slot;
+ iter.bucket = afs_dir_hash_name(name);
+ de->u.hash_next = meta->meta.hashtable[iter.bucket];
+ meta->meta.hashtable[iter.bucket] = htons(entry);
+ kunmap_local(block);
inode_inc_iversion_raw(&vnode->netfs.inode);
afs_stat_v(vnode, n_dir_cr);
_debug("Insert %s in %u[%u]", name->name, b, slot);
+ netfs_single_mark_inode_dirty(&vnode->netfs.inode);
+
out_unmap:
kunmap_local(meta);
- folio_unlock(folio0);
- folio_put(folio0);
_leave("");
return;
-invalidated:
+already_invalidated:
trace_afs_edit_dir(vnode, why, afs_edit_dir_create_inval, 0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
goto out_unmap;
+error_too_many_blocks:
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_add_too_many_blocks);
error:
trace_afs_edit_dir(vnode, why, afs_edit_dir_create_error, 0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
goto out_unmap;
}
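
The hash-chain insertion at the end of afs_edit_dir_add() is a plain head insertion on a singly linked list of big-endian 16-bit entry numbers. A userspace sketch of just that step; the bucket and slot counts are assumed values rather than taken from the patch:

#include <stdint.h>
#include <arpa/inet.h>

#define NR_BUCKETS	128	/* assumed value of AFS_DIR_HASHTBL_SIZE */
#define SLOTS_PER_BLOCK	64	/* 2048-byte block / 32-byte dirent */

struct dirent_model {
	uint16_t hash_next;	/* big-endian entry number, 0 terminates */
};

/* Head-insert a new dirent into its hash bucket: the new entry inherits the
 * old chain head and the bucket then points at the new entry.
 */
static void chain_insert(uint16_t hashtable[NR_BUCKETS],
			 struct dirent_model *entries,
			 unsigned int bucket,
			 unsigned int block, unsigned int slot)
{
	unsigned int entry = block * SLOTS_PER_BLOCK + slot;

	entries[entry].hash_next = hashtable[bucket];
	hashtable[bucket] = htons(entry);
}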
@@ -372,15 +391,16 @@ error:
* The caller must hold the inode locked.
*/
void afs_edit_dir_remove(struct afs_vnode *vnode,
- struct qstr *name, enum afs_edit_dir_reason why)
+ const struct qstr *name, enum afs_edit_dir_reason why)
{
- union afs_xdr_dir_block *meta, *block;
- union afs_xdr_dirent *de;
- struct folio *folio0, *folio;
- unsigned int need_slots, nr_blocks, b;
- pgoff_t index;
+ union afs_xdr_dir_block *meta, *block, *pblock;
+ union afs_xdr_dirent *de, *pde;
+ struct afs_dir_iter iter = { .dvnode = vnode };
+ struct afs_fid fid;
+ unsigned int b, slot, entry;
loff_t i_size;
- int slot;
+ __be16 next;
+ int found;
_enter(",,{%d,%s},", name->len, name->name);
@@ -388,81 +408,95 @@ void afs_edit_dir_remove(struct afs_vnode *vnode,
if (i_size < AFS_DIR_BLOCK_SIZE ||
i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS ||
(i_size & (AFS_DIR_BLOCK_SIZE - 1))) {
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_rem_bad_size);
return;
}
- nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
- folio0 = afs_dir_get_folio(vnode, 0);
- if (!folio0) {
- _leave(" [fgp]");
+ if (!afs_dir_init_iter(&iter, name))
return;
- }
-
- /* Work out how many slots we're going to discard. */
- need_slots = afs_dir_calc_slots(name->len);
-
- meta = kmap_local_folio(folio0, 0);
-
- /* Find a block that has sufficient slots available. Each folio
- * contains two or more directory blocks.
- */
- for (b = 0; b < nr_blocks; b++) {
- index = b / AFS_DIR_BLOCKS_PER_PAGE;
- if (index >= folio_nr_pages(folio0)) {
- folio = afs_dir_get_folio(vnode, index);
- if (!folio)
- goto error;
- } else {
- folio = folio0;
- }
-
- block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_pos(folio));
- /* Abandon the edit if we got a callback break. */
- if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
- goto invalidated;
-
- if (b > AFS_DIR_BLOCKS_WITH_CTR ||
- meta->meta.alloc_ctrs[b] <= AFS_DIR_SLOTS_PER_BLOCK - 1 - need_slots) {
- slot = afs_dir_scan_block(block, name, b);
- if (slot >= 0)
- goto found_dirent;
- }
+ meta = afs_dir_find_block(&iter, 0);
+ if (!meta)
+ return;
- kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
+ /* Find the entry in the blob. */
+ found = afs_dir_search_bucket(&iter, name, &fid);
+ if (found < 0) {
+ /* Didn't find the dirent to clobber. Re-download. */
+ trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_noent,
+ 0, 0, 0, 0, name->name);
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_rem_wrong_name);
+ goto out_unmap;
}
- /* Didn't find the dirent to clobber. Download the directory again. */
- trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_noent,
- 0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
- goto out_unmap;
+ entry = found;
+ b = entry / AFS_DIR_SLOTS_PER_BLOCK;
+ slot = entry % AFS_DIR_SLOTS_PER_BLOCK;
-found_dirent:
+ block = afs_dir_find_block(&iter, b);
+ if (!block)
+ goto error;
+ if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
+ goto already_invalidated;
+
+ /* Check and clear the entry. */
de = &block->dirents[slot];
+ if (de->u.valid != 1)
+ goto error_unmap;
trace_afs_edit_dir(vnode, why, afs_edit_dir_delete, b, slot,
ntohl(de->u.vnode), ntohl(de->u.unique),
name->name);
- memset(de, 0, sizeof(*de) * need_slots);
-
/* Adjust the bitmap. */
- afs_clear_contig_bits(block, slot, need_slots);
- kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
+ afs_clear_contig_bits(block, slot, iter.nr_slots);
/* Adjust the allocation counter. */
if (b < AFS_DIR_BLOCKS_WITH_CTR)
- meta->meta.alloc_ctrs[b] += need_slots;
+ meta->meta.alloc_ctrs[b] += iter.nr_slots;
+
+ /* Clear the constituent entries. */
+ next = de->u.hash_next;
+ memset(de, 0, sizeof(*de) * iter.nr_slots);
+ kunmap_local(block);
+
+ /* Adjust the hash chain: if iter->prev_entry is 0, the predecessor is the
+ * hashtable head; otherwise iter->prev_entry is the slot number of the
+ * previous entry.
+ */
+ if (!iter.prev_entry) {
+ __be16 prev_next = meta->meta.hashtable[iter.bucket];
+
+ if (unlikely(prev_next != htons(entry))) {
+ pr_warn("%llx:%llx:%x: not head of chain b=%x p=%x,%x e=%x %*s",
+ vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique,
+ iter.bucket, iter.prev_entry, prev_next, entry,
+ name->len, name->name);
+ goto error;
+ }
+ meta->meta.hashtable[iter.bucket] = next;
+ } else {
+ unsigned int pb = iter.prev_entry / AFS_DIR_SLOTS_PER_BLOCK;
+ unsigned int ps = iter.prev_entry % AFS_DIR_SLOTS_PER_BLOCK;
+ __be16 prev_next;
+
+ pblock = afs_dir_find_block(&iter, pb);
+ if (!pblock)
+ goto error;
+ pde = &pblock->dirents[ps];
+ prev_next = pde->u.hash_next;
+ if (prev_next != htons(entry)) {
+ kunmap_local(pblock);
+ pr_warn("%llx:%llx:%x: not prev in chain b=%x p=%x,%x e=%x %*s",
+ vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique,
+ iter.bucket, iter.prev_entry, prev_next, entry,
+ name->len, name->name);
+ goto error;
+ }
+ pde->u.hash_next = next;
+ kunmap_local(pblock);
+ }
+
+ netfs_single_mark_inode_dirty(&vnode->netfs.inode);
inode_set_iversion_raw(&vnode->netfs.inode, vnode->status.data_version);
afs_stat_v(vnode, n_dir_rm);
@@ -470,41 +504,34 @@ found_dirent:
out_unmap:
kunmap_local(meta);
- folio_unlock(folio0);
- folio_put(folio0);
_leave("");
return;
-invalidated:
+already_invalidated:
+ kunmap_local(block);
trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_inval,
0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
- kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
goto out_unmap;
+error_unmap:
+ kunmap_local(block);
error:
trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_error,
0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
goto out_unmap;
}
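
Removal is the inverse: the search recorded the previous entry in the chain (0 meaning the bucket head), and the edit splices the victim out after verifying that the predecessor really points at it. A userspace model of that splice, using the same simplified stand-in types as above:

#include <stdbool.h>
#include <stdint.h>
#include <arpa/inet.h>

#define NR_BUCKETS	128	/* assumed value of AFS_DIR_HASHTBL_SIZE */

struct dirent_model {
	uint16_t hash_next;	/* big-endian entry number, 0 terminates */
};

/* Splice 'victim' out of its hash chain.  prev_entry == 0 means the bucket
 * head is the predecessor; returning false corresponds to the pr_warn() +
 * re-download path in the kernel.
 */
static bool chain_unlink(uint16_t hashtable[NR_BUCKETS],
			 struct dirent_model *entries,
			 unsigned int bucket,
			 unsigned int prev_entry, unsigned int victim)
{
	uint16_t next = entries[victim].hash_next;

	if (!prev_entry) {
		if (hashtable[bucket] != htons(victim))
			return false;
		hashtable[bucket] = next;
	} else {
		if (entries[prev_entry].hash_next != htons(victim))
			return false;
		entries[prev_entry].hash_next = next;
	}
	entries[victim].hash_next = 0;
	return true;
}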
/*
- * Edit a subdirectory that has been moved between directories to update the
- * ".." entry.
+ * Edit an entry in a directory to update the vnode it refers to. This is also
+ * used to update the ".." entry in a directory.
*/
-void afs_edit_dir_update_dotdot(struct afs_vnode *vnode, struct afs_vnode *new_dvnode,
- enum afs_edit_dir_reason why)
+void afs_edit_dir_update(struct afs_vnode *vnode, const struct qstr *name,
+ struct afs_vnode *new_dvnode, enum afs_edit_dir_reason why)
{
union afs_xdr_dir_block *block;
union afs_xdr_dirent *de;
- struct folio *folio;
+ struct afs_dir_iter iter = { .dvnode = vnode };
unsigned int nr_blocks, b;
- pgoff_t index;
loff_t i_size;
int slot;
@@ -512,39 +539,35 @@ void afs_edit_dir_update_dotdot(struct afs_vnode *vnode, struct afs_vnode *new_d
i_size = i_size_read(&vnode->netfs.inode);
if (i_size < AFS_DIR_BLOCK_SIZE) {
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_upd_bad_size);
return;
}
+
nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
/* Find a block that has sufficient slots available. Each folio
* contains two or more directory blocks.
*/
for (b = 0; b < nr_blocks; b++) {
- index = b / AFS_DIR_BLOCKS_PER_PAGE;
- folio = afs_dir_get_folio(vnode, index);
- if (!folio)
+ block = afs_dir_get_block(&iter, b);
+ if (!block)
goto error;
- block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_pos(folio));
-
/* Abandon the edit if we got a callback break. */
if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
- goto invalidated;
+ goto already_invalidated;
- slot = afs_dir_scan_block(block, &dotdot_name, b);
+ slot = afs_dir_scan_block(block, name, b);
if (slot >= 0)
goto found_dirent;
kunmap_local(block);
- folio_unlock(folio);
- folio_put(folio);
}
/* Didn't find the dirent to clobber. Download the directory again. */
trace_afs_edit_dir(vnode, why, afs_edit_dir_update_nodd,
- 0, 0, 0, 0, "..");
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ 0, 0, 0, 0, name->name);
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_upd_no_dd);
goto out;
found_dirent:
@@ -553,29 +576,73 @@ found_dirent:
de->u.unique = htonl(new_dvnode->fid.unique);
trace_afs_edit_dir(vnode, why, afs_edit_dir_update_dd, b, slot,
- ntohl(de->u.vnode), ntohl(de->u.unique), "..");
+ ntohl(de->u.vnode), ntohl(de->u.unique), name->name);
kunmap_local(block);
- folio_unlock(folio);
- folio_put(folio);
+ netfs_single_mark_inode_dirty(&vnode->netfs.inode);
inode_set_iversion_raw(&vnode->netfs.inode, vnode->status.data_version);
out:
_leave("");
return;
-invalidated:
+already_invalidated:
kunmap_local(block);
- folio_unlock(folio);
- folio_put(folio);
trace_afs_edit_dir(vnode, why, afs_edit_dir_update_inval,
- 0, 0, 0, 0, "..");
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ 0, 0, 0, 0, name->name);
goto out;
error:
trace_afs_edit_dir(vnode, why, afs_edit_dir_update_error,
- 0, 0, 0, 0, "..");
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ 0, 0, 0, 0, name->name);
goto out;
}
+
+/*
+ * Initialise a new directory. We need to fill in the "." and ".." entries.
+ */
+void afs_mkdir_init_dir(struct afs_vnode *dvnode, struct afs_vnode *parent_dvnode)
+{
+ union afs_xdr_dir_block *meta;
+ struct afs_dir_iter iter = { .dvnode = dvnode };
+ union afs_xdr_dirent *de;
+ unsigned int slot = AFS_DIR_RESV_BLOCKS0;
+ loff_t i_size;
+
+ i_size = i_size_read(&dvnode->netfs.inode);
+ if (i_size != AFS_DIR_BLOCK_SIZE) {
+ afs_invalidate_dir(dvnode, afs_dir_invalid_edit_add_bad_size);
+ return;
+ }
+
+ meta = afs_dir_get_block(&iter, 0);
+ if (!meta)
+ return;
+
+ afs_edit_init_block(meta, meta, 0);
+
+ de = &meta->dirents[slot];
+ de->u.valid = 1;
+ de->u.vnode = htonl(dvnode->fid.vnode);
+ de->u.unique = htonl(dvnode->fid.unique);
+ memcpy(de->u.name, ".", 2);
+ trace_afs_edit_dir(dvnode, afs_edit_dir_for_mkdir, afs_edit_dir_mkdir, 0, slot,
+ dvnode->fid.vnode, dvnode->fid.unique, ".");
+ slot++;
+
+ de = &meta->dirents[slot];
+ de->u.valid = 1;
+ de->u.vnode = htonl(parent_dvnode->fid.vnode);
+ de->u.unique = htonl(parent_dvnode->fid.unique);
+ memcpy(de->u.name, "..", 3);
+ trace_afs_edit_dir(dvnode, afs_edit_dir_for_mkdir, afs_edit_dir_mkdir, 0, slot,
+ parent_dvnode->fid.vnode, parent_dvnode->fid.unique, "..");
+
+ afs_set_contig_bits(meta, AFS_DIR_RESV_BLOCKS0, 2);
+ meta->meta.alloc_ctrs[0] -= 2;
+ kunmap_local(meta);
+
+ netfs_single_mark_inode_dirty(&dvnode->netfs.inode);
+ set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags);
+ set_bit(AFS_VNODE_DIR_READ, &dvnode->flags);
+}
diff --git a/fs/afs/dir_search.c b/fs/afs/dir_search.c
new file mode 100644
index 000000000000..d2516e55b5ed
--- /dev/null
+++ b/fs/afs/dir_search.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Search a directory's hash table.
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * https://tools.ietf.org/html/draft-keiser-afs3-directory-object-00
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/iversion.h>
+#include "internal.h"
+#include "afs_fs.h"
+#include "xdr_fs.h"
+
+/*
+ * Calculate the name hash.
+ */
+unsigned int afs_dir_hash_name(const struct qstr *name)
+{
+ const unsigned char *p = name->name;
+ unsigned int hash = 0, i;
+ int bucket;
+
+ for (i = 0; i < name->len; i++)
+ hash = (hash * 173) + p[i];
+ bucket = hash & (AFS_DIR_HASHTBL_SIZE - 1);
+ if (hash > INT_MAX) {
+ bucket = AFS_DIR_HASHTBL_SIZE - bucket;
+ bucket &= (AFS_DIR_HASHTBL_SIZE - 1);
+ }
+ return bucket;
+}
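
This is the classic AFS3 directory hash (multiply-by-173), folded into the bucket count with a quirk for values that would have been negative in the original signed arithmetic. A standalone userspace copy for experimenting with bucket distribution; NR_BUCKETS is an assumed stand-in for AFS_DIR_HASHTBL_SIZE:

#include <stdio.h>
#include <limits.h>

#define NR_BUCKETS 128		/* assumed value of AFS_DIR_HASHTBL_SIZE */

static unsigned int dir_hash_name(const unsigned char *name, size_t len)
{
	unsigned int hash = 0, i;
	int bucket;

	for (i = 0; i < len; i++)
		hash = (hash * 173) + name[i];
	bucket = hash & (NR_BUCKETS - 1);
	if (hash > INT_MAX) {
		/* Hashes that went "negative" as a signed int historically
		 * got reflected back into the table this way.
		 */
		bucket = NR_BUCKETS - bucket;
		bucket &= (NR_BUCKETS - 1);
	}
	return bucket;
}

int main(void)
{
	printf("bucket(\"foo\") = %u\n",
	       dir_hash_name((const unsigned char *)"foo", 3));
	return 0;
}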
+
+/*
+ * Reset a directory iterator.
+ */
+static bool afs_dir_reset_iter(struct afs_dir_iter *iter)
+{
+ unsigned long long i_size = i_size_read(&iter->dvnode->netfs.inode);
+ unsigned int nblocks;
+
+ /* Work out the maximum number of steps we can take. */
+ nblocks = umin(i_size / AFS_DIR_BLOCK_SIZE, AFS_DIR_MAX_BLOCKS);
+ if (!nblocks)
+ return false;
+ iter->loop_check = nblocks * (AFS_DIR_SLOTS_PER_BLOCK - AFS_DIR_RESV_BLOCKS);
+ iter->prev_entry = 0; /* Hash head is previous */
+ return true;
+}
+
+/*
+ * Initialise a directory iterator for looking up a name.
+ */
+bool afs_dir_init_iter(struct afs_dir_iter *iter, const struct qstr *name)
+{
+ iter->nr_slots = afs_dir_calc_slots(name->len);
+ iter->bucket = afs_dir_hash_name(name);
+ return afs_dir_reset_iter(iter);
+}
+
+/*
+ * Get a specific block.
+ */
+union afs_xdr_dir_block *afs_dir_find_block(struct afs_dir_iter *iter, size_t block)
+{
+ struct folio_queue *fq = iter->fq;
+ struct afs_vnode *dvnode = iter->dvnode;
+ struct folio *folio;
+ size_t blpos = block * AFS_DIR_BLOCK_SIZE;
+ size_t blend = (block + 1) * AFS_DIR_BLOCK_SIZE, fpos = iter->fpos;
+ int slot = iter->fq_slot;
+
+ _enter("%zx,%d", block, slot);
+
+ if (iter->block) {
+ kunmap_local(iter->block);
+ iter->block = NULL;
+ }
+
+ if (dvnode->directory_size < blend)
+ goto fail;
+
+ if (!fq || blpos < fpos) {
+ fq = dvnode->directory;
+ slot = 0;
+ fpos = 0;
+ }
+
+ /* Search the folio queue for the folio containing the block... */
+ for (; fq; fq = fq->next) {
+ for (; slot < folioq_count(fq); slot++) {
+ size_t fsize = folioq_folio_size(fq, slot);
+
+ if (blend <= fpos + fsize) {
+ /* ... and then return the mapped block. */
+ folio = folioq_folio(fq, slot);
+ if (WARN_ON_ONCE(folio_pos(folio) != fpos))
+ goto fail;
+ iter->fq = fq;
+ iter->fq_slot = slot;
+ iter->fpos = fpos;
+ iter->block = kmap_local_folio(folio, blpos - fpos);
+ return iter->block;
+ }
+ fpos += fsize;
+ }
+ slot = 0;
+ }
+
+fail:
+ iter->fq = NULL;
+ iter->fq_slot = 0;
+ afs_invalidate_dir(dvnode, afs_dir_invalid_edit_get_block);
+ return NULL;
+}
+
+/*
+ * Search through a directory bucket.
+ */
+int afs_dir_search_bucket(struct afs_dir_iter *iter, const struct qstr *name,
+ struct afs_fid *_fid)
+{
+ const union afs_xdr_dir_block *meta;
+ unsigned int entry;
+ int ret = -ESTALE;
+
+ meta = afs_dir_find_block(iter, 0);
+ if (!meta)
+ return -ESTALE;
+
+ entry = ntohs(meta->meta.hashtable[iter->bucket & (AFS_DIR_HASHTBL_SIZE - 1)]);
+ _enter("%x,%x", iter->bucket, entry);
+
+ while (entry) {
+ const union afs_xdr_dir_block *block;
+ const union afs_xdr_dirent *dire;
+ unsigned int blnum = entry / AFS_DIR_SLOTS_PER_BLOCK;
+ unsigned int slot = entry % AFS_DIR_SLOTS_PER_BLOCK;
+ unsigned int resv = (blnum == 0 ? AFS_DIR_RESV_BLOCKS0 : AFS_DIR_RESV_BLOCKS);
+
+ _debug("search %x", entry);
+
+ if (slot < resv) {
+ kdebug("slot out of range h=%x rs=%2x sl=%2x-%2x",
+ iter->bucket, resv, slot, slot + iter->nr_slots - 1);
+ goto bad;
+ }
+
+ block = afs_dir_find_block(iter, blnum);
+ if (!block)
+ goto bad;
+ dire = &block->dirents[slot];
+
+ if (slot + iter->nr_slots <= AFS_DIR_SLOTS_PER_BLOCK &&
+ memcmp(dire->u.name, name->name, name->len) == 0 &&
+ dire->u.name[name->len] == '\0') {
+ _fid->vnode = ntohl(dire->u.vnode);
+ _fid->unique = ntohl(dire->u.unique);
+ ret = entry;
+ goto found;
+ }
+
+ iter->prev_entry = entry;
+ entry = ntohs(dire->u.hash_next);
+ if (!--iter->loop_check) {
+ kdebug("dir chain loop h=%x", iter->bucket);
+ goto bad;
+ }
+ }
+
+ ret = -ENOENT;
+found:
+ if (iter->block) {
+ kunmap_local(iter->block);
+ iter->block = NULL;
+ }
+
+bad:
+ if (ret == -ESTALE)
+ afs_invalidate_dir(iter->dvnode, afs_dir_invalid_iter_stale);
+ _leave(" = %d", ret);
+ return ret;
+}
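
The bucket walk follows next-entry numbers from the bucket head and bounds the number of hops by the total number of usable slots, so a corrupted, cyclic chain fails cleanly rather than looping forever. A simplified userspace model, with stand-in structures and the kernel's error codes noted in comments:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct dirent_model {
	const char	*name;		/* NUL-terminated for simplicity */
	uint16_t	hash_next;	/* big-endian entry number, 0 = end */
};

/* Walk one hash bucket, returning the entry number on a match, -2 if the
 * name isn't there (the kernel returns -ENOENT) or -1 if the chain loops
 * (the kernel invalidates the directory and returns -ESTALE).
 */
static int chain_search(const uint16_t *hashtable,
			const struct dirent_model *entries,
			unsigned int bucket, unsigned int max_entries,
			const char *name)
{
	unsigned int entry = ntohs(hashtable[bucket]);
	unsigned int loop_check = max_entries;

	while (entry) {
		if (strcmp(entries[entry].name, name) == 0)
			return entry;
		entry = ntohs(entries[entry].hash_next);
		if (!--loop_check)
			return -1;
	}
	return -2;
}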
+
+/*
+ * Search the appropriate hash chain in the contents of an AFS directory.
+ */
+int afs_dir_search(struct afs_vnode *dvnode, const struct qstr *name,
+ struct afs_fid *_fid, afs_dataversion_t *_dir_version)
+{
+ struct afs_dir_iter iter = { .dvnode = dvnode, };
+ int ret, retry_limit = 3;
+
+ _enter("{%lu},,,", dvnode->netfs.inode.i_ino);
+
+ if (!afs_dir_init_iter(&iter, name))
+ return -ENOENT;
+ do {
+ if (--retry_limit < 0) {
+ pr_warn("afs_read_dir(): Too many retries\n");
+ ret = -ESTALE;
+ break;
+ }
+ ret = afs_read_dir(dvnode, NULL);
+ if (ret < 0) {
+ if (ret != -ESTALE)
+ break;
+ if (test_bit(AFS_VNODE_DELETED, &dvnode->flags)) {
+ ret = -ESTALE;
+ break;
+ }
+ continue;
+ }
+ *_dir_version = inode_peek_iversion_raw(&dvnode->netfs.inode);
+
+ ret = afs_dir_search_bucket(&iter, name, _fid);
+ up_read(&dvnode->validate_lock);
+ if (ret == -ESTALE)
+ afs_dir_reset_iter(&iter);
+ } while (ret == -ESTALE);
+
+ _leave(" = %d", ret);
+ return ret;
+}
diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c
index a1e581946b93..014495d4b868 100644
--- a/fs/afs/dir_silly.c
+++ b/fs/afs/dir_silly.c
@@ -69,6 +69,12 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
if (IS_ERR(op))
return PTR_ERR(op);
+ op->more_files = kvcalloc(2, sizeof(struct afs_vnode_param), GFP_KERNEL);
+ if (!op->more_files) {
+ afs_put_operation(op);
+ return -ENOMEM;
+ }
+
afs_op_set_vnode(op, 0, dvnode);
afs_op_set_vnode(op, 1, dvnode);
op->file[0].dv_delta = 1;
@@ -77,6 +83,11 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
op->file[1].modification = true;
op->file[0].update_ctime = true;
op->file[1].update_ctime = true;
+ op->more_files[0].vnode = AFS_FS_I(d_inode(old));
+ op->more_files[0].speculative = true;
+ op->more_files[1].vnode = AFS_FS_I(d_inode(new));
+ op->more_files[1].speculative = true;
+ op->nr_files = 4;
op->dentry = old;
op->dentry_2 = new;
@@ -113,16 +124,14 @@ int afs_sillyrename(struct afs_vnode *dvnode, struct afs_vnode *vnode,
sdentry = NULL;
do {
- int slen;
-
dput(sdentry);
sillycounter++;
/* Create a silly name. Note that the ".__afs" prefix is
* understood by the salvager and must not be changed.
*/
- slen = scnprintf(silly, sizeof(silly), ".__afs%04X", sillycounter);
- sdentry = lookup_one_len(silly, dentry->d_parent, slen);
+ scnprintf(silly, sizeof(silly), ".__afs%04X", sillycounter);
+ sdentry = lookup_noperm(&QSTR(silly), dentry->d_parent);
/* N.B. Better to return EBUSY here ... it could be dangerous
* to delete the file while it's in use.
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index c4d2711e20ad..aa56e8951e03 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -10,16 +10,19 @@
#include <linux/dns_resolver.h>
#include "internal.h"
-static atomic_t afs_autocell_ino;
+#define AFS_MIN_DYNROOT_CELL_INO 4 /* Allow for ., .., @cell, .@cell */
+#define AFS_MAX_DYNROOT_CELL_INO ((unsigned int)INT_MAX)
+
+static struct dentry *afs_lookup_atcell(struct inode *dir, struct dentry *dentry, ino_t ino);
/*
* iget5() comparator for inode created by autocell operations
- *
- * These pseudo inodes don't match anything.
*/
static int afs_iget5_pseudo_test(struct inode *inode, void *opaque)
{
- return 0;
+ struct afs_fid *fid = opaque;
+
+ return inode->i_ino == fid->vnode;
}
/*
@@ -39,28 +42,16 @@ static int afs_iget5_pseudo_set(struct inode *inode, void *opaque)
}
/*
- * Create an inode for a dynamic root directory or an autocell dynamic
- * automount dir.
+ * Create an inode for an autocell dynamic automount dir.
*/
-struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
+static struct inode *afs_iget_pseudo_dir(struct super_block *sb, ino_t ino)
{
- struct afs_super_info *as = AFS_FS_S(sb);
struct afs_vnode *vnode;
struct inode *inode;
- struct afs_fid fid = {};
+ struct afs_fid fid = { .vnode = ino, .unique = 1, };
_enter("");
- if (as->volume)
- fid.vid = as->volume->vid;
- if (root) {
- fid.vnode = 1;
- fid.unique = 1;
- } else {
- fid.vnode = atomic_inc_return(&afs_autocell_ino);
- fid.unique = 0;
- }
-
inode = iget5_locked(sb, fid.vnode,
afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid);
if (!inode) {
@@ -73,163 +64,76 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
vnode = AFS_FS_I(inode);
- /* there shouldn't be an existing inode */
- BUG_ON(!(inode->i_state & I_NEW));
-
- netfs_inode_init(&vnode->netfs, NULL, false);
- inode->i_size = 0;
- inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
- if (root) {
- inode->i_op = &afs_dynroot_inode_operations;
- inode->i_fop = &simple_dir_operations;
- } else {
- inode->i_op = &afs_autocell_inode_operations;
- }
- set_nlink(inode, 2);
- inode->i_uid = GLOBAL_ROOT_UID;
- inode->i_gid = GLOBAL_ROOT_GID;
- simple_inode_init_ts(inode);
- inode->i_blocks = 0;
- inode->i_generation = 0;
-
- set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
- if (!root) {
+ if (inode_state_read_once(inode) & I_NEW) {
+ netfs_inode_init(&vnode->netfs, NULL, false);
+ simple_inode_init_ts(inode);
+ set_nlink(inode, 2);
+ inode->i_size = 0;
+ inode->i_mode = S_IFDIR | 0555;
+ inode->i_op = &afs_autocell_inode_operations;
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ inode->i_blocks = 0;
+ inode->i_generation = 0;
+ inode->i_flags |= S_AUTOMOUNT | S_NOATIME;
+
+ set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
- inode->i_flags |= S_AUTOMOUNT;
- }
- inode->i_flags |= S_NOATIME;
- unlock_new_inode(inode);
+ unlock_new_inode(inode);
+ }
_leave(" = %p", inode);
return inode;
}
/*
- * Probe to see if a cell may exist. This prevents positive dentries from
- * being created unnecessarily.
+ * Try to automount the mountpoint with a pseudo directory if the autocell
+ * option is set.
*/
-static int afs_probe_cell_name(struct dentry *dentry)
+static struct dentry *afs_dynroot_lookup_cell(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
{
- struct afs_cell *cell;
+ struct afs_cell *cell = NULL;
struct afs_net *net = afs_d2net(dentry);
+ struct inode *inode = NULL;
const char *name = dentry->d_name.name;
size_t len = dentry->d_name.len;
- char *result = NULL;
- int ret;
+ bool dotted = false;
+ int ret = -ENOENT;
/* Names prefixed with a dot are R/W mounts. */
if (name[0] == '.') {
- if (len == 1)
- return -EINVAL;
name++;
len--;
+ dotted = true;
}
- cell = afs_find_cell(net, name, len, afs_cell_trace_use_probe);
- if (!IS_ERR(cell)) {
- afs_unuse_cell(net, cell, afs_cell_trace_unuse_probe);
- return 0;
- }
-
- ret = dns_query(net->net, "afsdb", name, len, "srv=1",
- &result, NULL, false);
- if (ret == -ENODATA || ret == -ENOKEY || ret == 0)
- ret = -ENOENT;
- if (ret > 0 && ret >= sizeof(struct dns_server_list_v1_header)) {
- struct dns_server_list_v1_header *v1 = (void *)result;
-
- if (v1->hdr.zero == 0 &&
- v1->hdr.content == DNS_PAYLOAD_IS_SERVER_LIST &&
- v1->hdr.version == 1 &&
- (v1->status != DNS_LOOKUP_GOOD &&
- v1->status != DNS_LOOKUP_GOOD_WITH_BAD))
- return -ENOENT;
-
+ cell = afs_lookup_cell(net, name, len, NULL,
+ AFS_LOOKUP_CELL_DYNROOT,
+ afs_cell_trace_use_lookup_dynroot);
+ if (IS_ERR(cell)) {
+ ret = PTR_ERR(cell);
+ goto out_no_cell;
}
- kfree(result);
- return ret;
-}
-
-/*
- * Try to auto mount the mountpoint with pseudo directory, if the autocell
- * operation is setted.
- */
-struct inode *afs_try_auto_mntpt(struct dentry *dentry, struct inode *dir)
-{
- struct afs_vnode *vnode = AFS_FS_I(dir);
- struct inode *inode;
- int ret = -ENOENT;
-
- _enter("%p{%pd}, {%llx:%llu}",
- dentry, dentry, vnode->fid.vid, vnode->fid.vnode);
-
- if (!test_bit(AFS_VNODE_AUTOCELL, &vnode->flags))
- goto out;
-
- ret = afs_probe_cell_name(dentry);
- if (ret < 0)
- goto out;
-
- inode = afs_iget_pseudo_dir(dir->i_sb, false);
+ inode = afs_iget_pseudo_dir(dir->i_sb, cell->dynroot_ino * 2 + dotted);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
goto out;
}
- _leave("= %p", inode);
- return inode;
+ dentry->d_fsdata = cell;
+ return d_splice_alias(inode, dentry);
out:
- _leave("= %d", ret);
+ afs_unuse_cell(cell, afs_cell_trace_unuse_lookup_dynroot);
+out_no_cell:
+ if (!inode)
+ return d_splice_alias(inode, dentry);
return ret == -ENOENT ? NULL : ERR_PTR(ret);
}
/*
- * Look up @cell in a dynroot directory. This is a substitution for the
- * local cell name for the net namespace.
- */
-static struct dentry *afs_lookup_atcell(struct dentry *dentry)
-{
- struct afs_cell *cell;
- struct afs_net *net = afs_d2net(dentry);
- struct dentry *ret;
- char *name;
- int len;
-
- if (!net->ws_cell)
- return ERR_PTR(-ENOENT);
-
- ret = ERR_PTR(-ENOMEM);
- name = kmalloc(AFS_MAXCELLNAME + 1, GFP_KERNEL);
- if (!name)
- goto out_p;
-
- down_read(&net->cells_lock);
- cell = net->ws_cell;
- if (cell) {
- len = cell->name_len;
- memcpy(name, cell->name, len + 1);
- }
- up_read(&net->cells_lock);
-
- ret = ERR_PTR(-ENOENT);
- if (!cell)
- goto out_n;
-
- ret = lookup_one_len(name, dentry->d_parent, len);
-
- /* We don't want to d_add() the @cell dentry here as we don't want to
- * the cached dentry to hide changes to the local cell name.
- */
-
-out_n:
- kfree(name);
-out_p:
- return ret;
-}
-
-/*
* Look up an entry in a dynroot directory.
*/
static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentry,
@@ -237,8 +141,6 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr
{
_enter("%pd", dentry);
- ASSERTCMP(d_inode(dentry), ==, NULL);
-
if (flags & LOOKUP_CREATE)
return ERR_PTR(-EOPNOTSUPP);
@@ -249,141 +151,256 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr
if (dentry->d_name.len == 5 &&
memcmp(dentry->d_name.name, "@cell", 5) == 0)
- return afs_lookup_atcell(dentry);
+ return afs_lookup_atcell(dir, dentry, 2);
+
+ if (dentry->d_name.len == 6 &&
+ memcmp(dentry->d_name.name, ".@cell", 6) == 0)
+ return afs_lookup_atcell(dir, dentry, 3);
- return d_splice_alias(afs_try_auto_mntpt(dentry, dir), dentry);
+ return afs_dynroot_lookup_cell(dir, dentry, flags);
}
const struct inode_operations afs_dynroot_inode_operations = {
.lookup = afs_dynroot_lookup,
};
+static void afs_dynroot_d_release(struct dentry *dentry)
+{
+ struct afs_cell *cell = dentry->d_fsdata;
+
+ afs_unuse_cell(cell, afs_cell_trace_unuse_dynroot_mntpt);
+}
+
+/*
+ * Keep @cell symlink dentries around, but only keep cell autodirs when they're
+ * being used.
+ */
+static int afs_dynroot_delete_dentry(const struct dentry *dentry)
+{
+ const struct qstr *name = &dentry->d_name;
+
+ if (name->len == 5 && memcmp(name->name, "@cell", 5) == 0)
+ return 0;
+ if (name->len == 6 && memcmp(name->name, ".@cell", 6) == 0)
+ return 0;
+ return 1;
+}
+
const struct dentry_operations afs_dynroot_dentry_operations = {
- .d_delete = always_delete_dentry,
- .d_release = afs_d_release,
+ .d_delete = afs_dynroot_delete_dentry,
+ .d_release = afs_dynroot_d_release,
.d_automount = afs_d_automount,
};
+static void afs_atcell_delayed_put_cell(void *arg)
+{
+ struct afs_cell *cell = arg;
+
+ afs_put_cell(cell, afs_cell_trace_put_atcell);
+}
+
/*
- * Create a manually added cell mount directory.
- * - The caller must hold net->proc_cells_lock
+ * Read @cell or .@cell symlinks.
*/
-int afs_dynroot_mkdir(struct afs_net *net, struct afs_cell *cell)
+static const char *afs_atcell_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *done)
{
- struct super_block *sb = net->dynroot_sb;
- struct dentry *root, *subdir;
- int ret;
+ struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct afs_cell *cell;
+ struct afs_net *net = afs_i2net(inode);
+ const char *name;
+ bool dotted = vnode->fid.vnode == 3;
- if (!sb || atomic_read(&sb->s_active) == 0)
- return 0;
+ if (!rcu_access_pointer(net->ws_cell))
+ return ERR_PTR(-ENOENT);
- /* Let the ->lookup op do the creation */
- root = sb->s_root;
- inode_lock(root->d_inode);
- subdir = lookup_one_len(cell->name, root, cell->name_len);
- if (IS_ERR(subdir)) {
- ret = PTR_ERR(subdir);
- goto unlock;
+ if (!dentry) {
+ /* We're in RCU-pathwalk. */
+ cell = rcu_dereference(net->ws_cell);
+ if (dotted)
+ name = cell->name - 1;
+ else
+ name = cell->name;
+ /* Shouldn't need to set a delayed call. */
+ return name;
}
- /* Note that we're retaining an extra ref on the dentry */
- subdir->d_fsdata = (void *)1UL;
- ret = 0;
-unlock:
- inode_unlock(root->d_inode);
- return ret;
+ down_read(&net->cells_lock);
+
+ cell = rcu_dereference_protected(net->ws_cell, lockdep_is_held(&net->cells_lock));
+ if (dotted)
+ name = cell->name - 1;
+ else
+ name = cell->name;
+ afs_get_cell(cell, afs_cell_trace_get_atcell);
+ set_delayed_call(done, afs_atcell_delayed_put_cell, cell);
+
+ up_read(&net->cells_lock);
+ return name;
}
+static const struct inode_operations afs_atcell_inode_operations = {
+ .get_link = afs_atcell_get_link,
+};
+
/*
- * Remove a manually added cell mount directory.
- * - The caller must hold net->proc_cells_lock
+ * Create an inode for the @cell or .@cell symlinks.
*/
-void afs_dynroot_rmdir(struct afs_net *net, struct afs_cell *cell)
+static struct dentry *afs_lookup_atcell(struct inode *dir, struct dentry *dentry, ino_t ino)
{
- struct super_block *sb = net->dynroot_sb;
- struct dentry *root, *subdir;
+ struct afs_vnode *vnode;
+ struct inode *inode;
+ struct afs_fid fid = { .vnode = ino, .unique = 1, };
- if (!sb || atomic_read(&sb->s_active) == 0)
- return;
+ inode = iget5_locked(dir->i_sb, fid.vnode,
+ afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
- root = sb->s_root;
- inode_lock(root->d_inode);
+ vnode = AFS_FS_I(inode);
- /* Don't want to trigger a lookup call, which will re-add the cell */
- subdir = try_lookup_one_len(cell->name, root, cell->name_len);
- if (IS_ERR_OR_NULL(subdir)) {
- _debug("lookup %ld", PTR_ERR(subdir));
- goto no_dentry;
+ if (inode_state_read_once(inode) & I_NEW) {
+ netfs_inode_init(&vnode->netfs, NULL, false);
+ simple_inode_init_ts(inode);
+ set_nlink(inode, 1);
+ inode->i_size = 0;
+ inode->i_mode = S_IFLNK | 0555;
+ inode->i_op = &afs_atcell_inode_operations;
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ inode->i_blocks = 0;
+ inode->i_generation = 0;
+ inode->i_flags |= S_NOATIME;
+
+ unlock_new_inode(inode);
}
+ return d_splice_alias(inode, dentry);
+}
- _debug("rmdir %pd %u", subdir, d_count(subdir));
+/*
+ * Transcribe the cell database into readdir content under net->cells_lock.
+ * Each cell produces two entries, one prefixed with a dot and one not.
+ */
+static int afs_dynroot_readdir_cells(struct afs_net *net, struct dir_context *ctx)
+{
+ const struct afs_cell *cell;
+ loff_t newpos;
+
+ _enter("%llu", ctx->pos);
+
+ for (;;) {
+ unsigned int ix = ctx->pos >> 1;
+
+ cell = idr_get_next(&net->cells_dyn_ino, &ix);
+ if (!cell)
+ return 0;
+ if (READ_ONCE(cell->state) == AFS_CELL_REMOVING ||
+ READ_ONCE(cell->state) == AFS_CELL_DEAD) {
+ ctx->pos += 2;
+ ctx->pos &= ~1;
+ continue;
+ }
+
+ newpos = ix << 1;
+ if (newpos > ctx->pos)
+ ctx->pos = newpos;
- if (subdir->d_fsdata) {
- _debug("unpin %u", d_count(subdir));
- subdir->d_fsdata = NULL;
- dput(subdir);
+ _debug("pos %llu -> cell %u", ctx->pos, cell->dynroot_ino);
+
+ if ((ctx->pos & 1) == 0) {
+ if (!dir_emit(ctx, cell->name, cell->name_len,
+ cell->dynroot_ino, DT_DIR))
+ return 0;
+ ctx->pos++;
+ }
+ if ((ctx->pos & 1) == 1) {
+ if (!dir_emit(ctx, cell->name - 1, cell->name_len + 1,
+ cell->dynroot_ino + 1, DT_DIR))
+ return 0;
+ ctx->pos++;
+ }
}
- dput(subdir);
-no_dentry:
- inode_unlock(root->d_inode);
- _leave("");
+ return 0;
}
/*
- * Populate a newly created dynamic root with cell names.
+ * Read the AFS dynamic root directory. This produces a list of cellnames,
+ * dotted and undotted, along with @cell and .@cell links if configured.
*/
-int afs_dynroot_populate(struct super_block *sb)
+static int afs_dynroot_readdir(struct file *file, struct dir_context *ctx)
{
- struct afs_cell *cell;
- struct afs_net *net = afs_sb2net(sb);
- int ret;
+ struct afs_net *net = afs_d2net(file->f_path.dentry);
+ int ret = 0;
- mutex_lock(&net->proc_cells_lock);
+ if (!dir_emit_dots(file, ctx))
+ return 0;
- net->dynroot_sb = sb;
- hlist_for_each_entry(cell, &net->proc_cells, proc_link) {
- ret = afs_dynroot_mkdir(net, cell);
- if (ret < 0)
- goto error;
+ if (ctx->pos == 2) {
+ if (rcu_access_pointer(net->ws_cell) &&
+ !dir_emit(ctx, "@cell", 5, 2, DT_LNK))
+ return 0;
+ ctx->pos = 3;
+ }
+ if (ctx->pos == 3) {
+ if (rcu_access_pointer(net->ws_cell) &&
+ !dir_emit(ctx, ".@cell", 6, 3, DT_LNK))
+ return 0;
+ ctx->pos = 4;
}
- ret = 0;
-out:
- mutex_unlock(&net->proc_cells_lock);
+ if ((unsigned long long)ctx->pos <= AFS_MAX_DYNROOT_CELL_INO) {
+ down_read(&net->cells_lock);
+ ret = afs_dynroot_readdir_cells(net, ctx);
+ up_read(&net->cells_lock);
+ }
return ret;
-
-error:
- net->dynroot_sb = NULL;
- goto out;
}
+static const struct file_operations afs_dynroot_file_operations = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .iterate_shared = afs_dynroot_readdir,
+ .fsync = noop_fsync,
+};
+
/*
- * When a dynamic root that's in the process of being destroyed, depopulate it
- * of pinned directories.
+ * Create an inode for a dynamic root directory.
*/
-void afs_dynroot_depopulate(struct super_block *sb)
+struct inode *afs_dynroot_iget_root(struct super_block *sb)
{
- struct afs_net *net = afs_sb2net(sb);
- struct dentry *root = sb->s_root, *subdir;
-
- /* Prevent more subdirs from being created */
- mutex_lock(&net->proc_cells_lock);
- if (net->dynroot_sb == sb)
- net->dynroot_sb = NULL;
- mutex_unlock(&net->proc_cells_lock);
-
- if (root) {
- struct hlist_node *n;
- inode_lock(root->d_inode);
-
- /* Remove all the pins for dirs created for manually added cells */
- hlist_for_each_entry_safe(subdir, n, &root->d_children, d_sib) {
- if (subdir->d_fsdata) {
- subdir->d_fsdata = NULL;
- dput(subdir);
- }
- }
+ struct afs_super_info *as = AFS_FS_S(sb);
+ struct afs_vnode *vnode;
+ struct inode *inode;
+ struct afs_fid fid = { .vid = 0, .vnode = 1, .unique = 1,};
+
+ if (as->volume)
+ fid.vid = as->volume->vid;
- inode_unlock(root->d_inode);
+ inode = iget5_locked(sb, fid.vnode,
+ afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ vnode = AFS_FS_I(inode);
+
+ /* there shouldn't be an existing inode */
+ if (inode_state_read_once(inode) & I_NEW) {
+ netfs_inode_init(&vnode->netfs, NULL, false);
+ simple_inode_init_ts(inode);
+ set_nlink(inode, 2);
+ inode->i_size = 0;
+ inode->i_mode = S_IFDIR | 0555;
+ inode->i_op = &afs_dynroot_inode_operations;
+ inode->i_fop = &afs_dynroot_file_operations;
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ inode->i_blocks = 0;
+ inode->i_generation = 0;
+ inode->i_flags |= S_NOATIME;
+
+ set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
+ unlock_new_inode(inode);
}
+ _leave(" = %p", inode);
+ return inode;
}
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 6762eff97517..f66a92294284 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -19,8 +19,7 @@
#include <trace/events/netfs.h>
#include "internal.h"
-static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
-static int afs_symlink_read_folio(struct file *file, struct folio *folio);
+static int afs_file_mmap_prepare(struct vm_area_desc *desc);
static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos,
@@ -36,7 +35,7 @@ const struct file_operations afs_file_operations = {
.llseek = generic_file_llseek,
.read_iter = afs_file_read_iter,
.write_iter = netfs_file_write_iter,
- .mmap = afs_file_mmap,
+ .mmap_prepare = afs_file_mmap_prepare,
.splice_read = afs_file_splice_read,
.splice_write = iter_file_splice_write,
.fsync = afs_fsync,
@@ -61,13 +60,6 @@ const struct address_space_operations afs_file_aops = {
.writepages = afs_writepages,
};
-const struct address_space_operations afs_symlink_aops = {
- .read_folio = afs_symlink_read_folio,
- .release_folio = netfs_release_folio,
- .invalidate_folio = netfs_invalidate_folio,
- .migrate_folio = filemap_migrate_folio,
-};
-
static const struct vm_operations_struct afs_vm_ops = {
.open = afs_vm_open,
.close = afs_vm_close,
@@ -208,49 +200,12 @@ int afs_release(struct inode *inode, struct file *file)
return ret;
}
-/*
- * Allocate a new read record.
- */
-struct afs_read *afs_alloc_read(gfp_t gfp)
-{
- struct afs_read *req;
-
- req = kzalloc(sizeof(struct afs_read), gfp);
- if (req)
- refcount_set(&req->usage, 1);
-
- return req;
-}
-
-/*
- * Dispose of a ref to a read record.
- */
-void afs_put_read(struct afs_read *req)
-{
- if (refcount_dec_and_test(&req->usage)) {
- if (req->cleanup)
- req->cleanup(req);
- key_put(req->key);
- kfree(req);
- }
-}
-
static void afs_fetch_data_notify(struct afs_operation *op)
{
- struct afs_read *req = op->fetch.req;
- struct netfs_io_subrequest *subreq = req->subreq;
- int error = afs_op_error(op);
-
- req->error = error;
- if (subreq) {
- subreq->rreq->i_size = req->file_size;
- if (req->pos + req->actual_len >= req->file_size)
- __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
- netfs_read_subreq_terminated(subreq, error, false);
- req->subreq = NULL;
- } else if (req->done) {
- req->done(req);
- }
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
+
+ subreq->error = afs_op_error(op);
+ netfs_read_subreq_terminated(subreq);
}
static void afs_fetch_data_success(struct afs_operation *op)
@@ -260,7 +215,7 @@ static void afs_fetch_data_success(struct afs_operation *op)
_enter("op=%08x", op->debug_id);
afs_vnode_commit_status(op, &op->file[0]);
afs_stat_v(vnode, n_fetches);
- atomic_long_add(op->fetch.req->actual_len, &op->net->n_fetch_bytes);
+ atomic_long_add(op->fetch.subreq->transferred, &op->net->n_fetch_bytes);
afs_fetch_data_notify(op);
}
@@ -270,107 +225,188 @@ static void afs_fetch_data_aborted(struct afs_operation *op)
afs_fetch_data_notify(op);
}
-static void afs_fetch_data_put(struct afs_operation *op)
-{
- op->fetch.req->error = afs_op_error(op);
- afs_put_read(op->fetch.req);
-}
-
-static const struct afs_operation_ops afs_fetch_data_operation = {
+const struct afs_operation_ops afs_fetch_data_operation = {
.issue_afs_rpc = afs_fs_fetch_data,
.issue_yfs_rpc = yfs_fs_fetch_data,
.success = afs_fetch_data_success,
.aborted = afs_fetch_data_aborted,
.failed = afs_fetch_data_notify,
- .put = afs_fetch_data_put,
};
+static void afs_issue_read_call(struct afs_operation *op)
+{
+ op->call_responded = false;
+ op->call_error = 0;
+ op->call_abort_code = 0;
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags))
+ yfs_fs_fetch_data(op);
+ else
+ afs_fs_fetch_data(op);
+}
+
+static void afs_end_read(struct afs_operation *op)
+{
+ if (op->call_responded && op->server)
+ set_bit(AFS_SERVER_FL_RESPONDING, &op->server->flags);
+
+ if (!afs_op_error(op))
+ afs_fetch_data_success(op);
+ else if (op->cumul_error.aborted)
+ afs_fetch_data_aborted(op);
+ else
+ afs_fetch_data_notify(op);
+
+ afs_end_vnode_operation(op);
+ afs_put_operation(op);
+}
+
+/*
+ * Perform I/O processing on an asynchronous call. The work item carries a ref
+ * to the call struct that we either need to release or to pass on.
+ */
+static void afs_read_receive(struct afs_call *call)
+{
+ struct afs_operation *op = call->op;
+ enum afs_call_state state;
+
+ _enter("");
+
+ state = READ_ONCE(call->state);
+ if (state == AFS_CALL_COMPLETE)
+ return;
+ trace_afs_read_recv(op, call);
+
+ while (state < AFS_CALL_COMPLETE && READ_ONCE(call->need_attention)) {
+ WRITE_ONCE(call->need_attention, false);
+ afs_deliver_to_call(call);
+ state = READ_ONCE(call->state);
+ }
+
+ if (state < AFS_CALL_COMPLETE) {
+ netfs_read_subreq_progress(op->fetch.subreq);
+ if (rxrpc_kernel_check_life(call->net->socket, call->rxcall))
+ return;
+ /* rxrpc terminated the call. */
+ afs_set_call_complete(call, call->error, call->abort_code);
+ }
+
+ op->call_abort_code = call->abort_code;
+ op->call_error = call->error;
+ op->call_responded = call->responded;
+ op->call = NULL;
+ call->op = NULL;
+ afs_put_call(call);
+
+ /* If the call failed, then we need to crank the server rotation
+ * handle and try the next.
+ */
+ if (afs_select_fileserver(op)) {
+ afs_issue_read_call(op);
+ return;
+ }
+
+ afs_end_read(op);
+}
+
+void afs_fetch_data_async_rx(struct work_struct *work)
+{
+ struct afs_call *call = container_of(work, struct afs_call, async_work);
+
+ afs_read_receive(call);
+ afs_put_call(call);
+}
+
+void afs_fetch_data_immediate_cancel(struct afs_call *call)
+{
+ if (call->async) {
+ afs_get_call(call, afs_call_trace_wake);
+ if (!queue_work(afs_async_calls, &call->async_work))
+ afs_deferred_put_call(call);
+ flush_work(&call->async_work);
+ }
+}
+
/*
* Fetch file data from the volume.
*/
-int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req)
+static void afs_issue_read(struct netfs_io_subrequest *subreq)
{
struct afs_operation *op;
+ struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
+ struct key *key = subreq->rreq->netfs_priv;
_enter("%s{%llx:%llu.%u},%x,,,",
vnode->volume->name,
vnode->fid.vid,
vnode->fid.vnode,
vnode->fid.unique,
- key_serial(req->key));
+ key_serial(key));
- op = afs_alloc_operation(req->key, vnode->volume);
+ op = afs_alloc_operation(key, vnode->volume);
if (IS_ERR(op)) {
- if (req->subreq)
- netfs_read_subreq_terminated(req->subreq, PTR_ERR(op), false);
- return PTR_ERR(op);
+ subreq->error = PTR_ERR(op);
+ netfs_read_subreq_terminated(subreq);
+ return;
}
afs_op_set_vnode(op, 0, vnode);
- op->fetch.req = afs_get_read(req);
+ op->fetch.subreq = subreq;
op->ops = &afs_fetch_data_operation;
- return afs_do_sync_operation(op);
-}
-
-static void afs_read_worker(struct work_struct *work)
-{
- struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work);
- struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
- struct afs_read *fsreq;
-
- fsreq = afs_alloc_read(GFP_NOFS);
- if (!fsreq)
- return netfs_read_subreq_terminated(subreq, -ENOMEM, false);
-
- fsreq->subreq = subreq;
- fsreq->pos = subreq->start + subreq->transferred;
- fsreq->len = subreq->len - subreq->transferred;
- fsreq->key = key_get(subreq->rreq->netfs_priv);
- fsreq->vnode = vnode;
- fsreq->iter = &subreq->io_iter;
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
- afs_fetch_data(fsreq->vnode, fsreq);
- afs_put_read(fsreq);
-}
-static void afs_issue_read(struct netfs_io_subrequest *subreq)
-{
- INIT_WORK(&subreq->work, afs_read_worker);
- queue_work(system_long_wq, &subreq->work);
-}
-
-static int afs_symlink_read_folio(struct file *file, struct folio *folio)
-{
- struct afs_vnode *vnode = AFS_FS_I(folio->mapping->host);
- struct afs_read *fsreq;
- int ret;
+ if (subreq->rreq->origin == NETFS_READAHEAD ||
+ subreq->rreq->iocb) {
+ op->flags |= AFS_OPERATION_ASYNC;
- fsreq = afs_alloc_read(GFP_NOFS);
- if (!fsreq)
- return -ENOMEM;
+ if (!afs_begin_vnode_operation(op)) {
+ subreq->error = afs_put_operation(op);
+ netfs_read_subreq_terminated(subreq);
+ return;
+ }
- fsreq->pos = folio_pos(folio);
- fsreq->len = folio_size(folio);
- fsreq->vnode = vnode;
- fsreq->iter = &fsreq->def_iter;
- iov_iter_xarray(&fsreq->def_iter, ITER_DEST, &folio->mapping->i_pages,
- fsreq->pos, fsreq->len);
+ if (!afs_select_fileserver(op)) {
+ afs_end_read(op);
+ return;
+ }
- ret = afs_fetch_data(fsreq->vnode, fsreq);
- if (ret == 0)
- folio_mark_uptodate(folio);
- folio_unlock(folio);
- return ret;
+ afs_issue_read_call(op);
+ } else {
+ afs_do_sync_operation(op);
+ }
}
static int afs_init_request(struct netfs_io_request *rreq, struct file *file)
{
+ struct afs_vnode *vnode = AFS_FS_I(rreq->inode);
+
if (file)
rreq->netfs_priv = key_get(afs_file_key(file));
rreq->rsize = 256 * 1024;
rreq->wsize = 256 * 1024 * 1024;
+
+ switch (rreq->origin) {
+ case NETFS_READ_SINGLE:
+ if (!file) {
+ struct key *key = afs_request_key(vnode->volume->cell);
+
+ if (IS_ERR(key))
+ return PTR_ERR(key);
+ rreq->netfs_priv = key;
+ }
+ break;
+ case NETFS_WRITEBACK:
+ case NETFS_WRITETHROUGH:
+ case NETFS_UNBUFFERED_WRITE:
+ case NETFS_DIO_WRITE:
+ if (S_ISREG(rreq->inode->i_mode))
+ rreq->io_streams[0].avail = true;
+ break;
+ case NETFS_WRITEBACK_SINGLE:
+ default:
+ break;
+ }
return 0;
}
@@ -456,16 +492,16 @@ static void afs_drop_open_mmap(struct afs_vnode *vnode)
/*
* Handle setting up a memory mapping on an AFS file.
*/
-static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int afs_file_mmap_prepare(struct vm_area_desc *desc)
{
- struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(desc->file));
int ret;
afs_add_open_mmap(vnode);
- ret = generic_file_mmap(file, vma);
+ ret = generic_file_mmap_prepare(desc);
if (ret == 0)
- vma->vm_ops = &afs_vm_ops;
+ desc->vm_ops = &afs_vm_ops;
else
afs_drop_open_mmap(vnode);
return ret;
diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
index 428721bbe4f6..8418813ee043 100644
--- a/fs/afs/fs_operation.c
+++ b/fs/afs/fs_operation.c
@@ -49,6 +49,105 @@ struct afs_operation *afs_alloc_operation(struct key *key, struct afs_volume *vo
return op;
}
+struct afs_io_locker {
+ struct list_head link;
+ struct task_struct *task;
+ unsigned long have_lock;
+};
+
+/*
+ * Unlock the I/O lock on a vnode.
+ */
+static void afs_unlock_for_io(struct afs_vnode *vnode)
+{
+ struct afs_io_locker *locker;
+
+ spin_lock(&vnode->lock);
+ locker = list_first_entry_or_null(&vnode->io_lock_waiters,
+ struct afs_io_locker, link);
+ if (locker) {
+ list_del(&locker->link);
+ smp_store_release(&locker->have_lock, 1); /* The unlock barrier. */
+ smp_mb__after_atomic(); /* Store have_lock before task state */
+ wake_up_process(locker->task);
+ } else {
+ clear_bit(AFS_VNODE_IO_LOCK, &vnode->flags);
+ }
+ spin_unlock(&vnode->lock);
+}
+
+/*
+ * Lock the I/O lock on a vnode uninterruptibly. We can't use an ordinary
+ * mutex as lockdep will complain if we unlock it in the wrong thread.
+ */
+static void afs_lock_for_io(struct afs_vnode *vnode)
+{
+ struct afs_io_locker myself = { .task = current, };
+
+ spin_lock(&vnode->lock);
+
+ if (!test_and_set_bit(AFS_VNODE_IO_LOCK, &vnode->flags)) {
+ spin_unlock(&vnode->lock);
+ return;
+ }
+
+ list_add_tail(&myself.link, &vnode->io_lock_waiters);
+ spin_unlock(&vnode->lock);
+
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (smp_load_acquire(&myself.have_lock)) /* The lock barrier */
+ break;
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+}
+
+/*
+ * Lock the I/O lock on a vnode interruptibly. We can't use an ordinary mutex
+ * as lockdep will complain if we unlock it in the wrong thread.
+ */
+static int afs_lock_for_io_interruptible(struct afs_vnode *vnode)
+{
+ struct afs_io_locker myself = { .task = current, };
+ int ret = 0;
+
+ spin_lock(&vnode->lock);
+
+ if (!test_and_set_bit(AFS_VNODE_IO_LOCK, &vnode->flags)) {
+ spin_unlock(&vnode->lock);
+ return 0;
+ }
+
+ list_add_tail(&myself.link, &vnode->io_lock_waiters);
+ spin_unlock(&vnode->lock);
+
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (smp_load_acquire(&myself.have_lock) || /* The lock barrier */
+ signal_pending(current))
+ break;
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+
+ /* If we got a signal, try to transfer the lock onto the next
+ * waiter.
+ */
+ if (unlikely(signal_pending(current))) {
+ spin_lock(&vnode->lock);
+ if (myself.have_lock) {
+ spin_unlock(&vnode->lock);
+ afs_unlock_for_io(vnode);
+ } else {
+ list_del(&myself.link);
+ spin_unlock(&vnode->lock);
+ }
+ ret = -ERESTARTSYS;
+ }
+ return ret;
+}
+
/*
* Lock the vnode(s) being operated upon.
*/
@@ -60,7 +159,7 @@ static bool afs_get_io_locks(struct afs_operation *op)
_enter("");
if (op->flags & AFS_OPERATION_UNINTR) {
- mutex_lock(&vnode->io_lock);
+ afs_lock_for_io(vnode);
op->flags |= AFS_OPERATION_LOCK_0;
_leave(" = t [1]");
return true;
@@ -72,7 +171,7 @@ static bool afs_get_io_locks(struct afs_operation *op)
if (vnode2 > vnode)
swap(vnode, vnode2);
- if (mutex_lock_interruptible(&vnode->io_lock) < 0) {
+ if (afs_lock_for_io_interruptible(vnode) < 0) {
afs_op_set_error(op, -ERESTARTSYS);
op->flags |= AFS_OPERATION_STOP;
_leave(" = f [I 0]");
@@ -81,10 +180,10 @@ static bool afs_get_io_locks(struct afs_operation *op)
op->flags |= AFS_OPERATION_LOCK_0;
if (vnode2) {
- if (mutex_lock_interruptible_nested(&vnode2->io_lock, 1) < 0) {
+ if (afs_lock_for_io_interruptible(vnode2) < 0) {
afs_op_set_error(op, -ERESTARTSYS);
op->flags |= AFS_OPERATION_STOP;
- mutex_unlock(&vnode->io_lock);
+ afs_unlock_for_io(vnode);
op->flags &= ~AFS_OPERATION_LOCK_0;
_leave(" = f [I 1]");
return false;
@@ -104,9 +203,9 @@ static void afs_drop_io_locks(struct afs_operation *op)
_enter("");
if (op->flags & AFS_OPERATION_LOCK_1)
- mutex_unlock(&vnode2->io_lock);
+ afs_unlock_for_io(vnode2);
if (op->flags & AFS_OPERATION_LOCK_0)
- mutex_unlock(&vnode->io_lock);
+ afs_unlock_for_io(vnode);
}
static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *vp,
@@ -157,7 +256,7 @@ bool afs_begin_vnode_operation(struct afs_operation *op)
/*
* Tidy up a filesystem cursor and unlock the vnode.
*/
-static void afs_end_vnode_operation(struct afs_operation *op)
+void afs_end_vnode_operation(struct afs_operation *op)
{
_enter("");
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index b516d05b0fef..e0030ac74ea0 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -235,20 +235,20 @@ out:
* Probe all of a fileserver's addresses to find out the best route and to
* query its capabilities.
*/
-void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
- struct afs_addr_list *new_alist, struct key *key)
+int afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
+ struct afs_addr_list *new_alist, struct key *key)
{
struct afs_endpoint_state *estate, *old;
- struct afs_addr_list *alist;
+ struct afs_addr_list *old_alist = NULL, *alist;
unsigned long unprobed;
_enter("%pU", &server->uuid);
estate = kzalloc(sizeof(*estate), GFP_KERNEL);
if (!estate)
- return;
+ return -ENOMEM;
- refcount_set(&estate->ref, 1);
+ refcount_set(&estate->ref, 2);
estate->server_id = server->debug_id;
estate->rtt = UINT_MAX;
@@ -256,21 +256,31 @@ void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
old = rcu_dereference_protected(server->endpoint_state,
lockdep_is_held(&server->fs_lock));
- estate->responsive_set = old->responsive_set;
- estate->addresses = afs_get_addrlist(new_alist ?: old->addresses,
- afs_alist_trace_get_estate);
+ if (old) {
+ estate->responsive_set = old->responsive_set;
+ if (!new_alist)
+ new_alist = old->addresses;
+ }
+
+ if (old_alist != new_alist)
+ afs_set_peer_appdata(server, old_alist, new_alist);
+
+ estate->addresses = afs_get_addrlist(new_alist, afs_alist_trace_get_estate);
alist = estate->addresses;
estate->probe_seq = ++server->probe_counter;
atomic_set(&estate->nr_probing, alist->nr_addrs);
+ if (new_alist)
+ server->addr_version = new_alist->version;
rcu_assign_pointer(server->endpoint_state, estate);
- set_bit(AFS_ESTATE_SUPERSEDED, &old->flags);
write_unlock(&server->fs_lock);
+ if (old)
+ set_bit(AFS_ESTATE_SUPERSEDED, &old->flags);
trace_afs_estate(estate->server_id, estate->probe_seq, refcount_read(&estate->ref),
afs_estate_trace_alloc_probe);
- afs_get_address_preferences(net, alist);
+ afs_get_address_preferences(net, new_alist);
server->probed_at = jiffies;
unprobed = (1UL << alist->nr_addrs) - 1;
@@ -293,6 +303,8 @@ void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
}
afs_put_endpoint_state(old, afs_estate_trace_put_probe);
+ afs_put_endpoint_state(estate, afs_estate_trace_put_probe);
+ return 0;
}
/*
@@ -522,6 +534,6 @@ dont_wait:
*/
void afs_fs_probe_cleanup(struct afs_net *net)
{
- if (del_timer_sync(&net->fs_probe_timer))
+ if (timer_delete_sync(&net->fs_probe_timer))
afs_dec_servers_outstanding(net);
}
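
One detail worth noting above: the new endpoint state is created with a refcount of 2 rather than 1. One reference is published in server->endpoint_state via rcu_assign_pointer(); the other is held locally while the probes are dispatched and is dropped by the afs_put_endpoint_state() call added at the end of the function, so the estate cannot vanish under the prober even if a concurrent update supersedes it immediately. A generic kernel-style sketch of that publish pattern, with a hypothetical type and no claim to match the AFS structures (inside real kernel code it would need <linux/slab.h>, <linux/refcount.h> and <linux/rcupdate.h>):

struct demo_state {
	refcount_t	ref;
	/* ... probe bookkeeping ... */
};

static struct demo_state *demo_install(struct demo_state __rcu **slot)
{
	struct demo_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return NULL;
	refcount_set(&st->ref, 2);	/* one ref for *slot, one for the caller */
	rcu_assign_pointer(*slot, st);	/* publish for readers */
	return st;			/* caller drops its ref when done probing */
}
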
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 098fa034a1cc..bc9556991d7c 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -301,19 +301,19 @@ void afs_fs_fetch_status(struct afs_operation *op)
static int afs_deliver_fs_fetch_data(struct afs_call *call)
{
struct afs_operation *op = call->op;
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
struct afs_vnode_param *vp = &op->file[0];
- struct afs_read *req = op->fetch.req;
const __be32 *bp;
size_t count_before;
int ret;
_enter("{%u,%zu,%zu/%llu}",
call->unmarshall, call->iov_len, iov_iter_count(call->iter),
- req->actual_len);
+ call->remaining);
switch (call->unmarshall) {
case 0:
- req->actual_len = 0;
+ call->remaining = 0;
call->unmarshall++;
if (call->operation_ID == FSFETCHDATA64) {
afs_extract_to_tmp64(call);
@@ -323,8 +323,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
}
fallthrough;
- /* Extract the returned data length into
- * ->actual_len. This may indicate more or less data than was
+ /* Extract the returned data length into ->remaining.
+ * This may indicate more or less data than was
* requested will be returned.
*/
case 1:
@@ -333,42 +333,40 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
if (ret < 0)
return ret;
- req->actual_len = be64_to_cpu(call->tmp64);
- _debug("DATA length: %llu", req->actual_len);
+ call->remaining = be64_to_cpu(call->tmp64);
+ _debug("DATA length: %llu", call->remaining);
- if (req->actual_len == 0)
+ if (call->remaining == 0)
goto no_more_data;
- call->iter = req->iter;
- call->iov_len = min(req->actual_len, req->len);
+ call->iter = &subreq->io_iter;
+ call->iov_len = umin(call->remaining, subreq->len - subreq->transferred);
call->unmarshall++;
fallthrough;
/* extract the returned data */
case 2:
count_before = call->iov_len;
- _debug("extract data %zu/%llu", count_before, req->actual_len);
+ _debug("extract data %zu/%llu", count_before, call->remaining);
ret = afs_extract_data(call, true);
- if (req->subreq) {
- req->subreq->transferred += count_before - call->iov_len;
- netfs_read_subreq_progress(req->subreq, false);
- }
+ subreq->transferred += count_before - call->iov_len;
+ call->remaining -= count_before - call->iov_len;
if (ret < 0)
return ret;
call->iter = &call->def_iter;
- if (req->actual_len <= req->len)
+ if (call->remaining)
goto no_more_data;
/* Discard any excess data the server gave us */
- afs_extract_discard(call, req->actual_len - req->len);
+ afs_extract_discard(call, call->remaining);
call->unmarshall = 3;
fallthrough;
case 3:
_debug("extract discard %zu/%llu",
- iov_iter_count(call->iter), req->actual_len - req->len);
+ iov_iter_count(call->iter), call->remaining);
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -390,8 +388,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
xdr_decode_AFSCallBack(&bp, call, &vp->scb);
xdr_decode_AFSVolSync(&bp, &op->volsync);
- req->data_version = vp->scb.status.data_version;
- req->file_size = vp->scb.status.size;
+ if (subreq->start + subreq->transferred >= vp->scb.status.size)
+ __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
call->unmarshall++;
fallthrough;
@@ -410,14 +408,18 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
static const struct afs_call_type afs_RXFSFetchData = {
.name = "FS.FetchData",
.op = afs_FS_FetchData,
+ .async_rx = afs_fetch_data_async_rx,
.deliver = afs_deliver_fs_fetch_data,
+ .immediate_cancel = afs_fetch_data_immediate_cancel,
.destructor = afs_flat_call_destructor,
};
static const struct afs_call_type afs_RXFSFetchData64 = {
.name = "FS.FetchData64",
.op = afs_FS_FetchData64,
+ .async_rx = afs_fetch_data_async_rx,
.deliver = afs_deliver_fs_fetch_data,
+ .immediate_cancel = afs_fetch_data_immediate_cancel,
.destructor = afs_flat_call_destructor,
};
@@ -426,8 +428,8 @@ static const struct afs_call_type afs_RXFSFetchData64 = {
*/
static void afs_fs_fetch_data64(struct afs_operation *op)
{
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
struct afs_vnode_param *vp = &op->file[0];
- struct afs_read *req = op->fetch.req;
struct afs_call *call;
__be32 *bp;
@@ -437,16 +439,19 @@ static void afs_fs_fetch_data64(struct afs_operation *op)
if (!call)
return afs_op_nomem(op);
+ if (op->flags & AFS_OPERATION_ASYNC)
+ call->async = true;
+
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSFETCHDATA64);
bp[1] = htonl(vp->fid.vid);
bp[2] = htonl(vp->fid.vnode);
bp[3] = htonl(vp->fid.unique);
- bp[4] = htonl(upper_32_bits(req->pos));
- bp[5] = htonl(lower_32_bits(req->pos));
+ bp[4] = htonl(upper_32_bits(subreq->start + subreq->transferred));
+ bp[5] = htonl(lower_32_bits(subreq->start + subreq->transferred));
bp[6] = 0;
- bp[7] = htonl(lower_32_bits(req->len));
+ bp[7] = htonl(lower_32_bits(subreq->len - subreq->transferred));
call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
@@ -458,9 +463,9 @@ static void afs_fs_fetch_data64(struct afs_operation *op)
*/
void afs_fs_fetch_data(struct afs_operation *op)
{
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_read *req = op->fetch.req;
__be32 *bp;
if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
@@ -472,16 +477,14 @@ void afs_fs_fetch_data(struct afs_operation *op)
if (!call)
return afs_op_nomem(op);
- req->call_debug_id = call->debug_id;
-
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSFETCHDATA);
bp[1] = htonl(vp->fid.vid);
bp[2] = htonl(vp->fid.vnode);
bp[3] = htonl(vp->fid.unique);
- bp[4] = htonl(lower_32_bits(req->pos));
- bp[5] = htonl(lower_32_bits(req->len));
+ bp[4] = htonl(lower_32_bits(subreq->start + subreq->transferred));
+ bp[5] = htonl(lower_32_bits(subreq->len - subreq->transferred));
call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
@@ -1650,7 +1653,7 @@ int afs_fs_give_up_all_callbacks(struct afs_net *net, struct afs_server *server,
bp = call->request;
*bp++ = htonl(FSGIVEUPALLCALLBACKS);
- call->server = afs_use_server(server, afs_server_trace_give_up_cb);
+ call->server = afs_use_server(server, false, afs_server_trace_use_give_up_cb);
afs_make_call(call, GFP_NOFS);
afs_wait_for_call_to_complete(call);
ret = call->error;
@@ -1733,6 +1736,7 @@ static const struct afs_call_type afs_RXFSGetCapabilities = {
.op = afs_FS_GetCapabilities,
.deliver = afs_deliver_fs_get_capabilities,
.done = afs_fileserver_probe_result,
+ .immediate_cancel = afs_fileserver_probe_result,
.destructor = afs_fs_get_capabilities_destructor,
};
@@ -1756,7 +1760,7 @@ bool afs_fs_get_capabilities(struct afs_net *net, struct afs_server *server,
return false;
call->key = key;
- call->server = afs_use_server(server, afs_server_trace_get_caps);
+ call->server = afs_use_server(server, false, afs_server_trace_use_get_caps);
call->peer = rxrpc_kernel_get_peer(estate->addresses->addrs[addr_index].peer);
call->probe = afs_get_endpoint_state(estate, afs_estate_trace_get_getcaps);
call->probe_index = addr_index;
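
With struct afs_read gone, a (possibly retried) FetchData derives its request window directly from the netfs subrequest: it resumes at subreq->start + subreq->transferred and asks for the subreq->len - subreq->transferred bytes still outstanding, as in the 64-bit marshaller above. A one-line restatement of that arithmetic as an illustrative helper, not part of the patch (needs <linux/netfs.h> in kernel context):

/* Request window for a FetchData RPC, per the marshallers above. */
static inline void demo_fetch_window(const struct netfs_io_subrequest *subreq,
				     unsigned long long *_pos,
				     unsigned long long *_len)
{
	*_pos = subreq->start + subreq->transferred;	/* resume point */
	*_len = subreq->len - subreq->transferred;	/* bytes still wanted */
}
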
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index a95e77670b49..dde1857fcabb 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -25,8 +25,94 @@
#include "internal.h"
#include "afs_fs.h"
+void afs_init_new_symlink(struct afs_vnode *vnode, struct afs_operation *op)
+{
+ size_t size = strlen(op->create.symlink) + 1;
+ size_t dsize = 0;
+ char *p;
+
+ if (netfs_alloc_folioq_buffer(NULL, &vnode->directory, &dsize, size,
+ mapping_gfp_mask(vnode->netfs.inode.i_mapping)) < 0)
+ return;
+
+ vnode->directory_size = dsize;
+ p = kmap_local_folio(folioq_folio(vnode->directory, 0), 0);
+ memcpy(p, op->create.symlink, size);
+ kunmap_local(p);
+ set_bit(AFS_VNODE_DIR_READ, &vnode->flags);
+ netfs_single_mark_inode_dirty(&vnode->netfs.inode);
+}
+
+static void afs_put_link(void *arg)
+{
+ struct folio *folio = virt_to_folio(arg);
+
+ kunmap_local(arg);
+ folio_put(folio);
+}
+
+const char *afs_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
+{
+ struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct folio *folio;
+ char *content;
+ ssize_t ret;
+
+ if (!dentry) {
+ /* RCU pathwalk. */
+ if (!test_bit(AFS_VNODE_DIR_READ, &vnode->flags) || !afs_check_validity(vnode))
+ return ERR_PTR(-ECHILD);
+ goto good;
+ }
+
+ if (test_bit(AFS_VNODE_DIR_READ, &vnode->flags))
+ goto fetch;
+
+ ret = afs_validate(vnode, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (!test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) &&
+ test_bit(AFS_VNODE_DIR_READ, &vnode->flags))
+ goto good;
+
+fetch:
+ ret = afs_read_single(vnode, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ set_bit(AFS_VNODE_DIR_READ, &vnode->flags);
+
+good:
+ folio = folioq_folio(vnode->directory, 0);
+ folio_get(folio);
+ content = kmap_local_folio(folio, 0);
+ set_delayed_call(callback, afs_put_link, content);
+ return content;
+}
+
+int afs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
+{
+ DEFINE_DELAYED_CALL(done);
+ const char *content;
+ int len;
+
+ content = afs_get_link(dentry, d_inode(dentry), &done);
+ if (IS_ERR(content)) {
+ do_delayed_call(&done);
+ return PTR_ERR(content);
+ }
+
+ len = umin(strlen(content), buflen);
+ if (copy_to_user(buffer, content, len))
+ len = -EFAULT;
+ do_delayed_call(&done);
+ return len;
+}
+
static const struct inode_operations afs_symlink_inode_operations = {
- .get_link = page_get_link,
+ .get_link = afs_get_link,
+ .readlink = afs_readlink,
};
static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *parent_vnode)
@@ -110,7 +196,9 @@ static int afs_inode_init_from_status(struct afs_operation *op,
inode->i_op = &afs_dir_inode_operations;
inode->i_fop = &afs_dir_file_operations;
inode->i_mapping->a_ops = &afs_dir_aops;
- mapping_set_large_folios(inode->i_mapping);
+ __set_bit(NETFS_ICTX_SINGLE_NO_UPLOAD, &vnode->netfs.flags);
+ /* Assume locally cached directory data will be valid. */
+ __set_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
break;
case AFS_FTYPE_SYMLINK:
/* Symlinks with a mode of 0644 are actually mountpoints. */
@@ -122,13 +210,13 @@ static int afs_inode_init_from_status(struct afs_operation *op,
inode->i_mode = S_IFDIR | 0555;
inode->i_op = &afs_mntpt_inode_operations;
inode->i_fop = &afs_mntpt_file_operations;
- inode->i_mapping->a_ops = &afs_symlink_aops;
} else {
inode->i_mode = S_IFLNK | status->mode;
inode->i_op = &afs_symlink_inode_operations;
- inode->i_mapping->a_ops = &afs_symlink_aops;
}
+ inode->i_mapping->a_ops = &afs_dir_aops;
inode_nohighmem(inode);
+ mapping_set_release_always(inode->i_mapping);
break;
default:
dump_vnode(vnode, op->file[0].vnode != vnode ? op->file[0].vnode : NULL);
@@ -140,15 +228,17 @@ static int afs_inode_init_from_status(struct afs_operation *op,
afs_set_netfs_context(vnode);
vnode->invalid_before = status->data_version;
+ trace_afs_set_dv(vnode, status->data_version);
inode_set_iversion_raw(&vnode->netfs.inode, status->data_version);
if (!vp->scb.have_cb) {
/* it's a symlink we just created (the fileserver
* didn't give us a callback) */
- atomic64_set(&vnode->cb_expires_at, AFS_NO_CB_PROMISE);
+ afs_clear_cb_promise(vnode, afs_cb_promise_set_new_symlink);
} else {
vnode->cb_server = op->server;
- atomic64_set(&vnode->cb_expires_at, vp->scb.callback.expires_at);
+ afs_set_cb_promise(vnode, vp->scb.callback.expires_at,
+ afs_cb_promise_set_new_inode);
}
write_sequnlock(&vnode->cb_lock);
@@ -207,12 +297,17 @@ static void afs_apply_status(struct afs_operation *op,
if (vp->update_ctime)
inode_set_ctime_to_ts(inode, op->ctime);
- if (vnode->status.data_version != status->data_version)
+ if (vnode->status.data_version != status->data_version) {
+ trace_afs_set_dv(vnode, status->data_version);
data_changed = true;
+ }
vnode->status = *status;
if (vp->dv_before + vp->dv_delta != status->data_version) {
+ trace_afs_dv_mismatch(vnode, vp->dv_before, vp->dv_delta,
+ status->data_version);
+
if (vnode->cb_ro_snapshot == atomic_read(&vnode->volume->cb_ro_snapshot) &&
atomic64_read(&vnode->cb_expires_at) != AFS_NO_CB_PROMISE)
pr_warn("kAFS: vnode modified {%llx:%llu} %llx->%llx %s (op=%x)\n",
@@ -223,12 +318,10 @@ static void afs_apply_status(struct afs_operation *op,
op->debug_id);
vnode->invalid_before = status->data_version;
- if (vnode->status.type == AFS_FTYPE_DIR) {
- if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
- afs_stat_v(vnode, n_inval);
- } else {
+ if (vnode->status.type == AFS_FTYPE_DIR)
+ afs_invalidate_dir(vnode, afs_dir_invalid_dv_mismatch);
+ else
set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
- }
change_size = true;
data_changed = true;
unexpected_jump = true;
@@ -258,6 +351,8 @@ static void afs_apply_status(struct afs_operation *op,
inode_set_ctime_to_ts(inode, t);
inode_set_atime_to_ts(inode, t);
}
+ if (op->ops == &afs_fetch_data_operation)
+ op->fetch.subreq->rreq->i_size = status->size;
}
}
@@ -273,7 +368,7 @@ static void afs_apply_callback(struct afs_operation *op,
if (!afs_cb_is_broken(vp->cb_break_before, vnode)) {
if (op->volume->type == AFSVL_RWVOL)
vnode->cb_server = op->server;
- atomic64_set(&vnode->cb_expires_at, cb->expires_at);
+ afs_set_cb_promise(vnode, cb->expires_at, afs_cb_promise_set_apply_cb);
}
}
@@ -332,7 +427,7 @@ static void afs_fetch_status_success(struct afs_operation *op)
struct afs_vnode *vnode = vp->vnode;
int ret;
- if (vnode->netfs.inode.i_state & I_NEW) {
+ if (inode_state_read_once(&vnode->netfs.inode) & I_NEW) {
ret = afs_inode_init_from_status(op, vp, vnode);
afs_op_set_error(op, ret);
if (ret == 0)
@@ -435,7 +530,9 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
} __packed key;
struct afs_vnode_cache_aux aux;
- if (vnode->status.type != AFS_FTYPE_FILE) {
+ if (vnode->status.type != AFS_FTYPE_FILE &&
+ vnode->status.type != AFS_FTYPE_DIR &&
+ vnode->status.type != AFS_FTYPE_SYMLINK) {
vnode->netfs.cache = NULL;
return;
}
@@ -482,7 +579,7 @@ struct inode *afs_iget(struct afs_operation *op, struct afs_vnode_param *vp)
inode, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
/* deal with an existing inode */
- if (!(inode->i_state & I_NEW)) {
+ if (!(inode_state_read_once(inode) & I_NEW)) {
_leave(" = %p", inode);
return inode;
}
@@ -542,7 +639,7 @@ struct inode *afs_root_iget(struct super_block *sb, struct key *key)
_debug("GOT ROOT INODE %p { vl=%llx }", inode, as->volume->vid);
- BUG_ON(!(inode->i_state & I_NEW));
+ BUG_ON(!(inode_state_read_once(inode) & I_NEW));
vnode = AFS_FS_I(inode);
vnode->cb_v_check = atomic_read(&as->volume->cb_v_break);
@@ -626,9 +723,9 @@ int afs_drop_inode(struct inode *inode)
_enter("");
if (test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(inode)->flags))
- return generic_delete_inode(inode);
+ return inode_just_drop(inode);
else
- return generic_drop_inode(inode);
+ return inode_generic_drop(inode);
}
/*
@@ -637,6 +734,7 @@ int afs_drop_inode(struct inode *inode)
void afs_evict_inode(struct inode *inode)
{
struct afs_vnode_cache_aux aux;
+ struct afs_super_info *sbi = AFS_FS_S(inode->i_sb);
struct afs_vnode *vnode = AFS_FS_I(inode);
_enter("{%llx:%llu.%d}",
@@ -648,8 +746,22 @@ void afs_evict_inode(struct inode *inode)
ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode);
+ if ((S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)) &&
+ (inode_state_read_once(inode) & I_DIRTY) &&
+ !sbi->dyn_root) {
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .for_sync = true,
+ .range_end = LLONG_MAX,
+ };
+
+ afs_single_writepages(inode->i_mapping, &wbc);
+ }
+
netfs_wait_for_outstanding_io(inode);
truncate_inode_pages_final(&inode->i_data);
+ netfs_free_folioq_buffer(vnode->directory);
afs_set_cache_aux(vnode, &aux);
netfs_clear_inode_writeback(inode, &aux);
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index c9d620175e80..009064b8d661 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -20,6 +20,7 @@
#include <linux/uuid.h>
#include <linux/mm_types.h>
#include <linux/dns_resolver.h>
+#include <crypto/krb5.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
@@ -163,6 +164,7 @@ struct afs_call {
spinlock_t state_lock;
int error; /* error code */
u32 abort_code; /* Remote abort ID or 0 */
+ unsigned long long remaining; /* How much is left to receive */
unsigned int max_lifespan; /* Maximum lifespan in secs to set if not 0 */
unsigned request_size; /* size of request data */
unsigned reply_max; /* maximum size of reply */
@@ -175,8 +177,10 @@ struct afs_call {
bool intr; /* T if interruptible */
bool unmarshalling_error; /* T if an unmarshalling error occurred */
bool responded; /* Got a response from the call (may be abort) */
+ u8 security_ix; /* Security class */
u16 service_id; /* Actual service ID (after upgrade) */
unsigned int debug_id; /* Trace ID */
+ u32 enctype; /* Security encoding type */
u32 operation_ID; /* operation ID for an incoming call */
u32 count; /* count for use in unmarshalling */
union { /* place to extract temporary data */
@@ -201,11 +205,17 @@ struct afs_call_type {
/* clean up a call */
void (*destructor)(struct afs_call *call);
+ /* Async receive processing function */
+ void (*async_rx)(struct work_struct *work);
+
/* Work function */
void (*work)(struct work_struct *work);
/* Call done function (gets called immediately on success or failure) */
void (*done)(struct afs_call *call);
+
+ /* Handle a call being immediately cancelled. */
+ void (*immediate_cancel)(struct afs_call *call);
};
/*
@@ -233,28 +243,6 @@ static inline struct key *afs_file_key(struct file *file)
}
/*
- * Record of an outstanding read operation on a vnode.
- */
-struct afs_read {
- loff_t pos; /* Where to start reading */
- loff_t len; /* How much we're asking for */
- loff_t actual_len; /* How much we're actually getting */
- loff_t file_size; /* File size returned by server */
- struct key *key; /* The key to use to reissue the read */
- struct afs_vnode *vnode; /* The file being read into. */
- struct netfs_io_subrequest *subreq; /* Fscache helper read request this belongs to */
- afs_dataversion_t data_version; /* Version number returned by server */
- refcount_t usage;
- unsigned int call_debug_id;
- unsigned int nr_pages;
- int error;
- void (*done)(struct afs_read *);
- void (*cleanup)(struct afs_read *);
- struct iov_iter *iter; /* Iterator representing the buffer */
- struct iov_iter def_iter; /* Default iterator */
-};
-
-/*
* AFS superblock private data
* - there's one superblock per volume
*/
@@ -296,15 +284,15 @@ struct afs_net {
struct socket *socket;
struct afs_call *spare_incoming_call;
struct work_struct charge_preallocation_work;
+ struct work_struct rx_oob_work;
struct mutex socket_mutex;
atomic_t nr_outstanding_calls;
atomic_t nr_superblocks;
/* Cell database */
struct rb_root cells;
- struct afs_cell *ws_cell;
- struct work_struct cells_manager;
- struct timer_list cells_timer;
+ struct idr cells_dyn_ino; /* cell->dynroot_ino mapping */
+ struct afs_cell __rcu *ws_cell;
atomic_t cells_outstanding;
struct rw_semaphore cells_lock;
struct mutex cells_alias_lock;
@@ -316,18 +304,12 @@ struct afs_net {
* cell, but in practice, people create aliases and subsets and there's
* no easy way to distinguish them.
*/
- seqlock_t fs_lock; /* For fs_servers, fs_probe_*, fs_proc */
- struct rb_root fs_servers; /* afs_server (by server UUID or address) */
+ seqlock_t fs_lock; /* For fs_probe_*, fs_proc */
struct list_head fs_probe_fast; /* List of afs_server to probe at 30s intervals */
struct list_head fs_probe_slow; /* List of afs_server to probe at 5m intervals */
struct hlist_head fs_proc; /* procfs servers list */
- struct hlist_head fs_addresses; /* afs_server (by lowest IPv6 addr) */
- seqlock_t fs_addr_lock; /* For fs_addresses[46] */
-
- struct work_struct fs_manager;
- struct timer_list fs_timer;
-
+ struct key *fs_cm_token_key; /* Key for creating CM tokens */
struct work_struct fs_prober;
struct timer_list fs_probe_timer;
atomic_t servers_outstanding;
@@ -360,13 +342,11 @@ struct afs_net {
extern const char afs_init_sysname[];
enum afs_cell_state {
- AFS_CELL_UNSET,
- AFS_CELL_ACTIVATING,
+ AFS_CELL_SETTING_UP,
+ AFS_CELL_UNLOOKED,
AFS_CELL_ACTIVE,
- AFS_CELL_DEACTIVATING,
- AFS_CELL_INACTIVE,
- AFS_CELL_FAILED,
- AFS_CELL_REMOVED,
+ AFS_CELL_REMOVING,
+ AFS_CELL_DEAD,
};
/*
@@ -397,7 +377,9 @@ struct afs_cell {
struct afs_cell *alias_of; /* The cell this is an alias of */
struct afs_volume *root_volume; /* The root.cell volume if there is one */
struct key *anonymous_key; /* anonymous user key for this cell */
+ struct work_struct destroyer; /* Destroyer for cell */
struct work_struct manager; /* Manager for init/deinit/dns */
+ struct timer_list management_timer; /* General management timer */
struct hlist_node proc_link; /* /proc cell list link */
time64_t dns_expiry; /* Time AFSDB/SRV record expires */
time64_t last_inactive; /* Time of last drop of usage count */
@@ -413,6 +395,7 @@ struct afs_cell {
enum dns_lookup_status dns_status:8; /* Latest status of data from lookup */
unsigned int dns_lookup_count; /* Counter of DNS lookups */
unsigned int debug_id;
+ unsigned int dynroot_ino; /* Inode numbers for dynroot (a pair) */
/* The volumes belonging to this cell */
struct rw_semaphore vs_lock; /* Lock for server->volumes */
@@ -422,7 +405,7 @@ struct afs_cell {
/* Active fileserver interaction state. */
struct rb_root fs_servers; /* afs_server (by server UUID) */
- seqlock_t fs_lock; /* For fs_servers */
+ struct rw_semaphore fs_lock; /* For fs_servers */
/* VL server list. */
rwlock_t vl_servers_lock; /* Lock on vl_servers */
@@ -430,6 +413,7 @@ struct afs_cell {
u8 name_len; /* Length of name */
char *name; /* Cell name, case-flattened and NUL-padded */
+ char *key_desc; /* Authentication key description */
};
/*
@@ -557,31 +541,35 @@ struct afs_server {
};
struct afs_cell *cell; /* Cell to which belongs (pins ref) */
- struct rb_node uuid_rb; /* Link in net->fs_servers */
- struct afs_server __rcu *uuid_next; /* Next server with same UUID */
- struct afs_server *uuid_prev; /* Previous server with same UUID */
- struct list_head probe_link; /* Link in net->fs_probe_list */
- struct hlist_node addr_link; /* Link in net->fs_addresses6 */
+ struct rb_node uuid_rb; /* Link in cell->fs_servers */
+ struct list_head probe_link; /* Link in net->fs_probe_* */
struct hlist_node proc_link; /* Link in net->fs_proc */
struct list_head volumes; /* RCU list of afs_server_entry objects */
- struct afs_server *gc_next; /* Next server in manager's list */
+ struct work_struct destroyer; /* Work item to try and destroy a server */
+ struct timer_list timer; /* Management timer */
+ struct mutex cm_token_lock; /* Lock governing creation of appdata */
+ struct krb5_buffer cm_rxgk_appdata; /* Appdata to be included in RESPONSE packet */
time64_t unuse_time; /* Time at which last unused */
unsigned long flags;
#define AFS_SERVER_FL_RESPONDING 0 /* The server is responding */
#define AFS_SERVER_FL_UPDATING 1
#define AFS_SERVER_FL_NEEDS_UPDATE 2 /* Fileserver address list is out of date */
-#define AFS_SERVER_FL_NOT_READY 4 /* The record is not ready for use */
-#define AFS_SERVER_FL_NOT_FOUND 5 /* VL server says no such server */
-#define AFS_SERVER_FL_VL_FAIL 6 /* Failed to access VL server */
+#define AFS_SERVER_FL_UNCREATED 3 /* The record needs creating */
+#define AFS_SERVER_FL_CREATING 4 /* The record is being created */
+#define AFS_SERVER_FL_EXPIRED 5 /* The record has expired */
+#define AFS_SERVER_FL_NOT_FOUND 6 /* VL server says no such server */
+#define AFS_SERVER_FL_VL_FAIL 7 /* Failed to access VL server */
#define AFS_SERVER_FL_MAY_HAVE_CB 8 /* May have callbacks on this fileserver */
#define AFS_SERVER_FL_IS_YFS 16 /* Server is YFS not AFS */
#define AFS_SERVER_FL_NO_IBULK 17 /* Fileserver doesn't support FS.InlineBulkStatus */
#define AFS_SERVER_FL_NO_RM2 18 /* Fileserver doesn't support YFS.RemoveFile2 */
#define AFS_SERVER_FL_HAS_FS64 19 /* Fileserver supports FS.{Fetch,Store}Data64 */
+#define AFS_SERVER_FL_NO_RENAME2 20 /* YFS Fileserver doesn't support enhanced rename */
refcount_t ref; /* Object refcount */
atomic_t active; /* Active user count */
u32 addr_version; /* Address list version */
u16 service_id; /* Service ID we're using. */
+ short create_error; /* Creation error */
unsigned int rtt; /* Server's current RTT in uS */
unsigned int debug_id; /* Debugging ID for traces */
@@ -636,6 +624,7 @@ struct afs_volume {
afs_volid_t vid; /* The volume ID of this volume */
afs_volid_t vids[AFS_MAXTYPES]; /* All associated volume IDs */
refcount_t ref;
+ unsigned int debug_id; /* Debugging ID for traces */
time64_t update_at; /* Time at which to next update */
struct afs_cell *cell; /* Cell to which belongs (pins ref) */
struct rb_node cell_node; /* Link in cell->volumes */
@@ -702,24 +691,26 @@ struct afs_vnode {
struct afs_file_status status; /* AFS status info for this file */
afs_dataversion_t invalid_before; /* Child dentries are invalid before this */
struct afs_permits __rcu *permit_cache; /* cache of permits so far obtained */
- struct mutex io_lock; /* Lock for serialising I/O on this mutex */
+ struct list_head io_lock_waiters; /* Threads waiting for the I/O lock */
struct rw_semaphore validate_lock; /* lock for validating this vnode */
struct rw_semaphore rmdir_lock; /* Lock for rmdir vs sillyrename */
struct key *silly_key; /* Silly rename key */
spinlock_t wb_lock; /* lock for wb_keys */
spinlock_t lock; /* waitqueue/flags lock */
unsigned long flags;
+#define AFS_VNODE_IO_LOCK 0 /* Set if the I/O serialisation lock is held */
#define AFS_VNODE_UNSET 1 /* set if vnode attributes not yet set */
#define AFS_VNODE_DIR_VALID 2 /* Set if dir contents are valid */
#define AFS_VNODE_ZAP_DATA 3 /* set if vnode's data should be invalidated */
#define AFS_VNODE_DELETED 4 /* set if vnode deleted on server */
#define AFS_VNODE_MOUNTPOINT 5 /* set if vnode is a mountpoint symlink */
-#define AFS_VNODE_AUTOCELL 6 /* set if Vnode is an auto mount point */
#define AFS_VNODE_PSEUDODIR 7 /* set if Vnode is a pseudo directory */
#define AFS_VNODE_NEW_CONTENT 8 /* Set if file has new content (create/trunc-0) */
#define AFS_VNODE_SILLY_DELETED 9 /* Set if file has been silly-deleted */
#define AFS_VNODE_MODIFYING 10 /* Set if we're performing a modification op */
+#define AFS_VNODE_DIR_READ 11 /* Set if we've read a dir's contents */
+ struct folio_queue *directory; /* Directory contents */
struct list_head wb_keys; /* List of keys available for writeback */
struct list_head pending_locks; /* locks waiting to be granted */
struct list_head granted_locks; /* locks granted on this file */
@@ -728,6 +719,7 @@ struct afs_vnode {
ktime_t locked_at; /* Time at which lock obtained */
enum afs_lock_state lock_state : 8;
afs_lock_type_t lock_type : 8;
+ unsigned int directory_size; /* Amount of space in ->directory */
/* outstanding callback notification on this file */
struct work_struct cb_work; /* Work for mmap'd files */
@@ -902,12 +894,13 @@ struct afs_operation {
bool need_rehash;
} unlink;
struct {
- struct dentry *rehash;
- struct dentry *tmp;
- bool new_negative;
+ struct dentry *rehash;
+ struct dentry *tmp;
+ unsigned int rename_flags;
+ bool new_negative;
} rename;
struct {
- struct afs_read *req;
+ struct netfs_io_subrequest *subreq;
} fetch;
struct {
afs_lock_type_t type;
@@ -959,6 +952,7 @@ struct afs_operation {
#define AFS_OPERATION_TRIED_ALL 0x0400 /* Set if we've tried all the fileservers */
#define AFS_OPERATION_RETRY_SERVER 0x0800 /* Set if we should retry the current server */
#define AFS_OPERATION_DIR_CONFLICT 0x1000 /* Set if we detected a 3rd-party dir change */
+#define AFS_OPERATION_ASYNC 0x2000 /* Set if should run asynchronously */
};
/*
@@ -983,6 +977,21 @@ static inline void afs_invalidate_cache(struct afs_vnode *vnode, unsigned int fl
i_size_read(&vnode->netfs.inode), flags);
}
+/*
+ * Directory iteration management.
+ */
+struct afs_dir_iter {
+ struct afs_vnode *dvnode;
+ union afs_xdr_dir_block *block;
+ struct folio_queue *fq;
+ unsigned int fpos;
+ int fq_slot;
+ unsigned int loop_check;
+ u8 nr_slots;
+ u8 bucket;
+ unsigned int prev_entry;
+};
+
#include <trace/events/afs.h>
/*****************************************************************************/
@@ -1003,6 +1012,9 @@ extern int afs_merge_fs_addr4(struct afs_net *net, struct afs_addr_list *addr,
__be32 xdr, u16 port);
extern int afs_merge_fs_addr6(struct afs_net *net, struct afs_addr_list *addr,
__be32 *xdr, u16 port);
+void afs_set_peer_appdata(struct afs_server *server,
+ struct afs_addr_list *old_alist,
+ struct afs_addr_list *new_alist);
/*
* addr_prefs.c
@@ -1039,16 +1051,26 @@ static inline bool afs_cb_is_broken(unsigned int cb_break,
extern int afs_cell_init(struct afs_net *, const char *);
extern struct afs_cell *afs_find_cell(struct afs_net *, const char *, unsigned,
enum afs_cell_trace);
-extern struct afs_cell *afs_lookup_cell(struct afs_net *, const char *, unsigned,
- const char *, bool);
+enum afs_lookup_cell_for {
+ AFS_LOOKUP_CELL_DYNROOT,
+ AFS_LOOKUP_CELL_MOUNTPOINT,
+ AFS_LOOKUP_CELL_DIRECT_MOUNT,
+ AFS_LOOKUP_CELL_PRELOAD,
+ AFS_LOOKUP_CELL_ROOTCELL,
+ AFS_LOOKUP_CELL_ALIAS_CHECK,
+};
+struct afs_cell *afs_lookup_cell(struct afs_net *net,
+ const char *name, unsigned int namesz,
+ const char *vllist,
+ enum afs_lookup_cell_for reason,
+ enum afs_cell_trace trace);
extern struct afs_cell *afs_use_cell(struct afs_cell *, enum afs_cell_trace);
-extern void afs_unuse_cell(struct afs_net *, struct afs_cell *, enum afs_cell_trace);
+void afs_unuse_cell(struct afs_cell *cell, enum afs_cell_trace reason);
extern struct afs_cell *afs_get_cell(struct afs_cell *, enum afs_cell_trace);
extern void afs_see_cell(struct afs_cell *, enum afs_cell_trace);
extern void afs_put_cell(struct afs_cell *, enum afs_cell_trace);
extern void afs_queue_cell(struct afs_cell *, enum afs_cell_trace);
-extern void afs_manage_cells(struct work_struct *);
-extern void afs_cells_timer(struct timer_list *);
+void afs_set_cell_timer(struct afs_cell *cell, unsigned int delay_secs);
extern void __net_exit afs_cell_purge(struct afs_net *);
/*
@@ -1057,6 +1079,19 @@ extern void __net_exit afs_cell_purge(struct afs_net *);
extern bool afs_cm_incoming_call(struct afs_call *);
/*
+ * cm_security.c
+ */
+void afs_process_oob_queue(struct work_struct *work);
+#ifdef CONFIG_RXGK
+int afs_create_token_key(struct afs_net *net, struct socket *socket);
+#else
+static inline int afs_create_token_key(struct afs_net *net, struct socket *socket)
+{
+ return 0;
+}
+#endif
+
+/*
* dir.c
*/
extern const struct file_operations afs_dir_file_operations;
@@ -1064,17 +1099,34 @@ extern const struct inode_operations afs_dir_inode_operations;
extern const struct address_space_operations afs_dir_aops;
extern const struct dentry_operations afs_fs_dentry_operations;
+ssize_t afs_read_single(struct afs_vnode *dvnode, struct file *file);
+ssize_t afs_read_dir(struct afs_vnode *dvnode, struct file *file)
+ __acquires(&dvnode->validate_lock);
extern void afs_d_release(struct dentry *);
extern void afs_check_for_remote_deletion(struct afs_operation *);
+int afs_single_writepages(struct address_space *mapping,
+ struct writeback_control *wbc);
/*
* dir_edit.c
*/
-extern void afs_edit_dir_add(struct afs_vnode *, struct qstr *, struct afs_fid *,
+extern void afs_edit_dir_add(struct afs_vnode *, const struct qstr *, struct afs_fid *,
enum afs_edit_dir_reason);
-extern void afs_edit_dir_remove(struct afs_vnode *, struct qstr *, enum afs_edit_dir_reason);
-void afs_edit_dir_update_dotdot(struct afs_vnode *vnode, struct afs_vnode *new_dvnode,
- enum afs_edit_dir_reason why);
+extern void afs_edit_dir_remove(struct afs_vnode *, const struct qstr *, enum afs_edit_dir_reason);
+void afs_edit_dir_update(struct afs_vnode *vnode, const struct qstr *name,
+ struct afs_vnode *new_dvnode, enum afs_edit_dir_reason why);
+void afs_mkdir_init_dir(struct afs_vnode *dvnode, struct afs_vnode *parent_vnode);
+
+/*
+ * dir_search.c
+ */
+unsigned int afs_dir_hash_name(const struct qstr *name);
+bool afs_dir_init_iter(struct afs_dir_iter *iter, const struct qstr *name);
+union afs_xdr_dir_block *afs_dir_find_block(struct afs_dir_iter *iter, size_t block);
+int afs_dir_search_bucket(struct afs_dir_iter *iter, const struct qstr *name,
+ struct afs_fid *_fid);
+int afs_dir_search(struct afs_vnode *dvnode, const struct qstr *name,
+ struct afs_fid *_fid, afs_dataversion_t *_dir_version);
/*
* dir_silly.c
@@ -1089,34 +1141,23 @@ extern int afs_silly_iput(struct dentry *, struct inode *);
extern const struct inode_operations afs_dynroot_inode_operations;
extern const struct dentry_operations afs_dynroot_dentry_operations;
-extern struct inode *afs_try_auto_mntpt(struct dentry *, struct inode *);
-extern int afs_dynroot_mkdir(struct afs_net *, struct afs_cell *);
-extern void afs_dynroot_rmdir(struct afs_net *, struct afs_cell *);
-extern int afs_dynroot_populate(struct super_block *);
-extern void afs_dynroot_depopulate(struct super_block *);
+struct inode *afs_dynroot_iget_root(struct super_block *sb);
/*
* file.c
*/
extern const struct address_space_operations afs_file_aops;
-extern const struct address_space_operations afs_symlink_aops;
extern const struct inode_operations afs_file_inode_operations;
extern const struct file_operations afs_file_operations;
+extern const struct afs_operation_ops afs_fetch_data_operation;
extern const struct netfs_request_ops afs_req_ops;
extern int afs_cache_wb_key(struct afs_vnode *, struct afs_file *);
extern void afs_put_wb_key(struct afs_wb_key *);
extern int afs_open(struct inode *, struct file *);
extern int afs_release(struct inode *, struct file *);
-extern int afs_fetch_data(struct afs_vnode *, struct afs_read *);
-extern struct afs_read *afs_alloc_read(gfp_t);
-extern void afs_put_read(struct afs_read *);
-
-static inline struct afs_read *afs_get_read(struct afs_read *req)
-{
- refcount_inc(&req->usage);
- return req;
-}
+void afs_fetch_data_async_rx(struct work_struct *work);
+void afs_fetch_data_immediate_cancel(struct afs_call *call);
/*
* flock.c
@@ -1168,6 +1209,7 @@ extern void afs_fs_store_acl(struct afs_operation *);
extern struct afs_operation *afs_alloc_operation(struct key *, struct afs_volume *);
extern int afs_put_operation(struct afs_operation *);
extern bool afs_begin_vnode_operation(struct afs_operation *);
+extern void afs_end_vnode_operation(struct afs_operation *op);
extern void afs_wait_for_operation(struct afs_operation *);
extern int afs_do_sync_operation(struct afs_operation *);
@@ -1191,8 +1233,8 @@ struct afs_endpoint_state *afs_get_endpoint_state(struct afs_endpoint_state *est
enum afs_estate_trace where);
void afs_put_endpoint_state(struct afs_endpoint_state *estate, enum afs_estate_trace where);
extern void afs_fileserver_probe_result(struct afs_call *);
-void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
- struct afs_addr_list *new_addrs, struct key *key);
+int afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
+ struct afs_addr_list *new_alist, struct key *key);
int afs_wait_for_fs_probes(struct afs_operation *op, struct afs_server_state *states, bool intr);
extern void afs_probe_fileserver(struct afs_net *, struct afs_server *);
extern void afs_fs_probe_dispatcher(struct work_struct *);
@@ -1205,10 +1247,13 @@ extern void afs_fs_probe_cleanup(struct afs_net *);
*/
extern const struct afs_operation_ops afs_fetch_status_operation;
+void afs_init_new_symlink(struct afs_vnode *vnode, struct afs_operation *op);
+const char *afs_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback);
+int afs_readlink(struct dentry *dentry, char __user *buffer, int buflen);
extern void afs_vnode_commit_status(struct afs_operation *, struct afs_vnode_param *);
extern int afs_fetch_status(struct afs_vnode *, struct key *, bool, afs_access_t *);
extern int afs_ilookup5_test_by_fid(struct inode *, void *);
-extern struct inode *afs_iget_pseudo_dir(struct super_block *, bool);
extern struct inode *afs_iget(struct afs_operation *, struct afs_vnode_param *);
extern struct inode *afs_root_iget(struct super_block *, struct key *);
extern int afs_getattr(struct mnt_idmap *idmap, const struct path *,
@@ -1336,6 +1381,7 @@ extern void afs_charge_preallocation(struct work_struct *);
extern void afs_put_call(struct afs_call *);
void afs_deferred_put_call(struct afs_call *call);
void afs_make_call(struct afs_call *call, gfp_t gfp);
+void afs_deliver_to_call(struct afs_call *call);
void afs_wait_for_call_to_complete(struct afs_call *call);
extern struct afs_call *afs_alloc_flat_call(struct afs_net *,
const struct afs_call_type *,
@@ -1346,6 +1392,28 @@ extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
extern int afs_extract_data(struct afs_call *, bool);
extern int afs_protocol_error(struct afs_call *, enum afs_eproto_cause);
+static inline struct afs_call *afs_get_call(struct afs_call *call,
+ enum afs_call_trace why)
+{
+ int r;
+
+ __refcount_inc(&call->ref, &r);
+
+ trace_afs_call(call->debug_id, why, r + 1,
+ atomic_read(&call->net->nr_outstanding_calls),
+ __builtin_return_address(0));
+ return call;
+}
+
+static inline void afs_see_call(struct afs_call *call, enum afs_call_trace why)
+{
+ int r = refcount_read(&call->ref);
+
+ trace_afs_call(call->debug_id, why, r,
+ atomic_read(&call->net->nr_outstanding_calls),
+ __builtin_return_address(0));
+}
+
static inline void afs_make_op_call(struct afs_operation *op, struct afs_call *call,
gfp_t gfp)
{
@@ -1467,20 +1535,30 @@ extern void __exit afs_clean_up_permit_cache(void);
*/
extern spinlock_t afs_server_peer_lock;
-extern struct afs_server *afs_find_server(struct afs_net *, const struct rxrpc_peer *);
-extern struct afs_server *afs_find_server_by_uuid(struct afs_net *, const uuid_t *);
+struct afs_server *afs_find_server(const struct rxrpc_peer *peer);
extern struct afs_server *afs_lookup_server(struct afs_cell *, struct key *, const uuid_t *, u32);
extern struct afs_server *afs_get_server(struct afs_server *, enum afs_server_trace);
-extern struct afs_server *afs_use_server(struct afs_server *, enum afs_server_trace);
-extern void afs_unuse_server(struct afs_net *, struct afs_server *, enum afs_server_trace);
-extern void afs_unuse_server_notime(struct afs_net *, struct afs_server *, enum afs_server_trace);
+struct afs_server *afs_use_server(struct afs_server *server, bool activate,
+ enum afs_server_trace reason);
+void afs_unuse_server(struct afs_net *net, struct afs_server *server,
+ enum afs_server_trace reason);
+void afs_unuse_server_notime(struct afs_net *net, struct afs_server *server,
+ enum afs_server_trace reason);
extern void afs_put_server(struct afs_net *, struct afs_server *, enum afs_server_trace);
-extern void afs_manage_servers(struct work_struct *);
-extern void afs_servers_timer(struct timer_list *);
+void afs_purge_servers(struct afs_cell *cell);
extern void afs_fs_probe_timer(struct timer_list *);
-extern void __net_exit afs_purge_servers(struct afs_net *);
+void __net_exit afs_wait_for_servers(struct afs_net *net);
bool afs_check_server_record(struct afs_operation *op, struct afs_server *server, struct key *key);
+static inline void afs_see_server(struct afs_server *server, enum afs_server_trace trace)
+{
+ int r = refcount_read(&server->ref);
+ int a = atomic_read(&server->active);
+
+ trace_afs_server(server->debug_id, r, a, trace);
+
+}
+
static inline void afs_inc_servers_outstanding(struct afs_net *net)
{
atomic_inc(&net->servers_outstanding);
@@ -1628,6 +1706,9 @@ extern void yfs_fs_remove_dir(struct afs_operation *);
extern void yfs_fs_link(struct afs_operation *);
extern void yfs_fs_symlink(struct afs_operation *);
extern void yfs_fs_rename(struct afs_operation *);
+void yfs_fs_rename_replace(struct afs_operation *op);
+void yfs_fs_rename_noreplace(struct afs_operation *op);
+void yfs_fs_rename_exchange(struct afs_operation *op);
extern void yfs_fs_store_data(struct afs_operation *);
extern void yfs_fs_setattr(struct afs_operation *);
extern void yfs_fs_get_volume_status(struct afs_operation *);
@@ -1712,6 +1793,38 @@ static inline int afs_bad(struct afs_vnode *vnode, enum afs_file_error where)
return -EIO;
}
+/*
+ * Set the callback promise on a vnode.
+ */
+static inline void afs_set_cb_promise(struct afs_vnode *vnode, time64_t expires_at,
+ enum afs_cb_promise_trace trace)
+{
+ atomic64_set(&vnode->cb_expires_at, expires_at);
+ trace_afs_cb_promise(vnode, trace);
+}
+
+/*
+ * Clear the callback promise on a vnode, returning true if it was promised.
+ */
+static inline bool afs_clear_cb_promise(struct afs_vnode *vnode,
+ enum afs_cb_promise_trace trace)
+{
+ trace_afs_cb_promise(vnode, trace);
+ return atomic64_xchg(&vnode->cb_expires_at, AFS_NO_CB_PROMISE) != AFS_NO_CB_PROMISE;
+}
+
+/*
+ * Mark a directory as being invalid.
+ */
+static inline void afs_invalidate_dir(struct afs_vnode *dvnode,
+ enum afs_dir_invalid_trace trace)
+{
+ if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
+ trace_afs_dir_invalid(dvnode, trace);
+ afs_stat_v(dvnode, n_inval);
+ }
+}
+
/*****************************************************************************/
/*
* debug tracing
diff --git a/fs/afs/main.c b/fs/afs/main.c
index a14f6013e316..e6bb8237db98 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -73,28 +73,21 @@ static int __net_init afs_net_init(struct net *net_ns)
generate_random_uuid((unsigned char *)&net->uuid);
INIT_WORK(&net->charge_preallocation_work, afs_charge_preallocation);
+ INIT_WORK(&net->rx_oob_work, afs_process_oob_queue);
mutex_init(&net->socket_mutex);
net->cells = RB_ROOT;
+ idr_init(&net->cells_dyn_ino);
init_rwsem(&net->cells_lock);
- INIT_WORK(&net->cells_manager, afs_manage_cells);
- timer_setup(&net->cells_timer, afs_cells_timer, 0);
-
mutex_init(&net->cells_alias_lock);
mutex_init(&net->proc_cells_lock);
INIT_HLIST_HEAD(&net->proc_cells);
seqlock_init(&net->fs_lock);
- net->fs_servers = RB_ROOT;
INIT_LIST_HEAD(&net->fs_probe_fast);
INIT_LIST_HEAD(&net->fs_probe_slow);
INIT_HLIST_HEAD(&net->fs_proc);
- INIT_HLIST_HEAD(&net->fs_addresses);
- seqlock_init(&net->fs_addr_lock);
-
- INIT_WORK(&net->fs_manager, afs_manage_servers);
- timer_setup(&net->fs_timer, afs_servers_timer, 0);
INIT_WORK(&net->fs_prober, afs_fs_probe_dispatcher);
timer_setup(&net->fs_probe_timer, afs_fs_probe_timer, 0);
atomic_set(&net->servers_outstanding, 1);
@@ -130,13 +123,14 @@ error_open_socket:
net->live = false;
afs_fs_probe_cleanup(net);
afs_cell_purge(net);
- afs_purge_servers(net);
+ afs_wait_for_servers(net);
error_cell_init:
net->live = false;
afs_proc_cleanup(net);
error_proc:
afs_put_sysnames(net->sysnames);
error_sysnames:
+ idr_destroy(&net->cells_dyn_ino);
net->live = false;
return ret;
}
@@ -151,10 +145,11 @@ static void __net_exit afs_net_exit(struct net *net_ns)
net->live = false;
afs_fs_probe_cleanup(net);
afs_cell_purge(net);
- afs_purge_servers(net);
+ afs_wait_for_servers(net);
afs_close_socket(net);
afs_proc_cleanup(net);
afs_put_sysnames(net->sysnames);
+ idr_destroy(&net->cells_dyn_ino);
kfree_rcu(rcu_access_pointer(net->address_prefs), rcu);
}
@@ -174,13 +169,13 @@ static int __init afs_init(void)
printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n");
- afs_wq = alloc_workqueue("afs", 0, 0);
+ afs_wq = alloc_workqueue("afs", WQ_PERCPU, 0);
if (!afs_wq)
goto error_afs_wq;
- afs_async_calls = alloc_workqueue("kafsd", WQ_MEM_RECLAIM, 0);
+ afs_async_calls = alloc_workqueue("kafsd", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (!afs_async_calls)
goto error_async;
- afs_lock_manager = alloc_workqueue("kafs_lockd", WQ_MEM_RECLAIM, 0);
+ afs_lock_manager = alloc_workqueue("kafs_lockd", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!afs_lock_manager)
goto error_lockmgr;
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index b8180bf2281f..c8a7f266080d 100644
--- a/fs/afs/misc.c
+++ b/fs/afs/misc.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
+#include <crypto/krb5.h>
#include "internal.h"
#include "afs_fs.h"
#include "protocol_uae.h"
@@ -103,7 +104,34 @@ int afs_abort_to_error(u32 abort_code)
case RXKADDATALEN: return -EKEYREJECTED;
case RXKADILLEGALLEVEL: return -EKEYREJECTED;
+ case RXGK_INCONSISTENCY: return -EPROTO;
+ case RXGK_PACKETSHORT: return -EPROTO;
+ case RXGK_BADCHALLENGE: return -EPROTO;
+ case RXGK_SEALEDINCON: return -EKEYREJECTED;
+ case RXGK_NOTAUTH: return -EKEYREJECTED;
+ case RXGK_EXPIRED: return -EKEYEXPIRED;
+ case RXGK_BADLEVEL: return -EKEYREJECTED;
+ case RXGK_BADKEYNO: return -EKEYREJECTED;
+ case RXGK_NOTRXGK: return -EKEYREJECTED;
+ case RXGK_UNSUPPORTED: return -EKEYREJECTED;
+ case RXGK_GSSERROR: return -EKEYREJECTED;
+#ifdef RXGK_BADETYPE
+ case RXGK_BADETYPE: return -ENOPKG;
+#endif
+#ifdef RXGK_BADTOKEN
+ case RXGK_BADTOKEN: return -EKEYREJECTED;
+#endif
+#ifdef RXGK_DATALEN
+ case RXGK_DATALEN: return -EPROTO;
+#endif
+#ifdef RXGK_BADQOP
+ case RXGK_BADQOP: return -EKEYREJECTED;
+#endif
+
+ case KRB5_PROG_KEYTYPE_NOSUPP: return -ENOPKG;
+
case RXGEN_OPCODE: return -ENOTSUPP;
+ case RX_INVALID_OPERATION: return -ENOTSUPP;
default: return -EREMOTEIO;
}
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 297487ee8323..57c204a3c04e 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -30,7 +30,7 @@ const struct file_operations afs_mntpt_file_operations = {
const struct inode_operations afs_mntpt_inode_operations = {
.lookup = afs_mntpt_lookup,
- .readlink = page_readlink,
+ .readlink = afs_readlink,
.getattr = afs_getattr,
};
@@ -87,7 +87,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
ctx->force = true;
}
if (ctx->cell) {
- afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_mntpt);
+ afs_unuse_cell(ctx->cell, afs_cell_trace_unuse_mntpt);
ctx->cell = NULL;
}
if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) {
@@ -107,7 +107,9 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
if (size > AFS_MAXCELLNAME)
return -ENAMETOOLONG;
- cell = afs_lookup_cell(ctx->net, p, size, NULL, false);
+ cell = afs_lookup_cell(ctx->net, p, size, NULL,
+ AFS_LOOKUP_CELL_MOUNTPOINT,
+ afs_cell_trace_use_lookup_mntpt);
if (IS_ERR(cell)) {
pr_err("kAFS: unable to lookup cell '%pd'\n", mntpt);
return PTR_ERR(cell);
@@ -118,9 +120,9 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
ctx->volnamesz = sizeof(afs_root_volume) - 1;
} else {
/* read the contents of the AFS special symlink */
- struct page *page;
+ DEFINE_DELAYED_CALL(cleanup);
+ const char *content;
loff_t size = i_size_read(d_inode(mntpt));
- char *buf;
if (src_as->cell)
ctx->cell = afs_use_cell(src_as->cell, afs_cell_trace_use_mntpt);
@@ -128,16 +130,17 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
if (size < 2 || size > PAGE_SIZE - 1)
return -EINVAL;
- page = read_mapping_page(d_inode(mntpt)->i_mapping, 0, NULL);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ content = afs_get_link(mntpt, d_inode(mntpt), &cleanup);
+ if (IS_ERR(content)) {
+ do_delayed_call(&cleanup);
+ return PTR_ERR(content);
+ }
- buf = kmap(page);
ret = -EINVAL;
- if (buf[size - 1] == '.')
- ret = vfs_parse_fs_string(fc, "source", buf, size - 1);
- kunmap(page);
- put_page(page);
+ if (content[size - 1] == '.')
+ ret = vfs_parse_fs_qstr(fc, "source",
+ &QSTR_LEN(content, size - 1));
+ do_delayed_call(&cleanup);
if (ret < 0)
return ret;
@@ -188,7 +191,6 @@ struct vfsmount *afs_d_automount(struct path *path)
if (IS_ERR(newmnt))
return newmnt;
- mntget(newmnt); /* prevent immediate expiration */
mnt_set_expiry(newmnt, &afs_vfsmounts);
queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer,
afs_mntpt_expiry_timeout * HZ);
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 15eab053af6d..44520549b509 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -122,14 +122,16 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
if (strcmp(buf, "add") == 0) {
struct afs_cell *cell;
- cell = afs_lookup_cell(net, name, strlen(name), args, true);
+ cell = afs_lookup_cell(net, name, strlen(name), args,
+ AFS_LOOKUP_CELL_PRELOAD,
+ afs_cell_trace_use_lookup_add);
if (IS_ERR(cell)) {
ret = PTR_ERR(cell);
goto done;
}
if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags))
- afs_unuse_cell(net, cell, afs_cell_trace_unuse_no_pin);
+ afs_unuse_cell(cell, afs_cell_trace_unuse_no_pin);
} else {
goto inval;
}
@@ -206,7 +208,7 @@ static int afs_proc_rootcell_show(struct seq_file *m, void *v)
net = afs_seq2net_single(m);
down_read(&net->cells_lock);
- cell = net->ws_cell;
+ cell = rcu_dereference_protected(net->ws_cell, lockdep_is_held(&net->cells_lock));
if (cell)
seq_printf(m, "%s\n", cell->name);
up_read(&net->cells_lock);
@@ -240,7 +242,13 @@ static int afs_proc_rootcell_write(struct file *file, char *buf, size_t size)
/* determine command to perform */
_debug("rootcell=%s", buf);
- ret = afs_cell_init(net, buf);
+ ret = -EEXIST;
+ inode_lock(file_inode(file));
+ if (!rcu_access_pointer(net->ws_cell))
+ ret = afs_cell_init(net, buf);
+ else
+ printk("busy\n");
+ inode_unlock(file_inode(file));
out:
_leave(" = %d", ret);
@@ -437,8 +445,6 @@ static int afs_proc_servers_show(struct seq_file *m, void *v)
}
server = list_entry(v, struct afs_server, proc_link);
- estate = rcu_dereference(server->endpoint_state);
- alist = estate->addresses;
seq_printf(m, "%pU %3d %3d %s\n",
&server->uuid,
refcount_read(&server->ref),
@@ -448,10 +454,16 @@ static int afs_proc_servers_show(struct seq_file *m, void *v)
server->flags, server->rtt);
seq_printf(m, " - probe: last=%d\n",
(int)(jiffies - server->probed_at) / HZ);
+
+ estate = rcu_dereference(server->endpoint_state);
+ if (!estate)
+ goto out;
failed = estate->failed_set;
seq_printf(m, " - ESTATE pq=%x np=%u rsp=%lx f=%lx\n",
estate->probe_seq, atomic_read(&estate->nr_probing),
estate->responsive_set, estate->failed_set);
+
+ alist = estate->addresses;
seq_printf(m, " - ALIST v=%u ap=%u\n",
alist->version, alist->addr_pref_version);
for (i = 0; i < alist->nr_addrs; i++) {
@@ -464,6 +476,8 @@ static int afs_proc_servers_show(struct seq_file *m, void *v)
rxrpc_kernel_get_srtt(addr->peer),
addr->last_error, addr->prio);
}
+
+out:
return 0;
}
diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
index e4cd89c44c46..b2f06c1917c2 100644
--- a/fs/afs/protocol_yfs.h
+++ b/fs/afs/protocol_yfs.h
@@ -50,6 +50,9 @@ enum YFS_FS_Operations {
YFSREMOVEACL = 64171,
YFSREMOVEFILE2 = 64173,
YFSSTOREOPAQUEACL2 = 64174,
+ YFSRENAME_REPLACE = 64176,
+ YFSRENAME_NOREPLACE = 64177,
+ YFSRENAME_EXCHANGE = 64187,
YFSINLINEBULKSTATUS = 64536, /* YFS Fetch multiple file statuses with errors */
YFSFETCHDATA64 = 64537, /* YFS Fetch file data */
YFSSTOREDATA64 = 64538, /* YFS Store file data */
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index d612983d6f38..6a4e7da10fc4 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -99,7 +99,7 @@ static bool afs_start_fs_iteration(struct afs_operation *op,
write_seqlock(&vnode->cb_lock);
ASSERTCMP(cb_server, ==, vnode->cb_server);
vnode->cb_server = NULL;
- if (atomic64_xchg(&vnode->cb_expires_at, AFS_NO_CB_PROMISE) != AFS_NO_CB_PROMISE)
+ if (afs_clear_cb_promise(vnode, afs_cb_promise_clear_rotate_server))
vnode->cb_break++;
write_sequnlock(&vnode->cb_lock);
}
@@ -432,6 +432,16 @@ bool afs_select_fileserver(struct afs_operation *op)
afs_op_set_error(op, -EDQUOT);
goto failed_but_online;
+ case RX_INVALID_OPERATION:
+ case RXGEN_OPCODE:
+ /* Handle downgrading to an older operation. */
+ afs_op_set_error(op, -ENOTSUPP);
+ if (op->flags & AFS_OPERATION_DOWNGRADE) {
+ op->flags &= ~AFS_OPERATION_DOWNGRADE;
+ goto go_again;
+ }
+ goto failed_but_online;
+
default:
afs_op_accumulate_error(op, error, abort_code);
failed_but_online:
@@ -583,7 +593,7 @@ selected_server:
if (vnode->cb_server != server) {
vnode->cb_server = server;
vnode->cb_v_check = atomic_read(&vnode->volume->cb_v_break);
- atomic64_set(&vnode->cb_expires_at, AFS_NO_CB_PROMISE);
+ afs_clear_cb_promise(vnode, afs_cb_promise_clear_server_change);
}
retry_server:
@@ -620,12 +630,13 @@ iterate_address:
op->addr_index = addr_index;
set_bit(addr_index, &op->addr_tried);
- op->volsync.creation = TIME64_MIN;
- op->volsync.update = TIME64_MIN;
- op->call_responded = false;
_debug("address [%u] %u/%u %pISp",
op->server_index, addr_index, alist->nr_addrs,
rxrpc_kernel_remote_addr(alist->addrs[op->addr_index].peer));
+go_again:
+ op->volsync.creation = TIME64_MIN;
+ op->volsync.update = TIME64_MIN;
+ op->call_responded = false;
_leave(" = t");
return true;
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 9f2a3bb56ec6..bf0e4ea0aafd 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -24,8 +24,17 @@ static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned
static void afs_process_async_call(struct work_struct *);
static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
+static void afs_rx_attach(struct rxrpc_call *rxcall, unsigned long user_call_ID);
+static void afs_rx_notify_oob(struct sock *sk, struct sk_buff *oob);
static int afs_deliver_cm_op_id(struct afs_call *);
+static const struct rxrpc_kernel_ops afs_rxrpc_callback_ops = {
+ .notify_new_call = afs_rx_new_call,
+ .discard_new_call = afs_rx_discard_new_call,
+ .user_attach_call = afs_rx_attach,
+ .notify_oob = afs_rx_notify_oob,
+};
+
/* asynchronous incoming call initial processing */
static const struct afs_call_type afs_RXCMxxxx = {
.name = "CB.xxxx",
@@ -49,6 +58,7 @@ int afs_open_socket(struct afs_net *net)
goto error_1;
socket->sk->sk_allocation = GFP_NOFS;
+ socket->sk->sk_user_data = net;
/* bind the callback manager's address to make this a server socket */
memset(&srx, 0, sizeof(srx));
@@ -64,16 +74,24 @@ int afs_open_socket(struct afs_net *net)
if (ret < 0)
goto error_2;
- ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
+ ret = rxrpc_sock_set_manage_response(socket->sk, true);
+ if (ret < 0)
+ goto error_2;
+
+ ret = afs_create_token_key(net, socket);
+ if (ret < 0)
+ pr_err("Couldn't create RxGK CM key: %d\n", ret);
+
+ ret = kernel_bind(socket, (struct sockaddr_unsized *) &srx, sizeof(srx));
if (ret == -EADDRINUSE) {
srx.transport.sin6.sin6_port = 0;
- ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
+ ret = kernel_bind(socket, (struct sockaddr_unsized *) &srx, sizeof(srx));
}
if (ret < 0)
goto error_2;
srx.srx_service = YFS_CM_SERVICE;
- ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
+ ret = kernel_bind(socket, (struct sockaddr_unsized *) &srx, sizeof(srx));
if (ret < 0)
goto error_2;
@@ -84,8 +102,7 @@ int afs_open_socket(struct afs_net *net)
* it sends back to us.
*/
- rxrpc_kernel_new_call_notification(socket, afs_rx_new_call,
- afs_rx_discard_new_call);
+ rxrpc_kernel_set_notifications(socket, &afs_rxrpc_callback_ops);
ret = kernel_listen(socket, INT_MAX);
if (ret < 0)
@@ -125,7 +142,9 @@ void afs_close_socket(struct afs_net *net)
kernel_sock_shutdown(net->socket, SHUT_RDWR);
flush_workqueue(afs_async_calls);
+ net->socket->sk->sk_user_data = NULL;
sock_release(net->socket);
+ key_put(net->fs_cm_token_key);
_debug("dework");
_leave("");
@@ -149,7 +168,8 @@ static struct afs_call *afs_alloc_call(struct afs_net *net,
call->net = net;
call->debug_id = atomic_inc_return(&rxrpc_debug_id);
refcount_set(&call->ref, 1);
- INIT_WORK(&call->async_work, afs_process_async_call);
+ INIT_WORK(&call->async_work, type->async_rx ?: afs_process_async_call);
+ INIT_WORK(&call->work, call->type->work);
INIT_WORK(&call->free_work, afs_deferred_free_worker);
init_waitqueue_head(&call->waitq);
spin_lock_init(&call->state_lock);
@@ -178,7 +198,7 @@ static void afs_free_call(struct afs_call *call)
if (call->type->destructor)
call->type->destructor(call);
- afs_unuse_server_notime(call->net, call->server, afs_server_trace_put_call);
+ afs_unuse_server_notime(call->net, call->server, afs_server_trace_unuse_call);
kfree(call->request);
o = atomic_read(&net->nr_outstanding_calls);
@@ -235,27 +255,12 @@ void afs_deferred_put_call(struct afs_call *call)
schedule_work(&call->free_work);
}
-static struct afs_call *afs_get_call(struct afs_call *call,
- enum afs_call_trace why)
-{
- int r;
-
- __refcount_inc(&call->ref, &r);
-
- trace_afs_call(call->debug_id, why, r + 1,
- atomic_read(&call->net->nr_outstanding_calls),
- __builtin_return_address(0));
- return call;
-}
-
/*
* Queue the call for actual work.
*/
static void afs_queue_call_work(struct afs_call *call)
{
if (call->type->work) {
- INIT_WORK(&call->work, call->type->work);
-
afs_get_call(call, afs_call_trace_work);
if (!queue_work(afs_wq, &call->work))
afs_put_call(call);
@@ -430,11 +435,16 @@ void afs_make_call(struct afs_call *call, gfp_t gfp)
return;
error_do_abort:
- if (ret != -ECONNABORTED) {
+ if (ret != -ECONNABORTED)
rxrpc_kernel_abort_call(call->net->socket, rxcall,
RX_USER_ABORT, ret,
afs_abort_send_data_error);
- } else {
+ if (call->async) {
+ afs_see_call(call, afs_call_trace_async_abort);
+ return;
+ }
+
+ if (ret == -ECONNABORTED) {
len = 0;
iov_iter_kvec(&msg.msg_iter, ITER_DEST, NULL, 0, 0);
rxrpc_kernel_recv_data(call->net->socket, rxcall,
@@ -445,8 +455,10 @@ error_do_abort:
call->error = ret;
trace_afs_call_done(call);
error_kill_call:
- if (call->type->done)
- call->type->done(call);
+ if (call->async)
+ afs_see_call(call, afs_call_trace_async_kill);
+ if (call->type->immediate_cancel)
+ call->type->immediate_cancel(call);
/* We need to dispose of the extra ref we grabbed for an async call.
* The call, however, might be queued on afs_async_calls and we need to
@@ -501,7 +513,7 @@ static void afs_log_error(struct afs_call *call, s32 remote_abort)
/*
* deliver messages to a call
*/
-static void afs_deliver_to_call(struct afs_call *call)
+void afs_deliver_to_call(struct afs_call *call)
{
enum afs_call_state state;
size_t len;
@@ -602,7 +614,6 @@ local_abort:
abort_code = 0;
call_complete:
afs_set_call_complete(call, ret, remote_abort);
- state = AFS_CALL_COMPLETE;
goto done;
}
@@ -746,7 +757,6 @@ void afs_charge_preallocation(struct work_struct *work)
if (rxrpc_kernel_charge_accept(net->socket,
afs_wake_up_async_call,
- afs_rx_attach,
(unsigned long)call,
GFP_KERNEL,
call->debug_id) < 0)
@@ -774,8 +784,14 @@ static void afs_rx_discard_new_call(struct rxrpc_call *rxcall,
static void afs_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall,
unsigned long user_call_ID)
{
+ struct afs_call *call = (struct afs_call *)user_call_ID;
struct afs_net *net = afs_sock2net(sk);
+ call->peer = rxrpc_kernel_get_call_peer(sk->sk_socket, call->rxcall);
+ call->server = afs_find_server(call->peer);
+ if (!call->server)
+ trace_afs_cm_no_server(call, rxrpc_kernel_remote_srx(call->peer));
+
queue_work(afs_wq, &net->charge_preallocation_work);
}
@@ -802,9 +818,14 @@ static int afs_deliver_cm_op_id(struct afs_call *call)
if (!afs_cm_incoming_call(call))
return -ENOTSUPP;
+ call->security_ix = rxrpc_kernel_query_call_security(call->rxcall,
+ &call->service_id,
+ &call->enctype);
+
trace_afs_cb_call(call);
+ call->work.func = call->type->work;
- /* pass responsibility for the remainer of this message off to the
+ /* pass responsibility for the remainder of this message off to the
* cache manager op */
return call->type->deliver(call);
}
@@ -953,3 +974,13 @@ noinline int afs_protocol_error(struct afs_call *call,
call->unmarshalling_error = true;
return -EBADMSG;
}
+
+/*
+ * Wake up OOB notification processing.
+ */
+static void afs_rx_notify_oob(struct sock *sk, struct sk_buff *oob)
+{
+ struct afs_net *net = sk->sk_user_data;
+
+ schedule_work(&net->rx_oob_work);
+}
diff --git a/fs/afs/security.c b/fs/afs/security.c
index 6a7744c9e2a2..55ddce94af03 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -16,6 +16,31 @@
static DEFINE_HASHTABLE(afs_permits_cache, 10);
static DEFINE_SPINLOCK(afs_permits_lock);
+static DEFINE_MUTEX(afs_key_lock);
+
+/*
+ * Allocate a key to use as a placeholder for anonymous user security.
+ */
+static int afs_alloc_anon_key(struct afs_cell *cell)
+{
+ struct key *key;
+
+ mutex_lock(&afs_key_lock);
+ key = cell->anonymous_key;
+ if (!key) {
+ key = rxrpc_get_null_key(cell->key_desc);
+ if (!IS_ERR(key))
+ cell->anonymous_key = key;
+ }
+ mutex_unlock(&afs_key_lock);
+
+ if (IS_ERR(key))
+ return PTR_ERR(key);
+
+ _debug("anon key %p{%x}",
+ cell->anonymous_key, key_serial(cell->anonymous_key));
+ return 0;
+}
/*
* get a key
@@ -23,11 +48,12 @@ static DEFINE_SPINLOCK(afs_permits_lock);
struct key *afs_request_key(struct afs_cell *cell)
{
struct key *key;
+ int ret;
- _enter("{%x}", key_serial(cell->anonymous_key));
+ _enter("{%s}", cell->key_desc);
- _debug("key %s", cell->anonymous_key->description);
- key = request_key_net(&key_type_rxrpc, cell->anonymous_key->description,
+ _debug("key %s", cell->key_desc);
+ key = request_key_net(&key_type_rxrpc, cell->key_desc,
cell->net->net, NULL);
if (IS_ERR(key)) {
if (PTR_ERR(key) != -ENOKEY) {
@@ -35,6 +61,12 @@ struct key *afs_request_key(struct afs_cell *cell)
return key;
}
+ if (!cell->anonymous_key) {
+ ret = afs_alloc_anon_key(cell);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ }
+
/* act as anonymous user */
_leave(" = {%x} [anon]", key_serial(cell->anonymous_key));
return key_get(cell->anonymous_key);
@@ -52,11 +84,10 @@ struct key *afs_request_key_rcu(struct afs_cell *cell)
{
struct key *key;
- _enter("{%x}", key_serial(cell->anonymous_key));
+ _enter("{%s}", cell->key_desc);
- _debug("key %s", cell->anonymous_key->description);
- key = request_key_net_rcu(&key_type_rxrpc,
- cell->anonymous_key->description,
+ _debug("key %s", cell->key_desc);
+ key = request_key_net_rcu(&key_type_rxrpc, cell->key_desc,
cell->net->net);
if (IS_ERR(key)) {
if (PTR_ERR(key) != -ENOKEY) {
@@ -65,6 +96,8 @@ struct key *afs_request_key_rcu(struct afs_cell *cell)
}
/* act as anonymous user */
+ if (!cell->anonymous_key)
+ return NULL; /* Need to allocate */
_leave(" = {%x} [anon]", key_serial(cell->anonymous_key));
return key_get(cell->anonymous_key);
} else {
@@ -408,7 +441,7 @@ int afs_permission(struct mnt_idmap *idmap, struct inode *inode,
if (mask & MAY_NOT_BLOCK) {
key = afs_request_key_rcu(vnode->volume->cell);
- if (IS_ERR(key))
+ if (IS_ERR_OR_NULL(key))
return -ECHILD;
ret = -ECHILD;
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 038f9d0ae3af..c4428ebddb1d 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -14,188 +14,104 @@
static unsigned afs_server_gc_delay = 10; /* Server record timeout in seconds */
static atomic_t afs_server_debug_id;
-static struct afs_server *afs_maybe_use_server(struct afs_server *,
- enum afs_server_trace);
static void __afs_put_server(struct afs_net *, struct afs_server *);
+static void afs_server_timer(struct timer_list *timer);
+static void afs_server_destroyer(struct work_struct *work);
/*
* Find a server by one of its addresses.
*/
-struct afs_server *afs_find_server(struct afs_net *net, const struct rxrpc_peer *peer)
+struct afs_server *afs_find_server(const struct rxrpc_peer *peer)
{
- const struct afs_endpoint_state *estate;
- const struct afs_addr_list *alist;
- struct afs_server *server = NULL;
- unsigned int i;
- int seq = 1;
+ struct afs_server *server = (struct afs_server *)rxrpc_kernel_get_peer_data(peer);
- rcu_read_lock();
-
- do {
- if (server)
- afs_unuse_server_notime(net, server, afs_server_trace_put_find_rsq);
- server = NULL;
- seq++; /* 2 on the 1st/lockless path, otherwise odd */
- read_seqbegin_or_lock(&net->fs_addr_lock, &seq);
-
- hlist_for_each_entry_rcu(server, &net->fs_addresses, addr_link) {
- estate = rcu_dereference(server->endpoint_state);
- alist = estate->addresses;
- for (i = 0; i < alist->nr_addrs; i++)
- if (alist->addrs[i].peer == peer)
- goto found;
- }
-
- server = NULL;
- continue;
- found:
- server = afs_maybe_use_server(server, afs_server_trace_get_by_addr);
-
- } while (need_seqretry(&net->fs_addr_lock, seq));
-
- done_seqretry(&net->fs_addr_lock, seq);
-
- rcu_read_unlock();
- return server;
+ if (!server)
+ return NULL;
+ return afs_use_server(server, false, afs_server_trace_use_cm_call);
}
/*
- * Look up a server by its UUID and mark it active.
+ * Look up a server by its UUID and mark it active. The caller must hold
+ * cell->fs_lock.
*/
-struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uuid)
+static struct afs_server *afs_find_server_by_uuid(struct afs_cell *cell, const uuid_t *uuid)
{
- struct afs_server *server = NULL;
+ struct afs_server *server;
struct rb_node *p;
- int diff, seq = 1;
+ int diff;
_enter("%pU", uuid);
- do {
- /* Unfortunately, rbtree walking doesn't give reliable results
- * under just the RCU read lock, so we have to check for
- * changes.
- */
- if (server)
- afs_unuse_server(net, server, afs_server_trace_put_uuid_rsq);
- server = NULL;
- seq++; /* 2 on the 1st/lockless path, otherwise odd */
- read_seqbegin_or_lock(&net->fs_lock, &seq);
-
- p = net->fs_servers.rb_node;
- while (p) {
- server = rb_entry(p, struct afs_server, uuid_rb);
-
- diff = memcmp(uuid, &server->uuid, sizeof(*uuid));
- if (diff < 0) {
- p = p->rb_left;
- } else if (diff > 0) {
- p = p->rb_right;
- } else {
- afs_use_server(server, afs_server_trace_get_by_uuid);
- break;
- }
-
- server = NULL;
- }
- } while (need_seqretry(&net->fs_lock, seq));
+ p = cell->fs_servers.rb_node;
+ while (p) {
+ server = rb_entry(p, struct afs_server, uuid_rb);
- done_seqretry(&net->fs_lock, seq);
+ diff = memcmp(uuid, &server->uuid, sizeof(*uuid));
+ if (diff < 0) {
+ p = p->rb_left;
+ } else if (diff > 0) {
+ p = p->rb_right;
+ } else {
+ if (test_bit(AFS_SERVER_FL_UNCREATED, &server->flags))
+ return NULL; /* Need a write lock */
+ afs_use_server(server, true, afs_server_trace_use_by_uuid);
+ return server;
+ }
+ }
- _leave(" = %p", server);
- return server;
+ return NULL;
}
/*
- * Install a server record in the namespace tree. If there's a clash, we stick
- * it into a list anchored on whichever afs_server struct is actually in the
- * tree.
+ * Install a server record in the cell tree. The caller must hold an exclusive
+ * lock on cell->fs_lock.
*/
static struct afs_server *afs_install_server(struct afs_cell *cell,
- struct afs_server *candidate)
+ struct afs_server **candidate)
{
- const struct afs_endpoint_state *estate;
- const struct afs_addr_list *alist;
- struct afs_server *server, *next;
+ struct afs_server *server;
struct afs_net *net = cell->net;
struct rb_node **pp, *p;
int diff;
_enter("%p", candidate);
- write_seqlock(&net->fs_lock);
-
/* Firstly install the server in the UUID lookup tree */
- pp = &net->fs_servers.rb_node;
+ pp = &cell->fs_servers.rb_node;
p = NULL;
while (*pp) {
p = *pp;
_debug("- consider %p", p);
server = rb_entry(p, struct afs_server, uuid_rb);
- diff = memcmp(&candidate->uuid, &server->uuid, sizeof(uuid_t));
- if (diff < 0) {
+ diff = memcmp(&(*candidate)->uuid, &server->uuid, sizeof(uuid_t));
+ if (diff < 0)
pp = &(*pp)->rb_left;
- } else if (diff > 0) {
+ else if (diff > 0)
pp = &(*pp)->rb_right;
- } else {
- if (server->cell == cell)
- goto exists;
-
- /* We have the same UUID representing servers in
- * different cells. Append the new server to the list.
- */
- for (;;) {
- next = rcu_dereference_protected(
- server->uuid_next,
- lockdep_is_held(&net->fs_lock.lock));
- if (!next)
- break;
- server = next;
- }
- rcu_assign_pointer(server->uuid_next, candidate);
- candidate->uuid_prev = server;
- server = candidate;
- goto added_dup;
- }
+ else
+ goto exists;
}
- server = candidate;
+ server = *candidate;
+ *candidate = NULL;
rb_link_node(&server->uuid_rb, p, pp);
- rb_insert_color(&server->uuid_rb, &net->fs_servers);
+ rb_insert_color(&server->uuid_rb, &cell->fs_servers);
+ write_seqlock(&net->fs_lock);
hlist_add_head_rcu(&server->proc_link, &net->fs_proc);
+ write_sequnlock(&net->fs_lock);
-added_dup:
- write_seqlock(&net->fs_addr_lock);
- estate = rcu_dereference_protected(server->endpoint_state,
- lockdep_is_held(&net->fs_addr_lock.lock));
- alist = estate->addresses;
-
- /* Secondly, if the server has any IPv4 and/or IPv6 addresses, install
- * it in the IPv4 and/or IPv6 reverse-map lists.
- *
- * TODO: For speed we want to use something other than a flat list
- * here; even sorting the list in terms of lowest address would help a
- * bit, but anything we might want to do gets messy and memory
- * intensive.
- */
- if (alist->nr_addrs > 0)
- hlist_add_head_rcu(&server->addr_link, &net->fs_addresses);
-
- write_sequnlock(&net->fs_addr_lock);
+ afs_get_cell(cell, afs_cell_trace_get_server);
exists:
- afs_get_server(server, afs_server_trace_get_install);
- write_sequnlock(&net->fs_lock);
+ afs_use_server(server, true, afs_server_trace_use_install);
return server;
}
/*
- * Allocate a new server record and mark it active.
+ * Allocate a new server record and mark it as active but uncreated.
*/
-static struct afs_server *afs_alloc_server(struct afs_cell *cell,
- const uuid_t *uuid,
- struct afs_addr_list *alist)
+static struct afs_server *afs_alloc_server(struct afs_cell *cell, const uuid_t *uuid)
{
- struct afs_endpoint_state *estate;
struct afs_server *server;
struct afs_net *net = cell->net;
@@ -203,65 +119,50 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell,
server = kzalloc(sizeof(struct afs_server), GFP_KERNEL);
if (!server)
- goto enomem;
-
- estate = kzalloc(sizeof(struct afs_endpoint_state), GFP_KERNEL);
- if (!estate)
- goto enomem_server;
+ return NULL;
refcount_set(&server->ref, 1);
- atomic_set(&server->active, 1);
+ atomic_set(&server->active, 0);
+ __set_bit(AFS_SERVER_FL_UNCREATED, &server->flags);
server->debug_id = atomic_inc_return(&afs_server_debug_id);
- server->addr_version = alist->version;
server->uuid = *uuid;
rwlock_init(&server->fs_lock);
+ INIT_WORK(&server->destroyer, &afs_server_destroyer);
+ timer_setup(&server->timer, afs_server_timer, 0);
INIT_LIST_HEAD(&server->volumes);
init_waitqueue_head(&server->probe_wq);
+ mutex_init(&server->cm_token_lock);
INIT_LIST_HEAD(&server->probe_link);
+ INIT_HLIST_NODE(&server->proc_link);
spin_lock_init(&server->probe_lock);
server->cell = cell;
server->rtt = UINT_MAX;
server->service_id = FS_SERVICE;
-
server->probe_counter = 1;
server->probed_at = jiffies - LONG_MAX / 2;
- refcount_set(&estate->ref, 1);
- estate->addresses = alist;
- estate->server_id = server->debug_id;
- estate->probe_seq = 1;
- rcu_assign_pointer(server->endpoint_state, estate);
afs_inc_servers_outstanding(net);
- trace_afs_server(server->debug_id, 1, 1, afs_server_trace_alloc);
- trace_afs_estate(estate->server_id, estate->probe_seq, refcount_read(&estate->ref),
- afs_estate_trace_alloc_server);
_leave(" = %p", server);
return server;
-
-enomem_server:
- kfree(server);
-enomem:
- _leave(" = NULL [nomem]");
- return NULL;
}
/*
* Look up an address record for a server
*/
-static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell,
- struct key *key, const uuid_t *uuid)
+static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_server *server,
+ struct key *key)
{
struct afs_vl_cursor vc;
struct afs_addr_list *alist = NULL;
int ret;
ret = -ERESTARTSYS;
- if (afs_begin_vlserver_operation(&vc, cell, key)) {
+ if (afs_begin_vlserver_operation(&vc, server->cell, key)) {
while (afs_select_vlserver(&vc)) {
if (test_bit(AFS_VLSERVER_FL_IS_YFS, &vc.server->flags))
- alist = afs_yfsvl_get_endpoints(&vc, uuid);
+ alist = afs_yfsvl_get_endpoints(&vc, &server->uuid);
else
- alist = afs_vl_get_addrs_u(&vc, uuid);
+ alist = afs_vl_get_addrs_u(&vc, &server->uuid);
}
ret = afs_end_vlserver_operation(&vc);
@@ -271,72 +172,122 @@ static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell,
}
/*
- * Get or create a fileserver record.
+ * Get or create a fileserver record and return it with an active-use count on
+ * it.
*/
struct afs_server *afs_lookup_server(struct afs_cell *cell, struct key *key,
const uuid_t *uuid, u32 addr_version)
{
- struct afs_addr_list *alist;
- struct afs_server *server, *candidate;
+ struct afs_addr_list *alist = NULL;
+ struct afs_server *server, *candidate = NULL;
+ bool creating = false;
+ int ret;
_enter("%p,%pU", cell->net, uuid);
- server = afs_find_server_by_uuid(cell->net, uuid);
+ down_read(&cell->fs_lock);
+ server = afs_find_server_by_uuid(cell, uuid);
+ /* Won't see servers marked uncreated. */
+ up_read(&cell->fs_lock);
+
if (server) {
+ timer_delete_sync(&server->timer);
+ if (test_bit(AFS_SERVER_FL_CREATING, &server->flags))
+ goto wait_for_creation;
if (server->addr_version != addr_version)
set_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags);
return server;
}
- alist = afs_vl_lookup_addrs(cell, key, uuid);
- if (IS_ERR(alist))
- return ERR_CAST(alist);
-
- candidate = afs_alloc_server(cell, uuid, alist);
+ candidate = afs_alloc_server(cell, uuid);
if (!candidate) {
afs_put_addrlist(alist, afs_alist_trace_put_server_oom);
return ERR_PTR(-ENOMEM);
}
- server = afs_install_server(cell, candidate);
- if (server != candidate) {
- afs_put_addrlist(alist, afs_alist_trace_put_server_dup);
+ down_write(&cell->fs_lock);
+ server = afs_install_server(cell, &candidate);
+ if (test_bit(AFS_SERVER_FL_CREATING, &server->flags)) {
+ /* We need to wait for creation to complete. */
+ up_write(&cell->fs_lock);
+ goto wait_for_creation;
+ }
+ if (test_bit(AFS_SERVER_FL_UNCREATED, &server->flags)) {
+ set_bit(AFS_SERVER_FL_CREATING, &server->flags);
+ clear_bit(AFS_SERVER_FL_UNCREATED, &server->flags);
+ creating = true;
+ }
+ up_write(&cell->fs_lock);
+ timer_delete_sync(&server->timer);
+
+ /* If we get to create the server, we look up the addresses and then
+ * immediately dispatch an asynchronous probe to each interface on the
+ * fileserver. This will make sure the repeat-probing service is
+ * started.
+ */
+ if (creating) {
+ alist = afs_vl_lookup_addrs(server, key);
+ if (IS_ERR(alist)) {
+ ret = PTR_ERR(alist);
+ goto create_failed;
+ }
+
+ ret = afs_fs_probe_fileserver(cell->net, server, alist, key);
+ if (ret)
+ goto create_failed;
+
+ clear_and_wake_up_bit(AFS_SERVER_FL_CREATING, &server->flags);
+ }
+
+out:
+ afs_put_addrlist(alist, afs_alist_trace_put_server_create);
+ if (candidate) {
+ kfree(rcu_access_pointer(candidate->endpoint_state));
kfree(candidate);
- } else {
- /* Immediately dispatch an asynchronous probe to each interface
- * on the fileserver. This will make sure the repeat-probing
- * service is started.
- */
- afs_fs_probe_fileserver(cell->net, server, alist, key);
+ afs_dec_servers_outstanding(cell->net);
+ }
+ return server ?: ERR_PTR(ret);
+
+wait_for_creation:
+ afs_see_server(server, afs_server_trace_wait_create);
+ wait_on_bit(&server->flags, AFS_SERVER_FL_CREATING, TASK_UNINTERRUPTIBLE);
+ if (test_bit_acquire(AFS_SERVER_FL_UNCREATED, &server->flags)) {
+ /* Barrier: read flag before error */
+ ret = READ_ONCE(server->create_error);
+ afs_put_server(cell->net, server, afs_server_trace_unuse_create_fail);
+ server = NULL;
+ goto out;
}
- return server;
-}
+ ret = 0;
+ goto out;
-/*
- * Set the server timer to fire after a given delay, assuming it's not already
- * set for an earlier time.
- */
-static void afs_set_server_timer(struct afs_net *net, time64_t delay)
-{
- if (net->live) {
- afs_inc_servers_outstanding(net);
- if (timer_reduce(&net->fs_timer, jiffies + delay * HZ))
- afs_dec_servers_outstanding(net);
+create_failed:
+ down_write(&cell->fs_lock);
+
+ WRITE_ONCE(server->create_error, ret);
+ smp_wmb(); /* Barrier: set error before flag. */
+ set_bit(AFS_SERVER_FL_UNCREATED, &server->flags);
+
+ clear_and_wake_up_bit(AFS_SERVER_FL_CREATING, &server->flags);
+
+ if (test_bit(AFS_SERVER_FL_UNCREATED, &server->flags)) {
+ clear_bit(AFS_SERVER_FL_UNCREATED, &server->flags);
+ creating = true;
}
+ afs_unuse_server(cell->net, server, afs_server_trace_unuse_create_fail);
+ server = NULL;
+
+ up_write(&cell->fs_lock);
+ goto out;
}
/*
- * Server management timer. We have an increment on fs_outstanding that we
- * need to pass along to the work item.
+ * Set/reduce a server's timer.
*/
-void afs_servers_timer(struct timer_list *timer)
+static void afs_set_server_timer(struct afs_server *server, unsigned int delay_secs)
{
- struct afs_net *net = container_of(timer, struct afs_net, fs_timer);
-
- _enter("");
- if (!queue_work(afs_wq, &net->fs_manager))
- afs_dec_servers_outstanding(net);
+ mod_timer(&server->timer, jiffies + delay_secs * HZ);
}
/*
@@ -355,32 +306,20 @@ struct afs_server *afs_get_server(struct afs_server *server,
}
/*
- * Try to get a reference on a server object.
+ * Get an active count on a server object and maybe remove from the inactive
+ * list.
*/
-static struct afs_server *afs_maybe_use_server(struct afs_server *server,
- enum afs_server_trace reason)
-{
- unsigned int a;
- int r;
-
- if (!__refcount_inc_not_zero(&server->ref, &r))
- return NULL;
-
- a = atomic_inc_return(&server->active);
- trace_afs_server(server->debug_id, r + 1, a, reason);
- return server;
-}
-
-/*
- * Get an active count on a server object.
- */
-struct afs_server *afs_use_server(struct afs_server *server, enum afs_server_trace reason)
+struct afs_server *afs_use_server(struct afs_server *server, bool activate,
+ enum afs_server_trace reason)
{
unsigned int a;
int r;
__refcount_inc(&server->ref, &r);
a = atomic_inc_return(&server->active);
+ if (a == 1 && activate &&
+ !test_bit(AFS_SERVER_FL_EXPIRED, &server->flags))
+ timer_delete(&server->timer);
trace_afs_server(server->debug_id, r + 1, a, reason);
return server;
@@ -392,13 +331,14 @@ struct afs_server *afs_use_server(struct afs_server *server, enum afs_server_tra
void afs_put_server(struct afs_net *net, struct afs_server *server,
enum afs_server_trace reason)
{
- unsigned int a, debug_id = server->debug_id;
+ unsigned int a, debug_id;
bool zero;
int r;
if (!server)
return;
+ debug_id = server->debug_id;
a = atomic_read(&server->active);
zero = __refcount_dec_and_test(&server->ref, &r);
trace_afs_server(debug_id, r - 1, a, reason);
@@ -413,13 +353,16 @@ void afs_put_server(struct afs_net *net, struct afs_server *server,
void afs_unuse_server_notime(struct afs_net *net, struct afs_server *server,
enum afs_server_trace reason)
{
- if (server) {
- unsigned int active = atomic_dec_return(&server->active);
+ if (!server)
+ return;
- if (active == 0)
- afs_set_server_timer(net, afs_server_gc_delay);
- afs_put_server(net, server, reason);
+ if (atomic_dec_and_test(&server->active)) {
+ if (test_bit(AFS_SERVER_FL_EXPIRED, &server->flags) ||
+ READ_ONCE(server->cell->state) >= AFS_CELL_REMOVING)
+ schedule_work(&server->destroyer);
}
+
+ afs_put_server(net, server, reason);
}
/*
@@ -428,10 +371,22 @@ void afs_unuse_server_notime(struct afs_net *net, struct afs_server *server,
void afs_unuse_server(struct afs_net *net, struct afs_server *server,
enum afs_server_trace reason)
{
- if (server) {
- server->unuse_time = ktime_get_real_seconds();
- afs_unuse_server_notime(net, server, reason);
+ if (!server)
+ return;
+
+ if (atomic_dec_and_test(&server->active)) {
+ if (!test_bit(AFS_SERVER_FL_EXPIRED, &server->flags) &&
+ READ_ONCE(server->cell->state) < AFS_CELL_REMOVING) {
+ time64_t unuse_time = ktime_get_real_seconds();
+
+ server->unuse_time = unuse_time;
+ afs_set_server_timer(server, afs_server_gc_delay);
+ } else {
+ schedule_work(&server->destroyer);
+ }
}
+
+ afs_put_server(net, server, reason);
}
static void afs_server_rcu(struct rcu_head *rcu)
@@ -442,6 +397,8 @@ static void afs_server_rcu(struct rcu_head *rcu)
atomic_read(&server->active), afs_server_trace_free);
afs_put_endpoint_state(rcu_access_pointer(server->endpoint_state),
afs_estate_trace_put_server);
+ afs_put_cell(server->cell, afs_cell_trace_put_server);
+ kfree(server->cm_rxgk_appdata.data);
kfree(server);
}
@@ -460,159 +417,119 @@ static void afs_give_up_callbacks(struct afs_net *net, struct afs_server *server
}
/*
- * destroy a dead server
+ * Check to see if the server record has expired.
*/
-static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
+static bool afs_has_server_expired(const struct afs_server *server)
{
- if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
- afs_give_up_callbacks(net, server);
+ time64_t expires_at;
- afs_put_server(net, server, afs_server_trace_destroy);
+ if (atomic_read(&server->active))
+ return false;
+
+ if (!server->cell->net->live ||
+ server->cell->state >= AFS_CELL_REMOVING) {
+ trace_afs_server(server->debug_id, refcount_read(&server->ref),
+ 0, afs_server_trace_purging);
+ return true;
+ }
+
+ expires_at = server->unuse_time;
+ if (!test_bit(AFS_SERVER_FL_VL_FAIL, &server->flags) &&
+ !test_bit(AFS_SERVER_FL_NOT_FOUND, &server->flags))
+ expires_at += afs_server_gc_delay;
+
+ return ktime_get_real_seconds() > expires_at;
}
/*
- * Garbage collect any expired servers.
+ * Remove a server record from its parent cell's database.
*/
-static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list)
+static bool afs_remove_server_from_cell(struct afs_server *server)
{
- struct afs_server *server, *next, *prev;
- int active;
-
- while ((server = gc_list)) {
- gc_list = server->gc_next;
-
- write_seqlock(&net->fs_lock);
-
- active = atomic_read(&server->active);
- if (active == 0) {
- trace_afs_server(server->debug_id, refcount_read(&server->ref),
- active, afs_server_trace_gc);
- next = rcu_dereference_protected(
- server->uuid_next, lockdep_is_held(&net->fs_lock.lock));
- prev = server->uuid_prev;
- if (!prev) {
- /* The one at the front is in the tree */
- if (!next) {
- rb_erase(&server->uuid_rb, &net->fs_servers);
- } else {
- rb_replace_node_rcu(&server->uuid_rb,
- &next->uuid_rb,
- &net->fs_servers);
- next->uuid_prev = NULL;
- }
- } else {
- /* This server is not at the front */
- rcu_assign_pointer(prev->uuid_next, next);
- if (next)
- next->uuid_prev = prev;
- }
-
- list_del(&server->probe_link);
- hlist_del_rcu(&server->proc_link);
- if (!hlist_unhashed(&server->addr_link))
- hlist_del_rcu(&server->addr_link);
- }
- write_sequnlock(&net->fs_lock);
+ struct afs_cell *cell = server->cell;
+
+ down_write(&cell->fs_lock);
- if (active == 0)
- afs_destroy_server(net, server);
+ if (!afs_has_server_expired(server)) {
+ up_write(&cell->fs_lock);
+ return false;
}
+
+ set_bit(AFS_SERVER_FL_EXPIRED, &server->flags);
+ _debug("expire %pU %u", &server->uuid, atomic_read(&server->active));
+ afs_see_server(server, afs_server_trace_see_expired);
+ rb_erase(&server->uuid_rb, &cell->fs_servers);
+ up_write(&cell->fs_lock);
+ return true;
}
-/*
- * Manage the records of servers known to be within a network namespace. This
- * includes garbage collecting unused servers.
- *
- * Note also that we were given an increment on net->servers_outstanding by
- * whoever queued us that we need to deal with before returning.
- */
-void afs_manage_servers(struct work_struct *work)
+static void afs_server_destroyer(struct work_struct *work)
{
- struct afs_net *net = container_of(work, struct afs_net, fs_manager);
- struct afs_server *gc_list = NULL;
- struct rb_node *cursor;
- time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
- bool purging = !net->live;
-
- _enter("");
+ struct afs_endpoint_state *estate;
+ struct afs_server *server = container_of(work, struct afs_server, destroyer);
+ struct afs_net *net = server->cell->net;
- /* Trawl the server list looking for servers that have expired from
- * lack of use.
- */
- read_seqlock_excl(&net->fs_lock);
+ afs_see_server(server, afs_server_trace_see_destroyer);
- for (cursor = rb_first(&net->fs_servers); cursor; cursor = rb_next(cursor)) {
- struct afs_server *server =
- rb_entry(cursor, struct afs_server, uuid_rb);
- int active = atomic_read(&server->active);
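+ /* If the record is already marked expired, a previous run has taken
+ * it off the cell's tree.
+ */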
+ if (test_bit(AFS_SERVER_FL_EXPIRED, &server->flags))
+ return;
- _debug("manage %pU %u", &server->uuid, active);
+ if (!afs_remove_server_from_cell(server))
+ return;
- if (purging) {
- trace_afs_server(server->debug_id, refcount_read(&server->ref),
- active, afs_server_trace_purging);
- if (active != 0)
- pr_notice("Can't purge s=%08x\n", server->debug_id);
- }
+ timer_shutdown_sync(&server->timer);
+ cancel_work(&server->destroyer);
- if (active == 0) {
- time64_t expire_at = server->unuse_time;
-
- if (!test_bit(AFS_SERVER_FL_VL_FAIL, &server->flags) &&
- !test_bit(AFS_SERVER_FL_NOT_FOUND, &server->flags))
- expire_at += afs_server_gc_delay;
- if (purging || expire_at <= now) {
- server->gc_next = gc_list;
- gc_list = server;
- } else if (expire_at < next_manage) {
- next_manage = expire_at;
- }
- }
- }
+ if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
+ afs_give_up_callbacks(net, server);
- read_sequnlock_excl(&net->fs_lock);
+ /* Unbind the rxrpc_peer records from the server. */
+ estate = rcu_access_pointer(server->endpoint_state);
+ if (estate)
+ afs_set_peer_appdata(server, estate->addresses, NULL);
- /* Update the timer on the way out. We have to pass an increment on
- * servers_outstanding in the namespace that we are in to the timer or
- * the work scheduler.
- */
- if (!purging && next_manage < TIME64_MAX) {
- now = ktime_get_real_seconds();
+ write_seqlock(&net->fs_lock);
+ list_del_init(&server->probe_link);
+ if (!hlist_unhashed(&server->proc_link))
+ hlist_del_rcu(&server->proc_link);
+ write_sequnlock(&net->fs_lock);
- if (next_manage - now <= 0) {
- if (queue_work(afs_wq, &net->fs_manager))
- afs_inc_servers_outstanding(net);
- } else {
- afs_set_server_timer(net, next_manage - now);
- }
- }
+ afs_put_server(net, server, afs_server_trace_destroy);
+}
- afs_gc_servers(net, gc_list);
+static void afs_server_timer(struct timer_list *timer)
+{
+ struct afs_server *server = container_of(timer, struct afs_server, timer);
- afs_dec_servers_outstanding(net);
- _leave(" [%d]", atomic_read(&net->servers_outstanding));
+ afs_see_server(server, afs_server_trace_see_timer);
+ if (!test_bit(AFS_SERVER_FL_EXPIRED, &server->flags))
+ schedule_work(&server->destroyer);
}
-static void afs_queue_server_manager(struct afs_net *net)
+/*
+ * Wake up all the servers in a cell so that they can purge themselves.
+ */
+void afs_purge_servers(struct afs_cell *cell)
{
- afs_inc_servers_outstanding(net);
- if (!queue_work(afs_wq, &net->fs_manager))
- afs_dec_servers_outstanding(net);
+ struct afs_server *server;
+ struct rb_node *rb;
+
+ down_read(&cell->fs_lock);
+ for (rb = rb_first(&cell->fs_servers); rb; rb = rb_next(rb)) {
+ server = rb_entry(rb, struct afs_server, uuid_rb);
+ afs_see_server(server, afs_server_trace_see_purge);
+ schedule_work(&server->destroyer);
+ }
+ up_read(&cell->fs_lock);
}
/*
- * Purge list of servers.
+ * Wait for outstanding servers.
*/
-void afs_purge_servers(struct afs_net *net)
+void afs_wait_for_servers(struct afs_net *net)
{
_enter("");
- if (del_timer_sync(&net->fs_timer))
- afs_dec_servers_outstanding(net);
-
- afs_queue_server_manager(net);
-
- _debug("wait");
atomic_dec(&net->servers_outstanding);
wait_var_event(&net->servers_outstanding,
!atomic_read(&net->servers_outstanding));
@@ -636,7 +553,7 @@ static noinline bool afs_update_server_record(struct afs_operation *op,
atomic_read(&server->active),
afs_server_trace_update);
- alist = afs_vl_lookup_addrs(op->volume->cell, op->key, &server->uuid);
+ alist = afs_vl_lookup_addrs(server, op->key);
if (IS_ERR(alist)) {
rcu_read_lock();
estate = rcu_dereference(server->endpoint_state);
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
index 7e7e567a7f8a..20d5474837df 100644
--- a/fs/afs/server_list.c
+++ b/fs/afs/server_list.c
@@ -16,7 +16,7 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
if (slist && refcount_dec_and_test(&slist->usage)) {
for (i = 0; i < slist->nr_servers; i++)
afs_unuse_server(net, slist->servers[i].server,
- afs_server_trace_put_slist);
+ afs_server_trace_unuse_slist);
kfree_rcu(slist, rcu);
}
}
@@ -97,8 +97,8 @@ struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume,
break;
if (j < slist->nr_servers) {
if (slist->servers[j].server == server) {
- afs_put_server(volume->cell->net, server,
- afs_server_trace_put_slist_isort);
+ afs_unuse_server_notime(volume->cell->net, server,
+ afs_server_trace_unuse_slist_isort);
continue;
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index f3ba1c3e72f5..d672b7ab57ae 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -194,8 +194,6 @@ static int afs_show_options(struct seq_file *m, struct dentry *root)
if (as->dyn_root)
seq_puts(m, ",dyn");
- if (test_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(d_inode(root))->flags))
- seq_puts(m, ",autocell");
switch (as->flock_mode) {
case afs_flock_mode_unset: break;
case afs_flock_mode_local: p = "local"; break;
@@ -292,13 +290,14 @@ static int afs_parse_source(struct fs_context *fc, struct fs_parameter *param)
/* lookup the cell record */
if (cellname) {
cell = afs_lookup_cell(ctx->net, cellname, cellnamesz,
- NULL, false);
+ NULL, AFS_LOOKUP_CELL_DIRECT_MOUNT,
+ afs_cell_trace_use_lookup_mount);
if (IS_ERR(cell)) {
pr_err("kAFS: unable to lookup cell '%*.*s'\n",
cellnamesz, cellnamesz, cellname ?: "");
return PTR_ERR(cell);
}
- afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_parse);
+ afs_unuse_cell(ctx->cell, afs_cell_trace_unuse_parse);
afs_see_cell(cell, afs_cell_trace_see_source);
ctx->cell = cell;
}
@@ -395,7 +394,7 @@ static int afs_validate_fc(struct fs_context *fc)
ctx->key = NULL;
cell = afs_use_cell(ctx->cell->alias_of,
afs_cell_trace_use_fc_alias);
- afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_fc);
+ afs_unuse_cell(ctx->cell, afs_cell_trace_unuse_fc);
ctx->cell = cell;
goto reget_key;
}
@@ -468,7 +467,7 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
/* allocate the root inode and dentry */
if (as->dyn_root) {
- inode = afs_iget_pseudo_dir(sb, true);
+ inode = afs_dynroot_iget_root(sb);
} else {
sprintf(sb->s_id, "%llu", as->volume->vid);
afs_activate_volume(as->volume);
@@ -478,21 +477,15 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
if (IS_ERR(inode))
return PTR_ERR(inode);
- if (ctx->autocell || as->dyn_root)
- set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags);
-
ret = -ENOMEM;
sb->s_root = d_make_root(inode);
if (!sb->s_root)
goto error;
if (as->dyn_root) {
- sb->s_d_op = &afs_dynroot_dentry_operations;
- ret = afs_dynroot_populate(sb);
- if (ret < 0)
- goto error;
+ set_default_d_op(sb, &afs_dynroot_dentry_operations);
} else {
- sb->s_d_op = &afs_fs_dentry_operations;
+ set_default_d_op(sb, &afs_fs_dentry_operations);
rcu_assign_pointer(as->volume->sb, sb);
}
@@ -527,9 +520,8 @@ static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc)
static void afs_destroy_sbi(struct afs_super_info *as)
{
if (as) {
- struct afs_net *net = afs_net(as->net_ns);
afs_put_volume(as->volume, afs_volume_trace_put_destroy_sbi);
- afs_unuse_cell(net, as->cell, afs_cell_trace_unuse_sbi);
+ afs_unuse_cell(as->cell, afs_cell_trace_unuse_sbi);
put_net(as->net_ns);
kfree(as);
}
@@ -539,9 +531,6 @@ static void afs_kill_super(struct super_block *sb)
{
struct afs_super_info *as = AFS_FS_S(sb);
- if (as->dyn_root)
- afs_dynroot_depopulate(sb);
-
/* Clear the callback interests (which will do ilookup5) before
* deactivating the superblock.
*/
@@ -615,7 +604,7 @@ static void afs_free_fc(struct fs_context *fc)
afs_destroy_sbi(fc->s_fs_info);
afs_put_volume(ctx->volume, afs_volume_trace_put_free_fc);
- afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_fc);
+ afs_unuse_cell(ctx->cell, afs_cell_trace_unuse_fc);
key_put(ctx->key);
kfree(ctx);
}
@@ -663,7 +652,7 @@ static void afs_i_init_once(void *_vnode)
memset(vnode, 0, sizeof(*vnode));
inode_init_once(&vnode->netfs.inode);
- mutex_init(&vnode->io_lock);
+ INIT_LIST_HEAD(&vnode->io_lock_waiters);
init_rwsem(&vnode->validate_lock);
spin_lock_init(&vnode->wb_lock);
spin_lock_init(&vnode->lock);
@@ -696,6 +685,8 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
vnode->volume = NULL;
vnode->lock_key = NULL;
vnode->permit_cache = NULL;
+ vnode->directory = NULL;
+ vnode->directory_size = 0;
vnode->flags = 1 << AFS_VNODE_UNSET;
vnode->lock_state = AFS_VNODE_LOCK_NONE;
diff --git a/fs/afs/validation.c b/fs/afs/validation.c
index bef8af12ebe2..0ba8336c9025 100644
--- a/fs/afs/validation.c
+++ b/fs/afs/validation.c
@@ -120,22 +120,31 @@
bool afs_check_validity(const struct afs_vnode *vnode)
{
const struct afs_volume *volume = vnode->volume;
+ enum afs_vnode_invalid_trace trace = afs_vnode_valid_trace;
+ time64_t cb_expires_at = atomic64_read(&vnode->cb_expires_at);
time64_t deadline = ktime_get_real_seconds() + 10;
if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
return true;
- if (atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break) ||
- atomic64_read(&vnode->cb_expires_at) <= deadline ||
- volume->cb_expires_at <= deadline ||
- vnode->cb_ro_snapshot != atomic_read(&volume->cb_ro_snapshot) ||
- vnode->cb_scrub != atomic_read(&volume->cb_scrub) ||
- test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
- _debug("inval");
- return false;
- }
-
- return true;
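+ /* Find the first reason, if any, that the callback promise on this
+ * vnode can no longer be trusted and note it in a tracepoint.
+ */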
+ if (atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break))
+ trace = afs_vnode_invalid_trace_cb_v_break;
+ else if (cb_expires_at == AFS_NO_CB_PROMISE)
+ trace = afs_vnode_invalid_trace_no_cb_promise;
+ else if (cb_expires_at <= deadline)
+ trace = afs_vnode_invalid_trace_expired;
+ else if (volume->cb_expires_at <= deadline)
+ trace = afs_vnode_invalid_trace_vol_expired;
+ else if (vnode->cb_ro_snapshot != atomic_read(&volume->cb_ro_snapshot))
+ trace = afs_vnode_invalid_trace_cb_ro_snapshot;
+ else if (vnode->cb_scrub != atomic_read(&volume->cb_scrub))
+ trace = afs_vnode_invalid_trace_cb_scrub;
+ else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
+ trace = afs_vnode_invalid_trace_zap_data;
+ else
+ return true;
+ trace_afs_vnode_invalid(vnode, trace);
+ return false;
}
/*
diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c
index 9f36e14f1c2d..fc9676abd252 100644
--- a/fs/afs/vl_alias.c
+++ b/fs/afs/vl_alias.c
@@ -205,11 +205,11 @@ static int afs_query_for_alias(struct afs_cell *cell, struct key *key)
goto is_alias;
if (mutex_lock_interruptible(&cell->net->proc_cells_lock) < 0) {
- afs_unuse_cell(cell->net, p, afs_cell_trace_unuse_check_alias);
+ afs_unuse_cell(p, afs_cell_trace_unuse_check_alias);
return -ERESTARTSYS;
}
- afs_unuse_cell(cell->net, p, afs_cell_trace_unuse_check_alias);
+ afs_unuse_cell(p, afs_cell_trace_unuse_check_alias);
}
mutex_unlock(&cell->net->proc_cells_lock);
@@ -253,6 +253,7 @@ static char *afs_vl_get_cell_name(struct afs_cell *cell, struct key *key)
static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key)
{
struct afs_cell *master;
+ size_t name_len;
char *cell_name;
cell_name = afs_vl_get_cell_name(cell, key);
@@ -264,8 +265,13 @@ static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key)
return 0;
}
- master = afs_lookup_cell(cell->net, cell_name, strlen(cell_name),
- NULL, false);
+ name_len = strlen(cell_name);
+ if (!name_len || name_len > AFS_MAXCELLNAME)
+ master = ERR_PTR(-EOPNOTSUPP);
+ else
+ master = afs_lookup_cell(cell->net, cell_name, name_len, NULL,
+ AFS_LOOKUP_CELL_ALIAS_CHECK,
+ afs_cell_trace_use_lookup_canonical);
kfree(cell_name);
if (IS_ERR(master))
return PTR_ERR(master);
diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
index d8f79f6ada3d..6ad9688d8f4b 100644
--- a/fs/afs/vl_rotate.c
+++ b/fs/afs/vl_rotate.c
@@ -48,7 +48,7 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
cell->dns_expiry <= ktime_get_real_seconds()) {
dns_lookup_count = smp_load_acquire(&cell->dns_lookup_count);
set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags);
- afs_queue_cell(cell, afs_cell_trace_get_queue_dns);
+ afs_queue_cell(cell, afs_cell_trace_queue_dns);
if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
if (wait_var_event_interruptible(
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index cac75f89b64a..3a23c0b08eb6 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -370,6 +370,7 @@ static const struct afs_call_type afs_RXVLGetCapabilities = {
.name = "VL.GetCapabilities",
.op = afs_VL_GetCapabilities,
.deliver = afs_deliver_vl_get_capabilities,
+ .immediate_cancel = afs_vlserver_probe_result,
.done = afs_vlserver_probe_result,
.destructor = afs_destroy_vl_get_capabilities,
};
@@ -697,7 +698,7 @@ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call)
return ret;
namesz = ntohl(call->tmp);
- if (namesz > AFS_MAXCELLNAME)
+ if (namesz > YFS_VL_MAXCELLNAME)
return afs_protocol_error(call, afs_eproto_cellname_len);
paddedsz = (namesz + 3) & ~3;
call->count = namesz;
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index af3a3f57c1b3..0efff3d25133 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -10,6 +10,7 @@
#include "internal.h"
static unsigned __read_mostly afs_volume_record_life = 60 * 60;
+static atomic_t afs_volume_debug_id;
static void afs_destroy_volume(struct work_struct *work);
@@ -59,7 +60,7 @@ static void afs_remove_volume_from_cell(struct afs_volume *volume)
struct afs_cell *cell = volume->cell;
if (!hlist_unhashed(&volume->proc_link)) {
- trace_afs_volume(volume->vid, refcount_read(&cell->ref),
+ trace_afs_volume(volume->debug_id, volume->vid, refcount_read(&volume->ref),
afs_volume_trace_remove);
write_seqlock(&cell->volume_lock);
hlist_del_rcu(&volume->proc_link);
@@ -84,6 +85,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
if (!volume)
goto error_0;
+ volume->debug_id = atomic_inc_return(&afs_volume_debug_id);
volume->vid = vldb->vid[params->type];
volume->update_at = ktime_get_real_seconds() + afs_volume_record_life;
volume->cell = afs_get_cell(params->cell, afs_cell_trace_get_vol);
@@ -115,7 +117,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
*_slist = slist;
rcu_assign_pointer(volume->servers, slist);
- trace_afs_volume(volume->vid, 1, afs_volume_trace_alloc);
+ trace_afs_volume(volume->debug_id, volume->vid, 1, afs_volume_trace_alloc);
return volume;
error_1:
@@ -247,7 +249,7 @@ static void afs_destroy_volume(struct work_struct *work)
afs_remove_volume_from_cell(volume);
afs_put_serverlist(volume->cell->net, slist);
afs_put_cell(volume->cell, afs_cell_trace_put_vol);
- trace_afs_volume(volume->vid, refcount_read(&volume->ref),
+ trace_afs_volume(volume->debug_id, volume->vid, refcount_read(&volume->ref),
afs_volume_trace_free);
kfree_rcu(volume, rcu);
@@ -262,7 +264,7 @@ bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason)
int r;
if (__refcount_inc_not_zero(&volume->ref, &r)) {
- trace_afs_volume(volume->vid, r + 1, reason);
+ trace_afs_volume(volume->debug_id, volume->vid, r + 1, reason);
return true;
}
return false;
@@ -278,7 +280,7 @@ struct afs_volume *afs_get_volume(struct afs_volume *volume,
int r;
__refcount_inc(&volume->ref, &r);
- trace_afs_volume(volume->vid, r + 1, reason);
+ trace_afs_volume(volume->debug_id, volume->vid, r + 1, reason);
}
return volume;
}
@@ -290,12 +292,13 @@ struct afs_volume *afs_get_volume(struct afs_volume *volume,
void afs_put_volume(struct afs_volume *volume, enum afs_volume_trace reason)
{
if (volume) {
+ unsigned int debug_id = volume->debug_id;
afs_volid_t vid = volume->vid;
bool zero;
int r;
zero = __refcount_dec_and_test(&volume->ref, &r);
- trace_afs_volume(vid, r - 1, reason);
+ trace_afs_volume(debug_id, vid, r - 1, reason);
if (zero)
schedule_work(&volume->destructor);
}
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 34107b55f834..93ad86ff3345 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -120,17 +120,17 @@ static void afs_issue_write_worker(struct work_struct *work)
#if 0 // Error injection
if (subreq->debug_index == 3)
- return netfs_write_subrequest_terminated(subreq, -ENOANO, false);
+ return netfs_write_subrequest_terminated(subreq, -ENOANO);
- if (!test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) {
+ if (!subreq->retry_count) {
set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);
+ return netfs_write_subrequest_terminated(subreq, -EAGAIN);
}
#endif
op = afs_alloc_operation(wreq->netfs_priv, vnode->volume);
if (IS_ERR(op))
- return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);
+ return netfs_write_subrequest_terminated(subreq, -EAGAIN);
afs_op_set_vnode(op, 0, vnode);
op->file[0].dv_delta = 1;
@@ -149,6 +149,9 @@ static void afs_issue_write_worker(struct work_struct *work)
afs_wait_for_operation(op);
ret = afs_put_operation(op);
switch (ret) {
+ case 0:
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ break;
case -EACCES:
case -EPERM:
case -ENOKEY:
@@ -163,13 +166,13 @@ static void afs_issue_write_worker(struct work_struct *work)
break;
}
- netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len, false);
+ netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len);
}
void afs_issue_write(struct netfs_io_subrequest *subreq)
{
subreq->work.func = afs_issue_write_worker;
- if (!queue_work(system_unbound_wq, &subreq->work))
+ if (!queue_work(system_dfl_wq, &subreq->work))
WARN_ON_ONCE(1);
}
@@ -179,8 +182,8 @@ void afs_issue_write(struct netfs_io_subrequest *subreq)
*/
void afs_begin_writeback(struct netfs_io_request *wreq)
{
- afs_get_writeback_key(wreq);
- wreq->io_streams[0].avail = true;
+ if (S_ISREG(wreq->inode->i_mode))
+ afs_get_writeback_key(wreq);
}
/*
@@ -193,6 +196,19 @@ void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *st
list_first_entry(&stream->subrequests,
struct netfs_io_subrequest, rreq_link);
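+ /* The retry handling below only applies to write requests; bail out
+ * for read-type origins.
+ */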
+ switch (wreq->origin) {
+ case NETFS_READAHEAD:
+ case NETFS_READPAGE:
+ case NETFS_READ_GAPS:
+ case NETFS_READ_SINGLE:
+ case NETFS_READ_FOR_WRITE:
+ case NETFS_UNBUFFERED_READ:
+ case NETFS_DIO_READ:
+ return;
+ default:
+ break;
+ }
+
switch (subreq->error) {
case -EACCES:
case -EPERM:
diff --git a/fs/afs/xdr_fs.h b/fs/afs/xdr_fs.h
index 8ca868164507..cc5f143d21a3 100644
--- a/fs/afs/xdr_fs.h
+++ b/fs/afs/xdr_fs.h
@@ -88,7 +88,7 @@ union afs_xdr_dir_block {
struct {
struct afs_xdr_dir_hdr hdr;
- u8 alloc_ctrs[AFS_DIR_MAX_BLOCKS];
+ u8 alloc_ctrs[AFS_DIR_BLOCKS_WITH_CTR];
__be16 hashtable[AFS_DIR_HASHTBL_SIZE];
} meta;
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 024227aba4cd..febf13a49f0b 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -352,19 +352,19 @@ static int yfs_deliver_status_and_volsync(struct afs_call *call)
static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
{
struct afs_operation *op = call->op;
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
struct afs_vnode_param *vp = &op->file[0];
- struct afs_read *req = op->fetch.req;
const __be32 *bp;
size_t count_before;
int ret;
_enter("{%u,%zu, %zu/%llu}",
call->unmarshall, call->iov_len, iov_iter_count(call->iter),
- req->actual_len);
+ call->remaining);
switch (call->unmarshall) {
case 0:
- req->actual_len = 0;
+ call->remaining = 0;
afs_extract_to_tmp64(call);
call->unmarshall++;
fallthrough;
@@ -379,42 +379,39 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
if (ret < 0)
return ret;
- req->actual_len = be64_to_cpu(call->tmp64);
- _debug("DATA length: %llu", req->actual_len);
+ call->remaining = be64_to_cpu(call->tmp64);
+ _debug("DATA length: %llu", call->remaining);
- if (req->actual_len == 0)
+ if (call->remaining == 0)
goto no_more_data;
- call->iter = req->iter;
- call->iov_len = min(req->actual_len, req->len);
+ call->iter = &subreq->io_iter;
+ call->iov_len = min(call->remaining, subreq->len - subreq->transferred);
call->unmarshall++;
fallthrough;
/* extract the returned data */
case 2:
count_before = call->iov_len;
- _debug("extract data %zu/%llu", count_before, req->actual_len);
+ _debug("extract data %zu/%llu", count_before, call->remaining);
ret = afs_extract_data(call, true);
- if (req->subreq) {
- req->subreq->transferred += count_before - call->iov_len;
- netfs_read_subreq_progress(req->subreq, false);
- }
+ subreq->transferred += count_before - call->iov_len;
+ call->remaining -= count_before - call->iov_len;
if (ret < 0)
return ret;
call->iter = &call->def_iter;
- if (req->actual_len <= req->len)
+ if (call->remaining == 0)
goto no_more_data;
/* Discard any excess data the server gave us */
- afs_extract_discard(call, req->actual_len - req->len);
+ afs_extract_discard(call, call->remaining);
call->unmarshall = 3;
fallthrough;
case 3:
_debug("extract discard %zu/%llu",
- iov_iter_count(call->iter), req->actual_len - req->len);
+ iov_iter_count(call->iter), call->remaining);
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -439,8 +436,8 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
xdr_decode_YFSCallBack(&bp, call, &vp->scb);
xdr_decode_YFSVolSync(&bp, &op->volsync);
- req->data_version = vp->scb.status.data_version;
- req->file_size = vp->scb.status.size;
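+ /* Note if the read reached the end of the file as the server sees it. */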
+ if (subreq->start + subreq->transferred >= vp->scb.status.size)
+ __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
call->unmarshall++;
fallthrough;
@@ -459,7 +456,9 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
static const struct afs_call_type yfs_RXYFSFetchData64 = {
.name = "YFS.FetchData64",
.op = yfs_FS_FetchData64,
+ .async_rx = afs_fetch_data_async_rx,
.deliver = yfs_deliver_fs_fetch_data64,
+ .immediate_cancel = afs_fetch_data_immediate_cancel,
.destructor = afs_flat_call_destructor,
};
@@ -468,14 +467,15 @@ static const struct afs_call_type yfs_RXYFSFetchData64 = {
*/
void yfs_fs_fetch_data(struct afs_operation *op)
{
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
struct afs_vnode_param *vp = &op->file[0];
- struct afs_read *req = op->fetch.req;
struct afs_call *call;
__be32 *bp;
- _enter(",%x,{%llx:%llu},%llx,%llx",
+ _enter(",%x,{%llx:%llu},%llx,%zx",
key_serial(op->key), vp->fid.vid, vp->fid.vnode,
- req->pos, req->len);
+ subreq->start + subreq->transferred,
+ subreq->len - subreq->transferred);
call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchData64,
sizeof(__be32) * 2 +
@@ -487,15 +487,16 @@ void yfs_fs_fetch_data(struct afs_operation *op)
if (!call)
return afs_op_nomem(op);
- req->call_debug_id = call->debug_id;
+ if (op->flags & AFS_OPERATION_ASYNC)
+ call->async = true;
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSFETCHDATA64);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
bp = xdr_encode_YFSFid(bp, &vp->fid);
- bp = xdr_encode_u64(bp, req->pos);
- bp = xdr_encode_u64(bp, req->len);
+ bp = xdr_encode_u64(bp, subreq->start + subreq->transferred);
+ bp = xdr_encode_u64(bp, subreq->len - subreq->transferred);
yfs_check_req(call, bp);
call->fid = vp->fid;
@@ -666,8 +667,9 @@ static int yfs_deliver_fs_remove_file2(struct afs_call *call)
static void yfs_done_fs_remove_file2(struct afs_call *call)
{
if (call->error == -ECONNABORTED &&
- call->abort_code == RX_INVALID_OPERATION) {
- set_bit(AFS_SERVER_FL_NO_RM2, &call->server->flags);
+ (call->abort_code == RX_INVALID_OPERATION ||
+ call->abort_code == RXGEN_OPCODE)) {
+ set_bit(AFS_SERVER_FL_NO_RM2, &call->op->server->flags);
call->op->flags |= AFS_OPERATION_DOWNGRADE;
}
}
@@ -1040,6 +1042,9 @@ void yfs_fs_rename(struct afs_operation *op)
_enter("");
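+ /* Use the newer YFS.Rename_Replace operation unless the server has
+ * previously indicated that it doesn't support it.
+ */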
+ if (!test_bit(AFS_SERVER_FL_NO_RENAME2, &op->server->flags))
+ return yfs_fs_rename_replace(op);
+
call = afs_alloc_flat_call(op->net, &yfs_RXYFSRename,
sizeof(__be32) +
sizeof(struct yfs_xdr_RPCFlags) +
@@ -1069,6 +1074,252 @@ void yfs_fs_rename(struct afs_operation *op)
}
/*
+ * Deliver reply data to a YFS.Rename_NoReplace operation. This does not
+ * return the status of a displaced target inode as there cannot be one.
+ */
+static int yfs_deliver_fs_rename_1(struct afs_call *call)
+{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ struct afs_vnode_param *old_vp = &op->more_files[0];
+ const __be32 *bp;
+ int ret;
+
+ _enter("{%u}", call->unmarshall);
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ bp = call->buffer;
+ /* If the two dirs are the same, we have two copies of the same status
+ * report, so we just decode it twice.
+ */
+ xdr_decode_YFSFetchStatus(&bp, call, &orig_dvp->scb);
+ xdr_decode_YFSFid(&bp, &old_vp->fid);
+ xdr_decode_YFSFetchStatus(&bp, call, &old_vp->scb);
+ xdr_decode_YFSFetchStatus(&bp, call, &new_dvp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+/*
+ * Deliver reply data to a YFS.Rename_Replace or a YFS.Rename_Exchange
+ * operation. These return the status of the displaced target inode if there
+ * was one.
+ */
+static int yfs_deliver_fs_rename_2(struct afs_call *call)
+{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ struct afs_vnode_param *old_vp = &op->more_files[0];
+ struct afs_vnode_param *new_vp = &op->more_files[1];
+ const __be32 *bp;
+ int ret;
+
+ _enter("{%u}", call->unmarshall);
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ bp = call->buffer;
+ /* If the two dirs are the same, we have two copies of the same status
+ * report, so we just decode it twice.
+ */
+ xdr_decode_YFSFetchStatus(&bp, call, &orig_dvp->scb);
+ xdr_decode_YFSFid(&bp, &old_vp->fid);
+ xdr_decode_YFSFetchStatus(&bp, call, &old_vp->scb);
+ xdr_decode_YFSFetchStatus(&bp, call, &new_dvp->scb);
+ xdr_decode_YFSFid(&bp, &new_vp->fid);
+ xdr_decode_YFSFetchStatus(&bp, call, &new_vp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+static void yfs_done_fs_rename_replace(struct afs_call *call)
+{
+ if (call->error == -ECONNABORTED &&
+ (call->abort_code == RX_INVALID_OPERATION ||
+ call->abort_code == RXGEN_OPCODE)) {
+ set_bit(AFS_SERVER_FL_NO_RENAME2, &call->op->server->flags);
+ call->op->flags |= AFS_OPERATION_DOWNGRADE;
+ }
+}
+
+/*
+ * YFS.Rename_Replace operation type
+ */
+static const struct afs_call_type yfs_RXYFSRename_Replace = {
+ .name = "FS.Rename_Replace",
+ .op = yfs_FS_Rename_Replace,
+ .deliver = yfs_deliver_fs_rename_2,
+ .done = yfs_done_fs_rename_replace,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * YFS.Rename_NoReplace operation type
+ */
+static const struct afs_call_type yfs_RXYFSRename_NoReplace = {
+ .name = "FS.Rename_NoReplace",
+ .op = yfs_FS_Rename_NoReplace,
+ .deliver = yfs_deliver_fs_rename_1,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * YFS.Rename_Exchange operation type
+ */
+static const struct afs_call_type yfs_RXYFSRename_Exchange = {
+ .name = "FS.Rename_Exchange",
+ .op = yfs_FS_Rename_Exchange,
+ .deliver = yfs_deliver_fs_rename_2,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * Rename a file or directory, replacing the target if it exists. The status
+ * of a displaced target is returned.
+ */
+void yfs_fs_rename_replace(struct afs_operation *op)
+{
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ const struct qstr *orig_name = &op->dentry->d_name;
+ const struct qstr *new_name = &op->dentry_2->d_name;
+ struct afs_call *call;
+ __be32 *bp;
+
+ _enter("");
+
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSRename_Replace,
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_RPCFlags) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(orig_name->len) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(new_name->len),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return afs_op_nomem(op);
+
+ /* Marshall the parameters. */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSRENAME_REPLACE);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &orig_dvp->fid);
+ bp = xdr_encode_name(bp, orig_name);
+ bp = xdr_encode_YFSFid(bp, &new_dvp->fid);
+ bp = xdr_encode_name(bp, new_name);
+ yfs_check_req(call, bp);
+
+ call->fid = orig_dvp->fid;
+ trace_afs_make_fs_call2(call, &orig_dvp->fid, orig_name, new_name);
+ afs_make_op_call(op, call, GFP_NOFS);
+}
+
+/*
+ * Rename a file or directory, failing if the target dirent exists.
+ */
+void yfs_fs_rename_noreplace(struct afs_operation *op)
+{
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ const struct qstr *orig_name = &op->dentry->d_name;
+ const struct qstr *new_name = &op->dentry_2->d_name;
+ struct afs_call *call;
+ __be32 *bp;
+
+ _enter("");
+
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSRename_NoReplace,
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_RPCFlags) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(orig_name->len) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(new_name->len),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return afs_op_nomem(op);
+
+ /* Marshall the parameters. */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSRENAME_NOREPLACE);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &orig_dvp->fid);
+ bp = xdr_encode_name(bp, orig_name);
+ bp = xdr_encode_YFSFid(bp, &new_dvp->fid);
+ bp = xdr_encode_name(bp, new_name);
+ yfs_check_req(call, bp);
+
+ call->fid = orig_dvp->fid;
+ trace_afs_make_fs_call2(call, &orig_dvp->fid, orig_name, new_name);
+ afs_make_op_call(op, call, GFP_NOFS);
+}
+
+/*
+ * Exchange a pair of files or directories.
+ */
+void yfs_fs_rename_exchange(struct afs_operation *op)
+{
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ const struct qstr *orig_name = &op->dentry->d_name;
+ const struct qstr *new_name = &op->dentry_2->d_name;
+ struct afs_call *call;
+ __be32 *bp;
+
+ _enter("");
+
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSRename_Exchange,
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_RPCFlags) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(orig_name->len) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(new_name->len),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return afs_op_nomem(op);
+
+ /* Marshall the parameters. */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSRENAME_EXCHANGE);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &orig_dvp->fid);
+ bp = xdr_encode_name(bp, orig_name);
+ bp = xdr_encode_YFSFid(bp, &new_dvp->fid);
+ bp = xdr_encode_name(bp, new_name);
+ yfs_check_req(call, bp);
+
+ call->fid = orig_dvp->fid;
+ trace_afs_make_fs_call2(call, &orig_dvp->fid, orig_name, new_name);
+ afs_make_op_call(op, call, GFP_NOFS);
+}
+
+/*
* YFS.StoreData64 operation type.
*/
static const struct afs_call_type yfs_RXYFSStoreData64 = {
diff --git a/fs/aio.c b/fs/aio.c
index 50671640b588..0a23a8c0717f 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -224,7 +224,7 @@ static unsigned long aio_nr; /* current system wide number of aio requests */
static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
#ifdef CONFIG_SYSCTL
-static struct ctl_table aio_sysctls[] = {
+static const struct ctl_table aio_sysctls[] = {
{
.procname = "aio-nr",
.data = &aio_nr,
@@ -392,15 +392,15 @@ static const struct vm_operations_struct aio_ring_vm_ops = {
#endif
};
-static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
+static int aio_ring_mmap_prepare(struct vm_area_desc *desc)
{
- vm_flags_set(vma, VM_DONTEXPAND);
- vma->vm_ops = &aio_ring_vm_ops;
+ desc->vm_flags |= VM_DONTEXPAND;
+ desc->vm_ops = &aio_ring_vm_ops;
return 0;
}
static const struct file_operations aio_ring_fops = {
- .mmap = aio_ring_mmap,
+ .mmap_prepare = aio_ring_mmap_prepare,
};
#if IS_ENABLED(CONFIG_MIGRATION)
@@ -445,7 +445,7 @@ static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
folio_get(dst);
rc = folio_migrate_mapping(mapping, dst, src, 1);
- if (rc != MIGRATEPAGE_SUCCESS) {
+ if (rc) {
folio_put(dst);
goto out_unlock;
}
@@ -636,7 +636,7 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
/* Synchronize against RCU protected table->table[] dereferences */
INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
- queue_rcu_work(system_wq, &ctx->free_rwork);
+ queue_rcu_work(system_percpu_wq, &ctx->free_rwork);
}
/*
@@ -1511,6 +1511,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb, int rw_type)
{
int ret;
+ req->ki_write_stream = 0;
req->ki_complete = aio_complete_rw;
req->private = NULL;
req->ki_pos = iocb->aio_offset;
@@ -1639,10 +1640,10 @@ static int aio_write(struct kiocb *req, const struct iocb *iocb,
static void aio_fsync_work(struct work_struct *work)
{
struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
- const struct cred *old_cred = override_creds(iocb->fsync.creds);
- iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
- revert_creds(old_cred);
+ scoped_with_creds(iocb->fsync.creds)
+ iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
+
put_cred(iocb->fsync.creds);
iocb_put(iocb);
}
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 42bd1cb7c9cd..b8381c7fb636 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -24,10 +24,51 @@
#include <linux/uaccess.h>
+#include "internal.h"
+
static struct vfsmount *anon_inode_mnt __ro_after_init;
static struct inode *anon_inode_inode __ro_after_init;
/*
+ * User space expects anonymous inodes to have no file type in st_mode.
+ *
+ * In particular, 'lsof' has this legacy logic:
+ *
+ * type = s->st_mode & S_IFMT;
+ * switch (type) {
+ * ...
+ * case 0:
+ * if (!strcmp(p, "anon_inode"))
+ * Lf->ntype = Ntype = N_ANON_INODE;
+ *
+ * to detect our old anon_inode logic.
+ *
+ * Rather than mess with our internal sane inode data, just fix it
+ * up here in getattr() by masking off the format bits.
+ */
+int anon_inode_getattr(struct mnt_idmap *idmap, const struct path *path,
+ struct kstat *stat, u32 request_mask,
+ unsigned int query_flags)
+{
+ struct inode *inode = d_inode(path->dentry);
+
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
+ stat->mode &= ~S_IFMT;
+ return 0;
+}
+
+int anon_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct inode_operations anon_inode_operations = {
+ .getattr = anon_inode_getattr,
+ .setattr = anon_inode_setattr,
+};
+
+/*
* anon_inodefs_dname() is called from d_path().
*/
static char *anon_inodefs_dname(struct dentry *dentry, char *buffer, int buflen)
@@ -45,6 +86,8 @@ static int anon_inodefs_init_fs_context(struct fs_context *fc)
struct pseudo_fs_context *ctx = init_pseudo(fc, ANON_INODE_FS_MAGIC);
if (!ctx)
return -ENOMEM;
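+ /* Nothing on anon_inodefs may be executed or used as a device node. */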
+ fc->s_iflags |= SB_I_NOEXEC;
+ fc->s_iflags |= SB_I_NODEV;
ctx->dops = &anon_inodefs_dentry_operations;
return 0;
}
@@ -55,25 +98,38 @@ static struct file_system_type anon_inode_fs_type = {
.kill_sb = kill_anon_super,
};
-static struct inode *anon_inode_make_secure_inode(
- const char *name,
- const struct inode *context_inode)
+/**
+ * anon_inode_make_secure_inode - allocate an anonymous inode with security context
+ * @sb: [in] Superblock to allocate from
+ * @name: [in] Name of the class of the new file (e.g., "secretmem")
+ * @context_inode:
+ * [in] Optional parent inode for security inheritance
+ *
+ * The function ensures proper security initialization through the LSM hook
+ * security_inode_init_security_anon().
+ *
+ * Return: Pointer to new inode on success, ERR_PTR on failure.
+ */
+struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
+ const struct inode *context_inode)
{
struct inode *inode;
- const struct qstr qname = QSTR_INIT(name, strlen(name));
int error;
- inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
+ inode = alloc_anon_inode(sb);
if (IS_ERR(inode))
return inode;
inode->i_flags &= ~S_PRIVATE;
- error = security_inode_init_security_anon(inode, &qname, context_inode);
+ inode->i_op = &anon_inode_operations;
+ error = security_inode_init_security_anon(inode, &QSTR(name),
+ context_inode);
if (error) {
iput(inode);
return ERR_PTR(error);
}
return inode;
}
+EXPORT_SYMBOL_FOR_MODULES(anon_inode_make_secure_inode, "kvm");
static struct file *__anon_inode_getfile(const char *name,
const struct file_operations *fops,
@@ -88,7 +144,8 @@ static struct file *__anon_inode_getfile(const char *name,
return ERR_PTR(-ENOENT);
if (make_inode) {
- inode = anon_inode_make_secure_inode(name, context_inode);
+ inode = anon_inode_make_secure_inode(anon_inode_mnt->mnt_sb,
+ name, context_inode);
if (IS_ERR(inode)) {
file = ERR_CAST(inode);
goto err;
@@ -223,27 +280,8 @@ static int __anon_inode_getfd(const char *name,
const struct inode *context_inode,
bool make_inode)
{
- int error, fd;
- struct file *file;
-
- error = get_unused_fd_flags(flags);
- if (error < 0)
- return error;
- fd = error;
-
- file = __anon_inode_getfile(name, fops, priv, flags, context_inode,
- make_inode);
- if (IS_ERR(file)) {
- error = PTR_ERR(file);
- goto err_put_unused_fd;
- }
- fd_install(fd, file);
-
- return fd;
-
-err_put_unused_fd:
- put_unused_fd(fd);
- return error;
+ return FD_ADD(flags, __anon_inode_getfile(name, fops, priv, flags,
+ context_inode, make_inode));
}
/**
@@ -313,6 +351,7 @@ static int __init anon_inode_init(void)
anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
if (IS_ERR(anon_inode_inode))
panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode));
+ anon_inode_inode->i_op = &anon_inode_operations;
return 0;
}
diff --git a/fs/attr.c b/fs/attr.c
index 9caf63d20d03..b9ec6b47bab2 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -230,7 +230,7 @@ EXPORT_SYMBOL(setattr_prepare);
* @inode: the inode to be truncated
* @offset: the new size to assign to the inode
*
- * inode_newsize_ok must be called with i_mutex held.
+ * inode_newsize_ok must be called with i_rwsem held exclusively.
*
* inode_newsize_ok will check filesystem limits and ulimits to check that the
* new inode size is within limits. inode_newsize_ok will also send SIGXFSZ
@@ -286,20 +286,12 @@ static void setattr_copy_mgtime(struct inode *inode, const struct iattr *attr)
unsigned int ia_valid = attr->ia_valid;
struct timespec64 now;
- if (ia_valid & ATTR_CTIME) {
- /*
- * In the case of an update for a write delegation, we must respect
- * the value in ia_ctime and not use the current time.
- */
- if (ia_valid & ATTR_DELEG)
- now = inode_set_ctime_deleg(inode, attr->ia_ctime);
- else
- now = inode_set_ctime_current(inode);
- } else {
- /* If ATTR_CTIME isn't set, then ATTR_MTIME shouldn't be either. */
- WARN_ON_ONCE(ia_valid & ATTR_MTIME);
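+ /* A ctime supplied explicitly (e.g. on behalf of a write delegation)
+ * must be used as given rather than replaced with the current time.
+ */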
+ if (ia_valid & ATTR_CTIME_SET)
+ now = inode_set_ctime_deleg(inode, attr->ia_ctime);
+ else if (ia_valid & ATTR_CTIME)
+ now = inode_set_ctime_current(inode);
+ else
now = current_time(inode);
- }
if (ia_valid & ATTR_ATIME_SET)
inode_set_atime_to_ts(inode, attr->ia_atime);
@@ -318,7 +310,7 @@ static void setattr_copy_mgtime(struct inode *inode, const struct iattr *attr)
* @inode: the inode to be updated
* @attr: the new attributes
*
- * setattr_copy must be called with i_mutex held.
+ * setattr_copy must be called with i_rwsem held exclusively.
*
* setattr_copy updates the inode's metadata with that specified
* in attr on idmapped mounts. Necessary permission checks to determine
@@ -359,12 +351,11 @@ void setattr_copy(struct mnt_idmap *idmap, struct inode *inode,
inode_set_atime_to_ts(inode, attr->ia_atime);
if (ia_valid & ATTR_MTIME)
inode_set_mtime_to_ts(inode, attr->ia_mtime);
- if (ia_valid & ATTR_CTIME) {
- if (ia_valid & ATTR_DELEG)
- inode_set_ctime_deleg(inode, attr->ia_ctime);
- else
- inode_set_ctime_to_ts(inode, attr->ia_ctime);
- }
+
+ if (ia_valid & ATTR_CTIME_SET)
+ inode_set_ctime_deleg(inode, attr->ia_ctime);
+ else if (ia_valid & ATTR_CTIME)
+ inode_set_ctime_to_ts(inode, attr->ia_ctime);
}
EXPORT_SYMBOL(setattr_copy);
@@ -403,13 +394,13 @@ EXPORT_SYMBOL(may_setattr);
* @attr: new attributes
* @delegated_inode: returns inode, if the inode is delegated
*
- * The caller must hold the i_mutex on the affected object.
+ * The caller must hold the i_rwsem exclusively on the affected object.
*
* If notify_change discovers a delegation in need of breaking,
* it will return -EWOULDBLOCK and return a reference to the inode in
* delegated_inode. The caller should then break the delegation and
* retry. Because breaking a delegation may take a long time, the
- * caller should drop the i_mutex before doing so.
+ * caller should drop the i_rwsem before doing so.
*
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
@@ -424,7 +415,7 @@ EXPORT_SYMBOL(may_setattr);
* performed on the raw inode simply pass @nop_mnt_idmap.
*/
int notify_change(struct mnt_idmap *idmap, struct dentry *dentry,
- struct iattr *attr, struct inode **delegated_inode)
+ struct iattr *attr, struct delegated_inode *delegated_inode)
{
struct inode *inode = dentry->d_inode;
umode_t mode = inode->i_mode;
@@ -456,22 +447,25 @@ int notify_change(struct mnt_idmap *idmap, struct dentry *dentry,
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
- /* Flag setting protected by i_mutex */
+ /* Flag setting protected by i_rwsem */
if (is_sxid(attr->ia_mode))
inode->i_flags &= ~S_NOSEC;
}
now = current_time(inode);
- attr->ia_ctime = now;
- if (!(ia_valid & ATTR_ATIME_SET))
- attr->ia_atime = now;
- else
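+ /* For each timestamp, use the caller-supplied value (truncated to the
+ * filesystem's granularity) if the corresponding SET flag is given,
+ * otherwise stamp the current time.
+ */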
+ if (ia_valid & ATTR_ATIME_SET)
attr->ia_atime = timestamp_truncate(attr->ia_atime, inode);
- if (!(ia_valid & ATTR_MTIME_SET))
- attr->ia_mtime = now;
else
+ attr->ia_atime = now;
+ if (ia_valid & ATTR_CTIME_SET)
+ attr->ia_ctime = timestamp_truncate(attr->ia_ctime, inode);
+ else
+ attr->ia_ctime = now;
+ if (ia_valid & ATTR_MTIME_SET)
attr->ia_mtime = timestamp_truncate(attr->ia_mtime, inode);
+ else
+ attr->ia_mtime = now;
if (ia_valid & ATTR_KILL_PRIV) {
error = security_inode_need_killpriv(dentry);
diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
index 77c7991d89aa..4fd555528c5d 100644
--- a/fs/autofs/autofs_i.h
+++ b/fs/autofs/autofs_i.h
@@ -16,6 +16,7 @@
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
+#include <uapi/linux/mount.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/uaccess.h>
@@ -27,6 +28,9 @@
#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
+#include "../mount.h"
+#include <linux/ns_common.h>
+
/* This is the range of ioctl() numbers we claim as ours */
#define AUTOFS_IOC_FIRST AUTOFS_IOC_READY
@@ -114,6 +118,7 @@ struct autofs_sb_info {
int pipefd;
struct file *pipe;
struct pid *oz_pgrp;
+ u64 mnt_ns_id;
int version;
int sub_version;
int min_proto;
@@ -218,6 +223,8 @@ void autofs_clean_ino(struct autofs_info *);
static inline int autofs_check_pipe(struct file *pipe)
{
+ if (pipe->f_mode & FMODE_PATH)
+ return -EINVAL;
if (!(pipe->f_mode & FMODE_CAN_WRITE))
return -EINVAL;
if (!S_ISFIFO(file_inode(pipe)->i_mode))
diff --git a/fs/autofs/dev-ioctl.c b/fs/autofs/dev-ioctl.c
index 6d57efbb8110..6743b3b64217 100644
--- a/fs/autofs/dev-ioctl.c
+++ b/fs/autofs/dev-ioctl.c
@@ -231,32 +231,14 @@ static int test_by_type(const struct path *path, void *p)
*/
static int autofs_dev_ioctl_open_mountpoint(const char *name, dev_t devid)
{
- int err, fd;
-
- fd = get_unused_fd_flags(O_CLOEXEC);
- if (likely(fd >= 0)) {
- struct file *filp;
- struct path path;
-
- err = find_autofs_mount(name, &path, test_by_dev, &devid);
- if (err)
- goto out;
-
- filp = dentry_open(&path, O_RDONLY, current_cred());
- path_put(&path);
- if (IS_ERR(filp)) {
- err = PTR_ERR(filp);
- goto out;
- }
-
- fd_install(fd, filp);
- }
+ struct path path __free(path_put) = {};
+ int err;
- return fd;
+ err = find_autofs_mount(name, &path, test_by_dev, &devid);
+ if (err)
+ return err;
-out:
- put_unused_fd(fd);
- return err;
+ return FD_ADD(O_CLOEXEC, dentry_open(&path, O_RDONLY, current_cred()));
}
/* Open a file descriptor on an autofs mount point */
@@ -381,6 +363,7 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp,
swap(sbi->oz_pgrp, new_pid);
sbi->pipefd = pipefd;
sbi->pipe = pipe;
+ sbi->mnt_ns_id = to_ns_common(current->nsproxy->mnt_ns)->ns_id;
sbi->flags &= ~AUTOFS_SBI_CATATONIC;
}
out:
@@ -442,7 +425,6 @@ static int autofs_dev_ioctl_timeout(struct file *fp,
sbi->exp_timeout = timeout * HZ;
} else {
struct dentry *base = fp->f_path.dentry;
- struct inode *inode = base->d_inode;
int path_len = param->size - AUTOFS_DEV_IOCTL_SIZE - 1;
struct dentry *dentry;
struct autofs_info *ino;
@@ -450,19 +432,8 @@ static int autofs_dev_ioctl_timeout(struct file *fp,
if (!autofs_type_indirect(sbi->type))
return -EINVAL;
- /* An expire timeout greater than the superblock timeout
- * could be a problem at shutdown but the super block
- * timeout itself can change so all we can really do is
- * warn the user.
- */
- if (timeout >= sbi->exp_timeout)
- pr_warn("per-mount expire timeout is greater than "
- "the parent autofs mount timeout which could "
- "prevent shutdown\n");
-
- inode_lock_shared(inode);
- dentry = try_lookup_one_len(param->path, base, path_len);
- inode_unlock_shared(inode);
+ dentry = try_lookup_noperm(&QSTR_LEN(param->path, path_len),
+ base);
if (IS_ERR_OR_NULL(dentry))
return dentry ? PTR_ERR(dentry) : -ENOENT;
ino = autofs_dentry_ino(dentry);
@@ -489,6 +460,18 @@ static int autofs_dev_ioctl_timeout(struct file *fp,
ino->flags |= AUTOFS_INF_EXPIRE_SET;
ino->exp_timeout = timeout * HZ;
}
+
+ /* An expire timeout greater than the superblock timeout
+ * could be a problem at shutdown but the super block
+ * timeout itself can change so all we can really do is
+ * warn the user.
+ */
+ if (ino->flags & AUTOFS_INF_EXPIRE_SET &&
+ ino->exp_timeout > sbi->exp_timeout)
+ pr_warn("per-mount expire timeout is greater than "
+ "the parent autofs mount timeout which could "
+ "prevent shutdown\n");
+
dput(dentry);
}
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index ee2edccaef70..b932b1719dfc 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -55,7 +55,7 @@ void autofs_kill_sb(struct super_block *sb)
}
pr_debug("shutting down\n");
- kill_litter_super(sb);
+ kill_anon_super(sb);
if (sbi)
kfree_rcu(sbi, rcu);
}
@@ -251,6 +251,7 @@ static struct autofs_sb_info *autofs_alloc_sbi(void)
sbi->min_proto = AUTOFS_MIN_PROTO_VERSION;
sbi->max_proto = AUTOFS_MAX_PROTO_VERSION;
sbi->pipefd = -1;
+ sbi->mnt_ns_id = to_ns_common(current->nsproxy->mnt_ns)->ns_id;
set_autofs_type_indirect(&sbi->type);
mutex_init(&sbi->wq_mutex);
@@ -311,7 +312,7 @@ static int autofs_fill_super(struct super_block *s, struct fs_context *fc)
s->s_blocksize_bits = 10;
s->s_magic = AUTOFS_SUPER_MAGIC;
s->s_op = &autofs_sops;
- s->s_d_op = &autofs_dentry_operations;
+ set_default_d_op(s, &autofs_dentry_operations);
s->s_time_gran = 1;
/*
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 530d18827e35..2c31002b314a 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -15,8 +15,8 @@ static int autofs_dir_symlink(struct mnt_idmap *, struct inode *,
struct dentry *, const char *);
static int autofs_dir_unlink(struct inode *, struct dentry *);
static int autofs_dir_rmdir(struct inode *, struct dentry *);
-static int autofs_dir_mkdir(struct mnt_idmap *, struct inode *,
- struct dentry *, umode_t);
+static struct dentry *autofs_dir_mkdir(struct mnt_idmap *, struct inode *,
+ struct dentry *, umode_t);
static long autofs_root_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long autofs_root_compat_ioctl(struct file *,
@@ -341,6 +341,14 @@ static struct vfsmount *autofs_d_automount(struct path *path)
if (autofs_oz_mode(sbi))
return NULL;
+ /* Refuse to trigger mount if current namespace is not the owner
+ * and the mount is propagation private.
+ */
+ if (sbi->mnt_ns_id != to_ns_common(current->nsproxy->mnt_ns)->ns_id) {
+ if (vfsmount_to_propagation_flags(path->mnt) & MS_PRIVATE)
+ return ERR_PTR(-EPERM);
+ }
+
/*
* If an expire request is pending everyone must wait.
* If the expire fails we're still mounted so continue
@@ -594,9 +602,8 @@ static int autofs_dir_symlink(struct mnt_idmap *idmap,
}
inode->i_private = cp;
inode->i_size = size;
- d_add(dentry, inode);
- dget(dentry);
+ d_make_persistent(dentry, inode);
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count++;
@@ -623,12 +630,11 @@ static int autofs_dir_symlink(struct mnt_idmap *idmap,
static int autofs_dir_unlink(struct inode *dir, struct dentry *dentry)
{
struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
- struct autofs_info *ino = autofs_dentry_ino(dentry);
struct autofs_info *p_ino;
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count--;
- dput(ino->dentry);
+ d_make_discardable(dentry);
d_inode(dentry)->i_size = 0;
clear_nlink(d_inode(dentry));
@@ -710,7 +716,7 @@ static int autofs_dir_rmdir(struct inode *dir, struct dentry *dentry)
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count--;
- dput(ino->dentry);
+ d_make_discardable(dentry);
d_inode(dentry)->i_size = 0;
clear_nlink(d_inode(dentry));
@@ -720,9 +726,9 @@ static int autofs_dir_rmdir(struct inode *dir, struct dentry *dentry)
return 0;
}
-static int autofs_dir_mkdir(struct mnt_idmap *idmap,
- struct inode *dir, struct dentry *dentry,
- umode_t mode)
+static struct dentry *autofs_dir_mkdir(struct mnt_idmap *idmap,
+ struct inode *dir, struct dentry *dentry,
+ umode_t mode)
{
struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
struct autofs_info *ino = autofs_dentry_ino(dentry);
@@ -739,19 +745,18 @@ static int autofs_dir_mkdir(struct mnt_idmap *idmap,
inode = autofs_get_inode(dir->i_sb, S_IFDIR | mode);
if (!inode)
- return -ENOMEM;
- d_add(dentry, inode);
+ return ERR_PTR(-ENOMEM);
if (sbi->version < 5)
autofs_set_leaf_automount_flags(dentry);
- dget(dentry);
+ d_make_persistent(dentry, inode);
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count++;
inc_nlink(dir);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
- return 0;
+ return NULL;
}
/* Get/set timeout ioctl() operation */
diff --git a/fs/backing-file.c b/fs/backing-file.c
index cbdad8b68474..45da8600d564 100644
--- a/fs/backing-file.c
+++ b/fs/backing-file.c
@@ -41,7 +41,7 @@ struct file *backing_file_open(const struct path *user_path, int flags,
return f;
path_get(user_path);
- *backing_file_user_path(f) = *user_path;
+ backing_file_set_user_path(f, user_path);
error = vfs_open(real_path, f);
if (error) {
fput(f);
@@ -65,7 +65,7 @@ struct file *backing_tmpfile_open(const struct path *user_path, int flags,
return f;
path_get(user_path);
- *backing_file_user_path(f) = *user_path;
+ backing_file_set_user_path(f, user_path);
error = vfs_tmpfile(real_idmap, real_parentpath, f, mode);
if (error) {
fput(f);
@@ -157,13 +157,37 @@ static int backing_aio_init_wq(struct kiocb *iocb)
return sb_init_dio_done_wq(sb);
}
+static int do_backing_file_read_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags)
+{
+ struct backing_aio *aio = NULL;
+ int ret;
+
+ if (is_sync_kiocb(iocb)) {
+ rwf_t rwf = iocb_to_rw_flags(flags);
+
+ return vfs_iter_read(file, iter, &iocb->ki_pos, rwf);
+ }
+
+ aio = kmem_cache_zalloc(backing_aio_cachep, GFP_KERNEL);
+ if (!aio)
+ return -ENOMEM;
+
+ aio->orig_iocb = iocb;
+ kiocb_clone(&aio->iocb, iocb, get_file(file));
+ aio->iocb.ki_complete = backing_aio_rw_complete;
+ refcount_set(&aio->ref, 2);
+ ret = vfs_iocb_iter_read(file, &aio->iocb, iter);
+ backing_aio_put(aio);
+ if (ret != -EIOCBQUEUED)
+ backing_aio_cleanup(aio, ret);
+ return ret;
+}
ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
struct kiocb *iocb, int flags,
struct backing_file_ctx *ctx)
{
- struct backing_aio *aio = NULL;
- const struct cred *old_cred;
ssize_t ret;
if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING)))
@@ -176,41 +200,57 @@ ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
!(file->f_mode & FMODE_CAN_ODIRECT))
return -EINVAL;
- old_cred = override_creds_light(ctx->cred);
+ scoped_with_creds(ctx->cred)
+ ret = do_backing_file_read_iter(file, iter, iocb, flags);
+
+ if (ctx->accessed)
+ ctx->accessed(iocb->ki_filp);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(backing_file_read_iter);
+
+static int do_backing_file_write_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ void (*end_write)(struct kiocb *, ssize_t))
+{
+ struct backing_aio *aio;
+ int ret;
+
if (is_sync_kiocb(iocb)) {
rwf_t rwf = iocb_to_rw_flags(flags);
- ret = vfs_iter_read(file, iter, &iocb->ki_pos, rwf);
- } else {
- ret = -ENOMEM;
- aio = kmem_cache_zalloc(backing_aio_cachep, GFP_KERNEL);
- if (!aio)
- goto out;
-
- aio->orig_iocb = iocb;
- kiocb_clone(&aio->iocb, iocb, get_file(file));
- aio->iocb.ki_complete = backing_aio_rw_complete;
- refcount_set(&aio->ref, 2);
- ret = vfs_iocb_iter_read(file, &aio->iocb, iter);
- backing_aio_put(aio);
- if (ret != -EIOCBQUEUED)
- backing_aio_cleanup(aio, ret);
+ ret = vfs_iter_write(file, iter, &iocb->ki_pos, rwf);
+ if (end_write)
+ end_write(iocb, ret);
+ return ret;
}
-out:
- revert_creds_light(old_cred);
- if (ctx->accessed)
- ctx->accessed(iocb->ki_filp);
+ ret = backing_aio_init_wq(iocb);
+ if (ret)
+ return ret;
+
+ aio = kmem_cache_zalloc(backing_aio_cachep, GFP_KERNEL);
+ if (!aio)
+ return -ENOMEM;
+ aio->orig_iocb = iocb;
+ aio->end_write = end_write;
+ kiocb_clone(&aio->iocb, iocb, get_file(file));
+ aio->iocb.ki_flags = flags;
+ aio->iocb.ki_complete = backing_aio_queue_completion;
+ refcount_set(&aio->ref, 2);
+ ret = vfs_iocb_iter_write(file, &aio->iocb, iter);
+ backing_aio_put(aio);
+ if (ret != -EIOCBQUEUED)
+ backing_aio_cleanup(aio, ret);
return ret;
}
-EXPORT_SYMBOL_GPL(backing_file_read_iter);
ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
struct kiocb *iocb, int flags,
struct backing_file_ctx *ctx)
{
- const struct cred *old_cred;
ssize_t ret;
if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING)))
@@ -227,46 +267,8 @@ ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
!(file->f_mode & FMODE_CAN_ODIRECT))
return -EINVAL;
- /*
- * Stacked filesystems don't support deferred completions, don't copy
- * this property in case it is set by the issuer.
- */
- flags &= ~IOCB_DIO_CALLER_COMP;
-
- old_cred = override_creds_light(ctx->cred);
- if (is_sync_kiocb(iocb)) {
- rwf_t rwf = iocb_to_rw_flags(flags);
-
- ret = vfs_iter_write(file, iter, &iocb->ki_pos, rwf);
- if (ctx->end_write)
- ctx->end_write(iocb, ret);
- } else {
- struct backing_aio *aio;
-
- ret = backing_aio_init_wq(iocb);
- if (ret)
- goto out;
-
- ret = -ENOMEM;
- aio = kmem_cache_zalloc(backing_aio_cachep, GFP_KERNEL);
- if (!aio)
- goto out;
-
- aio->orig_iocb = iocb;
- aio->end_write = ctx->end_write;
- kiocb_clone(&aio->iocb, iocb, get_file(file));
- aio->iocb.ki_flags = flags;
- aio->iocb.ki_complete = backing_aio_queue_completion;
- refcount_set(&aio->ref, 2);
- ret = vfs_iocb_iter_write(file, &aio->iocb, iter);
- backing_aio_put(aio);
- if (ret != -EIOCBQUEUED)
- backing_aio_cleanup(aio, ret);
- }
-out:
- revert_creds_light(old_cred);
-
- return ret;
+ scoped_with_creds(ctx->cred)
+ return do_backing_file_write_iter(file, iter, iocb, flags, ctx->end_write);
}
EXPORT_SYMBOL_GPL(backing_file_write_iter);
@@ -275,15 +277,13 @@ ssize_t backing_file_splice_read(struct file *in, struct kiocb *iocb,
unsigned int flags,
struct backing_file_ctx *ctx)
{
- const struct cred *old_cred;
ssize_t ret;
if (WARN_ON_ONCE(!(in->f_mode & FMODE_BACKING)))
return -EIO;
- old_cred = override_creds_light(ctx->cred);
- ret = vfs_splice_read(in, &iocb->ki_pos, pipe, len, flags);
- revert_creds_light(old_cred);
+ scoped_with_creds(ctx->cred)
+ ret = vfs_splice_read(in, &iocb->ki_pos, pipe, len, flags);
if (ctx->accessed)
ctx->accessed(iocb->ki_filp);
@@ -297,7 +297,6 @@ ssize_t backing_file_splice_write(struct pipe_inode_info *pipe,
size_t len, unsigned int flags,
struct backing_file_ctx *ctx)
{
- const struct cred *old_cred;
ssize_t ret;
if (WARN_ON_ONCE(!(out->f_mode & FMODE_BACKING)))
@@ -310,11 +309,11 @@ ssize_t backing_file_splice_write(struct pipe_inode_info *pipe,
if (ret)
return ret;
- old_cred = override_creds_light(ctx->cred);
- file_start_write(out);
- ret = out->f_op->splice_write(pipe, out, &iocb->ki_pos, len, flags);
- file_end_write(out);
- revert_creds_light(old_cred);
+ scoped_with_creds(ctx->cred) {
+ file_start_write(out);
+ ret = out->f_op->splice_write(pipe, out, &iocb->ki_pos, len, flags);
+ file_end_write(out);
+ }
if (ctx->end_write)
ctx->end_write(iocb, ret);
@@ -326,21 +325,19 @@ EXPORT_SYMBOL_GPL(backing_file_splice_write);
int backing_file_mmap(struct file *file, struct vm_area_struct *vma,
struct backing_file_ctx *ctx)
{
- const struct cred *old_cred;
struct file *user_file = vma->vm_file;
int ret;
if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING)))
return -EIO;
- if (!file->f_op->mmap)
+ if (!can_mmap_file(file))
return -ENODEV;
vma_set_file(vma, file);
- old_cred = override_creds_light(ctx->cred);
- ret = call_mmap(vma->vm_file, vma);
- revert_creds_light(old_cred);
+ scoped_with_creds(ctx->cred)
+ ret = vfs_mmap(vma->vm_file, vma);
if (ctx->accessed)
ctx->accessed(user_file);
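The backing-file.c hunks above replace open-coded override_creds_light()/revert_creds_light() pairs with a scoped_with_creds() guard, so the credential override is reverted automatically when the guarded statement or block exits, including on the early returns in the new do_backing_file_read_iter()/do_backing_file_write_iter() helpers. As a rough sketch only (not the kernel's actual definition), such a guard can be built from the long-standing override_creds()/revert_creds() pair with a one-iteration for loop:

	/* Hypothetical guard macro illustrating the pattern; the real
	 * scoped_with_creds() definition is not part of this hunk. */
	#define scoped_with_creds_sketch(cred)					\
		for (const struct cred *__old = override_creds(cred);		\
		     __old; revert_creds(__old), __old = NULL)

	/* usage, mirroring backing_file_splice_read() above */
	scoped_with_creds_sketch(ctx->cred)
		ret = vfs_splice_read(in, &iocb->ki_pos, pipe, len, flags);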
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 316d88da2ce1..0ef9bcb744dd 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -58,10 +58,10 @@ static int bad_inode_symlink(struct mnt_idmap *idmap,
return -EIO;
}
-static int bad_inode_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *bad_inode_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- return -EIO;
+ return ERR_PTR(-EIO);
}
static int bad_inode_rmdir (struct inode *dir, struct dentry *dentry)
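The bad_inode.c hunk tracks an inode_operations change in which ->mkdir() returns a struct dentry pointer instead of an int, so its error path becomes ERR_PTR(-EIO) rather than -EIO. Assuming only that signature (the full success convention is outside this hunk), a hypothetical caller would now decode failures with the ERR_PTR helpers:

	struct dentry *de = dir->i_op->mkdir(idmap, dir, dentry, mode);
	if (IS_ERR(de))
		return PTR_ERR(de);	/* e.g. -EIO from bad_inode_mkdir() */
	/* otherwise proceed with the directory dentry as before */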
diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig
deleted file mode 100644
index ab6c95b895b3..000000000000
--- a/fs/bcachefs/Kconfig
+++ /dev/null
@@ -1,105 +0,0 @@
-
-config BCACHEFS_FS
- tristate "bcachefs filesystem support (EXPERIMENTAL)"
- depends on BLOCK
- select EXPORTFS
- select CLOSURES
- select LIBCRC32C
- select CRC64
- select FS_POSIX_ACL
- select LZ4_COMPRESS
- select LZ4_DECOMPRESS
- select LZ4HC_COMPRESS
- select LZ4HC_DECOMPRESS
- select ZLIB_DEFLATE
- select ZLIB_INFLATE
- select ZSTD_COMPRESS
- select ZSTD_DECOMPRESS
- select CRYPTO_SHA256
- select CRYPTO_CHACHA20
- select CRYPTO_POLY1305
- select KEYS
- select RAID6_PQ
- select XOR_BLOCKS
- select XXHASH
- select SRCU
- select SYMBOLIC_ERRNAME
- select MIN_HEAP
- help
- The bcachefs filesystem - a modern, copy on write filesystem, with
- support for multiple devices, compression, checksumming, etc.
-
-config BCACHEFS_QUOTA
- bool "bcachefs quota support"
- depends on BCACHEFS_FS
- select QUOTACTL
-
-config BCACHEFS_ERASURE_CODING
- bool "bcachefs erasure coding (RAID5/6) support (EXPERIMENTAL)"
- depends on BCACHEFS_FS
- select QUOTACTL
- help
- This enables the "erasure_code" filesysystem and inode option, which
- organizes data into reed-solomon stripes instead of ordinary
- replication.
-
- WARNING: this feature is still undergoing on disk format changes, and
- should only be enabled for testing purposes.
-
-config BCACHEFS_POSIX_ACL
- bool "bcachefs POSIX ACL support"
- depends on BCACHEFS_FS
- select FS_POSIX_ACL
-
-config BCACHEFS_DEBUG
- bool "bcachefs debugging"
- depends on BCACHEFS_FS
- help
- Enables many extra debugging checks and assertions.
-
- The resulting code will be significantly slower than normal; you
- probably shouldn't select this option unless you're a developer.
-
-config BCACHEFS_TESTS
- bool "bcachefs unit and performance tests"
- depends on BCACHEFS_FS
- help
- Include some unit and performance tests for the core btree code
-
-config BCACHEFS_LOCK_TIME_STATS
- bool "bcachefs lock time statistics"
- depends on BCACHEFS_FS
- help
- Expose statistics for how long we held a lock in debugfs
-
-config BCACHEFS_NO_LATENCY_ACCT
- bool "disable latency accounting and time stats"
- depends on BCACHEFS_FS
- help
- This disables device latency tracking and time stats; intended only for performance testing
-
-config BCACHEFS_SIX_OPTIMISTIC_SPIN
- bool "Optimistic spinning for six locks"
- depends on BCACHEFS_FS
- depends on SMP
- default y
- help
- Instead of immediately sleeping when attempting to take a six lock that
- is held by another thread, spin for a short while, as long as the
- thread owning the lock is running.
-
-config BCACHEFS_PATH_TRACEPOINTS
- bool "Extra btree_path tracepoints"
- depends on BCACHEFS_FS
- help
- Enable extra tracepoints for debugging btree_path operations; we don't
- normally want these enabled because they happen at very high rates.
-
-config MEAN_AND_VARIANCE_UNIT_TEST
- tristate "mean_and_variance unit tests" if !KUNIT_ALL_TESTS
- depends on KUNIT
- depends on BCACHEFS_FS
- default KUNIT_ALL_TESTS
- help
- This option enables the kunit tests for the mean_and_variance module.
- If unsure, say N.
diff --git a/fs/bcachefs/Makefile b/fs/bcachefs/Makefile
deleted file mode 100644
index 56d20e219f59..000000000000
--- a/fs/bcachefs/Makefile
+++ /dev/null
@@ -1,101 +0,0 @@
-
-obj-$(CONFIG_BCACHEFS_FS) += bcachefs.o
-
-bcachefs-y := \
- acl.o \
- alloc_background.o \
- alloc_foreground.o \
- backpointers.o \
- bkey.o \
- bkey_methods.o \
- bkey_sort.o \
- bset.o \
- btree_cache.o \
- btree_gc.o \
- btree_io.o \
- btree_iter.o \
- btree_journal_iter.o \
- btree_key_cache.o \
- btree_locking.o \
- btree_node_scan.o \
- btree_trans_commit.o \
- btree_update.o \
- btree_update_interior.o \
- btree_write_buffer.o \
- buckets.o \
- buckets_waiting_for_journal.o \
- chardev.o \
- checksum.o \
- clock.o \
- compress.o \
- darray.o \
- data_update.o \
- debug.o \
- dirent.o \
- disk_accounting.o \
- disk_groups.o \
- ec.o \
- errcode.o \
- error.o \
- extents.o \
- extent_update.o \
- eytzinger.o \
- fs.o \
- fs-common.o \
- fs-ioctl.o \
- fs-io.o \
- fs-io-buffered.o \
- fs-io-direct.o \
- fs-io-pagecache.o \
- fsck.o \
- inode.o \
- io_read.o \
- io_misc.o \
- io_write.o \
- journal.o \
- journal_io.o \
- journal_reclaim.o \
- journal_sb.o \
- journal_seq_blacklist.o \
- keylist.o \
- logged_ops.o \
- lru.o \
- mean_and_variance.o \
- migrate.o \
- move.o \
- movinggc.o \
- nocow_locking.o \
- opts.o \
- printbuf.o \
- quota.o \
- rebalance.o \
- rcu_pending.o \
- recovery.o \
- recovery_passes.o \
- reflink.o \
- replicas.o \
- sb-clean.o \
- sb-counters.o \
- sb-downgrade.o \
- sb-errors.o \
- sb-members.o \
- siphash.o \
- six.o \
- snapshot.o \
- subvolume.o \
- super.o \
- super-io.o \
- sysfs.o \
- tests.o \
- time_stats.o \
- thread_with_file.o \
- trace.o \
- two_state_shared_lock.o \
- util.o \
- varint.o \
- xattr.o
-
-obj-$(CONFIG_MEAN_AND_VARIANCE_UNIT_TEST) += mean_and_variance_test.o
-
-# Silence "note: xyz changed in GCC X.X" messages
-subdir-ccflags-y += $(call cc-disable-warning, psabi)
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
deleted file mode 100644
index 87f1be9d4db4..000000000000
--- a/fs/bcachefs/acl.c
+++ /dev/null
@@ -1,450 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-
-#include "acl.h"
-#include "xattr.h"
-
-#include <linux/posix_acl.h>
-
-static const char * const acl_types[] = {
- [ACL_USER_OBJ] = "user_obj",
- [ACL_USER] = "user",
- [ACL_GROUP_OBJ] = "group_obj",
- [ACL_GROUP] = "group",
- [ACL_MASK] = "mask",
- [ACL_OTHER] = "other",
- NULL,
-};
-
-void bch2_acl_to_text(struct printbuf *out, const void *value, size_t size)
-{
- const void *p, *end = value + size;
-
- if (!value ||
- size < sizeof(bch_acl_header) ||
- ((bch_acl_header *)value)->a_version != cpu_to_le32(BCH_ACL_VERSION))
- return;
-
- p = value + sizeof(bch_acl_header);
- while (p < end) {
- const bch_acl_entry *in = p;
- unsigned tag = le16_to_cpu(in->e_tag);
-
- prt_str(out, acl_types[tag]);
-
- switch (tag) {
- case ACL_USER_OBJ:
- case ACL_GROUP_OBJ:
- case ACL_MASK:
- case ACL_OTHER:
- p += sizeof(bch_acl_entry_short);
- break;
- case ACL_USER:
- prt_printf(out, " uid %u", le32_to_cpu(in->e_id));
- p += sizeof(bch_acl_entry);
- break;
- case ACL_GROUP:
- prt_printf(out, " gid %u", le32_to_cpu(in->e_id));
- p += sizeof(bch_acl_entry);
- break;
- }
-
- prt_printf(out, " %o", le16_to_cpu(in->e_perm));
-
- if (p != end)
- prt_char(out, ' ');
- }
-}
-
-#ifdef CONFIG_BCACHEFS_POSIX_ACL
-
-#include "fs.h"
-
-#include <linux/fs.h>
-#include <linux/posix_acl_xattr.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-
-static inline size_t bch2_acl_size(unsigned nr_short, unsigned nr_long)
-{
- return sizeof(bch_acl_header) +
- sizeof(bch_acl_entry_short) * nr_short +
- sizeof(bch_acl_entry) * nr_long;
-}
-
-static inline int acl_to_xattr_type(int type)
-{
- switch (type) {
- case ACL_TYPE_ACCESS:
- return KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS;
- case ACL_TYPE_DEFAULT:
- return KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT;
- default:
- BUG();
- }
-}
-
-/*
- * Convert from filesystem to in-memory representation.
- */
-static struct posix_acl *bch2_acl_from_disk(struct btree_trans *trans,
- const void *value, size_t size)
-{
- const void *p, *end = value + size;
- struct posix_acl *acl;
- struct posix_acl_entry *out;
- unsigned count = 0;
- int ret;
-
- if (!value)
- return NULL;
- if (size < sizeof(bch_acl_header))
- goto invalid;
- if (((bch_acl_header *)value)->a_version !=
- cpu_to_le32(BCH_ACL_VERSION))
- goto invalid;
-
- p = value + sizeof(bch_acl_header);
- while (p < end) {
- const bch_acl_entry *entry = p;
-
- if (p + sizeof(bch_acl_entry_short) > end)
- goto invalid;
-
- switch (le16_to_cpu(entry->e_tag)) {
- case ACL_USER_OBJ:
- case ACL_GROUP_OBJ:
- case ACL_MASK:
- case ACL_OTHER:
- p += sizeof(bch_acl_entry_short);
- break;
- case ACL_USER:
- case ACL_GROUP:
- p += sizeof(bch_acl_entry);
- break;
- default:
- goto invalid;
- }
-
- count++;
- }
-
- if (p > end)
- goto invalid;
-
- if (!count)
- return NULL;
-
- acl = allocate_dropping_locks(trans, ret,
- posix_acl_alloc(count, _gfp));
- if (!acl)
- return ERR_PTR(-ENOMEM);
- if (ret) {
- kfree(acl);
- return ERR_PTR(ret);
- }
-
- out = acl->a_entries;
-
- p = value + sizeof(bch_acl_header);
- while (p < end) {
- const bch_acl_entry *in = p;
-
- out->e_tag = le16_to_cpu(in->e_tag);
- out->e_perm = le16_to_cpu(in->e_perm);
-
- switch (out->e_tag) {
- case ACL_USER_OBJ:
- case ACL_GROUP_OBJ:
- case ACL_MASK:
- case ACL_OTHER:
- p += sizeof(bch_acl_entry_short);
- break;
- case ACL_USER:
- out->e_uid = make_kuid(&init_user_ns,
- le32_to_cpu(in->e_id));
- p += sizeof(bch_acl_entry);
- break;
- case ACL_GROUP:
- out->e_gid = make_kgid(&init_user_ns,
- le32_to_cpu(in->e_id));
- p += sizeof(bch_acl_entry);
- break;
- }
-
- out++;
- }
-
- BUG_ON(out != acl->a_entries + acl->a_count);
-
- return acl;
-invalid:
- pr_err("invalid acl entry");
- return ERR_PTR(-EINVAL);
-}
-
-#define acl_for_each_entry(acl, acl_e) \
- for (acl_e = acl->a_entries; \
- acl_e < acl->a_entries + acl->a_count; \
- acl_e++)
-
-/*
- * Convert from in-memory to filesystem representation.
- */
-static struct bkey_i_xattr *
-bch2_acl_to_xattr(struct btree_trans *trans,
- const struct posix_acl *acl,
- int type)
-{
- struct bkey_i_xattr *xattr;
- bch_acl_header *acl_header;
- const struct posix_acl_entry *acl_e;
- void *outptr;
- unsigned nr_short = 0, nr_long = 0, acl_len, u64s;
-
- acl_for_each_entry(acl, acl_e) {
- switch (acl_e->e_tag) {
- case ACL_USER:
- case ACL_GROUP:
- nr_long++;
- break;
- case ACL_USER_OBJ:
- case ACL_GROUP_OBJ:
- case ACL_MASK:
- case ACL_OTHER:
- nr_short++;
- break;
- default:
- return ERR_PTR(-EINVAL);
- }
- }
-
- acl_len = bch2_acl_size(nr_short, nr_long);
- u64s = BKEY_U64s + xattr_val_u64s(0, acl_len);
-
- if (u64s > U8_MAX)
- return ERR_PTR(-E2BIG);
-
- xattr = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
- if (IS_ERR(xattr))
- return xattr;
-
- bkey_xattr_init(&xattr->k_i);
- xattr->k.u64s = u64s;
- xattr->v.x_type = acl_to_xattr_type(type);
- xattr->v.x_name_len = 0;
- xattr->v.x_val_len = cpu_to_le16(acl_len);
-
- acl_header = xattr_val(&xattr->v);
- acl_header->a_version = cpu_to_le32(BCH_ACL_VERSION);
-
- outptr = (void *) acl_header + sizeof(*acl_header);
-
- acl_for_each_entry(acl, acl_e) {
- bch_acl_entry *entry = outptr;
-
- entry->e_tag = cpu_to_le16(acl_e->e_tag);
- entry->e_perm = cpu_to_le16(acl_e->e_perm);
- switch (acl_e->e_tag) {
- case ACL_USER:
- entry->e_id = cpu_to_le32(
- from_kuid(&init_user_ns, acl_e->e_uid));
- outptr += sizeof(bch_acl_entry);
- break;
- case ACL_GROUP:
- entry->e_id = cpu_to_le32(
- from_kgid(&init_user_ns, acl_e->e_gid));
- outptr += sizeof(bch_acl_entry);
- break;
-
- case ACL_USER_OBJ:
- case ACL_GROUP_OBJ:
- case ACL_MASK:
- case ACL_OTHER:
- outptr += sizeof(bch_acl_entry_short);
- break;
- }
- }
-
- BUG_ON(outptr != xattr_val(&xattr->v) + acl_len);
-
- return xattr;
-}
-
-struct posix_acl *bch2_get_acl(struct inode *vinode, int type, bool rcu)
-{
- struct bch_inode_info *inode = to_bch_ei(vinode);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
- struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
- struct btree_iter iter = { NULL };
- struct posix_acl *acl = NULL;
-
- if (rcu)
- return ERR_PTR(-ECHILD);
-
- struct btree_trans *trans = bch2_trans_get(c);
-retry:
- bch2_trans_begin(trans);
-
- struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
- &hash, inode_inum(inode), &search, 0);
- int ret = bkey_err(k);
- if (ret)
- goto err;
-
- struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
- acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
- le16_to_cpu(xattr.v->x_val_len));
- ret = PTR_ERR_OR_ZERO(acl);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- if (ret)
- acl = !bch2_err_matches(ret, ENOENT) ? ERR_PTR(ret) : NULL;
-
- if (!IS_ERR_OR_NULL(acl))
- set_cached_acl(&inode->v, type, acl);
-
- bch2_trans_iter_exit(trans, &iter);
- bch2_trans_put(trans);
- return acl;
-}
-
-int bch2_set_acl_trans(struct btree_trans *trans, subvol_inum inum,
- struct bch_inode_unpacked *inode_u,
- struct posix_acl *acl, int type)
-{
- struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode_u);
- int ret;
-
- if (type == ACL_TYPE_DEFAULT &&
- !S_ISDIR(inode_u->bi_mode))
- return acl ? -EACCES : 0;
-
- if (acl) {
- struct bkey_i_xattr *xattr =
- bch2_acl_to_xattr(trans, acl, type);
- if (IS_ERR(xattr))
- return PTR_ERR(xattr);
-
- ret = bch2_hash_set(trans, bch2_xattr_hash_desc, &hash_info,
- inum, &xattr->k_i, 0);
- } else {
- struct xattr_search_key search =
- X_SEARCH(acl_to_xattr_type(type), "", 0);
-
- ret = bch2_hash_delete(trans, bch2_xattr_hash_desc, &hash_info,
- inum, &search);
- }
-
- return bch2_err_matches(ret, ENOENT) ? 0 : ret;
-}
-
-int bch2_set_acl(struct mnt_idmap *idmap,
- struct dentry *dentry,
- struct posix_acl *_acl, int type)
-{
- struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct btree_iter inode_iter = { NULL };
- struct bch_inode_unpacked inode_u;
- struct posix_acl *acl;
- umode_t mode;
- int ret;
-
- mutex_lock(&inode->ei_update_lock);
- struct btree_trans *trans = bch2_trans_get(c);
-retry:
- bch2_trans_begin(trans);
- acl = _acl;
-
- ret = bch2_subvol_is_ro_trans(trans, inode->ei_inum.subvol) ?:
- bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
- BTREE_ITER_intent);
- if (ret)
- goto btree_err;
-
- mode = inode_u.bi_mode;
-
- if (type == ACL_TYPE_ACCESS) {
- ret = posix_acl_update_mode(idmap, &inode->v, &mode, &acl);
- if (ret)
- goto btree_err;
- }
-
- ret = bch2_set_acl_trans(trans, inode_inum(inode), &inode_u, acl, type);
- if (ret)
- goto btree_err;
-
- inode_u.bi_ctime = bch2_current_time(c);
- inode_u.bi_mode = mode;
-
- ret = bch2_inode_write(trans, &inode_iter, &inode_u) ?:
- bch2_trans_commit(trans, NULL, NULL, 0);
-btree_err:
- bch2_trans_iter_exit(trans, &inode_iter);
-
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
- if (unlikely(ret))
- goto err;
-
- bch2_inode_update_after_write(trans, inode, &inode_u,
- ATTR_CTIME|ATTR_MODE);
-
- set_cached_acl(&inode->v, type, acl);
-err:
- bch2_trans_put(trans);
- mutex_unlock(&inode->ei_update_lock);
-
- return ret;
-}
-
-int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
- struct bch_inode_unpacked *inode,
- umode_t mode,
- struct posix_acl **new_acl)
-{
- struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode);
- struct xattr_search_key search = X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0);
- struct btree_iter iter;
- struct posix_acl *acl = NULL;
-
- struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
- &hash_info, inum, &search, BTREE_ITER_intent);
- int ret = bkey_err(k);
- if (ret)
- return bch2_err_matches(ret, ENOENT) ? 0 : ret;
-
- struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
-
- acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
- le16_to_cpu(xattr.v->x_val_len));
- ret = PTR_ERR_OR_ZERO(acl);
- if (ret)
- goto err;
-
- ret = allocate_dropping_locks_errcode(trans, __posix_acl_chmod(&acl, _gfp, mode));
- if (ret)
- goto err;
-
- struct bkey_i_xattr *new = bch2_acl_to_xattr(trans, acl, ACL_TYPE_ACCESS);
- ret = PTR_ERR_OR_ZERO(new);
- if (ret)
- goto err;
-
- new->k.p = iter.pos;
- ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
- *new_acl = acl;
- acl = NULL;
-err:
- bch2_trans_iter_exit(trans, &iter);
- if (!IS_ERR_OR_NULL(acl))
- kfree(acl);
- return ret;
-}
-
-#endif /* CONFIG_BCACHEFS_POSIX_ACL */
diff --git a/fs/bcachefs/acl.h b/fs/bcachefs/acl.h
deleted file mode 100644
index fe730a6bf0c1..000000000000
--- a/fs/bcachefs/acl.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_ACL_H
-#define _BCACHEFS_ACL_H
-
-struct bch_inode_unpacked;
-struct bch_hash_info;
-struct bch_inode_info;
-struct posix_acl;
-
-#define BCH_ACL_VERSION 0x0001
-
-typedef struct {
- __le16 e_tag;
- __le16 e_perm;
- __le32 e_id;
-} bch_acl_entry;
-
-typedef struct {
- __le16 e_tag;
- __le16 e_perm;
-} bch_acl_entry_short;
-
-typedef struct {
- __le32 a_version;
-} bch_acl_header;
-
-void bch2_acl_to_text(struct printbuf *, const void *, size_t);
-
-#ifdef CONFIG_BCACHEFS_POSIX_ACL
-
-struct posix_acl *bch2_get_acl(struct inode *, int, bool);
-
-int bch2_set_acl_trans(struct btree_trans *, subvol_inum,
- struct bch_inode_unpacked *,
- struct posix_acl *, int);
-int bch2_set_acl(struct mnt_idmap *, struct dentry *, struct posix_acl *, int);
-int bch2_acl_chmod(struct btree_trans *, subvol_inum,
- struct bch_inode_unpacked *,
- umode_t, struct posix_acl **);
-
-#else
-
-static inline int bch2_set_acl_trans(struct btree_trans *trans, subvol_inum inum,
- struct bch_inode_unpacked *inode_u,
- struct posix_acl *acl, int type)
-{
- return 0;
-}
-
-static inline int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
- struct bch_inode_unpacked *inode,
- umode_t mode,
- struct posix_acl **new_acl)
-{
- return 0;
-}
-
-#endif /* CONFIG_BCACHEFS_POSIX_ACL */
-
-#endif /* _BCACHEFS_ACL_H */
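For reference, the on-disk ACL encoding removed above is a bch_acl_header followed by a mix of short entries (tag, perm) and long entries (tag, perm, id). Using only the struct sizes visible in acl.h, a worked instance of bch2_acl_size() from the deleted acl.c:

	/*
	 * Access ACL with USER_OBJ, GROUP_OBJ, MASK and OTHER (short
	 * entries) plus one named ACL_USER entry (long entry):
	 *
	 *   sizeof(bch_acl_header)           =  4
	 * + 4 * sizeof(bch_acl_entry_short)  = 16
	 * + 1 * sizeof(bch_acl_entry)        =  8
	 *                                     ----
	 *   bch2_acl_size(4, 1)              = 28 bytes
	 */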
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
deleted file mode 100644
index c84a91572a1d..000000000000
--- a/fs/bcachefs/alloc_background.c
+++ /dev/null
@@ -1,2553 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "bcachefs.h"
-#include "alloc_background.h"
-#include "alloc_foreground.h"
-#include "backpointers.h"
-#include "bkey_buf.h"
-#include "btree_cache.h"
-#include "btree_io.h"
-#include "btree_key_cache.h"
-#include "btree_update.h"
-#include "btree_update_interior.h"
-#include "btree_gc.h"
-#include "btree_write_buffer.h"
-#include "buckets.h"
-#include "buckets_waiting_for_journal.h"
-#include "clock.h"
-#include "debug.h"
-#include "disk_accounting.h"
-#include "ec.h"
-#include "error.h"
-#include "lru.h"
-#include "recovery.h"
-#include "trace.h"
-#include "varint.h"
-
-#include <linux/kthread.h>
-#include <linux/math64.h>
-#include <linux/random.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
-#include <linux/sched/task.h>
-#include <linux/sort.h>
-#include <linux/jiffies.h>
-
-static void bch2_discard_one_bucket_fast(struct bch_dev *, u64);
-
-/* Persistent alloc info: */
-
-static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
-#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
- BCH_ALLOC_FIELDS_V1()
-#undef x
-};
-
-struct bkey_alloc_unpacked {
- u64 journal_seq;
- u8 gen;
- u8 oldest_gen;
- u8 data_type;
- bool need_discard:1;
- bool need_inc_gen:1;
-#define x(_name, _bits) u##_bits _name;
- BCH_ALLOC_FIELDS_V2()
-#undef x
-};
-
-static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
- const void **p, unsigned field)
-{
- unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
- u64 v;
-
- if (!(a->fields & (1 << field)))
- return 0;
-
- switch (bytes) {
- case 1:
- v = *((const u8 *) *p);
- break;
- case 2:
- v = le16_to_cpup(*p);
- break;
- case 4:
- v = le32_to_cpup(*p);
- break;
- case 8:
- v = le64_to_cpup(*p);
- break;
- default:
- BUG();
- }
-
- *p += bytes;
- return v;
-}
-
-static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
- struct bkey_s_c k)
-{
- const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
- const void *d = in->data;
- unsigned idx = 0;
-
- out->gen = in->gen;
-
-#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
- BCH_ALLOC_FIELDS_V1()
-#undef x
-}
-
-static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
- struct bkey_s_c k)
-{
- struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
- const u8 *in = a.v->data;
- const u8 *end = bkey_val_end(a);
- unsigned fieldnr = 0;
- int ret;
- u64 v;
-
- out->gen = a.v->gen;
- out->oldest_gen = a.v->oldest_gen;
- out->data_type = a.v->data_type;
-
-#define x(_name, _bits) \
- if (fieldnr < a.v->nr_fields) { \
- ret = bch2_varint_decode_fast(in, end, &v); \
- if (ret < 0) \
- return ret; \
- in += ret; \
- } else { \
- v = 0; \
- } \
- out->_name = v; \
- if (v != out->_name) \
- return -1; \
- fieldnr++;
-
- BCH_ALLOC_FIELDS_V2()
-#undef x
- return 0;
-}
-
-static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
- struct bkey_s_c k)
-{
- struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
- const u8 *in = a.v->data;
- const u8 *end = bkey_val_end(a);
- unsigned fieldnr = 0;
- int ret;
- u64 v;
-
- out->gen = a.v->gen;
- out->oldest_gen = a.v->oldest_gen;
- out->data_type = a.v->data_type;
- out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
- out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
- out->journal_seq = le64_to_cpu(a.v->journal_seq);
-
-#define x(_name, _bits) \
- if (fieldnr < a.v->nr_fields) { \
- ret = bch2_varint_decode_fast(in, end, &v); \
- if (ret < 0) \
- return ret; \
- in += ret; \
- } else { \
- v = 0; \
- } \
- out->_name = v; \
- if (v != out->_name) \
- return -1; \
- fieldnr++;
-
- BCH_ALLOC_FIELDS_V2()
-#undef x
- return 0;
-}
-
-static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
-{
- struct bkey_alloc_unpacked ret = { .gen = 0 };
-
- switch (k.k->type) {
- case KEY_TYPE_alloc:
- bch2_alloc_unpack_v1(&ret, k);
- break;
- case KEY_TYPE_alloc_v2:
- bch2_alloc_unpack_v2(&ret, k);
- break;
- case KEY_TYPE_alloc_v3:
- bch2_alloc_unpack_v3(&ret, k);
- break;
- }
-
- return ret;
-}
-
-static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
-{
- unsigned i, bytes = offsetof(struct bch_alloc, data);
-
- for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
- if (a->fields & (1 << i))
- bytes += BCH_ALLOC_V1_FIELD_BYTES[i];
-
- return DIV_ROUND_UP(bytes, sizeof(u64));
-}
-
-int bch2_alloc_v1_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
- int ret = 0;
-
- /* allow for unknown fields */
- bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v),
- c, alloc_v1_val_size_bad,
- "incorrect value size (%zu < %u)",
- bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
-fsck_err:
- return ret;
-}
-
-int bch2_alloc_v2_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct bkey_alloc_unpacked u;
- int ret = 0;
-
- bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k),
- c, alloc_v2_unpack_error,
- "unpack error");
-fsck_err:
- return ret;
-}
-
-int bch2_alloc_v3_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct bkey_alloc_unpacked u;
- int ret = 0;
-
- bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k),
- c, alloc_v2_unpack_error,
- "unpack error");
-fsck_err:
- return ret;
-}
-
-int bch2_alloc_v4_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct bch_alloc_v4 a;
- int ret = 0;
-
- bkey_val_copy(&a, bkey_s_c_to_alloc_v4(k));
-
- bkey_fsck_err_on(alloc_v4_u64s_noerror(&a) > bkey_val_u64s(k.k),
- c, alloc_v4_val_size_bad,
- "bad val size (%u > %zu)",
- alloc_v4_u64s_noerror(&a), bkey_val_u64s(k.k));
-
- bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(&a) &&
- BCH_ALLOC_V4_NR_BACKPOINTERS(&a),
- c, alloc_v4_backpointers_start_bad,
- "invalid backpointers_start");
-
- bkey_fsck_err_on(alloc_data_type(a, a.data_type) != a.data_type,
- c, alloc_key_data_type_bad,
- "invalid data type (got %u should be %u)",
- a.data_type, alloc_data_type(a, a.data_type));
-
- for (unsigned i = 0; i < 2; i++)
- bkey_fsck_err_on(a.io_time[i] > LRU_TIME_MAX,
- c, alloc_key_io_time_bad,
- "invalid io_time[%s]: %llu, max %llu",
- i == READ ? "read" : "write",
- a.io_time[i], LRU_TIME_MAX);
-
- unsigned stripe_sectors = BCH_ALLOC_V4_BACKPOINTERS_START(&a) * sizeof(u64) >
- offsetof(struct bch_alloc_v4, stripe_sectors)
- ? a.stripe_sectors
- : 0;
-
- switch (a.data_type) {
- case BCH_DATA_free:
- case BCH_DATA_need_gc_gens:
- case BCH_DATA_need_discard:
- bkey_fsck_err_on(stripe_sectors ||
- a.dirty_sectors ||
- a.cached_sectors ||
- a.stripe,
- c, alloc_key_empty_but_have_data,
- "empty data type free but have data %u.%u.%u %u",
- stripe_sectors,
- a.dirty_sectors,
- a.cached_sectors,
- a.stripe);
- break;
- case BCH_DATA_sb:
- case BCH_DATA_journal:
- case BCH_DATA_btree:
- case BCH_DATA_user:
- case BCH_DATA_parity:
- bkey_fsck_err_on(!a.dirty_sectors &&
- !stripe_sectors,
- c, alloc_key_dirty_sectors_0,
- "data_type %s but dirty_sectors==0",
- bch2_data_type_str(a.data_type));
- break;
- case BCH_DATA_cached:
- bkey_fsck_err_on(!a.cached_sectors ||
- a.dirty_sectors ||
- stripe_sectors ||
- a.stripe,
- c, alloc_key_cached_inconsistency,
- "data type inconsistency");
-
- bkey_fsck_err_on(!a.io_time[READ] &&
- c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
- c, alloc_key_cached_but_read_time_zero,
- "cached bucket with read_time == 0");
- break;
- case BCH_DATA_stripe:
- break;
- }
-fsck_err:
- return ret;
-}
-
-void bch2_alloc_v4_swab(struct bkey_s k)
-{
- struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
- struct bch_backpointer *bp, *bps;
-
- a->journal_seq = swab64(a->journal_seq);
- a->flags = swab32(a->flags);
- a->dirty_sectors = swab32(a->dirty_sectors);
- a->cached_sectors = swab32(a->cached_sectors);
- a->io_time[0] = swab64(a->io_time[0]);
- a->io_time[1] = swab64(a->io_time[1]);
- a->stripe = swab32(a->stripe);
- a->nr_external_backpointers = swab32(a->nr_external_backpointers);
- a->stripe_sectors = swab32(a->stripe_sectors);
-
- bps = alloc_v4_backpointers(a);
- for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
- bp->bucket_offset = swab40(bp->bucket_offset);
- bp->bucket_len = swab32(bp->bucket_len);
- bch2_bpos_swab(&bp->pos);
- }
-}
-
-void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
-{
- struct bch_alloc_v4 _a;
- const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
- struct bch_dev *ca = c ? bch2_dev_bucket_tryget_noerror(c, k.k->p) : NULL;
-
- prt_newline(out);
- printbuf_indent_add(out, 2);
-
- prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
- bch2_prt_data_type(out, a->data_type);
- prt_newline(out);
- prt_printf(out, "journal_seq %llu\n", a->journal_seq);
- prt_printf(out, "need_discard %llu\n", BCH_ALLOC_V4_NEED_DISCARD(a));
- prt_printf(out, "need_inc_gen %llu\n", BCH_ALLOC_V4_NEED_INC_GEN(a));
- prt_printf(out, "dirty_sectors %u\n", a->dirty_sectors);
- prt_printf(out, "stripe_sectors %u\n", a->stripe_sectors);
- prt_printf(out, "cached_sectors %u\n", a->cached_sectors);
- prt_printf(out, "stripe %u\n", a->stripe);
- prt_printf(out, "stripe_redundancy %u\n", a->stripe_redundancy);
- prt_printf(out, "io_time[READ] %llu\n", a->io_time[READ]);
- prt_printf(out, "io_time[WRITE] %llu\n", a->io_time[WRITE]);
-
- if (ca)
- prt_printf(out, "fragmentation %llu\n", alloc_lru_idx_fragmentation(*a, ca));
- prt_printf(out, "bp_start %llu\n", BCH_ALLOC_V4_BACKPOINTERS_START(a));
- printbuf_indent_sub(out, 2);
-
- bch2_dev_put(ca);
-}
-
-void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
-{
- if (k.k->type == KEY_TYPE_alloc_v4) {
- void *src, *dst;
-
- *out = *bkey_s_c_to_alloc_v4(k).v;
-
- src = alloc_v4_backpointers(out);
- SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
- dst = alloc_v4_backpointers(out);
-
- if (src < dst)
- memset(src, 0, dst - src);
-
- SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
- } else {
- struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
-
- *out = (struct bch_alloc_v4) {
- .journal_seq = u.journal_seq,
- .flags = u.need_discard,
- .gen = u.gen,
- .oldest_gen = u.oldest_gen,
- .data_type = u.data_type,
- .stripe_redundancy = u.stripe_redundancy,
- .dirty_sectors = u.dirty_sectors,
- .cached_sectors = u.cached_sectors,
- .io_time[READ] = u.read_time,
- .io_time[WRITE] = u.write_time,
- .stripe = u.stripe,
- };
-
- SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
- }
-}
-
-static noinline struct bkey_i_alloc_v4 *
-__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
-{
- struct bkey_i_alloc_v4 *ret;
-
- ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
- if (IS_ERR(ret))
- return ret;
-
- if (k.k->type == KEY_TYPE_alloc_v4) {
- void *src, *dst;
-
- bkey_reassemble(&ret->k_i, k);
-
- src = alloc_v4_backpointers(&ret->v);
- SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
- dst = alloc_v4_backpointers(&ret->v);
-
- if (src < dst)
- memset(src, 0, dst - src);
-
- SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
- set_alloc_v4_u64s(ret);
- } else {
- bkey_alloc_v4_init(&ret->k_i);
- ret->k.p = k.k->p;
- bch2_alloc_to_v4(k, &ret->v);
- }
- return ret;
-}
-
-static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
-{
- struct bkey_s_c_alloc_v4 a;
-
- if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
- ((a = bkey_s_c_to_alloc_v4(k), true) &&
- BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
- return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4);
-
- return __bch2_alloc_to_v4_mut(trans, k);
-}
-
-struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
-{
- return bch2_alloc_to_v4_mut_inlined(trans, k);
-}
-
-struct bkey_i_alloc_v4 *
-bch2_trans_start_alloc_update_noupdate(struct btree_trans *trans, struct btree_iter *iter,
- struct bpos pos)
-{
- struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
- BTREE_ITER_with_updates|
- BTREE_ITER_cached|
- BTREE_ITER_intent);
- int ret = bkey_err(k);
- if (unlikely(ret))
- return ERR_PTR(ret);
-
- struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k);
- ret = PTR_ERR_OR_ZERO(a);
- if (unlikely(ret))
- goto err;
- return a;
-err:
- bch2_trans_iter_exit(trans, iter);
- return ERR_PTR(ret);
-}
-
-__flatten
-struct bkey_i_alloc_v4 *bch2_trans_start_alloc_update(struct btree_trans *trans, struct bpos pos,
- enum btree_iter_update_trigger_flags flags)
-{
- struct btree_iter iter;
- struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update_noupdate(trans, &iter, pos);
- int ret = PTR_ERR_OR_ZERO(a);
- if (ret)
- return ERR_PTR(ret);
-
- ret = bch2_trans_update(trans, &iter, &a->k_i, flags);
- bch2_trans_iter_exit(trans, &iter);
- return unlikely(ret) ? ERR_PTR(ret) : a;
-}
-
-static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
-{
- *offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;
-
- pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
- return pos;
-}
-
-static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
-{
- pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
- pos.offset += offset;
- return pos;
-}
-
-static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
-{
- return k.k->type == KEY_TYPE_bucket_gens
- ? bkey_s_c_to_bucket_gens(k).v->gens[offset]
- : 0;
-}
-
-int bch2_bucket_gens_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- int ret = 0;
-
- bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens),
- c, bucket_gens_val_size_bad,
- "bad val size (%zu != %zu)",
- bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
-fsck_err:
- return ret;
-}
-
-void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
- if (i)
- prt_char(out, ' ');
- prt_printf(out, "%u", g.v->gens[i]);
- }
-}
-
-int bch2_bucket_gens_init(struct bch_fs *c)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct bkey_i_bucket_gens g;
- bool have_bucket_gens_key = false;
- int ret;
-
- ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
- BTREE_ITER_prefetch, k, ({
- /*
- * Not a fsck error because this is checked/repaired by
- * bch2_check_alloc_key() which runs later:
- */
- if (!bch2_dev_bucket_exists(c, k.k->p))
- continue;
-
- struct bch_alloc_v4 a;
- u8 gen = bch2_alloc_to_v4(k, &a)->gen;
- unsigned offset;
- struct bpos pos = alloc_gens_pos(iter.pos, &offset);
- int ret2 = 0;
-
- if (have_bucket_gens_key && !bkey_eq(g.k.p, pos)) {
- ret2 = bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0) ?:
- bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
- if (ret2)
- goto iter_err;
- have_bucket_gens_key = false;
- }
-
- if (!have_bucket_gens_key) {
- bkey_bucket_gens_init(&g.k_i);
- g.k.p = pos;
- have_bucket_gens_key = true;
- }
-
- g.v.gens[offset] = gen;
-iter_err:
- ret2;
- }));
-
- if (have_bucket_gens_key && !ret)
- ret = commit_do(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc,
- bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));
-
- bch2_trans_put(trans);
-
- bch_err_fn(c, ret);
- return ret;
-}
-
-int bch2_alloc_read(struct bch_fs *c)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct bch_dev *ca = NULL;
- int ret;
-
- if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
- ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
- BTREE_ITER_prefetch, k, ({
- u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
- u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
-
- if (k.k->type != KEY_TYPE_bucket_gens)
- continue;
-
- ca = bch2_dev_iterate(c, ca, k.k->p.inode);
- /*
- * Not a fsck error because this is checked/repaired by
- * bch2_check_alloc_key() which runs later:
- */
- if (!ca) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
- continue;
- }
-
- const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;
-
- for (u64 b = max_t(u64, ca->mi.first_bucket, start);
- b < min_t(u64, ca->mi.nbuckets, end);
- b++)
- *bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
- 0;
- }));
- } else {
- ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
- BTREE_ITER_prefetch, k, ({
- ca = bch2_dev_iterate(c, ca, k.k->p.inode);
- /*
- * Not a fsck error because this is checked/repaired by
- * bch2_check_alloc_key() which runs later:
- */
- if (!ca) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
- continue;
- }
-
- if (k.k->p.offset < ca->mi.first_bucket) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode, ca->mi.first_bucket));
- continue;
- }
-
- if (k.k->p.offset >= ca->mi.nbuckets) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
- continue;
- }
-
- struct bch_alloc_v4 a;
- *bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
- 0;
- }));
- }
-
- bch2_dev_put(ca);
- bch2_trans_put(trans);
-
- bch_err_fn(c, ret);
- return ret;
-}
-
-/* Free space/discard btree: */
-
-static int bch2_bucket_do_index(struct btree_trans *trans,
- struct bch_dev *ca,
- struct bkey_s_c alloc_k,
- const struct bch_alloc_v4 *a,
- bool set)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c old;
- struct bkey_i *k;
- enum btree_id btree;
- enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
- enum bch_bkey_type new_type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
- struct printbuf buf = PRINTBUF;
- int ret;
-
- if (a->data_type != BCH_DATA_free &&
- a->data_type != BCH_DATA_need_discard)
- return 0;
-
- k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
- if (IS_ERR(k))
- return PTR_ERR(k);
-
- bkey_init(&k->k);
- k->k.type = new_type;
-
- switch (a->data_type) {
- case BCH_DATA_free:
- btree = BTREE_ID_freespace;
- k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
- bch2_key_resize(&k->k, 1);
- break;
- case BCH_DATA_need_discard:
- btree = BTREE_ID_need_discard;
- k->k.p = alloc_k.k->p;
- break;
- default:
- return 0;
- }
-
- old = bch2_bkey_get_iter(trans, &iter, btree,
- bkey_start_pos(&k->k),
- BTREE_ITER_intent);
- ret = bkey_err(old);
- if (ret)
- return ret;
-
- if (ca->mi.freespace_initialized &&
- c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info &&
- bch2_trans_inconsistent_on(old.k->type != old_type, trans,
- "incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
- " for %s",
- set ? "setting" : "clearing",
- bch2_btree_id_str(btree),
- iter.pos.inode,
- iter.pos.offset,
- bch2_bkey_types[old.k->type],
- bch2_bkey_types[old_type],
- (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
- ret = -EIO;
- goto err;
- }
-
- ret = bch2_trans_update(trans, &iter, k, 0);
-err:
- bch2_trans_iter_exit(trans, &iter);
- printbuf_exit(&buf);
- return ret;
-}
-
-static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
- struct bpos bucket, u8 gen)
-{
- struct btree_iter iter;
- unsigned offset;
- struct bpos pos = alloc_gens_pos(bucket, &offset);
- struct bkey_i_bucket_gens *g;
- struct bkey_s_c k;
- int ret;
-
- g = bch2_trans_kmalloc(trans, sizeof(*g));
- ret = PTR_ERR_OR_ZERO(g);
- if (ret)
- return ret;
-
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
- BTREE_ITER_intent|
- BTREE_ITER_with_updates);
- ret = bkey_err(k);
- if (ret)
- return ret;
-
- if (k.k->type != KEY_TYPE_bucket_gens) {
- bkey_bucket_gens_init(&g->k_i);
- g->k.p = iter.pos;
- } else {
- bkey_reassemble(&g->k_i, k);
- }
-
- g->v.gens[offset] = gen;
-
- ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static inline int bch2_dev_data_type_accounting_mod(struct btree_trans *trans, struct bch_dev *ca,
- enum bch_data_type data_type,
- s64 delta_buckets,
- s64 delta_sectors,
- s64 delta_fragmented, unsigned flags)
-{
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_dev_data_type,
- .dev_data_type.dev = ca->dev_idx,
- .dev_data_type.data_type = data_type,
- };
- s64 d[3] = { delta_buckets, delta_sectors, delta_fragmented };
-
- return bch2_disk_accounting_mod(trans, &acc, d, 3, flags & BTREE_TRIGGER_gc);
-}
-
-int bch2_alloc_key_to_dev_counters(struct btree_trans *trans, struct bch_dev *ca,
- const struct bch_alloc_v4 *old,
- const struct bch_alloc_v4 *new,
- unsigned flags)
-{
- s64 old_sectors = bch2_bucket_sectors(*old);
- s64 new_sectors = bch2_bucket_sectors(*new);
- if (old->data_type != new->data_type) {
- int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
- 1, new_sectors, bch2_bucket_sectors_fragmented(ca, *new), flags) ?:
- bch2_dev_data_type_accounting_mod(trans, ca, old->data_type,
- -1, -old_sectors, -bch2_bucket_sectors_fragmented(ca, *old), flags);
- if (ret)
- return ret;
- } else if (old_sectors != new_sectors) {
- int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
- 0,
- new_sectors - old_sectors,
- bch2_bucket_sectors_fragmented(ca, *new) -
- bch2_bucket_sectors_fragmented(ca, *old), flags);
- if (ret)
- return ret;
- }
-
- s64 old_unstriped = bch2_bucket_sectors_unstriped(*old);
- s64 new_unstriped = bch2_bucket_sectors_unstriped(*new);
- if (old_unstriped != new_unstriped) {
- int ret = bch2_dev_data_type_accounting_mod(trans, ca, BCH_DATA_unstriped,
- !!new_unstriped - !!old_unstriped,
- new_unstriped - old_unstriped,
- 0,
- flags);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-int bch2_trigger_alloc(struct btree_trans *trans,
- enum btree_id btree, unsigned level,
- struct bkey_s_c old, struct bkey_s new,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p);
- if (!ca)
- return -EIO;
-
- struct bch_alloc_v4 old_a_convert;
- const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);
-
- struct bch_alloc_v4 *new_a;
- if (likely(new.k->type == KEY_TYPE_alloc_v4)) {
- new_a = bkey_s_to_alloc_v4(new).v;
- } else {
- BUG_ON(!(flags & (BTREE_TRIGGER_gc|BTREE_TRIGGER_check_repair)));
-
- struct bkey_i_alloc_v4 *new_ka = bch2_alloc_to_v4_mut_inlined(trans, new.s_c);
- ret = PTR_ERR_OR_ZERO(new_ka);
- if (unlikely(ret))
- goto err;
- new_a = &new_ka->v;
- }
-
- if (flags & BTREE_TRIGGER_transactional) {
- alloc_data_type_set(new_a, new_a->data_type);
-
- if (bch2_bucket_sectors_total(*new_a) > bch2_bucket_sectors_total(*old_a)) {
- new_a->io_time[READ] = bch2_current_io_time(c, READ);
- new_a->io_time[WRITE]= bch2_current_io_time(c, WRITE);
- SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
- SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
- }
-
- if (data_type_is_empty(new_a->data_type) &&
- BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
- !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
- new_a->gen++;
- SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
- alloc_data_type_set(new_a, new_a->data_type);
- }
-
- if (old_a->data_type != new_a->data_type ||
- (new_a->data_type == BCH_DATA_free &&
- alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
- ret = bch2_bucket_do_index(trans, ca, old, old_a, false) ?:
- bch2_bucket_do_index(trans, ca, new.s_c, new_a, true);
- if (ret)
- goto err;
- }
-
- if (new_a->data_type == BCH_DATA_cached &&
- !new_a->io_time[READ])
- new_a->io_time[READ] = bch2_current_io_time(c, READ);
-
- u64 old_lru = alloc_lru_idx_read(*old_a);
- u64 new_lru = alloc_lru_idx_read(*new_a);
- if (old_lru != new_lru) {
- ret = bch2_lru_change(trans, new.k->p.inode,
- bucket_to_u64(new.k->p),
- old_lru, new_lru);
- if (ret)
- goto err;
- }
-
- old_lru = alloc_lru_idx_fragmentation(*old_a, ca);
- new_lru = alloc_lru_idx_fragmentation(*new_a, ca);
- if (old_lru != new_lru) {
- ret = bch2_lru_change(trans,
- BCH_LRU_FRAGMENTATION_START,
- bucket_to_u64(new.k->p),
- old_lru, new_lru);
- if (ret)
- goto err;
- }
-
- if (old_a->gen != new_a->gen) {
- ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
- if (ret)
- goto err;
- }
-
- if ((flags & BTREE_TRIGGER_bucket_invalidate) &&
- old_a->cached_sectors) {
- ret = bch2_mod_dev_cached_sectors(trans, ca->dev_idx,
- -((s64) old_a->cached_sectors),
- flags & BTREE_TRIGGER_gc);
- if (ret)
- goto err;
- }
-
- ret = bch2_alloc_key_to_dev_counters(trans, ca, old_a, new_a, flags);
- if (ret)
- goto err;
- }
-
- if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
- u64 journal_seq = trans->journal_res.seq;
- u64 bucket_journal_seq = new_a->journal_seq;
-
- if ((flags & BTREE_TRIGGER_insert) &&
- data_type_is_empty(old_a->data_type) !=
- data_type_is_empty(new_a->data_type) &&
- new.k->type == KEY_TYPE_alloc_v4) {
- struct bch_alloc_v4 *v = bkey_s_to_alloc_v4(new).v;
-
- /*
- * If the btree updates referring to a bucket weren't flushed
- * before the bucket became empty again, then we don't have
- * to wait on a journal flush before we can reuse the bucket:
- */
- v->journal_seq = bucket_journal_seq =
- data_type_is_empty(new_a->data_type) &&
- (journal_seq == v->journal_seq ||
- bch2_journal_noflush_seq(&c->journal, v->journal_seq))
- ? 0 : journal_seq;
- }
-
- if (!data_type_is_empty(old_a->data_type) &&
- data_type_is_empty(new_a->data_type) &&
- bucket_journal_seq) {
- ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
- c->journal.flushed_seq_ondisk,
- new.k->p.inode, new.k->p.offset,
- bucket_journal_seq);
- if (bch2_fs_fatal_err_on(ret, c,
- "setting bucket_needs_journal_commit: %s", bch2_err_str(ret)))
- goto err;
- }
-
- if (new_a->gen != old_a->gen) {
- rcu_read_lock();
- u8 *gen = bucket_gen(ca, new.k->p.offset);
- if (unlikely(!gen)) {
- rcu_read_unlock();
- goto invalid_bucket;
- }
- *gen = new_a->gen;
- rcu_read_unlock();
- }
-
-#define eval_state(_a, expr) ({ const struct bch_alloc_v4 *a = _a; expr; })
-#define statechange(expr) !eval_state(old_a, expr) && eval_state(new_a, expr)
-#define bucket_flushed(a) (!a->journal_seq || a->journal_seq <= c->journal.flushed_seq_ondisk)
-
- if (statechange(a->data_type == BCH_DATA_free) &&
- bucket_flushed(new_a))
- closure_wake_up(&c->freelist_wait);
-
- if (statechange(a->data_type == BCH_DATA_need_discard) &&
- !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset) &&
- bucket_flushed(new_a))
- bch2_discard_one_bucket_fast(ca, new.k->p.offset);
-
- if (statechange(a->data_type == BCH_DATA_cached) &&
- !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
- should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
- bch2_dev_do_invalidates(ca);
-
- if (statechange(a->data_type == BCH_DATA_need_gc_gens))
- bch2_gc_gens_async(c);
- }
-
- if ((flags & BTREE_TRIGGER_gc) && (flags & BTREE_TRIGGER_insert)) {
- rcu_read_lock();
- struct bucket *g = gc_bucket(ca, new.k->p.offset);
- if (unlikely(!g)) {
- rcu_read_unlock();
- goto invalid_bucket;
- }
- g->gen_valid = 1;
- g->gen = new_a->gen;
- rcu_read_unlock();
- }
-err:
- printbuf_exit(&buf);
- bch2_dev_put(ca);
- return ret;
-invalid_bucket:
- bch2_fs_inconsistent(c, "reference to invalid bucket\n %s",
- (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf));
- ret = -EIO;
- goto err;
-}
-
-/*
- * This synthesizes deleted extents for holes, similar to BTREE_ITER_slots for
- * extents style btrees, but works on non-extents btrees:
- */
-static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
-{
- struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
-
- if (bkey_err(k))
- return k;
-
- if (k.k->type) {
- return k;
- } else {
- struct btree_iter iter2;
- struct bpos next;
-
- bch2_trans_copy_iter(&iter2, iter);
-
- struct btree_path *path = btree_iter_path(iter->trans, iter);
- if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
- end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));
-
- end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));
-
- /*
- * btree node min/max is a closed interval, while peek_upto() takes a
- * half-open interval:
- */
- k = bch2_btree_iter_peek_upto(&iter2, end);
- next = iter2.pos;
- bch2_trans_iter_exit(iter->trans, &iter2);
-
- BUG_ON(next.offset >= iter->pos.offset + U32_MAX);
-
- if (bkey_err(k))
- return k;
-
- bkey_init(hole);
- hole->p = iter->pos;
-
- bch2_key_resize(hole, next.offset - iter->pos.offset);
- return (struct bkey_s_c) { hole, NULL };
- }
-}
-
-static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *bucket)
-{
- if (*ca) {
- if (bucket->offset < (*ca)->mi.first_bucket)
- bucket->offset = (*ca)->mi.first_bucket;
-
- if (bucket->offset < (*ca)->mi.nbuckets)
- return true;
-
- bch2_dev_put(*ca);
- *ca = NULL;
- bucket->inode++;
- bucket->offset = 0;
- }
-
- rcu_read_lock();
- *ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
- if (*ca) {
- *bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket);
- bch2_dev_get(*ca);
- }
- rcu_read_unlock();
-
- return *ca != NULL;
-}
-
-static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter,
- struct bch_dev **ca, struct bkey *hole)
-{
- struct bch_fs *c = iter->trans->c;
- struct bkey_s_c k;
-again:
- k = bch2_get_key_or_hole(iter, POS_MAX, hole);
- if (bkey_err(k))
- return k;
-
- *ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode);
-
- if (!k.k->type) {
- struct bpos hole_start = bkey_start_pos(k.k);
-
- if (!*ca || !bucket_valid(*ca, hole_start.offset)) {
- if (!next_bucket(c, ca, &hole_start))
- return bkey_s_c_null;
-
- bch2_btree_iter_set_pos(iter, hole_start);
- goto again;
- }
-
- if (k.k->p.offset > (*ca)->mi.nbuckets)
- bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset);
- }
-
- return k;
-}
-
-static noinline_for_stack
-int bch2_check_alloc_key(struct btree_trans *trans,
- struct bkey_s_c alloc_k,
- struct btree_iter *alloc_iter,
- struct btree_iter *discard_iter,
- struct btree_iter *freespace_iter,
- struct btree_iter *bucket_gens_iter)
-{
- struct bch_fs *c = trans->c;
- struct bch_alloc_v4 a_convert;
- const struct bch_alloc_v4 *a;
- unsigned discard_key_type, freespace_key_type;
- unsigned gens_offset;
- struct bkey_s_c k;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p);
- if (fsck_err_on(!ca,
- trans, alloc_key_to_missing_dev_bucket,
- "alloc key for invalid device:bucket %llu:%llu",
- alloc_k.k->p.inode, alloc_k.k->p.offset))
- ret = bch2_btree_delete_at(trans, alloc_iter, 0);
- if (!ca)
- return ret;
-
- if (!ca->mi.freespace_initialized)
- goto out;
-
- a = bch2_alloc_to_v4(alloc_k, &a_convert);
-
- discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
- bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
- k = bch2_btree_iter_peek_slot(discard_iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (fsck_err_on(k.k->type != discard_key_type,
- trans, need_discard_key_wrong,
- "incorrect key in need_discard btree (got %s should be %s)\n"
- " %s",
- bch2_bkey_types[k.k->type],
- bch2_bkey_types[discard_key_type],
- (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
- struct bkey_i *update =
- bch2_trans_kmalloc(trans, sizeof(*update));
-
- ret = PTR_ERR_OR_ZERO(update);
- if (ret)
- goto err;
-
- bkey_init(&update->k);
- update->k.type = discard_key_type;
- update->k.p = discard_iter->pos;
-
- ret = bch2_trans_update(trans, discard_iter, update, 0);
- if (ret)
- goto err;
- }
-
- freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
- bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
- k = bch2_btree_iter_peek_slot(freespace_iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (fsck_err_on(k.k->type != freespace_key_type,
- trans, freespace_key_wrong,
- "incorrect key in freespace btree (got %s should be %s)\n"
- " %s",
- bch2_bkey_types[k.k->type],
- bch2_bkey_types[freespace_key_type],
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
- struct bkey_i *update =
- bch2_trans_kmalloc(trans, sizeof(*update));
-
- ret = PTR_ERR_OR_ZERO(update);
- if (ret)
- goto err;
-
- bkey_init(&update->k);
- update->k.type = freespace_key_type;
- update->k.p = freespace_iter->pos;
- bch2_key_resize(&update->k, 1);
-
- ret = bch2_trans_update(trans, freespace_iter, update, 0);
- if (ret)
- goto err;
- }
-
- bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
- k = bch2_btree_iter_peek_slot(bucket_gens_iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (fsck_err_on(a->gen != alloc_gen(k, gens_offset),
- trans, bucket_gens_key_wrong,
- "incorrect gen in bucket_gens btree (got %u should be %u)\n"
- " %s",
- alloc_gen(k, gens_offset), a->gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
- struct bkey_i_bucket_gens *g =
- bch2_trans_kmalloc(trans, sizeof(*g));
-
- ret = PTR_ERR_OR_ZERO(g);
- if (ret)
- goto err;
-
- if (k.k->type == KEY_TYPE_bucket_gens) {
- bkey_reassemble(&g->k_i, k);
- } else {
- bkey_bucket_gens_init(&g->k_i);
- g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
- }
-
- g->v.gens[gens_offset] = a->gen;
-
- ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
- if (ret)
- goto err;
- }
-out:
-err:
-fsck_err:
- bch2_dev_put(ca);
- printbuf_exit(&buf);
- return ret;
-}
-
-static noinline_for_stack
-int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
- struct bch_dev *ca,
- struct bpos start,
- struct bpos *end,
- struct btree_iter *freespace_iter)
-{
- struct bkey_s_c k;
- struct printbuf buf = PRINTBUF;
- int ret;
-
- if (!ca->mi.freespace_initialized)
- return 0;
-
- bch2_btree_iter_set_pos(freespace_iter, start);
-
- k = bch2_btree_iter_peek_slot(freespace_iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- *end = bkey_min(k.k->p, *end);
-
- if (fsck_err_on(k.k->type != KEY_TYPE_set,
- trans, freespace_hole_missing,
- "hole in alloc btree missing in freespace btree\n"
- " device %llu buckets %llu-%llu",
- freespace_iter->pos.inode,
- freespace_iter->pos.offset,
- end->offset)) {
- struct bkey_i *update =
- bch2_trans_kmalloc(trans, sizeof(*update));
-
- ret = PTR_ERR_OR_ZERO(update);
- if (ret)
- goto err;
-
- bkey_init(&update->k);
- update->k.type = KEY_TYPE_set;
- update->k.p = freespace_iter->pos;
- bch2_key_resize(&update->k,
- min_t(u64, U32_MAX, end->offset -
- freespace_iter->pos.offset));
-
- ret = bch2_trans_update(trans, freespace_iter, update, 0);
- if (ret)
- goto err;
- }
-err:
-fsck_err:
- printbuf_exit(&buf);
- return ret;
-}
-
-static noinline_for_stack
-int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
- struct bpos start,
- struct bpos *end,
- struct btree_iter *bucket_gens_iter)
-{
- struct bkey_s_c k;
- struct printbuf buf = PRINTBUF;
- unsigned i, gens_offset, gens_end_offset;
- int ret;
-
- bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));
-
- k = bch2_btree_iter_peek_slot(bucket_gens_iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
- alloc_gens_pos(*end, &gens_end_offset)))
- gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;
-
- if (k.k->type == KEY_TYPE_bucket_gens) {
- struct bkey_i_bucket_gens g;
- bool need_update = false;
-
- bkey_reassemble(&g.k_i, k);
-
- for (i = gens_offset; i < gens_end_offset; i++) {
- if (fsck_err_on(g.v.gens[i], trans,
- bucket_gens_hole_wrong,
- "hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
- bucket_gens_pos_to_alloc(k.k->p, i).inode,
- bucket_gens_pos_to_alloc(k.k->p, i).offset,
- g.v.gens[i])) {
- g.v.gens[i] = 0;
- need_update = true;
- }
- }
-
- if (need_update) {
- struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
-
- ret = PTR_ERR_OR_ZERO(u);
- if (ret)
- goto err;
-
- memcpy(u, &g, sizeof(g));
-
- ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
- if (ret)
- goto err;
- }
- }
-
- *end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
-err:
-fsck_err:
- printbuf_exit(&buf);
- return ret;
-}
-
-static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_trans *trans,
- struct btree_iter *iter)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter alloc_iter;
- struct bkey_s_c alloc_k;
- struct bch_alloc_v4 a_convert;
- const struct bch_alloc_v4 *a;
- u64 genbits;
- struct bpos pos;
- enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
- ? BCH_DATA_need_discard
- : BCH_DATA_free;
- struct printbuf buf = PRINTBUF;
- int ret;
-
- pos = iter->pos;
- pos.offset &= ~(~0ULL << 56);
- genbits = iter->pos.offset & (~0ULL << 56);
-
- alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
- ret = bkey_err(alloc_k);
- if (ret)
- return ret;
-
- if (fsck_err_on(!bch2_dev_bucket_exists(c, pos),
- trans, need_discard_freespace_key_to_invalid_dev_bucket,
-			"entry in %s btree for nonexistent dev:bucket %llu:%llu",
- bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset))
- goto delete;
-
- a = bch2_alloc_to_v4(alloc_k, &a_convert);
-
- if (fsck_err_on(a->data_type != state ||
- (state == BCH_DATA_free &&
- genbits != alloc_freespace_genbits(*a)),
- trans, need_discard_freespace_key_bad,
- "%s\n incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
- (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
- bch2_btree_id_str(iter->btree_id),
- iter->pos.inode,
- iter->pos.offset,
- a->data_type == state,
- genbits >> 56, alloc_freespace_genbits(*a) >> 56))
- goto delete;
-out:
-fsck_err:
- bch2_set_btree_iter_dontneed(&alloc_iter);
- bch2_trans_iter_exit(trans, &alloc_iter);
- printbuf_exit(&buf);
- return ret;
-delete:
- ret = bch2_btree_delete_extent_at(trans, iter,
- iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0) ?:
- bch2_trans_commit(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc);
- goto out;
-}
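
A minimal userspace sketch of the need_discard/freespace key layout handled by bch2_check_discard_freespace_key() above: the low 56 bits of the key offset carry the bucket number and the top 8 bits carry generation bits. The constant and helper names below are illustrative only, not bcachefs symbols.

#include <stdint.h>
#include <assert.h>

#define GENBITS_SHIFT 56

static uint64_t pack_freespace_offset(uint64_t bucket, uint64_t genbits)
{
	/* bucket in the low 56 bits, genbits in the high 8 bits */
	return (bucket & ~(~0ULL << GENBITS_SHIFT)) | (genbits << GENBITS_SHIFT);
}

int main(void)
{
	uint64_t off	 = pack_freespace_offset(12345, 3);
	uint64_t bucket	 = off & ~(~0ULL << GENBITS_SHIFT);	/* as pos.offset above */
	uint64_t genbits = off & (~0ULL << GENBITS_SHIFT);	/* as genbits above */

	assert(bucket == 12345);
	assert(genbits >> GENBITS_SHIFT == 3);
	return 0;
}
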
-
-/*
- * We've already checked that generation numbers in the bucket_gens btree are
- * valid for buckets that exist; this just checks for keys for nonexistent
- * buckets.
- */
-static noinline_for_stack
-int bch2_check_bucket_gens_key(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct bkey_i_bucket_gens g;
- u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
- u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
- u64 b;
- bool need_update = false;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
- bkey_reassemble(&g.k_i, k);
-
- struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode);
- if (!ca) {
- if (fsck_err(trans, bucket_gens_to_invalid_dev,
- "bucket_gens key for invalid device:\n %s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- ret = bch2_btree_delete_at(trans, iter, 0);
- goto out;
- }
-
- if (fsck_err_on(end <= ca->mi.first_bucket ||
- start >= ca->mi.nbuckets,
- trans, bucket_gens_to_invalid_buckets,
- "bucket_gens key for invalid buckets:\n %s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- ret = bch2_btree_delete_at(trans, iter, 0);
- goto out;
- }
-
- for (b = start; b < ca->mi.first_bucket; b++)
- if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK],
- trans, bucket_gens_nonzero_for_invalid_buckets,
- "bucket_gens key has nonzero gen for invalid bucket")) {
- g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
- need_update = true;
- }
-
- for (b = ca->mi.nbuckets; b < end; b++)
- if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK],
- trans, bucket_gens_nonzero_for_invalid_buckets,
- "bucket_gens key has nonzero gen for invalid bucket")) {
- g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
- need_update = true;
- }
-
- if (need_update) {
- struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
-
- ret = PTR_ERR_OR_ZERO(u);
- if (ret)
- goto out;
-
- memcpy(u, &g, sizeof(g));
- ret = bch2_trans_update(trans, iter, u, 0);
- }
-out:
-fsck_err:
- bch2_dev_put(ca);
- printbuf_exit(&buf);
- return ret;
-}
-
-int bch2_check_alloc_info(struct bch_fs *c)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
- struct bch_dev *ca = NULL;
- struct bkey hole;
- struct bkey_s_c k;
- int ret = 0;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
- BTREE_ITER_prefetch);
- bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
- BTREE_ITER_prefetch);
- bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
- BTREE_ITER_prefetch);
- bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
- BTREE_ITER_prefetch);
-
- while (1) {
- struct bpos next;
-
- bch2_trans_begin(trans);
-
- k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole);
- ret = bkey_err(k);
- if (ret)
- goto bkey_err;
-
- if (!k.k)
- break;
-
- if (k.k->type) {
- next = bpos_nosnap_successor(k.k->p);
-
- ret = bch2_check_alloc_key(trans,
- k, &iter,
- &discard_iter,
- &freespace_iter,
- &bucket_gens_iter);
- if (ret)
- goto bkey_err;
- } else {
- next = k.k->p;
-
- ret = bch2_check_alloc_hole_freespace(trans, ca,
- bkey_start_pos(k.k),
- &next,
- &freespace_iter) ?:
- bch2_check_alloc_hole_bucket_gens(trans,
- bkey_start_pos(k.k),
- &next,
- &bucket_gens_iter);
- if (ret)
- goto bkey_err;
- }
-
- ret = bch2_trans_commit(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc);
- if (ret)
- goto bkey_err;
-
- bch2_btree_iter_set_pos(&iter, next);
-bkey_err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret)
- break;
- }
- bch2_trans_iter_exit(trans, &bucket_gens_iter);
- bch2_trans_iter_exit(trans, &freespace_iter);
- bch2_trans_iter_exit(trans, &discard_iter);
- bch2_trans_iter_exit(trans, &iter);
- bch2_dev_put(ca);
- ca = NULL;
-
- if (ret < 0)
- goto err;
-
- ret = for_each_btree_key(trans, iter,
- BTREE_ID_need_discard, POS_MIN,
- BTREE_ITER_prefetch, k,
- bch2_check_discard_freespace_key(trans, &iter));
- if (ret)
- goto err;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
- BTREE_ITER_prefetch);
- while (1) {
- bch2_trans_begin(trans);
- k = bch2_btree_iter_peek(&iter);
- if (!k.k)
- break;
-
- ret = bkey_err(k) ?:
- bch2_check_discard_freespace_key(trans, &iter);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
- ret = 0;
- continue;
- }
- if (ret) {
- struct printbuf buf = PRINTBUF;
- bch2_bkey_val_to_text(&buf, c, k);
-
- bch_err(c, "while checking %s", buf.buf);
- printbuf_exit(&buf);
- break;
- }
-
- bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
- }
- bch2_trans_iter_exit(trans, &iter);
- if (ret)
- goto err;
-
- ret = for_each_btree_key_commit(trans, iter,
- BTREE_ID_bucket_gens, POS_MIN,
- BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- bch2_check_bucket_gens_key(trans, &iter, k));
-err:
- bch2_trans_put(trans);
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
- struct btree_iter *alloc_iter,
- struct bkey_buf *last_flushed)
-{
- struct bch_fs *c = trans->c;
- struct bch_alloc_v4 a_convert;
- const struct bch_alloc_v4 *a;
- struct bkey_s_c alloc_k;
- struct printbuf buf = PRINTBUF;
- int ret;
-
- alloc_k = bch2_btree_iter_peek(alloc_iter);
- if (!alloc_k.k)
- return 0;
-
- ret = bkey_err(alloc_k);
- if (ret)
- return ret;
-
- struct bch_dev *ca = bch2_dev_tryget_noerror(c, alloc_k.k->p.inode);
- if (!ca)
- return 0;
-
- a = bch2_alloc_to_v4(alloc_k, &a_convert);
-
- u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca);
- if (lru_idx) {
- ret = bch2_lru_check_set(trans, BCH_LRU_FRAGMENTATION_START,
- lru_idx, alloc_k, last_flushed);
- if (ret)
- goto err;
- }
-
- if (a->data_type != BCH_DATA_cached)
- goto err;
-
- if (fsck_err_on(!a->io_time[READ],
- trans, alloc_key_cached_but_read_time_zero,
- "cached bucket with read_time 0\n"
- " %s",
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
- struct bkey_i_alloc_v4 *a_mut =
- bch2_alloc_to_v4_mut(trans, alloc_k);
- ret = PTR_ERR_OR_ZERO(a_mut);
- if (ret)
- goto err;
-
- a_mut->v.io_time[READ] = bch2_current_io_time(c, READ);
- ret = bch2_trans_update(trans, alloc_iter,
- &a_mut->k_i, BTREE_TRIGGER_norun);
- if (ret)
- goto err;
-
- a = &a_mut->v;
- }
-
- ret = bch2_lru_check_set(trans, alloc_k.k->p.inode, a->io_time[READ],
- alloc_k, last_flushed);
- if (ret)
- goto err;
-err:
-fsck_err:
- bch2_dev_put(ca);
- printbuf_exit(&buf);
- return ret;
-}
-
-int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
-{
- struct bkey_buf last_flushed;
-
- bch2_bkey_buf_init(&last_flushed);
- bkey_init(&last_flushed.k->k);
-
- int ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
- POS_MIN, BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed)));
-
- bch2_bkey_buf_exit(&last_flushed, c);
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int discard_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress)
-{
- int ret;
-
- mutex_lock(&ca->discard_buckets_in_flight_lock);
- darray_for_each(ca->discard_buckets_in_flight, i)
- if (i->bucket == bucket) {
- ret = -BCH_ERR_EEXIST_discard_in_flight_add;
- goto out;
- }
-
- ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) {
- .in_progress = in_progress,
- .bucket = bucket,
- }));
-out:
- mutex_unlock(&ca->discard_buckets_in_flight_lock);
- return ret;
-}
-
-static void discard_in_flight_remove(struct bch_dev *ca, u64 bucket)
-{
- mutex_lock(&ca->discard_buckets_in_flight_lock);
- darray_for_each(ca->discard_buckets_in_flight, i)
- if (i->bucket == bucket) {
- BUG_ON(!i->in_progress);
- darray_remove_item(&ca->discard_buckets_in_flight, i);
- goto found;
- }
- BUG();
-found:
- mutex_unlock(&ca->discard_buckets_in_flight_lock);
-}
-
-struct discard_buckets_state {
- u64 seen;
- u64 open;
- u64 need_journal_commit;
- u64 discarded;
- u64 need_journal_commit_this_dev;
-};
-
-static int bch2_discard_one_bucket(struct btree_trans *trans,
- struct bch_dev *ca,
- struct btree_iter *need_discard_iter,
- struct bpos *discard_pos_done,
- struct discard_buckets_state *s)
-{
- struct bch_fs *c = trans->c;
- struct bpos pos = need_discard_iter->pos;
- struct btree_iter iter = { NULL };
- struct bkey_s_c k;
- struct bkey_i_alloc_v4 *a;
- struct printbuf buf = PRINTBUF;
- bool discard_locked = false;
- int ret = 0;
-
- if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
- s->open++;
- goto out;
- }
-
- if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
- c->journal.flushed_seq_ondisk,
- pos.inode, pos.offset)) {
- s->need_journal_commit++;
- s->need_journal_commit_this_dev++;
- goto out;
- }
-
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
- need_discard_iter->pos,
- BTREE_ITER_cached);
- ret = bkey_err(k);
- if (ret)
- goto out;
-
- a = bch2_alloc_to_v4_mut(trans, k);
- ret = PTR_ERR_OR_ZERO(a);
- if (ret)
- goto out;
-
- if (bch2_bucket_sectors_total(a->v)) {
- if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
- trans, "attempting to discard bucket with dirty data\n%s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- ret = -EIO;
- goto out;
- }
-
- if (a->v.data_type != BCH_DATA_need_discard) {
- if (data_type_is_empty(a->v.data_type) &&
- BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
- a->v.gen++;
- SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
- goto write;
- }
-
- if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
- trans, "bucket incorrectly set in need_discard btree\n"
- "%s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- ret = -EIO;
- goto out;
- }
-
- if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
- if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
- trans, "clearing need_discard but journal_seq %llu > flushed_seq %llu\n%s",
- a->v.journal_seq,
- c->journal.flushed_seq_ondisk,
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- ret = -EIO;
- goto out;
- }
-
- if (discard_in_flight_add(ca, iter.pos.offset, true))
- goto out;
-
- discard_locked = true;
-
- if (!bkey_eq(*discard_pos_done, iter.pos) &&
- ca->mi.discard && !c->opts.nochanges) {
- /*
- * This works without any other locks because this is the only
- * thread that removes items from the need_discard tree
- */
- bch2_trans_unlock_long(trans);
- blkdev_issue_discard(ca->disk_sb.bdev,
- k.k->p.offset * ca->mi.bucket_size,
- ca->mi.bucket_size,
- GFP_KERNEL);
- *discard_pos_done = iter.pos;
-
- ret = bch2_trans_relock_notrace(trans);
- if (ret)
- goto out;
- }
-
- SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
-write:
- alloc_data_type_set(&a->v, a->v.data_type);
-
- ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
- bch2_trans_commit(trans, NULL, NULL,
- BCH_WATERMARK_btree|
- BCH_TRANS_COMMIT_no_enospc);
- if (ret)
- goto out;
-
- count_event(c, bucket_discard);
- s->discarded++;
-out:
- if (discard_locked)
- discard_in_flight_remove(ca, iter.pos.offset);
- s->seen++;
- bch2_trans_iter_exit(trans, &iter);
- printbuf_exit(&buf);
- return ret;
-}
-
-static void bch2_do_discards_work(struct work_struct *work)
-{
- struct bch_dev *ca = container_of(work, struct bch_dev, discard_work);
- struct bch_fs *c = ca->fs;
- struct discard_buckets_state s = {};
- struct bpos discard_pos_done = POS_MAX;
- int ret;
-
- /*
- * We're doing the commit in bch2_discard_one_bucket instead of using
- * for_each_btree_key_commit() so that we can increment counters after
- * successful commit:
- */
- ret = bch2_trans_run(c,
- for_each_btree_key_upto(trans, iter,
- BTREE_ID_need_discard,
- POS(ca->dev_idx, 0),
- POS(ca->dev_idx, U64_MAX), 0, k,
- bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s)));
-
- trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
- bch2_err_str(ret));
-
- percpu_ref_put(&ca->io_ref);
- bch2_write_ref_put(c, BCH_WRITE_REF_discard);
-}
-
-void bch2_dev_do_discards(struct bch_dev *ca)
-{
- struct bch_fs *c = ca->fs;
-
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard))
- return;
-
- if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
- goto put_write_ref;
-
- if (queue_work(c->write_ref_wq, &ca->discard_work))
- return;
-
- percpu_ref_put(&ca->io_ref);
-put_write_ref:
- bch2_write_ref_put(c, BCH_WRITE_REF_discard);
-}
-
-void bch2_do_discards(struct bch_fs *c)
-{
- for_each_member_device(c, ca)
- bch2_dev_do_discards(ca);
-}
-
-static int bch2_clear_bucket_needs_discard(struct btree_trans *trans, struct bpos bucket)
-{
- struct btree_iter iter;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, BTREE_ITER_intent);
- struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
- int ret = bkey_err(k);
- if (ret)
- goto err;
-
- struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, k);
- ret = PTR_ERR_OR_ZERO(a);
- if (ret)
- goto err;
-
- BUG_ON(a->v.dirty_sectors);
- SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
- alloc_data_type_set(&a->v, a->v.data_type);
-
- ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static void bch2_do_discards_fast_work(struct work_struct *work)
-{
- struct bch_dev *ca = container_of(work, struct bch_dev, discard_fast_work);
- struct bch_fs *c = ca->fs;
-
- while (1) {
- bool got_bucket = false;
- u64 bucket;
-
- mutex_lock(&ca->discard_buckets_in_flight_lock);
- darray_for_each(ca->discard_buckets_in_flight, i) {
- if (i->in_progress)
- continue;
-
- got_bucket = true;
- bucket = i->bucket;
- i->in_progress = true;
- break;
- }
- mutex_unlock(&ca->discard_buckets_in_flight_lock);
-
- if (!got_bucket)
- break;
-
- if (ca->mi.discard && !c->opts.nochanges)
- blkdev_issue_discard(ca->disk_sb.bdev,
- bucket_to_sector(ca, bucket),
- ca->mi.bucket_size,
- GFP_KERNEL);
-
- int ret = bch2_trans_commit_do(c, NULL, NULL,
- BCH_WATERMARK_btree|
- BCH_TRANS_COMMIT_no_enospc,
- bch2_clear_bucket_needs_discard(trans, POS(ca->dev_idx, bucket)));
- bch_err_fn(c, ret);
-
- discard_in_flight_remove(ca, bucket);
-
- if (ret)
- break;
- }
-
- percpu_ref_put(&ca->io_ref);
- bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
-}
-
-static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
-{
- struct bch_fs *c = ca->fs;
-
- if (discard_in_flight_add(ca, bucket, false))
- return;
-
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast))
- return;
-
- if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
- goto put_ref;
-
- if (queue_work(c->write_ref_wq, &ca->discard_fast_work))
- return;
-
- percpu_ref_put(&ca->io_ref);
-put_ref:
- bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
-}
-
-static int invalidate_one_bucket(struct btree_trans *trans,
- struct btree_iter *lru_iter,
- struct bkey_s_c lru_k,
- s64 *nr_to_invalidate)
-{
- struct bch_fs *c = trans->c;
- struct bkey_i_alloc_v4 *a = NULL;
- struct printbuf buf = PRINTBUF;
- struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
- unsigned cached_sectors;
- int ret = 0;
-
- if (*nr_to_invalidate <= 0)
- return 1;
-
- if (!bch2_dev_bucket_exists(c, bucket)) {
- prt_str(&buf, "lru entry points to invalid bucket");
- goto err;
- }
-
- if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
- return 0;
-
- a = bch2_trans_start_alloc_update(trans, bucket, BTREE_TRIGGER_bucket_invalidate);
- ret = PTR_ERR_OR_ZERO(a);
- if (ret)
- goto out;
-
- /* We expect harmless races here due to the btree write buffer: */
- if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
- goto out;
-
- BUG_ON(a->v.data_type != BCH_DATA_cached);
- BUG_ON(a->v.dirty_sectors);
-
- if (!a->v.cached_sectors)
- bch_err(c, "invalidating empty bucket, confused");
-
- cached_sectors = a->v.cached_sectors;
-
- SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
- a->v.gen++;
- a->v.data_type = 0;
- a->v.dirty_sectors = 0;
- a->v.stripe_sectors = 0;
- a->v.cached_sectors = 0;
- a->v.io_time[READ] = bch2_current_io_time(c, READ);
- a->v.io_time[WRITE] = bch2_current_io_time(c, WRITE);
-
- ret = bch2_trans_commit(trans, NULL, NULL,
- BCH_WATERMARK_btree|
- BCH_TRANS_COMMIT_no_enospc);
- if (ret)
- goto out;
-
- trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
- --*nr_to_invalidate;
-out:
- printbuf_exit(&buf);
- return ret;
-err:
- prt_str(&buf, "\n lru key: ");
- bch2_bkey_val_to_text(&buf, c, lru_k);
-
- prt_str(&buf, "\n lru entry: ");
- bch2_lru_pos_to_text(&buf, lru_iter->pos);
-
- prt_str(&buf, "\n alloc key: ");
- if (!a)
- bch2_bpos_to_text(&buf, bucket);
- else
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
-
- bch_err(c, "%s", buf.buf);
- if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_lrus) {
- bch2_inconsistent_error(c);
- ret = -EINVAL;
- }
-
- goto out;
-}
-
-static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter *iter,
- struct bch_dev *ca, bool *wrapped)
-{
- struct bkey_s_c k;
-again:
- k = bch2_btree_iter_peek_upto(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
- if (!k.k && !*wrapped) {
- bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0));
- *wrapped = true;
- goto again;
- }
-
- return k;
-}
-
-static void bch2_do_invalidates_work(struct work_struct *work)
-{
- struct bch_dev *ca = container_of(work, struct bch_dev, invalidate_work);
- struct bch_fs *c = ca->fs;
- struct btree_trans *trans = bch2_trans_get(c);
- int ret = 0;
-
- ret = bch2_btree_write_buffer_tryflush(trans);
- if (ret)
- goto err;
-
- s64 nr_to_invalidate =
- should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
- struct btree_iter iter;
- bool wrapped = false;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_lru,
- lru_pos(ca->dev_idx, 0,
- ((bch2_current_io_time(c, READ) + U32_MAX) &
- LRU_TIME_MAX)), 0);
-
- while (true) {
- bch2_trans_begin(trans);
-
- struct bkey_s_c k = next_lru_key(trans, &iter, ca, &wrapped);
- ret = bkey_err(k);
- if (ret)
- goto restart_err;
- if (!k.k)
- break;
-
- ret = invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate);
-restart_err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret)
- break;
-
- bch2_btree_iter_advance(&iter);
- }
- bch2_trans_iter_exit(trans, &iter);
-err:
- bch2_trans_put(trans);
- percpu_ref_put(&ca->io_ref);
- bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
-}
-
-void bch2_dev_do_invalidates(struct bch_dev *ca)
-{
- struct bch_fs *c = ca->fs;
-
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate))
- return;
-
- if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
- goto put_ref;
-
- if (queue_work(c->write_ref_wq, &ca->invalidate_work))
- return;
-
- percpu_ref_put(&ca->io_ref);
-put_ref:
- bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
-}
-
-void bch2_do_invalidates(struct bch_fs *c)
-{
- for_each_member_device(c, ca)
- bch2_dev_do_invalidates(ca);
-}
-
-int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
- u64 bucket_start, u64 bucket_end)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bkey hole;
- struct bpos end = POS(ca->dev_idx, bucket_end);
- struct bch_member *m;
- unsigned long last_updated = jiffies;
- int ret;
-
- BUG_ON(bucket_start > bucket_end);
- BUG_ON(bucket_end > ca->mi.nbuckets);
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
- POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
- BTREE_ITER_prefetch);
- /*
- * Scan the alloc btree for every bucket on @ca, and add buckets to the
- * freespace/need_discard/need_gc_gens btrees as needed:
- */
- while (1) {
- if (time_after(jiffies, last_updated + HZ * 10)) {
- bch_info(ca, "%s: currently at %llu/%llu",
- __func__, iter.pos.offset, ca->mi.nbuckets);
- last_updated = jiffies;
- }
-
- bch2_trans_begin(trans);
-
- if (bkey_ge(iter.pos, end)) {
- ret = 0;
- break;
- }
-
- k = bch2_get_key_or_hole(&iter, end, &hole);
- ret = bkey_err(k);
- if (ret)
- goto bkey_err;
-
- if (k.k->type) {
- /*
- * We process live keys in the alloc btree one at a
- * time:
- */
- struct bch_alloc_v4 a_convert;
- const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
-
- ret = bch2_bucket_do_index(trans, ca, k, a, true) ?:
- bch2_trans_commit(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc);
- if (ret)
- goto bkey_err;
-
- bch2_btree_iter_advance(&iter);
- } else {
- struct bkey_i *freespace;
-
- freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));
- ret = PTR_ERR_OR_ZERO(freespace);
- if (ret)
- goto bkey_err;
-
- bkey_init(&freespace->k);
- freespace->k.type = KEY_TYPE_set;
- freespace->k.p = k.k->p;
- freespace->k.size = k.k->size;
-
- ret = bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?:
- bch2_trans_commit(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc);
- if (ret)
- goto bkey_err;
-
- bch2_btree_iter_set_pos(&iter, k.k->p);
- }
-bkey_err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret)
- break;
- }
-
- bch2_trans_iter_exit(trans, &iter);
- bch2_trans_put(trans);
-
- if (ret < 0) {
- bch_err_msg(ca, ret, "initializing free space");
- return ret;
- }
-
- mutex_lock(&c->sb_lock);
- m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
- SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
- mutex_unlock(&c->sb_lock);
-
- return 0;
-}
-
-int bch2_fs_freespace_init(struct bch_fs *c)
-{
- int ret = 0;
- bool doing_init = false;
-
- /*
- * We can crash during the device add path, so we need to check this on
- * every mount:
- */
-
- for_each_member_device(c, ca) {
- if (ca->mi.freespace_initialized)
- continue;
-
- if (!doing_init) {
- bch_info(c, "initializing freespace");
- doing_init = true;
- }
-
- ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
- if (ret) {
- bch2_dev_put(ca);
- bch_err_fn(c, ret);
- return ret;
- }
- }
-
- if (doing_init) {
- mutex_lock(&c->sb_lock);
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
- bch_verbose(c, "done initializing freespace");
- }
-
- return 0;
-}
-
-/* device removal */
-
-int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
-{
- struct bpos start = POS(ca->dev_idx, 0);
- struct bpos end = POS(ca->dev_idx, U64_MAX);
- int ret;
-
- /*
- * We clear the LRU and need_discard btrees first so that we don't race
- * with bch2_do_invalidates() and bch2_do_discards()
- */
- ret = bch2_dev_remove_stripes(c, ca->dev_idx) ?:
- bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_dev_usage_remove(c, ca->dev_idx);
- bch_err_msg(ca, ret, "removing dev alloc info");
- return ret;
-}
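
bch2_dev_remove_alloc() above chains its deletions with the GNU C "?:" operator: each step runs only if the previous one returned 0, and the first nonzero error short-circuits the rest. A standalone sketch of just that idiom (it needs the GNU extension, as the kernel does; step() is a hypothetical helper):

#include <assert.h>

static int calls;

static int step(int err)
{
	calls++;
	return err;
}

int main(void)
{
	int ret = step(0) ?: step(-5) ?: step(0);

	assert(ret == -5);	/* first error is returned */
	assert(calls == 2);	/* the step after the error never ran */
	return 0;
}
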
-
-/* Bucket IO clocks: */
-
-static int __bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
- size_t bucket_nr, int rw)
-{
- struct bch_fs *c = trans->c;
-
- struct btree_iter iter;
- struct bkey_i_alloc_v4 *a =
- bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(dev, bucket_nr));
- int ret = PTR_ERR_OR_ZERO(a);
- if (ret)
- return ret;
-
- u64 now = bch2_current_io_time(c, rw);
- if (a->v.io_time[rw] == now)
- goto out;
-
- a->v.io_time[rw] = now;
-
- ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
- bch2_trans_commit(trans, NULL, NULL, 0);
-out:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
- size_t bucket_nr, int rw)
-{
- if (bch2_trans_relock(trans))
- bch2_trans_begin(trans);
-
- return nested_lockrestart_do(trans, __bch2_bucket_io_time_reset(trans, dev, bucket_nr, rw));
-}
-
-/* Startup/shutdown (ro/rw): */
-
-void bch2_recalc_capacity(struct bch_fs *c)
-{
- u64 capacity = 0, reserved_sectors = 0, gc_reserve;
- unsigned bucket_size_max = 0;
- unsigned long ra_pages = 0;
-
- lockdep_assert_held(&c->state_lock);
-
- for_each_online_member(c, ca) {
- struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
-
- ra_pages += bdi->ra_pages;
- }
-
- bch2_set_ra_pages(c, ra_pages);
-
- for_each_rw_member(c, ca) {
- u64 dev_reserve = 0;
-
- /*
- * We need to reserve buckets (from the number
- * of currently available buckets) against
- * foreground writes so that mainly copygc can
- * make forward progress.
- *
- * We need enough to refill the various reserves
- * from scratch - copygc will use its entire
-		 * reserve all at once, then run again when
- * its reserve is refilled (from the formerly
- * available buckets).
- *
- * This reserve is just used when considering if
- * allocations for foreground writes must wait -
- * not -ENOSPC calculations.
- */
-
- dev_reserve += ca->nr_btree_reserve * 2;
- dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */
-
- dev_reserve += 1; /* btree write point */
- dev_reserve += 1; /* copygc write point */
- dev_reserve += 1; /* rebalance write point */
-
- dev_reserve *= ca->mi.bucket_size;
-
- capacity += bucket_to_sector(ca, ca->mi.nbuckets -
- ca->mi.first_bucket);
-
- reserved_sectors += dev_reserve * 2;
-
- bucket_size_max = max_t(unsigned, bucket_size_max,
- ca->mi.bucket_size);
- }
-
- gc_reserve = c->opts.gc_reserve_bytes
- ? c->opts.gc_reserve_bytes >> 9
- : div64_u64(capacity * c->opts.gc_reserve_percent, 100);
-
- reserved_sectors = max(gc_reserve, reserved_sectors);
-
- reserved_sectors = min(reserved_sectors, capacity);
-
- c->reserved = reserved_sectors;
- c->capacity = capacity - reserved_sectors;
-
- c->bucket_size_max = bucket_size_max;
-
-	/* Wake up in case someone was waiting for buckets */
- closure_wake_up(&c->freelist_wait);
-}
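
A worked example of the reserve arithmetic in bch2_recalc_capacity() above, using made-up numbers (one device, 512-sector buckets, first_bucket taken as 0); the nr_btree_reserve value and the 8% gc reserve are hypothetical inputs, not defaults taken from the code:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t bucket_size	  = 512;		/* sectors per bucket */
	uint64_t nbuckets	  = 1ULL << 20;		/* hypothetical device size */
	uint64_t nr_btree_reserve = 512;		/* hypothetical */

	uint64_t dev_reserve = nr_btree_reserve * 2	/* btree reserve */
			     + (nbuckets >> 6)		/* copygc reserve */
			     + 3;			/* write points */
	dev_reserve *= bucket_size;

	uint64_t capacity	  = nbuckets * bucket_size;
	uint64_t reserved_sectors = dev_reserve * 2;

	uint64_t gc_reserve = capacity * 8 / 100;	/* hypothetical 8% */
	if (gc_reserve > reserved_sectors)
		reserved_sectors = gc_reserve;
	if (reserved_sectors > capacity)
		reserved_sectors = capacity;

	/* usable capacity is what remains after the larger reserve */
	assert(capacity - reserved_sectors == 493921240);
	return 0;
}
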
-
-u64 bch2_min_rw_member_capacity(struct bch_fs *c)
-{
- u64 ret = U64_MAX;
-
- for_each_rw_member(c, ca)
- ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
- return ret;
-}
-
-static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
-{
- struct open_bucket *ob;
- bool ret = false;
-
- for (ob = c->open_buckets;
- ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
- ob++) {
- spin_lock(&ob->lock);
- if (ob->valid && !ob->on_partial_list &&
- ob->dev == ca->dev_idx)
- ret = true;
- spin_unlock(&ob->lock);
- }
-
- return ret;
-}
-
-/* device goes ro: */
-void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
-{
- lockdep_assert_held(&c->state_lock);
-
- /* First, remove device from allocation groups: */
-
- for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
- clear_bit(ca->dev_idx, c->rw_devs[i].d);
-
- c->rw_devs_change_count++;
-
- /*
- * Capacity is calculated based off of devices in allocation groups:
- */
- bch2_recalc_capacity(c);
-
- bch2_open_buckets_stop(c, ca, false);
-
- /*
- * Wake up threads that were blocked on allocation, so they can notice
- * the device can no longer be removed and the capacity has changed:
- */
- closure_wake_up(&c->freelist_wait);
-
- /*
- * journal_res_get() can block waiting for free space in the journal -
- * it needs to notice there may not be devices to allocate from anymore:
- */
- wake_up(&c->journal.wait);
-
- /* Now wait for any in flight writes: */
-
- closure_wait_event(&c->open_buckets_wait,
- !bch2_dev_has_open_write_point(c, ca));
-}
-
-/* device goes rw: */
-void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
-{
- lockdep_assert_held(&c->state_lock);
-
- for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
- if (ca->mi.data_allowed & (1 << i))
- set_bit(ca->dev_idx, c->rw_devs[i].d);
-
- c->rw_devs_change_count++;
-}
-
-void bch2_dev_allocator_background_exit(struct bch_dev *ca)
-{
- darray_exit(&ca->discard_buckets_in_flight);
-}
-
-void bch2_dev_allocator_background_init(struct bch_dev *ca)
-{
- mutex_init(&ca->discard_buckets_in_flight_lock);
- INIT_WORK(&ca->discard_work, bch2_do_discards_work);
- INIT_WORK(&ca->discard_fast_work, bch2_do_discards_fast_work);
- INIT_WORK(&ca->invalidate_work, bch2_do_invalidates_work);
-}
-
-void bch2_fs_allocator_background_init(struct bch_fs *c)
-{
- spin_lock_init(&c->freelist_lock);
-}
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
deleted file mode 100644
index 163a67b97a40..000000000000
--- a/fs/bcachefs/alloc_background.h
+++ /dev/null
@@ -1,357 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_ALLOC_BACKGROUND_H
-#define _BCACHEFS_ALLOC_BACKGROUND_H
-
-#include "bcachefs.h"
-#include "alloc_types.h"
-#include "buckets.h"
-#include "debug.h"
-#include "super.h"
-
-enum bch_validate_flags;
-
-/* How out of date a pointer gen is allowed to be: */
-#define BUCKET_GC_GEN_MAX 96U
-
-static inline bool bch2_dev_bucket_exists(struct bch_fs *c, struct bpos pos)
-{
- rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu_noerror(c, pos.inode);
- bool ret = ca && bucket_valid(ca, pos.offset);
- rcu_read_unlock();
- return ret;
-}
-
-static inline u64 bucket_to_u64(struct bpos bucket)
-{
- return (bucket.inode << 48) | bucket.offset;
-}
-
-static inline struct bpos u64_to_bucket(u64 bucket)
-{
- return POS(bucket >> 48, bucket & ~(~0ULL << 48));
-}
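
A quick round-trip check of the bucket_to_u64()/u64_to_bucket() packing above: the device index occupies the top 16 bits and the bucket offset the low 48. struct bpos_demo below stands in for struct bpos and is not a bcachefs type.

#include <stdint.h>
#include <assert.h>

struct bpos_demo { uint64_t inode, offset; };

static uint64_t demo_bucket_to_u64(struct bpos_demo b)
{
	return (b.inode << 48) | b.offset;
}

static struct bpos_demo demo_u64_to_bucket(uint64_t v)
{
	return (struct bpos_demo) { v >> 48, v & ~(~0ULL << 48) };
}

int main(void)
{
	struct bpos_demo b = { .inode = 2, .offset = 1000000 };
	struct bpos_demo r = demo_u64_to_bucket(demo_bucket_to_u64(b));

	assert(r.inode == b.inode && r.offset == b.offset);
	return 0;
}
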
-
-static inline u8 alloc_gc_gen(struct bch_alloc_v4 a)
-{
- return a.gen - a.oldest_gen;
-}
-
-static inline void alloc_to_bucket(struct bucket *dst, struct bch_alloc_v4 src)
-{
- dst->gen = src.gen;
- dst->data_type = src.data_type;
- dst->stripe_sectors = src.stripe_sectors;
- dst->dirty_sectors = src.dirty_sectors;
- dst->cached_sectors = src.cached_sectors;
- dst->stripe = src.stripe;
-}
-
-static inline void __bucket_m_to_alloc(struct bch_alloc_v4 *dst, struct bucket src)
-{
- dst->gen = src.gen;
- dst->data_type = src.data_type;
- dst->stripe_sectors = src.stripe_sectors;
- dst->dirty_sectors = src.dirty_sectors;
- dst->cached_sectors = src.cached_sectors;
- dst->stripe = src.stripe;
-}
-
-static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
-{
- struct bch_alloc_v4 ret = {};
- __bucket_m_to_alloc(&ret, b);
- return ret;
-}
-
-static inline enum bch_data_type bucket_data_type(enum bch_data_type data_type)
-{
- switch (data_type) {
- case BCH_DATA_cached:
- case BCH_DATA_stripe:
- return BCH_DATA_user;
- default:
- return data_type;
- }
-}
-
-static inline bool bucket_data_type_mismatch(enum bch_data_type bucket,
- enum bch_data_type ptr)
-{
- return !data_type_is_empty(bucket) &&
- bucket_data_type(bucket) != bucket_data_type(ptr);
-}
-
-/*
- * It is my general preference to use unsigned types for unsigned quantities -
- * however, these helpers are used in disk accounting calculations run by
- * triggers where the output will be negated and added to an s64. unsigned is
- * right out even though all these quantities will fit in 32 bits, since it
- * won't be sign extended correctly; u64 will negate "correctly", but s64 is the
- * simpler option here.
- */
-static inline s64 bch2_bucket_sectors_total(struct bch_alloc_v4 a)
-{
- return a.stripe_sectors + a.dirty_sectors + a.cached_sectors;
-}
-
-static inline s64 bch2_bucket_sectors_dirty(struct bch_alloc_v4 a)
-{
- return a.stripe_sectors + a.dirty_sectors;
-}
-
-static inline s64 bch2_bucket_sectors(struct bch_alloc_v4 a)
-{
- return a.data_type == BCH_DATA_cached
- ? a.cached_sectors
- : bch2_bucket_sectors_dirty(a);
-}
-
-static inline s64 bch2_bucket_sectors_fragmented(struct bch_dev *ca,
- struct bch_alloc_v4 a)
-{
- int d = bch2_bucket_sectors(a);
-
- return d ? max(0, ca->mi.bucket_size - d) : 0;
-}
-
-static inline s64 bch2_gc_bucket_sectors_fragmented(struct bch_dev *ca, struct bucket a)
-{
- int d = a.stripe_sectors + a.dirty_sectors;
-
- return d ? max(0, ca->mi.bucket_size - d) : 0;
-}
-
-static inline s64 bch2_bucket_sectors_unstriped(struct bch_alloc_v4 a)
-{
- return a.data_type == BCH_DATA_stripe ? a.dirty_sectors : 0;
-}
-
-static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a,
- enum bch_data_type data_type)
-{
- if (a.stripe)
- return data_type == BCH_DATA_parity ? data_type : BCH_DATA_stripe;
- if (bch2_bucket_sectors_dirty(a))
- return data_type;
- if (a.cached_sectors)
- return BCH_DATA_cached;
- if (BCH_ALLOC_V4_NEED_DISCARD(&a))
- return BCH_DATA_need_discard;
- if (alloc_gc_gen(a) >= BUCKET_GC_GEN_MAX)
- return BCH_DATA_need_gc_gens;
- return BCH_DATA_free;
-}
-
-static inline void alloc_data_type_set(struct bch_alloc_v4 *a, enum bch_data_type data_type)
-{
- a->data_type = alloc_data_type(*a, data_type);
-}
-
-static inline u64 alloc_lru_idx_read(struct bch_alloc_v4 a)
-{
- return a.data_type == BCH_DATA_cached
- ? a.io_time[READ] & LRU_TIME_MAX
- : 0;
-}
-
-#define DATA_TYPES_MOVABLE \
- ((1U << BCH_DATA_btree)| \
- (1U << BCH_DATA_user)| \
- (1U << BCH_DATA_stripe))
-
-static inline bool data_type_movable(enum bch_data_type type)
-{
- return (1U << type) & DATA_TYPES_MOVABLE;
-}
-
-static inline u64 alloc_lru_idx_fragmentation(struct bch_alloc_v4 a,
- struct bch_dev *ca)
-{
- if (a.data_type >= BCH_DATA_NR)
- return 0;
-
- if (!data_type_movable(a.data_type) ||
- !bch2_bucket_sectors_fragmented(ca, a))
- return 0;
-
- /*
- * avoid overflowing LRU_TIME_BITS on a corrupted fs, when
- * bucket_sectors_dirty is (much) bigger than bucket_size
- */
- u64 d = min_t(s64, bch2_bucket_sectors_dirty(a),
- ca->mi.bucket_size);
-
- return div_u64(d * (1ULL << 31), ca->mi.bucket_size);
-}
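
A worked example of the fragmentation LRU index computed above, with hypothetical numbers (a 512-sector bucket holding 128 dirty sectors): the in-use fraction of the bucket is scaled onto a 0..2^31 range.

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t bucket_size = 512;	/* hypothetical sectors per bucket */
	uint64_t dirty	     = 128;	/* hypothetical dirty sectors */

	uint64_t idx = dirty * (1ULL << 31) / bucket_size;

	assert(idx == 536870912);	/* 128/512 of 2^31 */
	return 0;
}
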
-
-static inline u64 alloc_freespace_genbits(struct bch_alloc_v4 a)
-{
- return ((u64) alloc_gc_gen(a) >> 4) << 56;
-}
-
-static inline struct bpos alloc_freespace_pos(struct bpos pos, struct bch_alloc_v4 a)
-{
- pos.offset |= alloc_freespace_genbits(a);
- return pos;
-}
-
-static inline unsigned alloc_v4_u64s_noerror(const struct bch_alloc_v4 *a)
-{
- return (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?:
- BCH_ALLOC_V4_U64s_V0) +
- BCH_ALLOC_V4_NR_BACKPOINTERS(a) *
- (sizeof(struct bch_backpointer) / sizeof(u64));
-}
-
-static inline unsigned alloc_v4_u64s(const struct bch_alloc_v4 *a)
-{
- unsigned ret = alloc_v4_u64s_noerror(a);
- BUG_ON(ret > U8_MAX - BKEY_U64s);
- return ret;
-}
-
-static inline void set_alloc_v4_u64s(struct bkey_i_alloc_v4 *a)
-{
- set_bkey_val_u64s(&a->k, alloc_v4_u64s(&a->v));
-}
-
-struct bkey_i_alloc_v4 *
-bch2_trans_start_alloc_update_noupdate(struct btree_trans *, struct btree_iter *, struct bpos);
-struct bkey_i_alloc_v4 *
-bch2_trans_start_alloc_update(struct btree_trans *, struct bpos,
- enum btree_iter_update_trigger_flags);
-
-void __bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *);
-
-static inline const struct bch_alloc_v4 *bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *convert)
-{
- const struct bch_alloc_v4 *ret;
-
- if (unlikely(k.k->type != KEY_TYPE_alloc_v4))
- goto slowpath;
-
- ret = bkey_s_c_to_alloc_v4(k).v;
- if (BCH_ALLOC_V4_BACKPOINTERS_START(ret) != BCH_ALLOC_V4_U64s)
- goto slowpath;
-
- return ret;
-slowpath:
- __bch2_alloc_to_v4(k, convert);
- return convert;
-}
-
-struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s_c);
-
-int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
-
-int bch2_alloc_v1_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-int bch2_alloc_v2_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-int bch2_alloc_v3_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-int bch2_alloc_v4_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-void bch2_alloc_v4_swab(struct bkey_s);
-void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-
-#define bch2_bkey_ops_alloc ((struct bkey_ops) { \
- .key_validate = bch2_alloc_v1_validate, \
- .val_to_text = bch2_alloc_to_text, \
- .trigger = bch2_trigger_alloc, \
- .min_val_size = 8, \
-})
-
-#define bch2_bkey_ops_alloc_v2 ((struct bkey_ops) { \
- .key_validate = bch2_alloc_v2_validate, \
- .val_to_text = bch2_alloc_to_text, \
- .trigger = bch2_trigger_alloc, \
- .min_val_size = 8, \
-})
-
-#define bch2_bkey_ops_alloc_v3 ((struct bkey_ops) { \
- .key_validate = bch2_alloc_v3_validate, \
- .val_to_text = bch2_alloc_to_text, \
- .trigger = bch2_trigger_alloc, \
- .min_val_size = 16, \
-})
-
-#define bch2_bkey_ops_alloc_v4 ((struct bkey_ops) { \
- .key_validate = bch2_alloc_v4_validate, \
- .val_to_text = bch2_alloc_to_text, \
- .swab = bch2_alloc_v4_swab, \
- .trigger = bch2_trigger_alloc, \
- .min_val_size = 48, \
-})
-
-int bch2_bucket_gens_validate(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags);
-void bch2_bucket_gens_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-
-#define bch2_bkey_ops_bucket_gens ((struct bkey_ops) { \
- .key_validate = bch2_bucket_gens_validate, \
- .val_to_text = bch2_bucket_gens_to_text, \
-})
-
-int bch2_bucket_gens_init(struct bch_fs *);
-
-static inline bool bkey_is_alloc(const struct bkey *k)
-{
- return k->type == KEY_TYPE_alloc ||
- k->type == KEY_TYPE_alloc_v2 ||
- k->type == KEY_TYPE_alloc_v3;
-}
-
-int bch2_alloc_read(struct bch_fs *);
-
-int bch2_alloc_key_to_dev_counters(struct btree_trans *, struct bch_dev *,
- const struct bch_alloc_v4 *,
- const struct bch_alloc_v4 *, unsigned);
-int bch2_trigger_alloc(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s,
- enum btree_iter_update_trigger_flags);
-int bch2_check_alloc_info(struct bch_fs *);
-int bch2_check_alloc_to_lru_refs(struct bch_fs *);
-void bch2_dev_do_discards(struct bch_dev *);
-void bch2_do_discards(struct bch_fs *);
-
-static inline u64 should_invalidate_buckets(struct bch_dev *ca,
- struct bch_dev_usage u)
-{
- u64 want_free = ca->mi.nbuckets >> 7;
- u64 free = max_t(s64, 0,
- u.d[BCH_DATA_free].buckets
- + u.d[BCH_DATA_need_discard].buckets
- - bch2_dev_buckets_reserved(ca, BCH_WATERMARK_stripe));
-
- return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
-}
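
A worked example of the invalidate target computed by should_invalidate_buckets() above, with made-up usage numbers: keep roughly 1/128 of the device's buckets free, and invalidate only as many cached buckets as are needed (and as exist) to get there.

#include <stdint.h>
#include <assert.h>

static int64_t clamp_s64(int64_t v, int64_t lo, int64_t hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
	uint64_t nbuckets = 1ULL << 20;		/* hypothetical device size */
	int64_t	 free	  = 5000;		/* free + need_discard - reserved */
	int64_t	 cached	  = 100000;		/* cached buckets available */

	int64_t want_free = nbuckets >> 7;	/* 8192 */
	int64_t target	  = clamp_s64(want_free - free, 0, cached);

	assert(target == 3192);
	return 0;
}
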
-
-void bch2_dev_do_invalidates(struct bch_dev *);
-void bch2_do_invalidates(struct bch_fs *);
-
-static inline struct bch_backpointer *alloc_v4_backpointers(struct bch_alloc_v4 *a)
-{
- return (void *) ((u64 *) &a->v +
- (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?:
- BCH_ALLOC_V4_U64s_V0));
-}
-
-static inline const struct bch_backpointer *alloc_v4_backpointers_c(const struct bch_alloc_v4 *a)
-{
- return (void *) ((u64 *) &a->v + BCH_ALLOC_V4_BACKPOINTERS_START(a));
-}
-
-int bch2_dev_freespace_init(struct bch_fs *, struct bch_dev *, u64, u64);
-int bch2_fs_freespace_init(struct bch_fs *);
-int bch2_dev_remove_alloc(struct bch_fs *, struct bch_dev *);
-
-void bch2_recalc_capacity(struct bch_fs *);
-u64 bch2_min_rw_member_capacity(struct bch_fs *);
-
-void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
-void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);
-
-void bch2_dev_allocator_background_exit(struct bch_dev *);
-void bch2_dev_allocator_background_init(struct bch_dev *);
-
-void bch2_fs_allocator_background_init(struct bch_fs *);
-
-#endif /* _BCACHEFS_ALLOC_BACKGROUND_H */
diff --git a/fs/bcachefs/alloc_background_format.h b/fs/bcachefs/alloc_background_format.h
deleted file mode 100644
index befdaa95c515..000000000000
--- a/fs/bcachefs/alloc_background_format.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_ALLOC_BACKGROUND_FORMAT_H
-#define _BCACHEFS_ALLOC_BACKGROUND_FORMAT_H
-
-struct bch_alloc {
- struct bch_val v;
- __u8 fields;
- __u8 gen;
- __u8 data[];
-} __packed __aligned(8);
-
-#define BCH_ALLOC_FIELDS_V1() \
- x(read_time, 16) \
- x(write_time, 16) \
- x(data_type, 8) \
- x(dirty_sectors, 16) \
- x(cached_sectors, 16) \
- x(oldest_gen, 8) \
- x(stripe, 32) \
- x(stripe_redundancy, 8)
-
-enum {
-#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
- BCH_ALLOC_FIELDS_V1()
-#undef x
-};
-
-struct bch_alloc_v2 {
- struct bch_val v;
- __u8 nr_fields;
- __u8 gen;
- __u8 oldest_gen;
- __u8 data_type;
- __u8 data[];
-} __packed __aligned(8);
-
-#define BCH_ALLOC_FIELDS_V2() \
- x(read_time, 64) \
- x(write_time, 64) \
- x(dirty_sectors, 32) \
- x(cached_sectors, 32) \
- x(stripe, 32) \
- x(stripe_redundancy, 8)
-
-struct bch_alloc_v3 {
- struct bch_val v;
- __le64 journal_seq;
- __le32 flags;
- __u8 nr_fields;
- __u8 gen;
- __u8 oldest_gen;
- __u8 data_type;
- __u8 data[];
-} __packed __aligned(8);
-
-LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags, 0, 1)
-LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2)
-
-struct bch_alloc_v4 {
- struct bch_val v;
- __u64 journal_seq;
- __u32 flags;
- __u8 gen;
- __u8 oldest_gen;
- __u8 data_type;
- __u8 stripe_redundancy;
- __u32 dirty_sectors;
- __u32 cached_sectors;
- __u64 io_time[2];
- __u32 stripe;
- __u32 nr_external_backpointers;
- /* end of fields in original version of alloc_v4 */
- __u64 _fragmentation_lru; /* obsolete */
- __u32 stripe_sectors;
- __u32 pad;
-} __packed __aligned(8);
-
-#define BCH_ALLOC_V4_U64s_V0 6
-#define BCH_ALLOC_V4_U64s (sizeof(struct bch_alloc_v4) / sizeof(__u64))
-
-BITMASK(BCH_ALLOC_V4_NEED_DISCARD, struct bch_alloc_v4, flags, 0, 1)
-BITMASK(BCH_ALLOC_V4_NEED_INC_GEN, struct bch_alloc_v4, flags, 1, 2)
-BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags, 2, 8)
-BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS, struct bch_alloc_v4, flags, 8, 14)
-
-#define KEY_TYPE_BUCKET_GENS_BITS 8
-#define KEY_TYPE_BUCKET_GENS_NR (1U << KEY_TYPE_BUCKET_GENS_BITS)
-#define KEY_TYPE_BUCKET_GENS_MASK (KEY_TYPE_BUCKET_GENS_NR - 1)
-
-struct bch_bucket_gens {
- struct bch_val v;
- u8 gens[KEY_TYPE_BUCKET_GENS_NR];
-} __packed __aligned(8);
-
-#endif /* _BCACHEFS_ALLOC_BACKGROUND_FORMAT_H */
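
The constants at the end of alloc_background_format.h say each bch_bucket_gens key stores gens for 256 buckets; a plausible reading, matching the b & KEY_TYPE_BUCKET_GENS_MASK indexing used in the alloc code above, is that bucket b lives in the key for b >> 8 at slot b & 255. A small sketch under that assumption (the macros below are local copies, not the bcachefs ones):

#include <stdint.h>
#include <assert.h>

#define BUCKET_GENS_BITS 8
#define BUCKET_GENS_NR	 (1U << BUCKET_GENS_BITS)
#define BUCKET_GENS_MASK (BUCKET_GENS_NR - 1)

int main(void)
{
	uint64_t bucket = 1000;

	uint64_t key_offset = bucket >> BUCKET_GENS_BITS;	/* key covering this bucket */
	unsigned slot	    = bucket & BUCKET_GENS_MASK;	/* index into gens[] */

	assert(key_offset == 3 && slot == 232);
	return 0;
}
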
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
deleted file mode 100644
index 372178c8d416..000000000000
--- a/fs/bcachefs/alloc_foreground.c
+++ /dev/null
@@ -1,1818 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright 2012 Google, Inc.
- *
- * Foreground allocator code: allocate buckets from freelist, and allocate in
- * sector granularity from writepoints.
- *
- * bch2_bucket_alloc() allocates a single bucket from a specific device.
- *
- * bch2_bucket_alloc_set() allocates one or more buckets from different devices
- * in a given filesystem.
- */
-
-#include "bcachefs.h"
-#include "alloc_background.h"
-#include "alloc_foreground.h"
-#include "backpointers.h"
-#include "btree_iter.h"
-#include "btree_update.h"
-#include "btree_gc.h"
-#include "buckets.h"
-#include "buckets_waiting_for_journal.h"
-#include "clock.h"
-#include "debug.h"
-#include "disk_groups.h"
-#include "ec.h"
-#include "error.h"
-#include "io_write.h"
-#include "journal.h"
-#include "movinggc.h"
-#include "nocow_locking.h"
-#include "trace.h"
-
-#include <linux/math64.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
-
-static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
- struct mutex *lock)
-{
- if (!mutex_trylock(lock)) {
- bch2_trans_unlock(trans);
- mutex_lock(lock);
- }
-}
-
-const char * const bch2_watermarks[] = {
-#define x(t) #t,
- BCH_WATERMARKS()
-#undef x
- NULL
-};
-
-/*
- * Open buckets represent a bucket that's currently being allocated from. They
- * serve two purposes:
- *
- * - They track buckets that have been partially allocated, allowing for
- * sub-bucket sized allocations - they're used by the sector allocator below
- *
- * - They provide a reference to the buckets they own that mark and sweep GC
- * can find, until the new allocation has a pointer to it inserted into the
- * btree
- *
- * When allocating some space with the sector allocator, the allocation comes
- * with a reference to an open bucket - the caller is required to put that
- * reference _after_ doing the index update that makes its allocation reachable.
- */
-
-void bch2_reset_alloc_cursors(struct bch_fs *c)
-{
- rcu_read_lock();
- for_each_member_device_rcu(c, ca, NULL)
- memset(ca->alloc_cursor, 0, sizeof(ca->alloc_cursor));
- rcu_read_unlock();
-}
-
-static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
-{
- open_bucket_idx_t idx = ob - c->open_buckets;
- open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
-
- ob->hash = *slot;
- *slot = idx;
-}
-
-static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
-{
- open_bucket_idx_t idx = ob - c->open_buckets;
- open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
-
- while (*slot != idx) {
- BUG_ON(!*slot);
- slot = &c->open_buckets[*slot].hash;
- }
-
- *slot = ob->hash;
- ob->hash = 0;
-}
-
-void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
-{
- struct bch_dev *ca = ob_dev(c, ob);
-
- if (ob->ec) {
- ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
- return;
- }
-
- percpu_down_read(&c->mark_lock);
- spin_lock(&ob->lock);
-
- ob->valid = false;
- ob->data_type = 0;
-
- spin_unlock(&ob->lock);
- percpu_up_read(&c->mark_lock);
-
- spin_lock(&c->freelist_lock);
- bch2_open_bucket_hash_remove(c, ob);
-
- ob->freelist = c->open_buckets_freelist;
- c->open_buckets_freelist = ob - c->open_buckets;
-
- c->open_buckets_nr_free++;
- ca->nr_open_buckets--;
- spin_unlock(&c->freelist_lock);
-
- closure_wake_up(&c->open_buckets_wait);
-}
-
-void bch2_open_bucket_write_error(struct bch_fs *c,
- struct open_buckets *obs,
- unsigned dev)
-{
- struct open_bucket *ob;
- unsigned i;
-
- open_bucket_for_each(c, obs, ob, i)
- if (ob->dev == dev && ob->ec)
- bch2_ec_bucket_cancel(c, ob);
-}
-
-static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
-{
- struct open_bucket *ob;
-
- BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);
-
- ob = c->open_buckets + c->open_buckets_freelist;
- c->open_buckets_freelist = ob->freelist;
- atomic_set(&ob->pin, 1);
- ob->data_type = 0;
-
- c->open_buckets_nr_free--;
- return ob;
-}
-
-static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
-{
- BUG_ON(c->open_buckets_partial_nr >=
- ARRAY_SIZE(c->open_buckets_partial));
-
- spin_lock(&c->freelist_lock);
- rcu_read_lock();
- bch2_dev_rcu(c, ob->dev)->nr_partial_buckets++;
- rcu_read_unlock();
-
- ob->on_partial_list = true;
- c->open_buckets_partial[c->open_buckets_partial_nr++] =
- ob - c->open_buckets;
- spin_unlock(&c->freelist_lock);
-
- closure_wake_up(&c->open_buckets_wait);
- closure_wake_up(&c->freelist_wait);
-}
-
-/* _only_ for allocating the journal on a new device: */
-long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
-{
- while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
- u64 b = ca->new_fs_bucket_idx++;
-
- if (!is_superblock_bucket(ca, b) &&
- (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
- return b;
- }
-
- return -1;
-}
-
-static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
-{
- switch (watermark) {
- case BCH_WATERMARK_interior_updates:
- return 0;
- case BCH_WATERMARK_reclaim:
- return OPEN_BUCKETS_COUNT / 6;
- case BCH_WATERMARK_btree:
- case BCH_WATERMARK_btree_copygc:
- return OPEN_BUCKETS_COUNT / 4;
- case BCH_WATERMARK_copygc:
- return OPEN_BUCKETS_COUNT / 3;
- default:
- return OPEN_BUCKETS_COUNT / 2;
- }
-}
-
-static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
- u64 bucket,
- enum bch_watermark watermark,
- const struct bch_alloc_v4 *a,
- struct bucket_alloc_state *s,
- struct closure *cl)
-{
- struct open_bucket *ob;
-
- if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
- s->skipped_nouse++;
- return NULL;
- }
-
- if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
- s->skipped_open++;
- return NULL;
- }
-
- if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
- c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
- s->skipped_need_journal_commit++;
- return NULL;
- }
-
- if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
- s->skipped_nocow++;
- return NULL;
- }
-
- spin_lock(&c->freelist_lock);
-
- if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) {
- if (cl)
- closure_wait(&c->open_buckets_wait, cl);
-
- track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], true);
- spin_unlock(&c->freelist_lock);
- return ERR_PTR(-BCH_ERR_open_buckets_empty);
- }
-
- /* Recheck under lock: */
- if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
- spin_unlock(&c->freelist_lock);
- s->skipped_open++;
- return NULL;
- }
-
- ob = bch2_open_bucket_alloc(c);
-
- spin_lock(&ob->lock);
-
- ob->valid = true;
- ob->sectors_free = ca->mi.bucket_size;
- ob->dev = ca->dev_idx;
- ob->gen = a->gen;
- ob->bucket = bucket;
- spin_unlock(&ob->lock);
-
- ca->nr_open_buckets++;
- bch2_open_bucket_hash_add(c, ob);
-
- track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], false);
- track_event_change(&c->times[BCH_TIME_blocked_allocate], false);
-
- spin_unlock(&c->freelist_lock);
- return ob;
-}
-
-static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
- enum bch_watermark watermark, u64 free_entry,
- struct bucket_alloc_state *s,
- struct bkey_s_c freespace_k,
- struct closure *cl)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter = { NULL };
- struct bkey_s_c k;
- struct open_bucket *ob;
- struct bch_alloc_v4 a_convert;
- const struct bch_alloc_v4 *a;
- u64 b = free_entry & ~(~0ULL << 56);
- unsigned genbits = free_entry >> 56;
- struct printbuf buf = PRINTBUF;
- int ret;
-
- if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) {
- prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n"
- " freespace key ",
- ca->mi.first_bucket, ca->mi.nbuckets);
- bch2_bkey_val_to_text(&buf, c, freespace_k);
- bch2_trans_inconsistent(trans, "%s", buf.buf);
- ob = ERR_PTR(-EIO);
- goto err;
- }
-
- k = bch2_bkey_get_iter(trans, &iter,
- BTREE_ID_alloc, POS(ca->dev_idx, b),
- BTREE_ITER_cached);
- ret = bkey_err(k);
- if (ret) {
- ob = ERR_PTR(ret);
- goto err;
- }
-
- a = bch2_alloc_to_v4(k, &a_convert);
-
- if (a->data_type != BCH_DATA_free) {
- if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
- ob = NULL;
- goto err;
- }
-
- prt_printf(&buf, "non free bucket in freespace btree\n"
- " freespace key ");
- bch2_bkey_val_to_text(&buf, c, freespace_k);
- prt_printf(&buf, "\n ");
- bch2_bkey_val_to_text(&buf, c, k);
- bch2_trans_inconsistent(trans, "%s", buf.buf);
- ob = ERR_PTR(-EIO);
- goto err;
- }
-
- if (genbits != (alloc_freespace_genbits(*a) >> 56) &&
- c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
- prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
- " freespace key ",
- genbits, alloc_freespace_genbits(*a) >> 56);
- bch2_bkey_val_to_text(&buf, c, freespace_k);
- prt_printf(&buf, "\n ");
- bch2_bkey_val_to_text(&buf, c, k);
- bch2_trans_inconsistent(trans, "%s", buf.buf);
- ob = ERR_PTR(-EIO);
- goto err;
- }
-
- if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_extents_to_backpointers) {
- struct bch_backpointer bp;
- struct bpos bp_pos = POS_MIN;
-
- ret = bch2_get_next_backpointer(trans, ca, POS(ca->dev_idx, b), -1,
- &bp_pos, &bp,
- BTREE_ITER_nopreserve);
- if (ret) {
- ob = ERR_PTR(ret);
- goto err;
- }
-
- if (!bkey_eq(bp_pos, POS_MAX)) {
- /*
- * Bucket may have data in it - we don't call
-			 * bch2_trans_inconsistent() because fsck hasn't
- * finished yet
- */
- ob = NULL;
- goto err;
- }
- }
-
- ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
- if (!ob)
- bch2_set_btree_iter_dontneed(&iter);
-err:
- if (iter.path)
- bch2_set_btree_iter_dontneed(&iter);
- bch2_trans_iter_exit(trans, &iter);
- printbuf_exit(&buf);
- return ob;
-}
-
-/*
- * This path is for before the freespace btree is initialized:
- *
- * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
- * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
- */
-static noinline struct open_bucket *
-bch2_bucket_alloc_early(struct btree_trans *trans,
- struct bch_dev *ca,
- enum bch_watermark watermark,
- struct bucket_alloc_state *s,
- struct closure *cl)
-{
- struct btree_iter iter, citer;
- struct bkey_s_c k, ck;
- struct open_bucket *ob = NULL;
- u64 first_bucket = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
- u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
- u64 alloc_start = max(first_bucket, *dev_alloc_cursor);
- u64 alloc_cursor = alloc_start;
- int ret;
-
- /*
- * Scan with an uncached iterator to avoid polluting the key cache. An
- * uncached iter will return a cached key if one exists, but if not
- * there is no other underlying protection for the associated key cache
- * slot. To avoid racing bucket allocations, look up the cached key slot
- * of any likely allocation candidate before attempting to proceed with
- * the allocation. This provides proper exclusion on the associated
- * bucket.
- */
-again:
- for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
- BTREE_ITER_slots, k, ret) {
- u64 bucket = k.k->p.offset;
-
- if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
- break;
-
- if (ca->new_fs_bucket_idx &&
- is_superblock_bucket(ca, k.k->p.offset))
- continue;
-
- if (s->btree_bitmap != BTREE_BITMAP_ANY &&
- s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
- bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
- if (s->btree_bitmap == BTREE_BITMAP_YES &&
- bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
- break;
-
- bucket = sector_to_bucket(ca,
- round_up(bucket_to_sector(ca, bucket) + 1,
- 1ULL << ca->mi.btree_bitmap_shift));
- bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, bucket));
- s->buckets_seen++;
- s->skipped_mi_btree_bitmap++;
- continue;
- }
-
- struct bch_alloc_v4 a_convert;
- const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
- if (a->data_type != BCH_DATA_free)
- continue;
-
- /* now check the cached key to serialize concurrent allocs of the bucket */
- ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_cached);
- ret = bkey_err(ck);
- if (ret)
- break;
-
- a = bch2_alloc_to_v4(ck, &a_convert);
- if (a->data_type != BCH_DATA_free)
- goto next;
-
- s->buckets_seen++;
-
- ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
-next:
- bch2_set_btree_iter_dontneed(&citer);
- bch2_trans_iter_exit(trans, &citer);
- if (ob)
- break;
- }
- bch2_trans_iter_exit(trans, &iter);
-
- alloc_cursor = iter.pos.offset;
-
- if (!ob && ret)
- ob = ERR_PTR(ret);
-
- if (!ob && alloc_start > first_bucket) {
- alloc_cursor = alloc_start = first_bucket;
- goto again;
- }
-
- *dev_alloc_cursor = alloc_cursor;
-
- return ob;
-}
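
/*
 * Illustrative sketch (not part of the patch): the btree-bitmap skip in
 * bch2_bucket_alloc_early() above jumps the scan to the first bucket of the
 * next bitmap granule when a candidate fails the bitmap check. The helpers
 * below are simplified stand-ins: a flat BUCKET_SIZE and BITMAP_SHIFT are
 * assumed, whereas the real bucket_to_sector()/sector_to_bucket() come from
 * the member info in ca->mi.
 */
#include <stdint.h>
#include <stdio.h>

#define BUCKET_SIZE	128ULL		/* sectors per bucket (assumed) */
#define BITMAP_SHIFT	10		/* bitmap granule = 1024 sectors (assumed) */

static uint64_t bucket_to_sector(uint64_t b) { return b * BUCKET_SIZE; }
static uint64_t sector_to_bucket(uint64_t s) { return s / BUCKET_SIZE; }
static uint64_t round_up_pow2(uint64_t x, uint64_t g) { return (x + g - 1) & ~(g - 1); }

int main(void)
{
	uint64_t bucket = 37;	/* candidate that failed the bitmap check */

	/* jump to the first bucket of the next bitmap granule */
	uint64_t next = sector_to_bucket(round_up_pow2(bucket_to_sector(bucket) + 1,
						       1ULL << BITMAP_SHIFT));

	printf("skip from bucket %llu to %llu\n",
	       (unsigned long long) bucket, (unsigned long long) next);
	return 0;
}
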
-
-static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
- struct bch_dev *ca,
- enum bch_watermark watermark,
- struct bucket_alloc_state *s,
- struct closure *cl)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- struct open_bucket *ob = NULL;
- u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
- u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(*dev_alloc_cursor));
- u64 alloc_cursor = alloc_start;
- int ret;
-
- BUG_ON(ca->new_fs_bucket_idx);
-again:
- for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
- POS(ca->dev_idx, alloc_cursor), 0, k, ret) {
- if (k.k->p.inode != ca->dev_idx)
- break;
-
- for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k));
- alloc_cursor < k.k->p.offset;
- alloc_cursor++) {
- s->buckets_seen++;
-
- u64 bucket = alloc_cursor & ~(~0ULL << 56);
- if (s->btree_bitmap != BTREE_BITMAP_ANY &&
- s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
- bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
- if (s->btree_bitmap == BTREE_BITMAP_YES &&
- bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
- goto fail;
-
- bucket = sector_to_bucket(ca,
- round_up(bucket_to_sector(ca, bucket) + 1,
- 1ULL << ca->mi.btree_bitmap_shift));
- u64 genbits = alloc_cursor >> 56;
- alloc_cursor = bucket | (genbits << 56);
-
- if (alloc_cursor > k.k->p.offset)
- bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor));
- s->skipped_mi_btree_bitmap++;
- continue;
- }
-
- ob = try_alloc_bucket(trans, ca, watermark,
- alloc_cursor, s, k, cl);
- if (ob) {
- bch2_set_btree_iter_dontneed(&iter);
- break;
- }
- }
-
- if (ob || ret)
- break;
- }
-fail:
- bch2_trans_iter_exit(trans, &iter);
-
- if (!ob && ret)
- ob = ERR_PTR(ret);
-
- if (!ob && alloc_start > ca->mi.first_bucket) {
- alloc_cursor = alloc_start = ca->mi.first_bucket;
- goto again;
- }
-
- *dev_alloc_cursor = alloc_cursor;
-
- return ob;
-}
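
/*
 * Illustrative sketch (not part of the patch): freespace-btree cursors in
 * bch2_bucket_alloc_freelist() above pack the bucket number into the low 56
 * bits and generation bits into the top 8, so advancing the bucket has to
 * preserve the genbits. The values below are made up for the demonstration.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BUCKET_MASK	(~(~0ULL << 56))

int main(void)
{
	uint64_t genbits = 0x5a, bucket = 1234;
	uint64_t cursor = bucket | (genbits << 56);

	/* unpack, advance the bucket, repack with the same genbits */
	uint64_t b = cursor & BUCKET_MASK;
	uint64_t g = cursor >> 56;
	cursor = (b + 1) | (g << 56);

	printf("bucket=%" PRIu64 " genbits=%#" PRIx64 " cursor=%#" PRIx64 "\n",
	       cursor & BUCKET_MASK, cursor >> 56, cursor);
	return 0;
}
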
-
-static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
- enum bch_watermark watermark,
- enum bch_data_type data_type,
- struct closure *cl,
- struct bch_dev_usage *usage,
- struct bucket_alloc_state *s,
- struct open_bucket *ob)
-{
- struct printbuf buf = PRINTBUF;
-
- printbuf_tabstop_push(&buf, 24);
-
- prt_printf(&buf, "dev\t%s (%u)\n", ca->name, ca->dev_idx);
- prt_printf(&buf, "watermark\t%s\n", bch2_watermarks[watermark]);
- prt_printf(&buf, "data type\t%s\n", __bch2_data_types[data_type]);
- prt_printf(&buf, "blocking\t%u\n", cl != NULL);
- prt_printf(&buf, "free\t%llu\n", usage->d[BCH_DATA_free].buckets);
- prt_printf(&buf, "avail\t%llu\n", dev_buckets_free(ca, *usage, watermark));
- prt_printf(&buf, "copygc_wait\t%lu/%lli\n",
- bch2_copygc_wait_amount(c),
- c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now));
- prt_printf(&buf, "seen\t%llu\n", s->buckets_seen);
- prt_printf(&buf, "open\t%llu\n", s->skipped_open);
- prt_printf(&buf, "need journal commit\t%llu\n", s->skipped_need_journal_commit);
- prt_printf(&buf, "nocow\t%llu\n", s->skipped_nocow);
- prt_printf(&buf, "nouse\t%llu\n", s->skipped_nouse);
- prt_printf(&buf, "mi_btree_bitmap\t%llu\n", s->skipped_mi_btree_bitmap);
-
- if (!IS_ERR(ob)) {
- prt_printf(&buf, "allocated\t%llu\n", ob->bucket);
- trace_bucket_alloc(c, buf.buf);
- } else {
- prt_printf(&buf, "err\t%s\n", bch2_err_str(PTR_ERR(ob)));
- trace_bucket_alloc_fail(c, buf.buf);
- }
-
- printbuf_exit(&buf);
-}
-
-/**
- * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
- * @trans: transaction object
- * @ca: device to allocate from
- * @watermark: how important is this allocation?
- * @data_type: BCH_DATA_journal, btree, user...
- * @cl: if not NULL, closure to be used to wait if buckets not available
- * @nowait: don't wait on the freelist for buckets to become available, even if @cl is given
- * @usage: also returns the current device usage
- *
- * Returns: an open_bucket on success, or an ERR_PTR() on failure.
- */
-static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
- struct bch_dev *ca,
- enum bch_watermark watermark,
- enum bch_data_type data_type,
- struct closure *cl,
- bool nowait,
- struct bch_dev_usage *usage)
-{
- struct bch_fs *c = trans->c;
- struct open_bucket *ob = NULL;
- bool freespace = READ_ONCE(ca->mi.freespace_initialized);
- u64 avail;
- struct bucket_alloc_state s = {
- .btree_bitmap = data_type == BCH_DATA_btree,
- };
- bool waiting = nowait;
-again:
- bch2_dev_usage_read_fast(ca, usage);
- avail = dev_buckets_free(ca, *usage, watermark);
-
- if (usage->d[BCH_DATA_need_discard].buckets > avail)
- bch2_dev_do_discards(ca);
-
- if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
- bch2_gc_gens_async(c);
-
- if (should_invalidate_buckets(ca, *usage))
- bch2_dev_do_invalidates(ca);
-
- if (!avail) {
- if (cl && !waiting) {
- closure_wait(&c->freelist_wait, cl);
- waiting = true;
- goto again;
- }
-
- track_event_change(&c->times[BCH_TIME_blocked_allocate], true);
-
- ob = ERR_PTR(-BCH_ERR_freelist_empty);
- goto err;
- }
-
- if (waiting)
- closure_wake_up(&c->freelist_wait);
-alloc:
- ob = likely(freespace)
- ? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
- : bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);
-
- if (s.skipped_need_journal_commit * 2 > avail)
- bch2_journal_flush_async(&c->journal, NULL);
-
- if (!ob && s.btree_bitmap != BTREE_BITMAP_ANY) {
- s.btree_bitmap = BTREE_BITMAP_ANY;
- goto alloc;
- }
-
- if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
- freespace = false;
- goto alloc;
- }
-err:
- if (!ob)
- ob = ERR_PTR(-BCH_ERR_no_buckets_found);
-
- if (!IS_ERR(ob))
- ob->data_type = data_type;
-
- if (!IS_ERR(ob))
- count_event(c, bucket_alloc);
- else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
- count_event(c, bucket_alloc_fail);
-
- if (!IS_ERR(ob)
- ? trace_bucket_alloc_enabled()
- : trace_bucket_alloc_fail_enabled())
- trace_bucket_alloc2(c, ca, watermark, data_type, cl, usage, &s, ob);
-
- return ob;
-}
-
-struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
- enum bch_watermark watermark,
- enum bch_data_type data_type,
- struct closure *cl)
-{
- struct bch_dev_usage usage;
- struct open_bucket *ob;
-
- bch2_trans_do(c,
- PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
- data_type, cl, false, &usage)));
- return ob;
-}
-
-static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
- unsigned l, unsigned r)
-{
- return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
- (stripe->next_alloc[l] < stripe->next_alloc[r]));
-}
-
-#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)
-
-struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
- struct dev_stripe_state *stripe,
- struct bch_devs_mask *devs)
-{
- struct dev_alloc_list ret = { .nr = 0 };
- unsigned i;
-
- for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
- ret.devs[ret.nr++] = i;
-
- bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
- return ret;
-}
-
-static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
- struct dev_stripe_state *stripe,
- struct bch_dev_usage *usage)
-{
- u64 *v = stripe->next_alloc + ca->dev_idx;
- u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
- u64 free_space_inv = free_space
- ? div64_u64(1ULL << 48, free_space)
- : 1ULL << 48;
- u64 scale = *v / 4;
-
- if (*v + free_space_inv >= *v)
- *v += free_space_inv;
- else
- *v = U64_MAX;
-
- for (v = stripe->next_alloc;
- v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
- *v = *v < scale ? 0 : *v - scale;
-}
-
-void bch2_dev_stripe_increment(struct bch_dev *ca,
- struct dev_stripe_state *stripe)
-{
- struct bch_dev_usage usage;
-
- bch2_dev_usage_read_fast(ca, &usage);
- bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
-}
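
/*
 * Illustrative sketch (not part of the patch): the dev_stripe weighting used
 * by bch2_dev_stripe_increment_inlined() above. Each device accumulates a
 * cost inversely proportional to its free space, with a saturating add, and
 * the whole array is rescaled so the counters stay bounded; allocation then
 * prefers the device with the lowest accumulated cost. NR_DEVS and the free
 * space figures are assumptions for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_DEVS 3

static void stripe_increment(uint64_t next_alloc[NR_DEVS], unsigned dev,
			     uint64_t free_space)
{
	uint64_t inv = free_space ? (1ULL << 48) / free_space : 1ULL << 48;
	uint64_t scale = next_alloc[dev] / 4;

	/* saturating add, like the kernel version */
	next_alloc[dev] = next_alloc[dev] + inv >= next_alloc[dev]
		? next_alloc[dev] + inv : UINT64_MAX;

	for (unsigned i = 0; i < NR_DEVS; i++)
		next_alloc[i] = next_alloc[i] < scale ? 0 : next_alloc[i] - scale;
}

int main(void)
{
	uint64_t next_alloc[NR_DEVS] = { 0 };
	uint64_t free_space[NR_DEVS] = { 1000, 4000, 8000 };	/* buckets free (assumed) */

	/* allocate from whichever device currently has the lowest cost */
	for (int round = 0; round < 8; round++) {
		unsigned best = 0;
		for (unsigned i = 1; i < NR_DEVS; i++)
			if (next_alloc[i] < next_alloc[best])
				best = i;
		stripe_increment(next_alloc, best, free_space[best]);
		printf("round %d -> dev %u\n", round, best);
	}
	return 0;
}
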
-
-static int add_new_bucket(struct bch_fs *c,
- struct open_buckets *ptrs,
- struct bch_devs_mask *devs_may_alloc,
- unsigned nr_replicas,
- unsigned *nr_effective,
- bool *have_cache,
- struct open_bucket *ob)
-{
- unsigned durability = ob_dev(c, ob)->mi.durability;
-
- BUG_ON(*nr_effective >= nr_replicas);
-
- __clear_bit(ob->dev, devs_may_alloc->d);
- *nr_effective += durability;
- *have_cache |= !durability;
-
- ob_push(c, ptrs, ob);
-
- if (*nr_effective >= nr_replicas)
- return 1;
- if (ob->ec)
- return 1;
- return 0;
-}
-
-int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
- struct open_buckets *ptrs,
- struct dev_stripe_state *stripe,
- struct bch_devs_mask *devs_may_alloc,
- unsigned nr_replicas,
- unsigned *nr_effective,
- bool *have_cache,
- enum bch_write_flags flags,
- enum bch_data_type data_type,
- enum bch_watermark watermark,
- struct closure *cl)
-{
- struct bch_fs *c = trans->c;
- struct dev_alloc_list devs_sorted =
- bch2_dev_alloc_list(c, stripe, devs_may_alloc);
- int ret = -BCH_ERR_insufficient_devices;
-
- BUG_ON(*nr_effective >= nr_replicas);
-
- for (unsigned i = 0; i < devs_sorted.nr; i++) {
- struct bch_dev_usage usage;
- struct open_bucket *ob;
-
- unsigned dev = devs_sorted.devs[i];
- struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
- if (!ca)
- continue;
-
- if (!ca->mi.durability && *have_cache) {
- bch2_dev_put(ca);
- continue;
- }
-
- ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type,
- cl, flags & BCH_WRITE_ALLOC_NOWAIT, &usage);
- if (!IS_ERR(ob))
- bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
- bch2_dev_put(ca);
-
- if (IS_ERR(ob)) {
- ret = PTR_ERR(ob);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
- break;
- continue;
- }
-
- if (add_new_bucket(c, ptrs, devs_may_alloc,
- nr_replicas, nr_effective,
- have_cache, ob)) {
- ret = 0;
- break;
- }
- }
-
- return ret;
-}
-
-/* Allocate from stripes: */
-
-/*
- * if we can't allocate a new stripe because there are already too many
- * partially filled stripes, force allocating from an existing stripe even when
- * it's to a device we don't want:
- */
-
-static int bucket_alloc_from_stripe(struct btree_trans *trans,
- struct open_buckets *ptrs,
- struct write_point *wp,
- struct bch_devs_mask *devs_may_alloc,
- u16 target,
- unsigned nr_replicas,
- unsigned *nr_effective,
- bool *have_cache,
- enum bch_watermark watermark,
- enum bch_write_flags flags,
- struct closure *cl)
-{
- struct bch_fs *c = trans->c;
- struct dev_alloc_list devs_sorted;
- struct ec_stripe_head *h;
- struct open_bucket *ob;
- unsigned i, ec_idx;
- int ret = 0;
-
- if (nr_replicas < 2)
- return 0;
-
- if (ec_open_bucket(c, ptrs))
- return 0;
-
- h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
- if (IS_ERR(h))
- return PTR_ERR(h);
- if (!h)
- return 0;
-
- devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
-
- for (i = 0; i < devs_sorted.nr; i++)
- for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
- if (!h->s->blocks[ec_idx])
- continue;
-
- ob = c->open_buckets + h->s->blocks[ec_idx];
- if (ob->dev == devs_sorted.devs[i] &&
- !test_and_set_bit(ec_idx, h->s->blocks_allocated))
- goto got_bucket;
- }
- goto out_put_head;
-got_bucket:
- ob->ec_idx = ec_idx;
- ob->ec = h->s;
- ec_stripe_new_get(h->s, STRIPE_REF_io);
-
- ret = add_new_bucket(c, ptrs, devs_may_alloc,
- nr_replicas, nr_effective,
- have_cache, ob);
-out_put_head:
- bch2_ec_stripe_head_put(c, h);
- return ret;
-}
-
-/* Sector allocator */
-
-static bool want_bucket(struct bch_fs *c,
- struct write_point *wp,
- struct bch_devs_mask *devs_may_alloc,
- bool *have_cache, bool ec,
- struct open_bucket *ob)
-{
- struct bch_dev *ca = ob_dev(c, ob);
-
- if (!test_bit(ob->dev, devs_may_alloc->d))
- return false;
-
- if (ob->data_type != wp->data_type)
- return false;
-
- if (!ca->mi.durability &&
- (wp->data_type == BCH_DATA_btree || ec || *have_cache))
- return false;
-
- if (ec != (ob->ec != NULL))
- return false;
-
- return true;
-}
-
-static int bucket_alloc_set_writepoint(struct bch_fs *c,
- struct open_buckets *ptrs,
- struct write_point *wp,
- struct bch_devs_mask *devs_may_alloc,
- unsigned nr_replicas,
- unsigned *nr_effective,
- bool *have_cache,
- bool ec)
-{
- struct open_buckets ptrs_skip = { .nr = 0 };
- struct open_bucket *ob;
- unsigned i;
- int ret = 0;
-
- open_bucket_for_each(c, &wp->ptrs, ob, i) {
- if (!ret && want_bucket(c, wp, devs_may_alloc,
- have_cache, ec, ob))
- ret = add_new_bucket(c, ptrs, devs_may_alloc,
- nr_replicas, nr_effective,
- have_cache, ob);
- else
- ob_push(c, &ptrs_skip, ob);
- }
- wp->ptrs = ptrs_skip;
-
- return ret;
-}
-
-static int bucket_alloc_set_partial(struct bch_fs *c,
- struct open_buckets *ptrs,
- struct write_point *wp,
- struct bch_devs_mask *devs_may_alloc,
- unsigned nr_replicas,
- unsigned *nr_effective,
- bool *have_cache, bool ec,
- enum bch_watermark watermark)
-{
- int i, ret = 0;
-
- if (!c->open_buckets_partial_nr)
- return 0;
-
- spin_lock(&c->freelist_lock);
-
- if (!c->open_buckets_partial_nr)
- goto unlock;
-
- for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
- struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];
-
- if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) {
- struct bch_dev *ca = ob_dev(c, ob);
- struct bch_dev_usage usage;
- u64 avail;
-
- bch2_dev_usage_read_fast(ca, &usage);
- avail = dev_buckets_free(ca, usage, watermark) + ca->nr_partial_buckets;
- if (!avail)
- continue;
-
- array_remove_item(c->open_buckets_partial,
- c->open_buckets_partial_nr,
- i);
- ob->on_partial_list = false;
-
- rcu_read_lock();
- bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
- rcu_read_unlock();
-
- ret = add_new_bucket(c, ptrs, devs_may_alloc,
- nr_replicas, nr_effective,
- have_cache, ob);
- if (ret)
- break;
- }
- }
-unlock:
- spin_unlock(&c->freelist_lock);
- return ret;
-}
-
-static int __open_bucket_add_buckets(struct btree_trans *trans,
- struct open_buckets *ptrs,
- struct write_point *wp,
- struct bch_devs_list *devs_have,
- u16 target,
- bool erasure_code,
- unsigned nr_replicas,
- unsigned *nr_effective,
- bool *have_cache,
- enum bch_watermark watermark,
- enum bch_write_flags flags,
- struct closure *_cl)
-{
- struct bch_fs *c = trans->c;
- struct bch_devs_mask devs;
- struct open_bucket *ob;
- struct closure *cl = NULL;
- unsigned i;
- int ret;
-
- devs = target_rw_devs(c, wp->data_type, target);
-
- /* Don't allocate from devices we already have pointers to: */
- darray_for_each(*devs_have, i)
- __clear_bit(*i, devs.d);
-
- open_bucket_for_each(c, ptrs, ob, i)
- __clear_bit(ob->dev, devs.d);
-
- ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs,
- nr_replicas, nr_effective,
- have_cache, erasure_code);
- if (ret)
- return ret;
-
- ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
- nr_replicas, nr_effective,
- have_cache, erasure_code, watermark);
- if (ret)
- return ret;
-
- if (erasure_code) {
- ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs,
- target,
- nr_replicas, nr_effective,
- have_cache,
- watermark, flags, _cl);
- } else {
-retry_blocking:
- /*
- * Try nonblocking first, so that if one device is full we'll try from
- * other devices:
- */
- ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
- nr_replicas, nr_effective, have_cache,
- flags, wp->data_type, watermark, cl);
- if (ret &&
- !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
- !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
- !cl && _cl) {
- cl = _cl;
- goto retry_blocking;
- }
- }
-
- return ret;
-}
-
-static int open_bucket_add_buckets(struct btree_trans *trans,
- struct open_buckets *ptrs,
- struct write_point *wp,
- struct bch_devs_list *devs_have,
- u16 target,
- unsigned erasure_code,
- unsigned nr_replicas,
- unsigned *nr_effective,
- bool *have_cache,
- enum bch_watermark watermark,
- enum bch_write_flags flags,
- struct closure *cl)
-{
- int ret;
-
- if (erasure_code && !ec_open_bucket(trans->c, ptrs)) {
- ret = __open_bucket_add_buckets(trans, ptrs, wp,
- devs_have, target, erasure_code,
- nr_replicas, nr_effective, have_cache,
- watermark, flags, cl);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
- bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
- bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
- bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
- return ret;
- if (*nr_effective >= nr_replicas)
- return 0;
- }
-
- ret = __open_bucket_add_buckets(trans, ptrs, wp,
- devs_have, target, false,
- nr_replicas, nr_effective, have_cache,
- watermark, flags, cl);
- return ret < 0 ? ret : 0;
-}
-
-/**
- * should_drop_bucket - check if this open_bucket should go away
- * @ob: open_bucket to predicate on
- * @c: filesystem handle
- * @ca: if set, we're killing buckets for a particular device
- * @ec: if true, we're shutting down erasure coding and killing all ec
- * open_buckets
- *	(if neither @ca nor @ec is set, every open_bucket is dropped)
- * Returns: true if we should kill this open_bucket
- *
- * We're killing open_buckets because we're shutting down a device, erasure
- * coding, or the entire filesystem - check if this open_bucket matches:
- */
-static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
- struct bch_dev *ca, bool ec)
-{
- if (ec) {
- return ob->ec != NULL;
- } else if (ca) {
- bool drop = ob->dev == ca->dev_idx;
- struct open_bucket *ob2;
- unsigned i;
-
- if (!drop && ob->ec) {
- unsigned nr_blocks;
-
- mutex_lock(&ob->ec->lock);
- nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;
-
- for (i = 0; i < nr_blocks; i++) {
- if (!ob->ec->blocks[i])
- continue;
-
- ob2 = c->open_buckets + ob->ec->blocks[i];
- drop |= ob2->dev == ca->dev_idx;
- }
- mutex_unlock(&ob->ec->lock);
- }
-
- return drop;
- } else {
- return true;
- }
-}
-
-static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
- bool ec, struct write_point *wp)
-{
- struct open_buckets ptrs = { .nr = 0 };
- struct open_bucket *ob;
- unsigned i;
-
- mutex_lock(&wp->lock);
- open_bucket_for_each(c, &wp->ptrs, ob, i)
- if (should_drop_bucket(ob, c, ca, ec))
- bch2_open_bucket_put(c, ob);
- else
- ob_push(c, &ptrs, ob);
- wp->ptrs = ptrs;
- mutex_unlock(&wp->lock);
-}
-
-void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
- bool ec)
-{
- unsigned i;
-
- /* Next, close write points that point to this device... */
- for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
- bch2_writepoint_stop(c, ca, ec, &c->write_points[i]);
-
- bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point);
- bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
- bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);
-
- mutex_lock(&c->btree_reserve_cache_lock);
- while (c->btree_reserve_cache_nr) {
- struct btree_alloc *a =
- &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
-
- bch2_open_buckets_put(c, &a->ob);
- }
- mutex_unlock(&c->btree_reserve_cache_lock);
-
- spin_lock(&c->freelist_lock);
- i = 0;
- while (i < c->open_buckets_partial_nr) {
- struct open_bucket *ob =
- c->open_buckets + c->open_buckets_partial[i];
-
- if (should_drop_bucket(ob, c, ca, ec)) {
- --c->open_buckets_partial_nr;
- swap(c->open_buckets_partial[i],
- c->open_buckets_partial[c->open_buckets_partial_nr]);
-
- ob->on_partial_list = false;
-
- rcu_read_lock();
- bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
- rcu_read_unlock();
-
- spin_unlock(&c->freelist_lock);
- bch2_open_bucket_put(c, ob);
- spin_lock(&c->freelist_lock);
- } else {
- i++;
- }
- }
- spin_unlock(&c->freelist_lock);
-
- bch2_ec_stop_dev(c, ca);
-}
-
-static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
- unsigned long write_point)
-{
- unsigned hash =
- hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));
-
- return &c->write_points_hash[hash];
-}
-
-static struct write_point *__writepoint_find(struct hlist_head *head,
- unsigned long write_point)
-{
- struct write_point *wp;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(wp, head, node)
- if (wp->write_point == write_point)
- goto out;
- wp = NULL;
-out:
- rcu_read_unlock();
- return wp;
-}
-
-static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
-{
- u64 stranded = c->write_points_nr * c->bucket_size_max;
- u64 free = bch2_fs_usage_read_short(c).free;
-
- return stranded * factor > free;
-}
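
/*
 * Illustrative sketch (not part of the patch): the heuristic in
 * too_many_writepoints() above. Space sitting in partially filled write
 * point buckets is "stranded"; once it would exceed 1/factor of free space,
 * stop adding write points (the callers below use factor 32 when growing and
 * 8 when shrinking). The sizes here are made-up numbers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool too_many_wp(uint64_t nr_wp, uint64_t bucket_size_max,
			uint64_t free, unsigned factor)
{
	uint64_t stranded = nr_wp * bucket_size_max;

	return stranded * factor > free;
}

int main(void)
{
	/* 16 write points, 2048-sector buckets, 1M sectors free (assumed) */
	printf("ok to grow: %d\n", !too_many_wp(16, 2048, 1ULL << 20, 32));
	return 0;
}
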
-
-static bool try_increase_writepoints(struct bch_fs *c)
-{
- struct write_point *wp;
-
- if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
- too_many_writepoints(c, 32))
- return false;
-
- wp = c->write_points + c->write_points_nr++;
- hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
- return true;
-}
-
-static bool try_decrease_writepoints(struct btree_trans *trans, unsigned old_nr)
-{
- struct bch_fs *c = trans->c;
- struct write_point *wp;
- struct open_bucket *ob;
- unsigned i;
-
- mutex_lock(&c->write_points_hash_lock);
- if (c->write_points_nr < old_nr) {
- mutex_unlock(&c->write_points_hash_lock);
- return true;
- }
-
- if (c->write_points_nr == 1 ||
- !too_many_writepoints(c, 8)) {
- mutex_unlock(&c->write_points_hash_lock);
- return false;
- }
-
- wp = c->write_points + --c->write_points_nr;
-
- hlist_del_rcu(&wp->node);
- mutex_unlock(&c->write_points_hash_lock);
-
- bch2_trans_mutex_lock_norelock(trans, &wp->lock);
- open_bucket_for_each(c, &wp->ptrs, ob, i)
- open_bucket_free_unused(c, ob);
- wp->ptrs.nr = 0;
- mutex_unlock(&wp->lock);
- return true;
-}
-
-static struct write_point *writepoint_find(struct btree_trans *trans,
- unsigned long write_point)
-{
- struct bch_fs *c = trans->c;
- struct write_point *wp, *oldest;
- struct hlist_head *head;
-
- if (!(write_point & 1UL)) {
- wp = (struct write_point *) write_point;
- bch2_trans_mutex_lock_norelock(trans, &wp->lock);
- return wp;
- }
-
- head = writepoint_hash(c, write_point);
-restart_find:
- wp = __writepoint_find(head, write_point);
- if (wp) {
-lock_wp:
- bch2_trans_mutex_lock_norelock(trans, &wp->lock);
- if (wp->write_point == write_point)
- goto out;
- mutex_unlock(&wp->lock);
- goto restart_find;
- }
-restart_find_oldest:
- oldest = NULL;
- for (wp = c->write_points;
- wp < c->write_points + c->write_points_nr; wp++)
- if (!oldest || time_before64(wp->last_used, oldest->last_used))
- oldest = wp;
-
- bch2_trans_mutex_lock_norelock(trans, &oldest->lock);
- bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock);
- if (oldest >= c->write_points + c->write_points_nr ||
- try_increase_writepoints(c)) {
- mutex_unlock(&c->write_points_hash_lock);
- mutex_unlock(&oldest->lock);
- goto restart_find_oldest;
- }
-
- wp = __writepoint_find(head, write_point);
- if (wp && wp != oldest) {
- mutex_unlock(&c->write_points_hash_lock);
- mutex_unlock(&oldest->lock);
- goto lock_wp;
- }
-
- wp = oldest;
- hlist_del_rcu(&wp->node);
- wp->write_point = write_point;
- hlist_add_head_rcu(&wp->node, head);
- mutex_unlock(&c->write_points_hash_lock);
-out:
- wp->last_used = local_clock();
- return wp;
-}
-
-static noinline void
-deallocate_extra_replicas(struct bch_fs *c,
- struct open_buckets *ptrs,
- struct open_buckets *ptrs_no_use,
- unsigned extra_replicas)
-{
- struct open_buckets ptrs2 = { 0 };
- struct open_bucket *ob;
- unsigned i;
-
- open_bucket_for_each(c, ptrs, ob, i) {
- unsigned d = ob_dev(c, ob)->mi.durability;
-
- if (d && d <= extra_replicas) {
- extra_replicas -= d;
- ob_push(c, ptrs_no_use, ob);
- } else {
- ob_push(c, &ptrs2, ob);
- }
- }
-
- *ptrs = ptrs2;
-}
-
-/*
- * Get us a write point we can allocate from, return with it locked:
- */
-int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
- unsigned target,
- unsigned erasure_code,
- struct write_point_specifier write_point,
- struct bch_devs_list *devs_have,
- unsigned nr_replicas,
- unsigned nr_replicas_required,
- enum bch_watermark watermark,
- enum bch_write_flags flags,
- struct closure *cl,
- struct write_point **wp_ret)
-{
- struct bch_fs *c = trans->c;
- struct write_point *wp;
- struct open_bucket *ob;
- struct open_buckets ptrs;
- unsigned nr_effective, write_points_nr;
- bool have_cache;
- int ret;
- int i;
-
- if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING))
- erasure_code = false;
-
- BUG_ON(!nr_replicas || !nr_replicas_required);
-retry:
- ptrs.nr = 0;
- nr_effective = 0;
- write_points_nr = c->write_points_nr;
- have_cache = false;
-
- *wp_ret = wp = writepoint_find(trans, write_point.v);
-
- ret = bch2_trans_relock(trans);
- if (ret)
- goto err;
-
- /* metadata may not allocate on cache devices: */
- if (wp->data_type != BCH_DATA_user)
- have_cache = true;
-
- if (target && !(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
- ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
- target, erasure_code,
- nr_replicas, &nr_effective,
- &have_cache, watermark,
- flags, NULL);
- if (!ret ||
- bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto alloc_done;
-
- /* Don't retry from all devices if we're out of open buckets: */
- if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) {
- int ret2 = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
- target, erasure_code,
- nr_replicas, &nr_effective,
- &have_cache, watermark,
- flags, cl);
- if (!ret2 ||
- bch2_err_matches(ret2, BCH_ERR_transaction_restart) ||
- bch2_err_matches(ret2, BCH_ERR_open_buckets_empty)) {
- ret = ret2;
- goto alloc_done;
- }
- }
-
- /*
- * Only try to allocate cache (durability = 0 devices) from the
- * specified target:
- */
- have_cache = true;
-
- ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
- 0, erasure_code,
- nr_replicas, &nr_effective,
- &have_cache, watermark,
- flags, cl);
- } else {
- ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
- target, erasure_code,
- nr_replicas, &nr_effective,
- &have_cache, watermark,
- flags, cl);
- }
-alloc_done:
- BUG_ON(!ret && nr_effective < nr_replicas);
-
- if (erasure_code && !ec_open_bucket(c, &ptrs))
- pr_debug("failed to get ec bucket: ret %u", ret);
-
- if (ret == -BCH_ERR_insufficient_devices &&
- nr_effective >= nr_replicas_required)
- ret = 0;
-
- if (ret)
- goto err;
-
- if (nr_effective > nr_replicas)
- deallocate_extra_replicas(c, &ptrs, &wp->ptrs, nr_effective - nr_replicas);
-
- /* Free buckets we didn't use: */
- open_bucket_for_each(c, &wp->ptrs, ob, i)
- open_bucket_free_unused(c, ob);
-
- wp->ptrs = ptrs;
-
- wp->sectors_free = UINT_MAX;
-
- open_bucket_for_each(c, &wp->ptrs, ob, i)
- wp->sectors_free = min(wp->sectors_free, ob->sectors_free);
-
- BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
-
- return 0;
-err:
- open_bucket_for_each(c, &wp->ptrs, ob, i)
- if (ptrs.nr < ARRAY_SIZE(ptrs.v))
- ob_push(c, &ptrs, ob);
- else
- open_bucket_free_unused(c, ob);
- wp->ptrs = ptrs;
-
- mutex_unlock(&wp->lock);
-
- if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
- try_decrease_writepoints(trans, write_points_nr))
- goto retry;
-
- if (cl && bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
- ret = -BCH_ERR_bucket_alloc_blocked;
-
- if (cl && !(flags & BCH_WRITE_ALLOC_NOWAIT) &&
- bch2_err_matches(ret, BCH_ERR_freelist_empty))
- ret = -BCH_ERR_bucket_alloc_blocked;
-
- return ret;
-}
-
-struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
-{
- struct bch_dev *ca = ob_dev(c, ob);
-
- return (struct bch_extent_ptr) {
- .type = 1 << BCH_EXTENT_ENTRY_ptr,
- .gen = ob->gen,
- .dev = ob->dev,
- .offset = bucket_to_sector(ca, ob->bucket) +
- ca->mi.bucket_size -
- ob->sectors_free,
- };
-}
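
/*
 * Illustrative sketch (not part of the patch): the pointer offset computed by
 * bch2_ob_ptr() above is the next unwritten sector in the open bucket, i.e.
 * the bucket's start sector plus the sectors already consumed. The bucket
 * number and sizes below are assumed values.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bucket = 42, bucket_size = 128, sectors_free = 100;
	uint64_t bucket_start = bucket * bucket_size;	/* bucket_to_sector() stand-in */
	uint64_t offset = bucket_start + bucket_size - sectors_free;

	printf("ptr offset = %llu (%llu sectors already allocated)\n",
	       (unsigned long long) offset,
	       (unsigned long long) (bucket_size - sectors_free));
	return 0;
}
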
-
-void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
- struct bkey_i *k, unsigned sectors,
- bool cached)
-{
- bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
-}
-
-/*
- * Release the write point: keep the open_buckets that still have space and
- * put our references to the ones that are now full
- */
-void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
-{
- bch2_alloc_sectors_done_inlined(c, wp);
-}
-
-static inline void writepoint_init(struct write_point *wp,
- enum bch_data_type type)
-{
- mutex_init(&wp->lock);
- wp->data_type = type;
-
- INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
- INIT_LIST_HEAD(&wp->writes);
- spin_lock_init(&wp->writes_lock);
-}
-
-void bch2_fs_allocator_foreground_init(struct bch_fs *c)
-{
- struct open_bucket *ob;
- struct write_point *wp;
-
- mutex_init(&c->write_points_hash_lock);
- c->write_points_nr = ARRAY_SIZE(c->write_points);
-
-	/* open bucket 0 is a sentinel NULL: */
- spin_lock_init(&c->open_buckets[0].lock);
-
- for (ob = c->open_buckets + 1;
- ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
- spin_lock_init(&ob->lock);
- c->open_buckets_nr_free++;
-
- ob->freelist = c->open_buckets_freelist;
- c->open_buckets_freelist = ob - c->open_buckets;
- }
-
- writepoint_init(&c->btree_write_point, BCH_DATA_btree);
- writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
- writepoint_init(&c->copygc_write_point, BCH_DATA_user);
-
- for (wp = c->write_points;
- wp < c->write_points + c->write_points_nr; wp++) {
- writepoint_init(wp, BCH_DATA_user);
-
- wp->last_used = local_clock();
- wp->write_point = (unsigned long) wp;
- hlist_add_head_rcu(&wp->node,
- writepoint_hash(c, wp->write_point));
- }
-}
-
-void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
-{
- struct bch_dev *ca = ob_dev(c, ob);
- unsigned data_type = ob->data_type;
- barrier(); /* READ_ONCE() doesn't work on bitfields */
-
- prt_printf(out, "%zu ref %u ",
- ob - c->open_buckets,
- atomic_read(&ob->pin));
- bch2_prt_data_type(out, data_type);
- prt_printf(out, " %u:%llu gen %u allocated %u/%u",
- ob->dev, ob->bucket, ob->gen,
- ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
- if (ob->ec)
- prt_printf(out, " ec idx %llu", ob->ec->idx);
- if (ob->on_partial_list)
- prt_str(out, " partial");
- prt_newline(out);
-}
-
-void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c,
- struct bch_dev *ca)
-{
- struct open_bucket *ob;
-
- out->atomic++;
-
- for (ob = c->open_buckets;
- ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
- ob++) {
- spin_lock(&ob->lock);
- if (ob->valid && (!ca || ob->dev == ca->dev_idx))
- bch2_open_bucket_to_text(out, c, ob);
- spin_unlock(&ob->lock);
- }
-
- --out->atomic;
-}
-
-void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c)
-{
- unsigned i;
-
- out->atomic++;
- spin_lock(&c->freelist_lock);
-
- for (i = 0; i < c->open_buckets_partial_nr; i++)
- bch2_open_bucket_to_text(out, c,
- c->open_buckets + c->open_buckets_partial[i]);
-
- spin_unlock(&c->freelist_lock);
- --out->atomic;
-}
-
-static const char * const bch2_write_point_states[] = {
-#define x(n) #n,
- WRITE_POINT_STATES()
-#undef x
- NULL
-};
-
-static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c,
- struct write_point *wp)
-{
- struct open_bucket *ob;
- unsigned i;
-
- prt_printf(out, "%lu: ", wp->write_point);
- prt_human_readable_u64(out, wp->sectors_allocated);
-
- prt_printf(out, " last wrote: ");
- bch2_pr_time_units(out, sched_clock() - wp->last_used);
-
- for (i = 0; i < WRITE_POINT_STATE_NR; i++) {
- prt_printf(out, " %s: ", bch2_write_point_states[i]);
- bch2_pr_time_units(out, wp->time[i]);
- }
-
- prt_newline(out);
-
- printbuf_indent_add(out, 2);
- open_bucket_for_each(c, &wp->ptrs, ob, i)
- bch2_open_bucket_to_text(out, c, ob);
- printbuf_indent_sub(out, 2);
-}
-
-void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
-{
- struct write_point *wp;
-
- prt_str(out, "Foreground write points\n");
- for (wp = c->write_points;
- wp < c->write_points + ARRAY_SIZE(c->write_points);
- wp++)
- bch2_write_point_to_text(out, c, wp);
-
- prt_str(out, "Copygc write point\n");
- bch2_write_point_to_text(out, c, &c->copygc_write_point);
-
- prt_str(out, "Rebalance write point\n");
- bch2_write_point_to_text(out, c, &c->rebalance_write_point);
-
- prt_str(out, "Btree write point\n");
- bch2_write_point_to_text(out, c, &c->btree_write_point);
-}
-
-void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
-{
- unsigned nr[BCH_DATA_NR];
-
- memset(nr, 0, sizeof(nr));
-
- for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
- nr[c->open_buckets[i].data_type]++;
-
- printbuf_tabstops_reset(out);
- printbuf_tabstop_push(out, 24);
-
- prt_printf(out, "capacity\t%llu\n", c->capacity);
- prt_printf(out, "reserved\t%llu\n", c->reserved);
- prt_printf(out, "hidden\t%llu\n", percpu_u64_get(&c->usage->hidden));
- prt_printf(out, "btree\t%llu\n", percpu_u64_get(&c->usage->btree));
- prt_printf(out, "data\t%llu\n", percpu_u64_get(&c->usage->data));
- prt_printf(out, "cached\t%llu\n", percpu_u64_get(&c->usage->cached));
- prt_printf(out, "reserved\t%llu\n", percpu_u64_get(&c->usage->reserved));
- prt_printf(out, "online_reserved\t%llu\n", percpu_u64_get(c->online_reserved));
- prt_printf(out, "nr_inodes\t%llu\n", percpu_u64_get(&c->usage->nr_inodes));
-
- prt_newline(out);
- prt_printf(out, "freelist_wait\t%s\n", c->freelist_wait.list.first ? "waiting" : "empty");
- prt_printf(out, "open buckets allocated\t%i\n", OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
- prt_printf(out, "open buckets total\t%u\n", OPEN_BUCKETS_COUNT);
- prt_printf(out, "open_buckets_wait\t%s\n", c->open_buckets_wait.list.first ? "waiting" : "empty");
- prt_printf(out, "open_buckets_btree\t%u\n", nr[BCH_DATA_btree]);
- prt_printf(out, "open_buckets_user\t%u\n", nr[BCH_DATA_user]);
- prt_printf(out, "btree reserve cache\t%u\n", c->btree_reserve_cache_nr);
-}
-
-void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
-{
- struct bch_fs *c = ca->fs;
- struct bch_dev_usage stats = bch2_dev_usage_read(ca);
- unsigned nr[BCH_DATA_NR];
-
- memset(nr, 0, sizeof(nr));
-
- for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
- nr[c->open_buckets[i].data_type]++;
-
- bch2_dev_usage_to_text(out, ca, &stats);
-
- prt_newline(out);
-
- prt_printf(out, "reserves:\n");
- for (unsigned i = 0; i < BCH_WATERMARK_NR; i++)
- prt_printf(out, "%s\t%llu\r\n", bch2_watermarks[i], bch2_dev_buckets_reserved(ca, i));
-
- prt_newline(out);
-
- printbuf_tabstops_reset(out);
- printbuf_tabstop_push(out, 12);
- printbuf_tabstop_push(out, 16);
-
- prt_printf(out, "open buckets\t%i\r\n", ca->nr_open_buckets);
- prt_printf(out, "buckets to invalidate\t%llu\r\n", should_invalidate_buckets(ca, stats));
-}
-
-static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
-{
- struct printbuf buf = PRINTBUF;
-
- prt_printf(&buf, "Allocator stuck? Waited for %u seconds\n",
- c->opts.allocator_stuck_timeout);
-
- prt_printf(&buf, "Allocator debug:\n");
- printbuf_indent_add(&buf, 2);
- bch2_fs_alloc_debug_to_text(&buf, c);
- printbuf_indent_sub(&buf, 2);
- prt_newline(&buf);
-
- for_each_online_member(c, ca) {
- prt_printf(&buf, "Dev %u:\n", ca->dev_idx);
- printbuf_indent_add(&buf, 2);
- bch2_dev_alloc_debug_to_text(&buf, ca);
- printbuf_indent_sub(&buf, 2);
- prt_newline(&buf);
- }
-
- prt_printf(&buf, "Copygc debug:\n");
- printbuf_indent_add(&buf, 2);
- bch2_copygc_wait_to_text(&buf, c);
- printbuf_indent_sub(&buf, 2);
- prt_newline(&buf);
-
- prt_printf(&buf, "Journal debug:\n");
- printbuf_indent_add(&buf, 2);
- bch2_journal_debug_to_text(&buf, &c->journal);
- printbuf_indent_sub(&buf, 2);
-
- bch2_print_string_as_lines(KERN_ERR, buf.buf);
- printbuf_exit(&buf);
-}
-
-static inline unsigned allocator_wait_timeout(struct bch_fs *c)
-{
- if (c->allocator_last_stuck &&
- time_after(c->allocator_last_stuck + HZ * 60 * 2, jiffies))
- return 0;
-
- return c->opts.allocator_stuck_timeout * HZ;
-}
-
-void __bch2_wait_on_allocator(struct bch_fs *c, struct closure *cl)
-{
- unsigned t = allocator_wait_timeout(c);
-
- if (t && closure_sync_timeout(cl, t)) {
- c->allocator_last_stuck = jiffies;
- bch2_print_allocator_stuck(c);
- }
-
- closure_sync(cl);
-}
diff --git a/fs/bcachefs/alloc_foreground.h b/fs/bcachefs/alloc_foreground.h
deleted file mode 100644
index 1a16fd5bd4f8..000000000000
--- a/fs/bcachefs/alloc_foreground.h
+++ /dev/null
@@ -1,242 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_ALLOC_FOREGROUND_H
-#define _BCACHEFS_ALLOC_FOREGROUND_H
-
-#include "bcachefs.h"
-#include "alloc_types.h"
-#include "extents.h"
-#include "sb-members.h"
-
-#include <linux/hash.h>
-
-struct bkey;
-struct bch_dev;
-struct bch_fs;
-struct bch_devs_list;
-
-extern const char * const bch2_watermarks[];
-
-void bch2_reset_alloc_cursors(struct bch_fs *);
-
-struct dev_alloc_list {
- unsigned nr;
- u8 devs[BCH_SB_MEMBERS_MAX];
-};
-
-struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *,
- struct dev_stripe_state *,
- struct bch_devs_mask *);
-void bch2_dev_stripe_increment(struct bch_dev *, struct dev_stripe_state *);
-
-long bch2_bucket_alloc_new_fs(struct bch_dev *);
-
-static inline struct bch_dev *ob_dev(struct bch_fs *c, struct open_bucket *ob)
-{
- return bch2_dev_have_ref(c, ob->dev);
-}
-
-struct open_bucket *bch2_bucket_alloc(struct bch_fs *, struct bch_dev *,
- enum bch_watermark, enum bch_data_type,
- struct closure *);
-
-static inline void ob_push(struct bch_fs *c, struct open_buckets *obs,
- struct open_bucket *ob)
-{
- BUG_ON(obs->nr >= ARRAY_SIZE(obs->v));
-
- obs->v[obs->nr++] = ob - c->open_buckets;
-}
-
-#define open_bucket_for_each(_c, _obs, _ob, _i) \
- for ((_i) = 0; \
- (_i) < (_obs)->nr && \
- ((_ob) = (_c)->open_buckets + (_obs)->v[_i], true); \
- (_i)++)
-
-static inline struct open_bucket *ec_open_bucket(struct bch_fs *c,
- struct open_buckets *obs)
-{
- struct open_bucket *ob;
- unsigned i;
-
- open_bucket_for_each(c, obs, ob, i)
- if (ob->ec)
- return ob;
-
- return NULL;
-}
-
-void bch2_open_bucket_write_error(struct bch_fs *,
- struct open_buckets *, unsigned);
-
-void __bch2_open_bucket_put(struct bch_fs *, struct open_bucket *);
-
-static inline void bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
-{
- if (atomic_dec_and_test(&ob->pin))
- __bch2_open_bucket_put(c, ob);
-}
-
-static inline void bch2_open_buckets_put(struct bch_fs *c,
- struct open_buckets *ptrs)
-{
- struct open_bucket *ob;
- unsigned i;
-
- open_bucket_for_each(c, ptrs, ob, i)
- bch2_open_bucket_put(c, ob);
- ptrs->nr = 0;
-}
-
-static inline void bch2_alloc_sectors_done_inlined(struct bch_fs *c, struct write_point *wp)
-{
- struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
- struct open_bucket *ob;
- unsigned i;
-
- open_bucket_for_each(c, &wp->ptrs, ob, i)
- ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
- wp->ptrs = keep;
-
- mutex_unlock(&wp->lock);
-
- bch2_open_buckets_put(c, &ptrs);
-}
-
-static inline void bch2_open_bucket_get(struct bch_fs *c,
- struct write_point *wp,
- struct open_buckets *ptrs)
-{
- struct open_bucket *ob;
- unsigned i;
-
- open_bucket_for_each(c, &wp->ptrs, ob, i) {
- ob->data_type = wp->data_type;
- atomic_inc(&ob->pin);
- ob_push(c, ptrs, ob);
- }
-}
-
-static inline open_bucket_idx_t *open_bucket_hashslot(struct bch_fs *c,
- unsigned dev, u64 bucket)
-{
- return c->open_buckets_hash +
- (jhash_3words(dev, bucket, bucket >> 32, 0) &
- (OPEN_BUCKETS_COUNT - 1));
-}
-
-static inline bool bch2_bucket_is_open(struct bch_fs *c, unsigned dev, u64 bucket)
-{
- open_bucket_idx_t slot = *open_bucket_hashslot(c, dev, bucket);
-
- while (slot) {
- struct open_bucket *ob = &c->open_buckets[slot];
-
- if (ob->dev == dev && ob->bucket == bucket)
- return true;
-
- slot = ob->hash;
- }
-
- return false;
-}
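
/*
 * Illustrative sketch (not part of the patch): the open-bucket hash used by
 * bch2_bucket_is_open() above is a table of open_bucket indices chained
 * through ob->hash, with index 0 doubling as end-of-chain because open
 * bucket 0 is the sentinel. The real code hashes (dev, bucket) with
 * jhash_3words(); a trivial stand-in hash is used here.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_OPEN_BUCKETS	16	/* power of two, like OPEN_BUCKETS_COUNT */

struct ob { unsigned dev; uint64_t bucket; uint16_t hash; /* next index in chain */ };

static struct ob open_buckets[NR_OPEN_BUCKETS];	/* [0] is the sentinel */
static uint16_t hash_table[NR_OPEN_BUCKETS];

static unsigned hashslot(unsigned dev, uint64_t bucket)
{
	return (dev * 31 + (unsigned) bucket) & (NR_OPEN_BUCKETS - 1);
}

static bool bucket_is_open(unsigned dev, uint64_t bucket)
{
	for (uint16_t slot = hash_table[hashslot(dev, bucket)];
	     slot;
	     slot = open_buckets[slot].hash)
		if (open_buckets[slot].dev == dev &&
		    open_buckets[slot].bucket == bucket)
			return true;
	return false;
}

int main(void)
{
	/* insert open bucket index 3 for (dev 1, bucket 42), chaining into its slot */
	open_buckets[3] = (struct ob) { .dev = 1, .bucket = 42,
					.hash = hash_table[hashslot(1, 42)] };
	hash_table[hashslot(1, 42)] = 3;

	printf("open(1,42)=%d open(1,43)=%d\n",
	       bucket_is_open(1, 42), bucket_is_open(1, 43));
	return 0;
}
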
-
-static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64 bucket)
-{
- bool ret;
-
- if (bch2_bucket_is_open(c, dev, bucket))
- return true;
-
- spin_lock(&c->freelist_lock);
- ret = bch2_bucket_is_open(c, dev, bucket);
- spin_unlock(&c->freelist_lock);
-
- return ret;
-}
-
-enum bch_write_flags;
-int bch2_bucket_alloc_set_trans(struct btree_trans *, struct open_buckets *,
- struct dev_stripe_state *, struct bch_devs_mask *,
- unsigned, unsigned *, bool *, enum bch_write_flags,
- enum bch_data_type, enum bch_watermark,
- struct closure *);
-
-int bch2_alloc_sectors_start_trans(struct btree_trans *,
- unsigned, unsigned,
- struct write_point_specifier,
- struct bch_devs_list *,
- unsigned, unsigned,
- enum bch_watermark,
- enum bch_write_flags,
- struct closure *,
- struct write_point **);
-
-struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *, struct open_bucket *);
-
-/*
- * Append pointers to the space we just allocated to @k, and mark @sectors space
- * as allocated out of @ob
- */
-static inline void
-bch2_alloc_sectors_append_ptrs_inlined(struct bch_fs *c, struct write_point *wp,
- struct bkey_i *k, unsigned sectors,
- bool cached)
-{
- struct open_bucket *ob;
- unsigned i;
-
- BUG_ON(sectors > wp->sectors_free);
- wp->sectors_free -= sectors;
- wp->sectors_allocated += sectors;
-
- open_bucket_for_each(c, &wp->ptrs, ob, i) {
- struct bch_dev *ca = ob_dev(c, ob);
- struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
-
- ptr.cached = cached ||
- (!ca->mi.durability &&
- wp->data_type == BCH_DATA_user);
-
- bch2_bkey_append_ptr(k, ptr);
-
- BUG_ON(sectors > ob->sectors_free);
- ob->sectors_free -= sectors;
- }
-}
-
-void bch2_alloc_sectors_append_ptrs(struct bch_fs *, struct write_point *,
- struct bkey_i *, unsigned, bool);
-void bch2_alloc_sectors_done(struct bch_fs *, struct write_point *);
-
-void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *, bool);
-
-static inline struct write_point_specifier writepoint_hashed(unsigned long v)
-{
- return (struct write_point_specifier) { .v = v | 1 };
-}
-
-static inline struct write_point_specifier writepoint_ptr(struct write_point *wp)
-{
- return (struct write_point_specifier) { .v = (unsigned long) wp };
-}
-
-void bch2_fs_allocator_foreground_init(struct bch_fs *);
-
-void bch2_open_bucket_to_text(struct printbuf *, struct bch_fs *, struct open_bucket *);
-void bch2_open_buckets_to_text(struct printbuf *, struct bch_fs *, struct bch_dev *);
-void bch2_open_buckets_partial_to_text(struct printbuf *, struct bch_fs *);
-
-void bch2_write_points_to_text(struct printbuf *, struct bch_fs *);
-
-void bch2_fs_alloc_debug_to_text(struct printbuf *, struct bch_fs *);
-void bch2_dev_alloc_debug_to_text(struct printbuf *, struct bch_dev *);
-
-void __bch2_wait_on_allocator(struct bch_fs *, struct closure *);
-static inline void bch2_wait_on_allocator(struct bch_fs *c, struct closure *cl)
-{
- if (cl->closure_get_happened)
- __bch2_wait_on_allocator(c, cl);
-}
-
-#endif /* _BCACHEFS_ALLOC_FOREGROUND_H */
diff --git a/fs/bcachefs/alloc_types.h b/fs/bcachefs/alloc_types.h
deleted file mode 100644
index 9bbb28e90b93..000000000000
--- a/fs/bcachefs/alloc_types.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_ALLOC_TYPES_H
-#define _BCACHEFS_ALLOC_TYPES_H
-
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-
-#include "clock_types.h"
-#include "fifo.h"
-
-struct bucket_alloc_state {
- enum {
- BTREE_BITMAP_NO,
- BTREE_BITMAP_YES,
- BTREE_BITMAP_ANY,
- } btree_bitmap;
-
- u64 buckets_seen;
- u64 skipped_open;
- u64 skipped_need_journal_commit;
- u64 skipped_nocow;
- u64 skipped_nouse;
- u64 skipped_mi_btree_bitmap;
-};
-
-#define BCH_WATERMARKS() \
- x(stripe) \
- x(normal) \
- x(copygc) \
- x(btree) \
- x(btree_copygc) \
- x(reclaim) \
- x(interior_updates)
-
-enum bch_watermark {
-#define x(name) BCH_WATERMARK_##name,
- BCH_WATERMARKS()
-#undef x
- BCH_WATERMARK_NR,
-};
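
/*
 * Illustrative sketch (not part of the patch): the x-macro pattern behind
 * BCH_WATERMARKS() above (and WRITE_POINT_STATES() below) generates the enum
 * and its string table from a single list, so the two can never drift apart.
 * EXAMPLE_STATES() here is a made-up two-entry list.
 */
#include <stdio.h>

#define EXAMPLE_STATES()	\
	x(stopped)		\
	x(running)

enum example_state {
#define x(n)	EXAMPLE_##n,
	EXAMPLE_STATES()
#undef x
	EXAMPLE_NR
};

static const char * const example_states[] = {
#define x(n)	#n,
	EXAMPLE_STATES()
#undef x
	NULL
};

int main(void)
{
	for (int i = 0; i < EXAMPLE_NR; i++)
		printf("%d = %s\n", i, example_states[i]);
	return 0;
}
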
-
-#define BCH_WATERMARK_BITS 3
-#define BCH_WATERMARK_MASK ~(~0U << BCH_WATERMARK_BITS)
-
-#define OPEN_BUCKETS_COUNT 1024
-
-#define WRITE_POINT_HASH_NR 32
-#define WRITE_POINT_MAX 32
-
-/*
- * 0 is never a valid open_bucket_idx_t:
- */
-typedef u16 open_bucket_idx_t;
-
-struct open_bucket {
- spinlock_t lock;
- atomic_t pin;
- open_bucket_idx_t freelist;
- open_bucket_idx_t hash;
-
- /*
- * When an open bucket has an ec_stripe attached, this is the index of
- * the block in the stripe this open_bucket corresponds to:
- */
- u8 ec_idx;
- enum bch_data_type data_type:6;
- unsigned valid:1;
- unsigned on_partial_list:1;
-
- u8 dev;
- u8 gen;
- u32 sectors_free;
- u64 bucket;
- struct ec_stripe_new *ec;
-};
-
-#define OPEN_BUCKET_LIST_MAX 15
-
-struct open_buckets {
- open_bucket_idx_t nr;
- open_bucket_idx_t v[OPEN_BUCKET_LIST_MAX];
-};
-
-struct dev_stripe_state {
- u64 next_alloc[BCH_SB_MEMBERS_MAX];
-};
-
-#define WRITE_POINT_STATES() \
- x(stopped) \
- x(waiting_io) \
- x(waiting_work) \
- x(running)
-
-enum write_point_state {
-#define x(n) WRITE_POINT_##n,
- WRITE_POINT_STATES()
-#undef x
- WRITE_POINT_STATE_NR
-};
-
-struct write_point {
- struct {
- struct hlist_node node;
- struct mutex lock;
- u64 last_used;
- unsigned long write_point;
- enum bch_data_type data_type;
-
- /* calculated based on how many pointers we're actually going to use: */
- unsigned sectors_free;
-
- struct open_buckets ptrs;
- struct dev_stripe_state stripe;
-
- u64 sectors_allocated;
- } __aligned(SMP_CACHE_BYTES);
-
- struct {
- struct work_struct index_update_work;
-
- struct list_head writes;
- spinlock_t writes_lock;
-
- enum write_point_state state;
- u64 last_state_change;
- u64 time[WRITE_POINT_STATE_NR];
- } __aligned(SMP_CACHE_BYTES);
-};
-
-struct write_point_specifier {
- unsigned long v;
-};
-
-#endif /* _BCACHEFS_ALLOC_TYPES_H */
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
deleted file mode 100644
index 654a58132a4d..000000000000
--- a/fs/bcachefs/backpointers.c
+++ /dev/null
@@ -1,1068 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "bcachefs.h"
-#include "bbpos.h"
-#include "alloc_background.h"
-#include "backpointers.h"
-#include "bkey_buf.h"
-#include "btree_cache.h"
-#include "btree_update.h"
-#include "btree_update_interior.h"
-#include "btree_write_buffer.h"
-#include "checksum.h"
-#include "disk_accounting.h"
-#include "error.h"
-
-#include <linux/mm.h>
-
-static bool extent_matches_bp(struct bch_fs *c,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c k,
- struct bpos bucket,
- struct bch_backpointer bp)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
-
- rcu_read_lock();
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- struct bpos bucket2;
- struct bch_backpointer bp2;
-
- if (p.ptr.cached)
- continue;
-
- struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
- if (!ca)
- continue;
-
- bch2_extent_ptr_to_bp(c, ca, btree_id, level, k, p, entry, &bucket2, &bp2);
- if (bpos_eq(bucket, bucket2) &&
- !memcmp(&bp, &bp2, sizeof(bp))) {
- rcu_read_unlock();
- return true;
- }
- }
- rcu_read_unlock();
-
- return false;
-}
-
-int bch2_backpointer_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
- int ret = 0;
-
- bkey_fsck_err_on(bp.v->level > BTREE_MAX_DEPTH,
- c, backpointer_level_bad,
- "backpointer level bad: %u >= %u",
- bp.v->level, BTREE_MAX_DEPTH);
-
- rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp.k->p.inode);
- if (!ca) {
- /* these will be caught by fsck */
- rcu_read_unlock();
- return 0;
- }
-
- struct bpos bucket = bp_pos_to_bucket(ca, bp.k->p);
- struct bpos bp_pos = bucket_pos_to_bp_noerror(ca, bucket, bp.v->bucket_offset);
- rcu_read_unlock();
-
- bkey_fsck_err_on((bp.v->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT) >= ca->mi.bucket_size ||
- !bpos_eq(bp.k->p, bp_pos),
- c, backpointer_bucket_offset_wrong,
- "backpointer bucket_offset wrong");
-fsck_err:
- return ret;
-}
-
-void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
-{
- prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=",
- bch2_btree_id_str(bp->btree_id),
- bp->level,
- (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
- (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
- bp->bucket_len);
- bch2_bpos_to_text(out, bp->pos);
-}
-
-void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
-{
- rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu_noerror(c, k.k->p.inode);
- if (ca) {
- struct bpos bucket = bp_pos_to_bucket(ca, k.k->p);
- rcu_read_unlock();
- prt_str(out, "bucket=");
- bch2_bpos_to_text(out, bucket);
- prt_str(out, " ");
- } else {
- rcu_read_unlock();
- }
-
- bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
-}
-
-void bch2_backpointer_swab(struct bkey_s k)
-{
- struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);
-
- bp.v->bucket_offset = swab40(bp.v->bucket_offset);
- bp.v->bucket_len = swab32(bp.v->bucket_len);
- bch2_bpos_swab(&bp.v->pos);
-}
-
-static noinline int backpointer_mod_err(struct btree_trans *trans,
- struct bch_backpointer bp,
- struct bkey_s_c bp_k,
- struct bkey_s_c orig_k,
- bool insert)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
-
- if (insert) {
- prt_printf(&buf, "existing backpointer found when inserting ");
- bch2_backpointer_to_text(&buf, &bp);
- prt_newline(&buf);
- printbuf_indent_add(&buf, 2);
-
- prt_printf(&buf, "found ");
- bch2_bkey_val_to_text(&buf, c, bp_k);
- prt_newline(&buf);
-
- prt_printf(&buf, "for ");
- bch2_bkey_val_to_text(&buf, c, orig_k);
-
- bch_err(c, "%s", buf.buf);
- } else if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
- prt_printf(&buf, "backpointer not found when deleting\n");
- printbuf_indent_add(&buf, 2);
-
- prt_printf(&buf, "searching for ");
- bch2_backpointer_to_text(&buf, &bp);
- prt_newline(&buf);
-
- prt_printf(&buf, "got ");
- bch2_bkey_val_to_text(&buf, c, bp_k);
- prt_newline(&buf);
-
- prt_printf(&buf, "for ");
- bch2_bkey_val_to_text(&buf, c, orig_k);
-
- bch_err(c, "%s", buf.buf);
- }
-
- printbuf_exit(&buf);
-
- if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
- return bch2_inconsistent_error(c) ? BCH_ERR_erofs_unfixed_errors : 0;
- } else {
- return 0;
- }
-}
-
-int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
- struct bch_dev *ca,
- struct bpos bucket,
- struct bch_backpointer bp,
- struct bkey_s_c orig_k,
- bool insert)
-{
- struct btree_iter bp_iter;
- struct bkey_s_c k;
- struct bkey_i_backpointer *bp_k;
- int ret;
-
- bp_k = bch2_trans_kmalloc_nomemzero(trans, sizeof(struct bkey_i_backpointer));
- ret = PTR_ERR_OR_ZERO(bp_k);
- if (ret)
- return ret;
-
- bkey_backpointer_init(&bp_k->k_i);
- bp_k->k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset);
- bp_k->v = bp;
-
- if (!insert) {
- bp_k->k.type = KEY_TYPE_deleted;
- set_bkey_val_u64s(&bp_k->k, 0);
- }
-
- k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
- bp_k->k.p,
- BTREE_ITER_intent|
- BTREE_ITER_slots|
- BTREE_ITER_with_updates);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (insert
- ? k.k->type
- : (k.k->type != KEY_TYPE_backpointer ||
- memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp)))) {
- ret = backpointer_mod_err(trans, bp, k, orig_k, insert);
- if (ret)
- goto err;
- }
-
- ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
-err:
- bch2_trans_iter_exit(trans, &bp_iter);
- return ret;
-}
-
-/*
- * Find the next backpointer >= *bp_pos:
- */
-int bch2_get_next_backpointer(struct btree_trans *trans,
- struct bch_dev *ca,
- struct bpos bucket, int gen,
- struct bpos *bp_pos,
- struct bch_backpointer *bp,
- unsigned iter_flags)
-{
- struct bpos bp_end_pos = bucket_pos_to_bp(ca, bpos_nosnap_successor(bucket), 0);
- struct btree_iter alloc_iter = { NULL }, bp_iter = { NULL };
- struct bkey_s_c k;
- int ret = 0;
-
- if (bpos_ge(*bp_pos, bp_end_pos))
- goto done;
-
- if (gen >= 0) {
- k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
- bucket, BTREE_ITER_cached|iter_flags);
- ret = bkey_err(k);
- if (ret)
- goto out;
-
- if (k.k->type != KEY_TYPE_alloc_v4 ||
- bkey_s_c_to_alloc_v4(k).v->gen != gen)
- goto done;
- }
-
- *bp_pos = bpos_max(*bp_pos, bucket_pos_to_bp(ca, bucket, 0));
-
- for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
- *bp_pos, iter_flags, k, ret) {
- if (bpos_ge(k.k->p, bp_end_pos))
- break;
-
- *bp_pos = k.k->p;
- *bp = *bkey_s_c_to_backpointer(k).v;
- goto out;
- }
-done:
- *bp_pos = SPOS_MAX;
-out:
- bch2_trans_iter_exit(trans, &bp_iter);
- bch2_trans_iter_exit(trans, &alloc_iter);
- return ret;
-}
-
-static void backpointer_not_found(struct btree_trans *trans,
- struct bpos bp_pos,
- struct bch_backpointer bp,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
-
- /*
- * If we're using the btree write buffer, the backpointer we were
- * looking at may have already been deleted - failure to find what it
- * pointed to is not an error:
- */
- if (likely(!bch2_backpointers_no_use_write_buffer))
- return;
-
- struct bpos bucket;
- if (!bp_pos_to_bucket_nodev(c, bp_pos, &bucket))
- return;
-
- prt_printf(&buf, "backpointer doesn't match %s it points to:\n ",
- bp.level ? "btree node" : "extent");
- prt_printf(&buf, "bucket: ");
- bch2_bpos_to_text(&buf, bucket);
- prt_printf(&buf, "\n ");
-
- prt_printf(&buf, "backpointer pos: ");
- bch2_bpos_to_text(&buf, bp_pos);
- prt_printf(&buf, "\n ");
-
- bch2_backpointer_to_text(&buf, &bp);
- prt_printf(&buf, "\n ");
- bch2_bkey_val_to_text(&buf, c, k);
- if (c->curr_recovery_pass >= BCH_RECOVERY_PASS_check_extents_to_backpointers)
- bch_err_ratelimited(c, "%s", buf.buf);
- else
- bch2_trans_inconsistent(trans, "%s", buf.buf);
-
- printbuf_exit(&buf);
-}
-
-struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bpos bp_pos,
- struct bch_backpointer bp,
- unsigned iter_flags)
-{
- if (likely(!bp.level)) {
- struct bch_fs *c = trans->c;
-
- struct bpos bucket;
- if (!bp_pos_to_bucket_nodev(c, bp_pos, &bucket))
- return bkey_s_c_err(-EIO);
-
- bch2_trans_node_iter_init(trans, iter,
- bp.btree_id,
- bp.pos,
- 0, 0,
- iter_flags);
- struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
- if (bkey_err(k)) {
- bch2_trans_iter_exit(trans, iter);
- return k;
- }
-
- if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
- return k;
-
- bch2_trans_iter_exit(trans, iter);
- backpointer_not_found(trans, bp_pos, bp, k);
- return bkey_s_c_null;
- } else {
- struct btree *b = bch2_backpointer_get_node(trans, iter, bp_pos, bp);
-
- if (IS_ERR_OR_NULL(b)) {
- bch2_trans_iter_exit(trans, iter);
- return IS_ERR(b) ? bkey_s_c_err(PTR_ERR(b)) : bkey_s_c_null;
- }
- return bkey_i_to_s_c(&b->key);
- }
-}
-
-struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bpos bp_pos,
- struct bch_backpointer bp)
-{
- struct bch_fs *c = trans->c;
-
- BUG_ON(!bp.level);
-
- struct bpos bucket;
- if (!bp_pos_to_bucket_nodev(c, bp_pos, &bucket))
- return ERR_PTR(-EIO);
-
- bch2_trans_node_iter_init(trans, iter,
- bp.btree_id,
- bp.pos,
- 0,
- bp.level - 1,
- 0);
- struct btree *b = bch2_btree_iter_peek_node(iter);
- if (IS_ERR_OR_NULL(b))
- goto err;
-
- BUG_ON(b->c.level != bp.level - 1);
-
- if (extent_matches_bp(c, bp.btree_id, bp.level,
- bkey_i_to_s_c(&b->key),
- bucket, bp))
- return b;
-
- if (btree_node_will_make_reachable(b)) {
- b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
- } else {
- backpointer_not_found(trans, bp_pos, bp, bkey_i_to_s_c(&b->key));
- b = NULL;
- }
-err:
- bch2_trans_iter_exit(trans, iter);
- return b;
-}
-
-static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_iter *bp_iter,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter alloc_iter = { NULL };
- struct bkey_s_c alloc_k;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- struct bpos bucket;
- if (!bp_pos_to_bucket_nodev_noerror(c, k.k->p, &bucket)) {
- if (fsck_err(trans, backpointer_to_missing_device,
- "backpointer for missing device:\n%s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- ret = bch2_btree_delete_at(trans, bp_iter, 0);
- goto out;
- }
-
- alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, bucket, 0);
- ret = bkey_err(alloc_k);
- if (ret)
- goto out;
-
- if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4,
- trans, backpointer_to_missing_alloc,
- "backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
- alloc_iter.pos.inode, alloc_iter.pos.offset,
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- ret = bch2_btree_delete_at(trans, bp_iter, 0);
- goto out;
- }
-out:
-fsck_err:
- bch2_trans_iter_exit(trans, &alloc_iter);
- printbuf_exit(&buf);
- return ret;
-}
-
-/* verify that every backpointer has a corresponding alloc key */
-int bch2_check_btree_backpointers(struct bch_fs *c)
-{
- int ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter,
- BTREE_ID_backpointers, POS_MIN, 0, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- bch2_check_btree_backpointer(trans, &iter, k)));
- bch_err_fn(c, ret);
- return ret;
-}
-
-struct extents_to_bp_state {
- struct bpos bucket_start;
- struct bpos bucket_end;
- struct bkey_buf last_flushed;
-};
-
-static int drop_dev_and_update(struct btree_trans *trans, enum btree_id btree,
- struct bkey_s_c extent, unsigned dev)
-{
- struct bkey_i *n = bch2_bkey_make_mut_noupdate(trans, extent);
- int ret = PTR_ERR_OR_ZERO(n);
- if (ret)
- return ret;
-
- bch2_bkey_drop_device(bkey_i_to_s(n), dev);
- return bch2_btree_insert_trans(trans, btree, n, 0);
-}
-
-static int check_extent_checksum(struct btree_trans *trans,
- enum btree_id btree, struct bkey_s_c extent,
- enum btree_id o_btree, struct bkey_s_c extent2, unsigned dev)
-{
- struct bch_fs *c = trans->c;
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(extent);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- struct printbuf buf = PRINTBUF;
- void *data_buf = NULL;
- struct bio *bio = NULL;
- size_t bytes;
- int ret = 0;
-
- if (bkey_is_btree_ptr(extent.k))
- return false;
-
- bkey_for_each_ptr_decode(extent.k, ptrs, p, entry)
- if (p.ptr.dev == dev)
- goto found;
- BUG();
-found:
- if (!p.crc.csum_type)
- return false;
-
- bytes = p.crc.compressed_size << 9;
-
- struct bch_dev *ca = bch2_dev_get_ioref(c, dev, READ);
- if (!ca)
- return false;
-
- data_buf = kvmalloc(bytes, GFP_KERNEL);
- if (!data_buf) {
- ret = -ENOMEM;
- goto err;
- }
-
- bio = bio_alloc(ca->disk_sb.bdev, buf_pages(data_buf, bytes), REQ_OP_READ, GFP_KERNEL);
- bio->bi_iter.bi_sector = p.ptr.offset;
- bch2_bio_map(bio, data_buf, bytes);
- ret = submit_bio_wait(bio);
- if (ret)
- goto err;
-
- prt_str(&buf, "extents pointing to same space, but first extent checksum bad:");
- prt_printf(&buf, "\n %s ", bch2_btree_id_str(btree));
- bch2_bkey_val_to_text(&buf, c, extent);
- prt_printf(&buf, "\n %s ", bch2_btree_id_str(o_btree));
- bch2_bkey_val_to_text(&buf, c, extent2);
-
- struct nonce nonce = extent_nonce(extent.k->bversion, p.crc);
- struct bch_csum csum = bch2_checksum(c, p.crc.csum_type, nonce, data_buf, bytes);
- if (fsck_err_on(bch2_crc_cmp(csum, p.crc.csum),
- trans, dup_backpointer_to_bad_csum_extent,
- "%s", buf.buf))
- ret = drop_dev_and_update(trans, btree, extent, dev) ?: 1;
-fsck_err:
-err:
- if (bio)
- bio_put(bio);
- kvfree(data_buf);
- percpu_ref_put(&ca->io_ref);
- printbuf_exit(&buf);
- return ret;
-}
-
-static int check_bp_exists(struct btree_trans *trans,
- struct extents_to_bp_state *s,
- struct bpos bucket,
- struct bch_backpointer bp,
- struct bkey_s_c orig_k)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter bp_iter = {};
- struct btree_iter other_extent_iter = {};
- struct printbuf buf = PRINTBUF;
- struct bkey_s_c bp_k;
- int ret = 0;
-
- struct bch_dev *ca = bch2_dev_bucket_tryget(c, bucket);
- if (!ca) {
- prt_str(&buf, "extent for nonexistent device:bucket ");
- bch2_bpos_to_text(&buf, bucket);
- prt_str(&buf, "\n ");
- bch2_bkey_val_to_text(&buf, c, orig_k);
- bch_err(c, "%s", buf.buf);
- ret = -BCH_ERR_fsck_repair_unimplemented;
- goto err;
- }
-
- if (bpos_lt(bucket, s->bucket_start) ||
- bpos_gt(bucket, s->bucket_end))
- goto out;
-
- bp_k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
- bucket_pos_to_bp(ca, bucket, bp.bucket_offset),
- 0);
- ret = bkey_err(bp_k);
- if (ret)
- goto err;
-
- if (bp_k.k->type != KEY_TYPE_backpointer ||
- memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) {
- ret = bch2_btree_write_buffer_maybe_flush(trans, orig_k, &s->last_flushed);
- if (ret)
- goto err;
-
- goto check_existing_bp;
- }
-out:
-err:
-fsck_err:
- bch2_trans_iter_exit(trans, &other_extent_iter);
- bch2_trans_iter_exit(trans, &bp_iter);
- bch2_dev_put(ca);
- printbuf_exit(&buf);
- return ret;
-check_existing_bp:
- /* Do we have a backpointer for a different extent? */
- if (bp_k.k->type != KEY_TYPE_backpointer)
- goto missing;
-
- struct bch_backpointer other_bp = *bkey_s_c_to_backpointer(bp_k).v;
-
- struct bkey_s_c other_extent =
- bch2_backpointer_get_key(trans, &other_extent_iter, bp_k.k->p, other_bp, 0);
- ret = bkey_err(other_extent);
- if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
- ret = 0;
- if (ret)
- goto err;
-
- if (!other_extent.k)
- goto missing;
-
- if (bch2_extents_match(orig_k, other_extent)) {
- printbuf_reset(&buf);
- prt_printf(&buf, "duplicate versions of same extent, deleting smaller\n ");
- bch2_bkey_val_to_text(&buf, c, orig_k);
- prt_str(&buf, "\n ");
- bch2_bkey_val_to_text(&buf, c, other_extent);
- bch_err(c, "%s", buf.buf);
-
- if (other_extent.k->size <= orig_k.k->size) {
- ret = drop_dev_and_update(trans, other_bp.btree_id, other_extent, bucket.inode);
- if (ret)
- goto err;
- goto out;
- } else {
- ret = drop_dev_and_update(trans, bp.btree_id, orig_k, bucket.inode);
- if (ret)
- goto err;
- goto missing;
- }
- }
-
- ret = check_extent_checksum(trans, other_bp.btree_id, other_extent, bp.btree_id, orig_k, bucket.inode);
- if (ret < 0)
- goto err;
- if (ret) {
- ret = 0;
- goto missing;
- }
-
- ret = check_extent_checksum(trans, bp.btree_id, orig_k, other_bp.btree_id, other_extent, bucket.inode);
- if (ret < 0)
- goto err;
- if (ret) {
- ret = 0;
- goto out;
- }
-
- printbuf_reset(&buf);
- prt_printf(&buf, "duplicate extents pointing to same space on dev %llu\n ", bucket.inode);
- bch2_bkey_val_to_text(&buf, c, orig_k);
- prt_str(&buf, "\n ");
- bch2_bkey_val_to_text(&buf, c, other_extent);
- bch_err(c, "%s", buf.buf);
- ret = -BCH_ERR_fsck_repair_unimplemented;
- goto err;
-missing:
- printbuf_reset(&buf);
- prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
- bch2_btree_id_str(bp.btree_id), bp.level);
- bch2_bkey_val_to_text(&buf, c, orig_k);
- prt_printf(&buf, "\n got: ");
- bch2_bkey_val_to_text(&buf, c, bp_k);
-
- struct bkey_i_backpointer n_bp_k;
- bkey_backpointer_init(&n_bp_k.k_i);
- n_bp_k.k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset);
- n_bp_k.v = bp;
- prt_printf(&buf, "\n want: ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&n_bp_k.k_i));
-
- if (fsck_err(trans, ptr_to_missing_backpointer, "%s", buf.buf))
- ret = bch2_bucket_backpointer_mod(trans, ca, bucket, bp, orig_k, true);
-
- goto out;
-}
-
-static int check_extent_to_backpointers(struct btree_trans *trans,
- struct extents_to_bp_state *s,
- enum btree_id btree, unsigned level,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct bkey_ptrs_c ptrs;
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- int ret;
-
- ptrs = bch2_bkey_ptrs_c(k);
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- struct bpos bucket_pos = POS_MIN;
- struct bch_backpointer bp;
-
- if (p.ptr.cached)
- continue;
-
- rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu_noerror(c, p.ptr.dev);
- if (ca)
- bch2_extent_ptr_to_bp(c, ca, btree, level, k, p, entry, &bucket_pos, &bp);
- rcu_read_unlock();
-
- if (!ca)
- continue;
-
- ret = check_bp_exists(trans, s, bucket_pos, bp, k);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int check_btree_root_to_backpointers(struct btree_trans *trans,
- struct extents_to_bp_state *s,
- enum btree_id btree_id,
- int *level)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct btree *b;
- struct bkey_s_c k;
- int ret;
-retry:
- bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN,
- 0, bch2_btree_id_root(c, btree_id)->b->c.level, 0);
- b = bch2_btree_iter_peek_node(&iter);
- ret = PTR_ERR_OR_ZERO(b);
- if (ret)
- goto err;
-
- if (b != btree_node_root(c, b)) {
- bch2_trans_iter_exit(trans, &iter);
- goto retry;
- }
-
- *level = b->c.level;
-
- k = bkey_i_to_s_c(&b->key);
- ret = check_extent_to_backpointers(trans, s, btree_id, b->c.level + 1, k);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp)
-{
- return (struct bbpos) {
- .btree = bp.btree_id,
- .pos = bp.pos,
- };
-}
-
-static u64 mem_may_pin_bytes(struct bch_fs *c)
-{
- struct sysinfo i;
- si_meminfo(&i);
-
- u64 mem_bytes = i.totalram * i.mem_unit;
- return div_u64(mem_bytes * c->opts.fsck_memory_usage_percent, 100);
-}
-
-static size_t btree_nodes_fit_in_ram(struct bch_fs *c)
-{
- return div_u64(mem_may_pin_bytes(c), c->opts.btree_node_size);
-}
-
-static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
- u64 btree_leaf_mask,
- u64 btree_interior_mask,
- struct bbpos start, struct bbpos *end)
-{
- struct bch_fs *c = trans->c;
- s64 mem_may_pin = mem_may_pin_bytes(c);
- int ret = 0;
-
- bch2_btree_cache_unpin(c);
-
- btree_interior_mask |= btree_leaf_mask;
-
- c->btree_cache.pinned_nodes_mask[0] = btree_leaf_mask;
- c->btree_cache.pinned_nodes_mask[1] = btree_interior_mask;
- c->btree_cache.pinned_nodes_start = start;
- c->btree_cache.pinned_nodes_end = *end = BBPOS_MAX;
-
- for (enum btree_id btree = start.btree;
- btree < BTREE_ID_NR && !ret;
- btree++) {
- unsigned depth = (BIT_ULL(btree) & btree_leaf_mask) ? 0 : 1;
-
- if (!(BIT_ULL(btree) & btree_leaf_mask) &&
- !(BIT_ULL(btree) & btree_interior_mask))
- continue;
-
- ret = __for_each_btree_node(trans, iter, btree,
- btree == start.btree ? start.pos : POS_MIN,
- 0, depth, BTREE_ITER_prefetch, b, ({
- mem_may_pin -= btree_buf_bytes(b);
- if (mem_may_pin <= 0) {
- c->btree_cache.pinned_nodes_end = *end =
- BBPOS(btree, b->key.k.p);
- break;
- }
- bch2_node_pin(c, b);
- 0;
- }));
- }
-
- return ret;
-}
-
-struct progress_indicator_state {
- unsigned long next_print;
- u64 nodes_seen;
- u64 nodes_total;
- struct btree *last_node;
-};
-
-static inline void progress_init(struct progress_indicator_state *s,
- struct bch_fs *c,
- u64 btree_id_mask)
-{
- memset(s, 0, sizeof(*s));
-
- s->next_print = jiffies + HZ * 10;
-
- for (unsigned i = 0; i < BTREE_ID_NR; i++) {
- if (!(btree_id_mask & BIT_ULL(i)))
- continue;
-
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_btree,
- .btree.id = i,
- };
-
- u64 v;
- bch2_accounting_mem_read(c, disk_accounting_pos_to_bpos(&acc), &v, 1);
- s->nodes_total += div64_ul(v, btree_sectors(c));
- }
-}
-
-static inline bool progress_update_p(struct progress_indicator_state *s)
-{
- bool ret = time_after_eq(jiffies, s->next_print);
-
- if (ret)
- s->next_print = jiffies + HZ * 10;
- return ret;
-}
-
-static void progress_update_iter(struct btree_trans *trans,
- struct progress_indicator_state *s,
- struct btree_iter *iter,
- const char *msg)
-{
- struct bch_fs *c = trans->c;
- struct btree *b = path_l(btree_iter_path(trans, iter))->b;
-
- s->nodes_seen += b != s->last_node;
- s->last_node = b;
-
- if (progress_update_p(s)) {
- struct printbuf buf = PRINTBUF;
- unsigned percent = s->nodes_total
- ? div64_u64(s->nodes_seen * 100, s->nodes_total)
- : 0;
-
- prt_printf(&buf, "%s: %d%%, done %llu/%llu nodes, at ",
- msg, percent, s->nodes_seen, s->nodes_total);
- bch2_bbpos_to_text(&buf, BBPOS(iter->btree_id, iter->pos));
-
- bch_info(c, "%s", buf.buf);
- printbuf_exit(&buf);
- }
-}
-
-static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
- struct extents_to_bp_state *s)
-{
- struct bch_fs *c = trans->c;
- struct progress_indicator_state progress;
- int ret = 0;
-
- progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_extents)|BIT_ULL(BTREE_ID_reflink));
-
- for (enum btree_id btree_id = 0;
- btree_id < btree_id_nr_alive(c);
- btree_id++) {
- int level, depth = btree_type_has_ptrs(btree_id) ? 0 : 1;
-
- ret = commit_do(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc,
- check_btree_root_to_backpointers(trans, s, btree_id, &level));
- if (ret)
- return ret;
-
- while (level >= depth) {
- struct btree_iter iter;
- bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0, level,
- BTREE_ITER_prefetch);
-
- ret = for_each_btree_key_continue(trans, iter, 0, k, ({
- progress_update_iter(trans, &progress, &iter, "extents_to_backpointers");
- check_extent_to_backpointers(trans, s, btree_id, level, k) ?:
- bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
- }));
- if (ret)
- return ret;
-
- --level;
- }
- }
-
- return 0;
-}
-
-int bch2_check_extents_to_backpointers(struct bch_fs *c)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct extents_to_bp_state s = { .bucket_start = POS_MIN };
- int ret;
-
- bch2_bkey_buf_init(&s.last_flushed);
- bkey_init(&s.last_flushed.k->k);
-
- while (1) {
- struct bbpos end;
- ret = bch2_get_btree_in_memory_pos(trans,
- BIT_ULL(BTREE_ID_backpointers),
- BIT_ULL(BTREE_ID_backpointers),
- BBPOS(BTREE_ID_backpointers, s.bucket_start), &end);
- if (ret)
- break;
-
- s.bucket_end = end.pos;
-
- if ( bpos_eq(s.bucket_start, POS_MIN) &&
- !bpos_eq(s.bucket_end, SPOS_MAX))
- bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
- __func__, btree_nodes_fit_in_ram(c));
-
- if (!bpos_eq(s.bucket_start, POS_MIN) ||
- !bpos_eq(s.bucket_end, SPOS_MAX)) {
- struct printbuf buf = PRINTBUF;
-
- prt_str(&buf, "check_extents_to_backpointers(): ");
- bch2_bpos_to_text(&buf, s.bucket_start);
- prt_str(&buf, "-");
- bch2_bpos_to_text(&buf, s.bucket_end);
-
- bch_verbose(c, "%s", buf.buf);
- printbuf_exit(&buf);
- }
-
- ret = bch2_check_extents_to_backpointers_pass(trans, &s);
- if (ret || bpos_eq(s.bucket_end, SPOS_MAX))
- break;
-
- s.bucket_start = bpos_successor(s.bucket_end);
- }
- bch2_trans_put(trans);
- bch2_bkey_buf_exit(&s.last_flushed, c);
-
- bch2_btree_cache_unpin(c);
-
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int check_one_backpointer(struct btree_trans *trans,
- struct bbpos start,
- struct bbpos end,
- struct bkey_s_c bp_k,
- struct bkey_buf *last_flushed)
-{
- if (bp_k.k->type != KEY_TYPE_backpointer)
- return 0;
-
- struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(bp_k);
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bbpos pos = bp_to_bbpos(*bp.v);
- struct bkey_s_c k;
- struct printbuf buf = PRINTBUF;
- int ret;
-
- if (bbpos_cmp(pos, start) < 0 ||
- bbpos_cmp(pos, end) > 0)
- return 0;
-
- k = bch2_backpointer_get_key(trans, &iter, bp.k->p, *bp.v, 0);
- ret = bkey_err(k);
- if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
- return 0;
- if (ret)
- return ret;
-
- if (!k.k) {
- ret = bch2_btree_write_buffer_maybe_flush(trans, bp.s_c, last_flushed);
- if (ret)
- goto out;
-
- if (fsck_err(trans, backpointer_to_missing_ptr,
- "backpointer for missing %s\n %s",
- bp.v->level ? "btree node" : "extent",
- (bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) {
- ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
- goto out;
- }
- }
-out:
-fsck_err:
- bch2_trans_iter_exit(trans, &iter);
- printbuf_exit(&buf);
- return ret;
-}
-
-static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
- struct bbpos start,
- struct bbpos end)
-{
- struct bch_fs *c = trans->c;
- struct bkey_buf last_flushed;
- struct progress_indicator_state progress;
-
- bch2_bkey_buf_init(&last_flushed);
- bkey_init(&last_flushed.k->k);
- progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_backpointers));
-
- int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
- POS_MIN, BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
- progress_update_iter(trans, &progress, &iter, "backpointers_to_extents");
- check_one_backpointer(trans, start, end, k, &last_flushed);
- }));
-
- bch2_bkey_buf_exit(&last_flushed, c);
- return ret;
-}
-
-int bch2_check_backpointers_to_extents(struct bch_fs *c)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct bbpos start = (struct bbpos) { .btree = 0, .pos = POS_MIN, }, end;
- int ret;
-
- while (1) {
- ret = bch2_get_btree_in_memory_pos(trans,
- BIT_ULL(BTREE_ID_extents)|
- BIT_ULL(BTREE_ID_reflink),
- ~0,
- start, &end);
- if (ret)
- break;
-
- if (!bbpos_cmp(start, BBPOS_MIN) &&
- bbpos_cmp(end, BBPOS_MAX))
- bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
- __func__, btree_nodes_fit_in_ram(c));
-
- if (bbpos_cmp(start, BBPOS_MIN) ||
- bbpos_cmp(end, BBPOS_MAX)) {
- struct printbuf buf = PRINTBUF;
-
- prt_str(&buf, "check_backpointers_to_extents(): ");
- bch2_bbpos_to_text(&buf, start);
- prt_str(&buf, "-");
- bch2_bbpos_to_text(&buf, end);
-
- bch_verbose(c, "%s", buf.buf);
- printbuf_exit(&buf);
- }
-
- ret = bch2_check_backpointers_to_extents_pass(trans, start, end);
- if (ret || !bbpos_cmp(end, BBPOS_MAX))
- break;
-
- start = bbpos_successor(end);
- }
- bch2_trans_put(trans);
-
- bch2_btree_cache_unpin(c);
-
- bch_err_fn(c, ret);
- return ret;
-}
diff --git a/fs/bcachefs/backpointers.h b/fs/bcachefs/backpointers.h
deleted file mode 100644
index 3b29fdf519dd..000000000000
--- a/fs/bcachefs/backpointers.h
+++ /dev/null
@@ -1,180 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BACKPOINTERS_BACKGROUND_H
-#define _BCACHEFS_BACKPOINTERS_BACKGROUND_H
-
-#include "btree_cache.h"
-#include "btree_iter.h"
-#include "btree_update.h"
-#include "buckets.h"
-#include "error.h"
-#include "super.h"
-
-static inline u64 swab40(u64 x)
-{
- return (((x & 0x00000000ffULL) << 32)|
- ((x & 0x000000ff00ULL) << 16)|
- ((x & 0x0000ff0000ULL) >> 0)|
- ((x & 0x00ff000000ULL) >> 16)|
- ((x & 0xff00000000ULL) >> 32));
-}
-
-int bch2_backpointer_validate(struct bch_fs *, struct bkey_s_c k, enum bch_validate_flags);
-void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *);
-void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-void bch2_backpointer_swab(struct bkey_s);
-
-#define bch2_bkey_ops_backpointer ((struct bkey_ops) { \
- .key_validate = bch2_backpointer_validate, \
- .val_to_text = bch2_backpointer_k_to_text, \
- .swab = bch2_backpointer_swab, \
- .min_val_size = 32, \
-})
-
-#define MAX_EXTENT_COMPRESS_RATIO_SHIFT 10
-
-/*
- * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
- * btree:
- */
-static inline struct bpos bp_pos_to_bucket(const struct bch_dev *ca, struct bpos bp_pos)
-{
- u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;
-
- return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
-}
-
-static inline bool bp_pos_to_bucket_nodev_noerror(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket)
-{
- rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, bp_pos.inode);
- if (ca)
- *bucket = bp_pos_to_bucket(ca, bp_pos);
- rcu_read_unlock();
- return ca != NULL;
-}
-
-static inline bool bp_pos_to_bucket_nodev(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket)
-{
- return !bch2_fs_inconsistent_on(!bp_pos_to_bucket_nodev_noerror(c, bp_pos, bucket),
- c, "backpointer for missing device %llu", bp_pos.inode);
-}
-
-static inline struct bpos bucket_pos_to_bp_noerror(const struct bch_dev *ca,
- struct bpos bucket,
- u64 bucket_offset)
-{
- return POS(bucket.inode,
- (bucket_to_sector(ca, bucket.offset) <<
- MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
-}
-
-/*
- * Convert from pos in alloc btree + bucket offset to pos in backpointer btree:
- */
-static inline struct bpos bucket_pos_to_bp(const struct bch_dev *ca,
- struct bpos bucket,
- u64 bucket_offset)
-{
- struct bpos ret = bucket_pos_to_bp_noerror(ca, bucket, bucket_offset);
- EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(ca, ret)));
- return ret;
-}
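
/*
 * A rough standalone sketch of the bucket <-> backpointer position arithmetic
 * above, assuming a hypothetical 512-sector bucket size; the EX_* names are
 * illustrative only and are not bcachefs APIs:
 */
#include <stdint.h>

#define EX_BUCKET_SECTORS	512ULL	/* hypothetical bucket size, in sectors */
#define EX_COMPRESS_SHIFT	10	/* stands in for MAX_EXTENT_COMPRESS_RATIO_SHIFT */

/* alloc-btree bucket + offset within the bucket -> backpointer-btree key offset */
static inline uint64_t ex_bucket_to_bp_offset(uint64_t bucket, uint64_t bucket_offset)
{
	return ((bucket * EX_BUCKET_SECTORS) << EX_COMPRESS_SHIFT) + bucket_offset;
}

/* backpointer-btree key offset -> alloc-btree bucket (inverse of the above) */
static inline uint64_t ex_bp_offset_to_bucket(uint64_t bp_offset)
{
	return (bp_offset >> EX_COMPRESS_SHIFT) / EX_BUCKET_SECTORS;
}

/* e.g. bucket 10, bucket_offset 3 -> ((10 * 512) << 10) + 3; shifting back recovers bucket 10 */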
-
-int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, struct bch_dev *,
- struct bpos bucket, struct bch_backpointer, struct bkey_s_c, bool);
-
-static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
- struct bch_dev *ca,
- struct bpos bucket,
- struct bch_backpointer bp,
- struct bkey_s_c orig_k,
- bool insert)
-{
- if (unlikely(bch2_backpointers_no_use_write_buffer))
- return bch2_bucket_backpointer_mod_nowritebuffer(trans, ca, bucket, bp, orig_k, insert);
-
- struct bkey_i_backpointer bp_k;
-
- bkey_backpointer_init(&bp_k.k_i);
- bp_k.k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset);
- bp_k.v = bp;
-
- if (!insert) {
- bp_k.k.type = KEY_TYPE_deleted;
- set_bkey_val_u64s(&bp_k.k, 0);
- }
-
- return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k.k_i);
-}
-
-static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k,
- struct extent_ptr_decoded p,
- const union bch_extent_entry *entry)
-{
- switch (k.k->type) {
- case KEY_TYPE_btree_ptr:
- case KEY_TYPE_btree_ptr_v2:
- return BCH_DATA_btree;
- case KEY_TYPE_extent:
- case KEY_TYPE_reflink_v:
- return p.has_ec ? BCH_DATA_stripe : BCH_DATA_user;
- case KEY_TYPE_stripe: {
- const struct bch_extent_ptr *ptr = &entry->ptr;
- struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
-
- BUG_ON(ptr < s.v->ptrs ||
- ptr >= s.v->ptrs + s.v->nr_blocks);
-
- return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
- ? BCH_DATA_parity
- : BCH_DATA_user;
- }
- default:
- BUG();
- }
-}
-
-static inline void __bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c k, struct extent_ptr_decoded p,
- const union bch_extent_entry *entry,
- struct bpos *bucket_pos, struct bch_backpointer *bp,
- u64 sectors)
-{
- u32 bucket_offset;
- *bucket_pos = PTR_BUCKET_POS_OFFSET(ca, &p.ptr, &bucket_offset);
- *bp = (struct bch_backpointer) {
- .btree_id = btree_id,
- .level = level,
- .data_type = bch2_bkey_ptr_data_type(k, p, entry),
- .bucket_offset = ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
- p.crc.offset,
- .bucket_len = sectors,
- .pos = k.k->p,
- };
-}
-
-static inline void bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c k, struct extent_ptr_decoded p,
- const union bch_extent_entry *entry,
- struct bpos *bucket_pos, struct bch_backpointer *bp)
-{
- u64 sectors = ptr_disk_sectors(level ? btree_sectors(c) : k.k->size, p);
-
- __bch2_extent_ptr_to_bp(c, ca, btree_id, level, k, p, entry, bucket_pos, bp, sectors);
-}
-
-int bch2_get_next_backpointer(struct btree_trans *, struct bch_dev *ca, struct bpos, int,
- struct bpos *, struct bch_backpointer *, unsigned);
-struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct btree_iter *,
- struct bpos, struct bch_backpointer,
- unsigned);
-struct btree *bch2_backpointer_get_node(struct btree_trans *, struct btree_iter *,
- struct bpos, struct bch_backpointer);
-
-int bch2_check_btree_backpointers(struct bch_fs *);
-int bch2_check_extents_to_backpointers(struct bch_fs *);
-int bch2_check_backpointers_to_extents(struct bch_fs *);
-
-#endif /* _BCACHEFS_BACKPOINTERS_BACKGROUND_H */
diff --git a/fs/bcachefs/bbpos.h b/fs/bcachefs/bbpos.h
deleted file mode 100644
index be2edced5213..000000000000
--- a/fs/bcachefs/bbpos.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BBPOS_H
-#define _BCACHEFS_BBPOS_H
-
-#include "bbpos_types.h"
-#include "bkey_methods.h"
-#include "btree_cache.h"
-
-static inline int bbpos_cmp(struct bbpos l, struct bbpos r)
-{
- return cmp_int(l.btree, r.btree) ?: bpos_cmp(l.pos, r.pos);
-}
-
-static inline struct bbpos bbpos_successor(struct bbpos pos)
-{
- if (bpos_cmp(pos.pos, SPOS_MAX)) {
- pos.pos = bpos_successor(pos.pos);
- return pos;
- }
-
- if (pos.btree != BTREE_ID_NR) {
- pos.btree++;
- pos.pos = POS_MIN;
- return pos;
- }
-
- BUG();
-}
-
-static inline void bch2_bbpos_to_text(struct printbuf *out, struct bbpos pos)
-{
- prt_str(out, bch2_btree_id_str(pos.btree));
- prt_char(out, ':');
- bch2_bpos_to_text(out, pos.pos);
-}
-
-#endif /* _BCACHEFS_BBPOS_H */
diff --git a/fs/bcachefs/bbpos_types.h b/fs/bcachefs/bbpos_types.h
deleted file mode 100644
index f63893344f80..000000000000
--- a/fs/bcachefs/bbpos_types.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BBPOS_TYPES_H
-#define _BCACHEFS_BBPOS_TYPES_H
-
-struct bbpos {
- enum btree_id btree;
- struct bpos pos;
-};
-
-static inline struct bbpos BBPOS(enum btree_id btree, struct bpos pos)
-{
- return (struct bbpos) { btree, pos };
-}
-
-#define BBPOS_MIN BBPOS(0, POS_MIN)
-#define BBPOS_MAX BBPOS(BTREE_ID_NR - 1, SPOS_MAX)
-
-#endif /* _BCACHEFS_BBPOS_TYPES_H */
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
deleted file mode 100644
index e94a83b8113e..000000000000
--- a/fs/bcachefs/bcachefs.h
+++ /dev/null
@@ -1,1254 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_H
-#define _BCACHEFS_H
-
-/*
- * SOME HIGH LEVEL CODE DOCUMENTATION:
- *
- * Bcache mostly works with cache sets, cache devices, and backing devices.
- *
- * Support for multiple cache devices hasn't quite been finished off yet, but
- * it's about 95% plumbed through. A cache set and its cache devices are sort of
- * like an md raid array and its component devices. Most of the code doesn't care
- * about individual cache devices, the main abstraction is the cache set.
- *
- * Multiple cache devices is intended to give us the ability to mirror dirty
- * cached data and metadata, without mirroring clean cached data.
- *
- * Backing devices are different, in that they have a lifetime independent of a
- * cache set. When you register a newly formatted backing device it'll come up
- * in passthrough mode, and then you can attach and detach a backing device from
- * a cache set at runtime - while it's mounted and in use. Detaching implicitly
- * invalidates any cached data for that backing device.
- *
- * A cache set can have multiple (many) backing devices attached to it.
- *
- * There's also flash only volumes - this is the reason for the distinction
- * between struct cached_dev and struct bcache_device. A flash only volume
- * works much like a bcache device that has a backing device, except the
- * "cached" data is always dirty. The end result is that we get thin
- * provisioning with very little additional code.
- *
- * Flash only volumes work but they're not production ready because the moving
- * garbage collector needs more work. More on that later.
- *
- * BUCKETS/ALLOCATION:
- *
- * Bcache is primarily designed for caching, which means that in normal
- * operation all of our available space will be allocated. Thus, we need an
- * efficient way of deleting things from the cache so we can write new things to
- * it.
- *
- * To do this, we first divide the cache device up into buckets. A bucket is the
- * unit of allocation; they're typically around 1 mb - anywhere from 128k to 2M+
- * works efficiently.
- *
- * Each bucket has a 16 bit priority, and an 8 bit generation associated with
- * it. The gens and priorities for all the buckets are stored contiguously and
- * packed on disk (in a linked list of buckets - aside from the superblock, all
- * of bcache's metadata is stored in buckets).
- *
- * The priority is used to implement an LRU. We reset a bucket's priority when
- * we allocate it or on cache it, and every so often we decrement the priority
- * of each bucket. It could be used to implement something more sophisticated,
- * if anyone ever gets around to it.
- *
- * The generation is used for invalidating buckets. Each pointer also has an 8
- * bit generation embedded in it; for a pointer to be considered valid, its gen
- * must match the gen of the bucket it points into. Thus, to reuse a bucket all
- * we have to do is increment its gen (and write its new gen to disk; we batch
- * this up).
- *
- * Bcache is entirely COW - we never write twice to a bucket, even buckets that
- * contain metadata (including btree nodes).
- *
- * THE BTREE:
- *
- * Bcache is in large part designed around the btree.
- *
- * At a high level, the btree is just an index of key -> ptr tuples.
- *
- * Keys represent extents, and thus have a size field. Keys also have a variable
- * number of pointers attached to them (potentially zero, which is handy for
- * invalidating the cache).
- *
- * The key itself is an inode:offset pair. The inode number corresponds to a
- * backing device or a flash only volume. The offset is the ending offset of the
- * extent within the inode - not the starting offset; this makes lookups
- * slightly more convenient.
- *
- * Pointers contain the cache device id, the offset on that device, and an 8 bit
- * generation number. More on the gen later.
- *
- * Index lookups are not fully abstracted - cache lookups in particular are
- * still somewhat mixed in with the btree code, but things are headed in that
- * direction.
- *
- * Updates are fairly well abstracted, though. There are two different ways of
- * updating the btree; insert and replace.
- *
- * BTREE_INSERT will just take a list of keys and insert them into the btree -
- * overwriting (possibly only partially) any extents they overlap with. This is
- * used to update the index after a write.
- *
- * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
- * overwriting a key that matches another given key. This is used for inserting
- * data into the cache after a cache miss, and for background writeback, and for
- * the moving garbage collector.
- *
- * There is no "delete" operation; deleting things from the index is
- * accomplished either by invalidating pointers (by incrementing a bucket's
- * gen) or by inserting a key with 0 pointers - which will overwrite anything
- * previously present at that location in the index.
- *
- * This means that there are always stale/invalid keys in the btree. They're
- * filtered out by the code that iterates through a btree node, and removed when
- * a btree node is rewritten.
- *
- * BTREE NODES:
- *
- * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
- * free smaller than a bucket - so, that's how big our btree nodes are.
- *
- * (If buckets are really big we'll only use part of the bucket for a btree node
- * - no less than 1/4th - but a bucket still contains no more than a single
- * btree node. I'd actually like to change this, but for now we rely on the
- * bucket's gen for deleting btree nodes when we rewrite/split a node.)
- *
- * Anyways, btree nodes are big - big enough to be inefficient with a textbook
- * btree implementation.
- *
- * The way this is solved is that btree nodes are internally log structured; we
- * can append new keys to an existing btree node without rewriting it. This
- * means each set of keys we write is sorted, but the node is not.
- *
- * We maintain this log structure in memory - keeping 1Mb of keys sorted would
- * be expensive, and we have to distinguish between the keys we have written and
- * the keys we haven't. So to do a lookup in a btree node, we have to search
- * each sorted set. But we do merge written sets together lazily, so the cost of
- * these extra searches is quite low (normally most of the keys in a btree node
- * will be in one big set, and then there'll be one or two sets that are much
- * smaller).
- *
- * This log structure makes bcache's btree more of a hybrid between a
- * conventional btree and a compacting data structure, with some of the
- * advantages of both.
- *
- * GARBAGE COLLECTION:
- *
- * We can't just invalidate any bucket - it might contain dirty data or
- * metadata. If it once contained dirty data, other writes might overwrite it
- * later, leaving no valid pointers into that bucket in the index.
- *
- * Thus, the primary purpose of garbage collection is to find buckets to reuse.
- * It also counts how much valid data each bucket currently contains, so that
- * allocation can reuse buckets sooner when they've been mostly overwritten.
- *
- * It also does some things that are really internal to the btree
- * implementation. If a btree node contains pointers that are stale by more than
- * some threshold, it rewrites the btree node to avoid the bucket's generation
- * wrapping around. It also merges adjacent btree nodes if they're empty enough.
- *
- * THE JOURNAL:
- *
- * Bcache's journal is not necessary for consistency; we always strictly
- * order metadata writes so that the btree and everything else is consistent on
- * disk in the event of an unclean shutdown, and in fact bcache had writeback
- * caching (with recovery from unclean shutdown) before journalling was
- * implemented.
- *
- * Rather, the journal is purely a performance optimization; we can't complete a
- * write until we've updated the index on disk, otherwise the cache would be
- * inconsistent in the event of an unclean shutdown. This means that without the
- * journal, on random write workloads we constantly have to update all the leaf
- * nodes in the btree, and those writes will be mostly empty (appending at most
- * a few keys each) - highly inefficient in terms of amount of metadata writes,
- * and it puts more strain on the various btree resorting/compacting code.
- *
- * The journal is just a log of keys we've inserted; on startup we just reinsert
- * all the keys in the open journal entries. That means that when we're updating
- * a node in the btree, we can wait until a 4k block of keys fills up before
- * writing them out.
- *
- * For simplicity, we only journal updates to leaf nodes; updates to parent
- * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
- * the complexity to deal with journalling them (in particular, journal replay)
- * - updates to non leaf nodes just happen synchronously (see btree_split()).
- */
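
/*
 * A minimal standalone sketch of the generation scheme described under
 * BUCKETS/ALLOCATION above: a pointer is only usable while its embedded gen
 * matches the gen of the bucket it points into, so invalidating everything in
 * a bucket is just a gen increment. The ex_* types are made up for
 * illustration and are not the real bcachefs structures:
 */
#include <stdbool.h>
#include <stdint.h>

struct ex_bucket { uint8_t gen; };			/* per-bucket generation */
struct ex_ptr	 { uint64_t offset; uint8_t gen; };	/* gen embedded in each pointer */

/* A pointer is stale once the bucket's gen has been incremented past it */
static inline bool ex_ptr_stale(const struct ex_bucket *b, const struct ex_ptr *ptr)
{
	return ptr->gen != b->gen;
}

/* "Deleting" a bucket's contents: bump the gen, then the bucket can be reused */
static inline void ex_bucket_invalidate(struct ex_bucket *b)
{
	b->gen++;
}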
-
-#undef pr_fmt
-#ifdef __KERNEL__
-#define pr_fmt(fmt) "bcachefs: %s() " fmt "\n", __func__
-#else
-#define pr_fmt(fmt) "%s() " fmt "\n", __func__
-#endif
-
-#include <linux/backing-dev-defs.h>
-#include <linux/bug.h>
-#include <linux/bio.h>
-#include <linux/closure.h>
-#include <linux/kobject.h>
-#include <linux/list.h>
-#include <linux/math64.h>
-#include <linux/mutex.h>
-#include <linux/percpu-refcount.h>
-#include <linux/percpu-rwsem.h>
-#include <linux/refcount.h>
-#include <linux/rhashtable.h>
-#include <linux/rwsem.h>
-#include <linux/semaphore.h>
-#include <linux/seqlock.h>
-#include <linux/shrinker.h>
-#include <linux/srcu.h>
-#include <linux/types.h>
-#include <linux/workqueue.h>
-#include <linux/zstd.h>
-
-#include "bcachefs_format.h"
-#include "disk_accounting_types.h"
-#include "errcode.h"
-#include "fifo.h"
-#include "nocow_locking_types.h"
-#include "opts.h"
-#include "recovery_passes_types.h"
-#include "sb-errors_types.h"
-#include "seqmutex.h"
-#include "time_stats.h"
-#include "util.h"
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-#define BCH_WRITE_REF_DEBUG
-#endif
-
-#ifndef dynamic_fault
-#define dynamic_fault(...) 0
-#endif
-
-#define race_fault(...) dynamic_fault("bcachefs:race")
-
-#define count_event(_c, _name) this_cpu_inc((_c)->counters[BCH_COUNTER_##_name])
-
-#define trace_and_count(_c, _name, ...) \
-do { \
- count_event(_c, _name); \
- trace_##_name(__VA_ARGS__); \
-} while (0)
-
-#define bch2_fs_init_fault(name) \
- dynamic_fault("bcachefs:bch_fs_init:" name)
-#define bch2_meta_read_fault(name) \
- dynamic_fault("bcachefs:meta:read:" name)
-#define bch2_meta_write_fault(name) \
- dynamic_fault("bcachefs:meta:write:" name)
-
-#ifdef __KERNEL__
-#define BCACHEFS_LOG_PREFIX
-#endif
-
-#ifdef BCACHEFS_LOG_PREFIX
-
-#define bch2_log_msg(_c, fmt) "bcachefs (%s): " fmt, ((_c)->name)
-#define bch2_fmt_dev(_ca, fmt) "bcachefs (%s): " fmt "\n", ((_ca)->name)
-#define bch2_fmt_dev_offset(_ca, _offset, fmt) "bcachefs (%s sector %llu): " fmt "\n", ((_ca)->name), (_offset)
-#define bch2_fmt_inum(_c, _inum, fmt) "bcachefs (%s inum %llu): " fmt "\n", ((_c)->name), (_inum)
-#define bch2_fmt_inum_offset(_c, _inum, _offset, fmt) \
- "bcachefs (%s inum %llu offset %llu): " fmt "\n", ((_c)->name), (_inum), (_offset)
-
-#else
-
-#define bch2_log_msg(_c, fmt) fmt
-#define bch2_fmt_dev(_ca, fmt) "%s: " fmt "\n", ((_ca)->name)
-#define bch2_fmt_dev_offset(_ca, _offset, fmt) "%s sector %llu: " fmt "\n", ((_ca)->name), (_offset)
-#define bch2_fmt_inum(_c, _inum, fmt) "inum %llu: " fmt "\n", (_inum)
-#define bch2_fmt_inum_offset(_c, _inum, _offset, fmt) \
- "inum %llu offset %llu: " fmt "\n", (_inum), (_offset)
-
-#endif
-
-#define bch2_fmt(_c, fmt) bch2_log_msg(_c, fmt "\n")
-
-void bch2_print_str(struct bch_fs *, const char *);
-
-__printf(2, 3)
-void bch2_print_opts(struct bch_opts *, const char *, ...);
-
-__printf(2, 3)
-void __bch2_print(struct bch_fs *c, const char *fmt, ...);
-
-#define maybe_dev_to_fs(_c) _Generic((_c), \
- struct bch_dev *: ((struct bch_dev *) (_c))->fs, \
- struct bch_fs *: (_c))
-
-#define bch2_print(_c, ...) __bch2_print(maybe_dev_to_fs(_c), __VA_ARGS__)
-
-#define bch2_print_ratelimited(_c, ...) \
-do { \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- \
- if (__ratelimit(&_rs)) \
- bch2_print(_c, __VA_ARGS__); \
-} while (0)
-
-#define bch_info(c, fmt, ...) \
- bch2_print(c, KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__)
-#define bch_notice(c, fmt, ...) \
- bch2_print(c, KERN_NOTICE bch2_fmt(c, fmt), ##__VA_ARGS__)
-#define bch_warn(c, fmt, ...) \
- bch2_print(c, KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
-#define bch_warn_ratelimited(c, fmt, ...) \
- bch2_print_ratelimited(c, KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
-
-#define bch_err(c, fmt, ...) \
- bch2_print(c, KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
-#define bch_err_dev(ca, fmt, ...) \
- bch2_print(c, KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
-#define bch_err_dev_offset(ca, _offset, fmt, ...) \
- bch2_print(c, KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
-#define bch_err_inum(c, _inum, fmt, ...) \
- bch2_print(c, KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
-#define bch_err_inum_offset(c, _inum, _offset, fmt, ...) \
- bch2_print(c, KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
-
-#define bch_err_ratelimited(c, fmt, ...) \
- bch2_print_ratelimited(c, KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
-#define bch_err_dev_ratelimited(ca, fmt, ...) \
- bch2_print_ratelimited(ca, KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
-#define bch_err_dev_offset_ratelimited(ca, _offset, fmt, ...) \
- bch2_print_ratelimited(ca, KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
-#define bch_err_inum_ratelimited(c, _inum, fmt, ...) \
- bch2_print_ratelimited(c, KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
-#define bch_err_inum_offset_ratelimited(c, _inum, _offset, fmt, ...) \
- bch2_print_ratelimited(c, KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
-
-static inline bool should_print_err(int err)
-{
- return err && !bch2_err_matches(err, BCH_ERR_transaction_restart);
-}
-
-#define bch_err_fn(_c, _ret) \
-do { \
- if (should_print_err(_ret)) \
- bch_err(_c, "%s(): error %s", __func__, bch2_err_str(_ret));\
-} while (0)
-
-#define bch_err_fn_ratelimited(_c, _ret) \
-do { \
- if (should_print_err(_ret)) \
- bch_err_ratelimited(_c, "%s(): error %s", __func__, bch2_err_str(_ret));\
-} while (0)
-
-#define bch_err_msg(_c, _ret, _msg, ...) \
-do { \
- if (should_print_err(_ret)) \
- bch_err(_c, "%s(): error " _msg " %s", __func__, \
- ##__VA_ARGS__, bch2_err_str(_ret)); \
-} while (0)
-
-#define bch_verbose(c, fmt, ...) \
-do { \
- if ((c)->opts.verbose) \
- bch_info(c, fmt, ##__VA_ARGS__); \
-} while (0)
-
-#define pr_verbose_init(opts, fmt, ...) \
-do { \
- if (opt_get(opts, verbose)) \
- pr_info(fmt, ##__VA_ARGS__); \
-} while (0)
-
-/* Parameters that are useful for debugging, but should always be compiled in: */
-#define BCH_DEBUG_PARAMS_ALWAYS() \
- BCH_DEBUG_PARAM(key_merging_disabled, \
- "Disables merging of extents") \
- BCH_DEBUG_PARAM(btree_node_merging_disabled, \
- "Disables merging of btree nodes") \
- BCH_DEBUG_PARAM(btree_gc_always_rewrite, \
- "Causes mark and sweep to compact and rewrite every " \
- "btree node it traverses") \
- BCH_DEBUG_PARAM(btree_gc_rewrite_disabled, \
- "Disables rewriting of btree nodes during mark and sweep")\
- BCH_DEBUG_PARAM(btree_shrinker_disabled, \
- "Disables the shrinker callback for the btree node cache")\
- BCH_DEBUG_PARAM(verify_btree_ondisk, \
- "Reread btree nodes at various points to verify the " \
- "mergesort in the read path against modifications " \
- "done in memory") \
- BCH_DEBUG_PARAM(verify_all_btree_replicas, \
- "When reading btree nodes, read all replicas and " \
- "compare them") \
- BCH_DEBUG_PARAM(backpointers_no_use_write_buffer, \
- "Don't use the write buffer for backpointers, enabling "\
- "extra runtime checks")
-
-/* Parameters that should only be compiled in debug mode: */
-#define BCH_DEBUG_PARAMS_DEBUG() \
- BCH_DEBUG_PARAM(expensive_debug_checks, \
- "Enables various runtime debugging checks that " \
- "significantly affect performance") \
- BCH_DEBUG_PARAM(debug_check_iterators, \
- "Enables extra verification for btree iterators") \
- BCH_DEBUG_PARAM(debug_check_btree_accounting, \
- "Verify btree accounting for keys within a node") \
- BCH_DEBUG_PARAM(journal_seq_verify, \
- "Store the journal sequence number in the version " \
- "number of every btree key, and verify that btree " \
- "update ordering is preserved during recovery") \
- BCH_DEBUG_PARAM(inject_invalid_keys, \
- "Store the journal sequence number in the version " \
- "number of every btree key, and verify that btree " \
- "update ordering is preserved during recovery") \
- BCH_DEBUG_PARAM(test_alloc_startup, \
-		"Force allocator startup to use the slowpath where it " \
-		"can't find enough free buckets without invalidating " \
- "cached data") \
- BCH_DEBUG_PARAM(force_reconstruct_read, \
-		"Force reads to use the reconstruct path, when reading " \
- "from erasure coded extents") \
- BCH_DEBUG_PARAM(test_restart_gc, \
- "Test restarting mark and sweep gc when bucket gens change")
-
-#define BCH_DEBUG_PARAMS_ALL() BCH_DEBUG_PARAMS_ALWAYS() BCH_DEBUG_PARAMS_DEBUG()
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-#define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALL()
-#else
-#define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALWAYS()
-#endif
-
-#define BCH_DEBUG_PARAM(name, description) extern bool bch2_##name;
-BCH_DEBUG_PARAMS()
-#undef BCH_DEBUG_PARAM
-
-#ifndef CONFIG_BCACHEFS_DEBUG
-#define BCH_DEBUG_PARAM(name, description) static const __maybe_unused bool bch2_##name;
-BCH_DEBUG_PARAMS_DEBUG()
-#undef BCH_DEBUG_PARAM
-#endif
-
-#define BCH_TIME_STATS() \
- x(btree_node_mem_alloc) \
- x(btree_node_split) \
- x(btree_node_compact) \
- x(btree_node_merge) \
- x(btree_node_sort) \
- x(btree_node_read) \
- x(btree_node_read_done) \
- x(btree_interior_update_foreground) \
- x(btree_interior_update_total) \
- x(btree_gc) \
- x(data_write) \
- x(data_read) \
- x(data_promote) \
- x(journal_flush_write) \
- x(journal_noflush_write) \
- x(journal_flush_seq) \
- x(blocked_journal_low_on_space) \
- x(blocked_journal_low_on_pin) \
- x(blocked_journal_max_in_flight) \
- x(blocked_key_cache_flush) \
- x(blocked_allocate) \
- x(blocked_allocate_open_bucket) \
- x(blocked_write_buffer_full) \
- x(nocow_lock_contended)
-
-enum bch_time_stats {
-#define x(name) BCH_TIME_##name,
- BCH_TIME_STATS()
-#undef x
- BCH_TIME_STAT_NR
-};
-
-#include "alloc_types.h"
-#include "btree_gc_types.h"
-#include "btree_types.h"
-#include "btree_node_scan_types.h"
-#include "btree_write_buffer_types.h"
-#include "buckets_types.h"
-#include "buckets_waiting_for_journal_types.h"
-#include "clock_types.h"
-#include "disk_groups_types.h"
-#include "ec_types.h"
-#include "journal_types.h"
-#include "keylist_types.h"
-#include "quota_types.h"
-#include "rebalance_types.h"
-#include "replicas_types.h"
-#include "sb-members_types.h"
-#include "subvolume_types.h"
-#include "super_types.h"
-#include "thread_with_file_types.h"
-
-/* Number of nodes btree coalesce will try to coalesce at once */
-#define GC_MERGE_NODES 4U
-
-/* Maximum number of nodes we might need to allocate atomically: */
-#define BTREE_RESERVE_MAX (BTREE_MAX_DEPTH + (BTREE_MAX_DEPTH - 1))
-
-/* Size of the freelist we allocate btree nodes from: */
-#define BTREE_NODE_RESERVE (BTREE_RESERVE_MAX * 4)
-
-#define BTREE_NODE_OPEN_BUCKET_RESERVE (BTREE_RESERVE_MAX * BCH_REPLICAS_MAX)
-
-struct btree;
-
-struct io_count {
- u64 sectors[2][BCH_DATA_NR];
-};
-
-struct discard_in_flight {
- bool in_progress:1;
- u64 bucket:63;
-};
-
-struct bch_dev {
- struct kobject kobj;
-#ifdef CONFIG_BCACHEFS_DEBUG
- atomic_long_t ref;
- bool dying;
- unsigned long last_put;
-#else
- struct percpu_ref ref;
-#endif
- struct completion ref_completion;
- struct percpu_ref io_ref;
- struct completion io_ref_completion;
-
- struct bch_fs *fs;
-
- u8 dev_idx;
- /*
- * Cached version of this device's member info from superblock
- * Committed by bch2_write_super() -> bch_fs_mi_update()
- */
- struct bch_member_cpu mi;
- atomic64_t errors[BCH_MEMBER_ERROR_NR];
-
- __uuid_t uuid;
- char name[BDEVNAME_SIZE];
-
- struct bch_sb_handle disk_sb;
- struct bch_sb *sb_read_scratch;
- int sb_write_error;
- dev_t dev;
- atomic_t flush_seq;
-
- struct bch_devs_mask self;
-
- /*
- * Buckets:
- * Per-bucket arrays are protected by c->mark_lock, bucket_lock and
- * gc_gens_lock, for device resize - holding any is sufficient for
-	 * access; or rcu_read_lock(), but only for dev_ptr_stale()
- */
- GENRADIX(struct bucket) buckets_gc;
- struct bucket_gens __rcu *bucket_gens;
- u8 *oldest_gen;
- unsigned long *buckets_nouse;
- struct rw_semaphore bucket_lock;
-
- struct bch_dev_usage __percpu *usage;
-
- /* Allocator: */
- u64 new_fs_bucket_idx;
- u64 alloc_cursor[3];
-
- unsigned nr_open_buckets;
- unsigned nr_partial_buckets;
- unsigned nr_btree_reserve;
-
- size_t inc_gen_needs_gc;
- size_t inc_gen_really_needs_gc;
- size_t buckets_waiting_on_journal;
-
- struct work_struct invalidate_work;
- struct work_struct discard_work;
- struct mutex discard_buckets_in_flight_lock;
- DARRAY(struct discard_in_flight) discard_buckets_in_flight;
- struct work_struct discard_fast_work;
-
- atomic64_t rebalance_work;
-
- struct journal_device journal;
- u64 prev_journal_sector;
-
- struct work_struct io_error_work;
-
- /* The rest of this all shows up in sysfs */
- atomic64_t cur_latency[2];
- struct bch2_time_stats_quantiles io_latency[2];
-
-#define CONGESTED_MAX 1024
- atomic_t congested;
- u64 congested_last;
-
- struct io_count __percpu *io_done;
-};
-
-/*
- * initial_gc_unfixed
- * error
- * topology error
- */
-
-#define BCH_FS_FLAGS() \
- x(new_fs) \
- x(started) \
- x(clean_recovery) \
- x(btree_running) \
- x(accounting_replay_done) \
- x(may_go_rw) \
- x(rw) \
- x(was_rw) \
- x(stopping) \
- x(emergency_ro) \
- x(going_ro) \
- x(write_disable_complete) \
- x(clean_shutdown) \
- x(fsck_running) \
- x(initial_gc_unfixed) \
- x(need_delete_dead_snapshots) \
- x(error) \
- x(topology_error) \
- x(errors_fixed) \
- x(errors_not_fixed) \
- x(no_invalid_checks)
-
-enum bch_fs_flags {
-#define x(n) BCH_FS_##n,
- BCH_FS_FLAGS()
-#undef x
-};
-
-struct btree_debug {
- unsigned id;
-};
-
-#define BCH_TRANSACTIONS_NR 128
-
-struct btree_transaction_stats {
- struct bch2_time_stats duration;
- struct bch2_time_stats lock_hold_times;
- struct mutex lock;
- unsigned nr_max_paths;
- unsigned journal_entries_size;
- unsigned max_mem;
- char *max_paths_text;
-};
-
-struct bch_fs_pcpu {
- u64 sectors_available;
-};
-
-struct journal_seq_blacklist_table {
- size_t nr;
- struct journal_seq_blacklist_table_entry {
- u64 start;
- u64 end;
- bool dirty;
- } entries[];
-};
-
-struct journal_keys {
- /* must match layout in darray_types.h */
- size_t nr, size;
- struct journal_key {
- u64 journal_seq;
- u32 journal_offset;
- enum btree_id btree_id:8;
- unsigned level:8;
- bool allocated;
- bool overwritten;
- struct bkey_i *k;
- } *data;
- /*
- * Gap buffer: instead of all the empty space in the array being at the
- * end of the buffer - from @nr to @size - the empty space is at @gap.
- * This means that sequential insertions are O(n) instead of O(n^2).
- */
- size_t gap;
- atomic_t ref;
- bool initial_ref_held;
-};
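
/*
 * A standalone sketch of the gap-buffer idea described in the comment above
 * (illustrative only: the ex_* names and the int payload are made up, and the
 * real journal_keys code differs). Elements left of the gap live at
 * data[0..gap), elements right of it at data[gap + free space .. size), so
 * inserting at index i only has to memmove the span between the gap and i:
 */
#include <stddef.h>
#include <string.h>

struct ex_gap_buf {
	int	*data;
	size_t	nr, size, gap;
};

/* Insert @v at logical index @idx; assumes nr < size (the gap is non-empty) */
static void ex_gap_buf_insert(struct ex_gap_buf *b, size_t idx, int v)
{
	size_t gap_len = b->size - b->nr;

	if (idx < b->gap)		/* move the gap left: shift [idx, gap) right */
		memmove(b->data + idx + gap_len, b->data + idx,
			(b->gap - idx) * sizeof(*b->data));
	else if (idx > b->gap)		/* move the gap right: shift [gap, idx) left */
		memmove(b->data + b->gap, b->data + b->gap + gap_len,
			(idx - b->gap) * sizeof(*b->data));

	b->gap = idx;
	b->data[b->gap++] = v;
	b->nr++;
}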
-
-struct btree_trans_buf {
- struct btree_trans *trans;
-};
-
-#define BCACHEFS_ROOT_SUBVOL_INUM \
- ((subvol_inum) { BCACHEFS_ROOT_SUBVOL, BCACHEFS_ROOT_INO })
-
-#define BCH_WRITE_REFS() \
- x(trans) \
- x(write) \
- x(promote) \
- x(node_rewrite) \
- x(stripe_create) \
- x(stripe_delete) \
- x(reflink) \
- x(fallocate) \
- x(fsync) \
- x(dio_write) \
- x(discard) \
- x(discard_fast) \
- x(invalidate) \
- x(delete_dead_snapshots) \
- x(gc_gens) \
- x(snapshot_delete_pagecache) \
- x(sysfs) \
- x(btree_write_buffer)
-
-enum bch_write_ref {
-#define x(n) BCH_WRITE_REF_##n,
- BCH_WRITE_REFS()
-#undef x
- BCH_WRITE_REF_NR,
-};
-
-struct bch_fs {
- struct closure cl;
-
- struct list_head list;
- struct kobject kobj;
- struct kobject counters_kobj;
- struct kobject internal;
- struct kobject opts_dir;
- struct kobject time_stats;
- unsigned long flags;
-
- int minor;
- struct device *chardev;
- struct super_block *vfs_sb;
- dev_t dev;
- char name[40];
- struct stdio_redirect *stdio;
- struct task_struct *stdio_filter;
-
- /* ro/rw, add/remove/resize devices: */
- struct rw_semaphore state_lock;
-
- /* Counts outstanding writes, for clean transition to read-only */
-#ifdef BCH_WRITE_REF_DEBUG
- atomic_long_t writes[BCH_WRITE_REF_NR];
-#else
- struct percpu_ref writes;
-#endif
- /*
-	 * Analogous to c->writes, for asynchronous ops that don't necessarily
- * need fs to be read-write
- */
- refcount_t ro_ref;
- wait_queue_head_t ro_ref_wait;
-
- struct work_struct read_only_work;
-
- struct bch_dev __rcu *devs[BCH_SB_MEMBERS_MAX];
-
- struct bch_accounting_mem accounting;
-
- struct bch_replicas_cpu replicas;
- struct bch_replicas_cpu replicas_gc;
- struct mutex replicas_gc_lock;
-
- struct journal_entry_res btree_root_journal_res;
- struct journal_entry_res clock_journal_res;
-
- struct bch_disk_groups_cpu __rcu *disk_groups;
-
- struct bch_opts opts;
-
-	/* Updated by bch2_sb_update(): */
- struct {
- __uuid_t uuid;
- __uuid_t user_uuid;
-
- u16 version;
- u16 version_min;
- u16 version_upgrade_complete;
-
- u8 nr_devices;
- u8 clean;
-
- u8 encryption_type;
-
- u64 time_base_lo;
- u32 time_base_hi;
- unsigned time_units_per_sec;
- unsigned nsec_per_time_unit;
- u64 features;
- u64 compat;
- unsigned long errors_silent[BITS_TO_LONGS(BCH_FSCK_ERR_MAX)];
- u64 btrees_lost_data;
- } sb;
-
- struct bch_sb_handle disk_sb;
-
- unsigned short block_bits; /* ilog2(block_size) */
-
- u16 btree_foreground_merge_threshold;
-
- struct closure sb_write;
- struct mutex sb_lock;
-
- /* snapshot.c: */
- struct snapshot_table __rcu *snapshots;
- struct mutex snapshot_table_lock;
- struct rw_semaphore snapshot_create_lock;
-
- struct work_struct snapshot_delete_work;
- struct work_struct snapshot_wait_for_pagecache_and_delete_work;
- snapshot_id_list snapshots_unlinked;
- struct mutex snapshots_unlinked_lock;
-
- /* BTREE CACHE */
- struct bio_set btree_bio;
- struct workqueue_struct *btree_read_complete_wq;
- struct workqueue_struct *btree_write_submit_wq;
-
- struct btree_root btree_roots_known[BTREE_ID_NR];
- DARRAY(struct btree_root) btree_roots_extra;
- struct mutex btree_root_lock;
-
- struct btree_cache btree_cache;
-
- /*
- * Cache of allocated btree nodes - if we allocate a btree node and
-	 * don't use it and then free it, that space can't be reused until going
-	 * _all_ the way through the allocator (which exposes us to a livelock
-	 * when allocating btree reserves fails halfway through) - instead, we
- * can stick them here:
- */
- struct btree_alloc btree_reserve_cache[BTREE_NODE_RESERVE * 2];
- unsigned btree_reserve_cache_nr;
- struct mutex btree_reserve_cache_lock;
-
- mempool_t btree_interior_update_pool;
- struct list_head btree_interior_update_list;
- struct list_head btree_interior_updates_unwritten;
- struct mutex btree_interior_update_lock;
- struct closure_waitlist btree_interior_update_wait;
-
- struct workqueue_struct *btree_interior_update_worker;
- struct work_struct btree_interior_update_work;
-
- struct workqueue_struct *btree_node_rewrite_worker;
-
- struct list_head pending_node_rewrites;
- struct mutex pending_node_rewrites_lock;
-
- /* btree_io.c: */
- spinlock_t btree_write_error_lock;
- struct btree_write_stats {
- atomic64_t nr;
- atomic64_t bytes;
- } btree_write_stats[BTREE_WRITE_TYPE_NR];
-
- /* btree_iter.c: */
- struct seqmutex btree_trans_lock;
- struct list_head btree_trans_list;
- mempool_t btree_trans_pool;
- mempool_t btree_trans_mem_pool;
- struct btree_trans_buf __percpu *btree_trans_bufs;
-
- struct srcu_struct btree_trans_barrier;
- bool btree_trans_barrier_initialized;
-
- struct btree_key_cache btree_key_cache;
- unsigned btree_key_cache_btrees;
-
- struct btree_write_buffer btree_write_buffer;
-
- struct workqueue_struct *btree_update_wq;
- struct workqueue_struct *btree_io_complete_wq;
- /* copygc needs its own workqueue for index updates.. */
- struct workqueue_struct *copygc_wq;
- /*
- * Use a dedicated wq for write ref holder tasks. Required to avoid
- * dependency problems with other wq tasks that can block on ref
- * draining, such as read-only transition.
- */
- struct workqueue_struct *write_ref_wq;
-
- /* ALLOCATION */
- struct bch_devs_mask rw_devs[BCH_DATA_NR];
- unsigned long rw_devs_change_count;
-
- u64 capacity; /* sectors */
- u64 reserved; /* sectors */
-
- /*
- * When capacity _decreases_ (due to a disk being removed), we
- * increment capacity_gen - this invalidates outstanding reservations
- * and forces them to be revalidated
- */
- u32 capacity_gen;
- unsigned bucket_size_max;
-
- atomic64_t sectors_available;
- struct mutex sectors_available_lock;
-
- struct bch_fs_pcpu __percpu *pcpu;
-
- struct percpu_rw_semaphore mark_lock;
-
- seqcount_t usage_lock;
- struct bch_fs_usage_base __percpu *usage;
- u64 __percpu *online_reserved;
-
- unsigned long allocator_last_stuck;
-
- struct io_clock io_clock[2];
-
- /* JOURNAL SEQ BLACKLIST */
- struct journal_seq_blacklist_table *
- journal_seq_blacklist_table;
-
- /* ALLOCATOR */
- spinlock_t freelist_lock;
- struct closure_waitlist freelist_wait;
-
- open_bucket_idx_t open_buckets_freelist;
- open_bucket_idx_t open_buckets_nr_free;
- struct closure_waitlist open_buckets_wait;
- struct open_bucket open_buckets[OPEN_BUCKETS_COUNT];
- open_bucket_idx_t open_buckets_hash[OPEN_BUCKETS_COUNT];
-
- open_bucket_idx_t open_buckets_partial[OPEN_BUCKETS_COUNT];
- open_bucket_idx_t open_buckets_partial_nr;
-
- struct write_point btree_write_point;
- struct write_point rebalance_write_point;
-
- struct write_point write_points[WRITE_POINT_MAX];
- struct hlist_head write_points_hash[WRITE_POINT_HASH_NR];
- struct mutex write_points_hash_lock;
- unsigned write_points_nr;
-
- struct buckets_waiting_for_journal buckets_waiting_for_journal;
-
- /* GARBAGE COLLECTION */
- struct work_struct gc_gens_work;
- unsigned long gc_count;
-
- enum btree_id gc_gens_btree;
- struct bpos gc_gens_pos;
-
- /*
- * Tracks GC's progress - everything in the range [ZERO_KEY..gc_cur_pos]
- * has been marked by GC.
- *
- * gc_cur_phase is a superset of btree_ids (BTREE_ID_extents etc.)
- *
- * Protected by gc_pos_lock. Only written to by GC thread, so GC thread
- * can read without a lock.
- */
- seqcount_t gc_pos_lock;
- struct gc_pos gc_pos;
-
- /*
- * The allocation code needs gc_mark in struct bucket to be correct, but
- * it's not while a gc is in progress.
- */
- struct rw_semaphore gc_lock;
- struct mutex gc_gens_lock;
-
- /* IO PATH */
- struct semaphore io_in_flight;
- struct bio_set bio_read;
- struct bio_set bio_read_split;
- struct bio_set bio_write;
- struct bio_set replica_set;
- struct mutex bio_bounce_pages_lock;
- mempool_t bio_bounce_pages;
- struct bucket_nocow_lock_table
- nocow_locks;
- struct rhashtable promote_table;
-
- mempool_t compression_bounce[2];
- mempool_t compress_workspace[BCH_COMPRESSION_TYPE_NR];
- mempool_t decompress_workspace;
- size_t zstd_workspace_size;
-
- struct crypto_shash *sha256;
- struct crypto_sync_skcipher *chacha20;
- struct crypto_shash *poly1305;
-
- atomic64_t key_version;
-
- mempool_t large_bkey_pool;
-
- /* MOVE.C */
- struct list_head moving_context_list;
- struct mutex moving_context_lock;
-
- /* REBALANCE */
- struct bch_fs_rebalance rebalance;
-
- /* COPYGC */
- struct task_struct *copygc_thread;
- struct write_point copygc_write_point;
- s64 copygc_wait_at;
- s64 copygc_wait;
- bool copygc_running;
- wait_queue_head_t copygc_running_wq;
-
- /* STRIPES: */
- GENRADIX(struct stripe) stripes;
- GENRADIX(struct gc_stripe) gc_stripes;
-
- struct hlist_head ec_stripes_new[32];
- spinlock_t ec_stripes_new_lock;
-
- ec_stripes_heap ec_stripes_heap;
- struct mutex ec_stripes_heap_lock;
-
- /* ERASURE CODING */
- struct list_head ec_stripe_head_list;
- struct mutex ec_stripe_head_lock;
-
- struct list_head ec_stripe_new_list;
- struct mutex ec_stripe_new_lock;
- wait_queue_head_t ec_stripe_new_wait;
-
- struct work_struct ec_stripe_create_work;
- u64 ec_stripe_hint;
-
- struct work_struct ec_stripe_delete_work;
-
- struct bio_set ec_bioset;
-
- /* REFLINK */
- reflink_gc_table reflink_gc_table;
- size_t reflink_gc_nr;
-
- /* fs.c */
- struct list_head vfs_inodes_list;
- struct mutex vfs_inodes_lock;
- struct rhashtable vfs_inodes_table;
-
- /* VFS IO PATH - fs-io.c */
- struct bio_set writepage_bioset;
- struct bio_set dio_write_bioset;
- struct bio_set dio_read_bioset;
- struct bio_set nocow_flush_bioset;
-
- /* QUOTAS */
- struct bch_memquota_type quotas[QTYP_NR];
-
- /* RECOVERY */
- u64 journal_replay_seq_start;
- u64 journal_replay_seq_end;
- /*
- * Two different uses:
- * "Has this fsck pass?" - i.e. should this type of error be an
- * emergency read-only
- * And, in certain situations fsck will rewind to an earlier pass: used
- * for signaling to the toplevel code which pass we want to run now.
- */
- enum bch_recovery_pass curr_recovery_pass;
- /* bitmask of recovery passes that we actually ran */
- u64 recovery_passes_complete;
- /* never rewinds version of curr_recovery_pass */
- enum bch_recovery_pass recovery_pass_done;
- struct semaphore online_fsck_mutex;
-
- /* DEBUG JUNK */
- struct dentry *fs_debug_dir;
- struct dentry *btree_debug_dir;
- struct btree_debug btree_debug[BTREE_ID_NR];
- struct btree *verify_data;
- struct btree_node *verify_ondisk;
- struct mutex verify_lock;
-
- u64 *unused_inode_hints;
- unsigned inode_shard_bits;
-
- /*
- * A btree node on disk could have too many bsets for an iterator to fit
- * on the stack - have to dynamically allocate them
- */
- mempool_t fill_iter;
-
- mempool_t btree_bounce_pool;
-
- struct journal journal;
- GENRADIX(struct journal_replay *) journal_entries;
- u64 journal_entries_base_seq;
- struct journal_keys journal_keys;
- struct list_head journal_iters;
-
- struct find_btree_nodes found_btree_nodes;
-
- u64 last_bucket_seq_cleanup;
-
- u64 counters_on_mount[BCH_COUNTER_NR];
- u64 __percpu *counters;
-
- unsigned copy_gc_enabled:1;
-
- struct bch2_time_stats times[BCH_TIME_STAT_NR];
-
- struct btree_transaction_stats btree_transaction_stats[BCH_TRANSACTIONS_NR];
-
- /* ERRORS */
- struct list_head fsck_error_msgs;
- struct mutex fsck_error_msgs_lock;
- bool fsck_alloc_msgs_err;
-
- bch_sb_errors_cpu fsck_error_counts;
- struct mutex fsck_error_counts_lock;
-};
-
-extern struct wait_queue_head bch2_read_only_wait;
-
-static inline void bch2_write_ref_get(struct bch_fs *c, enum bch_write_ref ref)
-{
-#ifdef BCH_WRITE_REF_DEBUG
- atomic_long_inc(&c->writes[ref]);
-#else
- percpu_ref_get(&c->writes);
-#endif
-}
-
-static inline bool __bch2_write_ref_tryget(struct bch_fs *c, enum bch_write_ref ref)
-{
-#ifdef BCH_WRITE_REF_DEBUG
- return !test_bit(BCH_FS_going_ro, &c->flags) &&
- atomic_long_inc_not_zero(&c->writes[ref]);
-#else
- return percpu_ref_tryget(&c->writes);
-#endif
-}
-
-static inline bool bch2_write_ref_tryget(struct bch_fs *c, enum bch_write_ref ref)
-{
-#ifdef BCH_WRITE_REF_DEBUG
- return !test_bit(BCH_FS_going_ro, &c->flags) &&
- atomic_long_inc_not_zero(&c->writes[ref]);
-#else
- return percpu_ref_tryget_live(&c->writes);
-#endif
-}
-
-static inline void bch2_write_ref_put(struct bch_fs *c, enum bch_write_ref ref)
-{
-#ifdef BCH_WRITE_REF_DEBUG
- long v = atomic_long_dec_return(&c->writes[ref]);
-
- BUG_ON(v < 0);
- if (v)
- return;
- for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++)
- if (atomic_long_read(&c->writes[i]))
- return;
-
- set_bit(BCH_FS_write_disable_complete, &c->flags);
- wake_up(&bch2_read_only_wait);
-#else
- percpu_ref_put(&c->writes);
-#endif
-}
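
The write-ref helpers above follow a simple tryget/put pattern: take a ref before doing work that requires the filesystem to stay writable, drop it when done, and let the last put signal read-only completion. A minimal sketch of a caller (illustrative only - the ref name BCH_WRITE_REF_trans is taken as an example of an enum bch_write_ref value and the actual work is elided):

/*
 * Sketch only: assumes the surrounding kernel context (struct bch_fs and the
 * helpers above); BCH_WRITE_REF_trans is used purely as an illustrative ref.
 */
static int example_do_write_work(struct bch_fs *c)
{
	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_trans))
		return -EROFS;		/* filesystem is (going) read-only */

	/* ... work that requires the fs to remain writable ... */

	bch2_write_ref_put(c, BCH_WRITE_REF_trans);
	return 0;
}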
-
-static inline bool bch2_ro_ref_tryget(struct bch_fs *c)
-{
- if (test_bit(BCH_FS_stopping, &c->flags))
- return false;
-
- return refcount_inc_not_zero(&c->ro_ref);
-}
-
-static inline void bch2_ro_ref_put(struct bch_fs *c)
-{
- if (refcount_dec_and_test(&c->ro_ref))
- wake_up(&c->ro_ref_wait);
-}
-
-static inline void bch2_set_ra_pages(struct bch_fs *c, unsigned ra_pages)
-{
-#ifndef NO_BCACHEFS_FS
- if (c->vfs_sb)
- c->vfs_sb->s_bdi->ra_pages = ra_pages;
-#endif
-}
-
-static inline unsigned bucket_bytes(const struct bch_dev *ca)
-{
- return ca->mi.bucket_size << 9;
-}
-
-static inline unsigned block_bytes(const struct bch_fs *c)
-{
- return c->opts.block_size;
-}
-
-static inline unsigned block_sectors(const struct bch_fs *c)
-{
- return c->opts.block_size >> 9;
-}
-
-static inline bool btree_id_cached(const struct bch_fs *c, enum btree_id btree)
-{
- return c->btree_key_cache_btrees & (1U << btree);
-}
-
-static inline struct timespec64 bch2_time_to_timespec(const struct bch_fs *c, s64 time)
-{
- struct timespec64 t;
- s64 sec;
- s32 rem;
-
- time += c->sb.time_base_lo;
-
- sec = div_s64_rem(time, c->sb.time_units_per_sec, &rem);
-
- set_normalized_timespec64(&t, sec, rem * (s64)c->sb.nsec_per_time_unit);
-
- return t;
-}
-
-static inline s64 timespec_to_bch2_time(const struct bch_fs *c, struct timespec64 ts)
-{
- return (ts.tv_sec * c->sb.time_units_per_sec +
- (int) ts.tv_nsec / c->sb.nsec_per_time_unit) - c->sb.time_base_lo;
-}
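
The two helpers above are inverses up to time-unit truncation. A standalone sketch of the same arithmetic, assuming for illustration that time_base_lo is 0 and one time unit equals one nanosecond (the real values come from the superblock fields used above):

#include <assert.h>
#include <stdint.h>
#include <time.h>

/* Illustrative constants - the real values come from struct bch_sb: */
#define TIME_UNITS_PER_SEC	1000000000LL	/* one time unit == 1 ns */
#define NSEC_PER_TIME_UNIT	1LL
#define TIME_BASE_LO		0LL

static struct timespec example_time_to_timespec(int64_t t)
{
	t += TIME_BASE_LO;
	return (struct timespec) {
		.tv_sec  = t / TIME_UNITS_PER_SEC,
		.tv_nsec = (t % TIME_UNITS_PER_SEC) * NSEC_PER_TIME_UNIT,
	};
}

static int64_t example_timespec_to_time(struct timespec ts)
{
	return (ts.tv_sec * TIME_UNITS_PER_SEC +
		ts.tv_nsec / NSEC_PER_TIME_UNIT) - TIME_BASE_LO;
}

int main(void)
{
	struct timespec ts = { .tv_sec = 1700000000, .tv_nsec = 123456789 };
	struct timespec back = example_time_to_timespec(example_timespec_to_time(ts));

	/* With 1 ns time units the round trip is exact: */
	assert(back.tv_sec == ts.tv_sec && back.tv_nsec == ts.tv_nsec);
	return 0;
}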
-
-static inline s64 bch2_current_time(const struct bch_fs *c)
-{
- struct timespec64 now;
-
- ktime_get_coarse_real_ts64(&now);
- return timespec_to_bch2_time(c, now);
-}
-
-static inline u64 bch2_current_io_time(const struct bch_fs *c, int rw)
-{
- return max(1ULL, (u64) atomic64_read(&c->io_clock[rw].now) & LRU_TIME_MAX);
-}
-
-static inline struct stdio_redirect *bch2_fs_stdio_redirect(struct bch_fs *c)
-{
- struct stdio_redirect *stdio = c->stdio;
-
- if (c->stdio_filter && c->stdio_filter != current)
- stdio = NULL;
- return stdio;
-}
-
-static inline unsigned metadata_replicas_required(struct bch_fs *c)
-{
- return min(c->opts.metadata_replicas,
- c->opts.metadata_replicas_required);
-}
-
-static inline unsigned data_replicas_required(struct bch_fs *c)
-{
- return min(c->opts.data_replicas,
- c->opts.data_replicas_required);
-}
-
-#define BKEY_PADDED_ONSTACK(key, pad) \
- struct { struct bkey_i key; __u64 key ## _pad[pad]; }
-
-#endif /* _BCACHEFS_H */
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
deleted file mode 100644
index 5004f6ba997c..000000000000
--- a/fs/bcachefs/bcachefs_format.h
+++ /dev/null
@@ -1,1465 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_FORMAT_H
-#define _BCACHEFS_FORMAT_H
-
-/*
- * bcachefs on disk data structures
- *
- * OVERVIEW:
- *
- * There are three main types of on disk data structures in bcachefs (this is
- * reduced from 5 in bcache)
- *
- * - superblock
- * - journal
- * - btree
- *
- * The btree is the primary structure; most metadata exists as keys in the
- * various btrees. There are only a small number of btrees, they're not
- * sharded - we have one btree for extents, another for inodes, et cetera.
- *
- * SUPERBLOCK:
- *
- * The superblock contains the location of the journal, the list of devices in
- * the filesystem, and in general any metadata we need in order to decide
- * whether we can start a filesystem or prior to reading the journal/btree
- * roots.
- *
- * The superblock is extensible, and most of the contents of the superblock are
- * in variable length, type tagged fields; see struct bch_sb_field.
- *
- * Backup superblocks do not reside in a fixed location; also, superblocks do
- * not have a fixed size. To locate backup superblocks we have struct
- * bch_sb_layout; we store a copy of this inside every superblock, and also
- * before the first superblock.
- *
- * JOURNAL:
- *
- * The journal primarily records btree updates in the order they occurred;
- * journal replay consists of just iterating over all the keys in the open
- * journal entries and re-inserting them into the btrees.
- *
- * The journal also contains entry types for the btree roots, and blacklisted
- * journal sequence numbers (see journal_seq_blacklist.c).
- *
- * BTREE:
- *
- * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
- * 128k-256k) and log structured. We use struct btree_node for writing the first
- * entry in a given node (offset 0), and struct btree_node_entry for all
- * subsequent writes.
- *
- * After the header, btree node entries contain a list of keys in sorted order.
- * Values are stored inline with the keys; since values are variable length (and
- * keys effectively are variable length too, due to packing) we can't do random
- * access without building up additional in memory tables in the btree node read
- * path.
- *
- * BTREE KEYS (struct bkey):
- *
- * The various btrees share a common format for the key - so as to avoid
- * switching in fastpath lookup/comparison code - but define their own
- * structures for the key values.
- *
- * The size of a key/value pair is stored as a u8 in units of u64s, so the max
- * size is just under 2k. The common part also contains a type tag for the
- * value, and a format field indicating whether the key is packed or not (and
- * also meant to allow adding new key fields in the future, if desired).
- *
- * bkeys, when stored within a btree node, may also be packed. In that case, the
- * bkey_format in that node is used to unpack it. Packed bkeys mean that we can
- * be generous with field sizes in the common part of the key format (64 bit
- * inode number, 64 bit offset, 96 bit version field, etc.) for negligible cost.
- */
-
-#include <asm/types.h>
-#include <asm/byteorder.h>
-#include <linux/kernel.h>
-#include <linux/uuid.h>
-#include <uapi/linux/magic.h>
-#include "vstructs.h"
-
-#ifdef __KERNEL__
-typedef uuid_t __uuid_t;
-#endif
-
-#define BITMASK(name, type, field, offset, end) \
-static const __maybe_unused unsigned name##_OFFSET = offset; \
-static const __maybe_unused unsigned name##_BITS = (end - offset); \
- \
-static inline __u64 name(const type *k) \
-{ \
- return (k->field >> offset) & ~(~0ULL << (end - offset)); \
-} \
- \
-static inline void SET_##name(type *k, __u64 v) \
-{ \
- k->field &= ~(~(~0ULL << (end - offset)) << offset); \
- k->field |= (v & ~(~0ULL << (end - offset))) << offset; \
-}
-
-#define LE_BITMASK(_bits, name, type, field, offset, end) \
-static const __maybe_unused unsigned name##_OFFSET = offset; \
-static const __maybe_unused unsigned name##_BITS = (end - offset); \
-static const __maybe_unused __u##_bits name##_MAX = (1ULL << (end - offset)) - 1;\
- \
-static inline __u64 name(const type *k) \
-{ \
- return (__le##_bits##_to_cpu(k->field) >> offset) & \
- ~(~0ULL << (end - offset)); \
-} \
- \
-static inline void SET_##name(type *k, __u64 v) \
-{ \
- __u##_bits new = __le##_bits##_to_cpu(k->field); \
- \
- new &= ~(~(~0ULL << (end - offset)) << offset); \
- new |= (v & ~(~0ULL << (end - offset))) << offset; \
- k->field = __cpu_to_le##_bits(new); \
-}
-
-#define LE16_BITMASK(n, t, f, o, e) LE_BITMASK(16, n, t, f, o, e)
-#define LE32_BITMASK(n, t, f, o, e) LE_BITMASK(32, n, t, f, o, e)
-#define LE64_BITMASK(n, t, f, o, e) LE_BITMASK(64, n, t, f, o, e)
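
As a quick illustration of what the BITMASK()/LE_BITMASK() generators above produce, the following standalone sketch applies BITMASK() to a hypothetical struct and field (not part of the on-disk format); the generator is copied verbatim so the snippet compiles on its own:

#include <assert.h>
#include <stdint.h>

typedef uint64_t __u64;
#define __maybe_unused __attribute__((unused))

/* Copy of the BITMASK() generator above, so this sketch stands alone: */
#define BITMASK(name, type, field, offset, end)				\
static const __maybe_unused unsigned	name##_OFFSET = offset;		\
static const __maybe_unused unsigned	name##_BITS = (end - offset);	\
									\
static inline __u64 name(const type *k)					\
{									\
	return (k->field >> offset) & ~(~0ULL << (end - offset));	\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	k->field &= ~(~(~0ULL << (end - offset)) << offset);		\
	k->field |= (v & ~(~0ULL << (end - offset))) << offset;	\
}

/* Hypothetical struct and field, purely for illustration: */
struct example { __u64 flags; };

BITMASK(EXAMPLE_MODE, struct example, flags, 4, 8)	/* bits 4..7 of flags */

int main(void)
{
	struct example e = { .flags = 0 };

	SET_EXAMPLE_MODE(&e, 0xb);
	assert(EXAMPLE_MODE(&e) == 0xb);
	assert(e.flags == 0xb0);	/* the value lives in bits 4..7 */
	return 0;
}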
-
-struct bkey_format {
- __u8 key_u64s;
- __u8 nr_fields;
- /* One unused slot for now: */
- __u8 bits_per_field[6];
- __le64 field_offset[6];
-};
-
-/* Btree keys - all units are in sectors */
-
-struct bpos {
- /*
- * Word order matches machine byte order - btree code treats a bpos as a
- * single large integer, for search/comparison purposes
- *
- * Note that wherever a bpos is embedded in another on disk data
- * structure, it has to be byte swabbed when reading in metadata that
- * wasn't written in native endian order:
- */
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- __u32 snapshot;
- __u64 offset;
- __u64 inode;
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
- __u64 inode;
- __u64 offset; /* Points to end of extent - sectors */
- __u32 snapshot;
-#else
-#error edit for your odd byteorder.
-#endif
-} __packed
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-__aligned(4)
-#endif
-;
-
-#define KEY_INODE_MAX ((__u64)~0ULL)
-#define KEY_OFFSET_MAX ((__u64)~0ULL)
-#define KEY_SNAPSHOT_MAX ((__u32)~0U)
-#define KEY_SIZE_MAX ((__u32)~0U)
-
-static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
-{
- return (struct bpos) {
- .inode = inode,
- .offset = offset,
- .snapshot = snapshot,
- };
-}
-
-#define POS_MIN SPOS(0, 0, 0)
-#define POS_MAX SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
-#define SPOS_MAX SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
-#define POS(_inode, _offset) SPOS(_inode, _offset, 0)
-
-/* Empty placeholder struct, for container_of() */
-struct bch_val {
- __u64 __nothing[0];
-};
-
-struct bversion {
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- __u64 lo;
- __u32 hi;
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
- __u32 hi;
- __u64 lo;
-#endif
-} __packed
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-__aligned(4)
-#endif
-;
-
-struct bkey {
- /* Size of combined key and value, in u64s */
- __u8 u64s;
-
- /* Format of key (0 for format local to btree node) */
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 format:7,
- needs_whiteout:1;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- __u8 needs_whiteout:1,
- format:7;
-#else
-#error edit for your odd byteorder.
-#endif
-
- /* Type of the value */
- __u8 type;
-
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- __u8 pad[1];
-
- struct bversion bversion;
- __u32 size; /* extent size, in sectors */
- struct bpos p;
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
- struct bpos p;
- __u32 size; /* extent size, in sectors */
- struct bversion bversion;
-
- __u8 pad[1];
-#endif
-} __packed
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-/*
- * The big-endian version of bkey can't be compiled by rustc with the "aligned"
- * attr since it doesn't allow types to have both "packed" and "aligned" attrs.
- * So for Rust compatibility, don't include this. It can be included in the LE
- * version because the "packed" attr is redundant in that case.
- *
- * History: (quoting Kent)
- *
- * Specifically, when i was designing bkey, I wanted the header to be no
- * bigger than necessary so that bkey_packed could use the rest. That means that
- * decently often extent keys will fit into only 8 bytes, instead of spilling over
- * to 16.
- *
- * But packed_bkey treats the part after the header - the packed section -
- * as a single multi word, variable length integer. And bkey, the unpacked
- * version, is just a special case version of a bkey_packed; all the packed
- * bkey code will work on keys in any packed format, the in-memory
- * representation of an unpacked key also is just one type of packed key...
- *
- * So that constrains the key part of a big endian bkey to start right
- * after the header.
- *
- * If we ever do a bkey_v2 and need to expand the header by another byte for
- * some reason - that will clean up this wart.
- */
-__aligned(8)
-#endif
-;
-
-struct bkey_packed {
- __u64 _data[0];
-
- /* Size of combined key and value, in u64s */
- __u8 u64s;
-
- /* Format of key (0 for format local to btree node) */
-
- /*
- * XXX: next incompat on disk format change, switch format and
- * needs_whiteout - bkey_packed() will be cheaper if format is the high
- * bits of the bitfield
- */
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 format:7,
- needs_whiteout:1;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- __u8 needs_whiteout:1,
- format:7;
-#endif
-
- /* Type of the value */
- __u8 type;
- __u8 key_start[0];
-
- /*
- * We copy bkeys with struct assignment in various places, and while
- * that shouldn't be done with packed bkeys we can't disallow it in C,
- * and it's legal to cast a bkey to a bkey_packed - so padding it out
- * to the same size as struct bkey should hopefully be safest.
- */
- __u8 pad[sizeof(struct bkey) - 3];
-} __packed __aligned(8);
-
-typedef struct {
- __le64 lo;
- __le64 hi;
-} bch_le128;
-
-#define BKEY_U64s (sizeof(struct bkey) / sizeof(__u64))
-#define BKEY_U64s_MAX U8_MAX
-#define BKEY_VAL_U64s_MAX (BKEY_U64s_MAX - BKEY_U64s)
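
To put numbers on the limits above (a worked example; the 40-byte figure assumes struct bkey packs exactly as laid out earlier in this file):

	BKEY_U64s         = sizeof(struct bkey) / 8 = 40 / 8 = 5 u64s
	BKEY_U64s_MAX     = U8_MAX = 255 u64s  ->  255 * 8 = 2040 bytes ("just under 2k")
	BKEY_VAL_U64s_MAX = 255 - 5 = 250 u64s ->  2000 bytes available for the value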
-
-#define KEY_PACKED_BITS_START 24
-
-#define KEY_FORMAT_LOCAL_BTREE 0
-#define KEY_FORMAT_CURRENT 1
-
-enum bch_bkey_fields {
- BKEY_FIELD_INODE,
- BKEY_FIELD_OFFSET,
- BKEY_FIELD_SNAPSHOT,
- BKEY_FIELD_SIZE,
- BKEY_FIELD_VERSION_HI,
- BKEY_FIELD_VERSION_LO,
- BKEY_NR_FIELDS,
-};
-
-#define bkey_format_field(name, field) \
- [BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)
-
-#define BKEY_FORMAT_CURRENT \
-((struct bkey_format) { \
- .key_u64s = BKEY_U64s, \
- .nr_fields = BKEY_NR_FIELDS, \
- .bits_per_field = { \
- bkey_format_field(INODE, p.inode), \
- bkey_format_field(OFFSET, p.offset), \
- bkey_format_field(SNAPSHOT, p.snapshot), \
- bkey_format_field(SIZE, size), \
- bkey_format_field(VERSION_HI, bversion.hi), \
- bkey_format_field(VERSION_LO, bversion.lo), \
- }, \
-})
-
-/* bkey with inline value */
-struct bkey_i {
- __u64 _data[0];
-
- struct bkey k;
- struct bch_val v;
-};
-
-#define POS_KEY(_pos) \
-((struct bkey) { \
- .u64s = BKEY_U64s, \
- .format = KEY_FORMAT_CURRENT, \
- .p = _pos, \
-})
-
-#define KEY(_inode, _offset, _size) \
-((struct bkey) { \
- .u64s = BKEY_U64s, \
- .format = KEY_FORMAT_CURRENT, \
- .p = POS(_inode, _offset), \
- .size = _size, \
-})
-
-static inline void bkey_init(struct bkey *k)
-{
- *k = KEY(0, 0, 0);
-}
-
-#define bkey_bytes(_k) ((_k)->u64s * sizeof(__u64))
-
-#define __BKEY_PADDED(key, pad) \
- struct bkey_i key; __u64 key ## _pad[pad]
-
-/*
- * - DELETED keys are used internally to mark keys that should be ignored but
- * override keys in composition order. Their version number is ignored.
- *
- * - DISCARDED keys indicate that the data is all 0s because it has been
- * discarded. DISCARDs may have a version; if the version is nonzero the key
- * will be persistent, otherwise the key will be dropped whenever the btree
- * node is rewritten (like DELETED keys).
- *
- * - ERROR: any read of the data returns a read error, as the data was lost due
- * to a failing device. Like DISCARDED keys, they can be removed (overridden)
- * by new writes or cluster-wide GC. Node repair can also overwrite them with
- * the same or a more recent version number, but not with an older version
- * number.
- *
- * - WHITEOUT: for hash table btrees
- */
-#define BCH_BKEY_TYPES() \
- x(deleted, 0) \
- x(whiteout, 1) \
- x(error, 2) \
- x(cookie, 3) \
- x(hash_whiteout, 4) \
- x(btree_ptr, 5) \
- x(extent, 6) \
- x(reservation, 7) \
- x(inode, 8) \
- x(inode_generation, 9) \
- x(dirent, 10) \
- x(xattr, 11) \
- x(alloc, 12) \
- x(quota, 13) \
- x(stripe, 14) \
- x(reflink_p, 15) \
- x(reflink_v, 16) \
- x(inline_data, 17) \
- x(btree_ptr_v2, 18) \
- x(indirect_inline_data, 19) \
- x(alloc_v2, 20) \
- x(subvolume, 21) \
- x(snapshot, 22) \
- x(inode_v2, 23) \
- x(alloc_v3, 24) \
- x(set, 25) \
- x(lru, 26) \
- x(alloc_v4, 27) \
- x(backpointer, 28) \
- x(inode_v3, 29) \
- x(bucket_gens, 30) \
- x(snapshot_tree, 31) \
- x(logged_op_truncate, 32) \
- x(logged_op_finsert, 33) \
- x(accounting, 34)
-
-enum bch_bkey_type {
-#define x(name, nr) KEY_TYPE_##name = nr,
- BCH_BKEY_TYPES()
-#undef x
- KEY_TYPE_MAX,
-};
-
-struct bch_deleted {
- struct bch_val v;
-};
-
-struct bch_whiteout {
- struct bch_val v;
-};
-
-struct bch_error {
- struct bch_val v;
-};
-
-struct bch_cookie {
- struct bch_val v;
- __le64 cookie;
-};
-
-struct bch_hash_whiteout {
- struct bch_val v;
-};
-
-struct bch_set {
- struct bch_val v;
-};
-
-/* 128 bits, sufficient for cryptographic MACs: */
-struct bch_csum {
- __le64 lo;
- __le64 hi;
-} __packed __aligned(8);
-
-struct bch_backpointer {
- struct bch_val v;
- __u8 btree_id;
- __u8 level;
- __u8 data_type;
- __u64 bucket_offset:40;
- __u32 bucket_len;
- struct bpos pos;
-} __packed __aligned(8);
-
-/* Optional/variable size superblock sections: */
-
-struct bch_sb_field {
- __u64 _data[0];
- __le32 u64s;
- __le32 type;
-};
-
-#define BCH_SB_FIELDS() \
- x(journal, 0) \
- x(members_v1, 1) \
- x(crypt, 2) \
- x(replicas_v0, 3) \
- x(quota, 4) \
- x(disk_groups, 5) \
- x(clean, 6) \
- x(replicas, 7) \
- x(journal_seq_blacklist, 8) \
- x(journal_v2, 9) \
- x(counters, 10) \
- x(members_v2, 11) \
- x(errors, 12) \
- x(ext, 13) \
- x(downgrade, 14)
-
-#include "alloc_background_format.h"
-#include "dirent_format.h"
-#include "disk_accounting_format.h"
-#include "disk_groups_format.h"
-#include "extents_format.h"
-#include "ec_format.h"
-#include "dirent_format.h"
-#include "disk_groups_format.h"
-#include "inode_format.h"
-#include "journal_seq_blacklist_format.h"
-#include "logged_ops_format.h"
-#include "lru_format.h"
-#include "quota_format.h"
-#include "reflink_format.h"
-#include "replicas_format.h"
-#include "snapshot_format.h"
-#include "subvolume_format.h"
-#include "sb-counters_format.h"
-#include "sb-downgrade_format.h"
-#include "sb-errors_format.h"
-#include "sb-members_format.h"
-#include "xattr_format.h"
-
-enum bch_sb_field_type {
-#define x(f, nr) BCH_SB_FIELD_##f = nr,
- BCH_SB_FIELDS()
-#undef x
- BCH_SB_FIELD_NR
-};
-
-/*
- * Most superblock fields are replicated in all devices' superblocks - a few are
- * not:
- */
-#define BCH_SINGLE_DEVICE_SB_FIELDS \
- ((1U << BCH_SB_FIELD_journal)| \
- (1U << BCH_SB_FIELD_journal_v2))
-
-/* BCH_SB_FIELD_journal: */
-
-struct bch_sb_field_journal {
- struct bch_sb_field field;
- __le64 buckets[];
-};
-
-struct bch_sb_field_journal_v2 {
- struct bch_sb_field field;
-
- struct bch_sb_field_journal_v2_entry {
- __le64 start;
- __le64 nr;
- } d[];
-};
-
-/* BCH_SB_FIELD_crypt: */
-
-struct nonce {
- __le32 d[4];
-};
-
-struct bch_key {
- __le64 key[4];
-};
-
-#define BCH_KEY_MAGIC \
- (((__u64) 'b' << 0)|((__u64) 'c' << 8)| \
- ((__u64) 'h' << 16)|((__u64) '*' << 24)| \
- ((__u64) '*' << 32)|((__u64) 'k' << 40)| \
- ((__u64) 'e' << 48)|((__u64) 'y' << 56))
-
-struct bch_encrypted_key {
- __le64 magic;
- struct bch_key key;
-};
-
-/*
- * If this field is present in the superblock, it stores an encryption key which
- * is used to encrypt all other data/metadata. The key will normally be encrypted
- * with the key userspace provides, but if encryption has been turned off we'll
- * just store the master key unencrypted in the superblock so we can access the
- * previously encrypted data.
- */
-struct bch_sb_field_crypt {
- struct bch_sb_field field;
-
- __le64 flags;
- __le64 kdf_flags;
- struct bch_encrypted_key key;
-};
-
-LE64_BITMASK(BCH_CRYPT_KDF_TYPE, struct bch_sb_field_crypt, flags, 0, 4);
-
-enum bch_kdf_types {
- BCH_KDF_SCRYPT = 0,
- BCH_KDF_NR = 1,
-};
-
-/* stored as base 2 log of scrypt params: */
-LE64_BITMASK(BCH_KDF_SCRYPT_N, struct bch_sb_field_crypt, kdf_flags, 0, 16);
-LE64_BITMASK(BCH_KDF_SCRYPT_R, struct bch_sb_field_crypt, kdf_flags, 16, 32);
-LE64_BITMASK(BCH_KDF_SCRYPT_P, struct bch_sb_field_crypt, kdf_flags, 32, 48);
-
-/*
- * On clean shutdown, store btree roots and current journal sequence number in
- * the superblock:
- */
-struct jset_entry {
- __le16 u64s;
- __u8 btree_id;
- __u8 level;
- __u8 type; /* designates what this jset holds */
- __u8 pad[3];
-
- struct bkey_i start[0];
- __u64 _data[];
-};
-
-struct bch_sb_field_clean {
- struct bch_sb_field field;
-
- __le32 flags;
- __le16 _read_clock; /* no longer used */
- __le16 _write_clock;
- __le64 journal_seq;
-
- struct jset_entry start[0];
- __u64 _data[];
-};
-
-struct bch_sb_field_ext {
- struct bch_sb_field field;
- __le64 recovery_passes_required[2];
- __le64 errors_silent[8];
- __le64 btrees_lost_data;
-};
-
-/* Superblock: */
-
-/*
- * New versioning scheme:
- * One common version number for all on disk data structures - superblock, btree
- * nodes, journal entries
- */
-#define BCH_VERSION_MAJOR(_v) ((__u16) ((_v) >> 10))
-#define BCH_VERSION_MINOR(_v) ((__u16) ((_v) & ~(~0U << 10)))
-#define BCH_VERSION(_major, _minor) (((_major) << 10)|(_minor) << 0)
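
A quick worked example of the major/minor packing above; the macros are repeated in standalone form (substituting unsigned short for __u16, same arithmetic) so the sketch can be compiled and checked on its own:

#include <assert.h>

#define BCH_VERSION_MAJOR(_v)		((unsigned short) ((_v) >> 10))
#define BCH_VERSION_MINOR(_v)		((unsigned short) ((_v) & ~(~0U << 10)))
#define BCH_VERSION(_major, _minor)	(((_major) << 10)|(_minor) << 0)

int main(void)
{
	unsigned v = BCH_VERSION(1, 7);		/* (1 << 10) | 7 == 1031 */

	assert(v == 1031);
	assert(BCH_VERSION_MAJOR(v) == 1);
	assert(BCH_VERSION_MINOR(v) == 7);
	return 0;
}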
-
-/*
- * field 1: version name
- * field 2: BCH_VERSION(major, minor)
- * field 3:		recovery passes required on upgrade
- */
-#define BCH_METADATA_VERSIONS() \
- x(bkey_renumber, BCH_VERSION(0, 10)) \
- x(inode_btree_change, BCH_VERSION(0, 11)) \
- x(snapshot, BCH_VERSION(0, 12)) \
- x(inode_backpointers, BCH_VERSION(0, 13)) \
- x(btree_ptr_sectors_written, BCH_VERSION(0, 14)) \
- x(snapshot_2, BCH_VERSION(0, 15)) \
- x(reflink_p_fix, BCH_VERSION(0, 16)) \
- x(subvol_dirent, BCH_VERSION(0, 17)) \
- x(inode_v2, BCH_VERSION(0, 18)) \
- x(freespace, BCH_VERSION(0, 19)) \
- x(alloc_v4, BCH_VERSION(0, 20)) \
- x(new_data_types, BCH_VERSION(0, 21)) \
- x(backpointers, BCH_VERSION(0, 22)) \
- x(inode_v3, BCH_VERSION(0, 23)) \
- x(unwritten_extents, BCH_VERSION(0, 24)) \
- x(bucket_gens, BCH_VERSION(0, 25)) \
- x(lru_v2, BCH_VERSION(0, 26)) \
- x(fragmentation_lru, BCH_VERSION(0, 27)) \
- x(no_bps_in_alloc_keys, BCH_VERSION(0, 28)) \
- x(snapshot_trees, BCH_VERSION(0, 29)) \
- x(major_minor, BCH_VERSION(1, 0)) \
- x(snapshot_skiplists, BCH_VERSION(1, 1)) \
- x(deleted_inodes, BCH_VERSION(1, 2)) \
- x(rebalance_work, BCH_VERSION(1, 3)) \
- x(member_seq, BCH_VERSION(1, 4)) \
- x(subvolume_fs_parent, BCH_VERSION(1, 5)) \
- x(btree_subvolume_children, BCH_VERSION(1, 6)) \
- x(mi_btree_bitmap, BCH_VERSION(1, 7)) \
- x(bucket_stripe_sectors, BCH_VERSION(1, 8)) \
- x(disk_accounting_v2, BCH_VERSION(1, 9)) \
- x(disk_accounting_v3, BCH_VERSION(1, 10)) \
- x(disk_accounting_inum, BCH_VERSION(1, 11)) \
- x(rebalance_work_acct_fix, BCH_VERSION(1, 12)) \
- x(inode_has_child_snapshots, BCH_VERSION(1, 13))
-
-enum bcachefs_metadata_version {
- bcachefs_metadata_version_min = 9,
-#define x(t, n) bcachefs_metadata_version_##t = n,
- BCH_METADATA_VERSIONS()
-#undef x
- bcachefs_metadata_version_max
-};
-
-static const __maybe_unused
-unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_rebalance_work;
-
-#define bcachefs_metadata_version_current (bcachefs_metadata_version_max - 1)
-
-#define BCH_SB_SECTOR 8
-
-#define BCH_SB_LAYOUT_SIZE_BITS_MAX 16 /* 32 MB */
-
-struct bch_sb_layout {
- __uuid_t magic; /* bcachefs superblock UUID */
- __u8 layout_type;
- __u8 sb_max_size_bits; /* base 2 of 512 byte sectors */
- __u8 nr_superblocks;
- __u8 pad[5];
- __le64 sb_offset[61];
-} __packed __aligned(8);
-
-#define BCH_SB_LAYOUT_SECTOR 7
-
-/*
- * @offset - sector where this sb was written
- * @version - on disk format version
- * @version_min - Oldest metadata version this filesystem contains; so we can
- * safely drop compatibility code and refuse to mount filesystems
- * we'd need it for
- * @magic - identifies as a bcachefs superblock (BCHFS_MAGIC)
- * @seq		- identifies most recent superblock, incremented each time
- *		  superblock is written
- * @uuid - used for generating various magic numbers and identifying
- * member devices, never changes
- * @user_uuid - user visible UUID, may be changed
- * @label - filesystem label
- * @features - enabled incompatible features
- */
-struct bch_sb {
- struct bch_csum csum;
- __le16 version;
- __le16 version_min;
- __le16 pad[2];
- __uuid_t magic;
- __uuid_t uuid;
- __uuid_t user_uuid;
- __u8 label[BCH_SB_LABEL_SIZE];
- __le64 offset;
- __le64 seq;
-
- __le16 block_size;
- __u8 dev_idx;
- __u8 nr_devices;
- __le32 u64s;
-
- __le64 time_base_lo;
- __le32 time_base_hi;
- __le32 time_precision;
-
- __le64 flags[7];
- __le64 write_time;
- __le64 features[2];
- __le64 compat[2];
-
- struct bch_sb_layout layout;
-
- struct bch_sb_field start[0];
- __le64 _data[];
-} __packed __aligned(8);
-
-/*
- * Flags:
- * BCH_SB_INITIALIZED	- set on first mount
- * BCH_SB_CLEAN - did we shut down cleanly? Just a hint, doesn't affect
- * behaviour of mount/recovery path:
- * BCH_SB_INODE_32BIT - limit inode numbers to 32 bits
- * BCH_SB_128_BIT_MACS - 128 bit macs instead of 80
- * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
- * DATA/META_CSUM_TYPE. Also indicates encryption
- * algorithm in use, if/when we get more than one
- */
-
-LE16_BITMASK(BCH_SB_BLOCK_SIZE, struct bch_sb, block_size, 0, 16);
-
-LE64_BITMASK(BCH_SB_INITIALIZED, struct bch_sb, flags[0], 0, 1);
-LE64_BITMASK(BCH_SB_CLEAN, struct bch_sb, flags[0], 1, 2);
-LE64_BITMASK(BCH_SB_CSUM_TYPE, struct bch_sb, flags[0], 2, 8);
-LE64_BITMASK(BCH_SB_ERROR_ACTION, struct bch_sb, flags[0], 8, 12);
-
-LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE, struct bch_sb, flags[0], 12, 28);
-
-LE64_BITMASK(BCH_SB_GC_RESERVE, struct bch_sb, flags[0], 28, 33);
-LE64_BITMASK(BCH_SB_ROOT_RESERVE, struct bch_sb, flags[0], 33, 40);
-
-LE64_BITMASK(BCH_SB_META_CSUM_TYPE, struct bch_sb, flags[0], 40, 44);
-LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE, struct bch_sb, flags[0], 44, 48);
-
-LE64_BITMASK(BCH_SB_META_REPLICAS_WANT, struct bch_sb, flags[0], 48, 52);
-LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT, struct bch_sb, flags[0], 52, 56);
-
-LE64_BITMASK(BCH_SB_POSIX_ACL, struct bch_sb, flags[0], 56, 57);
-LE64_BITMASK(BCH_SB_USRQUOTA, struct bch_sb, flags[0], 57, 58);
-LE64_BITMASK(BCH_SB_GRPQUOTA, struct bch_sb, flags[0], 58, 59);
-LE64_BITMASK(BCH_SB_PRJQUOTA, struct bch_sb, flags[0], 59, 60);
-
-LE64_BITMASK(BCH_SB_HAS_ERRORS, struct bch_sb, flags[0], 60, 61);
-LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);
-
-LE64_BITMASK(BCH_SB_BIG_ENDIAN, struct bch_sb, flags[0], 62, 63);
-LE64_BITMASK(BCH_SB_PROMOTE_WHOLE_EXTENTS,
- struct bch_sb, flags[0], 63, 64);
-
-LE64_BITMASK(BCH_SB_STR_HASH_TYPE, struct bch_sb, flags[1], 0, 4);
-LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_LO,struct bch_sb, flags[1], 4, 8);
-LE64_BITMASK(BCH_SB_INODE_32BIT, struct bch_sb, flags[1], 8, 9);
-
-LE64_BITMASK(BCH_SB_128_BIT_MACS, struct bch_sb, flags[1], 9, 10);
-LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE, struct bch_sb, flags[1], 10, 14);
-
-/*
- * Max size of an extent that may require bouncing to read or write
- * (checksummed, compressed): 64k
- */
-LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
- struct bch_sb, flags[1], 14, 20);
-
-LE64_BITMASK(BCH_SB_META_REPLICAS_REQ, struct bch_sb, flags[1], 20, 24);
-LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ, struct bch_sb, flags[1], 24, 28);
-
-LE64_BITMASK(BCH_SB_PROMOTE_TARGET, struct bch_sb, flags[1], 28, 40);
-LE64_BITMASK(BCH_SB_FOREGROUND_TARGET, struct bch_sb, flags[1], 40, 52);
-LE64_BITMASK(BCH_SB_BACKGROUND_TARGET, struct bch_sb, flags[1], 52, 64);
-
-LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO,
- struct bch_sb, flags[2], 0, 4);
-LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES, struct bch_sb, flags[2], 4, 64);
-
-LE64_BITMASK(BCH_SB_ERASURE_CODE, struct bch_sb, flags[3], 0, 16);
-LE64_BITMASK(BCH_SB_METADATA_TARGET, struct bch_sb, flags[3], 16, 28);
-LE64_BITMASK(BCH_SB_SHARD_INUMS, struct bch_sb, flags[3], 28, 29);
-LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
-LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
-LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
-LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
-LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
-LE64_BITMASK(BCH_SB_NOCOW, struct bch_sb, flags[4], 33, 34);
-LE64_BITMASK(BCH_SB_WRITE_BUFFER_SIZE, struct bch_sb, flags[4], 34, 54);
-LE64_BITMASK(BCH_SB_VERSION_UPGRADE, struct bch_sb, flags[4], 54, 56);
-
-LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_HI,struct bch_sb, flags[4], 56, 60);
-LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI,
- struct bch_sb, flags[4], 60, 64);
-
-LE64_BITMASK(BCH_SB_VERSION_UPGRADE_COMPLETE,
- struct bch_sb, flags[5], 0, 16);
-LE64_BITMASK(BCH_SB_ALLOCATOR_STUCK_TIMEOUT,
- struct bch_sb, flags[5], 16, 32);
-
-static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb)
-{
- return BCH_SB_COMPRESSION_TYPE_LO(sb) | (BCH_SB_COMPRESSION_TYPE_HI(sb) << 4);
-}
-
-static inline void SET_BCH_SB_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
-{
- SET_BCH_SB_COMPRESSION_TYPE_LO(sb, v);
- SET_BCH_SB_COMPRESSION_TYPE_HI(sb, v >> 4);
-}
-
-static inline __u64 BCH_SB_BACKGROUND_COMPRESSION_TYPE(const struct bch_sb *sb)
-{
- return BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb) |
- (BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb) << 4);
-}
-
-static inline void SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
-{
- SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb, v);
- SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb, v >> 4);
-}
-
-/*
- * Features:
- *
- * journal_seq_blacklist_v3: gates BCH_SB_FIELD_journal_seq_blacklist
- * reflink: gates KEY_TYPE_reflink
- * inline_data: gates KEY_TYPE_inline_data
- * new_siphash: gates BCH_STR_HASH_siphash
- * new_extent_overwrite: gates BTREE_NODE_NEW_EXTENT_OVERWRITE
- */
-#define BCH_SB_FEATURES() \
- x(lz4, 0) \
- x(gzip, 1) \
- x(zstd, 2) \
- x(atomic_nlink, 3) \
- x(ec, 4) \
- x(journal_seq_blacklist_v3, 5) \
- x(reflink, 6) \
- x(new_siphash, 7) \
- x(inline_data, 8) \
- x(new_extent_overwrite, 9) \
- x(incompressible, 10) \
- x(btree_ptr_v2, 11) \
- x(extents_above_btree_updates, 12) \
- x(btree_updates_journalled, 13) \
- x(reflink_inline_data, 14) \
- x(new_varint, 15) \
- x(journal_no_flush, 16) \
- x(alloc_v2, 17) \
- x(extents_across_btree_nodes, 18)
-
-#define BCH_SB_FEATURES_ALWAYS \
- ((1ULL << BCH_FEATURE_new_extent_overwrite)| \
- (1ULL << BCH_FEATURE_extents_above_btree_updates)|\
- (1ULL << BCH_FEATURE_btree_updates_journalled)|\
- (1ULL << BCH_FEATURE_alloc_v2)|\
- (1ULL << BCH_FEATURE_extents_across_btree_nodes))
-
-#define BCH_SB_FEATURES_ALL \
- (BCH_SB_FEATURES_ALWAYS| \
- (1ULL << BCH_FEATURE_new_siphash)| \
- (1ULL << BCH_FEATURE_btree_ptr_v2)| \
- (1ULL << BCH_FEATURE_new_varint)| \
- (1ULL << BCH_FEATURE_journal_no_flush))
-
-enum bch_sb_feature {
-#define x(f, n) BCH_FEATURE_##f,
- BCH_SB_FEATURES()
-#undef x
- BCH_FEATURE_NR,
-};
-
-#define BCH_SB_COMPAT() \
- x(alloc_info, 0) \
- x(alloc_metadata, 1) \
- x(extents_above_btree_updates_done, 2) \
- x(bformat_overflow_done, 3)
-
-enum bch_sb_compat {
-#define x(f, n) BCH_COMPAT_##f,
- BCH_SB_COMPAT()
-#undef x
- BCH_COMPAT_NR,
-};
-
-/* options: */
-
-#define BCH_VERSION_UPGRADE_OPTS() \
- x(compatible, 0) \
- x(incompatible, 1) \
- x(none, 2)
-
-enum bch_version_upgrade_opts {
-#define x(t, n) BCH_VERSION_UPGRADE_##t = n,
- BCH_VERSION_UPGRADE_OPTS()
-#undef x
-};
-
-#define BCH_REPLICAS_MAX 4U
-
-#define BCH_BKEY_PTRS_MAX 16U
-
-#define BCH_ERROR_ACTIONS() \
- x(continue, 0) \
- x(fix_safe, 1) \
- x(panic, 2) \
- x(ro, 3)
-
-enum bch_error_actions {
-#define x(t, n) BCH_ON_ERROR_##t = n,
- BCH_ERROR_ACTIONS()
-#undef x
- BCH_ON_ERROR_NR
-};
-
-#define BCH_STR_HASH_TYPES() \
- x(crc32c, 0) \
- x(crc64, 1) \
- x(siphash_old, 2) \
- x(siphash, 3)
-
-enum bch_str_hash_type {
-#define x(t, n) BCH_STR_HASH_##t = n,
- BCH_STR_HASH_TYPES()
-#undef x
- BCH_STR_HASH_NR
-};
-
-#define BCH_STR_HASH_OPTS() \
- x(crc32c, 0) \
- x(crc64, 1) \
- x(siphash, 2)
-
-enum bch_str_hash_opts {
-#define x(t, n) BCH_STR_HASH_OPT_##t = n,
- BCH_STR_HASH_OPTS()
-#undef x
- BCH_STR_HASH_OPT_NR
-};
-
-#define BCH_CSUM_TYPES() \
- x(none, 0) \
- x(crc32c_nonzero, 1) \
- x(crc64_nonzero, 2) \
- x(chacha20_poly1305_80, 3) \
- x(chacha20_poly1305_128, 4) \
- x(crc32c, 5) \
- x(crc64, 6) \
- x(xxhash, 7)
-
-enum bch_csum_type {
-#define x(t, n) BCH_CSUM_##t = n,
- BCH_CSUM_TYPES()
-#undef x
- BCH_CSUM_NR
-};
-
-static const __maybe_unused unsigned bch_crc_bytes[] = {
- [BCH_CSUM_none] = 0,
- [BCH_CSUM_crc32c_nonzero] = 4,
- [BCH_CSUM_crc32c] = 4,
- [BCH_CSUM_crc64_nonzero] = 8,
- [BCH_CSUM_crc64] = 8,
- [BCH_CSUM_xxhash] = 8,
- [BCH_CSUM_chacha20_poly1305_80] = 10,
- [BCH_CSUM_chacha20_poly1305_128] = 16,
-};
-
-static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
-{
- switch (type) {
- case BCH_CSUM_chacha20_poly1305_80:
- case BCH_CSUM_chacha20_poly1305_128:
- return true;
- default:
- return false;
- }
-}
-
-#define BCH_CSUM_OPTS() \
- x(none, 0) \
- x(crc32c, 1) \
- x(crc64, 2) \
- x(xxhash, 3)
-
-enum bch_csum_opts {
-#define x(t, n) BCH_CSUM_OPT_##t = n,
- BCH_CSUM_OPTS()
-#undef x
- BCH_CSUM_OPT_NR
-};
-
-#define BCH_COMPRESSION_TYPES() \
- x(none, 0) \
- x(lz4_old, 1) \
- x(gzip, 2) \
- x(lz4, 3) \
- x(zstd, 4) \
- x(incompressible, 5)
-
-enum bch_compression_type {
-#define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
- BCH_COMPRESSION_TYPES()
-#undef x
- BCH_COMPRESSION_TYPE_NR
-};
-
-#define BCH_COMPRESSION_OPTS() \
- x(none, 0) \
- x(lz4, 1) \
- x(gzip, 2) \
- x(zstd, 3)
-
-enum bch_compression_opts {
-#define x(t, n) BCH_COMPRESSION_OPT_##t = n,
- BCH_COMPRESSION_OPTS()
-#undef x
- BCH_COMPRESSION_OPT_NR
-};
-
-/*
- * Magic numbers
- *
- * The various other data structures have their own magic numbers, which are
- * xored with the first part of the cache set's UUID
- */
-
-#define BCACHE_MAGIC \
- UUID_INIT(0xc68573f6, 0x4e1a, 0x45ca, \
- 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
-#define BCHFS_MAGIC \
- UUID_INIT(0xc68573f6, 0x66ce, 0x90a9, \
- 0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)
-
-#define BCACHEFS_STATFS_MAGIC BCACHEFS_SUPER_MAGIC
-
-#define JSET_MAGIC __cpu_to_le64(0x245235c1a3625032ULL)
-#define BSET_MAGIC __cpu_to_le64(0x90135c78b99e07f5ULL)
-
-static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
-{
- __le64 ret;
-
- memcpy(&ret, &sb->uuid, sizeof(ret));
- return ret;
-}
-
-static inline __u64 __jset_magic(struct bch_sb *sb)
-{
- return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
-}
-
-static inline __u64 __bset_magic(struct bch_sb *sb)
-{
- return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
-}
-
-/* Journal */
-
-#define JSET_KEYS_U64s (sizeof(struct jset_entry) / sizeof(__u64))
-
-#define BCH_JSET_ENTRY_TYPES() \
- x(btree_keys, 0) \
- x(btree_root, 1) \
- x(prio_ptrs, 2) \
- x(blacklist, 3) \
- x(blacklist_v2, 4) \
- x(usage, 5) \
- x(data_usage, 6) \
- x(clock, 7) \
- x(dev_usage, 8) \
- x(log, 9) \
- x(overwrite, 10) \
- x(write_buffer_keys, 11) \
- x(datetime, 12)
-
-enum bch_jset_entry_type {
-#define x(f, nr) BCH_JSET_ENTRY_##f = nr,
- BCH_JSET_ENTRY_TYPES()
-#undef x
- BCH_JSET_ENTRY_NR
-};
-
-static inline bool jset_entry_is_key(struct jset_entry *e)
-{
- switch (e->type) {
- case BCH_JSET_ENTRY_btree_keys:
- case BCH_JSET_ENTRY_btree_root:
- case BCH_JSET_ENTRY_write_buffer_keys:
- return true;
- }
-
- return false;
-}
-
-/*
- * Journal sequence numbers can be blacklisted: bsets record the max sequence
- * number of all the journal entries they contain updates for, so that on
- * recovery we can ignore those bsets that contain index updates newer than what
- * made it into the journal.
- *
- * This means that we can't reuse that journal_seq - we have to skip it, and
- * then record that we skipped it so that the next time we crash and recover we
- * don't think there was a missing journal entry.
- */
-struct jset_entry_blacklist {
- struct jset_entry entry;
- __le64 seq;
-};
-
-struct jset_entry_blacklist_v2 {
- struct jset_entry entry;
- __le64 start;
- __le64 end;
-};
-
-#define BCH_FS_USAGE_TYPES() \
- x(reserved, 0) \
- x(inodes, 1) \
- x(key_version, 2)
-
-enum bch_fs_usage_type {
-#define x(f, nr) BCH_FS_USAGE_##f = nr,
- BCH_FS_USAGE_TYPES()
-#undef x
- BCH_FS_USAGE_NR
-};
-
-struct jset_entry_usage {
- struct jset_entry entry;
- __le64 v;
-} __packed;
-
-struct jset_entry_data_usage {
- struct jset_entry entry;
- __le64 v;
- struct bch_replicas_entry_v1 r;
-} __packed;
-
-struct jset_entry_clock {
- struct jset_entry entry;
- __u8 rw;
- __u8 pad[7];
- __le64 time;
-} __packed;
-
-struct jset_entry_dev_usage_type {
- __le64 buckets;
- __le64 sectors;
- __le64 fragmented;
-} __packed;
-
-struct jset_entry_dev_usage {
- struct jset_entry entry;
- __le32 dev;
- __u32 pad;
-
- __le64 _buckets_ec; /* No longer used */
- __le64 _buckets_unavailable; /* No longer used */
-
- struct jset_entry_dev_usage_type d[];
-};
-
-static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
-{
- return (vstruct_bytes(&u->entry) - sizeof(struct jset_entry_dev_usage)) /
- sizeof(struct jset_entry_dev_usage_type);
-}
-
-struct jset_entry_log {
- struct jset_entry entry;
- u8 d[];
-} __packed __aligned(8);
-
-struct jset_entry_datetime {
- struct jset_entry entry;
- __le64 seconds;
-} __packed __aligned(8);
-
-/*
- * On disk format for a journal entry:
- * seq is monotonically increasing; every journal entry has its own unique
- * sequence number.
- *
- * last_seq is the oldest journal entry that still has keys the btree hasn't
- * flushed to disk yet.
- *
- * version is for on disk format changes.
- */
-struct jset {
- struct bch_csum csum;
-
- __le64 magic;
- __le64 seq;
- __le32 version;
- __le32 flags;
-
- __le32 u64s; /* size of d[] in u64s */
-
- __u8 encrypted_start[0];
-
- __le16 _read_clock; /* no longer used */
- __le16 _write_clock;
-
- /* Sequence number of oldest dirty journal entry */
- __le64 last_seq;
-
-
- struct jset_entry start[0];
- __u64 _data[];
-} __packed __aligned(8);
-
-LE32_BITMASK(JSET_CSUM_TYPE, struct jset, flags, 0, 4);
-LE32_BITMASK(JSET_BIG_ENDIAN, struct jset, flags, 4, 5);
-LE32_BITMASK(JSET_NO_FLUSH, struct jset, flags, 5, 6);
-
-#define BCH_JOURNAL_BUCKETS_MIN 8
-
-/* Btree: */
-
-enum btree_id_flags {
- BTREE_ID_EXTENTS = BIT(0),
- BTREE_ID_SNAPSHOTS = BIT(1),
- BTREE_ID_SNAPSHOT_FIELD = BIT(2),
- BTREE_ID_DATA = BIT(3),
-};
-
-#define BCH_BTREE_IDS() \
- x(extents, 0, BTREE_ID_EXTENTS|BTREE_ID_SNAPSHOTS|BTREE_ID_DATA,\
- BIT_ULL(KEY_TYPE_whiteout)| \
- BIT_ULL(KEY_TYPE_error)| \
- BIT_ULL(KEY_TYPE_cookie)| \
- BIT_ULL(KEY_TYPE_extent)| \
- BIT_ULL(KEY_TYPE_reservation)| \
- BIT_ULL(KEY_TYPE_reflink_p)| \
- BIT_ULL(KEY_TYPE_inline_data)) \
- x(inodes, 1, BTREE_ID_SNAPSHOTS, \
- BIT_ULL(KEY_TYPE_whiteout)| \
- BIT_ULL(KEY_TYPE_inode)| \
- BIT_ULL(KEY_TYPE_inode_v2)| \
- BIT_ULL(KEY_TYPE_inode_v3)| \
- BIT_ULL(KEY_TYPE_inode_generation)) \
- x(dirents, 2, BTREE_ID_SNAPSHOTS, \
- BIT_ULL(KEY_TYPE_whiteout)| \
- BIT_ULL(KEY_TYPE_hash_whiteout)| \
- BIT_ULL(KEY_TYPE_dirent)) \
- x(xattrs, 3, BTREE_ID_SNAPSHOTS, \
- BIT_ULL(KEY_TYPE_whiteout)| \
- BIT_ULL(KEY_TYPE_cookie)| \
- BIT_ULL(KEY_TYPE_hash_whiteout)| \
- BIT_ULL(KEY_TYPE_xattr)) \
- x(alloc, 4, 0, \
- BIT_ULL(KEY_TYPE_alloc)| \
- BIT_ULL(KEY_TYPE_alloc_v2)| \
- BIT_ULL(KEY_TYPE_alloc_v3)| \
- BIT_ULL(KEY_TYPE_alloc_v4)) \
- x(quotas, 5, 0, \
- BIT_ULL(KEY_TYPE_quota)) \
- x(stripes, 6, 0, \
- BIT_ULL(KEY_TYPE_stripe)) \
- x(reflink, 7, BTREE_ID_EXTENTS|BTREE_ID_DATA, \
- BIT_ULL(KEY_TYPE_reflink_v)| \
- BIT_ULL(KEY_TYPE_indirect_inline_data)| \
- BIT_ULL(KEY_TYPE_error)) \
- x(subvolumes, 8, 0, \
- BIT_ULL(KEY_TYPE_subvolume)) \
- x(snapshots, 9, 0, \
- BIT_ULL(KEY_TYPE_snapshot)) \
- x(lru, 10, 0, \
- BIT_ULL(KEY_TYPE_set)) \
- x(freespace, 11, BTREE_ID_EXTENTS, \
- BIT_ULL(KEY_TYPE_set)) \
- x(need_discard, 12, 0, \
- BIT_ULL(KEY_TYPE_set)) \
- x(backpointers, 13, 0, \
- BIT_ULL(KEY_TYPE_backpointer)) \
- x(bucket_gens, 14, 0, \
- BIT_ULL(KEY_TYPE_bucket_gens)) \
- x(snapshot_trees, 15, 0, \
- BIT_ULL(KEY_TYPE_snapshot_tree)) \
- x(deleted_inodes, 16, BTREE_ID_SNAPSHOT_FIELD, \
- BIT_ULL(KEY_TYPE_set)) \
- x(logged_ops, 17, 0, \
- BIT_ULL(KEY_TYPE_logged_op_truncate)| \
- BIT_ULL(KEY_TYPE_logged_op_finsert)) \
- x(rebalance_work, 18, BTREE_ID_SNAPSHOT_FIELD, \
- BIT_ULL(KEY_TYPE_set)|BIT_ULL(KEY_TYPE_cookie)) \
- x(subvolume_children, 19, 0, \
- BIT_ULL(KEY_TYPE_set)) \
- x(accounting, 20, BTREE_ID_SNAPSHOT_FIELD, \
- BIT_ULL(KEY_TYPE_accounting)) \
-
-enum btree_id {
-#define x(name, nr, ...) BTREE_ID_##name = nr,
- BCH_BTREE_IDS()
-#undef x
- BTREE_ID_NR
-};
-
-/*
- * Maximum number of btrees that we will _ever_ have under the current scheme,
- * where we refer to them with 64 bit bitfields - and we also need a bit for
- * the interior btree node type:
- */
-#define BTREE_ID_NR_MAX 63
-
-static inline bool btree_id_is_alloc(enum btree_id id)
-{
- switch (id) {
- case BTREE_ID_alloc:
- case BTREE_ID_backpointers:
- case BTREE_ID_need_discard:
- case BTREE_ID_freespace:
- case BTREE_ID_bucket_gens:
- return true;
- default:
- return false;
- }
-}
-
-#define BTREE_MAX_DEPTH 4U
-
-/* Btree nodes */
-
-/*
- * Btree nodes
- *
- * On disk a btree node is a list/log of these; within each set the keys are
- * sorted
- */
-struct bset {
- __le64 seq;
-
- /*
- * Highest journal entry this bset contains keys for.
- * If on recovery we don't see that journal entry, this bset is ignored:
- * this allows us to preserve the order of all index updates after a
- * crash, since the journal records a total order of all index updates
- * and anything that didn't make it to the journal doesn't get used.
- */
- __le64 journal_seq;
-
- __le32 flags;
- __le16 version;
- __le16 u64s; /* count of d[] in u64s */
-
- struct bkey_packed start[0];
- __u64 _data[];
-} __packed __aligned(8);
-
-LE32_BITMASK(BSET_CSUM_TYPE, struct bset, flags, 0, 4);
-
-LE32_BITMASK(BSET_BIG_ENDIAN, struct bset, flags, 4, 5);
-LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
- struct bset, flags, 5, 6);
-
-/* Sector offset within the btree node: */
-LE32_BITMASK(BSET_OFFSET, struct bset, flags, 16, 32);
-
-struct btree_node {
- struct bch_csum csum;
- __le64 magic;
-
- /* this flags field is encrypted, unlike bset->flags: */
- __le64 flags;
-
- /* Closed interval: */
- struct bpos min_key;
- struct bpos max_key;
- struct bch_extent_ptr _ptr; /* not used anymore */
- struct bkey_format format;
-
- union {
- struct bset keys;
- struct {
- __u8 pad[22];
- __le16 u64s;
- __u64 _data[0];
-
- };
- };
-} __packed __aligned(8);
-
-LE64_BITMASK(BTREE_NODE_ID_LO, struct btree_node, flags, 0, 4);
-LE64_BITMASK(BTREE_NODE_LEVEL, struct btree_node, flags, 4, 8);
-LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
- struct btree_node, flags, 8, 9);
-LE64_BITMASK(BTREE_NODE_ID_HI, struct btree_node, flags, 9, 25);
-/* 25-32 unused */
-LE64_BITMASK(BTREE_NODE_SEQ, struct btree_node, flags, 32, 64);
-
-static inline __u64 BTREE_NODE_ID(struct btree_node *n)
-{
- return BTREE_NODE_ID_LO(n) | (BTREE_NODE_ID_HI(n) << 4);
-}
-
-static inline void SET_BTREE_NODE_ID(struct btree_node *n, __u64 v)
-{
- SET_BTREE_NODE_ID_LO(n, v);
- SET_BTREE_NODE_ID_HI(n, v >> 4);
-}
-
-struct btree_node_entry {
- struct bch_csum csum;
-
- union {
- struct bset keys;
- struct {
- __u8 pad[22];
- __le16 u64s;
- __u64 _data[0];
- };
- };
-} __packed __aligned(8);
-
-#endif /* _BCACHEFS_FORMAT_H */
diff --git a/fs/bcachefs/bcachefs_ioctl.h b/fs/bcachefs/bcachefs_ioctl.h
deleted file mode 100644
index 3c23bdf788ce..000000000000
--- a/fs/bcachefs/bcachefs_ioctl.h
+++ /dev/null
@@ -1,446 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_IOCTL_H
-#define _BCACHEFS_IOCTL_H
-
-#include <linux/uuid.h>
-#include <asm/ioctl.h>
-#include "bcachefs_format.h"
-#include "bkey_types.h"
-
-/*
- * Flags common to multiple ioctls:
- */
-#define BCH_FORCE_IF_DATA_LOST (1 << 0)
-#define BCH_FORCE_IF_METADATA_LOST (1 << 1)
-#define BCH_FORCE_IF_DATA_DEGRADED (1 << 2)
-#define BCH_FORCE_IF_METADATA_DEGRADED (1 << 3)
-
-#define BCH_FORCE_IF_LOST \
- (BCH_FORCE_IF_DATA_LOST| \
- BCH_FORCE_IF_METADATA_LOST)
-#define BCH_FORCE_IF_DEGRADED \
- (BCH_FORCE_IF_DATA_DEGRADED| \
- BCH_FORCE_IF_METADATA_DEGRADED)
-
-/*
- * If cleared, ioctls that refer to a device pass it as a pointer to a pathname
- * (e.g. /dev/sda1); if set, the dev field is the device's index within the
- * filesystem:
- */
-#define BCH_BY_INDEX (1 << 4)
-
-/*
- * For BCH_IOCTL_READ_SUPER: get superblock of a specific device, not filesystem
- * wide superblock:
- */
-#define BCH_READ_DEV (1 << 5)
-
-/* global control dev: */
-
-/* These are currently broken, and probably unnecessary: */
-#if 0
-#define BCH_IOCTL_ASSEMBLE _IOW(0xbc, 1, struct bch_ioctl_assemble)
-#define BCH_IOCTL_INCREMENTAL _IOW(0xbc, 2, struct bch_ioctl_incremental)
-
-struct bch_ioctl_assemble {
- __u32 flags;
- __u32 nr_devs;
- __u64 pad;
- __u64 devs[];
-};
-
-struct bch_ioctl_incremental {
- __u32 flags;
- __u64 pad;
- __u64 dev;
-};
-#endif
-
-/* filesystem ioctls: */
-
-#define BCH_IOCTL_QUERY_UUID _IOR(0xbc, 1, struct bch_ioctl_query_uuid)
-
-/* These only make sense when we also have incremental assembly */
-#if 0
-#define BCH_IOCTL_START _IOW(0xbc, 2, struct bch_ioctl_start)
-#define BCH_IOCTL_STOP _IO(0xbc, 3)
-#endif
-
-#define BCH_IOCTL_DISK_ADD _IOW(0xbc, 4, struct bch_ioctl_disk)
-#define BCH_IOCTL_DISK_REMOVE _IOW(0xbc, 5, struct bch_ioctl_disk)
-#define BCH_IOCTL_DISK_ONLINE _IOW(0xbc, 6, struct bch_ioctl_disk)
-#define BCH_IOCTL_DISK_OFFLINE _IOW(0xbc, 7, struct bch_ioctl_disk)
-#define BCH_IOCTL_DISK_SET_STATE _IOW(0xbc, 8, struct bch_ioctl_disk_set_state)
-#define BCH_IOCTL_DATA _IOW(0xbc, 10, struct bch_ioctl_data)
-#define BCH_IOCTL_FS_USAGE _IOWR(0xbc, 11, struct bch_ioctl_fs_usage)
-#define BCH_IOCTL_DEV_USAGE _IOWR(0xbc, 11, struct bch_ioctl_dev_usage)
-#define BCH_IOCTL_READ_SUPER _IOW(0xbc, 12, struct bch_ioctl_read_super)
-#define BCH_IOCTL_DISK_GET_IDX _IOW(0xbc, 13, struct bch_ioctl_disk_get_idx)
-#define BCH_IOCTL_DISK_RESIZE _IOW(0xbc, 14, struct bch_ioctl_disk_resize)
-#define BCH_IOCTL_DISK_RESIZE_JOURNAL _IOW(0xbc,15, struct bch_ioctl_disk_resize_journal)
-
-#define BCH_IOCTL_SUBVOLUME_CREATE _IOW(0xbc, 16, struct bch_ioctl_subvolume)
-#define BCH_IOCTL_SUBVOLUME_DESTROY _IOW(0xbc, 17, struct bch_ioctl_subvolume)
-
-#define BCH_IOCTL_DEV_USAGE_V2 _IOWR(0xbc, 18, struct bch_ioctl_dev_usage_v2)
-
-#define BCH_IOCTL_FSCK_OFFLINE _IOW(0xbc, 19, struct bch_ioctl_fsck_offline)
-#define BCH_IOCTL_FSCK_ONLINE _IOW(0xbc, 20, struct bch_ioctl_fsck_online)
-#define BCH_IOCTL_QUERY_ACCOUNTING _IOW(0xbc, 21, struct bch_ioctl_query_accounting)
-
-/* ioctls below act on a particular file, not the filesystem as a whole: */
-
-#define BCHFS_IOC_REINHERIT_ATTRS _IOR(0xbc, 64, const char __user *)
-
-/*
- * BCH_IOCTL_QUERY_UUID: get filesystem UUID
- *
- * Returns user visible UUID, not internal UUID (which may not ever be changed);
- * the filesystem's sysfs directory may be found under /sys/fs/bcachefs with
- * this UUID.
- */
-struct bch_ioctl_query_uuid {
- __uuid_t uuid;
-};
-
-#if 0
-struct bch_ioctl_start {
- __u32 flags;
- __u32 pad;
-};
-#endif
-
-/*
- * BCH_IOCTL_DISK_ADD: add a new device to an existing filesystem
- *
- * The specified device must not be open or in use. On success, the new device
- * will be an online member of the filesystem just like any other member.
- *
- * The device must first be prepared by userspace by formatting with a bcachefs
- * superblock, which is only used for passing in superblock options/parameters
- * for that device (in struct bch_member). The new device's superblock should
- * not claim to be a member of any existing filesystem - UUIDs on it will be
- * ignored.
- */
-
-/*
- * BCH_IOCTL_DISK_REMOVE: permanently remove a member device from a filesystem
- *
- * Any data present on @dev will be permanently deleted, and @dev will be
- * removed from its slot in the filesystem's list of member devices. The device
- * may be either online or offline.
- *
- * Will fail if removing @dev would leave us with insufficient read write devices
- * or degraded/unavailable data, unless the appropriate BCH_FORCE_IF_* flags are
- * set.
- */
-
-/*
- * BCH_IOCTL_DISK_ONLINE: given a disk that is already a member of a filesystem
- * but is not open (e.g. because we started in degraded mode), bring it online
- *
- * all existing data on @dev will be available once the device is online,
- * exactly as if @dev was present when the filesystem was first mounted
- */
-
-/*
- * BCH_IOCTL_DISK_OFFLINE: offline a disk, causing the kernel to close that
- * block device, without removing it from the filesystem (so it can be brought
- * back online later)
- *
- * Data present on @dev will be unavailable while @dev is offline (unless
- * replicated), but will still be intact and untouched if @dev is brought back
- * online
- *
- * Will fail (similarly to BCH_IOCTL_DISK_SET_STATE) if offlining @dev would
- * leave us with insufficient read write devices or degraded/unavailable data,
- * unless the appropriate BCH_FORCE_IF_* flags are set.
- */
-
-struct bch_ioctl_disk {
- __u32 flags;
- __u32 pad;
- __u64 dev;
-};
-
-/*
- * BCH_IOCTL_DISK_SET_STATE: modify state of a member device of a filesystem
- *
- * @new_state - one of the bch_member_state states (rw, ro, failed,
- * spare)
- *
- * Will refuse to change member state if we would then have insufficient devices
- * to write to, or if it would result in degraded data (when @new_state is
- * failed or spare) unless the appropriate BCH_FORCE_IF_* flags are set.
- */
-struct bch_ioctl_disk_set_state {
- __u32 flags;
- __u8 new_state;
- __u8 pad[3];
- __u64 dev;
-};
-
-#define BCH_DATA_OPS() \
- x(scrub, 0) \
- x(rereplicate, 1) \
- x(migrate, 2) \
- x(rewrite_old_nodes, 3) \
- x(drop_extra_replicas, 4)
-
-enum bch_data_ops {
-#define x(t, n) BCH_DATA_OP_##t = n,
- BCH_DATA_OPS()
-#undef x
- BCH_DATA_OP_NR
-};
-
-/*
- * BCH_IOCTL_DATA: operations that walk and manipulate filesystem data (e.g.
- * scrub, rereplicate, migrate).
- *
- * This ioctl kicks off a job in the background, and returns a file descriptor.
- * Reading from the file descriptor returns a struct bch_ioctl_data_event,
- * indicating current progress, and closing the file descriptor will stop the
- * job. The file descriptor is O_CLOEXEC.
- */
-struct bch_ioctl_data {
- __u16 op;
- __u8 start_btree;
- __u8 end_btree;
- __u32 flags;
-
- struct bpos start_pos;
- struct bpos end_pos;
-
- union {
- struct {
- __u32 dev;
- __u32 pad;
- } migrate;
- struct {
- __u64 pad[8];
- };
- };
-} __packed __aligned(8);
-
-enum bch_data_event {
- BCH_DATA_EVENT_PROGRESS = 0,
- /* XXX: add an event for reporting errors */
- BCH_DATA_EVENT_NR = 1,
-};
-
-struct bch_ioctl_data_progress {
- __u8 data_type;
- __u8 btree_id;
- __u8 pad[2];
- struct bpos pos;
-
- __u64 sectors_done;
- __u64 sectors_total;
-} __packed __aligned(8);
-
-struct bch_ioctl_data_event {
- __u8 type;
- __u8 pad[7];
- union {
- struct bch_ioctl_data_progress p;
- __u64 pad2[15];
- };
-} __packed __aligned(8);
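
A minimal userspace sketch of the background-job pattern described above: kick off a data op, then read progress events from the returned file descriptor. The op choice, the btree/pos ranges, and how fs_fd is obtained for the target filesystem are illustrative only:

/*
 * Sketch only: error handling is minimal; obtaining fs_fd is outside the
 * scope of this header.
 */
#include <sys/ioctl.h>
#include <unistd.h>
#include "bcachefs_ioctl.h"

static int example_rereplicate(int fs_fd)
{
	struct bch_ioctl_data arg = {
		.op		= BCH_DATA_OP_rereplicate,
		.start_btree	= 0,
		.end_btree	= 0xff,		/* illustrative: all btrees */
		.start_pos	= POS_MIN,
		.end_pos	= SPOS_MAX,
	};
	struct bch_ioctl_data_event e;
	int progress_fd = ioctl(fs_fd, BCH_IOCTL_DATA, &arg);

	if (progress_fd < 0)
		return -1;

	/* Each read returns a progress snapshot; closing the fd stops the job: */
	if (read(progress_fd, &e, sizeof(e)) == sizeof(e) &&
	    e.type == BCH_DATA_EVENT_PROGRESS) {
		/* e.p.sectors_done / e.p.sectors_total indicate completion */
	}

	close(progress_fd);
	return 0;
}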
-
-struct bch_replicas_usage {
- __u64 sectors;
- struct bch_replicas_entry_v1 r;
-} __packed;
-
-static inline unsigned replicas_usage_bytes(struct bch_replicas_usage *u)
-{
- return offsetof(struct bch_replicas_usage, r) + replicas_entry_bytes(&u->r);
-}
-
-static inline struct bch_replicas_usage *
-replicas_usage_next(struct bch_replicas_usage *u)
-{
- return (void *) u + replicas_usage_bytes(u);
-}
-
-/* Obsolete */
-/*
- * BCH_IOCTL_FS_USAGE: query filesystem disk space usage
- *
- * Returns disk space usage broken out by data type, number of replicas, and
- * by component device
- *
- * @replica_entries_bytes - size, in bytes, allocated for replica usage entries
- *
- * On success, @replica_entries_bytes will be changed to indicate the number of
- * bytes actually used.
- *
- * Returns -ERANGE if @replica_entries_bytes was too small
- */
-struct bch_ioctl_fs_usage {
- __u64 capacity;
- __u64 used;
- __u64 online_reserved;
- __u64 persistent_reserved[BCH_REPLICAS_MAX];
-
- __u32 replica_entries_bytes;
- __u32 pad;
-
- struct bch_replicas_usage replicas[];
-};
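
To walk the variable-length entries returned by this (obsolete) ioctl, replicas_usage_next() above is used together with @replica_entries_bytes to bound the iteration. A sketch, assuming @u points at a struct bch_ioctl_fs_usage the kernel has already filled in:

/*
 * Sketch: iterate the variable-length entries in u->replicas[]; the kernel
 * wrote their total size, in bytes, back into u->replica_entries_bytes.
 */
static void example_walk_replicas(struct bch_ioctl_fs_usage *u)
{
	struct bch_replicas_usage *r = u->replicas;
	void *end = (void *) u->replicas + u->replica_entries_bytes;

	while ((void *) r < end) {
		/* r->sectors is the usage counted against the entry r->r */
		r = replicas_usage_next(r);
	}
}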
-
-/* Obsolete */
-/*
- * BCH_IOCTL_DEV_USAGE: query device disk space usage
- *
- * Returns disk space usage broken out by data type - both by buckets and
- * sectors.
- */
-struct bch_ioctl_dev_usage {
- __u64 dev;
- __u32 flags;
- __u8 state;
- __u8 pad[7];
-
- __u32 bucket_size;
- __u64 nr_buckets;
-
- __u64 buckets_ec;
-
- struct bch_ioctl_dev_usage_type {
- __u64 buckets;
- __u64 sectors;
- __u64 fragmented;
- } d[10];
-};
-
-/* Obsolete */
-struct bch_ioctl_dev_usage_v2 {
- __u64 dev;
- __u32 flags;
- __u8 state;
- __u8 nr_data_types;
- __u8 pad[6];
-
- __u32 bucket_size;
- __u64 nr_buckets;
-
- struct bch_ioctl_dev_usage_type d[];
-};
-
-/*
- * BCH_IOCTL_READ_SUPER: read filesystem superblock
- *
- * Equivalent to reading the superblock directly from the block device, except
- * avoids racing with the kernel writing the superblock or having to figure out
- * which block device to read
- *
- * @sb - buffer to read into
- * @size - size of userspace allocated buffer
- * @dev - device to read superblock for, if BCH_READ_DEV flag is
- * specified
- *
- * Returns -ERANGE if buffer provided is too small
- */
-struct bch_ioctl_read_super {
- __u32 flags;
- __u32 pad;
- __u64 dev;
- __u64 size;
- __u64 sb;
-};
-
-/*
- * BCH_IOCTL_DISK_GET_IDX: given a path to a block device, query the filesystem
- * to determine if the disk is an (online) member - if so, returns the device's index
- *
- * Returns -ENOENT if not found
- */
-struct bch_ioctl_disk_get_idx {
- __u64 dev;
-};
-
-/*
- * BCH_IOCTL_DISK_RESIZE: resize filesystem on a device
- *
- * @dev - member to resize
- * @nbuckets - new number of buckets
- */
-struct bch_ioctl_disk_resize {
- __u32 flags;
- __u32 pad;
- __u64 dev;
- __u64 nbuckets;
-};
-
-/*
- * BCH_IOCTL_DISK_RESIZE_JOURNAL: resize journal on a device
- *
- * @dev - member to resize
- * @nbuckets - new number of buckets
- */
-struct bch_ioctl_disk_resize_journal {
- __u32 flags;
- __u32 pad;
- __u64 dev;
- __u64 nbuckets;
-};
-
-struct bch_ioctl_subvolume {
- __u32 flags;
- __u32 dirfd;
- __u16 mode;
- __u16 pad[3];
- __u64 dst_ptr;
- __u64 src_ptr;
-};
-
-#define BCH_SUBVOL_SNAPSHOT_CREATE (1U << 0)
-#define BCH_SUBVOL_SNAPSHOT_RO (1U << 1)
-
-/*
- * BCH_IOCTL_FSCK_OFFLINE: run fsck from the 'bcachefs fsck' userspace command,
- * but with the kernel's implementation of fsck:
- */
-struct bch_ioctl_fsck_offline {
- __u64 flags;
- __u64 opts; /* string */
- __u64 nr_devs;
- __u64 devs[] __counted_by(nr_devs);
-};
-
-/*
- * BCH_IOCTL_FSCK_ONLINE: run fsck from the 'bcachefs fsck' userspace command,
- * but with the kernel's implementation of fsck:
- */
-struct bch_ioctl_fsck_online {
- __u64 flags;
- __u64 opts; /* string */
-};
-
-/*
- * BCH_IOCTL_QUERY_ACCOUNTING: query filesystem disk accounting
- *
- * Returns disk space usage broken out by data type, number of replicas, and
- * by component device
- *
- * @replica_entries_bytes - size, in bytes, allocated for replica usage entries
- *
- * On success, @replica_entries_bytes will be changed to indicate the number of
- * bytes actually used.
- *
- * Returns -ERANGE if @replica_entries_bytes was too small
- */
-struct bch_ioctl_query_accounting {
- __u64 capacity;
- __u64 used;
- __u64 online_reserved;
-
- __u32 accounting_u64s; /* input parameter */
- __u32 accounting_types_mask; /* input parameter */
-
- struct bkey_i_accounting accounting[];
-};
-
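Since struct bch_ioctl_query_accounting ends in a flexible array, the caller sizes the buffer and passes its capacity in @accounting_u64s. The helper below is an illustrative sketch only; the ioctl request macro and the exact return convention are not shown in this hunk, and the mirrored struct name is mine.

#include <stdint.h>
#include <stdlib.h>

/* local mirror of the struct above, just for the sizing arithmetic */
struct query_accounting_sketch {
	uint64_t capacity, used, online_reserved;
	uint32_t accounting_u64s;	/* in: buffer capacity, in u64s */
	uint32_t accounting_types_mask;	/* in: which accounting types to return */
	uint64_t accounting[];		/* out: packed accounting keys */
};

/* allocate a query buffer with room for @u64s worth of accounting keys */
static struct query_accounting_sketch *alloc_query(unsigned u64s, uint32_t type_mask)
{
	struct query_accounting_sketch *a =
		calloc(1, sizeof(*a) + u64s * sizeof(uint64_t));

	if (a) {
		a->accounting_u64s	 = u64s;
		a->accounting_types_mask = type_mask;
	}
	return a;
}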
-#endif /* _BCACHEFS_IOCTL_H */
diff --git a/fs/bcachefs/bkey.c b/fs/bcachefs/bkey.c
deleted file mode 100644
index 995ba32e9b6e..000000000000
--- a/fs/bcachefs/bkey.c
+++ /dev/null
@@ -1,1117 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "bkey.h"
-#include "bkey_cmp.h"
-#include "bkey_methods.h"
-#include "bset.h"
-#include "util.h"
-
-const struct bkey_format bch2_bkey_format_current = BKEY_FORMAT_CURRENT;
-
-void bch2_bkey_packed_to_binary_text(struct printbuf *out,
- const struct bkey_format *f,
- const struct bkey_packed *k)
-{
- const u64 *p = high_word(f, k);
- unsigned word_bits = 64 - high_bit_offset;
- unsigned nr_key_bits = bkey_format_key_bits(f) + high_bit_offset;
- u64 v = *p & (~0ULL >> high_bit_offset);
-
- if (!nr_key_bits) {
- prt_str(out, "(empty)");
- return;
- }
-
- while (1) {
- unsigned next_key_bits = nr_key_bits;
-
- if (nr_key_bits < 64) {
- v >>= 64 - nr_key_bits;
- next_key_bits = 0;
- } else {
- next_key_bits -= 64;
- }
-
- bch2_prt_u64_base2_nbits(out, v, min(word_bits, nr_key_bits));
-
- if (!next_key_bits)
- break;
-
- prt_char(out, ' ');
-
- p = next_word(p);
- v = *p;
- word_bits = 64;
- nr_key_bits = next_key_bits;
- }
-}
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-
-static void bch2_bkey_pack_verify(const struct bkey_packed *packed,
- const struct bkey *unpacked,
- const struct bkey_format *format)
-{
- struct bkey tmp;
-
- BUG_ON(bkeyp_val_u64s(format, packed) !=
- bkey_val_u64s(unpacked));
-
- BUG_ON(packed->u64s < bkeyp_key_u64s(format, packed));
-
- tmp = __bch2_bkey_unpack_key(format, packed);
-
- if (memcmp(&tmp, unpacked, sizeof(struct bkey))) {
- struct printbuf buf = PRINTBUF;
-
- prt_printf(&buf, "keys differ: format u64s %u fields %u %u %u %u %u\n",
- format->key_u64s,
- format->bits_per_field[0],
- format->bits_per_field[1],
- format->bits_per_field[2],
- format->bits_per_field[3],
- format->bits_per_field[4]);
-
- prt_printf(&buf, "compiled unpack: ");
- bch2_bkey_to_text(&buf, unpacked);
- prt_newline(&buf);
-
- prt_printf(&buf, "c unpack: ");
- bch2_bkey_to_text(&buf, &tmp);
- prt_newline(&buf);
-
- prt_printf(&buf, "compiled unpack: ");
- bch2_bkey_packed_to_binary_text(&buf, &bch2_bkey_format_current,
- (struct bkey_packed *) unpacked);
- prt_newline(&buf);
-
- prt_printf(&buf, "c unpack: ");
- bch2_bkey_packed_to_binary_text(&buf, &bch2_bkey_format_current,
- (struct bkey_packed *) &tmp);
- prt_newline(&buf);
-
- panic("%s", buf.buf);
- }
-}
-
-#else
-static inline void bch2_bkey_pack_verify(const struct bkey_packed *packed,
- const struct bkey *unpacked,
- const struct bkey_format *format) {}
-#endif
-
-struct pack_state {
- const struct bkey_format *format;
- unsigned bits; /* bits remaining in current word */
- u64 w; /* current word */
- u64 *p; /* pointer to next word */
-};
-
-__always_inline
-static struct pack_state pack_state_init(const struct bkey_format *format,
- struct bkey_packed *k)
-{
- u64 *p = high_word(format, k);
-
- return (struct pack_state) {
- .format = format,
- .bits = 64 - high_bit_offset,
- .w = 0,
- .p = p,
- };
-}
-
-__always_inline
-static void pack_state_finish(struct pack_state *state,
- struct bkey_packed *k)
-{
- EBUG_ON(state->p < k->_data);
- EBUG_ON(state->p >= (u64 *) k->_data + state->format->key_u64s);
-
- *state->p = state->w;
-}
-
-struct unpack_state {
- const struct bkey_format *format;
- unsigned bits; /* bits remaining in current word */
- u64 w; /* current word */
- const u64 *p; /* pointer to next word */
-};
-
-__always_inline
-static struct unpack_state unpack_state_init(const struct bkey_format *format,
- const struct bkey_packed *k)
-{
- const u64 *p = high_word(format, k);
-
- return (struct unpack_state) {
- .format = format,
- .bits = 64 - high_bit_offset,
- .w = *p << high_bit_offset,
- .p = p,
- };
-}
-
-__always_inline
-static u64 get_inc_field(struct unpack_state *state, unsigned field)
-{
- unsigned bits = state->format->bits_per_field[field];
- u64 v = 0, offset = le64_to_cpu(state->format->field_offset[field]);
-
- if (bits >= state->bits) {
- v = state->w >> (64 - bits);
- bits -= state->bits;
-
- state->p = next_word(state->p);
- state->w = *state->p;
- state->bits = 64;
- }
-
- /* avoid shift by 64 if bits is 0 - bits is never 64 here: */
- v |= (state->w >> 1) >> (63 - bits);
- state->w <<= bits;
- state->bits -= bits;
-
- return v + offset;
-}
-
-__always_inline
-static void __set_inc_field(struct pack_state *state, unsigned field, u64 v)
-{
- unsigned bits = state->format->bits_per_field[field];
-
- if (bits) {
- if (bits > state->bits) {
- bits -= state->bits;
- /* avoid shift by 64 if bits is 64 - bits is never 0 here: */
- state->w |= (v >> 1) >> (bits - 1);
-
- *state->p = state->w;
- state->p = next_word(state->p);
- state->w = 0;
- state->bits = 64;
- }
-
- state->bits -= bits;
- state->w |= v << state->bits;
- }
-}
-
-__always_inline
-static bool set_inc_field(struct pack_state *state, unsigned field, u64 v)
-{
- unsigned bits = state->format->bits_per_field[field];
- u64 offset = le64_to_cpu(state->format->field_offset[field]);
-
- if (v < offset)
- return false;
-
- v -= offset;
-
- if (fls64(v) > bits)
- return false;
-
- __set_inc_field(state, field, v);
- return true;
-}
-
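For readers following the bit arithmetic, here is a self-contained userspace restatement of the per-field encoding used by set_inc_field() and get_inc_field(): a field value v is stored as (v - field_offset) in bits_per_field bits, so packing fails when v is below the offset or needs more bits than the format allows; unpacking simply adds the offset back. The helper names below are mine, not the kernel's.

#include <stdint.h>
#include <stdio.h>

static unsigned fls64_demo(uint64_t v)
{
	return v ? 64 - __builtin_clzll(v) : 0;
}

/* returns 1 and stores the packed value if v fits in (bits, offset), else 0 */
static int pack_field(uint64_t v, unsigned bits, uint64_t offset, uint64_t *packed)
{
	if (v < offset)
		return 0;
	v -= offset;
	if (fls64_demo(v) > bits)
		return 0;
	*packed = v;
	return 1;
}

int main(void)
{
	uint64_t p;

	/* 20 bits with offset 1000: values 1000 .. 1000 + (1 << 20) - 1 fit */
	printf("%d\n", pack_field(1500, 20, 1000, &p));		/* 1, p = 500 */
	printf("%d\n", pack_field(999, 20, 1000, &p));		/* 0: below offset */
	printf("%d\n", pack_field(5000000, 20, 1000, &p));	/* 0: needs > 20 bits */
	return 0;
}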
-/*
- * Note: does NOT set out->format (we don't know what it should be here!)
- *
- * Also: doesn't work on extents - it doesn't preserve the invariant that
- * if k is packed bkey_start_pos(k) will successfully pack
- */
-static bool bch2_bkey_transform_key(const struct bkey_format *out_f,
- struct bkey_packed *out,
- const struct bkey_format *in_f,
- const struct bkey_packed *in)
-{
- struct pack_state out_s = pack_state_init(out_f, out);
- struct unpack_state in_s = unpack_state_init(in_f, in);
- u64 *w = out->_data;
- unsigned i;
-
- *w = 0;
-
- for (i = 0; i < BKEY_NR_FIELDS; i++)
- if (!set_inc_field(&out_s, i, get_inc_field(&in_s, i)))
- return false;
-
- /* Can't happen because the val would be too big to unpack: */
- EBUG_ON(in->u64s - in_f->key_u64s + out_f->key_u64s > U8_MAX);
-
- pack_state_finish(&out_s, out);
- out->u64s = out_f->key_u64s + in->u64s - in_f->key_u64s;
- out->needs_whiteout = in->needs_whiteout;
- out->type = in->type;
-
- return true;
-}
-
-bool bch2_bkey_transform(const struct bkey_format *out_f,
- struct bkey_packed *out,
- const struct bkey_format *in_f,
- const struct bkey_packed *in)
-{
- if (!bch2_bkey_transform_key(out_f, out, in_f, in))
- return false;
-
- memcpy_u64s((u64 *) out + out_f->key_u64s,
- (u64 *) in + in_f->key_u64s,
- (in->u64s - in_f->key_u64s));
- return true;
-}
-
-struct bkey __bch2_bkey_unpack_key(const struct bkey_format *format,
- const struct bkey_packed *in)
-{
- struct unpack_state state = unpack_state_init(format, in);
- struct bkey out;
-
- EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
- EBUG_ON(in->u64s < format->key_u64s);
- EBUG_ON(in->format != KEY_FORMAT_LOCAL_BTREE);
- EBUG_ON(in->u64s - format->key_u64s + BKEY_U64s > U8_MAX);
-
- out.u64s = BKEY_U64s + in->u64s - format->key_u64s;
- out.format = KEY_FORMAT_CURRENT;
- out.needs_whiteout = in->needs_whiteout;
- out.type = in->type;
- out.pad[0] = 0;
-
-#define x(id, field) out.field = get_inc_field(&state, id);
- bkey_fields()
-#undef x
-
- return out;
-}
-
-#ifndef HAVE_BCACHEFS_COMPILED_UNPACK
-struct bpos __bkey_unpack_pos(const struct bkey_format *format,
- const struct bkey_packed *in)
-{
- struct unpack_state state = unpack_state_init(format, in);
- struct bpos out;
-
- EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
- EBUG_ON(in->u64s < format->key_u64s);
- EBUG_ON(in->format != KEY_FORMAT_LOCAL_BTREE);
-
- out.inode = get_inc_field(&state, BKEY_FIELD_INODE);
- out.offset = get_inc_field(&state, BKEY_FIELD_OFFSET);
- out.snapshot = get_inc_field(&state, BKEY_FIELD_SNAPSHOT);
-
- return out;
-}
-#endif
-
-/**
- * bch2_bkey_pack_key -- pack just the key, not the value
- * @out: packed result
- * @in: key to pack
- * @format: format of packed result
- *
- * Returns: true on success, false on failure
- */
-bool bch2_bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
- const struct bkey_format *format)
-{
- struct pack_state state = pack_state_init(format, out);
- u64 *w = out->_data;
-
- EBUG_ON((void *) in == (void *) out);
- EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
- EBUG_ON(in->format != KEY_FORMAT_CURRENT);
-
- *w = 0;
-
-#define x(id, field) if (!set_inc_field(&state, id, in->field)) return false;
- bkey_fields()
-#undef x
- pack_state_finish(&state, out);
- out->u64s = format->key_u64s + in->u64s - BKEY_U64s;
- out->format = KEY_FORMAT_LOCAL_BTREE;
- out->needs_whiteout = in->needs_whiteout;
- out->type = in->type;
-
- bch2_bkey_pack_verify(out, in, format);
- return true;
-}
-
-/**
- * bch2_bkey_unpack -- unpack the key and the value
- * @b: btree node of @src key (for packed format)
- * @dst: unpacked result
- * @src: packed input
- */
-void bch2_bkey_unpack(const struct btree *b, struct bkey_i *dst,
- const struct bkey_packed *src)
-{
- __bkey_unpack_key(b, &dst->k, src);
-
- memcpy_u64s(&dst->v,
- bkeyp_val(&b->format, src),
- bkeyp_val_u64s(&b->format, src));
-}
-
-/**
- * bch2_bkey_pack -- pack the key and the value
- * @dst: packed result
- * @src: unpacked input
- * @format: format of packed result
- *
- * Returns: true on success, false on failure
- */
-bool bch2_bkey_pack(struct bkey_packed *dst, const struct bkey_i *src,
- const struct bkey_format *format)
-{
- struct bkey_packed tmp;
-
- if (!bch2_bkey_pack_key(&tmp, &src->k, format))
- return false;
-
- memmove_u64s((u64 *) dst + format->key_u64s,
- &src->v,
- bkey_val_u64s(&src->k));
- memcpy_u64s_small(dst, &tmp, format->key_u64s);
-
- return true;
-}
-
-__always_inline
-static bool set_inc_field_lossy(struct pack_state *state, unsigned field, u64 v)
-{
- unsigned bits = state->format->bits_per_field[field];
- u64 offset = le64_to_cpu(state->format->field_offset[field]);
- bool ret = true;
-
- EBUG_ON(v < offset);
- v -= offset;
-
- if (fls64(v) > bits) {
- v = ~(~0ULL << bits);
- ret = false;
- }
-
- __set_inc_field(state, field, v);
- return ret;
-}
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-static bool bkey_packed_successor(struct bkey_packed *out,
- const struct btree *b,
- struct bkey_packed k)
-{
- const struct bkey_format *f = &b->format;
- unsigned nr_key_bits = b->nr_key_bits;
- unsigned first_bit, offset;
- u64 *p;
-
- EBUG_ON(b->nr_key_bits != bkey_format_key_bits(f));
-
- if (!nr_key_bits)
- return false;
-
- *out = k;
-
- first_bit = high_bit_offset + nr_key_bits - 1;
- p = nth_word(high_word(f, out), first_bit >> 6);
- offset = 63 - (first_bit & 63);
-
- while (nr_key_bits) {
- unsigned bits = min(64 - offset, nr_key_bits);
- u64 mask = (~0ULL >> (64 - bits)) << offset;
-
- if ((*p & mask) != mask) {
- *p += 1ULL << offset;
- EBUG_ON(bch2_bkey_cmp_packed(b, out, &k) <= 0);
- return true;
- }
-
- *p &= ~mask;
- p = prev_word(p);
- nr_key_bits -= bits;
- offset = 0;
- }
-
- return false;
-}
-
-static bool bkey_format_has_too_big_fields(const struct bkey_format *f)
-{
- for (unsigned i = 0; i < f->nr_fields; i++) {
- unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
- u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1));
- u64 packed_max = f->bits_per_field[i]
- ? ~((~0ULL << 1) << (f->bits_per_field[i] - 1))
- : 0;
- u64 field_offset = le64_to_cpu(f->field_offset[i]);
-
- if (packed_max + field_offset < packed_max ||
- packed_max + field_offset > unpacked_max)
- return true;
- }
-
- return false;
-}
-#endif
-
-/*
- * Returns a packed key that compares <= in
- *
- * This is used in bset_search_tree(), where we need a packed pos in order to be
- * able to compare against the keys in the auxiliary search tree - and it's
- * legal to use a packed pos that isn't equivalent to the original pos,
- * _provided_ it compares <= to the original pos.
- */
-enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *out,
- struct bpos in,
- const struct btree *b)
-{
- const struct bkey_format *f = &b->format;
- struct pack_state state = pack_state_init(f, out);
- u64 *w = out->_data;
-#ifdef CONFIG_BCACHEFS_DEBUG
- struct bpos orig = in;
-#endif
- bool exact = true;
- unsigned i;
-
- /*
- * bch2_bkey_pack_key() will write to all of f->key_u64s, minus the 3
- * byte header, but pack_pos() won't if the len/version fields are big
- * enough - we need to make sure to zero them out:
- */
- for (i = 0; i < f->key_u64s; i++)
- w[i] = 0;
-
- if (unlikely(in.snapshot <
- le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]))) {
- if (!in.offset-- &&
- !in.inode--)
- return BKEY_PACK_POS_FAIL;
- in.snapshot = KEY_SNAPSHOT_MAX;
- exact = false;
- }
-
- if (unlikely(in.offset <
- le64_to_cpu(f->field_offset[BKEY_FIELD_OFFSET]))) {
- if (!in.inode--)
- return BKEY_PACK_POS_FAIL;
- in.offset = KEY_OFFSET_MAX;
- in.snapshot = KEY_SNAPSHOT_MAX;
- exact = false;
- }
-
- if (unlikely(in.inode <
- le64_to_cpu(f->field_offset[BKEY_FIELD_INODE])))
- return BKEY_PACK_POS_FAIL;
-
- if (unlikely(!set_inc_field_lossy(&state, BKEY_FIELD_INODE, in.inode))) {
- in.offset = KEY_OFFSET_MAX;
- in.snapshot = KEY_SNAPSHOT_MAX;
- exact = false;
- }
-
- if (unlikely(!set_inc_field_lossy(&state, BKEY_FIELD_OFFSET, in.offset))) {
- in.snapshot = KEY_SNAPSHOT_MAX;
- exact = false;
- }
-
- if (unlikely(!set_inc_field_lossy(&state, BKEY_FIELD_SNAPSHOT, in.snapshot)))
- exact = false;
-
- pack_state_finish(&state, out);
- out->u64s = f->key_u64s;
- out->format = KEY_FORMAT_LOCAL_BTREE;
- out->type = KEY_TYPE_deleted;
-
-#ifdef CONFIG_BCACHEFS_DEBUG
- if (exact) {
- BUG_ON(bkey_cmp_left_packed(b, out, &orig));
- } else {
- struct bkey_packed successor;
-
- BUG_ON(bkey_cmp_left_packed(b, out, &orig) >= 0);
- BUG_ON(bkey_packed_successor(&successor, b, *out) &&
- bkey_cmp_left_packed(b, &successor, &orig) < 0 &&
- !bkey_format_has_too_big_fields(f));
- }
-#endif
-
- return exact ? BKEY_PACK_POS_EXACT : BKEY_PACK_POS_SMALLER;
-}
-
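A worked toy example of the "compares <=" contract: when a field underflows its field_offset, the code above rewinds the next more significant field and saturates the lower ones, so the resulting search position still sorts at or before the original. The field values and the UINT64_MAX stand-in for KEY_SNAPSHOT_MAX below are made up for illustration.

#include <stdint.h>
#include <stdio.h>

struct pos { uint64_t inode, offset, snapshot; };

static int pos_cmp(struct pos l, struct pos r)
{
	if (l.inode != r.inode)		return l.inode    < r.inode    ? -1 : 1;
	if (l.offset != r.offset)	return l.offset   < r.offset   ? -1 : 1;
	if (l.snapshot != r.snapshot)	return l.snapshot < r.snapshot ? -1 : 1;
	return 0;
}

int main(void)
{
	const uint64_t snapshot_field_offset = 100;	/* example f->field_offset[SNAPSHOT] */
	struct pos in  = { .inode = 7, .offset = 42, .snapshot = 5 };
	struct pos out = in;

	if (in.snapshot < snapshot_field_offset) {
		/* same move as the kernel code (which also handles offset/inode
		 * underflow): step back one offset and saturate snapshot */
		out.offset--;
		out.snapshot = UINT64_MAX;
	}

	printf("cmp = %d\n", pos_cmp(out, in));	/* -1: packed pos sorts <= original */
	return 0;
}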
-void bch2_bkey_format_init(struct bkey_format_state *s)
-{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(s->field_min); i++)
- s->field_min[i] = U64_MAX;
-
- for (i = 0; i < ARRAY_SIZE(s->field_max); i++)
- s->field_max[i] = 0;
-
- /* Make sure we can store a size of 0: */
- s->field_min[BKEY_FIELD_SIZE] = 0;
-}
-
-void bch2_bkey_format_add_pos(struct bkey_format_state *s, struct bpos p)
-{
- unsigned field = 0;
-
- __bkey_format_add(s, field++, p.inode);
- __bkey_format_add(s, field++, p.offset);
- __bkey_format_add(s, field++, p.snapshot);
-}
-
-/*
- * We don't want it to be possible for the packed format to represent fields
- * bigger than a u64... that will cause confusion and issues (like with
- * bkey_packed_successor())
- */
-static void set_format_field(struct bkey_format *f, enum bch_bkey_fields i,
- unsigned bits, u64 offset)
-{
- unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
- u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1));
-
- bits = min(bits, unpacked_bits);
-
- offset = bits == unpacked_bits ? 0 : min(offset, unpacked_max - ((1ULL << bits) - 1));
-
- f->bits_per_field[i] = bits;
- f->field_offset[i] = cpu_to_le64(offset);
-}
-
-struct bkey_format bch2_bkey_format_done(struct bkey_format_state *s)
-{
- unsigned i, bits = KEY_PACKED_BITS_START;
- struct bkey_format ret = {
- .nr_fields = BKEY_NR_FIELDS,
- };
-
- for (i = 0; i < ARRAY_SIZE(s->field_min); i++) {
- s->field_min[i] = min(s->field_min[i], s->field_max[i]);
-
- set_format_field(&ret, i,
- fls64(s->field_max[i] - s->field_min[i]),
- s->field_min[i]);
-
- bits += ret.bits_per_field[i];
- }
-
- /* allow for extent merging: */
- if (ret.bits_per_field[BKEY_FIELD_SIZE]) {
- unsigned b = min(4U, 32U - ret.bits_per_field[BKEY_FIELD_SIZE]);
-
- ret.bits_per_field[BKEY_FIELD_SIZE] += b;
- bits += b;
- }
-
- ret.key_u64s = DIV_ROUND_UP(bits, 64);
-
- /* if we have enough spare bits, round fields up to nearest byte */
- bits = ret.key_u64s * 64 - bits;
-
- for (i = 0; i < ARRAY_SIZE(ret.bits_per_field); i++) {
- unsigned r = round_up(ret.bits_per_field[i], 8) -
- ret.bits_per_field[i];
-
- if (r <= bits) {
- set_format_field(&ret, i,
- ret.bits_per_field[i] + r,
- le64_to_cpu(ret.field_offset[i]));
- bits -= r;
- }
- }
-
-#ifdef CONFIG_BCACHEFS_DEBUG
- {
- struct printbuf buf = PRINTBUF;
-
- BUG_ON(bch2_bkey_format_invalid(NULL, &ret, 0, &buf));
- printbuf_exit(&buf);
- }
-#endif
- return ret;
-}
-
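The sizing arithmetic above can be sketched in isolation: each field gets fls64(max - min) bits with the minimum as its offset, and key_u64s is the running total (including the packed-bits header) rounded up to whole 64-bit words. The sketch skips the extent-merging slack and byte-rounding tweaks, and the 24-bit header constant is only an assumed stand-in for KEY_PACKED_BITS_START.

#include <stdint.h>
#include <stdio.h>

#define NR_FIELDS	6
#define HEADER_BITS	24	/* assumed stand-in for KEY_PACKED_BITS_START */

static unsigned fls64_demo(uint64_t v)
{
	return v ? 64 - __builtin_clzll(v) : 0;
}

int main(void)
{
	/* per-field minimum and maximum observed while building the format */
	uint64_t min[NR_FIELDS] = { 4096, 0,       0, 0,   0, 0 };
	uint64_t max[NR_FIELDS] = { 4100, 1 << 20, 3, 128, 0, 0 };
	unsigned bits = HEADER_BITS;

	for (unsigned i = 0; i < NR_FIELDS; i++) {
		unsigned field_bits = fls64_demo(max[i] - min[i]);

		printf("field %u: %u bits, offset %llu\n",
		       i, field_bits, (unsigned long long) min[i]);
		bits += field_bits;
	}

	printf("key_u64s = %u\n", (bits + 63) / 64);	/* DIV_ROUND_UP(bits, 64) */
	return 0;
}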
-int bch2_bkey_format_invalid(struct bch_fs *c,
- struct bkey_format *f,
- enum bch_validate_flags flags,
- struct printbuf *err)
-{
- unsigned bits = KEY_PACKED_BITS_START;
-
- if (f->nr_fields != BKEY_NR_FIELDS) {
- prt_printf(err, "incorrect number of fields: got %u, should be %u",
- f->nr_fields, BKEY_NR_FIELDS);
- return -BCH_ERR_invalid;
- }
-
- /*
- * Verify that the packed format can't represent fields larger than the
- * unpacked format:
- */
- for (unsigned i = 0; i < f->nr_fields; i++) {
- if (bch2_bkey_format_field_overflows(f, i)) {
- unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
- u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1));
- unsigned packed_bits = min(64, f->bits_per_field[i]);
- u64 packed_max = packed_bits
- ? ~((~0ULL << 1) << (packed_bits - 1))
- : 0;
-
- prt_printf(err, "field %u too large: %llu + %llu > %llu",
- i, packed_max, le64_to_cpu(f->field_offset[i]), unpacked_max);
- return -BCH_ERR_invalid;
- }
-
- bits += f->bits_per_field[i];
- }
-
- if (f->key_u64s != DIV_ROUND_UP(bits, 64)) {
- prt_printf(err, "incorrect key_u64s: got %u, should be %u",
- f->key_u64s, DIV_ROUND_UP(bits, 64));
- return -BCH_ERR_invalid;
- }
-
- return 0;
-}
-
-void bch2_bkey_format_to_text(struct printbuf *out, const struct bkey_format *f)
-{
- prt_printf(out, "u64s %u fields ", f->key_u64s);
-
- for (unsigned i = 0; i < ARRAY_SIZE(f->bits_per_field); i++) {
- if (i)
- prt_str(out, ", ");
- prt_printf(out, "%u:%llu",
- f->bits_per_field[i],
- le64_to_cpu(f->field_offset[i]));
- }
-}
-
-/*
- * Most significant differing bit
- * Bits are indexed from 0 - return is [0, nr_key_bits)
- */
-__pure
-unsigned bch2_bkey_greatest_differing_bit(const struct btree *b,
- const struct bkey_packed *l_k,
- const struct bkey_packed *r_k)
-{
- const u64 *l = high_word(&b->format, l_k);
- const u64 *r = high_word(&b->format, r_k);
- unsigned nr_key_bits = b->nr_key_bits;
- unsigned word_bits = 64 - high_bit_offset;
- u64 l_v, r_v;
-
- EBUG_ON(b->nr_key_bits != bkey_format_key_bits(&b->format));
-
- /* for big endian, skip past header */
- l_v = *l & (~0ULL >> high_bit_offset);
- r_v = *r & (~0ULL >> high_bit_offset);
-
- while (nr_key_bits) {
- if (nr_key_bits < word_bits) {
- l_v >>= word_bits - nr_key_bits;
- r_v >>= word_bits - nr_key_bits;
- nr_key_bits = 0;
- } else {
- nr_key_bits -= word_bits;
- }
-
- if (l_v != r_v)
- return fls64(l_v ^ r_v) - 1 + nr_key_bits;
-
- l = next_word(l);
- r = next_word(r);
-
- l_v = *l;
- r_v = *r;
- word_bits = 64;
- }
-
- return 0;
-}
-
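The heart of the multi-word scan above is a single-word identity: the index (counting from bit 0) of the most significant bit where two words differ is fls64(l ^ r) - 1; the kernel version walks the key from the high word down and adds the remaining key bits to that index. A standalone check:

#include <stdint.h>
#include <stdio.h>

static unsigned fls64_demo(uint64_t v)
{
	return v ? 64 - __builtin_clzll(v) : 0;
}

int main(void)
{
	uint64_t l = 0xff00, r = 0xf300;

	/* l ^ r = 0x0c00, whose highest set bit is bit 11 */
	printf("%u\n", fls64_demo(l ^ r) - 1);
	return 0;
}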
-/*
- * First set bit
- * Bits are indexed from 0 - return is [0, nr_key_bits)
- */
-__pure
-unsigned bch2_bkey_ffs(const struct btree *b, const struct bkey_packed *k)
-{
- const u64 *p = high_word(&b->format, k);
- unsigned nr_key_bits = b->nr_key_bits;
- unsigned ret = 0, offset;
-
- EBUG_ON(b->nr_key_bits != bkey_format_key_bits(&b->format));
-
- offset = nr_key_bits;
- while (offset > 64) {
- p = next_word(p);
- offset -= 64;
- }
-
- offset = 64 - offset;
-
- while (nr_key_bits) {
- unsigned bits = nr_key_bits + offset < 64
- ? nr_key_bits
- : 64 - offset;
-
- u64 mask = (~0ULL >> (64 - bits)) << offset;
-
- if (*p & mask)
- return ret + __ffs64(*p & mask) - offset;
-
- p = prev_word(p);
- nr_key_bits -= bits;
- ret += bits;
- offset = 0;
- }
-
- return 0;
-}
-
-#ifdef HAVE_BCACHEFS_COMPILED_UNPACK
-
-#define I(_x) (*(out)++ = (_x))
-#define I1(i0) I(i0)
-#define I2(i0, i1) (I1(i0), I(i1))
-#define I3(i0, i1, i2) (I2(i0, i1), I(i2))
-#define I4(i0, i1, i2, i3) (I3(i0, i1, i2), I(i3))
-#define I5(i0, i1, i2, i3, i4) (I4(i0, i1, i2, i3), I(i4))
-
-static u8 *compile_bkey_field(const struct bkey_format *format, u8 *out,
- enum bch_bkey_fields field,
- unsigned dst_offset, unsigned dst_size,
- bool *eax_zeroed)
-{
- unsigned bits = format->bits_per_field[field];
- u64 offset = le64_to_cpu(format->field_offset[field]);
- unsigned i, byte, bit_offset, align, shl, shr;
-
- if (!bits && !offset) {
- if (!*eax_zeroed) {
- /* xor eax, eax */
- I2(0x31, 0xc0);
- }
-
- *eax_zeroed = true;
- goto set_field;
- }
-
- if (!bits) {
- /* just return offset: */
-
- switch (dst_size) {
- case 8:
- if (offset > S32_MAX) {
- /* mov [rdi + dst_offset], offset */
- I3(0xc7, 0x47, dst_offset);
- memcpy(out, &offset, 4);
- out += 4;
-
- I3(0xc7, 0x47, dst_offset + 4);
- memcpy(out, (void *) &offset + 4, 4);
- out += 4;
- } else {
- /* mov [rdi + dst_offset], offset */
- /* sign extended */
- I4(0x48, 0xc7, 0x47, dst_offset);
- memcpy(out, &offset, 4);
- out += 4;
- }
- break;
- case 4:
- /* mov [rdi + dst_offset], offset */
- I3(0xc7, 0x47, dst_offset);
- memcpy(out, &offset, 4);
- out += 4;
- break;
- default:
- BUG();
- }
-
- return out;
- }
-
- bit_offset = format->key_u64s * 64;
- for (i = 0; i <= field; i++)
- bit_offset -= format->bits_per_field[i];
-
- byte = bit_offset / 8;
- bit_offset -= byte * 8;
-
- *eax_zeroed = false;
-
- if (bit_offset == 0 && bits == 8) {
- /* movzx eax, BYTE PTR [rsi + imm8] */
- I4(0x0f, 0xb6, 0x46, byte);
- } else if (bit_offset == 0 && bits == 16) {
- /* movzx eax, WORD PTR [rsi + imm8] */
- I4(0x0f, 0xb7, 0x46, byte);
- } else if (bit_offset + bits <= 32) {
- align = min(4 - DIV_ROUND_UP(bit_offset + bits, 8), byte & 3);
- byte -= align;
- bit_offset += align * 8;
-
- BUG_ON(bit_offset + bits > 32);
-
- /* mov eax, [rsi + imm8] */
- I3(0x8b, 0x46, byte);
-
- if (bit_offset) {
- /* shr eax, imm8 */
- I3(0xc1, 0xe8, bit_offset);
- }
-
- if (bit_offset + bits < 32) {
- unsigned mask = ~0U >> (32 - bits);
-
- /* and eax, imm32 */
- I1(0x25);
- memcpy(out, &mask, 4);
- out += 4;
- }
- } else if (bit_offset + bits <= 64) {
- align = min(8 - DIV_ROUND_UP(bit_offset + bits, 8), byte & 7);
- byte -= align;
- bit_offset += align * 8;
-
- BUG_ON(bit_offset + bits > 64);
-
- /* mov rax, [rsi + imm8] */
- I4(0x48, 0x8b, 0x46, byte);
-
- shl = 64 - bit_offset - bits;
- shr = bit_offset + shl;
-
- if (shl) {
- /* shl rax, imm8 */
- I4(0x48, 0xc1, 0xe0, shl);
- }
-
- if (shr) {
- /* shr rax, imm8 */
- I4(0x48, 0xc1, 0xe8, shr);
- }
- } else {
- align = min(4 - DIV_ROUND_UP(bit_offset + bits, 8), byte & 3);
- byte -= align;
- bit_offset += align * 8;
-
- BUG_ON(bit_offset + bits > 96);
-
- /* mov rax, [rsi + byte] */
- I4(0x48, 0x8b, 0x46, byte);
-
- /* mov edx, [rsi + byte + 8] */
- I3(0x8b, 0x56, byte + 8);
-
- /* bits from next word: */
- shr = bit_offset + bits - 64;
- BUG_ON(shr > bit_offset);
-
- /* shr rax, bit_offset */
- I4(0x48, 0xc1, 0xe8, shr);
-
- /* shl rdx, imm8 */
- I4(0x48, 0xc1, 0xe2, 64 - shr);
-
- /* or rax, rdx */
- I3(0x48, 0x09, 0xd0);
-
- shr = bit_offset - shr;
-
- if (shr) {
- /* shr rax, imm8 */
- I4(0x48, 0xc1, 0xe8, shr);
- }
- }
-
- /* rax += offset: */
- if (offset > S32_MAX) {
- /* mov rdx, imm64 */
- I2(0x48, 0xba);
- memcpy(out, &offset, 8);
- out += 8;
- /* add %rdx, %rax */
- I3(0x48, 0x01, 0xd0);
- } else if (offset + (~0ULL >> (64 - bits)) > U32_MAX) {
- /* add rax, imm32 */
- I2(0x48, 0x05);
- memcpy(out, &offset, 4);
- out += 4;
- } else if (offset) {
- /* add eax, imm32 */
- I1(0x05);
- memcpy(out, &offset, 4);
- out += 4;
- }
-set_field:
- switch (dst_size) {
- case 8:
- /* mov [rdi + dst_offset], rax */
- I4(0x48, 0x89, 0x47, dst_offset);
- break;
- case 4:
- /* mov [rdi + dst_offset], eax */
- I3(0x89, 0x47, dst_offset);
- break;
- default:
- BUG();
- }
-
- return out;
-}
-
-int bch2_compile_bkey_format(const struct bkey_format *format, void *_out)
-{
- bool eax_zeroed = false;
- u8 *out = _out;
-
- /*
- * rdi: dst - unpacked key
- * rsi: src - packed key
- */
-
- /* k->u64s, k->format, k->type */
-
- /* mov eax, [rsi] */
- I2(0x8b, 0x06);
-
- /* add eax, BKEY_U64s - format->key_u64s */
- I5(0x05, BKEY_U64s - format->key_u64s, KEY_FORMAT_CURRENT, 0, 0);
-
- /* and eax, imm32: mask out k->pad: */
- I5(0x25, 0xff, 0xff, 0xff, 0);
-
- /* mov [rdi], eax */
- I2(0x89, 0x07);
-
-#define x(id, field) \
- out = compile_bkey_field(format, out, id, \
- offsetof(struct bkey, field), \
- sizeof(((struct bkey *) NULL)->field), \
- &eax_zeroed);
- bkey_fields()
-#undef x
-
- /* retq */
- I1(0xc3);
-
- return (void *) out - _out;
-}
-
-#else
-#endif
-
-__pure
-int __bch2_bkey_cmp_packed_format_checked(const struct bkey_packed *l,
- const struct bkey_packed *r,
- const struct btree *b)
-{
- return __bch2_bkey_cmp_packed_format_checked_inlined(l, r, b);
-}
-
-__pure __flatten
-int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *b,
- const struct bkey_packed *l,
- const struct bpos *r)
-{
- return bpos_cmp(bkey_unpack_pos_format_checked(b, l), *r);
-}
-
-__pure __flatten
-int bch2_bkey_cmp_packed(const struct btree *b,
- const struct bkey_packed *l,
- const struct bkey_packed *r)
-{
- return bch2_bkey_cmp_packed_inlined(b, l, r);
-}
-
-__pure __flatten
-int __bch2_bkey_cmp_left_packed(const struct btree *b,
- const struct bkey_packed *l,
- const struct bpos *r)
-{
- const struct bkey *l_unpacked;
-
- return unlikely(l_unpacked = packed_to_bkey_c(l))
- ? bpos_cmp(l_unpacked->p, *r)
- : __bch2_bkey_cmp_left_packed_format_checked(b, l, r);
-}
-
-void bch2_bpos_swab(struct bpos *p)
-{
- u8 *l = (u8 *) p;
- u8 *h = ((u8 *) &p[1]) - 1;
-
- while (l < h) {
- swap(*l, *h);
- l++;
- --h;
- }
-}
-
-void bch2_bkey_swab_key(const struct bkey_format *_f, struct bkey_packed *k)
-{
- const struct bkey_format *f = bkey_packed(k) ? _f : &bch2_bkey_format_current;
- u8 *l = k->key_start;
- u8 *h = (u8 *) ((u64 *) k->_data + f->key_u64s) - 1;
-
- while (l < h) {
- swap(*l, *h);
- l++;
- --h;
- }
-}
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-void bch2_bkey_pack_test(void)
-{
- struct bkey t = KEY(4134ULL, 1250629070527416633ULL, 0);
- struct bkey_packed p;
-
- struct bkey_format test_format = {
- .key_u64s = 3,
- .nr_fields = BKEY_NR_FIELDS,
- .bits_per_field = {
- 13,
- 64,
- 32,
- },
- };
-
- struct unpack_state in_s =
- unpack_state_init(&bch2_bkey_format_current, (void *) &t);
- struct pack_state out_s = pack_state_init(&test_format, &p);
- unsigned i;
-
- for (i = 0; i < out_s.format->nr_fields; i++) {
- u64 a, v = get_inc_field(&in_s, i);
-
- switch (i) {
-#define x(id, field) case id: a = t.field; break;
- bkey_fields()
-#undef x
- default:
- BUG();
- }
-
- if (a != v)
- panic("got %llu actual %llu i %u\n", v, a, i);
-
- if (!set_inc_field(&out_s, i, v))
- panic("failed at %u\n", i);
- }
-
- BUG_ON(!bch2_bkey_pack_key(&p, &t, &test_format));
-}
-#endif
diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h
deleted file mode 100644
index 41df24a53d97..000000000000
--- a/fs/bcachefs/bkey.h
+++ /dev/null
@@ -1,612 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BKEY_H
-#define _BCACHEFS_BKEY_H
-
-#include <linux/bug.h>
-#include "bcachefs_format.h"
-#include "bkey_types.h"
-#include "btree_types.h"
-#include "util.h"
-#include "vstructs.h"
-
-enum bch_validate_flags {
- BCH_VALIDATE_write = BIT(0),
- BCH_VALIDATE_commit = BIT(1),
- BCH_VALIDATE_journal = BIT(2),
- BCH_VALIDATE_silent = BIT(3),
-};
-
-#if 0
-
-/*
- * compiled unpack functions are disabled, pending a new interface for
- * dynamically allocating executable memory:
- */
-
-#ifdef CONFIG_X86_64
-#define HAVE_BCACHEFS_COMPILED_UNPACK 1
-#endif
-#endif
-
-void bch2_bkey_packed_to_binary_text(struct printbuf *,
- const struct bkey_format *,
- const struct bkey_packed *);
-
-enum bkey_lr_packed {
- BKEY_PACKED_BOTH,
- BKEY_PACKED_RIGHT,
- BKEY_PACKED_LEFT,
- BKEY_PACKED_NONE,
-};
-
-#define bkey_lr_packed(_l, _r) \
- ((_l)->format + ((_r)->format << 1))
-
-static inline void bkey_p_copy(struct bkey_packed *dst, const struct bkey_packed *src)
-{
- memcpy_u64s_small(dst, src, src->u64s);
-}
-
-static inline void bkey_copy(struct bkey_i *dst, const struct bkey_i *src)
-{
- memcpy_u64s_small(dst, src, src->k.u64s);
-}
-
-struct btree;
-
-__pure
-unsigned bch2_bkey_greatest_differing_bit(const struct btree *,
- const struct bkey_packed *,
- const struct bkey_packed *);
-__pure
-unsigned bch2_bkey_ffs(const struct btree *, const struct bkey_packed *);
-
-__pure
-int __bch2_bkey_cmp_packed_format_checked(const struct bkey_packed *,
- const struct bkey_packed *,
- const struct btree *);
-
-__pure
-int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *,
- const struct bkey_packed *,
- const struct bpos *);
-
-__pure
-int bch2_bkey_cmp_packed(const struct btree *,
- const struct bkey_packed *,
- const struct bkey_packed *);
-
-__pure
-int __bch2_bkey_cmp_left_packed(const struct btree *,
- const struct bkey_packed *,
- const struct bpos *);
-
-static inline __pure
-int bkey_cmp_left_packed(const struct btree *b,
- const struct bkey_packed *l, const struct bpos *r)
-{
- return __bch2_bkey_cmp_left_packed(b, l, r);
-}
-
-/*
- * The compiler generates better code when we pass bpos by ref, but it's often
- * terribly convenient to pass it by val... as much as I hate C++, a const
- * ref would be nice here:
- */
-__pure __flatten
-static inline int bkey_cmp_left_packed_byval(const struct btree *b,
- const struct bkey_packed *l,
- struct bpos r)
-{
- return bkey_cmp_left_packed(b, l, &r);
-}
-
-static __always_inline bool bpos_eq(struct bpos l, struct bpos r)
-{
- return !((l.inode ^ r.inode) |
- (l.offset ^ r.offset) |
- (l.snapshot ^ r.snapshot));
-}
-
-static __always_inline bool bpos_lt(struct bpos l, struct bpos r)
-{
- return l.inode != r.inode ? l.inode < r.inode :
- l.offset != r.offset ? l.offset < r.offset :
- l.snapshot != r.snapshot ? l.snapshot < r.snapshot : false;
-}
-
-static __always_inline bool bpos_le(struct bpos l, struct bpos r)
-{
- return l.inode != r.inode ? l.inode < r.inode :
- l.offset != r.offset ? l.offset < r.offset :
- l.snapshot != r.snapshot ? l.snapshot < r.snapshot : true;
-}
-
-static __always_inline bool bpos_gt(struct bpos l, struct bpos r)
-{
- return bpos_lt(r, l);
-}
-
-static __always_inline bool bpos_ge(struct bpos l, struct bpos r)
-{
- return bpos_le(r, l);
-}
-
-static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
-{
- return cmp_int(l.inode, r.inode) ?:
- cmp_int(l.offset, r.offset) ?:
- cmp_int(l.snapshot, r.snapshot);
-}
-
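bpos_cmp() and bkey_cmp() rely on cmp_int() returning -1/0/1 and on the GNU "a ?: b" operator falling through to the next field only when the earlier fields compared equal. A minimal userspace demo; cmp_int() here is a stand-in with the same semantics, and the ?: form builds with GCC/Clang as in the kernel source.

#include <stdio.h>
#include <stdint.h>

#define cmp_int(l, r)	(((l) > (r)) - ((l) < (r)))

struct pos { uint64_t inode, offset, snapshot; };

static int pos_cmp(struct pos l, struct pos r)
{
	return  cmp_int(l.inode, r.inode) ?:
		cmp_int(l.offset, r.offset) ?:
		cmp_int(l.snapshot, r.snapshot);
}

int main(void)
{
	struct pos a = { 1, 10, 3 }, b = { 1, 10, 7 };

	printf("%d\n", pos_cmp(a, b));	/* -1: decided by the snapshot field */
	printf("%d\n", pos_cmp(b, b));	/*  0: all fields equal */
	return 0;
}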
-static inline struct bpos bpos_min(struct bpos l, struct bpos r)
-{
- return bpos_lt(l, r) ? l : r;
-}
-
-static inline struct bpos bpos_max(struct bpos l, struct bpos r)
-{
- return bpos_gt(l, r) ? l : r;
-}
-
-static __always_inline bool bkey_eq(struct bpos l, struct bpos r)
-{
- return !((l.inode ^ r.inode) |
- (l.offset ^ r.offset));
-}
-
-static __always_inline bool bkey_lt(struct bpos l, struct bpos r)
-{
- return l.inode != r.inode
- ? l.inode < r.inode
- : l.offset < r.offset;
-}
-
-static __always_inline bool bkey_le(struct bpos l, struct bpos r)
-{
- return l.inode != r.inode
- ? l.inode < r.inode
- : l.offset <= r.offset;
-}
-
-static __always_inline bool bkey_gt(struct bpos l, struct bpos r)
-{
- return bkey_lt(r, l);
-}
-
-static __always_inline bool bkey_ge(struct bpos l, struct bpos r)
-{
- return bkey_le(r, l);
-}
-
-static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
-{
- return cmp_int(l.inode, r.inode) ?:
- cmp_int(l.offset, r.offset);
-}
-
-static inline struct bpos bkey_min(struct bpos l, struct bpos r)
-{
- return bkey_lt(l, r) ? l : r;
-}
-
-static inline struct bpos bkey_max(struct bpos l, struct bpos r)
-{
- return bkey_gt(l, r) ? l : r;
-}
-
-static inline bool bkey_and_val_eq(struct bkey_s_c l, struct bkey_s_c r)
-{
- return bpos_eq(l.k->p, r.k->p) &&
- bkey_bytes(l.k) == bkey_bytes(r.k) &&
- !memcmp(l.v, r.v, bkey_val_bytes(l.k));
-}
-
-void bch2_bpos_swab(struct bpos *);
-void bch2_bkey_swab_key(const struct bkey_format *, struct bkey_packed *);
-
-static __always_inline int bversion_cmp(struct bversion l, struct bversion r)
-{
- return cmp_int(l.hi, r.hi) ?:
- cmp_int(l.lo, r.lo);
-}
-
-#define ZERO_VERSION ((struct bversion) { .hi = 0, .lo = 0 })
-#define MAX_VERSION ((struct bversion) { .hi = ~0, .lo = ~0ULL })
-
-static __always_inline bool bversion_zero(struct bversion v)
-{
- return bversion_cmp(v, ZERO_VERSION) == 0;
-}
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-/* statement expressions confusing unlikely()? */
-#define bkey_packed(_k) \
- ({ EBUG_ON((_k)->format > KEY_FORMAT_CURRENT); \
- (_k)->format != KEY_FORMAT_CURRENT; })
-#else
-#define bkey_packed(_k) ((_k)->format != KEY_FORMAT_CURRENT)
-#endif
-
-/*
- * It's safe to treat an unpacked bkey as a packed one, but not the reverse
- */
-static inline struct bkey_packed *bkey_to_packed(struct bkey_i *k)
-{
- return (struct bkey_packed *) k;
-}
-
-static inline const struct bkey_packed *bkey_to_packed_c(const struct bkey_i *k)
-{
- return (const struct bkey_packed *) k;
-}
-
-static inline struct bkey_i *packed_to_bkey(struct bkey_packed *k)
-{
- return bkey_packed(k) ? NULL : (struct bkey_i *) k;
-}
-
-static inline const struct bkey *packed_to_bkey_c(const struct bkey_packed *k)
-{
- return bkey_packed(k) ? NULL : (const struct bkey *) k;
-}
-
-static inline unsigned bkey_format_key_bits(const struct bkey_format *format)
-{
- return format->bits_per_field[BKEY_FIELD_INODE] +
- format->bits_per_field[BKEY_FIELD_OFFSET] +
- format->bits_per_field[BKEY_FIELD_SNAPSHOT];
-}
-
-static inline struct bpos bpos_successor(struct bpos p)
-{
- if (!++p.snapshot &&
- !++p.offset &&
- !++p.inode)
- BUG();
-
- return p;
-}
-
-static inline struct bpos bpos_predecessor(struct bpos p)
-{
- if (!p.snapshot-- &&
- !p.offset-- &&
- !p.inode--)
- BUG();
-
- return p;
-}
-
-static inline struct bpos bpos_nosnap_successor(struct bpos p)
-{
- p.snapshot = 0;
-
- if (!++p.offset &&
- !++p.inode)
- BUG();
-
- return p;
-}
-
-static inline struct bpos bpos_nosnap_predecessor(struct bpos p)
-{
- p.snapshot = 0;
-
- if (!p.offset-- &&
- !p.inode--)
- BUG();
-
- return p;
-}
-
-static inline u64 bkey_start_offset(const struct bkey *k)
-{
- return k->p.offset - k->size;
-}
-
-static inline struct bpos bkey_start_pos(const struct bkey *k)
-{
- return (struct bpos) {
- .inode = k->p.inode,
- .offset = bkey_start_offset(k),
- .snapshot = k->p.snapshot,
- };
-}
-
-/* Packed helpers */
-
-static inline unsigned bkeyp_key_u64s(const struct bkey_format *format,
- const struct bkey_packed *k)
-{
- return bkey_packed(k) ? format->key_u64s : BKEY_U64s;
-}
-
-static inline bool bkeyp_u64s_valid(const struct bkey_format *f,
- const struct bkey_packed *k)
-{
- return ((unsigned) k->u64s - bkeyp_key_u64s(f, k) <= U8_MAX - BKEY_U64s);
-}
-
-static inline unsigned bkeyp_key_bytes(const struct bkey_format *format,
- const struct bkey_packed *k)
-{
- return bkeyp_key_u64s(format, k) * sizeof(u64);
-}
-
-static inline unsigned bkeyp_val_u64s(const struct bkey_format *format,
- const struct bkey_packed *k)
-{
- return k->u64s - bkeyp_key_u64s(format, k);
-}
-
-static inline size_t bkeyp_val_bytes(const struct bkey_format *format,
- const struct bkey_packed *k)
-{
- return bkeyp_val_u64s(format, k) * sizeof(u64);
-}
-
-static inline void set_bkeyp_val_u64s(const struct bkey_format *format,
- struct bkey_packed *k, unsigned val_u64s)
-{
- k->u64s = bkeyp_key_u64s(format, k) + val_u64s;
-}
-
-#define bkeyp_val(_format, _k) \
- ((struct bch_val *) ((u64 *) (_k)->_data + bkeyp_key_u64s(_format, _k)))
-
-extern const struct bkey_format bch2_bkey_format_current;
-
-bool bch2_bkey_transform(const struct bkey_format *,
- struct bkey_packed *,
- const struct bkey_format *,
- const struct bkey_packed *);
-
-struct bkey __bch2_bkey_unpack_key(const struct bkey_format *,
- const struct bkey_packed *);
-
-#ifndef HAVE_BCACHEFS_COMPILED_UNPACK
-struct bpos __bkey_unpack_pos(const struct bkey_format *,
- const struct bkey_packed *);
-#endif
-
-bool bch2_bkey_pack_key(struct bkey_packed *, const struct bkey *,
- const struct bkey_format *);
-
-enum bkey_pack_pos_ret {
- BKEY_PACK_POS_EXACT,
- BKEY_PACK_POS_SMALLER,
- BKEY_PACK_POS_FAIL,
-};
-
-enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *, struct bpos,
- const struct btree *);
-
-static inline bool bkey_pack_pos(struct bkey_packed *out, struct bpos in,
- const struct btree *b)
-{
- return bch2_bkey_pack_pos_lossy(out, in, b) == BKEY_PACK_POS_EXACT;
-}
-
-void bch2_bkey_unpack(const struct btree *, struct bkey_i *,
- const struct bkey_packed *);
-bool bch2_bkey_pack(struct bkey_packed *, const struct bkey_i *,
- const struct bkey_format *);
-
-typedef void (*compiled_unpack_fn)(struct bkey *, const struct bkey_packed *);
-
-static inline void
-__bkey_unpack_key_format_checked(const struct btree *b,
- struct bkey *dst,
- const struct bkey_packed *src)
-{
- if (IS_ENABLED(HAVE_BCACHEFS_COMPILED_UNPACK)) {
- compiled_unpack_fn unpack_fn = b->aux_data;
- unpack_fn(dst, src);
-
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
- bch2_expensive_debug_checks) {
- struct bkey dst2 = __bch2_bkey_unpack_key(&b->format, src);
-
- BUG_ON(memcmp(dst, &dst2, sizeof(*dst)));
- }
- } else {
- *dst = __bch2_bkey_unpack_key(&b->format, src);
- }
-}
-
-static inline struct bkey
-bkey_unpack_key_format_checked(const struct btree *b,
- const struct bkey_packed *src)
-{
- struct bkey dst;
-
- __bkey_unpack_key_format_checked(b, &dst, src);
- return dst;
-}
-
-static inline void __bkey_unpack_key(const struct btree *b,
- struct bkey *dst,
- const struct bkey_packed *src)
-{
- if (likely(bkey_packed(src)))
- __bkey_unpack_key_format_checked(b, dst, src);
- else
- *dst = *packed_to_bkey_c(src);
-}
-
-/**
- * bkey_unpack_key -- unpack just the key, not the value
- */
-static inline struct bkey bkey_unpack_key(const struct btree *b,
- const struct bkey_packed *src)
-{
- return likely(bkey_packed(src))
- ? bkey_unpack_key_format_checked(b, src)
- : *packed_to_bkey_c(src);
-}
-
-static inline struct bpos
-bkey_unpack_pos_format_checked(const struct btree *b,
- const struct bkey_packed *src)
-{
-#ifdef HAVE_BCACHEFS_COMPILED_UNPACK
- return bkey_unpack_key_format_checked(b, src).p;
-#else
- return __bkey_unpack_pos(&b->format, src);
-#endif
-}
-
-static inline struct bpos bkey_unpack_pos(const struct btree *b,
- const struct bkey_packed *src)
-{
- return likely(bkey_packed(src))
- ? bkey_unpack_pos_format_checked(b, src)
- : packed_to_bkey_c(src)->p;
-}
-
-/* Disassembled bkeys */
-
-static inline struct bkey_s_c bkey_disassemble(const struct btree *b,
- const struct bkey_packed *k,
- struct bkey *u)
-{
- __bkey_unpack_key(b, u, k);
-
- return (struct bkey_s_c) { u, bkeyp_val(&b->format, k), };
-}
-
-/* non const version: */
-static inline struct bkey_s __bkey_disassemble(const struct btree *b,
- struct bkey_packed *k,
- struct bkey *u)
-{
- __bkey_unpack_key(b, u, k);
-
- return (struct bkey_s) { .k = u, .v = bkeyp_val(&b->format, k), };
-}
-
-static inline u64 bkey_field_max(const struct bkey_format *f,
- enum bch_bkey_fields nr)
-{
- return f->bits_per_field[nr] < 64
- ? (le64_to_cpu(f->field_offset[nr]) +
- ~(~0ULL << f->bits_per_field[nr]))
- : U64_MAX;
-}
-
-#ifdef HAVE_BCACHEFS_COMPILED_UNPACK
-
-int bch2_compile_bkey_format(const struct bkey_format *, void *);
-
-#else
-
-static inline int bch2_compile_bkey_format(const struct bkey_format *format,
- void *out) { return 0; }
-
-#endif
-
-static inline void bkey_reassemble(struct bkey_i *dst,
- struct bkey_s_c src)
-{
- dst->k = *src.k;
- memcpy_u64s_small(&dst->v, src.v, bkey_val_u64s(src.k));
-}
-
-/* byte order helpers */
-
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-
-static inline unsigned high_word_offset(const struct bkey_format *f)
-{
- return f->key_u64s - 1;
-}
-
-#define high_bit_offset 0
-#define nth_word(p, n) ((p) - (n))
-
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-
-static inline unsigned high_word_offset(const struct bkey_format *f)
-{
- return 0;
-}
-
-#define high_bit_offset KEY_PACKED_BITS_START
-#define nth_word(p, n) ((p) + (n))
-
-#else
-#error edit for your odd byteorder.
-#endif
-
-#define high_word(f, k) ((u64 *) (k)->_data + high_word_offset(f))
-#define next_word(p) nth_word(p, 1)
-#define prev_word(p) nth_word(p, -1)
-
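The macros above say that packed key bits are walked from the most significant word down: on little-endian that is the last u64 of the key, on big-endian it is word 0, whose top KEY_PACKED_BITS_START bits hold the packed header. A small userspace sketch of the same convention; the constants are examples.

#include <stdint.h>
#include <stdio.h>

#define KEY_U64S	3	/* example format->key_u64s */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define HIGH_WORD_OFFSET	(KEY_U64S - 1)
#define NTH_WORD(p, n)		((p) - (n))
#else
#define HIGH_WORD_OFFSET	0
#define NTH_WORD(p, n)		((p) + (n))
#endif

int main(void)
{
	uint64_t key[KEY_U64S] = { 0x1111, 0x2222, 0x3333 };

	/* walk from the most significant word toward the least significant */
	for (unsigned i = 0; i < KEY_U64S; i++)
		printf("word %u: %#llx\n", i,
		       (unsigned long long) *NTH_WORD(key + HIGH_WORD_OFFSET, i));
	return 0;
}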
-#ifdef CONFIG_BCACHEFS_DEBUG
-void bch2_bkey_pack_test(void);
-#else
-static inline void bch2_bkey_pack_test(void) {}
-#endif
-
-#define bkey_fields() \
- x(BKEY_FIELD_INODE, p.inode) \
- x(BKEY_FIELD_OFFSET, p.offset) \
- x(BKEY_FIELD_SNAPSHOT, p.snapshot) \
- x(BKEY_FIELD_SIZE, size) \
- x(BKEY_FIELD_VERSION_HI, bversion.hi) \
- x(BKEY_FIELD_VERSION_LO, bversion.lo)
-
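bkey_fields() is an X-macro list: callers define x(id, field) to stamp out one statement per key field, expand the list, then undefine x again. A minimal standalone illustration of the pattern; the field ids and struct below are made up.

#include <stdio.h>
#include <stdint.h>

struct demo_key { uint64_t inode, offset, snapshot; };

#define demo_fields()		\
	x(0, inode)		\
	x(1, offset)		\
	x(2, snapshot)

int main(void)
{
	struct demo_key k = { 4096, 123, 1 };

	/* print "<id>: <value>" for every field in the list */
#define x(id, field)	printf("%d: %llu\n", id, (unsigned long long) k.field);
	demo_fields()
#undef x
	return 0;
}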
-struct bkey_format_state {
- u64 field_min[BKEY_NR_FIELDS];
- u64 field_max[BKEY_NR_FIELDS];
-};
-
-void bch2_bkey_format_init(struct bkey_format_state *);
-
-static inline void __bkey_format_add(struct bkey_format_state *s, unsigned field, u64 v)
-{
- s->field_min[field] = min(s->field_min[field], v);
- s->field_max[field] = max(s->field_max[field], v);
-}
-
-/*
- * Updates the format state @s so that @k can be successfully packed with the
- * format built from it
- */
-static inline void bch2_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k)
-{
-#define x(id, field) __bkey_format_add(s, id, k->field);
- bkey_fields()
-#undef x
-}
-
-void bch2_bkey_format_add_pos(struct bkey_format_state *, struct bpos);
-struct bkey_format bch2_bkey_format_done(struct bkey_format_state *);
-
-static inline bool bch2_bkey_format_field_overflows(struct bkey_format *f, unsigned i)
-{
- unsigned f_bits = f->bits_per_field[i];
- unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
- u64 unpacked_mask = ~((~0ULL << 1) << (unpacked_bits - 1));
- u64 field_offset = le64_to_cpu(f->field_offset[i]);
-
- if (f_bits > unpacked_bits)
- return true;
-
- if ((f_bits == unpacked_bits) && field_offset)
- return true;
-
- u64 f_mask = f_bits
- ? ~((~0ULL << (f_bits - 1)) << 1)
- : 0;
-
- if (((field_offset + f_mask) & unpacked_mask) < field_offset)
- return true;
- return false;
-}
-
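Restated in standalone form, the overflow test above rejects a field whose packed width exceeds the unpacked width, whose width matches the unpacked width but carries a nonzero offset, or whose offset plus largest packable value wraps past the unpacked maximum. The widths in main() are arbitrary examples.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static bool field_overflows(unsigned f_bits, uint64_t field_offset,
			    unsigned unpacked_bits)
{
	uint64_t unpacked_mask = ~((~0ULL << 1) << (unpacked_bits - 1));
	uint64_t f_mask = f_bits ? ~((~0ULL << (f_bits - 1)) << 1) : 0;

	if (f_bits > unpacked_bits)
		return true;
	if (f_bits == unpacked_bits && field_offset)
		return true;
	/* does offset + largest packed value wrap past the unpacked max? */
	return ((field_offset + f_mask) & unpacked_mask) < field_offset;
}

int main(void)
{
	printf("%d\n", field_overflows(32, 0, 32));		/* 0: exact fit */
	printf("%d\n", field_overflows(32, 1, 32));		/* 1: offset pushes past max */
	printf("%d\n", field_overflows(16, 1 << 20, 32));	/* 0: 2^20 + 2^16 - 1 < 2^32 */
	return 0;
}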
-int bch2_bkey_format_invalid(struct bch_fs *, struct bkey_format *,
- enum bch_validate_flags, struct printbuf *);
-void bch2_bkey_format_to_text(struct printbuf *, const struct bkey_format *);
-
-#endif /* _BCACHEFS_BKEY_H */
diff --git a/fs/bcachefs/bkey_buf.h b/fs/bcachefs/bkey_buf.h
deleted file mode 100644
index a30c4ae8eb36..000000000000
--- a/fs/bcachefs/bkey_buf.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BKEY_BUF_H
-#define _BCACHEFS_BKEY_BUF_H
-
-#include "bcachefs.h"
-#include "bkey.h"
-
-struct bkey_buf {
- struct bkey_i *k;
- u64 onstack[12];
-};
-
-static inline void bch2_bkey_buf_realloc(struct bkey_buf *s,
- struct bch_fs *c, unsigned u64s)
-{
- if (s->k == (void *) s->onstack &&
- u64s > ARRAY_SIZE(s->onstack)) {
- s->k = mempool_alloc(&c->large_bkey_pool, GFP_NOFS);
- memcpy(s->k, s->onstack, sizeof(s->onstack));
- }
-}
-
-static inline void bch2_bkey_buf_reassemble(struct bkey_buf *s,
- struct bch_fs *c,
- struct bkey_s_c k)
-{
- bch2_bkey_buf_realloc(s, c, k.k->u64s);
- bkey_reassemble(s->k, k);
-}
-
-static inline void bch2_bkey_buf_copy(struct bkey_buf *s,
- struct bch_fs *c,
- struct bkey_i *src)
-{
- bch2_bkey_buf_realloc(s, c, src->k.u64s);
- bkey_copy(s->k, src);
-}
-
-static inline void bch2_bkey_buf_unpack(struct bkey_buf *s,
- struct bch_fs *c,
- struct btree *b,
- struct bkey_packed *src)
-{
- bch2_bkey_buf_realloc(s, c, BKEY_U64s +
- bkeyp_val_u64s(&b->format, src));
- bch2_bkey_unpack(b, s->k, src);
-}
-
-static inline void bch2_bkey_buf_init(struct bkey_buf *s)
-{
- s->k = (void *) s->onstack;
-}
-
-static inline void bch2_bkey_buf_exit(struct bkey_buf *s, struct bch_fs *c)
-{
- if (s->k != (void *) s->onstack)
- mempool_free(s->k, &c->large_bkey_pool);
- s->k = NULL;
-}
-
-#endif /* _BCACHEFS_BKEY_BUF_H */
diff --git a/fs/bcachefs/bkey_cmp.h b/fs/bcachefs/bkey_cmp.h
deleted file mode 100644
index 5f42a6e69360..000000000000
--- a/fs/bcachefs/bkey_cmp.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BKEY_CMP_H
-#define _BCACHEFS_BKEY_CMP_H
-
-#include "bkey.h"
-
-#ifdef CONFIG_X86_64
-static inline int __bkey_cmp_bits(const u64 *l, const u64 *r,
- unsigned nr_key_bits)
-{
- long d0, d1, d2, d3;
- int cmp;
-
-	/* we shouldn't need asm for this, but gcc doesn't generate good code for it on its own: */
-
- asm(".intel_syntax noprefix;"
- "xor eax, eax;"
- "xor edx, edx;"
- "1:;"
- "mov r8, [rdi];"
- "mov r9, [rsi];"
- "sub ecx, 64;"
- "jl 2f;"
-
- "cmp r8, r9;"
- "jnz 3f;"
-
- "lea rdi, [rdi - 8];"
- "lea rsi, [rsi - 8];"
- "jmp 1b;"
-
- "2:;"
- "not ecx;"
- "shr r8, 1;"
- "shr r9, 1;"
- "shr r8, cl;"
- "shr r9, cl;"
- "cmp r8, r9;"
-
- "3:\n"
- "seta al;"
- "setb dl;"
- "sub eax, edx;"
- ".att_syntax prefix;"
- : "=&D" (d0), "=&S" (d1), "=&d" (d2), "=&c" (d3), "=&a" (cmp)
- : "0" (l), "1" (r), "3" (nr_key_bits)
- : "r8", "r9", "cc", "memory");
-
- return cmp;
-}
-#else
-static inline int __bkey_cmp_bits(const u64 *l, const u64 *r,
- unsigned nr_key_bits)
-{
- u64 l_v, r_v;
-
- if (!nr_key_bits)
- return 0;
-
- /* for big endian, skip past header */
- nr_key_bits += high_bit_offset;
- l_v = *l & (~0ULL >> high_bit_offset);
- r_v = *r & (~0ULL >> high_bit_offset);
-
- while (1) {
- if (nr_key_bits < 64) {
- l_v >>= 64 - nr_key_bits;
- r_v >>= 64 - nr_key_bits;
- nr_key_bits = 0;
- } else {
- nr_key_bits -= 64;
- }
-
- if (!nr_key_bits || l_v != r_v)
- break;
-
- l = next_word(l);
- r = next_word(r);
-
- l_v = *l;
- r_v = *r;
- }
-
- return cmp_int(l_v, r_v);
-}
-#endif
-
-static inline __pure __flatten
-int __bch2_bkey_cmp_packed_format_checked_inlined(const struct bkey_packed *l,
- const struct bkey_packed *r,
- const struct btree *b)
-{
- const struct bkey_format *f = &b->format;
- int ret;
-
- EBUG_ON(!bkey_packed(l) || !bkey_packed(r));
- EBUG_ON(b->nr_key_bits != bkey_format_key_bits(f));
-
- ret = __bkey_cmp_bits(high_word(f, l),
- high_word(f, r),
- b->nr_key_bits);
-
- EBUG_ON(ret != bpos_cmp(bkey_unpack_pos(b, l),
- bkey_unpack_pos(b, r)));
- return ret;
-}
-
-static inline __pure __flatten
-int bch2_bkey_cmp_packed_inlined(const struct btree *b,
- const struct bkey_packed *l,
- const struct bkey_packed *r)
-{
- struct bkey unpacked;
-
- if (likely(bkey_packed(l) && bkey_packed(r)))
- return __bch2_bkey_cmp_packed_format_checked_inlined(l, r, b);
-
- if (bkey_packed(l)) {
- __bkey_unpack_key_format_checked(b, &unpacked, l);
- l = (void *) &unpacked;
- } else if (bkey_packed(r)) {
- __bkey_unpack_key_format_checked(b, &unpacked, r);
- r = (void *) &unpacked;
- }
-
- return bpos_cmp(((struct bkey *) l)->p, ((struct bkey *) r)->p);
-}
-
-#endif /* _BCACHEFS_BKEY_CMP_H */
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
deleted file mode 100644
index e7ac227ba7e8..000000000000
--- a/fs/bcachefs/bkey_methods.c
+++ /dev/null
@@ -1,480 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "backpointers.h"
-#include "bkey_methods.h"
-#include "btree_cache.h"
-#include "btree_types.h"
-#include "alloc_background.h"
-#include "dirent.h"
-#include "disk_accounting.h"
-#include "ec.h"
-#include "error.h"
-#include "extents.h"
-#include "inode.h"
-#include "io_misc.h"
-#include "lru.h"
-#include "quota.h"
-#include "reflink.h"
-#include "snapshot.h"
-#include "subvolume.h"
-#include "xattr.h"
-
-const char * const bch2_bkey_types[] = {
-#define x(name, nr) #name,
- BCH_BKEY_TYPES()
-#undef x
- NULL
-};
-
-static int deleted_key_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- return 0;
-}
-
-#define bch2_bkey_ops_deleted ((struct bkey_ops) { \
- .key_validate = deleted_key_validate, \
-})
-
-#define bch2_bkey_ops_whiteout ((struct bkey_ops) { \
- .key_validate = deleted_key_validate, \
-})
-
-static int empty_val_key_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- int ret = 0;
-
- bkey_fsck_err_on(bkey_val_bytes(k.k),
- c, bkey_val_size_nonzero,
- "incorrect value size (%zu != 0)",
- bkey_val_bytes(k.k));
-fsck_err:
- return ret;
-}
-
-#define bch2_bkey_ops_error ((struct bkey_ops) { \
- .key_validate = empty_val_key_validate, \
-})
-
-static int key_type_cookie_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- return 0;
-}
-
-static void key_type_cookie_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- struct bkey_s_c_cookie ck = bkey_s_c_to_cookie(k);
-
- prt_printf(out, "%llu", le64_to_cpu(ck.v->cookie));
-}
-
-#define bch2_bkey_ops_cookie ((struct bkey_ops) { \
- .key_validate = key_type_cookie_validate, \
- .val_to_text = key_type_cookie_to_text, \
- .min_val_size = 8, \
-})
-
-#define bch2_bkey_ops_hash_whiteout ((struct bkey_ops) {\
- .key_validate = empty_val_key_validate, \
-})
-
-static int key_type_inline_data_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- return 0;
-}
-
-static void key_type_inline_data_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- struct bkey_s_c_inline_data d = bkey_s_c_to_inline_data(k);
- unsigned datalen = bkey_inline_data_bytes(k.k);
-
- prt_printf(out, "datalen %u: %*phN",
- datalen, min(datalen, 32U), d.v->data);
-}
-
-#define bch2_bkey_ops_inline_data ((struct bkey_ops) { \
- .key_validate = key_type_inline_data_validate, \
- .val_to_text = key_type_inline_data_to_text, \
-})
-
-static bool key_type_set_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
-{
- bch2_key_resize(l.k, l.k->size + r.k->size);
- return true;
-}
-
-#define bch2_bkey_ops_set ((struct bkey_ops) { \
- .key_validate = empty_val_key_validate, \
- .key_merge = key_type_set_merge, \
-})
-
-const struct bkey_ops bch2_bkey_ops[] = {
-#define x(name, nr) [KEY_TYPE_##name] = bch2_bkey_ops_##name,
- BCH_BKEY_TYPES()
-#undef x
-};
-
-const struct bkey_ops bch2_bkey_null_ops = {
-};
-
-int bch2_bkey_val_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- if (test_bit(BCH_FS_no_invalid_checks, &c->flags))
- return 0;
-
- const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);
- int ret = 0;
-
- bkey_fsck_err_on(bkey_val_bytes(k.k) < ops->min_val_size,
- c, bkey_val_size_too_small,
- "bad val size (%zu < %u)",
- bkey_val_bytes(k.k), ops->min_val_size);
-
- if (!ops->key_validate)
- return 0;
-
- ret = ops->key_validate(c, k, flags);
-fsck_err:
- return ret;
-}
-
-static u64 bch2_key_types_allowed[] = {
- [BKEY_TYPE_btree] =
- BIT_ULL(KEY_TYPE_deleted)|
- BIT_ULL(KEY_TYPE_btree_ptr)|
- BIT_ULL(KEY_TYPE_btree_ptr_v2),
-#define x(name, nr, flags, keys) [BKEY_TYPE_##name] = BIT_ULL(KEY_TYPE_deleted)|keys,
- BCH_BTREE_IDS()
-#undef x
-};
-
-const char *bch2_btree_node_type_str(enum btree_node_type type)
-{
- return type == BKEY_TYPE_btree ? "internal btree node" : bch2_btree_id_str(type - 1);
-}
-
-int __bch2_bkey_validate(struct bch_fs *c, struct bkey_s_c k,
- enum btree_node_type type,
- enum bch_validate_flags flags)
-{
- if (test_bit(BCH_FS_no_invalid_checks, &c->flags))
- return 0;
-
- int ret = 0;
-
- bkey_fsck_err_on(k.k->u64s < BKEY_U64s,
- c, bkey_u64s_too_small,
- "u64s too small (%u < %zu)", k.k->u64s, BKEY_U64s);
-
- if (type >= BKEY_TYPE_NR)
- return 0;
-
- bkey_fsck_err_on(k.k->type < KEY_TYPE_MAX &&
- (type == BKEY_TYPE_btree || (flags & BCH_VALIDATE_commit)) &&
- !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type)),
- c, bkey_invalid_type_for_btree,
- "invalid key type for btree %s (%s)",
- bch2_btree_node_type_str(type),
- k.k->type < KEY_TYPE_MAX
- ? bch2_bkey_types[k.k->type]
- : "(unknown)");
-
- if (btree_node_type_is_extents(type) && !bkey_whiteout(k.k)) {
- bkey_fsck_err_on(k.k->size == 0,
- c, bkey_extent_size_zero,
- "size == 0");
-
- bkey_fsck_err_on(k.k->size > k.k->p.offset,
- c, bkey_extent_size_greater_than_offset,
- "size greater than offset (%u > %llu)",
- k.k->size, k.k->p.offset);
- } else {
- bkey_fsck_err_on(k.k->size,
- c, bkey_size_nonzero,
- "size != 0");
- }
-
- if (type != BKEY_TYPE_btree) {
- enum btree_id btree = type - 1;
-
- if (btree_type_has_snapshots(btree)) {
- bkey_fsck_err_on(!k.k->p.snapshot,
- c, bkey_snapshot_zero,
- "snapshot == 0");
- } else if (!btree_type_has_snapshot_field(btree)) {
- bkey_fsck_err_on(k.k->p.snapshot,
- c, bkey_snapshot_nonzero,
- "nonzero snapshot");
- } else {
- /*
- * btree uses snapshot field but it's not required to be
- * nonzero
- */
- }
-
- bkey_fsck_err_on(bkey_eq(k.k->p, POS_MAX),
- c, bkey_at_pos_max,
- "key at POS_MAX");
- }
-fsck_err:
- return ret;
-}
-
-int bch2_bkey_validate(struct bch_fs *c, struct bkey_s_c k,
- enum btree_node_type type,
- enum bch_validate_flags flags)
-{
- return __bch2_bkey_validate(c, k, type, flags) ?:
- bch2_bkey_val_validate(c, k, flags);
-}
-
-int bch2_bkey_in_btree_node(struct bch_fs *c, struct btree *b,
- struct bkey_s_c k, enum bch_validate_flags flags)
-{
- int ret = 0;
-
- bkey_fsck_err_on(bpos_lt(k.k->p, b->data->min_key),
- c, bkey_before_start_of_btree_node,
- "key before start of btree node");
-
- bkey_fsck_err_on(bpos_gt(k.k->p, b->data->max_key),
- c, bkey_after_end_of_btree_node,
- "key past end of btree node");
-fsck_err:
- return ret;
-}
-
-void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
-{
- if (bpos_eq(pos, POS_MIN))
- prt_printf(out, "POS_MIN");
- else if (bpos_eq(pos, POS_MAX))
- prt_printf(out, "POS_MAX");
- else if (bpos_eq(pos, SPOS_MAX))
- prt_printf(out, "SPOS_MAX");
- else {
- if (pos.inode == U64_MAX)
- prt_printf(out, "U64_MAX");
- else
- prt_printf(out, "%llu", pos.inode);
- prt_printf(out, ":");
- if (pos.offset == U64_MAX)
- prt_printf(out, "U64_MAX");
- else
- prt_printf(out, "%llu", pos.offset);
- prt_printf(out, ":");
- if (pos.snapshot == U32_MAX)
- prt_printf(out, "U32_MAX");
- else
- prt_printf(out, "%u", pos.snapshot);
- }
-}
-
-void bch2_bkey_to_text(struct printbuf *out, const struct bkey *k)
-{
- if (k) {
- prt_printf(out, "u64s %u type ", k->u64s);
-
- if (k->type < KEY_TYPE_MAX)
- prt_printf(out, "%s ", bch2_bkey_types[k->type]);
- else
- prt_printf(out, "%u ", k->type);
-
- bch2_bpos_to_text(out, k->p);
-
- prt_printf(out, " len %u ver %llu", k->size, k->bversion.lo);
- } else {
- prt_printf(out, "(null)");
- }
-}
-
-void bch2_val_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);
-
- if (likely(ops->val_to_text))
- ops->val_to_text(out, c, k);
-}
-
-void bch2_bkey_val_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- bch2_bkey_to_text(out, k.k);
-
- if (bkey_val_bytes(k.k)) {
- prt_printf(out, ": ");
- bch2_val_to_text(out, c, k);
- }
-}
-
-void bch2_bkey_swab_val(struct bkey_s k)
-{
- const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);
-
- if (ops->swab)
- ops->swab(k);
-}
-
-bool bch2_bkey_normalize(struct bch_fs *c, struct bkey_s k)
-{
- const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);
-
- return ops->key_normalize
- ? ops->key_normalize(c, k)
- : false;
-}
-
-bool bch2_bkey_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
-{
- const struct bkey_ops *ops = bch2_bkey_type_ops(l.k->type);
-
- return ops->key_merge &&
- bch2_bkey_maybe_mergable(l.k, r.k) &&
- (u64) l.k->size + r.k->size <= KEY_SIZE_MAX &&
- !bch2_key_merging_disabled &&
- ops->key_merge(c, l, r);
-}
-
-static const struct old_bkey_type {
- u8 btree_node_type;
- u8 old;
- u8 new;
-} bkey_renumber_table[] = {
- {BKEY_TYPE_btree, 128, KEY_TYPE_btree_ptr },
- {BKEY_TYPE_extents, 128, KEY_TYPE_extent },
- {BKEY_TYPE_extents, 129, KEY_TYPE_extent },
- {BKEY_TYPE_extents, 130, KEY_TYPE_reservation },
- {BKEY_TYPE_inodes, 128, KEY_TYPE_inode },
- {BKEY_TYPE_inodes, 130, KEY_TYPE_inode_generation },
- {BKEY_TYPE_dirents, 128, KEY_TYPE_dirent },
- {BKEY_TYPE_dirents, 129, KEY_TYPE_hash_whiteout },
- {BKEY_TYPE_xattrs, 128, KEY_TYPE_xattr },
- {BKEY_TYPE_xattrs, 129, KEY_TYPE_hash_whiteout },
- {BKEY_TYPE_alloc, 128, KEY_TYPE_alloc },
- {BKEY_TYPE_quotas, 128, KEY_TYPE_quota },
-};
-
-void bch2_bkey_renumber(enum btree_node_type btree_node_type,
- struct bkey_packed *k,
- int write)
-{
- const struct old_bkey_type *i;
-
- for (i = bkey_renumber_table;
- i < bkey_renumber_table + ARRAY_SIZE(bkey_renumber_table);
- i++)
- if (btree_node_type == i->btree_node_type &&
- k->type == (write ? i->new : i->old)) {
- k->type = write ? i->old : i->new;
- break;
- }
-}
-
-void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
- unsigned version, unsigned big_endian,
- int write,
- struct bkey_format *f,
- struct bkey_packed *k)
-{
- const struct bkey_ops *ops;
- struct bkey uk;
- unsigned nr_compat = 5;
- int i;
-
- /*
- * Do these operations in reverse order in the write path:
- */
-
- for (i = 0; i < nr_compat; i++)
- switch (!write ? i : nr_compat - 1 - i) {
- case 0:
- if (big_endian != CPU_BIG_ENDIAN) {
- bch2_bkey_swab_key(f, k);
- } else if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
- bch2_bkey_swab_key(f, k);
- bch2_bkey_swab_key(f, k);
- }
- break;
- case 1:
- if (version < bcachefs_metadata_version_bkey_renumber)
- bch2_bkey_renumber(__btree_node_type(level, btree_id), k, write);
- break;
- case 2:
- if (version < bcachefs_metadata_version_inode_btree_change &&
- btree_id == BTREE_ID_inodes) {
- if (!bkey_packed(k)) {
- struct bkey_i *u = packed_to_bkey(k);
-
- swap(u->k.p.inode, u->k.p.offset);
- } else if (f->bits_per_field[BKEY_FIELD_INODE] &&
- f->bits_per_field[BKEY_FIELD_OFFSET]) {
- struct bkey_format tmp = *f, *in = f, *out = &tmp;
-
- swap(tmp.bits_per_field[BKEY_FIELD_INODE],
- tmp.bits_per_field[BKEY_FIELD_OFFSET]);
- swap(tmp.field_offset[BKEY_FIELD_INODE],
- tmp.field_offset[BKEY_FIELD_OFFSET]);
-
- if (!write)
- swap(in, out);
-
- uk = __bch2_bkey_unpack_key(in, k);
- swap(uk.p.inode, uk.p.offset);
- BUG_ON(!bch2_bkey_pack_key(k, &uk, out));
- }
- }
- break;
- case 3:
- if (version < bcachefs_metadata_version_snapshot &&
- (level || btree_type_has_snapshots(btree_id))) {
- struct bkey_i *u = packed_to_bkey(k);
-
- if (u) {
- u->k.p.snapshot = write
- ? 0 : U32_MAX;
- } else {
- u64 min_packed = le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]);
- u64 max_packed = min_packed +
- ~(~0ULL << f->bits_per_field[BKEY_FIELD_SNAPSHOT]);
-
- uk = __bch2_bkey_unpack_key(f, k);
- uk.p.snapshot = write
- ? min_packed : min_t(u64, U32_MAX, max_packed);
-
- BUG_ON(!bch2_bkey_pack_key(k, &uk, f));
- }
- }
-
- break;
- case 4: {
- struct bkey_s u;
-
- if (!bkey_packed(k)) {
- u = bkey_i_to_s(packed_to_bkey(k));
- } else {
- uk = __bch2_bkey_unpack_key(f, k);
- u.k = &uk;
- u.v = bkeyp_val(f, k);
- }
-
- if (big_endian != CPU_BIG_ENDIAN)
- bch2_bkey_swab_val(u);
-
- ops = bch2_bkey_type_ops(k->type);
-
- if (ops->compat)
- ops->compat(btree_id, version, big_endian, write, u);
- break;
- }
- default:
- BUG();
- }
-}
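The `!write ? i : nr_compat - 1 - i` index selection above is what makes the write path undo the read-path transformations in mirrored order, as the in-function comment says. A standalone sketch of just that indexing (hypothetical, for illustration only):

    #include <stdio.h>

    int main(void)
    {
        const int nr_compat = 5;

        for (int write = 0; write <= 1; write++) {
            printf(write ? "write: " : "read:  ");
            for (int i = 0; i < nr_compat; i++)
                printf("%d ", !write ? i : nr_compat - 1 - i);
            printf("\n");   /* read:  0 1 2 3 4    write: 4 3 2 1 0 */
        }
        return 0;
    }
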
diff --git a/fs/bcachefs/bkey_methods.h b/fs/bcachefs/bkey_methods.h
deleted file mode 100644
index 018fb72e32d3..000000000000
--- a/fs/bcachefs/bkey_methods.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BKEY_METHODS_H
-#define _BCACHEFS_BKEY_METHODS_H
-
-#include "bkey.h"
-
-struct bch_fs;
-struct btree;
-struct btree_trans;
-struct bkey;
-enum btree_node_type;
-
-extern const char * const bch2_bkey_types[];
-extern const struct bkey_ops bch2_bkey_null_ops;
-
-/*
- * key_validate: checks the validity of @k and returns 0 if good or a negative
- * error code if bad; if invalid, the entire key will be deleted.
- *
- * When invalid, an error message is reported; @flags indicates whether the key
- * is being read or written, and more aggressive checks can be enabled for the
- * write path.
- */
-struct bkey_ops {
- int (*key_validate)(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags);
- void (*val_to_text)(struct printbuf *, struct bch_fs *,
- struct bkey_s_c);
- void (*swab)(struct bkey_s);
- bool (*key_normalize)(struct bch_fs *, struct bkey_s);
- bool (*key_merge)(struct bch_fs *, struct bkey_s, struct bkey_s_c);
- int (*trigger)(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s,
- enum btree_iter_update_trigger_flags);
- void (*compat)(enum btree_id id, unsigned version,
- unsigned big_endian, int write,
- struct bkey_s);
-
- /* Size of value type when first created: */
- unsigned min_val_size;
-};
-
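For orientation, a hedged sketch of the kind of per-type entry bch2_bkey_ops[] is built from; the hook names here are hypothetical, and any hook left unset is simply skipped by the dispatch helpers in bkey_methods.c:

    /* Hypothetical entry for an imaginary "demo" key type. */
    static const struct bkey_ops demo_bkey_ops = {
        .key_validate   = demo_validate,   /* hypothetical hooks */
        .val_to_text    = demo_to_text,
        .min_val_size   = 8,
    };
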
-extern const struct bkey_ops bch2_bkey_ops[];
-
-static inline const struct bkey_ops *bch2_bkey_type_ops(enum bch_bkey_type type)
-{
- return likely(type < KEY_TYPE_MAX)
- ? &bch2_bkey_ops[type]
- : &bch2_bkey_null_ops;
-}
-
-int bch2_bkey_val_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-int __bch2_bkey_validate(struct bch_fs *, struct bkey_s_c, enum btree_node_type,
- enum bch_validate_flags);
-int bch2_bkey_validate(struct bch_fs *, struct bkey_s_c, enum btree_node_type,
- enum bch_validate_flags);
-int bch2_bkey_in_btree_node(struct bch_fs *, struct btree *, struct bkey_s_c,
- enum bch_validate_flags);
-
-void bch2_bpos_to_text(struct printbuf *, struct bpos);
-void bch2_bkey_to_text(struct printbuf *, const struct bkey *);
-void bch2_val_to_text(struct printbuf *, struct bch_fs *,
- struct bkey_s_c);
-void bch2_bkey_val_to_text(struct printbuf *, struct bch_fs *,
- struct bkey_s_c);
-
-void bch2_bkey_swab_val(struct bkey_s);
-
-bool bch2_bkey_normalize(struct bch_fs *, struct bkey_s);
-
-static inline bool bch2_bkey_maybe_mergable(const struct bkey *l, const struct bkey *r)
-{
- return l->type == r->type &&
- !bversion_cmp(l->bversion, r->bversion) &&
- bpos_eq(l->p, bkey_start_pos(r));
-}
-
-bool bch2_bkey_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
-
-static inline int bch2_key_trigger(struct btree_trans *trans,
- enum btree_id btree, unsigned level,
- struct bkey_s_c old, struct bkey_s new,
- enum btree_iter_update_trigger_flags flags)
-{
- const struct bkey_ops *ops = bch2_bkey_type_ops(old.k->type ?: new.k->type);
-
- return ops->trigger
- ? ops->trigger(trans, btree, level, old, new, flags)
- : 0;
-}
-
-static inline int bch2_key_trigger_old(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bkey_i deleted;
-
- bkey_init(&deleted.k);
- deleted.k.p = old.k->p;
-
- return bch2_key_trigger(trans, btree_id, level, old, bkey_i_to_s(&deleted),
- BTREE_TRIGGER_overwrite|flags);
-}
-
-static inline int bch2_key_trigger_new(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s new,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bkey_i deleted;
-
- bkey_init(&deleted.k);
- deleted.k.p = new.k->p;
-
- return bch2_key_trigger(trans, btree_id, level, bkey_i_to_s_c(&deleted), new,
- BTREE_TRIGGER_insert|flags);
-}
-
-void bch2_bkey_renumber(enum btree_node_type, struct bkey_packed *, int);
-
-void __bch2_bkey_compat(unsigned, enum btree_id, unsigned, unsigned,
- int, struct bkey_format *, struct bkey_packed *);
-
-static inline void bch2_bkey_compat(unsigned level, enum btree_id btree_id,
- unsigned version, unsigned big_endian,
- int write,
- struct bkey_format *f,
- struct bkey_packed *k)
-{
- if (version < bcachefs_metadata_version_current ||
- big_endian != CPU_BIG_ENDIAN ||
- IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
- __bch2_bkey_compat(level, btree_id, version,
- big_endian, write, f, k);
-
-}
-
-#endif /* _BCACHEFS_BKEY_METHODS_H */
diff --git a/fs/bcachefs/bkey_sort.c b/fs/bcachefs/bkey_sort.c
deleted file mode 100644
index 4536eb50fc40..000000000000
--- a/fs/bcachefs/bkey_sort.c
+++ /dev/null
@@ -1,214 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "bcachefs.h"
-#include "bkey_buf.h"
-#include "bkey_cmp.h"
-#include "bkey_sort.h"
-#include "bset.h"
-#include "extents.h"
-
-typedef int (*sort_cmp_fn)(const struct btree *,
- const struct bkey_packed *,
- const struct bkey_packed *);
-
-static inline bool sort_iter_end(struct sort_iter *iter)
-{
- return !iter->used;
-}
-
-static inline void sort_iter_sift(struct sort_iter *iter, unsigned from,
- sort_cmp_fn cmp)
-{
- unsigned i;
-
- for (i = from;
- i + 1 < iter->used &&
- cmp(iter->b, iter->data[i].k, iter->data[i + 1].k) > 0;
- i++)
- swap(iter->data[i], iter->data[i + 1]);
-}
-
-static inline void sort_iter_sort(struct sort_iter *iter, sort_cmp_fn cmp)
-{
- unsigned i = iter->used;
-
- while (i--)
- sort_iter_sift(iter, i, cmp);
-}
-
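sort_iter_sift()/sort_iter_sort() keep a small array of per-bset cursors ordered by their current key: a full pass is an insertion sort over at most a handful of entries, and after each advance only the head needs to be sifted back into place. The same idea on a plain int array, as a self-contained sketch (illustration only):

    #include <stdio.h>

    #define SWAP(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)

    /* Bubble element @from rightwards until the array is ordered again. */
    static void sift(int *v, unsigned used, unsigned from)
    {
        for (unsigned i = from; i + 1 < used && v[i] > v[i + 1]; i++)
            SWAP(v[i], v[i + 1]);
    }

    int main(void)
    {
        int heads[] = { 42, 7, 19 };    /* current key of each "set" */
        unsigned used = 3;

        for (unsigned i = used; i--; )  /* full sort, back to front */
            sift(heads, used, i);

        for (unsigned i = 0; i < used; i++)
            printf("%d ", heads[i]);    /* prints: 7 19 42 */
        printf("\n");
        return 0;
    }
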
-static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter)
-{
- return !sort_iter_end(iter) ? iter->data->k : NULL;
-}
-
-static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
-{
- struct sort_iter_set *i = iter->data;
-
- BUG_ON(!iter->used);
-
- i->k = bkey_p_next(i->k);
-
- BUG_ON(i->k > i->end);
-
- if (i->k == i->end)
- array_remove_item(iter->data, iter->used, 0);
- else
- sort_iter_sift(iter, 0, cmp);
-}
-
-static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
- sort_cmp_fn cmp)
-{
- struct bkey_packed *ret = sort_iter_peek(iter);
-
- if (ret)
- sort_iter_advance(iter, cmp);
-
- return ret;
-}
-
-/*
- * If keys compare equal, compare by pointer order:
- */
-static inline int key_sort_fix_overlapping_cmp(const struct btree *b,
- const struct bkey_packed *l,
- const struct bkey_packed *r)
-{
- return bch2_bkey_cmp_packed(b, l, r) ?:
- cmp_int((unsigned long) l, (unsigned long) r);
-}
-
-static inline bool should_drop_next_key(struct sort_iter *iter)
-{
- /*
-	 * key_sort_fix_overlapping_cmp() ensures that when keys compare equal
-	 * the older key comes first; so if l->k compares equal to r->k then
-	 * l->k is older and should be dropped.
- */
- return iter->used >= 2 &&
- !bch2_bkey_cmp_packed(iter->b,
- iter->data[0].k,
- iter->data[1].k);
-}
-
-struct btree_nr_keys
-bch2_key_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
- struct sort_iter *iter)
-{
- struct bkey_packed *out = dst->start;
- struct bkey_packed *k;
- struct btree_nr_keys nr;
-
- memset(&nr, 0, sizeof(nr));
-
- sort_iter_sort(iter, key_sort_fix_overlapping_cmp);
-
- while ((k = sort_iter_peek(iter))) {
- if (!bkey_deleted(k) &&
- !should_drop_next_key(iter)) {
- bkey_p_copy(out, k);
- btree_keys_account_key_add(&nr, 0, out);
- out = bkey_p_next(out);
- }
-
- sort_iter_advance(iter, key_sort_fix_overlapping_cmp);
- }
-
- dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
- return nr;
-}
-
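Because equal keys are tie-broken by pointer order here, the older duplicate always sorts first and should_drop_next_key() discards it, so only the newest copy of each position reaches the output. The same keep-only-the-last-of-each-run rule, reduced to ints (illustration only):

    #include <stdio.h>

    int main(void)
    {
        int in[] = { 1, 3, 3, 3, 7, 9, 9 };
        unsigned n = sizeof(in) / sizeof(in[0]);

        for (unsigned i = 0; i < n; i++)
            if (i + 1 == n || in[i] != in[i + 1])  /* drop all but the last of a run */
                printf("%d ", in[i]);              /* prints: 1 3 7 9 */
        printf("\n");
        return 0;
    }
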
-/* Sort + repack in a new format: */
-struct btree_nr_keys
-bch2_sort_repack(struct bset *dst, struct btree *src,
- struct btree_node_iter *src_iter,
- struct bkey_format *out_f,
- bool filter_whiteouts)
-{
- struct bkey_format *in_f = &src->format;
- struct bkey_packed *in, *out = vstruct_last(dst);
- struct btree_nr_keys nr;
- bool transform = memcmp(out_f, &src->format, sizeof(*out_f));
-
- memset(&nr, 0, sizeof(nr));
-
- while ((in = bch2_btree_node_iter_next_all(src_iter, src))) {
- if (filter_whiteouts && bkey_deleted(in))
- continue;
-
- if (!transform)
- bkey_p_copy(out, in);
- else if (bch2_bkey_transform(out_f, out, bkey_packed(in)
- ? in_f : &bch2_bkey_format_current, in))
- out->format = KEY_FORMAT_LOCAL_BTREE;
- else
- bch2_bkey_unpack(src, (void *) out, in);
-
- out->needs_whiteout = false;
-
- btree_keys_account_key_add(&nr, 0, out);
- out = bkey_p_next(out);
- }
-
- dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
- return nr;
-}
-
-static inline int keep_unwritten_whiteouts_cmp(const struct btree *b,
- const struct bkey_packed *l,
- const struct bkey_packed *r)
-{
- return bch2_bkey_cmp_packed_inlined(b, l, r) ?:
- (int) bkey_deleted(r) - (int) bkey_deleted(l) ?:
- (long) l - (long) r;
-}
-
-#include "btree_update_interior.h"
-
-/*
- * For sorting in the btree node write path: whiteouts not in the unwritten
- * whiteouts area are dropped, whiteouts in the unwritten whiteouts area are
- * dropped if overwritten by real keys:
- */
-unsigned bch2_sort_keys_keep_unwritten_whiteouts(struct bkey_packed *dst, struct sort_iter *iter)
-{
- struct bkey_packed *in, *next, *out = dst;
-
- sort_iter_sort(iter, keep_unwritten_whiteouts_cmp);
-
- while ((in = sort_iter_next(iter, keep_unwritten_whiteouts_cmp))) {
- if (bkey_deleted(in) && in < unwritten_whiteouts_start(iter->b))
- continue;
-
- if ((next = sort_iter_peek(iter)) &&
- !bch2_bkey_cmp_packed_inlined(iter->b, in, next))
- continue;
-
- bkey_p_copy(out, in);
- out = bkey_p_next(out);
- }
-
- return (u64 *) out - (u64 *) dst;
-}
-
-/*
- * Main sort routine for compacting a btree node in memory: we always drop
- * whiteouts because any whiteouts that need to be written are in the unwritten
- * whiteouts area:
- */
-unsigned bch2_sort_keys(struct bkey_packed *dst, struct sort_iter *iter)
-{
- struct bkey_packed *in, *out = dst;
-
- sort_iter_sort(iter, bch2_bkey_cmp_packed_inlined);
-
- while ((in = sort_iter_next(iter, bch2_bkey_cmp_packed_inlined))) {
- if (bkey_deleted(in))
- continue;
-
- bkey_p_copy(out, in);
- out = bkey_p_next(out);
- }
-
- return (u64 *) out - (u64 *) dst;
-}
diff --git a/fs/bcachefs/bkey_sort.h b/fs/bcachefs/bkey_sort.h
deleted file mode 100644
index 9be969d46890..000000000000
--- a/fs/bcachefs/bkey_sort.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BKEY_SORT_H
-#define _BCACHEFS_BKEY_SORT_H
-
-struct sort_iter {
- struct btree *b;
- unsigned used;
- unsigned size;
-
- struct sort_iter_set {
- struct bkey_packed *k, *end;
- } data[];
-};
-
-static inline void sort_iter_init(struct sort_iter *iter, struct btree *b, unsigned size)
-{
- iter->b = b;
- iter->used = 0;
- iter->size = size;
-}
-
-struct sort_iter_stack {
- struct sort_iter iter;
- struct sort_iter_set sets[MAX_BSETS + 1];
-};
-
-static inline void sort_iter_stack_init(struct sort_iter_stack *iter, struct btree *b)
-{
- sort_iter_init(&iter->iter, b, ARRAY_SIZE(iter->sets));
-}
-
-static inline void sort_iter_add(struct sort_iter *iter,
- struct bkey_packed *k,
- struct bkey_packed *end)
-{
- BUG_ON(iter->used >= iter->size);
-
- if (k != end)
- iter->data[iter->used++] = (struct sort_iter_set) { k, end };
-}
-
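A hedged usage sketch of this API in the shape its callers take: collect every bset of a node into the iterator, then hand it to one of the sort routines. The helper name is hypothetical; the btree node and the destination buffer are assumed to come from the caller:

    /* Sketch only: compact all bsets of @b into @dst, dropping whiteouts. */
    static unsigned demo_compact(struct btree *b, struct bkey_packed *dst)
    {
        struct sort_iter_stack sort;

        sort_iter_stack_init(&sort, b);
        for_each_bset(b, t)
            sort_iter_add(&sort.iter,
                          btree_bkey_first(b, t),
                          btree_bkey_last(b, t));

        return bch2_sort_keys(dst, &sort.iter);  /* u64s written to @dst */
    }
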
-struct btree_nr_keys
-bch2_key_sort_fix_overlapping(struct bch_fs *, struct bset *,
- struct sort_iter *);
-
-struct btree_nr_keys
-bch2_sort_repack(struct bset *, struct btree *,
- struct btree_node_iter *,
- struct bkey_format *, bool);
-
-unsigned bch2_sort_keys_keep_unwritten_whiteouts(struct bkey_packed *, struct sort_iter *);
-unsigned bch2_sort_keys(struct bkey_packed *, struct sort_iter *);
-
-#endif /* _BCACHEFS_BKEY_SORT_H */
diff --git a/fs/bcachefs/bkey_types.h b/fs/bcachefs/bkey_types.h
deleted file mode 100644
index c9ae9e42b385..000000000000
--- a/fs/bcachefs/bkey_types.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BKEY_TYPES_H
-#define _BCACHEFS_BKEY_TYPES_H
-
-#include "bcachefs_format.h"
-
-/*
- * bkey_i - bkey with inline value
- * bkey_s - bkey with split value
- * bkey_s_c - bkey with split value, const
- */
-
-#define bkey_p_next(_k) vstruct_next(_k)
-
-static inline struct bkey_i *bkey_next(struct bkey_i *k)
-{
- return (struct bkey_i *) ((u64 *) k->_data + k->k.u64s);
-}
-
-#define bkey_val_u64s(_k) ((_k)->u64s - BKEY_U64s)
-
-static inline size_t bkey_val_bytes(const struct bkey *k)
-{
- return bkey_val_u64s(k) * sizeof(u64);
-}
-
-static inline void set_bkey_val_u64s(struct bkey *k, unsigned val_u64s)
-{
- unsigned u64s = BKEY_U64s + val_u64s;
-
- BUG_ON(u64s > U8_MAX);
- k->u64s = u64s;
-}
-
-static inline void set_bkey_val_bytes(struct bkey *k, unsigned bytes)
-{
- set_bkey_val_u64s(k, DIV_ROUND_UP(bytes, sizeof(u64)));
-}
-
-#define bkey_val_end(_k) ((void *) (((u64 *) (_k).v) + bkey_val_u64s((_k).k)))
-
-#define bkey_deleted(_k) ((_k)->type == KEY_TYPE_deleted)
-
-#define bkey_whiteout(_k) \
- ((_k)->type == KEY_TYPE_deleted || (_k)->type == KEY_TYPE_whiteout)
-
-/* bkey with split value, const */
-struct bkey_s_c {
- const struct bkey *k;
- const struct bch_val *v;
-};
-
-/* bkey with split value */
-struct bkey_s {
- union {
- struct {
- struct bkey *k;
- struct bch_val *v;
- };
- struct bkey_s_c s_c;
- };
-};
-
-#define bkey_s_null ((struct bkey_s) { .k = NULL })
-#define bkey_s_c_null ((struct bkey_s_c) { .k = NULL })
-
-#define bkey_s_err(err) ((struct bkey_s) { .k = ERR_PTR(err) })
-#define bkey_s_c_err(err) ((struct bkey_s_c) { .k = ERR_PTR(err) })
-
-static inline struct bkey_s bkey_to_s(struct bkey *k)
-{
- return (struct bkey_s) { .k = k, .v = NULL };
-}
-
-static inline struct bkey_s_c bkey_to_s_c(const struct bkey *k)
-{
- return (struct bkey_s_c) { .k = k, .v = NULL };
-}
-
-static inline struct bkey_s bkey_i_to_s(struct bkey_i *k)
-{
- return (struct bkey_s) { .k = &k->k, .v = &k->v };
-}
-
-static inline struct bkey_s_c bkey_i_to_s_c(const struct bkey_i *k)
-{
- return (struct bkey_s_c) { .k = &k->k, .v = &k->v };
-}
-
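A short hedged sketch of how these wrappers combine with the helpers from bkey_methods.c: converting an inline-value key to the split const view is free, and that const view is what the read-only helpers take. The function name is hypothetical:

    /* Sketch: print a key we hold as a bkey_i via its read-only view. */
    static void demo_print(struct bch_fs *c, struct printbuf *out, struct bkey_i *k)
    {
        struct bkey_s_c k_c = bkey_i_to_s_c(k);  /* inline -> split, const */

        bch2_bkey_val_to_text(out, c, k_c);
    }
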
-/*
- * For a given type of value (e.g. struct bch_extent), generates the types for
- * bkey + bch_extent - inline, split, split const - and also all the conversion
- * functions, which also check that the value is of the correct type.
- *
- * We use anonymous unions for upcasting - e.g. converting from a
- * bkey_i_extent to a bkey_i - since that's always safe, instead of
- * conversion functions.
- */
-#define x(name, ...) \
-struct bkey_i_##name { \
- union { \
- struct bkey k; \
- struct bkey_i k_i; \
- }; \
- struct bch_##name v; \
-}; \
- \
-struct bkey_s_c_##name { \
- union { \
- struct { \
- const struct bkey *k; \
- const struct bch_##name *v; \
- }; \
- struct bkey_s_c s_c; \
- }; \
-}; \
- \
-struct bkey_s_##name { \
- union { \
- struct { \
- struct bkey *k; \
- struct bch_##name *v; \
- }; \
- struct bkey_s_c_##name c; \
- struct bkey_s s; \
- struct bkey_s_c s_c; \
- }; \
-}; \
- \
-static inline struct bkey_i_##name *bkey_i_to_##name(struct bkey_i *k) \
-{ \
- EBUG_ON(!IS_ERR_OR_NULL(k) && k->k.type != KEY_TYPE_##name); \
- return container_of(&k->k, struct bkey_i_##name, k); \
-} \
- \
-static inline const struct bkey_i_##name * \
-bkey_i_to_##name##_c(const struct bkey_i *k) \
-{ \
- EBUG_ON(!IS_ERR_OR_NULL(k) && k->k.type != KEY_TYPE_##name); \
- return container_of(&k->k, struct bkey_i_##name, k); \
-} \
- \
-static inline struct bkey_s_##name bkey_s_to_##name(struct bkey_s k) \
-{ \
- EBUG_ON(!IS_ERR_OR_NULL(k.k) && k.k->type != KEY_TYPE_##name); \
- return (struct bkey_s_##name) { \
- .k = k.k, \
- .v = container_of(k.v, struct bch_##name, v), \
- }; \
-} \
- \
-static inline struct bkey_s_c_##name bkey_s_c_to_##name(struct bkey_s_c k)\
-{ \
- EBUG_ON(!IS_ERR_OR_NULL(k.k) && k.k->type != KEY_TYPE_##name); \
- return (struct bkey_s_c_##name) { \
- .k = k.k, \
- .v = container_of(k.v, struct bch_##name, v), \
- }; \
-} \
- \
-static inline struct bkey_s_##name name##_i_to_s(struct bkey_i_##name *k)\
-{ \
- return (struct bkey_s_##name) { \
- .k = &k->k, \
- .v = &k->v, \
- }; \
-} \
- \
-static inline struct bkey_s_c_##name \
-name##_i_to_s_c(const struct bkey_i_##name *k) \
-{ \
- return (struct bkey_s_c_##name) { \
- .k = &k->k, \
- .v = &k->v, \
- }; \
-} \
- \
-static inline struct bkey_s_##name bkey_i_to_s_##name(struct bkey_i *k) \
-{ \
- EBUG_ON(!IS_ERR_OR_NULL(k) && k->k.type != KEY_TYPE_##name); \
- return (struct bkey_s_##name) { \
- .k = &k->k, \
- .v = container_of(&k->v, struct bch_##name, v), \
- }; \
-} \
- \
-static inline struct bkey_s_c_##name \
-bkey_i_to_s_c_##name(const struct bkey_i *k) \
-{ \
- EBUG_ON(!IS_ERR_OR_NULL(k) && k->k.type != KEY_TYPE_##name); \
- return (struct bkey_s_c_##name) { \
- .k = &k->k, \
- .v = container_of(&k->v, struct bch_##name, v), \
- }; \
-} \
- \
-static inline struct bkey_i_##name *bkey_##name##_init(struct bkey_i *_k)\
-{ \
- struct bkey_i_##name *k = \
- container_of(&_k->k, struct bkey_i_##name, k); \
- \
- bkey_init(&k->k); \
- memset(&k->v, 0, sizeof(k->v)); \
- k->k.type = KEY_TYPE_##name; \
- set_bkey_val_bytes(&k->k, sizeof(k->v)); \
- \
- return k; \
-}
-
-BCH_BKEY_TYPES();
-#undef x
-
-#endif /* _BCACHEFS_BKEY_TYPES_H */
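To make the generated API concrete, a hedged usage sketch, assuming a "cookie" entry in BCH_BKEY_TYPES() whose value is a single __le64 field (substitute any real type; the helper name is hypothetical):

    /* Sketch: build a cookie key in caller-provided space, then read it back. */
    static void demo_cookie(struct bkey_i *space)
    {
        struct bkey_i_cookie *ck = bkey_cookie_init(space);

        ck->v.cookie = cpu_to_le64(42);   /* assumes bch_cookie's __le64 field */

        struct bkey_s_c_cookie c = bkey_i_to_s_c_cookie(space);
        (void) le64_to_cpu(c.v->cookie);  /* 42 */
    }
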
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
deleted file mode 100644
index 9a4a83d6fd2d..000000000000
--- a/fs/bcachefs/bset.c
+++ /dev/null
@@ -1,1570 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Code for working with individual keys, and sorted sets of keys within a
- * btree node
- *
- * Copyright 2012 Google, Inc.
- */
-
-#include "bcachefs.h"
-#include "btree_cache.h"
-#include "bset.h"
-#include "eytzinger.h"
-#include "trace.h"
-#include "util.h"
-
-#include <linux/unaligned.h>
-#include <linux/console.h>
-#include <linux/random.h>
-#include <linux/prefetch.h>
-
-static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *,
- struct btree *);
-
-static inline unsigned __btree_node_iter_used(struct btree_node_iter *iter)
-{
- unsigned n = ARRAY_SIZE(iter->data);
-
- while (n && __btree_node_iter_set_end(iter, n - 1))
- --n;
-
- return n;
-}
-
-struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
-{
- return bch2_bkey_to_bset_inlined(b, k);
-}
-
-/*
- * There are never duplicate live keys in the btree - but including keys that
- * have been flagged as deleted (and will be cleaned up later) we _will_ see
- * duplicates.
- *
- * Thus the sort order is: usual key comparison first, but for keys that compare
- * equal the deleted key(s) come first, and the (at most one) live version comes
- * last.
- *
- * The main reason for this is insertion: to handle overwrites, we first iterate
- * over keys that compare equal to our insert key, and then insert immediately
- * prior to the first key greater than the key we're inserting - our insert
- * position will be after all keys that compare equal to our insert key, which
- * by the time we actually do the insert will all be deleted.
- */
-
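Reduced to its essentials, the ordering described above is "position first, and for equal positions deleted before live"; a minimal comparator sketch of just that rule (hypothetical, not the real comparator, which also handles packed keys):

    #include <stdbool.h>

    struct demo_key { unsigned long long pos; bool deleted; };

    /* Order by position; for equal positions, deleted keys sort first. */
    static int demo_key_cmp(const struct demo_key *l, const struct demo_key *r)
    {
        if (l->pos != r->pos)
            return l->pos < r->pos ? -1 : 1;
        return (int) r->deleted - (int) l->deleted;
    }
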
-void bch2_dump_bset(struct bch_fs *c, struct btree *b,
- struct bset *i, unsigned set)
-{
- struct bkey_packed *_k, *_n;
- struct bkey uk, n;
- struct bkey_s_c k;
- struct printbuf buf = PRINTBUF;
-
- if (!i->u64s)
- return;
-
- for (_k = i->start;
- _k < vstruct_last(i);
- _k = _n) {
- _n = bkey_p_next(_k);
-
- if (!_k->u64s) {
- printk(KERN_ERR "block %u key %5zu - u64s 0? aieee!\n", set,
- _k->_data - i->_data);
- break;
- }
-
- k = bkey_disassemble(b, _k, &uk);
-
- printbuf_reset(&buf);
- if (c)
- bch2_bkey_val_to_text(&buf, c, k);
- else
- bch2_bkey_to_text(&buf, k.k);
- printk(KERN_ERR "block %u key %5zu: %s\n", set,
- _k->_data - i->_data, buf.buf);
-
- if (_n == vstruct_last(i))
- continue;
-
- n = bkey_unpack_key(b, _n);
-
- if (bpos_lt(n.p, k.k->p)) {
- printk(KERN_ERR "Key skipped backwards\n");
- continue;
- }
-
- if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p))
- printk(KERN_ERR "Duplicate keys\n");
- }
-
- printbuf_exit(&buf);
-}
-
-void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
-{
- console_lock();
- for_each_bset(b, t)
- bch2_dump_bset(c, b, bset(b, t), t - b->set);
- console_unlock();
-}
-
-void bch2_dump_btree_node_iter(struct btree *b,
- struct btree_node_iter *iter)
-{
- struct btree_node_iter_set *set;
- struct printbuf buf = PRINTBUF;
-
- printk(KERN_ERR "btree node iter with %u/%u sets:\n",
- __btree_node_iter_used(iter), b->nsets);
-
- btree_node_iter_for_each(iter, set) {
- struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
- struct bset_tree *t = bch2_bkey_to_bset(b, k);
- struct bkey uk = bkey_unpack_key(b, k);
-
- printbuf_reset(&buf);
- bch2_bkey_to_text(&buf, &uk);
- printk(KERN_ERR "set %zu key %u: %s\n",
- t - b->set, set->k, buf.buf);
- }
-
- printbuf_exit(&buf);
-}
-
-struct btree_nr_keys bch2_btree_node_count_keys(struct btree *b)
-{
- struct bkey_packed *k;
- struct btree_nr_keys nr = {};
-
- for_each_bset(b, t)
- bset_tree_for_each_key(b, t, k)
- if (!bkey_deleted(k))
- btree_keys_account_key_add(&nr, t - b->set, k);
- return nr;
-}
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-
-void __bch2_verify_btree_nr_keys(struct btree *b)
-{
- struct btree_nr_keys nr = bch2_btree_node_count_keys(b);
-
- BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
-}
-
-static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
- struct btree *b)
-{
- struct btree_node_iter iter = *_iter;
- const struct bkey_packed *k, *n;
-
- k = bch2_btree_node_iter_peek_all(&iter, b);
- __bch2_btree_node_iter_advance(&iter, b);
- n = bch2_btree_node_iter_peek_all(&iter, b);
-
- bkey_unpack_key(b, k);
-
- if (n &&
- bkey_iter_cmp(b, k, n) > 0) {
- struct btree_node_iter_set *set;
- struct bkey ku = bkey_unpack_key(b, k);
- struct bkey nu = bkey_unpack_key(b, n);
- struct printbuf buf1 = PRINTBUF;
- struct printbuf buf2 = PRINTBUF;
-
- bch2_dump_btree_node(NULL, b);
- bch2_bkey_to_text(&buf1, &ku);
- bch2_bkey_to_text(&buf2, &nu);
- printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
- buf1.buf, buf2.buf);
- printk(KERN_ERR "iter was:");
-
- btree_node_iter_for_each(_iter, set) {
- struct bkey_packed *k2 = __btree_node_offset_to_key(b, set->k);
- struct bset_tree *t = bch2_bkey_to_bset(b, k2);
- printk(" [%zi %zi]", t - b->set,
- k2->_data - bset(b, t)->_data);
- }
- panic("\n");
- }
-}
-
-void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
- struct btree *b)
-{
- struct btree_node_iter_set *set, *s2;
- struct bkey_packed *k, *p;
-
- if (bch2_btree_node_iter_end(iter))
- return;
-
- /* Verify no duplicates: */
- btree_node_iter_for_each(iter, set) {
- BUG_ON(set->k > set->end);
- btree_node_iter_for_each(iter, s2)
- BUG_ON(set != s2 && set->end == s2->end);
- }
-
- /* Verify that set->end is correct: */
- btree_node_iter_for_each(iter, set) {
- for_each_bset(b, t)
- if (set->end == t->end_offset) {
- BUG_ON(set->k < btree_bkey_first_offset(t) ||
- set->k >= t->end_offset);
- goto found;
- }
- BUG();
-found:
- do {} while (0);
- }
-
- /* Verify iterator is sorted: */
- btree_node_iter_for_each(iter, set)
- BUG_ON(set != iter->data &&
- btree_node_iter_cmp(b, set[-1], set[0]) > 0);
-
- k = bch2_btree_node_iter_peek_all(iter, b);
-
- for_each_bset(b, t) {
- if (iter->data[0].end == t->end_offset)
- continue;
-
- p = bch2_bkey_prev_all(b, t,
- bch2_btree_node_iter_bset_pos(iter, b, t));
-
- BUG_ON(p && bkey_iter_cmp(b, k, p) < 0);
- }
-}
-
-void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
- struct bkey_packed *insert, unsigned clobber_u64s)
-{
- struct bset_tree *t = bch2_bkey_to_bset(b, where);
- struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
- struct bkey_packed *next = (void *) ((u64 *) where->_data + clobber_u64s);
- struct printbuf buf1 = PRINTBUF;
- struct printbuf buf2 = PRINTBUF;
-#if 0
- BUG_ON(prev &&
- bkey_iter_cmp(b, prev, insert) > 0);
-#else
- if (prev &&
- bkey_iter_cmp(b, prev, insert) > 0) {
- struct bkey k1 = bkey_unpack_key(b, prev);
- struct bkey k2 = bkey_unpack_key(b, insert);
-
- bch2_dump_btree_node(NULL, b);
- bch2_bkey_to_text(&buf1, &k1);
- bch2_bkey_to_text(&buf2, &k2);
-
- panic("prev > insert:\n"
- "prev key %s\n"
- "insert key %s\n",
- buf1.buf, buf2.buf);
- }
-#endif
-#if 0
- BUG_ON(next != btree_bkey_last(b, t) &&
- bkey_iter_cmp(b, insert, next) > 0);
-#else
- if (next != btree_bkey_last(b, t) &&
- bkey_iter_cmp(b, insert, next) > 0) {
- struct bkey k1 = bkey_unpack_key(b, insert);
- struct bkey k2 = bkey_unpack_key(b, next);
-
- bch2_dump_btree_node(NULL, b);
- bch2_bkey_to_text(&buf1, &k1);
- bch2_bkey_to_text(&buf2, &k2);
-
- panic("insert > next:\n"
- "insert key %s\n"
- "next key %s\n",
- buf1.buf, buf2.buf);
- }
-#endif
-}
-
-#else
-
-static inline void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
- struct btree *b) {}
-
-#endif
-
-/* Auxiliary search trees */
-
-#define BFLOAT_FAILED_UNPACKED U8_MAX
-#define BFLOAT_FAILED U8_MAX
-
-struct bkey_float {
- u8 exponent;
- u8 key_offset;
- u16 mantissa;
-};
-#define BKEY_MANTISSA_BITS 16
-
-struct ro_aux_tree {
- u8 nothing[0];
- struct bkey_float f[];
-};
-
-struct rw_aux_tree {
- u16 offset;
- struct bpos k;
-};
-
-static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
-{
- BUG_ON(t->aux_data_offset == U16_MAX);
-
- switch (bset_aux_tree_type(t)) {
- case BSET_NO_AUX_TREE:
- return t->aux_data_offset;
- case BSET_RO_AUX_TREE:
- return t->aux_data_offset +
- DIV_ROUND_UP(t->size * sizeof(struct bkey_float), 8);
- case BSET_RW_AUX_TREE:
- return t->aux_data_offset +
- DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);
- default:
- BUG();
- }
-}
-
-static unsigned bset_aux_tree_buf_start(const struct btree *b,
- const struct bset_tree *t)
-{
- return t == b->set
- ? DIV_ROUND_UP(b->unpack_fn_len, 8)
- : bset_aux_tree_buf_end(t - 1);
-}
-
-static void *__aux_tree_base(const struct btree *b,
- const struct bset_tree *t)
-{
- return b->aux_data + t->aux_data_offset * 8;
-}
-
-static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
- const struct bset_tree *t)
-{
- EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
-
- return __aux_tree_base(b, t);
-}
-
-static struct bkey_float *bkey_float(const struct btree *b,
- const struct bset_tree *t,
- unsigned idx)
-{
- return ro_aux_tree_base(b, t)->f + idx;
-}
-
-static void bset_aux_tree_verify(struct btree *b)
-{
-#ifdef CONFIG_BCACHEFS_DEBUG
- for_each_bset(b, t) {
- if (t->aux_data_offset == U16_MAX)
- continue;
-
- BUG_ON(t != b->set &&
- t[-1].aux_data_offset == U16_MAX);
-
- BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t));
- BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b));
- BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b));
- }
-#endif
-}
-
-void bch2_btree_keys_init(struct btree *b)
-{
- unsigned i;
-
- b->nsets = 0;
- memset(&b->nr, 0, sizeof(b->nr));
-
- for (i = 0; i < MAX_BSETS; i++)
- b->set[i].data_offset = U16_MAX;
-
- bch2_bset_set_no_aux_tree(b, b->set);
-}
-
-/* Binary tree stuff for auxiliary search trees */
-
-/*
- * Cacheline/offset <-> bkey pointer arithmetic:
- *
- * t->tree is a binary search tree in an array; each node corresponds to a key
- * in one cacheline in t->set (BSET_CACHELINE bytes).
- *
- * This means we don't have to store the full index of the key that a node in
- * the binary tree points to; eytzinger1_to_inorder() gives us the cacheline,
- * and then bkey_float->key_offset gives us the offset within that cacheline,
- * in units of 8 bytes.
- *
- * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
- * make this work.
- *
- * To construct the bfloat for an arbitrary key we need to know the keys that
- * bound it in tree order: make_bfloat() checks whether those two bounding keys
- * differ in the bits we're going to store in bkey_float->mantissa, deriving
- * them from ancestor nodes in the tree (or from the btree node's min/max key
- * at the edges).
- */
-
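Concretely, cacheline_to_bkey() below is just "start of the bset's first cacheline + cacheline index * BSET_CACHELINE + key_offset * 8". A standalone sketch of that arithmetic, with BSET_CACHELINE assumed to be 256 bytes purely for illustration:

    /* Hypothetical sketch of the cacheline -> key address arithmetic. */
    static const unsigned char *demo_cacheline_to_key(const unsigned char *base,
                                                      unsigned cacheline,
                                                      unsigned key_offset)
    {
        return base + cacheline * 256 + key_offset * 8;
    }
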
-static inline void *bset_cacheline(const struct btree *b,
- const struct bset_tree *t,
- unsigned cacheline)
-{
- return (void *) round_down((unsigned long) btree_bkey_first(b, t),
- L1_CACHE_BYTES) +
- cacheline * BSET_CACHELINE;
-}
-
-static struct bkey_packed *cacheline_to_bkey(const struct btree *b,
- const struct bset_tree *t,
- unsigned cacheline,
- unsigned offset)
-{
- return bset_cacheline(b, t, cacheline) + offset * 8;
-}
-
-static unsigned bkey_to_cacheline(const struct btree *b,
- const struct bset_tree *t,
- const struct bkey_packed *k)
-{
- return ((void *) k - bset_cacheline(b, t, 0)) / BSET_CACHELINE;
-}
-
-static ssize_t __bkey_to_cacheline_offset(const struct btree *b,
- const struct bset_tree *t,
- unsigned cacheline,
- const struct bkey_packed *k)
-{
- return (u64 *) k - (u64 *) bset_cacheline(b, t, cacheline);
-}
-
-static unsigned bkey_to_cacheline_offset(const struct btree *b,
- const struct bset_tree *t,
- unsigned cacheline,
- const struct bkey_packed *k)
-{
- size_t m = __bkey_to_cacheline_offset(b, t, cacheline, k);
-
- EBUG_ON(m > U8_MAX);
- return m;
-}
-
-static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
- const struct bset_tree *t,
- unsigned j)
-{
- return cacheline_to_bkey(b, t,
- __eytzinger1_to_inorder(j, t->size - 1, t->extra),
- bkey_float(b, t, j)->key_offset);
-}
-
-static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
- const struct bset_tree *t)
-{
- EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
-
- return __aux_tree_base(b, t);
-}
-
-/*
- * For the write set - the one we're currently inserting keys into - we don't
- * maintain a full search tree, we just keep a simple lookup table in t->prev.
- */
-static struct bkey_packed *rw_aux_to_bkey(const struct btree *b,
- struct bset_tree *t,
- unsigned j)
-{
- return __btree_node_offset_to_key(b, rw_aux_tree(b, t)[j].offset);
-}
-
-static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t,
- unsigned j, struct bkey_packed *k)
-{
- EBUG_ON(k >= btree_bkey_last(b, t));
-
- rw_aux_tree(b, t)[j] = (struct rw_aux_tree) {
- .offset = __btree_node_key_to_offset(b, k),
- .k = bkey_unpack_pos(b, k),
- };
-}
-
-static void bch2_bset_verify_rw_aux_tree(struct btree *b,
- struct bset_tree *t)
-{
- struct bkey_packed *k = btree_bkey_first(b, t);
- unsigned j = 0;
-
- if (!bch2_expensive_debug_checks)
- return;
-
- BUG_ON(bset_has_ro_aux_tree(t));
-
- if (!bset_has_rw_aux_tree(t))
- return;
-
- BUG_ON(t->size < 1);
- BUG_ON(rw_aux_to_bkey(b, t, j) != k);
-
- goto start;
- while (1) {
- if (rw_aux_to_bkey(b, t, j) == k) {
- BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k,
- bkey_unpack_pos(b, k)));
-start:
- if (++j == t->size)
- break;
-
- BUG_ON(rw_aux_tree(b, t)[j].offset <=
- rw_aux_tree(b, t)[j - 1].offset);
- }
-
- k = bkey_p_next(k);
- BUG_ON(k >= btree_bkey_last(b, t));
- }
-}
-
-/* returns idx of first entry >= offset: */
-static unsigned rw_aux_tree_bsearch(struct btree *b,
- struct bset_tree *t,
- unsigned offset)
-{
- unsigned bset_offs = offset - btree_bkey_first_offset(t);
- unsigned bset_u64s = t->end_offset - btree_bkey_first_offset(t);
- unsigned idx = bset_u64s ? bset_offs * t->size / bset_u64s : 0;
-
- EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
- EBUG_ON(!t->size);
- EBUG_ON(idx > t->size);
-
- while (idx < t->size &&
- rw_aux_tree(b, t)[idx].offset < offset)
- idx++;
-
- while (idx &&
- rw_aux_tree(b, t)[idx - 1].offset >= offset)
- idx--;
-
- EBUG_ON(idx < t->size &&
- rw_aux_tree(b, t)[idx].offset < offset);
- EBUG_ON(idx && rw_aux_tree(b, t)[idx - 1].offset >= offset);
- EBUG_ON(idx + 1 < t->size &&
- rw_aux_tree(b, t)[idx].offset ==
- rw_aux_tree(b, t)[idx + 1].offset);
-
- return idx;
-}
-
-static inline unsigned bkey_mantissa(const struct bkey_packed *k,
- const struct bkey_float *f)
-{
- u64 v;
-
- EBUG_ON(!bkey_packed(k));
-
- v = get_unaligned((u64 *) (((u8 *) k->_data) + (f->exponent >> 3)));
-
- /*
- * In little endian, we're shifting off low bits (and then the bits we
- * want are at the low end), in big endian we're shifting off high bits
- * (and then the bits we want are at the high end, so we shift them
- * back down):
- */
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- v >>= f->exponent & 7;
-#else
- v >>= 64 - (f->exponent & 7) - BKEY_MANTISSA_BITS;
-#endif
- return (u16) v;
-}
-
-static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
- unsigned j,
- struct bkey_packed *min_key,
- struct bkey_packed *max_key)
-{
- struct bkey_float *f = bkey_float(b, t, j);
- struct bkey_packed *m = tree_to_bkey(b, t, j);
- struct bkey_packed *l = is_power_of_2(j)
- ? min_key
- : tree_to_bkey(b, t, j >> ffs(j));
- struct bkey_packed *r = is_power_of_2(j + 1)
- ? max_key
- : tree_to_bkey(b, t, j >> (ffz(j) + 1));
- unsigned mantissa;
- int shift, exponent, high_bit;
-
- /*
- * for failed bfloats, the lookup code falls back to comparing against
- * the original key.
- */
-
- if (!bkey_packed(l) || !bkey_packed(r) || !bkey_packed(m) ||
- !b->nr_key_bits) {
- f->exponent = BFLOAT_FAILED_UNPACKED;
- return;
- }
-
- /*
- * The greatest differing bit of l and r is the first bit we must
- * include in the bfloat mantissa we're creating in order to do
- * comparisons - that bit always becomes the high bit of
- * bfloat->mantissa, and thus the exponent we're calculating here is
- * the position of what will become the low bit in bfloat->mantissa:
- *
- * Note that this may be negative - we may be running off the low end
- * of the key: we handle this later:
- */
- high_bit = max(bch2_bkey_greatest_differing_bit(b, l, r),
- min_t(unsigned, BKEY_MANTISSA_BITS, b->nr_key_bits) - 1);
- exponent = high_bit - (BKEY_MANTISSA_BITS - 1);
-
- /*
- * Then we calculate the actual shift value, from the start of the key
- * (k->_data), to get the key bits starting at exponent:
- */
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent;
-
- EBUG_ON(shift + BKEY_MANTISSA_BITS > b->format.key_u64s * 64);
-#else
- shift = high_bit_offset +
- b->nr_key_bits -
- exponent -
- BKEY_MANTISSA_BITS;
-
- EBUG_ON(shift < KEY_PACKED_BITS_START);
-#endif
- EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);
-
- f->exponent = shift;
- mantissa = bkey_mantissa(m, f);
-
- /*
- * If we've got garbage bits, set them to all 1s - it's legal for the
- * bfloat to compare larger than the original key, but not smaller:
- */
- if (exponent < 0)
- mantissa |= ~(~0U << -exponent);
-
- f->mantissa = mantissa;
-}
-
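The "garbage bits" fix-up at the end of make_bfloat() is pure bit arithmetic: if the exponent went negative by n, the low n mantissa bits were never really part of the key, so they're forced to 1 and the bfloat can only ever compare too large, never too small. A standalone sketch with n = 3 (illustration only):

    #include <assert.h>

    int main(void)
    {
        unsigned mantissa = 0xabc0;   /* low 3 bits ran off the end of the key */
        int exponent = -3;

        if (exponent < 0)
            mantissa |= ~(~0U << -exponent);   /* force the garbage bits to 1s */

        assert(mantissa == 0xabc7);
        return 0;
    }
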
-/* bytes remaining - only valid for last bset: */
-static unsigned __bset_tree_capacity(struct btree *b, const struct bset_tree *t)
-{
- bset_aux_tree_verify(b);
-
- return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
-}
-
-static unsigned bset_ro_tree_capacity(struct btree *b, const struct bset_tree *t)
-{
- return __bset_tree_capacity(b, t) / sizeof(struct bkey_float);
-}
-
-static unsigned bset_rw_tree_capacity(struct btree *b, const struct bset_tree *t)
-{
- return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree);
-}
-
-static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
-{
- struct bkey_packed *k;
-
- t->size = 1;
- t->extra = BSET_RW_AUX_TREE_VAL;
- rw_aux_tree(b, t)[0].offset =
- __btree_node_key_to_offset(b, btree_bkey_first(b, t));
-
- bset_tree_for_each_key(b, t, k) {
- if (t->size == bset_rw_tree_capacity(b, t))
- break;
-
- if ((void *) k - (void *) rw_aux_to_bkey(b, t, t->size - 1) >
- L1_CACHE_BYTES)
- rw_aux_tree_set(b, t, t->size++, k);
- }
-}
-
-static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
-{
- struct bkey_packed *k = btree_bkey_first(b, t);
- struct bkey_i min_key, max_key;
- unsigned cacheline = 1;
-
- t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)),
- bset_ro_tree_capacity(b, t));
-retry:
- if (t->size < 2) {
- t->size = 0;
- t->extra = BSET_NO_AUX_TREE_VAL;
- return;
- }
-
- t->extra = eytzinger1_extra(t->size - 1);
-
- /* First we figure out where the first key in each cacheline is */
- eytzinger1_for_each(j, t->size - 1) {
- while (bkey_to_cacheline(b, t, k) < cacheline)
- k = bkey_p_next(k);
-
- if (k >= btree_bkey_last(b, t)) {
- /* XXX: this path sucks */
- t->size--;
- goto retry;
- }
-
- bkey_float(b, t, j)->key_offset =
- bkey_to_cacheline_offset(b, t, cacheline++, k);
-
- EBUG_ON(tree_to_bkey(b, t, j) != k);
- }
-
- if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
- bkey_init(&min_key.k);
- min_key.k.p = b->data->min_key;
- }
-
- if (!bkey_pack_pos(bkey_to_packed(&max_key), b->data->max_key, b)) {
- bkey_init(&max_key.k);
- max_key.k.p = b->data->max_key;
- }
-
- /* Then we build the tree */
- eytzinger1_for_each(j, t->size - 1)
- make_bfloat(b, t, j,
- bkey_to_packed(&min_key),
- bkey_to_packed(&max_key));
-}
-
-static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
-{
- struct bset_tree *i;
-
- for (i = b->set; i != t; i++)
- BUG_ON(bset_has_rw_aux_tree(i));
-
- bch2_bset_set_no_aux_tree(b, t);
-
- /* round up to next cacheline: */
- t->aux_data_offset = round_up(bset_aux_tree_buf_start(b, t),
- SMP_CACHE_BYTES / sizeof(u64));
-
- bset_aux_tree_verify(b);
-}
-
-void bch2_bset_build_aux_tree(struct btree *b, struct bset_tree *t,
- bool writeable)
-{
- if (writeable
- ? bset_has_rw_aux_tree(t)
- : bset_has_ro_aux_tree(t))
- return;
-
- bset_alloc_tree(b, t);
-
- if (!__bset_tree_capacity(b, t))
- return;
-
- if (writeable)
- __build_rw_aux_tree(b, t);
- else
- __build_ro_aux_tree(b, t);
-
- bset_aux_tree_verify(b);
-}
-
-void bch2_bset_init_first(struct btree *b, struct bset *i)
-{
- struct bset_tree *t;
-
- BUG_ON(b->nsets);
-
- memset(i, 0, sizeof(*i));
- get_random_bytes(&i->seq, sizeof(i->seq));
- SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
-
- t = &b->set[b->nsets++];
- set_btree_bset(b, t, i);
-}
-
-void bch2_bset_init_next(struct btree *b, struct btree_node_entry *bne)
-{
- struct bset *i = &bne->keys;
- struct bset_tree *t;
-
- BUG_ON(bset_byte_offset(b, bne) >= btree_buf_bytes(b));
- BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b)));
- BUG_ON(b->nsets >= MAX_BSETS);
-
- memset(i, 0, sizeof(*i));
- i->seq = btree_bset_first(b)->seq;
- SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
-
- t = &b->set[b->nsets++];
- set_btree_bset(b, t, i);
-}
-
-/*
- * find _some_ key in the same bset as @k that precedes @k - not necessarily the
- * immediate predecessor:
- */
-static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
- struct bkey_packed *k)
-{
- struct bkey_packed *p;
- unsigned offset;
- int j;
-
- EBUG_ON(k < btree_bkey_first(b, t) ||
- k > btree_bkey_last(b, t));
-
- if (k == btree_bkey_first(b, t))
- return NULL;
-
- switch (bset_aux_tree_type(t)) {
- case BSET_NO_AUX_TREE:
- p = btree_bkey_first(b, t);
- break;
- case BSET_RO_AUX_TREE:
- j = min_t(unsigned, t->size - 1, bkey_to_cacheline(b, t, k));
-
- do {
- p = j ? tree_to_bkey(b, t,
- __inorder_to_eytzinger1(j--,
- t->size - 1, t->extra))
- : btree_bkey_first(b, t);
- } while (p >= k);
- break;
- case BSET_RW_AUX_TREE:
- offset = __btree_node_key_to_offset(b, k);
- j = rw_aux_tree_bsearch(b, t, offset);
- p = j ? rw_aux_to_bkey(b, t, j - 1)
- : btree_bkey_first(b, t);
- break;
- }
-
- return p;
-}
-
-struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
- struct bset_tree *t,
- struct bkey_packed *k,
- unsigned min_key_type)
-{
- struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;
-
- while ((p = __bkey_prev(b, t, k)) && !ret) {
- for (i = p; i != k; i = bkey_p_next(i))
- if (i->type >= min_key_type)
- ret = i;
-
- k = p;
- }
-
- if (bch2_expensive_debug_checks) {
- BUG_ON(ret >= orig_k);
-
- for (i = ret
- ? bkey_p_next(ret)
- : btree_bkey_first(b, t);
- i != orig_k;
- i = bkey_p_next(i))
- BUG_ON(i->type >= min_key_type);
- }
-
- return ret;
-}
-
-/* Insert */
-
-static void rw_aux_tree_insert_entry(struct btree *b,
- struct bset_tree *t,
- unsigned idx)
-{
- EBUG_ON(!idx || idx > t->size);
- struct bkey_packed *start = rw_aux_to_bkey(b, t, idx - 1);
- struct bkey_packed *end = idx < t->size
- ? rw_aux_to_bkey(b, t, idx)
- : btree_bkey_last(b, t);
-
- if (t->size < bset_rw_tree_capacity(b, t) &&
- (void *) end - (void *) start > L1_CACHE_BYTES) {
- struct bkey_packed *k = start;
-
- while (1) {
- k = bkey_p_next(k);
- if (k == end)
- break;
-
- if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
- memmove(&rw_aux_tree(b, t)[idx + 1],
- &rw_aux_tree(b, t)[idx],
- (void *) &rw_aux_tree(b, t)[t->size] -
- (void *) &rw_aux_tree(b, t)[idx]);
- t->size++;
- rw_aux_tree_set(b, t, idx, k);
- break;
- }
- }
- }
-}
-
-static void bch2_bset_fix_lookup_table(struct btree *b,
- struct bset_tree *t,
- struct bkey_packed *_where,
- unsigned clobber_u64s,
- unsigned new_u64s)
-{
- int shift = new_u64s - clobber_u64s;
- unsigned idx, j, where = __btree_node_key_to_offset(b, _where);
-
- EBUG_ON(bset_has_ro_aux_tree(t));
-
- if (!bset_has_rw_aux_tree(t))
- return;
-
- if (where > rw_aux_tree(b, t)[t->size - 1].offset) {
- rw_aux_tree_insert_entry(b, t, t->size);
- goto verify;
- }
-
- /* returns first entry >= where */
- idx = rw_aux_tree_bsearch(b, t, where);
-
- if (rw_aux_tree(b, t)[idx].offset == where) {
- if (!idx) { /* never delete first entry */
- idx++;
- } else if (where < t->end_offset) {
- rw_aux_tree_set(b, t, idx++, _where);
- } else {
- EBUG_ON(where != t->end_offset);
- rw_aux_tree_insert_entry(b, t, --t->size);
- goto verify;
- }
- }
-
- EBUG_ON(idx < t->size && rw_aux_tree(b, t)[idx].offset <= where);
- if (idx < t->size &&
- rw_aux_tree(b, t)[idx].offset + shift ==
- rw_aux_tree(b, t)[idx - 1].offset) {
- memmove(&rw_aux_tree(b, t)[idx],
- &rw_aux_tree(b, t)[idx + 1],
- (void *) &rw_aux_tree(b, t)[t->size] -
- (void *) &rw_aux_tree(b, t)[idx + 1]);
- t->size -= 1;
- }
-
- for (j = idx; j < t->size; j++)
- rw_aux_tree(b, t)[j].offset += shift;
-
- EBUG_ON(idx < t->size &&
- rw_aux_tree(b, t)[idx].offset ==
- rw_aux_tree(b, t)[idx - 1].offset);
-
- rw_aux_tree_insert_entry(b, t, idx);
-
-verify:
- bch2_bset_verify_rw_aux_tree(b, t);
- bset_aux_tree_verify(b);
-}
-
-void bch2_bset_insert(struct btree *b,
- struct bkey_packed *where,
- struct bkey_i *insert,
- unsigned clobber_u64s)
-{
- struct bkey_format *f = &b->format;
- struct bset_tree *t = bset_tree_last(b);
- struct bkey_packed packed, *src = bkey_to_packed(insert);
-
- bch2_bset_verify_rw_aux_tree(b, t);
- bch2_verify_insert_pos(b, where, bkey_to_packed(insert), clobber_u64s);
-
- if (bch2_bkey_pack_key(&packed, &insert->k, f))
- src = &packed;
-
- if (!bkey_deleted(&insert->k))
- btree_keys_account_key_add(&b->nr, t - b->set, src);
-
- if (src->u64s != clobber_u64s) {
- u64 *src_p = (u64 *) where->_data + clobber_u64s;
- u64 *dst_p = (u64 *) where->_data + src->u64s;
-
- EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
- (int) clobber_u64s - src->u64s);
-
- memmove_u64s(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
- le16_add_cpu(&bset(b, t)->u64s, src->u64s - clobber_u64s);
- set_btree_bset_end(b, t);
- }
-
- memcpy_u64s_small(where, src,
- bkeyp_key_u64s(f, src));
- memcpy_u64s(bkeyp_val(f, where), &insert->v,
- bkeyp_val_u64s(f, src));
-
- if (src->u64s != clobber_u64s)
- bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);
-
- bch2_verify_btree_nr_keys(b);
-}
-
-void bch2_bset_delete(struct btree *b,
- struct bkey_packed *where,
- unsigned clobber_u64s)
-{
- struct bset_tree *t = bset_tree_last(b);
- u64 *src_p = (u64 *) where->_data + clobber_u64s;
- u64 *dst_p = where->_data;
-
- bch2_bset_verify_rw_aux_tree(b, t);
-
- EBUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);
-
- memmove_u64s_down(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
- le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s);
- set_btree_bset_end(b, t);
-
- bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, 0);
-}
-
-/* Lookup */
-
-__flatten
-static struct bkey_packed *bset_search_write_set(const struct btree *b,
- struct bset_tree *t,
- struct bpos *search)
-{
- unsigned l = 0, r = t->size;
-
- while (l + 1 != r) {
- unsigned m = (l + r) >> 1;
-
- if (bpos_lt(rw_aux_tree(b, t)[m].k, *search))
- l = m;
- else
- r = m;
- }
-
- return rw_aux_to_bkey(b, t, l);
-}
-
-static inline void prefetch_four_cachelines(void *p)
-{
-#ifdef CONFIG_X86_64
- asm("prefetcht0 (-127 + 64 * 0)(%0);"
- "prefetcht0 (-127 + 64 * 1)(%0);"
- "prefetcht0 (-127 + 64 * 2)(%0);"
- "prefetcht0 (-127 + 64 * 3)(%0);"
- :
- : "r" (p + 127));
-#else
- prefetch(p + L1_CACHE_BYTES * 0);
- prefetch(p + L1_CACHE_BYTES * 1);
- prefetch(p + L1_CACHE_BYTES * 2);
- prefetch(p + L1_CACHE_BYTES * 3);
-#endif
-}
-
-static inline bool bkey_mantissa_bits_dropped(const struct btree *b,
- const struct bkey_float *f)
-{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- unsigned key_bits_start = b->format.key_u64s * 64 - b->nr_key_bits;
-
- return f->exponent > key_bits_start;
-#else
- unsigned key_bits_end = high_bit_offset + b->nr_key_bits;
-
- return f->exponent + BKEY_MANTISSA_BITS < key_bits_end;
-#endif
-}
-
-__flatten
-static struct bkey_packed *bset_search_tree(const struct btree *b,
- const struct bset_tree *t,
- const struct bpos *search,
- const struct bkey_packed *packed_search)
-{
- struct ro_aux_tree *base = ro_aux_tree_base(b, t);
- struct bkey_float *f;
- struct bkey_packed *k;
- unsigned inorder, n = 1, l, r;
- int cmp;
-
- do {
- if (likely(n << 4 < t->size))
- prefetch(&base->f[n << 4]);
-
- f = &base->f[n];
- if (unlikely(f->exponent >= BFLOAT_FAILED))
- goto slowpath;
-
- l = f->mantissa;
- r = bkey_mantissa(packed_search, f);
-
- if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f))
- goto slowpath;
-
- n = n * 2 + (l < r);
- continue;
-slowpath:
- k = tree_to_bkey(b, t, n);
- cmp = bkey_cmp_p_or_unp(b, k, packed_search, search);
- if (!cmp)
- return k;
-
- n = n * 2 + (cmp < 0);
- } while (n < t->size);
-
- inorder = __eytzinger1_to_inorder(n >> 1, t->size - 1, t->extra);
-
- /*
- * n would have been the node we recursed to - the low bit tells us if
- * we recursed left or recursed right.
- */
- if (likely(!(n & 1))) {
- --inorder;
- if (unlikely(!inorder))
- return btree_bkey_first(b, t);
-
- f = &base->f[eytzinger1_prev(n >> 1, t->size - 1)];
- }
-
- return cacheline_to_bkey(b, t, inorder, f->key_offset);
-}
-
-static __always_inline __flatten
-struct bkey_packed *__bch2_bset_search(struct btree *b,
- struct bset_tree *t,
- struct bpos *search,
- const struct bkey_packed *lossy_packed_search)
-{
-
- /*
-	 * First, we search for a cacheline, then we do a linear search within
-	 * that cacheline.
-	 *
-	 * To search for the cacheline, there are three different possibilities:
- * * The set is too small to have a search tree, so we just do a linear
- * search over the whole set.
- * * The set is the one we're currently inserting into; keeping a full
- * auxiliary search tree up to date would be too expensive, so we
- * use a much simpler lookup table to do a binary search -
- * bset_search_write_set().
- * * Or we use the auxiliary search tree we constructed earlier -
- * bset_search_tree()
- */
-
- switch (bset_aux_tree_type(t)) {
- case BSET_NO_AUX_TREE:
- return btree_bkey_first(b, t);
- case BSET_RW_AUX_TREE:
- return bset_search_write_set(b, t, search);
- case BSET_RO_AUX_TREE:
- return bset_search_tree(b, t, search, lossy_packed_search);
- default:
- BUG();
- }
-}
-
-static __always_inline __flatten
-struct bkey_packed *bch2_bset_search_linear(struct btree *b,
- struct bset_tree *t,
- struct bpos *search,
- struct bkey_packed *packed_search,
- const struct bkey_packed *lossy_packed_search,
- struct bkey_packed *m)
-{
- if (lossy_packed_search)
- while (m != btree_bkey_last(b, t) &&
- bkey_iter_cmp_p_or_unp(b, m,
- lossy_packed_search, search) < 0)
- m = bkey_p_next(m);
-
- if (!packed_search)
- while (m != btree_bkey_last(b, t) &&
- bkey_iter_pos_cmp(b, m, search) < 0)
- m = bkey_p_next(m);
-
- if (bch2_expensive_debug_checks) {
- struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
-
- BUG_ON(prev &&
- bkey_iter_cmp_p_or_unp(b, prev,
- packed_search, search) >= 0);
- }
-
- return m;
-}
-
-/* Btree node iterator */
-
-static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
- struct btree *b,
- const struct bkey_packed *k,
- const struct bkey_packed *end)
-{
- if (k != end) {
- struct btree_node_iter_set *pos;
-
- btree_node_iter_for_each(iter, pos)
- ;
-
- BUG_ON(pos >= iter->data + ARRAY_SIZE(iter->data));
- *pos = (struct btree_node_iter_set) {
- __btree_node_key_to_offset(b, k),
- __btree_node_key_to_offset(b, end)
- };
- }
-}
-
-void bch2_btree_node_iter_push(struct btree_node_iter *iter,
- struct btree *b,
- const struct bkey_packed *k,
- const struct bkey_packed *end)
-{
- __bch2_btree_node_iter_push(iter, b, k, end);
- bch2_btree_node_iter_sort(iter, b);
-}
-
-noinline __flatten __cold
-static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
- struct btree *b, struct bpos *search)
-{
- struct bkey_packed *k;
-
- trace_bkey_pack_pos_fail(search);
-
- bch2_btree_node_iter_init_from_start(iter, b);
-
- while ((k = bch2_btree_node_iter_peek(iter, b)) &&
- bkey_iter_pos_cmp(b, k, search) < 0)
- bch2_btree_node_iter_advance(iter, b);
-}
-
-/**
- * bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
- * given position
- *
- * @iter: iterator to initialize
- * @b: btree node to search
- * @search: search key
- *
- * Main entry point to the lookup code for individual btree nodes:
- *
- * NOTE:
- *
- * When you don't filter out deleted keys, btree nodes _do_ contain duplicate
- * keys. This doesn't matter for most code, but it does matter for lookups.
- *
- * Consider a run of adjacent keys containing a string of equal keys:
- * i j k k k k l m
- *
- * If you search for k, the lookup code isn't guaranteed to return you any
- * specific k. The lookup code is conceptually doing a binary search and
- * iterating backwards is very expensive so if the pivot happens to land at the
- * last k that's what you'll get.
- *
- * This works out ok, but it's something to be aware of:
- *
- * - For non extents, we guarantee that the live key comes last - see
- * btree_node_iter_cmp(), keys_out_of_order(). So the duplicates you don't
- * see will only be deleted keys you don't care about.
- *
- * - For extents, deleted keys sort last (see the comment at the top of this
- * file). But when you're searching for extents, you actually want the first
- * key strictly greater than your search key - an extent that compares equal
- * to the search key is going to have 0 sectors after the search key.
- *
- * But this does mean that we can't just search for
- * bpos_successor(start_of_range) to get the first extent that overlaps with
- * the range we want - if we're unlucky and there's an extent that ends
- * exactly where we searched, then there could be a deleted key at the same
- * position and we'd get that when we search instead of the preceding extent
- * we needed.
- *
- * So we've got to search for start_of_range, then after the lookup iterate
- * past any extents that compare equal to the position we searched for.
- */
-__flatten
-void bch2_btree_node_iter_init(struct btree_node_iter *iter,
- struct btree *b, struct bpos *search)
-{
- struct bkey_packed p, *packed_search = NULL;
- struct btree_node_iter_set *pos = iter->data;
- struct bkey_packed *k[MAX_BSETS];
- unsigned i;
-
- EBUG_ON(bpos_lt(*search, b->data->min_key));
- EBUG_ON(bpos_gt(*search, b->data->max_key));
- bset_aux_tree_verify(b);
-
- memset(iter, 0, sizeof(*iter));
-
- switch (bch2_bkey_pack_pos_lossy(&p, *search, b)) {
- case BKEY_PACK_POS_EXACT:
- packed_search = &p;
- break;
- case BKEY_PACK_POS_SMALLER:
- packed_search = NULL;
- break;
- case BKEY_PACK_POS_FAIL:
- btree_node_iter_init_pack_failed(iter, b, search);
- return;
- }
-
- for (i = 0; i < b->nsets; i++) {
- k[i] = __bch2_bset_search(b, b->set + i, search, &p);
- prefetch_four_cachelines(k[i]);
- }
-
- for (i = 0; i < b->nsets; i++) {
- struct bset_tree *t = b->set + i;
- struct bkey_packed *end = btree_bkey_last(b, t);
-
- k[i] = bch2_bset_search_linear(b, t, search,
- packed_search, &p, k[i]);
- if (k[i] != end)
- *pos++ = (struct btree_node_iter_set) {
- __btree_node_key_to_offset(b, k[i]),
- __btree_node_key_to_offset(b, end)
- };
- }
-
- bch2_btree_node_iter_sort(iter, b);
-}
-
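A hedged usage sketch of the iterator API the comment above documents -- initialize at a position, then peek/advance. The helper name is hypothetical and error handling is omitted:

    /* Sketch only: count the keys the iterator yields from @pos onwards. */
    static unsigned demo_count_from(struct btree *b, struct bpos pos)
    {
        struct btree_node_iter iter;
        unsigned nr = 0;

        bch2_btree_node_iter_init(&iter, b, &pos);

        while (bch2_btree_node_iter_peek(&iter, b)) {
            nr++;
            bch2_btree_node_iter_advance(&iter, b);
        }
        return nr;
    }
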
-void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
- struct btree *b)
-{
- memset(iter, 0, sizeof(*iter));
-
- for_each_bset(b, t)
- __bch2_btree_node_iter_push(iter, b,
- btree_bkey_first(b, t),
- btree_bkey_last(b, t));
- bch2_btree_node_iter_sort(iter, b);
-}
-
-struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
- struct btree *b,
- struct bset_tree *t)
-{
- struct btree_node_iter_set *set;
-
- btree_node_iter_for_each(iter, set)
- if (set->end == t->end_offset)
- return __btree_node_offset_to_key(b, set->k);
-
- return btree_bkey_last(b, t);
-}
-
-static inline bool btree_node_iter_sort_two(struct btree_node_iter *iter,
- struct btree *b,
- unsigned first)
-{
- bool ret;
-
- if ((ret = (btree_node_iter_cmp(b,
- iter->data[first],
- iter->data[first + 1]) > 0)))
- swap(iter->data[first], iter->data[first + 1]);
- return ret;
-}
-
-void bch2_btree_node_iter_sort(struct btree_node_iter *iter,
- struct btree *b)
-{
- /* unrolled bubble sort: */
-
- if (!__btree_node_iter_set_end(iter, 2)) {
- btree_node_iter_sort_two(iter, b, 0);
- btree_node_iter_sort_two(iter, b, 1);
- }
-
- if (!__btree_node_iter_set_end(iter, 1))
- btree_node_iter_sort_two(iter, b, 0);
-}
-
-void bch2_btree_node_iter_set_drop(struct btree_node_iter *iter,
- struct btree_node_iter_set *set)
-{
- struct btree_node_iter_set *last =
- iter->data + ARRAY_SIZE(iter->data) - 1;
-
- memmove(&set[0], &set[1], (void *) last - (void *) set);
- *last = (struct btree_node_iter_set) { 0, 0 };
-}
-
-static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
- struct btree *b)
-{
- iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s;
-
- EBUG_ON(iter->data->k > iter->data->end);
-
- if (unlikely(__btree_node_iter_set_end(iter, 0))) {
- /* avoid an expensive memmove call: */
- iter->data[0] = iter->data[1];
- iter->data[1] = iter->data[2];
- iter->data[2] = (struct btree_node_iter_set) { 0, 0 };
- return;
- }
-
- if (__btree_node_iter_set_end(iter, 1))
- return;
-
- if (!btree_node_iter_sort_two(iter, b, 0))
- return;
-
- if (__btree_node_iter_set_end(iter, 2))
- return;
-
- btree_node_iter_sort_two(iter, b, 1);
-}
-
-void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
- struct btree *b)
-{
- if (bch2_expensive_debug_checks) {
- bch2_btree_node_iter_verify(iter, b);
- bch2_btree_node_iter_next_check(iter, b);
- }
-
- __bch2_btree_node_iter_advance(iter, b);
-}
-
-/*
- * Expensive:
- */
-struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
- struct btree *b)
-{
- struct bkey_packed *k, *prev = NULL;
- struct btree_node_iter_set *set;
- unsigned end = 0;
-
- if (bch2_expensive_debug_checks)
- bch2_btree_node_iter_verify(iter, b);
-
- for_each_bset(b, t) {
- k = bch2_bkey_prev_all(b, t,
- bch2_btree_node_iter_bset_pos(iter, b, t));
- if (k &&
- (!prev || bkey_iter_cmp(b, k, prev) > 0)) {
- prev = k;
- end = t->end_offset;
- }
- }
-
- if (!prev)
- return NULL;
-
- /*
- * We're manually memmoving instead of just calling sort() to ensure the
- * prev we picked ends up in slot 0 - sort won't necessarily put it
- * there because of duplicate deleted keys:
- */
- btree_node_iter_for_each(iter, set)
- if (set->end == end)
- goto found;
-
- BUG_ON(set != &iter->data[__btree_node_iter_used(iter)]);
-found:
- BUG_ON(set >= iter->data + ARRAY_SIZE(iter->data));
-
- memmove(&iter->data[1],
- &iter->data[0],
- (void *) set - (void *) &iter->data[0]);
-
- iter->data[0].k = __btree_node_key_to_offset(b, prev);
- iter->data[0].end = end;
-
- if (bch2_expensive_debug_checks)
- bch2_btree_node_iter_verify(iter, b);
- return prev;
-}
-
-struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *iter,
- struct btree *b)
-{
- struct bkey_packed *prev;
-
- do {
- prev = bch2_btree_node_iter_prev_all(iter, b);
- } while (prev && bkey_deleted(prev));
-
- return prev;
-}
-
-struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
- struct btree *b,
- struct bkey *u)
-{
- struct bkey_packed *k = bch2_btree_node_iter_peek(iter, b);
-
- return k ? bkey_disassemble(b, k, u) : bkey_s_c_null;
-}
-
-/* Mergesort */
-
-void bch2_btree_keys_stats(const struct btree *b, struct bset_stats *stats)
-{
- for_each_bset_c(b, t) {
- enum bset_aux_tree_type type = bset_aux_tree_type(t);
- size_t j;
-
- stats->sets[type].nr++;
- stats->sets[type].bytes += le16_to_cpu(bset(b, t)->u64s) *
- sizeof(u64);
-
- if (bset_has_ro_aux_tree(t)) {
- stats->floats += t->size - 1;
-
- for (j = 1; j < t->size; j++)
- stats->failed +=
- bkey_float(b, t, j)->exponent ==
- BFLOAT_FAILED;
- }
- }
-}
-
-void bch2_bfloat_to_text(struct printbuf *out, struct btree *b,
- struct bkey_packed *k)
-{
- struct bset_tree *t = bch2_bkey_to_bset(b, k);
- struct bkey uk;
- unsigned j, inorder;
-
- if (!bset_has_ro_aux_tree(t))
- return;
-
- inorder = bkey_to_cacheline(b, t, k);
- if (!inorder || inorder >= t->size)
- return;
-
- j = __inorder_to_eytzinger1(inorder, t->size - 1, t->extra);
- if (k != tree_to_bkey(b, t, j))
- return;
-
- switch (bkey_float(b, t, j)->exponent) {
- case BFLOAT_FAILED:
- uk = bkey_unpack_key(b, k);
- prt_printf(out,
- " failed unpacked at depth %u\n"
- "\t",
- ilog2(j));
- bch2_bpos_to_text(out, uk.p);
- prt_printf(out, "\n");
- break;
- }
-}
diff --git a/fs/bcachefs/bset.h b/fs/bcachefs/bset.h
deleted file mode 100644
index 6953d55b72cc..000000000000
--- a/fs/bcachefs/bset.h
+++ /dev/null
@@ -1,544 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BSET_H
-#define _BCACHEFS_BSET_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "bcachefs.h"
-#include "bkey.h"
-#include "bkey_methods.h"
-#include "btree_types.h"
-#include "util.h" /* for time_stats */
-#include "vstructs.h"
-
-/*
- * BKEYS:
- *
- * A bkey contains a key, a size field, a variable number of pointers, and some
- * ancillary flag bits.
- *
- * We use two different functions for validating bkeys, bkey_invalid() and
- * bkey_deleted().
- *
- * The one exception to the rule that bkey_invalid() filters out invalid keys is
- * that it also filters out keys of size 0 - these are keys that have been
- * completely overwritten. It'd be safe to delete these in memory while leaving
- * them on disk, just unnecessary work - so we filter them out when resorting
- * instead.
- *
- * We can't filter out stale keys when we're resorting, because garbage
- * collection needs to find them to ensure bucket gens don't wrap around -
- * unless we're rewriting the btree node, those stale keys still exist on disk.
- *
- * We also implement functions here for removing some number of sectors from the
- * front or the back of a bkey - this is mainly used for fixing overlapping
- * extents, by removing the overlapping sectors from the older key.
- *
- * BSETS:
- *
- * A bset is an array of bkeys laid out contiguously in memory in sorted order,
- * along with a header. A btree node is made up of a number of these, written at
- * different times.
- *
- * There could be many of them on disk, but we never allow there to be more than
- * 4 in memory - we lazily resort as needed.
- *
- * We implement code here for creating and maintaining auxiliary search trees
- * (described below) for searching an individual bset, and on top of that we
- * implement a btree iterator.
- *
- * BTREE ITERATOR:
- *
- * Most of the code in bcachefs doesn't care about an individual bset - it needs
- * to search entire btree nodes and iterate over them in sorted order.
- *
- * The btree iterator code serves both functions; it iterates through the keys
- * in a btree node in sorted order, starting from either keys after a specific
- * point (if you pass it a search key) or the start of the btree node.
- *
- * AUXILIARY SEARCH TREES:
- *
- * Since keys are variable length, we can't use a binary search on a bset - we
- * wouldn't be able to find the start of the next key. But binary searches are
- * slow anyways, due to terrible cache behaviour; bcache originally used binary
- * searches and that code topped out at under 50k lookups/second.
- *
- * So we need to construct some sort of lookup table. Since we only insert keys
- * into the last (unwritten) set, most of the keys within a given btree node are
- * usually in sets that are mostly constant. We use two different types of
- * lookup tables to take advantage of this.
- *
- * Both lookup tables share in common that they don't index every key in the
- * set; they index one key every BSET_CACHELINE bytes, and then a linear search
- * is used for the rest.
- *
- * For sets that have been written to disk and are no longer being inserted
- * into, we construct a binary search tree in an array - traversing a binary
- * search tree in an array gives excellent locality of reference and is very
- * fast, since both children of any node are adjacent to each other in memory
- * (and their grandchildren, and great grandchildren...) - this means
- * prefetching can be used to great effect.
- *
- * It's quite useful, performance-wise, to keep these nodes small - not just
- * because they're more likely to be in L2, but also because we can prefetch
- * more nodes on a single cacheline and thus prefetch more iterations in advance
- * when traversing this tree.
- *
- * Nodes in the auxiliary search tree must contain both a key to compare against
- * (we don't want to fetch the key from the set, that would defeat the purpose),
- * and a pointer to the key. We use a few tricks to compress both of these.
- *
- * To compress the pointer, we take advantage of the fact that one node in the
- * search tree corresponds to precisely BSET_CACHELINE bytes in the set. We have
- * a function (to_inorder()) that takes the index of a node in a binary tree and
- * returns what its index would be in an inorder traversal, so we only have to
- * store the low bits of the offset.
- *
- * The key is 84 bits (KEY_DEV + key->key, the offset on the device). To
- * compress that, we take advantage of the fact that when we're traversing the
- * search tree at every iteration we know that both our search key and the key
- * we're looking for lie within some range - bounded by our previous
- * comparisons. (We special case the start of a search so that this is true even
- * at the root of the tree).
- *
- * So if we know the key we're looking for is between a and b, and a and b don't
- * differ higher than bit 50, we don't need to check anything higher than bit
- * 50.
- *
- * We don't usually need the rest of the bits, either; we only need enough bits
- * to partition the key range we're currently checking. Consider key n - the
- * key our auxiliary search tree node corresponds to, and key p, the key
- * immediately preceding n. The lowest bit we need to store in the auxiliary
- * search tree is the highest bit that differs between n and p.
- *
- * Note that this could be bit 0 - we might sometimes need all 80 bits to do the
- * comparison. But we'd really like our nodes in the auxiliary search tree to be
- * of fixed size.
- *
- * The solution is to make them fixed size, and when we're constructing a node
- * check if p and n differed in the bits we needed them to. If they don't, we
- * flag that node, and when doing lookups we fall back to comparing against the
- * real key. As long as this doesn't happen too often (and it seems to reliably
- * happen a bit less than 1% of the time), we win - even on failures, that key
- * is then more likely to be in cache than if we were doing binary searches all
- * the way, since we're touching so much less memory.
- *
- * The keys in the auxiliary search tree are stored in (software) floating
- * point, with an exponent and a mantissa. The exponent needs to be big enough
- * to address all the bits in the original key, but the number of bits in the
- * mantissa is somewhat arbitrary; more bits just gets us fewer failures.
- *
- * We need 7 bits for the exponent and 3 bits for the key's offset (since keys
- * are 8 byte aligned); using 22 bits for the mantissa means a node is 4 bytes.
- * We need one node per 128 bytes in the btree node, which means the auxiliary
- * search trees take up 3% as much memory as the btree itself.
- *
- * Constructing these auxiliary search trees is moderately expensive, and we
- * don't want to be constantly rebuilding the search tree for the last set
- * whenever we insert another key into it. For the unwritten set, we use a much
- * simpler lookup table - it's just a flat array, so index i in the lookup table
- * corresponds to the ith range of BSET_CACHELINE bytes in the set. Indexing
- * within each byte range works the same as with the auxiliary search trees.
- *
- * These are much easier to keep up to date when we insert a key - we do it
- * somewhat lazily; when we shift a key up we usually just increment the pointer
- * to it, only when it would overflow do we go to the trouble of finding the
- * first key in that range of bytes again.
- */
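/*
 * Minimal userspace sketch (illustrative only, not the bcachefs
 * implementation) of the two-level lookup described above, using plain
 * sorted u64s instead of packed bkeys: a coarse index with one entry per
 * "cacheline" of keys is searched first, then at most one cacheline's
 * worth of keys is scanned linearly. The real code additionally packs the
 * index entries as struct bkey_float laid out in eytzinger order.
 */
#include <stddef.h>
#include <stdint.h>

#define TOY_KEYS_PER_CACHELINE	16	/* stands in for BSET_CACHELINE */

struct toy_set {
	const uint64_t	*keys;		/* sorted keys */
	size_t		nr;
	const uint64_t	*index;		/* keys[0], keys[16], keys[32], ... */
	size_t		index_nr;
};

/* Return the offset of the first key >= search. */
static size_t toy_set_search(const struct toy_set *s, uint64_t search)
{
	size_t l = 0, r = s->index_nr;

	/* Coarse step: binary search one index entry per "cacheline": */
	while (l < r) {
		size_t m = l + (r - l) / 2;

		if (s->index[m] < search)
			l = m + 1;
		else
			r = m;
	}

	/* Fine step: linear scan within at most one "cacheline": */
	size_t pos = l ? (l - 1) * TOY_KEYS_PER_CACHELINE : 0;

	while (pos < s->nr && s->keys[pos] < search)
		pos++;
	return pos;
}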
-
-enum bset_aux_tree_type {
- BSET_NO_AUX_TREE,
- BSET_RO_AUX_TREE,
- BSET_RW_AUX_TREE,
-};
-
-#define BSET_TREE_NR_TYPES 3
-
-#define BSET_NO_AUX_TREE_VAL (U16_MAX)
-#define BSET_RW_AUX_TREE_VAL (U16_MAX - 1)
-
-static inline enum bset_aux_tree_type bset_aux_tree_type(const struct bset_tree *t)
-{
- switch (t->extra) {
- case BSET_NO_AUX_TREE_VAL:
- EBUG_ON(t->size);
- return BSET_NO_AUX_TREE;
- case BSET_RW_AUX_TREE_VAL:
- EBUG_ON(!t->size);
- return BSET_RW_AUX_TREE;
- default:
- EBUG_ON(!t->size);
- return BSET_RO_AUX_TREE;
- }
-}
-
-/*
- * BSET_CACHELINE was originally intended to match the hardware cacheline size -
- * it used to be 64, but the lookup code touches slightly less memory with a
- * larger value - it is currently 256.
- *
- * It defines the number of bytes (in struct bset) per struct bkey_float in
- * the auxiliary search tree - when we're done searching the bkey_float tree we
- * have this many bytes left that we do a linear search over.
- *
- * Since (after level 5) every level of the bset_tree is on a new cacheline,
- * we're touching one fewer cacheline in the bset tree in exchange for one more
- * cacheline in the linear search - but the linear search might stop before it
- * gets to the second cacheline.
- */
-
-#define BSET_CACHELINE 256
-
-static inline size_t btree_keys_cachelines(const struct btree *b)
-{
- return (1U << b->byte_order) / BSET_CACHELINE;
-}
-
-static inline size_t btree_aux_data_bytes(const struct btree *b)
-{
- return btree_keys_cachelines(b) * 8;
-}
-
-static inline size_t btree_aux_data_u64s(const struct btree *b)
-{
- return btree_aux_data_bytes(b) / sizeof(u64);
-}
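/*
 * Worked example (assuming a 256KiB btree node size, i.e. b->byte_order ==
 * 18): btree_keys_cachelines() == 262144 / 256 == 1024, so
 * btree_aux_data_bytes() == 8192 and btree_aux_data_u64s() == 1024.
 */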
-
-#define for_each_bset(_b, _t) \
- for (struct bset_tree *_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)
-
-#define for_each_bset_c(_b, _t) \
- for (const struct bset_tree *_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)
-
-#define bset_tree_for_each_key(_b, _t, _k) \
- for (_k = btree_bkey_first(_b, _t); \
- _k != btree_bkey_last(_b, _t); \
- _k = bkey_p_next(_k))
-
-static inline bool bset_has_ro_aux_tree(const struct bset_tree *t)
-{
- return bset_aux_tree_type(t) == BSET_RO_AUX_TREE;
-}
-
-static inline bool bset_has_rw_aux_tree(struct bset_tree *t)
-{
- return bset_aux_tree_type(t) == BSET_RW_AUX_TREE;
-}
-
-static inline void bch2_bset_set_no_aux_tree(struct btree *b,
- struct bset_tree *t)
-{
- BUG_ON(t < b->set);
-
- for (; t < b->set + ARRAY_SIZE(b->set); t++) {
- t->size = 0;
- t->extra = BSET_NO_AUX_TREE_VAL;
- t->aux_data_offset = U16_MAX;
- }
-}
-
-static inline void btree_node_set_format(struct btree *b,
- struct bkey_format f)
-{
- int len;
-
- b->format = f;
- b->nr_key_bits = bkey_format_key_bits(&f);
-
- len = bch2_compile_bkey_format(&b->format, b->aux_data);
- BUG_ON(len < 0 || len > U8_MAX);
-
- b->unpack_fn_len = len;
-
- bch2_bset_set_no_aux_tree(b, b->set);
-}
-
-static inline struct bset *bset_next_set(struct btree *b,
- unsigned block_bytes)
-{
- struct bset *i = btree_bset_last(b);
-
- EBUG_ON(!is_power_of_2(block_bytes));
-
- return ((void *) i) + round_up(vstruct_bytes(i), block_bytes);
-}
-
-void bch2_btree_keys_init(struct btree *);
-
-void bch2_bset_init_first(struct btree *, struct bset *);
-void bch2_bset_init_next(struct btree *, struct btree_node_entry *);
-void bch2_bset_build_aux_tree(struct btree *, struct bset_tree *, bool);
-
-void bch2_bset_insert(struct btree *, struct bkey_packed *, struct bkey_i *,
- unsigned);
-void bch2_bset_delete(struct btree *, struct bkey_packed *, unsigned);
-
-/* Bkey utility code */
-
-/* packed or unpacked */
-static inline int bkey_cmp_p_or_unp(const struct btree *b,
- const struct bkey_packed *l,
- const struct bkey_packed *r_packed,
- const struct bpos *r)
-{
- EBUG_ON(r_packed && !bkey_packed(r_packed));
-
- if (unlikely(!bkey_packed(l)))
- return bpos_cmp(packed_to_bkey_c(l)->p, *r);
-
- if (likely(r_packed))
- return __bch2_bkey_cmp_packed_format_checked(l, r_packed, b);
-
- return __bch2_bkey_cmp_left_packed_format_checked(b, l, r);
-}
-
-static inline struct bset_tree *
-bch2_bkey_to_bset_inlined(struct btree *b, struct bkey_packed *k)
-{
- unsigned offset = __btree_node_key_to_offset(b, k);
-
- for_each_bset(b, t)
- if (offset <= t->end_offset) {
- EBUG_ON(offset < btree_bkey_first_offset(t));
- return t;
- }
-
- BUG();
-}
-
-struct bset_tree *bch2_bkey_to_bset(struct btree *, struct bkey_packed *);
-
-struct bkey_packed *bch2_bkey_prev_filter(struct btree *, struct bset_tree *,
- struct bkey_packed *, unsigned);
-
-static inline struct bkey_packed *
-bch2_bkey_prev_all(struct btree *b, struct bset_tree *t, struct bkey_packed *k)
-{
- return bch2_bkey_prev_filter(b, t, k, 0);
-}
-
-static inline struct bkey_packed *
-bch2_bkey_prev(struct btree *b, struct bset_tree *t, struct bkey_packed *k)
-{
- return bch2_bkey_prev_filter(b, t, k, 1);
-}
-
-/* Btree key iteration */
-
-void bch2_btree_node_iter_push(struct btree_node_iter *, struct btree *,
- const struct bkey_packed *,
- const struct bkey_packed *);
-void bch2_btree_node_iter_init(struct btree_node_iter *, struct btree *,
- struct bpos *);
-void bch2_btree_node_iter_init_from_start(struct btree_node_iter *,
- struct btree *);
-struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *,
- struct btree *,
- struct bset_tree *);
-
-void bch2_btree_node_iter_sort(struct btree_node_iter *, struct btree *);
-void bch2_btree_node_iter_set_drop(struct btree_node_iter *,
- struct btree_node_iter_set *);
-void bch2_btree_node_iter_advance(struct btree_node_iter *, struct btree *);
-
-#define btree_node_iter_for_each(_iter, _set) \
- for (_set = (_iter)->data; \
- _set < (_iter)->data + ARRAY_SIZE((_iter)->data) && \
- (_set)->k != (_set)->end; \
- _set++)
-
-static inline bool __btree_node_iter_set_end(struct btree_node_iter *iter,
- unsigned i)
-{
- return iter->data[i].k == iter->data[i].end;
-}
-
-static inline bool bch2_btree_node_iter_end(struct btree_node_iter *iter)
-{
- return __btree_node_iter_set_end(iter, 0);
-}
-
-/*
- * When keys compare equal, deleted keys compare first:
- *
- * XXX: only need to compare pointers for keys that are both within a
- * btree_node_iterator - we need to break ties for prev() to work correctly
- */
-static inline int bkey_iter_cmp(const struct btree *b,
- const struct bkey_packed *l,
- const struct bkey_packed *r)
-{
- return bch2_bkey_cmp_packed(b, l, r)
- ?: (int) bkey_deleted(r) - (int) bkey_deleted(l)
- ?: cmp_int(l, r);
-}
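/*
 * Example: for two entries at the same position where l is deleted and r
 * is not, the middle term is 0 - 1 == -1, so the deleted key sorts first;
 * cmp_int(l, r) only breaks ties when both position and deleted-ness match.
 */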
-
-static inline int btree_node_iter_cmp(const struct btree *b,
- struct btree_node_iter_set l,
- struct btree_node_iter_set r)
-{
- return bkey_iter_cmp(b,
- __btree_node_offset_to_key(b, l.k),
- __btree_node_offset_to_key(b, r.k));
-}
-
-/* These assume r (the search key) is not a deleted key: */
-static inline int bkey_iter_pos_cmp(const struct btree *b,
- const struct bkey_packed *l,
- const struct bpos *r)
-{
- return bkey_cmp_left_packed(b, l, r)
- ?: -((int) bkey_deleted(l));
-}
-
-static inline int bkey_iter_cmp_p_or_unp(const struct btree *b,
- const struct bkey_packed *l,
- const struct bkey_packed *r_packed,
- const struct bpos *r)
-{
- return bkey_cmp_p_or_unp(b, l, r_packed, r)
- ?: -((int) bkey_deleted(l));
-}
-
-static inline struct bkey_packed *
-__bch2_btree_node_iter_peek_all(struct btree_node_iter *iter,
- struct btree *b)
-{
- return __btree_node_offset_to_key(b, iter->data->k);
-}
-
-static inline struct bkey_packed *
-bch2_btree_node_iter_peek_all(struct btree_node_iter *iter, struct btree *b)
-{
- return !bch2_btree_node_iter_end(iter)
- ? __btree_node_offset_to_key(b, iter->data->k)
- : NULL;
-}
-
-static inline struct bkey_packed *
-bch2_btree_node_iter_peek(struct btree_node_iter *iter, struct btree *b)
-{
- struct bkey_packed *k;
-
- while ((k = bch2_btree_node_iter_peek_all(iter, b)) &&
- bkey_deleted(k))
- bch2_btree_node_iter_advance(iter, b);
-
- return k;
-}
-
-static inline struct bkey_packed *
-bch2_btree_node_iter_next_all(struct btree_node_iter *iter, struct btree *b)
-{
- struct bkey_packed *ret = bch2_btree_node_iter_peek_all(iter, b);
-
- if (ret)
- bch2_btree_node_iter_advance(iter, b);
-
- return ret;
-}
-
-struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *,
- struct btree *);
-struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *,
- struct btree *);
-
-struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *,
- struct btree *,
- struct bkey *);
-
-#define for_each_btree_node_key(b, k, iter) \
- for (bch2_btree_node_iter_init_from_start((iter), (b)); \
- (k = bch2_btree_node_iter_peek((iter), (b))); \
- bch2_btree_node_iter_advance(iter, b))
-
-#define for_each_btree_node_key_unpack(b, k, iter, unpacked) \
- for (bch2_btree_node_iter_init_from_start((iter), (b)); \
- (k = bch2_btree_node_iter_peek_unpack((iter), (b), (unpacked))).k;\
- bch2_btree_node_iter_advance(iter, b))
-
-/* Accounting: */
-
-struct btree_nr_keys bch2_btree_node_count_keys(struct btree *);
-
-static inline void btree_keys_account_key(struct btree_nr_keys *n,
- unsigned bset,
- struct bkey_packed *k,
- int sign)
-{
- n->live_u64s += k->u64s * sign;
- n->bset_u64s[bset] += k->u64s * sign;
-
- if (bkey_packed(k))
- n->packed_keys += sign;
- else
- n->unpacked_keys += sign;
-}
-
-static inline void btree_keys_account_val_delta(struct btree *b,
- struct bkey_packed *k,
- int delta)
-{
- struct bset_tree *t = bch2_bkey_to_bset(b, k);
-
- b->nr.live_u64s += delta;
- b->nr.bset_u64s[t - b->set] += delta;
-}
-
-#define btree_keys_account_key_add(_nr, _bset_idx, _k) \
- btree_keys_account_key(_nr, _bset_idx, _k, 1)
-#define btree_keys_account_key_drop(_nr, _bset_idx, _k) \
- btree_keys_account_key(_nr, _bset_idx, _k, -1)
-
-#define btree_account_key_add(_b, _k) \
- btree_keys_account_key(&(_b)->nr, \
- bch2_bkey_to_bset(_b, _k) - (_b)->set, _k, 1)
-#define btree_account_key_drop(_b, _k) \
- btree_keys_account_key(&(_b)->nr, \
- bch2_bkey_to_bset(_b, _k) - (_b)->set, _k, -1)
-
-struct bset_stats {
- struct {
- size_t nr, bytes;
- } sets[BSET_TREE_NR_TYPES];
-
- size_t floats;
- size_t failed;
-};
-
-void bch2_btree_keys_stats(const struct btree *, struct bset_stats *);
-void bch2_bfloat_to_text(struct printbuf *, struct btree *,
- struct bkey_packed *);
-
-/* Debug stuff */
-
-void bch2_dump_bset(struct bch_fs *, struct btree *, struct bset *, unsigned);
-void bch2_dump_btree_node(struct bch_fs *, struct btree *);
-void bch2_dump_btree_node_iter(struct btree *, struct btree_node_iter *);
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-
-void __bch2_verify_btree_nr_keys(struct btree *);
-void bch2_btree_node_iter_verify(struct btree_node_iter *, struct btree *);
-void bch2_verify_insert_pos(struct btree *, struct bkey_packed *,
- struct bkey_packed *, unsigned);
-
-#else
-
-static inline void __bch2_verify_btree_nr_keys(struct btree *b) {}
-static inline void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
- struct btree *b) {}
-static inline void bch2_verify_insert_pos(struct btree *b,
- struct bkey_packed *where,
- struct bkey_packed *insert,
- unsigned clobber_u64s) {}
-#endif
-
-static inline void bch2_verify_btree_nr_keys(struct btree *b)
-{
- if (bch2_debug_check_btree_accounting)
- __bch2_verify_btree_nr_keys(b);
-}
-
-#endif /* _BCACHEFS_BSET_H */
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
deleted file mode 100644
index 7123019ab3bc..000000000000
--- a/fs/bcachefs/btree_cache.c
+++ /dev/null
@@ -1,1491 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "bbpos.h"
-#include "bkey_buf.h"
-#include "btree_cache.h"
-#include "btree_io.h"
-#include "btree_iter.h"
-#include "btree_locking.h"
-#include "debug.h"
-#include "errcode.h"
-#include "error.h"
-#include "journal.h"
-#include "trace.h"
-
-#include <linux/prefetch.h>
-#include <linux/sched/mm.h>
-#include <linux/swap.h>
-
-#define BTREE_CACHE_NOT_FREED_INCREMENT(counter) \
-do { \
- if (shrinker_counter) \
- bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_##counter]++; \
-} while (0)
-
-const char * const bch2_btree_node_flags[] = {
-#define x(f) #f,
- BTREE_FLAGS()
-#undef x
- NULL
-};
-
-void bch2_recalc_btree_reserve(struct bch_fs *c)
-{
- unsigned reserve = 16;
-
- if (!c->btree_roots_known[0].b)
- reserve += 8;
-
- for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
- struct btree_root *r = bch2_btree_id_root(c, i);
-
- if (r->b)
- reserve += min_t(unsigned, 1, r->b->c.level) * 8;
- }
-
- c->btree_cache.nr_reserve = reserve;
-}
-
-static inline size_t btree_cache_can_free(struct btree_cache_list *list)
-{
- struct btree_cache *bc = container_of(list, struct btree_cache, live[list->idx]);
-
- size_t can_free = list->nr;
- if (!list->idx)
- can_free = max_t(ssize_t, 0, can_free - bc->nr_reserve);
- return can_free;
-}
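/*
 * Example: with bc->nr_reserve == 16, an unpinned list (idx 0) holding 100
 * nodes reports 84 freeable nodes, while the pinned list (idx 1) always
 * reports its full count.
 */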
-
-static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b)
-{
- BUG_ON(!list_empty(&b->list));
-
- if (b->c.lock.readers)
- list_add(&b->list, &bc->freed_pcpu);
- else
- list_add(&b->list, &bc->freed_nonpcpu);
-}
-
-static void __bch2_btree_node_to_freelist(struct btree_cache *bc, struct btree *b)
-{
- BUG_ON(!list_empty(&b->list));
- BUG_ON(!b->data);
-
- bc->nr_freeable++;
- list_add(&b->list, &bc->freeable);
-}
-
-void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b)
-{
- struct btree_cache *bc = &c->btree_cache;
-
- mutex_lock(&bc->lock);
- __bch2_btree_node_to_freelist(bc, b);
- mutex_unlock(&bc->lock);
-
- six_unlock_write(&b->c.lock);
- six_unlock_intent(&b->c.lock);
-}
-
-static void __btree_node_data_free(struct btree_cache *bc, struct btree *b)
-{
- BUG_ON(!list_empty(&b->list));
- BUG_ON(btree_node_hashed(b));
-
- /*
- * This should really be done in slub/vmalloc, but we're using the
- * kmalloc_large() path, so we're working around a slub bug by doing
- * this here:
- */
- if (b->data)
- mm_account_reclaimed_pages(btree_buf_bytes(b) / PAGE_SIZE);
- if (b->aux_data)
- mm_account_reclaimed_pages(btree_aux_data_bytes(b) / PAGE_SIZE);
-
- EBUG_ON(btree_node_write_in_flight(b));
-
- clear_btree_node_just_written(b);
-
- kvfree(b->data);
- b->data = NULL;
-#ifdef __KERNEL__
- kvfree(b->aux_data);
-#else
- munmap(b->aux_data, btree_aux_data_bytes(b));
-#endif
- b->aux_data = NULL;
-
- btree_node_to_freedlist(bc, b);
-}
-
-static void btree_node_data_free(struct btree_cache *bc, struct btree *b)
-{
- BUG_ON(list_empty(&b->list));
- list_del_init(&b->list);
- --bc->nr_freeable;
- __btree_node_data_free(bc, b);
-}
-
-static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg,
- const void *obj)
-{
- const struct btree *b = obj;
- const u64 *v = arg->key;
-
- return b->hash_val == *v ? 0 : 1;
-}
-
-static const struct rhashtable_params bch_btree_cache_params = {
- .head_offset = offsetof(struct btree, hash),
- .key_offset = offsetof(struct btree, hash_val),
- .key_len = sizeof(u64),
- .obj_cmpfn = bch2_btree_cache_cmp_fn,
- .automatic_shrinking = true,
-};
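/*
 * Note: the hash table is keyed on the 64-bit hash_val derived from the
 * node's pointer key (see the btree_ptr_hash_val() callers below); per
 * rhashtable convention the compare callback returns 0 on a match.
 */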
-
-static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
-{
- BUG_ON(b->data || b->aux_data);
-
- gfp |= __GFP_ACCOUNT|__GFP_RECLAIMABLE;
-
- b->data = kvmalloc(btree_buf_bytes(b), gfp);
- if (!b->data)
- return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
-#ifdef __KERNEL__
- b->aux_data = kvmalloc(btree_aux_data_bytes(b), gfp);
-#else
- b->aux_data = mmap(NULL, btree_aux_data_bytes(b),
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
- if (b->aux_data == MAP_FAILED)
- b->aux_data = NULL;
-#endif
- if (!b->aux_data) {
- kvfree(b->data);
- b->data = NULL;
- return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
- }
-
- return 0;
-}
-
-static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
-{
- struct btree *b;
-
- b = kzalloc(sizeof(struct btree), gfp);
- if (!b)
- return NULL;
-
- bkey_btree_ptr_init(&b->key);
- INIT_LIST_HEAD(&b->list);
- INIT_LIST_HEAD(&b->write_blocked);
- b->byte_order = ilog2(c->opts.btree_node_size);
- return b;
-}
-
-struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
-{
- struct btree_cache *bc = &c->btree_cache;
- struct btree *b;
-
- b = __btree_node_mem_alloc(c, GFP_KERNEL);
- if (!b)
- return NULL;
-
- if (btree_node_data_alloc(c, b, GFP_KERNEL)) {
- kfree(b);
- return NULL;
- }
-
- bch2_btree_lock_init(&b->c, 0);
-
- __bch2_btree_node_to_freelist(bc, b);
- return b;
-}
-
-static inline bool __btree_node_pinned(struct btree_cache *bc, struct btree *b)
-{
- struct bbpos pos = BBPOS(b->c.btree_id, b->key.k.p);
-
- u64 mask = bc->pinned_nodes_mask[!!b->c.level];
-
- return ((mask & BIT_ULL(b->c.btree_id)) &&
- bbpos_cmp(bc->pinned_nodes_start, pos) < 0 &&
- bbpos_cmp(bc->pinned_nodes_end, pos) >= 0);
-}
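/*
 * Note: the two bbpos_cmp() checks above select positions in the range
 * (pinned_nodes_start, pinned_nodes_end], and indexing the mask by
 * !!b->c.level lets leaf and interior nodes be pinned independently.
 */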
-
-void bch2_node_pin(struct bch_fs *c, struct btree *b)
-{
- struct btree_cache *bc = &c->btree_cache;
-
- mutex_lock(&bc->lock);
- BUG_ON(!__btree_node_pinned(bc, b));
- if (b != btree_node_root(c, b) && !btree_node_pinned(b)) {
- set_btree_node_pinned(b);
- list_move(&b->list, &bc->live[1].list);
- bc->live[0].nr--;
- bc->live[1].nr++;
- }
- mutex_unlock(&bc->lock);
-}
-
-void bch2_btree_cache_unpin(struct bch_fs *c)
-{
- struct btree_cache *bc = &c->btree_cache;
- struct btree *b, *n;
-
- mutex_lock(&bc->lock);
- c->btree_cache.pinned_nodes_mask[0] = 0;
- c->btree_cache.pinned_nodes_mask[1] = 0;
-
- list_for_each_entry_safe(b, n, &bc->live[1].list, list) {
- clear_btree_node_pinned(b);
- list_move(&b->list, &bc->live[0].list);
- bc->live[0].nr++;
- bc->live[1].nr--;
- }
-
- mutex_unlock(&bc->lock);
-}
-
-/* Btree in memory cache - hash table */
-
-void __bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
-{
- lockdep_assert_held(&bc->lock);
-
- int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
- BUG_ON(ret);
-
- /* Cause future lookups for this node to fail: */
- b->hash_val = 0;
-
- if (b->c.btree_id < BTREE_ID_NR)
- --bc->nr_by_btree[b->c.btree_id];
- --bc->live[btree_node_pinned(b)].nr;
- list_del_init(&b->list);
-}
-
-void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
-{
- __bch2_btree_node_hash_remove(bc, b);
- __bch2_btree_node_to_freelist(bc, b);
-}
-
-int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
-{
- BUG_ON(!list_empty(&b->list));
- BUG_ON(b->hash_val);
-
- b->hash_val = btree_ptr_hash_val(&b->key);
- int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash,
- bch_btree_cache_params);
- if (ret)
- return ret;
-
- if (b->c.btree_id < BTREE_ID_NR)
- bc->nr_by_btree[b->c.btree_id]++;
-
- bool p = __btree_node_pinned(bc, b);
- mod_bit(BTREE_NODE_pinned, &b->flags, p);
-
- list_add_tail(&b->list, &bc->live[p].list);
- bc->live[p].nr++;
- return 0;
-}
-
-int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
- unsigned level, enum btree_id id)
-{
- b->c.level = level;
- b->c.btree_id = id;
-
- mutex_lock(&bc->lock);
- int ret = __bch2_btree_node_hash_insert(bc, b);
- mutex_unlock(&bc->lock);
-
- return ret;
-}
-
-void bch2_btree_node_update_key_early(struct btree_trans *trans,
- enum btree_id btree, unsigned level,
- struct bkey_s_c old, struct bkey_i *new)
-{
- struct bch_fs *c = trans->c;
- struct btree *b;
- struct bkey_buf tmp;
- int ret;
-
- bch2_bkey_buf_init(&tmp);
- bch2_bkey_buf_reassemble(&tmp, c, old);
-
- b = bch2_btree_node_get_noiter(trans, tmp.k, btree, level, true);
- if (!IS_ERR_OR_NULL(b)) {
- mutex_lock(&c->btree_cache.lock);
-
- bch2_btree_node_hash_remove(&c->btree_cache, b);
-
- bkey_copy(&b->key, new);
- ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
- BUG_ON(ret);
-
- mutex_unlock(&c->btree_cache.lock);
- six_unlock_read(&b->c.lock);
- }
-
- bch2_bkey_buf_exit(&tmp, c);
-}
-
-__flatten
-static inline struct btree *btree_cache_find(struct btree_cache *bc,
- const struct bkey_i *k)
-{
- u64 v = btree_ptr_hash_val(k);
-
- return rhashtable_lookup_fast(&bc->table, &v, bch_btree_cache_params);
-}
-
-/*
- * this version is for btree nodes that have already been freed (we're not
- * reaping a real btree node)
- */
-static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush, bool shrinker_counter)
-{
- struct btree_cache *bc = &c->btree_cache;
- int ret = 0;
-
- lockdep_assert_held(&bc->lock);
-wait_on_io:
- if (b->flags & ((1U << BTREE_NODE_dirty)|
- (1U << BTREE_NODE_read_in_flight)|
- (1U << BTREE_NODE_write_in_flight))) {
- if (!flush) {
- if (btree_node_dirty(b))
- BTREE_CACHE_NOT_FREED_INCREMENT(dirty);
- else if (btree_node_read_in_flight(b))
- BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight);
- else if (btree_node_write_in_flight(b))
- BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight);
- return -BCH_ERR_ENOMEM_btree_node_reclaim;
- }
-
- /* XXX: waiting on IO with btree cache lock held */
- bch2_btree_node_wait_on_read(b);
- bch2_btree_node_wait_on_write(b);
- }
-
- if (!six_trylock_intent(&b->c.lock)) {
- BTREE_CACHE_NOT_FREED_INCREMENT(lock_intent);
- return -BCH_ERR_ENOMEM_btree_node_reclaim;
- }
-
- if (!six_trylock_write(&b->c.lock)) {
- BTREE_CACHE_NOT_FREED_INCREMENT(lock_write);
- goto out_unlock_intent;
- }
-
- /* recheck under lock */
- if (b->flags & ((1U << BTREE_NODE_read_in_flight)|
- (1U << BTREE_NODE_write_in_flight))) {
- if (!flush) {
- if (btree_node_read_in_flight(b))
- BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight);
- else if (btree_node_write_in_flight(b))
- BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight);
- goto out_unlock;
- }
- six_unlock_write(&b->c.lock);
- six_unlock_intent(&b->c.lock);
- goto wait_on_io;
- }
-
- if (btree_node_noevict(b)) {
- BTREE_CACHE_NOT_FREED_INCREMENT(noevict);
- goto out_unlock;
- }
- if (btree_node_write_blocked(b)) {
- BTREE_CACHE_NOT_FREED_INCREMENT(write_blocked);
- goto out_unlock;
- }
- if (btree_node_will_make_reachable(b)) {
- BTREE_CACHE_NOT_FREED_INCREMENT(will_make_reachable);
- goto out_unlock;
- }
-
- if (btree_node_dirty(b)) {
- if (!flush) {
- BTREE_CACHE_NOT_FREED_INCREMENT(dirty);
- goto out_unlock;
- }
- /*
- * Using the underscore version because we don't want to compact
- * bsets after the write, since this node is about to be evicted
- * - unless btree verify mode is enabled, since it runs out of
- * the post write cleanup:
- */
- if (bch2_verify_btree_ondisk)
- bch2_btree_node_write(c, b, SIX_LOCK_intent,
- BTREE_WRITE_cache_reclaim);
- else
- __bch2_btree_node_write(c, b,
- BTREE_WRITE_cache_reclaim);
-
- six_unlock_write(&b->c.lock);
- six_unlock_intent(&b->c.lock);
- goto wait_on_io;
- }
-out:
- if (b->hash_val && !ret)
- trace_and_count(c, btree_cache_reap, c, b);
- return ret;
-out_unlock:
- six_unlock_write(&b->c.lock);
-out_unlock_intent:
- six_unlock_intent(&b->c.lock);
- ret = -BCH_ERR_ENOMEM_btree_node_reclaim;
- goto out;
-}
-
-static int btree_node_reclaim(struct bch_fs *c, struct btree *b, bool shrinker_counter)
-{
- return __btree_node_reclaim(c, b, false, shrinker_counter);
-}
-
-static int btree_node_write_and_reclaim(struct bch_fs *c, struct btree *b)
-{
- return __btree_node_reclaim(c, b, true, false);
-}
-
-static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- struct btree_cache_list *list = shrink->private_data;
- struct btree_cache *bc = container_of(list, struct btree_cache, live[list->idx]);
- struct bch_fs *c = container_of(bc, struct bch_fs, btree_cache);
- struct btree *b, *t;
- unsigned long nr = sc->nr_to_scan;
- unsigned long can_free = 0;
- unsigned long freed = 0;
- unsigned long touched = 0;
- unsigned i, flags;
- unsigned long ret = SHRINK_STOP;
- bool trigger_writes = atomic_long_read(&bc->nr_dirty) + nr >= list->nr * 3 / 4;
-
- if (bch2_btree_shrinker_disabled)
- return SHRINK_STOP;
-
- mutex_lock(&bc->lock);
- flags = memalloc_nofs_save();
-
- /*
- * It's _really_ critical that we don't free too many btree nodes - we
- * have to always leave ourselves a reserve. The reserve is how we
- * guarantee that allocating memory for a new btree node can always
- * succeed, so that inserting keys into the btree can always succeed and
- * IO can always make forward progress:
- */
- can_free = btree_cache_can_free(list);
- nr = min_t(unsigned long, nr, can_free);
-
- i = 0;
- list_for_each_entry_safe(b, t, &bc->freeable, list) {
- /*
- * Leave a few nodes on the freeable list, so that a btree split
- * won't have to hit the system allocator:
- */
- if (++i <= 3)
- continue;
-
- touched++;
-
- if (touched >= nr)
- goto out;
-
- if (!btree_node_reclaim(c, b, true)) {
- btree_node_data_free(bc, b);
- six_unlock_write(&b->c.lock);
- six_unlock_intent(&b->c.lock);
- freed++;
- bc->nr_freed++;
- }
- }
-restart:
- list_for_each_entry_safe(b, t, &list->list, list) {
- touched++;
-
- if (btree_node_accessed(b)) {
- clear_btree_node_accessed(b);
- bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_access_bit]++;
-			--touched;
- } else if (!btree_node_reclaim(c, b, true)) {
- __bch2_btree_node_hash_remove(bc, b);
- __btree_node_data_free(bc, b);
-
- freed++;
- bc->nr_freed++;
-
- six_unlock_write(&b->c.lock);
- six_unlock_intent(&b->c.lock);
-
- if (freed == nr)
- goto out_rotate;
- } else if (trigger_writes &&
- btree_node_dirty(b) &&
- !btree_node_will_make_reachable(b) &&
- !btree_node_write_blocked(b) &&
- six_trylock_read(&b->c.lock)) {
- list_move(&list->list, &b->list);
- mutex_unlock(&bc->lock);
- __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
- six_unlock_read(&b->c.lock);
- if (touched >= nr)
- goto out_nounlock;
- mutex_lock(&bc->lock);
- goto restart;
- }
-
- if (touched >= nr)
- break;
- }
-out_rotate:
- if (&t->list != &list->list)
- list_move_tail(&list->list, &t->list);
-out:
- mutex_unlock(&bc->lock);
-out_nounlock:
- ret = freed;
- memalloc_nofs_restore(flags);
- trace_and_count(c, btree_cache_scan, sc->nr_to_scan, can_free, ret);
- return ret;
-}
-
-static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- struct btree_cache_list *list = shrink->private_data;
-
- if (bch2_btree_shrinker_disabled)
- return 0;
-
- return btree_cache_can_free(list);
-}
-
-void bch2_fs_btree_cache_exit(struct bch_fs *c)
-{
- struct btree_cache *bc = &c->btree_cache;
- struct btree *b, *t;
- unsigned long flags;
-
- shrinker_free(bc->live[1].shrink);
- shrinker_free(bc->live[0].shrink);
-
- /* vfree() can allocate memory: */
- flags = memalloc_nofs_save();
- mutex_lock(&bc->lock);
-
- if (c->verify_data)
- list_move(&c->verify_data->list, &bc->live[0].list);
-
- kvfree(c->verify_ondisk);
-
- for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
- struct btree_root *r = bch2_btree_id_root(c, i);
-
- if (r->b)
- list_add(&r->b->list, &bc->live[0].list);
- }
-
- list_for_each_entry_safe(b, t, &bc->live[1].list, list)
- bch2_btree_node_hash_remove(bc, b);
- list_for_each_entry_safe(b, t, &bc->live[0].list, list)
- bch2_btree_node_hash_remove(bc, b);
-
- list_for_each_entry_safe(b, t, &bc->freeable, list) {
- BUG_ON(btree_node_read_in_flight(b) ||
- btree_node_write_in_flight(b));
-
- btree_node_data_free(bc, b);
- }
-
- BUG_ON(!bch2_journal_error(&c->journal) &&
- atomic_long_read(&c->btree_cache.nr_dirty));
-
- list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu);
-
- list_for_each_entry_safe(b, t, &bc->freed_nonpcpu, list) {
- list_del(&b->list);
- six_lock_exit(&b->c.lock);
- kfree(b);
- }
-
- mutex_unlock(&bc->lock);
- memalloc_nofs_restore(flags);
-
- for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++)
- BUG_ON(bc->nr_by_btree[i]);
- BUG_ON(bc->live[0].nr);
- BUG_ON(bc->live[1].nr);
- BUG_ON(bc->nr_freeable);
-
- if (bc->table_init_done)
- rhashtable_destroy(&bc->table);
-}
-
-int bch2_fs_btree_cache_init(struct bch_fs *c)
-{
- struct btree_cache *bc = &c->btree_cache;
- struct shrinker *shrink;
- unsigned i;
- int ret = 0;
-
- ret = rhashtable_init(&bc->table, &bch_btree_cache_params);
- if (ret)
- goto err;
-
- bc->table_init_done = true;
-
- bch2_recalc_btree_reserve(c);
-
- for (i = 0; i < bc->nr_reserve; i++)
- if (!__bch2_btree_node_mem_alloc(c))
- goto err;
-
- list_splice_init(&bc->live[0].list, &bc->freeable);
-
- mutex_init(&c->verify_lock);
-
- shrink = shrinker_alloc(0, "%s-btree_cache", c->name);
- if (!shrink)
- goto err;
- bc->live[0].shrink = shrink;
- shrink->count_objects = bch2_btree_cache_count;
- shrink->scan_objects = bch2_btree_cache_scan;
- shrink->seeks = 2;
- shrink->private_data = &bc->live[0];
- shrinker_register(shrink);
-
- shrink = shrinker_alloc(0, "%s-btree_cache-pinned", c->name);
- if (!shrink)
- goto err;
- bc->live[1].shrink = shrink;
- shrink->count_objects = bch2_btree_cache_count;
- shrink->scan_objects = bch2_btree_cache_scan;
- shrink->seeks = 8;
- shrink->private_data = &bc->live[1];
- shrinker_register(shrink);
-
- return 0;
-err:
- return -BCH_ERR_ENOMEM_fs_btree_cache_init;
-}
-
-void bch2_fs_btree_cache_init_early(struct btree_cache *bc)
-{
- mutex_init(&bc->lock);
- for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) {
- bc->live[i].idx = i;
- INIT_LIST_HEAD(&bc->live[i].list);
- }
- INIT_LIST_HEAD(&bc->freeable);
- INIT_LIST_HEAD(&bc->freed_pcpu);
- INIT_LIST_HEAD(&bc->freed_nonpcpu);
-}
-
-/*
- * We can only have one thread cannibalizing other cached btree nodes at a time,
- * or we'll deadlock. We use an open-coded mutex to ensure that, which
- * bch2_btree_cache_cannibalize_lock() will take. This means every time we
- * unlock the root of the btree, we need to release this lock if we have it
- * held.
- */
-void bch2_btree_cache_cannibalize_unlock(struct btree_trans *trans)
-{
- struct bch_fs *c = trans->c;
- struct btree_cache *bc = &c->btree_cache;
-
- if (bc->alloc_lock == current) {
- trace_and_count(c, btree_cache_cannibalize_unlock, trans);
- bc->alloc_lock = NULL;
- closure_wake_up(&bc->alloc_wait);
- }
-}
-
-int bch2_btree_cache_cannibalize_lock(struct btree_trans *trans, struct closure *cl)
-{
- struct bch_fs *c = trans->c;
- struct btree_cache *bc = &c->btree_cache;
- struct task_struct *old;
-
- old = NULL;
- if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current)
- goto success;
-
- if (!cl) {
- trace_and_count(c, btree_cache_cannibalize_lock_fail, trans);
- return -BCH_ERR_ENOMEM_btree_cache_cannibalize_lock;
- }
-
- closure_wait(&bc->alloc_wait, cl);
-
- /* Try again, after adding ourselves to waitlist */
- old = NULL;
- if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current) {
- /* We raced */
- closure_wake_up(&bc->alloc_wait);
- goto success;
- }
-
- trace_and_count(c, btree_cache_cannibalize_lock_fail, trans);
- return -BCH_ERR_btree_cache_cannibalize_lock_blocked;
-
-success:
- trace_and_count(c, btree_cache_cannibalize_lock, trans);
- return 0;
-}
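/*
 * Hypothetical caller sketch (not taken from this file): hold the
 * cannibalize reservation around an allocation so that, under memory
 * pressure, bch2_btree_node_mem_alloc()'s error path may reclaim another
 * cached node; the reservation is always dropped afterwards.
 */
static struct btree *alloc_may_cannibalize(struct btree_trans *trans,
					   struct closure *cl)
{
	struct btree *b;
	int ret = bch2_btree_cache_cannibalize_lock(trans, cl);

	if (ret)
		return ERR_PTR(ret);	/* caller waits on @cl, then retries */

	b = bch2_btree_node_mem_alloc(trans, false);

	bch2_btree_cache_cannibalize_unlock(trans);
	return b;
}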
-
-static struct btree *btree_node_cannibalize(struct bch_fs *c)
-{
- struct btree_cache *bc = &c->btree_cache;
- struct btree *b;
-
- for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++)
- list_for_each_entry_reverse(b, &bc->live[i].list, list)
- if (!btree_node_reclaim(c, b, false))
- return b;
-
- while (1) {
- for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++)
- list_for_each_entry_reverse(b, &bc->live[i].list, list)
- if (!btree_node_write_and_reclaim(c, b))
- return b;
-
- /*
- * Rare case: all nodes were intent-locked.
- * Just busy-wait.
- */
- WARN_ONCE(1, "btree cache cannibalize failed\n");
- cond_resched();
- }
-}
-
-struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_read_locks)
-{
- struct bch_fs *c = trans->c;
- struct btree_cache *bc = &c->btree_cache;
- struct list_head *freed = pcpu_read_locks
- ? &bc->freed_pcpu
- : &bc->freed_nonpcpu;
- struct btree *b, *b2;
- u64 start_time = local_clock();
-
- mutex_lock(&bc->lock);
-
- /*
- * We never free struct btree itself, just the memory that holds the on
- * disk node. Check the freed list before allocating a new one:
- */
- list_for_each_entry(b, freed, list)
- if (!btree_node_reclaim(c, b, false)) {
- list_del_init(&b->list);
- goto got_node;
- }
-
- b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN);
- if (!b) {
- mutex_unlock(&bc->lock);
- bch2_trans_unlock(trans);
- b = __btree_node_mem_alloc(c, GFP_KERNEL);
- if (!b)
- goto err;
- mutex_lock(&bc->lock);
- }
-
- bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0);
-
- BUG_ON(!six_trylock_intent(&b->c.lock));
- BUG_ON(!six_trylock_write(&b->c.lock));
-
-got_node:
- /*
- * btree_free() doesn't free memory; it sticks the node on the end of
- * the list. Check if there's any freed nodes there:
- */
- list_for_each_entry(b2, &bc->freeable, list)
- if (!btree_node_reclaim(c, b2, false)) {
- swap(b->data, b2->data);
- swap(b->aux_data, b2->aux_data);
-
- list_del_init(&b2->list);
- --bc->nr_freeable;
- btree_node_to_freedlist(bc, b2);
- mutex_unlock(&bc->lock);
-
- six_unlock_write(&b2->c.lock);
- six_unlock_intent(&b2->c.lock);
- goto got_mem;
- }
-
- mutex_unlock(&bc->lock);
-
- if (btree_node_data_alloc(c, b, GFP_NOWAIT|__GFP_NOWARN)) {
- bch2_trans_unlock(trans);
- if (btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN))
- goto err;
- }
-
-got_mem:
- BUG_ON(!list_empty(&b->list));
- BUG_ON(btree_node_hashed(b));
- BUG_ON(btree_node_dirty(b));
- BUG_ON(btree_node_write_in_flight(b));
-out:
- b->flags = 0;
- b->written = 0;
- b->nsets = 0;
- b->sib_u64s[0] = 0;
- b->sib_u64s[1] = 0;
- b->whiteout_u64s = 0;
- bch2_btree_keys_init(b);
- set_btree_node_accessed(b);
-
- bch2_time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc],
- start_time);
-
- int ret = bch2_trans_relock(trans);
- if (unlikely(ret)) {
- bch2_btree_node_to_freelist(c, b);
- return ERR_PTR(ret);
- }
-
- return b;
-err:
- mutex_lock(&bc->lock);
-
- /* Try to cannibalize another cached btree node: */
- if (bc->alloc_lock == current) {
- b2 = btree_node_cannibalize(c);
- clear_btree_node_just_written(b2);
- __bch2_btree_node_hash_remove(bc, b2);
-
- if (b) {
- swap(b->data, b2->data);
- swap(b->aux_data, b2->aux_data);
- btree_node_to_freedlist(bc, b2);
- six_unlock_write(&b2->c.lock);
- six_unlock_intent(&b2->c.lock);
- } else {
- b = b2;
- }
-
- BUG_ON(!list_empty(&b->list));
- mutex_unlock(&bc->lock);
-
- trace_and_count(c, btree_cache_cannibalize, trans);
- goto out;
- }
-
- mutex_unlock(&bc->lock);
- return ERR_PTR(-BCH_ERR_ENOMEM_btree_node_mem_alloc);
-}
-
-/* Slowpath, don't want it inlined into btree_iter_traverse() */
-static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
- struct btree_path *path,
- const struct bkey_i *k,
- enum btree_id btree_id,
- unsigned level,
- enum six_lock_type lock_type,
- bool sync)
-{
- struct bch_fs *c = trans->c;
- struct btree_cache *bc = &c->btree_cache;
- struct btree *b;
-
- if (unlikely(level >= BTREE_MAX_DEPTH)) {
- int ret = bch2_fs_topology_error(c, "attempting to get btree node at level %u, >= max depth %u",
- level, BTREE_MAX_DEPTH);
- return ERR_PTR(ret);
- }
-
- if (unlikely(!bkey_is_btree_ptr(&k->k))) {
- struct printbuf buf = PRINTBUF;
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
-
- int ret = bch2_fs_topology_error(c, "attempting to get btree node with non-btree key %s", buf.buf);
- printbuf_exit(&buf);
- return ERR_PTR(ret);
- }
-
- if (unlikely(k->k.u64s > BKEY_BTREE_PTR_U64s_MAX)) {
- struct printbuf buf = PRINTBUF;
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
-
- int ret = bch2_fs_topology_error(c, "attempting to get btree node with too big key %s", buf.buf);
- printbuf_exit(&buf);
- return ERR_PTR(ret);
- }
-
- /*
- * Parent node must be locked, else we could read in a btree node that's
- * been freed:
- */
- if (path && !bch2_btree_node_relock(trans, path, level + 1)) {
- trace_and_count(c, trans_restart_relock_parent_for_fill, trans, _THIS_IP_, path);
- return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
- }
-
- b = bch2_btree_node_mem_alloc(trans, level != 0);
-
- if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) {
- if (!path)
- return b;
-
- trans->memory_allocation_failure = true;
- trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
- return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
- }
-
- if (IS_ERR(b))
- return b;
-
- bkey_copy(&b->key, k);
- if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) {
- /* raced with another fill: */
-
- /* mark as unhashed... */
- b->hash_val = 0;
-
- mutex_lock(&bc->lock);
- __bch2_btree_node_to_freelist(bc, b);
- mutex_unlock(&bc->lock);
-
- six_unlock_write(&b->c.lock);
- six_unlock_intent(&b->c.lock);
- return NULL;
- }
-
- set_btree_node_read_in_flight(b);
- six_unlock_write(&b->c.lock);
-
- if (path) {
- u32 seq = six_lock_seq(&b->c.lock);
-
- /* Unlock before doing IO: */
- six_unlock_intent(&b->c.lock);
- bch2_trans_unlock_noassert(trans);
-
- bch2_btree_node_read(trans, b, sync);
-
- int ret = bch2_trans_relock(trans);
- if (ret)
- return ERR_PTR(ret);
-
- if (!sync)
- return NULL;
-
- if (!six_relock_type(&b->c.lock, lock_type, seq))
- b = NULL;
- } else {
- bch2_btree_node_read(trans, b, sync);
- if (lock_type == SIX_LOCK_read)
- six_lock_downgrade(&b->c.lock);
- }
-
- return b;
-}
-
-static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
-{
- struct printbuf buf = PRINTBUF;
-
- if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations)
- return;
-
- prt_printf(&buf,
- "btree node header doesn't match ptr\n"
- "btree %s level %u\n"
- "ptr: ",
- bch2_btree_id_str(b->c.btree_id), b->c.level);
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
-
- prt_printf(&buf, "\nheader: btree %s level %llu\n"
- "min ",
- bch2_btree_id_str(BTREE_NODE_ID(b->data)),
- BTREE_NODE_LEVEL(b->data));
- bch2_bpos_to_text(&buf, b->data->min_key);
-
- prt_printf(&buf, "\nmax ");
- bch2_bpos_to_text(&buf, b->data->max_key);
-
- bch2_fs_topology_error(c, "%s", buf.buf);
-
- printbuf_exit(&buf);
-}
-
-static inline void btree_check_header(struct bch_fs *c, struct btree *b)
-{
- if (b->c.btree_id != BTREE_NODE_ID(b->data) ||
- b->c.level != BTREE_NODE_LEVEL(b->data) ||
- !bpos_eq(b->data->max_key, b->key.k.p) ||
- (b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
- !bpos_eq(b->data->min_key,
- bkey_i_to_btree_ptr_v2(&b->key)->v.min_key)))
- btree_bad_header(c, b);
-}
-
-static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
- const struct bkey_i *k, unsigned level,
- enum six_lock_type lock_type,
- unsigned long trace_ip)
-{
- struct bch_fs *c = trans->c;
- struct btree_cache *bc = &c->btree_cache;
- struct btree *b;
- bool need_relock = false;
- int ret;
-
- EBUG_ON(level >= BTREE_MAX_DEPTH);
-retry:
- b = btree_cache_find(bc, k);
- if (unlikely(!b)) {
- /*
- * We must have the parent locked to call bch2_btree_node_fill(),
- * else we could read in a btree node from disk that's been
- * freed:
- */
- b = bch2_btree_node_fill(trans, path, k, path->btree_id,
- level, lock_type, true);
- need_relock = true;
-
- /* We raced and found the btree node in the cache */
- if (!b)
- goto retry;
-
- if (IS_ERR(b))
- return b;
- } else {
- if (btree_node_read_locked(path, level + 1))
- btree_node_unlock(trans, path, level + 1);
-
- ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- return ERR_PTR(ret);
-
- BUG_ON(ret);
-
- if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
- b->c.level != level ||
- race_fault())) {
- six_unlock_type(&b->c.lock, lock_type);
- if (bch2_btree_node_relock(trans, path, level + 1))
- goto retry;
-
- trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
- return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
- }
-
- /* avoid atomic set bit if it's not needed: */
- if (!btree_node_accessed(b))
- set_btree_node_accessed(b);
- }
-
- if (unlikely(btree_node_read_in_flight(b))) {
- u32 seq = six_lock_seq(&b->c.lock);
-
- six_unlock_type(&b->c.lock, lock_type);
- bch2_trans_unlock(trans);
- need_relock = true;
-
- bch2_btree_node_wait_on_read(b);
-
- ret = bch2_trans_relock(trans);
- if (ret)
- return ERR_PTR(ret);
-
- /*
- * should_be_locked is not set on this path yet, so we need to
- * relock it specifically:
- */
- if (!six_relock_type(&b->c.lock, lock_type, seq))
- goto retry;
- }
-
- if (unlikely(need_relock)) {
- ret = bch2_trans_relock(trans) ?:
- bch2_btree_path_relock_intent(trans, path);
- if (ret) {
- six_unlock_type(&b->c.lock, lock_type);
- return ERR_PTR(ret);
- }
- }
-
- prefetch(b->aux_data);
-
- for_each_bset(b, t) {
- void *p = (u64 *) b->aux_data + t->aux_data_offset;
-
- prefetch(p + L1_CACHE_BYTES * 0);
- prefetch(p + L1_CACHE_BYTES * 1);
- prefetch(p + L1_CACHE_BYTES * 2);
- }
-
- if (unlikely(btree_node_read_error(b))) {
- six_unlock_type(&b->c.lock, lock_type);
- return ERR_PTR(-BCH_ERR_btree_node_read_error);
- }
-
- EBUG_ON(b->c.btree_id != path->btree_id);
- EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
- btree_check_header(c, b);
-
- return b;
-}
-
-/**
- * bch2_btree_node_get - find a btree node in the cache and lock it, reading it
- * in from disk if necessary.
- *
- * @trans: btree transaction object
- * @path: btree_path being traversed
- * @k: pointer to btree node (generally KEY_TYPE_btree_ptr_v2)
- * @level: level of btree node being looked up (0 == leaf node)
- * @lock_type: SIX_LOCK_read or SIX_LOCK_intent
- * @trace_ip: ip of caller of btree iterator code (i.e. caller of bch2_btree_iter_peek())
- *
- * The btree node will be returned with either a read or an intent lock held,
- * depending on @lock_type.
- *
- * Returns: btree node or ERR_PTR()
- */
-struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
- const struct bkey_i *k, unsigned level,
- enum six_lock_type lock_type,
- unsigned long trace_ip)
-{
- struct bch_fs *c = trans->c;
- struct btree *b;
- int ret;
-
- EBUG_ON(level >= BTREE_MAX_DEPTH);
-
- b = btree_node_mem_ptr(k);
-
- /*
- * Check b->hash_val _before_ calling btree_node_lock() - this might not
- * be the node we want anymore, and trying to lock the wrong node could
-	 * cause an unnecessary transaction restart:
- */
- if (unlikely(!c->opts.btree_node_mem_ptr_optimization ||
- !b ||
- b->hash_val != btree_ptr_hash_val(k)))
- return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
-
- if (btree_node_read_locked(path, level + 1))
- btree_node_unlock(trans, path, level + 1);
-
- ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- return ERR_PTR(ret);
-
- BUG_ON(ret);
-
- if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
- b->c.level != level ||
- race_fault())) {
- six_unlock_type(&b->c.lock, lock_type);
- if (bch2_btree_node_relock(trans, path, level + 1))
- return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
-
- trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
- return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
- }
-
- if (unlikely(btree_node_read_in_flight(b))) {
- six_unlock_type(&b->c.lock, lock_type);
- return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
- }
-
- prefetch(b->aux_data);
-
- for_each_bset(b, t) {
- void *p = (u64 *) b->aux_data + t->aux_data_offset;
-
- prefetch(p + L1_CACHE_BYTES * 0);
- prefetch(p + L1_CACHE_BYTES * 1);
- prefetch(p + L1_CACHE_BYTES * 2);
- }
-
- /* avoid atomic set bit if it's not needed: */
- if (!btree_node_accessed(b))
- set_btree_node_accessed(b);
-
- if (unlikely(btree_node_read_error(b))) {
- six_unlock_type(&b->c.lock, lock_type);
- return ERR_PTR(-BCH_ERR_btree_node_read_error);
- }
-
- EBUG_ON(b->c.btree_id != path->btree_id);
- EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
- btree_check_header(c, b);
-
- return b;
-}
-
-struct btree *bch2_btree_node_get_noiter(struct btree_trans *trans,
- const struct bkey_i *k,
- enum btree_id btree_id,
- unsigned level,
- bool nofill)
-{
- struct bch_fs *c = trans->c;
- struct btree_cache *bc = &c->btree_cache;
- struct btree *b;
- int ret;
-
- EBUG_ON(level >= BTREE_MAX_DEPTH);
-
- if (c->opts.btree_node_mem_ptr_optimization) {
- b = btree_node_mem_ptr(k);
- if (b)
- goto lock_node;
- }
-retry:
- b = btree_cache_find(bc, k);
- if (unlikely(!b)) {
- if (nofill)
- goto out;
-
- b = bch2_btree_node_fill(trans, NULL, k, btree_id,
- level, SIX_LOCK_read, true);
-
- /* We raced and found the btree node in the cache */
- if (!b)
- goto retry;
-
- if (IS_ERR(b) &&
- !bch2_btree_cache_cannibalize_lock(trans, NULL))
- goto retry;
-
- if (IS_ERR(b))
- goto out;
- } else {
-lock_node:
- ret = btree_node_lock_nopath(trans, &b->c, SIX_LOCK_read, _THIS_IP_);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- return ERR_PTR(ret);
-
- BUG_ON(ret);
-
- if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
- b->c.btree_id != btree_id ||
- b->c.level != level)) {
- six_unlock_read(&b->c.lock);
- goto retry;
- }
- }
-
- /* XXX: waiting on IO with btree locks held: */
- __bch2_btree_node_wait_on_read(b);
-
- prefetch(b->aux_data);
-
- for_each_bset(b, t) {
- void *p = (u64 *) b->aux_data + t->aux_data_offset;
-
- prefetch(p + L1_CACHE_BYTES * 0);
- prefetch(p + L1_CACHE_BYTES * 1);
- prefetch(p + L1_CACHE_BYTES * 2);
- }
-
- /* avoid atomic set bit if it's not needed: */
- if (!btree_node_accessed(b))
- set_btree_node_accessed(b);
-
- if (unlikely(btree_node_read_error(b))) {
- six_unlock_read(&b->c.lock);
- b = ERR_PTR(-BCH_ERR_btree_node_read_error);
- goto out;
- }
-
- EBUG_ON(b->c.btree_id != btree_id);
- EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
- btree_check_header(c, b);
-out:
- bch2_btree_cache_cannibalize_unlock(trans);
- return b;
-}
-
-int bch2_btree_node_prefetch(struct btree_trans *trans,
- struct btree_path *path,
- const struct bkey_i *k,
- enum btree_id btree_id, unsigned level)
-{
- struct bch_fs *c = trans->c;
- struct btree_cache *bc = &c->btree_cache;
-
- BUG_ON(path && !btree_node_locked(path, level + 1));
- BUG_ON(level >= BTREE_MAX_DEPTH);
-
- struct btree *b = btree_cache_find(bc, k);
- if (b)
- return 0;
-
- b = bch2_btree_node_fill(trans, path, k, btree_id,
- level, SIX_LOCK_read, false);
- int ret = PTR_ERR_OR_ZERO(b);
- if (ret)
- return ret;
- if (b)
- six_unlock_read(&b->c.lock);
- return 0;
-}
-
-void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k)
-{
- struct bch_fs *c = trans->c;
- struct btree_cache *bc = &c->btree_cache;
- struct btree *b;
-
- b = btree_cache_find(bc, k);
- if (!b)
- return;
-
- BUG_ON(b == btree_node_root(trans->c, b));
-wait_on_io:
- /* not allowed to wait on io with btree locks held: */
-
- /* XXX we're called from btree_gc which will be holding other btree
- * nodes locked
- */
- __bch2_btree_node_wait_on_read(b);
- __bch2_btree_node_wait_on_write(b);
-
- btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
- btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
- if (unlikely(b->hash_val != btree_ptr_hash_val(k)))
- goto out;
-
- if (btree_node_dirty(b)) {
- __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
- six_unlock_write(&b->c.lock);
- six_unlock_intent(&b->c.lock);
- goto wait_on_io;
- }
-
- BUG_ON(btree_node_dirty(b));
-
- mutex_lock(&bc->lock);
- bch2_btree_node_hash_remove(bc, b);
- btree_node_data_free(bc, b);
- mutex_unlock(&bc->lock);
-out:
- six_unlock_write(&b->c.lock);
- six_unlock_intent(&b->c.lock);
-}
-
-const char *bch2_btree_id_str(enum btree_id btree)
-{
- return btree < BTREE_ID_NR ? __bch2_btree_ids[btree] : "(unknown)";
-}
-
-void bch2_btree_id_to_text(struct printbuf *out, enum btree_id btree)
-{
- if (btree < BTREE_ID_NR)
- prt_str(out, __bch2_btree_ids[btree]);
- else
- prt_printf(out, "(unknown btree %u)", btree);
-}
-
-void bch2_btree_pos_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b)
-{
- prt_printf(out, "%s level %u/%u\n ",
- bch2_btree_id_str(b->c.btree_id),
- b->c.level,
- bch2_btree_id_root(c, b->c.btree_id)->level);
- bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
-}
-
-void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b)
-{
- struct bset_stats stats;
-
- memset(&stats, 0, sizeof(stats));
-
- bch2_btree_keys_stats(b, &stats);
-
- prt_printf(out, "l %u ", b->c.level);
- bch2_bpos_to_text(out, b->data->min_key);
- prt_printf(out, " - ");
- bch2_bpos_to_text(out, b->data->max_key);
- prt_printf(out, ":\n"
- " ptrs: ");
- bch2_val_to_text(out, c, bkey_i_to_s_c(&b->key));
- prt_newline(out);
-
- prt_printf(out,
- " format: ");
- bch2_bkey_format_to_text(out, &b->format);
-
- prt_printf(out,
- " unpack fn len: %u\n"
- " bytes used %zu/%zu (%zu%% full)\n"
- " sib u64s: %u, %u (merge threshold %u)\n"
- " nr packed keys %u\n"
- " nr unpacked keys %u\n"
- " floats %zu\n"
- " failed unpacked %zu\n",
- b->unpack_fn_len,
- b->nr.live_u64s * sizeof(u64),
- btree_buf_bytes(b) - sizeof(struct btree_node),
- b->nr.live_u64s * 100 / btree_max_u64s(c),
- b->sib_u64s[0],
- b->sib_u64s[1],
- c->btree_foreground_merge_threshold,
- b->nr.packed_keys,
- b->nr.unpacked_keys,
- stats.floats,
- stats.failed);
-}
-
-static void prt_btree_cache_line(struct printbuf *out, const struct bch_fs *c,
- const char *label, size_t nr)
-{
- prt_printf(out, "%s\t", label);
- prt_human_readable_u64(out, nr * c->opts.btree_node_size);
- prt_printf(out, " (%zu)\n", nr);
-}
-
-static const char * const bch2_btree_cache_not_freed_reasons_strs[] = {
-#define x(n) #n,
- BCH_BTREE_CACHE_NOT_FREED_REASONS()
-#undef x
- NULL
-};
-
-void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc)
-{
- struct bch_fs *c = container_of(bc, struct bch_fs, btree_cache);
-
- if (!out->nr_tabstops)
- printbuf_tabstop_push(out, 32);
-
- prt_btree_cache_line(out, c, "live:", bc->live[0].nr);
- prt_btree_cache_line(out, c, "pinned:", bc->live[1].nr);
- prt_btree_cache_line(out, c, "freeable:", bc->nr_freeable);
- prt_btree_cache_line(out, c, "dirty:", atomic_long_read(&bc->nr_dirty));
- prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock);
- prt_newline(out);
-
- for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++)
- prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->nr_by_btree[i]);
-
- prt_newline(out);
- prt_printf(out, "freed:\t%zu\n", bc->nr_freed);
- prt_printf(out, "not freed:\n");
-
- for (unsigned i = 0; i < ARRAY_SIZE(bc->not_freed); i++)
- prt_printf(out, " %s\t%llu\n",
- bch2_btree_cache_not_freed_reasons_strs[i], bc->not_freed[i]);
-}
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
deleted file mode 100644
index 66e86d1a178d..000000000000
--- a/fs/bcachefs/btree_cache.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BTREE_CACHE_H
-#define _BCACHEFS_BTREE_CACHE_H
-
-#include "bcachefs.h"
-#include "btree_types.h"
-#include "bkey_methods.h"
-
-extern const char * const bch2_btree_node_flags[];
-
-struct btree_iter;
-
-void bch2_recalc_btree_reserve(struct bch_fs *);
-
-void bch2_btree_node_to_freelist(struct bch_fs *, struct btree *);
-
-void __bch2_btree_node_hash_remove(struct btree_cache *, struct btree *);
-void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *);
-
-int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *);
-int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
- unsigned, enum btree_id);
-
-void bch2_node_pin(struct bch_fs *, struct btree *);
-void bch2_btree_cache_unpin(struct bch_fs *);
-
-void bch2_btree_node_update_key_early(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_i *);
-
-void bch2_btree_cache_cannibalize_unlock(struct btree_trans *);
-int bch2_btree_cache_cannibalize_lock(struct btree_trans *, struct closure *);
-
-struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
-struct btree *bch2_btree_node_mem_alloc(struct btree_trans *, bool);
-
-struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_path *,
- const struct bkey_i *, unsigned,
- enum six_lock_type, unsigned long);
-
-struct btree *bch2_btree_node_get_noiter(struct btree_trans *, const struct bkey_i *,
- enum btree_id, unsigned, bool);
-
-int bch2_btree_node_prefetch(struct btree_trans *, struct btree_path *,
- const struct bkey_i *, enum btree_id, unsigned);
-
-void bch2_btree_node_evict(struct btree_trans *, const struct bkey_i *);
-
-void bch2_fs_btree_cache_exit(struct bch_fs *);
-int bch2_fs_btree_cache_init(struct bch_fs *);
-void bch2_fs_btree_cache_init_early(struct btree_cache *);
-
-static inline u64 btree_ptr_hash_val(const struct bkey_i *k)
-{
- switch (k->k.type) {
- case KEY_TYPE_btree_ptr:
- return *((u64 *) bkey_i_to_btree_ptr_c(k)->v.start);
- case KEY_TYPE_btree_ptr_v2:
- /*
- * The cast/deref is only necessary to avoid sparse endianness
- * warnings:
- */
- return *((u64 *) &bkey_i_to_btree_ptr_v2_c(k)->v.seq);
- default:
- return 0;
- }
-}
-
-static inline struct btree *btree_node_mem_ptr(const struct bkey_i *k)
-{
- return k->k.type == KEY_TYPE_btree_ptr_v2
- ? (void *)(unsigned long)bkey_i_to_btree_ptr_v2_c(k)->v.mem_ptr
- : NULL;
-}
-
-/* is btree node in hash table? */
-static inline bool btree_node_hashed(struct btree *b)
-{
- return b->hash_val != 0;
-}
-
-#define for_each_cached_btree(_b, _c, _tbl, _iter, _pos) \
- for ((_tbl) = rht_dereference_rcu((_c)->btree_cache.table.tbl, \
- &(_c)->btree_cache.table), \
- _iter = 0; _iter < (_tbl)->size; _iter++) \
- rht_for_each_entry_rcu((_b), (_pos), _tbl, _iter, hash)
-
-static inline size_t btree_buf_bytes(const struct btree *b)
-{
- return 1UL << b->byte_order;
-}
-
-static inline size_t btree_buf_max_u64s(const struct btree *b)
-{
- return (btree_buf_bytes(b) - sizeof(struct btree_node)) / sizeof(u64);
-}
-
-static inline size_t btree_max_u64s(const struct bch_fs *c)
-{
- return (c->opts.btree_node_size - sizeof(struct btree_node)) / sizeof(u64);
-}
-
-static inline size_t btree_sectors(const struct bch_fs *c)
-{
- return c->opts.btree_node_size >> SECTOR_SHIFT;
-}
-
-static inline unsigned btree_blocks(const struct bch_fs *c)
-{
- return btree_sectors(c) >> c->block_bits;
-}
-
-#define BTREE_SPLIT_THRESHOLD(c) (btree_max_u64s(c) * 2 / 3)
-
-#define BTREE_FOREGROUND_MERGE_THRESHOLD(c) (btree_max_u64s(c) * 1 / 3)
-#define BTREE_FOREGROUND_MERGE_HYSTERESIS(c) \
- (BTREE_FOREGROUND_MERGE_THRESHOLD(c) + \
- (BTREE_FOREGROUND_MERGE_THRESHOLD(c) >> 2))
-
-static inline unsigned btree_id_nr_alive(struct bch_fs *c)
-{
- return BTREE_ID_NR + c->btree_roots_extra.nr;
-}
-
-static inline struct btree_root *bch2_btree_id_root(struct bch_fs *c, unsigned id)
-{
- if (likely(id < BTREE_ID_NR)) {
- return &c->btree_roots_known[id];
- } else {
- unsigned idx = id - BTREE_ID_NR;
-
- EBUG_ON(idx >= c->btree_roots_extra.nr);
- return &c->btree_roots_extra.data[idx];
- }
-}
-
-static inline struct btree *btree_node_root(struct bch_fs *c, struct btree *b)
-{
- return bch2_btree_id_root(c, b->c.btree_id)->b;
-}
-
-const char *bch2_btree_id_str(enum btree_id);
-void bch2_btree_id_to_text(struct printbuf *, enum btree_id);
-
-void bch2_btree_pos_to_text(struct printbuf *, struct bch_fs *, const struct btree *);
-void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *, const struct btree *);
-void bch2_btree_cache_to_text(struct printbuf *, const struct btree_cache *);
-
-#endif /* _BCACHEFS_BTREE_CACHE_H */
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
deleted file mode 100644
index 81dcf9e512c0..000000000000
--- a/fs/bcachefs/btree_gc.c
+++ /dev/null
@@ -1,1345 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
- * Copyright (C) 2014 Datera Inc.
- */
-
-#include "bcachefs.h"
-#include "alloc_background.h"
-#include "alloc_foreground.h"
-#include "backpointers.h"
-#include "bkey_methods.h"
-#include "bkey_buf.h"
-#include "btree_journal_iter.h"
-#include "btree_key_cache.h"
-#include "btree_locking.h"
-#include "btree_node_scan.h"
-#include "btree_update_interior.h"
-#include "btree_io.h"
-#include "btree_gc.h"
-#include "buckets.h"
-#include "clock.h"
-#include "debug.h"
-#include "disk_accounting.h"
-#include "ec.h"
-#include "error.h"
-#include "extents.h"
-#include "journal.h"
-#include "keylist.h"
-#include "move.h"
-#include "recovery_passes.h"
-#include "reflink.h"
-#include "replicas.h"
-#include "super-io.h"
-#include "trace.h"
-
-#include <linux/slab.h>
-#include <linux/bitops.h>
-#include <linux/freezer.h>
-#include <linux/kthread.h>
-#include <linux/preempt.h>
-#include <linux/rcupdate.h>
-#include <linux/sched/task.h>
-
-#define DROP_THIS_NODE 10
-#define DROP_PREV_NODE 11
-#define DID_FILL_FROM_SCAN 12
-
-static const char * const bch2_gc_phase_strs[] = {
-#define x(n) #n,
- GC_PHASES()
-#undef x
- NULL
-};
-
-void bch2_gc_pos_to_text(struct printbuf *out, struct gc_pos *p)
-{
- prt_str(out, bch2_gc_phase_strs[p->phase]);
- prt_char(out, ' ');
- bch2_btree_id_to_text(out, p->btree);
- prt_printf(out, " l=%u ", p->level);
- bch2_bpos_to_text(out, p->pos);
-}
-
-static struct bkey_s unsafe_bkey_s_c_to_s(struct bkey_s_c k)
-{
- return (struct bkey_s) {{{
- (struct bkey *) k.k,
- (struct bch_val *) k.v
- }}};
-}
-
-static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
-{
- preempt_disable();
- write_seqcount_begin(&c->gc_pos_lock);
- c->gc_pos = new_pos;
- write_seqcount_end(&c->gc_pos_lock);
- preempt_enable();
-}
-
-static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
-{
- BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) < 0);
- __gc_pos_set(c, new_pos);
-}
-
-static void btree_ptr_to_v2(struct btree *b, struct bkey_i_btree_ptr_v2 *dst)
-{
- switch (b->key.k.type) {
- case KEY_TYPE_btree_ptr: {
- struct bkey_i_btree_ptr *src = bkey_i_to_btree_ptr(&b->key);
-
- dst->k.p = src->k.p;
- dst->v.mem_ptr = 0;
- dst->v.seq = b->data->keys.seq;
- dst->v.sectors_written = 0;
- dst->v.flags = 0;
- dst->v.min_key = b->data->min_key;
- set_bkey_val_bytes(&dst->k, sizeof(dst->v) + bkey_val_bytes(&src->k));
- memcpy(dst->v.start, src->v.start, bkey_val_bytes(&src->k));
- break;
- }
- case KEY_TYPE_btree_ptr_v2:
- bkey_copy(&dst->k_i, &b->key);
- break;
- default:
- BUG();
- }
-}
-
-static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
-{
- struct bkey_i_btree_ptr_v2 *new;
- int ret;
-
- if (c->opts.verbose) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
- prt_str(&buf, " -> ");
- bch2_bpos_to_text(&buf, new_min);
-
- bch_info(c, "%s(): %s", __func__, buf.buf);
- printbuf_exit(&buf);
- }
-
- new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
- if (!new)
- return -BCH_ERR_ENOMEM_gc_repair_key;
-
- btree_ptr_to_v2(b, new);
- b->data->min_key = new_min;
- new->v.min_key = new_min;
- SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
-
- ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
- if (ret) {
- kfree(new);
- return ret;
- }
-
- bch2_btree_node_drop_keys_outside_node(b);
- bkey_copy(&b->key, &new->k_i);
- return 0;
-}
-
-static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
-{
- struct bkey_i_btree_ptr_v2 *new;
- int ret;
-
- if (c->opts.verbose) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
- prt_str(&buf, " -> ");
- bch2_bpos_to_text(&buf, new_max);
-
- bch_info(c, "%s(): %s", __func__, buf.buf);
- printbuf_exit(&buf);
- }
-
- ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level + 1, b->key.k.p);
- if (ret)
- return ret;
-
- new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
- if (!new)
- return -BCH_ERR_ENOMEM_gc_repair_key;
-
- btree_ptr_to_v2(b, new);
- b->data->max_key = new_max;
- new->k.p = new_max;
- SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
-
- ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
- if (ret) {
- kfree(new);
- return ret;
- }
-
- bch2_btree_node_drop_keys_outside_node(b);
-
- mutex_lock(&c->btree_cache.lock);
- __bch2_btree_node_hash_remove(&c->btree_cache, b);
-
- bkey_copy(&b->key, &new->k_i);
- ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
- BUG_ON(ret);
- mutex_unlock(&c->btree_cache.lock);
- return 0;
-}
-
-static int btree_check_node_boundaries(struct btree_trans *trans, struct btree *b,
- struct btree *prev, struct btree *cur,
- struct bpos *pulled_from_scan)
-{
- struct bch_fs *c = trans->c;
- struct bpos expected_start = !prev
- ? b->data->min_key
- : bpos_successor(prev->key.k.p);
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- BUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
- !bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key,
- b->data->min_key));
-
- if (bpos_eq(expected_start, cur->data->min_key))
- return 0;
-
- prt_printf(&buf, " at btree %s level %u:\n parent: ",
- bch2_btree_id_str(b->c.btree_id), b->c.level);
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
-
- if (prev) {
- prt_printf(&buf, "\n prev: ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&prev->key));
- }
-
- prt_str(&buf, "\n next: ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&cur->key));
-
- if (bpos_lt(expected_start, cur->data->min_key)) { /* gap */
- if (b->c.level == 1 &&
- bpos_lt(*pulled_from_scan, cur->data->min_key)) {
- ret = bch2_get_scanned_nodes(c, b->c.btree_id, 0,
- expected_start,
- bpos_predecessor(cur->data->min_key));
- if (ret)
- goto err;
-
- *pulled_from_scan = cur->data->min_key;
- ret = DID_FILL_FROM_SCAN;
- } else {
- if (mustfix_fsck_err(trans, btree_node_topology_bad_min_key,
- "btree node with incorrect min_key%s", buf.buf))
- ret = set_node_min(c, cur, expected_start);
- }
- } else { /* overlap */
- if (prev && BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) { /* cur overwrites prev */
- if (bpos_ge(prev->data->min_key, cur->data->min_key)) { /* fully? */
- if (mustfix_fsck_err(trans, btree_node_topology_overwritten_by_next_node,
- "btree node overwritten by next node%s", buf.buf))
- ret = DROP_PREV_NODE;
- } else {
- if (mustfix_fsck_err(trans, btree_node_topology_bad_max_key,
- "btree node with incorrect max_key%s", buf.buf))
- ret = set_node_max(c, prev,
- bpos_predecessor(cur->data->min_key));
- }
- } else {
- if (bpos_ge(expected_start, cur->data->max_key)) { /* fully? */
- if (mustfix_fsck_err(trans, btree_node_topology_overwritten_by_prev_node,
- "btree node overwritten by prev node%s", buf.buf))
- ret = DROP_THIS_NODE;
- } else {
- if (mustfix_fsck_err(trans, btree_node_topology_bad_min_key,
- "btree node with incorrect min_key%s", buf.buf))
- ret = set_node_min(c, cur, expected_start);
- }
- }
- }
-err:
-fsck_err:
- printbuf_exit(&buf);
- return ret;
-}
-
-static int btree_repair_node_end(struct btree_trans *trans, struct btree *b,
- struct btree *child, struct bpos *pulled_from_scan)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- if (bpos_eq(child->key.k.p, b->key.k.p))
- return 0;
-
- prt_printf(&buf, "at btree %s level %u:\n parent: ",
- bch2_btree_id_str(b->c.btree_id), b->c.level);
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
-
- prt_str(&buf, "\n child: ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&child->key));
-
- if (mustfix_fsck_err(trans, btree_node_topology_bad_max_key,
- "btree node with incorrect max_key%s", buf.buf)) {
- if (b->c.level == 1 &&
- bpos_lt(*pulled_from_scan, b->key.k.p)) {
- ret = bch2_get_scanned_nodes(c, b->c.btree_id, 0,
- bpos_successor(child->key.k.p), b->key.k.p);
- if (ret)
- goto err;
-
- *pulled_from_scan = b->key.k.p;
- ret = DID_FILL_FROM_SCAN;
- } else {
- ret = set_node_max(c, child, b->key.k.p);
- }
- }
-err:
-fsck_err:
- printbuf_exit(&buf);
- return ret;
-}
-
-static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct btree *b,
- struct bpos *pulled_from_scan)
-{
- struct bch_fs *c = trans->c;
- struct btree_and_journal_iter iter;
- struct bkey_s_c k;
- struct bkey_buf prev_k, cur_k;
- struct btree *prev = NULL, *cur = NULL;
- bool have_child, new_pass = false;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- if (!b->c.level)
- return 0;
-
- bch2_bkey_buf_init(&prev_k);
- bch2_bkey_buf_init(&cur_k);
-again:
- cur = prev = NULL;
- have_child = new_pass = false;
- bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
- iter.prefetch = true;
-
- while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
- BUG_ON(bpos_lt(k.k->p, b->data->min_key));
- BUG_ON(bpos_gt(k.k->p, b->data->max_key));
-
- bch2_btree_and_journal_iter_advance(&iter);
- bch2_bkey_buf_reassemble(&cur_k, c, k);
-
- cur = bch2_btree_node_get_noiter(trans, cur_k.k,
- b->c.btree_id, b->c.level - 1,
- false);
- ret = PTR_ERR_OR_ZERO(cur);
-
- printbuf_reset(&buf);
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k));
-
- if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO),
- trans, btree_node_unreadable,
- "Topology repair: unreadable btree node at btree %s level %u:\n"
- " %s",
- bch2_btree_id_str(b->c.btree_id),
- b->c.level - 1,
- buf.buf)) {
- bch2_btree_node_evict(trans, cur_k.k);
- cur = NULL;
- ret = bch2_journal_key_delete(c, b->c.btree_id,
- b->c.level, cur_k.k->k.p);
- if (ret)
- break;
-
- if (!btree_id_is_alloc(b->c.btree_id)) {
- ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes);
- if (ret)
- break;
- }
- continue;
- }
-
- bch_err_msg(c, ret, "getting btree node");
- if (ret)
- break;
-
- if (bch2_btree_node_is_stale(c, cur)) {
- bch_info(c, "btree node %s older than nodes found by scanning", buf.buf);
- six_unlock_read(&cur->c.lock);
- bch2_btree_node_evict(trans, cur_k.k);
- ret = bch2_journal_key_delete(c, b->c.btree_id,
- b->c.level, cur_k.k->k.p);
- cur = NULL;
- if (ret)
- break;
- continue;
- }
-
- ret = btree_check_node_boundaries(trans, b, prev, cur, pulled_from_scan);
- if (ret == DID_FILL_FROM_SCAN) {
- new_pass = true;
- ret = 0;
- }
-
- if (ret == DROP_THIS_NODE) {
- six_unlock_read(&cur->c.lock);
- bch2_btree_node_evict(trans, cur_k.k);
- ret = bch2_journal_key_delete(c, b->c.btree_id,
- b->c.level, cur_k.k->k.p);
- cur = NULL;
- if (ret)
- break;
- continue;
- }
-
- if (prev)
- six_unlock_read(&prev->c.lock);
- prev = NULL;
-
- if (ret == DROP_PREV_NODE) {
- bch_info(c, "dropped prev node");
- bch2_btree_node_evict(trans, prev_k.k);
- ret = bch2_journal_key_delete(c, b->c.btree_id,
- b->c.level, prev_k.k->k.p);
- if (ret)
- break;
-
- bch2_btree_and_journal_iter_exit(&iter);
- goto again;
- } else if (ret)
- break;
-
- prev = cur;
- cur = NULL;
- bch2_bkey_buf_copy(&prev_k, c, cur_k.k);
- }
-
- if (!ret && !IS_ERR_OR_NULL(prev)) {
- BUG_ON(cur);
- ret = btree_repair_node_end(trans, b, prev, pulled_from_scan);
- if (ret == DID_FILL_FROM_SCAN) {
- new_pass = true;
- ret = 0;
- }
- }
-
- if (!IS_ERR_OR_NULL(prev))
- six_unlock_read(&prev->c.lock);
- prev = NULL;
- if (!IS_ERR_OR_NULL(cur))
- six_unlock_read(&cur->c.lock);
- cur = NULL;
-
- if (ret)
- goto err;
-
- bch2_btree_and_journal_iter_exit(&iter);
-
- if (new_pass)
- goto again;
-
- bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
- iter.prefetch = true;
-
- while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
- bch2_bkey_buf_reassemble(&cur_k, c, k);
- bch2_btree_and_journal_iter_advance(&iter);
-
- cur = bch2_btree_node_get_noiter(trans, cur_k.k,
- b->c.btree_id, b->c.level - 1,
- false);
- ret = PTR_ERR_OR_ZERO(cur);
-
- bch_err_msg(c, ret, "getting btree node");
- if (ret)
- goto err;
-
- ret = bch2_btree_repair_topology_recurse(trans, cur, pulled_from_scan);
- six_unlock_read(&cur->c.lock);
- cur = NULL;
-
- if (ret == DROP_THIS_NODE) {
- bch2_btree_node_evict(trans, cur_k.k);
- ret = bch2_journal_key_delete(c, b->c.btree_id,
- b->c.level, cur_k.k->k.p);
- new_pass = true;
- }
-
- if (ret)
- goto err;
-
- have_child = true;
- }
-
- printbuf_reset(&buf);
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
-
- if (mustfix_fsck_err_on(!have_child,
- trans, btree_node_topology_interior_node_empty,
- "empty interior btree node at btree %s level %u\n"
- " %s",
- bch2_btree_id_str(b->c.btree_id),
- b->c.level, buf.buf))
- ret = DROP_THIS_NODE;
-err:
-fsck_err:
- if (!IS_ERR_OR_NULL(prev))
- six_unlock_read(&prev->c.lock);
- if (!IS_ERR_OR_NULL(cur))
- six_unlock_read(&cur->c.lock);
-
- bch2_btree_and_journal_iter_exit(&iter);
-
- if (!ret && new_pass)
- goto again;
-
- BUG_ON(!ret && bch2_btree_node_check_topology(trans, b));
-
- bch2_bkey_buf_exit(&prev_k, c);
- bch2_bkey_buf_exit(&cur_k, c);
- printbuf_exit(&buf);
- return ret;
-}
-
-int bch2_check_topology(struct bch_fs *c)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct bpos pulled_from_scan = POS_MIN;
- int ret = 0;
-
- bch2_trans_srcu_unlock(trans);
-
- for (unsigned i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
- struct btree_root *r = bch2_btree_id_root(c, i);
- bool reconstructed_root = false;
-
- if (r->error) {
- ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes);
- if (ret)
- break;
-reconstruct_root:
- bch_info(c, "btree root %s unreadable, must recover from scan", bch2_btree_id_str(i));
-
- r->alive = false;
- r->error = 0;
-
- if (!bch2_btree_has_scanned_nodes(c, i)) {
- mustfix_fsck_err(trans, btree_root_unreadable_and_scan_found_nothing,
- "no nodes found for btree %s, continue?", bch2_btree_id_str(i));
- bch2_btree_root_alloc_fake_trans(trans, i, 0);
- } else {
- bch2_btree_root_alloc_fake_trans(trans, i, 1);
- bch2_shoot_down_journal_keys(c, i, 1, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
- ret = bch2_get_scanned_nodes(c, i, 0, POS_MIN, SPOS_MAX);
- if (ret)
- break;
- }
-
- reconstructed_root = true;
- }
-
- struct btree *b = r->b;
-
- btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
- ret = bch2_btree_repair_topology_recurse(trans, b, &pulled_from_scan);
- six_unlock_read(&b->c.lock);
-
- if (ret == DROP_THIS_NODE) {
- mutex_lock(&c->btree_cache.lock);
- bch2_btree_node_hash_remove(&c->btree_cache, b);
- mutex_unlock(&c->btree_cache.lock);
-
- r->b = NULL;
-
- if (!reconstructed_root)
- goto reconstruct_root;
-
- bch_err(c, "empty btree root %s", bch2_btree_id_str(i));
- bch2_btree_root_alloc_fake_trans(trans, i, 0);
- r->alive = false;
- ret = 0;
- }
- }
-fsck_err:
- bch2_trans_put(trans);
- return ret;
-}
-
-/* marking of btree keys/nodes: */
-
-static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
- unsigned level, struct btree **prev,
- struct btree_iter *iter, struct bkey_s_c k,
- bool initial)
-{
- struct bch_fs *c = trans->c;
-
- if (iter) {
- struct btree_path *path = btree_iter_path(trans, iter);
- struct btree *b = path_l(path)->b;
-
- if (*prev != b) {
- int ret = bch2_btree_node_check_topology(trans, b);
- if (ret)
- return ret;
- }
- *prev = b;
- }
-
- struct bkey deleted = KEY(0, 0, 0);
- struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- deleted.p = k.k->p;
-
- if (initial) {
- BUG_ON(bch2_journal_seq_verify &&
- k.k->bversion.lo > atomic64_read(&c->journal.seq));
-
- if (fsck_err_on(btree_id != BTREE_ID_accounting &&
- k.k->bversion.lo > atomic64_read(&c->key_version),
- trans, bkey_version_in_future,
- "key version number higher than recorded %llu\n %s",
- atomic64_read(&c->key_version),
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- atomic64_set(&c->key_version, k.k->bversion.lo);
- }
-
- if (mustfix_fsck_err_on(level && !bch2_dev_btree_bitmap_marked(c, k),
- trans, btree_bitmap_not_marked,
- "btree ptr not marked in member info btree allocated bitmap\n %s",
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k),
- buf.buf))) {
- mutex_lock(&c->sb_lock);
- bch2_dev_btree_bitmap_mark(c, k);
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
- }
-
- /*
- * We require a commit before key_trigger() because
- * key_trigger(BTREE_TRIGGER_GC) is not idempotent; we'll calculate the
- * wrong result if we run it multiple times.
- */
- unsigned flags = !iter ? BTREE_TRIGGER_is_root : 0;
-
- ret = bch2_key_trigger(trans, btree_id, level, old, unsafe_bkey_s_c_to_s(k),
- BTREE_TRIGGER_check_repair|flags);
- if (ret)
- goto out;
-
- if (trans->nr_updates) {
- ret = bch2_trans_commit(trans, NULL, NULL, 0) ?:
- -BCH_ERR_transaction_restart_nested;
- goto out;
- }
-
- ret = bch2_key_trigger(trans, btree_id, level, old, unsafe_bkey_s_c_to_s(k),
- BTREE_TRIGGER_gc|BTREE_TRIGGER_insert|flags);
-out:
-fsck_err:
- printbuf_exit(&buf);
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree, bool initial)
-{
- struct bch_fs *c = trans->c;
- unsigned target_depth = btree_node_type_has_triggers(__btree_node_type(0, btree)) ? 0 : 1;
- int ret = 0;
-
- /* We need to make sure every leaf node is readable before going RW */
- if (initial)
- target_depth = 0;
-
- for (unsigned level = target_depth; level < BTREE_MAX_DEPTH; level++) {
- struct btree *prev = NULL;
- struct btree_iter iter;
- bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN, 0, level,
- BTREE_ITER_prefetch);
-
- ret = for_each_btree_key_continue(trans, iter, 0, k, ({
- gc_pos_set(c, gc_pos_btree(btree, level, k.k->p));
- bch2_gc_mark_key(trans, btree, level, &prev, &iter, k, initial);
- }));
- if (ret)
- goto err;
- }
-
- /* root */
- do {
-retry_root:
- bch2_trans_begin(trans);
-
- struct btree_iter iter;
- bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN,
- 0, bch2_btree_id_root(c, btree)->b->c.level, 0);
- struct btree *b = bch2_btree_iter_peek_node(&iter);
- ret = PTR_ERR_OR_ZERO(b);
- if (ret)
- goto err_root;
-
- if (b != btree_node_root(c, b)) {
- bch2_trans_iter_exit(trans, &iter);
- goto retry_root;
- }
-
- gc_pos_set(c, gc_pos_btree(btree, b->c.level + 1, SPOS_MAX));
- struct bkey_s_c k = bkey_i_to_s_c(&b->key);
- ret = bch2_gc_mark_key(trans, btree, b->c.level + 1, NULL, NULL, k, initial);
-err_root:
- bch2_trans_iter_exit(trans, &iter);
- } while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
-err:
- bch_err_fn(c, ret);
- return ret;
-}
-
-static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
-{
- return cmp_int(gc_btree_order(l), gc_btree_order(r));
-}
-
-static int bch2_gc_btrees(struct bch_fs *c)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- enum btree_id ids[BTREE_ID_NR];
- unsigned i;
- int ret = 0;
-
- for (i = 0; i < BTREE_ID_NR; i++)
- ids[i] = i;
- bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);
-
- for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
- unsigned btree = i < BTREE_ID_NR ? ids[i] : i;
-
- if (IS_ERR_OR_NULL(bch2_btree_id_root(c, btree)->b))
- continue;
-
- ret = bch2_gc_btree(trans, btree, true);
-
- if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO),
- trans, btree_node_read_error,
- "btree node read error for %s",
- bch2_btree_id_str(btree)))
- ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
- }
-fsck_err:
- bch2_trans_put(trans);
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int bch2_mark_superblocks(struct bch_fs *c)
-{
- gc_pos_set(c, gc_phase(GC_PHASE_sb));
-
- return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_gc);
-}
-
-static void bch2_gc_free(struct bch_fs *c)
-{
- bch2_accounting_gc_free(c);
-
- genradix_free(&c->reflink_gc_table);
- genradix_free(&c->gc_stripes);
-
- for_each_member_device(c, ca)
- genradix_free(&ca->buckets_gc);
-}
-
-static int bch2_gc_start(struct bch_fs *c)
-{
- for_each_member_device(c, ca) {
- int ret = bch2_dev_usage_init(ca, true);
- if (ret) {
- bch2_dev_put(ca);
- return ret;
- }
- }
-
- return 0;
-}
-
-/* returns true if not equal */
-static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
- struct bch_alloc_v4 r)
-{
- return l.gen != r.gen ||
- l.oldest_gen != r.oldest_gen ||
- l.data_type != r.data_type ||
- l.dirty_sectors != r.dirty_sectors ||
- l.stripe_sectors != r.stripe_sectors ||
- l.cached_sectors != r.cached_sectors ||
- l.stripe_redundancy != r.stripe_redundancy ||
- l.stripe != r.stripe;
-}
-
-static int bch2_alloc_write_key(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bch_dev *ca,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct bkey_i_alloc_v4 *a;
- struct bch_alloc_v4 old_gc, gc, old_convert, new;
- const struct bch_alloc_v4 *old;
- int ret;
-
- if (!bucket_valid(ca, k.k->p.offset))
- return 0;
-
- old = bch2_alloc_to_v4(k, &old_convert);
- gc = new = *old;
-
- percpu_down_read(&c->mark_lock);
- __bucket_m_to_alloc(&gc, *gc_bucket(ca, iter->pos.offset));
-
- old_gc = gc;
-
- if ((old->data_type == BCH_DATA_sb ||
- old->data_type == BCH_DATA_journal) &&
- !bch2_dev_is_online(ca)) {
- gc.data_type = old->data_type;
- gc.dirty_sectors = old->dirty_sectors;
- }
- percpu_up_read(&c->mark_lock);
-
- /*
- * gc.data_type doesn't yet include need_discard & need_gc_gen states -
- * fix that here:
- */
- alloc_data_type_set(&gc, gc.data_type);
- if (gc.data_type != old_gc.data_type ||
- gc.dirty_sectors != old_gc.dirty_sectors) {
- ret = bch2_alloc_key_to_dev_counters(trans, ca, &old_gc, &gc, BTREE_TRIGGER_gc);
- if (ret)
- return ret;
-
- /*
- * Ugly: alloc_key_to_dev_counters(..., BTREE_TRIGGER_gc) is not
- * safe w.r.t. transaction restarts, so fixup the gc_bucket so
- * we don't run it twice:
- */
- percpu_down_read(&c->mark_lock);
- struct bucket *gc_m = gc_bucket(ca, iter->pos.offset);
- gc_m->data_type = gc.data_type;
- gc_m->dirty_sectors = gc.dirty_sectors;
- percpu_up_read(&c->mark_lock);
- }
-
- if (fsck_err_on(new.data_type != gc.data_type,
- trans, alloc_key_data_type_wrong,
- "bucket %llu:%llu gen %u has wrong data_type"
- ": got %s, should be %s",
- iter->pos.inode, iter->pos.offset,
- gc.gen,
- bch2_data_type_str(new.data_type),
- bch2_data_type_str(gc.data_type)))
- new.data_type = gc.data_type;
-
-#define copy_bucket_field(_errtype, _f) \
- if (fsck_err_on(new._f != gc._f, \
- trans, _errtype, \
- "bucket %llu:%llu gen %u data type %s has wrong " #_f \
- ": got %llu, should be %llu", \
- iter->pos.inode, iter->pos.offset, \
- gc.gen, \
- bch2_data_type_str(gc.data_type), \
- (u64) new._f, (u64) gc._f)) \
- new._f = gc._f; \
-
- copy_bucket_field(alloc_key_gen_wrong, gen);
- copy_bucket_field(alloc_key_dirty_sectors_wrong, dirty_sectors);
- copy_bucket_field(alloc_key_stripe_sectors_wrong, stripe_sectors);
- copy_bucket_field(alloc_key_cached_sectors_wrong, cached_sectors);
- copy_bucket_field(alloc_key_stripe_wrong, stripe);
- copy_bucket_field(alloc_key_stripe_redundancy_wrong, stripe_redundancy);
-#undef copy_bucket_field
-
- if (!bch2_alloc_v4_cmp(*old, new))
- return 0;
-
- a = bch2_alloc_to_v4_mut(trans, k);
- ret = PTR_ERR_OR_ZERO(a);
- if (ret)
- return ret;
-
- a->v = new;
-
- /*
- * The trigger normally makes sure these are set, but we're not running
- * triggers:
- */
- if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ])
- a->v.io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
-
- ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_norun);
-fsck_err:
- return ret;
-}
-
-static int bch2_gc_alloc_done(struct bch_fs *c)
-{
- int ret = 0;
-
- for_each_member_device(c, ca) {
- ret = bch2_trans_run(c,
- for_each_btree_key_upto_commit(trans, iter, BTREE_ID_alloc,
- POS(ca->dev_idx, ca->mi.first_bucket),
- POS(ca->dev_idx, ca->mi.nbuckets - 1),
- BTREE_ITER_slots|BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
- bch2_alloc_write_key(trans, &iter, ca, k)));
- if (ret) {
- bch2_dev_put(ca);
- break;
- }
- }
-
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int bch2_gc_alloc_start(struct bch_fs *c)
-{
- int ret = 0;
-
- for_each_member_device(c, ca) {
- ret = genradix_prealloc(&ca->buckets_gc, ca->mi.nbuckets, GFP_KERNEL);
- if (ret) {
- bch2_dev_put(ca);
- ret = -BCH_ERR_ENOMEM_gc_alloc_start;
- break;
- }
- }
-
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int bch2_gc_write_reflink_key(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k,
- size_t *idx)
-{
- struct bch_fs *c = trans->c;
- const __le64 *refcount = bkey_refcount_c(k);
- struct printbuf buf = PRINTBUF;
- struct reflink_gc *r;
- int ret = 0;
-
- if (!refcount)
- return 0;
-
- while ((r = genradix_ptr(&c->reflink_gc_table, *idx)) &&
- r->offset < k.k->p.offset)
- ++*idx;
-
- if (!r ||
- r->offset != k.k->p.offset ||
- r->size != k.k->size) {
- bch_err(c, "unexpected inconsistency walking reflink table at gc finish");
- return -EINVAL;
- }
-
- if (fsck_err_on(r->refcount != le64_to_cpu(*refcount),
- trans, reflink_v_refcount_wrong,
- "reflink key has wrong refcount:\n"
- " %s\n"
- " should be %u",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf),
- r->refcount)) {
- struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
- ret = PTR_ERR_OR_ZERO(new);
- if (ret)
- goto out;
-
- if (!r->refcount)
- new->k.type = KEY_TYPE_deleted;
- else
- *bkey_refcount(bkey_i_to_s(new)) = cpu_to_le64(r->refcount);
- ret = bch2_trans_update(trans, iter, new, 0);
- }
-out:
-fsck_err:
- printbuf_exit(&buf);
- return ret;
-}
-
-static int bch2_gc_reflink_done(struct bch_fs *c)
-{
- size_t idx = 0;
-
- int ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter,
- BTREE_ID_reflink, POS_MIN,
- BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- bch2_gc_write_reflink_key(trans, &iter, k, &idx)));
- c->reflink_gc_nr = 0;
- return ret;
-}
-
-static int bch2_gc_reflink_start(struct bch_fs *c)
-{
- c->reflink_gc_nr = 0;
-
- int ret = bch2_trans_run(c,
- for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN,
- BTREE_ITER_prefetch, k, ({
- const __le64 *refcount = bkey_refcount_c(k);
-
- if (!refcount)
- continue;
-
- struct reflink_gc *r = genradix_ptr_alloc(&c->reflink_gc_table,
- c->reflink_gc_nr++, GFP_KERNEL);
- if (!r) {
- ret = -BCH_ERR_ENOMEM_gc_reflink_start;
- break;
- }
-
- r->offset = k.k->p.offset;
- r->size = k.k->size;
- r->refcount = 0;
- 0;
- })));
-
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int bch2_gc_write_stripes_key(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
- const struct bch_stripe *s;
- struct gc_stripe *m;
- bool bad = false;
- unsigned i;
- int ret = 0;
-
- if (k.k->type != KEY_TYPE_stripe)
- return 0;
-
- s = bkey_s_c_to_stripe(k).v;
- m = genradix_ptr(&c->gc_stripes, k.k->p.offset);
-
- for (i = 0; i < s->nr_blocks; i++) {
- u32 old = stripe_blockcount_get(s, i);
- u32 new = (m ? m->block_sectors[i] : 0);
-
- if (old != new) {
- prt_printf(&buf, "stripe block %u has wrong sector count: got %u, should be %u\n",
- i, old, new);
- bad = true;
- }
- }
-
- if (bad)
- bch2_bkey_val_to_text(&buf, c, k);
-
- if (fsck_err_on(bad,
- trans, stripe_sector_count_wrong,
- "%s", buf.buf)) {
- struct bkey_i_stripe *new;
-
- new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
- ret = PTR_ERR_OR_ZERO(new);
- if (ret)
- return ret;
-
- bkey_reassemble(&new->k_i, k);
-
- for (i = 0; i < new->v.nr_blocks; i++)
- stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);
-
- ret = bch2_trans_update(trans, iter, &new->k_i, 0);
- }
-fsck_err:
- printbuf_exit(&buf);
- return ret;
-}
-
-static int bch2_gc_stripes_done(struct bch_fs *c)
-{
- return bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter,
- BTREE_ID_stripes, POS_MIN,
- BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- bch2_gc_write_stripes_key(trans, &iter, k)));
-}
-
-/**
- * bch2_check_allocations - walk all references to buckets, and recompute them:
- *
- * @c: filesystem object
- *
- * Returns: 0 on success, or standard errcode on failure
- *
- * Order matters here:
- * - Concurrent GC relies on the fact that we have a total ordering for
- * everything that GC walks - see gc_will_visit_node(),
- * gc_will_visit_root()
- *
- * - also, references move around in the course of index updates and
- * various other crap: everything needs to agree on the ordering
- * references are allowed to move around in - e.g., we're allowed to
- * start with a reference owned by an open_bucket (the allocator) and
- * move it to the btree, but not the reverse.
- *
- * This is necessary to ensure that gc doesn't miss references that
- * move around - if references move backwards in the ordering GC
- * uses, GC could skip past them
- */
-int bch2_check_allocations(struct bch_fs *c)
-{
- int ret;
-
- lockdep_assert_held(&c->state_lock);
-
- down_write(&c->gc_lock);
-
- bch2_btree_interior_updates_flush(c);
-
- ret = bch2_gc_accounting_start(c) ?:
- bch2_gc_start(c) ?:
- bch2_gc_alloc_start(c) ?:
- bch2_gc_reflink_start(c);
- if (ret)
- goto out;
-
- gc_pos_set(c, gc_phase(GC_PHASE_start));
-
- ret = bch2_mark_superblocks(c);
- bch_err_msg(c, ret, "marking superblocks");
- if (ret)
- goto out;
-
- ret = bch2_gc_btrees(c);
- if (ret)
- goto out;
-
- c->gc_count++;
-
- ret = bch2_gc_alloc_done(c) ?:
- bch2_gc_accounting_done(c) ?:
- bch2_gc_stripes_done(c) ?:
- bch2_gc_reflink_done(c);
-out:
- percpu_down_write(&c->mark_lock);
- /* Indicates that gc is no longer in progress: */
- __gc_pos_set(c, gc_phase(GC_PHASE_not_running));
-
- bch2_gc_free(c);
- percpu_up_write(&c->mark_lock);
-
- up_write(&c->gc_lock);
-
- /*
- * At startup, allocations can happen directly instead of via the
- * allocator thread - issue wakeup in case they blocked on gc_lock:
- */
- closure_wake_up(&c->freelist_wait);
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int gc_btree_gens_key(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- struct bkey_i *u;
- int ret;
-
- if (unlikely(test_bit(BCH_FS_going_ro, &c->flags)))
- return -EROFS;
-
- percpu_down_read(&c->mark_lock);
- rcu_read_lock();
- bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
- if (!ca)
- continue;
-
- if (dev_ptr_stale(ca, ptr) > 16) {
- rcu_read_unlock();
- percpu_up_read(&c->mark_lock);
- goto update;
- }
- }
-
- bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
- if (!ca)
- continue;
-
- u8 *gen = &ca->oldest_gen[PTR_BUCKET_NR(ca, ptr)];
- if (gen_after(*gen, ptr->gen))
- *gen = ptr->gen;
- }
- rcu_read_unlock();
- percpu_up_read(&c->mark_lock);
- return 0;
-update:
- u = bch2_bkey_make_mut(trans, iter, &k, 0);
- ret = PTR_ERR_OR_ZERO(u);
- if (ret)
- return ret;
-
- bch2_extent_normalize(c, bkey_i_to_s(u));
- return 0;
-}
-
-static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct bch_dev *ca,
- struct btree_iter *iter, struct bkey_s_c k)
-{
- struct bch_alloc_v4 a_convert;
- const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
- struct bkey_i_alloc_v4 *a_mut;
- int ret;
-
- if (a->oldest_gen == ca->oldest_gen[iter->pos.offset])
- return 0;
-
- a_mut = bch2_alloc_to_v4_mut(trans, k);
- ret = PTR_ERR_OR_ZERO(a_mut);
- if (ret)
- return ret;
-
- a_mut->v.oldest_gen = ca->oldest_gen[iter->pos.offset];
- alloc_data_type_set(&a_mut->v, a_mut->v.data_type);
-
- return bch2_trans_update(trans, iter, &a_mut->k_i, 0);
-}
-
-int bch2_gc_gens(struct bch_fs *c)
-{
- u64 b, start_time = local_clock();
- int ret;
-
- if (!mutex_trylock(&c->gc_gens_lock))
- return 0;
-
- trace_and_count(c, gc_gens_start, c);
-
- /*
- * We have to use trylock here. Otherwise, we would
- * introduce a deadlock in the RO path - we take the
- * state lock at the start of going RO.
- */
- if (!down_read_trylock(&c->state_lock)) {
- mutex_unlock(&c->gc_gens_lock);
- return 0;
- }
-
- for_each_member_device(c, ca) {
- struct bucket_gens *gens = bucket_gens(ca);
-
- BUG_ON(ca->oldest_gen);
-
- ca->oldest_gen = kvmalloc(gens->nbuckets, GFP_KERNEL);
- if (!ca->oldest_gen) {
- bch2_dev_put(ca);
- ret = -BCH_ERR_ENOMEM_gc_gens;
- goto err;
- }
-
- for (b = gens->first_bucket;
- b < gens->nbuckets; b++)
- ca->oldest_gen[b] = gens->b[b];
- }
-
- for (unsigned i = 0; i < BTREE_ID_NR; i++)
- if (btree_type_has_ptrs(i)) {
- c->gc_gens_btree = i;
- c->gc_gens_pos = POS_MIN;
-
- ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter, i,
- POS_MIN,
- BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
- k,
- NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc,
- gc_btree_gens_key(trans, &iter, k)));
- if (ret)
- goto err;
- }
-
- struct bch_dev *ca = NULL;
- ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
- POS_MIN,
- BTREE_ITER_prefetch,
- k,
- NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc, ({
- ca = bch2_dev_iterate(c, ca, k.k->p.inode);
- if (!ca) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
- continue;
- }
- bch2_alloc_write_oldest_gen(trans, ca, &iter, k);
- })));
- bch2_dev_put(ca);
-
- if (ret)
- goto err;
-
- c->gc_gens_btree = 0;
- c->gc_gens_pos = POS_MIN;
-
- c->gc_count++;
-
- bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
- trace_and_count(c, gc_gens_end, c);
-err:
- for_each_member_device(c, ca) {
- kvfree(ca->oldest_gen);
- ca->oldest_gen = NULL;
- }
-
- up_read(&c->state_lock);
- mutex_unlock(&c->gc_gens_lock);
- if (!bch2_err_matches(ret, EROFS))
- bch_err_fn(c, ret);
- return ret;
-}
-
-static void bch2_gc_gens_work(struct work_struct *work)
-{
- struct bch_fs *c = container_of(work, struct bch_fs, gc_gens_work);
- bch2_gc_gens(c);
- bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens);
-}
-
-void bch2_gc_gens_async(struct bch_fs *c)
-{
- if (bch2_write_ref_tryget(c, BCH_WRITE_REF_gc_gens) &&
- !queue_work(c->write_ref_wq, &c->gc_gens_work))
- bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens);
-}
-
-void bch2_fs_gc_init(struct bch_fs *c)
-{
- seqcount_init(&c->gc_pos_lock);
-
- INIT_WORK(&c->gc_gens_work, bch2_gc_gens_work);
-}
diff --git a/fs/bcachefs/btree_gc.h b/fs/bcachefs/btree_gc.h
deleted file mode 100644
index 8a47e8bd0791..000000000000
--- a/fs/bcachefs/btree_gc.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BTREE_GC_H
-#define _BCACHEFS_BTREE_GC_H
-
-#include "bkey.h"
-#include "btree_gc_types.h"
-#include "btree_types.h"
-
-int bch2_check_topology(struct bch_fs *);
-int bch2_check_allocations(struct bch_fs *);
-
-/*
- * For concurrent mark and sweep (with other index updates), we define a total
- * ordering of _all_ references GC walks:
- *
- * Note that some references will have the same GC position as others - e.g.
- * everything within the same btree node; in those cases we're relying on
- * whatever locking exists for where those references live, i.e. the write lock
- * on a btree node.
- *
- * That locking is also required to ensure GC doesn't pass the updater in
- * between the updater adding/removing the reference and updating the GC marks;
- * without that, we would at best double count sometimes.
- *
- * That part is important - whenever calling bch2_mark_pointers(), a lock _must_
- * be held that prevents GC from passing the position the updater is at.
- *
- * (What about the start of gc, when we're clearing all the marks? GC clears the
- * mark with the gc pos seqlock held, and bch_mark_bucket checks against the gc
- * position inside its cmpxchg loop, so crap magically works).
- */
-
-/* Position of (the start of) a gc phase: */
-static inline struct gc_pos gc_phase(enum gc_phase phase)
-{
- return (struct gc_pos) { .phase = phase, };
-}
-
-static inline struct gc_pos gc_pos_btree(enum btree_id btree, unsigned level,
- struct bpos pos)
-{
- return (struct gc_pos) {
- .phase = GC_PHASE_btree,
- .btree = btree,
- .level = level,
- .pos = pos,
- };
-}
-
-static inline int gc_btree_order(enum btree_id btree)
-{
- if (btree == BTREE_ID_alloc)
- return -2;
- if (btree == BTREE_ID_stripes)
- return -1;
- return btree;
-}
-
-static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
-{
- return cmp_int(l.phase, r.phase) ?:
- cmp_int(gc_btree_order(l.btree),
- gc_btree_order(r.btree)) ?:
- cmp_int(l.level, r.level) ?:
- bpos_cmp(l.pos, r.pos);
-}
-
-static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
-{
- unsigned seq;
- bool ret;
-
- do {
- seq = read_seqcount_begin(&c->gc_pos_lock);
- ret = gc_pos_cmp(pos, c->gc_pos) <= 0;
- } while (read_seqcount_retry(&c->gc_pos_lock, seq));
-
- return ret;
-}
-
-void bch2_gc_pos_to_text(struct printbuf *, struct gc_pos *);
-
-int bch2_gc_gens(struct bch_fs *);
-void bch2_gc_gens_async(struct bch_fs *);
-void bch2_fs_gc_init(struct bch_fs *);
-
-#endif /* _BCACHEFS_BTREE_GC_H */
diff --git a/fs/bcachefs/btree_gc_types.h b/fs/bcachefs/btree_gc_types.h
deleted file mode 100644
index c24dd6edf377..000000000000
--- a/fs/bcachefs/btree_gc_types.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BTREE_GC_TYPES_H
-#define _BCACHEFS_BTREE_GC_TYPES_H
-
-#include <linux/generic-radix-tree.h>
-
-#define GC_PHASES() \
- x(not_running) \
- x(start) \
- x(sb) \
- x(btree)
-
-enum gc_phase {
-#define x(n) GC_PHASE_##n,
- GC_PHASES()
-#undef x
-};
-
-struct gc_pos {
- enum gc_phase phase:8;
- enum btree_id btree:8;
- u16 level;
- struct bpos pos;
-};
-
-struct reflink_gc {
- u64 offset;
- u32 size;
- u32 refcount;
-};
-
-typedef GENRADIX(struct reflink_gc) reflink_gc_table;
-
-#endif /* _BCACHEFS_BTREE_GC_TYPES_H */
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
deleted file mode 100644
index 839d68802e42..000000000000
--- a/fs/bcachefs/btree_io.c
+++ /dev/null
@@ -1,2355 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "bkey_methods.h"
-#include "bkey_sort.h"
-#include "btree_cache.h"
-#include "btree_io.h"
-#include "btree_iter.h"
-#include "btree_locking.h"
-#include "btree_update.h"
-#include "btree_update_interior.h"
-#include "buckets.h"
-#include "checksum.h"
-#include "debug.h"
-#include "error.h"
-#include "extents.h"
-#include "io_write.h"
-#include "journal_reclaim.h"
-#include "journal_seq_blacklist.h"
-#include "recovery.h"
-#include "super-io.h"
-#include "trace.h"
-
-#include <linux/sched/mm.h>
-
-static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn)
-{
- prt_printf(out, "btree=%s l=%u seq %llux\n",
- bch2_btree_id_str(BTREE_NODE_ID(bn)),
- (unsigned) BTREE_NODE_LEVEL(bn), bn->keys.seq);
- prt_str(out, "min: ");
- bch2_bpos_to_text(out, bn->min_key);
- prt_newline(out);
- prt_str(out, "max: ");
- bch2_bpos_to_text(out, bn->max_key);
-}
-
-void bch2_btree_node_io_unlock(struct btree *b)
-{
- EBUG_ON(!btree_node_write_in_flight(b));
-
- clear_btree_node_write_in_flight_inner(b);
- clear_btree_node_write_in_flight(b);
- wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
-}
-
-void bch2_btree_node_io_lock(struct btree *b)
-{
- wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
- TASK_UNINTERRUPTIBLE);
-}
-
-void __bch2_btree_node_wait_on_read(struct btree *b)
-{
- wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
- TASK_UNINTERRUPTIBLE);
-}
-
-void __bch2_btree_node_wait_on_write(struct btree *b)
-{
- wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
- TASK_UNINTERRUPTIBLE);
-}
-
-void bch2_btree_node_wait_on_read(struct btree *b)
-{
- wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
- TASK_UNINTERRUPTIBLE);
-}
-
-void bch2_btree_node_wait_on_write(struct btree *b)
-{
- wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
- TASK_UNINTERRUPTIBLE);
-}
-
-static void verify_no_dups(struct btree *b,
- struct bkey_packed *start,
- struct bkey_packed *end)
-{
-#ifdef CONFIG_BCACHEFS_DEBUG
- struct bkey_packed *k, *p;
-
- if (start == end)
- return;
-
- for (p = start, k = bkey_p_next(start);
- k != end;
- p = k, k = bkey_p_next(k)) {
- struct bkey l = bkey_unpack_key(b, p);
- struct bkey r = bkey_unpack_key(b, k);
-
- BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
- }
-#endif
-}
-
-static void set_needs_whiteout(struct bset *i, int v)
-{
- struct bkey_packed *k;
-
- for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
- k->needs_whiteout = v;
-}
-
-static void btree_bounce_free(struct bch_fs *c, size_t size,
- bool used_mempool, void *p)
-{
- if (used_mempool)
- mempool_free(p, &c->btree_bounce_pool);
- else
- kvfree(p);
-}
-
-static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
- bool *used_mempool)
-{
- unsigned flags = memalloc_nofs_save();
- void *p;
-
- BUG_ON(size > c->opts.btree_node_size);
-
- *used_mempool = false;
- p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
- if (!p) {
- *used_mempool = true;
- p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
- }
- memalloc_nofs_restore(flags);
- return p;
-}
-
-static void sort_bkey_ptrs(const struct btree *bt,
- struct bkey_packed **ptrs, unsigned nr)
-{
- unsigned n = nr, a = nr / 2, b, c, d;
-
- if (!a)
- return;
-
- /* Heap sort: see lib/sort.c: */
- while (1) {
- if (a)
- a--;
- else if (--n)
- swap(ptrs[0], ptrs[n]);
- else
- break;
-
- for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
- b = bch2_bkey_cmp_packed(bt,
- ptrs[c],
- ptrs[d]) >= 0 ? c : d;
- if (d == n)
- b = c;
-
- while (b != a &&
- bch2_bkey_cmp_packed(bt,
- ptrs[a],
- ptrs[b]) >= 0)
- b = (b - 1) / 2;
- c = b;
- while (b != a) {
- b = (b - 1) / 2;
- swap(ptrs[b], ptrs[c]);
- }
- }
-}
-
-static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
-{
- struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
- bool used_mempool = false;
- size_t bytes = b->whiteout_u64s * sizeof(u64);
-
- if (!b->whiteout_u64s)
- return;
-
- new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);
-
- ptrs = ptrs_end = ((void *) new_whiteouts + bytes);
-
- for (k = unwritten_whiteouts_start(b);
- k != unwritten_whiteouts_end(b);
- k = bkey_p_next(k))
- *--ptrs = k;
-
- sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
-
- k = new_whiteouts;
-
- while (ptrs != ptrs_end) {
- bkey_p_copy(k, *ptrs);
- k = bkey_p_next(k);
- ptrs++;
- }
-
- verify_no_dups(b, new_whiteouts,
- (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));
-
- memcpy_u64s(unwritten_whiteouts_start(b),
- new_whiteouts, b->whiteout_u64s);
-
- btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
-}
-
-static bool should_compact_bset(struct btree *b, struct bset_tree *t,
- bool compacting, enum compact_mode mode)
-{
- if (!bset_dead_u64s(b, t))
- return false;
-
- switch (mode) {
- case COMPACT_LAZY:
- return should_compact_bset_lazy(b, t) ||
- (compacting && !bset_written(b, bset(b, t)));
- case COMPACT_ALL:
- return true;
- default:
- BUG();
- }
-}
-
-static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
-{
- bool ret = false;
-
- for_each_bset(b, t) {
- struct bset *i = bset(b, t);
- struct bkey_packed *k, *n, *out, *start, *end;
- struct btree_node_entry *src = NULL, *dst = NULL;
-
- if (t != b->set && !bset_written(b, i)) {
- src = container_of(i, struct btree_node_entry, keys);
- dst = max(write_block(b),
- (void *) btree_bkey_last(b, t - 1));
- }
-
- if (src != dst)
- ret = true;
-
- if (!should_compact_bset(b, t, ret, mode)) {
- if (src != dst) {
- memmove(dst, src, sizeof(*src) +
- le16_to_cpu(src->keys.u64s) *
- sizeof(u64));
- i = &dst->keys;
- set_btree_bset(b, t, i);
- }
- continue;
- }
-
- start = btree_bkey_first(b, t);
- end = btree_bkey_last(b, t);
-
- if (src != dst) {
- memmove(dst, src, sizeof(*src));
- i = &dst->keys;
- set_btree_bset(b, t, i);
- }
-
- out = i->start;
-
- for (k = start; k != end; k = n) {
- n = bkey_p_next(k);
-
- if (!bkey_deleted(k)) {
- bkey_p_copy(out, k);
- out = bkey_p_next(out);
- } else {
- BUG_ON(k->needs_whiteout);
- }
- }
-
- i->u64s = cpu_to_le16((u64 *) out - i->_data);
- set_btree_bset_end(b, t);
- bch2_bset_set_no_aux_tree(b, t);
- ret = true;
- }
-
- bch2_verify_btree_nr_keys(b);
-
- bch2_btree_build_aux_trees(b);
-
- return ret;
-}
-
-bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
- enum compact_mode mode)
-{
- return bch2_drop_whiteouts(b, mode);
-}
-
-static void btree_node_sort(struct bch_fs *c, struct btree *b,
- unsigned start_idx,
- unsigned end_idx)
-{
- struct btree_node *out;
- struct sort_iter_stack sort_iter;
- struct bset_tree *t;
- struct bset *start_bset = bset(b, &b->set[start_idx]);
- bool used_mempool = false;
- u64 start_time, seq = 0;
- unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
- bool sorting_entire_node = start_idx == 0 &&
- end_idx == b->nsets;
-
- sort_iter_stack_init(&sort_iter, b);
-
- for (t = b->set + start_idx;
- t < b->set + end_idx;
- t++) {
- u64s += le16_to_cpu(bset(b, t)->u64s);
- sort_iter_add(&sort_iter.iter,
- btree_bkey_first(b, t),
- btree_bkey_last(b, t));
- }
-
- bytes = sorting_entire_node
- ? btree_buf_bytes(b)
- : __vstruct_bytes(struct btree_node, u64s);
-
- out = btree_bounce_alloc(c, bytes, &used_mempool);
-
- start_time = local_clock();
-
- u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter);
-
- out->keys.u64s = cpu_to_le16(u64s);
-
- BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);
-
- if (sorting_entire_node)
- bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
- start_time);
-
- /* Make sure we preserve bset journal_seq: */
- for (t = b->set + start_idx; t < b->set + end_idx; t++)
- seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
- start_bset->journal_seq = cpu_to_le64(seq);
-
- if (sorting_entire_node) {
- u64s = le16_to_cpu(out->keys.u64s);
-
- BUG_ON(bytes != btree_buf_bytes(b));
-
- /*
- * Our temporary buffer is the same size as the btree node's
- * buffer, we can just swap buffers instead of doing a big
- * memcpy()
- */
- *out = *b->data;
- out->keys.u64s = cpu_to_le16(u64s);
- swap(out, b->data);
- set_btree_bset(b, b->set, &b->data->keys);
- } else {
- start_bset->u64s = out->keys.u64s;
- memcpy_u64s(start_bset->start,
- out->keys.start,
- le16_to_cpu(out->keys.u64s));
- }
-
- for (i = start_idx + 1; i < end_idx; i++)
- b->nr.bset_u64s[start_idx] +=
- b->nr.bset_u64s[i];
-
- b->nsets -= shift;
-
- for (i = start_idx + 1; i < b->nsets; i++) {
- b->nr.bset_u64s[i] = b->nr.bset_u64s[i + shift];
- b->set[i] = b->set[i + shift];
- }
-
- for (i = b->nsets; i < MAX_BSETS; i++)
- b->nr.bset_u64s[i] = 0;
-
- set_btree_bset_end(b, &b->set[start_idx]);
- bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
-
- btree_bounce_free(c, bytes, used_mempool, out);
-
- bch2_verify_btree_nr_keys(b);
-}
-
-void bch2_btree_sort_into(struct bch_fs *c,
- struct btree *dst,
- struct btree *src)
-{
- struct btree_nr_keys nr;
- struct btree_node_iter src_iter;
- u64 start_time = local_clock();
-
- BUG_ON(dst->nsets != 1);
-
- bch2_bset_set_no_aux_tree(dst, dst->set);
-
- bch2_btree_node_iter_init_from_start(&src_iter, src);
-
- nr = bch2_sort_repack(btree_bset_first(dst),
- src, &src_iter,
- &dst->format,
- true);
-
- bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
- start_time);
-
- set_btree_bset_end(dst, dst->set);
-
- dst->nr.live_u64s += nr.live_u64s;
- dst->nr.bset_u64s[0] += nr.bset_u64s[0];
- dst->nr.packed_keys += nr.packed_keys;
- dst->nr.unpacked_keys += nr.unpacked_keys;
-
- bch2_verify_btree_nr_keys(dst);
-}
-
-/*
- * We're about to add another bset to the btree node, so if there's currently
- * too many bsets - sort some of them together:
- */
-static bool btree_node_compact(struct bch_fs *c, struct btree *b)
-{
- unsigned unwritten_idx;
- bool ret = false;
-
- for (unwritten_idx = 0;
- unwritten_idx < b->nsets;
- unwritten_idx++)
- if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
- break;
-
- if (b->nsets - unwritten_idx > 1) {
- btree_node_sort(c, b, unwritten_idx, b->nsets);
- ret = true;
- }
-
- if (unwritten_idx > 1) {
- btree_node_sort(c, b, 0, unwritten_idx);
- ret = true;
- }
-
- return ret;
-}
-
-void bch2_btree_build_aux_trees(struct btree *b)
-{
- for_each_bset(b, t)
- bch2_bset_build_aux_tree(b, t,
- !bset_written(b, bset(b, t)) &&
- t == bset_tree_last(b));
-}
-
-/*
- * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
- *
- * The first bset is going to be of similar order to the size of the node, the
- * last bset is bounded by btree_write_set_buffer(), which is set to keep the
- * memmove on insert from being too expensive: the middle bset should, ideally,
- * be the geometric mean of the first and the last.
- *
- * Returns true if the middle bset is greater than that geometric mean:
- */
-static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
-{
- unsigned mid_u64s_bits =
- (ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;
-
- return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
-}
-
-/*
- * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
- * inserted into
- *
- * Safe to call if there already is an unwritten bset - will only add a new bset
- * if @b doesn't already have one.
- *
- * Returns true if we sorted (i.e. invalidated iterators
- */
-void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
-{
- struct bch_fs *c = trans->c;
- struct btree_node_entry *bne;
- bool reinit_iter = false;
-
- EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
- BUG_ON(bset_written(b, bset(b, &b->set[1])));
- BUG_ON(btree_node_just_written(b));
-
- if (b->nsets == MAX_BSETS &&
- !btree_node_write_in_flight(b) &&
- should_compact_all(c, b)) {
- bch2_btree_node_write(c, b, SIX_LOCK_write,
- BTREE_WRITE_init_next_bset);
- reinit_iter = true;
- }
-
- if (b->nsets == MAX_BSETS &&
- btree_node_compact(c, b))
- reinit_iter = true;
-
- BUG_ON(b->nsets >= MAX_BSETS);
-
- bne = want_new_bset(c, b);
- if (bne)
- bch2_bset_init_next(b, bne);
-
- bch2_btree_build_aux_trees(b);
-
- if (reinit_iter)
- bch2_trans_node_reinit_iter(trans, b);
-}
-
-static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
- struct bch_dev *ca,
- struct btree *b, struct bset *i, struct bkey_packed *k,
- unsigned offset, int write)
-{
- prt_printf(out, bch2_log_msg(c, "%s"),
- write == READ
- ? "error validating btree node "
- : "corrupt btree node before write ");
- if (ca)
- prt_printf(out, "on %s ", ca->name);
- prt_printf(out, "at btree ");
- bch2_btree_pos_to_text(out, c, b);
-
- printbuf_indent_add(out, 2);
-
- prt_printf(out, "\nnode offset %u/%u",
- b->written, btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)));
- if (i)
- prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
- if (k)
- prt_printf(out, " bset byte offset %lu",
- (unsigned long)(void *)k -
- ((unsigned long)(void *)i & ~511UL));
- prt_str(out, ": ");
-}
-
-__printf(10, 11)
-static int __btree_err(int ret,
- struct bch_fs *c,
- struct bch_dev *ca,
- struct btree *b,
- struct bset *i,
- struct bkey_packed *k,
- int write,
- bool have_retry,
- enum bch_sb_error_id err_type,
- const char *fmt, ...)
-{
- struct printbuf out = PRINTBUF;
- bool silent = c->curr_recovery_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes;
- va_list args;
-
- btree_err_msg(&out, c, ca, b, i, k, b->written, write);
-
- va_start(args, fmt);
- prt_vprintf(&out, fmt, args);
- va_end(args);
-
- if (write == WRITE) {
- bch2_print_string_as_lines(KERN_ERR, out.buf);
- ret = c->opts.errors == BCH_ON_ERROR_continue
- ? 0
- : -BCH_ERR_fsck_errors_not_fixed;
- goto out;
- }
-
- if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
- ret = -BCH_ERR_btree_node_read_err_fixable;
- if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
- ret = -BCH_ERR_btree_node_read_err_bad_node;
-
- if (!silent && ret != -BCH_ERR_btree_node_read_err_fixable)
- bch2_sb_error_count(c, err_type);
-
- switch (ret) {
- case -BCH_ERR_btree_node_read_err_fixable:
- ret = !silent
- ? __bch2_fsck_err(c, NULL, FSCK_CAN_FIX, err_type, "%s", out.buf)
- : -BCH_ERR_fsck_fix;
- if (ret != -BCH_ERR_fsck_fix &&
- ret != -BCH_ERR_fsck_ignore)
- goto fsck_err;
- ret = -BCH_ERR_fsck_fix;
- break;
- case -BCH_ERR_btree_node_read_err_want_retry:
- case -BCH_ERR_btree_node_read_err_must_retry:
- if (!silent)
- bch2_print_string_as_lines(KERN_ERR, out.buf);
- break;
- case -BCH_ERR_btree_node_read_err_bad_node:
- if (!silent)
- bch2_print_string_as_lines(KERN_ERR, out.buf);
- ret = bch2_topology_error(c);
- break;
- case -BCH_ERR_btree_node_read_err_incompatible:
- if (!silent)
- bch2_print_string_as_lines(KERN_ERR, out.buf);
- ret = -BCH_ERR_fsck_errors_not_fixed;
- break;
- default:
- BUG();
- }
-out:
-fsck_err:
- printbuf_exit(&out);
- return ret;
-}
-
-#define btree_err(type, c, ca, b, i, k, _err_type, msg, ...) \
-({ \
- int _ret = __btree_err(type, c, ca, b, i, k, write, have_retry, \
- BCH_FSCK_ERR_##_err_type, \
- msg, ##__VA_ARGS__); \
- \
- if (_ret != -BCH_ERR_fsck_fix) { \
- ret = _ret; \
- goto fsck_err; \
- } \
- \
- *saw_error = true; \
-})
-
-#define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false)
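-
-/*
- * Both macros rely on their caller providing write, have_retry, saw_error and
- * ret, plus an fsck_err label: any error that __btree_err() doesn't resolve to
- * -BCH_ERR_fsck_fix is stashed in ret and control jumps to fsck_err.
- */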
-
-/*
- * When btree topology repair changes the start or end of a node, that might
- * mean we have to drop keys that are no longer inside the node:
- */
-__cold
-void bch2_btree_node_drop_keys_outside_node(struct btree *b)
-{
- for_each_bset(b, t) {
- struct bset *i = bset(b, t);
- struct bkey_packed *k;
-
- for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
- if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
- break;
-
- if (k != i->start) {
- unsigned shift = (u64 *) k - (u64 *) i->start;
-
- memmove_u64s_down(i->start, k,
- (u64 *) vstruct_end(i) - (u64 *) k);
- i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
- set_btree_bset_end(b, t);
- }
-
- for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
- if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
- break;
-
- if (k != vstruct_last(i)) {
- i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
- set_btree_bset_end(b, t);
- }
- }
-
- /*
- * Always rebuild search trees: eytzinger search tree nodes directly
- * depend on the values of min/max key:
- */
- bch2_bset_set_no_aux_tree(b, b->set);
- bch2_btree_build_aux_trees(b);
- b->nr = bch2_btree_node_count_keys(b);
-
- struct bkey_s_c k;
- struct bkey unpacked;
- struct btree_node_iter iter;
- for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
- BUG_ON(bpos_lt(k.k->p, b->data->min_key));
- BUG_ON(bpos_gt(k.k->p, b->data->max_key));
- }
-}
-
-static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
- struct btree *b, struct bset *i,
- unsigned offset, unsigned sectors,
- int write, bool have_retry, bool *saw_error)
-{
- unsigned version = le16_to_cpu(i->version);
- unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
- struct printbuf buf1 = PRINTBUF;
- struct printbuf buf2 = PRINTBUF;
- int ret = 0;
-
- btree_err_on(!bch2_version_compatible(version),
- -BCH_ERR_btree_node_read_err_incompatible,
- c, ca, b, i, NULL,
- btree_node_unsupported_version,
- "unsupported bset version %u.%u",
- BCH_VERSION_MAJOR(version),
- BCH_VERSION_MINOR(version));
-
- if (btree_err_on(version < c->sb.version_min,
- -BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i, NULL,
- btree_node_bset_older_than_sb_min,
- "bset version %u older than superblock version_min %u",
- version, c->sb.version_min)) {
- mutex_lock(&c->sb_lock);
- c->disk_sb.sb->version_min = cpu_to_le16(version);
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
- }
-
- if (btree_err_on(BCH_VERSION_MAJOR(version) >
- BCH_VERSION_MAJOR(c->sb.version),
- -BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i, NULL,
- btree_node_bset_newer_than_sb,
- "bset version %u newer than superblock version %u",
- version, c->sb.version)) {
- mutex_lock(&c->sb_lock);
- c->disk_sb.sb->version = cpu_to_le16(version);
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
- }
-
- btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
- -BCH_ERR_btree_node_read_err_incompatible,
- c, ca, b, i, NULL,
- btree_node_unsupported_version,
- "BSET_SEPARATE_WHITEOUTS no longer supported");
-
- if (!write &&
- btree_err_on(offset + sectors > (ptr_written ?: btree_sectors(c)),
- -BCH_ERR_btree_node_read_err_fixable,
- c, ca, b, i, NULL,
- bset_past_end_of_btree_node,
- "bset past end of btree node (offset %u len %u but written %zu)",
- offset, sectors, ptr_written ?: btree_sectors(c)))
- i->u64s = 0;
-
- btree_err_on(offset && !i->u64s,
- -BCH_ERR_btree_node_read_err_fixable,
- c, ca, b, i, NULL,
- bset_empty,
- "empty bset");
-
- btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
- -BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, i, NULL,
- bset_wrong_sector_offset,
- "bset at wrong sector offset");
-
- if (!offset) {
- struct btree_node *bn =
- container_of(i, struct btree_node, keys);
- /* These indicate that we read the wrong btree node: */
-
- if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
- struct bch_btree_ptr_v2 *bp =
- &bkey_i_to_btree_ptr_v2(&b->key)->v;
-
- /* XXX endianness */
- btree_err_on(bp->seq != bn->keys.seq,
- -BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL, NULL,
- bset_bad_seq,
- "incorrect sequence number (wrong btree node)");
- }
-
- btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
- -BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, i, NULL,
- btree_node_bad_btree,
- "incorrect btree id");
-
- btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
- -BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, i, NULL,
- btree_node_bad_level,
- "incorrect level");
-
- if (!write)
- compat_btree_node(b->c.level, b->c.btree_id, version,
- BSET_BIG_ENDIAN(i), write, bn);
-
- if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
- struct bch_btree_ptr_v2 *bp =
- &bkey_i_to_btree_ptr_v2(&b->key)->v;
-
- if (BTREE_PTR_RANGE_UPDATED(bp)) {
- b->data->min_key = bp->min_key;
- b->data->max_key = b->key.k.p;
- }
-
- btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
- -BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL, NULL,
- btree_node_bad_min_key,
- "incorrect min_key: got %s should be %s",
- (printbuf_reset(&buf1),
- bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
- (printbuf_reset(&buf2),
- bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
- }
-
- btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
- -BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, i, NULL,
- btree_node_bad_max_key,
- "incorrect max key %s",
- (printbuf_reset(&buf1),
- bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));
-
- if (write)
- compat_btree_node(b->c.level, b->c.btree_id, version,
- BSET_BIG_ENDIAN(i), write, bn);
-
- btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
- -BCH_ERR_btree_node_read_err_bad_node,
- c, ca, b, i, NULL,
- btree_node_bad_format,
- "invalid bkey format: %s\n %s", buf1.buf,
- (printbuf_reset(&buf2),
- bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
- printbuf_reset(&buf1);
-
- compat_bformat(b->c.level, b->c.btree_id, version,
- BSET_BIG_ENDIAN(i), write,
- &bn->format);
- }
-fsck_err:
- printbuf_exit(&buf2);
- printbuf_exit(&buf1);
- return ret;
-}
-
-static int bset_key_validate(struct bch_fs *c, struct btree *b,
- struct bkey_s_c k,
- bool updated_range, int rw)
-{
- return __bch2_bkey_validate(c, k, btree_node_type(b), 0) ?:
- (!updated_range ? bch2_bkey_in_btree_node(c, b, k, 0) : 0) ?:
- (rw == WRITE ? bch2_bkey_val_validate(c, k, 0) : 0);
-}
-
-static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
- struct bset *i, struct bkey_packed *k)
-{
- if (bkey_p_next(k) > vstruct_last(i))
- return false;
-
- if (k->format > KEY_FORMAT_CURRENT)
- return false;
-
- if (!bkeyp_u64s_valid(&b->format, k))
- return false;
-
- struct bkey tmp;
- struct bkey_s u = __bkey_disassemble(b, k, &tmp);
- return !__bch2_bkey_validate(c, u.s_c, btree_node_type(b), BCH_VALIDATE_silent);
-}
-
-static int validate_bset_keys(struct bch_fs *c, struct btree *b,
- struct bset *i, int write,
- bool have_retry, bool *saw_error)
-{
- unsigned version = le16_to_cpu(i->version);
- struct bkey_packed *k, *prev = NULL;
- struct printbuf buf = PRINTBUF;
- bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
- BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
- int ret = 0;
-
- for (k = i->start;
- k != vstruct_last(i);) {
- struct bkey_s u;
- struct bkey tmp;
- unsigned next_good_key;
-
- if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
- -BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i, k,
- btree_node_bkey_past_bset_end,
- "key extends past end of bset")) {
- i->u64s = cpu_to_le16((u64 *) k - i->_data);
- break;
- }
-
- if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
- -BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i, k,
- btree_node_bkey_bad_format,
- "invalid bkey format %u", k->format))
- goto drop_this_key;
-
- if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
- -BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i, k,
- btree_node_bkey_bad_u64s,
- "bad k->u64s %u (min %u max %zu)", k->u64s,
- bkeyp_key_u64s(&b->format, k),
- U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k)))
- goto drop_this_key;
-
- if (!write)
- bch2_bkey_compat(b->c.level, b->c.btree_id, version,
- BSET_BIG_ENDIAN(i), write,
- &b->format, k);
-
- u = __bkey_disassemble(b, k, &tmp);
-
- ret = bset_key_validate(c, b, u.s_c, updated_range, write);
- if (ret == -BCH_ERR_fsck_delete_bkey)
- goto drop_this_key;
- if (ret)
- goto fsck_err;
-
- if (write)
- bch2_bkey_compat(b->c.level, b->c.btree_id, version,
- BSET_BIG_ENDIAN(i), write,
- &b->format, k);
-
- if (prev && bkey_iter_cmp(b, prev, k) > 0) {
- struct bkey up = bkey_unpack_key(b, prev);
-
- printbuf_reset(&buf);
- prt_printf(&buf, "keys out of order: ");
- bch2_bkey_to_text(&buf, &up);
- prt_printf(&buf, " > ");
- bch2_bkey_to_text(&buf, u.k);
-
- if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i, k,
- btree_node_bkey_out_of_order,
- "%s", buf.buf))
- goto drop_this_key;
- }
-
- prev = k;
- k = bkey_p_next(k);
- continue;
-drop_this_key:
- next_good_key = k->u64s;
-
- if (!next_good_key ||
- (BSET_BIG_ENDIAN(i) == CPU_BIG_ENDIAN &&
- version >= bcachefs_metadata_version_snapshot)) {
- /*
- * only do scanning if bch2_bkey_compat() has nothing to
- * do
- */
-
- if (!bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
- for (next_good_key = 1;
- next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
- next_good_key++)
- if (bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
- goto got_good_key;
- }
-
- /*
- * didn't find a good key, have to truncate the rest of
- * the bset
- */
- next_good_key = (u64 *) vstruct_last(i) - (u64 *) k;
- }
-got_good_key:
- le16_add_cpu(&i->u64s, -next_good_key);
- memmove_u64s_down(k, bkey_p_next(k), (u64 *) vstruct_end(i) - (u64 *) k);
- }
-fsck_err:
- printbuf_exit(&buf);
- return ret;
-}
-
-int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
- struct btree *b, bool have_retry, bool *saw_error)
-{
- struct btree_node_entry *bne;
- struct sort_iter *iter;
- struct btree_node *sorted;
- struct bkey_packed *k;
- struct bset *i;
- bool used_mempool, blacklisted;
- bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
- BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
- unsigned u64s;
- unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
- u64 max_journal_seq = 0;
- struct printbuf buf = PRINTBUF;
- int ret = 0, retry_read = 0, write = READ;
- u64 start_time = local_clock();
-
- b->version_ondisk = U16_MAX;
- /* We might get called multiple times on read retry: */
- b->written = 0;
-
- iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
- sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);
-
- if (bch2_meta_read_fault("btree"))
- btree_err(-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL, NULL,
- btree_node_fault_injected,
- "dynamic fault");
-
- btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
- -BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL, NULL,
- btree_node_bad_magic,
- "bad magic: want %llx, got %llx",
- bset_magic(c), le64_to_cpu(b->data->magic));
-
- if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
- struct bch_btree_ptr_v2 *bp =
- &bkey_i_to_btree_ptr_v2(&b->key)->v;
-
- bch2_bpos_to_text(&buf, b->data->min_key);
- prt_str(&buf, "-");
- bch2_bpos_to_text(&buf, b->data->max_key);
-
- btree_err_on(b->data->keys.seq != bp->seq,
- -BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL, NULL,
- btree_node_bad_seq,
- "got wrong btree node: got\n%s",
- (printbuf_reset(&buf),
- bch2_btree_node_header_to_text(&buf, b->data),
- buf.buf));
- } else {
- btree_err_on(!b->data->keys.seq,
- -BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL, NULL,
- btree_node_bad_seq,
- "bad btree header: seq 0\n%s",
- (printbuf_reset(&buf),
- bch2_btree_node_header_to_text(&buf, b->data),
- buf.buf));
- }
-
- while (b->written < (ptr_written ?: btree_sectors(c))) {
- unsigned sectors;
- struct nonce nonce;
- bool first = !b->written;
- bool csum_bad;
-
- if (!b->written) {
- i = &b->data->keys;
-
- btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
- -BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, i, NULL,
- bset_unknown_csum,
- "unknown checksum type %llu", BSET_CSUM_TYPE(i));
-
- nonce = btree_nonce(i, b->written << 9);
-
- struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
- csum_bad = bch2_crc_cmp(b->data->csum, csum);
- if (csum_bad)
- bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
-
- btree_err_on(csum_bad,
- -BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, i, NULL,
- bset_bad_csum,
- "%s",
- (printbuf_reset(&buf),
- bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum),
- buf.buf));
-
- ret = bset_encrypt(c, i, b->written << 9);
- if (bch2_fs_fatal_err_on(ret, c,
- "decrypting btree node: %s", bch2_err_str(ret)))
- goto fsck_err;
-
- btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
- !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
- -BCH_ERR_btree_node_read_err_incompatible,
- c, NULL, b, NULL, NULL,
- btree_node_unsupported_version,
- "btree node does not have NEW_EXTENT_OVERWRITE set");
-
- sectors = vstruct_sectors(b->data, c->block_bits);
- } else {
- bne = write_block(b);
- i = &bne->keys;
-
- if (i->seq != b->data->keys.seq)
- break;
-
- btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
- -BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, i, NULL,
- bset_unknown_csum,
- "unknown checksum type %llu", BSET_CSUM_TYPE(i));
-
- nonce = btree_nonce(i, b->written << 9);
- struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
- csum_bad = bch2_crc_cmp(bne->csum, csum);
- if (ca && csum_bad)
- bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
-
- btree_err_on(csum_bad,
- -BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, i, NULL,
- bset_bad_csum,
- "%s",
- (printbuf_reset(&buf),
- bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum),
- buf.buf));
-
- ret = bset_encrypt(c, i, b->written << 9);
- if (bch2_fs_fatal_err_on(ret, c,
- "decrypting btree node: %s", bch2_err_str(ret)))
- goto fsck_err;
-
- sectors = vstruct_sectors(bne, c->block_bits);
- }
-
- b->version_ondisk = min(b->version_ondisk,
- le16_to_cpu(i->version));
-
- ret = validate_bset(c, ca, b, i, b->written, sectors,
- READ, have_retry, saw_error);
- if (ret)
- goto fsck_err;
-
- if (!b->written)
- btree_node_set_format(b, b->data->format);
-
- ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
- if (ret)
- goto fsck_err;
-
- SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
-
- blacklisted = bch2_journal_seq_is_blacklisted(c,
- le64_to_cpu(i->journal_seq),
- true);
-
- btree_err_on(blacklisted && first,
- -BCH_ERR_btree_node_read_err_fixable,
- c, ca, b, i, NULL,
- bset_blacklisted_journal_seq,
- "first btree node bset has blacklisted journal seq (%llu)",
- le64_to_cpu(i->journal_seq));
-
- btree_err_on(blacklisted && ptr_written,
- -BCH_ERR_btree_node_read_err_fixable,
- c, ca, b, i, NULL,
- first_bset_blacklisted_journal_seq,
- "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
- le64_to_cpu(i->journal_seq),
- b->written, b->written + sectors, ptr_written);
-
- b->written += sectors;
-
- if (blacklisted && !first)
- continue;
-
- sort_iter_add(iter,
- vstruct_idx(i, 0),
- vstruct_last(i));
-
- max_journal_seq = max(max_journal_seq, le64_to_cpu(i->journal_seq));
- }
-
- if (ptr_written) {
- btree_err_on(b->written < ptr_written,
- -BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, NULL, NULL,
- btree_node_data_missing,
- "btree node data missing: expected %u sectors, found %u",
- ptr_written, b->written);
- } else {
- for (bne = write_block(b);
- bset_byte_offset(b, bne) < btree_buf_bytes(b);
- bne = (void *) bne + block_bytes(c))
- btree_err_on(bne->keys.seq == b->data->keys.seq &&
- !bch2_journal_seq_is_blacklisted(c,
- le64_to_cpu(bne->keys.journal_seq),
- true),
- -BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, NULL, NULL,
- btree_node_bset_after_end,
- "found bset signature after last bset");
- }
-
- sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool);
- sorted->keys.u64s = 0;
-
- set_btree_bset(b, b->set, &b->data->keys);
-
- b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
- memset((uint8_t *)(sorted + 1) + b->nr.live_u64s * sizeof(u64), 0,
- btree_buf_bytes(b) -
- sizeof(struct btree_node) -
- b->nr.live_u64s * sizeof(u64));
-
- u64s = le16_to_cpu(sorted->keys.u64s);
- *sorted = *b->data;
- sorted->keys.u64s = cpu_to_le16(u64s);
- swap(sorted, b->data);
- set_btree_bset(b, b->set, &b->data->keys);
- b->nsets = 1;
- b->data->keys.journal_seq = cpu_to_le64(max_journal_seq);
-
- BUG_ON(b->nr.live_u64s != u64s);
-
- btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);
-
- if (updated_range)
- bch2_btree_node_drop_keys_outside_node(b);
-
- i = &b->data->keys;
- for (k = i->start; k != vstruct_last(i);) {
- struct bkey tmp;
- struct bkey_s u = __bkey_disassemble(b, k, &tmp);
-
- ret = bch2_bkey_val_validate(c, u.s_c, READ);
- if (ret == -BCH_ERR_fsck_delete_bkey ||
- (bch2_inject_invalid_keys &&
- !bversion_cmp(u.k->bversion, MAX_VERSION))) {
- btree_keys_account_key_drop(&b->nr, 0, k);
-
- i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
- memmove_u64s_down(k, bkey_p_next(k),
- (u64 *) vstruct_end(i) - (u64 *) k);
- set_btree_bset_end(b, b->set);
- continue;
- }
- if (ret)
- goto fsck_err;
-
- if (u.k->type == KEY_TYPE_btree_ptr_v2) {
- struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);
-
- bp.v->mem_ptr = 0;
- }
-
- k = bkey_p_next(k);
- }
-
- bch2_bset_build_aux_tree(b, b->set, false);
-
- set_needs_whiteout(btree_bset_first(b), true);
-
- btree_node_reset_sib_u64s(b);
-
- rcu_read_lock();
- bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
- struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev);
-
- if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw)
- set_btree_node_need_rewrite(b);
- }
- rcu_read_unlock();
-
- if (!ptr_written)
- set_btree_node_need_rewrite(b);
-out:
- mempool_free(iter, &c->fill_iter);
- printbuf_exit(&buf);
- bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time);
- return retry_read;
-fsck_err:
- if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
- ret == -BCH_ERR_btree_node_read_err_must_retry) {
- retry_read = 1;
- } else {
- set_btree_node_read_error(b);
- bch2_btree_lost_data(c, b->c.btree_id);
- }
- goto out;
-}
-
-static void btree_node_read_work(struct work_struct *work)
-{
- struct btree_read_bio *rb =
- container_of(work, struct btree_read_bio, work);
- struct bch_fs *c = rb->c;
- struct bch_dev *ca = rb->have_ioref ? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;
- struct btree *b = rb->b;
- struct bio *bio = &rb->bio;
- struct bch_io_failures failed = { .nr = 0 };
- struct printbuf buf = PRINTBUF;
- bool saw_error = false;
- bool retry = false;
- bool can_retry;
-
- goto start;
- while (1) {
- retry = true;
- bch_info(c, "retrying read");
- ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ);
- rb->have_ioref = ca != NULL;
- bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
- bio->bi_iter.bi_sector = rb->pick.ptr.offset;
- bio->bi_iter.bi_size = btree_buf_bytes(b);
-
- if (rb->have_ioref) {
- bio_set_dev(bio, ca->disk_sb.bdev);
- submit_bio_wait(bio);
- } else {
- bio->bi_status = BLK_STS_REMOVED;
- }
-start:
- printbuf_reset(&buf);
- bch2_btree_pos_to_text(&buf, c, b);
- bch2_dev_io_err_on(ca && bio->bi_status, ca, BCH_MEMBER_ERROR_read,
- "btree read error %s for %s",
- bch2_blk_status_to_str(bio->bi_status), buf.buf);
- if (rb->have_ioref)
- percpu_ref_put(&ca->io_ref);
- rb->have_ioref = false;
-
- bch2_mark_io_failure(&failed, &rb->pick);
-
- can_retry = bch2_bkey_pick_read_device(c,
- bkey_i_to_s_c(&b->key),
- &failed, &rb->pick) > 0;
-
- if (!bio->bi_status &&
- !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
- if (retry)
- bch_info(c, "retry success");
- break;
- }
-
- saw_error = true;
-
- if (!can_retry) {
- set_btree_node_read_error(b);
- bch2_btree_lost_data(c, b->c.btree_id);
- break;
- }
- }
-
- bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
- rb->start_time);
- bio_put(&rb->bio);
-
- if (saw_error &&
- !btree_node_read_error(b) &&
- c->curr_recovery_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) {
- printbuf_reset(&buf);
- bch2_bpos_to_text(&buf, b->key.k.p);
- bch_err_ratelimited(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
- __func__, bch2_btree_id_str(b->c.btree_id), b->c.level, buf.buf);
-
- bch2_btree_node_rewrite_async(c, b);
- }
-
- printbuf_exit(&buf);
- clear_btree_node_read_in_flight(b);
- wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
-}
-
-static void btree_node_read_endio(struct bio *bio)
-{
- struct btree_read_bio *rb =
- container_of(bio, struct btree_read_bio, bio);
- struct bch_fs *c = rb->c;
-
- if (rb->have_ioref) {
- struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);
-
- bch2_latency_acct(ca, rb->start_time, READ);
- }
-
- queue_work(c->btree_read_complete_wq, &rb->work);
-}
-
-struct btree_node_read_all {
- struct closure cl;
- struct bch_fs *c;
- struct btree *b;
- unsigned nr;
- void *buf[BCH_REPLICAS_MAX];
- struct bio *bio[BCH_REPLICAS_MAX];
- blk_status_t err[BCH_REPLICAS_MAX];
-};
-
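-/*
- * Walk the bsets in a raw btree node buffer, following matching sequence
- * numbers, and return how many sectors appear to have been written (0 if the
- * magic doesn't match).
- */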
-static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
-{
- struct btree_node *bn = data;
- struct btree_node_entry *bne;
- unsigned offset = 0;
-
- if (le64_to_cpu(bn->magic) != bset_magic(c))
- return 0;
-
- while (offset < btree_sectors(c)) {
- if (!offset) {
- offset += vstruct_sectors(bn, c->block_bits);
- } else {
- bne = data + (offset << 9);
- if (bne->keys.seq != bn->keys.seq)
- break;
- offset += vstruct_sectors(bne, c->block_bits);
- }
- }
-
- return offset;
-}
-
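-/*
- * Check for valid-looking bsets (matching seq) past the written offset - used
- * to detect replicas that contain more bsets than they claim to have written.
- */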
-static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
-{
- struct btree_node *bn = data;
- struct btree_node_entry *bne;
-
- if (!offset)
- return false;
-
- while (offset < btree_sectors(c)) {
- bne = data + (offset << 9);
- if (bne->keys.seq == bn->keys.seq)
- return true;
- offset++;
- }
-
- return false;
-}
-
-static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
-{
- closure_type(ra, struct btree_node_read_all, cl);
- struct bch_fs *c = ra->c;
- struct btree *b = ra->b;
- struct printbuf buf = PRINTBUF;
- bool dump_bset_maps = false;
- bool have_retry = false;
- int ret = 0, best = -1, write = READ;
- unsigned i, written = 0, written2 = 0;
- __le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
- ? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
- bool _saw_error = false, *saw_error = &_saw_error;
-
- for (i = 0; i < ra->nr; i++) {
- struct btree_node *bn = ra->buf[i];
-
- if (ra->err[i])
- continue;
-
- if (le64_to_cpu(bn->magic) != bset_magic(c) ||
- (seq && seq != bn->keys.seq))
- continue;
-
- if (best < 0) {
- best = i;
- written = btree_node_sectors_written(c, bn);
- continue;
- }
-
- written2 = btree_node_sectors_written(c, ra->buf[i]);
- if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, NULL, NULL,
- btree_node_replicas_sectors_written_mismatch,
- "btree node sectors written mismatch: %u != %u",
- written, written2) ||
- btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
- -BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, NULL, NULL,
- btree_node_bset_after_end,
- "found bset signature after last bset") ||
- btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
- -BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, NULL, NULL,
- btree_node_replicas_data_mismatch,
- "btree node replicas content mismatch"))
- dump_bset_maps = true;
-
- if (written2 > written) {
- written = written2;
- best = i;
- }
- }
-fsck_err:
- if (dump_bset_maps) {
- for (i = 0; i < ra->nr; i++) {
- struct btree_node *bn = ra->buf[i];
- struct btree_node_entry *bne = NULL;
- unsigned offset = 0, sectors;
- bool gap = false;
-
- if (ra->err[i])
- continue;
-
- printbuf_reset(&buf);
-
- while (offset < btree_sectors(c)) {
- if (!offset) {
- sectors = vstruct_sectors(bn, c->block_bits);
- } else {
- bne = ra->buf[i] + (offset << 9);
- if (bne->keys.seq != bn->keys.seq)
- break;
- sectors = vstruct_sectors(bne, c->block_bits);
- }
-
- prt_printf(&buf, " %u-%u", offset, offset + sectors);
- if (bne && bch2_journal_seq_is_blacklisted(c,
- le64_to_cpu(bne->keys.journal_seq), false))
- prt_printf(&buf, "*");
- offset += sectors;
- }
-
- while (offset < btree_sectors(c)) {
- bne = ra->buf[i] + (offset << 9);
- if (bne->keys.seq == bn->keys.seq) {
- if (!gap)
- prt_printf(&buf, " GAP");
- gap = true;
-
- sectors = vstruct_sectors(bne, c->block_bits);
- prt_printf(&buf, " %u-%u", offset, offset + sectors);
- if (bch2_journal_seq_is_blacklisted(c,
- le64_to_cpu(bne->keys.journal_seq), false))
- prt_printf(&buf, "*");
- }
- offset++;
- }
-
- bch_err(c, "replica %u:%s", i, buf.buf);
- }
- }
-
- if (best >= 0) {
- memcpy(b->data, ra->buf[best], btree_buf_bytes(b));
- ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
- } else {
- ret = -1;
- }
-
- if (ret) {
- set_btree_node_read_error(b);
- bch2_btree_lost_data(c, b->c.btree_id);
- } else if (*saw_error)
- bch2_btree_node_rewrite_async(c, b);
-
- for (i = 0; i < ra->nr; i++) {
- mempool_free(ra->buf[i], &c->btree_bounce_pool);
- bio_put(ra->bio[i]);
- }
-
- closure_debug_destroy(&ra->cl);
- kfree(ra);
- printbuf_exit(&buf);
-
- clear_btree_node_read_in_flight(b);
- wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
-}
-
-static void btree_node_read_all_replicas_endio(struct bio *bio)
-{
- struct btree_read_bio *rb =
- container_of(bio, struct btree_read_bio, bio);
- struct bch_fs *c = rb->c;
- struct btree_node_read_all *ra = rb->ra;
-
- if (rb->have_ioref) {
- struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);
-
- bch2_latency_acct(ca, rb->start_time, READ);
- }
-
- ra->err[rb->idx] = bio->bi_status;
- closure_put(&ra->cl);
-}
-
-/*
- * XXX This allocates multiple times from the same mempools, and can deadlock
- * under sufficient memory pressure (but is only a debug path)
- */
-static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
-{
- struct bkey_s_c k = bkey_i_to_s_c(&b->key);
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded pick;
- struct btree_node_read_all *ra;
- unsigned i;
-
- ra = kzalloc(sizeof(*ra), GFP_NOFS);
- if (!ra)
- return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;
-
- closure_init(&ra->cl, NULL);
- ra->c = c;
- ra->b = b;
- ra->nr = bch2_bkey_nr_ptrs(k);
-
- for (i = 0; i < ra->nr; i++) {
- ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
- ra->bio[i] = bio_alloc_bioset(NULL,
- buf_pages(ra->buf[i], btree_buf_bytes(b)),
- REQ_OP_READ|REQ_SYNC|REQ_META,
- GFP_NOFS,
- &c->btree_bio);
- }
-
- i = 0;
- bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
- struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
- struct btree_read_bio *rb =
- container_of(ra->bio[i], struct btree_read_bio, bio);
- rb->c = c;
- rb->b = b;
- rb->ra = ra;
- rb->start_time = local_clock();
- rb->have_ioref = ca != NULL;
- rb->idx = i;
- rb->pick = pick;
- rb->bio.bi_iter.bi_sector = pick.ptr.offset;
- rb->bio.bi_end_io = btree_node_read_all_replicas_endio;
- bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b));
-
- if (rb->have_ioref) {
- this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
- bio_sectors(&rb->bio));
- bio_set_dev(&rb->bio, ca->disk_sb.bdev);
-
- closure_get(&ra->cl);
- submit_bio(&rb->bio);
- } else {
- ra->err[i] = BLK_STS_REMOVED;
- }
-
- i++;
- }
-
- if (sync) {
- closure_sync(&ra->cl);
- btree_node_read_all_replicas_done(&ra->cl.work);
- } else {
- continue_at(&ra->cl, btree_node_read_all_replicas_done,
- c->btree_read_complete_wq);
- }
-
- return 0;
-}
-
-void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
- bool sync)
-{
- struct bch_fs *c = trans->c;
- struct extent_ptr_decoded pick;
- struct btree_read_bio *rb;
- struct bch_dev *ca;
- struct bio *bio;
- int ret;
-
- trace_and_count(c, btree_node_read, trans, b);
-
- if (bch2_verify_all_btree_replicas &&
- !btree_node_read_all_replicas(c, b, sync))
- return;
-
- ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
- NULL, &pick);
-
- if (ret <= 0) {
- struct printbuf buf = PRINTBUF;
-
- prt_str(&buf, "btree node read error: no device to read from\n at ");
- bch2_btree_pos_to_text(&buf, c, b);
- bch_err_ratelimited(c, "%s", buf.buf);
-
- if (c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
- c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
- bch2_fatal_error(c);
-
- set_btree_node_read_error(b);
- bch2_btree_lost_data(c, b->c.btree_id);
- clear_btree_node_read_in_flight(b);
- wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
- printbuf_exit(&buf);
- return;
- }
-
- ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
-
- bio = bio_alloc_bioset(NULL,
- buf_pages(b->data, btree_buf_bytes(b)),
- REQ_OP_READ|REQ_SYNC|REQ_META,
- GFP_NOFS,
- &c->btree_bio);
- rb = container_of(bio, struct btree_read_bio, bio);
- rb->c = c;
- rb->b = b;
- rb->ra = NULL;
- rb->start_time = local_clock();
- rb->have_ioref = ca != NULL;
- rb->pick = pick;
- INIT_WORK(&rb->work, btree_node_read_work);
- bio->bi_iter.bi_sector = pick.ptr.offset;
- bio->bi_end_io = btree_node_read_endio;
- bch2_bio_map(bio, b->data, btree_buf_bytes(b));
-
- if (rb->have_ioref) {
- this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
- bio_sectors(bio));
- bio_set_dev(bio, ca->disk_sb.bdev);
-
- if (sync) {
- submit_bio_wait(bio);
- bch2_latency_acct(ca, rb->start_time, READ);
- btree_node_read_work(&rb->work);
- } else {
- submit_bio(bio);
- }
- } else {
- bio->bi_status = BLK_STS_REMOVED;
-
- if (sync)
- btree_node_read_work(&rb->work);
- else
- queue_work(c->btree_read_complete_wq, &rb->work);
- }
-}
-
-static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
- const struct bkey_i *k, unsigned level)
-{
- struct bch_fs *c = trans->c;
- struct closure cl;
- struct btree *b;
- int ret;
-
- closure_init_stack(&cl);
-
- do {
- ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
- closure_sync(&cl);
- } while (ret);
-
- b = bch2_btree_node_mem_alloc(trans, level != 0);
- bch2_btree_cache_cannibalize_unlock(trans);
-
- BUG_ON(IS_ERR(b));
-
- bkey_copy(&b->key, k);
- BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
-
- set_btree_node_read_in_flight(b);
-
- /* we can't pass the trans to read_done() for fsck errors, so it must be unlocked */
- bch2_trans_unlock(trans);
- bch2_btree_node_read(trans, b, true);
-
- if (btree_node_read_error(b)) {
- mutex_lock(&c->btree_cache.lock);
- bch2_btree_node_hash_remove(&c->btree_cache, b);
- mutex_unlock(&c->btree_cache.lock);
-
- ret = -BCH_ERR_btree_node_read_error;
- goto err;
- }
-
- bch2_btree_set_root_for_read(c, b);
-err:
- six_unlock_write(&b->c.lock);
- six_unlock_intent(&b->c.lock);
-
- return ret;
-}
-
-int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
- const struct bkey_i *k, unsigned level)
-{
- return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
-}
-
-static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
- struct btree_write *w)
-{
- unsigned long old, new;
-
- old = READ_ONCE(b->will_make_reachable);
- do {
- new = old;
- if (!(old & 1))
- break;
-
- new &= ~1UL;
- } while (!try_cmpxchg(&b->will_make_reachable, &old, new));
-
- if (old & 1)
- closure_put(&((struct btree_update *) new)->cl);
-
- bch2_journal_pin_drop(&c->journal, &w->journal);
-}
-
-static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
-{
- struct btree_write *w = btree_prev_write(b);
- unsigned long old, new;
- unsigned type = 0;
-
- bch2_btree_complete_write(c, b, w);
-
- old = READ_ONCE(b->flags);
- do {
- new = old;
-
- if ((old & (1U << BTREE_NODE_dirty)) &&
- (old & (1U << BTREE_NODE_need_write)) &&
- !(old & (1U << BTREE_NODE_never_write)) &&
- !(old & (1U << BTREE_NODE_write_blocked)) &&
- !(old & (1U << BTREE_NODE_will_make_reachable))) {
- new &= ~(1U << BTREE_NODE_dirty);
- new &= ~(1U << BTREE_NODE_need_write);
- new |= (1U << BTREE_NODE_write_in_flight);
- new |= (1U << BTREE_NODE_write_in_flight_inner);
- new |= (1U << BTREE_NODE_just_written);
- new ^= (1U << BTREE_NODE_write_idx);
-
- type = new & BTREE_WRITE_TYPE_MASK;
- new &= ~BTREE_WRITE_TYPE_MASK;
- } else {
- new &= ~(1U << BTREE_NODE_write_in_flight);
- new &= ~(1U << BTREE_NODE_write_in_flight_inner);
- }
- } while (!try_cmpxchg(&b->flags, &old, new));
-
- if (new & (1U << BTREE_NODE_write_in_flight))
- __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
- else
- wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
-}
-
-static void btree_node_write_done(struct bch_fs *c, struct btree *b)
-{
- struct btree_trans *trans = bch2_trans_get(c);
-
- btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
-
- /* we don't need transaction context anymore after we got the lock. */
- bch2_trans_put(trans);
- __btree_node_write_done(c, b);
- six_unlock_read(&b->c.lock);
-}
-
-static void btree_node_write_work(struct work_struct *work)
-{
- struct btree_write_bio *wbio =
- container_of(work, struct btree_write_bio, work);
- struct bch_fs *c = wbio->wbio.c;
- struct btree *b = wbio->wbio.bio.bi_private;
- int ret = 0;
-
- btree_bounce_free(c,
- wbio->data_bytes,
- wbio->wbio.used_mempool,
- wbio->data);
-
- bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
- bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
-
- if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) {
- ret = -BCH_ERR_btree_node_write_all_failed;
- goto err;
- }
-
- if (wbio->wbio.first_btree_write) {
- if (wbio->wbio.failed.nr) {
-
- }
- } else {
- ret = bch2_trans_do(c,
- bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
- BCH_WATERMARK_interior_updates|
- BCH_TRANS_COMMIT_journal_reclaim|
- BCH_TRANS_COMMIT_no_enospc|
- BCH_TRANS_COMMIT_no_check_rw,
- !wbio->wbio.failed.nr));
- if (ret)
- goto err;
- }
-out:
- bio_put(&wbio->wbio.bio);
- btree_node_write_done(c, b);
- return;
-err:
- set_btree_node_noevict(b);
- bch2_fs_fatal_err_on(!bch2_err_matches(ret, EROFS), c,
- "writing btree node: %s", bch2_err_str(ret));
- goto out;
-}
-
-static void btree_node_write_endio(struct bio *bio)
-{
- struct bch_write_bio *wbio = to_wbio(bio);
- struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
- struct bch_write_bio *orig = parent ?: wbio;
- struct btree_write_bio *wb = container_of(orig, struct btree_write_bio, wbio);
- struct bch_fs *c = wbio->c;
- struct btree *b = wbio->bio.bi_private;
- struct bch_dev *ca = wbio->have_ioref ? bch2_dev_have_ref(c, wbio->dev) : NULL;
- unsigned long flags;
-
- if (wbio->have_ioref)
- bch2_latency_acct(ca, wbio->submit_time, WRITE);
-
- if (!ca ||
- bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
- "btree write error: %s",
- bch2_blk_status_to_str(bio->bi_status)) ||
- bch2_meta_write_fault("btree")) {
- spin_lock_irqsave(&c->btree_write_error_lock, flags);
- bch2_dev_list_add_dev(&orig->failed, wbio->dev);
- spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
- }
-
- if (wbio->have_ioref)
- percpu_ref_put(&ca->io_ref);
-
- if (parent) {
- bio_put(bio);
- bio_endio(&parent->bio);
- return;
- }
-
- clear_btree_node_write_in_flight_inner(b);
- wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
- INIT_WORK(&wb->work, btree_node_write_work);
- queue_work(c->btree_io_complete_wq, &wb->work);
-}
-
-static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
- struct bset *i, unsigned sectors)
-{
- bool saw_error;
-
- int ret = bch2_bkey_validate(c, bkey_i_to_s_c(&b->key),
- BKEY_TYPE_btree, WRITE);
- if (ret) {
- bch2_fs_inconsistent(c, "invalid btree node key before write");
- return ret;
- }
-
- ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
- validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
- if (ret) {
- bch2_inconsistent_error(c);
- dump_stack();
- }
-
- return ret;
-}
-
-static void btree_write_submit(struct work_struct *work)
-{
- struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
- BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
-
- bkey_copy(&tmp.k, &wbio->key);
-
- bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
- ptr->offset += wbio->sector_offset;
-
- bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
- &tmp.k, false);
-}
-
-void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
-{
- struct btree_write_bio *wbio;
- struct bset *i;
- struct btree_node *bn = NULL;
- struct btree_node_entry *bne = NULL;
- struct sort_iter_stack sort_iter;
- struct nonce nonce;
- unsigned bytes_to_write, sectors_to_write, bytes, u64s;
- u64 seq = 0;
- bool used_mempool;
- unsigned long old, new;
- bool validate_before_checksum = false;
- enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
- void *data;
- int ret;
-
- if (flags & BTREE_WRITE_ALREADY_STARTED)
- goto do_write;
-
- /*
- * We may only have a read lock on the btree node - the dirty bit is our
-	 * "lock" against racing with other threads that may be trying to start
-	 * a write: we do a write iff we clear the dirty bit. Since setting the
- * dirty bit requires a write lock, we can't race with other threads
- * redirtying it:
- */
- old = READ_ONCE(b->flags);
- do {
- new = old;
-
- if (!(old & (1 << BTREE_NODE_dirty)))
- return;
-
- if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
- !(old & (1 << BTREE_NODE_need_write)))
- return;
-
- if (old &
- ((1 << BTREE_NODE_never_write)|
- (1 << BTREE_NODE_write_blocked)))
- return;
-
- if (b->written &&
- (old & (1 << BTREE_NODE_will_make_reachable)))
- return;
-
- if (old & (1 << BTREE_NODE_write_in_flight))
- return;
-
- if (flags & BTREE_WRITE_ONLY_IF_NEED)
- type = new & BTREE_WRITE_TYPE_MASK;
- new &= ~BTREE_WRITE_TYPE_MASK;
-
- new &= ~(1 << BTREE_NODE_dirty);
- new &= ~(1 << BTREE_NODE_need_write);
- new |= (1 << BTREE_NODE_write_in_flight);
- new |= (1 << BTREE_NODE_write_in_flight_inner);
- new |= (1 << BTREE_NODE_just_written);
- new ^= (1 << BTREE_NODE_write_idx);
- } while (!try_cmpxchg_acquire(&b->flags, &old, new));
-
- if (new & (1U << BTREE_NODE_need_write))
- return;
-do_write:
- BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
-
- atomic_long_dec(&c->btree_cache.nr_dirty);
-
- BUG_ON(btree_node_fake(b));
- BUG_ON((b->will_make_reachable != 0) != !b->written);
-
- BUG_ON(b->written >= btree_sectors(c));
- BUG_ON(b->written & (block_sectors(c) - 1));
- BUG_ON(bset_written(b, btree_bset_last(b)));
- BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
- BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
-
- bch2_sort_whiteouts(c, b);
-
- sort_iter_stack_init(&sort_iter, b);
-
- bytes = !b->written
- ? sizeof(struct btree_node)
- : sizeof(struct btree_node_entry);
-
- bytes += b->whiteout_u64s * sizeof(u64);
-
- for_each_bset(b, t) {
- i = bset(b, t);
-
- if (bset_written(b, i))
- continue;
-
- bytes += le16_to_cpu(i->u64s) * sizeof(u64);
- sort_iter_add(&sort_iter.iter,
- btree_bkey_first(b, t),
- btree_bkey_last(b, t));
- seq = max(seq, le64_to_cpu(i->journal_seq));
- }
-
- BUG_ON(b->written && !seq);
-
- /* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
- bytes += 8;
-
- /* buffer must be a multiple of the block size */
- bytes = round_up(bytes, block_bytes(c));
-
- data = btree_bounce_alloc(c, bytes, &used_mempool);
-
- if (!b->written) {
- bn = data;
- *bn = *b->data;
- i = &bn->keys;
- } else {
- bne = data;
- bne->keys = b->data->keys;
- i = &bne->keys;
- }
-
- i->journal_seq = cpu_to_le64(seq);
- i->u64s = 0;
-
- sort_iter_add(&sort_iter.iter,
- unwritten_whiteouts_start(b),
- unwritten_whiteouts_end(b));
- SET_BSET_SEPARATE_WHITEOUTS(i, false);
-
- u64s = bch2_sort_keys_keep_unwritten_whiteouts(i->start, &sort_iter.iter);
- le16_add_cpu(&i->u64s, u64s);
-
- b->whiteout_u64s = 0;
-
- BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
-
- set_needs_whiteout(i, false);
-
- /* do we have data to write? */
- if (b->written && !i->u64s)
- goto nowrite;
-
- bytes_to_write = vstruct_end(i) - data;
- sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
-
- if (!b->written &&
- b->key.k.type == KEY_TYPE_btree_ptr_v2)
- BUG_ON(btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)) != sectors_to_write);
-
- memset(data + bytes_to_write, 0,
- (sectors_to_write << 9) - bytes_to_write);
-
- BUG_ON(b->written + sectors_to_write > btree_sectors(c));
- BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
- BUG_ON(i->seq != b->data->keys.seq);
-
- i->version = cpu_to_le16(c->sb.version);
- SET_BSET_OFFSET(i, b->written);
- SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
-
- if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
- validate_before_checksum = true;
-
- /* validate_bset will be modifying: */
- if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
- validate_before_checksum = true;
-
- /* if we're going to be encrypting, check metadata validity first: */
- if (validate_before_checksum &&
- validate_bset_for_write(c, b, i, sectors_to_write))
- goto err;
-
- ret = bset_encrypt(c, i, b->written << 9);
- if (bch2_fs_fatal_err_on(ret, c,
- "encrypting btree node: %s", bch2_err_str(ret)))
- goto err;
-
- nonce = btree_nonce(i, b->written << 9);
-
- if (bn)
- bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
- else
- bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
-
- /* if we're not encrypting, check metadata after checksumming: */
- if (!validate_before_checksum &&
- validate_bset_for_write(c, b, i, sectors_to_write))
- goto err;
-
- /*
- * We handle btree write errors by immediately halting the journal -
- * after we've done that, we can't issue any subsequent btree writes
- * because they might have pointers to new nodes that failed to write.
- *
- * Furthermore, there's no point in doing any more btree writes because
- * with the journal stopped, we're never going to update the journal to
- * reflect that those writes were done and the data flushed from the
- * journal:
- *
- * Also on journal error, the pending write may have updates that were
- * never journalled (interior nodes, see btree_update_nodes_written()) -
-	 * it's critical that we don't do the write in that case; otherwise we
- * will have updates visible that weren't in the journal:
- *
- * Make sure to update b->written so bch2_btree_init_next() doesn't
- * break:
- */
- if (bch2_journal_error(&c->journal) ||
- c->opts.nochanges)
- goto err;
-
- trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
-
- wbio = container_of(bio_alloc_bioset(NULL,
- buf_pages(data, sectors_to_write << 9),
- REQ_OP_WRITE|REQ_META,
- GFP_NOFS,
- &c->btree_bio),
- struct btree_write_bio, wbio.bio);
- wbio_init(&wbio->wbio.bio);
- wbio->data = data;
- wbio->data_bytes = bytes;
- wbio->sector_offset = b->written;
- wbio->wbio.c = c;
- wbio->wbio.used_mempool = used_mempool;
- wbio->wbio.first_btree_write = !b->written;
- wbio->wbio.bio.bi_end_io = btree_node_write_endio;
- wbio->wbio.bio.bi_private = b;
-
- bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
-
- bkey_copy(&wbio->key, &b->key);
-
- b->written += sectors_to_write;
-
- if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
- bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
- cpu_to_le16(b->written);
-
- atomic64_inc(&c->btree_write_stats[type].nr);
- atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
-
- INIT_WORK(&wbio->work, btree_write_submit);
- queue_work(c->btree_write_submit_wq, &wbio->work);
- return;
-err:
- set_btree_node_noevict(b);
- b->written += sectors_to_write;
-nowrite:
- btree_bounce_free(c, bytes, used_mempool, data);
- __btree_node_write_done(c, b);
-}
-
-/*
- * Work that must be done with write lock held:
- */
-bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
-{
- bool invalidated_iter = false;
- struct btree_node_entry *bne;
-
- if (!btree_node_just_written(b))
- return false;
-
- BUG_ON(b->whiteout_u64s);
-
- clear_btree_node_just_written(b);
-
- /*
- * Note: immediately after write, bset_written() doesn't work - the
- * amount of data we had to write after compaction might have been
- * smaller than the offset of the last bset.
- *
- * However, we know that all bsets have been written here, as long as
- * we're still holding the write lock:
- */
-
- /*
- * XXX: decide if we really want to unconditionally sort down to a
- * single bset:
- */
- if (b->nsets > 1) {
- btree_node_sort(c, b, 0, b->nsets);
- invalidated_iter = true;
- } else {
- invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
- }
-
- for_each_bset(b, t)
- set_needs_whiteout(bset(b, t), true);
-
- bch2_btree_verify(c, b);
-
- /*
- * If later we don't unconditionally sort down to a single bset, we have
- * to ensure this is still true:
- */
- BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
-
- bne = want_new_bset(c, b);
- if (bne)
- bch2_bset_init_next(b, bne);
-
- bch2_btree_build_aux_trees(b);
-
- return invalidated_iter;
-}
-
-/*
- * Use this one if the node is intent locked:
- */
-void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
- enum six_lock_type lock_type_held,
- unsigned flags)
-{
- if (lock_type_held == SIX_LOCK_intent ||
- (lock_type_held == SIX_LOCK_read &&
- six_lock_tryupgrade(&b->c.lock))) {
- __bch2_btree_node_write(c, b, flags);
-
- /* don't cycle lock unnecessarily: */
- if (btree_node_just_written(b) &&
- six_trylock_write(&b->c.lock)) {
- bch2_btree_post_write_cleanup(c, b);
- six_unlock_write(&b->c.lock);
- }
-
- if (lock_type_held == SIX_LOCK_read)
- six_lock_downgrade(&b->c.lock);
- } else {
- __bch2_btree_node_write(c, b, flags);
- if (lock_type_held == SIX_LOCK_write &&
- btree_node_just_written(b))
- bch2_btree_post_write_cleanup(c, b);
- }
-}
-
-static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
-{
- struct bucket_table *tbl;
- struct rhash_head *pos;
- struct btree *b;
- unsigned i;
- bool ret = false;
-restart:
- rcu_read_lock();
- for_each_cached_btree(b, c, tbl, i, pos)
- if (test_bit(flag, &b->flags)) {
- rcu_read_unlock();
- wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
- ret = true;
- goto restart;
- }
- rcu_read_unlock();
-
- return ret;
-}
-
-bool bch2_btree_flush_all_reads(struct bch_fs *c)
-{
- return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
-}
-
-bool bch2_btree_flush_all_writes(struct bch_fs *c)
-{
- return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
-}
-
-static const char * const bch2_btree_write_types[] = {
-#define x(t, n) [n] = #t,
- BCH_BTREE_WRITE_TYPES()
- NULL
-};
-
-void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
-{
- printbuf_tabstop_push(out, 20);
- printbuf_tabstop_push(out, 10);
-
- prt_printf(out, "\tnr\tsize\n");
-
- for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
- u64 nr = atomic64_read(&c->btree_write_stats[i].nr);
- u64 bytes = atomic64_read(&c->btree_write_stats[i].bytes);
-
- prt_printf(out, "%s:\t%llu\t", bch2_btree_write_types[i], nr);
- prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
- prt_newline(out);
- }
-}
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
deleted file mode 100644
index 9b01ca3de907..000000000000
--- a/fs/bcachefs/btree_io.h
+++ /dev/null
@@ -1,223 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BTREE_IO_H
-#define _BCACHEFS_BTREE_IO_H
-
-#include "bkey_methods.h"
-#include "bset.h"
-#include "btree_locking.h"
-#include "checksum.h"
-#include "extents.h"
-#include "io_write_types.h"
-
-struct bch_fs;
-struct btree_write;
-struct btree;
-struct btree_iter;
-struct btree_node_read_all;
-
-static inline void set_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
-{
- if (!test_and_set_bit(BTREE_NODE_dirty, &b->flags))
- atomic_long_inc(&c->btree_cache.nr_dirty);
-}
-
-static inline void clear_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
-{
- if (test_and_clear_bit(BTREE_NODE_dirty, &b->flags))
- atomic_long_dec(&c->btree_cache.nr_dirty);
-}
-
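-/*
- * Sectors written according to the pointer itself: btree_ptr_v2 keys record
- * this, older btree pointers don't, in which case this returns 0 and callers
- * fall back to scanning up to btree_sectors(c).
- */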
-static inline unsigned btree_ptr_sectors_written(struct bkey_s_c k)
-{
- return k.k->type == KEY_TYPE_btree_ptr_v2
- ? le16_to_cpu(bkey_s_c_to_btree_ptr_v2(k).v->sectors_written)
- : 0;
-}
-
-struct btree_read_bio {
- struct bch_fs *c;
- struct btree *b;
- struct btree_node_read_all *ra;
- u64 start_time;
- unsigned have_ioref:1;
- unsigned idx:7;
- struct extent_ptr_decoded pick;
- struct work_struct work;
- struct bio bio;
-};
-
-struct btree_write_bio {
- struct work_struct work;
- __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
- void *data;
- unsigned data_bytes;
- unsigned sector_offset;
- struct bch_write_bio wbio;
-};
-
-void bch2_btree_node_io_unlock(struct btree *);
-void bch2_btree_node_io_lock(struct btree *);
-void __bch2_btree_node_wait_on_read(struct btree *);
-void __bch2_btree_node_wait_on_write(struct btree *);
-void bch2_btree_node_wait_on_read(struct btree *);
-void bch2_btree_node_wait_on_write(struct btree *);
-
-enum compact_mode {
- COMPACT_LAZY,
- COMPACT_ALL,
-};
-
-bool bch2_compact_whiteouts(struct bch_fs *, struct btree *,
- enum compact_mode);
-
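-/*
- * Lazy compaction heuristic: only worth it once dead u64s make up more than a
- * third of the bset and at least 64 u64s - e.g. 120 dead out of 300 total
- * qualifies, 50 dead out of 300 does not.
- */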
-static inline bool should_compact_bset_lazy(struct btree *b,
- struct bset_tree *t)
-{
- unsigned total_u64s = bset_u64s(t);
- unsigned dead_u64s = bset_dead_u64s(b, t);
-
- return dead_u64s > 64 && dead_u64s * 3 > total_u64s;
-}
-
-static inline bool bch2_maybe_compact_whiteouts(struct bch_fs *c, struct btree *b)
-{
- for_each_bset(b, t)
- if (should_compact_bset_lazy(b, t))
- return bch2_compact_whiteouts(c, b, COMPACT_LAZY);
-
- return false;
-}
-
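-/*
- * Nonce for encrypting/checksumming a bset: built from the bset's byte offset
- * within the node, the node's random seq, and the low bits of journal_seq
- * mixed with BCH_NONCE_BTREE so btree nonces don't collide with other metadata
- * types.
- */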
-static inline struct nonce btree_nonce(struct bset *i, unsigned offset)
-{
- return (struct nonce) {{
- [0] = cpu_to_le32(offset),
- [1] = ((__le32 *) &i->seq)[0],
- [2] = ((__le32 *) &i->seq)[1],
- [3] = ((__le32 *) &i->journal_seq)[0]^BCH_NONCE_BTREE,
- }};
-}
-
-static inline int bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
-{
- struct nonce nonce = btree_nonce(i, offset);
- int ret;
-
- if (!offset) {
- struct btree_node *bn = container_of(i, struct btree_node, keys);
- unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;
-
- ret = bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce,
- &bn->flags, bytes);
- if (ret)
- return ret;
-
- nonce = nonce_add(nonce, round_up(bytes, CHACHA_BLOCK_SIZE));
- }
-
- return bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
- vstruct_end(i) - (void *) i->_data);
-}
-
-void bch2_btree_sort_into(struct bch_fs *, struct btree *, struct btree *);
-
-void bch2_btree_node_drop_keys_outside_node(struct btree *);
-
-void bch2_btree_build_aux_trees(struct btree *);
-void bch2_btree_init_next(struct btree_trans *, struct btree *);
-
-int bch2_btree_node_read_done(struct bch_fs *, struct bch_dev *,
- struct btree *, bool, bool *);
-void bch2_btree_node_read(struct btree_trans *, struct btree *, bool);
-int bch2_btree_root_read(struct bch_fs *, enum btree_id,
- const struct bkey_i *, unsigned);
-
-bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);
-
-enum btree_write_flags {
- __BTREE_WRITE_ONLY_IF_NEED = BTREE_WRITE_TYPE_BITS,
- __BTREE_WRITE_ALREADY_STARTED,
-};
-#define BTREE_WRITE_ONLY_IF_NEED BIT(__BTREE_WRITE_ONLY_IF_NEED)
-#define BTREE_WRITE_ALREADY_STARTED BIT(__BTREE_WRITE_ALREADY_STARTED)
-
-void __bch2_btree_node_write(struct bch_fs *, struct btree *, unsigned);
-void bch2_btree_node_write(struct bch_fs *, struct btree *,
- enum six_lock_type, unsigned);
-
-static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b,
- enum six_lock_type lock_held)
-{
- bch2_btree_node_write(c, b, lock_held, BTREE_WRITE_ONLY_IF_NEED);
-}
-
-bool bch2_btree_flush_all_reads(struct bch_fs *);
-bool bch2_btree_flush_all_writes(struct bch_fs *);
-
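-/*
- * Compatibility helpers for btree nodes written by older metadata versions:
- * they translate bkey formats, positions and node headers between the current
- * in-memory conventions and the pre-inode_btree_change / pre-snapshot on-disk
- * layouts, with @write selecting the direction.
- */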
-static inline void compat_bformat(unsigned level, enum btree_id btree_id,
- unsigned version, unsigned big_endian,
- int write, struct bkey_format *f)
-{
- if (version < bcachefs_metadata_version_inode_btree_change &&
- btree_id == BTREE_ID_inodes) {
- swap(f->bits_per_field[BKEY_FIELD_INODE],
- f->bits_per_field[BKEY_FIELD_OFFSET]);
- swap(f->field_offset[BKEY_FIELD_INODE],
- f->field_offset[BKEY_FIELD_OFFSET]);
- }
-
- if (version < bcachefs_metadata_version_snapshot &&
- (level || btree_type_has_snapshots(btree_id))) {
- u64 max_packed =
- ~(~0ULL << f->bits_per_field[BKEY_FIELD_SNAPSHOT]);
-
- f->field_offset[BKEY_FIELD_SNAPSHOT] = write
- ? 0
- : cpu_to_le64(U32_MAX - max_packed);
- }
-}
-
-static inline void compat_bpos(unsigned level, enum btree_id btree_id,
- unsigned version, unsigned big_endian,
- int write, struct bpos *p)
-{
- if (big_endian != CPU_BIG_ENDIAN)
- bch2_bpos_swab(p);
-
- if (version < bcachefs_metadata_version_inode_btree_change &&
- btree_id == BTREE_ID_inodes)
- swap(p->inode, p->offset);
-}
-
-static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
- unsigned version, unsigned big_endian,
- int write,
- struct btree_node *bn)
-{
- if (version < bcachefs_metadata_version_inode_btree_change &&
- btree_id_is_extents(btree_id) &&
- !bpos_eq(bn->min_key, POS_MIN) &&
- write)
- bn->min_key = bpos_nosnap_predecessor(bn->min_key);
-
- if (version < bcachefs_metadata_version_snapshot &&
- write)
- bn->max_key.snapshot = 0;
-
- compat_bpos(level, btree_id, version, big_endian, write, &bn->min_key);
- compat_bpos(level, btree_id, version, big_endian, write, &bn->max_key);
-
- if (version < bcachefs_metadata_version_snapshot &&
- !write)
- bn->max_key.snapshot = U32_MAX;
-
- if (version < bcachefs_metadata_version_inode_btree_change &&
- btree_id_is_extents(btree_id) &&
- !bpos_eq(bn->min_key, POS_MIN) &&
- !write)
- bn->min_key = bpos_nosnap_successor(bn->min_key);
-}
-
-void bch2_btree_write_stats_to_text(struct printbuf *, struct bch_fs *);
-
-#endif /* _BCACHEFS_BTREE_IO_H */
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
deleted file mode 100644
index eef9b89c561d..000000000000
--- a/fs/bcachefs/btree_iter.c
+++ /dev/null
@@ -1,3499 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "bkey_methods.h"
-#include "bkey_buf.h"
-#include "btree_cache.h"
-#include "btree_iter.h"
-#include "btree_journal_iter.h"
-#include "btree_key_cache.h"
-#include "btree_locking.h"
-#include "btree_update.h"
-#include "debug.h"
-#include "error.h"
-#include "extents.h"
-#include "journal.h"
-#include "journal_io.h"
-#include "replicas.h"
-#include "snapshot.h"
-#include "trace.h"
-
-#include <linux/random.h>
-#include <linux/prefetch.h>
-
-static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
-static inline void btree_path_list_add(struct btree_trans *,
- btree_path_idx_t, btree_path_idx_t);
-
-static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
-{
-#ifdef TRACK_PATH_ALLOCATED
- return iter->ip_allocated;
-#else
- return 0;
-#endif
-}
-
-static btree_path_idx_t btree_path_alloc(struct btree_trans *, btree_path_idx_t);
-static void bch2_trans_srcu_lock(struct btree_trans *);
-
-static inline int __btree_path_cmp(const struct btree_path *l,
- enum btree_id r_btree_id,
- bool r_cached,
- struct bpos r_pos,
- unsigned r_level)
-{
- /*
- * Must match lock ordering as defined by __bch2_btree_node_lock:
- */
- return cmp_int(l->btree_id, r_btree_id) ?:
- cmp_int((int) l->cached, (int) r_cached) ?:
- bpos_cmp(l->pos, r_pos) ?:
- -cmp_int(l->level, r_level);
-}
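
The comparator above relies on the GNU "a ?: b" extension: each cmp_int() returns a signed three-way result, and the chain falls through to the next field only when the previous fields compare equal, giving a lexicographic ordering (which must agree with the lock ordering). A self-contained sketch of the pattern, with a simplified cmp_int and a made-up three-field struct:

#include <stdio.h>

/* three-way compare: -1, 0 or 1 (simplified version of the kernel's cmp_int) */
#define cmp_int(l, r)	(((l) > (r)) - ((l) < (r)))

struct pos_key {
	unsigned	btree_id;
	int		cached;
	unsigned long	pos;
};

static int pos_key_cmp(const struct pos_key *l, const struct pos_key *r)
{
	/* GNU "a ?: b": fall through to the next field only on equality */
	return cmp_int(l->btree_id, r->btree_id) ?:
	       cmp_int(l->cached, r->cached) ?:
	       cmp_int(l->pos, r->pos);
}

int main(void)
{
	struct pos_key a = { 1, 0, 100 }, b = { 1, 0, 200 };

	printf("%d\n", pos_key_cmp(&a, &b));	/* -1: equal until pos, which is lower */
	return 0;
}
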
-
-static inline int btree_path_cmp(const struct btree_path *l,
- const struct btree_path *r)
-{
- return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
-}
-
-static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
-{
- /* Are we iterating over keys in all snapshots? */
- if (iter->flags & BTREE_ITER_all_snapshots) {
- p = bpos_successor(p);
- } else {
- p = bpos_nosnap_successor(p);
- p.snapshot = iter->snapshot;
- }
-
- return p;
-}
-
-static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
-{
- /* Are we iterating over keys in all snapshots? */
- if (iter->flags & BTREE_ITER_all_snapshots) {
- p = bpos_predecessor(p);
- } else {
- p = bpos_nosnap_predecessor(p);
- p.snapshot = iter->snapshot;
- }
-
- return p;
-}
-
-static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
-{
- struct bpos pos = iter->pos;
-
- if ((iter->flags & BTREE_ITER_is_extents) &&
- !bkey_eq(pos, POS_MAX))
- pos = bkey_successor(iter, pos);
- return pos;
-}
-
-static inline bool btree_path_pos_before_node(struct btree_path *path,
- struct btree *b)
-{
- return bpos_lt(path->pos, b->data->min_key);
-}
-
-static inline bool btree_path_pos_after_node(struct btree_path *path,
- struct btree *b)
-{
- return bpos_gt(path->pos, b->key.k.p);
-}
-
-static inline bool btree_path_pos_in_node(struct btree_path *path,
- struct btree *b)
-{
- return path->btree_id == b->c.btree_id &&
- !btree_path_pos_before_node(path, b) &&
- !btree_path_pos_after_node(path, b);
-}
-
-/* Btree iterator: */
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-
-static void bch2_btree_path_verify_cached(struct btree_trans *trans,
- struct btree_path *path)
-{
- struct bkey_cached *ck;
- bool locked = btree_node_locked(path, 0);
-
- if (!bch2_btree_node_relock(trans, path, 0))
- return;
-
- ck = (void *) path->l[0].b;
- BUG_ON(ck->key.btree_id != path->btree_id ||
- !bkey_eq(ck->key.pos, path->pos));
-
- if (!locked)
- btree_node_unlock(trans, path, 0);
-}
-
-static void bch2_btree_path_verify_level(struct btree_trans *trans,
- struct btree_path *path, unsigned level)
-{
- struct btree_path_level *l;
- struct btree_node_iter tmp;
- bool locked;
- struct bkey_packed *p, *k;
- struct printbuf buf1 = PRINTBUF;
- struct printbuf buf2 = PRINTBUF;
- struct printbuf buf3 = PRINTBUF;
- const char *msg;
-
- if (!bch2_debug_check_iterators)
- return;
-
- l = &path->l[level];
- tmp = l->iter;
- locked = btree_node_locked(path, level);
-
- if (path->cached) {
- if (!level)
- bch2_btree_path_verify_cached(trans, path);
- return;
- }
-
- if (!btree_path_node(path, level))
- return;
-
- if (!bch2_btree_node_relock_notrace(trans, path, level))
- return;
-
- BUG_ON(!btree_path_pos_in_node(path, l->b));
-
- bch2_btree_node_iter_verify(&l->iter, l->b);
-
- /*
- * For interior nodes, the iterator will have skipped past deleted keys:
- */
- p = level
- ? bch2_btree_node_iter_prev(&tmp, l->b)
- : bch2_btree_node_iter_prev_all(&tmp, l->b);
- k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
-
- if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
- msg = "before";
- goto err;
- }
-
- if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
- msg = "after";
- goto err;
- }
-
- if (!locked)
- btree_node_unlock(trans, path, level);
- return;
-err:
- bch2_bpos_to_text(&buf1, path->pos);
-
- if (p) {
- struct bkey uk = bkey_unpack_key(l->b, p);
-
- bch2_bkey_to_text(&buf2, &uk);
- } else {
- prt_printf(&buf2, "(none)");
- }
-
- if (k) {
- struct bkey uk = bkey_unpack_key(l->b, k);
-
- bch2_bkey_to_text(&buf3, &uk);
- } else {
- prt_printf(&buf3, "(none)");
- }
-
- panic("path should be %s key at level %u:\n"
- "path pos %s\n"
- "prev key %s\n"
- "cur key %s\n",
- msg, level, buf1.buf, buf2.buf, buf3.buf);
-}
-
-static void bch2_btree_path_verify(struct btree_trans *trans,
- struct btree_path *path)
-{
- struct bch_fs *c = trans->c;
-
- for (unsigned i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
- if (!path->l[i].b) {
- BUG_ON(!path->cached &&
- bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
- break;
- }
-
- bch2_btree_path_verify_level(trans, path, i);
- }
-
- bch2_btree_path_verify_locks(path);
-}
-
-void bch2_trans_verify_paths(struct btree_trans *trans)
-{
- struct btree_path *path;
- unsigned iter;
-
- trans_for_each_path(trans, path, iter)
- bch2_btree_path_verify(trans, path);
-}
-
-static void bch2_btree_iter_verify(struct btree_iter *iter)
-{
- struct btree_trans *trans = iter->trans;
-
- BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);
-
- BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
- (iter->flags & BTREE_ITER_all_snapshots));
-
- BUG_ON(!(iter->flags & BTREE_ITER_snapshot_field) &&
- (iter->flags & BTREE_ITER_all_snapshots) &&
- !btree_type_has_snapshot_field(iter->btree_id));
-
- if (iter->update_path)
- bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
- bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
-}
-
-static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
-{
- BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
- !iter->pos.snapshot);
-
- BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) &&
- iter->pos.snapshot != iter->snapshot);
-
- BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
- bkey_gt(iter->pos, iter->k.p));
-}
-
-static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
-{
- struct btree_trans *trans = iter->trans;
- struct btree_iter copy;
- struct bkey_s_c prev;
- int ret = 0;
-
- if (!bch2_debug_check_iterators)
- return 0;
-
- if (!(iter->flags & BTREE_ITER_filter_snapshots))
- return 0;
-
- if (bkey_err(k) || !k.k)
- return 0;
-
- BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
- iter->snapshot,
- k.k->p.snapshot));
-
- bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
- BTREE_ITER_nopreserve|
- BTREE_ITER_all_snapshots);
- prev = bch2_btree_iter_prev(&copy);
- if (!prev.k)
- goto out;
-
- ret = bkey_err(prev);
- if (ret)
- goto out;
-
- if (bkey_eq(prev.k->p, k.k->p) &&
- bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
- prev.k->p.snapshot) > 0) {
- struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
-
- bch2_bkey_to_text(&buf1, k.k);
- bch2_bkey_to_text(&buf2, prev.k);
-
- panic("iter snap %u\n"
- "k %s\n"
- "prev %s\n",
- iter->snapshot,
- buf1.buf, buf2.buf);
- }
-out:
- bch2_trans_iter_exit(trans, &copy);
- return ret;
-}
-
-void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
- struct bpos pos)
-{
- bch2_trans_verify_not_unlocked(trans);
-
- struct btree_path *path;
- struct trans_for_each_path_inorder_iter iter;
- struct printbuf buf = PRINTBUF;
-
- btree_trans_sort_paths(trans);
-
- trans_for_each_path_inorder(trans, path, iter) {
- if (path->btree_id != id ||
- !btree_node_locked(path, 0) ||
- !path->should_be_locked)
- continue;
-
- if (!path->cached) {
- if (bkey_ge(pos, path->l[0].b->data->min_key) &&
- bkey_le(pos, path->l[0].b->key.k.p))
- return;
- } else {
- if (bkey_eq(pos, path->pos))
- return;
- }
- }
-
- bch2_dump_trans_paths_updates(trans);
- bch2_bpos_to_text(&buf, pos);
-
- panic("not locked: %s %s\n", bch2_btree_id_str(id), buf.buf);
-}
-
-#else
-
-static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
- struct btree_path *path, unsigned l) {}
-static inline void bch2_btree_path_verify(struct btree_trans *trans,
- struct btree_path *path) {}
-static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
-static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
-static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
-
-#endif
-
-/* Btree path: fixups after btree updates */
-
-static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
- struct btree *b,
- struct bset_tree *t,
- struct bkey_packed *k)
-{
- struct btree_node_iter_set *set;
-
- btree_node_iter_for_each(iter, set)
- if (set->end == t->end_offset) {
- set->k = __btree_node_key_to_offset(b, k);
- bch2_btree_node_iter_sort(iter, b);
- return;
- }
-
- bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
-}
-
-static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
- struct btree *b,
- struct bkey_packed *where)
-{
- struct btree_path_level *l = &path->l[b->c.level];
-
- if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
- return;
-
- if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
- bch2_btree_node_iter_advance(&l->iter, l->b);
-}
-
-void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
- struct btree *b,
- struct bkey_packed *where)
-{
- struct btree_path *path;
- unsigned i;
-
- trans_for_each_path_with_node(trans, b, path, i) {
- __bch2_btree_path_fix_key_modified(path, b, where);
- bch2_btree_path_verify_level(trans, path, b->c.level);
- }
-}
-
-static void __bch2_btree_node_iter_fix(struct btree_path *path,
- struct btree *b,
- struct btree_node_iter *node_iter,
- struct bset_tree *t,
- struct bkey_packed *where,
- unsigned clobber_u64s,
- unsigned new_u64s)
-{
- const struct bkey_packed *end = btree_bkey_last(b, t);
- struct btree_node_iter_set *set;
- unsigned offset = __btree_node_key_to_offset(b, where);
- int shift = new_u64s - clobber_u64s;
- unsigned old_end = t->end_offset - shift;
- unsigned orig_iter_pos = node_iter->data[0].k;
- bool iter_current_key_modified =
- orig_iter_pos >= offset &&
- orig_iter_pos <= offset + clobber_u64s;
-
- btree_node_iter_for_each(node_iter, set)
- if (set->end == old_end)
- goto found;
-
-	/* didn't find the bset in the iterator - might have to re-add it: */
- if (new_u64s &&
- bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
- bch2_btree_node_iter_push(node_iter, b, where, end);
- goto fixup_done;
- } else {
- /* Iterator is after key that changed */
- return;
- }
-found:
- set->end = t->end_offset;
-
- /* Iterator hasn't gotten to the key that changed yet: */
- if (set->k < offset)
- return;
-
- if (new_u64s &&
- bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
- set->k = offset;
- } else if (set->k < offset + clobber_u64s) {
- set->k = offset + new_u64s;
- if (set->k == set->end)
- bch2_btree_node_iter_set_drop(node_iter, set);
- } else {
- /* Iterator is after key that changed */
- set->k = (int) set->k + shift;
- return;
- }
-
- bch2_btree_node_iter_sort(node_iter, b);
-fixup_done:
- if (node_iter->data[0].k != orig_iter_pos)
- iter_current_key_modified = true;
-
- /*
- * When a new key is added, and the node iterator now points to that
- * key, the iterator might have skipped past deleted keys that should
- * come after the key the iterator now points to. We have to rewind to
- * before those deleted keys - otherwise
- * bch2_btree_node_iter_prev_all() breaks:
- */
- if (!bch2_btree_node_iter_end(node_iter) &&
- iter_current_key_modified &&
- b->c.level) {
- struct bkey_packed *k, *k2, *p;
-
- k = bch2_btree_node_iter_peek_all(node_iter, b);
-
- for_each_bset(b, t) {
- bool set_pos = false;
-
- if (node_iter->data[0].end == t->end_offset)
- continue;
-
- k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
-
- while ((p = bch2_bkey_prev_all(b, t, k2)) &&
- bkey_iter_cmp(b, k, p) < 0) {
- k2 = p;
- set_pos = true;
- }
-
- if (set_pos)
- btree_node_iter_set_set_pos(node_iter,
- b, t, k2);
- }
- }
-}
-
-void bch2_btree_node_iter_fix(struct btree_trans *trans,
- struct btree_path *path,
- struct btree *b,
- struct btree_node_iter *node_iter,
- struct bkey_packed *where,
- unsigned clobber_u64s,
- unsigned new_u64s)
-{
- struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
- struct btree_path *linked;
- unsigned i;
-
- if (node_iter != &path->l[b->c.level].iter) {
- __bch2_btree_node_iter_fix(path, b, node_iter, t,
- where, clobber_u64s, new_u64s);
-
- if (bch2_debug_check_iterators)
- bch2_btree_node_iter_verify(node_iter, b);
- }
-
- trans_for_each_path_with_node(trans, b, linked, i) {
- __bch2_btree_node_iter_fix(linked, b,
- &linked->l[b->c.level].iter, t,
- where, clobber_u64s, new_u64s);
- bch2_btree_path_verify_level(trans, linked, b->c.level);
- }
-}
-
-/* Btree path level: pointer to a particular btree node and node iter */
-
-static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
- struct btree_path_level *l,
- struct bkey *u,
- struct bkey_packed *k)
-{
- if (unlikely(!k)) {
- /*
- * signal to bch2_btree_iter_peek_slot() that we're currently at
- * a hole
- */
- u->type = KEY_TYPE_deleted;
- return bkey_s_c_null;
- }
-
- return bkey_disassemble(l->b, k, u);
-}
-
-static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
- struct btree_path_level *l,
- struct bkey *u)
-{
- return __btree_iter_unpack(c, l, u,
- bch2_btree_node_iter_peek_all(&l->iter, l->b));
-}
-
-static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
- struct btree_path *path,
- struct btree_path_level *l,
- struct bkey *u)
-{
- struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
- bch2_btree_node_iter_peek(&l->iter, l->b));
-
- path->pos = k.k ? k.k->p : l->b->key.k.p;
- trans->paths_sorted = false;
- bch2_btree_path_verify_level(trans, path, l - path->l);
- return k;
-}
-
-static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
- struct btree_path *path,
- struct btree_path_level *l,
- struct bkey *u)
-{
- struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
- bch2_btree_node_iter_prev(&l->iter, l->b));
-
- path->pos = k.k ? k.k->p : l->b->data->min_key;
- trans->paths_sorted = false;
- bch2_btree_path_verify_level(trans, path, l - path->l);
- return k;
-}
-
-static inline bool btree_path_advance_to_pos(struct btree_path *path,
- struct btree_path_level *l,
- int max_advance)
-{
- struct bkey_packed *k;
- int nr_advanced = 0;
-
- while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
- bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
- if (max_advance > 0 && nr_advanced >= max_advance)
- return false;
-
- bch2_btree_node_iter_advance(&l->iter, l->b);
- nr_advanced++;
- }
-
- return true;
-}
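
btree_path_advance_to_pos() above steps the node iterator toward the path's position but bails out after max_advance steps so the caller can reinitialize the iterator instead. A standalone sketch of that bounded-advance idea over a plain sorted array (the array and helper names are stand-ins):

#include <stdbool.h>
#include <stdio.h>

static bool advance_to_pos(const int *keys, unsigned nr, unsigned *iter,
			   int target, int max_advance)
{
	int nr_advanced = 0;

	while (*iter < nr && keys[*iter] < target) {
		if (max_advance > 0 && nr_advanced >= max_advance)
			return false;	/* too far: caller should reinit the iterator */
		(*iter)++;
		nr_advanced++;
	}
	return true;
}

int main(void)
{
	int keys[] = { 1, 3, 5, 7, 9, 11 };
	unsigned iter = 0;

	printf("%d\n", advance_to_pos(keys, 6, &iter, 9, 8));	/* 1: reached target */
	printf("%u\n", iter);					/* 4: index of key 9 */
	return 0;
}
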
-
-static inline void __btree_path_level_init(struct btree_path *path,
- unsigned level)
-{
- struct btree_path_level *l = &path->l[level];
-
- bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
-
- /*
-	 * Iterators to interior nodes should always be pointed at the first
-	 * non-whiteout:
- */
- if (level)
- bch2_btree_node_iter_peek(&l->iter, l->b);
-}
-
-void bch2_btree_path_level_init(struct btree_trans *trans,
- struct btree_path *path,
- struct btree *b)
-{
- BUG_ON(path->cached);
-
- EBUG_ON(!btree_path_pos_in_node(path, b));
-
- path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
- path->l[b->c.level].b = b;
- __btree_path_level_init(path, b->c.level);
-}
-
-/* Btree path: fixups after btree node updates: */
-
-static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
-{
- struct bch_fs *c = trans->c;
-
- trans_for_each_update(trans, i)
- if (!i->cached &&
- i->level == b->c.level &&
- i->btree_id == b->c.btree_id &&
- bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
- bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
- i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;
-
- if (unlikely(trans->journal_replay_not_finished)) {
- struct bkey_i *j_k =
- bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
- i->k->k.p);
-
- if (j_k) {
- i->old_k = j_k->k;
- i->old_v = &j_k->v;
- }
- }
- }
-}
-
-/*
- * A btree node is being replaced - update the iterator to point to the new
- * node:
- */
-void bch2_trans_node_add(struct btree_trans *trans,
- struct btree_path *path,
- struct btree *b)
-{
- struct btree_path *prev;
-
- BUG_ON(!btree_path_pos_in_node(path, b));
-
- while ((prev = prev_btree_path(trans, path)) &&
- btree_path_pos_in_node(prev, b))
- path = prev;
-
- for (;
- path && btree_path_pos_in_node(path, b);
- path = next_btree_path(trans, path))
- if (path->uptodate == BTREE_ITER_UPTODATE && !path->cached) {
- enum btree_node_locked_type t =
- btree_lock_want(path, b->c.level);
-
- if (t != BTREE_NODE_UNLOCKED) {
- btree_node_unlock(trans, path, b->c.level);
- six_lock_increment(&b->c.lock, (enum six_lock_type) t);
- mark_btree_node_locked(trans, path, b->c.level, t);
- }
-
- bch2_btree_path_level_init(trans, path, b);
- }
-
- bch2_trans_revalidate_updates_in_node(trans, b);
-}
-
-/*
- * A btree node has been modified in such a way as to invalidate iterators - fix
- * them:
- */
-void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
-{
- struct btree_path *path;
- unsigned i;
-
- trans_for_each_path_with_node(trans, b, path, i)
- __btree_path_level_init(path, b->c.level);
-
- bch2_trans_revalidate_updates_in_node(trans, b);
-}
-
-/* Btree path: traverse, set_pos: */
-
-static inline int btree_path_lock_root(struct btree_trans *trans,
- struct btree_path *path,
- unsigned depth_want,
- unsigned long trace_ip)
-{
- struct bch_fs *c = trans->c;
- struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b;
- enum six_lock_type lock_type;
- unsigned i;
- int ret;
-
- EBUG_ON(path->nodes_locked);
-
- while (1) {
- b = READ_ONCE(*rootp);
- path->level = READ_ONCE(b->c.level);
-
- if (unlikely(path->level < depth_want)) {
- /*
-			 * the root is at a lower depth than the depth we want:
-			 * either we've reached the end of the btree, or we're
-			 * walking nodes greater than some depth and there are
-			 * no nodes >= that depth
- */
- path->level = depth_want;
- for (i = path->level; i < BTREE_MAX_DEPTH; i++)
- path->l[i].b = NULL;
- return 1;
- }
-
- lock_type = __btree_lock_want(path, path->level);
- ret = btree_node_lock(trans, path, &b->c,
- path->level, lock_type, trace_ip);
- if (unlikely(ret)) {
- if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
- continue;
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- return ret;
- BUG();
- }
-
- if (likely(b == READ_ONCE(*rootp) &&
- b->c.level == path->level &&
- !race_fault())) {
- for (i = 0; i < path->level; i++)
- path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
- path->l[path->level].b = b;
- for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
- path->l[i].b = NULL;
-
- mark_btree_node_locked(trans, path, path->level,
- (enum btree_node_locked_type) lock_type);
- bch2_btree_path_level_init(trans, path, b);
- return 0;
- }
-
- six_unlock_type(&b->c.lock, lock_type);
- }
-}
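
btree_path_lock_root() follows a read/lock/re-verify loop: the root pointer is loaded locklessly, the node is locked, and the lock is kept only if the pointer still names the same node; otherwise it unlocks and retries. A minimal sketch of that pattern, using C11 atomics and a pthread mutex as stand-ins for the tree's root pointer and six locks:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct node {
	pthread_mutex_t	lock;
	int		level;
};

static struct node root_node = { PTHREAD_MUTEX_INITIALIZER, 3 };
static _Atomic(struct node *) root = &root_node;

static struct node *lock_root(void)
{
	while (1) {
		struct node *b = atomic_load(&root);

		pthread_mutex_lock(&b->lock);

		/* Still the root? Then we hold a valid lock on it. */
		if (b == atomic_load(&root))
			return b;

		/* Root was replaced while we were acquiring the lock: retry. */
		pthread_mutex_unlock(&b->lock);
	}
}

int main(void)
{
	struct node *b = lock_root();

	printf("locked root at level %d\n", b->level);
	pthread_mutex_unlock(&b->lock);
	return 0;
}
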
-
-noinline
-static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
-{
- struct bch_fs *c = trans->c;
- struct btree_path_level *l = path_l(path);
- struct btree_node_iter node_iter = l->iter;
- struct bkey_packed *k;
- struct bkey_buf tmp;
- unsigned nr = test_bit(BCH_FS_started, &c->flags)
- ? (path->level > 1 ? 0 : 2)
- : (path->level > 1 ? 1 : 16);
- bool was_locked = btree_node_locked(path, path->level);
- int ret = 0;
-
- bch2_bkey_buf_init(&tmp);
-
- while (nr-- && !ret) {
- if (!bch2_btree_node_relock(trans, path, path->level))
- break;
-
- bch2_btree_node_iter_advance(&node_iter, l->b);
- k = bch2_btree_node_iter_peek(&node_iter, l->b);
- if (!k)
- break;
-
- bch2_bkey_buf_unpack(&tmp, c, l->b, k);
- ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
- path->level - 1);
- }
-
- if (!was_locked)
- btree_node_unlock(trans, path, path->level);
-
- bch2_bkey_buf_exit(&tmp, c);
- return ret;
-}
-
-static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
- struct btree_and_journal_iter *jiter)
-{
- struct bch_fs *c = trans->c;
- struct bkey_s_c k;
- struct bkey_buf tmp;
- unsigned nr = test_bit(BCH_FS_started, &c->flags)
- ? (path->level > 1 ? 0 : 2)
- : (path->level > 1 ? 1 : 16);
- bool was_locked = btree_node_locked(path, path->level);
- int ret = 0;
-
- bch2_bkey_buf_init(&tmp);
-
- while (nr-- && !ret) {
- if (!bch2_btree_node_relock(trans, path, path->level))
- break;
-
- bch2_btree_and_journal_iter_advance(jiter);
- k = bch2_btree_and_journal_iter_peek(jiter);
- if (!k.k)
- break;
-
- bch2_bkey_buf_reassemble(&tmp, c, k);
- ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
- path->level - 1);
- }
-
- if (!was_locked)
- btree_node_unlock(trans, path, path->level);
-
- bch2_bkey_buf_exit(&tmp, c);
- return ret;
-}
-
-static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
- struct btree_path *path,
- unsigned plevel, struct btree *b)
-{
- struct btree_path_level *l = &path->l[plevel];
- bool locked = btree_node_locked(path, plevel);
- struct bkey_packed *k;
- struct bch_btree_ptr_v2 *bp;
-
- if (!bch2_btree_node_relock(trans, path, plevel))
- return;
-
- k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
- BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
-
- bp = (void *) bkeyp_val(&l->b->format, k);
- bp->mem_ptr = (unsigned long)b;
-
- if (!locked)
- btree_node_unlock(trans, path, plevel);
-}
-
-static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
- struct btree_path *path,
- unsigned flags,
- struct bkey_buf *out)
-{
- struct bch_fs *c = trans->c;
- struct btree_path_level *l = path_l(path);
- struct btree_and_journal_iter jiter;
- struct bkey_s_c k;
- int ret = 0;
-
- __bch2_btree_and_journal_iter_init_node_iter(trans, &jiter, l->b, l->iter, path->pos);
-
- k = bch2_btree_and_journal_iter_peek(&jiter);
- if (!k.k) {
- struct printbuf buf = PRINTBUF;
-
- prt_str(&buf, "node not found at pos ");
- bch2_bpos_to_text(&buf, path->pos);
- prt_str(&buf, " at btree ");
- bch2_btree_pos_to_text(&buf, c, l->b);
-
- ret = bch2_fs_topology_error(c, "%s", buf.buf);
- printbuf_exit(&buf);
- goto err;
- }
-
- bch2_bkey_buf_reassemble(out, c, k);
-
- if ((flags & BTREE_ITER_prefetch) &&
- c->opts.btree_node_prefetch)
- ret = btree_path_prefetch_j(trans, path, &jiter);
-
-err:
- bch2_btree_and_journal_iter_exit(&jiter);
- return ret;
-}
-
-static __always_inline int btree_path_down(struct btree_trans *trans,
- struct btree_path *path,
- unsigned flags,
- unsigned long trace_ip)
-{
- struct bch_fs *c = trans->c;
- struct btree_path_level *l = path_l(path);
- struct btree *b;
- unsigned level = path->level - 1;
- enum six_lock_type lock_type = __btree_lock_want(path, level);
- struct bkey_buf tmp;
- int ret;
-
- EBUG_ON(!btree_node_locked(path, path->level));
-
- bch2_bkey_buf_init(&tmp);
-
- if (unlikely(trans->journal_replay_not_finished)) {
- ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
- if (ret)
- goto err;
- } else {
- struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
- if (!k) {
- struct printbuf buf = PRINTBUF;
-
- prt_str(&buf, "node not found at pos ");
- bch2_bpos_to_text(&buf, path->pos);
- prt_str(&buf, " within parent node ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&l->b->key));
-
- bch2_fs_fatal_error(c, "%s", buf.buf);
- printbuf_exit(&buf);
- ret = -BCH_ERR_btree_need_topology_repair;
- goto err;
- }
-
- bch2_bkey_buf_unpack(&tmp, c, l->b, k);
-
- if ((flags & BTREE_ITER_prefetch) &&
- c->opts.btree_node_prefetch) {
- ret = btree_path_prefetch(trans, path);
- if (ret)
- goto err;
- }
- }
-
- b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
- ret = PTR_ERR_OR_ZERO(b);
- if (unlikely(ret))
- goto err;
-
- if (likely(!trans->journal_replay_not_finished &&
- tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
- unlikely(b != btree_node_mem_ptr(tmp.k)))
- btree_node_mem_ptr_set(trans, path, level + 1, b);
-
- if (btree_node_read_locked(path, level + 1))
- btree_node_unlock(trans, path, level + 1);
-
- mark_btree_node_locked(trans, path, level,
- (enum btree_node_locked_type) lock_type);
- path->level = level;
- bch2_btree_path_level_init(trans, path, b);
-
- bch2_btree_path_verify_locks(path);
-err:
- bch2_bkey_buf_exit(&tmp, c);
- return ret;
-}
-
-static int bch2_btree_path_traverse_all(struct btree_trans *trans)
-{
- struct bch_fs *c = trans->c;
- struct btree_path *path;
- unsigned long trace_ip = _RET_IP_;
- unsigned i;
- int ret = 0;
-
- if (trans->in_traverse_all)
- return -BCH_ERR_transaction_restart_in_traverse_all;
-
- trans->in_traverse_all = true;
-retry_all:
- trans->restarted = 0;
- trans->last_restarted_ip = 0;
-
- trans_for_each_path(trans, path, i)
- path->should_be_locked = false;
-
- btree_trans_sort_paths(trans);
-
- bch2_trans_unlock(trans);
- cond_resched();
- trans_set_locked(trans);
-
- if (unlikely(trans->memory_allocation_failure)) {
- struct closure cl;
-
- closure_init_stack(&cl);
-
- do {
- ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
- closure_sync(&cl);
- } while (ret);
- }
-
- /* Now, redo traversals in correct order: */
- i = 0;
- while (i < trans->nr_sorted) {
- btree_path_idx_t idx = trans->sorted[i];
-
- /*
- * Traversing a path can cause another path to be added at about
- * the same position:
- */
- if (trans->paths[idx].uptodate) {
- __btree_path_get(trans, &trans->paths[idx], false);
- ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
- __btree_path_put(trans, &trans->paths[idx], false);
-
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
- bch2_err_matches(ret, ENOMEM))
- goto retry_all;
- if (ret)
- goto err;
- } else {
- i++;
- }
- }
-
- /*
- * We used to assert that all paths had been traversed here
- * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
- * path->should_be_locked is not set yet, we might have unlocked and
- * then failed to relock a path - that's fine.
- */
-err:
- bch2_btree_cache_cannibalize_unlock(trans);
-
- trans->in_traverse_all = false;
-
- trace_and_count(c, trans_traverse_all, trans, trace_ip);
- return ret;
-}
-
-static inline bool btree_path_check_pos_in_node(struct btree_path *path,
- unsigned l, int check_pos)
-{
- if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
- return false;
- if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
- return false;
- return true;
-}
-
-static inline bool btree_path_good_node(struct btree_trans *trans,
- struct btree_path *path,
- unsigned l, int check_pos)
-{
- return is_btree_node(path, l) &&
- bch2_btree_node_relock(trans, path, l) &&
- btree_path_check_pos_in_node(path, l, check_pos);
-}
-
-static void btree_path_set_level_down(struct btree_trans *trans,
- struct btree_path *path,
- unsigned new_level)
-{
- unsigned l;
-
- path->level = new_level;
-
- for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
- if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
- btree_node_unlock(trans, path, l);
-
- btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
- bch2_btree_path_verify(trans, path);
-}
-
-static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
- struct btree_path *path,
- int check_pos)
-{
- unsigned i, l = path->level;
-again:
- while (btree_path_node(path, l) &&
- !btree_path_good_node(trans, path, l, check_pos))
- __btree_path_set_level_up(trans, path, l++);
-
- /* If we need intent locks, take them too: */
- for (i = l + 1;
- i < path->locks_want && btree_path_node(path, i);
- i++)
- if (!bch2_btree_node_relock(trans, path, i)) {
- while (l <= i)
- __btree_path_set_level_up(trans, path, l++);
- goto again;
- }
-
- return l;
-}
-
-static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
- struct btree_path *path,
- int check_pos)
-{
- return likely(btree_node_locked(path, path->level) &&
- btree_path_check_pos_in_node(path, path->level, check_pos))
- ? path->level
- : __btree_path_up_until_good_node(trans, path, check_pos);
-}
-
-/*
- * This is the main state machine for walking down the btree - walks down to a
- * specified depth
- *
- * Returns 0 on success, -EIO on error (error reading in a btree node).
- *
- * On error, caller (peek_node()/peek_key()) must return NULL; the error is
- * stashed in the iterator and returned from bch2_trans_exit().
- */
-int bch2_btree_path_traverse_one(struct btree_trans *trans,
- btree_path_idx_t path_idx,
- unsigned flags,
- unsigned long trace_ip)
-{
- struct btree_path *path = &trans->paths[path_idx];
- unsigned depth_want = path->level;
- int ret = -((int) trans->restarted);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(!trans->srcu_held))
- bch2_trans_srcu_lock(trans);
-
- trace_btree_path_traverse_start(trans, path);
-
- /*
- * Ensure we obey path->should_be_locked: if it's set, we can't unlock
- * and re-traverse the path without a transaction restart:
- */
- if (path->should_be_locked) {
- ret = bch2_btree_path_relock(trans, path, trace_ip);
- goto out;
- }
-
- if (path->cached) {
- ret = bch2_btree_path_traverse_cached(trans, path, flags);
- goto out;
- }
-
- path = &trans->paths[path_idx];
-
- if (unlikely(path->level >= BTREE_MAX_DEPTH))
- goto out_uptodate;
-
- path->level = btree_path_up_until_good_node(trans, path, 0);
- unsigned max_level = path->level;
-
- EBUG_ON(btree_path_node(path, path->level) &&
- !btree_node_locked(path, path->level));
-
- /*
-	 * Note: path->nodes[path->level] may be temporarily NULL here - that
-	 * would normally indicate to other code that we got to the end of the
-	 * btree; here it indicates that relocking the root failed. It's critical
-	 * that btree_path_lock_root() comes next and that it can't fail.
- */
- while (path->level > depth_want) {
- ret = btree_path_node(path, path->level)
- ? btree_path_down(trans, path, flags, trace_ip)
- : btree_path_lock_root(trans, path, depth_want, trace_ip);
- if (unlikely(ret)) {
- if (ret == 1) {
- /*
- * No nodes at this level - got to the end of
- * the btree:
- */
- ret = 0;
- goto out;
- }
-
- __bch2_btree_path_unlock(trans, path);
- path->level = depth_want;
- path->l[path->level].b = ERR_PTR(ret);
- goto out;
- }
- }
-
- if (unlikely(max_level > path->level)) {
- struct btree_path *linked;
- unsigned iter;
-
- trans_for_each_path_with_node(trans, path_l(path)->b, linked, iter)
- for (unsigned j = path->level + 1; j < max_level; j++)
- linked->l[j] = path->l[j];
- }
-
-out_uptodate:
- path->uptodate = BTREE_ITER_UPTODATE;
- trace_btree_path_traverse_end(trans, path);
-out:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
- panic("ret %s (%i) trans->restarted %s (%i)\n",
- bch2_err_str(ret), ret,
- bch2_err_str(trans->restarted), trans->restarted);
- bch2_btree_path_verify(trans, path);
- return ret;
-}
-
-static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
- struct btree_path *src)
-{
- unsigned i, offset = offsetof(struct btree_path, pos);
-
- memcpy((void *) dst + offset,
- (void *) src + offset,
- sizeof(struct btree_path) - offset);
-
- for (i = 0; i < BTREE_MAX_DEPTH; i++) {
- unsigned t = btree_node_locked_type(dst, i);
-
- if (t != BTREE_NODE_UNLOCKED)
- six_lock_increment(&dst->l[i].b->c.lock, t);
- }
-}
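
btree_path_copy() above copies everything from the pos member to the end of the struct in a single memcpy, deliberately leaving the reference counts at the front untouched, before bumping lock counts. A standalone sketch of the offsetof()-based partial copy (struct and field names are made up):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct path {
	unsigned	ref;		/* not copied */
	unsigned	intent_ref;	/* not copied */
	unsigned long	pos;		/* copied from here onwards */
	unsigned	level;
	unsigned	flags;
};

static void path_copy(struct path *dst, const struct path *src)
{
	size_t offset = offsetof(struct path, pos);

	memcpy((char *) dst + offset,
	       (const char *) src + offset,
	       sizeof(struct path) - offset);
}

int main(void)
{
	struct path a = { .ref = 1, .intent_ref = 0, .pos = 42, .level = 2, .flags = 7 };
	struct path b = { .ref = 9, .intent_ref = 9 };

	path_copy(&b, &a);
	printf("b: ref %u pos %lu level %u flags %u\n", b.ref, b.pos, b.level, b.flags);
	return 0;
}
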
-
-static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_idx_t src,
- bool intent, unsigned long ip)
-{
- btree_path_idx_t new = btree_path_alloc(trans, src);
- btree_path_copy(trans, trans->paths + new, trans->paths + src);
- __btree_path_get(trans, trans->paths + new, intent);
-#ifdef TRACK_PATH_ALLOCATED
- trans->paths[new].ip_allocated = ip;
-#endif
- return new;
-}
-
-__flatten
-btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
- btree_path_idx_t path, bool intent, unsigned long ip)
-{
- struct btree_path *old = trans->paths + path;
- __btree_path_put(trans, trans->paths + path, intent);
- path = btree_path_clone(trans, path, intent, ip);
- trace_btree_path_clone(trans, old, trans->paths + path);
- trans->paths[path].preserve = false;
- return path;
-}
-
-btree_path_idx_t __must_check
-__bch2_btree_path_set_pos(struct btree_trans *trans,
- btree_path_idx_t path_idx, struct bpos new_pos,
- bool intent, unsigned long ip)
-{
- int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);
-
- bch2_trans_verify_not_in_restart(trans);
- EBUG_ON(!trans->paths[path_idx].ref);
-
- trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);
-
- path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);
-
- struct btree_path *path = trans->paths + path_idx;
- path->pos = new_pos;
- trans->paths_sorted = false;
-
- if (unlikely(path->cached)) {
- btree_node_unlock(trans, path, 0);
- path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
- btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
- goto out;
- }
-
- unsigned level = btree_path_up_until_good_node(trans, path, cmp);
-
- if (btree_path_node(path, level)) {
- struct btree_path_level *l = &path->l[level];
-
- BUG_ON(!btree_node_locked(path, level));
- /*
- * We might have to skip over many keys, or just a few: try
- * advancing the node iterator, and if we have to skip over too
- * many keys just reinit it (or if we're rewinding, since that
- * is expensive).
- */
- if (cmp < 0 ||
- !btree_path_advance_to_pos(path, l, 8))
- bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
-
- /*
-		 * Iterators to interior nodes should always be pointed at the first
-		 * non-whiteout:
- */
- if (unlikely(level))
- bch2_btree_node_iter_peek(&l->iter, l->b);
- }
-
- if (unlikely(level != path->level)) {
- btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
- __bch2_btree_path_unlock(trans, path);
- }
-out:
- bch2_btree_path_verify(trans, path);
- return path_idx;
-}
-
-/* Btree path: main interface: */
-
-static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
-{
- struct btree_path *sib;
-
- sib = prev_btree_path(trans, path);
- if (sib && !btree_path_cmp(sib, path))
- return sib;
-
- sib = next_btree_path(trans, path);
- if (sib && !btree_path_cmp(sib, path))
- return sib;
-
- return NULL;
-}
-
-static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
-{
- struct btree_path *sib;
-
- sib = prev_btree_path(trans, path);
- if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
- return sib;
-
- sib = next_btree_path(trans, path);
- if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
- return sib;
-
- return NULL;
-}
-
-static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t path)
-{
- __bch2_btree_path_unlock(trans, trans->paths + path);
- btree_path_list_remove(trans, trans->paths + path);
- __clear_bit(path, trans->paths_allocated);
-}
-
-static bool bch2_btree_path_can_relock(struct btree_trans *trans, struct btree_path *path)
-{
- unsigned l = path->level;
-
- do {
- if (!btree_path_node(path, l))
- break;
-
- if (!is_btree_node(path, l))
- return false;
-
- if (path->l[l].lock_seq != path->l[l].b->c.lock.seq)
- return false;
-
- l++;
- } while (l < path->locks_want);
-
- return true;
-}
-
-void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
-{
- struct btree_path *path = trans->paths + path_idx, *dup;
-
- if (!__btree_path_put(trans, path, intent))
- return;
-
- dup = path->preserve
- ? have_path_at_pos(trans, path)
- : have_node_at_pos(trans, path);
-
- trace_btree_path_free(trans, path_idx, dup);
-
- if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
- return;
-
- if (path->should_be_locked && !trans->restarted) {
- if (!dup)
- return;
-
- if (!(trans->locked
- ? bch2_btree_path_relock_norestart(trans, dup)
- : bch2_btree_path_can_relock(trans, dup)))
- return;
- }
-
- if (dup) {
- dup->preserve |= path->preserve;
- dup->should_be_locked |= path->should_be_locked;
- }
-
- __bch2_path_free(trans, path_idx);
-}
-
-static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
- bool intent)
-{
- if (!__btree_path_put(trans, trans->paths + path, intent))
- return;
-
- __bch2_path_free(trans, path);
-}
-
-void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
-{
- panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
- trans->restart_count, restart_count,
- (void *) trans->last_begin_ip);
-}
-
-void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
-{
- panic("in transaction restart: %s, last restarted by %pS\n",
- bch2_err_str(trans->restarted),
- (void *) trans->last_restarted_ip);
-}
-
-void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans)
-{
- panic("trans should be locked, unlocked by %pS\n",
- (void *) trans->last_unlock_ip);
-}
-
-noinline __cold
-void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
-{
- prt_printf(buf, "%u transaction updates for %s journal seq %llu\n",
- trans->nr_updates, trans->fn, trans->journal_res.seq);
- printbuf_indent_add(buf, 2);
-
- trans_for_each_update(trans, i) {
- struct bkey_s_c old = { &i->old_k, i->old_v };
-
- prt_printf(buf, "update: btree=%s cached=%u %pS\n",
- bch2_btree_id_str(i->btree_id),
- i->cached,
- (void *) i->ip_allocated);
-
- prt_printf(buf, " old ");
- bch2_bkey_val_to_text(buf, trans->c, old);
- prt_newline(buf);
-
- prt_printf(buf, " new ");
- bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
- prt_newline(buf);
- }
-
- for (struct jset_entry *e = trans->journal_entries;
- e != btree_trans_journal_entries_top(trans);
- e = vstruct_next(e))
- bch2_journal_entry_to_text(buf, trans->c, e);
-
- printbuf_indent_sub(buf, 2);
-}
-
-noinline __cold
-void bch2_dump_trans_updates(struct btree_trans *trans)
-{
- struct printbuf buf = PRINTBUF;
-
- bch2_trans_updates_to_text(&buf, trans);
- bch2_print_str(trans->c, buf.buf);
- printbuf_exit(&buf);
-}
-
-static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
-{
- struct btree_path *path = trans->paths + path_idx;
-
- prt_printf(out, "path: idx %3u ref %u:%u %c %c %c btree=%s l=%u pos ",
- path_idx, path->ref, path->intent_ref,
- path->preserve ? 'P' : ' ',
- path->should_be_locked ? 'S' : ' ',
- path->cached ? 'C' : 'B',
- bch2_btree_id_str(path->btree_id),
- path->level);
- bch2_bpos_to_text(out, path->pos);
-
- if (!path->cached && btree_node_locked(path, path->level)) {
- prt_char(out, ' ');
- struct btree *b = path_l(path)->b;
- bch2_bpos_to_text(out, b->data->min_key);
- prt_char(out, '-');
- bch2_bpos_to_text(out, b->key.k.p);
- }
-
-#ifdef TRACK_PATH_ALLOCATED
- prt_printf(out, " %pS", (void *) path->ip_allocated);
-#endif
-}
-
-static const char *btree_node_locked_str(enum btree_node_locked_type t)
-{
- switch (t) {
- case BTREE_NODE_UNLOCKED:
- return "unlocked";
- case BTREE_NODE_READ_LOCKED:
- return "read";
- case BTREE_NODE_INTENT_LOCKED:
- return "intent";
- case BTREE_NODE_WRITE_LOCKED:
- return "write";
- default:
- return NULL;
- }
-}
-
-void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
-{
- bch2_btree_path_to_text_short(out, trans, path_idx);
-
- struct btree_path *path = trans->paths + path_idx;
-
- prt_printf(out, " uptodate %u locks_want %u", path->uptodate, path->locks_want);
- prt_newline(out);
-
- printbuf_indent_add(out, 2);
- for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
- prt_printf(out, "l=%u locks %s seq %u node ", l,
- btree_node_locked_str(btree_node_locked_type(path, l)),
- path->l[l].lock_seq);
-
- int ret = PTR_ERR_OR_ZERO(path->l[l].b);
- if (ret)
- prt_str(out, bch2_err_str(ret));
- else
- prt_printf(out, "%px", path->l[l].b);
- prt_newline(out);
- }
- printbuf_indent_sub(out, 2);
-}
-
-static noinline __cold
-void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
- bool nosort)
-{
- struct trans_for_each_path_inorder_iter iter;
-
- if (!nosort)
- btree_trans_sort_paths(trans);
-
- trans_for_each_path_idx_inorder(trans, iter) {
- bch2_btree_path_to_text_short(out, trans, iter.path_idx);
- prt_newline(out);
- }
-}
-
-noinline __cold
-void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
-{
- __bch2_trans_paths_to_text(out, trans, false);
-}
-
-static noinline __cold
-void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
-{
- struct printbuf buf = PRINTBUF;
-
- __bch2_trans_paths_to_text(&buf, trans, nosort);
- bch2_trans_updates_to_text(&buf, trans);
-
- bch2_print_str(trans->c, buf.buf);
- printbuf_exit(&buf);
-}
-
-noinline __cold
-void bch2_dump_trans_paths_updates(struct btree_trans *trans)
-{
- __bch2_dump_trans_paths_updates(trans, false);
-}
-
-noinline __cold
-static void bch2_trans_update_max_paths(struct btree_trans *trans)
-{
- struct btree_transaction_stats *s = btree_trans_stats(trans);
- struct printbuf buf = PRINTBUF;
- size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);
-
- bch2_trans_paths_to_text(&buf, trans);
-
- if (!buf.allocation_failure) {
- mutex_lock(&s->lock);
- if (nr > s->nr_max_paths) {
- s->nr_max_paths = nr;
- swap(s->max_paths_text, buf.buf);
- }
- mutex_unlock(&s->lock);
- }
-
- printbuf_exit(&buf);
-
- trans->nr_paths_max = nr;
-}
-
-noinline __cold
-int __bch2_btree_trans_too_many_iters(struct btree_trans *trans)
-{
- if (trace_trans_restart_too_many_iters_enabled()) {
- struct printbuf buf = PRINTBUF;
-
- bch2_trans_paths_to_text(&buf, trans);
- trace_trans_restart_too_many_iters(trans, _THIS_IP_, buf.buf);
- printbuf_exit(&buf);
- }
-
- count_event(trans->c, trans_restart_too_many_iters);
-
- return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
-}
-
-static noinline void btree_path_overflow(struct btree_trans *trans)
-{
- bch2_dump_trans_paths_updates(trans);
- bch_err(trans->c, "trans path overflow");
-}
-
-static noinline void btree_paths_realloc(struct btree_trans *trans)
-{
- unsigned nr = trans->nr_paths * 2;
-
- void *p = kvzalloc(BITS_TO_LONGS(nr) * sizeof(unsigned long) +
- sizeof(struct btree_trans_paths) +
- nr * sizeof(struct btree_path) +
- nr * sizeof(btree_path_idx_t) + 8 +
- nr * sizeof(struct btree_insert_entry), GFP_KERNEL|__GFP_NOFAIL);
-
- unsigned long *paths_allocated = p;
- memcpy(paths_allocated, trans->paths_allocated, BITS_TO_LONGS(trans->nr_paths) * sizeof(unsigned long));
- p += BITS_TO_LONGS(nr) * sizeof(unsigned long);
-
- p += sizeof(struct btree_trans_paths);
- struct btree_path *paths = p;
- *trans_paths_nr(paths) = nr;
- memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
- p += nr * sizeof(struct btree_path);
-
- btree_path_idx_t *sorted = p;
- memcpy(sorted, trans->sorted, trans->nr_sorted * sizeof(btree_path_idx_t));
- p += nr * sizeof(btree_path_idx_t) + 8;
-
- struct btree_insert_entry *updates = p;
- memcpy(updates, trans->updates, trans->nr_paths * sizeof(struct btree_insert_entry));
-
- unsigned long *old = trans->paths_allocated;
-
- rcu_assign_pointer(trans->paths_allocated, paths_allocated);
- rcu_assign_pointer(trans->paths, paths);
- rcu_assign_pointer(trans->sorted, sorted);
- rcu_assign_pointer(trans->updates, updates);
-
- trans->nr_paths = nr;
-
- if (old != trans->_paths_allocated)
- kfree_rcu_mightsleep(old);
-}
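
btree_paths_realloc() grows all of the per-transaction arrays with one allocation and carves it up by advancing a cursor past the bitmap, the path array and the sorted-index array in turn, so a single pointer swap and a single free cover everything. A simplified standalone sketch of that carve-up; the sizes and element types are stand-ins, and the RCU handoff is omitted.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct path { unsigned long pos; };

int main(void)
{
	unsigned nr = 64;

	void *p = calloc(1, BITS_TO_LONGS(nr) * sizeof(unsigned long) +
			    nr * sizeof(struct path) +
			    nr * sizeof(uint8_t));
	if (!p)
		return 1;

	unsigned long *allocated = p;		/* allocation bitmap */
	p = (char *) p + BITS_TO_LONGS(nr) * sizeof(unsigned long);

	struct path *paths = p;			/* path slots */
	p = (char *) p + nr * sizeof(struct path);

	uint8_t *sorted = p;			/* sorted index array */

	allocated[0] |= 1;			/* mark slot 0 in use */
	paths[0].pos = 123;
	sorted[0] = 0;

	printf("slot 0 allocated, pos %lu\n", paths[0].pos);
	free(allocated);			/* one free releases all three regions */
	return 0;
}
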
-
-static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
- btree_path_idx_t pos)
-{
- btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);
-
- if (unlikely(idx == trans->nr_paths)) {
- if (trans->nr_paths == BTREE_ITER_MAX) {
- btree_path_overflow(trans);
- return 0;
- }
-
- btree_paths_realloc(trans);
- }
-
- /*
- * Do this before marking the new path as allocated, since it won't be
- * initialized yet:
- */
- if (unlikely(idx > trans->nr_paths_max))
- bch2_trans_update_max_paths(trans);
-
- __set_bit(idx, trans->paths_allocated);
-
- struct btree_path *path = &trans->paths[idx];
- path->ref = 0;
- path->intent_ref = 0;
- path->nodes_locked = 0;
-
- btree_path_list_add(trans, pos, idx);
- trans->paths_sorted = false;
- return idx;
-}
-
-btree_path_idx_t bch2_path_get(struct btree_trans *trans,
- enum btree_id btree_id, struct bpos pos,
- unsigned locks_want, unsigned level,
- unsigned flags, unsigned long ip)
-{
- struct btree_path *path;
- bool cached = flags & BTREE_ITER_cached;
- bool intent = flags & BTREE_ITER_intent;
- struct trans_for_each_path_inorder_iter iter;
- btree_path_idx_t path_pos = 0, path_idx;
-
- bch2_trans_verify_not_unlocked(trans);
- bch2_trans_verify_not_in_restart(trans);
- bch2_trans_verify_locks(trans);
-
- btree_trans_sort_paths(trans);
-
- trans_for_each_path_inorder(trans, path, iter) {
- if (__btree_path_cmp(path,
- btree_id,
- cached,
- pos,
- level) > 0)
- break;
-
- path_pos = iter.path_idx;
- }
-
- if (path_pos &&
- trans->paths[path_pos].cached == cached &&
- trans->paths[path_pos].btree_id == btree_id &&
- trans->paths[path_pos].level == level) {
- trace_btree_path_get(trans, trans->paths + path_pos, &pos);
-
- __btree_path_get(trans, trans->paths + path_pos, intent);
- path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
- path = trans->paths + path_idx;
- } else {
- path_idx = btree_path_alloc(trans, path_pos);
- path = trans->paths + path_idx;
-
- __btree_path_get(trans, path, intent);
- path->pos = pos;
- path->btree_id = btree_id;
- path->cached = cached;
- path->uptodate = BTREE_ITER_NEED_TRAVERSE;
- path->should_be_locked = false;
- path->level = level;
- path->locks_want = locks_want;
- path->nodes_locked = 0;
- for (unsigned i = 0; i < ARRAY_SIZE(path->l); i++)
- path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
-#ifdef TRACK_PATH_ALLOCATED
- path->ip_allocated = ip;
-#endif
- trans->paths_sorted = false;
-
- trace_btree_path_alloc(trans, path);
- }
-
- if (!(flags & BTREE_ITER_nopreserve))
- path->preserve = true;
-
- if (path->intent_ref)
- locks_want = max(locks_want, level + 1);
-
- /*
- * If the path has locks_want greater than requested, we don't downgrade
-	 * it here - on a transaction restart (e.g. because a btree node split
-	 * needed to upgrade locks), we might be putting/getting the iterator again.
- * Downgrading iterators only happens via bch2_trans_downgrade(), after
- * a successful transaction commit.
- */
-
- locks_want = min(locks_want, BTREE_MAX_DEPTH);
- if (locks_want > path->locks_want)
- bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);
-
- return path_idx;
-}
-
-btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *trans,
- enum btree_id btree_id,
- unsigned level,
- struct bpos pos)
-{
- btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
- BTREE_ITER_nopreserve|
- BTREE_ITER_intent, _RET_IP_);
- path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);
-
- struct btree_path *path = trans->paths + path_idx;
- bch2_btree_path_downgrade(trans, path);
- __bch2_btree_path_unlock(trans, path);
- return path_idx;
-}
-
-struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
-{
- struct btree_path_level *l = path_l(path);
- struct bkey_packed *_k;
- struct bkey_s_c k;
-
- if (unlikely(!l->b))
- return bkey_s_c_null;
-
- EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
- EBUG_ON(!btree_node_locked(path, path->level));
-
- if (!path->cached) {
- _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
- k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
-
- EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
-
- if (!k.k || !bpos_eq(path->pos, k.k->p))
- goto hole;
- } else {
- struct bkey_cached *ck = (void *) path->l[0].b;
- if (!ck)
- return bkey_s_c_null;
-
- EBUG_ON(path->btree_id != ck->key.btree_id ||
- !bkey_eq(path->pos, ck->key.pos));
-
- *u = ck->k->k;
- k = bkey_i_to_s_c(ck->k);
- }
-
- return k;
-hole:
- bkey_init(u);
- u->p = path->pos;
- return (struct bkey_s_c) { u, NULL };
-}
-
-void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
-{
- struct btree_trans *trans = iter->trans;
-
- if (!iter->path || trans->restarted)
- return;
-
- struct btree_path *path = btree_iter_path(trans, iter);
- path->preserve = false;
- if (path->ref == 1)
- path->should_be_locked = false;
-}
-
-/* Btree iterators: */
-
-int __must_check
-__bch2_btree_iter_traverse(struct btree_iter *iter)
-{
- return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
-}
-
-int __must_check
-bch2_btree_iter_traverse(struct btree_iter *iter)
-{
- struct btree_trans *trans = iter->trans;
- int ret;
-
- bch2_trans_verify_not_unlocked(trans);
-
- iter->path = bch2_btree_path_set_pos(trans, iter->path,
- btree_iter_search_key(iter),
- iter->flags & BTREE_ITER_intent,
- btree_iter_ip_allocated(iter));
-
- ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
- if (ret)
- return ret;
-
- struct btree_path *path = btree_iter_path(trans, iter);
- if (btree_path_node(path, path->level))
- btree_path_set_should_be_locked(trans, path);
- return 0;
-}
-
-/* Iterate across nodes (leaf and interior nodes) */
-
-struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
-{
- struct btree_trans *trans = iter->trans;
- struct btree *b = NULL;
- int ret;
-
- EBUG_ON(trans->paths[iter->path].cached);
- bch2_btree_iter_verify(iter);
-
- ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
- if (ret)
- goto err;
-
- struct btree_path *path = btree_iter_path(trans, iter);
- b = btree_path_node(path, path->level);
- if (!b)
- goto out;
-
- BUG_ON(bpos_lt(b->key.k.p, iter->pos));
-
- bkey_init(&iter->k);
- iter->k.p = iter->pos = b->key.k.p;
-
- iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
- iter->flags & BTREE_ITER_intent,
- btree_iter_ip_allocated(iter));
- btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
-out:
- bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
-
- return b;
-err:
- b = ERR_PTR(ret);
- goto out;
-}
-
-/* Only kept for -tools */
-struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
-{
- struct btree *b;
-
- while (b = bch2_btree_iter_peek_node(iter),
- bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
- bch2_trans_begin(iter->trans);
-
- return b;
-}
-
-struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
-{
- struct btree_trans *trans = iter->trans;
- struct btree *b = NULL;
- int ret;
-
- EBUG_ON(trans->paths[iter->path].cached);
- bch2_trans_verify_not_in_restart(trans);
- bch2_btree_iter_verify(iter);
-
- ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
- if (ret)
- goto err;
-
- struct btree_path *path = btree_iter_path(trans, iter);
-
- /* already at end? */
- if (!btree_path_node(path, path->level))
- return NULL;
-
- /* got to end? */
- if (!btree_path_node(path, path->level + 1)) {
- btree_path_set_level_up(trans, path);
- return NULL;
- }
-
- if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
- __bch2_btree_path_unlock(trans, path);
- path->l[path->level].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
- path->l[path->level + 1].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
- btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
- trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
- ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
- goto err;
- }
-
- b = btree_path_node(path, path->level + 1);
-
- if (bpos_eq(iter->pos, b->key.k.p)) {
- __btree_path_set_level_up(trans, path, path->level++);
- } else {
- if (btree_lock_want(path, path->level + 1) == BTREE_NODE_UNLOCKED)
- btree_node_unlock(trans, path, path->level + 1);
-
- /*
- * Haven't gotten to the end of the parent node: go back down to
- * the next child node
- */
- iter->path = bch2_btree_path_set_pos(trans, iter->path,
- bpos_successor(iter->pos),
- iter->flags & BTREE_ITER_intent,
- btree_iter_ip_allocated(iter));
-
- path = btree_iter_path(trans, iter);
- btree_path_set_level_down(trans, path, iter->min_depth);
-
- ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
- if (ret)
- goto err;
-
- path = btree_iter_path(trans, iter);
- b = path->l[path->level].b;
- }
-
- bkey_init(&iter->k);
- iter->k.p = iter->pos = b->key.k.p;
-
- iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
- iter->flags & BTREE_ITER_intent,
- btree_iter_ip_allocated(iter));
- btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
- EBUG_ON(btree_iter_path(trans, iter)->uptodate);
-out:
- bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
-
- return b;
-err:
- b = ERR_PTR(ret);
- goto out;
-}
-
-/* Iterate across keys (in leaf nodes only) */
-
-inline bool bch2_btree_iter_advance(struct btree_iter *iter)
-{
- struct bpos pos = iter->k.p;
- bool ret = !(iter->flags & BTREE_ITER_all_snapshots
- ? bpos_eq(pos, SPOS_MAX)
- : bkey_eq(pos, SPOS_MAX));
-
- if (ret && !(iter->flags & BTREE_ITER_is_extents))
- pos = bkey_successor(iter, pos);
- bch2_btree_iter_set_pos(iter, pos);
- return ret;
-}
-
-inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
-{
- struct bpos pos = bkey_start_pos(&iter->k);
- bool ret = !(iter->flags & BTREE_ITER_all_snapshots
- ? bpos_eq(pos, POS_MIN)
- : bkey_eq(pos, POS_MIN));
-
- if (ret && !(iter->flags & BTREE_ITER_is_extents))
- pos = bkey_predecessor(iter, pos);
- bch2_btree_iter_set_pos(iter, pos);
- return ret;
-}
-
-static noinline
-void bch2_btree_trans_peek_prev_updates(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c *k)
-{
- struct bpos end = path_l(btree_iter_path(trans, iter))->b->data->min_key;
-
- trans_for_each_update(trans, i)
- if (!i->key_cache_already_flushed &&
- i->btree_id == iter->btree_id &&
- bpos_le(i->k->k.p, iter->pos) &&
- bpos_ge(i->k->k.p, k->k ? k->k->p : end)) {
- iter->k = i->k->k;
- *k = bkey_i_to_s_c(i->k);
- }
-}
-
-static noinline
-void bch2_btree_trans_peek_updates(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c *k)
-{
- struct btree_path *path = btree_iter_path(trans, iter);
- struct bpos end = path_l(path)->b->key.k.p;
-
- trans_for_each_update(trans, i)
- if (!i->key_cache_already_flushed &&
- i->btree_id == iter->btree_id &&
- bpos_ge(i->k->k.p, path->pos) &&
- bpos_le(i->k->k.p, k->k ? k->k->p : end)) {
- iter->k = i->k->k;
- *k = bkey_i_to_s_c(i->k);
- }
-}
-
-static noinline
-void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c *k)
-{
- trans_for_each_update(trans, i)
- if (!i->key_cache_already_flushed &&
- i->btree_id == iter->btree_id &&
- bpos_eq(i->k->k.p, iter->pos)) {
- iter->k = i->k->k;
- *k = bkey_i_to_s_c(i->k);
- }
-}
-
-static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bpos end_pos)
-{
- struct btree_path *path = btree_iter_path(trans, iter);
-
- return bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
- path->level,
- path->pos,
- end_pos,
- &iter->journal_idx);
-}
-
-static noinline
-struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
- struct btree_iter *iter)
-{
- struct btree_path *path = btree_iter_path(trans, iter);
- struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos);
-
- if (k) {
- iter->k = k->k;
- return bkey_i_to_s_c(k);
- } else {
- return bkey_s_c_null;
- }
-}
-
-static noinline
-struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct btree_path *path = btree_iter_path(trans, iter);
- struct bkey_i *next_journal =
- bch2_btree_journal_peek(trans, iter,
- k.k ? k.k->p : path_l(path)->b->key.k.p);
-
- if (next_journal) {
- iter->k = next_journal->k;
- k = bkey_i_to_s_c(next_journal);
- }
-
- return k;
-}
-
-/*
- * Checks btree key cache for key at iter->pos and returns it if present, or
- * bkey_s_c_null:
- */
-static noinline
-struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
-{
- struct btree_trans *trans = iter->trans;
- struct bch_fs *c = trans->c;
- struct bkey u;
- struct bkey_s_c k;
- int ret;
-
- bch2_trans_verify_not_in_restart(trans);
- bch2_trans_verify_not_unlocked(trans);
-
- if ((iter->flags & BTREE_ITER_key_cache_fill) &&
- bpos_eq(iter->pos, pos))
- return bkey_s_c_null;
-
- if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
- return bkey_s_c_null;
-
- if (!iter->key_cache_path)
- iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
- iter->flags & BTREE_ITER_intent, 0,
- iter->flags|BTREE_ITER_cached|
- BTREE_ITER_cached_nofill,
- _THIS_IP_);
-
- iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
- iter->flags & BTREE_ITER_intent,
- btree_iter_ip_allocated(iter));
-
- ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
- iter->flags|BTREE_ITER_cached) ?:
- bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
- if (unlikely(ret))
- return bkey_s_c_err(ret);
-
- btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
-
- k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
- if (k.k && !bkey_err(k)) {
- iter->k = u;
- k.k = &iter->k;
- }
- return k;
-}
-
-static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
-{
- struct btree_trans *trans = iter->trans;
- struct bkey_s_c k, k2;
- int ret;
-
- EBUG_ON(btree_iter_path(trans, iter)->cached);
- bch2_btree_iter_verify(iter);
-
- while (1) {
- struct btree_path_level *l;
-
- iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
- iter->flags & BTREE_ITER_intent,
- btree_iter_ip_allocated(iter));
-
- ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
- if (unlikely(ret)) {
- /* ensure that iter->k is consistent with iter->pos: */
- bch2_btree_iter_set_pos(iter, iter->pos);
- k = bkey_s_c_err(ret);
- goto out;
- }
-
- struct btree_path *path = btree_iter_path(trans, iter);
- l = path_l(path);
-
- if (unlikely(!l->b)) {
- /* No btree nodes at requested level: */
- bch2_btree_iter_set_pos(iter, SPOS_MAX);
- k = bkey_s_c_null;
- goto out;
- }
-
- btree_path_set_should_be_locked(trans, path);
-
- k = btree_path_level_peek_all(trans->c, l, &iter->k);
-
- if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
- k.k &&
- (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
- k = k2;
- ret = bkey_err(k);
- if (ret) {
- bch2_btree_iter_set_pos(iter, iter->pos);
- goto out;
- }
- }
-
- if (unlikely(iter->flags & BTREE_ITER_with_journal))
- k = btree_trans_peek_journal(trans, iter, k);
-
- if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
- trans->nr_updates))
- bch2_btree_trans_peek_updates(trans, iter, &k);
-
- if (k.k && bkey_deleted(k.k)) {
- /*
- * If we've got a whiteout, and it's after the search
- * key, advance the search key to the whiteout instead
- * of just after the whiteout - it might be a btree
- * whiteout, with a real key at the same position, since
-			 * in the btree, deleted keys sort before non-deleted keys.
- */
- search_key = !bpos_eq(search_key, k.k->p)
- ? k.k->p
- : bpos_successor(k.k->p);
- continue;
- }
-
- if (likely(k.k)) {
- break;
- } else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
- /* Advance to next leaf node: */
- search_key = bpos_successor(l->b->key.k.p);
- } else {
- /* End of btree: */
- bch2_btree_iter_set_pos(iter, SPOS_MAX);
- k = bkey_s_c_null;
- goto out;
- }
- }
-out:
- bch2_btree_iter_verify(iter);
-
- return k;
-}
-
-/**
- * bch2_btree_iter_peek_upto() - returns first key greater than or equal to
- * iterator's current position
- * @iter: iterator to peek from
- * @end: search limit: returns keys less than or equal to @end
- *
- * Returns: key if found, or an error extractable with bkey_err().
- */
-struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
-{
- struct btree_trans *trans = iter->trans;
- struct bpos search_key = btree_iter_search_key(iter);
- struct bkey_s_c k;
- struct bpos iter_pos;
- int ret;
-
- bch2_trans_verify_not_unlocked(trans);
- EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX));
-
- if (iter->update_path) {
- bch2_path_put_nokeep(trans, iter->update_path,
- iter->flags & BTREE_ITER_intent);
- iter->update_path = 0;
- }
-
- bch2_btree_iter_verify_entry_exit(iter);
-
- while (1) {
- k = __bch2_btree_iter_peek(iter, search_key);
- if (unlikely(!k.k))
- goto end;
- if (unlikely(bkey_err(k)))
- goto out_no_locked;
-
- /*
- * We need to check against @end before FILTER_SNAPSHOTS because
-		 * if we get to a different inode than the one requested, we might be
- * seeing keys for a different snapshot tree that will all be
- * filtered out.
- *
- * But we can't do the full check here, because bkey_start_pos()
- * isn't monotonically increasing before FILTER_SNAPSHOTS, and
- * that's what we check against in extents mode:
- */
- if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
- ? bkey_gt(k.k->p, end)
- : k.k->p.inode > end.inode))
- goto end;
-
- if (iter->update_path &&
- !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
- bch2_path_put_nokeep(trans, iter->update_path,
- iter->flags & BTREE_ITER_intent);
- iter->update_path = 0;
- }
-
- if ((iter->flags & BTREE_ITER_filter_snapshots) &&
- (iter->flags & BTREE_ITER_intent) &&
- !(iter->flags & BTREE_ITER_is_extents) &&
- !iter->update_path) {
- struct bpos pos = k.k->p;
-
- if (pos.snapshot < iter->snapshot) {
- search_key = bpos_successor(k.k->p);
- continue;
- }
-
- pos.snapshot = iter->snapshot;
-
- /*
- * advance, same as on exit for iter->path, but only up
- * to snapshot
- */
- __btree_path_get(trans, trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
- iter->update_path = iter->path;
-
- iter->update_path = bch2_btree_path_set_pos(trans,
- iter->update_path, pos,
- iter->flags & BTREE_ITER_intent,
- _THIS_IP_);
- ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
- if (unlikely(ret)) {
- k = bkey_s_c_err(ret);
- goto out_no_locked;
- }
- }
-
- /*
- * We can never have a key in a leaf node at POS_MAX, so
- * we don't have to check these successor() calls:
- */
- if ((iter->flags & BTREE_ITER_filter_snapshots) &&
- !bch2_snapshot_is_ancestor(trans->c,
- iter->snapshot,
- k.k->p.snapshot)) {
- search_key = bpos_successor(k.k->p);
- continue;
- }
-
- if (bkey_whiteout(k.k) &&
- !(iter->flags & BTREE_ITER_all_snapshots)) {
- search_key = bkey_successor(iter, k.k->p);
- continue;
- }
-
- /*
-		 * iter->pos should be monotonically increasing, and always be
- * equal to the key we just returned - except extents can
- * straddle iter->pos:
- */
- if (!(iter->flags & BTREE_ITER_is_extents))
- iter_pos = k.k->p;
- else
- iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
-
- if (unlikely(iter->flags & BTREE_ITER_all_snapshots ? bpos_gt(iter_pos, end) :
- iter->flags & BTREE_ITER_is_extents ? bkey_ge(iter_pos, end) :
- bkey_gt(iter_pos, end)))
- goto end;
-
- break;
- }
-
- iter->pos = iter_pos;
-
- iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
- iter->flags & BTREE_ITER_intent,
- btree_iter_ip_allocated(iter));
-
- btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
-out_no_locked:
- if (iter->update_path) {
- ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
- if (unlikely(ret))
- k = bkey_s_c_err(ret);
- else
- btree_path_set_should_be_locked(trans, trans->paths + iter->update_path);
- }
-
- if (!(iter->flags & BTREE_ITER_all_snapshots))
- iter->pos.snapshot = iter->snapshot;
-
- ret = bch2_btree_iter_verify_ret(iter, k);
- if (unlikely(ret)) {
- bch2_btree_iter_set_pos(iter, iter->pos);
- k = bkey_s_c_err(ret);
- }
-
- bch2_btree_iter_verify_entry_exit(iter);
-
- return k;
-end:
- bch2_btree_iter_set_pos(iter, end);
- k = bkey_s_c_null;
- goto out_no_locked;
-}
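
For readers skimming the diff, here is a minimal usage sketch of the peek/advance pattern that bch2_btree_iter_peek_upto() and bch2_btree_iter_advance() implement. It is illustrative only: the btree ID, the start/end positions, and the per-key work are placeholders, and transaction-restart handling (normally supplied by lockrestart_do() or the for_each_btree_key_upto() helpers declared in btree_iter.h) is omitted.

static int example_scan(struct btree_trans *trans,
			struct bpos start, struct bpos end)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	/* BTREE_ID_subvolumes is only a stand-in for a real btree ID: */
	bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes, start, 0);

	while (1) {
		k = bch2_btree_iter_peek_upto(&iter, end);
		ret = bkey_err(k);
		if (ret || !k.k)
			break;

		/* ... inspect k here ... */

		if (!bch2_btree_iter_advance(&iter))
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}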
-
-/**
- * bch2_btree_iter_next() - returns first key greater than iterator's current
- * position
- * @iter: iterator to peek from
- *
- * Returns: key if found, or an error extractable with bkey_err().
- */
-struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
-{
- if (!bch2_btree_iter_advance(iter))
- return bkey_s_c_null;
-
- return bch2_btree_iter_peek(iter);
-}
-
-/**
- * bch2_btree_iter_peek_prev() - returns first key less than or equal to
- * iterator's current position
- * @iter: iterator to peek from
- *
- * Returns: key if found, or an error extractable with bkey_err().
- */
-struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
-{
- struct btree_trans *trans = iter->trans;
- struct bpos search_key = iter->pos;
- struct bkey_s_c k;
- struct bkey saved_k;
- const struct bch_val *saved_v;
- btree_path_idx_t saved_path = 0;
- int ret;
-
- bch2_trans_verify_not_unlocked(trans);
- EBUG_ON(btree_iter_path(trans, iter)->cached ||
- btree_iter_path(trans, iter)->level);
-
- if (iter->flags & BTREE_ITER_with_journal)
- return bkey_s_c_err(-BCH_ERR_btree_iter_with_journal_not_supported);
-
- bch2_btree_iter_verify(iter);
- bch2_btree_iter_verify_entry_exit(iter);
-
- if (iter->flags & BTREE_ITER_filter_snapshots)
- search_key.snapshot = U32_MAX;
-
- while (1) {
- iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
- iter->flags & BTREE_ITER_intent,
- btree_iter_ip_allocated(iter));
-
- ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
- if (unlikely(ret)) {
- /* ensure that iter->k is consistent with iter->pos: */
- bch2_btree_iter_set_pos(iter, iter->pos);
- k = bkey_s_c_err(ret);
- goto out_no_locked;
- }
-
- struct btree_path *path = btree_iter_path(trans, iter);
-
- k = btree_path_level_peek(trans, path, &path->l[0], &iter->k);
- if (!k.k ||
- ((iter->flags & BTREE_ITER_is_extents)
- ? bpos_ge(bkey_start_pos(k.k), search_key)
- : bpos_gt(k.k->p, search_key)))
- k = btree_path_level_prev(trans, path, &path->l[0], &iter->k);
-
- if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
- trans->nr_updates))
- bch2_btree_trans_peek_prev_updates(trans, iter, &k);
-
- if (likely(k.k)) {
- if (iter->flags & BTREE_ITER_filter_snapshots) {
- if (k.k->p.snapshot == iter->snapshot)
- goto got_key;
-
- /*
- * If we have a saved candidate, and we're no
- * longer at the same _key_ (not pos), return
- * that candidate
- */
- if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
- bch2_path_put_nokeep(trans, iter->path,
- iter->flags & BTREE_ITER_intent);
- iter->path = saved_path;
- saved_path = 0;
- iter->k = saved_k;
- k.v = saved_v;
- goto got_key;
- }
-
- if (bch2_snapshot_is_ancestor(trans->c,
- iter->snapshot,
- k.k->p.snapshot)) {
- if (saved_path)
- bch2_path_put_nokeep(trans, saved_path,
- iter->flags & BTREE_ITER_intent);
- saved_path = btree_path_clone(trans, iter->path,
- iter->flags & BTREE_ITER_intent,
- _THIS_IP_);
- path = btree_iter_path(trans, iter);
- trace_btree_path_save_pos(trans, path, trans->paths + saved_path);
- saved_k = *k.k;
- saved_v = k.v;
- }
-
- search_key = bpos_predecessor(k.k->p);
- continue;
- }
-got_key:
- if (bkey_whiteout(k.k) &&
- !(iter->flags & BTREE_ITER_all_snapshots)) {
- search_key = bkey_predecessor(iter, k.k->p);
- if (iter->flags & BTREE_ITER_filter_snapshots)
- search_key.snapshot = U32_MAX;
- continue;
- }
-
- btree_path_set_should_be_locked(trans, path);
- break;
- } else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
- /* Advance to previous leaf node: */
- search_key = bpos_predecessor(path->l[0].b->data->min_key);
- } else {
- /* Start of btree: */
- bch2_btree_iter_set_pos(iter, POS_MIN);
- k = bkey_s_c_null;
- goto out_no_locked;
- }
- }
-
- EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));
-
- /* Extents can straddle iter->pos: */
- if (bkey_lt(k.k->p, iter->pos))
- iter->pos = k.k->p;
-
- if (iter->flags & BTREE_ITER_filter_snapshots)
- iter->pos.snapshot = iter->snapshot;
-out_no_locked:
- if (saved_path)
- bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);
-
- bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
-
- return k;
-}
-
-/**
- * bch2_btree_iter_prev() - returns first key less than iterator's current
- * position
- * @iter: iterator to peek from
- *
- * Returns: key if found, or an error extractable with bkey_err().
- */
-struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
-{
- if (!bch2_btree_iter_rewind(iter))
- return bkey_s_c_null;
-
- return bch2_btree_iter_peek_prev(iter);
-}
-
-struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
-{
- struct btree_trans *trans = iter->trans;
- struct bpos search_key;
- struct bkey_s_c k;
- int ret;
-
- bch2_trans_verify_not_unlocked(trans);
- bch2_btree_iter_verify(iter);
- bch2_btree_iter_verify_entry_exit(iter);
- EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));
-
- /* extents can't span inode numbers: */
- if ((iter->flags & BTREE_ITER_is_extents) &&
- unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
- if (iter->pos.inode == KEY_INODE_MAX)
- return bkey_s_c_null;
-
- bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
- }
-
- search_key = btree_iter_search_key(iter);
- iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
- iter->flags & BTREE_ITER_intent,
- btree_iter_ip_allocated(iter));
-
- ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
- if (unlikely(ret)) {
- k = bkey_s_c_err(ret);
- goto out_no_locked;
- }
-
- if ((iter->flags & BTREE_ITER_cached) ||
- !(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) {
- k = bkey_s_c_null;
-
- if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
- trans->nr_updates)) {
- bch2_btree_trans_peek_slot_updates(trans, iter, &k);
- if (k.k)
- goto out;
- }
-
- if (unlikely(iter->flags & BTREE_ITER_with_journal) &&
- (k = btree_trans_peek_slot_journal(trans, iter)).k)
- goto out;
-
- if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
- (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
- if (!bkey_err(k))
- iter->k = *k.k;
- /* We're not returning a key from iter->path: */
- goto out_no_locked;
- }
-
- k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k);
- if (unlikely(!k.k))
- goto out_no_locked;
- } else {
- struct bpos next;
- struct bpos end = iter->pos;
-
- if (iter->flags & BTREE_ITER_is_extents)
- end.offset = U64_MAX;
-
- EBUG_ON(btree_iter_path(trans, iter)->level);
-
- if (iter->flags & BTREE_ITER_intent) {
- struct btree_iter iter2;
-
- bch2_trans_copy_iter(&iter2, iter);
- k = bch2_btree_iter_peek_upto(&iter2, end);
-
- if (k.k && !bkey_err(k)) {
- swap(iter->key_cache_path, iter2.key_cache_path);
- iter->k = iter2.k;
- k.k = &iter->k;
- }
- bch2_trans_iter_exit(trans, &iter2);
- } else {
- struct bpos pos = iter->pos;
-
- k = bch2_btree_iter_peek_upto(iter, end);
- if (unlikely(bkey_err(k)))
- bch2_btree_iter_set_pos(iter, pos);
- else
- iter->pos = pos;
- }
-
- if (unlikely(bkey_err(k)))
- goto out_no_locked;
-
- next = k.k ? bkey_start_pos(k.k) : POS_MAX;
-
- if (bkey_lt(iter->pos, next)) {
- bkey_init(&iter->k);
- iter->k.p = iter->pos;
-
- if (iter->flags & BTREE_ITER_is_extents) {
- bch2_key_resize(&iter->k,
- min_t(u64, KEY_SIZE_MAX,
- (next.inode == iter->pos.inode
- ? next.offset
- : KEY_OFFSET_MAX) -
- iter->pos.offset));
- EBUG_ON(!iter->k.size);
- }
-
- k = (struct bkey_s_c) { &iter->k, NULL };
- }
- }
-out:
- btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
-out_no_locked:
- bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
- ret = bch2_btree_iter_verify_ret(iter, k);
- if (unlikely(ret))
- return bkey_s_c_err(ret);
-
- return k;
-}
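
A sketch of a single-slot lookup with bch2_btree_iter_peek_slot(): slot iteration returns the key at the iterator position, which may be a synthesized hole key as constructed above. Illustrative only; the btree ID is a placeholder and restart handling is omitted (see lockrestart_do() in btree_iter.h).

static int example_lookup(struct btree_trans *trans, struct bpos pos,
			  struct bkey *out)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes, pos, 0);

	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (!ret && k.k)
		*out = *k.k;	/* possibly a hole/deleted key at @pos */

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}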
-
-struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
-{
- if (!bch2_btree_iter_advance(iter))
- return bkey_s_c_null;
-
- return bch2_btree_iter_peek_slot(iter);
-}
-
-struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
-{
- if (!bch2_btree_iter_rewind(iter))
- return bkey_s_c_null;
-
- return bch2_btree_iter_peek_slot(iter);
-}
-
-/* Obsolete, but still used by rust wrapper in -tools */
-struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
-{
- struct bkey_s_c k;
-
- while (btree_trans_too_many_iters(iter->trans) ||
- (k = bch2_btree_iter_peek_type(iter, iter->flags),
- bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
- bch2_trans_begin(iter->trans);
-
- return k;
-}
-
-/* new transactional stuff: */
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
-{
- struct btree_path *path;
- unsigned i;
-
- BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1);
-
- trans_for_each_path(trans, path, i) {
- BUG_ON(path->sorted_idx >= trans->nr_sorted);
- BUG_ON(trans->sorted[path->sorted_idx] != i);
- }
-
- for (i = 0; i < trans->nr_sorted; i++) {
- unsigned idx = trans->sorted[i];
-
- BUG_ON(!test_bit(idx, trans->paths_allocated));
- BUG_ON(trans->paths[idx].sorted_idx != i);
- }
-}
-
-static void btree_trans_verify_sorted(struct btree_trans *trans)
-{
- struct btree_path *path, *prev = NULL;
- struct trans_for_each_path_inorder_iter iter;
-
- if (!bch2_debug_check_iterators)
- return;
-
- trans_for_each_path_inorder(trans, path, iter) {
- if (prev && btree_path_cmp(prev, path) > 0) {
- __bch2_dump_trans_paths_updates(trans, true);
- panic("trans paths out of order!\n");
- }
- prev = path;
- }
-}
-#else
-static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
-static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
-#endif
-
-void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
-{
- int i, l = 0, r = trans->nr_sorted, inc = 1;
- bool swapped;
-
- btree_trans_verify_sorted_refs(trans);
-
- if (trans->paths_sorted)
- goto out;
-
- /*
- * Cocktail shaker sort: this is efficient because iterators will be
- * mostly sorted.
- */
- do {
- swapped = false;
-
- for (i = inc > 0 ? l : r - 2;
- i + 1 < r && i >= l;
- i += inc) {
- if (btree_path_cmp(trans->paths + trans->sorted[i],
- trans->paths + trans->sorted[i + 1]) > 0) {
- swap(trans->sorted[i], trans->sorted[i + 1]);
- trans->paths[trans->sorted[i]].sorted_idx = i;
- trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
- swapped = true;
- }
- }
-
- if (inc > 0)
- --r;
- else
- l++;
- inc = -inc;
- } while (swapped);
-
- trans->paths_sorted = true;
-out:
- btree_trans_verify_sorted(trans);
-}
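
The cocktail shaker sort above is easier to follow outside of the path machinery. The following standalone rendering of the same loop structure on a plain int array (illustrative, not part of the kernel source) shows the alternating forward/backward passes and the shrinking [l, r) window:

#include <stdbool.h>

/* Sorts v[0..n), mirroring the l/r window and direction flip used above: */
static void cocktail_shaker_sort(int *v, int n)
{
	int i, l = 0, r = n, inc = 1;
	bool swapped;

	do {
		swapped = false;

		for (i = inc > 0 ? l : r - 2;
		     i + 1 < r && i >= l;
		     i += inc)
			if (v[i] > v[i + 1]) {
				int tmp = v[i];
				v[i] = v[i + 1];
				v[i + 1] = tmp;
				swapped = true;
			}

		if (inc > 0)
			--r;	/* largest remaining element now sits at the right edge */
		else
			l++;	/* smallest remaining element now sits at the left edge */
		inc = -inc;
	} while (swapped);
}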
-
-static inline void btree_path_list_remove(struct btree_trans *trans,
- struct btree_path *path)
-{
- EBUG_ON(path->sorted_idx >= trans->nr_sorted);
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- trans->nr_sorted--;
- memmove_u64s_down_small(trans->sorted + path->sorted_idx,
- trans->sorted + path->sorted_idx + 1,
- DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
- sizeof(u64) / sizeof(btree_path_idx_t)));
-#else
- array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
-#endif
- for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
- trans->paths[trans->sorted[i]].sorted_idx = i;
-}
-
-static inline void btree_path_list_add(struct btree_trans *trans,
- btree_path_idx_t pos,
- btree_path_idx_t path_idx)
-{
- struct btree_path *path = trans->paths + path_idx;
-
- path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
-
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
- trans->sorted + path->sorted_idx,
- DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
- sizeof(u64) / sizeof(btree_path_idx_t)));
- trans->nr_sorted++;
- trans->sorted[path->sorted_idx] = path_idx;
-#else
- array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx);
-#endif
-
- for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
- trans->paths[trans->sorted[i]].sorted_idx = i;
-
- btree_trans_verify_sorted_refs(trans);
-}
-
-void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
-{
- if (iter->update_path)
- bch2_path_put_nokeep(trans, iter->update_path,
- iter->flags & BTREE_ITER_intent);
- if (iter->path)
- bch2_path_put(trans, iter->path,
- iter->flags & BTREE_ITER_intent);
- if (iter->key_cache_path)
- bch2_path_put(trans, iter->key_cache_path,
- iter->flags & BTREE_ITER_intent);
- iter->path = 0;
- iter->update_path = 0;
- iter->key_cache_path = 0;
- iter->trans = NULL;
-}
-
-void bch2_trans_iter_init_outlined(struct btree_trans *trans,
- struct btree_iter *iter,
- enum btree_id btree_id, struct bpos pos,
- unsigned flags)
-{
- bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
- bch2_btree_iter_flags(trans, btree_id, flags),
- _RET_IP_);
-}
-
-void bch2_trans_node_iter_init(struct btree_trans *trans,
- struct btree_iter *iter,
- enum btree_id btree_id,
- struct bpos pos,
- unsigned locks_want,
- unsigned depth,
- unsigned flags)
-{
- flags |= BTREE_ITER_not_extents;
- flags |= BTREE_ITER_snapshot_field;
- flags |= BTREE_ITER_all_snapshots;
-
- bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
- __bch2_btree_iter_flags(trans, btree_id, flags),
- _RET_IP_);
-
- iter->min_depth = depth;
-
- struct btree_path *path = btree_iter_path(trans, iter);
- BUG_ON(path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
- BUG_ON(path->level != depth);
- BUG_ON(iter->min_depth != depth);
-}
-
-void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
-{
- struct btree_trans *trans = src->trans;
-
- *dst = *src;
-#ifdef TRACK_PATH_ALLOCATED
- dst->ip_allocated = _RET_IP_;
-#endif
- if (src->path)
- __btree_path_get(trans, trans->paths + src->path, src->flags & BTREE_ITER_intent);
- if (src->update_path)
- __btree_path_get(trans, trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
- dst->key_cache_path = 0;
-}
-
-void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
-{
- struct bch_fs *c = trans->c;
- unsigned new_top = trans->mem_top + size;
- unsigned old_bytes = trans->mem_bytes;
- unsigned new_bytes = roundup_pow_of_two(new_top);
- int ret;
- void *new_mem;
- void *p;
-
- WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
-
- struct btree_transaction_stats *s = btree_trans_stats(trans);
- s->max_mem = max(s->max_mem, new_bytes);
-
- if (trans->used_mempool) {
- if (trans->mem_bytes >= new_bytes)
- goto out_change_top;
-
-		/* No more space in the mempool buffer: allocate a new one */
- new_mem = kmalloc(new_bytes, GFP_NOWAIT|__GFP_NOWARN);
- if (unlikely(!new_mem)) {
- bch2_trans_unlock(trans);
-
- new_mem = kmalloc(new_bytes, GFP_KERNEL);
- if (!new_mem)
- return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
-
- ret = bch2_trans_relock(trans);
- if (ret) {
- kfree(new_mem);
- return ERR_PTR(ret);
- }
- }
- memcpy(new_mem, trans->mem, trans->mem_top);
- trans->used_mempool = false;
- mempool_free(trans->mem, &c->btree_trans_mem_pool);
- goto out_new_mem;
- }
-
- new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
- if (unlikely(!new_mem)) {
- bch2_trans_unlock(trans);
-
- new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
- if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
- new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
- new_bytes = BTREE_TRANS_MEM_MAX;
- memcpy(new_mem, trans->mem, trans->mem_top);
- trans->used_mempool = true;
- kfree(trans->mem);
- }
-
- if (!new_mem)
- return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
-
- trans->mem = new_mem;
- trans->mem_bytes = new_bytes;
-
- ret = bch2_trans_relock(trans);
- if (ret)
- return ERR_PTR(ret);
- }
-out_new_mem:
- trans->mem = new_mem;
- trans->mem_bytes = new_bytes;
-
- if (old_bytes) {
- trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
- return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
- }
-out_change_top:
- p = trans->mem + trans->mem_top;
- trans->mem_top += size;
- memset(p, 0, size);
- return p;
-}
-
-static inline void check_srcu_held_too_long(struct btree_trans *trans)
-{
- WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
- "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
- (jiffies - trans->srcu_lock_time) / HZ);
-}
-
-void bch2_trans_srcu_unlock(struct btree_trans *trans)
-{
- if (trans->srcu_held) {
- struct bch_fs *c = trans->c;
- struct btree_path *path;
- unsigned i;
-
- trans_for_each_path(trans, path, i)
- if (path->cached && !btree_node_locked(path, 0))
- path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
-
- check_srcu_held_too_long(trans);
- srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
- trans->srcu_held = false;
- }
-}
-
-static void bch2_trans_srcu_lock(struct btree_trans *trans)
-{
- if (!trans->srcu_held) {
- trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier);
- trans->srcu_lock_time = jiffies;
- trans->srcu_held = true;
- }
-}
-
-/**
- * bch2_trans_begin() - reset a transaction after an interrupted attempt
- * @trans: transaction to reset
- *
- * Returns: current restart counter, to be used with trans_was_restarted()
- *
- * While iterating over nodes or updating nodes, an attempt to lock a btree node
- * may return BCH_ERR_transaction_restart when the trylock fails. When this
- * occurs bch2_trans_begin() should be called and the transaction retried.
- */
-u32 bch2_trans_begin(struct btree_trans *trans)
-{
- struct btree_path *path;
- unsigned i;
- u64 now;
-
- bch2_trans_reset_updates(trans);
-
- trans->restart_count++;
- trans->mem_top = 0;
- trans->journal_entries = NULL;
-
- trans_for_each_path(trans, path, i) {
- path->should_be_locked = false;
-
- /*
- * If the transaction wasn't restarted, we're presuming to be
-		 * doing something new: don't keep iterators except the ones that
- * are in use - except for the subvolumes btree:
- */
- if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
- path->preserve = false;
-
- /*
- * XXX: we probably shouldn't be doing this if the transaction
- * was restarted, but currently we still overflow transaction
- * iterators if we do that
- */
- if (!path->ref && !path->preserve)
- __bch2_path_free(trans, i);
- else
- path->preserve = false;
- }
-
- now = local_clock();
-
- if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LATENCY_ACCT) &&
- time_after64(now, trans->last_begin_time + 10))
- __bch2_time_stats_update(&btree_trans_stats(trans)->duration,
- trans->last_begin_time, now);
-
- if (!trans->restarted &&
- (need_resched() ||
- time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
- bch2_trans_unlock(trans);
- cond_resched();
- now = local_clock();
- }
- trans->last_begin_time = now;
-
- if (unlikely(trans->srcu_held &&
- time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
- bch2_trans_srcu_unlock(trans);
-
- trans->last_begin_ip = _RET_IP_;
-
- trans_set_locked(trans);
-
- if (trans->restarted) {
- bch2_btree_path_traverse_all(trans);
- trans->notrace_relock_fail = false;
- }
-
- bch2_trans_verify_not_unlocked(trans);
- return trans->restart_count;
-}
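
To make the contract above concrete, here is a minimal retry loop of the sort bch2_trans_begin() is designed for. example_operation() is a hypothetical stub standing in for iterator/update work; real callers would normally use the lockrestart_do()/commit_do() wrappers from btree_iter.h rather than open-coding this.

/* hypothetical stand-in for a sequence of iterator/update calls: */
static int example_operation(struct btree_trans *trans)
{
	return 0;
}

static int example_retry_loop(struct btree_trans *trans)
{
	int ret;

	do {
		bch2_trans_begin(trans);

		ret = example_operation(trans);
	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

	return ret;
}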
-
-const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR] = { "(unknown)" };
-
-unsigned bch2_trans_get_fn_idx(const char *fn)
-{
- for (unsigned i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
- if (!bch2_btree_transaction_fns[i] ||
- bch2_btree_transaction_fns[i] == fn) {
- bch2_btree_transaction_fns[i] = fn;
- return i;
- }
-
- pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
- return 0;
-}
-
-struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
- __acquires(&c->btree_trans_barrier)
-{
- struct btree_trans *trans;
-
- if (IS_ENABLED(__KERNEL__)) {
- trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
- if (trans) {
- memset(trans, 0, offsetof(struct btree_trans, list));
- goto got_trans;
- }
- }
-
- trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
- memset(trans, 0, sizeof(*trans));
-
- seqmutex_lock(&c->btree_trans_lock);
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
- struct btree_trans *pos;
- pid_t pid = current->pid;
-
- trans->locking_wait.task = current;
-
- list_for_each_entry(pos, &c->btree_trans_list, list) {
- struct task_struct *pos_task = READ_ONCE(pos->locking_wait.task);
- /*
- * We'd much prefer to be stricter here and completely
- * disallow multiple btree_trans in the same thread -
- * but the data move path calls bch2_write when we
- * already have a btree_trans initialized.
- */
- BUG_ON(pos_task &&
- pid == pos_task->pid &&
- pos->locked);
- }
- }
-
- list_add(&trans->list, &c->btree_trans_list);
- seqmutex_unlock(&c->btree_trans_lock);
-got_trans:
- trans->c = c;
- trans->last_begin_time = local_clock();
- trans->fn_idx = fn_idx;
- trans->locking_wait.task = current;
- trans->journal_replay_not_finished =
- unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)) &&
- atomic_inc_not_zero(&c->journal_keys.ref);
- trans->nr_paths = ARRAY_SIZE(trans->_paths);
- trans->paths_allocated = trans->_paths_allocated;
- trans->sorted = trans->_sorted;
- trans->paths = trans->_paths;
- trans->updates = trans->_updates;
-
- *trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
-
- trans->paths_allocated[0] = 1;
-
- static struct lock_class_key lockdep_key;
- lockdep_init_map(&trans->dep_map, "bcachefs_btree", &lockdep_key, 0);
-
- if (fn_idx < BCH_TRANSACTIONS_NR) {
- trans->fn = bch2_btree_transaction_fns[fn_idx];
-
- struct btree_transaction_stats *s = &c->btree_transaction_stats[fn_idx];
-
- if (s->max_mem) {
- unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
-
- trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
- if (likely(trans->mem))
- trans->mem_bytes = expected_mem_bytes;
- }
-
- trans->nr_paths_max = s->nr_max_paths;
- trans->journal_entries_size = s->journal_entries_size;
- }
-
- trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
- trans->srcu_lock_time = jiffies;
- trans->srcu_held = true;
- trans_set_locked(trans);
-
- closure_init_stack_release(&trans->ref);
- return trans;
-}
-
-static void check_btree_paths_leaked(struct btree_trans *trans)
-{
-#ifdef CONFIG_BCACHEFS_DEBUG
- struct bch_fs *c = trans->c;
- struct btree_path *path;
- unsigned i;
-
- trans_for_each_path(trans, path, i)
- if (path->ref)
- goto leaked;
- return;
-leaked:
- bch_err(c, "btree paths leaked from %s!", trans->fn);
- trans_for_each_path(trans, path, i)
- if (path->ref)
- printk(KERN_ERR " btree %s %pS\n",
- bch2_btree_id_str(path->btree_id),
- (void *) path->ip_allocated);
- /* Be noisy about this: */
- bch2_fatal_error(c);
-#endif
-}
-
-void bch2_trans_put(struct btree_trans *trans)
- __releases(&c->btree_trans_barrier)
-{
- struct bch_fs *c = trans->c;
-
- bch2_trans_unlock(trans);
-
- trans_for_each_update(trans, i)
- __btree_path_put(trans, trans->paths + i->path, true);
- trans->nr_updates = 0;
-
- check_btree_paths_leaked(trans);
-
- if (trans->srcu_held) {
- check_srcu_held_too_long(trans);
- srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
- }
-
- if (unlikely(trans->journal_replay_not_finished))
- bch2_journal_keys_put(c);
-
- /*
-	 * trans->ref protects trans->locking_wait.task and the btree_paths array,
-	 * both of which are used by the cycle detector
- */
- closure_return_sync(&trans->ref);
- trans->locking_wait.task = NULL;
-
- unsigned long *paths_allocated = trans->paths_allocated;
- trans->paths_allocated = NULL;
- trans->paths = NULL;
-
- if (paths_allocated != trans->_paths_allocated)
- kvfree_rcu_mightsleep(paths_allocated);
-
- if (trans->used_mempool)
- mempool_free(trans->mem, &c->btree_trans_mem_pool);
- else
- kfree(trans->mem);
-
- /* Userspace doesn't have a real percpu implementation: */
- if (IS_ENABLED(__KERNEL__))
- trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
-
- if (trans) {
- seqmutex_lock(&c->btree_trans_lock);
- list_del(&trans->list);
- seqmutex_unlock(&c->btree_trans_lock);
-
- mempool_free(trans, &c->btree_trans_pool);
- }
-}
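
For completeness, a sketch of the full lifecycle that bch2_trans_get()/bch2_trans_put() bracket; the body is a hypothetical placeholder for the retry loop sketched after bch2_trans_begin() above.

static int example_transaction(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);

	/*
	 * hypothetical: iterator/update work with restart handling
	 * (e.g. via lockrestart_do()) would go here
	 */
	int ret = 0;

	bch2_trans_put(trans);
	return ret;
}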
-
-bool bch2_current_has_btree_trans(struct bch_fs *c)
-{
- seqmutex_lock(&c->btree_trans_lock);
- struct btree_trans *trans;
- bool ret = false;
- list_for_each_entry(trans, &c->btree_trans_list, list)
- if (trans->locking_wait.task == current &&
- trans->locked) {
- ret = true;
- break;
- }
- seqmutex_unlock(&c->btree_trans_lock);
- return ret;
-}
-
-static void __maybe_unused
-bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
- struct btree_bkey_cached_common *b)
-{
- struct six_lock_count c = six_lock_counts(&b->lock);
- struct task_struct *owner;
- pid_t pid;
-
- rcu_read_lock();
- owner = READ_ONCE(b->lock.owner);
- pid = owner ? owner->pid : 0;
- rcu_read_unlock();
-
- prt_printf(out, "\t%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
- b->level, bch2_btree_id_str(b->btree_id));
- bch2_bpos_to_text(out, btree_node_pos(b));
-
- prt_printf(out, "\t locks %u:%u:%u held by pid %u",
- c.n[0], c.n[1], c.n[2], pid);
-}
-
-void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
-{
- struct btree_bkey_cached_common *b;
- static char lock_types[] = { 'r', 'i', 'w' };
- struct task_struct *task = READ_ONCE(trans->locking_wait.task);
- unsigned l, idx;
-
- /* before rcu_read_lock(): */
- bch2_printbuf_make_room(out, 4096);
-
- if (!out->nr_tabstops) {
- printbuf_tabstop_push(out, 16);
- printbuf_tabstop_push(out, 32);
- }
-
- prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);
-
- /* trans->paths is rcu protected vs. freeing */
- rcu_read_lock();
- out->atomic++;
-
- struct btree_path *paths = rcu_dereference(trans->paths);
- if (!paths)
- goto out;
-
- unsigned long *paths_allocated = trans_paths_allocated(paths);
-
- trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) {
- struct btree_path *path = paths + idx;
- if (!path->nodes_locked)
- continue;
-
- prt_printf(out, " path %u %c l=%u %s:",
- idx,
- path->cached ? 'c' : 'b',
- path->level,
- bch2_btree_id_str(path->btree_id));
- bch2_bpos_to_text(out, path->pos);
- prt_newline(out);
-
- for (l = 0; l < BTREE_MAX_DEPTH; l++) {
- if (btree_node_locked(path, l) &&
- !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
- prt_printf(out, " %c l=%u ",
- lock_types[btree_node_locked_type(path, l)], l);
- bch2_btree_bkey_cached_common_to_text(out, b);
- prt_newline(out);
- }
- }
- }
-
- b = READ_ONCE(trans->locking);
- if (b) {
- prt_printf(out, " blocked for %lluus on\n",
- div_u64(local_clock() - trans->locking_wait.start_time, 1000));
- prt_printf(out, " %c", lock_types[trans->locking_wait.lock_want]);
- bch2_btree_bkey_cached_common_to_text(out, b);
- prt_newline(out);
- }
-out:
- --out->atomic;
- rcu_read_unlock();
-}
-
-void bch2_fs_btree_iter_exit(struct bch_fs *c)
-{
- struct btree_transaction_stats *s;
- struct btree_trans *trans;
- int cpu;
-
- if (c->btree_trans_bufs)
- for_each_possible_cpu(cpu) {
- struct btree_trans *trans =
- per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;
-
- if (trans) {
- seqmutex_lock(&c->btree_trans_lock);
- list_del(&trans->list);
- seqmutex_unlock(&c->btree_trans_lock);
- }
- kfree(trans);
- }
- free_percpu(c->btree_trans_bufs);
-
- trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list);
- if (trans)
- panic("%s leaked btree_trans\n", trans->fn);
-
- for (s = c->btree_transaction_stats;
- s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
- s++) {
- kfree(s->max_paths_text);
- bch2_time_stats_exit(&s->lock_hold_times);
- }
-
- if (c->btree_trans_barrier_initialized) {
- synchronize_srcu_expedited(&c->btree_trans_barrier);
- cleanup_srcu_struct(&c->btree_trans_barrier);
- }
- mempool_exit(&c->btree_trans_mem_pool);
- mempool_exit(&c->btree_trans_pool);
-}
-
-void bch2_fs_btree_iter_init_early(struct bch_fs *c)
-{
- struct btree_transaction_stats *s;
-
- for (s = c->btree_transaction_stats;
- s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
- s++) {
- bch2_time_stats_init(&s->duration);
- bch2_time_stats_init(&s->lock_hold_times);
- mutex_init(&s->lock);
- }
-
- INIT_LIST_HEAD(&c->btree_trans_list);
- seqmutex_init(&c->btree_trans_lock);
-}
-
-int bch2_fs_btree_iter_init(struct bch_fs *c)
-{
- int ret;
-
- c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
- if (!c->btree_trans_bufs)
- return -ENOMEM;
-
- ret = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1,
- sizeof(struct btree_trans)) ?:
- mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
- BTREE_TRANS_MEM_MAX) ?:
- init_srcu_struct(&c->btree_trans_barrier);
- if (ret)
- return ret;
-
- /*
- * static annotation (hackily done) for lock ordering of reclaim vs.
- * btree node locks:
- */
-#ifdef CONFIG_LOCKDEP
- fs_reclaim_acquire(GFP_KERNEL);
- struct btree_trans *trans = bch2_trans_get(c);
- trans_set_locked(trans);
- bch2_trans_put(trans);
- fs_reclaim_release(GFP_KERNEL);
-#endif
-
- c->btree_trans_barrier_initialized = true;
- return 0;
-
-}
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
deleted file mode 100644
index 0bda054f80d7..000000000000
--- a/fs/bcachefs/btree_iter.h
+++ /dev/null
@@ -1,940 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BTREE_ITER_H
-#define _BCACHEFS_BTREE_ITER_H
-
-#include "bset.h"
-#include "btree_types.h"
-#include "trace.h"
-
-void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
-void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
-void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
-void bch2_dump_trans_updates(struct btree_trans *);
-void bch2_dump_trans_paths_updates(struct btree_trans *);
-
-static inline int __bkey_err(const struct bkey *k)
-{
- return PTR_ERR_OR_ZERO(k);
-}
-
-#define bkey_err(_k) __bkey_err((_k).k)
-
-static inline void __btree_path_get(struct btree_trans *trans, struct btree_path *path, bool intent)
-{
- unsigned idx = path - trans->paths;
-
- EBUG_ON(!test_bit(idx, trans->paths_allocated));
- if (unlikely(path->ref == U8_MAX)) {
- bch2_dump_trans_paths_updates(trans);
- panic("path %u refcount overflow\n", idx);
- }
-
- path->ref++;
- path->intent_ref += intent;
- trace_btree_path_get_ll(trans, path);
-}
-
-static inline bool __btree_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
-{
- EBUG_ON(!test_bit(path - trans->paths, trans->paths_allocated));
- EBUG_ON(!path->ref);
- EBUG_ON(!path->intent_ref && intent);
-
- trace_btree_path_put_ll(trans, path);
- path->intent_ref -= intent;
- return --path->ref == 0;
-}
-
-static inline void btree_path_set_dirty(struct btree_path *path,
- enum btree_path_uptodate u)
-{
- path->uptodate = max_t(unsigned, path->uptodate, u);
-}
-
-static inline struct btree *btree_path_node(struct btree_path *path,
- unsigned level)
-{
- return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
-}
-
-static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
- const struct btree *b, unsigned level)
-{
- return path->l[level].lock_seq == six_lock_seq(&b->c.lock);
-}
-
-static inline struct btree *btree_node_parent(struct btree_path *path,
- struct btree *b)
-{
- return btree_path_node(path, b->c.level + 1);
-}
-
-/* Iterate over paths within a transaction: */
-
-void __bch2_btree_trans_sort_paths(struct btree_trans *);
-
-static inline void btree_trans_sort_paths(struct btree_trans *trans)
-{
- if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
- trans->paths_sorted)
- return;
- __bch2_btree_trans_sort_paths(trans);
-}
-
-static inline unsigned long *trans_paths_nr(struct btree_path *paths)
-{
- return &container_of(paths, struct btree_trans_paths, paths[0])->nr_paths;
-}
-
-static inline unsigned long *trans_paths_allocated(struct btree_path *paths)
-{
- unsigned long *v = trans_paths_nr(paths);
- return v - BITS_TO_LONGS(*v);
-}
-
-#define trans_for_each_path_idx_from(_paths_allocated, _nr, _idx, _start)\
- for (_idx = _start; \
- (_idx = find_next_bit(_paths_allocated, _nr, _idx)) < _nr; \
- _idx++)
-
-static inline struct btree_path *
-__trans_next_path(struct btree_trans *trans, unsigned *idx)
-{
- unsigned long *w = trans->paths_allocated + *idx / BITS_PER_LONG;
- /*
- * Open coded find_next_bit(), because
-	 * - this is a fast path, we can't afford the function call
- * - and we know that nr_paths is a multiple of BITS_PER_LONG,
- */
- while (*idx < trans->nr_paths) {
- unsigned long v = *w >> (*idx & (BITS_PER_LONG - 1));
- if (v) {
- *idx += __ffs(v);
- return trans->paths + *idx;
- }
-
- *idx += BITS_PER_LONG;
- *idx &= ~(BITS_PER_LONG - 1);
- w++;
- }
-
- return NULL;
-}
-
-/*
- * This version is intended to be safe for use on a btree_trans that is owned by
- * another thread, for bch2_btree_trans_to_text();
- */
-#define trans_for_each_path_from(_trans, _path, _idx, _start) \
- for (_idx = _start; \
- (_path = __trans_next_path((_trans), &_idx)); \
- _idx++)
-
-#define trans_for_each_path(_trans, _path, _idx) \
- trans_for_each_path_from(_trans, _path, _idx, 1)
-
-static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
-{
- unsigned idx = path ? path->sorted_idx + 1 : 0;
-
- EBUG_ON(idx > trans->nr_sorted);
-
- return idx < trans->nr_sorted
- ? trans->paths + trans->sorted[idx]
- : NULL;
-}
-
-static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
-{
- unsigned idx = path ? path->sorted_idx : trans->nr_sorted;
-
- return idx
- ? trans->paths + trans->sorted[idx - 1]
- : NULL;
-}
-
-#define trans_for_each_path_idx_inorder(_trans, _iter) \
- for (_iter = (struct trans_for_each_path_inorder_iter) { 0 }; \
- (_iter.path_idx = trans->sorted[_iter.sorted_idx], \
- _iter.sorted_idx < (_trans)->nr_sorted); \
- _iter.sorted_idx++)
-
-struct trans_for_each_path_inorder_iter {
- btree_path_idx_t sorted_idx;
- btree_path_idx_t path_idx;
-};
-
-#define trans_for_each_path_inorder(_trans, _path, _iter) \
- for (_iter = (struct trans_for_each_path_inorder_iter) { 0 }; \
- (_iter.path_idx = trans->sorted[_iter.sorted_idx], \
- _path = (_trans)->paths + _iter.path_idx, \
- _iter.sorted_idx < (_trans)->nr_sorted); \
- _iter.sorted_idx++)
-
-#define trans_for_each_path_inorder_reverse(_trans, _path, _i) \
- for (_i = trans->nr_sorted - 1; \
- ((_path) = (_trans)->paths + trans->sorted[_i]), (_i) >= 0;\
- --_i)
-
-static inline bool __path_has_node(const struct btree_path *path,
- const struct btree *b)
-{
- return path->l[b->c.level].b == b &&
- btree_node_lock_seq_matches(path, b, b->c.level);
-}
-
-static inline struct btree_path *
-__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
- unsigned *idx)
-{
- struct btree_path *path;
-
- while ((path = __trans_next_path(trans, idx)) &&
- !__path_has_node(path, b))
- (*idx)++;
-
- return path;
-}
-
-#define trans_for_each_path_with_node(_trans, _b, _path, _iter) \
- for (_iter = 1; \
- (_path = __trans_next_path_with_node((_trans), (_b), &_iter));\
- _iter++)
-
-btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *, btree_path_idx_t,
- bool, unsigned long);
-
-static inline btree_path_idx_t __must_check
-bch2_btree_path_make_mut(struct btree_trans *trans,
- btree_path_idx_t path, bool intent,
- unsigned long ip)
-{
- if (trans->paths[path].ref > 1 ||
- trans->paths[path].preserve)
- path = __bch2_btree_path_make_mut(trans, path, intent, ip);
- trans->paths[path].should_be_locked = false;
- return path;
-}
-
-btree_path_idx_t __must_check
-__bch2_btree_path_set_pos(struct btree_trans *, btree_path_idx_t,
- struct bpos, bool, unsigned long);
-
-static inline btree_path_idx_t __must_check
-bch2_btree_path_set_pos(struct btree_trans *trans,
- btree_path_idx_t path, struct bpos new_pos,
- bool intent, unsigned long ip)
-{
- return !bpos_eq(new_pos, trans->paths[path].pos)
- ? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip)
- : path;
-}
-
-int __must_check bch2_btree_path_traverse_one(struct btree_trans *,
- btree_path_idx_t,
- unsigned, unsigned long);
-
-static inline void bch2_trans_verify_not_unlocked(struct btree_trans *);
-
-static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
- btree_path_idx_t path, unsigned flags)
-{
- bch2_trans_verify_not_unlocked(trans);
-
- if (trans->paths[path].uptodate < BTREE_ITER_NEED_RELOCK)
- return 0;
-
- return bch2_btree_path_traverse_one(trans, path, flags, _RET_IP_);
-}
-
-btree_path_idx_t bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
- unsigned, unsigned, unsigned, unsigned long);
-btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *, enum btree_id,
- unsigned, struct bpos);
-
-struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
-
-/*
- * bch2_btree_path_peek_slot() for a cached iterator might return a key in a
- * different snapshot:
- */
-static inline struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
-{
- struct bkey_s_c k = bch2_btree_path_peek_slot(path, u);
-
- if (k.k && bpos_eq(path->pos, k.k->p))
- return k;
-
- bkey_init(u);
- u->p = path->pos;
- return (struct bkey_s_c) { u, NULL };
-}
-
-struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
- struct btree_iter *, struct bpos);
-
-void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);
-
-int __bch2_trans_mutex_lock(struct btree_trans *, struct mutex *);
-
-static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
-{
- return mutex_trylock(lock)
- ? 0
- : __bch2_trans_mutex_lock(trans, lock);
-}
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-void bch2_trans_verify_paths(struct btree_trans *);
-void bch2_assert_pos_locked(struct btree_trans *, enum btree_id, struct bpos);
-#else
-static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
-static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
- struct bpos pos) {}
-#endif
-
-void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
- struct btree *, struct bkey_packed *);
-void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
- struct btree *, struct btree_node_iter *,
- struct bkey_packed *, unsigned, unsigned);
-
-int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);
-
-void bch2_path_put(struct btree_trans *, btree_path_idx_t, bool);
-
-int bch2_trans_relock(struct btree_trans *);
-int bch2_trans_relock_notrace(struct btree_trans *);
-void bch2_trans_unlock(struct btree_trans *);
-void bch2_trans_unlock_long(struct btree_trans *);
-
-static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
-{
- return restart_count != trans->restart_count
- ? -BCH_ERR_transaction_restart_nested
- : 0;
-}
-
-void __noreturn bch2_trans_restart_error(struct btree_trans *, u32);
-
-static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans,
- u32 restart_count)
-{
- if (trans_was_restarted(trans, restart_count))
- bch2_trans_restart_error(trans, restart_count);
-}
-
-void __noreturn bch2_trans_in_restart_error(struct btree_trans *);
-
-static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
-{
- if (trans->restarted)
- bch2_trans_in_restart_error(trans);
-}
-
-void __noreturn bch2_trans_unlocked_error(struct btree_trans *);
-
-static inline void bch2_trans_verify_not_unlocked(struct btree_trans *trans)
-{
- if (!trans->locked)
- bch2_trans_unlocked_error(trans);
-}
-
-__always_inline
-static int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
-{
- BUG_ON(err <= 0);
- BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));
-
- trans->restarted = err;
- trans->last_restarted_ip = _THIS_IP_;
- return -err;
-}
-
-__always_inline
-static int btree_trans_restart(struct btree_trans *trans, int err)
-{
- btree_trans_restart_nounlock(trans, err);
- return -err;
-}
-
-bool bch2_btree_node_upgrade(struct btree_trans *,
- struct btree_path *, unsigned);
-
-void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);
-
-static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
- struct btree_path *path)
-{
- unsigned new_locks_want = path->level + !!path->intent_ref;
-
- if (path->locks_want > new_locks_want)
- __bch2_btree_path_downgrade(trans, path, new_locks_want);
-}
-
-void bch2_trans_downgrade(struct btree_trans *);
-
-void bch2_trans_node_add(struct btree_trans *trans, struct btree_path *, struct btree *);
-void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);
-
-int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
-int __must_check bch2_btree_iter_traverse(struct btree_iter *);
-
-struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
-struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *);
-struct btree *bch2_btree_iter_next_node(struct btree_iter *);
-
-struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *, struct bpos);
-struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);
-
-static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
-{
- return bch2_btree_iter_peek_upto(iter, SPOS_MAX);
-}
-
-struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);
-
-struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);
-
-bool bch2_btree_iter_advance(struct btree_iter *);
-bool bch2_btree_iter_rewind(struct btree_iter *);
-
-static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
-{
- iter->k.type = KEY_TYPE_deleted;
- iter->k.p.inode = iter->pos.inode = new_pos.inode;
- iter->k.p.offset = iter->pos.offset = new_pos.offset;
- iter->k.p.snapshot = iter->pos.snapshot = new_pos.snapshot;
- iter->k.size = 0;
-}
-
-static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
-{
- struct btree_trans *trans = iter->trans;
-
- if (unlikely(iter->update_path))
- bch2_path_put(trans, iter->update_path,
- iter->flags & BTREE_ITER_intent);
- iter->update_path = 0;
-
- if (!(iter->flags & BTREE_ITER_all_snapshots))
- new_pos.snapshot = iter->snapshot;
-
- __bch2_btree_iter_set_pos(iter, new_pos);
-}
-
-static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
-{
- BUG_ON(!(iter->flags & BTREE_ITER_is_extents));
- iter->pos = bkey_start_pos(&iter->k);
-}
-
-static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
-{
- struct bpos pos = iter->pos;
-
- iter->snapshot = snapshot;
- pos.snapshot = snapshot;
- bch2_btree_iter_set_pos(iter, pos);
-}
-
-void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);
-
-static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
- unsigned btree_id,
- unsigned flags)
-{
- if (!(flags & (BTREE_ITER_all_snapshots|BTREE_ITER_not_extents)) &&
- btree_id_is_extents(btree_id))
- flags |= BTREE_ITER_is_extents;
-
- if (!(flags & BTREE_ITER_snapshot_field) &&
- !btree_type_has_snapshot_field(btree_id))
- flags &= ~BTREE_ITER_all_snapshots;
-
- if (!(flags & BTREE_ITER_all_snapshots) &&
- btree_type_has_snapshots(btree_id))
- flags |= BTREE_ITER_filter_snapshots;
-
- if (trans->journal_replay_not_finished)
- flags |= BTREE_ITER_with_journal;
-
- return flags;
-}
-
-static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
- unsigned btree_id,
- unsigned flags)
-{
- if (!btree_id_cached(trans->c, btree_id)) {
- flags &= ~BTREE_ITER_cached;
- flags &= ~BTREE_ITER_with_key_cache;
- } else if (!(flags & BTREE_ITER_cached))
- flags |= BTREE_ITER_with_key_cache;
-
- return __bch2_btree_iter_flags(trans, btree_id, flags);
-}
-
-static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
- struct btree_iter *iter,
- unsigned btree_id, struct bpos pos,
- unsigned locks_want,
- unsigned depth,
- unsigned flags,
- unsigned long ip)
-{
- iter->trans = trans;
- iter->update_path = 0;
- iter->key_cache_path = 0;
- iter->btree_id = btree_id;
- iter->min_depth = 0;
- iter->flags = flags;
- iter->snapshot = pos.snapshot;
- iter->pos = pos;
- iter->k = POS_KEY(pos);
- iter->journal_idx = 0;
-#ifdef CONFIG_BCACHEFS_DEBUG
- iter->ip_allocated = ip;
-#endif
- iter->path = bch2_path_get(trans, btree_id, iter->pos,
- locks_want, depth, flags, ip);
-}
-
-void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
- enum btree_id, struct bpos, unsigned);
-
-static inline void bch2_trans_iter_init(struct btree_trans *trans,
- struct btree_iter *iter,
- unsigned btree_id, struct bpos pos,
- unsigned flags)
-{
- if (__builtin_constant_p(btree_id) &&
- __builtin_constant_p(flags))
- bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
- bch2_btree_iter_flags(trans, btree_id, flags),
- _THIS_IP_);
- else
- bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
-}
-
-void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
- enum btree_id, struct bpos,
- unsigned, unsigned, unsigned);
-void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);
-
-void bch2_set_btree_iter_dontneed(struct btree_iter *);
-
-void *__bch2_trans_kmalloc(struct btree_trans *, size_t);
-
-/**
- * bch2_trans_kmalloc - allocate memory for use by the current transaction
- *
- * Must be called after bch2_trans_begin, which on second and further calls
- * frees all memory allocated in this transaction
- */
-static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
-{
- size = roundup(size, 8);
-
- if (likely(trans->mem_top + size <= trans->mem_bytes)) {
- void *p = trans->mem + trans->mem_top;
-
- trans->mem_top += size;
- memset(p, 0, size);
- return p;
- } else {
- return __bch2_trans_kmalloc(trans, size);
- }
-}
-
-static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
-{
- size = round_up(size, 8);
-
- if (likely(trans->mem_top + size <= trans->mem_bytes)) {
- void *p = trans->mem + trans->mem_top;
-
- trans->mem_top += size;
- return p;
- } else {
- return __bch2_trans_kmalloc(trans, size);
- }
-}
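
A hedged sketch of how transaction-scoped allocation is typically consumed: memory returned by bch2_trans_kmalloc() lives until it is reset by the next bch2_trans_begin() (or the transaction is freed), so callers never pair it with a kfree(). The bkey_i payload below is only illustrative.

static int example_alloc(struct btree_trans *trans)
{
	/* may return an ERR_PTR() on allocation failure or a forced restart: */
	struct bkey_i *update = bch2_trans_kmalloc(trans, sizeof(*update));

	if (IS_ERR(update))
		return PTR_ERR(update);

	bkey_init(&update->k);
	/* ... fill in update and hand it to the update path; no kfree() ... */
	return 0;
}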
-
-static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
- struct btree_iter *iter,
- unsigned btree_id, struct bpos pos,
- unsigned flags, unsigned type)
-{
- struct bkey_s_c k;
-
- bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
- k = bch2_btree_iter_peek_slot(iter);
-
- if (!bkey_err(k) && type && k.k->type != type)
- k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
- if (unlikely(bkey_err(k)))
- bch2_trans_iter_exit(trans, iter);
- return k;
-}
-
-static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans,
- struct btree_iter *iter,
- unsigned btree_id, struct bpos pos,
- unsigned flags)
-{
- return __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags, 0);
-}
-
-#define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
- bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter, \
- _btree_id, _pos, _flags, KEY_TYPE_##_type))
-
-#define bkey_val_copy(_dst_v, _src_k) \
-do { \
- unsigned b = min_t(unsigned, sizeof(*_dst_v), \
- bkey_val_bytes(_src_k.k)); \
- memcpy(_dst_v, _src_k.v, b); \
- if (b < sizeof(*_dst_v)) \
- memset((void *) (_dst_v) + b, 0, sizeof(*_dst_v) - b); \
-} while (0)
-
-static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
- unsigned btree_id, struct bpos pos,
- unsigned flags, unsigned type,
- unsigned val_size, void *val)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret;
-
- k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type);
- ret = bkey_err(k);
- if (!ret) {
- unsigned b = min_t(unsigned, bkey_val_bytes(k.k), val_size);
-
- memcpy(val, k.v, b);
- if (unlikely(b < sizeof(*val)))
- memset((void *) val + b, 0, sizeof(*val) - b);
- bch2_trans_iter_exit(trans, &iter);
- }
-
- return ret;
-}
-
-#define bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, _type, _val)\
- __bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, \
- KEY_TYPE_##_type, sizeof(*_val), _val)
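
A usage sketch for the typed-value helper above, assuming the subvolumes btree with a KEY_TYPE_subvolume / struct bch_subvolume value pair (those names are not defined in this file and are assumptions here); any error, including -BCH_ERR_ENOENT_bkey_type_mismatch, comes back through the return value.

static int example_read_subvolume(struct btree_trans *trans, struct bpos pos)
{
	struct bch_subvolume subvol;	/* assumed value type for BTREE_ID_subvolumes */
	int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_subvolumes, pos, 0,
					  subvolume, &subvol);
	if (ret)
		return ret;

	/* ... use subvol ... */
	return 0;
}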
-
-void bch2_trans_srcu_unlock(struct btree_trans *);
-
-u32 bch2_trans_begin(struct btree_trans *);
-
-#define __for_each_btree_node(_trans, _iter, _btree_id, _start, \
- _locks_want, _depth, _flags, _b, _do) \
-({ \
- bch2_trans_begin((_trans)); \
- \
- struct btree_iter _iter; \
- bch2_trans_node_iter_init((_trans), &_iter, (_btree_id), \
- _start, _locks_want, _depth, _flags); \
- int _ret3 = 0; \
- do { \
- _ret3 = lockrestart_do((_trans), ({ \
- struct btree *_b = bch2_btree_iter_peek_node(&_iter); \
- if (!_b) \
- break; \
- \
- PTR_ERR_OR_ZERO(_b) ?: (_do); \
- })) ?: \
- lockrestart_do((_trans), \
- PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(&_iter))); \
- } while (!_ret3); \
- \
- bch2_trans_iter_exit((_trans), &(_iter)); \
- _ret3; \
-})
-
-#define for_each_btree_node(_trans, _iter, _btree_id, _start, \
- _flags, _b, _do) \
- __for_each_btree_node(_trans, _iter, _btree_id, _start, \
- 0, 0, _flags, _b, _do)
-
-static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
- unsigned flags)
-{
- return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
- bch2_btree_iter_peek_prev(iter);
-}
-
-static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
- unsigned flags)
-{
- return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
- bch2_btree_iter_peek(iter);
-}
-
-static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *iter,
- struct bpos end,
- unsigned flags)
-{
- if (!(flags & BTREE_ITER_slots))
- return bch2_btree_iter_peek_upto(iter, end);
-
- if (bkey_gt(iter->pos, end))
- return bkey_s_c_null;
-
- return bch2_btree_iter_peek_slot(iter);
-}
-
-int __bch2_btree_trans_too_many_iters(struct btree_trans *);
-
-static inline int btree_trans_too_many_iters(struct btree_trans *trans)
-{
- if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_NORMAL_LIMIT - 8)
- return __bch2_btree_trans_too_many_iters(trans);
-
- return 0;
-}
-
-/*
- * goto instead of loop, so that when used inside for_each_btree_key2()
- * break/continue work correctly
- */
-#define lockrestart_do(_trans, _do) \
-({ \
- __label__ transaction_restart; \
- u32 _restart_count; \
- int _ret2; \
-transaction_restart: \
- _restart_count = bch2_trans_begin(_trans); \
- _ret2 = (_do); \
- \
- if (bch2_err_matches(_ret2, BCH_ERR_transaction_restart)) \
- goto transaction_restart; \
- \
- if (!_ret2) \
- bch2_trans_verify_not_restarted(_trans, _restart_count);\
- _ret2; \
-})
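lockrestart_do() re-runs its body until it no longer fails with a transaction-restart error, calling bch2_trans_begin() before every attempt; the goto form exists only so that break/continue inside the body still refer to the enclosing for_each loop. A rough standalone sketch of the same retry shape, using a loop and a made-up error code rather than the bcachefs error machinery:

#include <stdio.h>

#define ERR_TRANSACTION_RESTART	(-1000)	/* made-up stand-in error code */

struct mock_trans { int attempts; };

static void trans_begin(struct mock_trans *t)
{
	/* the real bch2_trans_begin() also frees per-transaction memory
	 * and revalidates/relocks btree paths */
	t->attempts++;
}

/* Body that fails twice with a "restart" error, then succeeds. */
static int do_work(struct mock_trans *t)
{
	return t->attempts < 3 ? ERR_TRANSACTION_RESTART : 0;
}

static int lockrestart_do_sketch(struct mock_trans *t)
{
	int ret;

	do {
		trans_begin(t);
		ret = do_work(t);
	} while (ret == ERR_TRANSACTION_RESTART);

	return ret;
}

int main(void)
{
	struct mock_trans t = { 0 };

	printf("ret=%d after %d attempts\n", lockrestart_do_sketch(&t), t.attempts);
	return 0;
}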
-
-/*
- * nested_lockrestart_do(), nested_commit_do():
- *
- * These are like lockrestart_do() and commit_do(), with two differences:
- *
- * - We don't call bch2_trans_begin() unless we had a transaction restart
- * - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
- * transaction restart
- */
-#define nested_lockrestart_do(_trans, _do) \
-({ \
- u32 _restart_count, _orig_restart_count; \
- int _ret2; \
- \
- _restart_count = _orig_restart_count = (_trans)->restart_count; \
- \
- while (bch2_err_matches(_ret2 = (_do), BCH_ERR_transaction_restart))\
- _restart_count = bch2_trans_begin(_trans); \
- \
- if (!_ret2) \
- bch2_trans_verify_not_restarted(_trans, _restart_count);\
- \
- _ret2 ?: trans_was_restarted(_trans, _restart_count); \
-})
-
-#define for_each_btree_key_upto_continue(_trans, _iter, \
- _end, _flags, _k, _do) \
-({ \
- struct bkey_s_c _k; \
- int _ret3 = 0; \
- \
- do { \
- _ret3 = lockrestart_do(_trans, ({ \
- (_k) = bch2_btree_iter_peek_upto_type(&(_iter), \
- _end, (_flags)); \
- if (!(_k).k) \
- break; \
- \
- bkey_err(_k) ?: (_do); \
- })); \
- } while (!_ret3 && bch2_btree_iter_advance(&(_iter))); \
- \
- bch2_trans_iter_exit((_trans), &(_iter)); \
- _ret3; \
-})
-
-#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _do) \
- for_each_btree_key_upto_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do)
-
-#define for_each_btree_key_upto(_trans, _iter, _btree_id, \
- _start, _end, _flags, _k, _do) \
-({ \
-	bch2_trans_begin((_trans));					\
- \
- struct btree_iter _iter; \
- bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
- (_start), (_flags)); \
- \
- for_each_btree_key_upto_continue(_trans, _iter, _end, _flags, _k, _do);\
-})
-
-#define for_each_btree_key(_trans, _iter, _btree_id, \
- _start, _flags, _k, _do) \
- for_each_btree_key_upto(_trans, _iter, _btree_id, _start, \
- SPOS_MAX, _flags, _k, _do)
-
-#define for_each_btree_key_reverse(_trans, _iter, _btree_id, \
- _start, _flags, _k, _do) \
-({ \
- struct btree_iter _iter; \
- struct bkey_s_c _k; \
- int _ret3 = 0; \
- \
- bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
- (_start), (_flags)); \
- \
- do { \
- _ret3 = lockrestart_do(_trans, ({ \
- (_k) = bch2_btree_iter_peek_prev_type(&(_iter), \
- (_flags)); \
- if (!(_k).k) \
- break; \
- \
- bkey_err(_k) ?: (_do); \
- })); \
- } while (!_ret3 && bch2_btree_iter_rewind(&(_iter))); \
- \
- bch2_trans_iter_exit((_trans), &(_iter)); \
- _ret3; \
-})
-
-#define for_each_btree_key_commit(_trans, _iter, _btree_id, \
- _start, _iter_flags, _k, \
- _disk_res, _journal_seq, _commit_flags,\
- _do) \
- for_each_btree_key(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
- (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
- (_journal_seq), (_commit_flags)))
-
-#define for_each_btree_key_reverse_commit(_trans, _iter, _btree_id, \
- _start, _iter_flags, _k, \
- _disk_res, _journal_seq, _commit_flags,\
- _do) \
- for_each_btree_key_reverse(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
- (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
- (_journal_seq), (_commit_flags)))
-
-#define for_each_btree_key_upto_commit(_trans, _iter, _btree_id, \
- _start, _end, _iter_flags, _k, \
- _disk_res, _journal_seq, _commit_flags,\
- _do) \
- for_each_btree_key_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
- (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
- (_journal_seq), (_commit_flags)))
-
-struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);
-
-#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, \
- _start, _end, _flags, _k, _ret) \
- for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
- (_start), (_flags)); \
- (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
- !((_ret) = bkey_err(_k)) && (_k).k; \
- bch2_btree_iter_advance(&(_iter)))
-
-#define for_each_btree_key_upto_continue_norestart(_iter, _end, _flags, _k, _ret)\
- for (; \
- (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags), \
- !((_ret) = bkey_err(_k)) && (_k).k; \
- bch2_btree_iter_advance(&(_iter)))
-
-#define for_each_btree_key_norestart(_trans, _iter, _btree_id, \
- _start, _flags, _k, _ret) \
- for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, _start,\
- SPOS_MAX, _flags, _k, _ret)
-
-#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id, \
- _start, _flags, _k, _ret) \
- for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
- (_start), (_flags)); \
- (_k) = bch2_btree_iter_peek_prev_type(&(_iter), _flags), \
- !((_ret) = bkey_err(_k)) && (_k).k; \
- bch2_btree_iter_rewind(&(_iter)))
-
-#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret) \
- for_each_btree_key_upto_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret)
-
-/*
- * This should not be used in a fastpath without first trying _do in
- * nonblocking mode - it will cause excessive transaction restarts and
- * potentially livelock:
- */
-#define drop_locks_do(_trans, _do) \
-({ \
- bch2_trans_unlock(_trans); \
- (_do) ?: bch2_trans_relock(_trans); \
-})
-
-#define allocate_dropping_locks_errcode(_trans, _do) \
-({ \
- gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN; \
- int _ret = _do; \
- \
- if (bch2_err_matches(_ret, ENOMEM)) { \
- _gfp = GFP_KERNEL; \
- _ret = drop_locks_do(_trans, _do); \
- } \
- _ret; \
-})
-
-#define allocate_dropping_locks(_trans, _ret, _do) \
-({ \
- gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN; \
- typeof(_do) _p = _do; \
- \
- _ret = 0; \
- if (unlikely(!_p)) { \
- _gfp = GFP_KERNEL; \
- _ret = drop_locks_do(_trans, ((_p = _do), 0)); \
- } \
- _p; \
-})
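allocate_dropping_locks()/allocate_dropping_locks_errcode() encode a two-step allocation strategy: try a non-blocking (GFP_NOWAIT) allocation while btree locks are held, and only if that fails drop the locks, allocate with GFP_KERNEL, and relock. A hedged standalone sketch of that strategy; the lock helpers here are placeholders, not bch2_trans_unlock()/bch2_trans_relock():

#include <stdbool.h>
#include <stdlib.h>

struct fake_lock { bool held; };

static void lock_drop(struct fake_lock *l)   { l->held = false; }
static int  lock_retake(struct fake_lock *l) { l->held = true; return 0; }

/* Pretend "non-blocking" allocation that may fail under pressure. */
static void *alloc_nowait(size_t size)
{
	return size > 1024 ? NULL : malloc(size);	/* simulate failure for large requests */
}

static void *alloc_dropping_locks(struct fake_lock *l, size_t size, int *ret)
{
	void *p = alloc_nowait(size);	/* cheap attempt with locks held */

	*ret = 0;
	if (!p) {
		lock_drop(l);		/* safe to block now */
		p = malloc(size);	/* GFP_KERNEL equivalent */
		*ret = lock_retake(l);	/* may fail and force a restart */
	}
	return p;
}

int main(void)
{
	struct fake_lock l = { .held = true };
	int ret;
	void *p = alloc_dropping_locks(&l, 4096, &ret);

	free(p);
	return ret;
}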
-
-#define bch2_trans_run(_c, _do) \
-({ \
- struct btree_trans *trans = bch2_trans_get(_c); \
- int _ret = (_do); \
- bch2_trans_put(trans); \
- _ret; \
-})
-
-#define bch2_trans_do(_c, _do) bch2_trans_run(_c, lockrestart_do(trans, _do))
-
-struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned);
-void bch2_trans_put(struct btree_trans *);
-
-bool bch2_current_has_btree_trans(struct bch_fs *);
-
-extern const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
-unsigned bch2_trans_get_fn_idx(const char *);
-
-#define bch2_trans_get(_c) \
-({ \
- static unsigned trans_fn_idx; \
- \
- if (unlikely(!trans_fn_idx)) \
- trans_fn_idx = bch2_trans_get_fn_idx(__func__); \
- __bch2_trans_get(_c, trans_fn_idx); \
-})
-
-void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);
-
-void bch2_fs_btree_iter_exit(struct bch_fs *);
-void bch2_fs_btree_iter_init_early(struct bch_fs *);
-int bch2_fs_btree_iter_init(struct bch_fs *);
-
-#endif /* _BCACHEFS_BTREE_ITER_H */
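Taken together, a typical caller of this header wraps an iteration in bch2_trans_run() and lets for_each_btree_key() (which uses lockrestart_do() internally) deal with transaction restarts. The fragment below is a hypothetical usage sketch only: count_keys_in_btree() is invented, BTREE_ITER_prefetch is assumed to be an available iterator flag, and error handling is minimal.

/* Hypothetical example, for illustration only: count the keys in one btree. */
static int count_keys_in_btree(struct bch_fs *c, enum btree_id btree, u64 *nr)
{
	*nr = 0;

	return bch2_trans_run(c,
		for_each_btree_key(trans, iter, btree, POS_MIN,
				   BTREE_ITER_prefetch, k, ({
			(*nr)++;
			0;	/* the body must evaluate to an int error code */
		})));
}

Note that bch2_trans_run() names its transaction variable "trans", which is what the iteration macros expect.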
diff --git a/fs/bcachefs/btree_journal_iter.c b/fs/bcachefs/btree_journal_iter.c
deleted file mode 100644
index c1657182c275..000000000000
--- a/fs/bcachefs/btree_journal_iter.c
+++ /dev/null
@@ -1,635 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "bkey_buf.h"
-#include "bset.h"
-#include "btree_cache.h"
-#include "btree_journal_iter.h"
-#include "journal_io.h"
-
-#include <linux/sort.h>
-
-/*
- * For managing keys we read from the journal: until journal replay has
- * finished, normal btree lookups need to be able to find and return keys from
- * the journal where they overwrite what's in the btree, so we have a special
- * iterator and operations for the regular btree iter code to use:
- */
-
-static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx)
-{
- size_t gap_size = keys->size - keys->nr;
-
- if (idx >= keys->gap)
- idx += gap_size;
- return idx;
-}
-
-static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx)
-{
- return keys->data + idx_to_pos(keys, idx);
-}
-
-static size_t __bch2_journal_key_search(struct journal_keys *keys,
- enum btree_id id, unsigned level,
- struct bpos pos)
-{
- size_t l = 0, r = keys->nr, m;
-
- while (l < r) {
- m = l + ((r - l) >> 1);
- if (__journal_key_cmp(id, level, pos, idx_to_key(keys, m)) > 0)
- l = m + 1;
- else
- r = m;
- }
-
- BUG_ON(l < keys->nr &&
- __journal_key_cmp(id, level, pos, idx_to_key(keys, l)) > 0);
-
- BUG_ON(l &&
- __journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0);
-
- return l;
-}
-
-static size_t bch2_journal_key_search(struct journal_keys *keys,
- enum btree_id id, unsigned level,
- struct bpos pos)
-{
- return idx_to_pos(keys, __bch2_journal_key_search(keys, id, level, pos));
-}
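The journal keys are stored as a sorted array with a movable gap (a gap buffer), which is why __bch2_journal_key_search() binary-searches over logical indices and idx_to_pos() skips the gap when translating a logical index into an array slot. A compact standalone illustration of that translation and search, using plain ints in place of journal keys:

#include <assert.h>
#include <stddef.h>

struct gap_buf {
	int	*data;
	size_t	 nr;	/* number of live elements */
	size_t	 size;	/* allocated slots */
	size_t	 gap;	/* logical index where the gap currently sits */
};

/* Logical index -> array slot, skipping the unused gap region. */
static size_t idx_to_pos(struct gap_buf *b, size_t idx)
{
	size_t gap_size = b->size - b->nr;

	if (idx >= b->gap)
		idx += gap_size;
	return idx;
}

/* Standard lower_bound over logical indices. */
static size_t search(struct gap_buf *b, int key)
{
	size_t l = 0, r = b->nr;

	while (l < r) {
		size_t m = l + ((r - l) >> 1);

		if (b->data[idx_to_pos(b, m)] < key)
			l = m + 1;
		else
			r = m;
	}
	return l;
}

int main(void)
{
	/* 6 slots, 4 live elements, gap at logical index 2 (slots 2..3 unused) */
	int arr[6] = { 10, 20, 0, 0, 30, 40 };
	struct gap_buf b = { .data = arr, .nr = 4, .size = 6, .gap = 2 };

	assert(idx_to_pos(&b, 1) == 1);
	assert(idx_to_pos(&b, 2) == 4);	/* skips the 2-slot gap */
	assert(search(&b, 30) == 2);	/* logical index of the value 30 */
	return 0;
}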
-
-/* Returns first non-overwritten key >= search key: */
-struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *c, enum btree_id btree_id,
- unsigned level, struct bpos pos,
- struct bpos end_pos, size_t *idx)
-{
- struct journal_keys *keys = &c->journal_keys;
- unsigned iters = 0;
- struct journal_key *k;
-
- BUG_ON(*idx > keys->nr);
-search:
- if (!*idx)
- *idx = __bch2_journal_key_search(keys, btree_id, level, pos);
-
- while (*idx &&
- __journal_key_cmp(btree_id, level, end_pos, idx_to_key(keys, *idx - 1)) <= 0) {
- --(*idx);
- iters++;
- if (iters == 10) {
- *idx = 0;
- goto search;
- }
- }
-
- while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) {
- if (__journal_key_cmp(btree_id, level, end_pos, k) < 0)
- return NULL;
-
- if (k->overwritten) {
- (*idx)++;
- continue;
- }
-
- if (__journal_key_cmp(btree_id, level, pos, k) <= 0)
- return k->k;
-
- (*idx)++;
- iters++;
- if (iters == 10) {
- *idx = 0;
- goto search;
- }
- }
-
- return NULL;
-}
-
-struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree_id,
- unsigned level, struct bpos pos)
-{
- size_t idx = 0;
-
- return bch2_journal_keys_peek_upto(c, btree_id, level, pos, pos, &idx);
-}
-
-static void journal_iter_verify(struct journal_iter *iter)
-{
- struct journal_keys *keys = iter->keys;
- size_t gap_size = keys->size - keys->nr;
-
- BUG_ON(iter->idx >= keys->gap &&
- iter->idx < keys->gap + gap_size);
-
- if (iter->idx < keys->size) {
- struct journal_key *k = keys->data + iter->idx;
-
- int cmp = cmp_int(k->btree_id, iter->btree_id) ?:
- cmp_int(k->level, iter->level);
- BUG_ON(cmp < 0);
- }
-}
-
-static void journal_iters_fix(struct bch_fs *c)
-{
- struct journal_keys *keys = &c->journal_keys;
- /* The key we just inserted is immediately before the gap: */
- size_t gap_end = keys->gap + (keys->size - keys->nr);
- struct journal_key *new_key = &keys->data[keys->gap - 1];
- struct journal_iter *iter;
-
- /*
- * If an iterator points one after the key we just inserted, decrement
- * the iterator so it points at the key we just inserted - if the
- * decrement was unnecessary, bch2_btree_and_journal_iter_peek() will
- * handle that:
- */
- list_for_each_entry(iter, &c->journal_iters, list) {
- journal_iter_verify(iter);
- if (iter->idx == gap_end &&
- new_key->btree_id == iter->btree_id &&
- new_key->level == iter->level)
- iter->idx = keys->gap - 1;
- journal_iter_verify(iter);
- }
-}
-
-static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap)
-{
- struct journal_keys *keys = &c->journal_keys;
- struct journal_iter *iter;
- size_t gap_size = keys->size - keys->nr;
-
- list_for_each_entry(iter, &c->journal_iters, list) {
- if (iter->idx > old_gap)
- iter->idx -= gap_size;
- if (iter->idx >= new_gap)
- iter->idx += gap_size;
- }
-}
-
-int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
- unsigned level, struct bkey_i *k)
-{
- struct journal_key n = {
- .btree_id = id,
- .level = level,
- .k = k,
- .allocated = true,
- /*
- * Ensure these keys are done last by journal replay, to unblock
- * journal reclaim:
- */
- .journal_seq = U32_MAX,
- };
- struct journal_keys *keys = &c->journal_keys;
- size_t idx = bch2_journal_key_search(keys, id, level, k->k.p);
-
- BUG_ON(test_bit(BCH_FS_rw, &c->flags));
-
- if (idx < keys->size &&
- journal_key_cmp(&n, &keys->data[idx]) == 0) {
- if (keys->data[idx].allocated)
- kfree(keys->data[idx].k);
- keys->data[idx] = n;
- return 0;
- }
-
- if (idx > keys->gap)
- idx -= keys->size - keys->nr;
-
- size_t old_gap = keys->gap;
-
- if (keys->nr == keys->size) {
- journal_iters_move_gap(c, old_gap, keys->size);
- old_gap = keys->size;
-
- struct journal_keys new_keys = {
- .nr = keys->nr,
- .size = max_t(size_t, keys->size, 8) * 2,
- };
-
- new_keys.data = kvmalloc_array(new_keys.size, sizeof(new_keys.data[0]), GFP_KERNEL);
- if (!new_keys.data) {
- bch_err(c, "%s: error allocating new key array (size %zu)",
- __func__, new_keys.size);
- return -BCH_ERR_ENOMEM_journal_key_insert;
- }
-
- /* Since @keys was full, there was no gap: */
- memcpy(new_keys.data, keys->data, sizeof(keys->data[0]) * keys->nr);
- kvfree(keys->data);
- keys->data = new_keys.data;
- keys->nr = new_keys.nr;
- keys->size = new_keys.size;
-
- /* And now the gap is at the end: */
- keys->gap = keys->nr;
- }
-
- journal_iters_move_gap(c, old_gap, idx);
-
- move_gap(keys, idx);
-
- keys->nr++;
- keys->data[keys->gap++] = n;
-
- journal_iters_fix(c);
-
- return 0;
-}
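bch2_journal_key_insert_take() keeps the array sorted by sliding the gap to the insertion point and writing the new key into it, growing the array (with the gap moved to the end) only when the gap is used up. A standalone sketch of the "move the gap, then insert into it" step under the same layout as the previous example; move_gap() here is a simplified stand-in for the kernel helper of the same name:

#include <assert.h>
#include <string.h>

struct gap_buf {
	int	data[8];
	size_t	nr, size, gap;	/* live elements, capacity, gap position */
};

static size_t idx_to_pos(struct gap_buf *b, size_t idx)
{
	return idx >= b->gap ? idx + (b->size - b->nr) : idx;
}

/* Slide elements so the unused region sits at logical index 'new_gap'. */
static void move_gap(struct gap_buf *b, size_t new_gap)
{
	size_t gap_size = b->size - b->nr;

	if (new_gap < b->gap)
		memmove(b->data + new_gap + gap_size, b->data + new_gap,
			(b->gap - new_gap) * sizeof(b->data[0]));
	else if (new_gap > b->gap)
		memmove(b->data + b->gap, b->data + b->gap + gap_size,
			(new_gap - b->gap) * sizeof(b->data[0]));
	b->gap = new_gap;
}

/* Insert at logical index 'idx'; assumes there is still room in the gap. */
static void insert_at(struct gap_buf *b, size_t idx, int v)
{
	move_gap(b, idx);
	b->nr++;
	b->data[b->gap++] = v;
}

int main(void)
{
	struct gap_buf b = { .data = { 10, 20, 40, 50 }, .nr = 4, .size = 8, .gap = 4 };

	insert_at(&b, 2, 30);			/* keeps the array sorted */
	assert(b.data[idx_to_pos(&b, 2)] == 30);
	assert(b.data[idx_to_pos(&b, 3)] == 40);
	return 0;
}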
-
-/*
- * Can only be used from the recovery thread while we're still RO - can't be
- * used once we've got RW, as journal_keys is at that point used by multiple
- * threads:
- */
-int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
- unsigned level, struct bkey_i *k)
-{
- struct bkey_i *n;
- int ret;
-
- n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
- if (!n)
- return -BCH_ERR_ENOMEM_journal_key_insert;
-
- bkey_copy(n, k);
- ret = bch2_journal_key_insert_take(c, id, level, n);
- if (ret)
- kfree(n);
- return ret;
-}
-
-int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
- unsigned level, struct bpos pos)
-{
- struct bkey_i whiteout;
-
- bkey_init(&whiteout.k);
- whiteout.k.p = pos;
-
- return bch2_journal_key_insert(c, id, level, &whiteout);
-}
-
-bool bch2_key_deleted_in_journal(struct btree_trans *trans, enum btree_id btree,
- unsigned level, struct bpos pos)
-{
- struct journal_keys *keys = &trans->c->journal_keys;
- size_t idx = bch2_journal_key_search(keys, btree, level, pos);
-
- if (!trans->journal_replay_not_finished)
- return false;
-
- return (idx < keys->size &&
- keys->data[idx].btree_id == btree &&
- keys->data[idx].level == level &&
- bpos_eq(keys->data[idx].k->k.p, pos) &&
- bkey_deleted(&keys->data[idx].k->k));
-}
-
-void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
- unsigned level, struct bpos pos)
-{
- struct journal_keys *keys = &c->journal_keys;
- size_t idx = bch2_journal_key_search(keys, btree, level, pos);
-
- if (idx < keys->size &&
- keys->data[idx].btree_id == btree &&
- keys->data[idx].level == level &&
- bpos_eq(keys->data[idx].k->k.p, pos))
- keys->data[idx].overwritten = true;
-}
-
-static void bch2_journal_iter_advance(struct journal_iter *iter)
-{
- if (iter->idx < iter->keys->size) {
- iter->idx++;
- if (iter->idx == iter->keys->gap)
- iter->idx += iter->keys->size - iter->keys->nr;
- }
-}
-
-static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
-{
- journal_iter_verify(iter);
-
- while (iter->idx < iter->keys->size) {
- struct journal_key *k = iter->keys->data + iter->idx;
-
- int cmp = cmp_int(k->btree_id, iter->btree_id) ?:
- cmp_int(k->level, iter->level);
- if (cmp > 0)
- break;
- BUG_ON(cmp);
-
- if (!k->overwritten)
- return bkey_i_to_s_c(k->k);
-
- bch2_journal_iter_advance(iter);
- }
-
- return bkey_s_c_null;
-}
-
-static void bch2_journal_iter_exit(struct journal_iter *iter)
-{
- list_del(&iter->list);
-}
-
-static void bch2_journal_iter_init(struct bch_fs *c,
- struct journal_iter *iter,
- enum btree_id id, unsigned level,
- struct bpos pos)
-{
- iter->btree_id = id;
- iter->level = level;
- iter->keys = &c->journal_keys;
- iter->idx = bch2_journal_key_search(&c->journal_keys, id, level, pos);
-
- journal_iter_verify(iter);
-}
-
-static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
-{
- return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
- iter->b, &iter->unpacked);
-}
-
-static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
-{
- bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
-}
-
-void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
-{
- if (bpos_eq(iter->pos, SPOS_MAX))
- iter->at_end = true;
- else
- iter->pos = bpos_successor(iter->pos);
-}
-
-static void btree_and_journal_iter_prefetch(struct btree_and_journal_iter *_iter)
-{
- struct btree_and_journal_iter iter = *_iter;
- struct bch_fs *c = iter.trans->c;
- unsigned level = iter.journal.level;
- struct bkey_buf tmp;
- unsigned nr = test_bit(BCH_FS_started, &c->flags)
- ? (level > 1 ? 0 : 2)
- : (level > 1 ? 1 : 16);
-
- iter.prefetch = false;
- bch2_bkey_buf_init(&tmp);
-
- while (nr--) {
- bch2_btree_and_journal_iter_advance(&iter);
- struct bkey_s_c k = bch2_btree_and_journal_iter_peek(&iter);
- if (!k.k)
- break;
-
- bch2_bkey_buf_reassemble(&tmp, c, k);
- bch2_btree_node_prefetch(iter.trans, NULL, tmp.k, iter.journal.btree_id, level - 1);
- }
-
- bch2_bkey_buf_exit(&tmp, c);
-}
-
-struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
-{
- struct bkey_s_c btree_k, journal_k = bkey_s_c_null, ret;
-
- if (iter->prefetch && iter->journal.level)
- btree_and_journal_iter_prefetch(iter);
-again:
- if (iter->at_end)
- return bkey_s_c_null;
-
- while ((btree_k = bch2_journal_iter_peek_btree(iter)).k &&
- bpos_lt(btree_k.k->p, iter->pos))
- bch2_journal_iter_advance_btree(iter);
-
- if (iter->trans->journal_replay_not_finished)
- while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
- bpos_lt(journal_k.k->p, iter->pos))
- bch2_journal_iter_advance(&iter->journal);
-
- ret = journal_k.k &&
- (!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p))
- ? journal_k
- : btree_k;
-
- if (ret.k && iter->b && bpos_gt(ret.k->p, iter->b->data->max_key))
- ret = bkey_s_c_null;
-
- if (ret.k) {
- iter->pos = ret.k->p;
- if (bkey_deleted(ret.k)) {
- bch2_btree_and_journal_iter_advance(iter);
- goto again;
- }
- } else {
- iter->pos = SPOS_MAX;
- iter->at_end = true;
- }
-
- return ret;
-}
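bch2_btree_and_journal_iter_peek() merges two position-sorted streams - keys already in the btree node and keys still sitting in the journal - with the journal key winning whenever both streams supply the same position. A minimal standalone two-way merge with the same "overlay wins on ties" rule, using arrays of ints as stand-ins:

#include <stdio.h>

/* Return the next position >= *pos from either stream, preferring the
 * overlay (journal) on ties, and report which stream supplied it. */
static int peek_merged(const int *base, int n_base,
		       const int *overlay, int n_overlay,
		       int *pos, const char **src)
{
	int bi = 0, oi = 0;

	while (bi < n_base && base[bi] < *pos)
		bi++;
	while (oi < n_overlay && overlay[oi] < *pos)
		oi++;

	if (oi < n_overlay && (bi >= n_base || overlay[oi] <= base[bi])) {
		*src = "journal";
		*pos = overlay[oi];
		return 1;
	}
	if (bi < n_base) {
		*src = "btree";
		*pos = base[bi];
		return 1;
	}
	return 0;
}

int main(void)
{
	const int btree[]   = { 1, 3, 5 };
	const int journal[] = { 3, 4 };	/* journal's 3 overrides the btree's 3 */
	int pos = 0;
	const char *src;

	while (peek_merged(btree, 3, journal, 2, &pos, &src)) {
		printf("%d from %s\n", pos, src);
		pos++;	/* advance, as bch2_btree_and_journal_iter_advance() does */
	}
	return 0;
}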
-
-void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
-{
- bch2_journal_iter_exit(&iter->journal);
-}
-
-void __bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans,
- struct btree_and_journal_iter *iter,
- struct btree *b,
- struct btree_node_iter node_iter,
- struct bpos pos)
-{
- memset(iter, 0, sizeof(*iter));
-
- iter->trans = trans;
- iter->b = b;
- iter->node_iter = node_iter;
- iter->pos = b->data->min_key;
- iter->at_end = false;
- INIT_LIST_HEAD(&iter->journal.list);
-
- if (trans->journal_replay_not_finished) {
- bch2_journal_iter_init(trans->c, &iter->journal, b->c.btree_id, b->c.level, pos);
- if (!test_bit(BCH_FS_may_go_rw, &trans->c->flags))
- list_add(&iter->journal.list, &trans->c->journal_iters);
- }
-}
-
-/*
- * This version is used by btree_gc before the filesystem has gone RW and
- * multithreaded, so it uses the journal_iters list:
- */
-void bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans,
- struct btree_and_journal_iter *iter,
- struct btree *b)
-{
- struct btree_node_iter node_iter;
-
- bch2_btree_node_iter_init_from_start(&node_iter, b);
- __bch2_btree_and_journal_iter_init_node_iter(trans, iter, b, node_iter, b->data->min_key);
-}
-
-/* sort and dedup all keys in the journal: */
-
-void bch2_journal_entries_free(struct bch_fs *c)
-{
- struct journal_replay **i;
- struct genradix_iter iter;
-
- genradix_for_each(&c->journal_entries, iter, i)
- kvfree(*i);
- genradix_free(&c->journal_entries);
-}
-
-/*
- * When keys compare equal, oldest compares first:
- */
-static int journal_sort_key_cmp(const void *_l, const void *_r)
-{
- const struct journal_key *l = _l;
- const struct journal_key *r = _r;
-
- return journal_key_cmp(l, r) ?:
- cmp_int(l->journal_seq, r->journal_seq) ?:
- cmp_int(l->journal_offset, r->journal_offset);
-}
-
-void bch2_journal_keys_put(struct bch_fs *c)
-{
- struct journal_keys *keys = &c->journal_keys;
-
- BUG_ON(atomic_read(&keys->ref) <= 0);
-
- if (!atomic_dec_and_test(&keys->ref))
- return;
-
- move_gap(keys, keys->nr);
-
- darray_for_each(*keys, i)
- if (i->allocated)
- kfree(i->k);
-
- kvfree(keys->data);
- keys->data = NULL;
- keys->nr = keys->gap = keys->size = 0;
-
- bch2_journal_entries_free(c);
-}
-
-static void __journal_keys_sort(struct journal_keys *keys)
-{
- sort(keys->data, keys->nr, sizeof(keys->data[0]), journal_sort_key_cmp, NULL);
-
- cond_resched();
-
- struct journal_key *dst = keys->data;
-
- darray_for_each(*keys, src) {
- /*
- * We don't accumulate accounting keys here because we have to
- * compare each individual accounting key against the version in
- * the btree during replay:
- */
- if (src->k->k.type != KEY_TYPE_accounting &&
- src + 1 < &darray_top(*keys) &&
- !journal_key_cmp(src, src + 1))
- continue;
-
- *dst++ = *src;
- }
-
- keys->nr = dst - keys->data;
-}
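__journal_keys_sort() sorts by (btree, level, pos) and then by journal sequence, so all versions of a key end up adjacent with the newest last, and the compaction pass keeps only the last entry of each run (the real code exempts accounting keys). A small standalone version of that sort-then-keep-newest dedup:

#include <assert.h>
#include <stdlib.h>

struct jkey { int pos; int seq; };	/* stand-in for a journal key */

static int cmp_pos_then_seq(const void *_l, const void *_r)
{
	const struct jkey *l = _l, *r = _r;

	return l->pos != r->pos ? (l->pos < r->pos ? -1 : 1)
				: (l->seq < r->seq ? -1 : l->seq > r->seq);
}

/* Sort, then keep only the newest (last) key for each position. */
static size_t sort_and_dedup(struct jkey *keys, size_t nr)
{
	struct jkey *dst = keys;

	qsort(keys, nr, sizeof(*keys), cmp_pos_then_seq);

	for (size_t i = 0; i < nr; i++) {
		if (i + 1 < nr && keys[i].pos == keys[i + 1].pos)
			continue;	/* superseded by a newer version */
		*dst++ = keys[i];
	}
	return dst - keys;
}

int main(void)
{
	struct jkey keys[] = {
		{ .pos = 5, .seq = 1 },
		{ .pos = 3, .seq = 2 },
		{ .pos = 5, .seq = 7 },	/* newest version of pos 5 */
	};
	size_t nr = sort_and_dedup(keys, 3);

	assert(nr == 2);
	assert(keys[1].pos == 5 && keys[1].seq == 7);
	return 0;
}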
-
-int bch2_journal_keys_sort(struct bch_fs *c)
-{
- struct genradix_iter iter;
- struct journal_replay *i, **_i;
- struct journal_keys *keys = &c->journal_keys;
- size_t nr_read = 0;
-
- genradix_for_each(&c->journal_entries, iter, _i) {
- i = *_i;
-
- if (journal_replay_ignore(i))
- continue;
-
- cond_resched();
-
- for_each_jset_key(k, entry, &i->j) {
- struct journal_key n = (struct journal_key) {
- .btree_id = entry->btree_id,
- .level = entry->level,
- .k = k,
- .journal_seq = le64_to_cpu(i->j.seq),
- .journal_offset = k->_data - i->j._data,
- };
-
- if (darray_push(keys, n)) {
- __journal_keys_sort(keys);
-
- if (keys->nr * 8 > keys->size * 7) {
- bch_err(c, "Too many journal keys for slowpath; have %zu compacted, buf size %zu, processed %zu keys at seq %llu",
- keys->nr, keys->size, nr_read, le64_to_cpu(i->j.seq));
- return -BCH_ERR_ENOMEM_journal_keys_sort;
- }
-
- BUG_ON(darray_push(keys, n));
- }
-
- nr_read++;
- }
- }
-
- __journal_keys_sort(keys);
- keys->gap = keys->nr;
-
- bch_verbose(c, "Journal keys: %zu read, %zu after sorting and compacting", nr_read, keys->nr);
- return 0;
-}
-
-void bch2_shoot_down_journal_keys(struct bch_fs *c, enum btree_id btree,
- unsigned level_min, unsigned level_max,
- struct bpos start, struct bpos end)
-{
- struct journal_keys *keys = &c->journal_keys;
- size_t dst = 0;
-
- move_gap(keys, keys->nr);
-
- darray_for_each(*keys, i)
- if (!(i->btree_id == btree &&
- i->level >= level_min &&
- i->level <= level_max &&
- bpos_ge(i->k->k.p, start) &&
- bpos_le(i->k->k.p, end)))
- keys->data[dst++] = *i;
- keys->nr = keys->gap = dst;
-}
-
-void bch2_journal_keys_dump(struct bch_fs *c)
-{
- struct journal_keys *keys = &c->journal_keys;
- struct printbuf buf = PRINTBUF;
-
- pr_info("%zu keys:", keys->nr);
-
- move_gap(keys, keys->nr);
-
- darray_for_each(*keys, i) {
- printbuf_reset(&buf);
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
- pr_err("%s l=%u %s", bch2_btree_id_str(i->btree_id), i->level, buf.buf);
- }
- printbuf_exit(&buf);
-}
diff --git a/fs/bcachefs/btree_journal_iter.h b/fs/bcachefs/btree_journal_iter.h
deleted file mode 100644
index 1653de9d609b..000000000000
--- a/fs/bcachefs/btree_journal_iter.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BTREE_JOURNAL_ITER_H
-#define _BCACHEFS_BTREE_JOURNAL_ITER_H
-
-#include "bkey.h"
-
-struct journal_iter {
- struct list_head list;
- enum btree_id btree_id;
- unsigned level;
- size_t idx;
- struct journal_keys *keys;
-};
-
-/*
- * Iterate over keys in the btree, with keys from the journal overlaid on top:
- */
-
-struct btree_and_journal_iter {
- struct btree_trans *trans;
- struct btree *b;
- struct btree_node_iter node_iter;
- struct bkey unpacked;
-
- struct journal_iter journal;
- struct bpos pos;
- bool at_end;
- bool prefetch;
-};
-
-static inline int __journal_key_cmp(enum btree_id l_btree_id,
- unsigned l_level,
- struct bpos l_pos,
- const struct journal_key *r)
-{
- return (cmp_int(l_btree_id, r->btree_id) ?:
- cmp_int(l_level, r->level) ?:
- bpos_cmp(l_pos, r->k->k.p));
-}
-
-static inline int journal_key_cmp(const struct journal_key *l, const struct journal_key *r)
-{
- return __journal_key_cmp(l->btree_id, l->level, l->k->k.p, r);
-}
-
-struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *, enum btree_id,
- unsigned, struct bpos, struct bpos, size_t *);
-struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *, enum btree_id,
- unsigned, struct bpos);
-
-int bch2_btree_and_journal_iter_prefetch(struct btree_trans *, struct btree_path *,
- struct btree_and_journal_iter *);
-
-int bch2_journal_key_insert_take(struct bch_fs *, enum btree_id,
- unsigned, struct bkey_i *);
-int bch2_journal_key_insert(struct bch_fs *, enum btree_id,
- unsigned, struct bkey_i *);
-int bch2_journal_key_delete(struct bch_fs *, enum btree_id,
- unsigned, struct bpos);
-bool bch2_key_deleted_in_journal(struct btree_trans *, enum btree_id, unsigned, struct bpos);
-void bch2_journal_key_overwritten(struct bch_fs *, enum btree_id, unsigned, struct bpos);
-
-void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *);
-struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *);
-
-void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *);
-void __bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *,
- struct btree_and_journal_iter *, struct btree *,
- struct btree_node_iter, struct bpos);
-void bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *,
- struct btree_and_journal_iter *, struct btree *);
-
-void bch2_journal_keys_put(struct bch_fs *);
-
-static inline void bch2_journal_keys_put_initial(struct bch_fs *c)
-{
- if (c->journal_keys.initial_ref_held)
- bch2_journal_keys_put(c);
- c->journal_keys.initial_ref_held = false;
-}
-
-void bch2_journal_entries_free(struct bch_fs *);
-
-int bch2_journal_keys_sort(struct bch_fs *);
-
-void bch2_shoot_down_journal_keys(struct bch_fs *, enum btree_id,
- unsigned, unsigned,
- struct bpos, struct bpos);
-
-void bch2_journal_keys_dump(struct bch_fs *);
-
-#endif /* _BCACHEFS_BTREE_JOURNAL_ITER_H */
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
deleted file mode 100644
index 244610b1d0b5..000000000000
--- a/fs/bcachefs/btree_key_cache.c
+++ /dev/null
@@ -1,813 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "btree_cache.h"
-#include "btree_iter.h"
-#include "btree_key_cache.h"
-#include "btree_locking.h"
-#include "btree_update.h"
-#include "errcode.h"
-#include "error.h"
-#include "journal.h"
-#include "journal_reclaim.h"
-#include "trace.h"
-
-#include <linux/sched/mm.h>
-
-static inline bool btree_uses_pcpu_readers(enum btree_id id)
-{
- return id == BTREE_ID_subvolumes;
-}
-
-static struct kmem_cache *bch2_key_cache;
-
-static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
- const void *obj)
-{
- const struct bkey_cached *ck = obj;
- const struct bkey_cached_key *key = arg->key;
-
- return ck->key.btree_id != key->btree_id ||
- !bpos_eq(ck->key.pos, key->pos);
-}
-
-static const struct rhashtable_params bch2_btree_key_cache_params = {
- .head_offset = offsetof(struct bkey_cached, hash),
- .key_offset = offsetof(struct bkey_cached, key),
- .key_len = sizeof(struct bkey_cached_key),
- .obj_cmpfn = bch2_btree_key_cache_cmp_fn,
- .automatic_shrinking = true,
-};
-
-static inline void btree_path_cached_set(struct btree_trans *trans, struct btree_path *path,
- struct bkey_cached *ck,
- enum btree_node_locked_type lock_held)
-{
- path->l[0].lock_seq = six_lock_seq(&ck->c.lock);
- path->l[0].b = (void *) ck;
- mark_btree_node_locked(trans, path, 0, lock_held);
-}
-
-__flatten
-inline struct bkey_cached *
-bch2_btree_key_cache_find(struct bch_fs *c, enum btree_id btree_id, struct bpos pos)
-{
- struct bkey_cached_key key = {
- .btree_id = btree_id,
- .pos = pos,
- };
-
- return rhashtable_lookup_fast(&c->btree_key_cache.table, &key,
- bch2_btree_key_cache_params);
-}
-
-static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
-{
- if (!six_trylock_intent(&ck->c.lock))
- return false;
-
- if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
- six_unlock_intent(&ck->c.lock);
- return false;
- }
-
- if (!six_trylock_write(&ck->c.lock)) {
- six_unlock_intent(&ck->c.lock);
- return false;
- }
-
- return true;
-}
-
-static bool bkey_cached_evict(struct btree_key_cache *c,
- struct bkey_cached *ck)
-{
- bool ret = !rhashtable_remove_fast(&c->table, &ck->hash,
- bch2_btree_key_cache_params);
- if (ret) {
- memset(&ck->key, ~0, sizeof(ck->key));
- atomic_long_dec(&c->nr_keys);
- }
-
- return ret;
-}
-
-static void __bkey_cached_free(struct rcu_pending *pending, struct rcu_head *rcu)
-{
- struct bch_fs *c = container_of(pending->srcu, struct bch_fs, btree_trans_barrier);
- struct bkey_cached *ck = container_of(rcu, struct bkey_cached, rcu);
-
- this_cpu_dec(*c->btree_key_cache.nr_pending);
- kmem_cache_free(bch2_key_cache, ck);
-}
-
-static void bkey_cached_free(struct btree_key_cache *bc,
- struct bkey_cached *ck)
-{
- kfree(ck->k);
- ck->k = NULL;
- ck->u64s = 0;
-
- six_unlock_write(&ck->c.lock);
- six_unlock_intent(&ck->c.lock);
-
- bool pcpu_readers = ck->c.lock.readers != NULL;
- rcu_pending_enqueue(&bc->pending[pcpu_readers], &ck->rcu);
- this_cpu_inc(*bc->nr_pending);
-}
-
-static struct bkey_cached *__bkey_cached_alloc(unsigned key_u64s, gfp_t gfp)
-{
- gfp |= __GFP_ACCOUNT|__GFP_RECLAIMABLE;
-
- struct bkey_cached *ck = kmem_cache_zalloc(bch2_key_cache, gfp);
- if (unlikely(!ck))
- return NULL;
- ck->k = kmalloc(key_u64s * sizeof(u64), gfp);
- if (unlikely(!ck->k)) {
- kmem_cache_free(bch2_key_cache, ck);
- return NULL;
- }
- ck->u64s = key_u64s;
- return ck;
-}
-
-static struct bkey_cached *
-bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path, unsigned key_u64s)
-{
- struct bch_fs *c = trans->c;
- struct btree_key_cache *bc = &c->btree_key_cache;
- bool pcpu_readers = btree_uses_pcpu_readers(path->btree_id);
- int ret;
-
- struct bkey_cached *ck = container_of_or_null(
- rcu_pending_dequeue(&bc->pending[pcpu_readers]),
- struct bkey_cached, rcu);
- if (ck)
- goto lock;
-
- ck = allocate_dropping_locks(trans, ret,
- __bkey_cached_alloc(key_u64s, _gfp));
- if (ret) {
- if (ck)
- kfree(ck->k);
- kmem_cache_free(bch2_key_cache, ck);
- return ERR_PTR(ret);
- }
-
- if (ck) {
- bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
- ck->c.cached = true;
- goto lock;
- }
-
- ck = container_of_or_null(rcu_pending_dequeue_from_all(&bc->pending[pcpu_readers]),
- struct bkey_cached, rcu);
-	if (ck)
-		goto lock;
-	return NULL;
-lock:
- six_lock_intent(&ck->c.lock, NULL, NULL);
- six_lock_write(&ck->c.lock, NULL, NULL);
- return ck;
-}
-
-static struct bkey_cached *
-bkey_cached_reuse(struct btree_key_cache *c)
-{
- struct bucket_table *tbl;
- struct rhash_head *pos;
- struct bkey_cached *ck;
- unsigned i;
-
- rcu_read_lock();
- tbl = rht_dereference_rcu(c->table.tbl, &c->table);
- for (i = 0; i < tbl->size; i++)
- rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
- if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
- bkey_cached_lock_for_evict(ck)) {
- if (bkey_cached_evict(c, ck))
- goto out;
- six_unlock_write(&ck->c.lock);
- six_unlock_intent(&ck->c.lock);
- }
- }
- ck = NULL;
-out:
- rcu_read_unlock();
- return ck;
-}
-
-static int btree_key_cache_create(struct btree_trans *trans, struct btree_path *path,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct btree_key_cache *bc = &c->btree_key_cache;
-
- /*
- * bch2_varint_decode can read past the end of the buffer by at
- * most 7 bytes (it won't be used):
- */
- unsigned key_u64s = k.k->u64s + 1;
-
- /*
- * Allocate some extra space so that the transaction commit path is less
- * likely to have to reallocate, since that requires a transaction
- * restart:
- */
- key_u64s = min(256U, (key_u64s * 3) / 2);
- key_u64s = roundup_pow_of_two(key_u64s);
-
- struct bkey_cached *ck = bkey_cached_alloc(trans, path, key_u64s);
- int ret = PTR_ERR_OR_ZERO(ck);
- if (ret)
- return ret;
-
- if (unlikely(!ck)) {
- ck = bkey_cached_reuse(bc);
- if (unlikely(!ck)) {
- bch_err(c, "error allocating memory for key cache item, btree %s",
- bch2_btree_id_str(path->btree_id));
- return -BCH_ERR_ENOMEM_btree_key_cache_create;
- }
- }
-
- ck->c.level = 0;
- ck->c.btree_id = path->btree_id;
- ck->key.btree_id = path->btree_id;
- ck->key.pos = path->pos;
- ck->flags = 1U << BKEY_CACHED_ACCESSED;
-
- if (unlikely(key_u64s > ck->u64s)) {
- mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED);
-
- struct bkey_i *new_k = allocate_dropping_locks(trans, ret,
- kmalloc(key_u64s * sizeof(u64), _gfp));
- if (unlikely(!new_k)) {
- bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
- bch2_btree_id_str(ck->key.btree_id), key_u64s);
-			ret = -BCH_ERR_ENOMEM_btree_key_cache_fill;
-			goto err;
-		} else if (ret) {
- kfree(new_k);
- goto err;
- }
-
- kfree(ck->k);
- ck->k = new_k;
- ck->u64s = key_u64s;
- }
-
- bkey_reassemble(ck->k, k);
-
- ret = rhashtable_lookup_insert_fast(&bc->table, &ck->hash, bch2_btree_key_cache_params);
- if (unlikely(ret)) /* raced with another fill? */
- goto err;
-
- atomic_long_inc(&bc->nr_keys);
- six_unlock_write(&ck->c.lock);
-
- enum six_lock_type lock_want = __btree_lock_want(path, 0);
- if (lock_want == SIX_LOCK_read)
- six_lock_downgrade(&ck->c.lock);
- btree_path_cached_set(trans, path, ck, (enum btree_node_locked_type) lock_want);
- path->uptodate = BTREE_ITER_UPTODATE;
- return 0;
-err:
- bkey_cached_free(bc, ck);
- mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED);
-
- return ret;
-}
-
-static noinline int btree_key_cache_fill(struct btree_trans *trans,
- struct btree_path *ck_path,
- unsigned flags)
-{
- if (flags & BTREE_ITER_cached_nofill) {
- ck_path->uptodate = BTREE_ITER_UPTODATE;
- return 0;
- }
-
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret;
-
- bch2_trans_iter_init(trans, &iter, ck_path->btree_id, ck_path->pos,
- BTREE_ITER_key_cache_fill|
- BTREE_ITER_cached_nofill);
- iter.flags &= ~BTREE_ITER_with_journal;
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- /* Recheck after btree lookup, before allocating: */
- ret = bch2_btree_key_cache_find(c, ck_path->btree_id, ck_path->pos) ? -EEXIST : 0;
- if (unlikely(ret))
- goto out;
-
- ret = btree_key_cache_create(trans, ck_path, k);
- if (ret)
- goto err;
-out:
- /* We're not likely to need this iterator again: */
- bch2_set_btree_iter_dontneed(&iter);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static inline int btree_path_traverse_cached_fast(struct btree_trans *trans,
- struct btree_path *path)
-{
- struct bch_fs *c = trans->c;
- struct bkey_cached *ck;
-retry:
- ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
- if (!ck)
- return -ENOENT;
-
- enum six_lock_type lock_want = __btree_lock_want(path, 0);
-
- int ret = btree_node_lock(trans, path, (void *) ck, 0, lock_want, _THIS_IP_);
- if (ret)
- return ret;
-
- if (ck->key.btree_id != path->btree_id ||
- !bpos_eq(ck->key.pos, path->pos)) {
- six_unlock_type(&ck->c.lock, lock_want);
- goto retry;
- }
-
- if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
- set_bit(BKEY_CACHED_ACCESSED, &ck->flags);
-
- btree_path_cached_set(trans, path, ck, (enum btree_node_locked_type) lock_want);
- path->uptodate = BTREE_ITER_UPTODATE;
- return 0;
-}
-
-int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path *path,
- unsigned flags)
-{
- EBUG_ON(path->level);
-
- path->l[1].b = NULL;
-
- int ret;
- do {
- ret = btree_path_traverse_cached_fast(trans, path);
- if (unlikely(ret == -ENOENT))
- ret = btree_key_cache_fill(trans, path, flags);
- } while (ret == -EEXIST);
-
- if (unlikely(ret)) {
- path->uptodate = BTREE_ITER_NEED_TRAVERSE;
- if (!bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
- btree_node_unlock(trans, path, 0);
- path->l[0].b = ERR_PTR(ret);
- }
- }
- return ret;
-}
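bch2_btree_path_traverse_cached() bounces between the fast path (find the cached key) and the fill path (create it from the btree) because another thread can insert the same entry between our miss and our insert; the racing insert surfaces as -EEXIST and simply sends us back around the loop. A standalone sketch of that lookup-or-create retry shape, single-threaded, with the race simulated by a flag and made-up error codes:

#include <stdbool.h>
#include <stdio.h>

#define SK_ENOENT 1	/* not in the cache yet */
#define SK_EEXIST 2	/* someone inserted it while we were filling */

static bool cached;		/* pretend cache holding one entry */
static bool racing_insert;	/* simulate a concurrent fill */

static int traverse_fast(void)
{
	return cached ? 0 : -SK_ENOENT;
}

static int fill_from_btree(void)
{
	if (racing_insert) {		/* lost the race: the entry appeared */
		racing_insert = false;
		cached = true;
		return -SK_EEXIST;	/* go back to the fast path */
	}
	cached = true;
	return 0;
}

static int traverse_cached(void)
{
	int ret;

	do {
		ret = traverse_fast();
		if (ret == -SK_ENOENT)
			ret = fill_from_btree();
	} while (ret == -SK_EEXIST);

	return ret;
}

int main(void)
{
	racing_insert = true;
	printf("ret=%d cached=%d\n", traverse_cached(), cached);
	return 0;
}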
-
-static int btree_key_cache_flush_pos(struct btree_trans *trans,
- struct bkey_cached_key key,
- u64 journal_seq,
- unsigned commit_flags,
- bool evict)
-{
- struct bch_fs *c = trans->c;
- struct journal *j = &c->journal;
- struct btree_iter c_iter, b_iter;
- struct bkey_cached *ck = NULL;
- int ret;
-
- bch2_trans_iter_init(trans, &b_iter, key.btree_id, key.pos,
- BTREE_ITER_slots|
- BTREE_ITER_intent|
- BTREE_ITER_all_snapshots);
- bch2_trans_iter_init(trans, &c_iter, key.btree_id, key.pos,
- BTREE_ITER_cached|
- BTREE_ITER_intent);
- b_iter.flags &= ~BTREE_ITER_with_key_cache;
-
- ret = bch2_btree_iter_traverse(&c_iter);
- if (ret)
- goto out;
-
- ck = (void *) btree_iter_path(trans, &c_iter)->l[0].b;
- if (!ck)
- goto out;
-
- if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
- if (evict)
- goto evict;
- goto out;
- }
-
- if (journal_seq && ck->journal.seq != journal_seq)
- goto out;
-
- trans->journal_res.seq = ck->journal.seq;
-
- /*
- * If we're at the end of the journal, we really want to free up space
- * in the journal right away - we don't want to pin that old journal
- * sequence number with a new btree node write, we want to re-journal
- * the update
- */
- if (ck->journal.seq == journal_last_seq(j))
- commit_flags |= BCH_WATERMARK_reclaim;
-
- if (ck->journal.seq != journal_last_seq(j) ||
- !test_bit(JOURNAL_space_low, &c->journal.flags))
- commit_flags |= BCH_TRANS_COMMIT_no_journal_res;
-
- ret = bch2_btree_iter_traverse(&b_iter) ?:
- bch2_trans_update(trans, &b_iter, ck->k,
- BTREE_UPDATE_key_cache_reclaim|
- BTREE_UPDATE_internal_snapshot_node|
- BTREE_TRIGGER_norun) ?:
- bch2_trans_commit(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_check_rw|
- BCH_TRANS_COMMIT_no_enospc|
- commit_flags);
-
- bch2_fs_fatal_err_on(ret &&
- !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
- !bch2_err_matches(ret, BCH_ERR_journal_reclaim_would_deadlock) &&
- !bch2_journal_error(j), c,
- "flushing key cache: %s", bch2_err_str(ret));
- if (ret)
- goto out;
-
- bch2_journal_pin_drop(j, &ck->journal);
-
- struct btree_path *path = btree_iter_path(trans, &c_iter);
- BUG_ON(!btree_node_locked(path, 0));
-
- if (!evict) {
- if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
- clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
- atomic_long_dec(&c->btree_key_cache.nr_dirty);
- }
- } else {
- struct btree_path *path2;
- unsigned i;
-evict:
- trans_for_each_path(trans, path2, i)
- if (path2 != path)
- __bch2_btree_path_unlock(trans, path2);
-
- bch2_btree_node_lock_write_nofail(trans, path, &ck->c);
-
- if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
- clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
- atomic_long_dec(&c->btree_key_cache.nr_dirty);
- }
-
- mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED);
- if (bkey_cached_evict(&c->btree_key_cache, ck)) {
- bkey_cached_free(&c->btree_key_cache, ck);
- } else {
- six_unlock_write(&ck->c.lock);
- six_unlock_intent(&ck->c.lock);
- }
- }
-out:
- bch2_trans_iter_exit(trans, &b_iter);
- bch2_trans_iter_exit(trans, &c_iter);
- return ret;
-}
-
-int bch2_btree_key_cache_journal_flush(struct journal *j,
- struct journal_entry_pin *pin, u64 seq)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct bkey_cached *ck =
- container_of(pin, struct bkey_cached, journal);
- struct bkey_cached_key key;
- struct btree_trans *trans = bch2_trans_get(c);
- int srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
- int ret = 0;
-
- btree_node_lock_nopath_nofail(trans, &ck->c, SIX_LOCK_read);
- key = ck->key;
-
- if (ck->journal.seq != seq ||
- !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
- six_unlock_read(&ck->c.lock);
- goto unlock;
- }
-
- if (ck->seq != seq) {
- bch2_journal_pin_update(&c->journal, ck->seq, &ck->journal,
- bch2_btree_key_cache_journal_flush);
- six_unlock_read(&ck->c.lock);
- goto unlock;
- }
- six_unlock_read(&ck->c.lock);
-
- ret = lockrestart_do(trans,
- btree_key_cache_flush_pos(trans, key, seq,
- BCH_TRANS_COMMIT_journal_reclaim, false));
-unlock:
- srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
-
- bch2_trans_put(trans);
- return ret;
-}
-
-bool bch2_btree_insert_key_cached(struct btree_trans *trans,
- unsigned flags,
- struct btree_insert_entry *insert_entry)
-{
- struct bch_fs *c = trans->c;
- struct bkey_cached *ck = (void *) (trans->paths + insert_entry->path)->l[0].b;
- struct bkey_i *insert = insert_entry->k;
- bool kick_reclaim = false;
-
- BUG_ON(insert->k.u64s > ck->u64s);
-
- bkey_copy(ck->k, insert);
-
- if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
- EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags));
- set_bit(BKEY_CACHED_DIRTY, &ck->flags);
- atomic_long_inc(&c->btree_key_cache.nr_dirty);
-
- if (bch2_nr_btree_keys_need_flush(c))
- kick_reclaim = true;
- }
-
- /*
- * To minimize lock contention, we only add the journal pin here and
- * defer pin updates to the flush callback via ->seq. Be careful not to
- * update ->seq on nojournal commits because we don't want to update the
- * pin to a seq that doesn't include journal updates on disk. Otherwise
- * we risk losing the update after a crash.
- *
- * The only exception is if the pin is not active in the first place. We
- * have to add the pin because journal reclaim drives key cache
- * flushing. The flush callback will not proceed unless ->seq matches
- * the latest pin, so make sure it starts with a consistent value.
- */
- if (!(insert_entry->flags & BTREE_UPDATE_nojournal) ||
- !journal_pin_active(&ck->journal)) {
- ck->seq = trans->journal_res.seq;
- }
- bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
- &ck->journal, bch2_btree_key_cache_journal_flush);
-
- if (kick_reclaim)
- journal_reclaim_kick(&c->journal);
- return true;
-}
-
-void bch2_btree_key_cache_drop(struct btree_trans *trans,
- struct btree_path *path)
-{
- struct bch_fs *c = trans->c;
- struct btree_key_cache *bc = &c->btree_key_cache;
- struct bkey_cached *ck = (void *) path->l[0].b;
-
- /*
- * We just did an update to the btree, bypassing the key cache: the key
- * cache key is now stale and must be dropped, even if dirty:
- */
- if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
- clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
- atomic_long_dec(&c->btree_key_cache.nr_dirty);
- bch2_journal_pin_drop(&c->journal, &ck->journal);
- }
-
- bkey_cached_evict(bc, ck);
- bkey_cached_free(bc, ck);
-
- mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);
- btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
- path->should_be_locked = false;
-}
-
-static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- struct bch_fs *c = shrink->private_data;
- struct btree_key_cache *bc = &c->btree_key_cache;
- struct bucket_table *tbl;
- struct bkey_cached *ck;
- size_t scanned = 0, freed = 0, nr = sc->nr_to_scan;
- unsigned iter, start;
- int srcu_idx;
-
- srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
- rcu_read_lock();
-
- tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
-
- /*
- * Scanning is expensive while a rehash is in progress - most elements
- * will be on the new hashtable, if it's in progress
- *
- * A rehash could still start while we're scanning - that's ok, we'll
- * still see most elements.
- */
- if (unlikely(tbl->nest)) {
- rcu_read_unlock();
- srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
- return SHRINK_STOP;
- }
-
- iter = bc->shrink_iter;
- if (iter >= tbl->size)
- iter = 0;
- start = iter;
-
- do {
- struct rhash_head *pos, *next;
-
- pos = rht_ptr_rcu(&tbl->buckets[iter]);
-
- while (!rht_is_a_nulls(pos)) {
- next = rht_dereference_bucket_rcu(pos->next, tbl, iter);
- ck = container_of(pos, struct bkey_cached, hash);
-
- if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
- bc->skipped_dirty++;
- } else if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) {
- clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
- bc->skipped_accessed++;
- } else if (!bkey_cached_lock_for_evict(ck)) {
- bc->skipped_lock_fail++;
- } else if (bkey_cached_evict(bc, ck)) {
- bkey_cached_free(bc, ck);
- bc->freed++;
- freed++;
- } else {
- six_unlock_write(&ck->c.lock);
- six_unlock_intent(&ck->c.lock);
- }
-
- scanned++;
- if (scanned >= nr)
- goto out;
-
- pos = next;
- }
-
- iter++;
- if (iter >= tbl->size)
- iter = 0;
- } while (scanned < nr && iter != start);
-out:
- bc->shrink_iter = iter;
-
- rcu_read_unlock();
- srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
-
- return freed;
-}
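The shrinker above walks the hash table with a resumable cursor (bc->shrink_iter) and applies a second-chance policy: dirty entries are skipped, recently accessed entries merely lose their ACCESSED bit and survive one pass, and only cold, clean entries that can be locked are evicted. A standalone sketch of that clock-style scan over a plain array (locking omitted):

#include <stdbool.h>
#include <stdio.h>

struct entry { bool dirty, accessed, present; };

/* Scan up to 'nr_to_scan' entries starting at *cursor; return number freed. */
static unsigned scan(struct entry *tbl, unsigned size,
		     unsigned *cursor, unsigned nr_to_scan)
{
	unsigned scanned = 0, freed = 0, i = *cursor;

	while (scanned++ < nr_to_scan) {
		struct entry *e = &tbl[i];

		if (e->present) {
			if (e->dirty) {
				/* must be written back first; skip */
			} else if (e->accessed) {
				e->accessed = false;	/* second chance */
			} else {
				e->present = false;	/* evict */
				freed++;
			}
		}
		i = (i + 1) % size;	/* resume here on the next call */
	}
	*cursor = i;
	return freed;
}

int main(void)
{
	struct entry tbl[4] = {
		{ .present = true, .dirty = true },
		{ .present = true, .accessed = true },
		{ .present = true },
		{ .present = true },
	};
	unsigned cursor = 0;

	printf("freed %u\n", scan(tbl, 4, &cursor, 4));	/* frees entries 2 and 3 */
	return 0;
}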
-
-static unsigned long bch2_btree_key_cache_count(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- struct bch_fs *c = shrink->private_data;
- struct btree_key_cache *bc = &c->btree_key_cache;
- long nr = atomic_long_read(&bc->nr_keys) -
- atomic_long_read(&bc->nr_dirty);
-
- /*
- * Avoid hammering our shrinker too much if it's nearly empty - the
- * shrinker code doesn't take into account how big our cache is, if it's
- * mostly empty but the system is under memory pressure it causes nasty
- * lock contention:
- */
- nr -= 128;
-
- return max(0L, nr);
-}
-
-void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
-{
- struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
- struct bucket_table *tbl;
- struct bkey_cached *ck;
- struct rhash_head *pos;
- LIST_HEAD(items);
- unsigned i;
-
- shrinker_free(bc->shrink);
-
- /*
- * The loop is needed to guard against racing with rehash:
- */
- while (atomic_long_read(&bc->nr_keys)) {
- rcu_read_lock();
- tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
- if (tbl) {
- if (tbl->nest) {
- /* wait for in progress rehash */
- rcu_read_unlock();
- mutex_lock(&bc->table.mutex);
- mutex_unlock(&bc->table.mutex);
- rcu_read_lock();
- continue;
- }
- for (i = 0; i < tbl->size; i++)
- while (pos = rht_ptr_rcu(&tbl->buckets[i]), !rht_is_a_nulls(pos)) {
- ck = container_of(pos, struct bkey_cached, hash);
- BUG_ON(!bkey_cached_evict(bc, ck));
- kfree(ck->k);
- kmem_cache_free(bch2_key_cache, ck);
- }
- }
- rcu_read_unlock();
- }
-
- if (atomic_long_read(&bc->nr_dirty) &&
- !bch2_journal_error(&c->journal) &&
- test_bit(BCH_FS_was_rw, &c->flags))
- panic("btree key cache shutdown error: nr_dirty nonzero (%li)\n",
- atomic_long_read(&bc->nr_dirty));
-
- if (atomic_long_read(&bc->nr_keys))
- panic("btree key cache shutdown error: nr_keys nonzero (%li)\n",
- atomic_long_read(&bc->nr_keys));
-
- if (bc->table_init_done)
- rhashtable_destroy(&bc->table);
-
- rcu_pending_exit(&bc->pending[0]);
- rcu_pending_exit(&bc->pending[1]);
-
- free_percpu(bc->nr_pending);
-}
-
-void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
-{
-}
-
-int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
-{
- struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
- struct shrinker *shrink;
-
- bc->nr_pending = alloc_percpu(size_t);
- if (!bc->nr_pending)
- return -BCH_ERR_ENOMEM_fs_btree_cache_init;
-
- if (rcu_pending_init(&bc->pending[0], &c->btree_trans_barrier, __bkey_cached_free) ||
- rcu_pending_init(&bc->pending[1], &c->btree_trans_barrier, __bkey_cached_free))
- return -BCH_ERR_ENOMEM_fs_btree_cache_init;
-
- if (rhashtable_init(&bc->table, &bch2_btree_key_cache_params))
- return -BCH_ERR_ENOMEM_fs_btree_cache_init;
-
- bc->table_init_done = true;
-
- shrink = shrinker_alloc(0, "%s-btree_key_cache", c->name);
- if (!shrink)
- return -BCH_ERR_ENOMEM_fs_btree_cache_init;
- bc->shrink = shrink;
- shrink->count_objects = bch2_btree_key_cache_count;
- shrink->scan_objects = bch2_btree_key_cache_scan;
- shrink->batch = 1 << 14;
- shrink->seeks = 0;
- shrink->private_data = c;
- shrinker_register(shrink);
- return 0;
-}
-
-void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *bc)
-{
- printbuf_tabstop_push(out, 24);
- printbuf_tabstop_push(out, 12);
-
- prt_printf(out, "keys:\t%lu\r\n", atomic_long_read(&bc->nr_keys));
- prt_printf(out, "dirty:\t%lu\r\n", atomic_long_read(&bc->nr_dirty));
- prt_printf(out, "table size:\t%u\r\n", bc->table.tbl->size);
- prt_newline(out);
- prt_printf(out, "shrinker:\n");
- prt_printf(out, "requested_to_free:\t%lu\r\n", bc->requested_to_free);
- prt_printf(out, "freed:\t%lu\r\n", bc->freed);
- prt_printf(out, "skipped_dirty:\t%lu\r\n", bc->skipped_dirty);
- prt_printf(out, "skipped_accessed:\t%lu\r\n", bc->skipped_accessed);
- prt_printf(out, "skipped_lock_fail:\t%lu\r\n", bc->skipped_lock_fail);
- prt_newline(out);
- prt_printf(out, "pending:\t%zu\r\n", per_cpu_sum(bc->nr_pending));
-}
-
-void bch2_btree_key_cache_exit(void)
-{
- kmem_cache_destroy(bch2_key_cache);
-}
-
-int __init bch2_btree_key_cache_init(void)
-{
- bch2_key_cache = KMEM_CACHE(bkey_cached, SLAB_RECLAIM_ACCOUNT);
- if (!bch2_key_cache)
- return -ENOMEM;
-
- return 0;
-}
diff --git a/fs/bcachefs/btree_key_cache.h b/fs/bcachefs/btree_key_cache.h
deleted file mode 100644
index 51d6289b8dee..000000000000
--- a/fs/bcachefs/btree_key_cache.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BTREE_KEY_CACHE_H
-#define _BCACHEFS_BTREE_KEY_CACHE_H
-
-static inline size_t bch2_nr_btree_keys_need_flush(struct bch_fs *c)
-{
- size_t nr_dirty = atomic_long_read(&c->btree_key_cache.nr_dirty);
- size_t nr_keys = atomic_long_read(&c->btree_key_cache.nr_keys);
- size_t max_dirty = 1024 + nr_keys / 2;
-
- return max_t(ssize_t, 0, nr_dirty - max_dirty);
-}
-
-static inline ssize_t __bch2_btree_key_cache_must_wait(struct bch_fs *c)
-{
- size_t nr_dirty = atomic_long_read(&c->btree_key_cache.nr_dirty);
- size_t nr_keys = atomic_long_read(&c->btree_key_cache.nr_keys);
- size_t max_dirty = 4096 + (nr_keys * 3) / 4;
-
- return nr_dirty - max_dirty;
-}
-
-static inline bool bch2_btree_key_cache_must_wait(struct bch_fs *c)
-{
- return __bch2_btree_key_cache_must_wait(c) > 0;
-}
-
-static inline bool bch2_btree_key_cache_wait_done(struct bch_fs *c)
-{
- size_t nr_dirty = atomic_long_read(&c->btree_key_cache.nr_dirty);
- size_t nr_keys = atomic_long_read(&c->btree_key_cache.nr_keys);
- size_t max_dirty = 2048 + (nr_keys * 5) / 8;
-
- return nr_dirty <= max_dirty;
-}
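The three helpers above form a hysteresis band around the number of dirty cached keys: flushing is kicked once nr_dirty exceeds 1024 + nr_keys/2, updaters are throttled above 4096 + 3*nr_keys/4, and they are released again only once nr_dirty falls back under 2048 + 5*nr_keys/8. A tiny worked example that just evaluates those formulas for one sample cache size:

#include <stdio.h>

int main(void)
{
	long nr_keys = 100000;	/* example cache population */

	long need_flush = 1024 + nr_keys / 2;		/* 51024 */
	long must_wait  = 4096 + (nr_keys * 3) / 4;	/* 79096 */
	long wait_done  = 2048 + (nr_keys * 5) / 8;	/* 64548 */

	printf("start flushing above %ld dirty keys\n", need_flush);
	printf("throttle updates above %ld dirty keys\n", must_wait);
	printf("stop throttling below %ld dirty keys\n", wait_done);
	return 0;
}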
-
-int bch2_btree_key_cache_journal_flush(struct journal *,
- struct journal_entry_pin *, u64);
-
-struct bkey_cached *
-bch2_btree_key_cache_find(struct bch_fs *, enum btree_id, struct bpos);
-
-int bch2_btree_path_traverse_cached(struct btree_trans *, struct btree_path *,
- unsigned);
-
-bool bch2_btree_insert_key_cached(struct btree_trans *, unsigned,
- struct btree_insert_entry *);
-void bch2_btree_key_cache_drop(struct btree_trans *,
- struct btree_path *);
-
-void bch2_fs_btree_key_cache_exit(struct btree_key_cache *);
-void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *);
-int bch2_fs_btree_key_cache_init(struct btree_key_cache *);
-
-void bch2_btree_key_cache_to_text(struct printbuf *, struct btree_key_cache *);
-
-void bch2_btree_key_cache_exit(void);
-int __init bch2_btree_key_cache_init(void);
-
-#endif /* _BCACHEFS_BTREE_KEY_CACHE_H */
diff --git a/fs/bcachefs/btree_key_cache_types.h b/fs/bcachefs/btree_key_cache_types.h
deleted file mode 100644
index 722f1ed10551..000000000000
--- a/fs/bcachefs/btree_key_cache_types.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BTREE_KEY_CACHE_TYPES_H
-#define _BCACHEFS_BTREE_KEY_CACHE_TYPES_H
-
-#include "rcu_pending.h"
-
-struct btree_key_cache {
- struct rhashtable table;
- bool table_init_done;
-
- struct shrinker *shrink;
- unsigned shrink_iter;
-
- /* 0: non pcpu reader locks, 1: pcpu reader locks */
- struct rcu_pending pending[2];
- size_t __percpu *nr_pending;
-
- atomic_long_t nr_keys;
- atomic_long_t nr_dirty;
-
- /* shrinker stats */
- unsigned long requested_to_free;
- unsigned long freed;
- unsigned long skipped_dirty;
- unsigned long skipped_accessed;
- unsigned long skipped_lock_fail;
-};
-
-struct bkey_cached_key {
- u32 btree_id;
- struct bpos pos;
-} __packed __aligned(4);
-
-#endif /* _BCACHEFS_BTREE_KEY_CACHE_TYPES_H */
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
deleted file mode 100644
index efe2a007b482..000000000000
--- a/fs/bcachefs/btree_locking.c
+++ /dev/null
@@ -1,887 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "btree_locking.h"
-#include "btree_types.h"
-
-static struct lock_class_key bch2_btree_node_lock_key;
-
-void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
- enum six_lock_init_flags flags)
-{
- __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
- lockdep_set_notrack_class(&b->lock);
-}
-
-/* Btree node locking: */
-
-struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
- struct btree_path *skip,
- struct btree_bkey_cached_common *b,
- unsigned level)
-{
- struct btree_path *path;
- struct six_lock_count ret;
- unsigned i;
-
- memset(&ret, 0, sizeof(ret));
-
- if (IS_ERR_OR_NULL(b))
- return ret;
-
- trans_for_each_path(trans, path, i)
- if (path != skip && &path->l[level].b->c == b) {
- int t = btree_node_locked_type(path, level);
-
- if (t != BTREE_NODE_UNLOCKED)
- ret.n[t]++;
- }
-
- return ret;
-}
-
-/* unlock */
-
-void bch2_btree_node_unlock_write(struct btree_trans *trans,
- struct btree_path *path, struct btree *b)
-{
- bch2_btree_node_unlock_write_inlined(trans, path, b);
-}
-
-/* lock */
-
-/*
- * @trans wants to lock @b with type @type
- */
-struct trans_waiting_for_lock {
- struct btree_trans *trans;
- struct btree_bkey_cached_common *node_want;
- enum six_lock_type lock_want;
-
- /* for iterating over held locks: */
- u8 path_idx;
- u8 level;
- u64 lock_start_time;
-};
-
-struct lock_graph {
- struct trans_waiting_for_lock g[8];
- unsigned nr;
-};
-
-static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
-{
- struct trans_waiting_for_lock *i;
-
- prt_printf(out, "Found lock cycle (%u entries):\n", g->nr);
-
- for (i = g->g; i < g->g + g->nr; i++) {
- struct task_struct *task = READ_ONCE(i->trans->locking_wait.task);
- if (!task)
- continue;
-
- bch2_btree_trans_to_text(out, i->trans);
- bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1, GFP_NOWAIT);
- }
-}
-
-static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
-{
- struct trans_waiting_for_lock *i;
-
- for (i = g->g; i != g->g + g->nr; i++) {
- struct task_struct *task = i->trans->locking_wait.task;
- if (i != g->g)
- prt_str(out, "<- ");
- prt_printf(out, "%u ", task ? task->pid : 0);
- }
- prt_newline(out);
-}
-
-static void lock_graph_up(struct lock_graph *g)
-{
- closure_put(&g->g[--g->nr].trans->ref);
-}
-
-static noinline void lock_graph_pop_all(struct lock_graph *g)
-{
- while (g->nr)
- lock_graph_up(g);
-}
-
-static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
-{
- g->g[g->nr++] = (struct trans_waiting_for_lock) {
- .trans = trans,
- .node_want = trans->locking,
- .lock_want = trans->locking_wait.lock_want,
- };
-}
-
-static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
-{
- closure_get(&trans->ref);
- __lock_graph_down(g, trans);
-}
-
-static bool lock_graph_remove_non_waiters(struct lock_graph *g)
-{
- struct trans_waiting_for_lock *i;
-
- for (i = g->g + 1; i < g->g + g->nr; i++)
- if (i->trans->locking != i->node_want ||
- i->trans->locking_wait.start_time != i[-1].lock_start_time) {
- while (g->g + g->nr > i)
- lock_graph_up(g);
- return true;
- }
-
- return false;
-}
-
-static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans)
-{
- struct bch_fs *c = trans->c;
-
- count_event(c, trans_restart_would_deadlock);
-
- if (trace_trans_restart_would_deadlock_enabled()) {
- struct printbuf buf = PRINTBUF;
-
- buf.atomic++;
- print_cycle(&buf, g);
-
- trace_trans_restart_would_deadlock(trans, buf.buf);
- printbuf_exit(&buf);
- }
-}
-
-static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
-{
- if (i == g->g) {
- trace_would_deadlock(g, i->trans);
- return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
- } else {
- i->trans->lock_must_abort = true;
- wake_up_process(i->trans->locking_wait.task);
- return 0;
- }
-}
-
-static int btree_trans_abort_preference(struct btree_trans *trans)
-{
- if (trans->lock_may_not_fail)
- return 0;
- if (trans->locking_wait.lock_want == SIX_LOCK_write)
- return 1;
- if (!trans->in_traverse_all)
- return 2;
- return 3;
-}
-
-static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
-{
- struct trans_waiting_for_lock *i, *abort = NULL;
- unsigned best = 0, pref;
- int ret;
-
- if (lock_graph_remove_non_waiters(g))
- return 0;
-
- /* Only checking, for debugfs: */
- if (cycle) {
- print_cycle(cycle, g);
- ret = -1;
- goto out;
- }
-
- for (i = g->g; i < g->g + g->nr; i++) {
- pref = btree_trans_abort_preference(i->trans);
- if (pref > best) {
- abort = i;
- best = pref;
- }
- }
-
- if (unlikely(!best)) {
- struct printbuf buf = PRINTBUF;
- buf.atomic++;
-
- prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));
-
- for (i = g->g; i < g->g + g->nr; i++) {
- struct btree_trans *trans = i->trans;
-
- bch2_btree_trans_to_text(&buf, trans);
-
- prt_printf(&buf, "backtrace:\n");
- printbuf_indent_add(&buf, 2);
- bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2, GFP_NOWAIT);
- printbuf_indent_sub(&buf, 2);
- prt_newline(&buf);
- }
-
- bch2_print_string_as_lines_nonblocking(KERN_ERR, buf.buf);
- printbuf_exit(&buf);
- BUG();
- }
-
- ret = abort_lock(g, abort);
-out:
- if (ret)
- while (g->nr)
- lock_graph_up(g);
- return ret;
-}
-
-static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
- struct printbuf *cycle)
-{
- struct btree_trans *orig_trans = g->g->trans;
- struct trans_waiting_for_lock *i;
-
- for (i = g->g; i < g->g + g->nr; i++)
- if (i->trans == trans) {
- closure_put(&trans->ref);
- return break_cycle(g, cycle);
- }
-
- if (g->nr == ARRAY_SIZE(g->g)) {
- closure_put(&trans->ref);
-
- if (orig_trans->lock_may_not_fail)
- return 0;
-
- while (g->nr)
- lock_graph_up(g);
-
- if (cycle)
- return 0;
-
- trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
- return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
- }
-
- __lock_graph_down(g, trans);
- return 0;
-}
-
-static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
-{
- return t1 + t2 > 1;
-}
-
-int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
-{
- struct lock_graph g;
- struct trans_waiting_for_lock *top;
- struct btree_bkey_cached_common *b;
- btree_path_idx_t path_idx;
- int ret = 0;
-
- g.nr = 0;
-
- if (trans->lock_must_abort) {
- if (cycle)
- return -1;
-
- trace_would_deadlock(&g, trans);
- return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
- }
-
- lock_graph_down(&g, trans);
-
- /* trans->paths is rcu protected vs. freeing */
- rcu_read_lock();
- if (cycle)
- cycle->atomic++;
-next:
- if (!g.nr)
- goto out;
-
- top = &g.g[g.nr - 1];
-
- struct btree_path *paths = rcu_dereference(top->trans->paths);
- if (!paths)
- goto up;
-
- unsigned long *paths_allocated = trans_paths_allocated(paths);
-
- trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths),
- path_idx, top->path_idx) {
- struct btree_path *path = paths + path_idx;
- if (!path->nodes_locked)
- continue;
-
- if (path_idx != top->path_idx) {
- top->path_idx = path_idx;
- top->level = 0;
- top->lock_start_time = 0;
- }
-
- for (;
- top->level < BTREE_MAX_DEPTH;
- top->level++, top->lock_start_time = 0) {
- int lock_held = btree_node_locked_type(path, top->level);
-
- if (lock_held == BTREE_NODE_UNLOCKED)
- continue;
-
- b = &READ_ONCE(path->l[top->level].b)->c;
-
- if (IS_ERR_OR_NULL(b)) {
- /*
- * If we get here, it means we raced with the
- * other thread updating its btree_path
- * structures - which means it can't be blocked
- * waiting on a lock:
- */
- if (!lock_graph_remove_non_waiters(&g)) {
- /*
- * If lock_graph_remove_non_waiters()
- * didn't do anything, it must be
- * because we're being called by debugfs
- * checking for lock cycles, which
- * invokes us on btree_transactions that
- * aren't actually waiting on anything.
- * Just bail out:
- */
- lock_graph_pop_all(&g);
- }
-
- goto next;
- }
-
- if (list_empty_careful(&b->lock.wait_list))
- continue;
-
- raw_spin_lock(&b->lock.wait_lock);
- list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
- BUG_ON(b != trans->locking);
-
- if (top->lock_start_time &&
- time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
- continue;
-
- top->lock_start_time = trans->locking_wait.start_time;
-
- /* Don't check for self deadlock: */
- if (trans == top->trans ||
- !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
- continue;
-
- closure_get(&trans->ref);
- raw_spin_unlock(&b->lock.wait_lock);
-
- ret = lock_graph_descend(&g, trans, cycle);
- if (ret)
- goto out;
- goto next;
-
- }
- raw_spin_unlock(&b->lock.wait_lock);
- }
- }
-up:
- if (g.nr > 1 && cycle)
- print_chain(cycle, &g);
- lock_graph_up(&g);
- goto next;
-out:
- if (cycle)
- --cycle->atomic;
- rcu_read_unlock();
- return ret;
-}
-
-int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
-{
- struct btree_trans *trans = p;
-
- return bch2_check_for_deadlock(trans, NULL);
-}
-
-int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
- struct btree_bkey_cached_common *b,
- bool lock_may_not_fail)
-{
- int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
- int ret;
-
- /*
- * Must drop our read locks before calling six_lock_write() -
- * six_unlock() won't do wakeups until the reader count
- * goes to 0, and it's safe because we have the node intent
- * locked:
- */
- six_lock_readers_add(&b->lock, -readers);
- ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
- lock_may_not_fail, _RET_IP_);
- six_lock_readers_add(&b->lock, readers);
-
- if (ret)
- mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED);
-
- return ret;
-}
-
-void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
- struct btree_path *path,
- struct btree_bkey_cached_common *b)
-{
- int ret = __btree_node_lock_write(trans, path, b, true);
- BUG_ON(ret);
-}
-
-/* relock */
-
-static inline bool btree_path_get_locks(struct btree_trans *trans,
- struct btree_path *path,
- bool upgrade,
- struct get_locks_fail *f)
-{
- unsigned l = path->level;
- int fail_idx = -1;
-
- do {
- if (!btree_path_node(path, l))
- break;
-
- if (!(upgrade
- ? bch2_btree_node_upgrade(trans, path, l)
- : bch2_btree_node_relock(trans, path, l))) {
- fail_idx = l;
-
- if (f) {
- f->l = l;
- f->b = path->l[l].b;
- }
- }
-
- l++;
- } while (l < path->locks_want);
-
- /*
- * When we fail to get a lock, we have to ensure that any child nodes
- * can't be relocked so bch2_btree_path_traverse has to walk back up to
- * the node that we failed to relock:
- */
- if (fail_idx >= 0) {
- __bch2_btree_path_unlock(trans, path);
- btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
-
- do {
- path->l[fail_idx].b = upgrade
- ? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
- : ERR_PTR(-BCH_ERR_no_btree_node_relock);
- --fail_idx;
- } while (fail_idx >= 0);
- }
-
- if (path->uptodate == BTREE_ITER_NEED_RELOCK)
- path->uptodate = BTREE_ITER_UPTODATE;
-
- return path->uptodate < BTREE_ITER_NEED_RELOCK;
-}
-
-bool __bch2_btree_node_relock(struct btree_trans *trans,
- struct btree_path *path, unsigned level,
- bool trace)
-{
- struct btree *b = btree_path_node(path, level);
- int want = __btree_lock_want(path, level);
-
- if (race_fault())
- goto fail;
-
- if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
- (btree_node_lock_seq_matches(path, b, level) &&
- btree_node_lock_increment(trans, &b->c, level, want))) {
- mark_btree_node_locked(trans, path, level, want);
- return true;
- }
-fail:
- if (trace && !trans->notrace_relock_fail)
- trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
- return false;
-}
-
-/* upgrade */
-
-bool bch2_btree_node_upgrade(struct btree_trans *trans,
- struct btree_path *path, unsigned level)
-{
- struct btree *b = path->l[level].b;
- struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level);
-
- if (!is_btree_node(path, level))
- return false;
-
- switch (btree_lock_want(path, level)) {
- case BTREE_NODE_UNLOCKED:
- BUG_ON(btree_node_locked(path, level));
- return true;
- case BTREE_NODE_READ_LOCKED:
- BUG_ON(btree_node_intent_locked(path, level));
- return bch2_btree_node_relock(trans, path, level);
- case BTREE_NODE_INTENT_LOCKED:
- break;
- case BTREE_NODE_WRITE_LOCKED:
- BUG();
- }
-
- if (btree_node_intent_locked(path, level))
- return true;
-
- if (race_fault())
- return false;
-
- if (btree_node_locked(path, level)) {
- bool ret;
-
- six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]);
- ret = six_lock_tryupgrade(&b->c.lock);
- six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]);
-
- if (ret)
- goto success;
- } else {
- if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
- goto success;
- }
-
- /*
- * Do we already have an intent lock via another path? If so, just bump
- * lock count:
- */
- if (btree_node_lock_seq_matches(path, b, level) &&
- btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
- btree_node_unlock(trans, path, level);
- goto success;
- }
-
- trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
- return false;
-success:
- mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
- return true;
-}
-
-/* Btree path locking: */
-
-/*
- * Only for btree_cache.c - only relocks intent locks
- */
-int bch2_btree_path_relock_intent(struct btree_trans *trans,
- struct btree_path *path)
-{
- unsigned l;
-
- for (l = path->level;
- l < path->locks_want && btree_path_node(path, l);
- l++) {
- if (!bch2_btree_node_relock(trans, path, l)) {
- __bch2_btree_path_unlock(trans, path);
- btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
- trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
- return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
- }
- }
-
- return 0;
-}
-
-__flatten
-bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_path *path)
-{
- struct get_locks_fail f;
-
- bool ret = btree_path_get_locks(trans, path, false, &f);
- bch2_trans_verify_locks(trans);
- return ret;
-}
-
-int __bch2_btree_path_relock(struct btree_trans *trans,
- struct btree_path *path, unsigned long trace_ip)
-{
- if (!bch2_btree_path_relock_norestart(trans, path)) {
- trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
- return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
- }
-
- return 0;
-}
-
-bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
- struct btree_path *path,
- unsigned new_locks_want,
- struct get_locks_fail *f)
-{
- EBUG_ON(path->locks_want >= new_locks_want);
-
- path->locks_want = new_locks_want;
-
- bool ret = btree_path_get_locks(trans, path, true, f);
- bch2_trans_verify_locks(trans);
- return ret;
-}
-
-bool __bch2_btree_path_upgrade(struct btree_trans *trans,
- struct btree_path *path,
- unsigned new_locks_want,
- struct get_locks_fail *f)
-{
- bool ret = bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f);
- if (ret)
- goto out;
-
- /*
- * XXX: this is ugly - we'd prefer to not be mucking with other
- * iterators in the btree_trans here.
- *
- * On failure to upgrade the iterator, setting iter->locks_want and
- * calling get_locks() is sufficient to make bch2_btree_path_traverse()
- * get the locks we want on transaction restart.
- *
- * But if this iterator was a clone, on transaction restart what we did
- * to this iterator isn't going to be preserved.
- *
- * Possibly we could add an iterator field for the parent iterator when
- * an iterator is a copy - for now, we'll just upgrade any other
- * iterators with the same btree id.
- *
- * The code below used to be needed to ensure ancestor nodes get locked
- * before interior nodes - now that's handled by
- * bch2_btree_path_traverse_all().
- */
- if (!path->cached && !trans->in_traverse_all) {
- struct btree_path *linked;
- unsigned i;
-
- trans_for_each_path(trans, linked, i)
- if (linked != path &&
- linked->cached == path->cached &&
- linked->btree_id == path->btree_id &&
- linked->locks_want < new_locks_want) {
- linked->locks_want = new_locks_want;
- btree_path_get_locks(trans, linked, true, NULL);
- }
- }
-out:
- bch2_trans_verify_locks(trans);
- return ret;
-}
-
-void __bch2_btree_path_downgrade(struct btree_trans *trans,
- struct btree_path *path,
- unsigned new_locks_want)
-{
- unsigned l, old_locks_want = path->locks_want;
-
- if (trans->restarted)
- return;
-
- EBUG_ON(path->locks_want < new_locks_want);
-
- path->locks_want = new_locks_want;
-
- while (path->nodes_locked &&
- (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
- if (l > path->level) {
- btree_node_unlock(trans, path, l);
- } else {
- if (btree_node_intent_locked(path, l)) {
- six_lock_downgrade(&path->l[l].b->c.lock);
- mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED);
- }
- break;
- }
- }
-
- bch2_btree_path_verify_locks(path);
-
- trace_path_downgrade(trans, _RET_IP_, path, old_locks_want);
-}
-
-/* Btree transaction locking: */
-
-void bch2_trans_downgrade(struct btree_trans *trans)
-{
- struct btree_path *path;
- unsigned i;
-
- if (trans->restarted)
- return;
-
- trans_for_each_path(trans, path, i)
- if (path->ref)
- bch2_btree_path_downgrade(trans, path);
-}
-
-static inline void __bch2_trans_unlock(struct btree_trans *trans)
-{
- struct btree_path *path;
- unsigned i;
-
- trans_for_each_path(trans, path, i)
- __bch2_btree_path_unlock(trans, path);
-}
-
-static noinline __cold int bch2_trans_relock_fail(struct btree_trans *trans, struct btree_path *path,
- struct get_locks_fail *f, bool trace)
-{
- if (!trace)
- goto out;
-
- if (trace_trans_restart_relock_enabled()) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bpos_to_text(&buf, path->pos);
- prt_printf(&buf, " l=%u seq=%u node seq=", f->l, path->l[f->l].lock_seq);
- if (IS_ERR_OR_NULL(f->b)) {
- prt_str(&buf, bch2_err_str(PTR_ERR(f->b)));
- } else {
- prt_printf(&buf, "%u", f->b->c.lock.seq);
-
- struct six_lock_count c =
- bch2_btree_node_lock_counts(trans, NULL, &f->b->c, f->l);
- prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
-
- c = six_lock_counts(&f->b->c.lock);
- prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
- }
-
- trace_trans_restart_relock(trans, _RET_IP_, buf.buf);
- printbuf_exit(&buf);
- }
-
- count_event(trans->c, trans_restart_relock);
-out:
- __bch2_trans_unlock(trans);
- bch2_trans_verify_locks(trans);
- return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
-}
-
-static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace)
-{
- bch2_trans_verify_locks(trans);
-
- if (unlikely(trans->restarted))
- return -((int) trans->restarted);
- if (unlikely(trans->locked))
- goto out;
-
- struct btree_path *path;
- unsigned i;
-
- trans_for_each_path(trans, path, i) {
- struct get_locks_fail f;
-
- if (path->should_be_locked &&
- !btree_path_get_locks(trans, path, false, &f))
- return bch2_trans_relock_fail(trans, path, &f, trace);
- }
-
- trans_set_locked(trans);
-out:
- bch2_trans_verify_locks(trans);
- return 0;
-}
-
-int bch2_trans_relock(struct btree_trans *trans)
-{
- return __bch2_trans_relock(trans, true);
-}
-
-int bch2_trans_relock_notrace(struct btree_trans *trans)
-{
- return __bch2_trans_relock(trans, false);
-}
-
-void bch2_trans_unlock_noassert(struct btree_trans *trans)
-{
- __bch2_trans_unlock(trans);
-
- trans_set_unlocked(trans);
-}
-
-void bch2_trans_unlock(struct btree_trans *trans)
-{
- __bch2_trans_unlock(trans);
-
- trans_set_unlocked(trans);
-}
-
-void bch2_trans_unlock_long(struct btree_trans *trans)
-{
- bch2_trans_unlock(trans);
- bch2_trans_srcu_unlock(trans);
-}
-
-int __bch2_trans_mutex_lock(struct btree_trans *trans,
- struct mutex *lock)
-{
- int ret = drop_locks_do(trans, (mutex_lock(lock), 0));
-
- if (ret)
- mutex_unlock(lock);
- return ret;
-}
-
-/* Debug */
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-
-void bch2_btree_path_verify_locks(struct btree_path *path)
-{
- /*
- * A path may be uptodate and yet have nothing locked if and only if
- * there is no node at path->level, which generally means we were
- * iterating over all nodes and got to the end of the btree
- */
- BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
- btree_path_node(path, path->level) &&
- !path->nodes_locked);
-
- if (!path->nodes_locked)
- return;
-
- for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
- int want = btree_lock_want(path, l);
- int have = btree_node_locked_type(path, l);
-
- BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);
-
- BUG_ON(is_btree_node(path, l) &&
- (want == BTREE_NODE_UNLOCKED ||
- have != BTREE_NODE_WRITE_LOCKED) &&
- want != have);
- }
-}
-
-static bool bch2_trans_locked(struct btree_trans *trans)
-{
- struct btree_path *path;
- unsigned i;
-
- trans_for_each_path(trans, path, i)
- if (path->nodes_locked)
- return true;
- return false;
-}
-
-void bch2_trans_verify_locks(struct btree_trans *trans)
-{
- if (!trans->locked) {
- BUG_ON(bch2_trans_locked(trans));
- return;
- }
-
- struct btree_path *path;
- unsigned i;
-
- trans_for_each_path(trans, path, i)
- bch2_btree_path_verify_locks(path);
-}
-
-#endif
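
The heart of the file removed above is bch2_check_for_deadlock(): blocked transactions form a wait-for graph (each waiter records the node it wants, and each lock's wait list yields further waiters), and revisiting a transaction while walking that graph means a deadlock, at which point one participant is chosen to restart. The standalone sketch below shows only that cycle-walk idea on a simplified single-successor graph; struct trans, would_deadlock() and MAX_TRANS are hypothetical names, not bcachefs API.

/*
 * Minimal sketch of wait-for-graph cycle detection, conceptually similar to
 * bch2_check_for_deadlock(). All names here are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_TRANS 8	/* mirrors the 8-entry lock_graph bound */

struct trans {
	int id;
	struct trans *waiting_on;	/* holder of the lock we want, or NULL */
};

/* Walk the waits-for chain from @t; seeing the same transaction twice is a cycle. */
static bool would_deadlock(struct trans *t)
{
	struct trans *seen[MAX_TRANS];
	size_t nr = 0;

	for (struct trans *i = t; i; i = i->waiting_on) {
		for (size_t j = 0; j < nr; j++)
			if (seen[j] == i)
				return true;	/* closed a cycle */
		if (nr == MAX_TRANS)
			break;			/* depth limit, like the lock_graph array */
		seen[nr++] = i;
	}
	return false;
}

int main(void)
{
	struct trans a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	a.waiting_on = &b;
	b.waiting_on = &c;
	printf("a->b->c   : %s\n", would_deadlock(&a) ? "deadlock" : "ok");

	c.waiting_on = &a;	/* close the loop */
	printf("a->b->c->a: %s\n", would_deadlock(&a) ? "deadlock" : "ok");
	return 0;
}

The real detector differs in that a lock can have several waiters and holders, so the walk is a bounded depth-first search over a lock_graph of up to eight entries, with btree_trans_abort_preference() deciding which transaction gets restarted when a cycle is found.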
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
deleted file mode 100644
index 7c07f9fa9add..000000000000
--- a/fs/bcachefs/btree_locking.h
+++ /dev/null
@@ -1,446 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BTREE_LOCKING_H
-#define _BCACHEFS_BTREE_LOCKING_H
-
-/*
- * Only for internal btree use:
- *
- * The btree iterator tracks what locks it wants to take, and what locks it
- * currently has - here we have wrappers for locking/unlocking btree nodes and
- * updating the iterator state
- */
-
-#include "btree_iter.h"
-#include "six.h"
-
-void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);
-
-void bch2_trans_unlock_noassert(struct btree_trans *);
-
-static inline bool is_btree_node(struct btree_path *path, unsigned l)
-{
- return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
-}
-
-static inline struct btree_transaction_stats *btree_trans_stats(struct btree_trans *trans)
-{
- return trans->fn_idx < ARRAY_SIZE(trans->c->btree_transaction_stats)
- ? &trans->c->btree_transaction_stats[trans->fn_idx]
- : NULL;
-}
-
-/* matches six lock types */
-enum btree_node_locked_type {
- BTREE_NODE_UNLOCKED = -1,
- BTREE_NODE_READ_LOCKED = SIX_LOCK_read,
- BTREE_NODE_INTENT_LOCKED = SIX_LOCK_intent,
- BTREE_NODE_WRITE_LOCKED = SIX_LOCK_write,
-};
-
-static inline int btree_node_locked_type(struct btree_path *path,
- unsigned level)
-{
- return BTREE_NODE_UNLOCKED + ((path->nodes_locked >> (level << 1)) & 3);
-}
-
-static inline bool btree_node_write_locked(struct btree_path *path, unsigned l)
-{
- return btree_node_locked_type(path, l) == BTREE_NODE_WRITE_LOCKED;
-}
-
-static inline bool btree_node_intent_locked(struct btree_path *path, unsigned l)
-{
- return btree_node_locked_type(path, l) == BTREE_NODE_INTENT_LOCKED;
-}
-
-static inline bool btree_node_read_locked(struct btree_path *path, unsigned l)
-{
- return btree_node_locked_type(path, l) == BTREE_NODE_READ_LOCKED;
-}
-
-static inline bool btree_node_locked(struct btree_path *path, unsigned level)
-{
- return btree_node_locked_type(path, level) != BTREE_NODE_UNLOCKED;
-}
-
-static inline void mark_btree_node_locked_noreset(struct btree_path *path,
- unsigned level,
- enum btree_node_locked_type type)
-{
- /* relying on this to avoid a branch */
- BUILD_BUG_ON(SIX_LOCK_read != 0);
- BUILD_BUG_ON(SIX_LOCK_intent != 1);
-
- path->nodes_locked &= ~(3U << (level << 1));
- path->nodes_locked |= (type + 1) << (level << 1);
-}
-
-static inline void mark_btree_node_unlocked(struct btree_path *path,
- unsigned level)
-{
- EBUG_ON(btree_node_write_locked(path, level));
- mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
-}
-
-static inline void mark_btree_node_locked(struct btree_trans *trans,
- struct btree_path *path,
- unsigned level,
- enum btree_node_locked_type type)
-{
- mark_btree_node_locked_noreset(path, level, (enum btree_node_locked_type) type);
-#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
- path->l[level].lock_taken_time = local_clock();
-#endif
-}
-
-static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
-{
- return level < path->locks_want
- ? SIX_LOCK_intent
- : SIX_LOCK_read;
-}
-
-static inline enum btree_node_locked_type
-btree_lock_want(struct btree_path *path, int level)
-{
- if (level < path->level)
- return BTREE_NODE_UNLOCKED;
- if (level < path->locks_want)
- return BTREE_NODE_INTENT_LOCKED;
- if (level == path->level)
- return BTREE_NODE_READ_LOCKED;
- return BTREE_NODE_UNLOCKED;
-}
-
-static void btree_trans_lock_hold_time_update(struct btree_trans *trans,
- struct btree_path *path, unsigned level)
-{
-#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
- __bch2_time_stats_update(&btree_trans_stats(trans)->lock_hold_times,
- path->l[level].lock_taken_time,
- local_clock());
-#endif
-}
-
-/* unlock: */
-
-static inline void btree_node_unlock(struct btree_trans *trans,
- struct btree_path *path, unsigned level)
-{
- int lock_type = btree_node_locked_type(path, level);
-
- EBUG_ON(level >= BTREE_MAX_DEPTH);
- EBUG_ON(lock_type == BTREE_NODE_WRITE_LOCKED);
-
- if (lock_type != BTREE_NODE_UNLOCKED) {
- six_unlock_type(&path->l[level].b->c.lock, lock_type);
- btree_trans_lock_hold_time_update(trans, path, level);
- }
- mark_btree_node_unlocked(path, level);
-}
-
-static inline int btree_path_lowest_level_locked(struct btree_path *path)
-{
- return __ffs(path->nodes_locked) >> 1;
-}
-
-static inline int btree_path_highest_level_locked(struct btree_path *path)
-{
- return __fls(path->nodes_locked) >> 1;
-}
-
-static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
- struct btree_path *path)
-{
- btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);
-
- while (path->nodes_locked)
- btree_node_unlock(trans, path, btree_path_lowest_level_locked(path));
-}
-
-/*
- * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
- * succeed:
- */
-static inline void
-bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
- struct btree *b)
-{
- struct btree_path *linked;
- unsigned i;
-
- EBUG_ON(path->l[b->c.level].b != b);
- EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
- EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);
-
- mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
-
- trans_for_each_path_with_node(trans, b, linked, i)
- linked->l[b->c.level].lock_seq++;
-
- six_unlock_write(&b->c.lock);
-}
-
-void bch2_btree_node_unlock_write(struct btree_trans *,
- struct btree_path *, struct btree *);
-
-int bch2_six_check_for_deadlock(struct six_lock *lock, void *p);
-
-/* lock: */
-
-static inline void trans_set_locked(struct btree_trans *trans)
-{
- if (!trans->locked) {
- lock_acquire_exclusive(&trans->dep_map, 0, 0, NULL, _THIS_IP_);
- trans->locked = true;
- trans->last_unlock_ip = 0;
-
- trans->pf_memalloc_nofs = (current->flags & PF_MEMALLOC_NOFS) != 0;
- current->flags |= PF_MEMALLOC_NOFS;
- }
-}
-
-static inline void trans_set_unlocked(struct btree_trans *trans)
-{
- if (trans->locked) {
- lock_release(&trans->dep_map, _THIS_IP_);
- trans->locked = false;
- trans->last_unlock_ip = _RET_IP_;
-
- if (!trans->pf_memalloc_nofs)
- current->flags &= ~PF_MEMALLOC_NOFS;
- }
-}
-
-static inline int __btree_node_lock_nopath(struct btree_trans *trans,
- struct btree_bkey_cached_common *b,
- enum six_lock_type type,
- bool lock_may_not_fail,
- unsigned long ip)
-{
- trans->lock_may_not_fail = lock_may_not_fail;
- trans->lock_must_abort = false;
- trans->locking = b;
-
- int ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait,
- bch2_six_check_for_deadlock, trans, ip);
- WRITE_ONCE(trans->locking, NULL);
- WRITE_ONCE(trans->locking_wait.start_time, 0);
-
- if (!ret)
- trace_btree_path_lock(trans, _THIS_IP_, b);
- return ret;
-}
-
-static inline int __must_check
-btree_node_lock_nopath(struct btree_trans *trans,
- struct btree_bkey_cached_common *b,
- enum six_lock_type type,
- unsigned long ip)
-{
- return __btree_node_lock_nopath(trans, b, type, false, ip);
-}
-
-static inline void btree_node_lock_nopath_nofail(struct btree_trans *trans,
- struct btree_bkey_cached_common *b,
- enum six_lock_type type)
-{
- int ret = __btree_node_lock_nopath(trans, b, type, true, _THIS_IP_);
-
- BUG_ON(ret);
-}
-
-/*
- * Lock a btree node if we already have it locked on one of our linked
- * iterators:
- */
-static inline bool btree_node_lock_increment(struct btree_trans *trans,
- struct btree_bkey_cached_common *b,
- unsigned level,
- enum btree_node_locked_type want)
-{
- struct btree_path *path;
- unsigned i;
-
- trans_for_each_path(trans, path, i)
- if (&path->l[level].b->c == b &&
- btree_node_locked_type(path, level) >= want) {
- six_lock_increment(&b->lock, (enum six_lock_type) want);
- return true;
- }
-
- return false;
-}
-
-static inline int btree_node_lock(struct btree_trans *trans,
- struct btree_path *path,
- struct btree_bkey_cached_common *b,
- unsigned level,
- enum six_lock_type type,
- unsigned long ip)
-{
- int ret = 0;
-
- EBUG_ON(level >= BTREE_MAX_DEPTH);
- bch2_trans_verify_not_unlocked(trans);
-
- if (likely(six_trylock_type(&b->lock, type)) ||
- btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
- !(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
-#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
- path->l[b->level].lock_taken_time = local_clock();
-#endif
- }
-
- return ret;
-}
-
-int __bch2_btree_node_lock_write(struct btree_trans *, struct btree_path *,
- struct btree_bkey_cached_common *b, bool);
-
-static inline int __btree_node_lock_write(struct btree_trans *trans,
- struct btree_path *path,
- struct btree_bkey_cached_common *b,
- bool lock_may_not_fail)
-{
- EBUG_ON(&path->l[b->level].b->c != b);
- EBUG_ON(path->l[b->level].lock_seq != six_lock_seq(&b->lock));
- EBUG_ON(!btree_node_intent_locked(path, b->level));
-
- /*
- * six locks are unfair, and read locks block while a thread wants a
- * write lock: thus, we need to tell the cycle detector we have a write
- * lock _before_ taking the lock:
- */
- mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);
-
- return likely(six_trylock_write(&b->lock))
- ? 0
- : __bch2_btree_node_lock_write(trans, path, b, lock_may_not_fail);
-}
-
-static inline int __must_check
-bch2_btree_node_lock_write(struct btree_trans *trans,
- struct btree_path *path,
- struct btree_bkey_cached_common *b)
-{
- return __btree_node_lock_write(trans, path, b, false);
-}
-
-void bch2_btree_node_lock_write_nofail(struct btree_trans *,
- struct btree_path *,
- struct btree_bkey_cached_common *);
-
-/* relock: */
-
-bool bch2_btree_path_relock_norestart(struct btree_trans *, struct btree_path *);
-int __bch2_btree_path_relock(struct btree_trans *,
- struct btree_path *, unsigned long);
-
-static inline int bch2_btree_path_relock(struct btree_trans *trans,
- struct btree_path *path, unsigned long trace_ip)
-{
- return btree_node_locked(path, path->level)
- ? 0
- : __bch2_btree_path_relock(trans, path, trace_ip);
-}
-
-bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned, bool trace);
-
-static inline bool bch2_btree_node_relock(struct btree_trans *trans,
- struct btree_path *path, unsigned level)
-{
- EBUG_ON(btree_node_locked(path, level) &&
- !btree_node_write_locked(path, level) &&
- btree_node_locked_type(path, level) != __btree_lock_want(path, level));
-
- return likely(btree_node_locked(path, level)) ||
- (!IS_ERR_OR_NULL(path->l[level].b) &&
- __bch2_btree_node_relock(trans, path, level, true));
-}
-
-static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
- struct btree_path *path, unsigned level)
-{
- EBUG_ON(btree_node_locked(path, level) &&
- !btree_node_write_locked(path, level) &&
- btree_node_locked_type(path, level) != __btree_lock_want(path, level));
-
- return likely(btree_node_locked(path, level)) ||
- (!IS_ERR_OR_NULL(path->l[level].b) &&
- __bch2_btree_node_relock(trans, path, level, false));
-}
-
-/* upgrade */
-
-bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
- struct btree_path *, unsigned,
- struct get_locks_fail *);
-
-bool __bch2_btree_path_upgrade(struct btree_trans *,
- struct btree_path *, unsigned,
- struct get_locks_fail *);
-
-static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
- struct btree_path *path,
- unsigned new_locks_want)
-{
- struct get_locks_fail f = {};
- unsigned old_locks_want = path->locks_want;
-
- new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);
-
- if (path->locks_want < new_locks_want
- ? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f)
- : path->nodes_locked)
- return 0;
-
- trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
- old_locks_want, new_locks_want, &f);
- return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
-}
-
-/* misc: */
-
-static inline void btree_path_set_should_be_locked(struct btree_trans *trans, struct btree_path *path)
-{
- EBUG_ON(!btree_node_locked(path, path->level));
- EBUG_ON(path->uptodate);
-
- path->should_be_locked = true;
- trace_btree_path_should_be_locked(trans, path);
-}
-
-static inline void __btree_path_set_level_up(struct btree_trans *trans,
- struct btree_path *path,
- unsigned l)
-{
- btree_node_unlock(trans, path, l);
- path->l[l].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
-}
-
-static inline void btree_path_set_level_up(struct btree_trans *trans,
- struct btree_path *path)
-{
- __btree_path_set_level_up(trans, path, path->level++);
- btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
-}
-
-/* debug */
-
-struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
- struct btree_path *,
- struct btree_bkey_cached_common *b,
- unsigned);
-
-int bch2_check_for_deadlock(struct btree_trans *, struct printbuf *);
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-void bch2_btree_path_verify_locks(struct btree_path *);
-void bch2_trans_verify_locks(struct btree_trans *);
-#else
-static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
-static inline void bch2_trans_verify_locks(struct btree_trans *trans) {}
-#endif
-
-#endif /* _BCACHEFS_BTREE_LOCKING_H */
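
One detail of the header removed above that is easy to miss: btree_path::nodes_locked packs the lock state of every level into two bits per level, storing (six lock type + 1) so that zero means unlocked; btree_node_locked_type(), mark_btree_node_locked_noreset() and the lowest/highest_level_locked helpers are all just shifts and masks over that word. Below is a minimal userspace sketch of the same encoding; everything outside the bit arithmetic is a made-up name.

/* Standalone illustration of the 2-bits-per-level nodes_locked encoding. */
#include <stdio.h>

enum node_locked_type {			/* mirrors btree_node_locked_type values */
	NODE_UNLOCKED      = -1,
	NODE_READ_LOCKED   = 0,		/* SIX_LOCK_read */
	NODE_INTENT_LOCKED = 1,		/* SIX_LOCK_intent */
	NODE_WRITE_LOCKED  = 2,		/* SIX_LOCK_write */
};

static void mark_locked(unsigned *nodes_locked, unsigned level,
			enum node_locked_type type)
{
	*nodes_locked &= ~(3U << (level << 1));
	*nodes_locked |= (unsigned)(type + 1) << (level << 1);
}

static int locked_type(unsigned nodes_locked, unsigned level)
{
	return NODE_UNLOCKED + ((nodes_locked >> (level << 1)) & 3);
}

int main(void)
{
	unsigned nodes_locked = 0;

	mark_locked(&nodes_locked, 0, NODE_INTENT_LOCKED);	/* leaf: intent */
	mark_locked(&nodes_locked, 1, NODE_READ_LOCKED);	/* parent: read */

	for (unsigned l = 0; l < 4; l++)
		printf("level %u -> %d\n", l, locked_type(nodes_locked, l));

	printf("bitmap = 0x%x\n", nodes_locked);
	return 0;
}

With that layout, nodes_locked != 0 is enough to know whether anything is held, and __ffs(nodes_locked) >> 1 / __fls(nodes_locked) >> 1 give the lowest and highest locked levels directly, which is exactly what btree_path_lowest_level_locked() and btree_path_highest_level_locked() rely on.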
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
deleted file mode 100644
index 30131c3bdd97..000000000000
--- a/fs/bcachefs/btree_node_scan.c
+++ /dev/null
@@ -1,549 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "btree_cache.h"
-#include "btree_io.h"
-#include "btree_journal_iter.h"
-#include "btree_node_scan.h"
-#include "btree_update_interior.h"
-#include "buckets.h"
-#include "error.h"
-#include "journal_io.h"
-#include "recovery_passes.h"
-
-#include <linux/kthread.h>
-#include <linux/sort.h>
-
-struct find_btree_nodes_worker {
- struct closure *cl;
- struct find_btree_nodes *f;
- struct bch_dev *ca;
-};
-
-static void found_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct found_btree_node *n)
-{
- prt_printf(out, "%s l=%u seq=%u journal_seq=%llu cookie=%llx ",
- bch2_btree_id_str(n->btree_id), n->level, n->seq,
- n->journal_seq, n->cookie);
- bch2_bpos_to_text(out, n->min_key);
- prt_str(out, "-");
- bch2_bpos_to_text(out, n->max_key);
-
- if (n->range_updated)
- prt_str(out, " range updated");
- if (n->overwritten)
- prt_str(out, " overwritten");
-
- for (unsigned i = 0; i < n->nr_ptrs; i++) {
- prt_char(out, ' ');
- bch2_extent_ptr_to_text(out, c, n->ptrs + i);
- }
-}
-
-static void found_btree_nodes_to_text(struct printbuf *out, struct bch_fs *c, found_btree_nodes nodes)
-{
- printbuf_indent_add(out, 2);
- darray_for_each(nodes, i) {
- found_btree_node_to_text(out, c, i);
- prt_newline(out);
- }
- printbuf_indent_sub(out, 2);
-}
-
-static void found_btree_node_to_key(struct bkey_i *k, const struct found_btree_node *f)
-{
- struct bkey_i_btree_ptr_v2 *bp = bkey_btree_ptr_v2_init(k);
-
- set_bkey_val_u64s(&bp->k, sizeof(struct bch_btree_ptr_v2) / sizeof(u64) + f->nr_ptrs);
- bp->k.p = f->max_key;
- bp->v.seq = cpu_to_le64(f->cookie);
- bp->v.sectors_written = 0;
- bp->v.flags = 0;
- bp->v.sectors_written = cpu_to_le16(f->sectors_written);
- bp->v.min_key = f->min_key;
- SET_BTREE_PTR_RANGE_UPDATED(&bp->v, f->range_updated);
- memcpy(bp->v.start, f->ptrs, sizeof(struct bch_extent_ptr) * f->nr_ptrs);
-}
-
-static inline u64 bkey_journal_seq(struct bkey_s_c k)
-{
- switch (k.k->type) {
- case KEY_TYPE_inode_v3:
- return le64_to_cpu(bkey_s_c_to_inode_v3(k).v->bi_journal_seq);
- default:
- return 0;
- }
-}
-
-static bool found_btree_node_is_readable(struct btree_trans *trans,
- struct found_btree_node *f)
-{
- struct { __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX); } tmp;
-
- found_btree_node_to_key(&tmp.k, f);
-
- struct btree *b = bch2_btree_node_get_noiter(trans, &tmp.k, f->btree_id, f->level, false);
- bool ret = !IS_ERR_OR_NULL(b);
- if (!ret)
- return ret;
-
- f->sectors_written = b->written;
- f->journal_seq = le64_to_cpu(b->data->keys.journal_seq);
-
- struct bkey_s_c k;
- struct bkey unpacked;
- struct btree_node_iter iter;
- for_each_btree_node_key_unpack(b, k, &iter, &unpacked)
- f->journal_seq = max(f->journal_seq, bkey_journal_seq(k));
-
- six_unlock_read(&b->c.lock);
-
- /*
- * We might update this node's range; if that happens, we need the node
- * to be re-read so the read path can trim keys that are no longer in
- * this node
- */
- if (b != btree_node_root(trans->c, b))
- bch2_btree_node_evict(trans, &tmp.k);
- return ret;
-}
-
-static int found_btree_node_cmp_cookie(const void *_l, const void *_r)
-{
- const struct found_btree_node *l = _l;
- const struct found_btree_node *r = _r;
-
- return cmp_int(l->btree_id, r->btree_id) ?:
- cmp_int(l->level, r->level) ?:
- cmp_int(l->cookie, r->cookie);
-}
-
-/*
- * Given two found btree nodes, if their sequence numbers are equal, take the
- * one that's readable:
- */
-static int found_btree_node_cmp_time(const struct found_btree_node *l,
- const struct found_btree_node *r)
-{
- return cmp_int(l->seq, r->seq) ?:
- cmp_int(l->journal_seq, r->journal_seq);
-}
-
-static int found_btree_node_cmp_pos(const void *_l, const void *_r)
-{
- const struct found_btree_node *l = _l;
- const struct found_btree_node *r = _r;
-
- return cmp_int(l->btree_id, r->btree_id) ?:
- -cmp_int(l->level, r->level) ?:
- bpos_cmp(l->min_key, r->min_key) ?:
- -found_btree_node_cmp_time(l, r);
-}
-
-static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca,
- struct bio *bio, struct btree_node *bn, u64 offset)
-{
- struct bch_fs *c = container_of(f, struct bch_fs, found_btree_nodes);
-
- bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ);
- bio->bi_iter.bi_sector = offset;
- bch2_bio_map(bio, bn, PAGE_SIZE);
-
- submit_bio_wait(bio);
- if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_read,
- "IO error in try_read_btree_node() at %llu: %s",
- offset, bch2_blk_status_to_str(bio->bi_status)))
- return;
-
- if (le64_to_cpu(bn->magic) != bset_magic(c))
- return;
-
- if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(&bn->keys))) {
- struct nonce nonce = btree_nonce(&bn->keys, 0);
- unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;
-
- bch2_encrypt(c, BSET_CSUM_TYPE(&bn->keys), nonce, &bn->flags, bytes);
- }
-
- if (btree_id_is_alloc(BTREE_NODE_ID(bn)))
- return;
-
- if (BTREE_NODE_LEVEL(bn) >= BTREE_MAX_DEPTH)
- return;
-
- if (BTREE_NODE_ID(bn) >= BTREE_ID_NR_MAX)
- return;
-
- rcu_read_lock();
- struct found_btree_node n = {
- .btree_id = BTREE_NODE_ID(bn),
- .level = BTREE_NODE_LEVEL(bn),
- .seq = BTREE_NODE_SEQ(bn),
- .cookie = le64_to_cpu(bn->keys.seq),
- .min_key = bn->min_key,
- .max_key = bn->max_key,
- .nr_ptrs = 1,
- .ptrs[0].type = 1 << BCH_EXTENT_ENTRY_ptr,
- .ptrs[0].offset = offset,
- .ptrs[0].dev = ca->dev_idx,
- .ptrs[0].gen = bucket_gen_get(ca, sector_to_bucket(ca, offset)),
- };
- rcu_read_unlock();
-
- if (bch2_trans_run(c, found_btree_node_is_readable(trans, &n))) {
- mutex_lock(&f->lock);
- if (BSET_BIG_ENDIAN(&bn->keys) != CPU_BIG_ENDIAN) {
- bch_err(c, "try_read_btree_node() can't handle endian conversion");
- f->ret = -EINVAL;
- goto unlock;
- }
-
- if (darray_push(&f->nodes, n))
- f->ret = -ENOMEM;
-unlock:
- mutex_unlock(&f->lock);
- }
-}
-
-static int read_btree_nodes_worker(void *p)
-{
- struct find_btree_nodes_worker *w = p;
- struct bch_fs *c = container_of(w->f, struct bch_fs, found_btree_nodes);
- struct bch_dev *ca = w->ca;
- void *buf = (void *) __get_free_page(GFP_KERNEL);
- struct bio *bio = bio_alloc(NULL, 1, 0, GFP_KERNEL);
- unsigned long last_print = jiffies;
-
- if (!buf || !bio) {
- bch_err(c, "read_btree_nodes_worker: error allocating bio/buf");
- w->f->ret = -ENOMEM;
- goto err;
- }
-
- for (u64 bucket = ca->mi.first_bucket; bucket < ca->mi.nbuckets; bucket++)
- for (unsigned bucket_offset = 0;
- bucket_offset + btree_sectors(c) <= ca->mi.bucket_size;
- bucket_offset += btree_sectors(c)) {
- if (time_after(jiffies, last_print + HZ * 30)) {
- u64 cur_sector = bucket * ca->mi.bucket_size + bucket_offset;
- u64 end_sector = ca->mi.nbuckets * ca->mi.bucket_size;
-
- bch_info(ca, "%s: %2u%% done", __func__,
- (unsigned) div64_u64(cur_sector * 100, end_sector));
- last_print = jiffies;
- }
-
- u64 sector = bucket * ca->mi.bucket_size + bucket_offset;
-
- if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_mi_btree_bitmap &&
- !bch2_dev_btree_bitmap_marked_sectors(ca, sector, btree_sectors(c)))
- continue;
-
- try_read_btree_node(w->f, ca, bio, buf, sector);
- }
-err:
- bio_put(bio);
- free_page((unsigned long) buf);
- percpu_ref_put(&ca->io_ref);
- closure_put(w->cl);
- kfree(w);
- return 0;
-}
-
-static int read_btree_nodes(struct find_btree_nodes *f)
-{
- struct bch_fs *c = container_of(f, struct bch_fs, found_btree_nodes);
- struct closure cl;
- int ret = 0;
-
- closure_init_stack(&cl);
-
- for_each_online_member(c, ca) {
- if (!(ca->mi.data_allowed & BIT(BCH_DATA_btree)))
- continue;
-
- struct find_btree_nodes_worker *w = kmalloc(sizeof(*w), GFP_KERNEL);
- struct task_struct *t;
-
- if (!w) {
- percpu_ref_put(&ca->io_ref);
- ret = -ENOMEM;
- goto err;
- }
-
- percpu_ref_get(&ca->io_ref);
- closure_get(&cl);
- w->cl = &cl;
- w->f = f;
- w->ca = ca;
-
- t = kthread_run(read_btree_nodes_worker, w, "read_btree_nodes/%s", ca->name);
- ret = PTR_ERR_OR_ZERO(t);
- if (ret) {
- percpu_ref_put(&ca->io_ref);
- closure_put(&cl);
- f->ret = ret;
- bch_err(c, "error starting kthread: %i", ret);
- break;
- }
- }
-err:
- closure_sync(&cl);
- return f->ret ?: ret;
-}
-
-static void bubble_up(struct found_btree_node *n, struct found_btree_node *end)
-{
- while (n + 1 < end &&
- found_btree_node_cmp_pos(n, n + 1) > 0) {
- swap(n[0], n[1]);
- n++;
- }
-}
-
-static int handle_overwrites(struct bch_fs *c,
- struct found_btree_node *start,
- struct found_btree_node *end)
-{
- struct found_btree_node *n;
-again:
- for (n = start + 1;
- n < end &&
- n->btree_id == start->btree_id &&
- n->level == start->level &&
- bpos_lt(n->min_key, start->max_key);
- n++) {
- int cmp = found_btree_node_cmp_time(start, n);
-
- if (cmp > 0) {
- if (bpos_cmp(start->max_key, n->max_key) >= 0)
- n->overwritten = true;
- else {
- n->range_updated = true;
- n->min_key = bpos_successor(start->max_key);
- n->range_updated = true;
- bubble_up(n, end);
- goto again;
- }
- } else if (cmp < 0) {
- BUG_ON(bpos_cmp(n->min_key, start->min_key) <= 0);
-
- start->max_key = bpos_predecessor(n->min_key);
- start->range_updated = true;
- } else if (n->level) {
- n->overwritten = true;
- } else {
- if (bpos_cmp(start->max_key, n->max_key) >= 0)
- n->overwritten = true;
- else {
- n->range_updated = true;
- n->min_key = bpos_successor(start->max_key);
- n->range_updated = true;
- bubble_up(n, end);
- goto again;
- }
- }
- }
-
- return 0;
-}
-
-int bch2_scan_for_btree_nodes(struct bch_fs *c)
-{
- struct find_btree_nodes *f = &c->found_btree_nodes;
- struct printbuf buf = PRINTBUF;
- size_t dst;
- int ret = 0;
-
- if (f->nodes.nr)
- return 0;
-
- mutex_init(&f->lock);
-
- ret = read_btree_nodes(f);
- if (ret)
- return ret;
-
- if (!f->nodes.nr) {
- bch_err(c, "%s: no btree nodes found", __func__);
- ret = -EINVAL;
- goto err;
- }
-
- if (0 && c->opts.verbose) {
- printbuf_reset(&buf);
- prt_printf(&buf, "%s: nodes found:\n", __func__);
- found_btree_nodes_to_text(&buf, c, f->nodes);
- bch2_print_string_as_lines(KERN_INFO, buf.buf);
- }
-
- sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_cookie, NULL);
-
- dst = 0;
- darray_for_each(f->nodes, i) {
- struct found_btree_node *prev = dst ? f->nodes.data + dst - 1 : NULL;
-
- if (prev &&
- prev->cookie == i->cookie) {
- if (prev->nr_ptrs == ARRAY_SIZE(prev->ptrs)) {
- bch_err(c, "%s: found too many replicas for btree node", __func__);
- ret = -EINVAL;
- goto err;
- }
- prev->ptrs[prev->nr_ptrs++] = i->ptrs[0];
- } else {
- f->nodes.data[dst++] = *i;
- }
- }
- f->nodes.nr = dst;
-
- sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_pos, NULL);
-
- if (0 && c->opts.verbose) {
- printbuf_reset(&buf);
- prt_printf(&buf, "%s: nodes after merging replicas:\n", __func__);
- found_btree_nodes_to_text(&buf, c, f->nodes);
- bch2_print_string_as_lines(KERN_INFO, buf.buf);
- }
-
- dst = 0;
- darray_for_each(f->nodes, i) {
- if (i->overwritten)
- continue;
-
- ret = handle_overwrites(c, i, &darray_top(f->nodes));
- if (ret)
- goto err;
-
- BUG_ON(i->overwritten);
- f->nodes.data[dst++] = *i;
- }
- f->nodes.nr = dst;
-
- if (c->opts.verbose) {
- printbuf_reset(&buf);
- prt_printf(&buf, "%s: nodes found after overwrites:\n", __func__);
- found_btree_nodes_to_text(&buf, c, f->nodes);
- bch2_print_string_as_lines(KERN_INFO, buf.buf);
- }
-
- eytzinger0_sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_pos, NULL);
-err:
- printbuf_exit(&buf);
- return ret;
-}
-
-static int found_btree_node_range_start_cmp(const void *_l, const void *_r)
-{
- const struct found_btree_node *l = _l;
- const struct found_btree_node *r = _r;
-
- return cmp_int(l->btree_id, r->btree_id) ?:
- -cmp_int(l->level, r->level) ?:
- bpos_cmp(l->max_key, r->min_key);
-}
-
-#define for_each_found_btree_node_in_range(_f, _search, _idx) \
- for (size_t _idx = eytzinger0_find_gt((_f)->nodes.data, (_f)->nodes.nr, \
- sizeof((_f)->nodes.data[0]), \
- found_btree_node_range_start_cmp, &search); \
- _idx < (_f)->nodes.nr && \
- (_f)->nodes.data[_idx].btree_id == _search.btree_id && \
- (_f)->nodes.data[_idx].level == _search.level && \
- bpos_lt((_f)->nodes.data[_idx].min_key, _search.max_key); \
- _idx = eytzinger0_next(_idx, (_f)->nodes.nr))
-
-bool bch2_btree_node_is_stale(struct bch_fs *c, struct btree *b)
-{
- struct find_btree_nodes *f = &c->found_btree_nodes;
-
- struct found_btree_node search = {
- .btree_id = b->c.btree_id,
- .level = b->c.level,
- .min_key = b->data->min_key,
- .max_key = b->key.k.p,
- };
-
- for_each_found_btree_node_in_range(f, search, idx)
- if (f->nodes.data[idx].seq > BTREE_NODE_SEQ(b->data))
- return true;
- return false;
-}
-
-bool bch2_btree_has_scanned_nodes(struct bch_fs *c, enum btree_id btree)
-{
- struct found_btree_node search = {
- .btree_id = btree,
- .level = 0,
- .min_key = POS_MIN,
- .max_key = SPOS_MAX,
- };
-
- for_each_found_btree_node_in_range(&c->found_btree_nodes, search, idx)
- return true;
- return false;
-}
-
-int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree,
- unsigned level, struct bpos node_min, struct bpos node_max)
-{
- if (btree_id_is_alloc(btree))
- return 0;
-
- struct find_btree_nodes *f = &c->found_btree_nodes;
-
- int ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes);
- if (ret)
- return ret;
-
- if (c->opts.verbose) {
- struct printbuf buf = PRINTBUF;
-
- prt_printf(&buf, "recovering %s l=%u ", bch2_btree_id_str(btree), level);
- bch2_bpos_to_text(&buf, node_min);
- prt_str(&buf, " - ");
- bch2_bpos_to_text(&buf, node_max);
-
- bch_info(c, "%s(): %s", __func__, buf.buf);
- printbuf_exit(&buf);
- }
-
- struct found_btree_node search = {
- .btree_id = btree,
- .level = level,
- .min_key = node_min,
- .max_key = node_max,
- };
-
- for_each_found_btree_node_in_range(f, search, idx) {
- struct found_btree_node n = f->nodes.data[idx];
-
- n.range_updated |= bpos_lt(n.min_key, node_min);
- n.min_key = bpos_max(n.min_key, node_min);
-
- n.range_updated |= bpos_gt(n.max_key, node_max);
- n.max_key = bpos_min(n.max_key, node_max);
-
- struct { __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX); } tmp;
-
- found_btree_node_to_key(&tmp.k, &n);
-
- struct printbuf buf = PRINTBUF;
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&tmp.k));
- bch_verbose(c, "%s(): recovering %s", __func__, buf.buf);
- printbuf_exit(&buf);
-
- BUG_ON(bch2_bkey_validate(c, bkey_i_to_s_c(&tmp.k), BKEY_TYPE_btree, 0));
-
- ret = bch2_journal_key_insert(c, btree, level + 1, &tmp.k);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-void bch2_find_btree_nodes_exit(struct find_btree_nodes *f)
-{
- darray_exit(&f->nodes);
-}
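
handle_overwrites() in the file removed above reconciles recovered nodes whose key ranges overlap at the same btree and level: the node with the newer sequence numbers keeps its range, while the older one is either marked overwritten or trimmed to start just past the newer node's max_key (or to end just before its min_key). The following simplified sketch shows that resolution with integer keys standing in for struct bpos; struct fnode and resolve() are hypothetical names.

/* Simplified sketch of the range resolution done by handle_overwrites(). */
#include <stdbool.h>
#include <stdio.h>

struct fnode {
	unsigned seq;		/* newer sequence number wins */
	int min_key, max_key;	/* inclusive key range covered by the node */
	bool overwritten;
};

static void resolve(struct fnode *start, struct fnode *n)
{
	if (n->min_key > start->max_key)
		return;					/* no overlap */

	if (start->seq > n->seq) {
		if (start->max_key >= n->max_key)
			n->overwritten = true;		/* fully shadowed by newer node */
		else
			n->min_key = start->max_key + 1; /* trim front of older node */
	} else if (start->seq < n->seq) {
		start->max_key = n->min_key - 1;	/* trim back of older node */
	}
}

int main(void)
{
	struct fnode newer = { .seq = 2, .min_key = 0,  .max_key = 50 };
	struct fnode older = { .seq = 1, .min_key = 30, .max_key = 80 };

	resolve(&newer, &older);
	printf("older now covers [%d, %d], overwritten=%d\n",
	       older.min_key, older.max_key, older.overwritten);
	return 0;
}

The real code additionally re-sorts a trimmed node with bubble_up() so the array stays ordered by (btree, level, min_key), and it breaks ties on equal sequence numbers by preferring the readable copy, per found_btree_node_cmp_time().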
diff --git a/fs/bcachefs/btree_node_scan.h b/fs/bcachefs/btree_node_scan.h
deleted file mode 100644
index 08687b209787..000000000000
--- a/fs/bcachefs/btree_node_scan.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BTREE_NODE_SCAN_H
-#define _BCACHEFS_BTREE_NODE_SCAN_H
-
-int bch2_scan_for_btree_nodes(struct bch_fs *);
-bool bch2_btree_node_is_stale(struct bch_fs *, struct btree *);
-bool bch2_btree_has_scanned_nodes(struct bch_fs *, enum btree_id);
-int bch2_get_scanned_nodes(struct bch_fs *, enum btree_id, unsigned, struct bpos, struct bpos);
-void bch2_find_btree_nodes_exit(struct find_btree_nodes *);
-
-#endif /* _BCACHEFS_BTREE_NODE_SCAN_H */
diff --git a/fs/bcachefs/btree_node_scan_types.h b/fs/bcachefs/btree_node_scan_types.h
deleted file mode 100644
index b6c36c45d0be..000000000000
--- a/fs/bcachefs/btree_node_scan_types.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BTREE_NODE_SCAN_TYPES_H
-#define _BCACHEFS_BTREE_NODE_SCAN_TYPES_H
-
-#include "darray.h"
-
-struct found_btree_node {
- bool range_updated:1;
- bool overwritten:1;
- u8 btree_id;
- u8 level;
- unsigned sectors_written;
- u32 seq;
- u64 journal_seq;
- u64 cookie;
-
- struct bpos min_key;
- struct bpos max_key;
-
- unsigned nr_ptrs;
- struct bch_extent_ptr ptrs[BCH_REPLICAS_MAX];
-};
-
-typedef DARRAY(struct found_btree_node) found_btree_nodes;
-
-struct find_btree_nodes {
- int ret;
- struct mutex lock;
- found_btree_nodes nodes;
-};
-
-#endif /* _BCACHEFS_BTREE_NODE_SCAN_TYPES_H */
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
deleted file mode 100644
index 9bf471fa4361..000000000000
--- a/fs/bcachefs/btree_trans_commit.c
+++ /dev/null
@@ -1,1157 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "alloc_foreground.h"
-#include "btree_gc.h"
-#include "btree_io.h"
-#include "btree_iter.h"
-#include "btree_journal_iter.h"
-#include "btree_key_cache.h"
-#include "btree_update_interior.h"
-#include "btree_write_buffer.h"
-#include "buckets.h"
-#include "disk_accounting.h"
-#include "errcode.h"
-#include "error.h"
-#include "journal.h"
-#include "journal_io.h"
-#include "journal_reclaim.h"
-#include "replicas.h"
-#include "snapshot.h"
-
-#include <linux/prefetch.h>
-
-static const char * const trans_commit_flags_strs[] = {
-#define x(n, ...) #n,
- BCH_TRANS_COMMIT_FLAGS()
-#undef x
- NULL
-};
-
-void bch2_trans_commit_flags_to_text(struct printbuf *out, enum bch_trans_commit_flags flags)
-{
- enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
-
- prt_printf(out, "watermark=%s", bch2_watermarks[watermark]);
-
- flags >>= BCH_WATERMARK_BITS;
- if (flags) {
- prt_char(out, ' ');
- bch2_prt_bitflags(out, trans_commit_flags_strs, flags);
- }
-}
-
-static void verify_update_old_key(struct btree_trans *trans, struct btree_insert_entry *i)
-{
-#ifdef CONFIG_BCACHEFS_DEBUG
- struct bch_fs *c = trans->c;
- struct bkey u;
- struct bkey_s_c k = bch2_btree_path_peek_slot_exact(trans->paths + i->path, &u);
-
- if (unlikely(trans->journal_replay_not_finished)) {
- struct bkey_i *j_k =
- bch2_journal_keys_peek_slot(c, i->btree_id, i->level, i->k->k.p);
-
- if (j_k)
- k = bkey_i_to_s_c(j_k);
- }
-
- u = *k.k;
- u.needs_whiteout = i->old_k.needs_whiteout;
-
- BUG_ON(memcmp(&i->old_k, &u, sizeof(struct bkey)));
- BUG_ON(i->old_v != k.v);
-#endif
-}
-
-static inline struct btree_path_level *insert_l(struct btree_trans *trans, struct btree_insert_entry *i)
-{
- return (trans->paths + i->path)->l + i->level;
-}
-
-static inline bool same_leaf_as_prev(struct btree_trans *trans,
- struct btree_insert_entry *i)
-{
- return i != trans->updates &&
- insert_l(trans, &i[0])->b == insert_l(trans, &i[-1])->b;
-}
-
-static inline bool same_leaf_as_next(struct btree_trans *trans,
- struct btree_insert_entry *i)
-{
- return i + 1 < trans->updates + trans->nr_updates &&
- insert_l(trans, &i[0])->b == insert_l(trans, &i[1])->b;
-}
-
-inline void bch2_btree_node_prep_for_write(struct btree_trans *trans,
- struct btree_path *path,
- struct btree *b)
-{
- struct bch_fs *c = trans->c;
-
- if (unlikely(btree_node_just_written(b)) &&
- bch2_btree_post_write_cleanup(c, b))
- bch2_trans_node_reinit_iter(trans, b);
-
- /*
- * If the last bset has been written, or if it's gotten too big - start
- * a new bset to insert into:
- */
- if (want_new_bset(c, b))
- bch2_btree_init_next(trans, b);
-}
-
-static noinline int trans_lock_write_fail(struct btree_trans *trans, struct btree_insert_entry *i)
-{
- while (--i >= trans->updates) {
- if (same_leaf_as_prev(trans, i))
- continue;
-
- bch2_btree_node_unlock_write(trans, trans->paths + i->path, insert_l(trans, i)->b);
- }
-
- trace_and_count(trans->c, trans_restart_would_deadlock_write, trans);
- return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write);
-}
-
-static inline int bch2_trans_lock_write(struct btree_trans *trans)
-{
- EBUG_ON(trans->write_locked);
-
- trans_for_each_update(trans, i) {
- if (same_leaf_as_prev(trans, i))
- continue;
-
- if (bch2_btree_node_lock_write(trans, trans->paths + i->path, &insert_l(trans, i)->b->c))
- return trans_lock_write_fail(trans, i);
-
- if (!i->cached)
- bch2_btree_node_prep_for_write(trans, trans->paths + i->path, insert_l(trans, i)->b);
- }
-
- trans->write_locked = true;
- return 0;
-}
-
-static inline void bch2_trans_unlock_write(struct btree_trans *trans)
-{
- if (likely(trans->write_locked)) {
- trans_for_each_update(trans, i)
- if (btree_node_locked_type(trans->paths + i->path, i->level) ==
- BTREE_NODE_WRITE_LOCKED)
- bch2_btree_node_unlock_write_inlined(trans,
- trans->paths + i->path, insert_l(trans, i)->b);
- trans->write_locked = false;
- }
-}
-
-/* Inserting into a given leaf node (last stage of insert): */
-
-/* Handle overwrites and do insert, for non extents: */
-bool bch2_btree_bset_insert_key(struct btree_trans *trans,
- struct btree_path *path,
- struct btree *b,
- struct btree_node_iter *node_iter,
- struct bkey_i *insert)
-{
- struct bkey_packed *k;
- unsigned clobber_u64s = 0, new_u64s = 0;
-
- EBUG_ON(btree_node_just_written(b));
- EBUG_ON(bset_written(b, btree_bset_last(b)));
- EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
- EBUG_ON(bpos_lt(insert->k.p, b->data->min_key));
- EBUG_ON(bpos_gt(insert->k.p, b->data->max_key));
- EBUG_ON(insert->k.u64s > bch2_btree_keys_u64s_remaining(b));
- EBUG_ON(!b->c.level && !bpos_eq(insert->k.p, path->pos));
-
- k = bch2_btree_node_iter_peek_all(node_iter, b);
- if (k && bkey_cmp_left_packed(b, k, &insert->k.p))
- k = NULL;
-
- /* @k is the key being overwritten/deleted, if any: */
- EBUG_ON(k && bkey_deleted(k));
-
- /* Deleting, but not found? nothing to do: */
- if (bkey_deleted(&insert->k) && !k)
- return false;
-
- if (bkey_deleted(&insert->k)) {
- /* Deleting: */
- btree_account_key_drop(b, k);
- k->type = KEY_TYPE_deleted;
-
- if (k->needs_whiteout)
- push_whiteout(b, insert->k.p);
- k->needs_whiteout = false;
-
- if (k >= btree_bset_last(b)->start) {
- clobber_u64s = k->u64s;
- bch2_bset_delete(b, k, clobber_u64s);
- goto fix_iter;
- } else {
- bch2_btree_path_fix_key_modified(trans, b, k);
- }
-
- return true;
- }
-
- if (k) {
- /* Overwriting: */
- btree_account_key_drop(b, k);
- k->type = KEY_TYPE_deleted;
-
- insert->k.needs_whiteout = k->needs_whiteout;
- k->needs_whiteout = false;
-
- if (k >= btree_bset_last(b)->start) {
- clobber_u64s = k->u64s;
- goto overwrite;
- } else {
- bch2_btree_path_fix_key_modified(trans, b, k);
- }
- }
-
- k = bch2_btree_node_iter_bset_pos(node_iter, b, bset_tree_last(b));
-overwrite:
- bch2_bset_insert(b, k, insert, clobber_u64s);
- new_u64s = k->u64s;
-fix_iter:
- if (clobber_u64s != new_u64s)
- bch2_btree_node_iter_fix(trans, path, b, node_iter, k,
- clobber_u64s, new_u64s);
- return true;
-}
-
-static int __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
- unsigned i, u64 seq)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct btree_write *w = container_of(pin, struct btree_write, journal);
- struct btree *b = container_of(w, struct btree, writes[i]);
- struct btree_trans *trans = bch2_trans_get(c);
- unsigned long old, new;
- unsigned idx = w - b->writes;
-
- btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
-
- old = READ_ONCE(b->flags);
- do {
- new = old;
-
- if (!(old & (1 << BTREE_NODE_dirty)) ||
- !!(old & (1 << BTREE_NODE_write_idx)) != idx ||
- w->journal.seq != seq)
- break;
-
- new &= ~BTREE_WRITE_TYPE_MASK;
- new |= BTREE_WRITE_journal_reclaim;
- new |= 1 << BTREE_NODE_need_write;
- } while (!try_cmpxchg(&b->flags, &old, new));
-
- btree_node_write_if_need(c, b, SIX_LOCK_read);
- six_unlock_read(&b->c.lock);
-
- bch2_trans_put(trans);
- return 0;
-}
-
-int bch2_btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
-{
- return __btree_node_flush(j, pin, 0, seq);
-}
-
-int bch2_btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
-{
- return __btree_node_flush(j, pin, 1, seq);
-}
-
-inline void bch2_btree_add_journal_pin(struct bch_fs *c,
- struct btree *b, u64 seq)
-{
- struct btree_write *w = btree_current_write(b);
-
- bch2_journal_pin_add(&c->journal, seq, &w->journal,
- btree_node_write_idx(b) == 0
- ? bch2_btree_node_flush0
- : bch2_btree_node_flush1);
-}
-
-/**
- * bch2_btree_insert_key_leaf() - insert a key into a leaf node
- * @trans: btree transaction object
- * @path: path pointing to @insert's pos
- * @insert: key to insert
- * @journal_seq: sequence number of journal reservation
- */
-inline void bch2_btree_insert_key_leaf(struct btree_trans *trans,
- struct btree_path *path,
- struct bkey_i *insert,
- u64 journal_seq)
-{
- struct bch_fs *c = trans->c;
- struct btree *b = path_l(path)->b;
- struct bset_tree *t = bset_tree_last(b);
- struct bset *i = bset(b, t);
- int old_u64s = bset_u64s(t);
- int old_live_u64s = b->nr.live_u64s;
- int live_u64s_added, u64s_added;
-
- if (unlikely(!bch2_btree_bset_insert_key(trans, path, b,
- &path_l(path)->iter, insert)))
- return;
-
- i->journal_seq = cpu_to_le64(max(journal_seq, le64_to_cpu(i->journal_seq)));
-
- bch2_btree_add_journal_pin(c, b, journal_seq);
-
- if (unlikely(!btree_node_dirty(b))) {
- EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags));
- set_btree_node_dirty_acct(c, b);
- }
-
- live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
- u64s_added = (int) bset_u64s(t) - old_u64s;
-
- if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
- b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
- if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
- b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);
-
- if (u64s_added > live_u64s_added &&
- bch2_maybe_compact_whiteouts(c, b))
- bch2_trans_node_reinit_iter(trans, b);
-}
-
-/* Cached btree updates: */
-
-/* Normal update interface: */
-
-static inline void btree_insert_entry_checks(struct btree_trans *trans,
- struct btree_insert_entry *i)
-{
- struct btree_path *path = trans->paths + i->path;
-
- BUG_ON(!bpos_eq(i->k->k.p, path->pos));
- BUG_ON(i->cached != path->cached);
- BUG_ON(i->level != path->level);
- BUG_ON(i->btree_id != path->btree_id);
- EBUG_ON(!i->level &&
- btree_type_has_snapshots(i->btree_id) &&
- !(i->flags & BTREE_UPDATE_internal_snapshot_node) &&
- test_bit(JOURNAL_replay_done, &trans->c->journal.flags) &&
- i->k->k.p.snapshot &&
- bch2_snapshot_is_internal_node(trans->c, i->k->k.p.snapshot) > 0);
-}
-
-static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans,
- unsigned flags)
-{
- return bch2_journal_res_get(&trans->c->journal, &trans->journal_res,
- trans->journal_u64s, flags);
-}
-
-#define JSET_ENTRY_LOG_U64s 4
-
-static noinline void journal_transaction_name(struct btree_trans *trans)
-{
- struct bch_fs *c = trans->c;
- struct journal *j = &c->journal;
- struct jset_entry *entry =
- bch2_journal_add_entry(j, &trans->journal_res,
- BCH_JSET_ENTRY_log, 0, 0,
- JSET_ENTRY_LOG_U64s);
- struct jset_entry_log *l =
- container_of(entry, struct jset_entry_log, entry);
-
- strncpy(l->d, trans->fn, JSET_ENTRY_LOG_U64s * sizeof(u64));
-}
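-
-/*
- * The transaction name logged above is a BCH_JSET_ENTRY_log entry of
- * JSET_ENTRY_LOG_U64s u64s, i.e. 32 bytes: longer trans->fn strings are
- * silently truncated (and possibly left unterminated) by the strncpy().
- */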
-
-static inline int btree_key_can_insert(struct btree_trans *trans,
- struct btree *b, unsigned u64s)
-{
- if (!bch2_btree_node_insert_fits(b, u64s))
- return -BCH_ERR_btree_insert_btree_node_full;
-
- return 0;
-}
-
-noinline static int
-btree_key_can_insert_cached_slowpath(struct btree_trans *trans, unsigned flags,
- struct btree_path *path, unsigned new_u64s)
-{
- struct bkey_cached *ck = (void *) path->l[0].b;
- struct bkey_i *new_k;
- int ret;
-
- bch2_trans_unlock_write(trans);
- bch2_trans_unlock(trans);
-
- new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL);
- if (!new_k) {
- bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
- bch2_btree_id_str(path->btree_id), new_u64s);
- return -BCH_ERR_ENOMEM_btree_key_cache_insert;
- }
-
- ret = bch2_trans_relock(trans) ?:
- bch2_trans_lock_write(trans);
- if (unlikely(ret)) {
- kfree(new_k);
- return ret;
- }
-
- memcpy(new_k, ck->k, ck->u64s * sizeof(u64));
-
- trans_for_each_update(trans, i)
- if (i->old_v == &ck->k->v)
- i->old_v = &new_k->v;
-
- kfree(ck->k);
- ck->u64s = new_u64s;
- ck->k = new_k;
- return 0;
-}
-
-static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags,
- struct btree_path *path, unsigned u64s)
-{
- struct bch_fs *c = trans->c;
- struct bkey_cached *ck = (void *) path->l[0].b;
- unsigned new_u64s;
- struct bkey_i *new_k;
- unsigned watermark = flags & BCH_WATERMARK_MASK;
-
- EBUG_ON(path->level);
-
- if (watermark < BCH_WATERMARK_reclaim &&
- !test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
- bch2_btree_key_cache_must_wait(c))
- return -BCH_ERR_btree_insert_need_journal_reclaim;
-
- /*
- * bch2_varint_decode can read past the end of the buffer by at most 7
- * bytes (it won't be used):
- */
- u64s += 1;
-
- if (u64s <= ck->u64s)
- return 0;
-
- new_u64s = roundup_pow_of_two(u64s);
- new_k = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOWAIT|__GFP_NOWARN);
- if (unlikely(!new_k))
- return btree_key_can_insert_cached_slowpath(trans, flags, path, new_u64s);
-
- trans_for_each_update(trans, i)
- if (i->old_v == &ck->k->v)
- i->old_v = &new_k->v;
-
- ck->u64s = new_u64s;
- ck->k = new_k;
- return 0;
-}
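-
-/*
- * Resizing a cached key: the fast path above attempts a nonblocking
- * krealloc() while write locks are held; on failure the slowpath drops and
- * retakes locks around a GFP_KERNEL allocation. Either way, any pending
- * update whose old_v pointed into the old buffer is repointed, since the old
- * value is read through that pointer later in the commit path.
- */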
-
-/* Triggers: */
-
-static int run_one_mem_trigger(struct btree_trans *trans,
- struct btree_insert_entry *i,
- unsigned flags)
-{
- verify_update_old_key(trans, i);
-
- if (unlikely(flags & BTREE_TRIGGER_norun))
- return 0;
-
- struct bkey_s_c old = { &i->old_k, i->old_v };
- struct bkey_i *new = i->k;
- const struct bkey_ops *old_ops = bch2_bkey_type_ops(old.k->type);
- const struct bkey_ops *new_ops = bch2_bkey_type_ops(i->k->k.type);
-
- if (old_ops->trigger == new_ops->trigger)
- return bch2_key_trigger(trans, i->btree_id, i->level,
- old, bkey_i_to_s(new),
- BTREE_TRIGGER_insert|BTREE_TRIGGER_overwrite|flags);
- else
- return bch2_key_trigger_new(trans, i->btree_id, i->level,
- bkey_i_to_s(new), flags) ?:
- bch2_key_trigger_old(trans, i->btree_id, i->level,
- old, flags);
-}
-
-static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_entry *i,
- bool overwrite)
-{
- verify_update_old_key(trans, i);
-
- if ((i->flags & BTREE_TRIGGER_norun) ||
- !btree_node_type_has_trans_triggers(i->bkey_type))
- return 0;
-
- /*
- * Transactional triggers create new btree_insert_entries, so we can't
- * pass them a pointer to a btree_insert_entry, that memory is going to
- * move:
- */
- struct bkey old_k = i->old_k;
- struct bkey_s_c old = { &old_k, i->old_v };
- const struct bkey_ops *old_ops = bch2_bkey_type_ops(old.k->type);
- const struct bkey_ops *new_ops = bch2_bkey_type_ops(i->k->k.type);
- unsigned flags = i->flags|BTREE_TRIGGER_transactional;
-
- if (!i->insert_trigger_run &&
- !i->overwrite_trigger_run &&
- old_ops->trigger == new_ops->trigger) {
- i->overwrite_trigger_run = true;
- i->insert_trigger_run = true;
- return bch2_key_trigger(trans, i->btree_id, i->level, old, bkey_i_to_s(i->k),
- BTREE_TRIGGER_insert|
- BTREE_TRIGGER_overwrite|flags) ?: 1;
- } else if (overwrite && !i->overwrite_trigger_run) {
- i->overwrite_trigger_run = true;
- return bch2_key_trigger_old(trans, i->btree_id, i->level, old, flags) ?: 1;
- } else if (!overwrite && !i->insert_trigger_run) {
- i->insert_trigger_run = true;
- return bch2_key_trigger_new(trans, i->btree_id, i->level, bkey_i_to_s(i->k), flags) ?: 1;
- } else {
- return 0;
- }
-}
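-
-/*
- * Return convention for run_one_trans_trigger(): 1 means a trigger actually
- * ran (the caller must loop again, since transactional triggers may append
- * new updates), 0 means there was nothing left to run, and a negative value
- * is an error.
- */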
-
-static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
- unsigned btree_id_start)
-{
- for (int overwrite = 1; overwrite >= 0; --overwrite) {
- bool trans_trigger_run;
-
- /*
- * Running triggers will append more updates to the list of updates as
- * we're walking it:
- */
- do {
- trans_trigger_run = false;
-
- for (unsigned i = btree_id_start;
- i < trans->nr_updates && trans->updates[i].btree_id <= btree_id;
- i++) {
- if (trans->updates[i].btree_id != btree_id)
- continue;
-
- int ret = run_one_trans_trigger(trans, trans->updates + i, overwrite);
- if (ret < 0)
- return ret;
- if (ret)
- trans_trigger_run = true;
- }
- } while (trans_trigger_run);
- }
-
- return 0;
-}
-
-static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
-{
- unsigned btree_id = 0, btree_id_start = 0;
- int ret = 0;
-
- /*
- *
- * For a given btree, this algorithm runs insert triggers before
- * overwrite triggers: this is so that when extents are being moved
- * (e.g. by FALLOCATE_FL_INSERT_RANGE), we don't drop references before
- * they are re-added.
- */
- for (btree_id = 0; btree_id < BTREE_ID_NR; btree_id++) {
- if (btree_id == BTREE_ID_alloc)
- continue;
-
- while (btree_id_start < trans->nr_updates &&
- trans->updates[btree_id_start].btree_id < btree_id)
- btree_id_start++;
-
- ret = run_btree_triggers(trans, btree_id, btree_id_start);
- if (ret)
- return ret;
- }
-
- for (unsigned idx = 0; idx < trans->nr_updates; idx++) {
- struct btree_insert_entry *i = trans->updates + idx;
-
- if (i->btree_id > BTREE_ID_alloc)
- break;
- if (i->btree_id == BTREE_ID_alloc) {
- ret = run_btree_triggers(trans, BTREE_ID_alloc, idx);
- if (ret)
- return ret;
- break;
- }
- }
-
-#ifdef CONFIG_BCACHEFS_DEBUG
- trans_for_each_update(trans, i)
- BUG_ON(!(i->flags & BTREE_TRIGGER_norun) &&
- btree_node_type_has_trans_triggers(i->bkey_type) &&
- (!i->insert_trigger_run || !i->overwrite_trigger_run));
-#endif
- return 0;
-}
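-
-/*
- * The alloc btree is deliberately handled last above - likely because
- * transactional triggers on the other btrees (e.g. extents) can themselves
- * queue alloc btree updates, which then need their own triggers run.
- */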
-
-static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans)
-{
- trans_for_each_update(trans, i)
- if (btree_node_type_has_triggers(i->bkey_type) &&
- gc_visited(trans->c, gc_pos_btree(i->btree_id, i->level, i->k->k.p))) {
- int ret = run_one_mem_trigger(trans, i, i->flags|BTREE_TRIGGER_gc);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static struct bversion journal_pos_to_bversion(struct journal_res *res, unsigned offset)
-{
- return (struct bversion) {
- .hi = res->seq >> 32,
- .lo = (res->seq << 32) | (res->offset + offset),
- };
-}
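-
-/*
- * Accounting keys get a bversion derived from their position in the journal,
- * giving every accounting update a unique, ordered version. With arbitrarily
- * chosen values res->seq = 0x100000002, res->offset = 5 and offset = 3, this
- * yields .hi = 0x1 and .lo = 0x0000000200000008.
- */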
-
-static inline int
-bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
- struct btree_insert_entry **stopped_at,
- unsigned long trace_ip)
-{
- struct bch_fs *c = trans->c;
- struct btree_trans_commit_hook *h;
- unsigned u64s = 0;
- int ret = 0;
-
- bch2_trans_verify_not_unlocked(trans);
- bch2_trans_verify_not_in_restart(trans);
-
- if (race_fault()) {
- trace_and_count(c, trans_restart_fault_inject, trans, trace_ip);
- return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_fault_inject);
- }
-
- /*
- * Check if the insert will fit in the leaf node with the write lock
- * held, otherwise another thread could write the node changing the
- * amount of space available:
- */
-
- prefetch(&trans->c->journal.flags);
-
- trans_for_each_update(trans, i) {
- /* Multiple inserts might go to same leaf: */
- if (!same_leaf_as_prev(trans, i))
- u64s = 0;
-
- u64s += i->k->k.u64s;
- ret = !i->cached
- ? btree_key_can_insert(trans, insert_l(trans, i)->b, u64s)
- : btree_key_can_insert_cached(trans, flags, trans->paths + i->path, u64s);
- if (ret) {
- *stopped_at = i;
- return ret;
- }
-
- i->k->k.needs_whiteout = false;
- }
-
- /*
- * Don't get journal reservation until after we know insert will
- * succeed:
- */
- if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res))) {
- ret = bch2_trans_journal_res_get(trans,
- (flags & BCH_WATERMARK_MASK)|
- JOURNAL_RES_GET_NONBLOCK);
- if (ret)
- return ret;
-
- if (unlikely(trans->journal_transaction_names))
- journal_transaction_name(trans);
- }
-
- /*
- * Not allowed to fail after we've gotten our journal reservation - we
- * have to use it:
- */
-
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
- !(flags & BCH_TRANS_COMMIT_no_journal_res)) {
- if (bch2_journal_seq_verify)
- trans_for_each_update(trans, i)
- i->k->k.bversion.lo = trans->journal_res.seq;
- else if (bch2_inject_invalid_keys)
- trans_for_each_update(trans, i)
- i->k->k.bversion = MAX_VERSION;
- }
-
- h = trans->hooks;
- while (h) {
- ret = h->fn(trans, h);
- if (ret)
- return ret;
- h = h->next;
- }
-
- struct jset_entry *entry = trans->journal_entries;
-
- percpu_down_read(&c->mark_lock);
-
- for (entry = trans->journal_entries;
- entry != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
- entry = vstruct_next(entry))
- if (entry->type == BCH_JSET_ENTRY_write_buffer_keys &&
- entry->start->k.type == KEY_TYPE_accounting) {
- BUG_ON(!trans->journal_res.ref);
-
- struct bkey_i_accounting *a = bkey_i_to_accounting(entry->start);
-
- a->k.bversion = journal_pos_to_bversion(&trans->journal_res,
- (u64 *) entry - (u64 *) trans->journal_entries);
- BUG_ON(bversion_zero(a->k.bversion));
-
- if (likely(!(flags & BCH_TRANS_COMMIT_skip_accounting_apply))) {
- ret = bch2_accounting_mem_mod_locked(trans, accounting_i_to_s_c(a), BCH_ACCOUNTING_normal);
- if (ret)
- goto revert_fs_usage;
- }
- }
- percpu_up_read(&c->mark_lock);
-
- /* XXX: we only want to run this if deltas are nonzero */
- bch2_trans_account_disk_usage_change(trans);
-
- trans_for_each_update(trans, i)
- if (btree_node_type_has_atomic_triggers(i->bkey_type)) {
- ret = run_one_mem_trigger(trans, i, BTREE_TRIGGER_atomic|i->flags);
- if (ret)
- goto fatal_err;
- }
-
- if (unlikely(c->gc_pos.phase)) {
- ret = bch2_trans_commit_run_gc_triggers(trans);
- if (ret)
- goto fatal_err;
- }
-
- trans_for_each_update(trans, i) {
- enum bch_validate_flags invalid_flags = 0;
-
- if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
- invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
-
- ret = bch2_bkey_validate(c, bkey_i_to_s_c(i->k),
- i->bkey_type, invalid_flags);
- if (unlikely(ret)) {
- bch2_trans_inconsistent(trans, "invalid bkey on insert from %s -> %ps\n",
- trans->fn, (void *) i->ip_allocated);
- goto fatal_err;
- }
- btree_insert_entry_checks(trans, i);
- }
-
- for (struct jset_entry *i = trans->journal_entries;
- i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
- i = vstruct_next(i)) {
- enum bch_validate_flags invalid_flags = 0;
-
- if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
- invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
-
- ret = bch2_journal_entry_validate(c, NULL, i,
- bcachefs_metadata_version_current,
- CPU_BIG_ENDIAN, invalid_flags);
- if (unlikely(ret)) {
- bch2_trans_inconsistent(trans, "invalid journal entry on insert from %s\n",
- trans->fn);
- goto fatal_err;
- }
- }
-
- if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res))) {
- struct journal *j = &c->journal;
- struct jset_entry *entry;
-
- trans_for_each_update(trans, i) {
- if (i->key_cache_already_flushed)
- continue;
-
- if (i->flags & BTREE_UPDATE_nojournal)
- continue;
-
- verify_update_old_key(trans, i);
-
- if (trans->journal_transaction_names) {
- entry = bch2_journal_add_entry(j, &trans->journal_res,
- BCH_JSET_ENTRY_overwrite,
- i->btree_id, i->level,
- i->old_k.u64s);
- bkey_reassemble((struct bkey_i *) entry->start,
- (struct bkey_s_c) { &i->old_k, i->old_v });
- }
-
- entry = bch2_journal_add_entry(j, &trans->journal_res,
- BCH_JSET_ENTRY_btree_keys,
- i->btree_id, i->level,
- i->k->k.u64s);
- bkey_copy((struct bkey_i *) entry->start, i->k);
- }
-
- memcpy_u64s_small(journal_res_entry(&c->journal, &trans->journal_res),
- trans->journal_entries,
- trans->journal_entries_u64s);
-
- trans->journal_res.offset += trans->journal_entries_u64s;
- trans->journal_res.u64s -= trans->journal_entries_u64s;
-
- if (trans->journal_seq)
- *trans->journal_seq = trans->journal_res.seq;
- }
-
- trans_for_each_update(trans, i) {
- struct btree_path *path = trans->paths + i->path;
-
- if (!i->cached)
- bch2_btree_insert_key_leaf(trans, path, i->k, trans->journal_res.seq);
- else if (!i->key_cache_already_flushed)
- bch2_btree_insert_key_cached(trans, flags, i);
- else
- bch2_btree_key_cache_drop(trans, path);
- }
-
- return 0;
-fatal_err:
- bch2_fs_fatal_error(c, "fatal error in transaction commit: %s", bch2_err_str(ret));
- percpu_down_read(&c->mark_lock);
-revert_fs_usage:
- for (struct jset_entry *entry2 = trans->journal_entries;
- entry2 != entry;
- entry2 = vstruct_next(entry2))
- if (entry2->type == BCH_JSET_ENTRY_write_buffer_keys &&
- entry2->start->k.type == KEY_TYPE_accounting) {
- struct bkey_s_accounting a = bkey_i_to_s_accounting(entry2->start);
-
- bch2_accounting_neg(a);
- bch2_accounting_mem_mod_locked(trans, a.c, BCH_ACCOUNTING_normal);
- bch2_accounting_neg(a);
- }
- percpu_up_read(&c->mark_lock);
- return ret;
-}
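-
-/*
- * Summary of the write-locked portion of the commit, as implemented above:
- * check that every update fits in its leaf node (or cached key), take a
- * nonblocking journal reservation, run commit hooks, apply accounting, run
- * atomic and gc triggers, validate keys and journal entries, copy everything
- * into the journal reservation, then apply the updates to the btree or key
- * cache. Errors after accounting has been applied are unwound via
- * revert_fs_usage.
- */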
-
-static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans)
-{
- /*
- * Accounting keys aren't deduped in the journal: we have to compare
- * each individual update against what's in the btree to see if it has
- * been applied yet, and accounting updates also don't overwrite,
- * they're deltas that accumulate.
- */
- trans_for_each_update(trans, i)
- if (i->k->k.type != KEY_TYPE_accounting)
- bch2_journal_key_overwritten(trans->c, i->btree_id, i->level, i->k->k.p);
-}
-
-static int bch2_trans_commit_journal_pin_flush(struct journal *j,
- struct journal_entry_pin *_pin, u64 seq)
-{
- return 0;
-}
-
-/*
- * Get journal reservation, take write locks, and attempt to do btree update(s):
- */
-static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags,
- struct btree_insert_entry **stopped_at,
- unsigned long trace_ip)
-{
- struct bch_fs *c = trans->c;
- int ret = 0, u64s_delta = 0;
-
- for (unsigned idx = 0; idx < trans->nr_updates; idx++) {
- struct btree_insert_entry *i = trans->updates + idx;
- if (i->cached)
- continue;
-
- u64s_delta += !bkey_deleted(&i->k->k) ? i->k->k.u64s : 0;
- u64s_delta -= i->old_btree_u64s;
-
- if (!same_leaf_as_next(trans, i)) {
- if (u64s_delta <= 0) {
- ret = bch2_foreground_maybe_merge(trans, i->path,
- i->level, flags);
- if (unlikely(ret))
- return ret;
- }
-
- u64s_delta = 0;
- }
- }
-
- ret = bch2_trans_lock_write(trans);
- if (unlikely(ret))
- return ret;
-
- ret = bch2_trans_commit_write_locked(trans, flags, stopped_at, trace_ip);
-
- if (!ret && unlikely(trans->journal_replay_not_finished))
- bch2_drop_overwrites_from_journal(trans);
-
- bch2_trans_unlock_write(trans);
-
- if (!ret && trans->journal_pin)
- bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
- trans->journal_pin,
- bch2_trans_commit_journal_pin_flush);
-
- /*
- * Drop journal reservation after dropping write locks, since dropping
- * the journal reservation may kick off a journal write:
- */
- if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res)))
- bch2_journal_res_put(&c->journal, &trans->journal_res);
-
- return ret;
-}
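-
-/*
- * The loop at the top of do_bch2_trans_commit() is a pre-pass run before
- * taking write locks: cached updates are skipped, and for each leaf node
- * touched, if the net change in key space (u64s_delta) is not positive the
- * node may have become mergeable, so bch2_foreground_maybe_merge() is tried
- * first.
- */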
-
-static int journal_reclaim_wait_done(struct bch_fs *c)
-{
- int ret = bch2_journal_error(&c->journal) ?:
- bch2_btree_key_cache_wait_done(c);
-
- if (!ret)
- journal_reclaim_kick(&c->journal);
- return ret;
-}
-
-static noinline
-int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
- struct btree_insert_entry *i,
- int ret, unsigned long trace_ip)
-{
- struct bch_fs *c = trans->c;
- enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
-
- switch (ret) {
- case -BCH_ERR_btree_insert_btree_node_full:
- ret = bch2_btree_split_leaf(trans, i->path, flags);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- trace_and_count(c, trans_restart_btree_node_split, trans,
- trace_ip, trans->paths + i->path);
- break;
- case -BCH_ERR_btree_insert_need_mark_replicas:
- ret = drop_locks_do(trans,
- bch2_accounting_update_sb(trans));
- break;
- case -BCH_ERR_journal_res_get_blocked:
- /*
- * XXX: this should probably be a separate BTREE_INSERT_NONBLOCK
- * flag
- */
- if ((flags & BCH_TRANS_COMMIT_journal_reclaim) &&
- watermark < BCH_WATERMARK_reclaim) {
- ret = -BCH_ERR_journal_reclaim_would_deadlock;
- break;
- }
-
- ret = drop_locks_do(trans,
- bch2_trans_journal_res_get(trans,
- (flags & BCH_WATERMARK_MASK)|
- JOURNAL_RES_GET_CHECK));
- break;
- case -BCH_ERR_btree_insert_need_journal_reclaim:
- bch2_trans_unlock(trans);
-
- trace_and_count(c, trans_blocked_journal_reclaim, trans, trace_ip);
- track_event_change(&c->times[BCH_TIME_blocked_key_cache_flush], true);
-
- wait_event_freezable(c->journal.reclaim_wait,
- (ret = journal_reclaim_wait_done(c)));
-
- track_event_change(&c->times[BCH_TIME_blocked_key_cache_flush], false);
-
- if (ret < 0)
- break;
-
- ret = bch2_trans_relock(trans);
- break;
- default:
- BUG_ON(ret >= 0);
- break;
- }
-
- BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);
-
- bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOSPC) &&
- (flags & BCH_TRANS_COMMIT_no_enospc), c,
- "%s: incorrectly got %s\n", __func__, bch2_err_str(ret));
-
- return ret;
-}
-
-static noinline int
-bch2_trans_commit_get_rw_cold(struct btree_trans *trans, unsigned flags)
-{
- struct bch_fs *c = trans->c;
- int ret;
-
- if (likely(!(flags & BCH_TRANS_COMMIT_lazy_rw)) ||
- test_bit(BCH_FS_started, &c->flags))
- return -BCH_ERR_erofs_trans_commit;
-
- ret = drop_locks_do(trans, bch2_fs_read_write_early(c));
- if (ret)
- return ret;
-
- bch2_write_ref_get(c, BCH_WRITE_REF_trans);
- return 0;
-}
-
-/*
- * This is for updates done in the early part of fsck - btree_gc - before we've
- * gone RW. We only add the new key to the list of keys for journal replay to
- * do.
- */
-static noinline int
-do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans)
-{
- struct bch_fs *c = trans->c;
-
- trans_for_each_update(trans, i) {
- int ret = bch2_journal_key_insert(c, i->btree_id, i->level, i->k);
- if (ret)
- return ret;
- }
-
- for (struct jset_entry *i = trans->journal_entries;
- i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
- i = vstruct_next(i))
- if (i->type == BCH_JSET_ENTRY_btree_keys ||
- i->type == BCH_JSET_ENTRY_write_buffer_keys) {
- int ret = bch2_journal_key_insert(c, i->btree_id, i->level, i->start);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
-{
- struct btree_insert_entry *errored_at = NULL;
- struct bch_fs *c = trans->c;
- int ret = 0;
-
- bch2_trans_verify_not_unlocked(trans);
- bch2_trans_verify_not_in_restart(trans);
-
- if (!trans->nr_updates &&
- !trans->journal_entries_u64s)
- goto out_reset;
-
- ret = bch2_trans_commit_run_triggers(trans);
- if (ret)
- goto out_reset;
-
- if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) {
- ret = do_bch2_trans_commit_to_journal_replay(trans);
- goto out_reset;
- }
-
- if (!(flags & BCH_TRANS_COMMIT_no_check_rw) &&
- unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_trans))) {
- ret = bch2_trans_commit_get_rw_cold(trans, flags);
- if (ret)
- goto out_reset;
- }
-
- EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags));
-
- trans->journal_u64s = trans->journal_entries_u64s;
- trans->journal_transaction_names = READ_ONCE(c->opts.journal_transaction_names);
- if (trans->journal_transaction_names)
- trans->journal_u64s += jset_u64s(JSET_ENTRY_LOG_U64s);
-
- trans_for_each_update(trans, i) {
- struct btree_path *path = trans->paths + i->path;
-
- EBUG_ON(!path->should_be_locked);
-
- ret = bch2_btree_path_upgrade(trans, path, i->level + 1);
- if (unlikely(ret))
- goto out;
-
- EBUG_ON(!btree_node_intent_locked(path, i->level));
-
- if (i->key_cache_already_flushed)
- continue;
-
- if (i->flags & BTREE_UPDATE_nojournal)
- continue;
-
- /* we're going to journal the key being updated: */
- trans->journal_u64s += jset_u64s(i->k->k.u64s);
-
- /* and we're also going to log the overwrite: */
- if (trans->journal_transaction_names)
- trans->journal_u64s += jset_u64s(i->old_k.u64s);
- }
-
- if (trans->extra_disk_res) {
- ret = bch2_disk_reservation_add(c, trans->disk_res,
- trans->extra_disk_res,
- (flags & BCH_TRANS_COMMIT_no_enospc)
- ? BCH_DISK_RESERVATION_NOFAIL : 0);
- if (ret)
- goto err;
- }
-retry:
- errored_at = NULL;
- bch2_trans_verify_not_unlocked(trans);
- bch2_trans_verify_not_in_restart(trans);
- if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res)))
- memset(&trans->journal_res, 0, sizeof(trans->journal_res));
- memset(&trans->fs_usage_delta, 0, sizeof(trans->fs_usage_delta));
-
- ret = do_bch2_trans_commit(trans, flags, &errored_at, _RET_IP_);
-
- /* make sure we didn't drop or screw up locks: */
- bch2_trans_verify_locks(trans);
-
- if (ret)
- goto err;
-
- trace_and_count(c, transaction_commit, trans, _RET_IP_);
-out:
- if (likely(!(flags & BCH_TRANS_COMMIT_no_check_rw)))
- bch2_write_ref_put(c, BCH_WRITE_REF_trans);
-out_reset:
- if (!ret)
- bch2_trans_downgrade(trans);
- bch2_trans_reset_updates(trans);
-
- return ret;
-err:
- ret = bch2_trans_commit_error(trans, flags, errored_at, ret, _RET_IP_);
- if (ret)
- goto out;
-
- /*
- * We might have done another transaction commit in the error path -
- * i.e. btree write buffer flush - which will have made use of
- * trans->journal_res, but with BCH_TRANS_COMMIT_no_journal_res that is
- * how the journal sequence number to pin is passed in - so we must
- * restart:
- */
- if (flags & BCH_TRANS_COMMIT_no_journal_res) {
- ret = -BCH_ERR_transaction_restart_nested;
- goto out;
- }
-
- goto retry;
-}
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
deleted file mode 100644
index 4568a41fefaf..000000000000
--- a/fs/bcachefs/btree_types.h
+++ /dev/null
@@ -1,865 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BTREE_TYPES_H
-#define _BCACHEFS_BTREE_TYPES_H
-
-#include <linux/list.h>
-#include <linux/rhashtable.h>
-
-#include "bbpos_types.h"
-#include "btree_key_cache_types.h"
-#include "buckets_types.h"
-#include "darray.h"
-#include "errcode.h"
-#include "journal_types.h"
-#include "replicas_types.h"
-#include "six.h"
-
-struct open_bucket;
-struct btree_update;
-struct btree_trans;
-
-#define MAX_BSETS 3U
-
-struct btree_nr_keys {
-
- /*
- * Amount of live metadata (i.e. size of node after a compaction) in
- * units of u64s
- */
- u16 live_u64s;
- u16 bset_u64s[MAX_BSETS];
-
- /* live keys only: */
- u16 packed_keys;
- u16 unpacked_keys;
-};
-
-struct bset_tree {
- /*
- * We construct a binary tree in an array as if the array
- * started at 1, so that things line up on the same cachelines
- * better: see comments in bset.c at cacheline_to_bkey() for
- * details
- */
-
- /* size of the binary tree and prev array */
- u16 size;
-
- /* function of size - precalculated for to_inorder() */
- u16 extra;
-
- u16 data_offset;
- u16 aux_data_offset;
- u16 end_offset;
-};
-
-struct btree_write {
- struct journal_entry_pin journal;
-};
-
-struct btree_alloc {
- struct open_buckets ob;
- __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
-};
-
-struct btree_bkey_cached_common {
- struct six_lock lock;
- u8 level;
- u8 btree_id;
- bool cached;
-};
-
-struct btree {
- struct btree_bkey_cached_common c;
-
- struct rhash_head hash;
- u64 hash_val;
-
- unsigned long flags;
- u16 written;
- u8 nsets;
- u8 nr_key_bits;
- u16 version_ondisk;
-
- struct bkey_format format;
-
- struct btree_node *data;
- void *aux_data;
-
- /*
- * Sets of sorted keys - the real btree node - plus a binary search tree
- *
- * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
- * to the memory we have allocated for this btree node. Additionally,
- * set[0]->data points to the entire btree node as it exists on disk.
- */
- struct bset_tree set[MAX_BSETS];
-
- struct btree_nr_keys nr;
- u16 sib_u64s[2];
- u16 whiteout_u64s;
- u8 byte_order;
- u8 unpack_fn_len;
-
- struct btree_write writes[2];
-
- /* Key/pointer for this btree node */
- __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
-
- /*
- * XXX: add a delete sequence number, so when bch2_btree_node_relock()
- * fails because the lock sequence number has changed - i.e. the
- * contents were modified - we can still relock the node if it's still
- * the one we want, without redoing the traversal
- */
-
- /*
- * For asynchronous splits/interior node updates:
- * When we do a split, we allocate new child nodes and update the parent
- * node to point to them: we update the parent in memory immediately,
- * but then we must wait until the children have been written out before
- * the update to the parent can be written - this is a list of the
- * btree_updates that are blocking this node from being
- * written:
- */
- struct list_head write_blocked;
-
- /*
- * Also for asynchronous splits/interior node updates:
- * If a btree node isn't reachable yet, we don't want to kick off
- * another write - because that write also won't yet be reachable and
- * marking it as completed before it's reachable would be incorrect:
- */
- unsigned long will_make_reachable;
-
- struct open_buckets ob;
-
- /* lru list */
- struct list_head list;
-};
-
-#define BCH_BTREE_CACHE_NOT_FREED_REASONS() \
- x(lock_intent) \
- x(lock_write) \
- x(dirty) \
- x(read_in_flight) \
- x(write_in_flight) \
- x(noevict) \
- x(write_blocked) \
- x(will_make_reachable) \
- x(access_bit)
-
-enum bch_btree_cache_not_freed_reasons {
-#define x(n) BCH_BTREE_CACHE_NOT_FREED_##n,
- BCH_BTREE_CACHE_NOT_FREED_REASONS()
-#undef x
- BCH_BTREE_CACHE_NOT_FREED_REASONS_NR,
-};
-
-struct btree_cache_list {
- unsigned idx;
- struct shrinker *shrink;
- struct list_head list;
- size_t nr;
-};
-
-struct btree_cache {
- struct rhashtable table;
- bool table_init_done;
- /*
- * We never free a struct btree, except on shutdown - we just put it on
- * the btree_cache_freed list and reuse it later. This simplifies the
- * code, and it doesn't cost us much memory as the memory usage is
- * dominated by buffers that hold the actual btree node data and those
- * can be freed - and the number of struct btrees allocated is
- * effectively bounded.
- *
- * btree_cache_freeable effectively is a small cache - we use it because
- * high order page allocations can be rather expensive, and it's quite
- * common to delete and allocate btree nodes in quick succession. It
- * should never grow past ~2-3 nodes in practice.
- */
- struct mutex lock;
- struct list_head freeable;
- struct list_head freed_pcpu;
- struct list_head freed_nonpcpu;
- struct btree_cache_list live[2];
-
- size_t nr_freeable;
- size_t nr_reserve;
- size_t nr_by_btree[BTREE_ID_NR];
- atomic_long_t nr_dirty;
-
- /* shrinker stats */
- size_t nr_freed;
- u64 not_freed[BCH_BTREE_CACHE_NOT_FREED_REASONS_NR];
-
- /*
- * If we need to allocate memory for a new btree node and that
- * allocation fails, we can cannibalize another node in the btree cache
- * to satisfy the allocation - lock to guarantee only one thread does
- * this at a time:
- */
- struct task_struct *alloc_lock;
- struct closure_waitlist alloc_wait;
-
- struct bbpos pinned_nodes_start;
- struct bbpos pinned_nodes_end;
- /* btree id mask: 0 for leaves, 1 for interior */
- u64 pinned_nodes_mask[2];
-};
-
-struct btree_node_iter {
- struct btree_node_iter_set {
- u16 k, end;
- } data[MAX_BSETS];
-};
-
-#define BTREE_ITER_FLAGS() \
- x(slots) \
- x(intent) \
- x(prefetch) \
- x(is_extents) \
- x(not_extents) \
- x(cached) \
- x(with_key_cache) \
- x(with_updates) \
- x(with_journal) \
- x(snapshot_field) \
- x(all_snapshots) \
- x(filter_snapshots) \
- x(nopreserve) \
- x(cached_nofill) \
- x(key_cache_fill) \
-
-#define STR_HASH_FLAGS() \
- x(must_create) \
- x(must_replace)
-
-#define BTREE_UPDATE_FLAGS() \
- x(internal_snapshot_node) \
- x(nojournal) \
- x(key_cache_reclaim)
-
-/*
- * BTREE_TRIGGER_norun - don't run triggers at all
- *
- * BTREE_TRIGGER_transactional - we're running transactional triggers as part of
- * a transaction commit: triggers may generate new updates
- *
- * BTREE_TRIGGER_atomic - we're running atomic triggers during a transaction
- * commit: we have our journal reservation, we're holding btree node write
- * locks, and we know the transaction is going to commit (returning an error
- * here is a fatal error, causing us to go emergency read-only)
- *
- * BTREE_TRIGGER_gc - we're in gc/fsck: running triggers to recalculate e.g. disk usage
- *
- * BTREE_TRIGGER_insert - @new is entering the btree
- * BTREE_TRIGGER_overwrite - @old is leaving the btree
- *
- * BTREE_TRIGGER_bucket_invalidate - signal from bucket invalidate path to alloc
- * trigger
- */
-#define BTREE_TRIGGER_FLAGS() \
- x(norun) \
- x(transactional) \
- x(atomic) \
- x(check_repair) \
- x(gc) \
- x(insert) \
- x(overwrite) \
- x(is_root) \
- x(bucket_invalidate)
-
-enum {
-#define x(n) BTREE_ITER_FLAG_BIT_##n,
- BTREE_ITER_FLAGS()
- STR_HASH_FLAGS()
- BTREE_UPDATE_FLAGS()
- BTREE_TRIGGER_FLAGS()
-#undef x
-};
-
-/* iter flags must fit in a u16: */
-//BUILD_BUG_ON(BTREE_ITER_FLAG_BIT_key_cache_fill > 15);
-
-enum btree_iter_update_trigger_flags {
-#define x(n) BTREE_ITER_##n = 1U << BTREE_ITER_FLAG_BIT_##n,
- BTREE_ITER_FLAGS()
-#undef x
-#define x(n) STR_HASH_##n = 1U << BTREE_ITER_FLAG_BIT_##n,
- STR_HASH_FLAGS()
-#undef x
-#define x(n) BTREE_UPDATE_##n = 1U << BTREE_ITER_FLAG_BIT_##n,
- BTREE_UPDATE_FLAGS()
-#undef x
-#define x(n) BTREE_TRIGGER_##n = 1U << BTREE_ITER_FLAG_BIT_##n,
- BTREE_TRIGGER_FLAGS()
-#undef x
-};
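-
-/*
- * The x-macros above place iterator, str_hash, update and trigger flags in a
- * single bit namespace; they expand to definitions such as:
- *
- *   BTREE_ITER_slots      = 1U << BTREE_ITER_FLAG_BIT_slots,
- *   STR_HASH_must_create  = 1U << BTREE_ITER_FLAG_BIT_must_create,
- *   BTREE_TRIGGER_norun   = 1U << BTREE_ITER_FLAG_BIT_norun,
- */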
-
-enum btree_path_uptodate {
- BTREE_ITER_UPTODATE = 0,
- BTREE_ITER_NEED_RELOCK = 1,
- BTREE_ITER_NEED_TRAVERSE = 2,
-};
-
-#if defined(CONFIG_BCACHEFS_LOCK_TIME_STATS) || defined(CONFIG_BCACHEFS_DEBUG)
-#define TRACK_PATH_ALLOCATED
-#endif
-
-typedef u16 btree_path_idx_t;
-
-struct btree_path {
- btree_path_idx_t sorted_idx;
- u8 ref;
- u8 intent_ref;
-
- /* btree_iter_copy starts here: */
- struct bpos pos;
-
- enum btree_id btree_id:5;
- bool cached:1;
- bool preserve:1;
- enum btree_path_uptodate uptodate:2;
- /*
- * When true, failing to relock this path will cause the transaction to
- * restart:
- */
- bool should_be_locked:1;
- unsigned level:3,
- locks_want:3;
- u8 nodes_locked;
-
- struct btree_path_level {
- struct btree *b;
- struct btree_node_iter iter;
- u32 lock_seq;
-#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
- u64 lock_taken_time;
-#endif
- } l[BTREE_MAX_DEPTH];
-#ifdef TRACK_PATH_ALLOCATED
- unsigned long ip_allocated;
-#endif
-};
-
-static inline struct btree_path_level *path_l(struct btree_path *path)
-{
- return path->l + path->level;
-}
-
-static inline unsigned long btree_path_ip_allocated(struct btree_path *path)
-{
-#ifdef TRACK_PATH_ALLOCATED
- return path->ip_allocated;
-#else
- return _THIS_IP_;
-#endif
-}
-
-/*
- * @pos - iterator's current position
- * @level - current btree depth
- * @locks_want - btree level below which we start taking intent locks
- * @nodes_locked - bitmask indicating which nodes in @nodes are locked
- * @nodes_intent_locked - bitmask indicating which locks are intent locks
- */
-struct btree_iter {
- struct btree_trans *trans;
- btree_path_idx_t path;
- btree_path_idx_t update_path;
- btree_path_idx_t key_cache_path;
-
- enum btree_id btree_id:8;
- u8 min_depth;
-
- /* btree_iter_copy starts here: */
- u16 flags;
-
- /* When we're filtering by snapshot, the snapshot ID we're looking for: */
- unsigned snapshot;
-
- struct bpos pos;
- /*
- * Current unpacked key - so that bch2_btree_iter_next()/
- * bch2_btree_iter_next_slot() can correctly advance pos.
- */
- struct bkey k;
-
- /* BTREE_ITER_with_journal: */
- size_t journal_idx;
-#ifdef TRACK_PATH_ALLOCATED
- unsigned long ip_allocated;
-#endif
-};
-
-#define BKEY_CACHED_ACCESSED 0
-#define BKEY_CACHED_DIRTY 1
-
-struct bkey_cached {
- struct btree_bkey_cached_common c;
-
- unsigned long flags;
- u16 u64s;
- struct bkey_cached_key key;
-
- struct rhash_head hash;
-
- struct journal_entry_pin journal;
- u64 seq;
-
- struct bkey_i *k;
- struct rcu_head rcu;
-};
-
-static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b)
-{
- return !b->cached
- ? container_of(b, struct btree, c)->key.k.p
- : container_of(b, struct bkey_cached, c)->key.pos;
-}
-
-struct btree_insert_entry {
- unsigned flags;
- u8 bkey_type;
- enum btree_id btree_id:8;
- u8 level:4;
- bool cached:1;
- bool insert_trigger_run:1;
- bool overwrite_trigger_run:1;
- bool key_cache_already_flushed:1;
- /*
- * @old_k may be a key from the journal; @old_btree_u64s always refers
- * to the size of the key being overwritten in the btree:
- */
- u8 old_btree_u64s;
- btree_path_idx_t path;
- struct bkey_i *k;
- /* key being overwritten: */
- struct bkey old_k;
- const struct bch_val *old_v;
- unsigned long ip_allocated;
-};
-
-/* Number of btree paths we preallocate, usually enough */
-#define BTREE_ITER_INITIAL 64
-/*
- * Limit for btree_trans_too_many_iters(); this is enough that almost all code
- * paths should run inside this limit, and if they don't it usually indicates a
- * bug (leaking/duplicated btree paths).
- *
- * exception: some fsck paths
- *
- * bugs with excessive path usage seem to have possibly been eliminated now, so
- * we might consider eliminating this (and btree_trans_too_many_iters()) at some
- * point.
- */
-#define BTREE_ITER_NORMAL_LIMIT 256
-/* never exceed limit */
-#define BTREE_ITER_MAX (1U << 10)
-
-struct btree_trans_commit_hook;
-typedef int (btree_trans_commit_hook_fn)(struct btree_trans *, struct btree_trans_commit_hook *);
-
-struct btree_trans_commit_hook {
- btree_trans_commit_hook_fn *fn;
- struct btree_trans_commit_hook *next;
-};
-
-#define BTREE_TRANS_MEM_MAX (1U << 16)
-
-#define BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS 10000
-
-struct btree_trans_paths {
- unsigned long nr_paths;
- struct btree_path paths[];
-};
-
-struct btree_trans {
- struct bch_fs *c;
-
- unsigned long *paths_allocated;
- struct btree_path *paths;
- btree_path_idx_t *sorted;
- struct btree_insert_entry *updates;
-
- void *mem;
- unsigned mem_top;
- unsigned mem_bytes;
-
- btree_path_idx_t nr_sorted;
- btree_path_idx_t nr_paths;
- btree_path_idx_t nr_paths_max;
- btree_path_idx_t nr_updates;
- u8 fn_idx;
- u8 lock_must_abort;
- bool lock_may_not_fail:1;
- bool srcu_held:1;
- bool locked:1;
- bool pf_memalloc_nofs:1;
- bool write_locked:1;
- bool used_mempool:1;
- bool in_traverse_all:1;
- bool paths_sorted:1;
- bool memory_allocation_failure:1;
- bool journal_transaction_names:1;
- bool journal_replay_not_finished:1;
- bool notrace_relock_fail:1;
- enum bch_errcode restarted:16;
- u32 restart_count;
-
- u64 last_begin_time;
- unsigned long last_begin_ip;
- unsigned long last_restarted_ip;
- unsigned long last_unlock_ip;
- unsigned long srcu_lock_time;
-
- const char *fn;
- struct btree_bkey_cached_common *locking;
- struct six_lock_waiter locking_wait;
- int srcu_idx;
-
- /* update path: */
- u16 journal_entries_u64s;
- u16 journal_entries_size;
- struct jset_entry *journal_entries;
-
- struct btree_trans_commit_hook *hooks;
- struct journal_entry_pin *journal_pin;
-
- struct journal_res journal_res;
- u64 *journal_seq;
- struct disk_reservation *disk_res;
-
- struct bch_fs_usage_base fs_usage_delta;
-
- unsigned journal_u64s;
- unsigned extra_disk_res; /* XXX kill */
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
- /* Entries before this are zeroed out on every bch2_trans_get() call */
-
- struct list_head list;
- struct closure ref;
-
- unsigned long _paths_allocated[BITS_TO_LONGS(BTREE_ITER_INITIAL)];
- struct btree_trans_paths trans_paths;
- struct btree_path _paths[BTREE_ITER_INITIAL];
- btree_path_idx_t _sorted[BTREE_ITER_INITIAL + 4];
- struct btree_insert_entry _updates[BTREE_ITER_INITIAL];
-};
-
-static inline struct btree_path *btree_iter_path(struct btree_trans *trans, struct btree_iter *iter)
-{
- return trans->paths + iter->path;
-}
-
-static inline struct btree_path *btree_iter_key_cache_path(struct btree_trans *trans, struct btree_iter *iter)
-{
- return iter->key_cache_path
- ? trans->paths + iter->key_cache_path
- : NULL;
-}
-
-#define BCH_BTREE_WRITE_TYPES() \
- x(initial, 0) \
- x(init_next_bset, 1) \
- x(cache_reclaim, 2) \
- x(journal_reclaim, 3) \
- x(interior, 4)
-
-enum btree_write_type {
-#define x(t, n) BTREE_WRITE_##t,
- BCH_BTREE_WRITE_TYPES()
-#undef x
- BTREE_WRITE_TYPE_NR,
-};
-
-#define BTREE_WRITE_TYPE_MASK (roundup_pow_of_two(BTREE_WRITE_TYPE_NR) - 1)
-#define BTREE_WRITE_TYPE_BITS ilog2(roundup_pow_of_two(BTREE_WRITE_TYPE_NR))
-
-#define BTREE_FLAGS() \
- x(read_in_flight) \
- x(read_error) \
- x(dirty) \
- x(need_write) \
- x(write_blocked) \
- x(will_make_reachable) \
- x(noevict) \
- x(write_idx) \
- x(accessed) \
- x(write_in_flight) \
- x(write_in_flight_inner) \
- x(just_written) \
- x(dying) \
- x(fake) \
- x(need_rewrite) \
- x(never_write) \
- x(pinned)
-
-enum btree_flags {
- /* First bits for btree node write type */
- BTREE_NODE_FLAGS_START = BTREE_WRITE_TYPE_BITS - 1,
-#define x(flag) BTREE_NODE_##flag,
- BTREE_FLAGS()
-#undef x
-};
-
-#define x(flag) \
-static inline bool btree_node_ ## flag(struct btree *b) \
-{ return test_bit(BTREE_NODE_ ## flag, &b->flags); } \
- \
-static inline void set_btree_node_ ## flag(struct btree *b) \
-{ set_bit(BTREE_NODE_ ## flag, &b->flags); } \
- \
-static inline void clear_btree_node_ ## flag(struct btree *b) \
-{ clear_bit(BTREE_NODE_ ## flag, &b->flags); }
-
-BTREE_FLAGS()
-#undef x
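-
-/*
- * For example, for the 'dirty' flag the macro above generates
- * btree_node_dirty(b), set_btree_node_dirty(b) and clear_btree_node_dirty(b),
- * wrapping test_bit()/set_bit()/clear_bit() on b->flags.
- */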
-
-static inline struct btree_write *btree_current_write(struct btree *b)
-{
- return b->writes + btree_node_write_idx(b);
-}
-
-static inline struct btree_write *btree_prev_write(struct btree *b)
-{
- return b->writes + (btree_node_write_idx(b) ^ 1);
-}
-
-static inline struct bset_tree *bset_tree_last(struct btree *b)
-{
- EBUG_ON(!b->nsets);
- return b->set + b->nsets - 1;
-}
-
-static inline void *
-__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
-{
- return (void *) ((u64 *) b->data + 1 + offset);
-}
-
-static inline u16
-__btree_node_ptr_to_offset(const struct btree *b, const void *p)
-{
- u16 ret = (u64 *) p - 1 - (u64 *) b->data;
-
- EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
- return ret;
-}
-
-static inline struct bset *bset(const struct btree *b,
- const struct bset_tree *t)
-{
- return __btree_node_offset_to_ptr(b, t->data_offset);
-}
-
-static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
-{
- t->end_offset =
- __btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
-}
-
-static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
- const struct bset *i)
-{
- t->data_offset = __btree_node_ptr_to_offset(b, i);
- set_btree_bset_end(b, t);
-}
-
-static inline struct bset *btree_bset_first(struct btree *b)
-{
- return bset(b, b->set);
-}
-
-static inline struct bset *btree_bset_last(struct btree *b)
-{
- return bset(b, bset_tree_last(b));
-}
-
-static inline u16
-__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
-{
- return __btree_node_ptr_to_offset(b, k);
-}
-
-static inline struct bkey_packed *
-__btree_node_offset_to_key(const struct btree *b, u16 k)
-{
- return __btree_node_offset_to_ptr(b, k);
-}
-
-static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
-{
- return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
-}
-
-#define btree_bkey_first(_b, _t) \
-({ \
- EBUG_ON(bset(_b, _t)->start != \
- __btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
- \
- bset(_b, _t)->start; \
-})
-
-#define btree_bkey_last(_b, _t) \
-({ \
- EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) != \
- vstruct_last(bset(_b, _t))); \
- \
- __btree_node_offset_to_key(_b, (_t)->end_offset); \
-})
-
-static inline unsigned bset_u64s(struct bset_tree *t)
-{
- return t->end_offset - t->data_offset -
- sizeof(struct bset) / sizeof(u64);
-}
-
-static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
-{
- return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
-}
-
-static inline unsigned bset_byte_offset(struct btree *b, void *i)
-{
- return i - (void *) b->data;
-}
-
-enum btree_node_type {
- BKEY_TYPE_btree,
-#define x(kwd, val, ...) BKEY_TYPE_##kwd = val + 1,
- BCH_BTREE_IDS()
-#undef x
- BKEY_TYPE_NR
-};
-
-/* Type of a key in btree @id at level @level: */
-static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
-{
- return level ? BKEY_TYPE_btree : (unsigned) id + 1;
-}
-
-/* Type of keys @b contains: */
-static inline enum btree_node_type btree_node_type(struct btree *b)
-{
- return __btree_node_type(b->c.level, b->c.btree_id);
-}
-
-const char *bch2_btree_node_type_str(enum btree_node_type);
-
-#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS \
- (BIT_ULL(BKEY_TYPE_extents)| \
- BIT_ULL(BKEY_TYPE_alloc)| \
- BIT_ULL(BKEY_TYPE_inodes)| \
- BIT_ULL(BKEY_TYPE_stripes)| \
- BIT_ULL(BKEY_TYPE_reflink)| \
- BIT_ULL(BKEY_TYPE_subvolumes)| \
- BIT_ULL(BKEY_TYPE_btree))
-
-#define BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS \
- (BIT_ULL(BKEY_TYPE_alloc)| \
- BIT_ULL(BKEY_TYPE_inodes)| \
- BIT_ULL(BKEY_TYPE_stripes)| \
- BIT_ULL(BKEY_TYPE_snapshots))
-
-#define BTREE_NODE_TYPE_HAS_TRIGGERS \
- (BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS| \
- BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS)
-
-static inline bool btree_node_type_has_trans_triggers(enum btree_node_type type)
-{
- return BIT_ULL(type) & BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS;
-}
-
-static inline bool btree_node_type_has_atomic_triggers(enum btree_node_type type)
-{
- return BIT_ULL(type) & BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS;
-}
-
-static inline bool btree_node_type_has_triggers(enum btree_node_type type)
-{
- return BIT_ULL(type) & BTREE_NODE_TYPE_HAS_TRIGGERS;
-}
-
-static inline bool btree_node_type_is_extents(enum btree_node_type type)
-{
- const u64 mask = 0
-#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_EXTENTS)) << (nr + 1))
- BCH_BTREE_IDS()
-#undef x
- ;
-
- return BIT_ULL(type) & mask;
-}
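-
-/*
- * Note the "nr + 1" shift: btree node types are offset by one from btree IDs
- * (BKEY_TYPE_##kwd = val + 1 above), since bit 0 is taken by BKEY_TYPE_btree.
- */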
-
-static inline bool btree_id_is_extents(enum btree_id btree)
-{
- return btree_node_type_is_extents(__btree_node_type(0, btree));
-}
-
-static inline bool btree_type_has_snapshots(enum btree_id id)
-{
- const u64 mask = 0
-#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_SNAPSHOTS)) << nr)
- BCH_BTREE_IDS()
-#undef x
- ;
-
- return BIT_ULL(id) & mask;
-}
-
-static inline bool btree_type_has_snapshot_field(enum btree_id id)
-{
- const u64 mask = 0
-#define x(name, nr, flags, ...) |((!!((flags) & (BTREE_ID_SNAPSHOT_FIELD|BTREE_ID_SNAPSHOTS))) << nr)
- BCH_BTREE_IDS()
-#undef x
- ;
-
- return BIT_ULL(id) & mask;
-}
-
-static inline bool btree_type_has_ptrs(enum btree_id id)
-{
- const u64 mask = 0
-#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_DATA)) << nr)
- BCH_BTREE_IDS()
-#undef x
- ;
-
- return BIT_ULL(id) & mask;
-}
-
-struct btree_root {
- struct btree *b;
-
- /* On disk root - see async splits: */
- __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
- u8 level;
- u8 alive;
- s16 error;
-};
-
-enum btree_gc_coalesce_fail_reason {
- BTREE_GC_COALESCE_FAIL_RESERVE_GET,
- BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
- BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
-};
-
-enum btree_node_sibling {
- btree_prev_sib,
- btree_next_sib,
-};
-
-struct get_locks_fail {
- unsigned l;
- struct btree *b;
-};
-
-#endif /* _BCACHEFS_BTREE_TYPES_H */
diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c
deleted file mode 100644
index 5d809e8bd170..000000000000
--- a/fs/bcachefs/btree_update.c
+++ /dev/null
@@ -1,903 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "btree_update.h"
-#include "btree_iter.h"
-#include "btree_journal_iter.h"
-#include "btree_locking.h"
-#include "buckets.h"
-#include "debug.h"
-#include "errcode.h"
-#include "error.h"
-#include "extents.h"
-#include "keylist.h"
-#include "snapshot.h"
-#include "trace.h"
-
-static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,
- const struct btree_insert_entry *r)
-{
- return cmp_int(l->btree_id, r->btree_id) ?:
- cmp_int(l->cached, r->cached) ?:
- -cmp_int(l->level, r->level) ?:
- bpos_cmp(l->k->k.p, r->k->k.p);
-}
-
-static int __must_check
-bch2_trans_update_by_path(struct btree_trans *, btree_path_idx_t,
- struct bkey_i *, enum btree_iter_update_trigger_flags,
- unsigned long ip);
-
-static noinline int extent_front_merge(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k,
- struct bkey_i **insert,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bch_fs *c = trans->c;
- struct bkey_i *update;
- int ret;
-
- if (unlikely(trans->journal_replay_not_finished))
- return 0;
-
- update = bch2_bkey_make_mut_noupdate(trans, k);
- ret = PTR_ERR_OR_ZERO(update);
- if (ret)
- return ret;
-
- if (!bch2_bkey_merge(c, bkey_i_to_s(update), bkey_i_to_s_c(*insert)))
- return 0;
-
- ret = bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p) ?:
- bch2_key_has_snapshot_overwrites(trans, iter->btree_id, (*insert)->k.p);
- if (ret < 0)
- return ret;
- if (ret)
- return 0;
-
- ret = bch2_btree_delete_at(trans, iter, flags);
- if (ret)
- return ret;
-
- *insert = update;
- return 0;
-}
-
-static noinline int extent_back_merge(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_i *insert,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- int ret;
-
- if (unlikely(trans->journal_replay_not_finished))
- return 0;
-
- ret = bch2_key_has_snapshot_overwrites(trans, iter->btree_id, insert->k.p) ?:
- bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p);
- if (ret < 0)
- return ret;
- if (ret)
- return 0;
-
- bch2_bkey_merge(c, bkey_i_to_s(insert), k);
- return 0;
-}
-
-/*
- * When deleting, check if we need to emit a whiteout (because we're overwriting
- * something in an ancestor snapshot)
- */
-static int need_whiteout_for_snapshot(struct btree_trans *trans,
- enum btree_id btree_id, struct bpos pos)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- u32 snapshot = pos.snapshot;
- int ret;
-
- if (!bch2_snapshot_parent(trans->c, pos.snapshot))
- return 0;
-
- pos.snapshot++;
-
- for_each_btree_key_norestart(trans, iter, btree_id, pos,
- BTREE_ITER_all_snapshots|
- BTREE_ITER_nopreserve, k, ret) {
- if (!bkey_eq(k.k->p, pos))
- break;
-
- if (bch2_snapshot_is_ancestor(trans->c, snapshot,
- k.k->p.snapshot)) {
- ret = !bkey_whiteout(k.k);
- break;
- }
- }
- bch2_trans_iter_exit(trans, &iter);
-
- return ret;
-}
-
-int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
- enum btree_id id,
- struct bpos old_pos,
- struct bpos new_pos)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter old_iter, new_iter = { NULL };
- struct bkey_s_c old_k, new_k;
- snapshot_id_list s;
- struct bkey_i *update;
- int ret = 0;
-
- if (!bch2_snapshot_has_children(c, old_pos.snapshot))
- return 0;
-
- darray_init(&s);
-
- bch2_trans_iter_init(trans, &old_iter, id, old_pos,
- BTREE_ITER_not_extents|
- BTREE_ITER_all_snapshots);
- while ((old_k = bch2_btree_iter_prev(&old_iter)).k &&
- !(ret = bkey_err(old_k)) &&
- bkey_eq(old_pos, old_k.k->p)) {
- struct bpos whiteout_pos =
- SPOS(new_pos.inode, new_pos.offset, old_k.k->p.snapshot);
-
- if (!bch2_snapshot_is_ancestor(c, old_k.k->p.snapshot, old_pos.snapshot) ||
- snapshot_list_has_ancestor(c, &s, old_k.k->p.snapshot))
- continue;
-
- new_k = bch2_bkey_get_iter(trans, &new_iter, id, whiteout_pos,
- BTREE_ITER_not_extents|
- BTREE_ITER_intent);
- ret = bkey_err(new_k);
- if (ret)
- break;
-
- if (new_k.k->type == KEY_TYPE_deleted) {
- update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
- ret = PTR_ERR_OR_ZERO(update);
- if (ret)
- break;
-
- bkey_init(&update->k);
- update->k.p = whiteout_pos;
- update->k.type = KEY_TYPE_whiteout;
-
- ret = bch2_trans_update(trans, &new_iter, update,
- BTREE_UPDATE_internal_snapshot_node);
- }
- bch2_trans_iter_exit(trans, &new_iter);
-
- ret = snapshot_list_add(c, &s, old_k.k->p.snapshot);
- if (ret)
- break;
- }
- bch2_trans_iter_exit(trans, &new_iter);
- bch2_trans_iter_exit(trans, &old_iter);
- darray_exit(&s);
-
- return ret;
-}
-
-int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
- struct btree_iter *iter,
- enum btree_iter_update_trigger_flags flags,
- struct bkey_s_c old,
- struct bkey_s_c new)
-{
- enum btree_id btree_id = iter->btree_id;
- struct bkey_i *update;
- struct bpos new_start = bkey_start_pos(new.k);
- unsigned front_split = bkey_lt(bkey_start_pos(old.k), new_start);
- unsigned back_split = bkey_gt(old.k->p, new.k->p);
- unsigned middle_split = (front_split || back_split) &&
- old.k->p.snapshot != new.k->p.snapshot;
- unsigned nr_splits = front_split + back_split + middle_split;
- int ret = 0, compressed_sectors;
-
- /*
- * If we're going to be splitting a compressed extent, note it
- * so that __bch2_trans_commit() can increase our disk
- * reservation:
- */
- if (nr_splits > 1 &&
- (compressed_sectors = bch2_bkey_sectors_compressed(old)))
- trans->extra_disk_res += compressed_sectors * (nr_splits - 1);
-
- if (front_split) {
- update = bch2_bkey_make_mut_noupdate(trans, old);
- if ((ret = PTR_ERR_OR_ZERO(update)))
- return ret;
-
- bch2_cut_back(new_start, update);
-
- ret = bch2_insert_snapshot_whiteouts(trans, btree_id,
- old.k->p, update->k.p) ?:
- bch2_btree_insert_nonextent(trans, btree_id, update,
- BTREE_UPDATE_internal_snapshot_node|flags);
- if (ret)
- return ret;
- }
-
- /* If we're overwriting in a different snapshot - middle split: */
- if (middle_split) {
- update = bch2_bkey_make_mut_noupdate(trans, old);
- if ((ret = PTR_ERR_OR_ZERO(update)))
- return ret;
-
- bch2_cut_front(new_start, update);
- bch2_cut_back(new.k->p, update);
-
- ret = bch2_insert_snapshot_whiteouts(trans, btree_id,
- old.k->p, update->k.p) ?:
- bch2_btree_insert_nonextent(trans, btree_id, update,
- BTREE_UPDATE_internal_snapshot_node|flags);
- if (ret)
- return ret;
- }
-
- if (bkey_le(old.k->p, new.k->p)) {
- update = bch2_trans_kmalloc(trans, sizeof(*update));
- if ((ret = PTR_ERR_OR_ZERO(update)))
- return ret;
-
- bkey_init(&update->k);
- update->k.p = old.k->p;
- update->k.p.snapshot = new.k->p.snapshot;
-
- if (new.k->p.snapshot != old.k->p.snapshot) {
- update->k.type = KEY_TYPE_whiteout;
- } else if (btree_type_has_snapshots(btree_id)) {
- ret = need_whiteout_for_snapshot(trans, btree_id, update->k.p);
- if (ret < 0)
- return ret;
- if (ret)
- update->k.type = KEY_TYPE_whiteout;
- }
-
- ret = bch2_btree_insert_nonextent(trans, btree_id, update,
- BTREE_UPDATE_internal_snapshot_node|flags);
- if (ret)
- return ret;
- }
-
- if (back_split) {
- update = bch2_bkey_make_mut_noupdate(trans, old);
- if ((ret = PTR_ERR_OR_ZERO(update)))
- return ret;
-
- bch2_cut_front(new.k->p, update);
-
- ret = bch2_trans_update_by_path(trans, iter->path, update,
- BTREE_UPDATE_internal_snapshot_node|
- flags, _RET_IP_);
- if (ret)
- return ret;
- }
-
- return 0;
-}
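-
-/*
- * To summarize the cases handled above: when @new overlaps @old, the portion
- * of @old in front of @new (front split) and the portion behind it (back
- * split) are re-inserted as trimmed copies; the overlapped middle portion is
- * re-inserted when @old and @new are in different snapshots (middle split);
- * and when @new covers @old's position, a deletion or whiteout key is
- * inserted there in @new's snapshot.
- */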
-
-static int bch2_trans_update_extent(struct btree_trans *trans,
- struct btree_iter *orig_iter,
- struct bkey_i *insert,
- enum btree_iter_update_trigger_flags flags)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- enum btree_id btree_id = orig_iter->btree_id;
- int ret = 0;
-
- bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k),
- BTREE_ITER_intent|
- BTREE_ITER_with_updates|
- BTREE_ITER_not_extents);
- k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
- if ((ret = bkey_err(k)))
- goto err;
- if (!k.k)
- goto out;
-
- if (bkey_eq(k.k->p, bkey_start_pos(&insert->k))) {
- if (bch2_bkey_maybe_mergable(k.k, &insert->k)) {
- ret = extent_front_merge(trans, &iter, k, &insert, flags);
- if (ret)
- goto err;
- }
-
- goto next;
- }
-
- while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) {
- bool done = bkey_lt(insert->k.p, k.k->p);
-
- ret = bch2_trans_update_extent_overwrite(trans, &iter, flags, k, bkey_i_to_s_c(insert));
- if (ret)
- goto err;
-
- if (done)
- goto out;
-next:
- bch2_btree_iter_advance(&iter);
- k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
- if ((ret = bkey_err(k)))
- goto err;
- if (!k.k)
- goto out;
- }
-
- if (bch2_bkey_maybe_mergable(&insert->k, k.k)) {
- ret = extent_back_merge(trans, &iter, insert, k);
- if (ret)
- goto err;
- }
-out:
- if (!bkey_deleted(&insert->k))
- ret = bch2_btree_insert_nonextent(trans, btree_id, insert, flags);
-err:
- bch2_trans_iter_exit(trans, &iter);
-
- return ret;
-}
-
-static noinline int flush_new_cached_update(struct btree_trans *trans,
- struct btree_insert_entry *i,
- enum btree_iter_update_trigger_flags flags,
- unsigned long ip)
-{
- struct bkey k;
- int ret;
-
- btree_path_idx_t path_idx =
- bch2_path_get(trans, i->btree_id, i->old_k.p, 1, 0,
- BTREE_ITER_intent, _THIS_IP_);
- ret = bch2_btree_path_traverse(trans, path_idx, 0);
- if (ret)
- goto out;
-
- struct btree_path *btree_path = trans->paths + path_idx;
-
- /*
- * The old key in the insert entry might actually refer to an existing
- * key in the btree that has been deleted from cache and not yet
- * flushed. Check for this and skip the flush so we don't run triggers
- * against a stale key.
- */
- bch2_btree_path_peek_slot_exact(btree_path, &k);
- if (!bkey_deleted(&k))
- goto out;
-
- i->key_cache_already_flushed = true;
- i->flags |= BTREE_TRIGGER_norun;
-
- btree_path_set_should_be_locked(trans, btree_path);
- ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);
-out:
- bch2_path_put(trans, path_idx, true);
- return ret;
-}
-
-static int __must_check
-bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
- struct bkey_i *k, enum btree_iter_update_trigger_flags flags,
- unsigned long ip)
-{
- struct bch_fs *c = trans->c;
- struct btree_insert_entry *i, n;
- int cmp;
-
- struct btree_path *path = trans->paths + path_idx;
- EBUG_ON(!path->should_be_locked);
- EBUG_ON(trans->nr_updates >= trans->nr_paths);
- EBUG_ON(!bpos_eq(k->k.p, path->pos));
-
- n = (struct btree_insert_entry) {
- .flags = flags,
- .bkey_type = __btree_node_type(path->level, path->btree_id),
- .btree_id = path->btree_id,
- .level = path->level,
- .cached = path->cached,
- .path = path_idx,
- .k = k,
- .ip_allocated = ip,
- };
-
-#ifdef CONFIG_BCACHEFS_DEBUG
- trans_for_each_update(trans, i)
- BUG_ON(i != trans->updates &&
- btree_insert_entry_cmp(i - 1, i) >= 0);
-#endif
-
- /*
- * Pending updates are kept sorted: first, find position of new update,
- * then delete/trim any updates the new update overwrites:
- */
- for (i = trans->updates; i < trans->updates + trans->nr_updates; i++) {
- cmp = btree_insert_entry_cmp(&n, i);
- if (cmp <= 0)
- break;
- }
-
- bool overwrite = !cmp && i < trans->updates + trans->nr_updates;
-
- if (overwrite) {
- EBUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);
-
- bch2_path_put(trans, i->path, true);
- i->flags = n.flags;
- i->cached = n.cached;
- i->k = n.k;
- i->path = n.path;
- i->ip_allocated = n.ip_allocated;
- } else {
- array_insert_item(trans->updates, trans->nr_updates,
- i - trans->updates, n);
-
- i->old_v = bch2_btree_path_peek_slot_exact(path, &i->old_k).v;
- i->old_btree_u64s = !bkey_deleted(&i->old_k) ? i->old_k.u64s : 0;
-
- if (unlikely(trans->journal_replay_not_finished)) {
- struct bkey_i *j_k =
- bch2_journal_keys_peek_slot(c, n.btree_id, n.level, k->k.p);
-
- if (j_k) {
- i->old_k = j_k->k;
- i->old_v = &j_k->v;
- }
- }
- }
-
- __btree_path_get(trans, trans->paths + i->path, true);
-
- trace_update_by_path(trans, path, i, overwrite);
-
- /*
- * If a key is present in the key cache, it must also exist in the
- * btree - this is necessary for cache coherency. When iterating over
- * a btree that's cached in the key cache, the btree iter code checks
- * the key cache - but the key has to exist in the btree for that to
- * work:
- */
- if (path->cached && !i->old_btree_u64s)
- return flush_new_cached_update(trans, i, flags, ip);
-
- return 0;
-}
-
-static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
- struct btree_iter *iter,
- struct btree_path *path)
-{
- struct btree_path *key_cache_path = btree_iter_key_cache_path(trans, iter);
-
- if (!key_cache_path ||
- !key_cache_path->should_be_locked ||
- !bpos_eq(key_cache_path->pos, iter->pos)) {
- struct bkey_cached *ck;
- int ret;
-
- if (!iter->key_cache_path)
- iter->key_cache_path =
- bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
- BTREE_ITER_intent|
- BTREE_ITER_cached, _THIS_IP_);
-
- iter->key_cache_path =
- bch2_btree_path_set_pos(trans, iter->key_cache_path, path->pos,
- iter->flags & BTREE_ITER_intent,
- _THIS_IP_);
-
- ret = bch2_btree_path_traverse(trans, iter->key_cache_path, BTREE_ITER_cached);
- if (unlikely(ret))
- return ret;
-
- ck = (void *) trans->paths[iter->key_cache_path].l[0].b;
-
- if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
- trace_and_count(trans->c, trans_restart_key_cache_raced, trans, _RET_IP_);
- return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
- }
-
- btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
- }
-
- return 0;
-}
-
-int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_i *k, enum btree_iter_update_trigger_flags flags)
-{
- btree_path_idx_t path_idx = iter->update_path ?: iter->path;
- int ret;
-
- if (iter->flags & BTREE_ITER_is_extents)
- return bch2_trans_update_extent(trans, iter, k, flags);
-
- if (bkey_deleted(&k->k) &&
- !(flags & BTREE_UPDATE_key_cache_reclaim) &&
- (iter->flags & BTREE_ITER_filter_snapshots)) {
- ret = need_whiteout_for_snapshot(trans, iter->btree_id, k->k.p);
- if (unlikely(ret < 0))
- return ret;
-
- if (ret)
- k->k.type = KEY_TYPE_whiteout;
- }
-
- /*
- * Ensure that updates to cached btrees go to the key cache:
- */
- struct btree_path *path = trans->paths + path_idx;
- if (!(flags & BTREE_UPDATE_key_cache_reclaim) &&
- !path->cached &&
- !path->level &&
- btree_id_cached(trans->c, path->btree_id)) {
- ret = bch2_trans_update_get_key_cache(trans, iter, path);
- if (ret)
- return ret;
-
- path_idx = iter->key_cache_path;
- }
-
- return bch2_trans_update_by_path(trans, path_idx, k, flags, _RET_IP_);
-}
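/*
 * Example (sketch, hypothetical caller): queue one update from inside an
 * already-running transaction.  Nothing reaches disk until the caller's
 * bch2_trans_commit(); the key must stay valid until then, which is why it
 * would normally come from bch2_trans_kmalloc().  bch2_trans_update() itself
 * handles the extent case when the iterator has BTREE_ITER_is_extents set.
 * Names are illustrative and error handling is abbreviated.
 */
static int example_update_one(struct btree_trans *trans, enum btree_id btree,
			      struct bkey_i *k)
{
	struct btree_iter iter;

	bch2_trans_iter_init(trans, &iter, btree, k->k.p, BTREE_ITER_intent);
	int ret = bch2_btree_iter_traverse(&iter) ?:
		  bch2_trans_update(trans, &iter, k, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}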
-
-int bch2_btree_insert_clone_trans(struct btree_trans *trans,
- enum btree_id btree,
- struct bkey_i *k)
-{
- struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(&k->k));
- int ret = PTR_ERR_OR_ZERO(n);
- if (ret)
- return ret;
-
- bkey_copy(n, k);
- return bch2_btree_insert_trans(trans, btree, n, 0);
-}
-
-struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsigned u64s)
-{
- unsigned new_top = trans->journal_entries_u64s + u64s;
- unsigned old_size = trans->journal_entries_size;
-
- if (new_top > trans->journal_entries_size) {
- trans->journal_entries_size = roundup_pow_of_two(new_top);
-
- btree_trans_stats(trans)->journal_entries_size = trans->journal_entries_size;
- }
-
- struct jset_entry *n =
- bch2_trans_kmalloc_nomemzero(trans,
- trans->journal_entries_size * sizeof(u64));
- if (IS_ERR(n))
- return ERR_CAST(n);
-
- if (trans->journal_entries)
- memcpy(n, trans->journal_entries, old_size * sizeof(u64));
- trans->journal_entries = n;
-
- struct jset_entry *e = btree_trans_journal_entries_top(trans);
- trans->journal_entries_u64s = new_top;
- return e;
-}
-
-int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
- enum btree_id btree, struct bpos end)
-{
- struct bkey_s_c k;
- int ret = 0;
-
- bch2_trans_iter_init(trans, iter, btree, POS_MAX, BTREE_ITER_intent);
- k = bch2_btree_iter_prev(iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- bch2_btree_iter_advance(iter);
- k = bch2_btree_iter_peek_slot(iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- BUG_ON(k.k->type != KEY_TYPE_deleted);
-
- if (bkey_gt(k.k->p, end)) {
- ret = -BCH_ERR_ENOSPC_btree_slot;
- goto err;
- }
-
- return 0;
-err:
- bch2_trans_iter_exit(trans, iter);
- return ret;
-}
-
-void bch2_trans_commit_hook(struct btree_trans *trans,
- struct btree_trans_commit_hook *h)
-{
- h->next = trans->hooks;
- trans->hooks = h;
-}
-
-int bch2_btree_insert_nonextent(struct btree_trans *trans,
- enum btree_id btree, struct bkey_i *k,
- enum btree_iter_update_trigger_flags flags)
-{
- struct btree_iter iter;
- int ret;
-
- bch2_trans_iter_init(trans, &iter, btree, k->k.p,
- BTREE_ITER_cached|
- BTREE_ITER_not_extents|
- BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(&iter) ?:
- bch2_trans_update(trans, &iter, k, flags);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id,
- struct bkey_i *k, enum btree_iter_update_trigger_flags flags)
-{
- struct btree_iter iter;
- bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
- BTREE_ITER_intent|flags);
- int ret = bch2_btree_iter_traverse(&iter) ?:
- bch2_trans_update(trans, &iter, k, flags);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-/**
- * bch2_btree_insert - insert a key into a btree
- * @c: pointer to struct bch_fs
- * @id: btree to insert into
- * @k: key to insert
- * @disk_res: must be non-NULL whenever inserting or potentially
- * splitting data extents
- * @flags: transaction commit flags
- * @iter_flags: btree iter update trigger flags
- *
- * Returns: 0 on success, error code on failure
- */
-int bch2_btree_insert(struct bch_fs *c, enum btree_id id, struct bkey_i *k,
- struct disk_reservation *disk_res, int flags,
- enum btree_iter_update_trigger_flags iter_flags)
-{
- return bch2_trans_commit_do(c, disk_res, NULL, flags,
- bch2_btree_insert_trans(trans, id, k, iter_flags));
-}
-
-int bch2_btree_delete_extent_at(struct btree_trans *trans, struct btree_iter *iter,
- unsigned len, unsigned update_flags)
-{
- struct bkey_i *k;
-
- k = bch2_trans_kmalloc(trans, sizeof(*k));
- if (IS_ERR(k))
- return PTR_ERR(k);
-
- bkey_init(&k->k);
- k->k.p = iter->pos;
- bch2_key_resize(&k->k, len);
- return bch2_trans_update(trans, iter, k, update_flags);
-}
-
-int bch2_btree_delete_at(struct btree_trans *trans,
- struct btree_iter *iter, unsigned update_flags)
-{
- return bch2_btree_delete_extent_at(trans, iter, 0, update_flags);
-}
-
-int bch2_btree_delete(struct btree_trans *trans,
- enum btree_id btree, struct bpos pos,
- unsigned update_flags)
-{
- struct btree_iter iter;
- int ret;
-
- bch2_trans_iter_init(trans, &iter, btree, pos,
- BTREE_ITER_cached|
- BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(&iter) ?:
- bch2_btree_delete_at(trans, &iter, update_flags);
- bch2_trans_iter_exit(trans, &iter);
-
- return ret;
-}
-
-int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
- struct bpos start, struct bpos end,
- unsigned update_flags,
- u64 *journal_seq)
-{
- u32 restart_count = trans->restart_count;
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret = 0;
-
- bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_intent);
- while ((k = bch2_btree_iter_peek_upto(&iter, end)).k) {
- struct disk_reservation disk_res =
- bch2_disk_reservation_init(trans->c, 0);
- struct bkey_i delete;
-
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- bkey_init(&delete.k);
-
- /*
- * This could probably be more efficient for extents:
- */
-
- /*
- * For extents, iter.pos won't necessarily be the same as
- * bkey_start_pos(k.k) (for non extents they always will be the
- * same). It's important that we delete starting from iter.pos
- * because the range we want to delete could start in the middle
- * of k.
- *
- * (bch2_btree_iter_peek() does guarantee that iter.pos >=
- * bkey_start_pos(k.k)).
- */
- delete.k.p = iter.pos;
-
- if (iter.flags & BTREE_ITER_is_extents)
- bch2_key_resize(&delete.k,
- bpos_min(end, k.k->p).offset -
- iter.pos.offset);
-
- ret = bch2_trans_update(trans, &iter, &delete, update_flags) ?:
- bch2_trans_commit(trans, &disk_res, journal_seq,
- BCH_TRANS_COMMIT_no_enospc);
- bch2_disk_reservation_put(trans->c, &disk_res);
-err:
- /*
- * the bch2_trans_begin() call is in a weird place because we
- * need to call it after every transaction commit, to avoid path
- * overflow, but don't want to call it if the delete operation
- * is a no-op and we have no work to do:
- */
- bch2_trans_begin(trans);
-
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- ret = 0;
- if (ret)
- break;
- }
- bch2_trans_iter_exit(trans, &iter);
-
- return ret ?: trans_was_restarted(trans, restart_count);
-}
-
-/*
- * bch2_btree_delete_range - delete everything within a given range
- *
- * Range is a half open interval - [start, end)
- */
-int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
- struct bpos start, struct bpos end,
- unsigned update_flags,
- u64 *journal_seq)
-{
- int ret = bch2_trans_run(c,
- bch2_btree_delete_range_trans(trans, id, start, end,
- update_flags, journal_seq));
- if (ret == -BCH_ERR_transaction_restart_nested)
- ret = 0;
- return ret;
-}
-
-int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
- struct bpos pos, bool set)
-{
- struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
- int ret = PTR_ERR_OR_ZERO(k);
- if (ret)
- return ret;
-
- bkey_init(&k->k);
- k->k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
- k->k.p = pos;
-
- struct btree_iter iter;
- bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent);
-
- ret = bch2_btree_iter_traverse(&iter) ?:
- bch2_trans_update(trans, &iter, k, 0);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-int bch2_btree_bit_mod_buffered(struct btree_trans *trans, enum btree_id btree,
- struct bpos pos, bool set)
-{
- struct bkey_i k;
-
- bkey_init(&k.k);
- k.k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
- k.k.p = pos;
-
- return bch2_trans_update_buffered(trans, btree, &k);
-}
-
-static int __bch2_trans_log_msg(struct btree_trans *trans, struct printbuf *buf, unsigned u64s)
-{
- struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(u64s));
- int ret = PTR_ERR_OR_ZERO(e);
- if (ret)
- return ret;
-
- struct jset_entry_log *l = container_of(e, struct jset_entry_log, entry);
- journal_entry_init(e, BCH_JSET_ENTRY_log, 0, 1, u64s);
- memcpy(l->d, buf->buf, buf->pos);
- return 0;
-}
-
-__printf(3, 0)
-static int
-__bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt,
- va_list args)
-{
- struct printbuf buf = PRINTBUF;
- prt_vprintf(&buf, fmt, args);
-
- unsigned u64s = DIV_ROUND_UP(buf.pos, sizeof(u64));
- prt_chars(&buf, '\0', u64s * sizeof(u64) - buf.pos);
-
- int ret = buf.allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0;
- if (ret)
- goto err;
-
- if (!test_bit(JOURNAL_running, &c->journal.flags)) {
- ret = darray_make_room(&c->journal.early_journal_entries, jset_u64s(u64s));
- if (ret)
- goto err;
-
- struct jset_entry_log *l = (void *) &darray_top(c->journal.early_journal_entries);
- journal_entry_init(&l->entry, BCH_JSET_ENTRY_log, 0, 1, u64s);
- memcpy(l->d, buf.buf, buf.pos);
- c->journal.early_journal_entries.nr += jset_u64s(u64s);
- } else {
- ret = bch2_trans_commit_do(c, NULL, NULL,
- BCH_TRANS_COMMIT_lazy_rw|commit_flags,
- __bch2_trans_log_msg(trans, &buf, u64s));
- }
-err:
- printbuf_exit(&buf);
- return ret;
-}
-
-__printf(2, 3)
-int bch2_fs_log_msg(struct bch_fs *c, const char *fmt, ...)
-{
- va_list args;
- int ret;
-
- va_start(args, fmt);
- ret = __bch2_fs_log_msg(c, 0, fmt, args);
- va_end(args);
- return ret;
-}
-
-/*
- * Use for logging messages during recovery to enable reserved space and avoid
- * blocking.
- */
-__printf(2, 3)
-int bch2_journal_log_msg(struct bch_fs *c, const char *fmt, ...)
-{
- va_list args;
- int ret;
-
- va_start(args, fmt);
- ret = __bch2_fs_log_msg(c, BCH_WATERMARK_reclaim, fmt, args);
- va_end(args);
- return ret;
-}
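/*
 * Example (sketch, hypothetical caller): the log-message helpers above take
 * printf-style arguments and append the message to the journal;
 * bch2_journal_log_msg() is the variant for use during recovery, running with
 * the reclaim watermark.  "nr_fixed" is purely illustrative.
 */
static void example_log_repair(struct bch_fs *c, unsigned nr_fixed)
{
	bch2_fs_log_msg(c, "example: repaired %u keys", nr_fixed);
}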
diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h
deleted file mode 100644
index 70b3c989fac2..000000000000
--- a/fs/bcachefs/btree_update.h
+++ /dev/null
@@ -1,363 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BTREE_UPDATE_H
-#define _BCACHEFS_BTREE_UPDATE_H
-
-#include "btree_iter.h"
-#include "journal.h"
-
-struct bch_fs;
-struct btree;
-
-void bch2_btree_node_prep_for_write(struct btree_trans *,
- struct btree_path *, struct btree *);
-bool bch2_btree_bset_insert_key(struct btree_trans *, struct btree_path *,
- struct btree *, struct btree_node_iter *,
- struct bkey_i *);
-
-int bch2_btree_node_flush0(struct journal *, struct journal_entry_pin *, u64);
-int bch2_btree_node_flush1(struct journal *, struct journal_entry_pin *, u64);
-void bch2_btree_add_journal_pin(struct bch_fs *, struct btree *, u64);
-
-void bch2_btree_insert_key_leaf(struct btree_trans *, struct btree_path *,
- struct bkey_i *, u64);
-
-#define BCH_TRANS_COMMIT_FLAGS() \
- x(no_enospc, "don't check for enospc") \
- x(no_check_rw, "don't attempt to take a ref on c->writes") \
- x(lazy_rw, "go read-write if we haven't yet - only for use in recovery") \
- x(no_journal_res, "don't take a journal reservation, instead " \
- "pin journal entry referred to by trans->journal_res.seq") \
-	x(journal_reclaim,	"operation required for journal reclaim; may return error " \
-				"instead of deadlocking if BCH_WATERMARK_reclaim not specified")\
- x(skip_accounting_apply, "we're in journal replay - accounting updates have already been applied")
-
-enum __bch_trans_commit_flags {
- /* First bits for bch_watermark: */
- __BCH_TRANS_COMMIT_FLAGS_START = BCH_WATERMARK_BITS,
-#define x(n, ...) __BCH_TRANS_COMMIT_##n,
- BCH_TRANS_COMMIT_FLAGS()
-#undef x
-};
-
-enum bch_trans_commit_flags {
-#define x(n, ...) BCH_TRANS_COMMIT_##n = BIT(__BCH_TRANS_COMMIT_##n),
- BCH_TRANS_COMMIT_FLAGS()
-#undef x
-};
-
-void bch2_trans_commit_flags_to_text(struct printbuf *, enum bch_trans_commit_flags);
-
-int bch2_btree_delete_extent_at(struct btree_trans *, struct btree_iter *,
- unsigned, unsigned);
-int bch2_btree_delete_at(struct btree_trans *, struct btree_iter *, unsigned);
-int bch2_btree_delete(struct btree_trans *, enum btree_id, struct bpos, unsigned);
-
-int bch2_btree_insert_nonextent(struct btree_trans *, enum btree_id,
- struct bkey_i *, enum btree_iter_update_trigger_flags);
-
-int bch2_btree_insert_trans(struct btree_trans *, enum btree_id, struct bkey_i *,
- enum btree_iter_update_trigger_flags);
-int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *, struct
- disk_reservation *, int flags, enum
- btree_iter_update_trigger_flags iter_flags);
-
-int bch2_btree_delete_range_trans(struct btree_trans *, enum btree_id,
- struct bpos, struct bpos, unsigned, u64 *);
-int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
- struct bpos, struct bpos, unsigned, u64 *);
-
-int bch2_btree_bit_mod(struct btree_trans *, enum btree_id, struct bpos, bool);
-int bch2_btree_bit_mod_buffered(struct btree_trans *, enum btree_id, struct bpos, bool);
-
-static inline int bch2_btree_delete_at_buffered(struct btree_trans *trans,
- enum btree_id btree, struct bpos pos)
-{
- return bch2_btree_bit_mod_buffered(trans, btree, pos, false);
-}
-
-int __bch2_insert_snapshot_whiteouts(struct btree_trans *, enum btree_id,
- struct bpos, struct bpos);
-
-/*
- * For use when splitting extents in existing snapshots:
- *
- * If @old_pos is an interior snapshot node, iterate over descendant snapshot
- * nodes: for every descendant snapshot in which @old_pos is overwritten and
- * not visible, emit a whiteout at @new_pos.
- */
-static inline int bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
- enum btree_id btree,
- struct bpos old_pos,
- struct bpos new_pos)
-{
- if (!btree_type_has_snapshots(btree) ||
- bkey_eq(old_pos, new_pos))
- return 0;
-
- return __bch2_insert_snapshot_whiteouts(trans, btree, old_pos, new_pos);
-}
-
-int bch2_trans_update_extent_overwrite(struct btree_trans *, struct btree_iter *,
- enum btree_iter_update_trigger_flags,
- struct bkey_s_c, struct bkey_s_c);
-
-int bch2_bkey_get_empty_slot(struct btree_trans *, struct btree_iter *,
- enum btree_id, struct bpos);
-
-int __must_check bch2_trans_update(struct btree_trans *, struct btree_iter *,
- struct bkey_i *, enum btree_iter_update_trigger_flags);
-
-struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *, unsigned);
-
-static inline struct jset_entry *btree_trans_journal_entries_top(struct btree_trans *trans)
-{
- return (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
-}
-
-static inline struct jset_entry *
-bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsigned u64s)
-{
- if (!trans->journal_entries ||
- trans->journal_entries_u64s + u64s > trans->journal_entries_size)
- return __bch2_trans_jset_entry_alloc(trans, u64s);
-
- struct jset_entry *e = btree_trans_journal_entries_top(trans);
- trans->journal_entries_u64s += u64s;
- return e;
-}
-
-int bch2_btree_insert_clone_trans(struct btree_trans *, enum btree_id, struct bkey_i *);
-
-static inline int __must_check bch2_trans_update_buffered(struct btree_trans *trans,
- enum btree_id btree,
- struct bkey_i *k)
-{
- /*
- * Most updates skip the btree write buffer until journal replay is
- * finished because synchronization with journal replay relies on having
- * a btree node locked - if we're overwriting a key in the journal that
- * journal replay hasn't yet replayed, we have to mark it as
- * overwritten.
- *
- * But accounting updates don't overwrite, they're deltas, and they have
- * to be flushed to the btree strictly in order for journal replay to be
- * able to tell which updates need to be applied:
- */
- if (k->k.type != KEY_TYPE_accounting &&
- unlikely(trans->journal_replay_not_finished))
- return bch2_btree_insert_clone_trans(trans, btree, k);
-
- struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(k->k.u64s));
- int ret = PTR_ERR_OR_ZERO(e);
- if (ret)
- return ret;
-
- journal_entry_init(e, BCH_JSET_ENTRY_write_buffer_keys, btree, 0, k->k.u64s);
- bkey_copy(e->start, k);
- return 0;
-}
-
-void bch2_trans_commit_hook(struct btree_trans *,
- struct btree_trans_commit_hook *);
-int __bch2_trans_commit(struct btree_trans *, unsigned);
-
-__printf(2, 3) int bch2_fs_log_msg(struct bch_fs *, const char *, ...);
-__printf(2, 3) int bch2_journal_log_msg(struct bch_fs *, const char *, ...);
-
-/**
- * bch2_trans_commit - insert keys at given iterator positions
- *
- * This is the main entry point for btree updates.
- *
- * Return values:
- * -EROFS: filesystem read only
- * -EIO: journal or btree node IO error
- */
-static inline int bch2_trans_commit(struct btree_trans *trans,
- struct disk_reservation *disk_res,
- u64 *journal_seq,
- unsigned flags)
-{
- trans->disk_res = disk_res;
- trans->journal_seq = journal_seq;
-
- return __bch2_trans_commit(trans, flags);
-}
-
-#define commit_do(_trans, _disk_res, _journal_seq, _flags, _do) \
- lockrestart_do(_trans, _do ?: bch2_trans_commit(_trans, (_disk_res),\
- (_journal_seq), (_flags)))
-
-#define nested_commit_do(_trans, _disk_res, _journal_seq, _flags, _do) \
- nested_lockrestart_do(_trans, _do ?: bch2_trans_commit(_trans, (_disk_res),\
- (_journal_seq), (_flags)))
-
-#define bch2_trans_commit_do(_c, _disk_res, _journal_seq, _flags, _do) \
- bch2_trans_run(_c, commit_do(trans, _disk_res, _journal_seq, _flags, _do))
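/*
 * Example (sketch, hypothetical caller): bch2_trans_commit_do() wraps an
 * update expression in its own transaction and retries it on transaction
 * restart.  The btree id and position are illustrative.
 */
static int example_delete_one(struct bch_fs *c, enum btree_id btree,
			      struct bpos pos)
{
	return bch2_trans_commit_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
				    bch2_btree_delete(trans, btree, pos, 0));
}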
-
-#define trans_for_each_update(_trans, _i) \
- for (struct btree_insert_entry *_i = (_trans)->updates; \
- (_i) < (_trans)->updates + (_trans)->nr_updates; \
- (_i)++)
-
-static inline void bch2_trans_reset_updates(struct btree_trans *trans)
-{
- trans_for_each_update(trans, i)
- bch2_path_put(trans, i->path, true);
-
- trans->nr_updates = 0;
- trans->journal_entries_u64s = 0;
- trans->hooks = NULL;
- trans->extra_disk_res = 0;
-}
-
-static inline struct bkey_i *__bch2_bkey_make_mut_noupdate(struct btree_trans *trans, struct bkey_s_c k,
- unsigned type, unsigned min_bytes)
-{
- unsigned bytes = max_t(unsigned, min_bytes, bkey_bytes(k.k));
- struct bkey_i *mut;
-
- if (type && k.k->type != type)
- return ERR_PTR(-ENOENT);
-
- /* extra padding for varint_decode_fast... */
- mut = bch2_trans_kmalloc_nomemzero(trans, bytes + 8);
- if (!IS_ERR(mut)) {
- bkey_reassemble(mut, k);
-
- if (unlikely(bytes > bkey_bytes(k.k))) {
- memset((void *) mut + bkey_bytes(k.k), 0,
- bytes - bkey_bytes(k.k));
- mut->k.u64s = DIV_ROUND_UP(bytes, sizeof(u64));
- }
- }
- return mut;
-}
-
-static inline struct bkey_i *bch2_bkey_make_mut_noupdate(struct btree_trans *trans, struct bkey_s_c k)
-{
- return __bch2_bkey_make_mut_noupdate(trans, k, 0, 0);
-}
-
-#define bch2_bkey_make_mut_noupdate_typed(_trans, _k, _type) \
- bkey_i_to_##_type(__bch2_bkey_make_mut_noupdate(_trans, _k, \
- KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))
-
-static inline struct bkey_i *__bch2_bkey_make_mut(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c *k, unsigned flags,
- unsigned type, unsigned min_bytes)
-{
- struct bkey_i *mut = __bch2_bkey_make_mut_noupdate(trans, *k, type, min_bytes);
- int ret;
-
- if (IS_ERR(mut))
- return mut;
-
- ret = bch2_trans_update(trans, iter, mut, flags);
- if (ret)
- return ERR_PTR(ret);
-
- *k = bkey_i_to_s_c(mut);
- return mut;
-}
-
-static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c *k, unsigned flags)
-{
- return __bch2_bkey_make_mut(trans, iter, k, flags, 0, 0);
-}
-
-#define bch2_bkey_make_mut_typed(_trans, _iter, _k, _flags, _type) \
- bkey_i_to_##_type(__bch2_bkey_make_mut(_trans, _iter, _k, _flags,\
- KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))
-
-static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *trans,
- struct btree_iter *iter,
- unsigned btree_id, struct bpos pos,
- unsigned flags, unsigned type, unsigned min_bytes)
-{
- struct bkey_s_c k = __bch2_bkey_get_iter(trans, iter,
- btree_id, pos, flags|BTREE_ITER_intent, type);
- struct bkey_i *ret = IS_ERR(k.k)
- ? ERR_CAST(k.k)
- : __bch2_bkey_make_mut_noupdate(trans, k, 0, min_bytes);
- if (IS_ERR(ret))
- bch2_trans_iter_exit(trans, iter);
- return ret;
-}
-
-static inline struct bkey_i *bch2_bkey_get_mut_noupdate(struct btree_trans *trans,
- struct btree_iter *iter,
- unsigned btree_id, struct bpos pos,
- unsigned flags)
-{
- return __bch2_bkey_get_mut_noupdate(trans, iter, btree_id, pos, flags, 0, 0);
-}
-
-static inline struct bkey_i *__bch2_bkey_get_mut(struct btree_trans *trans,
- struct btree_iter *iter,
- unsigned btree_id, struct bpos pos,
- unsigned flags, unsigned type, unsigned min_bytes)
-{
- struct bkey_i *mut = __bch2_bkey_get_mut_noupdate(trans, iter,
- btree_id, pos, flags|BTREE_ITER_intent, type, min_bytes);
- int ret;
-
- if (IS_ERR(mut))
- return mut;
-
- ret = bch2_trans_update(trans, iter, mut, flags);
- if (ret) {
- bch2_trans_iter_exit(trans, iter);
- return ERR_PTR(ret);
- }
-
- return mut;
-}
-
-static inline struct bkey_i *bch2_bkey_get_mut_minsize(struct btree_trans *trans,
- struct btree_iter *iter,
- unsigned btree_id, struct bpos pos,
- unsigned flags, unsigned min_bytes)
-{
- return __bch2_bkey_get_mut(trans, iter, btree_id, pos, flags, 0, min_bytes);
-}
-
-static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans,
- struct btree_iter *iter,
- unsigned btree_id, struct bpos pos,
- unsigned flags)
-{
- return __bch2_bkey_get_mut(trans, iter, btree_id, pos, flags, 0, 0);
-}
-
-#define bch2_bkey_get_mut_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
- bkey_i_to_##_type(__bch2_bkey_get_mut(_trans, _iter, \
- _btree_id, _pos, _flags, \
- KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))
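/*
 * Example (sketch, hypothetical caller): bch2_bkey_get_mut() (and the _typed
 * wrappers above) look up a key, copy it into transaction-owned memory and
 * queue the copy for update in one step, so the caller only has to edit the
 * returned key before commit.  Error handling is abbreviated.
 */
static int example_modify(struct btree_trans *trans, enum btree_id btree,
			  struct bpos pos)
{
	struct btree_iter iter;
	struct bkey_i *k = bch2_bkey_get_mut(trans, &iter, btree, pos, 0);
	int ret = PTR_ERR_OR_ZERO(k);
	if (ret)
		return ret;

	/* edit the value of k here; the update was already queued above */

	bch2_trans_iter_exit(trans, &iter);
	return 0;
}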
-
-static inline struct bkey_i *__bch2_bkey_alloc(struct btree_trans *trans, struct btree_iter *iter,
- unsigned flags, unsigned type, unsigned val_size)
-{
- struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k) + val_size);
- int ret;
-
- if (IS_ERR(k))
- return k;
-
- bkey_init(&k->k);
- k->k.p = iter->pos;
- k->k.type = type;
- set_bkey_val_bytes(&k->k, val_size);
-
- ret = bch2_trans_update(trans, iter, k, flags);
- if (unlikely(ret))
- return ERR_PTR(ret);
- return k;
-}
-
-#define bch2_bkey_alloc(_trans, _iter, _flags, _type) \
- bkey_i_to_##_type(__bch2_bkey_alloc(_trans, _iter, _flags, \
- KEY_TYPE_##_type, sizeof(struct bch_##_type)))
-
-#endif /* _BCACHEFS_BTREE_UPDATE_H */
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
deleted file mode 100644
index d596ef93239f..000000000000
--- a/fs/bcachefs/btree_update_interior.c
+++ /dev/null
@@ -1,2716 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "alloc_foreground.h"
-#include "bkey_buf.h"
-#include "bkey_methods.h"
-#include "btree_cache.h"
-#include "btree_gc.h"
-#include "btree_journal_iter.h"
-#include "btree_update.h"
-#include "btree_update_interior.h"
-#include "btree_io.h"
-#include "btree_iter.h"
-#include "btree_locking.h"
-#include "buckets.h"
-#include "clock.h"
-#include "error.h"
-#include "extents.h"
-#include "io_write.h"
-#include "journal.h"
-#include "journal_reclaim.h"
-#include "keylist.h"
-#include "recovery_passes.h"
-#include "replicas.h"
-#include "sb-members.h"
-#include "super-io.h"
-#include "trace.h"
-
-#include <linux/random.h>
-
-static const char * const bch2_btree_update_modes[] = {
-#define x(t) #t,
- BTREE_UPDATE_MODES()
-#undef x
- NULL
-};
-
-static int bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
- btree_path_idx_t, struct btree *, struct keylist *);
-static void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);
-
-/*
- * Verify that child nodes correctly span parent node's range:
- */
-int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b)
-{
- struct bch_fs *c = trans->c;
- struct bpos node_min = b->key.k.type == KEY_TYPE_btree_ptr_v2
- ? bkey_i_to_btree_ptr_v2(&b->key)->v.min_key
- : b->data->min_key;
- struct btree_and_journal_iter iter;
- struct bkey_s_c k;
- struct printbuf buf = PRINTBUF;
- struct bkey_buf prev;
- int ret = 0;
-
- BUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
- !bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key,
- b->data->min_key));
-
- if (b == btree_node_root(c, b)) {
- if (!bpos_eq(b->data->min_key, POS_MIN)) {
- printbuf_reset(&buf);
- bch2_bpos_to_text(&buf, b->data->min_key);
- need_fsck_err(trans, btree_root_bad_min_key,
- "btree root with incorrect min_key: %s", buf.buf);
- goto topology_repair;
- }
-
- if (!bpos_eq(b->data->max_key, SPOS_MAX)) {
- printbuf_reset(&buf);
- bch2_bpos_to_text(&buf, b->data->max_key);
- need_fsck_err(trans, btree_root_bad_max_key,
- "btree root with incorrect max_key: %s", buf.buf);
- goto topology_repair;
- }
- }
-
- if (!b->c.level)
- return 0;
-
- bch2_bkey_buf_init(&prev);
- bkey_init(&prev.k->k);
- bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
-
- while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
- if (k.k->type != KEY_TYPE_btree_ptr_v2)
- goto out;
-
- struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
-
- struct bpos expected_min = bkey_deleted(&prev.k->k)
- ? node_min
- : bpos_successor(prev.k->k.p);
-
- if (!bpos_eq(expected_min, bp.v->min_key)) {
- bch2_topology_error(c);
-
- printbuf_reset(&buf);
-			prt_str(&buf, "end of prev node doesn't match start of next node\n");
- prt_printf(&buf, " in btree %s level %u node ",
- bch2_btree_id_str(b->c.btree_id), b->c.level);
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
- prt_str(&buf, "\n prev ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k));
- prt_str(&buf, "\n next ");
- bch2_bkey_val_to_text(&buf, c, k);
-
- need_fsck_err(trans, btree_node_topology_bad_min_key, "%s", buf.buf);
- goto topology_repair;
- }
-
- bch2_bkey_buf_reassemble(&prev, c, k);
- bch2_btree_and_journal_iter_advance(&iter);
- }
-
- if (bkey_deleted(&prev.k->k)) {
- bch2_topology_error(c);
-
- printbuf_reset(&buf);
- prt_str(&buf, "empty interior node\n");
- prt_printf(&buf, " in btree %s level %u node ",
- bch2_btree_id_str(b->c.btree_id), b->c.level);
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
-
- need_fsck_err(trans, btree_node_topology_empty_interior_node, "%s", buf.buf);
- goto topology_repair;
- } else if (!bpos_eq(prev.k->k.p, b->key.k.p)) {
- bch2_topology_error(c);
-
- printbuf_reset(&buf);
- prt_str(&buf, "last child node doesn't end at end of parent node\n");
- prt_printf(&buf, " in btree %s level %u node ",
- bch2_btree_id_str(b->c.btree_id), b->c.level);
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
- prt_str(&buf, "\n last key ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k));
-
- need_fsck_err(trans, btree_node_topology_bad_max_key, "%s", buf.buf);
- goto topology_repair;
- }
-out:
-fsck_err:
- bch2_btree_and_journal_iter_exit(&iter);
- bch2_bkey_buf_exit(&prev, c);
- printbuf_exit(&buf);
- return ret;
-topology_repair:
- if ((c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology)) &&
- c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology) {
- bch2_inconsistent_error(c);
- ret = -BCH_ERR_btree_need_topology_repair;
- } else {
- ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
- }
- goto out;
-}
-
-/* Calculate ideal packed bkey format for new btree nodes: */
-
-static void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
-{
- struct bkey_packed *k;
- struct bkey uk;
-
- for_each_bset(b, t)
- bset_tree_for_each_key(b, t, k)
- if (!bkey_deleted(k)) {
- uk = bkey_unpack_key(b, k);
- bch2_bkey_format_add_key(s, &uk);
- }
-}
-
-static struct bkey_format bch2_btree_calc_format(struct btree *b)
-{
- struct bkey_format_state s;
-
- bch2_bkey_format_init(&s);
- bch2_bkey_format_add_pos(&s, b->data->min_key);
- bch2_bkey_format_add_pos(&s, b->data->max_key);
- __bch2_btree_calc_format(&s, b);
-
- return bch2_bkey_format_done(&s);
-}
-
-static size_t btree_node_u64s_with_format(struct btree_nr_keys nr,
- struct bkey_format *old_f,
- struct bkey_format *new_f)
-{
- /* stupid integer promotion rules */
- ssize_t delta =
- (((int) new_f->key_u64s - old_f->key_u64s) *
- (int) nr.packed_keys) +
- (((int) new_f->key_u64s - BKEY_U64s) *
- (int) nr.unpacked_keys);
-
- BUG_ON(delta + nr.live_u64s < 0);
-
- return nr.live_u64s + delta;
-}
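/*
 * Worked example (illustrative numbers): repacking nr.packed_keys == 100 keys
 * from a key_u64s == 3 format into a key_u64s == 2 format, with
 * nr.unpacked_keys == 10 keys currently stored unpacked (BKEY_U64s wide):
 *
 *	delta = (2 - 3) * 100 + (2 - BKEY_U64s) * 10
 *
 * i.e. the repacked keys shrink by 100 u64s, plus whatever the 10 previously
 * unpacked keys save once they pack into the 2-u64 format.
 */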
-
-/**
- * bch2_btree_node_format_fits - check if we could rewrite node with a new format
- *
- * @c: filesystem handle
- * @b: btree node to rewrite
- * @nr: number of keys for new node (i.e. b->nr)
- * @new_f: bkey format to translate keys to
- *
- * Returns: true if all re-packed keys will be able to fit in a new node.
- *
- * Assumes all keys will successfully pack with the new format.
- */
-static bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
- struct btree_nr_keys nr,
- struct bkey_format *new_f)
-{
- size_t u64s = btree_node_u64s_with_format(nr, &b->format, new_f);
-
- return __vstruct_bytes(struct btree_node, u64s) < btree_buf_bytes(b);
-}
-
-/* Btree node freeing/allocation: */
-
-static void __btree_node_free(struct btree_trans *trans, struct btree *b)
-{
- struct bch_fs *c = trans->c;
-
- trace_and_count(c, btree_node_free, trans, b);
-
- BUG_ON(btree_node_write_blocked(b));
- BUG_ON(btree_node_dirty(b));
- BUG_ON(btree_node_need_write(b));
- BUG_ON(b == btree_node_root(c, b));
- BUG_ON(b->ob.nr);
- BUG_ON(!list_empty(&b->write_blocked));
- BUG_ON(b->will_make_reachable);
-
- clear_btree_node_noevict(b);
-}
-
-static void bch2_btree_node_free_inmem(struct btree_trans *trans,
- struct btree_path *path,
- struct btree *b)
-{
- struct bch_fs *c = trans->c;
- unsigned i, level = b->c.level;
-
- bch2_btree_node_lock_write_nofail(trans, path, &b->c);
-
- __btree_node_free(trans, b);
-
- mutex_lock(&c->btree_cache.lock);
- bch2_btree_node_hash_remove(&c->btree_cache, b);
- mutex_unlock(&c->btree_cache.lock);
-
- six_unlock_write(&b->c.lock);
- mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
-
- trans_for_each_path(trans, path, i)
- if (path->l[level].b == b) {
- btree_node_unlock(trans, path, level);
- path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
- }
-}
-
-static void bch2_btree_node_free_never_used(struct btree_update *as,
- struct btree_trans *trans,
- struct btree *b)
-{
- struct bch_fs *c = as->c;
- struct prealloc_nodes *p = &as->prealloc_nodes[b->c.lock.readers != NULL];
- struct btree_path *path;
- unsigned i, level = b->c.level;
-
- BUG_ON(!list_empty(&b->write_blocked));
- BUG_ON(b->will_make_reachable != (1UL|(unsigned long) as));
-
- b->will_make_reachable = 0;
- closure_put(&as->cl);
-
- clear_btree_node_will_make_reachable(b);
- clear_btree_node_accessed(b);
- clear_btree_node_dirty_acct(c, b);
- clear_btree_node_need_write(b);
-
- mutex_lock(&c->btree_cache.lock);
- __bch2_btree_node_hash_remove(&c->btree_cache, b);
- mutex_unlock(&c->btree_cache.lock);
-
- BUG_ON(p->nr >= ARRAY_SIZE(p->b));
- p->b[p->nr++] = b;
-
- six_unlock_intent(&b->c.lock);
-
- trans_for_each_path(trans, path, i)
- if (path->l[level].b == b) {
- btree_node_unlock(trans, path, level);
- path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
- }
-}
-
-static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
- struct disk_reservation *res,
- struct closure *cl,
- bool interior_node,
- unsigned flags)
-{
- struct bch_fs *c = trans->c;
- struct write_point *wp;
- struct btree *b;
- BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
- struct open_buckets obs = { .nr = 0 };
- struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
- enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
- unsigned nr_reserve = watermark < BCH_WATERMARK_reclaim
- ? BTREE_NODE_RESERVE
- : 0;
- int ret;
-
- b = bch2_btree_node_mem_alloc(trans, interior_node);
- if (IS_ERR(b))
- return b;
-
- BUG_ON(b->ob.nr);
-
- mutex_lock(&c->btree_reserve_cache_lock);
- if (c->btree_reserve_cache_nr > nr_reserve) {
- struct btree_alloc *a =
- &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
-
- obs = a->ob;
- bkey_copy(&tmp.k, &a->k);
- mutex_unlock(&c->btree_reserve_cache_lock);
- goto out;
- }
- mutex_unlock(&c->btree_reserve_cache_lock);
-retry:
- ret = bch2_alloc_sectors_start_trans(trans,
- c->opts.metadata_target ?:
- c->opts.foreground_target,
- 0,
- writepoint_ptr(&c->btree_write_point),
- &devs_have,
- res->nr_replicas,
- min(res->nr_replicas,
- c->opts.metadata_replicas_required),
- watermark, 0, cl, &wp);
- if (unlikely(ret))
- goto err;
-
- if (wp->sectors_free < btree_sectors(c)) {
- struct open_bucket *ob;
- unsigned i;
-
- open_bucket_for_each(c, &wp->ptrs, ob, i)
- if (ob->sectors_free < btree_sectors(c))
- ob->sectors_free = 0;
-
- bch2_alloc_sectors_done(c, wp);
- goto retry;
- }
-
- bkey_btree_ptr_v2_init(&tmp.k);
- bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, btree_sectors(c), false);
-
- bch2_open_bucket_get(c, wp, &obs);
- bch2_alloc_sectors_done(c, wp);
-out:
- bkey_copy(&b->key, &tmp.k);
- b->ob = obs;
- six_unlock_write(&b->c.lock);
- six_unlock_intent(&b->c.lock);
-
- return b;
-err:
- bch2_btree_node_to_freelist(c, b);
- return ERR_PTR(ret);
-}
-
-static struct btree *bch2_btree_node_alloc(struct btree_update *as,
- struct btree_trans *trans,
- unsigned level)
-{
- struct bch_fs *c = as->c;
- struct btree *b;
- struct prealloc_nodes *p = &as->prealloc_nodes[!!level];
- int ret;
-
- BUG_ON(level >= BTREE_MAX_DEPTH);
- BUG_ON(!p->nr);
-
- b = p->b[--p->nr];
-
- btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
- btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
-
- set_btree_node_accessed(b);
- set_btree_node_dirty_acct(c, b);
- set_btree_node_need_write(b);
-
- bch2_bset_init_first(b, &b->data->keys);
- b->c.level = level;
- b->c.btree_id = as->btree_id;
- b->version_ondisk = c->sb.version;
-
- memset(&b->nr, 0, sizeof(b->nr));
- b->data->magic = cpu_to_le64(bset_magic(c));
- memset(&b->data->_ptr, 0, sizeof(b->data->_ptr));
- b->data->flags = 0;
- SET_BTREE_NODE_ID(b->data, as->btree_id);
- SET_BTREE_NODE_LEVEL(b->data, level);
-
- if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
- struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(&b->key);
-
- bp->v.mem_ptr = 0;
- bp->v.seq = b->data->keys.seq;
- bp->v.sectors_written = 0;
- }
-
- SET_BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data, true);
-
- bch2_btree_build_aux_trees(b);
-
- ret = bch2_btree_node_hash_insert(&c->btree_cache, b, level, as->btree_id);
- BUG_ON(ret);
-
- trace_and_count(c, btree_node_alloc, trans, b);
- bch2_increment_clock(c, btree_sectors(c), WRITE);
- return b;
-}
-
-static void btree_set_min(struct btree *b, struct bpos pos)
-{
- if (b->key.k.type == KEY_TYPE_btree_ptr_v2)
- bkey_i_to_btree_ptr_v2(&b->key)->v.min_key = pos;
- b->data->min_key = pos;
-}
-
-static void btree_set_max(struct btree *b, struct bpos pos)
-{
- b->key.k.p = pos;
- b->data->max_key = pos;
-}
-
-static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as,
- struct btree_trans *trans,
- struct btree *b)
-{
- struct btree *n = bch2_btree_node_alloc(as, trans, b->c.level);
- struct bkey_format format = bch2_btree_calc_format(b);
-
- /*
- * The keys might expand with the new format - if they wouldn't fit in
- * the btree node anymore, use the old format for now:
- */
- if (!bch2_btree_node_format_fits(as->c, b, b->nr, &format))
- format = b->format;
-
- SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1);
-
- btree_set_min(n, b->data->min_key);
- btree_set_max(n, b->data->max_key);
-
- n->data->format = format;
- btree_node_set_format(n, format);
-
- bch2_btree_sort_into(as->c, n, b);
-
- btree_node_reset_sib_u64s(n);
- return n;
-}
-
-static struct btree *__btree_root_alloc(struct btree_update *as,
- struct btree_trans *trans, unsigned level)
-{
- struct btree *b = bch2_btree_node_alloc(as, trans, level);
-
- btree_set_min(b, POS_MIN);
- btree_set_max(b, SPOS_MAX);
- b->data->format = bch2_btree_calc_format(b);
-
- btree_node_set_format(b, b->data->format);
- bch2_btree_build_aux_trees(b);
-
- return b;
-}
-
-static void bch2_btree_reserve_put(struct btree_update *as, struct btree_trans *trans)
-{
- struct bch_fs *c = as->c;
- struct prealloc_nodes *p;
-
- for (p = as->prealloc_nodes;
- p < as->prealloc_nodes + ARRAY_SIZE(as->prealloc_nodes);
- p++) {
- while (p->nr) {
- struct btree *b = p->b[--p->nr];
-
- mutex_lock(&c->btree_reserve_cache_lock);
-
- if (c->btree_reserve_cache_nr <
- ARRAY_SIZE(c->btree_reserve_cache)) {
- struct btree_alloc *a =
- &c->btree_reserve_cache[c->btree_reserve_cache_nr++];
-
- a->ob = b->ob;
- b->ob.nr = 0;
- bkey_copy(&a->k, &b->key);
- } else {
- bch2_open_buckets_put(c, &b->ob);
- }
-
- mutex_unlock(&c->btree_reserve_cache_lock);
-
- btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
- btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
- __btree_node_free(trans, b);
- bch2_btree_node_to_freelist(c, b);
- }
- }
-}
-
-static int bch2_btree_reserve_get(struct btree_trans *trans,
- struct btree_update *as,
- unsigned nr_nodes[2],
- unsigned flags,
- struct closure *cl)
-{
- struct btree *b;
- unsigned interior;
- int ret = 0;
-
- BUG_ON(nr_nodes[0] + nr_nodes[1] > BTREE_RESERVE_MAX);
-
- /*
- * Protects reaping from the btree node cache and using the btree node
- * open bucket reserve:
- */
- ret = bch2_btree_cache_cannibalize_lock(trans, cl);
- if (ret)
- return ret;
-
- for (interior = 0; interior < 2; interior++) {
- struct prealloc_nodes *p = as->prealloc_nodes + interior;
-
- while (p->nr < nr_nodes[interior]) {
- b = __bch2_btree_node_alloc(trans, &as->disk_res, cl,
- interior, flags);
- if (IS_ERR(b)) {
- ret = PTR_ERR(b);
- goto err;
- }
-
- p->b[p->nr++] = b;
- }
- }
-err:
- bch2_btree_cache_cannibalize_unlock(trans);
- return ret;
-}
-
-/* Asynchronous interior node update machinery */
-
-static void bch2_btree_update_free(struct btree_update *as, struct btree_trans *trans)
-{
- struct bch_fs *c = as->c;
-
- if (as->took_gc_lock)
- up_read(&c->gc_lock);
- as->took_gc_lock = false;
-
- bch2_journal_pin_drop(&c->journal, &as->journal);
- bch2_journal_pin_flush(&c->journal, &as->journal);
- bch2_disk_reservation_put(c, &as->disk_res);
- bch2_btree_reserve_put(as, trans);
-
- bch2_time_stats_update(&c->times[BCH_TIME_btree_interior_update_total],
- as->start_time);
-
- mutex_lock(&c->btree_interior_update_lock);
- list_del(&as->unwritten_list);
- list_del(&as->list);
-
- closure_debug_destroy(&as->cl);
- mempool_free(as, &c->btree_interior_update_pool);
-
- /*
- * Have to do the wakeup with btree_interior_update_lock still held,
- * since being on btree_interior_update_list is our ref on @c:
- */
- closure_wake_up(&c->btree_interior_update_wait);
-
- mutex_unlock(&c->btree_interior_update_lock);
-}
-
-static void btree_update_add_key(struct btree_update *as,
- struct keylist *keys, struct btree *b)
-{
- struct bkey_i *k = &b->key;
-
- BUG_ON(bch2_keylist_u64s(keys) + k->k.u64s >
- ARRAY_SIZE(as->_old_keys));
-
- bkey_copy(keys->top, k);
- bkey_i_to_btree_ptr_v2(keys->top)->v.mem_ptr = b->c.level + 1;
-
- bch2_keylist_push(keys);
-}
-
-static bool btree_update_new_nodes_marked_sb(struct btree_update *as)
-{
- for_each_keylist_key(&as->new_keys, k)
- if (!bch2_dev_btree_bitmap_marked(as->c, bkey_i_to_s_c(k)))
- return false;
- return true;
-}
-
-static void btree_update_new_nodes_mark_sb(struct btree_update *as)
-{
- struct bch_fs *c = as->c;
-
- mutex_lock(&c->sb_lock);
- for_each_keylist_key(&as->new_keys, k)
- bch2_dev_btree_bitmap_mark(c, bkey_i_to_s_c(k));
-
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-}
-
-/*
- * The transactional part of an interior btree node update, where we journal the
- * update we did to the interior node and update alloc info:
- */
-static int btree_update_nodes_written_trans(struct btree_trans *trans,
- struct btree_update *as)
-{
- struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, as->journal_u64s);
- int ret = PTR_ERR_OR_ZERO(e);
- if (ret)
- return ret;
-
- memcpy(e, as->journal_entries, as->journal_u64s * sizeof(u64));
-
- trans->journal_pin = &as->journal;
-
- for_each_keylist_key(&as->old_keys, k) {
- unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr;
-
- ret = bch2_key_trigger_old(trans, as->btree_id, level, bkey_i_to_s_c(k),
- BTREE_TRIGGER_transactional);
- if (ret)
- return ret;
- }
-
- for_each_keylist_key(&as->new_keys, k) {
- unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr;
-
- ret = bch2_key_trigger_new(trans, as->btree_id, level, bkey_i_to_s(k),
- BTREE_TRIGGER_transactional);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static void btree_update_nodes_written(struct btree_update *as)
-{
- struct bch_fs *c = as->c;
- struct btree *b;
- struct btree_trans *trans = bch2_trans_get(c);
- u64 journal_seq = 0;
- unsigned i;
- int ret;
-
- /*
- * If we're already in an error state, it might be because a btree node
- * was never written, and we might be trying to free that same btree
- * node here, but it won't have been marked as allocated and we'll see
- * spurious disk usage inconsistencies in the transactional part below
- * if we don't skip it:
- */
- ret = bch2_journal_error(&c->journal);
- if (ret)
- goto err;
-
- if (!btree_update_new_nodes_marked_sb(as))
- btree_update_new_nodes_mark_sb(as);
-
- /*
- * Wait for any in flight writes to finish before we free the old nodes
- * on disk:
- */
- for (i = 0; i < as->nr_old_nodes; i++) {
- __le64 seq;
-
- b = as->old_nodes[i];
-
- btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
- seq = b->data ? b->data->keys.seq : 0;
- six_unlock_read(&b->c.lock);
-
- if (seq == as->old_nodes_seq[i])
- wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight_inner,
- TASK_UNINTERRUPTIBLE);
- }
-
- /*
- * We did an update to a parent node where the pointers we added pointed
- * to child nodes that weren't written yet: now, the child nodes have
- * been written so we can write out the update to the interior node.
- */
-
- /*
- * We can't call into journal reclaim here: we'd block on the journal
- * reclaim lock, but we may need to release the open buckets we have
- * pinned in order for other btree updates to make forward progress, and
- * journal reclaim does btree updates when flushing bkey_cached entries,
- * which may require allocations as well.
- */
- ret = commit_do(trans, &as->disk_res, &journal_seq,
- BCH_WATERMARK_interior_updates|
- BCH_TRANS_COMMIT_no_enospc|
- BCH_TRANS_COMMIT_no_check_rw|
- BCH_TRANS_COMMIT_journal_reclaim,
- btree_update_nodes_written_trans(trans, as));
- bch2_trans_unlock(trans);
-
- bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c,
- "%s", bch2_err_str(ret));
-err:
- /*
- * Ensure transaction is unlocked before using btree_node_lock_nopath()
- * (the use of which is always suspect, we need to work on removing this
- * in the future)
- *
-	 * It should already be unlocked, but bch2_path_get_unlocked_mut() ->
-	 * bch2_path_get() calls bch2_path_upgrade() before we call
-	 * path_make_mut(), so we may rarely end up with a locked path besides
-	 * the one we have here:
- */
- bch2_trans_unlock(trans);
- bch2_trans_begin(trans);
-
- /*
- * We have to be careful because another thread might be getting ready
- * to free as->b and calling btree_update_reparent() on us - we'll
- * recheck under btree_update_lock below:
- */
- b = READ_ONCE(as->b);
- if (b) {
- /*
- * @b is the node we did the final insert into:
- *
- * On failure to get a journal reservation, we still have to
- * unblock the write and allow most of the write path to happen
- * so that shutdown works, but the i->journal_seq mechanism
- * won't work to prevent the btree write from being visible (we
- * didn't get a journal sequence number) - instead
- * __bch2_btree_node_write() doesn't do the actual write if
- * we're in journal error state:
- */
-
- btree_path_idx_t path_idx = bch2_path_get_unlocked_mut(trans,
- as->btree_id, b->c.level, b->key.k.p);
- struct btree_path *path = trans->paths + path_idx;
- btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
- mark_btree_node_locked(trans, path, b->c.level, BTREE_NODE_INTENT_LOCKED);
- path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
- path->l[b->c.level].b = b;
-
- bch2_btree_node_lock_write_nofail(trans, path, &b->c);
-
- mutex_lock(&c->btree_interior_update_lock);
-
- list_del(&as->write_blocked_list);
- if (list_empty(&b->write_blocked))
- clear_btree_node_write_blocked(b);
-
- /*
- * Node might have been freed, recheck under
- * btree_interior_update_lock:
- */
- if (as->b == b) {
- BUG_ON(!b->c.level);
- BUG_ON(!btree_node_dirty(b));
-
- if (!ret) {
- struct bset *last = btree_bset_last(b);
-
- last->journal_seq = cpu_to_le64(
- max(journal_seq,
- le64_to_cpu(last->journal_seq)));
-
- bch2_btree_add_journal_pin(c, b, journal_seq);
- } else {
- /*
- * If we didn't get a journal sequence number we
- * can't write this btree node, because recovery
- * won't know to ignore this write:
- */
- set_btree_node_never_write(b);
- }
- }
-
- mutex_unlock(&c->btree_interior_update_lock);
-
- mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
- six_unlock_write(&b->c.lock);
-
- btree_node_write_if_need(c, b, SIX_LOCK_intent);
- btree_node_unlock(trans, path, b->c.level);
- bch2_path_put(trans, path_idx, true);
- }
-
- bch2_journal_pin_drop(&c->journal, &as->journal);
-
- mutex_lock(&c->btree_interior_update_lock);
- for (i = 0; i < as->nr_new_nodes; i++) {
- b = as->new_nodes[i];
-
- BUG_ON(b->will_make_reachable != (unsigned long) as);
- b->will_make_reachable = 0;
- clear_btree_node_will_make_reachable(b);
- }
- mutex_unlock(&c->btree_interior_update_lock);
-
- for (i = 0; i < as->nr_new_nodes; i++) {
- b = as->new_nodes[i];
-
- btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
- btree_node_write_if_need(c, b, SIX_LOCK_read);
- six_unlock_read(&b->c.lock);
- }
-
- for (i = 0; i < as->nr_open_buckets; i++)
- bch2_open_bucket_put(c, c->open_buckets + as->open_buckets[i]);
-
- bch2_btree_update_free(as, trans);
- bch2_trans_put(trans);
-}
-
-static void btree_interior_update_work(struct work_struct *work)
-{
- struct bch_fs *c =
- container_of(work, struct bch_fs, btree_interior_update_work);
- struct btree_update *as;
-
- while (1) {
- mutex_lock(&c->btree_interior_update_lock);
- as = list_first_entry_or_null(&c->btree_interior_updates_unwritten,
- struct btree_update, unwritten_list);
- if (as && !as->nodes_written)
- as = NULL;
- mutex_unlock(&c->btree_interior_update_lock);
-
- if (!as)
- break;
-
- btree_update_nodes_written(as);
- }
-}
-
-static CLOSURE_CALLBACK(btree_update_set_nodes_written)
-{
- closure_type(as, struct btree_update, cl);
- struct bch_fs *c = as->c;
-
- mutex_lock(&c->btree_interior_update_lock);
- as->nodes_written = true;
- mutex_unlock(&c->btree_interior_update_lock);
-
- queue_work(c->btree_interior_update_worker, &c->btree_interior_update_work);
-}
-
-/*
- * We're updating @b with pointers to nodes that haven't finished writing yet:
- * block @b from being written until @as completes
- */
-static void btree_update_updated_node(struct btree_update *as, struct btree *b)
-{
- struct bch_fs *c = as->c;
-
- BUG_ON(as->mode != BTREE_UPDATE_none);
- BUG_ON(as->update_level_end < b->c.level);
- BUG_ON(!btree_node_dirty(b));
- BUG_ON(!b->c.level);
-
- mutex_lock(&c->btree_interior_update_lock);
- list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);
-
- as->mode = BTREE_UPDATE_node;
- as->b = b;
- as->update_level_end = b->c.level;
-
- set_btree_node_write_blocked(b);
- list_add(&as->write_blocked_list, &b->write_blocked);
-
- mutex_unlock(&c->btree_interior_update_lock);
-}
-
-static int bch2_update_reparent_journal_pin_flush(struct journal *j,
- struct journal_entry_pin *_pin, u64 seq)
-{
- return 0;
-}
-
-static void btree_update_reparent(struct btree_update *as,
- struct btree_update *child)
-{
- struct bch_fs *c = as->c;
-
- lockdep_assert_held(&c->btree_interior_update_lock);
-
- child->b = NULL;
- child->mode = BTREE_UPDATE_update;
-
- bch2_journal_pin_copy(&c->journal, &as->journal, &child->journal,
- bch2_update_reparent_journal_pin_flush);
-}
-
-static void btree_update_updated_root(struct btree_update *as, struct btree *b)
-{
- struct bkey_i *insert = &b->key;
- struct bch_fs *c = as->c;
-
- BUG_ON(as->mode != BTREE_UPDATE_none);
-
- BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >
- ARRAY_SIZE(as->journal_entries));
-
- as->journal_u64s +=
- journal_entry_set((void *) &as->journal_entries[as->journal_u64s],
- BCH_JSET_ENTRY_btree_root,
- b->c.btree_id, b->c.level,
- insert, insert->k.u64s);
-
- mutex_lock(&c->btree_interior_update_lock);
- list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);
-
- as->mode = BTREE_UPDATE_root;
- mutex_unlock(&c->btree_interior_update_lock);
-}
-
-/*
- * bch2_btree_update_add_new_node:
- *
- * This causes @as to wait on @b to be written, before it gets to
- * bch2_btree_update_nodes_written
- *
- * Additionally, it sets b->will_make_reachable to prevent any additional writes
- * to @b from happening besides the first until @b is reachable on disk
- *
- * And it adds @b to the list of @as's new nodes, so that we can update sector
- * counts in bch2_btree_update_nodes_written:
- */
-static void bch2_btree_update_add_new_node(struct btree_update *as, struct btree *b)
-{
- struct bch_fs *c = as->c;
-
- closure_get(&as->cl);
-
- mutex_lock(&c->btree_interior_update_lock);
- BUG_ON(as->nr_new_nodes >= ARRAY_SIZE(as->new_nodes));
- BUG_ON(b->will_make_reachable);
-
- as->new_nodes[as->nr_new_nodes++] = b;
- b->will_make_reachable = 1UL|(unsigned long) as;
- set_btree_node_will_make_reachable(b);
-
- mutex_unlock(&c->btree_interior_update_lock);
-
- btree_update_add_key(as, &as->new_keys, b);
-
- if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
- unsigned bytes = vstruct_end(&b->data->keys) - (void *) b->data;
- unsigned sectors = round_up(bytes, block_bytes(c)) >> 9;
-
- bkey_i_to_btree_ptr_v2(&b->key)->v.sectors_written =
- cpu_to_le16(sectors);
- }
-}
-
-/*
- * If @b was a new node, drop it from the btree_update that was going to make
- * it reachable:
- */
-static void btree_update_drop_new_node(struct bch_fs *c, struct btree *b)
-{
- struct btree_update *as;
- unsigned long v;
- unsigned i;
-
- mutex_lock(&c->btree_interior_update_lock);
- /*
- * When b->will_make_reachable != 0, it owns a ref on as->cl that's
- * dropped when it gets written by bch2_btree_complete_write - the
- * xchg() is for synchronization with bch2_btree_complete_write:
- */
- v = xchg(&b->will_make_reachable, 0);
- clear_btree_node_will_make_reachable(b);
- as = (struct btree_update *) (v & ~1UL);
-
- if (!as) {
- mutex_unlock(&c->btree_interior_update_lock);
- return;
- }
-
- for (i = 0; i < as->nr_new_nodes; i++)
- if (as->new_nodes[i] == b)
- goto found;
-
- BUG();
-found:
- array_remove_item(as->new_nodes, as->nr_new_nodes, i);
- mutex_unlock(&c->btree_interior_update_lock);
-
- if (v & 1)
- closure_put(&as->cl);
-}
-
-static void bch2_btree_update_get_open_buckets(struct btree_update *as, struct btree *b)
-{
- while (b->ob.nr)
- as->open_buckets[as->nr_open_buckets++] =
- b->ob.v[--b->ob.nr];
-}
-
-static int bch2_btree_update_will_free_node_journal_pin_flush(struct journal *j,
- struct journal_entry_pin *_pin, u64 seq)
-{
- return 0;
-}
-
-/*
- * @b is being split/rewritten: it may have pointers to not-yet-written btree
- * nodes and thus outstanding btree_updates - redirect @b's
- * btree_updates to point to this btree_update:
- */
-static void bch2_btree_interior_update_will_free_node(struct btree_update *as,
- struct btree *b)
-{
- struct bch_fs *c = as->c;
- struct btree_update *p, *n;
- struct btree_write *w;
-
- set_btree_node_dying(b);
-
- if (btree_node_fake(b))
- return;
-
- mutex_lock(&c->btree_interior_update_lock);
-
- /*
- * Does this node have any btree_update operations preventing
- * it from being written?
- *
- * If so, redirect them to point to this btree_update: we can
- * write out our new nodes, but we won't make them visible until those
- * operations complete
- */
- list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) {
- list_del_init(&p->write_blocked_list);
- btree_update_reparent(as, p);
-
- /*
- * for flush_held_btree_writes() waiting on updates to flush or
- * nodes to be writeable:
- */
- closure_wake_up(&c->btree_interior_update_wait);
- }
-
- clear_btree_node_dirty_acct(c, b);
- clear_btree_node_need_write(b);
- clear_btree_node_write_blocked(b);
-
- /*
- * Does this node have unwritten data that has a pin on the journal?
- *
- * If so, transfer that pin to the btree_update operation -
- * note that if we're freeing multiple nodes, we only need to keep the
- * oldest pin of any of the nodes we're freeing. We'll release the pin
- * when the new nodes are persistent and reachable on disk:
- */
- w = btree_current_write(b);
- bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal,
- bch2_btree_update_will_free_node_journal_pin_flush);
- bch2_journal_pin_drop(&c->journal, &w->journal);
-
- w = btree_prev_write(b);
- bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal,
- bch2_btree_update_will_free_node_journal_pin_flush);
- bch2_journal_pin_drop(&c->journal, &w->journal);
-
- mutex_unlock(&c->btree_interior_update_lock);
-
- /*
- * Is this a node that isn't reachable on disk yet?
- *
- * Nodes that aren't reachable yet have writes blocked until they're
- * reachable - now that we've cancelled any pending writes and moved
- * things waiting on that write to wait on this update, we can drop this
- * node from the list of nodes that the other update is making
- * reachable, prior to freeing it:
- */
- btree_update_drop_new_node(c, b);
-
- btree_update_add_key(as, &as->old_keys, b);
-
- as->old_nodes[as->nr_old_nodes] = b;
- as->old_nodes_seq[as->nr_old_nodes] = b->data->keys.seq;
- as->nr_old_nodes++;
-}
-
-static void bch2_btree_update_done(struct btree_update *as, struct btree_trans *trans)
-{
- struct bch_fs *c = as->c;
- u64 start_time = as->start_time;
-
- BUG_ON(as->mode == BTREE_UPDATE_none);
-
- if (as->took_gc_lock)
- up_read(&as->c->gc_lock);
- as->took_gc_lock = false;
-
- bch2_btree_reserve_put(as, trans);
-
- continue_at(&as->cl, btree_update_set_nodes_written,
- as->c->btree_interior_update_worker);
-
- bch2_time_stats_update(&c->times[BCH_TIME_btree_interior_update_foreground],
- start_time);
-}
-
-static struct btree_update *
-bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
- unsigned level_start, bool split, unsigned flags)
-{
- struct bch_fs *c = trans->c;
- struct btree_update *as;
- u64 start_time = local_clock();
- int disk_res_flags = (flags & BCH_TRANS_COMMIT_no_enospc)
- ? BCH_DISK_RESERVATION_NOFAIL : 0;
- unsigned nr_nodes[2] = { 0, 0 };
- unsigned level_end = level_start;
- enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
- int ret = 0;
- u32 restart_count = trans->restart_count;
-
- BUG_ON(!path->should_be_locked);
-
- if (watermark == BCH_WATERMARK_copygc)
- watermark = BCH_WATERMARK_btree_copygc;
- if (watermark < BCH_WATERMARK_btree)
- watermark = BCH_WATERMARK_btree;
-
- flags &= ~BCH_WATERMARK_MASK;
- flags |= watermark;
-
- if (watermark < BCH_WATERMARK_reclaim &&
- test_bit(JOURNAL_space_low, &c->journal.flags)) {
- if (flags & BCH_TRANS_COMMIT_journal_reclaim)
- return ERR_PTR(-BCH_ERR_journal_reclaim_would_deadlock);
-
- ret = drop_locks_do(trans,
- ({ wait_event(c->journal.wait, !test_bit(JOURNAL_space_low, &c->journal.flags)); 0; }));
- if (ret)
- return ERR_PTR(ret);
- }
-
- while (1) {
- nr_nodes[!!level_end] += 1 + split;
- level_end++;
-
- ret = bch2_btree_path_upgrade(trans, path, level_end + 1);
- if (ret)
- return ERR_PTR(ret);
-
- if (!btree_path_node(path, level_end)) {
- /* Allocating new root? */
- nr_nodes[1] += split;
- level_end = BTREE_MAX_DEPTH;
- break;
- }
-
- /*
- * Always check for space for two keys, even if we won't have to
-		 * split at the prior level - it might have been a merge instead:
- */
- if (bch2_btree_node_insert_fits(path->l[level_end].b,
- BKEY_BTREE_PTR_U64s_MAX * 2))
- break;
-
- split = path->l[level_end].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c);
- }
-
- if (!down_read_trylock(&c->gc_lock)) {
- ret = drop_locks_do(trans, (down_read(&c->gc_lock), 0));
- if (ret) {
- up_read(&c->gc_lock);
- return ERR_PTR(ret);
- }
- }
-
- as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOFS);
- memset(as, 0, sizeof(*as));
- closure_init(&as->cl, NULL);
- as->c = c;
- as->start_time = start_time;
- as->ip_started = _RET_IP_;
- as->mode = BTREE_UPDATE_none;
- as->flags = flags;
- as->took_gc_lock = true;
- as->btree_id = path->btree_id;
- as->update_level_start = level_start;
- as->update_level_end = level_end;
- INIT_LIST_HEAD(&as->list);
- INIT_LIST_HEAD(&as->unwritten_list);
- INIT_LIST_HEAD(&as->write_blocked_list);
- bch2_keylist_init(&as->old_keys, as->_old_keys);
- bch2_keylist_init(&as->new_keys, as->_new_keys);
- bch2_keylist_init(&as->parent_keys, as->inline_keys);
-
- mutex_lock(&c->btree_interior_update_lock);
- list_add_tail(&as->list, &c->btree_interior_update_list);
- mutex_unlock(&c->btree_interior_update_lock);
-
- /*
-	 * We don't want to allocate if we're in an error state: that can cause
- * deadlock on emergency shutdown due to open buckets getting stuck in
- * the btree_reserve_cache after allocator shutdown has cleared it out.
- * This check needs to come after adding us to the btree_interior_update
- * list but before calling bch2_btree_reserve_get, to synchronize with
- * __bch2_fs_read_only().
- */
- ret = bch2_journal_error(&c->journal);
- if (ret)
- goto err;
-
- ret = bch2_disk_reservation_get(c, &as->disk_res,
- (nr_nodes[0] + nr_nodes[1]) * btree_sectors(c),
- c->opts.metadata_replicas,
- disk_res_flags);
- if (ret)
- goto err;
-
- ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, NULL);
- if (bch2_err_matches(ret, ENOSPC) ||
- bch2_err_matches(ret, ENOMEM)) {
- struct closure cl;
-
- /*
- * XXX: this should probably be a separate BTREE_INSERT_NONBLOCK
- * flag
- */
- if (bch2_err_matches(ret, ENOSPC) &&
- (flags & BCH_TRANS_COMMIT_journal_reclaim) &&
- watermark < BCH_WATERMARK_reclaim) {
- ret = -BCH_ERR_journal_reclaim_would_deadlock;
- goto err;
- }
-
- closure_init_stack(&cl);
-
- do {
- ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, &cl);
-
- bch2_trans_unlock(trans);
- bch2_wait_on_allocator(c, &cl);
- } while (bch2_err_matches(ret, BCH_ERR_operation_blocked));
- }
-
- if (ret) {
- trace_and_count(c, btree_reserve_get_fail, trans->fn,
- _RET_IP_, nr_nodes[0] + nr_nodes[1], ret);
- goto err;
- }
-
- ret = bch2_trans_relock(trans);
- if (ret)
- goto err;
-
- bch2_trans_verify_not_restarted(trans, restart_count);
- return as;
-err:
- bch2_btree_update_free(as, trans);
- if (!bch2_err_matches(ret, ENOSPC) &&
- !bch2_err_matches(ret, EROFS) &&
- ret != -BCH_ERR_journal_reclaim_would_deadlock)
- bch_err_fn_ratelimited(c, ret);
- return ERR_PTR(ret);
-}
-
-/* Btree root updates: */
-
-static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
-{
- /* Root nodes cannot be reaped */
- mutex_lock(&c->btree_cache.lock);
- list_del_init(&b->list);
- mutex_unlock(&c->btree_cache.lock);
-
- mutex_lock(&c->btree_root_lock);
- bch2_btree_id_root(c, b->c.btree_id)->b = b;
- mutex_unlock(&c->btree_root_lock);
-
- bch2_recalc_btree_reserve(c);
-}
-
-static int bch2_btree_set_root(struct btree_update *as,
- struct btree_trans *trans,
- struct btree_path *path,
- struct btree *b,
- bool nofail)
-{
- struct bch_fs *c = as->c;
-
- trace_and_count(c, btree_node_set_root, trans, b);
-
- struct btree *old = btree_node_root(c, b);
-
- /*
- * Ensure no one is using the old root while we switch to the
- * new root:
- */
- if (nofail) {
- bch2_btree_node_lock_write_nofail(trans, path, &old->c);
- } else {
- int ret = bch2_btree_node_lock_write(trans, path, &old->c);
- if (ret)
- return ret;
- }
-
- bch2_btree_set_root_inmem(c, b);
-
- btree_update_updated_root(as, b);
-
- /*
- * Unlock old root after new root is visible:
- *
- * The new root isn't persistent, but that's ok: we still have
- * an intent lock on the new root, and any updates that would
- * depend on the new root would have to update the new root.
- */
- bch2_btree_node_unlock_write(trans, path, old);
- return 0;
-}
-
-/* Interior node updates: */
-
-static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
- struct btree_trans *trans,
- struct btree_path *path,
- struct btree *b,
- struct btree_node_iter *node_iter,
- struct bkey_i *insert)
-{
- struct bch_fs *c = as->c;
- struct bkey_packed *k;
- struct printbuf buf = PRINTBUF;
- unsigned long old, new;
-
- BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 &&
- !btree_ptr_sectors_written(bkey_i_to_s_c(insert)));
-
- if (unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)))
- bch2_journal_key_overwritten(c, b->c.btree_id, b->c.level, insert->k.p);
-
- if (bch2_bkey_validate(c, bkey_i_to_s_c(insert),
- btree_node_type(b), BCH_VALIDATE_write) ?:
- bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), BCH_VALIDATE_write)) {
- bch2_fs_inconsistent(c, "%s: inserting invalid bkey", __func__);
- dump_stack();
- }
-
- BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >
- ARRAY_SIZE(as->journal_entries));
-
- as->journal_u64s +=
- journal_entry_set((void *) &as->journal_entries[as->journal_u64s],
- BCH_JSET_ENTRY_btree_keys,
- b->c.btree_id, b->c.level,
- insert, insert->k.u64s);
-
- while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
- bkey_iter_pos_cmp(b, k, &insert->k.p) < 0)
- bch2_btree_node_iter_advance(node_iter, b);
-
- bch2_btree_bset_insert_key(trans, path, b, node_iter, insert);
- set_btree_node_dirty_acct(c, b);
-
- old = READ_ONCE(b->flags);
- do {
- new = old;
-
- new &= ~BTREE_WRITE_TYPE_MASK;
- new |= BTREE_WRITE_interior;
- new |= 1 << BTREE_NODE_need_write;
- } while (!try_cmpxchg(&b->flags, &old, new));
-
- printbuf_exit(&buf);
-}
-
-static void
-bch2_btree_insert_keys_interior(struct btree_update *as,
- struct btree_trans *trans,
- struct btree_path *path,
- struct btree *b,
- struct btree_node_iter node_iter,
- struct keylist *keys)
-{
- struct bkey_i *insert = bch2_keylist_front(keys);
- struct bkey_packed *k;
-
- BUG_ON(btree_node_type(b) != BKEY_TYPE_btree);
-
- while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) &&
- (bkey_cmp_left_packed(b, k, &insert->k.p) >= 0))
- ;
-
- while (!bch2_keylist_empty(keys)) {
- insert = bch2_keylist_front(keys);
-
- if (bpos_gt(insert->k.p, b->key.k.p))
- break;
-
- bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, insert);
- bch2_keylist_pop_front(keys);
- }
-}
-
-static bool key_deleted_in_insert(struct keylist *insert_keys, struct bpos pos)
-{
- if (insert_keys)
- for_each_keylist_key(insert_keys, k)
- if (bkey_deleted(&k->k) && bpos_eq(k->k.p, pos))
- return true;
- return false;
-}
-
-/*
- * Move keys from n1 (original replacement node, now lower node) to n2 (higher
- * node)
- */
-static void __btree_split_node(struct btree_update *as,
- struct btree_trans *trans,
- struct btree *b,
- struct btree *n[2],
- struct keylist *insert_keys)
-{
- struct bkey_packed *k;
- struct bpos n1_pos = POS_MIN;
- struct btree_node_iter iter;
- struct bset *bsets[2];
- struct bkey_format_state format[2];
- struct bkey_packed *out[2];
- struct bkey uk;
- unsigned u64s, n1_u64s = (b->nr.live_u64s * 3) / 5;
- struct { unsigned nr_keys, val_u64s; } nr_keys[2];
- int i;
-
- memset(&nr_keys, 0, sizeof(nr_keys));
-
- for (i = 0; i < 2; i++) {
- BUG_ON(n[i]->nsets != 1);
-
- bsets[i] = btree_bset_first(n[i]);
- out[i] = bsets[i]->start;
-
- SET_BTREE_NODE_SEQ(n[i]->data, BTREE_NODE_SEQ(b->data) + 1);
- bch2_bkey_format_init(&format[i]);
- }
-
- u64s = 0;
- for_each_btree_node_key(b, k, &iter) {
- if (bkey_deleted(k))
- continue;
-
- uk = bkey_unpack_key(b, k);
-
- if (b->c.level &&
- u64s < n1_u64s &&
- u64s + k->u64s >= n1_u64s &&
- (bch2_key_deleted_in_journal(trans, b->c.btree_id, b->c.level, uk.p) ||
- key_deleted_in_insert(insert_keys, uk.p)))
- n1_u64s += k->u64s;
-
- i = u64s >= n1_u64s;
- u64s += k->u64s;
- if (!i)
- n1_pos = uk.p;
- bch2_bkey_format_add_key(&format[i], &uk);
-
- nr_keys[i].nr_keys++;
- nr_keys[i].val_u64s += bkeyp_val_u64s(&b->format, k);
- }
-
- btree_set_min(n[0], b->data->min_key);
- btree_set_max(n[0], n1_pos);
- btree_set_min(n[1], bpos_successor(n1_pos));
- btree_set_max(n[1], b->data->max_key);
-
- for (i = 0; i < 2; i++) {
- bch2_bkey_format_add_pos(&format[i], n[i]->data->min_key);
- bch2_bkey_format_add_pos(&format[i], n[i]->data->max_key);
-
- n[i]->data->format = bch2_bkey_format_done(&format[i]);
-
- unsigned u64s = nr_keys[i].nr_keys * n[i]->data->format.key_u64s +
- nr_keys[i].val_u64s;
- if (__vstruct_bytes(struct btree_node, u64s) > btree_buf_bytes(b))
- n[i]->data->format = b->format;
-
- btree_node_set_format(n[i], n[i]->data->format);
- }
-
- u64s = 0;
- for_each_btree_node_key(b, k, &iter) {
- if (bkey_deleted(k))
- continue;
-
- i = u64s >= n1_u64s;
- u64s += k->u64s;
-
- if (bch2_bkey_transform(&n[i]->format, out[i], bkey_packed(k)
- ? &b->format: &bch2_bkey_format_current, k))
- out[i]->format = KEY_FORMAT_LOCAL_BTREE;
- else
- bch2_bkey_unpack(b, (void *) out[i], k);
-
- out[i]->needs_whiteout = false;
-
- btree_keys_account_key_add(&n[i]->nr, 0, out[i]);
- out[i] = bkey_p_next(out[i]);
- }
-
- for (i = 0; i < 2; i++) {
- bsets[i]->u64s = cpu_to_le16((u64 *) out[i] - bsets[i]->_data);
-
- BUG_ON(!bsets[i]->u64s);
-
- set_btree_bset_end(n[i], n[i]->set);
-
- btree_node_reset_sib_u64s(n[i]);
-
- bch2_verify_btree_nr_keys(n[i]);
-
- BUG_ON(bch2_btree_node_check_topology(trans, n[i]));
- }
-}
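
As a rough illustration of the split-point arithmetic above: the lower node gets about 3/5 of the live u64s, the cut-off lands on a whole key, and on interior nodes it is nudged so that a boundary key which is already being deleted stays in the lower node. A minimal standalone sketch of just that arithmetic follows; the helper name and the toy key-size array are hypothetical, not part of the removed file:

	#include <stdio.h>

	/* Hypothetical standalone helper, not from bcachefs: returns the index
	 * of the first key that would go to the higher node. */
	static unsigned pick_split_point(const unsigned *key_u64s, unsigned nr,
					 unsigned live_u64s)
	{
		unsigned n1_u64s = live_u64s * 3 / 5;	/* same fraction as __btree_split_node() */
		unsigned u64s = 0, i;

		for (i = 0; i < nr; i++) {
			if (u64s >= n1_u64s)		/* boundary reached: the rest goes to n2 */
				break;
			u64s += key_u64s[i];
		}
		return i;
	}

	int main(void)
	{
		unsigned keys[] = { 4, 6, 5, 4, 7, 5 };	/* per-key u64s, 31 total */

		/* 31 * 3 / 5 = 18; keys 0..3 (19 u64s) stay in n1, split at index 4 */
		printf("split at key %u\n", pick_split_point(keys, 6, 31));
		return 0;
	}
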
-
-/*
- * For updates to interior nodes, we've got to do the insert before we split
- * because the stuff we're inserting has to be inserted atomically. Post split,
- * the keys might have to go in different nodes and the split would no longer be
- * atomic.
- *
- * Worse, if the insert is from btree node coalescing and we do the insert after
- * the split (and pick the pivot), the pivot we pick might be between nodes that
- * were coalesced, and thus in the middle of a child node post coalescing:
- */
-static void btree_split_insert_keys(struct btree_update *as,
- struct btree_trans *trans,
- btree_path_idx_t path_idx,
- struct btree *b,
- struct keylist *keys)
-{
- struct btree_path *path = trans->paths + path_idx;
-
- if (!bch2_keylist_empty(keys) &&
- bpos_le(bch2_keylist_front(keys)->k.p, b->data->max_key)) {
- struct btree_node_iter node_iter;
-
- bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p);
-
- bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);
-
- BUG_ON(bch2_btree_node_check_topology(trans, b));
- }
-}
-
-static int btree_split(struct btree_update *as, struct btree_trans *trans,
- btree_path_idx_t path, struct btree *b,
- struct keylist *keys)
-{
- struct bch_fs *c = as->c;
- struct btree *parent = btree_node_parent(trans->paths + path, b);
- struct btree *n1, *n2 = NULL, *n3 = NULL;
- btree_path_idx_t path1 = 0, path2 = 0;
- u64 start_time = local_clock();
- int ret = 0;
-
- bch2_verify_btree_nr_keys(b);
- BUG_ON(!parent && (b != btree_node_root(c, b)));
- BUG_ON(parent && !btree_node_intent_locked(trans->paths + path, b->c.level + 1));
-
- ret = bch2_btree_node_check_topology(trans, b);
- if (ret)
- return ret;
-
- bch2_btree_interior_update_will_free_node(as, b);
-
- if (b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c)) {
- struct btree *n[2];
-
- trace_and_count(c, btree_node_split, trans, b);
-
- n[0] = n1 = bch2_btree_node_alloc(as, trans, b->c.level);
- n[1] = n2 = bch2_btree_node_alloc(as, trans, b->c.level);
-
- __btree_split_node(as, trans, b, n, keys);
-
- if (keys) {
- btree_split_insert_keys(as, trans, path, n1, keys);
- btree_split_insert_keys(as, trans, path, n2, keys);
- BUG_ON(!bch2_keylist_empty(keys));
- }
-
- bch2_btree_build_aux_trees(n2);
- bch2_btree_build_aux_trees(n1);
-
- bch2_btree_update_add_new_node(as, n1);
- bch2_btree_update_add_new_node(as, n2);
- six_unlock_write(&n2->c.lock);
- six_unlock_write(&n1->c.lock);
-
- path1 = bch2_path_get_unlocked_mut(trans, as->btree_id, n1->c.level, n1->key.k.p);
- six_lock_increment(&n1->c.lock, SIX_LOCK_intent);
- mark_btree_node_locked(trans, trans->paths + path1, n1->c.level, BTREE_NODE_INTENT_LOCKED);
- bch2_btree_path_level_init(trans, trans->paths + path1, n1);
-
- path2 = bch2_path_get_unlocked_mut(trans, as->btree_id, n2->c.level, n2->key.k.p);
- six_lock_increment(&n2->c.lock, SIX_LOCK_intent);
- mark_btree_node_locked(trans, trans->paths + path2, n2->c.level, BTREE_NODE_INTENT_LOCKED);
- bch2_btree_path_level_init(trans, trans->paths + path2, n2);
-
- /*
-		 * Note that on recursive calls, parent_keys == keys, so we
- * can't start adding new keys to parent_keys before emptying it
- * out (which we did with btree_split_insert_keys() above)
- */
- bch2_keylist_add(&as->parent_keys, &n1->key);
- bch2_keylist_add(&as->parent_keys, &n2->key);
-
- if (!parent) {
- /* Depth increases, make a new root */
- n3 = __btree_root_alloc(as, trans, b->c.level + 1);
-
- bch2_btree_update_add_new_node(as, n3);
- six_unlock_write(&n3->c.lock);
-
- trans->paths[path2].locks_want++;
- BUG_ON(btree_node_locked(trans->paths + path2, n3->c.level));
- six_lock_increment(&n3->c.lock, SIX_LOCK_intent);
- mark_btree_node_locked(trans, trans->paths + path2, n3->c.level, BTREE_NODE_INTENT_LOCKED);
- bch2_btree_path_level_init(trans, trans->paths + path2, n3);
-
- n3->sib_u64s[0] = U16_MAX;
- n3->sib_u64s[1] = U16_MAX;
-
- btree_split_insert_keys(as, trans, path, n3, &as->parent_keys);
- }
- } else {
- trace_and_count(c, btree_node_compact, trans, b);
-
- n1 = bch2_btree_node_alloc_replacement(as, trans, b);
-
- if (keys) {
- btree_split_insert_keys(as, trans, path, n1, keys);
- BUG_ON(!bch2_keylist_empty(keys));
- }
-
- bch2_btree_build_aux_trees(n1);
- bch2_btree_update_add_new_node(as, n1);
- six_unlock_write(&n1->c.lock);
-
- path1 = bch2_path_get_unlocked_mut(trans, as->btree_id, n1->c.level, n1->key.k.p);
- six_lock_increment(&n1->c.lock, SIX_LOCK_intent);
- mark_btree_node_locked(trans, trans->paths + path1, n1->c.level, BTREE_NODE_INTENT_LOCKED);
- bch2_btree_path_level_init(trans, trans->paths + path1, n1);
-
- if (parent)
- bch2_keylist_add(&as->parent_keys, &n1->key);
- }
-
- /* New nodes all written, now make them visible: */
-
- if (parent) {
- /* Split a non root node */
- ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys);
- } else if (n3) {
- ret = bch2_btree_set_root(as, trans, trans->paths + path, n3, false);
- } else {
- /* Root filled up but didn't need to be split */
- ret = bch2_btree_set_root(as, trans, trans->paths + path, n1, false);
- }
-
- if (ret)
- goto err;
-
- if (n3) {
- bch2_btree_update_get_open_buckets(as, n3);
- bch2_btree_node_write(c, n3, SIX_LOCK_intent, 0);
- }
- if (n2) {
- bch2_btree_update_get_open_buckets(as, n2);
- bch2_btree_node_write(c, n2, SIX_LOCK_intent, 0);
- }
- bch2_btree_update_get_open_buckets(as, n1);
- bch2_btree_node_write(c, n1, SIX_LOCK_intent, 0);
-
- /*
- * The old node must be freed (in memory) _before_ unlocking the new
- * nodes - else another thread could re-acquire a read lock on the old
- * node after another thread has locked and updated the new node, thus
- * seeing stale data:
- */
- bch2_btree_node_free_inmem(trans, trans->paths + path, b);
-
- if (n3)
- bch2_trans_node_add(trans, trans->paths + path, n3);
- if (n2)
- bch2_trans_node_add(trans, trans->paths + path2, n2);
- bch2_trans_node_add(trans, trans->paths + path1, n1);
-
- if (n3)
- six_unlock_intent(&n3->c.lock);
- if (n2)
- six_unlock_intent(&n2->c.lock);
- six_unlock_intent(&n1->c.lock);
-out:
- if (path2) {
- __bch2_btree_path_unlock(trans, trans->paths + path2);
- bch2_path_put(trans, path2, true);
- }
- if (path1) {
- __bch2_btree_path_unlock(trans, trans->paths + path1);
- bch2_path_put(trans, path1, true);
- }
-
- bch2_trans_verify_locks(trans);
-
- bch2_time_stats_update(&c->times[n2
- ? BCH_TIME_btree_node_split
- : BCH_TIME_btree_node_compact],
- start_time);
- return ret;
-err:
- if (n3)
- bch2_btree_node_free_never_used(as, trans, n3);
- if (n2)
- bch2_btree_node_free_never_used(as, trans, n2);
- bch2_btree_node_free_never_used(as, trans, n1);
- goto out;
-}
-
-/**
- * bch2_btree_insert_node - insert bkeys into a given btree node
- *
- * @as: btree_update object
- * @trans: btree_trans object
- * @path_idx: path that points to current node
- * @b: node to insert keys into
- * @keys: list of keys to insert
- *
- * Returns: 0 on success, typically transaction restart error on failure
- *
- * Inserts as many keys as it can into a given btree node, splitting it if full.
- * If a split occurred, this function will return early. This can only happen
- * for leaf nodes -- inserts into interior nodes have to be atomic.
- */
-static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *trans,
- btree_path_idx_t path_idx, struct btree *b,
- struct keylist *keys)
-{
- struct bch_fs *c = as->c;
- struct btree_path *path = trans->paths + path_idx, *linked;
- unsigned i;
- int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
- int old_live_u64s = b->nr.live_u64s;
- int live_u64s_added, u64s_added;
- int ret;
-
- lockdep_assert_held(&c->gc_lock);
- BUG_ON(!btree_node_intent_locked(path, b->c.level));
- BUG_ON(!b->c.level);
- BUG_ON(!as || as->b);
- bch2_verify_keylist_sorted(keys);
-
- ret = bch2_btree_node_lock_write(trans, path, &b->c);
- if (ret)
- return ret;
-
- bch2_btree_node_prep_for_write(trans, path, b);
-
- if (!bch2_btree_node_insert_fits(b, bch2_keylist_u64s(keys))) {
- bch2_btree_node_unlock_write(trans, path, b);
- goto split;
- }
-
- ret = bch2_btree_node_check_topology(trans, b);
- if (ret) {
- bch2_btree_node_unlock_write(trans, path, b);
- return ret;
- }
-
- bch2_btree_insert_keys_interior(as, trans, path, b,
- path->l[b->c.level].iter, keys);
-
- trans_for_each_path_with_node(trans, b, linked, i)
- bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);
-
- bch2_trans_verify_paths(trans);
-
- live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
- u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
-
- if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
- b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
- if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
- b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);
-
- if (u64s_added > live_u64s_added &&
- bch2_maybe_compact_whiteouts(c, b))
- bch2_trans_node_reinit_iter(trans, b);
-
- btree_update_updated_node(as, b);
- bch2_btree_node_unlock_write(trans, path, b);
-
- BUG_ON(bch2_btree_node_check_topology(trans, b));
- return 0;
-split:
- /*
- * We could attempt to avoid the transaction restart, by calling
- * bch2_btree_path_upgrade() and allocating more nodes:
- */
- if (b->c.level >= as->update_level_end) {
- trace_and_count(c, trans_restart_split_race, trans, _THIS_IP_, b);
- return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race);
- }
-
- return btree_split(as, trans, path_idx, b, keys);
-}
-
-int bch2_btree_split_leaf(struct btree_trans *trans,
- btree_path_idx_t path,
- unsigned flags)
-{
- /* btree_split & merge may both cause paths array to be reallocated */
- struct btree *b = path_l(trans->paths + path)->b;
- struct btree_update *as;
- unsigned l;
- int ret = 0;
-
- as = bch2_btree_update_start(trans, trans->paths + path,
- trans->paths[path].level,
- true, flags);
- if (IS_ERR(as))
- return PTR_ERR(as);
-
- ret = btree_split(as, trans, path, b, NULL);
- if (ret) {
- bch2_btree_update_free(as, trans);
- return ret;
- }
-
- bch2_btree_update_done(as, trans);
-
- for (l = trans->paths[path].level + 1;
- btree_node_intent_locked(&trans->paths[path], l) && !ret;
- l++)
- ret = bch2_foreground_maybe_merge(trans, path, l, flags);
-
- return ret;
-}
-
-static void __btree_increase_depth(struct btree_update *as, struct btree_trans *trans,
- btree_path_idx_t path_idx)
-{
- struct bch_fs *c = as->c;
- struct btree_path *path = trans->paths + path_idx;
- struct btree *n, *b = bch2_btree_id_root(c, path->btree_id)->b;
-
- BUG_ON(!btree_node_locked(path, b->c.level));
-
- n = __btree_root_alloc(as, trans, b->c.level + 1);
-
- bch2_btree_update_add_new_node(as, n);
- six_unlock_write(&n->c.lock);
-
- path->locks_want++;
- BUG_ON(btree_node_locked(path, n->c.level));
- six_lock_increment(&n->c.lock, SIX_LOCK_intent);
- mark_btree_node_locked(trans, path, n->c.level, BTREE_NODE_INTENT_LOCKED);
- bch2_btree_path_level_init(trans, path, n);
-
- n->sib_u64s[0] = U16_MAX;
- n->sib_u64s[1] = U16_MAX;
-
- bch2_keylist_add(&as->parent_keys, &b->key);
- btree_split_insert_keys(as, trans, path_idx, n, &as->parent_keys);
-
- int ret = bch2_btree_set_root(as, trans, path, n, true);
- BUG_ON(ret);
-
- bch2_btree_update_get_open_buckets(as, n);
- bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
- bch2_trans_node_add(trans, path, n);
- six_unlock_intent(&n->c.lock);
-
- mutex_lock(&c->btree_cache.lock);
- list_add_tail(&b->list, &c->btree_cache.live[btree_node_pinned(b)].list);
- mutex_unlock(&c->btree_cache.lock);
-
- bch2_trans_verify_locks(trans);
-}
-
-int bch2_btree_increase_depth(struct btree_trans *trans, btree_path_idx_t path, unsigned flags)
-{
- struct bch_fs *c = trans->c;
- struct btree *b = bch2_btree_id_root(c, trans->paths[path].btree_id)->b;
-
- if (btree_node_fake(b))
- return bch2_btree_split_leaf(trans, path, flags);
-
- struct btree_update *as =
- bch2_btree_update_start(trans, trans->paths + path, b->c.level, true, flags);
- if (IS_ERR(as))
- return PTR_ERR(as);
-
- __btree_increase_depth(as, trans, path);
- bch2_btree_update_done(as, trans);
- return 0;
-}
-
-int __bch2_foreground_maybe_merge(struct btree_trans *trans,
- btree_path_idx_t path,
- unsigned level,
- unsigned flags,
- enum btree_node_sibling sib)
-{
- struct bch_fs *c = trans->c;
- struct btree_update *as;
- struct bkey_format_state new_s;
- struct bkey_format new_f;
- struct bkey_i delete;
- struct btree *b, *m, *n, *prev, *next, *parent;
- struct bpos sib_pos;
- size_t sib_u64s;
- enum btree_id btree = trans->paths[path].btree_id;
- btree_path_idx_t sib_path = 0, new_path = 0;
- u64 start_time = local_clock();
- int ret = 0;
-
- bch2_trans_verify_not_in_restart(trans);
- bch2_trans_verify_not_unlocked(trans);
- BUG_ON(!trans->paths[path].should_be_locked);
- BUG_ON(!btree_node_locked(&trans->paths[path], level));
-
- /*
- * Work around a deadlock caused by the btree write buffer not doing
- * merges and leaving tons of merges for us to do - we really don't need
- * to be doing merges at all from the interior update path, and if the
- * interior update path is generating too many new interior updates we
- * deadlock:
- */
- if ((flags & BCH_WATERMARK_MASK) == BCH_WATERMARK_interior_updates)
- return 0;
-
- if ((flags & BCH_WATERMARK_MASK) <= BCH_WATERMARK_reclaim) {
- flags &= ~BCH_WATERMARK_MASK;
- flags |= BCH_WATERMARK_btree;
- flags |= BCH_TRANS_COMMIT_journal_reclaim;
- }
-
- b = trans->paths[path].l[level].b;
-
- if ((sib == btree_prev_sib && bpos_eq(b->data->min_key, POS_MIN)) ||
- (sib == btree_next_sib && bpos_eq(b->data->max_key, SPOS_MAX))) {
- b->sib_u64s[sib] = U16_MAX;
- return 0;
- }
-
- sib_pos = sib == btree_prev_sib
- ? bpos_predecessor(b->data->min_key)
- : bpos_successor(b->data->max_key);
-
- sib_path = bch2_path_get(trans, btree, sib_pos,
- U8_MAX, level, BTREE_ITER_intent, _THIS_IP_);
- ret = bch2_btree_path_traverse(trans, sib_path, false);
- if (ret)
- goto err;
-
- btree_path_set_should_be_locked(trans, trans->paths + sib_path);
-
- m = trans->paths[sib_path].l[level].b;
-
- if (btree_node_parent(trans->paths + path, b) !=
- btree_node_parent(trans->paths + sib_path, m)) {
- b->sib_u64s[sib] = U16_MAX;
- goto out;
- }
-
- if (sib == btree_prev_sib) {
- prev = m;
- next = b;
- } else {
- prev = b;
- next = m;
- }
-
- if (!bpos_eq(bpos_successor(prev->data->max_key), next->data->min_key)) {
- struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
-
- bch2_bpos_to_text(&buf1, prev->data->max_key);
- bch2_bpos_to_text(&buf2, next->data->min_key);
- bch_err(c,
- "%s(): btree topology error:\n"
- " prev ends at %s\n"
- " next starts at %s",
- __func__, buf1.buf, buf2.buf);
- printbuf_exit(&buf1);
- printbuf_exit(&buf2);
- ret = bch2_topology_error(c);
- goto err;
- }
-
- bch2_bkey_format_init(&new_s);
- bch2_bkey_format_add_pos(&new_s, prev->data->min_key);
- __bch2_btree_calc_format(&new_s, prev);
- __bch2_btree_calc_format(&new_s, next);
- bch2_bkey_format_add_pos(&new_s, next->data->max_key);
- new_f = bch2_bkey_format_done(&new_s);
-
- sib_u64s = btree_node_u64s_with_format(b->nr, &b->format, &new_f) +
- btree_node_u64s_with_format(m->nr, &m->format, &new_f);
-
- if (sib_u64s > BTREE_FOREGROUND_MERGE_HYSTERESIS(c)) {
- sib_u64s -= BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
- sib_u64s /= 2;
- sib_u64s += BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
- }
-
- sib_u64s = min(sib_u64s, btree_max_u64s(c));
- sib_u64s = min(sib_u64s, (size_t) U16_MAX - 1);
- b->sib_u64s[sib] = sib_u64s;
-
- if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
- goto out;
-
- parent = btree_node_parent(trans->paths + path, b);
- as = bch2_btree_update_start(trans, trans->paths + path, level, false,
- BCH_TRANS_COMMIT_no_enospc|flags);
- ret = PTR_ERR_OR_ZERO(as);
- if (ret)
- goto err;
-
- trace_and_count(c, btree_node_merge, trans, b);
-
- bch2_btree_interior_update_will_free_node(as, b);
- bch2_btree_interior_update_will_free_node(as, m);
-
- n = bch2_btree_node_alloc(as, trans, b->c.level);
-
- SET_BTREE_NODE_SEQ(n->data,
- max(BTREE_NODE_SEQ(b->data),
- BTREE_NODE_SEQ(m->data)) + 1);
-
- btree_set_min(n, prev->data->min_key);
- btree_set_max(n, next->data->max_key);
-
- n->data->format = new_f;
- btree_node_set_format(n, new_f);
-
- bch2_btree_sort_into(c, n, prev);
- bch2_btree_sort_into(c, n, next);
-
- bch2_btree_build_aux_trees(n);
- bch2_btree_update_add_new_node(as, n);
- six_unlock_write(&n->c.lock);
-
- new_path = bch2_path_get_unlocked_mut(trans, btree, n->c.level, n->key.k.p);
- six_lock_increment(&n->c.lock, SIX_LOCK_intent);
- mark_btree_node_locked(trans, trans->paths + new_path, n->c.level, BTREE_NODE_INTENT_LOCKED);
- bch2_btree_path_level_init(trans, trans->paths + new_path, n);
-
- bkey_init(&delete.k);
- delete.k.p = prev->key.k.p;
- bch2_keylist_add(&as->parent_keys, &delete);
- bch2_keylist_add(&as->parent_keys, &n->key);
-
- bch2_trans_verify_paths(trans);
-
- ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys);
- if (ret)
- goto err_free_update;
-
- bch2_trans_verify_paths(trans);
-
- bch2_btree_update_get_open_buckets(as, n);
- bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
-
- bch2_btree_node_free_inmem(trans, trans->paths + path, b);
- bch2_btree_node_free_inmem(trans, trans->paths + sib_path, m);
-
- bch2_trans_node_add(trans, trans->paths + path, n);
-
- bch2_trans_verify_paths(trans);
-
- six_unlock_intent(&n->c.lock);
-
- bch2_btree_update_done(as, trans);
-
- bch2_time_stats_update(&c->times[BCH_TIME_btree_node_merge], start_time);
-out:
-err:
- if (new_path)
- bch2_path_put(trans, new_path, true);
- bch2_path_put(trans, sib_path, true);
- bch2_trans_verify_locks(trans);
- if (ret == -BCH_ERR_journal_reclaim_would_deadlock)
- ret = 0;
- if (!ret)
- ret = bch2_trans_relock(trans);
- return ret;
-err_free_update:
- bch2_btree_node_free_never_used(as, trans, n);
- bch2_btree_update_free(as, trans);
- goto out;
-}
-
-int bch2_btree_node_rewrite(struct btree_trans *trans,
- struct btree_iter *iter,
- struct btree *b,
- unsigned flags)
-{
- struct bch_fs *c = trans->c;
- struct btree *n, *parent;
- struct btree_update *as;
- btree_path_idx_t new_path = 0;
- int ret;
-
- flags |= BCH_TRANS_COMMIT_no_enospc;
-
- struct btree_path *path = btree_iter_path(trans, iter);
- parent = btree_node_parent(path, b);
- as = bch2_btree_update_start(trans, path, b->c.level, false, flags);
- ret = PTR_ERR_OR_ZERO(as);
- if (ret)
- goto out;
-
- bch2_btree_interior_update_will_free_node(as, b);
-
- n = bch2_btree_node_alloc_replacement(as, trans, b);
-
- bch2_btree_build_aux_trees(n);
- bch2_btree_update_add_new_node(as, n);
- six_unlock_write(&n->c.lock);
-
- new_path = bch2_path_get_unlocked_mut(trans, iter->btree_id, n->c.level, n->key.k.p);
- six_lock_increment(&n->c.lock, SIX_LOCK_intent);
- mark_btree_node_locked(trans, trans->paths + new_path, n->c.level, BTREE_NODE_INTENT_LOCKED);
- bch2_btree_path_level_init(trans, trans->paths + new_path, n);
-
- trace_and_count(c, btree_node_rewrite, trans, b);
-
- if (parent) {
- bch2_keylist_add(&as->parent_keys, &n->key);
- ret = bch2_btree_insert_node(as, trans, iter->path, parent, &as->parent_keys);
- } else {
- ret = bch2_btree_set_root(as, trans, btree_iter_path(trans, iter), n, false);
- }
-
- if (ret)
- goto err;
-
- bch2_btree_update_get_open_buckets(as, n);
- bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
-
- bch2_btree_node_free_inmem(trans, btree_iter_path(trans, iter), b);
-
- bch2_trans_node_add(trans, trans->paths + iter->path, n);
- six_unlock_intent(&n->c.lock);
-
- bch2_btree_update_done(as, trans);
-out:
- if (new_path)
- bch2_path_put(trans, new_path, true);
- bch2_trans_downgrade(trans);
- return ret;
-err:
- bch2_btree_node_free_never_used(as, trans, n);
- bch2_btree_update_free(as, trans);
- goto out;
-}
-
-struct async_btree_rewrite {
- struct bch_fs *c;
- struct work_struct work;
- struct list_head list;
- enum btree_id btree_id;
- unsigned level;
- struct bpos pos;
- __le64 seq;
-};
-
-static int async_btree_node_rewrite_trans(struct btree_trans *trans,
- struct async_btree_rewrite *a)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct btree *b;
- int ret;
-
- bch2_trans_node_iter_init(trans, &iter, a->btree_id, a->pos,
- BTREE_MAX_DEPTH, a->level, 0);
- b = bch2_btree_iter_peek_node(&iter);
- ret = PTR_ERR_OR_ZERO(b);
- if (ret)
- goto out;
-
- if (!b || b->data->keys.seq != a->seq) {
- struct printbuf buf = PRINTBUF;
-
- if (b)
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
- else
-			prt_str(&buf, "(null)");
-		bch_info(c, "%s: node to rewrite not found; searching for seq %llu, got\n%s",
- __func__, a->seq, buf.buf);
- printbuf_exit(&buf);
- goto out;
- }
-
- ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
-out:
- bch2_trans_iter_exit(trans, &iter);
-
- return ret;
-}
-
-static void async_btree_node_rewrite_work(struct work_struct *work)
-{
- struct async_btree_rewrite *a =
- container_of(work, struct async_btree_rewrite, work);
- struct bch_fs *c = a->c;
-
- int ret = bch2_trans_do(c, async_btree_node_rewrite_trans(trans, a));
- bch_err_fn_ratelimited(c, ret);
- bch2_write_ref_put(c, BCH_WRITE_REF_node_rewrite);
- kfree(a);
-}
-
-void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
-{
- struct async_btree_rewrite *a;
- int ret;
-
- a = kmalloc(sizeof(*a), GFP_NOFS);
- if (!a) {
- bch_err(c, "%s: error allocating memory", __func__);
- return;
- }
-
- a->c = c;
- a->btree_id = b->c.btree_id;
- a->level = b->c.level;
- a->pos = b->key.k.p;
- a->seq = b->data->keys.seq;
- INIT_WORK(&a->work, async_btree_node_rewrite_work);
-
- if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) {
- mutex_lock(&c->pending_node_rewrites_lock);
- list_add(&a->list, &c->pending_node_rewrites);
- mutex_unlock(&c->pending_node_rewrites_lock);
- return;
- }
-
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) {
- if (test_bit(BCH_FS_started, &c->flags)) {
- bch_err(c, "%s: error getting c->writes ref", __func__);
- kfree(a);
- return;
- }
-
- ret = bch2_fs_read_write_early(c);
- bch_err_msg(c, ret, "going read-write");
- if (ret) {
- kfree(a);
- return;
- }
-
- bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite);
- }
-
- queue_work(c->btree_node_rewrite_worker, &a->work);
-}
-
-void bch2_do_pending_node_rewrites(struct bch_fs *c)
-{
- struct async_btree_rewrite *a, *n;
-
- mutex_lock(&c->pending_node_rewrites_lock);
- list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) {
- list_del(&a->list);
-
- bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite);
- queue_work(c->btree_node_rewrite_worker, &a->work);
- }
- mutex_unlock(&c->pending_node_rewrites_lock);
-}
-
-void bch2_free_pending_node_rewrites(struct bch_fs *c)
-{
- struct async_btree_rewrite *a, *n;
-
- mutex_lock(&c->pending_node_rewrites_lock);
- list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) {
- list_del(&a->list);
-
- kfree(a);
- }
- mutex_unlock(&c->pending_node_rewrites_lock);
-}
-
-static int __bch2_btree_node_update_key(struct btree_trans *trans,
- struct btree_iter *iter,
- struct btree *b, struct btree *new_hash,
- struct bkey_i *new_key,
- unsigned commit_flags,
- bool skip_triggers)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter2 = { NULL };
- struct btree *parent;
- int ret;
-
- if (!skip_triggers) {
- ret = bch2_key_trigger_old(trans, b->c.btree_id, b->c.level + 1,
- bkey_i_to_s_c(&b->key),
- BTREE_TRIGGER_transactional) ?:
- bch2_key_trigger_new(trans, b->c.btree_id, b->c.level + 1,
- bkey_i_to_s(new_key),
- BTREE_TRIGGER_transactional);
- if (ret)
- return ret;
- }
-
- if (new_hash) {
- bkey_copy(&new_hash->key, new_key);
- ret = bch2_btree_node_hash_insert(&c->btree_cache,
- new_hash, b->c.level, b->c.btree_id);
- BUG_ON(ret);
- }
-
- parent = btree_node_parent(btree_iter_path(trans, iter), b);
- if (parent) {
- bch2_trans_copy_iter(&iter2, iter);
-
- iter2.path = bch2_btree_path_make_mut(trans, iter2.path,
- iter2.flags & BTREE_ITER_intent,
- _THIS_IP_);
-
- struct btree_path *path2 = btree_iter_path(trans, &iter2);
- BUG_ON(path2->level != b->c.level);
- BUG_ON(!bpos_eq(path2->pos, new_key->k.p));
-
- btree_path_set_level_up(trans, path2);
-
- trans->paths_sorted = false;
-
- ret = bch2_btree_iter_traverse(&iter2) ?:
- bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_norun);
- if (ret)
- goto err;
- } else {
- BUG_ON(btree_node_root(c, b) != b);
-
- struct jset_entry *e = bch2_trans_jset_entry_alloc(trans,
- jset_u64s(new_key->k.u64s));
- ret = PTR_ERR_OR_ZERO(e);
- if (ret)
- return ret;
-
- journal_entry_set(e,
- BCH_JSET_ENTRY_btree_root,
- b->c.btree_id, b->c.level,
- new_key, new_key->k.u64s);
- }
-
- ret = bch2_trans_commit(trans, NULL, NULL, commit_flags);
- if (ret)
- goto err;
-
- bch2_btree_node_lock_write_nofail(trans, btree_iter_path(trans, iter), &b->c);
-
- if (new_hash) {
- mutex_lock(&c->btree_cache.lock);
- bch2_btree_node_hash_remove(&c->btree_cache, new_hash);
-
- __bch2_btree_node_hash_remove(&c->btree_cache, b);
-
- bkey_copy(&b->key, new_key);
- ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
- BUG_ON(ret);
- mutex_unlock(&c->btree_cache.lock);
- } else {
- bkey_copy(&b->key, new_key);
- }
-
- bch2_btree_node_unlock_write(trans, btree_iter_path(trans, iter), b);
-out:
- bch2_trans_iter_exit(trans, &iter2);
- return ret;
-err:
- if (new_hash) {
- mutex_lock(&c->btree_cache.lock);
- bch2_btree_node_hash_remove(&c->btree_cache, b);
- mutex_unlock(&c->btree_cache.lock);
- }
- goto out;
-}
-
-int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter,
- struct btree *b, struct bkey_i *new_key,
- unsigned commit_flags, bool skip_triggers)
-{
- struct bch_fs *c = trans->c;
- struct btree *new_hash = NULL;
- struct btree_path *path = btree_iter_path(trans, iter);
- struct closure cl;
- int ret = 0;
-
- ret = bch2_btree_path_upgrade(trans, path, b->c.level + 1);
- if (ret)
- return ret;
-
- closure_init_stack(&cl);
-
- /*
- * check btree_ptr_hash_val() after @b is locked by
- * btree_iter_traverse():
- */
- if (btree_ptr_hash_val(new_key) != b->hash_val) {
- ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
- if (ret) {
- ret = drop_locks_do(trans, (closure_sync(&cl), 0));
- if (ret)
- return ret;
- }
-
- new_hash = bch2_btree_node_mem_alloc(trans, false);
- ret = PTR_ERR_OR_ZERO(new_hash);
- if (ret)
- goto err;
- }
-
- path->intent_ref++;
- ret = __bch2_btree_node_update_key(trans, iter, b, new_hash, new_key,
- commit_flags, skip_triggers);
- --path->intent_ref;
-
- if (new_hash)
- bch2_btree_node_to_freelist(c, new_hash);
-err:
- closure_sync(&cl);
- bch2_btree_cache_cannibalize_unlock(trans);
- return ret;
-}
-
-int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
- struct btree *b, struct bkey_i *new_key,
- unsigned commit_flags, bool skip_triggers)
-{
- struct btree_iter iter;
- int ret;
-
- bch2_trans_node_iter_init(trans, &iter, b->c.btree_id, b->key.k.p,
- BTREE_MAX_DEPTH, b->c.level,
- BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(&iter);
- if (ret)
- goto out;
-
- /* has node been freed? */
- if (btree_iter_path(trans, &iter)->l[b->c.level].b != b) {
- /* node has been freed: */
- BUG_ON(!btree_node_dying(b));
- goto out;
- }
-
- BUG_ON(!btree_node_hashed(b));
-
- bch2_bkey_drop_ptrs(bkey_i_to_s(new_key), ptr,
- !bch2_bkey_has_device(bkey_i_to_s(&b->key), ptr->dev));
-
- ret = bch2_btree_node_update_key(trans, &iter, b, new_key,
- commit_flags, skip_triggers);
-out:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-/* Init code: */
-
-/*
- * Only for filesystem bringup, when first reading the btree roots or allocating
- * btree roots when initializing a new filesystem:
- */
-void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
-{
- BUG_ON(btree_node_root(c, b));
-
- bch2_btree_set_root_inmem(c, b);
-}
-
-int bch2_btree_root_alloc_fake_trans(struct btree_trans *trans, enum btree_id id, unsigned level)
-{
- struct bch_fs *c = trans->c;
- struct closure cl;
- struct btree *b;
- int ret;
-
- closure_init_stack(&cl);
-
- do {
- ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
- closure_sync(&cl);
- } while (ret);
-
- b = bch2_btree_node_mem_alloc(trans, false);
- bch2_btree_cache_cannibalize_unlock(trans);
-
- ret = PTR_ERR_OR_ZERO(b);
- if (ret)
- return ret;
-
- set_btree_node_fake(b);
- set_btree_node_need_rewrite(b);
- b->c.level = level;
- b->c.btree_id = id;
-
- bkey_btree_ptr_init(&b->key);
- b->key.k.p = SPOS_MAX;
- *((u64 *) bkey_i_to_btree_ptr(&b->key)->v.start) = U64_MAX - id;
-
- bch2_bset_init_first(b, &b->data->keys);
- bch2_btree_build_aux_trees(b);
-
- b->data->flags = 0;
- btree_set_min(b, POS_MIN);
- btree_set_max(b, SPOS_MAX);
- b->data->format = bch2_btree_calc_format(b);
- btree_node_set_format(b, b->data->format);
-
- ret = bch2_btree_node_hash_insert(&c->btree_cache, b,
- b->c.level, b->c.btree_id);
- BUG_ON(ret);
-
- bch2_btree_set_root_inmem(c, b);
-
- six_unlock_write(&b->c.lock);
- six_unlock_intent(&b->c.lock);
- return 0;
-}
-
-void bch2_btree_root_alloc_fake(struct bch_fs *c, enum btree_id id, unsigned level)
-{
- bch2_trans_run(c, lockrestart_do(trans, bch2_btree_root_alloc_fake_trans(trans, id, level)));
-}
-
-static void bch2_btree_update_to_text(struct printbuf *out, struct btree_update *as)
-{
- prt_printf(out, "%ps: ", (void *) as->ip_started);
- bch2_trans_commit_flags_to_text(out, as->flags);
-
- prt_printf(out, " btree=%s l=%u-%u mode=%s nodes_written=%u cl.remaining=%u journal_seq=%llu\n",
- bch2_btree_id_str(as->btree_id),
- as->update_level_start,
- as->update_level_end,
- bch2_btree_update_modes[as->mode],
- as->nodes_written,
- closure_nr_remaining(&as->cl),
- as->journal.seq);
-}
-
-void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)
-{
- struct btree_update *as;
-
- mutex_lock(&c->btree_interior_update_lock);
- list_for_each_entry(as, &c->btree_interior_update_list, list)
- bch2_btree_update_to_text(out, as);
- mutex_unlock(&c->btree_interior_update_lock);
-}
-
-static bool bch2_btree_interior_updates_pending(struct bch_fs *c)
-{
- bool ret;
-
- mutex_lock(&c->btree_interior_update_lock);
- ret = !list_empty(&c->btree_interior_update_list);
- mutex_unlock(&c->btree_interior_update_lock);
-
- return ret;
-}
-
-bool bch2_btree_interior_updates_flush(struct bch_fs *c)
-{
- bool ret = bch2_btree_interior_updates_pending(c);
-
- if (ret)
- closure_wait_event(&c->btree_interior_update_wait,
- !bch2_btree_interior_updates_pending(c));
- return ret;
-}
-
-void bch2_journal_entry_to_btree_root(struct bch_fs *c, struct jset_entry *entry)
-{
- struct btree_root *r = bch2_btree_id_root(c, entry->btree_id);
-
- mutex_lock(&c->btree_root_lock);
-
- r->level = entry->level;
- r->alive = true;
- bkey_copy(&r->key, (struct bkey_i *) entry->start);
-
- mutex_unlock(&c->btree_root_lock);
-}
-
-struct jset_entry *
-bch2_btree_roots_to_journal_entries(struct bch_fs *c,
- struct jset_entry *end,
- unsigned long skip)
-{
- unsigned i;
-
- mutex_lock(&c->btree_root_lock);
-
- for (i = 0; i < btree_id_nr_alive(c); i++) {
- struct btree_root *r = bch2_btree_id_root(c, i);
-
- if (r->alive && !test_bit(i, &skip)) {
- journal_entry_set(end, BCH_JSET_ENTRY_btree_root,
- i, r->level, &r->key, r->key.k.u64s);
- end = vstruct_next(end);
- }
- }
-
- mutex_unlock(&c->btree_root_lock);
-
- return end;
-}
-
-static void bch2_btree_alloc_to_text(struct printbuf *out,
- struct bch_fs *c,
- struct btree_alloc *a)
-{
- printbuf_indent_add(out, 2);
- bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&a->k));
- prt_newline(out);
-
- struct open_bucket *ob;
- unsigned i;
- open_bucket_for_each(c, &a->ob, ob, i)
- bch2_open_bucket_to_text(out, c, ob);
-
- printbuf_indent_sub(out, 2);
-}
-
-void bch2_btree_reserve_cache_to_text(struct printbuf *out, struct bch_fs *c)
-{
- for (unsigned i = 0; i < c->btree_reserve_cache_nr; i++)
- bch2_btree_alloc_to_text(out, c, &c->btree_reserve_cache[i]);
-}
-
-void bch2_fs_btree_interior_update_exit(struct bch_fs *c)
-{
- if (c->btree_node_rewrite_worker)
- destroy_workqueue(c->btree_node_rewrite_worker);
- if (c->btree_interior_update_worker)
- destroy_workqueue(c->btree_interior_update_worker);
- mempool_exit(&c->btree_interior_update_pool);
-}
-
-void bch2_fs_btree_interior_update_init_early(struct bch_fs *c)
-{
- mutex_init(&c->btree_reserve_cache_lock);
- INIT_LIST_HEAD(&c->btree_interior_update_list);
- INIT_LIST_HEAD(&c->btree_interior_updates_unwritten);
- mutex_init(&c->btree_interior_update_lock);
- INIT_WORK(&c->btree_interior_update_work, btree_interior_update_work);
-
- INIT_LIST_HEAD(&c->pending_node_rewrites);
- mutex_init(&c->pending_node_rewrites_lock);
-}
-
-int bch2_fs_btree_interior_update_init(struct bch_fs *c)
-{
- c->btree_interior_update_worker =
- alloc_workqueue("btree_update", WQ_UNBOUND|WQ_MEM_RECLAIM, 8);
- if (!c->btree_interior_update_worker)
- return -BCH_ERR_ENOMEM_btree_interior_update_worker_init;
-
- c->btree_node_rewrite_worker =
- alloc_ordered_workqueue("btree_node_rewrite", WQ_UNBOUND);
- if (!c->btree_node_rewrite_worker)
- return -BCH_ERR_ENOMEM_btree_interior_update_worker_init;
-
- if (mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
- sizeof(struct btree_update)))
- return -BCH_ERR_ENOMEM_btree_interior_update_pool_init;
-
- return 0;
-}
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
deleted file mode 100644
index 10f400957f21..000000000000
--- a/fs/bcachefs/btree_update_interior.h
+++ /dev/null
@@ -1,346 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BTREE_UPDATE_INTERIOR_H
-#define _BCACHEFS_BTREE_UPDATE_INTERIOR_H
-
-#include "btree_cache.h"
-#include "btree_locking.h"
-#include "btree_update.h"
-
-#define BTREE_UPDATE_NODES_MAX ((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)
-
-#define BTREE_UPDATE_JOURNAL_RES (BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
-
-int bch2_btree_node_check_topology(struct btree_trans *, struct btree *);
-
-#define BTREE_UPDATE_MODES() \
- x(none) \
- x(node) \
- x(root) \
- x(update)
-
-enum btree_update_mode {
-#define x(n) BTREE_UPDATE_##n,
- BTREE_UPDATE_MODES()
-#undef x
-};
-
-/*
- * Tracks an in-progress split/rewrite of a btree node and the update to the
- * parent node:
- *
- * When we split/rewrite a node, we do all the updates in memory without
- * waiting for any writes to complete - we allocate the new node(s) and update
- * the parent node, possibly recursively up to the root.
- *
- * The end result is that we have one or more new nodes being written -
- * possibly several, if there were multiple splits - and then a write (updating
- * an interior node) which will make all these new nodes visible.
- *
- * Additionally, as we split/rewrite nodes we free the old nodes - but the old
- * nodes can't be freed (their space on disk can't be reclaimed) until the
- * update to the interior node that makes the new node visible completes -
- * until then, the old nodes are still reachable on disk.
- *
- */
-struct btree_update {
- struct closure cl;
- struct bch_fs *c;
- u64 start_time;
- unsigned long ip_started;
-
- struct list_head list;
- struct list_head unwritten_list;
-
- enum btree_update_mode mode;
- enum bch_trans_commit_flags flags;
- unsigned nodes_written:1;
- unsigned took_gc_lock:1;
-
- enum btree_id btree_id;
- unsigned update_level_start;
- unsigned update_level_end;
-
- struct disk_reservation disk_res;
-
- /*
- * BTREE_UPDATE_node:
- * The update that made the new nodes visible was a regular update to an
- * existing interior node - @b. We can't write out the update to @b
- * until the new nodes we created are finished writing, so we block @b
- * from writing by putting this btree_interior update on the
- * @b->write_blocked list with @write_blocked_list:
- */
- struct btree *b;
- struct list_head write_blocked_list;
-
- /*
- * We may be freeing nodes that were dirty, and thus had journal entries
- * pinned: we need to transfer the oldest of those pins to the
- * btree_update operation, and release it when the new node(s)
- * are all persistent and reachable:
- */
- struct journal_entry_pin journal;
-
- /* Preallocated nodes we reserve when we start the update: */
- struct prealloc_nodes {
- struct btree *b[BTREE_UPDATE_NODES_MAX];
- unsigned nr;
- } prealloc_nodes[2];
-
- /* Nodes being freed: */
- struct keylist old_keys;
- u64 _old_keys[BTREE_UPDATE_NODES_MAX *
- BKEY_BTREE_PTR_U64s_MAX];
-
- /* Nodes being added: */
- struct keylist new_keys;
- u64 _new_keys[BTREE_UPDATE_NODES_MAX *
- BKEY_BTREE_PTR_U64s_MAX];
-
- /* New nodes, that will be made reachable by this update: */
- struct btree *new_nodes[BTREE_UPDATE_NODES_MAX];
- unsigned nr_new_nodes;
-
- struct btree *old_nodes[BTREE_UPDATE_NODES_MAX];
- __le64 old_nodes_seq[BTREE_UPDATE_NODES_MAX];
- unsigned nr_old_nodes;
-
- open_bucket_idx_t open_buckets[BTREE_UPDATE_NODES_MAX *
- BCH_REPLICAS_MAX];
- open_bucket_idx_t nr_open_buckets;
-
- unsigned journal_u64s;
- u64 journal_entries[BTREE_UPDATE_JOURNAL_RES];
-
- /* Only here to reduce stack usage on recursive splits: */
- struct keylist parent_keys;
- /*
- * Enough room for btree_split's keys without realloc - btree node
- * pointers never have crc/compression info, so we only need to acount
-	 * pointers never have crc/compression info, so we only need to account
- */
- u64 inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
-};
-
-struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
- struct btree_trans *,
- struct btree *,
- struct bkey_format);
-
-int bch2_btree_split_leaf(struct btree_trans *, btree_path_idx_t, unsigned);
-
-int bch2_btree_increase_depth(struct btree_trans *, btree_path_idx_t, unsigned);
-
-int __bch2_foreground_maybe_merge(struct btree_trans *, btree_path_idx_t,
- unsigned, unsigned, enum btree_node_sibling);
-
-static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
- btree_path_idx_t path_idx,
- unsigned level, unsigned flags,
- enum btree_node_sibling sib)
-{
- struct btree_path *path = trans->paths + path_idx;
- struct btree *b;
-
- EBUG_ON(!btree_node_locked(path, level));
-
- if (bch2_btree_node_merging_disabled)
- return 0;
-
- b = path->l[level].b;
- if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold)
- return 0;
-
- return __bch2_foreground_maybe_merge(trans, path_idx, level, flags, sib);
-}
-
-static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
- btree_path_idx_t path,
- unsigned level,
- unsigned flags)
-{
- bch2_trans_verify_not_unlocked(trans);
-
- return bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
- btree_prev_sib) ?:
- bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
- btree_next_sib);
-}
-
-int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
- struct btree *, unsigned);
-void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);
-int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *,
- struct btree *, struct bkey_i *,
- unsigned, bool);
-int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *,
- struct bkey_i *, unsigned, bool);
-
-void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
-
-int bch2_btree_root_alloc_fake_trans(struct btree_trans *, enum btree_id, unsigned);
-void bch2_btree_root_alloc_fake(struct bch_fs *, enum btree_id, unsigned);
-
-static inline unsigned btree_update_reserve_required(struct bch_fs *c,
- struct btree *b)
-{
- unsigned depth = btree_node_root(c, b)->c.level + 1;
-
- /*
- * Number of nodes we might have to allocate in a worst case btree
- * split operation - we split all the way up to the root, then allocate
- * a new root, unless we're already at max depth:
- */
- if (depth < BTREE_MAX_DEPTH)
- return (depth - b->c.level) * 2 + 1;
- else
- return (depth - b->c.level) * 2 - 1;
-}
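
As a worked example of the bound above: with the root at level 2 the tree has depth 3, so an insert at a leaf can, in the worst case, split levels 0, 1 and 2 (two new nodes each) and then allocate a new root, i.e. (3 - 0) * 2 + 1 = 7 nodes. A standalone restatement with plain integers (hypothetical name, not part of the removed file):

	/* Hypothetical sketch of the same bound, not from bcachefs. */
	static unsigned reserve_required(unsigned root_level, unsigned node_level,
					 unsigned max_depth)
	{
		unsigned depth = root_level + 1;

		return depth < max_depth
			? (depth - node_level) * 2 + 1	/* splits all the way up, plus a new root */
			: (depth - node_level) * 2 - 1;	/* already at max depth: no new root */
	}

	/* reserve_required(2, 0, 4) == 7; reserve_required(2, 1, 4) == 5 */
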
-
-static inline void btree_node_reset_sib_u64s(struct btree *b)
-{
- b->sib_u64s[0] = b->nr.live_u64s;
- b->sib_u64s[1] = b->nr.live_u64s;
-}
-
-static inline void *btree_data_end(struct btree *b)
-{
- return (void *) b->data + btree_buf_bytes(b);
-}
-
-static inline struct bkey_packed *unwritten_whiteouts_start(struct btree *b)
-{
- return (void *) ((u64 *) btree_data_end(b) - b->whiteout_u64s);
-}
-
-static inline struct bkey_packed *unwritten_whiteouts_end(struct btree *b)
-{
- return btree_data_end(b);
-}
-
-static inline void *write_block(struct btree *b)
-{
- return (void *) b->data + (b->written << 9);
-}
-
-static inline bool __btree_addr_written(struct btree *b, void *p)
-{
- return p < write_block(b);
-}
-
-static inline bool bset_written(struct btree *b, struct bset *i)
-{
- return __btree_addr_written(b, i);
-}
-
-static inline bool bkey_written(struct btree *b, struct bkey_packed *k)
-{
- return __btree_addr_written(b, k);
-}
-
-static inline ssize_t __bch2_btree_u64s_remaining(struct btree *b, void *end)
-{
- ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
- b->whiteout_u64s;
- ssize_t total = btree_buf_bytes(b) >> 3;
-
- /* Always leave one extra u64 for bch2_varint_decode: */
- used++;
-
- return total - used;
-}
-
-static inline size_t bch2_btree_keys_u64s_remaining(struct btree *b)
-{
- ssize_t remaining = __bch2_btree_u64s_remaining(b,
- btree_bkey_last(b, bset_tree_last(b)));
-
- BUG_ON(remaining < 0);
-
- if (bset_written(b, btree_bset_last(b)))
- return 0;
-
- return remaining;
-}
-
-#define BTREE_WRITE_SET_U64s_BITS 9
-
-static inline unsigned btree_write_set_buffer(struct btree *b)
-{
- /*
- * Could buffer up larger amounts of keys for btrees with larger keys,
- * pending benchmarking:
- */
- return 8 << BTREE_WRITE_SET_U64s_BITS;
-}
-
-static inline struct btree_node_entry *want_new_bset(struct bch_fs *c, struct btree *b)
-{
- struct bset_tree *t = bset_tree_last(b);
- struct btree_node_entry *bne = max(write_block(b),
- (void *) btree_bkey_last(b, bset_tree_last(b)));
- ssize_t remaining_space =
- __bch2_btree_u64s_remaining(b, bne->keys.start);
-
- if (unlikely(bset_written(b, bset(b, t)))) {
- if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
- return bne;
- } else {
- if (unlikely(bset_u64s(t) * sizeof(u64) > btree_write_set_buffer(b)) &&
- remaining_space > (ssize_t) (btree_write_set_buffer(b) >> 3))
- return bne;
- }
-
- return NULL;
-}
-
-static inline void push_whiteout(struct btree *b, struct bpos pos)
-{
- struct bkey_packed k;
-
- BUG_ON(bch2_btree_keys_u64s_remaining(b) < BKEY_U64s);
- EBUG_ON(btree_node_just_written(b));
-
- if (!bkey_pack_pos(&k, pos, b)) {
- struct bkey *u = (void *) &k;
-
- bkey_init(u);
- u->p = pos;
- }
-
- k.needs_whiteout = true;
-
- b->whiteout_u64s += k.u64s;
- bkey_p_copy(unwritten_whiteouts_start(b), &k);
-}
-
-/*
- * write lock must be held on @b (else the dirty bset that we were going to
- * insert into could be written out from under us)
- */
-static inline bool bch2_btree_node_insert_fits(struct btree *b, unsigned u64s)
-{
- if (unlikely(btree_node_need_rewrite(b)))
- return false;
-
- return u64s <= bch2_btree_keys_u64s_remaining(b);
-}
-
-void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *);
-
-bool bch2_btree_interior_updates_flush(struct bch_fs *);
-
-void bch2_journal_entry_to_btree_root(struct bch_fs *, struct jset_entry *);
-struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
- struct jset_entry *, unsigned long);
-
-void bch2_do_pending_node_rewrites(struct bch_fs *);
-void bch2_free_pending_node_rewrites(struct bch_fs *);
-
-void bch2_btree_reserve_cache_to_text(struct printbuf *, struct bch_fs *);
-
-void bch2_fs_btree_interior_update_exit(struct bch_fs *);
-void bch2_fs_btree_interior_update_init_early(struct bch_fs *);
-int bch2_fs_btree_interior_update_init(struct bch_fs *);
-
-#endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
deleted file mode 100644
index 1639c60dffa0..000000000000
--- a/fs/bcachefs/btree_write_buffer.c
+++ /dev/null
@@ -1,847 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "bkey_buf.h"
-#include "btree_locking.h"
-#include "btree_update.h"
-#include "btree_update_interior.h"
-#include "btree_write_buffer.h"
-#include "disk_accounting.h"
-#include "error.h"
-#include "extents.h"
-#include "journal.h"
-#include "journal_io.h"
-#include "journal_reclaim.h"
-
-#include <linux/prefetch.h>
-#include <linux/sort.h>
-
-static int bch2_btree_write_buffer_journal_flush(struct journal *,
- struct journal_entry_pin *, u64);
-
-static int bch2_journal_keys_to_write_buffer(struct bch_fs *, struct journal_buf *);
-
-static inline bool __wb_key_ref_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r)
-{
- return (cmp_int(l->hi, r->hi) ?:
- cmp_int(l->mi, r->mi) ?:
- cmp_int(l->lo, r->lo)) >= 0;
-}
-
-static inline bool wb_key_ref_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r)
-{
-#ifdef CONFIG_X86_64
- int cmp;
-
- asm("mov (%[l]), %%rax;"
- "sub (%[r]), %%rax;"
- "mov 8(%[l]), %%rax;"
- "sbb 8(%[r]), %%rax;"
- "mov 16(%[l]), %%rax;"
- "sbb 16(%[r]), %%rax;"
- : "=@ccae" (cmp)
- : [l] "r" (l), [r] "r" (r)
- : "rax", "cc");
-
- EBUG_ON(cmp != __wb_key_ref_cmp(l, r));
- return cmp;
-#else
- return __wb_key_ref_cmp(l, r);
-#endif
-}
-
-static int wb_key_seq_cmp(const void *_l, const void *_r)
-{
- const struct btree_write_buffered_key *l = _l;
- const struct btree_write_buffered_key *r = _r;
-
- return cmp_int(l->journal_seq, r->journal_seq);
-}
-
-/* Compare excluding idx, the low 24 bits: */
-static inline bool wb_key_eq(const void *_l, const void *_r)
-{
- const struct wb_key_ref *l = _l;
- const struct wb_key_ref *r = _r;
-
- return !((l->hi ^ r->hi)|
- (l->mi ^ r->mi)|
- ((l->lo >> 24) ^ (r->lo >> 24)));
-}
-
-static noinline void wb_sort(struct wb_key_ref *base, size_t num)
-{
- size_t n = num, a = num / 2;
-
- if (!a) /* num < 2 || size == 0 */
- return;
-
- for (;;) {
- size_t b, c, d;
-
- if (a) /* Building heap: sift down --a */
- --a;
- else if (--n) /* Sorting: Extract root to --n */
- swap(base[0], base[n]);
- else /* Sort complete */
- break;
-
- /*
- * Sift element at "a" down into heap. This is the
- * "bottom-up" variant, which significantly reduces
- * calls to cmp_func(): we find the sift-down path all
- * the way to the leaves (one compare per level), then
- * backtrack to find where to insert the target element.
- *
- * Because elements tend to sift down close to the leaves,
- * this uses fewer compares than doing two per level
- * on the way down. (A bit more than half as many on
- * average, 3/4 worst-case.)
- */
- for (b = a; c = 2*b + 1, (d = c + 1) < n;)
- b = wb_key_ref_cmp(base + c, base + d) ? c : d;
- if (d == n) /* Special case last leaf with no sibling */
- b = c;
-
- /* Now backtrack from "b" to the correct location for "a" */
- while (b != a && wb_key_ref_cmp(base + a, base + b))
- b = (b - 1) / 2;
- c = b; /* Where "a" belongs */
- while (b != a) { /* Shift it into place */
- b = (b - 1) / 2;
- swap(base[b], base[c]);
- }
- }
-}
-
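-/*
- * Slowpath for flushing a single write buffered key: drop the node write lock
- * and go through the normal transaction commit path, with flags that avoid
- * deadlocking against journal reclaim.
- */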
-static noinline int wb_flush_one_slowpath(struct btree_trans *trans,
- struct btree_iter *iter,
- struct btree_write_buffered_key *wb)
-{
- struct btree_path *path = btree_iter_path(trans, iter);
-
- bch2_btree_node_unlock_write(trans, path, path->l[0].b);
-
- trans->journal_res.seq = wb->journal_seq;
-
- return bch2_trans_update(trans, iter, &wb->k,
- BTREE_UPDATE_internal_snapshot_node) ?:
- bch2_trans_commit(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc|
- BCH_TRANS_COMMIT_no_check_rw|
- BCH_TRANS_COMMIT_no_journal_res|
- BCH_TRANS_COMMIT_journal_reclaim);
-}
-
-static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *iter,
- struct btree_write_buffered_key *wb,
- bool *write_locked,
- bool *accounting_accumulated,
- size_t *fast)
-{
- struct btree_path *path;
- int ret;
-
- EBUG_ON(!wb->journal_seq);
- EBUG_ON(!trans->c->btree_write_buffer.flushing.pin.seq);
- EBUG_ON(trans->c->btree_write_buffer.flushing.pin.seq > wb->journal_seq);
-
- ret = bch2_btree_iter_traverse(iter);
- if (ret)
- return ret;
-
- if (!*accounting_accumulated && wb->k.k.type == KEY_TYPE_accounting) {
- struct bkey u;
- struct bkey_s_c k = bch2_btree_path_peek_slot_exact(btree_iter_path(trans, iter), &u);
-
- if (k.k->type == KEY_TYPE_accounting)
- bch2_accounting_accumulate(bkey_i_to_accounting(&wb->k),
- bkey_s_c_to_accounting(k));
- }
- *accounting_accumulated = true;
-
- /*
- * We can't clone a path that has write locks: unshare it now, before
- * set_pos and traverse():
- */
- if (btree_iter_path(trans, iter)->ref > 1)
- iter->path = __bch2_btree_path_make_mut(trans, iter->path, true, _THIS_IP_);
-
- path = btree_iter_path(trans, iter);
-
- if (!*write_locked) {
- ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c);
- if (ret)
- return ret;
-
- bch2_btree_node_prep_for_write(trans, path, path->l[0].b);
- *write_locked = true;
- }
-
- if (unlikely(!bch2_btree_node_insert_fits(path->l[0].b, wb->k.k.u64s))) {
- *write_locked = false;
- return wb_flush_one_slowpath(trans, iter, wb);
- }
-
- bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq);
- (*fast)++;
- return 0;
-}
-
-/*
- * Update a btree with a write buffered key using the journal seq of the
- * original write buffer insert.
- *
- * It is not safe to rejournal the key once it has been inserted into the write
- * buffer because that may break recovery ordering. For example, the key may
- * have already been modified in the active write buffer in a seq that comes
- * before the current transaction. If we were to journal this key again and
- * crash, recovery would process updates in the wrong order.
- */
-static int
-btree_write_buffered_insert(struct btree_trans *trans,
- struct btree_write_buffered_key *wb)
-{
- struct btree_iter iter;
- int ret;
-
- bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
- BTREE_ITER_cached|BTREE_ITER_intent);
-
- trans->journal_res.seq = wb->journal_seq;
-
- ret = bch2_btree_iter_traverse(&iter) ?:
- bch2_trans_update(trans, &iter, &wb->k,
- BTREE_UPDATE_internal_snapshot_node);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
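-/*
- * Move keys from the incoming buffer to the flushing buffer: pin the journal
- * at the first incoming key's seq, grow flushing/sorted as needed, then either
- * swap the darrays (if flushing is empty) or copy as many keys as fit and
- * shift the remainder down in wb->inc.
- */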
-static void move_keys_from_inc_to_flushing(struct btree_write_buffer *wb)
-{
- struct bch_fs *c = container_of(wb, struct bch_fs, btree_write_buffer);
- struct journal *j = &c->journal;
-
- if (!wb->inc.keys.nr)
- return;
-
- bch2_journal_pin_add(j, wb->inc.keys.data[0].journal_seq, &wb->flushing.pin,
- bch2_btree_write_buffer_journal_flush);
-
- darray_resize(&wb->flushing.keys, min_t(size_t, 1U << 20, wb->flushing.keys.nr + wb->inc.keys.nr));
- darray_resize(&wb->sorted, wb->flushing.keys.size);
-
- if (!wb->flushing.keys.nr && wb->sorted.size >= wb->inc.keys.nr) {
- swap(wb->flushing.keys, wb->inc.keys);
- goto out;
- }
-
- size_t nr = min(darray_room(wb->flushing.keys),
- wb->sorted.size - wb->flushing.keys.nr);
- nr = min(nr, wb->inc.keys.nr);
-
- memcpy(&darray_top(wb->flushing.keys),
- wb->inc.keys.data,
- sizeof(wb->inc.keys.data[0]) * nr);
-
- memmove(wb->inc.keys.data,
- wb->inc.keys.data + nr,
- sizeof(wb->inc.keys.data[0]) * (wb->inc.keys.nr - nr));
-
- wb->flushing.keys.nr += nr;
- wb->inc.keys.nr -= nr;
-out:
- if (!wb->inc.keys.nr)
- bch2_journal_pin_drop(j, &wb->inc.pin);
- else
- bch2_journal_pin_update(j, wb->inc.keys.data[0].journal_seq, &wb->inc.pin,
- bch2_btree_write_buffer_journal_flush);
-
- if (j->watermark) {
- spin_lock(&j->lock);
- bch2_journal_set_watermark(j);
- spin_unlock(&j->lock);
- }
-
- BUG_ON(wb->sorted.size < wb->flushing.keys.nr);
-}
-
-static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
-{
- struct bch_fs *c = trans->c;
- struct journal *j = &c->journal;
- struct btree_write_buffer *wb = &c->btree_write_buffer;
- struct btree_iter iter = { NULL };
- size_t overwritten = 0, fast = 0, slowpath = 0, could_not_insert = 0;
- bool write_locked = false;
- bool accounting_replay_done = test_bit(BCH_FS_accounting_replay_done, &c->flags);
- int ret = 0;
-
- ret = bch2_journal_error(&c->journal);
- if (ret)
- return ret;
-
- bch2_trans_unlock(trans);
- bch2_trans_begin(trans);
-
- mutex_lock(&wb->inc.lock);
- move_keys_from_inc_to_flushing(wb);
- mutex_unlock(&wb->inc.lock);
-
- for (size_t i = 0; i < wb->flushing.keys.nr; i++) {
- wb->sorted.data[i].idx = i;
- wb->sorted.data[i].btree = wb->flushing.keys.data[i].btree;
- memcpy(&wb->sorted.data[i].pos, &wb->flushing.keys.data[i].k.k.p, sizeof(struct bpos));
- }
- wb->sorted.nr = wb->flushing.keys.nr;
-
- /*
- * We first sort so that we can detect and skip redundant updates, and
- * then we attempt to flush in sorted btree order, as this is most
- * efficient.
- *
- * However, since we're not flushing in the order they appear in the
- * journal we won't be able to drop our journal pin until everything is
- * flushed - which means this could deadlock the journal if we weren't
- * passing BCH_TRANS_COMMIT_journal_reclaim. This causes the update to fail
- * if it would block taking a journal reservation.
- *
- * If that happens, simply skip the key so we can optimistically insert
- * as many keys as possible in the fast path.
- */
- wb_sort(wb->sorted.data, wb->sorted.nr);
-
- darray_for_each(wb->sorted, i) {
- struct btree_write_buffered_key *k = &wb->flushing.keys.data[i->idx];
-
- for (struct wb_key_ref *n = i + 1; n < min(i + 4, &darray_top(wb->sorted)); n++)
- prefetch(&wb->flushing.keys.data[n->idx]);
-
- BUG_ON(!k->journal_seq);
-
- if (!accounting_replay_done &&
- k->k.k.type == KEY_TYPE_accounting) {
- slowpath++;
- continue;
- }
-
- if (i + 1 < &darray_top(wb->sorted) &&
- wb_key_eq(i, i + 1)) {
- struct btree_write_buffered_key *n = &wb->flushing.keys.data[i[1].idx];
-
- if (k->k.k.type == KEY_TYPE_accounting &&
- n->k.k.type == KEY_TYPE_accounting)
- bch2_accounting_accumulate(bkey_i_to_accounting(&n->k),
- bkey_i_to_s_c_accounting(&k->k));
-
- overwritten++;
- n->journal_seq = min_t(u64, n->journal_seq, k->journal_seq);
- k->journal_seq = 0;
- continue;
- }
-
- if (write_locked) {
- struct btree_path *path = btree_iter_path(trans, &iter);
-
- if (path->btree_id != i->btree ||
- bpos_gt(k->k.k.p, path->l[0].b->key.k.p)) {
- bch2_btree_node_unlock_write(trans, path, path->l[0].b);
- write_locked = false;
-
- ret = lockrestart_do(trans,
- bch2_btree_iter_traverse(&iter) ?:
- bch2_foreground_maybe_merge(trans, iter.path, 0,
- BCH_WATERMARK_reclaim|
- BCH_TRANS_COMMIT_journal_reclaim|
- BCH_TRANS_COMMIT_no_check_rw|
- BCH_TRANS_COMMIT_no_enospc));
- if (ret)
- goto err;
- }
- }
-
- if (!iter.path || iter.btree_id != k->btree) {
- bch2_trans_iter_exit(trans, &iter);
- bch2_trans_iter_init(trans, &iter, k->btree, k->k.k.p,
- BTREE_ITER_intent|BTREE_ITER_all_snapshots);
- }
-
- bch2_btree_iter_set_pos(&iter, k->k.k.p);
- btree_iter_path(trans, &iter)->preserve = false;
-
- bool accounting_accumulated = false;
- do {
- if (race_fault()) {
- ret = -BCH_ERR_journal_reclaim_would_deadlock;
- break;
- }
-
- ret = wb_flush_one(trans, &iter, k, &write_locked,
- &accounting_accumulated, &fast);
- if (!write_locked)
- bch2_trans_begin(trans);
- } while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
-
- if (!ret) {
- k->journal_seq = 0;
- } else if (ret == -BCH_ERR_journal_reclaim_would_deadlock) {
- slowpath++;
- ret = 0;
- } else
- break;
- }
-
- if (write_locked) {
- struct btree_path *path = btree_iter_path(trans, &iter);
- bch2_btree_node_unlock_write(trans, path, path->l[0].b);
- }
- bch2_trans_iter_exit(trans, &iter);
-
- if (ret)
- goto err;
-
- if (slowpath) {
- /*
-  * Flush the remaining keys in the order they were present in the
-  * journal, so that we can release journal pins: the fastpath zapped
-  * the seq of keys that were successfully flushed, so we can skip
-  * those here.
-  */
- trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, wb->flushing.keys.nr);
-
- sort(wb->flushing.keys.data,
- wb->flushing.keys.nr,
- sizeof(wb->flushing.keys.data[0]),
- wb_key_seq_cmp, NULL);
-
- darray_for_each(wb->flushing.keys, i) {
- if (!i->journal_seq)
- continue;
-
- if (!accounting_replay_done &&
- i->k.k.type == KEY_TYPE_accounting) {
- could_not_insert++;
- continue;
- }
-
- if (!could_not_insert)
- bch2_journal_pin_update(j, i->journal_seq, &wb->flushing.pin,
- bch2_btree_write_buffer_journal_flush);
-
- bch2_trans_begin(trans);
-
- ret = commit_do(trans, NULL, NULL,
- BCH_WATERMARK_reclaim|
- BCH_TRANS_COMMIT_journal_reclaim|
- BCH_TRANS_COMMIT_no_check_rw|
- BCH_TRANS_COMMIT_no_enospc|
- BCH_TRANS_COMMIT_no_journal_res,
- btree_write_buffered_insert(trans, i));
- if (ret)
- goto err;
-
- i->journal_seq = 0;
- }
-
- /*
- * If journal replay hasn't finished with accounting keys we
- * can't flush accounting keys at all - condense them and leave
- * them for next time.
- *
- * Q: Can the write buffer overflow?
- * A: Shouldn't be any actual risk. It's just new accounting
- * updates that the write buffer can't flush, and those are only
- * going to be generated by interior btree node updates as
- * journal replay has to split/rewrite nodes to make room for
- * its updates.
- *
- * And for those new accounting updates, updates to the same
- * counters get accumulated as they're flushed from the journal
- * to the write buffer - see the patch for eytzinger tree
- * accumulation. So we could only overflow if the number of
- * distinct counters touched somehow was very large.
- */
- if (could_not_insert) {
- struct btree_write_buffered_key *dst = wb->flushing.keys.data;
-
- darray_for_each(wb->flushing.keys, i)
- if (i->journal_seq)
- *dst++ = *i;
- wb->flushing.keys.nr = dst - wb->flushing.keys.data;
- }
- }
-err:
- if (ret || !could_not_insert) {
- bch2_journal_pin_drop(j, &wb->flushing.pin);
- wb->flushing.keys.nr = 0;
- }
-
- bch2_fs_fatal_err_on(ret, c, "%s", bch2_err_str(ret));
- trace_write_buffer_flush(trans, wb->flushing.keys.nr, overwritten, fast, 0);
- return ret;
-}
-
-static int fetch_wb_keys_from_journal(struct bch_fs *c, u64 seq)
-{
- struct journal *j = &c->journal;
- struct journal_buf *buf;
- int ret = 0;
-
- while (!ret && (buf = bch2_next_write_buffer_flush_journal_buf(j, seq))) {
- ret = bch2_journal_keys_to_write_buffer(c, buf);
- mutex_unlock(&j->buf_lock);
- }
-
- return ret;
-}
-
-static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq,
- bool *did_work)
-{
- struct bch_fs *c = trans->c;
- struct btree_write_buffer *wb = &c->btree_write_buffer;
- int ret = 0, fetch_from_journal_err;
-
- do {
- bch2_trans_unlock(trans);
-
- fetch_from_journal_err = fetch_wb_keys_from_journal(c, seq);
-
- *did_work |= wb->inc.keys.nr || wb->flushing.keys.nr;
-
- /*
- * On memory allocation failure, bch2_btree_write_buffer_flush_locked()
- * is not guaranteed to empty wb->inc:
- */
- mutex_lock(&wb->flushing.lock);
- ret = bch2_btree_write_buffer_flush_locked(trans);
- mutex_unlock(&wb->flushing.lock);
- } while (!ret &&
- (fetch_from_journal_err ||
- (wb->inc.pin.seq && wb->inc.pin.seq <= seq) ||
- (wb->flushing.pin.seq && wb->flushing.pin.seq <= seq)));
-
- return ret;
-}
-
-static int bch2_btree_write_buffer_journal_flush(struct journal *j,
- struct journal_entry_pin *_pin, u64 seq)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- bool did_work = false;
-
- return bch2_trans_run(c, btree_write_buffer_flush_seq(trans, seq, &did_work));
-}
-
-int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
-{
- struct bch_fs *c = trans->c;
- bool did_work = false;
-
- trace_and_count(c, write_buffer_flush_sync, trans, _RET_IP_);
-
- return btree_write_buffer_flush_seq(trans, journal_cur_seq(&c->journal), &did_work);
-}
-
-/*
- * The write buffer requires flushing when going RO: keys in the journal for the
- * write buffer don't have a journal pin yet
- */
-bool bch2_btree_write_buffer_flush_going_ro(struct bch_fs *c)
-{
- if (bch2_journal_error(&c->journal))
- return false;
-
- bool did_work = false;
- bch2_trans_run(c, btree_write_buffer_flush_seq(trans,
- journal_cur_seq(&c->journal), &did_work));
- return did_work;
-}
-
-int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *trans)
-{
- struct bch_fs *c = trans->c;
- struct btree_write_buffer *wb = &c->btree_write_buffer;
- int ret = 0;
- "disk usage increased %lli more than %llu sectors reserved",
- if (mutex_trylock(&wb->flushing.lock)) {
- ret = bch2_btree_write_buffer_flush_locked(trans);
- mutex_unlock(&wb->flushing.lock);
- }
-
- return ret;
-}
-
-int bch2_btree_write_buffer_tryflush(struct btree_trans *trans)
-{
- struct bch_fs *c = trans->c;
-
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer))
- return -BCH_ERR_erofs_no_writes;
-
- int ret = bch2_btree_write_buffer_flush_nocheck_rw(trans);
- bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
- return ret;
-}
-
-/*
- * In check and repair code, when checking references to write buffer btrees we
- * need to issue a flush before we can report a definitive error: this issues a flush
- * if this is a key we haven't yet checked.
- */
-int bch2_btree_write_buffer_maybe_flush(struct btree_trans *trans,
- struct bkey_s_c referring_k,
- struct bkey_buf *last_flushed)
-{
- struct bch_fs *c = trans->c;
- struct bkey_buf tmp;
- int ret = 0;
-
- bch2_bkey_buf_init(&tmp);
-
- if (!bkey_and_val_eq(referring_k, bkey_i_to_s_c(last_flushed->k))) {
- bch2_bkey_buf_reassemble(&tmp, c, referring_k);
-
- if (bkey_is_btree_ptr(referring_k.k)) {
- bch2_trans_unlock(trans);
- bch2_btree_interior_updates_flush(c);
- }
-
- ret = bch2_btree_write_buffer_flush_sync(trans);
- if (ret)
- goto err;
-
- bch2_bkey_buf_copy(last_flushed, c, tmp.k);
- ret = -BCH_ERR_transaction_restart_write_buffer_flush;
- }
-err:
- bch2_bkey_buf_exit(&tmp, c);
- return ret;
-}
-
-static void bch2_btree_write_buffer_flush_work(struct work_struct *work)
-{
- struct bch_fs *c = container_of(work, struct bch_fs, btree_write_buffer.flush_work);
- struct btree_write_buffer *wb = &c->btree_write_buffer;
- int ret;
-
- mutex_lock(&wb->flushing.lock);
- do {
- ret = bch2_trans_run(c, bch2_btree_write_buffer_flush_locked(trans));
- } while (!ret && bch2_btree_write_buffer_should_flush(c));
- mutex_unlock(&wb->flushing.lock);
-
- bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
-}
-
-static void wb_accounting_sort(struct btree_write_buffer *wb)
-{
- eytzinger0_sort(wb->accounting.data, wb->accounting.nr,
- sizeof(wb->accounting.data[0]),
- wb_key_cmp, NULL);
-}
-
-int bch2_accounting_key_to_wb_slowpath(struct bch_fs *c, enum btree_id btree,
- struct bkey_i_accounting *k)
-{
- struct btree_write_buffer *wb = &c->btree_write_buffer;
- struct btree_write_buffered_key new = { .btree = btree };
-
- bkey_copy(&new.k, &k->k_i);
-
- int ret = darray_push(&wb->accounting, new);
- if (ret)
- return ret;
-
- wb_accounting_sort(wb);
- return 0;
-}
-
-int bch2_journal_key_to_wb_slowpath(struct bch_fs *c,
- struct journal_keys_to_wb *dst,
- enum btree_id btree, struct bkey_i *k)
-{
- struct btree_write_buffer *wb = &c->btree_write_buffer;
- int ret;
-retry:
- ret = darray_make_room_gfp(&dst->wb->keys, 1, GFP_KERNEL);
- if (!ret && dst->wb == &wb->flushing)
- ret = darray_resize(&wb->sorted, wb->flushing.keys.size);
-
- if (unlikely(ret)) {
- if (dst->wb == &c->btree_write_buffer.flushing) {
- mutex_unlock(&dst->wb->lock);
- dst->wb = &c->btree_write_buffer.inc;
- bch2_journal_pin_add(&c->journal, dst->seq, &dst->wb->pin,
- bch2_btree_write_buffer_journal_flush);
- goto retry;
- }
-
- return ret;
- }
-
- dst->room = darray_room(dst->wb->keys);
- if (dst->wb == &wb->flushing)
- dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr);
- BUG_ON(!dst->room);
- BUG_ON(!dst->seq);
-
- struct btree_write_buffered_key *wb_k = &darray_top(dst->wb->keys);
- wb_k->journal_seq = dst->seq;
- wb_k->btree = btree;
- bkey_copy(&wb_k->k, k);
- dst->wb->keys.nr++;
- dst->room--;
- return 0;
-}
-
-void bch2_journal_keys_to_write_buffer_start(struct bch_fs *c, struct journal_keys_to_wb *dst, u64 seq)
-{
- struct btree_write_buffer *wb = &c->btree_write_buffer;
-
- if (mutex_trylock(&wb->flushing.lock)) {
- mutex_lock(&wb->inc.lock);
- move_keys_from_inc_to_flushing(wb);
-
- /*
- * Attempt to skip wb->inc, and add keys directly to
- * wb->flushing, saving us a copy later:
- */
-
- if (!wb->inc.keys.nr) {
- dst->wb = &wb->flushing;
- } else {
- mutex_unlock(&wb->flushing.lock);
- dst->wb = &wb->inc;
- }
- } else {
- mutex_lock(&wb->inc.lock);
- dst->wb = &wb->inc;
- }
-
- dst->room = darray_room(dst->wb->keys);
- if (dst->wb == &wb->flushing)
- dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr);
- dst->seq = seq;
-
- bch2_journal_pin_add(&c->journal, seq, &dst->wb->pin,
- bch2_btree_write_buffer_journal_flush);
-
- darray_for_each(wb->accounting, i)
- memset(&i->k.v, 0, bkey_val_bytes(&i->k.k));
-}
-
-int bch2_journal_keys_to_write_buffer_end(struct bch_fs *c, struct journal_keys_to_wb *dst)
-{
- struct btree_write_buffer *wb = &c->btree_write_buffer;
- unsigned live_accounting_keys = 0;
- int ret = 0;
-
- darray_for_each(wb->accounting, i)
- if (!bch2_accounting_key_is_zero(bkey_i_to_s_c_accounting(&i->k))) {
- i->journal_seq = dst->seq;
- live_accounting_keys++;
- ret = __bch2_journal_key_to_wb(c, dst, i->btree, &i->k);
- if (ret)
- break;
- }
-
- if (live_accounting_keys * 2 < wb->accounting.nr) {
- struct btree_write_buffered_key *dst = wb->accounting.data;
-
- darray_for_each(wb->accounting, src)
- if (!bch2_accounting_key_is_zero(bkey_i_to_s_c_accounting(&src->k)))
- *dst++ = *src;
- wb->accounting.nr = dst - wb->accounting.data;
- wb_accounting_sort(wb);
- }
-
- if (!dst->wb->keys.nr)
- bch2_journal_pin_drop(&c->journal, &dst->wb->pin);
-
- if (bch2_btree_write_buffer_should_flush(c) &&
- __bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer) &&
- !queue_work(system_unbound_wq, &c->btree_write_buffer.flush_work))
- bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
-
- if (dst->wb == &wb->flushing)
- mutex_unlock(&wb->flushing.lock);
- mutex_unlock(&wb->inc.lock);
-
- return ret;
-}
-
-static int bch2_journal_keys_to_write_buffer(struct bch_fs *c, struct journal_buf *buf)
-{
- struct journal_keys_to_wb dst;
- int ret = 0;
-
- bch2_journal_keys_to_write_buffer_start(c, &dst, le64_to_cpu(buf->data->seq));
-
- for_each_jset_entry_type(entry, buf->data, BCH_JSET_ENTRY_write_buffer_keys) {
- jset_entry_for_each_key(entry, k) {
- ret = bch2_journal_key_to_wb(c, &dst, entry->btree_id, k);
- if (ret)
- goto out;
- }
-
- entry->type = BCH_JSET_ENTRY_btree_keys;
- }
-
- spin_lock(&c->journal.lock);
- buf->need_flush_to_write_buffer = false;
- spin_unlock(&c->journal.lock);
-out:
- ret = bch2_journal_keys_to_write_buffer_end(c, &dst) ?: ret;
- return ret;
-}
-
-static int wb_keys_resize(struct btree_write_buffer_keys *wb, size_t new_size)
-{
- if (wb->keys.size >= new_size)
- return 0;
-
- if (!mutex_trylock(&wb->lock))
- return -EINTR;
-
- int ret = darray_resize(&wb->keys, new_size);
- mutex_unlock(&wb->lock);
- return ret;
-}
-
-int bch2_btree_write_buffer_resize(struct bch_fs *c, size_t new_size)
-{
- struct btree_write_buffer *wb = &c->btree_write_buffer;
-
- return wb_keys_resize(&wb->flushing, new_size) ?:
- wb_keys_resize(&wb->inc, new_size);
-}
-
-void bch2_fs_btree_write_buffer_exit(struct bch_fs *c)
-{
- struct btree_write_buffer *wb = &c->btree_write_buffer;
-
- BUG_ON((wb->inc.keys.nr || wb->flushing.keys.nr) &&
- !bch2_journal_error(&c->journal));
-
- darray_exit(&wb->accounting);
- darray_exit(&wb->sorted);
- darray_exit(&wb->flushing.keys);
- darray_exit(&wb->inc.keys);
-}
-
-int bch2_fs_btree_write_buffer_init(struct bch_fs *c)
-{
- struct btree_write_buffer *wb = &c->btree_write_buffer;
-
- mutex_init(&wb->inc.lock);
- mutex_init(&wb->flushing.lock);
- INIT_WORK(&wb->flush_work, bch2_btree_write_buffer_flush_work);
-
- /* Will be resized by journal as needed: */
- unsigned initial_size = 1 << 16;
-
- return darray_make_room(&wb->inc.keys, initial_size) ?:
- darray_make_room(&wb->flushing.keys, initial_size) ?:
- darray_make_room(&wb->sorted, initial_size);
-}
diff --git a/fs/bcachefs/btree_write_buffer.h b/fs/bcachefs/btree_write_buffer.h
deleted file mode 100644
index d535cea28bde..000000000000
--- a/fs/bcachefs/btree_write_buffer.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BTREE_WRITE_BUFFER_H
-#define _BCACHEFS_BTREE_WRITE_BUFFER_H
-
-#include "bkey.h"
-#include "disk_accounting.h"
-
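-/*
- * Flush when the combined number of buffered keys exceeds a quarter of the
- * incoming buffer's capacity; callers must wait once the incoming buffer is
- * more than three quarters full.
- */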
-static inline bool bch2_btree_write_buffer_should_flush(struct bch_fs *c)
-{
- struct btree_write_buffer *wb = &c->btree_write_buffer;
-
- return wb->inc.keys.nr + wb->flushing.keys.nr > wb->inc.keys.size / 4;
-}
-
-static inline bool bch2_btree_write_buffer_must_wait(struct bch_fs *c)
-{
- struct btree_write_buffer *wb = &c->btree_write_buffer;
-
- return wb->inc.keys.nr > wb->inc.keys.size * 3 / 4;
-}
-
-struct btree_trans;
-int bch2_btree_write_buffer_flush_sync(struct btree_trans *);
-bool bch2_btree_write_buffer_flush_going_ro(struct bch_fs *);
-int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *);
-int bch2_btree_write_buffer_tryflush(struct btree_trans *);
-
-struct bkey_buf;
-int bch2_btree_write_buffer_maybe_flush(struct btree_trans *, struct bkey_s_c, struct bkey_buf *);
-
-struct journal_keys_to_wb {
- struct btree_write_buffer_keys *wb;
- size_t room;
- u64 seq;
-};
-
-static inline int wb_key_cmp(const void *_l, const void *_r)
-{
- const struct btree_write_buffered_key *l = _l;
- const struct btree_write_buffered_key *r = _r;
-
- return cmp_int(l->btree, r->btree) ?: bpos_cmp(l->k.k.p, r->k.k.p);
-}
-
-int bch2_accounting_key_to_wb_slowpath(struct bch_fs *,
- enum btree_id, struct bkey_i_accounting *);
-
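-/*
- * Accounting keys are kept in a separate array sorted by (btree, pos) in
- * eytzinger layout, so repeated updates to the same counter accumulate into a
- * single entry; if the key isn't found, the slowpath appends it and re-sorts.
- */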
-static inline int bch2_accounting_key_to_wb(struct bch_fs *c,
- enum btree_id btree, struct bkey_i_accounting *k)
-{
- struct btree_write_buffer *wb = &c->btree_write_buffer;
- struct btree_write_buffered_key search;
- search.btree = btree;
- search.k.k.p = k->k.p;
-
- unsigned idx = eytzinger0_find(wb->accounting.data, wb->accounting.nr,
- sizeof(wb->accounting.data[0]),
- wb_key_cmp, &search);
-
- if (idx >= wb->accounting.nr)
- return bch2_accounting_key_to_wb_slowpath(c, btree, k);
-
- struct bkey_i_accounting *dst = bkey_i_to_accounting(&wb->accounting.data[idx].k);
- bch2_accounting_accumulate(dst, accounting_i_to_s_c(k));
- return 0;
-}
-
-int bch2_journal_key_to_wb_slowpath(struct bch_fs *,
- struct journal_keys_to_wb *,
- enum btree_id, struct bkey_i *);
-
-static inline int __bch2_journal_key_to_wb(struct bch_fs *c,
- struct journal_keys_to_wb *dst,
- enum btree_id btree, struct bkey_i *k)
-{
- if (unlikely(!dst->room))
- return bch2_journal_key_to_wb_slowpath(c, dst, btree, k);
-
- struct btree_write_buffered_key *wb_k = &darray_top(dst->wb->keys);
- wb_k->journal_seq = dst->seq;
- wb_k->btree = btree;
- bkey_copy(&wb_k->k, k);
- dst->wb->keys.nr++;
- dst->room--;
- return 0;
-}
-
-static inline int bch2_journal_key_to_wb(struct bch_fs *c,
- struct journal_keys_to_wb *dst,
- enum btree_id btree, struct bkey_i *k)
-{
- EBUG_ON(!dst->seq);
-
- return k->k.type == KEY_TYPE_accounting
- ? bch2_accounting_key_to_wb(c, btree, bkey_i_to_accounting(k))
- : __bch2_journal_key_to_wb(c, dst, btree, k);
-}
-
-void bch2_journal_keys_to_write_buffer_start(struct bch_fs *, struct journal_keys_to_wb *, u64);
-int bch2_journal_keys_to_write_buffer_end(struct bch_fs *, struct journal_keys_to_wb *);
-
-int bch2_btree_write_buffer_resize(struct bch_fs *, size_t);
-void bch2_fs_btree_write_buffer_exit(struct bch_fs *);
-int bch2_fs_btree_write_buffer_init(struct bch_fs *);
-
-#endif /* _BCACHEFS_BTREE_WRITE_BUFFER_H */
diff --git a/fs/bcachefs/btree_write_buffer_types.h b/fs/bcachefs/btree_write_buffer_types.h
deleted file mode 100644
index e9e76e20f43b..000000000000
--- a/fs/bcachefs/btree_write_buffer_types.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BTREE_WRITE_BUFFER_TYPES_H
-#define _BCACHEFS_BTREE_WRITE_BUFFER_TYPES_H
-
-#include "darray.h"
-#include "journal_types.h"
-
-#define BTREE_WRITE_BUFERED_VAL_U64s_MAX 4
-#define BTREE_WRITE_BUFERED_U64s_MAX (BKEY_U64s + BTREE_WRITE_BUFERED_VAL_U64s_MAX)
-
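-/*
- * Reference into the flushing keys array, packed so sorting can compare
- * (btree, pos) as three u64s (hi/mi/lo); idx - the index into the keys
- * array - occupies the low 24 bits and is excluded from equality checks.
- */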
-struct wb_key_ref {
-union {
- struct {
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- unsigned idx:24;
- u8 pos[sizeof(struct bpos)];
- enum btree_id btree:8;
-#else
- enum btree_id btree:8;
- u8 pos[sizeof(struct bpos)];
- unsigned idx:24;
-#endif
- } __packed;
- struct {
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- u64 lo;
- u64 mi;
- u64 hi;
-#else
- u64 hi;
- u64 mi;
- u64 lo;
-#endif
- };
-};
-};
-
-struct btree_write_buffered_key {
- enum btree_id btree:8;
- u64 journal_seq:56;
- __BKEY_PADDED(k, BTREE_WRITE_BUFERED_VAL_U64s_MAX);
-};
-
-struct btree_write_buffer_keys {
- DARRAY(struct btree_write_buffered_key) keys;
- struct journal_entry_pin pin;
- struct mutex lock;
-};
-
-struct btree_write_buffer {
- DARRAY(struct wb_key_ref) sorted;
- struct btree_write_buffer_keys inc;
- struct btree_write_buffer_keys flushing;
- struct work_struct flush_work;
-
- DARRAY(struct btree_write_buffered_key) accounting;
-};
-
-#endif /* _BCACHEFS_BTREE_WRITE_BUFFER_TYPES_H */
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
deleted file mode 100644
index ec7d9a59bea9..000000000000
--- a/fs/bcachefs/buckets.c
+++ /dev/null
@@ -1,1327 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Code for manipulating bucket marks for garbage collection.
- *
- * Copyright 2014 Datera, Inc.
- */
-
-#include "bcachefs.h"
-#include "alloc_background.h"
-#include "backpointers.h"
-#include "bset.h"
-#include "btree_gc.h"
-#include "btree_update.h"
-#include "buckets.h"
-#include "buckets_waiting_for_journal.h"
-#include "disk_accounting.h"
-#include "ec.h"
-#include "error.h"
-#include "inode.h"
-#include "movinggc.h"
-#include "recovery.h"
-#include "reflink.h"
-#include "replicas.h"
-#include "subvolume.h"
-#include "trace.h"
-
-#include <linux/preempt.h>
-
-void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
-{
- memset(usage, 0, sizeof(*usage));
- acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage, dev_usage_u64s());
-}
-
-static u64 reserve_factor(u64 r)
-{
- return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
-}
-
-static struct bch_fs_usage_short
-__bch2_fs_usage_read_short(struct bch_fs *c)
-{
- struct bch_fs_usage_short ret;
- u64 data, reserved;
-
- ret.capacity = c->capacity -
- percpu_u64_get(&c->usage->hidden);
-
- data = percpu_u64_get(&c->usage->data) +
- percpu_u64_get(&c->usage->btree);
- reserved = percpu_u64_get(&c->usage->reserved) +
- percpu_u64_get(c->online_reserved);
-
- ret.used = min(ret.capacity, data + reserve_factor(reserved));
- ret.free = ret.capacity - ret.used;
-
- ret.nr_inodes = percpu_u64_get(&c->usage->nr_inodes);
-
- return ret;
-}
-
-struct bch_fs_usage_short
-bch2_fs_usage_read_short(struct bch_fs *c)
-{
- struct bch_fs_usage_short ret;
-
- percpu_down_read(&c->mark_lock);
- ret = __bch2_fs_usage_read_short(c);
- percpu_up_read(&c->mark_lock);
-
- return ret;
-}
-
-void bch2_dev_usage_to_text(struct printbuf *out,
- struct bch_dev *ca,
- struct bch_dev_usage *usage)
-{
- if (out->nr_tabstops < 5) {
- printbuf_tabstops_reset(out);
- printbuf_tabstop_push(out, 12);
- printbuf_tabstop_push(out, 16);
- printbuf_tabstop_push(out, 16);
- printbuf_tabstop_push(out, 16);
- printbuf_tabstop_push(out, 16);
- }
-
- prt_printf(out, "\tbuckets\rsectors\rfragmented\r\n");
-
- for (unsigned i = 0; i < BCH_DATA_NR; i++) {
- bch2_prt_data_type(out, i);
- prt_printf(out, "\t%llu\r%llu\r%llu\r\n",
- usage->d[i].buckets,
- usage->d[i].sectors,
- usage->d[i].fragmented);
- }
-
- prt_printf(out, "capacity\t%llu\r\n", ca->mi.nbuckets);
-}
-
-static int bch2_check_fix_ptr(struct btree_trans *trans,
- struct bkey_s_c k,
- struct extent_ptr_decoded p,
- const union bch_extent_entry *entry,
- bool *do_update)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
- if (!ca) {
- if (fsck_err_on(p.ptr.dev != BCH_SB_MEMBER_INVALID,
- trans, ptr_to_invalid_device,
- "pointer to missing device %u\n"
- "while marking %s",
- p.ptr.dev,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- *do_update = true;
- return 0;
- }
-
- struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
- if (!g) {
- if (fsck_err(trans, ptr_to_invalid_device,
- "pointer to invalid bucket on device %u\n"
- "while marking %s",
- p.ptr.dev,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- *do_update = true;
- goto out;
- }
-
- enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);
-
- if (fsck_err_on(!g->gen_valid,
- trans, ptr_to_missing_alloc_key,
- "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
- p.ptr.gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- if (!p.ptr.cached) {
- g->gen_valid = true;
- g->gen = p.ptr.gen;
- } else {
- *do_update = true;
- }
- }
-
- if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
- trans, ptr_gen_newer_than_bucket_gen,
- "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
- p.ptr.gen, g->gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- if (!p.ptr.cached &&
- (g->data_type != BCH_DATA_btree ||
- data_type == BCH_DATA_btree)) {
- g->gen_valid = true;
- g->gen = p.ptr.gen;
- g->data_type = 0;
- g->stripe_sectors = 0;
- g->dirty_sectors = 0;
- g->cached_sectors = 0;
- } else {
- *do_update = true;
- }
- }
-
- if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
- trans, ptr_gen_newer_than_bucket_gen,
- "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
- bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
- p.ptr.gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- *do_update = true;
-
- if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
- trans, stale_dirty_ptr,
- "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
- p.ptr.gen, g->gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- *do_update = true;
-
- if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
- goto out;
-
- if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type),
- trans, ptr_bucket_data_type_mismatch,
- "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
- bch2_data_type_str(g->data_type),
- bch2_data_type_str(data_type),
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- if (data_type == BCH_DATA_btree) {
- g->gen_valid = true;
- g->gen = p.ptr.gen;
- g->data_type = data_type;
- g->stripe_sectors = 0;
- g->dirty_sectors = 0;
- g->cached_sectors = 0;
- } else {
- *do_update = true;
- }
- }
-
- if (p.has_ec) {
- struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);
-
- if (fsck_err_on(!m || !m->alive,
- trans, ptr_to_missing_stripe,
- "pointer to nonexistent stripe %llu\n"
- "while marking %s",
- (u64) p.ec.idx,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- *do_update = true;
-
- if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p),
- trans, ptr_to_incorrect_stripe,
- "pointer does not match stripe %llu\n"
- "while marking %s",
- (u64) p.ec.idx,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- *do_update = true;
- }
-out:
-fsck_err:
- bch2_dev_put(ca);
- printbuf_exit(&buf);
- return ret;
-}
-
-int bch2_check_fix_ptrs(struct btree_trans *trans,
- enum btree_id btree, unsigned level, struct bkey_s_c k,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bch_fs *c = trans->c;
- struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry_c;
- struct extent_ptr_decoded p = { 0 };
- bool do_update = false;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- percpu_down_read(&c->mark_lock);
-
- bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) {
- ret = bch2_check_fix_ptr(trans, k, p, entry_c, &do_update);
- if (ret)
- goto err;
- }
-
- if (do_update) {
- if (flags & BTREE_TRIGGER_is_root) {
- bch_err(c, "cannot update btree roots yet");
- ret = -EINVAL;
- goto err;
- }
-
- struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
- ret = PTR_ERR_OR_ZERO(new);
- if (ret)
- goto err;
-
- rcu_read_lock();
- bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_exists(c, ptr->dev));
- rcu_read_unlock();
-
- if (level) {
- /*
- * We don't want to drop btree node pointers - if the
- * btree node isn't there anymore, the read path will
- * sort it out:
- */
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
- rcu_read_lock();
- bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
- struct bucket *g = PTR_GC_BUCKET(ca, ptr);
-
- ptr->gen = g->gen;
- }
- rcu_read_unlock();
- } else {
- struct bkey_ptrs ptrs;
- union bch_extent_entry *entry;
-
- rcu_read_lock();
-restart_drop_ptrs:
- ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
- bkey_for_each_ptr_decode(bkey_i_to_s(new).k, ptrs, p, entry) {
- struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
- struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
- enum bch_data_type data_type = bch2_bkey_ptr_data_type(bkey_i_to_s_c(new), p, entry);
-
- if ((p.ptr.cached &&
- (!g->gen_valid || gen_cmp(p.ptr.gen, g->gen) > 0)) ||
- (!p.ptr.cached &&
- gen_cmp(p.ptr.gen, g->gen) < 0) ||
- gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX ||
- (g->data_type &&
- g->data_type != data_type)) {
- bch2_bkey_drop_ptr(bkey_i_to_s(new), &entry->ptr);
- goto restart_drop_ptrs;
- }
- }
- rcu_read_unlock();
-again:
- ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
- bkey_extent_entry_for_each(ptrs, entry) {
- if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
- struct gc_stripe *m = genradix_ptr(&c->gc_stripes,
- entry->stripe_ptr.idx);
- union bch_extent_entry *next_ptr;
-
- bkey_extent_entry_for_each_from(ptrs, next_ptr, entry)
- if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr)
- goto found;
- next_ptr = NULL;
-found:
- if (!next_ptr) {
- bch_err(c, "aieee, found stripe ptr with no data ptr");
- continue;
- }
-
- if (!m || !m->alive ||
- !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block],
- &next_ptr->ptr,
- m->sectors)) {
- bch2_bkey_extent_entry_drop(new, entry);
- goto again;
- }
- }
- }
- }
-
- if (0) {
- printbuf_reset(&buf);
- bch2_bkey_val_to_text(&buf, c, k);
- bch_info(c, "updated %s", buf.buf);
-
- printbuf_reset(&buf);
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
- bch_info(c, "new key %s", buf.buf);
- }
-
- percpu_up_read(&c->mark_lock);
- struct btree_iter iter;
- bch2_trans_node_iter_init(trans, &iter, btree, new->k.p, 0, level,
- BTREE_ITER_intent|BTREE_ITER_all_snapshots);
- ret = bch2_btree_iter_traverse(&iter) ?:
- bch2_trans_update(trans, &iter, new,
- BTREE_UPDATE_internal_snapshot_node|
- BTREE_TRIGGER_norun);
- bch2_trans_iter_exit(trans, &iter);
- percpu_down_read(&c->mark_lock);
-
- if (ret)
- goto err;
-
- if (level)
- bch2_btree_node_update_key_early(trans, btree, level - 1, k, new);
- }
-err:
- percpu_up_read(&c->mark_lock);
- printbuf_exit(&buf);
- return ret;
-}
-
-int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
- struct bkey_s_c k,
- const struct bch_extent_ptr *ptr,
- s64 sectors, enum bch_data_type ptr_data_type,
- u8 b_gen, u8 bucket_data_type,
- u32 *bucket_sectors)
-{
- struct bch_fs *c = trans->c;
- size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
- struct printbuf buf = PRINTBUF;
- bool inserting = sectors > 0;
- int ret = 0;
-
- BUG_ON(!sectors);
-
- if (gen_after(ptr->gen, b_gen)) {
- bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
- ptr_gen_newer_than_bucket_gen,
- "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
- "while marking %s",
- ptr->dev, bucket_nr, b_gen,
- bch2_data_type_str(bucket_data_type ?: ptr_data_type),
- ptr->gen,
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- if (inserting)
- goto err;
- goto out;
- }
-
- if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
- bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
- ptr_too_stale,
- "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
- "while marking %s",
- ptr->dev, bucket_nr, b_gen,
- bch2_data_type_str(bucket_data_type ?: ptr_data_type),
- ptr->gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- if (inserting)
- goto err;
- goto out;
- }
-
- if (b_gen != ptr->gen && ptr->cached) {
- ret = 1;
- goto out;
- }
-
- if (b_gen != ptr->gen) {
- bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
- stale_dirty_ptr,
- "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
- "while marking %s",
- ptr->dev, bucket_nr, b_gen,
- bucket_gen_get(ca, bucket_nr),
- bch2_data_type_str(bucket_data_type ?: ptr_data_type),
- ptr->gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- if (inserting)
- goto err;
- goto out;
- }
-
- if (bucket_data_type_mismatch(bucket_data_type, ptr_data_type)) {
- bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
- ptr_bucket_data_type_mismatch,
- "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
- "while marking %s",
- ptr->dev, bucket_nr, b_gen,
- bch2_data_type_str(bucket_data_type),
- bch2_data_type_str(ptr_data_type),
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- if (inserting)
- goto err;
- goto out;
- }
-
- if ((u64) *bucket_sectors + sectors > U32_MAX) {
- bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
- bucket_sector_count_overflow,
- "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
- "while marking %s",
- ptr->dev, bucket_nr, b_gen,
- bch2_data_type_str(bucket_data_type ?: ptr_data_type),
- *bucket_sectors, sectors,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- if (inserting)
- goto err;
- sectors = -*bucket_sectors;
- }
-
- *bucket_sectors += sectors;
-out:
- printbuf_exit(&buf);
- return ret;
-err:
- bch2_dump_trans_updates(trans);
- ret = -BCH_ERR_bucket_ref_update;
- goto out;
-}
-
-void bch2_trans_account_disk_usage_change(struct btree_trans *trans)
-{
- struct bch_fs *c = trans->c;
- u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
- static int warned_disk_usage = 0;
- bool warn = false;
-
- percpu_down_read(&c->mark_lock);
- struct bch_fs_usage_base *src = &trans->fs_usage_delta;
-
- s64 added = src->btree + src->data + src->reserved;
-
- /*
- * Not allowed to reduce sectors_available except by getting a
- * reservation:
- */
- s64 should_not_have_added = added - (s64) disk_res_sectors;
- if (unlikely(should_not_have_added > 0)) {
- u64 old, new;
-
- old = atomic64_read(&c->sectors_available);
- do {
- new = max_t(s64, 0, old - should_not_have_added);
- } while (!atomic64_try_cmpxchg(&c->sectors_available,
- &old, new));
-
- added -= should_not_have_added;
- warn = true;
- }
-
- if (added > 0) {
- trans->disk_res->sectors -= added;
- this_cpu_sub(*c->online_reserved, added);
- }
-
- preempt_disable();
- struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage);
- acc_u64s((u64 *) dst, (u64 *) src, sizeof(*src) / sizeof(u64));
- preempt_enable();
- percpu_up_read(&c->mark_lock);
-
- if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
- bch2_trans_inconsistent(trans,
- "disk usage increased %lli more than %llu sectors reserved)",
- should_not_have_added, disk_res_sectors);
-}
-
-/* KEY_TYPE_extent: */
-
-static int __mark_pointer(struct btree_trans *trans, struct bch_dev *ca,
- struct bkey_s_c k,
- const struct extent_ptr_decoded *p,
- s64 sectors, enum bch_data_type ptr_data_type,
- struct bch_alloc_v4 *a)
-{
- u32 *dst_sectors = p->has_ec ? &a->stripe_sectors :
- !p->ptr.cached ? &a->dirty_sectors :
- &a->cached_sectors;
- int ret = bch2_bucket_ref_update(trans, ca, k, &p->ptr, sectors, ptr_data_type,
- a->gen, a->data_type, dst_sectors);
-
- if (ret)
- return ret;
-
- alloc_data_type_set(a, ptr_data_type);
- return 0;
-}
-
-static int bch2_trigger_pointer(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c k, struct extent_ptr_decoded p,
- const union bch_extent_entry *entry,
- s64 *sectors,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bch_fs *c = trans->c;
- bool insert = !(flags & BTREE_TRIGGER_overwrite);
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- u64 abs_sectors = ptr_disk_sectors(level ? btree_sectors(c) : k.k->size, p);
- *sectors = insert ? abs_sectors : -abs_sectors;
-
- struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
- if (unlikely(!ca)) {
- if (insert && p.ptr.dev != BCH_SB_MEMBER_INVALID)
- ret = -BCH_ERR_trigger_pointer;
- goto err;
- }
-
- struct bpos bucket;
- struct bch_backpointer bp;
- __bch2_extent_ptr_to_bp(trans->c, ca, btree_id, level, k, p, entry, &bucket, &bp, abs_sectors);
-
- if (flags & BTREE_TRIGGER_transactional) {
- struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket, 0);
- ret = PTR_ERR_OR_ZERO(a) ?:
- __mark_pointer(trans, ca, k, &p, *sectors, bp.data_type, &a->v);
- if (ret)
- goto err;
-
- if (!p.ptr.cached) {
- ret = bch2_bucket_backpointer_mod(trans, ca, bucket, bp, k, insert);
- if (ret)
- goto err;
- }
- }
-
- if (flags & BTREE_TRIGGER_gc) {
- percpu_down_read(&c->mark_lock);
- struct bucket *g = gc_bucket(ca, bucket.offset);
- if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s",
- p.ptr.dev,
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- ret = -BCH_ERR_trigger_pointer;
- goto err_unlock;
- }
-
- bucket_lock(g);
- struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
- ret = __mark_pointer(trans, ca, k, &p, *sectors, bp.data_type, &new);
- alloc_to_bucket(g, new);
- bucket_unlock(g);
-err_unlock:
- percpu_up_read(&c->mark_lock);
-
- if (!ret)
- ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
- }
-err:
- bch2_dev_put(ca);
- printbuf_exit(&buf);
- return ret;
-}
-
-static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
- struct bkey_s_c k,
- struct extent_ptr_decoded p,
- enum bch_data_type data_type,
- s64 sectors,
- enum btree_iter_update_trigger_flags flags)
-{
- if (flags & BTREE_TRIGGER_transactional) {
- struct btree_iter iter;
- struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, &iter,
- BTREE_ID_stripes, POS(0, p.ec.idx),
- BTREE_ITER_with_updates, stripe);
- int ret = PTR_ERR_OR_ZERO(s);
- if (unlikely(ret)) {
- bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
- "pointer to nonexistent stripe %llu",
- (u64) p.ec.idx);
- goto err;
- }
-
- if (!bch2_ptr_matches_stripe(&s->v, p)) {
- bch2_trans_inconsistent(trans,
- "stripe pointer doesn't match stripe %llu",
- (u64) p.ec.idx);
- ret = -BCH_ERR_trigger_stripe_pointer;
- goto err;
- }
-
- stripe_blockcount_set(&s->v, p.ec.block,
- stripe_blockcount_get(&s->v, p.ec.block) +
- sectors);
-
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_replicas,
- };
- bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
- acc.replicas.data_type = data_type;
- ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
- }
-
- if (flags & BTREE_TRIGGER_gc) {
- struct bch_fs *c = trans->c;
-
- struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.ec.idx, GFP_KERNEL);
- if (!m) {
- bch_err(c, "error allocating memory for gc_stripes, idx %llu",
- (u64) p.ec.idx);
- return -BCH_ERR_ENOMEM_mark_stripe_ptr;
- }
-
- mutex_lock(&c->ec_stripes_heap_lock);
-
- if (!m || !m->alive) {
- mutex_unlock(&c->ec_stripes_heap_lock);
- struct printbuf buf = PRINTBUF;
- bch2_bkey_val_to_text(&buf, c, k);
- bch_err_ratelimited(c, "pointer to nonexistent stripe %llu\n while marking %s",
- (u64) p.ec.idx, buf.buf);
- printbuf_exit(&buf);
- bch2_inconsistent_error(c);
- return -BCH_ERR_trigger_stripe_pointer;
- }
-
- m->block_sectors[p.ec.block] += sectors;
-
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_replicas,
- };
- memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e));
- mutex_unlock(&c->ec_stripes_heap_lock);
-
- acc.replicas.data_type = data_type;
- int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, true);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int __trigger_extent(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c k,
- enum btree_iter_update_trigger_flags flags,
- s64 *replicas_sectors)
-{
- bool gc = flags & BTREE_TRIGGER_gc;
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
- ? BCH_DATA_btree
- : BCH_DATA_user;
- int ret = 0;
-
- struct disk_accounting_pos acc_replicas_key = {
- .type = BCH_DISK_ACCOUNTING_replicas,
- .replicas.data_type = data_type,
- .replicas.nr_devs = 0,
- .replicas.nr_required = 1,
- };
-
- struct disk_accounting_pos acct_compression_key = {
- .type = BCH_DISK_ACCOUNTING_compression,
- };
- u64 compression_acct[3] = { 1, 0, 0 };
-
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- s64 disk_sectors = 0;
- ret = bch2_trigger_pointer(trans, btree_id, level, k, p, entry, &disk_sectors, flags);
- if (ret < 0)
- return ret;
-
- bool stale = ret > 0;
-
- if (p.ptr.cached && stale)
- continue;
-
- if (p.ptr.cached) {
- ret = bch2_mod_dev_cached_sectors(trans, p.ptr.dev, disk_sectors, gc);
- if (ret)
- return ret;
- } else if (!p.has_ec) {
- *replicas_sectors += disk_sectors;
- replicas_entry_add_dev(&acc_replicas_key.replicas, p.ptr.dev);
- } else {
- ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags);
- if (ret)
- return ret;
-
- /*
- * There may be other dirty pointers in this extent, but
- * if so they're not required for mounting if we have an
- * erasure coded pointer in this extent:
- */
- acc_replicas_key.replicas.nr_required = 0;
- }
-
- if (acct_compression_key.compression.type &&
- acct_compression_key.compression.type != p.crc.compression_type) {
- if (flags & BTREE_TRIGGER_overwrite)
- bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));
-
- ret = bch2_disk_accounting_mod(trans, &acct_compression_key, compression_acct,
- ARRAY_SIZE(compression_acct), gc);
- if (ret)
- return ret;
-
- compression_acct[0] = 1;
- compression_acct[1] = 0;
- compression_acct[2] = 0;
- }
-
- acct_compression_key.compression.type = p.crc.compression_type;
- if (p.crc.compression_type) {
- compression_acct[1] += p.crc.uncompressed_size;
- compression_acct[2] += p.crc.compressed_size;
- }
- }
-
- if (acc_replicas_key.replicas.nr_devs) {
- ret = bch2_disk_accounting_mod(trans, &acc_replicas_key, replicas_sectors, 1, gc);
- if (ret)
- return ret;
- }
-
- if (acc_replicas_key.replicas.nr_devs && !level && k.k->p.snapshot) {
- struct disk_accounting_pos acc_snapshot_key = {
- .type = BCH_DISK_ACCOUNTING_snapshot,
- .snapshot.id = k.k->p.snapshot,
- };
- ret = bch2_disk_accounting_mod(trans, &acc_snapshot_key, replicas_sectors, 1, gc);
- if (ret)
- return ret;
- }
-
- if (acct_compression_key.compression.type) {
- if (flags & BTREE_TRIGGER_overwrite)
- bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));
-
- ret = bch2_disk_accounting_mod(trans, &acct_compression_key, compression_acct,
- ARRAY_SIZE(compression_acct), gc);
- if (ret)
- return ret;
- }
-
- if (level) {
- struct disk_accounting_pos acc_btree_key = {
- .type = BCH_DISK_ACCOUNTING_btree,
- .btree.id = btree_id,
- };
- ret = bch2_disk_accounting_mod(trans, &acc_btree_key, replicas_sectors, 1, gc);
- if (ret)
- return ret;
- } else {
- bool insert = !(flags & BTREE_TRIGGER_overwrite);
- struct disk_accounting_pos acc_inum_key = {
- .type = BCH_DISK_ACCOUNTING_inum,
- .inum.inum = k.k->p.inode,
- };
- s64 v[3] = {
- insert ? 1 : -1,
- insert ? k.k->size : -((s64) k.k->size),
- *replicas_sectors,
- };
- ret = bch2_disk_accounting_mod(trans, &acc_inum_key, v, ARRAY_SIZE(v), gc);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-int bch2_trigger_extent(struct btree_trans *trans,
- enum btree_id btree, unsigned level,
- struct bkey_s_c old, struct bkey_s new,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bch_fs *c = trans->c;
- struct bkey_ptrs_c new_ptrs = bch2_bkey_ptrs_c(new.s_c);
- struct bkey_ptrs_c old_ptrs = bch2_bkey_ptrs_c(old);
- unsigned new_ptrs_bytes = (void *) new_ptrs.end - (void *) new_ptrs.start;
- unsigned old_ptrs_bytes = (void *) old_ptrs.end - (void *) old_ptrs.start;
-
- if (unlikely(flags & BTREE_TRIGGER_check_repair))
- return bch2_check_fix_ptrs(trans, btree, level, new.s_c, flags);
-
- /* if pointers aren't changing - nothing to do: */
- if (new_ptrs_bytes == old_ptrs_bytes &&
- !memcmp(new_ptrs.start,
- old_ptrs.start,
- new_ptrs_bytes))
- return 0;
-
- if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
- s64 old_replicas_sectors = 0, new_replicas_sectors = 0;
-
- if (old.k->type) {
- int ret = __trigger_extent(trans, btree, level, old,
- flags & ~BTREE_TRIGGER_insert,
- &old_replicas_sectors);
- if (ret)
- return ret;
- }
-
- if (new.k->type) {
- int ret = __trigger_extent(trans, btree, level, new.s_c,
- flags & ~BTREE_TRIGGER_overwrite,
- &new_replicas_sectors);
- if (ret)
- return ret;
- }
-
- int need_rebalance_delta = 0;
- s64 need_rebalance_sectors_delta = 0;
-
- s64 s = bch2_bkey_sectors_need_rebalance(c, old);
- need_rebalance_delta -= s != 0;
- need_rebalance_sectors_delta -= s;
-
- s = bch2_bkey_sectors_need_rebalance(c, new.s_c);
- need_rebalance_delta += s != 0;
- need_rebalance_sectors_delta += s;
-
- if ((flags & BTREE_TRIGGER_transactional) && need_rebalance_delta) {
- int ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work,
- new.k->p, need_rebalance_delta > 0);
- if (ret)
- return ret;
- }
-
- if (need_rebalance_sectors_delta) {
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_rebalance_work,
- };
- int ret = bch2_disk_accounting_mod(trans, &acc, &need_rebalance_sectors_delta, 1,
- flags & BTREE_TRIGGER_gc);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-/* KEY_TYPE_reservation */
-
-static int __trigger_reservation(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level, struct bkey_s_c k,
- enum btree_iter_update_trigger_flags flags)
-{
- if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
- s64 sectors = k.k->size;
-
- if (flags & BTREE_TRIGGER_overwrite)
- sectors = -sectors;
-
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_persistent_reserved,
- .persistent_reserved.nr_replicas = bkey_s_c_to_reservation(k).v->nr_replicas,
- };
-
- return bch2_disk_accounting_mod(trans, &acc, &sectors, 1, flags & BTREE_TRIGGER_gc);
- }
-
- return 0;
-}
-
-int bch2_trigger_reservation(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old, struct bkey_s new,
- enum btree_iter_update_trigger_flags flags)
-{
- return trigger_run_overwrite_then_insert(__trigger_reservation, trans, btree_id, level, old, new, flags);
-}
-
-/* Mark superblocks: */
-
-static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
- struct bch_dev *ca, u64 b,
- enum bch_data_type type,
- unsigned sectors)
-{
- struct btree_iter iter;
- int ret = 0;
-
- struct bkey_i_alloc_v4 *a =
- bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(ca->dev_idx, b));
- if (IS_ERR(a))
- return PTR_ERR(a);
-
- if (a->v.data_type && type && a->v.data_type != type) {
- bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
- bucket_metadata_type_mismatch,
- "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
- "while marking %s",
- iter.pos.inode, iter.pos.offset, a->v.gen,
- bch2_data_type_str(a->v.data_type),
- bch2_data_type_str(type),
- bch2_data_type_str(type));
- ret = -BCH_ERR_metadata_bucket_inconsistency;
- goto err;
- }
-
- if (a->v.data_type != type ||
- a->v.dirty_sectors != sectors) {
- a->v.data_type = type;
- a->v.dirty_sectors = sectors;
- ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
- }
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static int bch2_mark_metadata_bucket(struct btree_trans *trans, struct bch_dev *ca,
- u64 b, enum bch_data_type data_type, unsigned sectors,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bch_fs *c = trans->c;
- int ret = 0;
-
- percpu_down_read(&c->mark_lock);
- struct bucket *g = gc_bucket(ca, b);
- if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u when marking metadata type %s",
- ca->dev_idx, bch2_data_type_str(data_type)))
- goto err_unlock;
-
- bucket_lock(g);
- struct bch_alloc_v4 old = bucket_m_to_alloc(*g);
-
- if (bch2_fs_inconsistent_on(g->data_type &&
- g->data_type != data_type, c,
- "different types of data in same bucket: %s, %s",
- bch2_data_type_str(g->data_type),
- bch2_data_type_str(data_type)))
- goto err;
-
- if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
- "bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size",
- ca->dev_idx, b, g->gen,
- bch2_data_type_str(g->data_type ?: data_type),
- g->dirty_sectors, sectors))
- goto err;
-
- g->data_type = data_type;
- g->dirty_sectors += sectors;
- struct bch_alloc_v4 new = bucket_m_to_alloc(*g);
- bucket_unlock(g);
- percpu_up_read(&c->mark_lock);
- ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
- return ret;
-err:
- bucket_unlock(g);
-err_unlock:
- percpu_up_read(&c->mark_lock);
- return -BCH_ERR_metadata_bucket_inconsistency;
-}
-
-int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
- struct bch_dev *ca, u64 b,
- enum bch_data_type type, unsigned sectors,
- enum btree_iter_update_trigger_flags flags)
-{
- BUG_ON(type != BCH_DATA_free &&
- type != BCH_DATA_sb &&
- type != BCH_DATA_journal);
-
- /*
- * Backup superblock might be past the end of our normal usable space:
- */
- if (b >= ca->mi.nbuckets)
- return 0;
-
- if (flags & BTREE_TRIGGER_gc)
- return bch2_mark_metadata_bucket(trans, ca, b, type, sectors, flags);
- else if (flags & BTREE_TRIGGER_transactional)
- return commit_do(trans, NULL, NULL, 0,
- __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
- else
- BUG();
-}
-
-static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
- struct bch_dev *ca, u64 start, u64 end,
- enum bch_data_type type, u64 *bucket, unsigned *bucket_sectors,
- enum btree_iter_update_trigger_flags flags)
-{
- do {
- u64 b = sector_to_bucket(ca, start);
- unsigned sectors =
- min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
-
- if (b != *bucket && *bucket_sectors) {
- int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
- type, *bucket_sectors, flags);
- if (ret)
- return ret;
-
- *bucket_sectors = 0;
- }
-
- *bucket = b;
- *bucket_sectors += sectors;
- start += sectors;
- } while (start < end);
-
- return 0;
-}
-
-static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, struct bch_dev *ca,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bch_fs *c = trans->c;
-
- mutex_lock(&c->sb_lock);
- struct bch_sb_layout layout = ca->disk_sb.sb->layout;
- mutex_unlock(&c->sb_lock);
-
- u64 bucket = 0;
- unsigned i, bucket_sectors = 0;
- int ret;
-
- for (i = 0; i < layout.nr_superblocks; i++) {
- u64 offset = le64_to_cpu(layout.sb_offset[i]);
-
- if (offset == BCH_SB_SECTOR) {
- ret = bch2_trans_mark_metadata_sectors(trans, ca,
- 0, BCH_SB_SECTOR,
- BCH_DATA_sb, &bucket, &bucket_sectors, flags);
- if (ret)
- return ret;
- }
-
- ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
- offset + (1 << layout.sb_max_size_bits),
- BCH_DATA_sb, &bucket, &bucket_sectors, flags);
- if (ret)
- return ret;
- }
-
- if (bucket_sectors) {
- ret = bch2_trans_mark_metadata_bucket(trans, ca,
- bucket, BCH_DATA_sb, bucket_sectors, flags);
- if (ret)
- return ret;
- }
-
- for (i = 0; i < ca->journal.nr; i++) {
- ret = bch2_trans_mark_metadata_bucket(trans, ca,
- ca->journal.buckets[i],
- BCH_DATA_journal, ca->mi.bucket_size, flags);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca,
- enum btree_iter_update_trigger_flags flags)
-{
- int ret = bch2_trans_run(c,
- __bch2_trans_mark_dev_sb(trans, ca, flags));
- bch_err_fn(c, ret);
- return ret;
-}
-
-int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c,
- enum btree_iter_update_trigger_flags flags)
-{
- for_each_online_member(c, ca) {
- int ret = bch2_trans_mark_dev_sb(c, ca, flags);
- if (ret) {
- percpu_ref_put(&ca->io_ref);
- return ret;
- }
- }
-
- return 0;
-}
-
-int bch2_trans_mark_dev_sbs(struct bch_fs *c)
-{
- return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_transactional);
-}
-
-/* Disk reservations: */
-
-#define SECTORS_CACHE 1024
-
-int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
- u64 sectors, enum bch_reservation_flags flags)
-{
- struct bch_fs_pcpu *pcpu;
- u64 old, get;
- u64 sectors_available;
- int ret;
-
- percpu_down_read(&c->mark_lock);
- preempt_disable();
- pcpu = this_cpu_ptr(c->pcpu);
-
- if (sectors <= pcpu->sectors_available)
- goto out;
-
- old = atomic64_read(&c->sectors_available);
- do {
- get = min((u64) sectors + SECTORS_CACHE, old);
-
- if (get < sectors) {
- preempt_enable();
- goto recalculate;
- }
- } while (!atomic64_try_cmpxchg(&c->sectors_available,
- &old, old - get));
-
- pcpu->sectors_available += get;
-
-out:
- pcpu->sectors_available -= sectors;
- this_cpu_add(*c->online_reserved, sectors);
- res->sectors += sectors;
-
- preempt_enable();
- percpu_up_read(&c->mark_lock);
- return 0;
-
-recalculate:
- mutex_lock(&c->sectors_available_lock);
-
- percpu_u64_set(&c->pcpu->sectors_available, 0);
- sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);
-
- if (sectors_available && (flags & BCH_DISK_RESERVATION_PARTIAL))
- sectors = min(sectors, sectors_available);
-
- if (sectors <= sectors_available ||
- (flags & BCH_DISK_RESERVATION_NOFAIL)) {
- atomic64_set(&c->sectors_available,
- max_t(s64, 0, sectors_available - sectors));
- this_cpu_add(*c->online_reserved, sectors);
- res->sectors += sectors;
- ret = 0;
- } else {
- atomic64_set(&c->sectors_available, sectors_available);
- ret = -BCH_ERR_ENOSPC_disk_reservation;
- }
-
- mutex_unlock(&c->sectors_available_lock);
- percpu_up_read(&c->mark_lock);
-
- return ret;
-}
-
-/* Startup/shutdown: */
-
-void bch2_buckets_nouse_free(struct bch_fs *c)
-{
- for_each_member_device(c, ca) {
- kvfree_rcu_mightsleep(ca->buckets_nouse);
- ca->buckets_nouse = NULL;
- }
-}
-
-int bch2_buckets_nouse_alloc(struct bch_fs *c)
-{
- for_each_member_device(c, ca) {
- BUG_ON(ca->buckets_nouse);
-
- ca->buckets_nouse = kvmalloc(BITS_TO_LONGS(ca->mi.nbuckets) *
- sizeof(unsigned long),
- GFP_KERNEL|__GFP_ZERO);
- if (!ca->buckets_nouse) {
- bch2_dev_put(ca);
- return -BCH_ERR_ENOMEM_buckets_nouse;
- }
- }
-
- return 0;
-}
-
-static void bucket_gens_free_rcu(struct rcu_head *rcu)
-{
- struct bucket_gens *buckets =
- container_of(rcu, struct bucket_gens, rcu);
-
- kvfree(buckets);
-}
-
-int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
-{
- struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
- bool resize = ca->bucket_gens != NULL;
- int ret;
-
- BUG_ON(resize && ca->buckets_nouse);
-
- if (!(bucket_gens = kvmalloc(sizeof(struct bucket_gens) + nbuckets,
- GFP_KERNEL|__GFP_ZERO))) {
- ret = -BCH_ERR_ENOMEM_bucket_gens;
- goto err;
- }
-
- bucket_gens->first_bucket = ca->mi.first_bucket;
- bucket_gens->nbuckets = nbuckets;
- bucket_gens->nbuckets_minus_first =
- bucket_gens->nbuckets - bucket_gens->first_bucket;
-
- if (resize) {
- down_write(&ca->bucket_lock);
- percpu_down_write(&c->mark_lock);
- }
-
- old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);
-
- if (resize) {
- size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets);
-
- memcpy(bucket_gens->b,
- old_bucket_gens->b,
- n);
- }
-
- rcu_assign_pointer(ca->bucket_gens, bucket_gens);
- bucket_gens = old_bucket_gens;
-
- nbuckets = ca->mi.nbuckets;
-
- if (resize) {
- percpu_up_write(&c->mark_lock);
- up_write(&ca->bucket_lock);
- }
-
- ret = 0;
-err:
- if (bucket_gens)
- call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);
-
- return ret;
-}
-
-void bch2_dev_buckets_free(struct bch_dev *ca)
-{
- kvfree(ca->buckets_nouse);
- kvfree(rcu_dereference_protected(ca->bucket_gens, 1));
- free_percpu(ca->usage);
-}
-
-int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
-{
- ca->usage = alloc_percpu(struct bch_dev_usage);
- if (!ca->usage)
- return -BCH_ERR_ENOMEM_usage_init;
-
- return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
-}
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
deleted file mode 100644
index ccc78bfe2fd4..000000000000
--- a/fs/bcachefs/buckets.h
+++ /dev/null
@@ -1,417 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Code for manipulating bucket marks for garbage collection.
- *
- * Copyright 2014 Datera, Inc.
- */
-
-#ifndef _BUCKETS_H
-#define _BUCKETS_H
-
-#include "buckets_types.h"
-#include "extents.h"
-#include "sb-members.h"
-
-static inline u64 sector_to_bucket(const struct bch_dev *ca, sector_t s)
-{
- return div_u64(s, ca->mi.bucket_size);
-}
-
-static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
-{
- return ((sector_t) b) * ca->mi.bucket_size;
-}
-
-static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
-{
- u32 remainder;
-
- div_u64_rem(s, ca->mi.bucket_size, &remainder);
- return remainder;
-}
-
-static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s, u32 *offset)
-{
- return div_u64_rem(s, ca->mi.bucket_size, offset);
-}
-
-#define for_each_bucket(_b, _buckets) \
- for (_b = (_buckets)->b + (_buckets)->first_bucket; \
- _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
-
-/*
- * Ugly hack alert:
- *
- * We need to cram a spinlock in a single byte, because that's what we have left
- * in struct bucket, and we care about the size of these - during fsck, we need
- * in-memory state for every single bucket on every device.
- *
- * We used to do
- * while (xchg(&b->lock, 1)) cpu_relax();
- * but, it turns out not all architectures support xchg on a single byte.
- *
- * So now we use bit_spin_lock(), with fun games since we can't burn a whole
- * ulong for this - we just need to make sure the lock bit always ends up in the
- * first byte.
- */
-
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-#define BUCKET_LOCK_BITNR 0
-#else
-#define BUCKET_LOCK_BITNR (BITS_PER_LONG - 1)
-#endif
-
-union ulong_byte_assert {
- ulong ulong;
- u8 byte;
-};
-
-static inline void bucket_unlock(struct bucket *b)
-{
- BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);
-
- clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
- wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
-}
-
-static inline void bucket_lock(struct bucket *b)
-{
- wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
- TASK_UNINTERRUPTIBLE);
-}
-
-static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
-{
- return genradix_ptr(&ca->buckets_gc, b);
-}
-
-static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
-{
- return rcu_dereference_check(ca->bucket_gens,
- !ca->fs ||
- percpu_rwsem_is_held(&ca->fs->mark_lock) ||
- lockdep_is_held(&ca->fs->state_lock) ||
- lockdep_is_held(&ca->bucket_lock));
-}
-
-static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
-{
- struct bucket_gens *gens = bucket_gens(ca);
-
- if (b - gens->first_bucket >= gens->nbuckets_minus_first)
- return NULL;
- return gens->b + b;
-}
-
-static inline int bucket_gen_get_rcu(struct bch_dev *ca, size_t b)
-{
- u8 *gen = bucket_gen(ca, b);
- return gen ? *gen : -1;
-}
-
-static inline int bucket_gen_get(struct bch_dev *ca, size_t b)
-{
- rcu_read_lock();
- int ret = bucket_gen_get_rcu(ca, b);
- rcu_read_unlock();
- return ret;
-}
-
-static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
- const struct bch_extent_ptr *ptr)
-{
- return sector_to_bucket(ca, ptr->offset);
-}
-
-static inline struct bpos PTR_BUCKET_POS(const struct bch_dev *ca,
- const struct bch_extent_ptr *ptr)
-{
- return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
-}
-
-static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_dev *ca,
- const struct bch_extent_ptr *ptr,
- u32 *bucket_offset)
-{
- return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
-}
-
-static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
- const struct bch_extent_ptr *ptr)
-{
- return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
-}
-
-static inline enum bch_data_type ptr_data_type(const struct bkey *k,
- const struct bch_extent_ptr *ptr)
-{
- if (bkey_is_btree_ptr(k))
- return BCH_DATA_btree;
-
- return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
-}
-
-static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
-{
- EBUG_ON(sectors < 0);
-
- return crc_is_compressed(p.crc)
- ? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
- p.crc.uncompressed_size)
- : sectors;
-}
-
-static inline int gen_cmp(u8 a, u8 b)
-{
- return (s8) (a - b);
-}
-
-static inline int gen_after(u8 a, u8 b)
-{
- int r = gen_cmp(a, b);
-
- return r > 0 ? r : 0;
-}
-
-static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
-{
- int gen = bucket_gen_get_rcu(ca, PTR_BUCKET_NR(ca, ptr));
- return gen < 0 ? gen : gen_after(gen, ptr->gen);
-}
-
-/**
- * dev_ptr_stale() - check if a pointer points into a bucket that has been
- * invalidated.
- */
-static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
-{
- rcu_read_lock();
- int ret = dev_ptr_stale_rcu(ca, ptr);
- rcu_read_unlock();
- return ret;
-}
-
-/* Device usage: */
-
-void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
-static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
-{
- struct bch_dev_usage ret;
-
- bch2_dev_usage_read_fast(ca, &ret);
- return ret;
-}
-
-void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage *);
-
-static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
-{
- s64 reserved = 0;
-
- switch (watermark) {
- case BCH_WATERMARK_NR:
- BUG();
- case BCH_WATERMARK_stripe:
- reserved += ca->mi.nbuckets >> 6;
- fallthrough;
- case BCH_WATERMARK_normal:
- reserved += ca->mi.nbuckets >> 6;
- fallthrough;
- case BCH_WATERMARK_copygc:
- reserved += ca->nr_btree_reserve;
- fallthrough;
- case BCH_WATERMARK_btree:
- reserved += ca->nr_btree_reserve;
- fallthrough;
- case BCH_WATERMARK_btree_copygc:
- case BCH_WATERMARK_reclaim:
- case BCH_WATERMARK_interior_updates:
- break;
- }
-
- return reserved;
-}
-
-static inline u64 dev_buckets_free(struct bch_dev *ca,
- struct bch_dev_usage usage,
- enum bch_watermark watermark)
-{
- return max_t(s64, 0,
- usage.d[BCH_DATA_free].buckets -
- ca->nr_open_buckets -
- bch2_dev_buckets_reserved(ca, watermark));
-}
-
-static inline u64 __dev_buckets_available(struct bch_dev *ca,
- struct bch_dev_usage usage,
- enum bch_watermark watermark)
-{
- return max_t(s64, 0,
- usage.d[BCH_DATA_free].buckets
- + usage.d[BCH_DATA_cached].buckets
- + usage.d[BCH_DATA_need_gc_gens].buckets
- + usage.d[BCH_DATA_need_discard].buckets
- - ca->nr_open_buckets
- - bch2_dev_buckets_reserved(ca, watermark));
-}
-
-static inline u64 dev_buckets_available(struct bch_dev *ca,
- enum bch_watermark watermark)
-{
- return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
-}
-
-/* Filesystem usage: */
-
-static inline unsigned dev_usage_u64s(void)
-{
- return sizeof(struct bch_dev_usage) / sizeof(u64);
-}
-
-struct bch_fs_usage_short
-bch2_fs_usage_read_short(struct bch_fs *);
-
-int bch2_bucket_ref_update(struct btree_trans *, struct bch_dev *,
- struct bkey_s_c, const struct bch_extent_ptr *,
- s64, enum bch_data_type, u8, u8, u32 *);
-
-int bch2_check_fix_ptrs(struct btree_trans *,
- enum btree_id, unsigned, struct bkey_s_c,
- enum btree_iter_update_trigger_flags);
-
-int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s,
- enum btree_iter_update_trigger_flags);
-int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s,
- enum btree_iter_update_trigger_flags);
-
-#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
-({ \
- int ret = 0; \
- \
- if (_old.k->type) \
- ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_insert); \
- if (!ret && _new.k->type) \
- ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_overwrite);\
- ret; \
-})
-
-void bch2_trans_account_disk_usage_change(struct btree_trans *);
-
-int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64,
- enum bch_data_type, unsigned,
- enum btree_iter_update_trigger_flags);
-int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *,
- enum btree_iter_update_trigger_flags);
-int bch2_trans_mark_dev_sbs_flags(struct bch_fs *,
- enum btree_iter_update_trigger_flags);
-int bch2_trans_mark_dev_sbs(struct bch_fs *);
-
-static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
-{
- struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
- u64 b_offset = bucket_to_sector(ca, b);
- u64 b_end = bucket_to_sector(ca, b + 1);
- unsigned i;
-
- if (!b)
- return true;
-
- for (i = 0; i < layout->nr_superblocks; i++) {
- u64 offset = le64_to_cpu(layout->sb_offset[i]);
- u64 end = offset + (1 << layout->sb_max_size_bits);
-
- if (!(offset >= b_end || end <= b_offset))
- return true;
- }
-
- return false;
-}
-
-static inline const char *bch2_data_type_str(enum bch_data_type type)
-{
- return type < BCH_DATA_NR
- ? __bch2_data_types[type]
- : "(invalid data type)";
-}
-
-/* disk reservations: */
-
-static inline void bch2_disk_reservation_put(struct bch_fs *c,
- struct disk_reservation *res)
-{
- if (res->sectors) {
- this_cpu_sub(*c->online_reserved, res->sectors);
- res->sectors = 0;
- }
-}
-
-enum bch_reservation_flags {
- BCH_DISK_RESERVATION_NOFAIL = 1 << 0,
- BCH_DISK_RESERVATION_PARTIAL = 1 << 1,
-};
-
-int __bch2_disk_reservation_add(struct bch_fs *, struct disk_reservation *,
- u64, enum bch_reservation_flags);
-
-static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
- u64 sectors, enum bch_reservation_flags flags)
-{
-#ifdef __KERNEL__
- u64 old, new;
-
- old = this_cpu_read(c->pcpu->sectors_available);
- do {
- if (sectors > old)
- return __bch2_disk_reservation_add(c, res, sectors, flags);
-
- new = old - sectors;
- } while (!this_cpu_try_cmpxchg(c->pcpu->sectors_available, &old, new));
-
- this_cpu_add(*c->online_reserved, sectors);
- res->sectors += sectors;
- return 0;
-#else
- return __bch2_disk_reservation_add(c, res, sectors, flags);
-#endif
-}
-
-static inline struct disk_reservation
-bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
-{
- return (struct disk_reservation) {
- .sectors = 0,
-#if 0
- /* not used yet: */
- .gen = c->capacity_gen,
-#endif
- .nr_replicas = nr_replicas,
- };
-}
-
-static inline int bch2_disk_reservation_get(struct bch_fs *c,
- struct disk_reservation *res,
- u64 sectors, unsigned nr_replicas,
- int flags)
-{
- *res = bch2_disk_reservation_init(c, nr_replicas);
-
- return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
-}
-
-#define RESERVE_FACTOR 6
-
-static inline u64 avail_factor(u64 r)
-{
- return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
-}
-
-void bch2_buckets_nouse_free(struct bch_fs *);
-int bch2_buckets_nouse_alloc(struct bch_fs *);
-
-int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
-void bch2_dev_buckets_free(struct bch_dev *);
-int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);
-
-#endif /* _BUCKETS_H */
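
bch2_disk_reservation_add() above is a classic per-CPU fast path: each CPU keeps a small cache of pre-reserved sectors, refilled in SECTORS_CACHE-sized chunks from the global atomic counter, so most reservations never touch shared state. A minimal stand-alone model of that refill-then-consume pattern in plain C11 follows; the fs_reserve() helper, the NR_CPUS constant and the single-threaded per-"CPU" array are assumptions for illustration only, not bcachefs code.

/* Illustrative model of the per-CPU disk reservation fast path (not bcachefs code). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTORS_CACHE	1024	/* refill granularity, as in the code above */
#define NR_CPUS		4	/* hypothetical, for the model only */

static _Atomic uint64_t sectors_available;	/* global pool */
static uint64_t pcpu_sectors[NR_CPUS];		/* per-"CPU" caches (single-threaded model) */

/* Try to reserve @sectors for a caller notionally running on @cpu. */
static bool fs_reserve(unsigned cpu, uint64_t sectors)
{
	if (pcpu_sectors[cpu] < sectors) {
		/* Slower path: pull sectors + SECTORS_CACHE from the global pool. */
		uint64_t old = atomic_load(&sectors_available);
		uint64_t get;

		do {
			get = sectors + SECTORS_CACHE < old ? sectors + SECTORS_CACHE : old;
			if (get < sectors)
				return false;	/* pool exhausted: real code recalculates */
		} while (!atomic_compare_exchange_weak(&sectors_available, &old, old - get));

		pcpu_sectors[cpu] += get;
	}

	/* Fast path: consume from the local cache, no shared-memory traffic. */
	pcpu_sectors[cpu] -= sectors;
	return true;
}

int main(void)
{
	atomic_store(&sectors_available, 10000);

	for (int i = 0; i < 8; i++)
		printf("reserve 900 on cpu %d: %s\n", i % NR_CPUS,
		       fs_reserve(i % NR_CPUS, 900) ? "ok" : "ENOSPC");
	return 0;
}

The real code additionally has a recalculation path under c->sectors_available_lock, taken when the global counter cannot satisfy the request, which recomputes the pool from filesystem usage and honours the PARTIAL/NOFAIL flags.
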
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
deleted file mode 100644
index 28bd09a253c8..000000000000
--- a/fs/bcachefs/buckets_types.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BUCKETS_TYPES_H
-#define _BUCKETS_TYPES_H
-
-#include "bcachefs_format.h"
-#include "util.h"
-
-#define BUCKET_JOURNAL_SEQ_BITS 16
-
-struct bucket {
- u8 lock;
- u8 gen_valid:1;
- u8 data_type:7;
- u8 gen;
- u8 stripe_redundancy;
- u32 stripe;
- u32 dirty_sectors;
- u32 cached_sectors;
- u32 stripe_sectors;
-} __aligned(sizeof(long));
-
-struct bucket_gens {
- struct rcu_head rcu;
- u16 first_bucket;
- size_t nbuckets;
- size_t nbuckets_minus_first;
- u8 b[];
-};
-
-struct bch_dev_usage {
- struct bch_dev_usage_type {
- u64 buckets;
- u64 sectors; /* _compressed_ sectors: */
- /*
- * XXX
- * Why do we have this? Isn't it just buckets * bucket_size -
- * sectors?
- */
- u64 fragmented;
- } d[BCH_DATA_NR];
-};
-
-struct bch_fs_usage_base {
- u64 hidden;
- u64 btree;
- u64 data;
- u64 cached;
- u64 reserved;
- u64 nr_inodes;
-};
-
-struct bch_fs_usage_short {
- u64 capacity;
- u64 used;
- u64 free;
- u64 nr_inodes;
-};
-
-/*
- * A reservation for space on disk:
- */
-struct disk_reservation {
- u64 sectors;
- u32 gen;
- unsigned nr_replicas;
-};
-
-#endif /* _BUCKETS_TYPES_H */
diff --git a/fs/bcachefs/buckets_waiting_for_journal.c b/fs/bcachefs/buckets_waiting_for_journal.c
deleted file mode 100644
index f9fb150eda70..000000000000
--- a/fs/bcachefs/buckets_waiting_for_journal.c
+++ /dev/null
@@ -1,175 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "buckets_waiting_for_journal.h"
-#include <linux/hash.h>
-#include <linux/random.h>
-
-static inline struct bucket_hashed *
-bucket_hash(struct buckets_waiting_for_journal_table *t,
- unsigned hash_seed_idx, u64 dev_bucket)
-{
- return t->d + hash_64(dev_bucket ^ t->hash_seeds[hash_seed_idx], t->bits);
-}
-
-static void bucket_table_init(struct buckets_waiting_for_journal_table *t, size_t bits)
-{
- unsigned i;
-
- t->bits = bits;
- for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++)
- get_random_bytes(&t->hash_seeds[i], sizeof(t->hash_seeds[i]));
- memset(t->d, 0, sizeof(t->d[0]) << t->bits);
-}
-
-bool bch2_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
- u64 flushed_seq,
- unsigned dev, u64 bucket)
-{
- struct buckets_waiting_for_journal_table *t;
- u64 dev_bucket = (u64) dev << 56 | bucket;
- bool ret = false;
- unsigned i;
-
- mutex_lock(&b->lock);
- t = b->t;
-
- for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++) {
- struct bucket_hashed *h = bucket_hash(t, i, dev_bucket);
-
- if (h->dev_bucket == dev_bucket) {
- ret = h->journal_seq > flushed_seq;
- break;
- }
- }
-
- mutex_unlock(&b->lock);
-
- return ret;
-}
-
-static bool bucket_table_insert(struct buckets_waiting_for_journal_table *t,
- struct bucket_hashed *new,
- u64 flushed_seq)
-{
- struct bucket_hashed *last_evicted = NULL;
- unsigned tries, i;
-
- for (tries = 0; tries < 10; tries++) {
- struct bucket_hashed *old, *victim = NULL;
-
- for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++) {
- old = bucket_hash(t, i, new->dev_bucket);
-
- if (old->dev_bucket == new->dev_bucket ||
- old->journal_seq <= flushed_seq) {
- *old = *new;
- return true;
- }
-
- if (last_evicted != old)
- victim = old;
- }
-
- /* hashed to same slot 3 times: */
- if (!victim)
- break;
-
- /* Failed to find an empty slot: */
- swap(*new, *victim);
- last_evicted = victim;
- }
-
- return false;
-}
-
-int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
- u64 flushed_seq,
- unsigned dev, u64 bucket,
- u64 journal_seq)
-{
- struct buckets_waiting_for_journal_table *t, *n;
- struct bucket_hashed tmp, new = {
- .dev_bucket = (u64) dev << 56 | bucket,
- .journal_seq = journal_seq,
- };
- size_t i, size, new_bits, nr_elements = 1, nr_rehashes = 0, nr_rehashes_this_size = 0;
- int ret = 0;
-
- mutex_lock(&b->lock);
-
- if (likely(bucket_table_insert(b->t, &new, flushed_seq)))
- goto out;
-
- t = b->t;
- size = 1UL << t->bits;
- for (i = 0; i < size; i++)
- nr_elements += t->d[i].journal_seq > flushed_seq;
-
- new_bits = ilog2(roundup_pow_of_two(nr_elements * 3));
-realloc:
- n = kvmalloc(sizeof(*n) + (sizeof(n->d[0]) << new_bits), GFP_KERNEL);
- if (!n) {
- ret = -BCH_ERR_ENOMEM_buckets_waiting_for_journal_set;
- goto out;
- }
-
-retry_rehash:
- if (nr_rehashes_this_size == 3) {
- new_bits++;
- nr_rehashes_this_size = 0;
- kvfree(n);
- goto realloc;
- }
-
- nr_rehashes++;
- nr_rehashes_this_size++;
-
- bucket_table_init(n, new_bits);
-
- tmp = new;
- BUG_ON(!bucket_table_insert(n, &tmp, flushed_seq));
-
- for (i = 0; i < 1UL << t->bits; i++) {
- if (t->d[i].journal_seq <= flushed_seq)
- continue;
-
- tmp = t->d[i];
- if (!bucket_table_insert(n, &tmp, flushed_seq))
- goto retry_rehash;
- }
-
- b->t = n;
- kvfree(t);
-
- pr_debug("took %zu rehashes, table at %zu/%lu elements",
- nr_rehashes, nr_elements, 1UL << b->t->bits);
-out:
- mutex_unlock(&b->lock);
-
- return ret;
-}
-
-void bch2_fs_buckets_waiting_for_journal_exit(struct bch_fs *c)
-{
- struct buckets_waiting_for_journal *b = &c->buckets_waiting_for_journal;
-
- kvfree(b->t);
-}
-
-#define INITIAL_TABLE_BITS 3
-
-int bch2_fs_buckets_waiting_for_journal_init(struct bch_fs *c)
-{
- struct buckets_waiting_for_journal *b = &c->buckets_waiting_for_journal;
-
- mutex_init(&b->lock);
-
- b->t = kvmalloc(sizeof(*b->t) +
- (sizeof(b->t->d[0]) << INITIAL_TABLE_BITS), GFP_KERNEL);
- if (!b->t)
- return -BCH_ERR_ENOMEM_buckets_waiting_for_journal_init;
-
- bucket_table_init(b->t, INITIAL_TABLE_BITS);
- return 0;
-}
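
bucket_table_insert() above is a small cuckoo-style hash: each key has one candidate slot per hash seed, entries whose journal sequence number has already been flushed count as free, and when every candidate is live one of them is evicted and re-homed, with a bounded number of eviction rounds before the caller grows and rehashes the table. A stand-alone sketch of that insert loop follows; the table size, the slot() mixing function and MAX_EVICTIONS are illustrative assumptions, not the kernel implementation.

/* Simplified model of the evicting multi-hash insert (not the bcachefs code). */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TABLE_BITS	6
#define NR_SEEDS	3
#define MAX_EVICTIONS	10

struct entry {
	uint64_t key;
	uint64_t seq;	/* zero-initialized entries have seq == 0 and are always reusable */
};

static struct entry table[1U << TABLE_BITS];
static const uint64_t seeds[NR_SEEDS] = {
	0x9e3779b97f4a7c15ULL, 0xc2b2ae3d27d4eb4fULL, 0x165667b19e3779f9ULL,
};

/* Hypothetical hash: mix the key with one of the seeds, keep TABLE_BITS bits. */
static size_t slot(uint64_t key, unsigned seed_idx)
{
	uint64_t h = (key ^ seeds[seed_idx]) * 0x2545f4914f6cdd1dULL;
	return h >> (64 - TABLE_BITS);
}

static bool table_insert(struct entry new, uint64_t flushed_seq)
{
	struct entry *last_evicted = NULL;

	for (unsigned tries = 0; tries < MAX_EVICTIONS; tries++) {
		struct entry *victim = NULL;

		for (unsigned i = 0; i < NR_SEEDS; i++) {
			struct entry *e = &table[slot(new.key, i)];

			/* Same key, or an entry the journal already flushed: overwrite. */
			if (e->key == new.key || e->seq <= flushed_seq) {
				*e = new;
				return true;
			}

			/* Remember a candidate we did not just evict into. */
			if (e != last_evicted)
				victim = e;
		}

		if (!victim)	/* every seed mapped to the same slot */
			break;

		/* Evict: displace the victim and try to re-home it next iteration. */
		struct entry tmp = *victim;
		*victim = new;
		new = tmp;
		last_evicted = victim;
	}

	return false;		/* caller would grow and rehash the table */
}

int main(void)
{
	for (uint64_t k = 1; k <= 200; k++)
		if (!table_insert((struct entry){ .key = k, .seq = 100 + k }, 0)) {
			printf("table full after %llu keys\n", (unsigned long long) (k - 1));
			break;
		}
	return 0;
}

On failure the caller, like bch2_set_bucket_needs_journal_commit() above, allocates a larger table, reinserts the still-live entries and retries, growing again if the rehash itself keeps failing.
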
diff --git a/fs/bcachefs/buckets_waiting_for_journal.h b/fs/bcachefs/buckets_waiting_for_journal.h
deleted file mode 100644
index d2ae19cbe18c..000000000000
--- a/fs/bcachefs/buckets_waiting_for_journal.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BUCKETS_WAITING_FOR_JOURNAL_H
-#define _BUCKETS_WAITING_FOR_JOURNAL_H
-
-#include "buckets_waiting_for_journal_types.h"
-
-bool bch2_bucket_needs_journal_commit(struct buckets_waiting_for_journal *,
- u64, unsigned, u64);
-int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *,
- u64, unsigned, u64, u64);
-
-void bch2_fs_buckets_waiting_for_journal_exit(struct bch_fs *);
-int bch2_fs_buckets_waiting_for_journal_init(struct bch_fs *);
-
-#endif /* _BUCKETS_WAITING_FOR_JOURNAL_H */
diff --git a/fs/bcachefs/buckets_waiting_for_journal_types.h b/fs/bcachefs/buckets_waiting_for_journal_types.h
deleted file mode 100644
index e593db061d81..000000000000
--- a/fs/bcachefs/buckets_waiting_for_journal_types.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BUCKETS_WAITING_FOR_JOURNAL_TYPES_H
-#define _BUCKETS_WAITING_FOR_JOURNAL_TYPES_H
-
-#include <linux/siphash.h>
-
-struct bucket_hashed {
- u64 dev_bucket;
- u64 journal_seq;
-};
-
-struct buckets_waiting_for_journal_table {
- unsigned bits;
- u64 hash_seeds[3];
- struct bucket_hashed d[];
-};
-
-struct buckets_waiting_for_journal {
- struct mutex lock;
- struct buckets_waiting_for_journal_table *t;
-};
-
-#endif /* _BUCKETS_WAITING_FOR_JOURNAL_TYPES_H */
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
deleted file mode 100644
index 2182b555c112..000000000000
--- a/fs/bcachefs/chardev.c
+++ /dev/null
@@ -1,1024 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef NO_BCACHEFS_CHARDEV
-
-#include "bcachefs.h"
-#include "bcachefs_ioctl.h"
-#include "buckets.h"
-#include "chardev.h"
-#include "disk_accounting.h"
-#include "journal.h"
-#include "move.h"
-#include "recovery_passes.h"
-#include "replicas.h"
-#include "super.h"
-#include "super-io.h"
-#include "thread_with_file.h"
-
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <linux/fs.h>
-#include <linux/ioctl.h>
-#include <linux/major.h>
-#include <linux/sched/task.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-/* returns with ref on ca->ref */
-static struct bch_dev *bch2_device_lookup(struct bch_fs *c, u64 dev,
- unsigned flags)
-{
- struct bch_dev *ca;
-
- if (flags & BCH_BY_INDEX) {
- if (dev >= c->sb.nr_devices)
- return ERR_PTR(-EINVAL);
-
- ca = bch2_dev_tryget_noerror(c, dev);
- if (!ca)
- return ERR_PTR(-EINVAL);
- } else {
- char *path;
-
- path = strndup_user((const char __user *)
- (unsigned long) dev, PATH_MAX);
- if (IS_ERR(path))
- return ERR_CAST(path);
-
- ca = bch2_dev_lookup(c, path);
- kfree(path);
- }
-
- return ca;
-}
-
-#if 0
-static long bch2_ioctl_assemble(struct bch_ioctl_assemble __user *user_arg)
-{
- struct bch_ioctl_assemble arg;
- struct bch_fs *c;
- u64 *user_devs = NULL;
- char **devs = NULL;
- unsigned i;
- int ret = -EFAULT;
-
- if (copy_from_user(&arg, user_arg, sizeof(arg)))
- return -EFAULT;
-
- if (arg.flags || arg.pad)
- return -EINVAL;
-
- user_devs = kmalloc_array(arg.nr_devs, sizeof(u64), GFP_KERNEL);
- if (!user_devs)
- return -ENOMEM;
-
- devs = kcalloc(arg.nr_devs, sizeof(char *), GFP_KERNEL);
- if (!devs) {
- ret = -ENOMEM;
- goto err;
- }
-
- if (copy_from_user(user_devs, user_arg->devs,
- sizeof(u64) * arg.nr_devs))
- goto err;
-
- for (i = 0; i < arg.nr_devs; i++) {
- devs[i] = strndup_user((const char __user *)(unsigned long)
- user_devs[i],
- PATH_MAX);
- ret = PTR_ERR_OR_ZERO(devs[i]);
- if (ret)
- goto err;
- }
-
- c = bch2_fs_open(devs, arg.nr_devs, bch2_opts_empty());
- ret = PTR_ERR_OR_ZERO(c);
- if (!ret)
- closure_put(&c->cl);
-err:
- if (devs)
- for (i = 0; i < arg.nr_devs; i++)
- kfree(devs[i]);
- kfree(devs);
- return ret;
-}
-
-static long bch2_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg)
-{
- struct bch_ioctl_incremental arg;
- const char *err;
- char *path;
-
- if (copy_from_user(&arg, user_arg, sizeof(arg)))
- return -EFAULT;
-
- if (arg.flags || arg.pad)
- return -EINVAL;
-
- path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
- ret = PTR_ERR_OR_ZERO(path);
- if (ret)
- return ret;
-
- err = bch2_fs_open_incremental(path);
- kfree(path);
-
- if (err) {
- pr_err("Could not register bcachefs devices: %s", err);
- return -EINVAL;
- }
-
- return 0;
-}
-#endif
-
-struct fsck_thread {
- struct thread_with_stdio thr;
- struct bch_fs *c;
- struct bch_opts opts;
-};
-
-static void bch2_fsck_thread_exit(struct thread_with_stdio *_thr)
-{
- struct fsck_thread *thr = container_of(_thr, struct fsck_thread, thr);
- kfree(thr);
-}
-
-static int bch2_fsck_offline_thread_fn(struct thread_with_stdio *stdio)
-{
- struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr);
- struct bch_fs *c = thr->c;
-
- int ret = PTR_ERR_OR_ZERO(c);
- if (ret)
- return ret;
-
- ret = bch2_fs_start(thr->c);
- if (ret)
- goto err;
-
- if (test_bit(BCH_FS_errors_fixed, &c->flags)) {
- bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: errors fixed\n", c->name);
- ret |= 1;
- }
- if (test_bit(BCH_FS_error, &c->flags)) {
- bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: still has errors\n", c->name);
- ret |= 4;
- }
-err:
- bch2_fs_stop(c);
- return ret;
-}
-
-static const struct thread_with_stdio_ops bch2_offline_fsck_ops = {
- .exit = bch2_fsck_thread_exit,
- .fn = bch2_fsck_offline_thread_fn,
-};
-
-static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_arg)
-{
- struct bch_ioctl_fsck_offline arg;
- struct fsck_thread *thr = NULL;
- darray_str(devs) = {};
- long ret = 0;
-
- if (copy_from_user(&arg, user_arg, sizeof(arg)))
- return -EFAULT;
-
- if (arg.flags)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- for (size_t i = 0; i < arg.nr_devs; i++) {
- u64 dev_u64;
- ret = copy_from_user_errcode(&dev_u64, &user_arg->devs[i], sizeof(u64));
- if (ret)
- goto err;
-
- char *dev_str = strndup_user((char __user *)(unsigned long) dev_u64, PATH_MAX);
- ret = PTR_ERR_OR_ZERO(dev_str);
- if (ret)
- goto err;
-
- ret = darray_push(&devs, dev_str);
- if (ret) {
- kfree(dev_str);
- goto err;
- }
- }
-
- thr = kzalloc(sizeof(*thr), GFP_KERNEL);
- if (!thr) {
- ret = -ENOMEM;
- goto err;
- }
-
- thr->opts = bch2_opts_empty();
-
- if (arg.opts) {
- char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);
- ret = PTR_ERR_OR_ZERO(optstr) ?:
- bch2_parse_mount_opts(NULL, &thr->opts, NULL, optstr);
- if (!IS_ERR(optstr))
- kfree(optstr);
-
- if (ret)
- goto err;
- }
-
- opt_set(thr->opts, stdio, (u64)(unsigned long)&thr->thr.stdio);
- opt_set(thr->opts, read_only, 1);
- opt_set(thr->opts, ratelimit_errors, 0);
-
- /* We need request_key() to be called before we punt to kthread: */
- opt_set(thr->opts, nostart, true);
-
- bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops);
-
- thr->c = bch2_fs_open(devs.data, arg.nr_devs, thr->opts);
-
- if (!IS_ERR(thr->c) &&
- thr->c->opts.errors == BCH_ON_ERROR_panic)
- thr->c->opts.errors = BCH_ON_ERROR_ro;
-
- ret = __bch2_run_thread_with_stdio(&thr->thr);
-out:
- darray_for_each(devs, i)
- kfree(*i);
- darray_exit(&devs);
- return ret;
-err:
- if (thr)
- bch2_fsck_thread_exit(&thr->thr);
- pr_err("ret %s", bch2_err_str(ret));
- goto out;
-}
-
-static long bch2_global_ioctl(unsigned cmd, void __user *arg)
-{
- long ret;
-
- switch (cmd) {
-#if 0
- case BCH_IOCTL_ASSEMBLE:
- return bch2_ioctl_assemble(arg);
- case BCH_IOCTL_INCREMENTAL:
- return bch2_ioctl_incremental(arg);
-#endif
- case BCH_IOCTL_FSCK_OFFLINE: {
- ret = bch2_ioctl_fsck_offline(arg);
- break;
- }
- default:
- ret = -ENOTTY;
- break;
- }
-
- if (ret < 0)
- ret = bch2_err_class(ret);
- return ret;
-}
-
-static long bch2_ioctl_query_uuid(struct bch_fs *c,
- struct bch_ioctl_query_uuid __user *user_arg)
-{
- return copy_to_user_errcode(&user_arg->uuid, &c->sb.user_uuid,
- sizeof(c->sb.user_uuid));
-}
-
-#if 0
-static long bch2_ioctl_start(struct bch_fs *c, struct bch_ioctl_start arg)
-{
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (arg.flags || arg.pad)
- return -EINVAL;
-
- return bch2_fs_start(c);
-}
-
-static long bch2_ioctl_stop(struct bch_fs *c)
-{
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- bch2_fs_stop(c);
- return 0;
-}
-#endif
-
-static long bch2_ioctl_disk_add(struct bch_fs *c, struct bch_ioctl_disk arg)
-{
- char *path;
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (arg.flags || arg.pad)
- return -EINVAL;
-
- path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
- ret = PTR_ERR_OR_ZERO(path);
- if (ret)
- return ret;
-
- ret = bch2_dev_add(c, path);
- if (!IS_ERR(path))
- kfree(path);
-
- return ret;
-}
-
-static long bch2_ioctl_disk_remove(struct bch_fs *c, struct bch_ioctl_disk arg)
-{
- struct bch_dev *ca;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
- BCH_FORCE_IF_METADATA_LOST|
- BCH_FORCE_IF_DEGRADED|
- BCH_BY_INDEX)) ||
- arg.pad)
- return -EINVAL;
-
- ca = bch2_device_lookup(c, arg.dev, arg.flags);
- if (IS_ERR(ca))
- return PTR_ERR(ca);
-
- return bch2_dev_remove(c, ca, arg.flags);
-}
-
-static long bch2_ioctl_disk_online(struct bch_fs *c, struct bch_ioctl_disk arg)
-{
- char *path;
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (arg.flags || arg.pad)
- return -EINVAL;
-
- path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
- ret = PTR_ERR_OR_ZERO(path);
- if (ret)
- return ret;
-
- ret = bch2_dev_online(c, path);
- kfree(path);
- return ret;
-}
-
-static long bch2_ioctl_disk_offline(struct bch_fs *c, struct bch_ioctl_disk arg)
-{
- struct bch_dev *ca;
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
- BCH_FORCE_IF_METADATA_LOST|
- BCH_FORCE_IF_DEGRADED|
- BCH_BY_INDEX)) ||
- arg.pad)
- return -EINVAL;
-
- ca = bch2_device_lookup(c, arg.dev, arg.flags);
- if (IS_ERR(ca))
- return PTR_ERR(ca);
-
- ret = bch2_dev_offline(c, ca, arg.flags);
- bch2_dev_put(ca);
- return ret;
-}
-
-static long bch2_ioctl_disk_set_state(struct bch_fs *c,
- struct bch_ioctl_disk_set_state arg)
-{
- struct bch_dev *ca;
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
- BCH_FORCE_IF_METADATA_LOST|
- BCH_FORCE_IF_DEGRADED|
- BCH_BY_INDEX)) ||
- arg.pad[0] || arg.pad[1] || arg.pad[2] ||
- arg.new_state >= BCH_MEMBER_STATE_NR)
- return -EINVAL;
-
- ca = bch2_device_lookup(c, arg.dev, arg.flags);
- if (IS_ERR(ca))
- return PTR_ERR(ca);
-
- ret = bch2_dev_set_state(c, ca, arg.new_state, arg.flags);
- if (ret)
- bch_err(c, "Error setting device state: %s", bch2_err_str(ret));
-
- bch2_dev_put(ca);
- return ret;
-}
-
-struct bch_data_ctx {
- struct thread_with_file thr;
-
- struct bch_fs *c;
- struct bch_ioctl_data arg;
- struct bch_move_stats stats;
-};
-
-static int bch2_data_thread(void *arg)
-{
- struct bch_data_ctx *ctx = container_of(arg, struct bch_data_ctx, thr);
-
- ctx->thr.ret = bch2_data_job(ctx->c, &ctx->stats, ctx->arg);
- ctx->stats.data_type = U8_MAX;
- return 0;
-}
-
-static int bch2_data_job_release(struct inode *inode, struct file *file)
-{
- struct bch_data_ctx *ctx = container_of(file->private_data, struct bch_data_ctx, thr);
-
- bch2_thread_with_file_exit(&ctx->thr);
- kfree(ctx);
- return 0;
-}
-
-static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
- size_t len, loff_t *ppos)
-{
- struct bch_data_ctx *ctx = container_of(file->private_data, struct bch_data_ctx, thr);
- struct bch_fs *c = ctx->c;
- struct bch_ioctl_data_event e = {
- .type = BCH_DATA_EVENT_PROGRESS,
- .p.data_type = ctx->stats.data_type,
- .p.btree_id = ctx->stats.pos.btree,
- .p.pos = ctx->stats.pos.pos,
- .p.sectors_done = atomic64_read(&ctx->stats.sectors_seen),
- .p.sectors_total = bch2_fs_usage_read_short(c).used,
- };
-
- if (len < sizeof(e))
- return -EINVAL;
-
- return copy_to_user_errcode(buf, &e, sizeof(e)) ?: sizeof(e);
-}
-
-static const struct file_operations bcachefs_data_ops = {
- .release = bch2_data_job_release,
- .read = bch2_data_job_read,
-};
-
-static long bch2_ioctl_data(struct bch_fs *c,
- struct bch_ioctl_data arg)
-{
- struct bch_data_ctx *ctx;
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (arg.op >= BCH_DATA_OP_NR || arg.flags)
- return -EINVAL;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- ctx->c = c;
- ctx->arg = arg;
-
- ret = bch2_run_thread_with_file(&ctx->thr,
- &bcachefs_data_ops,
- bch2_data_thread);
- if (ret < 0)
- kfree(ctx);
- return ret;
-}
-
-static long bch2_ioctl_fs_usage(struct bch_fs *c,
- struct bch_ioctl_fs_usage __user *user_arg)
-{
- struct bch_ioctl_fs_usage arg = {};
- darray_char replicas = {};
- u32 replica_entries_bytes;
- int ret = 0;
-
- if (!test_bit(BCH_FS_started, &c->flags))
- return -EINVAL;
-
- if (get_user(replica_entries_bytes, &user_arg->replica_entries_bytes))
- return -EFAULT;
-
- ret = bch2_fs_replicas_usage_read(c, &replicas) ?:
- (replica_entries_bytes < replicas.nr ? -ERANGE : 0) ?:
- copy_to_user_errcode(&user_arg->replicas, replicas.data, replicas.nr);
- if (ret)
- goto err;
-
- struct bch_fs_usage_short u = bch2_fs_usage_read_short(c);
- arg.capacity = c->capacity;
- arg.used = u.used;
- arg.online_reserved = percpu_u64_get(c->online_reserved);
- arg.replica_entries_bytes = replicas.nr;
-
- for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++) {
- struct disk_accounting_pos k = {
- .type = BCH_DISK_ACCOUNTING_persistent_reserved,
- .persistent_reserved.nr_replicas = i,
- };
-
- bch2_accounting_mem_read(c,
- disk_accounting_pos_to_bpos(&k),
- &arg.persistent_reserved[i], 1);
- }
-
- ret = copy_to_user_errcode(user_arg, &arg, sizeof(arg));
-err:
- darray_exit(&replicas);
- return ret;
-}
-
-static long bch2_ioctl_query_accounting(struct bch_fs *c,
- struct bch_ioctl_query_accounting __user *user_arg)
-{
- struct bch_ioctl_query_accounting arg;
- darray_char accounting = {};
- int ret = 0;
-
- if (!test_bit(BCH_FS_started, &c->flags))
- return -EINVAL;
-
- ret = copy_from_user_errcode(&arg, user_arg, sizeof(arg)) ?:
- bch2_fs_accounting_read(c, &accounting, arg.accounting_types_mask) ?:
- (arg.accounting_u64s * sizeof(u64) < accounting.nr ? -ERANGE : 0) ?:
- copy_to_user_errcode(&user_arg->accounting, accounting.data, accounting.nr);
- if (ret)
- goto err;
-
- arg.capacity = c->capacity;
- arg.used = bch2_fs_usage_read_short(c).used;
- arg.online_reserved = percpu_u64_get(c->online_reserved);
- arg.accounting_u64s = accounting.nr / sizeof(u64);
-
- ret = copy_to_user_errcode(user_arg, &arg, sizeof(arg));
-err:
- darray_exit(&accounting);
- return ret;
-}
-
-/* obsolete, didn't allow for new data types: */
-static long bch2_ioctl_dev_usage(struct bch_fs *c,
- struct bch_ioctl_dev_usage __user *user_arg)
-{
- struct bch_ioctl_dev_usage arg;
- struct bch_dev_usage src;
- struct bch_dev *ca;
- unsigned i;
-
- if (!test_bit(BCH_FS_started, &c->flags))
- return -EINVAL;
-
- if (copy_from_user(&arg, user_arg, sizeof(arg)))
- return -EFAULT;
-
- if ((arg.flags & ~BCH_BY_INDEX) ||
- arg.pad[0] ||
- arg.pad[1] ||
- arg.pad[2])
- return -EINVAL;
-
- ca = bch2_device_lookup(c, arg.dev, arg.flags);
- if (IS_ERR(ca))
- return PTR_ERR(ca);
-
- src = bch2_dev_usage_read(ca);
-
- arg.state = ca->mi.state;
- arg.bucket_size = ca->mi.bucket_size;
- arg.nr_buckets = ca->mi.nbuckets - ca->mi.first_bucket;
-
- for (i = 0; i < ARRAY_SIZE(arg.d); i++) {
- arg.d[i].buckets = src.d[i].buckets;
- arg.d[i].sectors = src.d[i].sectors;
- arg.d[i].fragmented = src.d[i].fragmented;
- }
-
- bch2_dev_put(ca);
-
- return copy_to_user_errcode(user_arg, &arg, sizeof(arg));
-}
-
-static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
- struct bch_ioctl_dev_usage_v2 __user *user_arg)
-{
- struct bch_ioctl_dev_usage_v2 arg;
- struct bch_dev_usage src;
- struct bch_dev *ca;
- int ret = 0;
-
- if (!test_bit(BCH_FS_started, &c->flags))
- return -EINVAL;
-
- if (copy_from_user(&arg, user_arg, sizeof(arg)))
- return -EFAULT;
-
- if ((arg.flags & ~BCH_BY_INDEX) ||
- arg.pad[0] ||
- arg.pad[1] ||
- arg.pad[2])
- return -EINVAL;
-
- ca = bch2_device_lookup(c, arg.dev, arg.flags);
- if (IS_ERR(ca))
- return PTR_ERR(ca);
-
- src = bch2_dev_usage_read(ca);
-
- arg.state = ca->mi.state;
- arg.bucket_size = ca->mi.bucket_size;
- arg.nr_data_types = min(arg.nr_data_types, BCH_DATA_NR);
- arg.nr_buckets = ca->mi.nbuckets - ca->mi.first_bucket;
-
- ret = copy_to_user_errcode(user_arg, &arg, sizeof(arg));
- if (ret)
- goto err;
-
- for (unsigned i = 0; i < arg.nr_data_types; i++) {
- struct bch_ioctl_dev_usage_type t = {
- .buckets = src.d[i].buckets,
- .sectors = src.d[i].sectors,
- .fragmented = src.d[i].fragmented,
- };
-
- ret = copy_to_user_errcode(&user_arg->d[i], &t, sizeof(t));
- if (ret)
- goto err;
- }
-err:
- bch2_dev_put(ca);
- return ret;
-}
-
-static long bch2_ioctl_read_super(struct bch_fs *c,
- struct bch_ioctl_read_super arg)
-{
- struct bch_dev *ca = NULL;
- struct bch_sb *sb;
- int ret = 0;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if ((arg.flags & ~(BCH_BY_INDEX|BCH_READ_DEV)) ||
- arg.pad)
- return -EINVAL;
-
- mutex_lock(&c->sb_lock);
-
- if (arg.flags & BCH_READ_DEV) {
- ca = bch2_device_lookup(c, arg.dev, arg.flags);
- ret = PTR_ERR_OR_ZERO(ca);
- if (ret)
- goto err_unlock;
-
- sb = ca->disk_sb.sb;
- } else {
- sb = c->disk_sb.sb;
- }
-
- if (vstruct_bytes(sb) > arg.size) {
- ret = -ERANGE;
- goto err;
- }
-
- ret = copy_to_user_errcode((void __user *)(unsigned long)arg.sb, sb,
- vstruct_bytes(sb));
-err:
- bch2_dev_put(ca);
-err_unlock:
- mutex_unlock(&c->sb_lock);
- return ret;
-}
-
-static long bch2_ioctl_disk_get_idx(struct bch_fs *c,
- struct bch_ioctl_disk_get_idx arg)
-{
- dev_t dev = huge_decode_dev(arg.dev);
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (!dev)
- return -EINVAL;
-
- for_each_online_member(c, ca)
- if (ca->dev == dev) {
- percpu_ref_put(&ca->io_ref);
- return ca->dev_idx;
- }
-
- return -BCH_ERR_ENOENT_dev_idx_not_found;
-}
-
-static long bch2_ioctl_disk_resize(struct bch_fs *c,
- struct bch_ioctl_disk_resize arg)
-{
- struct bch_dev *ca;
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if ((arg.flags & ~BCH_BY_INDEX) ||
- arg.pad)
- return -EINVAL;
-
- ca = bch2_device_lookup(c, arg.dev, arg.flags);
- if (IS_ERR(ca))
- return PTR_ERR(ca);
-
- ret = bch2_dev_resize(c, ca, arg.nbuckets);
-
- bch2_dev_put(ca);
- return ret;
-}
-
-static long bch2_ioctl_disk_resize_journal(struct bch_fs *c,
- struct bch_ioctl_disk_resize_journal arg)
-{
- struct bch_dev *ca;
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if ((arg.flags & ~BCH_BY_INDEX) ||
- arg.pad)
- return -EINVAL;
-
- if (arg.nbuckets > U32_MAX)
- return -EINVAL;
-
- ca = bch2_device_lookup(c, arg.dev, arg.flags);
- if (IS_ERR(ca))
- return PTR_ERR(ca);
-
- ret = bch2_set_nr_journal_buckets(c, ca, arg.nbuckets);
-
- bch2_dev_put(ca);
- return ret;
-}
-
-static int bch2_fsck_online_thread_fn(struct thread_with_stdio *stdio)
-{
- struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr);
- struct bch_fs *c = thr->c;
-
- c->stdio_filter = current;
- c->stdio = &thr->thr.stdio;
-
- /*
- * XXX: can we figure out a way to do this without mucking with c->opts?
- */
- unsigned old_fix_errors = c->opts.fix_errors;
- if (opt_defined(thr->opts, fix_errors))
- c->opts.fix_errors = thr->opts.fix_errors;
- else
- c->opts.fix_errors = FSCK_FIX_ask;
-
- c->opts.fsck = true;
- set_bit(BCH_FS_fsck_running, &c->flags);
-
- c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;
- int ret = bch2_run_online_recovery_passes(c);
-
- clear_bit(BCH_FS_fsck_running, &c->flags);
- bch_err_fn(c, ret);
-
- c->stdio = NULL;
- c->stdio_filter = NULL;
- c->opts.fix_errors = old_fix_errors;
-
- up(&c->online_fsck_mutex);
- bch2_ro_ref_put(c);
- return ret;
-}
-
-static const struct thread_with_stdio_ops bch2_online_fsck_ops = {
- .exit = bch2_fsck_thread_exit,
- .fn = bch2_fsck_online_thread_fn,
-};
-
-static long bch2_ioctl_fsck_online(struct bch_fs *c,
- struct bch_ioctl_fsck_online arg)
-{
- struct fsck_thread *thr = NULL;
- long ret = 0;
-
- if (arg.flags)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (!bch2_ro_ref_tryget(c))
- return -EROFS;
-
- if (down_trylock(&c->online_fsck_mutex)) {
- bch2_ro_ref_put(c);
- return -EAGAIN;
- }
-
- thr = kzalloc(sizeof(*thr), GFP_KERNEL);
- if (!thr) {
- ret = -ENOMEM;
- goto err;
- }
-
- thr->c = c;
- thr->opts = bch2_opts_empty();
-
- if (arg.opts) {
- char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);
-
- ret = PTR_ERR_OR_ZERO(optstr) ?:
- bch2_parse_mount_opts(c, &thr->opts, NULL, optstr);
- if (!IS_ERR(optstr))
- kfree(optstr);
-
- if (ret)
- goto err;
- }
-
- ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_online_fsck_ops);
-err:
- if (ret < 0) {
- bch_err_fn(c, ret);
- if (thr)
- bch2_fsck_thread_exit(&thr->thr);
- up(&c->online_fsck_mutex);
- bch2_ro_ref_put(c);
- }
- return ret;
-}
-
-#define BCH_IOCTL(_name, _argtype) \
-do { \
- _argtype i; \
- \
- if (copy_from_user(&i, arg, sizeof(i))) \
- return -EFAULT; \
- ret = bch2_ioctl_##_name(c, i); \
- goto out; \
-} while (0)
-
-long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg)
-{
- long ret;
-
- switch (cmd) {
- case BCH_IOCTL_QUERY_UUID:
- return bch2_ioctl_query_uuid(c, arg);
- case BCH_IOCTL_FS_USAGE:
- return bch2_ioctl_fs_usage(c, arg);
- case BCH_IOCTL_DEV_USAGE:
- return bch2_ioctl_dev_usage(c, arg);
- case BCH_IOCTL_DEV_USAGE_V2:
- return bch2_ioctl_dev_usage_v2(c, arg);
-#if 0
- case BCH_IOCTL_START:
- BCH_IOCTL(start, struct bch_ioctl_start);
- case BCH_IOCTL_STOP:
- return bch2_ioctl_stop(c);
-#endif
- case BCH_IOCTL_READ_SUPER:
- BCH_IOCTL(read_super, struct bch_ioctl_read_super);
- case BCH_IOCTL_DISK_GET_IDX:
- BCH_IOCTL(disk_get_idx, struct bch_ioctl_disk_get_idx);
- }
-
- if (!test_bit(BCH_FS_started, &c->flags))
- return -EINVAL;
-
- switch (cmd) {
- case BCH_IOCTL_DISK_ADD:
- BCH_IOCTL(disk_add, struct bch_ioctl_disk);
- case BCH_IOCTL_DISK_REMOVE:
- BCH_IOCTL(disk_remove, struct bch_ioctl_disk);
- case BCH_IOCTL_DISK_ONLINE:
- BCH_IOCTL(disk_online, struct bch_ioctl_disk);
- case BCH_IOCTL_DISK_OFFLINE:
- BCH_IOCTL(disk_offline, struct bch_ioctl_disk);
- case BCH_IOCTL_DISK_SET_STATE:
- BCH_IOCTL(disk_set_state, struct bch_ioctl_disk_set_state);
- case BCH_IOCTL_DATA:
- BCH_IOCTL(data, struct bch_ioctl_data);
- case BCH_IOCTL_DISK_RESIZE:
- BCH_IOCTL(disk_resize, struct bch_ioctl_disk_resize);
- case BCH_IOCTL_DISK_RESIZE_JOURNAL:
- BCH_IOCTL(disk_resize_journal, struct bch_ioctl_disk_resize_journal);
- case BCH_IOCTL_FSCK_ONLINE:
- BCH_IOCTL(fsck_online, struct bch_ioctl_fsck_online);
- case BCH_IOCTL_QUERY_ACCOUNTING:
- return bch2_ioctl_query_accounting(c, arg);
- default:
- return -ENOTTY;
- }
-out:
- if (ret < 0)
- ret = bch2_err_class(ret);
- return ret;
-}
-
-static DEFINE_IDR(bch_chardev_minor);
-
-static long bch2_chardev_ioctl(struct file *filp, unsigned cmd, unsigned long v)
-{
- unsigned minor = iminor(file_inode(filp));
- struct bch_fs *c = minor < U8_MAX ? idr_find(&bch_chardev_minor, minor) : NULL;
- void __user *arg = (void __user *) v;
-
- return c
- ? bch2_fs_ioctl(c, cmd, arg)
- : bch2_global_ioctl(cmd, arg);
-}
-
-static const struct file_operations bch_chardev_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = bch2_chardev_ioctl,
- .open = nonseekable_open,
-};
-
-static int bch_chardev_major;
-static const struct class bch_chardev_class = {
- .name = "bcachefs",
-};
-static struct device *bch_chardev;
-
-void bch2_fs_chardev_exit(struct bch_fs *c)
-{
- if (!IS_ERR_OR_NULL(c->chardev))
- device_unregister(c->chardev);
- if (c->minor >= 0)
- idr_remove(&bch_chardev_minor, c->minor);
-}
-
-int bch2_fs_chardev_init(struct bch_fs *c)
-{
- c->minor = idr_alloc(&bch_chardev_minor, c, 0, 0, GFP_KERNEL);
- if (c->minor < 0)
- return c->minor;
-
- c->chardev = device_create(&bch_chardev_class, NULL,
- MKDEV(bch_chardev_major, c->minor), c,
- "bcachefs%u-ctl", c->minor);
- if (IS_ERR(c->chardev))
- return PTR_ERR(c->chardev);
-
- return 0;
-}
-
-void bch2_chardev_exit(void)
-{
- device_destroy(&bch_chardev_class, MKDEV(bch_chardev_major, U8_MAX));
- class_unregister(&bch_chardev_class);
- if (bch_chardev_major > 0)
- unregister_chrdev(bch_chardev_major, "bcachefs-ctl");
-}
-
-int __init bch2_chardev_init(void)
-{
- int ret;
-
- bch_chardev_major = register_chrdev(0, "bcachefs-ctl", &bch_chardev_fops);
- if (bch_chardev_major < 0)
- return bch_chardev_major;
-
- ret = class_register(&bch_chardev_class);
- if (ret)
- goto major_out;
-
- bch_chardev = device_create(&bch_chardev_class, NULL,
- MKDEV(bch_chardev_major, U8_MAX),
- NULL, "bcachefs-ctl");
- if (IS_ERR(bch_chardev)) {
- ret = PTR_ERR(bch_chardev);
- goto class_out;
- }
-
- return 0;
-
-class_out:
- class_unregister(&bch_chardev_class);
-major_out:
- unregister_chrdev(bch_chardev_major, "bcachefs-ctl");
- return ret;
-}
-
-#endif /* NO_BCACHEFS_CHARDEV */
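
The ioctls dispatched by bch2_fs_ioctl() are reached through the per-filesystem character device registered above. A hypothetical user-space caller of BCH_IOCTL_QUERY_UUID might look like the following; the device-node name and the header location are assumptions (the uapi definitions live in bcachefs_ioctl.h), only the ioctl number and the struct field come from the code above.

/* Hypothetical caller of BCH_IOCTL_QUERY_UUID; device path is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "bcachefs_ioctl.h"	/* copy of fs/bcachefs/bcachefs_ioctl.h (assumed location) */

int main(void)
{
	int fd = open("/dev/bcachefs0-ctl", O_RDONLY);	/* "bcachefs%u-ctl" from device_create() */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	struct bch_ioctl_query_uuid arg;
	if (ioctl(fd, BCH_IOCTL_QUERY_UUID, &arg)) {
		perror("BCH_IOCTL_QUERY_UUID");
		close(fd);
		return 1;
	}

	/* bch2_ioctl_query_uuid() copies c->sb.user_uuid (16 bytes) into arg.uuid */
	unsigned char uuid[16];
	memcpy(uuid, &arg.uuid, sizeof(uuid));

	for (unsigned i = 0; i < sizeof(uuid); i++)
		printf("%02x%s", uuid[i], (i == 3 || i == 5 || i == 7 || i == 9) ? "-" : "");
	printf("\n");

	close(fd);
	return 0;
}
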
diff --git a/fs/bcachefs/chardev.h b/fs/bcachefs/chardev.h
deleted file mode 100644
index 0f563ca53c36..000000000000
--- a/fs/bcachefs/chardev.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_CHARDEV_H
-#define _BCACHEFS_CHARDEV_H
-
-#ifndef NO_BCACHEFS_FS
-
-long bch2_fs_ioctl(struct bch_fs *, unsigned, void __user *);
-
-void bch2_fs_chardev_exit(struct bch_fs *);
-int bch2_fs_chardev_init(struct bch_fs *);
-
-void bch2_chardev_exit(void);
-int __init bch2_chardev_init(void);
-
-#else
-
-static inline long bch2_fs_ioctl(struct bch_fs *c,
- unsigned cmd, void __user * arg)
-{
- return -ENOTTY;
-}
-
-static inline void bch2_fs_chardev_exit(struct bch_fs *c) {}
-static inline int bch2_fs_chardev_init(struct bch_fs *c) { return 0; }
-
-static inline void bch2_chardev_exit(void) {}
-static inline int __init bch2_chardev_init(void) { return 0; }
-
-#endif /* NO_BCACHEFS_FS */
-
-#endif /* _BCACHEFS_CHARDEV_H */
diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c
deleted file mode 100644
index ce8fc677bef9..000000000000
--- a/fs/bcachefs/checksum.c
+++ /dev/null
@@ -1,822 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "bcachefs.h"
-#include "checksum.h"
-#include "errcode.h"
-#include "super.h"
-#include "super-io.h"
-
-#include <linux/crc32c.h>
-#include <linux/crypto.h>
-#include <linux/xxhash.h>
-#include <linux/key.h>
-#include <linux/random.h>
-#include <linux/ratelimit.h>
-#include <linux/scatterlist.h>
-#include <crypto/algapi.h>
-#include <crypto/chacha.h>
-#include <crypto/hash.h>
-#include <crypto/poly1305.h>
-#include <crypto/skcipher.h>
-#include <keys/user-type.h>
-
-/*
- * bch2_checksum_state is an abstraction of the checksum state calculated over different pages.
- * It allows pages to be merged without the checksum algorithm losing its state.
- * For native checksum algorithms (like crc), a default seed value will do.
- * For hash-like algorithms, a state needs to be stored.
- */
-
-struct bch2_checksum_state {
- union {
- u64 seed;
- struct xxh64_state h64state;
- };
- unsigned int type;
-};
-
-static void bch2_checksum_init(struct bch2_checksum_state *state)
-{
- switch (state->type) {
- case BCH_CSUM_none:
- case BCH_CSUM_crc32c:
- case BCH_CSUM_crc64:
- state->seed = 0;
- break;
- case BCH_CSUM_crc32c_nonzero:
- state->seed = U32_MAX;
- break;
- case BCH_CSUM_crc64_nonzero:
- state->seed = U64_MAX;
- break;
- case BCH_CSUM_xxhash:
- xxh64_reset(&state->h64state, 0);
- break;
- default:
- BUG();
- }
-}
-
-static u64 bch2_checksum_final(const struct bch2_checksum_state *state)
-{
- switch (state->type) {
- case BCH_CSUM_none:
- case BCH_CSUM_crc32c:
- case BCH_CSUM_crc64:
- return state->seed;
- case BCH_CSUM_crc32c_nonzero:
- return state->seed ^ U32_MAX;
- case BCH_CSUM_crc64_nonzero:
- return state->seed ^ U64_MAX;
- case BCH_CSUM_xxhash:
- return xxh64_digest(&state->h64state);
- default:
- BUG();
- }
-}
-
-static void bch2_checksum_update(struct bch2_checksum_state *state, const void *data, size_t len)
-{
- switch (state->type) {
- case BCH_CSUM_none:
- return;
- case BCH_CSUM_crc32c_nonzero:
- case BCH_CSUM_crc32c:
- state->seed = crc32c(state->seed, data, len);
- break;
- case BCH_CSUM_crc64_nonzero:
- case BCH_CSUM_crc64:
- state->seed = crc64_be(state->seed, data, len);
- break;
- case BCH_CSUM_xxhash:
- xxh64_update(&state->h64state, data, len);
- break;
- default:
- BUG();
- }
-}
-
-static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
- struct nonce nonce,
- struct scatterlist *sg, size_t len)
-{
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
-
- skcipher_request_set_sync_tfm(req, tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, sg, sg, len, nonce.d);
-
- int ret = crypto_skcipher_encrypt(req);
- if (ret)
- pr_err("got error %i from crypto_skcipher_encrypt()", ret);
-
- return ret;
-}
-
-static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
- struct nonce nonce,
- void *buf, size_t len)
-{
- if (!is_vmalloc_addr(buf)) {
- struct scatterlist sg = {};
-
- sg_mark_end(&sg);
- sg_set_page(&sg, virt_to_page(buf), len, offset_in_page(buf));
- return do_encrypt_sg(tfm, nonce, &sg, len);
- } else {
- DARRAY_PREALLOCATED(struct scatterlist, 4) sgl;
- size_t sgl_len = 0;
- int ret;
-
- darray_init(&sgl);
-
- while (len) {
- unsigned offset = offset_in_page(buf);
- struct scatterlist sg = {
- .page_link = (unsigned long) vmalloc_to_page(buf),
- .offset = offset,
- .length = min(len, PAGE_SIZE - offset),
- };
-
- if (darray_push(&sgl, sg)) {
- sg_mark_end(&darray_last(sgl));
- ret = do_encrypt_sg(tfm, nonce, sgl.data, sgl_len);
- if (ret)
- goto err;
-
- nonce = nonce_add(nonce, sgl_len);
- sgl_len = 0;
- sgl.nr = 0;
- BUG_ON(darray_push(&sgl, sg));
- }
-
- buf += sg.length;
- len -= sg.length;
- sgl_len += sg.length;
- }
-
- sg_mark_end(&darray_last(sgl));
- ret = do_encrypt_sg(tfm, nonce, sgl.data, sgl_len);
-err:
- darray_exit(&sgl);
- return ret;
- }
-}
-
-int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
- void *buf, size_t len)
-{
- struct crypto_sync_skcipher *chacha20 =
- crypto_alloc_sync_skcipher("chacha20", 0, 0);
- int ret;
-
- ret = PTR_ERR_OR_ZERO(chacha20);
- if (ret) {
- pr_err("error requesting chacha20 cipher: %s", bch2_err_str(ret));
- return ret;
- }
-
- ret = crypto_skcipher_setkey(&chacha20->base,
- (void *) key, sizeof(*key));
- if (ret) {
- pr_err("error from crypto_skcipher_setkey(): %s", bch2_err_str(ret));
- goto err;
- }
-
- ret = do_encrypt(chacha20, nonce, buf, len);
-err:
- crypto_free_sync_skcipher(chacha20);
- return ret;
-}
-
-static int gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
- struct nonce nonce)
-{
- u8 key[POLY1305_KEY_SIZE];
- int ret;
-
- nonce.d[3] ^= BCH_NONCE_POLY;
-
- memset(key, 0, sizeof(key));
- ret = do_encrypt(c->chacha20, nonce, key, sizeof(key));
- if (ret)
- return ret;
-
- desc->tfm = c->poly1305;
- crypto_shash_init(desc);
- crypto_shash_update(desc, key, sizeof(key));
- return 0;
-}
-
-struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
- struct nonce nonce, const void *data, size_t len)
-{
- switch (type) {
- case BCH_CSUM_none:
- case BCH_CSUM_crc32c_nonzero:
- case BCH_CSUM_crc64_nonzero:
- case BCH_CSUM_crc32c:
- case BCH_CSUM_xxhash:
- case BCH_CSUM_crc64: {
- struct bch2_checksum_state state;
-
- state.type = type;
-
- bch2_checksum_init(&state);
- bch2_checksum_update(&state, data, len);
-
- return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
- }
-
- case BCH_CSUM_chacha20_poly1305_80:
- case BCH_CSUM_chacha20_poly1305_128: {
- SHASH_DESC_ON_STACK(desc, c->poly1305);
- u8 digest[POLY1305_DIGEST_SIZE];
- struct bch_csum ret = { 0 };
-
- gen_poly_key(c, desc, nonce);
-
- crypto_shash_update(desc, data, len);
- crypto_shash_final(desc, digest);
-
- memcpy(&ret, digest, bch_crc_bytes[type]);
- return ret;
- }
- default:
- return (struct bch_csum) {};
- }
-}
-
-int bch2_encrypt(struct bch_fs *c, unsigned type,
- struct nonce nonce, void *data, size_t len)
-{
- if (!bch2_csum_type_is_encryption(type))
- return 0;
-
- return do_encrypt(c->chacha20, nonce, data, len);
-}
-
-static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
- struct nonce nonce, struct bio *bio,
- struct bvec_iter *iter)
-{
- struct bio_vec bv;
-
- switch (type) {
- case BCH_CSUM_none:
- return (struct bch_csum) { 0 };
- case BCH_CSUM_crc32c_nonzero:
- case BCH_CSUM_crc64_nonzero:
- case BCH_CSUM_crc32c:
- case BCH_CSUM_xxhash:
- case BCH_CSUM_crc64: {
- struct bch2_checksum_state state;
-
- state.type = type;
- bch2_checksum_init(&state);
-
-#ifdef CONFIG_HIGHMEM
- __bio_for_each_segment(bv, bio, *iter, *iter) {
- void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;
-
- bch2_checksum_update(&state, p, bv.bv_len);
- kunmap_local(p);
- }
-#else
- __bio_for_each_bvec(bv, bio, *iter, *iter)
- bch2_checksum_update(&state, page_address(bv.bv_page) + bv.bv_offset,
- bv.bv_len);
-#endif
- return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
- }
-
- case BCH_CSUM_chacha20_poly1305_80:
- case BCH_CSUM_chacha20_poly1305_128: {
- SHASH_DESC_ON_STACK(desc, c->poly1305);
- u8 digest[POLY1305_DIGEST_SIZE];
- struct bch_csum ret = { 0 };
-
- gen_poly_key(c, desc, nonce);
-
-#ifdef CONFIG_HIGHMEM
- __bio_for_each_segment(bv, bio, *iter, *iter) {
- void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;
-
- crypto_shash_update(desc, p, bv.bv_len);
- kunmap_local(p);
- }
-#else
- __bio_for_each_bvec(bv, bio, *iter, *iter)
- crypto_shash_update(desc,
- page_address(bv.bv_page) + bv.bv_offset,
- bv.bv_len);
-#endif
- crypto_shash_final(desc, digest);
-
- memcpy(&ret, digest, bch_crc_bytes[type]);
- return ret;
- }
- default:
- return (struct bch_csum) {};
- }
-}
-
-struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type,
- struct nonce nonce, struct bio *bio)
-{
- struct bvec_iter iter = bio->bi_iter;
-
- return __bch2_checksum_bio(c, type, nonce, bio, &iter);
-}
-
-int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
- struct nonce nonce, struct bio *bio)
-{
- struct bio_vec bv;
- struct bvec_iter iter;
- DARRAY_PREALLOCATED(struct scatterlist, 4) sgl;
- size_t sgl_len = 0;
- int ret = 0;
-
- if (!bch2_csum_type_is_encryption(type))
- return 0;
-
- darray_init(&sgl);
-
- bio_for_each_segment(bv, bio, iter) {
- struct scatterlist sg = {
- .page_link = (unsigned long) bv.bv_page,
- .offset = bv.bv_offset,
- .length = bv.bv_len,
- };
-
- if (darray_push(&sgl, sg)) {
- sg_mark_end(&darray_last(sgl));
- ret = do_encrypt_sg(c->chacha20, nonce, sgl.data, sgl_len);
- if (ret)
- goto err;
-
- nonce = nonce_add(nonce, sgl_len);
- sgl_len = 0;
- sgl.nr = 0;
-
- BUG_ON(darray_push(&sgl, sg));
- }
-
- sgl_len += sg.length;
- }
-
- sg_mark_end(&darray_last(sgl));
- ret = do_encrypt_sg(c->chacha20, nonce, sgl.data, sgl_len);
-err:
- darray_exit(&sgl);
- return ret;
-}
-
-struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
- struct bch_csum b, size_t b_len)
-{
- struct bch2_checksum_state state;
-
- state.type = type;
- bch2_checksum_init(&state);
- state.seed = le64_to_cpu(a.lo);
-
- BUG_ON(!bch2_checksum_mergeable(type));
-
- while (b_len) {
- unsigned page_len = min_t(unsigned, b_len, PAGE_SIZE);
-
- bch2_checksum_update(&state,
- page_address(ZERO_PAGE(0)), page_len);
- b_len -= page_len;
- }
- a.lo = cpu_to_le64(bch2_checksum_final(&state));
- a.lo ^= b.lo;
- a.hi ^= b.hi;
- return a;
-}
-
-int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
- struct bversion version,
- struct bch_extent_crc_unpacked crc_old,
- struct bch_extent_crc_unpacked *crc_a,
- struct bch_extent_crc_unpacked *crc_b,
- unsigned len_a, unsigned len_b,
- unsigned new_csum_type)
-{
- struct bvec_iter iter = bio->bi_iter;
- struct nonce nonce = extent_nonce(version, crc_old);
- struct bch_csum merged = { 0 };
- struct crc_split {
- struct bch_extent_crc_unpacked *crc;
- unsigned len;
- unsigned csum_type;
- struct bch_csum csum;
- } splits[3] = {
- { crc_a, len_a, new_csum_type, { 0 }},
- { crc_b, len_b, new_csum_type, { 0 } },
- { NULL, bio_sectors(bio) - len_a - len_b, new_csum_type, { 0 } },
- }, *i;
- bool mergeable = crc_old.csum_type == new_csum_type &&
- bch2_checksum_mergeable(new_csum_type);
- unsigned crc_nonce = crc_old.nonce;
-
- BUG_ON(len_a + len_b > bio_sectors(bio));
- BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));
- BUG_ON(crc_is_compressed(crc_old));
- BUG_ON(bch2_csum_type_is_encryption(crc_old.csum_type) !=
- bch2_csum_type_is_encryption(new_csum_type));
-
- for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
- iter.bi_size = i->len << 9;
- if (mergeable || i->crc)
- i->csum = __bch2_checksum_bio(c, i->csum_type,
- nonce, bio, &iter);
- else
- bio_advance_iter(bio, &iter, i->len << 9);
- nonce = nonce_add(nonce, i->len << 9);
- }
-
- if (mergeable)
- for (i = splits; i < splits + ARRAY_SIZE(splits); i++)
- merged = bch2_checksum_merge(new_csum_type, merged,
- i->csum, i->len << 9);
- else
- merged = bch2_checksum_bio(c, crc_old.csum_type,
- extent_nonce(version, crc_old), bio);
-
- if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) {
- struct printbuf buf = PRINTBUF;
- prt_printf(&buf, "checksum error in %s() (memory corruption or bug?)\n"
- " expected %0llx:%0llx got %0llx:%0llx (old type ",
- __func__,
- crc_old.csum.hi,
- crc_old.csum.lo,
- merged.hi,
- merged.lo);
- bch2_prt_csum_type(&buf, crc_old.csum_type);
- prt_str(&buf, " new type ");
- bch2_prt_csum_type(&buf, new_csum_type);
- prt_str(&buf, ")");
- WARN_RATELIMIT(1, "%s", buf.buf);
- printbuf_exit(&buf);
- return -EIO;
- }
-
- for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
- if (i->crc)
- *i->crc = (struct bch_extent_crc_unpacked) {
- .csum_type = i->csum_type,
- .compression_type = crc_old.compression_type,
- .compressed_size = i->len,
- .uncompressed_size = i->len,
- .offset = 0,
- .live_size = i->len,
- .nonce = crc_nonce,
- .csum = i->csum,
- };
-
- if (bch2_csum_type_is_encryption(new_csum_type))
- crc_nonce += i->len;
- }
-
- return 0;
-}
-
-/* BCH_SB_FIELD_crypt: */
-
-static int bch2_sb_crypt_validate(struct bch_sb *sb, struct bch_sb_field *f,
- enum bch_validate_flags flags, struct printbuf *err)
-{
- struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);
-
- if (vstruct_bytes(&crypt->field) < sizeof(*crypt)) {
- prt_printf(err, "wrong size (got %zu should be %zu)",
- vstruct_bytes(&crypt->field), sizeof(*crypt));
- return -BCH_ERR_invalid_sb_crypt;
- }
-
- if (BCH_CRYPT_KDF_TYPE(crypt)) {
- prt_printf(err, "bad kdf type %llu", BCH_CRYPT_KDF_TYPE(crypt));
- return -BCH_ERR_invalid_sb_crypt;
- }
-
- return 0;
-}
-
-static void bch2_sb_crypt_to_text(struct printbuf *out, struct bch_sb *sb,
- struct bch_sb_field *f)
-{
- struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);
-
- prt_printf(out, "KFD: %llu\n", BCH_CRYPT_KDF_TYPE(crypt));
- prt_printf(out, "scrypt n: %llu\n", BCH_KDF_SCRYPT_N(crypt));
- prt_printf(out, "scrypt r: %llu\n", BCH_KDF_SCRYPT_R(crypt));
- prt_printf(out, "scrypt p: %llu\n", BCH_KDF_SCRYPT_P(crypt));
-}
-
-const struct bch_sb_field_ops bch_sb_field_ops_crypt = {
- .validate = bch2_sb_crypt_validate,
- .to_text = bch2_sb_crypt_to_text,
-};
-
-#ifdef __KERNEL__
-static int __bch2_request_key(char *key_description, struct bch_key *key)
-{
- struct key *keyring_key;
- const struct user_key_payload *ukp;
- int ret;
-
- keyring_key = request_key(&key_type_user, key_description, NULL);
- if (IS_ERR(keyring_key))
- return PTR_ERR(keyring_key);
-
- down_read(&keyring_key->sem);
- ukp = dereference_key_locked(keyring_key);
- if (ukp->datalen == sizeof(*key)) {
- memcpy(key, ukp->data, ukp->datalen);
- ret = 0;
- } else {
- ret = -EINVAL;
- }
- up_read(&keyring_key->sem);
- key_put(keyring_key);
-
- return ret;
-}
-#else
-#include <keyutils.h>
-
-static int __bch2_request_key(char *key_description, struct bch_key *key)
-{
- key_serial_t key_id;
-
- key_id = request_key("user", key_description, NULL,
- KEY_SPEC_SESSION_KEYRING);
- if (key_id >= 0)
- goto got_key;
-
- key_id = request_key("user", key_description, NULL,
- KEY_SPEC_USER_KEYRING);
- if (key_id >= 0)
- goto got_key;
-
- key_id = request_key("user", key_description, NULL,
- KEY_SPEC_USER_SESSION_KEYRING);
- if (key_id >= 0)
- goto got_key;
-
- return -errno;
-got_key:
-
- if (keyctl_read(key_id, (void *) key, sizeof(*key)) != sizeof(*key))
- return -1;
-
- return 0;
-}
-
-#include "crypto.h"
-#endif
-
-int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
-{
- struct printbuf key_description = PRINTBUF;
- int ret;
-
- prt_printf(&key_description, "bcachefs:");
- pr_uuid(&key_description, sb->user_uuid.b);
-
- ret = __bch2_request_key(key_description.buf, key);
- printbuf_exit(&key_description);
-
-#ifndef __KERNEL__
- if (ret) {
- char *passphrase = read_passphrase("Enter passphrase: ");
- struct bch_encrypted_key sb_key;
-
- bch2_passphrase_check(sb, passphrase,
- key, &sb_key);
- ret = 0;
- }
-#endif
-
- /* stash with memfd, pass memfd fd to mount */
-
- return ret;
-}
-
-#ifndef __KERNEL__
-int bch2_revoke_key(struct bch_sb *sb)
-{
- key_serial_t key_id;
- struct printbuf key_description = PRINTBUF;
-
- prt_printf(&key_description, "bcachefs:");
- pr_uuid(&key_description, sb->user_uuid.b);
-
- key_id = request_key("user", key_description.buf, NULL, KEY_SPEC_USER_KEYRING);
- printbuf_exit(&key_description);
- if (key_id < 0)
- return errno;
-
- keyctl_revoke(key_id);
-
- return 0;
-}
-#endif
-
-int bch2_decrypt_sb_key(struct bch_fs *c,
- struct bch_sb_field_crypt *crypt,
- struct bch_key *key)
-{
- struct bch_encrypted_key sb_key = crypt->key;
- struct bch_key user_key;
- int ret = 0;
-
- /* is key encrypted? */
- if (!bch2_key_is_encrypted(&sb_key))
- goto out;
-
- ret = bch2_request_key(c->disk_sb.sb, &user_key);
- if (ret) {
- bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
- goto err;
- }
-
- /* decrypt real key: */
- ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
- &sb_key, sizeof(sb_key));
- if (ret)
- goto err;
-
- if (bch2_key_is_encrypted(&sb_key)) {
- bch_err(c, "incorrect encryption key");
- ret = -EINVAL;
- goto err;
- }
-out:
- *key = sb_key.key;
-err:
- memzero_explicit(&sb_key, sizeof(sb_key));
- memzero_explicit(&user_key, sizeof(user_key));
- return ret;
-}
-
-static int bch2_alloc_ciphers(struct bch_fs *c)
-{
- if (c->chacha20)
- return 0;
-
- struct crypto_sync_skcipher *chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
- int ret = PTR_ERR_OR_ZERO(chacha20);
- if (ret) {
- bch_err(c, "error requesting chacha20 module: %s", bch2_err_str(ret));
- return ret;
- }
-
- struct crypto_shash *poly1305 = crypto_alloc_shash("poly1305", 0, 0);
- ret = PTR_ERR_OR_ZERO(poly1305);
- if (ret) {
- bch_err(c, "error requesting poly1305 module: %s", bch2_err_str(ret));
- crypto_free_sync_skcipher(chacha20);
- return ret;
- }
-
- c->chacha20 = chacha20;
- c->poly1305 = poly1305;
- return 0;
-}
-
-int bch2_disable_encryption(struct bch_fs *c)
-{
- struct bch_sb_field_crypt *crypt;
- struct bch_key key;
- int ret = -EINVAL;
-
- mutex_lock(&c->sb_lock);
-
- crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
- if (!crypt)
- goto out;
-
- /* is key encrypted? */
- ret = 0;
- if (bch2_key_is_encrypted(&crypt->key))
- goto out;
-
- ret = bch2_decrypt_sb_key(c, crypt, &key);
- if (ret)
- goto out;
-
- crypt->key.magic = cpu_to_le64(BCH_KEY_MAGIC);
- crypt->key.key = key;
-
- SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
- bch2_write_super(c);
-out:
- mutex_unlock(&c->sb_lock);
-
- return ret;
-}
-
-int bch2_enable_encryption(struct bch_fs *c, bool keyed)
-{
- struct bch_encrypted_key key;
- struct bch_key user_key;
- struct bch_sb_field_crypt *crypt;
- int ret = -EINVAL;
-
- mutex_lock(&c->sb_lock);
-
- /* Do we already have an encryption key? */
- if (bch2_sb_field_get(c->disk_sb.sb, crypt))
- goto err;
-
- ret = bch2_alloc_ciphers(c);
- if (ret)
- goto err;
-
- key.magic = cpu_to_le64(BCH_KEY_MAGIC);
- get_random_bytes(&key.key, sizeof(key.key));
-
- if (keyed) {
- ret = bch2_request_key(c->disk_sb.sb, &user_key);
- if (ret) {
- bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
- goto err;
- }
-
- ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
- &key, sizeof(key));
- if (ret)
- goto err;
- }
-
- ret = crypto_skcipher_setkey(&c->chacha20->base,
- (void *) &key.key, sizeof(key.key));
- if (ret)
- goto err;
-
- crypt = bch2_sb_field_resize(&c->disk_sb, crypt,
- sizeof(*crypt) / sizeof(u64));
- if (!crypt) {
- ret = -BCH_ERR_ENOSPC_sb_crypt;
- goto err;
- }
-
- crypt->key = key;
-
- /* write superblock */
- SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 1);
- bch2_write_super(c);
-err:
- mutex_unlock(&c->sb_lock);
- memzero_explicit(&user_key, sizeof(user_key));
- memzero_explicit(&key, sizeof(key));
- return ret;
-}
-
-void bch2_fs_encryption_exit(struct bch_fs *c)
-{
- if (c->poly1305)
- crypto_free_shash(c->poly1305);
- if (c->chacha20)
- crypto_free_sync_skcipher(c->chacha20);
- if (c->sha256)
- crypto_free_shash(c->sha256);
-}
-
-int bch2_fs_encryption_init(struct bch_fs *c)
-{
- struct bch_sb_field_crypt *crypt;
- struct bch_key key;
- int ret = 0;
-
- c->sha256 = crypto_alloc_shash("sha256", 0, 0);
- ret = PTR_ERR_OR_ZERO(c->sha256);
- if (ret) {
- c->sha256 = NULL;
- bch_err(c, "error requesting sha256 module: %s", bch2_err_str(ret));
- goto out;
- }
-
- crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
- if (!crypt)
- goto out;
-
- ret = bch2_alloc_ciphers(c);
- if (ret)
- goto out;
-
- ret = bch2_decrypt_sb_key(c, crypt, &key);
- if (ret)
- goto out;
-
- ret = crypto_skcipher_setkey(&c->chacha20->base,
- (void *) &key.key, sizeof(key.key));
- if (ret)
- goto out;
-out:
- memzero_explicit(&key, sizeof(key));
- return ret;
-}
diff --git a/fs/bcachefs/checksum.h b/fs/bcachefs/checksum.h
deleted file mode 100644
index e40499fde9a4..000000000000
--- a/fs/bcachefs/checksum.h
+++ /dev/null
@@ -1,237 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_CHECKSUM_H
-#define _BCACHEFS_CHECKSUM_H
-
-#include "bcachefs.h"
-#include "extents_types.h"
-#include "super-io.h"
-
-#include <linux/crc64.h>
-#include <crypto/chacha.h>
-
-static inline bool bch2_checksum_mergeable(unsigned type)
-{
-
- switch (type) {
- case BCH_CSUM_none:
- case BCH_CSUM_crc32c:
- case BCH_CSUM_crc64:
- return true;
- default:
- return false;
- }
-}
-
-struct bch_csum bch2_checksum_merge(unsigned, struct bch_csum,
- struct bch_csum, size_t);
-
-#define BCH_NONCE_EXTENT cpu_to_le32(1 << 28)
-#define BCH_NONCE_BTREE cpu_to_le32(2 << 28)
-#define BCH_NONCE_JOURNAL cpu_to_le32(3 << 28)
-#define BCH_NONCE_PRIO cpu_to_le32(4 << 28)
-#define BCH_NONCE_POLY cpu_to_le32(1 << 31)
-
-struct bch_csum bch2_checksum(struct bch_fs *, unsigned, struct nonce,
- const void *, size_t);
-
-/*
- * This is used for various on disk data structures - bch_sb, prio_set, bset,
- * jset: The checksum is _always_ the first field of these structs
- */
-#define csum_vstruct(_c, _type, _nonce, _i) \
-({ \
- const void *_start = ((const void *) (_i)) + sizeof((_i)->csum);\
- \
- bch2_checksum(_c, _type, _nonce, _start, vstruct_end(_i) - _start);\
-})
-
-static inline void bch2_csum_to_text(struct printbuf *out,
- enum bch_csum_type type,
- struct bch_csum csum)
-{
- const u8 *p = (u8 *) &csum;
- unsigned bytes = type < BCH_CSUM_NR ? bch_crc_bytes[type] : 16;
-
- for (unsigned i = 0; i < bytes; i++)
- prt_hex_byte(out, p[i]);
-}
-
-static inline void bch2_csum_err_msg(struct printbuf *out,
- enum bch_csum_type type,
- struct bch_csum expected,
- struct bch_csum got)
-{
- prt_str(out, "checksum error, type ");
- bch2_prt_csum_type(out, type);
- prt_str(out, ": got ");
- bch2_csum_to_text(out, type, got);
- prt_str(out, " should be ");
- bch2_csum_to_text(out, type, expected);
-}
-
-int bch2_chacha_encrypt_key(struct bch_key *, struct nonce, void *, size_t);
-int bch2_request_key(struct bch_sb *, struct bch_key *);
-#ifndef __KERNEL__
-int bch2_revoke_key(struct bch_sb *);
-#endif
-
-int bch2_encrypt(struct bch_fs *, unsigned, struct nonce,
- void *data, size_t);
-
-struct bch_csum bch2_checksum_bio(struct bch_fs *, unsigned,
- struct nonce, struct bio *);
-
-int bch2_rechecksum_bio(struct bch_fs *, struct bio *, struct bversion,
- struct bch_extent_crc_unpacked,
- struct bch_extent_crc_unpacked *,
- struct bch_extent_crc_unpacked *,
- unsigned, unsigned, unsigned);
-
-int __bch2_encrypt_bio(struct bch_fs *, unsigned,
- struct nonce, struct bio *);
-
-static inline int bch2_encrypt_bio(struct bch_fs *c, unsigned type,
- struct nonce nonce, struct bio *bio)
-{
- return bch2_csum_type_is_encryption(type)
- ? __bch2_encrypt_bio(c, type, nonce, bio)
- : 0;
-}
-
-extern const struct bch_sb_field_ops bch_sb_field_ops_crypt;
-
-int bch2_decrypt_sb_key(struct bch_fs *, struct bch_sb_field_crypt *,
- struct bch_key *);
-
-int bch2_disable_encryption(struct bch_fs *);
-int bch2_enable_encryption(struct bch_fs *, bool);
-
-void bch2_fs_encryption_exit(struct bch_fs *);
-int bch2_fs_encryption_init(struct bch_fs *);
-
-static inline enum bch_csum_type bch2_csum_opt_to_type(enum bch_csum_opts type,
- bool data)
-{
- switch (type) {
- case BCH_CSUM_OPT_none:
- return BCH_CSUM_none;
- case BCH_CSUM_OPT_crc32c:
- return data ? BCH_CSUM_crc32c : BCH_CSUM_crc32c_nonzero;
- case BCH_CSUM_OPT_crc64:
- return data ? BCH_CSUM_crc64 : BCH_CSUM_crc64_nonzero;
- case BCH_CSUM_OPT_xxhash:
- return BCH_CSUM_xxhash;
- default:
- BUG();
- }
-}
-
-static inline enum bch_csum_type bch2_data_checksum_type(struct bch_fs *c,
- struct bch_io_opts opts)
-{
- if (opts.nocow)
- return 0;
-
- if (c->sb.encryption_type)
- return c->opts.wide_macs
- ? BCH_CSUM_chacha20_poly1305_128
- : BCH_CSUM_chacha20_poly1305_80;
-
- return bch2_csum_opt_to_type(opts.data_checksum, true);
-}
-
-static inline enum bch_csum_type bch2_meta_checksum_type(struct bch_fs *c)
-{
- if (c->sb.encryption_type)
- return BCH_CSUM_chacha20_poly1305_128;
-
- return bch2_csum_opt_to_type(c->opts.metadata_checksum, false);
-}
-
-static inline bool bch2_checksum_type_valid(const struct bch_fs *c,
- unsigned type)
-{
- if (type >= BCH_CSUM_NR)
- return false;
-
- if (bch2_csum_type_is_encryption(type) && !c->chacha20)
- return false;
-
- return true;
-}
-
-/* returns true if not equal */
-static inline bool bch2_crc_cmp(struct bch_csum l, struct bch_csum r)
-{
- /*
- * XXX: need some way of preventing the compiler from optimizing this
- * into a form that isn't constant time..
- */
- return ((l.lo ^ r.lo) | (l.hi ^ r.hi)) != 0;
-}
-
-/* for skipping ahead and encrypting/decrypting at an offset: */
-static inline struct nonce nonce_add(struct nonce nonce, unsigned offset)
-{
- EBUG_ON(offset & (CHACHA_BLOCK_SIZE - 1));
-
- le32_add_cpu(&nonce.d[0], offset / CHACHA_BLOCK_SIZE);
- return nonce;
-}
-
-static inline struct nonce null_nonce(void)
-{
- struct nonce ret;
-
- memset(&ret, 0, sizeof(ret));
- return ret;
-}
-
-static inline struct nonce extent_nonce(struct bversion version,
- struct bch_extent_crc_unpacked crc)
-{
- unsigned compression_type = crc_is_compressed(crc)
- ? crc.compression_type
- : 0;
- unsigned size = compression_type ? crc.uncompressed_size : 0;
- struct nonce nonce = (struct nonce) {{
- [0] = cpu_to_le32(size << 22),
- [1] = cpu_to_le32(version.lo),
- [2] = cpu_to_le32(version.lo >> 32),
- [3] = cpu_to_le32(version.hi|
- (compression_type << 24))^BCH_NONCE_EXTENT,
- }};
-
- return nonce_add(nonce, crc.nonce << 9);
-}
-
-static inline bool bch2_key_is_encrypted(struct bch_encrypted_key *key)
-{
- return le64_to_cpu(key->magic) != BCH_KEY_MAGIC;
-}
-
-static inline struct nonce __bch2_sb_key_nonce(struct bch_sb *sb)
-{
- __le64 magic = __bch2_sb_magic(sb);
-
- return (struct nonce) {{
- [0] = 0,
- [1] = 0,
- [2] = ((__le32 *) &magic)[0],
- [3] = ((__le32 *) &magic)[1],
- }};
-}
-
-static inline struct nonce bch2_sb_key_nonce(struct bch_fs *c)
-{
- __le64 magic = bch2_sb_magic(c);
-
- return (struct nonce) {{
- [0] = 0,
- [1] = 0,
- [2] = ((__le32 *) &magic)[0],
- [3] = ((__le32 *) &magic)[1],
- }};
-}
-
-#endif /* _BCACHEFS_CHECKSUM_H */
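
Reviewer note, not part of the patch: nonce_add()/extent_nonce() above encode the rule that skipping ahead in an encrypted extent only requires bumping the first 32-bit nonce word by the number of ChaCha blocks skipped. A standalone, host-endian sketch of that arithmetic (the struct and helper below are stand-ins, not kernel code):

#include <assert.h>
#include <stdint.h>

#define CHACHA_BLOCK_SIZE_EX	64	/* bytes per ChaCha20 block */

struct nonce_ex { uint32_t d[4]; };	/* host-endian stand-in for struct nonce */

static struct nonce_ex nonce_add_ex(struct nonce_ex nonce, unsigned offset)
{
	/* offset must be ChaCha-block aligned, as EBUG_ON() asserts above */
	assert(!(offset & (CHACHA_BLOCK_SIZE_EX - 1)));

	nonce.d[0] += offset / CHACHA_BLOCK_SIZE_EX;	/* e.g. 4096 bytes -> +64 blocks */
	return nonce;
}
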
diff --git a/fs/bcachefs/clock.c b/fs/bcachefs/clock.c
deleted file mode 100644
index 1f8e035d7119..000000000000
--- a/fs/bcachefs/clock.c
+++ /dev/null
@@ -1,192 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "bcachefs.h"
-#include "clock.h"
-
-#include <linux/freezer.h>
-#include <linux/kthread.h>
-#include <linux/preempt.h>
-
-static inline bool io_timer_cmp(const void *l, const void *r, void __always_unused *args)
-{
- struct io_timer **_l = (struct io_timer **)l;
- struct io_timer **_r = (struct io_timer **)r;
-
- return (*_l)->expire < (*_r)->expire;
-}
-
-static const struct min_heap_callbacks callbacks = {
- .less = io_timer_cmp,
- .swp = NULL,
-};
-
-void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
-{
- spin_lock(&clock->timer_lock);
-
- if (time_after_eq64((u64) atomic64_read(&clock->now), timer->expire)) {
- spin_unlock(&clock->timer_lock);
- timer->fn(timer);
- return;
- }
-
- for (size_t i = 0; i < clock->timers.nr; i++)
- if (clock->timers.data[i] == timer)
- goto out;
-
- BUG_ON(!min_heap_push(&clock->timers, &timer, &callbacks, NULL));
-out:
- spin_unlock(&clock->timer_lock);
-}
-
-void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
-{
- spin_lock(&clock->timer_lock);
-
- for (size_t i = 0; i < clock->timers.nr; i++)
- if (clock->timers.data[i] == timer) {
- min_heap_del(&clock->timers, i, &callbacks, NULL);
- break;
- }
-
- spin_unlock(&clock->timer_lock);
-}
-
-struct io_clock_wait {
- struct io_timer io_timer;
- struct timer_list cpu_timer;
- struct task_struct *task;
- int expired;
-};
-
-static void io_clock_wait_fn(struct io_timer *timer)
-{
- struct io_clock_wait *wait = container_of(timer,
- struct io_clock_wait, io_timer);
-
- wait->expired = 1;
- wake_up_process(wait->task);
-}
-
-static void io_clock_cpu_timeout(struct timer_list *timer)
-{
- struct io_clock_wait *wait = container_of(timer,
- struct io_clock_wait, cpu_timer);
-
- wait->expired = 1;
- wake_up_process(wait->task);
-}
-
-void bch2_io_clock_schedule_timeout(struct io_clock *clock, u64 until)
-{
- struct io_clock_wait wait = {
- .io_timer.expire = until,
- .io_timer.fn = io_clock_wait_fn,
- .io_timer.fn2 = (void *) _RET_IP_,
- .task = current,
- };
-
- bch2_io_timer_add(clock, &wait.io_timer);
- schedule();
- bch2_io_timer_del(clock, &wait.io_timer);
-}
-
-void bch2_kthread_io_clock_wait(struct io_clock *clock,
- u64 io_until, unsigned long cpu_timeout)
-{
- bool kthread = (current->flags & PF_KTHREAD) != 0;
- struct io_clock_wait wait = {
- .io_timer.expire = io_until,
- .io_timer.fn = io_clock_wait_fn,
- .io_timer.fn2 = (void *) _RET_IP_,
- .task = current,
- };
-
- bch2_io_timer_add(clock, &wait.io_timer);
-
- timer_setup_on_stack(&wait.cpu_timer, io_clock_cpu_timeout, 0);
-
- if (cpu_timeout != MAX_SCHEDULE_TIMEOUT)
- mod_timer(&wait.cpu_timer, cpu_timeout + jiffies);
-
- do {
- set_current_state(TASK_INTERRUPTIBLE);
- if (kthread && kthread_should_stop())
- break;
-
- if (wait.expired)
- break;
-
- schedule();
- try_to_freeze();
- } while (0);
-
- __set_current_state(TASK_RUNNING);
- del_timer_sync(&wait.cpu_timer);
- destroy_timer_on_stack(&wait.cpu_timer);
- bch2_io_timer_del(clock, &wait.io_timer);
-}
-
-static struct io_timer *get_expired_timer(struct io_clock *clock, u64 now)
-{
- struct io_timer *ret = NULL;
-
- if (clock->timers.nr &&
- time_after_eq64(now, clock->timers.data[0]->expire)) {
- ret = *min_heap_peek(&clock->timers);
- min_heap_pop(&clock->timers, &callbacks, NULL);
- }
-
- return ret;
-}
-
-void __bch2_increment_clock(struct io_clock *clock, u64 sectors)
-{
- struct io_timer *timer;
- u64 now = atomic64_add_return(sectors, &clock->now);
-
- spin_lock(&clock->timer_lock);
- while ((timer = get_expired_timer(clock, now)))
- timer->fn(timer);
- spin_unlock(&clock->timer_lock);
-}
-
-void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
-{
- out->atomic++;
- spin_lock(&clock->timer_lock);
- u64 now = atomic64_read(&clock->now);
-
- printbuf_tabstop_push(out, 40);
- prt_printf(out, "current time:\t%llu\n", now);
-
- for (unsigned i = 0; i < clock->timers.nr; i++)
- prt_printf(out, "%ps %ps:\t%llu\n",
- clock->timers.data[i]->fn,
- clock->timers.data[i]->fn2,
- clock->timers.data[i]->expire);
- spin_unlock(&clock->timer_lock);
- --out->atomic;
-}
-
-void bch2_io_clock_exit(struct io_clock *clock)
-{
- free_heap(&clock->timers);
- free_percpu(clock->pcpu_buf);
-}
-
-int bch2_io_clock_init(struct io_clock *clock)
-{
- atomic64_set(&clock->now, 0);
- spin_lock_init(&clock->timer_lock);
-
- clock->max_slop = IO_CLOCK_PCPU_SECTORS * num_possible_cpus();
-
- clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
- if (!clock->pcpu_buf)
- return -BCH_ERR_ENOMEM_io_clock_init;
-
- if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
- return -BCH_ERR_ENOMEM_io_clock_init;
-
- return 0;
-}
diff --git a/fs/bcachefs/clock.h b/fs/bcachefs/clock.h
deleted file mode 100644
index 82c79c8baf92..000000000000
--- a/fs/bcachefs/clock.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_CLOCK_H
-#define _BCACHEFS_CLOCK_H
-
-void bch2_io_timer_add(struct io_clock *, struct io_timer *);
-void bch2_io_timer_del(struct io_clock *, struct io_timer *);
-void bch2_kthread_io_clock_wait(struct io_clock *, u64, unsigned long);
-
-void __bch2_increment_clock(struct io_clock *, u64);
-
-static inline void bch2_increment_clock(struct bch_fs *c, u64 sectors,
- int rw)
-{
- struct io_clock *clock = &c->io_clock[rw];
-
- if (unlikely(this_cpu_add_return(*clock->pcpu_buf, sectors) >=
- IO_CLOCK_PCPU_SECTORS))
- __bch2_increment_clock(clock, this_cpu_xchg(*clock->pcpu_buf, 0));
-}
-
-void bch2_io_clock_schedule_timeout(struct io_clock *, u64);
-
-void bch2_io_timers_to_text(struct printbuf *, struct io_clock *);
-
-void bch2_io_clock_exit(struct io_clock *);
-int bch2_io_clock_init(struct io_clock *);
-
-#endif /* _BCACHEFS_CLOCK_H */
diff --git a/fs/bcachefs/clock_types.h b/fs/bcachefs/clock_types.h
deleted file mode 100644
index 37554e4514fe..000000000000
--- a/fs/bcachefs/clock_types.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_CLOCK_TYPES_H
-#define _BCACHEFS_CLOCK_TYPES_H
-
-#include "util.h"
-
-#define NR_IO_TIMERS (BCH_SB_MEMBERS_MAX * 3)
-
-/*
- * Clocks/timers in units of sectors of IO:
- *
- * Note - they use percpu batching, so they're only approximate.
- */
-
-struct io_timer;
-typedef void (*io_timer_fn)(struct io_timer *);
-
-struct io_timer {
- io_timer_fn fn;
- void *fn2;
- u64 expire;
-};
-
-/* Amount to buffer up on a percpu counter */
-#define IO_CLOCK_PCPU_SECTORS 128
-
-typedef DEFINE_MIN_HEAP(struct io_timer *, io_timer_heap) io_timer_heap;
-
-struct io_clock {
- atomic64_t now;
- u16 __percpu *pcpu_buf;
- unsigned max_slop;
-
- spinlock_t timer_lock;
- io_timer_heap timers;
-};
-
-#endif /* _BCACHEFS_CLOCK_TYPES_H */
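
Reviewer note, not part of the patch: the "only approximate" caveat in the comment above comes from the percpu batching done by bch2_increment_clock() in clock.h — each CPU buffers up to IO_CLOCK_PCPU_SECTORS sectors locally before folding them into the shared atomic counter. A single-threaded sketch of that batching, with plain integers standing in for the percpu/atomic types:

#include <stdint.h>

#define IO_CLOCK_PCPU_SECTORS_EX	128	/* per-CPU batch size, as above */

struct io_clock_ex {
	uint64_t now;		/* stands in for atomic64_t now */
	uint64_t pcpu_buf;	/* stands in for one CPU's pcpu_buf slot */
};

static void increment_clock_ex(struct io_clock_ex *clock, uint64_t sectors)
{
	clock->pcpu_buf += sectors;
	if (clock->pcpu_buf >= IO_CLOCK_PCPU_SECTORS_EX) {
		/* flush the batch into the shared clock, like __bch2_increment_clock() */
		clock->now += clock->pcpu_buf;
		clock->pcpu_buf = 0;
	}
}
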
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
deleted file mode 100644
index 1410365a8891..000000000000
--- a/fs/bcachefs/compress.c
+++ /dev/null
@@ -1,728 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "bcachefs.h"
-#include "checksum.h"
-#include "compress.h"
-#include "extents.h"
-#include "super-io.h"
-
-#include <linux/lz4.h>
-#include <linux/zlib.h>
-#include <linux/zstd.h>
-
-/* Bounce buffer: */
-struct bbuf {
- void *b;
- enum {
- BB_NONE,
- BB_VMAP,
- BB_KMALLOC,
- BB_MEMPOOL,
- } type;
- int rw;
-};
-
-static struct bbuf __bounce_alloc(struct bch_fs *c, unsigned size, int rw)
-{
- void *b;
-
- BUG_ON(size > c->opts.encoded_extent_max);
-
- b = kmalloc(size, GFP_NOFS|__GFP_NOWARN);
- if (b)
- return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw };
-
- b = mempool_alloc(&c->compression_bounce[rw], GFP_NOFS);
- if (b)
- return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };
-
- BUG();
-}
-
-static bool bio_phys_contig(struct bio *bio, struct bvec_iter start)
-{
- struct bio_vec bv;
- struct bvec_iter iter;
- void *expected_start = NULL;
-
- __bio_for_each_bvec(bv, bio, iter, start) {
- if (expected_start &&
- expected_start != page_address(bv.bv_page) + bv.bv_offset)
- return false;
-
- expected_start = page_address(bv.bv_page) +
- bv.bv_offset + bv.bv_len;
- }
-
- return true;
-}
-
-static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
- struct bvec_iter start, int rw)
-{
- struct bbuf ret;
- struct bio_vec bv;
- struct bvec_iter iter;
- unsigned nr_pages = 0;
- struct page *stack_pages[16];
- struct page **pages = NULL;
- void *data;
-
- BUG_ON(start.bi_size > c->opts.encoded_extent_max);
-
- if (!PageHighMem(bio_iter_page(bio, start)) &&
- bio_phys_contig(bio, start))
- return (struct bbuf) {
- .b = page_address(bio_iter_page(bio, start)) +
- bio_iter_offset(bio, start),
- .type = BB_NONE, .rw = rw
- };
-
- /* check if we can map the pages contiguously: */
- __bio_for_each_segment(bv, bio, iter, start) {
- if (iter.bi_size != start.bi_size &&
- bv.bv_offset)
- goto bounce;
-
- if (bv.bv_len < iter.bi_size &&
- bv.bv_offset + bv.bv_len < PAGE_SIZE)
- goto bounce;
-
- nr_pages++;
- }
-
- BUG_ON(DIV_ROUND_UP(start.bi_size, PAGE_SIZE) > nr_pages);
-
- pages = nr_pages > ARRAY_SIZE(stack_pages)
- ? kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS)
- : stack_pages;
- if (!pages)
- goto bounce;
-
- nr_pages = 0;
- __bio_for_each_segment(bv, bio, iter, start)
- pages[nr_pages++] = bv.bv_page;
-
- data = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
- if (pages != stack_pages)
- kfree(pages);
-
- if (data)
- return (struct bbuf) {
- .b = data + bio_iter_offset(bio, start),
- .type = BB_VMAP, .rw = rw
- };
-bounce:
- ret = __bounce_alloc(c, start.bi_size, rw);
-
- if (rw == READ)
- memcpy_from_bio(ret.b, bio, start);
-
- return ret;
-}
-
-static struct bbuf bio_map_or_bounce(struct bch_fs *c, struct bio *bio, int rw)
-{
- return __bio_map_or_bounce(c, bio, bio->bi_iter, rw);
-}
-
-static void bio_unmap_or_unbounce(struct bch_fs *c, struct bbuf buf)
-{
- switch (buf.type) {
- case BB_NONE:
- break;
- case BB_VMAP:
- vunmap((void *) ((unsigned long) buf.b & PAGE_MASK));
- break;
- case BB_KMALLOC:
- kfree(buf.b);
- break;
- case BB_MEMPOOL:
- mempool_free(buf.b, &c->compression_bounce[buf.rw]);
- break;
- }
-}
-
-static inline void zlib_set_workspace(z_stream *strm, void *workspace)
-{
-#ifdef __KERNEL__
- strm->workspace = workspace;
-#endif
-}
-
-static int __bio_uncompress(struct bch_fs *c, struct bio *src,
- void *dst_data, struct bch_extent_crc_unpacked crc)
-{
- struct bbuf src_data = { NULL };
- size_t src_len = src->bi_iter.bi_size;
- size_t dst_len = crc.uncompressed_size << 9;
- void *workspace;
- int ret;
-
- src_data = bio_map_or_bounce(c, src, READ);
-
- switch (crc.compression_type) {
- case BCH_COMPRESSION_TYPE_lz4_old:
- case BCH_COMPRESSION_TYPE_lz4:
- ret = LZ4_decompress_safe_partial(src_data.b, dst_data,
- src_len, dst_len, dst_len);
- if (ret != dst_len)
- goto err;
- break;
- case BCH_COMPRESSION_TYPE_gzip: {
- z_stream strm = {
- .next_in = src_data.b,
- .avail_in = src_len,
- .next_out = dst_data,
- .avail_out = dst_len,
- };
-
- workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS);
-
- zlib_set_workspace(&strm, workspace);
- zlib_inflateInit2(&strm, -MAX_WBITS);
- ret = zlib_inflate(&strm, Z_FINISH);
-
- mempool_free(workspace, &c->decompress_workspace);
-
- if (ret != Z_STREAM_END)
- goto err;
- break;
- }
- case BCH_COMPRESSION_TYPE_zstd: {
- ZSTD_DCtx *ctx;
- size_t real_src_len = le32_to_cpup(src_data.b);
-
- if (real_src_len > src_len - 4)
- goto err;
-
- workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS);
- ctx = zstd_init_dctx(workspace, zstd_dctx_workspace_bound());
-
- ret = zstd_decompress_dctx(ctx,
- dst_data, dst_len,
- src_data.b + 4, real_src_len);
-
- mempool_free(workspace, &c->decompress_workspace);
-
- if (ret != dst_len)
- goto err;
- break;
- }
- default:
- BUG();
- }
- ret = 0;
-out:
- bio_unmap_or_unbounce(c, src_data);
- return ret;
-err:
- ret = -EIO;
- goto out;
-}
-
-int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio,
- struct bch_extent_crc_unpacked *crc)
-{
- struct bbuf data = { NULL };
- size_t dst_len = crc->uncompressed_size << 9;
-
- /* bio must own its pages: */
- BUG_ON(!bio->bi_vcnt);
- BUG_ON(DIV_ROUND_UP(crc->live_size, PAGE_SECTORS) > bio->bi_max_vecs);
-
- if (crc->uncompressed_size << 9 > c->opts.encoded_extent_max ||
- crc->compressed_size << 9 > c->opts.encoded_extent_max) {
- bch_err(c, "error rewriting existing data: extent too big");
- return -EIO;
- }
-
- data = __bounce_alloc(c, dst_len, WRITE);
-
- if (__bio_uncompress(c, bio, data.b, *crc)) {
- if (!c->opts.no_data_io)
- bch_err(c, "error rewriting existing data: decompression error");
- bio_unmap_or_unbounce(c, data);
- return -EIO;
- }
-
- /*
- * XXX: don't have a good way to assert that the bio was allocated with
- * enough space, we depend on bch2_move_extent doing the right thing
- */
- bio->bi_iter.bi_size = crc->live_size << 9;
-
- memcpy_to_bio(bio, bio->bi_iter, data.b + (crc->offset << 9));
-
- crc->csum_type = 0;
- crc->compression_type = 0;
- crc->compressed_size = crc->live_size;
- crc->uncompressed_size = crc->live_size;
- crc->offset = 0;
- crc->csum = (struct bch_csum) { 0, 0 };
-
- bio_unmap_or_unbounce(c, data);
- return 0;
-}
-
-int bch2_bio_uncompress(struct bch_fs *c, struct bio *src,
- struct bio *dst, struct bvec_iter dst_iter,
- struct bch_extent_crc_unpacked crc)
-{
- struct bbuf dst_data = { NULL };
- size_t dst_len = crc.uncompressed_size << 9;
- int ret;
-
- if (crc.uncompressed_size << 9 > c->opts.encoded_extent_max ||
- crc.compressed_size << 9 > c->opts.encoded_extent_max)
- return -EIO;
-
- dst_data = dst_len == dst_iter.bi_size
- ? __bio_map_or_bounce(c, dst, dst_iter, WRITE)
- : __bounce_alloc(c, dst_len, WRITE);
-
- ret = __bio_uncompress(c, src, dst_data.b, crc);
- if (ret)
- goto err;
-
- if (dst_data.type != BB_NONE &&
- dst_data.type != BB_VMAP)
- memcpy_to_bio(dst, dst_iter, dst_data.b + (crc.offset << 9));
-err:
- bio_unmap_or_unbounce(c, dst_data);
- return ret;
-}
-
-static int attempt_compress(struct bch_fs *c,
- void *workspace,
- void *dst, size_t dst_len,
- void *src, size_t src_len,
- struct bch_compression_opt compression)
-{
- enum bch_compression_type compression_type =
- __bch2_compression_opt_to_type[compression.type];
-
- switch (compression_type) {
- case BCH_COMPRESSION_TYPE_lz4:
- if (compression.level < LZ4HC_MIN_CLEVEL) {
- int len = src_len;
- int ret = LZ4_compress_destSize(
- src, dst,
- &len, dst_len,
- workspace);
- if (len < src_len)
- return -len;
-
- return ret;
- } else {
- int ret = LZ4_compress_HC(
- src, dst,
- src_len, dst_len,
- compression.level,
- workspace);
-
- return ret ?: -1;
- }
- case BCH_COMPRESSION_TYPE_gzip: {
- z_stream strm = {
- .next_in = src,
- .avail_in = src_len,
- .next_out = dst,
- .avail_out = dst_len,
- };
-
- zlib_set_workspace(&strm, workspace);
- zlib_deflateInit2(&strm,
- compression.level
- ? clamp_t(unsigned, compression.level,
- Z_BEST_SPEED, Z_BEST_COMPRESSION)
- : Z_DEFAULT_COMPRESSION,
- Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL,
- Z_DEFAULT_STRATEGY);
-
- if (zlib_deflate(&strm, Z_FINISH) != Z_STREAM_END)
- return 0;
-
- if (zlib_deflateEnd(&strm) != Z_OK)
- return 0;
-
- return strm.total_out;
- }
- case BCH_COMPRESSION_TYPE_zstd: {
- /*
- * rescale:
- * zstd max compression level is 22, our max level is 15
- */
- unsigned level = min((compression.level * 3) / 2, zstd_max_clevel());
- ZSTD_parameters params = zstd_get_params(level, c->opts.encoded_extent_max);
- ZSTD_CCtx *ctx = zstd_init_cctx(workspace, c->zstd_workspace_size);
-
- /*
- * ZSTD requires that when we decompress we pass in the exact
- * compressed size - rounding it up to the nearest sector
- * doesn't work, so we use the first 4 bytes of the buffer for
- * that.
- *
- * Additionally, the ZSTD code seems to have a bug where it will
- * write just past the end of the buffer - so subtract a fudge
- * factor (7 bytes) from the dst buffer size to account for
- * that.
- */
- size_t len = zstd_compress_cctx(ctx,
- dst + 4, dst_len - 4 - 7,
- src, src_len,
- &params);
- if (zstd_is_error(len))
- return 0;
-
- *((__le32 *) dst) = cpu_to_le32(len);
- return len + 4;
- }
- default:
- BUG();
- }
-}
-
-static unsigned __bio_compress(struct bch_fs *c,
- struct bio *dst, size_t *dst_len,
- struct bio *src, size_t *src_len,
- struct bch_compression_opt compression)
-{
- struct bbuf src_data = { NULL }, dst_data = { NULL };
- void *workspace;
- enum bch_compression_type compression_type =
- __bch2_compression_opt_to_type[compression.type];
- unsigned pad;
- int ret = 0;
-
- BUG_ON(compression_type >= BCH_COMPRESSION_TYPE_NR);
- BUG_ON(!mempool_initialized(&c->compress_workspace[compression_type]));
-
- /* If it's only one block, don't bother trying to compress: */
- if (src->bi_iter.bi_size <= c->opts.block_size)
- return BCH_COMPRESSION_TYPE_incompressible;
-
- dst_data = bio_map_or_bounce(c, dst, WRITE);
- src_data = bio_map_or_bounce(c, src, READ);
-
- workspace = mempool_alloc(&c->compress_workspace[compression_type], GFP_NOFS);
-
- *src_len = src->bi_iter.bi_size;
- *dst_len = dst->bi_iter.bi_size;
-
- /*
- * XXX: this algorithm sucks when the compression code doesn't tell us
- * how much would fit, like LZ4 does:
- */
- while (1) {
- if (*src_len <= block_bytes(c)) {
- ret = -1;
- break;
- }
-
- ret = attempt_compress(c, workspace,
- dst_data.b, *dst_len,
- src_data.b, *src_len,
- compression);
- if (ret > 0) {
- *dst_len = ret;
- ret = 0;
- break;
- }
-
- /* Didn't fit: should we retry with a smaller amount? */
- if (*src_len <= *dst_len) {
- ret = -1;
- break;
- }
-
- /*
- * If ret is negative, it's a hint as to how much data would fit
- */
- BUG_ON(-ret >= *src_len);
-
- if (ret < 0)
- *src_len = -ret;
- else
- *src_len -= (*src_len - *dst_len) / 2;
- *src_len = round_down(*src_len, block_bytes(c));
- }
-
- mempool_free(workspace, &c->compress_workspace[compression_type]);
-
- if (ret)
- goto err;
-
- /* Didn't get smaller: */
- if (round_up(*dst_len, block_bytes(c)) >= *src_len)
- goto err;
-
- pad = round_up(*dst_len, block_bytes(c)) - *dst_len;
-
- memset(dst_data.b + *dst_len, 0, pad);
- *dst_len += pad;
-
- if (dst_data.type != BB_NONE &&
- dst_data.type != BB_VMAP)
- memcpy_to_bio(dst, dst->bi_iter, dst_data.b);
-
- BUG_ON(!*dst_len || *dst_len > dst->bi_iter.bi_size);
- BUG_ON(!*src_len || *src_len > src->bi_iter.bi_size);
- BUG_ON(*dst_len & (block_bytes(c) - 1));
- BUG_ON(*src_len & (block_bytes(c) - 1));
- ret = compression_type;
-out:
- bio_unmap_or_unbounce(c, src_data);
- bio_unmap_or_unbounce(c, dst_data);
- return ret;
-err:
- ret = BCH_COMPRESSION_TYPE_incompressible;
- goto out;
-}
-
-unsigned bch2_bio_compress(struct bch_fs *c,
- struct bio *dst, size_t *dst_len,
- struct bio *src, size_t *src_len,
- unsigned compression_opt)
-{
- unsigned orig_dst = dst->bi_iter.bi_size;
- unsigned orig_src = src->bi_iter.bi_size;
- unsigned compression_type;
-
- /* Don't consume more than BCH_ENCODED_EXTENT_MAX from @src: */
- src->bi_iter.bi_size = min_t(unsigned, src->bi_iter.bi_size,
- c->opts.encoded_extent_max);
- /* Don't generate a bigger output than input: */
- dst->bi_iter.bi_size = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
-
- compression_type =
- __bio_compress(c, dst, dst_len, src, src_len,
- bch2_compression_decode(compression_opt));
-
- dst->bi_iter.bi_size = orig_dst;
- src->bi_iter.bi_size = orig_src;
- return compression_type;
-}
-
-static int __bch2_fs_compress_init(struct bch_fs *, u64);
-
-#define BCH_FEATURE_none 0
-
-static const unsigned bch2_compression_opt_to_feature[] = {
-#define x(t, n) [BCH_COMPRESSION_OPT_##t] = BCH_FEATURE_##t,
- BCH_COMPRESSION_OPTS()
-#undef x
-};
-
-#undef BCH_FEATURE_none
-
-static int __bch2_check_set_has_compressed_data(struct bch_fs *c, u64 f)
-{
- int ret = 0;
-
- if ((c->sb.features & f) == f)
- return 0;
-
- mutex_lock(&c->sb_lock);
-
- if ((c->sb.features & f) == f) {
- mutex_unlock(&c->sb_lock);
- return 0;
- }
-
- ret = __bch2_fs_compress_init(c, c->sb.features|f);
- if (ret) {
- mutex_unlock(&c->sb_lock);
- return ret;
- }
-
- c->disk_sb.sb->features[0] |= cpu_to_le64(f);
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-
- return 0;
-}
-
-int bch2_check_set_has_compressed_data(struct bch_fs *c,
- unsigned compression_opt)
-{
- unsigned compression_type = bch2_compression_decode(compression_opt).type;
-
- BUG_ON(compression_type >= ARRAY_SIZE(bch2_compression_opt_to_feature));
-
- return compression_type
- ? __bch2_check_set_has_compressed_data(c,
- 1ULL << bch2_compression_opt_to_feature[compression_type])
- : 0;
-}
-
-void bch2_fs_compress_exit(struct bch_fs *c)
-{
- unsigned i;
-
- mempool_exit(&c->decompress_workspace);
- for (i = 0; i < ARRAY_SIZE(c->compress_workspace); i++)
- mempool_exit(&c->compress_workspace[i]);
- mempool_exit(&c->compression_bounce[WRITE]);
- mempool_exit(&c->compression_bounce[READ]);
-}
-
-static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
-{
- size_t decompress_workspace_size = 0;
- ZSTD_parameters params = zstd_get_params(zstd_max_clevel(),
- c->opts.encoded_extent_max);
-
- c->zstd_workspace_size = zstd_cctx_workspace_bound(&params.cParams);
-
- struct {
- unsigned feature;
- enum bch_compression_type type;
- size_t compress_workspace;
- size_t decompress_workspace;
- } compression_types[] = {
- { BCH_FEATURE_lz4, BCH_COMPRESSION_TYPE_lz4,
- max_t(size_t, LZ4_MEM_COMPRESS, LZ4HC_MEM_COMPRESS),
- 0 },
- { BCH_FEATURE_gzip, BCH_COMPRESSION_TYPE_gzip,
- zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL),
- zlib_inflate_workspacesize(), },
- { BCH_FEATURE_zstd, BCH_COMPRESSION_TYPE_zstd,
- c->zstd_workspace_size,
- zstd_dctx_workspace_bound() },
- }, *i;
- bool have_compressed = false;
-
- for (i = compression_types;
- i < compression_types + ARRAY_SIZE(compression_types);
- i++)
- have_compressed |= (features & (1 << i->feature)) != 0;
-
- if (!have_compressed)
- return 0;
-
- if (!mempool_initialized(&c->compression_bounce[READ]) &&
- mempool_init_kvmalloc_pool(&c->compression_bounce[READ],
- 1, c->opts.encoded_extent_max))
- return -BCH_ERR_ENOMEM_compression_bounce_read_init;
-
- if (!mempool_initialized(&c->compression_bounce[WRITE]) &&
- mempool_init_kvmalloc_pool(&c->compression_bounce[WRITE],
- 1, c->opts.encoded_extent_max))
- return -BCH_ERR_ENOMEM_compression_bounce_write_init;
-
- for (i = compression_types;
- i < compression_types + ARRAY_SIZE(compression_types);
- i++) {
- decompress_workspace_size =
- max(decompress_workspace_size, i->decompress_workspace);
-
- if (!(features & (1 << i->feature)))
- continue;
-
- if (mempool_initialized(&c->compress_workspace[i->type]))
- continue;
-
- if (mempool_init_kvmalloc_pool(
- &c->compress_workspace[i->type],
- 1, i->compress_workspace))
- return -BCH_ERR_ENOMEM_compression_workspace_init;
- }
-
- if (!mempool_initialized(&c->decompress_workspace) &&
- mempool_init_kvmalloc_pool(&c->decompress_workspace,
- 1, decompress_workspace_size))
- return -BCH_ERR_ENOMEM_decompression_workspace_init;
-
- return 0;
-}
-
-static u64 compression_opt_to_feature(unsigned v)
-{
- unsigned type = bch2_compression_decode(v).type;
-
- return BIT_ULL(bch2_compression_opt_to_feature[type]);
-}
-
-int bch2_fs_compress_init(struct bch_fs *c)
-{
- u64 f = c->sb.features;
-
- f |= compression_opt_to_feature(c->opts.compression);
- f |= compression_opt_to_feature(c->opts.background_compression);
-
- return __bch2_fs_compress_init(c, f);
-}
-
-int bch2_opt_compression_parse(struct bch_fs *c, const char *_val, u64 *res,
- struct printbuf *err)
-{
- char *val = kstrdup(_val, GFP_KERNEL);
- char *p = val, *type_str, *level_str;
- struct bch_compression_opt opt = { 0 };
- int ret;
-
- if (!val)
- return -ENOMEM;
-
- type_str = strsep(&p, ":");
- level_str = p;
-
- ret = match_string(bch2_compression_opts, -1, type_str);
- if (ret < 0 && err)
- prt_str(err, "invalid compression type");
- if (ret < 0)
- goto err;
-
- opt.type = ret;
-
- if (level_str) {
- unsigned level;
-
- ret = kstrtouint(level_str, 10, &level);
- if (!ret && !opt.type && level)
- ret = -EINVAL;
- if (!ret && level > 15)
- ret = -EINVAL;
- if (ret < 0 && err)
- prt_str(err, "invalid compression level");
- if (ret < 0)
- goto err;
-
- opt.level = level;
- }
-
- *res = bch2_compression_encode(opt);
-err:
- kfree(val);
- return ret;
-}
-
-void bch2_compression_opt_to_text(struct printbuf *out, u64 v)
-{
- struct bch_compression_opt opt = bch2_compression_decode(v);
-
- if (opt.type < BCH_COMPRESSION_OPT_NR)
- prt_str(out, bch2_compression_opts[opt.type]);
- else
- prt_printf(out, "(unknown compression opt %u)", opt.type);
- if (opt.level)
- prt_printf(out, ":%u", opt.level);
-}
-
-void bch2_opt_compression_to_text(struct printbuf *out,
- struct bch_fs *c,
- struct bch_sb *sb,
- u64 v)
-{
- return bch2_compression_opt_to_text(out, v);
-}
-
-int bch2_opt_compression_validate(u64 v, struct printbuf *err)
-{
- if (!bch2_compression_opt_valid(v)) {
- prt_printf(err, "invalid compression opt %llu", v);
- return -BCH_ERR_invalid_sb_opt_compression;
- }
-
- return 0;
-}
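
Reviewer note, not part of the patch: the comment in attempt_compress() explains why zstd extents carry a 4-byte length header — decompression needs the exact compressed size, not the sector-rounded one. A standalone sketch of that framing, using a host-endian stand-in for the cpu_to_le32()/le32_to_cpup() pair used above:

#include <stdint.h>
#include <string.h>

/* Write the exact payload length, then the payload; __bio_uncompress()
 * reads the length back before calling zstd_decompress_dctx(). */
static size_t zstd_frame_ex(void *dst, const void *payload, uint32_t payload_len)
{
	memcpy(dst, &payload_len, sizeof(payload_len));		/* le32 in the real code */
	memcpy((char *)dst + sizeof(payload_len), payload, payload_len);
	return (size_t)payload_len + sizeof(payload_len);
}
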
diff --git a/fs/bcachefs/compress.h b/fs/bcachefs/compress.h
deleted file mode 100644
index 607fd5e232c9..000000000000
--- a/fs/bcachefs/compress.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_COMPRESS_H
-#define _BCACHEFS_COMPRESS_H
-
-#include "extents_types.h"
-
-static const unsigned __bch2_compression_opt_to_type[] = {
-#define x(t, n) [BCH_COMPRESSION_OPT_##t] = BCH_COMPRESSION_TYPE_##t,
- BCH_COMPRESSION_OPTS()
-#undef x
-};
-
-struct bch_compression_opt {
- u8 type:4,
- level:4;
-};
-
-static inline struct bch_compression_opt __bch2_compression_decode(unsigned v)
-{
- return (struct bch_compression_opt) {
- .type = v & 15,
- .level = v >> 4,
- };
-}
-
-static inline bool bch2_compression_opt_valid(unsigned v)
-{
- struct bch_compression_opt opt = __bch2_compression_decode(v);
-
- return opt.type < ARRAY_SIZE(__bch2_compression_opt_to_type) && !(!opt.type && opt.level);
-}
-
-static inline struct bch_compression_opt bch2_compression_decode(unsigned v)
-{
- return bch2_compression_opt_valid(v)
- ? __bch2_compression_decode(v)
- : (struct bch_compression_opt) { 0 };
-}
-
-static inline unsigned bch2_compression_encode(struct bch_compression_opt opt)
-{
- return opt.type|(opt.level << 4);
-}
-
-static inline enum bch_compression_type bch2_compression_opt_to_type(unsigned v)
-{
- return __bch2_compression_opt_to_type[bch2_compression_decode(v).type];
-}
-
-int bch2_bio_uncompress_inplace(struct bch_fs *, struct bio *,
- struct bch_extent_crc_unpacked *);
-int bch2_bio_uncompress(struct bch_fs *, struct bio *, struct bio *,
- struct bvec_iter, struct bch_extent_crc_unpacked);
-unsigned bch2_bio_compress(struct bch_fs *, struct bio *, size_t *,
- struct bio *, size_t *, unsigned);
-
-int bch2_check_set_has_compressed_data(struct bch_fs *, unsigned);
-void bch2_fs_compress_exit(struct bch_fs *);
-int bch2_fs_compress_init(struct bch_fs *);
-
-void bch2_compression_opt_to_text(struct printbuf *, u64);
-
-int bch2_opt_compression_parse(struct bch_fs *, const char *, u64 *, struct printbuf *);
-void bch2_opt_compression_to_text(struct printbuf *, struct bch_fs *, struct bch_sb *, u64);
-int bch2_opt_compression_validate(u64, struct printbuf *);
-
-#define bch2_opt_compression (struct bch_opt_fn) { \
- .parse = bch2_opt_compression_parse, \
- .to_text = bch2_opt_compression_to_text, \
- .validate = bch2_opt_compression_validate, \
-}
-
-#endif /* _BCACHEFS_COMPRESS_H */
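
Reviewer note, not part of the patch: compression options are packed into a single byte — type in the low nibble, level in the high nibble — which is why bch2_opt_compression_parse() above rejects levels greater than 15. A quick round-trip sketch with illustrative values:

#include <assert.h>

static unsigned compression_encode_ex(unsigned type, unsigned level)
{
	return type | (level << 4);	/* matches bch2_compression_encode() */
}

static void compression_roundtrip_ex(void)
{
	unsigned v = compression_encode_ex(2, 7);	/* type 2, level 7: illustrative values */

	assert((v & 15) == 2);		/* decode type, as in __bch2_compression_decode() */
	assert((v >> 4) == 7);		/* decode level */
}
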
diff --git a/fs/bcachefs/darray.c b/fs/bcachefs/darray.c
deleted file mode 100644
index e86d36d23e9e..000000000000
--- a/fs/bcachefs/darray.c
+++ /dev/null
@@ -1,38 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/log2.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "darray.h"
-
-int __bch2_darray_resize_noprof(darray_char *d, size_t element_size, size_t new_size, gfp_t gfp)
-{
- if (new_size > d->size) {
- new_size = roundup_pow_of_two(new_size);
-
- /*
- * This is a workaround: kvmalloc() doesn't support > INT_MAX
- * allocations, but vmalloc() does.
- * The limit needs to be lifted from kvmalloc, and when it does
- * we'll go back to just using that.
- */
- size_t bytes;
- if (unlikely(check_mul_overflow(new_size, element_size, &bytes)))
- return -ENOMEM;
-
- void *data = likely(bytes < INT_MAX)
- ? kvmalloc_noprof(bytes, gfp)
- : vmalloc_noprof(bytes);
- if (!data)
- return -ENOMEM;
-
- if (d->size)
- memcpy(data, d->data, d->size * element_size);
- if (d->data != d->preallocated)
- kvfree(d->data);
- d->data = data;
- d->size = new_size;
- }
-
- return 0;
-}
diff --git a/fs/bcachefs/darray.h b/fs/bcachefs/darray.h
deleted file mode 100644
index 8f4c3f0665c4..000000000000
--- a/fs/bcachefs/darray.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_DARRAY_H
-#define _BCACHEFS_DARRAY_H
-
-/*
- * Dynamic arrays:
- *
- * Inspired by CCAN's darray
- */
-
-#include <linux/slab.h>
-
-#define DARRAY_PREALLOCATED(_type, _nr) \
-struct { \
- size_t nr, size; \
- _type *data; \
- _type preallocated[_nr]; \
-}
-
-#define DARRAY(_type) DARRAY_PREALLOCATED(_type, 0)
-
-typedef DARRAY(char) darray_char;
-typedef DARRAY(char *) darray_str;
-
-int __bch2_darray_resize_noprof(darray_char *, size_t, size_t, gfp_t);
-
-#define __bch2_darray_resize(...) alloc_hooks(__bch2_darray_resize_noprof(__VA_ARGS__))
-
-#define __darray_resize(_d, _element_size, _new_size, _gfp) \
- (unlikely((_new_size) > (_d)->size) \
- ? __bch2_darray_resize((_d), (_element_size), (_new_size), (_gfp))\
- : 0)
-
-#define darray_resize_gfp(_d, _new_size, _gfp) \
- __darray_resize((darray_char *) (_d), sizeof((_d)->data[0]), (_new_size), _gfp)
-
-#define darray_resize(_d, _new_size) \
- darray_resize_gfp(_d, _new_size, GFP_KERNEL)
-
-#define darray_make_room_gfp(_d, _more, _gfp) \
- darray_resize_gfp((_d), (_d)->nr + (_more), _gfp)
-
-#define darray_make_room(_d, _more) \
- darray_make_room_gfp(_d, _more, GFP_KERNEL)
-
-#define darray_room(_d) ((_d).size - (_d).nr)
-
-#define darray_top(_d) ((_d).data[(_d).nr])
-
-#define darray_push_gfp(_d, _item, _gfp) \
-({ \
- int _ret = darray_make_room_gfp((_d), 1, _gfp); \
- \
- if (!_ret) \
- (_d)->data[(_d)->nr++] = (_item); \
- _ret; \
-})
-
-#define darray_push(_d, _item) darray_push_gfp(_d, _item, GFP_KERNEL)
-
-#define darray_pop(_d) ((_d)->data[--(_d)->nr])
-
-#define darray_first(_d) ((_d).data[0])
-#define darray_last(_d) ((_d).data[(_d).nr - 1])
-
-#define darray_insert_item(_d, pos, _item) \
-({ \
- size_t _pos = (pos); \
- int _ret = darray_make_room((_d), 1); \
- \
- if (!_ret) \
- array_insert_item((_d)->data, (_d)->nr, _pos, (_item)); \
- _ret; \
-})
-
-#define darray_remove_item(_d, _pos) \
- array_remove_item((_d)->data, (_d)->nr, (_pos) - (_d)->data)
-
-#define __darray_for_each(_d, _i) \
- for ((_i) = (_d).data; _i < (_d).data + (_d).nr; _i++)
-
-#define darray_for_each(_d, _i) \
- for (typeof(&(_d).data[0]) _i = (_d).data; _i < (_d).data + (_d).nr; _i++)
-
-#define darray_for_each_reverse(_d, _i) \
- for (typeof(&(_d).data[0]) _i = (_d).data + (_d).nr - 1; _i >= (_d).data; --_i)
-
-#define darray_init(_d) \
-do { \
- (_d)->nr = 0; \
- (_d)->size = ARRAY_SIZE((_d)->preallocated); \
- (_d)->data = (_d)->size ? (_d)->preallocated : NULL; \
-} while (0)
-
-#define darray_exit(_d) \
-do { \
- if (!ARRAY_SIZE((_d)->preallocated) || \
- (_d)->data != (_d)->preallocated) \
- kvfree((_d)->data); \
- darray_init(_d); \
-} while (0)
-
-#endif /* _BCACHEFS_DARRAY_H */
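
Reviewer note, not part of the patch: a short usage sketch for the darray API above (kernel context assumed; the element type, values, and function name are arbitrary — the error handling and the final darray_exit() are the important parts):

#include <linux/printk.h>

static int darray_usage_example(void)
{
	DARRAY(int) nums;
	int ret = 0;

	darray_init(&nums);

	for (int v = 0; v < 10; v++) {
		ret = darray_push(&nums, v);	/* GFP_KERNEL; resizes as needed */
		if (ret)
			goto out;
	}

	darray_for_each(nums, i)		/* i is an int * into nums.data */
		pr_info("%d\n", *i);
out:
	darray_exit(&nums);			/* frees any heap allocation */
	return ret;
}
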
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
deleted file mode 100644
index 8e75a852b358..000000000000
--- a/fs/bcachefs/data_update.c
+++ /dev/null
@@ -1,763 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "alloc_foreground.h"
-#include "bkey_buf.h"
-#include "btree_update.h"
-#include "buckets.h"
-#include "compress.h"
-#include "data_update.h"
-#include "disk_groups.h"
-#include "ec.h"
-#include "error.h"
-#include "extents.h"
-#include "io_write.h"
-#include "keylist.h"
-#include "move.h"
-#include "nocow_locking.h"
-#include "rebalance.h"
-#include "snapshot.h"
-#include "subvolume.h"
-#include "trace.h"
-
-static void bkey_put_dev_refs(struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
-
- bkey_for_each_ptr(ptrs, ptr)
- bch2_dev_put(bch2_dev_have_ref(c, ptr->dev));
-}
-
-static bool bkey_get_dev_refs(struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
-
- bkey_for_each_ptr(ptrs, ptr) {
- if (!bch2_dev_tryget(c, ptr->dev)) {
- bkey_for_each_ptr(ptrs, ptr2) {
- if (ptr2 == ptr)
- break;
- bch2_dev_put(bch2_dev_have_ref(c, ptr2->dev));
- }
- return false;
- }
- }
- return true;
-}
-
-static void bkey_nocow_unlock(struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
-
- bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
- struct bpos bucket = PTR_BUCKET_POS(ca, ptr);
-
- bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
- }
-}
-
-static bool bkey_nocow_lock(struct bch_fs *c, struct moving_context *ctxt, struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
-
- bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
- struct bpos bucket = PTR_BUCKET_POS(ca, ptr);
-
- if (ctxt) {
- bool locked;
-
- move_ctxt_wait_event(ctxt,
- (locked = bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) ||
- list_empty(&ctxt->ios));
-
- if (!locked)
- bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0);
- } else {
- if (!bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) {
- bkey_for_each_ptr(ptrs, ptr2) {
- if (ptr2 == ptr)
- break;
-
- ca = bch2_dev_have_ref(c, ptr2->dev);
- bucket = PTR_BUCKET_POS(ca, ptr2);
- bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
- }
- return false;
- }
- }
- }
- return true;
-}
-
-static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
-{
- if (trace_move_extent_finish_enabled()) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf, c, k);
- trace_move_extent_finish(c, buf.buf);
- printbuf_exit(&buf);
- }
-}
-
-static void trace_move_extent_fail2(struct data_update *m,
- struct bkey_s_c new,
- struct bkey_s_c wrote,
- struct bkey_i *insert,
- const char *msg)
-{
- struct bch_fs *c = m->op.c;
- struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
- const union bch_extent_entry *entry;
- struct bch_extent_ptr *ptr;
- struct extent_ptr_decoded p;
- struct printbuf buf = PRINTBUF;
- unsigned i, rewrites_found = 0;
-
- if (!trace_move_extent_fail_enabled())
- return;
-
- prt_str(&buf, msg);
-
- if (insert) {
- i = 0;
- bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
- if (((1U << i) & m->data_opts.rewrite_ptrs) &&
- (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
- !ptr->cached)
- rewrites_found |= 1U << i;
- i++;
- }
- }
-
- prt_printf(&buf, "\nrewrite ptrs: %u%u%u%u",
- (m->data_opts.rewrite_ptrs & (1 << 0)) != 0,
- (m->data_opts.rewrite_ptrs & (1 << 1)) != 0,
- (m->data_opts.rewrite_ptrs & (1 << 2)) != 0,
- (m->data_opts.rewrite_ptrs & (1 << 3)) != 0);
-
- prt_printf(&buf, "\nrewrites found: %u%u%u%u",
- (rewrites_found & (1 << 0)) != 0,
- (rewrites_found & (1 << 1)) != 0,
- (rewrites_found & (1 << 2)) != 0,
- (rewrites_found & (1 << 3)) != 0);
-
- prt_str(&buf, "\nold: ");
- bch2_bkey_val_to_text(&buf, c, old);
-
- prt_str(&buf, "\nnew: ");
- bch2_bkey_val_to_text(&buf, c, new);
-
- prt_str(&buf, "\nwrote: ");
- bch2_bkey_val_to_text(&buf, c, wrote);
-
- if (insert) {
- prt_str(&buf, "\ninsert: ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
- }
-
- trace_move_extent_fail(c, buf.buf);
- printbuf_exit(&buf);
-}
-
-static int __bch2_data_update_index_update(struct btree_trans *trans,
- struct bch_write_op *op)
-{
- struct bch_fs *c = op->c;
- struct btree_iter iter;
- struct data_update *m =
- container_of(op, struct data_update, op);
- struct keylist *keys = &op->insert_keys;
- struct bkey_buf _new, _insert;
- int ret = 0;
-
- bch2_bkey_buf_init(&_new);
- bch2_bkey_buf_init(&_insert);
- bch2_bkey_buf_realloc(&_insert, c, U8_MAX);
-
- bch2_trans_iter_init(trans, &iter, m->btree_id,
- bkey_start_pos(&bch2_keylist_front(keys)->k),
- BTREE_ITER_slots|BTREE_ITER_intent);
-
- while (1) {
- struct bkey_s_c k;
- struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
- struct bkey_i *insert = NULL;
- struct bkey_i_extent *new;
- const union bch_extent_entry *entry_c;
- union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- struct bch_extent_ptr *ptr;
- const struct bch_extent_ptr *ptr_c;
- struct bpos next_pos;
- bool should_check_enospc;
- s64 i_sectors_delta = 0, disk_sectors_delta = 0;
- unsigned rewrites_found = 0, durability, i;
-
- bch2_trans_begin(trans);
-
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- new = bkey_i_to_extent(bch2_keylist_front(keys));
-
- if (!bch2_extents_match(k, old)) {
- trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
- NULL, "no match:");
- goto nowork;
- }
-
- bkey_reassemble(_insert.k, k);
- insert = _insert.k;
-
- bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
- new = bkey_i_to_extent(_new.k);
- bch2_cut_front(iter.pos, &new->k_i);
-
- bch2_cut_front(iter.pos, insert);
- bch2_cut_back(new->k.p, insert);
- bch2_cut_back(insert->k.p, &new->k_i);
-
- /*
- * @old: extent that we read from
- * @insert: key that we're going to update, initialized from
- * extent currently in btree - same as @old unless we raced with
- * other updates
- * @new: extent with new pointers that we'll be adding to @insert
- *
- * First, drop rewrite_ptrs from @new:
- */
- i = 0;
- bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) {
- if (((1U << i) & m->data_opts.rewrite_ptrs) &&
- (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
- !ptr->cached) {
- bch2_extent_ptr_set_cached(c, &m->op.opts,
- bkey_i_to_s(insert), ptr);
- rewrites_found |= 1U << i;
- }
- i++;
- }
-
- if (m->data_opts.rewrite_ptrs &&
- !rewrites_found &&
- bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
- trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
- goto nowork;
- }
-
- /*
- * A replica that we just wrote might conflict with a replica
- * that we want to keep, due to racing with another move:
- */
-restart_drop_conflicting_replicas:
- extent_for_each_ptr(extent_i_to_s(new), ptr)
- if ((ptr_c = bch2_bkey_has_device_c(bkey_i_to_s_c(insert), ptr->dev)) &&
- !ptr_c->cached) {
- bch2_bkey_drop_ptr_noerror(bkey_i_to_s(&new->k_i), ptr);
- goto restart_drop_conflicting_replicas;
- }
-
- if (!bkey_val_u64s(&new->k)) {
- trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
- goto nowork;
- }
-
- /* Now, drop pointers that conflict with what we just wrote: */
- extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
- if ((ptr = bch2_bkey_has_device(bkey_i_to_s(insert), p.ptr.dev)))
- bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);
-
- durability = bch2_bkey_durability(c, bkey_i_to_s_c(insert)) +
- bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));
-
- /* Now, drop excess replicas: */
- rcu_read_lock();
-restart_drop_extra_replicas:
- bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
- unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);
-
- if (!p.ptr.cached &&
- durability - ptr_durability >= m->op.opts.data_replicas) {
- durability -= ptr_durability;
-
- bch2_extent_ptr_set_cached(c, &m->op.opts,
- bkey_i_to_s(insert), &entry->ptr);
- goto restart_drop_extra_replicas;
- }
- }
- rcu_read_unlock();
-
- /* Finally, add the pointers we just wrote: */
- extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
- bch2_extent_ptr_decoded_append(insert, &p);
-
- bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
- bch2_extent_normalize_by_opts(c, &m->op.opts, bkey_i_to_s(insert));
-
- ret = bch2_sum_sector_overwrites(trans, &iter, insert,
- &should_check_enospc,
- &i_sectors_delta,
- &disk_sectors_delta);
- if (ret)
- goto err;
-
- if (disk_sectors_delta > (s64) op->res.sectors) {
- ret = bch2_disk_reservation_add(c, &op->res,
- disk_sectors_delta - op->res.sectors,
- !should_check_enospc
- ? BCH_DISK_RESERVATION_NOFAIL : 0);
- if (ret)
- goto out;
- }
-
- next_pos = insert->k.p;
-
- /*
- * Check for nonce offset inconsistency:
- * This is debug code - we've been seeing this bug rarely, and
- * it's been hard to reproduce, so this should give us some more
- * information when it does occur:
- */
- int invalid = bch2_bkey_validate(c, bkey_i_to_s_c(insert), __btree_node_type(0, m->btree_id),
- BCH_VALIDATE_commit);
- if (invalid) {
- struct printbuf buf = PRINTBUF;
-
- prt_str(&buf, "about to insert invalid key in data update path");
- prt_str(&buf, "\nold: ");
- bch2_bkey_val_to_text(&buf, c, old);
- prt_str(&buf, "\nk: ");
- bch2_bkey_val_to_text(&buf, c, k);
- prt_str(&buf, "\nnew: ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
-
- bch2_print_string_as_lines(KERN_ERR, buf.buf);
- printbuf_exit(&buf);
-
- bch2_fatal_error(c);
- ret = -EIO;
- goto out;
- }
-
- if (trace_data_update_enabled()) {
- struct printbuf buf = PRINTBUF;
-
- prt_str(&buf, "\nold: ");
- bch2_bkey_val_to_text(&buf, c, old);
- prt_str(&buf, "\nk: ");
- bch2_bkey_val_to_text(&buf, c, k);
- prt_str(&buf, "\nnew: ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
-
- trace_data_update(c, buf.buf);
- printbuf_exit(&buf);
- }
-
- ret = bch2_insert_snapshot_whiteouts(trans, m->btree_id,
- k.k->p, bkey_start_pos(&insert->k)) ?:
- bch2_insert_snapshot_whiteouts(trans, m->btree_id,
- k.k->p, insert->k.p) ?:
- bch2_bkey_set_needs_rebalance(c, insert, &op->opts) ?:
- bch2_trans_update(trans, &iter, insert,
- BTREE_UPDATE_internal_snapshot_node) ?:
- bch2_trans_commit(trans, &op->res,
- NULL,
- BCH_TRANS_COMMIT_no_check_rw|
- BCH_TRANS_COMMIT_no_enospc|
- m->data_opts.btree_insert_flags);
- if (!ret) {
- bch2_btree_iter_set_pos(&iter, next_pos);
-
- this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
- trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i));
- }
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- ret = 0;
- if (ret)
- break;
-next:
- while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
- bch2_keylist_pop_front(keys);
- if (bch2_keylist_empty(keys))
- goto out;
- }
- continue;
-nowork:
- if (m->stats) {
- BUG_ON(k.k->p.offset <= iter.pos.offset);
- atomic64_inc(&m->stats->keys_raced);
- atomic64_add(k.k->p.offset - iter.pos.offset,
- &m->stats->sectors_raced);
- }
-
- count_event(c, move_extent_fail);
-
- bch2_btree_iter_advance(&iter);
- goto next;
- }
-out:
- bch2_trans_iter_exit(trans, &iter);
- bch2_bkey_buf_exit(&_insert, c);
- bch2_bkey_buf_exit(&_new, c);
- BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
- return ret;
-}
-
-int bch2_data_update_index_update(struct bch_write_op *op)
-{
- return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op));
-}
-
-void bch2_data_update_read_done(struct data_update *m,
- struct bch_extent_crc_unpacked crc)
-{
- /* write bio must own pages: */
- BUG_ON(!m->op.wbio.bio.bi_vcnt);
-
- m->op.crc = crc;
- m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;
-
- closure_call(&m->op.cl, bch2_write, NULL, NULL);
-}
-
-void bch2_data_update_exit(struct data_update *update)
-{
- struct bch_fs *c = update->op.c;
- struct bkey_s_c k = bkey_i_to_s_c(update->k.k);
-
- if (c->opts.nocow_enabled)
- bkey_nocow_unlock(c, k);
- bkey_put_dev_refs(c, k);
- bch2_bkey_buf_exit(&update->k, c);
- bch2_disk_reservation_put(c, &update->op.res);
- bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
-}
-
-static void bch2_update_unwritten_extent(struct btree_trans *trans,
- struct data_update *update)
-{
- struct bch_fs *c = update->op.c;
- struct bio *bio = &update->op.wbio.bio;
- struct bkey_i_extent *e;
- struct write_point *wp;
- struct closure cl;
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret;
-
- closure_init_stack(&cl);
- bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);
-
- while (bio_sectors(bio)) {
- unsigned sectors = bio_sectors(bio);
-
- bch2_trans_begin(trans);
-
- bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
- BTREE_ITER_slots);
- ret = lockrestart_do(trans, ({
- k = bch2_btree_iter_peek_slot(&iter);
- bkey_err(k);
- }));
- bch2_trans_iter_exit(trans, &iter);
-
- if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
- break;
-
- e = bkey_extent_init(update->op.insert_keys.top);
- e->k.p = update->op.pos;
-
- ret = bch2_alloc_sectors_start_trans(trans,
- update->op.target,
- false,
- update->op.write_point,
- &update->op.devs_have,
- update->op.nr_replicas,
- update->op.nr_replicas,
- update->op.watermark,
- 0, &cl, &wp);
- if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
- bch2_trans_unlock(trans);
- closure_sync(&cl);
- continue;
- }
-
- bch_err_fn_ratelimited(c, ret);
-
- if (ret)
- return;
-
- sectors = min(sectors, wp->sectors_free);
-
- bch2_key_resize(&e->k, sectors);
-
- bch2_open_bucket_get(c, wp, &update->op.open_buckets);
- bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
- bch2_alloc_sectors_done(c, wp);
-
- bio_advance(bio, sectors << 9);
- update->op.pos.offset += sectors;
-
- extent_for_each_ptr(extent_i_to_s(e), ptr)
- ptr->unwritten = true;
- bch2_keylist_push(&update->op.insert_keys);
-
- ret = __bch2_data_update_index_update(trans, &update->op);
-
- bch2_open_buckets_put(c, &update->op.open_buckets);
-
- if (ret)
- break;
- }
-
- if (closure_nr_remaining(&cl) != 1) {
- bch2_trans_unlock(trans);
- closure_sync(&cl);
- }
-}
-
-void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c,
- struct bch_io_opts *io_opts,
- struct data_update_opts *data_opts)
-{
- printbuf_tabstop_push(out, 20);
- prt_str(out, "rewrite ptrs:\t");
- bch2_prt_u64_base2(out, data_opts->rewrite_ptrs);
- prt_newline(out);
-
- prt_str(out, "kill ptrs:\t");
- bch2_prt_u64_base2(out, data_opts->kill_ptrs);
- prt_newline(out);
-
- prt_str(out, "target:\t");
- bch2_target_to_text(out, c, data_opts->target);
- prt_newline(out);
-
- prt_str(out, "compression:\t");
- bch2_compression_opt_to_text(out, background_compression(*io_opts));
- prt_newline(out);
-
- prt_str(out, "opts.replicas:\t");
- prt_u64(out, io_opts->data_replicas);
-
- prt_str(out, "extra replicas:\t");
- prt_u64(out, data_opts->extra_replicas);
-}
-
-void bch2_data_update_to_text(struct printbuf *out, struct data_update *m)
-{
- bch2_bkey_val_to_text(out, m->op.c, bkey_i_to_s_c(m->k.k));
- prt_newline(out);
- bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts);
-}
-
-int bch2_extent_drop_ptrs(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k,
- struct bch_io_opts *io_opts,
- struct data_update_opts *data_opts)
-{
- struct bch_fs *c = trans->c;
- struct bkey_i *n;
- int ret;
-
- n = bch2_bkey_make_mut_noupdate(trans, k);
- ret = PTR_ERR_OR_ZERO(n);
- if (ret)
- return ret;
-
- while (data_opts->kill_ptrs) {
- unsigned i = 0, drop = __fls(data_opts->kill_ptrs);
-
- bch2_bkey_drop_ptrs_noerror(bkey_i_to_s(n), ptr, i++ == drop);
- data_opts->kill_ptrs ^= 1U << drop;
- }
-
- /*
-	 * If the new extent no longer has any pointers, bch2_extent_normalize_by_opts()
- * will do the appropriate thing with it (turning it into a
- * KEY_TYPE_error key, or just a discard if it was a cached extent)
- */
- bch2_extent_normalize_by_opts(c, io_opts, bkey_i_to_s(n));
-
- /*
- * Since we're not inserting through an extent iterator
- * (BTREE_ITER_all_snapshots iterators aren't extent iterators),
- * we aren't using the extent overwrite path to delete, we're
- * just using the normal key deletion path:
- */
- if (bkey_deleted(&n->k) && !(iter->flags & BTREE_ITER_is_extents))
- n->k.size = 0;
-
- return bch2_trans_relock(trans) ?:
- bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?:
- bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
-}
-
-int bch2_data_update_init(struct btree_trans *trans,
- struct btree_iter *iter,
- struct moving_context *ctxt,
- struct data_update *m,
- struct write_point_specifier wp,
- struct bch_io_opts io_opts,
- struct data_update_opts data_opts,
- enum btree_id btree_id,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
- int ret = 0;
-
- /*
-	 * If the fs is corrupt, we may have a key for a snapshot node that doesn't exist,
- * and we have to check for this because we go rw before repairing the
- * snapshots table - just skip it, we can move it later.
- */
- if (unlikely(k.k->p.snapshot && !bch2_snapshot_equiv(c, k.k->p.snapshot)))
- return -BCH_ERR_data_update_done;
-
- if (!bkey_get_dev_refs(c, k))
- return -BCH_ERR_data_update_done;
-
- if (c->opts.nocow_enabled &&
- !bkey_nocow_lock(c, ctxt, k)) {
- bkey_put_dev_refs(c, k);
- return -BCH_ERR_nocow_lock_blocked;
- }
-
- bch2_bkey_buf_init(&m->k);
- bch2_bkey_buf_reassemble(&m->k, c, k);
- m->btree_id = btree_id;
- m->data_opts = data_opts;
- m->ctxt = ctxt;
- m->stats = ctxt ? ctxt->stats : NULL;
-
- bch2_write_op_init(&m->op, c, io_opts);
- m->op.pos = bkey_start_pos(k.k);
- m->op.version = k.k->bversion;
- m->op.target = data_opts.target;
- m->op.write_point = wp;
- m->op.nr_replicas = 0;
- m->op.flags |= BCH_WRITE_PAGES_STABLE|
- BCH_WRITE_PAGES_OWNED|
- BCH_WRITE_DATA_ENCODED|
- BCH_WRITE_MOVE|
- m->data_opts.write_flags;
- m->op.compression_opt = background_compression(io_opts);
- m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;
-
- unsigned durability_have = 0, durability_removing = 0;
-
- i = 0;
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- if (!p.ptr.cached) {
- rcu_read_lock();
- if (BIT(i) & m->data_opts.rewrite_ptrs) {
- if (crc_is_compressed(p.crc))
- reserve_sectors += k.k->size;
-
- m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
- durability_removing += bch2_extent_ptr_desired_durability(c, &p);
- } else if (!(BIT(i) & m->data_opts.kill_ptrs)) {
- bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
- durability_have += bch2_extent_ptr_durability(c, &p);
- }
- rcu_read_unlock();
- }
-
- /*
- * op->csum_type is normally initialized from the fs/file's
- * current options - but if an extent is encrypted, we require
- * that it stays encrypted:
- */
- if (bch2_csum_type_is_encryption(p.crc.csum_type)) {
- m->op.nonce = p.crc.nonce + p.crc.offset;
- m->op.csum_type = p.crc.csum_type;
- }
-
- if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
- m->op.incompressible = true;
-
- i++;
- }
-
- unsigned durability_required = max(0, (int) (io_opts.data_replicas - durability_have));
-
- /*
- * If current extent durability is less than io_opts.data_replicas,
- * we're not trying to rereplicate the extent up to data_replicas here -
- * unless extra_replicas was specified
- *
- * Increasing replication is an explicit operation triggered by
- * rereplicate, currently, so that users don't get an unexpected -ENOSPC
- */
- m->op.nr_replicas = min(durability_removing, durability_required) +
- m->data_opts.extra_replicas;
-
- /*
- * If device(s) were set to durability=0 after data was written to them
-	 * we can end up with a durability=0 extent, and the normal algorithm
- * that tries not to increase durability doesn't work:
- */
- if (!(durability_have + durability_removing))
- m->op.nr_replicas = max((unsigned) m->op.nr_replicas, 1);
-
- m->op.nr_replicas_required = m->op.nr_replicas;
-
- /*
- * It might turn out that we don't need any new replicas, if the
- * replicas or durability settings have been changed since the extent
- * was written:
- */
- if (!m->op.nr_replicas) {
- m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs;
- m->data_opts.rewrite_ptrs = 0;
- /* if iter == NULL, it's just a promote */
- if (iter)
- ret = bch2_extent_drop_ptrs(trans, iter, k, &io_opts, &m->data_opts);
- goto out;
- }
-
- if (reserve_sectors) {
- ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
- m->data_opts.extra_replicas
- ? 0
- : BCH_DISK_RESERVATION_NOFAIL);
- if (ret)
- goto out;
- }
-
- if (bkey_extent_is_unwritten(k)) {
- bch2_update_unwritten_extent(trans, m);
- goto out;
- }
-
- return 0;
-out:
- bch2_data_update_exit(m);
- return ret ?: -BCH_ERR_data_update_done;
-}
-
-void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- unsigned i = 0;
-
- bkey_for_each_ptr(ptrs, ptr) {
- if ((opts->rewrite_ptrs & (1U << i)) && ptr->cached) {
- opts->kill_ptrs |= 1U << i;
- opts->rewrite_ptrs ^= 1U << i;
- }
-
- i++;
- }
-}
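
The function above, bch2_data_update_opts_normalize(), treats rewrite_ptrs and kill_ptrs as per-pointer bitmasks, indexed by a pointer's position within the extent. The following is a small standalone sketch of that bitmask handling only - it is illustrative, not part of the deleted file, and struct toy_opts and normalize() are invented stand-ins - showing how a cached pointer that was marked for rewrite gets demoted to a plain kill:

#include <stdbool.h>
#include <stdio.h>

struct toy_opts {			/* stand-in for struct data_update_opts */
	unsigned	rewrite_ptrs;
	unsigned	kill_ptrs;
};

static void normalize(struct toy_opts *opts, const bool *ptr_cached, unsigned nr_ptrs)
{
	for (unsigned i = 0; i < nr_ptrs; i++)
		if ((opts->rewrite_ptrs & (1U << i)) && ptr_cached[i]) {
			opts->kill_ptrs    |= 1U << i;	/* cached: just drop it */
			opts->rewrite_ptrs ^= 1U << i;	/* clear the rewrite bit */
		}
}

int main(void)
{
	bool cached[3] = { false, true, false };	/* pointer 1 is cached */
	struct toy_opts opts = { .rewrite_ptrs = 0x7, .kill_ptrs = 0 };

	normalize(&opts, cached, 3);
	printf("rewrite %x kill %x\n", opts.rewrite_ptrs, opts.kill_ptrs);	/* 5 2 */
	return 0;
}
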
diff --git a/fs/bcachefs/data_update.h b/fs/bcachefs/data_update.h
deleted file mode 100644
index e4b50723428e..000000000000
--- a/fs/bcachefs/data_update.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _BCACHEFS_DATA_UPDATE_H
-#define _BCACHEFS_DATA_UPDATE_H
-
-#include "bkey_buf.h"
-#include "io_write_types.h"
-
-struct moving_context;
-
-struct data_update_opts {
- unsigned rewrite_ptrs;
- unsigned kill_ptrs;
- u16 target;
- u8 extra_replicas;
- unsigned btree_insert_flags;
- unsigned write_flags;
-};
-
-void bch2_data_update_opts_to_text(struct printbuf *, struct bch_fs *,
- struct bch_io_opts *, struct data_update_opts *);
-
-struct data_update {
- /* extent being updated: */
- enum btree_id btree_id;
- struct bkey_buf k;
- struct data_update_opts data_opts;
- struct moving_context *ctxt;
- struct bch_move_stats *stats;
- struct bch_write_op op;
-};
-
-void bch2_data_update_to_text(struct printbuf *, struct data_update *);
-
-int bch2_data_update_index_update(struct bch_write_op *);
-
-void bch2_data_update_read_done(struct data_update *,
- struct bch_extent_crc_unpacked);
-
-int bch2_extent_drop_ptrs(struct btree_trans *,
- struct btree_iter *,
- struct bkey_s_c,
- struct bch_io_opts *,
- struct data_update_opts *);
-
-void bch2_data_update_exit(struct data_update *);
-int bch2_data_update_init(struct btree_trans *, struct btree_iter *,
- struct moving_context *,
- struct data_update *,
- struct write_point_specifier,
- struct bch_io_opts, struct data_update_opts,
- enum btree_id, struct bkey_s_c);
-void bch2_data_update_opts_normalize(struct bkey_s_c, struct data_update_opts *);
-
-#endif /* _BCACHEFS_DATA_UPDATE_H */
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
deleted file mode 100644
index 45aec1afdb0e..000000000000
--- a/fs/bcachefs/debug.c
+++ /dev/null
@@ -1,951 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Assorted bcachefs debug code
- *
- * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
- * Copyright 2012 Google, Inc.
- */
-
-#include "bcachefs.h"
-#include "bkey_methods.h"
-#include "btree_cache.h"
-#include "btree_io.h"
-#include "btree_iter.h"
-#include "btree_locking.h"
-#include "btree_update.h"
-#include "btree_update_interior.h"
-#include "buckets.h"
-#include "debug.h"
-#include "error.h"
-#include "extents.h"
-#include "fsck.h"
-#include "inode.h"
-#include "super.h"
-
-#include <linux/console.h>
-#include <linux/debugfs.h>
-#include <linux/module.h>
-#include <linux/random.h>
-#include <linux/seq_file.h>
-
-static struct dentry *bch_debug;
-
-static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
- struct extent_ptr_decoded pick)
-{
- struct btree *v = c->verify_data;
- struct btree_node *n_ondisk = c->verify_ondisk;
- struct btree_node *n_sorted = c->verify_data->data;
- struct bset *sorted, *inmemory = &b->data->keys;
- struct bio *bio;
- bool failed = false, saw_error = false;
-
- struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
- if (!ca)
- return false;
-
- bio = bio_alloc_bioset(ca->disk_sb.bdev,
- buf_pages(n_sorted, btree_buf_bytes(b)),
- REQ_OP_READ|REQ_META,
- GFP_NOFS,
- &c->btree_bio);
- bio->bi_iter.bi_sector = pick.ptr.offset;
- bch2_bio_map(bio, n_sorted, btree_buf_bytes(b));
-
- submit_bio_wait(bio);
-
- bio_put(bio);
- percpu_ref_put(&ca->io_ref);
-
- memcpy(n_ondisk, n_sorted, btree_buf_bytes(b));
-
- v->written = 0;
- if (bch2_btree_node_read_done(c, ca, v, false, &saw_error) || saw_error)
- return false;
-
- n_sorted = c->verify_data->data;
- sorted = &n_sorted->keys;
-
- if (inmemory->u64s != sorted->u64s ||
- memcmp(inmemory->start,
- sorted->start,
- vstruct_end(inmemory) - (void *) inmemory->start)) {
- unsigned offset = 0, sectors;
- struct bset *i;
- unsigned j;
-
- console_lock();
-
- printk(KERN_ERR "*** in memory:\n");
- bch2_dump_bset(c, b, inmemory, 0);
-
- printk(KERN_ERR "*** read back in:\n");
- bch2_dump_bset(c, v, sorted, 0);
-
- while (offset < v->written) {
- if (!offset) {
- i = &n_ondisk->keys;
- sectors = vstruct_blocks(n_ondisk, c->block_bits) <<
- c->block_bits;
- } else {
- struct btree_node_entry *bne =
- (void *) n_ondisk + (offset << 9);
- i = &bne->keys;
-
- sectors = vstruct_blocks(bne, c->block_bits) <<
- c->block_bits;
- }
-
- printk(KERN_ERR "*** on disk block %u:\n", offset);
- bch2_dump_bset(c, b, i, offset);
-
- offset += sectors;
- }
-
- for (j = 0; j < le16_to_cpu(inmemory->u64s); j++)
- if (inmemory->_data[j] != sorted->_data[j])
- break;
-
- console_unlock();
- bch_err(c, "verify failed at key %u", j);
-
- failed = true;
- }
-
- if (v->written != b->written) {
- bch_err(c, "written wrong: expected %u, got %u",
- b->written, v->written);
- failed = true;
- }
-
- return failed;
-}
-
-void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
-{
- struct bkey_ptrs_c ptrs;
- struct extent_ptr_decoded p;
- const union bch_extent_entry *entry;
- struct btree *v;
- struct bset *inmemory = &b->data->keys;
- struct bkey_packed *k;
- bool failed = false;
-
- if (c->opts.nochanges)
- return;
-
- bch2_btree_node_io_lock(b);
- mutex_lock(&c->verify_lock);
-
- if (!c->verify_ondisk) {
- c->verify_ondisk = kvmalloc(btree_buf_bytes(b), GFP_KERNEL);
- if (!c->verify_ondisk)
- goto out;
- }
-
- if (!c->verify_data) {
- c->verify_data = __bch2_btree_node_mem_alloc(c);
- if (!c->verify_data)
- goto out;
-
- list_del_init(&c->verify_data->list);
- }
-
- BUG_ON(b->nsets != 1);
-
- for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_p_next(k))
- if (k->type == KEY_TYPE_btree_ptr_v2)
- ((struct bch_btree_ptr_v2 *) bkeyp_val(&b->format, k))->mem_ptr = 0;
-
- v = c->verify_data;
- bkey_copy(&v->key, &b->key);
- v->c.level = b->c.level;
- v->c.btree_id = b->c.btree_id;
- bch2_btree_keys_init(v);
-
- ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(&b->key));
- bkey_for_each_ptr_decode(&b->key.k, ptrs, p, entry)
- failed |= bch2_btree_verify_replica(c, b, p);
-
- if (failed) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
- bch2_fs_fatal_error(c, ": btree node verify failed for: %s\n", buf.buf);
- printbuf_exit(&buf);
- }
-out:
- mutex_unlock(&c->verify_lock);
- bch2_btree_node_io_unlock(b);
-}
-
-void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
- const struct btree *b)
-{
- struct btree_node *n_ondisk = NULL;
- struct extent_ptr_decoded pick;
- struct bch_dev *ca;
- struct bio *bio = NULL;
- unsigned offset = 0;
- int ret;
-
- if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key), NULL, &pick) <= 0) {
- prt_printf(out, "error getting device to read from: invalid device\n");
- return;
- }
-
- ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
- if (!ca) {
- prt_printf(out, "error getting device to read from: not online\n");
- return;
- }
-
- n_ondisk = kvmalloc(btree_buf_bytes(b), GFP_KERNEL);
- if (!n_ondisk) {
- prt_printf(out, "memory allocation failure\n");
- goto out;
- }
-
- bio = bio_alloc_bioset(ca->disk_sb.bdev,
- buf_pages(n_ondisk, btree_buf_bytes(b)),
- REQ_OP_READ|REQ_META,
- GFP_NOFS,
- &c->btree_bio);
- bio->bi_iter.bi_sector = pick.ptr.offset;
- bch2_bio_map(bio, n_ondisk, btree_buf_bytes(b));
-
- ret = submit_bio_wait(bio);
- if (ret) {
- prt_printf(out, "IO error reading btree node: %s\n", bch2_err_str(ret));
- goto out;
- }
-
- while (offset < btree_sectors(c)) {
- struct bset *i;
- struct nonce nonce;
- struct bch_csum csum;
- struct bkey_packed *k;
- unsigned sectors;
-
- if (!offset) {
- i = &n_ondisk->keys;
-
- if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i))) {
- prt_printf(out, "unknown checksum type at offset %u: %llu\n",
- offset, BSET_CSUM_TYPE(i));
- goto out;
- }
-
- nonce = btree_nonce(i, offset << 9);
- csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, n_ondisk);
-
- if (bch2_crc_cmp(csum, n_ondisk->csum)) {
- prt_printf(out, "invalid checksum\n");
- goto out;
- }
-
- bset_encrypt(c, i, offset << 9);
-
- sectors = vstruct_sectors(n_ondisk, c->block_bits);
- } else {
- struct btree_node_entry *bne = (void *) n_ondisk + (offset << 9);
-
- i = &bne->keys;
-
- if (i->seq != n_ondisk->keys.seq)
- break;
-
- if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i))) {
- prt_printf(out, "unknown checksum type at offset %u: %llu\n",
- offset, BSET_CSUM_TYPE(i));
- goto out;
- }
-
- nonce = btree_nonce(i, offset << 9);
- csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
-
- if (bch2_crc_cmp(csum, bne->csum)) {
- prt_printf(out, "invalid checksum");
- goto out;
- }
-
- bset_encrypt(c, i, offset << 9);
-
- sectors = vstruct_sectors(bne, c->block_bits);
- }
-
- prt_printf(out, " offset %u version %u, journal seq %llu\n",
- offset,
- le16_to_cpu(i->version),
- le64_to_cpu(i->journal_seq));
- offset += sectors;
-
- printbuf_indent_add(out, 4);
-
- for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k)) {
- struct bkey u;
-
- bch2_bkey_val_to_text(out, c, bkey_disassemble(b, k, &u));
- prt_newline(out);
- }
-
- printbuf_indent_sub(out, 4);
- }
-out:
- if (bio)
- bio_put(bio);
- kvfree(n_ondisk);
- percpu_ref_put(&ca->io_ref);
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-/* XXX: bch_fs refcounting */
-
-struct dump_iter {
- struct bch_fs *c;
- enum btree_id id;
- struct bpos from;
- struct bpos prev_node;
- u64 iter;
-
- struct printbuf buf;
-
- char __user *ubuf; /* destination user buffer */
- size_t size; /* size of requested read */
- ssize_t ret; /* bytes read so far */
-};
-
-static ssize_t flush_buf(struct dump_iter *i)
-{
- if (i->buf.pos) {
- size_t bytes = min_t(size_t, i->buf.pos, i->size);
- int copied = bytes - copy_to_user(i->ubuf, i->buf.buf, bytes);
-
- i->ret += copied;
- i->ubuf += copied;
- i->size -= copied;
- i->buf.pos -= copied;
- memmove(i->buf.buf, i->buf.buf + copied, i->buf.pos);
-
- if (copied != bytes)
- return -EFAULT;
- }
-
- return i->size ? 0 : i->ret;
-}
-
-static int bch2_dump_open(struct inode *inode, struct file *file)
-{
- struct btree_debug *bd = inode->i_private;
- struct dump_iter *i;
-
- i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
- if (!i)
- return -ENOMEM;
-
- file->private_data = i;
- i->from = POS_MIN;
- i->iter = 0;
- i->c = container_of(bd, struct bch_fs, btree_debug[bd->id]);
- i->id = bd->id;
- i->buf = PRINTBUF;
-
- return 0;
-}
-
-static int bch2_dump_release(struct inode *inode, struct file *file)
-{
- struct dump_iter *i = file->private_data;
-
- printbuf_exit(&i->buf);
- kfree(i);
- return 0;
-}
-
-static ssize_t bch2_read_btree(struct file *file, char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct dump_iter *i = file->private_data;
-
- i->ubuf = buf;
- i->size = size;
- i->ret = 0;
-
- return flush_buf(i) ?:
- bch2_trans_run(i->c,
- for_each_btree_key(trans, iter, i->id, i->from,
- BTREE_ITER_prefetch|
- BTREE_ITER_all_snapshots, k, ({
- bch2_bkey_val_to_text(&i->buf, i->c, k);
- prt_newline(&i->buf);
- bch2_trans_unlock(trans);
- i->from = bpos_successor(iter.pos);
- flush_buf(i);
- }))) ?:
- i->ret;
-}
-
-static const struct file_operations btree_debug_ops = {
- .owner = THIS_MODULE,
- .open = bch2_dump_open,
- .release = bch2_dump_release,
- .read = bch2_read_btree,
-};
-
-static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct dump_iter *i = file->private_data;
-
- i->ubuf = buf;
- i->size = size;
- i->ret = 0;
-
- ssize_t ret = flush_buf(i);
- if (ret)
- return ret;
-
- if (bpos_eq(SPOS_MAX, i->from))
- return i->ret;
-
- return bch2_trans_run(i->c,
- for_each_btree_node(trans, iter, i->id, i->from, 0, b, ({
- bch2_btree_node_to_text(&i->buf, i->c, b);
- i->from = !bpos_eq(SPOS_MAX, b->key.k.p)
- ? bpos_successor(b->key.k.p)
- : b->key.k.p;
-
- drop_locks_do(trans, flush_buf(i));
- }))) ?: i->ret;
-}
-
-static const struct file_operations btree_format_debug_ops = {
- .owner = THIS_MODULE,
- .open = bch2_dump_open,
- .release = bch2_dump_release,
- .read = bch2_read_btree_formats,
-};
-
-static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct dump_iter *i = file->private_data;
-
- i->ubuf = buf;
- i->size = size;
- i->ret = 0;
-
- return flush_buf(i) ?:
- bch2_trans_run(i->c,
- for_each_btree_key(trans, iter, i->id, i->from,
- BTREE_ITER_prefetch|
- BTREE_ITER_all_snapshots, k, ({
- struct btree_path_level *l =
- &btree_iter_path(trans, &iter)->l[0];
- struct bkey_packed *_k =
- bch2_btree_node_iter_peek(&l->iter, l->b);
-
- if (bpos_gt(l->b->key.k.p, i->prev_node)) {
- bch2_btree_node_to_text(&i->buf, i->c, l->b);
- i->prev_node = l->b->key.k.p;
- }
-
- bch2_bfloat_to_text(&i->buf, l->b, _k);
- bch2_trans_unlock(trans);
- i->from = bpos_successor(iter.pos);
- flush_buf(i);
- }))) ?:
- i->ret;
-}
-
-static const struct file_operations bfloat_failed_debug_ops = {
- .owner = THIS_MODULE,
- .open = bch2_dump_open,
- .release = bch2_dump_release,
- .read = bch2_read_bfloat_failed,
-};
-
-static void bch2_cached_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
- struct btree *b)
-{
- if (!out->nr_tabstops)
- printbuf_tabstop_push(out, 32);
-
- prt_printf(out, "%px btree=%s l=%u\n", b, bch2_btree_id_str(b->c.btree_id), b->c.level);
-
- printbuf_indent_add(out, 2);
-
- bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
- prt_newline(out);
-
- prt_printf(out, "flags:\t");
- prt_bitflags(out, bch2_btree_node_flags, b->flags);
- prt_newline(out);
-
- prt_printf(out, "pcpu read locks:\t%u\n", b->c.lock.readers != NULL);
- prt_printf(out, "written:\t%u\n", b->written);
- prt_printf(out, "writes blocked:\t%u\n", !list_empty_careful(&b->write_blocked));
- prt_printf(out, "will make reachable:\t%lx\n", b->will_make_reachable);
-
- prt_printf(out, "journal pin %px:\t%llu\n",
- &b->writes[0].journal, b->writes[0].journal.seq);
- prt_printf(out, "journal pin %px:\t%llu\n",
- &b->writes[1].journal, b->writes[1].journal.seq);
-
- printbuf_indent_sub(out, 2);
-}
-
-static ssize_t bch2_cached_btree_nodes_read(struct file *file, char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct dump_iter *i = file->private_data;
- struct bch_fs *c = i->c;
- bool done = false;
- ssize_t ret = 0;
-
- i->ubuf = buf;
- i->size = size;
- i->ret = 0;
-
- do {
- struct bucket_table *tbl;
- struct rhash_head *pos;
- struct btree *b;
-
- ret = flush_buf(i);
- if (ret)
- return ret;
-
- rcu_read_lock();
- i->buf.atomic++;
- tbl = rht_dereference_rcu(c->btree_cache.table.tbl,
- &c->btree_cache.table);
- if (i->iter < tbl->size) {
- rht_for_each_entry_rcu(b, pos, tbl, i->iter, hash)
- bch2_cached_btree_node_to_text(&i->buf, c, b);
- i->iter++;
- } else {
- done = true;
- }
- --i->buf.atomic;
- rcu_read_unlock();
- } while (!done);
-
- if (i->buf.allocation_failure)
- ret = -ENOMEM;
-
- if (!ret)
- ret = flush_buf(i);
-
- return ret ?: i->ret;
-}
-
-static const struct file_operations cached_btree_nodes_ops = {
- .owner = THIS_MODULE,
- .open = bch2_dump_open,
- .release = bch2_dump_release,
- .read = bch2_cached_btree_nodes_read,
-};
-
-typedef int (*list_cmp_fn)(const struct list_head *l, const struct list_head *r);
-
-static void list_sort(struct list_head *head, list_cmp_fn cmp)
-{
- struct list_head *pos;
-
- list_for_each(pos, head)
- while (!list_is_last(pos, head) &&
- cmp(pos, pos->next) > 0) {
- struct list_head *pos2, *next = pos->next;
-
- list_del(next);
- list_for_each(pos2, head)
- if (cmp(next, pos2) < 0)
- goto pos_found;
- BUG();
-pos_found:
- list_add_tail(next, pos2);
- }
-}
-
-static int list_ptr_order_cmp(const struct list_head *l, const struct list_head *r)
-{
- return cmp_int(l, r);
-}
-
-static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct dump_iter *i = file->private_data;
- struct bch_fs *c = i->c;
- struct btree_trans *trans;
- ssize_t ret = 0;
-
- i->ubuf = buf;
- i->size = size;
- i->ret = 0;
-restart:
- seqmutex_lock(&c->btree_trans_lock);
- list_sort(&c->btree_trans_list, list_ptr_order_cmp);
-
- list_for_each_entry(trans, &c->btree_trans_list, list) {
- if ((ulong) trans <= i->iter)
- continue;
-
- i->iter = (ulong) trans;
-
- if (!closure_get_not_zero(&trans->ref))
- continue;
-
- u32 seq = seqmutex_unlock(&c->btree_trans_lock);
-
- bch2_btree_trans_to_text(&i->buf, trans);
-
- prt_printf(&i->buf, "backtrace:\n");
- printbuf_indent_add(&i->buf, 2);
- bch2_prt_task_backtrace(&i->buf, trans->locking_wait.task, 0, GFP_KERNEL);
- printbuf_indent_sub(&i->buf, 2);
- prt_newline(&i->buf);
-
- closure_put(&trans->ref);
-
- ret = flush_buf(i);
- if (ret)
- goto unlocked;
-
- if (!seqmutex_relock(&c->btree_trans_lock, seq))
- goto restart;
- }
- seqmutex_unlock(&c->btree_trans_lock);
-unlocked:
- if (i->buf.allocation_failure)
- ret = -ENOMEM;
-
- if (!ret)
- ret = flush_buf(i);
-
- return ret ?: i->ret;
-}
-
-static const struct file_operations btree_transactions_ops = {
- .owner = THIS_MODULE,
- .open = bch2_dump_open,
- .release = bch2_dump_release,
- .read = bch2_btree_transactions_read,
-};
-
-static ssize_t bch2_journal_pins_read(struct file *file, char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct dump_iter *i = file->private_data;
- struct bch_fs *c = i->c;
- bool done = false;
- int err;
-
- i->ubuf = buf;
- i->size = size;
- i->ret = 0;
-
- while (1) {
- err = flush_buf(i);
- if (err)
- return err;
-
- if (!i->size)
- break;
-
- if (done)
- break;
-
- done = bch2_journal_seq_pins_to_text(&i->buf, &c->journal, &i->iter);
- i->iter++;
- }
-
- if (i->buf.allocation_failure)
- return -ENOMEM;
-
- return i->ret;
-}
-
-static const struct file_operations journal_pins_ops = {
- .owner = THIS_MODULE,
- .open = bch2_dump_open,
- .release = bch2_dump_release,
- .read = bch2_journal_pins_read,
-};
-
-static ssize_t bch2_btree_updates_read(struct file *file, char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct dump_iter *i = file->private_data;
- struct bch_fs *c = i->c;
- int err;
-
- i->ubuf = buf;
- i->size = size;
- i->ret = 0;
-
- if (!i->iter) {
- bch2_btree_updates_to_text(&i->buf, c);
- i->iter++;
- }
-
- err = flush_buf(i);
- if (err)
- return err;
-
- if (i->buf.allocation_failure)
- return -ENOMEM;
-
- return i->ret;
-}
-
-static const struct file_operations btree_updates_ops = {
- .owner = THIS_MODULE,
- .open = bch2_dump_open,
- .release = bch2_dump_release,
- .read = bch2_btree_updates_read,
-};
-
-static int btree_transaction_stats_open(struct inode *inode, struct file *file)
-{
- struct bch_fs *c = inode->i_private;
- struct dump_iter *i;
-
- i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
- if (!i)
- return -ENOMEM;
-
- i->iter = 1;
- i->c = c;
- i->buf = PRINTBUF;
- file->private_data = i;
-
- return 0;
-}
-
-static int btree_transaction_stats_release(struct inode *inode, struct file *file)
-{
- struct dump_iter *i = file->private_data;
-
- printbuf_exit(&i->buf);
- kfree(i);
-
- return 0;
-}
-
-static ssize_t btree_transaction_stats_read(struct file *file, char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct dump_iter *i = file->private_data;
- struct bch_fs *c = i->c;
- int err;
-
- i->ubuf = buf;
- i->size = size;
- i->ret = 0;
-
- while (1) {
- struct btree_transaction_stats *s = &c->btree_transaction_stats[i->iter];
-
- err = flush_buf(i);
- if (err)
- return err;
-
- if (!i->size)
- break;
-
- if (i->iter == ARRAY_SIZE(bch2_btree_transaction_fns) ||
- !bch2_btree_transaction_fns[i->iter])
- break;
-
- prt_printf(&i->buf, "%s:\n", bch2_btree_transaction_fns[i->iter]);
- printbuf_indent_add(&i->buf, 2);
-
- mutex_lock(&s->lock);
-
- prt_printf(&i->buf, "Max mem used: %u\n", s->max_mem);
- prt_printf(&i->buf, "Transaction duration:\n");
-
- printbuf_indent_add(&i->buf, 2);
- bch2_time_stats_to_text(&i->buf, &s->duration);
- printbuf_indent_sub(&i->buf, 2);
-
- if (IS_ENABLED(CONFIG_BCACHEFS_LOCK_TIME_STATS)) {
- prt_printf(&i->buf, "Lock hold times:\n");
-
- printbuf_indent_add(&i->buf, 2);
- bch2_time_stats_to_text(&i->buf, &s->lock_hold_times);
- printbuf_indent_sub(&i->buf, 2);
- }
-
- if (s->max_paths_text) {
- prt_printf(&i->buf, "Maximum allocated btree paths (%u):\n", s->nr_max_paths);
-
- printbuf_indent_add(&i->buf, 2);
- prt_str_indented(&i->buf, s->max_paths_text);
- printbuf_indent_sub(&i->buf, 2);
- }
-
- mutex_unlock(&s->lock);
-
- printbuf_indent_sub(&i->buf, 2);
- prt_newline(&i->buf);
- i->iter++;
- }
-
- if (i->buf.allocation_failure)
- return -ENOMEM;
-
- return i->ret;
-}
-
-static const struct file_operations btree_transaction_stats_op = {
- .owner = THIS_MODULE,
- .open = btree_transaction_stats_open,
- .release = btree_transaction_stats_release,
- .read = btree_transaction_stats_read,
-};
-
-/* walk btree transactions until we find a deadlock and print it */
-static void btree_deadlock_to_text(struct printbuf *out, struct bch_fs *c)
-{
- struct btree_trans *trans;
- ulong iter = 0;
-restart:
- seqmutex_lock(&c->btree_trans_lock);
- list_sort(&c->btree_trans_list, list_ptr_order_cmp);
-
- list_for_each_entry(trans, &c->btree_trans_list, list) {
- if ((ulong) trans <= iter)
- continue;
-
- iter = (ulong) trans;
-
- if (!closure_get_not_zero(&trans->ref))
- continue;
-
- u32 seq = seqmutex_unlock(&c->btree_trans_lock);
-
- bool found = bch2_check_for_deadlock(trans, out) != 0;
-
- closure_put(&trans->ref);
-
- if (found)
- return;
-
- if (!seqmutex_relock(&c->btree_trans_lock, seq))
- goto restart;
- }
- seqmutex_unlock(&c->btree_trans_lock);
-}
-
-static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct dump_iter *i = file->private_data;
- struct bch_fs *c = i->c;
- ssize_t ret = 0;
-
- i->ubuf = buf;
- i->size = size;
- i->ret = 0;
-
- if (!i->iter) {
- btree_deadlock_to_text(&i->buf, c);
- i->iter++;
- }
-
- if (i->buf.allocation_failure)
- ret = -ENOMEM;
-
- if (!ret)
- ret = flush_buf(i);
-
- return ret ?: i->ret;
-}
-
-static const struct file_operations btree_deadlock_ops = {
- .owner = THIS_MODULE,
- .open = bch2_dump_open,
- .release = bch2_dump_release,
- .read = bch2_btree_deadlock_read,
-};
-
-void bch2_fs_debug_exit(struct bch_fs *c)
-{
- if (!IS_ERR_OR_NULL(c->fs_debug_dir))
- debugfs_remove_recursive(c->fs_debug_dir);
-}
-
-static void bch2_fs_debug_btree_init(struct bch_fs *c, struct btree_debug *bd)
-{
- struct dentry *d;
-
- d = debugfs_create_dir(bch2_btree_id_str(bd->id), c->btree_debug_dir);
-
- debugfs_create_file("keys", 0400, d, bd, &btree_debug_ops);
-
- debugfs_create_file("formats", 0400, d, bd, &btree_format_debug_ops);
-
- debugfs_create_file("bfloat-failed", 0400, d, bd,
- &bfloat_failed_debug_ops);
-}
-
-void bch2_fs_debug_init(struct bch_fs *c)
-{
- struct btree_debug *bd;
- char name[100];
-
- if (IS_ERR_OR_NULL(bch_debug))
- return;
-
- snprintf(name, sizeof(name), "%pU", c->sb.user_uuid.b);
- c->fs_debug_dir = debugfs_create_dir(name, bch_debug);
- if (IS_ERR_OR_NULL(c->fs_debug_dir))
- return;
-
- debugfs_create_file("cached_btree_nodes", 0400, c->fs_debug_dir,
- c->btree_debug, &cached_btree_nodes_ops);
-
- debugfs_create_file("btree_transactions", 0400, c->fs_debug_dir,
- c->btree_debug, &btree_transactions_ops);
-
- debugfs_create_file("journal_pins", 0400, c->fs_debug_dir,
- c->btree_debug, &journal_pins_ops);
-
- debugfs_create_file("btree_updates", 0400, c->fs_debug_dir,
- c->btree_debug, &btree_updates_ops);
-
- debugfs_create_file("btree_transaction_stats", 0400, c->fs_debug_dir,
- c, &btree_transaction_stats_op);
-
- debugfs_create_file("btree_deadlock", 0400, c->fs_debug_dir,
- c->btree_debug, &btree_deadlock_ops);
-
- c->btree_debug_dir = debugfs_create_dir("btrees", c->fs_debug_dir);
- if (IS_ERR_OR_NULL(c->btree_debug_dir))
- return;
-
- for (bd = c->btree_debug;
- bd < c->btree_debug + ARRAY_SIZE(c->btree_debug);
- bd++) {
- bd->id = bd - c->btree_debug;
- bch2_fs_debug_btree_init(c, bd);
- }
-}
-
-#endif
-
-void bch2_debug_exit(void)
-{
- if (!IS_ERR_OR_NULL(bch_debug))
- debugfs_remove_recursive(bch_debug);
-}
-
-int __init bch2_debug_init(void)
-{
- bch_debug = debugfs_create_dir("bcachefs", NULL);
- return 0;
-}
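
The debugfs read handlers above all follow one pattern: format output into a printbuf, flush as much as the caller's buffer holds via flush_buf(), and remember a resume position (i->from or i->iter) for the next read() call. Below is a minimal userspace sketch of that resumable-read shape; struct dump and dump_read() are invented, and plain memcpy() stands in for printbufs and copy_to_user(). It is illustrative only:

#include <stdio.h>
#include <string.h>

struct dump {				/* invented stand-in for struct dump_iter */
	const char	*src;		/* data being dumped */
	size_t		from;		/* resume position for the next read */
};

/* copy out at most size bytes, advance the resume position, return bytes copied */
static size_t dump_read(struct dump *d, char *ubuf, size_t size)
{
	size_t avail = strlen(d->src) - d->from;
	size_t n = avail < size ? avail : size;

	memcpy(ubuf, d->src + d->from, n);
	d->from += n;
	return n;
}

int main(void)
{
	struct dump d = { .src = "key 1\nkey 2\nkey 3\n", .from = 0 };
	char buf[8];
	size_t n;

	/* repeated small reads reassemble the full dump, as cat does on debugfs */
	while ((n = dump_read(&d, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	return 0;
}
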
diff --git a/fs/bcachefs/debug.h b/fs/bcachefs/debug.h
deleted file mode 100644
index 2c37143b5fd1..000000000000
--- a/fs/bcachefs/debug.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_DEBUG_H
-#define _BCACHEFS_DEBUG_H
-
-#include "bcachefs.h"
-
-struct bio;
-struct btree;
-struct bch_fs;
-
-void __bch2_btree_verify(struct bch_fs *, struct btree *);
-void bch2_btree_node_ondisk_to_text(struct printbuf *, struct bch_fs *,
- const struct btree *);
-
-static inline void bch2_btree_verify(struct bch_fs *c, struct btree *b)
-{
- if (bch2_verify_btree_ondisk)
- __bch2_btree_verify(c, b);
-}
-
-#ifdef CONFIG_DEBUG_FS
-void bch2_fs_debug_exit(struct bch_fs *);
-void bch2_fs_debug_init(struct bch_fs *);
-#else
-static inline void bch2_fs_debug_exit(struct bch_fs *c) {}
-static inline void bch2_fs_debug_init(struct bch_fs *c) {}
-#endif
-
-void bch2_debug_exit(void);
-int bch2_debug_init(void);
-
-#endif /* _BCACHEFS_DEBUG_H */
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
deleted file mode 100644
index faffc98d5605..000000000000
--- a/fs/bcachefs/dirent.c
+++ /dev/null
@@ -1,574 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "bkey_buf.h"
-#include "bkey_methods.h"
-#include "btree_update.h"
-#include "extents.h"
-#include "dirent.h"
-#include "fs.h"
-#include "keylist.h"
-#include "str_hash.h"
-#include "subvolume.h"
-
-#include <linux/dcache.h>
-
-static unsigned bch2_dirent_name_bytes(struct bkey_s_c_dirent d)
-{
- if (bkey_val_bytes(d.k) < offsetof(struct bch_dirent, d_name))
- return 0;
-
- unsigned bkey_u64s = bkey_val_u64s(d.k);
- unsigned bkey_bytes = bkey_u64s * sizeof(u64);
- u64 last_u64 = ((u64*)d.v)[bkey_u64s - 1];
-#if CPU_BIG_ENDIAN
- unsigned trailing_nuls = last_u64 ? __builtin_ctzll(last_u64) / 8 : 64 / 8;
-#else
- unsigned trailing_nuls = last_u64 ? __builtin_clzll(last_u64) / 8 : 64 / 8;
-#endif
-
- return bkey_bytes -
- offsetof(struct bch_dirent, d_name) -
- trailing_nuls;
-}
-
-struct qstr bch2_dirent_get_name(struct bkey_s_c_dirent d)
-{
- return (struct qstr) QSTR_INIT(d.v->d_name, bch2_dirent_name_bytes(d));
-}
-
-static u64 bch2_dirent_hash(const struct bch_hash_info *info,
- const struct qstr *name)
-{
- struct bch_str_hash_ctx ctx;
-
- bch2_str_hash_init(&ctx, info);
- bch2_str_hash_update(&ctx, info, name->name, name->len);
-
- /* [0,2) reserved for dots */
- return max_t(u64, bch2_str_hash_end(&ctx, info), 2);
-}
-
-static u64 dirent_hash_key(const struct bch_hash_info *info, const void *key)
-{
- return bch2_dirent_hash(info, key);
-}
-
-static u64 dirent_hash_bkey(const struct bch_hash_info *info, struct bkey_s_c k)
-{
- struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
- struct qstr name = bch2_dirent_get_name(d);
-
- return bch2_dirent_hash(info, &name);
-}
-
-static bool dirent_cmp_key(struct bkey_s_c _l, const void *_r)
-{
- struct bkey_s_c_dirent l = bkey_s_c_to_dirent(_l);
- const struct qstr l_name = bch2_dirent_get_name(l);
- const struct qstr *r_name = _r;
-
- return !qstr_eq(l_name, *r_name);
-}
-
-static bool dirent_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r)
-{
- struct bkey_s_c_dirent l = bkey_s_c_to_dirent(_l);
- struct bkey_s_c_dirent r = bkey_s_c_to_dirent(_r);
- const struct qstr l_name = bch2_dirent_get_name(l);
- const struct qstr r_name = bch2_dirent_get_name(r);
-
- return !qstr_eq(l_name, r_name);
-}
-
-static bool dirent_is_visible(subvol_inum inum, struct bkey_s_c k)
-{
- struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
-
- if (d.v->d_type == DT_SUBVOL)
- return le32_to_cpu(d.v->d_parent_subvol) == inum.subvol;
- return true;
-}
-
-const struct bch_hash_desc bch2_dirent_hash_desc = {
- .btree_id = BTREE_ID_dirents,
- .key_type = KEY_TYPE_dirent,
- .hash_key = dirent_hash_key,
- .hash_bkey = dirent_hash_bkey,
- .cmp_key = dirent_cmp_key,
- .cmp_bkey = dirent_cmp_bkey,
- .is_visible = dirent_is_visible,
-};
-
-int bch2_dirent_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
- struct qstr d_name = bch2_dirent_get_name(d);
- int ret = 0;
-
- bkey_fsck_err_on(!d_name.len,
- c, dirent_empty_name,
- "empty name");
-
- bkey_fsck_err_on(bkey_val_u64s(k.k) > dirent_val_u64s(d_name.len),
- c, dirent_val_too_big,
- "value too big (%zu > %u)",
- bkey_val_u64s(k.k), dirent_val_u64s(d_name.len));
-
- /*
- * Check new keys don't exceed the max length
- * (older keys may be larger.)
- */
- bkey_fsck_err_on((flags & BCH_VALIDATE_commit) && d_name.len > BCH_NAME_MAX,
- c, dirent_name_too_long,
- "dirent name too big (%u > %u)",
- d_name.len, BCH_NAME_MAX);
-
- bkey_fsck_err_on(d_name.len != strnlen(d_name.name, d_name.len),
- c, dirent_name_embedded_nul,
- "dirent has stray data after name's NUL");
-
- bkey_fsck_err_on((d_name.len == 1 && !memcmp(d_name.name, ".", 1)) ||
- (d_name.len == 2 && !memcmp(d_name.name, "..", 2)),
- c, dirent_name_dot_or_dotdot,
- "invalid name");
-
- bkey_fsck_err_on(memchr(d_name.name, '/', d_name.len),
- c, dirent_name_has_slash,
- "name with /");
-
- bkey_fsck_err_on(d.v->d_type != DT_SUBVOL &&
- le64_to_cpu(d.v->d_inum) == d.k->p.inode,
- c, dirent_to_itself,
- "dirent points to own directory");
-fsck_err:
- return ret;
-}
-
-void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
- struct qstr d_name = bch2_dirent_get_name(d);
-
- prt_printf(out, "%.*s -> ", d_name.len, d_name.name);
-
- if (d.v->d_type != DT_SUBVOL)
- prt_printf(out, "%llu", le64_to_cpu(d.v->d_inum));
- else
- prt_printf(out, "%u -> %u",
- le32_to_cpu(d.v->d_parent_subvol),
- le32_to_cpu(d.v->d_child_subvol));
-
- prt_printf(out, " type %s", bch2_d_type_str(d.v->d_type));
-}
-
-static struct bkey_i_dirent *dirent_create_key(struct btree_trans *trans,
- subvol_inum dir, u8 type,
- const struct qstr *name, u64 dst)
-{
- struct bkey_i_dirent *dirent;
- unsigned u64s = BKEY_U64s + dirent_val_u64s(name->len);
-
- if (name->len > BCH_NAME_MAX)
- return ERR_PTR(-ENAMETOOLONG);
-
- BUG_ON(u64s > U8_MAX);
-
- dirent = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
- if (IS_ERR(dirent))
- return dirent;
-
- bkey_dirent_init(&dirent->k_i);
- dirent->k.u64s = u64s;
-
- if (type != DT_SUBVOL) {
- dirent->v.d_inum = cpu_to_le64(dst);
- } else {
- dirent->v.d_parent_subvol = cpu_to_le32(dir.subvol);
- dirent->v.d_child_subvol = cpu_to_le32(dst);
- }
-
- dirent->v.d_type = type;
-
- memcpy(dirent->v.d_name, name->name, name->len);
- memset(dirent->v.d_name + name->len, 0,
- bkey_val_bytes(&dirent->k) -
- offsetof(struct bch_dirent, d_name) -
- name->len);
-
- EBUG_ON(bch2_dirent_name_bytes(dirent_i_to_s_c(dirent)) != name->len);
-
- return dirent;
-}
-
-int bch2_dirent_create_snapshot(struct btree_trans *trans,
- u32 dir_subvol, u64 dir, u32 snapshot,
- const struct bch_hash_info *hash_info,
- u8 type, const struct qstr *name, u64 dst_inum,
- u64 *dir_offset,
- enum btree_iter_update_trigger_flags flags)
-{
- subvol_inum dir_inum = { .subvol = dir_subvol, .inum = dir };
- struct bkey_i_dirent *dirent;
- int ret;
-
- dirent = dirent_create_key(trans, dir_inum, type, name, dst_inum);
- ret = PTR_ERR_OR_ZERO(dirent);
- if (ret)
- return ret;
-
- dirent->k.p.inode = dir;
- dirent->k.p.snapshot = snapshot;
-
- ret = bch2_hash_set_in_snapshot(trans, bch2_dirent_hash_desc, hash_info,
- dir_inum, snapshot, &dirent->k_i,
- flags|BTREE_UPDATE_internal_snapshot_node);
- *dir_offset = dirent->k.p.offset;
-
- return ret;
-}
-
-int bch2_dirent_create(struct btree_trans *trans, subvol_inum dir,
- const struct bch_hash_info *hash_info,
- u8 type, const struct qstr *name, u64 dst_inum,
- u64 *dir_offset,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bkey_i_dirent *dirent;
- int ret;
-
- dirent = dirent_create_key(trans, dir, type, name, dst_inum);
- ret = PTR_ERR_OR_ZERO(dirent);
- if (ret)
- return ret;
-
- ret = bch2_hash_set(trans, bch2_dirent_hash_desc, hash_info,
- dir, &dirent->k_i, flags);
- *dir_offset = dirent->k.p.offset;
-
- return ret;
-}
-
-int bch2_dirent_read_target(struct btree_trans *trans, subvol_inum dir,
- struct bkey_s_c_dirent d, subvol_inum *target)
-{
- struct bch_subvolume s;
- int ret = 0;
-
- if (d.v->d_type == DT_SUBVOL &&
- le32_to_cpu(d.v->d_parent_subvol) != dir.subvol)
- return 1;
-
- if (likely(d.v->d_type != DT_SUBVOL)) {
- target->subvol = dir.subvol;
- target->inum = le64_to_cpu(d.v->d_inum);
- } else {
- target->subvol = le32_to_cpu(d.v->d_child_subvol);
-
- ret = bch2_subvolume_get(trans, target->subvol, true, BTREE_ITER_cached, &s);
-
- target->inum = le64_to_cpu(s.inode);
- }
-
- return ret;
-}
-
-int bch2_dirent_rename(struct btree_trans *trans,
- subvol_inum src_dir, struct bch_hash_info *src_hash,
- subvol_inum dst_dir, struct bch_hash_info *dst_hash,
- const struct qstr *src_name, subvol_inum *src_inum, u64 *src_offset,
- const struct qstr *dst_name, subvol_inum *dst_inum, u64 *dst_offset,
- enum bch_rename_mode mode)
-{
- struct btree_iter src_iter = { NULL };
- struct btree_iter dst_iter = { NULL };
- struct bkey_s_c old_src, old_dst = bkey_s_c_null;
- struct bkey_i_dirent *new_src = NULL, *new_dst = NULL;
- struct bpos dst_pos =
- POS(dst_dir.inum, bch2_dirent_hash(dst_hash, dst_name));
- unsigned src_update_flags = 0;
- bool delete_src, delete_dst;
- int ret = 0;
-
- memset(src_inum, 0, sizeof(*src_inum));
- memset(dst_inum, 0, sizeof(*dst_inum));
-
- /* Lookup src: */
- old_src = bch2_hash_lookup(trans, &src_iter, bch2_dirent_hash_desc,
- src_hash, src_dir, src_name,
- BTREE_ITER_intent);
- ret = bkey_err(old_src);
- if (ret)
- goto out;
-
- ret = bch2_dirent_read_target(trans, src_dir,
- bkey_s_c_to_dirent(old_src), src_inum);
- if (ret)
- goto out;
-
- /* Lookup dst: */
- if (mode == BCH_RENAME) {
- /*
- * Note that we're _not_ checking if the target already exists -
- * we're relying on the VFS to do that check for us for
- * correctness:
- */
- ret = bch2_hash_hole(trans, &dst_iter, bch2_dirent_hash_desc,
- dst_hash, dst_dir, dst_name);
- if (ret)
- goto out;
- } else {
- old_dst = bch2_hash_lookup(trans, &dst_iter, bch2_dirent_hash_desc,
- dst_hash, dst_dir, dst_name,
- BTREE_ITER_intent);
- ret = bkey_err(old_dst);
- if (ret)
- goto out;
-
- ret = bch2_dirent_read_target(trans, dst_dir,
- bkey_s_c_to_dirent(old_dst), dst_inum);
- if (ret)
- goto out;
- }
-
- if (mode != BCH_RENAME_EXCHANGE)
- *src_offset = dst_iter.pos.offset;
-
- /* Create new dst key: */
- new_dst = dirent_create_key(trans, dst_dir, 0, dst_name, 0);
- ret = PTR_ERR_OR_ZERO(new_dst);
- if (ret)
- goto out;
-
- dirent_copy_target(new_dst, bkey_s_c_to_dirent(old_src));
- new_dst->k.p = dst_iter.pos;
-
- /* Create new src key: */
- if (mode == BCH_RENAME_EXCHANGE) {
- new_src = dirent_create_key(trans, src_dir, 0, src_name, 0);
- ret = PTR_ERR_OR_ZERO(new_src);
- if (ret)
- goto out;
-
- dirent_copy_target(new_src, bkey_s_c_to_dirent(old_dst));
- new_src->k.p = src_iter.pos;
- } else {
- new_src = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
- ret = PTR_ERR_OR_ZERO(new_src);
- if (ret)
- goto out;
-
- bkey_init(&new_src->k);
- new_src->k.p = src_iter.pos;
-
- if (bkey_le(dst_pos, src_iter.pos) &&
- bkey_lt(src_iter.pos, dst_iter.pos)) {
- /*
- * We have a hash collision for the new dst key,
- * and new_src - the key we're deleting - is between
- * new_dst's hashed slot and the slot we're going to be
- * inserting it into - oops. This will break the hash
- * table if we don't deal with it:
- */
- if (mode == BCH_RENAME) {
- /*
- * If we're not overwriting, we can just insert
- * new_dst at the src position:
- */
- new_src = new_dst;
- new_src->k.p = src_iter.pos;
- goto out_set_src;
- } else {
- /* If we're overwriting, we can't insert new_dst
- * at a different slot because it has to
- * overwrite old_dst - just make sure to use a
- * whiteout when deleting src:
- */
- new_src->k.type = KEY_TYPE_hash_whiteout;
- }
- } else {
- /* Check if we need a whiteout to delete src: */
- ret = bch2_hash_needs_whiteout(trans, bch2_dirent_hash_desc,
- src_hash, &src_iter);
- if (ret < 0)
- goto out;
-
- if (ret)
- new_src->k.type = KEY_TYPE_hash_whiteout;
- }
- }
-
- if (new_dst->v.d_type == DT_SUBVOL)
- new_dst->v.d_parent_subvol = cpu_to_le32(dst_dir.subvol);
-
- if ((mode == BCH_RENAME_EXCHANGE) &&
- new_src->v.d_type == DT_SUBVOL)
- new_src->v.d_parent_subvol = cpu_to_le32(src_dir.subvol);
-
- ret = bch2_trans_update(trans, &dst_iter, &new_dst->k_i, 0);
- if (ret)
- goto out;
-out_set_src:
- /*
- * If we're deleting a subvolume we need to really delete the dirent,
- * not just emit a whiteout in the current snapshot - there can only be
-	 * a single dirent that points to a given subvolume.
- *
- * IOW, we don't maintain multiple versions in different snapshots of
- * dirents that point to subvolumes - dirents that point to subvolumes
- * are only visible in one particular subvolume so it's not necessary,
- * and it would be particularly confusing for fsck to have to deal with.
- */
- delete_src = bkey_s_c_to_dirent(old_src).v->d_type == DT_SUBVOL &&
- new_src->k.p.snapshot != old_src.k->p.snapshot;
-
- delete_dst = old_dst.k &&
- bkey_s_c_to_dirent(old_dst).v->d_type == DT_SUBVOL &&
- new_dst->k.p.snapshot != old_dst.k->p.snapshot;
-
- if (!delete_src || !bkey_deleted(&new_src->k)) {
- ret = bch2_trans_update(trans, &src_iter, &new_src->k_i, src_update_flags);
- if (ret)
- goto out;
- }
-
- if (delete_src) {
- bch2_btree_iter_set_snapshot(&src_iter, old_src.k->p.snapshot);
- ret = bch2_btree_iter_traverse(&src_iter) ?:
- bch2_btree_delete_at(trans, &src_iter, BTREE_UPDATE_internal_snapshot_node);
- if (ret)
- goto out;
- }
-
- if (delete_dst) {
- bch2_btree_iter_set_snapshot(&dst_iter, old_dst.k->p.snapshot);
- ret = bch2_btree_iter_traverse(&dst_iter) ?:
- bch2_btree_delete_at(trans, &dst_iter, BTREE_UPDATE_internal_snapshot_node);
- if (ret)
- goto out;
- }
-
- if (mode == BCH_RENAME_EXCHANGE)
- *src_offset = new_src->k.p.offset;
- *dst_offset = new_dst->k.p.offset;
-out:
- bch2_trans_iter_exit(trans, &src_iter);
- bch2_trans_iter_exit(trans, &dst_iter);
- return ret;
-}
-
-int bch2_dirent_lookup_trans(struct btree_trans *trans,
- struct btree_iter *iter,
- subvol_inum dir,
- const struct bch_hash_info *hash_info,
- const struct qstr *name, subvol_inum *inum,
- unsigned flags)
-{
- struct bkey_s_c k = bch2_hash_lookup(trans, iter, bch2_dirent_hash_desc,
- hash_info, dir, name, flags);
- int ret = bkey_err(k);
- if (ret)
- goto err;
-
- ret = bch2_dirent_read_target(trans, dir, bkey_s_c_to_dirent(k), inum);
- if (ret > 0)
- ret = -ENOENT;
-err:
- if (ret)
- bch2_trans_iter_exit(trans, iter);
- return ret;
-}
-
-u64 bch2_dirent_lookup(struct bch_fs *c, subvol_inum dir,
- const struct bch_hash_info *hash_info,
- const struct qstr *name, subvol_inum *inum)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter = { NULL };
-
- int ret = lockrestart_do(trans,
- bch2_dirent_lookup_trans(trans, &iter, dir, hash_info, name, inum, 0));
- bch2_trans_iter_exit(trans, &iter);
- bch2_trans_put(trans);
- return ret;
-}
-
-int bch2_empty_dir_snapshot(struct btree_trans *trans, u64 dir, u32 subvol, u32 snapshot)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret;
-
- for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_dirents,
- SPOS(dir, 0, snapshot),
- POS(dir, U64_MAX), 0, k, ret)
- if (k.k->type == KEY_TYPE_dirent) {
- struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
- if (d.v->d_type == DT_SUBVOL && le32_to_cpu(d.v->d_parent_subvol) != subvol)
- continue;
- ret = -BCH_ERR_ENOTEMPTY_dir_not_empty;
- break;
- }
- bch2_trans_iter_exit(trans, &iter);
-
- return ret;
-}
-
-int bch2_empty_dir_trans(struct btree_trans *trans, subvol_inum dir)
-{
- u32 snapshot;
-
- return bch2_subvolume_get_snapshot(trans, dir.subvol, &snapshot) ?:
- bch2_empty_dir_snapshot(trans, dir.inum, dir.subvol, snapshot);
-}
-
-static int bch2_dir_emit(struct dir_context *ctx, struct bkey_s_c_dirent d, subvol_inum target)
-{
- struct qstr name = bch2_dirent_get_name(d);
- /*
- * Although not required by the kernel code, updating ctx->pos is needed
- * for the bcachefs FUSE driver. Without this update, the FUSE
- * implementation will be stuck in an infinite loop when reading
- * directories (via the bcachefs_fuse_readdir callback).
- * In kernel space, ctx->pos is updated by the VFS code.
- */
- ctx->pos = d.k->p.offset;
- bool ret = dir_emit(ctx, name.name,
- name.len,
- target.inum,
- vfs_d_type(d.v->d_type));
- if (ret)
- ctx->pos = d.k->p.offset + 1;
- return ret;
-}
-
-int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx)
-{
- struct bkey_buf sk;
- bch2_bkey_buf_init(&sk);
-
- int ret = bch2_trans_run(c,
- for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_dirents,
- POS(inum.inum, ctx->pos),
- POS(inum.inum, U64_MAX),
- inum.subvol, 0, k, ({
- if (k.k->type != KEY_TYPE_dirent)
- continue;
-
- /* dir_emit() can fault and block: */
- bch2_bkey_buf_reassemble(&sk, c, k);
- struct bkey_s_c_dirent dirent = bkey_i_to_s_c_dirent(sk.k);
-
- subvol_inum target;
- int ret2 = bch2_dirent_read_target(trans, inum, dirent, &target);
- if (ret2 > 0)
- continue;
-
- ret2 ?: drop_locks_do(trans, bch2_dir_emit(ctx, dirent, target));
- })));
-
- bch2_bkey_buf_exit(&sk, c);
-
- return ret < 0 ? ret : 0;
-}
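
bch2_dirent_name_bytes(), near the top of the file above, recovers a dirent's name length by trimming trailing NULs: names are padded out to u64 granularity, and the pad length is read off the last u64 of the value with a count-leading/trailing-zeros builtin chosen by endianness. The sketch below covers only that padding-recovery step, assuming a little-endian host and a name that fits in a single u64 (in the real code the length also subtracts the header offset from the value size); it is an illustration, not part of the deleted file:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* little-endian: trailing NUL bytes are the most significant bytes of the word */
static unsigned trailing_nuls(uint64_t last_u64)
{
	return last_u64 ? __builtin_clzll(last_u64) / 8 : 8;
}

int main(void)
{
	uint64_t w = 0;

	memcpy(&w, "foo\0\0\0\0\0", 8);		/* "foo" padded to one u64 */
	printf("padding: %u, name length: %u\n",
	       trailing_nuls(w), 8 - trailing_nuls(w));	/* padding: 5, name length: 3 */
	return 0;
}
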
diff --git a/fs/bcachefs/dirent.h b/fs/bcachefs/dirent.h
deleted file mode 100644
index 53ad99666022..000000000000
--- a/fs/bcachefs/dirent.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_DIRENT_H
-#define _BCACHEFS_DIRENT_H
-
-#include "str_hash.h"
-
-enum bch_validate_flags;
-extern const struct bch_hash_desc bch2_dirent_hash_desc;
-
-int bch2_dirent_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-void bch2_dirent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-
-#define bch2_bkey_ops_dirent ((struct bkey_ops) { \
- .key_validate = bch2_dirent_validate, \
- .val_to_text = bch2_dirent_to_text, \
- .min_val_size = 16, \
-})
-
-struct qstr;
-struct file;
-struct dir_context;
-struct bch_fs;
-struct bch_hash_info;
-struct bch_inode_info;
-
-struct qstr bch2_dirent_get_name(struct bkey_s_c_dirent d);
-
-static inline unsigned dirent_val_u64s(unsigned len)
-{
- return DIV_ROUND_UP(offsetof(struct bch_dirent, d_name) + len,
- sizeof(u64));
-}
-
-int bch2_dirent_read_target(struct btree_trans *, subvol_inum,
- struct bkey_s_c_dirent, subvol_inum *);
-
-static inline void dirent_copy_target(struct bkey_i_dirent *dst,
- struct bkey_s_c_dirent src)
-{
- dst->v.d_inum = src.v->d_inum;
- dst->v.d_type = src.v->d_type;
-}
-
-int bch2_dirent_create_snapshot(struct btree_trans *, u32, u64, u32,
- const struct bch_hash_info *, u8,
- const struct qstr *, u64, u64 *,
- enum btree_iter_update_trigger_flags);
-int bch2_dirent_create(struct btree_trans *, subvol_inum,
- const struct bch_hash_info *, u8,
- const struct qstr *, u64, u64 *,
- enum btree_iter_update_trigger_flags);
-
-static inline unsigned vfs_d_type(unsigned type)
-{
- return type == DT_SUBVOL ? DT_DIR : type;
-}
-
-enum bch_rename_mode {
- BCH_RENAME,
- BCH_RENAME_OVERWRITE,
- BCH_RENAME_EXCHANGE,
-};
-
-int bch2_dirent_rename(struct btree_trans *,
- subvol_inum, struct bch_hash_info *,
- subvol_inum, struct bch_hash_info *,
- const struct qstr *, subvol_inum *, u64 *,
- const struct qstr *, subvol_inum *, u64 *,
- enum bch_rename_mode);
-
-int bch2_dirent_lookup_trans(struct btree_trans *, struct btree_iter *,
- subvol_inum, const struct bch_hash_info *,
- const struct qstr *, subvol_inum *, unsigned);
-u64 bch2_dirent_lookup(struct bch_fs *, subvol_inum,
- const struct bch_hash_info *,
- const struct qstr *, subvol_inum *);
-
-int bch2_empty_dir_snapshot(struct btree_trans *, u64, u32, u32);
-int bch2_empty_dir_trans(struct btree_trans *, subvol_inum);
-int bch2_readdir(struct bch_fs *, subvol_inum, struct dir_context *);
-
-#endif /* _BCACHEFS_DIRENT_H */
diff --git a/fs/bcachefs/dirent_format.h b/fs/bcachefs/dirent_format.h
deleted file mode 100644
index 5e116b88e814..000000000000
--- a/fs/bcachefs/dirent_format.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_DIRENT_FORMAT_H
-#define _BCACHEFS_DIRENT_FORMAT_H
-
-/*
- * Dirents (and xattrs) have to implement string lookups; since our b-tree
- * doesn't support arbitrary length strings for the key, we instead index by a
- * 64 bit hash (currently truncated sha1) of the string, stored in the offset
- * field of the key - using linear probing to resolve hash collisions. This also
- * provides us with the readdir cookie posix requires.
- *
- * Linear probing requires us to use whiteouts for deletions, in the event of a
- * collision:
- */
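
/*
 * Illustrative sketch, not part of the original header: the hash-as-offset
 * scheme described above means a lookup starts from a btree position built
 * directly from the directory inode and the 64 bit name hash; `name_hash'
 * here stands in for whatever the str_hash helpers actually compute.
 */
static inline struct bpos dirent_start_pos_sketch(u64 dir_inum, u64 name_hash,
						  u32 snapshot)
{
	/* on collision, linear probing scans forward from this position */
	return SPOS(dir_inum, name_hash, snapshot);
}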
-
-struct bch_dirent {
- struct bch_val v;
-
- /* Target inode number: */
- union {
- __le64 d_inum;
- struct { /* DT_SUBVOL */
- __le32 d_child_subvol;
- __le32 d_parent_subvol;
- };
- };
-
- /*
- * Copy of mode bits 12-15 from the target inode - so userspace can get
- * the filetype without having to do a stat()
- */
- __u8 d_type;
-
- __u8 d_name[];
-} __packed __aligned(8);
-
-#define DT_SUBVOL 16
-#define BCH_DT_MAX 17
-
-#define BCH_NAME_MAX 512
-
-#endif /* _BCACHEFS_DIRENT_FORMAT_H */
diff --git a/fs/bcachefs/disk_accounting.c b/fs/bcachefs/disk_accounting.c
deleted file mode 100644
index 07eb8fa1b026..000000000000
--- a/fs/bcachefs/disk_accounting.c
+++ /dev/null
@@ -1,976 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "bcachefs_ioctl.h"
-#include "btree_cache.h"
-#include "btree_journal_iter.h"
-#include "btree_update.h"
-#include "btree_write_buffer.h"
-#include "buckets.h"
-#include "compress.h"
-#include "disk_accounting.h"
-#include "error.h"
-#include "journal_io.h"
-#include "replicas.h"
-
-/*
- * Notes on disk accounting:
- *
- * We have two parallel sets of counters to be concerned with, and both must be
- * kept in sync.
- *
- * - Persistent/on disk accounting, stored in the accounting btree and updated
- * via btree write buffer updates that treat new accounting keys as deltas to
- * apply to existing values. But reading from a write buffer btree is
- * expensive, so we also have
- *
- * - In memory accounting, where accounting is stored as an array of percpu
- * counters, indexed by an eytzinger array of disk accounting keys/bpos (which
- * are the same thing, excepting byte swabbing on big endian).
- *
- * Cheap to read, but non persistent.
- *
- * Disk accounting updates are generated by transactional triggers; these run as
- * keys enter and leave the btree, and can compare old and new versions of keys;
- * the output of these triggers are deltas to the various counters.
- *
- * Disk accounting updates are done as btree write buffer updates, where the
- * counters in the disk accounting key are deltas that will be applied to the
- * counter in the btree when the key is flushed by the write buffer (or journal
- * replay).
- *
- * To do a disk accounting update:
- * - initialize a disk_accounting_pos, to specify which counter is being updated
- * - initialize counter deltas, as an array of 1-3 s64s
- * - call bch2_disk_accounting_mod()
- *
- * This queues up the accounting update to be done at transaction commit time.
- * Underneath, it's a normal btree write buffer update.
- *
- * The transaction commit path is responsible for propagating updates to the in
- * memory counters, with bch2_accounting_mem_mod().
- *
- * The commit path also assigns every disk accounting update a unique version
- * number, based on the journal sequence number and offset within that journal
- * buffer; this is used by journal replay to determine which updates have been
- * done.
- *
- * The transaction commit path also ensures that replicas entry accounting
- * updates are properly marked in the superblock (so that we know whether we can
- * mount without data being unavailable); it will update the superblock if
- * bch2_accounting_mem_mod() tells it to.
- */
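
/*
 * Illustrative sketch only, not part of the original file: the three step
 * update sequence described above, for a caller inside a btree transaction
 * that wants to account `sectors' to a given btree.  The function name is
 * hypothetical; bch2_disk_accounting_mod() and BCH_DISK_ACCOUNTING_btree are
 * the real interfaces used.
 */
static inline int acct_btree_sectors_sketch(struct btree_trans *trans,
					    unsigned btree_id, s64 sectors)
{
	/* 1) which counter is being updated */
	struct disk_accounting_pos acc = {
		.type		= BCH_DISK_ACCOUNTING_btree,
		.btree.id	= btree_id,
	};
	/* 2) the counter deltas, an array of 1-3 s64s */
	s64 d[1] = { sectors };

	/* 3) queue the delta; it is applied at transaction commit time */
	return bch2_disk_accounting_mod(trans, &acc, d, ARRAY_SIZE(d), false);
}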
-
-static const char * const disk_accounting_type_strs[] = {
-#define x(t, n, ...) [n] = #t,
- BCH_DISK_ACCOUNTING_TYPES()
-#undef x
- NULL
-};
-
-static inline void accounting_key_init(struct bkey_i *k, struct disk_accounting_pos *pos,
- s64 *d, unsigned nr)
-{
- struct bkey_i_accounting *acc = bkey_accounting_init(k);
-
- acc->k.p = disk_accounting_pos_to_bpos(pos);
- set_bkey_val_u64s(&acc->k, sizeof(struct bch_accounting) / sizeof(u64) + nr);
-
- memcpy_u64s_small(acc->v.d, d, nr);
-}
-
-int bch2_disk_accounting_mod(struct btree_trans *trans,
- struct disk_accounting_pos *k,
- s64 *d, unsigned nr, bool gc)
-{
- /* Normalize: */
- switch (k->type) {
- case BCH_DISK_ACCOUNTING_replicas:
- bubble_sort(k->replicas.devs, k->replicas.nr_devs, u8_cmp);
- break;
- }
-
- BUG_ON(nr > BCH_ACCOUNTING_MAX_COUNTERS);
-
- struct { __BKEY_PADDED(k, BCH_ACCOUNTING_MAX_COUNTERS); } k_i;
-
- accounting_key_init(&k_i.k, k, d, nr);
-
- return likely(!gc)
- ? bch2_trans_update_buffered(trans, BTREE_ID_accounting, &k_i.k)
- : bch2_accounting_mem_add(trans, bkey_i_to_s_c_accounting(&k_i.k), true);
-}
-
-int bch2_mod_dev_cached_sectors(struct btree_trans *trans,
- unsigned dev, s64 sectors,
- bool gc)
-{
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_replicas,
- };
-
- bch2_replicas_entry_cached(&acc.replicas, dev);
-
- return bch2_disk_accounting_mod(trans, &acc, &sectors, 1, gc);
-}
-
-static inline bool is_zero(char *start, char *end)
-{
- BUG_ON(start > end);
-
- for (; start < end; start++)
- if (*start)
- return false;
- return true;
-}
-
-#define field_end(p, member) (((void *) (&p.member)) + sizeof(p.member))
-
-int bch2_accounting_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct disk_accounting_pos acc_k;
- bpos_to_disk_accounting_pos(&acc_k, k.k->p);
- void *end = &acc_k + 1;
- int ret = 0;
-
- bkey_fsck_err_on(bversion_zero(k.k->bversion),
- c, accounting_key_version_0,
- "accounting key with version=0");
-
- switch (acc_k.type) {
- case BCH_DISK_ACCOUNTING_nr_inodes:
- end = field_end(acc_k, nr_inodes);
- break;
- case BCH_DISK_ACCOUNTING_persistent_reserved:
- end = field_end(acc_k, persistent_reserved);
- break;
- case BCH_DISK_ACCOUNTING_replicas:
- bkey_fsck_err_on(!acc_k.replicas.nr_devs,
- c, accounting_key_replicas_nr_devs_0,
- "accounting key replicas entry with nr_devs=0");
-
- bkey_fsck_err_on(acc_k.replicas.nr_required > acc_k.replicas.nr_devs ||
- (acc_k.replicas.nr_required > 1 &&
- acc_k.replicas.nr_required == acc_k.replicas.nr_devs),
- c, accounting_key_replicas_nr_required_bad,
- "accounting key replicas entry with bad nr_required");
-
- for (unsigned i = 0; i + 1 < acc_k.replicas.nr_devs; i++)
- bkey_fsck_err_on(acc_k.replicas.devs[i] >= acc_k.replicas.devs[i + 1],
- c, accounting_key_replicas_devs_unsorted,
- "accounting key replicas entry with unsorted devs");
-
- end = (void *) &acc_k.replicas + replicas_entry_bytes(&acc_k.replicas);
- break;
- case BCH_DISK_ACCOUNTING_dev_data_type:
- end = field_end(acc_k, dev_data_type);
- break;
- case BCH_DISK_ACCOUNTING_compression:
- end = field_end(acc_k, compression);
- break;
- case BCH_DISK_ACCOUNTING_snapshot:
- end = field_end(acc_k, snapshot);
- break;
- case BCH_DISK_ACCOUNTING_btree:
- end = field_end(acc_k, btree);
- break;
- case BCH_DISK_ACCOUNTING_rebalance_work:
- end = field_end(acc_k, rebalance_work);
- break;
- }
-
- bkey_fsck_err_on(!is_zero(end, (void *) (&acc_k + 1)),
- c, accounting_key_junk_at_end,
- "junk at end of accounting key");
-fsck_err:
- return ret;
-}
-
-void bch2_accounting_key_to_text(struct printbuf *out, struct disk_accounting_pos *k)
-{
- if (k->type >= BCH_DISK_ACCOUNTING_TYPE_NR) {
- prt_printf(out, "unknown type %u", k->type);
- return;
- }
-
- prt_str(out, disk_accounting_type_strs[k->type]);
- prt_str(out, " ");
-
- switch (k->type) {
- case BCH_DISK_ACCOUNTING_nr_inodes:
- break;
- case BCH_DISK_ACCOUNTING_persistent_reserved:
- prt_printf(out, "replicas=%u", k->persistent_reserved.nr_replicas);
- break;
- case BCH_DISK_ACCOUNTING_replicas:
- bch2_replicas_entry_to_text(out, &k->replicas);
- break;
- case BCH_DISK_ACCOUNTING_dev_data_type:
- prt_printf(out, "dev=%u data_type=", k->dev_data_type.dev);
- bch2_prt_data_type(out, k->dev_data_type.data_type);
- break;
- case BCH_DISK_ACCOUNTING_compression:
- bch2_prt_compression_type(out, k->compression.type);
- break;
- case BCH_DISK_ACCOUNTING_snapshot:
- prt_printf(out, "id=%u", k->snapshot.id);
- break;
- case BCH_DISK_ACCOUNTING_btree:
- prt_printf(out, "btree=%s", bch2_btree_id_str(k->btree.id));
- break;
- }
-}
-
-void bch2_accounting_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_s_c_accounting acc = bkey_s_c_to_accounting(k);
- struct disk_accounting_pos acc_k;
- bpos_to_disk_accounting_pos(&acc_k, k.k->p);
-
- bch2_accounting_key_to_text(out, &acc_k);
-
- for (unsigned i = 0; i < bch2_accounting_counters(k.k); i++)
- prt_printf(out, " %lli", acc.v->d[i]);
-}
-
-void bch2_accounting_swab(struct bkey_s k)
-{
- for (u64 *p = (u64 *) k.v;
- p < (u64 *) bkey_val_end(k);
- p++)
- *p = swab64(*p);
-}
-
-static inline void __accounting_to_replicas(struct bch_replicas_entry_v1 *r,
- struct disk_accounting_pos acc)
-{
- unsafe_memcpy(r, &acc.replicas,
- replicas_entry_bytes(&acc.replicas),
- "variable length struct");
-}
-
-static inline bool accounting_to_replicas(struct bch_replicas_entry_v1 *r, struct bpos p)
-{
- struct disk_accounting_pos acc_k;
- bpos_to_disk_accounting_pos(&acc_k, p);
-
- switch (acc_k.type) {
- case BCH_DISK_ACCOUNTING_replicas:
- __accounting_to_replicas(r, acc_k);
- return true;
- default:
- return false;
- }
-}
-
-static int bch2_accounting_update_sb_one(struct bch_fs *c, struct bpos p)
-{
- struct bch_replicas_padded r;
- return accounting_to_replicas(&r.e, p)
- ? bch2_mark_replicas(c, &r.e)
- : 0;
-}
-
-/*
- * Ensure accounting keys being updated are present in the superblock, when
- * applicable (i.e. replicas updates)
- */
-int bch2_accounting_update_sb(struct btree_trans *trans)
-{
- for (struct jset_entry *i = trans->journal_entries;
- i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
- i = vstruct_next(i))
- if (jset_entry_is_key(i) && i->start->k.type == KEY_TYPE_accounting) {
- int ret = bch2_accounting_update_sb_one(trans->c, i->start->k.p);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int __bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accounting a)
-{
- struct bch_accounting_mem *acc = &c->accounting;
-
- /* raced with another insert, already present: */
- if (eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
- accounting_pos_cmp, &a.k->p) < acc->k.nr)
- return 0;
-
- struct accounting_mem_entry n = {
- .pos = a.k->p,
- .bversion = a.k->bversion,
- .nr_counters = bch2_accounting_counters(a.k),
- .v[0] = __alloc_percpu_gfp(n.nr_counters * sizeof(u64),
- sizeof(u64), GFP_KERNEL),
- };
-
- if (!n.v[0])
- goto err;
-
- if (acc->gc_running) {
- n.v[1] = __alloc_percpu_gfp(n.nr_counters * sizeof(u64),
- sizeof(u64), GFP_KERNEL);
- if (!n.v[1])
- goto err;
- }
-
- if (darray_push(&acc->k, n))
- goto err;
-
- eytzinger0_sort(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
- accounting_pos_cmp, NULL);
- return 0;
-err:
- free_percpu(n.v[1]);
- free_percpu(n.v[0]);
- return -BCH_ERR_ENOMEM_disk_accounting;
-}
-
-int bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accounting a,
- enum bch_accounting_mode mode)
-{
- struct bch_replicas_padded r;
-
- if (mode != BCH_ACCOUNTING_read &&
- accounting_to_replicas(&r.e, a.k->p) &&
- !bch2_replicas_marked_locked(c, &r.e))
- return -BCH_ERR_btree_insert_need_mark_replicas;
-
- percpu_up_read(&c->mark_lock);
- percpu_down_write(&c->mark_lock);
- int ret = __bch2_accounting_mem_insert(c, a);
- percpu_up_write(&c->mark_lock);
- percpu_down_read(&c->mark_lock);
- return ret;
-}
-
-static bool accounting_mem_entry_is_zero(struct accounting_mem_entry *e)
-{
- for (unsigned i = 0; i < e->nr_counters; i++)
- if (percpu_u64_get(e->v[0] + i) ||
- (e->v[1] &&
- percpu_u64_get(e->v[1] + i)))
- return false;
- return true;
-}
-
-void bch2_accounting_mem_gc(struct bch_fs *c)
-{
- struct bch_accounting_mem *acc = &c->accounting;
-
- percpu_down_write(&c->mark_lock);
- struct accounting_mem_entry *dst = acc->k.data;
-
- darray_for_each(acc->k, src) {
- if (accounting_mem_entry_is_zero(src)) {
- free_percpu(src->v[0]);
- free_percpu(src->v[1]);
- } else {
- *dst++ = *src;
- }
- }
-
- acc->k.nr = dst - acc->k.data;
- eytzinger0_sort(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
- accounting_pos_cmp, NULL);
- percpu_up_write(&c->mark_lock);
-}
-
-/*
- * Read out accounting keys for replicas entries, as an array of
- * bch_replicas_usage entries.
- *
- * Note: this may be deprecated/removed at some point in the future and replaced
- * with something more general; it exists to support the ioctl used by the
- * 'bcachefs fs usage' command.
- */
-int bch2_fs_replicas_usage_read(struct bch_fs *c, darray_char *usage)
-{
- struct bch_accounting_mem *acc = &c->accounting;
- int ret = 0;
-
- darray_init(usage);
-
- percpu_down_read(&c->mark_lock);
- darray_for_each(acc->k, i) {
- struct {
- struct bch_replicas_usage r;
- u8 pad[BCH_BKEY_PTRS_MAX];
- } u;
-
- if (!accounting_to_replicas(&u.r.r, i->pos))
- continue;
-
- u64 sectors;
- bch2_accounting_mem_read_counters(acc, i - acc->k.data, &sectors, 1, false);
- u.r.sectors = sectors;
-
- ret = darray_make_room(usage, replicas_usage_bytes(&u.r));
- if (ret)
- break;
-
- memcpy(&darray_top(*usage), &u.r, replicas_usage_bytes(&u.r));
- usage->nr += replicas_usage_bytes(&u.r);
- }
- percpu_up_read(&c->mark_lock);
-
- if (ret)
- darray_exit(usage);
- return ret;
-}
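
/*
 * Illustrative only, not part of the original file: walking the packed array
 * produced above.  Each bch_replicas_usage entry is variable length, so the
 * cursor advances by replicas_usage_bytes() rather than by sizeof().
 */
static inline unsigned replicas_usage_nr_entries_sketch(darray_char *usage)
{
	unsigned nr = 0;

	for (char *p = usage->data;
	     p < usage->data + usage->nr;
	     p += replicas_usage_bytes((struct bch_replicas_usage *) p))
		nr++;

	return nr;
}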
-
-int bch2_fs_accounting_read(struct bch_fs *c, darray_char *out_buf, unsigned accounting_types_mask)
-{
-
- struct bch_accounting_mem *acc = &c->accounting;
- int ret = 0;
-
- darray_init(out_buf);
-
- percpu_down_read(&c->mark_lock);
- darray_for_each(acc->k, i) {
- struct disk_accounting_pos a_p;
- bpos_to_disk_accounting_pos(&a_p, i->pos);
-
- if (!(accounting_types_mask & BIT(a_p.type)))
- continue;
-
- ret = darray_make_room(out_buf, sizeof(struct bkey_i_accounting) +
- sizeof(u64) * i->nr_counters);
- if (ret)
- break;
-
- struct bkey_i_accounting *a_out =
- bkey_accounting_init((void *) &darray_top(*out_buf));
- set_bkey_val_u64s(&a_out->k, i->nr_counters);
- a_out->k.p = i->pos;
- bch2_accounting_mem_read_counters(acc, i - acc->k.data,
- a_out->v.d, i->nr_counters, false);
-
- if (!bch2_accounting_key_is_zero(accounting_i_to_s_c(a_out)))
- out_buf->nr += bkey_bytes(&a_out->k);
- }
-
- percpu_up_read(&c->mark_lock);
-
- if (ret)
- darray_exit(out_buf);
- return ret;
-}
-
-void bch2_fs_accounting_to_text(struct printbuf *out, struct bch_fs *c)
-{
- struct bch_accounting_mem *acc = &c->accounting;
-
- percpu_down_read(&c->mark_lock);
- out->atomic++;
-
- eytzinger0_for_each(i, acc->k.nr) {
- struct disk_accounting_pos acc_k;
- bpos_to_disk_accounting_pos(&acc_k, acc->k.data[i].pos);
-
- bch2_accounting_key_to_text(out, &acc_k);
-
- u64 v[BCH_ACCOUNTING_MAX_COUNTERS];
- bch2_accounting_mem_read_counters(acc, i, v, ARRAY_SIZE(v), false);
-
- prt_str(out, ":");
- for (unsigned j = 0; j < acc->k.data[i].nr_counters; j++)
- prt_printf(out, " %llu", v[j]);
- prt_newline(out);
- }
-
- --out->atomic;
- percpu_up_read(&c->mark_lock);
-}
-
-static void bch2_accounting_free_counters(struct bch_accounting_mem *acc, bool gc)
-{
- darray_for_each(acc->k, e) {
- free_percpu(e->v[gc]);
- e->v[gc] = NULL;
- }
-}
-
-int bch2_gc_accounting_start(struct bch_fs *c)
-{
- struct bch_accounting_mem *acc = &c->accounting;
- int ret = 0;
-
- percpu_down_write(&c->mark_lock);
- darray_for_each(acc->k, e) {
- e->v[1] = __alloc_percpu_gfp(e->nr_counters * sizeof(u64),
- sizeof(u64), GFP_KERNEL);
- if (!e->v[1]) {
- bch2_accounting_free_counters(acc, true);
- ret = -BCH_ERR_ENOMEM_disk_accounting;
- break;
- }
- }
-
- acc->gc_running = !ret;
- percpu_up_write(&c->mark_lock);
-
- return ret;
-}
-
-int bch2_gc_accounting_done(struct bch_fs *c)
-{
- struct bch_accounting_mem *acc = &c->accounting;
- struct btree_trans *trans = bch2_trans_get(c);
- struct printbuf buf = PRINTBUF;
- struct bpos pos = POS_MIN;
- int ret = 0;
-
- percpu_down_write(&c->mark_lock);
- while (1) {
- unsigned idx = eytzinger0_find_ge(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
- accounting_pos_cmp, &pos);
-
- if (idx >= acc->k.nr)
- break;
-
- struct accounting_mem_entry *e = acc->k.data + idx;
- pos = bpos_successor(e->pos);
-
- struct disk_accounting_pos acc_k;
- bpos_to_disk_accounting_pos(&acc_k, e->pos);
-
- if (acc_k.type >= BCH_DISK_ACCOUNTING_TYPE_NR)
- continue;
-
- u64 src_v[BCH_ACCOUNTING_MAX_COUNTERS];
- u64 dst_v[BCH_ACCOUNTING_MAX_COUNTERS];
-
- unsigned nr = e->nr_counters;
- bch2_accounting_mem_read_counters(acc, idx, dst_v, nr, false);
- bch2_accounting_mem_read_counters(acc, idx, src_v, nr, true);
-
- if (memcmp(dst_v, src_v, nr * sizeof(u64))) {
- printbuf_reset(&buf);
- prt_str(&buf, "accounting mismatch for ");
- bch2_accounting_key_to_text(&buf, &acc_k);
-
- prt_str(&buf, ": got");
- for (unsigned j = 0; j < nr; j++)
- prt_printf(&buf, " %llu", dst_v[j]);
-
- prt_str(&buf, " should be");
- for (unsigned j = 0; j < nr; j++)
- prt_printf(&buf, " %llu", src_v[j]);
-
- for (unsigned j = 0; j < nr; j++)
- src_v[j] -= dst_v[j];
-
- if (fsck_err(trans, accounting_mismatch, "%s", buf.buf)) {
- percpu_up_write(&c->mark_lock);
- ret = commit_do(trans, NULL, NULL, 0,
- bch2_disk_accounting_mod(trans, &acc_k, src_v, nr, false));
- percpu_down_write(&c->mark_lock);
- if (ret)
- goto err;
-
- if (!test_bit(BCH_FS_may_go_rw, &c->flags)) {
- memset(&trans->fs_usage_delta, 0, sizeof(trans->fs_usage_delta));
- struct { __BKEY_PADDED(k, BCH_ACCOUNTING_MAX_COUNTERS); } k_i;
-
- accounting_key_init(&k_i.k, &acc_k, src_v, nr);
- bch2_accounting_mem_mod_locked(trans,
- bkey_i_to_s_c_accounting(&k_i.k),
- BCH_ACCOUNTING_normal);
-
- preempt_disable();
- struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage);
- struct bch_fs_usage_base *src = &trans->fs_usage_delta;
- acc_u64s((u64 *) dst, (u64 *) src, sizeof(*src) / sizeof(u64));
- preempt_enable();
- }
- }
- }
- }
-err:
-fsck_err:
- percpu_up_write(&c->mark_lock);
- printbuf_exit(&buf);
- bch2_trans_put(trans);
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int accounting_read_key(struct btree_trans *trans, struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
-
- if (k.k->type != KEY_TYPE_accounting)
- return 0;
-
- percpu_down_read(&c->mark_lock);
- int ret = bch2_accounting_mem_mod_locked(trans, bkey_s_c_to_accounting(k),
- BCH_ACCOUNTING_read);
- percpu_up_read(&c->mark_lock);
- return ret;
-}
-
-static int bch2_disk_accounting_validate_late(struct btree_trans *trans,
- struct disk_accounting_pos acc,
- u64 *v, unsigned nr)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
- int ret = 0, invalid_dev = -1;
-
- switch (acc.type) {
- case BCH_DISK_ACCOUNTING_replicas: {
- struct bch_replicas_padded r;
- __accounting_to_replicas(&r.e, acc);
-
- for (unsigned i = 0; i < r.e.nr_devs; i++)
- if (r.e.devs[i] != BCH_SB_MEMBER_INVALID &&
- !bch2_dev_exists(c, r.e.devs[i])) {
- invalid_dev = r.e.devs[i];
- goto invalid_device;
- }
-
- /*
- * All replicas entry checks except for invalid device are done
- * in bch2_accounting_validate
- */
- BUG_ON(bch2_replicas_entry_validate(&r.e, c, &buf));
-
- if (fsck_err_on(!bch2_replicas_marked_locked(c, &r.e),
- trans, accounting_replicas_not_marked,
- "accounting not marked in superblock replicas\n %s",
- (printbuf_reset(&buf),
- bch2_accounting_key_to_text(&buf, &acc),
- buf.buf))) {
- /*
- * We're not RW yet and still single threaded, dropping
- * and retaking lock is ok:
- */
- percpu_up_write(&c->mark_lock);
- ret = bch2_mark_replicas(c, &r.e);
- if (ret)
- goto fsck_err;
- percpu_down_write(&c->mark_lock);
- }
- break;
- }
-
- case BCH_DISK_ACCOUNTING_dev_data_type:
- if (!bch2_dev_exists(c, acc.dev_data_type.dev)) {
- invalid_dev = acc.dev_data_type.dev;
- goto invalid_device;
- }
- break;
- }
-
-fsck_err:
- printbuf_exit(&buf);
- return ret;
-invalid_device:
- if (fsck_err(trans, accounting_to_invalid_device,
- "accounting entry points to invalid device %i\n %s",
- invalid_dev,
- (printbuf_reset(&buf),
- bch2_accounting_key_to_text(&buf, &acc),
- buf.buf))) {
- for (unsigned i = 0; i < nr; i++)
- v[i] = -v[i];
-
- ret = commit_do(trans, NULL, NULL, 0,
- bch2_disk_accounting_mod(trans, &acc, v, nr, false)) ?:
- -BCH_ERR_remove_disk_accounting_entry;
- } else {
- ret = -BCH_ERR_remove_disk_accounting_entry;
- }
- goto fsck_err;
-}
-
-/*
- * At startup time, initialize the in memory accounting from the btree (and
- * journal)
- */
-int bch2_accounting_read(struct bch_fs *c)
-{
- struct bch_accounting_mem *acc = &c->accounting;
- struct btree_trans *trans = bch2_trans_get(c);
- struct printbuf buf = PRINTBUF;
-
- int ret = for_each_btree_key(trans, iter,
- BTREE_ID_accounting, POS_MIN,
- BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, ({
- struct bkey u;
- struct bkey_s_c k = bch2_btree_path_peek_slot_exact(btree_iter_path(trans, &iter), &u);
- accounting_read_key(trans, k);
- }));
- if (ret)
- goto err;
-
- struct journal_keys *keys = &c->journal_keys;
- struct journal_key *dst = keys->data;
- move_gap(keys, keys->nr);
-
- darray_for_each(*keys, i) {
- if (i->k->k.type == KEY_TYPE_accounting) {
- struct bkey_s_c k = bkey_i_to_s_c(i->k);
- unsigned idx = eytzinger0_find(acc->k.data, acc->k.nr,
- sizeof(acc->k.data[0]),
- accounting_pos_cmp, &k.k->p);
-
- bool applied = idx < acc->k.nr &&
- bversion_cmp(acc->k.data[idx].bversion, k.k->bversion) >= 0;
-
- if (applied)
- continue;
-
- if (i + 1 < &darray_top(*keys) &&
- i[1].k->k.type == KEY_TYPE_accounting &&
- !journal_key_cmp(i, i + 1)) {
- WARN_ON(bversion_cmp(i[0].k->k.bversion, i[1].k->k.bversion) >= 0);
-
- i[1].journal_seq = i[0].journal_seq;
-
- bch2_accounting_accumulate(bkey_i_to_accounting(i[1].k),
- bkey_s_c_to_accounting(k));
- continue;
- }
-
- ret = accounting_read_key(trans, k);
- if (ret)
- goto err;
- }
-
- *dst++ = *i;
- }
- keys->gap = keys->nr = dst - keys->data;
-
- percpu_down_write(&c->mark_lock);
- unsigned i = 0;
- while (i < acc->k.nr) {
- unsigned idx = inorder_to_eytzinger0(i, acc->k.nr);
-
- struct disk_accounting_pos acc_k;
- bpos_to_disk_accounting_pos(&acc_k, acc->k.data[idx].pos);
-
- u64 v[BCH_ACCOUNTING_MAX_COUNTERS];
- bch2_accounting_mem_read_counters(acc, idx, v, ARRAY_SIZE(v), false);
-
- /*
- * If the entry counters are zeroed, it should be treated as
- * nonexistent - it might point to an invalid device.
- *
- * Remove it, so that if it's re-added it gets re-marked in the
- * superblock:
- */
- ret = bch2_is_zero(v, sizeof(v[0]) * acc->k.data[idx].nr_counters)
- ? -BCH_ERR_remove_disk_accounting_entry
- : bch2_disk_accounting_validate_late(trans, acc_k,
- v, acc->k.data[idx].nr_counters);
-
- if (ret == -BCH_ERR_remove_disk_accounting_entry) {
- free_percpu(acc->k.data[idx].v[0]);
- free_percpu(acc->k.data[idx].v[1]);
- darray_remove_item(&acc->k, &acc->k.data[idx]);
- eytzinger0_sort(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
- accounting_pos_cmp, NULL);
- ret = 0;
- continue;
- }
-
- if (ret)
- goto fsck_err;
- i++;
- }
-
- preempt_disable();
- struct bch_fs_usage_base *usage = this_cpu_ptr(c->usage);
-
- for (unsigned i = 0; i < acc->k.nr; i++) {
- struct disk_accounting_pos k;
- bpos_to_disk_accounting_pos(&k, acc->k.data[i].pos);
-
- u64 v[BCH_ACCOUNTING_MAX_COUNTERS];
- bch2_accounting_mem_read_counters(acc, i, v, ARRAY_SIZE(v), false);
-
- switch (k.type) {
- case BCH_DISK_ACCOUNTING_persistent_reserved:
- usage->reserved += v[0] * k.persistent_reserved.nr_replicas;
- break;
- case BCH_DISK_ACCOUNTING_replicas:
- fs_usage_data_type_to_base(usage, k.replicas.data_type, v[0]);
- break;
- case BCH_DISK_ACCOUNTING_dev_data_type:
- rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, k.dev_data_type.dev);
- if (ca) {
- struct bch_dev_usage_type __percpu *d = &ca->usage->d[k.dev_data_type.data_type];
- percpu_u64_set(&d->buckets, v[0]);
- percpu_u64_set(&d->sectors, v[1]);
- percpu_u64_set(&d->fragmented, v[2]);
-
- if (k.dev_data_type.data_type == BCH_DATA_sb ||
- k.dev_data_type.data_type == BCH_DATA_journal)
- usage->hidden += v[0] * ca->mi.bucket_size;
- }
- rcu_read_unlock();
- break;
- }
- }
- preempt_enable();
-fsck_err:
- percpu_up_write(&c->mark_lock);
-err:
- printbuf_exit(&buf);
- bch2_trans_put(trans);
- bch_err_fn(c, ret);
- return ret;
-}
-
-int bch2_dev_usage_remove(struct bch_fs *c, unsigned dev)
-{
- return bch2_trans_run(c,
- bch2_btree_write_buffer_flush_sync(trans) ?:
- for_each_btree_key_commit(trans, iter, BTREE_ID_accounting, POS_MIN,
- BTREE_ITER_all_snapshots, k, NULL, NULL, 0, ({
- struct disk_accounting_pos acc;
- bpos_to_disk_accounting_pos(&acc, k.k->p);
-
- acc.type == BCH_DISK_ACCOUNTING_dev_data_type &&
- acc.dev_data_type.dev == dev
- ? bch2_btree_bit_mod_buffered(trans, BTREE_ID_accounting, k.k->p, 0)
- : 0;
- })) ?:
- bch2_btree_write_buffer_flush_sync(trans));
-}
-
-int bch2_dev_usage_init(struct bch_dev *ca, bool gc)
-{
- struct bch_fs *c = ca->fs;
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_dev_data_type,
- .dev_data_type.dev = ca->dev_idx,
- .dev_data_type.data_type = BCH_DATA_free,
- };
- u64 v[3] = { ca->mi.nbuckets - ca->mi.first_bucket, 0, 0 };
-
- int ret = bch2_trans_do(c, ({
- bch2_disk_accounting_mod(trans, &acc, v, ARRAY_SIZE(v), gc) ?:
- (!gc ? bch2_trans_commit(trans, NULL, NULL, 0) : 0);
- }));
- bch_err_fn(c, ret);
- return ret;
-}
-
-void bch2_verify_accounting_clean(struct bch_fs *c)
-{
- bool mismatch = false;
- struct bch_fs_usage_base base = {}, base_inmem = {};
-
- bch2_trans_run(c,
- for_each_btree_key(trans, iter,
- BTREE_ID_accounting, POS_MIN,
- BTREE_ITER_all_snapshots, k, ({
- u64 v[BCH_ACCOUNTING_MAX_COUNTERS];
- struct bkey_s_c_accounting a = bkey_s_c_to_accounting(k);
- unsigned nr = bch2_accounting_counters(k.k);
-
- struct disk_accounting_pos acc_k;
- bpos_to_disk_accounting_pos(&acc_k, k.k->p);
-
- if (acc_k.type >= BCH_DISK_ACCOUNTING_TYPE_NR)
- continue;
-
- if (acc_k.type == BCH_DISK_ACCOUNTING_inum)
- continue;
-
- bch2_accounting_mem_read(c, k.k->p, v, nr);
-
- if (memcmp(a.v->d, v, nr * sizeof(u64))) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf, c, k);
- prt_str(&buf, " !=");
- for (unsigned j = 0; j < nr; j++)
- prt_printf(&buf, " %llu", v[j]);
-
- pr_err("%s", buf.buf);
- printbuf_exit(&buf);
- mismatch = true;
- }
-
- switch (acc_k.type) {
- case BCH_DISK_ACCOUNTING_persistent_reserved:
- base.reserved += acc_k.persistent_reserved.nr_replicas * a.v->d[0];
- break;
- case BCH_DISK_ACCOUNTING_replicas:
- fs_usage_data_type_to_base(&base, acc_k.replicas.data_type, a.v->d[0]);
- break;
- case BCH_DISK_ACCOUNTING_dev_data_type: {
- rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, acc_k.dev_data_type.dev);
- if (!ca) {
- rcu_read_unlock();
- continue;
- }
-
- v[0] = percpu_u64_get(&ca->usage->d[acc_k.dev_data_type.data_type].buckets);
- v[1] = percpu_u64_get(&ca->usage->d[acc_k.dev_data_type.data_type].sectors);
- v[2] = percpu_u64_get(&ca->usage->d[acc_k.dev_data_type.data_type].fragmented);
- rcu_read_unlock();
-
- if (memcmp(a.v->d, v, 3 * sizeof(u64))) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf, c, k);
- prt_str(&buf, " in mem");
- for (unsigned j = 0; j < nr; j++)
- prt_printf(&buf, " %llu", v[j]);
-
- pr_err("dev accounting mismatch: %s", buf.buf);
- printbuf_exit(&buf);
- mismatch = true;
- }
- }
- }
-
- 0;
- })));
-
- acc_u64s_percpu(&base_inmem.hidden, &c->usage->hidden, sizeof(base_inmem) / sizeof(u64));
-
-#define check(x) \
- if (base.x != base_inmem.x) { \
- pr_err("fs_usage_base.%s mismatch: %llu != %llu", #x, base.x, base_inmem.x); \
- mismatch = true; \
- }
-
- //check(hidden);
- check(btree);
- check(data);
- check(cached);
- check(reserved);
- check(nr_inodes);
-
- WARN_ON(mismatch);
-}
-
-void bch2_accounting_gc_free(struct bch_fs *c)
-{
- lockdep_assert_held(&c->mark_lock);
-
- struct bch_accounting_mem *acc = &c->accounting;
-
- bch2_accounting_free_counters(acc, true);
- acc->gc_running = false;
-}
-
-void bch2_fs_accounting_exit(struct bch_fs *c)
-{
- struct bch_accounting_mem *acc = &c->accounting;
-
- bch2_accounting_free_counters(acc, false);
- darray_exit(&acc->k);
-}
diff --git a/fs/bcachefs/disk_accounting.h b/fs/bcachefs/disk_accounting.h
deleted file mode 100644
index 4ea6c8a092bc..000000000000
--- a/fs/bcachefs/disk_accounting.h
+++ /dev/null
@@ -1,224 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_DISK_ACCOUNTING_H
-#define _BCACHEFS_DISK_ACCOUNTING_H
-
-#include "eytzinger.h"
-#include "sb-members.h"
-
-static inline void bch2_u64s_neg(u64 *v, unsigned nr)
-{
- for (unsigned i = 0; i < nr; i++)
- v[i] = -v[i];
-}
-
-static inline unsigned bch2_accounting_counters(const struct bkey *k)
-{
- return bkey_val_u64s(k) - offsetof(struct bch_accounting, d) / sizeof(u64);
-}
-
-static inline void bch2_accounting_neg(struct bkey_s_accounting a)
-{
- bch2_u64s_neg(a.v->d, bch2_accounting_counters(a.k));
-}
-
-static inline bool bch2_accounting_key_is_zero(struct bkey_s_c_accounting a)
-{
- for (unsigned i = 0; i < bch2_accounting_counters(a.k); i++)
- if (a.v->d[i])
- return false;
- return true;
-}
-
-static inline void bch2_accounting_accumulate(struct bkey_i_accounting *dst,
- struct bkey_s_c_accounting src)
-{
- EBUG_ON(dst->k.u64s != src.k->u64s);
-
- for (unsigned i = 0; i < bch2_accounting_counters(&dst->k); i++)
- dst->v.d[i] += src.v->d[i];
- if (bversion_cmp(dst->k.bversion, src.k->bversion) < 0)
- dst->k.bversion = src.k->bversion;
-}
-
-static inline void fs_usage_data_type_to_base(struct bch_fs_usage_base *fs_usage,
- enum bch_data_type data_type,
- s64 sectors)
-{
- switch (data_type) {
- case BCH_DATA_btree:
- fs_usage->btree += sectors;
- break;
- case BCH_DATA_user:
- case BCH_DATA_parity:
- fs_usage->data += sectors;
- break;
- case BCH_DATA_cached:
- fs_usage->cached += sectors;
- break;
- default:
- break;
- }
-}
-
-static inline void bpos_to_disk_accounting_pos(struct disk_accounting_pos *acc, struct bpos p)
-{
- acc->_pad = p;
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
- bch2_bpos_swab(&acc->_pad);
-#endif
-}
-
-static inline struct bpos disk_accounting_pos_to_bpos(struct disk_accounting_pos *k)
-{
- struct bpos ret = k->_pad;
-
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
- bch2_bpos_swab(&ret);
-#endif
- return ret;
-}
-
-int bch2_disk_accounting_mod(struct btree_trans *, struct disk_accounting_pos *,
- s64 *, unsigned, bool);
-int bch2_mod_dev_cached_sectors(struct btree_trans *, unsigned, s64, bool);
-
-int bch2_accounting_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-void bch2_accounting_key_to_text(struct printbuf *, struct disk_accounting_pos *);
-void bch2_accounting_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-void bch2_accounting_swab(struct bkey_s);
-
-#define bch2_bkey_ops_accounting ((struct bkey_ops) { \
- .key_validate = bch2_accounting_validate, \
- .val_to_text = bch2_accounting_to_text, \
- .swab = bch2_accounting_swab, \
- .min_val_size = 8, \
-})
-
-int bch2_accounting_update_sb(struct btree_trans *);
-
-static inline int accounting_pos_cmp(const void *_l, const void *_r)
-{
- const struct bpos *l = _l, *r = _r;
-
- return bpos_cmp(*l, *r);
-}
-
-enum bch_accounting_mode {
- BCH_ACCOUNTING_normal,
- BCH_ACCOUNTING_gc,
- BCH_ACCOUNTING_read,
-};
-
-int bch2_accounting_mem_insert(struct bch_fs *, struct bkey_s_c_accounting, enum bch_accounting_mode);
-void bch2_accounting_mem_gc(struct bch_fs *);
-
-/*
- * Update in memory counters so they match the btree update we're doing; called
- * from transaction commit path
- */
-static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans,
- struct bkey_s_c_accounting a,
- enum bch_accounting_mode mode)
-{
- struct bch_fs *c = trans->c;
- struct bch_accounting_mem *acc = &c->accounting;
- struct disk_accounting_pos acc_k;
- bpos_to_disk_accounting_pos(&acc_k, a.k->p);
- bool gc = mode == BCH_ACCOUNTING_gc;
-
- EBUG_ON(gc && !acc->gc_running);
-
- if (acc_k.type == BCH_DISK_ACCOUNTING_inum)
- return 0;
-
- if (mode == BCH_ACCOUNTING_normal) {
- switch (acc_k.type) {
- case BCH_DISK_ACCOUNTING_persistent_reserved:
- trans->fs_usage_delta.reserved += acc_k.persistent_reserved.nr_replicas * a.v->d[0];
- break;
- case BCH_DISK_ACCOUNTING_replicas:
- fs_usage_data_type_to_base(&trans->fs_usage_delta, acc_k.replicas.data_type, a.v->d[0]);
- break;
- case BCH_DISK_ACCOUNTING_dev_data_type:
- rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, acc_k.dev_data_type.dev);
- if (ca) {
- this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].buckets, a.v->d[0]);
- this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].sectors, a.v->d[1]);
- this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].fragmented, a.v->d[2]);
- }
- rcu_read_unlock();
- break;
- }
- }
-
- unsigned idx;
-
- while ((idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
- accounting_pos_cmp, &a.k->p)) >= acc->k.nr) {
- int ret = bch2_accounting_mem_insert(c, a, mode);
- if (ret)
- return ret;
- }
-
- struct accounting_mem_entry *e = &acc->k.data[idx];
-
- EBUG_ON(bch2_accounting_counters(a.k) != e->nr_counters);
-
- for (unsigned i = 0; i < bch2_accounting_counters(a.k); i++)
- this_cpu_add(e->v[gc][i], a.v->d[i]);
- return 0;
-}
-
-static inline int bch2_accounting_mem_add(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc)
-{
- percpu_down_read(&trans->c->mark_lock);
- int ret = bch2_accounting_mem_mod_locked(trans, a, gc ? BCH_ACCOUNTING_gc : BCH_ACCOUNTING_normal);
- percpu_up_read(&trans->c->mark_lock);
- return ret;
-}
-
-static inline void bch2_accounting_mem_read_counters(struct bch_accounting_mem *acc,
- unsigned idx, u64 *v, unsigned nr, bool gc)
-{
- memset(v, 0, sizeof(*v) * nr);
-
- if (unlikely(idx >= acc->k.nr))
- return;
-
- struct accounting_mem_entry *e = &acc->k.data[idx];
-
- nr = min_t(unsigned, nr, e->nr_counters);
-
- for (unsigned i = 0; i < nr; i++)
- v[i] = percpu_u64_get(e->v[gc] + i);
-}
-
-static inline void bch2_accounting_mem_read(struct bch_fs *c, struct bpos p,
- u64 *v, unsigned nr)
-{
- struct bch_accounting_mem *acc = &c->accounting;
- unsigned idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
- accounting_pos_cmp, &p);
-
- bch2_accounting_mem_read_counters(acc, idx, v, nr, false);
-}
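
/*
 * Illustrative sketch, not part of the original header: reading a single
 * in-memory counter via the helpers above, here the filesystem-wide nr_inodes
 * count.  The in-memory table is protected by c->mark_lock elsewhere in
 * disk_accounting.c; any locking the caller needs is assumed, not shown.
 */
static inline u64 nr_inodes_read_sketch(struct bch_fs *c)
{
	struct disk_accounting_pos acc = { .type = BCH_DISK_ACCOUNTING_nr_inodes };
	u64 v[1] = { 0 };

	bch2_accounting_mem_read(c, disk_accounting_pos_to_bpos(&acc), v, ARRAY_SIZE(v));
	return v[0];
}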
-
-int bch2_fs_replicas_usage_read(struct bch_fs *, darray_char *);
-int bch2_fs_accounting_read(struct bch_fs *, darray_char *, unsigned);
-void bch2_fs_accounting_to_text(struct printbuf *, struct bch_fs *);
-
-int bch2_gc_accounting_start(struct bch_fs *);
-int bch2_gc_accounting_done(struct bch_fs *);
-
-int bch2_accounting_read(struct bch_fs *);
-
-int bch2_dev_usage_remove(struct bch_fs *, unsigned);
-int bch2_dev_usage_init(struct bch_dev *, bool);
-
-void bch2_verify_accounting_clean(struct bch_fs *c);
-
-void bch2_accounting_gc_free(struct bch_fs *);
-void bch2_fs_accounting_exit(struct bch_fs *);
-
-#endif /* _BCACHEFS_DISK_ACCOUNTING_H */
diff --git a/fs/bcachefs/disk_accounting_format.h b/fs/bcachefs/disk_accounting_format.h
deleted file mode 100644
index 7b6e6c97e6aa..000000000000
--- a/fs/bcachefs/disk_accounting_format.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_DISK_ACCOUNTING_FORMAT_H
-#define _BCACHEFS_DISK_ACCOUNTING_FORMAT_H
-
-#include "replicas_format.h"
-
-/*
- * Disk accounting - KEY_TYPE_accounting - on disk format:
- *
- * Here, the key has considerably more structure than a typical key (bpos); an
- * accounting key is 'struct disk_accounting_pos', a union overlaid on bpos.
- *
- * More specifically: a key is just a multiword integer (where word endianness
- * matches native byte order), so we're treating bpos as an opaque 20 byte
- * integer and mapping bch_accounting_key to that.
- *
- * This is a type-tagged union of all our various subtypes; a disk accounting
- * key can be device counters, replicas counters, et cetera - it's extensible.
- *
- * The value is a list of u64s or s64s; the number of counters is specific to a
- * given accounting type.
- *
- * Unlike with other key types, updates are _deltas_, and the deltas are not
- * resolved until the update to the underlying btree, done by btree write buffer
- * flush or journal replay.
- *
- * Journal replay in particular requires special handling. The journal tracks a
- * range of entries which may not yet have been applied to the btree - it does
- * not know definitively whether individual entries are dirty and still need to
- * be applied.
- *
- * To handle this, we use the version field of struct bkey, and give every
- * accounting update a unique version number - a total ordering in time; the
- * version number is derived from the key's position in the journal. Then
- * journal replay can compare the version number of the key from the journal
- * with the version number of the key in the btree to determine if a key needs
- * to be replayed.
- *
- * For this to work, we must maintain this strict time ordering of updates as
- * they are flushed to the btree, both via write buffer flush and via journal
- * replay. This has complications for the write buffer code while journal replay
- * is still in progress; the write buffer cannot flush any accounting keys to
- * the btree until journal replay has finished replaying its accounting keys, or
- * the (newer) version number of the keys from the write buffer will cause
- * updates from journal replay to be lost.
- */
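
/*
 * Illustrative sketch, not part of the original header: the version ordering
 * described above reduces to a single comparison at replay time - an
 * accounting delta only needs to be replayed if its version is strictly newer
 * than the version already applied to the btree.  bversion_cmp() is the
 * comparison helper used elsewhere in fs/bcachefs.
 */
static inline bool accounting_needs_replay_sketch(struct bversion journal_version,
						  struct bversion applied_version)
{
	return bversion_cmp(journal_version, applied_version) > 0;
}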
-
-struct bch_accounting {
- struct bch_val v;
- __u64 d[];
-};
-
-#define BCH_ACCOUNTING_MAX_COUNTERS 3
-
-#define BCH_DATA_TYPES() \
- x(free, 0) \
- x(sb, 1) \
- x(journal, 2) \
- x(btree, 3) \
- x(user, 4) \
- x(cached, 5) \
- x(parity, 6) \
- x(stripe, 7) \
- x(need_gc_gens, 8) \
- x(need_discard, 9) \
- x(unstriped, 10)
-
-enum bch_data_type {
-#define x(t, n) BCH_DATA_##t,
- BCH_DATA_TYPES()
-#undef x
- BCH_DATA_NR
-};
-
-static inline bool data_type_is_empty(enum bch_data_type type)
-{
- switch (type) {
- case BCH_DATA_free:
- case BCH_DATA_need_gc_gens:
- case BCH_DATA_need_discard:
- return true;
- default:
- return false;
- }
-}
-
-static inline bool data_type_is_hidden(enum bch_data_type type)
-{
- switch (type) {
- case BCH_DATA_sb:
- case BCH_DATA_journal:
- return true;
- default:
- return false;
- }
-}
-
-#define BCH_DISK_ACCOUNTING_TYPES() \
- x(nr_inodes, 0) \
- x(persistent_reserved, 1) \
- x(replicas, 2) \
- x(dev_data_type, 3) \
- x(compression, 4) \
- x(snapshot, 5) \
- x(btree, 6) \
- x(rebalance_work, 7) \
- x(inum, 8)
-
-enum disk_accounting_type {
-#define x(f, nr) BCH_DISK_ACCOUNTING_##f = nr,
- BCH_DISK_ACCOUNTING_TYPES()
-#undef x
- BCH_DISK_ACCOUNTING_TYPE_NR,
-};
-
-struct bch_nr_inodes {
-};
-
-struct bch_persistent_reserved {
- __u8 nr_replicas;
-};
-
-struct bch_dev_data_type {
- __u8 dev;
- __u8 data_type;
-};
-
-struct bch_acct_compression {
- __u8 type;
-};
-
-struct bch_acct_snapshot {
- __u32 id;
-} __packed;
-
-struct bch_acct_btree {
- __u32 id;
-} __packed;
-
-struct bch_acct_inum {
- __u64 inum;
-} __packed;
-
-struct bch_acct_rebalance_work {
-};
-
-struct disk_accounting_pos {
- union {
- struct {
- __u8 type;
- union {
- struct bch_nr_inodes nr_inodes;
- struct bch_persistent_reserved persistent_reserved;
- struct bch_replicas_entry_v1 replicas;
- struct bch_dev_data_type dev_data_type;
- struct bch_acct_compression compression;
- struct bch_acct_snapshot snapshot;
- struct bch_acct_btree btree;
- struct bch_acct_rebalance_work rebalance_work;
- struct bch_acct_inum inum;
- } __packed;
- } __packed;
- struct bpos _pad;
- };
-};
-
-#endif /* _BCACHEFS_DISK_ACCOUNTING_FORMAT_H */
diff --git a/fs/bcachefs/disk_accounting_types.h b/fs/bcachefs/disk_accounting_types.h
deleted file mode 100644
index b1982131b206..000000000000
--- a/fs/bcachefs/disk_accounting_types.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_DISK_ACCOUNTING_TYPES_H
-#define _BCACHEFS_DISK_ACCOUNTING_TYPES_H
-
-#include "darray.h"
-
-struct accounting_mem_entry {
- struct bpos pos;
- struct bversion bversion;
- unsigned nr_counters;
- u64 __percpu *v[2];
-};
-
-struct bch_accounting_mem {
- DARRAY(struct accounting_mem_entry) k;
- bool gc_running;
-};
-
-#endif /* _BCACHEFS_DISK_ACCOUNTING_TYPES_H */
diff --git a/fs/bcachefs/disk_groups.c b/fs/bcachefs/disk_groups.c
deleted file mode 100644
index 5df8de0b8c02..000000000000
--- a/fs/bcachefs/disk_groups.c
+++ /dev/null
@@ -1,616 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "bcachefs.h"
-#include "disk_groups.h"
-#include "sb-members.h"
-#include "super-io.h"
-
-#include <linux/sort.h>
-
-static int group_cmp(const void *_l, const void *_r)
-{
- const struct bch_disk_group *l = _l;
- const struct bch_disk_group *r = _r;
-
- return ((BCH_GROUP_DELETED(l) > BCH_GROUP_DELETED(r)) -
- (BCH_GROUP_DELETED(l) < BCH_GROUP_DELETED(r))) ?:
- ((BCH_GROUP_PARENT(l) > BCH_GROUP_PARENT(r)) -
- (BCH_GROUP_PARENT(l) < BCH_GROUP_PARENT(r))) ?:
- strncmp(l->label, r->label, sizeof(l->label));
-}
-
-static int bch2_sb_disk_groups_validate(struct bch_sb *sb, struct bch_sb_field *f,
- enum bch_validate_flags flags, struct printbuf *err)
-{
- struct bch_sb_field_disk_groups *groups =
- field_to_type(f, disk_groups);
- struct bch_disk_group *g, *sorted = NULL;
- unsigned nr_groups = disk_groups_nr(groups);
- unsigned i, len;
- int ret = 0;
-
- for (i = 0; i < sb->nr_devices; i++) {
- struct bch_member m = bch2_sb_member_get(sb, i);
- unsigned group_id;
-
- if (!BCH_MEMBER_GROUP(&m))
- continue;
-
- group_id = BCH_MEMBER_GROUP(&m) - 1;
-
- if (group_id >= nr_groups) {
- prt_printf(err, "disk %u has invalid label %u (have %u)",
- i, group_id, nr_groups);
- return -BCH_ERR_invalid_sb_disk_groups;
- }
-
- if (BCH_GROUP_DELETED(&groups->entries[group_id])) {
- prt_printf(err, "disk %u has deleted label %u", i, group_id);
- return -BCH_ERR_invalid_sb_disk_groups;
- }
- }
-
- if (!nr_groups)
- return 0;
-
- for (i = 0; i < nr_groups; i++) {
- g = groups->entries + i;
-
- if (BCH_GROUP_DELETED(g))
- continue;
-
- len = strnlen(g->label, sizeof(g->label));
- if (!len) {
- prt_printf(err, "label %u empty", i);
- return -BCH_ERR_invalid_sb_disk_groups;
- }
- }
-
- sorted = kmalloc_array(nr_groups, sizeof(*sorted), GFP_KERNEL);
- if (!sorted)
- return -BCH_ERR_ENOMEM_disk_groups_validate;
-
- memcpy(sorted, groups->entries, nr_groups * sizeof(*sorted));
- sort(sorted, nr_groups, sizeof(*sorted), group_cmp, NULL);
-
- for (g = sorted; g + 1 < sorted + nr_groups; g++)
- if (!BCH_GROUP_DELETED(g) &&
- !group_cmp(&g[0], &g[1])) {
- prt_printf(err, "duplicate label %llu.%.*s",
- BCH_GROUP_PARENT(g),
- (int) sizeof(g->label), g->label);
- ret = -BCH_ERR_invalid_sb_disk_groups;
- goto err;
- }
-err:
- kfree(sorted);
- return ret;
-}
-
-void bch2_disk_groups_to_text(struct printbuf *out, struct bch_fs *c)
-{
- out->atomic++;
- rcu_read_lock();
-
- struct bch_disk_groups_cpu *g = rcu_dereference(c->disk_groups);
- if (!g)
- goto out;
-
- for (unsigned i = 0; i < g->nr; i++) {
- if (i)
- prt_printf(out, " ");
-
- if (g->entries[i].deleted) {
- prt_printf(out, "[deleted]");
- continue;
- }
-
- prt_printf(out, "[parent %d devs", g->entries[i].parent);
- for_each_member_device_rcu(c, ca, &g->entries[i].devs)
- prt_printf(out, " %s", ca->name);
- prt_printf(out, "]");
- }
-
-out:
- rcu_read_unlock();
- out->atomic--;
-}
-
-static void bch2_sb_disk_groups_to_text(struct printbuf *out,
- struct bch_sb *sb,
- struct bch_sb_field *f)
-{
- struct bch_sb_field_disk_groups *groups =
- field_to_type(f, disk_groups);
- struct bch_disk_group *g;
- unsigned nr_groups = disk_groups_nr(groups);
-
- for (g = groups->entries;
- g < groups->entries + nr_groups;
- g++) {
- if (g != groups->entries)
- prt_printf(out, " ");
-
- if (BCH_GROUP_DELETED(g))
- prt_printf(out, "[deleted]");
- else
- prt_printf(out, "[parent %llu name %s]",
- BCH_GROUP_PARENT(g), g->label);
- }
-}
-
-const struct bch_sb_field_ops bch_sb_field_ops_disk_groups = {
- .validate = bch2_sb_disk_groups_validate,
- .to_text = bch2_sb_disk_groups_to_text
-};
-
-int bch2_sb_disk_groups_to_cpu(struct bch_fs *c)
-{
- struct bch_sb_field_disk_groups *groups;
- struct bch_disk_groups_cpu *cpu_g, *old_g;
- unsigned i, g, nr_groups;
-
- lockdep_assert_held(&c->sb_lock);
-
- groups = bch2_sb_field_get(c->disk_sb.sb, disk_groups);
- nr_groups = disk_groups_nr(groups);
-
- if (!groups)
- return 0;
-
- cpu_g = kzalloc(struct_size(cpu_g, entries, nr_groups), GFP_KERNEL);
- if (!cpu_g)
- return -BCH_ERR_ENOMEM_disk_groups_to_cpu;
-
- cpu_g->nr = nr_groups;
-
- for (i = 0; i < nr_groups; i++) {
- struct bch_disk_group *src = &groups->entries[i];
- struct bch_disk_group_cpu *dst = &cpu_g->entries[i];
-
- dst->deleted = BCH_GROUP_DELETED(src);
- dst->parent = BCH_GROUP_PARENT(src);
- memcpy(dst->label, src->label, sizeof(dst->label));
- }
-
- for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
- struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, i);
- struct bch_disk_group_cpu *dst;
-
- if (!bch2_member_alive(&m))
- continue;
-
- g = BCH_MEMBER_GROUP(&m);
- while (g) {
- dst = &cpu_g->entries[g - 1];
- __set_bit(i, dst->devs.d);
- g = dst->parent;
- }
- }
-
- old_g = rcu_dereference_protected(c->disk_groups,
- lockdep_is_held(&c->sb_lock));
- rcu_assign_pointer(c->disk_groups, cpu_g);
- if (old_g)
- kfree_rcu(old_g, rcu);
-
- return 0;
-}
-
-const struct bch_devs_mask *bch2_target_to_mask(struct bch_fs *c, unsigned target)
-{
- struct target t = target_decode(target);
- struct bch_devs_mask *devs;
-
- rcu_read_lock();
-
- switch (t.type) {
- case TARGET_NULL:
- devs = NULL;
- break;
- case TARGET_DEV: {
- struct bch_dev *ca = t.dev < c->sb.nr_devices
- ? rcu_dereference(c->devs[t.dev])
- : NULL;
- devs = ca ? &ca->self : NULL;
- break;
- }
- case TARGET_GROUP: {
- struct bch_disk_groups_cpu *g = rcu_dereference(c->disk_groups);
-
- devs = g && t.group < g->nr && !g->entries[t.group].deleted
- ? &g->entries[t.group].devs
- : NULL;
- break;
- }
- default:
- BUG();
- }
-
- rcu_read_unlock();
-
- return devs;
-}
-
-bool bch2_dev_in_target(struct bch_fs *c, unsigned dev, unsigned target)
-{
- struct target t = target_decode(target);
-
- switch (t.type) {
- case TARGET_NULL:
- return false;
- case TARGET_DEV:
- return dev == t.dev;
- case TARGET_GROUP: {
- struct bch_disk_groups_cpu *g;
- const struct bch_devs_mask *m;
- bool ret;
-
- rcu_read_lock();
- g = rcu_dereference(c->disk_groups);
- m = g && t.group < g->nr && !g->entries[t.group].deleted
- ? &g->entries[t.group].devs
- : NULL;
-
- ret = m ? test_bit(dev, m->d) : false;
- rcu_read_unlock();
-
- return ret;
- }
- default:
- BUG();
- }
-}
-
-static int __bch2_disk_group_find(struct bch_sb_field_disk_groups *groups,
- unsigned parent,
- const char *name, unsigned namelen)
-{
- unsigned i, nr_groups = disk_groups_nr(groups);
-
- if (!namelen || namelen > BCH_SB_LABEL_SIZE)
- return -EINVAL;
-
- for (i = 0; i < nr_groups; i++) {
- struct bch_disk_group *g = groups->entries + i;
-
- if (BCH_GROUP_DELETED(g))
- continue;
-
- if (!BCH_GROUP_DELETED(g) &&
- BCH_GROUP_PARENT(g) == parent &&
- strnlen(g->label, sizeof(g->label)) == namelen &&
- !memcmp(name, g->label, namelen))
- return i;
- }
-
- return -1;
-}
-
-static int __bch2_disk_group_add(struct bch_sb_handle *sb, unsigned parent,
- const char *name, unsigned namelen)
-{
- struct bch_sb_field_disk_groups *groups =
- bch2_sb_field_get(sb->sb, disk_groups);
- unsigned i, nr_groups = disk_groups_nr(groups);
- struct bch_disk_group *g;
-
- if (!namelen || namelen > BCH_SB_LABEL_SIZE)
- return -EINVAL;
-
- for (i = 0;
- i < nr_groups && !BCH_GROUP_DELETED(&groups->entries[i]);
- i++)
- ;
-
- if (i == nr_groups) {
- unsigned u64s =
- (sizeof(struct bch_sb_field_disk_groups) +
- sizeof(struct bch_disk_group) * (nr_groups + 1)) /
- sizeof(u64);
-
- groups = bch2_sb_field_resize(sb, disk_groups, u64s);
- if (!groups)
- return -BCH_ERR_ENOSPC_disk_label_add;
-
- nr_groups = disk_groups_nr(groups);
- }
-
- BUG_ON(i >= nr_groups);
-
- g = &groups->entries[i];
-
- memcpy(g->label, name, namelen);
- if (namelen < sizeof(g->label))
- g->label[namelen] = '\0';
- SET_BCH_GROUP_DELETED(g, 0);
- SET_BCH_GROUP_PARENT(g, parent);
- SET_BCH_GROUP_DATA_ALLOWED(g, ~0);
-
- return i;
-}
-
-int bch2_disk_path_find(struct bch_sb_handle *sb, const char *name)
-{
- struct bch_sb_field_disk_groups *groups =
- bch2_sb_field_get(sb->sb, disk_groups);
- int v = -1;
-
- do {
- const char *next = strchrnul(name, '.');
- unsigned len = next - name;
-
- if (*next == '.')
- next++;
-
- v = __bch2_disk_group_find(groups, v + 1, name, len);
- name = next;
- } while (*name && v >= 0);
-
- return v;
-}
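
/*
 * Illustrative only, not part of the original file: label paths are
 * dot-separated, so the call below resolves a group labelled "ssd" nested
 * under "rack0", one component per iteration of the loop above.  The label
 * names are made up; locking (sb_lock in the real callers) is omitted.
 */
static inline int disk_path_find_example(struct bch_sb_handle *sb)
{
	return bch2_disk_path_find(sb, "rack0.ssd");
}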
-
-int bch2_disk_path_find_or_create(struct bch_sb_handle *sb, const char *name)
-{
- struct bch_sb_field_disk_groups *groups;
- unsigned parent = 0;
- int v = -1;
-
- do {
- const char *next = strchrnul(name, '.');
- unsigned len = next - name;
-
- if (*next == '.')
- next++;
-
- groups = bch2_sb_field_get(sb->sb, disk_groups);
-
- v = __bch2_disk_group_find(groups, parent, name, len);
- if (v < 0)
- v = __bch2_disk_group_add(sb, parent, name, len);
- if (v < 0)
- return v;
-
- parent = v + 1;
- name = next;
- } while (*name && v >= 0);
-
- return v;
-}
-
-void bch2_disk_path_to_text(struct printbuf *out, struct bch_fs *c, unsigned v)
-{
- struct bch_disk_groups_cpu *groups;
- struct bch_disk_group_cpu *g;
- unsigned nr = 0;
- u16 path[32];
-
- out->atomic++;
- rcu_read_lock();
- groups = rcu_dereference(c->disk_groups);
- if (!groups)
- goto invalid;
-
- while (1) {
- if (nr == ARRAY_SIZE(path))
- goto invalid;
-
- if (v >= groups->nr)
- goto invalid;
-
- g = groups->entries + v;
-
- if (g->deleted)
- goto invalid;
-
- path[nr++] = v;
-
- if (!g->parent)
- break;
-
- v = g->parent - 1;
- }
-
- while (nr) {
- v = path[--nr];
- g = groups->entries + v;
-
- prt_printf(out, "%.*s", (int) sizeof(g->label), g->label);
- if (nr)
- prt_printf(out, ".");
- }
-out:
- rcu_read_unlock();
- out->atomic--;
- return;
-invalid:
- prt_printf(out, "invalid label %u", v);
- goto out;
-}
-
-void bch2_disk_path_to_text_sb(struct printbuf *out, struct bch_sb *sb, unsigned v)
-{
- struct bch_sb_field_disk_groups *groups =
- bch2_sb_field_get(sb, disk_groups);
- struct bch_disk_group *g;
- unsigned nr = 0;
- u16 path[32];
-
- while (1) {
- if (nr == ARRAY_SIZE(path))
- goto inval;
-
- if (v >= disk_groups_nr(groups))
- goto inval;
-
- g = groups->entries + v;
-
- if (BCH_GROUP_DELETED(g))
- goto inval;
-
- path[nr++] = v;
-
- if (!BCH_GROUP_PARENT(g))
- break;
-
- v = BCH_GROUP_PARENT(g) - 1;
- }
-
- while (nr) {
- v = path[--nr];
- g = groups->entries + v;
-
- prt_printf(out, "%.*s", (int) sizeof(g->label), g->label);
- if (nr)
- prt_printf(out, ".");
- }
- return;
-inval:
- prt_printf(out, "invalid label %u", v);
-}
-
-int __bch2_dev_group_set(struct bch_fs *c, struct bch_dev *ca, const char *name)
-{
- struct bch_member *mi;
- int ret, v = -1;
-
- if (!strlen(name) || !strcmp(name, "none"))
- return 0;
-
- v = bch2_disk_path_find_or_create(&c->disk_sb, name);
- if (v < 0)
- return v;
-
- ret = bch2_sb_disk_groups_to_cpu(c);
- if (ret)
- return ret;
-
- mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
- SET_BCH_MEMBER_GROUP(mi, v + 1);
- return 0;
-}
-
-int bch2_dev_group_set(struct bch_fs *c, struct bch_dev *ca, const char *name)
-{
- int ret;
-
- mutex_lock(&c->sb_lock);
- ret = __bch2_dev_group_set(c, ca, name) ?:
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-
- return ret;
-}
-
-int bch2_opt_target_parse(struct bch_fs *c, const char *val, u64 *res,
- struct printbuf *err)
-{
- struct bch_dev *ca;
- int g;
-
- if (!val)
- return -EINVAL;
-
- if (!c)
- return -BCH_ERR_option_needs_open_fs;
-
- if (!strlen(val) || !strcmp(val, "none")) {
- *res = 0;
- return 0;
- }
-
- /* Is it a device? */
- ca = bch2_dev_lookup(c, val);
- if (!IS_ERR(ca)) {
- *res = dev_to_target(ca->dev_idx);
- bch2_dev_put(ca);
- return 0;
- }
-
- mutex_lock(&c->sb_lock);
- g = bch2_disk_path_find(&c->disk_sb, val);
- mutex_unlock(&c->sb_lock);
-
- if (g >= 0) {
- *res = group_to_target(g);
- return 0;
- }
-
- return -EINVAL;
-}
-
-void bch2_target_to_text(struct printbuf *out, struct bch_fs *c, unsigned v)
-{
- struct target t = target_decode(v);
-
- switch (t.type) {
- case TARGET_NULL:
- prt_printf(out, "none");
- break;
- case TARGET_DEV: {
- struct bch_dev *ca;
-
- out->atomic++;
- rcu_read_lock();
- ca = t.dev < c->sb.nr_devices
- ? rcu_dereference(c->devs[t.dev])
- : NULL;
-
- if (ca && percpu_ref_tryget(&ca->io_ref)) {
- prt_printf(out, "/dev/%s", ca->name);
- percpu_ref_put(&ca->io_ref);
- } else if (ca) {
- prt_printf(out, "offline device %u", t.dev);
- } else {
- prt_printf(out, "invalid device %u", t.dev);
- }
-
- rcu_read_unlock();
- out->atomic--;
- break;
- }
- case TARGET_GROUP:
- bch2_disk_path_to_text(out, c, t.group);
- break;
- default:
- BUG();
- }
-}
-
-static void bch2_target_to_text_sb(struct printbuf *out, struct bch_sb *sb, unsigned v)
-{
- struct target t = target_decode(v);
-
- switch (t.type) {
- case TARGET_NULL:
- prt_printf(out, "none");
- break;
- case TARGET_DEV: {
- struct bch_member m = bch2_sb_member_get(sb, t.dev);
-
- if (bch2_member_exists(sb, t.dev)) {
- prt_printf(out, "Device ");
- pr_uuid(out, m.uuid.b);
- prt_printf(out, " (%u)", t.dev);
- } else {
- prt_printf(out, "Bad device %u", t.dev);
- }
- break;
- }
- case TARGET_GROUP:
- bch2_disk_path_to_text_sb(out, sb, t.group);
- break;
- default:
- BUG();
- }
-}
-
-void bch2_opt_target_to_text(struct printbuf *out,
- struct bch_fs *c,
- struct bch_sb *sb,
- u64 v)
-{
- if (c)
- bch2_target_to_text(out, c, v);
- else
- bch2_target_to_text_sb(out, sb, v);
-}
diff --git a/fs/bcachefs/disk_groups.h b/fs/bcachefs/disk_groups.h
deleted file mode 100644
index 441826fff224..000000000000
--- a/fs/bcachefs/disk_groups.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_DISK_GROUPS_H
-#define _BCACHEFS_DISK_GROUPS_H
-
-#include "disk_groups_types.h"
-
-extern const struct bch_sb_field_ops bch_sb_field_ops_disk_groups;
-
-static inline unsigned disk_groups_nr(struct bch_sb_field_disk_groups *groups)
-{
- return groups
- ? (vstruct_end(&groups->field) -
- (void *) &groups->entries[0]) / sizeof(struct bch_disk_group)
- : 0;
-}
-
-struct target {
- enum {
- TARGET_NULL,
- TARGET_DEV,
- TARGET_GROUP,
- } type;
- union {
- unsigned dev;
- unsigned group;
- };
-};
-
-#define TARGET_DEV_START 1
-#define TARGET_GROUP_START (256 + TARGET_DEV_START)
-
-static inline u16 dev_to_target(unsigned dev)
-{
- return TARGET_DEV_START + dev;
-}
-
-static inline u16 group_to_target(unsigned group)
-{
- return TARGET_GROUP_START + group;
-}
-
-static inline struct target target_decode(unsigned target)
-{
- if (target >= TARGET_GROUP_START)
- return (struct target) {
- .type = TARGET_GROUP,
- .group = target - TARGET_GROUP_START
- };
-
- if (target >= TARGET_DEV_START)
- return (struct target) {
- .type = TARGET_DEV,
- .group = target - TARGET_DEV_START
- };
-
- return (struct target) { .type = TARGET_NULL };
-}
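
/*
 * Illustrative only, not part of the original header: the encoding above packs
 * "none", a device index or a label index into one small integer, and decoding
 * recovers the original value.
 */
static inline bool target_encoding_roundtrip_sketch(void)
{
	/* device 3 encodes to TARGET_DEV_START + 3 == 4 and decodes back */
	struct target t = target_decode(dev_to_target(3));

	return t.type == TARGET_DEV && t.dev == 3;
}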
-
-const struct bch_devs_mask *bch2_target_to_mask(struct bch_fs *, unsigned);
-
-static inline struct bch_devs_mask target_rw_devs(struct bch_fs *c,
- enum bch_data_type data_type,
- u16 target)
-{
- struct bch_devs_mask devs = c->rw_devs[data_type];
- const struct bch_devs_mask *t = bch2_target_to_mask(c, target);
-
- if (t)
- bitmap_and(devs.d, devs.d, t->d, BCH_SB_MEMBERS_MAX);
- return devs;
-}
-
-static inline bool bch2_target_accepts_data(struct bch_fs *c,
- enum bch_data_type data_type,
- u16 target)
-{
- struct bch_devs_mask rw_devs = target_rw_devs(c, data_type, target);
- return !bitmap_empty(rw_devs.d, BCH_SB_MEMBERS_MAX);
-}
-
-bool bch2_dev_in_target(struct bch_fs *, unsigned, unsigned);
-
-int bch2_disk_path_find(struct bch_sb_handle *, const char *);
-
-/* Exported for userspace bcachefs-tools: */
-int bch2_disk_path_find_or_create(struct bch_sb_handle *, const char *);
-
-void bch2_disk_path_to_text(struct printbuf *, struct bch_fs *, unsigned);
-void bch2_disk_path_to_text_sb(struct printbuf *, struct bch_sb *, unsigned);
-
-void bch2_target_to_text(struct printbuf *out, struct bch_fs *, unsigned);
-
-int bch2_opt_target_parse(struct bch_fs *, const char *, u64 *, struct printbuf *);
-void bch2_opt_target_to_text(struct printbuf *, struct bch_fs *, struct bch_sb *, u64);
-
-#define bch2_opt_target (struct bch_opt_fn) { \
- .parse = bch2_opt_target_parse, \
- .to_text = bch2_opt_target_to_text, \
-}
-
-int bch2_sb_disk_groups_to_cpu(struct bch_fs *);
-
-int __bch2_dev_group_set(struct bch_fs *, struct bch_dev *, const char *);
-int bch2_dev_group_set(struct bch_fs *, struct bch_dev *, const char *);
-
-const char *bch2_sb_validate_disk_groups(struct bch_sb *,
- struct bch_sb_field *);
-
-void bch2_disk_groups_to_text(struct printbuf *, struct bch_fs *);
-
-#endif /* _BCACHEFS_DISK_GROUPS_H */
diff --git a/fs/bcachefs/disk_groups_format.h b/fs/bcachefs/disk_groups_format.h
deleted file mode 100644
index 698990bbf1d2..000000000000
--- a/fs/bcachefs/disk_groups_format.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_DISK_GROUPS_FORMAT_H
-#define _BCACHEFS_DISK_GROUPS_FORMAT_H
-
-#define BCH_SB_LABEL_SIZE 32
-
-struct bch_disk_group {
- __u8 label[BCH_SB_LABEL_SIZE];
- __le64 flags[2];
-} __packed __aligned(8);
-
-LE64_BITMASK(BCH_GROUP_DELETED, struct bch_disk_group, flags[0], 0, 1)
-LE64_BITMASK(BCH_GROUP_DATA_ALLOWED, struct bch_disk_group, flags[0], 1, 6)
-LE64_BITMASK(BCH_GROUP_PARENT, struct bch_disk_group, flags[0], 6, 24)
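Put differently, each label's first flags word carries a deleted bit (bit 0), a five-bit mask of allowed data types (bits 1..5) and an 18-bit parent index (bits 6..23); as elsewhere in the bcachefs format headers, LE64_BITMASK() is assumed to generate the matching BCH_GROUP_*()/SET_BCH_GROUP_*() accessors.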
-
-struct bch_sb_field_disk_groups {
- struct bch_sb_field field;
- struct bch_disk_group entries[];
-} __packed __aligned(8);
-
-#endif /* _BCACHEFS_DISK_GROUPS_FORMAT_H */
diff --git a/fs/bcachefs/disk_groups_types.h b/fs/bcachefs/disk_groups_types.h
deleted file mode 100644
index a54ef085b13d..000000000000
--- a/fs/bcachefs/disk_groups_types.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_DISK_GROUPS_TYPES_H
-#define _BCACHEFS_DISK_GROUPS_TYPES_H
-
-struct bch_disk_group_cpu {
- bool deleted;
- u16 parent;
- u8 label[BCH_SB_LABEL_SIZE];
- struct bch_devs_mask devs;
-};
-
-struct bch_disk_groups_cpu {
- struct rcu_head rcu;
- unsigned nr;
- struct bch_disk_group_cpu entries[] __counted_by(nr);
-};
-
-#endif /* _BCACHEFS_DISK_GROUPS_TYPES_H */
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
deleted file mode 100644
index 6094afb0c6be..000000000000
--- a/fs/bcachefs/ec.c
+++ /dev/null
@@ -1,2496 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/* erasure coding */
-
-#include "bcachefs.h"
-#include "alloc_background.h"
-#include "alloc_foreground.h"
-#include "backpointers.h"
-#include "bkey_buf.h"
-#include "bset.h"
-#include "btree_gc.h"
-#include "btree_update.h"
-#include "btree_write_buffer.h"
-#include "buckets.h"
-#include "checksum.h"
-#include "disk_accounting.h"
-#include "disk_groups.h"
-#include "ec.h"
-#include "error.h"
-#include "io_read.h"
-#include "io_write.h"
-#include "keylist.h"
-#include "recovery.h"
-#include "replicas.h"
-#include "super-io.h"
-#include "util.h"
-
-#include <linux/sort.h>
-
-#ifdef __KERNEL__
-
-#include <linux/raid/pq.h>
-#include <linux/raid/xor.h>
-
-static void raid5_recov(unsigned disks, unsigned failed_idx,
- size_t size, void **data)
-{
- unsigned i = 2, nr;
-
- BUG_ON(failed_idx >= disks);
-
- swap(data[0], data[failed_idx]);
- memcpy(data[0], data[1], size);
-
- while (i < disks) {
- nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
- xor_blocks(nr, size, data[0], data + i);
- i += nr;
- }
-
- swap(data[0], data[failed_idx]);
-}
-
-static void raid_gen(int nd, int np, size_t size, void **v)
-{
- if (np >= 1)
- raid5_recov(nd + np, nd, size, v);
- if (np >= 2)
- raid6_call.gen_syndrome(nd + np, size, v);
- BUG_ON(np > 2);
-}
-
-static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
-{
- switch (nr) {
- case 0:
- break;
- case 1:
- if (ir[0] < nd + 1)
- raid5_recov(nd + 1, ir[0], size, v);
- else
- raid6_call.gen_syndrome(nd + np, size, v);
- break;
- case 2:
- if (ir[1] < nd) {
- /* data+data failure. */
- raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
- } else if (ir[0] < nd) {
- /* data + p/q failure */
-
- if (ir[1] == nd) /* data + p failure */
- raid6_datap_recov(nd + np, size, ir[0], v);
- else { /* data + q failure */
- raid5_recov(nd + 1, ir[0], size, v);
- raid6_call.gen_syndrome(nd + np, size, v);
- }
- } else {
- raid_gen(nd, np, size, v);
- }
- break;
- default:
- BUG();
- }
-}
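The dispatch above repairs a single failure with the RAID5 XOR path or by regenerating the syndrome, and hands double failures to the lib/raid6 recovery helpers. As a sketch, recovering two lost data blocks from a 4+2 stripe (bufs and bytes are placeholders for the per-block buffers and their common length) would look like:

	int failed[] = { 1, 3 };	/* unreadable data block indices */

	/* both failures are data blocks (ir[1] < nd), so this takes the
	 * raid6_2data_recov() branch above:
	 */
	raid_rec(ARRAY_SIZE(failed), failed, 4, 2, bytes, bufs);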
-
-#else
-
-#include <raid/raid.h>
-
-#endif
-
-struct ec_bio {
- struct bch_dev *ca;
- struct ec_stripe_buf *buf;
- size_t idx;
- struct bio bio;
-};
-
-/* Stripes btree keys: */
-
-int bch2_stripe_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
- int ret = 0;
-
- bkey_fsck_err_on(bkey_eq(k.k->p, POS_MIN) ||
- bpos_gt(k.k->p, POS(0, U32_MAX)),
- c, stripe_pos_bad,
- "stripe at bad pos");
-
- bkey_fsck_err_on(bkey_val_u64s(k.k) < stripe_val_u64s(s),
- c, stripe_val_size_bad,
- "incorrect value size (%zu < %u)",
- bkey_val_u64s(k.k), stripe_val_u64s(s));
-
- bkey_fsck_err_on(s->csum_granularity_bits >= 64,
- c, stripe_csum_granularity_bad,
- "invalid csum granularity (%u >= 64)",
- s->csum_granularity_bits);
-
- ret = bch2_bkey_ptrs_validate(c, k, flags);
-fsck_err:
- return ret;
-}
-
-void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- const struct bch_stripe *sp = bkey_s_c_to_stripe(k).v;
- struct bch_stripe s = {};
-
- memcpy(&s, sp, min(sizeof(s), bkey_val_bytes(k.k)));
-
- unsigned nr_data = s.nr_blocks - s.nr_redundant;
-
- prt_printf(out, "algo %u sectors %u blocks %u:%u csum ",
- s.algorithm,
- le16_to_cpu(s.sectors),
- nr_data,
- s.nr_redundant);
- bch2_prt_csum_type(out, s.csum_type);
- prt_str(out, " gran ");
- if (s.csum_granularity_bits < 64)
- prt_printf(out, "%llu", 1ULL << s.csum_granularity_bits);
- else
- prt_printf(out, "(invalid shift %u)", s.csum_granularity_bits);
-
- if (s.disk_label) {
- prt_str(out, " label");
- bch2_disk_path_to_text(out, c, s.disk_label - 1);
- }
-
- for (unsigned i = 0; i < s.nr_blocks; i++) {
- const struct bch_extent_ptr *ptr = sp->ptrs + i;
-
- if ((void *) ptr >= bkey_val_end(k))
- break;
-
- prt_char(out, ' ');
- bch2_extent_ptr_to_text(out, c, ptr);
-
- if (s.csum_type < BCH_CSUM_NR &&
- i < nr_data &&
- stripe_blockcount_offset(&s, i) < bkey_val_bytes(k.k))
- prt_printf(out, "#%u", stripe_blockcount_get(sp, i));
- }
-}
-
-/* Triggers: */
-
-static int __mark_stripe_bucket(struct btree_trans *trans,
- struct bch_dev *ca,
- struct bkey_s_c_stripe s,
- unsigned ptr_idx, bool deleting,
- struct bpos bucket,
- struct bch_alloc_v4 *a,
- enum btree_iter_update_trigger_flags flags)
-{
- const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
- unsigned nr_data = s.v->nr_blocks - s.v->nr_redundant;
- bool parity = ptr_idx >= nr_data;
- enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
- s64 sectors = parity ? le16_to_cpu(s.v->sectors) : 0;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- struct bch_fs *c = trans->c;
- if (deleting)
- sectors = -sectors;
-
- if (!deleting) {
- if (bch2_trans_inconsistent_on(a->stripe ||
- a->stripe_redundancy, trans,
- "bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)\n%s",
- bucket.inode, bucket.offset, a->gen,
- bch2_data_type_str(a->data_type),
- a->dirty_sectors,
- a->stripe, s.k->p.offset,
- (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -BCH_ERR_mark_stripe;
- goto err;
- }
-
- if (bch2_trans_inconsistent_on(parity && bch2_bucket_sectors_total(*a), trans,
- "bucket %llu:%llu gen %u data type %s dirty_sectors %u cached_sectors %u: data already in parity bucket\n%s",
- bucket.inode, bucket.offset, a->gen,
- bch2_data_type_str(a->data_type),
- a->dirty_sectors,
- a->cached_sectors,
- (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -BCH_ERR_mark_stripe;
- goto err;
- }
- } else {
- if (bch2_trans_inconsistent_on(a->stripe != s.k->p.offset ||
- a->stripe_redundancy != s.v->nr_redundant, trans,
- "bucket %llu:%llu gen %u: not marked as stripe when deleting stripe (got %u)\n%s",
- bucket.inode, bucket.offset, a->gen,
- a->stripe,
- (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -BCH_ERR_mark_stripe;
- goto err;
- }
-
- if (bch2_trans_inconsistent_on(a->data_type != data_type, trans,
- "bucket %llu:%llu gen %u data type %s: wrong data type when stripe, should be %s\n%s",
- bucket.inode, bucket.offset, a->gen,
- bch2_data_type_str(a->data_type),
- bch2_data_type_str(data_type),
- (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -BCH_ERR_mark_stripe;
- goto err;
- }
-
- if (bch2_trans_inconsistent_on(parity &&
- (a->dirty_sectors != -sectors ||
- a->cached_sectors), trans,
- "bucket %llu:%llu gen %u dirty_sectors %u cached_sectors %u: wrong sectors when deleting parity block of stripe\n%s",
- bucket.inode, bucket.offset, a->gen,
- a->dirty_sectors,
- a->cached_sectors,
- (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -BCH_ERR_mark_stripe;
- goto err;
- }
- }
-
- if (sectors) {
- ret = bch2_bucket_ref_update(trans, ca, s.s_c, ptr, sectors, data_type,
- a->gen, a->data_type, &a->dirty_sectors);
- if (ret)
- goto err;
- }
-
- if (!deleting) {
- a->stripe = s.k->p.offset;
- a->stripe_redundancy = s.v->nr_redundant;
- alloc_data_type_set(a, data_type);
- } else {
- a->stripe = 0;
- a->stripe_redundancy = 0;
- alloc_data_type_set(a, BCH_DATA_user);
- }
-err:
- printbuf_exit(&buf);
- return ret;
-}
-
-static int mark_stripe_bucket(struct btree_trans *trans,
- struct bkey_s_c_stripe s,
- unsigned ptr_idx, bool deleting,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bch_fs *c = trans->c;
- const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev);
- if (unlikely(!ca)) {
- if (ptr->dev != BCH_SB_MEMBER_INVALID && !(flags & BTREE_TRIGGER_overwrite))
- ret = -BCH_ERR_mark_stripe;
- goto err;
- }
-
- struct bpos bucket = PTR_BUCKET_POS(ca, ptr);
-
- if (flags & BTREE_TRIGGER_transactional) {
- struct bkey_i_alloc_v4 *a =
- bch2_trans_start_alloc_update(trans, bucket, 0);
- ret = PTR_ERR_OR_ZERO(a) ?:
- __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &a->v, flags);
- }
-
- if (flags & BTREE_TRIGGER_gc) {
- percpu_down_read(&c->mark_lock);
- struct bucket *g = gc_bucket(ca, bucket.offset);
- if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s",
- ptr->dev,
- (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -BCH_ERR_mark_stripe;
- goto err_unlock;
- }
-
- bucket_lock(g);
- struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
- ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags);
- alloc_to_bucket(g, new);
- bucket_unlock(g);
-err_unlock:
- percpu_up_read(&c->mark_lock);
- if (!ret)
- ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
- }
-err:
- bch2_dev_put(ca);
- printbuf_exit(&buf);
- return ret;
-}
-
-static int mark_stripe_buckets(struct btree_trans *trans,
- struct bkey_s_c old, struct bkey_s_c new,
- enum btree_iter_update_trigger_flags flags)
-{
- const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
- ? bkey_s_c_to_stripe(old).v : NULL;
- const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
- ? bkey_s_c_to_stripe(new).v : NULL;
-
- BUG_ON(old_s && new_s && old_s->nr_blocks != new_s->nr_blocks);
-
- unsigned nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;
-
- for (unsigned i = 0; i < nr_blocks; i++) {
- if (new_s && old_s &&
- !memcmp(&new_s->ptrs[i],
- &old_s->ptrs[i],
- sizeof(new_s->ptrs[i])))
- continue;
-
- if (new_s) {
- int ret = mark_stripe_bucket(trans,
- bkey_s_c_to_stripe(new), i, false, flags);
- if (ret)
- return ret;
- }
-
- if (old_s) {
- int ret = mark_stripe_bucket(trans,
- bkey_s_c_to_stripe(old), i, true, flags);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-static inline void stripe_to_mem(struct stripe *m, const struct bch_stripe *s)
-{
- m->sectors = le16_to_cpu(s->sectors);
- m->algorithm = s->algorithm;
- m->nr_blocks = s->nr_blocks;
- m->nr_redundant = s->nr_redundant;
- m->disk_label = s->disk_label;
- m->blocks_nonempty = 0;
-
- for (unsigned i = 0; i < s->nr_blocks; i++)
- m->blocks_nonempty += !!stripe_blockcount_get(s, i);
-}
-
-int bch2_trigger_stripe(struct btree_trans *trans,
- enum btree_id btree, unsigned level,
- struct bkey_s_c old, struct bkey_s _new,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bkey_s_c new = _new.s_c;
- struct bch_fs *c = trans->c;
- u64 idx = new.k->p.offset;
- const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
- ? bkey_s_c_to_stripe(old).v : NULL;
- const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
- ? bkey_s_c_to_stripe(new).v : NULL;
-
- if (unlikely(flags & BTREE_TRIGGER_check_repair))
- return bch2_check_fix_ptrs(trans, btree, level, _new.s_c, flags);
-
- BUG_ON(new_s && old_s &&
- (new_s->nr_blocks != old_s->nr_blocks ||
- new_s->nr_redundant != old_s->nr_redundant));
-
- if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
- /*
- * If the pointers aren't changing, we don't need to do anything:
- */
- if (new_s && old_s &&
- new_s->nr_blocks == old_s->nr_blocks &&
- new_s->nr_redundant == old_s->nr_redundant &&
- !memcmp(old_s->ptrs, new_s->ptrs,
- new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
- return 0;
-
- struct gc_stripe *gc = NULL;
- if (flags & BTREE_TRIGGER_gc) {
- gc = genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
- if (!gc) {
- bch_err(c, "error allocating memory for gc_stripes, idx %llu", idx);
- return -BCH_ERR_ENOMEM_mark_stripe;
- }
-
- /*
- * This will be wrong when we bring back runtime gc: we should
- * be unmarking the old key and then marking the new key
- *
- * Also: when we bring back runtime gc, locking
- */
- gc->alive = true;
- gc->sectors = le16_to_cpu(new_s->sectors);
- gc->nr_blocks = new_s->nr_blocks;
- gc->nr_redundant = new_s->nr_redundant;
-
- for (unsigned i = 0; i < new_s->nr_blocks; i++)
- gc->ptrs[i] = new_s->ptrs[i];
-
- /*
- * gc recalculates this field from stripe ptr
- * references:
- */
- memset(gc->block_sectors, 0, sizeof(gc->block_sectors));
- }
-
- if (new_s) {
- s64 sectors = (u64) le16_to_cpu(new_s->sectors) * new_s->nr_redundant;
-
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_replicas,
- };
- bch2_bkey_to_replicas(&acc.replicas, new);
- int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, gc);
- if (ret)
- return ret;
-
- if (gc)
- memcpy(&gc->r.e, &acc.replicas, replicas_entry_bytes(&acc.replicas));
- }
-
- if (old_s) {
- s64 sectors = -((s64) le16_to_cpu(old_s->sectors)) * old_s->nr_redundant;
-
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_replicas,
- };
- bch2_bkey_to_replicas(&acc.replicas, old);
- int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, gc);
- if (ret)
- return ret;
- }
-
- int ret = mark_stripe_buckets(trans, old, new, flags);
- if (ret)
- return ret;
- }
-
- if (flags & BTREE_TRIGGER_atomic) {
- struct stripe *m = genradix_ptr(&c->stripes, idx);
-
- if (!m) {
- struct printbuf buf1 = PRINTBUF;
- struct printbuf buf2 = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf1, c, old);
- bch2_bkey_val_to_text(&buf2, c, new);
- bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
- "old %s\n"
- "new %s", idx, buf1.buf, buf2.buf);
- printbuf_exit(&buf2);
- printbuf_exit(&buf1);
- bch2_inconsistent_error(c);
- return -1;
- }
-
- if (!new_s) {
- bch2_stripes_heap_del(c, m, idx);
-
- memset(m, 0, sizeof(*m));
- } else {
- stripe_to_mem(m, new_s);
-
- if (!old_s)
- bch2_stripes_heap_insert(c, m, idx);
- else
- bch2_stripes_heap_update(c, m, idx);
- }
- }
-
- return 0;
-}
-
-/* returns blocknr in stripe that we matched: */
-static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
- struct bkey_s_c k, unsigned *block)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- unsigned i, nr_data = s->nr_blocks - s->nr_redundant;
-
- bkey_for_each_ptr(ptrs, ptr)
- for (i = 0; i < nr_data; i++)
- if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
- le16_to_cpu(s->sectors))) {
- *block = i;
- return ptr;
- }
-
- return NULL;
-}
-
-static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
-{
- switch (k.k->type) {
- case KEY_TYPE_extent: {
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
- const union bch_extent_entry *entry;
-
- extent_for_each_entry(e, entry)
- if (extent_entry_type(entry) ==
- BCH_EXTENT_ENTRY_stripe_ptr &&
- entry->stripe_ptr.idx == idx)
- return true;
-
- break;
- }
- }
-
- return false;
-}
-
-/* Stripe bufs: */
-
-static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
-{
- if (buf->key.k.type == KEY_TYPE_stripe) {
- struct bkey_i_stripe *s = bkey_i_to_stripe(&buf->key);
- unsigned i;
-
- for (i = 0; i < s->v.nr_blocks; i++) {
- kvfree(buf->data[i]);
- buf->data[i] = NULL;
- }
- }
-}
-
-/* XXX: this is a non-mempoolified memory allocation: */
-static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
- unsigned offset, unsigned size)
-{
- struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
- unsigned csum_granularity = 1U << v->csum_granularity_bits;
- unsigned end = offset + size;
- unsigned i;
-
- BUG_ON(end > le16_to_cpu(v->sectors));
-
- offset = round_down(offset, csum_granularity);
- end = min_t(unsigned, le16_to_cpu(v->sectors),
- round_up(end, csum_granularity));
-
- buf->offset = offset;
- buf->size = end - offset;
-
- memset(buf->valid, 0xFF, sizeof(buf->valid));
-
- for (i = 0; i < v->nr_blocks; i++) {
- buf->data[i] = kvmalloc(buf->size << 9, GFP_KERNEL);
- if (!buf->data[i])
- goto err;
- }
-
- return 0;
-err:
- ec_stripe_buf_exit(buf);
- return -BCH_ERR_ENOMEM_stripe_buf;
-}
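Because blocks are checksummed in csum_granularity-sized chunks, the requested range is widened to chunk boundaries before the per-block buffers are sized. A worked example with illustrative values: with csum_granularity_bits == 3 (8-sector chunks) and a 128-sector stripe, a request for offset 10, size 20 gives end = 30, round_down(10, 8) = 8 and round_up(30, 8) = 32, so buf->offset = 8, buf->size = 24, and each data[] buffer is a 24 << 9 = 12 KiB allocation.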
-
-/* Checksumming: */
-
-static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
- unsigned block, unsigned offset)
-{
- struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
- unsigned csum_granularity = 1 << v->csum_granularity_bits;
- unsigned end = buf->offset + buf->size;
- unsigned len = min(csum_granularity, end - offset);
-
- BUG_ON(offset >= end);
- BUG_ON(offset < buf->offset);
- BUG_ON(offset & (csum_granularity - 1));
- BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
- (len & (csum_granularity - 1)));
-
- return bch2_checksum(NULL, v->csum_type,
- null_nonce(),
- buf->data[block] + ((offset - buf->offset) << 9),
- len << 9);
-}
-
-static void ec_generate_checksums(struct ec_stripe_buf *buf)
-{
- struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
- unsigned i, j, csums_per_device = stripe_csums_per_device(v);
-
- if (!v->csum_type)
- return;
-
- BUG_ON(buf->offset);
- BUG_ON(buf->size != le16_to_cpu(v->sectors));
-
- for (i = 0; i < v->nr_blocks; i++)
- for (j = 0; j < csums_per_device; j++)
- stripe_csum_set(v, i, j,
- ec_block_checksum(buf, i, j << v->csum_granularity_bits));
-}
-
-static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
-{
- struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
- unsigned csum_granularity = 1 << v->csum_granularity_bits;
- unsigned i;
-
- if (!v->csum_type)
- return;
-
- for (i = 0; i < v->nr_blocks; i++) {
- unsigned offset = buf->offset;
- unsigned end = buf->offset + buf->size;
-
- if (!test_bit(i, buf->valid))
- continue;
-
- while (offset < end) {
- unsigned j = offset >> v->csum_granularity_bits;
- unsigned len = min(csum_granularity, end - offset);
- struct bch_csum want = stripe_csum_get(v, i, j);
- struct bch_csum got = ec_block_checksum(buf, i, offset);
-
- if (bch2_crc_cmp(want, got)) {
- struct bch_dev *ca = bch2_dev_tryget(c, v->ptrs[i].dev);
- if (ca) {
- struct printbuf err = PRINTBUF;
-
- prt_str(&err, "stripe ");
- bch2_csum_err_msg(&err, v->csum_type, want, got);
- prt_printf(&err, " for %ps at %u of\n ", (void *) _RET_IP_, i);
- bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key));
- bch_err_ratelimited(ca, "%s", err.buf);
- printbuf_exit(&err);
-
- bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
- }
-
- clear_bit(i, buf->valid);
- break;
- }
-
- offset += len;
- }
- }
-}
-
-/* Erasure coding: */
-
-static void ec_generate_ec(struct ec_stripe_buf *buf)
-{
- struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
- unsigned nr_data = v->nr_blocks - v->nr_redundant;
- unsigned bytes = le16_to_cpu(v->sectors) << 9;
-
- raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
-}
-
-static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
-{
- struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
-
- return v->nr_blocks - bitmap_weight(buf->valid, v->nr_blocks);
-}
-
-static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
-{
- struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
- unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
- unsigned nr_data = v->nr_blocks - v->nr_redundant;
- unsigned bytes = buf->size << 9;
-
- if (ec_nr_failed(buf) > v->nr_redundant) {
- bch_err_ratelimited(c,
- "error doing reconstruct read: unable to read enough blocks");
- return -1;
- }
-
- for (i = 0; i < nr_data; i++)
- if (!test_bit(i, buf->valid))
- failed[nr_failed++] = i;
-
- raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);
- return 0;
-}
-
-/* IO: */
-
-static void ec_block_endio(struct bio *bio)
-{
- struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
- struct bch_stripe *v = &bkey_i_to_stripe(&ec_bio->buf->key)->v;
- struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
- struct bch_dev *ca = ec_bio->ca;
- struct closure *cl = bio->bi_private;
-
- if (bch2_dev_io_err_on(bio->bi_status, ca,
- bio_data_dir(bio)
- ? BCH_MEMBER_ERROR_write
- : BCH_MEMBER_ERROR_read,
- "erasure coding %s error: %s",
- bio_data_dir(bio) ? "write" : "read",
- bch2_blk_status_to_str(bio->bi_status)))
- clear_bit(ec_bio->idx, ec_bio->buf->valid);
-
- int stale = dev_ptr_stale(ca, ptr);
- if (stale) {
- bch_err_ratelimited(ca->fs,
- "error %s stripe: stale/invalid pointer (%i) after io",
- bio_data_dir(bio) == READ ? "reading from" : "writing to",
- stale);
- clear_bit(ec_bio->idx, ec_bio->buf->valid);
- }
-
- bio_put(&ec_bio->bio);
- percpu_ref_put(&ca->io_ref);
- closure_put(cl);
-}
-
-static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
- blk_opf_t opf, unsigned idx, struct closure *cl)
-{
- struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
- unsigned offset = 0, bytes = buf->size << 9;
- struct bch_extent_ptr *ptr = &v->ptrs[idx];
- enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant
- ? BCH_DATA_user
- : BCH_DATA_parity;
- int rw = op_is_write(opf);
-
- struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, rw);
- if (!ca) {
- clear_bit(idx, buf->valid);
- return;
- }
-
- int stale = dev_ptr_stale(ca, ptr);
- if (stale) {
- bch_err_ratelimited(c,
- "error %s stripe: stale pointer (%i)",
- rw == READ ? "reading from" : "writing to",
- stale);
- clear_bit(idx, buf->valid);
- return;
- }
-
- this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);
-
- while (offset < bytes) {
- unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS,
- DIV_ROUND_UP(bytes, PAGE_SIZE));
- unsigned b = min_t(size_t, bytes - offset,
- nr_iovecs << PAGE_SHIFT);
- struct ec_bio *ec_bio;
-
- ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
- nr_iovecs,
- opf,
- GFP_KERNEL,
- &c->ec_bioset),
- struct ec_bio, bio);
-
- ec_bio->ca = ca;
- ec_bio->buf = buf;
- ec_bio->idx = idx;
-
- ec_bio->bio.bi_iter.bi_sector = ptr->offset + buf->offset + (offset >> 9);
- ec_bio->bio.bi_end_io = ec_block_endio;
- ec_bio->bio.bi_private = cl;
-
- bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);
-
- closure_get(cl);
- percpu_ref_get(&ca->io_ref);
-
- submit_bio(&ec_bio->bio);
-
- offset += b;
- }
-
- percpu_ref_put(&ca->io_ref);
-}
-
-static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
- struct ec_stripe_buf *stripe)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret;
-
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
- POS(0, idx), BTREE_ITER_slots);
- ret = bkey_err(k);
- if (ret)
- goto err;
- if (k.k->type != KEY_TYPE_stripe) {
- ret = -ENOENT;
- goto err;
- }
- bkey_reassemble(&stripe->key, k);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-/* recovery read path: */
-int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio,
- struct bkey_s_c orig_k)
-{
- struct bch_fs *c = trans->c;
- struct ec_stripe_buf *buf = NULL;
- struct closure cl;
- struct bch_stripe *v;
- unsigned i, offset;
- const char *msg = NULL;
- struct printbuf msgbuf = PRINTBUF;
- int ret = 0;
-
- closure_init_stack(&cl);
-
- BUG_ON(!rbio->pick.has_ec);
-
- buf = kzalloc(sizeof(*buf), GFP_NOFS);
- if (!buf)
- return -BCH_ERR_ENOMEM_ec_read_extent;
-
- ret = lockrestart_do(trans, get_stripe_key_trans(trans, rbio->pick.ec.idx, buf));
- if (ret) {
- msg = "stripe not found";
- goto err;
- }
-
- v = &bkey_i_to_stripe(&buf->key)->v;
-
- if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
- msg = "pointer doesn't match stripe";
- goto err;
- }
-
- offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
- if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
- msg = "read is bigger than stripe";
- goto err;
- }
-
- ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
- if (ret) {
- msg = "-ENOMEM";
- goto err;
- }
-
- for (i = 0; i < v->nr_blocks; i++)
- ec_block_io(c, buf, REQ_OP_READ, i, &cl);
-
- closure_sync(&cl);
-
- if (ec_nr_failed(buf) > v->nr_redundant) {
- msg = "unable to read enough blocks";
- goto err;
- }
-
- ec_validate_checksums(c, buf);
-
- ret = ec_do_recov(c, buf);
- if (ret)
- goto err;
-
- memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
- buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
-out:
- ec_stripe_buf_exit(buf);
- kfree(buf);
- return ret;
-err:
- bch2_bkey_val_to_text(&msgbuf, c, orig_k);
- bch_err_ratelimited(c,
- "error doing reconstruct read: %s\n %s", msg, msgbuf.buf);
- printbuf_exit(&msgbuf);
- ret = -BCH_ERR_stripe_reconstruct;
- goto out;
-}
-
-/* stripe bucket accounting: */
-
-static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
-{
- ec_stripes_heap n, *h = &c->ec_stripes_heap;
-
- if (idx >= h->size) {
- if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
- return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
-
- mutex_lock(&c->ec_stripes_heap_lock);
- if (n.size > h->size) {
- memcpy(n.data, h->data, h->nr * sizeof(h->data[0]));
- n.nr = h->nr;
- swap(*h, n);
- }
- mutex_unlock(&c->ec_stripes_heap_lock);
-
- free_heap(&n);
- }
-
- if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
- return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
-
- if (c->gc_pos.phase != GC_PHASE_not_running &&
- !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
- return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
-
- return 0;
-}
-
-static int ec_stripe_mem_alloc(struct btree_trans *trans,
- struct btree_iter *iter)
-{
- return allocate_dropping_locks_errcode(trans,
- __ec_stripe_mem_alloc(trans->c, iter->pos.offset, _gfp));
-}
-
-/*
- * Hash table of open stripes:
- * Stripes that are being created or modified are kept in a hash table, so that
- * stripe deletion can skip them.
- */
-
-static bool __bch2_stripe_is_open(struct bch_fs *c, u64 idx)
-{
- unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
- struct ec_stripe_new *s;
-
- hlist_for_each_entry(s, &c->ec_stripes_new[hash], hash)
- if (s->idx == idx)
- return true;
- return false;
-}
-
-static bool bch2_stripe_is_open(struct bch_fs *c, u64 idx)
-{
- bool ret = false;
-
- spin_lock(&c->ec_stripes_new_lock);
- ret = __bch2_stripe_is_open(c, idx);
- spin_unlock(&c->ec_stripes_new_lock);
-
- return ret;
-}
-
-static bool bch2_try_open_stripe(struct bch_fs *c,
- struct ec_stripe_new *s,
- u64 idx)
-{
- bool ret;
-
- spin_lock(&c->ec_stripes_new_lock);
- ret = !__bch2_stripe_is_open(c, idx);
- if (ret) {
- unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
-
- s->idx = idx;
- hlist_add_head(&s->hash, &c->ec_stripes_new[hash]);
- }
- spin_unlock(&c->ec_stripes_new_lock);
-
- return ret;
-}
-
-static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s)
-{
- BUG_ON(!s->idx);
-
- spin_lock(&c->ec_stripes_new_lock);
- hlist_del_init(&s->hash);
- spin_unlock(&c->ec_stripes_new_lock);
-
- s->idx = 0;
-}
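Together these helpers act as a coarse lock between stripe creation and stripe deletion; callers further down in this file use them roughly as follows (sketch only, error handling omitted):

	if (bch2_try_open_stripe(c, s, idx)) {
		/* idx is now invisible to stripe_idx_to_delete() */

		/* ... create or update the stripe at idx ... */

		bch2_stripe_close(c, s);
	}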
-
-/* Heap of all existing stripes, ordered by blocks_nonempty */
-
-static u64 stripe_idx_to_delete(struct bch_fs *c)
-{
- ec_stripes_heap *h = &c->ec_stripes_heap;
-
- lockdep_assert_held(&c->ec_stripes_heap_lock);
-
- if (h->nr &&
- h->data[0].blocks_nonempty == 0 &&
- !bch2_stripe_is_open(c, h->data[0].idx))
- return h->data[0].idx;
-
- return 0;
-}
-
-static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
- size_t i)
-{
- struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);
-
- genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i;
-}
-
-static inline bool ec_stripes_heap_cmp(const void *l, const void *r, void __always_unused *args)
-{
- struct ec_stripe_heap_entry *_l = (struct ec_stripe_heap_entry *)l;
- struct ec_stripe_heap_entry *_r = (struct ec_stripe_heap_entry *)r;
-
- return ((_l->blocks_nonempty > _r->blocks_nonempty) <
- (_l->blocks_nonempty < _r->blocks_nonempty));
-}
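The branchless comparison above is equivalent to _l->blocks_nonempty < _r->blocks_nonempty, i.e. a min-heap keyed on blocks_nonempty, which keeps fully empty stripes at the root where stripe_idx_to_delete() can find them.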
-
-static inline void ec_stripes_heap_swap(void *l, void *r, void *h)
-{
- struct ec_stripe_heap_entry *_l = (struct ec_stripe_heap_entry *)l;
- struct ec_stripe_heap_entry *_r = (struct ec_stripe_heap_entry *)r;
- ec_stripes_heap *_h = (ec_stripes_heap *)h;
- size_t i = _l - _h->data;
- size_t j = _r - _h->data;
-
- swap(*_l, *_r);
-
- ec_stripes_heap_set_backpointer(_h, i);
- ec_stripes_heap_set_backpointer(_h, j);
-}
-
-static const struct min_heap_callbacks callbacks = {
- .less = ec_stripes_heap_cmp,
- .swp = ec_stripes_heap_swap,
-};
-
-static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
-{
- ec_stripes_heap *h = &c->ec_stripes_heap;
- struct stripe *m = genradix_ptr(&c->stripes, idx);
-
- BUG_ON(m->heap_idx >= h->nr);
- BUG_ON(h->data[m->heap_idx].idx != idx);
-}
-
-void bch2_stripes_heap_del(struct bch_fs *c,
- struct stripe *m, size_t idx)
-{
- mutex_lock(&c->ec_stripes_heap_lock);
- heap_verify_backpointer(c, idx);
-
- min_heap_del(&c->ec_stripes_heap, m->heap_idx, &callbacks, &c->ec_stripes_heap);
- mutex_unlock(&c->ec_stripes_heap_lock);
-}
-
-void bch2_stripes_heap_insert(struct bch_fs *c,
- struct stripe *m, size_t idx)
-{
- mutex_lock(&c->ec_stripes_heap_lock);
- BUG_ON(min_heap_full(&c->ec_stripes_heap));
-
- genradix_ptr(&c->stripes, idx)->heap_idx = c->ec_stripes_heap.nr;
- min_heap_push(&c->ec_stripes_heap, &((struct ec_stripe_heap_entry) {
- .idx = idx,
- .blocks_nonempty = m->blocks_nonempty,
- }),
- &callbacks,
- &c->ec_stripes_heap);
-
- heap_verify_backpointer(c, idx);
- mutex_unlock(&c->ec_stripes_heap_lock);
-}
-
-void bch2_stripes_heap_update(struct bch_fs *c,
- struct stripe *m, size_t idx)
-{
- ec_stripes_heap *h = &c->ec_stripes_heap;
- bool do_deletes;
- size_t i;
-
- mutex_lock(&c->ec_stripes_heap_lock);
- heap_verify_backpointer(c, idx);
-
- h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;
-
- i = m->heap_idx;
- min_heap_sift_up(h, i, &callbacks, &c->ec_stripes_heap);
- min_heap_sift_down(h, i, &callbacks, &c->ec_stripes_heap);
-
- heap_verify_backpointer(c, idx);
-
- do_deletes = stripe_idx_to_delete(c) != 0;
- mutex_unlock(&c->ec_stripes_heap_lock);
-
- if (do_deletes)
- bch2_do_stripe_deletes(c);
-}
-
-/* stripe deletion */
-
-static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bkey_s_c_stripe s;
- int ret;
-
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, POS(0, idx),
- BTREE_ITER_intent);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (k.k->type != KEY_TYPE_stripe) {
- bch2_fs_inconsistent(c, "attempting to delete nonexistent stripe %llu", idx);
- ret = -EINVAL;
- goto err;
- }
-
- s = bkey_s_c_to_stripe(k);
- for (unsigned i = 0; i < s.v->nr_blocks; i++)
- if (stripe_blockcount_get(s.v, i)) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf, c, k);
- bch2_fs_inconsistent(c, "attempting to delete nonempty stripe %s", buf.buf);
- printbuf_exit(&buf);
- ret = -EINVAL;
- goto err;
- }
-
- ret = bch2_btree_delete_at(trans, &iter, 0);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static void ec_stripe_delete_work(struct work_struct *work)
-{
- struct bch_fs *c =
- container_of(work, struct bch_fs, ec_stripe_delete_work);
-
- while (1) {
- mutex_lock(&c->ec_stripes_heap_lock);
- u64 idx = stripe_idx_to_delete(c);
- mutex_unlock(&c->ec_stripes_heap_lock);
-
- if (!idx)
- break;
-
- int ret = bch2_trans_commit_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- ec_stripe_delete(trans, idx));
- bch_err_fn(c, ret);
- if (ret)
- break;
- }
-
- bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
-}
-
-void bch2_do_stripe_deletes(struct bch_fs *c)
-{
- if (bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_delete) &&
- !queue_work(c->write_ref_wq, &c->ec_stripe_delete_work))
- bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
-}
-
-/* stripe creation: */
-
-static int ec_stripe_key_update(struct btree_trans *trans,
- struct bkey_i_stripe *old,
- struct bkey_i_stripe *new)
-{
- struct bch_fs *c = trans->c;
- bool create = !old;
-
- struct btree_iter iter;
- struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
- new->k.p, BTREE_ITER_intent);
- int ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (bch2_fs_inconsistent_on(k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe),
- c, "error %s stripe: got existing key type %s",
- create ? "creating" : "updating",
- bch2_bkey_types[k.k->type])) {
- ret = -EINVAL;
- goto err;
- }
-
- if (k.k->type == KEY_TYPE_stripe) {
- const struct bch_stripe *v = bkey_s_c_to_stripe(k).v;
-
- BUG_ON(old->v.nr_blocks != new->v.nr_blocks);
- BUG_ON(old->v.nr_blocks != v->nr_blocks);
-
- for (unsigned i = 0; i < new->v.nr_blocks; i++) {
- unsigned sectors = stripe_blockcount_get(v, i);
-
- if (!bch2_extent_ptr_eq(old->v.ptrs[i], new->v.ptrs[i]) && sectors) {
- struct printbuf buf = PRINTBUF;
-
- prt_printf(&buf, "stripe changed nonempty block %u", i);
- prt_str(&buf, "\nold: ");
- bch2_bkey_val_to_text(&buf, c, k);
- prt_str(&buf, "\nnew: ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&new->k_i));
- bch2_fs_inconsistent(c, "%s", buf.buf);
- printbuf_exit(&buf);
- ret = -EINVAL;
- goto err;
- }
-
- /*
- * If the stripe ptr changed underneath us, it must have
- * been dev_remove_stripes() -> invalidate_stripe_to_dev()
- */
- if (!bch2_extent_ptr_eq(old->v.ptrs[i], v->ptrs[i])) {
- BUG_ON(v->ptrs[i].dev != BCH_SB_MEMBER_INVALID);
-
- if (bch2_extent_ptr_eq(old->v.ptrs[i], new->v.ptrs[i]))
- new->v.ptrs[i].dev = BCH_SB_MEMBER_INVALID;
- }
-
- stripe_blockcount_set(&new->v, i, sectors);
- }
- }
-
- ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static int ec_stripe_update_extent(struct btree_trans *trans,
- struct bch_dev *ca,
- struct bpos bucket, u8 gen,
- struct ec_stripe_buf *s,
- struct bpos *bp_pos)
-{
- struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
- struct bch_fs *c = trans->c;
- struct bch_backpointer bp;
- struct btree_iter iter;
- struct bkey_s_c k;
- const struct bch_extent_ptr *ptr_c;
- struct bch_extent_ptr *ec_ptr = NULL;
- struct bch_extent_stripe_ptr stripe_ptr;
- struct bkey_i *n;
- unsigned block;
- int ret, dev;
-
- ret = bch2_get_next_backpointer(trans, ca, bucket, gen,
- bp_pos, &bp, BTREE_ITER_cached);
- if (ret)
- return ret;
- if (bpos_eq(*bp_pos, SPOS_MAX))
- return 0;
-
- if (bp.level) {
- struct printbuf buf = PRINTBUF;
- struct btree_iter node_iter;
- struct btree *b;
-
- b = bch2_backpointer_get_node(trans, &node_iter, *bp_pos, bp);
- bch2_trans_iter_exit(trans, &node_iter);
-
- if (!b)
- return 0;
-
- prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b);
- bch2_backpointer_to_text(&buf, &bp);
-
- bch2_fs_inconsistent(c, "%s", buf.buf);
- printbuf_exit(&buf);
- return -EIO;
- }
-
- k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_intent);
- ret = bkey_err(k);
- if (ret)
- return ret;
- if (!k.k) {
- /*
- * extent no longer exists - we could flush the btree
- * write buffer and retry to verify, but no need:
- */
- return 0;
- }
-
- if (extent_has_stripe_ptr(k, s->key.k.p.offset))
- goto out;
-
- ptr_c = bkey_matches_stripe(v, k, &block);
- /*
- * It doesn't generally make sense to erasure code cached ptrs:
- * XXX: should we be incrementing a counter?
- */
- if (!ptr_c || ptr_c->cached)
- goto out;
-
- dev = v->ptrs[block].dev;
-
- n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(stripe_ptr));
- ret = PTR_ERR_OR_ZERO(n);
- if (ret)
- goto out;
-
- bkey_reassemble(n, k);
-
- bch2_bkey_drop_ptrs_noerror(bkey_i_to_s(n), ptr, ptr->dev != dev);
- ec_ptr = bch2_bkey_has_device(bkey_i_to_s(n), dev);
- BUG_ON(!ec_ptr);
-
- stripe_ptr = (struct bch_extent_stripe_ptr) {
- .type = 1 << BCH_EXTENT_ENTRY_stripe_ptr,
- .block = block,
- .redundancy = v->nr_redundant,
- .idx = s->key.k.p.offset,
- };
-
- __extent_entry_insert(n,
- (union bch_extent_entry *) ec_ptr,
- (union bch_extent_entry *) &stripe_ptr);
-
- ret = bch2_trans_update(trans, &iter, n, 0);
-out:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_buf *s,
- unsigned block)
-{
- struct bch_fs *c = trans->c;
- struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
- struct bch_extent_ptr ptr = v->ptrs[block];
- struct bpos bp_pos = POS_MIN;
- int ret = 0;
-
- struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev);
- if (!ca)
- return -EIO;
-
- struct bpos bucket_pos = PTR_BUCKET_POS(ca, &ptr);
-
- while (1) {
- ret = commit_do(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_check_rw|
- BCH_TRANS_COMMIT_no_enospc,
- ec_stripe_update_extent(trans, ca, bucket_pos, ptr.gen, s, &bp_pos));
- if (ret)
- break;
- if (bkey_eq(bp_pos, POS_MAX))
- break;
-
- bp_pos = bpos_nosnap_successor(bp_pos);
- }
-
- bch2_dev_put(ca);
- return ret;
-}
-
-static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
- unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
- int ret = 0;
-
- ret = bch2_btree_write_buffer_flush_sync(trans);
- if (ret)
- goto err;
-
- for (i = 0; i < nr_data; i++) {
- ret = ec_stripe_update_bucket(trans, s, i);
- if (ret)
- break;
- }
-err:
- bch2_trans_put(trans);
-
- return ret;
-}
-
-static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
- struct ec_stripe_new *s,
- unsigned block,
- struct open_bucket *ob)
-{
- struct bch_dev *ca = bch2_dev_get_ioref(c, ob->dev, WRITE);
- if (!ca) {
- s->err = -BCH_ERR_erofs_no_writes;
- return;
- }
-
- unsigned offset = ca->mi.bucket_size - ob->sectors_free;
- memset(s->new_stripe.data[block] + (offset << 9),
- 0,
- ob->sectors_free << 9);
-
- int ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
- ob->bucket * ca->mi.bucket_size + offset,
- ob->sectors_free,
- GFP_KERNEL, 0);
-
- percpu_ref_put(&ca->io_ref);
-
- if (ret)
- s->err = ret;
-}
-
-void bch2_ec_stripe_new_free(struct bch_fs *c, struct ec_stripe_new *s)
-{
- if (s->idx)
- bch2_stripe_close(c, s);
- kfree(s);
-}
-
-/*
- * data buckets of new stripe all written: create the stripe
- */
-static void ec_stripe_create(struct ec_stripe_new *s)
-{
- struct bch_fs *c = s->c;
- struct open_bucket *ob;
- struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
- unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
- int ret;
-
- BUG_ON(s->h->s == s);
-
- closure_sync(&s->iodone);
-
- if (!s->err) {
- for (i = 0; i < nr_data; i++)
- if (s->blocks[i]) {
- ob = c->open_buckets + s->blocks[i];
-
- if (ob->sectors_free)
- zero_out_rest_of_ec_bucket(c, s, i, ob);
- }
- }
-
- if (s->err) {
- if (!bch2_err_matches(s->err, EROFS))
- bch_err(c, "error creating stripe: error writing data buckets");
- goto err;
- }
-
- if (s->have_existing_stripe) {
- ec_validate_checksums(c, &s->existing_stripe);
-
- if (ec_do_recov(c, &s->existing_stripe)) {
- bch_err(c, "error creating stripe: error reading existing stripe");
- goto err;
- }
-
- for (i = 0; i < nr_data; i++)
- if (stripe_blockcount_get(&bkey_i_to_stripe(&s->existing_stripe.key)->v, i))
- swap(s->new_stripe.data[i],
- s->existing_stripe.data[i]);
-
- ec_stripe_buf_exit(&s->existing_stripe);
- }
-
- BUG_ON(!s->allocated);
- BUG_ON(!s->idx);
-
- ec_generate_ec(&s->new_stripe);
-
- ec_generate_checksums(&s->new_stripe);
-
- /* write p/q: */
- for (i = nr_data; i < v->nr_blocks; i++)
- ec_block_io(c, &s->new_stripe, REQ_OP_WRITE, i, &s->iodone);
- closure_sync(&s->iodone);
-
- if (ec_nr_failed(&s->new_stripe)) {
- bch_err(c, "error creating stripe: error writing redundancy buckets");
- goto err;
- }
-
- ret = bch2_trans_commit_do(c, &s->res, NULL,
- BCH_TRANS_COMMIT_no_check_rw|
- BCH_TRANS_COMMIT_no_enospc,
- ec_stripe_key_update(trans,
- s->have_existing_stripe
- ? bkey_i_to_stripe(&s->existing_stripe.key)
- : NULL,
- bkey_i_to_stripe(&s->new_stripe.key)));
- bch_err_msg(c, ret, "creating stripe key");
- if (ret)
- goto err;
-
- ret = ec_stripe_update_extents(c, &s->new_stripe);
- bch_err_msg(c, ret, "error updating extents");
- if (ret)
- goto err;
-err:
- bch2_disk_reservation_put(c, &s->res);
-
- for (i = 0; i < v->nr_blocks; i++)
- if (s->blocks[i]) {
- ob = c->open_buckets + s->blocks[i];
-
- if (i < nr_data) {
- ob->ec = NULL;
- __bch2_open_bucket_put(c, ob);
- } else {
- bch2_open_bucket_put(c, ob);
- }
- }
-
- mutex_lock(&c->ec_stripe_new_lock);
- list_del(&s->list);
- mutex_unlock(&c->ec_stripe_new_lock);
- wake_up(&c->ec_stripe_new_wait);
-
- ec_stripe_buf_exit(&s->existing_stripe);
- ec_stripe_buf_exit(&s->new_stripe);
- closure_debug_destroy(&s->iodone);
-
- ec_stripe_new_put(c, s, STRIPE_REF_stripe);
-}
-
-static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c)
-{
- struct ec_stripe_new *s;
-
- mutex_lock(&c->ec_stripe_new_lock);
- list_for_each_entry(s, &c->ec_stripe_new_list, list)
- if (!atomic_read(&s->ref[STRIPE_REF_io]))
- goto out;
- s = NULL;
-out:
- mutex_unlock(&c->ec_stripe_new_lock);
-
- return s;
-}
-
-static void ec_stripe_create_work(struct work_struct *work)
-{
- struct bch_fs *c = container_of(work,
- struct bch_fs, ec_stripe_create_work);
- struct ec_stripe_new *s;
-
- while ((s = get_pending_stripe(c)))
- ec_stripe_create(s);
-
- bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
-}
-
-void bch2_ec_do_stripe_creates(struct bch_fs *c)
-{
- bch2_write_ref_get(c, BCH_WRITE_REF_stripe_create);
-
- if (!queue_work(system_long_wq, &c->ec_stripe_create_work))
- bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
-}
-
-static void ec_stripe_new_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
-{
- struct ec_stripe_new *s = h->s;
-
- lockdep_assert_held(&h->lock);
-
- BUG_ON(!s->allocated && !s->err);
-
- h->s = NULL;
- s->pending = true;
-
- mutex_lock(&c->ec_stripe_new_lock);
- list_add(&s->list, &c->ec_stripe_new_list);
- mutex_unlock(&c->ec_stripe_new_lock);
-
- ec_stripe_new_put(c, s, STRIPE_REF_io);
-}
-
-static void ec_stripe_new_cancel(struct bch_fs *c, struct ec_stripe_head *h, int err)
-{
- h->s->err = err;
- ec_stripe_new_set_pending(c, h);
-}
-
-void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
-{
- struct ec_stripe_new *s = ob->ec;
-
- s->err = -EIO;
-}
-
-void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
-{
- struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
- if (!ob)
- return NULL;
-
- BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);
-
- struct bch_dev *ca = ob_dev(c, ob);
- unsigned offset = ca->mi.bucket_size - ob->sectors_free;
-
- return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
-}
-
-static int unsigned_cmp(const void *_l, const void *_r)
-{
- unsigned l = *((const unsigned *) _l);
- unsigned r = *((const unsigned *) _r);
-
- return cmp_int(l, r);
-}
-
-/* pick most common bucket size: */
-static unsigned pick_blocksize(struct bch_fs *c,
- struct bch_devs_mask *devs)
-{
- unsigned nr = 0, sizes[BCH_SB_MEMBERS_MAX];
- struct {
- unsigned nr, size;
- } cur = { 0, 0 }, best = { 0, 0 };
-
- for_each_member_device_rcu(c, ca, devs)
- sizes[nr++] = ca->mi.bucket_size;
-
- sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);
-
- for (unsigned i = 0; i < nr; i++) {
- if (sizes[i] != cur.size) {
- if (cur.nr > best.nr)
- best = cur;
-
- cur.nr = 0;
- cur.size = sizes[i];
- }
-
- cur.nr++;
- }
-
- if (cur.nr > best.nr)
- best = cur;
-
- return best.size;
-}
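For example, member devices with bucket sizes of 1024, 1024 and 2048 sectors yield 1024 here; on a tie the smaller size wins, since the run-length comparison is strict.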
-
-static bool may_create_new_stripe(struct bch_fs *c)
-{
- return false;
-}
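Since may_create_new_stripe() is currently hard-coded to false, get_existing_stripe() below never bails out early, and reusing a partially empty existing stripe stays available as the fallback when a full new stripe cannot be allocated.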
-
-static void ec_stripe_key_init(struct bch_fs *c,
- struct bkey_i *k,
- unsigned nr_data,
- unsigned nr_parity,
- unsigned stripe_size,
- unsigned disk_label)
-{
- struct bkey_i_stripe *s = bkey_stripe_init(k);
- unsigned u64s;
-
- s->v.sectors = cpu_to_le16(stripe_size);
- s->v.algorithm = 0;
- s->v.nr_blocks = nr_data + nr_parity;
- s->v.nr_redundant = nr_parity;
- s->v.csum_granularity_bits = ilog2(c->opts.encoded_extent_max >> 9);
- s->v.csum_type = BCH_CSUM_crc32c;
- s->v.disk_label = disk_label;
-
- while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
- BUG_ON(1 << s->v.csum_granularity_bits >=
- le16_to_cpu(s->v.sectors) ||
- s->v.csum_granularity_bits == U8_MAX);
- s->v.csum_granularity_bits++;
- }
-
- set_bkey_val_u64s(&s->k, u64s);
-}
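The checksum granularity starts at one chunk per encoded extent, ilog2(encoded_extent_max >> 9), and is widened until the stripe key's value fits in BKEY_VAL_U64s_MAX. Assuming the common 64 KiB encoded_extent_max (the option is tunable), the initial value is ilog2(128) = 7, i.e. 128-sector (64 KiB) checksum chunks.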
-
-static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
-{
- struct ec_stripe_new *s;
-
- lockdep_assert_held(&h->lock);
-
- s = kzalloc(sizeof(*s), GFP_KERNEL);
- if (!s)
- return -BCH_ERR_ENOMEM_ec_new_stripe_alloc;
-
- mutex_init(&s->lock);
- closure_init(&s->iodone, NULL);
- atomic_set(&s->ref[STRIPE_REF_stripe], 1);
- atomic_set(&s->ref[STRIPE_REF_io], 1);
- s->c = c;
- s->h = h;
- s->nr_data = min_t(unsigned, h->nr_active_devs,
- BCH_BKEY_PTRS_MAX) - h->redundancy;
- s->nr_parity = h->redundancy;
-
- ec_stripe_key_init(c, &s->new_stripe.key,
- s->nr_data, s->nr_parity,
- h->blocksize, h->disk_label);
-
- h->s = s;
- h->nr_created++;
- return 0;
-}
-
-static void ec_stripe_head_devs_update(struct bch_fs *c, struct ec_stripe_head *h)
-{
- struct bch_devs_mask devs = h->devs;
-
- rcu_read_lock();
- h->devs = target_rw_devs(c, BCH_DATA_user, h->disk_label
- ? group_to_target(h->disk_label - 1)
- : 0);
- unsigned nr_devs = dev_mask_nr(&h->devs);
-
- for_each_member_device_rcu(c, ca, &h->devs)
- if (!ca->mi.durability)
- __clear_bit(ca->dev_idx, h->devs.d);
- unsigned nr_devs_with_durability = dev_mask_nr(&h->devs);
-
- h->blocksize = pick_blocksize(c, &h->devs);
-
- h->nr_active_devs = 0;
- for_each_member_device_rcu(c, ca, &h->devs)
- if (ca->mi.bucket_size == h->blocksize)
- h->nr_active_devs++;
-
- rcu_read_unlock();
-
- /*
- * If we only have redundancy + 1 devices, we're better off with just
- * replication:
- */
- h->insufficient_devs = h->nr_active_devs < h->redundancy + 2;
-
- if (h->insufficient_devs) {
- const char *err;
-
- if (nr_devs < h->redundancy + 2)
- err = NULL;
- else if (nr_devs_with_durability < h->redundancy + 2)
- err = "cannot use durability=0 devices";
- else
- err = "mismatched bucket sizes";
-
- if (err)
- bch_err(c, "insufficient devices available to create stripe (have %u, need %u): %s",
- h->nr_active_devs, h->redundancy + 2, err);
- }
-
- struct bch_devs_mask devs_leaving;
- bitmap_andnot(devs_leaving.d, devs.d, h->devs.d, BCH_SB_MEMBERS_MAX);
-
- if (h->s && !h->s->allocated && dev_mask_nr(&devs_leaving))
- ec_stripe_new_cancel(c, h, -EINTR);
-
- h->rw_devs_change_count = c->rw_devs_change_count;
-}
-
-static struct ec_stripe_head *
-ec_new_stripe_head_alloc(struct bch_fs *c, unsigned disk_label,
- unsigned algo, unsigned redundancy,
- enum bch_watermark watermark)
-{
- struct ec_stripe_head *h;
-
- h = kzalloc(sizeof(*h), GFP_KERNEL);
- if (!h)
- return NULL;
-
- mutex_init(&h->lock);
- BUG_ON(!mutex_trylock(&h->lock));
-
- h->disk_label = disk_label;
- h->algo = algo;
- h->redundancy = redundancy;
- h->watermark = watermark;
-
- list_add(&h->list, &c->ec_stripe_head_list);
- return h;
-}
-
-void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
-{
- if (h->s &&
- h->s->allocated &&
- bitmap_weight(h->s->blocks_allocated,
- h->s->nr_data) == h->s->nr_data)
- ec_stripe_new_set_pending(c, h);
-
- mutex_unlock(&h->lock);
-}
-
-static struct ec_stripe_head *
-__bch2_ec_stripe_head_get(struct btree_trans *trans,
- unsigned disk_label,
- unsigned algo,
- unsigned redundancy,
- enum bch_watermark watermark)
-{
- struct bch_fs *c = trans->c;
- struct ec_stripe_head *h;
- int ret;
-
- if (!redundancy)
- return NULL;
-
- ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
- if (ret)
- return ERR_PTR(ret);
-
- if (test_bit(BCH_FS_going_ro, &c->flags)) {
- h = ERR_PTR(-BCH_ERR_erofs_no_writes);
- goto err;
- }
-
- list_for_each_entry(h, &c->ec_stripe_head_list, list)
- if (h->disk_label == disk_label &&
- h->algo == algo &&
- h->redundancy == redundancy &&
- h->watermark == watermark) {
- ret = bch2_trans_mutex_lock(trans, &h->lock);
- if (ret) {
- h = ERR_PTR(ret);
- goto err;
- }
- goto found;
- }
-
- h = ec_new_stripe_head_alloc(c, disk_label, algo, redundancy, watermark);
- if (!h) {
- h = ERR_PTR(-BCH_ERR_ENOMEM_stripe_head_alloc);
- goto err;
- }
-found:
- if (h->rw_devs_change_count != c->rw_devs_change_count)
- ec_stripe_head_devs_update(c, h);
-
- if (h->insufficient_devs) {
- mutex_unlock(&h->lock);
- h = NULL;
- }
-err:
- mutex_unlock(&c->ec_stripe_head_lock);
- return h;
-}
-
-static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h,
- enum bch_watermark watermark, struct closure *cl)
-{
- struct bch_fs *c = trans->c;
- struct bch_devs_mask devs = h->devs;
- struct open_bucket *ob;
- struct open_buckets buckets;
- struct bch_stripe *v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
- unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
- bool have_cache = true;
- int ret = 0;
-
- BUG_ON(v->nr_blocks != h->s->nr_data + h->s->nr_parity);
- BUG_ON(v->nr_redundant != h->s->nr_parity);
-
- /* We bypass the sector allocator which normally does this: */
- bitmap_and(devs.d, devs.d, c->rw_devs[BCH_DATA_user].d, BCH_SB_MEMBERS_MAX);
-
- for_each_set_bit(i, h->s->blocks_gotten, v->nr_blocks) {
- /*
- * Note: we don't yet repair invalid blocks (failed/removed
- * devices) when reusing stripes - we still need a codepath to
- * walk backpointers and update all extents that point to that
- * block when updating the stripe
- */
- if (v->ptrs[i].dev != BCH_SB_MEMBER_INVALID)
- __clear_bit(v->ptrs[i].dev, devs.d);
-
- if (i < h->s->nr_data)
- nr_have_data++;
- else
- nr_have_parity++;
- }
-
- BUG_ON(nr_have_data > h->s->nr_data);
- BUG_ON(nr_have_parity > h->s->nr_parity);
-
- buckets.nr = 0;
- if (nr_have_parity < h->s->nr_parity) {
- ret = bch2_bucket_alloc_set_trans(trans, &buckets,
- &h->parity_stripe,
- &devs,
- h->s->nr_parity,
- &nr_have_parity,
- &have_cache, 0,
- BCH_DATA_parity,
- watermark,
- cl);
-
- open_bucket_for_each(c, &buckets, ob, i) {
- j = find_next_zero_bit(h->s->blocks_gotten,
- h->s->nr_data + h->s->nr_parity,
- h->s->nr_data);
- BUG_ON(j >= h->s->nr_data + h->s->nr_parity);
-
- h->s->blocks[j] = buckets.v[i];
- v->ptrs[j] = bch2_ob_ptr(c, ob);
- __set_bit(j, h->s->blocks_gotten);
- }
-
- if (ret)
- return ret;
- }
-
- buckets.nr = 0;
- if (nr_have_data < h->s->nr_data) {
- ret = bch2_bucket_alloc_set_trans(trans, &buckets,
- &h->block_stripe,
- &devs,
- h->s->nr_data,
- &nr_have_data,
- &have_cache, 0,
- BCH_DATA_user,
- watermark,
- cl);
-
- open_bucket_for_each(c, &buckets, ob, i) {
- j = find_next_zero_bit(h->s->blocks_gotten,
- h->s->nr_data, 0);
- BUG_ON(j >= h->s->nr_data);
-
- h->s->blocks[j] = buckets.v[i];
- v->ptrs[j] = bch2_ob_ptr(c, ob);
- __set_bit(j, h->s->blocks_gotten);
- }
-
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static s64 get_existing_stripe(struct bch_fs *c,
- struct ec_stripe_head *head)
-{
- ec_stripes_heap *h = &c->ec_stripes_heap;
- struct stripe *m;
- size_t heap_idx;
- u64 stripe_idx;
- s64 ret = -1;
-
- if (may_create_new_stripe(c))
- return -1;
-
- mutex_lock(&c->ec_stripes_heap_lock);
- for (heap_idx = 0; heap_idx < h->nr; heap_idx++) {
- /* No blocks worth reusing, stripe will just be deleted: */
- if (!h->data[heap_idx].blocks_nonempty)
- continue;
-
- stripe_idx = h->data[heap_idx].idx;
-
- m = genradix_ptr(&c->stripes, stripe_idx);
-
- if (m->disk_label == head->disk_label &&
- m->algorithm == head->algo &&
- m->nr_redundant == head->redundancy &&
- m->sectors == head->blocksize &&
- m->blocks_nonempty < m->nr_blocks - m->nr_redundant &&
- bch2_try_open_stripe(c, head->s, stripe_idx)) {
- ret = stripe_idx;
- break;
- }
- }
- mutex_unlock(&c->ec_stripes_heap_lock);
- return ret;
-}
-
-static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h)
-{
- struct bch_fs *c = trans->c;
- struct bch_stripe *new_v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
- struct bch_stripe *existing_v;
- unsigned i;
- s64 idx;
- int ret;
-
- /*
- * If we can't allocate a new stripe, and there's no stripes with empty
- * blocks for us to reuse, that means we have to wait on copygc:
- */
- idx = get_existing_stripe(c, h);
- if (idx < 0)
- return -BCH_ERR_stripe_alloc_blocked;
-
- ret = get_stripe_key_trans(trans, idx, &h->s->existing_stripe);
- bch2_fs_fatal_err_on(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart), c,
- "reading stripe key: %s", bch2_err_str(ret));
- if (ret) {
- bch2_stripe_close(c, h->s);
- return ret;
- }
-
- existing_v = &bkey_i_to_stripe(&h->s->existing_stripe.key)->v;
-
- BUG_ON(existing_v->nr_redundant != h->s->nr_parity);
- h->s->nr_data = existing_v->nr_blocks -
- existing_v->nr_redundant;
-
- ret = ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize);
- if (ret) {
- bch2_stripe_close(c, h->s);
- return ret;
- }
-
- BUG_ON(h->s->existing_stripe.size != h->blocksize);
- BUG_ON(h->s->existing_stripe.size != le16_to_cpu(existing_v->sectors));
-
- /*
- * Free buckets we initially allocated - they might conflict with
- * blocks from the stripe we're reusing:
- */
- for_each_set_bit(i, h->s->blocks_gotten, new_v->nr_blocks) {
- bch2_open_bucket_put(c, c->open_buckets + h->s->blocks[i]);
- h->s->blocks[i] = 0;
- }
- memset(h->s->blocks_gotten, 0, sizeof(h->s->blocks_gotten));
- memset(h->s->blocks_allocated, 0, sizeof(h->s->blocks_allocated));
-
- for (i = 0; i < existing_v->nr_blocks; i++) {
- if (stripe_blockcount_get(existing_v, i)) {
- __set_bit(i, h->s->blocks_gotten);
- __set_bit(i, h->s->blocks_allocated);
- }
-
- ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone);
- }
-
- bkey_copy(&h->s->new_stripe.key, &h->s->existing_stripe.key);
- h->s->have_existing_stripe = true;
-
- return 0;
-}
-
-static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bpos min_pos = POS(0, 1);
- struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
- int ret;
-
- if (!h->s->res.sectors) {
- ret = bch2_disk_reservation_get(c, &h->s->res,
- h->blocksize,
- h->s->nr_parity,
- BCH_DISK_RESERVATION_NOFAIL);
- if (ret)
- return ret;
- }
-
- for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
- BTREE_ITER_slots|BTREE_ITER_intent, k, ret) {
- if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
- if (start_pos.offset) {
- start_pos = min_pos;
- bch2_btree_iter_set_pos(&iter, start_pos);
- continue;
- }
-
- ret = -BCH_ERR_ENOSPC_stripe_create;
- break;
- }
-
- if (bkey_deleted(k.k) &&
- bch2_try_open_stripe(c, h->s, k.k->p.offset))
- break;
- }
-
- c->ec_stripe_hint = iter.pos.offset;
-
- if (ret)
- goto err;
-
- ret = ec_stripe_mem_alloc(trans, &iter);
- if (ret) {
- bch2_stripe_close(c, h->s);
- goto err;
- }
-
- h->s->new_stripe.key.k.p = iter.pos;
-out:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-err:
- bch2_disk_reservation_put(c, &h->s->res);
- goto out;
-}
-
-struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
- unsigned target,
- unsigned algo,
- unsigned redundancy,
- enum bch_watermark watermark,
- struct closure *cl)
-{
- struct bch_fs *c = trans->c;
- struct ec_stripe_head *h;
- bool waiting = false;
- unsigned disk_label = 0;
- struct target t = target_decode(target);
- int ret;
-
- if (t.type == TARGET_GROUP) {
- if (t.group > U8_MAX) {
- bch_err(c, "cannot create a stripe when disk_label > U8_MAX");
- return NULL;
- }
- disk_label = t.group + 1; /* 0 == no label */
- }
-
- h = __bch2_ec_stripe_head_get(trans, disk_label, algo, redundancy, watermark);
- if (IS_ERR_OR_NULL(h))
- return h;
-
- if (!h->s) {
- ret = ec_new_stripe_alloc(c, h);
- if (ret) {
- bch_err(c, "failed to allocate new stripe");
- goto err;
- }
- }
-
- if (h->s->allocated)
- goto allocated;
-
- if (h->s->have_existing_stripe)
- goto alloc_existing;
-
- /* First, try to allocate a full stripe: */
- ret = new_stripe_alloc_buckets(trans, h, BCH_WATERMARK_stripe, NULL) ?:
- __bch2_ec_stripe_head_reserve(trans, h);
- if (!ret)
- goto allocate_buf;
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
- bch2_err_matches(ret, ENOMEM))
- goto err;
-
- /*
- * Not enough buckets available for a full stripe: we must reuse an
- * existing stripe:
- */
- while (1) {
- ret = __bch2_ec_stripe_head_reuse(trans, h);
- if (!ret)
- break;
- if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
- goto err;
-
- if (watermark == BCH_WATERMARK_copygc) {
- ret = new_stripe_alloc_buckets(trans, h, watermark, NULL) ?:
- __bch2_ec_stripe_head_reserve(trans, h);
- if (ret)
- goto err;
- goto allocate_buf;
- }
-
- /* XXX freelist_wait? */
- closure_wait(&c->freelist_wait, cl);
- waiting = true;
- }
-
- if (waiting)
- closure_wake_up(&c->freelist_wait);
-alloc_existing:
- /*
- * Retry allocating buckets, with the watermark for this
- * particular write:
- */
- ret = new_stripe_alloc_buckets(trans, h, watermark, cl);
- if (ret)
- goto err;
-
-allocate_buf:
- ret = ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize);
- if (ret)
- goto err;
-
- h->s->allocated = true;
-allocated:
- BUG_ON(!h->s->idx);
- BUG_ON(!h->s->new_stripe.data[0]);
- BUG_ON(trans->restarted);
- return h;
-err:
- bch2_ec_stripe_head_put(c, h);
- return ERR_PTR(ret);
-}
-
-/* device removal */
-
-static int bch2_invalidate_stripe_to_dev(struct btree_trans *trans, struct bkey_s_c k_a)
-{
- struct bch_alloc_v4 a_convert;
- const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k_a, &a_convert);
-
- if (!a->stripe)
- return 0;
-
- if (a->stripe_sectors) {
- bch_err(trans->c, "trying to invalidate device in stripe when bucket has stripe data");
- return -BCH_ERR_invalidate_stripe_to_dev;
- }
-
- struct btree_iter iter;
- struct bkey_i_stripe *s =
- bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_stripes, POS(0, a->stripe),
- BTREE_ITER_slots, stripe);
- int ret = PTR_ERR_OR_ZERO(s);
- if (ret)
- return ret;
-
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_replicas,
- };
-
- s64 sectors = 0;
- for (unsigned i = 0; i < s->v.nr_blocks; i++)
- sectors -= stripe_blockcount_get(&s->v, i);
-
- bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
- acc.replicas.data_type = BCH_DATA_user;
- ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
- if (ret)
- goto err;
-
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(&s->k_i));
- bkey_for_each_ptr(ptrs, ptr)
- if (ptr->dev == k_a.k->p.inode)
- ptr->dev = BCH_SB_MEMBER_INVALID;
-
- sectors = -sectors;
-
- bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
- acc.replicas.data_type = BCH_DATA_user;
- ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
- if (ret)
- goto err;
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-int bch2_dev_remove_stripes(struct bch_fs *c, unsigned dev_idx)
-{
- return bch2_trans_run(c,
- for_each_btree_key_upto_commit(trans, iter,
- BTREE_ID_alloc, POS(dev_idx, 0), POS(dev_idx, U64_MAX),
- BTREE_ITER_intent, k,
- NULL, NULL, 0, ({
- bch2_invalidate_stripe_to_dev(trans, k);
- })));
-}
-
-/* startup/shutdown */
-
-static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
-{
- struct ec_stripe_head *h;
- struct open_bucket *ob;
- unsigned i;
-
- mutex_lock(&c->ec_stripe_head_lock);
- list_for_each_entry(h, &c->ec_stripe_head_list, list) {
- mutex_lock(&h->lock);
- if (!h->s)
- goto unlock;
-
- if (!ca)
- goto found;
-
- for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) {
- if (!h->s->blocks[i])
- continue;
-
- ob = c->open_buckets + h->s->blocks[i];
- if (ob->dev == ca->dev_idx)
- goto found;
- }
- goto unlock;
-found:
- ec_stripe_new_cancel(c, h, -BCH_ERR_erofs_no_writes);
-unlock:
- mutex_unlock(&h->lock);
- }
- mutex_unlock(&c->ec_stripe_head_lock);
-}
-
-void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
-{
- __bch2_ec_stop(c, ca);
-}
-
-void bch2_fs_ec_stop(struct bch_fs *c)
-{
- __bch2_ec_stop(c, NULL);
-}
-
-static bool bch2_fs_ec_flush_done(struct bch_fs *c)
-{
- bool ret;
-
- mutex_lock(&c->ec_stripe_new_lock);
- ret = list_empty(&c->ec_stripe_new_list);
- mutex_unlock(&c->ec_stripe_new_lock);
-
- return ret;
-}
-
-void bch2_fs_ec_flush(struct bch_fs *c)
-{
- wait_event(c->ec_stripe_new_wait, bch2_fs_ec_flush_done(c));
-}
-
-int bch2_stripes_read(struct bch_fs *c)
-{
- int ret = bch2_trans_run(c,
- for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN,
- BTREE_ITER_prefetch, k, ({
- if (k.k->type != KEY_TYPE_stripe)
- continue;
-
- ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL);
- if (ret)
- break;
-
- struct stripe *m = genradix_ptr(&c->stripes, k.k->p.offset);
-
- stripe_to_mem(m, bkey_s_c_to_stripe(k).v);
-
- bch2_stripes_heap_insert(c, m, k.k->p.offset);
- 0;
- })));
- bch_err_fn(c, ret);
- return ret;
-}
-
-void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
-{
- ec_stripes_heap *h = &c->ec_stripes_heap;
- struct stripe *m;
- size_t i;
-
- mutex_lock(&c->ec_stripes_heap_lock);
- for (i = 0; i < min_t(size_t, h->nr, 50); i++) {
- m = genradix_ptr(&c->stripes, h->data[i].idx);
-
- prt_printf(out, "%zu %u/%u+%u", h->data[i].idx,
- h->data[i].blocks_nonempty,
- m->nr_blocks - m->nr_redundant,
- m->nr_redundant);
- if (bch2_stripe_is_open(c, h->data[i].idx))
- prt_str(out, " open");
- prt_newline(out);
- }
- mutex_unlock(&c->ec_stripes_heap_lock);
-}
-
-static void bch2_new_stripe_to_text(struct printbuf *out, struct bch_fs *c,
- struct ec_stripe_new *s)
-{
- prt_printf(out, "\tidx %llu blocks %u+%u allocated %u ref %u %u %s obs",
- s->idx, s->nr_data, s->nr_parity,
- bitmap_weight(s->blocks_allocated, s->nr_data),
- atomic_read(&s->ref[STRIPE_REF_io]),
- atomic_read(&s->ref[STRIPE_REF_stripe]),
- bch2_watermarks[s->h->watermark]);
-
- struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
- unsigned i;
- for_each_set_bit(i, s->blocks_gotten, v->nr_blocks)
- prt_printf(out, " %u", s->blocks[i]);
- prt_newline(out);
- bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&s->new_stripe.key));
- prt_newline(out);
-}
-
-void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
-{
- struct ec_stripe_head *h;
- struct ec_stripe_new *s;
-
- mutex_lock(&c->ec_stripe_head_lock);
- list_for_each_entry(h, &c->ec_stripe_head_list, list) {
- prt_printf(out, "disk label %u algo %u redundancy %u %s nr created %llu:\n",
- h->disk_label, h->algo, h->redundancy,
- bch2_watermarks[h->watermark],
- h->nr_created);
-
- if (h->s)
- bch2_new_stripe_to_text(out, c, h->s);
- }
- mutex_unlock(&c->ec_stripe_head_lock);
-
- prt_printf(out, "in flight:\n");
-
- mutex_lock(&c->ec_stripe_new_lock);
- list_for_each_entry(s, &c->ec_stripe_new_list, list)
- bch2_new_stripe_to_text(out, c, s);
- mutex_unlock(&c->ec_stripe_new_lock);
-}
-
-void bch2_fs_ec_exit(struct bch_fs *c)
-{
- struct ec_stripe_head *h;
- unsigned i;
-
- while (1) {
- mutex_lock(&c->ec_stripe_head_lock);
- h = list_first_entry_or_null(&c->ec_stripe_head_list,
- struct ec_stripe_head, list);
- if (h)
- list_del(&h->list);
- mutex_unlock(&c->ec_stripe_head_lock);
- if (!h)
- break;
-
- if (h->s) {
- for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++)
- BUG_ON(h->s->blocks[i]);
-
- kfree(h->s);
- }
- kfree(h);
- }
-
- BUG_ON(!list_empty(&c->ec_stripe_new_list));
-
- free_heap(&c->ec_stripes_heap);
- genradix_free(&c->stripes);
- bioset_exit(&c->ec_bioset);
-}
-
-void bch2_fs_ec_init_early(struct bch_fs *c)
-{
- spin_lock_init(&c->ec_stripes_new_lock);
- mutex_init(&c->ec_stripes_heap_lock);
-
- INIT_LIST_HEAD(&c->ec_stripe_head_list);
- mutex_init(&c->ec_stripe_head_lock);
-
- INIT_LIST_HEAD(&c->ec_stripe_new_list);
- mutex_init(&c->ec_stripe_new_lock);
- init_waitqueue_head(&c->ec_stripe_new_wait);
-
- INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
- INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);
-}
-
-int bch2_fs_ec_init(struct bch_fs *c)
-{
- return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
- BIOSET_NEED_BVECS);
-}
diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h
deleted file mode 100644
index 43326370b410..000000000000
--- a/fs/bcachefs/ec.h
+++ /dev/null
@@ -1,272 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_EC_H
-#define _BCACHEFS_EC_H
-
-#include "ec_types.h"
-#include "buckets_types.h"
-#include "extents_types.h"
-
-enum bch_validate_flags;
-
-int bch2_stripe_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
- struct bkey_s_c);
-int bch2_trigger_stripe(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s,
- enum btree_iter_update_trigger_flags);
-
-#define bch2_bkey_ops_stripe ((struct bkey_ops) { \
- .key_validate = bch2_stripe_validate, \
- .val_to_text = bch2_stripe_to_text, \
- .swab = bch2_ptr_swab, \
- .trigger = bch2_trigger_stripe, \
- .min_val_size = 8, \
-})
-
-static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
-{
- return DIV_ROUND_UP(le16_to_cpu(s->sectors),
- 1 << s->csum_granularity_bits);
-}
-
-static inline unsigned stripe_csum_offset(const struct bch_stripe *s,
- unsigned dev, unsigned csum_idx)
-{
- EBUG_ON(s->csum_type >= BCH_CSUM_NR);
-
- unsigned csum_bytes = bch_crc_bytes[s->csum_type];
-
- return sizeof(struct bch_stripe) +
- sizeof(struct bch_extent_ptr) * s->nr_blocks +
- (dev * stripe_csums_per_device(s) + csum_idx) * csum_bytes;
-}
-
-static inline unsigned stripe_blockcount_offset(const struct bch_stripe *s,
- unsigned idx)
-{
- return stripe_csum_offset(s, s->nr_blocks, 0) +
- sizeof(u16) * idx;
-}
-
-static inline unsigned stripe_blockcount_get(const struct bch_stripe *s,
- unsigned idx)
-{
- return le16_to_cpup((void *) s + stripe_blockcount_offset(s, idx));
-}
-
-static inline void stripe_blockcount_set(struct bch_stripe *s,
- unsigned idx, unsigned v)
-{
- __le16 *p = (void *) s + stripe_blockcount_offset(s, idx);
-
- *p = cpu_to_le16(v);
-}
-
-static inline unsigned stripe_val_u64s(const struct bch_stripe *s)
-{
- return DIV_ROUND_UP(stripe_blockcount_offset(s, s->nr_blocks),
- sizeof(u64));
-}
-
-static inline void *stripe_csum(struct bch_stripe *s,
- unsigned block, unsigned csum_idx)
-{
- EBUG_ON(block >= s->nr_blocks);
- EBUG_ON(csum_idx >= stripe_csums_per_device(s));
-
- return (void *) s + stripe_csum_offset(s, block, csum_idx);
-}
-
-static inline struct bch_csum stripe_csum_get(struct bch_stripe *s,
- unsigned block, unsigned csum_idx)
-{
- struct bch_csum csum = { 0 };
-
- memcpy(&csum, stripe_csum(s, block, csum_idx), bch_crc_bytes[s->csum_type]);
- return csum;
-}
-
-static inline void stripe_csum_set(struct bch_stripe *s,
- unsigned block, unsigned csum_idx,
- struct bch_csum csum)
-{
- memcpy(stripe_csum(s, block, csum_idx), &csum, bch_crc_bytes[s->csum_type]);
-}
-
-static inline bool __bch2_ptr_matches_stripe(const struct bch_extent_ptr *stripe_ptr,
- const struct bch_extent_ptr *data_ptr,
- unsigned sectors)
-{
- return (data_ptr->dev == stripe_ptr->dev ||
- data_ptr->dev == BCH_SB_MEMBER_INVALID ||
- stripe_ptr->dev == BCH_SB_MEMBER_INVALID) &&
- data_ptr->gen == stripe_ptr->gen &&
- data_ptr->offset >= stripe_ptr->offset &&
- data_ptr->offset < stripe_ptr->offset + sectors;
-}
-
-static inline bool bch2_ptr_matches_stripe(const struct bch_stripe *s,
- struct extent_ptr_decoded p)
-{
- unsigned nr_data = s->nr_blocks - s->nr_redundant;
-
- BUG_ON(!p.has_ec);
-
- if (p.ec.block >= nr_data)
- return false;
-
- return __bch2_ptr_matches_stripe(&s->ptrs[p.ec.block], &p.ptr,
- le16_to_cpu(s->sectors));
-}
-
-static inline bool bch2_ptr_matches_stripe_m(const struct gc_stripe *m,
- struct extent_ptr_decoded p)
-{
- unsigned nr_data = m->nr_blocks - m->nr_redundant;
-
- BUG_ON(!p.has_ec);
-
- if (p.ec.block >= nr_data)
- return false;
-
- return __bch2_ptr_matches_stripe(&m->ptrs[p.ec.block], &p.ptr,
- m->sectors);
-}
-
-struct bch_read_bio;
-
-struct ec_stripe_buf {
- /* might not be buffering the entire stripe: */
- unsigned offset;
- unsigned size;
- unsigned long valid[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
-
- void *data[BCH_BKEY_PTRS_MAX];
-
- __BKEY_PADDED(key, 255);
-};
-
-struct ec_stripe_head;
-
-enum ec_stripe_ref {
- STRIPE_REF_io,
- STRIPE_REF_stripe,
- STRIPE_REF_NR
-};
-
-struct ec_stripe_new {
- struct bch_fs *c;
- struct ec_stripe_head *h;
- struct mutex lock;
- struct list_head list;
-
- struct hlist_node hash;
- u64 idx;
-
- struct closure iodone;
-
- atomic_t ref[STRIPE_REF_NR];
-
- int err;
-
- u8 nr_data;
- u8 nr_parity;
- bool allocated;
- bool pending;
- bool have_existing_stripe;
-
- unsigned long blocks_gotten[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
- unsigned long blocks_allocated[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
- open_bucket_idx_t blocks[BCH_BKEY_PTRS_MAX];
- struct disk_reservation res;
-
- struct ec_stripe_buf new_stripe;
- struct ec_stripe_buf existing_stripe;
-};
-
-struct ec_stripe_head {
- struct list_head list;
- struct mutex lock;
-
- unsigned disk_label;
- unsigned algo;
- unsigned redundancy;
- enum bch_watermark watermark;
- bool insufficient_devs;
-
- unsigned long rw_devs_change_count;
-
- u64 nr_created;
-
- struct bch_devs_mask devs;
- unsigned nr_active_devs;
-
- unsigned blocksize;
-
- struct dev_stripe_state block_stripe;
- struct dev_stripe_state parity_stripe;
-
- struct ec_stripe_new *s;
-};
-
-int bch2_ec_read_extent(struct btree_trans *, struct bch_read_bio *, struct bkey_s_c);
-
-void *bch2_writepoint_ec_buf(struct bch_fs *, struct write_point *);
-
-void bch2_ec_bucket_cancel(struct bch_fs *, struct open_bucket *);
-
-int bch2_ec_stripe_new_alloc(struct bch_fs *, struct ec_stripe_head *);
-
-void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *);
-struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
- unsigned, unsigned, unsigned,
- enum bch_watermark, struct closure *);
-
-void bch2_stripes_heap_update(struct bch_fs *, struct stripe *, size_t);
-void bch2_stripes_heap_del(struct bch_fs *, struct stripe *, size_t);
-void bch2_stripes_heap_insert(struct bch_fs *, struct stripe *, size_t);
-
-void bch2_do_stripe_deletes(struct bch_fs *);
-void bch2_ec_do_stripe_creates(struct bch_fs *);
-void bch2_ec_stripe_new_free(struct bch_fs *, struct ec_stripe_new *);
-
-static inline void ec_stripe_new_get(struct ec_stripe_new *s,
- enum ec_stripe_ref ref)
-{
- atomic_inc(&s->ref[ref]);
-}
-
-static inline void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s,
- enum ec_stripe_ref ref)
-{
- BUG_ON(atomic_read(&s->ref[ref]) <= 0);
-
- if (atomic_dec_and_test(&s->ref[ref]))
- switch (ref) {
- case STRIPE_REF_stripe:
- bch2_ec_stripe_new_free(c, s);
- break;
- case STRIPE_REF_io:
- bch2_ec_do_stripe_creates(c);
- break;
- default:
- BUG();
- }
-}
-
-int bch2_dev_remove_stripes(struct bch_fs *, unsigned);
-
-void bch2_ec_stop_dev(struct bch_fs *, struct bch_dev *);
-void bch2_fs_ec_stop(struct bch_fs *);
-void bch2_fs_ec_flush(struct bch_fs *);
-
-int bch2_stripes_read(struct bch_fs *);
-
-void bch2_stripes_heap_to_text(struct printbuf *, struct bch_fs *);
-void bch2_new_stripes_to_text(struct printbuf *, struct bch_fs *);
-
-void bch2_fs_ec_exit(struct bch_fs *);
-void bch2_fs_ec_init_early(struct bch_fs *);
-int bch2_fs_ec_init(struct bch_fs *);
-
-#endif /* _BCACHEFS_EC_H */
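The stripe_csums_per_device(), stripe_csum_offset() and stripe_blockcount_offset() helpers above describe the variable-length layout of a bch_stripe value: the fixed header, then nr_blocks extent pointers, then one group of per-granule checksums per block, then one little-endian u16 block count per block. The following is a minimal standalone sketch of the same arithmetic; the header and pointer sizes are passed in as plain parameters and the numbers in main() are invented for illustration, not taken from the kernel types.

#include <stdio.h>

/*
 * Same arithmetic as stripe_csum_offset()/stripe_blockcount_offset(), with
 * sizeof(struct bch_stripe) and sizeof(struct bch_extent_ptr) replaced by
 * explicit parameters for illustration.
 */
static unsigned csums_per_device(unsigned sectors, unsigned csum_granularity_bits)
{
	unsigned granule = 1u << csum_granularity_bits;

	return (sectors + granule - 1) / granule;	/* DIV_ROUND_UP */
}

static unsigned csum_offset(unsigned hdr_bytes, unsigned ptr_bytes,
			    unsigned nr_blocks, unsigned csums_per_dev,
			    unsigned csum_bytes, unsigned dev, unsigned csum_idx)
{
	return hdr_bytes +
		ptr_bytes * nr_blocks +
		(dev * csums_per_dev + csum_idx) * csum_bytes;
}

static unsigned blockcount_offset(unsigned hdr_bytes, unsigned ptr_bytes,
				  unsigned nr_blocks, unsigned csums_per_dev,
				  unsigned csum_bytes, unsigned idx)
{
	/* block counts start where checksums for "device nr_blocks" would: */
	return csum_offset(hdr_bytes, ptr_bytes, nr_blocks, csums_per_dev,
			   csum_bytes, nr_blocks, 0) + 2 * idx; /* sizeof(u16) */
}

int main(void)
{
	/* e.g. 6 blocks, 128-sector stripe, 64-sector csum granules, 4-byte crc: */
	unsigned per_dev = csums_per_device(128, 6);	/* = 2 */

	printf("csum[dev=2][1] at byte %u, blockcount[3] at byte %u\n",
	       csum_offset(16, 8, 6, per_dev, 4, 2, 1),
	       blockcount_offset(16, 8, 6, per_dev, 4, 3));
	return 0;
}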
diff --git a/fs/bcachefs/ec_format.h b/fs/bcachefs/ec_format.h
deleted file mode 100644
index 64ef52e00078..000000000000
--- a/fs/bcachefs/ec_format.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_EC_FORMAT_H
-#define _BCACHEFS_EC_FORMAT_H
-
-struct bch_stripe {
- struct bch_val v;
- __le16 sectors;
- __u8 algorithm;
- __u8 nr_blocks;
- __u8 nr_redundant;
-
- __u8 csum_granularity_bits;
- __u8 csum_type;
-
- /*
- * XXX: targets should be 16 bits - fix this if we ever do a stripe_v2
- *
- * we can manage with this because this only needs to point to a
- * disk label, not a target:
- */
- __u8 disk_label;
-
- struct bch_extent_ptr ptrs[];
-} __packed __aligned(8);
-
-#endif /* _BCACHEFS_EC_FORMAT_H */
diff --git a/fs/bcachefs/ec_types.h b/fs/bcachefs/ec_types.h
deleted file mode 100644
index 8d1e70e830ac..000000000000
--- a/fs/bcachefs/ec_types.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_EC_TYPES_H
-#define _BCACHEFS_EC_TYPES_H
-
-#include "bcachefs_format.h"
-
-struct bch_replicas_padded {
- struct bch_replicas_entry_v1 e;
- u8 pad[BCH_BKEY_PTRS_MAX];
-};
-
-struct stripe {
- size_t heap_idx;
- u16 sectors;
- u8 algorithm;
- u8 nr_blocks;
- u8 nr_redundant;
- u8 blocks_nonempty;
- u8 disk_label;
-};
-
-struct gc_stripe {
- u16 sectors;
-
- u8 nr_blocks;
- u8 nr_redundant;
-
- unsigned alive:1; /* does a corresponding key exist in stripes btree? */
- u16 block_sectors[BCH_BKEY_PTRS_MAX];
- struct bch_extent_ptr ptrs[BCH_BKEY_PTRS_MAX];
-
- struct bch_replicas_padded r;
-};
-
-struct ec_stripe_heap_entry {
- size_t idx;
- unsigned blocks_nonempty;
-};
-
-typedef DEFINE_MIN_HEAP(struct ec_stripe_heap_entry, ec_stripes_heap) ec_stripes_heap;
-
-#endif /* _BCACHEFS_EC_TYPES_H */
diff --git a/fs/bcachefs/errcode.c b/fs/bcachefs/errcode.c
deleted file mode 100644
index 43557bebd0f8..000000000000
--- a/fs/bcachefs/errcode.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "errcode.h"
-#include "trace.h"
-
-#include <linux/errname.h>
-
-static const char * const bch2_errcode_strs[] = {
-#define x(class, err) [BCH_ERR_##err - BCH_ERR_START] = #err,
- BCH_ERRCODES()
-#undef x
- NULL
-};
-
-static unsigned bch2_errcode_parents[] = {
-#define x(class, err) [BCH_ERR_##err - BCH_ERR_START] = class,
- BCH_ERRCODES()
-#undef x
-};
-
-const char *bch2_err_str(int err)
-{
- const char *errstr;
-
- err = abs(err);
-
- BUG_ON(err >= BCH_ERR_MAX);
-
- if (err >= BCH_ERR_START)
- errstr = bch2_errcode_strs[err - BCH_ERR_START];
- else if (err)
- errstr = errname(err);
- else
- errstr = "(No error)";
- return errstr ?: "(Invalid error)";
-}
-
-bool __bch2_err_matches(int err, int class)
-{
- err = abs(err);
- class = abs(class);
-
- BUG_ON(err >= BCH_ERR_MAX);
- BUG_ON(class >= BCH_ERR_MAX);
-
- while (err >= BCH_ERR_START && err != class)
- err = bch2_errcode_parents[err - BCH_ERR_START];
-
- return err == class;
-}
-
-int __bch2_err_class(int bch_err)
-{
- int std_err = -bch_err;
- BUG_ON((unsigned) std_err >= BCH_ERR_MAX);
-
- while (std_err >= BCH_ERR_START && bch2_errcode_parents[std_err - BCH_ERR_START])
- std_err = bch2_errcode_parents[std_err - BCH_ERR_START];
-
- trace_error_downcast(bch_err, std_err, _RET_IP_);
-
- return -std_err;
-}
-
-const char *bch2_blk_status_to_str(blk_status_t status)
-{
- if (status == BLK_STS_REMOVED)
- return "device removed";
- return blk_status_to_str(status);
-}
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
deleted file mode 100644
index 9c4fe5cdbfb7..000000000000
--- a/fs/bcachefs/errcode.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_ERRCODE_H
-#define _BCACHEFS_ERRCODE_H
-
-#define BCH_ERRCODES() \
- x(ERANGE, ERANGE_option_too_small) \
- x(ERANGE, ERANGE_option_too_big) \
- x(EINVAL, mount_option) \
- x(BCH_ERR_mount_option, option_name) \
- x(BCH_ERR_mount_option, option_value) \
- x(BCH_ERR_mount_option, option_not_bool) \
- x(ENOMEM, ENOMEM_stripe_buf) \
- x(ENOMEM, ENOMEM_replicas_table) \
- x(ENOMEM, ENOMEM_cpu_replicas) \
- x(ENOMEM, ENOMEM_replicas_gc) \
- x(ENOMEM, ENOMEM_disk_groups_validate) \
- x(ENOMEM, ENOMEM_disk_groups_to_cpu) \
- x(ENOMEM, ENOMEM_mark_snapshot) \
- x(ENOMEM, ENOMEM_mark_stripe) \
- x(ENOMEM, ENOMEM_mark_stripe_ptr) \
- x(ENOMEM, ENOMEM_btree_key_cache_create) \
- x(ENOMEM, ENOMEM_btree_key_cache_fill) \
- x(ENOMEM, ENOMEM_btree_key_cache_insert) \
- x(ENOMEM, ENOMEM_trans_kmalloc) \
- x(ENOMEM, ENOMEM_trans_log_msg) \
- x(ENOMEM, ENOMEM_do_encrypt) \
- x(ENOMEM, ENOMEM_ec_read_extent) \
- x(ENOMEM, ENOMEM_ec_stripe_mem_alloc) \
- x(ENOMEM, ENOMEM_ec_new_stripe_alloc) \
- x(ENOMEM, ENOMEM_fs_btree_cache_init) \
- x(ENOMEM, ENOMEM_fs_btree_key_cache_init) \
- x(ENOMEM, ENOMEM_fs_counters_init) \
- x(ENOMEM, ENOMEM_fs_btree_write_buffer_init) \
- x(ENOMEM, ENOMEM_io_clock_init) \
- x(ENOMEM, ENOMEM_blacklist_table_init) \
- x(ENOMEM, ENOMEM_sb_realloc_injected) \
- x(ENOMEM, ENOMEM_sb_bio_realloc) \
- x(ENOMEM, ENOMEM_sb_buf_realloc) \
- x(ENOMEM, ENOMEM_sb_journal_validate) \
- x(ENOMEM, ENOMEM_sb_journal_v2_validate) \
- x(ENOMEM, ENOMEM_journal_entry_add) \
- x(ENOMEM, ENOMEM_journal_read_buf_realloc) \
- x(ENOMEM, ENOMEM_btree_interior_update_worker_init)\
- x(ENOMEM, ENOMEM_btree_interior_update_pool_init) \
- x(ENOMEM, ENOMEM_bio_read_init) \
- x(ENOMEM, ENOMEM_bio_read_split_init) \
- x(ENOMEM, ENOMEM_bio_write_init) \
- x(ENOMEM, ENOMEM_bio_bounce_pages_init) \
- x(ENOMEM, ENOMEM_writepage_bioset_init) \
- x(ENOMEM, ENOMEM_dio_read_bioset_init) \
- x(ENOMEM, ENOMEM_dio_write_bioset_init) \
- x(ENOMEM, ENOMEM_nocow_flush_bioset_init) \
- x(ENOMEM, ENOMEM_promote_table_init) \
- x(ENOMEM, ENOMEM_compression_bounce_read_init) \
- x(ENOMEM, ENOMEM_compression_bounce_write_init) \
- x(ENOMEM, ENOMEM_compression_workspace_init) \
- x(ENOMEM, ENOMEM_decompression_workspace_init) \
- x(ENOMEM, ENOMEM_bucket_gens) \
- x(ENOMEM, ENOMEM_buckets_nouse) \
- x(ENOMEM, ENOMEM_usage_init) \
- x(ENOMEM, ENOMEM_btree_node_read_all_replicas) \
- x(ENOMEM, ENOMEM_btree_node_reclaim) \
- x(ENOMEM, ENOMEM_btree_node_mem_alloc) \
- x(ENOMEM, ENOMEM_btree_cache_cannibalize_lock) \
- x(ENOMEM, ENOMEM_buckets_waiting_for_journal_init)\
- x(ENOMEM, ENOMEM_buckets_waiting_for_journal_set) \
- x(ENOMEM, ENOMEM_set_nr_journal_buckets) \
- x(ENOMEM, ENOMEM_dev_journal_init) \
- x(ENOMEM, ENOMEM_journal_pin_fifo) \
- x(ENOMEM, ENOMEM_journal_buf) \
- x(ENOMEM, ENOMEM_gc_start) \
- x(ENOMEM, ENOMEM_gc_alloc_start) \
- x(ENOMEM, ENOMEM_gc_reflink_start) \
- x(ENOMEM, ENOMEM_gc_gens) \
- x(ENOMEM, ENOMEM_gc_repair_key) \
- x(ENOMEM, ENOMEM_fsck_extent_ends_at) \
- x(ENOMEM, ENOMEM_fsck_add_nlink) \
- x(ENOMEM, ENOMEM_journal_key_insert) \
- x(ENOMEM, ENOMEM_journal_keys_sort) \
- x(ENOMEM, ENOMEM_read_superblock_clean) \
- x(ENOMEM, ENOMEM_fs_alloc) \
- x(ENOMEM, ENOMEM_fs_name_alloc) \
- x(ENOMEM, ENOMEM_fs_other_alloc) \
- x(ENOMEM, ENOMEM_dev_alloc) \
- x(ENOMEM, ENOMEM_disk_accounting) \
- x(ENOMEM, ENOMEM_stripe_head_alloc) \
- x(ENOMEM, ENOMEM_journal_read_bucket) \
- x(ENOSPC, ENOSPC_disk_reservation) \
- x(ENOSPC, ENOSPC_bucket_alloc) \
- x(ENOSPC, ENOSPC_disk_label_add) \
- x(ENOSPC, ENOSPC_stripe_create) \
- x(ENOSPC, ENOSPC_inode_create) \
- x(ENOSPC, ENOSPC_str_hash_create) \
- x(ENOSPC, ENOSPC_snapshot_create) \
- x(ENOSPC, ENOSPC_subvolume_create) \
- x(ENOSPC, ENOSPC_sb) \
- x(ENOSPC, ENOSPC_sb_journal) \
- x(ENOSPC, ENOSPC_sb_journal_seq_blacklist) \
- x(ENOSPC, ENOSPC_sb_quota) \
- x(ENOSPC, ENOSPC_sb_replicas) \
- x(ENOSPC, ENOSPC_sb_members) \
- x(ENOSPC, ENOSPC_sb_members_v2) \
- x(ENOSPC, ENOSPC_sb_crypt) \
- x(ENOSPC, ENOSPC_sb_downgrade) \
- x(ENOSPC, ENOSPC_btree_slot) \
- x(ENOSPC, ENOSPC_snapshot_tree) \
- x(ENOENT, ENOENT_bkey_type_mismatch) \
- x(ENOENT, ENOENT_str_hash_lookup) \
- x(ENOENT, ENOENT_str_hash_set_must_replace) \
- x(ENOENT, ENOENT_inode) \
- x(ENOENT, ENOENT_not_subvol) \
- x(ENOENT, ENOENT_not_directory) \
- x(ENOENT, ENOENT_directory_dead) \
- x(ENOENT, ENOENT_subvolume) \
- x(ENOENT, ENOENT_snapshot_tree) \
- x(ENOENT, ENOENT_dirent_doesnt_match_inode) \
- x(ENOENT, ENOENT_dev_not_found) \
- x(ENOENT, ENOENT_dev_idx_not_found) \
- x(ENOTEMPTY, ENOTEMPTY_dir_not_empty) \
- x(ENOTEMPTY, ENOTEMPTY_subvol_not_empty) \
- x(EEXIST, EEXIST_str_hash_set) \
- x(EEXIST, EEXIST_discard_in_flight_add) \
- x(EEXIST, EEXIST_subvolume_create) \
- x(ENOSPC, open_buckets_empty) \
- x(ENOSPC, freelist_empty) \
- x(BCH_ERR_freelist_empty, no_buckets_found) \
- x(0, transaction_restart) \
- x(BCH_ERR_transaction_restart, transaction_restart_fault_inject) \
- x(BCH_ERR_transaction_restart, transaction_restart_relock) \
- x(BCH_ERR_transaction_restart, transaction_restart_relock_path) \
- x(BCH_ERR_transaction_restart, transaction_restart_relock_path_intent) \
- x(BCH_ERR_transaction_restart, transaction_restart_relock_after_fill) \
- x(BCH_ERR_transaction_restart, transaction_restart_too_many_iters) \
- x(BCH_ERR_transaction_restart, transaction_restart_lock_node_reused) \
- x(BCH_ERR_transaction_restart, transaction_restart_fill_relock) \
- x(BCH_ERR_transaction_restart, transaction_restart_fill_mem_alloc_fail)\
- x(BCH_ERR_transaction_restart, transaction_restart_mem_realloced) \
- x(BCH_ERR_transaction_restart, transaction_restart_in_traverse_all) \
- x(BCH_ERR_transaction_restart, transaction_restart_would_deadlock) \
- x(BCH_ERR_transaction_restart, transaction_restart_would_deadlock_write)\
- x(BCH_ERR_transaction_restart, transaction_restart_deadlock_recursion_limit)\
- x(BCH_ERR_transaction_restart, transaction_restart_upgrade) \
- x(BCH_ERR_transaction_restart, transaction_restart_key_cache_upgrade) \
- x(BCH_ERR_transaction_restart, transaction_restart_key_cache_fill) \
- x(BCH_ERR_transaction_restart, transaction_restart_key_cache_raced) \
- x(BCH_ERR_transaction_restart, transaction_restart_key_cache_realloced)\
- x(BCH_ERR_transaction_restart, transaction_restart_journal_preres_get) \
- x(BCH_ERR_transaction_restart, transaction_restart_split_race) \
- x(BCH_ERR_transaction_restart, transaction_restart_write_buffer_flush) \
- x(BCH_ERR_transaction_restart, transaction_restart_nested) \
- x(0, no_btree_node) \
- x(BCH_ERR_no_btree_node, no_btree_node_relock) \
- x(BCH_ERR_no_btree_node, no_btree_node_upgrade) \
- x(BCH_ERR_no_btree_node, no_btree_node_drop) \
- x(BCH_ERR_no_btree_node, no_btree_node_lock_root) \
- x(BCH_ERR_no_btree_node, no_btree_node_up) \
- x(BCH_ERR_no_btree_node, no_btree_node_down) \
- x(BCH_ERR_no_btree_node, no_btree_node_init) \
- x(BCH_ERR_no_btree_node, no_btree_node_cached) \
- x(BCH_ERR_no_btree_node, no_btree_node_srcu_reset) \
- x(0, btree_insert_fail) \
- x(BCH_ERR_btree_insert_fail, btree_insert_btree_node_full) \
- x(BCH_ERR_btree_insert_fail, btree_insert_need_mark_replicas) \
- x(BCH_ERR_btree_insert_fail, btree_insert_need_journal_res) \
- x(BCH_ERR_btree_insert_fail, btree_insert_need_journal_reclaim) \
- x(0, backpointer_to_overwritten_btree_node) \
- x(0, lock_fail_root_changed) \
- x(0, journal_reclaim_would_deadlock) \
- x(EINVAL, fsck) \
- x(BCH_ERR_fsck, fsck_fix) \
- x(BCH_ERR_fsck, fsck_delete_bkey) \
- x(BCH_ERR_fsck, fsck_ignore) \
- x(BCH_ERR_fsck, fsck_errors_not_fixed) \
- x(BCH_ERR_fsck, fsck_repair_unimplemented) \
- x(BCH_ERR_fsck, fsck_repair_impossible) \
- x(0, restart_recovery) \
- x(0, data_update_done) \
- x(EINVAL, device_state_not_allowed) \
- x(EINVAL, member_info_missing) \
- x(EINVAL, mismatched_block_size) \
- x(EINVAL, block_size_too_small) \
- x(EINVAL, bucket_size_too_small) \
- x(EINVAL, device_size_too_small) \
- x(EINVAL, device_size_too_big) \
- x(EINVAL, device_not_a_member_of_filesystem) \
- x(EINVAL, device_has_been_removed) \
- x(EINVAL, device_splitbrain) \
- x(EINVAL, device_already_online) \
- x(EINVAL, insufficient_devices_to_start) \
- x(EINVAL, invalid) \
- x(EINVAL, internal_fsck_err) \
- x(EINVAL, opt_parse_error) \
- x(EINVAL, remove_with_metadata_missing_unimplemented)\
- x(EINVAL, remove_would_lose_data) \
- x(EINVAL, btree_iter_with_journal_not_supported) \
- x(EROFS, erofs_trans_commit) \
- x(EROFS, erofs_no_writes) \
- x(EROFS, erofs_journal_err) \
- x(EROFS, erofs_sb_err) \
- x(EROFS, erofs_unfixed_errors) \
- x(EROFS, erofs_norecovery) \
- x(EROFS, erofs_nochanges) \
- x(EROFS, insufficient_devices) \
- x(0, operation_blocked) \
- x(BCH_ERR_operation_blocked, btree_cache_cannibalize_lock_blocked) \
- x(BCH_ERR_operation_blocked, journal_res_get_blocked) \
- x(BCH_ERR_operation_blocked, journal_preres_get_blocked) \
- x(BCH_ERR_operation_blocked, bucket_alloc_blocked) \
- x(BCH_ERR_operation_blocked, stripe_alloc_blocked) \
- x(BCH_ERR_invalid, invalid_sb) \
- x(BCH_ERR_invalid_sb, invalid_sb_magic) \
- x(BCH_ERR_invalid_sb, invalid_sb_version) \
- x(BCH_ERR_invalid_sb, invalid_sb_features) \
- x(BCH_ERR_invalid_sb, invalid_sb_too_big) \
- x(BCH_ERR_invalid_sb, invalid_sb_csum_type) \
- x(BCH_ERR_invalid_sb, invalid_sb_csum) \
- x(BCH_ERR_invalid_sb, invalid_sb_block_size) \
- x(BCH_ERR_invalid_sb, invalid_sb_uuid) \
- x(BCH_ERR_invalid_sb, invalid_sb_too_many_members) \
- x(BCH_ERR_invalid_sb, invalid_sb_dev_idx) \
- x(BCH_ERR_invalid_sb, invalid_sb_time_precision) \
- x(BCH_ERR_invalid_sb, invalid_sb_field_size) \
- x(BCH_ERR_invalid_sb, invalid_sb_layout) \
- x(BCH_ERR_invalid_sb_layout, invalid_sb_layout_type) \
- x(BCH_ERR_invalid_sb_layout, invalid_sb_layout_nr_superblocks) \
- x(BCH_ERR_invalid_sb_layout, invalid_sb_layout_superblocks_overlap) \
- x(BCH_ERR_invalid_sb_layout, invalid_sb_layout_sb_max_size_bits) \
- x(BCH_ERR_invalid_sb, invalid_sb_members_missing) \
- x(BCH_ERR_invalid_sb, invalid_sb_members) \
- x(BCH_ERR_invalid_sb, invalid_sb_disk_groups) \
- x(BCH_ERR_invalid_sb, invalid_sb_replicas) \
- x(BCH_ERR_invalid_sb, invalid_replicas_entry) \
- x(BCH_ERR_invalid_sb, invalid_sb_journal) \
- x(BCH_ERR_invalid_sb, invalid_sb_journal_seq_blacklist) \
- x(BCH_ERR_invalid_sb, invalid_sb_crypt) \
- x(BCH_ERR_invalid_sb, invalid_sb_clean) \
- x(BCH_ERR_invalid_sb, invalid_sb_quota) \
- x(BCH_ERR_invalid_sb, invalid_sb_errors) \
- x(BCH_ERR_invalid_sb, invalid_sb_opt_compression) \
- x(BCH_ERR_invalid_sb, invalid_sb_ext) \
- x(BCH_ERR_invalid_sb, invalid_sb_downgrade) \
- x(BCH_ERR_invalid, invalid_bkey) \
- x(BCH_ERR_operation_blocked, nocow_lock_blocked) \
- x(EIO, btree_node_read_err) \
- x(EIO, sb_not_downgraded) \
- x(EIO, btree_node_write_all_failed) \
- x(EIO, btree_node_read_error) \
- x(EIO, btree_node_read_validate_error) \
- x(EIO, btree_need_topology_repair) \
- x(EIO, bucket_ref_update) \
- x(EIO, trigger_pointer) \
- x(EIO, trigger_stripe_pointer) \
- x(EIO, metadata_bucket_inconsistency) \
- x(EIO, mark_stripe) \
- x(EIO, stripe_reconstruct) \
- x(EIO, key_type_error) \
- x(EIO, no_device_to_read_from) \
- x(EIO, missing_indirect_extent) \
- x(EIO, invalidate_stripe_to_dev) \
- x(BCH_ERR_btree_node_read_err, btree_node_read_err_fixable) \
- x(BCH_ERR_btree_node_read_err, btree_node_read_err_want_retry) \
- x(BCH_ERR_btree_node_read_err, btree_node_read_err_must_retry) \
- x(BCH_ERR_btree_node_read_err, btree_node_read_err_bad_node) \
- x(BCH_ERR_btree_node_read_err, btree_node_read_err_incompatible) \
- x(0, nopromote) \
- x(BCH_ERR_nopromote, nopromote_may_not) \
- x(BCH_ERR_nopromote, nopromote_already_promoted) \
- x(BCH_ERR_nopromote, nopromote_unwritten) \
- x(BCH_ERR_nopromote, nopromote_congested) \
- x(BCH_ERR_nopromote, nopromote_in_flight) \
- x(BCH_ERR_nopromote, nopromote_no_writes) \
- x(BCH_ERR_nopromote, nopromote_enomem) \
- x(0, invalid_snapshot_node) \
- x(0, option_needs_open_fs) \
- x(0, remove_disk_accounting_entry)
-
-enum bch_errcode {
- BCH_ERR_START = 2048,
-#define x(class, err) BCH_ERR_##err,
- BCH_ERRCODES()
-#undef x
- BCH_ERR_MAX
-};
-
-const char *bch2_err_str(int);
-bool __bch2_err_matches(int, int);
-
-static inline bool _bch2_err_matches(int err, int class)
-{
- return err < 0 && __bch2_err_matches(err, class);
-}
-
-#define bch2_err_matches(_err, _class) \
-({ \
- BUILD_BUG_ON(!__builtin_constant_p(_class)); \
- unlikely(_bch2_err_matches(_err, _class)); \
-})
-
-int __bch2_err_class(int);
-
-static inline long bch2_err_class(long err)
-{
- return err < 0 ? __bch2_err_class(err) : err;
-}
-
-#define BLK_STS_REMOVED ((__force blk_status_t)128)
-
-const char *bch2_blk_status_to_str(blk_status_t);
-
-#endif /* _BCACHEFS_ERRCODE_H */
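BCH_ERRCODES() above is an x-macro table that pairs every private error code with a parent, which is either a plain errno or another BCH_ERR_* code, and __bch2_err_matches() resolves a code by walking that parent chain. The following is a hypothetical, self-contained sketch of the same pattern with a three-entry table and invented ERR_* names; it is not the bcachefs table itself.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Each entry: x(parent, name); parent is an errno or another ERR_* code. */
#define ERRCODES()							\
	x(ENOMEM,			ENOMEM_stripe_buf)		\
	x(0,				transaction_restart)		\
	x(ERR_transaction_restart,	transaction_restart_relock)

enum errcode {
	ERR_START = 2048,
#define x(class, err) ERR_##err,
	ERRCODES()
#undef x
	ERR_MAX
};

static const unsigned err_parents[] = {
#define x(class, err) [ERR_##err - ERR_START] = class,
	ERRCODES()
#undef x
};

/* Walk up the parent chain until we hit @class or leave the private range. */
static int err_matches(int err, int class)
{
	err = abs(err);
	class = abs(class);

	while (err >= ERR_START && err != class)
		err = err_parents[err - ERR_START];

	return err == class;
}

int main(void)
{
	printf("%d\n", err_matches(-ERR_transaction_restart_relock,
				   ERR_transaction_restart));		/* 1 */
	printf("%d\n", err_matches(-ERR_ENOMEM_stripe_buf, ENOMEM));	/* 1 */
	printf("%d\n", err_matches(-ERR_ENOMEM_stripe_buf,
				   ERR_transaction_restart));		/* 0 */
	return 0;
}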
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
deleted file mode 100644
index b679def8fb98..000000000000
--- a/fs/bcachefs/error.c
+++ /dev/null
@@ -1,485 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "bcachefs.h"
-#include "btree_iter.h"
-#include "error.h"
-#include "journal.h"
-#include "recovery_passes.h"
-#include "super.h"
-#include "thread_with_file.h"
-
-#define FSCK_ERR_RATELIMIT_NR 10
-
-bool bch2_inconsistent_error(struct bch_fs *c)
-{
- set_bit(BCH_FS_error, &c->flags);
-
- switch (c->opts.errors) {
- case BCH_ON_ERROR_continue:
- return false;
- case BCH_ON_ERROR_fix_safe:
- case BCH_ON_ERROR_ro:
- if (bch2_fs_emergency_read_only(c))
- bch_err(c, "inconsistency detected - emergency read only at journal seq %llu",
- journal_cur_seq(&c->journal));
- return true;
- case BCH_ON_ERROR_panic:
- panic(bch2_fmt(c, "panic after error"));
- return true;
- default:
- BUG();
- }
-}
-
-int bch2_topology_error(struct bch_fs *c)
-{
- set_bit(BCH_FS_topology_error, &c->flags);
- if (!test_bit(BCH_FS_fsck_running, &c->flags)) {
- bch2_inconsistent_error(c);
- return -BCH_ERR_btree_need_topology_repair;
- } else {
- return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology) ?:
- -BCH_ERR_btree_node_read_validate_error;
- }
-}
-
-void bch2_fatal_error(struct bch_fs *c)
-{
- if (bch2_fs_emergency_read_only(c))
- bch_err(c, "fatal error - emergency read only");
-}
-
-void bch2_io_error_work(struct work_struct *work)
-{
- struct bch_dev *ca = container_of(work, struct bch_dev, io_error_work);
- struct bch_fs *c = ca->fs;
- bool dev;
-
- down_write(&c->state_lock);
- dev = bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_ro,
- BCH_FORCE_IF_DEGRADED);
- if (dev
- ? __bch2_dev_set_state(c, ca, BCH_MEMBER_STATE_ro,
- BCH_FORCE_IF_DEGRADED)
- : bch2_fs_emergency_read_only(c))
- bch_err(ca,
- "too many IO errors, setting %s RO",
- dev ? "device" : "filesystem");
- up_write(&c->state_lock);
-}
-
-void bch2_io_error(struct bch_dev *ca, enum bch_member_error_type type)
-{
- atomic64_inc(&ca->errors[type]);
- //queue_work(system_long_wq, &ca->io_error_work);
-}
-
-enum ask_yn {
- YN_NO,
- YN_YES,
- YN_ALLNO,
- YN_ALLYES,
-};
-
-static enum ask_yn parse_yn_response(char *buf)
-{
- buf = strim(buf);
-
- if (strlen(buf) == 1)
- switch (buf[0]) {
- case 'n':
- return YN_NO;
- case 'y':
- return YN_YES;
- case 'N':
- return YN_ALLNO;
- case 'Y':
- return YN_ALLYES;
- }
- return -1;
-}
-
-#ifdef __KERNEL__
-static enum ask_yn bch2_fsck_ask_yn(struct bch_fs *c, struct btree_trans *trans)
-{
- struct stdio_redirect *stdio = c->stdio;
-
- if (c->stdio_filter && c->stdio_filter != current)
- stdio = NULL;
-
- if (!stdio)
- return YN_NO;
-
- if (trans)
- bch2_trans_unlock(trans);
-
- unsigned long unlock_long_at = trans ? jiffies + HZ * 2 : 0;
- darray_char line = {};
- int ret;
-
- do {
- unsigned long t;
- bch2_print(c, " (y,n, or Y,N for all errors of this type) ");
-rewait:
- t = unlock_long_at
- ? max_t(long, unlock_long_at - jiffies, 0)
- : MAX_SCHEDULE_TIMEOUT;
-
- int r = bch2_stdio_redirect_readline_timeout(stdio, &line, t);
- if (r == -ETIME) {
- bch2_trans_unlock_long(trans);
- unlock_long_at = 0;
- goto rewait;
- }
-
- if (r < 0) {
- ret = YN_NO;
- break;
- }
-
- darray_last(line) = '\0';
- } while ((ret = parse_yn_response(line.data)) < 0);
-
- darray_exit(&line);
- return ret;
-}
-#else
-
-#include "tools-util.h"
-
-static enum ask_yn bch2_fsck_ask_yn(struct bch_fs *c, struct btree_trans *trans)
-{
- char *buf = NULL;
- size_t buflen = 0;
- int ret;
-
- do {
- fputs(" (y,n, or Y,N for all errors of this type) ", stdout);
- fflush(stdout);
-
- if (getline(&buf, &buflen, stdin) < 0)
- die("error reading from standard input");
- } while ((ret = parse_yn_response(buf)) < 0);
-
- free(buf);
- return ret;
-}
-
-#endif
-
-static struct fsck_err_state *fsck_err_get(struct bch_fs *c, const char *fmt)
-{
- struct fsck_err_state *s;
-
- if (!test_bit(BCH_FS_fsck_running, &c->flags))
- return NULL;
-
- list_for_each_entry(s, &c->fsck_error_msgs, list)
- if (s->fmt == fmt) {
- /*
- * move it to the head of the list: repeated fsck errors
- * are common
- */
- list_move(&s->list, &c->fsck_error_msgs);
- return s;
- }
-
- s = kzalloc(sizeof(*s), GFP_NOFS);
- if (!s) {
- if (!c->fsck_alloc_msgs_err)
- bch_err(c, "kmalloc err, cannot ratelimit fsck errs");
- c->fsck_alloc_msgs_err = true;
- return NULL;
- }
-
- INIT_LIST_HEAD(&s->list);
- s->fmt = fmt;
- list_add(&s->list, &c->fsck_error_msgs);
- return s;
-}
-
-/* s/fix?/fixing/ s/recreate?/recreating/ */
-static void prt_actioning(struct printbuf *out, const char *action)
-{
- unsigned len = strlen(action);
-
- BUG_ON(action[len - 1] != '?');
- --len;
-
- if (action[len - 1] == 'e')
- --len;
-
- prt_bytes(out, action, len);
- prt_str(out, "ing");
-}
-
-static const u8 fsck_flags_extra[] = {
-#define x(t, n, flags) [BCH_FSCK_ERR_##t] = flags,
- BCH_SB_ERRS()
-#undef x
-};
-
-int __bch2_fsck_err(struct bch_fs *c,
- struct btree_trans *trans,
- enum bch_fsck_flags flags,
- enum bch_sb_error_id err,
- const char *fmt, ...)
-{
- struct fsck_err_state *s = NULL;
- va_list args;
- bool print = true, suppressing = false, inconsistent = false;
- struct printbuf buf = PRINTBUF, *out = &buf;
- int ret = -BCH_ERR_fsck_ignore;
- const char *action_orig = "fix?", *action = action_orig;
-
- might_sleep();
-
- if (!WARN_ON(err >= ARRAY_SIZE(fsck_flags_extra)))
- flags |= fsck_flags_extra[err];
-
- if (!c)
- c = trans->c;
-
- /*
- * Ugly: if there's a transaction in the current task it has to be
- * passed in to unlock if we prompt for user input.
- *
- * But, plumbing a transaction and transaction restarts into
- * bkey_validate() is problematic.
- *
- * So:
- * - make all bkey errors AUTOFIX, they're simple anyways (we just
- * delete the key)
- * - and we don't need to warn if we're not prompting
- */
- WARN_ON((flags & FSCK_CAN_FIX) &&
- !(flags & FSCK_AUTOFIX) &&
- !trans &&
- bch2_current_has_btree_trans(c));
-
- if ((flags & FSCK_CAN_FIX) &&
- test_bit(err, c->sb.errors_silent))
- return -BCH_ERR_fsck_fix;
-
- bch2_sb_error_count(c, err);
-
- va_start(args, fmt);
- prt_vprintf(out, fmt, args);
- va_end(args);
-
- /* Custom fix/continue/recreate/etc.? */
- if (out->buf[out->pos - 1] == '?') {
- const char *p = strrchr(out->buf, ',');
- if (p) {
- out->pos = p - out->buf;
- action = kstrdup(p + 2, GFP_KERNEL);
- if (!action) {
- ret = -ENOMEM;
- goto err;
- }
- }
- }
-
- mutex_lock(&c->fsck_error_msgs_lock);
- s = fsck_err_get(c, fmt);
- if (s) {
- /*
- * We may be called multiple times for the same error on
- * transaction restart - this memoizes instead of asking the user
- * multiple times for the same error:
- */
- if (s->last_msg && !strcmp(buf.buf, s->last_msg)) {
- ret = s->ret;
- mutex_unlock(&c->fsck_error_msgs_lock);
- goto err;
- }
-
- kfree(s->last_msg);
- s->last_msg = kstrdup(buf.buf, GFP_KERNEL);
- if (!s->last_msg) {
- mutex_unlock(&c->fsck_error_msgs_lock);
- ret = -ENOMEM;
- goto err;
- }
-
- if (c->opts.ratelimit_errors &&
- !(flags & FSCK_NO_RATELIMIT) &&
- s->nr >= FSCK_ERR_RATELIMIT_NR) {
- if (s->nr == FSCK_ERR_RATELIMIT_NR)
- suppressing = true;
- else
- print = false;
- }
-
- s->nr++;
- }
-
-#ifdef BCACHEFS_LOG_PREFIX
- if (!strncmp(fmt, "bcachefs:", 9))
- prt_printf(out, bch2_log_msg(c, ""));
-#endif
-
- if ((flags & FSCK_CAN_FIX) &&
- (flags & FSCK_AUTOFIX) &&
- (c->opts.errors == BCH_ON_ERROR_continue ||
- c->opts.errors == BCH_ON_ERROR_fix_safe)) {
- prt_str(out, ", ");
- prt_actioning(out, action);
- ret = -BCH_ERR_fsck_fix;
- } else if (!test_bit(BCH_FS_fsck_running, &c->flags)) {
- if (c->opts.errors != BCH_ON_ERROR_continue ||
- !(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE))) {
- prt_str(out, ", shutting down");
- inconsistent = true;
- ret = -BCH_ERR_fsck_errors_not_fixed;
- } else if (flags & FSCK_CAN_FIX) {
- prt_str(out, ", ");
- prt_actioning(out, action);
- ret = -BCH_ERR_fsck_fix;
- } else {
- prt_str(out, ", continuing");
- ret = -BCH_ERR_fsck_ignore;
- }
- } else if (c->opts.fix_errors == FSCK_FIX_exit) {
- prt_str(out, ", exiting");
- ret = -BCH_ERR_fsck_errors_not_fixed;
- } else if (flags & FSCK_CAN_FIX) {
- int fix = s && s->fix
- ? s->fix
- : c->opts.fix_errors;
-
- if (fix == FSCK_FIX_ask) {
- prt_str(out, ", ");
- prt_str(out, action);
-
- if (bch2_fs_stdio_redirect(c))
- bch2_print(c, "%s", out->buf);
- else
- bch2_print_string_as_lines(KERN_ERR, out->buf);
- print = false;
-
- int ask = bch2_fsck_ask_yn(c, trans);
-
- if (trans) {
- ret = bch2_trans_relock(trans);
- if (ret) {
- mutex_unlock(&c->fsck_error_msgs_lock);
- goto err;
- }
- }
-
- if (ask >= YN_ALLNO && s)
- s->fix = ask == YN_ALLNO
- ? FSCK_FIX_no
- : FSCK_FIX_yes;
-
- ret = ask & 1
- ? -BCH_ERR_fsck_fix
- : -BCH_ERR_fsck_ignore;
- } else if (fix == FSCK_FIX_yes ||
- (c->opts.nochanges &&
- !(flags & FSCK_CAN_IGNORE))) {
- prt_str(out, ", ");
- prt_actioning(out, action);
- ret = -BCH_ERR_fsck_fix;
- } else {
- prt_str(out, ", not ");
- prt_actioning(out, action);
- }
- } else if (flags & FSCK_NEED_FSCK) {
- prt_str(out, " (run fsck to correct)");
- } else {
- prt_str(out, " (repair unimplemented)");
- }
-
- if (ret == -BCH_ERR_fsck_ignore &&
- (c->opts.fix_errors == FSCK_FIX_exit ||
- !(flags & FSCK_CAN_IGNORE)))
- ret = -BCH_ERR_fsck_errors_not_fixed;
-
- bool exiting =
- test_bit(BCH_FS_fsck_running, &c->flags) &&
- (ret != -BCH_ERR_fsck_fix &&
- ret != -BCH_ERR_fsck_ignore);
-
- if (exiting)
- print = true;
-
- if (print) {
- if (bch2_fs_stdio_redirect(c))
- bch2_print(c, "%s\n", out->buf);
- else
- bch2_print_string_as_lines(KERN_ERR, out->buf);
- }
-
- if (exiting)
- bch_err(c, "Unable to continue, halting");
- else if (suppressing)
- bch_err(c, "Ratelimiting new instances of previous error");
-
- if (s)
- s->ret = ret;
-
- mutex_unlock(&c->fsck_error_msgs_lock);
-
- if (inconsistent)
- bch2_inconsistent_error(c);
-
- if (ret == -BCH_ERR_fsck_fix) {
- set_bit(BCH_FS_errors_fixed, &c->flags);
- } else {
- set_bit(BCH_FS_errors_not_fixed, &c->flags);
- set_bit(BCH_FS_error, &c->flags);
- }
-err:
- if (action != action_orig)
- kfree(action);
- printbuf_exit(&buf);
- return ret;
-}
-
-int __bch2_bkey_fsck_err(struct bch_fs *c,
- struct bkey_s_c k,
- enum bch_validate_flags validate_flags,
- enum bch_sb_error_id err,
- const char *fmt, ...)
-{
- if (validate_flags & BCH_VALIDATE_silent)
- return -BCH_ERR_fsck_delete_bkey;
-
- unsigned fsck_flags = 0;
- if (!(validate_flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit)))
- fsck_flags |= FSCK_AUTOFIX|FSCK_CAN_FIX;
-
- struct printbuf buf = PRINTBUF;
- va_list args;
-
- prt_str(&buf, "invalid bkey ");
- bch2_bkey_val_to_text(&buf, c, k);
- prt_str(&buf, "\n ");
- va_start(args, fmt);
- prt_vprintf(&buf, fmt, args);
- va_end(args);
- prt_str(&buf, ": delete?");
-
- int ret = __bch2_fsck_err(c, NULL, fsck_flags, err, "%s", buf.buf);
- printbuf_exit(&buf);
- return ret;
-}
-
-void bch2_flush_fsck_errs(struct bch_fs *c)
-{
- struct fsck_err_state *s, *n;
-
- mutex_lock(&c->fsck_error_msgs_lock);
-
- list_for_each_entry_safe(s, n, &c->fsck_error_msgs, list) {
- if (s->ratelimited && s->last_msg)
- bch_err(c, "Saw %llu errors like:\n %s", s->nr, s->last_msg);
-
- list_del(&s->list);
- kfree(s->last_msg);
- kfree(s);
- }
-
- mutex_unlock(&c->fsck_error_msgs_lock);
-}
diff --git a/fs/bcachefs/error.h b/fs/bcachefs/error.h
deleted file mode 100644
index 6551ada926b6..000000000000
--- a/fs/bcachefs/error.h
+++ /dev/null
@@ -1,255 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_ERROR_H
-#define _BCACHEFS_ERROR_H
-
-#include <linux/list.h>
-#include <linux/printk.h>
-#include "bkey_types.h"
-#include "sb-errors.h"
-
-struct bch_dev;
-struct bch_fs;
-struct work_struct;
-
-/*
- * XXX: separate out errors that indicate on disk data is inconsistent, and flag
- * superblock as such
- */
-
-/* Error messages: */
-
-/*
- * Inconsistency errors: The on disk data is inconsistent. If these occur during
- * initial recovery, they don't indicate a bug in the running code - we walk all
- * the metadata before modifying anything. If they occur at runtime, they
- * indicate either a bug in the running code or (less likely) data is being
- * silently corrupted under us.
- *
- * XXX: audit all inconsistent errors and make sure they're all recoverable, in
- * BCH_ON_ERROR_CONTINUE mode
- */
-
-bool bch2_inconsistent_error(struct bch_fs *);
-
-int bch2_topology_error(struct bch_fs *);
-
-#define bch2_fs_topology_error(c, ...) \
-({ \
- bch_err(c, "btree topology error: " __VA_ARGS__); \
- bch2_topology_error(c); \
-})
-
-#define bch2_fs_inconsistent(c, ...) \
-({ \
- bch_err(c, __VA_ARGS__); \
- bch2_inconsistent_error(c); \
-})
-
-#define bch2_fs_inconsistent_on(cond, c, ...) \
-({ \
- bool _ret = unlikely(!!(cond)); \
- \
- if (_ret) \
- bch2_fs_inconsistent(c, __VA_ARGS__); \
- _ret; \
-})
-
-/*
- * Later we might want to mark only the particular device inconsistent, not the
- * entire filesystem:
- */
-
-#define bch2_dev_inconsistent(ca, ...) \
-do { \
- bch_err(ca, __VA_ARGS__); \
- bch2_inconsistent_error((ca)->fs); \
-} while (0)
-
-#define bch2_dev_inconsistent_on(cond, ca, ...) \
-({ \
- bool _ret = unlikely(!!(cond)); \
- \
- if (_ret) \
- bch2_dev_inconsistent(ca, __VA_ARGS__); \
- _ret; \
-})
-
-/*
- * When a transaction update discovers or is causing a fs inconsistency, it's
- * helpful to also dump the pending updates:
- */
-#define bch2_trans_inconsistent(trans, ...) \
-({ \
- bch_err(trans->c, __VA_ARGS__); \
- bch2_dump_trans_updates(trans); \
- bch2_inconsistent_error(trans->c); \
-})
-
-#define bch2_trans_inconsistent_on(cond, trans, ...) \
-({ \
- bool _ret = unlikely(!!(cond)); \
- \
- if (_ret) \
- bch2_trans_inconsistent(trans, __VA_ARGS__); \
- _ret; \
-})
-
-/*
- * Fsck errors: inconsistency errors we detect at mount time, and should ideally
- * be able to repair:
- */
-
-struct fsck_err_state {
- struct list_head list;
- const char *fmt;
- u64 nr;
- bool ratelimited;
- int ret;
- int fix;
- char *last_msg;
-};
-
-#define fsck_err_count(_c, _err) bch2_sb_err_count(_c, BCH_FSCK_ERR_##_err)
-
-__printf(5, 6) __cold
-int __bch2_fsck_err(struct bch_fs *, struct btree_trans *,
- enum bch_fsck_flags,
- enum bch_sb_error_id,
- const char *, ...);
-#define bch2_fsck_err(c, _flags, _err_type, ...) \
- __bch2_fsck_err(type_is(c, struct bch_fs *) ? (struct bch_fs *) c : NULL,\
- type_is(c, struct btree_trans *) ? (struct btree_trans *) c : NULL,\
- _flags, BCH_FSCK_ERR_##_err_type, __VA_ARGS__)
-
-void bch2_flush_fsck_errs(struct bch_fs *);
-
-#define __fsck_err(c, _flags, _err_type, ...) \
-({ \
- int _ret = bch2_fsck_err(c, _flags, _err_type, __VA_ARGS__); \
- if (_ret != -BCH_ERR_fsck_fix && \
- _ret != -BCH_ERR_fsck_ignore) { \
- ret = _ret; \
- goto fsck_err; \
- } \
- \
- _ret == -BCH_ERR_fsck_fix; \
-})
-
-/* These macros return true if error should be fixed: */
-
-/* XXX: mark in superblock that filesystem contains errors, if we ignore: */
-
-#define __fsck_err_on(cond, c, _flags, _err_type, ...) \
-({ \
- might_sleep(); \
- \
- if (type_is(c, struct bch_fs *)) \
- WARN_ON(bch2_current_has_btree_trans((struct bch_fs *) c));\
- \
- (unlikely(cond) ? __fsck_err(c, _flags, _err_type, __VA_ARGS__) : false);\
-})
-
-#define need_fsck_err_on(cond, c, _err_type, ...) \
- __fsck_err_on(cond, c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, _err_type, __VA_ARGS__)
-
-#define need_fsck_err(c, _err_type, ...) \
- __fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, _err_type, __VA_ARGS__)
-
-#define mustfix_fsck_err(c, _err_type, ...) \
- __fsck_err(c, FSCK_CAN_FIX, _err_type, __VA_ARGS__)
-
-#define mustfix_fsck_err_on(cond, c, _err_type, ...) \
- __fsck_err_on(cond, c, FSCK_CAN_FIX, _err_type, __VA_ARGS__)
-
-#define fsck_err(c, _err_type, ...) \
- __fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, _err_type, __VA_ARGS__)
-
-#define fsck_err_on(cond, c, _err_type, ...) \
- __fsck_err_on(cond, c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, _err_type, __VA_ARGS__)
-
-enum bch_validate_flags;
-__printf(5, 6)
-int __bch2_bkey_fsck_err(struct bch_fs *,
- struct bkey_s_c,
- enum bch_validate_flags,
- enum bch_sb_error_id,
- const char *, ...);
-
-/*
- * for now, bkey fsck errors are always handled by deleting the entire key -
- * this will change at some point
- */
-#define bkey_fsck_err(c, _err_type, _err_msg, ...) \
-do { \
- int _ret = __bch2_bkey_fsck_err(c, k, flags, \
- BCH_FSCK_ERR_##_err_type, \
- _err_msg, ##__VA_ARGS__); \
- if (_ret != -BCH_ERR_fsck_fix && \
- _ret != -BCH_ERR_fsck_ignore) \
- ret = _ret; \
- ret = -BCH_ERR_fsck_delete_bkey; \
- goto fsck_err; \
-} while (0)
-
-#define bkey_fsck_err_on(cond, ...) \
-do { \
- if (unlikely(cond)) \
- bkey_fsck_err(__VA_ARGS__); \
-} while (0)
-
-/*
- * Fatal errors: these don't indicate a bug, but we can't continue running in RW
- * mode - pretty much just due to metadata IO errors:
- */
-
-void bch2_fatal_error(struct bch_fs *);
-
-#define bch2_fs_fatal_error(c, _msg, ...) \
-do { \
- bch_err(c, "%s(): fatal error " _msg, __func__, ##__VA_ARGS__); \
- bch2_fatal_error(c); \
-} while (0)
-
-#define bch2_fs_fatal_err_on(cond, c, ...) \
-({ \
- bool _ret = unlikely(!!(cond)); \
- \
- if (_ret) \
- bch2_fs_fatal_error(c, __VA_ARGS__); \
- _ret; \
-})
-
-/*
- * IO errors: either recoverable metadata IO (because we have replicas), or data
- * IO - we need to log it and print out a message, but we don't (necessarily)
- * want to shut down the fs:
- */
-
-void bch2_io_error_work(struct work_struct *);
-
-/* Does the error handling without logging a message */
-void bch2_io_error(struct bch_dev *, enum bch_member_error_type);
-
-#define bch2_dev_io_err_on(cond, ca, _type, ...) \
-({ \
- bool _ret = (cond); \
- \
- if (_ret) { \
- bch_err_dev_ratelimited(ca, __VA_ARGS__); \
- bch2_io_error(ca, _type); \
- } \
- _ret; \
-})
-
-#define bch2_dev_inum_io_err_on(cond, ca, _type, ...) \
-({ \
- bool _ret = (cond); \
- \
- if (_ret) { \
- bch_err_inum_offset_ratelimited(ca, __VA_ARGS__); \
- bch2_io_error(ca, _type); \
- } \
- _ret; \
-})
-
-#endif /* _BCACHEFS_ERROR_H */
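The __fsck_err()/__fsck_err_on() macros above are GNU C statement expressions: they evaluate to true when the error should be fixed, and for any other outcome they store the error in the caller's ret variable and jump to its fsck_err label. The following is a hypothetical, self-contained sketch of that control flow with invented names (my_fsck_err_on, ERR_FIX, ERR_FATAL); it is not the real macros, which also handle ratelimiting, user prompts and log formatting.

#include <stdio.h>

#define ERR_FIX		-1
#define ERR_IGNORE	-2
#define ERR_FATAL	-3

/*
 * Evaluates to true if the error should be fixed; anything that is neither
 * "fix" nor "ignore" sets the caller's 'ret' and jumps to its fsck_err
 * label, mirroring the shape of __fsck_err_on().
 */
#define my_fsck_err_on(cond, _errcode, msg)				\
({									\
	int _ret = (cond) ? (_errcode) : 0;				\
	if (_ret)							\
		printf("fsck: %s\n", msg);				\
	if (_ret && _ret != ERR_FIX && _ret != ERR_IGNORE) {		\
		ret = _ret;						\
		goto fsck_err;						\
	}								\
	_ret == ERR_FIX;						\
})

static int validate(int val)
{
	int ret = 0;

	if (my_fsck_err_on(val < 0, ERR_FIX, "negative value, fixing"))
		val = 0;

	if (my_fsck_err_on(val > 100, ERR_FATAL, "value too large"))
		val = 100;	/* not reached: ERR_FATAL bails to fsck_err */
fsck_err:
	return ret;
}

int main(void)
{
	printf("ok:    %d\n", validate(5));	/* 0  */
	printf("fixed: %d\n", validate(-1));	/* 0  */
	printf("fatal: %d\n", validate(200));	/* -3 */
	return 0;
}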
diff --git a/fs/bcachefs/extent_update.c b/fs/bcachefs/extent_update.c
deleted file mode 100644
index 5f4fecb358da..000000000000
--- a/fs/bcachefs/extent_update.c
+++ /dev/null
@@ -1,173 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "bcachefs.h"
-#include "btree_update.h"
-#include "btree_update_interior.h"
-#include "buckets.h"
-#include "debug.h"
-#include "extents.h"
-#include "extent_update.h"
-
-/*
- * This counts the number of iterators to the alloc & ec btrees we'll need
- * when inserting/removing this extent:
- */
-static unsigned bch2_bkey_nr_alloc_ptrs(struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- unsigned ret = 0, lru = 0;
-
- bkey_extent_entry_for_each(ptrs, entry) {
- switch (__extent_entry_type(entry)) {
- case BCH_EXTENT_ENTRY_ptr:
- /* Might also be updating LRU btree */
- if (entry->ptr.cached)
- lru++;
-
- fallthrough;
- case BCH_EXTENT_ENTRY_stripe_ptr:
- ret++;
- }
- }
-
- /*
- * Updating keys in the alloc btree may also update keys in the
- * freespace or discard btrees:
- */
- return lru + ret * 2;
-}
-
-static int count_iters_for_insert(struct btree_trans *trans,
- struct bkey_s_c k,
- unsigned offset,
- struct bpos *end,
- unsigned *nr_iters,
- unsigned max_iters)
-{
- int ret = 0, ret2 = 0;
-
- if (*nr_iters >= max_iters) {
- *end = bpos_min(*end, k.k->p);
- ret = 1;
- }
-
- switch (k.k->type) {
- case KEY_TYPE_extent:
- case KEY_TYPE_reflink_v:
- *nr_iters += bch2_bkey_nr_alloc_ptrs(k);
-
- if (*nr_iters >= max_iters) {
- *end = bpos_min(*end, k.k->p);
- ret = 1;
- }
-
- break;
- case KEY_TYPE_reflink_p: {
- struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
- u64 idx = le64_to_cpu(p.v->idx);
- unsigned sectors = bpos_min(*end, p.k->p).offset -
- bkey_start_offset(p.k);
- struct btree_iter iter;
- struct bkey_s_c r_k;
-
- for_each_btree_key_norestart(trans, iter,
- BTREE_ID_reflink, POS(0, idx + offset),
- BTREE_ITER_slots, r_k, ret2) {
- if (bkey_ge(bkey_start_pos(r_k.k), POS(0, idx + sectors)))
- break;
-
- /* extent_update_to_keys(), for the reflink_v update */
- *nr_iters += 1;
-
- *nr_iters += 1 + bch2_bkey_nr_alloc_ptrs(r_k);
-
- if (*nr_iters >= max_iters) {
- struct bpos pos = bkey_start_pos(k.k);
- pos.offset += min_t(u64, k.k->size,
- r_k.k->p.offset - idx);
-
- *end = bpos_min(*end, pos);
- ret = 1;
- break;
- }
- }
- bch2_trans_iter_exit(trans, &iter);
-
- break;
- }
- }
-
- return ret2 ?: ret;
-}
-
-#define EXTENT_ITERS_MAX (BTREE_ITER_INITIAL / 3)
-
-int bch2_extent_atomic_end(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_i *insert,
- struct bpos *end)
-{
- struct btree_iter copy;
- struct bkey_s_c k;
- unsigned nr_iters = 0;
- int ret;
-
- ret = bch2_btree_iter_traverse(iter);
- if (ret)
- return ret;
-
- *end = insert->k.p;
-
- /* extent_update_to_keys(): */
- nr_iters += 1;
-
- ret = count_iters_for_insert(trans, bkey_i_to_s_c(insert), 0, end,
- &nr_iters, EXTENT_ITERS_MAX / 2);
- if (ret < 0)
- return ret;
-
- bch2_trans_copy_iter(&copy, iter);
-
- for_each_btree_key_upto_continue_norestart(copy, insert->k.p, 0, k, ret) {
- unsigned offset = 0;
-
- if (bkey_gt(bkey_start_pos(&insert->k), bkey_start_pos(k.k)))
- offset = bkey_start_offset(&insert->k) -
- bkey_start_offset(k.k);
-
- /* extent_handle_overwrites(): */
- switch (bch2_extent_overlap(&insert->k, k.k)) {
- case BCH_EXTENT_OVERLAP_ALL:
- case BCH_EXTENT_OVERLAP_FRONT:
- nr_iters += 1;
- break;
- case BCH_EXTENT_OVERLAP_BACK:
- case BCH_EXTENT_OVERLAP_MIDDLE:
- nr_iters += 2;
- break;
- }
-
- ret = count_iters_for_insert(trans, k, offset, end,
- &nr_iters, EXTENT_ITERS_MAX);
- if (ret)
- break;
- }
-
- bch2_trans_iter_exit(trans, &copy);
- return ret < 0 ? ret : 0;
-}
-
-int bch2_extent_trim_atomic(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_i *k)
-{
- struct bpos end;
- int ret;
-
- ret = bch2_extent_atomic_end(trans, iter, k, &end);
- if (ret)
- return ret;
-
- bch2_cut_back(end, k);
- return 0;
-}
diff --git a/fs/bcachefs/extent_update.h b/fs/bcachefs/extent_update.h
deleted file mode 100644
index 6f5cf449361a..000000000000
--- a/fs/bcachefs/extent_update.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_EXTENT_UPDATE_H
-#define _BCACHEFS_EXTENT_UPDATE_H
-
-#include "bcachefs.h"
-
-int bch2_extent_atomic_end(struct btree_trans *, struct btree_iter *,
- struct bkey_i *, struct bpos *);
-int bch2_extent_trim_atomic(struct btree_trans *, struct btree_iter *,
- struct bkey_i *);
-
-#endif /* _BCACHEFS_EXTENT_UPDATE_H */
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
deleted file mode 100644
index 37e3d69bec06..000000000000
--- a/fs/bcachefs/extents.c
+++ /dev/null
@@ -1,1673 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
- *
- * Code for managing the extent btree and dynamically updating the writeback
- * dirty sector count.
- */
-
-#include "bcachefs.h"
-#include "bkey_methods.h"
-#include "btree_cache.h"
-#include "btree_gc.h"
-#include "btree_io.h"
-#include "btree_iter.h"
-#include "buckets.h"
-#include "checksum.h"
-#include "compress.h"
-#include "debug.h"
-#include "disk_groups.h"
-#include "error.h"
-#include "extents.h"
-#include "inode.h"
-#include "journal.h"
-#include "replicas.h"
-#include "super.h"
-#include "super-io.h"
-#include "trace.h"
-#include "util.h"
-
-static unsigned bch2_crc_field_size_max[] = {
- [BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
- [BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
- [BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
-};
-
-static void bch2_extent_crc_pack(union bch_extent_crc *,
- struct bch_extent_crc_unpacked,
- enum bch_extent_entry_type);
-
-struct bch_dev_io_failures *bch2_dev_io_failures(struct bch_io_failures *f,
- unsigned dev)
-{
- struct bch_dev_io_failures *i;
-
- for (i = f->devs; i < f->devs + f->nr; i++)
- if (i->dev == dev)
- return i;
-
- return NULL;
-}
-
-void bch2_mark_io_failure(struct bch_io_failures *failed,
- struct extent_ptr_decoded *p)
-{
- struct bch_dev_io_failures *f = bch2_dev_io_failures(failed, p->ptr.dev);
-
- if (!f) {
- BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));
-
- f = &failed->devs[failed->nr++];
- f->dev = p->ptr.dev;
- f->idx = p->idx;
- f->nr_failed = 1;
- f->nr_retries = 0;
- } else if (p->idx != f->idx) {
- f->idx = p->idx;
- f->nr_failed = 1;
- f->nr_retries = 0;
- } else {
- f->nr_failed++;
- }
-}
-
-static inline u64 dev_latency(struct bch_fs *c, unsigned dev)
-{
- struct bch_dev *ca = bch2_dev_rcu(c, dev);
- return ca ? atomic64_read(&ca->cur_latency[READ]) : S64_MAX;
-}
-
-/*
- * returns true if p1 is better than p2:
- */
-static inline bool ptr_better(struct bch_fs *c,
- const struct extent_ptr_decoded p1,
- const struct extent_ptr_decoded p2)
-{
- if (likely(!p1.idx && !p2.idx)) {
- u64 l1 = dev_latency(c, p1.ptr.dev);
- u64 l2 = dev_latency(c, p2.ptr.dev);
-
- /* Pick at random, biased in favor of the faster device: */
-
- return bch2_rand_range(l1 + l2) > l1;
- }
-
- if (bch2_force_reconstruct_read)
- return p1.idx > p2.idx;
-
- return p1.idx < p2.idx;
-}
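
Editor's note: when both replicas are healthy, the return above is a weighted coin flip: p1 wins with probability l2 / (l1 + l2), so the device with the lower recent read latency is picked proportionally more often. A self-contained illustration of the same rule, using plain rand() as a stand-in for bch2_rand_range():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Pick replica 1 with probability l2 / (l1 + l2): lower latency wins more often. */
static int pick_replica(uint64_t l1, uint64_t l2)
{
	uint64_t r = (uint64_t) rand() % (l1 + l2);	/* stand-in for bch2_rand_range() */

	return r > l1 ? 1 : 2;
}

int main(void)
{
	unsigned picked1 = 0, trials = 100000;

	for (unsigned i = 0; i < trials; i++)
		picked1 += pick_replica(1000, 3000) == 1;	/* dev 1 is 3x faster */

	/* ...so it should win roughly three out of four reads */
	printf("replica 1 chosen %u/%u times\n", picked1, trials);
	return 0;
}
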
-
-/*
- * This picks a non-stale pointer, preferably from a device other than @avoid.
- * Avoid can be NULL, meaning pick any. If there are no non-stale pointers to
- * other devices, it will still pick a pointer from avoid.
- */
-int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
- struct bch_io_failures *failed,
- struct extent_ptr_decoded *pick)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- struct bch_dev_io_failures *f;
- int ret = 0;
-
- if (k.k->type == KEY_TYPE_error)
- return -BCH_ERR_key_type_error;
-
- rcu_read_lock();
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- /*
- * Unwritten extent: no need to actually read, treat it as a
- * hole and return 0s:
- */
- if (p.ptr.unwritten) {
- ret = 0;
- break;
- }
-
- /*
- * If there are any dirty pointers it's an error if we can't
- * read:
- */
- if (!ret && !p.ptr.cached)
- ret = -BCH_ERR_no_device_to_read_from;
-
- struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
-
- if (p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr)))
- continue;
-
- f = failed ? bch2_dev_io_failures(failed, p.ptr.dev) : NULL;
- if (f)
- p.idx = f->nr_failed < f->nr_retries
- ? f->idx
- : f->idx + 1;
-
- if (!p.idx && (!ca || !bch2_dev_is_readable(ca)))
- p.idx++;
-
- if (!p.idx && p.has_ec && bch2_force_reconstruct_read)
- p.idx++;
-
- if (p.idx > (unsigned) p.has_ec)
- continue;
-
- if (ret > 0 && !ptr_better(c, p, *pick))
- continue;
-
- *pick = p;
- ret = 1;
- }
- rcu_read_unlock();
-
- return ret;
-}
-
-/* KEY_TYPE_btree_ptr: */
-
-int bch2_btree_ptr_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- int ret = 0;
-
- bkey_fsck_err_on(bkey_val_u64s(k.k) > BCH_REPLICAS_MAX,
- c, btree_ptr_val_too_big,
- "value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX);
-
- ret = bch2_bkey_ptrs_validate(c, k, flags);
-fsck_err:
- return ret;
-}
-
-void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- bch2_bkey_ptrs_to_text(out, c, k);
-}
-
-int bch2_btree_ptr_v2_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
- int ret = 0;
-
- bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX,
- c, btree_ptr_v2_val_too_big,
- "value too big (%zu > %zu)",
- bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
-
- bkey_fsck_err_on(bpos_ge(bp.v->min_key, bp.k->p),
- c, btree_ptr_v2_min_key_bad,
- "min_key > key");
-
- if (flags & BCH_VALIDATE_write)
- bkey_fsck_err_on(!bp.v->sectors_written,
- c, btree_ptr_v2_written_0,
- "sectors_written == 0");
-
- ret = bch2_bkey_ptrs_validate(c, k, flags);
-fsck_err:
- return ret;
-}
-
-void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
-
- prt_printf(out, "seq %llx written %u min_key %s",
- le64_to_cpu(bp.v->seq),
- le16_to_cpu(bp.v->sectors_written),
- BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : "");
-
- bch2_bpos_to_text(out, bp.v->min_key);
- prt_printf(out, " ");
- bch2_bkey_ptrs_to_text(out, c, k);
-}
-
-void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
- unsigned big_endian, int write,
- struct bkey_s k)
-{
- struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);
-
- compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);
-
- if (version < bcachefs_metadata_version_inode_btree_change &&
- btree_id_is_extents(btree_id) &&
- !bkey_eq(bp.v->min_key, POS_MIN))
- bp.v->min_key = write
- ? bpos_nosnap_predecessor(bp.v->min_key)
- : bpos_nosnap_successor(bp.v->min_key);
-}
-
-/* KEY_TYPE_extent: */
-
-bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
-{
- struct bkey_ptrs l_ptrs = bch2_bkey_ptrs(l);
- struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r);
- union bch_extent_entry *en_l;
- const union bch_extent_entry *en_r;
- struct extent_ptr_decoded lp, rp;
- bool use_right_ptr;
-
- en_l = l_ptrs.start;
- en_r = r_ptrs.start;
- while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
- if (extent_entry_type(en_l) != extent_entry_type(en_r))
- return false;
-
- en_l = extent_entry_next(en_l);
- en_r = extent_entry_next(en_r);
- }
-
- if (en_l < l_ptrs.end || en_r < r_ptrs.end)
- return false;
-
- en_l = l_ptrs.start;
- en_r = r_ptrs.start;
- lp.crc = bch2_extent_crc_unpack(l.k, NULL);
- rp.crc = bch2_extent_crc_unpack(r.k, NULL);
-
- while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
- __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
- if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
- rp.ptr.offset + rp.crc.offset ||
- lp.ptr.dev != rp.ptr.dev ||
- lp.ptr.gen != rp.ptr.gen ||
- lp.ptr.unwritten != rp.ptr.unwritten ||
- lp.has_ec != rp.has_ec)
- return false;
-
- /* Extents may not straddle buckets: */
- rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, lp.ptr.dev);
- bool same_bucket = ca && PTR_BUCKET_NR(ca, &lp.ptr) == PTR_BUCKET_NR(ca, &rp.ptr);
- rcu_read_unlock();
-
- if (!same_bucket)
- return false;
-
- if (lp.has_ec != rp.has_ec ||
- (lp.has_ec &&
- (lp.ec.block != rp.ec.block ||
- lp.ec.redundancy != rp.ec.redundancy ||
- lp.ec.idx != rp.ec.idx)))
- return false;
-
- if (lp.crc.compression_type != rp.crc.compression_type ||
- lp.crc.nonce != rp.crc.nonce)
- return false;
-
- if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
- lp.crc.uncompressed_size) {
- /* can use left extent's crc entry */
- } else if (lp.crc.live_size <= rp.crc.offset) {
- /* can use right extent's crc entry */
- } else {
- /* check if checksums can be merged: */
- if (lp.crc.csum_type != rp.crc.csum_type ||
- lp.crc.nonce != rp.crc.nonce ||
- crc_is_compressed(lp.crc) ||
- !bch2_checksum_mergeable(lp.crc.csum_type))
- return false;
-
- if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size ||
- rp.crc.offset)
- return false;
-
- if (lp.crc.csum_type &&
- lp.crc.uncompressed_size +
- rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9))
- return false;
- }
-
- en_l = extent_entry_next(en_l);
- en_r = extent_entry_next(en_r);
- }
-
- en_l = l_ptrs.start;
- en_r = r_ptrs.start;
- while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
- if (extent_entry_is_crc(en_l)) {
- struct bch_extent_crc_unpacked crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
- struct bch_extent_crc_unpacked crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
-
- if (crc_l.uncompressed_size + crc_r.uncompressed_size >
- bch2_crc_field_size_max[extent_entry_type(en_l)])
- return false;
- }
-
- en_l = extent_entry_next(en_l);
- en_r = extent_entry_next(en_r);
- }
-
- use_right_ptr = false;
- en_l = l_ptrs.start;
- en_r = r_ptrs.start;
- while (en_l < l_ptrs.end) {
- if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr &&
- use_right_ptr)
- en_l->ptr = en_r->ptr;
-
- if (extent_entry_is_crc(en_l)) {
- struct bch_extent_crc_unpacked crc_l =
- bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
- struct bch_extent_crc_unpacked crc_r =
- bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
-
- use_right_ptr = false;
-
- if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
- crc_l.uncompressed_size) {
- /* can use left extent's crc entry */
- } else if (crc_l.live_size <= crc_r.offset) {
- /* can use right extent's crc entry */
- crc_r.offset -= crc_l.live_size;
- bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
- extent_entry_type(en_l));
- use_right_ptr = true;
- } else {
- crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
- crc_l.csum,
- crc_r.csum,
- crc_r.uncompressed_size << 9);
-
- crc_l.uncompressed_size += crc_r.uncompressed_size;
- crc_l.compressed_size += crc_r.compressed_size;
- bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
- extent_entry_type(en_l));
- }
- }
-
- en_l = extent_entry_next(en_l);
- en_r = extent_entry_next(en_r);
- }
-
- bch2_key_resize(l.k, l.k->size + r.k->size);
- return true;
-}
-
-/* KEY_TYPE_reservation: */
-
-int bch2_reservation_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
- int ret = 0;
-
- bkey_fsck_err_on(!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX,
- c, reservation_key_nr_replicas_invalid,
- "invalid nr_replicas (%u)", r.v->nr_replicas);
-fsck_err:
- return ret;
-}
-
-void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
-
- prt_printf(out, "generation %u replicas %u",
- le32_to_cpu(r.v->generation),
- r.v->nr_replicas);
-}
-
-bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
-{
- struct bkey_s_reservation l = bkey_s_to_reservation(_l);
- struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r);
-
- if (l.v->generation != r.v->generation ||
- l.v->nr_replicas != r.v->nr_replicas)
- return false;
-
- bch2_key_resize(l.k, l.k->size + r.k->size);
- return true;
-}
-
-/* Extent checksum entries: */
-
-/* returns true if not equal */
-static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
- struct bch_extent_crc_unpacked r)
-{
- return (l.csum_type != r.csum_type ||
- l.compression_type != r.compression_type ||
- l.compressed_size != r.compressed_size ||
- l.uncompressed_size != r.uncompressed_size ||
- l.offset != r.offset ||
- l.live_size != r.live_size ||
- l.nonce != r.nonce ||
- bch2_crc_cmp(l.csum, r.csum));
-}
-
-static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
- struct bch_extent_crc_unpacked n)
-{
- return !crc_is_compressed(u) &&
- u.csum_type &&
- u.uncompressed_size > u.live_size &&
- bch2_csum_type_is_encryption(u.csum_type) ==
- bch2_csum_type_is_encryption(n.csum_type);
-}
-
-bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
- struct bch_extent_crc_unpacked n)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- struct bch_extent_crc_unpacked crc;
- const union bch_extent_entry *i;
-
- if (!n.csum_type)
- return false;
-
- bkey_for_each_crc(k.k, ptrs, crc, i)
- if (can_narrow_crc(crc, n))
- return true;
-
- return false;
-}
-
-/*
- * We're writing another replica for this extent, so while we've got the data in
- * memory we'll be computing a new checksum for the currently live data.
- *
- * If there are other replicas we aren't moving, and they are checksummed but
- * not compressed, we can modify them to point to only the data that is
- * currently live (so that readers won't have to bounce) while we've got the
- * checksum we need:
- */
-bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
-{
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
- struct bch_extent_crc_unpacked u;
- struct extent_ptr_decoded p;
- union bch_extent_entry *i;
- bool ret = false;
-
- /* Find a checksum entry that covers only live data: */
- if (!n.csum_type) {
- bkey_for_each_crc(&k->k, ptrs, u, i)
- if (!crc_is_compressed(u) &&
- u.csum_type &&
- u.live_size == u.uncompressed_size) {
- n = u;
- goto found;
- }
- return false;
- }
-found:
- BUG_ON(crc_is_compressed(n));
- BUG_ON(n.offset);
- BUG_ON(n.live_size != k->k.size);
-
-restart_narrow_pointers:
- ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
-
- bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
- if (can_narrow_crc(p.crc, n)) {
- bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr);
- p.ptr.offset += p.crc.offset;
- p.crc = n;
- bch2_extent_ptr_decoded_append(k, &p);
- ret = true;
- goto restart_narrow_pointers;
- }
-
- return ret;
-}
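
Editor's note: a concrete case makes the narrowing above easier to follow. Suppose a checksum entry covers 128 uncompressed sectors of which only 8 are still live, starting 16 sectors in (crc.offset == 16, live_size == 8): once a new checksum n over just those 8 sectors exists, the pointer is advanced past the dead prefix and given the narrower checksum, so future reads fetch and verify only the live range. A sketch of that single update step, with simplified stand-in structs rather than bch_extent_crc_unpacked:

#include <assert.h>
#include <stdint.h>

struct crc_u {				/* stand-in for bch_extent_crc_unpacked */
	unsigned uncompressed_size;
	unsigned offset;		/* live data starts this far into the checksummed region */
	unsigned live_size;
	uint64_t csum;
};

struct ptr_u { uint64_t offset; };	/* on-disk sector offset */

/*
 * The per-pointer update in bch2_bkey_narrow_crcs(): only valid for
 * uncompressed data (can_narrow_crc() checks this), since only then does the
 * crc offset translate directly into on-disk sectors.
 */
static void narrow_one(struct ptr_u *ptr, struct crc_u *crc, struct crc_u n)
{
	assert(n.offset == 0 && n.live_size == crc->live_size);

	ptr->offset += crc->offset;	/* skip the no-longer-live prefix on disk */
	*crc = n;			/* checksum now covers only the live range */
}
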
-
-static void bch2_extent_crc_pack(union bch_extent_crc *dst,
- struct bch_extent_crc_unpacked src,
- enum bch_extent_entry_type type)
-{
-#define set_common_fields(_dst, _src) \
- _dst.type = 1 << type; \
- _dst.csum_type = _src.csum_type, \
- _dst.compression_type = _src.compression_type, \
- _dst._compressed_size = _src.compressed_size - 1, \
- _dst._uncompressed_size = _src.uncompressed_size - 1, \
- _dst.offset = _src.offset
-
- switch (type) {
- case BCH_EXTENT_ENTRY_crc32:
- set_common_fields(dst->crc32, src);
- dst->crc32.csum = (u32 __force) *((__le32 *) &src.csum.lo);
- break;
- case BCH_EXTENT_ENTRY_crc64:
- set_common_fields(dst->crc64, src);
- dst->crc64.nonce = src.nonce;
- dst->crc64.csum_lo = (u64 __force) src.csum.lo;
- dst->crc64.csum_hi = (u64 __force) *((__le16 *) &src.csum.hi);
- break;
- case BCH_EXTENT_ENTRY_crc128:
- set_common_fields(dst->crc128, src);
- dst->crc128.nonce = src.nonce;
- dst->crc128.csum = src.csum;
- break;
- default:
- BUG();
- }
-#undef set_common_fields
-}
-
-void bch2_extent_crc_append(struct bkey_i *k,
- struct bch_extent_crc_unpacked new)
-{
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
- union bch_extent_crc *crc = (void *) ptrs.end;
- enum bch_extent_entry_type type;
-
- if (bch_crc_bytes[new.csum_type] <= 4 &&
- new.uncompressed_size <= CRC32_SIZE_MAX &&
- new.nonce <= CRC32_NONCE_MAX)
- type = BCH_EXTENT_ENTRY_crc32;
- else if (bch_crc_bytes[new.csum_type] <= 10 &&
- new.uncompressed_size <= CRC64_SIZE_MAX &&
- new.nonce <= CRC64_NONCE_MAX)
- type = BCH_EXTENT_ENTRY_crc64;
- else if (bch_crc_bytes[new.csum_type] <= 16 &&
- new.uncompressed_size <= CRC128_SIZE_MAX &&
- new.nonce <= CRC128_NONCE_MAX)
- type = BCH_EXTENT_ENTRY_crc128;
- else
- BUG();
-
- bch2_extent_crc_pack(crc, new, type);
-
- k->k.u64s += extent_entry_u64s(ptrs.end);
-
- EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
-}
-
-/* Generic code for keys with pointers: */
-
-unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
-{
- return bch2_bkey_devs(k).nr;
-}
-
-unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
-{
- return k.k->type == KEY_TYPE_reservation
- ? bkey_s_c_to_reservation(k).v->nr_replicas
- : bch2_bkey_dirty_devs(k).nr;
-}
-
-unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
-{
- unsigned ret = 0;
-
- if (k.k->type == KEY_TYPE_reservation) {
- ret = bkey_s_c_to_reservation(k).v->nr_replicas;
- } else {
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
-
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- ret += !p.ptr.cached && !crc_is_compressed(p.crc);
- }
-
- return ret;
-}
-
-unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- unsigned ret = 0;
-
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- if (!p.ptr.cached && crc_is_compressed(p.crc))
- ret += p.crc.compressed_size;
-
- return ret;
-}
-
-bool bch2_bkey_is_incompressible(struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct bch_extent_crc_unpacked crc;
-
- bkey_for_each_crc(k.k, ptrs, crc, entry)
- if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
- return true;
- return false;
-}
-
-unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p = { 0 };
- unsigned replicas = 0;
-
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- if (p.ptr.cached)
- continue;
-
- if (p.has_ec)
- replicas += p.ec.redundancy;
-
- replicas++;
-
- }
-
- return replicas;
-}
-
-static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent_ptr_decoded *p)
-{
- if (p->ptr.cached)
- return 0;
-
- return p->has_ec
- ? p->ec.redundancy + 1
- : ca->mi.durability;
-}
-
-unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
-{
- struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev);
-
- return ca ? __extent_ptr_durability(ca, p) : 0;
-}
-
-unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
-{
- struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev);
-
- if (!ca || ca->mi.state == BCH_MEMBER_STATE_failed)
- return 0;
-
- return __extent_ptr_durability(ca, p);
-}
-
-unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- unsigned durability = 0;
-
- rcu_read_lock();
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- durability += bch2_extent_ptr_durability(c, &p);
- rcu_read_unlock();
-
- return durability;
-}
-
-static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- unsigned durability = 0;
-
- rcu_read_lock();
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
- durability += bch2_extent_ptr_durability(c, &p);
- rcu_read_unlock();
-
- return durability;
-}
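
Editor's note: durability is additive over the pointers in a key: a cached pointer contributes nothing, an erasure-coded pointer contributes its stripe redundancy plus one, and anything else contributes the device's configured durability. A compact restatement of that rule with stand-in structs, not the decoded-pointer type:

#include <stdbool.h>

struct mini_ptr {
	bool cached;
	bool has_ec;
	unsigned ec_redundancy;		/* stripe redundancy, if has_ec */
	unsigned dev_durability;	/* ca->mi.durability of the device */
};

static unsigned ptr_durability(const struct mini_ptr *p)
{
	if (p->cached)
		return 0;
	return p->has_ec ? p->ec_redundancy + 1 : p->dev_durability;
}

static unsigned key_durability(const struct mini_ptr *ptrs, unsigned nr)
{
	unsigned total = 0;

	for (unsigned i = 0; i < nr; i++)
		total += ptr_durability(&ptrs[i]);
	return total;
}
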
-
-void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
-{
- union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
- union bch_extent_entry *next = extent_entry_next(entry);
-
- memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
- k->k.u64s -= extent_entry_u64s(entry);
-}
-
-void bch2_extent_ptr_decoded_append(struct bkey_i *k,
- struct extent_ptr_decoded *p)
-{
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
- struct bch_extent_crc_unpacked crc =
- bch2_extent_crc_unpack(&k->k, NULL);
- union bch_extent_entry *pos;
-
- if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
- pos = ptrs.start;
- goto found;
- }
-
- bkey_for_each_crc(&k->k, ptrs, crc, pos)
- if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
- pos = extent_entry_next(pos);
- goto found;
- }
-
- bch2_extent_crc_append(k, p->crc);
- pos = bkey_val_end(bkey_i_to_s(k));
-found:
- p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
- __extent_entry_insert(k, pos, to_entry(&p->ptr));
-
- if (p->has_ec) {
- p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
- __extent_entry_insert(k, pos, to_entry(&p->ec));
- }
-}
-
-static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
- union bch_extent_entry *entry)
-{
- union bch_extent_entry *i = ptrs.start;
-
- if (i == entry)
- return NULL;
-
- while (extent_entry_next(i) != entry)
- i = extent_entry_next(i);
- return i;
-}
-
-/*
- * Drop a pointer from the key, also dropping any crc or stripe entries that
- * only applied to it:
- */
-void bch2_bkey_drop_ptr_noerror(struct bkey_s k, struct bch_extent_ptr *ptr)
-{
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
- union bch_extent_entry *entry = to_entry(ptr), *next;
- bool drop_crc = true;
-
- if (k.k->type == KEY_TYPE_stripe) {
- ptr->dev = BCH_SB_MEMBER_INVALID;
- return;
- }
-
- EBUG_ON(ptr < &ptrs.start->ptr ||
- ptr >= &ptrs.end->ptr);
- EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
-
- for (next = extent_entry_next(entry);
- next != ptrs.end;
- next = extent_entry_next(next)) {
- if (extent_entry_is_crc(next)) {
- break;
- } else if (extent_entry_is_ptr(next)) {
- drop_crc = false;
- break;
- }
- }
-
- extent_entry_drop(k, entry);
-
- while ((entry = extent_entry_prev(ptrs, entry))) {
- if (extent_entry_is_ptr(entry))
- break;
-
- if ((extent_entry_is_crc(entry) && drop_crc) ||
- extent_entry_is_stripe_ptr(entry))
- extent_entry_drop(k, entry);
- }
-}
-
-void bch2_bkey_drop_ptr(struct bkey_s k, struct bch_extent_ptr *ptr)
-{
- if (k.k->type != KEY_TYPE_stripe) {
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k.s_c);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
-
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- if (p.ptr.dev == ptr->dev && p.has_ec) {
- ptr->dev = BCH_SB_MEMBER_INVALID;
- return;
- }
- }
-
- bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;
-
- bch2_bkey_drop_ptr_noerror(k, ptr);
-
- /*
- * If we deleted all the dirty pointers and there's still cached
- * pointers, we could set the cached pointers to dirty if they're not
- * stale - but to do that correctly we'd need to grab an open_bucket
- * reference so that we don't race with bucket reuse:
- */
- if (have_dirty &&
- !bch2_bkey_dirty_devs(k.s_c).nr) {
- k.k->type = KEY_TYPE_error;
- set_bkey_val_u64s(k.k, 0);
- } else if (!bch2_bkey_nr_ptrs(k.s_c)) {
- k.k->type = KEY_TYPE_deleted;
- set_bkey_val_u64s(k.k, 0);
- }
-}
-
-void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
-{
- bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
-}
-
-void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
-{
- bch2_bkey_drop_ptrs_noerror(k, ptr, ptr->dev == dev);
-}
-
-const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
-
- bkey_for_each_ptr(ptrs, ptr)
- if (ptr->dev == dev)
- return ptr;
-
- return NULL;
-}
-
-bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- struct bch_dev *ca;
- bool ret = false;
-
- rcu_read_lock();
- bkey_for_each_ptr(ptrs, ptr)
- if (bch2_dev_in_target(c, ptr->dev, target) &&
- (ca = bch2_dev_rcu(c, ptr->dev)) &&
- (!ptr->cached ||
- !dev_ptr_stale_rcu(ca, ptr))) {
- ret = true;
- break;
- }
- rcu_read_unlock();
-
- return ret;
-}
-
-bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
- struct bch_extent_ptr m, u64 offset)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
-
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- if (p.ptr.dev == m.dev &&
- p.ptr.gen == m.gen &&
- (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
- (s64) m.offset - offset)
- return true;
-
- return false;
-}
-
-/*
- * Returns true if two extents refer to the same data:
- */
-bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
-{
- if (k1.k->type != k2.k->type)
- return false;
-
- if (bkey_extent_is_direct_data(k1.k)) {
- struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
- struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
- const union bch_extent_entry *entry1, *entry2;
- struct extent_ptr_decoded p1, p2;
-
- if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))
- return false;
-
- bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
- bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
- if (p1.ptr.dev == p2.ptr.dev &&
- p1.ptr.gen == p2.ptr.gen &&
-
- /*
- * This checks that the two pointers point
- * to the same region on disk - adjusting
- * for the difference in where the extents
- * start, since one may have been trimmed:
- */
- (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
- (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k) &&
-
- /*
- * This additionally checks that the
- * extents overlap on disk, since the
- * previous check may trigger spuriously
- * when one extent is immediately partially
- * overwritten with another extent (so that
- * on disk they are adjacent) and
- * compression is in use:
- */
- ((p1.ptr.offset >= p2.ptr.offset &&
- p1.ptr.offset < p2.ptr.offset + p2.crc.compressed_size) ||
- (p2.ptr.offset >= p1.ptr.offset &&
- p2.ptr.offset < p1.ptr.offset + p1.crc.compressed_size)))
- return true;
-
- return false;
- } else {
- /* KEY_TYPE_deleted, etc. */
- return true;
- }
-}
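
Editor's note: the offset arithmetic in the comments above rests on an invariant: cutting the front of an extent by sub sectors adds sub to both bkey_start_offset() and to ptr.offset + crc.offset, so their difference is unchanged and identifies the underlying data. For example, an extent starting at sector 100 with ptr.offset 5000 and crc.offset 0 gives 5000 + 0 - 100 = 4900; after trimming to start at 108, either the pointer or the crc offset has grown by 8 and the expression is still 4900. A sketch of just that comparison:

#include <stdint.h>

/*
 * Invariant behind bch2_extents_match(): front-trimming by "sub" sectors bumps
 * both terms by sub, so the difference survives trimming.
 */
static int same_data(uint64_t ptr_off1, uint64_t crc_off1, uint64_t start1,
		     uint64_t ptr_off2, uint64_t crc_off2, uint64_t start2)
{
	return (int64_t) (ptr_off1 + crc_off1 - start1) ==
	       (int64_t) (ptr_off2 + crc_off2 - start2);
}
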
-
-struct bch_extent_ptr *
-bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bkey_s k2)
-{
- struct bkey_ptrs ptrs2 = bch2_bkey_ptrs(k2);
- union bch_extent_entry *entry2;
- struct extent_ptr_decoded p2;
-
- bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
- if (p1.ptr.dev == p2.ptr.dev &&
- p1.ptr.gen == p2.ptr.gen &&
- (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
- (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
- return &entry2->ptr;
-
- return NULL;
-}
-
-static bool want_cached_ptr(struct bch_fs *c, struct bch_io_opts *opts,
- struct bch_extent_ptr *ptr)
-{
- if (!opts->promote_target ||
- !bch2_dev_in_target(c, ptr->dev, opts->promote_target))
- return false;
-
- struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev);
-
- return ca && bch2_dev_is_readable(ca) && !dev_ptr_stale_rcu(ca, ptr);
-}
-
-void bch2_extent_ptr_set_cached(struct bch_fs *c,
- struct bch_io_opts *opts,
- struct bkey_s k,
- struct bch_extent_ptr *ptr)
-{
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
- union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
-
- rcu_read_lock();
- if (!want_cached_ptr(c, opts, ptr)) {
- bch2_bkey_drop_ptr_noerror(k, ptr);
- goto out;
- }
-
- /*
- * Stripes can't contain cached data, for various reasons.
- *
- * Possibly something we can fix in the future?
- */
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- if (&entry->ptr == ptr) {
- if (p.has_ec)
- bch2_bkey_drop_ptr_noerror(k, ptr);
- else
- ptr->cached = true;
- goto out;
- }
-
- BUG();
-out:
- rcu_read_unlock();
-}
-
-/*
- * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
- *
- * Returns true if @k should be dropped entirely
- *
- * For existing keys, only called when btree nodes are being rewritten, not when
- * they're merely being compacted/resorted in memory.
- */
-bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
-{
- struct bch_dev *ca;
-
- rcu_read_lock();
- bch2_bkey_drop_ptrs(k, ptr,
- ptr->cached &&
- (!(ca = bch2_dev_rcu(c, ptr->dev)) ||
- dev_ptr_stale_rcu(ca, ptr) > 0));
- rcu_read_unlock();
-
- return bkey_deleted(k.k);
-}
-
-/*
- * bch2_extent_normalize_by_opts - clean up an extent, dropping stale pointers etc.
- *
- * Like bch2_extent_normalize(), but also only keeps a single cached pointer on
- * the promote target.
- */
-bool bch2_extent_normalize_by_opts(struct bch_fs *c,
- struct bch_io_opts *opts,
- struct bkey_s k)
-{
- struct bkey_ptrs ptrs;
- bool have_cached_ptr;
-
- rcu_read_lock();
-restart_drop_ptrs:
- ptrs = bch2_bkey_ptrs(k);
- have_cached_ptr = false;
-
- bkey_for_each_ptr(ptrs, ptr)
- if (ptr->cached) {
- if (have_cached_ptr || !want_cached_ptr(c, opts, ptr)) {
- bch2_bkey_drop_ptr(k, ptr);
- goto restart_drop_ptrs;
- }
- have_cached_ptr = true;
- }
- rcu_read_unlock();
-
- return bkey_deleted(k.k);
-}
-
-void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struct bch_extent_ptr *ptr)
-{
- out->atomic++;
- rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev);
- if (!ca) {
- prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
- (u64) ptr->offset, ptr->gen,
- ptr->cached ? " cached" : "");
- } else {
- u32 offset;
- u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
-
- prt_printf(out, "ptr: %u:%llu:%u gen %u",
- ptr->dev, b, offset, ptr->gen);
- if (ca->mi.durability != 1)
- prt_printf(out, " d=%u", ca->mi.durability);
- if (ptr->cached)
- prt_str(out, " cached");
- if (ptr->unwritten)
- prt_str(out, " unwritten");
- int stale = dev_ptr_stale_rcu(ca, ptr);
- if (stale > 0)
- prt_printf(out, " stale");
- else if (stale)
- prt_printf(out, " invalid");
- }
- rcu_read_unlock();
- --out->atomic;
-}
-
-void bch2_extent_crc_unpacked_to_text(struct printbuf *out, struct bch_extent_crc_unpacked *crc)
-{
- prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum ",
- crc->compressed_size,
- crc->uncompressed_size,
- crc->offset, crc->nonce);
- bch2_prt_csum_type(out, crc->csum_type);
- prt_printf(out, " %0llx:%0llx ", crc->csum.hi, crc->csum.lo);
- prt_str(out, " compress ");
- bch2_prt_compression_type(out, crc->compression_type);
-}
-
-void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- bool first = true;
-
- if (c)
- prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k));
-
- bkey_extent_entry_for_each(ptrs, entry) {
- if (!first)
- prt_printf(out, " ");
-
- switch (__extent_entry_type(entry)) {
- case BCH_EXTENT_ENTRY_ptr:
- bch2_extent_ptr_to_text(out, c, entry_to_ptr(entry));
- break;
-
- case BCH_EXTENT_ENTRY_crc32:
- case BCH_EXTENT_ENTRY_crc64:
- case BCH_EXTENT_ENTRY_crc128: {
- struct bch_extent_crc_unpacked crc =
- bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
-
- bch2_extent_crc_unpacked_to_text(out, &crc);
- break;
- }
- case BCH_EXTENT_ENTRY_stripe_ptr: {
- const struct bch_extent_stripe_ptr *ec = &entry->stripe_ptr;
-
- prt_printf(out, "ec: idx %llu block %u",
- (u64) ec->idx, ec->block);
- break;
- }
- case BCH_EXTENT_ENTRY_rebalance: {
- const struct bch_extent_rebalance *r = &entry->rebalance;
-
- prt_str(out, "rebalance: target ");
- if (c)
- bch2_target_to_text(out, c, r->target);
- else
- prt_printf(out, "%u", r->target);
- prt_str(out, " compression ");
- bch2_compression_opt_to_text(out, r->compression);
- break;
- }
- default:
- prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
- return;
- }
-
- first = false;
- }
-}
-
-static int extent_ptr_validate(struct bch_fs *c,
- struct bkey_s_c k,
- enum bch_validate_flags flags,
- const struct bch_extent_ptr *ptr,
- unsigned size_ondisk,
- bool metadata)
-{
- int ret = 0;
-
- /* bad pointers are repaired by check_fix_ptrs(): */
- rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev);
- if (!ca) {
- rcu_read_unlock();
- return 0;
- }
- u32 bucket_offset;
- u64 bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);
- unsigned first_bucket = ca->mi.first_bucket;
- u64 nbuckets = ca->mi.nbuckets;
- unsigned bucket_size = ca->mi.bucket_size;
- rcu_read_unlock();
-
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- bkey_for_each_ptr(ptrs, ptr2)
- bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev,
- c, ptr_to_duplicate_device,
- "multiple pointers to same device (%u)", ptr->dev);
-
-
- bkey_fsck_err_on(bucket >= nbuckets,
- c, ptr_after_last_bucket,
- "pointer past last bucket (%llu > %llu)", bucket, nbuckets);
- bkey_fsck_err_on(bucket < first_bucket,
- c, ptr_before_first_bucket,
- "pointer before first bucket (%llu < %u)", bucket, first_bucket);
- bkey_fsck_err_on(bucket_offset + size_ondisk > bucket_size,
- c, ptr_spans_multiple_buckets,
- "pointer spans multiple buckets (%u + %u > %u)",
- bucket_offset, size_ondisk, bucket_size);
-fsck_err:
- return ret;
-}
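
Editor's note: each per-pointer check above reduces to bucket arithmetic: the sector offset is split into a bucket number and an offset inside that bucket, the bucket must lie in [first_bucket, nbuckets), and the pointer plus its on-disk size must not cross a bucket boundary. A sketch of those three checks using a plain division; the real sector_to_bucket_and_offset() does the same split with the device's bucket-size fields:

#include <stdbool.h>
#include <stdint.h>

static bool ptr_in_valid_bucket(uint64_t sector, unsigned size_ondisk,
				uint64_t first_bucket, uint64_t nbuckets,
				unsigned bucket_size)
{
	uint64_t bucket = sector / bucket_size;		/* sector_to_bucket_and_offset() */
	unsigned offset = sector % bucket_size;

	return bucket >= first_bucket &&		/* ptr_before_first_bucket */
	       bucket <  nbuckets &&			/* ptr_after_last_bucket */
	       offset + size_ondisk <= bucket_size;	/* ptr_spans_multiple_buckets */
}
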
-
-int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct bch_extent_crc_unpacked crc;
- unsigned size_ondisk = k.k->size;
- unsigned nonce = UINT_MAX;
- unsigned nr_ptrs = 0;
- bool have_written = false, have_unwritten = false, have_ec = false, crc_since_last_ptr = false;
- int ret = 0;
-
- if (bkey_is_btree_ptr(k.k))
- size_ondisk = btree_sectors(c);
-
- bkey_extent_entry_for_each(ptrs, entry) {
- bkey_fsck_err_on(__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX,
- c, extent_ptrs_invalid_entry,
- "invalid extent entry type (got %u, max %u)",
- __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);
-
- bkey_fsck_err_on(bkey_is_btree_ptr(k.k) &&
- !extent_entry_is_ptr(entry),
- c, btree_ptr_has_non_ptr,
- "has non ptr field");
-
- switch (extent_entry_type(entry)) {
- case BCH_EXTENT_ENTRY_ptr:
- ret = extent_ptr_validate(c, k, flags, &entry->ptr, size_ondisk, false);
- if (ret)
- return ret;
-
- bkey_fsck_err_on(entry->ptr.cached && have_ec,
- c, ptr_cached_and_erasure_coded,
- "cached, erasure coded ptr");
-
- if (!entry->ptr.unwritten)
- have_written = true;
- else
- have_unwritten = true;
-
- have_ec = false;
- crc_since_last_ptr = false;
- nr_ptrs++;
- break;
- case BCH_EXTENT_ENTRY_crc32:
- case BCH_EXTENT_ENTRY_crc64:
- case BCH_EXTENT_ENTRY_crc128:
- crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
-
- bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size,
- c, ptr_crc_uncompressed_size_too_small,
- "checksum offset + key size > uncompressed size");
- bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type),
- c, ptr_crc_csum_type_unknown,
- "invalid checksum type");
- bkey_fsck_err_on(crc.compression_type >= BCH_COMPRESSION_TYPE_NR,
- c, ptr_crc_compression_type_unknown,
- "invalid compression type");
-
- if (bch2_csum_type_is_encryption(crc.csum_type)) {
- if (nonce == UINT_MAX)
- nonce = crc.offset + crc.nonce;
- else if (nonce != crc.offset + crc.nonce)
- bkey_fsck_err(c, ptr_crc_nonce_mismatch,
- "incorrect nonce");
- }
-
- bkey_fsck_err_on(crc_since_last_ptr,
- c, ptr_crc_redundant,
- "redundant crc entry");
- crc_since_last_ptr = true;
-
- bkey_fsck_err_on(crc_is_encoded(crc) &&
- (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) &&
- (flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit)),
- c, ptr_crc_uncompressed_size_too_big,
- "too large encoded extent");
-
- size_ondisk = crc.compressed_size;
- break;
- case BCH_EXTENT_ENTRY_stripe_ptr:
- bkey_fsck_err_on(have_ec,
- c, ptr_stripe_redundant,
- "redundant stripe entry");
- have_ec = true;
- break;
- case BCH_EXTENT_ENTRY_rebalance: {
- /*
- * this shouldn't be a fsck error, for forward
- * compatibility; the rebalance code should just refetch
- * the compression opt if it's unknown
- */
-#if 0
- const struct bch_extent_rebalance *r = &entry->rebalance;
-
- if (!bch2_compression_opt_valid(r->compression)) {
- struct bch_compression_opt opt = __bch2_compression_decode(r->compression);
- prt_printf(err, "invalid compression opt %u:%u",
- opt.type, opt.level);
- return -BCH_ERR_invalid_bkey;
- }
-#endif
- break;
- }
- }
- }
-
- bkey_fsck_err_on(!nr_ptrs,
- c, extent_ptrs_no_ptrs,
- "no ptrs");
- bkey_fsck_err_on(nr_ptrs > BCH_BKEY_PTRS_MAX,
- c, extent_ptrs_too_many_ptrs,
- "too many ptrs: %u > %u", nr_ptrs, BCH_BKEY_PTRS_MAX);
- bkey_fsck_err_on(have_written && have_unwritten,
- c, extent_ptrs_written_and_unwritten,
- "extent with unwritten and written ptrs");
- bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten,
- c, extent_ptrs_unwritten,
- "has unwritten ptrs");
- bkey_fsck_err_on(crc_since_last_ptr,
- c, extent_ptrs_redundant_crc,
- "redundant crc entry");
- bkey_fsck_err_on(have_ec,
- c, extent_ptrs_redundant_stripe,
- "redundant stripe entry");
-fsck_err:
- return ret;
-}
-
-void bch2_ptr_swab(struct bkey_s k)
-{
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
- union bch_extent_entry *entry;
- u64 *d;
-
- for (d = (u64 *) ptrs.start;
- d != (u64 *) ptrs.end;
- d++)
- *d = swab64(*d);
-
- for (entry = ptrs.start;
- entry < ptrs.end;
- entry = extent_entry_next(entry)) {
- switch (__extent_entry_type(entry)) {
- case BCH_EXTENT_ENTRY_ptr:
- break;
- case BCH_EXTENT_ENTRY_crc32:
- entry->crc32.csum = swab32(entry->crc32.csum);
- break;
- case BCH_EXTENT_ENTRY_crc64:
- entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
- entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
- break;
- case BCH_EXTENT_ENTRY_crc128:
- entry->crc128.csum.hi = (__force __le64)
- swab64((__force u64) entry->crc128.csum.hi);
- entry->crc128.csum.lo = (__force __le64)
- swab64((__force u64) entry->crc128.csum.lo);
- break;
- case BCH_EXTENT_ENTRY_stripe_ptr:
- break;
- case BCH_EXTENT_ENTRY_rebalance:
- break;
- default:
- /* Bad entry type: will be caught by validate() */
- return;
- }
- }
-}
-
-const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
-
- bkey_extent_entry_for_each(ptrs, entry)
- if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance)
- return &entry->rebalance;
-
- return NULL;
-}
-
-unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
- unsigned target, unsigned compression)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- unsigned rewrite_ptrs = 0;
-
- if (compression) {
- unsigned compression_type = bch2_compression_opt_to_type(compression);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- unsigned i = 0;
-
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
- p.ptr.unwritten) {
- rewrite_ptrs = 0;
- goto incompressible;
- }
-
- if (!p.ptr.cached && p.crc.compression_type != compression_type)
- rewrite_ptrs |= 1U << i;
- i++;
- }
- }
-incompressible:
- if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
- unsigned i = 0;
-
- bkey_for_each_ptr(ptrs, ptr) {
- if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, target))
- rewrite_ptrs |= 1U << i;
- i++;
- }
- }
-
- return rewrite_ptrs;
-}
-
-bool bch2_bkey_needs_rebalance(struct bch_fs *c, struct bkey_s_c k)
-{
- const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);
-
- /*
- * If it's an indirect extent, we don't delete the rebalance entry when
- * done so that we know what options were applied - check if it still
- * needs work done:
- */
- if (r &&
- k.k->type == KEY_TYPE_reflink_v &&
- !bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression))
- r = NULL;
-
- return r != NULL;
-}
-
-static u64 __bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
- unsigned target, unsigned compression)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- u64 sectors = 0;
-
- if (compression) {
- unsigned compression_type = bch2_compression_opt_to_type(compression);
-
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
- p.ptr.unwritten) {
- sectors = 0;
- goto incompressible;
- }
-
- if (!p.ptr.cached && p.crc.compression_type != compression_type)
- sectors += p.crc.compressed_size;
- }
- }
-incompressible:
- if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- if (!p.ptr.cached && !bch2_dev_in_target(c, p.ptr.dev, target))
- sectors += p.crc.compressed_size;
- }
-
- return sectors;
-}
-
-u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k)
-{
- const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);
-
- return r ? __bch2_bkey_sectors_need_rebalance(c, k, r->target, r->compression) : 0;
-}
-
-int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k,
- struct bch_io_opts *opts)
-{
- struct bkey_s k = bkey_i_to_s(_k);
- struct bch_extent_rebalance *r;
- unsigned target = opts->background_target;
- unsigned compression = background_compression(*opts);
- bool needs_rebalance;
-
- if (!bkey_extent_is_direct_data(k.k))
- return 0;
-
- /* get existing rebalance entry: */
- r = (struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c);
- if (r) {
- if (k.k->type == KEY_TYPE_reflink_v) {
- /*
- * indirect extents: existing options take precedence,
- * so that we don't move extents back and forth if
- * they're referenced by different inodes with different
- * options:
- */
- if (r->target)
- target = r->target;
- if (r->compression)
- compression = r->compression;
- }
-
- r->target = target;
- r->compression = compression;
- }
-
- needs_rebalance = bch2_bkey_ptrs_need_rebalance(c, k.s_c, target, compression);
-
- if (needs_rebalance && !r) {
- union bch_extent_entry *new = bkey_val_end(k);
-
- new->rebalance.type = 1U << BCH_EXTENT_ENTRY_rebalance;
- new->rebalance.compression = compression;
- new->rebalance.target = target;
- new->rebalance.unused = 0;
- k.k->u64s += extent_entry_u64s(new);
- } else if (!needs_rebalance && r && k.k->type != KEY_TYPE_reflink_v) {
- /*
- * For indirect extents, don't delete the rebalance entry when
- * we're finished so that we know we specifically moved it or
- * compressed it to its current location/compression type
- */
- extent_entry_drop(k, (union bch_extent_entry *) r);
- }
-
- return 0;
-}
-
-/* Generic extent code: */
-
-int bch2_cut_front_s(struct bpos where, struct bkey_s k)
-{
- unsigned new_val_u64s = bkey_val_u64s(k.k);
- int val_u64s_delta;
- u64 sub;
-
- if (bkey_le(where, bkey_start_pos(k.k)))
- return 0;
-
- EBUG_ON(bkey_gt(where, k.k->p));
-
- sub = where.offset - bkey_start_offset(k.k);
-
- k.k->size -= sub;
-
- if (!k.k->size) {
- k.k->type = KEY_TYPE_deleted;
- new_val_u64s = 0;
- }
-
- switch (k.k->type) {
- case KEY_TYPE_extent:
- case KEY_TYPE_reflink_v: {
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
- union bch_extent_entry *entry;
- bool seen_crc = false;
-
- bkey_extent_entry_for_each(ptrs, entry) {
- switch (extent_entry_type(entry)) {
- case BCH_EXTENT_ENTRY_ptr:
- if (!seen_crc)
- entry->ptr.offset += sub;
- break;
- case BCH_EXTENT_ENTRY_crc32:
- entry->crc32.offset += sub;
- break;
- case BCH_EXTENT_ENTRY_crc64:
- entry->crc64.offset += sub;
- break;
- case BCH_EXTENT_ENTRY_crc128:
- entry->crc128.offset += sub;
- break;
- case BCH_EXTENT_ENTRY_stripe_ptr:
- break;
- case BCH_EXTENT_ENTRY_rebalance:
- break;
- }
-
- if (extent_entry_is_crc(entry))
- seen_crc = true;
- }
-
- break;
- }
- case KEY_TYPE_reflink_p: {
- struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);
-
- le64_add_cpu(&p.v->idx, sub);
- break;
- }
- case KEY_TYPE_inline_data:
- case KEY_TYPE_indirect_inline_data: {
- void *p = bkey_inline_data_p(k);
- unsigned bytes = bkey_inline_data_bytes(k.k);
-
- sub = min_t(u64, sub << 9, bytes);
-
- memmove(p, p + sub, bytes - sub);
-
- new_val_u64s -= sub >> 3;
- break;
- }
- }
-
- val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
- BUG_ON(val_u64s_delta < 0);
-
- set_bkey_val_u64s(k.k, new_val_u64s);
- memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
- return -val_u64s_delta;
-}
-
-int bch2_cut_back_s(struct bpos where, struct bkey_s k)
-{
- unsigned new_val_u64s = bkey_val_u64s(k.k);
- int val_u64s_delta;
- u64 len = 0;
-
- if (bkey_ge(where, k.k->p))
- return 0;
-
- EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));
-
- len = where.offset - bkey_start_offset(k.k);
-
- k.k->p.offset = where.offset;
- k.k->size = len;
-
- if (!len) {
- k.k->type = KEY_TYPE_deleted;
- new_val_u64s = 0;
- }
-
- switch (k.k->type) {
- case KEY_TYPE_inline_data:
- case KEY_TYPE_indirect_inline_data:
- new_val_u64s = (bkey_inline_data_offset(k.k) +
- min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
- break;
- }
-
- val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
- BUG_ON(val_u64s_delta < 0);
-
- set_bkey_val_u64s(k.k, new_val_u64s);
- memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
- return -val_u64s_delta;
-}
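
Editor's note: the two trim helpers above are the generic building blocks used throughout the extent code: bch2_cut_front() moves the start of a key forward (adjusting pointer and crc offsets, reflink indices or inline data as shown), bch2_cut_back() pulls the end in, and either one marks the key deleted if nothing remains. A minimal model of just the size bookkeeping, using a plain struct in place of a bkey and remembering that extents are addressed by their end position:

#include <stdbool.h>
#include <stdint.h>

struct mini_key {
	uint64_t end;		/* k->p.offset: extents are addressed by their end */
	uint64_t size;		/* sectors; start = end - size */
	bool deleted;
};

static uint64_t start_of(const struct mini_key *k)
{
	return k->end - k->size;
}

static void cut_front(uint64_t where, struct mini_key *k)
{
	if (where <= start_of(k))
		return;
	k->size -= where - start_of(k);	/* caller guarantees where <= end */
	if (!k->size)
		k->deleted = true;	/* KEY_TYPE_deleted in the real code */
}

static void cut_back(uint64_t where, struct mini_key *k)
{
	if (where >= k->end)
		return;
	k->size = where - start_of(k);	/* caller guarantees where >= start */
	k->end = where;
	if (!k->size)
		k->deleted = true;
}
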
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
deleted file mode 100644
index bcffcf60aaaf..000000000000
--- a/fs/bcachefs/extents.h
+++ /dev/null
@@ -1,766 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_EXTENTS_H
-#define _BCACHEFS_EXTENTS_H
-
-#include "bcachefs.h"
-#include "bkey.h"
-#include "extents_types.h"
-
-struct bch_fs;
-struct btree_trans;
-enum bch_validate_flags;
-
-/* extent entries: */
-
-#define extent_entry_last(_e) \
- ((typeof(&(_e).v->start[0])) bkey_val_end(_e))
-
-#define entry_to_ptr(_entry) \
-({ \
- EBUG_ON((_entry) && !extent_entry_is_ptr(_entry)); \
- \
- __builtin_choose_expr( \
- type_is_exact(_entry, const union bch_extent_entry *), \
- (const struct bch_extent_ptr *) (_entry), \
- (struct bch_extent_ptr *) (_entry)); \
-})
-
-/* downcast, preserves const */
-#define to_entry(_entry) \
-({ \
- BUILD_BUG_ON(!type_is(_entry, union bch_extent_crc *) && \
- !type_is(_entry, struct bch_extent_ptr *) && \
- !type_is(_entry, struct bch_extent_stripe_ptr *)); \
- \
- __builtin_choose_expr( \
- (type_is_exact(_entry, const union bch_extent_crc *) || \
- type_is_exact(_entry, const struct bch_extent_ptr *) ||\
- type_is_exact(_entry, const struct bch_extent_stripe_ptr *)),\
- (const union bch_extent_entry *) (_entry), \
- (union bch_extent_entry *) (_entry)); \
-})
-
-#define extent_entry_next(_entry) \
- ((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))
-
-#define extent_entry_next_safe(_entry, _end) \
- (likely(__extent_entry_type(_entry) < BCH_EXTENT_ENTRY_MAX) \
- ? extent_entry_next(_entry) \
- : _end)
-
-static inline unsigned
-__extent_entry_type(const union bch_extent_entry *e)
-{
- return e->type ? __ffs(e->type) : BCH_EXTENT_ENTRY_MAX;
-}
-
-static inline enum bch_extent_entry_type
-extent_entry_type(const union bch_extent_entry *e)
-{
- int ret = __ffs(e->type);
-
- EBUG_ON(ret < 0 || ret >= BCH_EXTENT_ENTRY_MAX);
-
- return ret;
-}
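
Editor's note: the type byte shared by every extent entry is a one-hot encoding: an entry of type t stores 1 << t, and extent_entry_type() recovers t with __ffs(), with a zero byte decoding to BCH_EXTENT_ENTRY_MAX as an "unknown" sentinel. That is why entries are initialized with expressions like 1 << BCH_EXTENT_ENTRY_ptr elsewhere in this diff. A tiny userspace illustration with stand-in enum values and POSIX ffs():

#include <assert.h>
#include <strings.h>	/* ffs() */

enum { ENTRY_ptr, ENTRY_crc32, ENTRY_crc64, ENTRY_crc128, ENTRY_stripe_ptr, ENTRY_MAX };

int main(void)
{
	unsigned char type = 1u << ENTRY_crc64;	/* how an entry's type byte is written */
	unsigned char unknown = 0;

	/* ffs() is 1-based where the kernel's __ffs() is 0-based */
	assert(ffs(type) - 1 == ENTRY_crc64);

	/* a zero type byte decodes to the "unknown entry" sentinel */
	assert((unknown ? ffs(unknown) - 1 : ENTRY_MAX) == ENTRY_MAX);
	return 0;
}
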
-
-static inline size_t extent_entry_bytes(const union bch_extent_entry *entry)
-{
- switch (extent_entry_type(entry)) {
-#define x(f, n) \
- case BCH_EXTENT_ENTRY_##f: \
- return sizeof(struct bch_extent_##f);
- BCH_EXTENT_ENTRY_TYPES()
-#undef x
- default:
- BUG();
- }
-}
-
-static inline size_t extent_entry_u64s(const union bch_extent_entry *entry)
-{
- return extent_entry_bytes(entry) / sizeof(u64);
-}
-
-static inline void __extent_entry_insert(struct bkey_i *k,
- union bch_extent_entry *dst,
- union bch_extent_entry *new)
-{
- union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
-
- memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
- dst, (u64 *) end - (u64 *) dst);
- k->k.u64s += extent_entry_u64s(new);
- memcpy_u64s_small(dst, new, extent_entry_u64s(new));
-}
-
-static inline void extent_entry_drop(struct bkey_s k, union bch_extent_entry *entry)
-{
- union bch_extent_entry *next = extent_entry_next(entry);
-
- /* stripes have ptrs, but their layout doesn't work with this code */
- BUG_ON(k.k->type == KEY_TYPE_stripe);
-
- memmove_u64s_down(entry, next,
- (u64 *) bkey_val_end(k) - (u64 *) next);
- k.k->u64s -= (u64 *) next - (u64 *) entry;
-}
-
-static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
-{
- return __extent_entry_type(e) == BCH_EXTENT_ENTRY_ptr;
-}
-
-static inline bool extent_entry_is_stripe_ptr(const union bch_extent_entry *e)
-{
- return __extent_entry_type(e) == BCH_EXTENT_ENTRY_stripe_ptr;
-}
-
-static inline bool extent_entry_is_crc(const union bch_extent_entry *e)
-{
- switch (__extent_entry_type(e)) {
- case BCH_EXTENT_ENTRY_crc32:
- case BCH_EXTENT_ENTRY_crc64:
- case BCH_EXTENT_ENTRY_crc128:
- return true;
- default:
- return false;
- }
-}
-
-union bch_extent_crc {
- u8 type;
- struct bch_extent_crc32 crc32;
- struct bch_extent_crc64 crc64;
- struct bch_extent_crc128 crc128;
-};
-
-#define __entry_to_crc(_entry) \
- __builtin_choose_expr( \
- type_is_exact(_entry, const union bch_extent_entry *), \
- (const union bch_extent_crc *) (_entry), \
- (union bch_extent_crc *) (_entry))
-
-#define entry_to_crc(_entry) \
-({ \
- EBUG_ON((_entry) && !extent_entry_is_crc(_entry)); \
- \
- __entry_to_crc(_entry); \
-})
-
-static inline struct bch_extent_crc_unpacked
-bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
-{
-#define common_fields(_crc) \
- .csum_type = _crc.csum_type, \
- .compression_type = _crc.compression_type, \
- .compressed_size = _crc._compressed_size + 1, \
- .uncompressed_size = _crc._uncompressed_size + 1, \
- .offset = _crc.offset, \
- .live_size = k->size
-
- if (!crc)
- return (struct bch_extent_crc_unpacked) {
- .compressed_size = k->size,
- .uncompressed_size = k->size,
- .live_size = k->size,
- };
-
- switch (extent_entry_type(to_entry(crc))) {
- case BCH_EXTENT_ENTRY_crc32: {
- struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
- common_fields(crc->crc32),
- };
-
- *((__le32 *) &ret.csum.lo) = (__le32 __force) crc->crc32.csum;
- return ret;
- }
- case BCH_EXTENT_ENTRY_crc64: {
- struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
- common_fields(crc->crc64),
- .nonce = crc->crc64.nonce,
- .csum.lo = (__force __le64) crc->crc64.csum_lo,
- };
-
- *((__le16 *) &ret.csum.hi) = (__le16 __force) crc->crc64.csum_hi;
-
- return ret;
- }
- case BCH_EXTENT_ENTRY_crc128: {
- struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
- common_fields(crc->crc128),
- .nonce = crc->crc128.nonce,
- .csum = crc->crc128.csum,
- };
-
- return ret;
- }
- default:
- BUG();
- }
-#undef common_fields
-}
-
-static inline bool crc_is_compressed(struct bch_extent_crc_unpacked crc)
-{
- return (crc.compression_type != BCH_COMPRESSION_TYPE_none &&
- crc.compression_type != BCH_COMPRESSION_TYPE_incompressible);
-}
-
-static inline bool crc_is_encoded(struct bch_extent_crc_unpacked crc)
-{
- return crc.csum_type != BCH_CSUM_none || crc_is_compressed(crc);
-}
-
-void bch2_extent_crc_unpacked_to_text(struct printbuf *, struct bch_extent_crc_unpacked *);
-
-/* bkey_ptrs: generically over any key type that has ptrs */
-
-struct bkey_ptrs_c {
- const union bch_extent_entry *start;
- const union bch_extent_entry *end;
-};
-
-struct bkey_ptrs {
- union bch_extent_entry *start;
- union bch_extent_entry *end;
-};
-
-static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
-{
- switch (k.k->type) {
- case KEY_TYPE_btree_ptr: {
- struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);
-
- return (struct bkey_ptrs_c) {
- to_entry(&e.v->start[0]),
- to_entry(extent_entry_last(e))
- };
- }
- case KEY_TYPE_extent: {
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
-
- return (struct bkey_ptrs_c) {
- e.v->start,
- extent_entry_last(e)
- };
- }
- case KEY_TYPE_stripe: {
- struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
-
- return (struct bkey_ptrs_c) {
- to_entry(&s.v->ptrs[0]),
- to_entry(&s.v->ptrs[s.v->nr_blocks]),
- };
- }
- case KEY_TYPE_reflink_v: {
- struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);
-
- return (struct bkey_ptrs_c) {
- r.v->start,
- bkey_val_end(r),
- };
- }
- case KEY_TYPE_btree_ptr_v2: {
- struct bkey_s_c_btree_ptr_v2 e = bkey_s_c_to_btree_ptr_v2(k);
-
- return (struct bkey_ptrs_c) {
- to_entry(&e.v->start[0]),
- to_entry(extent_entry_last(e))
- };
- }
- default:
- return (struct bkey_ptrs_c) { NULL, NULL };
- }
-}
-
-static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k)
-{
- struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k.s_c);
-
- return (struct bkey_ptrs) {
- (void *) p.start,
- (void *) p.end
- };
-}
-
-#define __bkey_extent_entry_for_each_from(_start, _end, _entry) \
- for ((_entry) = (_start); \
- (_entry) < (_end); \
- (_entry) = extent_entry_next_safe(_entry, _end))
-
-#define __bkey_ptr_next(_ptr, _end) \
-({ \
- typeof(_end) _entry; \
- \
- __bkey_extent_entry_for_each_from(to_entry(_ptr), _end, _entry) \
- if (extent_entry_is_ptr(_entry)) \
- break; \
- \
- _entry < (_end) ? entry_to_ptr(_entry) : NULL; \
-})
-
-#define bkey_extent_entry_for_each_from(_p, _entry, _start) \
- __bkey_extent_entry_for_each_from(_start, (_p).end, _entry)
-
-#define bkey_extent_entry_for_each(_p, _entry) \
- bkey_extent_entry_for_each_from(_p, _entry, _p.start)
-
-#define __bkey_for_each_ptr(_start, _end, _ptr) \
- for (typeof(_start) (_ptr) = (_start); \
- ((_ptr) = __bkey_ptr_next(_ptr, _end)); \
- (_ptr)++)
-
-#define bkey_ptr_next(_p, _ptr) \
- __bkey_ptr_next(_ptr, (_p).end)
-
-#define bkey_for_each_ptr(_p, _ptr) \
- __bkey_for_each_ptr(&(_p).start->ptr, (_p).end, _ptr)
-
-#define __bkey_ptr_next_decode(_k, _end, _ptr, _entry) \
-({ \
- __label__ out; \
- \
- (_ptr).idx = 0; \
- (_ptr).has_ec = false; \
- \
- __bkey_extent_entry_for_each_from(_entry, _end, _entry) \
- switch (__extent_entry_type(_entry)) { \
- case BCH_EXTENT_ENTRY_ptr: \
- (_ptr).ptr = _entry->ptr; \
- goto out; \
- case BCH_EXTENT_ENTRY_crc32: \
- case BCH_EXTENT_ENTRY_crc64: \
- case BCH_EXTENT_ENTRY_crc128: \
- (_ptr).crc = bch2_extent_crc_unpack(_k, \
- entry_to_crc(_entry)); \
- break; \
- case BCH_EXTENT_ENTRY_stripe_ptr: \
- (_ptr).ec = _entry->stripe_ptr; \
- (_ptr).has_ec = true; \
- break; \
- default: \
- /* nothing */ \
- break; \
- } \
-out: \
- _entry < (_end); \
-})
-
-#define __bkey_for_each_ptr_decode(_k, _start, _end, _ptr, _entry) \
- for ((_ptr).crc = bch2_extent_crc_unpack(_k, NULL), \
- (_entry) = _start; \
- __bkey_ptr_next_decode(_k, _end, _ptr, _entry); \
- (_entry) = extent_entry_next_safe(_entry, _end))
-
-#define bkey_for_each_ptr_decode(_k, _p, _ptr, _entry) \
- __bkey_for_each_ptr_decode(_k, (_p).start, (_p).end, \
- _ptr, _entry)
-
-#define bkey_crc_next(_k, _end, _crc, _iter) \
-({ \
- __bkey_extent_entry_for_each_from(_iter, _end, _iter) \
- if (extent_entry_is_crc(_iter)) { \
- (_crc) = bch2_extent_crc_unpack(_k, \
- entry_to_crc(_iter)); \
- break; \
- } \
- \
- (_iter) < (_end); \
-})
-
-#define __bkey_for_each_crc(_k, _start, _end, _crc, _iter) \
- for ((_crc) = bch2_extent_crc_unpack(_k, NULL), \
- (_iter) = (_start); \
- bkey_crc_next(_k, _end, _crc, _iter); \
- (_iter) = extent_entry_next(_iter))
-
-#define bkey_for_each_crc(_k, _p, _crc, _iter) \
- __bkey_for_each_crc(_k, (_p).start, (_p).end, _crc, _iter)
-
-/* Iterate over pointers in KEY_TYPE_extent: */
-
-#define extent_for_each_entry_from(_e, _entry, _start) \
- __bkey_extent_entry_for_each_from(_start, \
- extent_entry_last(_e), _entry)
-
-#define extent_for_each_entry(_e, _entry) \
- extent_for_each_entry_from(_e, _entry, (_e).v->start)
-
-#define extent_ptr_next(_e, _ptr) \
- __bkey_ptr_next(_ptr, extent_entry_last(_e))
-
-#define extent_for_each_ptr(_e, _ptr) \
- __bkey_for_each_ptr(&(_e).v->start->ptr, extent_entry_last(_e), _ptr)
-
-#define extent_for_each_ptr_decode(_e, _ptr, _entry) \
- __bkey_for_each_ptr_decode((_e).k, (_e).v->start, \
- extent_entry_last(_e), _ptr, _entry)
-
-/* utility code common to all keys with pointers: */
-
-struct bch_dev_io_failures *bch2_dev_io_failures(struct bch_io_failures *,
- unsigned);
-void bch2_mark_io_failure(struct bch_io_failures *,
- struct extent_ptr_decoded *);
-int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
- struct bch_io_failures *,
- struct extent_ptr_decoded *);
-
-/* KEY_TYPE_btree_ptr: */
-
-int bch2_btree_ptr_validate(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags);
-void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
- struct bkey_s_c);
-
-int bch2_btree_ptr_v2_validate(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags);
-void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
- int, struct bkey_s);
-
-#define bch2_bkey_ops_btree_ptr ((struct bkey_ops) { \
- .key_validate = bch2_btree_ptr_validate, \
- .val_to_text = bch2_btree_ptr_to_text, \
- .swab = bch2_ptr_swab, \
- .trigger = bch2_trigger_extent, \
-})
-
-#define bch2_bkey_ops_btree_ptr_v2 ((struct bkey_ops) { \
- .key_validate = bch2_btree_ptr_v2_validate, \
- .val_to_text = bch2_btree_ptr_v2_to_text, \
- .swab = bch2_ptr_swab, \
- .compat = bch2_btree_ptr_v2_compat, \
- .trigger = bch2_trigger_extent, \
- .min_val_size = 40, \
-})
-
-/* KEY_TYPE_extent: */
-
-bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
-
-#define bch2_bkey_ops_extent ((struct bkey_ops) { \
- .key_validate = bch2_bkey_ptrs_validate, \
- .val_to_text = bch2_bkey_ptrs_to_text, \
- .swab = bch2_ptr_swab, \
- .key_normalize = bch2_extent_normalize, \
- .key_merge = bch2_extent_merge, \
- .trigger = bch2_trigger_extent, \
-})
-
-/* KEY_TYPE_reservation: */
-
-int bch2_reservation_validate(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags);
-void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
-
-#define bch2_bkey_ops_reservation ((struct bkey_ops) { \
- .key_validate = bch2_reservation_validate, \
- .val_to_text = bch2_reservation_to_text, \
- .key_merge = bch2_reservation_merge, \
- .trigger = bch2_trigger_reservation, \
- .min_val_size = 8, \
-})
-
-/* Extent checksum entries: */
-
-bool bch2_can_narrow_extent_crcs(struct bkey_s_c,
- struct bch_extent_crc_unpacked);
-bool bch2_bkey_narrow_crcs(struct bkey_i *, struct bch_extent_crc_unpacked);
-void bch2_extent_crc_append(struct bkey_i *,
- struct bch_extent_crc_unpacked);
-
-/* Generic code for keys with pointers: */
-
-static inline bool bkey_is_btree_ptr(const struct bkey *k)
-{
- switch (k->type) {
- case KEY_TYPE_btree_ptr:
- case KEY_TYPE_btree_ptr_v2:
- return true;
- default:
- return false;
- }
-}
-
-static inline bool bkey_extent_is_direct_data(const struct bkey *k)
-{
- switch (k->type) {
- case KEY_TYPE_btree_ptr:
- case KEY_TYPE_btree_ptr_v2:
- case KEY_TYPE_extent:
- case KEY_TYPE_reflink_v:
- return true;
- default:
- return false;
- }
-}
-
-static inline bool bkey_extent_is_inline_data(const struct bkey *k)
-{
- return k->type == KEY_TYPE_inline_data ||
- k->type == KEY_TYPE_indirect_inline_data;
-}
-
-static inline unsigned bkey_inline_data_offset(const struct bkey *k)
-{
- switch (k->type) {
- case KEY_TYPE_inline_data:
- return sizeof(struct bch_inline_data);
- case KEY_TYPE_indirect_inline_data:
- return sizeof(struct bch_indirect_inline_data);
- default:
- BUG();
- }
-}
-
-static inline unsigned bkey_inline_data_bytes(const struct bkey *k)
-{
- return bkey_val_bytes(k) - bkey_inline_data_offset(k);
-}
-
-#define bkey_inline_data_p(_k) (((void *) (_k).v) + bkey_inline_data_offset((_k).k))
-
-static inline bool bkey_extent_is_data(const struct bkey *k)
-{
- return bkey_extent_is_direct_data(k) ||
- bkey_extent_is_inline_data(k) ||
- k->type == KEY_TYPE_reflink_p;
-}
-
-/*
- * Should extent be counted under inode->i_sectors?
- */
-static inline bool bkey_extent_is_allocation(const struct bkey *k)
-{
- switch (k->type) {
- case KEY_TYPE_extent:
- case KEY_TYPE_reservation:
- case KEY_TYPE_reflink_p:
- case KEY_TYPE_reflink_v:
- case KEY_TYPE_inline_data:
- case KEY_TYPE_indirect_inline_data:
- case KEY_TYPE_error:
- return true;
- default:
- return false;
- }
-}
-
-static inline bool bkey_extent_is_unwritten(struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
-
- bkey_for_each_ptr(ptrs, ptr)
- if (ptr->unwritten)
- return true;
- return false;
-}
-
-static inline bool bkey_extent_is_reservation(struct bkey_s_c k)
-{
- return k.k->type == KEY_TYPE_reservation ||
- bkey_extent_is_unwritten(k);
-}
-
-static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
-{
- struct bch_devs_list ret = (struct bch_devs_list) { 0 };
- struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
-
- bkey_for_each_ptr(p, ptr)
- ret.data[ret.nr++] = ptr->dev;
-
- return ret;
-}
-
-static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
-{
- struct bch_devs_list ret = (struct bch_devs_list) { 0 };
- struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
-
- bkey_for_each_ptr(p, ptr)
- if (!ptr->cached)
- ret.data[ret.nr++] = ptr->dev;
-
- return ret;
-}
-
-static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
-{
- struct bch_devs_list ret = (struct bch_devs_list) { 0 };
- struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
-
- bkey_for_each_ptr(p, ptr)
- if (ptr->cached)
- ret.data[ret.nr++] = ptr->dev;
-
- return ret;
-}
-
-unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
-unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
-unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
-bool bch2_bkey_is_incompressible(struct bkey_s_c);
-unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);
-
-unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
-unsigned bch2_extent_ptr_desired_durability(struct bch_fs *, struct extent_ptr_decoded *);
-unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
-unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
-
-const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c, unsigned);
-
-static inline struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s k, unsigned dev)
-{
- return (void *) bch2_bkey_has_device_c(k.s_c, dev);
-}
-
-bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);
-
-void bch2_bkey_extent_entry_drop(struct bkey_i *, union bch_extent_entry *);
-
-static inline void bch2_bkey_append_ptr(struct bkey_i *k, struct bch_extent_ptr ptr)
-{
- struct bch_extent_ptr *dest;
-
- EBUG_ON(bch2_bkey_has_device(bkey_i_to_s(k), ptr.dev));
-
- switch (k->k.type) {
- case KEY_TYPE_btree_ptr:
- case KEY_TYPE_btree_ptr_v2:
- case KEY_TYPE_extent:
- EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);
-
- ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
- dest = (struct bch_extent_ptr *)((void *) &k->v + bkey_val_bytes(&k->k));
- *dest = ptr;
- k->k.u64s++;
- break;
- default:
- BUG();
- }
-}
-
-void bch2_extent_ptr_decoded_append(struct bkey_i *,
- struct extent_ptr_decoded *);
-void bch2_bkey_drop_ptr_noerror(struct bkey_s, struct bch_extent_ptr *);
-void bch2_bkey_drop_ptr(struct bkey_s, struct bch_extent_ptr *);
-
-void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);
-void bch2_bkey_drop_device(struct bkey_s, unsigned);
-
-#define bch2_bkey_drop_ptrs_noerror(_k, _ptr, _cond) \
-do { \
- __label__ _again; \
- struct bkey_ptrs _ptrs; \
-_again: \
- _ptrs = bch2_bkey_ptrs(_k); \
- \
- bkey_for_each_ptr(_ptrs, _ptr) \
- if (_cond) { \
- bch2_bkey_drop_ptr_noerror(_k, _ptr); \
- goto _again; \
- } \
-} while (0)
-
-#define bch2_bkey_drop_ptrs(_k, _ptr, _cond) \
-do { \
- __label__ _again; \
- struct bkey_ptrs _ptrs; \
-_again: \
- _ptrs = bch2_bkey_ptrs(_k); \
- \
- bkey_for_each_ptr(_ptrs, _ptr) \
- if (_cond) { \
- bch2_bkey_drop_ptr(_k, _ptr); \
- goto _again; \
- } \
-} while (0)
-
-bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
- struct bch_extent_ptr, u64);
-bool bch2_extents_match(struct bkey_s_c, struct bkey_s_c);
-struct bch_extent_ptr *
-bch2_extent_has_ptr(struct bkey_s_c, struct extent_ptr_decoded, struct bkey_s);
-
-void bch2_extent_ptr_set_cached(struct bch_fs *, struct bch_io_opts *,
- struct bkey_s, struct bch_extent_ptr *);
-
-bool bch2_extent_normalize_by_opts(struct bch_fs *, struct bch_io_opts *, struct bkey_s);
-bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
-
-void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *, const struct bch_extent_ptr *);
-void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
- struct bkey_s_c);
-int bch2_bkey_ptrs_validate(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags);
-
-static inline bool bch2_extent_ptr_eq(struct bch_extent_ptr ptr1,
- struct bch_extent_ptr ptr2)
-{
- return (ptr1.cached == ptr2.cached &&
- ptr1.unwritten == ptr2.unwritten &&
- ptr1.offset == ptr2.offset &&
- ptr1.dev == ptr2.dev &&
-		ptr1.gen	== ptr2.gen);
-}
-
-void bch2_ptr_swab(struct bkey_s);
-
-const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c);
-unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *, struct bkey_s_c,
- unsigned, unsigned);
-bool bch2_bkey_needs_rebalance(struct bch_fs *, struct bkey_s_c);
-u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *, struct bkey_s_c);
-
-int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bkey_i *,
- struct bch_io_opts *);
-
-/* Generic extent code: */
-
-enum bch_extent_overlap {
- BCH_EXTENT_OVERLAP_ALL = 0,
- BCH_EXTENT_OVERLAP_BACK = 1,
- BCH_EXTENT_OVERLAP_FRONT = 2,
- BCH_EXTENT_OVERLAP_MIDDLE = 3,
-};
-
-/* Returns how k overlaps with m */
-static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
- const struct bkey *m)
-{
- int cmp1 = bkey_lt(k->p, m->p);
- int cmp2 = bkey_gt(bkey_start_pos(k), bkey_start_pos(m));
-
- return (cmp1 << 1) + cmp2;
-}
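
(Editorial worked example, not part of the original header.) With extents keyed by their end position, take k covering sectors [0, 8) and m covering [4, 12), ignoring the inode and snapshot fields of struct bpos: bkey_lt(k->p, m->p) is true (cmp1 = 1) and bkey_gt(bkey_start_pos(k), bkey_start_pos(m)) is false (cmp2 = 0), so the result is (1 << 1) + 0 = 2 = BCH_EXTENT_OVERLAP_FRONT, i.e. k overlaps the front of m. The other encodings fall out the same way: 0 means k covers all of m, 1 means k overlaps the back of m, and 3 means k lies strictly inside m.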
-
-int bch2_cut_front_s(struct bpos, struct bkey_s);
-int bch2_cut_back_s(struct bpos, struct bkey_s);
-
-static inline void bch2_cut_front(struct bpos where, struct bkey_i *k)
-{
- bch2_cut_front_s(where, bkey_i_to_s(k));
-}
-
-static inline void bch2_cut_back(struct bpos where, struct bkey_i *k)
-{
- bch2_cut_back_s(where, bkey_i_to_s(k));
-}
-
-/**
- * bch2_key_resize - adjust size of @k
- * @k:		key to resize
- * @new_size:	new size for @k
- *
- * bkey_start_offset(k) is preserved; only where the extent ends is modified.
- */
-static inline void bch2_key_resize(struct bkey *k, unsigned new_size)
-{
- k->p.offset -= k->size;
- k->p.offset += new_size;
- k->size = new_size;
-}
-
-#endif /* _BCACHEFS_EXTENTS_H */
diff --git a/fs/bcachefs/extents_format.h b/fs/bcachefs/extents_format.h
deleted file mode 100644
index 3bd2fdbb0817..000000000000
--- a/fs/bcachefs/extents_format.h
+++ /dev/null
@@ -1,295 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_EXTENTS_FORMAT_H
-#define _BCACHEFS_EXTENTS_FORMAT_H
-
-/*
- * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
- * preceded by checksum/compression information (bch_extent_crc32 or
- * bch_extent_crc64).
- *
- * One major determining factor in the format of extents is how we handle and
- * represent extents that have been partially overwritten and thus trimmed:
- *
- * If an extent is not checksummed or compressed, when the extent is trimmed we
- * don't have to remember the extent we originally allocated and wrote: we can
- * merely adjust ptr->offset to point to the start of the data that is currently
- * live. The size field in struct bkey records the current (live) size of the
- * extent, and is also used to mean "size of region on disk that we point to" in
- * this case.
- *
- * Thus an extent that is not checksummed or compressed will consist only of a
- * list of bch_extent_ptrs, with none of the fields in
- * bch_extent_crc32/bch_extent_crc64.
- *
- * When an extent is checksummed or compressed, it's not possible to read only
- * the data that is currently live: we have to read the entire extent that was
- * originally written, and then return only the part of the extent that is
- * currently live.
- *
- * Thus, in addition to the current size of the extent in struct bkey, we need
- * to store the size of the originally allocated space - this is the
- * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
- * when the extent is trimmed, instead of modifying the offset field of the
- * pointer, we keep a second smaller offset field - "offset into the original
- * extent of the currently live region".
- *
- * The other major determining factor is replication and data migration:
- *
- * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
- * write, we will initially write all the replicas in the same format, with the
- * same checksum type and compression format - however, when copygc runs later (or
- * tiering/cache promotion, anything that moves data), it is not in general
- * going to rewrite all the pointers at once - one of the replicas may be in a
- * bucket on one device that has very little fragmentation while another lives
- * in a bucket that has become heavily fragmented, and thus is being rewritten
- * sooner than the rest.
- *
- * Thus it will only move a subset of the pointers (or in the case of
- * tiering/cache promotion perhaps add a single pointer without dropping any
- * current pointers), and if the extent has been partially overwritten it must
- * write only the currently live portion (or copygc would not be able to reduce
- * fragmentation!) - which necessitates a different bch_extent_crc format for
- * the new pointer.
- *
- * But in the interests of space efficiency, we don't want to store one
- * bch_extent_crc for each pointer if we don't have to.
- *
- * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
- * bch_extent_ptrs appended arbitrarily one after the other. We determine the
- * type of a given entry with a scheme similar to utf8 (except we're encoding a
- * type, not a size), encoding the type in the position of the first set bit:
- *
- * bch_extent_ptr    - 0b1
- * bch_extent_crc32  - 0b10
- * bch_extent_crc64  - 0b100
- *
- * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
- * bch_extent_crc64 is the least constrained).
- *
- * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
- * until the next bch_extent_crc32/64.
- *
- * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
- * is neither checksummed nor compressed.
- */
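
Editorial illustration of the tag scheme above: an entry's type is the position of the first set bit of its low word. A minimal sketch, assuming __ffs() from <linux/bitops.h> and the union bch_extent_entry / BCH_EXTENT_ENTRY_MAX definitions that appear later in this header (the real helpers live in extents.h):

/* Illustrative only: decode an entry's type from its first set bit */
static inline unsigned example_extent_entry_type(const union bch_extent_entry *e)
{
	/* a zero tag would mean a malformed entry */
	return e->type ? __ffs(e->type) : BCH_EXTENT_ENTRY_MAX;
}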
-
-#define BCH_EXTENT_ENTRY_TYPES() \
- x(ptr, 0) \
- x(crc32, 1) \
- x(crc64, 2) \
- x(crc128, 3) \
- x(stripe_ptr, 4) \
- x(rebalance, 5)
-#define BCH_EXTENT_ENTRY_MAX 6
-
-enum bch_extent_entry_type {
-#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
- BCH_EXTENT_ENTRY_TYPES()
-#undef x
-};
-
-/* Compressed/uncompressed sizes are stored biased by 1: */
-struct bch_extent_crc32 {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u32 type:2,
- _compressed_size:7,
- _uncompressed_size:7,
- offset:7,
- _unused:1,
- csum_type:4,
- compression_type:4;
- __u32 csum;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- __u32 csum;
- __u32 compression_type:4,
- csum_type:4,
- _unused:1,
- offset:7,
- _uncompressed_size:7,
- _compressed_size:7,
- type:2;
-#endif
-} __packed __aligned(8);
-
-#define CRC32_SIZE_MAX (1U << 7)
-#define CRC32_NONCE_MAX 0
-
-struct bch_extent_crc64 {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u64 type:3,
- _compressed_size:9,
- _uncompressed_size:9,
- offset:9,
- nonce:10,
- csum_type:4,
- compression_type:4,
- csum_hi:16;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- __u64 csum_hi:16,
- compression_type:4,
- csum_type:4,
- nonce:10,
- offset:9,
- _uncompressed_size:9,
- _compressed_size:9,
- type:3;
-#endif
- __u64 csum_lo;
-} __packed __aligned(8);
-
-#define CRC64_SIZE_MAX (1U << 9)
-#define CRC64_NONCE_MAX ((1U << 10) - 1)
-
-struct bch_extent_crc128 {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u64 type:4,
- _compressed_size:13,
- _uncompressed_size:13,
- offset:13,
- nonce:13,
- csum_type:4,
- compression_type:4;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- __u64 compression_type:4,
- csum_type:4,
- nonce:13,
- offset:13,
- _uncompressed_size:13,
- _compressed_size:13,
- type:4;
-#endif
- struct bch_csum csum;
-} __packed __aligned(8);
-
-#define CRC128_SIZE_MAX (1U << 13)
-#define CRC128_NONCE_MAX ((1U << 13) - 1)
-
-/*
- * @unwritten - pointer hasn't been written to, just reserved
- */
-struct bch_extent_ptr {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u64 type:1,
- cached:1,
- unused:1,
- unwritten:1,
- offset:44, /* 8 petabytes */
- dev:8,
- gen:8;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- __u64 gen:8,
- dev:8,
- offset:44,
- unwritten:1,
- unused:1,
- cached:1,
- type:1;
-#endif
-} __packed __aligned(8);
-
-struct bch_extent_stripe_ptr {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u64 type:5,
- block:8,
- redundancy:4,
- idx:47;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- __u64 idx:47,
- redundancy:4,
- block:8,
- type:5;
-#endif
-};
-
-struct bch_extent_rebalance {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u64 type:6,
- unused:34,
- compression:8, /* enum bch_compression_opt */
- target:16;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- __u64 target:16,
- compression:8,
- unused:34,
- type:6;
-#endif
-};
-
-union bch_extent_entry {
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || __BITS_PER_LONG == 64
- unsigned long type;
-#elif __BITS_PER_LONG == 32
- struct {
- unsigned long pad;
- unsigned long type;
- };
-#else
-#error edit for your odd byteorder.
-#endif
-
-#define x(f, n) struct bch_extent_##f f;
- BCH_EXTENT_ENTRY_TYPES()
-#undef x
-};
-
-struct bch_btree_ptr {
- struct bch_val v;
-
- __u64 _data[0];
- struct bch_extent_ptr start[];
-} __packed __aligned(8);
-
-struct bch_btree_ptr_v2 {
- struct bch_val v;
-
- __u64 mem_ptr;
- __le64 seq;
- __le16 sectors_written;
- __le16 flags;
- struct bpos min_key;
- __u64 _data[0];
- struct bch_extent_ptr start[];
-} __packed __aligned(8);
-
-LE16_BITMASK(BTREE_PTR_RANGE_UPDATED, struct bch_btree_ptr_v2, flags, 0, 1);
-
-struct bch_extent {
- struct bch_val v;
-
- __u64 _data[0];
- union bch_extent_entry start[];
-} __packed __aligned(8);
-
-/* Maximum size (in u64s) a single pointer could be: */
-#define BKEY_EXTENT_PTR_U64s_MAX\
- ((sizeof(struct bch_extent_crc128) + \
- sizeof(struct bch_extent_ptr)) / sizeof(__u64))
-
-/* Maximum possible size of an entire extent value: */
-#define BKEY_EXTENT_VAL_U64s_MAX \
- (1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))
-
-/* Maximum possible size of an entire extent, key + value: */
-#define BKEY_EXTENT_U64s_MAX (BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)
-
-/* Btree pointers don't carry around checksums: */
-#define BKEY_BTREE_PTR_VAL_U64s_MAX \
- ((sizeof(struct bch_btree_ptr_v2) + \
- sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(__u64))
-#define BKEY_BTREE_PTR_U64s_MAX \
- (BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
-
-struct bch_reservation {
- struct bch_val v;
-
- __le32 generation;
- __u8 nr_replicas;
- __u8 pad[3];
-} __packed __aligned(8);
-
-struct bch_inline_data {
- struct bch_val v;
- u8 data[];
-};
-
-#endif /* _BCACHEFS_EXTENTS_FORMAT_H */
diff --git a/fs/bcachefs/extents_types.h b/fs/bcachefs/extents_types.h
deleted file mode 100644
index 43d6c341ecca..000000000000
--- a/fs/bcachefs/extents_types.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_EXTENTS_TYPES_H
-#define _BCACHEFS_EXTENTS_TYPES_H
-
-#include "bcachefs_format.h"
-
-struct bch_extent_crc_unpacked {
- u32 compressed_size;
- u32 uncompressed_size;
- u32 live_size;
-
- u8 csum_type;
- u8 compression_type;
-
- u16 offset;
-
- u16 nonce;
-
- struct bch_csum csum;
-};
-
-struct extent_ptr_decoded {
- unsigned idx;
- bool has_ec;
- struct bch_extent_crc_unpacked crc;
- struct bch_extent_ptr ptr;
- struct bch_extent_stripe_ptr ec;
-};
-
-struct bch_io_failures {
- u8 nr;
- struct bch_dev_io_failures {
- u8 dev;
- u8 idx;
- u8 nr_failed;
- u8 nr_retries;
- } devs[BCH_REPLICAS_MAX];
-};
-
-#endif /* _BCACHEFS_EXTENTS_TYPES_H */
diff --git a/fs/bcachefs/eytzinger.c b/fs/bcachefs/eytzinger.c
deleted file mode 100644
index 2eaffe37b5e7..000000000000
--- a/fs/bcachefs/eytzinger.c
+++ /dev/null
@@ -1,305 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "eytzinger.h"
-
-/**
- * is_aligned - is this pointer & size okay for word-wide copying?
- * @base: pointer to data
- * @size: size of each element
- * @align: required alignment (typically 4 or 8)
- *
- * Returns true if elements can be copied using word loads and stores.
- * The size must be a multiple of the alignment, and the base address must
- * be aligned as well unless CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set.
- *
- * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
- * to "if ((a | b) & mask)", so we do that by hand.
- */
-__attribute_const__ __always_inline
-static bool is_aligned(const void *base, size_t size, unsigned char align)
-{
- unsigned char lsbits = (unsigned char)size;
-
- (void)base;
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- lsbits |= (unsigned char)(uintptr_t)base;
-#endif
- return (lsbits & (align - 1)) == 0;
-}
-
-/**
- * swap_words_32 - swap two elements in 32-bit chunks
- * @a: pointer to the first element to swap
- * @b: pointer to the second element to swap
- * @n: element size (must be a multiple of 4)
- *
- * Exchange the two objects in memory. This exploits base+index addressing,
- * which basically all CPUs have, to minimize loop overhead computations.
- *
- * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
- * bottom of the loop, even though the zero flag is still valid from the
- * subtract (since the intervening mov instructions don't alter the flags).
- * Gcc 8.1.0 doesn't have that problem.
- */
-static void swap_words_32(void *a, void *b, size_t n)
-{
- do {
- u32 t = *(u32 *)(a + (n -= 4));
- *(u32 *)(a + n) = *(u32 *)(b + n);
- *(u32 *)(b + n) = t;
- } while (n);
-}
-
-/**
- * swap_words_64 - swap two elements in 64-bit chunks
- * @a: pointer to the first element to swap
- * @b: pointer to the second element to swap
- * @n: element size (must be a multiple of 8)
- *
- * Exchange the two objects in memory. This exploits base+index
- * addressing, which basically all CPUs have, to minimize loop overhead
- * computations.
- *
- * We'd like to use 64-bit loads if possible. If they're not, emulating
- * one requires base+index+4 addressing which x86 has but most other
- * processors do not. If CONFIG_64BIT, we definitely have 64-bit loads,
- * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
- * x32 ABI). Are there any cases the kernel needs to worry about?
- */
-static void swap_words_64(void *a, void *b, size_t n)
-{
- do {
-#ifdef CONFIG_64BIT
- u64 t = *(u64 *)(a + (n -= 8));
- *(u64 *)(a + n) = *(u64 *)(b + n);
- *(u64 *)(b + n) = t;
-#else
- /* Use two 32-bit transfers to avoid base+index+4 addressing */
- u32 t = *(u32 *)(a + (n -= 4));
- *(u32 *)(a + n) = *(u32 *)(b + n);
- *(u32 *)(b + n) = t;
-
- t = *(u32 *)(a + (n -= 4));
- *(u32 *)(a + n) = *(u32 *)(b + n);
- *(u32 *)(b + n) = t;
-#endif
- } while (n);
-}
-
-/**
- * swap_bytes - swap two elements a byte at a time
- * @a: pointer to the first element to swap
- * @b: pointer to the second element to swap
- * @n: element size
- *
- * This is the fallback if alignment doesn't allow using larger chunks.
- */
-static void swap_bytes(void *a, void *b, size_t n)
-{
- do {
- char t = ((char *)a)[--n];
- ((char *)a)[n] = ((char *)b)[n];
- ((char *)b)[n] = t;
- } while (n);
-}
-
-/*
- * The values are arbitrary as long as they can't be confused with
- * a pointer, but small integers make for the smallest compare
- * instructions.
- */
-#define SWAP_WORDS_64 (swap_r_func_t)0
-#define SWAP_WORDS_32 (swap_r_func_t)1
-#define SWAP_BYTES (swap_r_func_t)2
-#define SWAP_WRAPPER (swap_r_func_t)3
-
-struct wrapper {
- cmp_func_t cmp;
- swap_func_t swap_func;
-};
-
-/*
- * The function pointer is last to make tail calls most efficient if the
- * compiler decides not to inline this function.
- */
-static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv)
-{
- if (swap_func == SWAP_WRAPPER) {
- ((const struct wrapper *)priv)->swap_func(a, b, (int)size);
- return;
- }
-
- if (swap_func == SWAP_WORDS_64)
- swap_words_64(a, b, size);
- else if (swap_func == SWAP_WORDS_32)
- swap_words_32(a, b, size);
- else if (swap_func == SWAP_BYTES)
- swap_bytes(a, b, size);
- else
- swap_func(a, b, (int)size, priv);
-}
-
-#define _CMP_WRAPPER ((cmp_r_func_t)0L)
-
-static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
-{
- if (cmp == _CMP_WRAPPER)
- return ((const struct wrapper *)priv)->cmp(a, b);
- return cmp(a, b, priv);
-}
-
-static inline int eytzinger0_do_cmp(void *base, size_t n, size_t size,
- cmp_r_func_t cmp_func, const void *priv,
- size_t l, size_t r)
-{
- return do_cmp(base + inorder_to_eytzinger0(l, n) * size,
- base + inorder_to_eytzinger0(r, n) * size,
- cmp_func, priv);
-}
-
-static inline void eytzinger0_do_swap(void *base, size_t n, size_t size,
- swap_r_func_t swap_func, const void *priv,
- size_t l, size_t r)
-{
- do_swap(base + inorder_to_eytzinger0(l, n) * size,
- base + inorder_to_eytzinger0(r, n) * size,
- size, swap_func, priv);
-}
-
-void eytzinger0_sort_r(void *base, size_t n, size_t size,
- cmp_r_func_t cmp_func,
- swap_r_func_t swap_func,
- const void *priv)
-{
- int i, j, k;
-
- /* called from 'sort' without swap function, let's pick the default */
- if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap_func)
- swap_func = NULL;
-
- if (!swap_func) {
- if (is_aligned(base, size, 8))
- swap_func = SWAP_WORDS_64;
- else if (is_aligned(base, size, 4))
- swap_func = SWAP_WORDS_32;
- else
- swap_func = SWAP_BYTES;
- }
-
- /* heapify */
- for (i = n / 2 - 1; i >= 0; --i) {
- /* Find the sift-down path all the way to the leaves. */
- for (j = i; k = j * 2 + 1, k + 1 < n;)
- j = eytzinger0_do_cmp(base, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;
-
- /* Special case for the last leaf with no sibling. */
- if (j * 2 + 2 == n)
- j = j * 2 + 1;
-
- /* Backtrack to the correct location. */
- while (j != i && eytzinger0_do_cmp(base, n, size, cmp_func, priv, i, j) >= 0)
- j = (j - 1) / 2;
-
- /* Shift the element into its correct place. */
- for (k = j; j != i;) {
- j = (j - 1) / 2;
- eytzinger0_do_swap(base, n, size, swap_func, priv, j, k);
- }
- }
-
- /* sort */
- for (i = n - 1; i > 0; --i) {
- eytzinger0_do_swap(base, n, size, swap_func, priv, 0, i);
-
- /* Find the sift-down path all the way to the leaves. */
- for (j = 0; k = j * 2 + 1, k + 1 < i;)
- j = eytzinger0_do_cmp(base, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;
-
- /* Special case for the last leaf with no sibling. */
- if (j * 2 + 2 == i)
- j = j * 2 + 1;
-
- /* Backtrack to the correct location. */
- while (j && eytzinger0_do_cmp(base, n, size, cmp_func, priv, 0, j) >= 0)
- j = (j - 1) / 2;
-
- /* Shift the element into its correct place. */
- for (k = j; j;) {
- j = (j - 1) / 2;
- eytzinger0_do_swap(base, n, size, swap_func, priv, j, k);
- }
- }
-}
-
-void eytzinger0_sort(void *base, size_t n, size_t size,
- cmp_func_t cmp_func,
- swap_func_t swap_func)
-{
- struct wrapper w = {
- .cmp = cmp_func,
- .swap_func = swap_func,
- };
-
- return eytzinger0_sort_r(base, n, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
-}
-
-#if 0
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/ktime.h>
-
-static u64 cmp_count;
-
-static int mycmp(const void *a, const void *b)
-{
- u32 _a = *(u32 *)a;
- u32 _b = *(u32 *)b;
-
- cmp_count++;
- if (_a < _b)
- return -1;
- else if (_a > _b)
- return 1;
- else
- return 0;
-}
-
-static int test(void)
-{
- size_t N, i;
- ktime_t start, end;
- s64 delta;
- u32 *arr;
-
- for (N = 10000; N <= 100000; N += 10000) {
- arr = kmalloc_array(N, sizeof(u32), GFP_KERNEL);
- cmp_count = 0;
-
- for (i = 0; i < N; i++)
- arr[i] = get_random_u32();
-
- start = ktime_get();
- eytzinger0_sort(arr, N, sizeof(u32), mycmp, NULL);
- end = ktime_get();
-
- delta = ktime_us_delta(end, start);
- printk(KERN_INFO "time: %lld\n", delta);
- printk(KERN_INFO "comparisons: %lld\n", cmp_count);
-
- u32 prev = 0;
-
- eytzinger0_for_each(i, N) {
- if (prev > arr[i])
- goto err;
- prev = arr[i];
- }
-
- kfree(arr);
- }
- return 0;
-
-err:
- kfree(arr);
- return -1;
-}
-#endif
diff --git a/fs/bcachefs/eytzinger.h b/fs/bcachefs/eytzinger.h
deleted file mode 100644
index 0541192d7bc0..000000000000
--- a/fs/bcachefs/eytzinger.h
+++ /dev/null
@@ -1,319 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _EYTZINGER_H
-#define _EYTZINGER_H
-
-#include <linux/bitops.h>
-#include <linux/log2.h>
-
-#ifdef EYTZINGER_DEBUG
-#define EYTZINGER_BUG_ON(cond) BUG_ON(cond)
-#else
-#define EYTZINGER_BUG_ON(cond)
-#endif
-
-/*
- * Traversal for trees in eytzinger layout - a full binary tree laid out in an
- * array.
- *
- * Consider using an eytzinger tree any time you would otherwise be doing binary
- * search over an array. Binary search is a worst case scenario for branch
- * prediction and prefetching, but in an eytzinger tree every node's children
- * are adjacent in memory, thus we can prefetch children before knowing the
- * result of the comparison, assuming multiple nodes fit on a cacheline.
- *
- * Two variants are provided, for one based indexing and zero based indexing.
- *
- * Zero based indexing is more convenient, but one based indexing has better
- * alignment and thus better performance because each new level of the tree
- * starts at a power of two, and thus if element 0 was cacheline aligned, each
- * new level will be as well.
- */
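
To make the prefetching argument concrete, here is a minimal editorial lookup sketch over a one-based eytzinger array of u32s (base[1..size] holds the tree, base[0] is unused). It assumes prefetch() from <linux/prefetch.h> and u32 from <linux/types.h>; real users should go through the eytzinger0_find*() helpers further down:

static inline unsigned eytzinger1_search_u32(const u32 *base, unsigned size, u32 search)
{
	unsigned i = 1;

	while (i <= size) {
		/* both children are adjacent, so one prefetch usually covers them */
		if (eytzinger1_left_child(i) <= size)
			prefetch(&base[eytzinger1_left_child(i)]);

		if (base[i] == search)
			return i;

		/* left subtree holds smaller keys, right subtree larger ones */
		i = eytzinger1_child(i, base[i] < search);
	}

	return 0;	/* not found */
}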
-
-static inline unsigned eytzinger1_child(unsigned i, unsigned child)
-{
- EYTZINGER_BUG_ON(child > 1);
-
- return (i << 1) + child;
-}
-
-static inline unsigned eytzinger1_left_child(unsigned i)
-{
- return eytzinger1_child(i, 0);
-}
-
-static inline unsigned eytzinger1_right_child(unsigned i)
-{
- return eytzinger1_child(i, 1);
-}
-
-static inline unsigned eytzinger1_first(unsigned size)
-{
- return size ? rounddown_pow_of_two(size) : 0;
-}
-
-static inline unsigned eytzinger1_last(unsigned size)
-{
- return rounddown_pow_of_two(size + 1) - 1;
-}
-
-/*
- * eytzinger1_next() and eytzinger1_prev() have the nice properties that
- *
- * eytzinger1_next(0) == eytzinger1_first()
- * eytzinger1_prev(0) == eytzinger1_last()
- *
- * eytzinger1_prev(eytzinger1_first()) == 0
- * eytzinger1_next(eytzinger1_last()) == 0
- */
-
-static inline unsigned eytzinger1_next(unsigned i, unsigned size)
-{
- EYTZINGER_BUG_ON(i > size);
-
- if (eytzinger1_right_child(i) <= size) {
- i = eytzinger1_right_child(i);
-
- i <<= __fls(size + 1) - __fls(i);
- i >>= i > size;
- } else {
- i >>= ffz(i) + 1;
- }
-
- return i;
-}
-
-static inline unsigned eytzinger1_prev(unsigned i, unsigned size)
-{
- EYTZINGER_BUG_ON(i > size);
-
- if (eytzinger1_left_child(i) <= size) {
- i = eytzinger1_left_child(i) + 1;
-
- i <<= __fls(size + 1) - __fls(i);
- i -= 1;
- i >>= i > size;
- } else {
- i >>= __ffs(i) + 1;
- }
-
- return i;
-}
-
-static inline unsigned eytzinger1_extra(unsigned size)
-{
- return size
- ? (size + 1 - rounddown_pow_of_two(size)) << 1
- : 0;
-}
-
-static inline unsigned __eytzinger1_to_inorder(unsigned i, unsigned size,
- unsigned extra)
-{
- unsigned b = __fls(i);
- unsigned shift = __fls(size) - b;
- int s;
-
- EYTZINGER_BUG_ON(!i || i > size);
-
- i ^= 1U << b;
- i <<= 1;
- i |= 1;
- i <<= shift;
-
- /*
- * sign bit trick:
- *
- * if (i > extra)
- * i -= (i - extra) >> 1;
- */
- s = extra - i;
- i += (s >> 1) & (s >> 31);
-
- return i;
-}
-
-static inline unsigned __inorder_to_eytzinger1(unsigned i, unsigned size,
- unsigned extra)
-{
- unsigned shift;
- int s;
-
- EYTZINGER_BUG_ON(!i || i > size);
-
- /*
- * sign bit trick:
- *
- * if (i > extra)
- * i += i - extra;
- */
- s = extra - i;
- i -= s & (s >> 31);
-
- shift = __ffs(i);
-
- i >>= shift + 1;
- i |= 1U << (__fls(size) - shift);
-
- return i;
-}
-
-static inline unsigned eytzinger1_to_inorder(unsigned i, unsigned size)
-{
- return __eytzinger1_to_inorder(i, size, eytzinger1_extra(size));
-}
-
-static inline unsigned inorder_to_eytzinger1(unsigned i, unsigned size)
-{
- return __inorder_to_eytzinger1(i, size, eytzinger1_extra(size));
-}
-
-#define eytzinger1_for_each(_i, _size) \
- for (unsigned (_i) = eytzinger1_first((_size)); \
- (_i) != 0; \
- (_i) = eytzinger1_next((_i), (_size)))
-
-/* Zero based indexing version: */
-
-static inline unsigned eytzinger0_child(unsigned i, unsigned child)
-{
- EYTZINGER_BUG_ON(child > 1);
-
- return (i << 1) + 1 + child;
-}
-
-static inline unsigned eytzinger0_left_child(unsigned i)
-{
- return eytzinger0_child(i, 0);
-}
-
-static inline unsigned eytzinger0_right_child(unsigned i)
-{
- return eytzinger0_child(i, 1);
-}
-
-static inline unsigned eytzinger0_first(unsigned size)
-{
- return eytzinger1_first(size) - 1;
-}
-
-static inline unsigned eytzinger0_last(unsigned size)
-{
- return eytzinger1_last(size) - 1;
-}
-
-static inline unsigned eytzinger0_next(unsigned i, unsigned size)
-{
- return eytzinger1_next(i + 1, size) - 1;
-}
-
-static inline unsigned eytzinger0_prev(unsigned i, unsigned size)
-{
- return eytzinger1_prev(i + 1, size) - 1;
-}
-
-static inline unsigned eytzinger0_extra(unsigned size)
-{
- return eytzinger1_extra(size);
-}
-
-static inline unsigned __eytzinger0_to_inorder(unsigned i, unsigned size,
- unsigned extra)
-{
- return __eytzinger1_to_inorder(i + 1, size, extra) - 1;
-}
-
-static inline unsigned __inorder_to_eytzinger0(unsigned i, unsigned size,
- unsigned extra)
-{
- return __inorder_to_eytzinger1(i + 1, size, extra) - 1;
-}
-
-static inline unsigned eytzinger0_to_inorder(unsigned i, unsigned size)
-{
- return __eytzinger0_to_inorder(i, size, eytzinger0_extra(size));
-}
-
-static inline unsigned inorder_to_eytzinger0(unsigned i, unsigned size)
-{
- return __inorder_to_eytzinger0(i, size, eytzinger0_extra(size));
-}
-
-#define eytzinger0_for_each(_i, _size) \
- for (unsigned (_i) = eytzinger0_first((_size)); \
- (_i) != -1; \
- (_i) = eytzinger0_next((_i), (_size)))
-
-/* return greatest node <= @search, or -1 if not found */
-static inline int eytzinger0_find_le(void *base, size_t nr, size_t size,
- cmp_func_t cmp, const void *search)
-{
- unsigned i, n = 0;
-
- if (!nr)
- return -1;
-
- do {
- i = n;
- n = eytzinger0_child(i, cmp(base + i * size, search) <= 0);
- } while (n < nr);
-
- if (n & 1) {
- /*
- * @i was greater than @search, return previous node:
- *
- * if @i was leftmost/smallest element,
-	 * eytzinger0_prev(eytzinger0_first()) returns -1, as expected
- */
- return eytzinger0_prev(i, nr);
- } else {
- return i;
- }
-}
-
-static inline int eytzinger0_find_gt(void *base, size_t nr, size_t size,
- cmp_func_t cmp, const void *search)
-{
- ssize_t idx = eytzinger0_find_le(base, nr, size, cmp, search);
-
- /*
-	 * if eytzinger0_find_le() returned -1 - no element was <= search - we
-	 * want to return the first element; next/prev identities mean this works
-	 * as expected
- *
- * similarly if find_le() returns last element, we should return -1;
- * identities mean this all works out:
- */
- return eytzinger0_next(idx, nr);
-}
-
-static inline int eytzinger0_find_ge(void *base, size_t nr, size_t size,
- cmp_func_t cmp, const void *search)
-{
- ssize_t idx = eytzinger0_find_le(base, nr, size, cmp, search);
-
- if (idx < nr && !cmp(base + idx * size, search))
- return idx;
-
- return eytzinger0_next(idx, nr);
-}
-
-#define eytzinger0_find(base, nr, size, _cmp, search) \
-({ \
- void *_base = (base); \
- const void *_search = (search); \
- size_t _nr = (nr); \
- size_t _size = (size); \
- size_t _i = 0; \
- int _res; \
- \
- while (_i < _nr && \
- (_res = _cmp(_search, _base + _i * _size))) \
- _i = eytzinger0_child(_i, _res > 0); \
- _i; \
-})
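
A small editorial usage sketch (assuming u32 and size_t from <linux/types.h>): sort an array into eytzinger order once with eytzinger0_sort(), then look values up with eytzinger0_find(); an index < nr means the value was found, anything else means it was not:

static int u32_cmp(const void *a, const void *b)
{
	u32 l = *(const u32 *)a, r = *(const u32 *)b;

	return l < r ? -1 : l > r;
}

/* arr must already be in eytzinger order, i.e. previously sorted with
 * eytzinger0_sort(arr, nr, sizeof(arr[0]), u32_cmp, NULL);
 */
static bool u32_array_contains(u32 *arr, size_t nr, u32 v)
{
	size_t i = eytzinger0_find(arr, nr, sizeof(arr[0]), u32_cmp, &v);

	return i < nr;
}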
-
-void eytzinger0_sort_r(void *, size_t, size_t,
- cmp_r_func_t, swap_r_func_t, const void *);
-void eytzinger0_sort(void *, size_t, size_t, cmp_func_t, swap_func_t);
-
-#endif /* _EYTZINGER_H */
diff --git a/fs/bcachefs/fifo.h b/fs/bcachefs/fifo.h
deleted file mode 100644
index d8153fe27037..000000000000
--- a/fs/bcachefs/fifo.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_FIFO_H
-#define _BCACHEFS_FIFO_H
-
-#include "util.h"
-
-#define FIFO(type) \
-struct { \
- size_t front, back, size, mask; \
- type *data; \
-}
-
-#define DECLARE_FIFO(type, name) FIFO(type) name
-
-#define fifo_buf_size(fifo) \
- ((fifo)->size \
- ? roundup_pow_of_two((fifo)->size) * sizeof((fifo)->data[0]) \
- : 0)
-
-#define init_fifo(fifo, _size, _gfp) \
-({ \
- (fifo)->front = (fifo)->back = 0; \
- (fifo)->size = (_size); \
- (fifo)->mask = (fifo)->size \
- ? roundup_pow_of_two((fifo)->size) - 1 \
- : 0; \
- (fifo)->data = kvmalloc(fifo_buf_size(fifo), (_gfp)); \
-})
-
-#define free_fifo(fifo) \
-do { \
- kvfree((fifo)->data); \
- (fifo)->data = NULL; \
-} while (0)
-
-#define fifo_swap(l, r) \
-do { \
- swap((l)->front, (r)->front); \
- swap((l)->back, (r)->back); \
- swap((l)->size, (r)->size); \
- swap((l)->mask, (r)->mask); \
- swap((l)->data, (r)->data); \
-} while (0)
-
-#define fifo_move(dest, src) \
-do { \
- typeof(*((dest)->data)) _t; \
- while (!fifo_full(dest) && \
- fifo_pop(src, _t)) \
- fifo_push(dest, _t); \
-} while (0)
-
-#define fifo_used(fifo) (((fifo)->back - (fifo)->front))
-#define fifo_free(fifo) ((fifo)->size - fifo_used(fifo))
-
-#define fifo_empty(fifo) ((fifo)->front == (fifo)->back)
-#define fifo_full(fifo) (fifo_used(fifo) == (fifo)->size)
-
-#define fifo_peek_front(fifo) ((fifo)->data[(fifo)->front & (fifo)->mask])
-#define fifo_peek_back(fifo) ((fifo)->data[((fifo)->back - 1) & (fifo)->mask])
-
-#define fifo_entry_idx_abs(fifo, p) \
- ((((p) >= &fifo_peek_front(fifo) \
- ? (fifo)->front : (fifo)->back) & ~(fifo)->mask) + \
- (((p) - (fifo)->data)))
-
-#define fifo_entry_idx(fifo, p) (((p) - &fifo_peek_front(fifo)) & (fifo)->mask)
-#define fifo_idx_entry(fifo, i) ((fifo)->data[((fifo)->front + (i)) & (fifo)->mask])
-
-#define fifo_push_back_ref(f) \
- (fifo_full((f)) ? NULL : &(f)->data[(f)->back++ & (f)->mask])
-
-#define fifo_push_front_ref(f) \
- (fifo_full((f)) ? NULL : &(f)->data[--(f)->front & (f)->mask])
-
-#define fifo_push_back(fifo, new) \
-({ \
- typeof((fifo)->data) _r = fifo_push_back_ref(fifo); \
- if (_r) \
- *_r = (new); \
- _r != NULL; \
-})
-
-#define fifo_push_front(fifo, new) \
-({ \
- typeof((fifo)->data) _r = fifo_push_front_ref(fifo); \
- if (_r) \
- *_r = (new); \
- _r != NULL; \
-})
-
-#define fifo_pop_front(fifo, i) \
-({ \
- bool _r = !fifo_empty((fifo)); \
- if (_r) \
- (i) = (fifo)->data[(fifo)->front++ & (fifo)->mask]; \
- _r; \
-})
-
-#define fifo_pop_back(fifo, i) \
-({ \
- bool _r = !fifo_empty((fifo)); \
- if (_r) \
- (i) = (fifo)->data[--(fifo)->back & (fifo)->mask]; \
- _r; \
-})
-
-#define fifo_push_ref(fifo) fifo_push_back_ref(fifo)
-#define fifo_push(fifo, i) fifo_push_back(fifo, (i))
-#define fifo_pop(fifo, i) fifo_pop_front(fifo, (i))
-#define fifo_peek(fifo) fifo_peek_front(fifo)
-
-#define fifo_for_each_entry(_entry, _fifo, _iter) \
- for (typecheck(typeof((_fifo)->front), _iter), \
- (_iter) = (_fifo)->front; \
- ((_iter != (_fifo)->back) && \
- (_entry = (_fifo)->data[(_iter) & (_fifo)->mask], true)); \
- (_iter)++)
-
-#define fifo_for_each_entry_ptr(_ptr, _fifo, _iter) \
- for (typecheck(typeof((_fifo)->front), _iter), \
- (_iter) = (_fifo)->front; \
- ((_iter != (_fifo)->back) && \
- (_ptr = &(_fifo)->data[(_iter) & (_fifo)->mask], true)); \
- (_iter)++)
-
-#endif /* _BCACHEFS_FIFO_H */
diff --git a/fs/bcachefs/fs-common.c b/fs/bcachefs/fs-common.c
deleted file mode 100644
index 7e10a9ddcfd9..000000000000
--- a/fs/bcachefs/fs-common.c
+++ /dev/null
@@ -1,550 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "acl.h"
-#include "btree_update.h"
-#include "dirent.h"
-#include "fs-common.h"
-#include "inode.h"
-#include "subvolume.h"
-#include "xattr.h"
-
-#include <linux/posix_acl.h>
-
-static inline int is_subdir_for_nlink(struct bch_inode_unpacked *inode)
-{
- return S_ISDIR(inode->bi_mode) && !inode->bi_subvol;
-}
-
-int bch2_create_trans(struct btree_trans *trans,
- subvol_inum dir,
- struct bch_inode_unpacked *dir_u,
- struct bch_inode_unpacked *new_inode,
- const struct qstr *name,
- uid_t uid, gid_t gid, umode_t mode, dev_t rdev,
- struct posix_acl *default_acl,
- struct posix_acl *acl,
- subvol_inum snapshot_src,
- unsigned flags)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter dir_iter = { NULL };
- struct btree_iter inode_iter = { NULL };
- subvol_inum new_inum = dir;
- u64 now = bch2_current_time(c);
- u64 cpu = raw_smp_processor_id();
- u64 dir_target;
- u32 snapshot;
- unsigned dir_type = mode_to_type(mode);
- int ret;
-
- ret = bch2_subvolume_get_snapshot(trans, dir.subvol, &snapshot);
- if (ret)
- goto err;
-
- ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir,
- BTREE_ITER_intent|BTREE_ITER_with_updates);
- if (ret)
- goto err;
-
- if (!(flags & BCH_CREATE_SNAPSHOT)) {
- /* Normal create path - allocate a new inode: */
- bch2_inode_init_late(new_inode, now, uid, gid, mode, rdev, dir_u);
-
- if (flags & BCH_CREATE_TMPFILE)
- new_inode->bi_flags |= BCH_INODE_unlinked;
-
- ret = bch2_inode_create(trans, &inode_iter, new_inode, snapshot, cpu);
- if (ret)
- goto err;
-
- snapshot_src = (subvol_inum) { 0 };
- } else {
- /*
- * Creating a snapshot - we're not allocating a new inode, but
- * we do have to lookup the root inode of the subvolume we're
- * snapshotting and update it (in the new snapshot):
- */
-
- if (!snapshot_src.inum) {
- /* Inode wasn't specified, just snapshot: */
- struct bch_subvolume s;
-
- ret = bch2_subvolume_get(trans, snapshot_src.subvol, true,
- BTREE_ITER_cached, &s);
- if (ret)
- goto err;
-
- snapshot_src.inum = le64_to_cpu(s.inode);
- }
-
- ret = bch2_inode_peek(trans, &inode_iter, new_inode, snapshot_src,
- BTREE_ITER_intent);
- if (ret)
- goto err;
-
- if (new_inode->bi_subvol != snapshot_src.subvol) {
- /* Not a subvolume root: */
- ret = -EINVAL;
- goto err;
- }
-
- /*
- * If we're not root, we have to own the subvolume being
- * snapshotted:
- */
- if (uid && new_inode->bi_uid != uid) {
- ret = -EPERM;
- goto err;
- }
-
- flags |= BCH_CREATE_SUBVOL;
- }
-
- new_inum.inum = new_inode->bi_inum;
- dir_target = new_inode->bi_inum;
-
- if (flags & BCH_CREATE_SUBVOL) {
- u32 new_subvol, dir_snapshot;
-
- ret = bch2_subvolume_create(trans, new_inode->bi_inum,
- dir.subvol,
- snapshot_src.subvol,
- &new_subvol, &snapshot,
- (flags & BCH_CREATE_SNAPSHOT_RO) != 0);
- if (ret)
- goto err;
-
- new_inode->bi_parent_subvol = dir.subvol;
- new_inode->bi_subvol = new_subvol;
- new_inum.subvol = new_subvol;
- dir_target = new_subvol;
- dir_type = DT_SUBVOL;
-
- ret = bch2_subvolume_get_snapshot(trans, dir.subvol, &dir_snapshot);
- if (ret)
- goto err;
-
- bch2_btree_iter_set_snapshot(&dir_iter, dir_snapshot);
- ret = bch2_btree_iter_traverse(&dir_iter);
- if (ret)
- goto err;
- }
-
- if (!(flags & BCH_CREATE_SNAPSHOT)) {
- if (default_acl) {
- ret = bch2_set_acl_trans(trans, new_inum, new_inode,
- default_acl, ACL_TYPE_DEFAULT);
- if (ret)
- goto err;
- }
-
- if (acl) {
- ret = bch2_set_acl_trans(trans, new_inum, new_inode,
- acl, ACL_TYPE_ACCESS);
- if (ret)
- goto err;
- }
- }
-
- if (!(flags & BCH_CREATE_TMPFILE)) {
- struct bch_hash_info dir_hash = bch2_hash_info_init(c, dir_u);
- u64 dir_offset;
-
- if (is_subdir_for_nlink(new_inode))
- dir_u->bi_nlink++;
- dir_u->bi_mtime = dir_u->bi_ctime = now;
-
- ret = bch2_inode_write(trans, &dir_iter, dir_u);
- if (ret)
- goto err;
-
- ret = bch2_dirent_create(trans, dir, &dir_hash,
- dir_type,
- name,
- dir_target,
- &dir_offset,
- STR_HASH_must_create|BTREE_ITER_with_updates);
- if (ret)
- goto err;
-
- new_inode->bi_dir = dir_u->bi_inum;
- new_inode->bi_dir_offset = dir_offset;
- }
-
- inode_iter.flags &= ~BTREE_ITER_all_snapshots;
- bch2_btree_iter_set_snapshot(&inode_iter, snapshot);
-
- ret = bch2_btree_iter_traverse(&inode_iter) ?:
- bch2_inode_write(trans, &inode_iter, new_inode);
-err:
- bch2_trans_iter_exit(trans, &inode_iter);
- bch2_trans_iter_exit(trans, &dir_iter);
- return ret;
-}
-
-int bch2_link_trans(struct btree_trans *trans,
- subvol_inum dir, struct bch_inode_unpacked *dir_u,
- subvol_inum inum, struct bch_inode_unpacked *inode_u,
- const struct qstr *name)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter dir_iter = { NULL };
- struct btree_iter inode_iter = { NULL };
- struct bch_hash_info dir_hash;
- u64 now = bch2_current_time(c);
- u64 dir_offset = 0;
- int ret;
-
- if (dir.subvol != inum.subvol)
- return -EXDEV;
-
- ret = bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_intent);
- if (ret)
- return ret;
-
- inode_u->bi_ctime = now;
- ret = bch2_inode_nlink_inc(inode_u);
- if (ret)
- goto err;
-
- ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_intent);
- if (ret)
- goto err;
-
- if (bch2_reinherit_attrs(inode_u, dir_u)) {
- ret = -EXDEV;
- goto err;
- }
-
- dir_u->bi_mtime = dir_u->bi_ctime = now;
-
- dir_hash = bch2_hash_info_init(c, dir_u);
-
- ret = bch2_dirent_create(trans, dir, &dir_hash,
- mode_to_type(inode_u->bi_mode),
- name, inum.inum, &dir_offset,
- STR_HASH_must_create);
- if (ret)
- goto err;
-
- inode_u->bi_dir = dir.inum;
- inode_u->bi_dir_offset = dir_offset;
-
- ret = bch2_inode_write(trans, &dir_iter, dir_u) ?:
- bch2_inode_write(trans, &inode_iter, inode_u);
-err:
- bch2_trans_iter_exit(trans, &dir_iter);
- bch2_trans_iter_exit(trans, &inode_iter);
- return ret;
-}
-
-int bch2_unlink_trans(struct btree_trans *trans,
- subvol_inum dir,
- struct bch_inode_unpacked *dir_u,
- struct bch_inode_unpacked *inode_u,
- const struct qstr *name,
- bool deleting_subvol)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter dir_iter = { NULL };
- struct btree_iter dirent_iter = { NULL };
- struct btree_iter inode_iter = { NULL };
- struct bch_hash_info dir_hash;
- subvol_inum inum;
- u64 now = bch2_current_time(c);
- struct bkey_s_c k;
- int ret;
-
- ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_intent);
- if (ret)
- goto err;
-
- dir_hash = bch2_hash_info_init(c, dir_u);
-
- ret = bch2_dirent_lookup_trans(trans, &dirent_iter, dir, &dir_hash,
- name, &inum, BTREE_ITER_intent);
- if (ret)
- goto err;
-
- ret = bch2_inode_peek(trans, &inode_iter, inode_u, inum,
- BTREE_ITER_intent);
- if (ret)
- goto err;
-
- if (!deleting_subvol && S_ISDIR(inode_u->bi_mode)) {
- ret = bch2_empty_dir_trans(trans, inum);
- if (ret)
- goto err;
- }
-
- if (deleting_subvol && !inode_u->bi_subvol) {
- ret = -BCH_ERR_ENOENT_not_subvol;
- goto err;
- }
-
- if (inode_u->bi_subvol) {
- /* Recursive subvolume destroy not allowed (yet?) */
- ret = bch2_subvol_has_children(trans, inode_u->bi_subvol);
- if (ret)
- goto err;
- }
-
- if (deleting_subvol || inode_u->bi_subvol) {
- ret = bch2_subvolume_unlink(trans, inode_u->bi_subvol);
- if (ret)
- goto err;
-
- k = bch2_btree_iter_peek_slot(&dirent_iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- /*
- * If we're deleting a subvolume, we need to really delete the
- * dirent, not just emit a whiteout in the current snapshot:
- */
- bch2_btree_iter_set_snapshot(&dirent_iter, k.k->p.snapshot);
- ret = bch2_btree_iter_traverse(&dirent_iter);
- if (ret)
- goto err;
- } else {
- bch2_inode_nlink_dec(trans, inode_u);
- }
-
- if (inode_u->bi_dir == dirent_iter.pos.inode &&
- inode_u->bi_dir_offset == dirent_iter.pos.offset) {
- inode_u->bi_dir = 0;
- inode_u->bi_dir_offset = 0;
- }
-
- dir_u->bi_mtime = dir_u->bi_ctime = inode_u->bi_ctime = now;
- dir_u->bi_nlink -= is_subdir_for_nlink(inode_u);
-
- ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
- &dir_hash, &dirent_iter,
- BTREE_UPDATE_internal_snapshot_node) ?:
- bch2_inode_write(trans, &dir_iter, dir_u) ?:
- bch2_inode_write(trans, &inode_iter, inode_u);
-err:
- bch2_trans_iter_exit(trans, &inode_iter);
- bch2_trans_iter_exit(trans, &dirent_iter);
- bch2_trans_iter_exit(trans, &dir_iter);
- return ret;
-}
-
-bool bch2_reinherit_attrs(struct bch_inode_unpacked *dst_u,
- struct bch_inode_unpacked *src_u)
-{
- u64 src, dst;
- unsigned id;
- bool ret = false;
-
- for (id = 0; id < Inode_opt_nr; id++) {
- /* Skip attributes that were explicitly set on this inode */
- if (dst_u->bi_fields_set & (1 << id))
- continue;
-
- src = bch2_inode_opt_get(src_u, id);
- dst = bch2_inode_opt_get(dst_u, id);
-
- if (src == dst)
- continue;
-
- bch2_inode_opt_set(dst_u, id, src);
- ret = true;
- }
-
- return ret;
-}
-
-static int subvol_update_parent(struct btree_trans *trans, u32 subvol, u32 new_parent)
-{
- struct btree_iter iter;
- struct bkey_i_subvolume *s =
- bch2_bkey_get_mut_typed(trans, &iter,
- BTREE_ID_subvolumes, POS(0, subvol),
- BTREE_ITER_cached, subvolume);
- int ret = PTR_ERR_OR_ZERO(s);
- if (ret)
- return ret;
-
- s->v.fs_path_parent = cpu_to_le32(new_parent);
- bch2_trans_iter_exit(trans, &iter);
- return 0;
-}
-
-int bch2_rename_trans(struct btree_trans *trans,
- subvol_inum src_dir, struct bch_inode_unpacked *src_dir_u,
- subvol_inum dst_dir, struct bch_inode_unpacked *dst_dir_u,
- struct bch_inode_unpacked *src_inode_u,
- struct bch_inode_unpacked *dst_inode_u,
- const struct qstr *src_name,
- const struct qstr *dst_name,
- enum bch_rename_mode mode)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter src_dir_iter = { NULL };
- struct btree_iter dst_dir_iter = { NULL };
- struct btree_iter src_inode_iter = { NULL };
- struct btree_iter dst_inode_iter = { NULL };
- struct bch_hash_info src_hash, dst_hash;
- subvol_inum src_inum, dst_inum;
- u64 src_offset, dst_offset;
- u64 now = bch2_current_time(c);
- int ret;
-
- ret = bch2_inode_peek(trans, &src_dir_iter, src_dir_u, src_dir,
- BTREE_ITER_intent);
- if (ret)
- goto err;
-
- src_hash = bch2_hash_info_init(c, src_dir_u);
-
- if (dst_dir.inum != src_dir.inum ||
- dst_dir.subvol != src_dir.subvol) {
- ret = bch2_inode_peek(trans, &dst_dir_iter, dst_dir_u, dst_dir,
- BTREE_ITER_intent);
- if (ret)
- goto err;
-
- dst_hash = bch2_hash_info_init(c, dst_dir_u);
- } else {
- dst_dir_u = src_dir_u;
- dst_hash = src_hash;
- }
-
- ret = bch2_dirent_rename(trans,
- src_dir, &src_hash,
- dst_dir, &dst_hash,
- src_name, &src_inum, &src_offset,
- dst_name, &dst_inum, &dst_offset,
- mode);
- if (ret)
- goto err;
-
- ret = bch2_inode_peek(trans, &src_inode_iter, src_inode_u, src_inum,
- BTREE_ITER_intent);
- if (ret)
- goto err;
-
- if (dst_inum.inum) {
- ret = bch2_inode_peek(trans, &dst_inode_iter, dst_inode_u, dst_inum,
- BTREE_ITER_intent);
- if (ret)
- goto err;
- }
-
- if (src_inode_u->bi_subvol &&
- dst_dir.subvol != src_inode_u->bi_parent_subvol) {
- ret = subvol_update_parent(trans, src_inode_u->bi_subvol, dst_dir.subvol);
- if (ret)
- goto err;
- }
-
- if (mode == BCH_RENAME_EXCHANGE &&
- dst_inode_u->bi_subvol &&
- src_dir.subvol != dst_inode_u->bi_parent_subvol) {
- ret = subvol_update_parent(trans, dst_inode_u->bi_subvol, src_dir.subvol);
- if (ret)
- goto err;
- }
-
- /* Can't move across subvolumes, unless it's a subvolume root: */
- if (src_dir.subvol != dst_dir.subvol &&
- (!src_inode_u->bi_subvol ||
- (dst_inum.inum && !dst_inode_u->bi_subvol))) {
- ret = -EXDEV;
- goto err;
- }
-
- if (src_inode_u->bi_parent_subvol)
- src_inode_u->bi_parent_subvol = dst_dir.subvol;
-
- if ((mode == BCH_RENAME_EXCHANGE) &&
- dst_inode_u->bi_parent_subvol)
- dst_inode_u->bi_parent_subvol = src_dir.subvol;
-
- src_inode_u->bi_dir = dst_dir_u->bi_inum;
- src_inode_u->bi_dir_offset = dst_offset;
-
- if (mode == BCH_RENAME_EXCHANGE) {
- dst_inode_u->bi_dir = src_dir_u->bi_inum;
- dst_inode_u->bi_dir_offset = src_offset;
- }
-
- if (mode == BCH_RENAME_OVERWRITE &&
- dst_inode_u->bi_dir == dst_dir_u->bi_inum &&
- dst_inode_u->bi_dir_offset == src_offset) {
- dst_inode_u->bi_dir = 0;
- dst_inode_u->bi_dir_offset = 0;
- }
-
- if (mode == BCH_RENAME_OVERWRITE) {
- if (S_ISDIR(src_inode_u->bi_mode) !=
- S_ISDIR(dst_inode_u->bi_mode)) {
- ret = -ENOTDIR;
- goto err;
- }
-
- if (S_ISDIR(dst_inode_u->bi_mode)) {
- ret = bch2_empty_dir_trans(trans, dst_inum);
- if (ret)
- goto err;
- }
- }
-
- if (bch2_reinherit_attrs(src_inode_u, dst_dir_u) &&
- S_ISDIR(src_inode_u->bi_mode)) {
- ret = -EXDEV;
- goto err;
- }
-
- if (mode == BCH_RENAME_EXCHANGE &&
- bch2_reinherit_attrs(dst_inode_u, src_dir_u) &&
- S_ISDIR(dst_inode_u->bi_mode)) {
- ret = -EXDEV;
- goto err;
- }
-
- if (is_subdir_for_nlink(src_inode_u)) {
- src_dir_u->bi_nlink--;
- dst_dir_u->bi_nlink++;
- }
-
- if (dst_inum.inum && is_subdir_for_nlink(dst_inode_u)) {
- dst_dir_u->bi_nlink--;
- src_dir_u->bi_nlink += mode == BCH_RENAME_EXCHANGE;
- }
-
- if (mode == BCH_RENAME_OVERWRITE)
- bch2_inode_nlink_dec(trans, dst_inode_u);
-
- src_dir_u->bi_mtime = now;
- src_dir_u->bi_ctime = now;
-
- if (src_dir.inum != dst_dir.inum) {
- dst_dir_u->bi_mtime = now;
- dst_dir_u->bi_ctime = now;
- }
-
- src_inode_u->bi_ctime = now;
-
- if (dst_inum.inum)
- dst_inode_u->bi_ctime = now;
-
- ret = bch2_inode_write(trans, &src_dir_iter, src_dir_u) ?:
- (src_dir.inum != dst_dir.inum
- ? bch2_inode_write(trans, &dst_dir_iter, dst_dir_u)
- : 0) ?:
- bch2_inode_write(trans, &src_inode_iter, src_inode_u) ?:
- (dst_inum.inum
- ? bch2_inode_write(trans, &dst_inode_iter, dst_inode_u)
- : 0);
-err:
- bch2_trans_iter_exit(trans, &dst_inode_iter);
- bch2_trans_iter_exit(trans, &src_inode_iter);
- bch2_trans_iter_exit(trans, &dst_dir_iter);
- bch2_trans_iter_exit(trans, &src_dir_iter);
- return ret;
-}
diff --git a/fs/bcachefs/fs-common.h b/fs/bcachefs/fs-common.h
deleted file mode 100644
index c934e807b380..000000000000
--- a/fs/bcachefs/fs-common.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_FS_COMMON_H
-#define _BCACHEFS_FS_COMMON_H
-
-#include "dirent.h"
-
-struct posix_acl;
-
-#define BCH_CREATE_TMPFILE (1U << 0)
-#define BCH_CREATE_SUBVOL (1U << 1)
-#define BCH_CREATE_SNAPSHOT (1U << 2)
-#define BCH_CREATE_SNAPSHOT_RO (1U << 3)
-
-int bch2_create_trans(struct btree_trans *, subvol_inum,
- struct bch_inode_unpacked *,
- struct bch_inode_unpacked *,
- const struct qstr *,
- uid_t, gid_t, umode_t, dev_t,
- struct posix_acl *,
- struct posix_acl *,
- subvol_inum, unsigned);
-
-int bch2_link_trans(struct btree_trans *,
- subvol_inum, struct bch_inode_unpacked *,
- subvol_inum, struct bch_inode_unpacked *,
- const struct qstr *);
-
-int bch2_unlink_trans(struct btree_trans *, subvol_inum,
- struct bch_inode_unpacked *,
- struct bch_inode_unpacked *,
- const struct qstr *, bool);
-
-int bch2_rename_trans(struct btree_trans *,
- subvol_inum, struct bch_inode_unpacked *,
- subvol_inum, struct bch_inode_unpacked *,
- struct bch_inode_unpacked *,
- struct bch_inode_unpacked *,
- const struct qstr *,
- const struct qstr *,
- enum bch_rename_mode);
-
-bool bch2_reinherit_attrs(struct bch_inode_unpacked *,
- struct bch_inode_unpacked *);
-
-#endif /* _BCACHEFS_FS_COMMON_H */
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
deleted file mode 100644
index 95972809e76d..000000000000
--- a/fs/bcachefs/fs-io-buffered.c
+++ /dev/null
@@ -1,1081 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef NO_BCACHEFS_FS
-
-#include "bcachefs.h"
-#include "alloc_foreground.h"
-#include "bkey_buf.h"
-#include "fs-io.h"
-#include "fs-io-buffered.h"
-#include "fs-io-direct.h"
-#include "fs-io-pagecache.h"
-#include "io_read.h"
-#include "io_write.h"
-
-#include <linux/backing-dev.h>
-#include <linux/pagemap.h>
-#include <linux/writeback.h>
-
-static inline bool bio_full(struct bio *bio, unsigned len)
-{
- if (bio->bi_vcnt >= bio->bi_max_vecs)
- return true;
- if (bio->bi_iter.bi_size > UINT_MAX - len)
- return true;
- return false;
-}
-
-/* readpage(s): */
-
-static void bch2_readpages_end_io(struct bio *bio)
-{
- struct folio_iter fi;
-
- bio_for_each_folio_all(fi, bio)
- folio_end_read(fi.folio, bio->bi_status == BLK_STS_OK);
-
- bio_put(bio);
-}
-
-struct readpages_iter {
- struct address_space *mapping;
- unsigned idx;
- folios folios;
-};
-
-static int readpages_iter_init(struct readpages_iter *iter,
- struct readahead_control *ractl)
-{
- struct folio *folio;
-
- *iter = (struct readpages_iter) { ractl->mapping };
-
- while ((folio = __readahead_folio(ractl))) {
- if (!bch2_folio_create(folio, GFP_KERNEL) ||
- darray_push(&iter->folios, folio)) {
- bch2_folio_release(folio);
- ractl->_nr_pages += folio_nr_pages(folio);
- ractl->_index -= folio_nr_pages(folio);
- return iter->folios.nr ? 0 : -ENOMEM;
- }
-
- folio_put(folio);
- }
-
- return 0;
-}
-
-static inline struct folio *readpage_iter_peek(struct readpages_iter *iter)
-{
- if (iter->idx >= iter->folios.nr)
- return NULL;
- return iter->folios.data[iter->idx];
-}
-
-static inline void readpage_iter_advance(struct readpages_iter *iter)
-{
- iter->idx++;
-}
-
-static bool extent_partial_reads_expensive(struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- struct bch_extent_crc_unpacked crc;
- const union bch_extent_entry *i;
-
- bkey_for_each_crc(k.k, ptrs, crc, i)
- if (crc.csum_type || crc.compression_type)
- return true;
- return false;
-}
-
-static int readpage_bio_extend(struct btree_trans *trans,
- struct readpages_iter *iter,
- struct bio *bio,
- unsigned sectors_this_extent,
- bool get_more)
-{
- /* Don't hold btree locks while allocating memory: */
- bch2_trans_unlock(trans);
-
- while (bio_sectors(bio) < sectors_this_extent &&
- bio->bi_vcnt < bio->bi_max_vecs) {
- struct folio *folio = readpage_iter_peek(iter);
- int ret;
-
- if (folio) {
- readpage_iter_advance(iter);
- } else {
- pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
-
- if (!get_more)
- break;
-
- folio = xa_load(&iter->mapping->i_pages, folio_offset);
- if (folio && !xa_is_value(folio))
- break;
-
- folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), 0);
- if (!folio)
- break;
-
- if (!__bch2_folio_create(folio, GFP_KERNEL)) {
- folio_put(folio);
- break;
- }
-
- ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_KERNEL);
- if (ret) {
- __bch2_folio_release(folio);
- folio_put(folio);
- break;
- }
-
- folio_put(folio);
- }
-
- BUG_ON(folio_sector(folio) != bio_end_sector(bio));
-
- BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
- }
-
- return bch2_trans_relock(trans);
-}
-
-static void bchfs_read(struct btree_trans *trans,
- struct bch_read_bio *rbio,
- subvol_inum inum,
- struct readpages_iter *readpages_iter)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_buf sk;
- int flags = BCH_READ_RETRY_IF_STALE|
- BCH_READ_MAY_PROMOTE;
- int ret = 0;
-
- rbio->c = c;
- rbio->start_time = local_clock();
- rbio->subvol = inum.subvol;
-
- bch2_bkey_buf_init(&sk);
- bch2_trans_begin(trans);
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- POS(inum.inum, rbio->bio.bi_iter.bi_sector),
- BTREE_ITER_slots);
- while (1) {
- struct bkey_s_c k;
- unsigned bytes, sectors, offset_into_extent;
- enum btree_id data_btree = BTREE_ID_extents;
-
- bch2_trans_begin(trans);
-
- u32 snapshot;
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- goto err;
-
- bch2_btree_iter_set_snapshot(&iter, snapshot);
-
- bch2_btree_iter_set_pos(&iter,
- POS(inum.inum, rbio->bio.bi_iter.bi_sector));
-
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- offset_into_extent = iter.pos.offset -
- bkey_start_offset(k.k);
- sectors = k.k->size - offset_into_extent;
-
- bch2_bkey_buf_reassemble(&sk, c, k);
-
- ret = bch2_read_indirect_extent(trans, &data_btree,
- &offset_into_extent, &sk);
- if (ret)
- goto err;
-
- k = bkey_i_to_s_c(sk.k);
-
- sectors = min(sectors, k.k->size - offset_into_extent);
-
- if (readpages_iter) {
- ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors,
- extent_partial_reads_expensive(k));
- if (ret)
- goto err;
- }
-
- bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
- swap(rbio->bio.bi_iter.bi_size, bytes);
-
- if (rbio->bio.bi_iter.bi_size == bytes)
- flags |= BCH_READ_LAST_FRAGMENT;
-
- bch2_bio_page_state_set(&rbio->bio, k);
-
- bch2_read_extent(trans, rbio, iter.pos,
- data_btree, k, offset_into_extent, flags);
-
- if (flags & BCH_READ_LAST_FRAGMENT)
- break;
-
- swap(rbio->bio.bi_iter.bi_size, bytes);
- bio_advance(&rbio->bio, bytes);
-err:
- if (ret &&
- !bch2_err_matches(ret, BCH_ERR_transaction_restart))
- break;
- }
- bch2_trans_iter_exit(trans, &iter);
-
- if (ret) {
- bch_err_inum_offset_ratelimited(c,
- iter.pos.inode,
- iter.pos.offset << 9,
- "read error %i from btree lookup", ret);
- rbio->bio.bi_status = BLK_STS_IOERR;
- bio_endio(&rbio->bio);
- }
-
- bch2_bkey_buf_exit(&sk, c);
-}
-
-void bch2_readahead(struct readahead_control *ractl)
-{
- struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_io_opts opts;
- struct folio *folio;
- struct readpages_iter readpages_iter;
-
- bch2_inode_opts_get(&opts, c, &inode->ei_inode);
-
- int ret = readpages_iter_init(&readpages_iter, ractl);
- if (ret)
- return;
-
- bch2_pagecache_add_get(inode);
-
- struct btree_trans *trans = bch2_trans_get(c);
- while ((folio = readpage_iter_peek(&readpages_iter))) {
- unsigned n = min_t(unsigned,
- readpages_iter.folios.nr -
- readpages_iter.idx,
- BIO_MAX_VECS);
- struct bch_read_bio *rbio =
- rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
- GFP_KERNEL, &c->bio_read),
- opts);
-
- readpage_iter_advance(&readpages_iter);
-
- rbio->bio.bi_iter.bi_sector = folio_sector(folio);
- rbio->bio.bi_end_io = bch2_readpages_end_io;
- BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
-
- bchfs_read(trans, rbio, inode_inum(inode),
- &readpages_iter);
- bch2_trans_unlock(trans);
- }
- bch2_trans_put(trans);
-
- bch2_pagecache_add_put(inode);
-
- darray_exit(&readpages_iter.folios);
-}
-
-static void bch2_read_single_folio_end_io(struct bio *bio)
-{
- complete(bio->bi_private);
-}
-
-int bch2_read_single_folio(struct folio *folio, struct address_space *mapping)
-{
- struct bch_inode_info *inode = to_bch_ei(mapping->host);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_read_bio *rbio;
- struct bch_io_opts opts;
- int ret;
- DECLARE_COMPLETION_ONSTACK(done);
-
- if (!bch2_folio_create(folio, GFP_KERNEL))
- return -ENOMEM;
-
- bch2_inode_opts_get(&opts, c, &inode->ei_inode);
-
- rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read),
- opts);
- rbio->bio.bi_private = &done;
- rbio->bio.bi_end_io = bch2_read_single_folio_end_io;
-
- rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
- rbio->bio.bi_iter.bi_sector = folio_sector(folio);
- BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
-
- bch2_trans_run(c, (bchfs_read(trans, rbio, inode_inum(inode), NULL), 0));
- wait_for_completion(&done);
-
- ret = blk_status_to_errno(rbio->bio.bi_status);
- bio_put(&rbio->bio);
-
- if (ret < 0)
- return ret;
-
- folio_mark_uptodate(folio);
- return 0;
-}
-
-int bch2_read_folio(struct file *file, struct folio *folio)
-{
- int ret;
-
- ret = bch2_read_single_folio(folio, folio->mapping);
- folio_unlock(folio);
- return bch2_err_class(ret);
-}
-
-/* writepages: */
-
-struct bch_writepage_io {
- struct bch_inode_info *inode;
-
- /* must be last: */
- struct bch_write_op op;
-};
-
-struct bch_writepage_state {
- struct bch_writepage_io *io;
- struct bch_io_opts opts;
- struct bch_folio_sector *tmp;
- unsigned tmp_sectors;
-};
-
-static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
- struct bch_inode_info *inode)
-{
- struct bch_writepage_state ret = { 0 };
-
- bch2_inode_opts_get(&ret.opts, c, &inode->ei_inode);
- return ret;
-}
-
-/*
- * Determine when a writepage io is full. We have to limit writepage bios to a
- * single page per bvec (i.e. 1MB with 4k pages) because that is the limit to
- * what the bounce path in bch2_write_extent() can handle. In theory we could
- * loosen this restriction for non-bounce I/O, but we don't have that context
- * here. Ideally, we can up this limit and make it configurable in the future
- * when the bounce path can be enhanced to accommodate larger source bios.
- */
-static inline bool bch_io_full(struct bch_writepage_io *io, unsigned len)
-{
- struct bio *bio = &io->op.wbio.bio;
- return bio_full(bio, len) ||
- (bio->bi_iter.bi_size + len > BIO_MAX_VECS * PAGE_SIZE);
-}
-
-static void bch2_writepage_io_done(struct bch_write_op *op)
-{
- struct bch_writepage_io *io =
- container_of(op, struct bch_writepage_io, op);
- struct bch_fs *c = io->op.c;
- struct bio *bio = &io->op.wbio.bio;
- struct folio_iter fi;
- unsigned i;
-
- if (io->op.error) {
- set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
-
- bio_for_each_folio_all(fi, bio) {
- struct bch_folio *s;
-
- mapping_set_error(fi.folio->mapping, -EIO);
-
- s = __bch2_folio(fi.folio);
- spin_lock(&s->lock);
- for (i = 0; i < folio_sectors(fi.folio); i++)
- s->s[i].nr_replicas = 0;
- spin_unlock(&s->lock);
- }
- }
-
- if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
- bio_for_each_folio_all(fi, bio) {
- struct bch_folio *s;
-
- s = __bch2_folio(fi.folio);
- spin_lock(&s->lock);
- for (i = 0; i < folio_sectors(fi.folio); i++)
- s->s[i].nr_replicas = 0;
- spin_unlock(&s->lock);
- }
- }
-
- /*
- * racing with fallocate can cause us to add fewer sectors than
- * expected - but we shouldn't add more sectors than expected:
- */
- WARN_ON_ONCE(io->op.i_sectors_delta > 0);
-
- /*
-	 * An error partway through a page (e.g. due to going read-only) can
-	 * throw the sector accounting off slightly, which is why this
-	 * stricter assertion stays disabled:
-	 * BUG_ON(io->op.i_sectors_delta >= PAGE_SECTORS);
- */
-
- /*
- * The writeback flag is effectively our ref on the inode -
- * fixup i_blocks before calling folio_end_writeback:
- */
- bch2_i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
-
- bio_for_each_folio_all(fi, bio) {
- struct bch_folio *s = __bch2_folio(fi.folio);
-
- if (atomic_dec_and_test(&s->write_count))
- folio_end_writeback(fi.folio);
- }
-
- bio_put(&io->op.wbio.bio);
-}
-
-static void bch2_writepage_do_io(struct bch_writepage_state *w)
-{
- struct bch_writepage_io *io = w->io;
-
- w->io = NULL;
- closure_call(&io->op.cl, bch2_write, NULL, NULL);
-}
-
-/*
- * Get a bch_writepage_io and add @page to it - appending to an existing one if
- * possible, else allocating a new one:
- */
-static void bch2_writepage_io_alloc(struct bch_fs *c,
- struct writeback_control *wbc,
- struct bch_writepage_state *w,
- struct bch_inode_info *inode,
- u64 sector,
- unsigned nr_replicas)
-{
- struct bch_write_op *op;
-
- w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
- REQ_OP_WRITE,
- GFP_KERNEL,
- &c->writepage_bioset),
- struct bch_writepage_io, op.wbio.bio);
-
- w->io->inode = inode;
- op = &w->io->op;
- bch2_write_op_init(op, c, w->opts);
- op->target = w->opts.foreground_target;
- op->nr_replicas = nr_replicas;
- op->res.nr_replicas = nr_replicas;
- op->write_point = writepoint_hashed(inode->ei_last_dirtied);
- op->subvol = inode->ei_inum.subvol;
- op->pos = POS(inode->v.i_ino, sector);
- op->end_io = bch2_writepage_io_done;
- op->devs_need_flush = &inode->ei_devs_need_flush;
- op->wbio.bio.bi_iter.bi_sector = sector;
- op->wbio.bio.bi_opf = wbc_to_write_flags(wbc);
-}
-
-static int __bch2_writepage(struct folio *folio,
- struct writeback_control *wbc,
- void *data)
-{
- struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_writepage_state *w = data;
- struct bch_folio *s;
- unsigned i, offset, f_sectors, nr_replicas_this_write = U32_MAX;
- loff_t i_size = i_size_read(&inode->v);
- int ret;
-
- EBUG_ON(!folio_test_uptodate(folio));
-
- /* Is the folio fully inside i_size? */
- if (folio_end_pos(folio) <= i_size)
- goto do_io;
-
- /* Is the folio fully outside i_size? (truncate in progress) */
- if (folio_pos(folio) >= i_size) {
- folio_unlock(folio);
- return 0;
- }
-
- /*
- * The folio straddles i_size. It must be zeroed out on each and every
- * writepage invocation because it may be mmapped. "A file is mapped
- * in multiples of the folio size. For a file that is not a multiple of
- * the folio size, the remaining memory is zeroed when mapped, and
- * writes to that region are not written out to the file."
- */
- folio_zero_segment(folio,
- i_size - folio_pos(folio),
- folio_size(folio));
-do_io:
- f_sectors = folio_sectors(folio);
- s = bch2_folio(folio);
-
- if (f_sectors > w->tmp_sectors) {
- kfree(w->tmp);
- w->tmp = kcalloc(f_sectors, sizeof(struct bch_folio_sector), GFP_NOFS|__GFP_NOFAIL);
- w->tmp_sectors = f_sectors;
- }
-
-	/*
-	 * Errors during writeback are hard to handle cleanly, so the disk
-	 * reservation is taken without ENOSPC checking (i.e. with
-	 * BCH_DISK_RESERVATION_NOFAIL) and should not fail here:
-	 */
- ret = bch2_get_folio_disk_reservation(c, inode, folio, false);
- BUG_ON(ret);
-
- /* Before unlocking the page, get copy of reservations: */
- spin_lock(&s->lock);
- memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors);
-
- for (i = 0; i < f_sectors; i++) {
- if (s->s[i].state < SECTOR_dirty)
- continue;
-
- nr_replicas_this_write =
- min_t(unsigned, nr_replicas_this_write,
- s->s[i].nr_replicas +
- s->s[i].replicas_reserved);
- }
-
- for (i = 0; i < f_sectors; i++) {
- if (s->s[i].state < SECTOR_dirty)
- continue;
-
- s->s[i].nr_replicas = w->opts.compression
- ? 0 : nr_replicas_this_write;
-
- s->s[i].replicas_reserved = 0;
- bch2_folio_sector_set(folio, s, i, SECTOR_allocated);
- }
- spin_unlock(&s->lock);
-
- BUG_ON(atomic_read(&s->write_count));
- atomic_set(&s->write_count, 1);
-
- BUG_ON(folio_test_writeback(folio));
- folio_start_writeback(folio);
-
- folio_unlock(folio);
-
- offset = 0;
- while (1) {
- unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
- u64 sector;
-
- while (offset < f_sectors &&
- w->tmp[offset].state < SECTOR_dirty)
- offset++;
-
- if (offset == f_sectors)
- break;
-
- while (offset + sectors < f_sectors &&
- w->tmp[offset + sectors].state >= SECTOR_dirty) {
- reserved_sectors += w->tmp[offset + sectors].replicas_reserved;
- dirty_sectors += w->tmp[offset + sectors].state == SECTOR_dirty;
- sectors++;
- }
- BUG_ON(!sectors);
-
- sector = folio_sector(folio) + offset;
-
- if (w->io &&
- (w->io->op.res.nr_replicas != nr_replicas_this_write ||
- bch_io_full(w->io, sectors << 9) ||
- bio_end_sector(&w->io->op.wbio.bio) != sector))
- bch2_writepage_do_io(w);
-
- if (!w->io)
- bch2_writepage_io_alloc(c, wbc, w, inode, sector,
- nr_replicas_this_write);
-
- atomic_inc(&s->write_count);
-
- BUG_ON(inode != w->io->inode);
- BUG_ON(!bio_add_folio(&w->io->op.wbio.bio, folio,
- sectors << 9, offset << 9));
-
- /* Check for writing past i_size: */
- WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
- round_up(i_size, block_bytes(c)) &&
- !test_bit(BCH_FS_emergency_ro, &c->flags),
- "writing past i_size: %llu > %llu (unrounded %llu)\n",
- bio_end_sector(&w->io->op.wbio.bio) << 9,
- round_up(i_size, block_bytes(c)),
- i_size);
-
- w->io->op.res.sectors += reserved_sectors;
- w->io->op.i_sectors_delta -= dirty_sectors;
- w->io->op.new_i_size = i_size;
-
- offset += sectors;
- }
-
- if (atomic_dec_and_test(&s->write_count))
- folio_end_writeback(folio);
-
- return 0;
-}
-
-int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
-{
- struct bch_fs *c = mapping->host->i_sb->s_fs_info;
- struct bch_writepage_state w =
- bch_writepage_state_init(c, to_bch_ei(mapping->host));
- struct blk_plug plug;
- int ret;
-
- blk_start_plug(&plug);
- ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
- if (w.io)
- bch2_writepage_do_io(&w);
- blk_finish_plug(&plug);
- kfree(w.tmp);
- return bch2_err_class(ret);
-}
-
-/* buffered writes: */
-
-int bch2_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct folio **foliop, void **fsdata)
-{
- struct bch_inode_info *inode = to_bch_ei(mapping->host);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch2_folio_reservation *res;
- struct folio *folio;
- unsigned offset;
- int ret = -ENOMEM;
-
- res = kmalloc(sizeof(*res), GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
- bch2_folio_reservation_init(c, inode, res);
- *fsdata = res;
-
- bch2_pagecache_add_get(inode);
-
- folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
- FGP_WRITEBEGIN | fgf_set_order(len),
- mapping_gfp_mask(mapping));
- if (IS_ERR_OR_NULL(folio))
- goto err_unlock;
-
- offset = pos - folio_pos(folio);
- len = min_t(size_t, len, folio_end_pos(folio) - pos);
-
- if (folio_test_uptodate(folio))
- goto out;
-
-	/* If we're writing the entire folio, we don't need to read it in first: */
- if (!offset && len == folio_size(folio))
- goto out;
-
- if (!offset && pos + len >= inode->v.i_size) {
- folio_zero_segment(folio, len, folio_size(folio));
- flush_dcache_folio(folio);
- goto out;
- }
-
- if (folio_pos(folio) >= inode->v.i_size) {
- folio_zero_segments(folio, 0, offset, offset + len, folio_size(folio));
- flush_dcache_folio(folio);
- goto out;
- }
-readpage:
- ret = bch2_read_single_folio(folio, mapping);
- if (ret)
- goto err;
-out:
- ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
- if (ret)
- goto err;
-
- ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
- if (ret) {
- if (!folio_test_uptodate(folio)) {
- /*
- * If the folio hasn't been read in, we won't know if we
- * actually need a reservation - we don't actually need
- * to read here, we just need to check if the folio is
- * fully backed by uncompressed data:
- */
- goto readpage;
- }
-
- goto err;
- }
-
- *foliop = folio;
- return 0;
-err:
- folio_unlock(folio);
- folio_put(folio);
-err_unlock:
- bch2_pagecache_add_put(inode);
- kfree(res);
- *fsdata = NULL;
- return bch2_err_class(ret);
-}
-
-int bch2_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct folio *folio, void *fsdata)
-{
- struct bch_inode_info *inode = to_bch_ei(mapping->host);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch2_folio_reservation *res = fsdata;
- unsigned offset = pos - folio_pos(folio);
-
- lockdep_assert_held(&inode->v.i_rwsem);
- BUG_ON(offset + copied > folio_size(folio));
-
- if (unlikely(copied < len && !folio_test_uptodate(folio))) {
- /*
- * The folio needs to be read in, but that would destroy
- * our partial write - simplest thing is to just force
- * userspace to redo the write:
- */
- folio_zero_range(folio, 0, folio_size(folio));
- flush_dcache_folio(folio);
- copied = 0;
- }
-
- spin_lock(&inode->v.i_lock);
- if (pos + copied > inode->v.i_size)
- i_size_write(&inode->v, pos + copied);
- spin_unlock(&inode->v.i_lock);
-
- if (copied) {
- if (!folio_test_uptodate(folio))
- folio_mark_uptodate(folio);
-
- bch2_set_folio_dirty(c, inode, folio, res, offset, copied);
-
- inode->ei_last_dirtied = (unsigned long) current;
- }
-
- folio_unlock(folio);
- folio_put(folio);
- bch2_pagecache_add_put(inode);
-
- bch2_folio_reservation_put(c, inode, res);
- kfree(res);
-
- return copied;
-}
-
-static noinline void folios_trunc(folios *fs, struct folio **fi)
-{
- while (fs->data + fs->nr > fi) {
- struct folio *f = darray_pop(fs);
-
- folio_unlock(f);
- folio_put(f);
- }
-}
-
-static int __bch2_buffered_write(struct bch_inode_info *inode,
- struct address_space *mapping,
- struct iov_iter *iter,
- loff_t pos, unsigned len)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch2_folio_reservation res;
- folios fs;
- struct folio *f;
- unsigned copied = 0, f_offset, f_copied;
- u64 end = pos + len, f_pos, f_len;
- loff_t last_folio_pos = inode->v.i_size;
- int ret = 0;
-
- BUG_ON(!len);
-
- bch2_folio_reservation_init(c, inode, &res);
- darray_init(&fs);
-
- ret = bch2_filemap_get_contig_folios_d(mapping, pos, end,
- FGP_WRITEBEGIN | fgf_set_order(len),
- mapping_gfp_mask(mapping), &fs);
- if (ret)
- goto out;
-
- BUG_ON(!fs.nr);
-
- f = darray_first(fs);
- if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
- ret = bch2_read_single_folio(f, mapping);
- if (ret)
- goto out;
- }
-
- f = darray_last(fs);
- end = min(end, folio_end_pos(f));
- last_folio_pos = folio_pos(f);
- if (end != folio_end_pos(f) && !folio_test_uptodate(f)) {
- if (end >= inode->v.i_size) {
- folio_zero_range(f, 0, folio_size(f));
- } else {
- ret = bch2_read_single_folio(f, mapping);
- if (ret)
- goto out;
- }
- }
-
- ret = bch2_folio_set(c, inode_inum(inode), fs.data, fs.nr);
- if (ret)
- goto out;
-
- f_pos = pos;
- f_offset = pos - folio_pos(darray_first(fs));
- darray_for_each(fs, fi) {
- ssize_t f_reserved;
-
- f = *fi;
- f_len = min(end, folio_end_pos(f)) - f_pos;
- f_reserved = bch2_folio_reservation_get_partial(c, inode, f, &res, f_offset, f_len);
-
- if (unlikely(f_reserved != f_len)) {
- if (f_reserved < 0) {
- if (f == darray_first(fs)) {
- ret = f_reserved;
- goto out;
- }
-
- folios_trunc(&fs, fi);
- end = min(end, folio_end_pos(darray_last(fs)));
- } else {
- if (!folio_test_uptodate(f)) {
- ret = bch2_read_single_folio(f, mapping);
- if (ret)
- goto out;
- }
-
- folios_trunc(&fs, fi + 1);
- end = f_pos + f_reserved;
- }
-
- break;
- }
-
- f_pos = folio_end_pos(f);
- f_offset = 0;
- }
-
- if (mapping_writably_mapped(mapping))
- darray_for_each(fs, fi)
- flush_dcache_folio(*fi);
-
- f_pos = pos;
- f_offset = pos - folio_pos(darray_first(fs));
- darray_for_each(fs, fi) {
- f = *fi;
- f_len = min(end, folio_end_pos(f)) - f_pos;
- f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
- if (!f_copied) {
- folios_trunc(&fs, fi);
- break;
- }
-
- if (!folio_test_uptodate(f) &&
- f_copied != folio_size(f) &&
- pos + copied + f_copied < inode->v.i_size) {
- iov_iter_revert(iter, f_copied);
- folio_zero_range(f, 0, folio_size(f));
- folios_trunc(&fs, fi);
- break;
- }
-
- flush_dcache_folio(f);
- copied += f_copied;
-
- if (f_copied != f_len) {
- folios_trunc(&fs, fi + 1);
- break;
- }
-
- f_pos = folio_end_pos(f);
- f_offset = 0;
- }
-
- if (!copied)
- goto out;
-
- end = pos + copied;
-
- spin_lock(&inode->v.i_lock);
- if (end > inode->v.i_size)
- i_size_write(&inode->v, end);
- spin_unlock(&inode->v.i_lock);
-
- f_pos = pos;
- f_offset = pos - folio_pos(darray_first(fs));
- darray_for_each(fs, fi) {
- f = *fi;
- f_len = min(end, folio_end_pos(f)) - f_pos;
-
- if (!folio_test_uptodate(f))
- folio_mark_uptodate(f);
-
- bch2_set_folio_dirty(c, inode, f, &res, f_offset, f_len);
-
- f_pos = folio_end_pos(f);
- f_offset = 0;
- }
-
- inode->ei_last_dirtied = (unsigned long) current;
-out:
- darray_for_each(fs, fi) {
- folio_unlock(*fi);
- folio_put(*fi);
- }
-
- /*
- * If the last folio added to the mapping starts beyond current EOF, we
- * performed a short write but left around at least one post-EOF folio.
- * Clean up the mapping before we return.
- */
- if (last_folio_pos >= inode->v.i_size)
- truncate_pagecache(&inode->v, inode->v.i_size);
-
- darray_exit(&fs);
- bch2_folio_reservation_put(c, inode, &res);
-
- return copied ?: ret;
-}
-
-static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
-{
- struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct bch_inode_info *inode = file_bch_inode(file);
- loff_t pos = iocb->ki_pos;
- ssize_t written = 0;
- int ret = 0;
-
- bch2_pagecache_add_get(inode);
-
- do {
- unsigned offset = pos & (PAGE_SIZE - 1);
- unsigned bytes = iov_iter_count(iter);
-again:
- /*
- * Bring in the user page that we will copy from _first_.
- * Otherwise there's a nasty deadlock on copying from the
- * same page as we're writing to, without it being marked
- * up-to-date.
- *
- * Not only is this an optimisation, but it is also required
- * to check that the address is actually valid, when atomic
- * usercopies are used, below.
- */
- if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
- bytes = min_t(unsigned long, iov_iter_count(iter),
- PAGE_SIZE - offset);
-
- if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
- ret = -EFAULT;
- break;
- }
- }
-
- if (unlikely(fatal_signal_pending(current))) {
- ret = -EINTR;
- break;
- }
-
- ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
- if (unlikely(ret < 0))
- break;
-
- cond_resched();
-
- if (unlikely(ret == 0)) {
- /*
- * If we were unable to copy any data at all, we must
- * fall back to a single segment length write.
- *
-			 * If we didn't fall back here, we could livelock
- * because not all segments in the iov can be copied at
- * once without a pagefault.
- */
- bytes = min_t(unsigned long, PAGE_SIZE - offset,
- iov_iter_single_seg_count(iter));
- goto again;
- }
- pos += ret;
- written += ret;
- ret = 0;
-
- balance_dirty_pages_ratelimited(mapping);
- } while (iov_iter_count(iter));
-
- bch2_pagecache_add_put(inode);
-
- return written ? written : ret;
-}
-
-ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
- struct file *file = iocb->ki_filp;
- struct bch_inode_info *inode = file_bch_inode(file);
- ssize_t ret;
-
- if (iocb->ki_flags & IOCB_DIRECT) {
- ret = bch2_direct_write(iocb, from);
- goto out;
- }
-
- inode_lock(&inode->v);
-
- ret = generic_write_checks(iocb, from);
- if (ret <= 0)
- goto unlock;
-
- ret = file_remove_privs(file);
- if (ret)
- goto unlock;
-
- ret = file_update_time(file);
- if (ret)
- goto unlock;
-
- ret = bch2_buffered_write(iocb, from);
- if (likely(ret > 0))
- iocb->ki_pos += ret;
-unlock:
- inode_unlock(&inode->v);
-
- if (ret > 0)
- ret = generic_write_sync(iocb, ret);
-out:
- return bch2_err_class(ret);
-}
-
-void bch2_fs_fs_io_buffered_exit(struct bch_fs *c)
-{
- bioset_exit(&c->writepage_bioset);
-}
-
-int bch2_fs_fs_io_buffered_init(struct bch_fs *c)
-{
- if (bioset_init(&c->writepage_bioset,
- 4, offsetof(struct bch_writepage_io, op.wbio.bio),
- BIOSET_NEED_BVECS))
- return -BCH_ERR_ENOMEM_writepage_bioset_init;
-
- return 0;
-}
-
-#endif /* NO_BCACHEFS_FS */
diff --git a/fs/bcachefs/fs-io-buffered.h b/fs/bcachefs/fs-io-buffered.h
deleted file mode 100644
index 3207ebbb4ab4..000000000000
--- a/fs/bcachefs/fs-io-buffered.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_FS_IO_BUFFERED_H
-#define _BCACHEFS_FS_IO_BUFFERED_H
-
-#ifndef NO_BCACHEFS_FS
-
-int bch2_read_single_folio(struct folio *, struct address_space *);
-int bch2_read_folio(struct file *, struct folio *);
-
-int bch2_writepages(struct address_space *, struct writeback_control *);
-void bch2_readahead(struct readahead_control *);
-
-int bch2_write_begin(struct file *, struct address_space *, loff_t pos,
- unsigned len, struct folio **, void **);
-int bch2_write_end(struct file *, struct address_space *, loff_t,
- unsigned len, unsigned copied, struct folio *, void *);
-
-ssize_t bch2_write_iter(struct kiocb *, struct iov_iter *);
-
-void bch2_fs_fs_io_buffered_exit(struct bch_fs *);
-int bch2_fs_fs_io_buffered_init(struct bch_fs *);
-#else
-static inline void bch2_fs_fs_io_buffered_exit(struct bch_fs *c) {}
-static inline int bch2_fs_fs_io_buffered_init(struct bch_fs *c) { return 0; }
-#endif
-
-#endif /* _BCACHEFS_FS_IO_BUFFERED_H */
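The declarations removed above have the kernel's standard address_space_operations callback signatures (folio-based write_begin/write_end, a readahead_control-based readahead). The actual registration is not part of this diff; purely as an illustrative sketch, with a hypothetical table name, callbacks with these signatures are installed along the following lines:

/*
 * Illustrative sketch only - not taken from this diff. Field names are those
 * of struct address_space_operations in include/linux/fs.h; the identifier
 * "example_bch_aops" is hypothetical.
 */
static const struct address_space_operations example_bch_aops = {
	.read_folio		= bch2_read_folio,
	.readahead		= bch2_readahead,
	.writepages		= bch2_writepages,
	.write_begin		= bch2_write_begin,
	.write_end		= bch2_write_end,
	.dirty_folio		= filemap_dirty_folio,
	.invalidate_folio	= bch2_invalidate_folio,
	.release_folio		= bch2_release_folio,
};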
diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c
deleted file mode 100644
index 6d3a05ae5da8..000000000000
--- a/fs/bcachefs/fs-io-direct.c
+++ /dev/null
@@ -1,690 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef NO_BCACHEFS_FS
-
-#include "bcachefs.h"
-#include "alloc_foreground.h"
-#include "fs.h"
-#include "fs-io.h"
-#include "fs-io-direct.h"
-#include "fs-io-pagecache.h"
-#include "io_read.h"
-#include "io_write.h"
-
-#include <linux/kthread.h>
-#include <linux/pagemap.h>
-#include <linux/prefetch.h>
-#include <linux/task_io_accounting_ops.h>
-
-/* O_DIRECT reads */
-
-struct dio_read {
- struct closure cl;
- struct kiocb *req;
- long ret;
- bool should_dirty;
- struct bch_read_bio rbio;
-};
-
-static void bio_check_or_release(struct bio *bio, bool check_dirty)
-{
- if (check_dirty) {
- bio_check_pages_dirty(bio);
- } else {
- bio_release_pages(bio, false);
- bio_put(bio);
- }
-}
-
-static CLOSURE_CALLBACK(bch2_dio_read_complete)
-{
- closure_type(dio, struct dio_read, cl);
-
- dio->req->ki_complete(dio->req, dio->ret);
- bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
-}
-
-static void bch2_direct_IO_read_endio(struct bio *bio)
-{
- struct dio_read *dio = bio->bi_private;
-
- if (bio->bi_status)
- dio->ret = blk_status_to_errno(bio->bi_status);
-
- closure_put(&dio->cl);
-}
-
-static void bch2_direct_IO_read_split_endio(struct bio *bio)
-{
- struct dio_read *dio = bio->bi_private;
- bool should_dirty = dio->should_dirty;
-
- bch2_direct_IO_read_endio(bio);
- bio_check_or_release(bio, should_dirty);
-}
-
-static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
-{
- struct file *file = req->ki_filp;
- struct bch_inode_info *inode = file_bch_inode(file);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_io_opts opts;
- struct dio_read *dio;
- struct bio *bio;
- loff_t offset = req->ki_pos;
- bool sync = is_sync_kiocb(req);
- size_t shorten;
- ssize_t ret;
-
- bch2_inode_opts_get(&opts, c, &inode->ei_inode);
-
- /* bios must be 512 byte aligned: */
- if ((offset|iter->count) & (SECTOR_SIZE - 1))
- return -EINVAL;
-
- ret = min_t(loff_t, iter->count,
- max_t(loff_t, 0, i_size_read(&inode->v) - offset));
-
- if (!ret)
- return ret;
-
- shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
- if (shorten >= iter->count)
- shorten = 0;
- iter->count -= shorten;
-
- bio = bio_alloc_bioset(NULL,
- bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
- REQ_OP_READ,
- GFP_KERNEL,
- &c->dio_read_bioset);
-
- bio->bi_end_io = bch2_direct_IO_read_endio;
-
- dio = container_of(bio, struct dio_read, rbio.bio);
- closure_init(&dio->cl, NULL);
-
- /*
- * this is a _really_ horrible hack just to avoid an atomic sub at the
- * end:
- */
- if (!sync) {
- set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
- atomic_set(&dio->cl.remaining,
- CLOSURE_REMAINING_INITIALIZER -
- CLOSURE_RUNNING +
- CLOSURE_DESTRUCTOR);
- } else {
- atomic_set(&dio->cl.remaining,
- CLOSURE_REMAINING_INITIALIZER + 1);
- dio->cl.closure_get_happened = true;
- }
-
- dio->req = req;
- dio->ret = ret;
- /*
-	 * This is one of the sketchier things I've encountered: we have to
-	 * skip dirtying pages for requests that originate inside the kernel
-	 * (i.e. from loopback), because we would deadlock on page_lock.
- */
- dio->should_dirty = iter_is_iovec(iter);
-
- goto start;
- while (iter->count) {
- bio = bio_alloc_bioset(NULL,
- bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
- REQ_OP_READ,
- GFP_KERNEL,
- &c->bio_read);
- bio->bi_end_io = bch2_direct_IO_read_split_endio;
-start:
- bio->bi_opf = REQ_OP_READ|REQ_SYNC;
- bio->bi_iter.bi_sector = offset >> 9;
- bio->bi_private = dio;
-
- ret = bio_iov_iter_get_pages(bio, iter);
- if (ret < 0) {
- /* XXX: fault inject this path */
- bio->bi_status = BLK_STS_RESOURCE;
- bio_endio(bio);
- break;
- }
-
- offset += bio->bi_iter.bi_size;
-
- if (dio->should_dirty)
- bio_set_pages_dirty(bio);
-
- if (iter->count)
- closure_get(&dio->cl);
-
- bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
- }
-
- iter->count += shorten;
-
- if (sync) {
- closure_sync(&dio->cl);
- closure_debug_destroy(&dio->cl);
- ret = dio->ret;
- bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
- return ret;
- } else {
- return -EIOCBQUEUED;
- }
-}
-
-ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
-{
- struct file *file = iocb->ki_filp;
- struct bch_inode_info *inode = file_bch_inode(file);
- struct address_space *mapping = file->f_mapping;
- size_t count = iov_iter_count(iter);
- ssize_t ret = 0;
-
- if (!count)
- return 0; /* skip atime */
-
- if (iocb->ki_flags & IOCB_DIRECT) {
- struct blk_plug plug;
-
- if (unlikely(mapping->nrpages)) {
- ret = filemap_write_and_wait_range(mapping,
- iocb->ki_pos,
- iocb->ki_pos + count - 1);
- if (ret < 0)
- goto out;
- }
-
- file_accessed(file);
-
- blk_start_plug(&plug);
- ret = bch2_direct_IO_read(iocb, iter);
- blk_finish_plug(&plug);
-
- if (ret >= 0)
- iocb->ki_pos += ret;
- } else {
- bch2_pagecache_add_get(inode);
- ret = filemap_read(iocb, iter, ret);
- bch2_pagecache_add_put(inode);
- }
-out:
- return bch2_err_class(ret);
-}
-
-/* O_DIRECT writes */
-
-struct dio_write {
- struct kiocb *req;
- struct address_space *mapping;
- struct bch_inode_info *inode;
- struct mm_struct *mm;
- const struct iovec *iov;
- unsigned loop:1,
- extending:1,
- sync:1,
- flush:1;
- struct quota_res quota_res;
- u64 written;
-
- struct iov_iter iter;
- struct iovec inline_vecs[2];
-
- /* must be last: */
- struct bch_write_op op;
-};
-
-static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
- u64 offset, u64 size,
- unsigned nr_replicas, bool compressed)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_s_c k;
- u64 end = offset + size;
- u32 snapshot;
- bool ret = true;
- int err;
-retry:
- bch2_trans_begin(trans);
-
- err = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (err)
- goto err;
-
- for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
- SPOS(inum.inum, offset, snapshot),
- BTREE_ITER_slots, k, err) {
- if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
- break;
-
- if (k.k->p.snapshot != snapshot ||
- nr_replicas > bch2_bkey_replicas(c, k) ||
- (!compressed && bch2_bkey_sectors_compressed(k))) {
- ret = false;
- break;
- }
- }
-
- offset = iter.pos.offset;
- bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(err, BCH_ERR_transaction_restart))
- goto retry;
- bch2_trans_put(trans);
-
- return err ? false : ret;
-}
-
-static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
-{
- struct bch_fs *c = dio->op.c;
- struct bch_inode_info *inode = dio->inode;
- struct bio *bio = &dio->op.wbio.bio;
-
- return bch2_check_range_allocated(c, inode_inum(inode),
- dio->op.pos.offset, bio_sectors(bio),
- dio->op.opts.data_replicas,
- dio->op.opts.compression != 0);
-}
-
-static void bch2_dio_write_loop_async(struct bch_write_op *);
-static __always_inline long bch2_dio_write_done(struct dio_write *dio);
-
-/*
- * We're going to return -EIOCBQUEUED, but we haven't finished consuming the
- * iov_iter yet, so we need to stash a copy of the iovec: it might be on the
- * caller's stack, and we're not guaranteed that it will live for the duration
- * of the IO:
- */
-static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
-{
- struct iovec *iov = dio->inline_vecs;
-
- /*
- * iov_iter has a single embedded iovec - nothing to do:
- */
- if (iter_is_ubuf(&dio->iter))
- return 0;
-
- /*
- * We don't currently handle non-iovec iov_iters here - return an error,
- * and we'll fall back to doing the IO synchronously:
- */
- if (!iter_is_iovec(&dio->iter))
- return -1;
-
- if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
- dio->iov = iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
- GFP_KERNEL);
- if (unlikely(!iov))
- return -ENOMEM;
- }
-
- memcpy(iov, dio->iter.__iov, dio->iter.nr_segs * sizeof(*iov));
- dio->iter.__iov = iov;
- return 0;
-}
-
-static CLOSURE_CALLBACK(bch2_dio_write_flush_done)
-{
- closure_type(dio, struct dio_write, op.cl);
- struct bch_fs *c = dio->op.c;
-
- closure_debug_destroy(cl);
-
- dio->op.error = bch2_journal_error(&c->journal);
-
- bch2_dio_write_done(dio);
-}
-
-static noinline void bch2_dio_write_flush(struct dio_write *dio)
-{
- struct bch_fs *c = dio->op.c;
- struct bch_inode_unpacked inode;
- int ret;
-
- dio->flush = 0;
-
- closure_init(&dio->op.cl, NULL);
-
- if (!dio->op.error) {
- ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
- if (ret) {
- dio->op.error = ret;
- } else {
- bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq,
- &dio->op.cl);
- bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
- }
- }
-
- if (dio->sync) {
- closure_sync(&dio->op.cl);
- closure_debug_destroy(&dio->op.cl);
- } else {
- continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
- }
-}
-
-static __always_inline long bch2_dio_write_done(struct dio_write *dio)
-{
- struct bch_fs *c = dio->op.c;
- struct kiocb *req = dio->req;
- struct bch_inode_info *inode = dio->inode;
- bool sync = dio->sync;
- long ret;
-
- if (unlikely(dio->flush)) {
- bch2_dio_write_flush(dio);
- if (!sync)
- return -EIOCBQUEUED;
- }
-
- bch2_pagecache_block_put(inode);
-
- kfree(dio->iov);
-
- ret = dio->op.error ?: ((long) dio->written << 9);
- bio_put(&dio->op.wbio.bio);
-
- bch2_write_ref_put(c, BCH_WRITE_REF_dio_write);
-
- /* inode->i_dio_count is our ref on inode and thus bch_fs */
- inode_dio_end(&inode->v);
-
- if (ret < 0)
- ret = bch2_err_class(ret);
-
- if (!sync) {
- req->ki_complete(req, ret);
- ret = -EIOCBQUEUED;
- }
- return ret;
-}
-
-static __always_inline void bch2_dio_write_end(struct dio_write *dio)
-{
- struct bch_fs *c = dio->op.c;
- struct kiocb *req = dio->req;
- struct bch_inode_info *inode = dio->inode;
- struct bio *bio = &dio->op.wbio.bio;
-
- req->ki_pos += (u64) dio->op.written << 9;
- dio->written += dio->op.written;
-
- if (dio->extending) {
- spin_lock(&inode->v.i_lock);
- if (req->ki_pos > inode->v.i_size)
- i_size_write(&inode->v, req->ki_pos);
- spin_unlock(&inode->v.i_lock);
- }
-
- if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
- mutex_lock(&inode->ei_quota_lock);
- __bch2_i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
- __bch2_quota_reservation_put(c, inode, &dio->quota_res);
- mutex_unlock(&inode->ei_quota_lock);
- }
-
- bio_release_pages(bio, false);
-
- if (unlikely(dio->op.error))
- set_bit(EI_INODE_ERROR, &inode->ei_flags);
-}
-
-static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
-{
- struct bch_fs *c = dio->op.c;
- struct kiocb *req = dio->req;
- struct address_space *mapping = dio->mapping;
- struct bch_inode_info *inode = dio->inode;
- struct bch_io_opts opts;
- struct bio *bio = &dio->op.wbio.bio;
- unsigned unaligned, iter_count;
- bool sync = dio->sync, dropped_locks;
- long ret;
-
- bch2_inode_opts_get(&opts, c, &inode->ei_inode);
-
- while (1) {
- iter_count = dio->iter.count;
-
- EBUG_ON(current->faults_disabled_mapping);
- current->faults_disabled_mapping = mapping;
-
- ret = bio_iov_iter_get_pages(bio, &dio->iter);
-
- dropped_locks = fdm_dropped_locks();
-
- current->faults_disabled_mapping = NULL;
-
- /*
- * If the fault handler returned an error but also signalled
- * that it dropped & retook ei_pagecache_lock, we just need to
- * re-shoot down the page cache and retry:
- */
- if (dropped_locks && ret)
- ret = 0;
-
- if (unlikely(ret < 0))
- goto err;
-
- if (unlikely(dropped_locks)) {
- ret = bch2_write_invalidate_inode_pages_range(mapping,
- req->ki_pos,
- req->ki_pos + iter_count - 1);
- if (unlikely(ret))
- goto err;
-
- if (!bio->bi_iter.bi_size)
- continue;
- }
-
- unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
- bio->bi_iter.bi_size -= unaligned;
- iov_iter_revert(&dio->iter, unaligned);
-
- if (!bio->bi_iter.bi_size) {
- /*
- * bio_iov_iter_get_pages was only able to get <
- * blocksize worth of pages:
- */
- ret = -EFAULT;
- goto err;
- }
-
- bch2_write_op_init(&dio->op, c, opts);
- dio->op.end_io = sync
- ? NULL
- : bch2_dio_write_loop_async;
- dio->op.target = dio->op.opts.foreground_target;
- dio->op.write_point = writepoint_hashed((unsigned long) current);
- dio->op.nr_replicas = dio->op.opts.data_replicas;
- dio->op.subvol = inode->ei_inum.subvol;
- dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
- dio->op.devs_need_flush = &inode->ei_devs_need_flush;
-
- if (sync)
- dio->op.flags |= BCH_WRITE_SYNC;
- dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
-
- ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
- bio_sectors(bio), true);
- if (unlikely(ret))
- goto err;
-
- ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
- dio->op.opts.data_replicas, 0);
- if (unlikely(ret) &&
- !bch2_dio_write_check_allocated(dio))
- goto err;
-
- task_io_account_write(bio->bi_iter.bi_size);
-
- if (unlikely(dio->iter.count) &&
- !dio->sync &&
- !dio->loop &&
- bch2_dio_write_copy_iov(dio))
- dio->sync = sync = true;
-
- dio->loop = true;
- closure_call(&dio->op.cl, bch2_write, NULL, NULL);
-
- if (!sync)
- return -EIOCBQUEUED;
-
- bch2_dio_write_end(dio);
-
- if (likely(!dio->iter.count) || dio->op.error)
- break;
-
- bio_reset(bio, NULL, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
- }
-out:
- return bch2_dio_write_done(dio);
-err:
- dio->op.error = ret;
-
- bio_release_pages(bio, false);
-
- bch2_quota_reservation_put(c, inode, &dio->quota_res);
- goto out;
-}
-
-static noinline __cold void bch2_dio_write_continue(struct dio_write *dio)
-{
- struct mm_struct *mm = dio->mm;
-
- bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);
-
- if (mm)
- kthread_use_mm(mm);
- bch2_dio_write_loop(dio);
- if (mm)
- kthread_unuse_mm(mm);
-}
-
-static void bch2_dio_write_loop_async(struct bch_write_op *op)
-{
- struct dio_write *dio = container_of(op, struct dio_write, op);
-
- bch2_dio_write_end(dio);
-
- if (likely(!dio->iter.count) || dio->op.error)
- bch2_dio_write_done(dio);
- else
- bch2_dio_write_continue(dio);
-}
-
-ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
-{
- struct file *file = req->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct bch_inode_info *inode = file_bch_inode(file);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct dio_write *dio;
- struct bio *bio;
- bool locked = true, extending;
- ssize_t ret;
-
- prefetch(&c->opts);
- prefetch((void *) &c->opts + 64);
- prefetch(&inode->ei_inode);
- prefetch((void *) &inode->ei_inode + 64);
-
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_dio_write))
- return -EROFS;
-
- inode_lock(&inode->v);
-
- ret = generic_write_checks(req, iter);
- if (unlikely(ret <= 0))
- goto err_put_write_ref;
-
- ret = file_remove_privs(file);
- if (unlikely(ret))
- goto err_put_write_ref;
-
- ret = file_update_time(file);
- if (unlikely(ret))
- goto err_put_write_ref;
-
- if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1))) {
- ret = -EINVAL;
- goto err_put_write_ref;
- }
-
- inode_dio_begin(&inode->v);
- bch2_pagecache_block_get(inode);
-
- extending = req->ki_pos + iter->count > inode->v.i_size;
- if (!extending) {
- inode_unlock(&inode->v);
- locked = false;
- }
-
- bio = bio_alloc_bioset(NULL,
- bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
- REQ_OP_WRITE | REQ_SYNC | REQ_IDLE,
- GFP_KERNEL,
- &c->dio_write_bioset);
- dio = container_of(bio, struct dio_write, op.wbio.bio);
- dio->req = req;
- dio->mapping = mapping;
- dio->inode = inode;
- dio->mm = current->mm;
- dio->iov = NULL;
- dio->loop = false;
- dio->extending = extending;
- dio->sync = is_sync_kiocb(req) || extending;
- dio->flush = iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
- dio->quota_res.sectors = 0;
- dio->written = 0;
- dio->iter = *iter;
- dio->op.c = c;
-
- if (unlikely(mapping->nrpages)) {
- ret = bch2_write_invalidate_inode_pages_range(mapping,
- req->ki_pos,
- req->ki_pos + iter->count - 1);
- if (unlikely(ret))
- goto err_put_bio;
- }
-
- ret = bch2_dio_write_loop(dio);
-out:
- if (locked)
- inode_unlock(&inode->v);
- return ret;
-err_put_bio:
- bch2_pagecache_block_put(inode);
- bio_put(bio);
- inode_dio_end(&inode->v);
-err_put_write_ref:
- bch2_write_ref_put(c, BCH_WRITE_REF_dio_write);
- goto out;
-}
-
-void bch2_fs_fs_io_direct_exit(struct bch_fs *c)
-{
- bioset_exit(&c->dio_write_bioset);
- bioset_exit(&c->dio_read_bioset);
-}
-
-int bch2_fs_fs_io_direct_init(struct bch_fs *c)
-{
- if (bioset_init(&c->dio_read_bioset,
- 4, offsetof(struct dio_read, rbio.bio),
- BIOSET_NEED_BVECS))
- return -BCH_ERR_ENOMEM_dio_read_bioset_init;
-
- if (bioset_init(&c->dio_write_bioset,
- 4, offsetof(struct dio_write, op.wbio.bio),
- BIOSET_NEED_BVECS))
- return -BCH_ERR_ENOMEM_dio_write_bioset_init;
-
- return 0;
-}
-
-#endif /* NO_BCACHEFS_FS */
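Note the alignment checks in the file just removed: bch2_direct_IO_read() rejects offsets and lengths that are not 512-byte aligned, and bch2_direct_write() rejects anything not aligned to the filesystem block size. A minimal userspace sketch (not kernel code, and not part of this diff) of an O_DIRECT read that satisfies both for a 4k block size:

/*
 * Userspace illustration only (not kernel code from this diff): an O_DIRECT
 * read whose file offset and length are 4096-byte aligned, satisfying the
 * 512-byte alignment the read path requires (and the filesystem-block
 * alignment the write path would require, assuming a 4k block size). The
 * buffer is aligned too, as is conventional for O_DIRECT.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const size_t len = 4096;
	void *buf;

	if (argc < 2 || posix_memalign(&buf, 4096, len))
		return 1;

	int fd = open(argv[1], O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		free(buf);
		return 1;
	}

	ssize_t ret = pread(fd, buf, len, 0);	/* offset 0: trivially aligned */
	if (ret < 0)
		perror("pread");

	close(fd);
	free(buf);
	return ret < 0;
}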
diff --git a/fs/bcachefs/fs-io-direct.h b/fs/bcachefs/fs-io-direct.h
deleted file mode 100644
index 814621ec7f81..000000000000
--- a/fs/bcachefs/fs-io-direct.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_FS_IO_DIRECT_H
-#define _BCACHEFS_FS_IO_DIRECT_H
-
-#ifndef NO_BCACHEFS_FS
-ssize_t bch2_direct_write(struct kiocb *, struct iov_iter *);
-ssize_t bch2_read_iter(struct kiocb *, struct iov_iter *);
-
-void bch2_fs_fs_io_direct_exit(struct bch_fs *);
-int bch2_fs_fs_io_direct_init(struct bch_fs *);
-#else
-static inline void bch2_fs_fs_io_direct_exit(struct bch_fs *c) {}
-static inline int bch2_fs_fs_io_direct_init(struct bch_fs *c) { return 0; }
-#endif
-
-#endif /* _BCACHEFS_FS_IO_DIRECT_H */
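bch2_read_iter() and bch2_direct_write() above have the standard ->read_iter()/->write_iter() shapes, and bch2_write_iter() (removed earlier in this diff) dispatches to bch2_direct_write() when IOCB_DIRECT is set. The real registration is outside this diff; as a sketch only, with a hypothetical table name and all other methods omitted, such entry points are installed on a file_operations table like so:

/*
 * Illustrative sketch only; other methods (open, mmap, fsync, llseek, ...)
 * are omitted. Field names are those of struct file_operations in
 * include/linux/fs.h; the identifier "example_bch_file_operations" is
 * hypothetical.
 */
static const struct file_operations example_bch_file_operations = {
	.read_iter	= bch2_read_iter,
	.write_iter	= bch2_write_iter,
};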
diff --git a/fs/bcachefs/fs-io-pagecache.c b/fs/bcachefs/fs-io-pagecache.c
deleted file mode 100644
index 1d4910ea0f1d..000000000000
--- a/fs/bcachefs/fs-io-pagecache.c
+++ /dev/null
@@ -1,823 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef NO_BCACHEFS_FS
-
-#include "bcachefs.h"
-#include "btree_iter.h"
-#include "extents.h"
-#include "fs-io.h"
-#include "fs-io-pagecache.h"
-#include "subvolume.h"
-
-#include <linux/pagevec.h>
-#include <linux/writeback.h>
-
-int bch2_filemap_get_contig_folios_d(struct address_space *mapping,
- loff_t start, u64 end,
- fgf_t fgp_flags, gfp_t gfp,
- folios *fs)
-{
- struct folio *f;
- u64 pos = start;
- int ret = 0;
-
- while (pos < end) {
- if ((u64) pos >= (u64) start + (1ULL << 20))
- fgp_flags &= ~FGP_CREAT;
-
- ret = darray_make_room_gfp(fs, 1, gfp & GFP_KERNEL);
- if (ret)
- break;
-
- f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
- if (IS_ERR_OR_NULL(f))
- break;
-
- BUG_ON(fs->nr && folio_pos(f) != pos);
-
- pos = folio_end_pos(f);
- darray_push(fs, f);
- }
-
- if (!fs->nr && !ret && (fgp_flags & FGP_CREAT))
- ret = -ENOMEM;
-
- return fs->nr ? 0 : ret;
-}
-
-/* pagecache_block must be held */
-int bch2_write_invalidate_inode_pages_range(struct address_space *mapping,
- loff_t start, loff_t end)
-{
- int ret;
-
- /*
- * XXX: the way this is currently implemented, we can spin if a process
- * is continually redirtying a specific page
- */
- do {
- if (!mapping->nrpages)
- return 0;
-
- ret = filemap_write_and_wait_range(mapping, start, end);
- if (ret)
- break;
-
- if (!mapping->nrpages)
- return 0;
-
- ret = invalidate_inode_pages2_range(mapping,
- start >> PAGE_SHIFT,
- end >> PAGE_SHIFT);
- } while (ret == -EBUSY);
-
- return ret;
-}
-
-#if 0
-/* Useful for debug tracing: */
-static const char * const bch2_folio_sector_states[] = {
-#define x(n) #n,
- BCH_FOLIO_SECTOR_STATE()
-#undef x
- NULL
-};
-#endif
-
-static inline enum bch_folio_sector_state
-folio_sector_dirty(enum bch_folio_sector_state state)
-{
- switch (state) {
- case SECTOR_unallocated:
- return SECTOR_dirty;
- case SECTOR_reserved:
- return SECTOR_dirty_reserved;
- default:
- return state;
- }
-}
-
-static inline enum bch_folio_sector_state
-folio_sector_undirty(enum bch_folio_sector_state state)
-{
- switch (state) {
- case SECTOR_dirty:
- return SECTOR_unallocated;
- case SECTOR_dirty_reserved:
- return SECTOR_reserved;
- default:
- return state;
- }
-}
-
-static inline enum bch_folio_sector_state
-folio_sector_reserve(enum bch_folio_sector_state state)
-{
- switch (state) {
- case SECTOR_unallocated:
- return SECTOR_reserved;
- case SECTOR_dirty:
- return SECTOR_dirty_reserved;
- default:
- return state;
- }
-}
-
-/* for newly allocated folios: */
-struct bch_folio *__bch2_folio_create(struct folio *folio, gfp_t gfp)
-{
- struct bch_folio *s;
-
- s = kzalloc(sizeof(*s) +
- sizeof(struct bch_folio_sector) *
- folio_sectors(folio), gfp);
- if (!s)
- return NULL;
-
- spin_lock_init(&s->lock);
- folio_attach_private(folio, s);
- return s;
-}
-
-struct bch_folio *bch2_folio_create(struct folio *folio, gfp_t gfp)
-{
- return bch2_folio(folio) ?: __bch2_folio_create(folio, gfp);
-}
-
-static unsigned bkey_to_sector_state(struct bkey_s_c k)
-{
- if (bkey_extent_is_reservation(k))
- return SECTOR_reserved;
- if (bkey_extent_is_allocation(k.k))
- return SECTOR_allocated;
- return SECTOR_unallocated;
-}
-
-static void __bch2_folio_set(struct folio *folio,
- unsigned pg_offset, unsigned pg_len,
- unsigned nr_ptrs, unsigned state)
-{
- struct bch_folio *s = bch2_folio(folio);
- unsigned i, sectors = folio_sectors(folio);
-
- BUG_ON(pg_offset >= sectors);
- BUG_ON(pg_offset + pg_len > sectors);
-
- spin_lock(&s->lock);
-
- for (i = pg_offset; i < pg_offset + pg_len; i++) {
- s->s[i].nr_replicas = nr_ptrs;
- bch2_folio_sector_set(folio, s, i, state);
- }
-
- if (i == sectors)
- s->uptodate = true;
-
- spin_unlock(&s->lock);
-}
-
-/*
- * Initialize bch_folio state (allocated/unallocated, nr_replicas) from the
- * extents btree:
- */
-int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
- struct folio **fs, unsigned nr_folios)
-{
- u64 offset = folio_sector(fs[0]);
- bool need_set = false;
-
- for (unsigned folio_idx = 0; folio_idx < nr_folios; folio_idx++) {
- struct bch_folio *s = bch2_folio_create(fs[folio_idx], GFP_KERNEL);
- if (!s)
- return -ENOMEM;
-
- need_set |= !s->uptodate;
- }
-
- if (!need_set)
- return 0;
-
- unsigned folio_idx = 0;
-
- return bch2_trans_run(c,
- for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents,
- POS(inum.inum, offset),
- POS(inum.inum, U64_MAX),
- inum.subvol, BTREE_ITER_slots, k, ({
- unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
- unsigned state = bkey_to_sector_state(k);
-
- while (folio_idx < nr_folios) {
- struct folio *folio = fs[folio_idx];
- u64 folio_start = folio_sector(folio);
- u64 folio_end = folio_end_sector(folio);
- unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) -
- folio_start;
- unsigned folio_len = min(k.k->p.offset, folio_end) -
- folio_offset - folio_start;
-
- BUG_ON(k.k->p.offset < folio_start);
- BUG_ON(bkey_start_offset(k.k) > folio_end);
-
- if (!bch2_folio(folio)->uptodate)
- __bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);
-
- if (k.k->p.offset < folio_end)
- break;
- folio_idx++;
- }
-
- if (folio_idx == nr_folios)
- break;
- 0;
- })));
-}
-
-void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
-{
- struct bvec_iter iter;
- struct folio_vec fv;
- unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
- ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
- unsigned state = bkey_to_sector_state(k);
-
- bio_for_each_folio(fv, bio, iter)
- __bch2_folio_set(fv.fv_folio,
- fv.fv_offset >> 9,
- fv.fv_len >> 9,
- nr_ptrs, state);
-}
-
-void bch2_mark_pagecache_unallocated(struct bch_inode_info *inode,
- u64 start, u64 end)
-{
- pgoff_t index = start >> PAGE_SECTORS_SHIFT;
- pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
- struct folio_batch fbatch;
- unsigned i, j;
-
- if (end <= start)
- return;
-
- folio_batch_init(&fbatch);
-
- while (filemap_get_folios(inode->v.i_mapping,
- &index, end_index, &fbatch)) {
- for (i = 0; i < folio_batch_count(&fbatch); i++) {
- struct folio *folio = fbatch.folios[i];
- u64 folio_start = folio_sector(folio);
- u64 folio_end = folio_end_sector(folio);
- unsigned folio_offset = max(start, folio_start) - folio_start;
- unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
- struct bch_folio *s;
-
- BUG_ON(end <= folio_start);
-
- folio_lock(folio);
- s = bch2_folio(folio);
-
- if (s) {
- spin_lock(&s->lock);
- for (j = folio_offset; j < folio_offset + folio_len; j++)
- s->s[j].nr_replicas = 0;
- spin_unlock(&s->lock);
- }
-
- folio_unlock(folio);
- }
- folio_batch_release(&fbatch);
- cond_resched();
- }
-}
-
-int bch2_mark_pagecache_reserved(struct bch_inode_info *inode,
- u64 *start, u64 end,
- bool nonblocking)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- pgoff_t index = *start >> PAGE_SECTORS_SHIFT;
- pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
- struct folio_batch fbatch;
- s64 i_sectors_delta = 0;
- int ret = 0;
-
- if (end <= *start)
- return 0;
-
- folio_batch_init(&fbatch);
-
- while (filemap_get_folios(inode->v.i_mapping,
- &index, end_index, &fbatch)) {
- for (unsigned i = 0; i < folio_batch_count(&fbatch); i++) {
- struct folio *folio = fbatch.folios[i];
-
- if (!nonblocking)
- folio_lock(folio);
- else if (!folio_trylock(folio)) {
- folio_batch_release(&fbatch);
- ret = -EAGAIN;
- break;
- }
-
- u64 folio_start = folio_sector(folio);
- u64 folio_end = folio_end_sector(folio);
-
- BUG_ON(end <= folio_start);
-
- *start = min(end, folio_end);
-
- struct bch_folio *s = bch2_folio(folio);
- if (s) {
- unsigned folio_offset = max(*start, folio_start) - folio_start;
- unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
-
- spin_lock(&s->lock);
- for (unsigned j = folio_offset; j < folio_offset + folio_len; j++) {
- i_sectors_delta -= s->s[j].state == SECTOR_dirty;
- bch2_folio_sector_set(folio, s, j,
- folio_sector_reserve(s->s[j].state));
- }
- spin_unlock(&s->lock);
- }
-
- folio_unlock(folio);
- }
- folio_batch_release(&fbatch);
- cond_resched();
- }
-
- bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
- return ret;
-}
-
-static inline unsigned sectors_to_reserve(struct bch_folio_sector *s,
- unsigned nr_replicas)
-{
- return max(0, (int) nr_replicas -
- s->nr_replicas -
- s->replicas_reserved);
-}
-
-int bch2_get_folio_disk_reservation(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct folio *folio, bool check_enospc)
-{
- struct bch_folio *s = bch2_folio_create(folio, 0);
- unsigned nr_replicas = inode_nr_replicas(c, inode);
- struct disk_reservation disk_res = { 0 };
- unsigned i, sectors = folio_sectors(folio), disk_res_sectors = 0;
- int ret;
-
- if (!s)
- return -ENOMEM;
-
- for (i = 0; i < sectors; i++)
- disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);
-
- if (!disk_res_sectors)
- return 0;
-
- ret = bch2_disk_reservation_get(c, &disk_res,
- disk_res_sectors, 1,
- !check_enospc
- ? BCH_DISK_RESERVATION_NOFAIL
- : 0);
- if (unlikely(ret))
- return ret;
-
- for (i = 0; i < sectors; i++)
- s->s[i].replicas_reserved +=
- sectors_to_reserve(&s->s[i], nr_replicas);
-
- return 0;
-}
-
-void bch2_folio_reservation_put(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct bch2_folio_reservation *res)
-{
- bch2_disk_reservation_put(c, &res->disk);
- bch2_quota_reservation_put(c, inode, &res->quota);
-}
-
-static int __bch2_folio_reservation_get(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct folio *folio,
- struct bch2_folio_reservation *res,
- size_t offset, size_t len,
- bool partial)
-{
- struct bch_folio *s = bch2_folio_create(folio, 0);
- unsigned i, disk_sectors = 0, quota_sectors = 0;
- struct disk_reservation disk_res = {};
- size_t reserved = len;
- int ret;
-
- if (!s)
- return -ENOMEM;
-
- BUG_ON(!s->uptodate);
-
- for (i = round_down(offset, block_bytes(c)) >> 9;
- i < round_up(offset + len, block_bytes(c)) >> 9;
- i++) {
- disk_sectors += sectors_to_reserve(&s->s[i], res->disk.nr_replicas);
- quota_sectors += s->s[i].state == SECTOR_unallocated;
- }
-
- if (disk_sectors) {
- ret = bch2_disk_reservation_add(c, &disk_res, disk_sectors,
- partial ? BCH_DISK_RESERVATION_PARTIAL : 0);
- if (unlikely(ret))
- return ret;
-
- if (unlikely(disk_res.sectors != disk_sectors)) {
- disk_sectors = quota_sectors = 0;
-
- for (i = round_down(offset, block_bytes(c)) >> 9;
- i < round_up(offset + len, block_bytes(c)) >> 9;
- i++) {
- disk_sectors += sectors_to_reserve(&s->s[i], res->disk.nr_replicas);
- if (disk_sectors > disk_res.sectors) {
- /*
- * Make sure to get a reservation that's
- * aligned to the filesystem blocksize:
- */
- unsigned reserved_offset = round_down(i << 9, block_bytes(c));
- reserved = clamp(reserved_offset, offset, offset + len) - offset;
-
- if (!reserved) {
- bch2_disk_reservation_put(c, &disk_res);
- return -BCH_ERR_ENOSPC_disk_reservation;
- }
- break;
- }
- quota_sectors += s->s[i].state == SECTOR_unallocated;
- }
- }
- }
-
- if (quota_sectors) {
- ret = bch2_quota_reservation_add(c, inode, &res->quota, quota_sectors, true);
- if (unlikely(ret)) {
- bch2_disk_reservation_put(c, &disk_res);
- return ret;
- }
- }
-
- res->disk.sectors += disk_res.sectors;
- return partial ? reserved : 0;
-}
-
-int bch2_folio_reservation_get(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct folio *folio,
- struct bch2_folio_reservation *res,
- size_t offset, size_t len)
-{
- return __bch2_folio_reservation_get(c, inode, folio, res, offset, len, false);
-}
-
-ssize_t bch2_folio_reservation_get_partial(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct folio *folio,
- struct bch2_folio_reservation *res,
- size_t offset, size_t len)
-{
- return __bch2_folio_reservation_get(c, inode, folio, res, offset, len, true);
-}
-
-static void bch2_clear_folio_bits(struct folio *folio)
-{
- struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_folio *s = bch2_folio(folio);
- struct disk_reservation disk_res = { 0 };
- int i, sectors = folio_sectors(folio), dirty_sectors = 0;
-
- if (!s)
- return;
-
- EBUG_ON(!folio_test_locked(folio));
- EBUG_ON(folio_test_writeback(folio));
-
- for (i = 0; i < sectors; i++) {
- disk_res.sectors += s->s[i].replicas_reserved;
- s->s[i].replicas_reserved = 0;
-
- dirty_sectors -= s->s[i].state == SECTOR_dirty;
- bch2_folio_sector_set(folio, s, i, folio_sector_undirty(s->s[i].state));
- }
-
- bch2_disk_reservation_put(c, &disk_res);
-
- bch2_i_sectors_acct(c, inode, NULL, dirty_sectors);
-
- bch2_folio_release(folio);
-}
-
-void bch2_set_folio_dirty(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct folio *folio,
- struct bch2_folio_reservation *res,
- unsigned offset, unsigned len)
-{
- struct bch_folio *s = bch2_folio(folio);
- unsigned i, dirty_sectors = 0;
-
- WARN_ON((u64) folio_pos(folio) + offset + len >
- round_up((u64) i_size_read(&inode->v), block_bytes(c)));
-
- BUG_ON(!s->uptodate);
-
- spin_lock(&s->lock);
-
- for (i = round_down(offset, block_bytes(c)) >> 9;
- i < round_up(offset + len, block_bytes(c)) >> 9;
- i++) {
- unsigned sectors = sectors_to_reserve(&s->s[i],
- res->disk.nr_replicas);
-
- /*
- * This can happen if we race with the error path in
- * bch2_writepage_io_done():
- */
- sectors = min_t(unsigned, sectors, res->disk.sectors);
-
- s->s[i].replicas_reserved += sectors;
- res->disk.sectors -= sectors;
-
- dirty_sectors += s->s[i].state == SECTOR_unallocated;
-
- bch2_folio_sector_set(folio, s, i, folio_sector_dirty(s->s[i].state));
- }
-
- spin_unlock(&s->lock);
-
- bch2_i_sectors_acct(c, inode, &res->quota, dirty_sectors);
-
- if (!folio_test_dirty(folio))
- filemap_dirty_folio(inode->v.i_mapping, folio);
-}
-
-vm_fault_t bch2_page_fault(struct vm_fault *vmf)
-{
- struct file *file = vmf->vma->vm_file;
- struct address_space *mapping = file->f_mapping;
- struct address_space *fdm = faults_disabled_mapping();
- struct bch_inode_info *inode = file_bch_inode(file);
- vm_fault_t ret;
-
- if (fdm == mapping)
- return VM_FAULT_SIGBUS;
-
- /* Lock ordering: */
- if (fdm > mapping) {
- struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
-
- if (bch2_pagecache_add_tryget(inode))
- goto got_lock;
-
- bch2_pagecache_block_put(fdm_host);
-
- bch2_pagecache_add_get(inode);
- bch2_pagecache_add_put(inode);
-
- bch2_pagecache_block_get(fdm_host);
-
- /* Signal that lock has been dropped: */
- set_fdm_dropped_locks();
- return VM_FAULT_SIGBUS;
- }
-
- bch2_pagecache_add_get(inode);
-got_lock:
- ret = filemap_fault(vmf);
- bch2_pagecache_add_put(inode);
-
- return ret;
-}
-
-vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
-{
- struct folio *folio = page_folio(vmf->page);
- struct file *file = vmf->vma->vm_file;
- struct bch_inode_info *inode = file_bch_inode(file);
- struct address_space *mapping = file->f_mapping;
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch2_folio_reservation res;
- unsigned len;
- loff_t isize;
- vm_fault_t ret;
-
- bch2_folio_reservation_init(c, inode, &res);
-
- sb_start_pagefault(inode->v.i_sb);
- file_update_time(file);
-
- /*
- * Not strictly necessary, but helps avoid dio writes livelocking in
- * bch2_write_invalidate_inode_pages_range() - can drop this if/when we get
- * a bch2_write_invalidate_inode_pages_range() that works without dropping
- * page lock before invalidating page
- */
- bch2_pagecache_add_get(inode);
-
- folio_lock(folio);
- isize = i_size_read(&inode->v);
-
- if (folio->mapping != mapping || folio_pos(folio) >= isize) {
- folio_unlock(folio);
- ret = VM_FAULT_NOPAGE;
- goto out;
- }
-
- len = min_t(loff_t, folio_size(folio), isize - folio_pos(folio));
-
- if (bch2_folio_set(c, inode_inum(inode), &folio, 1) ?:
- bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) {
- folio_unlock(folio);
- ret = VM_FAULT_SIGBUS;
- goto out;
- }
-
- bch2_set_folio_dirty(c, inode, folio, &res, 0, len);
- bch2_folio_reservation_put(c, inode, &res);
-
- folio_wait_stable(folio);
- ret = VM_FAULT_LOCKED;
-out:
- bch2_pagecache_add_put(inode);
- sb_end_pagefault(inode->v.i_sb);
-
- return ret;
-}
-
-void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
-{
- if (offset || length < folio_size(folio))
- return;
-
- bch2_clear_folio_bits(folio);
-}
-
-bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
-{
- if (folio_test_dirty(folio) || folio_test_writeback(folio))
- return false;
-
- bch2_clear_folio_bits(folio);
- return true;
-}
-
-/* fseek: */
-
-static int folio_data_offset(struct folio *folio, loff_t pos,
- unsigned min_replicas)
-{
- struct bch_folio *s = bch2_folio(folio);
- unsigned i, sectors = folio_sectors(folio);
-
- if (s)
- for (i = folio_pos_to_s(folio, pos); i < sectors; i++)
- if (s->s[i].state >= SECTOR_dirty &&
- s->s[i].nr_replicas + s->s[i].replicas_reserved >= min_replicas)
- return i << SECTOR_SHIFT;
-
- return -1;
-}
-
-loff_t bch2_seek_pagecache_data(struct inode *vinode,
- loff_t start_offset,
- loff_t end_offset,
- unsigned min_replicas,
- bool nonblock)
-{
- struct folio_batch fbatch;
- pgoff_t start_index = start_offset >> PAGE_SHIFT;
- pgoff_t end_index = end_offset >> PAGE_SHIFT;
- pgoff_t index = start_index;
- unsigned i;
- loff_t ret;
- int offset;
-
- folio_batch_init(&fbatch);
-
- while (filemap_get_folios(vinode->i_mapping,
- &index, end_index, &fbatch)) {
- for (i = 0; i < folio_batch_count(&fbatch); i++) {
- struct folio *folio = fbatch.folios[i];
-
- if (!nonblock) {
- folio_lock(folio);
- } else if (!folio_trylock(folio)) {
- folio_batch_release(&fbatch);
- return -EAGAIN;
- }
-
- offset = folio_data_offset(folio,
- max(folio_pos(folio), start_offset),
- min_replicas);
- if (offset >= 0) {
- ret = clamp(folio_pos(folio) + offset,
- start_offset, end_offset);
- folio_unlock(folio);
- folio_batch_release(&fbatch);
- return ret;
- }
- folio_unlock(folio);
- }
- folio_batch_release(&fbatch);
- cond_resched();
- }
-
- return end_offset;
-}
-
-/*
- * Search for a hole in a folio.
- *
- * The filemap layer returns -ENOENT if no folio exists, so reuse the same error
- * code to indicate a pagecache hole exists at the returned offset. Otherwise
- * return 0 if the folio is filled with data, or an error code. This function
- * can return -EAGAIN if nonblock is specified.
- */
-static int folio_hole_offset(struct address_space *mapping, loff_t *offset,
- unsigned min_replicas, bool nonblock)
-{
- struct folio *folio;
- struct bch_folio *s;
- unsigned i, sectors;
- int ret = -ENOENT;
-
- folio = __filemap_get_folio(mapping, *offset >> PAGE_SHIFT,
- FGP_LOCK|(nonblock ? FGP_NOWAIT : 0), 0);
- if (IS_ERR(folio))
- return PTR_ERR(folio);
-
- s = bch2_folio(folio);
- if (!s)
- goto unlock;
-
- sectors = folio_sectors(folio);
- for (i = folio_pos_to_s(folio, *offset); i < sectors; i++)
- if (s->s[i].state < SECTOR_dirty ||
- s->s[i].nr_replicas + s->s[i].replicas_reserved < min_replicas) {
- *offset = max(*offset,
- folio_pos(folio) + (i << SECTOR_SHIFT));
- goto unlock;
- }
-
- *offset = folio_end_pos(folio);
- ret = 0;
-unlock:
- folio_unlock(folio);
- folio_put(folio);
- return ret;
-}
-
-loff_t bch2_seek_pagecache_hole(struct inode *vinode,
- loff_t start_offset,
- loff_t end_offset,
- unsigned min_replicas,
- bool nonblock)
-{
- struct address_space *mapping = vinode->i_mapping;
- loff_t offset = start_offset;
- loff_t ret = 0;
-
- while (!ret && offset < end_offset)
- ret = folio_hole_offset(mapping, &offset, min_replicas, nonblock);
-
- if (ret && ret != -ENOENT)
- return ret;
- return min(offset, end_offset);
-}
-
-int bch2_clamp_data_hole(struct inode *inode,
- u64 *hole_start,
- u64 *hole_end,
- unsigned min_replicas,
- bool nonblock)
-{
- loff_t ret;
-
- ret = bch2_seek_pagecache_hole(inode,
- *hole_start << 9, *hole_end << 9, min_replicas, nonblock) >> 9;
- if (ret < 0)
- return ret;
-
- *hole_start = ret;
-
- if (*hole_start == *hole_end)
- return 0;
-
- ret = bch2_seek_pagecache_data(inode,
- *hole_start << 9, *hole_end << 9, min_replicas, nonblock) >> 9;
- if (ret < 0)
- return ret;
-
- *hole_end = ret;
- return 0;
-}
-
-#endif /* NO_BCACHEFS_FS */
diff --git a/fs/bcachefs/fs-io-pagecache.h b/fs/bcachefs/fs-io-pagecache.h
deleted file mode 100644
index fad911cf5068..000000000000
--- a/fs/bcachefs/fs-io-pagecache.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_FS_IO_PAGECACHE_H
-#define _BCACHEFS_FS_IO_PAGECACHE_H
-
-#include <linux/pagemap.h>
-
-typedef DARRAY(struct folio *) folios;
-
-int bch2_filemap_get_contig_folios_d(struct address_space *, loff_t,
- u64, fgf_t, gfp_t, folios *);
-int bch2_write_invalidate_inode_pages_range(struct address_space *, loff_t, loff_t);
-
-/*
- * Use u64 for the end pos and sector helpers because if the folio covers the
- * max supported range of the mapping, the start offset of the next folio
- * overflows loff_t. This breaks much of the range based processing in the
- * buffered write path.
- */
-static inline u64 folio_end_pos(struct folio *folio)
-{
- return folio_pos(folio) + folio_size(folio);
-}
-
-static inline size_t folio_sectors(struct folio *folio)
-{
- return PAGE_SECTORS << folio_order(folio);
-}
-
-static inline loff_t folio_sector(struct folio *folio)
-{
- return folio_pos(folio) >> 9;
-}
-
-static inline u64 folio_end_sector(struct folio *folio)
-{
- return folio_end_pos(folio) >> 9;
-}
-
-#define BCH_FOLIO_SECTOR_STATE() \
- x(unallocated) \
- x(reserved) \
- x(dirty) \
- x(dirty_reserved) \
- x(allocated)
-
-enum bch_folio_sector_state {
-#define x(n) SECTOR_##n,
- BCH_FOLIO_SECTOR_STATE()
-#undef x
-};
-
-struct bch_folio_sector {
- /* Uncompressed, fully allocated replicas (or on disk reservation): */
- u8 nr_replicas:4,
- /* Owns a PAGE_SECTORS * replicas_reserved sized in-memory reservation: */
- replicas_reserved:4;
- u8 state;
-};
-
-struct bch_folio {
- spinlock_t lock;
- atomic_t write_count;
- /*
- * Is the sector state up to date with the btree?
- * (Not the data itself)
- */
- bool uptodate;
- struct bch_folio_sector s[];
-};
-
-/* Helper for when we need to add debug instrumentation: */
-static inline void bch2_folio_sector_set(struct folio *folio,
- struct bch_folio *s,
- unsigned i, unsigned n)
-{
- s->s[i].state = n;
-}
-
-/* file offset (to folio offset) to bch_folio_sector index */
-static inline int folio_pos_to_s(struct folio *folio, loff_t pos)
-{
- u64 f_offset = pos - folio_pos(folio);
-
- BUG_ON(pos < folio_pos(folio) || pos >= folio_end_pos(folio));
- return f_offset >> SECTOR_SHIFT;
-}
-
-/* for newly allocated folios: */
-static inline void __bch2_folio_release(struct folio *folio)
-{
- kfree(folio_detach_private(folio));
-}
-
-static inline void bch2_folio_release(struct folio *folio)
-{
- EBUG_ON(!folio_test_locked(folio));
- __bch2_folio_release(folio);
-}
-
-static inline struct bch_folio *__bch2_folio(struct folio *folio)
-{
- return folio_get_private(folio);
-}
-
-static inline struct bch_folio *bch2_folio(struct folio *folio)
-{
- EBUG_ON(!folio_test_locked(folio));
-
- return __bch2_folio(folio);
-}
-
-struct bch_folio *__bch2_folio_create(struct folio *, gfp_t);
-struct bch_folio *bch2_folio_create(struct folio *, gfp_t);
-
-struct bch2_folio_reservation {
- struct disk_reservation disk;
- struct quota_res quota;
-};
-
-static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
-{
- /* XXX: this should not be open coded */
- return inode->ei_inode.bi_data_replicas
- ? inode->ei_inode.bi_data_replicas - 1
- : c->opts.data_replicas;
-}
-
-static inline void bch2_folio_reservation_init(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct bch2_folio_reservation *res)
-{
- memset(res, 0, sizeof(*res));
-
- res->disk.nr_replicas = inode_nr_replicas(c, inode);
-}
-
-int bch2_folio_set(struct bch_fs *, subvol_inum, struct folio **, unsigned);
-void bch2_bio_page_state_set(struct bio *, struct bkey_s_c);
-
-void bch2_mark_pagecache_unallocated(struct bch_inode_info *, u64, u64);
-int bch2_mark_pagecache_reserved(struct bch_inode_info *, u64 *, u64, bool);
-
-int bch2_get_folio_disk_reservation(struct bch_fs *,
- struct bch_inode_info *,
- struct folio *, bool);
-
-void bch2_folio_reservation_put(struct bch_fs *,
- struct bch_inode_info *,
- struct bch2_folio_reservation *);
-int bch2_folio_reservation_get(struct bch_fs *,
- struct bch_inode_info *,
- struct folio *,
- struct bch2_folio_reservation *,
- size_t, size_t);
-ssize_t bch2_folio_reservation_get_partial(struct bch_fs *,
- struct bch_inode_info *,
- struct folio *,
- struct bch2_folio_reservation *,
- size_t, size_t);
-
-void bch2_set_folio_dirty(struct bch_fs *,
- struct bch_inode_info *,
- struct folio *,
- struct bch2_folio_reservation *,
- unsigned, unsigned);
-
-vm_fault_t bch2_page_fault(struct vm_fault *);
-vm_fault_t bch2_page_mkwrite(struct vm_fault *);
-void bch2_invalidate_folio(struct folio *, size_t, size_t);
-bool bch2_release_folio(struct folio *, gfp_t);
-
-loff_t bch2_seek_pagecache_data(struct inode *, loff_t, loff_t, unsigned, bool);
-loff_t bch2_seek_pagecache_hole(struct inode *, loff_t, loff_t, unsigned, bool);
-int bch2_clamp_data_hole(struct inode *, u64 *, u64 *, unsigned, bool);
-
-#endif /* _BCACHEFS_FS_IO_PAGECACHE_H */
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
deleted file mode 100644
index 2456c41b215e..000000000000
--- a/fs/bcachefs/fs-io.c
+++ /dev/null
@@ -1,1028 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef NO_BCACHEFS_FS
-
-#include "bcachefs.h"
-#include "alloc_foreground.h"
-#include "bkey_buf.h"
-#include "btree_update.h"
-#include "buckets.h"
-#include "clock.h"
-#include "error.h"
-#include "extents.h"
-#include "extent_update.h"
-#include "fs.h"
-#include "fs-io.h"
-#include "fs-io-buffered.h"
-#include "fs-io-pagecache.h"
-#include "fsck.h"
-#include "inode.h"
-#include "journal.h"
-#include "io_misc.h"
-#include "keylist.h"
-#include "quota.h"
-#include "reflink.h"
-#include "trace.h"
-
-#include <linux/aio.h>
-#include <linux/backing-dev.h>
-#include <linux/falloc.h>
-#include <linux/migrate.h>
-#include <linux/mmu_context.h>
-#include <linux/pagevec.h>
-#include <linux/rmap.h>
-#include <linux/sched/signal.h>
-#include <linux/task_io_accounting_ops.h>
-#include <linux/uio.h>
-
-#include <trace/events/writeback.h>
-
-struct nocow_flush {
- struct closure *cl;
- struct bch_dev *ca;
- struct bio bio;
-};
-
-static void nocow_flush_endio(struct bio *_bio)
-{
-
- struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
-
- closure_put(bio->cl);
- percpu_ref_put(&bio->ca->io_ref);
- bio_put(&bio->bio);
-}
-
-void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct closure *cl)
-{
- struct nocow_flush *bio;
- struct bch_dev *ca;
- struct bch_devs_mask devs;
- unsigned dev;
-
- dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
- if (dev == BCH_SB_MEMBERS_MAX)
- return;
-
- devs = inode->ei_devs_need_flush;
- memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
-
- for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
- rcu_read_lock();
- ca = rcu_dereference(c->devs[dev]);
- if (ca && !percpu_ref_tryget(&ca->io_ref))
- ca = NULL;
- rcu_read_unlock();
-
- if (!ca)
- continue;
-
- bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
- REQ_OP_WRITE|REQ_PREFLUSH,
- GFP_KERNEL,
- &c->nocow_flush_bioset),
- struct nocow_flush, bio);
- bio->cl = cl;
- bio->ca = ca;
- bio->bio.bi_end_io = nocow_flush_endio;
- closure_bio_submit(&bio->bio, cl);
- }
-}
-
-static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
- struct bch_inode_info *inode)
-{
- struct closure cl;
-
- closure_init_stack(&cl);
- bch2_inode_flush_nocow_writes_async(c, inode, &cl);
- closure_sync(&cl);
-
- return 0;
-}
-
-/* i_size updates: */
-
-struct inode_new_size {
- loff_t new_size;
- u64 now;
- unsigned fields;
-};
-
-static int inode_set_size(struct btree_trans *trans,
- struct bch_inode_info *inode,
- struct bch_inode_unpacked *bi,
- void *p)
-{
- struct inode_new_size *s = p;
-
- bi->bi_size = s->new_size;
- if (s->fields & ATTR_ATIME)
- bi->bi_atime = s->now;
- if (s->fields & ATTR_MTIME)
- bi->bi_mtime = s->now;
- if (s->fields & ATTR_CTIME)
- bi->bi_ctime = s->now;
-
- return 0;
-}
-
-int __must_check bch2_write_inode_size(struct bch_fs *c,
- struct bch_inode_info *inode,
- loff_t new_size, unsigned fields)
-{
- struct inode_new_size s = {
- .new_size = new_size,
- .now = bch2_current_time(c),
- .fields = fields,
- };
-
- return bch2_write_inode(c, inode, inode_set_size, &s, fields);
-}
-
-void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
- struct quota_res *quota_res, s64 sectors)
-{
- bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
- "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
- inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
- inode->ei_inode.bi_sectors);
- inode->v.i_blocks += sectors;
-
-#ifdef CONFIG_BCACHEFS_QUOTA
- if (quota_res &&
- !test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags) &&
- sectors > 0) {
- BUG_ON(sectors > quota_res->sectors);
- BUG_ON(sectors > inode->ei_quota_reserved);
-
- quota_res->sectors -= sectors;
- inode->ei_quota_reserved -= sectors;
- } else {
- bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
- }
-#endif
-}
-
-/* fsync: */
-
-/*
- * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
- * insert trigger: look up the btree inode instead
- */
-static int bch2_flush_inode(struct bch_fs *c,
- struct bch_inode_info *inode)
-{
- if (c->opts.journal_flush_disabled)
- return 0;
-
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync))
- return -EROFS;
-
- struct bch_inode_unpacked u;
- int ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u) ?:
- bch2_journal_flush_seq(&c->journal, u.bi_journal_seq, TASK_INTERRUPTIBLE) ?:
- bch2_inode_flush_nocow_writes(c, inode);
- bch2_write_ref_put(c, BCH_WRITE_REF_fsync);
- return ret;
-}
-
-int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
-{
- struct bch_inode_info *inode = file_bch_inode(file);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- int ret, err;
-
- trace_bch2_fsync(file, datasync);
-
- ret = file_write_and_wait_range(file, start, end);
- if (ret)
- goto out;
- ret = sync_inode_metadata(&inode->v, 1);
- if (ret)
- goto out;
- ret = bch2_flush_inode(c, inode);
-out:
- ret = bch2_err_class(ret);
- if (ret == -EROFS)
- ret = -EIO;
-
- err = file_check_and_advance_wb_err(file);
- if (!ret)
- ret = err;
-
- return ret;
-}
-
-/* truncate: */
-
-static inline int range_has_data(struct bch_fs *c, u32 subvol,
- struct bpos start,
- struct bpos end)
-{
- return bch2_trans_run(c,
- for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents, start, end,
- subvol, 0, k, ({
- bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k);
- })));
-}
-
-static int __bch2_truncate_folio(struct bch_inode_info *inode,
- pgoff_t index, loff_t start, loff_t end)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct address_space *mapping = inode->v.i_mapping;
- struct bch_folio *s;
- unsigned start_offset;
- unsigned end_offset;
- unsigned i;
- struct folio *folio;
- s64 i_sectors_delta = 0;
- int ret = 0;
- u64 end_pos;
-
- folio = filemap_lock_folio(mapping, index);
- if (IS_ERR_OR_NULL(folio)) {
- /*
- * XXX: we're doing two index lookups when we end up reading the
- * folio
- */
- ret = range_has_data(c, inode->ei_inum.subvol,
- POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
- POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));
- if (ret <= 0)
- return ret;
-
- folio = __filemap_get_folio(mapping, index,
- FGP_LOCK|FGP_CREAT, GFP_KERNEL);
- if (IS_ERR_OR_NULL(folio)) {
- ret = -ENOMEM;
- goto out;
- }
- }
-
- BUG_ON(start >= folio_end_pos(folio));
- BUG_ON(end <= folio_pos(folio));
-
- start_offset = max(start, folio_pos(folio)) - folio_pos(folio);
- end_offset = min_t(u64, end, folio_end_pos(folio)) - folio_pos(folio);
-
- /* Folio boundary? Nothing to do */
- if (start_offset == 0 &&
- end_offset == folio_size(folio)) {
- ret = 0;
- goto unlock;
- }
-
- s = bch2_folio_create(folio, 0);
- if (!s) {
- ret = -ENOMEM;
- goto unlock;
- }
-
- if (!folio_test_uptodate(folio)) {
- ret = bch2_read_single_folio(folio, mapping);
- if (ret)
- goto unlock;
- }
-
- ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
- if (ret)
- goto unlock;
-
- for (i = round_up(start_offset, block_bytes(c)) >> 9;
- i < round_down(end_offset, block_bytes(c)) >> 9;
- i++) {
- s->s[i].nr_replicas = 0;
-
- i_sectors_delta -= s->s[i].state == SECTOR_dirty;
- bch2_folio_sector_set(folio, s, i, SECTOR_unallocated);
- }
-
- bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
-
- /*
- * Caller needs to know whether this folio will be written out by
- * writeback - doing an i_size update if necessary - or whether it will
- * be responsible for the i_size update.
- *
- * Note that we shouldn't ever see a folio beyond EOF, but check and
- * warn if so. This has been observed when folios aren't cleaned up
- * after a short write, and there's still a chance reclaim will fix
- * things up.
- */
- WARN_ON_ONCE(folio_pos(folio) >= inode->v.i_size);
- end_pos = folio_end_pos(folio);
- if (inode->v.i_size > folio_pos(folio))
- end_pos = min_t(u64, inode->v.i_size, end_pos);
- ret = s->s[folio_pos_to_s(folio, end_pos - 1)].state >= SECTOR_dirty;
-
- folio_zero_segment(folio, start_offset, end_offset);
-
- /*
- * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
- *
- * XXX: because we aren't currently tracking whether the folio has actual
- * data in it (vs. just 0s, or only partially written) this is wrong. ick.
- */
- BUG_ON(bch2_get_folio_disk_reservation(c, inode, folio, false));
-
- /*
- * This removes any writeable userspace mappings; we need to force
- * .page_mkwrite to be called again before any mmapped writes, to
- * redirty the full page:
- */
- folio_mkclean(folio);
- filemap_dirty_folio(mapping, folio);
-unlock:
- folio_unlock(folio);
- folio_put(folio);
-out:
- return ret;
-}
-
-static int bch2_truncate_folio(struct bch_inode_info *inode, loff_t from)
-{
- return __bch2_truncate_folio(inode, from >> PAGE_SHIFT,
- from, ANYSINT_MAX(loff_t));
-}
-
-static int bch2_truncate_folios(struct bch_inode_info *inode,
- loff_t start, loff_t end)
-{
- int ret = __bch2_truncate_folio(inode, start >> PAGE_SHIFT,
- start, end);
-
- if (ret >= 0 &&
- start >> PAGE_SHIFT != end >> PAGE_SHIFT)
- ret = __bch2_truncate_folio(inode,
- (end - 1) >> PAGE_SHIFT,
- start, end);
- return ret;
-}
-
-static int bch2_extend(struct mnt_idmap *idmap,
- struct bch_inode_info *inode,
- struct bch_inode_unpacked *inode_u,
- struct iattr *iattr)
-{
- struct address_space *mapping = inode->v.i_mapping;
- int ret;
-
- /*
- * sync appends:
- *
- * this has to be done _before_ extending i_size:
- */
- ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
- if (ret)
- return ret;
-
- truncate_setsize(&inode->v, iattr->ia_size);
-
- return bch2_setattr_nonsize(idmap, inode, iattr);
-}
-
-int bchfs_truncate(struct mnt_idmap *idmap,
- struct bch_inode_info *inode, struct iattr *iattr)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct address_space *mapping = inode->v.i_mapping;
- struct bch_inode_unpacked inode_u;
- s64 i_sectors_delta = 0;
- int ret = 0;
-
- /*
- * If the truncate call will change the size of the file, the
- * cmtimes should be updated. If the size will not change, we
- * do not need to update the cmtimes.
- */
- if (iattr->ia_size != inode->v.i_size) {
- if (!(iattr->ia_valid & ATTR_MTIME))
- ktime_get_coarse_real_ts64(&iattr->ia_mtime);
- if (!(iattr->ia_valid & ATTR_CTIME))
- ktime_get_coarse_real_ts64(&iattr->ia_ctime);
- iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
- }
-
- inode_dio_wait(&inode->v);
- bch2_pagecache_block_get(inode);
-
- ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
- if (ret)
- goto err;
-
- /*
- * check this before next assertion; on filesystem error our normal
- * invariants are a bit broken (truncate has to truncate the page cache
- * before the inode).
- */
- ret = bch2_journal_error(&c->journal);
- if (ret)
- goto err;
-
- WARN_ONCE(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
- inode->v.i_size < inode_u.bi_size,
- "truncate spotted in mem i_size < btree i_size: %llu < %llu\n",
- (u64) inode->v.i_size, inode_u.bi_size);
-
- if (iattr->ia_size > inode->v.i_size) {
- ret = bch2_extend(idmap, inode, &inode_u, iattr);
- goto err;
- }
-
- iattr->ia_valid &= ~ATTR_SIZE;
-
- ret = bch2_truncate_folio(inode, iattr->ia_size);
- if (unlikely(ret < 0))
- goto err;
-
- truncate_setsize(&inode->v, iattr->ia_size);
-
- /*
- * When extending, we're going to write the new i_size to disk
- * immediately so we need to flush anything above the current on disk
- * i_size first:
- *
- * Also, when extending we need to flush the page that i_size currently
- * straddles - if it's mapped to userspace, we need to ensure that
- * userspace has to redirty it and call .mkwrite -> set_page_dirty
- * again to allocate the part of the page that was extended.
- */
- if (iattr->ia_size > inode_u.bi_size)
- ret = filemap_write_and_wait_range(mapping,
- inode_u.bi_size,
- iattr->ia_size - 1);
- else if (iattr->ia_size & (PAGE_SIZE - 1))
- ret = filemap_write_and_wait_range(mapping,
- round_down(iattr->ia_size, PAGE_SIZE),
- iattr->ia_size - 1);
- if (ret)
- goto err;
-
- ret = bch2_truncate(c, inode_inum(inode), iattr->ia_size, &i_sectors_delta);
- bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
-
- if (unlikely(ret)) {
- /*
- * If we error here, VFS caches are now inconsistent with btree
- */
- set_bit(EI_INODE_ERROR, &inode->ei_flags);
- goto err;
- }
-
- bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
- !bch2_journal_error(&c->journal), c,
- "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
- inode->v.i_ino, (u64) inode->v.i_blocks,
- inode->ei_inode.bi_sectors);
-
- ret = bch2_setattr_nonsize(idmap, inode, iattr);
-err:
- bch2_pagecache_block_put(inode);
- return bch2_err_class(ret);
-}
-
-/* fallocate: */
-
-static int inode_update_times_fn(struct btree_trans *trans,
- struct bch_inode_info *inode,
- struct bch_inode_unpacked *bi, void *p)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
-
- bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
- return 0;
-}
-
-static noinline long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- u64 end = offset + len;
- u64 block_start = round_up(offset, block_bytes(c));
- u64 block_end = round_down(end, block_bytes(c));
- bool truncated_last_page;
- int ret = 0;
-
- ret = bch2_truncate_folios(inode, offset, end);
- if (unlikely(ret < 0))
- goto err;
-
- truncated_last_page = ret;
-
- truncate_pagecache_range(&inode->v, offset, end - 1);
-
- if (block_start < block_end) {
- s64 i_sectors_delta = 0;
-
- ret = bch2_fpunch(c, inode_inum(inode),
- block_start >> 9, block_end >> 9,
- &i_sectors_delta);
- bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
- }
-
- mutex_lock(&inode->ei_update_lock);
- if (end >= inode->v.i_size && !truncated_last_page) {
- ret = bch2_write_inode_size(c, inode, inode->v.i_size,
- ATTR_MTIME|ATTR_CTIME);
- } else {
- ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
- ATTR_MTIME|ATTR_CTIME);
- }
- mutex_unlock(&inode->ei_update_lock);
-err:
- return ret;
-}
-
-static noinline long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
- loff_t offset, loff_t len,
- bool insert)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct address_space *mapping = inode->v.i_mapping;
- s64 i_sectors_delta = 0;
- int ret = 0;
-
- if ((offset | len) & (block_bytes(c) - 1))
- return -EINVAL;
-
- if (insert) {
- if (offset >= inode->v.i_size)
- return -EINVAL;
- } else {
- if (offset + len >= inode->v.i_size)
- return -EINVAL;
- }
-
- ret = bch2_write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
- if (ret)
- return ret;
-
- if (insert)
- i_size_write(&inode->v, inode->v.i_size + len);
-
- ret = bch2_fcollapse_finsert(c, inode_inum(inode), offset >> 9, len >> 9,
- insert, &i_sectors_delta);
- if (!ret && !insert)
- i_size_write(&inode->v, inode->v.i_size - len);
- bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
-
- return ret;
-}
-
-static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
- u64 start_sector, u64 end_sector)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bpos end_pos = POS(inode->v.i_ino, end_sector);
- struct bch_io_opts opts;
- int ret = 0;
-
- bch2_inode_opts_get(&opts, c, &inode->ei_inode);
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- POS(inode->v.i_ino, start_sector),
- BTREE_ITER_slots|BTREE_ITER_intent);
-
- while (!ret) {
- s64 i_sectors_delta = 0;
- struct quota_res quota_res = { 0 };
- struct bkey_s_c k;
- unsigned sectors;
- bool is_allocation;
- u64 hole_start, hole_end;
- u32 snapshot;
-
- bch2_trans_begin(trans);
-
- if (bkey_ge(iter.pos, end_pos))
- break;
-
- ret = bch2_subvolume_get_snapshot(trans,
- inode->ei_inum.subvol, &snapshot);
- if (ret)
- goto bkey_err;
-
- bch2_btree_iter_set_snapshot(&iter, snapshot);
-
- k = bch2_btree_iter_peek_slot(&iter);
- if ((ret = bkey_err(k)))
- goto bkey_err;
-
- hole_start = iter.pos.offset;
- hole_end = bpos_min(k.k->p, end_pos).offset;
- is_allocation = bkey_extent_is_allocation(k.k);
-
- /* already reserved */
- if (bkey_extent_is_reservation(k) &&
- bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
- bch2_btree_iter_advance(&iter);
- continue;
- }
-
- if (bkey_extent_is_data(k.k) &&
- !(mode & FALLOC_FL_ZERO_RANGE)) {
- bch2_btree_iter_advance(&iter);
- continue;
- }
-
- if (!(mode & FALLOC_FL_ZERO_RANGE)) {
- /*
- * Lock ordering - can't be holding btree locks while
- * blocking on a folio lock:
- */
- if (bch2_clamp_data_hole(&inode->v,
- &hole_start,
- &hole_end,
- opts.data_replicas, true)) {
- ret = drop_locks_do(trans,
- (bch2_clamp_data_hole(&inode->v,
- &hole_start,
- &hole_end,
- opts.data_replicas, false), 0));
- if (ret)
- goto bkey_err;
- }
- bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, hole_start));
-
- if (ret)
- goto bkey_err;
-
- if (hole_start == hole_end)
- continue;
- }
-
- sectors = hole_end - hole_start;
-
- if (!is_allocation) {
- ret = bch2_quota_reservation_add(c, inode,
- &quota_res, sectors, true);
- if (unlikely(ret))
- goto bkey_err;
- }
-
- ret = bch2_extent_fallocate(trans, inode_inum(inode), &iter,
- sectors, opts, &i_sectors_delta,
- writepoint_hashed((unsigned long) current));
- if (ret)
- goto bkey_err;
-
- bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
-
- if (bch2_mark_pagecache_reserved(inode, &hole_start,
- iter.pos.offset, true)) {
- ret = drop_locks_do(trans,
- bch2_mark_pagecache_reserved(inode, &hole_start,
- iter.pos.offset, false));
- if (ret)
- goto bkey_err;
- }
-bkey_err:
- bch2_quota_reservation_put(c, inode, &quota_res);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- ret = 0;
- }
-
- if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
- struct quota_res quota_res = { 0 };
- s64 i_sectors_delta = 0;
-
- bch2_fpunch_at(trans, &iter, inode_inum(inode),
- end_sector, &i_sectors_delta);
- bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
- bch2_quota_reservation_put(c, inode, &quota_res);
- }
-
- bch2_trans_iter_exit(trans, &iter);
- bch2_trans_put(trans);
- return ret;
-}
-
-static noinline long bchfs_fallocate(struct bch_inode_info *inode, int mode,
- loff_t offset, loff_t len)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- u64 end = offset + len;
- u64 block_start = round_down(offset, block_bytes(c));
- u64 block_end = round_up(end, block_bytes(c));
- bool truncated_last_page = false;
- int ret, ret2 = 0;
-
- if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
- ret = inode_newsize_ok(&inode->v, end);
- if (ret)
- return ret;
- }
-
- if (mode & FALLOC_FL_ZERO_RANGE) {
- ret = bch2_truncate_folios(inode, offset, end);
- if (unlikely(ret < 0))
- return ret;
-
- truncated_last_page = ret;
-
- truncate_pagecache_range(&inode->v, offset, end - 1);
-
- block_start = round_up(offset, block_bytes(c));
- block_end = round_down(end, block_bytes(c));
- }
-
- ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);
-
- /*
- * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
- * so that the VFS cache i_size is consistent with the btree i_size:
- */
- if (ret &&
- !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
- return ret;
-
- if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
- end = inode->v.i_size;
-
- if (end >= inode->v.i_size &&
- (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
- !(mode & FALLOC_FL_KEEP_SIZE))) {
- spin_lock(&inode->v.i_lock);
- i_size_write(&inode->v, end);
- spin_unlock(&inode->v.i_lock);
-
- mutex_lock(&inode->ei_update_lock);
- ret2 = bch2_write_inode_size(c, inode, end, 0);
- mutex_unlock(&inode->ei_update_lock);
- }
-
- return ret ?: ret2;
-}
-
-long bch2_fallocate_dispatch(struct file *file, int mode,
- loff_t offset, loff_t len)
-{
- struct bch_inode_info *inode = file_bch_inode(file);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- long ret;
-
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fallocate))
- return -EROFS;
-
- inode_lock(&inode->v);
- inode_dio_wait(&inode->v);
- bch2_pagecache_block_get(inode);
-
- ret = file_modified(file);
- if (ret)
- goto err;
-
- if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
- ret = bchfs_fallocate(inode, mode, offset, len);
- else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
- ret = bchfs_fpunch(inode, offset, len);
- else if (mode == FALLOC_FL_INSERT_RANGE)
- ret = bchfs_fcollapse_finsert(inode, offset, len, true);
- else if (mode == FALLOC_FL_COLLAPSE_RANGE)
- ret = bchfs_fcollapse_finsert(inode, offset, len, false);
- else
- ret = -EOPNOTSUPP;
-err:
- bch2_pagecache_block_put(inode);
- inode_unlock(&inode->v);
- bch2_write_ref_put(c, BCH_WRITE_REF_fallocate);
-
- return bch2_err_class(ret);
-}
-
-/*
- * Take a quota reservation for unallocated blocks in a given file range.
- * Does not check the pagecache.
- */
-static int quota_reserve_range(struct bch_inode_info *inode,
- struct quota_res *res,
- u64 start, u64 end)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- u64 sectors = end - start;
-
- int ret = bch2_trans_run(c,
- for_each_btree_key_in_subvolume_upto(trans, iter,
- BTREE_ID_extents,
- POS(inode->v.i_ino, start),
- POS(inode->v.i_ino, end - 1),
- inode->ei_inum.subvol, 0, k, ({
- if (bkey_extent_is_allocation(k.k)) {
- u64 s = min(end, k.k->p.offset) -
- max(start, bkey_start_offset(k.k));
- BUG_ON(s > sectors);
- sectors -= s;
- }
-
- 0;
- })));
-
- return ret ?: bch2_quota_reservation_add(c, inode, res, sectors, true);
-}
-
-loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
- struct file *file_dst, loff_t pos_dst,
- loff_t len, unsigned remap_flags)
-{
- struct bch_inode_info *src = file_bch_inode(file_src);
- struct bch_inode_info *dst = file_bch_inode(file_dst);
- struct bch_fs *c = src->v.i_sb->s_fs_info;
- struct quota_res quota_res = { 0 };
- s64 i_sectors_delta = 0;
- u64 aligned_len;
- loff_t ret = 0;
-
- if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
- return -EINVAL;
-
- if ((pos_src & (block_bytes(c) - 1)) ||
- (pos_dst & (block_bytes(c) - 1)))
- return -EINVAL;
-
- if (src == dst &&
- abs(pos_src - pos_dst) < len)
- return -EINVAL;
-
- lock_two_nondirectories(&src->v, &dst->v);
- bch2_lock_inodes(INODE_PAGECACHE_BLOCK, src, dst);
-
- inode_dio_wait(&src->v);
- inode_dio_wait(&dst->v);
-
- ret = generic_remap_file_range_prep(file_src, pos_src,
- file_dst, pos_dst,
- &len, remap_flags);
- if (ret < 0 || len == 0)
- goto err;
-
- aligned_len = round_up((u64) len, block_bytes(c));
-
- ret = bch2_write_invalidate_inode_pages_range(dst->v.i_mapping,
- pos_dst, pos_dst + len - 1);
- if (ret)
- goto err;
-
- ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
- (pos_dst + aligned_len) >> 9);
- if (ret)
- goto err;
-
- if (!(remap_flags & REMAP_FILE_DEDUP))
- file_update_time(file_dst);
-
- bch2_mark_pagecache_unallocated(src, pos_src >> 9,
- (pos_src + aligned_len) >> 9);
-
- ret = bch2_remap_range(c,
- inode_inum(dst), pos_dst >> 9,
- inode_inum(src), pos_src >> 9,
- aligned_len >> 9,
- pos_dst + len, &i_sectors_delta);
- if (ret < 0)
- goto err;
-
- /*
- * due to alignment, we might have remapped slightly more than requested
- */
- ret = min((u64) ret << 9, (u64) len);
-
- bch2_i_sectors_acct(c, dst, &quota_res, i_sectors_delta);
-
- spin_lock(&dst->v.i_lock);
- if (pos_dst + ret > dst->v.i_size)
- i_size_write(&dst->v, pos_dst + ret);
- spin_unlock(&dst->v.i_lock);
-
- if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
- IS_SYNC(file_inode(file_dst)))
- ret = bch2_flush_inode(c, dst);
-err:
- bch2_quota_reservation_put(c, dst, &quota_res);
- bch2_unlock_inodes(INODE_PAGECACHE_BLOCK, src, dst);
- unlock_two_nondirectories(&src->v, &dst->v);
-
- return bch2_err_class(ret);
-}
-
-/* fseek: */
-
-static loff_t bch2_seek_data(struct file *file, u64 offset)
-{
- struct bch_inode_info *inode = file_bch_inode(file);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- subvol_inum inum = inode_inum(inode);
- u64 isize, next_data = MAX_LFS_FILESIZE;
-
- isize = i_size_read(&inode->v);
- if (offset >= isize)
- return -ENXIO;
-
- int ret = bch2_trans_run(c,
- for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents,
- POS(inode->v.i_ino, offset >> 9),
- POS(inode->v.i_ino, U64_MAX),
- inum.subvol, 0, k, ({
- if (bkey_extent_is_data(k.k)) {
- next_data = max(offset, bkey_start_offset(k.k) << 9);
- break;
- } else if (k.k->p.offset >> 9 > isize)
- break;
- 0;
- })));
- if (ret)
- return ret;
-
- if (next_data > offset)
- next_data = bch2_seek_pagecache_data(&inode->v,
- offset, next_data, 0, false);
-
- if (next_data >= isize)
- return -ENXIO;
-
- return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
-}
-
-static loff_t bch2_seek_hole(struct file *file, u64 offset)
-{
- struct bch_inode_info *inode = file_bch_inode(file);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- subvol_inum inum = inode_inum(inode);
- u64 isize, next_hole = MAX_LFS_FILESIZE;
-
- isize = i_size_read(&inode->v);
- if (offset >= isize)
- return -ENXIO;
-
- int ret = bch2_trans_run(c,
- for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents,
- POS(inode->v.i_ino, offset >> 9),
- POS(inode->v.i_ino, U64_MAX),
- inum.subvol, BTREE_ITER_slots, k, ({
- if (k.k->p.inode != inode->v.i_ino) {
- next_hole = bch2_seek_pagecache_hole(&inode->v,
- offset, MAX_LFS_FILESIZE, 0, false);
- break;
- } else if (!bkey_extent_is_data(k.k)) {
- next_hole = bch2_seek_pagecache_hole(&inode->v,
- max(offset, bkey_start_offset(k.k) << 9),
- k.k->p.offset << 9, 0, false);
-
- if (next_hole < k.k->p.offset << 9)
- break;
- } else {
- offset = max(offset, bkey_start_offset(k.k) << 9);
- }
- 0;
- })));
- if (ret)
- return ret;
-
- if (next_hole > isize)
- next_hole = isize;
-
- return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
-}
-
-loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
-{
- loff_t ret;
-
- switch (whence) {
- case SEEK_SET:
- case SEEK_CUR:
- case SEEK_END:
- ret = generic_file_llseek(file, offset, whence);
- break;
- case SEEK_DATA:
- ret = bch2_seek_data(file, offset);
- break;
- case SEEK_HOLE:
- ret = bch2_seek_hole(file, offset);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return bch2_err_class(ret);
-}
-
-void bch2_fs_fsio_exit(struct bch_fs *c)
-{
- bioset_exit(&c->nocow_flush_bioset);
-}
-
-int bch2_fs_fsio_init(struct bch_fs *c)
-{
- if (bioset_init(&c->nocow_flush_bioset,
- 1, offsetof(struct nocow_flush, bio), 0))
- return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;
-
- return 0;
-}
-
-#endif /* NO_BCACHEFS_FS */
diff --git a/fs/bcachefs/fs-io.h b/fs/bcachefs/fs-io.h
deleted file mode 100644
index ca70346e68dc..000000000000
--- a/fs/bcachefs/fs-io.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_FS_IO_H
-#define _BCACHEFS_FS_IO_H
-
-#ifndef NO_BCACHEFS_FS
-
-#include "buckets.h"
-#include "fs.h"
-#include "io_write_types.h"
-#include "quota.h"
-
-#include <linux/uio.h>
-
-struct folio_vec {
- struct folio *fv_folio;
- size_t fv_offset;
- size_t fv_len;
-};
-
-static inline struct folio_vec biovec_to_foliovec(struct bio_vec bv)
-{
-
- struct folio *folio = page_folio(bv.bv_page);
- size_t offset = (folio_page_idx(folio, bv.bv_page) << PAGE_SHIFT) +
- bv.bv_offset;
- size_t len = min_t(size_t, folio_size(folio) - offset, bv.bv_len);
-
- return (struct folio_vec) {
- .fv_folio = folio,
- .fv_offset = offset,
- .fv_len = len,
- };
-}
-
-static inline struct folio_vec bio_iter_iovec_folio(struct bio *bio,
- struct bvec_iter iter)
-{
- return biovec_to_foliovec(bio_iter_iovec(bio, iter));
-}
-
-#define __bio_for_each_folio(bvl, bio, iter, start) \
- for (iter = (start); \
- (iter).bi_size && \
- ((bvl = bio_iter_iovec_folio((bio), (iter))), 1); \
- bio_advance_iter_single((bio), &(iter), (bvl).fv_len))
-
-/**
- * bio_for_each_folio - iterate over folios within a bio
- *
- * Like other non-_all versions, this iterates over what bio->bi_iter currently
- * points to. This version is for drivers, where the bio may have previously
- * been split or cloned.
- */
-#define bio_for_each_folio(bvl, bio, iter) \
- __bio_for_each_folio(bvl, bio, iter, (bio)->bi_iter)
-
-struct quota_res {
- u64 sectors;
-};
-
-#ifdef CONFIG_BCACHEFS_QUOTA
-
-static inline void __bch2_quota_reservation_put(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct quota_res *res)
-{
- BUG_ON(res->sectors > inode->ei_quota_reserved);
-
- bch2_quota_acct(c, inode->ei_qid, Q_SPC,
- -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
- inode->ei_quota_reserved -= res->sectors;
- res->sectors = 0;
-}
-
-static inline void bch2_quota_reservation_put(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct quota_res *res)
-{
- if (res->sectors) {
- mutex_lock(&inode->ei_quota_lock);
- __bch2_quota_reservation_put(c, inode, res);
- mutex_unlock(&inode->ei_quota_lock);
- }
-}
-
-static inline int bch2_quota_reservation_add(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct quota_res *res,
- u64 sectors,
- bool check_enospc)
-{
- int ret;
-
- if (test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags))
- return 0;
-
- mutex_lock(&inode->ei_quota_lock);
- ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
- check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
- if (likely(!ret)) {
- inode->ei_quota_reserved += sectors;
- res->sectors += sectors;
- }
- mutex_unlock(&inode->ei_quota_lock);
-
- return ret;
-}
-
-#else
-
-static inline void __bch2_quota_reservation_put(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct quota_res *res) {}
-
-static inline void bch2_quota_reservation_put(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct quota_res *res) {}
-
-static inline int bch2_quota_reservation_add(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct quota_res *res,
- unsigned sectors,
- bool check_enospc)
-{
- return 0;
-}
-
-#endif
-
-void __bch2_i_sectors_acct(struct bch_fs *, struct bch_inode_info *,
- struct quota_res *, s64);
-
-static inline void bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
- struct quota_res *quota_res, s64 sectors)
-{
- if (sectors) {
- mutex_lock(&inode->ei_quota_lock);
- __bch2_i_sectors_acct(c, inode, quota_res, sectors);
- mutex_unlock(&inode->ei_quota_lock);
- }
-}
-
-static inline struct address_space *faults_disabled_mapping(void)
-{
- return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
-}
-
-static inline void set_fdm_dropped_locks(void)
-{
- current->faults_disabled_mapping =
- (void *) (((unsigned long) current->faults_disabled_mapping)|1);
-}
-
-static inline bool fdm_dropped_locks(void)
-{
- return ((unsigned long) current->faults_disabled_mapping) & 1;
-}
-
-void bch2_inode_flush_nocow_writes_async(struct bch_fs *,
- struct bch_inode_info *, struct closure *);
-
-int __must_check bch2_write_inode_size(struct bch_fs *,
- struct bch_inode_info *,
- loff_t, unsigned);
-
-int bch2_fsync(struct file *, loff_t, loff_t, int);
-
-int bchfs_truncate(struct mnt_idmap *,
- struct bch_inode_info *, struct iattr *);
-long bch2_fallocate_dispatch(struct file *, int, loff_t, loff_t);
-
-loff_t bch2_remap_file_range(struct file *, loff_t, struct file *,
- loff_t, loff_t, unsigned);
-
-loff_t bch2_llseek(struct file *, loff_t, int);
-
-void bch2_fs_fsio_exit(struct bch_fs *);
-int bch2_fs_fsio_init(struct bch_fs *);
-#else
-static inline void bch2_fs_fsio_exit(struct bch_fs *c) {}
-static inline int bch2_fs_fsio_init(struct bch_fs *c) { return 0; }
-#endif
-
-#endif /* _BCACHEFS_FS_IO_H */
diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c
deleted file mode 100644
index 405cf08bda34..000000000000
--- a/fs/bcachefs/fs-ioctl.c
+++ /dev/null
@@ -1,634 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef NO_BCACHEFS_FS
-
-#include "bcachefs.h"
-#include "chardev.h"
-#include "dirent.h"
-#include "fs.h"
-#include "fs-common.h"
-#include "fs-ioctl.h"
-#include "quota.h"
-
-#include <linux/compat.h>
-#include <linux/fsnotify.h>
-#include <linux/mount.h>
-#include <linux/namei.h>
-#include <linux/security.h>
-#include <linux/writeback.h>
-
-#define FS_IOC_GOINGDOWN _IOR('X', 125, __u32)
-#define FSOP_GOING_FLAGS_DEFAULT 0x0 /* going down */
-#define FSOP_GOING_FLAGS_LOGFLUSH 0x1 /* flush log but not data */
-#define FSOP_GOING_FLAGS_NOLOGFLUSH 0x2 /* don't flush log nor data */
-
-struct flags_set {
- unsigned mask;
- unsigned flags;
-
- unsigned projid;
-
- bool set_projinherit;
- bool projinherit;
-};
-
-static int bch2_inode_flags_set(struct btree_trans *trans,
- struct bch_inode_info *inode,
- struct bch_inode_unpacked *bi,
- void *p)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- /*
- * We're relying on btree locking here for exclusion with other ioctl
- * calls - use the flags in the btree (@bi), not inode->i_flags:
- */
- struct flags_set *s = p;
- unsigned newflags = s->flags;
- unsigned oldflags = bi->bi_flags & s->mask;
-
- if (((newflags ^ oldflags) & (BCH_INODE_append|BCH_INODE_immutable)) &&
- !capable(CAP_LINUX_IMMUTABLE))
- return -EPERM;
-
- if (!S_ISREG(bi->bi_mode) &&
- !S_ISDIR(bi->bi_mode) &&
- (newflags & (BCH_INODE_nodump|BCH_INODE_noatime)) != newflags)
- return -EINVAL;
-
- if (s->set_projinherit) {
- bi->bi_fields_set &= ~(1 << Inode_opt_project);
- bi->bi_fields_set |= ((int) s->projinherit << Inode_opt_project);
- }
-
- bi->bi_flags &= ~s->mask;
- bi->bi_flags |= newflags;
-
- bi->bi_ctime = timespec_to_bch2_time(c, current_time(&inode->v));
- return 0;
-}
-
-static int bch2_ioc_getflags(struct bch_inode_info *inode, int __user *arg)
-{
- unsigned flags = map_flags(bch_flags_to_uflags, inode->ei_inode.bi_flags);
-
- return put_user(flags, arg);
-}
-
-static int bch2_ioc_setflags(struct bch_fs *c,
- struct file *file,
- struct bch_inode_info *inode,
- void __user *arg)
-{
- struct flags_set s = { .mask = map_defined(bch_flags_to_uflags) };
- unsigned uflags;
- int ret;
-
- if (get_user(uflags, (int __user *) arg))
- return -EFAULT;
-
- s.flags = map_flags_rev(bch_flags_to_uflags, uflags);
- if (uflags)
- return -EOPNOTSUPP;
-
- ret = mnt_want_write_file(file);
- if (ret)
- return ret;
-
- inode_lock(&inode->v);
- if (!inode_owner_or_capable(file_mnt_idmap(file), &inode->v)) {
- ret = -EACCES;
- goto setflags_out;
- }
-
- mutex_lock(&inode->ei_update_lock);
- ret = bch2_subvol_is_ro(c, inode->ei_inum.subvol) ?:
- bch2_write_inode(c, inode, bch2_inode_flags_set, &s,
- ATTR_CTIME);
- mutex_unlock(&inode->ei_update_lock);
-
-setflags_out:
- inode_unlock(&inode->v);
- mnt_drop_write_file(file);
- return ret;
-}
-
-static int bch2_ioc_fsgetxattr(struct bch_inode_info *inode,
- struct fsxattr __user *arg)
-{
- struct fsxattr fa = { 0 };
-
- fa.fsx_xflags = map_flags(bch_flags_to_xflags, inode->ei_inode.bi_flags);
-
- if (inode->ei_inode.bi_fields_set & (1 << Inode_opt_project))
- fa.fsx_xflags |= FS_XFLAG_PROJINHERIT;
-
- fa.fsx_projid = inode->ei_qid.q[QTYP_PRJ];
-
- if (copy_to_user(arg, &fa, sizeof(fa)))
- return -EFAULT;
-
- return 0;
-}
-
-static int fssetxattr_inode_update_fn(struct btree_trans *trans,
- struct bch_inode_info *inode,
- struct bch_inode_unpacked *bi,
- void *p)
-{
- struct flags_set *s = p;
-
- if (s->projid != bi->bi_project) {
- bi->bi_fields_set |= 1U << Inode_opt_project;
- bi->bi_project = s->projid;
- }
-
- return bch2_inode_flags_set(trans, inode, bi, p);
-}
-
-static int bch2_ioc_fssetxattr(struct bch_fs *c,
- struct file *file,
- struct bch_inode_info *inode,
- struct fsxattr __user *arg)
-{
- struct flags_set s = { .mask = map_defined(bch_flags_to_xflags) };
- struct fsxattr fa;
- int ret;
-
- if (copy_from_user(&fa, arg, sizeof(fa)))
- return -EFAULT;
-
- s.set_projinherit = true;
- s.projinherit = (fa.fsx_xflags & FS_XFLAG_PROJINHERIT) != 0;
- fa.fsx_xflags &= ~FS_XFLAG_PROJINHERIT;
-
- s.flags = map_flags_rev(bch_flags_to_xflags, fa.fsx_xflags);
- if (fa.fsx_xflags)
- return -EOPNOTSUPP;
-
- if (fa.fsx_projid >= U32_MAX)
- return -EINVAL;
-
- /*
- * inode fields accessible via the xattr interface are stored with a +1
- * bias, so that 0 means unset:
- */
- s.projid = fa.fsx_projid + 1;
-
- ret = mnt_want_write_file(file);
- if (ret)
- return ret;
-
- inode_lock(&inode->v);
- if (!inode_owner_or_capable(file_mnt_idmap(file), &inode->v)) {
- ret = -EACCES;
- goto err;
- }
-
- mutex_lock(&inode->ei_update_lock);
- ret = bch2_subvol_is_ro(c, inode->ei_inum.subvol) ?:
- bch2_set_projid(c, inode, fa.fsx_projid) ?:
- bch2_write_inode(c, inode, fssetxattr_inode_update_fn, &s,
- ATTR_CTIME);
- mutex_unlock(&inode->ei_update_lock);
-err:
- inode_unlock(&inode->v);
- mnt_drop_write_file(file);
- return ret;
-}
-
-static int bch2_reinherit_attrs_fn(struct btree_trans *trans,
- struct bch_inode_info *inode,
- struct bch_inode_unpacked *bi,
- void *p)
-{
- struct bch_inode_info *dir = p;
-
- return !bch2_reinherit_attrs(bi, &dir->ei_inode);
-}
-
-static int bch2_ioc_reinherit_attrs(struct bch_fs *c,
- struct file *file,
- struct bch_inode_info *src,
- const char __user *name)
-{
- struct bch_hash_info hash = bch2_hash_info_init(c, &src->ei_inode);
- struct bch_inode_info *dst;
- struct inode *vinode = NULL;
- char *kname = NULL;
- struct qstr qstr;
- int ret = 0;
- subvol_inum inum;
-
- kname = kmalloc(BCH_NAME_MAX + 1, GFP_KERNEL);
- if (!kname)
- return -ENOMEM;
-
- ret = strncpy_from_user(kname, name, BCH_NAME_MAX);
- if (unlikely(ret < 0))
- goto err1;
-
- qstr.len = ret;
- qstr.name = kname;
-
- ret = bch2_dirent_lookup(c, inode_inum(src), &hash, &qstr, &inum);
- if (ret)
- goto err1;
-
- vinode = bch2_vfs_inode_get(c, inum);
- ret = PTR_ERR_OR_ZERO(vinode);
- if (ret)
- goto err1;
-
- dst = to_bch_ei(vinode);
-
- ret = mnt_want_write_file(file);
- if (ret)
- goto err2;
-
- bch2_lock_inodes(INODE_UPDATE_LOCK, src, dst);
-
- if (inode_attr_changing(src, dst, Inode_opt_project)) {
- ret = bch2_fs_quota_transfer(c, dst,
- src->ei_qid,
- 1 << QTYP_PRJ,
- KEY_TYPE_QUOTA_PREALLOC);
- if (ret)
- goto err3;
- }
-
- ret = bch2_write_inode(c, dst, bch2_reinherit_attrs_fn, src, 0);
-err3:
- bch2_unlock_inodes(INODE_UPDATE_LOCK, src, dst);
-
- /* return true if we did work */
- if (ret >= 0)
- ret = !ret;
-
- mnt_drop_write_file(file);
-err2:
- iput(vinode);
-err1:
- kfree(kname);
-
- return ret;
-}
-
-static int bch2_ioc_getversion(struct bch_inode_info *inode, u32 __user *arg)
-{
- return put_user(inode->v.i_generation, arg);
-}
-
-static int bch2_ioc_getlabel(struct bch_fs *c, char __user *user_label)
-{
- int ret;
- size_t len;
- char label[BCH_SB_LABEL_SIZE];
-
- BUILD_BUG_ON(BCH_SB_LABEL_SIZE >= FSLABEL_MAX);
-
- mutex_lock(&c->sb_lock);
- memcpy(label, c->disk_sb.sb->label, BCH_SB_LABEL_SIZE);
- mutex_unlock(&c->sb_lock);
-
- len = strnlen(label, BCH_SB_LABEL_SIZE);
- if (len == BCH_SB_LABEL_SIZE) {
- bch_warn(c,
- "label is too long, return the first %zu bytes",
- --len);
- }
-
- ret = copy_to_user(user_label, label, len);
-
- return ret ? -EFAULT : 0;
-}
-
-static int bch2_ioc_setlabel(struct bch_fs *c,
- struct file *file,
- struct bch_inode_info *inode,
- const char __user *user_label)
-{
- int ret;
- char label[BCH_SB_LABEL_SIZE];
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(label, user_label, sizeof(label)))
- return -EFAULT;
-
- if (strnlen(label, BCH_SB_LABEL_SIZE) == BCH_SB_LABEL_SIZE) {
- bch_err(c,
- "unable to set label with more than %d bytes",
- BCH_SB_LABEL_SIZE - 1);
- return -EINVAL;
- }
-
- ret = mnt_want_write_file(file);
- if (ret)
- return ret;
-
- mutex_lock(&c->sb_lock);
- strscpy(c->disk_sb.sb->label, label, BCH_SB_LABEL_SIZE);
- ret = bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-
- mnt_drop_write_file(file);
- return ret;
-}
-
-static int bch2_ioc_goingdown(struct bch_fs *c, u32 __user *arg)
-{
- u32 flags;
- int ret = 0;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (get_user(flags, arg))
- return -EFAULT;
-
- bch_notice(c, "shutdown by ioctl type %u", flags);
-
- switch (flags) {
- case FSOP_GOING_FLAGS_DEFAULT:
- ret = bdev_freeze(c->vfs_sb->s_bdev);
- if (ret)
- break;
- bch2_journal_flush(&c->journal);
- bch2_fs_emergency_read_only(c);
- bdev_thaw(c->vfs_sb->s_bdev);
- break;
- case FSOP_GOING_FLAGS_LOGFLUSH:
- bch2_journal_flush(&c->journal);
- fallthrough;
- case FSOP_GOING_FLAGS_NOLOGFLUSH:
- bch2_fs_emergency_read_only(c);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static long bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp,
- struct bch_ioctl_subvolume arg)
-{
- struct inode *dir;
- struct bch_inode_info *inode;
- struct user_namespace *s_user_ns;
- struct dentry *dst_dentry;
- struct path src_path, dst_path;
- int how = LOOKUP_FOLLOW;
- int error;
- subvol_inum snapshot_src = { 0 };
- unsigned lookup_flags = 0;
- unsigned create_flags = BCH_CREATE_SUBVOL;
-
- if (arg.flags & ~(BCH_SUBVOL_SNAPSHOT_CREATE|
- BCH_SUBVOL_SNAPSHOT_RO))
- return -EINVAL;
-
- if (!(arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE) &&
- (arg.src_ptr ||
- (arg.flags & BCH_SUBVOL_SNAPSHOT_RO)))
- return -EINVAL;
-
- if (arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE)
- create_flags |= BCH_CREATE_SNAPSHOT;
-
- if (arg.flags & BCH_SUBVOL_SNAPSHOT_RO)
- create_flags |= BCH_CREATE_SNAPSHOT_RO;
-
- if (arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE) {
- /* sync_inodes_sb() requires that s_umount be locked */
- down_read(&c->vfs_sb->s_umount);
- sync_inodes_sb(c->vfs_sb);
- up_read(&c->vfs_sb->s_umount);
- }
-retry:
- if (arg.src_ptr) {
- error = user_path_at(arg.dirfd,
- (const char __user *)(unsigned long)arg.src_ptr,
- how, &src_path);
- if (error)
- goto err1;
-
- if (src_path.dentry->d_sb->s_fs_info != c) {
- path_put(&src_path);
- error = -EXDEV;
- goto err1;
- }
-
- snapshot_src = inode_inum(to_bch_ei(src_path.dentry->d_inode));
- }
-
- dst_dentry = user_path_create(arg.dirfd,
- (const char __user *)(unsigned long)arg.dst_ptr,
- &dst_path, lookup_flags);
- error = PTR_ERR_OR_ZERO(dst_dentry);
- if (error)
- goto err2;
-
- if (dst_dentry->d_sb->s_fs_info != c) {
- error = -EXDEV;
- goto err3;
- }
-
- if (dst_dentry->d_inode) {
- error = -BCH_ERR_EEXIST_subvolume_create;
- goto err3;
- }
-
- dir = dst_path.dentry->d_inode;
- if (IS_DEADDIR(dir)) {
- error = -BCH_ERR_ENOENT_directory_dead;
- goto err3;
- }
-
- s_user_ns = dir->i_sb->s_user_ns;
- if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
- !kgid_has_mapping(s_user_ns, current_fsgid())) {
- error = -EOVERFLOW;
- goto err3;
- }
-
- error = inode_permission(file_mnt_idmap(filp),
- dir, MAY_WRITE | MAY_EXEC);
- if (error)
- goto err3;
-
- if (!IS_POSIXACL(dir))
- arg.mode &= ~current_umask();
-
- error = security_path_mkdir(&dst_path, dst_dentry, arg.mode);
- if (error)
- goto err3;
-
- if ((arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE) &&
- !arg.src_ptr)
- snapshot_src.subvol = inode_inum(to_bch_ei(dir)).subvol;
-
- down_write(&c->snapshot_create_lock);
- inode = __bch2_create(file_mnt_idmap(filp), to_bch_ei(dir),
- dst_dentry, arg.mode|S_IFDIR,
- 0, snapshot_src, create_flags);
- up_write(&c->snapshot_create_lock);
-
- error = PTR_ERR_OR_ZERO(inode);
- if (error)
- goto err3;
-
- d_instantiate(dst_dentry, &inode->v);
- fsnotify_mkdir(dir, dst_dentry);
-err3:
- done_path_create(&dst_path, dst_dentry);
-err2:
- if (arg.src_ptr)
- path_put(&src_path);
-
- if (retry_estale(error, lookup_flags)) {
- lookup_flags |= LOOKUP_REVAL;
- goto retry;
- }
-err1:
- return error;
-}
-
-static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp,
- struct bch_ioctl_subvolume arg)
-{
- const char __user *name = (void __user *)(unsigned long)arg.dst_ptr;
- struct path path;
- struct inode *dir;
- struct dentry *victim;
- int ret = 0;
-
- if (arg.flags)
- return -EINVAL;
-
- victim = user_path_locked_at(arg.dirfd, name, &path);
- if (IS_ERR(victim))
- return PTR_ERR(victim);
-
- dir = d_inode(path.dentry);
- if (victim->d_sb->s_fs_info != c) {
- ret = -EXDEV;
- goto err;
- }
- if (!d_is_positive(victim)) {
- ret = -ENOENT;
- goto err;
- }
- ret = __bch2_unlink(dir, victim, true);
- if (!ret) {
- fsnotify_rmdir(dir, victim);
- d_delete(victim);
- }
-err:
- inode_unlock(dir);
- dput(victim);
- path_put(&path);
- return ret;
-}
-
-long bch2_fs_file_ioctl(struct file *file, unsigned cmd, unsigned long arg)
-{
- struct bch_inode_info *inode = file_bch_inode(file);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- long ret;
-
- switch (cmd) {
- case FS_IOC_GETFLAGS:
- ret = bch2_ioc_getflags(inode, (int __user *) arg);
- break;
-
- case FS_IOC_SETFLAGS:
- ret = bch2_ioc_setflags(c, file, inode, (int __user *) arg);
- break;
-
- case FS_IOC_FSGETXATTR:
- ret = bch2_ioc_fsgetxattr(inode, (void __user *) arg);
- break;
-
- case FS_IOC_FSSETXATTR:
- ret = bch2_ioc_fssetxattr(c, file, inode,
- (void __user *) arg);
- break;
-
- case BCHFS_IOC_REINHERIT_ATTRS:
- ret = bch2_ioc_reinherit_attrs(c, file, inode,
- (void __user *) arg);
- break;
-
- case FS_IOC_GETVERSION:
- ret = bch2_ioc_getversion(inode, (u32 __user *) arg);
- break;
-
- case FS_IOC_SETVERSION:
- ret = -ENOTTY;
- break;
-
- case FS_IOC_GETFSLABEL:
- ret = bch2_ioc_getlabel(c, (void __user *) arg);
- break;
-
- case FS_IOC_SETFSLABEL:
- ret = bch2_ioc_setlabel(c, file, inode, (const void __user *) arg);
- break;
-
- case FS_IOC_GOINGDOWN:
- ret = bch2_ioc_goingdown(c, (u32 __user *) arg);
- break;
-
- case BCH_IOCTL_SUBVOLUME_CREATE: {
- struct bch_ioctl_subvolume i;
-
- ret = copy_from_user(&i, (void __user *) arg, sizeof(i))
- ? -EFAULT
- : bch2_ioctl_subvolume_create(c, file, i);
- break;
- }
-
- case BCH_IOCTL_SUBVOLUME_DESTROY: {
- struct bch_ioctl_subvolume i;
-
- ret = copy_from_user(&i, (void __user *) arg, sizeof(i))
- ? -EFAULT
- : bch2_ioctl_subvolume_destroy(c, file, i);
- break;
- }
-
- default:
- ret = bch2_fs_ioctl(c, cmd, (void __user *) arg);
- break;
- }
-
- return bch2_err_class(ret);
-}
-
-#ifdef CONFIG_COMPAT
-long bch2_compat_fs_ioctl(struct file *file, unsigned cmd, unsigned long arg)
-{
- /* These are just misnamed; they actually get/put an int from/to user space */
- switch (cmd) {
- case FS_IOC32_GETFLAGS:
- cmd = FS_IOC_GETFLAGS;
- break;
- case FS_IOC32_SETFLAGS:
- cmd = FS_IOC_SETFLAGS;
- break;
- case FS_IOC32_GETVERSION:
- cmd = FS_IOC_GETVERSION;
- break;
- case FS_IOC_GETFSLABEL:
- case FS_IOC_SETFSLABEL:
- break;
- default:
- return -ENOIOCTLCMD;
- }
- return bch2_fs_file_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
-}
-#endif
-
-#endif /* NO_BCACHEFS_FS */
diff --git a/fs/bcachefs/fs-ioctl.h b/fs/bcachefs/fs-ioctl.h
deleted file mode 100644
index d30f9bb056fd..000000000000
--- a/fs/bcachefs/fs-ioctl.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_FS_IOCTL_H
-#define _BCACHEFS_FS_IOCTL_H
-
-/* Inode flags: */
-
-/* bcachefs inode flags -> vfs inode flags: */
-static const __maybe_unused unsigned bch_flags_to_vfs[] = {
- [__BCH_INODE_sync] = S_SYNC,
- [__BCH_INODE_immutable] = S_IMMUTABLE,
- [__BCH_INODE_append] = S_APPEND,
- [__BCH_INODE_noatime] = S_NOATIME,
-};
-
-/* bcachefs inode flags -> FS_IOC_GETFLAGS: */
-static const __maybe_unused unsigned bch_flags_to_uflags[] = {
- [__BCH_INODE_sync] = FS_SYNC_FL,
- [__BCH_INODE_immutable] = FS_IMMUTABLE_FL,
- [__BCH_INODE_append] = FS_APPEND_FL,
- [__BCH_INODE_nodump] = FS_NODUMP_FL,
- [__BCH_INODE_noatime] = FS_NOATIME_FL,
-};
-
-/* bcachefs inode flags -> FS_IOC_FSGETXATTR: */
-static const __maybe_unused unsigned bch_flags_to_xflags[] = {
- [__BCH_INODE_sync] = FS_XFLAG_SYNC,
- [__BCH_INODE_immutable] = FS_XFLAG_IMMUTABLE,
- [__BCH_INODE_append] = FS_XFLAG_APPEND,
- [__BCH_INODE_nodump] = FS_XFLAG_NODUMP,
- [__BCH_INODE_noatime] = FS_XFLAG_NOATIME,
- //[__BCH_INODE_PROJINHERIT] = FS_XFLAG_PROJINHERIT;
-};
-
-#define set_flags(_map, _in, _out) \
-do { \
- unsigned _i; \
- \
- for (_i = 0; _i < ARRAY_SIZE(_map); _i++) \
- if ((_in) & (1 << _i)) \
- (_out) |= _map[_i]; \
- else \
- (_out) &= ~_map[_i]; \
-} while (0)
-
-#define map_flags(_map, _in) \
-({ \
- unsigned _out = 0; \
- \
- set_flags(_map, _in, _out); \
- _out; \
-})
-
-#define map_flags_rev(_map, _in) \
-({ \
- unsigned _i, _out = 0; \
- \
- for (_i = 0; _i < ARRAY_SIZE(_map); _i++) \
- if ((_in) & _map[_i]) { \
- (_out) |= 1 << _i; \
- (_in) &= ~_map[_i]; \
- } \
- (_out); \
-})
-
-#define map_defined(_map) \
-({ \
- unsigned _in = ~0; \
- \
- map_flags_rev(_map, _in); \
-})
-
-/* Set VFS inode flags from bcachefs inode: */
-static inline void bch2_inode_flags_to_vfs(struct bch_inode_info *inode)
-{
- set_flags(bch_flags_to_vfs, inode->ei_inode.bi_flags, inode->v.i_flags);
-}
-
-long bch2_fs_file_ioctl(struct file *, unsigned, unsigned long);
-long bch2_compat_fs_ioctl(struct file *, unsigned, unsigned long);
-
-#endif /* _BCACHEFS_FS_IOCTL_H */
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
deleted file mode 100644
index a41d0d8a2f7b..000000000000
--- a/fs/bcachefs/fs.c
+++ /dev/null
@@ -1,2392 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef NO_BCACHEFS_FS
-
-#include "bcachefs.h"
-#include "acl.h"
-#include "bkey_buf.h"
-#include "btree_update.h"
-#include "buckets.h"
-#include "chardev.h"
-#include "dirent.h"
-#include "errcode.h"
-#include "extents.h"
-#include "fs.h"
-#include "fs-common.h"
-#include "fs-io.h"
-#include "fs-ioctl.h"
-#include "fs-io-buffered.h"
-#include "fs-io-direct.h"
-#include "fs-io-pagecache.h"
-#include "fsck.h"
-#include "inode.h"
-#include "io_read.h"
-#include "journal.h"
-#include "keylist.h"
-#include "quota.h"
-#include "snapshot.h"
-#include "super.h"
-#include "xattr.h"
-#include "trace.h"
-
-#include <linux/aio.h>
-#include <linux/backing-dev.h>
-#include <linux/exportfs.h>
-#include <linux/fiemap.h>
-#include <linux/fs_context.h>
-#include <linux/module.h>
-#include <linux/pagemap.h>
-#include <linux/posix_acl.h>
-#include <linux/random.h>
-#include <linux/seq_file.h>
-#include <linux/statfs.h>
-#include <linux/string.h>
-#include <linux/xattr.h>
-
-static struct kmem_cache *bch2_inode_cache;
-
-static void bch2_vfs_inode_init(struct btree_trans *, subvol_inum,
- struct bch_inode_info *,
- struct bch_inode_unpacked *,
- struct bch_subvolume *);
-
-void bch2_inode_update_after_write(struct btree_trans *trans,
- struct bch_inode_info *inode,
- struct bch_inode_unpacked *bi,
- unsigned fields)
-{
- struct bch_fs *c = trans->c;
-
- BUG_ON(bi->bi_inum != inode->v.i_ino);
-
- bch2_assert_pos_locked(trans, BTREE_ID_inodes, POS(0, bi->bi_inum));
-
- set_nlink(&inode->v, bch2_inode_nlink_get(bi));
- i_uid_write(&inode->v, bi->bi_uid);
- i_gid_write(&inode->v, bi->bi_gid);
- inode->v.i_mode = bi->bi_mode;
-
- if (fields & ATTR_ATIME)
- inode_set_atime_to_ts(&inode->v, bch2_time_to_timespec(c, bi->bi_atime));
- if (fields & ATTR_MTIME)
- inode_set_mtime_to_ts(&inode->v, bch2_time_to_timespec(c, bi->bi_mtime));
- if (fields & ATTR_CTIME)
- inode_set_ctime_to_ts(&inode->v, bch2_time_to_timespec(c, bi->bi_ctime));
-
- inode->ei_inode = *bi;
-
- bch2_inode_flags_to_vfs(inode);
-}
-
-int __must_check bch2_write_inode(struct bch_fs *c,
- struct bch_inode_info *inode,
- inode_set_fn set,
- void *p, unsigned fields)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter = { NULL };
- struct bch_inode_unpacked inode_u;
- int ret;
-retry:
- bch2_trans_begin(trans);
-
- ret = bch2_inode_peek(trans, &iter, &inode_u, inode_inum(inode),
- BTREE_ITER_intent) ?:
- (set ? set(trans, inode, &inode_u, p) : 0) ?:
- bch2_inode_write(trans, &iter, &inode_u) ?:
- bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
-
- /*
- * the btree node lock protects inode->ei_inode, not ei_update_lock;
- * this is important for inode updates via bchfs_write_index_update
- */
- if (!ret)
- bch2_inode_update_after_write(trans, inode, &inode_u, fields);
-
- bch2_trans_iter_exit(trans, &iter);
-
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- bch2_fs_fatal_err_on(bch2_err_matches(ret, ENOENT), c,
- "%s: inode %llu:%llu not found when updating",
- bch2_err_str(ret),
- inode_inum(inode).subvol,
- inode_inum(inode).inum);
-
- bch2_trans_put(trans);
- return ret < 0 ? ret : 0;
-}
-
-int bch2_fs_quota_transfer(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct bch_qid new_qid,
- unsigned qtypes,
- enum quota_acct_mode mode)
-{
- unsigned i;
- int ret;
-
- qtypes &= enabled_qtypes(c);
-
- for (i = 0; i < QTYP_NR; i++)
- if (new_qid.q[i] == inode->ei_qid.q[i])
- qtypes &= ~(1U << i);
-
- if (!qtypes)
- return 0;
-
- mutex_lock(&inode->ei_quota_lock);
-
- ret = bch2_quota_transfer(c, qtypes, new_qid,
- inode->ei_qid,
- inode->v.i_blocks +
- inode->ei_quota_reserved,
- mode);
- if (!ret)
- for (i = 0; i < QTYP_NR; i++)
- if (qtypes & (1 << i))
- inode->ei_qid.q[i] = new_qid.q[i];
-
- mutex_unlock(&inode->ei_quota_lock);
-
- return ret;
-}
-
-static bool subvol_inum_eq(subvol_inum a, subvol_inum b)
-{
- return a.subvol == b.subvol && a.inum == b.inum;
-}
-
-static u32 bch2_vfs_inode_hash_fn(const void *data, u32 len, u32 seed)
-{
- const subvol_inum *inum = data;
-
- return jhash(&inum->inum, sizeof(inum->inum), seed);
-}
-
-static u32 bch2_vfs_inode_obj_hash_fn(const void *data, u32 len, u32 seed)
-{
- const struct bch_inode_info *inode = data;
-
- return bch2_vfs_inode_hash_fn(&inode->ei_inum, sizeof(inode->ei_inum), seed);
-}
-
-static int bch2_vfs_inode_cmp_fn(struct rhashtable_compare_arg *arg,
- const void *obj)
-{
- const struct bch_inode_info *inode = obj;
- const subvol_inum *v = arg->key;
-
- return !subvol_inum_eq(inode->ei_inum, *v);
-}
-
-static const struct rhashtable_params bch2_vfs_inodes_params = {
- .head_offset = offsetof(struct bch_inode_info, hash),
- .key_offset = offsetof(struct bch_inode_info, ei_inum),
- .key_len = sizeof(subvol_inum),
- .hashfn = bch2_vfs_inode_hash_fn,
- .obj_hashfn = bch2_vfs_inode_obj_hash_fn,
- .obj_cmpfn = bch2_vfs_inode_cmp_fn,
- .automatic_shrinking = true,
-};
-
-int bch2_inode_or_descendents_is_open(struct btree_trans *trans, struct bpos p)
-{
- struct bch_fs *c = trans->c;
- struct rhashtable *ht = &c->vfs_inodes_table;
- subvol_inum inum = (subvol_inum) { .inum = p.offset };
- DARRAY(u32) subvols;
- int ret = 0;
-
- if (!test_bit(BCH_FS_started, &c->flags))
- return false;
-
- darray_init(&subvols);
-restart_from_top:
-
- /*
- * Tweaked version of __rhashtable_lookup(); we need to get a list of
- * subvolumes in which the given inode number is open.
- *
- * For this to work, we don't include the subvolume ID in the key that
- * we hash - all inodes with the same inode number regardless of
- * subvolume will hash to the same slot.
- *
- * This will be less than ideal if the same file is ever open
- * simultaneously in many different snapshots:
- */
- rcu_read_lock();
- struct rhash_lock_head __rcu *const *bkt;
- struct rhash_head *he;
- unsigned int hash;
- struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-restart:
- hash = rht_key_hashfn(ht, tbl, &inum, bch2_vfs_inodes_params);
- bkt = rht_bucket(tbl, hash);
- do {
- struct bch_inode_info *inode;
-
- rht_for_each_entry_rcu_from(inode, he, rht_ptr_rcu(bkt), tbl, hash, hash) {
- if (inode->ei_inum.inum == inum.inum) {
- ret = darray_push_gfp(&subvols, inode->ei_inum.subvol,
- GFP_NOWAIT|__GFP_NOWARN);
- if (ret) {
- rcu_read_unlock();
- ret = darray_make_room(&subvols, 1);
- if (ret)
- goto err;
- subvols.nr = 0;
- goto restart_from_top;
- }
- }
- }
- /* An object might have been moved to a different hash chain,
- * while we walk along it - better check and retry.
- */
- } while (he != RHT_NULLS_MARKER(bkt));
-
- /* Ensure we see any new tables. */
- smp_rmb();
-
- tbl = rht_dereference_rcu(tbl->future_tbl, ht);
- if (unlikely(tbl))
- goto restart;
- rcu_read_unlock();
-
- darray_for_each(subvols, i) {
- u32 snap;
- ret = bch2_subvolume_get_snapshot(trans, *i, &snap);
- if (ret)
- goto err;
-
- ret = bch2_snapshot_is_ancestor(c, snap, p.snapshot);
- if (ret)
- break;
- }
-err:
- darray_exit(&subvols);
- return ret;
-}
-
-static struct bch_inode_info *__bch2_inode_hash_find(struct bch_fs *c, subvol_inum inum)
-{
- return rhashtable_lookup_fast(&c->vfs_inodes_table, &inum, bch2_vfs_inodes_params);
-}
-
-static void __wait_on_freeing_inode(struct bch_fs *c,
- struct bch_inode_info *inode,
- subvol_inum inum)
-{
- wait_queue_head_t *wq;
- struct wait_bit_queue_entry wait;
-
- wq = inode_bit_waitqueue(&wait, &inode->v, __I_NEW);
- prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
- spin_unlock(&inode->v.i_lock);
-
- if (__bch2_inode_hash_find(c, inum) == inode)
- schedule_timeout(HZ * 10);
- finish_wait(wq, &wait.wq_entry);
-}
-
-static struct bch_inode_info *bch2_inode_hash_find(struct bch_fs *c, struct btree_trans *trans,
- subvol_inum inum)
-{
- struct bch_inode_info *inode;
-repeat:
- inode = __bch2_inode_hash_find(c, inum);
- if (inode) {
- spin_lock(&inode->v.i_lock);
- if (!test_bit(EI_INODE_HASHED, &inode->ei_flags)) {
- spin_unlock(&inode->v.i_lock);
- return NULL;
- }
- if ((inode->v.i_state & (I_FREEING|I_WILL_FREE))) {
- if (!trans) {
- __wait_on_freeing_inode(c, inode, inum);
- } else {
- bch2_trans_unlock(trans);
- __wait_on_freeing_inode(c, inode, inum);
- int ret = bch2_trans_relock(trans);
- if (ret)
- return ERR_PTR(ret);
- }
- goto repeat;
- }
- __iget(&inode->v);
- spin_unlock(&inode->v.i_lock);
- }
-
- return inode;
-}
-
-static void bch2_inode_hash_remove(struct bch_fs *c, struct bch_inode_info *inode)
-{
- spin_lock(&inode->v.i_lock);
- bool remove = test_and_clear_bit(EI_INODE_HASHED, &inode->ei_flags);
- spin_unlock(&inode->v.i_lock);
-
- if (remove) {
- int ret = rhashtable_remove_fast(&c->vfs_inodes_table,
- &inode->hash, bch2_vfs_inodes_params);
- BUG_ON(ret);
- inode->v.i_hash.pprev = NULL;
- /*
- * This pairs with the bch2_inode_hash_find() ->
- * __wait_on_freeing_inode() path
- */
- inode_wake_up_bit(&inode->v, __I_NEW);
- }
-}
-
-static struct bch_inode_info *bch2_inode_hash_insert(struct bch_fs *c,
- struct btree_trans *trans,
- struct bch_inode_info *inode)
-{
- struct bch_inode_info *old = inode;
-
- set_bit(EI_INODE_HASHED, &inode->ei_flags);
-retry:
- if (unlikely(rhashtable_lookup_insert_key(&c->vfs_inodes_table,
- &inode->ei_inum,
- &inode->hash,
- bch2_vfs_inodes_params))) {
- old = bch2_inode_hash_find(c, trans, inode->ei_inum);
- if (!old)
- goto retry;
-
- clear_bit(EI_INODE_HASHED, &inode->ei_flags);
-
- /*
- * bcachefs doesn't use I_NEW; we have no use for it since we
- * only insert fully created inodes in the inode hash table. But
- * discard_new_inode() expects it to be set...
- */
- inode->v.i_state |= I_NEW;
- /*
- * We don't want bch2_evict_inode() to delete the inode on disk,
- * we just raced and had another inode in cache. Normally new
- * inodes don't have nlink == 0 - except tmpfiles do...
- */
- set_nlink(&inode->v, 1);
- discard_new_inode(&inode->v);
- return old;
- } else {
- inode_fake_hash(&inode->v);
-
- inode_sb_list_add(&inode->v);
-
- mutex_lock(&c->vfs_inodes_lock);
- list_add(&inode->ei_vfs_inode_list, &c->vfs_inodes_list);
- mutex_unlock(&c->vfs_inodes_lock);
- return inode;
- }
-}
-
-#define memalloc_flags_do(_flags, _do) \
-({ \
- unsigned _saved_flags = memalloc_flags_save(_flags); \
- typeof(_do) _ret = _do; \
- memalloc_noreclaim_restore(_saved_flags); \
- _ret; \
-})
-
-static struct inode *bch2_alloc_inode(struct super_block *sb)
-{
- BUG();
-}
-
-static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c, gfp_t gfp)
-{
- struct bch_inode_info *inode = alloc_inode_sb(c->vfs_sb,
- bch2_inode_cache, gfp);
- if (!inode)
- return NULL;
-
- inode_init_once(&inode->v);
- mutex_init(&inode->ei_update_lock);
- two_state_lock_init(&inode->ei_pagecache_lock);
- INIT_LIST_HEAD(&inode->ei_vfs_inode_list);
- inode->ei_flags = 0;
- mutex_init(&inode->ei_quota_lock);
- memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
-
- if (unlikely(inode_init_always_gfp(c->vfs_sb, &inode->v, gfp))) {
- kmem_cache_free(bch2_inode_cache, inode);
- return NULL;
- }
-
- return inode;
-}
-
-/*
- * Allocate a new inode, dropping/retaking btree locks if necessary:
- */
-static struct bch_inode_info *bch2_new_inode(struct btree_trans *trans)
-{
- struct bch_inode_info *inode = __bch2_new_inode(trans->c, GFP_NOWAIT);
-
- if (unlikely(!inode)) {
- int ret = drop_locks_do(trans, (inode = __bch2_new_inode(trans->c, GFP_NOFS)) ? 0 : -ENOMEM);
- if (ret && inode) {
- __destroy_inode(&inode->v);
- kmem_cache_free(bch2_inode_cache, inode);
- }
- if (ret)
- return ERR_PTR(ret);
- }
-
- return inode;
-}
-
-static struct bch_inode_info *bch2_inode_hash_init_insert(struct btree_trans *trans,
- subvol_inum inum,
- struct bch_inode_unpacked *bi,
- struct bch_subvolume *subvol)
-{
- struct bch_inode_info *inode = bch2_new_inode(trans);
- if (IS_ERR(inode))
- return inode;
-
- bch2_vfs_inode_init(trans, inum, inode, bi, subvol);
-
- return bch2_inode_hash_insert(trans->c, trans, inode);
-
-}
-
-struct inode *bch2_vfs_inode_get(struct bch_fs *c, subvol_inum inum)
-{
- struct bch_inode_info *inode = bch2_inode_hash_find(c, NULL, inum);
- if (inode)
- return &inode->v;
-
- struct btree_trans *trans = bch2_trans_get(c);
-
- struct bch_inode_unpacked inode_u;
- struct bch_subvolume subvol;
- int ret = lockrestart_do(trans,
- bch2_subvolume_get(trans, inum.subvol, true, 0, &subvol) ?:
- bch2_inode_find_by_inum_trans(trans, inum, &inode_u)) ?:
- PTR_ERR_OR_ZERO(inode = bch2_inode_hash_init_insert(trans, inum, &inode_u, &subvol));
- bch2_trans_put(trans);
-
- return ret ? ERR_PTR(ret) : &inode->v;
-}
-
-struct bch_inode_info *
-__bch2_create(struct mnt_idmap *idmap,
- struct bch_inode_info *dir, struct dentry *dentry,
- umode_t mode, dev_t rdev, subvol_inum snapshot_src,
- unsigned flags)
-{
- struct bch_fs *c = dir->v.i_sb->s_fs_info;
- struct btree_trans *trans;
- struct bch_inode_unpacked dir_u;
- struct bch_inode_info *inode;
- struct bch_inode_unpacked inode_u;
- struct posix_acl *default_acl = NULL, *acl = NULL;
- subvol_inum inum;
- struct bch_subvolume subvol;
- u64 journal_seq = 0;
- kuid_t kuid;
- kgid_t kgid;
- int ret;
-
- /*
- * preallocate acls + vfs inode before btree transaction, so that
- * nothing can fail after the transaction succeeds:
- */
-#ifdef CONFIG_BCACHEFS_POSIX_ACL
- ret = posix_acl_create(&dir->v, &mode, &default_acl, &acl);
- if (ret)
- return ERR_PTR(ret);
-#endif
- inode = __bch2_new_inode(c, GFP_NOFS);
- if (unlikely(!inode)) {
- inode = ERR_PTR(-ENOMEM);
- goto err;
- }
-
- bch2_inode_init_early(c, &inode_u);
-
- if (!(flags & BCH_CREATE_TMPFILE))
- mutex_lock(&dir->ei_update_lock);
-
- trans = bch2_trans_get(c);
-retry:
- bch2_trans_begin(trans);
-
- kuid = mapped_fsuid(idmap, i_user_ns(&dir->v));
- kgid = mapped_fsgid(idmap, i_user_ns(&dir->v));
- ret = bch2_subvol_is_ro_trans(trans, dir->ei_inum.subvol) ?:
- bch2_create_trans(trans,
- inode_inum(dir), &dir_u, &inode_u,
- !(flags & BCH_CREATE_TMPFILE)
- ? &dentry->d_name : NULL,
- from_kuid(i_user_ns(&dir->v), kuid),
- from_kgid(i_user_ns(&dir->v), kgid),
- mode, rdev,
- default_acl, acl, snapshot_src, flags) ?:
- bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, 1,
- KEY_TYPE_QUOTA_PREALLOC);
- if (unlikely(ret))
- goto err_before_quota;
-
- inum.subvol = inode_u.bi_subvol ?: dir->ei_inum.subvol;
- inum.inum = inode_u.bi_inum;
-
- ret = bch2_subvolume_get(trans, inum.subvol, true,
- BTREE_ITER_with_updates, &subvol) ?:
- bch2_trans_commit(trans, NULL, &journal_seq, 0);
- if (unlikely(ret)) {
- bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1,
- KEY_TYPE_QUOTA_WARN);
-err_before_quota:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
- goto err_trans;
- }
-
- if (!(flags & BCH_CREATE_TMPFILE)) {
- bch2_inode_update_after_write(trans, dir, &dir_u,
- ATTR_MTIME|ATTR_CTIME);
- mutex_unlock(&dir->ei_update_lock);
- }
-
- bch2_vfs_inode_init(trans, inum, inode, &inode_u, &subvol);
-
- set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl);
- set_cached_acl(&inode->v, ACL_TYPE_DEFAULT, default_acl);
-
- /*
- * we must insert the new inode into the inode cache before calling
- * bch2_trans_exit() and dropping locks, else we could race with another
- * thread pulling the inode in and modifying it:
- *
- * also, calling bch2_inode_hash_insert() without passing in the
- * transaction object is sketchy - if we could ever end up in
- * __wait_on_freeing_inode(), we'd risk deadlock.
- *
- * But that shouldn't be possible, since we still have the inode locked
- * that we just created, and we _really_ can't take a transaction
- * restart here.
- */
- inode = bch2_inode_hash_insert(c, NULL, inode);
- bch2_trans_put(trans);
-err:
- posix_acl_release(default_acl);
- posix_acl_release(acl);
- return inode;
-err_trans:
- if (!(flags & BCH_CREATE_TMPFILE))
- mutex_unlock(&dir->ei_update_lock);
-
- bch2_trans_put(trans);
- make_bad_inode(&inode->v);
- iput(&inode->v);
- inode = ERR_PTR(ret);
- goto err;
-}
-
-/* methods */
-
-static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans,
- subvol_inum dir, struct bch_hash_info *dir_hash_info,
- const struct qstr *name)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter dirent_iter = {};
- subvol_inum inum = {};
- struct printbuf buf = PRINTBUF;
-
- struct bkey_s_c k = bch2_hash_lookup(trans, &dirent_iter, bch2_dirent_hash_desc,
- dir_hash_info, dir, name, 0);
- int ret = bkey_err(k);
- if (ret)
- return ERR_PTR(ret);
-
- ret = bch2_dirent_read_target(trans, dir, bkey_s_c_to_dirent(k), &inum);
- if (ret > 0)
- ret = -ENOENT;
- if (ret)
- goto err;
-
- struct bch_inode_info *inode = bch2_inode_hash_find(c, trans, inum);
- if (inode)
- goto out;
-
- struct bch_subvolume subvol;
- struct bch_inode_unpacked inode_u;
- ret = bch2_subvolume_get(trans, inum.subvol, true, 0, &subvol) ?:
- bch2_inode_find_by_inum_nowarn_trans(trans, inum, &inode_u) ?:
- PTR_ERR_OR_ZERO(inode = bch2_inode_hash_init_insert(trans, inum, &inode_u, &subvol));
-
- bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
- c, "dirent to missing inode:\n %s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- if (ret)
- goto err;
-
- /* regular files may have hardlinks: */
- if (bch2_fs_inconsistent_on(bch2_inode_should_have_bp(&inode_u) &&
- !bkey_eq(k.k->p, POS(inode_u.bi_dir, inode_u.bi_dir_offset)),
- c,
- "dirent points to inode that does not point back:\n %s",
- (bch2_bkey_val_to_text(&buf, c, k),
- prt_printf(&buf, "\n "),
- bch2_inode_unpacked_to_text(&buf, &inode_u),
- buf.buf))) {
- ret = -ENOENT;
- goto err;
- }
-out:
- bch2_trans_iter_exit(trans, &dirent_iter);
- printbuf_exit(&buf);
- return inode;
-err:
- inode = ERR_PTR(ret);
- goto out;
-}
-
-static struct dentry *bch2_lookup(struct inode *vdir, struct dentry *dentry,
- unsigned int flags)
-{
- struct bch_fs *c = vdir->i_sb->s_fs_info;
- struct bch_inode_info *dir = to_bch_ei(vdir);
- struct bch_hash_info hash = bch2_hash_info_init(c, &dir->ei_inode);
-
- struct bch_inode_info *inode;
- bch2_trans_do(c,
- PTR_ERR_OR_ZERO(inode = bch2_lookup_trans(trans, inode_inum(dir),
- &hash, &dentry->d_name)));
- if (IS_ERR(inode))
- inode = NULL;
-
- return d_splice_alias(&inode->v, dentry);
-}
-
-static int bch2_mknod(struct mnt_idmap *idmap,
- struct inode *vdir, struct dentry *dentry,
- umode_t mode, dev_t rdev)
-{
- struct bch_inode_info *inode =
- __bch2_create(idmap, to_bch_ei(vdir), dentry, mode, rdev,
- (subvol_inum) { 0 }, 0);
-
- if (IS_ERR(inode))
- return bch2_err_class(PTR_ERR(inode));
-
- d_instantiate(dentry, &inode->v);
- return 0;
-}
-
-static int bch2_create(struct mnt_idmap *idmap,
- struct inode *vdir, struct dentry *dentry,
- umode_t mode, bool excl)
-{
- return bch2_mknod(idmap, vdir, dentry, mode|S_IFREG, 0);
-}
-
-static int __bch2_link(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct bch_inode_info *dir,
- struct dentry *dentry)
-{
- struct bch_inode_unpacked dir_u, inode_u;
- int ret;
-
- mutex_lock(&inode->ei_update_lock);
- struct btree_trans *trans = bch2_trans_get(c);
-
- ret = commit_do(trans, NULL, NULL, 0,
- bch2_link_trans(trans,
- inode_inum(dir), &dir_u,
- inode_inum(inode), &inode_u,
- &dentry->d_name));
-
- if (likely(!ret)) {
- bch2_inode_update_after_write(trans, dir, &dir_u,
- ATTR_MTIME|ATTR_CTIME);
- bch2_inode_update_after_write(trans, inode, &inode_u, ATTR_CTIME);
- }
-
- bch2_trans_put(trans);
- mutex_unlock(&inode->ei_update_lock);
- return ret;
-}
-
-static int bch2_link(struct dentry *old_dentry, struct inode *vdir,
- struct dentry *dentry)
-{
- struct bch_fs *c = vdir->i_sb->s_fs_info;
- struct bch_inode_info *dir = to_bch_ei(vdir);
- struct bch_inode_info *inode = to_bch_ei(old_dentry->d_inode);
- int ret;
-
- lockdep_assert_held(&inode->v.i_rwsem);
-
- ret = bch2_subvol_is_ro(c, dir->ei_inum.subvol) ?:
- bch2_subvol_is_ro(c, inode->ei_inum.subvol) ?:
- __bch2_link(c, inode, dir, dentry);
- if (unlikely(ret))
- return bch2_err_class(ret);
-
- ihold(&inode->v);
- d_instantiate(dentry, &inode->v);
- return 0;
-}
-
-int __bch2_unlink(struct inode *vdir, struct dentry *dentry,
- bool deleting_snapshot)
-{
- struct bch_fs *c = vdir->i_sb->s_fs_info;
- struct bch_inode_info *dir = to_bch_ei(vdir);
- struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
- struct bch_inode_unpacked dir_u, inode_u;
- int ret;
-
- bch2_lock_inodes(INODE_UPDATE_LOCK, dir, inode);
-
- struct btree_trans *trans = bch2_trans_get(c);
-
- ret = commit_do(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc,
- bch2_unlink_trans(trans,
- inode_inum(dir), &dir_u,
- &inode_u, &dentry->d_name,
- deleting_snapshot));
- if (unlikely(ret))
- goto err;
-
- bch2_inode_update_after_write(trans, dir, &dir_u,
- ATTR_MTIME|ATTR_CTIME);
- bch2_inode_update_after_write(trans, inode, &inode_u,
- ATTR_MTIME);
-
- if (inode_u.bi_subvol) {
- /*
- * Subvolume deletion is asynchronous, but we still want to tell
- * the VFS that it's been deleted here:
- */
- set_nlink(&inode->v, 0);
- }
-err:
- bch2_trans_put(trans);
- bch2_unlock_inodes(INODE_UPDATE_LOCK, dir, inode);
-
- return ret;
-}
-
-static int bch2_unlink(struct inode *vdir, struct dentry *dentry)
-{
-	struct bch_inode_info *dir = to_bch_ei(vdir);
- struct bch_fs *c = dir->v.i_sb->s_fs_info;
-
- int ret = bch2_subvol_is_ro(c, dir->ei_inum.subvol) ?:
- __bch2_unlink(vdir, dentry, false);
- return bch2_err_class(ret);
-}
-
-static int bch2_symlink(struct mnt_idmap *idmap,
- struct inode *vdir, struct dentry *dentry,
- const char *symname)
-{
- struct bch_fs *c = vdir->i_sb->s_fs_info;
- struct bch_inode_info *dir = to_bch_ei(vdir), *inode;
- int ret;
-
- inode = __bch2_create(idmap, dir, dentry, S_IFLNK|S_IRWXUGO, 0,
- (subvol_inum) { 0 }, BCH_CREATE_TMPFILE);
- if (IS_ERR(inode))
- return bch2_err_class(PTR_ERR(inode));
-
- inode_lock(&inode->v);
- ret = page_symlink(&inode->v, symname, strlen(symname) + 1);
- inode_unlock(&inode->v);
-
- if (unlikely(ret))
- goto err;
-
- ret = filemap_write_and_wait_range(inode->v.i_mapping, 0, LLONG_MAX);
- if (unlikely(ret))
- goto err;
-
- ret = __bch2_link(c, inode, dir, dentry);
- if (unlikely(ret))
- goto err;
-
- d_instantiate(dentry, &inode->v);
- return 0;
-err:
- iput(&inode->v);
- return bch2_err_class(ret);
-}
-
-static int bch2_mkdir(struct mnt_idmap *idmap,
- struct inode *vdir, struct dentry *dentry, umode_t mode)
-{
- return bch2_mknod(idmap, vdir, dentry, mode|S_IFDIR, 0);
-}
-
-static int bch2_rename2(struct mnt_idmap *idmap,
- struct inode *src_vdir, struct dentry *src_dentry,
- struct inode *dst_vdir, struct dentry *dst_dentry,
- unsigned flags)
-{
- struct bch_fs *c = src_vdir->i_sb->s_fs_info;
- struct bch_inode_info *src_dir = to_bch_ei(src_vdir);
- struct bch_inode_info *dst_dir = to_bch_ei(dst_vdir);
- struct bch_inode_info *src_inode = to_bch_ei(src_dentry->d_inode);
- struct bch_inode_info *dst_inode = to_bch_ei(dst_dentry->d_inode);
- struct bch_inode_unpacked dst_dir_u, src_dir_u;
- struct bch_inode_unpacked src_inode_u, dst_inode_u, *whiteout_inode_u;
- struct btree_trans *trans;
- enum bch_rename_mode mode = flags & RENAME_EXCHANGE
- ? BCH_RENAME_EXCHANGE
- : dst_dentry->d_inode
- ? BCH_RENAME_OVERWRITE : BCH_RENAME;
- bool whiteout = !!(flags & RENAME_WHITEOUT);
- int ret;
-
- if (flags & ~(RENAME_NOREPLACE|RENAME_EXCHANGE|RENAME_WHITEOUT))
- return -EINVAL;
-
- if (mode == BCH_RENAME_OVERWRITE) {
- ret = filemap_write_and_wait_range(src_inode->v.i_mapping,
- 0, LLONG_MAX);
- if (ret)
- return ret;
- }
-
- bch2_lock_inodes(INODE_UPDATE_LOCK,
- src_dir,
- dst_dir,
- src_inode,
- dst_inode);
-
- trans = bch2_trans_get(c);
-
- ret = bch2_subvol_is_ro_trans(trans, src_dir->ei_inum.subvol) ?:
- bch2_subvol_is_ro_trans(trans, dst_dir->ei_inum.subvol);
- if (ret)
- goto err_tx_restart;
-
- if (inode_attr_changing(dst_dir, src_inode, Inode_opt_project)) {
- ret = bch2_fs_quota_transfer(c, src_inode,
- dst_dir->ei_qid,
- 1 << QTYP_PRJ,
- KEY_TYPE_QUOTA_PREALLOC);
- if (ret)
- goto err;
- }
-
- if (mode == BCH_RENAME_EXCHANGE &&
- inode_attr_changing(src_dir, dst_inode, Inode_opt_project)) {
- ret = bch2_fs_quota_transfer(c, dst_inode,
- src_dir->ei_qid,
- 1 << QTYP_PRJ,
- KEY_TYPE_QUOTA_PREALLOC);
- if (ret)
- goto err;
- }
-retry:
- bch2_trans_begin(trans);
-
- ret = bch2_rename_trans(trans,
- inode_inum(src_dir), &src_dir_u,
- inode_inum(dst_dir), &dst_dir_u,
- &src_inode_u,
- &dst_inode_u,
- &src_dentry->d_name,
- &dst_dentry->d_name,
- mode);
- if (unlikely(ret))
- goto err_tx_restart;
-
- if (whiteout) {
- whiteout_inode_u = bch2_trans_kmalloc_nomemzero(trans, sizeof(*whiteout_inode_u));
- ret = PTR_ERR_OR_ZERO(whiteout_inode_u);
- if (unlikely(ret))
- goto err_tx_restart;
- bch2_inode_init_early(c, whiteout_inode_u);
-
- ret = bch2_create_trans(trans,
- inode_inum(src_dir), &src_dir_u,
- whiteout_inode_u,
- &src_dentry->d_name,
- from_kuid(i_user_ns(&src_dir->v), current_fsuid()),
- from_kgid(i_user_ns(&src_dir->v), current_fsgid()),
- S_IFCHR|WHITEOUT_MODE, 0,
- NULL, NULL, (subvol_inum) { 0 }, 0) ?:
- bch2_quota_acct(c, bch_qid(whiteout_inode_u), Q_INO, 1,
- KEY_TYPE_QUOTA_PREALLOC);
- if (unlikely(ret))
- goto err_tx_restart;
- }
-
- ret = bch2_trans_commit(trans, NULL, NULL, 0);
- if (unlikely(ret)) {
-err_tx_restart:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
- goto err;
- }
-
- BUG_ON(src_inode->v.i_ino != src_inode_u.bi_inum);
- BUG_ON(dst_inode &&
- dst_inode->v.i_ino != dst_inode_u.bi_inum);
-
- bch2_inode_update_after_write(trans, src_dir, &src_dir_u,
- ATTR_MTIME|ATTR_CTIME);
-
- if (src_dir != dst_dir)
- bch2_inode_update_after_write(trans, dst_dir, &dst_dir_u,
- ATTR_MTIME|ATTR_CTIME);
-
- bch2_inode_update_after_write(trans, src_inode, &src_inode_u,
- ATTR_CTIME);
-
- if (dst_inode)
- bch2_inode_update_after_write(trans, dst_inode, &dst_inode_u,
- ATTR_CTIME);
-err:
- bch2_trans_put(trans);
-
- bch2_fs_quota_transfer(c, src_inode,
- bch_qid(&src_inode->ei_inode),
- 1 << QTYP_PRJ,
- KEY_TYPE_QUOTA_NOCHECK);
- if (dst_inode)
- bch2_fs_quota_transfer(c, dst_inode,
- bch_qid(&dst_inode->ei_inode),
- 1 << QTYP_PRJ,
- KEY_TYPE_QUOTA_NOCHECK);
-
- bch2_unlock_inodes(INODE_UPDATE_LOCK,
- src_dir,
- dst_dir,
- src_inode,
- dst_inode);
-
- return bch2_err_class(ret);
-}
-
-static void bch2_setattr_copy(struct mnt_idmap *idmap,
- struct bch_inode_info *inode,
- struct bch_inode_unpacked *bi,
- struct iattr *attr)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- unsigned int ia_valid = attr->ia_valid;
- kuid_t kuid;
- kgid_t kgid;
-
- if (ia_valid & ATTR_UID) {
- kuid = from_vfsuid(idmap, i_user_ns(&inode->v), attr->ia_vfsuid);
- bi->bi_uid = from_kuid(i_user_ns(&inode->v), kuid);
- }
- if (ia_valid & ATTR_GID) {
- kgid = from_vfsgid(idmap, i_user_ns(&inode->v), attr->ia_vfsgid);
- bi->bi_gid = from_kgid(i_user_ns(&inode->v), kgid);
- }
-
- if (ia_valid & ATTR_SIZE)
- bi->bi_size = attr->ia_size;
-
- if (ia_valid & ATTR_ATIME)
- bi->bi_atime = timespec_to_bch2_time(c, attr->ia_atime);
- if (ia_valid & ATTR_MTIME)
- bi->bi_mtime = timespec_to_bch2_time(c, attr->ia_mtime);
- if (ia_valid & ATTR_CTIME)
- bi->bi_ctime = timespec_to_bch2_time(c, attr->ia_ctime);
-
- if (ia_valid & ATTR_MODE) {
- umode_t mode = attr->ia_mode;
- kgid_t gid = ia_valid & ATTR_GID
- ? kgid
- : inode->v.i_gid;
-
- if (!in_group_or_capable(idmap, &inode->v,
- make_vfsgid(idmap, i_user_ns(&inode->v), gid)))
- mode &= ~S_ISGID;
- bi->bi_mode = mode;
- }
-}
-
-int bch2_setattr_nonsize(struct mnt_idmap *idmap,
- struct bch_inode_info *inode,
- struct iattr *attr)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_qid qid;
- struct btree_trans *trans;
- struct btree_iter inode_iter = { NULL };
- struct bch_inode_unpacked inode_u;
- struct posix_acl *acl = NULL;
- kuid_t kuid;
- kgid_t kgid;
- int ret;
-
- mutex_lock(&inode->ei_update_lock);
-
- qid = inode->ei_qid;
-
- if (attr->ia_valid & ATTR_UID) {
- kuid = from_vfsuid(idmap, i_user_ns(&inode->v), attr->ia_vfsuid);
- qid.q[QTYP_USR] = from_kuid(i_user_ns(&inode->v), kuid);
- }
-
- if (attr->ia_valid & ATTR_GID) {
- kgid = from_vfsgid(idmap, i_user_ns(&inode->v), attr->ia_vfsgid);
- qid.q[QTYP_GRP] = from_kgid(i_user_ns(&inode->v), kgid);
- }
-
- ret = bch2_fs_quota_transfer(c, inode, qid, ~0,
- KEY_TYPE_QUOTA_PREALLOC);
- if (ret)
- goto err;
-
- trans = bch2_trans_get(c);
-retry:
- bch2_trans_begin(trans);
- kfree(acl);
- acl = NULL;
-
- ret = bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
- BTREE_ITER_intent);
- if (ret)
- goto btree_err;
-
- bch2_setattr_copy(idmap, inode, &inode_u, attr);
-
- if (attr->ia_valid & ATTR_MODE) {
- ret = bch2_acl_chmod(trans, inode_inum(inode), &inode_u,
- inode_u.bi_mode, &acl);
- if (ret)
- goto btree_err;
- }
-
- ret = bch2_inode_write(trans, &inode_iter, &inode_u) ?:
- bch2_trans_commit(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc);
-btree_err:
- bch2_trans_iter_exit(trans, &inode_iter);
-
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
- if (unlikely(ret))
- goto err_trans;
-
- bch2_inode_update_after_write(trans, inode, &inode_u, attr->ia_valid);
-
- if (acl)
- set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl);
-err_trans:
- bch2_trans_put(trans);
-err:
- mutex_unlock(&inode->ei_update_lock);
-
- return bch2_err_class(ret);
-}
-
-static int bch2_getattr(struct mnt_idmap *idmap,
- const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned query_flags)
-{
- struct bch_inode_info *inode = to_bch_ei(d_inode(path->dentry));
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, &inode->v);
- vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, &inode->v);
-
- stat->dev = inode->v.i_sb->s_dev;
- stat->ino = inode->v.i_ino;
- stat->mode = inode->v.i_mode;
- stat->nlink = inode->v.i_nlink;
- stat->uid = vfsuid_into_kuid(vfsuid);
- stat->gid = vfsgid_into_kgid(vfsgid);
- stat->rdev = inode->v.i_rdev;
- stat->size = i_size_read(&inode->v);
- stat->atime = inode_get_atime(&inode->v);
- stat->mtime = inode_get_mtime(&inode->v);
- stat->ctime = inode_get_ctime(&inode->v);
- stat->blksize = block_bytes(c);
- stat->blocks = inode->v.i_blocks;
-
- stat->subvol = inode->ei_inum.subvol;
- stat->result_mask |= STATX_SUBVOL;
-
- if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->v.i_mode)) {
- stat->result_mask |= STATX_DIOALIGN;
- /*
-		 * this is incorrect; we should be tracking this in the superblock,
- * and checking the alignment of open devices
- */
- stat->dio_mem_align = SECTOR_SIZE;
- stat->dio_offset_align = block_bytes(c);
- }
-
- if (request_mask & STATX_BTIME) {
- stat->result_mask |= STATX_BTIME;
- stat->btime = bch2_time_to_timespec(c, inode->ei_inode.bi_otime);
- }
-
- if (inode->ei_inode.bi_flags & BCH_INODE_immutable)
- stat->attributes |= STATX_ATTR_IMMUTABLE;
- stat->attributes_mask |= STATX_ATTR_IMMUTABLE;
-
- if (inode->ei_inode.bi_flags & BCH_INODE_append)
- stat->attributes |= STATX_ATTR_APPEND;
- stat->attributes_mask |= STATX_ATTR_APPEND;
-
- if (inode->ei_inode.bi_flags & BCH_INODE_nodump)
- stat->attributes |= STATX_ATTR_NODUMP;
- stat->attributes_mask |= STATX_ATTR_NODUMP;
-
- return 0;
-}
-
-static int bch2_setattr(struct mnt_idmap *idmap,
- struct dentry *dentry, struct iattr *iattr)
-{
- struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- int ret;
-
- lockdep_assert_held(&inode->v.i_rwsem);
-
- ret = bch2_subvol_is_ro(c, inode->ei_inum.subvol) ?:
- setattr_prepare(idmap, dentry, iattr);
- if (ret)
- return ret;
-
- return iattr->ia_valid & ATTR_SIZE
- ? bchfs_truncate(idmap, inode, iattr)
- : bch2_setattr_nonsize(idmap, inode, iattr);
-}
-
-static int bch2_tmpfile(struct mnt_idmap *idmap,
- struct inode *vdir, struct file *file, umode_t mode)
-{
- struct bch_inode_info *inode =
- __bch2_create(idmap, to_bch_ei(vdir),
- file->f_path.dentry, mode, 0,
- (subvol_inum) { 0 }, BCH_CREATE_TMPFILE);
-
- if (IS_ERR(inode))
- return bch2_err_class(PTR_ERR(inode));
-
- d_mark_tmpfile(file, &inode->v);
- d_instantiate(file->f_path.dentry, &inode->v);
- return finish_open_simple(file, 0);
-}
-
-static int bch2_fill_extent(struct bch_fs *c,
- struct fiemap_extent_info *info,
- struct bkey_s_c k, unsigned flags)
-{
- if (bkey_extent_is_direct_data(k.k)) {
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- int ret;
-
- if (k.k->type == KEY_TYPE_reflink_v)
- flags |= FIEMAP_EXTENT_SHARED;
-
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- int flags2 = 0;
- u64 offset = p.ptr.offset;
-
- if (p.ptr.unwritten)
- flags2 |= FIEMAP_EXTENT_UNWRITTEN;
-
- if (p.crc.compression_type)
- flags2 |= FIEMAP_EXTENT_ENCODED;
- else
- offset += p.crc.offset;
-
- if ((offset & (block_sectors(c) - 1)) ||
- (k.k->size & (block_sectors(c) - 1)))
- flags2 |= FIEMAP_EXTENT_NOT_ALIGNED;
-
- ret = fiemap_fill_next_extent(info,
- bkey_start_offset(k.k) << 9,
- offset << 9,
- k.k->size << 9, flags|flags2);
- if (ret)
- return ret;
- }
-
- return 0;
- } else if (bkey_extent_is_inline_data(k.k)) {
- return fiemap_fill_next_extent(info,
- bkey_start_offset(k.k) << 9,
- 0, k.k->size << 9,
- flags|
- FIEMAP_EXTENT_DATA_INLINE);
- } else if (k.k->type == KEY_TYPE_reservation) {
- return fiemap_fill_next_extent(info,
- bkey_start_offset(k.k) << 9,
- 0, k.k->size << 9,
- flags|
- FIEMAP_EXTENT_DELALLOC|
- FIEMAP_EXTENT_UNWRITTEN);
- } else {
- BUG();
- }
-}
-
-static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
- u64 start, u64 len)
-{
- struct bch_fs *c = vinode->i_sb->s_fs_info;
- struct bch_inode_info *ei = to_bch_ei(vinode);
- struct btree_trans *trans;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bkey_buf cur, prev;
- unsigned offset_into_extent, sectors;
- bool have_extent = false;
- int ret = 0;
-
- ret = fiemap_prep(&ei->v, info, start, &len, FIEMAP_FLAG_SYNC);
- if (ret)
- return ret;
-
- struct bpos end = POS(ei->v.i_ino, (start + len) >> 9);
- if (start + len < start)
- return -EINVAL;
-
- start >>= 9;
-
- bch2_bkey_buf_init(&cur);
- bch2_bkey_buf_init(&prev);
- trans = bch2_trans_get(c);
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- POS(ei->v.i_ino, start), 0);
-
- while (!ret || bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
- enum btree_id data_btree = BTREE_ID_extents;
-
- bch2_trans_begin(trans);
-
- u32 snapshot;
- ret = bch2_subvolume_get_snapshot(trans, ei->ei_inum.subvol, &snapshot);
- if (ret)
- continue;
-
- bch2_btree_iter_set_snapshot(&iter, snapshot);
-
- k = bch2_btree_iter_peek_upto(&iter, end);
- ret = bkey_err(k);
- if (ret)
- continue;
-
- if (!k.k)
- break;
-
- if (!bkey_extent_is_data(k.k) &&
- k.k->type != KEY_TYPE_reservation) {
- bch2_btree_iter_advance(&iter);
- continue;
- }
-
- offset_into_extent = iter.pos.offset -
- bkey_start_offset(k.k);
- sectors = k.k->size - offset_into_extent;
-
- bch2_bkey_buf_reassemble(&cur, c, k);
-
- ret = bch2_read_indirect_extent(trans, &data_btree,
- &offset_into_extent, &cur);
- if (ret)
- continue;
-
- k = bkey_i_to_s_c(cur.k);
- bch2_bkey_buf_realloc(&prev, c, k.k->u64s);
-
- sectors = min(sectors, k.k->size - offset_into_extent);
-
- bch2_cut_front(POS(k.k->p.inode,
- bkey_start_offset(k.k) +
- offset_into_extent),
- cur.k);
- bch2_key_resize(&cur.k->k, sectors);
- cur.k->k.p = iter.pos;
- cur.k->k.p.offset += cur.k->k.size;
-
- if (have_extent) {
- bch2_trans_unlock(trans);
- ret = bch2_fill_extent(c, info,
- bkey_i_to_s_c(prev.k), 0);
- if (ret)
- break;
- }
-
- bkey_copy(prev.k, cur.k);
- have_extent = true;
-
- bch2_btree_iter_set_pos(&iter,
- POS(iter.pos.inode, iter.pos.offset + sectors));
- }
- bch2_trans_iter_exit(trans, &iter);
-
- if (!ret && have_extent) {
- bch2_trans_unlock(trans);
- ret = bch2_fill_extent(c, info, bkey_i_to_s_c(prev.k),
- FIEMAP_EXTENT_LAST);
- }
-
- bch2_trans_put(trans);
- bch2_bkey_buf_exit(&cur, c);
- bch2_bkey_buf_exit(&prev, c);
- return ret < 0 ? ret : 0;
-}
-
-static const struct vm_operations_struct bch_vm_ops = {
- .fault = bch2_page_fault,
- .map_pages = filemap_map_pages,
- .page_mkwrite = bch2_page_mkwrite,
-};
-
-static int bch2_mmap(struct file *file, struct vm_area_struct *vma)
-{
- file_accessed(file);
-
- vma->vm_ops = &bch_vm_ops;
- return 0;
-}
-
-/* Directories: */
-
-static loff_t bch2_dir_llseek(struct file *file, loff_t offset, int whence)
-{
- return generic_file_llseek_size(file, offset, whence,
- S64_MAX, S64_MAX);
-}
-
-static int bch2_vfs_readdir(struct file *file, struct dir_context *ctx)
-{
- struct bch_inode_info *inode = file_bch_inode(file);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
-
- if (!dir_emit_dots(file, ctx))
- return 0;
-
- int ret = bch2_readdir(c, inode_inum(inode), ctx);
-
- bch_err_fn(c, ret);
- return bch2_err_class(ret);
-}
-
-static int bch2_open(struct inode *vinode, struct file *file)
-{
- if (file->f_flags & (O_WRONLY|O_RDWR)) {
- struct bch_inode_info *inode = to_bch_ei(vinode);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
-
- int ret = bch2_subvol_is_ro(c, inode->ei_inum.subvol);
- if (ret)
- return ret;
- }
-
- file->f_mode |= FMODE_CAN_ODIRECT;
-
- return generic_file_open(vinode, file);
-}
-
-static const struct file_operations bch_file_operations = {
- .open = bch2_open,
- .llseek = bch2_llseek,
- .read_iter = bch2_read_iter,
- .write_iter = bch2_write_iter,
- .mmap = bch2_mmap,
- .get_unmapped_area = thp_get_unmapped_area,
- .fsync = bch2_fsync,
- .splice_read = filemap_splice_read,
- .splice_write = iter_file_splice_write,
- .fallocate = bch2_fallocate_dispatch,
- .unlocked_ioctl = bch2_fs_file_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = bch2_compat_fs_ioctl,
-#endif
- .remap_file_range = bch2_remap_file_range,
-};
-
-static const struct inode_operations bch_file_inode_operations = {
- .getattr = bch2_getattr,
- .setattr = bch2_setattr,
- .fiemap = bch2_fiemap,
- .listxattr = bch2_xattr_list,
-#ifdef CONFIG_BCACHEFS_POSIX_ACL
- .get_inode_acl = bch2_get_acl,
- .set_acl = bch2_set_acl,
-#endif
-};
-
-static const struct inode_operations bch_dir_inode_operations = {
- .lookup = bch2_lookup,
- .create = bch2_create,
- .link = bch2_link,
- .unlink = bch2_unlink,
- .symlink = bch2_symlink,
- .mkdir = bch2_mkdir,
- .rmdir = bch2_unlink,
- .mknod = bch2_mknod,
- .rename = bch2_rename2,
- .getattr = bch2_getattr,
- .setattr = bch2_setattr,
- .tmpfile = bch2_tmpfile,
- .listxattr = bch2_xattr_list,
-#ifdef CONFIG_BCACHEFS_POSIX_ACL
- .get_inode_acl = bch2_get_acl,
- .set_acl = bch2_set_acl,
-#endif
-};
-
-static const struct file_operations bch_dir_file_operations = {
- .llseek = bch2_dir_llseek,
- .read = generic_read_dir,
- .iterate_shared = bch2_vfs_readdir,
- .fsync = bch2_fsync,
- .unlocked_ioctl = bch2_fs_file_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = bch2_compat_fs_ioctl,
-#endif
-};
-
-static const struct inode_operations bch_symlink_inode_operations = {
- .get_link = page_get_link,
- .getattr = bch2_getattr,
- .setattr = bch2_setattr,
- .listxattr = bch2_xattr_list,
-#ifdef CONFIG_BCACHEFS_POSIX_ACL
- .get_inode_acl = bch2_get_acl,
- .set_acl = bch2_set_acl,
-#endif
-};
-
-static const struct inode_operations bch_special_inode_operations = {
- .getattr = bch2_getattr,
- .setattr = bch2_setattr,
- .listxattr = bch2_xattr_list,
-#ifdef CONFIG_BCACHEFS_POSIX_ACL
- .get_inode_acl = bch2_get_acl,
- .set_acl = bch2_set_acl,
-#endif
-};
-
-static const struct address_space_operations bch_address_space_operations = {
- .read_folio = bch2_read_folio,
- .writepages = bch2_writepages,
- .readahead = bch2_readahead,
- .dirty_folio = filemap_dirty_folio,
- .write_begin = bch2_write_begin,
- .write_end = bch2_write_end,
- .invalidate_folio = bch2_invalidate_folio,
- .release_folio = bch2_release_folio,
-#ifdef CONFIG_MIGRATION
- .migrate_folio = filemap_migrate_folio,
-#endif
- .error_remove_folio = generic_error_remove_folio,
-};
-
-struct bcachefs_fid {
- u64 inum;
- u32 subvol;
- u32 gen;
-} __packed;
-
-struct bcachefs_fid_with_parent {
- struct bcachefs_fid fid;
- struct bcachefs_fid dir;
-} __packed;
-
-static int bcachefs_fid_valid(int fh_len, int fh_type)
-{
- switch (fh_type) {
- case FILEID_BCACHEFS_WITHOUT_PARENT:
- return fh_len == sizeof(struct bcachefs_fid) / sizeof(u32);
- case FILEID_BCACHEFS_WITH_PARENT:
- return fh_len == sizeof(struct bcachefs_fid_with_parent) / sizeof(u32);
- default:
- return false;
- }
-}
-
-static struct bcachefs_fid bch2_inode_to_fid(struct bch_inode_info *inode)
-{
- return (struct bcachefs_fid) {
- .inum = inode->ei_inum.inum,
- .subvol = inode->ei_inum.subvol,
- .gen = inode->ei_inode.bi_generation,
- };
-}
-
-static int bch2_encode_fh(struct inode *vinode, u32 *fh, int *len,
- struct inode *vdir)
-{
- struct bch_inode_info *inode = to_bch_ei(vinode);
- struct bch_inode_info *dir = to_bch_ei(vdir);
- int min_len;
-
- if (!S_ISDIR(inode->v.i_mode) && dir) {
- struct bcachefs_fid_with_parent *fid = (void *) fh;
-
- min_len = sizeof(*fid) / sizeof(u32);
- if (*len < min_len) {
- *len = min_len;
- return FILEID_INVALID;
- }
-
- fid->fid = bch2_inode_to_fid(inode);
- fid->dir = bch2_inode_to_fid(dir);
-
- *len = min_len;
- return FILEID_BCACHEFS_WITH_PARENT;
- } else {
- struct bcachefs_fid *fid = (void *) fh;
-
- min_len = sizeof(*fid) / sizeof(u32);
- if (*len < min_len) {
- *len = min_len;
- return FILEID_INVALID;
- }
- *fid = bch2_inode_to_fid(inode);
-
- *len = min_len;
- return FILEID_BCACHEFS_WITHOUT_PARENT;
- }
-}
-
-static struct inode *bch2_nfs_get_inode(struct super_block *sb,
- struct bcachefs_fid fid)
-{
- struct bch_fs *c = sb->s_fs_info;
- struct inode *vinode = bch2_vfs_inode_get(c, (subvol_inum) {
- .subvol = fid.subvol,
- .inum = fid.inum,
- });
- if (!IS_ERR(vinode) && vinode->i_generation != fid.gen) {
- iput(vinode);
- vinode = ERR_PTR(-ESTALE);
- }
- return vinode;
-}
-
-static struct dentry *bch2_fh_to_dentry(struct super_block *sb, struct fid *_fid,
- int fh_len, int fh_type)
-{
- struct bcachefs_fid *fid = (void *) _fid;
-
- if (!bcachefs_fid_valid(fh_len, fh_type))
- return NULL;
-
- return d_obtain_alias(bch2_nfs_get_inode(sb, *fid));
-}
-
-static struct dentry *bch2_fh_to_parent(struct super_block *sb, struct fid *_fid,
- int fh_len, int fh_type)
-{
- struct bcachefs_fid_with_parent *fid = (void *) _fid;
-
- if (!bcachefs_fid_valid(fh_len, fh_type) ||
- fh_type != FILEID_BCACHEFS_WITH_PARENT)
- return NULL;
-
- return d_obtain_alias(bch2_nfs_get_inode(sb, fid->dir));
-}
-
-static struct dentry *bch2_get_parent(struct dentry *child)
-{
- struct bch_inode_info *inode = to_bch_ei(child->d_inode);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- subvol_inum parent_inum = {
- .subvol = inode->ei_inode.bi_parent_subvol ?:
- inode->ei_inum.subvol,
- .inum = inode->ei_inode.bi_dir,
- };
-
- return d_obtain_alias(bch2_vfs_inode_get(c, parent_inum));
-}
-
-static int bch2_get_name(struct dentry *parent, char *name, struct dentry *child)
-{
- struct bch_inode_info *inode = to_bch_ei(child->d_inode);
- struct bch_inode_info *dir = to_bch_ei(parent->d_inode);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct btree_trans *trans;
- struct btree_iter iter1;
- struct btree_iter iter2;
- struct bkey_s_c k;
- struct bkey_s_c_dirent d;
- struct bch_inode_unpacked inode_u;
- subvol_inum target;
- u32 snapshot;
- struct qstr dirent_name;
- unsigned name_len = 0;
- int ret;
-
- if (!S_ISDIR(dir->v.i_mode))
- return -EINVAL;
-
- trans = bch2_trans_get(c);
-
- bch2_trans_iter_init(trans, &iter1, BTREE_ID_dirents,
- POS(dir->ei_inode.bi_inum, 0), 0);
- bch2_trans_iter_init(trans, &iter2, BTREE_ID_dirents,
- POS(dir->ei_inode.bi_inum, 0), 0);
-retry:
- bch2_trans_begin(trans);
-
- ret = bch2_subvolume_get_snapshot(trans, dir->ei_inum.subvol, &snapshot);
- if (ret)
- goto err;
-
- bch2_btree_iter_set_snapshot(&iter1, snapshot);
- bch2_btree_iter_set_snapshot(&iter2, snapshot);
-
- ret = bch2_inode_find_by_inum_trans(trans, inode_inum(inode), &inode_u);
- if (ret)
- goto err;
-
- if (inode_u.bi_dir == dir->ei_inode.bi_inum) {
- bch2_btree_iter_set_pos(&iter1, POS(inode_u.bi_dir, inode_u.bi_dir_offset));
-
- k = bch2_btree_iter_peek_slot(&iter1);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (k.k->type != KEY_TYPE_dirent) {
- ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
- goto err;
- }
-
- d = bkey_s_c_to_dirent(k);
- ret = bch2_dirent_read_target(trans, inode_inum(dir), d, &target);
- if (ret > 0)
- ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
- if (ret)
- goto err;
-
- if (subvol_inum_eq(target, inode->ei_inum))
- goto found;
- } else {
- /*
- * File with multiple hardlinks and our backref is to the wrong
- * directory - linear search:
- */
- for_each_btree_key_continue_norestart(iter2, 0, k, ret) {
- if (k.k->p.inode > dir->ei_inode.bi_inum)
- break;
-
- if (k.k->type != KEY_TYPE_dirent)
- continue;
-
- d = bkey_s_c_to_dirent(k);
- ret = bch2_dirent_read_target(trans, inode_inum(dir), d, &target);
- if (ret < 0)
- break;
- if (ret)
- continue;
-
- if (subvol_inum_eq(target, inode->ei_inum))
- goto found;
- }
- }
-
- ret = -ENOENT;
- goto err;
-found:
- dirent_name = bch2_dirent_get_name(d);
-
- name_len = min_t(unsigned, dirent_name.len, NAME_MAX);
- memcpy(name, dirent_name.name, name_len);
- name[name_len] = '\0';
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- bch2_trans_iter_exit(trans, &iter1);
- bch2_trans_iter_exit(trans, &iter2);
- bch2_trans_put(trans);
-
- return ret;
-}
-
-static const struct export_operations bch_export_ops = {
- .encode_fh = bch2_encode_fh,
- .fh_to_dentry = bch2_fh_to_dentry,
- .fh_to_parent = bch2_fh_to_parent,
- .get_parent = bch2_get_parent,
- .get_name = bch2_get_name,
-};
-
-static void bch2_vfs_inode_init(struct btree_trans *trans,
- subvol_inum inum,
- struct bch_inode_info *inode,
- struct bch_inode_unpacked *bi,
- struct bch_subvolume *subvol)
-{
- inode->v.i_ino = inum.inum;
- inode->ei_inum = inum;
- inode->ei_inode.bi_inum = inum.inum;
- bch2_inode_update_after_write(trans, inode, bi, ~0);
-
- inode->v.i_blocks = bi->bi_sectors;
- inode->v.i_ino = bi->bi_inum;
- inode->v.i_rdev = bi->bi_dev;
- inode->v.i_generation = bi->bi_generation;
- inode->v.i_size = bi->bi_size;
-
- inode->ei_flags = 0;
- inode->ei_quota_reserved = 0;
- inode->ei_qid = bch_qid(bi);
-
- if (BCH_SUBVOLUME_SNAP(subvol))
- set_bit(EI_INODE_SNAPSHOT, &inode->ei_flags);
-
- inode->v.i_mapping->a_ops = &bch_address_space_operations;
-
- switch (inode->v.i_mode & S_IFMT) {
- case S_IFREG:
- inode->v.i_op = &bch_file_inode_operations;
- inode->v.i_fop = &bch_file_operations;
- break;
- case S_IFDIR:
- inode->v.i_op = &bch_dir_inode_operations;
- inode->v.i_fop = &bch_dir_file_operations;
- break;
- case S_IFLNK:
- inode_nohighmem(&inode->v);
- inode->v.i_op = &bch_symlink_inode_operations;
- break;
- default:
- init_special_inode(&inode->v, inode->v.i_mode, inode->v.i_rdev);
- inode->v.i_op = &bch_special_inode_operations;
- break;
- }
-
- mapping_set_large_folios(inode->v.i_mapping);
-}
-
-static void bch2_free_inode(struct inode *vinode)
-{
- kmem_cache_free(bch2_inode_cache, to_bch_ei(vinode));
-}
-
-static int inode_update_times_fn(struct btree_trans *trans,
- struct bch_inode_info *inode,
- struct bch_inode_unpacked *bi,
- void *p)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
-
- bi->bi_atime = timespec_to_bch2_time(c, inode_get_atime(&inode->v));
- bi->bi_mtime = timespec_to_bch2_time(c, inode_get_mtime(&inode->v));
- bi->bi_ctime = timespec_to_bch2_time(c, inode_get_ctime(&inode->v));
-
- return 0;
-}
-
-static int bch2_vfs_write_inode(struct inode *vinode,
- struct writeback_control *wbc)
-{
- struct bch_fs *c = vinode->i_sb->s_fs_info;
- struct bch_inode_info *inode = to_bch_ei(vinode);
- int ret;
-
- mutex_lock(&inode->ei_update_lock);
- ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
- ATTR_ATIME|ATTR_MTIME|ATTR_CTIME);
- mutex_unlock(&inode->ei_update_lock);
-
- return bch2_err_class(ret);
-}
-
-static void bch2_evict_inode(struct inode *vinode)
-{
- struct bch_fs *c = vinode->i_sb->s_fs_info;
- struct bch_inode_info *inode = to_bch_ei(vinode);
- bool delete = !inode->v.i_nlink && !is_bad_inode(&inode->v);
-
- /*
- * evict() has waited for outstanding writeback, we'll do no more IO
- * through this inode: it's safe to remove from VFS inode hashtable here
- *
- * Do that now so that other threads aren't blocked from pulling it back
- * in, there's no reason for them to be:
- */
- if (!delete)
- bch2_inode_hash_remove(c, inode);
-
- truncate_inode_pages_final(&inode->v.i_data);
-
- clear_inode(&inode->v);
-
- BUG_ON(!is_bad_inode(&inode->v) && inode->ei_quota_reserved);
-
- if (delete) {
- bch2_quota_acct(c, inode->ei_qid, Q_SPC, -((s64) inode->v.i_blocks),
- KEY_TYPE_QUOTA_WARN);
- bch2_quota_acct(c, inode->ei_qid, Q_INO, -1,
- KEY_TYPE_QUOTA_WARN);
- bch2_inode_rm(c, inode_inum(inode));
-
- /*
- * If we are deleting, we need it present in the vfs hash table
- * so that fsck can check if unlinked inodes are still open:
- */
- bch2_inode_hash_remove(c, inode);
- }
-
- mutex_lock(&c->vfs_inodes_lock);
- list_del_init(&inode->ei_vfs_inode_list);
- mutex_unlock(&c->vfs_inodes_lock);
-}
-
-void bch2_evict_subvolume_inodes(struct bch_fs *c, snapshot_id_list *s)
-{
- struct bch_inode_info *inode;
- DARRAY(struct bch_inode_info *) grabbed;
- bool clean_pass = false, this_pass_clean;
-
- /*
- * Initially, we scan for inodes without I_DONTCACHE, then mark them to
- * be pruned with d_mark_dontcache().
- *
- * Once we've had a clean pass where we didn't find any inodes without
- * I_DONTCACHE, we wait for them to be freed:
- */
-
- darray_init(&grabbed);
- darray_make_room(&grabbed, 1024);
-again:
- cond_resched();
- this_pass_clean = true;
-
- mutex_lock(&c->vfs_inodes_lock);
- list_for_each_entry(inode, &c->vfs_inodes_list, ei_vfs_inode_list) {
- if (!snapshot_list_has_id(s, inode->ei_inum.subvol))
- continue;
-
- if (!(inode->v.i_state & I_DONTCACHE) &&
- !(inode->v.i_state & I_FREEING) &&
- igrab(&inode->v)) {
- this_pass_clean = false;
-
- if (darray_push_gfp(&grabbed, inode, GFP_ATOMIC|__GFP_NOWARN)) {
- iput(&inode->v);
- break;
- }
- } else if (clean_pass && this_pass_clean) {
- struct wait_bit_queue_entry wqe;
- struct wait_queue_head *wq_head;
-
- wq_head = inode_bit_waitqueue(&wqe, &inode->v, __I_NEW);
- prepare_to_wait_event(wq_head, &wqe.wq_entry,
- TASK_UNINTERRUPTIBLE);
- mutex_unlock(&c->vfs_inodes_lock);
-
- schedule();
- finish_wait(wq_head, &wqe.wq_entry);
- goto again;
- }
- }
- mutex_unlock(&c->vfs_inodes_lock);
-
- darray_for_each(grabbed, i) {
- inode = *i;
- d_mark_dontcache(&inode->v);
- d_prune_aliases(&inode->v);
- iput(&inode->v);
- }
- grabbed.nr = 0;
-
- if (!clean_pass || !this_pass_clean) {
- clean_pass = this_pass_clean;
- goto again;
- }
-
- darray_exit(&grabbed);
-}
-
-static int bch2_statfs(struct dentry *dentry, struct kstatfs *buf)
-{
- struct super_block *sb = dentry->d_sb;
- struct bch_fs *c = sb->s_fs_info;
- struct bch_fs_usage_short usage = bch2_fs_usage_read_short(c);
- unsigned shift = sb->s_blocksize_bits - 9;
- /*
- * this assumes inodes take up 64 bytes, which is a decent average
- * number:
- */
- u64 avail_inodes = ((usage.capacity - usage.used) << 3);
-
- buf->f_type = BCACHEFS_STATFS_MAGIC;
- buf->f_bsize = sb->s_blocksize;
- buf->f_blocks = usage.capacity >> shift;
- buf->f_bfree = usage.free >> shift;
- buf->f_bavail = avail_factor(usage.free) >> shift;
-
- buf->f_files = usage.nr_inodes + avail_inodes;
- buf->f_ffree = avail_inodes;
-
- buf->f_fsid = uuid_to_fsid(c->sb.user_uuid.b);
- buf->f_namelen = BCH_NAME_MAX;
-
- return 0;
-}
-
-static int bch2_sync_fs(struct super_block *sb, int wait)
-{
- struct bch_fs *c = sb->s_fs_info;
- int ret;
-
- trace_bch2_sync_fs(sb, wait);
-
- if (c->opts.journal_flush_disabled)
- return 0;
-
- if (!wait) {
- bch2_journal_flush_async(&c->journal, NULL);
- return 0;
- }
-
- ret = bch2_journal_flush(&c->journal);
- return bch2_err_class(ret);
-}
-
-static struct bch_fs *bch2_path_to_fs(const char *path)
-{
- struct bch_fs *c;
- dev_t dev;
- int ret;
-
- ret = lookup_bdev(path, &dev);
- if (ret)
- return ERR_PTR(ret);
-
- c = bch2_dev_to_fs(dev);
- if (c)
- closure_put(&c->cl);
- return c ?: ERR_PTR(-ENOENT);
-}
-
-static int bch2_remount(struct super_block *sb, int *flags,
- struct bch_opts opts)
-{
- struct bch_fs *c = sb->s_fs_info;
- int ret = 0;
-
- opt_set(opts, read_only, (*flags & SB_RDONLY) != 0);
-
- if (opts.read_only != c->opts.read_only) {
- down_write(&c->state_lock);
-
- if (opts.read_only) {
- bch2_fs_read_only(c);
-
- sb->s_flags |= SB_RDONLY;
- } else {
- ret = bch2_fs_read_write(c);
- if (ret) {
- bch_err(c, "error going rw: %i", ret);
- up_write(&c->state_lock);
- ret = -EINVAL;
- goto err;
- }
-
- sb->s_flags &= ~SB_RDONLY;
- }
-
- c->opts.read_only = opts.read_only;
-
- up_write(&c->state_lock);
- }
-
- if (opt_defined(opts, errors))
- c->opts.errors = opts.errors;
-err:
- return bch2_err_class(ret);
-}
-
-static int bch2_show_devname(struct seq_file *seq, struct dentry *root)
-{
- struct bch_fs *c = root->d_sb->s_fs_info;
- bool first = true;
-
- for_each_online_member(c, ca) {
- if (!first)
- seq_putc(seq, ':');
- first = false;
- seq_puts(seq, ca->disk_sb.sb_name);
- }
-
- return 0;
-}
-
-static int bch2_show_options(struct seq_file *seq, struct dentry *root)
-{
- struct bch_fs *c = root->d_sb->s_fs_info;
- struct printbuf buf = PRINTBUF;
-
- bch2_opts_to_text(&buf, c->opts, c, c->disk_sb.sb,
- OPT_MOUNT, OPT_HIDDEN, OPT_SHOW_MOUNT_STYLE);
- printbuf_nul_terminate(&buf);
- seq_printf(seq, ",%s", buf.buf);
-
- int ret = buf.allocation_failure ? -ENOMEM : 0;
- printbuf_exit(&buf);
- return ret;
-}
-
-static void bch2_put_super(struct super_block *sb)
-{
- struct bch_fs *c = sb->s_fs_info;
-
- __bch2_fs_stop(c);
-}
-
-/*
- * bcachefs doesn't currently integrate intwrite freeze protection but the
- * internal write references serve the same purpose. Therefore reuse the
- * read-only transition code to perform the quiesce. The caveat is that we don't
- * currently have the ability to block tasks that want a write reference while
- * the superblock is frozen. This is fine for now, but we should either add
- * blocking support or find a way to integrate sb_start_intwrite() and friends.
- */
-static int bch2_freeze(struct super_block *sb)
-{
- struct bch_fs *c = sb->s_fs_info;
-
- down_write(&c->state_lock);
- bch2_fs_read_only(c);
- up_write(&c->state_lock);
- return 0;
-}
-
-static int bch2_unfreeze(struct super_block *sb)
-{
- struct bch_fs *c = sb->s_fs_info;
- int ret;
-
- if (test_bit(BCH_FS_emergency_ro, &c->flags))
- return 0;
-
- down_write(&c->state_lock);
- ret = bch2_fs_read_write(c);
- up_write(&c->state_lock);
- return ret;
-}
-
-static const struct super_operations bch_super_operations = {
- .alloc_inode = bch2_alloc_inode,
- .free_inode = bch2_free_inode,
- .write_inode = bch2_vfs_write_inode,
- .evict_inode = bch2_evict_inode,
- .sync_fs = bch2_sync_fs,
- .statfs = bch2_statfs,
- .show_devname = bch2_show_devname,
- .show_options = bch2_show_options,
- .put_super = bch2_put_super,
- .freeze_fs = bch2_freeze,
- .unfreeze_fs = bch2_unfreeze,
-};
-
-static int bch2_set_super(struct super_block *s, void *data)
-{
- s->s_fs_info = data;
- return 0;
-}
-
-static int bch2_noset_super(struct super_block *s, void *data)
-{
- return -EBUSY;
-}
-
-typedef DARRAY(struct bch_fs *) darray_fs;
-
-static int bch2_test_super(struct super_block *s, void *data)
-{
- struct bch_fs *c = s->s_fs_info;
- darray_fs *d = data;
-
- if (!c)
- return false;
-
- darray_for_each(*d, i)
- if (c != *i)
- return false;
- return true;
-}
-
-static int bch2_fs_get_tree(struct fs_context *fc)
-{
- struct bch_fs *c;
- struct super_block *sb;
- struct inode *vinode;
- struct bch2_opts_parse *opts_parse = fc->fs_private;
- struct bch_opts opts = opts_parse->opts;
- darray_str devs;
- darray_fs devs_to_fs = {};
- int ret;
-
- opt_set(opts, read_only, (fc->sb_flags & SB_RDONLY) != 0);
- opt_set(opts, nostart, true);
-
- if (!fc->source || strlen(fc->source) == 0)
- return -EINVAL;
-
- ret = bch2_split_devs(fc->source, &devs);
- if (ret)
- return ret;
-
- darray_for_each(devs, i) {
- ret = darray_push(&devs_to_fs, bch2_path_to_fs(*i));
- if (ret)
- goto err;
- }
-
- sb = sget(fc->fs_type, bch2_test_super, bch2_noset_super, fc->sb_flags|SB_NOSEC, &devs_to_fs);
- if (!IS_ERR(sb))
- goto got_sb;
-
- c = bch2_fs_open(devs.data, devs.nr, opts);
- ret = PTR_ERR_OR_ZERO(c);
- if (ret)
- goto err;
-
- /* Some options can't be parsed until after the fs is started: */
- opts = bch2_opts_empty();
- ret = bch2_parse_mount_opts(c, &opts, NULL, opts_parse->parse_later.buf);
- if (ret)
- goto err_stop_fs;
-
- bch2_opts_apply(&c->opts, opts);
-
- ret = bch2_fs_start(c);
- if (ret)
- goto err_stop_fs;
-
- sb = sget(fc->fs_type, NULL, bch2_set_super, fc->sb_flags|SB_NOSEC, c);
- ret = PTR_ERR_OR_ZERO(sb);
- if (ret)
- goto err_stop_fs;
-got_sb:
- c = sb->s_fs_info;
-
- if (sb->s_root) {
- if ((fc->sb_flags ^ sb->s_flags) & SB_RDONLY) {
- ret = -EBUSY;
- goto err_put_super;
- }
- goto out;
- }
-
- sb->s_blocksize = block_bytes(c);
- sb->s_blocksize_bits = ilog2(block_bytes(c));
- sb->s_maxbytes = MAX_LFS_FILESIZE;
- sb->s_op = &bch_super_operations;
- sb->s_export_op = &bch_export_ops;
-#ifdef CONFIG_BCACHEFS_QUOTA
- sb->s_qcop = &bch2_quotactl_operations;
- sb->s_quota_types = QTYPE_MASK_USR|QTYPE_MASK_GRP|QTYPE_MASK_PRJ;
-#endif
- sb->s_xattr = bch2_xattr_handlers;
- sb->s_magic = BCACHEFS_STATFS_MAGIC;
- sb->s_time_gran = c->sb.nsec_per_time_unit;
- sb->s_time_min = div_s64(S64_MIN, c->sb.time_units_per_sec) + 1;
- sb->s_time_max = div_s64(S64_MAX, c->sb.time_units_per_sec);
- sb->s_uuid = c->sb.user_uuid;
- sb->s_shrink->seeks = 0;
- c->vfs_sb = sb;
- strscpy(sb->s_id, c->name, sizeof(sb->s_id));
-
- ret = super_setup_bdi(sb);
- if (ret)
- goto err_put_super;
-
- sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
-
- for_each_online_member(c, ca) {
- struct block_device *bdev = ca->disk_sb.bdev;
-
- /* XXX: create an anonymous device for multi device filesystems */
- sb->s_bdev = bdev;
- sb->s_dev = bdev->bd_dev;
- percpu_ref_put(&ca->io_ref);
- break;
- }
-
- c->dev = sb->s_dev;
-
-#ifdef CONFIG_BCACHEFS_POSIX_ACL
- if (c->opts.acl)
- sb->s_flags |= SB_POSIXACL;
-#endif
-
- sb->s_shrink->seeks = 0;
-
- vinode = bch2_vfs_inode_get(c, BCACHEFS_ROOT_SUBVOL_INUM);
- ret = PTR_ERR_OR_ZERO(vinode);
- bch_err_msg(c, ret, "mounting: error getting root inode");
- if (ret)
- goto err_put_super;
-
- sb->s_root = d_make_root(vinode);
- if (!sb->s_root) {
- bch_err(c, "error mounting: error allocating root dentry");
- ret = -ENOMEM;
- goto err_put_super;
- }
-
- sb->s_flags |= SB_ACTIVE;
-out:
- fc->root = dget(sb->s_root);
-err:
- darray_exit(&devs_to_fs);
- bch2_darray_str_exit(&devs);
- if (ret)
- pr_err("error: %s", bch2_err_str(ret));
- /*
- * On an inconsistency error in recovery we might see an -EROFS derived
- * error code (from the journal), but we don't want to return that to
- * userspace as that causes util-linux to retry the mount RO - which is
- * confusing:
- */
- if (bch2_err_matches(ret, EROFS) && ret != -EROFS)
- ret = -EIO;
- return bch2_err_class(ret);
-
-err_stop_fs:
- bch2_fs_stop(c);
- goto err;
-
-err_put_super:
- __bch2_fs_stop(c);
- deactivate_locked_super(sb);
- goto err;
-}
-
-static void bch2_kill_sb(struct super_block *sb)
-{
- struct bch_fs *c = sb->s_fs_info;
-
- generic_shutdown_super(sb);
- bch2_fs_free(c);
-}
-
-static void bch2_fs_context_free(struct fs_context *fc)
-{
- struct bch2_opts_parse *opts = fc->fs_private;
-
- if (opts) {
- printbuf_exit(&opts->parse_later);
- kfree(opts);
- }
-}
-
-static int bch2_fs_parse_param(struct fs_context *fc,
- struct fs_parameter *param)
-{
- /*
- * the "source" param, i.e., the name of the device(s) to mount,
- * is handled by the VFS layer.
- */
- if (!strcmp(param->key, "source"))
- return -ENOPARAM;
-
- struct bch2_opts_parse *opts = fc->fs_private;
- struct bch_fs *c = NULL;
-
- /* for reconfigure, we already have a struct bch_fs */
- if (fc->root)
- c = fc->root->d_sb->s_fs_info;
-
- int ret = bch2_parse_one_mount_opt(c, &opts->opts,
- &opts->parse_later, param->key,
- param->string);
-
- return bch2_err_class(ret);
-}
-
-static int bch2_fs_reconfigure(struct fs_context *fc)
-{
- struct super_block *sb = fc->root->d_sb;
- struct bch2_opts_parse *opts = fc->fs_private;
-
- return bch2_remount(sb, &fc->sb_flags, opts->opts);
-}
-
-static const struct fs_context_operations bch2_context_ops = {
- .free = bch2_fs_context_free,
- .parse_param = bch2_fs_parse_param,
- .get_tree = bch2_fs_get_tree,
- .reconfigure = bch2_fs_reconfigure,
-};
-
-static int bch2_init_fs_context(struct fs_context *fc)
-{
- struct bch2_opts_parse *opts = kzalloc(sizeof(*opts), GFP_KERNEL);
-
- if (!opts)
- return -ENOMEM;
-
- opts->parse_later = PRINTBUF;
-
- fc->ops = &bch2_context_ops;
- fc->fs_private = opts;
-
- return 0;
-}
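
bch2_init_fs_context() and bch2_context_ops plug bcachefs into the new mount API, so the parse_param/get_tree path above can also be driven directly from userspace with fsopen()/fsconfig()/fsmount(). A rough sketch using raw syscalls; the device and mount point paths are placeholders and error handling is trimmed:

/* Illustrative sketch only: mount via the new mount API syscalls, which end
 * up in bch2_fs_parse_param() and bch2_fs_get_tree(). "/dev/sdX" and
 * "/mnt/bch" are placeholders. Needs a kernel/libc recent enough to define
 * SYS_fsopen and friends. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>

int main(void)
{
	int fsfd = syscall(SYS_fsopen, "bcachefs", 0);
	if (fsfd < 0) {
		perror("fsopen");
		return 1;
	}

	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source", "/dev/sdX", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

	int mntfd = syscall(SYS_fsmount, fsfd, 0, 0);
	if (mntfd < 0) {
		perror("fsmount");
		return 1;
	}

	syscall(SYS_move_mount, mntfd, "", AT_FDCWD, "/mnt/bch",
		MOVE_MOUNT_F_EMPTY_PATH);
	return 0;
}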
-
-void bch2_fs_vfs_exit(struct bch_fs *c)
-{
- if (c->vfs_inodes_table.tbl)
- rhashtable_destroy(&c->vfs_inodes_table);
-}
-
-int bch2_fs_vfs_init(struct bch_fs *c)
-{
- return rhashtable_init(&c->vfs_inodes_table, &bch2_vfs_inodes_params);
-}
-
-static struct file_system_type bcache_fs_type = {
- .owner = THIS_MODULE,
- .name = "bcachefs",
- .init_fs_context = bch2_init_fs_context,
- .kill_sb = bch2_kill_sb,
- .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
-};
-
-MODULE_ALIAS_FS("bcachefs");
-
-void bch2_vfs_exit(void)
-{
- unregister_filesystem(&bcache_fs_type);
- kmem_cache_destroy(bch2_inode_cache);
-}
-
-int __init bch2_vfs_init(void)
-{
- int ret = -ENOMEM;
-
- bch2_inode_cache = KMEM_CACHE(bch_inode_info, SLAB_RECLAIM_ACCOUNT |
- SLAB_ACCOUNT);
- if (!bch2_inode_cache)
- goto err;
-
- ret = register_filesystem(&bcache_fs_type);
- if (ret)
- goto err;
-
- return 0;
-err:
- bch2_vfs_exit();
- return ret;
-}
-
-#endif /* NO_BCACHEFS_FS */
diff --git a/fs/bcachefs/fs.h b/fs/bcachefs/fs.h
deleted file mode 100644
index 59f9f7ae728d..000000000000
--- a/fs/bcachefs/fs.h
+++ /dev/null
@@ -1,214 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_FS_H
-#define _BCACHEFS_FS_H
-
-#include "inode.h"
-#include "opts.h"
-#include "str_hash.h"
-#include "quota_types.h"
-#include "two_state_shared_lock.h"
-
-#include <linux/seqlock.h>
-#include <linux/stat.h>
-
-struct bch_inode_info {
- struct inode v;
- struct rhash_head hash;
- subvol_inum ei_inum;
-
- struct list_head ei_vfs_inode_list;
- unsigned long ei_flags;
-
- struct mutex ei_update_lock;
- u64 ei_quota_reserved;
- unsigned long ei_last_dirtied;
- two_state_lock_t ei_pagecache_lock;
-
- struct mutex ei_quota_lock;
- struct bch_qid ei_qid;
-
- /*
- * When we've been doing nocow writes we'll need to issue flushes to the
- * underlying block devices
- *
- * XXX: a device may have had a flush issued by some other codepath. It
- * would be better to keep for each device a sequence number that's
- * incremented when we issue a cache flush, and track here the sequence
- * number that needs flushing.
- */
- struct bch_devs_mask ei_devs_need_flush;
-
- /* copy of inode in btree: */
- struct bch_inode_unpacked ei_inode;
-};
-
-#define bch2_pagecache_add_put(i) bch2_two_state_unlock(&i->ei_pagecache_lock, 0)
-#define bch2_pagecache_add_tryget(i) bch2_two_state_trylock(&i->ei_pagecache_lock, 0)
-#define bch2_pagecache_add_get(i) bch2_two_state_lock(&i->ei_pagecache_lock, 0)
-
-#define bch2_pagecache_block_put(i) bch2_two_state_unlock(&i->ei_pagecache_lock, 1)
-#define bch2_pagecache_block_get(i) bch2_two_state_lock(&i->ei_pagecache_lock, 1)
-
-static inline subvol_inum inode_inum(struct bch_inode_info *inode)
-{
- return inode->ei_inum;
-}
-
-/*
- * Set if we've gotten a btree error for this inode, and thus the vfs inode and
- * btree inode may be inconsistent:
- */
-#define EI_INODE_ERROR 0
-
-/*
- * Set if the inode is in a snapshot subvolume - we don't do quota accounting in
- * those:
- */
-#define EI_INODE_SNAPSHOT 1
-#define EI_INODE_HASHED 2
-
-#define to_bch_ei(_inode) \
- container_of_or_null(_inode, struct bch_inode_info, v)
-
-static inline int ptrcmp(void *l, void *r)
-{
- return cmp_int(l, r);
-}
-
-enum bch_inode_lock_op {
- INODE_PAGECACHE_BLOCK = (1U << 0),
- INODE_UPDATE_LOCK = (1U << 1),
-};
-
-#define bch2_lock_inodes(_locks, ...) \
-do { \
- struct bch_inode_info *a[] = { NULL, __VA_ARGS__ }; \
- unsigned i; \
- \
- bubble_sort(&a[1], ARRAY_SIZE(a) - 1, ptrcmp); \
- \
- for (i = 1; i < ARRAY_SIZE(a); i++) \
- if (a[i] != a[i - 1]) { \
- if ((_locks) & INODE_PAGECACHE_BLOCK) \
- bch2_pagecache_block_get(a[i]);\
- if ((_locks) & INODE_UPDATE_LOCK) \
- mutex_lock_nested(&a[i]->ei_update_lock, i);\
- } \
-} while (0)
-
-#define bch2_unlock_inodes(_locks, ...) \
-do { \
- struct bch_inode_info *a[] = { NULL, __VA_ARGS__ }; \
- unsigned i; \
- \
- bubble_sort(&a[1], ARRAY_SIZE(a) - 1, ptrcmp); \
- \
- for (i = 1; i < ARRAY_SIZE(a); i++) \
- if (a[i] != a[i - 1]) { \
- if ((_locks) & INODE_PAGECACHE_BLOCK) \
- bch2_pagecache_block_put(a[i]);\
- if ((_locks) & INODE_UPDATE_LOCK) \
- mutex_unlock(&a[i]->ei_update_lock); \
- } \
-} while (0)
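
bch2_lock_inodes()/bch2_unlock_inodes() above avoid ABBA deadlocks by sorting the inode pointers and always taking the locks in address order, skipping duplicates. The same pattern in standalone form, using plain pthread mutexes (all names here are illustrative, not bcachefs API):

/* Illustrative sketch only: acquire several locks in a canonical (address)
 * order, skipping duplicates - the deadlock-avoidance idea behind
 * bch2_lock_inodes(). */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

static int ptr_cmp(const void *l, const void *r)
{
	uintptr_t a = (uintptr_t)*(void * const *)l;
	uintptr_t b = (uintptr_t)*(void * const *)r;

	return a < b ? -1 : a > b;
}

static void lock_in_order(pthread_mutex_t **locks, size_t nr)
{
	qsort(locks, nr, sizeof(*locks), ptr_cmp);

	for (size_t i = 0; i < nr; i++)
		if (!i || locks[i] != locks[i - 1])	/* skip duplicates */
			pthread_mutex_lock(locks[i]);
}

/* expects the same array already sorted by lock_in_order() */
static void unlock_in_order(pthread_mutex_t **locks, size_t nr)
{
	for (size_t i = 0; i < nr; i++)
		if (!i || locks[i] != locks[i - 1])
			pthread_mutex_unlock(locks[i]);
}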
-
-static inline struct bch_inode_info *file_bch_inode(struct file *file)
-{
- return to_bch_ei(file_inode(file));
-}
-
-static inline bool inode_attr_changing(struct bch_inode_info *dir,
- struct bch_inode_info *inode,
- enum inode_opt_id id)
-{
- return !(inode->ei_inode.bi_fields_set & (1 << id)) &&
- bch2_inode_opt_get(&dir->ei_inode, id) !=
- bch2_inode_opt_get(&inode->ei_inode, id);
-}
-
-static inline bool inode_attrs_changing(struct bch_inode_info *dir,
- struct bch_inode_info *inode)
-{
- unsigned id;
-
- for (id = 0; id < Inode_opt_nr; id++)
- if (inode_attr_changing(dir, inode, id))
- return true;
-
- return false;
-}
-
-struct bch_inode_unpacked;
-
-#ifndef NO_BCACHEFS_FS
-
-struct bch_inode_info *
-__bch2_create(struct mnt_idmap *, struct bch_inode_info *,
- struct dentry *, umode_t, dev_t, subvol_inum, unsigned);
-
-int bch2_inode_or_descendents_is_open(struct btree_trans *trans, struct bpos p);
-
-int bch2_fs_quota_transfer(struct bch_fs *,
- struct bch_inode_info *,
- struct bch_qid,
- unsigned,
- enum quota_acct_mode);
-
-static inline int bch2_set_projid(struct bch_fs *c,
- struct bch_inode_info *inode,
- u32 projid)
-{
- struct bch_qid qid = inode->ei_qid;
-
- qid.q[QTYP_PRJ] = projid;
-
- return bch2_fs_quota_transfer(c, inode, qid,
- 1 << QTYP_PRJ,
- KEY_TYPE_QUOTA_PREALLOC);
-}
-
-struct inode *bch2_vfs_inode_get(struct bch_fs *, subvol_inum);
-
-/* returns 0 if we want to do the update, or error is passed up */
-typedef int (*inode_set_fn)(struct btree_trans *,
- struct bch_inode_info *,
- struct bch_inode_unpacked *, void *);
-
-void bch2_inode_update_after_write(struct btree_trans *,
- struct bch_inode_info *,
- struct bch_inode_unpacked *,
- unsigned);
-int __must_check bch2_write_inode(struct bch_fs *, struct bch_inode_info *,
- inode_set_fn, void *, unsigned);
-
-int bch2_setattr_nonsize(struct mnt_idmap *,
- struct bch_inode_info *,
- struct iattr *);
-int __bch2_unlink(struct inode *, struct dentry *, bool);
-
-void bch2_evict_subvolume_inodes(struct bch_fs *, snapshot_id_list *);
-
-void bch2_fs_vfs_exit(struct bch_fs *);
-int bch2_fs_vfs_init(struct bch_fs *);
-
-void bch2_vfs_exit(void);
-int bch2_vfs_init(void);
-
-#else
-
-#define bch2_inode_update_after_write(_trans, _inode, _inode_u, _fields) ({ do {} while (0); })
-
-static inline int bch2_inode_or_descendents_is_open(struct btree_trans *trans, struct bpos p) { return 0; }
-
-static inline void bch2_evict_subvolume_inodes(struct bch_fs *c,
- snapshot_id_list *s) {}
-
-static inline void bch2_fs_vfs_exit(struct bch_fs *c) {}
-static inline int bch2_fs_vfs_init(struct bch_fs *c) { return 0; }
-
-static inline void bch2_vfs_exit(void) {}
-static inline int bch2_vfs_init(void) { return 0; }
-
-#endif /* NO_BCACHEFS_FS */
-
-#endif /* _BCACHEFS_FS_H */
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
deleted file mode 100644
index 75c8a97a6954..000000000000
--- a/fs/bcachefs/fsck.c
+++ /dev/null
@@ -1,3196 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "bkey_buf.h"
-#include "btree_cache.h"
-#include "btree_update.h"
-#include "buckets.h"
-#include "darray.h"
-#include "dirent.h"
-#include "error.h"
-#include "fs.h"
-#include "fs-common.h"
-#include "fsck.h"
-#include "inode.h"
-#include "keylist.h"
-#include "recovery_passes.h"
-#include "snapshot.h"
-#include "super.h"
-#include "xattr.h"
-
-#include <linux/bsearch.h>
-#include <linux/dcache.h> /* struct qstr */
-
-static bool inode_points_to_dirent(struct bch_inode_unpacked *inode,
- struct bkey_s_c_dirent d)
-{
- return inode->bi_dir == d.k->p.inode &&
- inode->bi_dir_offset == d.k->p.offset;
-}
-
-static int dirent_points_to_inode_nowarn(struct bkey_s_c_dirent d,
- struct bch_inode_unpacked *inode)
-{
- if (d.v->d_type == DT_SUBVOL
- ? le32_to_cpu(d.v->d_child_subvol) == inode->bi_subvol
- : le64_to_cpu(d.v->d_inum) == inode->bi_inum)
- return 0;
- return -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
-}
-
-static void dirent_inode_mismatch_msg(struct printbuf *out,
- struct bch_fs *c,
- struct bkey_s_c_dirent dirent,
- struct bch_inode_unpacked *inode)
-{
- prt_str(out, "inode points to dirent that does not point back:");
- prt_newline(out);
- bch2_bkey_val_to_text(out, c, dirent.s_c);
- prt_newline(out);
- bch2_inode_unpacked_to_text(out, inode);
-}
-
-static int dirent_points_to_inode(struct bch_fs *c,
- struct bkey_s_c_dirent dirent,
- struct bch_inode_unpacked *inode)
-{
- int ret = dirent_points_to_inode_nowarn(dirent, inode);
- if (ret) {
- struct printbuf buf = PRINTBUF;
- dirent_inode_mismatch_msg(&buf, c, dirent, inode);
- bch_warn(c, "%s", buf.buf);
- printbuf_exit(&buf);
- }
- return ret;
-}
-
-/*
- * XXX: this is handling transaction restarts without returning
- * -BCH_ERR_transaction_restart_nested; this is not how we do things anymore:
- */
-static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum,
- u32 snapshot)
-{
- u64 sectors = 0;
-
- int ret = for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
- SPOS(inum, 0, snapshot),
- POS(inum, U64_MAX),
- 0, k, ({
- if (bkey_extent_is_allocation(k.k))
- sectors += k.k->size;
- 0;
- }));
-
- return ret ?: sectors;
-}
-
-static s64 bch2_count_subdirs(struct btree_trans *trans, u64 inum,
- u32 snapshot)
-{
- u64 subdirs = 0;
-
- int ret = for_each_btree_key_upto(trans, iter, BTREE_ID_dirents,
- SPOS(inum, 0, snapshot),
- POS(inum, U64_MAX),
- 0, k, ({
- if (k.k->type == KEY_TYPE_dirent &&
- bkey_s_c_to_dirent(k).v->d_type == DT_DIR)
- subdirs++;
- 0;
- }));
-
- return ret ?: subdirs;
-}
-
-static int subvol_lookup(struct btree_trans *trans, u32 subvol,
- u32 *snapshot, u64 *inum)
-{
- struct bch_subvolume s;
- int ret = bch2_subvolume_get(trans, subvol, false, 0, &s);
-
- *snapshot = le32_to_cpu(s.snapshot);
- *inum = le64_to_cpu(s.inode);
- return ret;
-}
-
-static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
- struct bch_inode_unpacked *inode)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret;
-
- for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inode_nr),
- BTREE_ITER_all_snapshots, k, ret) {
- if (k.k->p.offset != inode_nr)
- break;
- if (!bkey_is_inode(k.k))
- continue;
- ret = bch2_inode_unpack(k, inode);
- goto found;
- }
- ret = -BCH_ERR_ENOENT_inode;
-found:
- bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static int lookup_inode(struct btree_trans *trans, u64 inode_nr, u32 snapshot,
- struct bch_inode_unpacked *inode)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret;
-
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
- SPOS(0, inode_nr, snapshot), 0);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- ret = bkey_is_inode(k.k)
- ? bch2_inode_unpack(k, inode)
- : -BCH_ERR_ENOENT_inode;
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static int lookup_dirent_in_snapshot(struct btree_trans *trans,
- struct bch_hash_info hash_info,
- subvol_inum dir, struct qstr *name,
- u64 *target, unsigned *type, u32 snapshot)
-{
- struct btree_iter iter;
- struct bkey_s_c k = bch2_hash_lookup_in_snapshot(trans, &iter, bch2_dirent_hash_desc,
- &hash_info, dir, name, 0, snapshot);
- int ret = bkey_err(k);
- if (ret)
- return ret;
-
- struct bkey_s_c_dirent d = bkey_s_c_to_dirent(bch2_btree_iter_peek_slot(&iter));
- *target = le64_to_cpu(d.v->d_inum);
- *type = d.v->d_type;
- bch2_trans_iter_exit(trans, &iter);
- return 0;
-}
-
-static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bch_inode_unpacked dir_inode;
- struct bch_hash_info dir_hash_info;
- int ret;
-
- ret = lookup_first_inode(trans, pos.inode, &dir_inode);
- if (ret)
- goto err;
-
- dir_hash_info = bch2_hash_info_init(c, &dir_inode);
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_intent);
-
- ret = bch2_btree_iter_traverse(&iter) ?:
- bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
- &dir_hash_info, &iter,
- BTREE_UPDATE_internal_snapshot_node);
- bch2_trans_iter_exit(trans, &iter);
-err:
- bch_err_fn(c, ret);
- return ret;
-}
-
-/* Get lost+found, create if it doesn't exist: */
-static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
- struct bch_inode_unpacked *lostfound,
- u64 reattaching_inum)
-{
- struct bch_fs *c = trans->c;
- struct qstr lostfound_str = QSTR("lost+found");
- u64 inum = 0;
- unsigned d_type = 0;
- int ret;
-
- struct bch_snapshot_tree st;
- ret = bch2_snapshot_tree_lookup(trans,
- bch2_snapshot_tree(c, snapshot), &st);
- if (ret)
- return ret;
-
- subvol_inum root_inum = { .subvol = le32_to_cpu(st.master_subvol) };
-
- struct bch_subvolume subvol;
- ret = bch2_subvolume_get(trans, le32_to_cpu(st.master_subvol),
- false, 0, &subvol);
- bch_err_msg(c, ret, "looking up root subvol %u for snapshot %u",
- le32_to_cpu(st.master_subvol), snapshot);
- if (ret)
- return ret;
-
- if (!subvol.inode) {
- struct btree_iter iter;
- struct bkey_i_subvolume *subvol = bch2_bkey_get_mut_typed(trans, &iter,
- BTREE_ID_subvolumes, POS(0, le32_to_cpu(st.master_subvol)),
- 0, subvolume);
- ret = PTR_ERR_OR_ZERO(subvol);
- if (ret)
- return ret;
-
- subvol->v.inode = cpu_to_le64(reattaching_inum);
- bch2_trans_iter_exit(trans, &iter);
- }
-
- root_inum.inum = le64_to_cpu(subvol.inode);
-
- struct bch_inode_unpacked root_inode;
- struct bch_hash_info root_hash_info;
- ret = lookup_inode(trans, root_inum.inum, snapshot, &root_inode);
- bch_err_msg(c, ret, "looking up root inode %llu for subvol %u",
- root_inum.inum, le32_to_cpu(st.master_subvol));
- if (ret)
- return ret;
-
- root_hash_info = bch2_hash_info_init(c, &root_inode);
-
- ret = lookup_dirent_in_snapshot(trans, root_hash_info, root_inum,
- &lostfound_str, &inum, &d_type, snapshot);
- if (bch2_err_matches(ret, ENOENT))
- goto create_lostfound;
-
- bch_err_fn(c, ret);
- if (ret)
- return ret;
-
- if (d_type != DT_DIR) {
- bch_err(c, "error looking up lost+found: not a directory");
- return -BCH_ERR_ENOENT_not_directory;
- }
-
- /*
- * The bch2_check_dirents pass has already run, so dangling dirents
- * shouldn't exist here:
- */
- ret = lookup_inode(trans, inum, snapshot, lostfound);
- bch_err_msg(c, ret, "looking up lost+found %llu:%u in (root inode %llu, snapshot root %u)",
- inum, snapshot, root_inum.inum, bch2_snapshot_root(c, snapshot));
- return ret;
-
-create_lostfound:
- /*
- * we always create lost+found in the root snapshot; we don't want
- * different branches of the snapshot tree to have different lost+found
- */
- snapshot = le32_to_cpu(st.root_snapshot);
- /*
- * XXX: we could have a nicer log message here if we had a nice way to
- * walk backpointers to print a path
- */
- bch_notice(c, "creating lost+found in subvol %llu snapshot %u",
- root_inum.subvol, le32_to_cpu(st.root_snapshot));
-
- u64 now = bch2_current_time(c);
- struct btree_iter lostfound_iter = { NULL };
- u64 cpu = raw_smp_processor_id();
-
- bch2_inode_init_early(c, lostfound);
- bch2_inode_init_late(lostfound, now, 0, 0, S_IFDIR|0700, 0, &root_inode);
- lostfound->bi_dir = root_inode.bi_inum;
- lostfound->bi_snapshot = le32_to_cpu(st.root_snapshot);
-
- root_inode.bi_nlink++;
-
- ret = bch2_inode_create(trans, &lostfound_iter, lostfound, snapshot, cpu);
- if (ret)
- goto err;
-
- bch2_btree_iter_set_snapshot(&lostfound_iter, snapshot);
- ret = bch2_btree_iter_traverse(&lostfound_iter);
- if (ret)
- goto err;
-
- ret = bch2_dirent_create_snapshot(trans,
- 0, root_inode.bi_inum, snapshot, &root_hash_info,
- mode_to_type(lostfound->bi_mode),
- &lostfound_str,
- lostfound->bi_inum,
- &lostfound->bi_dir_offset,
- STR_HASH_must_create) ?:
- bch2_inode_write_flags(trans, &lostfound_iter, lostfound,
- BTREE_UPDATE_internal_snapshot_node);
-err:
- bch_err_msg(c, ret, "creating lost+found");
- bch2_trans_iter_exit(trans, &lostfound_iter);
- return ret;
-}
-
-static inline bool inode_should_reattach(struct bch_inode_unpacked *inode)
-{
- if (inode->bi_inum == BCACHEFS_ROOT_INO &&
- inode->bi_subvol == BCACHEFS_ROOT_SUBVOL)
- return false;
-
- return !inode->bi_dir && !(inode->bi_flags & BCH_INODE_unlinked);
-}
-
-static int maybe_delete_dirent(struct btree_trans *trans, struct bpos d_pos, u32 snapshot)
-{
- struct btree_iter iter;
- struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_dirents,
- SPOS(d_pos.inode, d_pos.offset, snapshot),
- BTREE_ITER_intent|
- BTREE_ITER_with_updates);
- int ret = bkey_err(k);
- if (ret)
- return ret;
-
- if (bpos_eq(k.k->p, d_pos)) {
- /*
- * bch2_btree_delete_at() doesn't work because the update path doesn't
- * internally use BTREE_ITER_with_updates yet
- */
- struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
- ret = PTR_ERR_OR_ZERO(k);
- if (ret)
- goto err;
-
- bkey_init(&k->k);
- k->k.type = KEY_TYPE_whiteout;
- k->k.p = iter.pos;
- ret = bch2_trans_update(trans, &iter, k, BTREE_UPDATE_internal_snapshot_node);
- }
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked *inode)
-{
- struct bch_fs *c = trans->c;
- struct bch_inode_unpacked lostfound;
- char name_buf[20];
- int ret;
-
- u32 dirent_snapshot = inode->bi_snapshot;
- if (inode->bi_subvol) {
- inode->bi_parent_subvol = BCACHEFS_ROOT_SUBVOL;
-
- u64 root_inum;
- ret = subvol_lookup(trans, inode->bi_parent_subvol,
- &dirent_snapshot, &root_inum);
- if (ret)
- return ret;
-
- snprintf(name_buf, sizeof(name_buf), "subvol-%u", inode->bi_subvol);
- } else {
- snprintf(name_buf, sizeof(name_buf), "%llu", inode->bi_inum);
- }
-
- ret = lookup_lostfound(trans, dirent_snapshot, &lostfound, inode->bi_inum);
- if (ret)
- return ret;
-
- lostfound.bi_nlink += S_ISDIR(inode->bi_mode);
-
- /* ensure lost+found inode is also present in inode snapshot */
- if (!inode->bi_subvol) {
- BUG_ON(!bch2_snapshot_is_ancestor(c, inode->bi_snapshot, lostfound.bi_snapshot));
- lostfound.bi_snapshot = inode->bi_snapshot;
- }
-
- ret = __bch2_fsck_write_inode(trans, &lostfound);
- if (ret)
- return ret;
-
- struct bch_hash_info dir_hash = bch2_hash_info_init(c, &lostfound);
- struct qstr name = (struct qstr) QSTR(name_buf);
-
- inode->bi_dir = lostfound.bi_inum;
-
- ret = bch2_dirent_create_snapshot(trans,
- inode->bi_parent_subvol, lostfound.bi_inum,
- dirent_snapshot,
- &dir_hash,
- inode_d_type(inode),
- &name,
- inode->bi_subvol ?: inode->bi_inum,
- &inode->bi_dir_offset,
- STR_HASH_must_create);
- if (ret) {
- bch_err_msg(c, ret, "error creating dirent");
- return ret;
- }
-
- ret = __bch2_fsck_write_inode(trans, inode);
- if (ret)
- return ret;
-
- /*
- * Fix up inodes in child snapshots: if they should also be reattached
- * update the backpointer field, if they should not be we need to emit
- * whiteouts for the dirent we just created.
- */
- if (!inode->bi_subvol && bch2_snapshot_is_leaf(c, inode->bi_snapshot) <= 0) {
- snapshot_id_list whiteouts_done;
- struct btree_iter iter;
- struct bkey_s_c k;
-
- darray_init(&whiteouts_done);
-
- for_each_btree_key_reverse_norestart(trans, iter,
- BTREE_ID_inodes, SPOS(0, inode->bi_inum, inode->bi_snapshot - 1),
- BTREE_ITER_all_snapshots|BTREE_ITER_intent, k, ret) {
- if (k.k->p.offset != inode->bi_inum)
- break;
-
- if (!bkey_is_inode(k.k) ||
- !bch2_snapshot_is_ancestor(c, k.k->p.snapshot, inode->bi_snapshot) ||
- snapshot_list_has_ancestor(c, &whiteouts_done, k.k->p.snapshot))
- continue;
-
- struct bch_inode_unpacked child_inode;
- bch2_inode_unpack(k, &child_inode);
-
- if (!inode_should_reattach(&child_inode)) {
- ret = maybe_delete_dirent(trans,
- SPOS(lostfound.bi_inum, inode->bi_dir_offset,
- dirent_snapshot),
- k.k->p.snapshot);
- if (ret)
- break;
-
- ret = snapshot_list_add(c, &whiteouts_done, k.k->p.snapshot);
- if (ret)
- break;
- } else {
- iter.snapshot = k.k->p.snapshot;
- child_inode.bi_dir = inode->bi_dir;
- child_inode.bi_dir_offset = inode->bi_dir_offset;
-
- ret = bch2_inode_write_flags(trans, &iter, &child_inode,
- BTREE_UPDATE_internal_snapshot_node);
- if (ret)
- break;
- }
- }
- darray_exit(&whiteouts_done);
- bch2_trans_iter_exit(trans, &iter);
- }
-
- return ret;
-}
-
-static int remove_backpointer(struct btree_trans *trans,
- struct bch_inode_unpacked *inode)
-{
- if (!inode->bi_dir)
- return 0;
-
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c_dirent d =
- bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_dirents,
- SPOS(inode->bi_dir, inode->bi_dir_offset, inode->bi_snapshot), 0,
- dirent);
- int ret = bkey_err(d) ?:
- dirent_points_to_inode(c, d, inode) ?:
- __remove_dirent(trans, d.k->p);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static int reattach_subvol(struct btree_trans *trans, struct bkey_s_c_subvolume s)
-{
- struct bch_fs *c = trans->c;
-
- struct bch_inode_unpacked inode;
- int ret = bch2_inode_find_by_inum_trans(trans,
- (subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) },
- &inode);
- if (ret)
- return ret;
-
- ret = remove_backpointer(trans, &inode);
- if (!bch2_err_matches(ret, ENOENT))
- bch_err_msg(c, ret, "removing dirent");
- if (ret)
- return ret;
-
- ret = reattach_inode(trans, &inode);
- bch_err_msg(c, ret, "reattaching inode %llu", inode.bi_inum);
- return ret;
-}
-
-static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 subvolid, u64 inum)
-{
- struct bch_fs *c = trans->c;
-
- if (!bch2_snapshot_is_leaf(c, snapshotid)) {
- bch_err(c, "need to reconstruct subvol, but have interior node snapshot");
- return -BCH_ERR_fsck_repair_unimplemented;
- }
-
- /*
- * If inum isn't set, that means we're being called from check_dirents,
- * not check_inodes - the root of this subvolume doesn't exist or we
- * would have found it there:
- */
- if (!inum) {
- struct btree_iter inode_iter = {};
- struct bch_inode_unpacked new_inode;
- u64 cpu = raw_smp_processor_id();
-
- bch2_inode_init_early(c, &new_inode);
- bch2_inode_init_late(&new_inode, bch2_current_time(c), 0, 0, S_IFDIR|0755, 0, NULL);
-
- new_inode.bi_subvol = subvolid;
-
- int ret = bch2_inode_create(trans, &inode_iter, &new_inode, snapshotid, cpu) ?:
- bch2_btree_iter_traverse(&inode_iter) ?:
- bch2_inode_write(trans, &inode_iter, &new_inode);
- bch2_trans_iter_exit(trans, &inode_iter);
- if (ret)
- return ret;
-
- inum = new_inode.bi_inum;
- }
-
- bch_info(c, "reconstructing subvol %u with root inode %llu", subvolid, inum);
-
- struct bkey_i_subvolume *new_subvol = bch2_trans_kmalloc(trans, sizeof(*new_subvol));
- int ret = PTR_ERR_OR_ZERO(new_subvol);
- if (ret)
- return ret;
-
- bkey_subvolume_init(&new_subvol->k_i);
- new_subvol->k.p.offset = subvolid;
- new_subvol->v.snapshot = cpu_to_le32(snapshotid);
- new_subvol->v.inode = cpu_to_le64(inum);
- ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &new_subvol->k_i, 0);
- if (ret)
- return ret;
-
- struct btree_iter iter;
- struct bkey_i_snapshot *s = bch2_bkey_get_mut_typed(trans, &iter,
- BTREE_ID_snapshots, POS(0, snapshotid),
- 0, snapshot);
- ret = PTR_ERR_OR_ZERO(s);
- bch_err_msg(c, ret, "getting snapshot %u", snapshotid);
- if (ret)
- return ret;
-
- u32 snapshot_tree = le32_to_cpu(s->v.tree);
-
- s->v.subvol = cpu_to_le32(subvolid);
- SET_BCH_SNAPSHOT_SUBVOL(&s->v, true);
- bch2_trans_iter_exit(trans, &iter);
-
- struct bkey_i_snapshot_tree *st = bch2_bkey_get_mut_typed(trans, &iter,
- BTREE_ID_snapshot_trees, POS(0, snapshot_tree),
- 0, snapshot_tree);
- ret = PTR_ERR_OR_ZERO(st);
- bch_err_msg(c, ret, "getting snapshot tree %u", snapshot_tree);
- if (ret)
- return ret;
-
- if (!st->v.master_subvol)
- st->v.master_subvol = cpu_to_le32(subvolid);
-
- bch2_trans_iter_exit(trans, &iter);
- return 0;
-}
-
-static int reconstruct_inode(struct btree_trans *trans, enum btree_id btree, u32 snapshot, u64 inum)
-{
- struct bch_fs *c = trans->c;
- unsigned i_mode = S_IFREG;
- u64 i_size = 0;
-
- switch (btree) {
- case BTREE_ID_extents: {
- struct btree_iter iter = {};
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0);
- struct bkey_s_c k = bch2_btree_iter_peek_prev(&iter);
- bch2_trans_iter_exit(trans, &iter);
- int ret = bkey_err(k);
- if (ret)
- return ret;
-
- i_size = k.k->p.offset << 9;
- break;
- }
- case BTREE_ID_dirents:
- i_mode = S_IFDIR;
- break;
- case BTREE_ID_xattrs:
- break;
- default:
- BUG();
- }
-
- struct bch_inode_unpacked new_inode;
- bch2_inode_init_early(c, &new_inode);
- bch2_inode_init_late(&new_inode, bch2_current_time(c), 0, 0, i_mode|0600, 0, NULL);
- new_inode.bi_size = i_size;
- new_inode.bi_inum = inum;
- new_inode.bi_snapshot = snapshot;
-
- return __bch2_fsck_write_inode(trans, &new_inode);
-}
-
-struct snapshots_seen {
- struct bpos pos;
- snapshot_id_list ids;
-};
-
-static inline void snapshots_seen_exit(struct snapshots_seen *s)
-{
- darray_exit(&s->ids);
-}
-
-static inline void snapshots_seen_init(struct snapshots_seen *s)
-{
- memset(s, 0, sizeof(*s));
-}
-
-static int snapshots_seen_add_inorder(struct bch_fs *c, struct snapshots_seen *s, u32 id)
-{
- u32 *i;
- __darray_for_each(s->ids, i) {
- if (*i == id)
- return 0;
- if (*i > id)
- break;
- }
-
- int ret = darray_insert_item(&s->ids, i - s->ids.data, id);
- if (ret)
- bch_err(c, "error reallocating snapshots_seen table (size %zu)",
- s->ids.size);
- return ret;
-}
-
-static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
- enum btree_id btree_id, struct bpos pos)
-{
- if (!bkey_eq(s->pos, pos))
- s->ids.nr = 0;
- s->pos = pos;
-
- return snapshot_list_add_nodup(c, &s->ids, pos.snapshot);
-}
-
-/**
- * key_visible_in_snapshot - returns true if @id is a descendant of @ancestor,
- * and @ancestor hasn't been overwritten in @seen
- *
- * @c: filesystem handle
- * @seen: list of snapshot ids already seen at current position
- * @id: descendant snapshot id
- * @ancestor: ancestor snapshot id
- *
- * Returns: whether key in @ancestor snapshot is visible in @id snapshot
- */
-static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *seen,
- u32 id, u32 ancestor)
-{
- ssize_t i;
-
- EBUG_ON(id > ancestor);
-
- /* @ancestor should be the snapshot most recently added to @seen */
- EBUG_ON(ancestor != seen->pos.snapshot);
- EBUG_ON(ancestor != darray_last(seen->ids));
-
- if (id == ancestor)
- return true;
-
- if (!bch2_snapshot_is_ancestor(c, id, ancestor))
- return false;
-
- /*
- * We know that @id is a descendant of @ancestor, we're checking if
- * we've seen a key that overwrote @ancestor - i.e. also a descendant of
- * @ancestor and with @id as a descendant.
- *
- * But we already know that we're scanning IDs between @id and @ancestor
- * numerically, since snapshot ID lists are kept sorted, so if we find
- * an id that's an ancestor of @id we're done:
- */
-
- for (i = seen->ids.nr - 2;
- i >= 0 && seen->ids.data[i] >= id;
- --i)
- if (bch2_snapshot_is_ancestor(c, id, seen->ids.data[i]))
- return false;
-
- return true;
-}
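
The backwards scan above relies on seen->ids being sorted with @ancestor last: if some intermediate id is itself an ancestor of @id, the key in @ancestor was overwritten on the path down to @id. A self-contained toy model of that walk; the snapshot tree is faked with a parent[] array, and only the list scan mirrors the function above:

/* Illustrative model only: ids[] is sorted ascending, ancestor == ids[nr - 1],
 * ids are assumed < MAX_SNAP, and is_ancestor() stands in for
 * bch2_snapshot_is_ancestor(). */
#include <stdbool.h>
#include <sys/types.h>

#define MAX_SNAP 16

/* toy snapshot tree: parent[x] is x's parent, 0 means none */
static unsigned parent[MAX_SNAP];

static bool is_ancestor(unsigned id, unsigned ancestor)
{
	while (id && id != ancestor)
		id = parent[id];
	return id == ancestor;
}

static bool visible_in_snapshot(const unsigned *ids, ssize_t nr,
				unsigned id, unsigned ancestor)
{
	if (id == ancestor)
		return true;
	if (!is_ancestor(id, ancestor))
		return false;

	/* scan intermediate ids between @id and @ancestor, newest first */
	for (ssize_t i = nr - 2; i >= 0 && ids[i] >= id; --i)
		if (is_ancestor(id, ids[i]))
			return false;	/* @ancestor was overwritten */

	return true;
}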
-
-/**
- * ref_visible - given a key with snapshot id @src that points to a key with
- * snapshot id @dst, test whether there is some snapshot in which @dst is
- * visible.
- *
- * @c: filesystem handle
- * @s: list of snapshot IDs already seen at @src
- * @src: snapshot ID of src key
- * @dst: snapshot ID of dst key
- * Returns: true if there is some snapshot in which @dst is visible
- *
- * Assumes we're visiting @src keys in natural key order
- */
-static bool ref_visible(struct bch_fs *c, struct snapshots_seen *s,
- u32 src, u32 dst)
-{
- return dst <= src
- ? key_visible_in_snapshot(c, s, dst, src)
- : bch2_snapshot_is_ancestor(c, src, dst);
-}
-
-static int ref_visible2(struct bch_fs *c,
- u32 src, struct snapshots_seen *src_seen,
- u32 dst, struct snapshots_seen *dst_seen)
-{
- if (dst > src) {
- swap(dst, src);
- swap(dst_seen, src_seen);
- }
- return key_visible_in_snapshot(c, src_seen, dst, src);
-}
-
-#define for_each_visible_inode(_c, _s, _w, _snapshot, _i) \
- for (_i = (_w)->inodes.data; _i < (_w)->inodes.data + (_w)->inodes.nr && \
- (_i)->snapshot <= (_snapshot); _i++) \
- if (key_visible_in_snapshot(_c, _s, _i->snapshot, _snapshot))
-
-struct inode_walker_entry {
- struct bch_inode_unpacked inode;
- u32 snapshot;
- u64 count;
-};
-
-struct inode_walker {
- bool first_this_inode;
- bool have_inodes;
- bool recalculate_sums;
- struct bpos last_pos;
-
- DARRAY(struct inode_walker_entry) inodes;
-};
-
-static void inode_walker_exit(struct inode_walker *w)
-{
- darray_exit(&w->inodes);
-}
-
-static struct inode_walker inode_walker_init(void)
-{
- return (struct inode_walker) { 0, };
-}
-
-static int add_inode(struct bch_fs *c, struct inode_walker *w,
- struct bkey_s_c inode)
-{
- struct bch_inode_unpacked u;
-
- BUG_ON(bch2_inode_unpack(inode, &u));
-
- return darray_push(&w->inodes, ((struct inode_walker_entry) {
- .inode = u,
- .snapshot = inode.k->p.snapshot,
- }));
-}
-
-static int get_inodes_all_snapshots(struct btree_trans *trans,
- struct inode_walker *w, u64 inum)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret;
-
- /*
- * We no longer have inodes for w->last_pos; clear this to avoid
- * screwing up check_i_sectors/check_subdir_count if we take a
- * transaction restart here:
- */
- w->have_inodes = false;
- w->recalculate_sums = false;
- w->inodes.nr = 0;
-
- for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
- BTREE_ITER_all_snapshots, k, ret) {
- if (k.k->p.offset != inum)
- break;
-
- if (bkey_is_inode(k.k))
- add_inode(c, w, k);
- }
- bch2_trans_iter_exit(trans, &iter);
-
- if (ret)
- return ret;
-
- w->first_this_inode = true;
- w->have_inodes = true;
- return 0;
-}
-
-static struct inode_walker_entry *
-lookup_inode_for_snapshot(struct bch_fs *c, struct inode_walker *w, struct bkey_s_c k)
-{
- bool is_whiteout = k.k->type == KEY_TYPE_whiteout;
-
- struct inode_walker_entry *i;
- __darray_for_each(w->inodes, i)
- if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, i->snapshot))
- goto found;
-
- return NULL;
-found:
- BUG_ON(k.k->p.snapshot > i->snapshot);
-
- if (k.k->p.snapshot != i->snapshot && !is_whiteout) {
- struct inode_walker_entry new = *i;
-
- new.snapshot = k.k->p.snapshot;
- new.count = 0;
-
- struct printbuf buf = PRINTBUF;
- bch2_bkey_val_to_text(&buf, c, k);
-
- bch_info(c, "have key for inode %llu:%u but have inode in ancestor snapshot %u\n"
- "unexpected because we should always update the inode when we update a key in that inode\n"
- "%s",
- w->last_pos.inode, k.k->p.snapshot, i->snapshot, buf.buf);
- printbuf_exit(&buf);
-
- while (i > w->inodes.data && i[-1].snapshot > k.k->p.snapshot)
- --i;
-
- size_t pos = i - w->inodes.data;
- int ret = darray_insert_item(&w->inodes, pos, new);
- if (ret)
- return ERR_PTR(ret);
-
- i = w->inodes.data + pos;
- }
-
- return i;
-}
-
-static struct inode_walker_entry *walk_inode(struct btree_trans *trans,
- struct inode_walker *w,
- struct bkey_s_c k)
-{
- if (w->last_pos.inode != k.k->p.inode) {
- int ret = get_inodes_all_snapshots(trans, w, k.k->p.inode);
- if (ret)
- return ERR_PTR(ret);
- }
-
- w->last_pos = k.k->p;
-
- return lookup_inode_for_snapshot(trans->c, w, k);
-}
-
-static int get_visible_inodes(struct btree_trans *trans,
- struct inode_walker *w,
- struct snapshots_seen *s,
- u64 inum)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret;
-
- w->inodes.nr = 0;
-
- for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
- BTREE_ITER_all_snapshots, k, ret) {
- if (k.k->p.offset != inum)
- break;
-
- if (!ref_visible(c, s, s->pos.snapshot, k.k->p.snapshot))
- continue;
-
- if (bkey_is_inode(k.k))
- add_inode(c, w, k);
-
- if (k.k->p.snapshot >= s->pos.snapshot)
- break;
- }
- bch2_trans_iter_exit(trans, &iter);
-
- return ret;
-}
-
-static int dirent_has_target(struct btree_trans *trans, struct bkey_s_c_dirent d)
-{
- if (d.v->d_type == DT_SUBVOL) {
- u32 snap;
- u64 inum;
- int ret = subvol_lookup(trans, le32_to_cpu(d.v->d_child_subvol), &snap, &inum);
- if (ret && !bch2_err_matches(ret, ENOENT))
- return ret;
- return !ret;
- } else {
- struct btree_iter iter;
- struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
- SPOS(0, le64_to_cpu(d.v->d_inum), d.k->p.snapshot), 0);
- int ret = bkey_err(k);
- if (ret)
- return ret;
-
- ret = bkey_is_inode(k.k);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
- }
-}
-
-/*
- * Prefer to delete the first one, since that will be the one at the wrong
- * offset:
- * return value: 0 -> delete k1, 1 -> delete k2
- */
-static int hash_pick_winner(struct btree_trans *trans,
- const struct bch_hash_desc desc,
- struct bch_hash_info *hash_info,
- struct bkey_s_c k1,
- struct bkey_s_c k2)
-{
- if (bkey_val_bytes(k1.k) == bkey_val_bytes(k2.k) &&
- !memcmp(k1.v, k2.v, bkey_val_bytes(k1.k)))
- return 0;
-
- switch (desc.btree_id) {
- case BTREE_ID_dirents: {
- int ret = dirent_has_target(trans, bkey_s_c_to_dirent(k1));
- if (ret < 0)
- return ret;
- if (!ret)
- return 0;
-
- ret = dirent_has_target(trans, bkey_s_c_to_dirent(k2));
- if (ret < 0)
- return ret;
- if (!ret)
- return 1;
- return 2;
- }
- default:
- return 0;
- }
-}
-
-static int fsck_update_backpointers(struct btree_trans *trans,
- struct snapshots_seen *s,
- const struct bch_hash_desc desc,
- struct bch_hash_info *hash_info,
- struct bkey_i *new)
-{
- if (new->k.type != KEY_TYPE_dirent)
- return 0;
-
- struct bkey_i_dirent *d = bkey_i_to_dirent(new);
- struct inode_walker target = inode_walker_init();
- int ret = 0;
-
- if (d->v.d_type == DT_SUBVOL) {
- BUG();
- } else {
- ret = get_visible_inodes(trans, &target, s, le64_to_cpu(d->v.d_inum));
- if (ret)
- goto err;
-
- darray_for_each(target.inodes, i) {
- i->inode.bi_dir_offset = d->k.p.offset;
- ret = __bch2_fsck_write_inode(trans, &i->inode);
- if (ret)
- goto err;
- }
- }
-err:
- inode_walker_exit(&target);
- return ret;
-}
-
-static int fsck_rename_dirent(struct btree_trans *trans,
- struct snapshots_seen *s,
- const struct bch_hash_desc desc,
- struct bch_hash_info *hash_info,
- struct bkey_s_c_dirent old)
-{
- struct qstr old_name = bch2_dirent_get_name(old);
- struct bkey_i_dirent *new = bch2_trans_kmalloc(trans, bkey_bytes(old.k) + 32);
- int ret = PTR_ERR_OR_ZERO(new);
- if (ret)
- return ret;
-
- bkey_dirent_init(&new->k_i);
- dirent_copy_target(new, old);
- new->k.p = old.k->p;
-
- for (unsigned i = 0; i < 1000; i++) {
- unsigned len = sprintf(new->v.d_name, "%.*s.fsck_renamed-%u",
- old_name.len, old_name.name, i);
- unsigned u64s = BKEY_U64s + dirent_val_u64s(len);
-
- if (u64s > U8_MAX)
- return -EINVAL;
-
- new->k.u64s = u64s;
-
- ret = bch2_hash_set_in_snapshot(trans, bch2_dirent_hash_desc, hash_info,
- (subvol_inum) { 0, old.k->p.inode },
- old.k->p.snapshot, &new->k_i,
- BTREE_UPDATE_internal_snapshot_node);
- if (!bch2_err_matches(ret, EEXIST))
- break;
- }
-
- if (ret)
- return ret;
-
- return fsck_update_backpointers(trans, s, desc, hash_info, &new->k_i);
-}
-
-static int hash_check_key(struct btree_trans *trans,
- struct snapshots_seen *s,
- const struct bch_hash_desc desc,
- struct bch_hash_info *hash_info,
- struct btree_iter *k_iter, struct bkey_s_c hash_k)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter = { NULL };
- struct printbuf buf = PRINTBUF;
- struct bkey_s_c k;
- u64 hash;
- int ret = 0;
-
- if (hash_k.k->type != desc.key_type)
- return 0;
-
- hash = desc.hash_bkey(hash_info, hash_k);
-
- if (likely(hash == hash_k.k->p.offset))
- return 0;
-
- if (hash_k.k->p.offset < hash)
- goto bad_hash;
-
- for_each_btree_key_norestart(trans, iter, desc.btree_id,
- SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot),
- BTREE_ITER_slots, k, ret) {
- if (bkey_eq(k.k->p, hash_k.k->p))
- break;
-
- if (k.k->type == desc.key_type &&
- !desc.cmp_bkey(k, hash_k))
- goto duplicate_entries;
-
- if (bkey_deleted(k.k)) {
- bch2_trans_iter_exit(trans, &iter);
- goto bad_hash;
- }
- }
-out:
- bch2_trans_iter_exit(trans, &iter);
- printbuf_exit(&buf);
- return ret;
-bad_hash:
- if (fsck_err(trans, hash_table_key_wrong_offset,
- "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n %s",
- bch2_btree_id_str(desc.btree_id), hash_k.k->p.inode, hash_k.k->p.offset, hash,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) {
- struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, hash_k);
- if (IS_ERR(new))
- return PTR_ERR(new);
-
- k = bch2_hash_set_or_get_in_snapshot(trans, &iter, desc, hash_info,
- (subvol_inum) { 0, hash_k.k->p.inode },
- hash_k.k->p.snapshot, new,
- STR_HASH_must_create|
- BTREE_ITER_with_updates|
- BTREE_UPDATE_internal_snapshot_node);
- ret = bkey_err(k);
- if (ret)
- goto out;
- if (k.k)
- goto duplicate_entries;
-
- ret = bch2_hash_delete_at(trans, desc, hash_info, k_iter,
- BTREE_UPDATE_internal_snapshot_node) ?:
- fsck_update_backpointers(trans, s, desc, hash_info, new) ?:
- bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?:
- -BCH_ERR_transaction_restart_nested;
- goto out;
- }
-fsck_err:
- goto out;
-duplicate_entries:
- ret = hash_pick_winner(trans, desc, hash_info, hash_k, k);
- if (ret < 0)
- goto out;
-
- if (!fsck_err(trans, hash_table_key_duplicate,
- "duplicate hash table keys%s:\n%s",
- ret != 2 ? "" : ", both point to valid inodes",
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, hash_k),
- prt_newline(&buf),
- bch2_bkey_val_to_text(&buf, c, k),
- buf.buf)))
- goto out;
-
- switch (ret) {
- case 0:
- ret = bch2_hash_delete_at(trans, desc, hash_info, k_iter, 0);
- break;
- case 1:
- ret = bch2_hash_delete_at(trans, desc, hash_info, &iter, 0);
- break;
- case 2:
- ret = fsck_rename_dirent(trans, s, desc, hash_info, bkey_s_c_to_dirent(hash_k)) ?:
- bch2_hash_delete_at(trans, desc, hash_info, k_iter, 0);
- goto out;
- }
-
- ret = bch2_trans_commit(trans, NULL, NULL, 0) ?:
- -BCH_ERR_transaction_restart_nested;
- goto out;
-}
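
hash_check_key() treats an entry as misplaced when it is stored before the offset its hash maps to, or when the probe run from the hashed offset to the entry's slot is interrupted by an empty slot, so lookups would never reach it. A toy open-addressing analogue of just that misplacement test, with illustrative names rather than the bcachefs str_hash API:

/* Illustrative model only: linear-probing table with no wraparound (like the
 * btree keyspace); an entry is unreachable if it sits before its hash slot or
 * if there is a hole between its hash slot and where it actually lives - the
 * two cases hash_check_key() repairs. */
#include <stdbool.h>
#include <stddef.h>

struct toy_entry {
	bool		used;
	unsigned long	key;
};

static size_t toy_hash(unsigned long key, size_t tbl_size)
{
	return (key * 0x9e3779b97f4a7c15ULL) % tbl_size;
}

static bool entry_misplaced(const struct toy_entry *tbl, size_t tbl_size,
			    size_t slot)
{
	size_t want = toy_hash(tbl[slot].key, tbl_size);

	if (slot < want)
		return true;		/* stored before its hash offset */

	for (size_t i = want; i < slot; i++)
		if (!tbl[i].used)
			return true;	/* hole breaks the probe run */

	return false;
}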
-
-static struct bkey_s_c_dirent dirent_get_by_pos(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bpos pos)
-{
- return bch2_bkey_get_iter_typed(trans, iter, BTREE_ID_dirents, pos, 0, dirent);
-}
-
-static struct bkey_s_c_dirent inode_get_dirent(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bch_inode_unpacked *inode,
- u32 *snapshot)
-{
- if (inode->bi_subvol) {
- u64 inum;
- int ret = subvol_lookup(trans, inode->bi_parent_subvol, snapshot, &inum);
- if (ret)
- return ((struct bkey_s_c_dirent) { .k = ERR_PTR(ret) });
- }
-
- return dirent_get_by_pos(trans, iter, SPOS(inode->bi_dir, inode->bi_dir_offset, *snapshot));
-}
-
-static int check_inode_deleted_list(struct btree_trans *trans, struct bpos p)
-{
- struct btree_iter iter;
- struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_deleted_inodes, p, 0);
- int ret = bkey_err(k) ?: k.k->type == KEY_TYPE_set;
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static int check_inode_dirent_inode(struct btree_trans *trans,
- struct bch_inode_unpacked *inode,
- bool *write_inode)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
-
- u32 inode_snapshot = inode->bi_snapshot;
- struct btree_iter dirent_iter = {};
- struct bkey_s_c_dirent d = inode_get_dirent(trans, &dirent_iter, inode, &inode_snapshot);
- int ret = bkey_err(d);
- if (ret && !bch2_err_matches(ret, ENOENT))
- return ret;
-
- if (fsck_err_on(ret,
- trans, inode_points_to_missing_dirent,
- "inode points to missing dirent\n%s",
- (bch2_inode_unpacked_to_text(&buf, inode), buf.buf)) ||
- fsck_err_on(!ret && dirent_points_to_inode_nowarn(d, inode),
- trans, inode_points_to_wrong_dirent,
- "%s",
- (printbuf_reset(&buf),
- dirent_inode_mismatch_msg(&buf, c, d, inode),
- buf.buf))) {
- /*
- * We just clear the backpointer fields for now. If we find a
- * dirent that points to this inode in check_dirents(), we'll
- * update it then; then when we get to check_path() if the
- * backpointer is still 0 we'll reattach it.
- */
- inode->bi_dir = 0;
- inode->bi_dir_offset = 0;
- *write_inode = true;
- }
-
- ret = 0;
-fsck_err:
- bch2_trans_iter_exit(trans, &dirent_iter);
- printbuf_exit(&buf);
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int get_snapshot_root_inode(struct btree_trans *trans,
- struct bch_inode_unpacked *root,
- u64 inum)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret = 0;
-
- for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes,
- SPOS(0, inum, U32_MAX),
- BTREE_ITER_all_snapshots, k, ret) {
- if (k.k->p.offset != inum)
- break;
- if (bkey_is_inode(k.k))
- goto found_root;
- }
- if (ret)
- goto err;
- BUG();
-found_root:
- BUG_ON(bch2_inode_unpack(k, root));
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static int check_inode(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k,
- struct bch_inode_unpacked *snapshot_root,
- struct snapshots_seen *s)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
- struct bch_inode_unpacked u;
- bool do_update = false;
- int ret;
-
- ret = bch2_check_key_has_snapshot(trans, iter, k);
- if (ret < 0)
- goto err;
- if (ret)
- return 0;
-
- ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
- if (ret)
- goto err;
-
- if (!bkey_is_inode(k.k))
- return 0;
-
- BUG_ON(bch2_inode_unpack(k, &u));
-
- if (snapshot_root->bi_inum != u.bi_inum) {
- ret = get_snapshot_root_inode(trans, snapshot_root, u.bi_inum);
- if (ret)
- goto err;
- }
-
- if (fsck_err_on(u.bi_hash_seed != snapshot_root->bi_hash_seed ||
- INODE_STR_HASH(&u) != INODE_STR_HASH(snapshot_root),
- trans, inode_snapshot_mismatch,
- "inodes in different snapshots don't match")) {
- u.bi_hash_seed = snapshot_root->bi_hash_seed;
- SET_INODE_STR_HASH(&u, INODE_STR_HASH(snapshot_root));
- do_update = true;
- }
-
- if (u.bi_dir || u.bi_dir_offset) {
- ret = check_inode_dirent_inode(trans, &u, &do_update);
- if (ret)
- goto err;
- }
-
- if (fsck_err_on(u.bi_dir && (u.bi_flags & BCH_INODE_unlinked),
- trans, inode_unlinked_but_has_dirent,
- "inode unlinked but has dirent\n%s",
- (printbuf_reset(&buf),
- bch2_inode_unpacked_to_text(&buf, &u),
- buf.buf))) {
- u.bi_flags &= ~BCH_INODE_unlinked;
- do_update = true;
- }
-
- if (S_ISDIR(u.bi_mode) && (u.bi_flags & BCH_INODE_unlinked)) {
- /* Check for this early so that check_unreachable_inode() will reattach it */
-
- ret = bch2_empty_dir_snapshot(trans, k.k->p.offset, 0, k.k->p.snapshot);
- if (ret && ret != -BCH_ERR_ENOTEMPTY_dir_not_empty)
- goto err;
-
- fsck_err_on(ret, trans, inode_dir_unlinked_but_not_empty,
- "dir unlinked but not empty\n%s",
- (printbuf_reset(&buf),
- bch2_inode_unpacked_to_text(&buf, &u),
- buf.buf));
- u.bi_flags &= ~BCH_INODE_unlinked;
- do_update = true;
- ret = 0;
- }
-
- ret = bch2_inode_has_child_snapshots(trans, k.k->p);
- if (ret < 0)
- goto err;
-
- if (fsck_err_on(ret != !!(u.bi_flags & BCH_INODE_has_child_snapshot),
- trans, inode_has_child_snapshots_wrong,
- "inode has_child_snapshots flag wrong (should be %u)\n%s",
- ret,
- (printbuf_reset(&buf),
- bch2_inode_unpacked_to_text(&buf, &u),
- buf.buf))) {
- if (ret)
- u.bi_flags |= BCH_INODE_has_child_snapshot;
- else
- u.bi_flags &= ~BCH_INODE_has_child_snapshot;
- do_update = true;
- }
- ret = 0;
-
- if ((u.bi_flags & BCH_INODE_unlinked) &&
- !(u.bi_flags & BCH_INODE_has_child_snapshot)) {
- if (!test_bit(BCH_FS_started, &c->flags)) {
- /*
- * If we're not in online fsck, don't delete unlinked
- * inodes, just make sure they're on the deleted list.
- *
- * They might be referred to by a logged operation -
- * i.e. we might have crashed in the middle of a
- * truncate on an unlinked but open file - so we want to
- * let the delete_dead_inodes kill it after resuming
- * logged ops.
- */
- ret = check_inode_deleted_list(trans, k.k->p);
- if (ret < 0)
- goto err_noprint;
-
- fsck_err_on(!ret,
- trans, unlinked_inode_not_on_deleted_list,
- "inode %llu:%u unlinked, but not on deleted list",
- u.bi_inum, k.k->p.snapshot);
-
- ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, k.k->p, 1);
- if (ret)
- goto err;
- } else {
- ret = bch2_inode_or_descendents_is_open(trans, k.k->p);
- if (ret < 0)
- goto err;
-
- if (fsck_err_on(!ret,
- trans, inode_unlinked_and_not_open,
- "inode %llu%u unlinked and not open",
- u.bi_inum, u.bi_snapshot)) {
- ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot);
- bch_err_msg(c, ret, "in fsck deleting inode");
- goto err_noprint;
- }
- ret = 0;
- }
- }
-
- if (fsck_err_on(u.bi_parent_subvol &&
- (u.bi_subvol == 0 ||
- u.bi_subvol == BCACHEFS_ROOT_SUBVOL),
- trans, inode_bi_parent_nonzero,
- "inode %llu:%u has subvol %u but nonzero parent subvol %u",
- u.bi_inum, k.k->p.snapshot, u.bi_subvol, u.bi_parent_subvol)) {
- u.bi_parent_subvol = 0;
- do_update = true;
- }
-
- if (u.bi_subvol) {
- struct bch_subvolume s;
-
- ret = bch2_subvolume_get(trans, u.bi_subvol, false, 0, &s);
- if (ret && !bch2_err_matches(ret, ENOENT))
- goto err;
-
- if (ret && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) {
- ret = reconstruct_subvol(trans, k.k->p.snapshot, u.bi_subvol, u.bi_inum);
- goto do_update;
- }
-
- if (fsck_err_on(ret,
- trans, inode_bi_subvol_missing,
- "inode %llu:%u bi_subvol points to missing subvolume %u",
- u.bi_inum, k.k->p.snapshot, u.bi_subvol) ||
- fsck_err_on(le64_to_cpu(s.inode) != u.bi_inum ||
- !bch2_snapshot_is_ancestor(c, le32_to_cpu(s.snapshot),
- k.k->p.snapshot),
- trans, inode_bi_subvol_wrong,
- "inode %llu:%u points to subvol %u, but subvol points to %llu:%u",
- u.bi_inum, k.k->p.snapshot, u.bi_subvol,
- le64_to_cpu(s.inode),
- le32_to_cpu(s.snapshot))) {
- u.bi_subvol = 0;
- u.bi_parent_subvol = 0;
- do_update = true;
- }
- }
-do_update:
- if (do_update) {
- ret = __bch2_fsck_write_inode(trans, &u);
- bch_err_msg(c, ret, "in fsck updating inode");
- if (ret)
- goto err_noprint;
- }
-err:
-fsck_err:
- bch_err_fn(c, ret);
-err_noprint:
- printbuf_exit(&buf);
- return ret;
-}
-
-int bch2_check_inodes(struct bch_fs *c)
-{
- struct bch_inode_unpacked snapshot_root = {};
- struct snapshots_seen s;
-
- snapshots_seen_init(&s);
-
- int ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
- POS_MIN,
- BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- check_inode(trans, &iter, k, &snapshot_root, &s)));
-
- snapshots_seen_exit(&s);
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int find_oldest_inode_needs_reattach(struct btree_trans *trans,
- struct bch_inode_unpacked *inode)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret = 0;
-
- /*
- * We look for inodes to reattach in natural key order, leaves first,
- * but we should do the reattach at the oldest version that needs to be
- * reattached:
- */
- for_each_btree_key_norestart(trans, iter,
- BTREE_ID_inodes,
- SPOS(0, inode->bi_inum, inode->bi_snapshot + 1),
- BTREE_ITER_all_snapshots, k, ret) {
- if (k.k->p.offset != inode->bi_inum)
- break;
-
- if (!bch2_snapshot_is_ancestor(c, inode->bi_snapshot, k.k->p.snapshot))
- continue;
-
- if (!bkey_is_inode(k.k))
- break;
-
- struct bch_inode_unpacked parent_inode;
- bch2_inode_unpack(k, &parent_inode);
-
- if (!inode_should_reattach(&parent_inode))
- break;
-
- *inode = parent_inode;
- }
- bch2_trans_iter_exit(trans, &iter);
-
- return ret;
-}
-
-static int check_unreachable_inode(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- if (!bkey_is_inode(k.k))
- return 0;
-
- struct bch_inode_unpacked inode;
- BUG_ON(bch2_inode_unpack(k, &inode));
-
- if (!inode_should_reattach(&inode))
- return 0;
-
- ret = find_oldest_inode_needs_reattach(trans, &inode);
- if (ret)
- return ret;
-
- if (fsck_err(trans, inode_unreachable,
- "unreachable inode:\n%s",
- (bch2_inode_unpacked_to_text(&buf, &inode),
- buf.buf)))
- ret = reattach_inode(trans, &inode);
-fsck_err:
- printbuf_exit(&buf);
- return ret;
-}
-
-/*
- * Reattach unreachable (but not unlinked) inodes
- *
- * Run after check_inodes() and check_dirents(), so we know that inode
- * backpointer fields point to valid dirents, and every inode that has a dirent
- * that points to it has its backpointer field set - so we're just looking for
- * non-unlinked inodes without backpointers:
- *
- * XXX: this is racy w.r.t. hardlink removal in online fsck
- */
-int bch2_check_unreachable_inodes(struct bch_fs *c)
-{
- int ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
- POS_MIN,
- BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- check_unreachable_inode(trans, &iter, k)));
- bch_err_fn(c, ret);
- return ret;
-}
-
-static inline bool btree_matches_i_mode(enum btree_id btree, unsigned mode)
-{
- switch (btree) {
- case BTREE_ID_extents:
- return S_ISREG(mode) || S_ISLNK(mode);
- case BTREE_ID_dirents:
- return S_ISDIR(mode);
- case BTREE_ID_xattrs:
- return true;
- default:
- BUG();
- }
-}
-
-static int check_key_has_inode(struct btree_trans *trans,
- struct btree_iter *iter,
- struct inode_walker *inode,
- struct inode_walker_entry *i,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
- int ret = PTR_ERR_OR_ZERO(i);
- if (ret)
- return ret;
-
- if (k.k->type == KEY_TYPE_whiteout)
- goto out;
-
- if (!i && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_inodes))) {
- ret = reconstruct_inode(trans, iter->btree_id, k.k->p.snapshot, k.k->p.inode) ?:
- bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
- if (ret)
- goto err;
-
- inode->last_pos.inode--;
- ret = -BCH_ERR_transaction_restart_nested;
- goto err;
- }
-
- if (fsck_err_on(!i,
- trans, key_in_missing_inode,
- "key in missing inode:\n %s",
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- goto delete;
-
- if (fsck_err_on(i && !btree_matches_i_mode(iter->btree_id, i->inode.bi_mode),
- trans, key_in_wrong_inode_type,
- "key for wrong inode mode %o:\n %s",
- i->inode.bi_mode,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- goto delete;
-out:
-err:
-fsck_err:
- printbuf_exit(&buf);
- bch_err_fn(c, ret);
- return ret;
-delete:
- ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_internal_snapshot_node);
- goto out;
-}
-
-static int check_i_sectors_notnested(struct btree_trans *trans, struct inode_walker *w)
-{
- struct bch_fs *c = trans->c;
- int ret = 0;
- s64 count2;
-
- darray_for_each(w->inodes, i) {
- if (i->inode.bi_sectors == i->count)
- continue;
-
- count2 = bch2_count_inode_sectors(trans, w->last_pos.inode, i->snapshot);
-
- if (w->recalculate_sums)
- i->count = count2;
-
- if (i->count != count2) {
- bch_err_ratelimited(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu",
- w->last_pos.inode, i->snapshot, i->count, count2);
- return -BCH_ERR_internal_fsck_err;
- }
-
- if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_sectors_dirty),
- trans, inode_i_sectors_wrong,
- "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
- w->last_pos.inode, i->snapshot,
- i->inode.bi_sectors, i->count)) {
- i->inode.bi_sectors = i->count;
- ret = bch2_fsck_write_inode(trans, &i->inode);
- if (ret)
- break;
- }
- }
-fsck_err:
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
-{
- u32 restart_count = trans->restart_count;
- return check_i_sectors_notnested(trans, w) ?:
- trans_was_restarted(trans, restart_count);
-}
-
-struct extent_end {
- u32 snapshot;
- u64 offset;
- struct snapshots_seen seen;
-};
-
-struct extent_ends {
- struct bpos last_pos;
- DARRAY(struct extent_end) e;
-};
-
-static void extent_ends_reset(struct extent_ends *extent_ends)
-{
- darray_for_each(extent_ends->e, i)
- snapshots_seen_exit(&i->seen);
- extent_ends->e.nr = 0;
-}
-
-static void extent_ends_exit(struct extent_ends *extent_ends)
-{
- extent_ends_reset(extent_ends);
- darray_exit(&extent_ends->e);
-}
-
-static void extent_ends_init(struct extent_ends *extent_ends)
-{
- memset(extent_ends, 0, sizeof(*extent_ends));
-}
-
-static int extent_ends_at(struct bch_fs *c,
- struct extent_ends *extent_ends,
- struct snapshots_seen *seen,
- struct bkey_s_c k)
-{
- struct extent_end *i, n = (struct extent_end) {
- .offset = k.k->p.offset,
- .snapshot = k.k->p.snapshot,
- .seen = *seen,
- };
-
- n.seen.ids.data = kmemdup(seen->ids.data,
- sizeof(seen->ids.data[0]) * seen->ids.size,
- GFP_KERNEL);
- if (!n.seen.ids.data)
- return -BCH_ERR_ENOMEM_fsck_extent_ends_at;
-
- __darray_for_each(extent_ends->e, i) {
- if (i->snapshot == k.k->p.snapshot) {
- snapshots_seen_exit(&i->seen);
- *i = n;
- return 0;
- }
-
- if (i->snapshot >= k.k->p.snapshot)
- break;
- }
-
- return darray_insert_item(&extent_ends->e, i - extent_ends->e.data, n);
-}
-
-static int overlapping_extents_found(struct btree_trans *trans,
- enum btree_id btree,
- struct bpos pos1, struct snapshots_seen *pos1_seen,
- struct bkey pos2,
- bool *fixed,
- struct extent_end *extent_end)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
- struct btree_iter iter1, iter2 = { NULL };
- struct bkey_s_c k1, k2;
- int ret;
-
- BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2)));
-
- bch2_trans_iter_init(trans, &iter1, btree, pos1,
- BTREE_ITER_all_snapshots|
- BTREE_ITER_not_extents);
- k1 = bch2_btree_iter_peek_upto(&iter1, POS(pos1.inode, U64_MAX));
- ret = bkey_err(k1);
- if (ret)
- goto err;
-
- prt_str(&buf, "\n ");
- bch2_bkey_val_to_text(&buf, c, k1);
-
- if (!bpos_eq(pos1, k1.k->p)) {
- prt_str(&buf, "\n wanted\n ");
- bch2_bpos_to_text(&buf, pos1);
- prt_str(&buf, "\n ");
- bch2_bkey_to_text(&buf, &pos2);
-
- bch_err(c, "%s: error finding first overlapping extent when repairing, got%s",
- __func__, buf.buf);
- ret = -BCH_ERR_internal_fsck_err;
- goto err;
- }
-
- bch2_trans_copy_iter(&iter2, &iter1);
-
- while (1) {
- bch2_btree_iter_advance(&iter2);
-
- k2 = bch2_btree_iter_peek_upto(&iter2, POS(pos1.inode, U64_MAX));
- ret = bkey_err(k2);
- if (ret)
- goto err;
-
- if (bpos_ge(k2.k->p, pos2.p))
- break;
- }
-
- prt_str(&buf, "\n ");
- bch2_bkey_val_to_text(&buf, c, k2);
-
- if (bpos_gt(k2.k->p, pos2.p) ||
- pos2.size != k2.k->size) {
- bch_err(c, "%s: error finding seconding overlapping extent when repairing%s",
- __func__, buf.buf);
- ret = -BCH_ERR_internal_fsck_err;
- goto err;
- }
-
- prt_printf(&buf, "\n overwriting %s extent",
- pos1.snapshot >= pos2.p.snapshot ? "first" : "second");
-
- if (fsck_err(trans, extent_overlapping,
- "overlapping extents%s", buf.buf)) {
- struct btree_iter *old_iter = &iter1;
- struct disk_reservation res = { 0 };
-
- if (pos1.snapshot < pos2.p.snapshot) {
- old_iter = &iter2;
- swap(k1, k2);
- }
-
- trans->extra_disk_res += bch2_bkey_sectors_compressed(k2);
-
- ret = bch2_trans_update_extent_overwrite(trans, old_iter,
- BTREE_UPDATE_internal_snapshot_node,
- k1, k2) ?:
- bch2_trans_commit(trans, &res, NULL, BCH_TRANS_COMMIT_no_enospc);
- bch2_disk_reservation_put(c, &res);
-
- if (ret)
- goto err;
-
- *fixed = true;
-
- if (pos1.snapshot == pos2.p.snapshot) {
- /*
- * We overwrote the first extent, and did the overwrite
- * in the same snapshot:
- */
- extent_end->offset = bkey_start_offset(&pos2);
- } else if (pos1.snapshot > pos2.p.snapshot) {
- /*
- * We overwrote the first extent in pos2's snapshot:
- */
- ret = snapshots_seen_add_inorder(c, pos1_seen, pos2.p.snapshot);
- } else {
- /*
- * We overwrote the second extent - restart
- * check_extent() from the top:
- */
- ret = -BCH_ERR_transaction_restart_nested;
- }
- }
-fsck_err:
-err:
- bch2_trans_iter_exit(trans, &iter2);
- bch2_trans_iter_exit(trans, &iter1);
- printbuf_exit(&buf);
- return ret;
-}
-
-static int check_overlapping_extents(struct btree_trans *trans,
- struct snapshots_seen *seen,
- struct extent_ends *extent_ends,
- struct bkey_s_c k,
- struct btree_iter *iter,
- bool *fixed)
-{
- struct bch_fs *c = trans->c;
- int ret = 0;
-
- /* transaction restart, running again */
- if (bpos_eq(extent_ends->last_pos, k.k->p))
- return 0;
-
- if (extent_ends->last_pos.inode != k.k->p.inode)
- extent_ends_reset(extent_ends);
-
- darray_for_each(extent_ends->e, i) {
- if (i->offset <= bkey_start_offset(k.k))
- continue;
-
- if (!ref_visible2(c,
- k.k->p.snapshot, seen,
- i->snapshot, &i->seen))
- continue;
-
- ret = overlapping_extents_found(trans, iter->btree_id,
- SPOS(iter->pos.inode,
- i->offset,
- i->snapshot),
- &i->seen,
- *k.k, fixed, i);
- if (ret)
- goto err;
- }
-
- extent_ends->last_pos = k.k->p;
-err:
- return ret;
-}
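
For illustration, the overlap scan above reduces to: remember, per snapshot, the end offset of the last extent seen in this inode (extent_ends), and flag a new extent whenever its start offset falls below a recorded end in a snapshot it can see. A minimal user-space sketch of that test follows, with snapshot visibility collapsed to plain equality (the kernel uses ref_visible2() over the snapshot tree); every name below is a hypothetical stand-in, not bcachefs API:

#include <stdbool.h>
#include <stdio.h>

struct seen_end { unsigned snapshot; unsigned long long end; };

/* Does an extent starting at 'start' in 'snapshot' overlap a recorded end? */
static bool extent_overlaps(const struct seen_end *seen, unsigned nr,
			    unsigned snapshot, unsigned long long start)
{
	for (unsigned i = 0; i < nr; i++)
		if (seen[i].snapshot == snapshot && seen[i].end > start)
			return true;
	return false;
}

int main(void)
{
	struct seen_end seen[] = { { 1, 64 }, { 2, 128 } };

	/* [100, 160) in snapshot 2 starts before the recorded end at 128: overlap */
	printf("%d\n", extent_overlaps(seen, 2, 2, 100));
	/* [200, 260) in snapshot 2 starts past every recorded end: no overlap */
	printf("%d\n", extent_overlaps(seen, 2, 2, 200));
	return 0;
}
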
-
-static int check_extent_overbig(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- struct bch_extent_crc_unpacked crc;
- const union bch_extent_entry *i;
- unsigned encoded_extent_max_sectors = c->opts.encoded_extent_max >> 9;
-
- bkey_for_each_crc(k.k, ptrs, crc, i)
- if (crc_is_encoded(crc) &&
- crc.uncompressed_size > encoded_extent_max_sectors) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf, c, k);
- bch_err(c, "overbig encoded extent, please report this:\n %s", buf.buf);
- printbuf_exit(&buf);
- }
-
- return 0;
-}
-
-static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c k,
- struct inode_walker *inode,
- struct snapshots_seen *s,
- struct extent_ends *extent_ends,
- struct disk_reservation *res)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- ret = bch2_check_key_has_snapshot(trans, iter, k);
- if (ret) {
- ret = ret < 0 ? ret : 0;
- goto out;
- }
-
- if (inode->last_pos.inode != k.k->p.inode && inode->have_inodes) {
- ret = check_i_sectors(trans, inode);
- if (ret)
- goto err;
- }
-
- ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
- if (ret)
- goto err;
-
- struct inode_walker_entry *extent_i = walk_inode(trans, inode, k);
- ret = PTR_ERR_OR_ZERO(extent_i);
- if (ret)
- goto err;
-
- ret = check_key_has_inode(trans, iter, inode, extent_i, k);
- if (ret)
- goto err;
-
- if (k.k->type != KEY_TYPE_whiteout) {
- ret = check_overlapping_extents(trans, s, extent_ends, k, iter,
- &inode->recalculate_sums);
- if (ret)
- goto err;
-
- /*
- * Check inodes in reverse order, from oldest snapshots to
- * newest, starting from the inode that matches this extent's
- * snapshot. If we didn't have one, iterate over all inodes:
- */
- for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes);
- inode->inodes.data && i >= inode->inodes.data;
- --i) {
- if (i->snapshot > k.k->p.snapshot ||
- !key_visible_in_snapshot(c, s, i->snapshot, k.k->p.snapshot))
- continue;
-
- if (fsck_err_on(k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
- !bkey_extent_is_reservation(k),
- trans, extent_past_end_of_inode,
- "extent type past end of inode %llu:%u, i_size %llu\n %s",
- i->inode.bi_inum, i->snapshot, i->inode.bi_size,
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- struct btree_iter iter2;
-
- bch2_trans_copy_iter(&iter2, iter);
- bch2_btree_iter_set_snapshot(&iter2, i->snapshot);
- ret = bch2_btree_iter_traverse(&iter2) ?:
- bch2_btree_delete_at(trans, &iter2,
- BTREE_UPDATE_internal_snapshot_node);
- bch2_trans_iter_exit(trans, &iter2);
- if (ret)
- goto err;
-
- iter->k.type = KEY_TYPE_whiteout;
- break;
- }
- }
- }
-
- ret = bch2_trans_commit(trans, res, NULL, BCH_TRANS_COMMIT_no_enospc);
- if (ret)
- goto err;
-
- if (bkey_extent_is_allocation(k.k)) {
- for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes);
- inode->inodes.data && i >= inode->inodes.data;
- --i) {
- if (i->snapshot > k.k->p.snapshot ||
- !key_visible_in_snapshot(c, s, i->snapshot, k.k->p.snapshot))
- continue;
-
- i->count += k.k->size;
- }
- }
-
- if (k.k->type != KEY_TYPE_whiteout) {
- ret = extent_ends_at(c, extent_ends, s, k);
- if (ret)
- goto err;
- }
-out:
-err:
-fsck_err:
- printbuf_exit(&buf);
- bch_err_fn(c, ret);
- return ret;
-}
-
-/*
- * Walk extents: verify that extents have a corresponding S_ISREG inode, and
- * that i_size and i_sectors are consistent
- */
-int bch2_check_extents(struct bch_fs *c)
-{
- struct inode_walker w = inode_walker_init();
- struct snapshots_seen s;
- struct extent_ends extent_ends;
- struct disk_reservation res = { 0 };
-
- snapshots_seen_init(&s);
- extent_ends_init(&extent_ends);
-
- int ret = bch2_trans_run(c,
- for_each_btree_key(trans, iter, BTREE_ID_extents,
- POS(BCACHEFS_ROOT_INO, 0),
- BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, ({
- bch2_disk_reservation_put(c, &res);
- check_extent(trans, &iter, k, &w, &s, &extent_ends, &res) ?:
- check_extent_overbig(trans, &iter, k);
- })) ?:
- check_i_sectors_notnested(trans, &w));
-
- bch2_disk_reservation_put(c, &res);
- extent_ends_exit(&extent_ends);
- inode_walker_exit(&w);
- snapshots_seen_exit(&s);
-
- bch_err_fn(c, ret);
- return ret;
-}
-
-int bch2_check_indirect_extents(struct bch_fs *c)
-{
- struct disk_reservation res = { 0 };
-
- int ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter, BTREE_ID_reflink,
- POS_MIN,
- BTREE_ITER_prefetch, k,
- &res, NULL,
- BCH_TRANS_COMMIT_no_enospc, ({
- bch2_disk_reservation_put(c, &res);
- check_extent_overbig(trans, &iter, k);
- })));
-
- bch2_disk_reservation_put(c, &res);
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int check_subdir_count_notnested(struct btree_trans *trans, struct inode_walker *w)
-{
- struct bch_fs *c = trans->c;
- int ret = 0;
- s64 count2;
-
- darray_for_each(w->inodes, i) {
- if (i->inode.bi_nlink == i->count)
- continue;
-
- count2 = bch2_count_subdirs(trans, w->last_pos.inode, i->snapshot);
- if (count2 < 0)
- return count2;
-
- if (i->count != count2) {
- bch_err_ratelimited(c, "fsck counted subdirectories wrong for inum %llu:%u: got %llu should be %llu",
- w->last_pos.inode, i->snapshot, i->count, count2);
- i->count = count2;
- if (i->inode.bi_nlink == i->count)
- continue;
- }
-
- if (fsck_err_on(i->inode.bi_nlink != i->count,
- trans, inode_dir_wrong_nlink,
- "directory %llu:%u with wrong i_nlink: got %u, should be %llu",
- w->last_pos.inode, i->snapshot, i->inode.bi_nlink, i->count)) {
- i->inode.bi_nlink = i->count;
- ret = bch2_fsck_write_inode(trans, &i->inode);
- if (ret)
- break;
- }
- }
-fsck_err:
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
-{
- u32 restart_count = trans->restart_count;
- return check_subdir_count_notnested(trans, w) ?:
- trans_was_restarted(trans, restart_count);
-}
-
-noinline_for_stack
-static int check_dirent_inode_dirent(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c_dirent d,
- struct bch_inode_unpacked *target)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
- struct btree_iter bp_iter = { NULL };
- int ret = 0;
-
- if (inode_points_to_dirent(target, d))
- return 0;
-
- if (!target->bi_dir &&
- !target->bi_dir_offset) {
- fsck_err_on(S_ISDIR(target->bi_mode),
- trans, inode_dir_missing_backpointer,
- "directory with missing backpointer\n%s",
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, d.s_c),
- prt_printf(&buf, "\n"),
- bch2_inode_unpacked_to_text(&buf, target),
- buf.buf));
-
- fsck_err_on(target->bi_flags & BCH_INODE_unlinked,
- trans, inode_unlinked_but_has_dirent,
- "inode unlinked but has dirent\n%s",
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, d.s_c),
- prt_printf(&buf, "\n"),
- bch2_inode_unpacked_to_text(&buf, target),
- buf.buf));
-
- target->bi_flags &= ~BCH_INODE_unlinked;
- target->bi_dir = d.k->p.inode;
- target->bi_dir_offset = d.k->p.offset;
- return __bch2_fsck_write_inode(trans, target);
- }
-
- if (bch2_inode_should_have_bp(target) &&
- !fsck_err(trans, inode_wrong_backpointer,
- "dirent points to inode that does not point back:\n %s",
- (bch2_bkey_val_to_text(&buf, c, d.s_c),
- prt_printf(&buf, "\n "),
- bch2_inode_unpacked_to_text(&buf, target),
- buf.buf)))
- goto err;
-
- struct bkey_s_c_dirent bp_dirent = dirent_get_by_pos(trans, &bp_iter,
- SPOS(target->bi_dir, target->bi_dir_offset, target->bi_snapshot));
- ret = bkey_err(bp_dirent);
- if (ret && !bch2_err_matches(ret, ENOENT))
- goto err;
-
- bool backpointer_exists = !ret;
- ret = 0;
-
- if (fsck_err_on(!backpointer_exists,
- trans, inode_wrong_backpointer,
- "inode %llu:%u has wrong backpointer:\n"
- "got %llu:%llu\n"
- "should be %llu:%llu",
- target->bi_inum, target->bi_snapshot,
- target->bi_dir,
- target->bi_dir_offset,
- d.k->p.inode,
- d.k->p.offset)) {
- target->bi_dir = d.k->p.inode;
- target->bi_dir_offset = d.k->p.offset;
- ret = __bch2_fsck_write_inode(trans, target);
- goto out;
- }
-
- bch2_bkey_val_to_text(&buf, c, d.s_c);
- prt_newline(&buf);
- if (backpointer_exists)
- bch2_bkey_val_to_text(&buf, c, bp_dirent.s_c);
-
- if (fsck_err_on(backpointer_exists &&
- (S_ISDIR(target->bi_mode) ||
- target->bi_subvol),
- trans, inode_dir_multiple_links,
- "%s %llu:%u with multiple links\n%s",
- S_ISDIR(target->bi_mode) ? "directory" : "subvolume",
- target->bi_inum, target->bi_snapshot, buf.buf)) {
- ret = __remove_dirent(trans, d.k->p);
- goto out;
- }
-
- /*
- * hardlinked file with nlink 0:
- * We're just adjusting nlink here so check_nlinks() will pick
- * it up; it ignores inodes with nlink 0
- */
- if (fsck_err_on(backpointer_exists && !target->bi_nlink,
- trans, inode_multiple_links_but_nlink_0,
- "inode %llu:%u type %s has multiple links but i_nlink 0\n%s",
- target->bi_inum, target->bi_snapshot, bch2_d_types[d.v->d_type], buf.buf)) {
- target->bi_nlink++;
- target->bi_flags &= ~BCH_INODE_unlinked;
- ret = __bch2_fsck_write_inode(trans, target);
- if (ret)
- goto err;
- }
-out:
-err:
-fsck_err:
- bch2_trans_iter_exit(trans, &bp_iter);
- printbuf_exit(&buf);
- bch_err_fn(c, ret);
- return ret;
-}
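
The function above enforces a two-way invariant: a dirent names an inode, and that inode's bi_dir/bi_dir_offset must point back at the dirent (directories are allowed exactly one such link). A minimal user-space sketch of just the back-reference test, with key positions reduced to plain integers (hypothetical toy types, not bcachefs structures):

#include <stdbool.h>
#include <stdio.h>

struct toy_dirent { unsigned long long dir_inum, offset, target; };
struct toy_inode  { unsigned long long inum, bi_dir, bi_dir_offset; };

/* Does the inode's backpointer point back at this dirent? */
static bool inode_points_back(const struct toy_dirent *d, const struct toy_inode *i)
{
	return i->bi_dir == d->dir_inum && i->bi_dir_offset == d->offset;
}

int main(void)
{
	struct toy_dirent d    = { .dir_inum = 4096, .offset = 77, .target = 5000 };
	struct toy_inode  good = { 5000, 4096, 77 };
	struct toy_inode  bad  = { 5000, 4096, 12 };	/* stale backpointer */

	printf("%d %d\n", inode_points_back(&d, &good), inode_points_back(&d, &bad));
	return 0;
}
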
-
-noinline_for_stack
-static int check_dirent_target(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c_dirent d,
- struct bch_inode_unpacked *target)
-{
- struct bch_fs *c = trans->c;
- struct bkey_i_dirent *n;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- ret = check_dirent_inode_dirent(trans, iter, d, target);
- if (ret)
- goto err;
-
- if (fsck_err_on(d.v->d_type != inode_d_type(target),
- trans, dirent_d_type_wrong,
- "incorrect d_type: got %s, should be %s:\n%s",
- bch2_d_type_str(d.v->d_type),
- bch2_d_type_str(inode_d_type(target)),
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
- n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
- ret = PTR_ERR_OR_ZERO(n);
- if (ret)
- goto err;
-
- bkey_reassemble(&n->k_i, d.s_c);
- n->v.d_type = inode_d_type(target);
- if (n->v.d_type == DT_SUBVOL) {
- n->v.d_parent_subvol = cpu_to_le32(target->bi_parent_subvol);
- n->v.d_child_subvol = cpu_to_le32(target->bi_subvol);
- } else {
- n->v.d_inum = cpu_to_le64(target->bi_inum);
- }
-
- ret = bch2_trans_update(trans, iter, &n->k_i, 0);
- if (ret)
- goto err;
-
- d = dirent_i_to_s_c(n);
- }
-err:
-fsck_err:
- printbuf_exit(&buf);
- bch_err_fn(c, ret);
- return ret;
-}
-
-/* find a subvolume that's a descendant of @snapshot: */
-static int find_snapshot_subvol(struct btree_trans *trans, u32 snapshot, u32 *subvolid)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret;
-
- for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN, 0, k, ret) {
- if (k.k->type != KEY_TYPE_subvolume)
- continue;
-
- struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
- if (bch2_snapshot_is_ancestor(trans->c, le32_to_cpu(s.v->snapshot), snapshot)) {
- bch2_trans_iter_exit(trans, &iter);
- *subvolid = k.k->p.offset;
- goto found;
- }
- }
- if (!ret)
- ret = -ENOENT;
-found:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-noinline_for_stack
-static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c_dirent d)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter subvol_iter = {};
- struct bch_inode_unpacked subvol_root;
- u32 parent_subvol = le32_to_cpu(d.v->d_parent_subvol);
- u32 target_subvol = le32_to_cpu(d.v->d_child_subvol);
- u32 parent_snapshot;
- u32 new_parent_subvol = 0;
- u64 parent_inum;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- ret = subvol_lookup(trans, parent_subvol, &parent_snapshot, &parent_inum);
- if (ret && !bch2_err_matches(ret, ENOENT))
- return ret;
-
- if (ret ||
- (!ret && !bch2_snapshot_is_ancestor(c, parent_snapshot, d.k->p.snapshot))) {
- int ret2 = find_snapshot_subvol(trans, d.k->p.snapshot, &new_parent_subvol);
- if (ret2 && !bch2_err_matches(ret, ENOENT))
- return ret2;
- }
-
- if (ret &&
- !new_parent_subvol &&
- (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) {
- /*
- * Couldn't find a subvol for dirent's snapshot - but we lost
- * subvols, so we need to reconstruct:
- */
- ret = reconstruct_subvol(trans, d.k->p.snapshot, parent_subvol, 0);
- if (ret)
- return ret;
-
- parent_snapshot = d.k->p.snapshot;
- }
-
- if (fsck_err_on(ret,
- trans, dirent_to_missing_parent_subvol,
- "dirent parent_subvol points to missing subvolume\n%s",
- (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf)) ||
- fsck_err_on(!ret && !bch2_snapshot_is_ancestor(c, parent_snapshot, d.k->p.snapshot),
- trans, dirent_not_visible_in_parent_subvol,
- "dirent not visible in parent_subvol (not an ancestor of subvol snap %u)\n%s",
- parent_snapshot,
- (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
- if (!new_parent_subvol) {
- bch_err(c, "could not find a subvol for snapshot %u", d.k->p.snapshot);
- return -BCH_ERR_fsck_repair_unimplemented;
- }
-
- struct bkey_i_dirent *new_dirent = bch2_bkey_make_mut_typed(trans, iter, &d.s_c, 0, dirent);
- ret = PTR_ERR_OR_ZERO(new_dirent);
- if (ret)
- goto err;
-
- new_dirent->v.d_parent_subvol = cpu_to_le32(new_parent_subvol);
- }
-
- struct bkey_s_c_subvolume s =
- bch2_bkey_get_iter_typed(trans, &subvol_iter,
- BTREE_ID_subvolumes, POS(0, target_subvol),
- 0, subvolume);
- ret = bkey_err(s.s_c);
- if (ret && !bch2_err_matches(ret, ENOENT))
- return ret;
-
- if (ret) {
- if (fsck_err(trans, dirent_to_missing_subvol,
- "dirent points to missing subvolume\n%s",
- (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf)))
- return __remove_dirent(trans, d.k->p);
- ret = 0;
- goto out;
- }
-
- if (fsck_err_on(le32_to_cpu(s.v->fs_path_parent) != parent_subvol,
- trans, subvol_fs_path_parent_wrong,
- "subvol with wrong fs_path_parent, should be be %u\n%s",
- parent_subvol,
- (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- struct bkey_i_subvolume *n =
- bch2_bkey_make_mut_typed(trans, &subvol_iter, &s.s_c, 0, subvolume);
- ret = PTR_ERR_OR_ZERO(n);
- if (ret)
- goto err;
-
- n->v.fs_path_parent = cpu_to_le32(parent_subvol);
- }
-
- u64 target_inum = le64_to_cpu(s.v->inode);
- u32 target_snapshot = le32_to_cpu(s.v->snapshot);
-
- ret = lookup_inode(trans, target_inum, target_snapshot, &subvol_root);
- if (ret && !bch2_err_matches(ret, ENOENT))
- goto err;
-
- if (ret) {
- bch_err(c, "subvol %u points to missing inode root %llu", target_subvol, target_inum);
- ret = -BCH_ERR_fsck_repair_unimplemented;
- goto err;
- }
-
- if (fsck_err_on(!ret && parent_subvol != subvol_root.bi_parent_subvol,
- trans, inode_bi_parent_wrong,
- "subvol root %llu has wrong bi_parent_subvol: got %u, should be %u",
- target_inum,
- subvol_root.bi_parent_subvol, parent_subvol)) {
- subvol_root.bi_parent_subvol = parent_subvol;
- subvol_root.bi_snapshot = le32_to_cpu(s.v->snapshot);
- ret = __bch2_fsck_write_inode(trans, &subvol_root);
- if (ret)
- goto err;
- }
-
- ret = check_dirent_target(trans, iter, d, &subvol_root);
- if (ret)
- goto err;
-out:
-err:
-fsck_err:
- bch2_trans_iter_exit(trans, &subvol_iter);
- printbuf_exit(&buf);
- return ret;
-}
-
-static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c k,
- struct bch_hash_info *hash_info,
- struct inode_walker *dir,
- struct inode_walker *target,
- struct snapshots_seen *s)
-{
- struct bch_fs *c = trans->c;
- struct inode_walker_entry *i;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- ret = bch2_check_key_has_snapshot(trans, iter, k);
- if (ret) {
- ret = ret < 0 ? ret : 0;
- goto out;
- }
-
- ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
- if (ret)
- goto err;
-
- if (k.k->type == KEY_TYPE_whiteout)
- goto out;
-
- if (dir->last_pos.inode != k.k->p.inode && dir->have_inodes) {
- ret = check_subdir_count(trans, dir);
- if (ret)
- goto err;
- }
-
- i = walk_inode(trans, dir, k);
- ret = PTR_ERR_OR_ZERO(i);
- if (ret < 0)
- goto err;
-
- ret = check_key_has_inode(trans, iter, dir, i, k);
- if (ret)
- goto err;
-
- if (!i)
- goto out;
-
- if (dir->first_this_inode)
- *hash_info = bch2_hash_info_init(c, &i->inode);
- dir->first_this_inode = false;
-
- ret = hash_check_key(trans, s, bch2_dirent_hash_desc, hash_info, iter, k);
- if (ret < 0)
- goto err;
- if (ret) {
- /* dirent has been deleted */
- ret = 0;
- goto out;
- }
-
- if (k.k->type != KEY_TYPE_dirent)
- goto out;
-
- struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
-
- if (d.v->d_type == DT_SUBVOL) {
- ret = check_dirent_to_subvol(trans, iter, d);
- if (ret)
- goto err;
- } else {
- ret = get_visible_inodes(trans, target, s, le64_to_cpu(d.v->d_inum));
- if (ret)
- goto err;
-
- if (fsck_err_on(!target->inodes.nr,
- trans, dirent_to_missing_inode,
- "dirent points to missing inode:\n%s",
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k),
- buf.buf))) {
- ret = __remove_dirent(trans, d.k->p);
- if (ret)
- goto err;
- }
-
- darray_for_each(target->inodes, i) {
- ret = check_dirent_target(trans, iter, d, &i->inode);
- if (ret)
- goto err;
- }
- }
-
- ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
- if (ret)
- goto err;
-
- if (d.v->d_type == DT_DIR)
- for_each_visible_inode(c, s, dir, d.k->p.snapshot, i)
- i->count++;
-out:
-err:
-fsck_err:
- printbuf_exit(&buf);
- bch_err_fn(c, ret);
- return ret;
-}
-
-/*
- * Walk dirents: verify that they all have a corresponding S_ISDIR inode,
- * validate d_type
- */
-int bch2_check_dirents(struct bch_fs *c)
-{
- struct inode_walker dir = inode_walker_init();
- struct inode_walker target = inode_walker_init();
- struct snapshots_seen s;
- struct bch_hash_info hash_info;
-
- snapshots_seen_init(&s);
-
- int ret = bch2_trans_run(c,
- for_each_btree_key(trans, iter, BTREE_ID_dirents,
- POS(BCACHEFS_ROOT_INO, 0),
- BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
- check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s)) ?:
- check_subdir_count_notnested(trans, &dir));
-
- snapshots_seen_exit(&s);
- inode_walker_exit(&dir);
- inode_walker_exit(&target);
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c k,
- struct bch_hash_info *hash_info,
- struct inode_walker *inode)
-{
- struct bch_fs *c = trans->c;
- struct inode_walker_entry *i;
- int ret;
-
- ret = bch2_check_key_has_snapshot(trans, iter, k);
- if (ret < 0)
- return ret;
- if (ret)
- return 0;
-
- i = walk_inode(trans, inode, k);
- ret = PTR_ERR_OR_ZERO(i);
- if (ret)
- return ret;
-
- ret = check_key_has_inode(trans, iter, inode, i, k);
- if (ret)
- return ret;
-
- if (!i)
- return 0;
-
- if (inode->first_this_inode)
- *hash_info = bch2_hash_info_init(c, &i->inode);
- inode->first_this_inode = false;
-
- ret = hash_check_key(trans, NULL, bch2_xattr_hash_desc, hash_info, iter, k);
- bch_err_fn(c, ret);
- return ret;
-}
-
-/*
- * Walk xattrs: verify that they all have a corresponding inode
- */
-int bch2_check_xattrs(struct bch_fs *c)
-{
- struct inode_walker inode = inode_walker_init();
- struct bch_hash_info hash_info;
- int ret = 0;
-
- ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
- POS(BCACHEFS_ROOT_INO, 0),
- BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
- k,
- NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc,
- check_xattr(trans, &iter, k, &hash_info, &inode)));
-
- inode_walker_exit(&inode);
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int check_root_trans(struct btree_trans *trans)
-{
- struct bch_fs *c = trans->c;
- struct bch_inode_unpacked root_inode;
- u32 snapshot;
- u64 inum;
- int ret;
-
- ret = subvol_lookup(trans, BCACHEFS_ROOT_SUBVOL, &snapshot, &inum);
- if (ret && !bch2_err_matches(ret, ENOENT))
- return ret;
-
- if (mustfix_fsck_err_on(ret, trans, root_subvol_missing,
- "root subvol missing")) {
- struct bkey_i_subvolume *root_subvol =
- bch2_trans_kmalloc(trans, sizeof(*root_subvol));
- ret = PTR_ERR_OR_ZERO(root_subvol);
- if (ret)
- goto err;
-
- snapshot = U32_MAX;
- inum = BCACHEFS_ROOT_INO;
-
- bkey_subvolume_init(&root_subvol->k_i);
- root_subvol->k.p.offset = BCACHEFS_ROOT_SUBVOL;
- root_subvol->v.flags = 0;
- root_subvol->v.snapshot = cpu_to_le32(snapshot);
- root_subvol->v.inode = cpu_to_le64(inum);
- ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &root_subvol->k_i, 0);
- bch_err_msg(c, ret, "writing root subvol");
- if (ret)
- goto err;
- }
-
- ret = lookup_inode(trans, BCACHEFS_ROOT_INO, snapshot, &root_inode);
- if (ret && !bch2_err_matches(ret, ENOENT))
- return ret;
-
- if (mustfix_fsck_err_on(ret,
- trans, root_dir_missing,
- "root directory missing") ||
- mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode),
- trans, root_inode_not_dir,
- "root inode not a directory")) {
- bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755,
- 0, NULL);
- root_inode.bi_inum = inum;
- root_inode.bi_snapshot = snapshot;
-
- ret = __bch2_fsck_write_inode(trans, &root_inode);
- bch_err_msg(c, ret, "writing root inode");
- }
-err:
-fsck_err:
- return ret;
-}
-
-/* Get root directory, create if it doesn't exist: */
-int bch2_check_root(struct bch_fs *c)
-{
- int ret = bch2_trans_commit_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- check_root_trans(trans));
- bch_err_fn(c, ret);
- return ret;
-}
-
-typedef DARRAY(u32) darray_u32;
-
-static bool darray_u32_has(darray_u32 *d, u32 v)
-{
- darray_for_each(*d, i)
- if (*i == v)
- return true;
- return false;
-}
-
-static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter parent_iter = {};
- darray_u32 subvol_path = {};
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- if (k.k->type != KEY_TYPE_subvolume)
- return 0;
-
- while (k.k->p.offset != BCACHEFS_ROOT_SUBVOL) {
- ret = darray_push(&subvol_path, k.k->p.offset);
- if (ret)
- goto err;
-
- struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
-
- struct bch_inode_unpacked subvol_root;
- ret = bch2_inode_find_by_inum_trans(trans,
- (subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) },
- &subvol_root);
- if (ret)
- break;
-
- u32 parent = le32_to_cpu(s.v->fs_path_parent);
-
- if (darray_u32_has(&subvol_path, parent)) {
- if (fsck_err(c, subvol_loop, "subvolume loop"))
- ret = reattach_subvol(trans, s);
- break;
- }
-
- bch2_trans_iter_exit(trans, &parent_iter);
- bch2_trans_iter_init(trans, &parent_iter,
- BTREE_ID_subvolumes, POS(0, parent), 0);
- k = bch2_btree_iter_peek_slot(&parent_iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (fsck_err_on(k.k->type != KEY_TYPE_subvolume,
- trans, subvol_unreachable,
- "unreachable subvolume %s",
- (bch2_bkey_val_to_text(&buf, c, s.s_c),
- buf.buf))) {
- ret = reattach_subvol(trans, s);
- break;
- }
- }
-fsck_err:
-err:
- printbuf_exit(&buf);
- darray_exit(&subvol_path);
- bch2_trans_iter_exit(trans, &parent_iter);
- return ret;
-}
-
-int bch2_check_subvolume_structure(struct bch_fs *c)
-{
- int ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter,
- BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- check_subvol_path(trans, &iter, k)));
- bch_err_fn(c, ret);
- return ret;
-}
-
-struct pathbuf_entry {
- u64 inum;
- u32 snapshot;
-};
-
-typedef DARRAY(struct pathbuf_entry) pathbuf;
-
-static bool path_is_dup(pathbuf *p, u64 inum, u32 snapshot)
-{
- darray_for_each(*p, i)
- if (i->inum == inum &&
- i->snapshot == snapshot)
- return true;
- return false;
-}
-
-static int check_path(struct btree_trans *trans, pathbuf *p, struct bkey_s_c inode_k)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter inode_iter = {};
- struct bch_inode_unpacked inode;
- struct printbuf buf = PRINTBUF;
- u32 snapshot = inode_k.k->p.snapshot;
- int ret = 0;
-
- p->nr = 0;
-
- BUG_ON(bch2_inode_unpack(inode_k, &inode));
-
- if (!S_ISDIR(inode.bi_mode))
- return 0;
-
- while (!inode.bi_subvol) {
- struct btree_iter dirent_iter;
- struct bkey_s_c_dirent d;
- u32 parent_snapshot = snapshot;
-
- d = inode_get_dirent(trans, &dirent_iter, &inode, &parent_snapshot);
- ret = bkey_err(d.s_c);
- if (ret && !bch2_err_matches(ret, ENOENT))
- break;
-
- if (!ret && (ret = dirent_points_to_inode(c, d, &inode)))
- bch2_trans_iter_exit(trans, &dirent_iter);
-
- if (bch2_err_matches(ret, ENOENT)) {
- printbuf_reset(&buf);
- bch2_bkey_val_to_text(&buf, c, inode_k);
- bch_err(c, "unreachable inode in check_directory_structure: %s\n%s",
- bch2_err_str(ret), buf.buf);
- goto out;
- }
-
- bch2_trans_iter_exit(trans, &dirent_iter);
-
- ret = darray_push(p, ((struct pathbuf_entry) {
- .inum = inode.bi_inum,
- .snapshot = snapshot,
- }));
- if (ret)
- return ret;
-
- snapshot = parent_snapshot;
-
- bch2_trans_iter_exit(trans, &inode_iter);
- inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes,
- SPOS(0, inode.bi_dir, snapshot), 0);
- ret = bkey_err(inode_k) ?:
- !bkey_is_inode(inode_k.k) ? -BCH_ERR_ENOENT_inode
- : bch2_inode_unpack(inode_k, &inode);
- if (ret) {
- /* Should have been caught in dirents pass */
- bch_err_msg(c, ret, "error looking up parent directory");
- break;
- }
-
- snapshot = inode_k.k->p.snapshot;
-
- if (path_is_dup(p, inode.bi_inum, snapshot)) {
- /* XXX print path */
- bch_err(c, "directory structure loop");
-
- darray_for_each(*p, i)
- pr_err("%llu:%u", i->inum, i->snapshot);
- pr_err("%llu:%u", inode.bi_inum, snapshot);
-
- if (fsck_err(trans, dir_loop, "directory structure loop")) {
- ret = remove_backpointer(trans, &inode);
- bch_err_msg(c, ret, "removing dirent");
- if (ret)
- break;
-
- ret = reattach_inode(trans, &inode);
- bch_err_msg(c, ret, "reattaching inode %llu", inode.bi_inum);
- }
- break;
- }
- }
-out:
-fsck_err:
- bch2_trans_iter_exit(trans, &inode_iter);
- printbuf_exit(&buf);
- bch_err_fn(c, ret);
- return ret;
-}
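
check_path() above walks from a directory toward the root via each inode's bi_dir backpointer, pushing every (inum, snapshot) pair it visits onto pathbuf; meeting the same pair twice means the parent chain loops. A minimal user-space sketch of that idea, with the parent chain reduced to an int array (purely illustrative, not bcachefs code):

#include <stdbool.h>
#include <stdio.h>

#define NR_NODES 5

/* Walk parent pointers from 'start'; a revisited node means a cycle. */
static bool chain_has_loop(const int parent[NR_NODES], int start)
{
	bool seen[NR_NODES] = { false };

	for (int cur = start; cur >= 0; cur = parent[cur]) {
		if (seen[cur])
			return true;	/* kernel: fsck_err(dir_loop), then reattach */
		seen[cur] = true;
	}
	return false;			/* reached the root (parent == -1) */
}

int main(void)
{
	int tree[NR_NODES]  = { -1, 0, 1, 1, 3 };	/* proper tree rooted at 0 */
	int loopy[NR_NODES] = { -1, 2, 1, 1, 3 };	/* 1 and 2 point at each other */

	printf("%d %d\n", chain_has_loop(tree, 4), chain_has_loop(loopy, 4));
	return 0;
}
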
-
-/*
- * Check for loops in the directory structure: all other connectivity issues
- * have been fixed by prior passes
- */
-int bch2_check_directory_structure(struct bch_fs *c)
-{
- pathbuf path = { 0, };
- int ret;
-
- ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS_MIN,
- BTREE_ITER_intent|
- BTREE_ITER_prefetch|
- BTREE_ITER_all_snapshots, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
- if (!bkey_is_inode(k.k))
- continue;
-
- if (bch2_inode_flags(k) & BCH_INODE_unlinked)
- continue;
-
- check_path(trans, &path, k);
- })));
- darray_exit(&path);
-
- bch_err_fn(c, ret);
- return ret;
-}
-
-struct nlink_table {
- size_t nr;
- size_t size;
-
- struct nlink {
- u64 inum;
- u32 snapshot;
- u32 count;
- } *d;
-};
-
-static int add_nlink(struct bch_fs *c, struct nlink_table *t,
- u64 inum, u32 snapshot)
-{
- if (t->nr == t->size) {
- size_t new_size = max_t(size_t, 128UL, t->size * 2);
- void *d = kvmalloc_array(new_size, sizeof(t->d[0]), GFP_KERNEL);
-
- if (!d) {
- bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
- new_size);
- return -BCH_ERR_ENOMEM_fsck_add_nlink;
- }
-
- if (t->d)
- memcpy(d, t->d, t->size * sizeof(t->d[0]));
- kvfree(t->d);
-
- t->d = d;
- t->size = new_size;
- }
-
- t->d[t->nr++] = (struct nlink) {
- .inum = inum,
- .snapshot = snapshot,
- };
-
- return 0;
-}
-
-static int nlink_cmp(const void *_l, const void *_r)
-{
- const struct nlink *l = _l;
- const struct nlink *r = _r;
-
- return cmp_int(l->inum, r->inum);
-}
-
-static void inc_link(struct bch_fs *c, struct snapshots_seen *s,
- struct nlink_table *links,
- u64 range_start, u64 range_end, u64 inum, u32 snapshot)
-{
- struct nlink *link, key = {
- .inum = inum, .snapshot = U32_MAX,
- };
-
- if (inum < range_start || inum >= range_end)
- return;
-
- link = __inline_bsearch(&key, links->d, links->nr,
- sizeof(links->d[0]), nlink_cmp);
- if (!link)
- return;
-
- while (link > links->d && link[0].inum == link[-1].inum)
- --link;
-
- for (; link < links->d + links->nr && link->inum == inum; link++)
- if (ref_visible(c, s, snapshot, link->snapshot)) {
- link->count++;
- if (link->snapshot >= snapshot)
- break;
- }
-}
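
Because nlink_cmp() orders the table by inode number only, the bsearch() in inc_link() may land on any entry for a given inum; the code then rewinds to the first such entry and scans forward across its snapshots. A small user-space sketch of that rewind-and-scan pattern (names and the snapshot-visibility filtering are simplified; illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct nlink_ex { unsigned long long inum; unsigned snapshot; unsigned count; };

static int nlink_ex_cmp(const void *_l, const void *_r)
{
	const struct nlink_ex *l = _l, *r = _r;

	return (l->inum > r->inum) - (l->inum < r->inum);
}

int main(void)
{
	struct nlink_ex tbl[] = {
		{ 10, 1, 0 }, { 12, 1, 0 }, { 12, 3, 0 }, { 15, 2, 0 },
	};
	const unsigned nr = sizeof(tbl) / sizeof(tbl[0]);
	struct nlink_ex key = { .inum = 12 };

	struct nlink_ex *l = bsearch(&key, tbl, nr, sizeof(tbl[0]), nlink_ex_cmp);
	if (!l)
		return 0;

	/* bsearch may return any entry for inum 12: rewind to the first one */
	while (l > tbl && l[0].inum == l[-1].inum)
		--l;

	/* then bump every snapshot of that inode (kernel also checks visibility) */
	for (; l < tbl + nr && l->inum == key.inum; l++)
		l->count++;

	printf("12:1 -> %u, 12:3 -> %u\n", tbl[1].count, tbl[2].count);
	return 0;
}
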
-
-noinline_for_stack
-static int check_nlinks_find_hardlinks(struct bch_fs *c,
- struct nlink_table *t,
- u64 start, u64 *end)
-{
- int ret = bch2_trans_run(c,
- for_each_btree_key(trans, iter, BTREE_ID_inodes,
- POS(0, start),
- BTREE_ITER_intent|
- BTREE_ITER_prefetch|
- BTREE_ITER_all_snapshots, k, ({
- if (!bkey_is_inode(k.k))
- continue;
-
- /* Should never fail, checked by the inode validate functions: */
- struct bch_inode_unpacked u;
- BUG_ON(bch2_inode_unpack(k, &u));
-
- /*
- * Backpointer and directory structure checks are sufficient for
- * directories, since they can't have hardlinks:
- */
- if (S_ISDIR(u.bi_mode))
- continue;
-
- /*
- * Previous passes ensured that bi_nlink is nonzero if
- * it had multiple hardlinks:
- */
- if (!u.bi_nlink)
- continue;
-
- ret = add_nlink(c, t, k.k->p.offset, k.k->p.snapshot);
- if (ret) {
- *end = k.k->p.offset;
- ret = 0;
- break;
- }
- 0;
- })));
-
- bch_err_fn(c, ret);
- return ret;
-}
-
-noinline_for_stack
-static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links,
- u64 range_start, u64 range_end)
-{
- struct snapshots_seen s;
-
- snapshots_seen_init(&s);
-
- int ret = bch2_trans_run(c,
- for_each_btree_key(trans, iter, BTREE_ID_dirents, POS_MIN,
- BTREE_ITER_intent|
- BTREE_ITER_prefetch|
- BTREE_ITER_all_snapshots, k, ({
- ret = snapshots_seen_update(c, &s, iter.btree_id, k.k->p);
- if (ret)
- break;
-
- if (k.k->type == KEY_TYPE_dirent) {
- struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
-
- if (d.v->d_type != DT_DIR &&
- d.v->d_type != DT_SUBVOL)
- inc_link(c, &s, links, range_start, range_end,
- le64_to_cpu(d.v->d_inum), d.k->p.snapshot);
- }
- 0;
- })));
-
- snapshots_seen_exit(&s);
-
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c k,
- struct nlink_table *links,
- size_t *idx, u64 range_end)
-{
- struct bch_inode_unpacked u;
- struct nlink *link = &links->d[*idx];
- int ret = 0;
-
- if (k.k->p.offset >= range_end)
- return 1;
-
- if (!bkey_is_inode(k.k))
- return 0;
-
- BUG_ON(bch2_inode_unpack(k, &u));
-
- if (S_ISDIR(u.bi_mode))
- return 0;
-
- if (!u.bi_nlink)
- return 0;
-
- while ((cmp_int(link->inum, k.k->p.offset) ?:
- cmp_int(link->snapshot, k.k->p.snapshot)) < 0) {
- BUG_ON(*idx == links->nr);
- link = &links->d[++*idx];
- }
-
- if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count,
- trans, inode_wrong_nlink,
- "inode %llu type %s has wrong i_nlink (%u, should be %u)",
- u.bi_inum, bch2_d_types[mode_to_type(u.bi_mode)],
- bch2_inode_nlink_get(&u), link->count)) {
- bch2_inode_nlink_set(&u, link->count);
- ret = __bch2_fsck_write_inode(trans, &u);
- }
-fsck_err:
- return ret;
-}
-
-noinline_for_stack
-static int check_nlinks_update_hardlinks(struct bch_fs *c,
- struct nlink_table *links,
- u64 range_start, u64 range_end)
-{
- size_t idx = 0;
-
- int ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
- POS(0, range_start),
- BTREE_ITER_intent|BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- check_nlinks_update_inode(trans, &iter, k, links, &idx, range_end)));
- if (ret < 0) {
- bch_err(c, "error in fsck walking inodes: %s", bch2_err_str(ret));
- return ret;
- }
-
- return 0;
-}
-
-int bch2_check_nlinks(struct bch_fs *c)
-{
- struct nlink_table links = { 0 };
- u64 this_iter_range_start, next_iter_range_start = 0;
- int ret = 0;
-
- do {
- this_iter_range_start = next_iter_range_start;
- next_iter_range_start = U64_MAX;
-
- ret = check_nlinks_find_hardlinks(c, &links,
- this_iter_range_start,
- &next_iter_range_start);
-
- ret = check_nlinks_walk_dirents(c, &links,
- this_iter_range_start,
- next_iter_range_start);
- if (ret)
- break;
-
- ret = check_nlinks_update_hardlinks(c, &links,
- this_iter_range_start,
- next_iter_range_start);
- if (ret)
- break;
-
- links.nr = 0;
- } while (next_iter_range_start != U64_MAX);
-
- kvfree(links.d);
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct bkey_s_c_reflink_p p;
- struct bkey_i_reflink_p *u;
-
- if (k.k->type != KEY_TYPE_reflink_p)
- return 0;
-
- p = bkey_s_c_to_reflink_p(k);
-
- if (!p.v->front_pad && !p.v->back_pad)
- return 0;
-
- u = bch2_trans_kmalloc(trans, sizeof(*u));
- int ret = PTR_ERR_OR_ZERO(u);
- if (ret)
- return ret;
-
- bkey_reassemble(&u->k_i, k);
- u->v.front_pad = 0;
- u->v.back_pad = 0;
-
- return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_norun);
-}
-
-int bch2_fix_reflink_p(struct bch_fs *c)
-{
- if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix)
- return 0;
-
- int ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter,
- BTREE_ID_extents, POS_MIN,
- BTREE_ITER_intent|BTREE_ITER_prefetch|
- BTREE_ITER_all_snapshots, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- fix_reflink_p_key(trans, &iter, k)));
- bch_err_fn(c, ret);
- return ret;
-}
diff --git a/fs/bcachefs/fsck.h b/fs/bcachefs/fsck.h
deleted file mode 100644
index 1cca31011530..000000000000
--- a/fs/bcachefs/fsck.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_FSCK_H
-#define _BCACHEFS_FSCK_H
-
-int bch2_check_inodes(struct bch_fs *);
-int bch2_check_extents(struct bch_fs *);
-int bch2_check_indirect_extents(struct bch_fs *);
-int bch2_check_dirents(struct bch_fs *);
-int bch2_check_xattrs(struct bch_fs *);
-int bch2_check_root(struct bch_fs *);
-int bch2_check_subvolume_structure(struct bch_fs *);
-int bch2_check_unreachable_inodes(struct bch_fs *);
-int bch2_check_directory_structure(struct bch_fs *);
-int bch2_check_nlinks(struct bch_fs *);
-int bch2_fix_reflink_p(struct bch_fs *);
-
-#endif /* _BCACHEFS_FSCK_H */
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
deleted file mode 100644
index 039cb7a22244..000000000000
--- a/fs/bcachefs/inode.c
+++ /dev/null
@@ -1,1407 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "btree_key_cache.h"
-#include "btree_write_buffer.h"
-#include "bkey_methods.h"
-#include "btree_update.h"
-#include "buckets.h"
-#include "compress.h"
-#include "dirent.h"
-#include "disk_accounting.h"
-#include "error.h"
-#include "extents.h"
-#include "extent_update.h"
-#include "fs.h"
-#include "inode.h"
-#include "str_hash.h"
-#include "snapshot.h"
-#include "subvolume.h"
-#include "varint.h"
-
-#include <linux/random.h>
-
-#include <linux/unaligned.h>
-
-#define x(name, ...) #name,
-const char * const bch2_inode_opts[] = {
- BCH_INODE_OPTS()
- NULL,
-};
-
-static const char * const bch2_inode_flag_strs[] = {
- BCH_INODE_FLAGS()
- NULL
-};
-#undef x
-
-static int delete_ancestor_snapshot_inodes(struct btree_trans *, struct bpos);
-
-static const u8 byte_table[8] = { 1, 2, 3, 4, 6, 8, 10, 13 };
-
-static int inode_decode_field(const u8 *in, const u8 *end,
- u64 out[2], unsigned *out_bits)
-{
- __be64 be[2] = { 0, 0 };
- unsigned bytes, shift;
- u8 *p;
-
- if (in >= end)
- return -1;
-
- if (!*in)
- return -1;
-
- /*
- * position of highest set bit indicates number of bytes:
- * shift = number of bits to remove in high byte:
- */
- shift = 8 - __fls(*in); /* 1 <= shift <= 8 */
- bytes = byte_table[shift - 1];
-
- if (in + bytes > end)
- return -1;
-
- p = (u8 *) be + 16 - bytes;
- memcpy(p, in, bytes);
- *p ^= (1 << 8) >> shift;
-
- out[0] = be64_to_cpu(be[0]);
- out[1] = be64_to_cpu(be[1]);
- *out_bits = out[0] ? 64 + fls64(out[0]) : fls64(out[1]);
-
- return bytes;
-}
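
In the legacy v1 field format decoded above, the position of the highest set bit in a field's first byte selects the field's length via byte_table[], and that bit is a marker rather than payload, which is why it is XORed away before the big-endian load. A small user-space sketch of just the length computation (illustrative; the kernel's __fls() is replaced by a portable loop):

#include <stdio.h>

static const unsigned char byte_table[8] = { 1, 2, 3, 4, 6, 8, 10, 13 };

/* 1-based index of the highest set bit (stand-in for the kernel's __fls()) */
static unsigned fls8(unsigned char v)
{
	unsigned r = 0;

	while (v) {
		r++;
		v >>= 1;
	}
	return r;
}

int main(void)
{
	for (unsigned first = 0x80; first; first >>= 1) {
		unsigned shift = 9 - fls8(first);	/* kernel: 8 - __fls(*in) */

		printf("first byte 0x%02x -> %u byte field, marker bit 0x%02x\n",
		       first, byte_table[shift - 1], 256 >> shift);
	}
	return 0;
}
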
-
-static inline void bch2_inode_pack_inlined(struct bkey_inode_buf *packed,
- const struct bch_inode_unpacked *inode)
-{
- struct bkey_i_inode_v3 *k = &packed->inode;
- u8 *out = k->v.fields;
- u8 *end = (void *) &packed[1];
- u8 *last_nonzero_field = out;
- unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
- unsigned bytes;
- int ret;
-
- bkey_inode_v3_init(&packed->inode.k_i);
- packed->inode.k.p.offset = inode->bi_inum;
- packed->inode.v.bi_journal_seq = cpu_to_le64(inode->bi_journal_seq);
- packed->inode.v.bi_hash_seed = inode->bi_hash_seed;
- packed->inode.v.bi_flags = cpu_to_le64(inode->bi_flags);
- packed->inode.v.bi_sectors = cpu_to_le64(inode->bi_sectors);
- packed->inode.v.bi_size = cpu_to_le64(inode->bi_size);
- packed->inode.v.bi_version = cpu_to_le64(inode->bi_version);
- SET_INODEv3_MODE(&packed->inode.v, inode->bi_mode);
- SET_INODEv3_FIELDS_START(&packed->inode.v, INODEv3_FIELDS_START_CUR);
-
-#define x(_name, _bits) \
- nr_fields++; \
- \
- if (inode->_name) { \
- ret = bch2_varint_encode_fast(out, inode->_name); \
- out += ret; \
- \
- if (_bits > 64) \
- *out++ = 0; \
- \
- last_nonzero_field = out; \
- last_nonzero_fieldnr = nr_fields; \
- } else { \
- *out++ = 0; \
- \
- if (_bits > 64) \
- *out++ = 0; \
- }
-
- BCH_INODE_FIELDS_v3()
-#undef x
- BUG_ON(out > end);
-
- out = last_nonzero_field;
- nr_fields = last_nonzero_fieldnr;
-
- bytes = out - (u8 *) &packed->inode.v;
- set_bkey_val_bytes(&packed->inode.k, bytes);
- memset_u64s_tail(&packed->inode.v, 0, bytes);
-
- SET_INODEv3_NR_FIELDS(&k->v, nr_fields);
-
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
- struct bch_inode_unpacked unpacked;
-
- ret = bch2_inode_unpack(bkey_i_to_s_c(&packed->inode.k_i), &unpacked);
- BUG_ON(ret);
- BUG_ON(unpacked.bi_inum != inode->bi_inum);
- BUG_ON(unpacked.bi_hash_seed != inode->bi_hash_seed);
- BUG_ON(unpacked.bi_sectors != inode->bi_sectors);
- BUG_ON(unpacked.bi_size != inode->bi_size);
- BUG_ON(unpacked.bi_version != inode->bi_version);
- BUG_ON(unpacked.bi_mode != inode->bi_mode);
-
-#define x(_name, _bits) if (unpacked._name != inode->_name) \
- panic("unpacked %llu should be %llu", \
- (u64) unpacked._name, (u64) inode->_name);
- BCH_INODE_FIELDS_v3()
-#undef x
- }
-}
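
bch2_inode_pack_inlined() emits the x-macro fields in a fixed order and truncates the value at the last nonzero field; INODEv3_NR_FIELDS records how many fields were kept, and the unpack paths below zero-fill anything past that count. A stripped-down user-space sketch of the trailing-zero trimming, with fixed-width u64s standing in for the kernel's varints (illustrative only):

#include <stdio.h>
#include <string.h>

#define NR_FIELDS 6

static unsigned pack_fields(const unsigned long long in[NR_FIELDS],
			    unsigned long long out[NR_FIELDS])
{
	unsigned nr = 0;

	for (unsigned i = 0; i < NR_FIELDS; i++) {
		out[i] = in[i];
		if (in[i])
			nr = i + 1;	/* remember last nonzero field */
	}
	return nr;			/* only out[0..nr) need be stored */
}

static void unpack_fields(const unsigned long long in[], unsigned nr,
			  unsigned long long out[NR_FIELDS])
{
	memset(out, 0, NR_FIELDS * sizeof(out[0]));	/* zero-fill missing fields */
	memcpy(out, in, nr * sizeof(in[0]));
}

int main(void)
{
	unsigned long long inode[NR_FIELDS] = { 0755, 1000, 1000, 0, 0, 0 };
	unsigned long long packed[NR_FIELDS], unpacked[NR_FIELDS];

	unsigned nr = pack_fields(inode, packed);

	unpack_fields(packed, nr, unpacked);
	printf("stored %u of %u fields, roundtrip ok: %d\n",
	       nr, NR_FIELDS, !memcmp(inode, unpacked, sizeof(inode)));
	return 0;
}
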
-
-void bch2_inode_pack(struct bkey_inode_buf *packed,
- const struct bch_inode_unpacked *inode)
-{
- bch2_inode_pack_inlined(packed, inode);
-}
-
-static noinline int bch2_inode_unpack_v1(struct bkey_s_c_inode inode,
- struct bch_inode_unpacked *unpacked)
-{
- const u8 *in = inode.v->fields;
- const u8 *end = bkey_val_end(inode);
- u64 field[2];
- unsigned fieldnr = 0, field_bits;
- int ret;
-
-#define x(_name, _bits) \
- if (fieldnr++ == INODEv1_NR_FIELDS(inode.v)) { \
- unsigned offset = offsetof(struct bch_inode_unpacked, _name);\
- memset((void *) unpacked + offset, 0, \
- sizeof(*unpacked) - offset); \
- return 0; \
- } \
- \
- ret = inode_decode_field(in, end, field, &field_bits); \
- if (ret < 0) \
- return ret; \
- \
- if (field_bits > sizeof(unpacked->_name) * 8) \
- return -1; \
- \
- unpacked->_name = field[1]; \
- in += ret;
-
- BCH_INODE_FIELDS_v2()
-#undef x
-
- /* XXX: signal if there were more fields than expected? */
- return 0;
-}
-
-static int bch2_inode_unpack_v2(struct bch_inode_unpacked *unpacked,
- const u8 *in, const u8 *end,
- unsigned nr_fields)
-{
- unsigned fieldnr = 0;
- int ret;
- u64 v[2];
-
-#define x(_name, _bits) \
- if (fieldnr < nr_fields) { \
- ret = bch2_varint_decode_fast(in, end, &v[0]); \
- if (ret < 0) \
- return ret; \
- in += ret; \
- \
- if (_bits > 64) { \
- ret = bch2_varint_decode_fast(in, end, &v[1]); \
- if (ret < 0) \
- return ret; \
- in += ret; \
- } else { \
- v[1] = 0; \
- } \
- } else { \
- v[0] = v[1] = 0; \
- } \
- \
- unpacked->_name = v[0]; \
- if (v[1] || v[0] != unpacked->_name) \
- return -1; \
- fieldnr++;
-
- BCH_INODE_FIELDS_v2()
-#undef x
-
- /* XXX: signal if there were more fields than expected? */
- return 0;
-}
-
-static int bch2_inode_unpack_v3(struct bkey_s_c k,
- struct bch_inode_unpacked *unpacked)
-{
- struct bkey_s_c_inode_v3 inode = bkey_s_c_to_inode_v3(k);
- const u8 *in = inode.v->fields;
- const u8 *end = bkey_val_end(inode);
- unsigned nr_fields = INODEv3_NR_FIELDS(inode.v);
- unsigned fieldnr = 0;
- int ret;
- u64 v[2];
-
- unpacked->bi_inum = inode.k->p.offset;
- unpacked->bi_journal_seq= le64_to_cpu(inode.v->bi_journal_seq);
- unpacked->bi_hash_seed = inode.v->bi_hash_seed;
- unpacked->bi_flags = le64_to_cpu(inode.v->bi_flags);
- unpacked->bi_sectors = le64_to_cpu(inode.v->bi_sectors);
- unpacked->bi_size = le64_to_cpu(inode.v->bi_size);
- unpacked->bi_version = le64_to_cpu(inode.v->bi_version);
- unpacked->bi_mode = INODEv3_MODE(inode.v);
-
-#define x(_name, _bits) \
- if (fieldnr < nr_fields) { \
- ret = bch2_varint_decode_fast(in, end, &v[0]); \
- if (ret < 0) \
- return ret; \
- in += ret; \
- \
- if (_bits > 64) { \
- ret = bch2_varint_decode_fast(in, end, &v[1]); \
- if (ret < 0) \
- return ret; \
- in += ret; \
- } else { \
- v[1] = 0; \
- } \
- } else { \
- v[0] = v[1] = 0; \
- } \
- \
- unpacked->_name = v[0]; \
- if (v[1] || v[0] != unpacked->_name) \
- return -1; \
- fieldnr++;
-
- BCH_INODE_FIELDS_v3()
-#undef x
-
- /* XXX: signal if there were more fields than expected? */
- return 0;
-}
-
-static noinline int bch2_inode_unpack_slowpath(struct bkey_s_c k,
- struct bch_inode_unpacked *unpacked)
-{
- memset(unpacked, 0, sizeof(*unpacked));
-
- unpacked->bi_snapshot = k.k->p.snapshot;
-
- switch (k.k->type) {
- case KEY_TYPE_inode: {
- struct bkey_s_c_inode inode = bkey_s_c_to_inode(k);
-
- unpacked->bi_inum = inode.k->p.offset;
- unpacked->bi_journal_seq= 0;
- unpacked->bi_hash_seed = inode.v->bi_hash_seed;
- unpacked->bi_flags = le32_to_cpu(inode.v->bi_flags);
- unpacked->bi_mode = le16_to_cpu(inode.v->bi_mode);
-
- if (INODEv1_NEW_VARINT(inode.v)) {
- return bch2_inode_unpack_v2(unpacked, inode.v->fields,
- bkey_val_end(inode),
- INODEv1_NR_FIELDS(inode.v));
- } else {
- return bch2_inode_unpack_v1(inode, unpacked);
- }
- break;
- }
- case KEY_TYPE_inode_v2: {
- struct bkey_s_c_inode_v2 inode = bkey_s_c_to_inode_v2(k);
-
- unpacked->bi_inum = inode.k->p.offset;
- unpacked->bi_journal_seq= le64_to_cpu(inode.v->bi_journal_seq);
- unpacked->bi_hash_seed = inode.v->bi_hash_seed;
- unpacked->bi_flags = le64_to_cpu(inode.v->bi_flags);
- unpacked->bi_mode = le16_to_cpu(inode.v->bi_mode);
-
- return bch2_inode_unpack_v2(unpacked, inode.v->fields,
- bkey_val_end(inode),
- INODEv2_NR_FIELDS(inode.v));
- }
- default:
- BUG();
- }
-}
-
-int bch2_inode_unpack(struct bkey_s_c k,
- struct bch_inode_unpacked *unpacked)
-{
- unpacked->bi_snapshot = k.k->p.snapshot;
-
- return likely(k.k->type == KEY_TYPE_inode_v3)
- ? bch2_inode_unpack_v3(k, unpacked)
- : bch2_inode_unpack_slowpath(k, unpacked);
-}
-
-int __bch2_inode_peek(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bch_inode_unpacked *inode,
- subvol_inum inum, unsigned flags,
- bool warn)
-{
- u32 snapshot;
- int ret = __bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot, warn);
- if (ret)
- return ret;
-
- struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_inodes,
- SPOS(0, inum.inum, snapshot),
- flags|BTREE_ITER_cached);
- ret = bkey_err(k);
- if (ret)
- return ret;
-
- ret = bkey_is_inode(k.k) ? 0 : -BCH_ERR_ENOENT_inode;
- if (ret)
- goto err;
-
- ret = bch2_inode_unpack(k, inode);
- if (ret)
- goto err;
-
- return 0;
-err:
- if (warn)
- bch_err_msg(trans->c, ret, "looking up inum %llu:%llu:", inum.subvol, inum.inum);
- bch2_trans_iter_exit(trans, iter);
- return ret;
-}
-
-int bch2_inode_write_flags(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bch_inode_unpacked *inode,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bkey_inode_buf *inode_p;
-
- inode_p = bch2_trans_kmalloc(trans, sizeof(*inode_p));
- if (IS_ERR(inode_p))
- return PTR_ERR(inode_p);
-
- bch2_inode_pack_inlined(inode_p, inode);
- inode_p->inode.k.p.snapshot = iter->snapshot;
- return bch2_trans_update(trans, iter, &inode_p->inode.k_i, flags);
-}
-
-int __bch2_fsck_write_inode(struct btree_trans *trans, struct bch_inode_unpacked *inode)
-{
- struct bkey_inode_buf *inode_p =
- bch2_trans_kmalloc(trans, sizeof(*inode_p));
-
- if (IS_ERR(inode_p))
- return PTR_ERR(inode_p);
-
- bch2_inode_pack(inode_p, inode);
- inode_p->inode.k.p.snapshot = inode->bi_snapshot;
-
- return bch2_btree_insert_nonextent(trans, BTREE_ID_inodes,
- &inode_p->inode.k_i,
- BTREE_UPDATE_internal_snapshot_node);
-}
-
-int bch2_fsck_write_inode(struct btree_trans *trans, struct bch_inode_unpacked *inode)
-{
- int ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- __bch2_fsck_write_inode(trans, inode));
- bch_err_fn(trans->c, ret);
- return ret;
-}
-
-struct bkey_i *bch2_inode_to_v3(struct btree_trans *trans, struct bkey_i *k)
-{
- struct bch_inode_unpacked u;
- struct bkey_inode_buf *inode_p;
- int ret;
-
- if (!bkey_is_inode(&k->k))
- return ERR_PTR(-ENOENT);
-
- inode_p = bch2_trans_kmalloc(trans, sizeof(*inode_p));
- if (IS_ERR(inode_p))
- return ERR_CAST(inode_p);
-
- ret = bch2_inode_unpack(bkey_i_to_s_c(k), &u);
- if (ret)
- return ERR_PTR(ret);
-
- bch2_inode_pack(inode_p, &u);
- return &inode_p->inode.k_i;
-}
-
-static int __bch2_inode_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct bch_inode_unpacked unpacked;
- int ret = 0;
-
- bkey_fsck_err_on(k.k->p.inode,
- c, inode_pos_inode_nonzero,
- "nonzero k.p.inode");
-
- bkey_fsck_err_on(k.k->p.offset < BLOCKDEV_INODE_MAX,
- c, inode_pos_blockdev_range,
- "fs inode in blockdev range");
-
- bkey_fsck_err_on(bch2_inode_unpack(k, &unpacked),
- c, inode_unpack_error,
- "invalid variable length fields");
-
- bkey_fsck_err_on(unpacked.bi_data_checksum >= BCH_CSUM_OPT_NR + 1,
- c, inode_checksum_type_invalid,
- "invalid data checksum type (%u >= %u",
- unpacked.bi_data_checksum, BCH_CSUM_OPT_NR + 1);
-
- bkey_fsck_err_on(unpacked.bi_compression &&
- !bch2_compression_opt_valid(unpacked.bi_compression - 1),
- c, inode_compression_type_invalid,
- "invalid compression opt %u", unpacked.bi_compression - 1);
-
- bkey_fsck_err_on((unpacked.bi_flags & BCH_INODE_unlinked) &&
- unpacked.bi_nlink != 0,
- c, inode_unlinked_but_nlink_nonzero,
- "flagged as unlinked but bi_nlink != 0");
-
- bkey_fsck_err_on(unpacked.bi_subvol && !S_ISDIR(unpacked.bi_mode),
- c, inode_subvol_root_but_not_dir,
- "subvolume root but not a directory");
-fsck_err:
- return ret;
-}
-
-int bch2_inode_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct bkey_s_c_inode inode = bkey_s_c_to_inode(k);
- int ret = 0;
-
- bkey_fsck_err_on(INODEv1_STR_HASH(inode.v) >= BCH_STR_HASH_NR,
- c, inode_str_hash_invalid,
- "invalid str hash type (%llu >= %u)",
- INODEv1_STR_HASH(inode.v), BCH_STR_HASH_NR);
-
- ret = __bch2_inode_validate(c, k, flags);
-fsck_err:
- return ret;
-}
-
-int bch2_inode_v2_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct bkey_s_c_inode_v2 inode = bkey_s_c_to_inode_v2(k);
- int ret = 0;
-
- bkey_fsck_err_on(INODEv2_STR_HASH(inode.v) >= BCH_STR_HASH_NR,
- c, inode_str_hash_invalid,
- "invalid str hash type (%llu >= %u)",
- INODEv2_STR_HASH(inode.v), BCH_STR_HASH_NR);
-
- ret = __bch2_inode_validate(c, k, flags);
-fsck_err:
- return ret;
-}
-
-int bch2_inode_v3_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct bkey_s_c_inode_v3 inode = bkey_s_c_to_inode_v3(k);
- int ret = 0;
-
- bkey_fsck_err_on(INODEv3_FIELDS_START(inode.v) < INODEv3_FIELDS_START_INITIAL ||
- INODEv3_FIELDS_START(inode.v) > bkey_val_u64s(inode.k),
- c, inode_v3_fields_start_bad,
- "invalid fields_start (got %llu, min %u max %zu)",
- INODEv3_FIELDS_START(inode.v),
- INODEv3_FIELDS_START_INITIAL,
- bkey_val_u64s(inode.k));
-
- bkey_fsck_err_on(INODEv3_STR_HASH(inode.v) >= BCH_STR_HASH_NR,
- c, inode_str_hash_invalid,
- "invalid str hash type (%llu >= %u)",
- INODEv3_STR_HASH(inode.v), BCH_STR_HASH_NR);
-
- ret = __bch2_inode_validate(c, k, flags);
-fsck_err:
- return ret;
-}
-
-static void __bch2_inode_unpacked_to_text(struct printbuf *out,
- struct bch_inode_unpacked *inode)
-{
- prt_printf(out, "\n");
- printbuf_indent_add(out, 2);
- prt_printf(out, "mode=%o\n", inode->bi_mode);
-
- prt_str(out, "flags=");
- prt_bitflags(out, bch2_inode_flag_strs, inode->bi_flags & ((1U << 20) - 1));
- prt_printf(out, "(%x)\n", inode->bi_flags);
-
- prt_printf(out, "journal_seq=%llu\n", inode->bi_journal_seq);
- prt_printf(out, "hash_seed=%llx\n", inode->bi_hash_seed);
- prt_printf(out, "hash_type=");
- bch2_prt_str_hash_type(out, INODE_STR_HASH(inode));
- prt_newline(out);
- prt_printf(out, "bi_size=%llu\n", inode->bi_size);
- prt_printf(out, "bi_sectors=%llu\n", inode->bi_sectors);
- prt_printf(out, "bi_version=%llu\n", inode->bi_version);
-
-#define x(_name, _bits) \
- prt_printf(out, #_name "=%llu\n", (u64) inode->_name);
- BCH_INODE_FIELDS_v3()
-#undef x
-
- bch2_printbuf_strip_trailing_newline(out);
- printbuf_indent_sub(out, 2);
-}
-
-void bch2_inode_unpacked_to_text(struct printbuf *out, struct bch_inode_unpacked *inode)
-{
- prt_printf(out, "inum: %llu:%u ", inode->bi_inum, inode->bi_snapshot);
- __bch2_inode_unpacked_to_text(out, inode);
-}
-
-void bch2_inode_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
-{
- struct bch_inode_unpacked inode;
-
- if (bch2_inode_unpack(k, &inode)) {
- prt_printf(out, "(unpack error)");
- return;
- }
-
- __bch2_inode_unpacked_to_text(out, &inode);
-}
-
-static inline u64 bkey_inode_flags(struct bkey_s_c k)
-{
- switch (k.k->type) {
- case KEY_TYPE_inode:
- return le32_to_cpu(bkey_s_c_to_inode(k).v->bi_flags);
- case KEY_TYPE_inode_v2:
- return le64_to_cpu(bkey_s_c_to_inode_v2(k).v->bi_flags);
- case KEY_TYPE_inode_v3:
- return le64_to_cpu(bkey_s_c_to_inode_v3(k).v->bi_flags);
- default:
- return 0;
- }
-}
-
-static inline void bkey_inode_flags_set(struct bkey_s k, u64 f)
-{
- switch (k.k->type) {
- case KEY_TYPE_inode:
- bkey_s_to_inode(k).v->bi_flags = cpu_to_le32(f);
- return;
- case KEY_TYPE_inode_v2:
- bkey_s_to_inode_v2(k).v->bi_flags = cpu_to_le64(f);
- return;
- case KEY_TYPE_inode_v3:
- bkey_s_to_inode_v3(k).v->bi_flags = cpu_to_le64(f);
- return;
- default:
- BUG();
- }
-}
-
-static inline bool bkey_is_unlinked_inode(struct bkey_s_c k)
-{
- unsigned f = bkey_inode_flags(k);
-
- return (f & BCH_INODE_unlinked) && !(f & BCH_INODE_has_child_snapshot);
-}
-
-static struct bkey_s_c
-bch2_bkey_get_iter_snapshot_parent(struct btree_trans *trans, struct btree_iter *iter,
- enum btree_id btree, struct bpos pos,
- unsigned flags)
-{
- struct bch_fs *c = trans->c;
- struct bkey_s_c k;
- int ret = 0;
-
- for_each_btree_key_upto_norestart(trans, *iter, btree,
- bpos_successor(pos),
- SPOS(pos.inode, pos.offset, U32_MAX),
- flags|BTREE_ITER_all_snapshots, k, ret)
- if (bch2_snapshot_is_ancestor(c, pos.snapshot, k.k->p.snapshot))
- return k;
-
- bch2_trans_iter_exit(trans, iter);
- return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
-}
-
-static struct bkey_s_c
-bch2_inode_get_iter_snapshot_parent(struct btree_trans *trans, struct btree_iter *iter,
- struct bpos pos, unsigned flags)
-{
- struct bkey_s_c k;
-again:
- k = bch2_bkey_get_iter_snapshot_parent(trans, iter, BTREE_ID_inodes, pos, flags);
- if (!k.k ||
- bkey_err(k) ||
- bkey_is_inode(k.k))
- return k;
-
- bch2_trans_iter_exit(trans, iter);
- pos = k.k->p;
- goto again;
-}
-
-int __bch2_inode_has_child_snapshots(struct btree_trans *trans, struct bpos pos)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret = 0;
-
- for_each_btree_key_upto_norestart(trans, iter,
- BTREE_ID_inodes, POS(0, pos.offset), bpos_predecessor(pos),
- BTREE_ITER_all_snapshots|
- BTREE_ITER_with_updates, k, ret)
- if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot) &&
- bkey_is_inode(k.k)) {
- ret = 1;
- break;
- }
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static int update_inode_has_children(struct btree_trans *trans,
- struct bkey_s k,
- bool have_child)
-{
- if (!have_child) {
- int ret = bch2_inode_has_child_snapshots(trans, k.k->p);
- if (ret)
- return ret < 0 ? ret : 0;
- }
-
- u64 f = bkey_inode_flags(k.s_c);
- if (have_child != !!(f & BCH_INODE_has_child_snapshot))
- bkey_inode_flags_set(k, f ^ BCH_INODE_has_child_snapshot);
-
- return 0;
-}
-
-static int update_parent_inode_has_children(struct btree_trans *trans, struct bpos pos,
- bool have_child)
-{
- struct btree_iter iter;
- struct bkey_s_c k = bch2_inode_get_iter_snapshot_parent(trans,
- &iter, pos, BTREE_ITER_with_updates);
- int ret = bkey_err(k);
- if (ret)
- return ret;
- if (!k.k)
- return 0;
-
- if (!have_child) {
- ret = bch2_inode_has_child_snapshots(trans, k.k->p);
- if (ret) {
- ret = ret < 0 ? ret : 0;
- goto err;
- }
- }
-
- u64 f = bkey_inode_flags(k);
- if (have_child != !!(f & BCH_INODE_has_child_snapshot)) {
- struct bkey_i *update = bch2_bkey_make_mut(trans, &iter, &k,
- BTREE_UPDATE_internal_snapshot_node);
- ret = PTR_ERR_OR_ZERO(update);
- if (ret)
- goto err;
-
- bkey_inode_flags_set(bkey_i_to_s(update), f ^ BCH_INODE_has_child_snapshot);
- }
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-int bch2_trigger_inode(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old,
- struct bkey_s new,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bch_fs *c = trans->c;
-
- if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
- BUG_ON(!trans->journal_res.seq);
- bkey_s_to_inode_v3(new).v->bi_journal_seq = cpu_to_le64(trans->journal_res.seq);
- }
-
- s64 nr = bkey_is_inode(new.k) - bkey_is_inode(old.k);
- if ((flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) && nr) {
- struct disk_accounting_pos acc = { .type = BCH_DISK_ACCOUNTING_nr_inodes };
- int ret = bch2_disk_accounting_mod(trans, &acc, &nr, 1, flags & BTREE_TRIGGER_gc);
- if (ret)
- return ret;
- }
-
- if (flags & BTREE_TRIGGER_transactional) {
- int unlinked_delta = (int) bkey_is_unlinked_inode(new.s_c) -
- (int) bkey_is_unlinked_inode(old);
- if (unlinked_delta) {
- int ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes,
- new.k->p, unlinked_delta > 0);
- if (ret)
- return ret;
- }
-
- /*
- * If we're creating or deleting an inode at this snapshot ID,
- * and there might be an inode in a parent snapshot ID, we might
- * need to set or clear the has_child_snapshot flag on the
- * parent.
- */
- int deleted_delta = (int) bkey_is_inode(new.k) -
- (int) bkey_is_inode(old.k);
- if (deleted_delta &&
- bch2_snapshot_parent(c, new.k->p.snapshot)) {
- int ret = update_parent_inode_has_children(trans, new.k->p,
- deleted_delta > 0);
- if (ret)
- return ret;
- }
-
- /*
- * When an inode is first updated in a new snapshot, we may need
- * to clear has_child_snapshot
- */
- if (deleted_delta > 0) {
- int ret = update_inode_has_children(trans, new, false);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-int bch2_inode_generation_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- int ret = 0;
-
- bkey_fsck_err_on(k.k->p.inode,
- c, inode_pos_inode_nonzero,
- "nonzero k.p.inode");
-fsck_err:
- return ret;
-}
-
-void bch2_inode_generation_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- struct bkey_s_c_inode_generation gen = bkey_s_c_to_inode_generation(k);
-
- prt_printf(out, "generation: %u", le32_to_cpu(gen.v->bi_generation));
-}
-
-void bch2_inode_init_early(struct bch_fs *c,
- struct bch_inode_unpacked *inode_u)
-{
- enum bch_str_hash_type str_hash =
- bch2_str_hash_opt_to_type(c, c->opts.str_hash);
-
- memset(inode_u, 0, sizeof(*inode_u));
-
- SET_INODE_STR_HASH(inode_u, str_hash);
- get_random_bytes(&inode_u->bi_hash_seed, sizeof(inode_u->bi_hash_seed));
-}
-
-void bch2_inode_init_late(struct bch_inode_unpacked *inode_u, u64 now,
- uid_t uid, gid_t gid, umode_t mode, dev_t rdev,
- struct bch_inode_unpacked *parent)
-{
- inode_u->bi_mode = mode;
- inode_u->bi_uid = uid;
- inode_u->bi_gid = gid;
- inode_u->bi_dev = rdev;
- inode_u->bi_atime = now;
- inode_u->bi_mtime = now;
- inode_u->bi_ctime = now;
- inode_u->bi_otime = now;
-
- if (parent && parent->bi_mode & S_ISGID) {
- inode_u->bi_gid = parent->bi_gid;
- if (S_ISDIR(mode))
- inode_u->bi_mode |= S_ISGID;
- }
-
- if (parent) {
-#define x(_name, ...) inode_u->bi_##_name = parent->bi_##_name;
- BCH_INODE_OPTS()
-#undef x
- }
-}
-
-void bch2_inode_init(struct bch_fs *c, struct bch_inode_unpacked *inode_u,
- uid_t uid, gid_t gid, umode_t mode, dev_t rdev,
- struct bch_inode_unpacked *parent)
-{
- bch2_inode_init_early(c, inode_u);
- bch2_inode_init_late(inode_u, bch2_current_time(c),
- uid, gid, mode, rdev, parent);
-}
-
-static inline u32 bkey_generation(struct bkey_s_c k)
-{
- switch (k.k->type) {
- case KEY_TYPE_inode:
- case KEY_TYPE_inode_v2:
- BUG();
- case KEY_TYPE_inode_generation:
- return le32_to_cpu(bkey_s_c_to_inode_generation(k).v->bi_generation);
- default:
- return 0;
- }
-}
-
-/*
- * This just finds an empty slot:
- */
-int bch2_inode_create(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bch_inode_unpacked *inode_u,
- u32 snapshot, u64 cpu)
-{
- struct bch_fs *c = trans->c;
- struct bkey_s_c k;
- u64 min, max, start, pos, *hint;
- int ret = 0;
- unsigned bits = (c->opts.inodes_32bit ? 31 : 63);
-
- if (c->opts.shard_inode_numbers) {
- bits -= c->inode_shard_bits;
-
- min = (cpu << bits);
- max = (cpu << bits) | ~(ULLONG_MAX << bits);
-
- min = max_t(u64, min, BLOCKDEV_INODE_MAX);
- hint = c->unused_inode_hints + cpu;
- } else {
- min = BLOCKDEV_INODE_MAX;
- max = ~(ULLONG_MAX << bits);
- hint = c->unused_inode_hints;
- }
-
- start = READ_ONCE(*hint);
-
- if (start >= max || start < min)
- start = min;
-
- pos = start;
- bch2_trans_iter_init(trans, iter, BTREE_ID_inodes, POS(0, pos),
- BTREE_ITER_all_snapshots|
- BTREE_ITER_intent);
-again:
- while ((k = bch2_btree_iter_peek(iter)).k &&
- !(ret = bkey_err(k)) &&
- bkey_lt(k.k->p, POS(0, max))) {
- if (pos < iter->pos.offset)
- goto found_slot;
-
- /*
- * We don't need to iterate over keys in every snapshot once
- * we've found just one:
- */
- pos = iter->pos.offset + 1;
- bch2_btree_iter_set_pos(iter, POS(0, pos));
- }
-
- if (!ret && pos < max)
- goto found_slot;
-
- if (!ret && start == min)
- ret = -BCH_ERR_ENOSPC_inode_create;
-
- if (ret) {
- bch2_trans_iter_exit(trans, iter);
- return ret;
- }
-
- /* Retry from start */
- pos = start = min;
- bch2_btree_iter_set_pos(iter, POS(0, pos));
- goto again;
-found_slot:
- bch2_btree_iter_set_pos(iter, SPOS(0, pos, snapshot));
- k = bch2_btree_iter_peek_slot(iter);
- ret = bkey_err(k);
- if (ret) {
- bch2_trans_iter_exit(trans, iter);
- return ret;
- }
-
- *hint = k.k->p.offset;
- inode_u->bi_inum = k.k->p.offset;
- inode_u->bi_generation = bkey_generation(k);
- return 0;
-}
-
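-/*
- * Delete every key belonging to this inode in the given btree, one key per
- * transaction commit so that each transaction stays small:
- */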
-static int bch2_inode_delete_keys(struct btree_trans *trans,
- subvol_inum inum, enum btree_id id)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bkey_i delete;
- struct bpos end = POS(inum.inum, U64_MAX);
- u32 snapshot;
- int ret = 0;
-
- /*
- * We're never going to be deleting partial extents, no need to use an
- * extent iterator:
- */
- bch2_trans_iter_init(trans, &iter, id, POS(inum.inum, 0),
- BTREE_ITER_intent);
-
- while (1) {
- bch2_trans_begin(trans);
-
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- goto err;
-
- bch2_btree_iter_set_snapshot(&iter, snapshot);
-
- k = bch2_btree_iter_peek_upto(&iter, end);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (!k.k)
- break;
-
- bkey_init(&delete.k);
- delete.k.p = iter.pos;
-
- if (iter.flags & BTREE_ITER_is_extents)
- bch2_key_resize(&delete.k,
- bpos_min(end, k.k->p).offset -
- iter.pos.offset);
-
- ret = bch2_trans_update(trans, &iter, &delete, 0) ?:
- bch2_trans_commit(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc);
-err:
- if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
- break;
- }
-
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-int bch2_inode_rm(struct bch_fs *c, subvol_inum inum)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter = { NULL };
- struct bkey_i_inode_generation delete;
- struct bch_inode_unpacked inode_u;
- struct bkey_s_c k;
- u32 snapshot;
- int ret;
-
- /*
- * If this was a directory, there shouldn't be any real dirents left -
- * but there could be whiteouts (from hash collisions) that we should
- * delete:
- *
- * XXX: the dirent code ideally would delete whiteouts when they're no
- * longer needed
- */
- ret = bch2_inode_delete_keys(trans, inum, BTREE_ID_extents) ?:
- bch2_inode_delete_keys(trans, inum, BTREE_ID_xattrs) ?:
- bch2_inode_delete_keys(trans, inum, BTREE_ID_dirents);
- if (ret)
- goto err;
-retry:
- bch2_trans_begin(trans);
-
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- goto err;
-
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
- SPOS(0, inum.inum, snapshot),
- BTREE_ITER_intent|BTREE_ITER_cached);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (!bkey_is_inode(k.k)) {
- bch2_fs_inconsistent(c,
- "inode %llu:%u not found when deleting",
- inum.inum, snapshot);
- ret = -EIO;
- goto err;
- }
-
- bch2_inode_unpack(k, &inode_u);
-
- bkey_inode_generation_init(&delete.k_i);
- delete.k.p = iter.pos;
- delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1);
-
- ret = bch2_trans_update(trans, &iter, &delete.k_i, 0) ?:
- bch2_trans_commit(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc);
-err:
- bch2_trans_iter_exit(trans, &iter);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- if (ret)
- goto err2;
-
- ret = delete_ancestor_snapshot_inodes(trans, SPOS(0, inum.inum, snapshot));
-err2:
- bch2_trans_put(trans);
- return ret;
-}
-
-int bch2_inode_find_by_inum_nowarn_trans(struct btree_trans *trans,
- subvol_inum inum,
- struct bch_inode_unpacked *inode)
-{
- struct btree_iter iter;
- int ret;
-
- ret = bch2_inode_peek_nowarn(trans, &iter, inode, inum, 0);
- if (!ret)
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-int bch2_inode_find_by_inum_trans(struct btree_trans *trans,
- subvol_inum inum,
- struct bch_inode_unpacked *inode)
-{
- struct btree_iter iter;
- int ret;
-
- ret = bch2_inode_peek(trans, &iter, inode, inum, 0);
- if (!ret)
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-int bch2_inode_find_by_inum(struct bch_fs *c, subvol_inum inum,
- struct bch_inode_unpacked *inode)
-{
- return bch2_trans_do(c, bch2_inode_find_by_inum_trans(trans, inum, inode));
-}
-
-int bch2_inode_nlink_inc(struct bch_inode_unpacked *bi)
-{
- if (bi->bi_flags & BCH_INODE_unlinked)
- bi->bi_flags &= ~BCH_INODE_unlinked;
- else {
- if (bi->bi_nlink == U32_MAX)
- return -EINVAL;
-
- bi->bi_nlink++;
- }
-
- return 0;
-}
-
-void bch2_inode_nlink_dec(struct btree_trans *trans, struct bch_inode_unpacked *bi)
-{
- if (bi->bi_nlink && (bi->bi_flags & BCH_INODE_unlinked)) {
- bch2_trans_inconsistent(trans, "inode %llu unlinked but link count nonzero",
- bi->bi_inum);
- return;
- }
-
- if (bi->bi_flags & BCH_INODE_unlinked) {
- bch2_trans_inconsistent(trans, "inode %llu link count underflow", bi->bi_inum);
- return;
- }
-
- if (bi->bi_nlink)
- bi->bi_nlink--;
- else
- bi->bi_flags |= BCH_INODE_unlinked;
-}
-
-struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *inode)
-{
- struct bch_opts ret = { 0 };
-#define x(_name, _bits) \
- if (inode->bi_##_name) \
- opt_set(ret, _name, inode->bi_##_name - 1);
- BCH_INODE_OPTS()
-#undef x
- return ret;
-}
-
-void bch2_inode_opts_get(struct bch_io_opts *opts, struct bch_fs *c,
- struct bch_inode_unpacked *inode)
-{
-#define x(_name, _bits) opts->_name = inode_opt_get(c, inode, _name);
- BCH_INODE_OPTS()
-#undef x
-
- if (opts->nocow)
- opts->compression = opts->background_compression = opts->data_checksum = opts->erasure_code = 0;
-}
-
-int bch2_inum_opts_get(struct btree_trans *trans, subvol_inum inum, struct bch_io_opts *opts)
-{
- struct bch_inode_unpacked inode;
- int ret = lockrestart_do(trans, bch2_inode_find_by_inum_trans(trans, inum, &inode));
-
- if (ret)
- return ret;
-
- bch2_inode_opts_get(opts, trans->c, &inode);
- return 0;
-}
-
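-/*
- * Delete a single snapshot version of an inode: its extents, dirents and
- * xattrs in that snapshot, then the inode itself, leaving an inode_generation
- * key behind:
- */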
-static noinline int __bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter = { NULL };
- struct bkey_i_inode_generation delete;
- struct bch_inode_unpacked inode_u;
- struct bkey_s_c k;
- int ret;
-
- do {
- ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
- SPOS(inum, 0, snapshot),
- SPOS(inum, U64_MAX, snapshot),
- 0, NULL) ?:
- bch2_btree_delete_range_trans(trans, BTREE_ID_dirents,
- SPOS(inum, 0, snapshot),
- SPOS(inum, U64_MAX, snapshot),
- 0, NULL) ?:
- bch2_btree_delete_range_trans(trans, BTREE_ID_xattrs,
- SPOS(inum, 0, snapshot),
- SPOS(inum, U64_MAX, snapshot),
- 0, NULL);
- } while (ret == -BCH_ERR_transaction_restart_nested);
- if (ret)
- goto err;
-retry:
- bch2_trans_begin(trans);
-
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
- SPOS(0, inum, snapshot), BTREE_ITER_intent);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (!bkey_is_inode(k.k)) {
- bch2_fs_inconsistent(c,
- "inode %llu:%u not found when deleting",
- inum, snapshot);
- ret = -EIO;
- goto err;
- }
-
- bch2_inode_unpack(k, &inode_u);
-
- /* Subvolume root? */
- if (inode_u.bi_subvol)
- bch_warn(c, "deleting inode %llu marked as unlinked, but also a subvolume root!?", inode_u.bi_inum);
-
- bkey_inode_generation_init(&delete.k_i);
- delete.k.p = iter.pos;
- delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1);
-
- ret = bch2_trans_update(trans, &iter, &delete.k_i, 0) ?:
- bch2_trans_commit(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc);
-err:
- bch2_trans_iter_exit(trans, &iter);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- return ret ?: -BCH_ERR_transaction_restart_nested;
-}
-
-/*
- * After deleting an inode, there may be versions in older snapshots that should
- * also be deleted - if they're not referenced by sibling snapshots and not open
- * in other subvolumes:
- */
-static int delete_ancestor_snapshot_inodes(struct btree_trans *trans, struct bpos pos)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret;
-next_parent:
- ret = lockrestart_do(trans,
- bkey_err(k = bch2_inode_get_iter_snapshot_parent(trans, &iter, pos, 0)));
- if (ret || !k.k)
- return ret;
-
- bool unlinked = bkey_is_unlinked_inode(k);
- pos = k.k->p;
- bch2_trans_iter_exit(trans, &iter);
-
- if (!unlinked)
- return 0;
-
- ret = lockrestart_do(trans, bch2_inode_or_descendents_is_open(trans, pos));
- if (ret)
- return ret < 0 ? ret : 0;
-
- ret = __bch2_inode_rm_snapshot(trans, pos.offset, pos.snapshot);
- if (ret)
- return ret;
- goto next_parent;
-}
-
-int bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot)
-{
- return __bch2_inode_rm_snapshot(trans, inum, snapshot) ?:
- delete_ancestor_snapshot_inodes(trans, SPOS(0, inum, snapshot));
-}
-
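-/*
- * Check whether an entry in the deleted_inodes btree still refers to an inode
- * that should actually be deleted: returns 1 to delete, 0 to skip, negative on
- * error; stale or bogus entries are removed here as fsck errors:
- */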
-static int may_delete_deleted_inode(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bpos pos,
- bool *need_another_pass)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter inode_iter;
- struct bkey_s_c k;
- struct bch_inode_unpacked inode;
- struct printbuf buf = PRINTBUF;
- int ret;
-
- k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, pos, BTREE_ITER_cached);
- ret = bkey_err(k);
- if (ret)
- return ret;
-
- ret = bkey_is_inode(k.k) ? 0 : -BCH_ERR_ENOENT_inode;
- if (fsck_err_on(!bkey_is_inode(k.k),
- trans, deleted_inode_missing,
- "nonexistent inode %llu:%u in deleted_inodes btree",
- pos.offset, pos.snapshot))
- goto delete;
-
- ret = bch2_inode_unpack(k, &inode);
- if (ret)
- goto out;
-
- if (S_ISDIR(inode.bi_mode)) {
- ret = bch2_empty_dir_snapshot(trans, pos.offset, 0, pos.snapshot);
- if (fsck_err_on(bch2_err_matches(ret, ENOTEMPTY),
- trans, deleted_inode_is_dir,
- "non empty directory %llu:%u in deleted_inodes btree",
- pos.offset, pos.snapshot))
- goto delete;
- if (ret)
- goto out;
- }
-
- if (fsck_err_on(!(inode.bi_flags & BCH_INODE_unlinked),
- trans, deleted_inode_not_unlinked,
- "non-deleted inode %llu:%u in deleted_inodes btree",
- pos.offset, pos.snapshot))
- goto delete;
-
- if (fsck_err_on(inode.bi_flags & BCH_INODE_has_child_snapshot,
- trans, deleted_inode_has_child_snapshots,
- "inode with child snapshots %llu:%u in deleted_inodes btree",
- pos.offset, pos.snapshot))
- goto delete;
-
- ret = bch2_inode_has_child_snapshots(trans, k.k->p);
- if (ret < 0)
- goto out;
-
- if (ret) {
- if (fsck_err(trans, inode_has_child_snapshots_wrong,
- "inode has_child_snapshots flag wrong (should be set)\n%s",
- (printbuf_reset(&buf),
- bch2_inode_unpacked_to_text(&buf, &inode),
- buf.buf))) {
- inode.bi_flags |= BCH_INODE_has_child_snapshot;
- ret = __bch2_fsck_write_inode(trans, &inode);
- if (ret)
- goto out;
- }
- goto delete;
- }
-
- if (test_bit(BCH_FS_clean_recovery, &c->flags) &&
- !fsck_err(trans, deleted_inode_but_clean,
- "filesystem marked as clean but have deleted inode %llu:%u",
- pos.offset, pos.snapshot)) {
- ret = 0;
- goto out;
- }
-
- ret = 1;
-out:
-fsck_err:
- bch2_trans_iter_exit(trans, &inode_iter);
- printbuf_exit(&buf);
- return ret;
-delete:
- ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, pos, false);
- goto out;
-}
-
-int bch2_delete_dead_inodes(struct bch_fs *c)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- bool need_another_pass;
- int ret;
-again:
- /*
- * If we ran check_inodes(), unlinked inodes will have already been cleaned
- * up, but the write buffer will be out of sync; therefore we always need a
- * write buffer flush
- */
- ret = bch2_btree_write_buffer_flush_sync(trans);
- if (ret)
- goto err;
-
- need_another_pass = false;
-
- /*
- * Weird transaction restart handling here because on successful delete,
- * bch2_inode_rm_snapshot() will return a nested transaction restart,
- * but we can't retry because the btree write buffer won't have been
- * flushed and we'd spin:
- */
- ret = for_each_btree_key_commit(trans, iter, BTREE_ID_deleted_inodes, POS_MIN,
- BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
- ret = may_delete_deleted_inode(trans, &iter, k.k->p, &need_another_pass);
- if (ret > 0) {
- bch_verbose(c, "deleting unlinked inode %llu:%u", k.k->p.offset, k.k->p.snapshot);
-
- ret = bch2_inode_rm_snapshot(trans, k.k->p.offset, k.k->p.snapshot);
- /*
- * We don't want to loop here: a transaction restart
- * error here means we handled a transaction restart and
- * we're actually done, but if we loop we'll retry the
- * same key because the write buffer hasn't been flushed
- * yet
- */
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
- ret = 0;
- continue;
- }
- }
-
- ret;
- }));
-
- if (!ret && need_another_pass)
- goto again;
-err:
- bch2_trans_put(trans);
- return ret;
-}
diff --git a/fs/bcachefs/inode.h b/fs/bcachefs/inode.h
deleted file mode 100644
index eab82b5eb897..000000000000
--- a/fs/bcachefs/inode.h
+++ /dev/null
@@ -1,268 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_INODE_H
-#define _BCACHEFS_INODE_H
-
-#include "bkey.h"
-#include "bkey_methods.h"
-#include "opts.h"
-#include "snapshot.h"
-
-enum bch_validate_flags;
-extern const char * const bch2_inode_opts[];
-
-int bch2_inode_validate(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags);
-int bch2_inode_v2_validate(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags);
-int bch2_inode_v3_validate(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags);
-void bch2_inode_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-
-int __bch2_inode_has_child_snapshots(struct btree_trans *, struct bpos);
-
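-/*
- * Leaf snapshots can't have child snapshots; only do the btree scan when the
- * snapshot might have children:
- */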
-static inline int bch2_inode_has_child_snapshots(struct btree_trans *trans, struct bpos pos)
-{
- return bch2_snapshot_is_leaf(trans->c, pos.snapshot) <= 0
- ? __bch2_inode_has_child_snapshots(trans, pos)
- : 0;
-}
-
-int bch2_trigger_inode(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s,
- enum btree_iter_update_trigger_flags);
-
-#define bch2_bkey_ops_inode ((struct bkey_ops) { \
- .key_validate = bch2_inode_validate, \
- .val_to_text = bch2_inode_to_text, \
- .trigger = bch2_trigger_inode, \
- .min_val_size = 16, \
-})
-
-#define bch2_bkey_ops_inode_v2 ((struct bkey_ops) { \
- .key_validate = bch2_inode_v2_validate, \
- .val_to_text = bch2_inode_to_text, \
- .trigger = bch2_trigger_inode, \
- .min_val_size = 32, \
-})
-
-#define bch2_bkey_ops_inode_v3 ((struct bkey_ops) { \
- .key_validate = bch2_inode_v3_validate, \
- .val_to_text = bch2_inode_to_text, \
- .trigger = bch2_trigger_inode, \
- .min_val_size = 48, \
-})
-
-static inline bool bkey_is_inode(const struct bkey *k)
-{
- return k->type == KEY_TYPE_inode ||
- k->type == KEY_TYPE_inode_v2 ||
- k->type == KEY_TYPE_inode_v3;
-}
-
-int bch2_inode_generation_validate(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags);
-void bch2_inode_generation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-
-#define bch2_bkey_ops_inode_generation ((struct bkey_ops) { \
- .key_validate = bch2_inode_generation_validate, \
- .val_to_text = bch2_inode_generation_to_text, \
- .min_val_size = 8, \
-})
-
-#if 0
-typedef struct {
- u64 lo;
- u32 hi;
-} __packed __aligned(4) u96;
-#endif
-typedef u64 u96;
-
-struct bch_inode_unpacked {
- u64 bi_inum;
- u32 bi_snapshot;
- u64 bi_journal_seq;
- __le64 bi_hash_seed;
- u64 bi_size;
- u64 bi_sectors;
- u64 bi_version;
- u32 bi_flags;
- u16 bi_mode;
-
-#define x(_name, _bits) u##_bits _name;
- BCH_INODE_FIELDS_v3()
-#undef x
-};
-BITMASK(INODE_STR_HASH, struct bch_inode_unpacked, bi_flags, 20, 24);
-
-struct bkey_inode_buf {
- struct bkey_i_inode_v3 inode;
-
-#define x(_name, _bits) + 8 + _bits / 8
- u8 _pad[0 + BCH_INODE_FIELDS_v3()];
-#undef x
-};
-
-void bch2_inode_pack(struct bkey_inode_buf *, const struct bch_inode_unpacked *);
-int bch2_inode_unpack(struct bkey_s_c, struct bch_inode_unpacked *);
-struct bkey_i *bch2_inode_to_v3(struct btree_trans *, struct bkey_i *);
-
-void bch2_inode_unpacked_to_text(struct printbuf *, struct bch_inode_unpacked *);
-
-int __bch2_inode_peek(struct btree_trans *, struct btree_iter *,
- struct bch_inode_unpacked *, subvol_inum, unsigned, bool);
-
-static inline int bch2_inode_peek_nowarn(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bch_inode_unpacked *inode,
- subvol_inum inum, unsigned flags)
-{
- return __bch2_inode_peek(trans, iter, inode, inum, flags, false);
-}
-
-static inline int bch2_inode_peek(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bch_inode_unpacked *inode,
- subvol_inum inum, unsigned flags)
-{
- return __bch2_inode_peek(trans, iter, inode, inum, flags, true);
-}
-
-int bch2_inode_write_flags(struct btree_trans *, struct btree_iter *,
- struct bch_inode_unpacked *, enum btree_iter_update_trigger_flags);
-
-static inline int bch2_inode_write(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bch_inode_unpacked *inode)
-{
- return bch2_inode_write_flags(trans, iter, inode, 0);
-}
-
-int __bch2_fsck_write_inode(struct btree_trans *, struct bch_inode_unpacked *);
-int bch2_fsck_write_inode(struct btree_trans *, struct bch_inode_unpacked *);
-
-void bch2_inode_init_early(struct bch_fs *,
- struct bch_inode_unpacked *);
-void bch2_inode_init_late(struct bch_inode_unpacked *, u64,
- uid_t, gid_t, umode_t, dev_t,
- struct bch_inode_unpacked *);
-void bch2_inode_init(struct bch_fs *, struct bch_inode_unpacked *,
- uid_t, gid_t, umode_t, dev_t,
- struct bch_inode_unpacked *);
-
-int bch2_inode_create(struct btree_trans *, struct btree_iter *,
- struct bch_inode_unpacked *, u32, u64);
-
-int bch2_inode_rm(struct bch_fs *, subvol_inum);
-
-int bch2_inode_find_by_inum_nowarn_trans(struct btree_trans *,
- subvol_inum,
- struct bch_inode_unpacked *);
-int bch2_inode_find_by_inum_trans(struct btree_trans *, subvol_inum,
- struct bch_inode_unpacked *);
-int bch2_inode_find_by_inum(struct bch_fs *, subvol_inum,
- struct bch_inode_unpacked *);
-
-#define inode_opt_get(_c, _inode, _name) \
- ((_inode)->bi_##_name ? (_inode)->bi_##_name - 1 : (_c)->opts._name)
-
-static inline void bch2_inode_opt_set(struct bch_inode_unpacked *inode,
- enum inode_opt_id id, u64 v)
-{
- switch (id) {
-#define x(_name, ...) \
- case Inode_opt_##_name: \
- inode->bi_##_name = v; \
- break;
- BCH_INODE_OPTS()
-#undef x
- default:
- BUG();
- }
-}
-
-static inline u64 bch2_inode_opt_get(struct bch_inode_unpacked *inode,
- enum inode_opt_id id)
-{
- switch (id) {
-#define x(_name, ...) \
- case Inode_opt_##_name: \
- return inode->bi_##_name;
- BCH_INODE_OPTS()
-#undef x
- default:
- BUG();
- }
-}
-
-static inline u8 mode_to_type(umode_t mode)
-{
- return (mode >> 12) & 15;
-}
-
-static inline u8 inode_d_type(struct bch_inode_unpacked *inode)
-{
- return inode->bi_subvol ? DT_SUBVOL : mode_to_type(inode->bi_mode);
-}
-
-static inline u32 bch2_inode_flags(struct bkey_s_c k)
-{
- switch (k.k->type) {
- case KEY_TYPE_inode:
- return le32_to_cpu(bkey_s_c_to_inode(k).v->bi_flags);
- case KEY_TYPE_inode_v2:
- return le64_to_cpu(bkey_s_c_to_inode_v2(k).v->bi_flags);
- case KEY_TYPE_inode_v3:
- return le64_to_cpu(bkey_s_c_to_inode_v3(k).v->bi_flags);
- default:
- return 0;
- }
-}
-
-/* i_nlink: */
-
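-/*
- * On disk, bi_nlink does not include the implicit links (1 for a regular file,
- * 2 for a directory), and a link count of zero is represented by the
- * BCH_INODE_unlinked flag:
- */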
-static inline unsigned nlink_bias(umode_t mode)
-{
- return S_ISDIR(mode) ? 2 : 1;
-}
-
-static inline unsigned bch2_inode_nlink_get(struct bch_inode_unpacked *bi)
-{
- return bi->bi_flags & BCH_INODE_unlinked
- ? 0
- : bi->bi_nlink + nlink_bias(bi->bi_mode);
-}
-
-static inline void bch2_inode_nlink_set(struct bch_inode_unpacked *bi,
- unsigned nlink)
-{
- if (nlink) {
- bi->bi_nlink = nlink - nlink_bias(bi->bi_mode);
- bi->bi_flags &= ~BCH_INODE_unlinked;
- } else {
- bi->bi_nlink = 0;
- bi->bi_flags |= BCH_INODE_unlinked;
- }
-}
-
-int bch2_inode_nlink_inc(struct bch_inode_unpacked *);
-void bch2_inode_nlink_dec(struct btree_trans *, struct bch_inode_unpacked *);
-
-static inline bool bch2_inode_should_have_bp(struct bch_inode_unpacked *inode)
-{
- bool inode_has_bp = inode->bi_dir || inode->bi_dir_offset;
-
- return S_ISDIR(inode->bi_mode) ||
- (!inode->bi_nlink && inode_has_bp);
-}
-
-struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *);
-void bch2_inode_opts_get(struct bch_io_opts *, struct bch_fs *,
- struct bch_inode_unpacked *);
-int bch2_inum_opts_get(struct btree_trans*, subvol_inum, struct bch_io_opts *);
-
-int bch2_inode_rm_snapshot(struct btree_trans *, u64, u32);
-int bch2_delete_dead_inodes(struct bch_fs *);
-
-#endif /* _BCACHEFS_INODE_H */
diff --git a/fs/bcachefs/inode_format.h b/fs/bcachefs/inode_format.h
deleted file mode 100644
index 7928d0c6954f..000000000000
--- a/fs/bcachefs/inode_format.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_INODE_FORMAT_H
-#define _BCACHEFS_INODE_FORMAT_H
-
-#define BLOCKDEV_INODE_MAX 4096
-#define BCACHEFS_ROOT_INO 4096
-
-struct bch_inode {
- struct bch_val v;
-
- __le64 bi_hash_seed;
- __le32 bi_flags;
- __le16 bi_mode;
- __u8 fields[];
-} __packed __aligned(8);
-
-struct bch_inode_v2 {
- struct bch_val v;
-
- __le64 bi_journal_seq;
- __le64 bi_hash_seed;
- __le64 bi_flags;
- __le16 bi_mode;
- __u8 fields[];
-} __packed __aligned(8);
-
-struct bch_inode_v3 {
- struct bch_val v;
-
- __le64 bi_journal_seq;
- __le64 bi_hash_seed;
- __le64 bi_flags;
- __le64 bi_sectors;
- __le64 bi_size;
- __le64 bi_version;
- __u8 fields[];
-} __packed __aligned(8);
-
-#define INODEv3_FIELDS_START_INITIAL 6
-#define INODEv3_FIELDS_START_CUR (offsetof(struct bch_inode_v3, fields) / sizeof(__u64))
-
-struct bch_inode_generation {
- struct bch_val v;
-
- __le32 bi_generation;
- __le32 pad;
-} __packed __aligned(8);
-
-/*
- * bi_subvol and bi_parent_subvol are only set for subvolume roots:
- */
-
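-/*
- * Variable length inode fields, stored packed after the fixed portion of the
- * inode; each entry gives the field name and its width in bits:
- */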
-#define BCH_INODE_FIELDS_v2() \
- x(bi_atime, 96) \
- x(bi_ctime, 96) \
- x(bi_mtime, 96) \
- x(bi_otime, 96) \
- x(bi_size, 64) \
- x(bi_sectors, 64) \
- x(bi_uid, 32) \
- x(bi_gid, 32) \
- x(bi_nlink, 32) \
- x(bi_generation, 32) \
- x(bi_dev, 32) \
- x(bi_data_checksum, 8) \
- x(bi_compression, 8) \
- x(bi_project, 32) \
- x(bi_background_compression, 8) \
- x(bi_data_replicas, 8) \
- x(bi_promote_target, 16) \
- x(bi_foreground_target, 16) \
- x(bi_background_target, 16) \
- x(bi_erasure_code, 16) \
- x(bi_fields_set, 16) \
- x(bi_dir, 64) \
- x(bi_dir_offset, 64) \
- x(bi_subvol, 32) \
- x(bi_parent_subvol, 32)
-
-#define BCH_INODE_FIELDS_v3() \
- x(bi_atime, 96) \
- x(bi_ctime, 96) \
- x(bi_mtime, 96) \
- x(bi_otime, 96) \
- x(bi_uid, 32) \
- x(bi_gid, 32) \
- x(bi_nlink, 32) \
- x(bi_generation, 32) \
- x(bi_dev, 32) \
- x(bi_data_checksum, 8) \
- x(bi_compression, 8) \
- x(bi_project, 32) \
- x(bi_background_compression, 8) \
- x(bi_data_replicas, 8) \
- x(bi_promote_target, 16) \
- x(bi_foreground_target, 16) \
- x(bi_background_target, 16) \
- x(bi_erasure_code, 16) \
- x(bi_fields_set, 16) \
- x(bi_dir, 64) \
- x(bi_dir_offset, 64) \
- x(bi_subvol, 32) \
- x(bi_parent_subvol, 32) \
- x(bi_nocow, 8)
-
-/* subset of BCH_INODE_FIELDS */
-#define BCH_INODE_OPTS() \
- x(data_checksum, 8) \
- x(compression, 8) \
- x(project, 32) \
- x(background_compression, 8) \
- x(data_replicas, 8) \
- x(promote_target, 16) \
- x(foreground_target, 16) \
- x(background_target, 16) \
- x(erasure_code, 16) \
- x(nocow, 8)
-
-enum inode_opt_id {
-#define x(name, ...) \
- Inode_opt_##name,
- BCH_INODE_OPTS()
-#undef x
- Inode_opt_nr,
-};
-
-#define BCH_INODE_FLAGS() \
- x(sync, 0) \
- x(immutable, 1) \
- x(append, 2) \
- x(nodump, 3) \
- x(noatime, 4) \
- x(i_size_dirty, 5) \
- x(i_sectors_dirty, 6) \
- x(unlinked, 7) \
- x(backptr_untrusted, 8) \
- x(has_child_snapshot, 9)
-
-/* bits 20+ reserved for packed fields below: */
-
-enum bch_inode_flags {
-#define x(t, n) BCH_INODE_##t = 1U << n,
- BCH_INODE_FLAGS()
-#undef x
-};
-
-enum __bch_inode_flags {
-#define x(t, n) __BCH_INODE_##t = n,
- BCH_INODE_FLAGS()
-#undef x
-};
-
-LE32_BITMASK(INODEv1_STR_HASH, struct bch_inode, bi_flags, 20, 24);
-LE32_BITMASK(INODEv1_NR_FIELDS, struct bch_inode, bi_flags, 24, 31);
-LE32_BITMASK(INODEv1_NEW_VARINT,struct bch_inode, bi_flags, 31, 32);
-
-LE64_BITMASK(INODEv2_STR_HASH, struct bch_inode_v2, bi_flags, 20, 24);
-LE64_BITMASK(INODEv2_NR_FIELDS, struct bch_inode_v2, bi_flags, 24, 31);
-
-LE64_BITMASK(INODEv3_STR_HASH, struct bch_inode_v3, bi_flags, 20, 24);
-LE64_BITMASK(INODEv3_NR_FIELDS, struct bch_inode_v3, bi_flags, 24, 31);
-
-LE64_BITMASK(INODEv3_FIELDS_START,
- struct bch_inode_v3, bi_flags, 31, 36);
-LE64_BITMASK(INODEv3_MODE, struct bch_inode_v3, bi_flags, 36, 52);
-
-#endif /* _BCACHEFS_INODE_FORMAT_H */
diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c
deleted file mode 100644
index f283051758d6..000000000000
--- a/fs/bcachefs/io_misc.c
+++ /dev/null
@@ -1,540 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * io_misc.c - fallocate, fpunch, truncate:
- */
-
-#include "bcachefs.h"
-#include "alloc_foreground.h"
-#include "bkey_buf.h"
-#include "btree_update.h"
-#include "buckets.h"
-#include "clock.h"
-#include "error.h"
-#include "extents.h"
-#include "extent_update.h"
-#include "inode.h"
-#include "io_misc.h"
-#include "io_write.h"
-#include "logged_ops.h"
-#include "rebalance.h"
-#include "subvolume.h"
-
-/* Overwrites whatever was present with zeroes: */
-int bch2_extent_fallocate(struct btree_trans *trans,
- subvol_inum inum,
- struct btree_iter *iter,
- u64 sectors,
- struct bch_io_opts opts,
- s64 *i_sectors_delta,
- struct write_point_specifier write_point)
-{
- struct bch_fs *c = trans->c;
- struct disk_reservation disk_res = { 0 };
- struct closure cl;
- struct open_buckets open_buckets = { 0 };
- struct bkey_s_c k;
- struct bkey_buf old, new;
- unsigned sectors_allocated = 0, new_replicas;
- bool unwritten = opts.nocow &&
- c->sb.version >= bcachefs_metadata_version_unwritten_extents;
- int ret;
-
- bch2_bkey_buf_init(&old);
- bch2_bkey_buf_init(&new);
- closure_init_stack(&cl);
-
- k = bch2_btree_iter_peek_slot(iter);
- ret = bkey_err(k);
- if (ret)
- return ret;
-
- sectors = min_t(u64, sectors, k.k->p.offset - iter->pos.offset);
- new_replicas = max(0, (int) opts.data_replicas -
- (int) bch2_bkey_nr_ptrs_fully_allocated(k));
-
- /*
- * Get a disk reservation before (in the nocow case) calling
- * into the allocator:
- */
- ret = bch2_disk_reservation_get(c, &disk_res, sectors, new_replicas, 0);
- if (unlikely(ret))
- goto err_noprint;
-
- bch2_bkey_buf_reassemble(&old, c, k);
-
- if (!unwritten) {
- struct bkey_i_reservation *reservation;
-
- bch2_bkey_buf_realloc(&new, c, sizeof(*reservation) / sizeof(u64));
- reservation = bkey_reservation_init(new.k);
- reservation->k.p = iter->pos;
- bch2_key_resize(&reservation->k, sectors);
- reservation->v.nr_replicas = opts.data_replicas;
- } else {
- struct bkey_i_extent *e;
- struct bch_devs_list devs_have;
- struct write_point *wp;
-
- devs_have.nr = 0;
-
- bch2_bkey_buf_realloc(&new, c, BKEY_EXTENT_U64s_MAX);
-
- e = bkey_extent_init(new.k);
- e->k.p = iter->pos;
-
- ret = bch2_alloc_sectors_start_trans(trans,
- opts.foreground_target,
- false,
- write_point,
- &devs_have,
- opts.data_replicas,
- opts.data_replicas,
- BCH_WATERMARK_normal, 0, &cl, &wp);
- if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
- ret = -BCH_ERR_transaction_restart_nested;
- if (ret)
- goto err;
-
- sectors = min_t(u64, sectors, wp->sectors_free);
- sectors_allocated = sectors;
-
- bch2_key_resize(&e->k, sectors);
-
- bch2_open_bucket_get(c, wp, &open_buckets);
- bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
- bch2_alloc_sectors_done(c, wp);
-
- extent_for_each_ptr(extent_i_to_s(e), ptr)
- ptr->unwritten = true;
- }
-
- ret = bch2_extent_update(trans, inum, iter, new.k, &disk_res,
- 0, i_sectors_delta, true);
-err:
- if (!ret && sectors_allocated)
- bch2_increment_clock(c, sectors_allocated, WRITE);
- if (should_print_err(ret))
- bch_err_inum_offset_ratelimited(c,
- inum.inum,
- iter->pos.offset << 9,
- "%s(): error: %s", __func__, bch2_err_str(ret));
-err_noprint:
- bch2_open_buckets_put(c, &open_buckets);
- bch2_disk_reservation_put(c, &disk_res);
- bch2_bkey_buf_exit(&new, c);
- bch2_bkey_buf_exit(&old, c);
-
- if (closure_nr_remaining(&cl) != 1) {
- bch2_trans_unlock_long(trans);
- bch2_wait_on_allocator(c, &cl);
- }
-
- return ret;
-}
-
-/*
- * Returns -BCH_ERR_transaction_restart if we had to drop locks:
- */
-int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
- subvol_inum inum, u64 end,
- s64 *i_sectors_delta)
-{
- struct bch_fs *c = trans->c;
- unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
- struct bpos end_pos = POS(inum.inum, end);
- struct bkey_s_c k;
- int ret = 0, ret2 = 0;
- u32 snapshot;
-
- while (!ret ||
- bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
- struct disk_reservation disk_res =
- bch2_disk_reservation_init(c, 0);
- struct bkey_i delete;
-
- if (ret)
- ret2 = ret;
-
- bch2_trans_begin(trans);
-
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- continue;
-
- bch2_btree_iter_set_snapshot(iter, snapshot);
-
- /*
- * peek_upto() doesn't have ideal semantics for extents:
- */
- k = bch2_btree_iter_peek_upto(iter, end_pos);
- if (!k.k)
- break;
-
- ret = bkey_err(k);
- if (ret)
- continue;
-
- bkey_init(&delete.k);
- delete.k.p = iter->pos;
-
- /* create the biggest key we can */
- bch2_key_resize(&delete.k, max_sectors);
- bch2_cut_back(end_pos, &delete);
-
- ret = bch2_extent_update(trans, inum, iter, &delete,
- &disk_res, 0, i_sectors_delta, false);
- bch2_disk_reservation_put(c, &disk_res);
- }
-
- return ret ?: ret2;
-}
-
-int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end,
- s64 *i_sectors_delta)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- int ret;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- POS(inum.inum, start),
- BTREE_ITER_intent);
-
- ret = bch2_fpunch_at(trans, &iter, inum, end, i_sectors_delta);
-
- bch2_trans_iter_exit(trans, &iter);
- bch2_trans_put(trans);
-
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- ret = 0;
-
- return ret;
-}
-
-/* truncate: */
-
-void bch2_logged_op_truncate_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_s_c_logged_op_truncate op = bkey_s_c_to_logged_op_truncate(k);
-
- prt_printf(out, "subvol=%u", le32_to_cpu(op.v->subvol));
- prt_printf(out, " inum=%llu", le64_to_cpu(op.v->inum));
- prt_printf(out, " new_i_size=%llu", le64_to_cpu(op.v->new_i_size));
-}
-
-static int truncate_set_isize(struct btree_trans *trans,
- subvol_inum inum,
- u64 new_i_size,
- bool warn)
-{
- struct btree_iter iter = { NULL };
- struct bch_inode_unpacked inode_u;
- int ret;
-
- ret = __bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_intent, warn) ?:
- (inode_u.bi_size = new_i_size, 0) ?:
- bch2_inode_write(trans, &iter, &inode_u);
-
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static int __bch2_resume_logged_op_truncate(struct btree_trans *trans,
- struct bkey_i *op_k,
- u64 *i_sectors_delta)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter fpunch_iter;
- struct bkey_i_logged_op_truncate *op = bkey_i_to_logged_op_truncate(op_k);
- subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) };
- u64 new_i_size = le64_to_cpu(op->v.new_i_size);
- bool warn_errors = i_sectors_delta != NULL;
- int ret;
-
- ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- truncate_set_isize(trans, inum, new_i_size, i_sectors_delta != NULL));
- if (ret)
- goto err;
-
- bch2_trans_iter_init(trans, &fpunch_iter, BTREE_ID_extents,
- POS(inum.inum, round_up(new_i_size, block_bytes(c)) >> 9),
- BTREE_ITER_intent);
- ret = bch2_fpunch_at(trans, &fpunch_iter, inum, U64_MAX, i_sectors_delta);
- bch2_trans_iter_exit(trans, &fpunch_iter);
-
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- ret = 0;
-err:
- if (warn_errors)
- bch_err_fn(c, ret);
- return ret;
-}
-
-int bch2_resume_logged_op_truncate(struct btree_trans *trans, struct bkey_i *op_k)
-{
- return __bch2_resume_logged_op_truncate(trans, op_k, NULL);
-}
-
-int bch2_truncate(struct bch_fs *c, subvol_inum inum, u64 new_i_size, u64 *i_sectors_delta)
-{
- struct bkey_i_logged_op_truncate op;
-
- bkey_logged_op_truncate_init(&op.k_i);
- op.v.subvol = cpu_to_le32(inum.subvol);
- op.v.inum = cpu_to_le64(inum.inum);
- op.v.new_i_size = cpu_to_le64(new_i_size);
-
- /*
- * Logged ops aren't atomic w.r.t. snapshot creation: creating a
- * snapshot while they're in progress, then crashing, will result in the
- * resume only proceeding in one of the snapshots
- */
- down_read(&c->snapshot_create_lock);
- struct btree_trans *trans = bch2_trans_get(c);
- int ret = bch2_logged_op_start(trans, &op.k_i);
- if (ret)
- goto out;
- ret = __bch2_resume_logged_op_truncate(trans, &op.k_i, i_sectors_delta);
- ret = bch2_logged_op_finish(trans, &op.k_i) ?: ret;
-out:
- bch2_trans_put(trans);
- up_read(&c->snapshot_create_lock);
-
- return ret;
-}
-
-/* finsert/fcollapse: */
-
-void bch2_logged_op_finsert_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_s_c_logged_op_finsert op = bkey_s_c_to_logged_op_finsert(k);
-
- prt_printf(out, "subvol=%u", le32_to_cpu(op.v->subvol));
- prt_printf(out, " inum=%llu", le64_to_cpu(op.v->inum));
- prt_printf(out, " dst_offset=%lli", le64_to_cpu(op.v->dst_offset));
- prt_printf(out, " src_offset=%llu", le64_to_cpu(op.v->src_offset));
-}
-
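-/*
- * Adjust i_size by len sectors (converted to bytes here); when growing, check
- * for overflow and that the insert offset lands within the current file size:
- */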
-static int adjust_i_size(struct btree_trans *trans, subvol_inum inum,
- u64 offset, s64 len, bool warn)
-{
- struct btree_iter iter;
- struct bch_inode_unpacked inode_u;
- int ret;
-
- offset <<= 9;
- len <<= 9;
-
- ret = __bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_intent, warn);
- if (ret)
- return ret;
-
- if (len > 0) {
- if (MAX_LFS_FILESIZE - inode_u.bi_size < len) {
- ret = -EFBIG;
- goto err;
- }
-
- if (offset >= inode_u.bi_size) {
- ret = -EINVAL;
- goto err;
- }
- }
-
- inode_u.bi_size += len;
- inode_u.bi_mtime = inode_u.bi_ctime = bch2_current_time(trans->c);
-
- ret = bch2_inode_write(trans, &iter, &inode_u);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
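-/*
- * Run (or resume after a crash) a logged finsert/fcollapse: extents above
- * src_offset are shifted by dst_offset - src_offset, with progress recorded in
- * the logged op key so the operation can be resumed:
- */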
-static int __bch2_resume_logged_op_finsert(struct btree_trans *trans,
- struct bkey_i *op_k,
- u64 *i_sectors_delta)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_i_logged_op_finsert *op = bkey_i_to_logged_op_finsert(op_k);
- subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) };
- struct bch_io_opts opts;
- u64 dst_offset = le64_to_cpu(op->v.dst_offset);
- u64 src_offset = le64_to_cpu(op->v.src_offset);
- s64 shift = dst_offset - src_offset;
- u64 len = abs(shift);
- u64 pos = le64_to_cpu(op->v.pos);
- bool insert = shift > 0;
- u32 snapshot;
- bool warn_errors = i_sectors_delta != NULL;
- int ret = 0;
-
- ret = bch2_inum_opts_get(trans, inum, &opts);
- if (ret)
- return ret;
-
- /*
- * check for missing subvolume before fpunch, as in resume we don't want
- * it to be a fatal error
- */
- ret = lockrestart_do(trans, __bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot, warn_errors));
- if (ret)
- return ret;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- POS(inum.inum, 0),
- BTREE_ITER_intent);
-
- switch (op->v.state) {
-case LOGGED_OP_FINSERT_start:
- op->v.state = LOGGED_OP_FINSERT_shift_extents;
-
- if (insert) {
- ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- adjust_i_size(trans, inum, src_offset, len, warn_errors) ?:
- bch2_logged_op_update(trans, &op->k_i));
- if (ret)
- goto err;
- } else {
- bch2_btree_iter_set_pos(&iter, POS(inum.inum, src_offset));
-
- ret = bch2_fpunch_at(trans, &iter, inum, src_offset + len, i_sectors_delta);
- if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto err;
-
- ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- bch2_logged_op_update(trans, &op->k_i));
- }
-
- fallthrough;
-case LOGGED_OP_FINSERT_shift_extents:
- while (1) {
- struct disk_reservation disk_res =
- bch2_disk_reservation_init(c, 0);
- struct bkey_i delete, *copy;
- struct bkey_s_c k;
- struct bpos src_pos = POS(inum.inum, src_offset);
-
- bch2_trans_begin(trans);
-
- ret = __bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot,
- warn_errors);
- if (ret)
- goto btree_err;
-
- bch2_btree_iter_set_snapshot(&iter, snapshot);
- bch2_btree_iter_set_pos(&iter, SPOS(inum.inum, pos, snapshot));
-
- k = insert
- ? bch2_btree_iter_peek_prev(&iter)
- : bch2_btree_iter_peek_upto(&iter, POS(inum.inum, U64_MAX));
- if ((ret = bkey_err(k)))
- goto btree_err;
-
- if (!k.k ||
- k.k->p.inode != inum.inum ||
- bkey_le(k.k->p, POS(inum.inum, src_offset)))
- break;
-
- copy = bch2_bkey_make_mut_noupdate(trans, k);
- if ((ret = PTR_ERR_OR_ZERO(copy)))
- goto btree_err;
-
- if (insert &&
- bkey_lt(bkey_start_pos(k.k), src_pos)) {
- bch2_cut_front(src_pos, copy);
-
- /* Splitting compressed extent? */
- bch2_disk_reservation_add(c, &disk_res,
- copy->k.size *
- bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy)),
- BCH_DISK_RESERVATION_NOFAIL);
- }
-
- bkey_init(&delete.k);
- delete.k.p = copy->k.p;
- delete.k.p.snapshot = snapshot;
- delete.k.size = copy->k.size;
-
- copy->k.p.offset += shift;
- copy->k.p.snapshot = snapshot;
-
- op->v.pos = cpu_to_le64(insert ? bkey_start_offset(&delete.k) : delete.k.p.offset);
-
- ret = bch2_bkey_set_needs_rebalance(c, copy, &opts) ?:
- bch2_btree_insert_trans(trans, BTREE_ID_extents, &delete, 0) ?:
- bch2_btree_insert_trans(trans, BTREE_ID_extents, copy, 0) ?:
- bch2_logged_op_update(trans, &op->k_i) ?:
- bch2_trans_commit(trans, &disk_res, NULL, BCH_TRANS_COMMIT_no_enospc);
-btree_err:
- bch2_disk_reservation_put(c, &disk_res);
-
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret)
- goto err;
-
- pos = le64_to_cpu(op->v.pos);
- }
-
- op->v.state = LOGGED_OP_FINSERT_finish;
-
- if (!insert) {
- ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- adjust_i_size(trans, inum, src_offset, shift, warn_errors) ?:
- bch2_logged_op_update(trans, &op->k_i));
- } else {
- /* We need an inode update to update bi_journal_seq for fsync: */
- ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- adjust_i_size(trans, inum, 0, 0, warn_errors) ?:
- bch2_logged_op_update(trans, &op->k_i));
- }
-
- break;
-case LOGGED_OP_FINSERT_finish:
- break;
- }
-err:
- bch2_trans_iter_exit(trans, &iter);
- if (warn_errors)
- bch_err_fn(c, ret);
- return ret;
-}
-
-int bch2_resume_logged_op_finsert(struct btree_trans *trans, struct bkey_i *op_k)
-{
- return __bch2_resume_logged_op_finsert(trans, op_k, NULL);
-}
-
-int bch2_fcollapse_finsert(struct bch_fs *c, subvol_inum inum,
- u64 offset, u64 len, bool insert,
- s64 *i_sectors_delta)
-{
- struct bkey_i_logged_op_finsert op;
- s64 shift = insert ? len : -len;
-
- bkey_logged_op_finsert_init(&op.k_i);
- op.v.subvol = cpu_to_le32(inum.subvol);
- op.v.inum = cpu_to_le64(inum.inum);
- op.v.dst_offset = cpu_to_le64(offset + shift);
- op.v.src_offset = cpu_to_le64(offset);
- op.v.pos = cpu_to_le64(insert ? U64_MAX : offset);
-
- /*
- * Logged ops aren't atomic w.r.t. snapshot creation: creating a
- * snapshot while they're in progress, then crashing, will result in the
- * resume only proceeding in one of the snapshots
- */
- down_read(&c->snapshot_create_lock);
- struct btree_trans *trans = bch2_trans_get(c);
- int ret = bch2_logged_op_start(trans, &op.k_i);
- if (ret)
- goto out;
- ret = __bch2_resume_logged_op_finsert(trans, &op.k_i, i_sectors_delta);
- ret = bch2_logged_op_finish(trans, &op.k_i) ?: ret;
-out:
- bch2_trans_put(trans);
- up_read(&c->snapshot_create_lock);
-
- return ret;
-}
diff --git a/fs/bcachefs/io_misc.h b/fs/bcachefs/io_misc.h
deleted file mode 100644
index 9cb44a7c43c1..000000000000
--- a/fs/bcachefs/io_misc.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_IO_MISC_H
-#define _BCACHEFS_IO_MISC_H
-
-int bch2_extent_fallocate(struct btree_trans *, subvol_inum, struct btree_iter *,
- u64, struct bch_io_opts, s64 *,
- struct write_point_specifier);
-int bch2_fpunch_at(struct btree_trans *, struct btree_iter *,
- subvol_inum, u64, s64 *);
-int bch2_fpunch(struct bch_fs *c, subvol_inum, u64, u64, s64 *);
-
-void bch2_logged_op_truncate_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-
-#define bch2_bkey_ops_logged_op_truncate ((struct bkey_ops) { \
- .val_to_text = bch2_logged_op_truncate_to_text, \
- .min_val_size = 24, \
-})
-
-int bch2_resume_logged_op_truncate(struct btree_trans *, struct bkey_i *);
-
-int bch2_truncate(struct bch_fs *, subvol_inum, u64, u64 *);
-
-void bch2_logged_op_finsert_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-
-#define bch2_bkey_ops_logged_op_finsert ((struct bkey_ops) { \
- .val_to_text = bch2_logged_op_finsert_to_text, \
- .min_val_size = 24, \
-})
-
-int bch2_resume_logged_op_finsert(struct btree_trans *, struct bkey_i *);
-
-int bch2_fcollapse_finsert(struct bch_fs *, subvol_inum, u64, u64, bool, s64 *);
-
-#endif /* _BCACHEFS_IO_MISC_H */
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
deleted file mode 100644
index b3b934a87c6d..000000000000
--- a/fs/bcachefs/io_read.c
+++ /dev/null
@@ -1,1266 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Some low level IO code, and hacks for various block layer limitations
- *
- * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
- * Copyright 2012 Google, Inc.
- */
-
-#include "bcachefs.h"
-#include "alloc_background.h"
-#include "alloc_foreground.h"
-#include "btree_update.h"
-#include "buckets.h"
-#include "checksum.h"
-#include "clock.h"
-#include "compress.h"
-#include "data_update.h"
-#include "disk_groups.h"
-#include "ec.h"
-#include "error.h"
-#include "io_read.h"
-#include "io_misc.h"
-#include "io_write.h"
-#include "subvolume.h"
-#include "trace.h"
-
-#include <linux/sched/mm.h>
-
-#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
-
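-/*
- * Decide (probabilistically) whether reads from this target should be avoided,
- * based on per device congestion counters that decay over time:
- */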
-static bool bch2_target_congested(struct bch_fs *c, u16 target)
-{
- const struct bch_devs_mask *devs;
- unsigned d, nr = 0, total = 0;
- u64 now = local_clock(), last;
- s64 congested;
- struct bch_dev *ca;
-
- if (!target)
- return false;
-
- rcu_read_lock();
- devs = bch2_target_to_mask(c, target) ?:
- &c->rw_devs[BCH_DATA_user];
-
- for_each_set_bit(d, devs->d, BCH_SB_MEMBERS_MAX) {
- ca = rcu_dereference(c->devs[d]);
- if (!ca)
- continue;
-
- congested = atomic_read(&ca->congested);
- last = READ_ONCE(ca->congested_last);
- if (time_after64(now, last))
- congested -= (now - last) >> 12;
-
- total += max(congested, 0LL);
- nr++;
- }
- rcu_read_unlock();
-
- return bch2_rand_range(nr * CONGESTED_MAX) < total;
-}
-
-#else
-
-static bool bch2_target_congested(struct bch_fs *c, u16 target)
-{
- return false;
-}
-
-#endif
-
-/* Cache promotion on read */
-
-struct promote_op {
- struct rcu_head rcu;
- u64 start_time;
-
- struct rhash_head hash;
- struct bpos pos;
-
- struct data_update write;
- struct bio_vec bi_inline_vecs[]; /* must be last */
-};
-
-static const struct rhashtable_params bch_promote_params = {
- .head_offset = offsetof(struct promote_op, hash),
- .key_offset = offsetof(struct promote_op, pos),
- .key_len = sizeof(struct bpos),
- .automatic_shrinking = true,
-};
-
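-/*
- * Returns 0 if this read should be promoted, or an error indicating why not;
- * when called from the retry path (failed != NULL) only the in-flight check
- * applies:
- */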
-static inline int should_promote(struct bch_fs *c, struct bkey_s_c k,
- struct bpos pos,
- struct bch_io_opts opts,
- unsigned flags,
- struct bch_io_failures *failed)
-{
- if (!failed) {
- BUG_ON(!opts.promote_target);
-
- if (!(flags & BCH_READ_MAY_PROMOTE))
- return -BCH_ERR_nopromote_may_not;
-
- if (bch2_bkey_has_target(c, k, opts.promote_target))
- return -BCH_ERR_nopromote_already_promoted;
-
- if (bkey_extent_is_unwritten(k))
- return -BCH_ERR_nopromote_unwritten;
-
- if (bch2_target_congested(c, opts.promote_target))
- return -BCH_ERR_nopromote_congested;
- }
-
- if (rhashtable_lookup_fast(&c->promote_table, &pos,
- bch_promote_params))
- return -BCH_ERR_nopromote_in_flight;
-
- return 0;
-}
-
-static void promote_free(struct bch_fs *c, struct promote_op *op)
-{
- int ret;
-
- bch2_data_update_exit(&op->write);
-
- ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
- bch_promote_params);
- BUG_ON(ret);
- bch2_write_ref_put(c, BCH_WRITE_REF_promote);
- kfree_rcu(op, rcu);
-}
-
-static void promote_done(struct bch_write_op *wop)
-{
- struct promote_op *op =
- container_of(wop, struct promote_op, write.op);
- struct bch_fs *c = op->write.op.c;
-
- bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
- op->start_time);
- promote_free(c, op);
-}
-
-static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
-{
- struct bio *bio = &op->write.op.wbio.bio;
-
- trace_and_count(op->write.op.c, read_promote, &rbio->bio);
-
- /* we now own pages: */
- BUG_ON(!rbio->bounce);
- BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);
-
- memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
- sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
- swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
-
- bch2_data_update_read_done(&op->write, rbio->pick.crc);
-}
-
-static struct promote_op *__promote_alloc(struct btree_trans *trans,
- enum btree_id btree_id,
- struct bkey_s_c k,
- struct bpos pos,
- struct extent_ptr_decoded *pick,
- struct bch_io_opts opts,
- unsigned sectors,
- struct bch_read_bio **rbio,
- struct bch_io_failures *failed)
-{
- struct bch_fs *c = trans->c;
- struct promote_op *op = NULL;
- struct bio *bio;
- unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
- int ret;
-
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_promote))
- return ERR_PTR(-BCH_ERR_nopromote_no_writes);
-
- op = kzalloc(struct_size(op, bi_inline_vecs, pages), GFP_KERNEL);
- if (!op) {
- ret = -BCH_ERR_nopromote_enomem;
- goto err;
- }
-
- op->start_time = local_clock();
- op->pos = pos;
-
- /*
- * We don't use the mempool here because extents that aren't
- * checksummed or compressed can be too big for the mempool:
- */
- *rbio = kzalloc(sizeof(struct bch_read_bio) +
- sizeof(struct bio_vec) * pages,
- GFP_KERNEL);
- if (!*rbio) {
- ret = -BCH_ERR_nopromote_enomem;
- goto err;
- }
-
- rbio_init(&(*rbio)->bio, opts);
- bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
-
- if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9, GFP_KERNEL)) {
- ret = -BCH_ERR_nopromote_enomem;
- goto err;
- }
-
- (*rbio)->bounce = true;
- (*rbio)->split = true;
- (*rbio)->kmalloc = true;
-
- if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
- bch_promote_params)) {
- ret = -BCH_ERR_nopromote_in_flight;
- goto err;
- }
-
- bio = &op->write.op.wbio.bio;
- bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);
-
- struct data_update_opts update_opts = {};
-
- if (!failed) {
- update_opts.target = opts.promote_target;
- update_opts.extra_replicas = 1;
- update_opts.write_flags = BCH_WRITE_ALLOC_NOWAIT|BCH_WRITE_CACHED;
- } else {
- update_opts.target = opts.foreground_target;
-
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- unsigned i = 0;
- bkey_for_each_ptr(ptrs, ptr) {
- if (bch2_dev_io_failures(failed, ptr->dev))
- update_opts.rewrite_ptrs |= BIT(i);
- i++;
- }
- }
-
- ret = bch2_data_update_init(trans, NULL, NULL, &op->write,
- writepoint_hashed((unsigned long) current),
- opts,
- update_opts,
- btree_id, k);
- /*
- * possible errors: -BCH_ERR_nocow_lock_blocked,
- * -BCH_ERR_ENOSPC_disk_reservation:
- */
- if (ret) {
- BUG_ON(rhashtable_remove_fast(&c->promote_table, &op->hash,
- bch_promote_params));
- goto err;
- }
-
- op->write.op.end_io = promote_done;
-
- return op;
-err:
- if (*rbio)
- bio_free_pages(&(*rbio)->bio);
- kfree(*rbio);
- *rbio = NULL;
- /* We may have added to the rhashtable and thus need rcu freeing: */
- kfree_rcu(op, rcu);
- bch2_write_ref_put(c, BCH_WRITE_REF_promote);
- return ERR_PTR(ret);
-}
-
-noinline
-static struct promote_op *promote_alloc(struct btree_trans *trans,
- struct bvec_iter iter,
- struct bkey_s_c k,
- struct extent_ptr_decoded *pick,
- struct bch_io_opts opts,
- unsigned flags,
- struct bch_read_bio **rbio,
- bool *bounce,
- bool *read_full,
- struct bch_io_failures *failed)
-{
- struct bch_fs *c = trans->c;
- /*
- * if failed != NULL we're not actually doing a promote, we're
- * recovering from an io/checksum error
- */
- bool promote_full = (failed ||
- *read_full ||
- READ_ONCE(c->opts.promote_whole_extents));
- /* data might have to be decompressed in the write path: */
- unsigned sectors = promote_full
- ? max(pick->crc.compressed_size, pick->crc.live_size)
- : bvec_iter_sectors(iter);
- struct bpos pos = promote_full
- ? bkey_start_pos(k.k)
- : POS(k.k->p.inode, iter.bi_sector);
- struct promote_op *promote;
- int ret;
-
- ret = should_promote(c, k, pos, opts, flags, failed);
- if (ret)
- goto nopromote;
-
- promote = __promote_alloc(trans,
- k.k->type == KEY_TYPE_reflink_v
- ? BTREE_ID_reflink
- : BTREE_ID_extents,
- k, pos, pick, opts, sectors, rbio, failed);
- ret = PTR_ERR_OR_ZERO(promote);
- if (ret)
- goto nopromote;
-
- *bounce = true;
- *read_full = promote_full;
- return promote;
-nopromote:
- trace_read_nopromote(c, ret);
- return NULL;
-}
-
-/* Read */
-
-#define READ_RETRY_AVOID 1
-#define READ_RETRY 2
-#define READ_ERR 3
-
-enum rbio_context {
- RBIO_CONTEXT_NULL,
- RBIO_CONTEXT_HIGHPRI,
- RBIO_CONTEXT_UNBOUND,
-};
-
-static inline struct bch_read_bio *
-bch2_rbio_parent(struct bch_read_bio *rbio)
-{
- return rbio->split ? rbio->parent : rbio;
-}
-
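-/*
- * Run fn immediately if we're already running in a suitable context, otherwise
- * punt it to the given workqueue:
- */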
-__always_inline
-static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
- enum rbio_context context,
- struct workqueue_struct *wq)
-{
- if (context <= rbio->context) {
- fn(&rbio->work);
- } else {
- rbio->work.func = fn;
- rbio->context = context;
- queue_work(wq, &rbio->work);
- }
-}
-
-static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
-{
- BUG_ON(rbio->bounce && !rbio->split);
-
- if (rbio->promote)
- promote_free(rbio->c, rbio->promote);
- rbio->promote = NULL;
-
- if (rbio->bounce)
- bch2_bio_free_pages_pool(rbio->c, &rbio->bio);
-
- if (rbio->split) {
- struct bch_read_bio *parent = rbio->parent;
-
- if (rbio->kmalloc)
- kfree(rbio);
- else
- bio_put(&rbio->bio);
-
- rbio = parent;
- }
-
- return rbio;
-}
-
-/*
- * Only called on a top level bch_read_bio to complete an entire read request,
- * not a split:
- */
-static void bch2_rbio_done(struct bch_read_bio *rbio)
-{
- if (rbio->start_time)
- bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read],
- rbio->start_time);
- bio_endio(&rbio->bio);
-}
-
-static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio,
- struct bvec_iter bvec_iter,
- struct bch_io_failures *failed,
- unsigned flags)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_buf sk;
- struct bkey_s_c k;
- int ret;
-
- flags &= ~BCH_READ_LAST_FRAGMENT;
- flags |= BCH_READ_MUST_CLONE;
-
- bch2_bkey_buf_init(&sk);
-
- bch2_trans_iter_init(trans, &iter, rbio->data_btree,
- rbio->read_pos, BTREE_ITER_slots);
-retry:
- bch2_trans_begin(trans);
- rbio->bio.bi_status = 0;
-
- ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
- if (ret)
- goto err;
-
- bch2_bkey_buf_reassemble(&sk, c, k);
- k = bkey_i_to_s_c(sk.k);
-
- if (!bch2_bkey_matches_ptr(c, k,
- rbio->pick.ptr,
- rbio->data_pos.offset -
- rbio->pick.crc.offset)) {
- /* extent we wanted to read no longer exists: */
- rbio->hole = true;
- goto out;
- }
-
- ret = __bch2_read_extent(trans, rbio, bvec_iter,
- rbio->read_pos,
- rbio->data_btree,
- k, 0, failed, flags);
- if (ret == READ_RETRY)
- goto retry;
- if (ret)
- goto err;
-out:
- bch2_rbio_done(rbio);
- bch2_trans_iter_exit(trans, &iter);
- bch2_trans_put(trans);
- bch2_bkey_buf_exit(&sk, c);
- return;
-err:
- rbio->bio.bi_status = BLK_STS_IOERR;
- goto out;
-}
-
-static void bch2_rbio_retry(struct work_struct *work)
-{
- struct bch_read_bio *rbio =
- container_of(work, struct bch_read_bio, work);
- struct bch_fs *c = rbio->c;
- struct bvec_iter iter = rbio->bvec_iter;
- unsigned flags = rbio->flags;
- subvol_inum inum = {
- .subvol = rbio->subvol,
- .inum = rbio->read_pos.inode,
- };
- struct bch_io_failures failed = { .nr = 0 };
-
- trace_and_count(c, read_retry, &rbio->bio);
-
- if (rbio->retry == READ_RETRY_AVOID)
- bch2_mark_io_failure(&failed, &rbio->pick);
-
- rbio->bio.bi_status = 0;
-
- rbio = bch2_rbio_free(rbio);
-
- flags |= BCH_READ_IN_RETRY;
- flags &= ~BCH_READ_MAY_PROMOTE;
-
- if (flags & BCH_READ_NODECODE) {
- bch2_read_retry_nodecode(c, rbio, iter, &failed, flags);
- } else {
- flags &= ~BCH_READ_LAST_FRAGMENT;
- flags |= BCH_READ_MUST_CLONE;
-
- __bch2_read(c, rbio, iter, inum, &failed, flags);
- }
-}
-
-static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
- blk_status_t error)
-{
- rbio->retry = retry;
-
- if (rbio->flags & BCH_READ_IN_RETRY)
- return;
-
- if (retry == READ_ERR) {
- rbio = bch2_rbio_free(rbio);
-
- rbio->bio.bi_status = error;
- bch2_rbio_done(rbio);
- } else {
- bch2_rbio_punt(rbio, bch2_rbio_retry,
- RBIO_CONTEXT_UNBOUND, system_unbound_wq);
- }
-}
-
-static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
- struct bch_read_bio *rbio)
-{
- struct bch_fs *c = rbio->c;
- u64 data_offset = rbio->data_pos.offset - rbio->pick.crc.offset;
- struct bch_extent_crc_unpacked new_crc;
- struct btree_iter iter;
- struct bkey_i *new;
- struct bkey_s_c k;
- int ret = 0;
-
- if (crc_is_compressed(rbio->pick.crc))
- return 0;
-
- k = bch2_bkey_get_iter(trans, &iter, rbio->data_btree, rbio->data_pos,
- BTREE_ITER_slots|BTREE_ITER_intent);
- if ((ret = bkey_err(k)))
- goto out;
-
- if (bversion_cmp(k.k->bversion, rbio->version) ||
- !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
- goto out;
-
- /* Extent was merged? */
- if (bkey_start_offset(k.k) < data_offset ||
- k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size)
- goto out;
-
- if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
- rbio->pick.crc, NULL, &new_crc,
- bkey_start_offset(k.k) - data_offset, k.k->size,
- rbio->pick.crc.csum_type)) {
- bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
- ret = 0;
- goto out;
- }
-
- /*
- * going to be temporarily appending another checksum entry:
- */
- new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) +
- sizeof(struct bch_extent_crc128));
- if ((ret = PTR_ERR_OR_ZERO(new)))
- goto out;
-
- bkey_reassemble(new, k);
-
- if (!bch2_bkey_narrow_crcs(new, new_crc))
- goto out;
-
- ret = bch2_trans_update(trans, &iter, new,
- BTREE_UPDATE_internal_snapshot_node);
-out:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
-{
- bch2_trans_commit_do(rbio->c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- __bch2_rbio_narrow_crcs(trans, rbio));
-}
-
-/* Inner part that may run in process context */
-static void __bch2_read_endio(struct work_struct *work)
-{
- struct bch_read_bio *rbio =
- container_of(work, struct bch_read_bio, work);
- struct bch_fs *c = rbio->c;
- struct bio *src = &rbio->bio;
- struct bio *dst = &bch2_rbio_parent(rbio)->bio;
- struct bvec_iter dst_iter = rbio->bvec_iter;
- struct bch_extent_crc_unpacked crc = rbio->pick.crc;
- struct nonce nonce = extent_nonce(rbio->version, crc);
- unsigned nofs_flags;
- struct bch_csum csum;
- int ret;
-
- nofs_flags = memalloc_nofs_save();
-
- /* Reset iterator for checksumming and copying bounced data: */
- if (rbio->bounce) {
- src->bi_iter.bi_size = crc.compressed_size << 9;
- src->bi_iter.bi_idx = 0;
- src->bi_iter.bi_bvec_done = 0;
- } else {
- src->bi_iter = rbio->bvec_iter;
- }
-
- csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
- if (bch2_crc_cmp(csum, rbio->pick.crc.csum) && !c->opts.no_data_io)
- goto csum_err;
-
- /*
- * XXX
- * We need to rework the narrow_crcs path to deliver the read completion
- * first, and then punt to a different workqueue, otherwise we're
- * holding up reads while doing btree updates which is bad for memory
- * reclaim.
- */
- if (unlikely(rbio->narrow_crcs))
- bch2_rbio_narrow_crcs(rbio);
-
- if (rbio->flags & BCH_READ_NODECODE)
- goto nodecode;
-
- /* Adjust crc to point to subset of data we want: */
- crc.offset += rbio->offset_into_extent;
- crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
-
- if (crc_is_compressed(crc)) {
- ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
- if (ret)
- goto decrypt_err;
-
- if (bch2_bio_uncompress(c, src, dst, dst_iter, crc) &&
- !c->opts.no_data_io)
- goto decompression_err;
- } else {
- /* don't need to decrypt the entire bio: */
- nonce = nonce_add(nonce, crc.offset << 9);
- bio_advance(src, crc.offset << 9);
-
- BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
- src->bi_iter.bi_size = dst_iter.bi_size;
-
- ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
- if (ret)
- goto decrypt_err;
-
- if (rbio->bounce) {
- struct bvec_iter src_iter = src->bi_iter;
-
- bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
- }
- }
-
- if (rbio->promote) {
- /*
-		 * Re-encrypt data we decrypted, so it's consistent with
- * rbio->crc:
- */
- ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
- if (ret)
- goto decrypt_err;
-
- promote_start(rbio->promote, rbio);
- rbio->promote = NULL;
- }
-nodecode:
- if (likely(!(rbio->flags & BCH_READ_IN_RETRY))) {
- rbio = bch2_rbio_free(rbio);
- bch2_rbio_done(rbio);
- }
-out:
- memalloc_nofs_restore(nofs_flags);
- return;
-csum_err:
- /*
- * Checksum error: if the bio wasn't bounced, we may have been
- * reading into buffers owned by userspace (that userspace can
- * scribble over) - retry the read, bouncing it this time:
- */
- if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
- rbio->flags |= BCH_READ_MUST_BOUNCE;
- bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
- goto out;
- }
-
- struct printbuf buf = PRINTBUF;
- buf.atomic++;
- prt_str(&buf, "data ");
- bch2_csum_err_msg(&buf, crc.csum_type, rbio->pick.crc.csum, csum);
-
- struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL;
- if (ca) {
- bch_err_inum_offset_ratelimited(ca,
- rbio->read_pos.inode,
- rbio->read_pos.offset << 9,
-			"%s", buf.buf);
- bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
- }
- printbuf_exit(&buf);
- bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
- goto out;
-decompression_err:
- bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
- rbio->read_pos.offset << 9,
- "decompression error");
- bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
- goto out;
-decrypt_err:
- bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
- rbio->read_pos.offset << 9,
- "decrypt error");
- bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
- goto out;
-}
-
-static void bch2_read_endio(struct bio *bio)
-{
- struct bch_read_bio *rbio =
- container_of(bio, struct bch_read_bio, bio);
- struct bch_fs *c = rbio->c;
- struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL;
- struct workqueue_struct *wq = NULL;
- enum rbio_context context = RBIO_CONTEXT_NULL;
-
- if (rbio->have_ioref) {
- bch2_latency_acct(ca, rbio->submit_time, READ);
- percpu_ref_put(&ca->io_ref);
- }
-
- if (!rbio->split)
- rbio->bio.bi_end_io = rbio->end_io;
-
- if (bio->bi_status) {
- if (ca) {
- bch_err_inum_offset_ratelimited(ca,
- rbio->read_pos.inode,
- rbio->read_pos.offset,
- "data read error: %s",
- bch2_blk_status_to_str(bio->bi_status));
- bch2_io_error(ca, BCH_MEMBER_ERROR_read);
- }
- bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
- return;
- }
-
- if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
- (ca && dev_ptr_stale(ca, &rbio->pick.ptr))) {
- trace_and_count(c, read_reuse_race, &rbio->bio);
-
- if (rbio->flags & BCH_READ_RETRY_IF_STALE)
- bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
- else
- bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
- return;
- }
-
- if (rbio->narrow_crcs ||
- rbio->promote ||
- crc_is_compressed(rbio->pick.crc) ||
- bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
- context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
- else if (rbio->pick.crc.csum_type)
- context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;
-
- bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
-}
-
-int __bch2_read_indirect_extent(struct btree_trans *trans,
- unsigned *offset_into_extent,
- struct bkey_buf *orig_k)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- u64 reflink_offset;
- int ret;
-
- reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k->k)->v.idx) +
- *offset_into_extent;
-
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_reflink,
- POS(0, reflink_offset), 0);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (k.k->type != KEY_TYPE_reflink_v &&
- k.k->type != KEY_TYPE_indirect_inline_data) {
- bch_err_inum_offset_ratelimited(trans->c,
- orig_k->k->k.p.inode,
- orig_k->k->k.p.offset << 9,
- "%llu len %u points to nonexistent indirect extent %llu",
- orig_k->k->k.p.offset,
- orig_k->k->k.size,
- reflink_offset);
- bch2_inconsistent_error(trans->c);
- ret = -BCH_ERR_missing_indirect_extent;
- goto err;
- }
-
- *offset_into_extent = iter.pos.offset - bkey_start_offset(k.k);
- bch2_bkey_buf_reassemble(orig_k, trans->c, k);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
- struct bch_dev *ca,
- struct bkey_s_c k,
- struct bch_extent_ptr ptr)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct printbuf buf = PRINTBUF;
- int ret;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
- PTR_BUCKET_POS(ca, &ptr),
- BTREE_ITER_cached);
-
- int gen = bucket_gen_get(ca, iter.pos.offset);
- if (gen >= 0) {
- prt_printf(&buf, "Attempting to read from stale dirty pointer:\n");
- printbuf_indent_add(&buf, 2);
-
- bch2_bkey_val_to_text(&buf, c, k);
- prt_newline(&buf);
-
- prt_printf(&buf, "memory gen: %u", gen);
-
- ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
- if (!ret) {
- prt_newline(&buf);
- bch2_bkey_val_to_text(&buf, c, k);
- }
- } else {
- prt_printf(&buf, "Attempting to read from invalid bucket %llu:%llu:\n",
- iter.pos.inode, iter.pos.offset);
- printbuf_indent_add(&buf, 2);
-
- prt_printf(&buf, "first bucket %u nbuckets %llu\n",
- ca->mi.first_bucket, ca->mi.nbuckets);
-
- bch2_bkey_val_to_text(&buf, c, k);
- prt_newline(&buf);
- }
-
- bch2_fs_inconsistent(c, "%s", buf.buf);
-
- bch2_trans_iter_exit(trans, &iter);
- printbuf_exit(&buf);
-}
-
-int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
- struct bvec_iter iter, struct bpos read_pos,
- enum btree_id data_btree, struct bkey_s_c k,
- unsigned offset_into_extent,
- struct bch_io_failures *failed, unsigned flags)
-{
- struct bch_fs *c = trans->c;
- struct extent_ptr_decoded pick;
- struct bch_read_bio *rbio = NULL;
- struct promote_op *promote = NULL;
- bool bounce = false, read_full = false, narrow_crcs = false;
- struct bpos data_pos = bkey_start_pos(k.k);
- int pick_ret;
-
- if (bkey_extent_is_inline_data(k.k)) {
- unsigned bytes = min_t(unsigned, iter.bi_size,
- bkey_inline_data_bytes(k.k));
-
- swap(iter.bi_size, bytes);
- memcpy_to_bio(&orig->bio, iter, bkey_inline_data_p(k));
- swap(iter.bi_size, bytes);
- bio_advance_iter(&orig->bio, &iter, bytes);
- zero_fill_bio_iter(&orig->bio, iter);
- goto out_read_done;
- }
-retry_pick:
- pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
-
- /* hole or reservation - just zero fill: */
- if (!pick_ret)
- goto hole;
-
- if (pick_ret < 0) {
- struct printbuf buf = PRINTBUF;
- bch2_bkey_val_to_text(&buf, c, k);
-
- bch_err_inum_offset_ratelimited(c,
- read_pos.inode, read_pos.offset << 9,
- "no device to read from: %s\n %s",
- bch2_err_str(pick_ret),
- buf.buf);
- printbuf_exit(&buf);
- goto err;
- }
-
- struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
-
- /*
- * Stale dirty pointers are treated as IO errors, but @failed isn't
- * allocated unless we're in the retry path - so if we're not in the
- * retry path, don't check here, it'll be caught in bch2_read_endio()
- * and we'll end up in the retry path:
- */
- if ((flags & BCH_READ_IN_RETRY) &&
- !pick.ptr.cached &&
- ca &&
- unlikely(dev_ptr_stale(ca, &pick.ptr))) {
- read_from_stale_dirty_pointer(trans, ca, k, pick.ptr);
- bch2_mark_io_failure(failed, &pick);
- percpu_ref_put(&ca->io_ref);
- goto retry_pick;
- }
-
- /*
- * Unlock the iterator while the btree node's lock is still in
- * cache, before doing the IO:
- */
- bch2_trans_unlock(trans);
-
- if (flags & BCH_READ_NODECODE) {
- /*
- * can happen if we retry, and the extent we were going to read
- * has been merged in the meantime:
- */
- if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS) {
- if (ca)
- percpu_ref_put(&ca->io_ref);
- goto hole;
- }
-
- iter.bi_size = pick.crc.compressed_size << 9;
- goto get_bio;
- }
-
- if (!(flags & BCH_READ_LAST_FRAGMENT) ||
- bio_flagged(&orig->bio, BIO_CHAIN))
- flags |= BCH_READ_MUST_CLONE;
-
- narrow_crcs = !(flags & BCH_READ_IN_RETRY) &&
- bch2_can_narrow_extent_crcs(k, pick.crc);
-
- if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
- flags |= BCH_READ_MUST_BOUNCE;
-
- EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
-
- if (crc_is_compressed(pick.crc) ||
- (pick.crc.csum_type != BCH_CSUM_none &&
- (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
- (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
- (flags & BCH_READ_USER_MAPPED)) ||
- (flags & BCH_READ_MUST_BOUNCE)))) {
- read_full = true;
- bounce = true;
- }
-
-	if (orig->opts.promote_target) // || failed)
- promote = promote_alloc(trans, iter, k, &pick, orig->opts, flags,
- &rbio, &bounce, &read_full, failed);
-
- if (!read_full) {
- EBUG_ON(crc_is_compressed(pick.crc));
- EBUG_ON(pick.crc.csum_type &&
- (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
- bvec_iter_sectors(iter) != pick.crc.live_size ||
- pick.crc.offset ||
- offset_into_extent));
-
- data_pos.offset += offset_into_extent;
- pick.ptr.offset += pick.crc.offset +
- offset_into_extent;
- offset_into_extent = 0;
- pick.crc.compressed_size = bvec_iter_sectors(iter);
- pick.crc.uncompressed_size = bvec_iter_sectors(iter);
- pick.crc.offset = 0;
- pick.crc.live_size = bvec_iter_sectors(iter);
- }
-get_bio:
- if (rbio) {
- /*
- * promote already allocated bounce rbio:
- * promote needs to allocate a bio big enough for uncompressing
- * data in the write path, but we're not going to use it all
- * here:
- */
- EBUG_ON(rbio->bio.bi_iter.bi_size <
- pick.crc.compressed_size << 9);
- rbio->bio.bi_iter.bi_size =
- pick.crc.compressed_size << 9;
- } else if (bounce) {
- unsigned sectors = pick.crc.compressed_size;
-
- rbio = rbio_init(bio_alloc_bioset(NULL,
- DIV_ROUND_UP(sectors, PAGE_SECTORS),
- 0,
- GFP_NOFS,
- &c->bio_read_split),
- orig->opts);
-
- bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
- rbio->bounce = true;
- rbio->split = true;
- } else if (flags & BCH_READ_MUST_CLONE) {
- /*
- * Have to clone if there were any splits, due to error
- * reporting issues (if a split errored, and retrying didn't
- * work, when it reports the error to its parent (us) we don't
- * know if the error was from our bio, and we should retry, or
- * from the whole bio, in which case we don't want to retry and
- * lose the error)
- */
- rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOFS,
- &c->bio_read_split),
- orig->opts);
- rbio->bio.bi_iter = iter;
- rbio->split = true;
- } else {
- rbio = orig;
- rbio->bio.bi_iter = iter;
- EBUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
- }
-
- EBUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);
-
- rbio->c = c;
- rbio->submit_time = local_clock();
- if (rbio->split)
- rbio->parent = orig;
- else
- rbio->end_io = orig->bio.bi_end_io;
- rbio->bvec_iter = iter;
-	rbio->offset_into_extent = offset_into_extent;
- rbio->flags = flags;
- rbio->have_ioref = ca != NULL;
- rbio->narrow_crcs = narrow_crcs;
- rbio->hole = 0;
- rbio->retry = 0;
- rbio->context = 0;
- /* XXX: only initialize this if needed */
- rbio->devs_have = bch2_bkey_devs(k);
- rbio->pick = pick;
- rbio->subvol = orig->subvol;
- rbio->read_pos = read_pos;
- rbio->data_btree = data_btree;
- rbio->data_pos = data_pos;
- rbio->version = k.k->bversion;
- rbio->promote = promote;
- INIT_WORK(&rbio->work, NULL);
-
- if (flags & BCH_READ_NODECODE)
- orig->pick = pick;
-
- rbio->bio.bi_opf = orig->bio.bi_opf;
- rbio->bio.bi_iter.bi_sector = pick.ptr.offset;
- rbio->bio.bi_end_io = bch2_read_endio;
-
- if (rbio->bounce)
- trace_and_count(c, read_bounce, &rbio->bio);
-
- this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
- bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
-
- /*
- * If it's being moved internally, we don't want to flag it as a cache
- * hit:
- */
- if (ca && pick.ptr.cached && !(flags & BCH_READ_NODECODE))
- bch2_bucket_io_time_reset(trans, pick.ptr.dev,
- PTR_BUCKET_NR(ca, &pick.ptr), READ);
-
- if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
- bio_inc_remaining(&orig->bio);
- trace_and_count(c, read_split, &orig->bio);
- }
-
- if (!rbio->pick.idx) {
- if (!rbio->have_ioref) {
- bch_err_inum_offset_ratelimited(c,
- read_pos.inode,
- read_pos.offset << 9,
- "no device to read from");
- bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
- goto out;
- }
-
- this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_user],
- bio_sectors(&rbio->bio));
- bio_set_dev(&rbio->bio, ca->disk_sb.bdev);
-
- if (unlikely(c->opts.no_data_io)) {
- if (likely(!(flags & BCH_READ_IN_RETRY)))
- bio_endio(&rbio->bio);
- } else {
- if (likely(!(flags & BCH_READ_IN_RETRY)))
- submit_bio(&rbio->bio);
- else
- submit_bio_wait(&rbio->bio);
- }
-
- /*
- * We just submitted IO which may block, we expect relock fail
- * events and shouldn't count them:
- */
- trans->notrace_relock_fail = true;
- } else {
- /* Attempting reconstruct read: */
- if (bch2_ec_read_extent(trans, rbio, k)) {
- bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
- goto out;
- }
-
- if (likely(!(flags & BCH_READ_IN_RETRY)))
- bio_endio(&rbio->bio);
- }
-out:
- if (likely(!(flags & BCH_READ_IN_RETRY))) {
- return 0;
- } else {
- int ret;
-
- rbio->context = RBIO_CONTEXT_UNBOUND;
- bch2_read_endio(&rbio->bio);
-
- ret = rbio->retry;
- rbio = bch2_rbio_free(rbio);
-
- if (ret == READ_RETRY_AVOID) {
- bch2_mark_io_failure(failed, &pick);
- ret = READ_RETRY;
- }
-
- if (!ret)
- goto out_read_done;
-
- return ret;
- }
-
-err:
- if (flags & BCH_READ_IN_RETRY)
- return READ_ERR;
-
- orig->bio.bi_status = BLK_STS_IOERR;
- goto out_read_done;
-
-hole:
- /*
- * won't normally happen in the BCH_READ_NODECODE
- * (bch2_move_extent()) path, but if we retry and the extent we wanted
- * to read no longer exists we have to signal that:
- */
- if (flags & BCH_READ_NODECODE)
- orig->hole = true;
-
- zero_fill_bio_iter(&orig->bio, iter);
-out_read_done:
- if (flags & BCH_READ_LAST_FRAGMENT)
- bch2_rbio_done(orig);
- return 0;
-}
-
-void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
- struct bvec_iter bvec_iter, subvol_inum inum,
- struct bch_io_failures *failed, unsigned flags)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_buf sk;
- struct bkey_s_c k;
- int ret;
-
- BUG_ON(flags & BCH_READ_NODECODE);
-
- bch2_bkey_buf_init(&sk);
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- POS(inum.inum, bvec_iter.bi_sector),
- BTREE_ITER_slots);
-
- while (1) {
- unsigned bytes, sectors, offset_into_extent;
- enum btree_id data_btree = BTREE_ID_extents;
-
- bch2_trans_begin(trans);
-
- u32 snapshot;
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- goto err;
-
- bch2_btree_iter_set_snapshot(&iter, snapshot);
-
- bch2_btree_iter_set_pos(&iter,
- POS(inum.inum, bvec_iter.bi_sector));
-
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- offset_into_extent = iter.pos.offset -
- bkey_start_offset(k.k);
- sectors = k.k->size - offset_into_extent;
-
- bch2_bkey_buf_reassemble(&sk, c, k);
-
- ret = bch2_read_indirect_extent(trans, &data_btree,
- &offset_into_extent, &sk);
- if (ret)
- goto err;
-
- k = bkey_i_to_s_c(sk.k);
-
- /*
- * With indirect extents, the amount of data to read is the min
- * of the original extent and the indirect extent:
- */
- sectors = min(sectors, k.k->size - offset_into_extent);
-
- bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
- swap(bvec_iter.bi_size, bytes);
-
- if (bvec_iter.bi_size == bytes)
- flags |= BCH_READ_LAST_FRAGMENT;
-
- ret = __bch2_read_extent(trans, rbio, bvec_iter, iter.pos,
- data_btree, k,
- offset_into_extent, failed, flags);
- if (ret)
- goto err;
-
- if (flags & BCH_READ_LAST_FRAGMENT)
- break;
-
- swap(bvec_iter.bi_size, bytes);
- bio_advance_iter(&rbio->bio, &bvec_iter, bytes);
-err:
- if (ret &&
- !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
- ret != READ_RETRY &&
- ret != READ_RETRY_AVOID)
- break;
- }
-
- bch2_trans_iter_exit(trans, &iter);
- bch2_trans_put(trans);
- bch2_bkey_buf_exit(&sk, c);
-
- if (ret) {
- bch_err_inum_offset_ratelimited(c, inum.inum,
- bvec_iter.bi_sector << 9,
- "read error %i from btree lookup", ret);
- rbio->bio.bi_status = BLK_STS_IOERR;
- bch2_rbio_done(rbio);
- }
-}
-
-void bch2_fs_io_read_exit(struct bch_fs *c)
-{
- if (c->promote_table.tbl)
- rhashtable_destroy(&c->promote_table);
- bioset_exit(&c->bio_read_split);
- bioset_exit(&c->bio_read);
-}
-
-int bch2_fs_io_read_init(struct bch_fs *c)
-{
- if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
- BIOSET_NEED_BVECS))
- return -BCH_ERR_ENOMEM_bio_read_init;
-
- if (bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
- BIOSET_NEED_BVECS))
- return -BCH_ERR_ENOMEM_bio_read_split_init;
-
- if (rhashtable_init(&c->promote_table, &bch_promote_params))
- return -BCH_ERR_ENOMEM_promote_table_init;
-
- return 0;
-}
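
To make the reflink translation in __bch2_read_indirect_extent() above concrete: the reflink pointer's index plus the offset into the original extent gives a position in BTREE_ID_reflink, and the offset is then recomputed relative to the start of the indirect extent found there. A standalone sketch of just that arithmetic follows; all numbers are made up for illustration and are not from the patch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t reflink_idx        = 1000;	/* reflink_p->v.idx (made up) */
	unsigned offset_into_extent = 16;	/* sectors into the original extent (made up) */

	/* position looked up in BTREE_ID_reflink: */
	uint64_t reflink_offset = reflink_idx + offset_into_extent;	/* 1016 */

	/* suppose the indirect extent found there starts at 1008
	 * (bkey_start_offset(k.k) in the real code): */
	uint64_t indirect_start = 1008;

	/* new offset, relative to the indirect extent: */
	unsigned new_offset = reflink_offset - indirect_start;		/* 8 */

	printf("read continues %u sectors into the indirect extent\n", new_offset);
	return 0;
}
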
diff --git a/fs/bcachefs/io_read.h b/fs/bcachefs/io_read.h
deleted file mode 100644
index d9c18bb7d403..000000000000
--- a/fs/bcachefs/io_read.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_IO_READ_H
-#define _BCACHEFS_IO_READ_H
-
-#include "bkey_buf.h"
-
-struct bch_read_bio {
- struct bch_fs *c;
- u64 start_time;
- u64 submit_time;
-
- /*
- * Reads will often have to be split, and if the extent being read from
- * was checksummed or compressed we'll also have to allocate bounce
- * buffers and copy the data back into the original bio.
- *
- * If we didn't have to split, we have to save and restore the original
- * bi_end_io - @split below indicates which:
- */
- union {
- struct bch_read_bio *parent;
- bio_end_io_t *end_io;
- };
-
- /*
- * Saved copy of bio->bi_iter, from submission time - allows us to
- * resubmit on IO error, and also to copy data back to the original bio
- * when we're bouncing:
- */
- struct bvec_iter bvec_iter;
-
- unsigned offset_into_extent;
-
- u16 flags;
- union {
- struct {
- u16 bounce:1,
- split:1,
- kmalloc:1,
- have_ioref:1,
- narrow_crcs:1,
- hole:1,
- retry:2,
- context:2;
- };
- u16 _state;
- };
-
- struct bch_devs_list devs_have;
-
- struct extent_ptr_decoded pick;
-
- /*
- * pos we read from - different from data_pos for indirect extents:
- */
- u32 subvol;
- struct bpos read_pos;
-
- /*
- * start pos of data we read (may not be pos of data we want) - for
- * promote, narrow extents paths:
- */
- enum btree_id data_btree;
- struct bpos data_pos;
- struct bversion version;
-
- struct promote_op *promote;
-
- struct bch_io_opts opts;
-
- struct work_struct work;
-
- struct bio bio;
-};
-
-#define to_rbio(_bio) container_of((_bio), struct bch_read_bio, bio)
-
-struct bch_devs_mask;
-struct cache_promote_op;
-struct extent_ptr_decoded;
-
-int __bch2_read_indirect_extent(struct btree_trans *, unsigned *,
- struct bkey_buf *);
-
-static inline int bch2_read_indirect_extent(struct btree_trans *trans,
- enum btree_id *data_btree,
- unsigned *offset_into_extent,
- struct bkey_buf *k)
-{
- if (k->k->k.type != KEY_TYPE_reflink_p)
- return 0;
-
- *data_btree = BTREE_ID_reflink;
- return __bch2_read_indirect_extent(trans, offset_into_extent, k);
-}
-
-enum bch_read_flags {
- BCH_READ_RETRY_IF_STALE = 1 << 0,
- BCH_READ_MAY_PROMOTE = 1 << 1,
- BCH_READ_USER_MAPPED = 1 << 2,
- BCH_READ_NODECODE = 1 << 3,
- BCH_READ_LAST_FRAGMENT = 1 << 4,
-
- /* internal: */
- BCH_READ_MUST_BOUNCE = 1 << 5,
- BCH_READ_MUST_CLONE = 1 << 6,
- BCH_READ_IN_RETRY = 1 << 7,
-};
-
-int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
- struct bvec_iter, struct bpos, enum btree_id,
- struct bkey_s_c, unsigned,
- struct bch_io_failures *, unsigned);
-
-static inline void bch2_read_extent(struct btree_trans *trans,
- struct bch_read_bio *rbio, struct bpos read_pos,
- enum btree_id data_btree, struct bkey_s_c k,
- unsigned offset_into_extent, unsigned flags)
-{
- __bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
- data_btree, k, offset_into_extent, NULL, flags);
-}
-
-void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
- subvol_inum, struct bch_io_failures *, unsigned flags);
-
-static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
- subvol_inum inum)
-{
- struct bch_io_failures failed = { .nr = 0 };
-
- BUG_ON(rbio->_state);
-
- rbio->c = c;
- rbio->start_time = local_clock();
- rbio->subvol = inum.subvol;
-
- __bch2_read(c, rbio, rbio->bio.bi_iter, inum, &failed,
- BCH_READ_RETRY_IF_STALE|
- BCH_READ_MAY_PROMOTE|
- BCH_READ_USER_MAPPED);
-}
-
-static inline struct bch_read_bio *rbio_init(struct bio *bio,
- struct bch_io_opts opts)
-{
- struct bch_read_bio *rbio = to_rbio(bio);
-
- rbio->_state = 0;
- rbio->promote = NULL;
- rbio->opts = opts;
- return rbio;
-}
-
-void bch2_fs_io_read_exit(struct bch_fs *);
-int bch2_fs_io_read_init(struct bch_fs *);
-
-#endif /* _BCACHEFS_IO_READ_H */
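
For orientation, the header just removed was the whole external interface to the read path: a caller embeds its bio in a struct bch_read_bio, initializes it with rbio_init(), and hands it to bch2_read(). A hedged sketch of such a caller follows; example_read_sectors() is invented for illustration and assumes the bio was allocated from c->bio_read (so rbio_init()'s container_of() is valid) with bi_iter and bi_end_io already set up.

/* Illustrative sketch only, not part of bcachefs. */
static void example_read_sectors(struct bch_fs *c, struct bio *bio,
				 subvol_inum inum, struct bch_io_opts opts)
{
	/* bio must live inside a struct bch_read_bio (e.g. from c->bio_read) */
	struct bch_read_bio *rbio = rbio_init(bio, opts);

	/*
	 * bch2_read() fills in rbio->c, start_time and subvol, then walks the
	 * extents btree fragment by fragment via __bch2_read(), retrying
	 * stale cached pointers and promoting to the promote target where the
	 * default flags allow.
	 */
	bch2_read(c, rbio, inum);
}
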
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
deleted file mode 100644
index 96720adcfee0..000000000000
--- a/fs/bcachefs/io_write.c
+++ /dev/null
@@ -1,1689 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
- * Copyright 2012 Google, Inc.
- */
-
-#include "bcachefs.h"
-#include "alloc_foreground.h"
-#include "bkey_buf.h"
-#include "bset.h"
-#include "btree_update.h"
-#include "buckets.h"
-#include "checksum.h"
-#include "clock.h"
-#include "compress.h"
-#include "debug.h"
-#include "ec.h"
-#include "error.h"
-#include "extent_update.h"
-#include "inode.h"
-#include "io_write.h"
-#include "journal.h"
-#include "keylist.h"
-#include "move.h"
-#include "nocow_locking.h"
-#include "rebalance.h"
-#include "subvolume.h"
-#include "super.h"
-#include "super-io.h"
-#include "trace.h"
-
-#include <linux/blkdev.h>
-#include <linux/prefetch.h>
-#include <linux/random.h>
-#include <linux/sched/mm.h>
-
-#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
-
-static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
- u64 now, int rw)
-{
- u64 latency_capable =
- ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
- /* ideally we'd be taking into account the device's variance here: */
- u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
- s64 latency_over = io_latency - latency_threshold;
-
- if (latency_threshold && latency_over > 0) {
- /*
- * bump up congested by approximately latency_over * 4 /
- * latency_threshold - we don't need much accuracy here so don't
- * bother with the divide:
- */
- if (atomic_read(&ca->congested) < CONGESTED_MAX)
- atomic_add(latency_over >>
- max_t(int, ilog2(latency_threshold) - 2, 0),
- &ca->congested);
-
- ca->congested_last = now;
- } else if (atomic_read(&ca->congested) > 0) {
- atomic_dec(&ca->congested);
- }
-}
-
-void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
-{
- atomic64_t *latency = &ca->cur_latency[rw];
- u64 now = local_clock();
- u64 io_latency = time_after64(now, submit_time)
- ? now - submit_time
- : 0;
- u64 old, new;
-
- old = atomic64_read(latency);
- do {
- /*
- * If the io latency was reasonably close to the current
- * latency, skip doing the update and atomic operation - most of
- * the time:
- */
- if (abs((int) (old - io_latency)) < (old >> 1) &&
- now & ~(~0U << 5))
- break;
-
- new = ewma_add(old, io_latency, 5);
- } while (!atomic64_try_cmpxchg(latency, &old, new));
-
- bch2_congested_acct(ca, io_latency, now, rw);
-
- __bch2_time_stats_update(&ca->io_latency[rw].stats, submit_time, now);
-}
-
-#endif
-
-/* Allocate, free from mempool: */
-
-void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
-{
- struct bvec_iter_all iter;
- struct bio_vec *bv;
-
- bio_for_each_segment_all(bv, bio, iter)
- if (bv->bv_page != ZERO_PAGE(0))
- mempool_free(bv->bv_page, &c->bio_bounce_pages);
- bio->bi_vcnt = 0;
-}
-
-static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
-{
- struct page *page;
-
- if (likely(!*using_mempool)) {
- page = alloc_page(GFP_NOFS);
- if (unlikely(!page)) {
- mutex_lock(&c->bio_bounce_pages_lock);
- *using_mempool = true;
- goto pool_alloc;
-
- }
- } else {
-pool_alloc:
- page = mempool_alloc(&c->bio_bounce_pages, GFP_NOFS);
- }
-
- return page;
-}
-
-void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
- size_t size)
-{
- bool using_mempool = false;
-
- while (size) {
- struct page *page = __bio_alloc_page_pool(c, &using_mempool);
- unsigned len = min_t(size_t, PAGE_SIZE, size);
-
- BUG_ON(!bio_add_page(bio, page, len, 0));
- size -= len;
- }
-
- if (using_mempool)
- mutex_unlock(&c->bio_bounce_pages_lock);
-}
-
-/* Extent update path: */
-
-int bch2_sum_sector_overwrites(struct btree_trans *trans,
- struct btree_iter *extent_iter,
- struct bkey_i *new,
- bool *usage_increasing,
- s64 *i_sectors_delta,
- s64 *disk_sectors_delta)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c old;
- unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new));
- bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new));
- int ret = 0;
-
- *usage_increasing = false;
- *i_sectors_delta = 0;
- *disk_sectors_delta = 0;
-
- bch2_trans_copy_iter(&iter, extent_iter);
-
- for_each_btree_key_upto_continue_norestart(iter,
- new->k.p, BTREE_ITER_slots, old, ret) {
- s64 sectors = min(new->k.p.offset, old.k->p.offset) -
- max(bkey_start_offset(&new->k),
- bkey_start_offset(old.k));
-
- *i_sectors_delta += sectors *
- (bkey_extent_is_allocation(&new->k) -
- bkey_extent_is_allocation(old.k));
-
- *disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
- *disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
- ? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
- : 0;
-
- if (!*usage_increasing &&
- (new->k.p.snapshot != old.k->p.snapshot ||
- new_replicas > bch2_bkey_replicas(c, old) ||
- (!new_compressed && bch2_bkey_sectors_compressed(old))))
- *usage_increasing = true;
-
- if (bkey_ge(old.k->p, new->k.p))
- break;
- }
-
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
- struct btree_iter *extent_iter,
- u64 new_i_size,
- s64 i_sectors_delta)
-{
- /*
- * Crazy performance optimization:
- * Every extent update needs to also update the inode: the inode trigger
- * will set bi->journal_seq to the journal sequence number of this
- * transaction - for fsync.
- *
- * But if that's the only reason we're updating the inode (we're not
- * updating bi_size or bi_sectors), then we don't need the inode update
- * to be journalled - if we crash, the bi_journal_seq update will be
- * lost, but that's fine.
- */
- unsigned inode_update_flags = BTREE_UPDATE_nojournal;
-
- struct btree_iter iter;
- struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
- SPOS(0,
- extent_iter->pos.inode,
- extent_iter->snapshot),
- BTREE_ITER_cached);
- int ret = bkey_err(k);
- if (unlikely(ret))
- return ret;
-
- /*
- * varint_decode_fast(), in the inode .invalid method, reads up to 7
- * bytes past the end of the buffer:
- */
- struct bkey_i *k_mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + 8);
- ret = PTR_ERR_OR_ZERO(k_mut);
- if (unlikely(ret))
- goto err;
-
- bkey_reassemble(k_mut, k);
-
- if (unlikely(k_mut->k.type != KEY_TYPE_inode_v3)) {
- k_mut = bch2_inode_to_v3(trans, k_mut);
- ret = PTR_ERR_OR_ZERO(k_mut);
- if (unlikely(ret))
- goto err;
- }
-
- struct bkey_i_inode_v3 *inode = bkey_i_to_inode_v3(k_mut);
-
- if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_i_size_dirty) &&
- new_i_size > le64_to_cpu(inode->v.bi_size)) {
- inode->v.bi_size = cpu_to_le64(new_i_size);
- inode_update_flags = 0;
- }
-
- if (i_sectors_delta) {
- le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta);
- inode_update_flags = 0;
- }
-
- if (inode->k.p.snapshot != iter.snapshot) {
- inode->k.p.snapshot = iter.snapshot;
- inode_update_flags = 0;
- }
-
- ret = bch2_trans_update(trans, &iter, &inode->k_i,
- BTREE_UPDATE_internal_snapshot_node|
- inode_update_flags);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-int bch2_extent_update(struct btree_trans *trans,
- subvol_inum inum,
- struct btree_iter *iter,
- struct bkey_i *k,
- struct disk_reservation *disk_res,
- u64 new_i_size,
- s64 *i_sectors_delta_total,
- bool check_enospc)
-{
- struct bpos next_pos;
- bool usage_increasing;
- s64 i_sectors_delta = 0, disk_sectors_delta = 0;
- int ret;
-
- /*
-	 * This traverses the iterator without changing iter->path->pos to
- * search_key() (which is pos + 1 for extents): we want there to be a
- * path already traversed at iter->pos because
- * bch2_trans_extent_update() will use it to attempt extent merging
- */
- ret = __bch2_btree_iter_traverse(iter);
- if (ret)
- return ret;
-
- ret = bch2_extent_trim_atomic(trans, iter, k);
- if (ret)
- return ret;
-
- next_pos = k->k.p;
-
- ret = bch2_sum_sector_overwrites(trans, iter, k,
- &usage_increasing,
- &i_sectors_delta,
- &disk_sectors_delta);
- if (ret)
- return ret;
-
- if (disk_res &&
- disk_sectors_delta > (s64) disk_res->sectors) {
- ret = bch2_disk_reservation_add(trans->c, disk_res,
- disk_sectors_delta - disk_res->sectors,
- !check_enospc || !usage_increasing
- ? BCH_DISK_RESERVATION_NOFAIL : 0);
- if (ret)
- return ret;
- }
-
- /*
- * Note:
- * We always have to do an inode update - even when i_size/i_sectors
- * aren't changing - for fsync to work properly; fsync relies on
- * inode->bi_journal_seq which is updated by the trigger code:
- */
- ret = bch2_extent_update_i_size_sectors(trans, iter,
- min(k->k.p.offset << 9, new_i_size),
- i_sectors_delta) ?:
- bch2_trans_update(trans, iter, k, 0) ?:
- bch2_trans_commit(trans, disk_res, NULL,
- BCH_TRANS_COMMIT_no_check_rw|
- BCH_TRANS_COMMIT_no_enospc);
- if (unlikely(ret))
- return ret;
-
- if (i_sectors_delta_total)
- *i_sectors_delta_total += i_sectors_delta;
- bch2_btree_iter_set_pos(iter, next_pos);
- return 0;
-}
-
-static int bch2_write_index_default(struct bch_write_op *op)
-{
- struct bch_fs *c = op->c;
- struct bkey_buf sk;
- struct keylist *keys = &op->insert_keys;
- struct bkey_i *k = bch2_keylist_front(keys);
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- subvol_inum inum = {
- .subvol = op->subvol,
- .inum = k->k.p.inode,
- };
- int ret;
-
- BUG_ON(!inum.subvol);
-
- bch2_bkey_buf_init(&sk);
-
- do {
- bch2_trans_begin(trans);
-
- k = bch2_keylist_front(keys);
- bch2_bkey_buf_copy(&sk, c, k);
-
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol,
- &sk.k->k.p.snapshot);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret)
- break;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- bkey_start_pos(&sk.k->k),
- BTREE_ITER_slots|BTREE_ITER_intent);
-
- ret = bch2_bkey_set_needs_rebalance(c, sk.k, &op->opts) ?:
- bch2_extent_update(trans, inum, &iter, sk.k,
- &op->res,
- op->new_i_size, &op->i_sectors_delta,
- op->flags & BCH_WRITE_CHECK_ENOSPC);
- bch2_trans_iter_exit(trans, &iter);
-
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret)
- break;
-
- if (bkey_ge(iter.pos, k->k.p))
- bch2_keylist_pop_front(&op->insert_keys);
- else
- bch2_cut_front(iter.pos, k);
- } while (!bch2_keylist_empty(keys));
-
- bch2_trans_put(trans);
- bch2_bkey_buf_exit(&sk, c);
-
- return ret;
-}
-
-/* Writes */
-
-void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
- enum bch_data_type type,
- const struct bkey_i *k,
- bool nocow)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
- struct bch_write_bio *n;
-
- BUG_ON(c->opts.nochanges);
-
- bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = nocow
- ? bch2_dev_have_ref(c, ptr->dev)
- : bch2_dev_get_ioref(c, ptr->dev, type == BCH_DATA_btree ? READ : WRITE);
-
- if (to_entry(ptr + 1) < ptrs.end) {
- n = to_wbio(bio_alloc_clone(NULL, &wbio->bio, GFP_NOFS, &c->replica_set));
-
- n->bio.bi_end_io = wbio->bio.bi_end_io;
- n->bio.bi_private = wbio->bio.bi_private;
- n->parent = wbio;
- n->split = true;
- n->bounce = false;
- n->put_bio = true;
- n->bio.bi_opf = wbio->bio.bi_opf;
- bio_inc_remaining(&wbio->bio);
- } else {
- n = wbio;
- n->split = false;
- }
-
- n->c = c;
- n->dev = ptr->dev;
- n->have_ioref = ca != NULL;
- n->nocow = nocow;
- n->submit_time = local_clock();
- n->inode_offset = bkey_start_offset(&k->k);
- if (nocow)
- n->nocow_bucket = PTR_BUCKET_NR(ca, ptr);
- n->bio.bi_iter.bi_sector = ptr->offset;
-
- if (likely(n->have_ioref)) {
- this_cpu_add(ca->io_done->sectors[WRITE][type],
- bio_sectors(&n->bio));
-
- bio_set_dev(&n->bio, ca->disk_sb.bdev);
-
- if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) {
- bio_endio(&n->bio);
- continue;
- }
-
- submit_bio(&n->bio);
- } else {
- n->bio.bi_status = BLK_STS_REMOVED;
- bio_endio(&n->bio);
- }
- }
-}
-
-static void __bch2_write(struct bch_write_op *);
-
-static void bch2_write_done(struct closure *cl)
-{
- struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
- struct bch_fs *c = op->c;
-
- EBUG_ON(op->open_buckets.nr);
-
- bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
- bch2_disk_reservation_put(c, &op->res);
-
- if (!(op->flags & BCH_WRITE_MOVE))
- bch2_write_ref_put(c, BCH_WRITE_REF_write);
- bch2_keylist_free(&op->insert_keys, op->inline_keys);
-
- EBUG_ON(cl->parent);
- closure_debug_destroy(cl);
- if (op->end_io)
- op->end_io(op);
-}
-
-static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
-{
- struct keylist *keys = &op->insert_keys;
- struct bkey_i *src, *dst = keys->keys, *n;
-
- for (src = keys->keys; src != keys->top; src = n) {
- n = bkey_next(src);
-
- if (bkey_extent_is_direct_data(&src->k)) {
- bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
- test_bit(ptr->dev, op->failed.d));
-
- if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
- return -EIO;
- }
-
- if (dst != src)
- memmove_u64s_down(dst, src, src->k.u64s);
- dst = bkey_next(dst);
- }
-
- keys->top = dst;
- return 0;
-}
-
-/**
- * __bch2_write_index - after a write, update index to point to new data
- * @op: bch_write_op to process
- */
-static void __bch2_write_index(struct bch_write_op *op)
-{
- struct bch_fs *c = op->c;
- struct keylist *keys = &op->insert_keys;
- unsigned dev;
- int ret = 0;
-
- if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
- ret = bch2_write_drop_io_error_ptrs(op);
- if (ret)
- goto err;
- }
-
- if (!bch2_keylist_empty(keys)) {
- u64 sectors_start = keylist_sectors(keys);
-
- ret = !(op->flags & BCH_WRITE_MOVE)
- ? bch2_write_index_default(op)
- : bch2_data_update_index_update(op);
-
- BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
- BUG_ON(keylist_sectors(keys) && !ret);
-
- op->written += sectors_start - keylist_sectors(keys);
-
- if (ret && !bch2_err_matches(ret, EROFS)) {
- struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);
-
- bch_err_inum_offset_ratelimited(c,
- insert->k.p.inode, insert->k.p.offset << 9,
- "%s write error while doing btree update: %s",
- op->flags & BCH_WRITE_MOVE ? "move" : "user",
- bch2_err_str(ret));
- }
-
- if (ret)
- goto err;
- }
-out:
-	/* If a bucket wasn't written, we can't erasure code it: */
- for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
- bch2_open_bucket_write_error(c, &op->open_buckets, dev);
-
- bch2_open_buckets_put(c, &op->open_buckets);
- return;
-err:
- keys->top = keys->keys;
- op->error = ret;
- op->flags |= BCH_WRITE_SUBMITTED;
- goto out;
-}
-
-static inline void __wp_update_state(struct write_point *wp, enum write_point_state state)
-{
- if (state != wp->state) {
- u64 now = ktime_get_ns();
-
- if (wp->last_state_change &&
- time_after64(now, wp->last_state_change))
- wp->time[wp->state] += now - wp->last_state_change;
- wp->state = state;
- wp->last_state_change = now;
- }
-}
-
-static inline void wp_update_state(struct write_point *wp, bool running)
-{
- enum write_point_state state;
-
- state = running ? WRITE_POINT_running :
- !list_empty(&wp->writes) ? WRITE_POINT_waiting_io
- : WRITE_POINT_stopped;
-
- __wp_update_state(wp, state);
-}
-
-static CLOSURE_CALLBACK(bch2_write_index)
-{
- closure_type(op, struct bch_write_op, cl);
- struct write_point *wp = op->wp;
- struct workqueue_struct *wq = index_update_wq(op);
- unsigned long flags;
-
- if ((op->flags & BCH_WRITE_SUBMITTED) &&
- (op->flags & BCH_WRITE_MOVE))
- bch2_bio_free_pages_pool(op->c, &op->wbio.bio);
-
- spin_lock_irqsave(&wp->writes_lock, flags);
- if (wp->state == WRITE_POINT_waiting_io)
- __wp_update_state(wp, WRITE_POINT_waiting_work);
- list_add_tail(&op->wp_list, &wp->writes);
-	spin_unlock_irqrestore(&wp->writes_lock, flags);
-
- queue_work(wq, &wp->index_update_work);
-}
-
-static inline void bch2_write_queue(struct bch_write_op *op, struct write_point *wp)
-{
- op->wp = wp;
-
- if (wp->state == WRITE_POINT_stopped) {
- spin_lock_irq(&wp->writes_lock);
- __wp_update_state(wp, WRITE_POINT_waiting_io);
- spin_unlock_irq(&wp->writes_lock);
- }
-}
-
-void bch2_write_point_do_index_updates(struct work_struct *work)
-{
- struct write_point *wp =
- container_of(work, struct write_point, index_update_work);
- struct bch_write_op *op;
-
- while (1) {
- spin_lock_irq(&wp->writes_lock);
- op = list_first_entry_or_null(&wp->writes, struct bch_write_op, wp_list);
- if (op)
- list_del(&op->wp_list);
- wp_update_state(wp, op != NULL);
- spin_unlock_irq(&wp->writes_lock);
-
- if (!op)
- break;
-
- op->flags |= BCH_WRITE_IN_WORKER;
-
- __bch2_write_index(op);
-
- if (!(op->flags & BCH_WRITE_SUBMITTED))
- __bch2_write(op);
- else
- bch2_write_done(&op->cl);
- }
-}
-
-static void bch2_write_endio(struct bio *bio)
-{
- struct closure *cl = bio->bi_private;
- struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
- struct bch_write_bio *wbio = to_wbio(bio);
- struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
- struct bch_fs *c = wbio->c;
- struct bch_dev *ca = wbio->have_ioref
- ? bch2_dev_have_ref(c, wbio->dev)
- : NULL;
-
- if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
- op->pos.inode,
- wbio->inode_offset << 9,
- "data write error: %s",
- bch2_blk_status_to_str(bio->bi_status))) {
- set_bit(wbio->dev, op->failed.d);
- op->flags |= BCH_WRITE_IO_ERROR;
- }
-
- if (wbio->nocow) {
- bch2_bucket_nocow_unlock(&c->nocow_locks,
- POS(ca->dev_idx, wbio->nocow_bucket),
- BUCKET_NOCOW_LOCK_UPDATE);
- set_bit(wbio->dev, op->devs_need_flush->d);
- }
-
- if (wbio->have_ioref) {
- bch2_latency_acct(ca, wbio->submit_time, WRITE);
- percpu_ref_put(&ca->io_ref);
- }
-
- if (wbio->bounce)
- bch2_bio_free_pages_pool(c, bio);
-
- if (wbio->put_bio)
- bio_put(bio);
-
- if (parent)
- bio_endio(&parent->bio);
- else
- closure_put(cl);
-}
-
-static void init_append_extent(struct bch_write_op *op,
- struct write_point *wp,
- struct bversion version,
- struct bch_extent_crc_unpacked crc)
-{
- struct bkey_i_extent *e;
-
- op->pos.offset += crc.uncompressed_size;
-
- e = bkey_extent_init(op->insert_keys.top);
- e->k.p = op->pos;
- e->k.size = crc.uncompressed_size;
- e->k.bversion = version;
-
- if (crc.csum_type ||
- crc.compression_type ||
- crc.nonce)
- bch2_extent_crc_append(&e->k_i, crc);
-
- bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size,
- op->flags & BCH_WRITE_CACHED);
-
- bch2_keylist_push(&op->insert_keys);
-}
-
-static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
- struct write_point *wp,
- struct bio *src,
- bool *page_alloc_failed,
- void *buf)
-{
- struct bch_write_bio *wbio;
- struct bio *bio;
- unsigned output_available =
- min(wp->sectors_free << 9, src->bi_iter.bi_size);
- unsigned pages = DIV_ROUND_UP(output_available +
- (buf
- ? ((unsigned long) buf & (PAGE_SIZE - 1))
- : 0), PAGE_SIZE);
-
- pages = min(pages, BIO_MAX_VECS);
-
- bio = bio_alloc_bioset(NULL, pages, 0,
- GFP_NOFS, &c->bio_write);
- wbio = wbio_init(bio);
- wbio->put_bio = true;
- /* copy WRITE_SYNC flag */
- wbio->bio.bi_opf = src->bi_opf;
-
- if (buf) {
- bch2_bio_map(bio, buf, output_available);
- return bio;
- }
-
- wbio->bounce = true;
-
- /*
- * We can't use mempool for more than c->sb.encoded_extent_max
- * worth of pages, but we'd like to allocate more if we can:
- */
- bch2_bio_alloc_pages_pool(c, bio,
- min_t(unsigned, output_available,
- c->opts.encoded_extent_max));
-
- if (bio->bi_iter.bi_size < output_available)
- *page_alloc_failed =
- bch2_bio_alloc_pages(bio,
- output_available -
- bio->bi_iter.bi_size,
- GFP_NOFS) != 0;
-
- return bio;
-}
-
-static int bch2_write_rechecksum(struct bch_fs *c,
- struct bch_write_op *op,
- unsigned new_csum_type)
-{
- struct bio *bio = &op->wbio.bio;
- struct bch_extent_crc_unpacked new_crc;
- int ret;
-
- /* bch2_rechecksum_bio() can't encrypt or decrypt data: */
-
- if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
- bch2_csum_type_is_encryption(new_csum_type))
- new_csum_type = op->crc.csum_type;
-
- ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
- NULL, &new_crc,
- op->crc.offset, op->crc.live_size,
- new_csum_type);
- if (ret)
- return ret;
-
- bio_advance(bio, op->crc.offset << 9);
- bio->bi_iter.bi_size = op->crc.live_size << 9;
- op->crc = new_crc;
- return 0;
-}
-
-static int bch2_write_decrypt(struct bch_write_op *op)
-{
- struct bch_fs *c = op->c;
- struct nonce nonce = extent_nonce(op->version, op->crc);
- struct bch_csum csum;
- int ret;
-
- if (!bch2_csum_type_is_encryption(op->crc.csum_type))
- return 0;
-
- /*
- * If we need to decrypt data in the write path, we'll no longer be able
- * to verify the existing checksum (poly1305 mac, in this case) after
- * it's decrypted - this is the last point we'll be able to reverify the
- * checksum:
- */
- csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
- if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
- return -EIO;
-
- ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
- op->crc.csum_type = 0;
- op->crc.csum = (struct bch_csum) { 0, 0 };
- return ret;
-}
-
-static enum prep_encoded_ret {
- PREP_ENCODED_OK,
- PREP_ENCODED_ERR,
- PREP_ENCODED_CHECKSUM_ERR,
- PREP_ENCODED_DO_WRITE,
-} bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
-{
- struct bch_fs *c = op->c;
- struct bio *bio = &op->wbio.bio;
-
- if (!(op->flags & BCH_WRITE_DATA_ENCODED))
- return PREP_ENCODED_OK;
-
- BUG_ON(bio_sectors(bio) != op->crc.compressed_size);
-
- /* Can we just write the entire extent as is? */
- if (op->crc.uncompressed_size == op->crc.live_size &&
- op->crc.uncompressed_size <= c->opts.encoded_extent_max >> 9 &&
- op->crc.compressed_size <= wp->sectors_free &&
- (op->crc.compression_type == bch2_compression_opt_to_type(op->compression_opt) ||
- op->incompressible)) {
- if (!crc_is_compressed(op->crc) &&
- op->csum_type != op->crc.csum_type &&
- bch2_write_rechecksum(c, op, op->csum_type) &&
- !c->opts.no_data_io)
- return PREP_ENCODED_CHECKSUM_ERR;
-
- return PREP_ENCODED_DO_WRITE;
- }
-
- /*
- * If the data is compressed and we couldn't write the entire extent as
- * is, we have to decompress it:
- */
- if (crc_is_compressed(op->crc)) {
- struct bch_csum csum;
-
- if (bch2_write_decrypt(op))
- return PREP_ENCODED_CHECKSUM_ERR;
-
- /* Last point we can still verify checksum: */
- csum = bch2_checksum_bio(c, op->crc.csum_type,
- extent_nonce(op->version, op->crc),
- bio);
- if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
- return PREP_ENCODED_CHECKSUM_ERR;
-
- if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
- return PREP_ENCODED_ERR;
- }
-
- /*
- * No longer have compressed data after this point - data might be
- * encrypted:
- */
-
- /*
- * If the data is checksummed and we're only writing a subset,
- * rechecksum and adjust bio to point to currently live data:
- */
- if ((op->crc.live_size != op->crc.uncompressed_size ||
- op->crc.csum_type != op->csum_type) &&
- bch2_write_rechecksum(c, op, op->csum_type) &&
- !c->opts.no_data_io)
- return PREP_ENCODED_CHECKSUM_ERR;
-
- /*
- * If we want to compress the data, it has to be decrypted:
- */
- if ((op->compression_opt ||
- bch2_csum_type_is_encryption(op->crc.csum_type) !=
- bch2_csum_type_is_encryption(op->csum_type)) &&
- bch2_write_decrypt(op))
- return PREP_ENCODED_CHECKSUM_ERR;
-
- return PREP_ENCODED_OK;
-}
-
-static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
- struct bio **_dst)
-{
- struct bch_fs *c = op->c;
- struct bio *src = &op->wbio.bio, *dst = src;
- struct bvec_iter saved_iter;
- void *ec_buf;
- unsigned total_output = 0, total_input = 0;
- bool bounce = false;
- bool page_alloc_failed = false;
- int ret, more = 0;
-
- BUG_ON(!bio_sectors(src));
-
- ec_buf = bch2_writepoint_ec_buf(c, wp);
-
- switch (bch2_write_prep_encoded_data(op, wp)) {
- case PREP_ENCODED_OK:
- break;
- case PREP_ENCODED_ERR:
- ret = -EIO;
- goto err;
- case PREP_ENCODED_CHECKSUM_ERR:
- goto csum_err;
- case PREP_ENCODED_DO_WRITE:
- /* XXX look for bug here */
- if (ec_buf) {
- dst = bch2_write_bio_alloc(c, wp, src,
- &page_alloc_failed,
- ec_buf);
- bio_copy_data(dst, src);
- bounce = true;
- }
- init_append_extent(op, wp, op->version, op->crc);
- goto do_write;
- }
-
- if (ec_buf ||
- op->compression_opt ||
- (op->csum_type &&
- !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
- (bch2_csum_type_is_encryption(op->csum_type) &&
- !(op->flags & BCH_WRITE_PAGES_OWNED))) {
- dst = bch2_write_bio_alloc(c, wp, src,
- &page_alloc_failed,
- ec_buf);
- bounce = true;
- }
-
- saved_iter = dst->bi_iter;
-
- do {
- struct bch_extent_crc_unpacked crc = { 0 };
- struct bversion version = op->version;
- size_t dst_len = 0, src_len = 0;
-
- if (page_alloc_failed &&
- dst->bi_iter.bi_size < (wp->sectors_free << 9) &&
- dst->bi_iter.bi_size < c->opts.encoded_extent_max)
- break;
-
- BUG_ON(op->compression_opt &&
- (op->flags & BCH_WRITE_DATA_ENCODED) &&
- bch2_csum_type_is_encryption(op->crc.csum_type));
- BUG_ON(op->compression_opt && !bounce);
-
- crc.compression_type = op->incompressible
- ? BCH_COMPRESSION_TYPE_incompressible
- : op->compression_opt
- ? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
- op->compression_opt)
- : 0;
- if (!crc_is_compressed(crc)) {
- dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
- dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);
-
- if (op->csum_type)
- dst_len = min_t(unsigned, dst_len,
- c->opts.encoded_extent_max);
-
- if (bounce) {
- swap(dst->bi_iter.bi_size, dst_len);
- bio_copy_data(dst, src);
- swap(dst->bi_iter.bi_size, dst_len);
- }
-
- src_len = dst_len;
- }
-
- BUG_ON(!src_len || !dst_len);
-
- if (bch2_csum_type_is_encryption(op->csum_type)) {
- if (bversion_zero(version)) {
- version.lo = atomic64_inc_return(&c->key_version);
- } else {
- crc.nonce = op->nonce;
- op->nonce += src_len >> 9;
- }
- }
-
- if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
- !crc_is_compressed(crc) &&
- bch2_csum_type_is_encryption(op->crc.csum_type) ==
- bch2_csum_type_is_encryption(op->csum_type)) {
- u8 compression_type = crc.compression_type;
- u16 nonce = crc.nonce;
- /*
- * Note: when we're using rechecksum(), we need to be
- * checksumming @src because it has all the data our
- * existing checksum covers - if we bounced (because we
- * were trying to compress), @dst will only have the
- * part of the data the new checksum will cover.
- *
- * But normally we want to be checksumming post bounce,
- * because part of the reason for bouncing is so the
- * data can't be modified (by userspace) while it's in
- * flight.
- */
- if (bch2_rechecksum_bio(c, src, version, op->crc,
- &crc, &op->crc,
- src_len >> 9,
- bio_sectors(src) - (src_len >> 9),
- op->csum_type))
- goto csum_err;
- /*
-			 * bch2_rechecksum_bio() sets compression_type on crc from op->crc,
- * this isn't always correct as sometimes we're changing
- * an extent from uncompressed to incompressible.
- */
- crc.compression_type = compression_type;
- crc.nonce = nonce;
- } else {
- if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
- bch2_rechecksum_bio(c, src, version, op->crc,
- NULL, &op->crc,
- src_len >> 9,
- bio_sectors(src) - (src_len >> 9),
- op->crc.csum_type))
- goto csum_err;
-
- crc.compressed_size = dst_len >> 9;
- crc.uncompressed_size = src_len >> 9;
- crc.live_size = src_len >> 9;
-
- swap(dst->bi_iter.bi_size, dst_len);
- ret = bch2_encrypt_bio(c, op->csum_type,
- extent_nonce(version, crc), dst);
- if (ret)
- goto err;
-
- crc.csum = bch2_checksum_bio(c, op->csum_type,
- extent_nonce(version, crc), dst);
- crc.csum_type = op->csum_type;
- swap(dst->bi_iter.bi_size, dst_len);
- }
-
- init_append_extent(op, wp, version, crc);
-
- if (dst != src)
- bio_advance(dst, dst_len);
- bio_advance(src, src_len);
- total_output += dst_len;
- total_input += src_len;
- } while (dst->bi_iter.bi_size &&
- src->bi_iter.bi_size &&
- wp->sectors_free &&
- !bch2_keylist_realloc(&op->insert_keys,
- op->inline_keys,
- ARRAY_SIZE(op->inline_keys),
- BKEY_EXTENT_U64s_MAX));
-
- more = src->bi_iter.bi_size != 0;
-
- dst->bi_iter = saved_iter;
-
- if (dst == src && more) {
- BUG_ON(total_output != total_input);
-
- dst = bio_split(src, total_input >> 9,
- GFP_NOFS, &c->bio_write);
- wbio_init(dst)->put_bio = true;
- /* copy WRITE_SYNC flag */
- dst->bi_opf = src->bi_opf;
- }
-
- dst->bi_iter.bi_size = total_output;
-do_write:
- *_dst = dst;
- return more;
-csum_err:
- bch_err_inum_offset_ratelimited(c,
- op->pos.inode,
- op->pos.offset << 9,
- "%s write error: error verifying existing checksum while rewriting existing data (memory corruption?)",
- op->flags & BCH_WRITE_MOVE ? "move" : "user");
- ret = -EIO;
-err:
- if (to_wbio(dst)->bounce)
- bch2_bio_free_pages_pool(c, dst);
- if (to_wbio(dst)->put_bio)
- bio_put(dst);
-
- return ret;
-}
-
-static bool bch2_extent_is_writeable(struct bch_write_op *op,
- struct bkey_s_c k)
-{
- struct bch_fs *c = op->c;
- struct bkey_s_c_extent e;
- struct extent_ptr_decoded p;
- const union bch_extent_entry *entry;
- unsigned replicas = 0;
-
- if (k.k->type != KEY_TYPE_extent)
- return false;
-
- e = bkey_s_c_to_extent(k);
-
- rcu_read_lock();
- extent_for_each_ptr_decode(e, p, entry) {
- if (crc_is_encoded(p.crc) || p.has_ec) {
- rcu_read_unlock();
- return false;
- }
-
- replicas += bch2_extent_ptr_durability(c, &p);
- }
- rcu_read_unlock();
-
- return replicas >= op->opts.data_replicas;
-}
-
-static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_i *orig,
- struct bkey_s_c k,
- u64 new_i_size)
-{
- if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) {
- /* trace this */
- return 0;
- }
-
- struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
- int ret = PTR_ERR_OR_ZERO(new);
- if (ret)
- return ret;
-
- bch2_cut_front(bkey_start_pos(&orig->k), new);
- bch2_cut_back(orig->k.p, new);
-
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
- bkey_for_each_ptr(ptrs, ptr)
- ptr->unwritten = 0;
-
- /*
- * Note that we're not calling bch2_subvol_get_snapshot() in this path -
- * that was done when we kicked off the write, and here it's important
- * that we update the extent that we wrote to - even if a snapshot has
- * since been created. The write is still outstanding, so we're ok
- * w.r.t. snapshot atomicity:
- */
- return bch2_extent_update_i_size_sectors(trans, iter,
- min(new->k.p.offset << 9, new_i_size), 0) ?:
- bch2_trans_update(trans, iter, new,
- BTREE_UPDATE_internal_snapshot_node);
-}
-
-static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
-{
- struct bch_fs *c = op->c;
- struct btree_trans *trans = bch2_trans_get(c);
-
- for_each_keylist_key(&op->insert_keys, orig) {
- int ret = for_each_btree_key_upto_commit(trans, iter, BTREE_ID_extents,
- bkey_start_pos(&orig->k), orig->k.p,
- BTREE_ITER_intent, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
- bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
- }));
-
- if (ret && !bch2_err_matches(ret, EROFS)) {
- struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);
-
- bch_err_inum_offset_ratelimited(c,
- insert->k.p.inode, insert->k.p.offset << 9,
- "%s write error while doing btree update: %s",
- op->flags & BCH_WRITE_MOVE ? "move" : "user",
- bch2_err_str(ret));
- }
-
- if (ret) {
- op->error = ret;
- break;
- }
- }
-
- bch2_trans_put(trans);
-}
-
-static void __bch2_nocow_write_done(struct bch_write_op *op)
-{
- if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
- op->error = -EIO;
- } else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN))
- bch2_nocow_write_convert_unwritten(op);
-}
-
-static CLOSURE_CALLBACK(bch2_nocow_write_done)
-{
- closure_type(op, struct bch_write_op, cl);
-
- __bch2_nocow_write_done(op);
- bch2_write_done(cl);
-}
-
-struct bucket_to_lock {
- struct bpos b;
- unsigned gen;
- struct nocow_lock_bucket *l;
-};
-
-static void bch2_nocow_write(struct bch_write_op *op)
-{
- struct bch_fs *c = op->c;
- struct btree_trans *trans;
- struct btree_iter iter;
- struct bkey_s_c k;
- DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets;
- u32 snapshot;
- struct bucket_to_lock *stale_at;
- int stale, ret;
-
- if (op->flags & BCH_WRITE_MOVE)
- return;
-
- darray_init(&buckets);
- trans = bch2_trans_get(c);
-retry:
- bch2_trans_begin(trans);
-
- ret = bch2_subvolume_get_snapshot(trans, op->subvol, &snapshot);
- if (unlikely(ret))
- goto err;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- SPOS(op->pos.inode, op->pos.offset, snapshot),
- BTREE_ITER_slots);
- while (1) {
- struct bio *bio = &op->wbio.bio;
-
- buckets.nr = 0;
-
- ret = bch2_trans_relock(trans);
- if (ret)
- break;
-
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
- if (ret)
- break;
-
- /* fall back to normal cow write path? */
- if (unlikely(k.k->p.snapshot != snapshot ||
- !bch2_extent_is_writeable(op, k)))
- break;
-
- if (bch2_keylist_realloc(&op->insert_keys,
- op->inline_keys,
- ARRAY_SIZE(op->inline_keys),
- k.k->u64s))
- break;
-
- /* Get iorefs before dropping btree locks: */
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE);
- if (unlikely(!ca))
- goto err_get_ioref;
-
- struct bpos b = PTR_BUCKET_POS(ca, ptr);
- struct nocow_lock_bucket *l =
- bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b));
- prefetch(l);
-
- /* XXX allocating memory with btree locks held - rare */
- darray_push_gfp(&buckets, ((struct bucket_to_lock) {
- .b = b, .gen = ptr->gen, .l = l,
- }), GFP_KERNEL|__GFP_NOFAIL);
-
- if (ptr->unwritten)
- op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
- }
-
- /* Unlock before taking nocow locks, doing IO: */
- bkey_reassemble(op->insert_keys.top, k);
- bch2_trans_unlock(trans);
-
- bch2_cut_front(op->pos, op->insert_keys.top);
- if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN)
- bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
-
- darray_for_each(buckets, i) {
- struct bch_dev *ca = bch2_dev_have_ref(c, i->b.inode);
-
- __bch2_bucket_nocow_lock(&c->nocow_locks, i->l,
- bucket_to_u64(i->b),
- BUCKET_NOCOW_LOCK_UPDATE);
-
- int gen = bucket_gen_get(ca, i->b.offset);
- stale = gen < 0 ? gen : gen_after(gen, i->gen);
- if (unlikely(stale)) {
- stale_at = i;
- goto err_bucket_stale;
- }
- }
-
- bio = &op->wbio.bio;
- if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
- bio = bio_split(bio, k.k->p.offset - op->pos.offset,
- GFP_KERNEL, &c->bio_write);
- wbio_init(bio)->put_bio = true;
- bio->bi_opf = op->wbio.bio.bi_opf;
- } else {
- op->flags |= BCH_WRITE_SUBMITTED;
- }
-
- op->pos.offset += bio_sectors(bio);
- op->written += bio_sectors(bio);
-
- bio->bi_end_io = bch2_write_endio;
- bio->bi_private = &op->cl;
- bio->bi_opf |= REQ_OP_WRITE;
- closure_get(&op->cl);
- bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
- op->insert_keys.top, true);
-
- bch2_keylist_push(&op->insert_keys);
- if (op->flags & BCH_WRITE_SUBMITTED)
- break;
- bch2_btree_iter_advance(&iter);
- }
-out:
- bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- if (ret) {
- bch_err_inum_offset_ratelimited(c,
- op->pos.inode, op->pos.offset << 9,
- "%s: btree lookup error %s", __func__, bch2_err_str(ret));
- op->error = ret;
- op->flags |= BCH_WRITE_SUBMITTED;
- }
-
- bch2_trans_put(trans);
- darray_exit(&buckets);
-
- /* fallback to cow write path? */
- if (!(op->flags & BCH_WRITE_SUBMITTED)) {
- closure_sync(&op->cl);
- __bch2_nocow_write_done(op);
- op->insert_keys.top = op->insert_keys.keys;
- } else if (op->flags & BCH_WRITE_SYNC) {
- closure_sync(&op->cl);
- bch2_nocow_write_done(&op->cl.work);
- } else {
- /*
- * XXX
-		 * needs to run from process context (hence the workqueue) because
-		 * ei_quota_lock is a mutex
- */
- continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
- }
- return;
-err_get_ioref:
- darray_for_each(buckets, i)
- percpu_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref);
-
- /* Fall back to COW path: */
- goto out;
-err_bucket_stale:
- darray_for_each(buckets, i) {
- bch2_bucket_nocow_unlock(&c->nocow_locks, i->b, BUCKET_NOCOW_LOCK_UPDATE);
- if (i == stale_at)
- break;
- }
-
- struct printbuf buf = PRINTBUF;
- if (bch2_fs_inconsistent_on(stale < 0, c,
- "pointer to invalid bucket in nocow path on device %llu\n %s",
- stale_at->b.inode,
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- ret = -EIO;
- } else {
- /* We can retry this: */
- ret = -BCH_ERR_transaction_restart;
- }
- printbuf_exit(&buf);
-
- goto err_get_ioref;
-}
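The err_bucket_stale path above treats a pointer as stale when the bucket's current generation has advanced past the one recorded in the key. Below is a small standalone sketch of comparing wrapping 8-bit generations via signed subtraction; the helper names are made up, and the exact definitions of bch2's gen_after()/bucket_gen_get() are not taken from this file:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Generations are small wrapping counters; comparing them with a signed
 * subtraction keeps the comparison correct across wraparound as long as
 * the two values are within 127 steps of each other.
 */
static bool gen_newer_than(uint8_t a, uint8_t b)
{
	return (int8_t)(a - b) > 0;
}

/*
 * In the nocow path, a pointer is stale if the bucket has been reused,
 * i.e. its current generation has moved past the one recorded in the key.
 */
static bool ptr_is_stale(uint8_t bucket_gen, uint8_t ptr_gen)
{
	return gen_newer_than(bucket_gen, ptr_gen);
}

int main(void)
{
	printf("%d\n", ptr_is_stale(5, 5));	/* 0: same generation */
	printf("%d\n", ptr_is_stale(6, 5));	/* 1: bucket reused */
	printf("%d\n", ptr_is_stale(1, 255));	/* 1: still correct across wraparound */
	return 0;
}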
-
-static void __bch2_write(struct bch_write_op *op)
-{
- struct bch_fs *c = op->c;
- struct write_point *wp = NULL;
- struct bio *bio = NULL;
- unsigned nofs_flags;
- int ret;
-
- nofs_flags = memalloc_nofs_save();
-
- if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
- bch2_nocow_write(op);
- if (op->flags & BCH_WRITE_SUBMITTED)
- goto out_nofs_restore;
- }
-again:
- memset(&op->failed, 0, sizeof(op->failed));
-
- do {
- struct bkey_i *key_to_write;
- unsigned key_to_write_offset = op->insert_keys.top_p -
- op->insert_keys.keys_p;
-
- /* +1 for possible cache device: */
- if (op->open_buckets.nr + op->nr_replicas + 1 >
- ARRAY_SIZE(op->open_buckets.v))
- break;
-
- if (bch2_keylist_realloc(&op->insert_keys,
- op->inline_keys,
- ARRAY_SIZE(op->inline_keys),
- BKEY_EXTENT_U64s_MAX))
- break;
-
- /*
- * The copygc thread is now global, which means it's no longer
- * freeing up space on specific disks, which means that
- * allocations for specific disks may hang arbitrarily long:
- */
- ret = bch2_trans_run(c, lockrestart_do(trans,
- bch2_alloc_sectors_start_trans(trans,
- op->target,
- op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
- op->write_point,
- &op->devs_have,
- op->nr_replicas,
- op->nr_replicas_required,
- op->watermark,
- op->flags,
- &op->cl, &wp)));
- if (unlikely(ret)) {
- if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
- break;
-
- goto err;
- }
-
- EBUG_ON(!wp);
-
- bch2_open_bucket_get(c, wp, &op->open_buckets);
- ret = bch2_write_extent(op, wp, &bio);
-
- bch2_alloc_sectors_done_inlined(c, wp);
-err:
- if (ret <= 0) {
- op->flags |= BCH_WRITE_SUBMITTED;
-
- if (ret < 0) {
- if (!(op->flags & BCH_WRITE_ALLOC_NOWAIT))
- bch_err_inum_offset_ratelimited(c,
- op->pos.inode,
- op->pos.offset << 9,
- "%s(): %s error: %s", __func__,
- op->flags & BCH_WRITE_MOVE ? "move" : "user",
- bch2_err_str(ret));
- op->error = ret;
- break;
- }
- }
-
- bio->bi_end_io = bch2_write_endio;
- bio->bi_private = &op->cl;
- bio->bi_opf |= REQ_OP_WRITE;
-
- closure_get(bio->bi_private);
-
- key_to_write = (void *) (op->insert_keys.keys_p +
- key_to_write_offset);
-
- bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
- key_to_write, false);
- } while (ret);
-
- /*
- * Sync or no?
- *
-	 * If we're running asynchronously, we may still want to block
- * synchronously here if we weren't able to submit all of the IO at
- * once, as that signals backpressure to the caller.
- */
- if ((op->flags & BCH_WRITE_SYNC) ||
- (!(op->flags & BCH_WRITE_SUBMITTED) &&
- !(op->flags & BCH_WRITE_IN_WORKER))) {
- bch2_wait_on_allocator(c, &op->cl);
-
- __bch2_write_index(op);
-
- if (!(op->flags & BCH_WRITE_SUBMITTED))
- goto again;
- bch2_write_done(&op->cl);
- } else {
- bch2_write_queue(op, wp);
- continue_at(&op->cl, bch2_write_index, NULL);
- }
-out_nofs_restore:
- memalloc_nofs_restore(nofs_flags);
-}
-
-static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
-{
- struct bio *bio = &op->wbio.bio;
- struct bvec_iter iter;
- struct bkey_i_inline_data *id;
- unsigned sectors;
- int ret;
-
- memset(&op->failed, 0, sizeof(op->failed));
-
- op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
- op->flags |= BCH_WRITE_SUBMITTED;
-
- bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);
-
- ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
- ARRAY_SIZE(op->inline_keys),
- BKEY_U64s + DIV_ROUND_UP(data_len, 8));
- if (ret) {
- op->error = ret;
- goto err;
- }
-
- sectors = bio_sectors(bio);
- op->pos.offset += sectors;
-
- id = bkey_inline_data_init(op->insert_keys.top);
- id->k.p = op->pos;
- id->k.bversion = op->version;
- id->k.size = sectors;
-
- iter = bio->bi_iter;
- iter.bi_size = data_len;
- memcpy_from_bio(id->v.data, bio, iter);
-
- while (data_len & 7)
- id->v.data[data_len++] = '\0';
- set_bkey_val_bytes(&id->k, data_len);
- bch2_keylist_push(&op->insert_keys);
-
- __bch2_write_index(op);
-err:
- bch2_write_done(&op->cl);
-}
-
-/**
- * bch2_write() - handle a write to a cache device or flash only volume
- * @cl: &bch_write_op->cl
- *
- * This is the starting point for any data to end up in a cache device; it could
- * be from a normal write, or a writeback write, or a write to a flash only
- * volume - it's also used by the moving garbage collector to compact data in
- * mostly empty buckets.
- *
- * It first writes the data to the cache, creating a list of keys to be inserted
- * (if the data won't fit in a single open bucket, there will be multiple keys);
- * after the data is written it calls bch_journal, and after the keys have been
- * added to the next journal write they're inserted into the btree.
- *
- * If op->discard is true, instead of inserting the data it invalidates the
- * region of the cache represented by op->bio and op->inode.
- */
-CLOSURE_CALLBACK(bch2_write)
-{
- closure_type(op, struct bch_write_op, cl);
- struct bio *bio = &op->wbio.bio;
- struct bch_fs *c = op->c;
- unsigned data_len;
-
- EBUG_ON(op->cl.parent);
- BUG_ON(!op->nr_replicas);
- BUG_ON(!op->write_point.v);
- BUG_ON(bkey_eq(op->pos, POS_MAX));
-
- if (op->flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)
- op->flags |= BCH_WRITE_ALLOC_NOWAIT;
-
- op->nr_replicas_required = min_t(unsigned, op->nr_replicas_required, op->nr_replicas);
- op->start_time = local_clock();
- bch2_keylist_init(&op->insert_keys, op->inline_keys);
- wbio_init(bio)->put_bio = false;
-
- if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
- bch_err_inum_offset_ratelimited(c,
- op->pos.inode,
- op->pos.offset << 9,
- "%s write error: misaligned write",
- op->flags & BCH_WRITE_MOVE ? "move" : "user");
- op->error = -EIO;
- goto err;
- }
-
- if (c->opts.nochanges) {
- op->error = -BCH_ERR_erofs_no_writes;
- goto err;
- }
-
- if (!(op->flags & BCH_WRITE_MOVE) &&
- !bch2_write_ref_tryget(c, BCH_WRITE_REF_write)) {
- op->error = -BCH_ERR_erofs_no_writes;
- goto err;
- }
-
- this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
- bch2_increment_clock(c, bio_sectors(bio), WRITE);
-
- data_len = min_t(u64, bio->bi_iter.bi_size,
- op->new_i_size - (op->pos.offset << 9));
-
- if (c->opts.inline_data &&
- data_len <= min(block_bytes(c) / 2, 1024U)) {
- bch2_write_data_inline(op, data_len);
- return;
- }
-
- __bch2_write(op);
- return;
-err:
- bch2_disk_reservation_put(c, &op->res);
-
- closure_debug_destroy(&op->cl);
- if (op->end_io)
- op->end_io(op);
-}
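The inline-data decision above comes down to a size check against half a block, capped at 1024 bytes (and gated on the inline_data option). A minimal standalone sketch with made-up sizes, not the bcachefs helpers:

#include <stdbool.h>
#include <stdio.h>

/* Writes no larger than half a block, capped at 1024 bytes, go inline. */
static bool write_goes_inline(unsigned block_bytes, unsigned data_len)
{
	unsigned cutoff = block_bytes / 2 < 1024 ? block_bytes / 2 : 1024;

	return data_len <= cutoff;
}

int main(void)
{
	printf("%d\n", write_goes_inline(4096, 900));	/* 1: under the 1024 cap */
	printf("%d\n", write_goes_inline(4096, 1500));	/* 0: over the 1024 cap */
	printf("%d\n", write_goes_inline(512, 300));	/* 0: over half a 512-byte block */
	return 0;
}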
-
-static const char * const bch2_write_flags[] = {
-#define x(f) #f,
- BCH_WRITE_FLAGS()
-#undef x
- NULL
-};
-
-void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
-{
- prt_str(out, "pos: ");
- bch2_bpos_to_text(out, op->pos);
- prt_newline(out);
- printbuf_indent_add(out, 2);
-
- prt_str(out, "started: ");
- bch2_pr_time_units(out, local_clock() - op->start_time);
- prt_newline(out);
-
- prt_str(out, "flags: ");
- prt_bitflags(out, bch2_write_flags, op->flags);
- prt_newline(out);
-
- prt_printf(out, "ref: %u\n", closure_nr_remaining(&op->cl));
-
- printbuf_indent_sub(out, 2);
-}
-
-void bch2_fs_io_write_exit(struct bch_fs *c)
-{
- mempool_exit(&c->bio_bounce_pages);
- bioset_exit(&c->replica_set);
- bioset_exit(&c->bio_write);
-}
-
-int bch2_fs_io_write_init(struct bch_fs *c)
-{
- if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio), BIOSET_NEED_BVECS) ||
- bioset_init(&c->replica_set, 4, offsetof(struct bch_write_bio, bio), 0))
- return -BCH_ERR_ENOMEM_bio_write_init;
-
- if (mempool_init_page_pool(&c->bio_bounce_pages,
- max_t(unsigned,
- c->opts.btree_node_size,
- c->opts.encoded_extent_max) /
- PAGE_SIZE, 0))
- return -BCH_ERR_ENOMEM_bio_bounce_pages_init;
-
- return 0;
-}
diff --git a/fs/bcachefs/io_write.h b/fs/bcachefs/io_write.h
deleted file mode 100644
index 5400ce94ee57..000000000000
--- a/fs/bcachefs/io_write.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_IO_WRITE_H
-#define _BCACHEFS_IO_WRITE_H
-
-#include "checksum.h"
-#include "io_write_types.h"
-
-#define to_wbio(_bio) \
- container_of((_bio), struct bch_write_bio, bio)
-
-void bch2_bio_free_pages_pool(struct bch_fs *, struct bio *);
-void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);
-
-#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
-void bch2_latency_acct(struct bch_dev *, u64, int);
-#else
-static inline void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw) {}
-#endif
-
-void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
- enum bch_data_type, const struct bkey_i *, bool);
-
-#define BCH_WRITE_FLAGS() \
- x(ALLOC_NOWAIT) \
- x(CACHED) \
- x(DATA_ENCODED) \
- x(PAGES_STABLE) \
- x(PAGES_OWNED) \
- x(ONLY_SPECIFIED_DEVS) \
- x(WROTE_DATA_INLINE) \
- x(FROM_INTERNAL) \
- x(CHECK_ENOSPC) \
- x(SYNC) \
- x(MOVE) \
- x(IN_WORKER) \
- x(SUBMITTED) \
- x(IO_ERROR) \
- x(CONVERT_UNWRITTEN)
-
-enum __bch_write_flags {
-#define x(f) __BCH_WRITE_##f,
- BCH_WRITE_FLAGS()
-#undef x
-};
-
-enum bch_write_flags {
-#define x(f) BCH_WRITE_##f = BIT(__BCH_WRITE_##f),
- BCH_WRITE_FLAGS()
-#undef x
-};
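BCH_WRITE_FLAGS() is an x-macro: the same list expands once into bit positions, once into bit masks, and once into the bch2_write_flags[] name table used by bch2_write_op_to_text(). A self-contained illustration of the pattern with an invented flag list:

#include <stdio.h>

#define MY_FLAGS()	\
	x(SYNC)		\
	x(CACHED)	\
	x(SUBMITTED)

enum __my_flags {
#define x(f) __MY_##f,
	MY_FLAGS()
#undef x
};

enum my_flags {
#define x(f) MY_##f = 1U << __MY_##f,
	MY_FLAGS()
#undef x
};

static const char * const my_flag_names[] = {
#define x(f) #f,
	MY_FLAGS()
#undef x
	NULL
};

int main(void)
{
	unsigned flags = MY_SYNC | MY_SUBMITTED;

	/* Same idea as prt_bitflags(): print the name of every set bit. */
	for (unsigned i = 0; my_flag_names[i]; i++)
		if (flags & (1U << i))
			printf("%s ", my_flag_names[i]);
	printf("\n");
	return 0;
}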
-
-static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
-{
- return op->watermark == BCH_WATERMARK_copygc
- ? op->c->copygc_wq
- : op->c->btree_update_wq;
-}
-
-int bch2_sum_sector_overwrites(struct btree_trans *, struct btree_iter *,
- struct bkey_i *, bool *, s64 *, s64 *);
-int bch2_extent_update(struct btree_trans *, subvol_inum,
- struct btree_iter *, struct bkey_i *,
- struct disk_reservation *, u64, s64 *, bool);
-
-static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
- struct bch_io_opts opts)
-{
- op->c = c;
- op->end_io = NULL;
- op->flags = 0;
- op->written = 0;
- op->error = 0;
- op->csum_type = bch2_data_checksum_type(c, opts);
- op->compression_opt = opts.compression;
- op->nr_replicas = 0;
- op->nr_replicas_required = c->opts.data_replicas_required;
- op->watermark = BCH_WATERMARK_normal;
- op->incompressible = 0;
- op->open_buckets.nr = 0;
- op->devs_have.nr = 0;
- op->target = 0;
- op->opts = opts;
- op->subvol = 0;
- op->pos = POS_MAX;
- op->version = ZERO_VERSION;
- op->write_point = (struct write_point_specifier) { 0 };
- op->res = (struct disk_reservation) { 0 };
- op->new_i_size = U64_MAX;
- op->i_sectors_delta = 0;
- op->devs_need_flush = NULL;
-}
-
-CLOSURE_CALLBACK(bch2_write);
-void bch2_write_point_do_index_updates(struct work_struct *);
-
-static inline struct bch_write_bio *wbio_init(struct bio *bio)
-{
- struct bch_write_bio *wbio = to_wbio(bio);
-
- memset(&wbio->wbio, 0, sizeof(wbio->wbio));
- return wbio;
-}
-
-void bch2_write_op_to_text(struct printbuf *, struct bch_write_op *);
-
-void bch2_fs_io_write_exit(struct bch_fs *);
-int bch2_fs_io_write_init(struct bch_fs *);
-
-#endif /* _BCACHEFS_IO_WRITE_H */
diff --git a/fs/bcachefs/io_write_types.h b/fs/bcachefs/io_write_types.h
deleted file mode 100644
index 6e878a6f2f0b..000000000000
--- a/fs/bcachefs/io_write_types.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_IO_WRITE_TYPES_H
-#define _BCACHEFS_IO_WRITE_TYPES_H
-
-#include "alloc_types.h"
-#include "btree_types.h"
-#include "buckets_types.h"
-#include "extents_types.h"
-#include "keylist_types.h"
-#include "opts.h"
-#include "super_types.h"
-
-#include <linux/llist.h>
-#include <linux/workqueue.h>
-
-struct bch_write_bio {
- struct_group(wbio,
- struct bch_fs *c;
- struct bch_write_bio *parent;
-
- u64 submit_time;
- u64 inode_offset;
- u64 nocow_bucket;
-
- struct bch_devs_list failed;
- u8 dev;
-
- unsigned split:1,
- bounce:1,
- put_bio:1,
- have_ioref:1,
- nocow:1,
- used_mempool:1,
- first_btree_write:1;
- );
-
- struct bio bio;
-};
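struct_group(wbio, ...) wraps the bcachefs-owned fields so that wbio_init() can zero them in one memset without touching the embedded struct bio. Roughly, the macro expands to a union of an anonymous struct and a named mirror; a simplified standalone equivalent follows (an assumption about the expansion, not the actual macro from include/linux/stddef.h):

#include <stdio.h>
#include <string.h>

struct bio_stub { int flags; };		/* stand-in for struct bio */

struct write_bio {
	union {
		struct {		/* members accessible directly ... */
			int dev;
			unsigned long submit_time;
		};
		struct {		/* ... and addressable as one group */
			int dev;
			unsigned long submit_time;
		} grp;
	};
	struct bio_stub bio;		/* must stay untouched by init */
};

static void write_bio_init(struct write_bio *w)
{
	/* Zero only the grouped fields; w->bio is left as-is. */
	memset(&w->grp, 0, sizeof(w->grp));
}

int main(void)
{
	struct write_bio w;

	w.dev = 3;
	w.bio.flags = 42;
	write_bio_init(&w);
	printf("dev=%d bio.flags=%d\n", w.dev, w.bio.flags); /* dev=0 bio.flags=42 */
	return 0;
}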
-
-struct bch_write_op {
- struct closure cl;
- struct bch_fs *c;
- void (*end_io)(struct bch_write_op *);
- u64 start_time;
-
- unsigned written; /* sectors */
- u16 flags;
- s16 error; /* dio write path expects it to hold -ERESTARTSYS... */
-
- unsigned compression_opt:8;
- unsigned csum_type:4;
- unsigned nr_replicas:4;
- unsigned nr_replicas_required:4;
- unsigned watermark:3;
- unsigned incompressible:1;
- unsigned stripe_waited:1;
-
- struct bch_devs_list devs_have;
- u16 target;
- u16 nonce;
- struct bch_io_opts opts;
-
- u32 subvol;
- struct bpos pos;
- struct bversion version;
-
- /* For BCH_WRITE_DATA_ENCODED: */
- struct bch_extent_crc_unpacked crc;
-
- struct write_point_specifier write_point;
-
- struct write_point *wp;
- struct list_head wp_list;
-
- struct disk_reservation res;
-
- struct open_buckets open_buckets;
-
- u64 new_i_size;
- s64 i_sectors_delta;
-
- struct bch_devs_mask failed;
-
- struct keylist insert_keys;
- u64 inline_keys[BKEY_EXTENT_U64s_MAX * 2];
-
- /*
- * Bitmask of devices that have had nocow writes issued to them since
- * last flush:
- */
- struct bch_devs_mask *devs_need_flush;
-
- /* Must be last: */
- struct bch_write_bio wbio;
-};
-
-#endif /* _BCACHEFS_IO_WRITE_TYPES_H */
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
deleted file mode 100644
index 2dc0d60c1745..000000000000
--- a/fs/bcachefs/journal.c
+++ /dev/null
@@ -1,1583 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * bcachefs journalling code, for btree insertions
- *
- * Copyright 2012 Google, Inc.
- */
-
-#include "bcachefs.h"
-#include "alloc_foreground.h"
-#include "bkey_methods.h"
-#include "btree_gc.h"
-#include "btree_update.h"
-#include "btree_write_buffer.h"
-#include "buckets.h"
-#include "error.h"
-#include "journal.h"
-#include "journal_io.h"
-#include "journal_reclaim.h"
-#include "journal_sb.h"
-#include "journal_seq_blacklist.h"
-#include "trace.h"
-
-static const char * const bch2_journal_errors[] = {
-#define x(n) #n,
- JOURNAL_ERRORS()
-#undef x
- NULL
-};
-
-static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
-{
- return seq > j->seq_ondisk;
-}
-
-static bool __journal_entry_is_open(union journal_res_state state)
-{
- return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
-}
-
-static inline unsigned nr_unwritten_journal_entries(struct journal *j)
-{
- return atomic64_read(&j->seq) - j->seq_ondisk;
-}
-
-static bool journal_entry_is_open(struct journal *j)
-{
- return __journal_entry_is_open(j->reservations);
-}
-
-static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u64 seq)
-{
- union journal_res_state s = READ_ONCE(j->reservations);
- unsigned i = seq & JOURNAL_BUF_MASK;
- struct journal_buf *buf = j->buf + i;
-
- prt_printf(out, "seq:\t%llu\n", seq);
- printbuf_indent_add(out, 2);
-
- prt_printf(out, "refcount:\t%u\n", journal_state_count(s, i));
-
- prt_printf(out, "size:\t");
- prt_human_readable_u64(out, vstruct_bytes(buf->data));
- prt_newline(out);
-
- prt_printf(out, "expires:\t");
- prt_printf(out, "%li jiffies\n", buf->expires - jiffies);
-
- prt_printf(out, "flags:\t");
- if (buf->noflush)
- prt_str(out, "noflush ");
- if (buf->must_flush)
- prt_str(out, "must_flush ");
- if (buf->separate_flush)
- prt_str(out, "separate_flush ");
- if (buf->need_flush_to_write_buffer)
- prt_str(out, "need_flush_to_write_buffer ");
- if (buf->write_started)
- prt_str(out, "write_started ");
- if (buf->write_allocated)
- prt_str(out, "write_allocated ");
- if (buf->write_done)
- prt_str(out, "write_done");
- prt_newline(out);
-
- printbuf_indent_sub(out, 2);
-}
-
-static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
-{
- if (!out->nr_tabstops)
- printbuf_tabstop_push(out, 24);
-
- for (u64 seq = journal_last_unwritten_seq(j);
- seq <= journal_cur_seq(j);
- seq++)
- bch2_journal_buf_to_text(out, j, seq);
- prt_printf(out, "last buf %s\n", journal_entry_is_open(j) ? "open" : "closed");
-}
-
-static inline struct journal_buf *
-journal_seq_to_buf(struct journal *j, u64 seq)
-{
- struct journal_buf *buf = NULL;
-
- EBUG_ON(seq > journal_cur_seq(j));
-
- if (journal_seq_unwritten(j, seq)) {
- buf = j->buf + (seq & JOURNAL_BUF_MASK);
- EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
- }
- return buf;
-}
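Journal buffers live in a small power-of-two ring, so a sequence number maps to a slot with a simple mask, as in journal_seq_to_buf() above. A trivial standalone sketch (the ring size here is an arbitrary choice, not the real JOURNAL_BUF_NR):

#include <stdio.h>
#include <stdint.h>

#define NR_BUFS		4U		/* must be a power of two */
#define BUF_MASK	(NR_BUFS - 1)

int main(void)
{
	/* Consecutive sequence numbers cycle through the same few slots. */
	for (uint64_t seq = 10; seq < 18; seq++)
		printf("seq %llu -> buf %llu\n",
		       (unsigned long long)seq,
		       (unsigned long long)(seq & BUF_MASK));
	return 0;
}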
-
-static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
-{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(p->list); i++)
- INIT_LIST_HEAD(&p->list[i]);
- INIT_LIST_HEAD(&p->flushed);
- atomic_set(&p->count, count);
- p->devs.nr = 0;
-}
-
-/*
- * Detect stuck journal conditions and trigger shutdown. Technically the journal
- * can end up stuck for a variety of reasons, such as a blocked I/O, journal
- * reservation lockup, etc. Since this is a fatal error with potentially
- * unpredictable characteristics, we want to be fairly conservative before we
- * decide to shut things down.
- *
- * Consider the journal stuck when it appears full with no ability to commit
- * btree transactions, discard journal buckets, or acquire a priority
- * (reserved watermark) reservation.
- */
-static inline bool
-journal_error_check_stuck(struct journal *j, int error, unsigned flags)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- bool stuck = false;
- struct printbuf buf = PRINTBUF;
-
- if (!(error == JOURNAL_ERR_journal_full ||
- error == JOURNAL_ERR_journal_pin_full) ||
- nr_unwritten_journal_entries(j) ||
- (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
- return stuck;
-
- spin_lock(&j->lock);
-
- if (j->can_discard) {
- spin_unlock(&j->lock);
- return stuck;
- }
-
- stuck = true;
-
- /*
- * The journal shutdown path will set ->err_seq, but do it here first to
- * serialize against concurrent failures and avoid duplicate error
- * reports.
- */
- if (j->err_seq) {
- spin_unlock(&j->lock);
- return stuck;
- }
- j->err_seq = journal_cur_seq(j);
- spin_unlock(&j->lock);
-
-	bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
- bch2_journal_errors[error]);
- bch2_journal_debug_to_text(&buf, j);
- bch_err(c, "%s", buf.buf);
-
- printbuf_reset(&buf);
- bch2_journal_pins_to_text(&buf, j);
- bch_err(c, "Journal pins:\n%s", buf.buf);
- printbuf_exit(&buf);
-
- bch2_fatal_error(c);
- dump_stack();
-
- return stuck;
-}
-
-void bch2_journal_do_writes(struct journal *j)
-{
- for (u64 seq = journal_last_unwritten_seq(j);
- seq <= journal_cur_seq(j);
- seq++) {
- unsigned idx = seq & JOURNAL_BUF_MASK;
- struct journal_buf *w = j->buf + idx;
-
- if (w->write_started && !w->write_allocated)
- break;
- if (w->write_started)
- continue;
-
- if (!journal_state_count(j->reservations, idx)) {
- w->write_started = true;
- closure_call(&w->io, bch2_journal_write, j->wq, NULL);
- }
-
- break;
- }
-}
-
-/*
- * Final processing when the last reference of a journal buffer has been
- * dropped. Drop the pin list reference acquired at journal entry open and write
- * the buffer, if requested.
- */
-void bch2_journal_buf_put_final(struct journal *j, u64 seq)
-{
- lockdep_assert_held(&j->lock);
-
- if (__bch2_journal_pin_put(j, seq))
- bch2_journal_reclaim_fast(j);
- bch2_journal_do_writes(j);
-}
-
-/*
- * Close the currently open journal entry:
- *
- * We don't close a journal_buf until the next journal_buf is finished writing,
- * and can be opened again - this also initializes the next journal_buf:
- */
-static void __journal_entry_close(struct journal *j, unsigned closed_val, bool trace)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct journal_buf *buf = journal_cur_buf(j);
- union journal_res_state old, new;
- unsigned sectors;
-
- BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
- closed_val != JOURNAL_ENTRY_ERROR_VAL);
-
- lockdep_assert_held(&j->lock);
-
- old.v = atomic64_read(&j->reservations.counter);
- do {
- new.v = old.v;
- new.cur_entry_offset = closed_val;
-
- if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
- old.cur_entry_offset == new.cur_entry_offset)
- return;
- } while (!atomic64_try_cmpxchg(&j->reservations.counter,
- &old.v, new.v));
-
- if (!__journal_entry_is_open(old))
- return;
-
- /* Close out old buffer: */
- buf->data->u64s = cpu_to_le32(old.cur_entry_offset);
-
- if (trace_journal_entry_close_enabled() && trace) {
- struct printbuf pbuf = PRINTBUF;
- pbuf.atomic++;
-
- prt_str(&pbuf, "entry size: ");
- prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data));
- prt_newline(&pbuf);
- bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT);
- trace_journal_entry_close(c, pbuf.buf);
- printbuf_exit(&pbuf);
- }
-
- sectors = vstruct_blocks_plus(buf->data, c->block_bits,
- buf->u64s_reserved) << c->block_bits;
- BUG_ON(sectors > buf->sectors);
- buf->sectors = sectors;
-
- /*
- * We have to set last_seq here, _before_ opening a new journal entry:
- *
-	 * A thread may replace an old pin with a new pin on its current
- * journal reservation - the expectation being that the journal will
- * contain either what the old pin protected or what the new pin
- * protects.
- *
- * After the old pin is dropped journal_last_seq() won't include the old
- * pin, so we can only write the updated last_seq on the entry that
- * contains whatever the new pin protects.
- *
- * Restated, we can _not_ update last_seq for a given entry if there
- * could be a newer entry open with reservations/pins that have been
- * taken against it.
- *
-	 * Hence, we want to update/set last_seq on the current journal entry right
- * before we open a new one:
- */
- buf->last_seq = journal_last_seq(j);
- buf->data->last_seq = cpu_to_le64(buf->last_seq);
- BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));
-
- cancel_delayed_work(&j->write_work);
-
- bch2_journal_space_available(j);
-
- __bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
-}
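The comment above hinges on last_seq being the oldest sequence number that still has an active pin, which is why it can only be stamped into an entry as that entry is closed. A deliberately crude, single-threaded model of that bookkeeping; none of this is the real journal_entry_pin_list machinery:

#include <stdint.h>
#include <stdio.h>

#define NR_ENTRIES 8

/* pin_count[i] > 0 means entry with seq 'front + i' still protects dirty data */
static unsigned pin_count[NR_ENTRIES];
static uint64_t front = 100;	/* seq of the oldest tracked entry */

/* last_seq: everything older than this is reclaimable on disk */
static uint64_t last_seq(void)
{
	for (unsigned i = 0; i < NR_ENTRIES; i++)
		if (pin_count[i])
			return front + i;
	return front + NR_ENTRIES;	/* nothing pinned */
}

int main(void)
{
	pin_count[0] = 1;		/* an old pin */
	pin_count[3] = 2;

	printf("last_seq %llu\n", (unsigned long long)last_seq());	/* 100 */
	pin_count[0] = 0;		/* old pin dropped ... */
	printf("last_seq %llu\n", (unsigned long long)last_seq());	/* 103 */
	return 0;
}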
-
-void bch2_journal_halt(struct journal *j)
-{
- spin_lock(&j->lock);
- __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
- if (!j->err_seq)
- j->err_seq = journal_cur_seq(j);
- journal_wake(j);
- spin_unlock(&j->lock);
-}
-
-static bool journal_entry_want_write(struct journal *j)
-{
- bool ret = !journal_entry_is_open(j) ||
- journal_cur_seq(j) == journal_last_unwritten_seq(j);
-
- /* Don't close it yet if we already have a write in flight: */
- if (ret)
- __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
- else if (nr_unwritten_journal_entries(j)) {
- struct journal_buf *buf = journal_cur_buf(j);
-
- if (!buf->flush_time) {
- buf->flush_time = local_clock() ?: 1;
- buf->expires = jiffies;
- }
- }
-
- return ret;
-}
-
-bool bch2_journal_entry_close(struct journal *j)
-{
- bool ret;
-
- spin_lock(&j->lock);
- ret = journal_entry_want_write(j);
- spin_unlock(&j->lock);
-
- return ret;
-}
-
-/*
- * should _only_ be called from journal_res_get() - when we actually want a
- * journal reservation - journal entry is open means journal is dirty:
- */
-static int journal_entry_open(struct journal *j)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct journal_buf *buf = j->buf +
- ((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
- union journal_res_state old, new;
- int u64s;
-
- lockdep_assert_held(&j->lock);
- BUG_ON(journal_entry_is_open(j));
- BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
-
- if (j->blocked)
- return JOURNAL_ERR_blocked;
-
- if (j->cur_entry_error)
- return j->cur_entry_error;
-
- if (bch2_journal_error(j))
- return JOURNAL_ERR_insufficient_devices; /* -EROFS */
-
- if (!fifo_free(&j->pin))
- return JOURNAL_ERR_journal_pin_full;
-
- if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
- return JOURNAL_ERR_max_in_flight;
-
- BUG_ON(!j->cur_entry_sectors);
-
- buf->expires =
- (journal_cur_seq(j) == j->flushed_seq_ondisk
- ? jiffies
- : j->last_flush_write) +
- msecs_to_jiffies(c->opts.journal_flush_delay);
-
- buf->u64s_reserved = j->entry_u64s_reserved;
- buf->disk_sectors = j->cur_entry_sectors;
- buf->sectors = min(buf->disk_sectors, buf->buf_size >> 9);
-
- u64s = (int) (buf->sectors << 9) / sizeof(u64) -
- journal_entry_overhead(j);
- u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
-
- if (u64s <= (ssize_t) j->early_journal_entries.nr)
- return JOURNAL_ERR_journal_full;
-
- if (fifo_empty(&j->pin) && j->reclaim_thread)
- wake_up_process(j->reclaim_thread);
-
- /*
- * The fifo_push() needs to happen at the same time as j->seq is
- * incremented for journal_last_seq() to be calculated correctly
- */
- atomic64_inc(&j->seq);
- journal_pin_list_init(fifo_push_ref(&j->pin), 1);
-
- BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));
-
- BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);
-
- bkey_extent_init(&buf->key);
- buf->noflush = false;
- buf->must_flush = false;
- buf->separate_flush = false;
- buf->flush_time = 0;
- buf->need_flush_to_write_buffer = true;
- buf->write_started = false;
- buf->write_allocated = false;
- buf->write_done = false;
-
- memset(buf->data, 0, sizeof(*buf->data));
- buf->data->seq = cpu_to_le64(journal_cur_seq(j));
- buf->data->u64s = 0;
-
- if (j->early_journal_entries.nr) {
- memcpy(buf->data->_data, j->early_journal_entries.data,
- j->early_journal_entries.nr * sizeof(u64));
- le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
- }
-
- /*
- * Must be set before marking the journal entry as open:
- */
- j->cur_entry_u64s = u64s;
-
- old.v = atomic64_read(&j->reservations.counter);
- do {
- new.v = old.v;
-
- BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);
-
- new.idx++;
- BUG_ON(journal_state_count(new, new.idx));
- BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));
-
- journal_state_inc(&new);
-
- /* Handle any already added entries */
- new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
- } while (!atomic64_try_cmpxchg(&j->reservations.counter,
- &old.v, new.v));
-
- if (nr_unwritten_journal_entries(j) == 1)
- mod_delayed_work(j->wq,
- &j->write_work,
- msecs_to_jiffies(c->opts.journal_flush_delay));
- journal_wake(j);
-
- if (j->early_journal_entries.nr)
- darray_exit(&j->early_journal_entries);
- return 0;
-}
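The capacity calculation above converts the buffer's sectors to bytes, divides by sizeof(u64), and subtracts the per-entry overhead. Worked through with made-up numbers (the overhead value is just an assumed constant, not journal_entry_overhead()):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned sectors = 16;			/* buffer size in 512-byte sectors */
	unsigned overhead_u64s = 12;		/* assumed jset header/overhead */
	unsigned u64s = (sectors << 9) / sizeof(uint64_t) - overhead_u64s;

	/* 16 * 512 = 8192 bytes -> 1024 u64s, minus overhead -> 1012 */
	printf("room for %u u64s of keys\n", u64s);
	return 0;
}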
-
-static bool journal_quiesced(struct journal *j)
-{
- bool ret = atomic64_read(&j->seq) == j->seq_ondisk;
-
- if (!ret)
- bch2_journal_entry_close(j);
- return ret;
-}
-
-static void journal_quiesce(struct journal *j)
-{
- wait_event(j->wait, journal_quiesced(j));
-}
-
-static void journal_write_work(struct work_struct *work)
-{
- struct journal *j = container_of(work, struct journal, write_work.work);
-
- spin_lock(&j->lock);
- if (__journal_entry_is_open(j->reservations)) {
- long delta = journal_cur_buf(j)->expires - jiffies;
-
- if (delta > 0)
- mod_delayed_work(j->wq, &j->write_work, delta);
- else
- __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
- }
- spin_unlock(&j->lock);
-}
-
-static int __journal_res_get(struct journal *j, struct journal_res *res,
- unsigned flags)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct journal_buf *buf;
- bool can_discard;
- int ret;
-retry:
- if (journal_res_get_fast(j, res, flags))
- return 0;
-
- if (bch2_journal_error(j))
- return -BCH_ERR_erofs_journal_err;
-
- if (j->blocked)
- return -BCH_ERR_journal_res_get_blocked;
-
- if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
- ret = JOURNAL_ERR_journal_full;
- can_discard = j->can_discard;
- goto out;
- }
-
- if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) && !journal_entry_is_open(j)) {
- ret = JOURNAL_ERR_max_in_flight;
- goto out;
- }
-
- spin_lock(&j->lock);
-
- /*
- * Recheck after taking the lock, so we don't race with another thread
-	 * that just did journal_entry_open() and then end up calling
-	 * bch2_journal_entry_close() unnecessarily
- */
- if (journal_res_get_fast(j, res, flags)) {
- ret = 0;
- goto unlock;
- }
-
- /*
- * If we couldn't get a reservation because the current buf filled up,
- * and we had room for a bigger entry on disk, signal that we want to
- * realloc the journal bufs:
- */
- buf = journal_cur_buf(j);
- if (journal_entry_is_open(j) &&
- buf->buf_size >> 9 < buf->disk_sectors &&
- buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
- j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);
-
- __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, false);
- ret = journal_entry_open(j) ?: JOURNAL_ERR_retry;
-unlock:
- can_discard = j->can_discard;
- spin_unlock(&j->lock);
-out:
- if (ret == JOURNAL_ERR_retry)
- goto retry;
- if (!ret)
- return 0;
-
- if (journal_error_check_stuck(j, ret, flags))
- ret = -BCH_ERR_journal_res_get_blocked;
-
- if (ret == JOURNAL_ERR_max_in_flight &&
- track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], true)) {
-
- struct printbuf buf = PRINTBUF;
- prt_printf(&buf, "seq %llu\n", journal_cur_seq(j));
- bch2_journal_bufs_to_text(&buf, j);
- trace_journal_entry_full(c, buf.buf);
- printbuf_exit(&buf);
- count_event(c, journal_entry_full);
- }
-
- /*
- * Journal is full - can't rely on reclaim from work item due to
- * freezing:
- */
- if ((ret == JOURNAL_ERR_journal_full ||
- ret == JOURNAL_ERR_journal_pin_full) &&
- !(flags & JOURNAL_RES_GET_NONBLOCK)) {
- if (can_discard) {
- bch2_journal_do_discards(j);
- goto retry;
- }
-
- if (mutex_trylock(&j->reclaim_lock)) {
- bch2_journal_reclaim(j);
- mutex_unlock(&j->reclaim_lock);
- }
- }
-
- return ret == JOURNAL_ERR_insufficient_devices
- ? -BCH_ERR_erofs_journal_err
- : -BCH_ERR_journal_res_get_blocked;
-}
-
-/*
- * Essentially the entry function to the journaling code. When bcachefs is doing
- * a btree insert, it calls this function to get the current journal write.
- * Journal write is the structure used to set up journal writes. The calling
- * function will then add its keys to the structure, queuing them for the next
- * write.
- *
- * To ensure forward progress, the current task must not be holding any
- * btree node write locks.
- */
-int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
- unsigned flags)
-{
- int ret;
-
- if (closure_wait_event_timeout(&j->async_wait,
- (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
- (flags & JOURNAL_RES_GET_NONBLOCK),
- HZ * 10))
- return ret;
-
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct printbuf buf = PRINTBUF;
- bch2_journal_debug_to_text(&buf, j);
- bch_err(c, "Journal stuck? Waited for 10 seconds...\n%s",
- buf.buf);
- printbuf_exit(&buf);
-
- closure_wait_event(&j->async_wait,
- (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
- (flags & JOURNAL_RES_GET_NONBLOCK));
- return ret;
-}
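As the comment above describes, callers reserve room in the open entry, copy their keys into the reserved region, and then drop the reservation (the real path takes the reservation lock-free by updating j->reservations). The overall shape can be modelled with a toy, single-threaded entry; none of these types or helpers are the real journal structures:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ENTRY_U64S 64

struct toy_entry {
	uint64_t data[ENTRY_U64S];
	unsigned used;			/* u64s handed out so far */
};

struct toy_res {
	unsigned offset;
	unsigned u64s;
};

static int toy_res_get(struct toy_entry *e, struct toy_res *res, unsigned u64s)
{
	if (e->used + u64s > ENTRY_U64S)
		return -1;		/* entry full: the real code opens a new one */
	res->offset = e->used;
	res->u64s = u64s;
	e->used += u64s;
	return 0;
}

static void toy_res_fill(struct toy_entry *e, struct toy_res *res,
			 const uint64_t *keys)
{
	memcpy(e->data + res->offset, keys, res->u64s * sizeof(uint64_t));
}

int main(void)
{
	struct toy_entry entry = { .used = 0 };
	struct toy_res res;
	uint64_t keys[3] = { 1, 2, 3 };

	if (!toy_res_get(&entry, &res, 3)) {
		toy_res_fill(&entry, &res, keys);
		printf("copied %u u64s at offset %u\n", res.u64s, res.offset);
	}
	return 0;
}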
-
-/* journal_entry_res: */
-
-void bch2_journal_entry_res_resize(struct journal *j,
- struct journal_entry_res *res,
- unsigned new_u64s)
-{
- union journal_res_state state;
- int d = new_u64s - res->u64s;
-
- spin_lock(&j->lock);
-
- j->entry_u64s_reserved += d;
- if (d <= 0)
- goto out;
-
- j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
- smp_mb();
- state = READ_ONCE(j->reservations);
-
- if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
- state.cur_entry_offset > j->cur_entry_u64s) {
- j->cur_entry_u64s += d;
- /*
- * Not enough room in current journal entry, have to flush it:
- */
- __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
- } else {
- journal_cur_buf(j)->u64s_reserved += d;
- }
-out:
- spin_unlock(&j->lock);
- res->u64s += d;
-}
-
-/* journal flushing: */
-
-/**
- * bch2_journal_flush_seq_async - wait for a journal entry to be written
- * @j: journal object
- * @seq: seq to flush
- * @parent: closure object to wait with
- * Returns: 1 if @seq has already been flushed, 0 if @seq is being flushed,
- * -EIO if @seq will never be flushed
- *
- * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
- * necessary
- */
-int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
- struct closure *parent)
-{
- struct journal_buf *buf;
- int ret = 0;
-
- if (seq <= j->flushed_seq_ondisk)
- return 1;
-
- spin_lock(&j->lock);
-
- if (WARN_ONCE(seq > journal_cur_seq(j),
- "requested to flush journal seq %llu, but currently at %llu",
- seq, journal_cur_seq(j)))
- goto out;
-
- /* Recheck under lock: */
- if (j->err_seq && seq >= j->err_seq) {
- ret = -EIO;
- goto out;
- }
-
- if (seq <= j->flushed_seq_ondisk) {
- ret = 1;
- goto out;
- }
-
- /* if seq was written, but not flushed - flush a newer one instead */
- seq = max(seq, journal_last_unwritten_seq(j));
-
-recheck_need_open:
- if (seq > journal_cur_seq(j)) {
- struct journal_res res = { 0 };
-
- if (journal_entry_is_open(j))
- __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
-
- spin_unlock(&j->lock);
-
- /*
- * We're called from bch2_journal_flush_seq() -> wait_event();
- * but this might block. We won't usually block, so we won't
- * livelock:
- */
- sched_annotate_sleep();
- ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
- if (ret)
- return ret;
-
- seq = res.seq;
- buf = journal_seq_to_buf(j, seq);
- buf->must_flush = true;
-
- if (!buf->flush_time) {
- buf->flush_time = local_clock() ?: 1;
- buf->expires = jiffies;
- }
-
- if (parent && !closure_wait(&buf->wait, parent))
- BUG();
-
- bch2_journal_res_put(j, &res);
-
- spin_lock(&j->lock);
- goto want_write;
- }
-
- /*
- * if write was kicked off without a flush, or if we promised it
- * wouldn't be a flush, flush the next sequence number instead
- */
- buf = journal_seq_to_buf(j, seq);
- if (buf->noflush) {
- seq++;
- goto recheck_need_open;
- }
-
- buf->must_flush = true;
-
- if (parent && !closure_wait(&buf->wait, parent))
- BUG();
-want_write:
- if (seq == journal_cur_seq(j))
- journal_entry_want_write(j);
-out:
- spin_unlock(&j->lock);
- return ret;
-}
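The kernel-doc above pins down the return contract: 1 means @seq is already durable, 0 means a flush is now in flight and the caller should wait on @parent, negative means @seq will never be flushed. A hypothetical caller-side sketch of handling those three cases (the closure wait itself is only described in a comment):

/*
 * Hypothetical caller: ret would come from
 * bch2_journal_flush_seq_async(j, seq, &cl).
 */
int handle_flush_result(int ret)
{
	if (ret < 0)
		return ret;	/* @seq will never be flushed: surface the error */
	if (ret == 1)
		return 0;	/* already flushed, nothing to wait for */

	/* ret == 0: a write is in flight; wait on the closure passed as @parent */
	return 0;
}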
-
-int bch2_journal_flush_seq(struct journal *j, u64 seq, unsigned task_state)
-{
- u64 start_time = local_clock();
- int ret, ret2;
-
- /*
- * Don't update time_stats when @seq is already flushed:
- */
- if (seq <= j->flushed_seq_ondisk)
- return 0;
-
- ret = wait_event_state(j->wait,
- (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)),
- task_state);
-
- if (!ret)
- bch2_time_stats_update(j->flush_seq_time, start_time);
-
- return ret ?: ret2 < 0 ? ret2 : 0;
-}
-
-/*
- * bch2_journal_flush_async - if there is an open journal entry, or a journal
- * still being written, write it and wait for the write to complete
- */
-void bch2_journal_flush_async(struct journal *j, struct closure *parent)
-{
- bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
-}
-
-int bch2_journal_flush(struct journal *j)
-{
- return bch2_journal_flush_seq(j, atomic64_read(&j->seq), TASK_UNINTERRUPTIBLE);
-}
-
-/*
- * bch2_journal_noflush_seq - tell the journal not to issue any flushes before
- * @seq
- */
-bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- u64 unwritten_seq;
- bool ret = false;
-
- if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
- return false;
-
- if (seq <= c->journal.flushed_seq_ondisk)
- return false;
-
- spin_lock(&j->lock);
- if (seq <= c->journal.flushed_seq_ondisk)
- goto out;
-
- for (unwritten_seq = journal_last_unwritten_seq(j);
- unwritten_seq < seq;
- unwritten_seq++) {
- struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);
-
-		/* journal flush already in flight, or flush requested */
- if (buf->must_flush)
- goto out;
-
- buf->noflush = true;
- }
-
- ret = true;
-out:
- spin_unlock(&j->lock);
- return ret;
-}
-
-int bch2_journal_meta(struct journal *j)
-{
- struct journal_buf *buf;
- struct journal_res res;
- int ret;
-
- memset(&res, 0, sizeof(res));
-
- ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
- if (ret)
- return ret;
-
- buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
- buf->must_flush = true;
-
- if (!buf->flush_time) {
- buf->flush_time = local_clock() ?: 1;
- buf->expires = jiffies;
- }
-
- bch2_journal_res_put(j, &res);
-
- return bch2_journal_flush_seq(j, res.seq, TASK_UNINTERRUPTIBLE);
-}
-
-/* block/unlock the journal: */
-
-void bch2_journal_unblock(struct journal *j)
-{
- spin_lock(&j->lock);
- j->blocked--;
- spin_unlock(&j->lock);
-
- journal_wake(j);
-}
-
-void bch2_journal_block(struct journal *j)
-{
- spin_lock(&j->lock);
- j->blocked++;
- spin_unlock(&j->lock);
-
- journal_quiesce(j);
-}
-
-static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
-{
- struct journal_buf *ret = NULL;
-
-	/* We're inside wait_event(), but using mutex_lock(): */
- sched_annotate_sleep();
- mutex_lock(&j->buf_lock);
- spin_lock(&j->lock);
- max_seq = min(max_seq, journal_cur_seq(j));
-
- for (u64 seq = journal_last_unwritten_seq(j);
- seq <= max_seq;
- seq++) {
- unsigned idx = seq & JOURNAL_BUF_MASK;
- struct journal_buf *buf = j->buf + idx;
-
- if (buf->need_flush_to_write_buffer) {
- if (seq == journal_cur_seq(j))
- __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
-
- union journal_res_state s;
- s.v = atomic64_read_acquire(&j->reservations.counter);
-
- ret = journal_state_count(s, idx)
- ? ERR_PTR(-EAGAIN)
- : buf;
- break;
- }
- }
-
- spin_unlock(&j->lock);
- if (IS_ERR_OR_NULL(ret))
- mutex_unlock(&j->buf_lock);
- return ret;
-}
-
-struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
-{
- struct journal_buf *ret;
-
- wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j, max_seq)) != ERR_PTR(-EAGAIN));
- return ret;
-}
-
-/* allocate journal on a device: */
-
-static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
- bool new_fs, struct closure *cl)
-{
- struct bch_fs *c = ca->fs;
- struct journal_device *ja = &ca->journal;
- u64 *new_bucket_seq = NULL, *new_buckets = NULL;
- struct open_bucket **ob = NULL;
- long *bu = NULL;
- unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
- int ret = 0;
-
- BUG_ON(nr <= ja->nr);
-
- bu = kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
- ob = kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
- new_buckets = kcalloc(nr, sizeof(u64), GFP_KERNEL);
- new_bucket_seq = kcalloc(nr, sizeof(u64), GFP_KERNEL);
- if (!bu || !ob || !new_buckets || !new_bucket_seq) {
- ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
- goto err_free;
- }
-
- for (nr_got = 0; nr_got < nr_want; nr_got++) {
- if (new_fs) {
- bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
- if (bu[nr_got] < 0) {
- ret = -BCH_ERR_ENOSPC_bucket_alloc;
- break;
- }
- } else {
- ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal,
- BCH_DATA_journal, cl);
- ret = PTR_ERR_OR_ZERO(ob[nr_got]);
- if (ret)
- break;
-
- ret = bch2_trans_run(c,
- bch2_trans_mark_metadata_bucket(trans, ca,
- ob[nr_got]->bucket, BCH_DATA_journal,
- ca->mi.bucket_size, BTREE_TRIGGER_transactional));
- if (ret) {
- bch2_open_bucket_put(c, ob[nr_got]);
- bch_err_msg(c, ret, "marking new journal buckets");
- break;
- }
-
- bu[nr_got] = ob[nr_got]->bucket;
- }
- }
-
- if (!nr_got)
- goto err_free;
-
- /* Don't return an error if we successfully allocated some buckets: */
- ret = 0;
-
- if (c) {
- bch2_journal_flush_all_pins(&c->journal);
- bch2_journal_block(&c->journal);
- mutex_lock(&c->sb_lock);
- }
-
- memcpy(new_buckets, ja->buckets, ja->nr * sizeof(u64));
- memcpy(new_bucket_seq, ja->bucket_seq, ja->nr * sizeof(u64));
-
- BUG_ON(ja->discard_idx > ja->nr);
-
- pos = ja->discard_idx ?: ja->nr;
-
- memmove(new_buckets + pos + nr_got,
- new_buckets + pos,
- sizeof(new_buckets[0]) * (ja->nr - pos));
- memmove(new_bucket_seq + pos + nr_got,
- new_bucket_seq + pos,
- sizeof(new_bucket_seq[0]) * (ja->nr - pos));
-
- for (i = 0; i < nr_got; i++) {
- new_buckets[pos + i] = bu[i];
- new_bucket_seq[pos + i] = 0;
- }
-
- nr = ja->nr + nr_got;
-
- ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
- if (ret)
- goto err_unblock;
-
- if (!new_fs)
- bch2_write_super(c);
-
- /* Commit: */
- if (c)
- spin_lock(&c->journal.lock);
-
- swap(new_buckets, ja->buckets);
- swap(new_bucket_seq, ja->bucket_seq);
- ja->nr = nr;
-
- if (pos <= ja->discard_idx)
- ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
- if (pos <= ja->dirty_idx_ondisk)
- ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
- if (pos <= ja->dirty_idx)
- ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
- if (pos <= ja->cur_idx)
- ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;
-
- if (c)
- spin_unlock(&c->journal.lock);
-err_unblock:
- if (c) {
- bch2_journal_unblock(&c->journal);
- mutex_unlock(&c->sb_lock);
- }
-
- if (ret && !new_fs)
- for (i = 0; i < nr_got; i++)
- bch2_trans_run(c,
- bch2_trans_mark_metadata_bucket(trans, ca,
- bu[i], BCH_DATA_free, 0,
- BTREE_TRIGGER_transactional));
-err_free:
- if (!new_fs)
- for (i = 0; i < nr_got; i++)
- bch2_open_bucket_put(c, ob[i]);
-
- kfree(new_bucket_seq);
- kfree(new_buckets);
- kfree(ob);
- kfree(bu);
- return ret;
-}
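New journal buckets are spliced into the bucket arrays at the discard position: the tail is shifted up with memmove and the fresh buckets dropped into the gap (the real code builds the result in freshly allocated arrays before swapping them in). A standalone sketch of the splice with arbitrary values:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint64_t buckets[8] = { 10, 11, 12, 13, 14 };
	unsigned nr = 5, pos = 2;		/* insert point (e.g. discard_idx) */
	uint64_t new_bu[2] = { 100, 101 };
	unsigned nr_got = 2;

	/* shift the tail up to make room ... */
	memmove(buckets + pos + nr_got, buckets + pos,
		sizeof(buckets[0]) * (nr - pos));
	/* ... and drop the new buckets into the gap */
	for (unsigned i = 0; i < nr_got; i++)
		buckets[pos + i] = new_bu[i];
	nr += nr_got;

	for (unsigned i = 0; i < nr; i++)
		printf("%llu ", (unsigned long long)buckets[i]);
	printf("\n");	/* 10 11 100 101 12 13 14 */
	return 0;
}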
-
-/*
- * Allocate more journal space at runtime - not currently making use of it, but
- * the code works:
- */
-int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
- unsigned nr)
-{
- struct journal_device *ja = &ca->journal;
- struct closure cl;
- int ret = 0;
-
- closure_init_stack(&cl);
-
- down_write(&c->state_lock);
-
- /* don't handle reducing nr of buckets yet: */
- if (nr < ja->nr)
- goto unlock;
-
- while (ja->nr < nr) {
- struct disk_reservation disk_res = { 0, 0, 0 };
-
- /*
- * note: journal buckets aren't really counted as _sectors_ used yet, so
- * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
- * when space used goes up without a reservation - but we do need the
- * reservation to ensure we'll actually be able to allocate:
- *
- * XXX: that's not right, disk reservations only ensure a
- * filesystem-wide allocation will succeed, this is a device
- * specific allocation - we can hang here:
- */
-
- ret = bch2_disk_reservation_get(c, &disk_res,
- bucket_to_sector(ca, nr - ja->nr), 1, 0);
- if (ret)
- break;
-
- ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);
-
- bch2_disk_reservation_put(c, &disk_res);
-
- closure_sync(&cl);
-
- if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
- break;
- }
-
- bch_err_fn(c, ret);
-unlock:
- up_write(&c->state_lock);
- return ret;
-}
-
-int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs)
-{
- unsigned nr;
- int ret;
-
- if (dynamic_fault("bcachefs:add:journal_alloc")) {
- ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
- goto err;
- }
-
- /* 1/128th of the device by default: */
- nr = ca->mi.nbuckets >> 7;
-
- /*
- * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
- * is smaller:
- */
- nr = clamp_t(unsigned, nr,
- BCH_JOURNAL_BUCKETS_MIN,
- min(1 << 13,
- (1 << 24) / ca->mi.bucket_size));
-
- ret = __bch2_set_nr_journal_buckets(ca, nr, new_fs, NULL);
-err:
- bch_err_fn(ca, ret);
- return ret;
-}
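Worked through with concrete, made-up numbers: the default journal size above is 1/128th of the device's buckets, clamped between BCH_JOURNAL_BUCKETS_MIN and the smaller of 8192 buckets or 8GB worth of buckets (the minimum used below is an assumed value):

#include <stdio.h>
#include <stdint.h>

static unsigned clamp_u(unsigned v, unsigned lo, unsigned hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
	uint64_t nbuckets = 1 << 20;		/* 1M buckets (made-up device) */
	unsigned bucket_size = 1024;		/* sectors, i.e. 512KiB buckets */
	unsigned buckets_min = 8;		/* assumed BCH_JOURNAL_BUCKETS_MIN */

	unsigned nr = nbuckets >> 7;		/* 1/128th -> 8192 */
	unsigned hi = (1 << 13) < (1 << 24) / bucket_size
		    ? (1 << 13) : (1 << 24) / bucket_size;

	nr = clamp_u(nr, buckets_min, hi);
	/* (1 << 24) sectors = 8GB; / 1024 = 16384, so the 8192-bucket cap wins */
	printf("journal gets %u buckets\n", nr);	/* 8192 */
	return 0;
}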
-
-int bch2_fs_journal_alloc(struct bch_fs *c)
-{
- for_each_online_member(c, ca) {
- if (ca->journal.nr)
- continue;
-
- int ret = bch2_dev_journal_alloc(ca, true);
- if (ret) {
- percpu_ref_put(&ca->io_ref);
- return ret;
- }
- }
-
- return 0;
-}
-
-/* startup/shutdown: */
-
-static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
-{
- bool ret = false;
- u64 seq;
-
- spin_lock(&j->lock);
- for (seq = journal_last_unwritten_seq(j);
- seq <= journal_cur_seq(j) && !ret;
- seq++) {
- struct journal_buf *buf = journal_seq_to_buf(j, seq);
-
- if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
- ret = true;
- }
- spin_unlock(&j->lock);
-
- return ret;
-}
-
-void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
-{
- wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
-}
-
-void bch2_fs_journal_stop(struct journal *j)
-{
- if (!test_bit(JOURNAL_running, &j->flags))
- return;
-
- bch2_journal_reclaim_stop(j);
- bch2_journal_flush_all_pins(j);
-
- wait_event(j->wait, bch2_journal_entry_close(j));
-
- /*
- * Always write a new journal entry, to make sure the clock hands are up
- * to date (and match the superblock)
- */
- bch2_journal_meta(j);
-
- journal_quiesce(j);
- cancel_delayed_work_sync(&j->write_work);
-
- WARN(!bch2_journal_error(j) &&
- test_bit(JOURNAL_replay_done, &j->flags) &&
- j->last_empty_seq != journal_cur_seq(j),
- "journal shutdown error: cur seq %llu but last empty seq %llu",
- journal_cur_seq(j), j->last_empty_seq);
-
- if (!bch2_journal_error(j))
- clear_bit(JOURNAL_running, &j->flags);
-}
-
-int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct journal_entry_pin_list *p;
- struct journal_replay *i, **_i;
- struct genradix_iter iter;
- bool had_entries = false;
- u64 last_seq = cur_seq, nr, seq;
-
- genradix_for_each_reverse(&c->journal_entries, iter, _i) {
- i = *_i;
-
- if (journal_replay_ignore(i))
- continue;
-
- last_seq = le64_to_cpu(i->j.last_seq);
- break;
- }
-
- nr = cur_seq - last_seq;
-
- if (nr + 1 > j->pin.size) {
- free_fifo(&j->pin);
- init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
- if (!j->pin.data) {
- bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
- return -BCH_ERR_ENOMEM_journal_pin_fifo;
- }
- }
-
- j->replay_journal_seq = last_seq;
- j->replay_journal_seq_end = cur_seq;
- j->last_seq_ondisk = last_seq;
- j->flushed_seq_ondisk = cur_seq - 1;
- j->seq_ondisk = cur_seq - 1;
- j->pin.front = last_seq;
- j->pin.back = cur_seq;
- atomic64_set(&j->seq, cur_seq - 1);
-
- fifo_for_each_entry_ptr(p, &j->pin, seq)
- journal_pin_list_init(p, 1);
-
- genradix_for_each(&c->journal_entries, iter, _i) {
- i = *_i;
-
- if (journal_replay_ignore(i))
- continue;
-
- seq = le64_to_cpu(i->j.seq);
- BUG_ON(seq >= cur_seq);
-
- if (seq < last_seq)
- continue;
-
- if (journal_entry_empty(&i->j))
- j->last_empty_seq = le64_to_cpu(i->j.seq);
-
- p = journal_seq_pin(j, seq);
-
- p->devs.nr = 0;
- darray_for_each(i->ptrs, ptr)
- bch2_dev_list_add_dev(&p->devs, ptr->dev);
-
- had_entries = true;
- }
-
- if (!had_entries)
- j->last_empty_seq = cur_seq - 1; /* to match j->seq */
-
- spin_lock(&j->lock);
-
- set_bit(JOURNAL_running, &j->flags);
- j->last_flush_write = jiffies;
-
- j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
- j->reservations.unwritten_idx++;
-
- c->last_bucket_seq_cleanup = journal_cur_seq(j);
-
- bch2_journal_space_available(j);
- spin_unlock(&j->lock);
-
- return bch2_journal_reclaim_start(j);
-}
-
-/* init/exit: */
-
-void bch2_dev_journal_exit(struct bch_dev *ca)
-{
- struct journal_device *ja = &ca->journal;
-
- for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
- kfree(ja->bio[i]);
- ja->bio[i] = NULL;
- }
-
- kfree(ja->buckets);
- kfree(ja->bucket_seq);
- ja->buckets = NULL;
- ja->bucket_seq = NULL;
-}
-
-int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
-{
- struct journal_device *ja = &ca->journal;
- struct bch_sb_field_journal *journal_buckets =
- bch2_sb_field_get(sb, journal);
- struct bch_sb_field_journal_v2 *journal_buckets_v2 =
- bch2_sb_field_get(sb, journal_v2);
-
- ja->nr = 0;
-
- if (journal_buckets_v2) {
- unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
-
- for (unsigned i = 0; i < nr; i++)
- ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
- } else if (journal_buckets) {
- ja->nr = bch2_nr_journal_buckets(journal_buckets);
- }
-
- ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
- if (!ja->bucket_seq)
- return -BCH_ERR_ENOMEM_dev_journal_init;
-
- unsigned nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);
-
- for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
- ja->bio[i] = kmalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
- nr_bvecs), GFP_KERNEL);
- if (!ja->bio[i])
- return -BCH_ERR_ENOMEM_dev_journal_init;
-
- ja->bio[i]->ca = ca;
- ja->bio[i]->buf_idx = i;
- bio_init(&ja->bio[i]->bio, NULL, ja->bio[i]->bio.bi_inline_vecs, nr_bvecs, 0);
- }
-
- ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
- if (!ja->buckets)
- return -BCH_ERR_ENOMEM_dev_journal_init;
-
- if (journal_buckets_v2) {
- unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
- unsigned dst = 0;
-
- for (unsigned i = 0; i < nr; i++)
- for (unsigned j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
- ja->buckets[dst++] =
- le64_to_cpu(journal_buckets_v2->d[i].start) + j;
- } else if (journal_buckets) {
- for (unsigned i = 0; i < ja->nr; i++)
- ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
- }
-
- return 0;
-}
-
-void bch2_fs_journal_exit(struct journal *j)
-{
- if (j->wq)
- destroy_workqueue(j->wq);
-
- darray_exit(&j->early_journal_entries);
-
- for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
- kvfree(j->buf[i].data);
- free_fifo(&j->pin);
-}
-
-int bch2_fs_journal_init(struct journal *j)
-{
- static struct lock_class_key res_key;
-
- mutex_init(&j->buf_lock);
- spin_lock_init(&j->lock);
- spin_lock_init(&j->err_lock);
- init_waitqueue_head(&j->wait);
- INIT_DELAYED_WORK(&j->write_work, journal_write_work);
- init_waitqueue_head(&j->reclaim_wait);
- init_waitqueue_head(&j->pin_flush_wait);
- mutex_init(&j->reclaim_lock);
- mutex_init(&j->discard_lock);
-
- lockdep_init_map(&j->res_map, "journal res", &res_key, 0);
-
- atomic64_set(&j->reservations.counter,
- ((union journal_res_state)
- { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
-
- if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
- return -BCH_ERR_ENOMEM_journal_pin_fifo;
-
- for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) {
- j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
- j->buf[i].data = kvmalloc(j->buf[i].buf_size, GFP_KERNEL);
- if (!j->buf[i].data)
- return -BCH_ERR_ENOMEM_journal_buf;
- j->buf[i].idx = i;
- }
-
- j->pin.front = j->pin.back = 1;
-
- j->wq = alloc_workqueue("bcachefs_journal",
- WQ_HIGHPRI|WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512);
- if (!j->wq)
- return -BCH_ERR_ENOMEM_fs_other_alloc;
- return 0;
-}
-
-/* debug: */
-
-static const char * const bch2_journal_flags_strs[] = {
-#define x(n) #n,
- JOURNAL_FLAGS()
-#undef x
- NULL
-};
-
-void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- union journal_res_state s;
- unsigned long now = jiffies;
- u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;
-
- printbuf_tabstops_reset(out);
- printbuf_tabstop_push(out, 28);
- out->atomic++;
-
- rcu_read_lock();
- s = READ_ONCE(j->reservations);
-
- prt_printf(out, "flags:\t");
- prt_bitflags(out, bch2_journal_flags_strs, j->flags);
- prt_newline(out);
- prt_printf(out, "dirty journal entries:\t%llu/%llu\n", fifo_used(&j->pin), j->pin.size);
- prt_printf(out, "seq:\t%llu\n", journal_cur_seq(j));
- prt_printf(out, "seq_ondisk:\t%llu\n", j->seq_ondisk);
- prt_printf(out, "last_seq:\t%llu\n", journal_last_seq(j));
- prt_printf(out, "last_seq_ondisk:\t%llu\n", j->last_seq_ondisk);
- prt_printf(out, "flushed_seq_ondisk:\t%llu\n", j->flushed_seq_ondisk);
- prt_printf(out, "watermark:\t%s\n", bch2_watermarks[j->watermark]);
- prt_printf(out, "each entry reserved:\t%u\n", j->entry_u64s_reserved);
- prt_printf(out, "nr flush writes:\t%llu\n", j->nr_flush_writes);
- prt_printf(out, "nr noflush writes:\t%llu\n", j->nr_noflush_writes);
- prt_printf(out, "average write size:\t");
- prt_human_readable_u64(out, nr_writes ? div64_u64(j->entry_bytes_written, nr_writes) : 0);
- prt_newline(out);
- prt_printf(out, "nr direct reclaim:\t%llu\n", j->nr_direct_reclaim);
- prt_printf(out, "nr background reclaim:\t%llu\n", j->nr_background_reclaim);
- prt_printf(out, "reclaim kicked:\t%u\n", j->reclaim_kicked);
- prt_printf(out, "reclaim runs in:\t%u ms\n", time_after(j->next_reclaim, now)
- ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
- prt_printf(out, "blocked:\t%u\n", j->blocked);
- prt_printf(out, "current entry sectors:\t%u\n", j->cur_entry_sectors);
- prt_printf(out, "current entry error:\t%s\n", bch2_journal_errors[j->cur_entry_error]);
- prt_printf(out, "current entry:\t");
-
- switch (s.cur_entry_offset) {
- case JOURNAL_ENTRY_ERROR_VAL:
- prt_printf(out, "error\n");
- break;
- case JOURNAL_ENTRY_CLOSED_VAL:
- prt_printf(out, "closed\n");
- break;
- default:
- prt_printf(out, "%u/%u\n", s.cur_entry_offset, j->cur_entry_u64s);
- break;
- }
-
- prt_printf(out, "unwritten entries:\n");
- bch2_journal_bufs_to_text(out, j);
-
- prt_printf(out, "space:\n");
- printbuf_indent_add(out, 2);
- prt_printf(out, "discarded\t%u:%u\n",
- j->space[journal_space_discarded].next_entry,
- j->space[journal_space_discarded].total);
- prt_printf(out, "clean ondisk\t%u:%u\n",
- j->space[journal_space_clean_ondisk].next_entry,
- j->space[journal_space_clean_ondisk].total);
- prt_printf(out, "clean\t%u:%u\n",
- j->space[journal_space_clean].next_entry,
- j->space[journal_space_clean].total);
- prt_printf(out, "total\t%u:%u\n",
- j->space[journal_space_total].next_entry,
- j->space[journal_space_total].total);
- printbuf_indent_sub(out, 2);
-
- for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
- struct journal_device *ja = &ca->journal;
-
- if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
- continue;
-
- if (!ja->nr)
- continue;
-
- prt_printf(out, "dev %u:\n", ca->dev_idx);
- printbuf_indent_add(out, 2);
- prt_printf(out, "nr\t%u\n", ja->nr);
- prt_printf(out, "bucket size\t%u\n", ca->mi.bucket_size);
- prt_printf(out, "available\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
- prt_printf(out, "discard_idx\t%u\n", ja->discard_idx);
- prt_printf(out, "dirty_ondisk\t%u (seq %llu)\n",ja->dirty_idx_ondisk, ja->bucket_seq[ja->dirty_idx_ondisk]);
- prt_printf(out, "dirty_idx\t%u (seq %llu)\n", ja->dirty_idx, ja->bucket_seq[ja->dirty_idx]);
- prt_printf(out, "cur_idx\t%u (seq %llu)\n", ja->cur_idx, ja->bucket_seq[ja->cur_idx]);
- printbuf_indent_sub(out, 2);
- }
-
- rcu_read_unlock();
-
- --out->atomic;
-}
-
-void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
-{
- spin_lock(&j->lock);
- __bch2_journal_debug_to_text(out, j);
- spin_unlock(&j->lock);
-}
-
-bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
-{
- struct journal_entry_pin_list *pin_list;
- struct journal_entry_pin *pin;
-
- spin_lock(&j->lock);
- if (!test_bit(JOURNAL_running, &j->flags)) {
- spin_unlock(&j->lock);
- return true;
- }
-
- *seq = max(*seq, j->pin.front);
-
- if (*seq >= j->pin.back) {
- spin_unlock(&j->lock);
- return true;
- }
-
- out->atomic++;
-
- pin_list = journal_seq_pin(j, *seq);
-
- prt_printf(out, "%llu: count %u\n", *seq, atomic_read(&pin_list->count));
- printbuf_indent_add(out, 2);
-
- for (unsigned i = 0; i < ARRAY_SIZE(pin_list->list); i++)
- list_for_each_entry(pin, &pin_list->list[i], list)
- prt_printf(out, "\t%px %ps\n", pin, pin->flush);
-
- if (!list_empty(&pin_list->flushed))
- prt_printf(out, "flushed:\n");
-
- list_for_each_entry(pin, &pin_list->flushed, list)
- prt_printf(out, "\t%px %ps\n", pin, pin->flush);
-
- printbuf_indent_sub(out, 2);
-
- --out->atomic;
- spin_unlock(&j->lock);
-
- return false;
-}
-
-void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
-{
- u64 seq = 0;
-
- while (!bch2_journal_seq_pins_to_text(out, j, &seq))
- seq++;
-}
diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h
deleted file mode 100644
index 2762be6f9814..000000000000
--- a/fs/bcachefs/journal.h
+++ /dev/null
@@ -1,449 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_JOURNAL_H
-#define _BCACHEFS_JOURNAL_H
-
-/*
- * THE JOURNAL:
- *
- * The primary purpose of the journal is to log updates (insertions) to the
- * b-tree, to avoid having to do synchronous updates to the b-tree on disk.
- *
- * Without the journal, the b-tree is always internally consistent on
- * disk - and in fact, in the earliest incarnations bcache didn't have a journal
- * but did handle unclean shutdowns by doing all index updates synchronously
- * (with coalescing).
- *
- * Updates to interior nodes still happen synchronously and without the journal
- * (for simplicity) - this may change eventually but updates to interior nodes
- * are rare enough it's not a huge priority.
- *
- * This means the journal is relatively separate from the b-tree; it consists of
- * just a list of keys and journal replay consists of just redoing those
- * insertions in the same order that they appear in the journal.
- *
- * PERSISTENCE:
- *
- * For synchronous updates (where we're waiting on the index update to hit
- * disk), the journal entry will be written out immediately (or as soon as
- * possible, if the write for the previous journal entry was still in flight).
- *
- * Synchronous updates are specified by passing a closure (@flush_cl) to
- * bch2_btree_insert() or bch_btree_insert_node(), which then pass that parameter
- * down to the journalling code. That closure will wait on the journal write to
- * complete (via closure_wait()).
- *
- * If the index update wasn't synchronous, the journal entry will be
- * written out after 10 ms have elapsed, by default (the delay_ms field
- * in struct journal).
- *
- * JOURNAL ENTRIES:
- *
- * A journal entry is variable size (struct jset), it's got a fixed length
- * header and then a variable number of struct jset_entry entries.
- *
- * Journal entries are identified by monotonically increasing 64 bit sequence
- * numbers - jset->seq; other places in the code refer to this sequence number.
- *
- * A jset_entry entry contains one or more bkeys (which is what gets inserted
- * into the b-tree). We need a container to indicate which b-tree the key is
- * for; also, the roots of the various b-trees are stored in jset_entry entries
- * (one for each b-tree) - this lets us add new b-tree types without changing
- * the on disk format.
- *
- * We also keep some things in the journal header that are logically part of the
- * superblock - all the things that are frequently updated. This is for future
- * bcache on raw flash support; the superblock (which will become another
- * journal) can't be moved or wear leveled, so it contains just enough
- * information to find the main journal, and the superblock only has to be
- * rewritten when we want to move/wear level the main journal.
- *
- * JOURNAL LAYOUT ON DISK:
- *
- * The journal is written to a ringbuffer of buckets (which is kept in the
- * superblock); the individual buckets are not necessarily contiguous on disk
- * which means that journal entries are not allowed to span buckets, but also
- * that we can resize the journal at runtime if desired (unimplemented).
- *
- * The journal buckets exist in the same pool as all the other buckets that are
- * managed by the allocator and garbage collection - garbage collection marks
- * the journal buckets as metadata buckets.
- *
- * OPEN/DIRTY JOURNAL ENTRIES:
- *
- * Open/dirty journal entries are journal entries that contain b-tree updates
- * that have not yet been written out to the b-tree on disk. We have to track
- * which journal entries are dirty, and we also have to avoid wrapping around
- * the journal and overwriting old but still dirty journal entries with new
- * journal entries.
- *
- * On disk, this is represented with the "last_seq" field of struct jset;
- * last_seq is the first sequence number that journal replay has to replay.
- *
- * To avoid overwriting dirty journal entries on disk, we keep a mapping (in
- * journal_device->seq) from each journal bucket to the highest sequence number
- * of any journal entry it contains. Then, by comparing that against last_seq we
- * can determine whether that journal bucket contains dirty journal entries or
- * not.
- *
- * To track which journal entries are dirty, we maintain a fifo of refcounts
- * (where each entry corresponds to a specific sequence number) - when a ref
- * goes to 0, that journal entry is no longer dirty.
- *
- * Journalling of index updates is done at the same time as the b-tree itself is
- * being modified (see btree_insert_key()); when we add the key to the journal
- * the pending b-tree write takes a ref on the journal entry the key was added
- * to. If a pending b-tree write would need to take refs on multiple dirty
- * journal entries, it only keeps the ref on the oldest one (since a newer
- * journal entry will still be replayed if an older entry was dirty).
- *
- * JOURNAL FILLING UP:
- *
- * There are two ways the journal could fill up; either we could run out of
- * space to write to, or we could have too many open journal entries and run out
- * of room in the fifo of refcounts. Since those refcounts are decremented
- * without any locking we can't safely resize that fifo, so we handle it the
- * same way.
- *
- * If the journal fills up, we start flushing dirty btree nodes until we can
- * allocate space for a journal write again - preferentially flushing btree
- * nodes that are pinning the oldest journal entries first.
- */
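To make the refcount-fifo description above concrete, here is a minimal, self-contained toy model of the same bookkeeping (all names, sizes and the toy_ prefix are invented for illustration; this is not bcachefs code, whose real state lives in journal_types.h):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PIN_NR	8			/* must be a power of two */

struct toy_pin_fifo {
	uint64_t	front;			/* oldest dirty sequence number */
	uint64_t	back;			/* one past the newest */
	unsigned	count[TOY_PIN_NR];	/* refcount per in-flight entry */
};

static unsigned *toy_pin(struct toy_pin_fifo *f, uint64_t seq)
{
	return &f->count[seq & (TOY_PIN_NR - 1)];
}

static void toy_pin_get(struct toy_pin_fifo *f, uint64_t seq)
{
	(*toy_pin(f, seq))++;
}

static void toy_pin_put(struct toy_pin_fifo *f, uint64_t seq)
{
	assert(*toy_pin(f, seq));
	(*toy_pin(f, seq))--;

	/* advance front past entries that are no longer dirty */
	while (f->front < f->back && !*toy_pin(f, f->front))
		f->front++;
}

int main(void)
{
	struct toy_pin_fifo f = { .front = 10, .back = 13 };

	toy_pin_get(&f, 10);	/* a btree write pins seq 10 */
	toy_pin_get(&f, 12);	/* another pins seq 12 */

	toy_pin_put(&f, 10);	/* oldest pin dropped */
	printf("last_seq would now be %llu\n", (unsigned long long) f.front);

	toy_pin_put(&f, 12);
	return 0;
}

The only point is the shape of the bookkeeping: front chases the oldest sequence number whose count is still nonzero, and that is the value last_seq reports.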
-
-#include <linux/hash.h>
-
-#include "journal_types.h"
-
-struct bch_fs;
-
-static inline void journal_wake(struct journal *j)
-{
- wake_up(&j->wait);
- closure_wake_up(&j->async_wait);
-}
-
-static inline struct journal_buf *journal_cur_buf(struct journal *j)
-{
- return j->buf + j->reservations.idx;
-}
-
-/* Sequence number of oldest dirty journal entry */
-
-static inline u64 journal_last_seq(struct journal *j)
-{
- return j->pin.front;
-}
-
-static inline u64 journal_cur_seq(struct journal *j)
-{
- return atomic64_read(&j->seq);
-}
-
-static inline u64 journal_last_unwritten_seq(struct journal *j)
-{
- return j->seq_ondisk + 1;
-}
-
-static inline int journal_state_count(union journal_res_state s, int idx)
-{
- switch (idx) {
- case 0: return s.buf0_count;
- case 1: return s.buf1_count;
- case 2: return s.buf2_count;
- case 3: return s.buf3_count;
- }
- BUG();
-}
-
-static inline void journal_state_inc(union journal_res_state *s)
-{
- s->buf0_count += s->idx == 0;
- s->buf1_count += s->idx == 1;
- s->buf2_count += s->idx == 2;
- s->buf3_count += s->idx == 3;
-}
-
-/*
- * Amount of space that will be taken up by some keys in the journal (i.e.
- * including the jset_entry header)
- */
-static inline unsigned jset_u64s(unsigned u64s)
-{
- return u64s + sizeof(struct jset_entry) / sizeof(u64);
-}
-
-static inline int journal_entry_overhead(struct journal *j)
-{
- return sizeof(struct jset) / sizeof(u64) + j->entry_u64s_reserved;
-}
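As a quick sanity check of the accounting in jset_u64s() and journal_entry_overhead(): if struct jset_entry is 8 bytes (one u64), then jset_u64s(n) is simply n + 1. A throwaway sketch with an invented stand-in for the real header (the layout below is an assumption, not the authoritative on-disk definition):

#include <stdint.h>
#include <stdio.h>

/* stand-in for an 8-byte per-entry header; layout assumed for illustration */
struct toy_jset_entry {
	uint16_t	u64s;
	uint8_t		btree_id;
	uint8_t		level;
	uint8_t		type;
	uint8_t		pad[3];
};

static unsigned toy_jset_u64s(unsigned u64s)
{
	return u64s + sizeof(struct toy_jset_entry) / sizeof(uint64_t);
}

int main(void)
{
	/* a 3-u64 bkey costs 4 u64s of journal space in this model */
	printf("jset_u64s(3) = %u\n", toy_jset_u64s(3));
	return 0;
}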
-
-static inline struct jset_entry *
-bch2_journal_add_entry_noreservation(struct journal_buf *buf, size_t u64s)
-{
- struct jset *jset = buf->data;
- struct jset_entry *entry = vstruct_idx(jset, le32_to_cpu(jset->u64s));
-
- memset(entry, 0, sizeof(*entry));
- entry->u64s = cpu_to_le16(u64s);
-
- le32_add_cpu(&jset->u64s, jset_u64s(u64s));
-
- return entry;
-}
-
-static inline struct jset_entry *
-journal_res_entry(struct journal *j, struct journal_res *res)
-{
- return vstruct_idx(j->buf[res->idx].data, res->offset);
-}
-
-static inline unsigned journal_entry_init(struct jset_entry *entry, unsigned type,
- enum btree_id id, unsigned level,
- unsigned u64s)
-{
- entry->u64s = cpu_to_le16(u64s);
- entry->btree_id = id;
- entry->level = level;
- entry->type = type;
- entry->pad[0] = 0;
- entry->pad[1] = 0;
- entry->pad[2] = 0;
- return jset_u64s(u64s);
-}
-
-static inline unsigned journal_entry_set(struct jset_entry *entry, unsigned type,
- enum btree_id id, unsigned level,
- const void *data, unsigned u64s)
-{
- unsigned ret = journal_entry_init(entry, type, id, level, u64s);
-
- memcpy_u64s_small(entry->_data, data, u64s);
- return ret;
-}
-
-static inline struct jset_entry *
-bch2_journal_add_entry(struct journal *j, struct journal_res *res,
- unsigned type, enum btree_id id,
- unsigned level, unsigned u64s)
-{
- struct jset_entry *entry = journal_res_entry(j, res);
- unsigned actual = journal_entry_init(entry, type, id, level, u64s);
-
- EBUG_ON(!res->ref);
- EBUG_ON(actual > res->u64s);
-
- res->offset += actual;
- res->u64s -= actual;
- return entry;
-}
-
-static inline bool journal_entry_empty(struct jset *j)
-{
- if (j->seq != j->last_seq)
- return false;
-
- vstruct_for_each(j, i)
- if (i->type == BCH_JSET_ENTRY_btree_keys && i->u64s)
- return false;
- return true;
-}
-
-/*
- * Drop a reference on a buffer index and return the new reservation state.
- */
-static inline union journal_res_state journal_state_buf_put(struct journal *j, unsigned idx)
-{
- union journal_res_state s;
-
- s.v = atomic64_sub_return(((union journal_res_state) {
- .buf0_count = idx == 0,
- .buf1_count = idx == 1,
- .buf2_count = idx == 2,
- .buf3_count = idx == 3,
- }).v, &j->reservations.counter);
- return s;
-}
-
-bool bch2_journal_entry_close(struct journal *);
-void bch2_journal_do_writes(struct journal *);
-void bch2_journal_buf_put_final(struct journal *, u64);
-
-static inline void __bch2_journal_buf_put(struct journal *j, unsigned idx, u64 seq)
-{
- union journal_res_state s;
-
- s = journal_state_buf_put(j, idx);
- if (!journal_state_count(s, idx))
- bch2_journal_buf_put_final(j, seq);
-}
-
-static inline void bch2_journal_buf_put(struct journal *j, unsigned idx, u64 seq)
-{
- union journal_res_state s;
-
- s = journal_state_buf_put(j, idx);
- if (!journal_state_count(s, idx)) {
- spin_lock(&j->lock);
- bch2_journal_buf_put_final(j, seq);
- spin_unlock(&j->lock);
- }
-}
-
-/*
- * This function releases the journal write structure so other threads can
- * then proceed to add their keys as well.
- */
-static inline void bch2_journal_res_put(struct journal *j,
- struct journal_res *res)
-{
- if (!res->ref)
- return;
-
- lock_release(&j->res_map, _THIS_IP_);
-
- while (res->u64s)
- bch2_journal_add_entry(j, res,
- BCH_JSET_ENTRY_btree_keys,
- 0, 0, 0);
-
- bch2_journal_buf_put(j, res->idx, res->seq);
-
- res->ref = 0;
-}
-
-int bch2_journal_res_get_slowpath(struct journal *, struct journal_res *,
- unsigned);
-
-/* First bits for BCH_WATERMARK: */
-enum journal_res_flags {
- __JOURNAL_RES_GET_NONBLOCK = BCH_WATERMARK_BITS,
- __JOURNAL_RES_GET_CHECK,
-};
-
-#define JOURNAL_RES_GET_NONBLOCK (1 << __JOURNAL_RES_GET_NONBLOCK)
-#define JOURNAL_RES_GET_CHECK (1 << __JOURNAL_RES_GET_CHECK)
-
-static inline int journal_res_get_fast(struct journal *j,
- struct journal_res *res,
- unsigned flags)
-{
- union journal_res_state old, new;
-
- old.v = atomic64_read(&j->reservations.counter);
- do {
- new.v = old.v;
-
- /*
- * Check if there is still room in the current journal
- * entry:
- */
- if (new.cur_entry_offset + res->u64s > j->cur_entry_u64s)
- return 0;
-
- EBUG_ON(!journal_state_count(new, new.idx));
-
- if ((flags & BCH_WATERMARK_MASK) < j->watermark)
- return 0;
-
- new.cur_entry_offset += res->u64s;
- journal_state_inc(&new);
-
- /*
- * If the refcount would overflow, we have to wait:
- * XXX - tracepoint this:
- */
- if (!journal_state_count(new, new.idx))
- return 0;
-
- if (flags & JOURNAL_RES_GET_CHECK)
- return 1;
- } while (!atomic64_try_cmpxchg(&j->reservations.counter,
- &old.v, new.v));
-
- res->ref = true;
- res->idx = old.idx;
- res->offset = old.cur_entry_offset;
- res->seq = le64_to_cpu(j->buf[old.idx].data->seq);
- return 1;
-}
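The loop in journal_res_get_fast() is the interesting bit: the whole reservation state is packed into one 64-bit word, so claiming entry space and taking a buffer reference is a single atomic transition, retried on contention. A stripped-down, single-buffer model of just that technique (field layout, widths and names are invented here; the real packing is union journal_res_state):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_ENTRY_U64S	64	/* capacity of the open entry, in u64s */

static _Atomic uint64_t toy_state;	/* bits [63:32] refcount, [31:0] offset */

static bool toy_res_get_fast(unsigned u64s, unsigned *offset)
{
	uint64_t old = atomic_load(&toy_state), new;

	do {
		uint32_t cur_offset = (uint32_t) old;

		if (cur_offset + u64s > TOY_ENTRY_U64S)
			return false;		/* entry full - take the slow path */

		new = old + u64s + (1ULL << 32);	/* bump offset and refcount */
		*offset = cur_offset;
	} while (!atomic_compare_exchange_weak(&toy_state, &old, new));

	return true;
}

static void toy_res_put(void)
{
	atomic_fetch_sub(&toy_state, 1ULL << 32);	/* drop the reference */
}

int main(void)
{
	unsigned offset;

	if (toy_res_get_fast(10, &offset))
		printf("got 10 u64s at offset %u\n", offset);	/* offset 0 */
	if (toy_res_get_fast(20, &offset))
		printf("got 20 u64s at offset %u\n", offset);	/* offset 10 */
	toy_res_put();
	toy_res_put();
	return 0;
}

The real version also encodes which of the four buffers is current, per-buffer refcounts, the closed/error sentinel offsets and the watermark check, but the claim-and-retry shape is the same.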
-
-static inline int bch2_journal_res_get(struct journal *j, struct journal_res *res,
- unsigned u64s, unsigned flags)
-{
- int ret;
-
- EBUG_ON(res->ref);
- EBUG_ON(!test_bit(JOURNAL_running, &j->flags));
-
- res->u64s = u64s;
-
- if (journal_res_get_fast(j, res, flags))
- goto out;
-
- ret = bch2_journal_res_get_slowpath(j, res, flags);
- if (ret)
- return ret;
-out:
- if (!(flags & JOURNAL_RES_GET_CHECK)) {
- lock_acquire_shared(&j->res_map, 0,
- (flags & JOURNAL_RES_GET_NONBLOCK) != 0,
- NULL, _THIS_IP_);
- EBUG_ON(!res->ref);
- }
- return 0;
-}
-
-/* journal_entry_res: */
-
-void bch2_journal_entry_res_resize(struct journal *,
- struct journal_entry_res *,
- unsigned);
-
-int bch2_journal_flush_seq_async(struct journal *, u64, struct closure *);
-void bch2_journal_flush_async(struct journal *, struct closure *);
-
-int bch2_journal_flush_seq(struct journal *, u64, unsigned);
-int bch2_journal_flush(struct journal *);
-bool bch2_journal_noflush_seq(struct journal *, u64);
-int bch2_journal_meta(struct journal *);
-
-void bch2_journal_halt(struct journal *);
-
-static inline int bch2_journal_error(struct journal *j)
-{
- return j->reservations.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL
- ? -EIO : 0;
-}
-
-struct bch_dev;
-
-static inline void bch2_journal_set_replay_done(struct journal *j)
-{
- BUG_ON(!test_bit(JOURNAL_running, &j->flags));
- set_bit(JOURNAL_replay_done, &j->flags);
-}
-
-void bch2_journal_unblock(struct journal *);
-void bch2_journal_block(struct journal *);
-struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq);
-
-void __bch2_journal_debug_to_text(struct printbuf *, struct journal *);
-void bch2_journal_debug_to_text(struct printbuf *, struct journal *);
-void bch2_journal_pins_to_text(struct printbuf *, struct journal *);
-bool bch2_journal_seq_pins_to_text(struct printbuf *, struct journal *, u64 *);
-
-int bch2_set_nr_journal_buckets(struct bch_fs *, struct bch_dev *,
- unsigned nr);
-int bch2_dev_journal_alloc(struct bch_dev *, bool);
-int bch2_fs_journal_alloc(struct bch_fs *);
-
-void bch2_dev_journal_stop(struct journal *, struct bch_dev *);
-
-void bch2_fs_journal_stop(struct journal *);
-int bch2_fs_journal_start(struct journal *, u64);
-
-void bch2_dev_journal_exit(struct bch_dev *);
-int bch2_dev_journal_init(struct bch_dev *, struct bch_sb *);
-void bch2_fs_journal_exit(struct journal *);
-int bch2_fs_journal_init(struct journal *);
-
-#endif /* _BCACHEFS_JOURNAL_H */
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
deleted file mode 100644
index fb35dd336331..000000000000
--- a/fs/bcachefs/journal_io.c
+++ /dev/null
@@ -1,2081 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "bcachefs.h"
-#include "alloc_background.h"
-#include "alloc_foreground.h"
-#include "btree_io.h"
-#include "btree_update_interior.h"
-#include "btree_write_buffer.h"
-#include "buckets.h"
-#include "checksum.h"
-#include "disk_groups.h"
-#include "error.h"
-#include "journal.h"
-#include "journal_io.h"
-#include "journal_reclaim.h"
-#include "journal_seq_blacklist.h"
-#include "replicas.h"
-#include "sb-clean.h"
-#include "trace.h"
-
-void bch2_journal_pos_from_member_info_set(struct bch_fs *c)
-{
- lockdep_assert_held(&c->sb_lock);
-
- for_each_member_device(c, ca) {
- struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
-
- m->last_journal_bucket = cpu_to_le32(ca->journal.cur_idx);
- m->last_journal_bucket_offset = cpu_to_le32(ca->mi.bucket_size - ca->journal.sectors_free);
- }
-}
-
-void bch2_journal_pos_from_member_info_resume(struct bch_fs *c)
-{
- mutex_lock(&c->sb_lock);
- for_each_member_device(c, ca) {
- struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, ca->dev_idx);
-
- unsigned idx = le32_to_cpu(m.last_journal_bucket);
- if (idx < ca->journal.nr)
- ca->journal.cur_idx = idx;
- unsigned offset = le32_to_cpu(m.last_journal_bucket_offset);
- if (offset <= ca->mi.bucket_size)
- ca->journal.sectors_free = ca->mi.bucket_size - offset;
- }
- mutex_unlock(&c->sb_lock);
-}
-
-void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
- struct journal_replay *j)
-{
- darray_for_each(j->ptrs, i) {
- if (i != j->ptrs.data)
- prt_printf(out, " ");
- prt_printf(out, "%u:%u:%u (sector %llu)",
- i->dev, i->bucket, i->bucket_offset, i->sector);
- }
-}
-
-static void bch2_journal_replay_to_text(struct printbuf *out, struct bch_fs *c,
- struct journal_replay *j)
-{
- prt_printf(out, "seq %llu ", le64_to_cpu(j->j.seq));
-
- bch2_journal_ptrs_to_text(out, c, j);
-
- for_each_jset_entry_type(entry, &j->j, BCH_JSET_ENTRY_datetime) {
- struct jset_entry_datetime *datetime =
- container_of(entry, struct jset_entry_datetime, entry);
- bch2_prt_datetime(out, le64_to_cpu(datetime->seconds));
- break;
- }
-}
-
-static struct nonce journal_nonce(const struct jset *jset)
-{
- return (struct nonce) {{
- [0] = 0,
- [1] = ((__le32 *) &jset->seq)[0],
- [2] = ((__le32 *) &jset->seq)[1],
- [3] = BCH_NONCE_JOURNAL,
- }};
-}
-
-static bool jset_csum_good(struct bch_fs *c, struct jset *j, struct bch_csum *csum)
-{
- if (!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j))) {
- *csum = (struct bch_csum) {};
- return false;
- }
-
- *csum = csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j);
- return !bch2_crc_cmp(j->csum, *csum);
-}
-
-static inline u32 journal_entry_radix_idx(struct bch_fs *c, u64 seq)
-{
- return (seq - c->journal_entries_base_seq) & (~0U >> 1);
-}
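A tiny worked example of the indexing trick above, assuming the base sequence number is chosen the way journal_entry_add() below does it (max of 1 and the first seq found minus S32_MAX), so every entry within +-2 billion of the first one gets a distinct 31-bit index (toy code, not the kernel helpers):

#include <stdint.h>
#include <stdio.h>

static uint64_t toy_base_seq(uint64_t first_seq)
{
	int64_t base = (int64_t) first_seq - INT32_MAX;

	return base > 1 ? (uint64_t) base : 1;	/* max_t(s64, 1, seq - S32_MAX) */
}

static uint32_t toy_radix_idx(uint64_t base, uint64_t seq)
{
	return (seq - base) & (~0U >> 1);	/* 31-bit index */
}

int main(void)
{
	uint64_t first = 10000000000ULL;	/* first seq found on disk */
	uint64_t base = toy_base_seq(first);

	printf("base %llu: idx(base) = %u, idx(first) = %u\n",
	       (unsigned long long) base,
	       toy_radix_idx(base, base),
	       toy_radix_idx(base, first));
	return 0;
}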
-
-static void __journal_replay_free(struct bch_fs *c,
- struct journal_replay *i)
-{
- struct journal_replay **p =
- genradix_ptr(&c->journal_entries,
- journal_entry_radix_idx(c, le64_to_cpu(i->j.seq)));
-
- BUG_ON(*p != i);
- *p = NULL;
- kvfree(i);
-}
-
-static void journal_replay_free(struct bch_fs *c, struct journal_replay *i, bool blacklisted)
-{
- if (blacklisted)
- i->ignore_blacklisted = true;
- else
- i->ignore_not_dirty = true;
-
- if (!c->opts.read_entire_journal)
- __journal_replay_free(c, i);
-}
-
-struct journal_list {
- struct closure cl;
- u64 last_seq;
- struct mutex lock;
- int ret;
-};
-
-#define JOURNAL_ENTRY_ADD_OK 0
-#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE 5
-
-/*
- * Given a journal entry we just read, add it to the list of journal entries to
- * be replayed:
- */
-static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
- struct journal_ptr entry_ptr,
- struct journal_list *jlist, struct jset *j)
-{
- struct genradix_iter iter;
- struct journal_replay **_i, *i, *dup;
- size_t bytes = vstruct_bytes(j);
- u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0;
- struct printbuf buf = PRINTBUF;
- int ret = JOURNAL_ENTRY_ADD_OK;
-
- if (!c->journal.oldest_seq_found_ondisk ||
- le64_to_cpu(j->seq) < c->journal.oldest_seq_found_ondisk)
- c->journal.oldest_seq_found_ondisk = le64_to_cpu(j->seq);
-
- /* Is this entry older than the range we need? */
- if (!c->opts.read_entire_journal &&
- le64_to_cpu(j->seq) < jlist->last_seq)
- return JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
-
- /*
- * genradixes are indexed by a ulong, not a u64, so we can't index them
- * by sequence number directly: Assume instead that they will all fall
- * within the range of +-2 billion of the first one we find.
- */
- if (!c->journal_entries_base_seq)
- c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX);
-
- /* Drop entries we don't need anymore */
- if (last_seq > jlist->last_seq && !c->opts.read_entire_journal) {
- genradix_for_each_from(&c->journal_entries, iter, _i,
- journal_entry_radix_idx(c, jlist->last_seq)) {
- i = *_i;
-
- if (journal_replay_ignore(i))
- continue;
-
- if (le64_to_cpu(i->j.seq) >= last_seq)
- break;
-
- journal_replay_free(c, i, false);
- }
- }
-
- jlist->last_seq = max(jlist->last_seq, last_seq);
-
- _i = genradix_ptr_alloc(&c->journal_entries,
- journal_entry_radix_idx(c, le64_to_cpu(j->seq)),
- GFP_KERNEL);
- if (!_i)
- return -BCH_ERR_ENOMEM_journal_entry_add;
-
- /*
- * Duplicate journal entries? If so we want the one that didn't have a
- * checksum error:
- */
- dup = *_i;
- if (dup) {
- bool identical = bytes == vstruct_bytes(&dup->j) &&
- !memcmp(j, &dup->j, bytes);
- bool not_identical = !identical &&
- entry_ptr.csum_good &&
- dup->csum_good;
-
- bool same_device = false;
- darray_for_each(dup->ptrs, ptr)
- if (ptr->dev == ca->dev_idx)
- same_device = true;
-
- ret = darray_push(&dup->ptrs, entry_ptr);
- if (ret)
- goto out;
-
- bch2_journal_replay_to_text(&buf, c, dup);
-
- fsck_err_on(same_device,
- c, journal_entry_dup_same_device,
- "duplicate journal entry on same device\n %s",
- buf.buf);
-
- fsck_err_on(not_identical,
- c, journal_entry_replicas_data_mismatch,
- "found duplicate but non identical journal entries\n %s",
- buf.buf);
-
- if (entry_ptr.csum_good && !identical)
- goto replace;
-
- goto out;
- }
-replace:
- i = kvmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
- if (!i)
- return -BCH_ERR_ENOMEM_journal_entry_add;
-
- darray_init(&i->ptrs);
- i->csum_good = entry_ptr.csum_good;
- i->ignore_blacklisted = false;
- i->ignore_not_dirty = false;
- unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct");
-
- if (dup) {
- /* The first ptr should represent the jset we kept: */
- darray_for_each(dup->ptrs, ptr)
- darray_push(&i->ptrs, *ptr);
- __journal_replay_free(c, dup);
- } else {
- darray_push(&i->ptrs, entry_ptr);
- }
-
- *_i = i;
-out:
-fsck_err:
- printbuf_exit(&buf);
- return ret;
-}
-
-/* this fills in a range with empty jset_entries: */
-static void journal_entry_null_range(void *start, void *end)
-{
- struct jset_entry *entry;
-
- for (entry = start; entry != end; entry = vstruct_next(entry))
- memset(entry, 0, sizeof(*entry));
-}
-
-#define JOURNAL_ENTRY_REREAD 5
-#define JOURNAL_ENTRY_NONE 6
-#define JOURNAL_ENTRY_BAD 7
-
-static void journal_entry_err_msg(struct printbuf *out,
- u32 version,
- struct jset *jset,
- struct jset_entry *entry)
-{
- prt_str(out, "invalid journal entry, version=");
- bch2_version_to_text(out, version);
-
- if (entry) {
- prt_str(out, " type=");
- bch2_prt_jset_entry_type(out, entry->type);
- }
-
- if (!jset) {
- prt_printf(out, " in superblock");
- } else {
-
- prt_printf(out, " seq=%llu", le64_to_cpu(jset->seq));
-
- if (entry)
- prt_printf(out, " offset=%zi/%u",
- (u64 *) entry - jset->_data,
- le32_to_cpu(jset->u64s));
- }
-
- prt_str(out, ": ");
-}
-
-#define journal_entry_err(c, version, jset, entry, _err, msg, ...) \
-({ \
- struct printbuf _buf = PRINTBUF; \
- \
- journal_entry_err_msg(&_buf, version, jset, entry); \
- prt_printf(&_buf, msg, ##__VA_ARGS__); \
- \
- switch (flags & BCH_VALIDATE_write) { \
- case READ: \
- mustfix_fsck_err(c, _err, "%s", _buf.buf); \
- break; \
- case WRITE: \
- bch2_sb_error_count(c, BCH_FSCK_ERR_##_err); \
- bch_err(c, "corrupt metadata before write: %s\n", _buf.buf);\
- if (bch2_fs_inconsistent(c)) { \
- ret = -BCH_ERR_fsck_errors_not_fixed; \
- goto fsck_err; \
- } \
- break; \
- } \
- \
- printbuf_exit(&_buf); \
- true; \
-})
-
-#define journal_entry_err_on(cond, ...) \
- ((cond) ? journal_entry_err(__VA_ARGS__) : false)
-
-#define FSCK_DELETED_KEY 5
-
-static int journal_validate_key(struct bch_fs *c,
- struct jset *jset,
- struct jset_entry *entry,
- unsigned level, enum btree_id btree_id,
- struct bkey_i *k,
- unsigned version, int big_endian,
- enum bch_validate_flags flags)
-{
- int write = flags & BCH_VALIDATE_write;
- void *next = vstruct_next(entry);
- int ret = 0;
-
- if (journal_entry_err_on(!k->k.u64s,
- c, version, jset, entry,
- journal_entry_bkey_u64s_0,
- "k->u64s 0")) {
- entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
- journal_entry_null_range(vstruct_next(entry), next);
- return FSCK_DELETED_KEY;
- }
-
- if (journal_entry_err_on((void *) bkey_next(k) >
- (void *) vstruct_next(entry),
- c, version, jset, entry,
- journal_entry_bkey_past_end,
- "extends past end of journal entry")) {
- entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
- journal_entry_null_range(vstruct_next(entry), next);
- return FSCK_DELETED_KEY;
- }
-
- if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT,
- c, version, jset, entry,
- journal_entry_bkey_bad_format,
- "bad format %u", k->k.format)) {
- le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
- memmove(k, bkey_next(k), next - (void *) bkey_next(k));
- journal_entry_null_range(vstruct_next(entry), next);
- return FSCK_DELETED_KEY;
- }
-
- if (!write)
- bch2_bkey_compat(level, btree_id, version, big_endian,
- write, NULL, bkey_to_packed(k));
-
- ret = bch2_bkey_validate(c, bkey_i_to_s_c(k),
- __btree_node_type(level, btree_id), write);
- if (ret == -BCH_ERR_fsck_delete_bkey) {
- le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
- memmove(k, bkey_next(k), next - (void *) bkey_next(k));
- journal_entry_null_range(vstruct_next(entry), next);
- return FSCK_DELETED_KEY;
- }
- if (ret)
- goto fsck_err;
-
- if (write)
- bch2_bkey_compat(level, btree_id, version, big_endian,
- write, NULL, bkey_to_packed(k));
-fsck_err:
- return ret;
-}
-
-static int journal_entry_btree_keys_validate(struct bch_fs *c,
- struct jset *jset,
- struct jset_entry *entry,
- unsigned version, int big_endian,
- enum bch_validate_flags flags)
-{
- struct bkey_i *k = entry->start;
-
- while (k != vstruct_last(entry)) {
- int ret = journal_validate_key(c, jset, entry,
- entry->level,
- entry->btree_id,
- k, version, big_endian,
- flags|BCH_VALIDATE_journal);
- if (ret == FSCK_DELETED_KEY)
- continue;
- else if (ret)
- return ret;
-
- k = bkey_next(k);
- }
-
- return 0;
-}
-
-static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
- struct jset_entry *entry)
-{
- bool first = true;
-
- jset_entry_for_each_key(entry, k) {
- if (!first) {
- prt_newline(out);
- bch2_prt_jset_entry_type(out, entry->type);
- prt_str(out, ": ");
- }
- prt_printf(out, "btree=%s l=%u ", bch2_btree_id_str(entry->btree_id), entry->level);
- bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
- first = false;
- }
-}
-
-static int journal_entry_btree_root_validate(struct bch_fs *c,
- struct jset *jset,
- struct jset_entry *entry,
- unsigned version, int big_endian,
- enum bch_validate_flags flags)
-{
- struct bkey_i *k = entry->start;
- int ret = 0;
-
- if (journal_entry_err_on(!entry->u64s ||
- le16_to_cpu(entry->u64s) != k->k.u64s,
- c, version, jset, entry,
- journal_entry_btree_root_bad_size,
- "invalid btree root journal entry: wrong number of keys")) {
- void *next = vstruct_next(entry);
- /*
- * we don't want to null out this jset_entry,
- * just the contents, so that later we can tell
- * we were _supposed_ to have a btree root
- */
- entry->u64s = 0;
- journal_entry_null_range(vstruct_next(entry), next);
- return 0;
- }
-
- ret = journal_validate_key(c, jset, entry, 1, entry->btree_id, k,
- version, big_endian, flags);
- if (ret == FSCK_DELETED_KEY)
- ret = 0;
-fsck_err:
- return ret;
-}
-
-static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
- struct jset_entry *entry)
-{
- journal_entry_btree_keys_to_text(out, c, entry);
-}
-
-static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
- struct jset *jset,
- struct jset_entry *entry,
- unsigned version, int big_endian,
- enum bch_validate_flags flags)
-{
- /* obsolete, don't care: */
- return 0;
-}
-
-static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
- struct jset_entry *entry)
-{
-}
-
-static int journal_entry_blacklist_validate(struct bch_fs *c,
- struct jset *jset,
- struct jset_entry *entry,
- unsigned version, int big_endian,
- enum bch_validate_flags flags)
-{
- int ret = 0;
-
- if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1,
- c, version, jset, entry,
- journal_entry_blacklist_bad_size,
- "invalid journal seq blacklist entry: bad size")) {
- journal_entry_null_range(entry, vstruct_next(entry));
- }
-fsck_err:
- return ret;
-}
-
-static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
- struct jset_entry *entry)
-{
- struct jset_entry_blacklist *bl =
- container_of(entry, struct jset_entry_blacklist, entry);
-
- prt_printf(out, "seq=%llu", le64_to_cpu(bl->seq));
-}
-
-static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
- struct jset *jset,
- struct jset_entry *entry,
- unsigned version, int big_endian,
- enum bch_validate_flags flags)
-{
- struct jset_entry_blacklist_v2 *bl_entry;
- int ret = 0;
-
- if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2,
- c, version, jset, entry,
- journal_entry_blacklist_v2_bad_size,
- "invalid journal seq blacklist entry: bad size")) {
- journal_entry_null_range(entry, vstruct_next(entry));
- goto out;
- }
-
- bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);
-
- if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
- le64_to_cpu(bl_entry->end),
- c, version, jset, entry,
- journal_entry_blacklist_v2_start_past_end,
- "invalid journal seq blacklist entry: start > end")) {
- journal_entry_null_range(entry, vstruct_next(entry));
- }
-out:
-fsck_err:
- return ret;
-}
-
-static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
- struct jset_entry *entry)
-{
- struct jset_entry_blacklist_v2 *bl =
- container_of(entry, struct jset_entry_blacklist_v2, entry);
-
- prt_printf(out, "start=%llu end=%llu",
- le64_to_cpu(bl->start),
- le64_to_cpu(bl->end));
-}
-
-static int journal_entry_usage_validate(struct bch_fs *c,
- struct jset *jset,
- struct jset_entry *entry,
- unsigned version, int big_endian,
- enum bch_validate_flags flags)
-{
- struct jset_entry_usage *u =
- container_of(entry, struct jset_entry_usage, entry);
- unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
- int ret = 0;
-
- if (journal_entry_err_on(bytes < sizeof(*u),
- c, version, jset, entry,
- journal_entry_usage_bad_size,
- "invalid journal entry usage: bad size")) {
- journal_entry_null_range(entry, vstruct_next(entry));
- return ret;
- }
-
-fsck_err:
- return ret;
-}
-
-static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
- struct jset_entry *entry)
-{
- struct jset_entry_usage *u =
- container_of(entry, struct jset_entry_usage, entry);
-
- prt_str(out, "type=");
- bch2_prt_fs_usage_type(out, u->entry.btree_id);
- prt_printf(out, " v=%llu", le64_to_cpu(u->v));
-}
-
-static int journal_entry_data_usage_validate(struct bch_fs *c,
- struct jset *jset,
- struct jset_entry *entry,
- unsigned version, int big_endian,
- enum bch_validate_flags flags)
-{
- struct jset_entry_data_usage *u =
- container_of(entry, struct jset_entry_data_usage, entry);
- unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
- struct printbuf err = PRINTBUF;
- int ret = 0;
-
- if (journal_entry_err_on(bytes < sizeof(*u) ||
- bytes < sizeof(*u) + u->r.nr_devs,
- c, version, jset, entry,
- journal_entry_data_usage_bad_size,
- "invalid journal entry usage: bad size")) {
- journal_entry_null_range(entry, vstruct_next(entry));
- goto out;
- }
-
- if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c, &err),
- c, version, jset, entry,
- journal_entry_data_usage_bad_size,
- "invalid journal entry usage: %s", err.buf)) {
- journal_entry_null_range(entry, vstruct_next(entry));
- goto out;
- }
-out:
-fsck_err:
- printbuf_exit(&err);
- return ret;
-}
-
-static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
- struct jset_entry *entry)
-{
- struct jset_entry_data_usage *u =
- container_of(entry, struct jset_entry_data_usage, entry);
-
- bch2_replicas_entry_to_text(out, &u->r);
- prt_printf(out, "=%llu", le64_to_cpu(u->v));
-}
-
-static int journal_entry_clock_validate(struct bch_fs *c,
- struct jset *jset,
- struct jset_entry *entry,
- unsigned version, int big_endian,
- enum bch_validate_flags flags)
-{
- struct jset_entry_clock *clock =
- container_of(entry, struct jset_entry_clock, entry);
- unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
- int ret = 0;
-
- if (journal_entry_err_on(bytes != sizeof(*clock),
- c, version, jset, entry,
- journal_entry_clock_bad_size,
- "bad size")) {
- journal_entry_null_range(entry, vstruct_next(entry));
- return ret;
- }
-
- if (journal_entry_err_on(clock->rw > 1,
- c, version, jset, entry,
- journal_entry_clock_bad_rw,
- "bad rw")) {
- journal_entry_null_range(entry, vstruct_next(entry));
- return ret;
- }
-
-fsck_err:
- return ret;
-}
-
-static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
- struct jset_entry *entry)
-{
- struct jset_entry_clock *clock =
- container_of(entry, struct jset_entry_clock, entry);
-
- prt_printf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
-}
-
-static int journal_entry_dev_usage_validate(struct bch_fs *c,
- struct jset *jset,
- struct jset_entry *entry,
- unsigned version, int big_endian,
- enum bch_validate_flags flags)
-{
- struct jset_entry_dev_usage *u =
- container_of(entry, struct jset_entry_dev_usage, entry);
- unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
- unsigned expected = sizeof(*u);
- int ret = 0;
-
- if (journal_entry_err_on(bytes < expected,
- c, version, jset, entry,
- journal_entry_dev_usage_bad_size,
- "bad size (%u < %u)",
- bytes, expected)) {
- journal_entry_null_range(entry, vstruct_next(entry));
- return ret;
- }
-
- if (journal_entry_err_on(u->pad,
- c, version, jset, entry,
- journal_entry_dev_usage_bad_pad,
- "bad pad")) {
- journal_entry_null_range(entry, vstruct_next(entry));
- return ret;
- }
-
-fsck_err:
- return ret;
-}
-
-static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
- struct jset_entry *entry)
-{
- struct jset_entry_dev_usage *u =
- container_of(entry, struct jset_entry_dev_usage, entry);
- unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);
-
- if (vstruct_bytes(entry) < sizeof(*u))
- return;
-
- prt_printf(out, "dev=%u", le32_to_cpu(u->dev));
-
- printbuf_indent_add(out, 2);
- for (i = 0; i < nr_types; i++) {
- prt_newline(out);
- bch2_prt_data_type(out, i);
- prt_printf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
- le64_to_cpu(u->d[i].buckets),
- le64_to_cpu(u->d[i].sectors),
- le64_to_cpu(u->d[i].fragmented));
- }
- printbuf_indent_sub(out, 2);
-}
-
-static int journal_entry_log_validate(struct bch_fs *c,
- struct jset *jset,
- struct jset_entry *entry,
- unsigned version, int big_endian,
- enum bch_validate_flags flags)
-{
- return 0;
-}
-
-static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
- struct jset_entry *entry)
-{
- struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
- unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);
-
- prt_printf(out, "%.*s", bytes, l->d);
-}
-
-static int journal_entry_overwrite_validate(struct bch_fs *c,
- struct jset *jset,
- struct jset_entry *entry,
- unsigned version, int big_endian,
- enum bch_validate_flags flags)
-{
- return journal_entry_btree_keys_validate(c, jset, entry,
- version, big_endian, READ);
-}
-
-static void journal_entry_overwrite_to_text(struct printbuf *out, struct bch_fs *c,
- struct jset_entry *entry)
-{
- journal_entry_btree_keys_to_text(out, c, entry);
-}
-
-static int journal_entry_write_buffer_keys_validate(struct bch_fs *c,
- struct jset *jset,
- struct jset_entry *entry,
- unsigned version, int big_endian,
- enum bch_validate_flags flags)
-{
- return journal_entry_btree_keys_validate(c, jset, entry,
- version, big_endian, READ);
-}
-
-static void journal_entry_write_buffer_keys_to_text(struct printbuf *out, struct bch_fs *c,
- struct jset_entry *entry)
-{
- journal_entry_btree_keys_to_text(out, c, entry);
-}
-
-static int journal_entry_datetime_validate(struct bch_fs *c,
- struct jset *jset,
- struct jset_entry *entry,
- unsigned version, int big_endian,
- enum bch_validate_flags flags)
-{
- unsigned bytes = vstruct_bytes(entry);
- unsigned expected = 16;
- int ret = 0;
-
- if (journal_entry_err_on(vstruct_bytes(entry) < expected,
- c, version, jset, entry,
- journal_entry_dev_usage_bad_size,
- "bad size (%u < %u)",
- bytes, expected)) {
- journal_entry_null_range(entry, vstruct_next(entry));
- return ret;
- }
-fsck_err:
- return ret;
-}
-
-static void journal_entry_datetime_to_text(struct printbuf *out, struct bch_fs *c,
- struct jset_entry *entry)
-{
- struct jset_entry_datetime *datetime =
- container_of(entry, struct jset_entry_datetime, entry);
-
- bch2_prt_datetime(out, le64_to_cpu(datetime->seconds));
-}
-
-struct jset_entry_ops {
- int (*validate)(struct bch_fs *, struct jset *,
- struct jset_entry *, unsigned, int,
- enum bch_validate_flags);
- void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
-};
-
-static const struct jset_entry_ops bch2_jset_entry_ops[] = {
-#define x(f, nr) \
- [BCH_JSET_ENTRY_##f] = (struct jset_entry_ops) { \
- .validate = journal_entry_##f##_validate, \
- .to_text = journal_entry_##f##_to_text, \
- },
- BCH_JSET_ENTRY_TYPES()
-#undef x
-};
-
-int bch2_journal_entry_validate(struct bch_fs *c,
- struct jset *jset,
- struct jset_entry *entry,
- unsigned version, int big_endian,
- enum bch_validate_flags flags)
-{
- return entry->type < BCH_JSET_ENTRY_NR
- ? bch2_jset_entry_ops[entry->type].validate(c, jset, entry,
- version, big_endian, flags)
- : 0;
-}
-
-void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
- struct jset_entry *entry)
-{
- bch2_prt_jset_entry_type(out, entry->type);
-
- if (entry->type < BCH_JSET_ENTRY_NR) {
- prt_str(out, ": ");
- bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
- }
-}
-
-static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
- enum bch_validate_flags flags)
-{
- unsigned version = le32_to_cpu(jset->version);
- int ret = 0;
-
- vstruct_for_each(jset, entry) {
- if (journal_entry_err_on(vstruct_next(entry) > vstruct_last(jset),
- c, version, jset, entry,
- journal_entry_past_jset_end,
- "journal entry extends past end of jset")) {
- jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
- break;
- }
-
- ret = bch2_journal_entry_validate(c, jset, entry,
- version, JSET_BIG_ENDIAN(jset), flags);
- if (ret)
- break;
- }
-fsck_err:
- return ret;
-}
-
-static int jset_validate(struct bch_fs *c,
- struct bch_dev *ca,
- struct jset *jset, u64 sector,
- enum bch_validate_flags flags)
-{
- unsigned version;
- int ret = 0;
-
- if (le64_to_cpu(jset->magic) != jset_magic(c))
- return JOURNAL_ENTRY_NONE;
-
- version = le32_to_cpu(jset->version);
- if (journal_entry_err_on(!bch2_version_compatible(version),
- c, version, jset, NULL,
- jset_unsupported_version,
- "%s sector %llu seq %llu: incompatible journal entry version %u.%u",
- ca ? ca->name : c->name,
- sector, le64_to_cpu(jset->seq),
- BCH_VERSION_MAJOR(version),
- BCH_VERSION_MINOR(version))) {
- /* don't try to continue: */
- return -EINVAL;
- }
-
- if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)),
- c, version, jset, NULL,
- jset_unknown_csum,
- "%s sector %llu seq %llu: journal entry with unknown csum type %llu",
- ca ? ca->name : c->name,
- sector, le64_to_cpu(jset->seq),
- JSET_CSUM_TYPE(jset)))
- ret = JOURNAL_ENTRY_BAD;
-
- /* last_seq is ignored when JSET_NO_FLUSH is true */
- if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
- le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq),
- c, version, jset, NULL,
- jset_last_seq_newer_than_seq,
- "invalid journal entry: last_seq > seq (%llu > %llu)",
- le64_to_cpu(jset->last_seq),
- le64_to_cpu(jset->seq))) {
- jset->last_seq = jset->seq;
- return JOURNAL_ENTRY_BAD;
- }
-
- ret = jset_validate_entries(c, jset, flags);
-fsck_err:
- return ret;
-}
-
-static int jset_validate_early(struct bch_fs *c,
- struct bch_dev *ca,
- struct jset *jset, u64 sector,
- unsigned bucket_sectors_left,
- unsigned sectors_read)
-{
- size_t bytes = vstruct_bytes(jset);
- unsigned version;
- enum bch_validate_flags flags = BCH_VALIDATE_journal;
- int ret = 0;
-
- if (le64_to_cpu(jset->magic) != jset_magic(c))
- return JOURNAL_ENTRY_NONE;
-
- version = le32_to_cpu(jset->version);
- if (journal_entry_err_on(!bch2_version_compatible(version),
- c, version, jset, NULL,
- jset_unsupported_version,
- "%s sector %llu seq %llu: unknown journal entry version %u.%u",
- ca ? ca->name : c->name,
- sector, le64_to_cpu(jset->seq),
- BCH_VERSION_MAJOR(version),
- BCH_VERSION_MINOR(version))) {
- /* don't try to continue: */
- return -EINVAL;
- }
-
- if (bytes > (sectors_read << 9) &&
- sectors_read < bucket_sectors_left)
- return JOURNAL_ENTRY_REREAD;
-
- if (journal_entry_err_on(bytes > bucket_sectors_left << 9,
- c, version, jset, NULL,
- jset_past_bucket_end,
- "%s sector %llu seq %llu: journal entry too big (%zu bytes)",
- ca ? ca->name : c->name,
- sector, le64_to_cpu(jset->seq), bytes))
- le32_add_cpu(&jset->u64s,
- -((bytes - (bucket_sectors_left << 9)) / 8));
-fsck_err:
- return ret;
-}
-
-struct journal_read_buf {
- void *data;
- size_t size;
-};
-
-static int journal_read_buf_realloc(struct journal_read_buf *b,
- size_t new_size)
-{
- void *n;
-
- /* the bios are sized for this many pages, max: */
- if (new_size > JOURNAL_ENTRY_SIZE_MAX)
- return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
-
- new_size = roundup_pow_of_two(new_size);
- n = kvmalloc(new_size, GFP_KERNEL);
- if (!n)
- return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
-
- kvfree(b->data);
- b->data = n;
- b->size = new_size;
- return 0;
-}
-
-static int journal_read_bucket(struct bch_dev *ca,
- struct journal_read_buf *buf,
- struct journal_list *jlist,
- unsigned bucket)
-{
- struct bch_fs *c = ca->fs;
- struct journal_device *ja = &ca->journal;
- struct jset *j = NULL;
- unsigned sectors, sectors_read = 0;
- u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
- end = offset + ca->mi.bucket_size;
- bool saw_bad = false, csum_good;
- struct printbuf err = PRINTBUF;
- int ret = 0;
-
- pr_debug("reading %u", bucket);
-
- while (offset < end) {
- if (!sectors_read) {
- struct bio *bio;
- unsigned nr_bvecs;
-reread:
- sectors_read = min_t(unsigned,
- end - offset, buf->size >> 9);
- nr_bvecs = buf_pages(buf->data, sectors_read << 9);
-
- bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
- if (!bio)
- return -BCH_ERR_ENOMEM_journal_read_bucket;
- bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);
-
- bio->bi_iter.bi_sector = offset;
- bch2_bio_map(bio, buf->data, sectors_read << 9);
-
- ret = submit_bio_wait(bio);
- kfree(bio);
-
- if (bch2_dev_io_err_on(ret, ca, BCH_MEMBER_ERROR_read,
- "journal read error: sector %llu",
- offset) ||
- bch2_meta_read_fault("journal")) {
- /*
- * We don't error out of the recovery process
- * here, since the relevant journal entry may be
- * found on a different device, and missing or
- * no journal entries will be handled later
- */
- goto out;
- }
-
- j = buf->data;
- }
-
- ret = jset_validate_early(c, ca, j, offset,
- end - offset, sectors_read);
- switch (ret) {
- case 0:
- sectors = vstruct_sectors(j, c->block_bits);
- break;
- case JOURNAL_ENTRY_REREAD:
- if (vstruct_bytes(j) > buf->size) {
- ret = journal_read_buf_realloc(buf,
- vstruct_bytes(j));
- if (ret)
- goto err;
- }
- goto reread;
- case JOURNAL_ENTRY_NONE:
- if (!saw_bad)
- goto out;
- /*
- * On checksum error we don't really trust the size
- * field of the journal entry we read, so try reading
- * again at next block boundary:
- */
- sectors = block_sectors(c);
- goto next_block;
- default:
- goto err;
- }
-
- if (le64_to_cpu(j->seq) > ja->highest_seq_found) {
- ja->highest_seq_found = le64_to_cpu(j->seq);
- ja->cur_idx = bucket;
- ja->sectors_free = ca->mi.bucket_size -
- bucket_remainder(ca, offset) - sectors;
- }
-
- /*
- * This happens sometimes if we don't have discards on -
- * when we've partially overwritten a bucket with new
- * journal entries. We don't need the rest of the
- * bucket:
- */
- if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
- goto out;
-
- ja->bucket_seq[bucket] = le64_to_cpu(j->seq);
-
- enum bch_csum_type csum_type = JSET_CSUM_TYPE(j);
- struct bch_csum csum;
- csum_good = jset_csum_good(c, j, &csum);
-
- if (bch2_dev_io_err_on(!csum_good, ca, BCH_MEMBER_ERROR_checksum,
- "%s",
- (printbuf_reset(&err),
- prt_str(&err, "journal "),
- bch2_csum_err_msg(&err, csum_type, j->csum, csum),
- err.buf)))
- saw_bad = true;
-
- ret = bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j),
- j->encrypted_start,
- vstruct_end(j) - (void *) j->encrypted_start);
- bch2_fs_fatal_err_on(ret, c, "decrypting journal entry: %s", bch2_err_str(ret));
-
- mutex_lock(&jlist->lock);
- ret = journal_entry_add(c, ca, (struct journal_ptr) {
- .csum_good = csum_good,
- .dev = ca->dev_idx,
- .bucket = bucket,
- .bucket_offset = offset -
- bucket_to_sector(ca, ja->buckets[bucket]),
- .sector = offset,
- }, jlist, j);
- mutex_unlock(&jlist->lock);
-
- switch (ret) {
- case JOURNAL_ENTRY_ADD_OK:
- break;
- case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
- break;
- default:
- goto err;
- }
-next_block:
- pr_debug("next");
- offset += sectors;
- sectors_read -= sectors;
- j = ((void *) j) + (sectors << 9);
- }
-
-out:
- ret = 0;
-err:
- printbuf_exit(&err);
- return ret;
-}
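journal_read_bucket() above is essentially: walk variable-length records packed back to back in a bucket, advance by each record's own size, and stop when the magic no longer matches. A toy, self-contained version of just that walking pattern (the record format here is invented for the example):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_MAGIC	0x62636866u	/* made-up magic value */
#define TOY_BLOCK	8		/* bytes per "block" in this toy */

struct toy_rec {
	uint32_t magic;
	uint32_t blocks;	/* total size of this record, in blocks */
};

int main(void)
{
	unsigned char bucket[64] = {0};
	struct toy_rec a = { TOY_MAGIC, 2 }, b = { TOY_MAGIC, 3 };

	memcpy(bucket, &a, sizeof(a));
	memcpy(bucket + a.blocks * TOY_BLOCK, &b, sizeof(b));

	for (size_t off = 0; off + sizeof(struct toy_rec) <= sizeof(bucket);) {
		struct toy_rec r;

		memcpy(&r, bucket + off, sizeof(r));
		if (r.magic != TOY_MAGIC)
			break;		/* ran off the end of valid records */

		printf("record at offset %zu, %u blocks\n", off, r.blocks);
		off += (size_t) r.blocks * TOY_BLOCK;
	}
	return 0;
}

The real loop additionally re-reads when an entry spans the current read buffer, verifies checksums, and on a bad entry keeps scanning from the next block boundary rather than giving up.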
-
-static CLOSURE_CALLBACK(bch2_journal_read_device)
-{
- closure_type(ja, struct journal_device, read);
- struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
- struct bch_fs *c = ca->fs;
- struct journal_list *jlist =
- container_of(cl->parent, struct journal_list, cl);
- struct journal_read_buf buf = { NULL, 0 };
- unsigned i;
- int ret = 0;
-
- if (!ja->nr)
- goto out;
-
- ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
- if (ret)
- goto err;
-
- pr_debug("%u journal buckets", ja->nr);
-
- for (i = 0; i < ja->nr; i++) {
- ret = journal_read_bucket(ca, &buf, jlist, i);
- if (ret)
- goto err;
- }
-
- /*
- * Set dirty_idx to indicate the entire journal is full and needs to be
- * reclaimed - journal reclaim will immediately reclaim whatever isn't
- * pinned when it first runs:
- */
- ja->discard_idx = ja->dirty_idx_ondisk =
- ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
-out:
- bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
- kvfree(buf.data);
- percpu_ref_put(&ca->io_ref);
- closure_return(cl);
- return;
-err:
- mutex_lock(&jlist->lock);
- jlist->ret = ret;
- mutex_unlock(&jlist->lock);
- goto out;
-}
-
-int bch2_journal_read(struct bch_fs *c,
- u64 *last_seq,
- u64 *blacklist_seq,
- u64 *start_seq)
-{
- struct journal_list jlist;
- struct journal_replay *i, **_i, *prev = NULL;
- struct genradix_iter radix_iter;
- struct printbuf buf = PRINTBUF;
- bool degraded = false, last_write_torn = false;
- u64 seq;
- int ret = 0;
-
- closure_init_stack(&jlist.cl);
- mutex_init(&jlist.lock);
- jlist.last_seq = 0;
- jlist.ret = 0;
-
- for_each_member_device(c, ca) {
- if (!c->opts.fsck &&
- !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
- continue;
-
- if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
- ca->mi.state == BCH_MEMBER_STATE_ro) &&
- percpu_ref_tryget(&ca->io_ref))
- closure_call(&ca->journal.read,
- bch2_journal_read_device,
- system_unbound_wq,
- &jlist.cl);
- else
- degraded = true;
- }
-
- closure_sync(&jlist.cl);
-
- if (jlist.ret)
- return jlist.ret;
-
- *last_seq = 0;
- *start_seq = 0;
- *blacklist_seq = 0;
-
- /*
- * Find most recent flush entry, and ignore newer non flush entries -
- * those entries will be blacklisted:
- */
- genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) {
- enum bch_validate_flags flags = BCH_VALIDATE_journal;
-
- i = *_i;
-
- if (journal_replay_ignore(i))
- continue;
-
- if (!*start_seq)
- *blacklist_seq = *start_seq = le64_to_cpu(i->j.seq) + 1;
-
- if (JSET_NO_FLUSH(&i->j)) {
- i->ignore_blacklisted = true;
- continue;
- }
-
- if (!last_write_torn && !i->csum_good) {
- last_write_torn = true;
- i->ignore_blacklisted = true;
- continue;
- }
-
- if (journal_entry_err_on(le64_to_cpu(i->j.last_seq) > le64_to_cpu(i->j.seq),
- c, le32_to_cpu(i->j.version), &i->j, NULL,
- jset_last_seq_newer_than_seq,
- "invalid journal entry: last_seq > seq (%llu > %llu)",
- le64_to_cpu(i->j.last_seq),
- le64_to_cpu(i->j.seq)))
- i->j.last_seq = i->j.seq;
-
- *last_seq = le64_to_cpu(i->j.last_seq);
- *blacklist_seq = le64_to_cpu(i->j.seq) + 1;
- break;
- }
-
- if (!*start_seq) {
- bch_info(c, "journal read done, but no entries found");
- return 0;
- }
-
- if (!*last_seq) {
- fsck_err(c, dirty_but_no_journal_entries_post_drop_nonflushes,
- "journal read done, but no entries found after dropping non-flushes");
- return 0;
- }
-
- bch_info(c, "journal read done, replaying entries %llu-%llu",
- *last_seq, *blacklist_seq - 1);
-
- if (*start_seq != *blacklist_seq)
- bch_info(c, "dropped unflushed entries %llu-%llu",
- *blacklist_seq, *start_seq - 1);
-
- /* Drop blacklisted entries and entries older than last_seq: */
- genradix_for_each(&c->journal_entries, radix_iter, _i) {
- i = *_i;
-
- if (journal_replay_ignore(i))
- continue;
-
- seq = le64_to_cpu(i->j.seq);
- if (seq < *last_seq) {
- journal_replay_free(c, i, false);
- continue;
- }
-
- if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
- fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
- jset_seq_blacklisted,
- "found blacklisted journal entry %llu", seq);
- i->ignore_blacklisted = true;
- }
- }
-
- /* Check for missing entries: */
- seq = *last_seq;
- genradix_for_each(&c->journal_entries, radix_iter, _i) {
- i = *_i;
-
- if (journal_replay_ignore(i))
- continue;
-
- BUG_ON(seq > le64_to_cpu(i->j.seq));
-
- while (seq < le64_to_cpu(i->j.seq)) {
- u64 missing_start, missing_end;
- struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
-
- while (seq < le64_to_cpu(i->j.seq) &&
- bch2_journal_seq_is_blacklisted(c, seq, false))
- seq++;
-
- if (seq == le64_to_cpu(i->j.seq))
- break;
-
- missing_start = seq;
-
- while (seq < le64_to_cpu(i->j.seq) &&
- !bch2_journal_seq_is_blacklisted(c, seq, false))
- seq++;
-
- if (prev) {
- bch2_journal_ptrs_to_text(&buf1, c, prev);
- prt_printf(&buf1, " size %zu", vstruct_sectors(&prev->j, c->block_bits));
- } else
- prt_printf(&buf1, "(none)");
- bch2_journal_ptrs_to_text(&buf2, c, i);
-
- missing_end = seq - 1;
- fsck_err(c, journal_entries_missing,
- "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
- " prev at %s\n"
- " next at %s, continue?",
- missing_start, missing_end,
- *last_seq, *blacklist_seq - 1,
- buf1.buf, buf2.buf);
-
- printbuf_exit(&buf1);
- printbuf_exit(&buf2);
- }
-
- prev = i;
- seq++;
- }
-
- genradix_for_each(&c->journal_entries, radix_iter, _i) {
- struct bch_replicas_padded replicas = {
- .e.data_type = BCH_DATA_journal,
- .e.nr_devs = 0,
- .e.nr_required = 1,
- };
-
- i = *_i;
- if (journal_replay_ignore(i))
- continue;
-
- darray_for_each(i->ptrs, ptr) {
- struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
-
- if (!ptr->csum_good)
- bch_err_dev_offset(ca, ptr->sector,
- "invalid journal checksum, seq %llu%s",
- le64_to_cpu(i->j.seq),
- i->csum_good ? " (had good copy on another device)" : "");
- }
-
- ret = jset_validate(c,
- bch2_dev_have_ref(c, i->ptrs.data[0].dev),
- &i->j,
- i->ptrs.data[0].sector,
- READ);
- if (ret)
- goto err;
-
- darray_for_each(i->ptrs, ptr)
- replicas_entry_add_dev(&replicas.e, ptr->dev);
-
- bch2_replicas_entry_sort(&replicas.e);
-
- printbuf_reset(&buf);
- bch2_replicas_entry_to_text(&buf, &replicas.e);
-
- if (!degraded &&
- !bch2_replicas_marked(c, &replicas.e) &&
- (le64_to_cpu(i->j.seq) == *last_seq ||
- fsck_err(c, journal_entry_replicas_not_marked,
- "superblock not marked as containing replicas for journal entry %llu\n %s",
- le64_to_cpu(i->j.seq), buf.buf))) {
- ret = bch2_mark_replicas(c, &replicas.e);
- if (ret)
- goto err;
- }
- }
-err:
-fsck_err:
- printbuf_exit(&buf);
- return ret;
-}
-
-/* journal write: */
-
-static void __journal_write_alloc(struct journal *j,
- struct journal_buf *w,
- struct dev_alloc_list *devs_sorted,
- unsigned sectors,
- unsigned *replicas,
- unsigned replicas_want)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct journal_device *ja;
- struct bch_dev *ca;
- unsigned i;
-
- if (*replicas >= replicas_want)
- return;
-
- for (i = 0; i < devs_sorted->nr; i++) {
- ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
- if (!ca)
- continue;
-
- ja = &ca->journal;
-
- /*
- * Check that we can use this device, and aren't already using
- * it:
- */
- if (!ca->mi.durability ||
- ca->mi.state != BCH_MEMBER_STATE_rw ||
- !ja->nr ||
- bch2_bkey_has_device_c(bkey_i_to_s_c(&w->key), ca->dev_idx) ||
- sectors > ja->sectors_free)
- continue;
-
- bch2_dev_stripe_increment(ca, &j->wp.stripe);
-
- bch2_bkey_append_ptr(&w->key,
- (struct bch_extent_ptr) {
- .offset = bucket_to_sector(ca,
- ja->buckets[ja->cur_idx]) +
- ca->mi.bucket_size -
- ja->sectors_free,
- .dev = ca->dev_idx,
- });
-
- ja->sectors_free -= sectors;
- ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
-
- *replicas += ca->mi.durability;
-
- if (*replicas >= replicas_want)
- break;
- }
-}
-
-/**
- * journal_write_alloc - decide where to write next journal entry
- *
- * @j: journal object
- * @w: journal buf (entry to be written)
- *
- * Returns: 0 on success, or -EROFS on failure
- */
-static int journal_write_alloc(struct journal *j, struct journal_buf *w)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct bch_devs_mask devs;
- struct journal_device *ja;
- struct bch_dev *ca;
- struct dev_alloc_list devs_sorted;
- unsigned sectors = vstruct_sectors(w->data, c->block_bits);
- unsigned target = c->opts.metadata_target ?:
- c->opts.foreground_target;
- unsigned i, replicas = 0, replicas_want =
- READ_ONCE(c->opts.metadata_replicas);
- unsigned replicas_need = min_t(unsigned, replicas_want,
- READ_ONCE(c->opts.metadata_replicas_required));
-
- rcu_read_lock();
-retry:
- devs = target_rw_devs(c, BCH_DATA_journal, target);
-
- devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);
-
- __journal_write_alloc(j, w, &devs_sorted,
- sectors, &replicas, replicas_want);
-
- if (replicas >= replicas_want)
- goto done;
-
- for (i = 0; i < devs_sorted.nr; i++) {
- ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
- if (!ca)
- continue;
-
- ja = &ca->journal;
-
- if (sectors > ja->sectors_free &&
- sectors <= ca->mi.bucket_size &&
- bch2_journal_dev_buckets_available(j, ja,
- journal_space_discarded)) {
- ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
- ja->sectors_free = ca->mi.bucket_size;
-
- /*
- * ja->bucket_seq[ja->cur_idx] must always have
- * something sensible:
- */
- ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
- }
- }
-
- __journal_write_alloc(j, w, &devs_sorted,
- sectors, &replicas, replicas_want);
-
- if (replicas < replicas_want && target) {
- /* Retry from all devices: */
- target = 0;
- goto retry;
- }
-done:
- rcu_read_unlock();
-
- BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);
-
- return replicas >= replicas_need ? 0 : -EROFS;
-}
-
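To make the return condition of journal_write_alloc() above concrete: the allocator tries to place up to metadata_replicas copies of the entry, but only reports -EROFS when fewer than metadata_replicas_required copies could be placed. A minimal userspace restatement of just that check (alloc_result() is a hypothetical helper, not bcachefs code):

#include <errno.h>

/*
 * Illustrative restatement of the return condition in journal_write_alloc()
 * above; alloc_result() is a hypothetical helper, not part of bcachefs.
 */
static int alloc_result(unsigned replicas_allocated,
			unsigned metadata_replicas,
			unsigned metadata_replicas_required)
{
	unsigned want = metadata_replicas;
	unsigned need = want < metadata_replicas_required
		? want : metadata_replicas_required;

	return replicas_allocated >= need ? 0 : -EROFS;
}

int main(void)
{
	/* One replica landed, two wanted, one required: degraded but ok. */
	return alloc_result(1, 2, 1);	/* 0 */
}
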
-static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
-
- /* we aren't holding j->lock: */
- unsigned new_size = READ_ONCE(j->buf_size_want);
- void *new_buf;
-
- if (buf->buf_size >= new_size)
- return;
-
- size_t btree_write_buffer_size = new_size / 64;
-
- if (bch2_btree_write_buffer_resize(c, btree_write_buffer_size))
- return;
-
- new_buf = kvmalloc(new_size, GFP_NOFS|__GFP_NOWARN);
- if (!new_buf)
- return;
-
- memcpy(new_buf, buf->data, buf->buf_size);
-
- spin_lock(&j->lock);
- swap(buf->data, new_buf);
- swap(buf->buf_size, new_size);
- spin_unlock(&j->lock);
-
- kvfree(new_buf);
-}
-
-static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
-{
- return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
-}
-
-static CLOSURE_CALLBACK(journal_write_done)
-{
- closure_type(w, struct journal_buf, io);
- struct journal *j = container_of(w, struct journal, buf[w->idx]);
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct bch_replicas_padded replicas;
- union journal_res_state old, new;
- u64 seq = le64_to_cpu(w->data->seq);
- int err = 0;
-
- bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
- ? j->flush_write_time
- : j->noflush_write_time, j->write_start_time);
-
- if (!w->devs_written.nr) {
- bch_err(c, "unable to write journal to sufficient devices");
- err = -EIO;
- } else {
- bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
- w->devs_written);
- if (bch2_mark_replicas(c, &replicas.e))
- err = -EIO;
- }
-
- if (err)
- bch2_fatal_error(c);
-
- closure_debug_destroy(cl);
-
- spin_lock(&j->lock);
- if (seq >= j->pin.front)
- journal_seq_pin(j, seq)->devs = w->devs_written;
- if (err && (!j->err_seq || seq < j->err_seq))
- j->err_seq = seq;
- w->write_done = true;
-
- bool completed = false;
-
- for (seq = journal_last_unwritten_seq(j);
- seq <= journal_cur_seq(j);
- seq++) {
- w = j->buf + (seq & JOURNAL_BUF_MASK);
- if (!w->write_done)
- break;
-
- if (!j->err_seq && !JSET_NO_FLUSH(w->data)) {
- j->flushed_seq_ondisk = seq;
- j->last_seq_ondisk = w->last_seq;
-
- bch2_do_discards(c);
- closure_wake_up(&c->freelist_wait);
- bch2_reset_alloc_cursors(c);
- }
-
- j->seq_ondisk = seq;
-
- /*
- * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
- * more buckets:
- *
- * Must come before signaling write completion, for
- * bch2_fs_journal_stop():
- */
- if (j->watermark != BCH_WATERMARK_stripe)
- journal_reclaim_kick(&c->journal);
-
- old.v = atomic64_read(&j->reservations.counter);
- do {
- new.v = old.v;
- BUG_ON(journal_state_count(new, new.unwritten_idx));
- BUG_ON(new.unwritten_idx != (seq & JOURNAL_BUF_MASK));
-
- new.unwritten_idx++;
- } while (!atomic64_try_cmpxchg(&j->reservations.counter,
- &old.v, new.v));
-
- closure_wake_up(&w->wait);
- completed = true;
- }
-
- if (completed) {
- bch2_journal_reclaim_fast(j);
- bch2_journal_space_available(j);
-
- track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], false);
-
- journal_wake(j);
- }
-
- if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
- new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
- struct journal_buf *buf = journal_cur_buf(j);
- long delta = buf->expires - jiffies;
-
- /*
-		 * We don't close a journal entry to write it while there are
- * previous entries still in flight - the current journal entry
- * might want to be written now:
- */
- mod_delayed_work(j->wq, &j->write_work, max(0L, delta));
- }
-
- /*
-	 * We don't typically trigger journal writes from here - the next journal
- * write will be triggered immediately after the previous one is
- * allocated, in bch2_journal_write() - but the journal write error path
- * is special:
- */
- bch2_journal_do_writes(j);
- spin_unlock(&j->lock);
-}
-
-static void journal_write_endio(struct bio *bio)
-{
- struct journal_bio *jbio = container_of(bio, struct journal_bio, bio);
- struct bch_dev *ca = jbio->ca;
- struct journal *j = &ca->fs->journal;
- struct journal_buf *w = j->buf + jbio->buf_idx;
-
- if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
- "error writing journal entry %llu: %s",
- le64_to_cpu(w->data->seq),
- bch2_blk_status_to_str(bio->bi_status)) ||
- bch2_meta_write_fault("journal")) {
- unsigned long flags;
-
- spin_lock_irqsave(&j->err_lock, flags);
- bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
- spin_unlock_irqrestore(&j->err_lock, flags);
- }
-
- closure_put(&w->io);
- percpu_ref_put(&ca->io_ref);
-}
-
-static CLOSURE_CALLBACK(journal_write_submit)
-{
- closure_type(w, struct journal_buf, io);
- struct journal *j = container_of(w, struct journal, buf[w->idx]);
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- unsigned sectors = vstruct_sectors(w->data, c->block_bits);
-
- extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
- struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE);
- if (!ca) {
- /* XXX: fix this */
- bch_err(c, "missing device for journal write\n");
- continue;
- }
-
- this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
- sectors);
-
- struct journal_device *ja = &ca->journal;
- struct bio *bio = &ja->bio[w->idx]->bio;
- bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
- bio->bi_iter.bi_sector = ptr->offset;
- bio->bi_end_io = journal_write_endio;
- bio->bi_private = ca;
-
- BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
- ca->prev_journal_sector = bio->bi_iter.bi_sector;
-
- if (!JSET_NO_FLUSH(w->data))
- bio->bi_opf |= REQ_FUA;
- if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
- bio->bi_opf |= REQ_PREFLUSH;
-
- bch2_bio_map(bio, w->data, sectors << 9);
-
- trace_and_count(c, journal_write, bio);
- closure_bio_submit(bio, cl);
-
- ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
- }
-
- continue_at(cl, journal_write_done, j->wq);
-}
-
-static CLOSURE_CALLBACK(journal_write_preflush)
-{
- closure_type(w, struct journal_buf, io);
- struct journal *j = container_of(w, struct journal, buf[w->idx]);
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
-
- if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) {
- spin_lock(&j->lock);
- if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) {
- closure_wait(&j->async_wait, cl);
- spin_unlock(&j->lock);
- continue_at(cl, journal_write_preflush, j->wq);
- return;
- }
- spin_unlock(&j->lock);
- }
-
- if (w->separate_flush) {
- for_each_rw_member(c, ca) {
- percpu_ref_get(&ca->io_ref);
-
- struct journal_device *ja = &ca->journal;
- struct bio *bio = &ja->bio[w->idx]->bio;
- bio_reset(bio, ca->disk_sb.bdev,
- REQ_OP_WRITE|REQ_SYNC|REQ_META|REQ_PREFLUSH);
- bio->bi_end_io = journal_write_endio;
- bio->bi_private = ca;
- closure_bio_submit(bio, cl);
- }
-
- continue_at(cl, journal_write_submit, j->wq);
- } else {
- /*
- * no need to punt to another work item if we're not waiting on
- * preflushes
- */
- journal_write_submit(&cl->work);
- }
-}
-
-static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct jset_entry *start, *end;
- struct jset *jset = w->data;
- struct journal_keys_to_wb wb = { NULL };
- unsigned sectors, bytes, u64s;
- unsigned long btree_roots_have = 0;
- bool validate_before_checksum = false;
- u64 seq = le64_to_cpu(jset->seq);
- int ret;
-
- /*
- * Simple compaction, dropping empty jset_entries (from journal
- * reservations that weren't fully used) and merging jset_entries that
- * can be.
- *
- * If we wanted to be really fancy here, we could sort all the keys in
- * the jset and drop keys that were overwritten - probably not worth it:
- */
- vstruct_for_each(jset, i) {
- unsigned u64s = le16_to_cpu(i->u64s);
-
- /* Empty entry: */
- if (!u64s)
- continue;
-
- /*
- * New btree roots are set by journalling them; when the journal
- * entry gets written we have to propagate them to
- * c->btree_roots
- *
- * But, every journal entry we write has to contain all the
- * btree roots (at least for now); so after we copy btree roots
- * to c->btree_roots we have to get any missing btree roots and
- * add them to this journal entry:
- */
- switch (i->type) {
- case BCH_JSET_ENTRY_btree_root:
- bch2_journal_entry_to_btree_root(c, i);
- __set_bit(i->btree_id, &btree_roots_have);
- break;
- case BCH_JSET_ENTRY_write_buffer_keys:
- EBUG_ON(!w->need_flush_to_write_buffer);
-
- if (!wb.wb)
- bch2_journal_keys_to_write_buffer_start(c, &wb, seq);
-
- jset_entry_for_each_key(i, k) {
- ret = bch2_journal_key_to_wb(c, &wb, i->btree_id, k);
- if (ret) {
- bch2_fs_fatal_error(c, "flushing journal keys to btree write buffer: %s",
- bch2_err_str(ret));
- bch2_journal_keys_to_write_buffer_end(c, &wb);
- return ret;
- }
- }
- i->type = BCH_JSET_ENTRY_btree_keys;
- break;
- }
- }
-
- if (wb.wb) {
- ret = bch2_journal_keys_to_write_buffer_end(c, &wb);
- if (ret) {
- bch2_fs_fatal_error(c, "error flushing journal keys to btree write buffer: %s",
- bch2_err_str(ret));
- return ret;
- }
- }
-
- spin_lock(&c->journal.lock);
- w->need_flush_to_write_buffer = false;
- spin_unlock(&c->journal.lock);
-
- start = end = vstruct_last(jset);
-
- end = bch2_btree_roots_to_journal_entries(c, end, btree_roots_have);
-
- struct jset_entry_datetime *d =
- container_of(jset_entry_init(&end, sizeof(*d)), struct jset_entry_datetime, entry);
- d->entry.type = BCH_JSET_ENTRY_datetime;
- d->seconds = cpu_to_le64(ktime_get_real_seconds());
-
- bch2_journal_super_entries_add_common(c, &end, seq);
- u64s = (u64 *) end - (u64 *) start;
-
- WARN_ON(u64s > j->entry_u64s_reserved);
-
- le32_add_cpu(&jset->u64s, u64s);
-
- sectors = vstruct_sectors(jset, c->block_bits);
- bytes = vstruct_bytes(jset);
-
- if (sectors > w->sectors) {
- bch2_fs_fatal_error(c, ": journal write overran available space, %zu > %u (extra %u reserved %u/%u)",
- vstruct_bytes(jset), w->sectors << 9,
- u64s, w->u64s_reserved, j->entry_u64s_reserved);
- return -EINVAL;
- }
-
- jset->magic = cpu_to_le64(jset_magic(c));
- jset->version = cpu_to_le32(c->sb.version);
-
- SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
- SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
-
- if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
- j->last_empty_seq = seq;
-
- if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
- validate_before_checksum = true;
-
- if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
- validate_before_checksum = true;
-
- if (validate_before_checksum &&
- (ret = jset_validate(c, NULL, jset, 0, WRITE)))
- return ret;
-
- ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
- jset->encrypted_start,
- vstruct_end(jset) - (void *) jset->encrypted_start);
-	if (bch2_fs_fatal_err_on(ret, c, "encrypting journal entry: %s", bch2_err_str(ret)))
- return ret;
-
- jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
- journal_nonce(jset), jset);
-
- if (!validate_before_checksum &&
- (ret = jset_validate(c, NULL, jset, 0, WRITE)))
- return ret;
-
- memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
- return 0;
-}
-
-static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *w)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- int error = bch2_journal_error(j);
-
- /*
- * If the journal is in an error state - we did an emergency shutdown -
- * we prefer to continue doing journal writes. We just mark them as
- * noflush so they'll never be used, but they'll still be visible by the
- * list_journal tool - this helps in debugging.
- *
- * There's a caveat: the first journal write after marking the
- * superblock dirty must always be a flush write, because on startup
- * from a clean shutdown we didn't necessarily read the journal and the
- * new journal write might overwrite whatever was in the journal
- * previously - we can't leave the journal without any flush writes in
- * it.
- *
- * So if we're in an error state, and we're still starting up, we don't
- * write anything at all.
- */
- if (error && test_bit(JOURNAL_need_flush_write, &j->flags))
- return -EIO;
-
- if (error ||
- w->noflush ||
- (!w->must_flush &&
- time_before(jiffies, j->last_flush_write +
- msecs_to_jiffies(c->opts.journal_flush_delay)) &&
- test_bit(JOURNAL_may_skip_flush, &j->flags))) {
- w->noflush = true;
- SET_JSET_NO_FLUSH(w->data, true);
- w->data->last_seq = 0;
- w->last_seq = 0;
-
- j->nr_noflush_writes++;
- } else {
- w->must_flush = true;
- j->last_flush_write = jiffies;
- j->nr_flush_writes++;
- clear_bit(JOURNAL_need_flush_write, &j->flags);
- }
-
- return 0;
-}
-
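The long comment in bch2_journal_write_pick_flush() above reduces to a single predicate; the following is a condensed userspace sketch of it (the struct and field names are illustrative, not the kernel's):

#include <stdbool.h>

/*
 * Condensed userspace restatement of the decision in
 * bch2_journal_write_pick_flush() above; struct and field names here are
 * illustrative only.  A write is downgraded to a noflush write when
 * nothing forces a flush.
 */
struct flush_inputs {
	bool	error;			/* journal already in an error state */
	bool	noflush;		/* buf already marked noflush */
	bool	must_flush;		/* e.g. a flush/fsync is waiting on it */
	bool	may_skip_flush;		/* JOURNAL_may_skip_flush is set */
	bool	flush_delay_elapsed;	/* journal_flush_delay ms since last flush */
};

static bool write_as_noflush(const struct flush_inputs *in)
{
	return in->error ||
	       in->noflush ||
	       (!in->must_flush &&
		!in->flush_delay_elapsed &&
		in->may_skip_flush);
}

int main(void)
{
	struct flush_inputs fsync_waiting = {
		.must_flush		= true,
		.may_skip_flush		= true,
	};

	return write_as_noflush(&fsync_waiting);	/* 0: this write must flush */
}
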
-CLOSURE_CALLBACK(bch2_journal_write)
-{
- closure_type(w, struct journal_buf, io);
- struct journal *j = container_of(w, struct journal, buf[w->idx]);
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct bch_replicas_padded replicas;
- unsigned nr_rw_members = 0;
- int ret;
-
- for_each_rw_member(c, ca)
- nr_rw_members++;
-
- BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
- BUG_ON(!w->write_started);
- BUG_ON(w->write_allocated);
- BUG_ON(w->write_done);
-
- j->write_start_time = local_clock();
-
- spin_lock(&j->lock);
- if (nr_rw_members > 1)
- w->separate_flush = true;
-
- ret = bch2_journal_write_pick_flush(j, w);
- spin_unlock(&j->lock);
- if (ret)
- goto err;
-
- mutex_lock(&j->buf_lock);
- journal_buf_realloc(j, w);
-
- ret = bch2_journal_write_prep(j, w);
- mutex_unlock(&j->buf_lock);
- if (ret)
- goto err;
-
- j->entry_bytes_written += vstruct_bytes(w->data);
-
- while (1) {
- spin_lock(&j->lock);
- ret = journal_write_alloc(j, w);
- if (!ret || !j->can_discard)
- break;
-
- spin_unlock(&j->lock);
- bch2_journal_do_discards(j);
- }
-
- if (ret) {
- struct printbuf buf = PRINTBUF;
- buf.atomic++;
-
- prt_printf(&buf, bch2_fmt(c, "Unable to allocate journal write at seq %llu: %s"),
- le64_to_cpu(w->data->seq),
- bch2_err_str(ret));
- __bch2_journal_debug_to_text(&buf, j);
- spin_unlock(&j->lock);
- bch2_print_string_as_lines(KERN_ERR, buf.buf);
- printbuf_exit(&buf);
- goto err;
- }
-
- /*
- * write is allocated, no longer need to account for it in
- * bch2_journal_space_available():
- */
- w->sectors = 0;
- w->write_allocated = true;
-
- /*
- * journal entry has been compacted and allocated, recalculate space
- * available:
- */
- bch2_journal_space_available(j);
- bch2_journal_do_writes(j);
- spin_unlock(&j->lock);
-
- w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));
-
- if (c->opts.nochanges)
- goto no_io;
-
- /*
- * Mark journal replicas before we submit the write to guarantee
- * recovery will find the journal entries after a crash.
- */
- bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
- w->devs_written);
- ret = bch2_mark_replicas(c, &replicas.e);
- if (ret)
- goto err;
-
- if (!JSET_NO_FLUSH(w->data))
- continue_at(cl, journal_write_preflush, j->wq);
- else
- continue_at(cl, journal_write_submit, j->wq);
- return;
-no_io:
- continue_at(cl, journal_write_done, j->wq);
- return;
-err:
- bch2_fatal_error(c);
- continue_at(cl, journal_write_done, j->wq);
-}
diff --git a/fs/bcachefs/journal_io.h b/fs/bcachefs/journal_io.h
deleted file mode 100644
index 2ca9cde30ea8..000000000000
--- a/fs/bcachefs/journal_io.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_JOURNAL_IO_H
-#define _BCACHEFS_JOURNAL_IO_H
-
-#include "darray.h"
-
-void bch2_journal_pos_from_member_info_set(struct bch_fs *);
-void bch2_journal_pos_from_member_info_resume(struct bch_fs *);
-
-struct journal_ptr {
- bool csum_good;
- u8 dev;
- u32 bucket;
- u32 bucket_offset;
- u64 sector;
-};
-
-/*
- * Only used for holding the journal entries we read in bch2_journal_read()
- * during recovery
- */
-struct journal_replay {
- DARRAY_PREALLOCATED(struct journal_ptr, 8) ptrs;
-
- bool csum_good;
- bool ignore_blacklisted;
- bool ignore_not_dirty;
- /* must be last: */
- struct jset j;
-};
-
-static inline bool journal_replay_ignore(struct journal_replay *i)
-{
- return !i || i->ignore_blacklisted || i->ignore_not_dirty;
-}
-
-static inline struct jset_entry *__jset_entry_type_next(struct jset *jset,
- struct jset_entry *entry, unsigned type)
-{
- while (entry < vstruct_last(jset)) {
- if (entry->type == type)
- return entry;
-
- entry = vstruct_next(entry);
- }
-
- return NULL;
-}
-
-#define for_each_jset_entry_type(entry, jset, type) \
- for (struct jset_entry *entry = (jset)->start; \
- (entry = __jset_entry_type_next(jset, entry, type)); \
- entry = vstruct_next(entry))
-
-#define jset_entry_for_each_key(_e, _k) \
- for (struct bkey_i *_k = (_e)->start; \
- _k < vstruct_last(_e); \
- _k = bkey_next(_k))
-
-#define for_each_jset_key(k, entry, jset) \
- for_each_jset_entry_type(entry, jset, BCH_JSET_ENTRY_btree_keys)\
- jset_entry_for_each_key(entry, k)
-
-int bch2_journal_entry_validate(struct bch_fs *, struct jset *,
- struct jset_entry *, unsigned, int,
- enum bch_validate_flags);
-void bch2_journal_entry_to_text(struct printbuf *, struct bch_fs *,
- struct jset_entry *);
-
-void bch2_journal_ptrs_to_text(struct printbuf *, struct bch_fs *,
- struct journal_replay *);
-
-int bch2_journal_read(struct bch_fs *, u64 *, u64 *, u64 *);
-
-CLOSURE_CALLBACK(bch2_journal_write);
-
-static inline struct jset_entry *jset_entry_init(struct jset_entry **end, size_t size)
-{
- struct jset_entry *entry = *end;
- unsigned u64s = DIV_ROUND_UP(size, sizeof(u64));
-
- memset(entry, 0, u64s * sizeof(u64));
- /*
- * The u64s field counts from the start of data, ignoring the shared
- * fields.
- */
- entry->u64s = cpu_to_le16(u64s - 1);
-
- *end = vstruct_next(*end);
- return entry;
-}
-
-#endif /* _BCACHEFS_JOURNAL_IO_H */
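A quick worked example of the u64s accounting in jset_entry_init() above, assuming the shared jset_entry header occupies exactly one u64 (which is what the comment about ignoring the shared fields implies):

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/*
 * Worked example of the u64s accounting in jset_entry_init() above,
 * assuming the shared jset_entry header takes up the first u64.
 */
int main(void)
{
	size_t size = 24;	/* bytes requested, header included */
	unsigned u64s = DIV_ROUND_UP(size, sizeof(uint64_t));

	assert(u64s == 3);		/* total space zeroed, in u64s */
	assert(u64s - 1 == 2);		/* value stored in entry->u64s */
	return 0;
}
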
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
deleted file mode 100644
index ace291f175dd..000000000000
--- a/fs/bcachefs/journal_reclaim.c
+++ /dev/null
@@ -1,917 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "btree_key_cache.h"
-#include "btree_update.h"
-#include "btree_write_buffer.h"
-#include "buckets.h"
-#include "errcode.h"
-#include "error.h"
-#include "journal.h"
-#include "journal_io.h"
-#include "journal_reclaim.h"
-#include "replicas.h"
-#include "sb-members.h"
-#include "trace.h"
-
-#include <linux/kthread.h>
-#include <linux/sched/mm.h>
-
-/* Free space calculations: */
-
-static unsigned journal_space_from(struct journal_device *ja,
- enum journal_space_from from)
-{
- switch (from) {
- case journal_space_discarded:
- return ja->discard_idx;
- case journal_space_clean_ondisk:
- return ja->dirty_idx_ondisk;
- case journal_space_clean:
- return ja->dirty_idx;
- default:
- BUG();
- }
-}
-
-unsigned bch2_journal_dev_buckets_available(struct journal *j,
- struct journal_device *ja,
- enum journal_space_from from)
-{
- unsigned available = (journal_space_from(ja, from) -
- ja->cur_idx - 1 + ja->nr) % ja->nr;
-
- /*
- * Don't use the last bucket unless writing the new last_seq
- * will make another bucket available:
- */
- if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
- --available;
-
- return available;
-}
-
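The expression above is ring-buffer distance arithmetic: how many buckets lie strictly between the bucket currently being written (cur_idx) and the reference index, walking forward around the ring. A standalone sketch of just the arithmetic (ring_buckets_available() is a hypothetical userspace helper):

#include <assert.h>

/*
 * Standalone illustration (not kernel code) of the arithmetic used in
 * bch2_journal_dev_buckets_available(): the number of buckets strictly
 * between cur_idx and a reference index, walking forward around a ring
 * of nr buckets.
 */
static unsigned ring_buckets_available(unsigned ref_idx, unsigned cur_idx,
				       unsigned nr)
{
	return (ref_idx - cur_idx - 1 + nr) % nr;
}

int main(void)
{
	/* 8 buckets, currently writing bucket 5, reference index 2: */
	assert(ring_buckets_available(2, 5, 8) == 4);	/* buckets 6, 7, 0, 1 */

	/* reference index equal to cur_idx wraps to nr - 1: */
	assert(ring_buckets_available(5, 5, 8) == 7);
	return 0;
}
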
-void bch2_journal_set_watermark(struct journal *j)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- bool low_on_space = j->space[journal_space_clean].total * 4 <=
- j->space[journal_space_total].total;
- bool low_on_pin = fifo_free(&j->pin) < j->pin.size / 4;
- bool low_on_wb = bch2_btree_write_buffer_must_wait(c);
- unsigned watermark = low_on_space || low_on_pin || low_on_wb
- ? BCH_WATERMARK_reclaim
- : BCH_WATERMARK_stripe;
-
- if (track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_space], low_on_space) ||
- track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_pin], low_on_pin) ||
- track_event_change(&c->times[BCH_TIME_blocked_write_buffer_full], low_on_wb))
- trace_and_count(c, journal_full, c);
-
- mod_bit(JOURNAL_space_low, &j->flags, low_on_space || low_on_pin);
-
- swap(watermark, j->watermark);
- if (watermark > j->watermark)
- journal_wake(j);
-}
-
-static struct journal_space
-journal_dev_space_available(struct journal *j, struct bch_dev *ca,
- enum journal_space_from from)
-{
- struct journal_device *ja = &ca->journal;
- unsigned sectors, buckets, unwritten;
- u64 seq;
-
- if (from == journal_space_total)
- return (struct journal_space) {
- .next_entry = ca->mi.bucket_size,
- .total = ca->mi.bucket_size * ja->nr,
- };
-
- buckets = bch2_journal_dev_buckets_available(j, ja, from);
- sectors = ja->sectors_free;
-
- /*
-	 * Note that we don't allocate the space for a journal entry
- * until we write it out - thus, account for it here:
- */
- for (seq = journal_last_unwritten_seq(j);
- seq <= journal_cur_seq(j);
- seq++) {
- unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;
-
- if (!unwritten)
- continue;
-
- /* entry won't fit on this device, skip: */
- if (unwritten > ca->mi.bucket_size)
- continue;
-
- if (unwritten >= sectors) {
- if (!buckets) {
- sectors = 0;
- break;
- }
-
- buckets--;
- sectors = ca->mi.bucket_size;
- }
-
- sectors -= unwritten;
- }
-
- if (sectors < ca->mi.bucket_size && buckets) {
- buckets--;
- sectors = ca->mi.bucket_size;
- }
-
- return (struct journal_space) {
- .next_entry = sectors,
- .total = sectors + buckets * ca->mi.bucket_size,
- };
-}
-
-static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
- enum journal_space_from from)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- unsigned pos, nr_devs = 0;
- struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];
-
- BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));
-
- rcu_read_lock();
- for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
- if (!ca->journal.nr)
- continue;
-
- space = journal_dev_space_available(j, ca, from);
- if (!space.next_entry)
- continue;
-
- for (pos = 0; pos < nr_devs; pos++)
- if (space.total > dev_space[pos].total)
- break;
-
- array_insert_item(dev_space, nr_devs, pos, space);
- }
- rcu_read_unlock();
-
- if (nr_devs < nr_devs_want)
- return (struct journal_space) { 0, 0 };
-
- /*
- * We sorted largest to smallest, and we want the smallest out of the
- * @nr_devs_want largest devices:
- */
- return dev_space[nr_devs_want - 1];
-}
-
-void bch2_journal_space_available(struct journal *j)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- unsigned clean, clean_ondisk, total;
- unsigned max_entry_size = min(j->buf[0].buf_size >> 9,
- j->buf[1].buf_size >> 9);
- unsigned nr_online = 0, nr_devs_want;
- bool can_discard = false;
- int ret = 0;
-
- lockdep_assert_held(&j->lock);
-
- rcu_read_lock();
- for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
- struct journal_device *ja = &ca->journal;
-
- if (!ja->nr)
- continue;
-
- while (ja->dirty_idx != ja->cur_idx &&
- ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
- ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
-
- while (ja->dirty_idx_ondisk != ja->dirty_idx &&
- ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
- ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
-
- if (ja->discard_idx != ja->dirty_idx_ondisk)
- can_discard = true;
-
- max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
- nr_online++;
- }
- rcu_read_unlock();
-
- j->can_discard = can_discard;
-
- if (nr_online < metadata_replicas_required(c)) {
- struct printbuf buf = PRINTBUF;
- buf.atomic++;
- prt_printf(&buf, "insufficient writeable journal devices available: have %u, need %u\n"
- "rw journal devs:", nr_online, metadata_replicas_required(c));
-
- rcu_read_lock();
- for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal])
- prt_printf(&buf, " %s", ca->name);
- rcu_read_unlock();
-
- bch_err(c, "%s", buf.buf);
- printbuf_exit(&buf);
- ret = JOURNAL_ERR_insufficient_devices;
- goto out;
- }
-
- nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);
-
- for (unsigned i = 0; i < journal_space_nr; i++)
- j->space[i] = __journal_space_available(j, nr_devs_want, i);
-
- clean_ondisk = j->space[journal_space_clean_ondisk].total;
- clean = j->space[journal_space_clean].total;
- total = j->space[journal_space_total].total;
-
- if (!j->space[journal_space_discarded].next_entry)
- ret = JOURNAL_ERR_journal_full;
-
- if ((j->space[journal_space_clean_ondisk].next_entry <
- j->space[journal_space_clean_ondisk].total) &&
- (clean - clean_ondisk <= total / 8) &&
- (clean_ondisk * 2 > clean))
- set_bit(JOURNAL_may_skip_flush, &j->flags);
- else
- clear_bit(JOURNAL_may_skip_flush, &j->flags);
-
- bch2_journal_set_watermark(j);
-out:
- j->cur_entry_sectors = !ret ? j->space[journal_space_discarded].next_entry : 0;
- j->cur_entry_error = ret;
-
- if (!ret)
- journal_wake(j);
-}
-
-/* Discards - last part of journal reclaim: */
-
-static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
-{
- bool ret;
-
- spin_lock(&j->lock);
- ret = ja->discard_idx != ja->dirty_idx_ondisk;
- spin_unlock(&j->lock);
-
- return ret;
-}
-
-/*
- * Advance ja->discard_idx as long as it points to buckets that are no longer
- * dirty, issuing discards if necessary:
- */
-void bch2_journal_do_discards(struct journal *j)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
-
- mutex_lock(&j->discard_lock);
-
- for_each_rw_member(c, ca) {
- struct journal_device *ja = &ca->journal;
-
- while (should_discard_bucket(j, ja)) {
- if (!c->opts.nochanges &&
- ca->mi.discard &&
- bdev_max_discard_sectors(ca->disk_sb.bdev))
- blkdev_issue_discard(ca->disk_sb.bdev,
- bucket_to_sector(ca,
- ja->buckets[ja->discard_idx]),
- ca->mi.bucket_size, GFP_NOFS);
-
- spin_lock(&j->lock);
- ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
-
- bch2_journal_space_available(j);
- spin_unlock(&j->lock);
- }
- }
-
- mutex_unlock(&j->discard_lock);
-}
-
-/*
- * Journal entry pinning - machinery for holding a reference on a given journal
- * entry, holding it open to ensure it gets replayed during recovery:
- */
-
-void bch2_journal_reclaim_fast(struct journal *j)
-{
- bool popped = false;
-
- lockdep_assert_held(&j->lock);
-
- /*
- * Unpin journal entries whose reference counts reached zero, meaning
- * all btree nodes got written out
- */
- while (!fifo_empty(&j->pin) &&
- j->pin.front <= j->seq_ondisk &&
- !atomic_read(&fifo_peek_front(&j->pin).count)) {
- j->pin.front++;
- popped = true;
- }
-
- if (popped)
- bch2_journal_space_available(j);
-}
-
-bool __bch2_journal_pin_put(struct journal *j, u64 seq)
-{
- struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
-
- return atomic_dec_and_test(&pin_list->count);
-}
-
-void bch2_journal_pin_put(struct journal *j, u64 seq)
-{
- if (__bch2_journal_pin_put(j, seq)) {
- spin_lock(&j->lock);
- bch2_journal_reclaim_fast(j);
- spin_unlock(&j->lock);
- }
-}
-
-static inline bool __journal_pin_drop(struct journal *j,
- struct journal_entry_pin *pin)
-{
- struct journal_entry_pin_list *pin_list;
-
- if (!journal_pin_active(pin))
- return false;
-
- if (j->flush_in_progress == pin)
- j->flush_in_progress_dropped = true;
-
- pin_list = journal_seq_pin(j, pin->seq);
- pin->seq = 0;
- list_del_init(&pin->list);
-
- /*
- * Unpinning a journal entry may make journal_next_bucket() succeed, if
- * writing a new last_seq will now make another bucket available:
- */
- return atomic_dec_and_test(&pin_list->count) &&
- pin_list == &fifo_peek_front(&j->pin);
-}
-
-void bch2_journal_pin_drop(struct journal *j,
- struct journal_entry_pin *pin)
-{
- spin_lock(&j->lock);
- if (__journal_pin_drop(j, pin))
- bch2_journal_reclaim_fast(j);
- spin_unlock(&j->lock);
-}
-
-static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
-{
- if (fn == bch2_btree_node_flush0 ||
- fn == bch2_btree_node_flush1)
- return JOURNAL_PIN_btree;
- else if (fn == bch2_btree_key_cache_journal_flush)
- return JOURNAL_PIN_key_cache;
- else
- return JOURNAL_PIN_other;
-}
-
-static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq,
- struct journal_entry_pin *pin,
- journal_pin_flush_fn flush_fn,
- enum journal_pin_type type)
-{
- struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
-
- /*
- * flush_fn is how we identify journal pins in debugfs, so must always
- * exist, even if it doesn't do anything:
- */
- BUG_ON(!flush_fn);
-
- atomic_inc(&pin_list->count);
- pin->seq = seq;
- pin->flush = flush_fn;
- list_add(&pin->list, &pin_list->list[type]);
-}
-
-void bch2_journal_pin_copy(struct journal *j,
- struct journal_entry_pin *dst,
- struct journal_entry_pin *src,
- journal_pin_flush_fn flush_fn)
-{
- spin_lock(&j->lock);
-
- u64 seq = READ_ONCE(src->seq);
-
- if (seq < journal_last_seq(j)) {
- /*
- * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
- * the src pin - with the pin dropped, the entry to pin might no
-		 * longer exist, but that means there's no longer anything to
- * copy and we can bail out here:
- */
- spin_unlock(&j->lock);
- return;
- }
-
- bool reclaim = __journal_pin_drop(j, dst);
-
- bch2_journal_pin_set_locked(j, seq, dst, flush_fn, journal_pin_type(flush_fn));
-
- if (reclaim)
- bch2_journal_reclaim_fast(j);
-
- /*
- * If the journal is currently full, we might want to call flush_fn
- * immediately:
- */
- if (seq == journal_last_seq(j))
- journal_wake(j);
- spin_unlock(&j->lock);
-}
-
-void bch2_journal_pin_set(struct journal *j, u64 seq,
- struct journal_entry_pin *pin,
- journal_pin_flush_fn flush_fn)
-{
- spin_lock(&j->lock);
-
- BUG_ON(seq < journal_last_seq(j));
-
- bool reclaim = __journal_pin_drop(j, pin);
-
- bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(flush_fn));
-
- if (reclaim)
- bch2_journal_reclaim_fast(j);
- /*
- * If the journal is currently full, we might want to call flush_fn
- * immediately:
- */
- if (seq == journal_last_seq(j))
- journal_wake(j);
-
- spin_unlock(&j->lock);
-}
-
-/**
- * bch2_journal_pin_flush: ensure journal pin callback is no longer running
- * @j: journal object
- * @pin: pin to flush
- */
-void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
-{
- BUG_ON(journal_pin_active(pin));
-
- wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
-}
-
-/*
- * Journal reclaim: flush references to open journal entries to reclaim space in
- * the journal
- *
- * May be done by the journal code in the background as needed to free up space
- * for more journal entries, or as part of doing a clean shutdown, or to migrate
- * data off of a specific device:
- */
-
-static struct journal_entry_pin *
-journal_get_next_pin(struct journal *j,
- u64 seq_to_flush,
- unsigned allowed_below_seq,
- unsigned allowed_above_seq,
- u64 *seq)
-{
- struct journal_entry_pin_list *pin_list;
- struct journal_entry_pin *ret = NULL;
- unsigned i;
-
- fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
- if (*seq > seq_to_flush && !allowed_above_seq)
- break;
-
- for (i = 0; i < JOURNAL_PIN_NR; i++)
- if ((((1U << i) & allowed_below_seq) && *seq <= seq_to_flush) ||
- ((1U << i) & allowed_above_seq)) {
- ret = list_first_entry_or_null(&pin_list->list[i],
- struct journal_entry_pin, list);
- if (ret)
- return ret;
- }
- }
-
- return NULL;
-}
-
-/* returns true if we did work */
-static size_t journal_flush_pins(struct journal *j,
- u64 seq_to_flush,
- unsigned allowed_below_seq,
- unsigned allowed_above_seq,
- unsigned min_any,
- unsigned min_key_cache)
-{
- struct journal_entry_pin *pin;
- size_t nr_flushed = 0;
- journal_pin_flush_fn flush_fn;
- u64 seq;
- int err;
-
- lockdep_assert_held(&j->reclaim_lock);
-
- while (1) {
- unsigned allowed_above = allowed_above_seq;
- unsigned allowed_below = allowed_below_seq;
-
- if (min_any) {
- allowed_above |= ~0;
- allowed_below |= ~0;
- }
-
- if (min_key_cache) {
- allowed_above |= 1U << JOURNAL_PIN_key_cache;
- allowed_below |= 1U << JOURNAL_PIN_key_cache;
- }
-
- cond_resched();
-
- j->last_flushed = jiffies;
-
- spin_lock(&j->lock);
- pin = journal_get_next_pin(j, seq_to_flush, allowed_below, allowed_above, &seq);
- if (pin) {
- BUG_ON(j->flush_in_progress);
- j->flush_in_progress = pin;
- j->flush_in_progress_dropped = false;
- flush_fn = pin->flush;
- }
- spin_unlock(&j->lock);
-
- if (!pin)
- break;
-
- if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
- min_key_cache--;
-
- if (min_any)
- min_any--;
-
- err = flush_fn(j, pin, seq);
-
- spin_lock(&j->lock);
- /* Pin might have been dropped or rearmed: */
- if (likely(!err && !j->flush_in_progress_dropped))
- list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
- j->flush_in_progress = NULL;
- j->flush_in_progress_dropped = false;
- spin_unlock(&j->lock);
-
- wake_up(&j->pin_flush_wait);
-
- if (err)
- break;
-
- nr_flushed++;
- }
-
- return nr_flushed;
-}
-
-static u64 journal_seq_to_flush(struct journal *j)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- u64 seq_to_flush = 0;
-
- spin_lock(&j->lock);
-
- for_each_rw_member(c, ca) {
- struct journal_device *ja = &ca->journal;
- unsigned nr_buckets, bucket_to_flush;
-
- if (!ja->nr)
- continue;
-
- /* Try to keep the journal at most half full: */
- nr_buckets = ja->nr / 2;
-
- nr_buckets = min(nr_buckets, ja->nr);
-
- bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
- seq_to_flush = max(seq_to_flush,
- ja->bucket_seq[bucket_to_flush]);
- }
-
- /* Also flush if the pin fifo is more than half full */
- seq_to_flush = max_t(s64, seq_to_flush,
- (s64) journal_cur_seq(j) -
- (j->pin.size >> 1));
- spin_unlock(&j->lock);
-
- return seq_to_flush;
-}
-
-/**
- * __bch2_journal_reclaim - free up journal buckets
- * @j: journal object
- * @direct: direct or background reclaim?
- * @kicked: requested to run since we last ran?
- * Returns: 0 on success, or -EIO if the journal has been shutdown
- *
- * Background journal reclaim writes out btree nodes. It should be run
- * early enough so that we never completely run out of journal buckets.
- *
- * High watermarks for triggering background reclaim:
- * - FIFO has fewer than 512 entries left
- * - fewer than 25% journal buckets free
- *
- * Background reclaim runs until low watermarks are reached:
- * - FIFO has more than 1024 entries left
- * - more than 50% journal buckets free
- *
- * As long as a reclaim can complete in the time it takes to fill up
- * 512 journal entries or 25% of all journal buckets, then
- * journal_next_bucket() should not stall.
- */
-static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct btree_cache *bc = &c->btree_cache;
- bool kthread = (current->flags & PF_KTHREAD) != 0;
- u64 seq_to_flush;
- size_t min_nr, min_key_cache, nr_flushed;
- unsigned flags;
- int ret = 0;
-
- /*
- * We can't invoke memory reclaim while holding the reclaim_lock -
- * journal reclaim is required to make progress for memory reclaim
- * (cleaning the caches), so we can't get stuck in memory reclaim while
- * we're holding the reclaim lock:
- */
- lockdep_assert_held(&j->reclaim_lock);
- flags = memalloc_noreclaim_save();
-
- do {
- if (kthread && kthread_should_stop())
- break;
-
- if (bch2_journal_error(j)) {
- ret = -EIO;
- break;
- }
-
- bch2_journal_do_discards(j);
-
- seq_to_flush = journal_seq_to_flush(j);
- min_nr = 0;
-
- /*
- * If it's been longer than j->reclaim_delay_ms since we last flushed,
- * make sure to flush at least one journal pin:
- */
- if (time_after(jiffies, j->last_flushed +
- msecs_to_jiffies(c->opts.journal_reclaim_delay)))
- min_nr = 1;
-
- if (j->watermark != BCH_WATERMARK_stripe)
- min_nr = 1;
-
- size_t btree_cache_live = bc->live[0].nr + bc->live[1].nr;
- if (atomic_long_read(&bc->nr_dirty) * 2 > btree_cache_live)
- min_nr = 1;
-
- min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
-
- trace_and_count(c, journal_reclaim_start, c,
- direct, kicked,
- min_nr, min_key_cache,
- atomic_long_read(&bc->nr_dirty), btree_cache_live,
- atomic_long_read(&c->btree_key_cache.nr_dirty),
- atomic_long_read(&c->btree_key_cache.nr_keys));
-
- nr_flushed = journal_flush_pins(j, seq_to_flush,
- ~0, 0,
- min_nr, min_key_cache);
-
- if (direct)
- j->nr_direct_reclaim += nr_flushed;
- else
- j->nr_background_reclaim += nr_flushed;
- trace_and_count(c, journal_reclaim_finish, c, nr_flushed);
-
- if (nr_flushed)
- wake_up(&j->reclaim_wait);
- } while ((min_nr || min_key_cache) && nr_flushed && !direct);
-
- memalloc_noreclaim_restore(flags);
-
- return ret;
-}
-
-int bch2_journal_reclaim(struct journal *j)
-{
- return __bch2_journal_reclaim(j, true, true);
-}
-
-static int bch2_journal_reclaim_thread(void *arg)
-{
- struct journal *j = arg;
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- unsigned long delay, now;
- bool journal_empty;
- int ret = 0;
-
- set_freezable();
-
- j->last_flushed = jiffies;
-
- while (!ret && !kthread_should_stop()) {
- bool kicked = j->reclaim_kicked;
-
- j->reclaim_kicked = false;
-
- mutex_lock(&j->reclaim_lock);
- ret = __bch2_journal_reclaim(j, false, kicked);
- mutex_unlock(&j->reclaim_lock);
-
- now = jiffies;
- delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
- j->next_reclaim = j->last_flushed + delay;
-
- if (!time_in_range(j->next_reclaim, now, now + delay))
- j->next_reclaim = now + delay;
-
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
- if (kthread_should_stop())
- break;
- if (j->reclaim_kicked)
- break;
-
- spin_lock(&j->lock);
- journal_empty = fifo_empty(&j->pin);
- spin_unlock(&j->lock);
-
- if (journal_empty)
- schedule();
- else if (time_after(j->next_reclaim, jiffies))
- schedule_timeout(j->next_reclaim - jiffies);
- else
- break;
- }
- __set_current_state(TASK_RUNNING);
- }
-
- return 0;
-}
-
-void bch2_journal_reclaim_stop(struct journal *j)
-{
- struct task_struct *p = j->reclaim_thread;
-
- j->reclaim_thread = NULL;
-
- if (p) {
- kthread_stop(p);
- put_task_struct(p);
- }
-}
-
-int bch2_journal_reclaim_start(struct journal *j)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct task_struct *p;
- int ret;
-
- if (j->reclaim_thread)
- return 0;
-
- p = kthread_create(bch2_journal_reclaim_thread, j,
- "bch-reclaim/%s", c->name);
- ret = PTR_ERR_OR_ZERO(p);
- bch_err_msg(c, ret, "creating journal reclaim thread");
- if (ret)
- return ret;
-
- get_task_struct(p);
- j->reclaim_thread = p;
- wake_up_process(p);
- return 0;
-}
-
-static int journal_flush_done(struct journal *j, u64 seq_to_flush,
- bool *did_work)
-{
- int ret;
-
- ret = bch2_journal_error(j);
- if (ret)
- return ret;
-
- mutex_lock(&j->reclaim_lock);
-
- if (journal_flush_pins(j, seq_to_flush,
- (1U << JOURNAL_PIN_key_cache)|
- (1U << JOURNAL_PIN_other), 0, 0, 0) ||
- journal_flush_pins(j, seq_to_flush,
- (1U << JOURNAL_PIN_btree), 0, 0, 0))
- *did_work = true;
-
- if (seq_to_flush > journal_cur_seq(j))
- bch2_journal_entry_close(j);
-
- spin_lock(&j->lock);
- /*
- * If journal replay hasn't completed, the unreplayed journal entries
- * hold refs on their corresponding sequence numbers
- */
- ret = !test_bit(JOURNAL_replay_done, &j->flags) ||
- journal_last_seq(j) > seq_to_flush ||
- !fifo_used(&j->pin);
-
- spin_unlock(&j->lock);
- mutex_unlock(&j->reclaim_lock);
-
- return ret;
-}
-
-bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
-{
- /* time_stats this */
- bool did_work = false;
-
- if (!test_bit(JOURNAL_running, &j->flags))
- return false;
-
- closure_wait_event(&j->async_wait,
- journal_flush_done(j, seq_to_flush, &did_work));
-
- return did_work;
-}
-
-int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct journal_entry_pin_list *p;
- u64 iter, seq = 0;
- int ret = 0;
-
- spin_lock(&j->lock);
- fifo_for_each_entry_ptr(p, &j->pin, iter)
- if (dev_idx >= 0
- ? bch2_dev_list_has_dev(p->devs, dev_idx)
- : p->devs.nr < c->opts.metadata_replicas)
- seq = iter;
- spin_unlock(&j->lock);
-
- bch2_journal_flush_pins(j, seq);
-
- ret = bch2_journal_error(j);
- if (ret)
- return ret;
-
- mutex_lock(&c->replicas_gc_lock);
- bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);
-
- /*
- * Now that we've populated replicas_gc, write to the journal to mark
- * active journal devices. This handles the case where the journal might
- * be empty. Otherwise we could clear all journal replicas and
- * temporarily put the fs into an unrecoverable state. Journal recovery
- * expects to find devices marked for journal data on unclean mount.
- */
- ret = bch2_journal_meta(&c->journal);
- if (ret)
- goto err;
-
- seq = 0;
- spin_lock(&j->lock);
- while (!ret) {
- struct bch_replicas_padded replicas;
-
- seq = max(seq, journal_last_seq(j));
- if (seq >= j->pin.back)
- break;
- bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
- journal_seq_pin(j, seq)->devs);
- seq++;
-
- if (replicas.e.nr_devs) {
- spin_unlock(&j->lock);
- ret = bch2_mark_replicas(c, &replicas.e);
- spin_lock(&j->lock);
- }
- }
- spin_unlock(&j->lock);
-err:
- ret = bch2_replicas_gc_end(c, ret);
- mutex_unlock(&c->replicas_gc_lock);
-
- return ret;
-}
diff --git a/fs/bcachefs/journal_reclaim.h b/fs/bcachefs/journal_reclaim.h
deleted file mode 100644
index ec84c3345281..000000000000
--- a/fs/bcachefs/journal_reclaim.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_JOURNAL_RECLAIM_H
-#define _BCACHEFS_JOURNAL_RECLAIM_H
-
-#define JOURNAL_PIN (32 * 1024)
-
-static inline void journal_reclaim_kick(struct journal *j)
-{
- struct task_struct *p = READ_ONCE(j->reclaim_thread);
-
- j->reclaim_kicked = true;
- if (p)
- wake_up_process(p);
-}
-
-unsigned bch2_journal_dev_buckets_available(struct journal *,
- struct journal_device *,
- enum journal_space_from);
-void bch2_journal_set_watermark(struct journal *);
-void bch2_journal_space_available(struct journal *);
-
-static inline bool journal_pin_active(struct journal_entry_pin *pin)
-{
- return pin->seq != 0;
-}
-
-static inline struct journal_entry_pin_list *
-journal_seq_pin(struct journal *j, u64 seq)
-{
- EBUG_ON(seq < j->pin.front || seq >= j->pin.back);
-
- return &j->pin.data[seq & j->pin.mask];
-}
-
-void bch2_journal_reclaim_fast(struct journal *);
-bool __bch2_journal_pin_put(struct journal *, u64);
-void bch2_journal_pin_put(struct journal *, u64);
-void bch2_journal_pin_drop(struct journal *, struct journal_entry_pin *);
-
-void bch2_journal_pin_set(struct journal *, u64, struct journal_entry_pin *,
- journal_pin_flush_fn);
-
-static inline void bch2_journal_pin_add(struct journal *j, u64 seq,
- struct journal_entry_pin *pin,
- journal_pin_flush_fn flush_fn)
-{
- if (unlikely(!journal_pin_active(pin) || pin->seq > seq))
- bch2_journal_pin_set(j, seq, pin, flush_fn);
-}
-
-void bch2_journal_pin_copy(struct journal *,
- struct journal_entry_pin *,
- struct journal_entry_pin *,
- journal_pin_flush_fn);
-
-static inline void bch2_journal_pin_update(struct journal *j, u64 seq,
- struct journal_entry_pin *pin,
- journal_pin_flush_fn flush_fn)
-{
- if (unlikely(!journal_pin_active(pin) || pin->seq < seq))
- bch2_journal_pin_set(j, seq, pin, flush_fn);
-}
-
-void bch2_journal_pin_flush(struct journal *, struct journal_entry_pin *);
-
-void bch2_journal_do_discards(struct journal *);
-int bch2_journal_reclaim(struct journal *);
-
-void bch2_journal_reclaim_stop(struct journal *);
-int bch2_journal_reclaim_start(struct journal *);
-
-bool bch2_journal_flush_pins(struct journal *, u64);
-
-static inline bool bch2_journal_flush_all_pins(struct journal *j)
-{
- return bch2_journal_flush_pins(j, U64_MAX);
-}
-
-int bch2_journal_flush_device_pins(struct journal *, int);
-
-#endif /* _BCACHEFS_JOURNAL_RECLAIM_H */
diff --git a/fs/bcachefs/journal_sb.c b/fs/bcachefs/journal_sb.c
deleted file mode 100644
index 62b910f2fb27..000000000000
--- a/fs/bcachefs/journal_sb.c
+++ /dev/null
@@ -1,232 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "journal_sb.h"
-#include "darray.h"
-
-#include <linux/sort.h>
-
-/* BCH_SB_FIELD_journal: */
-
-static int u64_cmp(const void *_l, const void *_r)
-{
- const u64 *l = _l;
- const u64 *r = _r;
-
- return cmp_int(*l, *r);
-}
-
-static int bch2_sb_journal_validate(struct bch_sb *sb, struct bch_sb_field *f,
- enum bch_validate_flags flags, struct printbuf *err)
-{
- struct bch_sb_field_journal *journal = field_to_type(f, journal);
- struct bch_member m = bch2_sb_member_get(sb, sb->dev_idx);
- int ret = -BCH_ERR_invalid_sb_journal;
- unsigned nr;
- unsigned i;
- u64 *b;
-
- nr = bch2_nr_journal_buckets(journal);
- if (!nr)
- return 0;
-
- b = kmalloc_array(nr, sizeof(u64), GFP_KERNEL);
- if (!b)
- return -BCH_ERR_ENOMEM_sb_journal_validate;
-
- for (i = 0; i < nr; i++)
- b[i] = le64_to_cpu(journal->buckets[i]);
-
- sort(b, nr, sizeof(u64), u64_cmp, NULL);
-
- if (!b[0]) {
- prt_printf(err, "journal bucket at sector 0");
- goto err;
- }
-
- if (b[0] < le16_to_cpu(m.first_bucket)) {
- prt_printf(err, "journal bucket %llu before first bucket %u",
- b[0], le16_to_cpu(m.first_bucket));
- goto err;
- }
-
- if (b[nr - 1] >= le64_to_cpu(m.nbuckets)) {
- prt_printf(err, "journal bucket %llu past end of device (nbuckets %llu)",
- b[nr - 1], le64_to_cpu(m.nbuckets));
- goto err;
- }
-
- for (i = 0; i + 1 < nr; i++)
- if (b[i] == b[i + 1]) {
- prt_printf(err, "duplicate journal buckets %llu", b[i]);
- goto err;
- }
-
- ret = 0;
-err:
- kfree(b);
- return ret;
-}
-
-static void bch2_sb_journal_to_text(struct printbuf *out, struct bch_sb *sb,
- struct bch_sb_field *f)
-{
- struct bch_sb_field_journal *journal = field_to_type(f, journal);
- unsigned i, nr = bch2_nr_journal_buckets(journal);
-
- prt_printf(out, "Buckets: ");
- for (i = 0; i < nr; i++)
- prt_printf(out, " %llu", le64_to_cpu(journal->buckets[i]));
- prt_newline(out);
-}
-
-const struct bch_sb_field_ops bch_sb_field_ops_journal = {
- .validate = bch2_sb_journal_validate,
- .to_text = bch2_sb_journal_to_text,
-};
-
-struct u64_range {
- u64 start;
- u64 end;
-};
-
-static int u64_range_cmp(const void *_l, const void *_r)
-{
- const struct u64_range *l = _l;
- const struct u64_range *r = _r;
-
- return cmp_int(l->start, r->start);
-}
-
-static int bch2_sb_journal_v2_validate(struct bch_sb *sb, struct bch_sb_field *f,
- enum bch_validate_flags flags, struct printbuf *err)
-{
- struct bch_sb_field_journal_v2 *journal = field_to_type(f, journal_v2);
- struct bch_member m = bch2_sb_member_get(sb, sb->dev_idx);
- int ret = -BCH_ERR_invalid_sb_journal;
- u64 sum = 0;
- unsigned nr;
- unsigned i;
- struct u64_range *b;
-
- nr = bch2_sb_field_journal_v2_nr_entries(journal);
- if (!nr)
- return 0;
-
- b = kmalloc_array(nr, sizeof(*b), GFP_KERNEL);
- if (!b)
- return -BCH_ERR_ENOMEM_sb_journal_v2_validate;
-
- for (i = 0; i < nr; i++) {
- b[i].start = le64_to_cpu(journal->d[i].start);
- b[i].end = b[i].start + le64_to_cpu(journal->d[i].nr);
-
- if (b[i].end <= b[i].start) {
- prt_printf(err, "journal buckets entry with bad nr: %llu+%llu",
- le64_to_cpu(journal->d[i].start),
- le64_to_cpu(journal->d[i].nr));
- goto err;
- }
-
- sum += le64_to_cpu(journal->d[i].nr);
- }
-
- sort(b, nr, sizeof(*b), u64_range_cmp, NULL);
-
- if (!b[0].start) {
- prt_printf(err, "journal bucket at sector 0");
- goto err;
- }
-
- if (b[0].start < le16_to_cpu(m.first_bucket)) {
- prt_printf(err, "journal bucket %llu before first bucket %u",
- b[0].start, le16_to_cpu(m.first_bucket));
- goto err;
- }
-
- if (b[nr - 1].end > le64_to_cpu(m.nbuckets)) {
- prt_printf(err, "journal bucket %llu past end of device (nbuckets %llu)",
- b[nr - 1].end - 1, le64_to_cpu(m.nbuckets));
- goto err;
- }
-
- for (i = 0; i + 1 < nr; i++) {
- if (b[i].end > b[i + 1].start) {
- prt_printf(err, "duplicate journal buckets in ranges %llu-%llu, %llu-%llu",
- b[i].start, b[i].end, b[i + 1].start, b[i + 1].end);
- goto err;
- }
- }
-
- if (sum > UINT_MAX) {
- prt_printf(err, "too many journal buckets: %llu > %u", sum, UINT_MAX);
- goto err;
- }
-
- ret = 0;
-err:
- kfree(b);
- return ret;
-}
-
-static void bch2_sb_journal_v2_to_text(struct printbuf *out, struct bch_sb *sb,
- struct bch_sb_field *f)
-{
- struct bch_sb_field_journal_v2 *journal = field_to_type(f, journal_v2);
- unsigned i, nr = bch2_sb_field_journal_v2_nr_entries(journal);
-
- prt_printf(out, "Buckets: ");
- for (i = 0; i < nr; i++)
- prt_printf(out, " %llu-%llu",
- le64_to_cpu(journal->d[i].start),
- le64_to_cpu(journal->d[i].start) + le64_to_cpu(journal->d[i].nr));
- prt_newline(out);
-}
-
-const struct bch_sb_field_ops bch_sb_field_ops_journal_v2 = {
- .validate = bch2_sb_journal_v2_validate,
- .to_text = bch2_sb_journal_v2_to_text,
-};
-
-int bch2_journal_buckets_to_sb(struct bch_fs *c, struct bch_dev *ca,
- u64 *buckets, unsigned nr)
-{
- struct bch_sb_field_journal_v2 *j;
- unsigned i, dst = 0, nr_compacted = 1;
-
- if (c)
- lockdep_assert_held(&c->sb_lock);
-
- if (!nr) {
- bch2_sb_field_delete(&ca->disk_sb, BCH_SB_FIELD_journal);
- bch2_sb_field_delete(&ca->disk_sb, BCH_SB_FIELD_journal_v2);
- return 0;
- }
-
- for (i = 0; i + 1 < nr; i++)
- if (buckets[i] + 1 != buckets[i + 1])
- nr_compacted++;
-
- j = bch2_sb_field_resize(&ca->disk_sb, journal_v2,
- (sizeof(*j) + sizeof(j->d[0]) * nr_compacted) / sizeof(u64));
- if (!j)
- return -BCH_ERR_ENOSPC_sb_journal;
-
- bch2_sb_field_delete(&ca->disk_sb, BCH_SB_FIELD_journal);
-
- j->d[dst].start = cpu_to_le64(buckets[0]);
- j->d[dst].nr = cpu_to_le64(1);
-
- for (i = 1; i < nr; i++) {
- if (buckets[i] == buckets[i - 1] + 1) {
- le64_add_cpu(&j->d[dst].nr, 1);
- } else {
- dst++;
- j->d[dst].start = cpu_to_le64(buckets[i]);
- j->d[dst].nr = cpu_to_le64(1);
- }
- }
-
- BUG_ON(dst + 1 != nr_compacted);
- return 0;
-}
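The loop above run-length encodes a sorted list of bucket numbers into (start, nr) extents for the v2 superblock field. A self-contained userspace sketch of the same compaction with a worked example (compact_buckets() is hypothetical, not the kernel code path):

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

struct range { uint64_t start, nr; };

/*
 * Userspace sketch of the compaction in bch2_journal_buckets_to_sb() above:
 * consecutive bucket numbers collapse into (start, nr) extents.  Assumes
 * 'buckets' is sorted, as the superblock code expects.
 */
static size_t compact_buckets(const uint64_t *buckets, size_t nr,
			      struct range *out)
{
	size_t dst = 0;

	if (!nr)
		return 0;

	out[0] = (struct range) { .start = buckets[0], .nr = 1 };

	for (size_t i = 1; i < nr; i++) {
		if (buckets[i] == buckets[i - 1] + 1)
			out[dst].nr++;
		else
			out[++dst] = (struct range) { .start = buckets[i], .nr = 1 };
	}

	return dst + 1;
}

int main(void)
{
	uint64_t buckets[] = { 10, 11, 12, 20, 21 };
	struct range out[5];
	size_t n = compact_buckets(buckets, 5, out);

	assert(n == 2);
	assert(out[0].start == 10 && out[0].nr == 3);
	assert(out[1].start == 20 && out[1].nr == 2);
	return 0;
}
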
diff --git a/fs/bcachefs/journal_sb.h b/fs/bcachefs/journal_sb.h
deleted file mode 100644
index ba40a7e8d90a..000000000000
--- a/fs/bcachefs/journal_sb.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#include "super-io.h"
-#include "vstructs.h"
-
-static inline unsigned bch2_nr_journal_buckets(struct bch_sb_field_journal *j)
-{
- return j
- ? (__le64 *) vstruct_end(&j->field) - j->buckets
- : 0;
-}
-
-static inline unsigned bch2_sb_field_journal_v2_nr_entries(struct bch_sb_field_journal_v2 *j)
-{
- if (!j)
- return 0;
-
- return (struct bch_sb_field_journal_v2_entry *) vstruct_end(&j->field) - &j->d[0];
-}
-
-extern const struct bch_sb_field_ops bch_sb_field_ops_journal;
-extern const struct bch_sb_field_ops bch_sb_field_ops_journal_v2;
-
-int bch2_journal_buckets_to_sb(struct bch_fs *, struct bch_dev *, u64 *, unsigned);
diff --git a/fs/bcachefs/journal_seq_blacklist.c b/fs/bcachefs/journal_seq_blacklist.c
deleted file mode 100644
index 1f25c111c54c..000000000000
--- a/fs/bcachefs/journal_seq_blacklist.c
+++ /dev/null
@@ -1,255 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "eytzinger.h"
-#include "journal.h"
-#include "journal_seq_blacklist.h"
-#include "super-io.h"
-
-/*
- * journal_seq_blacklist machinery:
- *
- * To guarantee order of btree updates after a crash, we need to detect when a
- * btree node entry (bset) is newer than the newest journal entry that was
- * successfully written, and ignore it - effectively ignoring any btree updates
- * that didn't make it into the journal.
- *
- * If we didn't do this, we might have two btree nodes, a and b, both with
- * updates that weren't written to the journal yet: if b was updated after a,
- * but b was flushed and not a - oops; on recovery we'll find that the updates
- * to b happened, but not the updates to a that happened before it.
- *
- * Ignoring bsets that are newer than the newest journal entry is always safe,
- * because everything they contain will also have been journalled - and must
- * still be present in the journal on disk until a journal entry has been
- * written _after_ that bset was written.
- *
- * To accomplish this, bsets record the newest journal sequence number they
- * contain updates for; then, on startup, the btree code queries the journal
- * code to ask "Is this sequence number newer than the newest journal entry? If
- * so, ignore it."
- *
- * When this happens, we must blacklist that journal sequence number: the
- * journal must not write any entries with that sequence number, and it must
- * record that it was blacklisted so that a) on recovery we don't think we have
- * missing journal entries and b) so that the btree code continues to ignore
- * that bset, until that btree node is rewritten.
- */
-
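To make the startup query described above concrete: a sequence number is blacklisted when it falls inside one of the stored ranges, with end exclusive (see bch2_journal_seq_is_blacklisted() below). A minimal userspace sketch of that check, using a linear scan in place of the eytzinger-ordered table:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct blacklist_entry { uint64_t start, end; };	/* end is exclusive */

/*
 * Userspace sketch of the query described in the comment above: is a given
 * journal sequence number inside a blacklisted range?  The real
 * implementation (bch2_journal_seq_is_blacklisted(), below) searches an
 * eytzinger-ordered table; a linear scan is enough to show the semantics.
 */
static bool seq_is_blacklisted(const struct blacklist_entry *t, size_t nr,
			       uint64_t seq)
{
	for (size_t i = 0; i < nr; i++)
		if (seq >= t[i].start && seq < t[i].end)
			return true;
	return false;
}

int main(void)
{
	struct blacklist_entry t[] = { { 100, 105 }, { 200, 201 } };

	assert(seq_is_blacklisted(t, 2, 102));
	assert(!seq_is_blacklisted(t, 2, 105));	/* end is exclusive */
	assert(!seq_is_blacklisted(t, 2, 150));
	return 0;
}
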
-static unsigned sb_blacklist_u64s(unsigned nr)
-{
- struct bch_sb_field_journal_seq_blacklist *bl;
-
- return (sizeof(*bl) + sizeof(bl->start[0]) * nr) / sizeof(u64);
-}
-
-int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end)
-{
- struct bch_sb_field_journal_seq_blacklist *bl;
- unsigned i = 0, nr;
- int ret = 0;
-
- mutex_lock(&c->sb_lock);
- bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
- nr = blacklist_nr_entries(bl);
-
- while (i < nr) {
- struct journal_seq_blacklist_entry *e =
- bl->start + i;
-
- if (end < le64_to_cpu(e->start))
- break;
-
- if (start > le64_to_cpu(e->end)) {
- i++;
- continue;
- }
-
- /*
- * Entry is contiguous or overlapping with new entry: merge it
- * with new entry, and delete:
- */
-
- start = min(start, le64_to_cpu(e->start));
- end = max(end, le64_to_cpu(e->end));
- array_remove_item(bl->start, nr, i);
- }
-
- bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
- sb_blacklist_u64s(nr + 1));
- if (!bl) {
- ret = -BCH_ERR_ENOSPC_sb_journal_seq_blacklist;
- goto out;
- }
-
- array_insert_item(bl->start, nr, i, ((struct journal_seq_blacklist_entry) {
- .start = cpu_to_le64(start),
- .end = cpu_to_le64(end),
- }));
- c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << BCH_FEATURE_journal_seq_blacklist_v3);
-
- ret = bch2_write_super(c);
-out:
- mutex_unlock(&c->sb_lock);
-
- return ret ?: bch2_blacklist_table_initialize(c);
-}
-
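Worked example of the merge rule above: adding the range 8-22 to an existing blacklist of 5-10 and 20-25 absorbs both entries and leaves a single 5-25. A small standalone sketch of just that widening step (illustrative only, not the kernel code path):

#include <assert.h>
#include <stdint.h>

/*
 * Worked example of the merge rule in bch2_journal_seq_blacklist_add()
 * above: an existing entry that overlaps (or is contiguous with) the new
 * range is absorbed by widening the new range to cover both.
 */
int main(void)
{
	uint64_t start = 8, end = 22;			/* range being added */
	uint64_t existing[][2] = { { 5, 10 }, { 20, 25 } };

	for (int i = 0; i < 2; i++) {
		if (end < existing[i][0] || start > existing[i][1])
			continue;			/* disjoint: keep both */
		start = start < existing[i][0] ? start : existing[i][0];
		end = end > existing[i][1] ? end : existing[i][1];
	}

	assert(start == 5 && end == 25);
	return 0;
}
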
-static int journal_seq_blacklist_table_cmp(const void *_l, const void *_r)
-{
- const struct journal_seq_blacklist_table_entry *l = _l;
- const struct journal_seq_blacklist_table_entry *r = _r;
-
- return cmp_int(l->start, r->start);
-}
-
-bool bch2_journal_seq_is_blacklisted(struct bch_fs *c, u64 seq,
- bool dirty)
-{
- struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
- struct journal_seq_blacklist_table_entry search = { .start = seq };
- int idx;
-
- if (!t)
- return false;
-
- idx = eytzinger0_find_le(t->entries, t->nr,
- sizeof(t->entries[0]),
- journal_seq_blacklist_table_cmp,
- &search);
- if (idx < 0)
- return false;
-
- BUG_ON(t->entries[idx].start > seq);
-
- if (seq >= t->entries[idx].end)
- return false;
-
- if (dirty)
- t->entries[idx].dirty = true;
- return true;
-}
-
-int bch2_blacklist_table_initialize(struct bch_fs *c)
-{
- struct bch_sb_field_journal_seq_blacklist *bl =
- bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
- struct journal_seq_blacklist_table *t;
- unsigned i, nr = blacklist_nr_entries(bl);
-
- if (!bl)
- return 0;
-
- t = kzalloc(struct_size(t, entries, nr), GFP_KERNEL);
- if (!t)
- return -BCH_ERR_ENOMEM_blacklist_table_init;
-
- t->nr = nr;
-
- for (i = 0; i < nr; i++) {
- t->entries[i].start = le64_to_cpu(bl->start[i].start);
- t->entries[i].end = le64_to_cpu(bl->start[i].end);
- }
-
- eytzinger0_sort(t->entries,
- t->nr,
- sizeof(t->entries[0]),
- journal_seq_blacklist_table_cmp,
- NULL);
-
- kfree(c->journal_seq_blacklist_table);
- c->journal_seq_blacklist_table = t;
- return 0;
-}
-
-static int bch2_sb_journal_seq_blacklist_validate(struct bch_sb *sb, struct bch_sb_field *f,
- enum bch_validate_flags flags, struct printbuf *err)
-{
- struct bch_sb_field_journal_seq_blacklist *bl =
- field_to_type(f, journal_seq_blacklist);
- unsigned i, nr = blacklist_nr_entries(bl);
-
- for (i = 0; i < nr; i++) {
- struct journal_seq_blacklist_entry *e = bl->start + i;
-
- if (le64_to_cpu(e->start) >=
- le64_to_cpu(e->end)) {
- prt_printf(err, "entry %u start >= end (%llu >= %llu)",
- i, le64_to_cpu(e->start), le64_to_cpu(e->end));
- return -BCH_ERR_invalid_sb_journal_seq_blacklist;
- }
-
- if (i + 1 < nr &&
- le64_to_cpu(e[0].end) >
- le64_to_cpu(e[1].start)) {
- prt_printf(err, "entry %u out of order with next entry (%llu > %llu)",
- i + 1, le64_to_cpu(e[0].end), le64_to_cpu(e[1].start));
- return -BCH_ERR_invalid_sb_journal_seq_blacklist;
- }
- }
-
- return 0;
-}
-
-static void bch2_sb_journal_seq_blacklist_to_text(struct printbuf *out,
- struct bch_sb *sb,
- struct bch_sb_field *f)
-{
- struct bch_sb_field_journal_seq_blacklist *bl =
- field_to_type(f, journal_seq_blacklist);
- struct journal_seq_blacklist_entry *i;
- unsigned nr = blacklist_nr_entries(bl);
-
- for (i = bl->start; i < bl->start + nr; i++) {
- if (i != bl->start)
- prt_printf(out, " ");
-
- prt_printf(out, "%llu-%llu",
- le64_to_cpu(i->start),
- le64_to_cpu(i->end));
- }
- prt_newline(out);
-}
-
-const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist = {
- .validate = bch2_sb_journal_seq_blacklist_validate,
- .to_text = bch2_sb_journal_seq_blacklist_to_text
-};
-
-bool bch2_blacklist_entries_gc(struct bch_fs *c)
-{
- struct journal_seq_blacklist_entry *src, *dst;
-
- struct bch_sb_field_journal_seq_blacklist *bl =
- bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
- if (!bl)
- return false;
-
- unsigned nr = blacklist_nr_entries(bl);
- dst = bl->start;
-
- struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
- BUG_ON(nr != t->nr);
-
- unsigned i;
- for (src = bl->start, i = t->nr == 0 ? 0 : eytzinger0_first(t->nr);
- src < bl->start + nr;
- src++, i = eytzinger0_next(i, nr)) {
- BUG_ON(t->entries[i].start != le64_to_cpu(src->start));
- BUG_ON(t->entries[i].end != le64_to_cpu(src->end));
-
- if (t->entries[i].dirty || t->entries[i].end >= c->journal.oldest_seq_found_ondisk)
- *dst++ = *src;
- }
-
- unsigned new_nr = dst - bl->start;
- if (new_nr == nr)
- return false;
-
- bch_verbose(c, "nr blacklist entries was %u, now %u", nr, new_nr);
-
- bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
- new_nr ? sb_blacklist_u64s(new_nr) : 0);
- BUG_ON(new_nr && !bl);
- return true;
-}
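
The lookup above, bch2_journal_seq_is_blacklisted(), treats each table entry as a
half-open range [start, end): it finds the greatest entry whose start is <= seq and
reports a hit only if seq is still below that entry's end. A minimal restatement of
the same predicate using a plain linear scan instead of the eytzinger-ordered
search (illustrative only, not code from the tree):

static bool seq_is_blacklisted(const struct journal_seq_blacklist_table_entry *e,
                               unsigned nr, u64 seq)
{
        for (unsigned i = 0; i < nr; i++)
                if (e[i].start <= seq && seq < e[i].end)
                        return true;
        return false;
}
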
diff --git a/fs/bcachefs/journal_seq_blacklist.h b/fs/bcachefs/journal_seq_blacklist.h
deleted file mode 100644
index d47636f96fdc..000000000000
--- a/fs/bcachefs/journal_seq_blacklist.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_JOURNAL_SEQ_BLACKLIST_H
-#define _BCACHEFS_JOURNAL_SEQ_BLACKLIST_H
-
-static inline unsigned
-blacklist_nr_entries(struct bch_sb_field_journal_seq_blacklist *bl)
-{
- return bl
- ? ((vstruct_end(&bl->field) - (void *) &bl->start[0]) /
- sizeof(struct journal_seq_blacklist_entry))
- : 0;
-}
-
-bool bch2_journal_seq_is_blacklisted(struct bch_fs *, u64, bool);
-int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64, u64);
-int bch2_blacklist_table_initialize(struct bch_fs *);
-
-extern const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist;
-
-bool bch2_blacklist_entries_gc(struct bch_fs *);
-
-#endif /* _BCACHEFS_JOURNAL_SEQ_BLACKLIST_H */
diff --git a/fs/bcachefs/journal_seq_blacklist_format.h b/fs/bcachefs/journal_seq_blacklist_format.h
deleted file mode 100644
index 2566b12dbc04..000000000000
--- a/fs/bcachefs/journal_seq_blacklist_format.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_JOURNAL_SEQ_BLACKLIST_FORMAT_H
-#define _BCACHEFS_JOURNAL_SEQ_BLACKLIST_FORMAT_H
-
-struct journal_seq_blacklist_entry {
- __le64 start;
- __le64 end;
-};
-
-struct bch_sb_field_journal_seq_blacklist {
- struct bch_sb_field field;
- struct journal_seq_blacklist_entry start[];
-};
-
-#endif /* _BCACHEFS_JOURNAL_SEQ_BLACKLIST_FORMAT_H */
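
The interesting step in bch2_journal_seq_blacklist_add() above is how a new
[start, end] range is folded into the existing sorted entries: any entry it touches
or overlaps is absorbed into the new range and deleted, then the widened range is
inserted in order. A standalone sketch of that merge, using plain arrays with no
superblock resize or endian handling (assumptions of this sketch, not the tree's
code):

#include <string.h>

struct range { unsigned long long start, end; };

/*
 * Merge [start, end] into a sorted, non-overlapping array of @nr ranges.
 * Assumes the array has room for nr + 1 entries; returns the new count.
 */
static unsigned range_list_add(struct range *r, unsigned nr,
                               unsigned long long start, unsigned long long end)
{
        unsigned i = 0;

        while (i < nr) {
                if (end < r[i].start)
                        break;          /* r[i] and everything after it lie past the new range */

                if (start > r[i].end) {
                        i++;            /* r[i] lies entirely before the new range */
                        continue;
                }

                /* Contiguous or overlapping: absorb entry i, then delete it */
                if (r[i].start < start)
                        start = r[i].start;
                if (r[i].end > end)
                        end = r[i].end;
                memmove(&r[i], &r[i + 1], (nr - i - 1) * sizeof(*r));
                nr--;
        }

        memmove(&r[i + 1], &r[i], (nr - i) * sizeof(*r));
        r[i] = (struct range) { .start = start, .end = end };
        return nr + 1;
}
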
diff --git a/fs/bcachefs/journal_types.h b/fs/bcachefs/journal_types.h
deleted file mode 100644
index 19183fcf7ad7..000000000000
--- a/fs/bcachefs/journal_types.h
+++ /dev/null
@@ -1,345 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_JOURNAL_TYPES_H
-#define _BCACHEFS_JOURNAL_TYPES_H
-
-#include <linux/cache.h>
-#include <linux/workqueue.h>
-
-#include "alloc_types.h"
-#include "super_types.h"
-#include "fifo.h"
-
-#define JOURNAL_BUF_BITS 2
-#define JOURNAL_BUF_NR (1U << JOURNAL_BUF_BITS)
-#define JOURNAL_BUF_MASK (JOURNAL_BUF_NR - 1)
-
-/*
- * We put JOURNAL_BUF_NR of these in struct journal; we use them for writes to
- * the journal that are being staged or in flight.
- */
-struct journal_buf {
- struct closure io;
- struct jset *data;
-
- __BKEY_PADDED(key, BCH_REPLICAS_MAX);
- struct bch_devs_list devs_written;
-
- struct closure_waitlist wait;
- u64 last_seq; /* copy of data->last_seq */
- long expires;
- u64 flush_time;
-
- unsigned buf_size; /* size in bytes of @data */
- unsigned sectors; /* maximum size for current entry */
- unsigned disk_sectors; /* maximum size entry could have been, if
- buf_size was bigger */
- unsigned u64s_reserved;
- bool noflush:1; /* write has already been kicked off, and was noflush */
- bool must_flush:1; /* something wants a flush */
- bool separate_flush:1;
- bool need_flush_to_write_buffer:1;
- bool write_started:1;
- bool write_allocated:1;
- bool write_done:1;
- u8 idx;
-};
-
-/*
- * Something that makes a journal entry dirty - i.e. a btree node that has to be
- * flushed:
- */
-
-enum journal_pin_type {
- JOURNAL_PIN_btree,
- JOURNAL_PIN_key_cache,
- JOURNAL_PIN_other,
- JOURNAL_PIN_NR,
-};
-
-struct journal_entry_pin_list {
- struct list_head list[JOURNAL_PIN_NR];
- struct list_head flushed;
- atomic_t count;
- struct bch_devs_list devs;
-};
-
-struct journal;
-struct journal_entry_pin;
-typedef int (*journal_pin_flush_fn)(struct journal *j,
- struct journal_entry_pin *, u64);
-
-struct journal_entry_pin {
- struct list_head list;
- journal_pin_flush_fn flush;
- u64 seq;
-};
-
-struct journal_res {
- bool ref;
- u8 idx;
- u16 u64s;
- u32 offset;
- u64 seq;
-};
-
-union journal_res_state {
- struct {
- atomic64_t counter;
- };
-
- struct {
- u64 v;
- };
-
- struct {
- u64 cur_entry_offset:20,
- idx:2,
- unwritten_idx:2,
- buf0_count:10,
- buf1_count:10,
- buf2_count:10,
- buf3_count:10;
- };
-};
-
-/* bytes: */
-#define JOURNAL_ENTRY_SIZE_MIN (64U << 10) /* 64k */
-#define JOURNAL_ENTRY_SIZE_MAX (4U << 20) /* 4M */
-
-/*
- * We stash some journal state as sentinel values in cur_entry_offset:
- * note - cur_entry_offset is in units of u64s
- */
-#define JOURNAL_ENTRY_OFFSET_MAX ((1U << 20) - 1)
-
-#define JOURNAL_ENTRY_CLOSED_VAL (JOURNAL_ENTRY_OFFSET_MAX - 1)
-#define JOURNAL_ENTRY_ERROR_VAL (JOURNAL_ENTRY_OFFSET_MAX)
-
-struct journal_space {
- /* Units of 512-byte sectors: */
- unsigned next_entry; /* How big the next journal entry can be */
- unsigned total;
-};
-
-enum journal_space_from {
- journal_space_discarded,
- journal_space_clean_ondisk,
- journal_space_clean,
- journal_space_total,
- journal_space_nr,
-};
-
-#define JOURNAL_FLAGS() \
- x(replay_done) \
- x(running) \
- x(may_skip_flush) \
- x(need_flush_write) \
- x(space_low)
-
-enum journal_flags {
-#define x(n) JOURNAL_##n,
- JOURNAL_FLAGS()
-#undef x
-};
-
-/* Reasons we may fail to get a journal reservation: */
-#define JOURNAL_ERRORS() \
- x(ok) \
- x(retry) \
- x(blocked) \
- x(max_in_flight) \
- x(journal_full) \
- x(journal_pin_full) \
- x(journal_stuck) \
- x(insufficient_devices)
-
-enum journal_errors {
-#define x(n) JOURNAL_ERR_##n,
- JOURNAL_ERRORS()
-#undef x
-};
-
-typedef DARRAY(u64) darray_u64;
-
-struct journal_bio {
- struct bch_dev *ca;
- unsigned buf_idx;
-
- struct bio bio;
-};
-
-/* Embedded in struct bch_fs */
-struct journal {
- /* Fastpath stuff up front: */
- struct {
-
- union journal_res_state reservations;
- enum bch_watermark watermark;
-
- } __aligned(SMP_CACHE_BYTES);
-
- unsigned long flags;
-
- /* Max size of current journal entry */
- unsigned cur_entry_u64s;
- unsigned cur_entry_sectors;
-
- /* Reserved space in journal entry to be used just prior to write */
- unsigned entry_u64s_reserved;
-
-
- /*
- * 0, or -ENOSPC if waiting on journal reclaim, or -EROFS if
- * insufficient devices:
- */
- enum journal_errors cur_entry_error;
-
- unsigned buf_size_want;
- /*
- * We may queue up some things to be journalled (log messages) before
- * the journal has actually started - stash them here:
- */
- darray_u64 early_journal_entries;
-
- /*
- * Protects journal_buf->data when accessing without a journal
- * reservation: for synchronization between the btree write buffer code
- * and the journal write path:
- */
- struct mutex buf_lock;
- /*
- * Two journal entries -- one is currently open for new entries, the
- * other is possibly being written out.
- */
- struct journal_buf buf[JOURNAL_BUF_NR];
-
- spinlock_t lock;
-
- /* if nonzero, we may not open a new journal entry: */
- unsigned blocked;
-
- /* Used when waiting because the journal was full */
- wait_queue_head_t wait;
- struct closure_waitlist async_wait;
-
- struct delayed_work write_work;
- struct workqueue_struct *wq;
-
- /* Sequence number of most recent journal entry (last entry in @pin) */
- atomic64_t seq;
-
- /* seq, last_seq from the most recent journal entry successfully written */
- u64 seq_ondisk;
- u64 flushed_seq_ondisk;
- u64 last_seq_ondisk;
- u64 err_seq;
- u64 last_empty_seq;
- u64 oldest_seq_found_ondisk;
-
- /*
- * FIFO of journal entries whose btree updates have not yet been
- * written out.
- *
- * Each entry is a reference count. The position in the FIFO is the
- * entry's sequence number relative to @seq.
- *
- * The journal entry itself holds a reference count, put when the
- * journal entry is written out. Each btree node modified by the journal
- * entry also holds a reference count, put when the btree node is
- * written.
- *
- * When a reference count reaches zero, the journal entry is no longer
- * needed. When all journal entries in the oldest journal bucket are no
- * longer needed, the bucket can be discarded and reused.
- */
- struct {
- u64 front, back, size, mask;
- struct journal_entry_pin_list *data;
- } pin;
-
- struct journal_space space[journal_space_nr];
-
- u64 replay_journal_seq;
- u64 replay_journal_seq_end;
-
- struct write_point wp;
- spinlock_t err_lock;
-
- struct mutex reclaim_lock;
- /*
- * Used for waiting until journal reclaim has freed up space in the
- * journal:
- */
- wait_queue_head_t reclaim_wait;
- struct task_struct *reclaim_thread;
- bool reclaim_kicked;
- unsigned long next_reclaim;
- u64 nr_direct_reclaim;
- u64 nr_background_reclaim;
-
- unsigned long last_flushed;
- struct journal_entry_pin *flush_in_progress;
- bool flush_in_progress_dropped;
- wait_queue_head_t pin_flush_wait;
-
- /* protects advancing ja->discard_idx: */
- struct mutex discard_lock;
- bool can_discard;
-
- unsigned long last_flush_write;
-
- u64 write_start_time;
-
- u64 nr_flush_writes;
- u64 nr_noflush_writes;
- u64 entry_bytes_written;
-
- struct bch2_time_stats *flush_write_time;
- struct bch2_time_stats *noflush_write_time;
- struct bch2_time_stats *flush_seq_time;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map res_map;
-#endif
-} __aligned(SMP_CACHE_BYTES);
-
-/*
- * Embedded in struct bch_dev. First three fields refer to the array of journal
- * buckets, in bch_sb.
- */
-struct journal_device {
- /*
- * For each journal bucket, contains the max sequence number of the
- * journal writes it contains - so we know when a bucket can be reused.
- */
- u64 *bucket_seq;
-
- unsigned sectors_free;
-
- /*
- * discard_idx <= dirty_idx_ondisk <= dirty_idx <= cur_idx:
- */
- unsigned discard_idx; /* Next bucket to discard */
- unsigned dirty_idx_ondisk;
- unsigned dirty_idx;
- unsigned cur_idx; /* Journal bucket we're currently writing to */
- unsigned nr;
-
- u64 *buckets;
-
- /* Bio for journal reads/writes to this device */
- struct journal_bio *bio[JOURNAL_BUF_NR];
-
- /* for bch_journal_read_device */
- struct closure read;
- u64 highest_seq_found;
-};
-
-/*
- * journal_entry_res - reserve space in every journal entry:
- */
-struct journal_entry_res {
- unsigned u64s;
-};
-
-#endif /* _BCACHEFS_JOURNAL_TYPES_H */
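
A quick worked example of the buffer-index arithmetic at the top of this header
(illustrative, not from the source): JOURNAL_BUF_BITS = 2 gives JOURNAL_BUF_NR = 4
journal_bufs, and an index is advanced by masking rather than by modulo. The two
sentinel values work the same way on the 20-bit cur_entry_offset field:
JOURNAL_ENTRY_ERROR_VAL is the largest representable offset and
JOURNAL_ENTRY_CLOSED_VAL sits one below it, so (presumably) any offset of a
genuinely open entry compares below both.

static unsigned next_journal_buf_idx(unsigned idx)
{
        return (idx + 1) & JOURNAL_BUF_MASK;    /* cycles 0, 1, 2, 3, 0, ... */
}
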
diff --git a/fs/bcachefs/keylist.c b/fs/bcachefs/keylist.c
deleted file mode 100644
index 1b828bddd11b..000000000000
--- a/fs/bcachefs/keylist.c
+++ /dev/null
@@ -1,50 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "bkey.h"
-#include "keylist.h"
-
-int bch2_keylist_realloc(struct keylist *l, u64 *inline_u64s,
- size_t nr_inline_u64s, size_t new_u64s)
-{
- size_t oldsize = bch2_keylist_u64s(l);
- size_t newsize = oldsize + new_u64s;
- u64 *old_buf = l->keys_p == inline_u64s ? NULL : l->keys_p;
- u64 *new_keys;
-
- newsize = roundup_pow_of_two(newsize);
-
- if (newsize <= nr_inline_u64s ||
- (old_buf && roundup_pow_of_two(oldsize) == newsize))
- return 0;
-
- new_keys = krealloc(old_buf, sizeof(u64) * newsize, GFP_NOFS);
- if (!new_keys)
- return -ENOMEM;
-
- if (!old_buf)
- memcpy_u64s(new_keys, inline_u64s, oldsize);
-
- l->keys_p = new_keys;
- l->top_p = new_keys + oldsize;
-
- return 0;
-}
-
-void bch2_keylist_pop_front(struct keylist *l)
-{
- l->top_p -= bch2_keylist_front(l)->k.u64s;
-
- memmove_u64s_down(l->keys,
- bkey_next(l->keys),
- bch2_keylist_u64s(l));
-}
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-void bch2_verify_keylist_sorted(struct keylist *l)
-{
- for_each_keylist_key(l, k)
- BUG_ON(bkey_next(k) != l->top &&
- bpos_ge(k->k.p, bkey_next(k)->k.p));
-}
-#endif
diff --git a/fs/bcachefs/keylist.h b/fs/bcachefs/keylist.h
deleted file mode 100644
index e687e0e9aede..000000000000
--- a/fs/bcachefs/keylist.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_KEYLIST_H
-#define _BCACHEFS_KEYLIST_H
-
-#include "keylist_types.h"
-
-int bch2_keylist_realloc(struct keylist *, u64 *, size_t, size_t);
-void bch2_keylist_pop_front(struct keylist *);
-
-static inline void bch2_keylist_init(struct keylist *l, u64 *inline_keys)
-{
- l->top_p = l->keys_p = inline_keys;
-}
-
-static inline void bch2_keylist_free(struct keylist *l, u64 *inline_keys)
-{
- if (l->keys_p != inline_keys)
- kfree(l->keys_p);
-}
-
-static inline void bch2_keylist_push(struct keylist *l)
-{
- l->top = bkey_next(l->top);
-}
-
-static inline void bch2_keylist_add(struct keylist *l, const struct bkey_i *k)
-{
- bkey_copy(l->top, k);
- bch2_keylist_push(l);
-}
-
-static inline bool bch2_keylist_empty(struct keylist *l)
-{
- return l->top == l->keys;
-}
-
-static inline size_t bch2_keylist_u64s(struct keylist *l)
-{
- return l->top_p - l->keys_p;
-}
-
-static inline size_t bch2_keylist_bytes(struct keylist *l)
-{
- return bch2_keylist_u64s(l) * sizeof(u64);
-}
-
-static inline struct bkey_i *bch2_keylist_front(struct keylist *l)
-{
- return l->keys;
-}
-
-#define for_each_keylist_key(_keylist, _k) \
- for (struct bkey_i *_k = (_keylist)->keys; \
- _k != (_keylist)->top; \
- _k = bkey_next(_k))
-
-static inline u64 keylist_sectors(struct keylist *keys)
-{
- u64 ret = 0;
-
- for_each_keylist_key(keys, k)
- ret += k->k.size;
- return ret;
-}
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-void bch2_verify_keylist_sorted(struct keylist *);
-#else
-static inline void bch2_verify_keylist_sorted(struct keylist *l) {}
-#endif
-
-#endif /* _BCACHEFS_KEYLIST_H */
diff --git a/fs/bcachefs/keylist_types.h b/fs/bcachefs/keylist_types.h
deleted file mode 100644
index 4b3ff7d8a875..000000000000
--- a/fs/bcachefs/keylist_types.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_KEYLIST_TYPES_H
-#define _BCACHEFS_KEYLIST_TYPES_H
-
-struct keylist {
- union {
- struct bkey_i *keys;
- u64 *keys_p;
- };
- union {
- struct bkey_i *top;
- u64 *top_p;
- };
-};
-
-#endif /* _BCACHEFS_KEYLIST_TYPES_H */
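
Taken together, keylist.c, keylist.h and keylist_types.h above describe a small
append-only buffer of variable-size bkeys. A hypothetical caller might look like the
sketch below; process_key() is an invented placeholder and the inline buffer size is
arbitrary, so treat this purely as an illustration of the call pattern:

static int drain_keys(struct bch_fs *c, struct bkey_i **src, unsigned nr_src)
{
        struct keylist keys;
        u64 inline_keys[32];            /* arbitrary inline buffer */
        int ret = 0;

        bch2_keylist_init(&keys, inline_keys);

        for (unsigned i = 0; i < nr_src; i++) {
                /* Grow past the inline buffer if the next key won't fit: */
                ret = bch2_keylist_realloc(&keys, inline_keys,
                                           ARRAY_SIZE(inline_keys),
                                           src[i]->k.u64s);
                if (ret)
                        goto out;

                bch2_keylist_add(&keys, src[i]);        /* copies the key to the top */
        }

        while (!bch2_keylist_empty(&keys)) {
                process_key(c, bch2_keylist_front(&keys));      /* placeholder */
                bch2_keylist_pop_front(&keys);
        }
out:
        bch2_keylist_free(&keys, inline_keys);  /* frees only if it outgrew the inline buffer */
        return ret;
}
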
diff --git a/fs/bcachefs/logged_ops.c b/fs/bcachefs/logged_ops.c
deleted file mode 100644
index 60e00702d1a4..000000000000
--- a/fs/bcachefs/logged_ops.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "bkey_buf.h"
-#include "btree_update.h"
-#include "error.h"
-#include "io_misc.h"
-#include "logged_ops.h"
-#include "super.h"
-
-struct bch_logged_op_fn {
- u8 type;
- int (*resume)(struct btree_trans *, struct bkey_i *);
-};
-
-static const struct bch_logged_op_fn logged_op_fns[] = {
-#define x(n) { \
- .type = KEY_TYPE_logged_op_##n, \
- .resume = bch2_resume_logged_op_##n, \
-},
- BCH_LOGGED_OPS()
-#undef x
-};
-
-static const struct bch_logged_op_fn *logged_op_fn(enum bch_bkey_type type)
-{
- for (unsigned i = 0; i < ARRAY_SIZE(logged_op_fns); i++)
- if (logged_op_fns[i].type == type)
- return logged_op_fns + i;
- return NULL;
-}
-
-static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- u32 restart_count = trans->restart_count;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- fsck_err_on(test_bit(BCH_FS_clean_recovery, &c->flags),
- trans, logged_op_but_clean,
- "filesystem marked as clean but have logged op\n%s",
- (bch2_bkey_val_to_text(&buf, c, k),
- buf.buf));
-
- struct bkey_buf sk;
- bch2_bkey_buf_init(&sk);
- bch2_bkey_buf_reassemble(&sk, c, k);
-
- const struct bch_logged_op_fn *fn = logged_op_fn(sk.k->k.type);
- if (fn)
- fn->resume(trans, sk.k);
-
- ret = bch2_logged_op_finish(trans, sk.k);
-
- bch2_bkey_buf_exit(&sk, c);
-fsck_err:
- printbuf_exit(&buf);
- return ret ?: trans_was_restarted(trans, restart_count);
-}
-
-int bch2_resume_logged_ops(struct bch_fs *c)
-{
- int ret = bch2_trans_run(c,
- for_each_btree_key(trans, iter,
- BTREE_ID_logged_ops, POS_MIN,
- BTREE_ITER_prefetch, k,
- resume_logged_op(trans, &iter, k)));
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int __bch2_logged_op_start(struct btree_trans *trans, struct bkey_i *k)
-{
- struct btree_iter iter;
- int ret;
-
- ret = bch2_bkey_get_empty_slot(trans, &iter, BTREE_ID_logged_ops, POS_MAX);
- if (ret)
- return ret;
-
- k->k.p = iter.pos;
-
- ret = bch2_trans_update(trans, &iter, k, 0);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-int bch2_logged_op_start(struct btree_trans *trans, struct bkey_i *k)
-{
- return commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- __bch2_logged_op_start(trans, k));
-}
-
-int bch2_logged_op_finish(struct btree_trans *trans, struct bkey_i *k)
-{
- int ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- bch2_btree_delete(trans, BTREE_ID_logged_ops, k->k.p, 0));
- /*
- * This needs to be a fatal error because we've left an unfinished
- * operation in the logged ops btree.
- *
- * We should only ever see an error here if the filesystem has already
- * been shut down, but make sure of that here:
- */
- if (ret) {
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
- bch2_fs_fatal_error(c, "deleting logged operation %s: %s",
- buf.buf, bch2_err_str(ret));
- printbuf_exit(&buf);
- }
-
- return ret;
-}
diff --git a/fs/bcachefs/logged_ops.h b/fs/bcachefs/logged_ops.h
deleted file mode 100644
index 30ae9ef737dd..000000000000
--- a/fs/bcachefs/logged_ops.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_LOGGED_OPS_H
-#define _BCACHEFS_LOGGED_OPS_H
-
-#include "bkey.h"
-
-#define BCH_LOGGED_OPS() \
- x(truncate) \
- x(finsert)
-
-static inline int bch2_logged_op_update(struct btree_trans *trans, struct bkey_i *op)
-{
- return bch2_btree_insert_nonextent(trans, BTREE_ID_logged_ops, op, 0);
-}
-
-int bch2_resume_logged_ops(struct bch_fs *);
-int bch2_logged_op_start(struct btree_trans *, struct bkey_i *);
-int bch2_logged_op_finish(struct btree_trans *, struct bkey_i *);
-
-#endif /* _BCACHEFS_LOGGED_OPS_H */
diff --git a/fs/bcachefs/logged_ops_format.h b/fs/bcachefs/logged_ops_format.h
deleted file mode 100644
index 6a4bf7129dba..000000000000
--- a/fs/bcachefs/logged_ops_format.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_LOGGED_OPS_FORMAT_H
-#define _BCACHEFS_LOGGED_OPS_FORMAT_H
-
-struct bch_logged_op_truncate {
- struct bch_val v;
- __le32 subvol;
- __le32 pad;
- __le64 inum;
- __le64 new_i_size;
-};
-
-enum logged_op_finsert_state {
- LOGGED_OP_FINSERT_start,
- LOGGED_OP_FINSERT_shift_extents,
- LOGGED_OP_FINSERT_finish,
-};
-
-struct bch_logged_op_finsert {
- struct bch_val v;
- __u8 state;
- __u8 pad[3];
- __le32 subvol;
- __le64 inum;
- __le64 dst_offset;
- __le64 src_offset;
- __le64 pos;
-};
-
-#endif /* _BCACHEFS_LOGGED_OPS_FORMAT_H */
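
Reading the declarations in logged_ops.h together with logged_ops.c above, the
intended shape of a caller appears to be: persist the operation key before starting
crash-sensitive work, then delete it once the work is done, leaving
bch2_resume_logged_ops() to re-drive anything still present at recovery. A hedged
sketch of that pattern, where do_truncate_work() is an invented placeholder:

static int run_logged_truncate(struct btree_trans *trans,
                               struct bkey_i *op /* a logged_op_truncate key */)
{
        /* Log the operation so it can be resumed if we crash partway through: */
        int ret = bch2_logged_op_start(trans, op);
        if (ret)
                return ret;

        ret = do_truncate_work(trans, op);      /* placeholder for the real work */

        /* Delete the log entry now that the operation is complete: */
        return bch2_logged_op_finish(trans, op) ?: ret;
}
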
diff --git a/fs/bcachefs/lru.c b/fs/bcachefs/lru.c
deleted file mode 100644
index 10857eccdeaf..000000000000
--- a/fs/bcachefs/lru.c
+++ /dev/null
@@ -1,202 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "alloc_background.h"
-#include "bkey_buf.h"
-#include "btree_iter.h"
-#include "btree_update.h"
-#include "btree_write_buffer.h"
-#include "error.h"
-#include "lru.h"
-#include "recovery.h"
-
-/* KEY_TYPE_lru is obsolete: */
-int bch2_lru_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- int ret = 0;
-
- bkey_fsck_err_on(!lru_pos_time(k.k->p),
- c, lru_entry_at_time_0,
- "lru entry at time=0");
-fsck_err:
- return ret;
-}
-
-void bch2_lru_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- const struct bch_lru *lru = bkey_s_c_to_lru(k).v;
-
- prt_printf(out, "idx %llu", le64_to_cpu(lru->idx));
-}
-
-void bch2_lru_pos_to_text(struct printbuf *out, struct bpos lru)
-{
- prt_printf(out, "%llu:%llu -> %llu:%llu",
- lru_pos_id(lru),
- lru_pos_time(lru),
- u64_to_bucket(lru.offset).inode,
- u64_to_bucket(lru.offset).offset);
-}
-
-static int __bch2_lru_set(struct btree_trans *trans, u16 lru_id,
- u64 dev_bucket, u64 time, bool set)
-{
- return time
- ? bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru,
- lru_pos(lru_id, dev_bucket, time), set)
- : 0;
-}
-
-int bch2_lru_del(struct btree_trans *trans, u16 lru_id, u64 dev_bucket, u64 time)
-{
- return __bch2_lru_set(trans, lru_id, dev_bucket, time, KEY_TYPE_deleted);
-}
-
-int bch2_lru_set(struct btree_trans *trans, u16 lru_id, u64 dev_bucket, u64 time)
-{
- return __bch2_lru_set(trans, lru_id, dev_bucket, time, KEY_TYPE_set);
-}
-
-int bch2_lru_change(struct btree_trans *trans,
- u16 lru_id, u64 dev_bucket,
- u64 old_time, u64 new_time)
-{
- if (old_time == new_time)
- return 0;
-
- return bch2_lru_del(trans, lru_id, dev_bucket, old_time) ?:
- bch2_lru_set(trans, lru_id, dev_bucket, new_time);
-}
-
-static const char * const bch2_lru_types[] = {
-#define x(n) #n,
- BCH_LRU_TYPES()
-#undef x
- NULL
-};
-
-int bch2_lru_check_set(struct btree_trans *trans,
- u16 lru_id, u64 time,
- struct bkey_s_c referring_k,
- struct bkey_buf *last_flushed)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
- struct btree_iter lru_iter;
- struct bkey_s_c lru_k =
- bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru,
- lru_pos(lru_id,
- bucket_to_u64(referring_k.k->p),
- time), 0);
- int ret = bkey_err(lru_k);
- if (ret)
- return ret;
-
- if (lru_k.k->type != KEY_TYPE_set) {
- ret = bch2_btree_write_buffer_maybe_flush(trans, referring_k, last_flushed);
- if (ret)
- goto err;
-
- if (fsck_err(trans, alloc_key_to_missing_lru_entry,
- "missing %s lru entry\n"
- " %s",
- bch2_lru_types[lru_type(lru_k)],
- (bch2_bkey_val_to_text(&buf, c, referring_k), buf.buf))) {
- ret = bch2_lru_set(trans, lru_id, bucket_to_u64(referring_k.k->p), time);
- if (ret)
- goto err;
- }
- }
-err:
-fsck_err:
- bch2_trans_iter_exit(trans, &lru_iter);
- printbuf_exit(&buf);
- return ret;
-}
-
-static int bch2_check_lru_key(struct btree_trans *trans,
- struct btree_iter *lru_iter,
- struct bkey_s_c lru_k,
- struct bkey_buf *last_flushed)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bch_alloc_v4 a_convert;
- const struct bch_alloc_v4 *a;
- struct printbuf buf1 = PRINTBUF;
- struct printbuf buf2 = PRINTBUF;
- enum bch_lru_type type = lru_type(lru_k);
- struct bpos alloc_pos = u64_to_bucket(lru_k.k->p.offset);
- u64 idx;
- int ret;
-
- struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_pos);
-
- if (fsck_err_on(!ca,
- trans, lru_entry_to_invalid_bucket,
- "lru key points to nonexistent device:bucket %llu:%llu",
- alloc_pos.inode, alloc_pos.offset))
- return bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false);
-
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc, alloc_pos, 0);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- a = bch2_alloc_to_v4(k, &a_convert);
-
- switch (type) {
- case BCH_LRU_read:
- idx = alloc_lru_idx_read(*a);
- break;
- case BCH_LRU_fragmentation:
- idx = alloc_lru_idx_fragmentation(*a, ca);
- break;
- }
-
- if (lru_k.k->type != KEY_TYPE_set ||
- lru_pos_time(lru_k.k->p) != idx) {
- ret = bch2_btree_write_buffer_maybe_flush(trans, lru_k, last_flushed);
- if (ret)
- goto err;
-
- if (fsck_err(trans, lru_entry_bad,
- "incorrect lru entry: lru %s time %llu\n"
- " %s\n"
- " for %s",
- bch2_lru_types[type],
- lru_pos_time(lru_k.k->p),
- (bch2_bkey_val_to_text(&buf1, c, lru_k), buf1.buf),
- (bch2_bkey_val_to_text(&buf2, c, k), buf2.buf)))
- ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false);
- }
-err:
-fsck_err:
- bch2_trans_iter_exit(trans, &iter);
- bch2_dev_put(ca);
- printbuf_exit(&buf2);
- printbuf_exit(&buf1);
- return ret;
-}
-
-int bch2_check_lrus(struct bch_fs *c)
-{
- struct bkey_buf last_flushed;
-
- bch2_bkey_buf_init(&last_flushed);
- bkey_init(&last_flushed.k->k);
-
- int ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter,
- BTREE_ID_lru, POS_MIN, BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc|BCH_TRANS_COMMIT_lazy_rw,
- bch2_check_lru_key(trans, &iter, k, &last_flushed)));
-
- bch2_bkey_buf_exit(&last_flushed, c);
- bch_err_fn(c, ret);
- return ret;
-
-}
diff --git a/fs/bcachefs/lru.h b/fs/bcachefs/lru.h
deleted file mode 100644
index e6a7d8241bb8..000000000000
--- a/fs/bcachefs/lru.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_LRU_H
-#define _BCACHEFS_LRU_H
-
-static inline u64 lru_pos_id(struct bpos pos)
-{
- return pos.inode >> LRU_TIME_BITS;
-}
-
-static inline u64 lru_pos_time(struct bpos pos)
-{
- return pos.inode & ~(~0ULL << LRU_TIME_BITS);
-}
-
-static inline struct bpos lru_pos(u16 lru_id, u64 dev_bucket, u64 time)
-{
- struct bpos pos = POS(((u64) lru_id << LRU_TIME_BITS)|time, dev_bucket);
-
- EBUG_ON(time > LRU_TIME_MAX);
- EBUG_ON(lru_pos_id(pos) != lru_id);
- EBUG_ON(lru_pos_time(pos) != time);
- EBUG_ON(pos.offset != dev_bucket);
-
- return pos;
-}
-
-static inline enum bch_lru_type lru_type(struct bkey_s_c l)
-{
- u16 lru_id = l.k->p.inode >> 48;
-
- if (lru_id == BCH_LRU_FRAGMENTATION_START)
- return BCH_LRU_fragmentation;
- return BCH_LRU_read;
-}
-
-int bch2_lru_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-void bch2_lru_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-
-void bch2_lru_pos_to_text(struct printbuf *, struct bpos);
-
-#define bch2_bkey_ops_lru ((struct bkey_ops) { \
- .key_validate = bch2_lru_validate, \
- .val_to_text = bch2_lru_to_text, \
- .min_val_size = 8, \
-})
-
-int bch2_lru_del(struct btree_trans *, u16, u64, u64);
-int bch2_lru_set(struct btree_trans *, u16, u64, u64);
-int bch2_lru_change(struct btree_trans *, u16, u64, u64, u64);
-
-struct bkey_buf;
-int bch2_lru_check_set(struct btree_trans *, u16, u64, struct bkey_s_c, struct bkey_buf *);
-
-int bch2_check_lrus(struct bch_fs *);
-
-#endif /* _BCACHEFS_LRU_H */
diff --git a/fs/bcachefs/lru_format.h b/fs/bcachefs/lru_format.h
deleted file mode 100644
index f372cb3b8cda..000000000000
--- a/fs/bcachefs/lru_format.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_LRU_FORMAT_H
-#define _BCACHEFS_LRU_FORMAT_H
-
-struct bch_lru {
- struct bch_val v;
- __le64 idx;
-} __packed __aligned(8);
-
-#define BCH_LRU_TYPES() \
- x(read) \
- x(fragmentation)
-
-enum bch_lru_type {
-#define x(n) BCH_LRU_##n,
- BCH_LRU_TYPES()
-#undef x
-};
-
-#define BCH_LRU_FRAGMENTATION_START ((1U << 16) - 1)
-
-#define LRU_TIME_BITS 48
-#define LRU_TIME_MAX ((1ULL << LRU_TIME_BITS) - 1)
-
-#endif /* _BCACHEFS_LRU_FORMAT_H */
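
A worked example of the key packing defined in lru.h and lru_format.h above: the
16-bit LRU id occupies the top bits of the key's inode field, the 48-bit time the
low bits, and the device:bucket goes in the offset.

u16 lru_id     = 3;
u64 time       = 0x123456789abcULL;     /* <= LRU_TIME_MAX (48 bits) */
u64 dev_bucket = 42;

struct bpos pos = lru_pos(lru_id, dev_bucket, time);

/*
 * pos.inode  == ((u64) 3 << 48) | 0x123456789abc
 * pos.offset == 42
 *
 * and the helpers invert it:
 *   lru_pos_id(pos)   == pos.inode >> 48          == 3
 *   lru_pos_time(pos) == pos.inode & LRU_TIME_MAX == 0x123456789abc
 *
 * lru_type() keys off the id alone: id 0xffff (BCH_LRU_FRAGMENTATION_START)
 * marks a fragmentation LRU, anything else a read LRU.
 */
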
diff --git a/fs/bcachefs/mean_and_variance.c b/fs/bcachefs/mean_and_variance.c
deleted file mode 100644
index 0ea9f30803a2..000000000000
--- a/fs/bcachefs/mean_and_variance.c
+++ /dev/null
@@ -1,173 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Functions for incremental mean and variance.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * Copyright © 2022 Daniel B. Hill
- *
- * Author: Daniel B. Hill <daniel@gluo.nz>
- *
- * Description:
- *
- * This includes some incremental algorithms for mean and variance calculation.
- *
- * Derived from the paper: https://fanf2.user.srcf.net/hermes/doc/antiforgery/stats.pdf
- *
- * Create a struct; for the weighted variant, the weight (2^k) is passed to each call.
- *
- * Use mean_and_variance[_weighted]_update() on the struct to update its state.
- *
- * Use the mean_and_variance[_weighted]_get_* functions to calculate the mean and
- * variance; some computation is deferred to these functions for performance reasons.
- *
- * see mean_and_variance_test.c for examples of usage.
- *
- * DO NOT access the mean and variance fields of the weighted variants directly.
- * DO NOT change the weight after calling update.
- */
-
-#include <linux/bug.h>
-#include <linux/compiler.h>
-#include <linux/export.h>
-#include <linux/limits.h>
-#include <linux/math.h>
-#include <linux/math64.h>
-#include <linux/module.h>
-
-#include "mean_and_variance.h"
-
-u128_u u128_div(u128_u n, u64 d)
-{
- u128_u r;
- u64 rem;
- u64 hi = u128_hi(n);
- u64 lo = u128_lo(n);
- u64 h = hi & ((u64) U32_MAX << 32);
- u64 l = (hi & (u64) U32_MAX) << 32;
-
- r = u128_shl(u64_to_u128(div64_u64_rem(h, d, &rem)), 64);
- r = u128_add(r, u128_shl(u64_to_u128(div64_u64_rem(l + (rem << 32), d, &rem)), 32));
- r = u128_add(r, u64_to_u128(div64_u64_rem(lo + (rem << 32), d, &rem)));
- return r;
-}
-EXPORT_SYMBOL_GPL(u128_div);
-
-/**
- * mean_and_variance_get_mean() - get mean from @s
- * @s: mean and variance number of samples and their sums
- */
-s64 mean_and_variance_get_mean(struct mean_and_variance s)
-{
- return s.n ? div64_u64(s.sum, s.n) : 0;
-}
-EXPORT_SYMBOL_GPL(mean_and_variance_get_mean);
-
-/**
- * mean_and_variance_get_variance() - get variance from @s1
- * @s1: mean and variance number of samples and sums
- *
- * see linked pdf equation 12.
- */
-u64 mean_and_variance_get_variance(struct mean_and_variance s1)
-{
- if (s1.n) {
- u128_u s2 = u128_div(s1.sum_squares, s1.n);
- u64 s3 = abs(mean_and_variance_get_mean(s1));
-
- return u128_lo(u128_sub(s2, u128_square(s3)));
- } else {
- return 0;
- }
-}
-EXPORT_SYMBOL_GPL(mean_and_variance_get_variance);
-
-/**
- * mean_and_variance_get_stddev() - get standard deviation from @s
- * @s: mean and variance number of samples and their sums
- */
-u32 mean_and_variance_get_stddev(struct mean_and_variance s)
-{
- return int_sqrt64(mean_and_variance_get_variance(s));
-}
-EXPORT_SYMBOL_GPL(mean_and_variance_get_stddev);
-
-/**
- * mean_and_variance_weighted_update() - exponentially weighted variant of mean_and_variance_update()
- * @s: mean and variance number of samples and their sums
- * @x: new value to include in the &mean_and_variance_weighted
- * @initted: caller must track whether this is the first use or not
- * @weight: ewma weight
- *
- * see linked pdf: function derived from equations 140-143 where alpha = 2^w.
- * values are stored bitshifted for performance and added precision.
- */
-void mean_and_variance_weighted_update(struct mean_and_variance_weighted *s,
- s64 x, bool initted, u8 weight)
-{
- // previous weighted variance.
- u8 w = weight;
- u64 var_w0 = s->variance;
- // new value weighted.
- s64 x_w = x << w;
- s64 diff_w = x_w - s->mean;
- s64 diff = fast_divpow2(diff_w, w);
- // new mean weighted.
- s64 u_w1 = s->mean + diff;
-
- if (!initted) {
- s->mean = x_w;
- s->variance = 0;
- } else {
- s->mean = u_w1;
- s->variance = ((var_w0 << w) - var_w0 + ((diff_w * (x_w - u_w1)) >> w)) >> w;
- }
-}
-EXPORT_SYMBOL_GPL(mean_and_variance_weighted_update);
-
-/**
- * mean_and_variance_weighted_get_mean() - get mean from @s
- * @s: mean and variance number of samples and their sums
- * @weight: ewma weight
- */
-s64 mean_and_variance_weighted_get_mean(struct mean_and_variance_weighted s,
- u8 weight)
-{
- return fast_divpow2(s.mean, weight);
-}
-EXPORT_SYMBOL_GPL(mean_and_variance_weighted_get_mean);
-
-/**
- * mean_and_variance_weighted_get_variance() - get variance from @s
- * @s: mean and variance number of samples and their sums
- * @weight: ewma weight
- */
-u64 mean_and_variance_weighted_get_variance(struct mean_and_variance_weighted s,
- u8 weight)
-{
- // always positive don't need fast divpow2
- return s.variance >> weight;
-}
-EXPORT_SYMBOL_GPL(mean_and_variance_weighted_get_variance);
-
-/**
- * mean_and_variance_weighted_get_stddev() - get standard deviation from @s
- * @s: mean and variance number of samples and their sums
- * @weight: ewma weight
- */
-u32 mean_and_variance_weighted_get_stddev(struct mean_and_variance_weighted s,
- u8 weight)
-{
- return int_sqrt64(mean_and_variance_weighted_get_variance(s, weight));
-}
-EXPORT_SYMBOL_GPL(mean_and_variance_weighted_get_stddev);
-
-MODULE_AUTHOR("Daniel B. Hill");
-MODULE_LICENSE("GPL");
diff --git a/fs/bcachefs/mean_and_variance.h b/fs/bcachefs/mean_and_variance.h
deleted file mode 100644
index 47e4a3c3d26e..000000000000
--- a/fs/bcachefs/mean_and_variance.h
+++ /dev/null
@@ -1,203 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef MEAN_AND_VARIANCE_H_
-#define MEAN_AND_VARIANCE_H_
-
-#include <linux/types.h>
-#include <linux/limits.h>
-#include <linux/math.h>
-#include <linux/math64.h>
-
-#define SQRT_U64_MAX 4294967295ULL
-
-/*
- * u128_u: u128 user mode, because not all architectures support a real int128
- * type
- *
- * We don't use this version in userspace, because in userspace we link with
- * Rust and rustc has issues with u128.
- */
-
-#if defined(__SIZEOF_INT128__) && defined(__KERNEL__) && !defined(CONFIG_PARISC)
-
-typedef struct {
- unsigned __int128 v;
-} __aligned(16) u128_u;
-
-static inline u128_u u64_to_u128(u64 a)
-{
- return (u128_u) { .v = a };
-}
-
-static inline u64 u128_lo(u128_u a)
-{
- return a.v;
-}
-
-static inline u64 u128_hi(u128_u a)
-{
- return a.v >> 64;
-}
-
-static inline u128_u u128_add(u128_u a, u128_u b)
-{
- a.v += b.v;
- return a;
-}
-
-static inline u128_u u128_sub(u128_u a, u128_u b)
-{
- a.v -= b.v;
- return a;
-}
-
-static inline u128_u u128_shl(u128_u a, s8 shift)
-{
- a.v <<= shift;
- return a;
-}
-
-static inline u128_u u128_square(u64 a)
-{
- u128_u b = u64_to_u128(a);
-
- b.v *= b.v;
- return b;
-}
-
-#else
-
-typedef struct {
- u64 hi, lo;
-} __aligned(16) u128_u;
-
-/* conversions */
-
-static inline u128_u u64_to_u128(u64 a)
-{
- return (u128_u) { .lo = a };
-}
-
-static inline u64 u128_lo(u128_u a)
-{
- return a.lo;
-}
-
-static inline u64 u128_hi(u128_u a)
-{
- return a.hi;
-}
-
-/* arithmetic */
-
-static inline u128_u u128_add(u128_u a, u128_u b)
-{
- u128_u c;
-
- c.lo = a.lo + b.lo;
- c.hi = a.hi + b.hi + (c.lo < a.lo);
- return c;
-}
-
-static inline u128_u u128_sub(u128_u a, u128_u b)
-{
- u128_u c;
-
- c.lo = a.lo - b.lo;
- c.hi = a.hi - b.hi - (c.lo > a.lo);
- return c;
-}
-
-static inline u128_u u128_shl(u128_u i, s8 shift)
-{
- u128_u r;
-
- r.lo = i.lo << (shift & 63);
- if (shift < 64)
- r.hi = (i.hi << (shift & 63)) | (i.lo >> (-shift & 63));
- else {
- r.hi = i.lo << (-shift & 63);
- r.lo = 0;
- }
- return r;
-}
-
-static inline u128_u u128_square(u64 i)
-{
- u128_u r;
- u64 h = i >> 32, l = i & U32_MAX;
-
- r = u128_shl(u64_to_u128(h*h), 64);
- r = u128_add(r, u128_shl(u64_to_u128(h*l), 32));
- r = u128_add(r, u128_shl(u64_to_u128(l*h), 32));
- r = u128_add(r, u64_to_u128(l*l));
- return r;
-}
-
-#endif
-
-static inline u128_u u64s_to_u128(u64 hi, u64 lo)
-{
- u128_u c = u64_to_u128(hi);
-
- c = u128_shl(c, 64);
- c = u128_add(c, u64_to_u128(lo));
- return c;
-}
-
-u128_u u128_div(u128_u n, u64 d);
-
-struct mean_and_variance {
- s64 n;
- s64 sum;
- u128_u sum_squares;
-};
-
-/* exponentially weighted variant */
-struct mean_and_variance_weighted {
- s64 mean;
- u64 variance;
-};
-
-/**
- * fast_divpow2() - fast approximation for n / (1 << d)
- * @n: numerator
- * @d: the power of 2 denominator.
- *
- * note: this rounds towards 0.
- */
-static inline s64 fast_divpow2(s64 n, u8 d)
-{
- return (n + ((n < 0) ? ((1 << d) - 1) : 0)) >> d;
-}
-
-/**
- * mean_and_variance_update() - update a mean_and_variance struct @s with a
- * new sample @v
- * @s: the mean_and_variance to update.
- * @v: the new sample.
- *
- * see linked pdf equation 12.
- */
-static inline void
-mean_and_variance_update(struct mean_and_variance *s, s64 v)
-{
- s->n++;
- s->sum += v;
- s->sum_squares = u128_add(s->sum_squares, u128_square(abs(v)));
-}
-
-s64 mean_and_variance_get_mean(struct mean_and_variance s);
-u64 mean_and_variance_get_variance(struct mean_and_variance s1);
-u32 mean_and_variance_get_stddev(struct mean_and_variance s);
-
-void mean_and_variance_weighted_update(struct mean_and_variance_weighted *s,
- s64 v, bool initted, u8 weight);
-
-s64 mean_and_variance_weighted_get_mean(struct mean_and_variance_weighted s,
- u8 weight);
-u64 mean_and_variance_weighted_get_variance(struct mean_and_variance_weighted s,
- u8 weight);
-u32 mean_and_variance_weighted_get_stddev(struct mean_and_variance_weighted s,
- u8 weight);
-
-#endif // MEAN_AND_VARIANCE_H_
diff --git a/fs/bcachefs/mean_and_variance_test.c b/fs/bcachefs/mean_and_variance_test.c
deleted file mode 100644
index e9d9c0212e44..000000000000
--- a/fs/bcachefs/mean_and_variance_test.c
+++ /dev/null
@@ -1,221 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <kunit/test.h>
-
-#include "mean_and_variance.h"
-
-#define MAX_SQR (SQRT_U64_MAX*SQRT_U64_MAX)
-
-static void mean_and_variance_basic_test(struct kunit *test)
-{
- struct mean_and_variance s = {};
-
- mean_and_variance_update(&s, 2);
- mean_and_variance_update(&s, 2);
-
- KUNIT_EXPECT_EQ(test, mean_and_variance_get_mean(s), 2);
- KUNIT_EXPECT_EQ(test, mean_and_variance_get_variance(s), 0);
- KUNIT_EXPECT_EQ(test, s.n, 2);
-
- mean_and_variance_update(&s, 4);
- mean_and_variance_update(&s, 4);
-
- KUNIT_EXPECT_EQ(test, mean_and_variance_get_mean(s), 3);
- KUNIT_EXPECT_EQ(test, mean_and_variance_get_variance(s), 1);
- KUNIT_EXPECT_EQ(test, s.n, 4);
-}
-
-/*
- * Test values computed using a spreadsheet from the pseudocode at the bottom:
- * https://fanf2.user.srcf.net/hermes/doc/antiforgery/stats.pdf
- */
-
-static void mean_and_variance_weighted_test(struct kunit *test)
-{
- struct mean_and_variance_weighted s = { };
-
- mean_and_variance_weighted_update(&s, 10, false, 2);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(s, 2), 10);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_variance(s, 2), 0);
-
- mean_and_variance_weighted_update(&s, 20, true, 2);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(s, 2), 12);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_variance(s, 2), 18);
-
- mean_and_variance_weighted_update(&s, 30, true, 2);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(s, 2), 16);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_variance(s, 2), 72);
-
- s = (struct mean_and_variance_weighted) { };
-
- mean_and_variance_weighted_update(&s, -10, false, 2);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(s, 2), -10);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_variance(s, 2), 0);
-
- mean_and_variance_weighted_update(&s, -20, true, 2);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(s, 2), -12);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_variance(s, 2), 18);
-
- mean_and_variance_weighted_update(&s, -30, true, 2);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(s, 2), -16);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_variance(s, 2), 72);
-}
-
-static void mean_and_variance_weighted_advanced_test(struct kunit *test)
-{
- struct mean_and_variance_weighted s = { };
- bool initted = false;
- s64 i;
-
- for (i = 10; i <= 100; i += 10) {
- mean_and_variance_weighted_update(&s, i, initted, 8);
- initted = true;
- }
-
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(s, 8), 11);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_variance(s, 8), 107);
-
- s = (struct mean_and_variance_weighted) { };
- initted = false;
-
- for (i = -10; i >= -100; i -= 10) {
- mean_and_variance_weighted_update(&s, i, initted, 8);
- initted = true;
- }
-
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(s, 8), -11);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_variance(s, 8), 107);
-}
-
-static void do_mean_and_variance_test(struct kunit *test,
- s64 initial_value,
- s64 initial_n,
- s64 n,
- unsigned weight,
- s64 *data,
- s64 *mean,
- s64 *stddev,
- s64 *weighted_mean,
- s64 *weighted_stddev)
-{
- struct mean_and_variance mv = {};
- struct mean_and_variance_weighted vw = { };
-
- for (unsigned i = 0; i < initial_n; i++) {
- mean_and_variance_update(&mv, initial_value);
- mean_and_variance_weighted_update(&vw, initial_value, false, weight);
-
- KUNIT_EXPECT_EQ(test, mean_and_variance_get_mean(mv), initial_value);
- KUNIT_EXPECT_EQ(test, mean_and_variance_get_stddev(mv), 0);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(vw, weight), initial_value);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_stddev(vw, weight),0);
- }
-
- for (unsigned i = 0; i < n; i++) {
- mean_and_variance_update(&mv, data[i]);
- mean_and_variance_weighted_update(&vw, data[i], true, weight);
-
- KUNIT_EXPECT_EQ(test, mean_and_variance_get_mean(mv), mean[i]);
- KUNIT_EXPECT_EQ(test, mean_and_variance_get_stddev(mv), stddev[i]);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(vw, weight), weighted_mean[i]);
- KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_stddev(vw, weight),weighted_stddev[i]);
- }
-
- KUNIT_EXPECT_EQ(test, mv.n, initial_n + n);
-}
-
-/* Test behaviour with a single outlier, then back to steady state: */
-static void mean_and_variance_test_1(struct kunit *test)
-{
- s64 d[] = { 100, 10, 10, 10, 10, 10, 10 };
- s64 mean[] = { 22, 21, 20, 19, 18, 17, 16 };
- s64 stddev[] = { 32, 29, 28, 27, 26, 25, 24 };
- s64 weighted_mean[] = { 32, 27, 22, 19, 17, 15, 14 };
- s64 weighted_stddev[] = { 38, 35, 31, 27, 24, 21, 18 };
-
- do_mean_and_variance_test(test, 10, 6, ARRAY_SIZE(d), 2,
- d, mean, stddev, weighted_mean, weighted_stddev);
-}
-
-/* Test behaviour where we switch from one steady state to another: */
-static void mean_and_variance_test_2(struct kunit *test)
-{
- s64 d[] = { 100, 100, 100, 100, 100 };
- s64 mean[] = { 22, 32, 40, 46, 50 };
- s64 stddev[] = { 32, 39, 42, 44, 45 };
- s64 weighted_mean[] = { 32, 49, 61, 71, 78 };
- s64 weighted_stddev[] = { 38, 44, 44, 41, 38 };
-
- do_mean_and_variance_test(test, 10, 6, ARRAY_SIZE(d), 2,
- d, mean, stddev, weighted_mean, weighted_stddev);
-}
-
-static void mean_and_variance_fast_divpow2(struct kunit *test)
-{
- s64 i;
- u8 d;
-
- for (i = 0; i < 100; i++) {
- d = 0;
- KUNIT_EXPECT_EQ(test, fast_divpow2(i, d), div_u64(i, 1LLU << d));
- KUNIT_EXPECT_EQ(test, abs(fast_divpow2(-i, d)), div_u64(i, 1LLU << d));
- for (d = 1; d < 32; d++) {
- KUNIT_EXPECT_EQ_MSG(test, abs(fast_divpow2(i, d)),
- div_u64(i, 1 << d), "%lld %u", i, d);
- KUNIT_EXPECT_EQ_MSG(test, abs(fast_divpow2(-i, d)),
- div_u64(i, 1 << d), "%lld %u", -i, d);
- }
- }
-}
-
-static void mean_and_variance_u128_basic_test(struct kunit *test)
-{
- u128_u a = u64s_to_u128(0, U64_MAX);
- u128_u a1 = u64s_to_u128(0, 1);
- u128_u b = u64s_to_u128(1, 0);
- u128_u c = u64s_to_u128(0, 1LLU << 63);
- u128_u c2 = u64s_to_u128(U64_MAX, U64_MAX);
-
- KUNIT_EXPECT_EQ(test, u128_hi(u128_add(a, a1)), 1);
- KUNIT_EXPECT_EQ(test, u128_lo(u128_add(a, a1)), 0);
- KUNIT_EXPECT_EQ(test, u128_hi(u128_add(a1, a)), 1);
- KUNIT_EXPECT_EQ(test, u128_lo(u128_add(a1, a)), 0);
-
- KUNIT_EXPECT_EQ(test, u128_lo(u128_sub(b, a1)), U64_MAX);
- KUNIT_EXPECT_EQ(test, u128_hi(u128_sub(b, a1)), 0);
-
- KUNIT_EXPECT_EQ(test, u128_hi(u128_shl(c, 1)), 1);
- KUNIT_EXPECT_EQ(test, u128_lo(u128_shl(c, 1)), 0);
-
- KUNIT_EXPECT_EQ(test, u128_hi(u128_square(U64_MAX)), U64_MAX - 1);
- KUNIT_EXPECT_EQ(test, u128_lo(u128_square(U64_MAX)), 1);
-
- KUNIT_EXPECT_EQ(test, u128_lo(u128_div(b, 2)), 1LLU << 63);
-
- KUNIT_EXPECT_EQ(test, u128_hi(u128_div(c2, 2)), U64_MAX >> 1);
- KUNIT_EXPECT_EQ(test, u128_lo(u128_div(c2, 2)), U64_MAX);
-
- KUNIT_EXPECT_EQ(test, u128_hi(u128_div(u128_shl(u64_to_u128(U64_MAX), 32), 2)), U32_MAX >> 1);
- KUNIT_EXPECT_EQ(test, u128_lo(u128_div(u128_shl(u64_to_u128(U64_MAX), 32), 2)), U64_MAX << 31);
-}
-
-static struct kunit_case mean_and_variance_test_cases[] = {
- KUNIT_CASE(mean_and_variance_fast_divpow2),
- KUNIT_CASE(mean_and_variance_u128_basic_test),
- KUNIT_CASE(mean_and_variance_basic_test),
- KUNIT_CASE(mean_and_variance_weighted_test),
- KUNIT_CASE(mean_and_variance_weighted_advanced_test),
- KUNIT_CASE(mean_and_variance_test_1),
- KUNIT_CASE(mean_and_variance_test_2),
- {}
-};
-
-static struct kunit_suite mean_and_variance_test_suite = {
- .name = "mean and variance tests",
- .test_cases = mean_and_variance_test_cases
-};
-
-kunit_test_suite(mean_and_variance_test_suite);
-
-MODULE_AUTHOR("Daniel B. Hill");
-MODULE_DESCRIPTION("bcachefs filesystem mean and variance unit tests");
-MODULE_LICENSE("GPL");
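
Tying the three mean_and_variance files above together, a short usage sketch; the
unweighted numbers follow directly from the definitions, and the weighted ones match
the first steps of mean_and_variance_weighted_test() above (not code from the tree):

static u32 mean_and_variance_example(void)
{
        struct mean_and_variance mv = {};
        struct mean_and_variance_weighted w = {};

        mean_and_variance_update(&mv, 2);
        mean_and_variance_update(&mv, 4);
        mean_and_variance_update(&mv, 4);
        mean_and_variance_update(&mv, 6);

        /* sum = 16, n = 4, sum_squares = 72, so:
         *   mean_and_variance_get_mean(mv)     == 16 / 4         == 4
         *   mean_and_variance_get_variance(mv) == 72 / 4 - 4 * 4 == 2
         *   mean_and_variance_get_stddev(mv)   == int_sqrt64(2)  == 1
         */

        /* Weighted variant: the caller tracks "initted" and must pass the same
         * weight on every call (here 2, i.e. alpha = 2^2): */
        mean_and_variance_weighted_update(&w, 10, false, 2);    /* mean 10, variance 0 */
        mean_and_variance_weighted_update(&w, 20, true,  2);    /* mean 12, variance 18 */

        return mean_and_variance_get_stddev(mv);                /* 1 */
}
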
diff --git a/fs/bcachefs/migrate.c b/fs/bcachefs/migrate.c
deleted file mode 100644
index ddc187fb693d..000000000000
--- a/fs/bcachefs/migrate.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Code for moving data off a device.
- */
-
-#include "bcachefs.h"
-#include "bkey_buf.h"
-#include "btree_update.h"
-#include "btree_update_interior.h"
-#include "buckets.h"
-#include "errcode.h"
-#include "extents.h"
-#include "io_write.h"
-#include "journal.h"
-#include "keylist.h"
-#include "migrate.h"
-#include "move.h"
-#include "replicas.h"
-#include "super-io.h"
-
-static int drop_dev_ptrs(struct bch_fs *c, struct bkey_s k,
- unsigned dev_idx, int flags, bool metadata)
-{
- unsigned replicas = metadata ? c->opts.metadata_replicas : c->opts.data_replicas;
- unsigned lost = metadata ? BCH_FORCE_IF_METADATA_LOST : BCH_FORCE_IF_DATA_LOST;
- unsigned degraded = metadata ? BCH_FORCE_IF_METADATA_DEGRADED : BCH_FORCE_IF_DATA_DEGRADED;
- unsigned nr_good;
-
- bch2_bkey_drop_device(k, dev_idx);
-
- nr_good = bch2_bkey_durability(c, k.s_c);
- if ((!nr_good && !(flags & lost)) ||
- (nr_good < replicas && !(flags & degraded)))
- return -BCH_ERR_remove_would_lose_data;
-
- return 0;
-}
-
-static int bch2_dev_usrdata_drop_key(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k,
- unsigned dev_idx,
- int flags)
-{
- struct bch_fs *c = trans->c;
- struct bkey_i *n;
- int ret;
-
- if (!bch2_bkey_has_device_c(k, dev_idx))
- return 0;
-
- n = bch2_bkey_make_mut(trans, iter, &k, BTREE_UPDATE_internal_snapshot_node);
- ret = PTR_ERR_OR_ZERO(n);
- if (ret)
- return ret;
-
- ret = drop_dev_ptrs(c, bkey_i_to_s(n), dev_idx, flags, false);
- if (ret)
- return ret;
-
- /*
- * If the new extent no longer has any pointers, bch2_extent_normalize()
- * will do the appropriate thing with it (turning it into a
- * KEY_TYPE_error key, or just a discard if it was a cached extent)
- */
- bch2_extent_normalize(c, bkey_i_to_s(n));
-
- /*
- * Since we're not inserting through an extent iterator
- * (BTREE_ITER_all_snapshots iterators aren't extent iterators),
- * we aren't using the extent overwrite path to delete, we're
- * just using the normal key deletion path:
- */
- if (bkey_deleted(&n->k))
- n->k.size = 0;
- return 0;
-}
-
-static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- enum btree_id id;
- int ret = 0;
-
- for (id = 0; id < BTREE_ID_NR; id++) {
- if (!btree_type_has_ptrs(id))
- continue;
-
- ret = for_each_btree_key_commit(trans, iter, id, POS_MIN,
- BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- bch2_dev_usrdata_drop_key(trans, &iter, k, dev_idx, flags));
- if (ret)
- break;
- }
-
- bch2_trans_put(trans);
-
- return ret;
-}
-
-static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
-{
- struct btree_trans *trans;
- struct btree_iter iter;
- struct closure cl;
- struct btree *b;
- struct bkey_buf k;
- unsigned id;
- int ret;
-
- /* don't handle this yet: */
- if (flags & BCH_FORCE_IF_METADATA_LOST)
- return -BCH_ERR_remove_with_metadata_missing_unimplemented;
-
- trans = bch2_trans_get(c);
- bch2_bkey_buf_init(&k);
- closure_init_stack(&cl);
-
- for (id = 0; id < BTREE_ID_NR; id++) {
- bch2_trans_node_iter_init(trans, &iter, id, POS_MIN, 0, 0,
- BTREE_ITER_prefetch);
-retry:
- ret = 0;
- while (bch2_trans_begin(trans),
- (b = bch2_btree_iter_peek_node(&iter)) &&
- !(ret = PTR_ERR_OR_ZERO(b))) {
- if (!bch2_bkey_has_device_c(bkey_i_to_s_c(&b->key), dev_idx))
- goto next;
-
- bch2_bkey_buf_copy(&k, c, &b->key);
-
- ret = drop_dev_ptrs(c, bkey_i_to_s(k.k),
- dev_idx, flags, true);
- if (ret)
- break;
-
- ret = bch2_btree_node_update_key(trans, &iter, b, k.k, 0, false);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
- ret = 0;
- continue;
- }
-
- bch_err_msg(c, ret, "updating btree node key");
- if (ret)
- break;
-next:
- bch2_btree_iter_next_node(&iter);
- }
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- bch2_trans_iter_exit(trans, &iter);
-
- if (ret)
- goto err;
- }
-
- bch2_btree_interior_updates_flush(c);
- ret = 0;
-err:
- bch2_bkey_buf_exit(&k, c);
- bch2_trans_put(trans);
-
- BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
-
- return ret;
-}
-
-int bch2_dev_data_drop(struct bch_fs *c, unsigned dev_idx, int flags)
-{
- return bch2_dev_usrdata_drop(c, dev_idx, flags) ?:
- bch2_dev_metadata_drop(c, dev_idx, flags);
-}
diff --git a/fs/bcachefs/migrate.h b/fs/bcachefs/migrate.h
deleted file mode 100644
index 027efaa0d575..000000000000
--- a/fs/bcachefs/migrate.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_MIGRATE_H
-#define _BCACHEFS_MIGRATE_H
-
-int bch2_dev_data_drop(struct bch_fs *, unsigned, int);
-
-#endif /* _BCACHEFS_MIGRATE_H */
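
The core safety decision in migrate.c above is drop_dev_ptrs(): after the device's
pointer is removed, the key's remaining durability determines whether the drop may
proceed without force flags. A restatement with illustrative replica counts (not the
tree's code):

/* With data_replicas = 2:
 *   durability left 2  -> always allowed
 *   durability left 1  -> needs BCH_FORCE_IF_DATA_DEGRADED
 *   durability left 0  -> needs BCH_FORCE_IF_DATA_LOST (and, since 0 < 2,
 *                         BCH_FORCE_IF_DATA_DEGRADED as well)
 */
static bool drop_allowed(unsigned nr_good, unsigned replicas,
                         bool force_lost, bool force_degraded)
{
        if (!nr_good && !force_lost)
                return false;           /* the data would be lost outright */
        if (nr_good < replicas && !force_degraded)
                return false;           /* the data would be left degraded */
        return true;
}
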
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
deleted file mode 100644
index 0ef4a86850bb..000000000000
--- a/fs/bcachefs/move.c
+++ /dev/null
@@ -1,1181 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "alloc_background.h"
-#include "alloc_foreground.h"
-#include "backpointers.h"
-#include "bkey_buf.h"
-#include "btree_gc.h"
-#include "btree_io.h"
-#include "btree_update.h"
-#include "btree_update_interior.h"
-#include "btree_write_buffer.h"
-#include "compress.h"
-#include "disk_groups.h"
-#include "ec.h"
-#include "errcode.h"
-#include "error.h"
-#include "inode.h"
-#include "io_read.h"
-#include "io_write.h"
-#include "journal_reclaim.h"
-#include "keylist.h"
-#include "move.h"
-#include "replicas.h"
-#include "snapshot.h"
-#include "super-io.h"
-#include "trace.h"
-
-#include <linux/ioprio.h>
-#include <linux/kthread.h>
-
-const char * const bch2_data_ops_strs[] = {
-#define x(t, n, ...) [n] = #t,
- BCH_DATA_OPS()
-#undef x
- NULL
-};
-
-static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k,
- struct bch_io_opts *io_opts,
- struct data_update_opts *data_opts)
-{
- if (trace_move_extent_enabled()) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf, c, k);
- prt_newline(&buf);
- bch2_data_update_opts_to_text(&buf, c, io_opts, data_opts);
- trace_move_extent(c, buf.buf);
- printbuf_exit(&buf);
- }
-}
-
-static void trace_move_extent_read2(struct bch_fs *c, struct bkey_s_c k)
-{
- if (trace_move_extent_read_enabled()) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf, c, k);
- trace_move_extent_read(c, buf.buf);
- printbuf_exit(&buf);
- }
-}
-
-struct moving_io {
- struct list_head read_list;
- struct list_head io_list;
- struct move_bucket_in_flight *b;
- struct closure cl;
- bool read_completed;
-
- unsigned read_sectors;
- unsigned write_sectors;
-
- struct bch_read_bio rbio;
-
- struct data_update write;
- /* Must be last since it is variable size */
- struct bio_vec bi_inline_vecs[];
-};
-
-static void move_free(struct moving_io *io)
-{
- struct moving_context *ctxt = io->write.ctxt;
-
- if (io->b)
- atomic_dec(&io->b->count);
-
- bch2_data_update_exit(&io->write);
-
- mutex_lock(&ctxt->lock);
- list_del(&io->io_list);
- wake_up(&ctxt->wait);
- mutex_unlock(&ctxt->lock);
-
- kfree(io);
-}
-
-static void move_write_done(struct bch_write_op *op)
-{
- struct moving_io *io = container_of(op, struct moving_io, write.op);
- struct moving_context *ctxt = io->write.ctxt;
-
- if (io->write.op.error)
- ctxt->write_error = true;
-
- atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
- atomic_dec(&io->write.ctxt->write_ios);
- move_free(io);
- closure_put(&ctxt->cl);
-}
-
-static void move_write(struct moving_io *io)
-{
- if (unlikely(io->rbio.bio.bi_status || io->rbio.hole)) {
- move_free(io);
- return;
- }
-
- if (trace_move_extent_write_enabled()) {
- struct bch_fs *c = io->write.op.c;
- struct printbuf buf = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(io->write.k.k));
- trace_move_extent_write(c, buf.buf);
- printbuf_exit(&buf);
- }
-
- closure_get(&io->write.ctxt->cl);
- atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
- atomic_inc(&io->write.ctxt->write_ios);
-
- bch2_data_update_read_done(&io->write, io->rbio.pick.crc);
-}
-
-struct moving_io *bch2_moving_ctxt_next_pending_write(struct moving_context *ctxt)
-{
- struct moving_io *io =
- list_first_entry_or_null(&ctxt->reads, struct moving_io, read_list);
-
- return io && io->read_completed ? io : NULL;
-}
-
-static void move_read_endio(struct bio *bio)
-{
- struct moving_io *io = container_of(bio, struct moving_io, rbio.bio);
- struct moving_context *ctxt = io->write.ctxt;
-
- atomic_sub(io->read_sectors, &ctxt->read_sectors);
- atomic_dec(&ctxt->read_ios);
- io->read_completed = true;
-
- wake_up(&ctxt->wait);
- closure_put(&ctxt->cl);
-}
-
-void bch2_moving_ctxt_do_pending_writes(struct moving_context *ctxt)
-{
- struct moving_io *io;
-
- while ((io = bch2_moving_ctxt_next_pending_write(ctxt))) {
- bch2_trans_unlock_long(ctxt->trans);
- list_del(&io->read_list);
- move_write(io);
- }
-}
-
-void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
-{
- unsigned sectors_pending = atomic_read(&ctxt->write_sectors);
-
- move_ctxt_wait_event(ctxt,
- !atomic_read(&ctxt->write_sectors) ||
- atomic_read(&ctxt->write_sectors) != sectors_pending);
-}
-
-void bch2_moving_ctxt_flush_all(struct moving_context *ctxt)
-{
- move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
- bch2_trans_unlock_long(ctxt->trans);
- closure_sync(&ctxt->cl);
-}
-
-void bch2_moving_ctxt_exit(struct moving_context *ctxt)
-{
- struct bch_fs *c = ctxt->trans->c;
-
- bch2_moving_ctxt_flush_all(ctxt);
-
- EBUG_ON(atomic_read(&ctxt->write_sectors));
- EBUG_ON(atomic_read(&ctxt->write_ios));
- EBUG_ON(atomic_read(&ctxt->read_sectors));
- EBUG_ON(atomic_read(&ctxt->read_ios));
-
- mutex_lock(&c->moving_context_lock);
- list_del(&ctxt->list);
- mutex_unlock(&c->moving_context_lock);
-
- bch2_trans_put(ctxt->trans);
- memset(ctxt, 0, sizeof(*ctxt));
-}
-
-void bch2_moving_ctxt_init(struct moving_context *ctxt,
- struct bch_fs *c,
- struct bch_ratelimit *rate,
- struct bch_move_stats *stats,
- struct write_point_specifier wp,
- bool wait_on_copygc)
-{
- memset(ctxt, 0, sizeof(*ctxt));
-
- ctxt->trans = bch2_trans_get(c);
- ctxt->fn = (void *) _RET_IP_;
- ctxt->rate = rate;
- ctxt->stats = stats;
- ctxt->wp = wp;
- ctxt->wait_on_copygc = wait_on_copygc;
-
- closure_init_stack(&ctxt->cl);
-
- mutex_init(&ctxt->lock);
- INIT_LIST_HEAD(&ctxt->reads);
- INIT_LIST_HEAD(&ctxt->ios);
- init_waitqueue_head(&ctxt->wait);
-
- mutex_lock(&c->moving_context_lock);
- list_add(&ctxt->list, &c->moving_context_list);
- mutex_unlock(&c->moving_context_lock);
-}
-
-void bch2_move_stats_exit(struct bch_move_stats *stats, struct bch_fs *c)
-{
- trace_move_data(c, stats);
-}
-
-void bch2_move_stats_init(struct bch_move_stats *stats, const char *name)
-{
- memset(stats, 0, sizeof(*stats));
- stats->data_type = BCH_DATA_user;
- scnprintf(stats->name, sizeof(stats->name), "%s", name);
-}
-
-int bch2_move_extent(struct moving_context *ctxt,
- struct move_bucket_in_flight *bucket_in_flight,
- struct btree_iter *iter,
- struct bkey_s_c k,
- struct bch_io_opts io_opts,
- struct data_update_opts data_opts)
-{
- struct btree_trans *trans = ctxt->trans;
- struct bch_fs *c = trans->c;
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- struct moving_io *io;
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- unsigned sectors = k.k->size, pages;
- int ret = -ENOMEM;
-
- trace_move_extent2(c, k, &io_opts, &data_opts);
-
- if (ctxt->stats)
- ctxt->stats->pos = BBPOS(iter->btree_id, iter->pos);
-
- bch2_data_update_opts_normalize(k, &data_opts);
-
- if (!data_opts.rewrite_ptrs &&
- !data_opts.extra_replicas) {
- if (data_opts.kill_ptrs)
- return bch2_extent_drop_ptrs(trans, iter, k, &io_opts, &data_opts);
- return 0;
- }
-
- /*
- * Before memory allocations & taking nocow locks in
- * bch2_data_update_init():
- */
- bch2_trans_unlock(trans);
-
- /* write path might have to decompress data: */
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);
-
- pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
- io = kzalloc(sizeof(struct moving_io) +
- sizeof(struct bio_vec) * pages, GFP_KERNEL);
- if (!io)
- goto err;
-
- INIT_LIST_HEAD(&io->io_list);
- io->write.ctxt = ctxt;
- io->read_sectors = k.k->size;
- io->write_sectors = k.k->size;
-
- bio_init(&io->write.op.wbio.bio, NULL, io->bi_inline_vecs, pages, 0);
- bio_set_prio(&io->write.op.wbio.bio,
- IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
-
- if (bch2_bio_alloc_pages(&io->write.op.wbio.bio, sectors << 9,
- GFP_KERNEL))
- goto err_free;
-
- io->rbio.c = c;
- io->rbio.opts = io_opts;
- bio_init(&io->rbio.bio, NULL, io->bi_inline_vecs, pages, 0);
- io->rbio.bio.bi_vcnt = pages;
- bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
- io->rbio.bio.bi_iter.bi_size = sectors << 9;
-
- io->rbio.bio.bi_opf = REQ_OP_READ;
- io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(k.k);
- io->rbio.bio.bi_end_io = move_read_endio;
-
- ret = bch2_data_update_init(trans, iter, ctxt, &io->write, ctxt->wp,
- io_opts, data_opts, iter->btree_id, k);
- if (ret)
- goto err_free_pages;
-
- io->write.op.end_io = move_write_done;
-
- if (ctxt->rate)
- bch2_ratelimit_increment(ctxt->rate, k.k->size);
-
- if (ctxt->stats) {
- atomic64_inc(&ctxt->stats->keys_moved);
- atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
- }
-
- if (bucket_in_flight) {
- io->b = bucket_in_flight;
- atomic_inc(&io->b->count);
- }
-
- this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
- this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
- trace_move_extent_read2(c, k);
-
- mutex_lock(&ctxt->lock);
- atomic_add(io->read_sectors, &ctxt->read_sectors);
- atomic_inc(&ctxt->read_ios);
-
- list_add_tail(&io->read_list, &ctxt->reads);
- list_add_tail(&io->io_list, &ctxt->ios);
- mutex_unlock(&ctxt->lock);
-
- /*
- * dropped by move_read_endio() - guards against use after free of
- * ctxt when doing wakeup
- */
- closure_get(&ctxt->cl);
- bch2_read_extent(trans, &io->rbio,
- bkey_start_pos(k.k),
- iter->btree_id, k, 0,
- BCH_READ_NODECODE|
- BCH_READ_LAST_FRAGMENT);
- return 0;
-err_free_pages:
- bio_free_pages(&io->write.op.wbio.bio);
-err_free:
- kfree(io);
-err:
- if (ret == -BCH_ERR_data_update_done)
- return 0;
-
- if (bch2_err_matches(ret, EROFS) ||
- bch2_err_matches(ret, BCH_ERR_transaction_restart))
- return ret;
-
- count_event(c, move_extent_start_fail);
-
- if (trace_move_extent_start_fail_enabled()) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf, c, k);
- prt_str(&buf, ": ");
- prt_str(&buf, bch2_err_str(ret));
- trace_move_extent_start_fail(c, buf.buf);
- printbuf_exit(&buf);
- }
- return ret;
-}
-
-struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
- struct per_snapshot_io_opts *io_opts,
- struct bkey_s_c extent_k)
-{
- struct bch_fs *c = trans->c;
- u32 restart_count = trans->restart_count;
- int ret = 0;
-
- if (io_opts->cur_inum != extent_k.k->p.inode) {
- io_opts->d.nr = 0;
-
- ret = for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, extent_k.k->p.inode),
- BTREE_ITER_all_snapshots, k, ({
- if (k.k->p.offset != extent_k.k->p.inode)
- break;
-
- if (!bkey_is_inode(k.k))
- continue;
-
- struct bch_inode_unpacked inode;
- BUG_ON(bch2_inode_unpack(k, &inode));
-
- struct snapshot_io_opts_entry e = { .snapshot = k.k->p.snapshot };
- bch2_inode_opts_get(&e.io_opts, trans->c, &inode);
-
- darray_push(&io_opts->d, e);
- }));
- io_opts->cur_inum = extent_k.k->p.inode;
- }
-
- ret = ret ?: trans_was_restarted(trans, restart_count);
- if (ret)
- return ERR_PTR(ret);
-
- if (extent_k.k->p.snapshot)
- darray_for_each(io_opts->d, i)
- if (bch2_snapshot_is_ancestor(c, extent_k.k->p.snapshot, i->snapshot))
- return &i->io_opts;
-
- return &io_opts->fs_io_opts;
-}
-
-int bch2_move_get_io_opts_one(struct btree_trans *trans,
- struct bch_io_opts *io_opts,
- struct bkey_s_c extent_k)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret;
-
- /* reflink btree? */
- if (!extent_k.k->p.inode) {
- *io_opts = bch2_opts_to_inode_opts(trans->c->opts);
- return 0;
- }
-
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
- SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot),
- BTREE_ITER_cached);
- ret = bkey_err(k);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- return ret;
-
- if (!ret && bkey_is_inode(k.k)) {
- struct bch_inode_unpacked inode;
- bch2_inode_unpack(k, &inode);
- bch2_inode_opts_get(io_opts, trans->c, &inode);
- } else {
- *io_opts = bch2_opts_to_inode_opts(trans->c->opts);
- }
-
- bch2_trans_iter_exit(trans, &iter);
- return 0;
-}
-
-int bch2_move_ratelimit(struct moving_context *ctxt)
-{
- struct bch_fs *c = ctxt->trans->c;
- bool is_kthread = current->flags & PF_KTHREAD;
- u64 delay;
-
- if (ctxt->wait_on_copygc && c->copygc_running) {
- bch2_moving_ctxt_flush_all(ctxt);
- wait_event_killable(c->copygc_running_wq,
- !c->copygc_running ||
- (is_kthread && kthread_should_stop()));
- }
-
- do {
- delay = ctxt->rate ? bch2_ratelimit_delay(ctxt->rate) : 0;
-
- if (is_kthread && kthread_should_stop())
- return 1;
-
- if (delay)
- move_ctxt_wait_event_timeout(ctxt,
- freezing(current) ||
- (is_kthread && kthread_should_stop()),
- delay);
-
- if (unlikely(freezing(current))) {
- bch2_moving_ctxt_flush_all(ctxt);
- try_to_freeze();
- }
- } while (delay);
-
- /*
- * XXX: these limits really ought to be per device, SSDs and hard drives
- * will want different limits
- */
- move_ctxt_wait_event(ctxt,
- atomic_read(&ctxt->write_sectors) < c->opts.move_bytes_in_flight >> 9 &&
- atomic_read(&ctxt->read_sectors) < c->opts.move_bytes_in_flight >> 9 &&
- atomic_read(&ctxt->write_ios) < c->opts.move_ios_in_flight &&
- atomic_read(&ctxt->read_ios) < c->opts.move_ios_in_flight);
-
- return 0;
-}
-
-static int bch2_move_data_btree(struct moving_context *ctxt,
- struct bpos start,
- struct bpos end,
- move_pred_fn pred, void *arg,
- enum btree_id btree_id)
-{
- struct btree_trans *trans = ctxt->trans;
- struct bch_fs *c = trans->c;
- struct per_snapshot_io_opts snapshot_io_opts;
- struct bch_io_opts *io_opts;
- struct bkey_buf sk;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct data_update_opts data_opts;
- int ret = 0, ret2;
-
- per_snapshot_io_opts_init(&snapshot_io_opts, c);
- bch2_bkey_buf_init(&sk);
-
- if (ctxt->stats) {
- ctxt->stats->data_type = BCH_DATA_user;
- ctxt->stats->pos = BBPOS(btree_id, start);
- }
-
- bch2_trans_begin(trans);
- bch2_trans_iter_init(trans, &iter, btree_id, start,
- BTREE_ITER_prefetch|
- BTREE_ITER_all_snapshots);
-
- if (ctxt->rate)
- bch2_ratelimit_reset(ctxt->rate);
-
- while (!bch2_move_ratelimit(ctxt)) {
- bch2_trans_begin(trans);
-
- k = bch2_btree_iter_peek(&iter);
- if (!k.k)
- break;
-
- ret = bkey_err(k);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret)
- break;
-
- if (bkey_ge(bkey_start_pos(k.k), end))
- break;
-
- if (ctxt->stats)
- ctxt->stats->pos = BBPOS(iter.btree_id, iter.pos);
-
- if (!bkey_extent_is_direct_data(k.k))
- goto next_nondata;
-
- io_opts = bch2_move_get_io_opts(trans, &snapshot_io_opts, k);
- ret = PTR_ERR_OR_ZERO(io_opts);
- if (ret)
- continue;
-
- memset(&data_opts, 0, sizeof(data_opts));
- if (!pred(c, arg, k, io_opts, &data_opts))
- goto next;
-
- /*
- * The iterator gets unlocked by __bch2_read_extent - need to
- * save a copy of @k elsewhere:
- */
- bch2_bkey_buf_reassemble(&sk, c, k);
- k = bkey_i_to_s_c(sk.k);
-
- ret2 = bch2_move_extent(ctxt, NULL, &iter, k, *io_opts, data_opts);
- if (ret2) {
- if (bch2_err_matches(ret2, BCH_ERR_transaction_restart))
- continue;
-
- if (ret2 == -ENOMEM) {
- /* memory allocation failure, wait for some IO to finish */
- bch2_move_ctxt_wait_for_io(ctxt);
- continue;
- }
-
- /* XXX signal failure */
- goto next;
- }
-next:
- if (ctxt->stats)
- atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
-next_nondata:
- bch2_btree_iter_advance(&iter);
- }
-
- bch2_trans_iter_exit(trans, &iter);
- bch2_bkey_buf_exit(&sk, c);
- per_snapshot_io_opts_exit(&snapshot_io_opts);
-
- return ret;
-}
-
-int __bch2_move_data(struct moving_context *ctxt,
- struct bbpos start,
- struct bbpos end,
- move_pred_fn pred, void *arg)
-{
- struct bch_fs *c = ctxt->trans->c;
- enum btree_id id;
- int ret = 0;
-
- for (id = start.btree;
- id <= min_t(unsigned, end.btree, btree_id_nr_alive(c) - 1);
- id++) {
- ctxt->stats->pos = BBPOS(id, POS_MIN);
-
- if (!btree_type_has_ptrs(id) ||
- !bch2_btree_id_root(c, id)->b)
- continue;
-
- ret = bch2_move_data_btree(ctxt,
- id == start.btree ? start.pos : POS_MIN,
- id == end.btree ? end.pos : POS_MAX,
- pred, arg, id);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-int bch2_move_data(struct bch_fs *c,
- struct bbpos start,
- struct bbpos end,
- struct bch_ratelimit *rate,
- struct bch_move_stats *stats,
- struct write_point_specifier wp,
- bool wait_on_copygc,
- move_pred_fn pred, void *arg)
-{
-
- struct moving_context ctxt;
- int ret;
-
- bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
- ret = __bch2_move_data(&ctxt, start, end, pred, arg);
- bch2_moving_ctxt_exit(&ctxt);
-
- return ret;
-}
-
-int bch2_evacuate_bucket(struct moving_context *ctxt,
- struct move_bucket_in_flight *bucket_in_flight,
- struct bpos bucket, int gen,
- struct data_update_opts _data_opts)
-{
- struct btree_trans *trans = ctxt->trans;
- struct bch_fs *c = trans->c;
- bool is_kthread = current->flags & PF_KTHREAD;
- struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
- struct btree_iter iter;
- struct bkey_buf sk;
- struct bch_backpointer bp;
- struct bch_alloc_v4 a_convert;
- const struct bch_alloc_v4 *a;
- struct bkey_s_c k;
- struct data_update_opts data_opts;
- unsigned dirty_sectors, bucket_size;
- u64 fragmentation;
- struct bpos bp_pos = POS_MIN;
- int ret = 0;
-
- struct bch_dev *ca = bch2_dev_tryget(c, bucket.inode);
- if (!ca)
- return 0;
-
- trace_bucket_evacuate(c, &bucket);
-
- bch2_bkey_buf_init(&sk);
-
- /*
- * We're not run in a context that handles transaction restarts:
- */
- bch2_trans_begin(trans);
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
- bucket, BTREE_ITER_cached);
- ret = lockrestart_do(trans,
- bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
- bch2_trans_iter_exit(trans, &iter);
-
- bch_err_msg(c, ret, "looking up alloc key");
- if (ret)
- goto err;
-
- a = bch2_alloc_to_v4(k, &a_convert);
- dirty_sectors = bch2_bucket_sectors_dirty(*a);
- bucket_size = ca->mi.bucket_size;
- fragmentation = alloc_lru_idx_fragmentation(*a, ca);
-
- ret = bch2_btree_write_buffer_tryflush(trans);
- bch_err_msg(c, ret, "flushing btree write buffer");
- if (ret)
- goto err;
-
- while (!(ret = bch2_move_ratelimit(ctxt))) {
- if (is_kthread && kthread_should_stop())
- break;
-
- bch2_trans_begin(trans);
-
- ret = bch2_get_next_backpointer(trans, ca, bucket, gen,
- &bp_pos, &bp,
- BTREE_ITER_cached);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret)
- goto err;
- if (bkey_eq(bp_pos, POS_MAX))
- break;
-
- if (!bp.level) {
- k = bch2_backpointer_get_key(trans, &iter, bp_pos, bp, 0);
- ret = bkey_err(k);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret)
- goto err;
- if (!k.k)
- goto next;
-
- bch2_bkey_buf_reassemble(&sk, c, k);
- k = bkey_i_to_s_c(sk.k);
-
- ret = bch2_move_get_io_opts_one(trans, &io_opts, k);
- if (ret) {
- bch2_trans_iter_exit(trans, &iter);
- continue;
- }
-
- data_opts = _data_opts;
- data_opts.target = io_opts.background_target;
- data_opts.rewrite_ptrs = 0;
-
- unsigned i = 0;
- bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
- if (ptr->dev == bucket.inode) {
- data_opts.rewrite_ptrs |= 1U << i;
- if (ptr->cached) {
- bch2_trans_iter_exit(trans, &iter);
- goto next;
- }
- }
- i++;
- }
-
- ret = bch2_move_extent(ctxt, bucket_in_flight,
- &iter, k, io_opts, data_opts);
- bch2_trans_iter_exit(trans, &iter);
-
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret == -ENOMEM) {
- /* memory allocation failure, wait for some IO to finish */
- bch2_move_ctxt_wait_for_io(ctxt);
- continue;
- }
- if (ret)
- goto err;
-
- if (ctxt->stats)
- atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
- } else {
- struct btree *b;
-
- b = bch2_backpointer_get_node(trans, &iter, bp_pos, bp);
- ret = PTR_ERR_OR_ZERO(b);
- if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
- continue;
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret)
- goto err;
- if (!b)
- goto next;
-
- unsigned sectors = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
-
- ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
- bch2_trans_iter_exit(trans, &iter);
-
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret)
- goto err;
-
- if (ctxt->rate)
- bch2_ratelimit_increment(ctxt->rate, sectors);
- if (ctxt->stats) {
- atomic64_add(sectors, &ctxt->stats->sectors_seen);
- atomic64_add(sectors, &ctxt->stats->sectors_moved);
- }
- }
-next:
- bp_pos = bpos_nosnap_successor(bp_pos);
- }
-
- trace_evacuate_bucket(c, &bucket, dirty_sectors, bucket_size, fragmentation, ret);
-err:
- bch2_dev_put(ca);
- bch2_bkey_buf_exit(&sk, c);
- return ret;
-}
-
-typedef bool (*move_btree_pred)(struct bch_fs *, void *,
- struct btree *, struct bch_io_opts *,
- struct data_update_opts *);
-
-static int bch2_move_btree(struct bch_fs *c,
- struct bbpos start,
- struct bbpos end,
- move_btree_pred pred, void *arg,
- struct bch_move_stats *stats)
-{
- bool kthread = (current->flags & PF_KTHREAD) != 0;
- struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
- struct moving_context ctxt;
- struct btree_trans *trans;
- struct btree_iter iter;
- struct btree *b;
- enum btree_id btree;
- struct data_update_opts data_opts;
- int ret = 0;
-
- bch2_moving_ctxt_init(&ctxt, c, NULL, stats,
- writepoint_ptr(&c->btree_write_point),
- true);
- trans = ctxt.trans;
-
- stats->data_type = BCH_DATA_btree;
-
- for (btree = start.btree;
- btree <= min_t(unsigned, end.btree, btree_id_nr_alive(c) - 1);
- btree ++) {
- stats->pos = BBPOS(btree, POS_MIN);
-
- if (!bch2_btree_id_root(c, btree)->b)
- continue;
-
- bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN, 0, 0,
- BTREE_ITER_prefetch);
-retry:
- ret = 0;
- while (bch2_trans_begin(trans),
- (b = bch2_btree_iter_peek_node(&iter)) &&
- !(ret = PTR_ERR_OR_ZERO(b))) {
- if (kthread && kthread_should_stop())
- break;
-
- if ((cmp_int(btree, end.btree) ?:
- bpos_cmp(b->key.k.p, end.pos)) > 0)
- break;
-
- stats->pos = BBPOS(iter.btree_id, iter.pos);
-
- if (!pred(c, arg, b, &io_opts, &data_opts))
- goto next;
-
- ret = bch2_btree_node_rewrite(trans, &iter, b, 0) ?: ret;
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret)
- break;
-next:
- bch2_btree_iter_next_node(&iter);
- }
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- bch2_trans_iter_exit(trans, &iter);
-
- if (kthread && kthread_should_stop())
- break;
- }
-
- bch_err_fn(c, ret);
- bch2_moving_ctxt_exit(&ctxt);
- bch2_btree_interior_updates_flush(c);
-
- return ret;
-}
-
-static bool rereplicate_pred(struct bch_fs *c, void *arg,
- struct bkey_s_c k,
- struct bch_io_opts *io_opts,
- struct data_update_opts *data_opts)
-{
- unsigned nr_good = bch2_bkey_durability(c, k);
- unsigned replicas = bkey_is_btree_ptr(k.k)
- ? c->opts.metadata_replicas
- : io_opts->data_replicas;
-
- rcu_read_lock();
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- unsigned i = 0;
- bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
- if (!ptr->cached &&
- (!ca || !ca->mi.durability))
- data_opts->kill_ptrs |= BIT(i);
- i++;
- }
- rcu_read_unlock();
-
- if (!data_opts->kill_ptrs &&
- (!nr_good || nr_good >= replicas))
- return false;
-
- data_opts->target = 0;
- data_opts->extra_replicas = replicas - nr_good;
- data_opts->btree_insert_flags = 0;
- return true;
-}
-
-static bool migrate_pred(struct bch_fs *c, void *arg,
- struct bkey_s_c k,
- struct bch_io_opts *io_opts,
- struct data_update_opts *data_opts)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- struct bch_ioctl_data *op = arg;
- unsigned i = 0;
-
- data_opts->rewrite_ptrs = 0;
- data_opts->target = 0;
- data_opts->extra_replicas = 0;
- data_opts->btree_insert_flags = 0;
-
- bkey_for_each_ptr(ptrs, ptr) {
- if (ptr->dev == op->migrate.dev)
- data_opts->rewrite_ptrs |= 1U << i;
- i++;
- }
-
- return data_opts->rewrite_ptrs != 0;
-}
-
-static bool rereplicate_btree_pred(struct bch_fs *c, void *arg,
- struct btree *b,
- struct bch_io_opts *io_opts,
- struct data_update_opts *data_opts)
-{
- return rereplicate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
-}
-
-static bool migrate_btree_pred(struct bch_fs *c, void *arg,
- struct btree *b,
- struct bch_io_opts *io_opts,
- struct data_update_opts *data_opts)
-{
- return migrate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
-}
-
-/*
- * Ancient versions of bcachefs produced packed formats which could represent
- * keys that the in memory format cannot represent; this checks for those
- * formats so we can get rid of them.
- */
-static bool bformat_needs_redo(struct bkey_format *f)
-{
- for (unsigned i = 0; i < f->nr_fields; i++)
- if (bch2_bkey_format_field_overflows(f, i))
- return true;
-
- return false;
-}
-
-static bool rewrite_old_nodes_pred(struct bch_fs *c, void *arg,
- struct btree *b,
- struct bch_io_opts *io_opts,
- struct data_update_opts *data_opts)
-{
- if (b->version_ondisk != c->sb.version ||
- btree_node_need_rewrite(b) ||
- bformat_needs_redo(&b->format)) {
- data_opts->target = 0;
- data_opts->extra_replicas = 0;
- data_opts->btree_insert_flags = 0;
- return true;
- }
-
- return false;
-}
-
-int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats)
-{
- int ret;
-
- ret = bch2_move_btree(c,
- BBPOS_MIN,
- BBPOS_MAX,
- rewrite_old_nodes_pred, c, stats);
- if (!ret) {
- mutex_lock(&c->sb_lock);
- c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
- c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);
- c->disk_sb.sb->version_min = c->disk_sb.sb->version;
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
- }
-
- bch_err_fn(c, ret);
- return ret;
-}
-
-static bool drop_extra_replicas_pred(struct bch_fs *c, void *arg,
- struct bkey_s_c k,
- struct bch_io_opts *io_opts,
- struct data_update_opts *data_opts)
-{
- unsigned durability = bch2_bkey_durability(c, k);
- unsigned replicas = bkey_is_btree_ptr(k.k)
- ? c->opts.metadata_replicas
- : io_opts->data_replicas;
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- unsigned i = 0;
-
- rcu_read_lock();
- bkey_for_each_ptr_decode(k.k, bch2_bkey_ptrs_c(k), p, entry) {
- unsigned d = bch2_extent_ptr_durability(c, &p);
-
- if (d && durability - d >= replicas) {
- data_opts->kill_ptrs |= BIT(i);
- durability -= d;
- }
-
- i++;
- }
- rcu_read_unlock();
-
- return data_opts->kill_ptrs != 0;
-}
-
-static bool drop_extra_replicas_btree_pred(struct bch_fs *c, void *arg,
- struct btree *b,
- struct bch_io_opts *io_opts,
- struct data_update_opts *data_opts)
-{
- return drop_extra_replicas_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
-}
-
-int bch2_data_job(struct bch_fs *c,
- struct bch_move_stats *stats,
- struct bch_ioctl_data op)
-{
- struct bbpos start = BBPOS(op.start_btree, op.start_pos);
- struct bbpos end = BBPOS(op.end_btree, op.end_pos);
- int ret = 0;
-
- if (op.op >= BCH_DATA_OP_NR)
- return -EINVAL;
-
- bch2_move_stats_init(stats, bch2_data_ops_strs[op.op]);
-
- switch (op.op) {
- case BCH_DATA_OP_rereplicate:
- stats->data_type = BCH_DATA_journal;
- ret = bch2_journal_flush_device_pins(&c->journal, -1);
- ret = bch2_move_btree(c, start, end,
- rereplicate_btree_pred, c, stats) ?: ret;
- ret = bch2_move_data(c, start, end,
- NULL,
- stats,
- writepoint_hashed((unsigned long) current),
- true,
- rereplicate_pred, c) ?: ret;
- ret = bch2_replicas_gc2(c) ?: ret;
- break;
- case BCH_DATA_OP_migrate:
- if (op.migrate.dev >= c->sb.nr_devices)
- return -EINVAL;
-
- stats->data_type = BCH_DATA_journal;
- ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);
- ret = bch2_move_btree(c, start, end,
- migrate_btree_pred, &op, stats) ?: ret;
- ret = bch2_move_data(c, start, end,
- NULL,
- stats,
- writepoint_hashed((unsigned long) current),
- true,
- migrate_pred, &op) ?: ret;
- ret = bch2_replicas_gc2(c) ?: ret;
- break;
- case BCH_DATA_OP_rewrite_old_nodes:
- ret = bch2_scan_old_btree_nodes(c, stats);
- break;
- case BCH_DATA_OP_drop_extra_replicas:
- ret = bch2_move_btree(c, start, end,
- drop_extra_replicas_btree_pred, c, stats) ?: ret;
- ret = bch2_move_data(c, start, end, NULL, stats,
- writepoint_hashed((unsigned long) current),
- true,
- drop_extra_replicas_pred, c) ?: ret;
- ret = bch2_replicas_gc2(c) ?: ret;
- break;
- default:
- ret = -EINVAL;
- }
-
- bch2_move_stats_exit(stats, c);
- return ret;
-}
-
-void bch2_move_stats_to_text(struct printbuf *out, struct bch_move_stats *stats)
-{
- prt_printf(out, "%s: data type==", stats->name);
- bch2_prt_data_type(out, stats->data_type);
- prt_str(out, " pos=");
- bch2_bbpos_to_text(out, stats->pos);
- prt_newline(out);
- printbuf_indent_add(out, 2);
-
- prt_printf(out, "keys moved: %llu\n", atomic64_read(&stats->keys_moved));
- prt_printf(out, "keys raced: %llu\n", atomic64_read(&stats->keys_raced));
- prt_printf(out, "bytes seen: ");
- prt_human_readable_u64(out, atomic64_read(&stats->sectors_seen) << 9);
- prt_newline(out);
-
- prt_printf(out, "bytes moved: ");
- prt_human_readable_u64(out, atomic64_read(&stats->sectors_moved) << 9);
- prt_newline(out);
-
- prt_printf(out, "bytes raced: ");
- prt_human_readable_u64(out, atomic64_read(&stats->sectors_raced) << 9);
- prt_newline(out);
-
- printbuf_indent_sub(out, 2);
-}
-
-static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, struct moving_context *ctxt)
-{
- struct moving_io *io;
-
- bch2_move_stats_to_text(out, ctxt->stats);
- printbuf_indent_add(out, 2);
-
- prt_printf(out, "reads: ios %u/%u sectors %u/%u\n",
- atomic_read(&ctxt->read_ios),
- c->opts.move_ios_in_flight,
- atomic_read(&ctxt->read_sectors),
- c->opts.move_bytes_in_flight >> 9);
-
- prt_printf(out, "writes: ios %u/%u sectors %u/%u\n",
- atomic_read(&ctxt->write_ios),
- c->opts.move_ios_in_flight,
- atomic_read(&ctxt->write_sectors),
- c->opts.move_bytes_in_flight >> 9);
-
- printbuf_indent_add(out, 2);
-
- mutex_lock(&ctxt->lock);
- list_for_each_entry(io, &ctxt->ios, io_list)
- bch2_write_op_to_text(out, &io->write.op);
- mutex_unlock(&ctxt->lock);
-
- printbuf_indent_sub(out, 4);
-}
-
-void bch2_fs_moving_ctxts_to_text(struct printbuf *out, struct bch_fs *c)
-{
- struct moving_context *ctxt;
-
- mutex_lock(&c->moving_context_lock);
- list_for_each_entry(ctxt, &c->moving_context_list, list)
- bch2_moving_ctxt_to_text(out, c, ctxt);
- mutex_unlock(&c->moving_context_lock);
-}
-
-void bch2_fs_move_init(struct bch_fs *c)
-{
- INIT_LIST_HEAD(&c->moving_context_list);
- mutex_init(&c->moving_context_lock);
-}
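
The predicates above, rereplicate_pred() and drop_extra_replicas_pred(), boil down to simple replica arithmetic: request extra replicas when the summed durability falls short of the target, and mark surplus pointers in a kill mask when it exceeds it. Below is a rough stand-alone sketch of that arithmetic; plan_rereplicate(), fake_opts and the sample extent are invented for illustration, and the real predicates operate on bkeys under RCU with the target taken from c->opts or io_opts.

/*
 * Simplified model of the replica planning in rereplicate_pred() /
 * drop_extra_replicas_pred(); all names and values here are invented.
 */
#include <stdio.h>

struct fake_opts {
	unsigned extra_replicas;	/* replicas the data update should add */
	unsigned kill_ptrs;		/* bitmask of pointers to drop */
};

/* durability[i]: durability contributed by pointer i (0 for a cached pointer) */
static void plan_rereplicate(const unsigned *durability, unsigned nr_ptrs,
			     unsigned want_replicas, struct fake_opts *opts)
{
	unsigned have = 0;

	for (unsigned i = 0; i < nr_ptrs; i++)
		have += durability[i];

	/* under-replicated: request the shortfall as extra replicas */
	if (have && have < want_replicas)
		opts->extra_replicas = want_replicas - have;

	/* over-replicated: drop pointers while durability stays >= target */
	for (unsigned i = 0; i < nr_ptrs; i++)
		if (durability[i] && have - durability[i] >= want_replicas) {
			opts->kill_ptrs |= 1U << i;
			have -= durability[i];
		}
}

int main(void)
{
	unsigned extent[] = { 1, 1, 1 };	/* three single-durability pointers */
	struct fake_opts opts = { 0, 0 };

	plan_rereplicate(extent, 3, 2, &opts);
	printf("extra_replicas=%u kill_ptrs=0x%x\n",	/* prints 0 and 0x1 */
	       opts.extra_replicas, opts.kill_ptrs);
	return 0;
}
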
diff --git a/fs/bcachefs/move.h b/fs/bcachefs/move.h
deleted file mode 100644
index 9baf3093a678..000000000000
--- a/fs/bcachefs/move.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_MOVE_H
-#define _BCACHEFS_MOVE_H
-
-#include "bbpos.h"
-#include "bcachefs_ioctl.h"
-#include "btree_iter.h"
-#include "buckets.h"
-#include "data_update.h"
-#include "move_types.h"
-
-struct bch_read_bio;
-
-struct moving_context {
- struct btree_trans *trans;
- struct list_head list;
- void *fn;
-
- struct bch_ratelimit *rate;
- struct bch_move_stats *stats;
- struct write_point_specifier wp;
- bool wait_on_copygc;
- bool write_error;
-
- /* For waiting on outstanding reads and writes: */
- struct closure cl;
-
- struct mutex lock;
- struct list_head reads;
- struct list_head ios;
-
- /* in flight sectors: */
- atomic_t read_sectors;
- atomic_t write_sectors;
- atomic_t read_ios;
- atomic_t write_ios;
-
- wait_queue_head_t wait;
-};
-
-#define move_ctxt_wait_event_timeout(_ctxt, _cond, _timeout) \
-({ \
- int _ret = 0; \
- while (true) { \
- bool cond_finished = false; \
- bch2_moving_ctxt_do_pending_writes(_ctxt); \
- \
- if (_cond) \
- break; \
- bch2_trans_unlock_long((_ctxt)->trans); \
- _ret = __wait_event_timeout((_ctxt)->wait, \
- bch2_moving_ctxt_next_pending_write(_ctxt) || \
- (cond_finished = (_cond)), _timeout); \
- if (_ret || ( cond_finished)) \
- break; \
- } \
- _ret; \
-})
-
-#define move_ctxt_wait_event(_ctxt, _cond) \
-do { \
- bool cond_finished = false; \
- bch2_moving_ctxt_do_pending_writes(_ctxt); \
- \
- if (_cond) \
- break; \
- bch2_trans_unlock_long((_ctxt)->trans); \
- __wait_event((_ctxt)->wait, \
- bch2_moving_ctxt_next_pending_write(_ctxt) || \
- (cond_finished = (_cond))); \
- if (cond_finished) \
- break; \
-} while (1)
-
-typedef bool (*move_pred_fn)(struct bch_fs *, void *, struct bkey_s_c,
- struct bch_io_opts *, struct data_update_opts *);
-
-extern const char * const bch2_data_ops_strs[];
-
-void bch2_moving_ctxt_exit(struct moving_context *);
-void bch2_moving_ctxt_init(struct moving_context *, struct bch_fs *,
- struct bch_ratelimit *, struct bch_move_stats *,
- struct write_point_specifier, bool);
-struct moving_io *bch2_moving_ctxt_next_pending_write(struct moving_context *);
-void bch2_moving_ctxt_do_pending_writes(struct moving_context *);
-void bch2_moving_ctxt_flush_all(struct moving_context *);
-void bch2_move_ctxt_wait_for_io(struct moving_context *);
-int bch2_move_ratelimit(struct moving_context *);
-
-/* Inodes in different snapshots may have different IO options: */
-struct snapshot_io_opts_entry {
- u32 snapshot;
- struct bch_io_opts io_opts;
-};
-
-struct per_snapshot_io_opts {
- u64 cur_inum;
- struct bch_io_opts fs_io_opts;
- DARRAY(struct snapshot_io_opts_entry) d;
-};
-
-static inline void per_snapshot_io_opts_init(struct per_snapshot_io_opts *io_opts, struct bch_fs *c)
-{
- memset(io_opts, 0, sizeof(*io_opts));
- io_opts->fs_io_opts = bch2_opts_to_inode_opts(c->opts);
-}
-
-static inline void per_snapshot_io_opts_exit(struct per_snapshot_io_opts *io_opts)
-{
- darray_exit(&io_opts->d);
-}
-
-struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *,
- struct per_snapshot_io_opts *, struct bkey_s_c);
-int bch2_move_get_io_opts_one(struct btree_trans *, struct bch_io_opts *, struct bkey_s_c);
-
-int bch2_scan_old_btree_nodes(struct bch_fs *, struct bch_move_stats *);
-
-int bch2_move_extent(struct moving_context *,
- struct move_bucket_in_flight *,
- struct btree_iter *,
- struct bkey_s_c,
- struct bch_io_opts,
- struct data_update_opts);
-
-int __bch2_move_data(struct moving_context *,
- struct bbpos,
- struct bbpos,
- move_pred_fn, void *);
-int bch2_move_data(struct bch_fs *,
- struct bbpos start,
- struct bbpos end,
- struct bch_ratelimit *,
- struct bch_move_stats *,
- struct write_point_specifier,
- bool,
- move_pred_fn, void *);
-
-int bch2_evacuate_bucket(struct moving_context *,
- struct move_bucket_in_flight *,
- struct bpos, int,
- struct data_update_opts);
-int bch2_data_job(struct bch_fs *,
- struct bch_move_stats *,
- struct bch_ioctl_data);
-
-void bch2_move_stats_to_text(struct printbuf *, struct bch_move_stats *);
-void bch2_move_stats_exit(struct bch_move_stats *, struct bch_fs *);
-void bch2_move_stats_init(struct bch_move_stats *, const char *);
-
-void bch2_fs_moving_ctxts_to_text(struct printbuf *, struct bch_fs *);
-
-void bch2_fs_move_init(struct bch_fs *);
-
-#endif /* _BCACHEFS_MOVE_H */
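
per_snapshot_io_opts above caches one inode's IO options across all of its snapshots so the data-move path does not have to re-read the inode for every extent. The sketch below is a simplified stand-alone model of that cache: it refills on an inode-number change and falls back to the filesystem-wide options, but it substitutes an exact snapshot match for the real bch2_snapshot_is_ancestor() check, and the types and sample data are invented.

/*
 * Stand-alone model of per_snapshot_io_opts: refill the cache whenever the
 * inode number changes, then pick the per-snapshot entry (exact match here,
 * ancestor check in the real code).
 */
#include <stdio.h>

struct io_opts { unsigned compression; };

struct snapshot_opts_cache {
	unsigned long long cur_inum;		/* 0: nothing cached yet */
	unsigned nr;
	unsigned snapshots[8];
	struct io_opts opts[8];
	struct io_opts fs_opts;			/* filesystem-wide fallback */
};

/* stand-in for scanning the inodes btree for one inode in all snapshots */
static void load_opts_for_inum(struct snapshot_opts_cache *c,
			       unsigned long long inum)
{
	c->nr = 0;
	c->cur_inum = inum;
	if (inum == 42) {	/* pretend inode 42 exists in snapshots 1 and 3 */
		c->snapshots[c->nr] = 1; c->opts[c->nr++].compression = 0;
		c->snapshots[c->nr] = 3; c->opts[c->nr++].compression = 1;
	}
}

static const struct io_opts *get_io_opts(struct snapshot_opts_cache *c,
					 unsigned long long inum,
					 unsigned snapshot)
{
	if (c->cur_inum != inum)
		load_opts_for_inum(c, inum);

	for (unsigned i = 0; i < c->nr; i++)
		if (c->snapshots[i] == snapshot)
			return &c->opts[i];

	return &c->fs_opts;	/* no per-inode entry: use fs-wide options */
}

int main(void)
{
	struct snapshot_opts_cache cache = { .fs_opts = { .compression = 9 } };

	printf("%u\n", get_io_opts(&cache, 42, 3)->compression);	/* 1 */
	printf("%u\n", get_io_opts(&cache, 42, 7)->compression);	/* 9 */
	return 0;
}
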
diff --git a/fs/bcachefs/move_types.h b/fs/bcachefs/move_types.h
deleted file mode 100644
index e22841ef31e4..000000000000
--- a/fs/bcachefs/move_types.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_MOVE_TYPES_H
-#define _BCACHEFS_MOVE_TYPES_H
-
-#include "bbpos_types.h"
-
-struct bch_move_stats {
- enum bch_data_type data_type;
- struct bbpos pos;
- char name[32];
-
- atomic64_t keys_moved;
- atomic64_t keys_raced;
- atomic64_t sectors_seen;
- atomic64_t sectors_moved;
- atomic64_t sectors_raced;
-};
-
-struct move_bucket_key {
- struct bpos bucket;
- u8 gen;
-};
-
-struct move_bucket {
- struct move_bucket_key k;
- unsigned sectors;
-};
-
-struct move_bucket_in_flight {
- struct move_bucket_in_flight *next;
- struct rhash_head hash;
- struct move_bucket bucket;
- atomic_t count;
-};
-
-#endif /* _BCACHEFS_MOVE_TYPES_H */
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
deleted file mode 100644
index d658be90f737..000000000000
--- a/fs/bcachefs/movinggc.c
+++ /dev/null
@@ -1,449 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Moving/copying garbage collector
- *
- * Copyright 2012 Google, Inc.
- */
-
-#include "bcachefs.h"
-#include "alloc_background.h"
-#include "alloc_foreground.h"
-#include "btree_iter.h"
-#include "btree_update.h"
-#include "btree_write_buffer.h"
-#include "buckets.h"
-#include "clock.h"
-#include "errcode.h"
-#include "error.h"
-#include "lru.h"
-#include "move.h"
-#include "movinggc.h"
-#include "trace.h"
-
-#include <linux/freezer.h>
-#include <linux/kthread.h>
-#include <linux/math64.h>
-#include <linux/sched/task.h>
-#include <linux/wait.h>
-
-struct buckets_in_flight {
- struct rhashtable table;
- struct move_bucket_in_flight *first;
- struct move_bucket_in_flight *last;
- size_t nr;
- size_t sectors;
-};
-
-static const struct rhashtable_params bch_move_bucket_params = {
- .head_offset = offsetof(struct move_bucket_in_flight, hash),
- .key_offset = offsetof(struct move_bucket_in_flight, bucket.k),
- .key_len = sizeof(struct move_bucket_key),
- .automatic_shrinking = true,
-};
-
-static struct move_bucket_in_flight *
-move_bucket_in_flight_add(struct buckets_in_flight *list, struct move_bucket b)
-{
- struct move_bucket_in_flight *new = kzalloc(sizeof(*new), GFP_KERNEL);
- int ret;
-
- if (!new)
- return ERR_PTR(-ENOMEM);
-
- new->bucket = b;
-
- ret = rhashtable_lookup_insert_fast(&list->table, &new->hash,
- bch_move_bucket_params);
- if (ret) {
- kfree(new);
- return ERR_PTR(ret);
- }
-
- if (!list->first)
- list->first = new;
- else
- list->last->next = new;
-
- list->last = new;
- list->nr++;
- list->sectors += b.sectors;
- return new;
-}
-
-static int bch2_bucket_is_movable(struct btree_trans *trans,
- struct move_bucket *b, u64 time)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bch_alloc_v4 _a;
- const struct bch_alloc_v4 *a;
- int ret;
-
- if (bch2_bucket_is_open(trans->c,
- b->k.bucket.inode,
- b->k.bucket.offset))
- return 0;
-
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
- b->k.bucket, BTREE_ITER_cached);
- ret = bkey_err(k);
- if (ret)
- return ret;
-
- struct bch_dev *ca = bch2_dev_tryget(c, k.k->p.inode);
- if (!ca)
- goto out;
-
- a = bch2_alloc_to_v4(k, &_a);
- b->k.gen = a->gen;
- b->sectors = bch2_bucket_sectors_dirty(*a);
- u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca);
-
- ret = lru_idx && lru_idx <= time;
-
- bch2_dev_put(ca);
-out:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static void move_buckets_wait(struct moving_context *ctxt,
- struct buckets_in_flight *list,
- bool flush)
-{
- struct move_bucket_in_flight *i;
- int ret;
-
- while ((i = list->first)) {
- if (flush)
- move_ctxt_wait_event(ctxt, !atomic_read(&i->count));
-
- if (atomic_read(&i->count))
- break;
-
- list->first = i->next;
- if (!list->first)
- list->last = NULL;
-
- list->nr--;
- list->sectors -= i->bucket.sectors;
-
- ret = rhashtable_remove_fast(&list->table, &i->hash,
- bch_move_bucket_params);
- BUG_ON(ret);
- kfree(i);
- }
-
- bch2_trans_unlock_long(ctxt->trans);
-}
-
-static bool bucket_in_flight(struct buckets_in_flight *list,
- struct move_bucket_key k)
-{
- return rhashtable_lookup_fast(&list->table, &k, bch_move_bucket_params);
-}
-
-typedef DARRAY(struct move_bucket) move_buckets;
-
-static int bch2_copygc_get_buckets(struct moving_context *ctxt,
- struct buckets_in_flight *buckets_in_flight,
- move_buckets *buckets)
-{
- struct btree_trans *trans = ctxt->trans;
- struct bch_fs *c = trans->c;
- size_t nr_to_get = max_t(size_t, 16U, buckets_in_flight->nr / 4);
- size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;
- int ret;
-
- move_buckets_wait(ctxt, buckets_in_flight, false);
-
- ret = bch2_btree_write_buffer_tryflush(trans);
- if (bch2_err_matches(ret, EROFS))
- return ret;
-
- if (bch2_fs_fatal_err_on(ret, c, "%s: from bch2_btree_write_buffer_tryflush()", bch2_err_str(ret)))
- return ret;
-
- bch2_trans_begin(trans);
-
- ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
- lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
- lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
- 0, k, ({
- struct move_bucket b = { .k.bucket = u64_to_bucket(k.k->p.offset) };
- int ret2 = 0;
-
- saw++;
-
- ret2 = bch2_bucket_is_movable(trans, &b, lru_pos_time(k.k->p));
- if (ret2 < 0)
- goto err;
-
- if (!ret2)
- not_movable++;
- else if (bucket_in_flight(buckets_in_flight, b.k))
- in_flight++;
- else {
- ret2 = darray_push(buckets, b);
- if (ret2)
- goto err;
- sectors += b.sectors;
- }
-
- ret2 = buckets->nr >= nr_to_get;
-err:
- ret2;
- }));
-
- pr_debug("have: %zu (%zu) saw %zu in flight %zu not movable %zu got %zu (%zu)/%zu buckets ret %i",
- buckets_in_flight->nr, buckets_in_flight->sectors,
- saw, in_flight, not_movable, buckets->nr, sectors, nr_to_get, ret);
-
- return ret < 0 ? ret : 0;
-}
-
-noinline
-static int bch2_copygc(struct moving_context *ctxt,
- struct buckets_in_flight *buckets_in_flight,
- bool *did_work)
-{
- struct btree_trans *trans = ctxt->trans;
- struct bch_fs *c = trans->c;
- struct data_update_opts data_opts = {
- .btree_insert_flags = BCH_WATERMARK_copygc,
- };
- move_buckets buckets = { 0 };
- struct move_bucket_in_flight *f;
- u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
- int ret = 0;
-
- ret = bch2_copygc_get_buckets(ctxt, buckets_in_flight, &buckets);
- if (ret)
- goto err;
-
- darray_for_each(buckets, i) {
- if (kthread_should_stop() || freezing(current))
- break;
-
- f = move_bucket_in_flight_add(buckets_in_flight, *i);
- ret = PTR_ERR_OR_ZERO(f);
- if (ret == -EEXIST) { /* rare race: copygc_get_buckets returned same bucket more than once */
- ret = 0;
- continue;
- }
- if (ret == -ENOMEM) { /* flush IO, continue later */
- ret = 0;
- break;
- }
-
- ret = bch2_evacuate_bucket(ctxt, f, f->bucket.k.bucket,
- f->bucket.k.gen, data_opts);
- if (ret)
- goto err;
-
- *did_work = true;
- }
-err:
- darray_exit(&buckets);
-
- /* no entries in LRU btree found, or got to end: */
- if (bch2_err_matches(ret, ENOENT))
- ret = 0;
-
- if (ret < 0 && !bch2_err_matches(ret, EROFS))
- bch_err_msg(c, ret, "from bch2_move_data()");
-
- moved = atomic64_read(&ctxt->stats->sectors_moved) - moved;
- trace_and_count(c, copygc, c, moved, 0, 0, 0);
- return ret;
-}
-
-/*
- * Copygc runs when the amount of fragmented data is above some arbitrary
- * threshold:
- *
- * The threshold at the limit - when the device is full - is the amount of space
- * we reserved in bch2_recalc_capacity; we can't have more than that amount of
- * disk space stranded due to fragmentation and store everything we have
- * promised to store.
- *
- * But we don't want to be running copygc unnecessarily when the device still
- * has plenty of free space - rather, we want copygc to smoothly run every so
- * often and continually reduce the amount of fragmented space as the device
- * fills up. So, we increase the threshold by half the current free space.
- */
-unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
-{
- s64 wait = S64_MAX, fragmented_allowed, fragmented;
-
- for_each_rw_member(c, ca) {
- struct bch_dev_usage usage = bch2_dev_usage_read(ca);
-
- fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
- ca->mi.bucket_size) >> 1);
- fragmented = 0;
-
- for (unsigned i = 0; i < BCH_DATA_NR; i++)
- if (data_type_movable(i))
- fragmented += usage.d[i].fragmented;
-
- wait = min(wait, max(0LL, fragmented_allowed - fragmented));
- }
-
- return wait;
-}
-
-void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
-{
- printbuf_tabstop_push(out, 32);
- prt_printf(out, "running:\t%u\n", c->copygc_running);
- prt_printf(out, "copygc_wait:\t%llu\n", c->copygc_wait);
- prt_printf(out, "copygc_wait_at:\t%llu\n", c->copygc_wait_at);
-
- prt_printf(out, "Currently waiting for:\t");
- prt_human_readable_u64(out, max(0LL, c->copygc_wait -
- atomic64_read(&c->io_clock[WRITE].now)) << 9);
- prt_newline(out);
-
- prt_printf(out, "Currently waiting since:\t");
- prt_human_readable_u64(out, max(0LL,
- atomic64_read(&c->io_clock[WRITE].now) -
- c->copygc_wait_at) << 9);
- prt_newline(out);
-
- prt_printf(out, "Currently calculated wait:\t");
- prt_human_readable_u64(out, bch2_copygc_wait_amount(c));
- prt_newline(out);
-}
-
-static int bch2_copygc_thread(void *arg)
-{
- struct bch_fs *c = arg;
- struct moving_context ctxt;
- struct bch_move_stats move_stats;
- struct io_clock *clock = &c->io_clock[WRITE];
- struct buckets_in_flight *buckets;
- u64 last, wait;
- int ret = 0;
-
- buckets = kzalloc(sizeof(struct buckets_in_flight), GFP_KERNEL);
- if (!buckets)
- return -ENOMEM;
- ret = rhashtable_init(&buckets->table, &bch_move_bucket_params);
- bch_err_msg(c, ret, "allocating copygc buckets in flight");
- if (ret) {
- kfree(buckets);
- return ret;
- }
-
- set_freezable();
-
- bch2_move_stats_init(&move_stats, "copygc");
- bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
- writepoint_ptr(&c->copygc_write_point),
- false);
-
- while (!ret && !kthread_should_stop()) {
- bool did_work = false;
-
- bch2_trans_unlock_long(ctxt.trans);
- cond_resched();
-
- if (!c->copy_gc_enabled) {
- move_buckets_wait(&ctxt, buckets, true);
- kthread_wait_freezable(c->copy_gc_enabled ||
- kthread_should_stop());
- }
-
- if (unlikely(freezing(current))) {
- move_buckets_wait(&ctxt, buckets, true);
- __refrigerator(false);
- continue;
- }
-
- last = atomic64_read(&clock->now);
- wait = bch2_copygc_wait_amount(c);
-
- if (wait > clock->max_slop) {
- c->copygc_wait_at = last;
- c->copygc_wait = last + wait;
- move_buckets_wait(&ctxt, buckets, true);
- trace_and_count(c, copygc_wait, c, wait, last + wait);
- bch2_kthread_io_clock_wait(clock, last + wait,
- MAX_SCHEDULE_TIMEOUT);
- continue;
- }
-
- c->copygc_wait = 0;
-
- c->copygc_running = true;
- ret = bch2_copygc(&ctxt, buckets, &did_work);
- c->copygc_running = false;
-
- wake_up(&c->copygc_running_wq);
-
- if (!wait && !did_work) {
- u64 min_member_capacity = bch2_min_rw_member_capacity(c);
-
- if (min_member_capacity == U64_MAX)
- min_member_capacity = 128 * 2048;
-
- move_buckets_wait(&ctxt, buckets, true);
- bch2_kthread_io_clock_wait(clock, last + (min_member_capacity >> 6),
- MAX_SCHEDULE_TIMEOUT);
- }
- }
-
- move_buckets_wait(&ctxt, buckets, true);
-
- rhashtable_destroy(&buckets->table);
- kfree(buckets);
- bch2_moving_ctxt_exit(&ctxt);
- bch2_move_stats_exit(&move_stats, c);
-
- return 0;
-}
-
-void bch2_copygc_stop(struct bch_fs *c)
-{
- if (c->copygc_thread) {
- kthread_stop(c->copygc_thread);
- put_task_struct(c->copygc_thread);
- }
- c->copygc_thread = NULL;
-}
-
-int bch2_copygc_start(struct bch_fs *c)
-{
- struct task_struct *t;
- int ret;
-
- if (c->copygc_thread)
- return 0;
-
- if (c->opts.nochanges)
- return 0;
-
- if (bch2_fs_init_fault("copygc_start"))
- return -ENOMEM;
-
- t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
- ret = PTR_ERR_OR_ZERO(t);
- bch_err_msg(c, ret, "creating copygc thread");
- if (ret)
- return ret;
-
- get_task_struct(t);
-
- c->copygc_thread = t;
- wake_up_process(c->copygc_thread);
-
- return 0;
-}
-
-void bch2_fs_copygc_init(struct bch_fs *c)
-{
- init_waitqueue_head(&c->copygc_running_wq);
- c->copygc_running = false;
-}
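
The comment above bch2_copygc_wait_amount() explains the trigger: each device may accumulate fragmented space up to roughly half of its remaining free space, and copygc waits until some device runs out of that headroom. A small stand-alone rendering of that per-device arithmetic follows; fake_dev and the sample numbers are invented, and the real code reads usage via bch2_dev_usage_read() and counts only movable data types.

/*
 * The per-device arithmetic from bch2_copygc_wait_amount(), reduced to plain
 * integers: allowed fragmentation is half the free space, and the wait is the
 * smallest remaining headroom across devices.
 */
#include <limits.h>
#include <stdio.h>

struct fake_dev {
	long long free_sectors;		/* available buckets * bucket_size */
	long long fragmented_sectors;	/* fragmented space in movable data types */
};

static long long copygc_wait_amount(const struct fake_dev *devs, int nr)
{
	long long wait = LLONG_MAX;

	for (int i = 0; i < nr; i++) {
		long long allowed = devs[i].free_sectors >> 1;
		long long headroom = allowed - devs[i].fragmented_sectors;

		if (headroom < 0)
			headroom = 0;
		if (headroom < wait)		/* copygc runs when any device runs out */
			wait = headroom;
	}

	return wait;
}

int main(void)
{
	struct fake_dev devs[] = {
		{ .free_sectors = 1 << 20, .fragmented_sectors = 100000 },
		{ .free_sectors = 1 << 18, .fragmented_sectors = 200000 },
	};

	/* the second device is over its threshold, so the wait is 0 sectors */
	printf("wait = %lld sectors\n", copygc_wait_amount(devs, 2));
	return 0;
}
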
diff --git a/fs/bcachefs/movinggc.h b/fs/bcachefs/movinggc.h
deleted file mode 100644
index ea181fef5bc9..000000000000
--- a/fs/bcachefs/movinggc.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_MOVINGGC_H
-#define _BCACHEFS_MOVINGGC_H
-
-unsigned long bch2_copygc_wait_amount(struct bch_fs *);
-void bch2_copygc_wait_to_text(struct printbuf *, struct bch_fs *);
-
-void bch2_copygc_stop(struct bch_fs *);
-int bch2_copygc_start(struct bch_fs *);
-void bch2_fs_copygc_init(struct bch_fs *);
-
-#endif /* _BCACHEFS_MOVINGGC_H */
diff --git a/fs/bcachefs/nocow_locking.c b/fs/bcachefs/nocow_locking.c
deleted file mode 100644
index 3c21981a4a1c..000000000000
--- a/fs/bcachefs/nocow_locking.c
+++ /dev/null
@@ -1,144 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "bkey_methods.h"
-#include "nocow_locking.h"
-#include "util.h"
-
-#include <linux/closure.h>
-
-bool bch2_bucket_nocow_is_locked(struct bucket_nocow_lock_table *t, struct bpos bucket)
-{
- u64 dev_bucket = bucket_to_u64(bucket);
- struct nocow_lock_bucket *l = bucket_nocow_lock(t, dev_bucket);
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(l->b); i++)
- if (l->b[i] == dev_bucket && atomic_read(&l->l[i]))
- return true;
- return false;
-}
-
-#define sign(v) (v < 0 ? -1 : v > 0 ? 1 : 0)
-
-void bch2_bucket_nocow_unlock(struct bucket_nocow_lock_table *t, struct bpos bucket, int flags)
-{
- u64 dev_bucket = bucket_to_u64(bucket);
- struct nocow_lock_bucket *l = bucket_nocow_lock(t, dev_bucket);
- int lock_val = flags ? 1 : -1;
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(l->b); i++)
- if (l->b[i] == dev_bucket) {
- int v = atomic_sub_return(lock_val, &l->l[i]);
-
- BUG_ON(v && sign(v) != lock_val);
- if (!v)
- closure_wake_up(&l->wait);
- return;
- }
-
- BUG();
-}
-
-bool __bch2_bucket_nocow_trylock(struct nocow_lock_bucket *l,
- u64 dev_bucket, int flags)
-{
- int v, lock_val = flags ? 1 : -1;
- unsigned i;
-
- spin_lock(&l->lock);
-
- for (i = 0; i < ARRAY_SIZE(l->b); i++)
- if (l->b[i] == dev_bucket)
- goto got_entry;
-
- for (i = 0; i < ARRAY_SIZE(l->b); i++)
- if (!atomic_read(&l->l[i])) {
- l->b[i] = dev_bucket;
- goto take_lock;
- }
-fail:
- spin_unlock(&l->lock);
- return false;
-got_entry:
- v = atomic_read(&l->l[i]);
- if (lock_val > 0 ? v < 0 : v > 0)
- goto fail;
-take_lock:
- v = atomic_read(&l->l[i]);
- /* Overflow? */
- if (v && sign(v + lock_val) != sign(v))
- goto fail;
-
- atomic_add(lock_val, &l->l[i]);
- spin_unlock(&l->lock);
- return true;
-}
-
-void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
- struct nocow_lock_bucket *l,
- u64 dev_bucket, int flags)
-{
- if (!__bch2_bucket_nocow_trylock(l, dev_bucket, flags)) {
- struct bch_fs *c = container_of(t, struct bch_fs, nocow_locks);
- u64 start_time = local_clock();
-
- __closure_wait_event(&l->wait, __bch2_bucket_nocow_trylock(l, dev_bucket, flags));
- bch2_time_stats_update(&c->times[BCH_TIME_nocow_lock_contended], start_time);
- }
-}
-
-void bch2_nocow_locks_to_text(struct printbuf *out, struct bucket_nocow_lock_table *t)
-
-{
- unsigned i, nr_zero = 0;
- struct nocow_lock_bucket *l;
-
- for (l = t->l; l < t->l + ARRAY_SIZE(t->l); l++) {
- unsigned v = 0;
-
- for (i = 0; i < ARRAY_SIZE(l->l); i++)
- v |= atomic_read(&l->l[i]);
-
- if (!v) {
- nr_zero++;
- continue;
- }
-
- if (nr_zero)
- prt_printf(out, "(%u empty entries)\n", nr_zero);
- nr_zero = 0;
-
- for (i = 0; i < ARRAY_SIZE(l->l); i++) {
- int v = atomic_read(&l->l[i]);
- if (v) {
- bch2_bpos_to_text(out, u64_to_bucket(l->b[i]));
- prt_printf(out, ": %s %u ", v < 0 ? "copy" : "update", abs(v));
- }
- }
- prt_newline(out);
- }
-
- if (nr_zero)
- prt_printf(out, "(%u empty entries)\n", nr_zero);
-}
-
-void bch2_fs_nocow_locking_exit(struct bch_fs *c)
-{
- struct bucket_nocow_lock_table *t = &c->nocow_locks;
-
- for (struct nocow_lock_bucket *l = t->l; l < t->l + ARRAY_SIZE(t->l); l++)
- for (unsigned j = 0; j < ARRAY_SIZE(l->l); j++)
- BUG_ON(atomic_read(&l->l[j]));
-}
-
-int bch2_fs_nocow_locking_init(struct bch_fs *c)
-{
- struct bucket_nocow_lock_table *t = &c->nocow_locks;
-
- for (struct nocow_lock_bucket *l = t->l; l < t->l + ARRAY_SIZE(t->l); l++)
- spin_lock_init(&l->lock);
-
- return 0;
-}
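
The nocow bucket locks above pack a two-mode shared lock into one signed counter: "update" holders push it positive and "copy" holders push it negative, so holders of the same mode share the lock while the two modes exclude each other. The minimal sketch below keeps only that sign logic; the real implementation shards counters across a hashed table (see bucket_nocow_lock() in the header that follows), takes a spinlock in the trylock path, guards against counter overflow, and waits on a closure instead of failing.

/*
 * Single-threaded model of one nocow lock slot: a signed count where the
 * sign encodes the lock mode and zero means unlocked.
 */
#include <stdbool.h>
#include <stdio.h>

#define sign(v)	((v) < 0 ? -1 : (v) > 0 ? 1 : 0)

struct nocow_lock { int count; };

static bool nocow_trylock(struct nocow_lock *l, bool update)
{
	int lock_val = update ? 1 : -1;

	/* a holder of the opposite mode blocks us */
	if (l->count && sign(l->count) != lock_val)
		return false;

	l->count += lock_val;
	return true;
}

static void nocow_unlock(struct nocow_lock *l, bool update)
{
	l->count -= update ? 1 : -1;
	/* real code: closure_wake_up() when the count reaches zero */
}

int main(void)
{
	struct nocow_lock l = { 0 };

	printf("%d\n", nocow_trylock(&l, false));	/* 1: first copy lock */
	printf("%d\n", nocow_trylock(&l, false));	/* 1: copy locks share */
	printf("%d\n", nocow_trylock(&l, true));	/* 0: update excluded */
	nocow_unlock(&l, false);
	nocow_unlock(&l, false);
	printf("%d\n", nocow_trylock(&l, true));	/* 1: now free */
	return 0;
}
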
diff --git a/fs/bcachefs/nocow_locking.h b/fs/bcachefs/nocow_locking.h
deleted file mode 100644
index f9d6a426a960..000000000000
--- a/fs/bcachefs/nocow_locking.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_NOCOW_LOCKING_H
-#define _BCACHEFS_NOCOW_LOCKING_H
-
-#include "bcachefs.h"
-#include "alloc_background.h"
-#include "nocow_locking_types.h"
-
-#include <linux/hash.h>
-
-static inline struct nocow_lock_bucket *bucket_nocow_lock(struct bucket_nocow_lock_table *t,
- u64 dev_bucket)
-{
- unsigned h = hash_64(dev_bucket, BUCKET_NOCOW_LOCKS_BITS);
-
- return t->l + (h & (BUCKET_NOCOW_LOCKS - 1));
-}
-
-#define BUCKET_NOCOW_LOCK_UPDATE (1 << 0)
-
-bool bch2_bucket_nocow_is_locked(struct bucket_nocow_lock_table *, struct bpos);
-void bch2_bucket_nocow_unlock(struct bucket_nocow_lock_table *, struct bpos, int);
-bool __bch2_bucket_nocow_trylock(struct nocow_lock_bucket *, u64, int);
-void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *,
- struct nocow_lock_bucket *, u64, int);
-
-static inline void bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
- struct bpos bucket, int flags)
-{
- u64 dev_bucket = bucket_to_u64(bucket);
- struct nocow_lock_bucket *l = bucket_nocow_lock(t, dev_bucket);
-
- __bch2_bucket_nocow_lock(t, l, dev_bucket, flags);
-}
-
-static inline bool bch2_bucket_nocow_trylock(struct bucket_nocow_lock_table *t,
- struct bpos bucket, int flags)
-{
- u64 dev_bucket = bucket_to_u64(bucket);
- struct nocow_lock_bucket *l = bucket_nocow_lock(t, dev_bucket);
-
- return __bch2_bucket_nocow_trylock(l, dev_bucket, flags);
-}
-
-void bch2_nocow_locks_to_text(struct printbuf *, struct bucket_nocow_lock_table *);
-
-void bch2_fs_nocow_locking_exit(struct bch_fs *);
-int bch2_fs_nocow_locking_init(struct bch_fs *);
-
-#endif /* _BCACHEFS_NOCOW_LOCKING_H */
diff --git a/fs/bcachefs/nocow_locking_types.h b/fs/bcachefs/nocow_locking_types.h
deleted file mode 100644
index bd12bf677924..000000000000
--- a/fs/bcachefs/nocow_locking_types.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_NOCOW_LOCKING_TYPES_H
-#define _BCACHEFS_NOCOW_LOCKING_TYPES_H
-
-#define BUCKET_NOCOW_LOCKS_BITS 10
-#define BUCKET_NOCOW_LOCKS (1U << BUCKET_NOCOW_LOCKS_BITS)
-
-struct nocow_lock_bucket {
- struct closure_waitlist wait;
- spinlock_t lock;
- u64 b[4];
- atomic_t l[4];
-} __aligned(SMP_CACHE_BYTES);
-
-struct bucket_nocow_lock_table {
- struct nocow_lock_bucket l[BUCKET_NOCOW_LOCKS];
-};
-
-#endif /* _BCACHEFS_NOCOW_LOCKING_TYPES_H */
-
diff --git a/fs/bcachefs/opts.c b/fs/bcachefs/opts.c
deleted file mode 100644
index 0e2ee262fbd4..000000000000
--- a/fs/bcachefs/opts.c
+++ /dev/null
@@ -1,734 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/kernel.h>
-
-#include "bcachefs.h"
-#include "compress.h"
-#include "disk_groups.h"
-#include "error.h"
-#include "opts.h"
-#include "recovery_passes.h"
-#include "super-io.h"
-#include "util.h"
-
-#define x(t, n, ...) [n] = #t,
-
-const char * const bch2_error_actions[] = {
- BCH_ERROR_ACTIONS()
- NULL
-};
-
-const char * const bch2_fsck_fix_opts[] = {
- BCH_FIX_ERRORS_OPTS()
- NULL
-};
-
-const char * const bch2_version_upgrade_opts[] = {
- BCH_VERSION_UPGRADE_OPTS()
- NULL
-};
-
-const char * const bch2_sb_features[] = {
- BCH_SB_FEATURES()
- NULL
-};
-
-const char * const bch2_sb_compat[] = {
- BCH_SB_COMPAT()
- NULL
-};
-
-const char * const __bch2_btree_ids[] = {
- BCH_BTREE_IDS()
- NULL
-};
-
-static const char * const __bch2_csum_types[] = {
- BCH_CSUM_TYPES()
- NULL
-};
-
-const char * const bch2_csum_opts[] = {
- BCH_CSUM_OPTS()
- NULL
-};
-
-static const char * const __bch2_compression_types[] = {
- BCH_COMPRESSION_TYPES()
- NULL
-};
-
-const char * const bch2_compression_opts[] = {
- BCH_COMPRESSION_OPTS()
- NULL
-};
-
-const char * const __bch2_str_hash_types[] = {
- BCH_STR_HASH_TYPES()
- NULL
-};
-
-const char * const bch2_str_hash_opts[] = {
- BCH_STR_HASH_OPTS()
- NULL
-};
-
-const char * const __bch2_data_types[] = {
- BCH_DATA_TYPES()
- NULL
-};
-
-const char * const bch2_member_states[] = {
- BCH_MEMBER_STATES()
- NULL
-};
-
-static const char * const __bch2_jset_entry_types[] = {
- BCH_JSET_ENTRY_TYPES()
- NULL
-};
-
-static const char * const __bch2_fs_usage_types[] = {
- BCH_FS_USAGE_TYPES()
- NULL
-};
-
-#undef x
-
-static void prt_str_opt_boundscheck(struct printbuf *out, const char * const opts[],
- unsigned nr, const char *type, unsigned idx)
-{
- if (idx < nr)
- prt_str(out, opts[idx]);
- else
- prt_printf(out, "(unknown %s %u)", type, idx);
-}
-
-#define PRT_STR_OPT_BOUNDSCHECKED(name, type) \
-void bch2_prt_##name(struct printbuf *out, type t) \
-{ \
- prt_str_opt_boundscheck(out, __bch2_##name##s, ARRAY_SIZE(__bch2_##name##s) - 1, #name, t);\
-}
-
-PRT_STR_OPT_BOUNDSCHECKED(jset_entry_type, enum bch_jset_entry_type);
-PRT_STR_OPT_BOUNDSCHECKED(fs_usage_type, enum bch_fs_usage_type);
-PRT_STR_OPT_BOUNDSCHECKED(data_type, enum bch_data_type);
-PRT_STR_OPT_BOUNDSCHECKED(csum_type, enum bch_csum_type);
-PRT_STR_OPT_BOUNDSCHECKED(compression_type, enum bch_compression_type);
-PRT_STR_OPT_BOUNDSCHECKED(str_hash_type, enum bch_str_hash_type);
-
-static int bch2_opt_fix_errors_parse(struct bch_fs *c, const char *val, u64 *res,
- struct printbuf *err)
-{
- if (!val) {
- *res = FSCK_FIX_yes;
- } else {
- int ret = match_string(bch2_fsck_fix_opts, -1, val);
-
- if (ret < 0 && err)
- prt_str(err, "fix_errors: invalid selection");
- if (ret < 0)
- return ret;
- *res = ret;
- }
-
- return 0;
-}
-
-static void bch2_opt_fix_errors_to_text(struct printbuf *out,
- struct bch_fs *c,
- struct bch_sb *sb,
- u64 v)
-{
- prt_str(out, bch2_fsck_fix_opts[v]);
-}
-
-#define bch2_opt_fix_errors (struct bch_opt_fn) { \
- .parse = bch2_opt_fix_errors_parse, \
- .to_text = bch2_opt_fix_errors_to_text, \
-}
-
-const char * const bch2_d_types[BCH_DT_MAX] = {
- [DT_UNKNOWN] = "unknown",
- [DT_FIFO] = "fifo",
- [DT_CHR] = "chr",
- [DT_DIR] = "dir",
- [DT_BLK] = "blk",
- [DT_REG] = "reg",
- [DT_LNK] = "lnk",
- [DT_SOCK] = "sock",
- [DT_WHT] = "whiteout",
- [DT_SUBVOL] = "subvol",
-};
-
-u64 BCH2_NO_SB_OPT(const struct bch_sb *sb)
-{
- BUG();
-}
-
-void SET_BCH2_NO_SB_OPT(struct bch_sb *sb, u64 v)
-{
- BUG();
-}
-
-void bch2_opts_apply(struct bch_opts *dst, struct bch_opts src)
-{
-#define x(_name, ...) \
- if (opt_defined(src, _name)) \
- opt_set(*dst, _name, src._name);
-
- BCH_OPTS()
-#undef x
-}
-
-bool bch2_opt_defined_by_id(const struct bch_opts *opts, enum bch_opt_id id)
-{
- switch (id) {
-#define x(_name, ...) \
- case Opt_##_name: \
- return opt_defined(*opts, _name);
- BCH_OPTS()
-#undef x
- default:
- BUG();
- }
-}
-
-u64 bch2_opt_get_by_id(const struct bch_opts *opts, enum bch_opt_id id)
-{
- switch (id) {
-#define x(_name, ...) \
- case Opt_##_name: \
- return opts->_name;
- BCH_OPTS()
-#undef x
- default:
- BUG();
- }
-}
-
-void bch2_opt_set_by_id(struct bch_opts *opts, enum bch_opt_id id, u64 v)
-{
- switch (id) {
-#define x(_name, ...) \
- case Opt_##_name: \
- opt_set(*opts, _name, v); \
- break;
- BCH_OPTS()
-#undef x
- default:
- BUG();
- }
-}
-
-const struct bch_option bch2_opt_table[] = {
-#define OPT_BOOL() .type = BCH_OPT_BOOL, .min = 0, .max = 2
-#define OPT_UINT(_min, _max) .type = BCH_OPT_UINT, \
- .min = _min, .max = _max
-#define OPT_STR(_choices) .type = BCH_OPT_STR, \
- .min = 0, .max = ARRAY_SIZE(_choices) - 1, \
- .choices = _choices
-#define OPT_STR_NOLIMIT(_choices) .type = BCH_OPT_STR, \
- .min = 0, .max = U64_MAX, \
- .choices = _choices
-#define OPT_BITFIELD(_choices) .type = BCH_OPT_BITFIELD, \
- .choices = _choices
-#define OPT_FN(_fn) .type = BCH_OPT_FN, .fn = _fn
-
-#define x(_name, _bits, _flags, _type, _sb_opt, _default, _hint, _help) \
- [Opt_##_name] = { \
- .attr = { \
- .name = #_name, \
- .mode = (_flags) & OPT_RUNTIME ? 0644 : 0444, \
- }, \
- .flags = _flags, \
- .hint = _hint, \
- .help = _help, \
- .get_sb = _sb_opt, \
- .set_sb = SET_##_sb_opt, \
- _type \
- },
-
- BCH_OPTS()
-#undef x
-};
-
-int bch2_opt_lookup(const char *name)
-{
- const struct bch_option *i;
-
- for (i = bch2_opt_table;
- i < bch2_opt_table + ARRAY_SIZE(bch2_opt_table);
- i++)
- if (!strcmp(name, i->attr.name))
- return i - bch2_opt_table;
-
- return -1;
-}
-
-struct synonym {
- const char *s1, *s2;
-};
-
-static const struct synonym bch_opt_synonyms[] = {
- { "quota", "usrquota" },
-};
-
-static int bch2_mount_opt_lookup(const char *name)
-{
- const struct synonym *i;
-
- for (i = bch_opt_synonyms;
- i < bch_opt_synonyms + ARRAY_SIZE(bch_opt_synonyms);
- i++)
- if (!strcmp(name, i->s1))
- name = i->s2;
-
- return bch2_opt_lookup(name);
-}
-
-int bch2_opt_validate(const struct bch_option *opt, u64 v, struct printbuf *err)
-{
- if (v < opt->min) {
- if (err)
- prt_printf(err, "%s: too small (min %llu)",
- opt->attr.name, opt->min);
- return -BCH_ERR_ERANGE_option_too_small;
- }
-
- if (opt->max && v >= opt->max) {
- if (err)
- prt_printf(err, "%s: too big (max %llu)",
- opt->attr.name, opt->max);
- return -BCH_ERR_ERANGE_option_too_big;
- }
-
- if ((opt->flags & OPT_SB_FIELD_SECTORS) && (v & 511)) {
- if (err)
- prt_printf(err, "%s: not a multiple of 512",
- opt->attr.name);
- return -BCH_ERR_opt_parse_error;
- }
-
- if ((opt->flags & OPT_MUST_BE_POW_2) && !is_power_of_2(v)) {
- if (err)
- prt_printf(err, "%s: must be a power of two",
- opt->attr.name);
- return -BCH_ERR_opt_parse_error;
- }
-
- if (opt->fn.validate)
- return opt->fn.validate(v, err);
-
- return 0;
-}
-
-int bch2_opt_parse(struct bch_fs *c,
- const struct bch_option *opt,
- const char *val, u64 *res,
- struct printbuf *err)
-{
- ssize_t ret;
-
- switch (opt->type) {
- case BCH_OPT_BOOL:
- if (val) {
- ret = kstrtou64(val, 10, res);
- } else {
- ret = 0;
- *res = 1;
- }
-
- if (ret < 0 || (*res != 0 && *res != 1)) {
- if (err)
- prt_printf(err, "%s: must be bool", opt->attr.name);
- return ret < 0 ? ret : -BCH_ERR_option_not_bool;
- }
- break;
- case BCH_OPT_UINT:
- if (!val) {
- prt_printf(err, "%s: required value",
- opt->attr.name);
- return -EINVAL;
- }
-
- ret = opt->flags & OPT_HUMAN_READABLE
- ? bch2_strtou64_h(val, res)
- : kstrtou64(val, 10, res);
- if (ret < 0) {
- if (err)
- prt_printf(err, "%s: must be a number",
- opt->attr.name);
- return ret;
- }
- break;
- case BCH_OPT_STR:
- if (!val) {
- prt_printf(err, "%s: required value",
- opt->attr.name);
- return -EINVAL;
- }
-
- ret = match_string(opt->choices, -1, val);
- if (ret < 0) {
- if (err)
- prt_printf(err, "%s: invalid selection",
- opt->attr.name);
- return ret;
- }
-
- *res = ret;
- break;
- case BCH_OPT_BITFIELD: {
- s64 v = bch2_read_flag_list(val, opt->choices);
- if (v < 0)
- return v;
- *res = v;
- break;
- }
- case BCH_OPT_FN:
- ret = opt->fn.parse(c, val, res, err);
-
- if (ret == -BCH_ERR_option_needs_open_fs)
- return ret;
-
- if (ret < 0) {
- if (err)
- prt_printf(err, "%s: parse error",
- opt->attr.name);
- return ret;
- }
- }
-
- return bch2_opt_validate(opt, *res, err);
-}
-
-void bch2_opt_to_text(struct printbuf *out,
- struct bch_fs *c, struct bch_sb *sb,
- const struct bch_option *opt, u64 v,
- unsigned flags)
-{
- if (flags & OPT_SHOW_MOUNT_STYLE) {
- if (opt->type == BCH_OPT_BOOL) {
- prt_printf(out, "%s%s",
- v ? "" : "no",
- opt->attr.name);
- return;
- }
-
- prt_printf(out, "%s=", opt->attr.name);
- }
-
- switch (opt->type) {
- case BCH_OPT_BOOL:
- case BCH_OPT_UINT:
- if (opt->flags & OPT_HUMAN_READABLE)
- prt_human_readable_u64(out, v);
- else
- prt_printf(out, "%lli", v);
- break;
- case BCH_OPT_STR:
- if (v < opt->min || v >= opt->max)
- prt_printf(out, "(invalid option %lli)", v);
- else if (flags & OPT_SHOW_FULL_LIST)
- prt_string_option(out, opt->choices, v);
- else
- prt_str(out, opt->choices[v]);
- break;
- case BCH_OPT_BITFIELD:
- prt_bitflags(out, opt->choices, v);
- break;
- case BCH_OPT_FN:
- opt->fn.to_text(out, c, sb, v);
- break;
- default:
- BUG();
- }
-}
-
-void bch2_opts_to_text(struct printbuf *out,
- struct bch_opts opts,
- struct bch_fs *c, struct bch_sb *sb,
- unsigned show_mask, unsigned hide_mask,
- unsigned flags)
-{
- bool first = true;
-
- for (enum bch_opt_id i = 0; i < bch2_opts_nr; i++) {
- const struct bch_option *opt = &bch2_opt_table[i];
-
- if ((opt->flags & hide_mask) || !(opt->flags & show_mask))
- continue;
-
- u64 v = bch2_opt_get_by_id(&opts, i);
- if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
- continue;
-
- if (!first)
- prt_char(out, ',');
- first = false;
-
- bch2_opt_to_text(out, c, sb, opt, v, flags);
- }
-}
-
-int bch2_opt_check_may_set(struct bch_fs *c, int id, u64 v)
-{
- int ret = 0;
-
- switch (id) {
- case Opt_compression:
- case Opt_background_compression:
- ret = bch2_check_set_has_compressed_data(c, v);
- break;
- case Opt_erasure_code:
- if (v)
- bch2_check_set_feature(c, BCH_FEATURE_ec);
- break;
- }
-
- return ret;
-}
-
-int bch2_opts_check_may_set(struct bch_fs *c)
-{
- unsigned i;
- int ret;
-
- for (i = 0; i < bch2_opts_nr; i++) {
- ret = bch2_opt_check_may_set(c, i,
- bch2_opt_get_by_id(&c->opts, i));
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-int bch2_parse_one_mount_opt(struct bch_fs *c, struct bch_opts *opts,
- struct printbuf *parse_later,
- const char *name, const char *val)
-{
- struct printbuf err = PRINTBUF;
- u64 v;
- int ret, id;
-
- id = bch2_mount_opt_lookup(name);
-
- /* Check for the form "noopt", negation of a boolean opt: */
- if (id < 0 &&
- !val &&
- !strncmp("no", name, 2)) {
- id = bch2_mount_opt_lookup(name + 2);
- val = "0";
- }
-
- /* Unknown options are ignored: */
- if (id < 0)
- return 0;
-
- if (!(bch2_opt_table[id].flags & OPT_MOUNT))
- goto bad_opt;
-
- if (id == Opt_acl &&
- !IS_ENABLED(CONFIG_BCACHEFS_POSIX_ACL))
- goto bad_opt;
-
- if ((id == Opt_usrquota ||
- id == Opt_grpquota) &&
- !IS_ENABLED(CONFIG_BCACHEFS_QUOTA))
- goto bad_opt;
-
- ret = bch2_opt_parse(c, &bch2_opt_table[id], val, &v, &err);
- if (ret == -BCH_ERR_option_needs_open_fs && parse_later) {
- prt_printf(parse_later, "%s=%s,", name, val);
- if (parse_later->allocation_failure) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = 0;
- goto out;
- }
-
- if (ret < 0)
- goto bad_val;
-
- if (opts)
- bch2_opt_set_by_id(opts, id, v);
-
- ret = 0;
- goto out;
-
-bad_opt:
- pr_err("Bad mount option %s", name);
- ret = -BCH_ERR_option_name;
- goto out;
-
-bad_val:
- pr_err("Invalid mount option %s", err.buf);
- ret = -BCH_ERR_option_value;
-
-out:
- printbuf_exit(&err);
- return ret;
-}
-
-int bch2_parse_mount_opts(struct bch_fs *c, struct bch_opts *opts,
- struct printbuf *parse_later, char *options)
-{
- char *copied_opts, *copied_opts_start;
- char *opt, *name, *val;
- int ret;
-
- if (!options)
- return 0;
-
- /*
- * sys_fsconfig() is now occasionally providing us with option lists
- * starting with a comma - weird.
- */
- if (*options == ',')
- options++;
-
- copied_opts = kstrdup(options, GFP_KERNEL);
- if (!copied_opts)
- return -ENOMEM;
- copied_opts_start = copied_opts;
-
- while ((opt = strsep(&copied_opts, ",")) != NULL) {
- if (!*opt)
- continue;
-
- name = strsep(&opt, "=");
- val = opt;
-
- ret = bch2_parse_one_mount_opt(c, opts, parse_later, name, val);
- if (ret < 0)
- goto out;
- }
-
- ret = 0;
-
-out:
- kfree(copied_opts_start);
- return ret;
-}
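The loop above splits the option string destructively: strsep() on ',' yields one token per option, a second strsep() on '=' separates the name from its value, and empty tokens (such as the one produced by a leading comma) are skipped. A standalone userspace sketch of the same tokenization, with a made-up handle_opt() standing in for bch2_parse_one_mount_opt():

	#define _DEFAULT_SOURCE
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* stand-in for bch2_parse_one_mount_opt(); just reports what it was given */
	static int handle_opt(const char *name, const char *val)
	{
		printf("opt '%s' = '%s'\n", name, val ? val : "(none)");
		return 0;
	}

	static int parse_mount_opts(const char *options)
	{
		char *copy, *copy_start, *opt, *name, *val;
		int ret = 0;

		if (!options)
			return 0;
		if (*options == ',')	/* tolerate a leading comma, as the kernel does */
			options++;

		copy = strdup(options);
		if (!copy)
			return -1;
		copy_start = copy;

		while ((opt = strsep(&copy, ",")) != NULL) {
			if (!*opt)	/* skip empty tokens */
				continue;
			name = strsep(&opt, "=");
			val  = opt;	/* NULL if there was no '=' */
			ret  = handle_opt(name, val);
			if (ret < 0)
				break;
		}

		free(copy_start);
		return ret;
	}

	int main(void)
	{
		return parse_mount_opts(",degraded,metadata_replicas=2,noacl") < 0;
	}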
-
-u64 bch2_opt_from_sb(struct bch_sb *sb, enum bch_opt_id id)
-{
- const struct bch_option *opt = bch2_opt_table + id;
- u64 v;
-
- v = opt->get_sb(sb);
-
- if (opt->flags & OPT_SB_FIELD_ILOG2)
- v = 1ULL << v;
-
- if (opt->flags & OPT_SB_FIELD_SECTORS)
- v <<= 9;
-
- return v;
-}
-
-/*
- * Initial options from superblock - here we don't want any options undefined,
- * any options the superblock doesn't specify are set to 0:
- */
-int bch2_opts_from_sb(struct bch_opts *opts, struct bch_sb *sb)
-{
- unsigned id;
-
- for (id = 0; id < bch2_opts_nr; id++) {
- const struct bch_option *opt = bch2_opt_table + id;
-
- if (opt->get_sb == BCH2_NO_SB_OPT)
- continue;
-
- bch2_opt_set_by_id(opts, id, bch2_opt_from_sb(sb, id));
- }
-
- return 0;
-}
-
-struct bch_dev_sb_opt_set {
- void (*set_sb)(struct bch_member *, u64);
-};
-
-static const struct bch_dev_sb_opt_set bch2_dev_sb_opt_setters[] = {
-#define x(n, set) [Opt_##n] = { .set_sb = SET_##set },
- BCH_DEV_OPT_SETTERS()
-#undef x
-};
-
-void __bch2_opt_set_sb(struct bch_sb *sb, int dev_idx,
- const struct bch_option *opt, u64 v)
-{
- enum bch_opt_id id = opt - bch2_opt_table;
-
- if (opt->flags & OPT_SB_FIELD_SECTORS)
- v >>= 9;
-
- if (opt->flags & OPT_SB_FIELD_ILOG2)
- v = ilog2(v);
-
- if (opt->flags & OPT_SB_FIELD_ONE_BIAS)
- v++;
-
- if (opt->flags & OPT_FS) {
- if (opt->set_sb != SET_BCH2_NO_SB_OPT)
- opt->set_sb(sb, v);
- }
-
- if ((opt->flags & OPT_DEVICE) && dev_idx >= 0) {
- if (WARN(!bch2_member_exists(sb, dev_idx),
- "tried to set device option %s on nonexistent device %i",
- opt->attr.name, dev_idx))
- return;
-
- struct bch_member *m = bch2_members_v2_get_mut(sb, dev_idx);
-
- const struct bch_dev_sb_opt_set *set = bch2_dev_sb_opt_setters + id;
- if (set->set_sb)
- set->set_sb(m, v);
- else
- pr_err("option %s cannot be set via opt_set_sb()", opt->attr.name);
- }
-}
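bch2_opt_from_sb() and __bch2_opt_set_sb() apply inverse transforms for the packed superblock encodings: OPT_SB_FIELD_SECTORS fields are stored shifted down by 9 (in 512-byte sectors) and OPT_SB_FIELD_ILOG2 fields store only the log2 of the value, while OPT_SB_FIELD_ONE_BIAS adds one on the set side so a stored zero can mean "unset". A tiny worked example of the sectors+ilog2 round trip, modelled on encoded_extent_max (whose 64 KiB default ends up stored as 7); this is an illustrative sketch, not bcachefs code:

	#include <assert.h>
	#include <stdint.h>

	/* encode as __bch2_opt_set_sb() does for a SECTORS|ILOG2 option */
	static uint64_t encode_sectors_ilog2(uint64_t bytes)
	{
		uint64_t sectors = bytes >> 9;	/* OPT_SB_FIELD_SECTORS */
		unsigned log2 = 0;		/* OPT_SB_FIELD_ILOG2 */

		while ((1ULL << log2) < sectors)	/* exact for power-of-two sizes */
			log2++;
		return log2;
	}

	/* decode as bch2_opt_from_sb() does */
	static uint64_t decode_sectors_ilog2(uint64_t sb_field)
	{
		return (1ULL << sb_field) << 9;
	}

	int main(void)
	{
		uint64_t sb = encode_sectors_ilog2(64 << 10);	/* 64 KiB -> 128 sectors -> 7 */

		assert(sb == 7);
		assert(decode_sectors_ilog2(sb) == 64 << 10);
		return 0;
	}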
-
-void bch2_opt_set_sb(struct bch_fs *c, struct bch_dev *ca,
- const struct bch_option *opt, u64 v)
-{
- mutex_lock(&c->sb_lock);
- __bch2_opt_set_sb(c->disk_sb.sb, ca ? ca->dev_idx : -1, opt, v);
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-}
-
-/* io opts: */
-
-struct bch_io_opts bch2_opts_to_inode_opts(struct bch_opts src)
-{
- return (struct bch_io_opts) {
-#define x(_name, _bits) ._name = src._name,
- BCH_INODE_OPTS()
-#undef x
- };
-}
-
-bool bch2_opt_is_inode_opt(enum bch_opt_id id)
-{
- static const enum bch_opt_id inode_opt_list[] = {
-#define x(_name, _bits) Opt_##_name,
- BCH_INODE_OPTS()
-#undef x
- };
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(inode_opt_list); i++)
- if (inode_opt_list[i] == id)
- return true;
-
- return false;
-}
diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
deleted file mode 100644
index 23dda014e331..000000000000
--- a/fs/bcachefs/opts.h
+++ /dev/null
@@ -1,637 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_OPTS_H
-#define _BCACHEFS_OPTS_H
-
-#include <linux/bug.h>
-#include <linux/log2.h>
-#include <linux/string.h>
-#include <linux/sysfs.h>
-#include "bcachefs_format.h"
-
-struct bch_fs;
-
-extern const char * const bch2_error_actions[];
-extern const char * const bch2_fsck_fix_opts[];
-extern const char * const bch2_version_upgrade_opts[];
-extern const char * const bch2_sb_features[];
-extern const char * const bch2_sb_compat[];
-extern const char * const __bch2_btree_ids[];
-extern const char * const bch2_csum_opts[];
-extern const char * const bch2_compression_opts[];
-extern const char * const __bch2_str_hash_types[];
-extern const char * const bch2_str_hash_opts[];
-extern const char * const __bch2_data_types[];
-extern const char * const bch2_member_states[];
-extern const char * const bch2_d_types[];
-
-void bch2_prt_jset_entry_type(struct printbuf *, enum bch_jset_entry_type);
-void bch2_prt_fs_usage_type(struct printbuf *, enum bch_fs_usage_type);
-void bch2_prt_data_type(struct printbuf *, enum bch_data_type);
-void bch2_prt_csum_type(struct printbuf *, enum bch_csum_type);
-void bch2_prt_compression_type(struct printbuf *, enum bch_compression_type);
-void bch2_prt_str_hash_type(struct printbuf *, enum bch_str_hash_type);
-
-static inline const char *bch2_d_type_str(unsigned d_type)
-{
- return (d_type < BCH_DT_MAX ? bch2_d_types[d_type] : NULL) ?: "(bad d_type)";
-}
-
-/*
- * Mount options; we also store defaults in the superblock.
- *
- * Also exposed via sysfs: if an option is writeable, and it's also stored in
- * the superblock, changing it via sysfs (currently? might change this) also
- * updates the superblock.
- *
- * We store options as signed integers, where -1 means undefined. This means we
- * can pass the mount options to bch2_fs_alloc() as a whole struct, and then only
- * apply the options from that struct that are defined.
- */
-
-/* dummy option, for options that aren't stored in the superblock */
-u64 BCH2_NO_SB_OPT(const struct bch_sb *);
-void SET_BCH2_NO_SB_OPT(struct bch_sb *, u64);
-
-/* When can be set: */
-enum opt_flags {
- OPT_FS = BIT(0), /* Filesystem option */
- OPT_DEVICE = BIT(1), /* Device option */
- OPT_INODE = BIT(2), /* Inode option */
- OPT_FORMAT = BIT(3), /* May be specified at format time */
- OPT_MOUNT = BIT(4), /* May be specified at mount time */
- OPT_RUNTIME = BIT(5), /* May be specified at runtime */
- OPT_HUMAN_READABLE = BIT(6),
- OPT_MUST_BE_POW_2 = BIT(7), /* Must be power of 2 */
- OPT_SB_FIELD_SECTORS = BIT(8), /* Superblock field is >> 9 of actual value */
- OPT_SB_FIELD_ILOG2 = BIT(9), /* Superblock field is ilog2 of actual value */
- OPT_SB_FIELD_ONE_BIAS = BIT(10), /* 0 means default value */
- OPT_HIDDEN = BIT(11),
-};
-
-enum opt_type {
- BCH_OPT_BOOL,
- BCH_OPT_UINT,
- BCH_OPT_STR,
- BCH_OPT_BITFIELD,
- BCH_OPT_FN,
-};
-
-struct bch_opt_fn {
- int (*parse)(struct bch_fs *, const char *, u64 *, struct printbuf *);
- void (*to_text)(struct printbuf *, struct bch_fs *, struct bch_sb *, u64);
- int (*validate)(u64, struct printbuf *);
-};
-
-/**
- * x(name, in-memory type, mode, type, sb_opt, default, hint, help)
- *
- * @name - name of mount option, sysfs attribute, and struct bch_opts
- * member
- *
- * @mode - when opt may be set
- *
- * @sb_opt	- name of corresponding superblock option
- *
- * @type - one of OPT_BOOL, OPT_UINT, OPT_STR
- */
-
-/*
- * XXX: add fields for
- * - default value
- * - helptext
- */
-
-#ifdef __KERNEL__
-#define RATELIMIT_ERRORS_DEFAULT true
-#else
-#define RATELIMIT_ERRORS_DEFAULT false
-#endif
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-#define BCACHEFS_VERBOSE_DEFAULT true
-#else
-#define BCACHEFS_VERBOSE_DEFAULT false
-#endif
-
-#define BCH_FIX_ERRORS_OPTS() \
- x(exit, 0) \
- x(yes, 1) \
- x(no, 2) \
- x(ask, 3)
-
-enum fsck_err_opts {
-#define x(t, n) FSCK_FIX_##t,
- BCH_FIX_ERRORS_OPTS()
-#undef x
-};
-
-#define BCH_OPTS() \
- x(block_size, u16, \
- OPT_FS|OPT_FORMAT| \
- OPT_HUMAN_READABLE|OPT_MUST_BE_POW_2|OPT_SB_FIELD_SECTORS, \
- OPT_UINT(512, 1U << 16), \
- BCH_SB_BLOCK_SIZE, 8, \
- "size", NULL) \
- x(btree_node_size, u32, \
- OPT_FS|OPT_FORMAT| \
- OPT_HUMAN_READABLE|OPT_MUST_BE_POW_2|OPT_SB_FIELD_SECTORS, \
- OPT_UINT(512, 1U << 20), \
- BCH_SB_BTREE_NODE_SIZE, 512, \
- "size", "Btree node size, default 256k") \
- x(errors, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_STR(bch2_error_actions), \
- BCH_SB_ERROR_ACTION, BCH_ON_ERROR_fix_safe, \
- NULL, "Action to take on filesystem error") \
- x(metadata_replicas, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_UINT(1, BCH_REPLICAS_MAX), \
- BCH_SB_META_REPLICAS_WANT, 1, \
- "#", "Number of metadata replicas") \
- x(data_replicas, u8, \
- OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_UINT(1, BCH_REPLICAS_MAX), \
- BCH_SB_DATA_REPLICAS_WANT, 1, \
- "#", "Number of data replicas") \
- x(metadata_replicas_required, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT, \
- OPT_UINT(1, BCH_REPLICAS_MAX), \
- BCH_SB_META_REPLICAS_REQ, 1, \
- "#", NULL) \
- x(data_replicas_required, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT, \
- OPT_UINT(1, BCH_REPLICAS_MAX), \
- BCH_SB_DATA_REPLICAS_REQ, 1, \
- "#", NULL) \
- x(encoded_extent_max, u32, \
- OPT_FS|OPT_FORMAT| \
- OPT_HUMAN_READABLE|OPT_MUST_BE_POW_2|OPT_SB_FIELD_SECTORS|OPT_SB_FIELD_ILOG2,\
- OPT_UINT(4096, 2U << 20), \
- BCH_SB_ENCODED_EXTENT_MAX_BITS, 64 << 10, \
- "size", "Maximum size of checksummed/compressed extents")\
- x(metadata_checksum, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_STR(bch2_csum_opts), \
- BCH_SB_META_CSUM_TYPE, BCH_CSUM_OPT_crc32c, \
- NULL, NULL) \
- x(data_checksum, u8, \
- OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_STR(bch2_csum_opts), \
- BCH_SB_DATA_CSUM_TYPE, BCH_CSUM_OPT_crc32c, \
- NULL, NULL) \
- x(compression, u8, \
- OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_FN(bch2_opt_compression), \
- BCH_SB_COMPRESSION_TYPE, BCH_COMPRESSION_OPT_none, \
- NULL, NULL) \
- x(background_compression, u8, \
- OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_FN(bch2_opt_compression), \
- BCH_SB_BACKGROUND_COMPRESSION_TYPE,BCH_COMPRESSION_OPT_none, \
- NULL, NULL) \
- x(str_hash, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_STR(bch2_str_hash_opts), \
- BCH_SB_STR_HASH_TYPE, BCH_STR_HASH_OPT_siphash, \
- NULL, "Hash function for directory entries and xattrs")\
- x(metadata_target, u16, \
- OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_FN(bch2_opt_target), \
- BCH_SB_METADATA_TARGET, 0, \
- "(target)", "Device or label for metadata writes") \
- x(foreground_target, u16, \
- OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_FN(bch2_opt_target), \
- BCH_SB_FOREGROUND_TARGET, 0, \
- "(target)", "Device or label for foreground writes") \
- x(background_target, u16, \
- OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_FN(bch2_opt_target), \
- BCH_SB_BACKGROUND_TARGET, 0, \
- "(target)", "Device or label to move data to in the background")\
- x(promote_target, u16, \
- OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_FN(bch2_opt_target), \
- BCH_SB_PROMOTE_TARGET, 0, \
- "(target)", "Device or label to promote data to on read") \
- x(erasure_code, u16, \
- OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_BOOL(), \
- BCH_SB_ERASURE_CODE, false, \
- NULL, "Enable erasure coding (DO NOT USE YET)") \
- x(inodes_32bit, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_BOOL(), \
- BCH_SB_INODE_32BIT, true, \
- NULL, "Constrain inode numbers to 32 bits") \
- x(shard_inode_numbers, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_BOOL(), \
- BCH_SB_SHARD_INUMS, true, \
- NULL, "Shard new inode numbers by CPU id") \
- x(inodes_use_key_cache, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT, \
- OPT_BOOL(), \
- BCH_SB_INODES_USE_KEY_CACHE, true, \
- NULL, "Use the btree key cache for the inodes btree") \
- x(btree_node_mem_ptr_optimization, u8, \
- OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, true, \
- NULL, "Stash pointer to in memory btree node in btree ptr")\
- x(gc_reserve_percent, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_UINT(5, 21), \
- BCH_SB_GC_RESERVE, 8, \
- "%", "Percentage of disk space to reserve for copygc")\
- x(gc_reserve_bytes, u64, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME| \
- OPT_HUMAN_READABLE|OPT_SB_FIELD_SECTORS, \
- OPT_UINT(0, U64_MAX), \
- BCH_SB_GC_RESERVE_BYTES, 0, \
- "%", "Amount of disk space to reserve for copygc\n" \
- "Takes precedence over gc_reserve_percent if set")\
- x(root_reserve_percent, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT, \
- OPT_UINT(0, 100), \
- BCH_SB_ROOT_RESERVE, 0, \
- "%", "Percentage of disk space to reserve for superuser")\
- x(wide_macs, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_BOOL(), \
- BCH_SB_128_BIT_MACS, false, \
- NULL, "Store full 128 bits of cryptographic MACs, instead of 80")\
- x(inline_data, u8, \
- OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, true, \
- NULL, "Enable inline data extents") \
- x(promote_whole_extents, u8, \
- OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
- OPT_BOOL(), \
- BCH_SB_PROMOTE_WHOLE_EXTENTS, true, \
-	  NULL,		"Promote whole extents, instead of just the part being read")\
- x(acl, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT, \
- OPT_BOOL(), \
- BCH_SB_POSIX_ACL, true, \
- NULL, "Enable POSIX acls") \
- x(usrquota, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT, \
- OPT_BOOL(), \
- BCH_SB_USRQUOTA, false, \
- NULL, "Enable user quotas") \
- x(grpquota, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT, \
- OPT_BOOL(), \
- BCH_SB_GRPQUOTA, false, \
- NULL, "Enable group quotas") \
- x(prjquota, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT, \
- OPT_BOOL(), \
- BCH_SB_PRJQUOTA, false, \
- NULL, "Enable project quotas") \
- x(degraded, u8, \
- OPT_FS|OPT_MOUNT, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, false, \
- NULL, "Allow mounting in degraded mode") \
- x(very_degraded, u8, \
- OPT_FS|OPT_MOUNT, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, false, \
-	  NULL,		"Allow mounting when data will be missing")		\
- x(no_splitbrain_check, u8, \
- OPT_FS|OPT_MOUNT, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, false, \
- NULL, "Don't kick drives out when splitbrain detected")\
- x(discard, u8, \
- OPT_FS|OPT_MOUNT|OPT_DEVICE, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, true, \
- NULL, "Enable discard/TRIM support") \
- x(verbose, u8, \
- OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, BCACHEFS_VERBOSE_DEFAULT, \
- NULL, "Extra debugging information during mount/recovery")\
- x(journal_flush_delay, u32, \
- OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
- OPT_UINT(1, U32_MAX), \
- BCH_SB_JOURNAL_FLUSH_DELAY, 1000, \
- NULL, "Delay in milliseconds before automatic journal commits")\
- x(journal_flush_disabled, u8, \
- OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
- OPT_BOOL(), \
- BCH_SB_JOURNAL_FLUSH_DISABLED,false, \
- NULL, "Disable journal flush on sync/fsync\n" \
- "If enabled, writes can be lost, but only since the\n"\
- "last journal write (default 1 second)") \
- x(journal_reclaim_delay, u32, \
- OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
- OPT_UINT(0, U32_MAX), \
- BCH_SB_JOURNAL_RECLAIM_DELAY, 100, \
- NULL, "Delay in milliseconds before automatic journal reclaim")\
- x(move_bytes_in_flight, u32, \
- OPT_HUMAN_READABLE|OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
- OPT_UINT(1024, U32_MAX), \
- BCH2_NO_SB_OPT, 1U << 20, \
-	  NULL,		"Maximum amount of IO to keep in flight by the move path")\
- x(move_ios_in_flight, u32, \
- OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
- OPT_UINT(1, 1024), \
- BCH2_NO_SB_OPT, 32, \
- NULL, "Maximum number of IOs to keep in flight by the move path")\
- x(fsck, u8, \
- OPT_FS|OPT_MOUNT, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, false, \
- NULL, "Run fsck on mount") \
- x(fsck_memory_usage_percent, u8, \
- OPT_FS|OPT_MOUNT, \
- OPT_UINT(20, 70), \
- BCH2_NO_SB_OPT, 50, \
- NULL, "Maximum percentage of system ram fsck is allowed to pin")\
- x(fix_errors, u8, \
- OPT_FS|OPT_MOUNT, \
- OPT_FN(bch2_opt_fix_errors), \
- BCH2_NO_SB_OPT, FSCK_FIX_exit, \
- NULL, "Fix errors during fsck without asking") \
- x(ratelimit_errors, u8, \
- OPT_FS|OPT_MOUNT, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, RATELIMIT_ERRORS_DEFAULT, \
- NULL, "Ratelimit error messages during fsck") \
- x(nochanges, u8, \
- OPT_FS|OPT_MOUNT, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, false, \
- NULL, "Super read only mode - no writes at all will be issued,\n"\
- "even if we have to replay the journal") \
- x(norecovery, u8, \
- OPT_FS|OPT_MOUNT, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, false, \
- NULL, "Exit recovery immediately prior to journal replay")\
- x(recovery_passes, u64, \
- OPT_FS|OPT_MOUNT, \
- OPT_BITFIELD(bch2_recovery_passes), \
- BCH2_NO_SB_OPT, 0, \
- NULL, "Recovery passes to run explicitly") \
- x(recovery_passes_exclude, u64, \
- OPT_FS|OPT_MOUNT, \
- OPT_BITFIELD(bch2_recovery_passes), \
- BCH2_NO_SB_OPT, 0, \
- NULL, "Recovery passes to exclude") \
- x(recovery_pass_last, u8, \
- OPT_FS|OPT_MOUNT, \
- OPT_STR_NOLIMIT(bch2_recovery_passes), \
- BCH2_NO_SB_OPT, 0, \
- NULL, "Exit recovery after specified pass") \
- x(retain_recovery_info, u8, \
- 0, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, false, \
- NULL, "Don't free journal entries/keys, scanned btree nodes after startup")\
- x(read_entire_journal, u8, \
- 0, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, false, \
- NULL, "Read all journal entries, not just dirty ones")\
- x(read_journal_only, u8, \
- 0, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, false, \
- NULL, "Only read the journal, skip the rest of recovery")\
- x(journal_transaction_names, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_BOOL(), \
- BCH_SB_JOURNAL_TRANSACTION_NAMES, true, \
- NULL, "Log transaction function names in journal") \
- x(allocator_stuck_timeout, u16, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
- OPT_UINT(0, U16_MAX), \
- BCH_SB_ALLOCATOR_STUCK_TIMEOUT, 30, \
- NULL, "Default timeout in seconds for stuck allocator messages")\
- x(noexcl, u8, \
- OPT_FS|OPT_MOUNT, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, false, \
- NULL, "Don't open device in exclusive mode") \
- x(direct_io, u8, \
- OPT_FS|OPT_MOUNT, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, true, \
- NULL, "Use O_DIRECT (userspace only)") \
- x(sb, u64, \
- OPT_MOUNT, \
- OPT_UINT(0, S64_MAX), \
- BCH2_NO_SB_OPT, BCH_SB_SECTOR, \
- "offset", "Sector offset of superblock") \
- x(read_only, u8, \
- OPT_FS|OPT_MOUNT|OPT_HIDDEN, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, false, \
- NULL, NULL) \
- x(nostart, u8, \
- 0, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, false, \
-	  NULL,		"Don't start filesystem, only open devices")		\
- x(reconstruct_alloc, u8, \
- OPT_FS|OPT_MOUNT, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, false, \
- NULL, "Reconstruct alloc btree") \
- x(version_upgrade, u8, \
- OPT_FS|OPT_MOUNT, \
- OPT_STR(bch2_version_upgrade_opts), \
- BCH_SB_VERSION_UPGRADE, BCH_VERSION_UPGRADE_compatible, \
- NULL, "Set superblock to latest version,\n" \
- "allowing any new features to be used") \
- x(stdio, u64, \
- 0, \
- OPT_UINT(0, S64_MAX), \
- BCH2_NO_SB_OPT, false, \
- NULL, "Pointer to a struct stdio_redirect") \
- x(project, u8, \
- OPT_INODE, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, false, \
- NULL, NULL) \
- x(nocow, u8, \
- OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME|OPT_INODE, \
- OPT_BOOL(), \
- BCH_SB_NOCOW, false, \
- NULL, "Nocow mode: Writes will be done in place when possible.\n"\
-			  "Snapshots and reflink will still cause writes to be COW\n"\
- "Implicitly disables data checksumming, compression and encryption")\
- x(nocow_enabled, u8, \
- OPT_FS|OPT_MOUNT, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, true, \
- NULL, "Enable nocow mode: enables runtime locking in\n"\
- "data move path needed if nocow will ever be in use\n")\
- x(no_data_io, u8, \
- OPT_MOUNT, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, false, \
- NULL, "Skip submit_bio() for data reads and writes, " \
- "for performance testing purposes") \
- x(fs_size, u64, \
- OPT_DEVICE, \
- OPT_UINT(0, S64_MAX), \
- BCH2_NO_SB_OPT, 0, \
- "size", "Size of filesystem on device") \
- x(bucket, u32, \
- OPT_DEVICE, \
- OPT_UINT(0, S64_MAX), \
- BCH2_NO_SB_OPT, 0, \
-	  "size",	"Bucket size")						\
- x(durability, u8, \
- OPT_DEVICE|OPT_SB_FIELD_ONE_BIAS, \
- OPT_UINT(0, BCH_REPLICAS_MAX), \
- BCH2_NO_SB_OPT, 1, \
- "n", "Data written to this device will be considered\n"\
- "to have already been replicated n times") \
- x(data_allowed, u8, \
- OPT_DEVICE, \
- OPT_BITFIELD(__bch2_data_types), \
- BCH2_NO_SB_OPT, BIT(BCH_DATA_journal)|BIT(BCH_DATA_btree)|BIT(BCH_DATA_user),\
- "types", "Allowed data types for this device: journal, btree, and/or user")\
- x(btree_node_prefetch, u8, \
- OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, true, \
-	  NULL,		"BTREE_ITER_prefetch causes btree nodes to be\n"\
- " prefetched sequentially")
-
-#define BCH_DEV_OPT_SETTERS() \
- x(discard, BCH_MEMBER_DISCARD) \
- x(durability, BCH_MEMBER_DURABILITY) \
- x(data_allowed, BCH_MEMBER_DATA_ALLOWED)
-
-struct bch_opts {
-#define x(_name, _bits, ...) unsigned _name##_defined:1;
- BCH_OPTS()
-#undef x
-
-#define x(_name, _bits, ...) _bits _name;
- BCH_OPTS()
-#undef x
-};
-
-struct bch2_opts_parse {
- struct bch_opts opts;
-
- /* to save opts that can't be parsed before the FS is opened: */
- struct printbuf parse_later;
-};
-
-static const __maybe_unused struct bch_opts bch2_opts_default = {
-#define x(_name, _bits, _mode, _type, _sb_opt, _default, ...) \
- ._name##_defined = true, \
- ._name = _default, \
-
- BCH_OPTS()
-#undef x
-};
-
-#define opt_defined(_opts, _name) ((_opts)._name##_defined)
-
-#define opt_get(_opts, _name) \
- (opt_defined(_opts, _name) ? (_opts)._name : bch2_opts_default._name)
-
-#define opt_set(_opts, _name, _v) \
-do { \
- (_opts)._name##_defined = true; \
- (_opts)._name = _v; \
-} while (0)
-
-static inline struct bch_opts bch2_opts_empty(void)
-{
- return (struct bch_opts) { 0 };
-}
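struct bch_opts above is built by expanding BCH_OPTS() twice: the first pass generates a one-bit _defined flag per option, the second the value itself, which is what lets opt_defined()/opt_get()/opt_set() distinguish "explicitly set" from "fall back to the default". A miniature standalone version of the same x-macro pattern; the option names are made up, and unlike the real opt_get(), which falls back to bch2_opts_default, this sketch takes the default as a parameter:

	#include <stdbool.h>
	#include <stdio.h>

	/* a miniature x-macro option list; names are illustrative only */
	#define MY_OPTS()		\
		x(block_size, unsigned)	\
		x(verbose,    bool)

	struct my_opts {
	#define x(_name, _type) unsigned _name##_defined:1;
		MY_OPTS()
	#undef x
	#define x(_name, _type) _type _name;
		MY_OPTS()
	#undef x
	};

	#define opt_defined(_o, _n)	((_o)._n##_defined)
	#define opt_set(_o, _n, _v)			\
	do {						\
		(_o)._n##_defined = true;		\
		(_o)._n = (_v);				\
	} while (0)
	#define opt_get(_o, _n, _default)		\
		(opt_defined(_o, _n) ? (_o)._n : (_default))

	int main(void)
	{
		struct my_opts o = { 0 };

		opt_set(o, block_size, 4096);
		printf("block_size=%u verbose=%d\n",
		       opt_get(o, block_size, 512),	/* explicitly set: 4096 */
		       opt_get(o, verbose, false));	/* not set: falls back */
		return 0;
	}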
-
-void bch2_opts_apply(struct bch_opts *, struct bch_opts);
-
-enum bch_opt_id {
-#define x(_name, ...) Opt_##_name,
- BCH_OPTS()
-#undef x
- bch2_opts_nr
-};
-
-struct bch_fs;
-struct printbuf;
-
-struct bch_option {
- struct attribute attr;
- u64 (*get_sb)(const struct bch_sb *);
- void (*set_sb)(struct bch_sb *, u64);
- enum opt_type type;
- enum opt_flags flags;
- u64 min, max;
-
- const char * const *choices;
-
- struct bch_opt_fn fn;
-
- const char *hint;
- const char *help;
-
-};
-
-extern const struct bch_option bch2_opt_table[];
-
-bool bch2_opt_defined_by_id(const struct bch_opts *, enum bch_opt_id);
-u64 bch2_opt_get_by_id(const struct bch_opts *, enum bch_opt_id);
-void bch2_opt_set_by_id(struct bch_opts *, enum bch_opt_id, u64);
-
-u64 bch2_opt_from_sb(struct bch_sb *, enum bch_opt_id);
-int bch2_opts_from_sb(struct bch_opts *, struct bch_sb *);
-void __bch2_opt_set_sb(struct bch_sb *, int, const struct bch_option *, u64);
-
-struct bch_dev;
-void bch2_opt_set_sb(struct bch_fs *, struct bch_dev *, const struct bch_option *, u64);
-
-int bch2_opt_lookup(const char *);
-int bch2_opt_validate(const struct bch_option *, u64, struct printbuf *);
-int bch2_opt_parse(struct bch_fs *, const struct bch_option *,
- const char *, u64 *, struct printbuf *);
-
-#define OPT_SHOW_FULL_LIST (1 << 0)
-#define OPT_SHOW_MOUNT_STYLE (1 << 1)
-
-void bch2_opt_to_text(struct printbuf *, struct bch_fs *, struct bch_sb *,
- const struct bch_option *, u64, unsigned);
-void bch2_opts_to_text(struct printbuf *,
- struct bch_opts,
- struct bch_fs *, struct bch_sb *,
- unsigned, unsigned, unsigned);
-
-int bch2_opt_check_may_set(struct bch_fs *, int, u64);
-int bch2_opts_check_may_set(struct bch_fs *);
-int bch2_parse_one_mount_opt(struct bch_fs *, struct bch_opts *,
- struct printbuf *, const char *, const char *);
-int bch2_parse_mount_opts(struct bch_fs *, struct bch_opts *, struct printbuf *,
- char *);
-
-/* inode opts: */
-
-struct bch_io_opts {
-#define x(_name, _bits) u##_bits _name;
- BCH_INODE_OPTS()
-#undef x
-};
-
-static inline unsigned background_compression(struct bch_io_opts opts)
-{
- return opts.background_compression ?: opts.compression;
-}
-
-struct bch_io_opts bch2_opts_to_inode_opts(struct bch_opts);
-bool bch2_opt_is_inode_opt(enum bch_opt_id);
-
-#endif /* _BCACHEFS_OPTS_H */
diff --git a/fs/bcachefs/printbuf.c b/fs/bcachefs/printbuf.c
deleted file mode 100644
index 4cf5a2af1e6f..000000000000
--- a/fs/bcachefs/printbuf.c
+++ /dev/null
@@ -1,509 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1+
-/* Copyright (C) 2022 Kent Overstreet */
-
-#include <linux/bitmap.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/string_helpers.h>
-
-#include "printbuf.h"
-
-static inline unsigned __printbuf_linelen(struct printbuf *buf, unsigned pos)
-{
- return pos - buf->last_newline;
-}
-
-static inline unsigned printbuf_linelen(struct printbuf *buf)
-{
- return __printbuf_linelen(buf, buf->pos);
-}
-
-/*
- * Returns spaces from start of line, if set, or 0 if unset:
- */
-static inline unsigned cur_tabstop(struct printbuf *buf)
-{
- return buf->cur_tabstop < buf->nr_tabstops
- ? buf->_tabstops[buf->cur_tabstop]
- : 0;
-}
-
-int bch2_printbuf_make_room(struct printbuf *out, unsigned extra)
-{
- /* Reserved space for terminating nul: */
- extra += 1;
-
- if (out->pos + extra <= out->size)
- return 0;
-
- if (!out->heap_allocated) {
- out->overflow = true;
- return 0;
- }
-
- unsigned new_size = roundup_pow_of_two(out->size + extra);
-
- /* Sanity check... */
- if (new_size > PAGE_SIZE << MAX_PAGE_ORDER) {
- out->allocation_failure = true;
- out->overflow = true;
- return -ENOMEM;
- }
-
- /*
- * Note: output buffer must be freeable with kfree(), it's not required
- * that the user use printbuf_exit().
- */
- char *buf = krealloc(out->buf, new_size, !out->atomic ? GFP_KERNEL : GFP_NOWAIT);
-
- if (!buf) {
- out->allocation_failure = true;
- out->overflow = true;
- return -ENOMEM;
- }
-
- out->buf = buf;
- out->size = new_size;
- return 0;
-}
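bch2_printbuf_make_room() grows the buffer to the next power of two, and on allocation failure it records the fact in the printbuf and keeps going rather than forcing every caller to unwind, matching the best-effort policy described in printbuf.h. A simplified userspace sketch of that policy (structure and names are illustrative, not the bcachefs API):

	#include <stdbool.h>
	#include <stdlib.h>
	#include <string.h>

	struct strbuf {
		char	*buf;
		size_t	size;
		size_t	pos;
		bool	allocation_failure;
	};

	static size_t roundup_pow2(size_t v)
	{
		size_t r = 1;

		while (r < v)
			r <<= 1;
		return r;
	}

	/* ensure room for 'extra' more bytes plus a terminating NUL */
	static int strbuf_make_room(struct strbuf *b, size_t extra)
	{
		size_t new_size;
		char *p;

		extra += 1;			/* terminating NUL */
		if (b->pos + extra <= b->size)
			return 0;

		new_size = roundup_pow2(b->size + extra);
		p = realloc(b->buf, new_size);
		if (!p) {
			b->allocation_failure = true;	/* remember, stay best-effort */
			return -1;
		}

		b->buf  = p;
		b->size = new_size;
		return 0;
	}

	int main(void)
	{
		struct strbuf b = { 0 };

		if (!strbuf_make_room(&b, 5)) {
			memcpy(b.buf + b.pos, "hello", 5);
			b.pos += 5;
			b.buf[b.pos] = '\0';
		}
		free(b.buf);
		return 0;
	}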
-
-static void printbuf_advance_pos(struct printbuf *out, unsigned len)
-{
- out->pos += min(len, printbuf_remaining(out));
-}
-
-static void printbuf_insert_spaces(struct printbuf *out, unsigned pos, unsigned nr)
-{
- unsigned move = out->pos - pos;
-
- bch2_printbuf_make_room(out, nr);
-
- if (pos + nr < out->size)
- memmove(out->buf + pos + nr,
- out->buf + pos,
- min(move, out->size - 1 - pos - nr));
-
- if (pos < out->size)
- memset(out->buf + pos, ' ', min(nr, out->size - pos));
-
- printbuf_advance_pos(out, nr);
- printbuf_nul_terminate_reserved(out);
-}
-
-static void __printbuf_do_indent(struct printbuf *out, unsigned pos)
-{
- while (true) {
- int pad;
- unsigned len = out->pos - pos;
- char *p = out->buf + pos;
- char *n = memscan(p, '\n', len);
- if (cur_tabstop(out)) {
- n = min(n, (char *) memscan(p, '\r', len));
- n = min(n, (char *) memscan(p, '\t', len));
- }
-
- pos = n - out->buf;
- if (pos == out->pos)
- break;
-
- switch (*n) {
- case '\n':
- pos++;
- out->last_newline = pos;
-
- printbuf_insert_spaces(out, pos, out->indent);
-
- pos = min(pos + out->indent, out->pos);
- out->last_field = pos;
- out->cur_tabstop = 0;
- break;
- case '\r':
- memmove(n, n + 1, out->pos - pos);
- --out->pos;
- pad = (int) cur_tabstop(out) - (int) __printbuf_linelen(out, pos);
- if (pad > 0) {
- printbuf_insert_spaces(out, out->last_field, pad);
- pos += pad;
- }
-
- out->last_field = pos;
- out->cur_tabstop++;
- break;
- case '\t':
- pad = (int) cur_tabstop(out) - (int) __printbuf_linelen(out, pos) - 1;
- if (pad > 0) {
- *n = ' ';
- printbuf_insert_spaces(out, pos, pad - 1);
- pos += pad;
- } else {
- memmove(n, n + 1, out->pos - pos);
- --out->pos;
- }
-
- out->last_field = pos;
- out->cur_tabstop++;
- break;
- }
- }
-}
-
-static inline void printbuf_do_indent(struct printbuf *out, unsigned pos)
-{
- if (out->has_indent_or_tabstops && !out->suppress_indent_tabstop_handling)
- __printbuf_do_indent(out, pos);
-}
-
-void bch2_prt_vprintf(struct printbuf *out, const char *fmt, va_list args)
-{
- int len;
-
- do {
- va_list args2;
-
- va_copy(args2, args);
- len = vsnprintf(out->buf + out->pos, printbuf_remaining_size(out), fmt, args2);
- va_end(args2);
- } while (len > printbuf_remaining(out) &&
- !bch2_printbuf_make_room(out, len));
-
- unsigned indent_pos = out->pos;
- printbuf_advance_pos(out, len);
- printbuf_do_indent(out, indent_pos);
-}
-
-void bch2_prt_printf(struct printbuf *out, const char *fmt, ...)
-{
- va_list args;
- int len;
-
- do {
- va_start(args, fmt);
- len = vsnprintf(out->buf + out->pos, printbuf_remaining_size(out), fmt, args);
- va_end(args);
- } while (len > printbuf_remaining(out) &&
- !bch2_printbuf_make_room(out, len));
-
- unsigned indent_pos = out->pos;
- printbuf_advance_pos(out, len);
- printbuf_do_indent(out, indent_pos);
-}
-
-/**
- * bch2_printbuf_str() - returns printbuf's buf as a C string, guaranteed to be
- * null terminated
- * @buf: printbuf to terminate
- * Returns: Printbuf contents, as a nul terminated C string
- */
-const char *bch2_printbuf_str(const struct printbuf *buf)
-{
- /*
- * If we've written to a printbuf then it's guaranteed to be a null
- * terminated string - but if we haven't, then we might not have
- * allocated a buffer at all:
- */
- return buf->pos
- ? buf->buf
- : "";
-}
-
-/**
- * bch2_printbuf_exit() - exit a printbuf, freeing memory it owns and poisoning it
- * against accidental use.
- * @buf: printbuf to exit
- */
-void bch2_printbuf_exit(struct printbuf *buf)
-{
- if (buf->heap_allocated) {
- kfree(buf->buf);
- buf->buf = ERR_PTR(-EINTR); /* poison value */
- }
-}
-
-void bch2_printbuf_tabstops_reset(struct printbuf *buf)
-{
- buf->nr_tabstops = 0;
-}
-
-void bch2_printbuf_tabstop_pop(struct printbuf *buf)
-{
- if (buf->nr_tabstops)
- --buf->nr_tabstops;
-}
-
-/*
- * bch2_printbuf_tabstop_push() - add a tabstop, n spaces from the previous tabstop
- *
- * @buf: printbuf to control
- * @spaces: number of spaces from previous tabstop
- *
- * In the future this function may allocate memory if setting more than
- * PRINTBUF_INLINE_TABSTOPS or setting tabstops more than 255 spaces from start
- * of line.
- */
-int bch2_printbuf_tabstop_push(struct printbuf *buf, unsigned spaces)
-{
- unsigned prev_tabstop = buf->nr_tabstops
- ? buf->_tabstops[buf->nr_tabstops - 1]
- : 0;
-
- if (WARN_ON(buf->nr_tabstops >= ARRAY_SIZE(buf->_tabstops)))
- return -EINVAL;
-
- buf->_tabstops[buf->nr_tabstops++] = prev_tabstop + spaces;
- buf->has_indent_or_tabstops = true;
- return 0;
-}
-
-/**
- * bch2_printbuf_indent_add() - add to the current indent level
- *
- * @buf: printbuf to control
- * @spaces: number of spaces to add to the current indent level
- *
- * Subsequent lines, and the current line if the output position is at the start
- * of the current line, will be indented by @spaces more spaces.
- */
-void bch2_printbuf_indent_add(struct printbuf *buf, unsigned spaces)
-{
- if (WARN_ON_ONCE(buf->indent + spaces < buf->indent))
- spaces = 0;
-
- buf->indent += spaces;
- prt_chars(buf, ' ', spaces);
-
- buf->has_indent_or_tabstops = true;
-}
-
-/**
- * bch2_printbuf_indent_sub() - subtract from the current indent level
- *
- * @buf: printbuf to control
- * @spaces: number of spaces to subtract from the current indent level
- *
- * Subsequent lines, and the current line if the output position is at the start
- * of the current line, will be indented by @spaces less spaces.
- */
-void bch2_printbuf_indent_sub(struct printbuf *buf, unsigned spaces)
-{
- if (WARN_ON_ONCE(spaces > buf->indent))
- spaces = buf->indent;
-
- if (buf->last_newline + buf->indent == buf->pos) {
- buf->pos -= spaces;
- printbuf_nul_terminate(buf);
- }
- buf->indent -= spaces;
-
- if (!buf->indent && !buf->nr_tabstops)
- buf->has_indent_or_tabstops = false;
-}
-
-void bch2_prt_newline(struct printbuf *buf)
-{
- bch2_printbuf_make_room(buf, 1 + buf->indent);
-
- __prt_char_reserved(buf, '\n');
-
- buf->last_newline = buf->pos;
-
- __prt_chars_reserved(buf, ' ', buf->indent);
-
- printbuf_nul_terminate_reserved(buf);
-
- buf->last_field = buf->pos;
- buf->cur_tabstop = 0;
-}
-
-void bch2_printbuf_strip_trailing_newline(struct printbuf *out)
-{
- for (int p = out->pos - 1; p >= 0; --p) {
- if (out->buf[p] == '\n') {
- out->pos = p;
- break;
- }
- if (out->buf[p] != ' ')
- break;
- }
-
- printbuf_nul_terminate_reserved(out);
-}
-
-static void __prt_tab(struct printbuf *out)
-{
- int spaces = max_t(int, 0, cur_tabstop(out) - printbuf_linelen(out));
-
- prt_chars(out, ' ', spaces);
-
- out->last_field = out->pos;
- out->cur_tabstop++;
-}
-
-/**
- * bch2_prt_tab() - Advance printbuf to the next tabstop
- * @out: printbuf to control
- *
- * Advance output to the next tabstop by printing spaces.
- */
-void bch2_prt_tab(struct printbuf *out)
-{
- if (WARN_ON(!cur_tabstop(out)))
- return;
-
- __prt_tab(out);
-}
-
-static void __prt_tab_rjust(struct printbuf *buf)
-{
- int pad = (int) cur_tabstop(buf) - (int) printbuf_linelen(buf);
- if (pad > 0)
- printbuf_insert_spaces(buf, buf->last_field, pad);
-
- buf->last_field = buf->pos;
- buf->cur_tabstop++;
-}
-
-/**
- * bch2_prt_tab_rjust - Advance printbuf to the next tabstop, right justifying
- * previous output
- *
- * @buf: printbuf to control
- *
- * Advance output to the next tabstop by inserting spaces immediately after the
- * previous tabstop, right justifying previously outputted text.
- */
-void bch2_prt_tab_rjust(struct printbuf *buf)
-{
- if (WARN_ON(!cur_tabstop(buf)))
- return;
-
- __prt_tab_rjust(buf);
-}
-
-/**
- * bch2_prt_bytes_indented() - Print an array of chars, handling embedded control characters
- *
- * @out: output printbuf
- * @str: string to print
- * @count: number of bytes to print
- *
- * The following control characters are handled as follows:
- * \n: prt_newline newline that obeys current indent level
- * \t: prt_tab advance to next tabstop
- * \r: prt_tab_rjust advance to next tabstop, with right justification
- */
-void bch2_prt_bytes_indented(struct printbuf *out, const char *str, unsigned count)
-{
- unsigned indent_pos = out->pos;
- prt_bytes(out, str, count);
- printbuf_do_indent(out, indent_pos);
-}
-
-/**
- * bch2_prt_human_readable_u64() - Print out a u64 in human readable units
- * @out: output printbuf
- * @v: integer to print
- *
- * Units of 2^10 (default) or 10^3 are controlled via @out->si_units
- */
-void bch2_prt_human_readable_u64(struct printbuf *out, u64 v)
-{
- bch2_printbuf_make_room(out, 10);
- unsigned len = string_get_size(v, 1, !out->si_units,
- out->buf + out->pos,
- printbuf_remaining_size(out));
- printbuf_advance_pos(out, len);
-}
-
-/**
- * bch2_prt_human_readable_s64() - Print out a s64 in human readable units
- * @out: output printbuf
- * @v: integer to print
- *
- * Units of 2^10 (default) or 10^3 are controlled via @out->si_units
- */
-void bch2_prt_human_readable_s64(struct printbuf *out, s64 v)
-{
- if (v < 0)
- prt_char(out, '-');
- bch2_prt_human_readable_u64(out, abs(v));
-}
-
-/**
- * bch2_prt_units_u64() - Print out a u64 according to printbuf unit options
- * @out: output printbuf
- * @v: integer to print
- *
- * Units are either raw (default), or human readable units (controlled via
- * @buf->human_readable_units)
- */
-void bch2_prt_units_u64(struct printbuf *out, u64 v)
-{
- if (out->human_readable_units)
- bch2_prt_human_readable_u64(out, v);
- else
- bch2_prt_printf(out, "%llu", v);
-}
-
-/**
- * bch2_prt_units_s64() - Print out a s64 according to printbuf unit options
- * @out: output printbuf
- * @v: integer to print
- *
- * Units are either raw (default), or human readable units (controlled via
- * @buf->human_readable_units)
- */
-void bch2_prt_units_s64(struct printbuf *out, s64 v)
-{
- if (v < 0)
- prt_char(out, '-');
- bch2_prt_units_u64(out, abs(v));
-}
-
-void bch2_prt_string_option(struct printbuf *out,
- const char * const list[],
- size_t selected)
-{
- for (size_t i = 0; list[i]; i++)
- bch2_prt_printf(out, i == selected ? "[%s] " : "%s ", list[i]);
-}
-
-void bch2_prt_bitflags(struct printbuf *out,
- const char * const list[], u64 flags)
-{
- unsigned bit, nr = 0;
- bool first = true;
-
- while (list[nr])
- nr++;
-
- while (flags && (bit = __ffs64(flags)) < nr) {
- if (!first)
- bch2_prt_printf(out, ",");
- first = false;
- bch2_prt_printf(out, "%s", list[bit]);
- flags ^= BIT_ULL(bit);
- }
-}
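bch2_prt_bitflags() walks the set bits lowest-first with __ffs64() and prints the corresponding names comma-separated, stopping at bits beyond the NULL-terminated name list. The same technique in standalone C, using the GCC/Clang __builtin_ctzll() in place of __ffs64(); the name list here is made up for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* print set bits of 'flags' as comma-separated names from a NULL-terminated list */
	static void print_bitflags(const char * const list[], uint64_t flags)
	{
		unsigned nr = 0;
		int first = 1;

		while (list[nr])
			nr++;

		while (flags) {
			unsigned bit = __builtin_ctzll(flags);	/* lowest set bit */

			if (bit >= nr)
				break;
			printf("%s%s", first ? "" : ",", list[bit]);
			first = 0;
			flags &= flags - 1;			/* clear that bit */
		}
	}

	int main(void)
	{
		static const char * const data_types[] = {
			"journal", "btree", "user", "cached", NULL
		};

		print_bitflags(data_types, (1 << 0) | (1 << 2));	/* journal,user */
		putchar('\n');
		return 0;
	}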
-
-void bch2_prt_bitflags_vector(struct printbuf *out,
- const char * const list[],
- unsigned long *v, unsigned nr)
-{
- bool first = true;
- unsigned i;
-
- for (i = 0; i < nr; i++)
- if (!list[i]) {
- nr = i - 1;
- break;
- }
-
- for_each_set_bit(i, v, nr) {
- if (!first)
- bch2_prt_printf(out, ",");
- first = false;
- bch2_prt_printf(out, "%s", list[i]);
- }
-}
diff --git a/fs/bcachefs/printbuf.h b/fs/bcachefs/printbuf.h
deleted file mode 100644
index 1d570387b77f..000000000000
--- a/fs/bcachefs/printbuf.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1+ */
-/* Copyright (C) 2022 Kent Overstreet */
-
-#ifndef _BCACHEFS_PRINTBUF_H
-#define _BCACHEFS_PRINTBUF_H
-
-/*
- * Printbufs: Simple strings for printing to, with optional heap allocation
- *
- * This code has provisions for use in userspace, to aid in making other code
- * portable between kernelspace and userspace.
- *
- * Basic example:
- * struct printbuf buf = PRINTBUF;
- *
- * prt_printf(&buf, "foo=");
- * foo_to_text(&buf, foo);
- * printk("%s", buf.buf);
- * printbuf_exit(&buf);
- *
- * Or
- * struct printbuf buf = PRINTBUF_EXTERN(char_buf, char_buf_size)
- *
- * We can now write pretty printers instead of writing code that dumps
- * everything to the kernel log buffer, and then those pretty-printers can be
- * used by other code that outputs to kernel log, sysfs, debugfs, etc.
- *
- * Memory allocation: Outputting to a printbuf may allocate memory. This
- * allocation is done with GFP_KERNEL, by default: use the newer
- * memalloc_*_(save|restore) functions as needed.
- *
- * Since no equivalent yet exists for GFP_ATOMIC/GFP_NOWAIT, memory allocations
- * will be done with GFP_NOWAIT if printbuf->atomic is nonzero.
- *
- * It's allowed to grab the output buffer and free it later with kfree() instead
- * of using printbuf_exit(), if the user just needs a heap allocated string at
- * the end.
- *
- * Memory allocation failures: We don't return errors directly, because on
- * memory allocation failure we usually don't want to bail out and unwind - we
- * want to print what we've got, on a best-effort basis. But code that does want
- * to return -ENOMEM may check printbuf.allocation_failure.
- *
- * Indenting, tabstops:
- *
- * To aid in writing multi-line pretty printers spread across multiple
- * functions, printbufs track the current indent level.
- *
- * printbuf_indent_add() and printbuf_indent_sub() increase and decrease the current indent
- * level, respectively.
- *
- * To use tabstops, set printbuf->tabstops[]; they are in units of spaces, from
- * start of line. Once set, prt_tab() will output spaces up to the next tabstop.
- * prt_tab_rjust() will also advance the current line of text up to the next
- * tabstop, but it does so by shifting text since the previous tabstop up to the
- * next tabstop - right justifying it.
- *
- * Make sure you use prt_newline() instead of \n in the format string for indent
- * level and tabstops to work correctly.
- *
- * Output units: printbuf->units exists to tell pretty-printers how to output
- * numbers: a raw value (e.g. directly from a superblock field), as bytes, or as
- * human readable bytes. prt_units() obeys it.
- */
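As a short sketch of the indent/tabstop machinery described above, using only the bch2_-prefixed names declared later in this header (the unprefixed prt_* wrappers used in the example above live elsewhere in bcachefs, so they are not assumed here):

	struct printbuf buf = PRINTBUF;

	bch2_printbuf_tabstop_push(&buf, 28);	/* one tabstop, 28 columns in */
	bch2_prt_printf(&buf, "compression:\tlz4\n");
	bch2_prt_printf(&buf, "background_compression:\tzstd\n");

	/* each '\t' advanced output to the tabstop; '\r' would right justify instead */
	printk("%s", buf.buf);
	bch2_printbuf_exit(&buf);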
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-
-enum printbuf_si {
- PRINTBUF_UNITS_2, /* use binary powers of 2^10 */
- PRINTBUF_UNITS_10, /* use powers of 10^3 (standard SI) */
-};
-
-#define PRINTBUF_INLINE_TABSTOPS 6
-
-struct printbuf {
- char *buf;
- unsigned size;
- unsigned pos;
- unsigned last_newline;
- unsigned last_field;
- unsigned indent;
- /*
- * If nonzero, allocations will be done with GFP_ATOMIC:
- */
- u8 atomic;
- bool allocation_failure:1;
- bool heap_allocated:1;
- bool overflow:1;
- enum printbuf_si si_units:1;
- bool human_readable_units:1;
- bool has_indent_or_tabstops:1;
- bool suppress_indent_tabstop_handling:1;
- u8 nr_tabstops;
-
- /*
-	 * Do not modify directly: use bch2_printbuf_tabstop_push(),
-	 * bch2_printbuf_tabstop_pop()
- */
- u8 cur_tabstop;
- u8 _tabstops[PRINTBUF_INLINE_TABSTOPS];
-};
-
-int bch2_printbuf_make_room(struct printbuf *, unsigned);
-__printf(2, 3) void bch2_prt_printf(struct printbuf *out, const char *fmt, ...);
-__printf(2, 0) void bch2_prt_vprintf(struct printbuf *out, const char *fmt, va_list);
-const char *bch2_printbuf_str(const struct printbuf *);
-void bch2_printbuf_exit(struct printbuf *);
-
-void bch2_printbuf_tabstops_reset(struct printbuf *);
-void bch2_printbuf_tabstop_pop(struct printbuf *);
-int bch2_printbuf_tabstop_push(struct printbuf *, unsigned);
-
-void bch2_printbuf_indent_add(struct printbuf *, unsigned);
-void bch2_printbuf_indent_sub(struct printbuf *, unsigned);
-
-void bch2_prt_newline(struct printbuf *);
-void bch2_printbuf_strip_trailing_newline(struct printbuf *);
-void bch2_prt_tab(struct printbuf *);
-void bch2_prt_tab_rjust(struct printbuf *);
-
-void bch2_prt_bytes_indented(struct printbuf *, const char *, unsigned);
-void bch2_prt_human_readable_u64(struct printbuf *, u64);
-void bch2_prt_human_readable_s64(struct printbuf *, s64);
-void bch2_prt_units_u64(struct printbuf *, u64);
-void bch2_prt_units_s64(struct printbuf *, s64);
-void bch2_prt_string_option(struct printbuf *, const char * const[], size_t);
-void bch2_prt_bitflags(struct printbuf *, const char * const[], u64);
-void bch2_prt_bitflags_vector(struct printbuf *, const char * const[],
- unsigned long *, unsigned);
-
-/* Initializer for a heap allocated printbuf: */
-#define PRINTBUF ((struct printbuf) { .heap_allocated = true })
-
-/* Initializer for a printbuf that points to an external buffer: */
-#define PRINTBUF_EXTERN(_buf, _size) \
-((struct printbuf) { \
- .buf = _buf, \
- .size = _size, \
-})
-
-/*
- * Returns size remaining of output buffer:
- */
-static inline unsigned printbuf_remaining_size(struct printbuf *out)
-{
- if (WARN_ON(out->size && out->pos >= out->size))
- out->pos = out->size - 1;
- return out->size - out->pos;
-}
-
-/*
- * Returns number of characters we can print to the output buffer - i.e.
- * excluding the terminating nul:
- */
-static inline unsigned printbuf_remaining(struct printbuf *out)
-{
- return out->size ? printbuf_remaining_size(out) - 1 : 0;
-}
-
-static inline unsigned printbuf_written(struct printbuf *out)
-{
- return out->size ? min(out->pos, out->size - 1) : 0;
-}
-
-static inline void printbuf_nul_terminate_reserved(struct printbuf *out)
-{
- if (WARN_ON(out->size && out->pos >= out->size))
- out->pos = out->size - 1;
- if (out->size)
- out->buf[out->pos] = 0;
-}
-
-static inline void printbuf_nul_terminate(struct printbuf *out)
-{
- bch2_printbuf_make_room(out, 1);
- printbuf_nul_terminate_reserved(out);
-}
-
-/* Doesn't call bch2_printbuf_make_room(), doesn't nul terminate: */
-static inline void __prt_char_reserved(struct printbuf *out, char c)
-{
- if (printbuf_remaining(out))
- out->buf[out->pos++] = c;
-}
-
-/* Doesn't nul terminate: */
-static inline void __prt_char(struct printbuf *out, char c)
-{
- bch2_printbuf_make_room(out, 1);
- __prt_char_reserved(out, c);
-}
-
-static inline void prt_char(struct printbuf *out, char c)
-{
- bch2_printbuf_make_room(out, 2);
- __prt_char_reserved(out, c);
- printbuf_nul_terminate_reserved(out);
-}
-
-static inline void __prt_chars_reserved(struct printbuf *out, char c, unsigned n)
-{
- unsigned can_print = min(n, printbuf_remaining(out));
-
- for (unsigned i = 0; i < can_print; i++)
- out->buf[out->pos++] = c;
-}
-
-static inline void prt_chars(struct printbuf *out, char c, unsigned n)
-{
- bch2_printbuf_make_room(out, n);
- __prt_chars_reserved(out, c, n);
- printbuf_nul_terminate_reserved(out);
-}
-
-static inline void prt_bytes(struct printbuf *out, const void *b, unsigned n)
-{
- bch2_printbuf_make_room(out, n);
-
- unsigned can_print = min(n, printbuf_remaining(out));
-
- for (unsigned i = 0; i < can_print; i++)
- out->buf[out->pos++] = ((char *) b)[i];
-
- printbuf_nul_terminate(out);
-}
-
-static inline void prt_str(struct printbuf *out, const char *str)
-{
- prt_bytes(out, str, strlen(str));
-}
-
-static inline void prt_str_indented(struct printbuf *out, const char *str)
-{
- bch2_prt_bytes_indented(out, str, strlen(str));
-}
-
-static inline void prt_hex_byte(struct printbuf *out, u8 byte)
-{
- bch2_printbuf_make_room(out, 3);
- __prt_char_reserved(out, hex_asc_hi(byte));
- __prt_char_reserved(out, hex_asc_lo(byte));
- printbuf_nul_terminate_reserved(out);
-}
-
-static inline void prt_hex_byte_upper(struct printbuf *out, u8 byte)
-{
- bch2_printbuf_make_room(out, 3);
- __prt_char_reserved(out, hex_asc_upper_hi(byte));
- __prt_char_reserved(out, hex_asc_upper_lo(byte));
- printbuf_nul_terminate_reserved(out);
-}
-
-/**
- * printbuf_reset - re-use a printbuf without freeing and re-initializing it:
- */
-static inline void printbuf_reset(struct printbuf *buf)
-{
- buf->pos = 0;
- buf->allocation_failure = 0;
- buf->indent = 0;
- buf->nr_tabstops = 0;
- buf->cur_tabstop = 0;
-}
-
-/**
- * printbuf_atomic_inc - mark as entering an atomic section
- */
-static inline void printbuf_atomic_inc(struct printbuf *buf)
-{
- buf->atomic++;
-}
-
-/**
- * printbuf_atomic_dec - mark as leaving an atomic section
- */
-static inline void printbuf_atomic_dec(struct printbuf *buf)
-{
- buf->atomic--;
-}
-
-#endif /* _BCACHEFS_PRINTBUF_H */
diff --git a/fs/bcachefs/quota.c b/fs/bcachefs/quota.c
deleted file mode 100644
index 74f45a8162ad..000000000000
--- a/fs/bcachefs/quota.c
+++ /dev/null
@@ -1,892 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "bcachefs.h"
-#include "btree_update.h"
-#include "errcode.h"
-#include "error.h"
-#include "inode.h"
-#include "quota.h"
-#include "snapshot.h"
-#include "super-io.h"
-
-static const char * const bch2_quota_types[] = {
- "user",
- "group",
- "project",
-};
-
-static const char * const bch2_quota_counters[] = {
- "space",
- "inodes",
-};
-
-static int bch2_sb_quota_validate(struct bch_sb *sb, struct bch_sb_field *f,
- enum bch_validate_flags flags, struct printbuf *err)
-{
- struct bch_sb_field_quota *q = field_to_type(f, quota);
-
- if (vstruct_bytes(&q->field) < sizeof(*q)) {
- prt_printf(err, "wrong size (got %zu should be %zu)",
- vstruct_bytes(&q->field), sizeof(*q));
- return -BCH_ERR_invalid_sb_quota;
- }
-
- return 0;
-}
-
-static void bch2_sb_quota_to_text(struct printbuf *out, struct bch_sb *sb,
- struct bch_sb_field *f)
-{
- struct bch_sb_field_quota *q = field_to_type(f, quota);
- unsigned qtyp, counter;
-
- for (qtyp = 0; qtyp < ARRAY_SIZE(q->q); qtyp++) {
- prt_printf(out, "%s: flags %llx",
- bch2_quota_types[qtyp],
- le64_to_cpu(q->q[qtyp].flags));
-
- for (counter = 0; counter < Q_COUNTERS; counter++)
- prt_printf(out, " %s timelimit %u warnlimit %u",
- bch2_quota_counters[counter],
- le32_to_cpu(q->q[qtyp].c[counter].timelimit),
- le32_to_cpu(q->q[qtyp].c[counter].warnlimit));
-
- prt_newline(out);
- }
-}
-
-const struct bch_sb_field_ops bch_sb_field_ops_quota = {
- .validate = bch2_sb_quota_validate,
- .to_text = bch2_sb_quota_to_text,
-};
-
-int bch2_quota_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- int ret = 0;
-
- bkey_fsck_err_on(k.k->p.inode >= QTYP_NR,
- c, quota_type_invalid,
- "invalid quota type (%llu >= %u)",
- k.k->p.inode, QTYP_NR);
-fsck_err:
- return ret;
-}
-
-void bch2_quota_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- struct bkey_s_c_quota dq = bkey_s_c_to_quota(k);
- unsigned i;
-
- for (i = 0; i < Q_COUNTERS; i++)
- prt_printf(out, "%s hardlimit %llu softlimit %llu",
- bch2_quota_counters[i],
- le64_to_cpu(dq.v->c[i].hardlimit),
- le64_to_cpu(dq.v->c[i].softlimit));
-}
-
-#ifdef CONFIG_BCACHEFS_QUOTA
-
-#include <linux/cred.h>
-#include <linux/fs.h>
-#include <linux/quota.h>
-
-static void qc_info_to_text(struct printbuf *out, struct qc_info *i)
-{
- printbuf_tabstops_reset(out);
- printbuf_tabstop_push(out, 20);
-
- prt_printf(out, "i_fieldmask\t%x\n", i->i_fieldmask);
- prt_printf(out, "i_flags\t%u\n", i->i_flags);
- prt_printf(out, "i_spc_timelimit\t%u\n", i->i_spc_timelimit);
- prt_printf(out, "i_ino_timelimit\t%u\n", i->i_ino_timelimit);
- prt_printf(out, "i_rt_spc_timelimit\t%u\n", i->i_rt_spc_timelimit);
- prt_printf(out, "i_spc_warnlimit\t%u\n", i->i_spc_warnlimit);
- prt_printf(out, "i_ino_warnlimit\t%u\n", i->i_ino_warnlimit);
- prt_printf(out, "i_rt_spc_warnlimit\t%u\n", i->i_rt_spc_warnlimit);
-}
-
-static void qc_dqblk_to_text(struct printbuf *out, struct qc_dqblk *q)
-{
- printbuf_tabstops_reset(out);
- printbuf_tabstop_push(out, 20);
-
- prt_printf(out, "d_fieldmask\t%x\n", q->d_fieldmask);
- prt_printf(out, "d_spc_hardlimit\t%llu\n", q->d_spc_hardlimit);
- prt_printf(out, "d_spc_softlimit\t%llu\n", q->d_spc_softlimit);
-	prt_printf(out, "d_ino_hardlimit\t%llu\n", q->d_ino_hardlimit);
- prt_printf(out, "d_ino_softlimit\t%llu\n", q->d_ino_softlimit);
- prt_printf(out, "d_space\t%llu\n", q->d_space);
- prt_printf(out, "d_ino_count\t%llu\n", q->d_ino_count);
- prt_printf(out, "d_ino_timer\t%llu\n", q->d_ino_timer);
- prt_printf(out, "d_spc_timer\t%llu\n", q->d_spc_timer);
- prt_printf(out, "d_ino_warns\t%i\n", q->d_ino_warns);
- prt_printf(out, "d_spc_warns\t%i\n", q->d_spc_warns);
-}
-
-static inline unsigned __next_qtype(unsigned i, unsigned qtypes)
-{
- qtypes >>= i;
- return qtypes ? i + __ffs(qtypes) : QTYP_NR;
-}
-
-#define for_each_set_qtype(_c, _i, _q, _qtypes) \
- for (_i = 0; \
- (_i = __next_qtype(_i, _qtypes), \
- _q = &(_c)->quotas[_i], \
- _i < QTYP_NR); \
- _i++)
-
-static bool ignore_hardlimit(struct bch_memquota_type *q)
-{
- if (capable(CAP_SYS_RESOURCE))
- return true;
-#if 0
- struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
-
- return capable(CAP_SYS_RESOURCE) &&
- (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
- !(info->dqi_flags & DQF_ROOT_SQUASH));
-#endif
- return false;
-}
-
-enum quota_msg {
- SOFTWARN, /* Softlimit reached */
- SOFTLONGWARN, /* Grace time expired */
- HARDWARN, /* Hardlimit reached */
-
- HARDBELOW, /* Usage got below inode hardlimit */
- SOFTBELOW, /* Usage got below inode softlimit */
-};
-
-static int quota_nl[][Q_COUNTERS] = {
- [HARDWARN][Q_SPC] = QUOTA_NL_BHARDWARN,
- [SOFTLONGWARN][Q_SPC] = QUOTA_NL_BSOFTLONGWARN,
- [SOFTWARN][Q_SPC] = QUOTA_NL_BSOFTWARN,
- [HARDBELOW][Q_SPC] = QUOTA_NL_BHARDBELOW,
- [SOFTBELOW][Q_SPC] = QUOTA_NL_BSOFTBELOW,
-
- [HARDWARN][Q_INO] = QUOTA_NL_IHARDWARN,
- [SOFTLONGWARN][Q_INO] = QUOTA_NL_ISOFTLONGWARN,
- [SOFTWARN][Q_INO] = QUOTA_NL_ISOFTWARN,
- [HARDBELOW][Q_INO] = QUOTA_NL_IHARDBELOW,
- [SOFTBELOW][Q_INO] = QUOTA_NL_ISOFTBELOW,
-};
-
-struct quota_msgs {
- u8 nr;
- struct {
- u8 qtype;
- u8 msg;
- } m[QTYP_NR * Q_COUNTERS];
-};
-
-static void prepare_msg(unsigned qtype,
- enum quota_counters counter,
- struct quota_msgs *msgs,
- enum quota_msg msg_type)
-{
- BUG_ON(msgs->nr >= ARRAY_SIZE(msgs->m));
-
- msgs->m[msgs->nr].qtype = qtype;
- msgs->m[msgs->nr].msg = quota_nl[msg_type][counter];
- msgs->nr++;
-}
-
-static void prepare_warning(struct memquota_counter *qc,
- unsigned qtype,
- enum quota_counters counter,
- struct quota_msgs *msgs,
- enum quota_msg msg_type)
-{
- if (qc->warning_issued & (1 << msg_type))
- return;
-
- prepare_msg(qtype, counter, msgs, msg_type);
-}
-
-static void flush_warnings(struct bch_qid qid,
- struct super_block *sb,
- struct quota_msgs *msgs)
-{
- unsigned i;
-
- for (i = 0; i < msgs->nr; i++)
- quota_send_warning(make_kqid(&init_user_ns, msgs->m[i].qtype, qid.q[i]),
- sb->s_dev, msgs->m[i].msg);
-}
-
-static int bch2_quota_check_limit(struct bch_fs *c,
- unsigned qtype,
- struct bch_memquota *mq,
- struct quota_msgs *msgs,
- enum quota_counters counter,
- s64 v,
- enum quota_acct_mode mode)
-{
- struct bch_memquota_type *q = &c->quotas[qtype];
- struct memquota_counter *qc = &mq->c[counter];
- u64 n = qc->v + v;
-
- BUG_ON((s64) n < 0);
-
- if (mode == KEY_TYPE_QUOTA_NOCHECK)
- return 0;
-
- if (v <= 0) {
- if (n < qc->hardlimit &&
- (qc->warning_issued & (1 << HARDWARN))) {
- qc->warning_issued &= ~(1 << HARDWARN);
- prepare_msg(qtype, counter, msgs, HARDBELOW);
- }
-
- if (n < qc->softlimit &&
- (qc->warning_issued & (1 << SOFTWARN))) {
- qc->warning_issued &= ~(1 << SOFTWARN);
- prepare_msg(qtype, counter, msgs, SOFTBELOW);
- }
-
- qc->warning_issued = 0;
- return 0;
- }
-
- if (qc->hardlimit &&
- qc->hardlimit < n &&
- !ignore_hardlimit(q)) {
- prepare_warning(qc, qtype, counter, msgs, HARDWARN);
- return -EDQUOT;
- }
-
- if (qc->softlimit &&
- qc->softlimit < n) {
- if (qc->timer == 0) {
- qc->timer = ktime_get_real_seconds() + q->limits[counter].timelimit;
- prepare_warning(qc, qtype, counter, msgs, SOFTWARN);
- } else if (ktime_get_real_seconds() >= qc->timer &&
- !ignore_hardlimit(q)) {
- prepare_warning(qc, qtype, counter, msgs, SOFTLONGWARN);
- return -EDQUOT;
- }
- }
-
- return 0;
-}
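bch2_quota_check_limit() implements the classic quota semantics: frees always succeed and clear warnings, exceeding the hard limit fails immediately (unless the caller is allowed to ignore it), and exceeding the soft limit only starts a grace timer, turning into a hard failure once that timer expires. A condensed standalone sketch of that decision logic; the warning/netlink machinery is omitted and the accounting, which the real code does in the caller, is folded in here:

	#include <errno.h>
	#include <stdint.h>
	#include <time.h>

	struct counter {
		uint64_t v, softlimit, hardlimit;
		int64_t  timer;		/* 0 = grace period not running */
	};

	/*
	 * Charge 'delta' against a counter: hard limits fail immediately, soft
	 * limits only once the grace period has expired.
	 */
	static int quota_charge(struct counter *c, int64_t delta, unsigned grace_secs)
	{
		uint64_t n = c->v + delta;
		time_t now = time(NULL);

		if (delta <= 0) {		/* frees never fail */
			c->v = n;
			return 0;
		}

		if (c->hardlimit && n > c->hardlimit)
			return -EDQUOT;

		if (c->softlimit && n > c->softlimit) {
			if (!c->timer)
				c->timer = now + grace_secs;	/* start grace period */
			else if (now >= c->timer)
				return -EDQUOT;			/* grace expired */
		}

		c->v = n;
		return 0;
	}

	int main(void)
	{
		struct counter c = { .softlimit = 100, .hardlimit = 200 };

		return quota_charge(&c, 50, 7 * 24 * 60 * 60) ? 1 : 0;
	}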
-
-int bch2_quota_acct(struct bch_fs *c, struct bch_qid qid,
- enum quota_counters counter, s64 v,
- enum quota_acct_mode mode)
-{
- unsigned qtypes = enabled_qtypes(c);
- struct bch_memquota_type *q;
- struct bch_memquota *mq[QTYP_NR];
- struct quota_msgs msgs;
- unsigned i;
- int ret = 0;
-
- memset(&msgs, 0, sizeof(msgs));
-
- for_each_set_qtype(c, i, q, qtypes) {
- mq[i] = genradix_ptr_alloc(&q->table, qid.q[i], GFP_KERNEL);
- if (!mq[i])
- return -ENOMEM;
- }
-
- for_each_set_qtype(c, i, q, qtypes)
- mutex_lock_nested(&q->lock, i);
-
- for_each_set_qtype(c, i, q, qtypes) {
- ret = bch2_quota_check_limit(c, i, mq[i], &msgs, counter, v, mode);
- if (ret)
- goto err;
- }
-
- for_each_set_qtype(c, i, q, qtypes)
- mq[i]->c[counter].v += v;
-err:
- for_each_set_qtype(c, i, q, qtypes)
- mutex_unlock(&q->lock);
-
- flush_warnings(qid, c->vfs_sb, &msgs);
-
- return ret;
-}
-
-static void __bch2_quota_transfer(struct bch_memquota *src_q,
- struct bch_memquota *dst_q,
- enum quota_counters counter, s64 v)
-{
- BUG_ON(v > src_q->c[counter].v);
- BUG_ON(v + dst_q->c[counter].v < v);
-
- src_q->c[counter].v -= v;
- dst_q->c[counter].v += v;
-}
-
-int bch2_quota_transfer(struct bch_fs *c, unsigned qtypes,
- struct bch_qid dst,
- struct bch_qid src, u64 space,
- enum quota_acct_mode mode)
-{
- struct bch_memquota_type *q;
- struct bch_memquota *src_q[3], *dst_q[3];
- struct quota_msgs msgs;
- unsigned i;
- int ret = 0;
-
- qtypes &= enabled_qtypes(c);
-
- memset(&msgs, 0, sizeof(msgs));
-
- for_each_set_qtype(c, i, q, qtypes) {
- src_q[i] = genradix_ptr_alloc(&q->table, src.q[i], GFP_KERNEL);
- dst_q[i] = genradix_ptr_alloc(&q->table, dst.q[i], GFP_KERNEL);
- if (!src_q[i] || !dst_q[i])
- return -ENOMEM;
- }
-
- for_each_set_qtype(c, i, q, qtypes)
- mutex_lock_nested(&q->lock, i);
-
- for_each_set_qtype(c, i, q, qtypes) {
- ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_SPC,
- dst_q[i]->c[Q_SPC].v + space,
- mode);
- if (ret)
- goto err;
-
- ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_INO,
- dst_q[i]->c[Q_INO].v + 1,
- mode);
- if (ret)
- goto err;
- }
-
- for_each_set_qtype(c, i, q, qtypes) {
- __bch2_quota_transfer(src_q[i], dst_q[i], Q_SPC, space);
- __bch2_quota_transfer(src_q[i], dst_q[i], Q_INO, 1);
- }
-
-err:
- for_each_set_qtype(c, i, q, qtypes)
- mutex_unlock(&q->lock);
-
- flush_warnings(dst, c->vfs_sb, &msgs);
-
- return ret;
-}
-
-static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k,
- struct qc_dqblk *qdq)
-{
- struct bkey_s_c_quota dq;
- struct bch_memquota_type *q;
- struct bch_memquota *mq;
- unsigned i;
-
- BUG_ON(k.k->p.inode >= QTYP_NR);
-
- if (!((1U << k.k->p.inode) & enabled_qtypes(c)))
- return 0;
-
- switch (k.k->type) {
- case KEY_TYPE_quota:
- dq = bkey_s_c_to_quota(k);
- q = &c->quotas[k.k->p.inode];
-
- mutex_lock(&q->lock);
- mq = genradix_ptr_alloc(&q->table, k.k->p.offset, GFP_KERNEL);
- if (!mq) {
- mutex_unlock(&q->lock);
- return -ENOMEM;
- }
-
- for (i = 0; i < Q_COUNTERS; i++) {
- mq->c[i].hardlimit = le64_to_cpu(dq.v->c[i].hardlimit);
- mq->c[i].softlimit = le64_to_cpu(dq.v->c[i].softlimit);
- }
-
- if (qdq && qdq->d_fieldmask & QC_SPC_TIMER)
- mq->c[Q_SPC].timer = qdq->d_spc_timer;
- if (qdq && qdq->d_fieldmask & QC_SPC_WARNS)
- mq->c[Q_SPC].warns = qdq->d_spc_warns;
- if (qdq && qdq->d_fieldmask & QC_INO_TIMER)
- mq->c[Q_INO].timer = qdq->d_ino_timer;
- if (qdq && qdq->d_fieldmask & QC_INO_WARNS)
- mq->c[Q_INO].warns = qdq->d_ino_warns;
-
- mutex_unlock(&q->lock);
- }
-
- return 0;
-}
-
-void bch2_fs_quota_exit(struct bch_fs *c)
-{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
- genradix_free(&c->quotas[i].table);
-}
-
-void bch2_fs_quota_init(struct bch_fs *c)
-{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
- mutex_init(&c->quotas[i].lock);
-}
-
-static struct bch_sb_field_quota *bch2_sb_get_or_create_quota(struct bch_sb_handle *sb)
-{
- struct bch_sb_field_quota *sb_quota = bch2_sb_field_get(sb->sb, quota);
-
- if (sb_quota)
- return sb_quota;
-
- sb_quota = bch2_sb_field_resize(sb, quota, sizeof(*sb_quota) / sizeof(u64));
- if (sb_quota) {
- unsigned qtype, qc;
-
- for (qtype = 0; qtype < QTYP_NR; qtype++)
- for (qc = 0; qc < Q_COUNTERS; qc++)
- sb_quota->q[qtype].c[qc].timelimit =
- cpu_to_le32(7 * 24 * 60 * 60);
- }
-
- return sb_quota;
-}
-
-static void bch2_sb_quota_read(struct bch_fs *c)
-{
- struct bch_sb_field_quota *sb_quota;
- unsigned i, j;
-
- sb_quota = bch2_sb_field_get(c->disk_sb.sb, quota);
- if (!sb_quota)
- return;
-
- for (i = 0; i < QTYP_NR; i++) {
- struct bch_memquota_type *q = &c->quotas[i];
-
- for (j = 0; j < Q_COUNTERS; j++) {
- q->limits[j].timelimit =
- le32_to_cpu(sb_quota->q[i].c[j].timelimit);
- q->limits[j].warnlimit =
- le32_to_cpu(sb_quota->q[i].c[j].warnlimit);
- }
- }
-}
-
-static int bch2_fs_quota_read_inode(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct bch_inode_unpacked u;
- struct bch_snapshot_tree s_t;
- u32 tree = bch2_snapshot_tree(c, k.k->p.snapshot);
-
- int ret = bch2_snapshot_tree_lookup(trans, tree, &s_t);
- bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
- "%s: snapshot tree %u not found", __func__, tree);
- if (ret)
- return ret;
-
- if (!s_t.master_subvol)
- goto advance;
-
- ret = bch2_inode_find_by_inum_nowarn_trans(trans,
- (subvol_inum) {
- le32_to_cpu(s_t.master_subvol),
- k.k->p.offset,
- }, &u);
- /*
- * Inode might be deleted in this snapshot - the easiest way to handle
- * that is to just skip it here:
- */
- if (bch2_err_matches(ret, ENOENT))
- goto advance;
-
- if (ret)
- return ret;
-
- bch2_quota_acct(c, bch_qid(&u), Q_SPC, u.bi_sectors,
- KEY_TYPE_QUOTA_NOCHECK);
- bch2_quota_acct(c, bch_qid(&u), Q_INO, 1,
- KEY_TYPE_QUOTA_NOCHECK);
-advance:
- bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
- return 0;
-}
-
-int bch2_fs_quota_read(struct bch_fs *c)
-{
-
- mutex_lock(&c->sb_lock);
- struct bch_sb_field_quota *sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
- if (!sb_quota) {
- mutex_unlock(&c->sb_lock);
- return -BCH_ERR_ENOSPC_sb_quota;
- }
-
- bch2_sb_quota_read(c);
- mutex_unlock(&c->sb_lock);
-
- int ret = bch2_trans_run(c,
- for_each_btree_key(trans, iter, BTREE_ID_quotas, POS_MIN,
- BTREE_ITER_prefetch, k,
- __bch2_quota_set(c, k, NULL)) ?:
- for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN,
- BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
- bch2_fs_quota_read_inode(trans, &iter, k)));
- bch_err_fn(c, ret);
- return ret;
-}
-
-/* Enable/disable/delete quotas for an entire filesystem: */
-
-static int bch2_quota_enable(struct super_block *sb, unsigned uflags)
-{
- struct bch_fs *c = sb->s_fs_info;
- struct bch_sb_field_quota *sb_quota;
- int ret = 0;
-
- if (sb->s_flags & SB_RDONLY)
- return -EROFS;
-
- /* Accounting must be enabled at mount time: */
- if (uflags & (FS_QUOTA_UDQ_ACCT|FS_QUOTA_GDQ_ACCT|FS_QUOTA_PDQ_ACCT))
- return -EINVAL;
-
- /* Can't enable enforcement without accounting: */
- if ((uflags & FS_QUOTA_UDQ_ENFD) && !c->opts.usrquota)
- return -EINVAL;
-
- if ((uflags & FS_QUOTA_GDQ_ENFD) && !c->opts.grpquota)
- return -EINVAL;
-
- if (uflags & FS_QUOTA_PDQ_ENFD && !c->opts.prjquota)
- return -EINVAL;
-
- mutex_lock(&c->sb_lock);
- sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
- if (!sb_quota) {
- ret = -BCH_ERR_ENOSPC_sb_quota;
- goto unlock;
- }
-
- if (uflags & FS_QUOTA_UDQ_ENFD)
- SET_BCH_SB_USRQUOTA(c->disk_sb.sb, true);
-
- if (uflags & FS_QUOTA_GDQ_ENFD)
- SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, true);
-
- if (uflags & FS_QUOTA_PDQ_ENFD)
- SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, true);
-
- bch2_write_super(c);
-unlock:
- mutex_unlock(&c->sb_lock);
-
- return bch2_err_class(ret);
-}
-
-static int bch2_quota_disable(struct super_block *sb, unsigned uflags)
-{
- struct bch_fs *c = sb->s_fs_info;
-
- if (sb->s_flags & SB_RDONLY)
- return -EROFS;
-
- mutex_lock(&c->sb_lock);
- if (uflags & FS_QUOTA_UDQ_ENFD)
- SET_BCH_SB_USRQUOTA(c->disk_sb.sb, false);
-
- if (uflags & FS_QUOTA_GDQ_ENFD)
- SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, false);
-
- if (uflags & FS_QUOTA_PDQ_ENFD)
- SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, false);
-
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-
- return 0;
-}
-
-static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
-{
- struct bch_fs *c = sb->s_fs_info;
- int ret;
-
- if (sb->s_flags & SB_RDONLY)
- return -EROFS;
-
- if (uflags & FS_USER_QUOTA) {
- if (c->opts.usrquota)
- return -EINVAL;
-
- ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
- POS(QTYP_USR, 0),
- POS(QTYP_USR, U64_MAX),
- 0, NULL);
- if (ret)
- return ret;
- }
-
- if (uflags & FS_GROUP_QUOTA) {
- if (c->opts.grpquota)
- return -EINVAL;
-
- ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
- POS(QTYP_GRP, 0),
- POS(QTYP_GRP, U64_MAX),
- 0, NULL);
- if (ret)
- return ret;
- }
-
- if (uflags & FS_PROJ_QUOTA) {
- if (c->opts.prjquota)
- return -EINVAL;
-
- ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
- POS(QTYP_PRJ, 0),
- POS(QTYP_PRJ, U64_MAX),
- 0, NULL);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-/*
- * Return quota status information, such as enforcement status, quota file
- * inode numbers, etc.
- */
-static int bch2_quota_get_state(struct super_block *sb, struct qc_state *state)
-{
- struct bch_fs *c = sb->s_fs_info;
- unsigned qtypes = enabled_qtypes(c);
- unsigned i;
-
- memset(state, 0, sizeof(*state));
-
- for (i = 0; i < QTYP_NR; i++) {
- state->s_state[i].flags |= QCI_SYSFILE;
-
- if (!(qtypes & (1 << i)))
- continue;
-
- state->s_state[i].flags |= QCI_ACCT_ENABLED;
-
- state->s_state[i].spc_timelimit = c->quotas[i].limits[Q_SPC].timelimit;
- state->s_state[i].spc_warnlimit = c->quotas[i].limits[Q_SPC].warnlimit;
-
- state->s_state[i].ino_timelimit = c->quotas[i].limits[Q_INO].timelimit;
- state->s_state[i].ino_warnlimit = c->quotas[i].limits[Q_INO].warnlimit;
- }
-
- return 0;
-}
-
-/*
- * Adjust quota timers & warnings
- */
-static int bch2_quota_set_info(struct super_block *sb, int type,
- struct qc_info *info)
-{
- struct bch_fs *c = sb->s_fs_info;
- struct bch_sb_field_quota *sb_quota;
- int ret = 0;
-
- if (0) {
- struct printbuf buf = PRINTBUF;
-
- qc_info_to_text(&buf, info);
- pr_info("setting:\n%s", buf.buf);
- printbuf_exit(&buf);
- }
-
- if (sb->s_flags & SB_RDONLY)
- return -EROFS;
-
- if (type >= QTYP_NR)
- return -EINVAL;
-
- if (!((1 << type) & enabled_qtypes(c)))
- return -ESRCH;
-
- if (info->i_fieldmask &
- ~(QC_SPC_TIMER|QC_INO_TIMER|QC_SPC_WARNS|QC_INO_WARNS))
- return -EINVAL;
-
- mutex_lock(&c->sb_lock);
- sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
- if (!sb_quota) {
- ret = -BCH_ERR_ENOSPC_sb_quota;
- goto unlock;
- }
-
- if (info->i_fieldmask & QC_SPC_TIMER)
- sb_quota->q[type].c[Q_SPC].timelimit =
- cpu_to_le32(info->i_spc_timelimit);
-
- if (info->i_fieldmask & QC_SPC_WARNS)
- sb_quota->q[type].c[Q_SPC].warnlimit =
- cpu_to_le32(info->i_spc_warnlimit);
-
- if (info->i_fieldmask & QC_INO_TIMER)
- sb_quota->q[type].c[Q_INO].timelimit =
- cpu_to_le32(info->i_ino_timelimit);
-
- if (info->i_fieldmask & QC_INO_WARNS)
- sb_quota->q[type].c[Q_INO].warnlimit =
- cpu_to_le32(info->i_ino_warnlimit);
-
- bch2_sb_quota_read(c);
-
- bch2_write_super(c);
-unlock:
- mutex_unlock(&c->sb_lock);
-
- return bch2_err_class(ret);
-}
-
-/* Get/set individual quotas: */
-
-static void __bch2_quota_get(struct qc_dqblk *dst, struct bch_memquota *src)
-{
- dst->d_space = src->c[Q_SPC].v << 9;
- dst->d_spc_hardlimit = src->c[Q_SPC].hardlimit << 9;
- dst->d_spc_softlimit = src->c[Q_SPC].softlimit << 9;
- dst->d_spc_timer = src->c[Q_SPC].timer;
- dst->d_spc_warns = src->c[Q_SPC].warns;
-
- dst->d_ino_count = src->c[Q_INO].v;
- dst->d_ino_hardlimit = src->c[Q_INO].hardlimit;
- dst->d_ino_softlimit = src->c[Q_INO].softlimit;
- dst->d_ino_timer = src->c[Q_INO].timer;
- dst->d_ino_warns = src->c[Q_INO].warns;
-}
-
-static int bch2_get_quota(struct super_block *sb, struct kqid kqid,
- struct qc_dqblk *qdq)
-{
- struct bch_fs *c = sb->s_fs_info;
- struct bch_memquota_type *q = &c->quotas[kqid.type];
- qid_t qid = from_kqid(&init_user_ns, kqid);
- struct bch_memquota *mq;
-
- memset(qdq, 0, sizeof(*qdq));
-
- mutex_lock(&q->lock);
- mq = genradix_ptr(&q->table, qid);
- if (mq)
- __bch2_quota_get(qdq, mq);
- mutex_unlock(&q->lock);
-
- return 0;
-}
-
-static int bch2_get_next_quota(struct super_block *sb, struct kqid *kqid,
- struct qc_dqblk *qdq)
-{
- struct bch_fs *c = sb->s_fs_info;
- struct bch_memquota_type *q = &c->quotas[kqid->type];
- qid_t qid = from_kqid(&init_user_ns, *kqid);
- struct genradix_iter iter;
- struct bch_memquota *mq;
- int ret = 0;
-
- mutex_lock(&q->lock);
-
- genradix_for_each_from(&q->table, iter, mq, qid)
- if (memcmp(mq, page_address(ZERO_PAGE(0)), sizeof(*mq))) {
- __bch2_quota_get(qdq, mq);
- *kqid = make_kqid(current_user_ns(), kqid->type, iter.pos);
- goto found;
- }
-
- ret = -ENOENT;
-found:
- mutex_unlock(&q->lock);
- return bch2_err_class(ret);
-}
-
-static int bch2_set_quota_trans(struct btree_trans *trans,
- struct bkey_i_quota *new_quota,
- struct qc_dqblk *qdq)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret;
-
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_quotas, new_quota->k.p,
- BTREE_ITER_slots|BTREE_ITER_intent);
- ret = bkey_err(k);
- if (unlikely(ret))
- return ret;
-
- if (k.k->type == KEY_TYPE_quota)
- new_quota->v = *bkey_s_c_to_quota(k).v;
-
- if (qdq->d_fieldmask & QC_SPC_SOFT)
- new_quota->v.c[Q_SPC].softlimit = cpu_to_le64(qdq->d_spc_softlimit >> 9);
- if (qdq->d_fieldmask & QC_SPC_HARD)
- new_quota->v.c[Q_SPC].hardlimit = cpu_to_le64(qdq->d_spc_hardlimit >> 9);
-
- if (qdq->d_fieldmask & QC_INO_SOFT)
- new_quota->v.c[Q_INO].softlimit = cpu_to_le64(qdq->d_ino_softlimit);
- if (qdq->d_fieldmask & QC_INO_HARD)
- new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);
-
- ret = bch2_trans_update(trans, &iter, &new_quota->k_i, 0);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static int bch2_set_quota(struct super_block *sb, struct kqid qid,
- struct qc_dqblk *qdq)
-{
- struct bch_fs *c = sb->s_fs_info;
- struct bkey_i_quota new_quota;
- int ret;
-
- if (0) {
- struct printbuf buf = PRINTBUF;
-
- qc_dqblk_to_text(&buf, qdq);
- pr_info("setting:\n%s", buf.buf);
- printbuf_exit(&buf);
- }
-
- if (sb->s_flags & SB_RDONLY)
- return -EROFS;
-
- bkey_quota_init(&new_quota.k_i);
- new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));
-
- ret = bch2_trans_commit_do(c, NULL, NULL, 0,
- bch2_set_quota_trans(trans, &new_quota, qdq)) ?:
- __bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i), qdq);
-
- return bch2_err_class(ret);
-}
-
-const struct quotactl_ops bch2_quotactl_operations = {
- .quota_enable = bch2_quota_enable,
- .quota_disable = bch2_quota_disable,
- .rm_xquota = bch2_quota_remove,
-
- .get_state = bch2_quota_get_state,
- .set_info = bch2_quota_set_info,
-
- .get_dqblk = bch2_get_quota,
- .get_nextdqblk = bch2_get_next_quota,
- .set_dqblk = bch2_set_quota,
-};
-
-#endif /* CONFIG_BCACHEFS_QUOTA */
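
The enforcement in bch2_quota_check_limit() above follows the classic disk-quota pattern: exceeding a hard limit fails the allocation immediately, while exceeding a soft limit only starts a grace timer, and enforcement kicks in once that timer expires. A minimal, self-contained sketch of that decision flow (plain C; the names and the time source are illustrative, not bcachefs code):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

struct counter {
	uint64_t v, hardlimit, softlimit;	/* 0 == limit not set */
	int64_t  timer;				/* 0 == no grace timer running */
};

/* Return true if allocating @delta more units should be refused. */
static bool over_quota(struct counter *c, uint64_t delta, uint32_t grace_seconds)
{
	uint64_t n   = c->v + delta;
	int64_t  now = (int64_t) time(NULL);

	if (c->hardlimit && n > c->hardlimit)
		return true;				/* hard limit: refuse immediately */

	if (c->softlimit && n > c->softlimit) {
		if (!c->timer)
			c->timer = now + grace_seconds;	/* start the grace period */
		else if (now >= c->timer)
			return true;			/* grace period expired */
	} else {
		c->timer = 0;				/* back under the soft limit */
	}

	return false;
}
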
diff --git a/fs/bcachefs/quota.h b/fs/bcachefs/quota.h
deleted file mode 100644
index a62abcc5332a..000000000000
--- a/fs/bcachefs/quota.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_QUOTA_H
-#define _BCACHEFS_QUOTA_H
-
-#include "inode.h"
-#include "quota_types.h"
-
-enum bch_validate_flags;
-extern const struct bch_sb_field_ops bch_sb_field_ops_quota;
-
-int bch2_quota_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-void bch2_quota_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-
-#define bch2_bkey_ops_quota ((struct bkey_ops) { \
- .key_validate = bch2_quota_validate, \
- .val_to_text = bch2_quota_to_text, \
- .min_val_size = 32, \
-})
-
-static inline struct bch_qid bch_qid(struct bch_inode_unpacked *u)
-{
- return (struct bch_qid) {
- .q[QTYP_USR] = u->bi_uid,
- .q[QTYP_GRP] = u->bi_gid,
- .q[QTYP_PRJ] = u->bi_project ? u->bi_project - 1 : 0,
- };
-}
-
-static inline unsigned enabled_qtypes(struct bch_fs *c)
-{
- return ((c->opts.usrquota << QTYP_USR)|
- (c->opts.grpquota << QTYP_GRP)|
- (c->opts.prjquota << QTYP_PRJ));
-}
-
-#ifdef CONFIG_BCACHEFS_QUOTA
-
-int bch2_quota_acct(struct bch_fs *, struct bch_qid, enum quota_counters,
- s64, enum quota_acct_mode);
-
-int bch2_quota_transfer(struct bch_fs *, unsigned, struct bch_qid,
- struct bch_qid, u64, enum quota_acct_mode);
-
-void bch2_fs_quota_exit(struct bch_fs *);
-void bch2_fs_quota_init(struct bch_fs *);
-int bch2_fs_quota_read(struct bch_fs *);
-
-extern const struct quotactl_ops bch2_quotactl_operations;
-
-#else
-
-static inline int bch2_quota_acct(struct bch_fs *c, struct bch_qid qid,
- enum quota_counters counter, s64 v,
- enum quota_acct_mode mode)
-{
- return 0;
-}
-
-static inline int bch2_quota_transfer(struct bch_fs *c, unsigned qtypes,
- struct bch_qid dst,
- struct bch_qid src, u64 space,
- enum quota_acct_mode mode)
-{
- return 0;
-}
-
-static inline void bch2_fs_quota_exit(struct bch_fs *c) {}
-static inline void bch2_fs_quota_init(struct bch_fs *c) {}
-static inline int bch2_fs_quota_read(struct bch_fs *c) { return 0; }
-
-#endif
-
-#endif /* _BCACHEFS_QUOTA_H */
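
enabled_qtypes() above packs the usrquota/grpquota/prjquota mount options into a three-bit mask, and the quota code only ever touches the types whose bit is set. A small standalone sketch of that bitmask iteration (illustrative names only):

#include <stdio.h>

enum { QTYP_USR, QTYP_GRP, QTYP_PRJ, QTYP_NR };

static const char * const qtype_names[QTYP_NR] = { "usr", "grp", "prj" };

int main(void)
{
	/* e.g. usrquota and prjquota enabled, grpquota disabled */
	unsigned qtypes = (1U << QTYP_USR) | (1U << QTYP_PRJ);

	for (unsigned i = 0; i < QTYP_NR; i++)
		if (qtypes & (1U << i))
			printf("quota type %s enabled\n", qtype_names[i]);
	return 0;
}
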
diff --git a/fs/bcachefs/quota_format.h b/fs/bcachefs/quota_format.h
deleted file mode 100644
index dc34347ef6c7..000000000000
--- a/fs/bcachefs/quota_format.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_QUOTA_FORMAT_H
-#define _BCACHEFS_QUOTA_FORMAT_H
-
-/* KEY_TYPE_quota: */
-
-enum quota_types {
- QTYP_USR = 0,
- QTYP_GRP = 1,
- QTYP_PRJ = 2,
- QTYP_NR = 3,
-};
-
-enum quota_counters {
- Q_SPC = 0,
- Q_INO = 1,
- Q_COUNTERS = 2,
-};
-
-struct bch_quota_counter {
- __le64 hardlimit;
- __le64 softlimit;
-};
-
-struct bch_quota {
- struct bch_val v;
- struct bch_quota_counter c[Q_COUNTERS];
-} __packed __aligned(8);
-
-/* BCH_SB_FIELD_quota: */
-
-struct bch_sb_quota_counter {
- __le32 timelimit;
- __le32 warnlimit;
-};
-
-struct bch_sb_quota_type {
- __le64 flags;
- struct bch_sb_quota_counter c[Q_COUNTERS];
-};
-
-struct bch_sb_field_quota {
- struct bch_sb_field field;
- struct bch_sb_quota_type q[QTYP_NR];
-} __packed __aligned(8);
-
-#endif /* _BCACHEFS_QUOTA_FORMAT_H */
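
The structures above describe on-disk data, so every field is a fixed-width little-endian integer and the layout must never change silently. A hedged sketch of the kind of compile-time size check that guards such formats, using mirror types rather than the real kernel structs:

#include <assert.h>
#include <stdint.h>

/* Mirrors of the counters above, with plain fixed-width types: */
struct quota_counter    { uint64_t hardlimit, softlimit; };	/* per-key limits */
struct sb_quota_counter { uint32_t timelimit, warnlimit; };	/* superblock limits */

static_assert(sizeof(struct quota_counter) == 16,
	      "KEY_TYPE_quota counter is two 64-bit limits");
static_assert(sizeof(struct sb_quota_counter) == 8,
	      "superblock counter is two 32-bit limits");
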
diff --git a/fs/bcachefs/quota_types.h b/fs/bcachefs/quota_types.h
deleted file mode 100644
index 6a136083d389..000000000000
--- a/fs/bcachefs/quota_types.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_QUOTA_TYPES_H
-#define _BCACHEFS_QUOTA_TYPES_H
-
-#include <linux/generic-radix-tree.h>
-
-struct bch_qid {
- u32 q[QTYP_NR];
-};
-
-enum quota_acct_mode {
- KEY_TYPE_QUOTA_PREALLOC,
- KEY_TYPE_QUOTA_WARN,
- KEY_TYPE_QUOTA_NOCHECK,
-};
-
-struct memquota_counter {
- u64 v;
- u64 hardlimit;
- u64 softlimit;
- s64 timer;
- int warns;
- int warning_issued;
-};
-
-struct bch_memquota {
- struct memquota_counter c[Q_COUNTERS];
-};
-
-typedef GENRADIX(struct bch_memquota) bch_memquota_table;
-
-struct quota_limit {
- u32 timelimit;
- u32 warnlimit;
-};
-
-struct bch_memquota_type {
- struct quota_limit limits[Q_COUNTERS];
- bch_memquota_table table;
- struct mutex lock;
-};
-
-#endif /* _BCACHEFS_QUOTA_TYPES_H */
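
The in-memory table above is a generic radix tree keyed by quota ID: entries are allocated lazily on first use and come back zeroed, so an ID that was never charged simply reads as zero usage. A hedged kernel-side sketch of that access pattern (the struct and function names are illustrative; the genradix calls are the same ones used in quota.c):

#include <linux/errno.h>
#include <linux/generic-radix-tree.h>
#include <linux/slab.h>
#include <linux/types.h>

struct counter { u64 sectors; };

struct counter_table {
	GENRADIX(struct counter) table;		/* sparse array keyed by quota ID */
};

/* Assumes the containing object was zero-initialized (e.g. kzalloc()). */
static int charge(struct counter_table *t, u32 qid, u64 sectors)
{
	struct counter *c = genradix_ptr_alloc(&t->table, qid, GFP_KERNEL);

	if (!c)
		return -ENOMEM;
	c->sectors += sectors;			/* new entries start out zeroed */
	return 0;
}

static u64 usage(struct counter_table *t, u32 qid)
{
	struct counter *c = genradix_ptr(&t->table, qid);

	return c ? c->sectors : 0;		/* never-allocated IDs read as 0 */
}
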
diff --git a/fs/bcachefs/rcu_pending.c b/fs/bcachefs/rcu_pending.c
deleted file mode 100644
index 40a20192eee8..000000000000
--- a/fs/bcachefs/rcu_pending.c
+++ /dev/null
@@ -1,650 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define pr_fmt(fmt) "%s() " fmt "\n", __func__
-
-#include <linux/generic-radix-tree.h>
-#include <linux/mm.h>
-#include <linux/percpu.h>
-#include <linux/slab.h>
-#include <linux/srcu.h>
-#include <linux/vmalloc.h>
-
-#include "rcu_pending.h"
-#include "darray.h"
-#include "util.h"
-
-#define static_array_for_each(_a, _i) \
- for (typeof(&(_a)[0]) _i = _a; \
- _i < (_a) + ARRAY_SIZE(_a); \
- _i++)
-
-enum rcu_pending_special {
- RCU_PENDING_KVFREE = 1,
- RCU_PENDING_CALL_RCU = 2,
-};
-
-#define RCU_PENDING_KVFREE_FN ((rcu_pending_process_fn) (ulong) RCU_PENDING_KVFREE)
-#define RCU_PENDING_CALL_RCU_FN ((rcu_pending_process_fn) (ulong) RCU_PENDING_CALL_RCU)
-
-static inline unsigned long __get_state_synchronize_rcu(struct srcu_struct *ssp)
-{
- return ssp
- ? get_state_synchronize_srcu(ssp)
- : get_state_synchronize_rcu();
-}
-
-static inline unsigned long __start_poll_synchronize_rcu(struct srcu_struct *ssp)
-{
- return ssp
- ? start_poll_synchronize_srcu(ssp)
- : start_poll_synchronize_rcu();
-}
-
-static inline bool __poll_state_synchronize_rcu(struct srcu_struct *ssp, unsigned long cookie)
-{
- return ssp
- ? poll_state_synchronize_srcu(ssp, cookie)
- : poll_state_synchronize_rcu(cookie);
-}
-
-static inline void __rcu_barrier(struct srcu_struct *ssp)
-{
- return ssp
- ? srcu_barrier(ssp)
- : rcu_barrier();
-}
-
-static inline void __call_rcu(struct srcu_struct *ssp, struct rcu_head *rhp,
- rcu_callback_t func)
-{
- if (ssp)
- call_srcu(ssp, rhp, func);
- else
- call_rcu(rhp, func);
-}
-
-struct rcu_pending_seq {
- /*
- * We're using a radix tree like a vector - we're just pushing elements
- * onto the end; we're using a radix tree instead of an actual vector to
- * avoid reallocation overhead
- */
- GENRADIX(struct rcu_head *) objs;
- size_t nr;
- struct rcu_head **cursor;
- unsigned long seq;
-};
-
-struct rcu_pending_list {
- struct rcu_head *head;
- struct rcu_head *tail;
- unsigned long seq;
-};
-
-struct rcu_pending_pcpu {
- struct rcu_pending *parent;
- spinlock_t lock;
- int cpu;
-
- /*
- * We can't bound the number of unprocessed gp sequence numbers, and we
- * can't efficiently merge radix trees for expired grace periods, so we
- * need darray/vector:
- */
- DARRAY_PREALLOCATED(struct rcu_pending_seq, 4) objs;
-
- /* Third entry is for expired objects: */
- struct rcu_pending_list lists[NUM_ACTIVE_RCU_POLL_OLDSTATE + 1];
-
- struct rcu_head cb;
- bool cb_armed;
- struct work_struct work;
-};
-
-static bool __rcu_pending_has_pending(struct rcu_pending_pcpu *p)
-{
- if (p->objs.nr)
- return true;
-
- static_array_for_each(p->lists, i)
- if (i->head)
- return true;
-
- return false;
-}
-
-static void rcu_pending_list_merge(struct rcu_pending_list *l1,
- struct rcu_pending_list *l2)
-{
-#ifdef __KERNEL__
- if (!l1->head)
- l1->head = l2->head;
- else
- l1->tail->next = l2->head;
-#else
- if (!l1->head)
- l1->head = l2->head;
- else
- l1->tail->next.next = (void *) l2->head;
-#endif
-
- l1->tail = l2->tail;
- l2->head = l2->tail = NULL;
-}
-
-static void rcu_pending_list_add(struct rcu_pending_list *l,
- struct rcu_head *n)
-{
-#ifdef __KERNEL__
- if (!l->head)
- l->head = n;
- else
- l->tail->next = n;
- l->tail = n;
- n->next = NULL;
-#else
- if (!l->head)
- l->head = n;
- else
- l->tail->next.next = (void *) n;
- l->tail = n;
- n->next.next = NULL;
-#endif
-}
-
-static void merge_expired_lists(struct rcu_pending_pcpu *p)
-{
- struct rcu_pending_list *expired = &p->lists[NUM_ACTIVE_RCU_POLL_OLDSTATE];
-
- for (struct rcu_pending_list *i = p->lists; i < expired; i++)
- if (i->head && __poll_state_synchronize_rcu(p->parent->srcu, i->seq))
- rcu_pending_list_merge(expired, i);
-}
-
-#ifndef __KERNEL__
-static inline void kfree_bulk(size_t nr, void ** p)
-{
- while (nr--)
- kfree(*p);
-}
-
-#define local_irq_save(flags) \
-do { \
- flags = 0; \
-} while (0)
-#endif
-
-static noinline void __process_finished_items(struct rcu_pending *pending,
- struct rcu_pending_pcpu *p,
- unsigned long flags)
-{
- struct rcu_pending_list *expired = &p->lists[NUM_ACTIVE_RCU_POLL_OLDSTATE];
- struct rcu_pending_seq objs = {};
- struct rcu_head *list = NULL;
-
- if (p->objs.nr &&
- __poll_state_synchronize_rcu(pending->srcu, p->objs.data[0].seq)) {
- objs = p->objs.data[0];
- darray_remove_item(&p->objs, p->objs.data);
- }
-
- merge_expired_lists(p);
-
- list = expired->head;
- expired->head = expired->tail = NULL;
-
- spin_unlock_irqrestore(&p->lock, flags);
-
- switch ((ulong) pending->process) {
- case RCU_PENDING_KVFREE:
- for (size_t i = 0; i < objs.nr; ) {
- size_t nr_this_node = min(GENRADIX_NODE_SIZE / sizeof(void *), objs.nr - i);
-
- kfree_bulk(nr_this_node, (void **) genradix_ptr(&objs.objs, i));
- i += nr_this_node;
- }
- genradix_free(&objs.objs);
-
- while (list) {
- struct rcu_head *obj = list;
-#ifdef __KERNEL__
- list = obj->next;
-#else
- list = (void *) obj->next.next;
-#endif
-
- /*
- * low bit of pointer indicates whether rcu_head needs
- * to be freed - kvfree_rcu_mightsleep()
- */
- BUILD_BUG_ON(ARCH_SLAB_MINALIGN == 0);
-
- void *ptr = (void *)(((unsigned long) obj->func) & ~1UL);
- bool free_head = ((unsigned long) obj->func) & 1UL;
-
- kvfree(ptr);
- if (free_head)
- kfree(obj);
- }
-
- break;
-
- case RCU_PENDING_CALL_RCU:
- for (size_t i = 0; i < objs.nr; i++) {
- struct rcu_head *obj = *genradix_ptr(&objs.objs, i);
- obj->func(obj);
- }
- genradix_free(&objs.objs);
-
- while (list) {
- struct rcu_head *obj = list;
-#ifdef __KERNEL__
- list = obj->next;
-#else
- list = (void *) obj->next.next;
-#endif
- obj->func(obj);
- }
- break;
-
- default:
- for (size_t i = 0; i < objs.nr; i++)
- pending->process(pending, *genradix_ptr(&objs.objs, i));
- genradix_free(&objs.objs);
-
- while (list) {
- struct rcu_head *obj = list;
-#ifdef __KERNEL__
- list = obj->next;
-#else
- list = (void *) obj->next.next;
-#endif
- pending->process(pending, obj);
- }
- break;
- }
-}
-
-static bool process_finished_items(struct rcu_pending *pending,
- struct rcu_pending_pcpu *p,
- unsigned long flags)
-{
- /*
- * XXX: we should grab the gp seq once and avoid multiple function
-	 * calls; this is called from the __rcu_pending_enqueue() fastpath in
- * may_sleep==true mode
- */
- if ((p->objs.nr && __poll_state_synchronize_rcu(pending->srcu, p->objs.data[0].seq)) ||
- (p->lists[0].head && __poll_state_synchronize_rcu(pending->srcu, p->lists[0].seq)) ||
- (p->lists[1].head && __poll_state_synchronize_rcu(pending->srcu, p->lists[1].seq)) ||
- p->lists[2].head) {
- __process_finished_items(pending, p, flags);
- return true;
- }
-
- return false;
-}
-
-static void rcu_pending_work(struct work_struct *work)
-{
- struct rcu_pending_pcpu *p =
- container_of(work, struct rcu_pending_pcpu, work);
- struct rcu_pending *pending = p->parent;
- unsigned long flags;
-
- do {
- spin_lock_irqsave(&p->lock, flags);
- } while (process_finished_items(pending, p, flags));
-
- spin_unlock_irqrestore(&p->lock, flags);
-}
-
-static void rcu_pending_rcu_cb(struct rcu_head *rcu)
-{
- struct rcu_pending_pcpu *p = container_of(rcu, struct rcu_pending_pcpu, cb);
-
- schedule_work_on(p->cpu, &p->work);
-
- unsigned long flags;
- spin_lock_irqsave(&p->lock, flags);
- if (__rcu_pending_has_pending(p)) {
- spin_unlock_irqrestore(&p->lock, flags);
- __call_rcu(p->parent->srcu, &p->cb, rcu_pending_rcu_cb);
- } else {
- p->cb_armed = false;
- spin_unlock_irqrestore(&p->lock, flags);
- }
-}
-
-static __always_inline struct rcu_pending_seq *
-get_object_radix(struct rcu_pending_pcpu *p, unsigned long seq)
-{
- darray_for_each_reverse(p->objs, objs)
- if (objs->seq == seq)
- return objs;
-
- if (darray_push_gfp(&p->objs, ((struct rcu_pending_seq) { .seq = seq }), GFP_ATOMIC))
- return NULL;
-
- return &darray_last(p->objs);
-}
-
-static noinline bool
-rcu_pending_enqueue_list(struct rcu_pending_pcpu *p, unsigned long seq,
- struct rcu_head *head, void *ptr,
- unsigned long *flags)
-{
- if (ptr) {
- if (!head) {
- /*
- * kvfree_rcu_mightsleep(): we weren't passed an
- * rcu_head, but we need one: use the low bit of the
-			 * pointer to free to flag that the head needs to be
- * freed as well:
- */
- ptr = (void *)(((unsigned long) ptr)|1UL);
- head = kmalloc(sizeof(*head), __GFP_NOWARN);
- if (!head) {
- spin_unlock_irqrestore(&p->lock, *flags);
- head = kmalloc(sizeof(*head), GFP_KERNEL|__GFP_NOFAIL);
- /*
- * dropped lock, did GFP_KERNEL allocation,
- * check for gp expiration
- */
- if (unlikely(__poll_state_synchronize_rcu(p->parent->srcu, seq))) {
- kvfree(--ptr);
- kfree(head);
- spin_lock_irqsave(&p->lock, *flags);
- return false;
- }
- }
- }
-
- head->func = ptr;
- }
-again:
- for (struct rcu_pending_list *i = p->lists;
- i < p->lists + NUM_ACTIVE_RCU_POLL_OLDSTATE; i++) {
- if (i->seq == seq) {
- rcu_pending_list_add(i, head);
- return false;
- }
- }
-
- for (struct rcu_pending_list *i = p->lists;
- i < p->lists + NUM_ACTIVE_RCU_POLL_OLDSTATE; i++) {
- if (!i->head) {
- i->seq = seq;
- rcu_pending_list_add(i, head);
- return true;
- }
- }
-
- merge_expired_lists(p);
- goto again;
-}
-
-/*
- * __rcu_pending_enqueue: enqueue a pending RCU item, to be processed (via
- * pending->process) once a grace period elapses.
- *
- * Attempt to enqueue items onto a radix tree; if memory allocation fails, fall
- * back to a linked list.
- *
- * - If @ptr is NULL, we're enqueuing an item for a generic @pending with a
- * process callback
- *
- * - If @ptr and @head are both not NULL, we're kvfree_rcu()
- *
- * - If @ptr is not NULL and @head is, we're kvfree_rcu_mightsleep()
- *
- * - If @may_sleep is true, will do GFP_KERNEL memory allocations and process
- * expired items.
- */
-static __always_inline void
-__rcu_pending_enqueue(struct rcu_pending *pending, struct rcu_head *head,
- void *ptr, bool may_sleep)
-{
-
- struct rcu_pending_pcpu *p;
- struct rcu_pending_seq *objs;
- struct genradix_node *new_node = NULL;
- unsigned long seq, flags;
- bool start_gp = false;
-
- BUG_ON((ptr != NULL) != (pending->process == RCU_PENDING_KVFREE_FN));
-
- local_irq_save(flags);
- p = this_cpu_ptr(pending->p);
- spin_lock(&p->lock);
- seq = __get_state_synchronize_rcu(pending->srcu);
-restart:
- if (may_sleep &&
- unlikely(process_finished_items(pending, p, flags)))
- goto check_expired;
-
- /*
- * In kvfree_rcu() mode, the radix tree is only for slab pointers so
- * that we can do kfree_bulk() - vmalloc pointers always use the linked
- * list:
- */
- if (ptr && unlikely(is_vmalloc_addr(ptr)))
- goto list_add;
-
- objs = get_object_radix(p, seq);
- if (unlikely(!objs))
- goto list_add;
-
- if (unlikely(!objs->cursor)) {
- /*
- * New radix tree nodes must be added under @p->lock because the
- * tree root is in a darray that can be resized (typically,
- * genradix supports concurrent unlocked allocation of new
- * nodes) - hence preallocation and the retry loop:
- */
- objs->cursor = genradix_ptr_alloc_preallocated_inlined(&objs->objs,
- objs->nr, &new_node, GFP_ATOMIC|__GFP_NOWARN);
- if (unlikely(!objs->cursor)) {
- if (may_sleep) {
- spin_unlock_irqrestore(&p->lock, flags);
-
- gfp_t gfp = GFP_KERNEL;
- if (!head)
- gfp |= __GFP_NOFAIL;
-
- new_node = genradix_alloc_node(gfp);
- if (!new_node)
- may_sleep = false;
- goto check_expired;
- }
-list_add:
- start_gp = rcu_pending_enqueue_list(p, seq, head, ptr, &flags);
- goto start_gp;
- }
- }
-
- *objs->cursor++ = ptr ?: head;
- /* zero cursor if we hit the end of a radix tree node: */
- if (!(((ulong) objs->cursor) & (GENRADIX_NODE_SIZE - 1)))
- objs->cursor = NULL;
- start_gp = !objs->nr;
- objs->nr++;
-start_gp:
- if (unlikely(start_gp)) {
- /*
- * We only have one callback (ideally, we would have one for
-		 * every outstanding grace period) - so if our callback is
- * already in flight, we may still have to start a grace period
- * (since we used get_state() above, not start_poll())
- */
- if (!p->cb_armed) {
- p->cb_armed = true;
- __call_rcu(pending->srcu, &p->cb, rcu_pending_rcu_cb);
- } else {
- __start_poll_synchronize_rcu(pending->srcu);
- }
- }
- spin_unlock_irqrestore(&p->lock, flags);
-free_node:
- if (new_node)
- genradix_free_node(new_node);
- return;
-check_expired:
- if (unlikely(__poll_state_synchronize_rcu(pending->srcu, seq))) {
- switch ((ulong) pending->process) {
- case RCU_PENDING_KVFREE:
- kvfree(ptr);
- break;
- case RCU_PENDING_CALL_RCU:
- head->func(head);
- break;
- default:
- pending->process(pending, head);
- break;
- }
- goto free_node;
- }
-
- local_irq_save(flags);
- p = this_cpu_ptr(pending->p);
- spin_lock(&p->lock);
- goto restart;
-}
-
-void rcu_pending_enqueue(struct rcu_pending *pending, struct rcu_head *obj)
-{
- __rcu_pending_enqueue(pending, obj, NULL, true);
-}
-
-static struct rcu_head *rcu_pending_pcpu_dequeue(struct rcu_pending_pcpu *p)
-{
- struct rcu_head *ret = NULL;
-
- spin_lock_irq(&p->lock);
- darray_for_each(p->objs, objs)
- if (objs->nr) {
- ret = *genradix_ptr(&objs->objs, --objs->nr);
- objs->cursor = NULL;
- if (!objs->nr)
- genradix_free(&objs->objs);
- goto out;
- }
-
- static_array_for_each(p->lists, i)
- if (i->head) {
- ret = i->head;
-#ifdef __KERNEL__
- i->head = ret->next;
-#else
- i->head = (void *) ret->next.next;
-#endif
- if (!i->head)
- i->tail = NULL;
- goto out;
- }
-out:
- spin_unlock_irq(&p->lock);
-
- return ret;
-}
-
-struct rcu_head *rcu_pending_dequeue(struct rcu_pending *pending)
-{
- return rcu_pending_pcpu_dequeue(raw_cpu_ptr(pending->p));
-}
-
-struct rcu_head *rcu_pending_dequeue_from_all(struct rcu_pending *pending)
-{
- struct rcu_head *ret = rcu_pending_dequeue(pending);
-
- if (ret)
- return ret;
-
- int cpu;
- for_each_possible_cpu(cpu) {
- ret = rcu_pending_pcpu_dequeue(per_cpu_ptr(pending->p, cpu));
- if (ret)
- break;
- }
- return ret;
-}
-
-static bool rcu_pending_has_pending_or_armed(struct rcu_pending *pending)
-{
- int cpu;
- for_each_possible_cpu(cpu) {
- struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu);
- spin_lock_irq(&p->lock);
- if (__rcu_pending_has_pending(p) || p->cb_armed) {
- spin_unlock_irq(&p->lock);
- return true;
- }
- spin_unlock_irq(&p->lock);
- }
-
- return false;
-}
-
-void rcu_pending_exit(struct rcu_pending *pending)
-{
- int cpu;
-
- if (!pending->p)
- return;
-
- while (rcu_pending_has_pending_or_armed(pending)) {
- __rcu_barrier(pending->srcu);
-
- for_each_possible_cpu(cpu) {
- struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu);
- flush_work(&p->work);
- }
- }
-
- for_each_possible_cpu(cpu) {
- struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu);
- flush_work(&p->work);
- }
-
- for_each_possible_cpu(cpu) {
- struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu);
-
- static_array_for_each(p->lists, i)
- WARN_ON(i->head);
- WARN_ON(p->objs.nr);
- darray_exit(&p->objs);
- }
- free_percpu(pending->p);
-}
-
-/**
- * rcu_pending_init() - initialize an rcu_pending
- *
- * @pending: Object to init
- * @srcu: May optionally be used with an srcu_struct; if NULL, uses normal
- * RCU flavor
- * @process: Callback function invoked on objects once their RCU grace
- * periods have elapsed; if NULL, kvfree() is used.
- */
-int rcu_pending_init(struct rcu_pending *pending,
- struct srcu_struct *srcu,
- rcu_pending_process_fn process)
-{
- pending->p = alloc_percpu(struct rcu_pending_pcpu);
- if (!pending->p)
- return -ENOMEM;
-
- int cpu;
- for_each_possible_cpu(cpu) {
- struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu);
- p->parent = pending;
- p->cpu = cpu;
- spin_lock_init(&p->lock);
- darray_init(&p->objs);
- INIT_WORK(&p->work, rcu_pending_work);
- }
-
- pending->srcu = srcu;
- pending->process = process;
-
- return 0;
-}
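
One detail worth calling out from the code above is the tagged-pointer trick used for kvfree_rcu_mightsleep(): when no rcu_head is supplied, one is allocated, the pointer to free is stashed in head->func, and its low bit records that the head itself must also be freed. A minimal userspace sketch of that encoding (relies only on allocations being at least 2-byte aligned; names are illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct wrapper { void *tagged; };

static void stash_ptr(struct wrapper *w, void *ptr, bool free_wrapper)
{
	assert(((uintptr_t) ptr & 1) == 0);	/* alignment gives us the low bit */
	w->tagged = (void *) ((uintptr_t) ptr | (free_wrapper ? 1UL : 0UL));
}

static void process(struct wrapper *w)
{
	void *ptr          = (void *) ((uintptr_t) w->tagged & ~(uintptr_t) 1);
	bool  free_wrapper = (uintptr_t) w->tagged & 1;

	free(ptr);				/* the object the caller handed in */
	if (free_wrapper)
		free(w);			/* the wrapper we allocated for it */
}
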
diff --git a/fs/bcachefs/rcu_pending.h b/fs/bcachefs/rcu_pending.h
deleted file mode 100644
index 71a2f4ddaade..000000000000
--- a/fs/bcachefs/rcu_pending.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_RCU_PENDING_H
-#define _LINUX_RCU_PENDING_H
-
-#include <linux/rcupdate.h>
-
-struct rcu_pending;
-typedef void (*rcu_pending_process_fn)(struct rcu_pending *, struct rcu_head *);
-
-struct rcu_pending_pcpu;
-
-struct rcu_pending {
- struct rcu_pending_pcpu __percpu *p;
- struct srcu_struct *srcu;
- rcu_pending_process_fn process;
-};
-
-void rcu_pending_enqueue(struct rcu_pending *pending, struct rcu_head *obj);
-struct rcu_head *rcu_pending_dequeue(struct rcu_pending *pending);
-struct rcu_head *rcu_pending_dequeue_from_all(struct rcu_pending *pending);
-
-void rcu_pending_exit(struct rcu_pending *pending);
-int rcu_pending_init(struct rcu_pending *pending,
- struct srcu_struct *srcu,
- rcu_pending_process_fn process);
-
-#endif /* _LINUX_RCU_PENDING_H */
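
A hedged usage sketch for the API declared above: embed an rcu_head in the object, hand a process callback to rcu_pending_init() (a NULL srcu_struct selects the normal RCU flavor), enqueue objects as they are retired, and tear down with rcu_pending_exit(). The surrounding names are illustrative:

#include <linux/slab.h>
#include "rcu_pending.h"

struct deferred_obj {
	struct rcu_head	rcu;
	int		id;
};

static struct rcu_pending pending;

static void deferred_free(struct rcu_pending *p, struct rcu_head *rcu)
{
	struct deferred_obj *obj = container_of(rcu, struct deferred_obj, rcu);

	kfree(obj);			/* a grace period has elapsed by now */
}

static int example_setup(void)
{
	return rcu_pending_init(&pending, NULL, deferred_free);
}

static void example_retire(struct deferred_obj *obj)
{
	rcu_pending_enqueue(&pending, &obj->rcu);
}

static void example_teardown(void)
{
	rcu_pending_exit(&pending);	/* waits for and flushes anything queued */
}
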
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
deleted file mode 100644
index cd6647374353..000000000000
--- a/fs/bcachefs/rebalance.c
+++ /dev/null
@@ -1,490 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "alloc_background.h"
-#include "alloc_foreground.h"
-#include "btree_iter.h"
-#include "btree_update.h"
-#include "btree_write_buffer.h"
-#include "buckets.h"
-#include "clock.h"
-#include "compress.h"
-#include "disk_groups.h"
-#include "errcode.h"
-#include "error.h"
-#include "inode.h"
-#include "io_write.h"
-#include "move.h"
-#include "rebalance.h"
-#include "subvolume.h"
-#include "super-io.h"
-#include "trace.h"
-
-#include <linux/freezer.h>
-#include <linux/kthread.h>
-#include <linux/sched/cputime.h>
-
-#define REBALANCE_WORK_SCAN_OFFSET (U64_MAX - 1)
-
-static const char * const bch2_rebalance_state_strs[] = {
-#define x(t) #t,
- BCH_REBALANCE_STATES()
- NULL
-#undef x
-};
-
-static int __bch2_set_rebalance_needs_scan(struct btree_trans *trans, u64 inum)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bkey_i_cookie *cookie;
- u64 v;
- int ret;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
- SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
- BTREE_ITER_intent);
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- v = k.k->type == KEY_TYPE_cookie
- ? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
- : 0;
-
- cookie = bch2_trans_kmalloc(trans, sizeof(*cookie));
- ret = PTR_ERR_OR_ZERO(cookie);
- if (ret)
- goto err;
-
- bkey_cookie_init(&cookie->k_i);
- cookie->k.p = iter.pos;
- cookie->v.cookie = cpu_to_le64(v + 1);
-
- ret = bch2_trans_update(trans, &iter, &cookie->k_i, 0);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-int bch2_set_rebalance_needs_scan(struct bch_fs *c, u64 inum)
-{
- int ret = bch2_trans_commit_do(c, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc|
- BCH_TRANS_COMMIT_lazy_rw,
- __bch2_set_rebalance_needs_scan(trans, inum));
- rebalance_wakeup(c);
- return ret;
-}
-
-int bch2_set_fs_needs_rebalance(struct bch_fs *c)
-{
- return bch2_set_rebalance_needs_scan(c, 0);
-}
-
-static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum, u64 cookie)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- u64 v;
- int ret;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
- SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
- BTREE_ITER_intent);
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- v = k.k->type == KEY_TYPE_cookie
- ? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
- : 0;
-
- if (v == cookie)
- ret = bch2_btree_delete_at(trans, &iter, 0);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static struct bkey_s_c next_rebalance_entry(struct btree_trans *trans,
- struct btree_iter *work_iter)
-{
- return !kthread_should_stop()
- ? bch2_btree_iter_peek(work_iter)
- : bkey_s_c_null;
-}
-
-static int bch2_bkey_clear_needs_rebalance(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct bkey_i *n = bch2_bkey_make_mut(trans, iter, &k, 0);
- int ret = PTR_ERR_OR_ZERO(n);
- if (ret)
- return ret;
-
- extent_entry_drop(bkey_i_to_s(n),
- (void *) bch2_bkey_rebalance_opts(bkey_i_to_s_c(n)));
- return bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
-}
-
-static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
- struct bpos work_pos,
- struct btree_iter *extent_iter,
- struct data_update_opts *data_opts)
-{
- struct bch_fs *c = trans->c;
- struct bkey_s_c k;
-
- bch2_trans_iter_exit(trans, extent_iter);
- bch2_trans_iter_init(trans, extent_iter,
- work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink,
- work_pos,
- BTREE_ITER_all_snapshots);
- k = bch2_btree_iter_peek_slot(extent_iter);
- if (bkey_err(k))
- return k;
-
- const struct bch_extent_rebalance *r = k.k ? bch2_bkey_rebalance_opts(k) : NULL;
- if (!r) {
- /* raced due to btree write buffer, nothing to do */
- return bkey_s_c_null;
- }
-
- memset(data_opts, 0, sizeof(*data_opts));
-
- data_opts->rewrite_ptrs =
- bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression);
- data_opts->target = r->target;
- data_opts->write_flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS;
-
- if (!data_opts->rewrite_ptrs) {
- /*
-		 * Is the device we would want to write to offline? Did the
-		 * devices in the target change?
- *
- * We'll now need a full scan before this extent is picked up
- * again:
- */
- int ret = bch2_bkey_clear_needs_rebalance(trans, extent_iter, k);
- if (ret)
- return bkey_s_c_err(ret);
- return bkey_s_c_null;
- }
-
- if (trace_rebalance_extent_enabled()) {
- struct printbuf buf = PRINTBUF;
-
- prt_str(&buf, "target=");
- bch2_target_to_text(&buf, c, r->target);
- prt_str(&buf, " compression=");
- bch2_compression_opt_to_text(&buf, r->compression);
- prt_str(&buf, " ");
- bch2_bkey_val_to_text(&buf, c, k);
-
- trace_rebalance_extent(c, buf.buf);
- printbuf_exit(&buf);
- }
-
- return k;
-}
-
-noinline_for_stack
-static int do_rebalance_extent(struct moving_context *ctxt,
- struct bpos work_pos,
- struct btree_iter *extent_iter)
-{
- struct btree_trans *trans = ctxt->trans;
- struct bch_fs *c = trans->c;
- struct bch_fs_rebalance *r = &trans->c->rebalance;
- struct data_update_opts data_opts;
- struct bch_io_opts io_opts;
- struct bkey_s_c k;
- struct bkey_buf sk;
- int ret;
-
- ctxt->stats = &r->work_stats;
- r->state = BCH_REBALANCE_working;
-
- bch2_bkey_buf_init(&sk);
-
- ret = bkey_err(k = next_rebalance_extent(trans, work_pos,
- extent_iter, &data_opts));
- if (ret || !k.k)
- goto out;
-
- ret = bch2_move_get_io_opts_one(trans, &io_opts, k);
- if (ret)
- goto out;
-
- atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
-
- /*
- * The iterator gets unlocked by __bch2_read_extent - need to
- * save a copy of @k elsewhere:
- */
- bch2_bkey_buf_reassemble(&sk, c, k);
- k = bkey_i_to_s_c(sk.k);
-
- ret = bch2_move_extent(ctxt, NULL, extent_iter, k, io_opts, data_opts);
- if (ret) {
- if (bch2_err_matches(ret, ENOMEM)) {
- /* memory allocation failure, wait for some IO to finish */
- bch2_move_ctxt_wait_for_io(ctxt);
- ret = -BCH_ERR_transaction_restart_nested;
- }
-
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto out;
-
- /* skip it and continue, XXX signal failure */
- ret = 0;
- }
-out:
- bch2_bkey_buf_exit(&sk, c);
- return ret;
-}
-
-static bool rebalance_pred(struct bch_fs *c, void *arg,
- struct bkey_s_c k,
- struct bch_io_opts *io_opts,
- struct data_update_opts *data_opts)
-{
- unsigned target, compression;
-
- if (k.k->p.inode) {
- target = io_opts->background_target;
- compression = background_compression(*io_opts);
- } else {
- const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);
-
- target = r ? r->target : io_opts->background_target;
- compression = r ? r->compression : background_compression(*io_opts);
- }
-
- data_opts->rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, k, target, compression);
- data_opts->target = target;
- data_opts->write_flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS;
- return data_opts->rewrite_ptrs != 0;
-}
-
-static int do_rebalance_scan(struct moving_context *ctxt, u64 inum, u64 cookie)
-{
- struct btree_trans *trans = ctxt->trans;
- struct bch_fs_rebalance *r = &trans->c->rebalance;
- int ret;
-
- bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
- ctxt->stats = &r->scan_stats;
-
- if (!inum) {
- r->scan_start = BBPOS_MIN;
- r->scan_end = BBPOS_MAX;
- } else {
- r->scan_start = BBPOS(BTREE_ID_extents, POS(inum, 0));
- r->scan_end = BBPOS(BTREE_ID_extents, POS(inum, U64_MAX));
- }
-
- r->state = BCH_REBALANCE_scanning;
-
- ret = __bch2_move_data(ctxt, r->scan_start, r->scan_end, rebalance_pred, NULL) ?:
- commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- bch2_clear_rebalance_needs_scan(trans, inum, cookie));
-
- bch2_move_stats_exit(&r->scan_stats, trans->c);
- return ret;
-}
-
-static void rebalance_wait(struct bch_fs *c)
-{
- struct bch_fs_rebalance *r = &c->rebalance;
- struct io_clock *clock = &c->io_clock[WRITE];
- u64 now = atomic64_read(&clock->now);
- u64 min_member_capacity = bch2_min_rw_member_capacity(c);
-
- if (min_member_capacity == U64_MAX)
- min_member_capacity = 128 * 2048;
-
- r->wait_iotime_end = now + (min_member_capacity >> 6);
-
- if (r->state != BCH_REBALANCE_waiting) {
- r->wait_iotime_start = now;
- r->wait_wallclock_start = ktime_get_real_ns();
- r->state = BCH_REBALANCE_waiting;
- }
-
- bch2_kthread_io_clock_wait(clock, r->wait_iotime_end, MAX_SCHEDULE_TIMEOUT);
-}
-
-static int do_rebalance(struct moving_context *ctxt)
-{
- struct btree_trans *trans = ctxt->trans;
- struct bch_fs *c = trans->c;
- struct bch_fs_rebalance *r = &c->rebalance;
- struct btree_iter rebalance_work_iter, extent_iter = { NULL };
- struct bkey_s_c k;
- int ret = 0;
-
- bch2_trans_begin(trans);
-
- bch2_move_stats_init(&r->work_stats, "rebalance_work");
- bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
-
- bch2_trans_iter_init(trans, &rebalance_work_iter,
- BTREE_ID_rebalance_work, POS_MIN,
- BTREE_ITER_all_snapshots);
-
- while (!bch2_move_ratelimit(ctxt)) {
- if (!r->enabled) {
- bch2_moving_ctxt_flush_all(ctxt);
- kthread_wait_freezable(r->enabled ||
- kthread_should_stop());
- }
-
- if (kthread_should_stop())
- break;
-
- bch2_trans_begin(trans);
-
- ret = bkey_err(k = next_rebalance_entry(trans, &rebalance_work_iter));
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret || !k.k)
- break;
-
- ret = k.k->type == KEY_TYPE_cookie
- ? do_rebalance_scan(ctxt, k.k->p.inode,
- le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie))
- : do_rebalance_extent(ctxt, k.k->p, &extent_iter);
-
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret)
- break;
-
- bch2_btree_iter_advance(&rebalance_work_iter);
- }
-
- bch2_trans_iter_exit(trans, &extent_iter);
- bch2_trans_iter_exit(trans, &rebalance_work_iter);
- bch2_move_stats_exit(&r->scan_stats, c);
-
- if (!ret &&
- !kthread_should_stop() &&
- !atomic64_read(&r->work_stats.sectors_seen) &&
- !atomic64_read(&r->scan_stats.sectors_seen)) {
- bch2_moving_ctxt_flush_all(ctxt);
- bch2_trans_unlock_long(trans);
- rebalance_wait(c);
- }
-
- if (!bch2_err_matches(ret, EROFS))
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int bch2_rebalance_thread(void *arg)
-{
- struct bch_fs *c = arg;
- struct bch_fs_rebalance *r = &c->rebalance;
- struct moving_context ctxt;
-
- set_freezable();
-
- bch2_moving_ctxt_init(&ctxt, c, NULL, &r->work_stats,
- writepoint_ptr(&c->rebalance_write_point),
- true);
-
- while (!kthread_should_stop() && !do_rebalance(&ctxt))
- ;
-
- bch2_moving_ctxt_exit(&ctxt);
-
- return 0;
-}
-
-void bch2_rebalance_status_to_text(struct printbuf *out, struct bch_fs *c)
-{
- struct bch_fs_rebalance *r = &c->rebalance;
-
- prt_str(out, bch2_rebalance_state_strs[r->state]);
- prt_newline(out);
- printbuf_indent_add(out, 2);
-
- switch (r->state) {
- case BCH_REBALANCE_waiting: {
- u64 now = atomic64_read(&c->io_clock[WRITE].now);
-
- prt_str(out, "io wait duration: ");
- bch2_prt_human_readable_s64(out, (r->wait_iotime_end - r->wait_iotime_start) << 9);
- prt_newline(out);
-
- prt_str(out, "io wait remaining: ");
- bch2_prt_human_readable_s64(out, (r->wait_iotime_end - now) << 9);
- prt_newline(out);
-
- prt_str(out, "duration waited: ");
- bch2_pr_time_units(out, ktime_get_real_ns() - r->wait_wallclock_start);
- prt_newline(out);
- break;
- }
- case BCH_REBALANCE_working:
- bch2_move_stats_to_text(out, &r->work_stats);
- break;
- case BCH_REBALANCE_scanning:
- bch2_move_stats_to_text(out, &r->scan_stats);
- break;
- }
- prt_newline(out);
- printbuf_indent_sub(out, 2);
-}
-
-void bch2_rebalance_stop(struct bch_fs *c)
-{
- struct task_struct *p;
-
- c->rebalance.pd.rate.rate = UINT_MAX;
- bch2_ratelimit_reset(&c->rebalance.pd.rate);
-
- p = rcu_dereference_protected(c->rebalance.thread, 1);
- c->rebalance.thread = NULL;
-
- if (p) {
-		/* for synchronizing with rebalance_wakeup() */
- synchronize_rcu();
-
- kthread_stop(p);
- put_task_struct(p);
- }
-}
-
-int bch2_rebalance_start(struct bch_fs *c)
-{
- struct task_struct *p;
- int ret;
-
- if (c->rebalance.thread)
- return 0;
-
- if (c->opts.nochanges)
- return 0;
-
- p = kthread_create(bch2_rebalance_thread, c, "bch-rebalance/%s", c->name);
- ret = PTR_ERR_OR_ZERO(p);
- bch_err_msg(c, ret, "creating rebalance thread");
- if (ret)
- return ret;
-
- get_task_struct(p);
- rcu_assign_pointer(c->rebalance.thread, p);
- wake_up_process(p);
- return 0;
-}
-
-void bch2_fs_rebalance_init(struct bch_fs *c)
-{
- bch2_pd_controller_init(&c->rebalance.pd);
-}
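
The rebalance_work scan trigger above is a small compare-and-clear protocol: requesting a scan bumps a cookie stored at REBALANCE_WORK_SCAN_OFFSET, the scanner remembers the value it saw, and it only deletes the trigger if the cookie is unchanged, so a request made while a scan was already running is never lost. A standalone sketch of that protocol (plain C, illustrative names; the real cookie lives in a btree key):

#include <stdbool.h>
#include <stdint.h>

struct scan_trigger { uint64_t cookie; };	/* 0 == no scan requested */

static uint64_t request_scan(struct scan_trigger *t)
{
	return ++t->cookie;			/* requester and scanner remember this value */
}

static bool finish_scan(struct scan_trigger *t, uint64_t seen)
{
	if (t->cookie != seen)
		return false;			/* re-requested while scanning: keep it */
	t->cookie = 0;
	return true;
}
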
diff --git a/fs/bcachefs/rebalance.h b/fs/bcachefs/rebalance.h
deleted file mode 100644
index 28a52638f16c..000000000000
--- a/fs/bcachefs/rebalance.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_REBALANCE_H
-#define _BCACHEFS_REBALANCE_H
-
-#include "rebalance_types.h"
-
-int bch2_set_rebalance_needs_scan(struct bch_fs *, u64 inum);
-int bch2_set_fs_needs_rebalance(struct bch_fs *);
-
-static inline void rebalance_wakeup(struct bch_fs *c)
-{
- struct task_struct *p;
-
- rcu_read_lock();
- p = rcu_dereference(c->rebalance.thread);
- if (p)
- wake_up_process(p);
- rcu_read_unlock();
-}
-
-void bch2_rebalance_status_to_text(struct printbuf *, struct bch_fs *);
-
-void bch2_rebalance_stop(struct bch_fs *);
-int bch2_rebalance_start(struct bch_fs *);
-void bch2_fs_rebalance_init(struct bch_fs *);
-
-#endif /* _BCACHEFS_REBALANCE_H */
diff --git a/fs/bcachefs/rebalance_types.h b/fs/bcachefs/rebalance_types.h
deleted file mode 100644
index 0fffb536c1d0..000000000000
--- a/fs/bcachefs/rebalance_types.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_REBALANCE_TYPES_H
-#define _BCACHEFS_REBALANCE_TYPES_H
-
-#include "bbpos_types.h"
-#include "move_types.h"
-
-#define BCH_REBALANCE_STATES() \
- x(waiting) \
- x(working) \
- x(scanning)
-
-enum bch_rebalance_states {
-#define x(t) BCH_REBALANCE_##t,
- BCH_REBALANCE_STATES()
-#undef x
-};
-
-struct bch_fs_rebalance {
- struct task_struct __rcu *thread;
- struct bch_pd_controller pd;
-
- enum bch_rebalance_states state;
- u64 wait_iotime_start;
- u64 wait_iotime_end;
- u64 wait_wallclock_start;
-
- struct bch_move_stats work_stats;
-
- struct bbpos scan_start;
- struct bbpos scan_end;
- struct bch_move_stats scan_stats;
-
- unsigned enabled:1;
-};
-
-#endif /* _BCACHEFS_REBALANCE_TYPES_H */
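
BCH_REBALANCE_STATES() above is an x-macro: the same list expands once into the enum here and once into bch2_rebalance_state_strs[] in rebalance.c, so the two can never drift apart. A minimal standalone illustration of the pattern:

#include <stdio.h>

#define STATES()	\
	x(waiting)	\
	x(working)	\
	x(scanning)

enum state {
#define x(t) STATE_##t,
	STATES()
#undef x
	STATE_NR
};

static const char * const state_strs[] = {
#define x(t) #t,
	STATES()
#undef x
};

int main(void)
{
	printf("%s\n", state_strs[STATE_working]);	/* prints "working" */
	return 0;
}
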
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
deleted file mode 100644
index 3c7f941dde39..000000000000
--- a/fs/bcachefs/recovery.c
+++ /dev/null
@@ -1,1144 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "alloc_background.h"
-#include "bkey_buf.h"
-#include "btree_journal_iter.h"
-#include "btree_node_scan.h"
-#include "btree_update.h"
-#include "btree_update_interior.h"
-#include "btree_io.h"
-#include "buckets.h"
-#include "dirent.h"
-#include "disk_accounting.h"
-#include "errcode.h"
-#include "error.h"
-#include "fs-common.h"
-#include "journal_io.h"
-#include "journal_reclaim.h"
-#include "journal_seq_blacklist.h"
-#include "logged_ops.h"
-#include "move.h"
-#include "quota.h"
-#include "rebalance.h"
-#include "recovery.h"
-#include "recovery_passes.h"
-#include "replicas.h"
-#include "sb-clean.h"
-#include "sb-downgrade.h"
-#include "snapshot.h"
-#include "super-io.h"
-
-#include <linux/sort.h>
-#include <linux/stat.h>
-
-#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
-
-void bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree)
-{
- if (btree >= BTREE_ID_NR_MAX)
- return;
-
- u64 b = BIT_ULL(btree);
-
- if (!(c->sb.btrees_lost_data & b)) {
- bch_err(c, "flagging btree %s lost data", bch2_btree_id_str(btree));
-
- mutex_lock(&c->sb_lock);
- bch2_sb_field_get(c->disk_sb.sb, ext)->btrees_lost_data |= cpu_to_le64(b);
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
- }
-}
-
-/* for -o reconstruct_alloc: */
-static void bch2_reconstruct_alloc(struct bch_fs *c)
-{
- bch2_journal_log_msg(c, "dropping alloc info");
- bch_info(c, "dropping and reconstructing all alloc info");
-
- mutex_lock(&c->sb_lock);
- struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
-
- __set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_allocations, ext->recovery_passes_required);
- __set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_alloc_info, ext->recovery_passes_required);
- __set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_lrus, ext->recovery_passes_required);
- __set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_extents_to_backpointers, ext->recovery_passes_required);
- __set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_alloc_to_lru_refs, ext->recovery_passes_required);
-
- __set_bit_le64(BCH_FSCK_ERR_ptr_to_missing_alloc_key, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_stale_dirty_ptr, ext->errors_silent);
-
- __set_bit_le64(BCH_FSCK_ERR_dev_usage_buckets_wrong, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_dev_usage_sectors_wrong, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_dev_usage_fragmented_wrong, ext->errors_silent);
-
- __set_bit_le64(BCH_FSCK_ERR_fs_usage_btree_wrong, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_fs_usage_cached_wrong, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_fs_usage_replicas_wrong, ext->errors_silent);
-
- __set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_alloc_key_cached_sectors_wrong, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_wrong, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_redundancy_wrong, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_need_discard_key_wrong, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_freespace_key_wrong, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_bucket_gens_key_wrong, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_freespace_hole_missing, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_ptr_to_missing_backpointer, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_lru_entry_bad, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_accounting_mismatch, ext->errors_silent);
- c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
-
- c->opts.recovery_passes |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
-
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-
- bch2_shoot_down_journal_keys(c, BTREE_ID_alloc,
- 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
- bch2_shoot_down_journal_keys(c, BTREE_ID_backpointers,
- 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
- bch2_shoot_down_journal_keys(c, BTREE_ID_need_discard,
- 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
- bch2_shoot_down_journal_keys(c, BTREE_ID_freespace,
- 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
- bch2_shoot_down_journal_keys(c, BTREE_ID_bucket_gens,
- 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
-}
-
-/*
- * Btree node pointers have a field to stash a pointer to the in-memory btree
- * node; we need to zero out this field when reading in btree nodes, or when
- * reading in keys from the journal:
- */
-static void zero_out_btree_mem_ptr(struct journal_keys *keys)
-{
- darray_for_each(*keys, i)
- if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
- bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
-}
-
-/* journal replay: */
-
-static void replay_now_at(struct journal *j, u64 seq)
-{
- BUG_ON(seq < j->replay_journal_seq);
-
- seq = min(seq, j->replay_journal_seq_end);
-
- while (j->replay_journal_seq < seq)
- bch2_journal_pin_put(j, j->replay_journal_seq++);
-}
-
-static int bch2_journal_replay_accounting_key(struct btree_trans *trans,
- struct journal_key *k)
-{
- struct btree_iter iter;
- bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
- BTREE_MAX_DEPTH, k->level,
- BTREE_ITER_intent);
- int ret = bch2_btree_iter_traverse(&iter);
- if (ret)
- goto out;
-
- struct bkey u;
- struct bkey_s_c old = bch2_btree_path_peek_slot(btree_iter_path(trans, &iter), &u);
-
- /* Has this delta already been applied to the btree? */
- if (bversion_cmp(old.k->bversion, k->k->k.bversion) >= 0) {
- ret = 0;
- goto out;
- }
-
- struct bkey_i *new = k->k;
- if (old.k->type == KEY_TYPE_accounting) {
- new = bch2_bkey_make_mut_noupdate(trans, bkey_i_to_s_c(k->k));
- ret = PTR_ERR_OR_ZERO(new);
- if (ret)
- goto out;
-
- bch2_accounting_accumulate(bkey_i_to_accounting(new),
- bkey_s_c_to_accounting(old));
- }
-
- trans->journal_res.seq = k->journal_seq;
-
- ret = bch2_trans_update(trans, &iter, new, BTREE_TRIGGER_norun);
-out:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static int bch2_journal_replay_key(struct btree_trans *trans,
- struct journal_key *k)
-{
- struct btree_iter iter;
- unsigned iter_flags =
- BTREE_ITER_intent|
- BTREE_ITER_not_extents;
- unsigned update_flags = BTREE_TRIGGER_norun;
- int ret;
-
- if (k->overwritten)
- return 0;
-
- trans->journal_res.seq = k->journal_seq;
-
- /*
- * BTREE_UPDATE_key_cache_reclaim disables key cache lookup/update to
- * keep the key cache coherent with the underlying btree. Nothing
- * besides the allocator is doing updates yet so we don't need key cache
- * coherency for non-alloc btrees, and key cache fills for snapshots
- * btrees use BTREE_ITER_filter_snapshots, which isn't available until
- * the snapshots recovery pass runs.
- */
- if (!k->level && k->btree_id == BTREE_ID_alloc)
- iter_flags |= BTREE_ITER_cached;
- else
- update_flags |= BTREE_UPDATE_key_cache_reclaim;
-
- bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
- BTREE_MAX_DEPTH, k->level,
- iter_flags);
- ret = bch2_btree_iter_traverse(&iter);
- if (ret)
- goto out;
-
- struct btree_path *path = btree_iter_path(trans, &iter);
- if (unlikely(!btree_path_node(path, k->level))) {
- bch2_trans_iter_exit(trans, &iter);
- bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
- BTREE_MAX_DEPTH, 0, iter_flags);
- ret = bch2_btree_iter_traverse(&iter) ?:
- bch2_btree_increase_depth(trans, iter.path, 0) ?:
- -BCH_ERR_transaction_restart_nested;
- goto out;
- }
-
- /* Must be checked with btree locked: */
- if (k->overwritten)
- goto out;
-
- if (k->k->k.type == KEY_TYPE_accounting) {
- ret = bch2_trans_update_buffered(trans, BTREE_ID_accounting, k->k);
- goto out;
- }
-
- ret = bch2_trans_update(trans, &iter, k->k, update_flags);
-out:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static int journal_sort_seq_cmp(const void *_l, const void *_r)
-{
- const struct journal_key *l = *((const struct journal_key **)_l);
- const struct journal_key *r = *((const struct journal_key **)_r);
-
- /*
- * Map 0 to U64_MAX, so that keys with journal_seq == 0 come last
- *
- * journal_seq == 0 means that the key comes from early repair, and
- * should be inserted last so as to avoid overflowing the journal
- */
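- /* Note: with u64 arithmetic, 0 - 1 wraps to U64_MAX, so repair keys sort last. */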
- return cmp_int(l->journal_seq - 1, r->journal_seq - 1);
-}
-
-int bch2_journal_replay(struct bch_fs *c)
-{
- struct journal_keys *keys = &c->journal_keys;
- DARRAY(struct journal_key *) keys_sorted = { 0 };
- struct journal *j = &c->journal;
- u64 start_seq = c->journal_replay_seq_start;
- u64 end_seq = c->journal_replay_seq_end;
- struct btree_trans *trans = NULL;
- bool immediate_flush = false;
- int ret = 0;
-
- if (keys->nr) {
- ret = bch2_journal_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)",
- keys->nr, start_seq, end_seq);
- if (ret)
- goto err;
- }
-
- BUG_ON(!atomic_read(&keys->ref));
-
- move_gap(keys, keys->nr);
- trans = bch2_trans_get(c);
-
- /*
- * Replay accounting keys first: we can't allow the write buffer to
- * flush accounting keys until we're done
- */
- darray_for_each(*keys, k) {
- if (!(k->k->k.type == KEY_TYPE_accounting && !k->allocated))
- continue;
-
- cond_resched();
-
- ret = commit_do(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc|
- BCH_TRANS_COMMIT_journal_reclaim|
- BCH_TRANS_COMMIT_skip_accounting_apply|
- BCH_TRANS_COMMIT_no_journal_res|
- BCH_WATERMARK_reclaim,
- bch2_journal_replay_accounting_key(trans, k));
- if (bch2_fs_fatal_err_on(ret, c, "error replaying accounting; %s", bch2_err_str(ret)))
- goto err;
-
- k->overwritten = true;
- }
-
- set_bit(BCH_FS_accounting_replay_done, &c->flags);
-
- /*
- * First, attempt to replay keys in sorted order. This is more
- * efficient - better locality of btree access - but some might fail if
- * that would cause a journal deadlock.
- */
- darray_for_each(*keys, k) {
- cond_resched();
-
- /*
- * k->allocated means the key wasn't read in from the journal,
- * rather it was from early repair code
- */
- if (k->allocated)
- immediate_flush = true;
-
- /* Skip fastpath if we're low on space in the journal */
- ret = c->journal.watermark ? -1 :
- commit_do(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc|
- BCH_TRANS_COMMIT_journal_reclaim|
- BCH_TRANS_COMMIT_skip_accounting_apply|
- (!k->allocated ? BCH_TRANS_COMMIT_no_journal_res : 0),
- bch2_journal_replay_key(trans, k));
- BUG_ON(!ret && !k->overwritten && k->k->k.type != KEY_TYPE_accounting);
- if (ret) {
- ret = darray_push(&keys_sorted, k);
- if (ret)
- goto err;
- }
- }
-
- bch2_trans_unlock_long(trans);
- /*
- * Now, replay any remaining keys in the order in which they appear in
- * the journal, unpinning those journal entries as we go:
- */
- sort(keys_sorted.data, keys_sorted.nr,
- sizeof(keys_sorted.data[0]),
- journal_sort_seq_cmp, NULL);
-
- darray_for_each(keys_sorted, kp) {
- cond_resched();
-
- struct journal_key *k = *kp;
-
- if (k->journal_seq)
- replay_now_at(j, k->journal_seq);
- else
- replay_now_at(j, j->replay_journal_seq_end);
-
- ret = commit_do(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc|
- BCH_TRANS_COMMIT_skip_accounting_apply|
- (!k->allocated
- ? BCH_TRANS_COMMIT_no_journal_res|BCH_WATERMARK_reclaim
- : 0),
- bch2_journal_replay_key(trans, k));
- bch_err_msg(c, ret, "while replaying key at btree %s level %u:",
- bch2_btree_id_str(k->btree_id), k->level);
- if (ret)
- goto err;
-
- BUG_ON(k->btree_id != BTREE_ID_accounting && !k->overwritten);
- }
-
- /*
- * We need to put our btree_trans before calling flush_all_pins(), since
- * that will use a btree_trans internally
- */
- bch2_trans_put(trans);
- trans = NULL;
-
- if (!c->opts.retain_recovery_info &&
- c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay)
- bch2_journal_keys_put_initial(c);
-
- replay_now_at(j, j->replay_journal_seq_end);
- j->replay_journal_seq = 0;
-
- bch2_journal_set_replay_done(j);
-
- /* if we did any repair, flush it immediately */
- if (immediate_flush) {
- bch2_journal_flush_all_pins(&c->journal);
- ret = bch2_journal_meta(&c->journal);
- }
-
- if (keys->nr)
- bch2_journal_log_msg(c, "journal replay finished");
-err:
- if (trans)
- bch2_trans_put(trans);
- darray_exit(&keys_sorted);
- bch_err_fn(c, ret);
- return ret;
-}
-
-/* journal replay early: */
-
-static int journal_replay_entry_early(struct bch_fs *c,
- struct jset_entry *entry)
-{
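- /*
- * These entries carry fs-wide state (btree roots, IO clocks, blacklisted
- * journal sequence numbers) that must be set up before keys are replayed:
- */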
- int ret = 0;
-
- switch (entry->type) {
- case BCH_JSET_ENTRY_btree_root: {
- struct btree_root *r;
-
- if (fsck_err_on(entry->btree_id >= BTREE_ID_NR_MAX,
- c, invalid_btree_id,
- "invalid btree id %u (max %u)",
- entry->btree_id, BTREE_ID_NR_MAX))
- return 0;
-
- while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) {
- ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL });
- if (ret)
- return ret;
- }
-
- r = bch2_btree_id_root(c, entry->btree_id);
-
- if (entry->u64s) {
- r->level = entry->level;
- bkey_copy(&r->key, (struct bkey_i *) entry->start);
- r->error = 0;
- } else {
- r->error = -BCH_ERR_btree_node_read_error;
- }
- r->alive = true;
- break;
- }
- case BCH_JSET_ENTRY_usage: {
- struct jset_entry_usage *u =
- container_of(entry, struct jset_entry_usage, entry);
-
- switch (entry->btree_id) {
- case BCH_FS_USAGE_key_version:
- atomic64_set(&c->key_version, le64_to_cpu(u->v));
- break;
- }
- break;
- }
- case BCH_JSET_ENTRY_blacklist: {
- struct jset_entry_blacklist *bl_entry =
- container_of(entry, struct jset_entry_blacklist, entry);
-
- ret = bch2_journal_seq_blacklist_add(c,
- le64_to_cpu(bl_entry->seq),
- le64_to_cpu(bl_entry->seq) + 1);
- break;
- }
- case BCH_JSET_ENTRY_blacklist_v2: {
- struct jset_entry_blacklist_v2 *bl_entry =
- container_of(entry, struct jset_entry_blacklist_v2, entry);
-
- ret = bch2_journal_seq_blacklist_add(c,
- le64_to_cpu(bl_entry->start),
- le64_to_cpu(bl_entry->end) + 1);
- break;
- }
- case BCH_JSET_ENTRY_clock: {
- struct jset_entry_clock *clock =
- container_of(entry, struct jset_entry_clock, entry);
-
- atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
- }
- }
-fsck_err:
- return ret;
-}
-
-static int journal_replay_early(struct bch_fs *c,
- struct bch_sb_field_clean *clean)
-{
- if (clean) {
- for (struct jset_entry *entry = clean->start;
- entry != vstruct_end(&clean->field);
- entry = vstruct_next(entry)) {
- int ret = journal_replay_entry_early(c, entry);
- if (ret)
- return ret;
- }
- } else {
- struct genradix_iter iter;
- struct journal_replay *i, **_i;
-
- genradix_for_each(&c->journal_entries, iter, _i) {
- i = *_i;
-
- if (journal_replay_ignore(i))
- continue;
-
- vstruct_for_each(&i->j, entry) {
- int ret = journal_replay_entry_early(c, entry);
- if (ret)
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-/* sb clean section: */
-
-static int read_btree_roots(struct bch_fs *c)
-{
- int ret = 0;
-
- for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
- struct btree_root *r = bch2_btree_id_root(c, i);
-
- if (!r->alive)
- continue;
-
- if (btree_id_is_alloc(i) && c->opts.reconstruct_alloc)
- continue;
-
- if (mustfix_fsck_err_on((ret = r->error),
- c, btree_root_bkey_invalid,
- "invalid btree root %s",
- bch2_btree_id_str(i)) ||
- mustfix_fsck_err_on((ret = r->error = bch2_btree_root_read(c, i, &r->key, r->level)),
- c, btree_root_read_error,
- "error reading btree root %s l=%u: %s",
- bch2_btree_id_str(i), r->level, bch2_err_str(ret))) {
- if (btree_id_is_alloc(i)) {
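- /*
- * Allocation info is reconstructible from the other btrees;
- * schedule the passes that rebuild and recheck it:
- */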
- c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_allocations);
- c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_info);
- c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_lrus);
- c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers);
- c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_to_lru_refs);
- c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
- r->error = 0;
- } else if (!(c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes))) {
- bch_info(c, "will run btree node scan");
- c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes);
- c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
- }
-
- ret = 0;
- bch2_btree_lost_data(c, i);
- }
- }
-
- for (unsigned i = 0; i < BTREE_ID_NR; i++) {
- struct btree_root *r = bch2_btree_id_root(c, i);
-
- if (!r->b && !r->error) {
- r->alive = false;
- r->level = 0;
- bch2_btree_root_alloc_fake(c, i, 0);
- }
- }
-fsck_err:
- return ret;
-}
-
-static bool check_version_upgrade(struct bch_fs *c)
-{
- unsigned latest_version = bcachefs_metadata_version_current;
- unsigned latest_compatible = min(latest_version,
- bch2_latest_compatible_version(c->sb.version));
- unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version;
- unsigned new_version = 0;
-
- if (old_version < bcachefs_metadata_required_upgrade_below) {
- if (c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible ||
- latest_compatible < bcachefs_metadata_required_upgrade_below)
- new_version = latest_version;
- else
- new_version = latest_compatible;
- } else {
- switch (c->opts.version_upgrade) {
- case BCH_VERSION_UPGRADE_compatible:
- new_version = latest_compatible;
- break;
- case BCH_VERSION_UPGRADE_incompatible:
- new_version = latest_version;
- break;
- case BCH_VERSION_UPGRADE_none:
- new_version = min(old_version, latest_version);
- break;
- }
- }
-
- if (new_version > old_version) {
- struct printbuf buf = PRINTBUF;
-
- if (old_version < bcachefs_metadata_required_upgrade_below)
- prt_str(&buf, "Version upgrade required:\n");
-
- if (old_version != c->sb.version) {
- prt_str(&buf, "Version upgrade from ");
- bch2_version_to_text(&buf, c->sb.version_upgrade_complete);
- prt_str(&buf, " to ");
- bch2_version_to_text(&buf, c->sb.version);
- prt_str(&buf, " incomplete\n");
- }
-
- prt_printf(&buf, "Doing %s version upgrade from ",
- BCH_VERSION_MAJOR(old_version) != BCH_VERSION_MAJOR(new_version)
- ? "incompatible" : "compatible");
- bch2_version_to_text(&buf, old_version);
- prt_str(&buf, " to ");
- bch2_version_to_text(&buf, new_version);
- prt_newline(&buf);
-
- struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
- __le64 passes = ext->recovery_passes_required[0];
- bch2_sb_set_upgrade(c, old_version, new_version);
- passes = ext->recovery_passes_required[0] & ~passes;
-
- if (passes) {
- prt_str(&buf, " running recovery passes: ");
- prt_bitflags(&buf, bch2_recovery_passes,
- bch2_recovery_passes_from_stable(le64_to_cpu(passes)));
- }
-
- bch_info(c, "%s", buf.buf);
-
- bch2_sb_upgrade(c, new_version);
-
- printbuf_exit(&buf);
- return true;
- }
-
- return false;
-}
-
-int bch2_fs_recovery(struct bch_fs *c)
-{
- struct bch_sb_field_clean *clean = NULL;
- struct jset *last_journal_entry = NULL;
- u64 last_seq = 0, blacklist_seq, journal_seq;
- int ret = 0;
-
- if (c->sb.clean) {
- clean = bch2_read_superblock_clean(c);
- ret = PTR_ERR_OR_ZERO(clean);
- if (ret)
- goto err;
-
- bch_info(c, "recovering from clean shutdown, journal seq %llu",
- le64_to_cpu(clean->journal_seq));
- } else {
- bch_info(c, "recovering from unclean shutdown");
- }
-
- if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
- bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
- ret = -EINVAL;
- goto err;
- }
-
- if (!c->sb.clean &&
- !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
- bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
- ret = -EINVAL;
- goto err;
- }
-
- if (c->opts.norecovery)
- c->opts.recovery_pass_last = BCH_RECOVERY_PASS_journal_replay - 1;
-
- mutex_lock(&c->sb_lock);
- struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
- bool write_sb = false;
-
- if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)) {
- ext->recovery_passes_required[0] |=
- cpu_to_le64(bch2_recovery_passes_to_stable(BIT_ULL(BCH_RECOVERY_PASS_check_topology)));
- write_sb = true;
- }
-
- u64 sb_passes = bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
- if (sb_passes) {
- struct printbuf buf = PRINTBUF;
- prt_str(&buf, "superblock requires following recovery passes to be run:\n ");
- prt_bitflags(&buf, bch2_recovery_passes, sb_passes);
- bch_info(c, "%s", buf.buf);
- printbuf_exit(&buf);
- }
-
- if (bch2_check_version_downgrade(c)) {
- struct printbuf buf = PRINTBUF;
-
- prt_str(&buf, "Version downgrade required:");
-
- __le64 passes = ext->recovery_passes_required[0];
- bch2_sb_set_downgrade(c,
- BCH_VERSION_MINOR(bcachefs_metadata_version_current),
- BCH_VERSION_MINOR(c->sb.version));
- passes = ext->recovery_passes_required[0] & ~passes;
- if (passes) {
- prt_str(&buf, "\n running recovery passes: ");
- prt_bitflags(&buf, bch2_recovery_passes,
- bch2_recovery_passes_from_stable(le64_to_cpu(passes)));
- }
-
- bch_info(c, "%s", buf.buf);
- printbuf_exit(&buf);
- write_sb = true;
- }
-
- if (check_version_upgrade(c))
- write_sb = true;
-
- c->opts.recovery_passes |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
-
- if (write_sb)
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-
- if (c->opts.fsck && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
- c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
-
- if (c->opts.fsck)
- set_bit(BCH_FS_fsck_running, &c->flags);
- if (c->sb.clean)
- set_bit(BCH_FS_clean_recovery, &c->flags);
-
- ret = bch2_blacklist_table_initialize(c);
- if (ret) {
- bch_err(c, "error initializing blacklist table");
- goto err;
- }
-
- bch2_journal_pos_from_member_info_resume(c);
-
- if (!c->sb.clean || c->opts.retain_recovery_info) {
- struct genradix_iter iter;
- struct journal_replay **i;
-
- bch_verbose(c, "starting journal read");
- ret = bch2_journal_read(c, &last_seq, &blacklist_seq, &journal_seq);
- if (ret)
- goto err;
-
- /*
- * note: cmd_list_journal needs the blacklist table fully up to date so
- * it can asterisk ignored journal entries:
- */
- if (c->opts.read_journal_only)
- goto out;
-
- genradix_for_each_reverse(&c->journal_entries, iter, i)
- if (!journal_replay_ignore(*i)) {
- last_journal_entry = &(*i)->j;
- break;
- }
-
- if (mustfix_fsck_err_on(c->sb.clean &&
- last_journal_entry &&
- !journal_entry_empty(last_journal_entry), c,
- clean_but_journal_not_empty,
- "filesystem marked clean but journal not empty")) {
- c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
- SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
- c->sb.clean = false;
- }
-
- if (!last_journal_entry) {
- fsck_err_on(!c->sb.clean, c,
- dirty_but_no_journal_entries,
- "no journal entries found");
- if (clean)
- goto use_clean;
-
- genradix_for_each_reverse(&c->journal_entries, iter, i)
- if (*i) {
- last_journal_entry = &(*i)->j;
- (*i)->ignore_blacklisted = false;
- (*i)->ignore_not_dirty = false;
- /*
- * This was probably a NO_FLUSH entry,
- * so last_seq was garbage - but we know
- * we're only using a single journal
- * entry, set it here:
- */
- (*i)->j.last_seq = (*i)->j.seq;
- break;
- }
- }
-
- ret = bch2_journal_keys_sort(c);
- if (ret)
- goto err;
-
- if (c->sb.clean && last_journal_entry) {
- ret = bch2_verify_superblock_clean(c, &clean,
- last_journal_entry);
- if (ret)
- goto err;
- }
- } else {
-use_clean:
- if (!clean) {
- bch_err(c, "no superblock clean section found");
- ret = -BCH_ERR_fsck_repair_impossible;
- goto err;
-
- }
- blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
- }
-
- c->journal_replay_seq_start = last_seq;
- c->journal_replay_seq_end = blacklist_seq - 1;
-
- if (c->opts.reconstruct_alloc)
- bch2_reconstruct_alloc(c);
-
- zero_out_btree_mem_ptr(&c->journal_keys);
-
- ret = journal_replay_early(c, clean);
- if (ret)
- goto err;
-
- /*
- * After an unclean shutdown, skip the next few journal sequence
- * numbers as they may have been referenced by btree writes that
- * happened before their corresponding journal writes - those btree
- * writes need to be ignored, by skipping and blacklisting the next few
- * journal sequence numbers:
- */
- if (!c->sb.clean)
- journal_seq += 8;
-
- if (blacklist_seq != journal_seq) {
- ret = bch2_journal_log_msg(c, "blacklisting entries %llu-%llu",
- blacklist_seq, journal_seq) ?:
- bch2_journal_seq_blacklist_add(c,
- blacklist_seq, journal_seq);
- if (ret) {
- bch_err_msg(c, ret, "error creating new journal seq blacklist entry");
- goto err;
- }
- }
-
- ret = bch2_journal_log_msg(c, "starting journal at entry %llu, replaying %llu-%llu",
- journal_seq, last_seq, blacklist_seq - 1) ?:
- bch2_fs_journal_start(&c->journal, journal_seq);
- if (ret)
- goto err;
-
- /*
- * Skip past versions that might have been used (as nonces),
- * but hadn't had their pointers written:
- */
- if (c->sb.encryption_type && !c->sb.clean)
- atomic64_add(1 << 16, &c->key_version);
-
- ret = read_btree_roots(c);
- if (ret)
- goto err;
-
- set_bit(BCH_FS_btree_running, &c->flags);
-
- ret = bch2_sb_set_upgrade_extra(c) ?:
- bch2_run_recovery_passes(c);
- if (ret)
- goto err;
-
- /*
- * Normally set by the appropriate recovery pass: when cleared, this
- * indicates we're in early recovery and btree updates should be done by
- * being applied to the journal replay keys. _Must_ be cleared before
- * multithreaded use:
- */
- set_bit(BCH_FS_may_go_rw, &c->flags);
- clear_bit(BCH_FS_fsck_running, &c->flags);
-
- /* in case we don't run journal replay, i.e. norecovery mode */
- set_bit(BCH_FS_accounting_replay_done, &c->flags);
-
- /* fsync if we fixed errors */
- if (test_bit(BCH_FS_errors_fixed, &c->flags) &&
- bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync)) {
- bch2_journal_flush_all_pins(&c->journal);
- bch2_journal_meta(&c->journal);
- bch2_write_ref_put(c, BCH_WRITE_REF_fsync);
- }
-
- /* If we fixed errors, verify that fs is actually clean now: */
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
- test_bit(BCH_FS_errors_fixed, &c->flags) &&
- !test_bit(BCH_FS_errors_not_fixed, &c->flags) &&
- !test_bit(BCH_FS_error, &c->flags)) {
- bch2_flush_fsck_errs(c);
-
- bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean");
- clear_bit(BCH_FS_errors_fixed, &c->flags);
-
- c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;
-
- ret = bch2_run_recovery_passes(c);
- if (ret)
- goto err;
-
- if (test_bit(BCH_FS_errors_fixed, &c->flags) ||
- test_bit(BCH_FS_errors_not_fixed, &c->flags)) {
- bch_err(c, "Second fsck run was not clean");
- set_bit(BCH_FS_errors_not_fixed, &c->flags);
- }
-
- set_bit(BCH_FS_errors_fixed, &c->flags);
- }
-
- if (enabled_qtypes(c)) {
- bch_verbose(c, "reading quotas");
- ret = bch2_fs_quota_read(c);
- if (ret)
- goto err;
- bch_verbose(c, "quotas done");
- }
-
- mutex_lock(&c->sb_lock);
- ext = bch2_sb_field_get(c->disk_sb.sb, ext);
- write_sb = false;
-
- if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != le16_to_cpu(c->disk_sb.sb->version)) {
- SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, le16_to_cpu(c->disk_sb.sb->version));
- write_sb = true;
- }
-
- if (!test_bit(BCH_FS_error, &c->flags) &&
- !(c->disk_sb.sb->compat[0] & cpu_to_le64(1ULL << BCH_COMPAT_alloc_info))) {
- c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
- write_sb = true;
- }
-
- if (!test_bit(BCH_FS_error, &c->flags) &&
- !bch2_is_zero(ext->errors_silent, sizeof(ext->errors_silent))) {
- memset(ext->errors_silent, 0, sizeof(ext->errors_silent));
- write_sb = true;
- }
-
- if (c->opts.fsck &&
- !test_bit(BCH_FS_error, &c->flags) &&
- c->recovery_pass_done == BCH_RECOVERY_PASS_NR - 1 &&
- ext->btrees_lost_data) {
- ext->btrees_lost_data = 0;
- write_sb = true;
- }
-
- if (c->opts.fsck &&
- !test_bit(BCH_FS_error, &c->flags) &&
- !test_bit(BCH_FS_errors_not_fixed, &c->flags)) {
- SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
- SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
- write_sb = true;
- }
-
- if (bch2_blacklist_entries_gc(c))
- write_sb = true;
-
- if (write_sb)
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-
- if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
- c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) {
- struct bch_move_stats stats;
-
- bch2_move_stats_init(&stats, "recovery");
-
- struct printbuf buf = PRINTBUF;
- bch2_version_to_text(&buf, c->sb.version_min);
- bch_info(c, "scanning for old btree nodes: min_version %s", buf.buf);
- printbuf_exit(&buf);
-
- ret = bch2_fs_read_write_early(c) ?:
- bch2_scan_old_btree_nodes(c, &stats);
- if (ret)
- goto err;
- bch_info(c, "scanning for old btree nodes done");
- }
-
- ret = 0;
-out:
- bch2_flush_fsck_errs(c);
-
- if (!c->opts.retain_recovery_info) {
- bch2_journal_keys_put_initial(c);
- bch2_find_btree_nodes_exit(&c->found_btree_nodes);
- }
- if (!IS_ERR(clean))
- kfree(clean);
-
- if (!ret &&
- test_bit(BCH_FS_need_delete_dead_snapshots, &c->flags) &&
- !c->opts.nochanges) {
- bch2_fs_read_write_early(c);
- bch2_delete_dead_snapshots_async(c);
- }
-
- bch_err_fn(c, ret);
- return ret;
-err:
-fsck_err:
- bch2_fs_emergency_read_only(c);
- goto out;
-}
-
-int bch2_fs_initialize(struct bch_fs *c)
-{
- struct bch_inode_unpacked root_inode, lostfound_inode;
- struct bkey_inode_buf packed_inode;
- struct qstr lostfound = QSTR("lost+found");
- struct bch_member *m;
- int ret;
-
- bch_notice(c, "initializing new filesystem");
- set_bit(BCH_FS_new_fs, &c->flags);
-
- mutex_lock(&c->sb_lock);
- c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
- c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);
-
- bch2_check_version_downgrade(c);
-
- if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) {
- bch2_sb_upgrade(c, bcachefs_metadata_version_current);
- SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
- bch2_write_super(c);
- }
-
- for_each_member_device(c, ca) {
- m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
- SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, false);
- ca->mi = bch2_mi_to_cpu(m);
- }
-
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-
- c->curr_recovery_pass = BCH_RECOVERY_PASS_NR;
- set_bit(BCH_FS_btree_running, &c->flags);
- set_bit(BCH_FS_may_go_rw, &c->flags);
-
- for (unsigned i = 0; i < BTREE_ID_NR; i++)
- bch2_btree_root_alloc_fake(c, i, 0);
-
- ret = bch2_fs_journal_alloc(c);
- if (ret)
- goto err;
-
- /*
- * journal_res_get() will crash if called before this has
- * set up the journal.pin FIFO and journal.cur pointer:
- */
- bch2_fs_journal_start(&c->journal, 1);
- set_bit(BCH_FS_accounting_replay_done, &c->flags);
- bch2_journal_set_replay_done(&c->journal);
-
- ret = bch2_fs_read_write_early(c);
- if (ret)
- goto err;
-
- for_each_member_device(c, ca) {
- ret = bch2_dev_usage_init(ca, false);
- if (ret) {
- bch2_dev_put(ca);
- goto err;
- }
- }
-
- /*
- * Write out the superblock and journal buckets, now that we can do
- * btree updates
- */
- bch_verbose(c, "marking superblocks");
- ret = bch2_trans_mark_dev_sbs(c);
- bch_err_msg(c, ret, "marking superblocks");
- if (ret)
- goto err;
-
- for_each_online_member(c, ca)
- ca->new_fs_bucket_idx = 0;
-
- ret = bch2_fs_freespace_init(c);
- if (ret)
- goto err;
-
- ret = bch2_initialize_subvolumes(c);
- if (ret)
- goto err;
-
- bch_verbose(c, "reading snapshots table");
- ret = bch2_snapshots_read(c);
- if (ret)
- goto err;
- bch_verbose(c, "reading snapshots done");
-
- bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL);
- root_inode.bi_inum = BCACHEFS_ROOT_INO;
- root_inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
- bch2_inode_pack(&packed_inode, &root_inode);
- packed_inode.inode.k.p.snapshot = U32_MAX;
-
- ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed_inode.inode.k_i, NULL, 0, 0);
- bch_err_msg(c, ret, "creating root directory");
- if (ret)
- goto err;
-
- bch2_inode_init_early(c, &lostfound_inode);
-
- ret = bch2_trans_commit_do(c, NULL, NULL, 0,
- bch2_create_trans(trans,
- BCACHEFS_ROOT_SUBVOL_INUM,
- &root_inode, &lostfound_inode,
- &lostfound,
- 0, 0, S_IFDIR|0700, 0,
- NULL, NULL, (subvol_inum) { 0 }, 0));
- bch_err_msg(c, ret, "creating lost+found");
- if (ret)
- goto err;
-
- c->recovery_pass_done = BCH_RECOVERY_PASS_NR - 1;
-
- if (enabled_qtypes(c)) {
- ret = bch2_fs_quota_read(c);
- if (ret)
- goto err;
- }
-
- ret = bch2_journal_flush(&c->journal);
- bch_err_msg(c, ret, "writing first journal entry");
- if (ret)
- goto err;
-
- mutex_lock(&c->sb_lock);
- SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
- SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
-
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-
- return 0;
-err:
- bch_err_fn(c, ret);
- return ret;
-}
diff --git a/fs/bcachefs/recovery.h b/fs/bcachefs/recovery.h
deleted file mode 100644
index 4bf818de1f2f..000000000000
--- a/fs/bcachefs/recovery.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_RECOVERY_H
-#define _BCACHEFS_RECOVERY_H
-
-void bch2_btree_lost_data(struct bch_fs *, enum btree_id);
-
-int bch2_journal_replay(struct bch_fs *);
-
-int bch2_fs_recovery(struct bch_fs *);
-int bch2_fs_initialize(struct bch_fs *);
-
-#endif /* _BCACHEFS_RECOVERY_H */
diff --git a/fs/bcachefs/recovery_passes.c b/fs/bcachefs/recovery_passes.c
deleted file mode 100644
index dff589ddc984..000000000000
--- a/fs/bcachefs/recovery_passes.c
+++ /dev/null
@@ -1,264 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "alloc_background.h"
-#include "backpointers.h"
-#include "btree_gc.h"
-#include "btree_node_scan.h"
-#include "disk_accounting.h"
-#include "ec.h"
-#include "fsck.h"
-#include "inode.h"
-#include "journal.h"
-#include "lru.h"
-#include "logged_ops.h"
-#include "rebalance.h"
-#include "recovery.h"
-#include "recovery_passes.h"
-#include "snapshot.h"
-#include "subvolume.h"
-#include "super.h"
-#include "super-io.h"
-
-const char * const bch2_recovery_passes[] = {
-#define x(_fn, ...) #_fn,
- BCH_RECOVERY_PASSES()
-#undef x
- NULL
-};
-
-/* Fake recovery pass, so that scan_for_btree_nodes isn't 0: */
-static int bch2_recovery_pass_empty(struct bch_fs *c)
-{
- return 0;
-}
-
-static int bch2_set_may_go_rw(struct bch_fs *c)
-{
- struct journal_keys *keys = &c->journal_keys;
-
- /*
- * After we go RW, the journal keys buffer can't be modified (except for
- * setting journal_key->overwritten): it will be accessed by multiple
- * threads
- */
- move_gap(keys, keys->nr);
-
- set_bit(BCH_FS_may_go_rw, &c->flags);
-
- if (keys->nr || c->opts.fsck || !c->sb.clean || c->opts.recovery_passes)
- return bch2_fs_read_write_early(c);
- return 0;
-}
-
-struct recovery_pass_fn {
- int (*fn)(struct bch_fs *);
- unsigned when;
-};
-
-static struct recovery_pass_fn recovery_pass_fns[] = {
-#define x(_fn, _id, _when) { .fn = bch2_##_fn, .when = _when },
- BCH_RECOVERY_PASSES()
-#undef x
-};
-
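- /* Map in-memory pass ordering (which may be reordered) to stable on-disk identifiers: */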
-static const u8 passes_to_stable_map[] = {
-#define x(n, id, ...) [BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n,
- BCH_RECOVERY_PASSES()
-#undef x
-};
-
-static enum bch_recovery_pass_stable bch2_recovery_pass_to_stable(enum bch_recovery_pass pass)
-{
- return passes_to_stable_map[pass];
-}
-
-u64 bch2_recovery_passes_to_stable(u64 v)
-{
- u64 ret = 0;
- for (unsigned i = 0; i < ARRAY_SIZE(passes_to_stable_map); i++)
- if (v & BIT_ULL(i))
- ret |= BIT_ULL(passes_to_stable_map[i]);
- return ret;
-}
-
-u64 bch2_recovery_passes_from_stable(u64 v)
-{
- static const u8 map[] = {
-#define x(n, id, ...) [BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n,
- BCH_RECOVERY_PASSES()
-#undef x
- };
-
- u64 ret = 0;
- for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
- if (v & BIT_ULL(i))
- ret |= BIT_ULL(map[i]);
- return ret;
-}
-
-/*
- * For when we need to rewind recovery passes and run a pass we skipped:
- */
-int bch2_run_explicit_recovery_pass(struct bch_fs *c,
- enum bch_recovery_pass pass)
-{
- if (c->opts.recovery_passes & BIT_ULL(pass))
- return 0;
-
- bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)",
- bch2_recovery_passes[pass], pass,
- bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);
-
- c->opts.recovery_passes |= BIT_ULL(pass);
-
- if (c->curr_recovery_pass >= pass) {
- c->curr_recovery_pass = pass;
- c->recovery_passes_complete &= (1ULL << pass) - 1;
- return -BCH_ERR_restart_recovery;
- } else {
- return 0;
- }
-}
-
-int bch2_run_explicit_recovery_pass_persistent(struct bch_fs *c,
- enum bch_recovery_pass pass)
-{
- enum bch_recovery_pass_stable s = bch2_recovery_pass_to_stable(pass);
-
- mutex_lock(&c->sb_lock);
- struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
-
- if (!test_bit_le64(s, ext->recovery_passes_required)) {
- __set_bit_le64(s, ext->recovery_passes_required);
- bch2_write_super(c);
- }
- mutex_unlock(&c->sb_lock);
-
- return bch2_run_explicit_recovery_pass(c, pass);
-}
-
-static void bch2_clear_recovery_pass_required(struct bch_fs *c,
- enum bch_recovery_pass pass)
-{
- enum bch_recovery_pass_stable s = bch2_recovery_pass_to_stable(pass);
-
- mutex_lock(&c->sb_lock);
- struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
-
- if (test_bit_le64(s, ext->recovery_passes_required)) {
- __clear_bit_le64(s, ext->recovery_passes_required);
- bch2_write_super(c);
- }
- mutex_unlock(&c->sb_lock);
-}
-
-u64 bch2_fsck_recovery_passes(void)
-{
- u64 ret = 0;
-
- for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++)
- if (recovery_pass_fns[i].when & PASS_FSCK)
- ret |= BIT_ULL(i);
- return ret;
-}
-
-static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
-{
- struct recovery_pass_fn *p = recovery_pass_fns + pass;
-
- if (c->opts.recovery_passes_exclude & BIT_ULL(pass))
- return false;
- if (c->opts.recovery_passes & BIT_ULL(pass))
- return true;
- if ((p->when & PASS_FSCK) && c->opts.fsck)
- return true;
- if ((p->when & PASS_UNCLEAN) && !c->sb.clean)
- return true;
- if (p->when & PASS_ALWAYS)
- return true;
- return false;
-}
-
-static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
-{
- struct recovery_pass_fn *p = recovery_pass_fns + pass;
- int ret;
-
- if (!(p->when & PASS_SILENT))
- bch2_print(c, KERN_INFO bch2_log_msg(c, "%s..."),
- bch2_recovery_passes[pass]);
- ret = p->fn(c);
- if (ret)
- return ret;
- if (!(p->when & PASS_SILENT))
- bch2_print(c, KERN_CONT " done\n");
-
- return 0;
-}
-
-int bch2_run_online_recovery_passes(struct bch_fs *c)
-{
- int ret = 0;
-
- down_read(&c->state_lock);
-
- for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) {
- struct recovery_pass_fn *p = recovery_pass_fns + i;
-
- if (!(p->when & PASS_ONLINE))
- continue;
-
- ret = bch2_run_recovery_pass(c, i);
- if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) {
- i = c->curr_recovery_pass;
- continue;
- }
- if (ret)
- break;
- }
-
- up_read(&c->state_lock);
-
- return ret;
-}
-
-int bch2_run_recovery_passes(struct bch_fs *c)
-{
- int ret = 0;
-
- /*
- * We can't allow set_may_go_rw to be excluded; that would cause us to
- * use the journal replay keys for updates where it's not expected.
- */
- c->opts.recovery_passes_exclude &= ~BCH_RECOVERY_PASS_set_may_go_rw;
-
- while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) {
- if (c->opts.recovery_pass_last &&
- c->curr_recovery_pass > c->opts.recovery_pass_last)
- break;
-
- if (should_run_recovery_pass(c, c->curr_recovery_pass)) {
- unsigned pass = c->curr_recovery_pass;
-
- ret = bch2_run_recovery_pass(c, c->curr_recovery_pass) ?:
- bch2_journal_flush(&c->journal);
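- /*
- * A pass may have requested that recovery rewind to an earlier
- * pass (see bch2_run_explicit_recovery_pass()); if so, loop
- * again from the rewound position instead of advancing:
- */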
- if (bch2_err_matches(ret, BCH_ERR_restart_recovery) ||
- (ret && c->curr_recovery_pass < pass))
- continue;
- if (ret)
- break;
-
- c->recovery_passes_complete |= BIT_ULL(c->curr_recovery_pass);
- }
-
- c->recovery_pass_done = max(c->recovery_pass_done, c->curr_recovery_pass);
-
- if (!test_bit(BCH_FS_error, &c->flags))
- bch2_clear_recovery_pass_required(c, c->curr_recovery_pass);
-
- c->curr_recovery_pass++;
- }
-
- return ret;
-}
diff --git a/fs/bcachefs/recovery_passes.h b/fs/bcachefs/recovery_passes.h
deleted file mode 100644
index 99b464e127b8..000000000000
--- a/fs/bcachefs/recovery_passes.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _BCACHEFS_RECOVERY_PASSES_H
-#define _BCACHEFS_RECOVERY_PASSES_H
-
-extern const char * const bch2_recovery_passes[];
-
-u64 bch2_recovery_passes_to_stable(u64 v);
-u64 bch2_recovery_passes_from_stable(u64 v);
-
-u64 bch2_fsck_recovery_passes(void);
-
-int bch2_run_explicit_recovery_pass(struct bch_fs *, enum bch_recovery_pass);
-int bch2_run_explicit_recovery_pass_persistent(struct bch_fs *, enum bch_recovery_pass);
-
-int bch2_run_online_recovery_passes(struct bch_fs *);
-int bch2_run_recovery_passes(struct bch_fs *);
-
-#endif /* _BCACHEFS_RECOVERY_PASSES_H */
diff --git a/fs/bcachefs/recovery_passes_types.h b/fs/bcachefs/recovery_passes_types.h
deleted file mode 100644
index 94dc20ca2065..000000000000
--- a/fs/bcachefs/recovery_passes_types.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_RECOVERY_PASSES_TYPES_H
-#define _BCACHEFS_RECOVERY_PASSES_TYPES_H
-
-#define PASS_SILENT BIT(0)
-#define PASS_FSCK BIT(1)
-#define PASS_UNCLEAN BIT(2)
-#define PASS_ALWAYS BIT(3)
-#define PASS_ONLINE BIT(4)
-
-/*
- * Passes may be reordered, but the second field is a persistent identifier and
- * must never change:
- */
-#define BCH_RECOVERY_PASSES() \
- x(recovery_pass_empty, 41, PASS_SILENT) \
- x(scan_for_btree_nodes, 37, 0) \
- x(check_topology, 4, 0) \
- x(accounting_read, 39, PASS_ALWAYS) \
- x(alloc_read, 0, PASS_ALWAYS) \
- x(stripes_read, 1, PASS_ALWAYS) \
- x(initialize_subvolumes, 2, 0) \
- x(snapshots_read, 3, PASS_ALWAYS) \
- x(check_allocations, 5, PASS_FSCK) \
- x(trans_mark_dev_sbs, 6, PASS_ALWAYS|PASS_SILENT) \
- x(fs_journal_alloc, 7, PASS_ALWAYS|PASS_SILENT) \
- x(set_may_go_rw, 8, PASS_ALWAYS|PASS_SILENT) \
- x(journal_replay, 9, PASS_ALWAYS) \
- x(check_alloc_info, 10, PASS_ONLINE|PASS_FSCK) \
- x(check_lrus, 11, PASS_ONLINE|PASS_FSCK) \
- x(check_btree_backpointers, 12, PASS_ONLINE|PASS_FSCK) \
- x(check_backpointers_to_extents, 13, PASS_ONLINE|PASS_FSCK) \
- x(check_extents_to_backpointers, 14, PASS_ONLINE|PASS_FSCK) \
- x(check_alloc_to_lru_refs, 15, PASS_ONLINE|PASS_FSCK) \
- x(fs_freespace_init, 16, PASS_ALWAYS|PASS_SILENT) \
- x(bucket_gens_init, 17, 0) \
- x(reconstruct_snapshots, 38, 0) \
- x(check_snapshot_trees, 18, PASS_ONLINE|PASS_FSCK) \
- x(check_snapshots, 19, PASS_ONLINE|PASS_FSCK) \
- x(check_subvols, 20, PASS_ONLINE|PASS_FSCK) \
- x(check_subvol_children, 35, PASS_ONLINE|PASS_FSCK) \
- x(delete_dead_snapshots, 21, PASS_ONLINE|PASS_FSCK) \
- x(fs_upgrade_for_subvolumes, 22, 0) \
- x(check_inodes, 24, PASS_FSCK) \
- x(check_extents, 25, PASS_FSCK) \
- x(check_indirect_extents, 26, PASS_FSCK) \
- x(check_dirents, 27, PASS_FSCK) \
- x(check_xattrs, 28, PASS_FSCK) \
- x(check_root, 29, PASS_ONLINE|PASS_FSCK) \
- x(check_unreachable_inodes, 40, PASS_ONLINE|PASS_FSCK) \
- x(check_subvolume_structure, 36, PASS_ONLINE|PASS_FSCK) \
- x(check_directory_structure, 30, PASS_ONLINE|PASS_FSCK) \
- x(check_nlinks, 31, PASS_FSCK) \
- x(resume_logged_ops, 23, PASS_ALWAYS) \
- x(delete_dead_inodes, 32, PASS_ALWAYS) \
- x(fix_reflink_p, 33, 0) \
- x(set_fs_needs_rebalance, 34, 0) \
-
-/* We normally enumerate recovery passes in the order we run them: */
-enum bch_recovery_pass {
-#define x(n, id, when) BCH_RECOVERY_PASS_##n,
- BCH_RECOVERY_PASSES()
-#undef x
- BCH_RECOVERY_PASS_NR
-};
-
-/* But we also need stable identifiers that can be used in the superblock */
-enum bch_recovery_pass_stable {
-#define x(n, id, when) BCH_RECOVERY_PASS_STABLE_##n = id,
- BCH_RECOVERY_PASSES()
-#undef x
-};
-
-#endif /* _BCACHEFS_RECOVERY_PASSES_TYPES_H */
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
deleted file mode 100644
index f457925fa362..000000000000
--- a/fs/bcachefs/reflink.c
+++ /dev/null
@@ -1,593 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "bcachefs.h"
-#include "bkey_buf.h"
-#include "btree_update.h"
-#include "buckets.h"
-#include "error.h"
-#include "extents.h"
-#include "inode.h"
-#include "io_misc.h"
-#include "io_write.h"
-#include "rebalance.h"
-#include "reflink.h"
-#include "subvolume.h"
-#include "super-io.h"
-
-#include <linux/sched/signal.h>
-
-static inline unsigned bkey_type_to_indirect(const struct bkey *k)
-{
- switch (k->type) {
- case KEY_TYPE_extent:
- return KEY_TYPE_reflink_v;
- case KEY_TYPE_inline_data:
- return KEY_TYPE_indirect_inline_data;
- default:
- return 0;
- }
-}
-
-/* reflink pointers */
-
-int bch2_reflink_p_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
- int ret = 0;
-
- bkey_fsck_err_on(le64_to_cpu(p.v->idx) < le32_to_cpu(p.v->front_pad),
- c, reflink_p_front_pad_bad,
- "idx < front_pad (%llu < %u)",
- le64_to_cpu(p.v->idx), le32_to_cpu(p.v->front_pad));
-fsck_err:
- return ret;
-}
-
-void bch2_reflink_p_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
-
- prt_printf(out, "idx %llu front_pad %u back_pad %u",
- le64_to_cpu(p.v->idx),
- le32_to_cpu(p.v->front_pad),
- le32_to_cpu(p.v->back_pad));
-}
-
-bool bch2_reflink_p_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
-{
- struct bkey_s_reflink_p l = bkey_s_to_reflink_p(_l);
- struct bkey_s_c_reflink_p r = bkey_s_c_to_reflink_p(_r);
-
- /*
- * Disabled for now, the triggers code needs to be reworked for merging
- * of reflink pointers to work:
- */
- return false;
-
- if (le64_to_cpu(l.v->idx) + l.k->size != le64_to_cpu(r.v->idx))
- return false;
-
- bch2_key_resize(l.k, l.k->size + r.k->size);
- return true;
-}
-
-static int trans_trigger_reflink_p_segment(struct btree_trans *trans,
- struct bkey_s_c_reflink_p p, u64 *idx,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_i *k;
- __le64 *refcount;
- int add = !(flags & BTREE_TRIGGER_overwrite) ? 1 : -1;
- struct printbuf buf = PRINTBUF;
- int ret;
-
- k = bch2_bkey_get_mut_noupdate(trans, &iter,
- BTREE_ID_reflink, POS(0, *idx),
- BTREE_ITER_with_updates);
- ret = PTR_ERR_OR_ZERO(k);
- if (ret)
- goto err;
-
- refcount = bkey_refcount(bkey_i_to_s(k));
- if (!refcount) {
- bch2_bkey_val_to_text(&buf, c, p.s_c);
- bch2_trans_inconsistent(trans,
- "nonexistent indirect extent at %llu while marking\n %s",
- *idx, buf.buf);
- ret = -EIO;
- goto err;
- }
-
- if (!*refcount && (flags & BTREE_TRIGGER_overwrite)) {
- bch2_bkey_val_to_text(&buf, c, p.s_c);
- bch2_trans_inconsistent(trans,
- "indirect extent refcount underflow at %llu while marking\n %s",
- *idx, buf.buf);
- ret = -EIO;
- goto err;
- }
-
- if (flags & BTREE_TRIGGER_insert) {
- struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
- u64 pad;
-
- pad = max_t(s64, le32_to_cpu(v->front_pad),
- le64_to_cpu(v->idx) - bkey_start_offset(&k->k));
- BUG_ON(pad > U32_MAX);
- v->front_pad = cpu_to_le32(pad);
-
- pad = max_t(s64, le32_to_cpu(v->back_pad),
- k->k.p.offset - p.k->size - le64_to_cpu(v->idx));
- BUG_ON(pad > U32_MAX);
- v->back_pad = cpu_to_le32(pad);
- }
-
- le64_add_cpu(refcount, add);
-
- bch2_btree_iter_set_pos_to_extent_start(&iter);
- ret = bch2_trans_update(trans, &iter, k, 0);
- if (ret)
- goto err;
-
- *idx = k->k.p.offset;
-err:
- bch2_trans_iter_exit(trans, &iter);
- printbuf_exit(&buf);
- return ret;
-}
-
-static s64 gc_trigger_reflink_p_segment(struct btree_trans *trans,
- struct bkey_s_c_reflink_p p, u64 *idx,
- enum btree_iter_update_trigger_flags flags,
- size_t r_idx)
-{
- struct bch_fs *c = trans->c;
- struct reflink_gc *r;
- int add = !(flags & BTREE_TRIGGER_overwrite) ? 1 : -1;
- u64 start = le64_to_cpu(p.v->idx);
- u64 end = le64_to_cpu(p.v->idx) + p.k->size;
- u64 next_idx = end + le32_to_cpu(p.v->back_pad);
- s64 ret = 0;
- struct printbuf buf = PRINTBUF;
-
- if (r_idx >= c->reflink_gc_nr)
- goto not_found;
-
- r = genradix_ptr(&c->reflink_gc_table, r_idx);
- next_idx = min(next_idx, r->offset - r->size);
- if (*idx < next_idx)
- goto not_found;
-
- BUG_ON((s64) r->refcount + add < 0);
-
- if (flags & BTREE_TRIGGER_gc)
- r->refcount += add;
- *idx = r->offset;
- return 0;
-not_found:
- BUG_ON(!(flags & BTREE_TRIGGER_check_repair));
-
- if (fsck_err(trans, reflink_p_to_missing_reflink_v,
- "pointer to missing indirect extent\n"
- " %s\n"
- " missing range %llu-%llu",
- (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
- *idx, next_idx)) {
- struct bkey_i *update = bch2_bkey_make_mut_noupdate(trans, p.s_c);
- ret = PTR_ERR_OR_ZERO(update);
- if (ret)
- goto err;
-
- if (next_idx <= start) {
- bkey_i_to_reflink_p(update)->v.front_pad = cpu_to_le32(start - next_idx);
- } else if (*idx >= end) {
- bkey_i_to_reflink_p(update)->v.back_pad = cpu_to_le32(*idx - end);
- } else {
- bkey_error_init(update);
- update->k.p = p.k->p;
- update->k.size = p.k->size;
- set_bkey_val_u64s(&update->k, 0);
- }
-
- ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, update, BTREE_TRIGGER_norun);
- }
-
- *idx = next_idx;
-err:
-fsck_err:
- printbuf_exit(&buf);
- return ret;
-}
-
-static int __trigger_reflink_p(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level, struct bkey_s_c k,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bch_fs *c = trans->c;
- struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
- int ret = 0;
-
- u64 idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
- u64 end = le64_to_cpu(p.v->idx) + p.k->size + le32_to_cpu(p.v->back_pad);
-
- if (flags & BTREE_TRIGGER_transactional) {
- while (idx < end && !ret)
- ret = trans_trigger_reflink_p_segment(trans, p, &idx, flags);
- }
-
- if (flags & (BTREE_TRIGGER_check_repair|BTREE_TRIGGER_gc)) {
- size_t l = 0, r = c->reflink_gc_nr;
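- /* Binary search for the first reflink_gc entry with offset > idx: */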
-
- while (l < r) {
- size_t m = l + (r - l) / 2;
- struct reflink_gc *ref = genradix_ptr(&c->reflink_gc_table, m);
- if (ref->offset <= idx)
- l = m + 1;
- else
- r = m;
- }
-
- while (idx < end && !ret)
- ret = gc_trigger_reflink_p_segment(trans, p, &idx, flags, l++);
- }
-
- return ret;
-}
-
-int bch2_trigger_reflink_p(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old,
- struct bkey_s new,
- enum btree_iter_update_trigger_flags flags)
-{
- if ((flags & BTREE_TRIGGER_transactional) &&
- (flags & BTREE_TRIGGER_insert)) {
- struct bch_reflink_p *v = bkey_s_to_reflink_p(new).v;
-
- v->front_pad = v->back_pad = 0;
- }
-
- return trigger_run_overwrite_then_insert(__trigger_reflink_p, trans, btree_id, level, old, new, flags);
-}
-
-/* indirect extents */
-
-int bch2_reflink_v_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- return bch2_bkey_ptrs_validate(c, k, flags);
-}
-
-void bch2_reflink_v_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);
-
- prt_printf(out, "refcount: %llu ", le64_to_cpu(r.v->refcount));
-
- bch2_bkey_ptrs_to_text(out, c, k);
-}
-
-#if 0
-Currently disabled, needs to be debugged:
-
-bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
-{
- struct bkey_s_reflink_v l = bkey_s_to_reflink_v(_l);
- struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(_r);
-
- return l.v->refcount == r.v->refcount && bch2_extent_merge(c, _l, _r);
-}
-#endif
-
-static inline void
-check_indirect_extent_deleting(struct bkey_s new,
- enum btree_iter_update_trigger_flags *flags)
-{
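- /*
- * If the refcount has dropped to zero, turn the update into a deletion
- * of the indirect extent instead of rewriting it:
- */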
- if ((*flags & BTREE_TRIGGER_insert) && !*bkey_refcount(new)) {
- new.k->type = KEY_TYPE_deleted;
- new.k->size = 0;
- set_bkey_val_u64s(new.k, 0);
- *flags &= ~BTREE_TRIGGER_insert;
- }
-}
-
-int bch2_trigger_reflink_v(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old, struct bkey_s new,
- enum btree_iter_update_trigger_flags flags)
-{
- if ((flags & BTREE_TRIGGER_transactional) &&
- (flags & BTREE_TRIGGER_insert))
- check_indirect_extent_deleting(new, &flags);
-
- return bch2_trigger_extent(trans, btree_id, level, old, new, flags);
-}
-
-/* indirect inline data */
-
-int bch2_indirect_inline_data_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- return 0;
-}
-
-void bch2_indirect_inline_data_to_text(struct printbuf *out,
- struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_s_c_indirect_inline_data d = bkey_s_c_to_indirect_inline_data(k);
- unsigned datalen = bkey_inline_data_bytes(k.k);
-
- prt_printf(out, "refcount %llu datalen %u: %*phN",
- le64_to_cpu(d.v->refcount), datalen,
- min(datalen, 32U), d.v->data);
-}
-
-int bch2_trigger_indirect_inline_data(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old, struct bkey_s new,
- enum btree_iter_update_trigger_flags flags)
-{
- check_indirect_extent_deleting(new, &flags);
-
- return 0;
-}
-
-static int bch2_make_extent_indirect(struct btree_trans *trans,
- struct btree_iter *extent_iter,
- struct bkey_i *orig)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter reflink_iter = { NULL };
- struct bkey_s_c k;
- struct bkey_i *r_v;
- struct bkey_i_reflink_p *r_p;
- __le64 *refcount;
- int ret;
-
- if (orig->k.type == KEY_TYPE_inline_data)
- bch2_check_set_feature(c, BCH_FEATURE_reflink_inline_data);
-
- bch2_trans_iter_init(trans, &reflink_iter, BTREE_ID_reflink, POS_MAX,
- BTREE_ITER_intent);
- k = bch2_btree_iter_peek_prev(&reflink_iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- r_v = bch2_trans_kmalloc(trans, sizeof(__le64) + bkey_bytes(&orig->k));
- ret = PTR_ERR_OR_ZERO(r_v);
- if (ret)
- goto err;
-
- bkey_init(&r_v->k);
- r_v->k.type = bkey_type_to_indirect(&orig->k);
- r_v->k.p = reflink_iter.pos;
- bch2_key_resize(&r_v->k, orig->k.size);
- r_v->k.bversion = orig->k.bversion;
-
- set_bkey_val_bytes(&r_v->k, sizeof(__le64) + bkey_val_bytes(&orig->k));
-
- refcount = bkey_refcount(bkey_i_to_s(r_v));
- *refcount = 0;
- memcpy(refcount + 1, &orig->v, bkey_val_bytes(&orig->k));
-
- ret = bch2_trans_update(trans, &reflink_iter, r_v, 0);
- if (ret)
- goto err;
-
- /*
- * orig is in a bkey_buf which statically allocates 5 u64s for the val,
- * so we know it will be big enough:
- */
- orig->k.type = KEY_TYPE_reflink_p;
- r_p = bkey_i_to_reflink_p(orig);
- set_bkey_val_bytes(&r_p->k, sizeof(r_p->v));
-
- /* FORTIFY_SOURCE is broken here, and doesn't provide unsafe_memset() */
-#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
- __underlying_memset(&r_p->v, 0, sizeof(r_p->v));
-#else
- memset(&r_p->v, 0, sizeof(r_p->v));
-#endif
-
- r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k));
-
- ret = bch2_trans_update(trans, extent_iter, &r_p->k_i,
- BTREE_UPDATE_internal_snapshot_node);
-err:
- bch2_trans_iter_exit(trans, &reflink_iter);
-
- return ret;
-}
-
-static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
-{
- struct bkey_s_c k;
- int ret;
-
- for_each_btree_key_upto_continue_norestart(*iter, end, 0, k, ret) {
- if (bkey_extent_is_unwritten(k))
- continue;
-
- if (bkey_extent_is_data(k.k))
- return k;
- }
-
- if (bkey_ge(iter->pos, end))
- bch2_btree_iter_set_pos(iter, end);
- return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
-}
-
-s64 bch2_remap_range(struct bch_fs *c,
- subvol_inum dst_inum, u64 dst_offset,
- subvol_inum src_inum, u64 src_offset,
- u64 remap_sectors,
- u64 new_i_size, s64 *i_sectors_delta)
-{
- struct btree_trans *trans;
- struct btree_iter dst_iter, src_iter;
- struct bkey_s_c src_k;
- struct bkey_buf new_dst, new_src;
- struct bpos dst_start = POS(dst_inum.inum, dst_offset);
- struct bpos src_start = POS(src_inum.inum, src_offset);
- struct bpos dst_end = dst_start, src_end = src_start;
- struct bch_io_opts opts;
- struct bpos src_want;
- u64 dst_done = 0;
- u32 dst_snapshot, src_snapshot;
- int ret = 0, ret2 = 0;
-
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_reflink))
- return -BCH_ERR_erofs_no_writes;
-
- bch2_check_set_feature(c, BCH_FEATURE_reflink);
-
- dst_end.offset += remap_sectors;
- src_end.offset += remap_sectors;
-
- bch2_bkey_buf_init(&new_dst);
- bch2_bkey_buf_init(&new_src);
- trans = bch2_trans_get(c);
-
- ret = bch2_inum_opts_get(trans, src_inum, &opts);
- if (ret)
- goto err;
-
- bch2_trans_iter_init(trans, &src_iter, BTREE_ID_extents, src_start,
- BTREE_ITER_intent);
- bch2_trans_iter_init(trans, &dst_iter, BTREE_ID_extents, dst_start,
- BTREE_ITER_intent);
-
- while ((ret == 0 ||
- bch2_err_matches(ret, BCH_ERR_transaction_restart)) &&
- bkey_lt(dst_iter.pos, dst_end)) {
- struct disk_reservation disk_res = { 0 };
-
- bch2_trans_begin(trans);
-
- if (fatal_signal_pending(current)) {
- ret = -EINTR;
- break;
- }
-
- ret = bch2_subvolume_get_snapshot(trans, src_inum.subvol,
- &src_snapshot);
- if (ret)
- continue;
-
- bch2_btree_iter_set_snapshot(&src_iter, src_snapshot);
-
- ret = bch2_subvolume_get_snapshot(trans, dst_inum.subvol,
- &dst_snapshot);
- if (ret)
- continue;
-
- bch2_btree_iter_set_snapshot(&dst_iter, dst_snapshot);
-
- if (dst_inum.inum < src_inum.inum) {
- /* Avoid some lock cycle transaction restarts */
- ret = bch2_btree_iter_traverse(&dst_iter);
- if (ret)
- continue;
- }
-
- dst_done = dst_iter.pos.offset - dst_start.offset;
- src_want = POS(src_start.inode, src_start.offset + dst_done);
- bch2_btree_iter_set_pos(&src_iter, src_want);
-
- src_k = get_next_src(&src_iter, src_end);
- ret = bkey_err(src_k);
- if (ret)
- continue;
-
- if (bkey_lt(src_want, src_iter.pos)) {
- ret = bch2_fpunch_at(trans, &dst_iter, dst_inum,
- min(dst_end.offset,
- dst_iter.pos.offset +
- src_iter.pos.offset - src_want.offset),
- i_sectors_delta);
- continue;
- }
-
- if (src_k.k->type != KEY_TYPE_reflink_p) {
- bch2_btree_iter_set_pos_to_extent_start(&src_iter);
-
- bch2_bkey_buf_reassemble(&new_src, c, src_k);
- src_k = bkey_i_to_s_c(new_src.k);
-
- ret = bch2_make_extent_indirect(trans, &src_iter,
- new_src.k);
- if (ret)
- continue;
-
- BUG_ON(src_k.k->type != KEY_TYPE_reflink_p);
- }
-
- if (src_k.k->type == KEY_TYPE_reflink_p) {
- struct bkey_s_c_reflink_p src_p =
- bkey_s_c_to_reflink_p(src_k);
- struct bkey_i_reflink_p *dst_p =
- bkey_reflink_p_init(new_dst.k);
-
- u64 offset = le64_to_cpu(src_p.v->idx) +
- (src_want.offset -
- bkey_start_offset(src_k.k));
-
- dst_p->v.idx = cpu_to_le64(offset);
- } else {
- BUG();
- }
-
- new_dst.k->k.p = dst_iter.pos;
- bch2_key_resize(&new_dst.k->k,
- min(src_k.k->p.offset - src_want.offset,
- dst_end.offset - dst_iter.pos.offset));
-
- ret = bch2_bkey_set_needs_rebalance(c, new_dst.k, &opts) ?:
- bch2_extent_update(trans, dst_inum, &dst_iter,
- new_dst.k, &disk_res,
- new_i_size, i_sectors_delta,
- true);
- bch2_disk_reservation_put(c, &disk_res);
- }
- bch2_trans_iter_exit(trans, &dst_iter);
- bch2_trans_iter_exit(trans, &src_iter);
-
- BUG_ON(!ret && !bkey_eq(dst_iter.pos, dst_end));
- BUG_ON(bkey_gt(dst_iter.pos, dst_end));
-
- dst_done = dst_iter.pos.offset - dst_start.offset;
- new_i_size = min(dst_iter.pos.offset << 9, new_i_size);
-
- do {
- struct bch_inode_unpacked inode_u;
- struct btree_iter inode_iter = { NULL };
-
- bch2_trans_begin(trans);
-
- ret2 = bch2_inode_peek(trans, &inode_iter, &inode_u,
- dst_inum, BTREE_ITER_intent);
-
- if (!ret2 &&
- inode_u.bi_size < new_i_size) {
- inode_u.bi_size = new_i_size;
- ret2 = bch2_inode_write(trans, &inode_iter, &inode_u) ?:
- bch2_trans_commit(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc);
- }
-
- bch2_trans_iter_exit(trans, &inode_iter);
- } while (bch2_err_matches(ret2, BCH_ERR_transaction_restart));
-err:
- bch2_trans_put(trans);
- bch2_bkey_buf_exit(&new_src, c);
- bch2_bkey_buf_exit(&new_dst, c);
-
- bch2_write_ref_put(c, BCH_WRITE_REF_reflink);
-
- return dst_done ?: ret ?: ret2;
-}
diff --git a/fs/bcachefs/reflink.h b/fs/bcachefs/reflink.h
deleted file mode 100644
index 51afe11d8ed6..000000000000
--- a/fs/bcachefs/reflink.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_REFLINK_H
-#define _BCACHEFS_REFLINK_H
-
-enum bch_validate_flags;
-
-int bch2_reflink_p_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-void bch2_reflink_p_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-bool bch2_reflink_p_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
-int bch2_trigger_reflink_p(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s,
- enum btree_iter_update_trigger_flags);
-
-#define bch2_bkey_ops_reflink_p ((struct bkey_ops) { \
- .key_validate = bch2_reflink_p_validate, \
- .val_to_text = bch2_reflink_p_to_text, \
- .key_merge = bch2_reflink_p_merge, \
- .trigger = bch2_trigger_reflink_p, \
- .min_val_size = 16, \
-})
-
-int bch2_reflink_v_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-int bch2_trigger_reflink_v(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s,
- enum btree_iter_update_trigger_flags);
-
-#define bch2_bkey_ops_reflink_v ((struct bkey_ops) { \
- .key_validate = bch2_reflink_v_validate, \
- .val_to_text = bch2_reflink_v_to_text, \
- .swab = bch2_ptr_swab, \
- .trigger = bch2_trigger_reflink_v, \
- .min_val_size = 8, \
-})
-
-int bch2_indirect_inline_data_validate(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags);
-void bch2_indirect_inline_data_to_text(struct printbuf *,
- struct bch_fs *, struct bkey_s_c);
-int bch2_trigger_indirect_inline_data(struct btree_trans *,
- enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s,
- enum btree_iter_update_trigger_flags);
-
-#define bch2_bkey_ops_indirect_inline_data ((struct bkey_ops) { \
- .key_validate = bch2_indirect_inline_data_validate, \
- .val_to_text = bch2_indirect_inline_data_to_text, \
- .trigger = bch2_trigger_indirect_inline_data, \
- .min_val_size = 8, \
-})
-
-static inline const __le64 *bkey_refcount_c(struct bkey_s_c k)
-{
- switch (k.k->type) {
- case KEY_TYPE_reflink_v:
- return &bkey_s_c_to_reflink_v(k).v->refcount;
- case KEY_TYPE_indirect_inline_data:
- return &bkey_s_c_to_indirect_inline_data(k).v->refcount;
- default:
- return NULL;
- }
-}
-
-static inline __le64 *bkey_refcount(struct bkey_s k)
-{
- switch (k.k->type) {
- case KEY_TYPE_reflink_v:
- return &bkey_s_to_reflink_v(k).v->refcount;
- case KEY_TYPE_indirect_inline_data:
- return &bkey_s_to_indirect_inline_data(k).v->refcount;
- default:
- return NULL;
- }
-}
-
-s64 bch2_remap_range(struct bch_fs *, subvol_inum, u64,
- subvol_inum, u64, u64, u64, s64 *);
-
-#endif /* _BCACHEFS_REFLINK_H */
diff --git a/fs/bcachefs/reflink_format.h b/fs/bcachefs/reflink_format.h
deleted file mode 100644
index 6772eebb1fc6..000000000000
--- a/fs/bcachefs/reflink_format.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_REFLINK_FORMAT_H
-#define _BCACHEFS_REFLINK_FORMAT_H
-
-struct bch_reflink_p {
- struct bch_val v;
- __le64 idx;
- /*
- * A reflink pointer might point to an indirect extent which is then
- * later split (by copygc or rebalance). If we only pointed to part of
- * the original indirect extent, and then one of the fragments is
- * outside the range we point to, we'd leak a refcount: so when creating
- * reflink pointers, we need to store pad values to remember the full
- * range we were taking a reference on.
- */
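- /* The full referenced range is thus [idx - front_pad, idx + size + back_pad). */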
- __le32 front_pad;
- __le32 back_pad;
-} __packed __aligned(8);
-
-struct bch_reflink_v {
- struct bch_val v;
- __le64 refcount;
- union bch_extent_entry start[0];
- __u64 _data[];
-} __packed __aligned(8);
-
-struct bch_indirect_inline_data {
- struct bch_val v;
- __le64 refcount;
- u8 data[];
-};
-
-#endif /* _BCACHEFS_REFLINK_FORMAT_H */
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
deleted file mode 100644
index 477ef0997949..000000000000
--- a/fs/bcachefs/replicas.c
+++ /dev/null
@@ -1,919 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "buckets.h"
-#include "disk_accounting.h"
-#include "journal.h"
-#include "replicas.h"
-#include "super-io.h"
-
-#include <linux/sort.h>
-
-static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
- struct bch_replicas_cpu *);
-
-/* Some (buggy!) compilers don't allow memcmp to be passed as a pointer */
-static int bch2_memcmp(const void *l, const void *r, const void *priv)
-{
- size_t size = (size_t) priv;
- return memcmp(l, r, size);
-}
-
-/* Replicas tracking - in memory: */
-
-static void verify_replicas_entry(struct bch_replicas_entry_v1 *e)
-{
-#ifdef CONFIG_BCACHEFS_DEBUG
- BUG_ON(!e->nr_devs);
- BUG_ON(e->nr_required > 1 &&
- e->nr_required >= e->nr_devs);
-
- for (unsigned i = 0; i + 1 < e->nr_devs; i++)
- BUG_ON(e->devs[i] >= e->devs[i + 1]);
-#endif
-}
-
-void bch2_replicas_entry_sort(struct bch_replicas_entry_v1 *e)
-{
- bubble_sort(e->devs, e->nr_devs, u8_cmp);
-}
-
-static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
-{
- eytzinger0_sort_r(r->entries, r->nr, r->entry_size,
- bch2_memcmp, NULL, (void *)(size_t)r->entry_size);
-}
-
-static void bch2_replicas_entry_v0_to_text(struct printbuf *out,
- struct bch_replicas_entry_v0 *e)
-{
- bch2_prt_data_type(out, e->data_type);
-
- prt_printf(out, ": %u [", e->nr_devs);
- for (unsigned i = 0; i < e->nr_devs; i++)
- prt_printf(out, i ? " %u" : "%u", e->devs[i]);
- prt_printf(out, "]");
-}
-
-void bch2_replicas_entry_to_text(struct printbuf *out,
- struct bch_replicas_entry_v1 *e)
-{
- bch2_prt_data_type(out, e->data_type);
-
- prt_printf(out, ": %u/%u [", e->nr_required, e->nr_devs);
- for (unsigned i = 0; i < e->nr_devs; i++)
- prt_printf(out, i ? " %u" : "%u", e->devs[i]);
- prt_printf(out, "]");
-}
-
-static int bch2_replicas_entry_sb_validate(struct bch_replicas_entry_v1 *r,
- struct bch_sb *sb,
- struct printbuf *err)
-{
- if (!r->nr_devs) {
- prt_printf(err, "no devices in entry ");
- goto bad;
- }
-
- if (r->nr_required > 1 &&
- r->nr_required >= r->nr_devs) {
- prt_printf(err, "bad nr_required in entry ");
- goto bad;
- }
-
- for (unsigned i = 0; i < r->nr_devs; i++)
- if (r->devs[i] != BCH_SB_MEMBER_INVALID &&
- !bch2_member_exists(sb, r->devs[i])) {
- prt_printf(err, "invalid device %u in entry ", r->devs[i]);
- goto bad;
- }
-
- return 0;
-bad:
- bch2_replicas_entry_to_text(err, r);
- return -BCH_ERR_invalid_replicas_entry;
-}
-
-int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *r,
- struct bch_fs *c,
- struct printbuf *err)
-{
- if (!r->nr_devs) {
- prt_printf(err, "no devices in entry ");
- goto bad;
- }
-
- if (r->nr_required > 1 &&
- r->nr_required >= r->nr_devs) {
- prt_printf(err, "bad nr_required in entry ");
- goto bad;
- }
-
- for (unsigned i = 0; i < r->nr_devs; i++)
- if (r->devs[i] != BCH_SB_MEMBER_INVALID &&
- !bch2_dev_exists(c, r->devs[i])) {
- prt_printf(err, "invalid device %u in entry ", r->devs[i]);
- goto bad;
- }
-
- return 0;
-bad:
- bch2_replicas_entry_to_text(err, r);
- return -BCH_ERR_invalid_replicas_entry;
-}
-
-void bch2_cpu_replicas_to_text(struct printbuf *out,
- struct bch_replicas_cpu *r)
-{
- struct bch_replicas_entry_v1 *e;
- bool first = true;
-
- for_each_cpu_replicas_entry(r, e) {
- if (!first)
- prt_printf(out, " ");
- first = false;
-
- bch2_replicas_entry_to_text(out, e);
- }
-}
-
-static void extent_to_replicas(struct bkey_s_c k,
- struct bch_replicas_entry_v1 *r)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
-
- r->nr_required = 1;
-
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- if (p.ptr.cached)
- continue;
-
- if (!p.has_ec)
- replicas_entry_add_dev(r, p.ptr.dev);
- else
- r->nr_required = 0;
- }
-}
-
-static void stripe_to_replicas(struct bkey_s_c k,
- struct bch_replicas_entry_v1 *r)
-{
- struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
- const struct bch_extent_ptr *ptr;
-
- r->nr_required = s.v->nr_blocks - s.v->nr_redundant;
-
- for (ptr = s.v->ptrs;
- ptr < s.v->ptrs + s.v->nr_blocks;
- ptr++)
- replicas_entry_add_dev(r, ptr->dev);
-}
-
-void bch2_bkey_to_replicas(struct bch_replicas_entry_v1 *e,
- struct bkey_s_c k)
-{
- e->nr_devs = 0;
-
- switch (k.k->type) {
- case KEY_TYPE_btree_ptr:
- case KEY_TYPE_btree_ptr_v2:
- e->data_type = BCH_DATA_btree;
- extent_to_replicas(k, e);
- break;
- case KEY_TYPE_extent:
- case KEY_TYPE_reflink_v:
- e->data_type = BCH_DATA_user;
- extent_to_replicas(k, e);
- break;
- case KEY_TYPE_stripe:
- e->data_type = BCH_DATA_parity;
- stripe_to_replicas(k, e);
- break;
- }
-
- bch2_replicas_entry_sort(e);
-}
-
-void bch2_devlist_to_replicas(struct bch_replicas_entry_v1 *e,
- enum bch_data_type data_type,
- struct bch_devs_list devs)
-{
- BUG_ON(!data_type ||
- data_type == BCH_DATA_sb ||
- data_type >= BCH_DATA_NR);
-
- e->data_type = data_type;
- e->nr_devs = 0;
- e->nr_required = 1;
-
- darray_for_each(devs, i)
- replicas_entry_add_dev(e, *i);
-
- bch2_replicas_entry_sort(e);
-}
-
-static struct bch_replicas_cpu
-cpu_replicas_add_entry(struct bch_fs *c,
- struct bch_replicas_cpu *old,
- struct bch_replicas_entry_v1 *new_entry)
-{
- struct bch_replicas_cpu new = {
- .nr = old->nr + 1,
- .entry_size = max_t(unsigned, old->entry_size,
- replicas_entry_bytes(new_entry)),
- };
-
- new.entries = kcalloc(new.nr, new.entry_size, GFP_KERNEL);
- if (!new.entries)
- return new;
-
- for (unsigned i = 0; i < old->nr; i++)
- memcpy(cpu_replicas_entry(&new, i),
- cpu_replicas_entry(old, i),
- old->entry_size);
-
- memcpy(cpu_replicas_entry(&new, old->nr),
- new_entry,
- replicas_entry_bytes(new_entry));
-
- bch2_cpu_replicas_sort(&new);
- return new;
-}
-
-static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
- struct bch_replicas_entry_v1 *search)
-{
- int idx, entry_size = replicas_entry_bytes(search);
-
- if (unlikely(entry_size > r->entry_size))
- return -1;
-
-#define entry_cmp(_l, _r) memcmp(_l, _r, entry_size)
- idx = eytzinger0_find(r->entries, r->nr, r->entry_size,
- entry_cmp, search);
-#undef entry_cmp
-
- return idx < r->nr ? idx : -1;
-}
-
-int bch2_replicas_entry_idx(struct bch_fs *c,
- struct bch_replicas_entry_v1 *search)
-{
- bch2_replicas_entry_sort(search);
-
- return __replicas_entry_idx(&c->replicas, search);
-}
-
-static bool __replicas_has_entry(struct bch_replicas_cpu *r,
- struct bch_replicas_entry_v1 *search)
-{
- return __replicas_entry_idx(r, search) >= 0;
-}
-
-bool bch2_replicas_marked_locked(struct bch_fs *c,
- struct bch_replicas_entry_v1 *search)
-{
- verify_replicas_entry(search);
-
- return !search->nr_devs ||
- (__replicas_has_entry(&c->replicas, search) &&
- (likely((!c->replicas_gc.entries)) ||
- __replicas_has_entry(&c->replicas_gc, search)));
-}
-
-bool bch2_replicas_marked(struct bch_fs *c,
- struct bch_replicas_entry_v1 *search)
-{
- percpu_down_read(&c->mark_lock);
- bool ret = bch2_replicas_marked_locked(c, search);
- percpu_up_read(&c->mark_lock);
-
- return ret;
-}
-
-noinline
-static int bch2_mark_replicas_slowpath(struct bch_fs *c,
- struct bch_replicas_entry_v1 *new_entry)
-{
- struct bch_replicas_cpu new_r, new_gc;
- int ret = 0;
-
- verify_replicas_entry(new_entry);
-
- memset(&new_r, 0, sizeof(new_r));
- memset(&new_gc, 0, sizeof(new_gc));
-
- mutex_lock(&c->sb_lock);
-
- if (c->replicas_gc.entries &&
- !__replicas_has_entry(&c->replicas_gc, new_entry)) {
- new_gc = cpu_replicas_add_entry(c, &c->replicas_gc, new_entry);
- if (!new_gc.entries) {
- ret = -BCH_ERR_ENOMEM_cpu_replicas;
- goto err;
- }
- }
-
- if (!__replicas_has_entry(&c->replicas, new_entry)) {
- new_r = cpu_replicas_add_entry(c, &c->replicas, new_entry);
- if (!new_r.entries) {
- ret = -BCH_ERR_ENOMEM_cpu_replicas;
- goto err;
- }
-
- ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
- if (ret)
- goto err;
- }
-
- if (!new_r.entries &&
- !new_gc.entries)
- goto out;
-
- /* allocations done, now commit: */
-
- if (new_r.entries)
- bch2_write_super(c);
-
- /* don't update in memory replicas until changes are persistent */
- percpu_down_write(&c->mark_lock);
- if (new_r.entries)
- swap(c->replicas, new_r);
- if (new_gc.entries)
- swap(new_gc, c->replicas_gc);
- percpu_up_write(&c->mark_lock);
-out:
- mutex_unlock(&c->sb_lock);
-
- kfree(new_r.entries);
- kfree(new_gc.entries);
-
- return ret;
-err:
- bch_err_msg(c, ret, "adding replicas entry");
- goto out;
-}
-
-int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry_v1 *r)
-{
- return likely(bch2_replicas_marked(c, r))
- ? 0 : bch2_mark_replicas_slowpath(c, r);
-}
-
-/*
- * Old replicas_gc mechanism: only used for journal replicas entries now, should
- * die at some point:
- */
-
-int bch2_replicas_gc_end(struct bch_fs *c, int ret)
-{
- lockdep_assert_held(&c->replicas_gc_lock);
-
- mutex_lock(&c->sb_lock);
- percpu_down_write(&c->mark_lock);
-
- ret = ret ?:
- bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc);
- if (!ret)
- swap(c->replicas, c->replicas_gc);
-
- kfree(c->replicas_gc.entries);
- c->replicas_gc.entries = NULL;
-
- percpu_up_write(&c->mark_lock);
-
- if (!ret)
- bch2_write_super(c);
-
- mutex_unlock(&c->sb_lock);
-
- return ret;
-}
-
-int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
-{
- struct bch_replicas_entry_v1 *e;
- unsigned i = 0;
-
- lockdep_assert_held(&c->replicas_gc_lock);
-
- mutex_lock(&c->sb_lock);
- BUG_ON(c->replicas_gc.entries);
-
- c->replicas_gc.nr = 0;
- c->replicas_gc.entry_size = 0;
-
- for_each_cpu_replicas_entry(&c->replicas, e) {
- /* Preserve unknown data types */
- if (e->data_type >= BCH_DATA_NR ||
- !((1 << e->data_type) & typemask)) {
- c->replicas_gc.nr++;
- c->replicas_gc.entry_size =
- max_t(unsigned, c->replicas_gc.entry_size,
- replicas_entry_bytes(e));
- }
- }
-
- c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
- c->replicas_gc.entry_size,
- GFP_KERNEL);
- if (!c->replicas_gc.entries) {
- mutex_unlock(&c->sb_lock);
- bch_err(c, "error allocating c->replicas_gc");
- return -BCH_ERR_ENOMEM_replicas_gc;
- }
-
- for_each_cpu_replicas_entry(&c->replicas, e)
- if (e->data_type >= BCH_DATA_NR ||
- !((1 << e->data_type) & typemask))
- memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
- e, c->replicas_gc.entry_size);
-
- bch2_cpu_replicas_sort(&c->replicas_gc);
- mutex_unlock(&c->sb_lock);
-
- return 0;
-}
-
-/*
- * New much simpler mechanism for clearing out unneeded replicas entries - drop
- * replicas entries that have 0 sectors used.
- *
- * However, we don't track sector counts for journal usage, so this doesn't drop
- * any BCH_DATA_journal entries; the old bch2_replicas_gc_(start|end) mechanism
- * is retained for that.
- */
-int bch2_replicas_gc2(struct bch_fs *c)
-{
- struct bch_replicas_cpu new = { 0 };
- unsigned nr;
- int ret = 0;
-
- bch2_accounting_mem_gc(c);
-retry:
- nr = READ_ONCE(c->replicas.nr);
- new.entry_size = READ_ONCE(c->replicas.entry_size);
- new.entries = kcalloc(nr, new.entry_size, GFP_KERNEL);
- if (!new.entries) {
- bch_err(c, "error allocating c->replicas_gc");
- return -BCH_ERR_ENOMEM_replicas_gc;
- }
-
- mutex_lock(&c->sb_lock);
- percpu_down_write(&c->mark_lock);
-
- if (nr != c->replicas.nr ||
- new.entry_size != c->replicas.entry_size) {
- percpu_up_write(&c->mark_lock);
- mutex_unlock(&c->sb_lock);
- kfree(new.entries);
- goto retry;
- }
-
- for (unsigned i = 0; i < c->replicas.nr; i++) {
- struct bch_replicas_entry_v1 *e =
- cpu_replicas_entry(&c->replicas, i);
-
- struct disk_accounting_pos k = {
- .type = BCH_DISK_ACCOUNTING_replicas,
- };
-
- unsafe_memcpy(&k.replicas, e, replicas_entry_bytes(e),
- "embedded variable length struct");
-
- struct bpos p = disk_accounting_pos_to_bpos(&k);
-
- struct bch_accounting_mem *acc = &c->accounting;
- bool kill = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
- accounting_pos_cmp, &p) >= acc->k.nr;
-
- if (e->data_type == BCH_DATA_journal || !kill)
- memcpy(cpu_replicas_entry(&new, new.nr++),
- e, new.entry_size);
- }
-
- bch2_cpu_replicas_sort(&new);
-
- ret = bch2_cpu_replicas_to_sb_replicas(c, &new);
-
- if (!ret)
- swap(c->replicas, new);
-
- kfree(new.entries);
-
- percpu_up_write(&c->mark_lock);
-
- if (!ret)
- bch2_write_super(c);
-
- mutex_unlock(&c->sb_lock);
-
- return ret;
-}
-
-/* Replicas tracking - superblock: */
-
-static int
-__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
- struct bch_replicas_cpu *cpu_r)
-{
- struct bch_replicas_entry_v1 *e, *dst;
- unsigned nr = 0, entry_size = 0, idx = 0;
-
- for_each_replicas_entry(sb_r, e) {
- entry_size = max_t(unsigned, entry_size,
- replicas_entry_bytes(e));
- nr++;
- }
-
- cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
- if (!cpu_r->entries)
- return -BCH_ERR_ENOMEM_cpu_replicas;
-
- cpu_r->nr = nr;
- cpu_r->entry_size = entry_size;
-
- for_each_replicas_entry(sb_r, e) {
- dst = cpu_replicas_entry(cpu_r, idx++);
- memcpy(dst, e, replicas_entry_bytes(e));
- bch2_replicas_entry_sort(dst);
- }
-
- return 0;
-}
-
-static int
-__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
- struct bch_replicas_cpu *cpu_r)
-{
- struct bch_replicas_entry_v0 *e;
- unsigned nr = 0, entry_size = 0, idx = 0;
-
- for_each_replicas_entry(sb_r, e) {
- entry_size = max_t(unsigned, entry_size,
- replicas_entry_bytes(e));
- nr++;
- }
-
- entry_size += sizeof(struct bch_replicas_entry_v1) -
- sizeof(struct bch_replicas_entry_v0);
-
- cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
- if (!cpu_r->entries)
- return -BCH_ERR_ENOMEM_cpu_replicas;
-
- cpu_r->nr = nr;
- cpu_r->entry_size = entry_size;
-
- for_each_replicas_entry(sb_r, e) {
- struct bch_replicas_entry_v1 *dst =
- cpu_replicas_entry(cpu_r, idx++);
-
- dst->data_type = e->data_type;
- dst->nr_devs = e->nr_devs;
- dst->nr_required = 1;
- memcpy(dst->devs, e->devs, e->nr_devs);
- bch2_replicas_entry_sort(dst);
- }
-
- return 0;
-}
-
-int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
-{
- struct bch_sb_field_replicas *sb_v1;
- struct bch_sb_field_replicas_v0 *sb_v0;
- struct bch_replicas_cpu new_r = { 0, 0, NULL };
- int ret = 0;
-
- if ((sb_v1 = bch2_sb_field_get(c->disk_sb.sb, replicas)))
- ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
- else if ((sb_v0 = bch2_sb_field_get(c->disk_sb.sb, replicas_v0)))
- ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);
- if (ret)
- return ret;
-
- bch2_cpu_replicas_sort(&new_r);
-
- percpu_down_write(&c->mark_lock);
- swap(c->replicas, new_r);
- percpu_up_write(&c->mark_lock);
-
- kfree(new_r.entries);
-
- return 0;
-}
-
-static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
- struct bch_replicas_cpu *r)
-{
- struct bch_sb_field_replicas_v0 *sb_r;
- struct bch_replicas_entry_v0 *dst;
- struct bch_replicas_entry_v1 *src;
- size_t bytes;
-
- bytes = sizeof(struct bch_sb_field_replicas);
-
- for_each_cpu_replicas_entry(r, src)
- bytes += replicas_entry_bytes(src) - 1;
-
- sb_r = bch2_sb_field_resize(&c->disk_sb, replicas_v0,
- DIV_ROUND_UP(bytes, sizeof(u64)));
- if (!sb_r)
- return -BCH_ERR_ENOSPC_sb_replicas;
-
- bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
- sb_r = bch2_sb_field_get(c->disk_sb.sb, replicas_v0);
-
- memset(&sb_r->entries, 0,
- vstruct_end(&sb_r->field) -
- (void *) &sb_r->entries);
-
- dst = sb_r->entries;
- for_each_cpu_replicas_entry(r, src) {
- dst->data_type = src->data_type;
- dst->nr_devs = src->nr_devs;
- memcpy(dst->devs, src->devs, src->nr_devs);
-
- dst = replicas_entry_next(dst);
-
- BUG_ON((void *) dst > vstruct_end(&sb_r->field));
- }
-
- return 0;
-}
-
-static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
- struct bch_replicas_cpu *r)
-{
- struct bch_sb_field_replicas *sb_r;
- struct bch_replicas_entry_v1 *dst, *src;
- bool need_v1 = false;
- size_t bytes;
-
- bytes = sizeof(struct bch_sb_field_replicas);
-
- for_each_cpu_replicas_entry(r, src) {
- bytes += replicas_entry_bytes(src);
- if (src->nr_required != 1)
- need_v1 = true;
- }
-
- if (!need_v1)
- return bch2_cpu_replicas_to_sb_replicas_v0(c, r);
-
- sb_r = bch2_sb_field_resize(&c->disk_sb, replicas,
- DIV_ROUND_UP(bytes, sizeof(u64)));
- if (!sb_r)
- return -BCH_ERR_ENOSPC_sb_replicas;
-
- bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
- sb_r = bch2_sb_field_get(c->disk_sb.sb, replicas);
-
- memset(&sb_r->entries, 0,
- vstruct_end(&sb_r->field) -
- (void *) &sb_r->entries);
-
- dst = sb_r->entries;
- for_each_cpu_replicas_entry(r, src) {
- memcpy(dst, src, replicas_entry_bytes(src));
-
- dst = replicas_entry_next(dst);
-
- BUG_ON((void *) dst > vstruct_end(&sb_r->field));
- }
-
- return 0;
-}
-
-static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
- struct bch_sb *sb,
- struct printbuf *err)
-{
- unsigned i;
-
- sort_r(cpu_r->entries,
- cpu_r->nr,
- cpu_r->entry_size,
- bch2_memcmp, NULL,
- (void *)(size_t)cpu_r->entry_size);
-
- for (i = 0; i < cpu_r->nr; i++) {
- struct bch_replicas_entry_v1 *e =
- cpu_replicas_entry(cpu_r, i);
-
- int ret = bch2_replicas_entry_sb_validate(e, sb, err);
- if (ret)
- return ret;
-
- if (i + 1 < cpu_r->nr) {
- struct bch_replicas_entry_v1 *n =
- cpu_replicas_entry(cpu_r, i + 1);
-
- BUG_ON(memcmp(e, n, cpu_r->entry_size) > 0);
-
- if (!memcmp(e, n, cpu_r->entry_size)) {
- prt_printf(err, "duplicate replicas entry ");
- bch2_replicas_entry_to_text(err, e);
- return -BCH_ERR_invalid_sb_replicas;
- }
- }
- }
-
- return 0;
-}
-
-static int bch2_sb_replicas_validate(struct bch_sb *sb, struct bch_sb_field *f,
- enum bch_validate_flags flags, struct printbuf *err)
-{
- struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
- struct bch_replicas_cpu cpu_r;
- int ret;
-
- ret = __bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r);
- if (ret)
- return ret;
-
- ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
- kfree(cpu_r.entries);
- return ret;
-}
-
-static void bch2_sb_replicas_to_text(struct printbuf *out,
- struct bch_sb *sb,
- struct bch_sb_field *f)
-{
- struct bch_sb_field_replicas *r = field_to_type(f, replicas);
- struct bch_replicas_entry_v1 *e;
- bool first = true;
-
- for_each_replicas_entry(r, e) {
- if (!first)
- prt_printf(out, " ");
- first = false;
-
- bch2_replicas_entry_to_text(out, e);
- }
- prt_newline(out);
-}
-
-const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
- .validate = bch2_sb_replicas_validate,
- .to_text = bch2_sb_replicas_to_text,
-};
-
-static int bch2_sb_replicas_v0_validate(struct bch_sb *sb, struct bch_sb_field *f,
- enum bch_validate_flags flags, struct printbuf *err)
-{
- struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
- struct bch_replicas_cpu cpu_r;
- int ret;
-
- ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r);
- if (ret)
- return ret;
-
- ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
- kfree(cpu_r.entries);
- return ret;
-}
-
-static void bch2_sb_replicas_v0_to_text(struct printbuf *out,
- struct bch_sb *sb,
- struct bch_sb_field *f)
-{
- struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
- struct bch_replicas_entry_v0 *e;
- bool first = true;
-
- for_each_replicas_entry(sb_r, e) {
- if (!first)
- prt_printf(out, " ");
- first = false;
-
- bch2_replicas_entry_v0_to_text(out, e);
- }
- prt_newline(out);
-}
-
-const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
- .validate = bch2_sb_replicas_v0_validate,
- .to_text = bch2_sb_replicas_v0_to_text,
-};
-
-/* Query replicas: */
-
-bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
- unsigned flags, bool print)
-{
- struct bch_replicas_entry_v1 *e;
- bool ret = true;
-
- percpu_down_read(&c->mark_lock);
- for_each_cpu_replicas_entry(&c->replicas, e) {
- unsigned nr_online = 0, nr_failed = 0, dflags = 0;
- bool metadata = e->data_type < BCH_DATA_user;
-
- if (e->data_type == BCH_DATA_cached)
- continue;
-
- rcu_read_lock();
- for (unsigned i = 0; i < e->nr_devs; i++) {
- if (e->devs[i] == BCH_SB_MEMBER_INVALID) {
- nr_failed++;
- continue;
- }
-
- nr_online += test_bit(e->devs[i], devs.d);
-
- struct bch_dev *ca = bch2_dev_rcu_noerror(c, e->devs[i]);
- nr_failed += !ca || ca->mi.state == BCH_MEMBER_STATE_failed;
- }
- rcu_read_unlock();
-
- if (nr_online + nr_failed == e->nr_devs)
- continue;
-
- if (nr_online < e->nr_required)
- dflags |= metadata
- ? BCH_FORCE_IF_METADATA_LOST
- : BCH_FORCE_IF_DATA_LOST;
-
- if (nr_online < e->nr_devs)
- dflags |= metadata
- ? BCH_FORCE_IF_METADATA_DEGRADED
- : BCH_FORCE_IF_DATA_DEGRADED;
-
- if (dflags & ~flags) {
- if (print) {
- struct printbuf buf = PRINTBUF;
-
- bch2_replicas_entry_to_text(&buf, e);
- bch_err(c, "insufficient devices online (%u) for replicas entry %s",
- nr_online, buf.buf);
- printbuf_exit(&buf);
- }
- ret = false;
- break;
- }
-
- }
- percpu_up_read(&c->mark_lock);
-
- return ret;
-}
-
-unsigned bch2_sb_dev_has_data(struct bch_sb *sb, unsigned dev)
-{
- struct bch_sb_field_replicas *replicas;
- struct bch_sb_field_replicas_v0 *replicas_v0;
- unsigned data_has = 0;
-
- replicas = bch2_sb_field_get(sb, replicas);
- replicas_v0 = bch2_sb_field_get(sb, replicas_v0);
-
- if (replicas) {
- struct bch_replicas_entry_v1 *r;
-
- for_each_replicas_entry(replicas, r) {
- if (r->data_type >= sizeof(data_has) * 8)
- continue;
-
- for (unsigned i = 0; i < r->nr_devs; i++)
- if (r->devs[i] == dev)
- data_has |= 1 << r->data_type;
- }
-
- } else if (replicas_v0) {
- struct bch_replicas_entry_v0 *r;
-
- for_each_replicas_entry_v0(replicas_v0, r) {
- if (r->data_type >= sizeof(data_has) * 8)
- continue;
-
- for (unsigned i = 0; i < r->nr_devs; i++)
- if (r->devs[i] == dev)
- data_has |= 1 << r->data_type;
- }
- }
-
-
- return data_has;
-}
-
-unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
-{
- mutex_lock(&c->sb_lock);
- unsigned ret = bch2_sb_dev_has_data(c->disk_sb.sb, ca->dev_idx);
- mutex_unlock(&c->sb_lock);
-
- return ret;
-}
-
-void bch2_fs_replicas_exit(struct bch_fs *c)
-{
- kfree(c->replicas.entries);
- kfree(c->replicas_gc.entries);
-}
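
The entries manipulated in the file above keep their device lists sorted so that plain memcmp() can be used for lookup and deduplication; a minimal standalone sketch of that add-then-sort step (hypothetical fixed-size struct standing in for the packed on-disk entry):

#include <stdio.h>

struct entry {
	unsigned char nr_devs;
	unsigned char devs[8];
};

/* cf. replicas_entry_add_dev(): append a device index */
static void entry_add_dev(struct entry *e, unsigned char dev)
{
	e->devs[e->nr_devs++] = dev;
}

/* cf. bch2_replicas_entry_sort(): a simple bubble sort of the device indices */
static void entry_sort(struct entry *e)
{
	for (unsigned i = 0; i < e->nr_devs; i++)
		for (unsigned j = 0; j + 1 < e->nr_devs - i; j++)
			if (e->devs[j] > e->devs[j + 1]) {
				unsigned char tmp = e->devs[j];
				e->devs[j] = e->devs[j + 1];
				e->devs[j + 1] = tmp;
			}
}

int main(void)
{
	struct entry e = { 0 };

	entry_add_dev(&e, 3);
	entry_add_dev(&e, 1);
	entry_add_dev(&e, 2);
	entry_sort(&e);

	for (unsigned i = 0; i < e.nr_devs; i++)
		printf("%u ", e.devs[i]);	/* prints: 1 2 3 */
	printf("\n");
	return 0;
}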
diff --git a/fs/bcachefs/replicas.h b/fs/bcachefs/replicas.h
deleted file mode 100644
index 5aba2c1ce133..000000000000
--- a/fs/bcachefs/replicas.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_REPLICAS_H
-#define _BCACHEFS_REPLICAS_H
-
-#include "bkey.h"
-#include "eytzinger.h"
-#include "replicas_types.h"
-
-void bch2_replicas_entry_sort(struct bch_replicas_entry_v1 *);
-void bch2_replicas_entry_to_text(struct printbuf *,
- struct bch_replicas_entry_v1 *);
-int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *,
- struct bch_fs *, struct printbuf *);
-void bch2_cpu_replicas_to_text(struct printbuf *, struct bch_replicas_cpu *);
-
-static inline struct bch_replicas_entry_v1 *
-cpu_replicas_entry(struct bch_replicas_cpu *r, unsigned i)
-{
- return (void *) r->entries + r->entry_size * i;
-}
-
-int bch2_replicas_entry_idx(struct bch_fs *,
- struct bch_replicas_entry_v1 *);
-
-void bch2_devlist_to_replicas(struct bch_replicas_entry_v1 *,
- enum bch_data_type,
- struct bch_devs_list);
-
-bool bch2_replicas_marked_locked(struct bch_fs *,
- struct bch_replicas_entry_v1 *);
-bool bch2_replicas_marked(struct bch_fs *, struct bch_replicas_entry_v1 *);
-int bch2_mark_replicas(struct bch_fs *,
- struct bch_replicas_entry_v1 *);
-
-void bch2_bkey_to_replicas(struct bch_replicas_entry_v1 *, struct bkey_s_c);
-
-static inline void bch2_replicas_entry_cached(struct bch_replicas_entry_v1 *e,
- unsigned dev)
-{
- e->data_type = BCH_DATA_cached;
- e->nr_devs = 1;
- e->nr_required = 1;
- e->devs[0] = dev;
-}
-
-bool bch2_have_enough_devs(struct bch_fs *, struct bch_devs_mask,
- unsigned, bool);
-
-unsigned bch2_sb_dev_has_data(struct bch_sb *, unsigned);
-unsigned bch2_dev_has_data(struct bch_fs *, struct bch_dev *);
-
-int bch2_replicas_gc_end(struct bch_fs *, int);
-int bch2_replicas_gc_start(struct bch_fs *, unsigned);
-int bch2_replicas_gc2(struct bch_fs *);
-
-#define for_each_cpu_replicas_entry(_r, _i) \
- for (_i = (_r)->entries; \
- (void *) (_i) < (void *) (_r)->entries + (_r)->nr * (_r)->entry_size;\
- _i = (void *) (_i) + (_r)->entry_size)
-
-/* iterate over superblock replicas - used by userspace tools: */
-
-#define replicas_entry_next(_i) \
- ((typeof(_i)) ((void *) (_i) + replicas_entry_bytes(_i)))
-
-#define for_each_replicas_entry(_r, _i) \
- for (_i = (_r)->entries; \
- (void *) (_i) < vstruct_end(&(_r)->field) && (_i)->data_type;\
- (_i) = replicas_entry_next(_i))
-
-#define for_each_replicas_entry_v0(_r, _i) \
- for (_i = (_r)->entries; \
- (void *) (_i) < vstruct_end(&(_r)->field) && (_i)->data_type;\
- (_i) = replicas_entry_next(_i))
-
-int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *);
-
-extern const struct bch_sb_field_ops bch_sb_field_ops_replicas;
-extern const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0;
-
-void bch2_fs_replicas_exit(struct bch_fs *);
-
-#endif /* _BCACHEFS_REPLICAS_H */
diff --git a/fs/bcachefs/replicas_format.h b/fs/bcachefs/replicas_format.h
deleted file mode 100644
index b7eff904acdb..000000000000
--- a/fs/bcachefs/replicas_format.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_REPLICAS_FORMAT_H
-#define _BCACHEFS_REPLICAS_FORMAT_H
-
-struct bch_replicas_entry_v0 {
- __u8 data_type;
- __u8 nr_devs;
- __u8 devs[] __counted_by(nr_devs);
-} __packed;
-
-struct bch_sb_field_replicas_v0 {
- struct bch_sb_field field;
- struct bch_replicas_entry_v0 entries[];
-} __packed __aligned(8);
-
-struct bch_replicas_entry_v1 {
- __u8 data_type;
- __u8 nr_devs;
- __u8 nr_required;
- __u8 devs[] __counted_by(nr_devs);
-} __packed;
-
-struct bch_sb_field_replicas {
- struct bch_sb_field field;
- struct bch_replicas_entry_v1 entries[];
-} __packed __aligned(8);
-
-#define replicas_entry_bytes(_i) \
- (offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
-
-#define replicas_entry_add_dev(e, d) ({ \
- (e)->nr_devs++; \
- (e)->devs[(e)->nr_devs - 1] = (d); \
-})
-
-#endif /* _BCACHEFS_REPLICAS_FORMAT_H */
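
The replicas_entry_bytes() macro above is the whole on-disk size calculation: a 3-byte header (data_type, nr_devs, nr_required) followed by nr_devs one-byte device indices. A standalone sketch using a hypothetical mirror of the struct, just to make the arithmetic concrete:

#include <stddef.h>
#include <stdio.h>

/* hypothetical mirror of bch_replicas_entry_v1 (3-byte header + devs[]) */
struct entry_v1 {
	unsigned char data_type;
	unsigned char nr_devs;
	unsigned char nr_required;
	unsigned char devs[];
} __attribute__((packed));

#define entry_bytes(_e) (offsetof(struct entry_v1, devs) + (_e)->nr_devs)

int main(void)
{
	struct entry_v1 e = { .data_type = 1, .nr_devs = 3, .nr_required = 1 };

	/* 3 header bytes + 3 device indices = 6 bytes on disk */
	printf("%zu\n", entry_bytes(&e));
	return 0;
}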
diff --git a/fs/bcachefs/replicas_types.h b/fs/bcachefs/replicas_types.h
deleted file mode 100644
index fed71c861fe7..000000000000
--- a/fs/bcachefs/replicas_types.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_REPLICAS_TYPES_H
-#define _BCACHEFS_REPLICAS_TYPES_H
-
-struct bch_replicas_cpu {
- unsigned nr;
- unsigned entry_size;
- struct bch_replicas_entry_v1 *entries;
-};
-
-#endif /* _BCACHEFS_REPLICAS_TYPES_H */
diff --git a/fs/bcachefs/sb-clean.c b/fs/bcachefs/sb-clean.c
deleted file mode 100644
index 005275281804..000000000000
--- a/fs/bcachefs/sb-clean.c
+++ /dev/null
@@ -1,336 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "btree_update_interior.h"
-#include "buckets.h"
-#include "error.h"
-#include "journal_io.h"
-#include "replicas.h"
-#include "sb-clean.h"
-#include "super-io.h"
-
-/*
- * BCH_SB_FIELD_clean:
- *
- * Btree roots, and a few other things, are recovered from the journal after an
- * unclean shutdown - but after a clean shutdown, to avoid having to read the
- * journal, we can store them in the superblock.
- *
- * bch_sb_field_clean simply contains a list of journal entries, stored exactly
- * as they would be in the journal:
- */
-
-int bch2_sb_clean_validate_late(struct bch_fs *c, struct bch_sb_field_clean *clean,
- int write)
-{
- struct jset_entry *entry;
- int ret;
-
- for (entry = clean->start;
- entry < (struct jset_entry *) vstruct_end(&clean->field);
- entry = vstruct_next(entry)) {
- if (vstruct_end(entry) > vstruct_end(&clean->field)) {
- bch_err(c, "journal entry (u64s %u) overran end of superblock clean section (u64s %u) by %zu",
- le16_to_cpu(entry->u64s), le32_to_cpu(clean->field.u64s),
- (u64 *) vstruct_end(entry) - (u64 *) vstruct_end(&clean->field));
- bch2_sb_error_count(c, BCH_FSCK_ERR_sb_clean_entry_overrun);
- return -BCH_ERR_fsck_repair_unimplemented;
- }
-
- ret = bch2_journal_entry_validate(c, NULL, entry,
- le16_to_cpu(c->disk_sb.sb->version),
- BCH_SB_BIG_ENDIAN(c->disk_sb.sb),
- write);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static struct bkey_i *btree_root_find(struct bch_fs *c,
- struct bch_sb_field_clean *clean,
- struct jset *j,
- enum btree_id id, unsigned *level)
-{
- struct bkey_i *k;
- struct jset_entry *entry, *start, *end;
-
- if (clean) {
- start = clean->start;
- end = vstruct_end(&clean->field);
- } else {
- start = j->start;
- end = vstruct_last(j);
- }
-
- for (entry = start; entry < end; entry = vstruct_next(entry))
- if (entry->type == BCH_JSET_ENTRY_btree_root &&
- entry->btree_id == id)
- goto found;
-
- return NULL;
-found:
- if (!entry->u64s)
- return ERR_PTR(-EINVAL);
-
- k = entry->start;
- *level = entry->level;
- return k;
-}
-
-int bch2_verify_superblock_clean(struct bch_fs *c,
- struct bch_sb_field_clean **cleanp,
- struct jset *j)
-{
- unsigned i;
- struct bch_sb_field_clean *clean = *cleanp;
- struct printbuf buf1 = PRINTBUF;
- struct printbuf buf2 = PRINTBUF;
- int ret = 0;
-
- if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
- sb_clean_journal_seq_mismatch,
- "superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
- le64_to_cpu(clean->journal_seq),
- le64_to_cpu(j->seq))) {
- kfree(clean);
- *cleanp = NULL;
- return 0;
- }
-
- for (i = 0; i < BTREE_ID_NR; i++) {
- struct bkey_i *k1, *k2;
- unsigned l1 = 0, l2 = 0;
-
- k1 = btree_root_find(c, clean, NULL, i, &l1);
- k2 = btree_root_find(c, NULL, j, i, &l2);
-
- if (!k1 && !k2)
- continue;
-
- printbuf_reset(&buf1);
- printbuf_reset(&buf2);
-
- if (k1)
- bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(k1));
- else
- prt_printf(&buf1, "(none)");
-
- if (k2)
- bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(k2));
- else
- prt_printf(&buf2, "(none)");
-
- mustfix_fsck_err_on(!k1 || !k2 ||
- IS_ERR(k1) ||
- IS_ERR(k2) ||
- k1->k.u64s != k2->k.u64s ||
- memcmp(k1, k2, bkey_bytes(&k1->k)) ||
- l1 != l2, c,
- sb_clean_btree_root_mismatch,
- "superblock btree root %u doesn't match journal after clean shutdown\n"
- "sb: l=%u %s\n"
- "journal: l=%u %s\n", i,
- l1, buf1.buf,
- l2, buf2.buf);
- }
-fsck_err:
- printbuf_exit(&buf2);
- printbuf_exit(&buf1);
- return ret;
-}
-
-struct bch_sb_field_clean *bch2_read_superblock_clean(struct bch_fs *c)
-{
- struct bch_sb_field_clean *clean, *sb_clean;
- int ret;
-
- mutex_lock(&c->sb_lock);
- sb_clean = bch2_sb_field_get(c->disk_sb.sb, clean);
-
- if (fsck_err_on(!sb_clean, c,
- sb_clean_missing,
- "superblock marked clean but clean section not present")) {
- SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
- c->sb.clean = false;
- mutex_unlock(&c->sb_lock);
- return ERR_PTR(-BCH_ERR_invalid_sb_clean);
- }
-
- clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
- GFP_KERNEL);
- if (!clean) {
- mutex_unlock(&c->sb_lock);
- return ERR_PTR(-BCH_ERR_ENOMEM_read_superblock_clean);
- }
-
- ret = bch2_sb_clean_validate_late(c, clean, READ);
- if (ret) {
- kfree(clean);
- mutex_unlock(&c->sb_lock);
- return ERR_PTR(ret);
- }
-
- mutex_unlock(&c->sb_lock);
-
- return clean;
-fsck_err:
- mutex_unlock(&c->sb_lock);
- return ERR_PTR(ret);
-}
-
-void bch2_journal_super_entries_add_common(struct bch_fs *c,
- struct jset_entry **end,
- u64 journal_seq)
-{
- {
- struct jset_entry_usage *u =
- container_of(jset_entry_init(end, sizeof(*u)),
- struct jset_entry_usage, entry);
-
- u->entry.type = BCH_JSET_ENTRY_usage;
- u->entry.btree_id = BCH_FS_USAGE_key_version;
- u->v = cpu_to_le64(atomic64_read(&c->key_version));
- }
-
- for (unsigned i = 0; i < 2; i++) {
- struct jset_entry_clock *clock =
- container_of(jset_entry_init(end, sizeof(*clock)),
- struct jset_entry_clock, entry);
-
- clock->entry.type = BCH_JSET_ENTRY_clock;
- clock->rw = i;
- clock->time = cpu_to_le64(atomic64_read(&c->io_clock[i].now));
- }
-}
-
-static int bch2_sb_clean_validate(struct bch_sb *sb, struct bch_sb_field *f,
- enum bch_validate_flags flags, struct printbuf *err)
-{
- struct bch_sb_field_clean *clean = field_to_type(f, clean);
-
- if (vstruct_bytes(&clean->field) < sizeof(*clean)) {
- prt_printf(err, "wrong size (got %zu should be %zu)",
- vstruct_bytes(&clean->field), sizeof(*clean));
- return -BCH_ERR_invalid_sb_clean;
- }
-
- for (struct jset_entry *entry = clean->start;
- entry != vstruct_end(&clean->field);
- entry = vstruct_next(entry)) {
- if ((void *) vstruct_next(entry) > vstruct_end(&clean->field)) {
- prt_str(err, "entry type ");
- bch2_prt_jset_entry_type(err, entry->type);
- prt_str(err, " overruns end of section");
- return -BCH_ERR_invalid_sb_clean;
- }
- }
-
- return 0;
-}
-
-static void bch2_sb_clean_to_text(struct printbuf *out, struct bch_sb *sb,
- struct bch_sb_field *f)
-{
- struct bch_sb_field_clean *clean = field_to_type(f, clean);
- struct jset_entry *entry;
-
- prt_printf(out, "flags: %x\n", le32_to_cpu(clean->flags));
- prt_printf(out, "journal_seq: %llu\n", le64_to_cpu(clean->journal_seq));
-
- for (entry = clean->start;
- entry != vstruct_end(&clean->field);
- entry = vstruct_next(entry)) {
- if ((void *) vstruct_next(entry) > vstruct_end(&clean->field))
- break;
-
- if (entry->type == BCH_JSET_ENTRY_btree_keys &&
- !entry->u64s)
- continue;
-
- bch2_journal_entry_to_text(out, NULL, entry);
- prt_newline(out);
- }
-}
-
-const struct bch_sb_field_ops bch_sb_field_ops_clean = {
- .validate = bch2_sb_clean_validate,
- .to_text = bch2_sb_clean_to_text,
-};
-
-int bch2_fs_mark_dirty(struct bch_fs *c)
-{
- int ret;
-
- /*
- * Unconditionally write superblock, to verify it hasn't changed before
- * we go rw:
- */
-
- mutex_lock(&c->sb_lock);
- SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
- c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALWAYS);
-
- ret = bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-
- return ret;
-}
-
-void bch2_fs_mark_clean(struct bch_fs *c)
-{
- struct bch_sb_field_clean *sb_clean;
- struct jset_entry *entry;
- unsigned u64s;
- int ret;
-
- mutex_lock(&c->sb_lock);
- if (BCH_SB_CLEAN(c->disk_sb.sb))
- goto out;
-
- SET_BCH_SB_CLEAN(c->disk_sb.sb, true);
-
- c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
- c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_metadata);
- c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_extents_above_btree_updates));
- c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_btree_updates_journalled));
-
- u64s = sizeof(*sb_clean) / sizeof(u64) + c->journal.entry_u64s_reserved;
-
- sb_clean = bch2_sb_field_resize(&c->disk_sb, clean, u64s);
- if (!sb_clean) {
- bch_err(c, "error resizing superblock while setting filesystem clean");
- goto out;
- }
-
- sb_clean->flags = 0;
- sb_clean->journal_seq = cpu_to_le64(atomic64_read(&c->journal.seq));
-
- /* Trying to catch outstanding bug: */
- BUG_ON(le64_to_cpu(sb_clean->journal_seq) > S64_MAX);
-
- entry = sb_clean->start;
- bch2_journal_super_entries_add_common(c, &entry, 0);
- entry = bch2_btree_roots_to_journal_entries(c, entry, 0);
- BUG_ON((void *) entry > vstruct_end(&sb_clean->field));
-
- memset(entry, 0,
- vstruct_end(&sb_clean->field) - (void *) entry);
-
- /*
- * this should be in the write path, and we should be validating every
- * superblock section:
- */
- ret = bch2_sb_clean_validate_late(c, sb_clean, WRITE);
- if (ret) {
- bch_err(c, "error writing marking filesystem clean: validate error");
- goto out;
- }
-
- bch2_journal_pos_from_member_info_set(c);
-
- bch2_write_super(c);
-out:
- mutex_unlock(&c->sb_lock);
-}
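
The clean section read and written in the file above is just a flat run of variable-length journal entries, walked with vstruct_next() until vstruct_end(&clean->field). A minimal standalone sketch of that traversal pattern (hypothetical entry layout and helper names, not the real jset_entry/vstruct macros):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* hypothetical entry: one u64 of header followed by u64s payload words */
struct entry {
	uint16_t u64s;
	uint16_t type;
	uint32_t pad;
	uint64_t data[];
};

static struct entry *entry_next(struct entry *e)
{
	return (struct entry *) (e->data + e->u64s);
}

int main(void)
{
	uint64_t buf[16];
	memset(buf, 0, sizeof(buf));

	struct entry *first = (struct entry *) buf;
	first->u64s = 2;		/* entry 0: 2 payload words */
	entry_next(first)->u64s = 1;	/* entry 1: 1 payload word  */

	void *end = buf + 16;		/* plays the role of vstruct_end() */
	for (struct entry *e = first; (void *) e < end && e->u64s; e = entry_next(e))
		printf("entry type %u, %u payload u64s\n", e->type, e->u64s);
	return 0;
}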
diff --git a/fs/bcachefs/sb-clean.h b/fs/bcachefs/sb-clean.h
deleted file mode 100644
index 71caef281239..000000000000
--- a/fs/bcachefs/sb-clean.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SB_CLEAN_H
-#define _BCACHEFS_SB_CLEAN_H
-
-int bch2_sb_clean_validate_late(struct bch_fs *, struct bch_sb_field_clean *, int);
-int bch2_verify_superblock_clean(struct bch_fs *, struct bch_sb_field_clean **,
- struct jset *);
-struct bch_sb_field_clean *bch2_read_superblock_clean(struct bch_fs *);
-void bch2_journal_super_entries_add_common(struct bch_fs *, struct jset_entry **, u64);
-
-extern const struct bch_sb_field_ops bch_sb_field_ops_clean;
-
-int bch2_fs_mark_dirty(struct bch_fs *);
-void bch2_fs_mark_clean(struct bch_fs *);
-
-#endif /* _BCACHEFS_SB_CLEAN_H */
diff --git a/fs/bcachefs/sb-counters.c b/fs/bcachefs/sb-counters.c
deleted file mode 100644
index 6992e7469112..000000000000
--- a/fs/bcachefs/sb-counters.c
+++ /dev/null
@@ -1,99 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "bcachefs.h"
-#include "super-io.h"
-#include "sb-counters.h"
-
-/* BCH_SB_FIELD_counters */
-
-static const char * const bch2_counter_names[] = {
-#define x(t, n, ...) (#t),
- BCH_PERSISTENT_COUNTERS()
-#undef x
- NULL
-};
-
-static size_t bch2_sb_counter_nr_entries(struct bch_sb_field_counters *ctrs)
-{
- if (!ctrs)
- return 0;
-
- return (__le64 *) vstruct_end(&ctrs->field) - &ctrs->d[0];
-};
-
-static int bch2_sb_counters_validate(struct bch_sb *sb, struct bch_sb_field *f,
- enum bch_validate_flags flags, struct printbuf *err)
-{
- return 0;
-};
-
-static void bch2_sb_counters_to_text(struct printbuf *out, struct bch_sb *sb,
- struct bch_sb_field *f)
-{
- struct bch_sb_field_counters *ctrs = field_to_type(f, counters);
- unsigned int nr = bch2_sb_counter_nr_entries(ctrs);
-
- for (unsigned i = 0; i < nr; i++)
- prt_printf(out, "%s \t%llu\n",
- i < BCH_COUNTER_NR ? bch2_counter_names[i] : "(unknown)",
- le64_to_cpu(ctrs->d[i]));
-};
-
-int bch2_sb_counters_to_cpu(struct bch_fs *c)
-{
- struct bch_sb_field_counters *ctrs = bch2_sb_field_get(c->disk_sb.sb, counters);
- unsigned int i;
- unsigned int nr = bch2_sb_counter_nr_entries(ctrs);
- u64 val = 0;
-
- for (i = 0; i < BCH_COUNTER_NR; i++)
- c->counters_on_mount[i] = 0;
-
- for (i = 0; i < min_t(unsigned int, nr, BCH_COUNTER_NR); i++) {
- val = le64_to_cpu(ctrs->d[i]);
- percpu_u64_set(&c->counters[i], val);
- c->counters_on_mount[i] = val;
- }
- return 0;
-};
-
-int bch2_sb_counters_from_cpu(struct bch_fs *c)
-{
- struct bch_sb_field_counters *ctrs = bch2_sb_field_get(c->disk_sb.sb, counters);
- struct bch_sb_field_counters *ret;
- unsigned int i;
- unsigned int nr = bch2_sb_counter_nr_entries(ctrs);
-
- if (nr < BCH_COUNTER_NR) {
- ret = bch2_sb_field_resize(&c->disk_sb, counters,
- sizeof(*ctrs) / sizeof(u64) + BCH_COUNTER_NR);
-
- if (ret) {
- ctrs = ret;
- nr = bch2_sb_counter_nr_entries(ctrs);
- }
- }
-
-
- for (i = 0; i < min_t(unsigned int, nr, BCH_COUNTER_NR); i++)
- ctrs->d[i] = cpu_to_le64(percpu_u64_get(&c->counters[i]));
- return 0;
-}
-
-void bch2_fs_counters_exit(struct bch_fs *c)
-{
- free_percpu(c->counters);
-}
-
-int bch2_fs_counters_init(struct bch_fs *c)
-{
- c->counters = __alloc_percpu(sizeof(u64) * BCH_COUNTER_NR, sizeof(u64));
- if (!c->counters)
- return -BCH_ERR_ENOMEM_fs_counters_init;
-
- return bch2_sb_counters_to_cpu(c);
-}
-
-const struct bch_sb_field_ops bch_sb_field_ops_counters = {
- .validate = bch2_sb_counters_validate,
- .to_text = bch2_sb_counters_to_text,
-};
diff --git a/fs/bcachefs/sb-counters.h b/fs/bcachefs/sb-counters.h
deleted file mode 100644
index 81f8aec9fcb1..000000000000
--- a/fs/bcachefs/sb-counters.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SB_COUNTERS_H
-#define _BCACHEFS_SB_COUNTERS_H
-
-#include "bcachefs.h"
-#include "super-io.h"
-
-int bch2_sb_counters_to_cpu(struct bch_fs *);
-int bch2_sb_counters_from_cpu(struct bch_fs *);
-
-void bch2_fs_counters_exit(struct bch_fs *);
-int bch2_fs_counters_init(struct bch_fs *);
-
-extern const struct bch_sb_field_ops bch_sb_field_ops_counters;
-
-#endif // _BCACHEFS_SB_COUNTERS_H
diff --git a/fs/bcachefs/sb-counters_format.h b/fs/bcachefs/sb-counters_format.h
deleted file mode 100644
index 62ea478215d0..000000000000
--- a/fs/bcachefs/sb-counters_format.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SB_COUNTERS_FORMAT_H
-#define _BCACHEFS_SB_COUNTERS_FORMAT_H
-
-#define BCH_PERSISTENT_COUNTERS() \
- x(io_read, 0) \
- x(io_write, 1) \
- x(io_move, 2) \
- x(bucket_invalidate, 3) \
- x(bucket_discard, 4) \
- x(bucket_alloc, 5) \
- x(bucket_alloc_fail, 6) \
- x(btree_cache_scan, 7) \
- x(btree_cache_reap, 8) \
- x(btree_cache_cannibalize, 9) \
- x(btree_cache_cannibalize_lock, 10) \
- x(btree_cache_cannibalize_lock_fail, 11) \
- x(btree_cache_cannibalize_unlock, 12) \
- x(btree_node_write, 13) \
- x(btree_node_read, 14) \
- x(btree_node_compact, 15) \
- x(btree_node_merge, 16) \
- x(btree_node_split, 17) \
- x(btree_node_rewrite, 18) \
- x(btree_node_alloc, 19) \
- x(btree_node_free, 20) \
- x(btree_node_set_root, 21) \
- x(btree_path_relock_fail, 22) \
- x(btree_path_upgrade_fail, 23) \
- x(btree_reserve_get_fail, 24) \
- x(journal_entry_full, 25) \
- x(journal_full, 26) \
- x(journal_reclaim_finish, 27) \
- x(journal_reclaim_start, 28) \
- x(journal_write, 29) \
- x(read_promote, 30) \
- x(read_bounce, 31) \
- x(read_split, 33) \
- x(read_retry, 32) \
- x(read_reuse_race, 34) \
- x(move_extent_read, 35) \
- x(move_extent_write, 36) \
- x(move_extent_finish, 37) \
- x(move_extent_fail, 38) \
- x(move_extent_start_fail, 39) \
- x(copygc, 40) \
- x(copygc_wait, 41) \
- x(gc_gens_end, 42) \
- x(gc_gens_start, 43) \
- x(trans_blocked_journal_reclaim, 44) \
- x(trans_restart_btree_node_reused, 45) \
- x(trans_restart_btree_node_split, 46) \
- x(trans_restart_fault_inject, 47) \
- x(trans_restart_iter_upgrade, 48) \
- x(trans_restart_journal_preres_get, 49) \
- x(trans_restart_journal_reclaim, 50) \
- x(trans_restart_journal_res_get, 51) \
- x(trans_restart_key_cache_key_realloced, 52) \
- x(trans_restart_key_cache_raced, 53) \
- x(trans_restart_mark_replicas, 54) \
- x(trans_restart_mem_realloced, 55) \
- x(trans_restart_memory_allocation_failure, 56) \
- x(trans_restart_relock, 57) \
- x(trans_restart_relock_after_fill, 58) \
- x(trans_restart_relock_key_cache_fill, 59) \
- x(trans_restart_relock_next_node, 60) \
- x(trans_restart_relock_parent_for_fill, 61) \
- x(trans_restart_relock_path, 62) \
- x(trans_restart_relock_path_intent, 63) \
- x(trans_restart_too_many_iters, 64) \
- x(trans_restart_traverse, 65) \
- x(trans_restart_upgrade, 66) \
- x(trans_restart_would_deadlock, 67) \
- x(trans_restart_would_deadlock_write, 68) \
- x(trans_restart_injected, 69) \
- x(trans_restart_key_cache_upgrade, 70) \
- x(trans_traverse_all, 71) \
- x(transaction_commit, 72) \
- x(write_super, 73) \
- x(trans_restart_would_deadlock_recursion_limit, 74) \
- x(trans_restart_write_buffer_flush, 75) \
- x(trans_restart_split_race, 76) \
- x(write_buffer_flush_slowpath, 77) \
- x(write_buffer_flush_sync, 78)
-
-enum bch_persistent_counters {
-#define x(t, n, ...) BCH_COUNTER_##t,
- BCH_PERSISTENT_COUNTERS()
-#undef x
- BCH_COUNTER_NR
-};
-
-struct bch_sb_field_counters {
- struct bch_sb_field field;
- __le64 d[];
-};
-
-#endif /* _BCACHEFS_SB_COUNTERS_FORMAT_H */
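
BCH_PERSISTENT_COUNTERS() above is a classic x-macro: the single list expands once into the enum at the bottom of this header and once into the name table in sb-counters.c. A standalone toy version of the same pattern (three made-up counters; the real list also carries an explicit stable index as a second argument, dropped here for brevity):

#include <stdio.h>

#define MY_COUNTERS()	\
	x(io_read)	\
	x(io_write)	\
	x(io_move)

enum my_counter {
#define x(t) COUNTER_##t,
	MY_COUNTERS()
#undef x
	COUNTER_NR
};

static const char * const counter_names[] = {
#define x(t) #t,
	MY_COUNTERS()
#undef x
};

int main(void)
{
	/* one definition of the list, two consistent expansions */
	for (unsigned i = 0; i < COUNTER_NR; i++)
		printf("%u: %s\n", i, counter_names[i]);
	return 0;
}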
diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c
deleted file mode 100644
index 8767c33c2b51..000000000000
--- a/fs/bcachefs/sb-downgrade.c
+++ /dev/null
@@ -1,416 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/*
- * Superblock section that contains a list of recovery passes to run when
- * downgrading past a given version
- */
-
-#include "bcachefs.h"
-#include "darray.h"
-#include "recovery_passes.h"
-#include "sb-downgrade.h"
-#include "sb-errors.h"
-#include "super-io.h"
-
-#define RECOVERY_PASS_ALL_FSCK BIT_ULL(63)
-
-/*
- * Upgrade, downgrade tables - run certain recovery passes, fix certain errors
- *
- * x(version, recovery_passes, errors...)
- */
-#define UPGRADE_TABLE() \
- x(backpointers, \
- RECOVERY_PASS_ALL_FSCK) \
- x(inode_v3, \
- RECOVERY_PASS_ALL_FSCK) \
- x(unwritten_extents, \
- RECOVERY_PASS_ALL_FSCK) \
- x(bucket_gens, \
- BIT_ULL(BCH_RECOVERY_PASS_bucket_gens_init)| \
- RECOVERY_PASS_ALL_FSCK) \
- x(lru_v2, \
- RECOVERY_PASS_ALL_FSCK) \
- x(fragmentation_lru, \
- RECOVERY_PASS_ALL_FSCK) \
- x(no_bps_in_alloc_keys, \
- RECOVERY_PASS_ALL_FSCK) \
- x(snapshot_trees, \
- RECOVERY_PASS_ALL_FSCK) \
- x(snapshot_skiplists, \
- BIT_ULL(BCH_RECOVERY_PASS_check_snapshots), \
- BCH_FSCK_ERR_snapshot_bad_depth, \
- BCH_FSCK_ERR_snapshot_bad_skiplist) \
- x(deleted_inodes, \
- BIT_ULL(BCH_RECOVERY_PASS_check_inodes), \
- BCH_FSCK_ERR_unlinked_inode_not_on_deleted_list) \
- x(rebalance_work, \
- BIT_ULL(BCH_RECOVERY_PASS_set_fs_needs_rebalance)) \
- x(subvolume_fs_parent, \
- BIT_ULL(BCH_RECOVERY_PASS_check_dirents), \
- BCH_FSCK_ERR_subvol_fs_path_parent_wrong) \
- x(btree_subvolume_children, \
- BIT_ULL(BCH_RECOVERY_PASS_check_subvols), \
- BCH_FSCK_ERR_subvol_children_not_set) \
- x(mi_btree_bitmap, \
- BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \
- BCH_FSCK_ERR_btree_bitmap_not_marked) \
- x(disk_accounting_v2, \
- BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \
- BCH_FSCK_ERR_bkey_version_in_future, \
- BCH_FSCK_ERR_dev_usage_buckets_wrong, \
- BCH_FSCK_ERR_dev_usage_sectors_wrong, \
- BCH_FSCK_ERR_dev_usage_fragmented_wrong, \
- BCH_FSCK_ERR_accounting_mismatch) \
- x(disk_accounting_v3, \
- BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \
- BCH_FSCK_ERR_bkey_version_in_future, \
- BCH_FSCK_ERR_dev_usage_buckets_wrong, \
- BCH_FSCK_ERR_dev_usage_sectors_wrong, \
- BCH_FSCK_ERR_dev_usage_fragmented_wrong, \
- BCH_FSCK_ERR_accounting_mismatch, \
- BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0, \
- BCH_FSCK_ERR_accounting_key_replicas_nr_required_bad, \
- BCH_FSCK_ERR_accounting_key_replicas_devs_unsorted, \
- BCH_FSCK_ERR_accounting_key_junk_at_end) \
- x(disk_accounting_inum, \
- BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \
- BCH_FSCK_ERR_accounting_mismatch) \
- x(rebalance_work_acct_fix, \
- BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \
- BCH_FSCK_ERR_accounting_mismatch) \
- x(inode_has_child_snapshots, \
- BIT_ULL(BCH_RECOVERY_PASS_check_inodes), \
- BCH_FSCK_ERR_inode_has_child_snapshots_wrong)
-
-#define DOWNGRADE_TABLE() \
- x(bucket_stripe_sectors, \
- 0) \
- x(disk_accounting_v2, \
- BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \
- BCH_FSCK_ERR_dev_usage_buckets_wrong, \
- BCH_FSCK_ERR_dev_usage_sectors_wrong, \
- BCH_FSCK_ERR_dev_usage_fragmented_wrong, \
- BCH_FSCK_ERR_fs_usage_hidden_wrong, \
- BCH_FSCK_ERR_fs_usage_btree_wrong, \
- BCH_FSCK_ERR_fs_usage_data_wrong, \
- BCH_FSCK_ERR_fs_usage_cached_wrong, \
- BCH_FSCK_ERR_fs_usage_reserved_wrong, \
- BCH_FSCK_ERR_fs_usage_nr_inodes_wrong, \
- BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong, \
- BCH_FSCK_ERR_fs_usage_replicas_wrong, \
- BCH_FSCK_ERR_bkey_version_in_future) \
- x(disk_accounting_v3, \
- BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \
- BCH_FSCK_ERR_dev_usage_buckets_wrong, \
- BCH_FSCK_ERR_dev_usage_sectors_wrong, \
- BCH_FSCK_ERR_dev_usage_fragmented_wrong, \
- BCH_FSCK_ERR_fs_usage_hidden_wrong, \
- BCH_FSCK_ERR_fs_usage_btree_wrong, \
- BCH_FSCK_ERR_fs_usage_data_wrong, \
- BCH_FSCK_ERR_fs_usage_cached_wrong, \
- BCH_FSCK_ERR_fs_usage_reserved_wrong, \
- BCH_FSCK_ERR_fs_usage_nr_inodes_wrong, \
- BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong, \
- BCH_FSCK_ERR_fs_usage_replicas_wrong, \
- BCH_FSCK_ERR_accounting_replicas_not_marked, \
- BCH_FSCK_ERR_bkey_version_in_future) \
- x(rebalance_work_acct_fix, \
- BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \
- BCH_FSCK_ERR_accounting_mismatch)
-
-struct upgrade_downgrade_entry {
- u64 recovery_passes;
- u16 version;
- u16 nr_errors;
- const u16 *errors;
-};
-
-#define x(ver, passes, ...) static const u16 upgrade_##ver##_errors[] = { __VA_ARGS__ };
-UPGRADE_TABLE()
-#undef x
-
-static const struct upgrade_downgrade_entry upgrade_table[] = {
-#define x(ver, passes, ...) { \
- .recovery_passes = passes, \
- .version = bcachefs_metadata_version_##ver,\
- .nr_errors = ARRAY_SIZE(upgrade_##ver##_errors), \
- .errors = upgrade_##ver##_errors, \
-},
-UPGRADE_TABLE()
-#undef x
-};
-
-static int have_stripes(struct bch_fs *c)
-{
- if (IS_ERR_OR_NULL(c->btree_roots_known[BTREE_ID_stripes].b))
- return 0;
-
- return !btree_node_fake(c->btree_roots_known[BTREE_ID_stripes].b);
-}
-
-int bch2_sb_set_upgrade_extra(struct bch_fs *c)
-{
- unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version;
- unsigned new_version = c->sb.version;
- bool write_sb = false;
- int ret = 0;
-
- mutex_lock(&c->sb_lock);
- struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
-
- if (old_version < bcachefs_metadata_version_bucket_stripe_sectors &&
- new_version >= bcachefs_metadata_version_bucket_stripe_sectors &&
- (ret = have_stripes(c) > 0)) {
- __set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_allocations, ext->recovery_passes_required);
- __set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent);
- __set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_sectors_wrong, ext->errors_silent);
- write_sb = true;
- }
-
- if (write_sb)
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-
- return ret < 0 ? ret : 0;
-}
-
-void bch2_sb_set_upgrade(struct bch_fs *c,
- unsigned old_version,
- unsigned new_version)
-{
- lockdep_assert_held(&c->sb_lock);
-
- struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
-
- for (const struct upgrade_downgrade_entry *i = upgrade_table;
- i < upgrade_table + ARRAY_SIZE(upgrade_table);
- i++)
- if (i->version > old_version && i->version <= new_version) {
- u64 passes = i->recovery_passes;
-
- if (passes & RECOVERY_PASS_ALL_FSCK)
- passes |= bch2_fsck_recovery_passes();
- passes &= ~RECOVERY_PASS_ALL_FSCK;
-
- ext->recovery_passes_required[0] |=
- cpu_to_le64(bch2_recovery_passes_to_stable(passes));
-
- for (const u16 *e = i->errors; e < i->errors + i->nr_errors; e++)
- __set_bit_le64(*e, ext->errors_silent);
- }
-}
-
-#define x(ver, passes, ...) static const u16 downgrade_##ver##_errors[] = { __VA_ARGS__ };
-DOWNGRADE_TABLE()
-#undef x
-
-static const struct upgrade_downgrade_entry downgrade_table[] = {
-#define x(ver, passes, ...) { \
- .recovery_passes = passes, \
- .version = bcachefs_metadata_version_##ver,\
- .nr_errors = ARRAY_SIZE(downgrade_##ver##_errors), \
- .errors = downgrade_##ver##_errors, \
-},
-DOWNGRADE_TABLE()
-#undef x
-};
-
-static int downgrade_table_extra(struct bch_fs *c, darray_char *table)
-{
- struct bch_sb_field_downgrade_entry *dst = (void *) &darray_top(*table);
- unsigned bytes = sizeof(*dst) + sizeof(dst->errors[0]) * le16_to_cpu(dst->nr_errors);
- int ret = 0;
-
- unsigned nr_errors = le16_to_cpu(dst->nr_errors);
-
- switch (le16_to_cpu(dst->version)) {
- case bcachefs_metadata_version_bucket_stripe_sectors:
- if (have_stripes(c)) {
- bytes += sizeof(dst->errors[0]) * 2;
-
- ret = darray_make_room(table, bytes);
- if (ret)
- return ret;
-
- /* open coded __set_bit_le64, as dst is packed and
- * dst->recovery_passes is misaligned */
- unsigned b = BCH_RECOVERY_PASS_STABLE_check_allocations;
- dst->recovery_passes[b / 64] |= cpu_to_le64(BIT_ULL(b % 64));
-
- dst->errors[nr_errors++] = cpu_to_le16(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong);
- }
- break;
- }
-
- dst->nr_errors = cpu_to_le16(nr_errors);
- return ret;
-}
-
-static inline const struct bch_sb_field_downgrade_entry *
-downgrade_entry_next_c(const struct bch_sb_field_downgrade_entry *e)
-{
- return (void *) &e->errors[le16_to_cpu(e->nr_errors)];
-}
-
-#define for_each_downgrade_entry(_d, _i) \
- for (const struct bch_sb_field_downgrade_entry *_i = (_d)->entries; \
- (void *) _i < vstruct_end(&(_d)->field) && \
- (void *) &_i->errors[0] <= vstruct_end(&(_d)->field) && \
- (void *) downgrade_entry_next_c(_i) <= vstruct_end(&(_d)->field); \
- _i = downgrade_entry_next_c(_i))
-
-static int bch2_sb_downgrade_validate(struct bch_sb *sb, struct bch_sb_field *f,
- enum bch_validate_flags flags, struct printbuf *err)
-{
- struct bch_sb_field_downgrade *e = field_to_type(f, downgrade);
-
- for (const struct bch_sb_field_downgrade_entry *i = e->entries;
- (void *) i < vstruct_end(&e->field);
- i = downgrade_entry_next_c(i)) {
- /*
- * Careful: sb_field_downgrade_entry is only 2 byte aligned, but
- * section sizes are 8 byte aligned - an empty entry spanning
- * the end of the section is allowed (and ignored):
- */
- if ((void *) &i->errors[0] > vstruct_end(&e->field))
- break;
-
- if (flags & BCH_VALIDATE_write &&
- (void *) downgrade_entry_next_c(i) > vstruct_end(&e->field)) {
- prt_printf(err, "downgrade entry overruns end of superblock section");
- return -BCH_ERR_invalid_sb_downgrade;
- }
-
- if (BCH_VERSION_MAJOR(le16_to_cpu(i->version)) !=
- BCH_VERSION_MAJOR(le16_to_cpu(sb->version))) {
- prt_printf(err, "downgrade entry with mismatched major version (%u != %u)",
- BCH_VERSION_MAJOR(le16_to_cpu(i->version)),
- BCH_VERSION_MAJOR(le16_to_cpu(sb->version)));
- return -BCH_ERR_invalid_sb_downgrade;
- }
- }
-
- return 0;
-}
-
-static void bch2_sb_downgrade_to_text(struct printbuf *out, struct bch_sb *sb,
- struct bch_sb_field *f)
-{
- struct bch_sb_field_downgrade *e = field_to_type(f, downgrade);
-
- if (out->nr_tabstops <= 1)
- printbuf_tabstop_push(out, 16);
-
- for_each_downgrade_entry(e, i) {
- prt_str(out, "version:\t");
- bch2_version_to_text(out, le16_to_cpu(i->version));
- prt_newline(out);
-
- prt_str(out, "recovery passes:\t");
- prt_bitflags(out, bch2_recovery_passes,
- bch2_recovery_passes_from_stable(le64_to_cpu(i->recovery_passes[0])));
- prt_newline(out);
-
- prt_str(out, "errors:\t");
- bool first = true;
- for (unsigned j = 0; j < le16_to_cpu(i->nr_errors); j++) {
- if (!first)
- prt_char(out, ',');
- first = false;
- bch2_sb_error_id_to_text(out, le16_to_cpu(i->errors[j]));
- }
- prt_newline(out);
- }
-}
-
-const struct bch_sb_field_ops bch_sb_field_ops_downgrade = {
- .validate = bch2_sb_downgrade_validate,
- .to_text = bch2_sb_downgrade_to_text,
-};
-
-int bch2_sb_downgrade_update(struct bch_fs *c)
-{
- if (!test_bit(BCH_FS_btree_running, &c->flags))
- return 0;
-
- darray_char table = {};
- int ret = 0;
-
- for (const struct upgrade_downgrade_entry *src = downgrade_table;
- src < downgrade_table + ARRAY_SIZE(downgrade_table);
- src++) {
- if (BCH_VERSION_MAJOR(src->version) != BCH_VERSION_MAJOR(le16_to_cpu(c->disk_sb.sb->version)))
- continue;
-
- struct bch_sb_field_downgrade_entry *dst;
- unsigned bytes = sizeof(*dst) + sizeof(dst->errors[0]) * src->nr_errors;
-
- ret = darray_make_room(&table, bytes);
- if (ret)
- goto out;
-
- dst = (void *) &darray_top(table);
- dst->version = cpu_to_le16(src->version);
- dst->recovery_passes[0] = cpu_to_le64(bch2_recovery_passes_to_stable(src->recovery_passes));
- dst->recovery_passes[1] = 0;
- dst->nr_errors = cpu_to_le16(src->nr_errors);
- for (unsigned i = 0; i < src->nr_errors; i++)
- dst->errors[i] = cpu_to_le16(src->errors[i]);
-
- ret = downgrade_table_extra(c, &table);
- if (ret)
- goto out;
-
- if (!dst->recovery_passes[0] &&
- !dst->recovery_passes[1] &&
- !dst->nr_errors)
- continue;
-
- table.nr += sizeof(*dst) + sizeof(dst->errors[0]) * le16_to_cpu(dst->nr_errors);
- }
-
- struct bch_sb_field_downgrade *d = bch2_sb_field_get(c->disk_sb.sb, downgrade);
-
- unsigned sb_u64s = DIV_ROUND_UP(sizeof(*d) + table.nr, sizeof(u64));
-
- if (d && le32_to_cpu(d->field.u64s) > sb_u64s)
- goto out;
-
- d = bch2_sb_field_resize(&c->disk_sb, downgrade, sb_u64s);
- if (!d) {
- ret = -BCH_ERR_ENOSPC_sb_downgrade;
- goto out;
- }
-
- memcpy(d->entries, table.data, table.nr);
- memset_u64s_tail(d->entries, 0, table.nr);
-out:
- darray_exit(&table);
- return ret;
-}
-
-void bch2_sb_set_downgrade(struct bch_fs *c, unsigned new_minor, unsigned old_minor)
-{
- struct bch_sb_field_downgrade *d = bch2_sb_field_get(c->disk_sb.sb, downgrade);
- if (!d)
- return;
-
- struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
-
- for_each_downgrade_entry(d, i) {
- unsigned minor = BCH_VERSION_MINOR(le16_to_cpu(i->version));
- if (new_minor < minor && minor <= old_minor) {
- ext->recovery_passes_required[0] |= i->recovery_passes[0];
- ext->recovery_passes_required[1] |= i->recovery_passes[1];
-
- for (unsigned j = 0; j < le16_to_cpu(i->nr_errors); j++) {
- unsigned e = le16_to_cpu(i->errors[j]);
- if (e < BCH_FSCK_ERR_MAX)
- __set_bit(e, c->sb.errors_silent);
- if (e < sizeof(ext->errors_silent) * 8)
- __set_bit_le64(e, ext->errors_silent);
- }
- }
- }
-}
diff --git a/fs/bcachefs/sb-downgrade.h b/fs/bcachefs/sb-downgrade.h
deleted file mode 100644
index 095b7cc9bb47..000000000000
--- a/fs/bcachefs/sb-downgrade.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SB_DOWNGRADE_H
-#define _BCACHEFS_SB_DOWNGRADE_H
-
-extern const struct bch_sb_field_ops bch_sb_field_ops_downgrade;
-
-int bch2_sb_downgrade_update(struct bch_fs *);
-void bch2_sb_set_upgrade(struct bch_fs *, unsigned, unsigned);
-int bch2_sb_set_upgrade_extra(struct bch_fs *);
-void bch2_sb_set_downgrade(struct bch_fs *, unsigned, unsigned);
-
-#endif /* _BCACHEFS_SB_DOWNGRADE_H */
diff --git a/fs/bcachefs/sb-downgrade_format.h b/fs/bcachefs/sb-downgrade_format.h
deleted file mode 100644
index cffd932be3ec..000000000000
--- a/fs/bcachefs/sb-downgrade_format.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SB_DOWNGRADE_FORMAT_H
-#define _BCACHEFS_SB_DOWNGRADE_FORMAT_H
-
-struct bch_sb_field_downgrade_entry {
- __le16 version;
- __le64 recovery_passes[2];
- __le16 nr_errors;
- __le16 errors[] __counted_by(nr_errors);
-} __packed __aligned(2);
-
-struct bch_sb_field_downgrade {
- struct bch_sb_field field;
- struct bch_sb_field_downgrade_entry entries[];
-};
-
-#endif /* _BCACHEFS_SB_DOWNGRADE_FORMAT_H */
diff --git a/fs/bcachefs/sb-errors.c b/fs/bcachefs/sb-errors.c
deleted file mode 100644
index 013a96883b4e..000000000000
--- a/fs/bcachefs/sb-errors.c
+++ /dev/null
@@ -1,176 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "sb-errors.h"
-#include "super-io.h"
-
-const char * const bch2_sb_error_strs[] = {
-#define x(t, n, ...) [n] = #t,
- BCH_SB_ERRS()
-#undef x
-};
-
-void bch2_sb_error_id_to_text(struct printbuf *out, enum bch_sb_error_id id)
-{
- if (id < BCH_FSCK_ERR_MAX)
- prt_str(out, bch2_sb_error_strs[id]);
- else
- prt_printf(out, "(unknown error %u)", id);
-}
-
-static inline unsigned bch2_sb_field_errors_nr_entries(struct bch_sb_field_errors *e)
-{
- return bch2_sb_field_nr_entries(e);
-}
-
-static inline unsigned bch2_sb_field_errors_u64s(unsigned nr)
-{
- return (sizeof(struct bch_sb_field_errors) +
- sizeof(struct bch_sb_field_error_entry) * nr) / sizeof(u64);
-}
-
-static int bch2_sb_errors_validate(struct bch_sb *sb, struct bch_sb_field *f,
- enum bch_validate_flags flags, struct printbuf *err)
-{
- struct bch_sb_field_errors *e = field_to_type(f, errors);
- unsigned i, nr = bch2_sb_field_errors_nr_entries(e);
-
- for (i = 0; i < nr; i++) {
- if (!BCH_SB_ERROR_ENTRY_NR(&e->entries[i])) {
- prt_printf(err, "entry with count 0 (id ");
- bch2_sb_error_id_to_text(err, BCH_SB_ERROR_ENTRY_ID(&e->entries[i]));
- prt_printf(err, ")");
- return -BCH_ERR_invalid_sb_errors;
- }
-
- if (i + 1 < nr &&
- BCH_SB_ERROR_ENTRY_ID(&e->entries[i]) >=
- BCH_SB_ERROR_ENTRY_ID(&e->entries[i + 1])) {
- prt_printf(err, "entries out of order");
- return -BCH_ERR_invalid_sb_errors;
- }
- }
-
- return 0;
-}
-
-static void bch2_sb_errors_to_text(struct printbuf *out, struct bch_sb *sb,
- struct bch_sb_field *f)
-{
- struct bch_sb_field_errors *e = field_to_type(f, errors);
- unsigned i, nr = bch2_sb_field_errors_nr_entries(e);
-
- if (out->nr_tabstops <= 1)
- printbuf_tabstop_push(out, 16);
-
- for (i = 0; i < nr; i++) {
- bch2_sb_error_id_to_text(out, BCH_SB_ERROR_ENTRY_ID(&e->entries[i]));
- prt_tab(out);
- prt_u64(out, BCH_SB_ERROR_ENTRY_NR(&e->entries[i]));
- prt_tab(out);
- bch2_prt_datetime(out, le64_to_cpu(e->entries[i].last_error_time));
- prt_newline(out);
- }
-}
-
-const struct bch_sb_field_ops bch_sb_field_ops_errors = {
- .validate = bch2_sb_errors_validate,
- .to_text = bch2_sb_errors_to_text,
-};
-
-void bch2_sb_error_count(struct bch_fs *c, enum bch_sb_error_id err)
-{
- bch_sb_errors_cpu *e = &c->fsck_error_counts;
- struct bch_sb_error_entry_cpu n = {
- .id = err,
- .nr = 1,
- .last_error_time = ktime_get_real_seconds()
- };
- unsigned i;
-
- mutex_lock(&c->fsck_error_counts_lock);
- for (i = 0; i < e->nr; i++) {
- if (err == e->data[i].id) {
- e->data[i].nr++;
- e->data[i].last_error_time = n.last_error_time;
- goto out;
- }
- if (err < e->data[i].id)
- break;
- }
-
- if (darray_make_room(e, 1))
- goto out;
-
- darray_insert_item(e, i, n);
-out:
- mutex_unlock(&c->fsck_error_counts_lock);
-}
-
-void bch2_sb_errors_from_cpu(struct bch_fs *c)
-{
- bch_sb_errors_cpu *src = &c->fsck_error_counts;
- struct bch_sb_field_errors *dst;
- unsigned i;
-
- mutex_lock(&c->fsck_error_counts_lock);
-
- dst = bch2_sb_field_resize(&c->disk_sb, errors,
- bch2_sb_field_errors_u64s(src->nr));
-
- if (!dst)
- goto err;
-
- for (i = 0; i < src->nr; i++) {
- SET_BCH_SB_ERROR_ENTRY_ID(&dst->entries[i], src->data[i].id);
- SET_BCH_SB_ERROR_ENTRY_NR(&dst->entries[i], src->data[i].nr);
- dst->entries[i].last_error_time = cpu_to_le64(src->data[i].last_error_time);
- }
-
-err:
- mutex_unlock(&c->fsck_error_counts_lock);
-}
-
-static int bch2_sb_errors_to_cpu(struct bch_fs *c)
-{
- struct bch_sb_field_errors *src = bch2_sb_field_get(c->disk_sb.sb, errors);
- bch_sb_errors_cpu *dst = &c->fsck_error_counts;
- unsigned i, nr = bch2_sb_field_errors_nr_entries(src);
- int ret;
-
- if (!nr)
- return 0;
-
- mutex_lock(&c->fsck_error_counts_lock);
- ret = darray_make_room(dst, nr);
- if (ret)
- goto err;
-
- dst->nr = nr;
-
- for (i = 0; i < nr; i++) {
- dst->data[i].id = BCH_SB_ERROR_ENTRY_ID(&src->entries[i]);
- dst->data[i].nr = BCH_SB_ERROR_ENTRY_NR(&src->entries[i]);
- dst->data[i].last_error_time = le64_to_cpu(src->entries[i].last_error_time);
- }
-err:
- mutex_unlock(&c->fsck_error_counts_lock);
-
- return ret;
-}
-
-void bch2_fs_sb_errors_exit(struct bch_fs *c)
-{
- darray_exit(&c->fsck_error_counts);
-}
-
-void bch2_fs_sb_errors_init_early(struct bch_fs *c)
-{
- mutex_init(&c->fsck_error_counts_lock);
- darray_init(&c->fsck_error_counts);
-}
-
-int bch2_fs_sb_errors_init(struct bch_fs *c)
-{
- return bch2_sb_errors_to_cpu(c);
-}
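
bch2_sb_error_count() above keeps the in-memory error counters sorted by error id: it bumps an existing entry if one is found, otherwise inserts a new entry at the sorted position, and silently drops the event if no room can be made. A standalone sketch of that insert-or-bump pattern over a plain fixed-size array (the real code uses the bcachefs darray helpers under fsck_error_counts_lock; the types and names below are illustrative only):

#include <string.h>

struct err_count { unsigned id; unsigned long long nr; };

static void count_error(struct err_count *e, unsigned *nr, unsigned max,
			unsigned id)
{
	unsigned i;

	for (i = 0; i < *nr; i++) {
		if (e[i].id == id) {		/* already tracked: bump the count */
			e[i].nr++;
			return;
		}
		if (id < e[i].id)		/* found the sorted insert position */
			break;
	}

	if (*nr == max)				/* no room: drop, like darray_make_room() failing */
		return;

	memmove(&e[i + 1], &e[i], (*nr - i) * sizeof(*e));
	e[i] = (struct err_count) { .id = id, .nr = 1 };
	(*nr)++;
}
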
diff --git a/fs/bcachefs/sb-errors.h b/fs/bcachefs/sb-errors.h
deleted file mode 100644
index b2357b8e6107..000000000000
--- a/fs/bcachefs/sb-errors.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SB_ERRORS_H
-#define _BCACHEFS_SB_ERRORS_H
-
-#include "sb-errors_types.h"
-
-extern const char * const bch2_sb_error_strs[];
-
-void bch2_sb_error_id_to_text(struct printbuf *, enum bch_sb_error_id);
-
-extern const struct bch_sb_field_ops bch_sb_field_ops_errors;
-
-void bch2_sb_error_count(struct bch_fs *, enum bch_sb_error_id);
-
-void bch2_sb_errors_from_cpu(struct bch_fs *);
-
-void bch2_fs_sb_errors_exit(struct bch_fs *);
-void bch2_fs_sb_errors_init_early(struct bch_fs *);
-int bch2_fs_sb_errors_init(struct bch_fs *);
-
-#endif /* _BCACHEFS_SB_ERRORS_H */
diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h
deleted file mode 100644
index 9feb6739f77a..000000000000
--- a/fs/bcachefs/sb-errors_format.h
+++ /dev/null
@@ -1,328 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SB_ERRORS_FORMAT_H
-#define _BCACHEFS_SB_ERRORS_FORMAT_H
-
-enum bch_fsck_flags {
- FSCK_CAN_FIX = 1 << 0,
- FSCK_CAN_IGNORE = 1 << 1,
- FSCK_NEED_FSCK = 1 << 2,
- FSCK_NO_RATELIMIT = 1 << 3,
- FSCK_AUTOFIX = 1 << 4,
-};
-
-#define BCH_SB_ERRS() \
- x(clean_but_journal_not_empty, 0, 0) \
- x(dirty_but_no_journal_entries, 1, 0) \
- x(dirty_but_no_journal_entries_post_drop_nonflushes, 2, 0) \
- x(sb_clean_journal_seq_mismatch, 3, 0) \
- x(sb_clean_btree_root_mismatch, 4, 0) \
- x(sb_clean_missing, 5, 0) \
- x(jset_unsupported_version, 6, 0) \
- x(jset_unknown_csum, 7, 0) \
- x(jset_last_seq_newer_than_seq, 8, 0) \
- x(jset_past_bucket_end, 9, 0) \
- x(jset_seq_blacklisted, 10, 0) \
- x(journal_entries_missing, 11, 0) \
- x(journal_entry_replicas_not_marked, 12, FSCK_AUTOFIX) \
- x(journal_entry_past_jset_end, 13, 0) \
- x(journal_entry_replicas_data_mismatch, 14, 0) \
- x(journal_entry_bkey_u64s_0, 15, 0) \
- x(journal_entry_bkey_past_end, 16, 0) \
- x(journal_entry_bkey_bad_format, 17, 0) \
- x(journal_entry_bkey_invalid, 18, 0) \
- x(journal_entry_btree_root_bad_size, 19, 0) \
- x(journal_entry_blacklist_bad_size, 20, 0) \
- x(journal_entry_blacklist_v2_bad_size, 21, 0) \
- x(journal_entry_blacklist_v2_start_past_end, 22, 0) \
- x(journal_entry_usage_bad_size, 23, 0) \
- x(journal_entry_data_usage_bad_size, 24, 0) \
- x(journal_entry_clock_bad_size, 25, 0) \
- x(journal_entry_clock_bad_rw, 26, 0) \
- x(journal_entry_dev_usage_bad_size, 27, 0) \
- x(journal_entry_dev_usage_bad_dev, 28, 0) \
- x(journal_entry_dev_usage_bad_pad, 29, 0) \
- x(btree_node_unreadable, 30, 0) \
- x(btree_node_fault_injected, 31, 0) \
- x(btree_node_bad_magic, 32, 0) \
- x(btree_node_bad_seq, 33, 0) \
- x(btree_node_unsupported_version, 34, 0) \
- x(btree_node_bset_older_than_sb_min, 35, 0) \
- x(btree_node_bset_newer_than_sb, 36, 0) \
- x(btree_node_data_missing, 37, 0) \
- x(btree_node_bset_after_end, 38, 0) \
- x(btree_node_replicas_sectors_written_mismatch, 39, 0) \
- x(btree_node_replicas_data_mismatch, 40, 0) \
- x(bset_unknown_csum, 41, 0) \
- x(bset_bad_csum, 42, 0) \
- x(bset_past_end_of_btree_node, 43, 0) \
- x(bset_wrong_sector_offset, 44, 0) \
- x(bset_empty, 45, 0) \
- x(bset_bad_seq, 46, 0) \
- x(bset_blacklisted_journal_seq, 47, 0) \
- x(first_bset_blacklisted_journal_seq, 48, 0) \
- x(btree_node_bad_btree, 49, 0) \
- x(btree_node_bad_level, 50, 0) \
- x(btree_node_bad_min_key, 51, 0) \
- x(btree_node_bad_max_key, 52, 0) \
- x(btree_node_bad_format, 53, 0) \
- x(btree_node_bkey_past_bset_end, 54, 0) \
- x(btree_node_bkey_bad_format, 55, 0) \
- x(btree_node_bad_bkey, 56, 0) \
- x(btree_node_bkey_out_of_order, 57, 0) \
- x(btree_root_bkey_invalid, 58, 0) \
- x(btree_root_read_error, 59, 0) \
- x(btree_root_bad_min_key, 60, 0) \
- x(btree_root_bad_max_key, 61, 0) \
- x(btree_node_read_error, 62, 0) \
- x(btree_node_topology_bad_min_key, 63, 0) \
- x(btree_node_topology_bad_max_key, 64, 0) \
- x(btree_node_topology_overwritten_by_prev_node, 65, 0) \
- x(btree_node_topology_overwritten_by_next_node, 66, 0) \
- x(btree_node_topology_interior_node_empty, 67, 0) \
- x(fs_usage_hidden_wrong, 68, FSCK_AUTOFIX) \
- x(fs_usage_btree_wrong, 69, FSCK_AUTOFIX) \
- x(fs_usage_data_wrong, 70, FSCK_AUTOFIX) \
- x(fs_usage_cached_wrong, 71, FSCK_AUTOFIX) \
- x(fs_usage_reserved_wrong, 72, FSCK_AUTOFIX) \
- x(fs_usage_persistent_reserved_wrong, 73, FSCK_AUTOFIX) \
- x(fs_usage_nr_inodes_wrong, 74, FSCK_AUTOFIX) \
- x(fs_usage_replicas_wrong, 75, FSCK_AUTOFIX) \
- x(dev_usage_buckets_wrong, 76, FSCK_AUTOFIX) \
- x(dev_usage_sectors_wrong, 77, FSCK_AUTOFIX) \
- x(dev_usage_fragmented_wrong, 78, FSCK_AUTOFIX) \
- x(dev_usage_buckets_ec_wrong, 79, FSCK_AUTOFIX) \
- x(bkey_version_in_future, 80, 0) \
- x(bkey_u64s_too_small, 81, 0) \
- x(bkey_invalid_type_for_btree, 82, 0) \
- x(bkey_extent_size_zero, 83, 0) \
- x(bkey_extent_size_greater_than_offset, 84, 0) \
- x(bkey_size_nonzero, 85, 0) \
- x(bkey_snapshot_nonzero, 86, 0) \
- x(bkey_snapshot_zero, 87, 0) \
- x(bkey_at_pos_max, 88, 0) \
- x(bkey_before_start_of_btree_node, 89, 0) \
- x(bkey_after_end_of_btree_node, 90, 0) \
- x(bkey_val_size_nonzero, 91, 0) \
- x(bkey_val_size_too_small, 92, 0) \
- x(alloc_v1_val_size_bad, 93, 0) \
- x(alloc_v2_unpack_error, 94, 0) \
- x(alloc_v3_unpack_error, 95, 0) \
- x(alloc_v4_val_size_bad, 96, 0) \
- x(alloc_v4_backpointers_start_bad, 97, 0) \
- x(alloc_key_data_type_bad, 98, 0) \
- x(alloc_key_empty_but_have_data, 99, 0) \
- x(alloc_key_dirty_sectors_0, 100, 0) \
- x(alloc_key_data_type_inconsistency, 101, 0) \
- x(alloc_key_to_missing_dev_bucket, 102, 0) \
- x(alloc_key_cached_inconsistency, 103, 0) \
- x(alloc_key_cached_but_read_time_zero, 104, FSCK_AUTOFIX) \
- x(alloc_key_to_missing_lru_entry, 105, FSCK_AUTOFIX) \
- x(alloc_key_data_type_wrong, 106, FSCK_AUTOFIX) \
- x(alloc_key_gen_wrong, 107, FSCK_AUTOFIX) \
- x(alloc_key_dirty_sectors_wrong, 108, FSCK_AUTOFIX) \
- x(alloc_key_cached_sectors_wrong, 109, FSCK_AUTOFIX) \
- x(alloc_key_stripe_wrong, 110, FSCK_AUTOFIX) \
- x(alloc_key_stripe_redundancy_wrong, 111, FSCK_AUTOFIX) \
- x(bucket_sector_count_overflow, 112, 0) \
- x(bucket_metadata_type_mismatch, 113, 0) \
- x(need_discard_key_wrong, 114, 0) \
- x(freespace_key_wrong, 115, 0) \
- x(freespace_hole_missing, 116, 0) \
- x(bucket_gens_val_size_bad, 117, 0) \
- x(bucket_gens_key_wrong, 118, FSCK_AUTOFIX) \
- x(bucket_gens_hole_wrong, 119, FSCK_AUTOFIX) \
- x(bucket_gens_to_invalid_dev, 120, FSCK_AUTOFIX) \
- x(bucket_gens_to_invalid_buckets, 121, FSCK_AUTOFIX) \
- x(bucket_gens_nonzero_for_invalid_buckets, 122, FSCK_AUTOFIX) \
- x(need_discard_freespace_key_to_invalid_dev_bucket, 123, 0) \
- x(need_discard_freespace_key_bad, 124, 0) \
- x(discarding_bucket_not_in_need_discard_btree, 291, 0) \
- x(backpointer_bucket_offset_wrong, 125, 0) \
- x(backpointer_level_bad, 294, 0) \
- x(backpointer_to_missing_device, 126, 0) \
- x(backpointer_to_missing_alloc, 127, 0) \
- x(backpointer_to_missing_ptr, 128, 0) \
- x(lru_entry_at_time_0, 129, FSCK_AUTOFIX) \
- x(lru_entry_to_invalid_bucket, 130, FSCK_AUTOFIX) \
- x(lru_entry_bad, 131, FSCK_AUTOFIX) \
- x(btree_ptr_val_too_big, 132, 0) \
- x(btree_ptr_v2_val_too_big, 133, 0) \
- x(btree_ptr_has_non_ptr, 134, 0) \
- x(extent_ptrs_invalid_entry, 135, 0) \
- x(extent_ptrs_no_ptrs, 136, 0) \
- x(extent_ptrs_too_many_ptrs, 137, 0) \
- x(extent_ptrs_redundant_crc, 138, 0) \
- x(extent_ptrs_redundant_stripe, 139, 0) \
- x(extent_ptrs_unwritten, 140, 0) \
- x(extent_ptrs_written_and_unwritten, 141, 0) \
- x(ptr_to_invalid_device, 142, 0) \
- x(ptr_to_duplicate_device, 143, 0) \
- x(ptr_after_last_bucket, 144, 0) \
- x(ptr_before_first_bucket, 145, 0) \
- x(ptr_spans_multiple_buckets, 146, 0) \
- x(ptr_to_missing_backpointer, 147, FSCK_AUTOFIX) \
- x(ptr_to_missing_alloc_key, 148, FSCK_AUTOFIX) \
- x(ptr_to_missing_replicas_entry, 149, FSCK_AUTOFIX) \
- x(ptr_to_missing_stripe, 150, 0) \
- x(ptr_to_incorrect_stripe, 151, 0) \
- x(ptr_gen_newer_than_bucket_gen, 152, 0) \
- x(ptr_too_stale, 153, 0) \
- x(stale_dirty_ptr, 154, 0) \
- x(ptr_bucket_data_type_mismatch, 155, 0) \
- x(ptr_cached_and_erasure_coded, 156, 0) \
- x(ptr_crc_uncompressed_size_too_small, 157, 0) \
- x(ptr_crc_csum_type_unknown, 158, 0) \
- x(ptr_crc_compression_type_unknown, 159, 0) \
- x(ptr_crc_redundant, 160, 0) \
- x(ptr_crc_uncompressed_size_too_big, 161, 0) \
- x(ptr_crc_nonce_mismatch, 162, 0) \
- x(ptr_stripe_redundant, 163, 0) \
- x(reservation_key_nr_replicas_invalid, 164, 0) \
- x(reflink_v_refcount_wrong, 165, 0) \
- x(reflink_v_pos_bad, 292, 0) \
- x(reflink_p_to_missing_reflink_v, 166, 0) \
- x(reflink_refcount_underflow, 293, 0) \
- x(stripe_pos_bad, 167, 0) \
- x(stripe_val_size_bad, 168, 0) \
- x(stripe_csum_granularity_bad, 290, 0) \
- x(stripe_sector_count_wrong, 169, 0) \
- x(snapshot_tree_pos_bad, 170, 0) \
- x(snapshot_tree_to_missing_snapshot, 171, 0) \
- x(snapshot_tree_to_missing_subvol, 172, 0) \
- x(snapshot_tree_to_wrong_subvol, 173, 0) \
- x(snapshot_tree_to_snapshot_subvol, 174, 0) \
- x(snapshot_pos_bad, 175, 0) \
- x(snapshot_parent_bad, 176, 0) \
- x(snapshot_children_not_normalized, 177, 0) \
- x(snapshot_child_duplicate, 178, 0) \
- x(snapshot_child_bad, 179, 0) \
- x(snapshot_skiplist_not_normalized, 180, 0) \
- x(snapshot_skiplist_bad, 181, 0) \
- x(snapshot_should_not_have_subvol, 182, 0) \
- x(snapshot_to_bad_snapshot_tree, 183, FSCK_AUTOFIX) \
- x(snapshot_bad_depth, 184, 0) \
- x(snapshot_bad_skiplist, 185, 0) \
- x(subvol_pos_bad, 186, 0) \
- x(subvol_not_master_and_not_snapshot, 187, 0) \
- x(subvol_to_missing_root, 188, 0) \
- x(subvol_root_wrong_bi_subvol, 189, 0) \
- x(bkey_in_missing_snapshot, 190, 0) \
- x(inode_pos_inode_nonzero, 191, 0) \
- x(inode_pos_blockdev_range, 192, 0) \
- x(inode_unpack_error, 193, 0) \
- x(inode_str_hash_invalid, 194, 0) \
- x(inode_v3_fields_start_bad, 195, 0) \
- x(inode_snapshot_mismatch, 196, 0) \
- x(inode_unlinked_but_clean, 197, 0) \
- x(inode_unlinked_but_nlink_nonzero, 198, 0) \
- x(inode_unlinked_and_not_open, 281, 0) \
- x(inode_unlinked_but_has_dirent, 285, 0) \
- x(inode_checksum_type_invalid, 199, 0) \
- x(inode_compression_type_invalid, 200, 0) \
- x(inode_subvol_root_but_not_dir, 201, 0) \
- x(inode_i_size_dirty_but_clean, 202, FSCK_AUTOFIX) \
- x(inode_i_sectors_dirty_but_clean, 203, FSCK_AUTOFIX) \
- x(inode_i_sectors_wrong, 204, FSCK_AUTOFIX) \
- x(inode_dir_wrong_nlink, 205, FSCK_AUTOFIX) \
- x(inode_dir_multiple_links, 206, FSCK_AUTOFIX) \
- x(inode_dir_missing_backpointer, 284, FSCK_AUTOFIX) \
- x(inode_dir_unlinked_but_not_empty, 286, FSCK_AUTOFIX) \
- x(inode_multiple_links_but_nlink_0, 207, FSCK_AUTOFIX) \
- x(inode_wrong_backpointer, 208, FSCK_AUTOFIX) \
- x(inode_wrong_nlink, 209, FSCK_AUTOFIX) \
- x(inode_has_child_snapshots_wrong, 287, 0) \
- x(inode_unreachable, 210, FSCK_AUTOFIX) \
- x(deleted_inode_but_clean, 211, FSCK_AUTOFIX) \
- x(deleted_inode_missing, 212, FSCK_AUTOFIX) \
- x(deleted_inode_is_dir, 213, FSCK_AUTOFIX) \
- x(deleted_inode_not_unlinked, 214, FSCK_AUTOFIX) \
- x(deleted_inode_has_child_snapshots, 288, FSCK_AUTOFIX) \
- x(extent_overlapping, 215, 0) \
- x(key_in_missing_inode, 216, 0) \
- x(key_in_wrong_inode_type, 217, 0) \
- x(extent_past_end_of_inode, 218, 0) \
- x(dirent_empty_name, 219, 0) \
- x(dirent_val_too_big, 220, 0) \
- x(dirent_name_too_long, 221, 0) \
- x(dirent_name_embedded_nul, 222, 0) \
- x(dirent_name_dot_or_dotdot, 223, 0) \
- x(dirent_name_has_slash, 224, 0) \
- x(dirent_d_type_wrong, 225, 0) \
- x(inode_bi_parent_wrong, 226, 0) \
- x(dirent_in_missing_dir_inode, 227, 0) \
- x(dirent_in_non_dir_inode, 228, 0) \
- x(dirent_to_missing_inode, 229, 0) \
- x(dirent_to_missing_subvol, 230, 0) \
- x(dirent_to_itself, 231, 0) \
- x(quota_type_invalid, 232, 0) \
- x(xattr_val_size_too_small, 233, 0) \
- x(xattr_val_size_too_big, 234, 0) \
- x(xattr_invalid_type, 235, 0) \
- x(xattr_name_invalid_chars, 236, 0) \
- x(xattr_in_missing_inode, 237, 0) \
- x(root_subvol_missing, 238, 0) \
- x(root_dir_missing, 239, 0) \
- x(root_inode_not_dir, 240, 0) \
- x(dir_loop, 241, 0) \
- x(hash_table_key_duplicate, 242, 0) \
- x(hash_table_key_wrong_offset, 243, 0) \
- x(unlinked_inode_not_on_deleted_list, 244, FSCK_AUTOFIX) \
- x(reflink_p_front_pad_bad, 245, 0) \
- x(journal_entry_dup_same_device, 246, 0) \
- x(inode_bi_subvol_missing, 247, 0) \
- x(inode_bi_subvol_wrong, 248, 0) \
- x(inode_points_to_missing_dirent, 249, FSCK_AUTOFIX) \
- x(inode_points_to_wrong_dirent, 250, FSCK_AUTOFIX) \
- x(inode_bi_parent_nonzero, 251, 0) \
- x(dirent_to_missing_parent_subvol, 252, 0) \
- x(dirent_not_visible_in_parent_subvol, 253, 0) \
- x(subvol_fs_path_parent_wrong, 254, 0) \
- x(subvol_root_fs_path_parent_nonzero, 255, 0) \
- x(subvol_children_not_set, 256, 0) \
- x(subvol_children_bad, 257, 0) \
- x(subvol_loop, 258, 0) \
- x(subvol_unreachable, 259, FSCK_AUTOFIX) \
- x(btree_node_bkey_bad_u64s, 260, 0) \
- x(btree_node_topology_empty_interior_node, 261, 0) \
- x(btree_ptr_v2_min_key_bad, 262, 0) \
- x(btree_root_unreadable_and_scan_found_nothing, 263, 0) \
- x(snapshot_node_missing, 264, 0) \
- x(dup_backpointer_to_bad_csum_extent, 265, 0) \
- x(btree_bitmap_not_marked, 266, 0) \
- x(sb_clean_entry_overrun, 267, 0) \
- x(btree_ptr_v2_written_0, 268, 0) \
- x(subvol_snapshot_bad, 269, 0) \
- x(subvol_inode_bad, 270, 0) \
- x(alloc_key_stripe_sectors_wrong, 271, FSCK_AUTOFIX) \
- x(accounting_mismatch, 272, FSCK_AUTOFIX) \
- x(accounting_replicas_not_marked, 273, 0) \
- x(accounting_to_invalid_device, 289, 0) \
- x(invalid_btree_id, 274, 0) \
- x(alloc_key_io_time_bad, 275, 0) \
- x(alloc_key_fragmentation_lru_wrong, 276, FSCK_AUTOFIX) \
- x(accounting_key_junk_at_end, 277, FSCK_AUTOFIX) \
- x(accounting_key_replicas_nr_devs_0, 278, FSCK_AUTOFIX) \
- x(accounting_key_replicas_nr_required_bad, 279, FSCK_AUTOFIX) \
- x(accounting_key_replicas_devs_unsorted, 280, FSCK_AUTOFIX) \
- x(accounting_key_version_0, 282, FSCK_AUTOFIX) \
- x(logged_op_but_clean, 283, FSCK_AUTOFIX) \
- x(MAX, 295, 0)
-
-enum bch_sb_error_id {
-#define x(t, n, ...) BCH_FSCK_ERR_##t = n,
- BCH_SB_ERRS()
-#undef x
-};
-
-struct bch_sb_field_errors {
- struct bch_sb_field field;
- struct bch_sb_field_error_entry {
- __le64 v;
- __le64 last_error_time;
- } entries[];
-};
-
-LE64_BITMASK(BCH_SB_ERROR_ENTRY_ID, struct bch_sb_field_error_entry, v, 0, 16);
-LE64_BITMASK(BCH_SB_ERROR_ENTRY_NR, struct bch_sb_field_error_entry, v, 16, 64);
-
-#endif /* _BCACHEFS_SB_ERRORS_FORMAT_H */
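
BCH_SB_ERRS() above is an x-macro: the single list is expanded once here to generate enum bch_sb_error_id and again in sb-errors.c to generate bch2_sb_error_strs[], so the names, numbers and strings cannot drift apart. A minimal example of the same pattern with made-up entries:

/* One list, two expansions: an enum and a matching name table. */
#define MY_ERRS()		\
	x(ok,		0)	\
	x(not_found,	1)	\
	x(too_big,	2)

enum my_err {
#define x(t, n) MY_ERR_##t = n,
	MY_ERRS()
#undef x
};

static const char * const my_err_strs[] = {
#define x(t, n) [n] = #t,
	MY_ERRS()
#undef x
};
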
diff --git a/fs/bcachefs/sb-errors_types.h b/fs/bcachefs/sb-errors_types.h
deleted file mode 100644
index 40325239c3b0..000000000000
--- a/fs/bcachefs/sb-errors_types.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SB_ERRORS_TYPES_H
-#define _BCACHEFS_SB_ERRORS_TYPES_H
-
-#include "darray.h"
-
-struct bch_sb_error_entry_cpu {
- u64 id:16,
- nr:48;
- u64 last_error_time;
-};
-
-typedef DARRAY(struct bch_sb_error_entry_cpu) bch_sb_errors_cpu;
-
-#endif /* _BCACHEFS_SB_ERRORS_TYPES_H */
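
The id:16/nr:48 bitfields above mirror the on-disk packing defined by BCH_SB_ERROR_ENTRY_ID (bits 0-16) and BCH_SB_ERROR_ENTRY_NR (bits 16-64) in sb-errors_format.h. A small sketch of that split on a plain CPU-order u64 (helper names are illustrative, not bcachefs API):

#include <stdint.h>

/* Bits 0..15 hold the error id, bits 16..63 the occurrence count. */
static uint64_t err_entry_pack(unsigned id, uint64_t nr)
{
	return (uint64_t) (id & 0xffff) | (nr << 16);
}

static unsigned err_entry_id(uint64_t v) { return v & 0xffff; }
static uint64_t err_entry_nr(uint64_t v) { return v >> 16; }
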
diff --git a/fs/bcachefs/sb-members.c b/fs/bcachefs/sb-members.c
deleted file mode 100644
index 116131f95815..000000000000
--- a/fs/bcachefs/sb-members.c
+++ /dev/null
@@ -1,532 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "btree_cache.h"
-#include "disk_groups.h"
-#include "error.h"
-#include "opts.h"
-#include "replicas.h"
-#include "sb-members.h"
-#include "super-io.h"
-
-void bch2_dev_missing(struct bch_fs *c, unsigned dev)
-{
- if (dev != BCH_SB_MEMBER_INVALID)
- bch2_fs_inconsistent(c, "pointer to nonexistent device %u", dev);
-}
-
-void bch2_dev_bucket_missing(struct bch_fs *c, struct bpos bucket)
-{
- bch2_fs_inconsistent(c, "pointer to nonexistent bucket %llu:%llu", bucket.inode, bucket.offset);
-}
-
-#define x(t, n, ...) [n] = #t,
-static const char * const bch2_iops_measurements[] = {
- BCH_IOPS_MEASUREMENTS()
- NULL
-};
-
-char * const bch2_member_error_strs[] = {
- BCH_MEMBER_ERROR_TYPES()
- NULL
-};
-#undef x
-
-/* Code for bch_sb_field_members_v1: */
-
-struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i)
-{
- return __bch2_members_v2_get_mut(bch2_sb_field_get(sb, members_v2), i);
-}
-
-static struct bch_member members_v2_get(struct bch_sb_field_members_v2 *mi, int i)
-{
- struct bch_member ret, *p = __bch2_members_v2_get_mut(mi, i);
- memset(&ret, 0, sizeof(ret));
- memcpy(&ret, p, min_t(size_t, le16_to_cpu(mi->member_bytes), sizeof(ret)));
- return ret;
-}
-
-static struct bch_member *members_v1_get_mut(struct bch_sb_field_members_v1 *mi, int i)
-{
- return (void *) mi->_members + (i * BCH_MEMBER_V1_BYTES);
-}
-
-static struct bch_member members_v1_get(struct bch_sb_field_members_v1 *mi, int i)
-{
- struct bch_member ret, *p = members_v1_get_mut(mi, i);
- memset(&ret, 0, sizeof(ret));
- memcpy(&ret, p, min_t(size_t, BCH_MEMBER_V1_BYTES, sizeof(ret)));
- return ret;
-}
-
-struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i)
-{
- struct bch_sb_field_members_v2 *mi2 = bch2_sb_field_get(sb, members_v2);
- if (mi2)
- return members_v2_get(mi2, i);
- struct bch_sb_field_members_v1 *mi1 = bch2_sb_field_get(sb, members_v1);
- return members_v1_get(mi1, i);
-}
-
-static int sb_members_v2_resize_entries(struct bch_fs *c)
-{
- struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
-
- if (le16_to_cpu(mi->member_bytes) < sizeof(struct bch_member)) {
- unsigned u64s = DIV_ROUND_UP((sizeof(*mi) + sizeof(mi->_members[0]) *
- c->disk_sb.sb->nr_devices), 8);
-
- mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
- if (!mi)
- return -BCH_ERR_ENOSPC_sb_members_v2;
-
- for (int i = c->disk_sb.sb->nr_devices - 1; i >= 0; --i) {
- void *dst = (void *) mi->_members + (i * sizeof(struct bch_member));
- memmove(dst, __bch2_members_v2_get_mut(mi, i), le16_to_cpu(mi->member_bytes));
- memset(dst + le16_to_cpu(mi->member_bytes),
- 0, (sizeof(struct bch_member) - le16_to_cpu(mi->member_bytes)));
- }
- mi->member_bytes = cpu_to_le16(sizeof(struct bch_member));
- }
- return 0;
-}
-
-int bch2_sb_members_v2_init(struct bch_fs *c)
-{
- struct bch_sb_field_members_v1 *mi1;
- struct bch_sb_field_members_v2 *mi2;
-
- if (!bch2_sb_field_get(c->disk_sb.sb, members_v2)) {
- mi2 = bch2_sb_field_resize(&c->disk_sb, members_v2,
- DIV_ROUND_UP(sizeof(*mi2) +
- sizeof(struct bch_member) * c->sb.nr_devices,
- sizeof(u64)));
- mi1 = bch2_sb_field_get(c->disk_sb.sb, members_v1);
- memcpy(&mi2->_members[0], &mi1->_members[0],
- BCH_MEMBER_V1_BYTES * c->sb.nr_devices);
- memset(&mi2->pad[0], 0, sizeof(mi2->pad));
- mi2->member_bytes = cpu_to_le16(BCH_MEMBER_V1_BYTES);
- }
-
- return sb_members_v2_resize_entries(c);
-}
-
-int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb)
-{
- struct bch_sb_field_members_v1 *mi1;
- struct bch_sb_field_members_v2 *mi2;
-
- mi1 = bch2_sb_field_resize(disk_sb, members_v1,
- DIV_ROUND_UP(sizeof(*mi1) + BCH_MEMBER_V1_BYTES *
- disk_sb->sb->nr_devices, sizeof(u64)));
- if (!mi1)
- return -BCH_ERR_ENOSPC_sb_members;
-
- mi2 = bch2_sb_field_get(disk_sb->sb, members_v2);
-
- for (unsigned i = 0; i < disk_sb->sb->nr_devices; i++)
- memcpy(members_v1_get_mut(mi1, i), __bch2_members_v2_get_mut(mi2, i), BCH_MEMBER_V1_BYTES);
-
- return 0;
-}
-
-static int validate_member(struct printbuf *err,
- struct bch_member m,
- struct bch_sb *sb,
- int i)
-{
- if (le64_to_cpu(m.nbuckets) > BCH_MEMBER_NBUCKETS_MAX) {
- prt_printf(err, "device %u: too many buckets (got %llu, max %u)",
- i, le64_to_cpu(m.nbuckets), BCH_MEMBER_NBUCKETS_MAX);
- return -BCH_ERR_invalid_sb_members;
- }
-
- if (le64_to_cpu(m.nbuckets) -
- le16_to_cpu(m.first_bucket) < BCH_MIN_NR_NBUCKETS) {
-		prt_printf(err, "device %u: not enough buckets (got %llu, min %u)",
- i, le64_to_cpu(m.nbuckets), BCH_MIN_NR_NBUCKETS);
- return -BCH_ERR_invalid_sb_members;
- }
-
- if (le16_to_cpu(m.bucket_size) <
- le16_to_cpu(sb->block_size)) {
- prt_printf(err, "device %u: bucket size %u smaller than block size %u",
- i, le16_to_cpu(m.bucket_size), le16_to_cpu(sb->block_size));
- return -BCH_ERR_invalid_sb_members;
- }
-
- if (le16_to_cpu(m.bucket_size) <
- BCH_SB_BTREE_NODE_SIZE(sb)) {
- prt_printf(err, "device %u: bucket size %u smaller than btree node size %llu",
- i, le16_to_cpu(m.bucket_size), BCH_SB_BTREE_NODE_SIZE(sb));
- return -BCH_ERR_invalid_sb_members;
- }
-
- if (m.btree_bitmap_shift >= BCH_MI_BTREE_BITMAP_SHIFT_MAX) {
- prt_printf(err, "device %u: invalid btree_bitmap_shift %u", i, m.btree_bitmap_shift);
- return -BCH_ERR_invalid_sb_members;
- }
-
- return 0;
-}
-
-static void member_to_text(struct printbuf *out,
- struct bch_member m,
- struct bch_sb_field_disk_groups *gi,
- struct bch_sb *sb,
- int i)
-{
- unsigned data_have = bch2_sb_dev_has_data(sb, i);
- u64 bucket_size = le16_to_cpu(m.bucket_size);
- u64 device_size = le64_to_cpu(m.nbuckets) * bucket_size;
-
- if (!bch2_member_alive(&m))
- return;
-
- prt_printf(out, "Device:\t%u\n", i);
-
- printbuf_indent_add(out, 2);
-
- prt_printf(out, "Label:\t");
- if (BCH_MEMBER_GROUP(&m)) {
- unsigned idx = BCH_MEMBER_GROUP(&m) - 1;
-
- if (idx < disk_groups_nr(gi))
- prt_printf(out, "%s (%u)",
- gi->entries[idx].label, idx);
- else
- prt_printf(out, "(bad disk labels section)");
- } else {
- prt_printf(out, "(none)");
- }
- prt_newline(out);
-
- prt_printf(out, "UUID:\t");
- pr_uuid(out, m.uuid.b);
- prt_newline(out);
-
- prt_printf(out, "Size:\t");
- prt_units_u64(out, device_size << 9);
- prt_newline(out);
-
- for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++)
- prt_printf(out, "%s errors:\t%llu\n", bch2_member_error_strs[i], le64_to_cpu(m.errors[i]));
-
- for (unsigned i = 0; i < BCH_IOPS_NR; i++)
- prt_printf(out, "%s iops:\t%u\n", bch2_iops_measurements[i], le32_to_cpu(m.iops[i]));
-
- prt_printf(out, "Bucket size:\t");
- prt_units_u64(out, bucket_size << 9);
- prt_newline(out);
-
- prt_printf(out, "First bucket:\t%u\n", le16_to_cpu(m.first_bucket));
- prt_printf(out, "Buckets:\t%llu\n", le64_to_cpu(m.nbuckets));
-
- prt_printf(out, "Last mount:\t");
- if (m.last_mount)
- bch2_prt_datetime(out, le64_to_cpu(m.last_mount));
- else
- prt_printf(out, "(never)");
- prt_newline(out);
-
- prt_printf(out, "Last superblock write:\t%llu\n", le64_to_cpu(m.seq));
-
- prt_printf(out, "State:\t%s\n",
- BCH_MEMBER_STATE(&m) < BCH_MEMBER_STATE_NR
- ? bch2_member_states[BCH_MEMBER_STATE(&m)]
- : "unknown");
-
- prt_printf(out, "Data allowed:\t");
- if (BCH_MEMBER_DATA_ALLOWED(&m))
- prt_bitflags(out, __bch2_data_types, BCH_MEMBER_DATA_ALLOWED(&m));
- else
- prt_printf(out, "(none)");
- prt_newline(out);
-
- prt_printf(out, "Has data:\t");
- if (data_have)
- prt_bitflags(out, __bch2_data_types, data_have);
- else
- prt_printf(out, "(none)");
- prt_newline(out);
-
- prt_printf(out, "Btree allocated bitmap blocksize:\t");
- if (m.btree_bitmap_shift < 64)
- prt_units_u64(out, 1ULL << m.btree_bitmap_shift);
- else
- prt_printf(out, "(invalid shift %u)", m.btree_bitmap_shift);
- prt_newline(out);
-
- prt_printf(out, "Btree allocated bitmap:\t");
- bch2_prt_u64_base2_nbits(out, le64_to_cpu(m.btree_allocated_bitmap), 64);
- prt_newline(out);
-
- prt_printf(out, "Durability:\t%llu\n", BCH_MEMBER_DURABILITY(&m) ? BCH_MEMBER_DURABILITY(&m) - 1 : 1);
-
- prt_printf(out, "Discard:\t%llu\n", BCH_MEMBER_DISCARD(&m));
- prt_printf(out, "Freespace initialized:\t%llu\n", BCH_MEMBER_FREESPACE_INITIALIZED(&m));
-
- printbuf_indent_sub(out, 2);
-}
-
-static int bch2_sb_members_v1_validate(struct bch_sb *sb, struct bch_sb_field *f,
- enum bch_validate_flags flags, struct printbuf *err)
-{
- struct bch_sb_field_members_v1 *mi = field_to_type(f, members_v1);
- unsigned i;
-
- if ((void *) members_v1_get_mut(mi, sb->nr_devices) > vstruct_end(&mi->field)) {
- prt_printf(err, "too many devices for section size");
- return -BCH_ERR_invalid_sb_members;
- }
-
- for (i = 0; i < sb->nr_devices; i++) {
- struct bch_member m = members_v1_get(mi, i);
-
- int ret = validate_member(err, m, sb, i);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static void bch2_sb_members_v1_to_text(struct printbuf *out, struct bch_sb *sb,
- struct bch_sb_field *f)
-{
- struct bch_sb_field_members_v1 *mi = field_to_type(f, members_v1);
- struct bch_sb_field_disk_groups *gi = bch2_sb_field_get(sb, disk_groups);
- unsigned i;
-
- for (i = 0; i < sb->nr_devices; i++)
- member_to_text(out, members_v1_get(mi, i), gi, sb, i);
-}
-
-const struct bch_sb_field_ops bch_sb_field_ops_members_v1 = {
- .validate = bch2_sb_members_v1_validate,
- .to_text = bch2_sb_members_v1_to_text,
-};
-
-static void bch2_sb_members_v2_to_text(struct printbuf *out, struct bch_sb *sb,
- struct bch_sb_field *f)
-{
- struct bch_sb_field_members_v2 *mi = field_to_type(f, members_v2);
- struct bch_sb_field_disk_groups *gi = bch2_sb_field_get(sb, disk_groups);
- unsigned i;
-
- for (i = 0; i < sb->nr_devices; i++)
- member_to_text(out, members_v2_get(mi, i), gi, sb, i);
-}
-
-static int bch2_sb_members_v2_validate(struct bch_sb *sb, struct bch_sb_field *f,
- enum bch_validate_flags flags, struct printbuf *err)
-{
- struct bch_sb_field_members_v2 *mi = field_to_type(f, members_v2);
- size_t mi_bytes = (void *) __bch2_members_v2_get_mut(mi, sb->nr_devices) -
- (void *) mi;
-
- if (mi_bytes > vstruct_bytes(&mi->field)) {
- prt_printf(err, "section too small (%zu > %zu)",
- mi_bytes, vstruct_bytes(&mi->field));
- return -BCH_ERR_invalid_sb_members;
- }
-
- for (unsigned i = 0; i < sb->nr_devices; i++) {
- int ret = validate_member(err, members_v2_get(mi, i), sb, i);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-const struct bch_sb_field_ops bch_sb_field_ops_members_v2 = {
- .validate = bch2_sb_members_v2_validate,
- .to_text = bch2_sb_members_v2_to_text,
-};
-
-void bch2_sb_members_from_cpu(struct bch_fs *c)
-{
- struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
-
- rcu_read_lock();
- for_each_member_device_rcu(c, ca, NULL) {
- struct bch_member *m = __bch2_members_v2_get_mut(mi, ca->dev_idx);
-
- for (unsigned e = 0; e < BCH_MEMBER_ERROR_NR; e++)
- m->errors[e] = cpu_to_le64(atomic64_read(&ca->errors[e]));
- }
- rcu_read_unlock();
-}
-
-void bch2_dev_io_errors_to_text(struct printbuf *out, struct bch_dev *ca)
-{
- struct bch_fs *c = ca->fs;
- struct bch_member m;
-
- mutex_lock(&ca->fs->sb_lock);
- m = bch2_sb_member_get(c->disk_sb.sb, ca->dev_idx);
- mutex_unlock(&ca->fs->sb_lock);
-
- printbuf_tabstop_push(out, 12);
-
- prt_str(out, "IO errors since filesystem creation");
- prt_newline(out);
-
- printbuf_indent_add(out, 2);
- for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++)
- prt_printf(out, "%s:\t%llu\n", bch2_member_error_strs[i], atomic64_read(&ca->errors[i]));
- printbuf_indent_sub(out, 2);
-
- prt_str(out, "IO errors since ");
- bch2_pr_time_units(out, (ktime_get_real_seconds() - le64_to_cpu(m.errors_reset_time)) * NSEC_PER_SEC);
- prt_str(out, " ago");
- prt_newline(out);
-
- printbuf_indent_add(out, 2);
- for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++)
- prt_printf(out, "%s:\t%llu\n", bch2_member_error_strs[i],
- atomic64_read(&ca->errors[i]) - le64_to_cpu(m.errors_at_reset[i]));
- printbuf_indent_sub(out, 2);
-}
-
-void bch2_dev_errors_reset(struct bch_dev *ca)
-{
- struct bch_fs *c = ca->fs;
- struct bch_member *m;
-
- mutex_lock(&c->sb_lock);
- m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
- for (unsigned i = 0; i < ARRAY_SIZE(m->errors_at_reset); i++)
- m->errors_at_reset[i] = cpu_to_le64(atomic64_read(&ca->errors[i]));
- m->errors_reset_time = cpu_to_le64(ktime_get_real_seconds());
-
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-}
-
-/*
- * Per member "range has btree nodes" bitmap:
- *
- * This exists so that if we ever have to run the btree node scan for repair,
- * we don't have to scan whole devices:
- */
-
-bool bch2_dev_btree_bitmap_marked(struct bch_fs *c, struct bkey_s_c k)
-{
- bool ret = true;
- rcu_read_lock();
- bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
- struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
- if (!ca)
- continue;
-
- if (!bch2_dev_btree_bitmap_marked_sectors(ca, ptr->offset, btree_sectors(c))) {
- ret = false;
- break;
- }
- }
- rcu_read_unlock();
- return ret;
-}
-
-static void __bch2_dev_btree_bitmap_mark(struct bch_sb_field_members_v2 *mi, unsigned dev,
- u64 start, unsigned sectors)
-{
- struct bch_member *m = __bch2_members_v2_get_mut(mi, dev);
- u64 bitmap = le64_to_cpu(m->btree_allocated_bitmap);
-
- u64 end = start + sectors;
-
- int resize = ilog2(roundup_pow_of_two(end)) - (m->btree_bitmap_shift + 6);
- if (resize > 0) {
- u64 new_bitmap = 0;
-
- for (unsigned i = 0; i < 64; i++)
- if (bitmap & BIT_ULL(i))
- new_bitmap |= BIT_ULL(i >> resize);
- bitmap = new_bitmap;
- m->btree_bitmap_shift += resize;
- }
-
- BUG_ON(m->btree_bitmap_shift >= BCH_MI_BTREE_BITMAP_SHIFT_MAX);
- BUG_ON(end > 64ULL << m->btree_bitmap_shift);
-
- for (unsigned bit = start >> m->btree_bitmap_shift;
- (u64) bit << m->btree_bitmap_shift < end;
- bit++)
- bitmap |= BIT_ULL(bit);
-
- m->btree_allocated_bitmap = cpu_to_le64(bitmap);
-}
-
-void bch2_dev_btree_bitmap_mark(struct bch_fs *c, struct bkey_s_c k)
-{
- lockdep_assert_held(&c->sb_lock);
-
- struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
- bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
- if (!bch2_member_exists(c->disk_sb.sb, ptr->dev))
- continue;
-
- __bch2_dev_btree_bitmap_mark(mi, ptr->dev, ptr->offset, btree_sectors(c));
- }
-}
-
-unsigned bch2_sb_nr_devices(const struct bch_sb *sb)
-{
- unsigned nr = 0;
-
- for (unsigned i = 0; i < sb->nr_devices; i++)
- nr += bch2_member_exists((struct bch_sb *) sb, i);
- return nr;
-}
-
-int bch2_sb_member_alloc(struct bch_fs *c)
-{
- unsigned dev_idx = c->sb.nr_devices;
- struct bch_sb_field_members_v2 *mi;
- unsigned nr_devices;
- unsigned u64s;
- int best = -1;
- u64 best_last_mount = 0;
-
- if (dev_idx < BCH_SB_MEMBERS_MAX)
- goto have_slot;
-
- for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++) {
- /* eventually BCH_SB_MEMBERS_MAX will be raised */
- if (dev_idx == BCH_SB_MEMBER_INVALID)
- continue;
-
- struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, dev_idx);
- if (bch2_member_alive(&m))
- continue;
-
- u64 last_mount = le64_to_cpu(m.last_mount);
- if (best < 0 || last_mount < best_last_mount) {
- best = dev_idx;
- best_last_mount = last_mount;
- }
- }
- if (best >= 0) {
- dev_idx = best;
- goto have_slot;
- }
-
- return -BCH_ERR_ENOSPC_sb_members;
-have_slot:
- nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);
-
- mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
- u64s = DIV_ROUND_UP(sizeof(struct bch_sb_field_members_v2) +
- le16_to_cpu(mi->member_bytes) * nr_devices, sizeof(u64));
-
- mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
- if (!mi)
- return -BCH_ERR_ENOSPC_sb_members;
-
- c->disk_sb.sb->nr_devices = nr_devices;
- return dev_idx;
-}
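
__bch2_dev_btree_bitmap_mark() above maintains a 64-bit per-device bitmap in which each bit covers 1 << btree_bitmap_shift sectors; when a newly marked range ends past the current coverage, the granularity is doubled and existing bits are folded down before the new bits are set. A standalone sketch of that fold-and-mark step, doubling one step at a time instead of computing the resize with ilog2()/roundup_pow_of_two(); it assumes the shift stays well below the real code's BCH_MI_BTREE_BITMAP_SHIFT_MAX bound (names are illustrative):

#include <stdint.h>

static void bitmap_mark(uint64_t *bitmap, unsigned *shift,
			uint64_t start, uint64_t sectors)
{
	uint64_t end = start + sectors;

	/* Grow the per-bit granularity until 64 bits cover the marked range: */
	while (end > (uint64_t) 64 << *shift) {
		uint64_t folded = 0;

		for (unsigned i = 0; i < 64; i++)
			if (*bitmap & ((uint64_t) 1 << i))
				folded |= (uint64_t) 1 << (i >> 1);
		*bitmap = folded;
		(*shift)++;
	}

	/* Set every bit whose range intersects [start, end): */
	for (uint64_t bit = start >> *shift;
	     bit << *shift < end;
	     bit++)
		*bitmap |= (uint64_t) 1 << bit;
}
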
diff --git a/fs/bcachefs/sb-members.h b/fs/bcachefs/sb-members.h
deleted file mode 100644
index 762083b564ee..000000000000
--- a/fs/bcachefs/sb-members.h
+++ /dev/null
@@ -1,367 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SB_MEMBERS_H
-#define _BCACHEFS_SB_MEMBERS_H
-
-#include "darray.h"
-#include "bkey_types.h"
-
-extern char * const bch2_member_error_strs[];
-
-static inline struct bch_member *
-__bch2_members_v2_get_mut(struct bch_sb_field_members_v2 *mi, unsigned i)
-{
- return (void *) mi->_members + (i * le16_to_cpu(mi->member_bytes));
-}
-
-int bch2_sb_members_v2_init(struct bch_fs *c);
-int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb);
-struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i);
-struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);
-
-static inline bool bch2_dev_is_online(struct bch_dev *ca)
-{
- return !percpu_ref_is_zero(&ca->io_ref);
-}
-
-static inline bool bch2_dev_is_readable(struct bch_dev *ca)
-{
- return bch2_dev_is_online(ca) &&
- ca->mi.state != BCH_MEMBER_STATE_failed;
-}
-
-static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
-{
- return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
-}
-
-static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
- unsigned dev)
-{
- darray_for_each(devs, i)
- if (*i == dev)
- return true;
- return false;
-}
-
-static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
- unsigned dev)
-{
- darray_for_each(*devs, i)
- if (*i == dev) {
- darray_remove_item(devs, i);
- return;
- }
-}
-
-static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
- unsigned dev)
-{
- if (!bch2_dev_list_has_dev(*devs, dev)) {
- BUG_ON(devs->nr >= ARRAY_SIZE(devs->data));
- devs->data[devs->nr++] = dev;
- }
-}
-
-static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
-{
- return (struct bch_devs_list) { .nr = 1, .data[0] = dev };
-}
-
-static inline struct bch_dev *__bch2_next_dev_idx(struct bch_fs *c, unsigned idx,
- const struct bch_devs_mask *mask)
-{
- struct bch_dev *ca = NULL;
-
- while ((idx = mask
- ? find_next_bit(mask->d, c->sb.nr_devices, idx)
- : idx) < c->sb.nr_devices &&
- !(ca = rcu_dereference_check(c->devs[idx],
- lockdep_is_held(&c->state_lock))))
- idx++;
-
- return ca;
-}
-
-static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev *ca,
- const struct bch_devs_mask *mask)
-{
- return __bch2_next_dev_idx(c, ca ? ca->dev_idx + 1 : 0, mask);
-}
-
-#define for_each_member_device_rcu(_c, _ca, _mask) \
- for (struct bch_dev *_ca = NULL; \
- (_ca = __bch2_next_dev((_c), _ca, (_mask)));)
-
-static inline void bch2_dev_get(struct bch_dev *ca)
-{
-#ifdef CONFIG_BCACHEFS_DEBUG
- BUG_ON(atomic_long_inc_return(&ca->ref) <= 1L);
-#else
- percpu_ref_get(&ca->ref);
-#endif
-}
-
-static inline void __bch2_dev_put(struct bch_dev *ca)
-{
-#ifdef CONFIG_BCACHEFS_DEBUG
- long r = atomic_long_dec_return(&ca->ref);
- if (r < (long) !ca->dying)
- panic("bch_dev->ref underflow, last put: %pS\n", (void *) ca->last_put);
- ca->last_put = _THIS_IP_;
- if (!r)
- complete(&ca->ref_completion);
-#else
- percpu_ref_put(&ca->ref);
-#endif
-}
-
-static inline void bch2_dev_put(struct bch_dev *ca)
-{
- if (ca)
- __bch2_dev_put(ca);
-}
-
-static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
-{
- rcu_read_lock();
- bch2_dev_put(ca);
- if ((ca = __bch2_next_dev(c, ca, NULL)))
- bch2_dev_get(ca);
- rcu_read_unlock();
-
- return ca;
-}
-
-/*
- * If you break early, you must drop your ref on the current device
- */
-#define __for_each_member_device(_c, _ca) \
- for (; (_ca = bch2_get_next_dev(_c, _ca));)
-
-#define for_each_member_device(_c, _ca) \
- for (struct bch_dev *_ca = NULL; \
- (_ca = bch2_get_next_dev(_c, _ca));)
-
-static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
- struct bch_dev *ca,
- unsigned state_mask)
-{
- rcu_read_lock();
- if (ca)
- percpu_ref_put(&ca->io_ref);
-
- while ((ca = __bch2_next_dev(c, ca, NULL)) &&
- (!((1 << ca->mi.state) & state_mask) ||
- !percpu_ref_tryget(&ca->io_ref)))
- ;
- rcu_read_unlock();
-
- return ca;
-}
-
-#define __for_each_online_member(_c, _ca, state_mask) \
- for (struct bch_dev *_ca = NULL; \
- (_ca = bch2_get_next_online_dev(_c, _ca, state_mask));)
-
-#define for_each_online_member(c, ca) \
- __for_each_online_member(c, ca, ~0)
-
-#define for_each_rw_member(c, ca) \
- __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw))
-
-#define for_each_readable_member(c, ca) \
-	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))
-
-static inline bool bch2_dev_exists(const struct bch_fs *c, unsigned dev)
-{
- return dev < c->sb.nr_devices && c->devs[dev];
-}
-
-static inline bool bucket_valid(const struct bch_dev *ca, u64 b)
-{
- return b - ca->mi.first_bucket < ca->mi.nbuckets_minus_first;
-}
-
-static inline struct bch_dev *bch2_dev_have_ref(const struct bch_fs *c, unsigned dev)
-{
- EBUG_ON(!bch2_dev_exists(c, dev));
-
- return rcu_dereference_check(c->devs[dev], 1);
-}
-
-static inline struct bch_dev *bch2_dev_locked(struct bch_fs *c, unsigned dev)
-{
- EBUG_ON(!bch2_dev_exists(c, dev));
-
- return rcu_dereference_protected(c->devs[dev],
- lockdep_is_held(&c->sb_lock) ||
- lockdep_is_held(&c->state_lock));
-}
-
-static inline struct bch_dev *bch2_dev_rcu_noerror(struct bch_fs *c, unsigned dev)
-{
- return c && dev < c->sb.nr_devices
- ? rcu_dereference(c->devs[dev])
- : NULL;
-}
-
-void bch2_dev_missing(struct bch_fs *, unsigned);
-
-static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *c, unsigned dev)
-{
- struct bch_dev *ca = bch2_dev_rcu_noerror(c, dev);
- if (unlikely(!ca))
- bch2_dev_missing(c, dev);
- return ca;
-}
-
-static inline struct bch_dev *bch2_dev_tryget_noerror(struct bch_fs *c, unsigned dev)
-{
- rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu_noerror(c, dev);
- if (ca)
- bch2_dev_get(ca);
- rcu_read_unlock();
- return ca;
-}
-
-static inline struct bch_dev *bch2_dev_tryget(struct bch_fs *c, unsigned dev)
-{
- struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
- if (unlikely(!ca))
- bch2_dev_missing(c, dev);
- return ca;
-}
-
-static inline struct bch_dev *bch2_dev_bucket_tryget_noerror(struct bch_fs *c, struct bpos bucket)
-{
- struct bch_dev *ca = bch2_dev_tryget_noerror(c, bucket.inode);
- if (ca && !bucket_valid(ca, bucket.offset)) {
- bch2_dev_put(ca);
- ca = NULL;
- }
- return ca;
-}
-
-void bch2_dev_bucket_missing(struct bch_fs *, struct bpos);
-
-static inline struct bch_dev *bch2_dev_bucket_tryget(struct bch_fs *c, struct bpos bucket)
-{
- struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, bucket);
- if (!ca)
- bch2_dev_bucket_missing(c, bucket);
- return ca;
-}
-
-static inline struct bch_dev *bch2_dev_iterate_noerror(struct bch_fs *c, struct bch_dev *ca, unsigned dev_idx)
-{
- if (ca && ca->dev_idx == dev_idx)
- return ca;
- bch2_dev_put(ca);
- return bch2_dev_tryget_noerror(c, dev_idx);
-}
-
-static inline struct bch_dev *bch2_dev_iterate(struct bch_fs *c, struct bch_dev *ca, unsigned dev_idx)
-{
- if (ca && ca->dev_idx == dev_idx)
- return ca;
- bch2_dev_put(ca);
- return bch2_dev_tryget(c, dev_idx);
-}
-
-static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev, int rw)
-{
- rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, dev);
- if (ca && !percpu_ref_tryget(&ca->io_ref))
- ca = NULL;
- rcu_read_unlock();
-
- if (ca &&
- (ca->mi.state == BCH_MEMBER_STATE_rw ||
- (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ)))
- return ca;
-
- if (ca)
- percpu_ref_put(&ca->io_ref);
- return NULL;
-}
-
-/* XXX kill, move to struct bch_fs */
-static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
-{
- struct bch_devs_mask devs;
-
- memset(&devs, 0, sizeof(devs));
- for_each_online_member(c, ca)
- __set_bit(ca->dev_idx, devs.d);
- return devs;
-}
-
-extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
-extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;
-
-static inline bool bch2_member_alive(struct bch_member *m)
-{
- return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
-}
-
-static inline bool bch2_member_exists(struct bch_sb *sb, unsigned dev)
-{
- if (dev < sb->nr_devices) {
- struct bch_member m = bch2_sb_member_get(sb, dev);
- return bch2_member_alive(&m);
- }
- return false;
-}
-
-unsigned bch2_sb_nr_devices(const struct bch_sb *);
-
-static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
-{
- return (struct bch_member_cpu) {
- .nbuckets = le64_to_cpu(mi->nbuckets),
- .nbuckets_minus_first = le64_to_cpu(mi->nbuckets) -
- le16_to_cpu(mi->first_bucket),
- .first_bucket = le16_to_cpu(mi->first_bucket),
- .bucket_size = le16_to_cpu(mi->bucket_size),
- .group = BCH_MEMBER_GROUP(mi),
- .state = BCH_MEMBER_STATE(mi),
- .discard = BCH_MEMBER_DISCARD(mi),
- .data_allowed = BCH_MEMBER_DATA_ALLOWED(mi),
- .durability = BCH_MEMBER_DURABILITY(mi)
- ? BCH_MEMBER_DURABILITY(mi) - 1
- : 1,
- .freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
- .valid = bch2_member_alive(mi),
- .btree_bitmap_shift = mi->btree_bitmap_shift,
- .btree_allocated_bitmap = le64_to_cpu(mi->btree_allocated_bitmap),
- };
-}
-
-void bch2_sb_members_from_cpu(struct bch_fs *);
-
-void bch2_dev_io_errors_to_text(struct printbuf *, struct bch_dev *);
-void bch2_dev_errors_reset(struct bch_dev *);
-
-static inline bool bch2_dev_btree_bitmap_marked_sectors(struct bch_dev *ca, u64 start, unsigned sectors)
-{
- u64 end = start + sectors;
-
- if (end > 64ULL << ca->mi.btree_bitmap_shift)
- return false;
-
- for (unsigned bit = start >> ca->mi.btree_bitmap_shift;
- (u64) bit << ca->mi.btree_bitmap_shift < end;
- bit++)
- if (!(ca->mi.btree_allocated_bitmap & BIT_ULL(bit)))
- return false;
- return true;
-}
-
-bool bch2_dev_btree_bitmap_marked(struct bch_fs *, struct bkey_s_c);
-void bch2_dev_btree_bitmap_mark(struct bch_fs *, struct bkey_s_c);
-
-int bch2_sb_member_alloc(struct bch_fs *);
-
-#endif /* _BCACHEFS_SB_MEMBERS_H */
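
for_each_member_device() above iterates hand over hand: bch2_get_next_dev() drops the reference on the device just visited and returns the next live device with a reference already taken, which is why breaking out of the loop early requires a manual bch2_dev_put(). A simplified userspace sketch of the same pattern, with a plain counter standing in for the percpu ref and no RCU (all names below are illustrative):

struct dev { int ref; int idx; };

static struct dev *next_dev(struct dev **devs, int nr, struct dev *prev)
{
	int idx = prev ? prev->idx + 1 : 0;

	if (prev)
		prev->ref--;			/* put the device we just visited */

	for (; idx < nr; idx++)
		if (devs[idx]) {
			devs[idx]->ref++;	/* get the next live device */
			return devs[idx];
		}
	return NULL;
}

#define for_each_dev(_devs, _nr, _d) \
	for (struct dev *_d = NULL; (_d = next_dev(_devs, _nr, _d)); )

As with the real macro, a caller that breaks out of for_each_dev() still holds a reference on the current element and must drop it itself.
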
diff --git a/fs/bcachefs/sb-members_format.h b/fs/bcachefs/sb-members_format.h
deleted file mode 100644
index 2adf1221a440..000000000000
--- a/fs/bcachefs/sb-members_format.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SB_MEMBERS_FORMAT_H
-#define _BCACHEFS_SB_MEMBERS_FORMAT_H
-
-/*
- * We refer to members with bitmasks in various places - but we need to get rid
- * of this limit:
- */
-#define BCH_SB_MEMBERS_MAX 64
-
-/*
- * Sentinel value - indicates a device that does not exist
- */
-#define BCH_SB_MEMBER_INVALID 255
-
-#define BCH_MIN_NR_NBUCKETS (1 << 6)
-
-#define BCH_IOPS_MEASUREMENTS() \
- x(seqread, 0) \
- x(seqwrite, 1) \
- x(randread, 2) \
- x(randwrite, 3)
-
-enum bch_iops_measurement {
-#define x(t, n) BCH_IOPS_##t = n,
- BCH_IOPS_MEASUREMENTS()
-#undef x
- BCH_IOPS_NR
-};
-
-#define BCH_MEMBER_ERROR_TYPES() \
- x(read, 0) \
- x(write, 1) \
- x(checksum, 2)
-
-enum bch_member_error_type {
-#define x(t, n) BCH_MEMBER_ERROR_##t = n,
- BCH_MEMBER_ERROR_TYPES()
-#undef x
- BCH_MEMBER_ERROR_NR
-};
-
-struct bch_member {
- __uuid_t uuid;
- __le64 nbuckets; /* device size */
- __le16 first_bucket; /* index of first bucket used */
- __le16 bucket_size; /* sectors */
- __u8 btree_bitmap_shift;
- __u8 pad[3];
- __le64 last_mount; /* time_t */
-
- __le64 flags;
- __le32 iops[4];
- __le64 errors[BCH_MEMBER_ERROR_NR];
- __le64 errors_at_reset[BCH_MEMBER_ERROR_NR];
- __le64 errors_reset_time;
- __le64 seq;
- __le64 btree_allocated_bitmap;
- /*
- * On recovery from a clean shutdown we don't normally read the journal,
- * but we still want to resume writing from where we left off so we
- * don't overwrite more than is necessary, for list journal debugging:
- */
- __le32 last_journal_bucket;
- __le32 last_journal_bucket_offset;
-};
-
-/*
- * btree_allocated_bitmap can represent the full u64 range of sector addresses:
- * the bitmap itself has 64 bits, so the maximum shift is 64 - ilog2(64) = 58
- */
-#define BCH_MI_BTREE_BITMAP_SHIFT_MAX 58
-
-/*
- * This limit comes from the bucket_gens array - it's a single allocation, and
- * kernel allocations are limited to INT_MAX
- */
-#define BCH_MEMBER_NBUCKETS_MAX (INT_MAX - 64)
-
-#define BCH_MEMBER_V1_BYTES 56
-
-LE64_BITMASK(BCH_MEMBER_STATE, struct bch_member, flags, 0, 4)
-/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
-LE64_BITMASK(BCH_MEMBER_DISCARD, struct bch_member, flags, 14, 15)
-LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED, struct bch_member, flags, 15, 20)
-LE64_BITMASK(BCH_MEMBER_GROUP, struct bch_member, flags, 20, 28)
-LE64_BITMASK(BCH_MEMBER_DURABILITY, struct bch_member, flags, 28, 30)
-LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
- struct bch_member, flags, 30, 31)
-
-#if 0
-LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS, struct bch_member, flags[1], 0, 20);
-LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
-#endif
-
-#define BCH_MEMBER_STATES() \
- x(rw, 0) \
- x(ro, 1) \
- x(failed, 2) \
- x(spare, 3)
-
-enum bch_member_state {
-#define x(t, n) BCH_MEMBER_STATE_##t = n,
- BCH_MEMBER_STATES()
-#undef x
- BCH_MEMBER_STATE_NR
-};
-
-struct bch_sb_field_members_v1 {
- struct bch_sb_field field;
-	struct bch_member	_members[];	// Members are now variable size
-};
-
-struct bch_sb_field_members_v2 {
- struct bch_sb_field field;
-	__le16			member_bytes;	// size of single member entry
- u8 pad[6];
- struct bch_member _members[];
-};
-
-#endif /* _BCACHEFS_SB_MEMBERS_FORMAT_H */
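
The LE64_BITMASK() lines above pack several member attributes into the single little-endian flags word; DURABILITY occupies bits 28-30 and is stored biased by one so that zero means "unset, use the default of 1" (see bch2_mi_to_cpu() and member_to_text() earlier in this diff). A small sketch of that decode on a CPU-order flags value (the helper name is illustrative):

/* Bits 28..29 of the flags word hold durability + 1; 0 means "unset". */
static unsigned member_durability(unsigned long long flags)
{
	unsigned raw = (flags >> 28) & 0x3;

	return raw ? raw - 1 : 1;
}
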
diff --git a/fs/bcachefs/sb-members_types.h b/fs/bcachefs/sb-members_types.h
deleted file mode 100644
index c0eda888fe39..000000000000
--- a/fs/bcachefs/sb-members_types.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SB_MEMBERS_TYPES_H
-#define _BCACHEFS_SB_MEMBERS_TYPES_H
-
-struct bch_member_cpu {
- u64 nbuckets; /* device size */
- u64 nbuckets_minus_first;
- u16 first_bucket; /* index of first bucket used */
- u16 bucket_size; /* sectors */
- u16 group;
- u8 state;
- u8 discard;
- u8 data_allowed;
- u8 durability;
- u8 freespace_initialized;
- u8 valid;
- u8 btree_bitmap_shift;
- u64 btree_allocated_bitmap;
-};
-
-#endif /* _BCACHEFS_SB_MEMBERS_TYPES_H */
diff --git a/fs/bcachefs/seqmutex.h b/fs/bcachefs/seqmutex.h
deleted file mode 100644
index c4b3d8d3f414..000000000000
--- a/fs/bcachefs/seqmutex.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SEQMUTEX_H
-#define _BCACHEFS_SEQMUTEX_H
-
-#include <linux/mutex.h>
-
-struct seqmutex {
- struct mutex lock;
- u32 seq;
-};
-
-#define seqmutex_init(_lock) mutex_init(&(_lock)->lock)
-
-static inline bool seqmutex_trylock(struct seqmutex *lock)
-{
- return mutex_trylock(&lock->lock);
-}
-
-static inline void seqmutex_lock(struct seqmutex *lock)
-{
- mutex_lock(&lock->lock);
- lock->seq++;
-}
-
-static inline u32 seqmutex_unlock(struct seqmutex *lock)
-{
- u32 seq = lock->seq;
- mutex_unlock(&lock->lock);
- return seq;
-}
-
-static inline bool seqmutex_relock(struct seqmutex *lock, u32 seq)
-{
- if (lock->seq != seq || !mutex_trylock(&lock->lock))
- return false;
-
- if (lock->seq != seq) {
- mutex_unlock(&lock->lock);
- return false;
- }
-
- return true;
-}
-
-#endif /* _BCACHEFS_SEQMUTEX_H */
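
seqmutex pairs a mutex with a generation counter that is bumped on every lock, so a caller can record the sequence at unlock time and later use seqmutex_relock() to find out whether anyone else took the lock in between. A hypothetical caller of the API declared above (the helper name and the work it does are made up for illustration):

static bool update_if_unchanged(struct seqmutex *lock)
{
	u32 seq;

	seqmutex_lock(lock);
	/* ... snapshot some state ... */
	seq = seqmutex_unlock(lock);

	/* ... slow work without the lock held ... */

	if (!seqmutex_relock(lock, seq))
		return false;		/* lock was taken in the meantime; caller retries */

	/* ... apply the update, knowing the lock wasn't touched ... */
	seqmutex_unlock(lock);
	return true;
}
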
diff --git a/fs/bcachefs/siphash.c b/fs/bcachefs/siphash.c
deleted file mode 100644
index a1cc44e66c7e..000000000000
--- a/fs/bcachefs/siphash.c
+++ /dev/null
@@ -1,173 +0,0 @@
-// SPDX-License-Identifier: BSD-3-Clause
-/* $OpenBSD: siphash.c,v 1.3 2015/02/20 11:51:03 tedu Exp $ */
-
-/*-
- * Copyright (c) 2013 Andre Oppermann <andre@FreeBSD.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote
- * products derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/*
- * SipHash is a family of PRFs SipHash-c-d where the integer parameters c and d
- * are the number of compression rounds and the number of finalization rounds.
- * A compression round is identical to a finalization round and this round
- * function is called SipRound. Given a 128-bit key k and a (possibly empty)
- * byte string m, SipHash-c-d returns a 64-bit value SipHash-c-d(k; m).
- *
- * Implemented from the paper "SipHash: a fast short-input PRF", 2012.09.18,
- * by Jean-Philippe Aumasson and Daniel J. Bernstein,
- * Permanent Document ID b9a943a805fbfc6fde808af9fc0ecdfa
- * https://131002.net/siphash/siphash.pdf
- * https://131002.net/siphash/
- */
-
-#include <asm/byteorder.h>
-#include <linux/unaligned.h>
-#include <linux/bitops.h>
-#include <linux/string.h>
-
-#include "siphash.h"
-
-static void SipHash_Rounds(SIPHASH_CTX *ctx, int rounds)
-{
- while (rounds--) {
- ctx->v[0] += ctx->v[1];
- ctx->v[2] += ctx->v[3];
- ctx->v[1] = rol64(ctx->v[1], 13);
- ctx->v[3] = rol64(ctx->v[3], 16);
-
- ctx->v[1] ^= ctx->v[0];
- ctx->v[3] ^= ctx->v[2];
- ctx->v[0] = rol64(ctx->v[0], 32);
-
- ctx->v[2] += ctx->v[1];
- ctx->v[0] += ctx->v[3];
- ctx->v[1] = rol64(ctx->v[1], 17);
- ctx->v[3] = rol64(ctx->v[3], 21);
-
- ctx->v[1] ^= ctx->v[2];
- ctx->v[3] ^= ctx->v[0];
- ctx->v[2] = rol64(ctx->v[2], 32);
- }
-}
-
-static void SipHash_CRounds(SIPHASH_CTX *ctx, const void *ptr, int rounds)
-{
- u64 m = get_unaligned_le64(ptr);
-
- ctx->v[3] ^= m;
- SipHash_Rounds(ctx, rounds);
- ctx->v[0] ^= m;
-}
-
-void SipHash_Init(SIPHASH_CTX *ctx, const SIPHASH_KEY *key)
-{
- u64 k0, k1;
-
- k0 = le64_to_cpu(key->k0);
- k1 = le64_to_cpu(key->k1);
-
- ctx->v[0] = 0x736f6d6570736575ULL ^ k0;
- ctx->v[1] = 0x646f72616e646f6dULL ^ k1;
- ctx->v[2] = 0x6c7967656e657261ULL ^ k0;
- ctx->v[3] = 0x7465646279746573ULL ^ k1;
-
- memset(ctx->buf, 0, sizeof(ctx->buf));
- ctx->bytes = 0;
-}
-
-void SipHash_Update(SIPHASH_CTX *ctx, int rc, int rf,
- const void *src, size_t len)
-{
- const u8 *ptr = src;
- size_t left, used;
-
- if (len == 0)
- return;
-
- used = ctx->bytes % sizeof(ctx->buf);
- ctx->bytes += len;
-
- if (used > 0) {
- left = sizeof(ctx->buf) - used;
-
- if (len >= left) {
- memcpy(&ctx->buf[used], ptr, left);
- SipHash_CRounds(ctx, ctx->buf, rc);
- len -= left;
- ptr += left;
- } else {
- memcpy(&ctx->buf[used], ptr, len);
- return;
- }
- }
-
- while (len >= sizeof(ctx->buf)) {
- SipHash_CRounds(ctx, ptr, rc);
- len -= sizeof(ctx->buf);
- ptr += sizeof(ctx->buf);
- }
-
- if (len > 0)
- memcpy(&ctx->buf[used], ptr, len);
-}
-
-void SipHash_Final(void *dst, SIPHASH_CTX *ctx, int rc, int rf)
-{
- u64 r;
-
- r = SipHash_End(ctx, rc, rf);
-
- *((__le64 *) dst) = cpu_to_le64(r);
-}
-
-u64 SipHash_End(SIPHASH_CTX *ctx, int rc, int rf)
-{
- u64 r;
- size_t left, used;
-
- used = ctx->bytes % sizeof(ctx->buf);
- left = sizeof(ctx->buf) - used;
- memset(&ctx->buf[used], 0, left - 1);
- ctx->buf[7] = ctx->bytes;
-
- SipHash_CRounds(ctx, ctx->buf, rc);
- ctx->v[2] ^= 0xff;
- SipHash_Rounds(ctx, rf);
-
- r = (ctx->v[0] ^ ctx->v[1]) ^ (ctx->v[2] ^ ctx->v[3]);
- memset(ctx, 0, sizeof(*ctx));
- return r;
-}
-
-u64 SipHash(const SIPHASH_KEY *key, int rc, int rf, const void *src, size_t len)
-{
- SIPHASH_CTX ctx;
-
- SipHash_Init(&ctx, key);
- SipHash_Update(&ctx, rc, rf, src, len);
- return SipHash_End(&ctx, rc, rf);
-}
diff --git a/fs/bcachefs/siphash.h b/fs/bcachefs/siphash.h
deleted file mode 100644
index 3dfaf34a43b2..000000000000
--- a/fs/bcachefs/siphash.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause */
-/* $OpenBSD: siphash.h,v 1.5 2015/02/20 11:51:03 tedu Exp $ */
-/*-
- * Copyright (c) 2013 Andre Oppermann <andre@FreeBSD.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote
- * products derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-/*
- * SipHash is a family of pseudorandom functions (a.k.a. keyed hash functions)
- * optimized for speed on short messages, returning a 64-bit hash/digest value.
- *
- * The number of rounds is selected by the macro family used:
- * SipHash24_*() for the fast and reasonably strong version
- * SipHash48_*() for the stronger version (half as fast)
- *
- * SIPHASH_CTX ctx;
- * SipHash24_Init(&ctx, &key);
- * SipHash24_Update(&ctx, pointer_to_string, length_of_string);
- * SipHash24_Final(output, &ctx);
- * (key is a SIPHASH_KEY; output points to SIPHASH_DIGEST_LENGTH bytes)
- */
-
-#ifndef _SIPHASH_H_
-#define _SIPHASH_H_
-
-#include <linux/types.h>
-
-#define SIPHASH_BLOCK_LENGTH 8
-#define SIPHASH_KEY_LENGTH 16
-#define SIPHASH_DIGEST_LENGTH 8
-
-typedef struct _SIPHASH_CTX {
- u64 v[4];
- u8 buf[SIPHASH_BLOCK_LENGTH];
- u32 bytes;
-} SIPHASH_CTX;
-
-typedef struct {
- __le64 k0;
- __le64 k1;
-} SIPHASH_KEY;
-
-void SipHash_Init(SIPHASH_CTX *, const SIPHASH_KEY *);
-void SipHash_Update(SIPHASH_CTX *, int, int, const void *, size_t);
-u64 SipHash_End(SIPHASH_CTX *, int, int);
-void SipHash_Final(void *, SIPHASH_CTX *, int, int);
-u64 SipHash(const SIPHASH_KEY *, int, int, const void *, size_t);
-
-#define SipHash24_Init(_c, _k) SipHash_Init((_c), (_k))
-#define SipHash24_Update(_c, _p, _l) SipHash_Update((_c), 2, 4, (_p), (_l))
-#define SipHash24_End(_d) SipHash_End((_d), 2, 4)
-#define SipHash24_Final(_d, _c) SipHash_Final((_d), (_c), 2, 4)
-#define SipHash24(_k, _p, _l) SipHash((_k), 2, 4, (_p), (_l))
-
-#define SipHash48_Init(_c, _k) SipHash_Init((_c), (_k))
-#define SipHash48_Update(_c, _p, _l) SipHash_Update((_c), 4, 8, (_p), (_l))
-#define SipHash48_End(_d) SipHash_End((_d), 4, 8)
-#define SipHash48_Final(_d, _c) SipHash_Final((_d), (_c), 4, 8)
-#define SipHash48(_k, _p, _l) SipHash((_k), 4, 8, (_p), (_l))
-
-#endif /* _SIPHASH_H_ */
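As a rough illustration of the interface declared above (a minimal sketch, not part of the original header), the incremental SipHash-2-4 entry points combine as follows; the 128-bit SIPHASH_KEY is supplied by the caller:

#include <linux/types.h>
#include "siphash.h"

/* Hash an arbitrary buffer with SipHash-2-4 via the incremental interface. */
static u64 example_siphash24(const SIPHASH_KEY *key, const void *buf, size_t len)
{
        SIPHASH_CTX ctx;

        SipHash24_Init(&ctx, key);
        SipHash24_Update(&ctx, buf, len);
        return SipHash24_End(&ctx);     /* equivalent to the one-shot SipHash24(key, buf, len) */
}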
diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c
deleted file mode 100644
index 617d07e53b20..000000000000
--- a/fs/bcachefs/six.c
+++ /dev/null
@@ -1,873 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/export.h>
-#include <linux/log2.h>
-#include <linux/percpu.h>
-#include <linux/preempt.h>
-#include <linux/rcupdate.h>
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
-#include <linux/sched/rt.h>
-#include <linux/sched/task.h>
-#include <linux/slab.h>
-
-#include <trace/events/lock.h>
-
-#include "six.h"
-
-#ifdef DEBUG
-#define EBUG_ON(cond) BUG_ON(cond)
-#else
-#define EBUG_ON(cond) do {} while (0)
-#endif
-
-#define six_acquire(l, t, r, ip) lock_acquire(l, 0, t, r, 1, NULL, ip)
-#define six_release(l, ip) lock_release(l, ip)
-
-static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);
-
-#define SIX_LOCK_HELD_read_OFFSET 0
-#define SIX_LOCK_HELD_read ~(~0U << 26)
-#define SIX_LOCK_HELD_intent (1U << 26)
-#define SIX_LOCK_HELD_write (1U << 27)
-#define SIX_LOCK_WAITING_read (1U << (28 + SIX_LOCK_read))
-#define SIX_LOCK_WAITING_write (1U << (28 + SIX_LOCK_write))
-#define SIX_LOCK_NOSPIN (1U << 31)
-
-struct six_lock_vals {
- /* Value we add to the lock in order to take the lock: */
- u32 lock_val;
-
- /* If the lock has this value (used as a mask), taking the lock fails: */
- u32 lock_fail;
-
- /* Mask that indicates lock is held for this type: */
- u32 held_mask;
-
- /* Waitlist we wakeup when releasing the lock: */
- enum six_lock_type unlock_wakeup;
-};
-
-static const struct six_lock_vals l[] = {
- [SIX_LOCK_read] = {
- .lock_val = 1U << SIX_LOCK_HELD_read_OFFSET,
- .lock_fail = SIX_LOCK_HELD_write,
- .held_mask = SIX_LOCK_HELD_read,
- .unlock_wakeup = SIX_LOCK_write,
- },
- [SIX_LOCK_intent] = {
- .lock_val = SIX_LOCK_HELD_intent,
- .lock_fail = SIX_LOCK_HELD_intent,
- .held_mask = SIX_LOCK_HELD_intent,
- .unlock_wakeup = SIX_LOCK_intent,
- },
- [SIX_LOCK_write] = {
- .lock_val = SIX_LOCK_HELD_write,
- .lock_fail = SIX_LOCK_HELD_read,
- .held_mask = SIX_LOCK_HELD_write,
- .unlock_wakeup = SIX_LOCK_read,
- },
-};
-
-static inline void six_set_bitmask(struct six_lock *lock, u32 mask)
-{
- if ((atomic_read(&lock->state) & mask) != mask)
- atomic_or(mask, &lock->state);
-}
-
-static inline void six_clear_bitmask(struct six_lock *lock, u32 mask)
-{
- if (atomic_read(&lock->state) & mask)
- atomic_and(~mask, &lock->state);
-}
-
-static inline void six_set_owner(struct six_lock *lock, enum six_lock_type type,
- u32 old, struct task_struct *owner)
-{
- if (type != SIX_LOCK_intent)
- return;
-
- if (!(old & SIX_LOCK_HELD_intent)) {
- EBUG_ON(lock->owner);
- lock->owner = owner;
- } else {
- EBUG_ON(lock->owner != current);
- }
-}
-
-static inline unsigned pcpu_read_count(struct six_lock *lock)
-{
- unsigned read_count = 0;
- int cpu;
-
- for_each_possible_cpu(cpu)
- read_count += *per_cpu_ptr(lock->readers, cpu);
- return read_count;
-}
-
-/*
- * __do_six_trylock() - main trylock routine
- *
- * Returns 1 on success, 0 on failure
- *
- * In percpu reader mode, a failed trylock may cause a spurious trylock failure
- * for another thread taking the competing lock type, and we may have to do a
- * wakeup: when a wakeup is required, we return -1 - wakeup_type.
- */
-static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type,
- struct task_struct *task, bool try)
-{
- int ret;
- u32 old;
-
- EBUG_ON(type == SIX_LOCK_write && lock->owner != task);
- EBUG_ON(type == SIX_LOCK_write &&
- (try != !(atomic_read(&lock->state) & SIX_LOCK_HELD_write)));
-
- /*
- * Percpu reader mode:
- *
- * The basic idea behind this algorithm is that you can implement a lock
- * between two threads without any atomics, just memory barriers:
- *
- * For two threads you'll need two variables, one variable for "thread a
- * has the lock" and another for "thread b has the lock".
- *
- * To take the lock, a thread sets its variable indicating that it holds
- * the lock, then issues a full memory barrier, then reads from the
- * other thread's variable to check if the other thread thinks it has
- * the lock. If we raced, we backoff and retry/sleep.
- *
- * Failure to take the lock may cause a spurious trylock failure in
- * another thread, because we temporarily set the lock to indicate that
- * we held it. This would be a problem for a thread in six_lock(), when
- * it calls trylock after adding itself to the waitlist and prior to
- * sleeping.
- *
- * Therefore, if we fail to get the lock, and there were waiters of the
- * type we conflict with, we will have to issue a wakeup.
- *
- * Since we may be called under wait_lock (and by the wakeup code
- * itself), we return that the wakeup has to be done instead of doing it
- * here.
- */
- if (type == SIX_LOCK_read && lock->readers) {
- preempt_disable();
- this_cpu_inc(*lock->readers); /* signal that we own lock */
-
- smp_mb();
-
- old = atomic_read(&lock->state);
- ret = !(old & l[type].lock_fail);
-
- this_cpu_sub(*lock->readers, !ret);
- preempt_enable();
-
- if (!ret) {
- smp_mb();
- if (atomic_read(&lock->state) & SIX_LOCK_WAITING_write)
- ret = -1 - SIX_LOCK_write;
- }
- } else if (type == SIX_LOCK_write && lock->readers) {
- if (try)
- atomic_add(SIX_LOCK_HELD_write, &lock->state);
-
- /*
- * Make sure the atomic_add happens before pcpu_read_count, and that
- * six_set_bitmask in the slow path happens before pcpu_read_count.
- *
- * Paired with the smp_mb() in read lock fast path (per-cpu mode)
- * and the one before atomic_read in read unlock path.
- */
- smp_mb();
- ret = !pcpu_read_count(lock);
-
- if (try && !ret) {
- old = atomic_sub_return(SIX_LOCK_HELD_write, &lock->state);
- if (old & SIX_LOCK_WAITING_read)
- ret = -1 - SIX_LOCK_read;
- }
- } else {
- old = atomic_read(&lock->state);
- do {
- ret = !(old & l[type].lock_fail);
- if (!ret || (type == SIX_LOCK_write && !try)) {
- smp_mb();
- break;
- }
- } while (!atomic_try_cmpxchg_acquire(&lock->state, &old, old + l[type].lock_val));
-
- EBUG_ON(ret && !(atomic_read(&lock->state) & l[type].held_mask));
- }
-
- if (ret > 0)
- six_set_owner(lock, type, old, task);
-
- EBUG_ON(type == SIX_LOCK_write && try && ret <= 0 &&
- (atomic_read(&lock->state) & SIX_LOCK_HELD_write));
-
- return ret;
-}
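The percpu reader mode comment above sketches a lock built from per-thread flags plus full memory barriers. A toy userspace illustration of that idea using C11 atomics (an assumption for clarity, not kernel code; indices 0 and 1 stand in for the two competing sides):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool claimed[2];         /* claimed[i]: side i currently claims the lock */

static bool toy_trylock(int self)
{
        atomic_store_explicit(&claimed[self], true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* plays the role of smp_mb() */

        if (atomic_load_explicit(&claimed[!self], memory_order_relaxed)) {
                /* Raced with the other side: back off and retry/sleep, as described above. */
                atomic_store_explicit(&claimed[self], false, memory_order_relaxed);
                return false;
        }
        return true;
}

static void toy_unlock(int self)
{
        atomic_store_explicit(&claimed[self], false, memory_order_release);
}

With sequentially consistent fences on both sides, at most one of the two can observe the other's flag as clear, so mutual exclusion holds; both may fail simultaneously, which is exactly the spurious-failure case the comment describes.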
-
-static void __six_lock_wakeup(struct six_lock *lock, enum six_lock_type lock_type)
-{
- struct six_lock_waiter *w, *next;
- struct task_struct *task;
- bool saw_one;
- int ret;
-again:
- ret = 0;
- saw_one = false;
- raw_spin_lock(&lock->wait_lock);
-
- list_for_each_entry_safe(w, next, &lock->wait_list, list) {
- if (w->lock_want != lock_type)
- continue;
-
- if (saw_one && lock_type != SIX_LOCK_read)
- goto unlock;
- saw_one = true;
-
- ret = __do_six_trylock(lock, lock_type, w->task, false);
- if (ret <= 0)
- goto unlock;
-
- /*
- * Similar to percpu_rwsem_wake_function(), we need to guard
- * against the wakee noticing w->lock_acquired, returning, and
- * then exiting before we do the wakeup:
- */
- task = get_task_struct(w->task);
- __list_del(w->list.prev, w->list.next);
- /*
- * The release barrier here ensures the ordering of the
- * __list_del before setting w->lock_acquired; @w is on the
- * stack of the thread doing the waiting and will be reused
- * after it sees w->lock_acquired with no other locking:
- * pairs with smp_load_acquire() in six_lock_slowpath()
- */
- smp_store_release(&w->lock_acquired, true);
- wake_up_process(task);
- put_task_struct(task);
- }
-
- six_clear_bitmask(lock, SIX_LOCK_WAITING_read << lock_type);
-unlock:
- raw_spin_unlock(&lock->wait_lock);
-
- if (ret < 0) {
- lock_type = -ret - 1;
- goto again;
- }
-}
-
-__always_inline
-static void six_lock_wakeup(struct six_lock *lock, u32 state,
- enum six_lock_type lock_type)
-{
- if (lock_type == SIX_LOCK_write && (state & SIX_LOCK_HELD_read))
- return;
-
- if (!(state & (SIX_LOCK_WAITING_read << lock_type)))
- return;
-
- __six_lock_wakeup(lock, lock_type);
-}
-
-__always_inline
-static bool do_six_trylock(struct six_lock *lock, enum six_lock_type type, bool try)
-{
- int ret;
-
- ret = __do_six_trylock(lock, type, current, try);
- if (ret < 0)
- __six_lock_wakeup(lock, -ret - 1);
-
- return ret > 0;
-}
-
-/**
- * six_trylock_ip - attempt to take a six lock without blocking
- * @lock: lock to take
- * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
- * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
- *
- * Return: true on success, false on failure.
- */
-bool six_trylock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip)
-{
- if (!do_six_trylock(lock, type, true))
- return false;
-
- if (type != SIX_LOCK_write)
- six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read, ip);
- return true;
-}
-EXPORT_SYMBOL_GPL(six_trylock_ip);
-
-/**
- * six_relock_ip - attempt to re-take a lock that was held previously
- * @lock: lock to take
- * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
- * @seq: lock sequence number obtained from six_lock_seq() while lock was
- * held previously
- * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
- *
- * Return: true on success, false on failure.
- */
-bool six_relock_ip(struct six_lock *lock, enum six_lock_type type,
- unsigned seq, unsigned long ip)
-{
- if (six_lock_seq(lock) != seq || !six_trylock_ip(lock, type, ip))
- return false;
-
- if (six_lock_seq(lock) != seq) {
- six_unlock_ip(lock, type, ip);
- return false;
- }
-
- return true;
-}
-EXPORT_SYMBOL_GPL(six_relock_ip);
-
-#ifdef CONFIG_BCACHEFS_SIX_OPTIMISTIC_SPIN
-
-static inline bool six_owner_running(struct six_lock *lock)
-{
- /*
- * When there's no owner, we might have preempted between the owner
- * acquiring the lock and setting the owner field. If we're an RT task
- * that will live-lock because we won't let the owner complete.
- */
- rcu_read_lock();
- struct task_struct *owner = READ_ONCE(lock->owner);
- bool ret = owner ? owner_on_cpu(owner) : !rt_or_dl_task(current);
- rcu_read_unlock();
-
- return ret;
-}
-
-static inline bool six_optimistic_spin(struct six_lock *lock,
- struct six_lock_waiter *wait,
- enum six_lock_type type)
-{
- unsigned loop = 0;
- u64 end_time;
-
- if (type == SIX_LOCK_write)
- return false;
-
- if (lock->wait_list.next != &wait->list)
- return false;
-
- if (atomic_read(&lock->state) & SIX_LOCK_NOSPIN)
- return false;
-
- preempt_disable();
- end_time = sched_clock() + 10 * NSEC_PER_USEC;
-
- while (!need_resched() && six_owner_running(lock)) {
- /*
- * Ensures that writes to the waitlist entry happen after we see
- * wait->lock_acquired: pairs with the smp_store_release in
- * __six_lock_wakeup
- */
- if (smp_load_acquire(&wait->lock_acquired)) {
- preempt_enable();
- return true;
- }
-
- if (!(++loop & 0xf) && (time_after64(sched_clock(), end_time))) {
- six_set_bitmask(lock, SIX_LOCK_NOSPIN);
- break;
- }
-
- /*
- * The cpu_relax() call is a compiler barrier which forces
- * everything in this loop to be re-loaded. We don't need
- * memory barriers as we'll eventually observe the right
- * values at the cost of a few extra spins.
- */
- cpu_relax();
- }
-
- preempt_enable();
- return false;
-}
-
-#else /* CONFIG_BCACHEFS_SIX_OPTIMISTIC_SPIN */
-
-static inline bool six_optimistic_spin(struct six_lock *lock,
- struct six_lock_waiter *wait,
- enum six_lock_type type)
-{
- return false;
-}
-
-#endif
-
-noinline
-static int six_lock_slowpath(struct six_lock *lock, enum six_lock_type type,
- struct six_lock_waiter *wait,
- six_lock_should_sleep_fn should_sleep_fn, void *p,
- unsigned long ip)
-{
- int ret = 0;
-
- if (type == SIX_LOCK_write) {
- EBUG_ON(atomic_read(&lock->state) & SIX_LOCK_HELD_write);
- atomic_add(SIX_LOCK_HELD_write, &lock->state);
- smp_mb__after_atomic();
- }
-
- trace_contention_begin(lock, 0);
- lock_contended(&lock->dep_map, ip);
-
- wait->task = current;
- wait->lock_want = type;
- wait->lock_acquired = false;
-
- raw_spin_lock(&lock->wait_lock);
- six_set_bitmask(lock, SIX_LOCK_WAITING_read << type);
- /*
- * Retry taking the lock after taking waitlist lock, in case we raced
- * with an unlock:
- */
- ret = __do_six_trylock(lock, type, current, false);
- if (ret <= 0) {
- wait->start_time = local_clock();
-
- if (!list_empty(&lock->wait_list)) {
- struct six_lock_waiter *last =
- list_last_entry(&lock->wait_list,
- struct six_lock_waiter, list);
-
- if (time_before_eq64(wait->start_time, last->start_time))
- wait->start_time = last->start_time + 1;
- }
-
- list_add_tail(&wait->list, &lock->wait_list);
- }
- raw_spin_unlock(&lock->wait_lock);
-
- if (unlikely(ret > 0)) {
- ret = 0;
- goto out;
- }
-
- if (unlikely(ret < 0)) {
- __six_lock_wakeup(lock, -ret - 1);
- ret = 0;
- }
-
- if (six_optimistic_spin(lock, wait, type))
- goto out;
-
- while (1) {
- set_current_state(TASK_UNINTERRUPTIBLE);
-
- /*
- * Ensures that writes to the waitlist entry happen after we see
- * wait->lock_acquired: pairs with the smp_store_release in
- * __six_lock_wakeup
- */
- if (smp_load_acquire(&wait->lock_acquired))
- break;
-
- ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
- if (unlikely(ret)) {
- bool acquired;
-
- /*
- * If should_sleep_fn() returns an error, we are
- * required to return that error even if we already
- * acquired the lock - should_sleep_fn() might have
- * modified external state (e.g. when the deadlock cycle
- * detector in bcachefs issued a transaction restart)
- */
- raw_spin_lock(&lock->wait_lock);
- acquired = wait->lock_acquired;
- if (!acquired)
- list_del(&wait->list);
- raw_spin_unlock(&lock->wait_lock);
-
- if (unlikely(acquired))
- do_six_unlock_type(lock, type);
- break;
- }
-
- schedule();
- }
-
- __set_current_state(TASK_RUNNING);
-out:
- if (ret && type == SIX_LOCK_write) {
- six_clear_bitmask(lock, SIX_LOCK_HELD_write);
- six_lock_wakeup(lock, atomic_read(&lock->state), SIX_LOCK_read);
- }
- trace_contention_end(lock, 0);
-
- return ret;
-}
-
-/**
- * six_lock_ip_waiter - take a lock, with full waitlist interface
- * @lock: lock to take
- * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
- * @wait: pointer to wait object, which will be added to lock's waitlist
- * @should_sleep_fn: callback run after adding to waitlist, immediately prior
- * to scheduling
- * @p: passed through to @should_sleep_fn
- * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
- *
- * This is the most general six_lock() variant, with parameters to support full
- * cycle detection for deadlock avoidance.
- *
- * The code calling this function must implement tracking of held locks, and the
- * @wait object should be embedded into the struct that tracks held locks -
- * which must also be accessible in a thread-safe way.
- *
- * @should_sleep_fn should invoke the cycle detector; it should walk each
- * lock's waiters, and for each waiter recursively walk their held locks.
- *
- * When this function must block, @wait will be added to @lock's waitlist before
- * calling trylock, and before calling @should_sleep_fn, and @wait will not be
- * removed from the lock waitlist until the lock has been successfully acquired,
- * or we abort.
- *
- * @wait.start_time will be monotonically increasing for any given waitlist, and
- * thus may be used as a loop cursor.
- *
- * Return: 0 on success, or the return code from @should_sleep_fn on failure.
- */
-int six_lock_ip_waiter(struct six_lock *lock, enum six_lock_type type,
- struct six_lock_waiter *wait,
- six_lock_should_sleep_fn should_sleep_fn, void *p,
- unsigned long ip)
-{
- int ret;
-
- wait->start_time = 0;
-
- if (type != SIX_LOCK_write)
- six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, ip);
-
- ret = do_six_trylock(lock, type, true) ? 0
- : six_lock_slowpath(lock, type, wait, should_sleep_fn, p, ip);
-
- if (ret && type != SIX_LOCK_write)
- six_release(&lock->dep_map, ip);
- if (!ret)
- lock_acquired(&lock->dep_map, ip);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(six_lock_ip_waiter);
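A hypothetical caller sketch of the interface documented above (my_transaction and my_cycle_detector are assumptions, not part of bcachefs): the waiter is embedded in the structure that tracks held locks, and should_sleep_fn runs the cycle detector, returning an error to abort the lock operation:

struct my_transaction {
        struct six_lock_waiter  wait;   /* reachable by other threads walking waitlists */
        /* ... per-thread list of held locks ... */
};

/* Hypothetical cycle detector supplied by the upper layer: */
bool my_cycle_detector(struct my_transaction *trans, struct six_lock *lock);

static int my_should_sleep_fn(struct six_lock *lock, void *p)
{
        struct my_transaction *trans = p;

        /* Returning nonzero aborts the lock operation with that error. */
        return my_cycle_detector(trans, lock) ? -EDEADLK : 0;
}

static int my_lock_node(struct my_transaction *trans, struct six_lock *lock)
{
        return six_lock_ip_waiter(lock, SIX_LOCK_intent, &trans->wait,
                                  my_should_sleep_fn, trans, _THIS_IP_);
}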
-
-__always_inline
-static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
-{
- u32 state;
-
- if (type == SIX_LOCK_intent)
- lock->owner = NULL;
-
- if (type == SIX_LOCK_read &&
- lock->readers) {
- smp_mb(); /* unlock barrier */
- this_cpu_dec(*lock->readers);
- smp_mb(); /* between unlocking and checking for waiters */
- state = atomic_read(&lock->state);
- } else {
- u32 v = l[type].lock_val;
-
- if (type != SIX_LOCK_read)
- v += atomic_read(&lock->state) & SIX_LOCK_NOSPIN;
-
- EBUG_ON(!(atomic_read(&lock->state) & l[type].held_mask));
- state = atomic_sub_return_release(v, &lock->state);
- }
-
- six_lock_wakeup(lock, state, l[type].unlock_wakeup);
-}
-
-/**
- * six_unlock_ip - drop a six lock
- * @lock: lock to unlock
- * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
- * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
- *
- * When a lock is held multiple times (because six_lock_increment() was used),
- * this decrements the 'lock held' counter by one.
- *
- * For example:
- * six_lock_read(&foo->lock); read count 1
- * six_lock_increment(&foo->lock, SIX_LOCK_read); read count 2
- * six_unlock_type(&foo->lock, SIX_LOCK_read); read count 1
- * six_unlock_type(&foo->lock, SIX_LOCK_read); read count 0
- */
-void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip)
-{
- EBUG_ON(type == SIX_LOCK_write &&
- !(atomic_read(&lock->state) & SIX_LOCK_HELD_intent));
- EBUG_ON((type == SIX_LOCK_write ||
- type == SIX_LOCK_intent) &&
- lock->owner != current);
-
- if (type != SIX_LOCK_write)
- six_release(&lock->dep_map, ip);
- else
- lock->seq++;
-
- if (type == SIX_LOCK_intent &&
- lock->intent_lock_recurse) {
- --lock->intent_lock_recurse;
- return;
- }
-
- do_six_unlock_type(lock, type);
-}
-EXPORT_SYMBOL_GPL(six_unlock_ip);
-
-/**
- * six_lock_downgrade - convert an intent lock to a read lock
- * @lock: lock to downgrade
- *
- * @lock will have read count incremented and intent count decremented
- */
-void six_lock_downgrade(struct six_lock *lock)
-{
- six_lock_increment(lock, SIX_LOCK_read);
- six_unlock_intent(lock);
-}
-EXPORT_SYMBOL_GPL(six_lock_downgrade);
-
-/**
- * six_lock_tryupgrade - attempt to convert read lock to an intent lock
- * @lock: lock to upgrade
- *
- * On success, @lock will have intent count incremented and read count
- * decremented
- *
- * Return: true on success, false on failure
- */
-bool six_lock_tryupgrade(struct six_lock *lock)
-{
- u32 old = atomic_read(&lock->state), new;
-
- do {
- new = old;
-
- if (new & SIX_LOCK_HELD_intent)
- return false;
-
- if (!lock->readers) {
- EBUG_ON(!(new & SIX_LOCK_HELD_read));
- new -= l[SIX_LOCK_read].lock_val;
- }
-
- new |= SIX_LOCK_HELD_intent;
- } while (!atomic_try_cmpxchg_acquire(&lock->state, &old, new));
-
- if (lock->readers)
- this_cpu_dec(*lock->readers);
-
- six_set_owner(lock, SIX_LOCK_intent, old, current);
-
- return true;
-}
-EXPORT_SYMBOL_GPL(six_lock_tryupgrade);
-
-/**
- * six_trylock_convert - attempt to convert a held lock from one type to another
- * @lock: lock to upgrade
- * @from: SIX_LOCK_read or SIX_LOCK_intent
- * @to: SIX_LOCK_read or SIX_LOCK_intent
- *
- * On success, @lock will have intent count incremented and read count
- * decremented
- *
- * Return: true on success, false on failure
- */
-bool six_trylock_convert(struct six_lock *lock,
- enum six_lock_type from,
- enum six_lock_type to)
-{
- EBUG_ON(to == SIX_LOCK_write || from == SIX_LOCK_write);
-
- if (to == from)
- return true;
-
- if (to == SIX_LOCK_read) {
- six_lock_downgrade(lock);
- return true;
- } else {
- return six_lock_tryupgrade(lock);
- }
-}
-EXPORT_SYMBOL_GPL(six_trylock_convert);
-
-/**
- * six_lock_increment - increase held lock count on a lock that is already held
- * @lock: lock to increment
- * @type: SIX_LOCK_read or SIX_LOCK_intent
- *
- * @lock must already be held, with a lock type that is greater than or equal to
- * @type
- *
- * A corresponding six_unlock_type() call will be required for @lock to be fully
- * unlocked.
- */
-void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
-{
- six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, _RET_IP_);
-
- /* XXX: assert already locked, and that we don't overflow: */
-
- switch (type) {
- case SIX_LOCK_read:
- if (lock->readers) {
- this_cpu_inc(*lock->readers);
- } else {
- EBUG_ON(!(atomic_read(&lock->state) &
- (SIX_LOCK_HELD_read|
- SIX_LOCK_HELD_intent)));
- atomic_add(l[type].lock_val, &lock->state);
- }
- break;
- case SIX_LOCK_intent:
- EBUG_ON(!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent));
- lock->intent_lock_recurse++;
- break;
- case SIX_LOCK_write:
- BUG();
- break;
- }
-}
-EXPORT_SYMBOL_GPL(six_lock_increment);
-
-/**
- * six_lock_wakeup_all - wake up all waiters on @lock
- * @lock: lock to wake up waiters for
- *
- * Waking up waiters will cause them to re-run should_sleep_fn, which may then
- * abort the lock operation.
- *
- * This function is never needed in a bug-free program; it's only useful in
- * debug code, e.g. to determine if a cycle detector is at fault.
- */
-void six_lock_wakeup_all(struct six_lock *lock)
-{
- u32 state = atomic_read(&lock->state);
- struct six_lock_waiter *w;
-
- six_lock_wakeup(lock, state, SIX_LOCK_read);
- six_lock_wakeup(lock, state, SIX_LOCK_intent);
- six_lock_wakeup(lock, state, SIX_LOCK_write);
-
- raw_spin_lock(&lock->wait_lock);
- list_for_each_entry(w, &lock->wait_list, list)
- wake_up_process(w->task);
- raw_spin_unlock(&lock->wait_lock);
-}
-EXPORT_SYMBOL_GPL(six_lock_wakeup_all);
-
-/**
- * six_lock_counts - return held lock counts, for each lock type
- * @lock: lock to return counters for
- *
- * Return: the number of times a lock is held for read, intent and write.
- */
-struct six_lock_count six_lock_counts(struct six_lock *lock)
-{
- struct six_lock_count ret;
-
- ret.n[SIX_LOCK_read] = !lock->readers
- ? atomic_read(&lock->state) & SIX_LOCK_HELD_read
- : pcpu_read_count(lock);
- ret.n[SIX_LOCK_intent] = !!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent) +
- lock->intent_lock_recurse;
- ret.n[SIX_LOCK_write] = !!(atomic_read(&lock->state) & SIX_LOCK_HELD_write);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(six_lock_counts);
-
-/**
- * six_lock_readers_add - directly manipulate reader count of a lock
- * @lock: lock to add/subtract readers for
- * @nr: reader count to add/subtract
- *
- * When an upper layer is implementing lock reentrancy, we may have both read
- * and intent locks on the same lock.
- *
- * When we need to take a write lock, the read locks will cause self-deadlock,
- * because six locks themselves do not track which read locks are held by the
- * current thread and which are held by a different thread - they do no
- * per-thread tracking of held locks.
- *
- * The upper layer that is tracking held locks may however, if trylock() has
- * failed, count up its own read locks, subtract them, take the write lock, and
- * then re-add them.
- *
- * As in any other situation when taking a write lock, @lock must be held for
- * intent one (or more) times, so @lock will never be left unlocked.
- */
-void six_lock_readers_add(struct six_lock *lock, int nr)
-{
- if (lock->readers) {
- this_cpu_add(*lock->readers, nr);
- } else {
- EBUG_ON((int) (atomic_read(&lock->state) & SIX_LOCK_HELD_read) + nr < 0);
- /* reader count starts at bit 0 */
- atomic_add(nr, &lock->state);
- }
-}
-EXPORT_SYMBOL_GPL(six_lock_readers_add);
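A sketch of the pattern described above, assuming the caller tracks how many read locks it itself holds on @lock and already holds the required intent lock (my_take_write_lock is illustrative, not an existing helper):

static int my_take_write_lock(struct six_lock *lock, unsigned my_read_count)
{
        int ret;

        if (six_trylock_write(lock))
                return 0;

        /* Drop our own read locks from the count, take the write lock, re-add them: */
        six_lock_readers_add(lock, -(int) my_read_count);
        ret = six_lock_write(lock, NULL, NULL);
        six_lock_readers_add(lock, my_read_count);

        return ret;
}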
-
-/**
- * six_lock_exit - release resources held by a lock prior to freeing
- * @lock: lock to exit
- *
- * When a lock was initialized in percpu mode (SIX_LOCK_INIT_PCPU), this is
- * required to free the percpu read counts.
- */
-void six_lock_exit(struct six_lock *lock)
-{
- WARN_ON(lock->readers && pcpu_read_count(lock));
- WARN_ON(atomic_read(&lock->state) & SIX_LOCK_HELD_read);
-
- free_percpu(lock->readers);
- lock->readers = NULL;
-}
-EXPORT_SYMBOL_GPL(six_lock_exit);
-
-void __six_lock_init(struct six_lock *lock, const char *name,
- struct lock_class_key *key, enum six_lock_init_flags flags)
-{
- atomic_set(&lock->state, 0);
- raw_spin_lock_init(&lock->wait_lock);
- INIT_LIST_HEAD(&lock->wait_list);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- debug_check_no_locks_freed((void *) lock, sizeof(*lock));
- lockdep_init_map(&lock->dep_map, name, key, 0);
-#endif
-
- /*
- * Don't assume that we have real percpu variables available in
- * userspace:
- */
-#ifdef __KERNEL__
- if (flags & SIX_LOCK_INIT_PCPU) {
- /*
- * We don't return an error here on memory allocation failure
- * since percpu is an optimization, and locks will work with the
- * same semantics in non-percpu mode: callers can check for
- * failure if they wish by checking lock->readers, but generally
- * will not want to treat it as an error.
- */
- lock->readers = alloc_percpu(unsigned);
- }
-#endif
-}
-EXPORT_SYMBOL_GPL(__six_lock_init);
diff --git a/fs/bcachefs/six.h b/fs/bcachefs/six.h
deleted file mode 100644
index 68d46fd7f391..000000000000
--- a/fs/bcachefs/six.h
+++ /dev/null
@@ -1,386 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _LINUX_SIX_H
-#define _LINUX_SIX_H
-
-/**
- * DOC: SIX locks overview
- *
- * Shared/intent/exclusive locks: sleepable read/write locks, like rw semaphores
- * but with an additional state: read/shared, intent, exclusive/write
- *
- * The purpose of the intent state is to allow for greater concurrency on tree
- * structures without deadlocking. In general, a read can't be upgraded to a
- * write lock without deadlocking, so an operation that updates multiple nodes
- * will have to take write locks for the full duration of the operation.
- *
- * But by adding an intent state, which is exclusive with other intent locks but
- * not with readers, we can take intent locks at the start of the operation,
- * and then take write locks only for the actual update to each individual
- * nodes, without deadlocking.
- *
- * Example usage:
- * six_lock_read(&foo->lock);
- * six_unlock_read(&foo->lock);
- *
- * An intent lock must be held before taking a write lock:
- * six_lock_intent(&foo->lock);
- * six_lock_write(&foo->lock);
- * six_unlock_write(&foo->lock);
- * six_unlock_intent(&foo->lock);
- *
- * Other operations:
- * six_trylock_read()
- * six_trylock_intent()
- * six_trylock_write()
- *
- * six_lock_downgrade() convert from intent to read
- * six_lock_tryupgrade() attempt to convert from read to intent, may fail
- *
- * There are also interfaces that take the lock type as an enum:
- *
- * six_lock_type(&foo->lock, SIX_LOCK_read);
- * six_trylock_convert(&foo->lock, SIX_LOCK_read, SIX_LOCK_intent)
- * six_lock_type(&foo->lock, SIX_LOCK_write);
- * six_unlock_type(&foo->lock, SIX_LOCK_write);
- * six_unlock_type(&foo->lock, SIX_LOCK_intent);
- *
- * Lock sequence numbers - unlock(), relock():
- *
- * Locks embed sequence numbers, which are incremented on write lock/unlock.
- * This allows locks to be dropped and then retaken iff the state they protect
- * hasn't changed; this makes it much easier to avoid holding locks while e.g.
- * doing IO or allocating memory.
- *
- * Example usage:
- * six_lock_read(&foo->lock);
- * u32 seq = six_lock_seq(&foo->lock);
- * six_unlock_read(&foo->lock);
- *
- * some_operation_that_may_block();
- *
- * if (six_relock_read(&foo->lock, seq)) { ... }
- *
- * If the relock operation succeeds, it is as if the lock was never unlocked.
- *
- * Reentrancy:
- *
- * Six locks are not by themselves reentrant, but have counters for both the
- * read and intent states that can be used to provide reentrancy by an upper
- * layer that tracks held locks. If a lock is known to already be held in the
- * read or intent state, six_lock_increment() can be used to bump the "lock
- * held in this state" counter, increasing the number of unlock calls that
- * will be required to fully unlock it.
- *
- * Example usage:
- * six_lock_read(&foo->lock);
- * six_lock_increment(&foo->lock, SIX_LOCK_read);
- * six_unlock_read(&foo->lock);
- * six_unlock_read(&foo->lock);
- * foo->lock is now fully unlocked.
- *
- * Since the intent state supersedes read, it's legal to increment the read
- * counter when holding an intent lock, but not the reverse.
- *
- * A lock may only be held once for write: six_lock_increment(.., SIX_LOCK_write)
- * is not legal.
- *
- * should_sleep_fn:
- *
- * There is a six_lock() variant that takes a function pointer that is called
- * immediately prior to schedule() when blocking, and may return an error to
- * abort.
- *
- * One possible use for this feature is when objects being locked are part of
- * a cache and may be reused, and lock ordering is based on a property of the
- * object that will change when the object is reused - i.e. logical key order.
- *
- * If looking up an object in the cache may race with object reuse, and lock
- * ordering is required to prevent deadlock, object reuse may change the
- * correct lock order for that object and cause a deadlock. should_sleep_fn
- * can be used to check if the object is still the object we want and avoid
- * this deadlock.
- *
- * Wait list entry interface:
- *
- * There is a six_lock() variant, six_lock_waiter(), that takes a pointer to a
- * wait list entry. By embedding six_lock_waiter into another object, and by
- * traversing lock waitlists, it is then possible for an upper layer to
- * implement full cycle detection for deadlock avoidance.
- *
- * should_sleep_fn should be used for invoking the cycle detector, walking the
- * graph of held locks to check for a deadlock. The upper layer must track
- * held locks for each thread, and each thread's held locks must be reachable
- * from its six_lock_waiter object.
- *
- * six_lock_waiter() will add the wait object to the waitlist before retrying
- * the trylock, and before calling should_sleep_fn, and the wait object will not
- * be removed from the waitlist until either the lock has been successfully
- * acquired, or we aborted because should_sleep_fn returned an error.
- *
- * Also, six_lock_waiter contains a timestamp, and waiters on a waitlist will
- * have timestamps in strictly ascending order - this is so the timestamp can
- * be used as a cursor for lock graph traversal.
- */
-
-#include <linux/lockdep.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-
-enum six_lock_type {
- SIX_LOCK_read,
- SIX_LOCK_intent,
- SIX_LOCK_write,
-};
-
-struct six_lock {
- atomic_t state;
- u32 seq;
- unsigned intent_lock_recurse;
- struct task_struct *owner;
- unsigned __percpu *readers;
- raw_spinlock_t wait_lock;
- struct list_head wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
-struct six_lock_waiter {
- struct list_head list;
- struct task_struct *task;
- enum six_lock_type lock_want;
- bool lock_acquired;
- u64 start_time;
-};
-
-typedef int (*six_lock_should_sleep_fn)(struct six_lock *lock, void *);
-
-void six_lock_exit(struct six_lock *lock);
-
-enum six_lock_init_flags {
- SIX_LOCK_INIT_PCPU = 1U << 0,
-};
-
-void __six_lock_init(struct six_lock *lock, const char *name,
- struct lock_class_key *key, enum six_lock_init_flags flags);
-
-/**
- * six_lock_init - initialize a six lock
- * @lock: lock to initialize
- * @flags: optional flags, i.e. SIX_LOCK_INIT_PCPU
- */
-#define six_lock_init(lock, flags) \
-do { \
- static struct lock_class_key __key; \
- \
- __six_lock_init((lock), #lock, &__key, flags); \
-} while (0)
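For instance (a sketch; struct my_node is a placeholder, not an existing type), a percpu-mode lock pairs six_lock_init(..., SIX_LOCK_INIT_PCPU) with six_lock_exit() at teardown:

struct my_node {
        struct six_lock lock;
};

static void my_node_init(struct my_node *node)
{
        six_lock_init(&node->lock, SIX_LOCK_INIT_PCPU); /* percpu reader counts, if available */
}

static void my_node_exit(struct my_node *node)
{
        six_lock_exit(&node->lock);     /* frees the percpu reader counts */
}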
-
-/**
- * six_lock_seq - obtain current lock sequence number
- * @lock: six_lock to obtain sequence number for
- *
- * @lock should be held for read or intent, and not write
- *
- * By saving the lock sequence number, we can unlock @lock and then (typically
- * after some blocking operation) attempt to relock it: the relock will succeed
- * if the sequence number hasn't changed, meaning no write locks have been taken
- * and state corresponding to what @lock protects is still valid.
- */
-static inline u32 six_lock_seq(const struct six_lock *lock)
-{
- return lock->seq;
-}
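A caller might use the sequence number like this (a sketch; struct foo and do_blocking_work() are placeholders, and the per-type helpers used here are generated further down in this header): drop the lock around a blocking operation and retake it only if nothing changed, falling back to a full retry otherwise:

struct foo {
        struct six_lock lock;
};

void do_blocking_work(void);            /* placeholder */

static bool my_read_foo(struct foo *foo)
{
        if (six_lock_read(&foo->lock, NULL, NULL))
                return false;

        u32 seq = six_lock_seq(&foo->lock);
        six_unlock_read(&foo->lock);

        do_blocking_work();

        if (!six_relock_read(&foo->lock, seq))
                return false;           /* a writer intervened; caller must retry */

        /* ... state protected by foo->lock is unchanged, use it here ... */
        six_unlock_read(&foo->lock);
        return true;
}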
-
-bool six_trylock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip);
-
-/**
- * six_trylock_type - attempt to take a six lock without blocking
- * @lock: lock to take
- * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
- *
- * Return: true on success, false on failure.
- */
-static inline bool six_trylock_type(struct six_lock *lock, enum six_lock_type type)
-{
- return six_trylock_ip(lock, type, _THIS_IP_);
-}
-
-int six_lock_ip_waiter(struct six_lock *lock, enum six_lock_type type,
- struct six_lock_waiter *wait,
- six_lock_should_sleep_fn should_sleep_fn, void *p,
- unsigned long ip);
-
-/**
- * six_lock_waiter - take a lock, with full waitlist interface
- * @lock: lock to take
- * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
- * @wait: pointer to wait object, which will be added to lock's waitlist
- * @should_sleep_fn: callback run after adding to waitlist, immediately prior
- * to scheduling
- * @p: passed through to @should_sleep_fn
- *
- * This is a convenience wrapper around six_lock_ip_waiter(), see that function
- * for full documentation.
- *
- * Return: 0 on success, or the return code from @should_sleep_fn on failure.
- */
-static inline int six_lock_waiter(struct six_lock *lock, enum six_lock_type type,
- struct six_lock_waiter *wait,
- six_lock_should_sleep_fn should_sleep_fn, void *p)
-{
- return six_lock_ip_waiter(lock, type, wait, should_sleep_fn, p, _THIS_IP_);
-}
-
-/**
- * six_lock_ip - take a six lock
- * @lock: lock to take
- * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
- * @should_sleep_fn: callback run after adding to waitlist, immediately prior
- * to scheduling
- * @p: passed through to @should_sleep_fn
- * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
- *
- * Return: 0 on success, or the return code from @should_sleep_fn on failure.
- */
-static inline int six_lock_ip(struct six_lock *lock, enum six_lock_type type,
- six_lock_should_sleep_fn should_sleep_fn, void *p,
- unsigned long ip)
-{
- struct six_lock_waiter wait;
-
- return six_lock_ip_waiter(lock, type, &wait, should_sleep_fn, p, ip);
-}
-
-/**
- * six_lock_type - take a six lock
- * @lock: lock to take
- * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
- * @should_sleep_fn: callback run after adding to waitlist, immediately prior
- * to scheduling
- * @p: passed through to @should_sleep_fn
- *
- * Return: 0 on success, or the return code from @should_sleep_fn on failure.
- */
-static inline int six_lock_type(struct six_lock *lock, enum six_lock_type type,
- six_lock_should_sleep_fn should_sleep_fn, void *p)
-{
- struct six_lock_waiter wait;
-
- return six_lock_ip_waiter(lock, type, &wait, should_sleep_fn, p, _THIS_IP_);
-}
-
-bool six_relock_ip(struct six_lock *lock, enum six_lock_type type,
- unsigned seq, unsigned long ip);
-
-/**
- * six_relock_type - attempt to re-take a lock that was held previously
- * @lock: lock to take
- * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
- * @seq: lock sequence number obtained from six_lock_seq() while lock was
- * held previously
- *
- * Return: true on success, false on failure.
- */
-static inline bool six_relock_type(struct six_lock *lock, enum six_lock_type type,
- unsigned seq)
-{
- return six_relock_ip(lock, type, seq, _THIS_IP_);
-}
-
-void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip);
-
-/**
- * six_unlock_type - drop a six lock
- * @lock: lock to unlock
- * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
- *
- * When a lock is held multiple times (because six_lock_increment() was used),
- * this decrements the 'lock held' counter by one.
- *
- * For example:
- * six_lock_read(&foo->lock); read count 1
- * six_lock_increment(&foo->lock, SIX_LOCK_read); read count 2
- * six_unlock_type(&foo->lock, SIX_LOCK_read); read count 1
- * six_unlock_type(&foo->lock, SIX_LOCK_read); read count 0
- */
-static inline void six_unlock_type(struct six_lock *lock, enum six_lock_type type)
-{
- six_unlock_ip(lock, type, _THIS_IP_);
-}
-
-#define __SIX_LOCK(type) \
-static inline bool six_trylock_ip_##type(struct six_lock *lock, unsigned long ip)\
-{ \
- return six_trylock_ip(lock, SIX_LOCK_##type, ip); \
-} \
- \
-static inline bool six_trylock_##type(struct six_lock *lock) \
-{ \
- return six_trylock_ip(lock, SIX_LOCK_##type, _THIS_IP_); \
-} \
- \
-static inline int six_lock_ip_waiter_##type(struct six_lock *lock, \
- struct six_lock_waiter *wait, \
- six_lock_should_sleep_fn should_sleep_fn, void *p,\
- unsigned long ip) \
-{ \
- return six_lock_ip_waiter(lock, SIX_LOCK_##type, wait, should_sleep_fn, p, ip);\
-} \
- \
-static inline int six_lock_ip_##type(struct six_lock *lock, \
- six_lock_should_sleep_fn should_sleep_fn, void *p, \
- unsigned long ip) \
-{ \
- return six_lock_ip(lock, SIX_LOCK_##type, should_sleep_fn, p, ip);\
-} \
- \
-static inline bool six_relock_ip_##type(struct six_lock *lock, u32 seq, unsigned long ip)\
-{ \
- return six_relock_ip(lock, SIX_LOCK_##type, seq, ip); \
-} \
- \
-static inline bool six_relock_##type(struct six_lock *lock, u32 seq) \
-{ \
- return six_relock_ip(lock, SIX_LOCK_##type, seq, _THIS_IP_); \
-} \
- \
-static inline int six_lock_##type(struct six_lock *lock, \
- six_lock_should_sleep_fn fn, void *p)\
-{ \
- return six_lock_ip_##type(lock, fn, p, _THIS_IP_); \
-} \
- \
-static inline void six_unlock_ip_##type(struct six_lock *lock, unsigned long ip) \
-{ \
- six_unlock_ip(lock, SIX_LOCK_##type, ip); \
-} \
- \
-static inline void six_unlock_##type(struct six_lock *lock) \
-{ \
- six_unlock_ip(lock, SIX_LOCK_##type, _THIS_IP_); \
-}
-
-__SIX_LOCK(read)
-__SIX_LOCK(intent)
-__SIX_LOCK(write)
-#undef __SIX_LOCK
-
-void six_lock_downgrade(struct six_lock *);
-bool six_lock_tryupgrade(struct six_lock *);
-bool six_trylock_convert(struct six_lock *, enum six_lock_type,
- enum six_lock_type);
-
-void six_lock_increment(struct six_lock *, enum six_lock_type);
-
-void six_lock_wakeup_all(struct six_lock *);
-
-struct six_lock_count {
- unsigned n[3];
-};
-
-struct six_lock_count six_lock_counts(struct six_lock *);
-void six_lock_readers_add(struct six_lock *, int);
-
-#endif /* _LINUX_SIX_H */
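Tying the overview together, a sketch of the intent-then-write pattern the header describes (reusing the hypothetical struct my_node from the earlier sketch; my_prepare_update() and my_commit_update() are likewise placeholders): hold intent for the whole operation so other updaters are excluded, and take the write lock only around the actual modification so readers are blocked as briefly as possible:

void my_prepare_update(struct my_node *node);   /* placeholders */
void my_commit_update(struct my_node *node);

static int my_update_node(struct my_node *node)
{
        int ret = six_lock_intent(&node->lock, NULL, NULL);
        if (ret)
                return ret;

        my_prepare_update(node);                /* readers may still access the node here */

        ret = six_lock_write(&node->lock, NULL, NULL);
        if (!ret) {
                my_commit_update(node);         /* exclusive: no readers, no other writers */
                six_unlock_write(&node->lock);
        }

        six_unlock_intent(&node->lock);
        return ret;
}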
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
deleted file mode 100644
index ae57638506c3..000000000000
--- a/fs/bcachefs/snapshot.c
+++ /dev/null
@@ -1,1815 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "bkey_buf.h"
-#include "btree_key_cache.h"
-#include "btree_update.h"
-#include "buckets.h"
-#include "errcode.h"
-#include "error.h"
-#include "fs.h"
-#include "recovery_passes.h"
-#include "snapshot.h"
-
-#include <linux/random.h>
-
-/*
- * Snapshot trees:
- *
- * Keys in BTREE_ID_snapshot_trees identify a whole tree of snapshot nodes; they
- * exist to provide a stable identifier for the whole lifetime of a snapshot
- * tree.
- */
-
-void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- struct bkey_s_c_snapshot_tree t = bkey_s_c_to_snapshot_tree(k);
-
- prt_printf(out, "subvol %u root snapshot %u",
- le32_to_cpu(t.v->master_subvol),
- le32_to_cpu(t.v->root_snapshot));
-}
-
-int bch2_snapshot_tree_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- int ret = 0;
-
- bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
- bkey_lt(k.k->p, POS(0, 1)),
- c, snapshot_tree_pos_bad,
- "bad pos");
-fsck_err:
- return ret;
-}
-
-int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
- struct bch_snapshot_tree *s)
-{
- int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
- BTREE_ITER_with_updates, snapshot_tree, s);
-
- if (bch2_err_matches(ret, ENOENT))
- ret = -BCH_ERR_ENOENT_snapshot_tree;
- return ret;
-}
-
-struct bkey_i_snapshot_tree *
-__bch2_snapshot_tree_create(struct btree_trans *trans)
-{
- struct btree_iter iter;
- int ret = bch2_bkey_get_empty_slot(trans, &iter,
- BTREE_ID_snapshot_trees, POS(0, U32_MAX));
- struct bkey_i_snapshot_tree *s_t;
-
- if (ret == -BCH_ERR_ENOSPC_btree_slot)
- ret = -BCH_ERR_ENOSPC_snapshot_tree;
- if (ret)
- return ERR_PTR(ret);
-
- s_t = bch2_bkey_alloc(trans, &iter, 0, snapshot_tree);
- ret = PTR_ERR_OR_ZERO(s_t);
- bch2_trans_iter_exit(trans, &iter);
- return ret ? ERR_PTR(ret) : s_t;
-}
-
-static int bch2_snapshot_tree_create(struct btree_trans *trans,
- u32 root_id, u32 subvol_id, u32 *tree_id)
-{
- struct bkey_i_snapshot_tree *n_tree =
- __bch2_snapshot_tree_create(trans);
-
- if (IS_ERR(n_tree))
- return PTR_ERR(n_tree);
-
- n_tree->v.master_subvol = cpu_to_le32(subvol_id);
- n_tree->v.root_snapshot = cpu_to_le32(root_id);
- *tree_id = n_tree->k.p.offset;
- return 0;
-}
-
-/* Snapshot nodes: */
-
-static bool __bch2_snapshot_is_ancestor_early(struct snapshot_table *t, u32 id, u32 ancestor)
-{
- while (id && id < ancestor) {
- const struct snapshot_t *s = __snapshot_t(t, id);
- id = s ? s->parent : 0;
- }
- return id == ancestor;
-}
-
-static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancestor)
-{
- rcu_read_lock();
- bool ret = __bch2_snapshot_is_ancestor_early(rcu_dereference(c->snapshots), id, ancestor);
- rcu_read_unlock();
-
- return ret;
-}
-
-static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ancestor)
-{
- const struct snapshot_t *s = __snapshot_t(t, id);
- if (!s)
- return 0;
-
- if (s->skip[2] <= ancestor)
- return s->skip[2];
- if (s->skip[1] <= ancestor)
- return s->skip[1];
- if (s->skip[0] <= ancestor)
- return s->skip[0];
- return s->parent;
-}
-
-static bool test_ancestor_bitmap(struct snapshot_table *t, u32 id, u32 ancestor)
-{
- const struct snapshot_t *s = __snapshot_t(t, id);
- if (!s)
- return false;
-
- return test_bit(ancestor - id - 1, s->is_ancestor);
-}
-
-bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
-{
- bool ret;
-
- rcu_read_lock();
- struct snapshot_table *t = rcu_dereference(c->snapshots);
-
- if (unlikely(c->recovery_pass_done < BCH_RECOVERY_PASS_check_snapshots)) {
- ret = __bch2_snapshot_is_ancestor_early(t, id, ancestor);
- goto out;
- }
-
- while (id && id < ancestor - IS_ANCESTOR_BITMAP)
- id = get_ancestor_below(t, id, ancestor);
-
- ret = id && id < ancestor
- ? test_ancestor_bitmap(t, id, ancestor)
- : id == ancestor;
-
- EBUG_ON(ret != __bch2_snapshot_is_ancestor_early(t, id, ancestor));
-out:
- rcu_read_unlock();
-
- return ret;
-}
-
-static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
-{
- size_t idx = U32_MAX - id;
- struct snapshot_table *new, *old;
-
- size_t new_bytes = kmalloc_size_roundup(struct_size(new, s, idx + 1));
- size_t new_size = (new_bytes - sizeof(*new)) / sizeof(new->s[0]);
-
- if (unlikely(new_bytes > INT_MAX))
- return NULL;
-
- new = kvzalloc(new_bytes, GFP_KERNEL);
- if (!new)
- return NULL;
-
- new->nr = new_size;
-
- old = rcu_dereference_protected(c->snapshots, true);
- if (old)
- memcpy(new->s, old->s, sizeof(old->s[0]) * old->nr);
-
- rcu_assign_pointer(c->snapshots, new);
- kvfree_rcu(old, rcu);
-
- return &rcu_dereference_protected(c->snapshots,
- lockdep_is_held(&c->snapshot_table_lock))->s[idx];
-}
-
-static inline struct snapshot_t *snapshot_t_mut(struct bch_fs *c, u32 id)
-{
- size_t idx = U32_MAX - id;
- struct snapshot_table *table =
- rcu_dereference_protected(c->snapshots,
- lockdep_is_held(&c->snapshot_table_lock));
-
- lockdep_assert_held(&c->snapshot_table_lock);
-
- if (likely(table && idx < table->nr))
- return &table->s[idx];
-
- return __snapshot_t_mut(c, id);
-}
-
-void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);
-
- prt_printf(out, "is_subvol %llu deleted %llu parent %10u children %10u %10u subvol %u tree %u",
- BCH_SNAPSHOT_SUBVOL(s.v),
- BCH_SNAPSHOT_DELETED(s.v),
- le32_to_cpu(s.v->parent),
- le32_to_cpu(s.v->children[0]),
- le32_to_cpu(s.v->children[1]),
- le32_to_cpu(s.v->subvol),
- le32_to_cpu(s.v->tree));
-
- if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, depth))
- prt_printf(out, " depth %u skiplist %u %u %u",
- le32_to_cpu(s.v->depth),
- le32_to_cpu(s.v->skip[0]),
- le32_to_cpu(s.v->skip[1]),
- le32_to_cpu(s.v->skip[2]));
-}
-
-int bch2_snapshot_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct bkey_s_c_snapshot s;
- u32 i, id;
- int ret = 0;
-
- bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
- bkey_lt(k.k->p, POS(0, 1)),
- c, snapshot_pos_bad,
- "bad pos");
-
- s = bkey_s_c_to_snapshot(k);
-
- id = le32_to_cpu(s.v->parent);
- bkey_fsck_err_on(id && id <= k.k->p.offset,
- c, snapshot_parent_bad,
- "bad parent node (%u <= %llu)",
- id, k.k->p.offset);
-
- bkey_fsck_err_on(le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1]),
- c, snapshot_children_not_normalized,
- "children not normalized");
-
- bkey_fsck_err_on(s.v->children[0] && s.v->children[0] == s.v->children[1],
- c, snapshot_child_duplicate,
- "duplicate child nodes");
-
- for (i = 0; i < 2; i++) {
- id = le32_to_cpu(s.v->children[i]);
-
- bkey_fsck_err_on(id >= k.k->p.offset,
- c, snapshot_child_bad,
- "bad child node (%u >= %llu)",
- id, k.k->p.offset);
- }
-
- if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, skip)) {
- bkey_fsck_err_on(le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) ||
- le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2]),
- c, snapshot_skiplist_not_normalized,
- "skiplist not normalized");
-
- for (i = 0; i < ARRAY_SIZE(s.v->skip); i++) {
- id = le32_to_cpu(s.v->skip[i]);
-
- bkey_fsck_err_on(id && id < le32_to_cpu(s.v->parent),
- c, snapshot_skiplist_bad,
- "bad skiplist node %u", id);
- }
- }
-fsck_err:
- return ret;
-}
-
-static void __set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
-{
- struct snapshot_t *t = snapshot_t_mut(c, id);
- u32 parent = id;
-
- while ((parent = bch2_snapshot_parent_early(c, parent)) &&
- parent - id - 1 < IS_ANCESTOR_BITMAP)
- __set_bit(parent - id - 1, t->is_ancestor);
-}
-
-static void set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
-{
- mutex_lock(&c->snapshot_table_lock);
- __set_is_ancestor_bitmap(c, id);
- mutex_unlock(&c->snapshot_table_lock);
-}
-
-static int __bch2_mark_snapshot(struct btree_trans *trans,
- enum btree_id btree, unsigned level,
- struct bkey_s_c old, struct bkey_s_c new,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bch_fs *c = trans->c;
- struct snapshot_t *t;
- u32 id = new.k->p.offset;
- int ret = 0;
-
- mutex_lock(&c->snapshot_table_lock);
-
- t = snapshot_t_mut(c, id);
- if (!t) {
- ret = -BCH_ERR_ENOMEM_mark_snapshot;
- goto err;
- }
-
- if (new.k->type == KEY_TYPE_snapshot) {
- struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);
-
- t->parent = le32_to_cpu(s.v->parent);
- t->children[0] = le32_to_cpu(s.v->children[0]);
- t->children[1] = le32_to_cpu(s.v->children[1]);
- t->subvol = BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
- t->tree = le32_to_cpu(s.v->tree);
-
- if (bkey_val_bytes(s.k) > offsetof(struct bch_snapshot, depth)) {
- t->depth = le32_to_cpu(s.v->depth);
- t->skip[0] = le32_to_cpu(s.v->skip[0]);
- t->skip[1] = le32_to_cpu(s.v->skip[1]);
- t->skip[2] = le32_to_cpu(s.v->skip[2]);
- } else {
- t->depth = 0;
- t->skip[0] = 0;
- t->skip[1] = 0;
- t->skip[2] = 0;
- }
-
- __set_is_ancestor_bitmap(c, id);
-
- if (BCH_SNAPSHOT_DELETED(s.v)) {
- set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
- if (c->curr_recovery_pass > BCH_RECOVERY_PASS_delete_dead_snapshots)
- bch2_delete_dead_snapshots_async(c);
- }
- } else {
- memset(t, 0, sizeof(*t));
- }
-err:
- mutex_unlock(&c->snapshot_table_lock);
- return ret;
-}
-
-int bch2_mark_snapshot(struct btree_trans *trans,
- enum btree_id btree, unsigned level,
- struct bkey_s_c old, struct bkey_s new,
- enum btree_iter_update_trigger_flags flags)
-{
- return __bch2_mark_snapshot(trans, btree, level, old, new.s_c, flags);
-}
-
-int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
- struct bch_snapshot *s)
-{
- return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots, POS(0, id),
- BTREE_ITER_with_updates, snapshot, s);
-}
-
-static int bch2_snapshot_live(struct btree_trans *trans, u32 id)
-{
- struct bch_snapshot v;
- int ret;
-
- if (!id)
- return 0;
-
- ret = bch2_snapshot_lookup(trans, id, &v);
- if (bch2_err_matches(ret, ENOENT))
- bch_err(trans->c, "snapshot node %u not found", id);
- if (ret)
- return ret;
-
- return !BCH_SNAPSHOT_DELETED(&v);
-}
-
-/*
- * If @k is a snapshot with just one live child, it's part of a linear chain,
- * which we consider to be an equivalence class: and then after snapshot
- * deletion cleanup, there should only be a single key at a given position in
- * this equivalence class.
- *
- * This sets the equivalence class of @k to be the child's equivalence class, if
- * it's part of such a linear chain: this correctly sets equivalence classes on
- * startup if we run leaf to root (i.e. in natural key order).
- */
-static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- unsigned i, nr_live = 0, live_idx = 0;
- struct bkey_s_c_snapshot snap;
- u32 id = k.k->p.offset, child[2];
-
- if (k.k->type != KEY_TYPE_snapshot)
- return 0;
-
- snap = bkey_s_c_to_snapshot(k);
-
- child[0] = le32_to_cpu(snap.v->children[0]);
- child[1] = le32_to_cpu(snap.v->children[1]);
-
- for (i = 0; i < 2; i++) {
- int ret = bch2_snapshot_live(trans, child[i]);
-
- if (ret < 0)
- return ret;
-
- if (ret)
- live_idx = i;
- nr_live += ret;
- }
-
- mutex_lock(&c->snapshot_table_lock);
-
- snapshot_t_mut(c, id)->equiv = nr_live == 1
- ? snapshot_t_mut(c, child[live_idx])->equiv
- : id;
-
- mutex_unlock(&c->snapshot_table_lock);
-
- return 0;
-}
-
-/* fsck: */
-
-static u32 bch2_snapshot_child(struct bch_fs *c, u32 id, unsigned child)
-{
- return snapshot_t(c, id)->children[child];
-}
-
-static u32 bch2_snapshot_left_child(struct bch_fs *c, u32 id)
-{
- return bch2_snapshot_child(c, id, 0);
-}
-
-static u32 bch2_snapshot_right_child(struct bch_fs *c, u32 id)
-{
- return bch2_snapshot_child(c, id, 1);
-}
-
-static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id)
-{
- u32 n, parent;
-
- n = bch2_snapshot_left_child(c, id);
- if (n)
- return n;
-
- while ((parent = bch2_snapshot_parent(c, id))) {
- n = bch2_snapshot_right_child(c, parent);
- if (n && n != id)
- return n;
- id = parent;
- }
-
- return 0;
-}
-
-static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
-{
- u32 id = snapshot_root;
- u32 subvol = 0, s;
-
- rcu_read_lock();
- while (id) {
- s = snapshot_t(c, id)->subvol;
-
- if (s && (!subvol || s < subvol))
- subvol = s;
-
- id = bch2_snapshot_tree_next(c, id);
- }
- rcu_read_unlock();
-
- return subvol;
-}
-
-static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans,
- u32 snapshot_root, u32 *subvol_id)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c k;
- bool found = false;
- int ret;
-
- for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN,
- 0, k, ret) {
- if (k.k->type != KEY_TYPE_subvolume)
- continue;
-
- struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
- if (!bch2_snapshot_is_ancestor(c, le32_to_cpu(s.v->snapshot), snapshot_root))
- continue;
- if (!BCH_SUBVOLUME_SNAP(s.v)) {
- *subvol_id = s.k->p.offset;
- found = true;
- break;
- }
- }
-
- bch2_trans_iter_exit(trans, &iter);
-
- if (!ret && !found) {
- struct bkey_i_subvolume *u;
-
- *subvol_id = bch2_snapshot_tree_oldest_subvol(c, snapshot_root);
-
- u = bch2_bkey_get_mut_typed(trans, &iter,
- BTREE_ID_subvolumes, POS(0, *subvol_id),
- 0, subvolume);
- ret = PTR_ERR_OR_ZERO(u);
- if (ret)
- return ret;
-
- SET_BCH_SUBVOLUME_SNAP(&u->v, false);
- }
-
- return ret;
-}
-
-static int check_snapshot_tree(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct bkey_s_c_snapshot_tree st;
- struct bch_snapshot s;
- struct bch_subvolume subvol;
- struct printbuf buf = PRINTBUF;
- u32 root_id;
- int ret;
-
- if (k.k->type != KEY_TYPE_snapshot_tree)
- return 0;
-
- st = bkey_s_c_to_snapshot_tree(k);
- root_id = le32_to_cpu(st.v->root_snapshot);
-
- ret = bch2_snapshot_lookup(trans, root_id, &s);
- if (ret && !bch2_err_matches(ret, ENOENT))
- goto err;
-
- if (fsck_err_on(ret ||
- root_id != bch2_snapshot_root(c, root_id) ||
- st.k->p.offset != le32_to_cpu(s.tree),
- trans, snapshot_tree_to_missing_snapshot,
- "snapshot tree points to missing/incorrect snapshot:\n %s",
- (bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
- ret = bch2_btree_delete_at(trans, iter, 0);
- goto err;
- }
-
- ret = bch2_subvolume_get(trans, le32_to_cpu(st.v->master_subvol),
- false, 0, &subvol);
- if (ret && !bch2_err_matches(ret, ENOENT))
- goto err;
-
- if (fsck_err_on(ret,
- trans, snapshot_tree_to_missing_subvol,
- "snapshot tree points to missing subvolume:\n %s",
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
- fsck_err_on(!bch2_snapshot_is_ancestor(c,
- le32_to_cpu(subvol.snapshot),
- root_id),
- trans, snapshot_tree_to_wrong_subvol,
- "snapshot tree points to subvolume that does not point to snapshot in this tree:\n %s",
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
- fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol),
- trans, snapshot_tree_to_snapshot_subvol,
- "snapshot tree points to snapshot subvolume:\n %s",
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
- struct bkey_i_snapshot_tree *u;
- u32 subvol_id;
-
- ret = bch2_snapshot_tree_master_subvol(trans, root_id, &subvol_id);
- bch_err_fn(c, ret);
-
- if (bch2_err_matches(ret, ENOENT)) { /* nothing to be done here */
- ret = 0;
- goto err;
- }
-
- if (ret)
- goto err;
-
- u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot_tree);
- ret = PTR_ERR_OR_ZERO(u);
- if (ret)
- goto err;
-
- u->v.master_subvol = cpu_to_le32(subvol_id);
- st = snapshot_tree_i_to_s_c(u);
- }
-err:
-fsck_err:
- printbuf_exit(&buf);
- return ret;
-}
-
-/*
- * For each snapshot_tree, make sure it points to the root of a snapshot tree
- * and that the root snapshot entry points back to it, or delete it.
- *
- * And, make sure it points to a subvolume within that snapshot tree, or correct
- * it to point to the oldest subvolume within that snapshot tree.
- */
-int bch2_check_snapshot_trees(struct bch_fs *c)
-{
- int ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter,
- BTREE_ID_snapshot_trees, POS_MIN,
- BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- check_snapshot_tree(trans, &iter, k)));
- bch_err_fn(c, ret);
- return ret;
-}
-
-/*
- * Look up snapshot tree for @tree_id and find root,
- * make sure @snap_id is a descendant:
- */
-static int snapshot_tree_ptr_good(struct btree_trans *trans,
- u32 snap_id, u32 tree_id)
-{
- struct bch_snapshot_tree s_t;
- int ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
-
- if (bch2_err_matches(ret, ENOENT))
- return 0;
- if (ret)
- return ret;
-
- return bch2_snapshot_is_ancestor_early(trans->c, snap_id, le32_to_cpu(s_t.root_snapshot));
-}
-
-u32 bch2_snapshot_skiplist_get(struct bch_fs *c, u32 id)
-{
- const struct snapshot_t *s;
-
- if (!id)
- return 0;
-
- rcu_read_lock();
- s = snapshot_t(c, id);
- if (s->parent)
- id = bch2_snapshot_nth_parent(c, id, get_random_u32_below(s->depth));
- rcu_read_unlock();
-
- return id;
-}
-
-static int snapshot_skiplist_good(struct btree_trans *trans, u32 id, struct bch_snapshot s)
-{
- unsigned i;
-
- for (i = 0; i < 3; i++)
- if (!s.parent) {
- if (s.skip[i])
- return false;
- } else {
- if (!bch2_snapshot_is_ancestor_early(trans->c, id, le32_to_cpu(s.skip[i])))
- return false;
- }
-
- return true;
-}
-
-/*
- * snapshot_tree pointer was incorrect: look up root snapshot node, make sure
- * its snapshot_tree pointer is correct (allocate new one if necessary), then
- * update this node's pointer to root node's pointer:
- */
-static int snapshot_tree_ptr_repair(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k,
- struct bch_snapshot *s)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter root_iter;
- struct bch_snapshot_tree s_t;
- struct bkey_s_c_snapshot root;
- struct bkey_i_snapshot *u;
- u32 root_id = bch2_snapshot_root(c, k.k->p.offset), tree_id;
- int ret;
-
- root = bch2_bkey_get_iter_typed(trans, &root_iter,
- BTREE_ID_snapshots, POS(0, root_id),
- BTREE_ITER_with_updates, snapshot);
- ret = bkey_err(root);
- if (ret)
- goto err;
-
- tree_id = le32_to_cpu(root.v->tree);
-
- ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
- if (ret && !bch2_err_matches(ret, ENOENT))
- return ret;
-
- if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
- u = bch2_bkey_make_mut_typed(trans, &root_iter, &root.s_c, 0, snapshot);
- ret = PTR_ERR_OR_ZERO(u) ?:
- bch2_snapshot_tree_create(trans, root_id,
- bch2_snapshot_tree_oldest_subvol(c, root_id),
- &tree_id);
- if (ret)
- goto err;
-
- u->v.tree = cpu_to_le32(tree_id);
- if (k.k->p.offset == root_id)
- *s = u->v;
- }
-
- if (k.k->p.offset != root_id) {
- u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
- ret = PTR_ERR_OR_ZERO(u);
- if (ret)
- goto err;
-
- u->v.tree = cpu_to_le32(tree_id);
- *s = u->v;
- }
-err:
- bch2_trans_iter_exit(trans, &root_iter);
- return ret;
-}
-
-static int check_snapshot(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct bch_snapshot s;
- struct bch_subvolume subvol;
- struct bch_snapshot v;
- struct bkey_i_snapshot *u;
- u32 parent_id = bch2_snapshot_parent_early(c, k.k->p.offset);
- u32 real_depth;
- struct printbuf buf = PRINTBUF;
- u32 i, id;
- int ret = 0;
-
- if (k.k->type != KEY_TYPE_snapshot)
- return 0;
-
- memset(&s, 0, sizeof(s));
- memcpy(&s, k.v, min(sizeof(s), bkey_val_bytes(k.k)));
-
- id = le32_to_cpu(s.parent);
- if (id) {
- ret = bch2_snapshot_lookup(trans, id, &v);
- if (bch2_err_matches(ret, ENOENT))
- bch_err(c, "snapshot with nonexistent parent:\n %s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- if (ret)
- goto err;
-
- if (le32_to_cpu(v.children[0]) != k.k->p.offset &&
- le32_to_cpu(v.children[1]) != k.k->p.offset) {
- bch_err(c, "snapshot parent %u missing pointer to child %llu",
- id, k.k->p.offset);
- ret = -EINVAL;
- goto err;
- }
- }
-
- for (i = 0; i < 2 && s.children[i]; i++) {
- id = le32_to_cpu(s.children[i]);
-
- ret = bch2_snapshot_lookup(trans, id, &v);
- if (bch2_err_matches(ret, ENOENT))
- bch_err(c, "snapshot node %llu has nonexistent child %u",
- k.k->p.offset, id);
- if (ret)
- goto err;
-
- if (le32_to_cpu(v.parent) != k.k->p.offset) {
- bch_err(c, "snapshot child %u has wrong parent (got %u should be %llu)",
- id, le32_to_cpu(v.parent), k.k->p.offset);
- ret = -EINVAL;
- goto err;
- }
- }
-
- bool should_have_subvol = BCH_SNAPSHOT_SUBVOL(&s) &&
- !BCH_SNAPSHOT_DELETED(&s);
-
- if (should_have_subvol) {
- id = le32_to_cpu(s.subvol);
- ret = bch2_subvolume_get(trans, id, 0, false, &subvol);
- if (bch2_err_matches(ret, ENOENT))
- bch_err(c, "snapshot points to nonexistent subvolume:\n %s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- if (ret)
- goto err;
-
- if (BCH_SNAPSHOT_SUBVOL(&s) != (le32_to_cpu(subvol.snapshot) == k.k->p.offset)) {
- bch_err(c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
- k.k->p.offset);
- ret = -EINVAL;
- goto err;
- }
- } else {
- if (fsck_err_on(s.subvol,
- trans, snapshot_should_not_have_subvol,
- "snapshot should not point to subvol:\n %s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
- ret = PTR_ERR_OR_ZERO(u);
- if (ret)
- goto err;
-
- u->v.subvol = 0;
- s = u->v;
- }
- }
-
- ret = snapshot_tree_ptr_good(trans, k.k->p.offset, le32_to_cpu(s.tree));
- if (ret < 0)
- goto err;
-
- if (fsck_err_on(!ret,
- trans, snapshot_to_bad_snapshot_tree,
- "snapshot points to missing/incorrect tree:\n %s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- ret = snapshot_tree_ptr_repair(trans, iter, k, &s);
- if (ret)
- goto err;
- }
- ret = 0;
-
- real_depth = bch2_snapshot_depth(c, parent_id);
-
- if (fsck_err_on(le32_to_cpu(s.depth) != real_depth,
- trans, snapshot_bad_depth,
- "snapshot with incorrect depth field, should be %u:\n %s",
- real_depth, (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
- ret = PTR_ERR_OR_ZERO(u);
- if (ret)
- goto err;
-
- u->v.depth = cpu_to_le32(real_depth);
- s = u->v;
- }
-
- ret = snapshot_skiplist_good(trans, k.k->p.offset, s);
- if (ret < 0)
- goto err;
-
- if (fsck_err_on(!ret,
- trans, snapshot_bad_skiplist,
- "snapshot with bad skiplist field:\n %s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
- ret = PTR_ERR_OR_ZERO(u);
- if (ret)
- goto err;
-
- for (i = 0; i < ARRAY_SIZE(u->v.skip); i++)
- u->v.skip[i] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent_id));
-
- bubble_sort(u->v.skip, ARRAY_SIZE(u->v.skip), cmp_le32);
- s = u->v;
- }
- ret = 0;
-err:
-fsck_err:
- printbuf_exit(&buf);
- return ret;
-}
-
-int bch2_check_snapshots(struct bch_fs *c)
-{
- /*
- * We iterate backwards as checking/fixing the depth field requires that
- * the parent's depth already be correct:
- */
- int ret = bch2_trans_run(c,
- for_each_btree_key_reverse_commit(trans, iter,
- BTREE_ID_snapshots, POS_MAX,
- BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- check_snapshot(trans, &iter, k)));
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int check_snapshot_exists(struct btree_trans *trans, u32 id)
-{
- struct bch_fs *c = trans->c;
-
- if (bch2_snapshot_equiv(c, id))
- return 0;
-
- /* Do we need to reconstruct the snapshot_tree entry as well? */
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret = 0;
- u32 tree_id = 0;
-
- for_each_btree_key_norestart(trans, iter, BTREE_ID_snapshot_trees, POS_MIN,
- 0, k, ret) {
- if (le32_to_cpu(bkey_s_c_to_snapshot_tree(k).v->root_snapshot) == id) {
- tree_id = k.k->p.offset;
- break;
- }
- }
- bch2_trans_iter_exit(trans, &iter);
-
- if (ret)
- return ret;
-
- if (!tree_id) {
- ret = bch2_snapshot_tree_create(trans, id, 0, &tree_id);
- if (ret)
- return ret;
- }
-
- struct bkey_i_snapshot *snapshot = bch2_trans_kmalloc(trans, sizeof(*snapshot));
- ret = PTR_ERR_OR_ZERO(snapshot);
- if (ret)
- return ret;
-
- bkey_snapshot_init(&snapshot->k_i);
- snapshot->k.p = POS(0, id);
- snapshot->v.tree = cpu_to_le32(tree_id);
- snapshot->v.btime.lo = cpu_to_le64(bch2_current_time(c));
-
- for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN,
- 0, k, ret) {
- if (le32_to_cpu(bkey_s_c_to_subvolume(k).v->snapshot) == id) {
- snapshot->v.subvol = cpu_to_le32(k.k->p.offset);
- SET_BCH_SNAPSHOT_SUBVOL(&snapshot->v, true);
- break;
- }
- }
- bch2_trans_iter_exit(trans, &iter);
-
- return bch2_btree_insert_trans(trans, BTREE_ID_snapshots, &snapshot->k_i, 0) ?:
- bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
- bkey_s_c_null, bkey_i_to_s(&snapshot->k_i), 0) ?:
- bch2_snapshot_set_equiv(trans, bkey_i_to_s_c(&snapshot->k_i));
-}
-
-/* Figure out which snapshot nodes belong in the same tree: */
-struct snapshot_tree_reconstruct {
- enum btree_id btree;
- struct bpos cur_pos;
- snapshot_id_list cur_ids;
- DARRAY(snapshot_id_list) trees;
-};
-
-static void snapshot_tree_reconstruct_exit(struct snapshot_tree_reconstruct *r)
-{
- darray_for_each(r->trees, i)
- darray_exit(i);
- darray_exit(&r->trees);
- darray_exit(&r->cur_ids);
-}
-
-static inline bool same_snapshot(struct snapshot_tree_reconstruct *r, struct bpos pos)
-{
- return r->btree == BTREE_ID_inodes
- ? r->cur_pos.offset == pos.offset
- : r->cur_pos.inode == pos.inode;
-}
-
-static inline bool snapshot_id_lists_have_common(snapshot_id_list *l, snapshot_id_list *r)
-{
- darray_for_each(*l, i)
- if (snapshot_list_has_id(r, *i))
- return true;
- return false;
-}
-
-static void snapshot_id_list_to_text(struct printbuf *out, snapshot_id_list *s)
-{
- bool first = true;
- darray_for_each(*s, i) {
- if (!first)
- prt_char(out, ' ');
- first = false;
- prt_printf(out, "%u", *i);
- }
-}
-
-static int snapshot_tree_reconstruct_next(struct bch_fs *c, struct snapshot_tree_reconstruct *r)
-{
- if (r->cur_ids.nr) {
- darray_for_each(r->trees, i)
- if (snapshot_id_lists_have_common(i, &r->cur_ids)) {
- int ret = snapshot_list_merge(c, i, &r->cur_ids);
- if (ret)
- return ret;
- goto out;
- }
- darray_push(&r->trees, r->cur_ids);
- darray_init(&r->cur_ids);
- }
-out:
- r->cur_ids.nr = 0;
- return 0;
-}
-
-static int get_snapshot_trees(struct bch_fs *c, struct snapshot_tree_reconstruct *r, struct bpos pos)
-{
- if (!same_snapshot(r, pos))
- snapshot_tree_reconstruct_next(c, r);
- r->cur_pos = pos;
- return snapshot_list_add_nodup(c, &r->cur_ids, pos.snapshot);
-}
-
-int bch2_reconstruct_snapshots(struct bch_fs *c)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct printbuf buf = PRINTBUF;
- struct snapshot_tree_reconstruct r = {};
- int ret = 0;
-
- for (unsigned btree = 0; btree < BTREE_ID_NR; btree++) {
- if (btree_type_has_snapshots(btree)) {
- r.btree = btree;
-
- ret = for_each_btree_key(trans, iter, btree, POS_MIN,
- BTREE_ITER_all_snapshots|BTREE_ITER_prefetch, k, ({
- get_snapshot_trees(c, &r, k.k->p);
- }));
- if (ret)
- goto err;
-
- snapshot_tree_reconstruct_next(c, &r);
- }
- }
-
- darray_for_each(r.trees, t) {
- printbuf_reset(&buf);
- snapshot_id_list_to_text(&buf, t);
-
- darray_for_each(*t, id) {
- if (fsck_err_on(!bch2_snapshot_equiv(c, *id),
- trans, snapshot_node_missing,
- "snapshot node %u from tree %s missing, recreate?", *id, buf.buf)) {
- if (t->nr > 1) {
- bch_err(c, "cannot reconstruct snapshot trees with multiple nodes");
- ret = -BCH_ERR_fsck_repair_unimplemented;
- goto err;
- }
-
- ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- check_snapshot_exists(trans, *id));
- if (ret)
- goto err;
- }
- }
- }
-fsck_err:
-err:
- bch2_trans_put(trans);
- snapshot_tree_reconstruct_exit(&r);
- printbuf_exit(&buf);
- bch_err_fn(c, ret);
- return ret;
-}
-
-int bch2_check_key_has_snapshot(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- if (fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot),
- trans, bkey_in_missing_snapshot,
- "key in missing snapshot %s, delete?",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- ret = bch2_btree_delete_at(trans, iter,
- BTREE_UPDATE_internal_snapshot_node) ?: 1;
-fsck_err:
- printbuf_exit(&buf);
- return ret;
-}
-
-/*
- * Mark a snapshot as deleted, for future cleanup:
- */
-int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
-{
- struct btree_iter iter;
- struct bkey_i_snapshot *s;
- int ret = 0;
-
- s = bch2_bkey_get_mut_typed(trans, &iter,
- BTREE_ID_snapshots, POS(0, id),
- 0, snapshot);
- ret = PTR_ERR_OR_ZERO(s);
- if (unlikely(ret)) {
- bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
- trans->c, "missing snapshot %u", id);
- return ret;
- }
-
- /* already deleted? */
- if (BCH_SNAPSHOT_DELETED(&s->v))
- goto err;
-
- SET_BCH_SNAPSHOT_DELETED(&s->v, true);
- SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
- s->v.subvol = 0;
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static inline void normalize_snapshot_child_pointers(struct bch_snapshot *s)
-{
- if (le32_to_cpu(s->children[0]) < le32_to_cpu(s->children[1]))
- swap(s->children[0], s->children[1]);
-}
-
-static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
- struct btree_iter c_iter = (struct btree_iter) { NULL };
- struct btree_iter tree_iter = (struct btree_iter) { NULL };
- struct bkey_s_c_snapshot s;
- u32 parent_id, child_id;
- unsigned i;
- int ret = 0;
-
- s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id),
- BTREE_ITER_intent, snapshot);
- ret = bkey_err(s);
- bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
- "missing snapshot %u", id);
-
- if (ret)
- goto err;
-
- BUG_ON(s.v->children[1]);
-
- parent_id = le32_to_cpu(s.v->parent);
- child_id = le32_to_cpu(s.v->children[0]);
-
- if (parent_id) {
- struct bkey_i_snapshot *parent;
-
- parent = bch2_bkey_get_mut_typed(trans, &p_iter,
- BTREE_ID_snapshots, POS(0, parent_id),
- 0, snapshot);
- ret = PTR_ERR_OR_ZERO(parent);
- bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
- "missing snapshot %u", parent_id);
- if (unlikely(ret))
- goto err;
-
- /* find entry in parent->children for node being deleted */
- for (i = 0; i < 2; i++)
- if (le32_to_cpu(parent->v.children[i]) == id)
- break;
-
- if (bch2_fs_inconsistent_on(i == 2, c,
- "snapshot %u missing child pointer to %u",
- parent_id, id))
- goto err;
-
- parent->v.children[i] = cpu_to_le32(child_id);
-
- normalize_snapshot_child_pointers(&parent->v);
- }
-
- if (child_id) {
- struct bkey_i_snapshot *child;
-
- child = bch2_bkey_get_mut_typed(trans, &c_iter,
- BTREE_ID_snapshots, POS(0, child_id),
- 0, snapshot);
- ret = PTR_ERR_OR_ZERO(child);
- bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
- "missing snapshot %u", child_id);
- if (unlikely(ret))
- goto err;
-
- child->v.parent = cpu_to_le32(parent_id);
-
- if (!child->v.parent) {
- child->v.skip[0] = 0;
- child->v.skip[1] = 0;
- child->v.skip[2] = 0;
- }
- }
-
- if (!parent_id) {
- /*
- * We're deleting the root of a snapshot tree: update the
- * snapshot_tree entry to point to the new root, or delete it if
- * this is the last snapshot ID in this tree:
- */
- struct bkey_i_snapshot_tree *s_t;
-
- BUG_ON(s.v->children[1]);
-
- s_t = bch2_bkey_get_mut_typed(trans, &tree_iter,
- BTREE_ID_snapshot_trees, POS(0, le32_to_cpu(s.v->tree)),
- 0, snapshot_tree);
- ret = PTR_ERR_OR_ZERO(s_t);
- if (ret)
- goto err;
-
- if (s.v->children[0]) {
- s_t->v.root_snapshot = s.v->children[0];
- } else {
- s_t->k.type = KEY_TYPE_deleted;
- set_bkey_val_u64s(&s_t->k, 0);
- }
- }
-
- ret = bch2_btree_delete_at(trans, &iter, 0);
-err:
- bch2_trans_iter_exit(trans, &tree_iter);
- bch2_trans_iter_exit(trans, &p_iter);
- bch2_trans_iter_exit(trans, &c_iter);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
- u32 *new_snapids,
- u32 *snapshot_subvols,
- unsigned nr_snapids)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_i_snapshot *n;
- struct bkey_s_c k;
- unsigned i, j;
- u32 depth = bch2_snapshot_depth(c, parent);
- int ret;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
- POS_MIN, BTREE_ITER_intent);
- k = bch2_btree_iter_peek(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- for (i = 0; i < nr_snapids; i++) {
- k = bch2_btree_iter_prev_slot(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (!k.k || !k.k->p.offset) {
- ret = -BCH_ERR_ENOSPC_snapshot_create;
- goto err;
- }
-
- n = bch2_bkey_alloc(trans, &iter, 0, snapshot);
- ret = PTR_ERR_OR_ZERO(n);
- if (ret)
- goto err;
-
- n->v.flags = 0;
- n->v.parent = cpu_to_le32(parent);
- n->v.subvol = cpu_to_le32(snapshot_subvols[i]);
- n->v.tree = cpu_to_le32(tree);
- n->v.depth = cpu_to_le32(depth);
- n->v.btime.lo = cpu_to_le64(bch2_current_time(c));
- n->v.btime.hi = 0;
-
- for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
- n->v.skip[j] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent));
-
- bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_le32);
- SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);
-
- ret = __bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
- bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
- if (ret)
- goto err;
-
- new_snapids[i] = iter.pos.offset;
-
- mutex_lock(&c->snapshot_table_lock);
- snapshot_t_mut(c, new_snapids[i])->equiv = new_snapids[i];
- mutex_unlock(&c->snapshot_table_lock);
- }
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-/*
- * Create new snapshot IDs as children of an existing snapshot ID:
- */
-static int bch2_snapshot_node_create_children(struct btree_trans *trans, u32 parent,
- u32 *new_snapids,
- u32 *snapshot_subvols,
- unsigned nr_snapids)
-{
- struct btree_iter iter;
- struct bkey_i_snapshot *n_parent;
- int ret = 0;
-
- n_parent = bch2_bkey_get_mut_typed(trans, &iter,
- BTREE_ID_snapshots, POS(0, parent),
- 0, snapshot);
- ret = PTR_ERR_OR_ZERO(n_parent);
- if (unlikely(ret)) {
- if (bch2_err_matches(ret, ENOENT))
- bch_err(trans->c, "snapshot %u not found", parent);
- return ret;
- }
-
- if (n_parent->v.children[0] || n_parent->v.children[1]) {
- bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
- ret = -EINVAL;
- goto err;
- }
-
- ret = create_snapids(trans, parent, le32_to_cpu(n_parent->v.tree),
- new_snapids, snapshot_subvols, nr_snapids);
- if (ret)
- goto err;
-
- n_parent->v.children[0] = cpu_to_le32(new_snapids[0]);
- n_parent->v.children[1] = cpu_to_le32(new_snapids[1]);
- n_parent->v.subvol = 0;
- SET_BCH_SNAPSHOT_SUBVOL(&n_parent->v, false);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-/*
- * Create a snapshot node that is the root of a new tree:
- */
-static int bch2_snapshot_node_create_tree(struct btree_trans *trans,
- u32 *new_snapids,
- u32 *snapshot_subvols,
- unsigned nr_snapids)
-{
- struct bkey_i_snapshot_tree *n_tree;
- int ret;
-
- n_tree = __bch2_snapshot_tree_create(trans);
- ret = PTR_ERR_OR_ZERO(n_tree) ?:
- create_snapids(trans, 0, n_tree->k.p.offset,
- new_snapids, snapshot_subvols, nr_snapids);
- if (ret)
- return ret;
-
- n_tree->v.master_subvol = cpu_to_le32(snapshot_subvols[0]);
- n_tree->v.root_snapshot = cpu_to_le32(new_snapids[0]);
- return 0;
-}
-
-int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
- u32 *new_snapids,
- u32 *snapshot_subvols,
- unsigned nr_snapids)
-{
- BUG_ON((parent == 0) != (nr_snapids == 1));
- BUG_ON((parent != 0) != (nr_snapids == 2));
-
- return parent
- ? bch2_snapshot_node_create_children(trans, parent,
- new_snapids, snapshot_subvols, nr_snapids)
- : bch2_snapshot_node_create_tree(trans,
- new_snapids, snapshot_subvols, nr_snapids);
-
-}
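-
-/*
- * Call-pattern sketch (illustrative, hypothetical helper with caller-supplied
- * arrays): with a nonzero parent exactly two child snapshot IDs are created
- * (see the BUG_ONs above) - typically one for the existing subvolume and one
- * for the new snapshot subvolume.
- */
-static inline int example_create_snapshot_of_subvol(struct btree_trans *trans,
- u32 parent_snapshot,
- u32 new_snapids[2],
- u32 snapshot_subvols[2])
-{
- return bch2_snapshot_node_create(trans, parent_snapshot,
- new_snapids, snapshot_subvols, 2);
-}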
-
-/*
- * If we have an unlinked inode in an internal snapshot node, and the inode
- * really has been deleted in all child snapshots, how does this get cleaned up?
- *
- * First, there is the problem of how keys that have been overwritten in all
- * child snapshots get deleted (unimplemented?), but inodes may perhaps be
- * special?
- *
- * Also: an unlinked inode in an internal snapshot appears not to get deleted
- * correctly if the inode doesn't exist in leaf snapshots.
- *
- * Solution:
- *
- * For a key in an interior snapshot node that needs work requiring it to be
- * mutated: iterate over all descendant leaf nodes and copy that key to the
- * leaf snapshot nodes, where we can mutate it.
- */
-
-static int delete_dead_snapshots_process_key(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k,
- snapshot_id_list *deleted,
- snapshot_id_list *equiv_seen,
- struct bpos *last_pos)
-{
- int ret = bch2_check_key_has_snapshot(trans, iter, k);
- if (ret)
- return ret < 0 ? ret : 0;
-
- struct bch_fs *c = trans->c;
- u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
- if (!equiv) /* key for invalid snapshot node, but we chose not to delete */
- return 0;
-
- if (!bkey_eq(k.k->p, *last_pos))
- equiv_seen->nr = 0;
-
- if (snapshot_list_has_id(deleted, k.k->p.snapshot))
- return bch2_btree_delete_at(trans, iter,
- BTREE_UPDATE_internal_snapshot_node);
-
- if (!bpos_eq(*last_pos, k.k->p) &&
- snapshot_list_has_id(equiv_seen, equiv))
- return bch2_btree_delete_at(trans, iter,
- BTREE_UPDATE_internal_snapshot_node);
-
- *last_pos = k.k->p;
-
- ret = snapshot_list_add_nodup(c, equiv_seen, equiv);
- if (ret)
- return ret;
-
- /*
- * When we have a linear chain of snapshot nodes, we consider
- * those to form an equivalence class: we're going to collapse
- * them all down to a single node, and keep the leaf-most node -
- * which has the same id as the equivalence class id.
- *
- * If there are multiple keys in different snapshots at the same
- * position, we're only going to keep the one in the newest
- * snapshot (we delete the others above) - the rest have been
- * overwritten and are redundant; the key we keep needs to be moved to the
- * equivalence class ID if it's not there already.
- */
- if (equiv != k.k->p.snapshot) {
- struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
- int ret = PTR_ERR_OR_ZERO(new);
- if (ret)
- return ret;
-
- new->k.p.snapshot = equiv;
-
- struct btree_iter new_iter;
- bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
- BTREE_ITER_all_snapshots|
- BTREE_ITER_cached|
- BTREE_ITER_intent);
-
- ret = bch2_btree_iter_traverse(&new_iter) ?:
- bch2_trans_update(trans, &new_iter, new,
- BTREE_UPDATE_internal_snapshot_node) ?:
- bch2_btree_delete_at(trans, iter,
- BTREE_UPDATE_internal_snapshot_node);
- bch2_trans_iter_exit(trans, &new_iter);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int bch2_snapshot_needs_delete(struct btree_trans *trans, struct bkey_s_c k)
-{
- struct bkey_s_c_snapshot snap;
- u32 children[2];
- int ret;
-
- if (k.k->type != KEY_TYPE_snapshot)
- return 0;
-
- snap = bkey_s_c_to_snapshot(k);
- if (BCH_SNAPSHOT_DELETED(snap.v) ||
- BCH_SNAPSHOT_SUBVOL(snap.v))
- return 0;
-
- children[0] = le32_to_cpu(snap.v->children[0]);
- children[1] = le32_to_cpu(snap.v->children[1]);
-
- ret = bch2_snapshot_live(trans, children[0]) ?:
- bch2_snapshot_live(trans, children[1]);
- if (ret < 0)
- return ret;
- return !ret;
-}
-
-/*
- * For a given snapshot, if it doesn't have a subvolume that points to it, and
- * it doesn't have child snapshot nodes - it's now redundant and we can mark it
- * as deleted.
- */
-static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct bkey_s_c k)
-{
- int ret = bch2_snapshot_needs_delete(trans, k);
-
- return ret <= 0
- ? ret
- : bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
-}
-
-static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n,
- snapshot_id_list *skip)
-{
- rcu_read_lock();
- while (snapshot_list_has_id(skip, id))
- id = __bch2_snapshot_parent(c, id);
-
- while (n--) {
- do {
- id = __bch2_snapshot_parent(c, id);
- } while (snapshot_list_has_id(skip, id));
- }
- rcu_read_unlock();
-
- return id;
-}
-
-static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans,
- struct btree_iter *iter, struct bkey_s_c k,
- snapshot_id_list *deleted)
-{
- struct bch_fs *c = trans->c;
- u32 nr_deleted_ancestors = 0;
- struct bkey_i_snapshot *s;
- int ret;
-
- if (k.k->type != KEY_TYPE_snapshot)
- return 0;
-
- if (snapshot_list_has_id(deleted, k.k->p.offset))
- return 0;
-
- s = bch2_bkey_make_mut_noupdate_typed(trans, k, snapshot);
- ret = PTR_ERR_OR_ZERO(s);
- if (ret)
- return ret;
-
- darray_for_each(*deleted, i)
- nr_deleted_ancestors += bch2_snapshot_is_ancestor(c, s->k.p.offset, *i);
-
- if (!nr_deleted_ancestors)
- return 0;
-
- le32_add_cpu(&s->v.depth, -nr_deleted_ancestors);
-
- if (!s->v.depth) {
- s->v.skip[0] = 0;
- s->v.skip[1] = 0;
- s->v.skip[2] = 0;
- } else {
- u32 depth = le32_to_cpu(s->v.depth);
- u32 parent = bch2_snapshot_parent(c, s->k.p.offset);
-
- for (unsigned j = 0; j < ARRAY_SIZE(s->v.skip); j++) {
- u32 id = le32_to_cpu(s->v.skip[j]);
-
- if (snapshot_list_has_id(deleted, id)) {
- id = bch2_snapshot_nth_parent_skip(c,
- parent,
- depth > 1
- ? get_random_u32_below(depth - 1)
- : 0,
- deleted);
- s->v.skip[j] = cpu_to_le32(id);
- }
- }
-
- bubble_sort(s->v.skip, ARRAY_SIZE(s->v.skip), cmp_le32);
- }
-
- return bch2_trans_update(trans, iter, &s->k_i, 0);
-}
-
-int bch2_delete_dead_snapshots(struct bch_fs *c)
-{
- struct btree_trans *trans;
- snapshot_id_list deleted = { 0 };
- snapshot_id_list deleted_interior = { 0 };
- int ret = 0;
-
- if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags))
- return 0;
-
- trans = bch2_trans_get(c);
-
- /*
- * For every snapshot node: If we have no live children and it's not
- * pointed to by a subvolume, delete it:
- */
- ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots,
- POS_MIN, 0, k,
- NULL, NULL, 0,
- bch2_delete_redundant_snapshot(trans, k));
- bch_err_msg(c, ret, "deleting redundant snapshots");
- if (ret)
- goto err;
-
- ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
- POS_MIN, 0, k,
- bch2_snapshot_set_equiv(trans, k));
- bch_err_msg(c, ret, "in bch2_snapshots_set_equiv");
- if (ret)
- goto err;
-
- ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
- POS_MIN, 0, k, ({
- if (k.k->type != KEY_TYPE_snapshot)
- continue;
-
- BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v)
- ? snapshot_list_add(c, &deleted, k.k->p.offset)
- : 0;
- }));
- bch_err_msg(c, ret, "walking snapshots");
- if (ret)
- goto err;
-
- for (unsigned btree = 0; btree < BTREE_ID_NR; btree++) {
- struct bpos last_pos = POS_MIN;
- snapshot_id_list equiv_seen = { 0 };
- struct disk_reservation res = { 0 };
-
- if (!btree_type_has_snapshots(btree))
- continue;
-
- ret = for_each_btree_key_commit(trans, iter,
- btree, POS_MIN,
- BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
- &res, NULL, BCH_TRANS_COMMIT_no_enospc,
- delete_dead_snapshots_process_key(trans, &iter, k, &deleted,
- &equiv_seen, &last_pos));
-
- bch2_disk_reservation_put(c, &res);
- darray_exit(&equiv_seen);
-
- bch_err_msg(c, ret, "deleting keys from dying snapshots");
- if (ret)
- goto err;
- }
-
- bch2_trans_unlock(trans);
- down_write(&c->snapshot_create_lock);
-
- ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
- POS_MIN, 0, k, ({
- u32 snapshot = k.k->p.offset;
- u32 equiv = bch2_snapshot_equiv(c, snapshot);
-
- equiv != snapshot
- ? snapshot_list_add(c, &deleted_interior, snapshot)
- : 0;
- }));
-
- bch_err_msg(c, ret, "walking snapshots");
- if (ret)
- goto err_create_lock;
-
- /*
- * Fixing children of deleted snapshots can't be done completely
- * atomically; if we crash between here and when we delete the interior
- * nodes, some depth fields will be off:
- */
- ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, POS_MIN,
- BTREE_ITER_intent, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &deleted_interior));
- if (ret)
- goto err_create_lock;
-
- darray_for_each(deleted, i) {
- ret = commit_do(trans, NULL, NULL, 0,
- bch2_snapshot_node_delete(trans, *i));
- bch_err_msg(c, ret, "deleting snapshot %u", *i);
- if (ret)
- goto err_create_lock;
- }
-
- darray_for_each(deleted_interior, i) {
- ret = commit_do(trans, NULL, NULL, 0,
- bch2_snapshot_node_delete(trans, *i));
- bch_err_msg(c, ret, "deleting snapshot %u", *i);
- if (ret)
- goto err_create_lock;
- }
-err_create_lock:
- up_write(&c->snapshot_create_lock);
-err:
- darray_exit(&deleted_interior);
- darray_exit(&deleted);
- bch2_trans_put(trans);
- bch_err_fn(c, ret);
- return ret;
-}
-
-void bch2_delete_dead_snapshots_work(struct work_struct *work)
-{
- struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
-
- set_worker_desc("bcachefs-delete-dead-snapshots/%s", c->name);
-
- bch2_delete_dead_snapshots(c);
- bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
-}
-
-void bch2_delete_dead_snapshots_async(struct bch_fs *c)
-{
- if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) &&
- !queue_work(c->write_ref_wq, &c->snapshot_delete_work))
- bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
-}
-
-int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
- enum btree_id id,
- struct bpos pos)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret;
-
- bch2_trans_iter_init(trans, &iter, id, pos,
- BTREE_ITER_not_extents|
- BTREE_ITER_all_snapshots);
- while (1) {
- k = bch2_btree_iter_prev(&iter);
- ret = bkey_err(k);
- if (ret)
- break;
-
- if (!k.k)
- break;
-
- if (!bkey_eq(pos, k.k->p))
- break;
-
- if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
- ret = 1;
- break;
- }
- }
- bch2_trans_iter_exit(trans, &iter);
-
- return ret;
-}
-
-static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct bkey_s_c_snapshot snap;
- int ret = 0;
-
- if (k.k->type != KEY_TYPE_snapshot)
- return 0;
-
- snap = bkey_s_c_to_snapshot(k);
- if (BCH_SNAPSHOT_DELETED(snap.v) ||
- bch2_snapshot_equiv(c, k.k->p.offset) != k.k->p.offset ||
- (ret = bch2_snapshot_needs_delete(trans, k)) > 0) {
- set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
- return 0;
- }
-
- return ret;
-}
-
-int bch2_snapshots_read(struct bch_fs *c)
-{
- int ret = bch2_trans_run(c,
- for_each_btree_key(trans, iter, BTREE_ID_snapshots,
- POS_MIN, 0, k,
- __bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
- bch2_snapshot_set_equiv(trans, k) ?:
- bch2_check_snapshot_needs_deletion(trans, k)) ?:
- for_each_btree_key(trans, iter, BTREE_ID_snapshots,
- POS_MIN, 0, k,
- (set_is_ancestor_bitmap(c, k.k->p.offset), 0)));
- bch_err_fn(c, ret);
-
- /*
- * It's important that we check whether we need to reconstruct snapshots
- * before going RW, so we mark that pass as required in the superblock -
- * otherwise, we could end up deleting keys with missing snapshot nodes
- * instead.
- */
- BUG_ON(!test_bit(BCH_FS_new_fs, &c->flags) &&
- test_bit(BCH_FS_may_go_rw, &c->flags));
-
- if (bch2_err_matches(ret, EIO) ||
- (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_snapshots)))
- ret = bch2_run_explicit_recovery_pass_persistent(c, BCH_RECOVERY_PASS_reconstruct_snapshots);
-
- return ret;
-}
-
-void bch2_fs_snapshots_exit(struct bch_fs *c)
-{
- kvfree(rcu_dereference_protected(c->snapshots, true));
-}
diff --git a/fs/bcachefs/snapshot.h b/fs/bcachefs/snapshot.h
deleted file mode 100644
index 29c94716293e..000000000000
--- a/fs/bcachefs/snapshot.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SNAPSHOT_H
-#define _BCACHEFS_SNAPSHOT_H
-
-enum bch_validate_flags;
-
-void bch2_snapshot_tree_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-int bch2_snapshot_tree_validate(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags);
-
-#define bch2_bkey_ops_snapshot_tree ((struct bkey_ops) { \
- .key_validate = bch2_snapshot_tree_validate, \
- .val_to_text = bch2_snapshot_tree_to_text, \
- .min_val_size = 8, \
-})
-
-struct bkey_i_snapshot_tree *__bch2_snapshot_tree_create(struct btree_trans *);
-
-int bch2_snapshot_tree_lookup(struct btree_trans *, u32, struct bch_snapshot_tree *);
-
-void bch2_snapshot_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-int bch2_snapshot_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s,
- enum btree_iter_update_trigger_flags);
-
-#define bch2_bkey_ops_snapshot ((struct bkey_ops) { \
- .key_validate = bch2_snapshot_validate, \
- .val_to_text = bch2_snapshot_to_text, \
- .trigger = bch2_mark_snapshot, \
- .min_val_size = 24, \
-})
-
-static inline struct snapshot_t *__snapshot_t(struct snapshot_table *t, u32 id)
-{
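- /* IDs are allocated downwards from near U32_MAX, so index from the end: */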
- u32 idx = U32_MAX - id;
-
- return likely(t && idx < t->nr)
- ? &t->s[idx]
- : NULL;
-}
-
-static inline const struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
-{
- return __snapshot_t(rcu_dereference(c->snapshots), id);
-}
-
-static inline u32 bch2_snapshot_tree(struct bch_fs *c, u32 id)
-{
- rcu_read_lock();
- const struct snapshot_t *s = snapshot_t(c, id);
- id = s ? s->tree : 0;
- rcu_read_unlock();
-
- return id;
-}
-
-static inline u32 __bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
-{
- const struct snapshot_t *s = snapshot_t(c, id);
- return s ? s->parent : 0;
-}
-
-static inline u32 bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
-{
- rcu_read_lock();
- id = __bch2_snapshot_parent_early(c, id);
- rcu_read_unlock();
-
- return id;
-}
-
-static inline u32 __bch2_snapshot_parent(struct bch_fs *c, u32 id)
-{
- const struct snapshot_t *s = snapshot_t(c, id);
- if (!s)
- return 0;
-
- u32 parent = s->parent;
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
- parent &&
- s->depth != snapshot_t(c, parent)->depth + 1)
- panic("id %u depth=%u parent %u depth=%u\n",
- id, snapshot_t(c, id)->depth,
- parent, snapshot_t(c, parent)->depth);
-
- return parent;
-}
-
-static inline u32 bch2_snapshot_parent(struct bch_fs *c, u32 id)
-{
- rcu_read_lock();
- id = __bch2_snapshot_parent(c, id);
- rcu_read_unlock();
-
- return id;
-}
-
-static inline u32 bch2_snapshot_nth_parent(struct bch_fs *c, u32 id, u32 n)
-{
- rcu_read_lock();
- while (n--)
- id = __bch2_snapshot_parent(c, id);
- rcu_read_unlock();
-
- return id;
-}
-
-u32 bch2_snapshot_skiplist_get(struct bch_fs *, u32);
-
-static inline u32 bch2_snapshot_root(struct bch_fs *c, u32 id)
-{
- u32 parent;
-
- rcu_read_lock();
- while ((parent = __bch2_snapshot_parent(c, id)))
- id = parent;
- rcu_read_unlock();
-
- return id;
-}
-
-static inline u32 __bch2_snapshot_equiv(struct bch_fs *c, u32 id)
-{
- const struct snapshot_t *s = snapshot_t(c, id);
- return s ? s->equiv : 0;
-}
-
-static inline u32 bch2_snapshot_equiv(struct bch_fs *c, u32 id)
-{
- rcu_read_lock();
- id = __bch2_snapshot_equiv(c, id);
- rcu_read_unlock();
-
- return id;
-}
-
-static inline int bch2_snapshot_is_internal_node(struct bch_fs *c, u32 id)
-{
- rcu_read_lock();
- const struct snapshot_t *s = snapshot_t(c, id);
- int ret = s ? s->children[0] : -BCH_ERR_invalid_snapshot_node;
- rcu_read_unlock();
-
- return ret;
-}
-
-static inline int bch2_snapshot_is_leaf(struct bch_fs *c, u32 id)
-{
- int ret = bch2_snapshot_is_internal_node(c, id);
- if (ret < 0)
- return ret;
- return !ret;
-}
-
-static inline u32 bch2_snapshot_depth(struct bch_fs *c, u32 parent)
-{
- u32 depth;
-
- rcu_read_lock();
- depth = parent ? snapshot_t(c, parent)->depth + 1 : 0;
- rcu_read_unlock();
-
- return depth;
-}
-
-bool __bch2_snapshot_is_ancestor(struct bch_fs *, u32, u32);
-
-static inline bool bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
-{
- return id == ancestor
- ? true
- : __bch2_snapshot_is_ancestor(c, id, ancestor);
-}
-
-static inline bool bch2_snapshot_has_children(struct bch_fs *c, u32 id)
-{
- rcu_read_lock();
- const struct snapshot_t *t = snapshot_t(c, id);
- bool ret = t && (t->children[0]|t->children[1]) != 0;
- rcu_read_unlock();
-
- return ret;
-}
-
-static inline bool snapshot_list_has_id(snapshot_id_list *s, u32 id)
-{
- darray_for_each(*s, i)
- if (*i == id)
- return true;
- return false;
-}
-
-static inline bool snapshot_list_has_ancestor(struct bch_fs *c, snapshot_id_list *s, u32 id)
-{
- darray_for_each(*s, i)
- if (bch2_snapshot_is_ancestor(c, id, *i))
- return true;
- return false;
-}
-
-static inline int snapshot_list_add(struct bch_fs *c, snapshot_id_list *s, u32 id)
-{
- BUG_ON(snapshot_list_has_id(s, id));
- int ret = darray_push(s, id);
- if (ret)
- bch_err(c, "error reallocating snapshot_id_list (size %zu)", s->size);
- return ret;
-}
-
-static inline int snapshot_list_add_nodup(struct bch_fs *c, snapshot_id_list *s, u32 id)
-{
- int ret = snapshot_list_has_id(s, id)
- ? 0
- : darray_push(s, id);
- if (ret)
- bch_err(c, "error reallocating snapshot_id_list (size %zu)", s->size);
- return ret;
-}
-
-static inline int snapshot_list_merge(struct bch_fs *c, snapshot_id_list *dst, snapshot_id_list *src)
-{
- darray_for_each(*src, i) {
- int ret = snapshot_list_add_nodup(c, dst, *i);
- if (ret)
- return ret;
- }
-
- return 0;
-}
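-
-/*
- * Illustrative usage sketch for the snapshot_id_list helpers above;
- * example_list_contains() is hypothetical and only demonstrates the call
- * pattern (build a list, test membership, free it).
- */
-static inline bool example_list_contains(struct bch_fs *c, u32 a, u32 b, u32 id)
-{
- snapshot_id_list ids = { 0 };
- int ret = snapshot_list_add_nodup(c, &ids, a) ?:
- snapshot_list_add_nodup(c, &ids, b);
- bool found = !ret && snapshot_list_has_id(&ids, id);
-
- darray_exit(&ids);
- return found;
-}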
-
-int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
- struct bch_snapshot *s);
-int bch2_snapshot_get_subvol(struct btree_trans *, u32,
- struct bch_subvolume *);
-
-/* only exported for tests: */
-int bch2_snapshot_node_create(struct btree_trans *, u32,
- u32 *, u32 *, unsigned);
-
-int bch2_check_snapshot_trees(struct bch_fs *);
-int bch2_check_snapshots(struct bch_fs *);
-int bch2_reconstruct_snapshots(struct bch_fs *);
-int bch2_check_key_has_snapshot(struct btree_trans *, struct btree_iter *, struct bkey_s_c);
-
-int bch2_snapshot_node_set_deleted(struct btree_trans *, u32);
-void bch2_delete_dead_snapshots_work(struct work_struct *);
-
-int __bch2_key_has_snapshot_overwrites(struct btree_trans *, enum btree_id, struct bpos);
-
-static inline int bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
- enum btree_id id,
- struct bpos pos)
-{
- if (!btree_type_has_snapshots(id) ||
- bch2_snapshot_is_leaf(trans->c, pos.snapshot) > 0)
- return 0;
-
- return __bch2_key_has_snapshot_overwrites(trans, id, pos);
-}
-
-int bch2_snapshots_read(struct bch_fs *);
-void bch2_fs_snapshots_exit(struct bch_fs *);
-
-#endif /* _BCACHEFS_SNAPSHOT_H */
diff --git a/fs/bcachefs/snapshot_format.h b/fs/bcachefs/snapshot_format.h
deleted file mode 100644
index aabcd3a74cd9..000000000000
--- a/fs/bcachefs/snapshot_format.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SNAPSHOT_FORMAT_H
-#define _BCACHEFS_SNAPSHOT_FORMAT_H
-
-struct bch_snapshot {
- struct bch_val v;
- __le32 flags;
- __le32 parent;
- __le32 children[2];
- __le32 subvol;
- /* corresponds to a bch_snapshot_tree in BTREE_ID_snapshot_trees */
- __le32 tree;
- __le32 depth;
- __le32 skip[3];
- bch_le128 btime;
-};
-
-LE32_BITMASK(BCH_SNAPSHOT_DELETED, struct bch_snapshot, flags, 0, 1)
-
-/* True if a subvolume points to this snapshot node: */
-LE32_BITMASK(BCH_SNAPSHOT_SUBVOL, struct bch_snapshot, flags, 1, 2)
-
-/*
- * Snapshot trees:
- *
- * The snapshot_trees btree gives us a persistent identifier for each tree of
- * bch_snapshot nodes, and allows us to record and easily find the root/master
- * subvolume that other snapshots were created from:
- */
-struct bch_snapshot_tree {
- struct bch_val v;
- __le32 master_subvol;
- __le32 root_snapshot;
-};
-
-#endif /* _BCACHEFS_SNAPSHOT_FORMAT_H */
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
deleted file mode 100644
index ec2b1feea520..000000000000
--- a/fs/bcachefs/str_hash.h
+++ /dev/null
@@ -1,396 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_STR_HASH_H
-#define _BCACHEFS_STR_HASH_H
-
-#include "btree_iter.h"
-#include "btree_update.h"
-#include "checksum.h"
-#include "error.h"
-#include "inode.h"
-#include "siphash.h"
-#include "subvolume.h"
-#include "super.h"
-
-#include <linux/crc32c.h>
-#include <crypto/hash.h>
-#include <crypto/sha2.h>
-
-static inline enum bch_str_hash_type
-bch2_str_hash_opt_to_type(struct bch_fs *c, enum bch_str_hash_opts opt)
-{
- switch (opt) {
- case BCH_STR_HASH_OPT_crc32c:
- return BCH_STR_HASH_crc32c;
- case BCH_STR_HASH_OPT_crc64:
- return BCH_STR_HASH_crc64;
- case BCH_STR_HASH_OPT_siphash:
- return c->sb.features & (1ULL << BCH_FEATURE_new_siphash)
- ? BCH_STR_HASH_siphash
- : BCH_STR_HASH_siphash_old;
- default:
- BUG();
- }
-}
-
-struct bch_hash_info {
- u8 type;
- /*
- * For crc32 or crc64 string hashes, the first key value of
- * the siphash_key (k0) is used as the key.
- */
- SIPHASH_KEY siphash_key;
-};
-
-static inline struct bch_hash_info
-bch2_hash_info_init(struct bch_fs *c, const struct bch_inode_unpacked *bi)
-{
- /* XXX ick */
- struct bch_hash_info info = {
- .type = INODE_STR_HASH(bi),
- .siphash_key = { .k0 = bi->bi_hash_seed }
- };
-
- if (unlikely(info.type == BCH_STR_HASH_siphash_old)) {
- SHASH_DESC_ON_STACK(desc, c->sha256);
- u8 digest[SHA256_DIGEST_SIZE];
-
- desc->tfm = c->sha256;
-
- crypto_shash_digest(desc, (void *) &bi->bi_hash_seed,
- sizeof(bi->bi_hash_seed), digest);
- memcpy(&info.siphash_key, digest, sizeof(info.siphash_key));
- }
-
- return info;
-}
-
-struct bch_str_hash_ctx {
- union {
- u32 crc32c;
- u64 crc64;
- SIPHASH_CTX siphash;
- };
-};
-
-static inline void bch2_str_hash_init(struct bch_str_hash_ctx *ctx,
- const struct bch_hash_info *info)
-{
- switch (info->type) {
- case BCH_STR_HASH_crc32c:
- ctx->crc32c = crc32c(~0, &info->siphash_key.k0,
- sizeof(info->siphash_key.k0));
- break;
- case BCH_STR_HASH_crc64:
- ctx->crc64 = crc64_be(~0, &info->siphash_key.k0,
- sizeof(info->siphash_key.k0));
- break;
- case BCH_STR_HASH_siphash_old:
- case BCH_STR_HASH_siphash:
- SipHash24_Init(&ctx->siphash, &info->siphash_key);
- break;
- default:
- BUG();
- }
-}
-
-static inline void bch2_str_hash_update(struct bch_str_hash_ctx *ctx,
- const struct bch_hash_info *info,
- const void *data, size_t len)
-{
- switch (info->type) {
- case BCH_STR_HASH_crc32c:
- ctx->crc32c = crc32c(ctx->crc32c, data, len);
- break;
- case BCH_STR_HASH_crc64:
- ctx->crc64 = crc64_be(ctx->crc64, data, len);
- break;
- case BCH_STR_HASH_siphash_old:
- case BCH_STR_HASH_siphash:
- SipHash24_Update(&ctx->siphash, data, len);
- break;
- default:
- BUG();
- }
-}
-
-static inline u64 bch2_str_hash_end(struct bch_str_hash_ctx *ctx,
- const struct bch_hash_info *info)
-{
- switch (info->type) {
- case BCH_STR_HASH_crc32c:
- return ctx->crc32c;
- case BCH_STR_HASH_crc64:
- return ctx->crc64 >> 1;
- case BCH_STR_HASH_siphash_old:
- case BCH_STR_HASH_siphash:
- return SipHash24_End(&ctx->siphash) >> 1;
- default:
- BUG();
- }
-}
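-
-/*
- * Illustrative usage sketch: hashing a name with the helpers above, given a
- * bch_hash_info such as the one returned by bch2_hash_info_init();
- * example_str_hash_name() is a hypothetical helper, not part of this API.
- */
-static inline u64 example_str_hash_name(const struct bch_hash_info *info,
- const char *name, size_t len)
-{
- struct bch_str_hash_ctx ctx;
-
- bch2_str_hash_init(&ctx, info);
- bch2_str_hash_update(&ctx, info, name, len);
- return bch2_str_hash_end(&ctx, info);
-}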
-
-struct bch_hash_desc {
- enum btree_id btree_id;
- u8 key_type;
-
- u64 (*hash_key)(const struct bch_hash_info *, const void *);
- u64 (*hash_bkey)(const struct bch_hash_info *, struct bkey_s_c);
- bool (*cmp_key)(struct bkey_s_c, const void *);
- bool (*cmp_bkey)(struct bkey_s_c, struct bkey_s_c);
- bool (*is_visible)(subvol_inum inum, struct bkey_s_c);
-};
-
-static inline bool is_visible_key(struct bch_hash_desc desc, subvol_inum inum, struct bkey_s_c k)
-{
- return k.k->type == desc.key_type &&
- (!desc.is_visible ||
- !inum.inum ||
- desc.is_visible(inum, k));
-}
-
-static __always_inline struct bkey_s_c
-bch2_hash_lookup_in_snapshot(struct btree_trans *trans,
- struct btree_iter *iter,
- const struct bch_hash_desc desc,
- const struct bch_hash_info *info,
- subvol_inum inum, const void *key,
- enum btree_iter_update_trigger_flags flags,
- u32 snapshot)
-{
- struct bkey_s_c k;
- int ret;
-
- for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id,
- SPOS(inum.inum, desc.hash_key(info, key), snapshot),
- POS(inum.inum, U64_MAX),
- BTREE_ITER_slots|flags, k, ret) {
- if (is_visible_key(desc, inum, k)) {
- if (!desc.cmp_key(k, key))
- return k;
- } else if (k.k->type == KEY_TYPE_hash_whiteout) {
- ;
- } else {
- /* hole, not found */
- break;
- }
- }
- bch2_trans_iter_exit(trans, iter);
-
- return bkey_s_c_err(ret ?: -BCH_ERR_ENOENT_str_hash_lookup);
-}
-
-static __always_inline struct bkey_s_c
-bch2_hash_lookup(struct btree_trans *trans,
- struct btree_iter *iter,
- const struct bch_hash_desc desc,
- const struct bch_hash_info *info,
- subvol_inum inum, const void *key,
- enum btree_iter_update_trigger_flags flags)
-{
- u32 snapshot;
- int ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- return bkey_s_c_err(ret);
-
- return bch2_hash_lookup_in_snapshot(trans, iter, desc, info, inum, key, flags, snapshot);
-}
-
-static __always_inline int
-bch2_hash_hole(struct btree_trans *trans,
- struct btree_iter *iter,
- const struct bch_hash_desc desc,
- const struct bch_hash_info *info,
- subvol_inum inum, const void *key)
-{
- struct bkey_s_c k;
- u32 snapshot;
- int ret;
-
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- return ret;
-
- for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id,
- SPOS(inum.inum, desc.hash_key(info, key), snapshot),
- POS(inum.inum, U64_MAX),
- BTREE_ITER_slots|BTREE_ITER_intent, k, ret)
- if (!is_visible_key(desc, inum, k))
- return 0;
- bch2_trans_iter_exit(trans, iter);
-
- return ret ?: -BCH_ERR_ENOSPC_str_hash_create;
-}
-
-static __always_inline
-int bch2_hash_needs_whiteout(struct btree_trans *trans,
- const struct bch_hash_desc desc,
- const struct bch_hash_info *info,
- struct btree_iter *start)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret;
-
- bch2_trans_copy_iter(&iter, start);
-
- bch2_btree_iter_advance(&iter);
-
- for_each_btree_key_continue_norestart(iter, BTREE_ITER_slots, k, ret) {
- if (k.k->type != desc.key_type &&
- k.k->type != KEY_TYPE_hash_whiteout)
- break;
-
- if (k.k->type == desc.key_type &&
- desc.hash_bkey(info, k) <= start->pos.offset) {
- ret = 1;
- break;
- }
- }
-
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static __always_inline
-struct bkey_s_c bch2_hash_set_or_get_in_snapshot(struct btree_trans *trans,
- struct btree_iter *iter,
- const struct bch_hash_desc desc,
- const struct bch_hash_info *info,
- subvol_inum inum, u32 snapshot,
- struct bkey_i *insert,
- enum btree_iter_update_trigger_flags flags)
-{
- struct btree_iter slot = {};
- struct bkey_s_c k;
- bool found = false;
- int ret;
-
- for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id,
- SPOS(insert->k.p.inode,
- desc.hash_bkey(info, bkey_i_to_s_c(insert)),
- snapshot),
- POS(insert->k.p.inode, U64_MAX),
- BTREE_ITER_slots|BTREE_ITER_intent|flags, k, ret) {
- if (is_visible_key(desc, inum, k)) {
- if (!desc.cmp_bkey(k, bkey_i_to_s_c(insert)))
- goto found;
-
- /* hash collision: */
- continue;
- }
-
- if (!slot.path && !(flags & STR_HASH_must_replace))
- bch2_trans_copy_iter(&slot, iter);
-
- if (k.k->type != KEY_TYPE_hash_whiteout)
- goto not_found;
- }
-
- if (!ret)
- ret = -BCH_ERR_ENOSPC_str_hash_create;
-out:
- bch2_trans_iter_exit(trans, &slot);
- bch2_trans_iter_exit(trans, iter);
- return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
-found:
- found = true;
-not_found:
- if (found && (flags & STR_HASH_must_create)) {
- bch2_trans_iter_exit(trans, &slot);
- return k;
- } else if (!found && (flags & STR_HASH_must_replace)) {
- ret = -BCH_ERR_ENOENT_str_hash_set_must_replace;
- } else {
- if (!found && slot.path)
- swap(*iter, slot);
-
- insert->k.p = iter->pos;
- ret = bch2_trans_update(trans, iter, insert, flags);
- }
-
- goto out;
-}
-
-static __always_inline
-int bch2_hash_set_in_snapshot(struct btree_trans *trans,
- const struct bch_hash_desc desc,
- const struct bch_hash_info *info,
- subvol_inum inum, u32 snapshot,
- struct bkey_i *insert,
- enum btree_iter_update_trigger_flags flags)
-{
- struct btree_iter iter;
- struct bkey_s_c k = bch2_hash_set_or_get_in_snapshot(trans, &iter, desc, info, inum,
- snapshot, insert, flags);
- int ret = bkey_err(k);
- if (ret)
- return ret;
- if (k.k) {
- bch2_trans_iter_exit(trans, &iter);
- return -BCH_ERR_EEXIST_str_hash_set;
- }
-
- return 0;
-}
-
-static __always_inline
-int bch2_hash_set(struct btree_trans *trans,
- const struct bch_hash_desc desc,
- const struct bch_hash_info *info,
- subvol_inum inum,
- struct bkey_i *insert,
- enum btree_iter_update_trigger_flags flags)
-{
- insert->k.p.inode = inum.inum;
-
- u32 snapshot;
- return bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot) ?:
- bch2_hash_set_in_snapshot(trans, desc, info, inum,
- snapshot, insert, flags);
-}
-
-static __always_inline
-int bch2_hash_delete_at(struct btree_trans *trans,
- const struct bch_hash_desc desc,
- const struct bch_hash_info *info,
- struct btree_iter *iter,
- enum btree_iter_update_trigger_flags flags)
-{
- struct bkey_i *delete;
- int ret;
-
- delete = bch2_trans_kmalloc(trans, sizeof(*delete));
- ret = PTR_ERR_OR_ZERO(delete);
- if (ret)
- return ret;
-
- ret = bch2_hash_needs_whiteout(trans, desc, info, iter);
- if (ret < 0)
- return ret;
-
- bkey_init(&delete->k);
- delete->k.p = iter->pos;
- delete->k.type = ret ? KEY_TYPE_hash_whiteout : KEY_TYPE_deleted;
-
- return bch2_trans_update(trans, iter, delete, flags);
-}
-
-static __always_inline
-int bch2_hash_delete(struct btree_trans *trans,
- const struct bch_hash_desc desc,
- const struct bch_hash_info *info,
- subvol_inum inum, const void *key)
-{
- struct btree_iter iter;
- struct bkey_s_c k = bch2_hash_lookup(trans, &iter, desc, info, inum, key,
- BTREE_ITER_intent);
- int ret = bkey_err(k);
- if (ret)
- return ret;
-
- ret = bch2_hash_delete_at(trans, desc, info, &iter, 0);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-#endif /* _BCACHEFS_STR_HASH_H */
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
deleted file mode 100644
index 80e5efaff524..000000000000
--- a/fs/bcachefs/subvolume.c
+++ /dev/null
@@ -1,691 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "btree_key_cache.h"
-#include "btree_update.h"
-#include "errcode.h"
-#include "error.h"
-#include "fs.h"
-#include "snapshot.h"
-#include "subvolume.h"
-
-#include <linux/random.h>
-
-static int bch2_subvolume_delete(struct btree_trans *, u32);
-
-static struct bpos subvolume_children_pos(struct bkey_s_c k)
-{
- if (k.k->type != KEY_TYPE_subvolume)
- return POS_MIN;
-
- struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
- if (!s.v->fs_path_parent)
- return POS_MIN;
- return POS(le32_to_cpu(s.v->fs_path_parent), s.k->p.offset);
-}
-
-static int check_subvol(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct bkey_s_c_subvolume subvol;
- struct btree_iter subvol_children_iter = {};
- struct bch_snapshot snapshot;
- struct printbuf buf = PRINTBUF;
- unsigned snapid;
- int ret = 0;
-
- if (k.k->type != KEY_TYPE_subvolume)
- return 0;
-
- subvol = bkey_s_c_to_subvolume(k);
- snapid = le32_to_cpu(subvol.v->snapshot);
- ret = bch2_snapshot_lookup(trans, snapid, &snapshot);
-
- if (bch2_err_matches(ret, ENOENT))
- bch_err(c, "subvolume %llu points to nonexistent snapshot %u",
- k.k->p.offset, snapid);
- if (ret)
- return ret;
-
- if (BCH_SUBVOLUME_UNLINKED(subvol.v)) {
- ret = bch2_subvolume_delete(trans, iter->pos.offset);
- bch_err_msg(c, ret, "deleting subvolume %llu", iter->pos.offset);
- return ret ?: -BCH_ERR_transaction_restart_nested;
- }
-
- if (fsck_err_on(subvol.k->p.offset == BCACHEFS_ROOT_SUBVOL &&
- subvol.v->fs_path_parent,
- trans, subvol_root_fs_path_parent_nonzero,
- "root subvolume has nonzero fs_path_parent\n%s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- struct bkey_i_subvolume *n =
- bch2_bkey_make_mut_typed(trans, iter, &subvol.s_c, 0, subvolume);
- ret = PTR_ERR_OR_ZERO(n);
- if (ret)
- goto err;
-
- n->v.fs_path_parent = 0;
- }
-
- if (subvol.v->fs_path_parent) {
- struct bpos pos = subvolume_children_pos(k);
-
- struct bkey_s_c subvol_children_k =
- bch2_bkey_get_iter(trans, &subvol_children_iter,
- BTREE_ID_subvolume_children, pos, 0);
- ret = bkey_err(subvol_children_k);
- if (ret)
- goto err;
-
- if (fsck_err_on(subvol_children_k.k->type != KEY_TYPE_set,
- trans, subvol_children_not_set,
- "subvolume not set in subvolume_children btree at %llu:%llu\n%s",
- pos.inode, pos.offset,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- ret = bch2_btree_bit_mod(trans, BTREE_ID_subvolume_children, pos, true);
- if (ret)
- goto err;
- }
- }
-
- struct bch_inode_unpacked inode;
- ret = bch2_inode_find_by_inum_nowarn_trans(trans,
- (subvol_inum) { k.k->p.offset, le64_to_cpu(subvol.v->inode) },
- &inode);
- if (!ret) {
- if (fsck_err_on(inode.bi_subvol != subvol.k->p.offset,
- trans, subvol_root_wrong_bi_subvol,
- "subvol root %llu:%u has wrong bi_subvol field: got %u, should be %llu",
- inode.bi_inum, inode.bi_snapshot,
- inode.bi_subvol, subvol.k->p.offset)) {
- inode.bi_subvol = subvol.k->p.offset;
- inode.bi_snapshot = le32_to_cpu(subvol.v->snapshot);
- ret = __bch2_fsck_write_inode(trans, &inode);
- if (ret)
- goto err;
- }
- } else if (bch2_err_matches(ret, ENOENT)) {
- if (fsck_err(trans, subvol_to_missing_root,
- "subvolume %llu points to missing subvolume root %llu:%u",
- k.k->p.offset, le64_to_cpu(subvol.v->inode),
- le32_to_cpu(subvol.v->snapshot))) {
- ret = bch2_subvolume_delete(trans, iter->pos.offset);
- bch_err_msg(c, ret, "deleting subvolume %llu", iter->pos.offset);
- ret = ret ?: -BCH_ERR_transaction_restart_nested;
- goto err;
- }
- } else {
- goto err;
- }
-
- if (!BCH_SUBVOLUME_SNAP(subvol.v)) {
- u32 snapshot_root = bch2_snapshot_root(c, le32_to_cpu(subvol.v->snapshot));
- u32 snapshot_tree;
- struct bch_snapshot_tree st;
-
- rcu_read_lock();
- snapshot_tree = snapshot_t(c, snapshot_root)->tree;
- rcu_read_unlock();
-
- ret = bch2_snapshot_tree_lookup(trans, snapshot_tree, &st);
-
- bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
- "%s: snapshot tree %u not found", __func__, snapshot_tree);
-
- if (ret)
- goto err;
-
- if (fsck_err_on(le32_to_cpu(st.master_subvol) != subvol.k->p.offset,
- trans, subvol_not_master_and_not_snapshot,
- "subvolume %llu is not set as snapshot but is not master subvolume",
- k.k->p.offset)) {
- struct bkey_i_subvolume *s =
- bch2_bkey_make_mut_typed(trans, iter, &subvol.s_c, 0, subvolume);
- ret = PTR_ERR_OR_ZERO(s);
- if (ret)
- goto err;
-
- SET_BCH_SUBVOLUME_SNAP(&s->v, true);
- }
- }
-err:
-fsck_err:
- bch2_trans_iter_exit(trans, &subvol_children_iter);
- printbuf_exit(&buf);
- return ret;
-}
-
-int bch2_check_subvols(struct bch_fs *c)
-{
- int ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter,
- BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- check_subvol(trans, &iter, k)));
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int check_subvol_child(struct btree_trans *trans,
- struct btree_iter *child_iter,
- struct bkey_s_c child_k)
-{
- struct bch_subvolume s;
- int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_subvolumes, POS(0, child_k.k->p.offset),
- 0, subvolume, &s);
- if (ret && !bch2_err_matches(ret, ENOENT))
- return ret;
-
- if (fsck_err_on(ret ||
- le32_to_cpu(s.fs_path_parent) != child_k.k->p.inode,
- trans, subvol_children_bad,
- "incorrect entry in subvolume_children btree %llu:%llu",
- child_k.k->p.inode, child_k.k->p.offset)) {
- ret = bch2_btree_delete_at(trans, child_iter, 0);
- if (ret)
- goto err;
- }
-err:
-fsck_err:
- return ret;
-}
-
-int bch2_check_subvol_children(struct bch_fs *c)
-{
- int ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter,
- BTREE_ID_subvolume_children, POS_MIN, BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- check_subvol_child(trans, &iter, k)));
- bch_err_fn(c, ret);
- return 0;
-}
-
-/* Subvolumes: */
-
-int bch2_subvolume_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct bkey_s_c_subvolume subvol = bkey_s_c_to_subvolume(k);
- int ret = 0;
-
- bkey_fsck_err_on(bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
- bkey_gt(k.k->p, SUBVOL_POS_MAX),
- c, subvol_pos_bad,
- "invalid pos");
-
- bkey_fsck_err_on(!subvol.v->snapshot,
- c, subvol_snapshot_bad,
- "invalid snapshot");
-
- bkey_fsck_err_on(!subvol.v->inode,
- c, subvol_inode_bad,
- "invalid inode");
-fsck_err:
- return ret;
-}
-
-void bch2_subvolume_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
-
- prt_printf(out, "root %llu snapshot id %u",
- le64_to_cpu(s.v->inode),
- le32_to_cpu(s.v->snapshot));
-
- if (bkey_val_bytes(s.k) > offsetof(struct bch_subvolume, creation_parent)) {
- prt_printf(out, " creation_parent %u", le32_to_cpu(s.v->creation_parent));
- prt_printf(out, " fs_parent %u", le32_to_cpu(s.v->fs_path_parent));
- }
-}
-
-static int subvolume_children_mod(struct btree_trans *trans, struct bpos pos, bool set)
-{
- return !bpos_eq(pos, POS_MIN)
- ? bch2_btree_bit_mod(trans, BTREE_ID_subvolume_children, pos, set)
- : 0;
-}
-
-int bch2_subvolume_trigger(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old, struct bkey_s new,
- enum btree_iter_update_trigger_flags flags)
-{
- if (flags & BTREE_TRIGGER_transactional) {
- struct bpos children_pos_old = subvolume_children_pos(old);
- struct bpos children_pos_new = subvolume_children_pos(new.s_c);
-
- if (!bpos_eq(children_pos_old, children_pos_new)) {
- int ret = subvolume_children_mod(trans, children_pos_old, false) ?:
- subvolume_children_mod(trans, children_pos_new, true);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-int bch2_subvol_has_children(struct btree_trans *trans, u32 subvol)
-{
- struct btree_iter iter;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolume_children, POS(subvol, 0), 0);
- struct bkey_s_c k = bch2_btree_iter_peek(&iter);
- bch2_trans_iter_exit(trans, &iter);
-
- return bkey_err(k) ?: k.k && k.k->p.inode == subvol
- ? -BCH_ERR_ENOTEMPTY_subvol_not_empty
- : 0;
-}
-
-static __always_inline int
-bch2_subvolume_get_inlined(struct btree_trans *trans, unsigned subvol,
- bool inconsistent_if_not_found,
- int iter_flags,
- struct bch_subvolume *s)
-{
- int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_subvolumes, POS(0, subvol),
- iter_flags, subvolume, s);
- bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT) &&
- inconsistent_if_not_found,
- trans->c, "missing subvolume %u", subvol);
- return ret;
-}
-
-int bch2_subvolume_get(struct btree_trans *trans, unsigned subvol,
- bool inconsistent_if_not_found,
- int iter_flags,
- struct bch_subvolume *s)
-{
- return bch2_subvolume_get_inlined(trans, subvol, inconsistent_if_not_found, iter_flags, s);
-}
-
-int bch2_subvol_is_ro_trans(struct btree_trans *trans, u32 subvol)
-{
- struct bch_subvolume s;
- int ret = bch2_subvolume_get_inlined(trans, subvol, true, 0, &s);
- if (ret)
- return ret;
-
- if (BCH_SUBVOLUME_RO(&s))
- return -EROFS;
- return 0;
-}
-
-int bch2_subvol_is_ro(struct bch_fs *c, u32 subvol)
-{
- return bch2_trans_do(c, bch2_subvol_is_ro_trans(trans, subvol));
-}
-
-int bch2_snapshot_get_subvol(struct btree_trans *trans, u32 snapshot,
- struct bch_subvolume *subvol)
-{
- struct bch_snapshot snap;
-
- return bch2_snapshot_lookup(trans, snapshot, &snap) ?:
- bch2_subvolume_get(trans, le32_to_cpu(snap.subvol), true, 0, subvol);
-}
-
-int __bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvolid,
- u32 *snapid, bool warn)
-{
- struct btree_iter iter;
- struct bkey_s_c_subvolume subvol;
- int ret;
-
- subvol = bch2_bkey_get_iter_typed(trans, &iter,
- BTREE_ID_subvolumes, POS(0, subvolid),
- BTREE_ITER_cached|BTREE_ITER_with_updates,
- subvolume);
- ret = bkey_err(subvol);
-
- bch2_fs_inconsistent_on(warn && bch2_err_matches(ret, ENOENT), trans->c,
- "missing subvolume %u", subvolid);
-
- if (likely(!ret))
- *snapid = le32_to_cpu(subvol.v->snapshot);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-int bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvolid,
- u32 *snapid)
-{
- return __bch2_subvolume_get_snapshot(trans, subvolid, snapid, true);
-}
-
-static int bch2_subvolume_reparent(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k,
- u32 old_parent, u32 new_parent)
-{
- struct bkey_i_subvolume *s;
- int ret;
-
- if (k.k->type != KEY_TYPE_subvolume)
- return 0;
-
- if (bkey_val_bytes(k.k) > offsetof(struct bch_subvolume, creation_parent) &&
- le32_to_cpu(bkey_s_c_to_subvolume(k).v->creation_parent) != old_parent)
- return 0;
-
- s = bch2_bkey_make_mut_typed(trans, iter, &k, 0, subvolume);
- ret = PTR_ERR_OR_ZERO(s);
- if (ret)
- return ret;
-
- s->v.creation_parent = cpu_to_le32(new_parent);
- return 0;
-}
-
-/*
- * Separate from the snapshot tree in the snapshots btree, we record the tree
- * structure of how snapshot subvolumes were created - the parent subvolume of
- * each snapshot subvolume.
- *
- * When a subvolume is deleted, we scan for child subvolumes and reparent them,
- * to avoid dangling references:
- */
-static int bch2_subvolumes_reparent(struct btree_trans *trans, u32 subvolid_to_delete)
-{
- struct bch_subvolume s;
-
- return lockrestart_do(trans,
- bch2_subvolume_get(trans, subvolid_to_delete, true,
- BTREE_ITER_cached, &s)) ?:
- for_each_btree_key_commit(trans, iter,
- BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- bch2_subvolume_reparent(trans, &iter, k,
- subvolid_to_delete, le32_to_cpu(s.creation_parent)));
-}
-
-/*
- * Delete subvolume, mark snapshot ID as deleted, queue up snapshot
- * deletion/cleanup:
- */
-static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
-{
- struct btree_iter iter;
- struct bkey_s_c_subvolume subvol;
- u32 snapid;
- int ret = 0;
-
- subvol = bch2_bkey_get_iter_typed(trans, &iter,
- BTREE_ID_subvolumes, POS(0, subvolid),
- BTREE_ITER_cached|BTREE_ITER_intent,
- subvolume);
- ret = bkey_err(subvol);
- bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
- "missing subvolume %u", subvolid);
- if (ret)
- return ret;
-
- snapid = le32_to_cpu(subvol.v->snapshot);
-
- ret = bch2_btree_delete_at(trans, &iter, 0) ?:
- bch2_snapshot_node_set_deleted(trans, snapid);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static int bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
-{
- return bch2_subvolumes_reparent(trans, subvolid) ?:
- commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- __bch2_subvolume_delete(trans, subvolid));
-}
-
-static void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *work)
-{
- struct bch_fs *c = container_of(work, struct bch_fs,
- snapshot_wait_for_pagecache_and_delete_work);
- snapshot_id_list s;
- u32 *id;
- int ret = 0;
-
- while (!ret) {
- mutex_lock(&c->snapshots_unlinked_lock);
- s = c->snapshots_unlinked;
- darray_init(&c->snapshots_unlinked);
- mutex_unlock(&c->snapshots_unlinked_lock);
-
- if (!s.nr)
- break;
-
- bch2_evict_subvolume_inodes(c, &s);
-
- for (id = s.data; id < s.data + s.nr; id++) {
- ret = bch2_trans_run(c, bch2_subvolume_delete(trans, *id));
- bch_err_msg(c, ret, "deleting subvolume %u", *id);
- if (ret)
- break;
- }
-
- darray_exit(&s);
- }
-
- bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache);
-}
-
-struct subvolume_unlink_hook {
- struct btree_trans_commit_hook h;
- u32 subvol;
-};
-
-static int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans *trans,
- struct btree_trans_commit_hook *_h)
-{
- struct subvolume_unlink_hook *h = container_of(_h, struct subvolume_unlink_hook, h);
- struct bch_fs *c = trans->c;
- int ret = 0;
-
- mutex_lock(&c->snapshots_unlinked_lock);
- if (!snapshot_list_has_id(&c->snapshots_unlinked, h->subvol))
- ret = snapshot_list_add(c, &c->snapshots_unlinked, h->subvol);
- mutex_unlock(&c->snapshots_unlinked_lock);
-
- if (ret)
- return ret;
-
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_snapshot_delete_pagecache))
- return -EROFS;
-
- if (!queue_work(c->write_ref_wq, &c->snapshot_wait_for_pagecache_and_delete_work))
- bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache);
- return 0;
-}
-
-int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
-{
- struct btree_iter iter;
- struct bkey_i_subvolume *n;
- struct subvolume_unlink_hook *h;
- int ret = 0;
-
- h = bch2_trans_kmalloc(trans, sizeof(*h));
- ret = PTR_ERR_OR_ZERO(h);
- if (ret)
- return ret;
-
- h->h.fn = bch2_subvolume_wait_for_pagecache_and_delete_hook;
- h->subvol = subvolid;
- bch2_trans_commit_hook(trans, &h->h);
-
- n = bch2_bkey_get_mut_typed(trans, &iter,
- BTREE_ID_subvolumes, POS(0, subvolid),
- BTREE_ITER_cached, subvolume);
- ret = PTR_ERR_OR_ZERO(n);
- if (unlikely(ret)) {
- bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
- "missing subvolume %u", subvolid);
- return ret;
- }
-
- SET_BCH_SUBVOLUME_UNLINKED(&n->v, true);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
- u32 parent_subvolid,
- u32 src_subvolid,
- u32 *new_subvolid,
- u32 *new_snapshotid,
- bool ro)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter dst_iter, src_iter = (struct btree_iter) { NULL };
- struct bkey_i_subvolume *new_subvol = NULL;
- struct bkey_i_subvolume *src_subvol = NULL;
- u32 parent = 0, new_nodes[2], snapshot_subvols[2];
- int ret = 0;
-
- ret = bch2_bkey_get_empty_slot(trans, &dst_iter,
- BTREE_ID_subvolumes, POS(0, U32_MAX));
- if (ret == -BCH_ERR_ENOSPC_btree_slot)
- ret = -BCH_ERR_ENOSPC_subvolume_create;
- if (ret)
- return ret;
-
- snapshot_subvols[0] = dst_iter.pos.offset;
- snapshot_subvols[1] = src_subvolid;
-
- if (src_subvolid) {
- /* Creating a snapshot: */
-
- src_subvol = bch2_bkey_get_mut_typed(trans, &src_iter,
- BTREE_ID_subvolumes, POS(0, src_subvolid),
- BTREE_ITER_cached, subvolume);
- ret = PTR_ERR_OR_ZERO(src_subvol);
- if (unlikely(ret)) {
- bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
- "subvolume %u not found", src_subvolid);
- goto err;
- }
-
- parent = le32_to_cpu(src_subvol->v.snapshot);
- }
-
- ret = bch2_snapshot_node_create(trans, parent, new_nodes,
- snapshot_subvols,
- src_subvolid ? 2 : 1);
- if (ret)
- goto err;
-
- if (src_subvolid) {
- src_subvol->v.snapshot = cpu_to_le32(new_nodes[1]);
- ret = bch2_trans_update(trans, &src_iter, &src_subvol->k_i, 0);
- if (ret)
- goto err;
- }
-
- new_subvol = bch2_bkey_alloc(trans, &dst_iter, 0, subvolume);
- ret = PTR_ERR_OR_ZERO(new_subvol);
- if (ret)
- goto err;
-
- new_subvol->v.flags = 0;
- new_subvol->v.snapshot = cpu_to_le32(new_nodes[0]);
- new_subvol->v.inode = cpu_to_le64(inode);
- new_subvol->v.creation_parent = cpu_to_le32(src_subvolid);
- new_subvol->v.fs_path_parent = cpu_to_le32(parent_subvolid);
- new_subvol->v.otime.lo = cpu_to_le64(bch2_current_time(c));
- new_subvol->v.otime.hi = 0;
-
- SET_BCH_SUBVOLUME_RO(&new_subvol->v, ro);
- SET_BCH_SUBVOLUME_SNAP(&new_subvol->v, src_subvolid != 0);
-
- *new_subvolid = new_subvol->k.p.offset;
- *new_snapshotid = new_nodes[0];
-err:
- bch2_trans_iter_exit(trans, &src_iter);
- bch2_trans_iter_exit(trans, &dst_iter);
- return ret;
-}
-
-int bch2_initialize_subvolumes(struct bch_fs *c)
-{
- struct bkey_i_snapshot_tree root_tree;
- struct bkey_i_snapshot root_snapshot;
- struct bkey_i_subvolume root_volume;
- int ret;
-
- bkey_snapshot_tree_init(&root_tree.k_i);
- root_tree.k.p.offset = 1;
- root_tree.v.master_subvol = cpu_to_le32(1);
- root_tree.v.root_snapshot = cpu_to_le32(U32_MAX);
-
- bkey_snapshot_init(&root_snapshot.k_i);
- root_snapshot.k.p.offset = U32_MAX;
- root_snapshot.v.flags = 0;
- root_snapshot.v.parent = 0;
- root_snapshot.v.subvol = cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
- root_snapshot.v.tree = cpu_to_le32(1);
- SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);
-
- bkey_subvolume_init(&root_volume.k_i);
- root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
- root_volume.v.flags = 0;
- root_volume.v.snapshot = cpu_to_le32(U32_MAX);
- root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO);
-
- ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0, 0) ?:
- bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0, 0) ?:
- bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0, 0);
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bch_inode_unpacked inode;
- int ret;
-
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
- SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
- ret = bkey_err(k);
- if (ret)
- return ret;
-
- if (!bkey_is_inode(k.k)) {
- bch_err(trans->c, "root inode not found");
- ret = -BCH_ERR_ENOENT_inode;
- goto err;
- }
-
- ret = bch2_inode_unpack(k, &inode);
- BUG_ON(ret);
-
- inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
-
- ret = bch2_inode_write(trans, &iter, &inode);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-/* set bi_subvol on root inode */
-int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
-{
- int ret = bch2_trans_commit_do(c, NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
- __bch2_fs_upgrade_for_subvolumes(trans));
- bch_err_fn(c, ret);
- return ret;
-}
-
-int bch2_fs_subvolumes_init(struct bch_fs *c)
-{
- INIT_WORK(&c->snapshot_delete_work, bch2_delete_dead_snapshots_work);
- INIT_WORK(&c->snapshot_wait_for_pagecache_and_delete_work,
- bch2_subvolume_wait_for_pagecache_and_delete);
- mutex_init(&c->snapshots_unlinked_lock);
- return 0;
-}
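
As context for the functions above, subvolume creation is normally driven from the VFS/ioctl layer, which also creates the new subvolume's root inode and dirent. The sketch below only illustrates the transaction pattern around bch2_subvolume_create() itself; the wrapper name, the root inode argument, and the commit flags are assumptions for illustration, not taken from this patch.

/*
 * Illustrative sketch only (not part of this patch): create a read-only
 * snapshot of @src_subvolid, recording @parent_subvolid as its fs-path
 * parent. Real callers additionally create the root inode/dirent.
 */
static int example_create_snapshot(struct bch_fs *c, u64 root_inode,
				   u32 parent_subvolid, u32 src_subvolid)
{
	u32 new_subvolid = 0, new_snapshotid = 0;

	int ret = bch2_trans_commit_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			bch2_subvolume_create(trans, root_inode,
					      parent_subvolid, src_subvolid,
					      &new_subvolid, &new_snapshotid,
					      true /* read-only */));
	if (!ret)
		bch_info(c, "created snapshot subvolume %u (snapshot %u)",
			 new_subvolid, new_snapshotid);
	return ret;
}
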
diff --git a/fs/bcachefs/subvolume.h b/fs/bcachefs/subvolume.h
deleted file mode 100644
index f897d106e142..000000000000
--- a/fs/bcachefs/subvolume.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SUBVOLUME_H
-#define _BCACHEFS_SUBVOLUME_H
-
-#include "darray.h"
-#include "subvolume_types.h"
-
-enum bch_validate_flags;
-
-int bch2_check_subvols(struct bch_fs *);
-int bch2_check_subvol_children(struct bch_fs *);
-
-int bch2_subvolume_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-void bch2_subvolume_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-int bch2_subvolume_trigger(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s,
- enum btree_iter_update_trigger_flags);
-
-#define bch2_bkey_ops_subvolume ((struct bkey_ops) { \
- .key_validate = bch2_subvolume_validate, \
- .val_to_text = bch2_subvolume_to_text, \
- .trigger = bch2_subvolume_trigger, \
- .min_val_size = 16, \
-})
-
-int bch2_subvol_has_children(struct btree_trans *, u32);
-int bch2_subvolume_get(struct btree_trans *, unsigned,
- bool, int, struct bch_subvolume *);
-int __bch2_subvolume_get_snapshot(struct btree_trans *, u32,
- u32 *, bool);
-int bch2_subvolume_get_snapshot(struct btree_trans *, u32, u32 *);
-
-int bch2_subvol_is_ro_trans(struct btree_trans *, u32);
-int bch2_subvol_is_ro(struct bch_fs *, u32);
-
-static inline struct bkey_s_c
-bch2_btree_iter_peek_in_subvolume_upto_type(struct btree_iter *iter, struct bpos end,
- u32 subvolid, unsigned flags)
-{
- u32 snapshot;
- int ret = bch2_subvolume_get_snapshot(iter->trans, subvolid, &snapshot);
- if (ret)
- return bkey_s_c_err(ret);
-
- bch2_btree_iter_set_snapshot(iter, snapshot);
- return bch2_btree_iter_peek_upto_type(iter, end, flags);
-}
-
-#define for_each_btree_key_in_subvolume_upto_continue(_trans, _iter, \
- _end, _subvolid, _flags, _k, _do) \
-({ \
- struct bkey_s_c _k; \
- int _ret3 = 0; \
- \
- do { \
- _ret3 = lockrestart_do(_trans, ({ \
- (_k) = bch2_btree_iter_peek_in_subvolume_upto_type(&(_iter), \
- _end, _subvolid, (_flags)); \
- if (!(_k).k) \
- break; \
- \
- bkey_err(_k) ?: (_do); \
- })); \
- } while (!_ret3 && bch2_btree_iter_advance(&(_iter))); \
- \
- bch2_trans_iter_exit((_trans), &(_iter)); \
- _ret3; \
-})
-
-#define for_each_btree_key_in_subvolume_upto(_trans, _iter, _btree_id, \
- _start, _end, _subvolid, _flags, _k, _do) \
-({ \
- struct btree_iter _iter; \
- bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
- (_start), (_flags)); \
- \
- for_each_btree_key_in_subvolume_upto_continue(_trans, _iter, \
- _end, _subvolid, _flags, _k, _do); \
-})
-
-int bch2_delete_dead_snapshots(struct bch_fs *);
-void bch2_delete_dead_snapshots_async(struct bch_fs *);
-
-int bch2_subvolume_unlink(struct btree_trans *, u32);
-int bch2_subvolume_create(struct btree_trans *, u64, u32, u32, u32 *, u32 *, bool);
-
-int bch2_initialize_subvolumes(struct bch_fs *);
-int bch2_fs_upgrade_for_subvolumes(struct bch_fs *);
-
-int bch2_fs_subvolumes_init(struct bch_fs *);
-
-#endif /* _BCACHEFS_SUBVOLUME_H */
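
The for_each_btree_key_in_subvolume_upto() helper above hides the subvolume-to-snapshot lookup that callers would otherwise do by hand with bch2_subvolume_get_snapshot(). A minimal usage sketch follows; the function name, the extent-counting body, and the choice of BTREE_ID_extents are illustrative assumptions only.

/*
 * Illustrative sketch only (not part of this patch): count the extent keys
 * visible for inode @inum in subvolume @subvolid. The helper resolves the
 * subvolume's snapshot ID and sets it on the iterator before each peek.
 */
static int example_count_extents(struct btree_trans *trans, u32 subvolid,
				 u64 inum, u64 *nr)
{
	*nr = 0;

	return for_each_btree_key_in_subvolume_upto(trans, iter,
			BTREE_ID_extents,
			POS(inum, 0), POS(inum, U64_MAX),
			subvolid, 0, k, ({
		(*nr)++;
		0;
	}));
}
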
diff --git a/fs/bcachefs/subvolume_format.h b/fs/bcachefs/subvolume_format.h
deleted file mode 100644
index e029df7ba89f..000000000000
--- a/fs/bcachefs/subvolume_format.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SUBVOLUME_FORMAT_H
-#define _BCACHEFS_SUBVOLUME_FORMAT_H
-
-#define SUBVOL_POS_MIN POS(0, 1)
-#define SUBVOL_POS_MAX POS(0, S32_MAX)
-#define BCACHEFS_ROOT_SUBVOL 1
-
-struct bch_subvolume {
- struct bch_val v;
- __le32 flags;
- __le32 snapshot;
- __le64 inode;
- /*
- * Snapshot subvolumes form a tree, separate from the snapshot nodes
- * tree - if this subvolume is a snapshot, this is the ID of the
- * subvolume it was created from:
- *
- * This is _not_ necessarily the subvolume of the directory containing
- * this subvolume:
- */
- __le32 creation_parent;
- __le32 fs_path_parent;
- bch_le128 otime;
-};
-
-LE32_BITMASK(BCH_SUBVOLUME_RO, struct bch_subvolume, flags, 0, 1)
-/*
- * We need to know whether a subvolume is a snapshot so we can know whether we
- * can delete it (or whether it should just be rm -rf'd)
- */
-LE32_BITMASK(BCH_SUBVOLUME_SNAP, struct bch_subvolume, flags, 1, 2)
-LE32_BITMASK(BCH_SUBVOLUME_UNLINKED, struct bch_subvolume, flags, 2, 3)
-
-#endif /* _BCACHEFS_SUBVOLUME_FORMAT_H */
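
The LE32_BITMASK() declarations above expand to small accessor pairs, e.g. BCH_SUBVOLUME_RO()/SET_BCH_SUBVOLUME_RO(), which read and update a bit range of the little-endian flags word in place. A minimal sketch of the resulting API; the function itself is hypothetical.

/* Illustrative sketch only: exercise the generated flag accessors */
static void example_subvol_flags(struct bch_subvolume *v)
{
	SET_BCH_SUBVOLUME_RO(v, true);		/* bits 0..0 of flags */

	if (BCH_SUBVOLUME_SNAP(v))		/* bits 1..1 */
		pr_debug("snapshot subvolume: may be deleted directly\n");
	else
		pr_debug("ordinary subvolume: contents must be removed first\n");
}
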
diff --git a/fs/bcachefs/subvolume_types.h b/fs/bcachefs/subvolume_types.h
deleted file mode 100644
index f2ec4277c2a5..000000000000
--- a/fs/bcachefs/subvolume_types.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SUBVOLUME_TYPES_H
-#define _BCACHEFS_SUBVOLUME_TYPES_H
-
-#include "darray.h"
-
-typedef DARRAY(u32) snapshot_id_list;
-
-#define IS_ANCESTOR_BITMAP 128
-
-struct snapshot_t {
- u32 parent;
- u32 skip[3];
- u32 depth;
- u32 children[2];
- u32 subvol; /* Nonzero only if a subvolume points to this node: */
- u32 tree;
- u32 equiv;
- unsigned long is_ancestor[BITS_TO_LONGS(IS_ANCESTOR_BITMAP)];
-};
-
-struct snapshot_table {
- struct rcu_head rcu;
- size_t nr;
-#ifndef RUST_BINDGEN
- DECLARE_FLEX_ARRAY(struct snapshot_t, s);
-#else
- struct snapshot_t s[0];
-#endif
-};
-
-typedef struct {
- /* we can't have padding in this struct: */
- u64 subvol;
- u64 inum;
-} subvol_inum;
-
-#endif /* _BCACHEFS_SUBVOLUME_TYPES_H */
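
The subvol_inum pair above is forbidden from containing padding presumably because it is hashed and compared as raw bytes elsewhere in the tree; a padding hole could make two logically equal identifiers compare unequal. A trivial field-wise comparison helper as a sketch (the name is hypothetical; the real tree may define its own):

/* Illustrative sketch only: compare two subvolume/inode identifiers */
static inline bool example_subvol_inum_eq(subvol_inum a, subvol_inum b)
{
	return a.subvol == b.subvol && a.inum == b.inum;
}
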
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
deleted file mode 100644
index 7c71594f6a8b..000000000000
--- a/fs/bcachefs/super-io.c
+++ /dev/null
@@ -1,1418 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "checksum.h"
-#include "disk_groups.h"
-#include "ec.h"
-#include "error.h"
-#include "journal.h"
-#include "journal_sb.h"
-#include "journal_seq_blacklist.h"
-#include "recovery_passes.h"
-#include "replicas.h"
-#include "quota.h"
-#include "sb-clean.h"
-#include "sb-counters.h"
-#include "sb-downgrade.h"
-#include "sb-errors.h"
-#include "sb-members.h"
-#include "super-io.h"
-#include "super.h"
-#include "trace.h"
-#include "vstructs.h"
-
-#include <linux/backing-dev.h>
-#include <linux/sort.h>
-
-static const struct blk_holder_ops bch2_sb_handle_bdev_ops = {
-};
-
-struct bch2_metadata_version {
- u16 version;
- const char *name;
-};
-
-static const struct bch2_metadata_version bch2_metadata_versions[] = {
-#define x(n, v) { \
- .version = v, \
- .name = #n, \
-},
- BCH_METADATA_VERSIONS()
-#undef x
-};
-
-void bch2_version_to_text(struct printbuf *out, unsigned v)
-{
- const char *str = "(unknown version)";
-
- for (unsigned i = 0; i < ARRAY_SIZE(bch2_metadata_versions); i++)
- if (bch2_metadata_versions[i].version == v) {
- str = bch2_metadata_versions[i].name;
- break;
- }
-
- prt_printf(out, "%u.%u: %s", BCH_VERSION_MAJOR(v), BCH_VERSION_MINOR(v), str);
-}
-
-unsigned bch2_latest_compatible_version(unsigned v)
-{
- if (!BCH_VERSION_MAJOR(v))
- return v;
-
- for (unsigned i = 0; i < ARRAY_SIZE(bch2_metadata_versions); i++)
- if (bch2_metadata_versions[i].version > v &&
- BCH_VERSION_MAJOR(bch2_metadata_versions[i].version) ==
- BCH_VERSION_MAJOR(v))
- v = bch2_metadata_versions[i].version;
-
- return v;
-}
-
-const char * const bch2_sb_fields[] = {
-#define x(name, nr) #name,
- BCH_SB_FIELDS()
-#undef x
- NULL
-};
-
-static int bch2_sb_field_validate(struct bch_sb *, struct bch_sb_field *,
- enum bch_validate_flags, struct printbuf *);
-
-struct bch_sb_field *bch2_sb_field_get_id(struct bch_sb *sb,
- enum bch_sb_field_type type)
-{
- /* XXX: need locking around superblock to access optional fields */
-
- vstruct_for_each(sb, f)
- if (le32_to_cpu(f->type) == type)
- return f;
- return NULL;
-}
-
-static struct bch_sb_field *__bch2_sb_field_resize(struct bch_sb_handle *sb,
- struct bch_sb_field *f,
- unsigned u64s)
-{
- unsigned old_u64s = f ? le32_to_cpu(f->u64s) : 0;
- unsigned sb_u64s = le32_to_cpu(sb->sb->u64s) + u64s - old_u64s;
-
- BUG_ON(__vstruct_bytes(struct bch_sb, sb_u64s) > sb->buffer_size);
-
- if (!f && !u64s) {
- /* nothing to do: */
- } else if (!f) {
- f = vstruct_last(sb->sb);
- memset(f, 0, sizeof(u64) * u64s);
- f->u64s = cpu_to_le32(u64s);
- f->type = 0;
- } else {
- void *src, *dst;
-
- src = vstruct_end(f);
-
- if (u64s) {
- f->u64s = cpu_to_le32(u64s);
- dst = vstruct_end(f);
- } else {
- dst = f;
- }
-
- memmove(dst, src, vstruct_end(sb->sb) - src);
-
- if (dst > src)
- memset(src, 0, dst - src);
- }
-
- sb->sb->u64s = cpu_to_le32(sb_u64s);
-
- return u64s ? f : NULL;
-}
-
-void bch2_sb_field_delete(struct bch_sb_handle *sb,
- enum bch_sb_field_type type)
-{
- struct bch_sb_field *f = bch2_sb_field_get_id(sb->sb, type);
-
- if (f)
- __bch2_sb_field_resize(sb, f, 0);
-}
-
-/* Superblock realloc/free: */
-
-void bch2_free_super(struct bch_sb_handle *sb)
-{
- kfree(sb->bio);
- if (!IS_ERR_OR_NULL(sb->s_bdev_file))
- bdev_fput(sb->s_bdev_file);
- kfree(sb->holder);
- kfree(sb->sb_name);
-
- kfree(sb->sb);
- memset(sb, 0, sizeof(*sb));
-}
-
-int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)
-{
- size_t new_bytes = __vstruct_bytes(struct bch_sb, u64s);
- size_t new_buffer_size;
- struct bch_sb *new_sb;
- struct bio *bio;
-
- if (sb->bdev)
- new_bytes = max_t(size_t, new_bytes, bdev_logical_block_size(sb->bdev));
-
- new_buffer_size = roundup_pow_of_two(new_bytes);
-
- if (sb->sb && sb->buffer_size >= new_buffer_size)
- return 0;
-
- if (sb->sb && sb->have_layout) {
- u64 max_bytes = 512 << sb->sb->layout.sb_max_size_bits;
-
- if (new_bytes > max_bytes) {
- struct printbuf buf = PRINTBUF;
-
- prt_bdevname(&buf, sb->bdev);
- prt_printf(&buf, ": superblock too big: want %zu but have %llu", new_bytes, max_bytes);
- pr_err("%s", buf.buf);
- printbuf_exit(&buf);
- return -BCH_ERR_ENOSPC_sb;
- }
- }
-
- if (sb->buffer_size >= new_buffer_size && sb->sb)
- return 0;
-
- if (dynamic_fault("bcachefs:add:super_realloc"))
- return -BCH_ERR_ENOMEM_sb_realloc_injected;
-
- new_sb = krealloc(sb->sb, new_buffer_size, GFP_NOFS|__GFP_ZERO);
- if (!new_sb)
- return -BCH_ERR_ENOMEM_sb_buf_realloc;
-
- sb->sb = new_sb;
-
- if (sb->have_bio) {
- unsigned nr_bvecs = buf_pages(sb->sb, new_buffer_size);
-
- bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
- if (!bio)
- return -BCH_ERR_ENOMEM_sb_bio_realloc;
-
- bio_init(bio, NULL, bio->bi_inline_vecs, nr_bvecs, 0);
-
- kfree(sb->bio);
- sb->bio = bio;
- }
-
- sb->buffer_size = new_buffer_size;
-
- return 0;
-}
-
-struct bch_sb_field *bch2_sb_field_resize_id(struct bch_sb_handle *sb,
- enum bch_sb_field_type type,
- unsigned u64s)
-{
- struct bch_sb_field *f = bch2_sb_field_get_id(sb->sb, type);
- ssize_t old_u64s = f ? le32_to_cpu(f->u64s) : 0;
- ssize_t d = -old_u64s + u64s;
-
- if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d))
- return NULL;
-
- if (sb->fs_sb) {
- struct bch_fs *c = container_of(sb, struct bch_fs, disk_sb);
-
- lockdep_assert_held(&c->sb_lock);
-
-		/* XXX: we're not checking that offline devices have enough space */
-
- for_each_online_member(c, ca) {
- struct bch_sb_handle *dev_sb = &ca->disk_sb;
-
- if (bch2_sb_realloc(dev_sb, le32_to_cpu(dev_sb->sb->u64s) + d)) {
- percpu_ref_put(&ca->io_ref);
- return NULL;
- }
- }
- }
-
- f = bch2_sb_field_get_id(sb->sb, type);
- f = __bch2_sb_field_resize(sb, f, u64s);
- if (f)
- f->type = cpu_to_le32(type);
- return f;
-}
-
-struct bch_sb_field *bch2_sb_field_get_minsize_id(struct bch_sb_handle *sb,
- enum bch_sb_field_type type,
- unsigned u64s)
-{
- struct bch_sb_field *f = bch2_sb_field_get_id(sb->sb, type);
-
- if (!f || le32_to_cpu(f->u64s) < u64s)
- f = bch2_sb_field_resize_id(sb, type, u64s);
- return f;
-}
-
-/* Superblock validate: */
-
-static int validate_sb_layout(struct bch_sb_layout *layout, struct printbuf *out)
-{
- u64 offset, prev_offset, max_sectors;
- unsigned i;
-
- BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512);
-
- if (!uuid_equal(&layout->magic, &BCACHE_MAGIC) &&
- !uuid_equal(&layout->magic, &BCHFS_MAGIC)) {
- prt_printf(out, "Not a bcachefs superblock layout");
- return -BCH_ERR_invalid_sb_layout;
- }
-
- if (layout->layout_type != 0) {
- prt_printf(out, "Invalid superblock layout type %u",
- layout->layout_type);
- return -BCH_ERR_invalid_sb_layout_type;
- }
-
- if (!layout->nr_superblocks) {
- prt_printf(out, "Invalid superblock layout: no superblocks");
- return -BCH_ERR_invalid_sb_layout_nr_superblocks;
- }
-
- if (layout->nr_superblocks > ARRAY_SIZE(layout->sb_offset)) {
- prt_printf(out, "Invalid superblock layout: too many superblocks");
- return -BCH_ERR_invalid_sb_layout_nr_superblocks;
- }
-
- if (layout->sb_max_size_bits > BCH_SB_LAYOUT_SIZE_BITS_MAX) {
- prt_printf(out, "Invalid superblock layout: max_size_bits too high");
- return -BCH_ERR_invalid_sb_layout_sb_max_size_bits;
- }
-
- max_sectors = 1 << layout->sb_max_size_bits;
-
- prev_offset = le64_to_cpu(layout->sb_offset[0]);
-
- for (i = 1; i < layout->nr_superblocks; i++) {
- offset = le64_to_cpu(layout->sb_offset[i]);
-
- if (offset < prev_offset + max_sectors) {
- prt_printf(out, "Invalid superblock layout: superblocks overlap\n"
- " (sb %u ends at %llu next starts at %llu",
- i - 1, prev_offset + max_sectors, offset);
- return -BCH_ERR_invalid_sb_layout_superblocks_overlap;
- }
- prev_offset = offset;
- }
-
- return 0;
-}
-
-static int bch2_sb_compatible(struct bch_sb *sb, struct printbuf *out)
-{
- u16 version = le16_to_cpu(sb->version);
- u16 version_min = le16_to_cpu(sb->version_min);
-
- if (!bch2_version_compatible(version)) {
- prt_str(out, "Unsupported superblock version ");
- bch2_version_to_text(out, version);
- prt_str(out, " (min ");
- bch2_version_to_text(out, bcachefs_metadata_version_min);
- prt_str(out, ", max ");
- bch2_version_to_text(out, bcachefs_metadata_version_current);
- prt_str(out, ")");
- return -BCH_ERR_invalid_sb_version;
- }
-
- if (!bch2_version_compatible(version_min)) {
- prt_str(out, "Unsupported superblock version_min ");
- bch2_version_to_text(out, version_min);
- prt_str(out, " (min ");
- bch2_version_to_text(out, bcachefs_metadata_version_min);
- prt_str(out, ", max ");
- bch2_version_to_text(out, bcachefs_metadata_version_current);
- prt_str(out, ")");
- return -BCH_ERR_invalid_sb_version;
- }
-
- if (version_min > version) {
- prt_str(out, "Bad minimum version ");
- bch2_version_to_text(out, version_min);
- prt_str(out, ", greater than version field ");
- bch2_version_to_text(out, version);
- return -BCH_ERR_invalid_sb_version;
- }
-
- return 0;
-}
-
-static int bch2_sb_validate(struct bch_sb_handle *disk_sb,
- enum bch_validate_flags flags, struct printbuf *out)
-{
- struct bch_sb *sb = disk_sb->sb;
- struct bch_sb_field_members_v1 *mi;
- enum bch_opt_id opt_id;
- u16 block_size;
- int ret;
-
- ret = bch2_sb_compatible(sb, out);
- if (ret)
- return ret;
-
- if (sb->features[1] ||
- (le64_to_cpu(sb->features[0]) & (~0ULL << BCH_FEATURE_NR))) {
- prt_printf(out, "Filesystem has incompatible features");
- return -BCH_ERR_invalid_sb_features;
- }
-
- block_size = le16_to_cpu(sb->block_size);
-
- if (block_size > PAGE_SECTORS) {
- prt_printf(out, "Block size too big (got %u, max %u)",
- block_size, PAGE_SECTORS);
- return -BCH_ERR_invalid_sb_block_size;
- }
-
- if (bch2_is_zero(sb->user_uuid.b, sizeof(sb->user_uuid))) {
- prt_printf(out, "Bad user UUID (got zeroes)");
- return -BCH_ERR_invalid_sb_uuid;
- }
-
- if (bch2_is_zero(sb->uuid.b, sizeof(sb->uuid))) {
- prt_printf(out, "Bad internal UUID (got zeroes)");
- return -BCH_ERR_invalid_sb_uuid;
- }
-
- if (!sb->nr_devices ||
- sb->nr_devices > BCH_SB_MEMBERS_MAX) {
- prt_printf(out, "Bad number of member devices %u (max %u)",
- sb->nr_devices, BCH_SB_MEMBERS_MAX);
- return -BCH_ERR_invalid_sb_too_many_members;
- }
-
- if (sb->dev_idx >= sb->nr_devices) {
- prt_printf(out, "Bad dev_idx (got %u, nr_devices %u)",
- sb->dev_idx, sb->nr_devices);
- return -BCH_ERR_invalid_sb_dev_idx;
- }
-
- if (!sb->time_precision ||
- le32_to_cpu(sb->time_precision) > NSEC_PER_SEC) {
- prt_printf(out, "Invalid time precision: %u (min 1, max %lu)",
- le32_to_cpu(sb->time_precision), NSEC_PER_SEC);
- return -BCH_ERR_invalid_sb_time_precision;
- }
-
- if (!flags) {
- /*
- * Been seeing a bug where these are getting inexplicably
- * zeroed, so we're now validating them, but we have to be
-		 * careful not to prevent people's filesystems from mounting:
- */
- if (!BCH_SB_JOURNAL_FLUSH_DELAY(sb))
- SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);
- if (!BCH_SB_JOURNAL_RECLAIM_DELAY(sb))
- SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 1000);
-
- if (!BCH_SB_VERSION_UPGRADE_COMPLETE(sb))
- SET_BCH_SB_VERSION_UPGRADE_COMPLETE(sb, le16_to_cpu(sb->version));
-
- if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_disk_accounting_v2 &&
- !BCH_SB_ALLOCATOR_STUCK_TIMEOUT(sb))
- SET_BCH_SB_ALLOCATOR_STUCK_TIMEOUT(sb, 30);
-
- if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_disk_accounting_v2)
- SET_BCH_SB_PROMOTE_WHOLE_EXTENTS(sb, true);
- }
-
- for (opt_id = 0; opt_id < bch2_opts_nr; opt_id++) {
- const struct bch_option *opt = bch2_opt_table + opt_id;
-
- if (opt->get_sb != BCH2_NO_SB_OPT) {
- u64 v = bch2_opt_from_sb(sb, opt_id);
-
- prt_printf(out, "Invalid option ");
- ret = bch2_opt_validate(opt, v, out);
- if (ret)
- return ret;
-
- printbuf_reset(out);
- }
- }
-
- /* validate layout */
- ret = validate_sb_layout(&sb->layout, out);
- if (ret)
- return ret;
-
- vstruct_for_each(sb, f) {
- if (!f->u64s) {
- prt_printf(out, "Invalid superblock: optional field with size 0 (type %u)",
- le32_to_cpu(f->type));
- return -BCH_ERR_invalid_sb_field_size;
- }
-
- if (vstruct_next(f) > vstruct_last(sb)) {
- prt_printf(out, "Invalid superblock: optional field extends past end of superblock (type %u)",
- le32_to_cpu(f->type));
- return -BCH_ERR_invalid_sb_field_size;
- }
- }
-
- /* members must be validated first: */
- mi = bch2_sb_field_get(sb, members_v1);
- if (!mi) {
- prt_printf(out, "Invalid superblock: member info area missing");
- return -BCH_ERR_invalid_sb_members_missing;
- }
-
- ret = bch2_sb_field_validate(sb, &mi->field, flags, out);
- if (ret)
- return ret;
-
- vstruct_for_each(sb, f) {
- if (le32_to_cpu(f->type) == BCH_SB_FIELD_members_v1)
- continue;
-
- ret = bch2_sb_field_validate(sb, f, flags, out);
- if (ret)
- return ret;
- }
-
- if ((flags & BCH_VALIDATE_write) &&
- bch2_sb_member_get(sb, sb->dev_idx).seq != sb->seq) {
- prt_printf(out, "Invalid superblock: member seq %llu != sb seq %llu",
- le64_to_cpu(bch2_sb_member_get(sb, sb->dev_idx).seq),
- le64_to_cpu(sb->seq));
- return -BCH_ERR_invalid_sb_members_missing;
- }
-
- return 0;
-}
-
-/* device open: */
-
-static unsigned long le_ulong_to_cpu(unsigned long v)
-{
- return sizeof(unsigned long) == 8
- ? le64_to_cpu(v)
- : le32_to_cpu(v);
-}
-
-static void le_bitvector_to_cpu(unsigned long *dst, unsigned long *src, unsigned nr)
-{
- BUG_ON(nr & (BITS_PER_TYPE(long) - 1));
-
- for (unsigned i = 0; i < BITS_TO_LONGS(nr); i++)
- dst[i] = le_ulong_to_cpu(src[i]);
-}
-
-static void bch2_sb_update(struct bch_fs *c)
-{
- struct bch_sb *src = c->disk_sb.sb;
-
- lockdep_assert_held(&c->sb_lock);
-
- c->sb.uuid = src->uuid;
- c->sb.user_uuid = src->user_uuid;
- c->sb.version = le16_to_cpu(src->version);
- c->sb.version_min = le16_to_cpu(src->version_min);
- c->sb.version_upgrade_complete = BCH_SB_VERSION_UPGRADE_COMPLETE(src);
- c->sb.nr_devices = src->nr_devices;
- c->sb.clean = BCH_SB_CLEAN(src);
- c->sb.encryption_type = BCH_SB_ENCRYPTION_TYPE(src);
-
- c->sb.nsec_per_time_unit = le32_to_cpu(src->time_precision);
- c->sb.time_units_per_sec = NSEC_PER_SEC / c->sb.nsec_per_time_unit;
-
- /* XXX this is wrong, we need a 96 or 128 bit integer type */
- c->sb.time_base_lo = div_u64(le64_to_cpu(src->time_base_lo),
- c->sb.nsec_per_time_unit);
- c->sb.time_base_hi = le32_to_cpu(src->time_base_hi);
-
- c->sb.features = le64_to_cpu(src->features[0]);
- c->sb.compat = le64_to_cpu(src->compat[0]);
-
- memset(c->sb.errors_silent, 0, sizeof(c->sb.errors_silent));
-
- struct bch_sb_field_ext *ext = bch2_sb_field_get(src, ext);
- if (ext) {
- le_bitvector_to_cpu(c->sb.errors_silent, (void *) ext->errors_silent,
- sizeof(c->sb.errors_silent) * 8);
- c->sb.btrees_lost_data = le64_to_cpu(ext->btrees_lost_data);
- }
-
- for_each_member_device(c, ca) {
- struct bch_member m = bch2_sb_member_get(src, ca->dev_idx);
- ca->mi = bch2_mi_to_cpu(&m);
- }
-}
-
-static int __copy_super(struct bch_sb_handle *dst_handle, struct bch_sb *src)
-{
- struct bch_sb_field *src_f, *dst_f;
- struct bch_sb *dst = dst_handle->sb;
- unsigned i;
-
- dst->version = src->version;
- dst->version_min = src->version_min;
- dst->seq = src->seq;
- dst->uuid = src->uuid;
- dst->user_uuid = src->user_uuid;
- memcpy(dst->label, src->label, sizeof(dst->label));
-
- dst->block_size = src->block_size;
- dst->nr_devices = src->nr_devices;
-
- dst->time_base_lo = src->time_base_lo;
- dst->time_base_hi = src->time_base_hi;
- dst->time_precision = src->time_precision;
- dst->write_time = src->write_time;
-
- memcpy(dst->flags, src->flags, sizeof(dst->flags));
- memcpy(dst->features, src->features, sizeof(dst->features));
- memcpy(dst->compat, src->compat, sizeof(dst->compat));
-
- for (i = 0; i < BCH_SB_FIELD_NR; i++) {
- int d;
-
- if ((1U << i) & BCH_SINGLE_DEVICE_SB_FIELDS)
- continue;
-
- src_f = bch2_sb_field_get_id(src, i);
- dst_f = bch2_sb_field_get_id(dst, i);
-
- d = (src_f ? le32_to_cpu(src_f->u64s) : 0) -
- (dst_f ? le32_to_cpu(dst_f->u64s) : 0);
- if (d > 0) {
- int ret = bch2_sb_realloc(dst_handle,
- le32_to_cpu(dst_handle->sb->u64s) + d);
-
- if (ret)
- return ret;
-
- dst = dst_handle->sb;
- dst_f = bch2_sb_field_get_id(dst, i);
- }
-
- dst_f = __bch2_sb_field_resize(dst_handle, dst_f,
- src_f ? le32_to_cpu(src_f->u64s) : 0);
-
- if (src_f)
- memcpy(dst_f, src_f, vstruct_bytes(src_f));
- }
-
- return 0;
-}
-
-int bch2_sb_to_fs(struct bch_fs *c, struct bch_sb *src)
-{
- int ret;
-
- lockdep_assert_held(&c->sb_lock);
-
- ret = bch2_sb_realloc(&c->disk_sb, 0) ?:
- __copy_super(&c->disk_sb, src) ?:
- bch2_sb_replicas_to_cpu_replicas(c) ?:
- bch2_sb_disk_groups_to_cpu(c);
- if (ret)
- return ret;
-
- bch2_sb_update(c);
- return 0;
-}
-
-int bch2_sb_from_fs(struct bch_fs *c, struct bch_dev *ca)
-{
- return __copy_super(&ca->disk_sb, c->disk_sb.sb);
-}
-
-/* read superblock: */
-
-static int read_one_super(struct bch_sb_handle *sb, u64 offset, struct printbuf *err)
-{
- size_t bytes;
- int ret;
-reread:
- bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
- sb->bio->bi_iter.bi_sector = offset;
- bch2_bio_map(sb->bio, sb->sb, sb->buffer_size);
-
- ret = submit_bio_wait(sb->bio);
- if (ret) {
- prt_printf(err, "IO error: %i", ret);
- return ret;
- }
-
- if (!uuid_equal(&sb->sb->magic, &BCACHE_MAGIC) &&
- !uuid_equal(&sb->sb->magic, &BCHFS_MAGIC)) {
- prt_str(err, "Not a bcachefs superblock (got magic ");
- pr_uuid(err, sb->sb->magic.b);
- prt_str(err, ")");
- return -BCH_ERR_invalid_sb_magic;
- }
-
- ret = bch2_sb_compatible(sb->sb, err);
- if (ret)
- return ret;
-
- bytes = vstruct_bytes(sb->sb);
-
- u64 sb_size = 512ULL << min(BCH_SB_LAYOUT_SIZE_BITS_MAX, sb->sb->layout.sb_max_size_bits);
- if (bytes > sb_size) {
- prt_printf(err, "Invalid superblock: too big (got %zu bytes, layout max %llu)",
- bytes, sb_size);
- return -BCH_ERR_invalid_sb_too_big;
- }
-
- if (bytes > sb->buffer_size) {
- ret = bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s));
- if (ret)
- return ret;
- goto reread;
- }
-
- enum bch_csum_type csum_type = BCH_SB_CSUM_TYPE(sb->sb);
- if (csum_type >= BCH_CSUM_NR) {
- prt_printf(err, "unknown checksum type %llu", BCH_SB_CSUM_TYPE(sb->sb));
- return -BCH_ERR_invalid_sb_csum_type;
- }
-
- /* XXX: verify MACs */
- struct bch_csum csum = csum_vstruct(NULL, csum_type, null_nonce(), sb->sb);
- if (bch2_crc_cmp(csum, sb->sb->csum)) {
- bch2_csum_err_msg(err, csum_type, sb->sb->csum, csum);
- return -BCH_ERR_invalid_sb_csum;
- }
-
- sb->seq = le64_to_cpu(sb->sb->seq);
-
- return 0;
-}
-
-static int __bch2_read_super(const char *path, struct bch_opts *opts,
- struct bch_sb_handle *sb, bool ignore_notbchfs_msg)
-{
- u64 offset = opt_get(*opts, sb);
- struct bch_sb_layout layout;
- struct printbuf err = PRINTBUF;
- struct printbuf err2 = PRINTBUF;
- __le64 *i;
- int ret;
-#ifndef __KERNEL__
-retry:
-#endif
- memset(sb, 0, sizeof(*sb));
- sb->mode = BLK_OPEN_READ;
- sb->have_bio = true;
- sb->holder = kmalloc(1, GFP_KERNEL);
- if (!sb->holder)
- return -ENOMEM;
-
- sb->sb_name = kstrdup(path, GFP_KERNEL);
- if (!sb->sb_name) {
- ret = -ENOMEM;
- prt_printf(&err, "error allocating memory for sb_name");
- goto err;
- }
-
-#ifndef __KERNEL__
- if (opt_get(*opts, direct_io) == false)
- sb->mode |= BLK_OPEN_BUFFERED;
-#endif
-
- if (!opt_get(*opts, noexcl))
- sb->mode |= BLK_OPEN_EXCL;
-
- if (!opt_get(*opts, nochanges))
- sb->mode |= BLK_OPEN_WRITE;
-
- sb->s_bdev_file = bdev_file_open_by_path(path, sb->mode, sb->holder, &bch2_sb_handle_bdev_ops);
- if (IS_ERR(sb->s_bdev_file) &&
- PTR_ERR(sb->s_bdev_file) == -EACCES &&
- opt_get(*opts, read_only)) {
- sb->mode &= ~BLK_OPEN_WRITE;
-
- sb->s_bdev_file = bdev_file_open_by_path(path, sb->mode, sb->holder, &bch2_sb_handle_bdev_ops);
- if (!IS_ERR(sb->s_bdev_file))
- opt_set(*opts, nochanges, true);
- }
-
- if (IS_ERR(sb->s_bdev_file)) {
- ret = PTR_ERR(sb->s_bdev_file);
- prt_printf(&err, "error opening %s: %s", path, bch2_err_str(ret));
- goto err;
- }
- sb->bdev = file_bdev(sb->s_bdev_file);
-
- ret = bch2_sb_realloc(sb, 0);
- if (ret) {
- prt_printf(&err, "error allocating memory for superblock");
- goto err;
- }
-
- if (bch2_fs_init_fault("read_super")) {
- prt_printf(&err, "dynamic fault");
- ret = -EFAULT;
- goto err;
- }
-
- ret = read_one_super(sb, offset, &err);
- if (!ret)
- goto got_super;
-
- if (opt_defined(*opts, sb))
- goto err;
-
- prt_printf(&err2, "bcachefs (%s): error reading default superblock: %s\n",
- path, err.buf);
- if (ret == -BCH_ERR_invalid_sb_magic && ignore_notbchfs_msg)
- bch2_print_opts(opts, KERN_INFO "%s", err2.buf);
- else
- bch2_print_opts(opts, KERN_ERR "%s", err2.buf);
-
- printbuf_exit(&err2);
- printbuf_reset(&err);
-
- /*
- * Error reading primary superblock - read location of backup
- * superblocks:
- */
- bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
- sb->bio->bi_iter.bi_sector = BCH_SB_LAYOUT_SECTOR;
- /*
- * use sb buffer to read layout, since sb buffer is page aligned but
- * layout won't be:
- */
- bch2_bio_map(sb->bio, sb->sb, sizeof(struct bch_sb_layout));
-
- ret = submit_bio_wait(sb->bio);
- if (ret) {
- prt_printf(&err, "IO error: %i", ret);
- goto err;
- }
-
- memcpy(&layout, sb->sb, sizeof(layout));
- ret = validate_sb_layout(&layout, &err);
- if (ret)
- goto err;
-
- for (i = layout.sb_offset;
- i < layout.sb_offset + layout.nr_superblocks; i++) {
- offset = le64_to_cpu(*i);
-
- if (offset == opt_get(*opts, sb)) {
- ret = -BCH_ERR_invalid;
- continue;
- }
-
- ret = read_one_super(sb, offset, &err);
- if (!ret)
- goto got_super;
- }
-
- goto err;
-
-got_super:
- if (le16_to_cpu(sb->sb->block_size) << 9 <
- bdev_logical_block_size(sb->bdev) &&
- opt_get(*opts, direct_io)) {
-#ifndef __KERNEL__
- opt_set(*opts, direct_io, false);
- bch2_free_super(sb);
- goto retry;
-#endif
- prt_printf(&err, "block size (%u) smaller than device block size (%u)",
- le16_to_cpu(sb->sb->block_size) << 9,
- bdev_logical_block_size(sb->bdev));
- ret = -BCH_ERR_block_size_too_small;
- goto err;
- }
-
- sb->have_layout = true;
-
- ret = bch2_sb_validate(sb, 0, &err);
- if (ret) {
- bch2_print_opts(opts, KERN_ERR "bcachefs (%s): error validating superblock: %s\n",
- path, err.buf);
- goto err_no_print;
- }
-out:
- printbuf_exit(&err);
- return ret;
-err:
- bch2_print_opts(opts, KERN_ERR "bcachefs (%s): error reading superblock: %s\n",
- path, err.buf);
-err_no_print:
- bch2_free_super(sb);
- goto out;
-}
-
-int bch2_read_super(const char *path, struct bch_opts *opts,
- struct bch_sb_handle *sb)
-{
- return __bch2_read_super(path, opts, sb, false);
-}
-
-/* provide a silenced version for mount.bcachefs */
-
-int bch2_read_super_silent(const char *path, struct bch_opts *opts,
- struct bch_sb_handle *sb)
-{
- return __bch2_read_super(path, opts, sb, true);
-}
-
-/* write superblock: */
-
-static void write_super_endio(struct bio *bio)
-{
- struct bch_dev *ca = bio->bi_private;
-
- /* XXX: return errors directly */
-
- if (bch2_dev_io_err_on(bio->bi_status, ca,
- bio_data_dir(bio)
- ? BCH_MEMBER_ERROR_write
- : BCH_MEMBER_ERROR_read,
- "superblock %s error: %s",
- bio_data_dir(bio) ? "write" : "read",
- bch2_blk_status_to_str(bio->bi_status)))
- ca->sb_write_error = 1;
-
- closure_put(&ca->fs->sb_write);
- percpu_ref_put(&ca->io_ref);
-}
-
-static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
-{
- struct bch_sb *sb = ca->disk_sb.sb;
- struct bio *bio = ca->disk_sb.bio;
-
- bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
- bio->bi_iter.bi_sector = le64_to_cpu(sb->layout.sb_offset[0]);
- bio->bi_end_io = write_super_endio;
- bio->bi_private = ca;
- bch2_bio_map(bio, ca->sb_read_scratch, PAGE_SIZE);
-
- this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb],
- bio_sectors(bio));
-
- percpu_ref_get(&ca->io_ref);
- closure_bio_submit(bio, &c->sb_write);
-}
-
-static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
-{
- struct bch_sb *sb = ca->disk_sb.sb;
- struct bio *bio = ca->disk_sb.bio;
-
- sb->offset = sb->layout.sb_offset[idx];
-
- SET_BCH_SB_CSUM_TYPE(sb, bch2_csum_opt_to_type(c->opts.metadata_checksum, false));
- sb->csum = csum_vstruct(c, BCH_SB_CSUM_TYPE(sb),
- null_nonce(), sb);
-
- bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
- bio->bi_iter.bi_sector = le64_to_cpu(sb->offset);
- bio->bi_end_io = write_super_endio;
- bio->bi_private = ca;
- bch2_bio_map(bio, sb,
- roundup((size_t) vstruct_bytes(sb),
- bdev_logical_block_size(ca->disk_sb.bdev)));
-
- this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_sb],
- bio_sectors(bio));
-
- percpu_ref_get(&ca->io_ref);
- closure_bio_submit(bio, &c->sb_write);
-}
-
-int bch2_write_super(struct bch_fs *c)
-{
- struct closure *cl = &c->sb_write;
- struct printbuf err = PRINTBUF;
- unsigned sb = 0, nr_wrote;
- struct bch_devs_mask sb_written;
- bool wrote, can_mount_without_written, can_mount_with_written;
- unsigned degraded_flags = BCH_FORCE_IF_DEGRADED;
- DARRAY(struct bch_dev *) online_devices = {};
- int ret = 0;
-
- trace_and_count(c, write_super, c, _RET_IP_);
-
- if (c->opts.very_degraded)
- degraded_flags |= BCH_FORCE_IF_LOST;
-
- lockdep_assert_held(&c->sb_lock);
-
- closure_init_stack(cl);
- memset(&sb_written, 0, sizeof(sb_written));
-
- for_each_online_member(c, ca) {
- ret = darray_push(&online_devices, ca);
- if (bch2_fs_fatal_err_on(ret, c, "%s: error allocating online devices", __func__)) {
- percpu_ref_put(&ca->io_ref);
- goto out;
- }
- percpu_ref_get(&ca->io_ref);
- }
-
- /* Make sure we're using the new magic numbers: */
- c->disk_sb.sb->magic = BCHFS_MAGIC;
- c->disk_sb.sb->layout.magic = BCHFS_MAGIC;
-
- le64_add_cpu(&c->disk_sb.sb->seq, 1);
-
- struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
- darray_for_each(online_devices, ca)
- __bch2_members_v2_get_mut(mi, (*ca)->dev_idx)->seq = c->disk_sb.sb->seq;
- c->disk_sb.sb->write_time = cpu_to_le64(ktime_get_real_seconds());
-
- if (test_bit(BCH_FS_error, &c->flags))
- SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 1);
- if (test_bit(BCH_FS_topology_error, &c->flags))
- SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 1);
-
- SET_BCH_SB_BIG_ENDIAN(c->disk_sb.sb, CPU_BIG_ENDIAN);
-
- bch2_sb_counters_from_cpu(c);
- bch2_sb_members_from_cpu(c);
- bch2_sb_members_cpy_v2_v1(&c->disk_sb);
- bch2_sb_errors_from_cpu(c);
- bch2_sb_downgrade_update(c);
-
- darray_for_each(online_devices, ca)
- bch2_sb_from_fs(c, (*ca));
-
- darray_for_each(online_devices, ca) {
- printbuf_reset(&err);
-
- ret = bch2_sb_validate(&(*ca)->disk_sb, BCH_VALIDATE_write, &err);
- if (ret) {
- bch2_fs_inconsistent(c, "sb invalid before write: %s", err.buf);
- goto out;
- }
- }
-
- if (c->opts.nochanges)
- goto out;
-
- /*
- * Defer writing the superblock until filesystem initialization is
- * complete - don't write out a partly initialized superblock:
- */
- if (!BCH_SB_INITIALIZED(c->disk_sb.sb))
- goto out;
-
- if (le16_to_cpu(c->disk_sb.sb->version) > bcachefs_metadata_version_current) {
- struct printbuf buf = PRINTBUF;
- prt_printf(&buf, "attempting to write superblock that wasn't version downgraded (");
- bch2_version_to_text(&buf, le16_to_cpu(c->disk_sb.sb->version));
- prt_str(&buf, " > ");
- bch2_version_to_text(&buf, bcachefs_metadata_version_current);
- prt_str(&buf, ")");
- bch2_fs_fatal_error(c, ": %s", buf.buf);
- printbuf_exit(&buf);
- return -BCH_ERR_sb_not_downgraded;
- }
-
- darray_for_each(online_devices, ca) {
- __set_bit((*ca)->dev_idx, sb_written.d);
- (*ca)->sb_write_error = 0;
- }
-
- darray_for_each(online_devices, ca)
- read_back_super(c, *ca);
- closure_sync(cl);
-
- darray_for_each(online_devices, cap) {
- struct bch_dev *ca = *cap;
-
- if (ca->sb_write_error)
- continue;
-
- if (le64_to_cpu(ca->sb_read_scratch->seq) < ca->disk_sb.seq) {
- struct printbuf buf = PRINTBUF;
- prt_char(&buf, ' ');
- prt_bdevname(&buf, ca->disk_sb.bdev);
- prt_printf(&buf,
- ": Superblock write was silently dropped! (seq %llu expected %llu)",
- le64_to_cpu(ca->sb_read_scratch->seq),
- ca->disk_sb.seq);
- bch2_fs_fatal_error(c, "%s", buf.buf);
- printbuf_exit(&buf);
- ret = -BCH_ERR_erofs_sb_err;
- }
-
- if (le64_to_cpu(ca->sb_read_scratch->seq) > ca->disk_sb.seq) {
- struct printbuf buf = PRINTBUF;
- prt_char(&buf, ' ');
- prt_bdevname(&buf, ca->disk_sb.bdev);
- prt_printf(&buf,
- ": Superblock modified by another process (seq %llu expected %llu)",
- le64_to_cpu(ca->sb_read_scratch->seq),
- ca->disk_sb.seq);
- bch2_fs_fatal_error(c, "%s", buf.buf);
- printbuf_exit(&buf);
- ret = -BCH_ERR_erofs_sb_err;
- }
- }
-
- if (ret)
- goto out;
-
- do {
- wrote = false;
- darray_for_each(online_devices, cap) {
- struct bch_dev *ca = *cap;
- if (!ca->sb_write_error &&
- sb < ca->disk_sb.sb->layout.nr_superblocks) {
- write_one_super(c, ca, sb);
- wrote = true;
- }
- }
- closure_sync(cl);
- sb++;
- } while (wrote);
-
- darray_for_each(online_devices, cap) {
- struct bch_dev *ca = *cap;
- if (ca->sb_write_error)
- __clear_bit(ca->dev_idx, sb_written.d);
- else
- ca->disk_sb.seq = le64_to_cpu(ca->disk_sb.sb->seq);
- }
-
- nr_wrote = dev_mask_nr(&sb_written);
-
- can_mount_with_written =
- bch2_have_enough_devs(c, sb_written, degraded_flags, false);
-
- for (unsigned i = 0; i < ARRAY_SIZE(sb_written.d); i++)
- sb_written.d[i] = ~sb_written.d[i];
-
- can_mount_without_written =
- bch2_have_enough_devs(c, sb_written, degraded_flags, false);
-
- /*
- * If we would be able to mount _without_ the devices we successfully
- * wrote superblocks to, we weren't able to write to enough devices:
- *
-	 * Exception: if the only reason we could mount without the successful
-	 * writes is that nothing has been written yet (a new filesystem), we
-	 * continue as long as we could mount with the devices we did write to:
- */
- if (bch2_fs_fatal_err_on(!nr_wrote ||
- !can_mount_with_written ||
- (can_mount_without_written &&
- !can_mount_with_written), c,
- ": Unable to write superblock to sufficient devices (from %ps)",
- (void *) _RET_IP_))
- ret = -1;
-out:
- /* Make new options visible after they're persistent: */
- bch2_sb_update(c);
- darray_for_each(online_devices, ca)
- percpu_ref_put(&(*ca)->io_ref);
- darray_exit(&online_devices);
- printbuf_exit(&err);
- return ret;
-}
-
-void __bch2_check_set_feature(struct bch_fs *c, unsigned feat)
-{
- mutex_lock(&c->sb_lock);
- if (!(c->sb.features & (1ULL << feat))) {
- c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << feat);
-
- bch2_write_super(c);
- }
- mutex_unlock(&c->sb_lock);
-}
-
-/* Downgrade if superblock is at a higher version than currently supported: */
-bool bch2_check_version_downgrade(struct bch_fs *c)
-{
- bool ret = bcachefs_metadata_version_current < c->sb.version;
-
- lockdep_assert_held(&c->sb_lock);
-
- /*
-	 * Downgrade if the superblock is at a higher version than currently
- * supported:
- *
- * c->sb will be checked before we write the superblock, so update it as
- * well:
- */
- if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) > bcachefs_metadata_version_current)
- SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
- if (c->sb.version > bcachefs_metadata_version_current)
- c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
- if (c->sb.version_min > bcachefs_metadata_version_current)
- c->disk_sb.sb->version_min = cpu_to_le16(bcachefs_metadata_version_current);
- c->disk_sb.sb->compat[0] &= cpu_to_le64((1ULL << BCH_COMPAT_NR) - 1);
- return ret;
-}
-
-void bch2_sb_upgrade(struct bch_fs *c, unsigned new_version)
-{
- lockdep_assert_held(&c->sb_lock);
-
- if (BCH_VERSION_MAJOR(new_version) >
- BCH_VERSION_MAJOR(le16_to_cpu(c->disk_sb.sb->version)))
- bch2_sb_field_resize(&c->disk_sb, downgrade, 0);
-
- c->disk_sb.sb->version = cpu_to_le16(new_version);
- c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
-}
-
-static int bch2_sb_ext_validate(struct bch_sb *sb, struct bch_sb_field *f,
- enum bch_validate_flags flags, struct printbuf *err)
-{
- if (vstruct_bytes(f) < 88) {
- prt_printf(err, "field too small (%zu < %u)", vstruct_bytes(f), 88);
- return -BCH_ERR_invalid_sb_ext;
- }
-
- return 0;
-}
-
-static void bch2_sb_ext_to_text(struct printbuf *out, struct bch_sb *sb,
- struct bch_sb_field *f)
-{
- struct bch_sb_field_ext *e = field_to_type(f, ext);
-
- prt_printf(out, "Recovery passes required:\t");
- prt_bitflags(out, bch2_recovery_passes,
- bch2_recovery_passes_from_stable(le64_to_cpu(e->recovery_passes_required[0])));
- prt_newline(out);
-
- unsigned long *errors_silent = kmalloc(sizeof(e->errors_silent), GFP_KERNEL);
- if (errors_silent) {
- le_bitvector_to_cpu(errors_silent, (void *) e->errors_silent, sizeof(e->errors_silent) * 8);
-
- prt_printf(out, "Errors to silently fix:\t");
- prt_bitflags_vector(out, bch2_sb_error_strs, errors_silent,
- min(BCH_FSCK_ERR_MAX, sizeof(e->errors_silent) * 8));
- prt_newline(out);
-
- kfree(errors_silent);
- }
-
- prt_printf(out, "Btrees with missing data:\t");
- prt_bitflags(out, __bch2_btree_ids, le64_to_cpu(e->btrees_lost_data));
- prt_newline(out);
-}
-
-static const struct bch_sb_field_ops bch_sb_field_ops_ext = {
- .validate = bch2_sb_ext_validate,
- .to_text = bch2_sb_ext_to_text,
-};
-
-static const struct bch_sb_field_ops *bch2_sb_field_ops[] = {
-#define x(f, nr) \
- [BCH_SB_FIELD_##f] = &bch_sb_field_ops_##f,
- BCH_SB_FIELDS()
-#undef x
-};
-
-static const struct bch_sb_field_ops bch2_sb_field_null_ops;
-
-static const struct bch_sb_field_ops *bch2_sb_field_type_ops(unsigned type)
-{
- return likely(type < ARRAY_SIZE(bch2_sb_field_ops))
- ? bch2_sb_field_ops[type]
- : &bch2_sb_field_null_ops;
-}
-
-static int bch2_sb_field_validate(struct bch_sb *sb, struct bch_sb_field *f,
- enum bch_validate_flags flags, struct printbuf *err)
-{
- unsigned type = le32_to_cpu(f->type);
- struct printbuf field_err = PRINTBUF;
- const struct bch_sb_field_ops *ops = bch2_sb_field_type_ops(type);
- int ret;
-
- ret = ops->validate ? ops->validate(sb, f, flags, &field_err) : 0;
- if (ret) {
- prt_printf(err, "Invalid superblock section %s: %s",
- bch2_sb_fields[type], field_err.buf);
- prt_newline(err);
- bch2_sb_field_to_text(err, sb, f);
- }
-
- printbuf_exit(&field_err);
- return ret;
-}
-
-void __bch2_sb_field_to_text(struct printbuf *out, struct bch_sb *sb,
- struct bch_sb_field *f)
-{
- unsigned type = le32_to_cpu(f->type);
- const struct bch_sb_field_ops *ops = bch2_sb_field_type_ops(type);
-
- if (!out->nr_tabstops)
- printbuf_tabstop_push(out, 32);
-
- if (ops->to_text)
- ops->to_text(out, sb, f);
-}
-
-void bch2_sb_field_to_text(struct printbuf *out, struct bch_sb *sb,
- struct bch_sb_field *f)
-{
- unsigned type = le32_to_cpu(f->type);
-
- if (type < BCH_SB_FIELD_NR)
- prt_printf(out, "%s", bch2_sb_fields[type]);
- else
- prt_printf(out, "(unknown field %u)", type);
-
- prt_printf(out, " (size %zu):", vstruct_bytes(f));
- prt_newline(out);
-
- __bch2_sb_field_to_text(out, sb, f);
-}
-
-void bch2_sb_layout_to_text(struct printbuf *out, struct bch_sb_layout *l)
-{
- unsigned i;
-
- prt_printf(out, "Type: %u", l->layout_type);
- prt_newline(out);
-
- prt_str(out, "Superblock max size: ");
- prt_units_u64(out, 512 << l->sb_max_size_bits);
- prt_newline(out);
-
- prt_printf(out, "Nr superblocks: %u", l->nr_superblocks);
- prt_newline(out);
-
- prt_str(out, "Offsets: ");
- for (i = 0; i < l->nr_superblocks; i++) {
- if (i)
- prt_str(out, ", ");
- prt_printf(out, "%llu", le64_to_cpu(l->sb_offset[i]));
- }
- prt_newline(out);
-}
-
-void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
- bool print_layout, unsigned fields)
-{
- if (!out->nr_tabstops)
- printbuf_tabstop_push(out, 44);
-
- prt_printf(out, "External UUID:\t");
- pr_uuid(out, sb->user_uuid.b);
- prt_newline(out);
-
- prt_printf(out, "Internal UUID:\t");
- pr_uuid(out, sb->uuid.b);
- prt_newline(out);
-
- prt_printf(out, "Magic number:\t");
- pr_uuid(out, sb->magic.b);
- prt_newline(out);
-
- prt_printf(out, "Device index:\t%u\n", sb->dev_idx);
-
- prt_printf(out, "Label:\t");
- if (!strlen(sb->label))
- prt_printf(out, "(none)");
- else
- prt_printf(out, "%.*s", (int) sizeof(sb->label), sb->label);
- prt_newline(out);
-
- prt_printf(out, "Version:\t");
- bch2_version_to_text(out, le16_to_cpu(sb->version));
- prt_newline(out);
-
- prt_printf(out, "Version upgrade complete:\t");
- bch2_version_to_text(out, BCH_SB_VERSION_UPGRADE_COMPLETE(sb));
- prt_newline(out);
-
- prt_printf(out, "Oldest version on disk:\t");
- bch2_version_to_text(out, le16_to_cpu(sb->version_min));
- prt_newline(out);
-
- prt_printf(out, "Created:\t");
- if (sb->time_base_lo)
- bch2_prt_datetime(out, div_u64(le64_to_cpu(sb->time_base_lo), NSEC_PER_SEC));
- else
- prt_printf(out, "(not set)");
- prt_newline(out);
-
- prt_printf(out, "Sequence number:\t");
- prt_printf(out, "%llu", le64_to_cpu(sb->seq));
- prt_newline(out);
-
- prt_printf(out, "Time of last write:\t");
- bch2_prt_datetime(out, le64_to_cpu(sb->write_time));
- prt_newline(out);
-
- prt_printf(out, "Superblock size:\t");
- prt_units_u64(out, vstruct_bytes(sb));
- prt_str(out, "/");
- prt_units_u64(out, 512ULL << sb->layout.sb_max_size_bits);
- prt_newline(out);
-
- prt_printf(out, "Clean:\t%llu\n", BCH_SB_CLEAN(sb));
- prt_printf(out, "Devices:\t%u\n", bch2_sb_nr_devices(sb));
-
- prt_printf(out, "Sections:\t");
- u64 fields_have = 0;
- vstruct_for_each(sb, f)
- fields_have |= 1 << le32_to_cpu(f->type);
- prt_bitflags(out, bch2_sb_fields, fields_have);
- prt_newline(out);
-
- prt_printf(out, "Features:\t");
- prt_bitflags(out, bch2_sb_features, le64_to_cpu(sb->features[0]));
- prt_newline(out);
-
- prt_printf(out, "Compat features:\t");
- prt_bitflags(out, bch2_sb_compat, le64_to_cpu(sb->compat[0]));
- prt_newline(out);
-
- prt_newline(out);
- prt_printf(out, "Options:");
- prt_newline(out);
- printbuf_indent_add(out, 2);
- {
- enum bch_opt_id id;
-
- for (id = 0; id < bch2_opts_nr; id++) {
- const struct bch_option *opt = bch2_opt_table + id;
-
- if (opt->get_sb != BCH2_NO_SB_OPT) {
- u64 v = bch2_opt_from_sb(sb, id);
-
- prt_printf(out, "%s:\t", opt->attr.name);
- bch2_opt_to_text(out, NULL, sb, opt, v,
- OPT_HUMAN_READABLE|OPT_SHOW_FULL_LIST);
- prt_newline(out);
- }
- }
- }
-
- printbuf_indent_sub(out, 2);
-
- if (print_layout) {
- prt_newline(out);
- prt_printf(out, "layout:");
- prt_newline(out);
- printbuf_indent_add(out, 2);
- bch2_sb_layout_to_text(out, &sb->layout);
- printbuf_indent_sub(out, 2);
- }
-
- vstruct_for_each(sb, f)
- if (fields & (1 << le32_to_cpu(f->type))) {
- prt_newline(out);
- bch2_sb_field_to_text(out, sb, f);
- }
-}
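
Taken together, the helpers above support a simple "open device, dump superblock" flow, roughly what the userspace tools build on. A hedged sketch; the function name, the use of pr_info(), and the decision to print every optional field are assumptions.

/* Illustrative sketch only: read the superblock from @path and log it */
static int example_dump_super(const char *path, struct bch_opts *opts)
{
	struct bch_sb_handle sb;
	struct printbuf buf = PRINTBUF;

	int ret = bch2_read_super(path, opts, &sb);
	if (ret)
		return ret;

	/* print_layout = true, and request every optional field type */
	bch2_sb_to_text(&buf, sb.sb, true, ~0U);
	pr_info("%s\n", buf.buf);

	printbuf_exit(&buf);
	bch2_free_super(&sb);
	return 0;
}
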
diff --git a/fs/bcachefs/super-io.h b/fs/bcachefs/super-io.h
deleted file mode 100644
index fadd364e2802..000000000000
--- a/fs/bcachefs/super-io.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SUPER_IO_H
-#define _BCACHEFS_SUPER_IO_H
-
-#include "extents.h"
-#include "eytzinger.h"
-#include "super_types.h"
-#include "super.h"
-#include "sb-members.h"
-
-#include <asm/byteorder.h>
-
-static inline bool bch2_version_compatible(u16 version)
-{
- return BCH_VERSION_MAJOR(version) <= BCH_VERSION_MAJOR(bcachefs_metadata_version_current) &&
- version >= bcachefs_metadata_version_min;
-}
-
-void bch2_version_to_text(struct printbuf *, unsigned);
-unsigned bch2_latest_compatible_version(unsigned);
-
-static inline size_t bch2_sb_field_bytes(struct bch_sb_field *f)
-{
- return le32_to_cpu(f->u64s) * sizeof(u64);
-}
-
-#define field_to_type(_f, _name) \
- container_of_or_null(_f, struct bch_sb_field_##_name, field)
-
-struct bch_sb_field *bch2_sb_field_get_id(struct bch_sb *, enum bch_sb_field_type);
-#define bch2_sb_field_get(_sb, _name) \
- field_to_type(bch2_sb_field_get_id(_sb, BCH_SB_FIELD_##_name), _name)
-
-struct bch_sb_field *bch2_sb_field_resize_id(struct bch_sb_handle *,
- enum bch_sb_field_type, unsigned);
-#define bch2_sb_field_resize(_sb, _name, _u64s) \
- field_to_type(bch2_sb_field_resize_id(_sb, BCH_SB_FIELD_##_name, _u64s), _name)
-
-struct bch_sb_field *bch2_sb_field_get_minsize_id(struct bch_sb_handle *,
- enum bch_sb_field_type, unsigned);
-#define bch2_sb_field_get_minsize(_sb, _name, _u64s) \
- field_to_type(bch2_sb_field_get_minsize_id(_sb, BCH_SB_FIELD_##_name, _u64s), _name)
-
-#define bch2_sb_field_nr_entries(_f) \
- (_f ? ((bch2_sb_field_bytes(&_f->field) - sizeof(*_f)) / \
- sizeof(_f->entries[0])) \
- : 0)
-
-void bch2_sb_field_delete(struct bch_sb_handle *, enum bch_sb_field_type);
-
-extern const char * const bch2_sb_fields[];
-
-struct bch_sb_field_ops {
- int (*validate)(struct bch_sb *, struct bch_sb_field *,
- enum bch_validate_flags, struct printbuf *);
- void (*to_text)(struct printbuf *, struct bch_sb *, struct bch_sb_field *);
-};
-
-static inline __le64 bch2_sb_magic(struct bch_fs *c)
-{
- __le64 ret;
-
- memcpy(&ret, &c->sb.uuid, sizeof(ret));
- return ret;
-}
-
-static inline __u64 jset_magic(struct bch_fs *c)
-{
- return __le64_to_cpu(bch2_sb_magic(c) ^ JSET_MAGIC);
-}
-
-static inline __u64 bset_magic(struct bch_fs *c)
-{
- return __le64_to_cpu(bch2_sb_magic(c) ^ BSET_MAGIC);
-}
-
-int bch2_sb_to_fs(struct bch_fs *, struct bch_sb *);
-int bch2_sb_from_fs(struct bch_fs *, struct bch_dev *);
-
-void bch2_free_super(struct bch_sb_handle *);
-int bch2_sb_realloc(struct bch_sb_handle *, unsigned);
-
-int bch2_read_super(const char *, struct bch_opts *, struct bch_sb_handle *);
-int bch2_read_super_silent(const char *, struct bch_opts *, struct bch_sb_handle *);
-int bch2_write_super(struct bch_fs *);
-void __bch2_check_set_feature(struct bch_fs *, unsigned);
-
-static inline void bch2_check_set_feature(struct bch_fs *c, unsigned feat)
-{
- if (!(c->sb.features & (1ULL << feat)))
- __bch2_check_set_feature(c, feat);
-}
-
-bool bch2_check_version_downgrade(struct bch_fs *);
-void bch2_sb_upgrade(struct bch_fs *, unsigned);
-
-void __bch2_sb_field_to_text(struct printbuf *, struct bch_sb *,
- struct bch_sb_field *);
-void bch2_sb_field_to_text(struct printbuf *, struct bch_sb *,
- struct bch_sb_field *);
-void bch2_sb_layout_to_text(struct printbuf *, struct bch_sb_layout *);
-void bch2_sb_to_text(struct printbuf *, struct bch_sb *, bool, unsigned);
-
-#endif /* _BCACHEFS_SUPER_IO_H */
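The bch2_check_set_feature() inline above is a lockless fast path: test the feature bit first and only drop into the slow path (which takes sb_lock and rewrites the superblock) when the bit is missing. A minimal standalone sketch of the same check-then-set pattern, using C11 atomics instead of the bcachefs types; the names here are hypothetical, not bcachefs code:

/* Sketch only: fast-path bit test, slow path only when the bit is unset. */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long long features;

static void set_feature_slowpath(unsigned feat)
{
	/* stands in for taking sb_lock and persisting the superblock */
	atomic_fetch_or(&features, 1ULL << feat);
	printf("feature %u persisted\n", feat);
}

static inline void check_set_feature(unsigned feat)
{
	if (!(atomic_load(&features) & (1ULL << feat)))
		set_feature_slowpath(feat);
}

int main(void)
{
	check_set_feature(3);	/* slow path: persists the bit */
	check_set_feature(3);	/* fast path: nothing to do */
	return 0;
}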
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
deleted file mode 100644
index a6ed9a0bf1c7..000000000000
--- a/fs/bcachefs/super.c
+++ /dev/null
@@ -1,2148 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * bcachefs setup/teardown code, and some metadata io - read a superblock and
- * figure out what to do with it.
- *
- * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
- * Copyright 2012 Google, Inc.
- */
-
-#include "bcachefs.h"
-#include "alloc_background.h"
-#include "alloc_foreground.h"
-#include "bkey_sort.h"
-#include "btree_cache.h"
-#include "btree_gc.h"
-#include "btree_journal_iter.h"
-#include "btree_key_cache.h"
-#include "btree_node_scan.h"
-#include "btree_update_interior.h"
-#include "btree_io.h"
-#include "btree_write_buffer.h"
-#include "buckets_waiting_for_journal.h"
-#include "chardev.h"
-#include "checksum.h"
-#include "clock.h"
-#include "compress.h"
-#include "debug.h"
-#include "disk_accounting.h"
-#include "disk_groups.h"
-#include "ec.h"
-#include "errcode.h"
-#include "error.h"
-#include "fs.h"
-#include "fs-io.h"
-#include "fs-io-buffered.h"
-#include "fs-io-direct.h"
-#include "fsck.h"
-#include "inode.h"
-#include "io_read.h"
-#include "io_write.h"
-#include "journal.h"
-#include "journal_reclaim.h"
-#include "journal_seq_blacklist.h"
-#include "move.h"
-#include "migrate.h"
-#include "movinggc.h"
-#include "nocow_locking.h"
-#include "quota.h"
-#include "rebalance.h"
-#include "recovery.h"
-#include "replicas.h"
-#include "sb-clean.h"
-#include "sb-counters.h"
-#include "sb-errors.h"
-#include "sb-members.h"
-#include "snapshot.h"
-#include "subvolume.h"
-#include "super.h"
-#include "super-io.h"
-#include "sysfs.h"
-#include "thread_with_file.h"
-#include "trace.h"
-
-#include <linux/backing-dev.h>
-#include <linux/blkdev.h>
-#include <linux/debugfs.h>
-#include <linux/device.h>
-#include <linux/idr.h>
-#include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/random.h>
-#include <linux/sysfs.h>
-#include <crypto/hash.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
-MODULE_DESCRIPTION("bcachefs filesystem");
-MODULE_SOFTDEP("pre: crc32c");
-MODULE_SOFTDEP("pre: crc64");
-MODULE_SOFTDEP("pre: sha256");
-MODULE_SOFTDEP("pre: chacha20");
-MODULE_SOFTDEP("pre: poly1305");
-MODULE_SOFTDEP("pre: xxhash");
-
-const char * const bch2_fs_flag_strs[] = {
-#define x(n) #n,
- BCH_FS_FLAGS()
-#undef x
- NULL
-};
-
-void bch2_print_str(struct bch_fs *c, const char *str)
-{
-#ifdef __KERNEL__
- struct stdio_redirect *stdio = bch2_fs_stdio_redirect(c);
-
- if (unlikely(stdio)) {
- bch2_stdio_redirect_printf(stdio, true, "%s", str);
- return;
- }
-#endif
- bch2_print_string_as_lines(KERN_ERR, str);
-}
-
-__printf(2, 0)
-static void bch2_print_maybe_redirect(struct stdio_redirect *stdio, const char *fmt, va_list args)
-{
-#ifdef __KERNEL__
- if (unlikely(stdio)) {
- if (fmt[0] == KERN_SOH[0])
- fmt += 2;
-
- bch2_stdio_redirect_vprintf(stdio, true, fmt, args);
- return;
- }
-#endif
- vprintk(fmt, args);
-}
-
-void bch2_print_opts(struct bch_opts *opts, const char *fmt, ...)
-{
- struct stdio_redirect *stdio = (void *)(unsigned long)opts->stdio;
-
- va_list args;
- va_start(args, fmt);
- bch2_print_maybe_redirect(stdio, fmt, args);
- va_end(args);
-}
-
-void __bch2_print(struct bch_fs *c, const char *fmt, ...)
-{
- struct stdio_redirect *stdio = bch2_fs_stdio_redirect(c);
-
- va_list args;
- va_start(args, fmt);
- bch2_print_maybe_redirect(stdio, fmt, args);
- va_end(args);
-}
-
-#define KTYPE(type) \
-static const struct attribute_group type ## _group = { \
- .attrs = type ## _files \
-}; \
- \
-static const struct attribute_group *type ## _groups[] = { \
- &type ## _group, \
- NULL \
-}; \
- \
-static const struct kobj_type type ## _ktype = { \
- .release = type ## _release, \
- .sysfs_ops = &type ## _sysfs_ops, \
- .default_groups = type ## _groups \
-}
-
-static void bch2_fs_release(struct kobject *);
-static void bch2_dev_release(struct kobject *);
-static void bch2_fs_counters_release(struct kobject *k)
-{
-}
-
-static void bch2_fs_internal_release(struct kobject *k)
-{
-}
-
-static void bch2_fs_opts_dir_release(struct kobject *k)
-{
-}
-
-static void bch2_fs_time_stats_release(struct kobject *k)
-{
-}
-
-KTYPE(bch2_fs);
-KTYPE(bch2_fs_counters);
-KTYPE(bch2_fs_internal);
-KTYPE(bch2_fs_opts_dir);
-KTYPE(bch2_fs_time_stats);
-KTYPE(bch2_dev);
-
-static struct kset *bcachefs_kset;
-static LIST_HEAD(bch_fs_list);
-static DEFINE_MUTEX(bch_fs_list_lock);
-
-DECLARE_WAIT_QUEUE_HEAD(bch2_read_only_wait);
-
-static void bch2_dev_unlink(struct bch_dev *);
-static void bch2_dev_free(struct bch_dev *);
-static int bch2_dev_alloc(struct bch_fs *, unsigned);
-static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
-static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);
-
-struct bch_fs *bch2_dev_to_fs(dev_t dev)
-{
- struct bch_fs *c;
-
- mutex_lock(&bch_fs_list_lock);
- rcu_read_lock();
-
- list_for_each_entry(c, &bch_fs_list, list)
- for_each_member_device_rcu(c, ca, NULL)
- if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) {
- closure_get(&c->cl);
- goto found;
- }
- c = NULL;
-found:
- rcu_read_unlock();
- mutex_unlock(&bch_fs_list_lock);
-
- return c;
-}
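bch2_dev_to_fs() above walks the global filesystem list under bch_fs_list_lock and takes a closure reference before dropping the lock, so the caller never gets a pointer to a filesystem that could be freed underneath it. A rough kernel-style sketch of that lookup-then-reference pattern; the "demo" names are hypothetical stand-ins, not bcachefs code:

/* Sketch only: find under the list lock, take a reference before unlocking. */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/types.h>

struct demo_fs {
	struct list_head	list;
	dev_t			dev;
	refcount_t		ref;
};

static LIST_HEAD(demo_fs_list);
static DEFINE_MUTEX(demo_fs_list_lock);

static struct demo_fs *demo_dev_to_fs(dev_t dev)
{
	struct demo_fs *fs, *ret = NULL;

	mutex_lock(&demo_fs_list_lock);
	list_for_each_entry(fs, &demo_fs_list, list)
		if (fs->dev == dev) {
			refcount_inc(&fs->ref);	/* taken while still under the lock */
			ret = fs;
			break;
		}
	mutex_unlock(&demo_fs_list_lock);

	return ret;	/* caller drops the reference when done */
}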
-
-static struct bch_fs *__bch2_uuid_to_fs(__uuid_t uuid)
-{
- struct bch_fs *c;
-
- lockdep_assert_held(&bch_fs_list_lock);
-
- list_for_each_entry(c, &bch_fs_list, list)
- if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid)))
- return c;
-
- return NULL;
-}
-
-struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid)
-{
- struct bch_fs *c;
-
- mutex_lock(&bch_fs_list_lock);
- c = __bch2_uuid_to_fs(uuid);
- if (c)
- closure_get(&c->cl);
- mutex_unlock(&bch_fs_list_lock);
-
- return c;
-}
-
-/* Filesystem RO/RW: */
-
-/*
- * For startup/shutdown of RW stuff, the dependencies are:
- *
- * - foreground writes depend on copygc and rebalance (to free up space)
- *
- * - copygc and rebalance depend on mark and sweep gc (they actually probably
- * don't because they either reserve ahead of time or don't block if
- * allocations fail, but allocations can require mark and sweep gc to run
- * because of generation number wraparound)
- *
- * - all of the above depends on the allocator threads
- *
- * - allocator depends on the journal (when it rewrites prios and gens)
- */
-
-static void __bch2_fs_read_only(struct bch_fs *c)
-{
- unsigned clean_passes = 0;
- u64 seq = 0;
-
- bch2_fs_ec_stop(c);
- bch2_open_buckets_stop(c, NULL, true);
- bch2_rebalance_stop(c);
- bch2_copygc_stop(c);
- bch2_fs_ec_flush(c);
-
- bch_verbose(c, "flushing journal and stopping allocators, journal seq %llu",
- journal_cur_seq(&c->journal));
-
- do {
- clean_passes++;
-
- if (bch2_btree_interior_updates_flush(c) ||
- bch2_btree_write_buffer_flush_going_ro(c) ||
- bch2_journal_flush_all_pins(&c->journal) ||
- bch2_btree_flush_all_writes(c) ||
- seq != atomic64_read(&c->journal.seq)) {
- seq = atomic64_read(&c->journal.seq);
- clean_passes = 0;
- }
- } while (clean_passes < 2);
-
- bch_verbose(c, "flushing journal and stopping allocators complete, journal seq %llu",
- journal_cur_seq(&c->journal));
-
- if (test_bit(JOURNAL_replay_done, &c->journal.flags) &&
- !test_bit(BCH_FS_emergency_ro, &c->flags))
- set_bit(BCH_FS_clean_shutdown, &c->flags);
-
- bch2_fs_journal_stop(&c->journal);
-
- bch_info(c, "%sshutdown complete, journal seq %llu",
- test_bit(BCH_FS_clean_shutdown, &c->flags) ? "" : "un",
- c->journal.seq_ondisk);
-
- /*
-  * After stopping the journal, remove each device from the allocator so
-  * nothing can allocate from it anymore:
-  */
- for_each_member_device(c, ca)
- bch2_dev_allocator_remove(c, ca);
-}
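The do/while loop in __bch2_fs_read_only() above keeps flushing until two consecutive passes find nothing left to do, which is how it copes with flushes that generate more work for one another. The same convergence pattern in isolation, as a small runnable sketch; flush_once() is a stand-in, not a bcachefs function:

/* Sketch only: loop until two clean passes in a row. */
#include <stdbool.h>
#include <stdio.h>

static int pending = 5;

static bool flush_once(void)
{
	/* returns true while it still found work to do */
	if (pending > 0) {
		pending--;
		return true;
	}
	return false;
}

static void flush_until_quiesced(void)
{
	unsigned clean_passes = 0;

	do {
		clean_passes++;
		if (flush_once())
			clean_passes = 0;
	} while (clean_passes < 2);

	printf("quiesced\n");
}

int main(void)
{
	flush_until_quiesced();
	return 0;
}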
-
-#ifndef BCH_WRITE_REF_DEBUG
-static void bch2_writes_disabled(struct percpu_ref *writes)
-{
- struct bch_fs *c = container_of(writes, struct bch_fs, writes);
-
- set_bit(BCH_FS_write_disable_complete, &c->flags);
- wake_up(&bch2_read_only_wait);
-}
-#endif
-
-void bch2_fs_read_only(struct bch_fs *c)
-{
- if (!test_bit(BCH_FS_rw, &c->flags)) {
- bch2_journal_reclaim_stop(&c->journal);
- return;
- }
-
- BUG_ON(test_bit(BCH_FS_write_disable_complete, &c->flags));
-
- bch_verbose(c, "going read-only");
-
- /*
- * Block new foreground-end write operations from starting - any new
- * writes will return -EROFS:
- */
- set_bit(BCH_FS_going_ro, &c->flags);
-#ifndef BCH_WRITE_REF_DEBUG
- percpu_ref_kill(&c->writes);
-#else
- for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++)
- bch2_write_ref_put(c, i);
-#endif
-
- /*
- * If we're not doing an emergency shutdown, we want to wait on
- * outstanding writes to complete so they don't see spurious errors due
- * to shutting down the allocator:
- *
- * If we are doing an emergency shutdown, outstanding writes may
- * hang until we shut down the allocator, so we don't want to wait
- * on outstanding writes before shutting everything down - but
- * we do need to wait on them before returning and signalling
- * that going RO is complete:
- */
- wait_event(bch2_read_only_wait,
- test_bit(BCH_FS_write_disable_complete, &c->flags) ||
- test_bit(BCH_FS_emergency_ro, &c->flags));
-
- bool writes_disabled = test_bit(BCH_FS_write_disable_complete, &c->flags);
- if (writes_disabled)
- bch_verbose(c, "finished waiting for writes to stop");
-
- __bch2_fs_read_only(c);
-
- wait_event(bch2_read_only_wait,
- test_bit(BCH_FS_write_disable_complete, &c->flags));
-
- if (!writes_disabled)
- bch_verbose(c, "finished waiting for writes to stop");
-
- clear_bit(BCH_FS_write_disable_complete, &c->flags);
- clear_bit(BCH_FS_going_ro, &c->flags);
- clear_bit(BCH_FS_rw, &c->flags);
-
- if (!bch2_journal_error(&c->journal) &&
- !test_bit(BCH_FS_error, &c->flags) &&
- !test_bit(BCH_FS_emergency_ro, &c->flags) &&
- test_bit(BCH_FS_started, &c->flags) &&
- test_bit(BCH_FS_clean_shutdown, &c->flags) &&
- c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay) {
- BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
- BUG_ON(atomic_long_read(&c->btree_cache.nr_dirty));
- BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
- BUG_ON(c->btree_write_buffer.inc.keys.nr);
- BUG_ON(c->btree_write_buffer.flushing.keys.nr);
- bch2_verify_accounting_clean(c);
-
- bch_verbose(c, "marking filesystem clean");
- bch2_fs_mark_clean(c);
- } else {
- bch_verbose(c, "done going read-only, filesystem not clean");
- }
-}
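bch2_fs_read_only() above leans on the percpu_ref machinery to gate writes: killing c->writes stops new writers, the release callback fires once in-flight writers drain, and going read-write again just reinitializes the ref. A stripped-down kernel-style sketch of that kill/wait/reinit pattern, with hypothetical "demo" names standing in for the bcachefs structures:

/* Sketch only: percpu_ref as a write gate, not bcachefs code. */
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/wait.h>

struct demo_fs {
	struct percpu_ref	writes;
	wait_queue_head_t	writes_dead_wait;
	bool			writes_dead;
};

static void demo_writes_released(struct percpu_ref *ref)
{
	struct demo_fs *fs = container_of(ref, struct demo_fs, writes);

	fs->writes_dead = true;
	wake_up(&fs->writes_dead_wait);
}

static int demo_fs_init(struct demo_fs *fs)
{
	init_waitqueue_head(&fs->writes_dead_wait);
	/* start dead, like PERCPU_REF_INIT_DEAD above; reinit when going rw */
	return percpu_ref_init(&fs->writes, demo_writes_released,
			       PERCPU_REF_INIT_DEAD, GFP_KERNEL);
}

static void demo_fs_go_rw(struct demo_fs *fs)
{
	fs->writes_dead = false;
	percpu_ref_reinit(&fs->writes);
}

static void demo_fs_go_ro(struct demo_fs *fs)
{
	percpu_ref_kill(&fs->writes);		/* new writers now fail */
	wait_event(fs->writes_dead_wait,	/* wait for in-flight writers */
		   fs->writes_dead);
}

/* a writer holds the ref for the duration of its work */
static bool demo_write_begin(struct demo_fs *fs)
{
	return percpu_ref_tryget_live(&fs->writes);
}

static void demo_write_end(struct demo_fs *fs)
{
	percpu_ref_put(&fs->writes);
}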
-
-static void bch2_fs_read_only_work(struct work_struct *work)
-{
- struct bch_fs *c =
- container_of(work, struct bch_fs, read_only_work);
-
- down_write(&c->state_lock);
- bch2_fs_read_only(c);
- up_write(&c->state_lock);
-}
-
-static void bch2_fs_read_only_async(struct bch_fs *c)
-{
- queue_work(system_long_wq, &c->read_only_work);
-}
-
-bool bch2_fs_emergency_read_only(struct bch_fs *c)
-{
- bool ret = !test_and_set_bit(BCH_FS_emergency_ro, &c->flags);
-
- bch2_journal_halt(&c->journal);
- bch2_fs_read_only_async(c);
-
- wake_up(&bch2_read_only_wait);
- return ret;
-}
-
-static int bch2_fs_read_write_late(struct bch_fs *c)
-{
- int ret;
-
- /*
- * Data move operations can't run until after check_snapshots has
- * completed, and bch2_snapshot_is_ancestor() is available.
- *
- * Ideally we'd start copygc/rebalance earlier instead of waiting for
- * all of recovery/fsck to complete:
- */
- ret = bch2_copygc_start(c);
- if (ret) {
- bch_err(c, "error starting copygc thread");
- return ret;
- }
-
- ret = bch2_rebalance_start(c);
- if (ret) {
- bch_err(c, "error starting rebalance thread");
- return ret;
- }
-
- return 0;
-}
-
-static int __bch2_fs_read_write(struct bch_fs *c, bool early)
-{
- int ret;
-
- if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) {
- bch_err(c, "cannot go rw, unfixed btree errors");
- return -BCH_ERR_erofs_unfixed_errors;
- }
-
- if (test_bit(BCH_FS_rw, &c->flags))
- return 0;
-
- bch_info(c, "going read-write");
-
- ret = bch2_sb_members_v2_init(c);
- if (ret)
- goto err;
-
- ret = bch2_fs_mark_dirty(c);
- if (ret)
- goto err;
-
- clear_bit(BCH_FS_clean_shutdown, &c->flags);
-
- /*
- * First journal write must be a flush write: after a clean shutdown we
- * don't read the journal, so the first journal write may end up
- * overwriting whatever was there previously, and there must always be
- * at least one non-flush write in the journal or recovery will fail:
- */
- set_bit(JOURNAL_need_flush_write, &c->journal.flags);
- set_bit(JOURNAL_running, &c->journal.flags);
-
- for_each_rw_member(c, ca)
- bch2_dev_allocator_add(c, ca);
- bch2_recalc_capacity(c);
-
- set_bit(BCH_FS_rw, &c->flags);
- set_bit(BCH_FS_was_rw, &c->flags);
-
-#ifndef BCH_WRITE_REF_DEBUG
- percpu_ref_reinit(&c->writes);
-#else
- for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++) {
- BUG_ON(atomic_long_read(&c->writes[i]));
- atomic_long_inc(&c->writes[i]);
- }
-#endif
-
- ret = bch2_journal_reclaim_start(&c->journal);
- if (ret)
- goto err;
-
- if (!early) {
- ret = bch2_fs_read_write_late(c);
- if (ret)
- goto err;
- }
-
- bch2_do_discards(c);
- bch2_do_invalidates(c);
- bch2_do_stripe_deletes(c);
- bch2_do_pending_node_rewrites(c);
- return 0;
-err:
- if (test_bit(BCH_FS_rw, &c->flags))
- bch2_fs_read_only(c);
- else
- __bch2_fs_read_only(c);
- return ret;
-}
-
-int bch2_fs_read_write(struct bch_fs *c)
-{
- if (c->opts.recovery_pass_last &&
- c->opts.recovery_pass_last < BCH_RECOVERY_PASS_journal_replay)
- return -BCH_ERR_erofs_norecovery;
-
- if (c->opts.nochanges)
- return -BCH_ERR_erofs_nochanges;
-
- return __bch2_fs_read_write(c, false);
-}
-
-int bch2_fs_read_write_early(struct bch_fs *c)
-{
- lockdep_assert_held(&c->state_lock);
-
- return __bch2_fs_read_write(c, true);
-}
-
-/* Filesystem startup/shutdown: */
-
-static void __bch2_fs_free(struct bch_fs *c)
-{
- for (unsigned i = 0; i < BCH_TIME_STAT_NR; i++)
- bch2_time_stats_exit(&c->times[i]);
-
- bch2_find_btree_nodes_exit(&c->found_btree_nodes);
- bch2_free_pending_node_rewrites(c);
- bch2_fs_accounting_exit(c);
- bch2_fs_sb_errors_exit(c);
- bch2_fs_counters_exit(c);
- bch2_fs_snapshots_exit(c);
- bch2_fs_quota_exit(c);
- bch2_fs_fs_io_direct_exit(c);
- bch2_fs_fs_io_buffered_exit(c);
- bch2_fs_fsio_exit(c);
- bch2_fs_vfs_exit(c);
- bch2_fs_ec_exit(c);
- bch2_fs_encryption_exit(c);
- bch2_fs_nocow_locking_exit(c);
- bch2_fs_io_write_exit(c);
- bch2_fs_io_read_exit(c);
- bch2_fs_buckets_waiting_for_journal_exit(c);
- bch2_fs_btree_interior_update_exit(c);
- bch2_fs_btree_key_cache_exit(&c->btree_key_cache);
- bch2_fs_btree_cache_exit(c);
- bch2_fs_btree_iter_exit(c);
- bch2_fs_replicas_exit(c);
- bch2_fs_journal_exit(&c->journal);
- bch2_io_clock_exit(&c->io_clock[WRITE]);
- bch2_io_clock_exit(&c->io_clock[READ]);
- bch2_fs_compress_exit(c);
- bch2_journal_keys_put_initial(c);
- bch2_find_btree_nodes_exit(&c->found_btree_nodes);
- BUG_ON(atomic_read(&c->journal_keys.ref));
- bch2_fs_btree_write_buffer_exit(c);
- percpu_free_rwsem(&c->mark_lock);
- if (c->online_reserved) {
- u64 v = percpu_u64_get(c->online_reserved);
- WARN(v, "online_reserved not 0 at shutdown: %lli", v);
- free_percpu(c->online_reserved);
- }
-
- darray_exit(&c->btree_roots_extra);
- free_percpu(c->pcpu);
- free_percpu(c->usage);
- mempool_exit(&c->large_bkey_pool);
- mempool_exit(&c->btree_bounce_pool);
- bioset_exit(&c->btree_bio);
- mempool_exit(&c->fill_iter);
-#ifndef BCH_WRITE_REF_DEBUG
- percpu_ref_exit(&c->writes);
-#endif
- kfree(rcu_dereference_protected(c->disk_groups, 1));
- kfree(c->journal_seq_blacklist_table);
- kfree(c->unused_inode_hints);
-
- if (c->write_ref_wq)
- destroy_workqueue(c->write_ref_wq);
- if (c->btree_write_submit_wq)
- destroy_workqueue(c->btree_write_submit_wq);
- if (c->btree_read_complete_wq)
- destroy_workqueue(c->btree_read_complete_wq);
- if (c->copygc_wq)
- destroy_workqueue(c->copygc_wq);
- if (c->btree_io_complete_wq)
- destroy_workqueue(c->btree_io_complete_wq);
- if (c->btree_update_wq)
- destroy_workqueue(c->btree_update_wq);
-
- bch2_free_super(&c->disk_sb);
- kvfree(c);
- module_put(THIS_MODULE);
-}
-
-static void bch2_fs_release(struct kobject *kobj)
-{
- struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
-
- __bch2_fs_free(c);
-}
-
-void __bch2_fs_stop(struct bch_fs *c)
-{
- bch_verbose(c, "shutting down");
-
- set_bit(BCH_FS_stopping, &c->flags);
-
- down_write(&c->state_lock);
- bch2_fs_read_only(c);
- up_write(&c->state_lock);
-
- for_each_member_device(c, ca)
- bch2_dev_unlink(ca);
-
- if (c->kobj.state_in_sysfs)
- kobject_del(&c->kobj);
-
- bch2_fs_debug_exit(c);
- bch2_fs_chardev_exit(c);
-
- bch2_ro_ref_put(c);
- wait_event(c->ro_ref_wait, !refcount_read(&c->ro_ref));
-
- kobject_put(&c->counters_kobj);
- kobject_put(&c->time_stats);
- kobject_put(&c->opts_dir);
- kobject_put(&c->internal);
-
- /* btree prefetch might have kicked off reads in the background: */
- bch2_btree_flush_all_reads(c);
-
- for_each_member_device(c, ca)
- cancel_work_sync(&ca->io_error_work);
-
- cancel_work_sync(&c->read_only_work);
-}
-
-void bch2_fs_free(struct bch_fs *c)
-{
- unsigned i;
-
- mutex_lock(&bch_fs_list_lock);
- list_del(&c->list);
- mutex_unlock(&bch_fs_list_lock);
-
- closure_sync(&c->cl);
- closure_debug_destroy(&c->cl);
-
- for (i = 0; i < c->sb.nr_devices; i++) {
- struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true);
-
- if (ca) {
- EBUG_ON(atomic_long_read(&ca->ref) != 1);
- bch2_free_super(&ca->disk_sb);
- bch2_dev_free(ca);
- }
- }
-
- bch_verbose(c, "shutdown complete");
-
- kobject_put(&c->kobj);
-}
-
-void bch2_fs_stop(struct bch_fs *c)
-{
- __bch2_fs_stop(c);
- bch2_fs_free(c);
-}
-
-static int bch2_fs_online(struct bch_fs *c)
-{
- int ret = 0;
-
- lockdep_assert_held(&bch_fs_list_lock);
-
- if (__bch2_uuid_to_fs(c->sb.uuid)) {
- bch_err(c, "filesystem UUID already open");
- return -EINVAL;
- }
-
- ret = bch2_fs_chardev_init(c);
- if (ret) {
- bch_err(c, "error creating character device");
- return ret;
- }
-
- bch2_fs_debug_init(c);
-
- ret = kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ?:
- kobject_add(&c->internal, &c->kobj, "internal") ?:
- kobject_add(&c->opts_dir, &c->kobj, "options") ?:
-#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
- kobject_add(&c->time_stats, &c->kobj, "time_stats") ?:
-#endif
- kobject_add(&c->counters_kobj, &c->kobj, "counters") ?:
- bch2_opts_create_sysfs_files(&c->opts_dir);
- if (ret) {
- bch_err(c, "error creating sysfs objects");
- return ret;
- }
-
- down_write(&c->state_lock);
-
- for_each_member_device(c, ca) {
- ret = bch2_dev_sysfs_online(c, ca);
- if (ret) {
- bch_err(c, "error creating sysfs objects");
- bch2_dev_put(ca);
- goto err;
- }
- }
-
- BUG_ON(!list_empty(&c->list));
- list_add(&c->list, &bch_fs_list);
-err:
- up_write(&c->state_lock);
- return ret;
-}
-
-static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
-{
- struct bch_fs *c;
- struct printbuf name = PRINTBUF;
- unsigned i, iter_size;
- int ret = 0;
-
- c = kvmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
- if (!c) {
- c = ERR_PTR(-BCH_ERR_ENOMEM_fs_alloc);
- goto out;
- }
-
- c->stdio = (void *)(unsigned long) opts.stdio;
-
- __module_get(THIS_MODULE);
-
- closure_init(&c->cl, NULL);
-
- c->kobj.kset = bcachefs_kset;
- kobject_init(&c->kobj, &bch2_fs_ktype);
- kobject_init(&c->internal, &bch2_fs_internal_ktype);
- kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
- kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);
- kobject_init(&c->counters_kobj, &bch2_fs_counters_ktype);
-
- c->minor = -1;
- c->disk_sb.fs_sb = true;
-
- init_rwsem(&c->state_lock);
- mutex_init(&c->sb_lock);
- mutex_init(&c->replicas_gc_lock);
- mutex_init(&c->btree_root_lock);
- INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);
-
- refcount_set(&c->ro_ref, 1);
- init_waitqueue_head(&c->ro_ref_wait);
- sema_init(&c->online_fsck_mutex, 1);
-
- init_rwsem(&c->gc_lock);
- mutex_init(&c->gc_gens_lock);
- atomic_set(&c->journal_keys.ref, 1);
- c->journal_keys.initial_ref_held = true;
-
- for (i = 0; i < BCH_TIME_STAT_NR; i++)
- bch2_time_stats_init(&c->times[i]);
-
- bch2_fs_gc_init(c);
- bch2_fs_copygc_init(c);
- bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
- bch2_fs_btree_iter_init_early(c);
- bch2_fs_btree_interior_update_init_early(c);
- bch2_fs_allocator_background_init(c);
- bch2_fs_allocator_foreground_init(c);
- bch2_fs_rebalance_init(c);
- bch2_fs_quota_init(c);
- bch2_fs_ec_init_early(c);
- bch2_fs_move_init(c);
- bch2_fs_sb_errors_init_early(c);
-
- INIT_LIST_HEAD(&c->list);
-
- mutex_init(&c->bio_bounce_pages_lock);
- mutex_init(&c->snapshot_table_lock);
- init_rwsem(&c->snapshot_create_lock);
-
- spin_lock_init(&c->btree_write_error_lock);
-
- INIT_LIST_HEAD(&c->journal_iters);
-
- INIT_LIST_HEAD(&c->fsck_error_msgs);
- mutex_init(&c->fsck_error_msgs_lock);
-
- seqcount_init(&c->usage_lock);
-
- sema_init(&c->io_in_flight, 128);
-
- INIT_LIST_HEAD(&c->vfs_inodes_list);
- mutex_init(&c->vfs_inodes_lock);
-
- c->copy_gc_enabled = 1;
- c->rebalance.enabled = 1;
-
- c->journal.flush_write_time = &c->times[BCH_TIME_journal_flush_write];
- c->journal.noflush_write_time = &c->times[BCH_TIME_journal_noflush_write];
- c->journal.flush_seq_time = &c->times[BCH_TIME_journal_flush_seq];
-
- bch2_fs_btree_cache_init_early(&c->btree_cache);
-
- mutex_init(&c->sectors_available_lock);
-
- ret = percpu_init_rwsem(&c->mark_lock);
- if (ret)
- goto err;
-
- mutex_lock(&c->sb_lock);
- ret = bch2_sb_to_fs(c, sb);
- mutex_unlock(&c->sb_lock);
-
- if (ret)
- goto err;
-
- pr_uuid(&name, c->sb.user_uuid.b);
- ret = name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0;
- if (ret)
- goto err;
-
- strscpy(c->name, name.buf, sizeof(c->name));
- printbuf_exit(&name);
-
- /* Compat: */
- if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
- !BCH_SB_JOURNAL_FLUSH_DELAY(sb))
- SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);
-
- if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
- !BCH_SB_JOURNAL_RECLAIM_DELAY(sb))
- SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 100);
-
- c->opts = bch2_opts_default;
- ret = bch2_opts_from_sb(&c->opts, sb);
- if (ret)
- goto err;
-
- bch2_opts_apply(&c->opts, opts);
-
- c->btree_key_cache_btrees |= 1U << BTREE_ID_alloc;
- if (c->opts.inodes_use_key_cache)
- c->btree_key_cache_btrees |= 1U << BTREE_ID_inodes;
- c->btree_key_cache_btrees |= 1U << BTREE_ID_logged_ops;
-
- c->block_bits = ilog2(block_sectors(c));
- c->btree_foreground_merge_threshold = BTREE_FOREGROUND_MERGE_THRESHOLD(c);
-
- if (bch2_fs_init_fault("fs_alloc")) {
- bch_err(c, "fs_alloc fault injected");
- ret = -EFAULT;
- goto err;
- }
-
- iter_size = sizeof(struct sort_iter) +
- (btree_blocks(c) + 1) * 2 *
- sizeof(struct sort_iter_set);
-
- c->inode_shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus()));
-
- if (!(c->btree_update_wq = alloc_workqueue("bcachefs",
- WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_UNBOUND, 512)) ||
- !(c->btree_io_complete_wq = alloc_workqueue("bcachefs_btree_io",
- WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
- !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
- WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
- !(c->btree_read_complete_wq = alloc_workqueue("bcachefs_btree_read_complete",
- WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 512)) ||
- !(c->btree_write_submit_wq = alloc_workqueue("bcachefs_btree_write_submit",
- WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
- !(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref",
- WQ_FREEZABLE, 0)) ||
-#ifndef BCH_WRITE_REF_DEBUG
- percpu_ref_init(&c->writes, bch2_writes_disabled,
- PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
-#endif
- mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
- bioset_init(&c->btree_bio, 1,
- max(offsetof(struct btree_read_bio, bio),
- offsetof(struct btree_write_bio, wbio.bio)),
- BIOSET_NEED_BVECS) ||
- !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
- !(c->usage = alloc_percpu(struct bch_fs_usage_base)) ||
- !(c->online_reserved = alloc_percpu(u64)) ||
- mempool_init_kvmalloc_pool(&c->btree_bounce_pool, 1,
- c->opts.btree_node_size) ||
- mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
- !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
- sizeof(u64), GFP_KERNEL))) {
- ret = -BCH_ERR_ENOMEM_fs_other_alloc;
- goto err;
- }
-
- ret = bch2_fs_counters_init(c) ?:
- bch2_fs_sb_errors_init(c) ?:
- bch2_io_clock_init(&c->io_clock[READ]) ?:
- bch2_io_clock_init(&c->io_clock[WRITE]) ?:
- bch2_fs_journal_init(&c->journal) ?:
- bch2_fs_btree_iter_init(c) ?:
- bch2_fs_btree_cache_init(c) ?:
- bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?:
- bch2_fs_btree_interior_update_init(c) ?:
- bch2_fs_buckets_waiting_for_journal_init(c) ?:
- bch2_fs_btree_write_buffer_init(c) ?:
- bch2_fs_subvolumes_init(c) ?:
- bch2_fs_io_read_init(c) ?:
- bch2_fs_io_write_init(c) ?:
- bch2_fs_nocow_locking_init(c) ?:
- bch2_fs_encryption_init(c) ?:
- bch2_fs_compress_init(c) ?:
- bch2_fs_ec_init(c) ?:
- bch2_fs_vfs_init(c) ?:
- bch2_fs_fsio_init(c) ?:
- bch2_fs_fs_io_buffered_init(c) ?:
- bch2_fs_fs_io_direct_init(c);
- if (ret)
- goto err;
-
- for (i = 0; i < c->sb.nr_devices; i++) {
- if (!bch2_member_exists(c->disk_sb.sb, i))
- continue;
- ret = bch2_dev_alloc(c, i);
- if (ret)
- goto err;
- }
-
- bch2_journal_entry_res_resize(&c->journal,
- &c->btree_root_journal_res,
- BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX));
- bch2_journal_entry_res_resize(&c->journal,
- &c->clock_journal_res,
- (sizeof(struct jset_entry_clock) / sizeof(u64)) * 2);
-
- mutex_lock(&bch_fs_list_lock);
- ret = bch2_fs_online(c);
- mutex_unlock(&bch_fs_list_lock);
-
- if (ret)
- goto err;
-out:
- return c;
-err:
- bch2_fs_free(c);
- c = ERR_PTR(ret);
- goto out;
-}
-
-noinline_for_stack
-static void print_mount_opts(struct bch_fs *c)
-{
- enum bch_opt_id i;
- struct printbuf p = PRINTBUF;
- bool first = true;
-
- prt_str(&p, "starting version ");
- bch2_version_to_text(&p, c->sb.version);
-
- if (c->opts.read_only) {
- prt_str(&p, " opts=");
- first = false;
- prt_printf(&p, "ro");
- }
-
- for (i = 0; i < bch2_opts_nr; i++) {
- const struct bch_option *opt = &bch2_opt_table[i];
- u64 v = bch2_opt_get_by_id(&c->opts, i);
-
- if (!(opt->flags & OPT_MOUNT))
- continue;
-
- if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
- continue;
-
- prt_str(&p, first ? " opts=" : ",");
- first = false;
- bch2_opt_to_text(&p, c, c->disk_sb.sb, opt, v, OPT_SHOW_MOUNT_STYLE);
- }
-
- bch_info(c, "%s", p.buf);
- printbuf_exit(&p);
-}
-
-int bch2_fs_start(struct bch_fs *c)
-{
- time64_t now = ktime_get_real_seconds();
- int ret;
-
- print_mount_opts(c);
-
- down_write(&c->state_lock);
-
- BUG_ON(test_bit(BCH_FS_started, &c->flags));
-
- mutex_lock(&c->sb_lock);
-
- ret = bch2_sb_members_v2_init(c);
- if (ret) {
- mutex_unlock(&c->sb_lock);
- goto err;
- }
-
- for_each_online_member(c, ca)
- bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = cpu_to_le64(now);
-
- struct bch_sb_field_ext *ext =
- bch2_sb_field_get_minsize(&c->disk_sb, ext, sizeof(*ext) / sizeof(u64));
- mutex_unlock(&c->sb_lock);
-
- if (!ext) {
- bch_err(c, "insufficient space in superblock for sb_field_ext");
- ret = -BCH_ERR_ENOSPC_sb;
- goto err;
- }
-
- for_each_rw_member(c, ca)
- bch2_dev_allocator_add(c, ca);
- bch2_recalc_capacity(c);
-
- ret = BCH_SB_INITIALIZED(c->disk_sb.sb)
- ? bch2_fs_recovery(c)
- : bch2_fs_initialize(c);
- if (ret)
- goto err;
-
- ret = bch2_opts_check_may_set(c);
- if (ret)
- goto err;
-
- if (bch2_fs_init_fault("fs_start")) {
- bch_err(c, "fs_start fault injected");
- ret = -EINVAL;
- goto err;
- }
-
- set_bit(BCH_FS_started, &c->flags);
-
- if (c->opts.read_only) {
- bch2_fs_read_only(c);
- } else {
- ret = !test_bit(BCH_FS_rw, &c->flags)
- ? bch2_fs_read_write(c)
- : bch2_fs_read_write_late(c);
- if (ret)
- goto err;
- }
-
- ret = 0;
-err:
- if (ret)
- bch_err_msg(c, ret, "starting filesystem");
- else
- bch_verbose(c, "done starting filesystem");
- up_write(&c->state_lock);
- return ret;
-}
-
-static int bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
-{
- struct bch_member m = bch2_sb_member_get(sb, sb->dev_idx);
-
- if (le16_to_cpu(sb->block_size) != block_sectors(c))
- return -BCH_ERR_mismatched_block_size;
-
- if (le16_to_cpu(m.bucket_size) <
- BCH_SB_BTREE_NODE_SIZE(c->disk_sb.sb))
- return -BCH_ERR_bucket_size_too_small;
-
- return 0;
-}
-
-static int bch2_dev_in_fs(struct bch_sb_handle *fs,
- struct bch_sb_handle *sb,
- struct bch_opts *opts)
-{
- if (fs == sb)
- return 0;
-
- if (!uuid_equal(&fs->sb->uuid, &sb->sb->uuid))
- return -BCH_ERR_device_not_a_member_of_filesystem;
-
- if (!bch2_member_exists(fs->sb, sb->sb->dev_idx))
- return -BCH_ERR_device_has_been_removed;
-
- if (fs->sb->block_size != sb->sb->block_size)
- return -BCH_ERR_mismatched_block_size;
-
- if (le16_to_cpu(fs->sb->version) < bcachefs_metadata_version_member_seq ||
- le16_to_cpu(sb->sb->version) < bcachefs_metadata_version_member_seq)
- return 0;
-
- if (fs->sb->seq == sb->sb->seq &&
- fs->sb->write_time != sb->sb->write_time) {
- struct printbuf buf = PRINTBUF;
-
- prt_str(&buf, "Split brain detected between ");
- prt_bdevname(&buf, sb->bdev);
- prt_str(&buf, " and ");
- prt_bdevname(&buf, fs->bdev);
- prt_char(&buf, ':');
- prt_newline(&buf);
- prt_printf(&buf, "seq=%llu but write_time different, got", le64_to_cpu(sb->sb->seq));
- prt_newline(&buf);
-
- prt_bdevname(&buf, fs->bdev);
- prt_char(&buf, ' ');
- bch2_prt_datetime(&buf, le64_to_cpu(fs->sb->write_time));
- prt_newline(&buf);
-
- prt_bdevname(&buf, sb->bdev);
- prt_char(&buf, ' ');
- bch2_prt_datetime(&buf, le64_to_cpu(sb->sb->write_time));
- prt_newline(&buf);
-
- if (!opts->no_splitbrain_check)
- prt_printf(&buf, "Not using older sb");
-
- pr_err("%s", buf.buf);
- printbuf_exit(&buf);
-
- if (!opts->no_splitbrain_check)
- return -BCH_ERR_device_splitbrain;
- }
-
- struct bch_member m = bch2_sb_member_get(fs->sb, sb->sb->dev_idx);
- u64 seq_from_fs = le64_to_cpu(m.seq);
- u64 seq_from_member = le64_to_cpu(sb->sb->seq);
-
- if (seq_from_fs && seq_from_fs < seq_from_member) {
- struct printbuf buf = PRINTBUF;
-
- prt_str(&buf, "Split brain detected between ");
- prt_bdevname(&buf, sb->bdev);
- prt_str(&buf, " and ");
- prt_bdevname(&buf, fs->bdev);
- prt_char(&buf, ':');
- prt_newline(&buf);
-
- prt_bdevname(&buf, fs->bdev);
- prt_str(&buf, " believes seq of ");
- prt_bdevname(&buf, sb->bdev);
- prt_printf(&buf, " to be %llu, but ", seq_from_fs);
- prt_bdevname(&buf, sb->bdev);
- prt_printf(&buf, " has %llu\n", seq_from_member);
-
- if (!opts->no_splitbrain_check) {
- prt_str(&buf, "Not using ");
- prt_bdevname(&buf, sb->bdev);
- }
-
- pr_err("%s", buf.buf);
- printbuf_exit(&buf);
-
- if (!opts->no_splitbrain_check)
- return -BCH_ERR_device_splitbrain;
- }
-
- return 0;
-}
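The second check in bch2_dev_in_fs() above catches split brain by comparing sequence numbers from two directions: the member entry records the last superblock seq the rest of the filesystem saw this device write, while the device's own superblock reports what it actually has. If the device is ahead of what the filesystem remembers, the copies diverged. A minimal standalone sketch of that comparison; the structs are hypothetical stand-ins for bch_member and bch_sb:

/* Sketch only: split-brain detection via per-member sequence numbers. */
#include <stdint.h>
#include <stdio.h>

struct member_record { uint64_t last_seen_seq; };	/* from the fs's member table */
struct device_sb     { uint64_t seq; };			/* from the device's own superblock */

static int detect_splitbrain(const struct member_record *m, const struct device_sb *dev)
{
	/* 0 in the member table means "never recorded", so nothing to compare */
	return m->last_seen_seq && m->last_seen_seq < dev->seq;
}

int main(void)
{
	struct member_record m = { .last_seen_seq = 100 };
	struct device_sb dev = { .seq = 104 };

	if (detect_splitbrain(&m, &dev))
		printf("split brain: device wrote seq %llu after the fs last saw %llu\n",
		       (unsigned long long) dev.seq,
		       (unsigned long long) m.last_seen_seq);
	return 0;
}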
-
-/* Device startup/shutdown: */
-
-static void bch2_dev_release(struct kobject *kobj)
-{
- struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
-
- kfree(ca);
-}
-
-static void bch2_dev_free(struct bch_dev *ca)
-{
- cancel_work_sync(&ca->io_error_work);
-
- bch2_dev_unlink(ca);
-
- if (ca->kobj.state_in_sysfs)
- kobject_del(&ca->kobj);
-
- bch2_free_super(&ca->disk_sb);
- bch2_dev_allocator_background_exit(ca);
- bch2_dev_journal_exit(ca);
-
- free_percpu(ca->io_done);
- bch2_dev_buckets_free(ca);
- free_page((unsigned long) ca->sb_read_scratch);
-
- bch2_time_stats_quantiles_exit(&ca->io_latency[WRITE]);
- bch2_time_stats_quantiles_exit(&ca->io_latency[READ]);
-
- percpu_ref_exit(&ca->io_ref);
-#ifndef CONFIG_BCACHEFS_DEBUG
- percpu_ref_exit(&ca->ref);
-#endif
- kobject_put(&ca->kobj);
-}
-
-static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
-{
-
- lockdep_assert_held(&c->state_lock);
-
- if (percpu_ref_is_zero(&ca->io_ref))
- return;
-
- __bch2_dev_read_only(c, ca);
-
- reinit_completion(&ca->io_ref_completion);
- percpu_ref_kill(&ca->io_ref);
- wait_for_completion(&ca->io_ref_completion);
-
- bch2_dev_unlink(ca);
-
- bch2_free_super(&ca->disk_sb);
- bch2_dev_journal_exit(ca);
-}
-
-#ifndef CONFIG_BCACHEFS_DEBUG
-static void bch2_dev_ref_complete(struct percpu_ref *ref)
-{
- struct bch_dev *ca = container_of(ref, struct bch_dev, ref);
-
- complete(&ca->ref_completion);
-}
-#endif
-
-static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
-{
- struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);
-
- complete(&ca->io_ref_completion);
-}
-
-static void bch2_dev_unlink(struct bch_dev *ca)
-{
- struct kobject *b;
-
- /*
- * This is racy w.r.t. the underlying block device being hot-removed,
- * which removes it from sysfs.
- *
- * It'd be lovely if we had a way to handle this race, but the sysfs
- * code doesn't appear to provide a good method and block/holder.c is
- * susceptible as well:
- */
- if (ca->kobj.state_in_sysfs &&
- ca->disk_sb.bdev &&
- (b = bdev_kobj(ca->disk_sb.bdev))->state_in_sysfs) {
- sysfs_remove_link(b, "bcachefs");
- sysfs_remove_link(&ca->kobj, "block");
- }
-}
-
-static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca)
-{
- int ret;
-
- if (!c->kobj.state_in_sysfs)
- return 0;
-
- if (!ca->kobj.state_in_sysfs) {
- ret = kobject_add(&ca->kobj, &c->kobj,
- "dev-%u", ca->dev_idx);
- if (ret)
- return ret;
- }
-
- if (ca->disk_sb.bdev) {
- struct kobject *block = bdev_kobj(ca->disk_sb.bdev);
-
- ret = sysfs_create_link(block, &ca->kobj, "bcachefs");
- if (ret)
- return ret;
-
- ret = sysfs_create_link(&ca->kobj, block, "block");
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
- struct bch_member *member)
-{
- struct bch_dev *ca;
- unsigned i;
-
- ca = kzalloc(sizeof(*ca), GFP_KERNEL);
- if (!ca)
- return NULL;
-
- kobject_init(&ca->kobj, &bch2_dev_ktype);
- init_completion(&ca->ref_completion);
- init_completion(&ca->io_ref_completion);
-
- init_rwsem(&ca->bucket_lock);
-
- INIT_WORK(&ca->io_error_work, bch2_io_error_work);
-
- bch2_time_stats_quantiles_init(&ca->io_latency[READ]);
- bch2_time_stats_quantiles_init(&ca->io_latency[WRITE]);
-
- ca->mi = bch2_mi_to_cpu(member);
-
- for (i = 0; i < ARRAY_SIZE(member->errors); i++)
- atomic64_set(&ca->errors[i], le64_to_cpu(member->errors[i]));
-
- ca->uuid = member->uuid;
-
- ca->nr_btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
- ca->mi.bucket_size / btree_sectors(c));
-
-#ifndef CONFIG_BCACHEFS_DEBUG
- if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete, 0, GFP_KERNEL))
- goto err;
-#else
- atomic_long_set(&ca->ref, 1);
-#endif
-
- bch2_dev_allocator_background_init(ca);
-
- if (percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
- PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
- !(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) ||
- bch2_dev_buckets_alloc(c, ca) ||
- !(ca->io_done = alloc_percpu(*ca->io_done)))
- goto err;
-
- return ca;
-err:
- bch2_dev_free(ca);
- return NULL;
-}
-
-static void bch2_dev_attach(struct bch_fs *c, struct bch_dev *ca,
- unsigned dev_idx)
-{
- ca->dev_idx = dev_idx;
- __set_bit(ca->dev_idx, ca->self.d);
- scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx);
-
- ca->fs = c;
- rcu_assign_pointer(c->devs[ca->dev_idx], ca);
-
- if (bch2_dev_sysfs_online(c, ca))
- pr_warn("error creating sysfs objects");
-}
-
-static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
-{
- struct bch_member member = bch2_sb_member_get(c->disk_sb.sb, dev_idx);
- struct bch_dev *ca = NULL;
- int ret = 0;
-
- if (bch2_fs_init_fault("dev_alloc"))
- goto err;
-
- ca = __bch2_dev_alloc(c, &member);
- if (!ca)
- goto err;
-
- ca->fs = c;
-
- bch2_dev_attach(c, ca, dev_idx);
- return ret;
-err:
- if (ca)
- bch2_dev_free(ca);
- return -BCH_ERR_ENOMEM_dev_alloc;
-}
-
-static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
-{
- unsigned ret;
-
- if (bch2_dev_is_online(ca)) {
- bch_err(ca, "already have device online in slot %u",
- sb->sb->dev_idx);
- return -BCH_ERR_device_already_online;
- }
-
- if (get_capacity(sb->bdev->bd_disk) <
- ca->mi.bucket_size * ca->mi.nbuckets) {
- bch_err(ca, "cannot online: device too small");
- return -BCH_ERR_device_size_too_small;
- }
-
- BUG_ON(!percpu_ref_is_zero(&ca->io_ref));
-
- ret = bch2_dev_journal_init(ca, sb->sb);
- if (ret)
- return ret;
-
- /* Commit: */
- ca->disk_sb = *sb;
- memset(sb, 0, sizeof(*sb));
-
- ca->dev = ca->disk_sb.bdev->bd_dev;
-
- percpu_ref_reinit(&ca->io_ref);
-
- return 0;
-}
-
-static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
-{
- struct bch_dev *ca;
- int ret;
-
- lockdep_assert_held(&c->state_lock);
-
- if (le64_to_cpu(sb->sb->seq) >
- le64_to_cpu(c->disk_sb.sb->seq))
- bch2_sb_to_fs(c, sb->sb);
-
- BUG_ON(!bch2_dev_exists(c, sb->sb->dev_idx));
-
- ca = bch2_dev_locked(c, sb->sb->dev_idx);
-
- ret = __bch2_dev_attach_bdev(ca, sb);
- if (ret)
- return ret;
-
- bch2_dev_sysfs_online(c, ca);
-
- struct printbuf name = PRINTBUF;
- prt_bdevname(&name, ca->disk_sb.bdev);
-
- if (c->sb.nr_devices == 1)
- strscpy(c->name, name.buf, sizeof(c->name));
- strscpy(ca->name, name.buf, sizeof(ca->name));
-
- printbuf_exit(&name);
-
- rebalance_wakeup(c);
- return 0;
-}
-
-/* Device management: */
-
-/*
- * Note: this function is also used by the error paths - when a particular
- * device sees an error, we call it to determine whether we can just set the
- * device RO, or - if this function returns false - we'll set the whole
- * filesystem RO:
- *
- * XXX: maybe we should be more explicit about whether we're changing state
- * because we got an error or what have you?
- */
-bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
- enum bch_member_state new_state, int flags)
-{
- struct bch_devs_mask new_online_devs;
- int nr_rw = 0, required;
-
- lockdep_assert_held(&c->state_lock);
-
- switch (new_state) {
- case BCH_MEMBER_STATE_rw:
- return true;
- case BCH_MEMBER_STATE_ro:
- if (ca->mi.state != BCH_MEMBER_STATE_rw)
- return true;
-
- /* do we have enough devices to write to? */
- for_each_member_device(c, ca2)
- if (ca2 != ca)
- nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;
-
- required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
- ? c->opts.metadata_replicas
- : metadata_replicas_required(c),
- !(flags & BCH_FORCE_IF_DATA_DEGRADED)
- ? c->opts.data_replicas
- : data_replicas_required(c));
-
- return nr_rw >= required;
- case BCH_MEMBER_STATE_failed:
- case BCH_MEMBER_STATE_spare:
- if (ca->mi.state != BCH_MEMBER_STATE_rw &&
- ca->mi.state != BCH_MEMBER_STATE_ro)
- return true;
-
- /* do we have enough devices to read from? */
- new_online_devs = bch2_online_devs(c);
- __clear_bit(ca->dev_idx, new_online_devs.d);
-
- return bch2_have_enough_devs(c, new_online_devs, flags, false);
- default:
- BUG();
- }
-}
-
-static bool bch2_fs_may_start(struct bch_fs *c)
-{
- struct bch_dev *ca;
- unsigned i, flags = 0;
-
- if (c->opts.very_degraded)
- flags |= BCH_FORCE_IF_DEGRADED|BCH_FORCE_IF_LOST;
-
- if (c->opts.degraded)
- flags |= BCH_FORCE_IF_DEGRADED;
-
- if (!c->opts.degraded &&
- !c->opts.very_degraded) {
- mutex_lock(&c->sb_lock);
-
- for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
- if (!bch2_member_exists(c->disk_sb.sb, i))
- continue;
-
- ca = bch2_dev_locked(c, i);
-
- if (!bch2_dev_is_online(ca) &&
- (ca->mi.state == BCH_MEMBER_STATE_rw ||
- ca->mi.state == BCH_MEMBER_STATE_ro)) {
- mutex_unlock(&c->sb_lock);
- return false;
- }
- }
- mutex_unlock(&c->sb_lock);
- }
-
- return bch2_have_enough_devs(c, bch2_online_devs(c), flags, true);
-}
-
-static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
-{
- /*
- * The allocator thread itself allocates btree nodes, so stop it first:
- */
- bch2_dev_allocator_remove(c, ca);
- bch2_recalc_capacity(c);
- bch2_dev_journal_stop(&c->journal, ca);
-}
-
-static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
-{
- lockdep_assert_held(&c->state_lock);
-
- BUG_ON(ca->mi.state != BCH_MEMBER_STATE_rw);
-
- bch2_dev_allocator_add(c, ca);
- bch2_recalc_capacity(c);
- bch2_dev_do_discards(ca);
-}
-
-int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
- enum bch_member_state new_state, int flags)
-{
- struct bch_member *m;
- int ret = 0;
-
- if (ca->mi.state == new_state)
- return 0;
-
- if (!bch2_dev_state_allowed(c, ca, new_state, flags))
- return -BCH_ERR_device_state_not_allowed;
-
- if (new_state != BCH_MEMBER_STATE_rw)
- __bch2_dev_read_only(c, ca);
-
- bch_notice(ca, "%s", bch2_member_states[new_state]);
-
- mutex_lock(&c->sb_lock);
- m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
- SET_BCH_MEMBER_STATE(m, new_state);
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-
- if (new_state == BCH_MEMBER_STATE_rw)
- __bch2_dev_read_write(c, ca);
-
- rebalance_wakeup(c);
-
- return ret;
-}
-
-int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
- enum bch_member_state new_state, int flags)
-{
- int ret;
-
- down_write(&c->state_lock);
- ret = __bch2_dev_set_state(c, ca, new_state, flags);
- up_write(&c->state_lock);
-
- return ret;
-}
-
-/* Device add/removal: */
-
-int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
-{
- struct bch_member *m;
- unsigned dev_idx = ca->dev_idx, data;
- int ret;
-
- down_write(&c->state_lock);
-
- /*
- * We consume a reference to ca->ref, regardless of whether we succeed
- * or fail:
- */
- bch2_dev_put(ca);
-
- if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
- bch_err(ca, "Cannot remove without losing data");
- ret = -BCH_ERR_device_state_not_allowed;
- goto err;
- }
-
- __bch2_dev_read_only(c, ca);
-
- ret = bch2_dev_data_drop(c, ca->dev_idx, flags);
- bch_err_msg(ca, ret, "bch2_dev_data_drop()");
- if (ret)
- goto err;
-
- ret = bch2_dev_remove_alloc(c, ca);
- bch_err_msg(ca, ret, "bch2_dev_remove_alloc()");
- if (ret)
- goto err;
-
- /*
- * We need to flush the entire journal to get rid of keys that reference
- * the device being removed before removing the superblock entry
- */
- bch2_journal_flush_all_pins(&c->journal);
-
- /*
- * this is really just needed for the bch2_replicas_gc_(start|end)
- * calls, and could be cleaned up:
- */
- ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
- bch_err_msg(ca, ret, "bch2_journal_flush_device_pins()");
- if (ret)
- goto err;
-
- ret = bch2_journal_flush(&c->journal);
- bch_err_msg(ca, ret, "bch2_journal_flush()");
- if (ret)
- goto err;
-
- ret = bch2_replicas_gc2(c);
- bch_err_msg(ca, ret, "bch2_replicas_gc2()");
- if (ret)
- goto err;
-
- data = bch2_dev_has_data(c, ca);
- if (data) {
- struct printbuf data_has = PRINTBUF;
-
- prt_bitflags(&data_has, __bch2_data_types, data);
- bch_err(ca, "Remove failed, still has data (%s)", data_has.buf);
- printbuf_exit(&data_has);
- ret = -EBUSY;
- goto err;
- }
-
- __bch2_dev_offline(c, ca);
-
- mutex_lock(&c->sb_lock);
- rcu_assign_pointer(c->devs[ca->dev_idx], NULL);
- mutex_unlock(&c->sb_lock);
-
-#ifndef CONFIG_BCACHEFS_DEBUG
- percpu_ref_kill(&ca->ref);
-#else
- ca->dying = true;
- bch2_dev_put(ca);
-#endif
- wait_for_completion(&ca->ref_completion);
-
- bch2_dev_free(ca);
-
- /*
- * Free this device's slot in the bch_member array - all pointers to
- * this device must be gone:
- */
- mutex_lock(&c->sb_lock);
- m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx);
- memset(&m->uuid, 0, sizeof(m->uuid));
-
- bch2_write_super(c);
-
- mutex_unlock(&c->sb_lock);
- up_write(&c->state_lock);
- return 0;
-err:
- if (ca->mi.state == BCH_MEMBER_STATE_rw &&
- !percpu_ref_is_zero(&ca->io_ref))
- __bch2_dev_read_write(c, ca);
- up_write(&c->state_lock);
- return ret;
-}
-
-/* Add new device to running filesystem: */
-int bch2_dev_add(struct bch_fs *c, const char *path)
-{
- struct bch_opts opts = bch2_opts_empty();
- struct bch_sb_handle sb;
- struct bch_dev *ca = NULL;
- struct printbuf errbuf = PRINTBUF;
- struct printbuf label = PRINTBUF;
- int ret;
-
- ret = bch2_read_super(path, &opts, &sb);
- bch_err_msg(c, ret, "reading super");
- if (ret)
- goto err;
-
- struct bch_member dev_mi = bch2_sb_member_get(sb.sb, sb.sb->dev_idx);
-
- if (BCH_MEMBER_GROUP(&dev_mi)) {
- bch2_disk_path_to_text_sb(&label, sb.sb, BCH_MEMBER_GROUP(&dev_mi) - 1);
- if (label.allocation_failure) {
- ret = -ENOMEM;
- goto err;
- }
- }
-
- ret = bch2_dev_may_add(sb.sb, c);
- if (ret)
- goto err;
-
- ca = __bch2_dev_alloc(c, &dev_mi);
- if (!ca) {
- ret = -ENOMEM;
- goto err;
- }
-
- ret = __bch2_dev_attach_bdev(ca, &sb);
- if (ret)
- goto err;
-
- ret = bch2_dev_journal_alloc(ca, true);
- bch_err_msg(c, ret, "allocating journal");
- if (ret)
- goto err;
-
- down_write(&c->state_lock);
- mutex_lock(&c->sb_lock);
-
- ret = bch2_sb_from_fs(c, ca);
- bch_err_msg(c, ret, "setting up new superblock");
- if (ret)
- goto err_unlock;
-
- if (dynamic_fault("bcachefs:add:no_slot"))
- goto err_unlock;
-
- ret = bch2_sb_member_alloc(c);
- if (ret < 0) {
- bch_err_msg(c, ret, "setting up new superblock");
- goto err_unlock;
- }
- unsigned dev_idx = ret;
-
- /* success: */
-
- dev_mi.last_mount = cpu_to_le64(ktime_get_real_seconds());
- *bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx) = dev_mi;
-
- ca->disk_sb.sb->dev_idx = dev_idx;
- bch2_dev_attach(c, ca, dev_idx);
-
- if (BCH_MEMBER_GROUP(&dev_mi)) {
- ret = __bch2_dev_group_set(c, ca, label.buf);
- bch_err_msg(c, ret, "creating new label");
- if (ret)
- goto err_unlock;
- }
-
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-
- ret = bch2_dev_usage_init(ca, false);
- if (ret)
- goto err_late;
-
- ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
- bch_err_msg(ca, ret, "marking new superblock");
- if (ret)
- goto err_late;
-
- ret = bch2_fs_freespace_init(c);
- bch_err_msg(ca, ret, "initializing free space");
- if (ret)
- goto err_late;
-
- ca->new_fs_bucket_idx = 0;
-
- if (ca->mi.state == BCH_MEMBER_STATE_rw)
- __bch2_dev_read_write(c, ca);
-
- up_write(&c->state_lock);
- return 0;
-
-err_unlock:
- mutex_unlock(&c->sb_lock);
- up_write(&c->state_lock);
-err:
- if (ca)
- bch2_dev_free(ca);
- bch2_free_super(&sb);
- printbuf_exit(&label);
- printbuf_exit(&errbuf);
- bch_err_fn(c, ret);
- return ret;
-err_late:
- up_write(&c->state_lock);
- ca = NULL;
- goto err;
-}
-
-/* Hot add existing device to running filesystem: */
-int bch2_dev_online(struct bch_fs *c, const char *path)
-{
- struct bch_opts opts = bch2_opts_empty();
- struct bch_sb_handle sb = { NULL };
- struct bch_dev *ca;
- unsigned dev_idx;
- int ret;
-
- down_write(&c->state_lock);
-
- ret = bch2_read_super(path, &opts, &sb);
- if (ret) {
- up_write(&c->state_lock);
- return ret;
- }
-
- dev_idx = sb.sb->dev_idx;
-
- ret = bch2_dev_in_fs(&c->disk_sb, &sb, &c->opts);
- bch_err_msg(c, ret, "bringing %s online", path);
- if (ret)
- goto err;
-
- ret = bch2_dev_attach_bdev(c, &sb);
- if (ret)
- goto err;
-
- ca = bch2_dev_locked(c, dev_idx);
-
- ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
- bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);
- if (ret)
- goto err;
-
- if (ca->mi.state == BCH_MEMBER_STATE_rw)
- __bch2_dev_read_write(c, ca);
-
- if (!ca->mi.freespace_initialized) {
- ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
- bch_err_msg(ca, ret, "initializing free space");
- if (ret)
- goto err;
- }
-
- if (!ca->journal.nr) {
- ret = bch2_dev_journal_alloc(ca, false);
- bch_err_msg(ca, ret, "allocating journal");
- if (ret)
- goto err;
- }
-
- mutex_lock(&c->sb_lock);
- bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount =
- cpu_to_le64(ktime_get_real_seconds());
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-
- up_write(&c->state_lock);
- return 0;
-err:
- up_write(&c->state_lock);
- bch2_free_super(&sb);
- return ret;
-}
-
-int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
-{
- down_write(&c->state_lock);
-
- if (!bch2_dev_is_online(ca)) {
- bch_err(ca, "Already offline");
- up_write(&c->state_lock);
- return 0;
- }
-
- if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
- bch_err(ca, "Cannot offline required disk");
- up_write(&c->state_lock);
- return -BCH_ERR_device_state_not_allowed;
- }
-
- __bch2_dev_offline(c, ca);
-
- up_write(&c->state_lock);
- return 0;
-}
-
-int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
-{
- struct bch_member *m;
- u64 old_nbuckets;
- int ret = 0;
-
- down_write(&c->state_lock);
- old_nbuckets = ca->mi.nbuckets;
-
- if (nbuckets < ca->mi.nbuckets) {
- bch_err(ca, "Cannot shrink yet");
- ret = -EINVAL;
- goto err;
- }
-
- if (nbuckets > BCH_MEMBER_NBUCKETS_MAX) {
- bch_err(ca, "New device size too big (%llu greater than max %u)",
- nbuckets, BCH_MEMBER_NBUCKETS_MAX);
- ret = -BCH_ERR_device_size_too_big;
- goto err;
- }
-
- if (bch2_dev_is_online(ca) &&
- get_capacity(ca->disk_sb.bdev->bd_disk) <
- ca->mi.bucket_size * nbuckets) {
- bch_err(ca, "New size larger than device");
- ret = -BCH_ERR_device_size_too_small;
- goto err;
- }
-
- ret = bch2_dev_buckets_resize(c, ca, nbuckets);
- bch_err_msg(ca, ret, "resizing buckets");
- if (ret)
- goto err;
-
- ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
- if (ret)
- goto err;
-
- mutex_lock(&c->sb_lock);
- m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
- m->nbuckets = cpu_to_le64(nbuckets);
-
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
-
- if (ca->mi.freespace_initialized) {
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_dev_data_type,
- .dev_data_type.dev = ca->dev_idx,
- .dev_data_type.data_type = BCH_DATA_free,
- };
- u64 v[3] = { nbuckets - old_nbuckets, 0, 0 };
-
- ret = bch2_trans_commit_do(ca->fs, NULL, NULL, 0,
- bch2_disk_accounting_mod(trans, &acc, v, ARRAY_SIZE(v), false)) ?:
- bch2_dev_freespace_init(c, ca, old_nbuckets, nbuckets);
- if (ret)
- goto err;
- }
-
- bch2_recalc_capacity(c);
-err:
- up_write(&c->state_lock);
- return ret;
-}
-
-/* return with ref on ca->ref: */
-struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
-{
- if (!strncmp(name, "/dev/", strlen("/dev/")))
- name += strlen("/dev/");
-
- for_each_member_device(c, ca)
- if (!strcmp(name, ca->name))
- return ca;
- return ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
-}
-
-/* Filesystem open: */
-
-static inline int sb_cmp(struct bch_sb *l, struct bch_sb *r)
-{
- return cmp_int(le64_to_cpu(l->seq), le64_to_cpu(r->seq)) ?:
- cmp_int(le64_to_cpu(l->write_time), le64_to_cpu(r->write_time));
-}
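sb_cmp() above orders superblocks by seq first and write_time second; bch2_fs_open() just below scans every device's superblock with it and keeps the greatest as `best`, the authoritative copy. The same selection in a standalone sketch; struct sb_info is a hypothetical stand-in for bch_sb_handle:

/* Sketch only: pick the newest superblock by (seq, write_time). */
#include <stdint.h>
#include <stdio.h>

struct sb_info { uint64_t seq, write_time; };

static int cmp_u64(uint64_t l, uint64_t r)
{
	return (l > r) - (l < r);
}

static int sb_info_cmp(const struct sb_info *l, const struct sb_info *r)
{
	int c = cmp_u64(l->seq, r->seq);

	return c ?: cmp_u64(l->write_time, r->write_time);
}

int main(void)
{
	struct sb_info sbs[] = {
		{ .seq = 41, .write_time = 900 },
		{ .seq = 42, .write_time = 870 },
		{ .seq = 42, .write_time = 910 },
	};
	const struct sb_info *best = &sbs[0];

	for (unsigned i = 1; i < sizeof(sbs) / sizeof(sbs[0]); i++)
		if (sb_info_cmp(&sbs[i], best) > 0)
			best = &sbs[i];

	printf("best: seq=%llu write_time=%llu\n",
	       (unsigned long long) best->seq,
	       (unsigned long long) best->write_time);
	return 0;
}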
-
-struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
- struct bch_opts opts)
-{
- DARRAY(struct bch_sb_handle) sbs = { 0 };
- struct bch_fs *c = NULL;
- struct bch_sb_handle *best = NULL;
- struct printbuf errbuf = PRINTBUF;
- int ret = 0;
-
- if (!try_module_get(THIS_MODULE))
- return ERR_PTR(-ENODEV);
-
- if (!nr_devices) {
- ret = -EINVAL;
- goto err;
- }
-
- ret = darray_make_room(&sbs, nr_devices);
- if (ret)
- goto err;
-
- for (unsigned i = 0; i < nr_devices; i++) {
- struct bch_sb_handle sb = { NULL };
-
- ret = bch2_read_super(devices[i], &opts, &sb);
- if (ret)
- goto err;
-
- BUG_ON(darray_push(&sbs, sb));
- }
-
- if (opts.nochanges && !opts.read_only) {
- ret = -BCH_ERR_erofs_nochanges;
- goto err_print;
- }
-
- darray_for_each(sbs, sb)
- if (!best || sb_cmp(sb->sb, best->sb) > 0)
- best = sb;
-
- darray_for_each_reverse(sbs, sb) {
- ret = bch2_dev_in_fs(best, sb, &opts);
-
- if (ret == -BCH_ERR_device_has_been_removed ||
- ret == -BCH_ERR_device_splitbrain) {
- bch2_free_super(sb);
- darray_remove_item(&sbs, sb);
- best -= best > sb;
- ret = 0;
- continue;
- }
-
- if (ret)
- goto err_print;
- }
-
- c = bch2_fs_alloc(best->sb, opts);
- ret = PTR_ERR_OR_ZERO(c);
- if (ret)
- goto err;
-
- down_write(&c->state_lock);
- darray_for_each(sbs, sb) {
- ret = bch2_dev_attach_bdev(c, sb);
- if (ret) {
- up_write(&c->state_lock);
- goto err;
- }
- }
- up_write(&c->state_lock);
-
- if (!bch2_fs_may_start(c)) {
- ret = -BCH_ERR_insufficient_devices_to_start;
- goto err_print;
- }
-
- if (!c->opts.nostart) {
- ret = bch2_fs_start(c);
- if (ret)
- goto err;
- }
-out:
- darray_for_each(sbs, sb)
- bch2_free_super(sb);
- darray_exit(&sbs);
- printbuf_exit(&errbuf);
- module_put(THIS_MODULE);
- return c;
-err_print:
- pr_err("bch_fs_open err opening %s: %s",
- devices[0], bch2_err_str(ret));
-err:
- if (!IS_ERR_OR_NULL(c))
- bch2_fs_stop(c);
- c = ERR_PTR(ret);
- goto out;
-}
-
-/* Global interfaces/init */
-
-static void bcachefs_exit(void)
-{
- bch2_debug_exit();
- bch2_vfs_exit();
- bch2_chardev_exit();
- bch2_btree_key_cache_exit();
- if (bcachefs_kset)
- kset_unregister(bcachefs_kset);
-}
-
-static int __init bcachefs_init(void)
-{
- bch2_bkey_pack_test();
-
- if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
- bch2_btree_key_cache_init() ||
- bch2_chardev_init() ||
- bch2_vfs_init() ||
- bch2_debug_init())
- goto err;
-
- return 0;
-err:
- bcachefs_exit();
- return -ENOMEM;
-}
-
-#define BCH_DEBUG_PARAM(name, description) \
- bool bch2_##name; \
- module_param_named(name, bch2_##name, bool, 0644); \
- MODULE_PARM_DESC(name, description);
-BCH_DEBUG_PARAMS()
-#undef BCH_DEBUG_PARAM
-
-__maybe_unused
-static unsigned bch2_metadata_version = bcachefs_metadata_version_current;
-module_param_named(version, bch2_metadata_version, uint, 0400);
-
-module_exit(bcachefs_exit);
-module_init(bcachefs_init);
diff --git a/fs/bcachefs/super.h b/fs/bcachefs/super.h
deleted file mode 100644
index dada09331d2e..000000000000
--- a/fs/bcachefs/super.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SUPER_H
-#define _BCACHEFS_SUPER_H
-
-#include "extents.h"
-
-#include "bcachefs_ioctl.h"
-
-#include <linux/math64.h>
-
-extern const char * const bch2_fs_flag_strs[];
-
-struct bch_fs *bch2_dev_to_fs(dev_t);
-struct bch_fs *bch2_uuid_to_fs(__uuid_t);
-
-bool bch2_dev_state_allowed(struct bch_fs *, struct bch_dev *,
- enum bch_member_state, int);
-int __bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
- enum bch_member_state, int);
-int bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
- enum bch_member_state, int);
-
-int bch2_dev_fail(struct bch_dev *, int);
-int bch2_dev_remove(struct bch_fs *, struct bch_dev *, int);
-int bch2_dev_add(struct bch_fs *, const char *);
-int bch2_dev_online(struct bch_fs *, const char *);
-int bch2_dev_offline(struct bch_fs *, struct bch_dev *, int);
-int bch2_dev_resize(struct bch_fs *, struct bch_dev *, u64);
-struct bch_dev *bch2_dev_lookup(struct bch_fs *, const char *);
-
-bool bch2_fs_emergency_read_only(struct bch_fs *);
-void bch2_fs_read_only(struct bch_fs *);
-
-int bch2_fs_read_write(struct bch_fs *);
-int bch2_fs_read_write_early(struct bch_fs *);
-
-/*
- * Only for use in the recovery/fsck path:
- */
-static inline void bch2_fs_lazy_rw(struct bch_fs *c)
-{
- if (!test_bit(BCH_FS_rw, &c->flags) &&
- !test_bit(BCH_FS_was_rw, &c->flags))
- bch2_fs_read_write_early(c);
-}
-
-void __bch2_fs_stop(struct bch_fs *);
-void bch2_fs_free(struct bch_fs *);
-void bch2_fs_stop(struct bch_fs *);
-
-int bch2_fs_start(struct bch_fs *);
-struct bch_fs *bch2_fs_open(char * const *, unsigned, struct bch_opts);
-
-#endif /* _BCACHEFS_SUPER_H */
diff --git a/fs/bcachefs/super_types.h b/fs/bcachefs/super_types.h
deleted file mode 100644
index 368a63d938cf..000000000000
--- a/fs/bcachefs/super_types.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SUPER_TYPES_H
-#define _BCACHEFS_SUPER_TYPES_H
-
-struct bch_sb_handle {
- struct bch_sb *sb;
- struct file *s_bdev_file;
- struct block_device *bdev;
- char *sb_name;
- struct bio *bio;
- void *holder;
- size_t buffer_size;
- blk_mode_t mode;
- unsigned have_layout:1;
- unsigned have_bio:1;
- unsigned fs_sb:1;
- u64 seq;
-};
-
-struct bch_devs_mask {
- unsigned long d[BITS_TO_LONGS(BCH_SB_MEMBERS_MAX)];
-};
-
-struct bch_devs_list {
- u8 nr;
- u8 data[BCH_BKEY_PTRS_MAX];
-};
-
-#endif /* _BCACHEFS_SUPER_TYPES_H */
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
deleted file mode 100644
index 03e59f86f360..000000000000
--- a/fs/bcachefs/sysfs.c
+++ /dev/null
@@ -1,893 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * bcache sysfs interfaces
- *
- * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
- * Copyright 2012 Google, Inc.
- */
-
-#ifndef NO_BCACHEFS_SYSFS
-
-#include "bcachefs.h"
-#include "alloc_background.h"
-#include "alloc_foreground.h"
-#include "sysfs.h"
-#include "btree_cache.h"
-#include "btree_io.h"
-#include "btree_iter.h"
-#include "btree_key_cache.h"
-#include "btree_update.h"
-#include "btree_update_interior.h"
-#include "btree_gc.h"
-#include "buckets.h"
-#include "clock.h"
-#include "compress.h"
-#include "disk_accounting.h"
-#include "disk_groups.h"
-#include "ec.h"
-#include "inode.h"
-#include "journal.h"
-#include "journal_reclaim.h"
-#include "keylist.h"
-#include "move.h"
-#include "movinggc.h"
-#include "nocow_locking.h"
-#include "opts.h"
-#include "rebalance.h"
-#include "replicas.h"
-#include "super-io.h"
-#include "tests.h"
-
-#include <linux/blkdev.h>
-#include <linux/sort.h>
-#include <linux/sched/clock.h>
-
-#include "util.h"
-
-#define SYSFS_OPS(type) \
-const struct sysfs_ops type ## _sysfs_ops = { \
- .show = type ## _show, \
- .store = type ## _store \
-}
-
-#define SHOW(fn) \
-static ssize_t fn ## _to_text(struct printbuf *, \
- struct kobject *, struct attribute *); \
- \
-static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
- char *buf) \
-{ \
- struct printbuf out = PRINTBUF; \
- ssize_t ret = fn ## _to_text(&out, kobj, attr); \
- \
- if (out.pos && out.buf[out.pos - 1] != '\n') \
- prt_newline(&out); \
- \
- if (!ret && out.allocation_failure) \
- ret = -ENOMEM; \
- \
- if (!ret) { \
- ret = min_t(size_t, out.pos, PAGE_SIZE - 1); \
- memcpy(buf, out.buf, ret); \
- } \
- printbuf_exit(&out); \
- return bch2_err_class(ret); \
-} \
- \
-static ssize_t fn ## _to_text(struct printbuf *out, struct kobject *kobj,\
- struct attribute *attr)
-
-#define STORE(fn) \
-static ssize_t fn ## _store_inner(struct kobject *, struct attribute *,\
- const char *, size_t); \
- \
-static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
- const char *buf, size_t size) \
-{ \
- return bch2_err_class(fn##_store_inner(kobj, attr, buf, size)); \
-} \
- \
-static ssize_t fn ## _store_inner(struct kobject *kobj, struct attribute *attr,\
- const char *buf, size_t size)
-
-#define __sysfs_attribute(_name, _mode) \
- static struct attribute sysfs_##_name = \
- { .name = #_name, .mode = _mode }
-
-#define write_attribute(n) __sysfs_attribute(n, 0200)
-#define read_attribute(n) __sysfs_attribute(n, 0444)
-#define rw_attribute(n) __sysfs_attribute(n, 0644)
-
-#define sysfs_printf(file, fmt, ...) \
-do { \
- if (attr == &sysfs_ ## file) \
- prt_printf(out, fmt "\n", __VA_ARGS__); \
-} while (0)
-
-#define sysfs_print(file, var) \
-do { \
- if (attr == &sysfs_ ## file) \
- snprint(out, var); \
-} while (0)
-
-#define sysfs_hprint(file, val) \
-do { \
- if (attr == &sysfs_ ## file) \
- prt_human_readable_s64(out, val); \
-} while (0)
-
-#define sysfs_strtoul(file, var) \
-do { \
- if (attr == &sysfs_ ## file) \
- return strtoul_safe(buf, var) ?: (ssize_t) size; \
-} while (0)
-
-#define sysfs_strtoul_clamp(file, var, min, max) \
-do { \
- if (attr == &sysfs_ ## file) \
- return strtoul_safe_clamp(buf, var, min, max) \
- ?: (ssize_t) size; \
-} while (0)
-
-#define strtoul_or_return(cp) \
-({ \
- unsigned long _v; \
- int _r = kstrtoul(cp, 10, &_v); \
- if (_r) \
- return _r; \
- _v; \
-})
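To make the macro machinery above concrete, here is a small hypothetical example of how an attribute is normally wired up with these helpers; struct demo_obj and its fields are assumptions for illustration, not bcachefs code:

	struct demo_obj {
		struct kobject	kobj;
		u64		nr_widgets;
	};

	read_attribute(demo_nr_widgets);

	SHOW(demo_obj)
	{
		struct demo_obj *d = container_of(kobj, struct demo_obj, kobj);

		/* emits the value only when this attribute is the one being read: */
		sysfs_print(demo_nr_widgets, d->nr_widgets);
		return 0;
	}

	STORE(demo_obj)
	{
		/* no writable attributes in this sketch */
		return size;
	}
	SYSFS_OPS(demo_obj);

	struct attribute *demo_obj_files[] = {
		&sysfs_demo_nr_widgets,
		NULL
	};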
-
-write_attribute(trigger_gc);
-write_attribute(trigger_discards);
-write_attribute(trigger_invalidates);
-write_attribute(trigger_journal_flush);
-write_attribute(trigger_journal_writes);
-write_attribute(trigger_btree_cache_shrink);
-write_attribute(trigger_btree_key_cache_shrink);
-write_attribute(trigger_freelist_wakeup);
-rw_attribute(gc_gens_pos);
-
-read_attribute(uuid);
-read_attribute(minor);
-read_attribute(flags);
-read_attribute(bucket_size);
-read_attribute(first_bucket);
-read_attribute(nbuckets);
-rw_attribute(durability);
-read_attribute(io_done);
-read_attribute(io_errors);
-write_attribute(io_errors_reset);
-
-read_attribute(io_latency_read);
-read_attribute(io_latency_write);
-read_attribute(io_latency_stats_read);
-read_attribute(io_latency_stats_write);
-read_attribute(congested);
-
-read_attribute(btree_write_stats);
-
-read_attribute(btree_cache_size);
-read_attribute(compression_stats);
-read_attribute(journal_debug);
-read_attribute(btree_cache);
-read_attribute(btree_key_cache);
-read_attribute(btree_reserve_cache);
-read_attribute(stripes_heap);
-read_attribute(open_buckets);
-read_attribute(open_buckets_partial);
-read_attribute(write_points);
-read_attribute(nocow_lock_table);
-
-#ifdef BCH_WRITE_REF_DEBUG
-read_attribute(write_refs);
-
-static const char * const bch2_write_refs[] = {
-#define x(n) #n,
- BCH_WRITE_REFS()
-#undef x
- NULL
-};
-
-static void bch2_write_refs_to_text(struct printbuf *out, struct bch_fs *c)
-{
- bch2_printbuf_tabstop_push(out, 24);
-
- for (unsigned i = 0; i < ARRAY_SIZE(c->writes); i++)
- prt_printf(out, "%s\t%li\n", bch2_write_refs[i], atomic_long_read(&c->writes[i]));
-}
-#endif
-
-read_attribute(internal_uuid);
-read_attribute(disk_groups);
-
-read_attribute(has_data);
-read_attribute(alloc_debug);
-read_attribute(accounting);
-read_attribute(usage_base);
-
-#define x(t, n, ...) read_attribute(t);
-BCH_PERSISTENT_COUNTERS()
-#undef x
-
-rw_attribute(discard);
-rw_attribute(label);
-
-rw_attribute(copy_gc_enabled);
-read_attribute(copy_gc_wait);
-
-rw_attribute(rebalance_enabled);
-sysfs_pd_controller_attribute(rebalance);
-read_attribute(rebalance_status);
-
-read_attribute(new_stripes);
-
-read_attribute(io_timers_read);
-read_attribute(io_timers_write);
-
-read_attribute(moving_ctxts);
-
-#ifdef CONFIG_BCACHEFS_TESTS
-write_attribute(perf_test);
-#endif /* CONFIG_BCACHEFS_TESTS */
-
-#define x(_name) \
- static struct attribute sysfs_time_stat_##_name = \
- { .name = #_name, .mode = 0644 };
- BCH_TIME_STATS()
-#undef x
-
-static struct attribute sysfs_state_rw = {
- .name = "state",
- .mode = 0444,
-};
-
-static size_t bch2_btree_cache_size(struct bch_fs *c)
-{
- struct btree_cache *bc = &c->btree_cache;
- size_t ret = 0;
- struct btree *b;
-
- mutex_lock(&bc->lock);
- list_for_each_entry(b, &bc->live[0].list, list)
- ret += btree_buf_bytes(b);
- list_for_each_entry(b, &bc->live[1].list, list)
- ret += btree_buf_bytes(b);
- list_for_each_entry(b, &bc->freeable, list)
- ret += btree_buf_bytes(b);
- mutex_unlock(&bc->lock);
- return ret;
-}
-
-static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
-{
- printbuf_tabstop_push(out, 12);
- printbuf_tabstop_push(out, 16);
- printbuf_tabstop_push(out, 16);
- printbuf_tabstop_push(out, 24);
- prt_printf(out, "type\tcompressed\runcompressed\raverage extent size\r\n");
-
- for (unsigned i = 1; i < BCH_COMPRESSION_TYPE_NR; i++) {
- struct disk_accounting_pos a = {
- .type = BCH_DISK_ACCOUNTING_compression,
- .compression.type = i,
- };
- struct bpos p = disk_accounting_pos_to_bpos(&a);
- u64 v[3];
- bch2_accounting_mem_read(c, p, v, ARRAY_SIZE(v));
-
- u64 nr_extents = v[0];
- u64 sectors_uncompressed = v[1];
- u64 sectors_compressed = v[2];
-
- bch2_prt_compression_type(out, i);
- prt_tab(out);
-
- prt_human_readable_u64(out, sectors_compressed << 9);
- prt_tab_rjust(out);
-
- prt_human_readable_u64(out, sectors_uncompressed << 9);
- prt_tab_rjust(out);
-
- prt_human_readable_u64(out, nr_extents
- ? div64_u64(sectors_uncompressed << 9, nr_extents)
- : 0);
- prt_tab_rjust(out);
- prt_newline(out);
- }
-
- return 0;
-}
-
-static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
-{
- prt_printf(out, "%s: ", bch2_btree_id_str(c->gc_gens_btree));
- bch2_bpos_to_text(out, c->gc_gens_pos);
- prt_printf(out, "\n");
-}
-
-static void bch2_fs_usage_base_to_text(struct printbuf *out, struct bch_fs *c)
-{
- struct bch_fs_usage_base b = {};
-
- acc_u64s_percpu(&b.hidden, &c->usage->hidden, sizeof(b) / sizeof(u64));
-
- prt_printf(out, "hidden:\t\t%llu\n", b.hidden);
- prt_printf(out, "btree:\t\t%llu\n", b.btree);
- prt_printf(out, "data:\t\t%llu\n", b.data);
- prt_printf(out, "cached:\t%llu\n", b.cached);
- prt_printf(out, "reserved:\t\t%llu\n", b.reserved);
- prt_printf(out, "nr_inodes:\t%llu\n", b.nr_inodes);
-}
-
-SHOW(bch2_fs)
-{
- struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
-
- sysfs_print(minor, c->minor);
- sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);
-
- if (attr == &sysfs_flags)
- prt_bitflags(out, bch2_fs_flag_strs, c->flags);
-
- sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));
-
- if (attr == &sysfs_btree_write_stats)
- bch2_btree_write_stats_to_text(out, c);
-
- if (attr == &sysfs_gc_gens_pos)
- bch2_gc_gens_pos_to_text(out, c);
-
- sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
-
- sysfs_printf(rebalance_enabled, "%i", c->rebalance.enabled);
- sysfs_pd_controller_show(rebalance, &c->rebalance.pd); /* XXX */
-
- if (attr == &sysfs_copy_gc_wait)
- bch2_copygc_wait_to_text(out, c);
-
- if (attr == &sysfs_rebalance_status)
- bch2_rebalance_status_to_text(out, c);
-
- /* Debugging: */
-
- if (attr == &sysfs_journal_debug)
- bch2_journal_debug_to_text(out, &c->journal);
-
- if (attr == &sysfs_btree_cache)
- bch2_btree_cache_to_text(out, &c->btree_cache);
-
- if (attr == &sysfs_btree_key_cache)
- bch2_btree_key_cache_to_text(out, &c->btree_key_cache);
-
- if (attr == &sysfs_btree_reserve_cache)
- bch2_btree_reserve_cache_to_text(out, c);
-
- if (attr == &sysfs_stripes_heap)
- bch2_stripes_heap_to_text(out, c);
-
- if (attr == &sysfs_open_buckets)
- bch2_open_buckets_to_text(out, c, NULL);
-
- if (attr == &sysfs_open_buckets_partial)
- bch2_open_buckets_partial_to_text(out, c);
-
- if (attr == &sysfs_write_points)
- bch2_write_points_to_text(out, c);
-
- if (attr == &sysfs_compression_stats)
- bch2_compression_stats_to_text(out, c);
-
- if (attr == &sysfs_new_stripes)
- bch2_new_stripes_to_text(out, c);
-
- if (attr == &sysfs_io_timers_read)
- bch2_io_timers_to_text(out, &c->io_clock[READ]);
-
- if (attr == &sysfs_io_timers_write)
- bch2_io_timers_to_text(out, &c->io_clock[WRITE]);
-
- if (attr == &sysfs_moving_ctxts)
- bch2_fs_moving_ctxts_to_text(out, c);
-
-#ifdef BCH_WRITE_REF_DEBUG
- if (attr == &sysfs_write_refs)
- bch2_write_refs_to_text(out, c);
-#endif
-
- if (attr == &sysfs_nocow_lock_table)
- bch2_nocow_locks_to_text(out, &c->nocow_locks);
-
- if (attr == &sysfs_disk_groups)
- bch2_disk_groups_to_text(out, c);
-
- if (attr == &sysfs_alloc_debug)
- bch2_fs_alloc_debug_to_text(out, c);
-
- if (attr == &sysfs_accounting)
- bch2_fs_accounting_to_text(out, c);
-
- if (attr == &sysfs_usage_base)
- bch2_fs_usage_base_to_text(out, c);
-
- return 0;
-}
-
-STORE(bch2_fs)
-{
- struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
-
- if (attr == &sysfs_copy_gc_enabled) {
- ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
- ?: (ssize_t) size;
-
- if (c->copygc_thread)
- wake_up_process(c->copygc_thread);
- return ret;
- }
-
- if (attr == &sysfs_rebalance_enabled) {
- ssize_t ret = strtoul_safe(buf, c->rebalance.enabled)
- ?: (ssize_t) size;
-
- rebalance_wakeup(c);
- return ret;
- }
-
- sysfs_pd_controller_store(rebalance, &c->rebalance.pd);
-
- /* Debugging: */
-
- if (!test_bit(BCH_FS_started, &c->flags))
- return -EPERM;
-
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs))
- return -EROFS;
-
- if (attr == &sysfs_trigger_btree_cache_shrink) {
- struct btree_cache *bc = &c->btree_cache;
- struct shrink_control sc;
-
- sc.gfp_mask = GFP_KERNEL;
- sc.nr_to_scan = strtoul_or_return(buf);
- bc->live[0].shrink->scan_objects(bc->live[0].shrink, &sc);
- }
-
- if (attr == &sysfs_trigger_btree_key_cache_shrink) {
- struct shrink_control sc;
-
- sc.gfp_mask = GFP_KERNEL;
- sc.nr_to_scan = strtoul_or_return(buf);
- c->btree_key_cache.shrink->scan_objects(c->btree_key_cache.shrink, &sc);
- }
-
- if (attr == &sysfs_trigger_gc)
- bch2_gc_gens(c);
-
- if (attr == &sysfs_trigger_discards)
- bch2_do_discards(c);
-
- if (attr == &sysfs_trigger_invalidates)
- bch2_do_invalidates(c);
-
- if (attr == &sysfs_trigger_journal_flush) {
- bch2_journal_flush_all_pins(&c->journal);
- bch2_journal_meta(&c->journal);
- }
-
- if (attr == &sysfs_trigger_journal_writes)
- bch2_journal_do_writes(&c->journal);
-
- if (attr == &sysfs_trigger_freelist_wakeup)
- closure_wake_up(&c->freelist_wait);
-
-#ifdef CONFIG_BCACHEFS_TESTS
- if (attr == &sysfs_perf_test) {
- char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
- char *test = strsep(&p, " \t\n");
- char *nr_str = strsep(&p, " \t\n");
- char *threads_str = strsep(&p, " \t\n");
- unsigned threads;
- u64 nr;
- int ret = -EINVAL;
-
- if (threads_str &&
- !(ret = kstrtouint(threads_str, 10, &threads)) &&
- !(ret = bch2_strtoull_h(nr_str, &nr)))
- ret = bch2_btree_perf_test(c, test, nr, threads);
- kfree(tmp);
-
- if (ret)
- size = ret;
- }
-#endif
- bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
- return size;
-}
-SYSFS_OPS(bch2_fs);
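A hedged usage note for the perf_test handling above: under CONFIG_BCACHEFS_TESTS the attribute takes "<test> <nr> <threads>", with nr parsed by bch2_strtoull_h() so human-readable suffixes are accepted, e.g. writing "rand_insert 1M 4" to the filesystem's perf_test file (the /sys/fs/bcachefs/<uuid>/ path layout is my assumption). Such a write ends up doing roughly the following:

	static int example_run_perf_test(struct bch_fs *c)
	{
		/* hypothetical values: test name, iteration count, thread count */
		return bch2_btree_perf_test(c, "rand_insert", 1000000, 4);
	}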
-
-struct attribute *bch2_fs_files[] = {
- &sysfs_minor,
- &sysfs_btree_cache_size,
- &sysfs_btree_write_stats,
-
- &sysfs_rebalance_status,
-
- &sysfs_compression_stats,
-
-#ifdef CONFIG_BCACHEFS_TESTS
- &sysfs_perf_test,
-#endif
- NULL
-};
-
-/* counters dir */
-
-SHOW(bch2_fs_counters)
-{
- struct bch_fs *c = container_of(kobj, struct bch_fs, counters_kobj);
- u64 counter = 0;
- u64 counter_since_mount = 0;
-
- printbuf_tabstop_push(out, 32);
-
- #define x(t, ...) \
- if (attr == &sysfs_##t) { \
- counter = percpu_u64_get(&c->counters[BCH_COUNTER_##t]);\
- counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t];\
- prt_printf(out, "since mount:\t"); \
- prt_human_readable_u64(out, counter_since_mount); \
- prt_newline(out); \
- \
- prt_printf(out, "since filesystem creation:\t"); \
- prt_human_readable_u64(out, counter); \
- prt_newline(out); \
- }
- BCH_PERSISTENT_COUNTERS()
- #undef x
- return 0;
-}
-
-STORE(bch2_fs_counters)
-{
- return 0;
-}
-
-SYSFS_OPS(bch2_fs_counters);
-
-struct attribute *bch2_fs_counters_files[] = {
-#define x(t, ...) \
- &sysfs_##t,
- BCH_PERSISTENT_COUNTERS()
-#undef x
- NULL
-};
-
-/* internal dir - just a wrapper */
-
-SHOW(bch2_fs_internal)
-{
- struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
-
- return bch2_fs_to_text(out, &c->kobj, attr);
-}
-
-STORE(bch2_fs_internal)
-{
- struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
-
- return bch2_fs_store(&c->kobj, attr, buf, size);
-}
-SYSFS_OPS(bch2_fs_internal);
-
-struct attribute *bch2_fs_internal_files[] = {
- &sysfs_flags,
- &sysfs_journal_debug,
- &sysfs_btree_cache,
- &sysfs_btree_key_cache,
- &sysfs_btree_reserve_cache,
- &sysfs_new_stripes,
- &sysfs_stripes_heap,
- &sysfs_open_buckets,
- &sysfs_open_buckets_partial,
- &sysfs_write_points,
-#ifdef BCH_WRITE_REF_DEBUG
- &sysfs_write_refs,
-#endif
- &sysfs_nocow_lock_table,
- &sysfs_io_timers_read,
- &sysfs_io_timers_write,
-
- &sysfs_trigger_gc,
- &sysfs_trigger_discards,
- &sysfs_trigger_invalidates,
- &sysfs_trigger_journal_flush,
- &sysfs_trigger_journal_writes,
- &sysfs_trigger_btree_cache_shrink,
- &sysfs_trigger_btree_key_cache_shrink,
- &sysfs_trigger_freelist_wakeup,
-
- &sysfs_gc_gens_pos,
-
- &sysfs_copy_gc_enabled,
- &sysfs_copy_gc_wait,
-
- &sysfs_rebalance_enabled,
- sysfs_pd_controller_files(rebalance),
-
- &sysfs_moving_ctxts,
-
- &sysfs_internal_uuid,
-
- &sysfs_disk_groups,
- &sysfs_alloc_debug,
- &sysfs_accounting,
- &sysfs_usage_base,
- NULL
-};
-
-/* options */
-
-SHOW(bch2_fs_opts_dir)
-{
- struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
- const struct bch_option *opt = container_of(attr, struct bch_option, attr);
- int id = opt - bch2_opt_table;
- u64 v = bch2_opt_get_by_id(&c->opts, id);
-
- bch2_opt_to_text(out, c, c->disk_sb.sb, opt, v, OPT_SHOW_FULL_LIST);
- prt_char(out, '\n');
-
- return 0;
-}
-
-STORE(bch2_fs_opts_dir)
-{
- struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
- const struct bch_option *opt = container_of(attr, struct bch_option, attr);
- int ret, id = opt - bch2_opt_table;
- char *tmp;
- u64 v;
-
- /*
- * We don't need to take c->writes for correctness, but it eliminates an
- * unsightly error message in the dmesg log when we're RO:
- */
- if (unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs)))
- return -EROFS;
-
- tmp = kstrdup(buf, GFP_KERNEL);
- if (!tmp) {
- ret = -ENOMEM;
- goto err;
- }
-
- ret = bch2_opt_parse(c, opt, strim(tmp), &v, NULL);
- kfree(tmp);
-
- if (ret < 0)
- goto err;
-
- ret = bch2_opt_check_may_set(c, id, v);
- if (ret < 0)
- goto err;
-
- bch2_opt_set_sb(c, NULL, opt, v);
- bch2_opt_set_by_id(&c->opts, id, v);
-
- if (v &&
- (id == Opt_background_target ||
- id == Opt_background_compression ||
- (id == Opt_compression && !c->opts.background_compression)))
- bch2_set_rebalance_needs_scan(c, 0);
-
- ret = size;
-err:
- bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
- return ret;
-}
-SYSFS_OPS(bch2_fs_opts_dir);
-
-struct attribute *bch2_fs_opts_dir_files[] = { NULL };
-
-int bch2_opts_create_sysfs_files(struct kobject *kobj)
-{
- const struct bch_option *i;
- int ret;
-
- for (i = bch2_opt_table;
- i < bch2_opt_table + bch2_opts_nr;
- i++) {
- if (!(i->flags & OPT_FS))
- continue;
-
- ret = sysfs_create_file(kobj, &i->attr);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-/* time stats */
-
-SHOW(bch2_fs_time_stats)
-{
- struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
-
-#define x(name) \
- if (attr == &sysfs_time_stat_##name) \
- bch2_time_stats_to_text(out, &c->times[BCH_TIME_##name]);
- BCH_TIME_STATS()
-#undef x
-
- return 0;
-}
-
-STORE(bch2_fs_time_stats)
-{
- struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
-
-#define x(name) \
- if (attr == &sysfs_time_stat_##name) \
- bch2_time_stats_reset(&c->times[BCH_TIME_##name]);
- BCH_TIME_STATS()
-#undef x
- return size;
-}
-SYSFS_OPS(bch2_fs_time_stats);
-
-struct attribute *bch2_fs_time_stats_files[] = {
-#define x(name) \
- &sysfs_time_stat_##name,
- BCH_TIME_STATS()
-#undef x
- NULL
-};
-
-static const char * const bch2_rw[] = {
- "read",
- "write",
- NULL
-};
-
-static void dev_io_done_to_text(struct printbuf *out, struct bch_dev *ca)
-{
- int rw, i;
-
- for (rw = 0; rw < 2; rw++) {
- prt_printf(out, "%s:\n", bch2_rw[rw]);
-
- for (i = 1; i < BCH_DATA_NR; i++)
- prt_printf(out, "%-12s:%12llu\n",
- bch2_data_type_str(i),
- percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
- }
-}
-
-SHOW(bch2_dev)
-{
- struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
- struct bch_fs *c = ca->fs;
-
- sysfs_printf(uuid, "%pU\n", ca->uuid.b);
-
- sysfs_print(bucket_size, bucket_bytes(ca));
- sysfs_print(first_bucket, ca->mi.first_bucket);
- sysfs_print(nbuckets, ca->mi.nbuckets);
- sysfs_print(durability, ca->mi.durability);
- sysfs_print(discard, ca->mi.discard);
-
- if (attr == &sysfs_label) {
- if (ca->mi.group)
- bch2_disk_path_to_text(out, c, ca->mi.group - 1);
- prt_char(out, '\n');
- }
-
- if (attr == &sysfs_has_data) {
- prt_bitflags(out, __bch2_data_types, bch2_dev_has_data(c, ca));
- prt_char(out, '\n');
- }
-
- if (attr == &sysfs_state_rw) {
- prt_string_option(out, bch2_member_states, ca->mi.state);
- prt_char(out, '\n');
- }
-
- if (attr == &sysfs_io_done)
- dev_io_done_to_text(out, ca);
-
- if (attr == &sysfs_io_errors)
- bch2_dev_io_errors_to_text(out, ca);
-
- sysfs_print(io_latency_read, atomic64_read(&ca->cur_latency[READ]));
- sysfs_print(io_latency_write, atomic64_read(&ca->cur_latency[WRITE]));
-
- if (attr == &sysfs_io_latency_stats_read)
- bch2_time_stats_to_text(out, &ca->io_latency[READ].stats);
-
- if (attr == &sysfs_io_latency_stats_write)
- bch2_time_stats_to_text(out, &ca->io_latency[WRITE].stats);
-
- sysfs_printf(congested, "%u%%",
- clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
- * 100 / CONGESTED_MAX);
-
- if (attr == &sysfs_alloc_debug)
- bch2_dev_alloc_debug_to_text(out, ca);
-
- if (attr == &sysfs_open_buckets)
- bch2_open_buckets_to_text(out, c, ca);
-
- return 0;
-}
-
-STORE(bch2_dev)
-{
- struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
- struct bch_fs *c = ca->fs;
-
- if (attr == &sysfs_discard) {
- bool v = strtoul_or_return(buf);
-
- bch2_opt_set_sb(c, ca, bch2_opt_table + Opt_discard, v);
- }
-
- if (attr == &sysfs_durability) {
- u64 v = strtoul_or_return(buf);
-
- bch2_opt_set_sb(c, ca, bch2_opt_table + Opt_durability, v);
- }
-
- if (attr == &sysfs_label) {
- char *tmp;
- int ret;
-
- tmp = kstrdup(buf, GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- ret = bch2_dev_group_set(c, ca, strim(tmp));
- kfree(tmp);
- if (ret)
- return ret;
- }
-
- if (attr == &sysfs_io_errors_reset)
- bch2_dev_errors_reset(ca);
-
- return size;
-}
-SYSFS_OPS(bch2_dev);
-
-struct attribute *bch2_dev_files[] = {
- &sysfs_uuid,
- &sysfs_bucket_size,
- &sysfs_first_bucket,
- &sysfs_nbuckets,
- &sysfs_durability,
-
- /* settings: */
- &sysfs_discard,
- &sysfs_state_rw,
- &sysfs_label,
-
- &sysfs_has_data,
- &sysfs_io_done,
- &sysfs_io_errors,
- &sysfs_io_errors_reset,
-
- &sysfs_io_latency_read,
- &sysfs_io_latency_write,
- &sysfs_io_latency_stats_read,
- &sysfs_io_latency_stats_write,
- &sysfs_congested,
-
- /* debug: */
- &sysfs_alloc_debug,
- &sysfs_open_buckets,
- NULL
-};
-
-#endif /* NO_BCACHEFS_SYSFS */
diff --git a/fs/bcachefs/sysfs.h b/fs/bcachefs/sysfs.h
deleted file mode 100644
index 222cd5062702..000000000000
--- a/fs/bcachefs/sysfs.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_SYSFS_H_
-#define _BCACHEFS_SYSFS_H_
-
-#include <linux/sysfs.h>
-
-#ifndef NO_BCACHEFS_SYSFS
-
-struct attribute;
-struct sysfs_ops;
-
-extern struct attribute *bch2_fs_files[];
-extern struct attribute *bch2_fs_counters_files[];
-extern struct attribute *bch2_fs_internal_files[];
-extern struct attribute *bch2_fs_opts_dir_files[];
-extern struct attribute *bch2_fs_time_stats_files[];
-extern struct attribute *bch2_dev_files[];
-
-extern const struct sysfs_ops bch2_fs_sysfs_ops;
-extern const struct sysfs_ops bch2_fs_counters_sysfs_ops;
-extern const struct sysfs_ops bch2_fs_internal_sysfs_ops;
-extern const struct sysfs_ops bch2_fs_opts_dir_sysfs_ops;
-extern const struct sysfs_ops bch2_fs_time_stats_sysfs_ops;
-extern const struct sysfs_ops bch2_dev_sysfs_ops;
-
-int bch2_opts_create_sysfs_files(struct kobject *);
-
-#else
-
-static struct attribute *bch2_fs_files[] = {};
-static struct attribute *bch2_fs_counters_files[] = {};
-static struct attribute *bch2_fs_internal_files[] = {};
-static struct attribute *bch2_fs_opts_dir_files[] = {};
-static struct attribute *bch2_fs_time_stats_files[] = {};
-static struct attribute *bch2_dev_files[] = {};
-
-static const struct sysfs_ops bch2_fs_sysfs_ops;
-static const struct sysfs_ops bch2_fs_counters_sysfs_ops;
-static const struct sysfs_ops bch2_fs_internal_sysfs_ops;
-static const struct sysfs_ops bch2_fs_opts_dir_sysfs_ops;
-static const struct sysfs_ops bch2_fs_time_stats_sysfs_ops;
-static const struct sysfs_ops bch2_dev_sysfs_ops;
-
-static inline int bch2_opts_create_sysfs_files(struct kobject *kobj) { return 0; }
-
-#endif /* NO_BCACHEFS_SYSFS */
-
-#endif /* _BCACHEFS_SYSFS_H_ */
diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c
deleted file mode 100644
index fb5c1543e52f..000000000000
--- a/fs/bcachefs/tests.c
+++ /dev/null
@@ -1,887 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifdef CONFIG_BCACHEFS_TESTS
-
-#include "bcachefs.h"
-#include "btree_update.h"
-#include "journal_reclaim.h"
-#include "snapshot.h"
-#include "tests.h"
-
-#include <linux/kthread.h>
-#include <linux/random.h>
-
-static void delete_test_keys(struct bch_fs *c)
-{
- int ret;
-
- ret = bch2_btree_delete_range(c, BTREE_ID_extents,
- SPOS(0, 0, U32_MAX),
- POS(0, U64_MAX),
- 0, NULL);
- BUG_ON(ret);
-
- ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX),
- POS(0, U64_MAX),
- 0, NULL);
- BUG_ON(ret);
-}
-
-/* unit tests */
-
-static int test_delete(struct bch_fs *c, u64 nr)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_i_cookie k;
- int ret;
-
- bkey_cookie_init(&k.k_i);
- k.k.p.snapshot = U32_MAX;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
- BTREE_ITER_intent);
-
- ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
- bch2_trans_update(trans, &iter, &k.k_i, 0));
- bch_err_msg(c, ret, "update error");
- if (ret)
- goto err;
-
- pr_info("deleting once");
- ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
- bch2_btree_delete_at(trans, &iter, 0));
- bch_err_msg(c, ret, "delete error (first)");
- if (ret)
- goto err;
-
- pr_info("deleting twice");
- ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
- bch2_btree_delete_at(trans, &iter, 0));
- bch_err_msg(c, ret, "delete error (second)");
- if (ret)
- goto err;
-err:
- bch2_trans_iter_exit(trans, &iter);
- bch2_trans_put(trans);
- return ret;
-}
-
-static int test_delete_written(struct bch_fs *c, u64 nr)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_i_cookie k;
- int ret;
-
- bkey_cookie_init(&k.k_i);
- k.k.p.snapshot = U32_MAX;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
- BTREE_ITER_intent);
-
- ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
- bch2_trans_update(trans, &iter, &k.k_i, 0));
- bch_err_msg(c, ret, "update error");
- if (ret)
- goto err;
-
- bch2_trans_unlock(trans);
- bch2_journal_flush_all_pins(&c->journal);
-
- ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
- bch2_btree_delete_at(trans, &iter, 0));
- bch_err_msg(c, ret, "delete error");
- if (ret)
- goto err;
-err:
- bch2_trans_iter_exit(trans, &iter);
- bch2_trans_put(trans);
- return ret;
-}
-
-static int test_iterate(struct bch_fs *c, u64 nr)
-{
- u64 i;
- int ret = 0;
-
- delete_test_keys(c);
-
- pr_info("inserting test keys");
-
- for (i = 0; i < nr; i++) {
- struct bkey_i_cookie ck;
-
- bkey_cookie_init(&ck.k_i);
- ck.k.p.offset = i;
- ck.k.p.snapshot = U32_MAX;
-
- ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0, 0);
- bch_err_msg(c, ret, "insert error");
- if (ret)
- return ret;
- }
-
- pr_info("iterating forwards");
- i = 0;
-
- ret = bch2_trans_run(c,
- for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
- 0, k, ({
- BUG_ON(k.k->p.offset != i++);
- 0;
- })));
- bch_err_msg(c, ret, "error iterating forwards");
- if (ret)
- return ret;
-
- BUG_ON(i != nr);
-
- pr_info("iterating backwards");
-
- ret = bch2_trans_run(c,
- for_each_btree_key_reverse(trans, iter, BTREE_ID_xattrs,
- SPOS(0, U64_MAX, U32_MAX), 0, k, ({
- BUG_ON(k.k->p.offset != --i);
- 0;
- })));
- bch_err_msg(c, ret, "error iterating backwards");
- if (ret)
- return ret;
-
- BUG_ON(i);
- return 0;
-}
-
-static int test_iterate_extents(struct bch_fs *c, u64 nr)
-{
- u64 i;
- int ret = 0;
-
- delete_test_keys(c);
-
- pr_info("inserting test extents");
-
- for (i = 0; i < nr; i += 8) {
- struct bkey_i_cookie ck;
-
- bkey_cookie_init(&ck.k_i);
- ck.k.p.offset = i + 8;
- ck.k.p.snapshot = U32_MAX;
- ck.k.size = 8;
-
- ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0, 0);
- bch_err_msg(c, ret, "insert error");
- if (ret)
- return ret;
- }
-
- pr_info("iterating forwards");
- i = 0;
-
- ret = bch2_trans_run(c,
- for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
- SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
- 0, k, ({
- BUG_ON(bkey_start_offset(k.k) != i);
- i = k.k->p.offset;
- 0;
- })));
- bch_err_msg(c, ret, "error iterating forwards");
- if (ret)
- return ret;
-
- BUG_ON(i != nr);
-
- pr_info("iterating backwards");
-
- ret = bch2_trans_run(c,
- for_each_btree_key_reverse(trans, iter, BTREE_ID_extents,
- SPOS(0, U64_MAX, U32_MAX), 0, k, ({
- BUG_ON(k.k->p.offset != i);
- i = bkey_start_offset(k.k);
- 0;
- })));
- bch_err_msg(c, ret, "error iterating backwards");
- if (ret)
- return ret;
-
- BUG_ON(i);
- return 0;
-}
-
-static int test_iterate_slots(struct bch_fs *c, u64 nr)
-{
- u64 i;
- int ret = 0;
-
- delete_test_keys(c);
-
- pr_info("inserting test keys");
-
- for (i = 0; i < nr; i++) {
- struct bkey_i_cookie ck;
-
- bkey_cookie_init(&ck.k_i);
- ck.k.p.offset = i * 2;
- ck.k.p.snapshot = U32_MAX;
-
- ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0, 0);
- bch_err_msg(c, ret, "insert error");
- if (ret)
- return ret;
- }
-
- pr_info("iterating forwards");
- i = 0;
-
- ret = bch2_trans_run(c,
- for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
- 0, k, ({
- BUG_ON(k.k->p.offset != i);
- i += 2;
- 0;
- })));
- bch_err_msg(c, ret, "error iterating forwards");
- if (ret)
- return ret;
-
- BUG_ON(i != nr * 2);
-
- pr_info("iterating forwards by slots");
- i = 0;
-
- ret = bch2_trans_run(c,
- for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
- BTREE_ITER_slots, k, ({
- if (i >= nr * 2)
- break;
-
- BUG_ON(k.k->p.offset != i);
- BUG_ON(bkey_deleted(k.k) != (i & 1));
-
- i++;
- 0;
- })));
- bch_err_msg(c, ret, "error iterating forwards by slots");
- return ret;
-}
-
-static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
-{
- u64 i;
- int ret = 0;
-
- delete_test_keys(c);
-
- pr_info("inserting test keys");
-
- for (i = 0; i < nr; i += 16) {
- struct bkey_i_cookie ck;
-
- bkey_cookie_init(&ck.k_i);
- ck.k.p.offset = i + 16;
- ck.k.p.snapshot = U32_MAX;
- ck.k.size = 8;
-
- ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0, 0);
- bch_err_msg(c, ret, "insert error");
- if (ret)
- return ret;
- }
-
- pr_info("iterating forwards");
- i = 0;
-
- ret = bch2_trans_run(c,
- for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
- SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
- 0, k, ({
- BUG_ON(bkey_start_offset(k.k) != i + 8);
- BUG_ON(k.k->size != 8);
- i += 16;
- 0;
- })));
- bch_err_msg(c, ret, "error iterating forwards");
- if (ret)
- return ret;
-
- BUG_ON(i != nr);
-
- pr_info("iterating forwards by slots");
- i = 0;
-
- ret = bch2_trans_run(c,
- for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
- SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
- BTREE_ITER_slots, k, ({
- if (i == nr)
- break;
- BUG_ON(bkey_deleted(k.k) != !(i % 16));
-
- BUG_ON(bkey_start_offset(k.k) != i);
- BUG_ON(k.k->size != 8);
- i = k.k->p.offset;
- 0;
- })));
- bch_err_msg(c, ret, "error iterating forwards by slots");
- return ret;
-}
-
-/*
- * XXX: we really want to make sure we've got a btree with depth > 0 for these
- * tests
- */
-static int test_peek_end(struct bch_fs *c, u64 nr)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_s_c k;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX), 0);
-
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
- BUG_ON(k.k);
-
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
- BUG_ON(k.k);
-
- bch2_trans_iter_exit(trans, &iter);
- bch2_trans_put(trans);
- return 0;
-}
-
-static int test_peek_end_extents(struct bch_fs *c, u64 nr)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_s_c k;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- SPOS(0, 0, U32_MAX), 0);
-
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
- BUG_ON(k.k);
-
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
- BUG_ON(k.k);
-
- bch2_trans_iter_exit(trans, &iter);
- bch2_trans_put(trans);
- return 0;
-}
-
-/* extent unit tests */
-
-static u64 test_version;
-
-static int insert_test_extent(struct bch_fs *c,
- u64 start, u64 end)
-{
- struct bkey_i_cookie k;
- int ret;
-
- bkey_cookie_init(&k.k_i);
- k.k_i.k.p.offset = end;
- k.k_i.k.p.snapshot = U32_MAX;
- k.k_i.k.size = end - start;
- k.k_i.k.bversion.lo = test_version++;
-
- ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i, NULL, 0, 0);
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int __test_extent_overwrite(struct bch_fs *c,
- u64 e1_start, u64 e1_end,
- u64 e2_start, u64 e2_end)
-{
- int ret;
-
- ret = insert_test_extent(c, e1_start, e1_end) ?:
- insert_test_extent(c, e2_start, e2_end);
-
- delete_test_keys(c);
- return ret;
-}
-
-static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
-{
- return __test_extent_overwrite(c, 0, 64, 0, 32) ?:
- __test_extent_overwrite(c, 8, 64, 0, 32);
-}
-
-static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
-{
- return __test_extent_overwrite(c, 0, 64, 32, 64) ?:
- __test_extent_overwrite(c, 0, 64, 32, 72);
-}
-
-static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
-{
- return __test_extent_overwrite(c, 0, 64, 32, 40);
-}
-
-static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
-{
- return __test_extent_overwrite(c, 32, 64, 0, 64) ?:
- __test_extent_overwrite(c, 32, 64, 0, 128) ?:
- __test_extent_overwrite(c, 32, 64, 32, 64) ?:
- __test_extent_overwrite(c, 32, 64, 32, 128);
-}
-
-static int insert_test_overlapping_extent(struct bch_fs *c, u64 inum, u64 start, u32 len, u32 snapid)
-{
- struct bkey_i_cookie k;
- int ret;
-
- bkey_cookie_init(&k.k_i);
- k.k_i.k.p.inode = inum;
- k.k_i.k.p.offset = start + len;
- k.k_i.k.p.snapshot = snapid;
- k.k_i.k.size = len;
-
- ret = bch2_trans_commit_do(c, NULL, NULL, 0,
- bch2_btree_insert_nonextent(trans, BTREE_ID_extents, &k.k_i,
- BTREE_UPDATE_internal_snapshot_node));
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int test_extent_create_overlapping(struct bch_fs *c, u64 inum)
-{
- return insert_test_overlapping_extent(c, inum, 0, 16, U32_MAX - 2) ?: /* overwrite entire */
- insert_test_overlapping_extent(c, inum, 2, 8, U32_MAX - 2) ?:
- insert_test_overlapping_extent(c, inum, 4, 4, U32_MAX) ?:
- insert_test_overlapping_extent(c, inum, 32, 8, U32_MAX - 2) ?: /* overwrite front/back */
- insert_test_overlapping_extent(c, inum, 36, 8, U32_MAX) ?:
- insert_test_overlapping_extent(c, inum, 60, 8, U32_MAX - 2) ?:
- insert_test_overlapping_extent(c, inum, 64, 8, U32_MAX);
-}
-
-/* snapshot unit tests */
-
-/* Test skipping over keys in unrelated snapshots: */
-static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
-{
- struct btree_trans *trans;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bkey_i_cookie cookie;
- int ret;
-
- bkey_cookie_init(&cookie.k_i);
- cookie.k.p.snapshot = snapid_hi;
- ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0, 0);
- if (ret)
- return ret;
-
- trans = bch2_trans_get(c);
- bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
- SPOS(0, 0, snapid_lo), 0);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
-
- BUG_ON(k.k->p.snapshot != U32_MAX);
-
- bch2_trans_iter_exit(trans, &iter);
- bch2_trans_put(trans);
- return ret;
-}
-
-static int test_snapshots(struct bch_fs *c, u64 nr)
-{
- struct bkey_i_cookie cookie;
- u32 snapids[2];
- u32 snapid_subvols[2] = { 1, 1 };
- int ret;
-
- bkey_cookie_init(&cookie.k_i);
- cookie.k.p.snapshot = U32_MAX;
- ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0, 0);
- if (ret)
- return ret;
-
- ret = bch2_trans_commit_do(c, NULL, NULL, 0,
- bch2_snapshot_node_create(trans, U32_MAX,
- snapids,
- snapid_subvols,
- 2));
- if (ret)
- return ret;
-
- if (snapids[0] > snapids[1])
- swap(snapids[0], snapids[1]);
-
- ret = test_snapshot_filter(c, snapids[0], snapids[1]);
- bch_err_msg(c, ret, "from test_snapshot_filter");
- return ret;
-}
-
-/* perf tests */
-
-static u64 test_rand(void)
-{
- u64 v;
-
- get_random_bytes(&v, sizeof(v));
- return v;
-}
-
-static int rand_insert(struct bch_fs *c, u64 nr)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct bkey_i_cookie k;
- int ret = 0;
- u64 i;
-
- for (i = 0; i < nr; i++) {
- bkey_cookie_init(&k.k_i);
- k.k.p.offset = test_rand();
- k.k.p.snapshot = U32_MAX;
-
- ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k.k_i, 0));
- if (ret)
- break;
- }
-
- bch2_trans_put(trans);
- return ret;
-}
-
-static int rand_insert_multi(struct bch_fs *c, u64 nr)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct bkey_i_cookie k[8];
- int ret = 0;
- unsigned j;
- u64 i;
-
- for (i = 0; i < nr; i += ARRAY_SIZE(k)) {
- for (j = 0; j < ARRAY_SIZE(k); j++) {
- bkey_cookie_init(&k[j].k_i);
- k[j].k.p.offset = test_rand();
- k[j].k.p.snapshot = U32_MAX;
- }
-
- ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[0].k_i, 0) ?:
- bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[1].k_i, 0) ?:
- bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[2].k_i, 0) ?:
- bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[3].k_i, 0) ?:
- bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[4].k_i, 0) ?:
- bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[5].k_i, 0) ?:
- bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[6].k_i, 0) ?:
- bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[7].k_i, 0));
- if (ret)
- break;
- }
-
- bch2_trans_put(trans);
- return ret;
-}
-
-static int rand_lookup(struct bch_fs *c, u64 nr)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret = 0;
- u64 i;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX), 0);
-
- for (i = 0; i < nr; i++) {
- bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));
-
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
- ret = bkey_err(k);
- if (ret)
- break;
- }
-
- bch2_trans_iter_exit(trans, &iter);
- bch2_trans_put(trans);
- return ret;
-}
-
-static int rand_mixed_trans(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_i_cookie *cookie,
- u64 i, u64 pos)
-{
- struct bkey_s_c k;
- int ret;
-
- bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));
-
- k = bch2_btree_iter_peek(iter);
- ret = bkey_err(k);
- bch_err_msg(trans->c, ret, "lookup error");
- if (ret)
- return ret;
-
- if (!(i & 3) && k.k) {
- bkey_cookie_init(&cookie->k_i);
- cookie->k.p = iter->pos;
- ret = bch2_trans_update(trans, iter, &cookie->k_i, 0);
- }
-
- return ret;
-}
-
-static int rand_mixed(struct bch_fs *c, u64 nr)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_i_cookie cookie;
- int ret = 0;
- u64 i, rand;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX), 0);
-
- for (i = 0; i < nr; i++) {
- rand = test_rand();
- ret = commit_do(trans, NULL, NULL, 0,
- rand_mixed_trans(trans, &iter, &cookie, i, rand));
- if (ret)
- break;
- }
-
- bch2_trans_iter_exit(trans, &iter);
- bch2_trans_put(trans);
- return ret;
-}
-
-static int __do_delete(struct btree_trans *trans, struct bpos pos)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret = 0;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
- BTREE_ITER_intent);
- k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX));
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (!k.k)
- goto err;
-
- ret = bch2_btree_delete_at(trans, &iter, 0);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-static int rand_delete(struct bch_fs *c, u64 nr)
-{
- struct btree_trans *trans = bch2_trans_get(c);
- int ret = 0;
- u64 i;
-
- for (i = 0; i < nr; i++) {
- struct bpos pos = SPOS(0, test_rand(), U32_MAX);
-
- ret = commit_do(trans, NULL, NULL, 0,
- __do_delete(trans, pos));
- if (ret)
- break;
- }
-
- bch2_trans_put(trans);
- return ret;
-}
-
-static int seq_insert(struct bch_fs *c, u64 nr)
-{
- struct bkey_i_cookie insert;
-
- bkey_cookie_init(&insert.k_i);
-
- return bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX),
- BTREE_ITER_slots|BTREE_ITER_intent, k,
- NULL, NULL, 0, ({
- if (iter.pos.offset >= nr)
- break;
- insert.k.p = iter.pos;
- bch2_trans_update(trans, &iter, &insert.k_i, 0);
- })));
-}
-
-static int seq_lookup(struct bch_fs *c, u64 nr)
-{
- return bch2_trans_run(c,
- for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
- 0, k,
- 0));
-}
-
-static int seq_overwrite(struct bch_fs *c, u64 nr)
-{
- return bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX),
- BTREE_ITER_intent, k,
- NULL, NULL, 0, ({
- struct bkey_i_cookie u;
-
- bkey_reassemble(&u.k_i, k);
- bch2_trans_update(trans, &iter, &u.k_i, 0);
- })));
-}
-
-static int seq_delete(struct bch_fs *c, u64 nr)
-{
- return bch2_btree_delete_range(c, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX),
- POS(0, U64_MAX),
- 0, NULL);
-}
-
-typedef int (*perf_test_fn)(struct bch_fs *, u64);
-
-struct test_job {
- struct bch_fs *c;
- u64 nr;
- unsigned nr_threads;
- perf_test_fn fn;
-
- atomic_t ready;
- wait_queue_head_t ready_wait;
-
- atomic_t done;
- struct completion done_completion;
-
- u64 start;
- u64 finish;
- int ret;
-};
-
-static int btree_perf_test_thread(void *data)
-{
- struct test_job *j = data;
- int ret;
-
- if (atomic_dec_and_test(&j->ready)) {
- wake_up(&j->ready_wait);
- j->start = sched_clock();
- } else {
- wait_event(j->ready_wait, !atomic_read(&j->ready));
- }
-
- ret = j->fn(j->c, div64_u64(j->nr, j->nr_threads));
- if (ret) {
- bch_err(j->c, "%ps: error %s", j->fn, bch2_err_str(ret));
- j->ret = ret;
- }
-
- if (atomic_dec_and_test(&j->done)) {
- j->finish = sched_clock();
- complete(&j->done_completion);
- }
-
- return 0;
-}
-
-int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
- u64 nr, unsigned nr_threads)
-{
- struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
- char name_buf[20];
- struct printbuf nr_buf = PRINTBUF;
- struct printbuf per_sec_buf = PRINTBUF;
- unsigned i;
- u64 time;
-
- if (nr == 0 || nr_threads == 0) {
-		pr_err("number of iterations and number of threads must both be nonzero");
- return -EINVAL;
- }
-
- atomic_set(&j.ready, nr_threads);
- init_waitqueue_head(&j.ready_wait);
-
- atomic_set(&j.done, nr_threads);
- init_completion(&j.done_completion);
-
-#define perf_test(_test) \
- if (!strcmp(testname, #_test)) j.fn = _test
-
- perf_test(rand_insert);
- perf_test(rand_insert_multi);
- perf_test(rand_lookup);
- perf_test(rand_mixed);
- perf_test(rand_delete);
-
- perf_test(seq_insert);
- perf_test(seq_lookup);
- perf_test(seq_overwrite);
- perf_test(seq_delete);
-
- /* a unit test, not a perf test: */
- perf_test(test_delete);
- perf_test(test_delete_written);
- perf_test(test_iterate);
- perf_test(test_iterate_extents);
- perf_test(test_iterate_slots);
- perf_test(test_iterate_slots_extents);
- perf_test(test_peek_end);
- perf_test(test_peek_end_extents);
-
- perf_test(test_extent_overwrite_front);
- perf_test(test_extent_overwrite_back);
- perf_test(test_extent_overwrite_middle);
- perf_test(test_extent_overwrite_all);
- perf_test(test_extent_create_overlapping);
-
- perf_test(test_snapshots);
-
- if (!j.fn) {
- pr_err("unknown test %s", testname);
- return -EINVAL;
- }
-
- //pr_info("running test %s:", testname);
-
- if (nr_threads == 1)
- btree_perf_test_thread(&j);
- else
- for (i = 0; i < nr_threads; i++)
- kthread_run(btree_perf_test_thread, &j,
- "bcachefs perf test[%u]", i);
-
- while (wait_for_completion_interruptible(&j.done_completion))
- ;
-
- time = j.finish - j.start;
-
- scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
- prt_human_readable_u64(&nr_buf, nr);
- prt_human_readable_u64(&per_sec_buf, div64_u64(nr * NSEC_PER_SEC, time));
- printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
- name_buf, nr_buf.buf, nr_threads,
- div_u64(time, NSEC_PER_SEC),
- div_u64(time * nr_threads, nr),
- per_sec_buf.buf);
- printbuf_exit(&per_sec_buf);
- printbuf_exit(&nr_buf);
- return j.ret;
-}
-
-#endif /* CONFIG_BCACHEFS_TESTS */
diff --git a/fs/bcachefs/tests.h b/fs/bcachefs/tests.h
deleted file mode 100644
index c73b18aea7e0..000000000000
--- a/fs/bcachefs/tests.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_TEST_H
-#define _BCACHEFS_TEST_H
-
-struct bch_fs;
-
-#ifdef CONFIG_BCACHEFS_TESTS
-
-int bch2_btree_perf_test(struct bch_fs *, const char *, u64, unsigned);
-
-#else
-
-#endif /* CONFIG_BCACHEFS_TESTS */
-
-#endif /* _BCACHEFS_TEST_H */
diff --git a/fs/bcachefs/thread_with_file.c b/fs/bcachefs/thread_with_file.c
deleted file mode 100644
index dea73bc1cb51..000000000000
--- a/fs/bcachefs/thread_with_file.c
+++ /dev/null
@@ -1,492 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef NO_BCACHEFS_FS
-
-#include "bcachefs.h"
-#include "thread_with_file.h"
-
-#include <linux/anon_inodes.h>
-#include <linux/file.h>
-#include <linux/kthread.h>
-#include <linux/pagemap.h>
-#include <linux/poll.h>
-#include <linux/sched/sysctl.h>
-
-void bch2_thread_with_file_exit(struct thread_with_file *thr)
-{
- if (thr->task) {
- kthread_stop(thr->task);
- put_task_struct(thr->task);
- }
-}
-
-int bch2_run_thread_with_file(struct thread_with_file *thr,
- const struct file_operations *fops,
- int (*fn)(void *))
-{
- struct file *file = NULL;
- int ret, fd = -1;
- unsigned fd_flags = O_CLOEXEC;
-
- if (fops->read && fops->write)
- fd_flags |= O_RDWR;
- else if (fops->read)
- fd_flags |= O_RDONLY;
- else if (fops->write)
- fd_flags |= O_WRONLY;
-
- char name[TASK_COMM_LEN];
- get_task_comm(name, current);
-
- thr->ret = 0;
- thr->task = kthread_create(fn, thr, "%s", name);
- ret = PTR_ERR_OR_ZERO(thr->task);
- if (ret)
- return ret;
-
- ret = get_unused_fd_flags(fd_flags);
- if (ret < 0)
- goto err;
- fd = ret;
-
- file = anon_inode_getfile(name, fops, thr, fd_flags);
- ret = PTR_ERR_OR_ZERO(file);
- if (ret)
- goto err;
-
- get_task_struct(thr->task);
- wake_up_process(thr->task);
- fd_install(fd, file);
- return fd;
-err:
- if (fd >= 0)
- put_unused_fd(fd);
- if (thr->task)
- kthread_stop(thr->task);
- return ret;
-}
-
-/* stdio_redirect */
-
-static bool stdio_redirect_has_more_input(struct stdio_redirect *stdio, size_t seen)
-{
- return stdio->input.buf.nr > seen || stdio->done;
-}
-
-static bool stdio_redirect_has_input(struct stdio_redirect *stdio)
-{
- return stdio_redirect_has_more_input(stdio, 0);
-}
-
-static bool stdio_redirect_has_output(struct stdio_redirect *stdio)
-{
- return stdio->output.buf.nr || stdio->done;
-}
-
-#define STDIO_REDIRECT_BUFSIZE 4096
-
-static bool stdio_redirect_has_input_space(struct stdio_redirect *stdio)
-{
- return stdio->input.buf.nr < STDIO_REDIRECT_BUFSIZE || stdio->done;
-}
-
-static bool stdio_redirect_has_output_space(struct stdio_redirect *stdio)
-{
- return stdio->output.buf.nr < STDIO_REDIRECT_BUFSIZE || stdio->done;
-}
-
-static void stdio_buf_init(struct stdio_buf *buf)
-{
- spin_lock_init(&buf->lock);
- init_waitqueue_head(&buf->wait);
- darray_init(&buf->buf);
-}
-
-/* thread_with_stdio */
-
-static void thread_with_stdio_done(struct thread_with_stdio *thr)
-{
- thr->thr.done = true;
- thr->stdio.done = true;
- wake_up(&thr->stdio.input.wait);
- wake_up(&thr->stdio.output.wait);
-}
-
-static ssize_t thread_with_stdio_read(struct file *file, char __user *ubuf,
- size_t len, loff_t *ppos)
-{
- struct thread_with_stdio *thr =
- container_of(file->private_data, struct thread_with_stdio, thr);
- struct stdio_buf *buf = &thr->stdio.output;
- size_t copied = 0, b;
- int ret = 0;
-
- if (!(file->f_flags & O_NONBLOCK)) {
- ret = wait_event_interruptible(buf->wait, stdio_redirect_has_output(&thr->stdio));
- if (ret)
- return ret;
- } else if (!stdio_redirect_has_output(&thr->stdio))
- return -EAGAIN;
-
- while (len && buf->buf.nr) {
- if (fault_in_writeable(ubuf, len) == len) {
- ret = -EFAULT;
- break;
- }
-
- spin_lock_irq(&buf->lock);
- b = min_t(size_t, len, buf->buf.nr);
-
- if (b && !copy_to_user_nofault(ubuf, buf->buf.data, b)) {
- ubuf += b;
- len -= b;
- copied += b;
- buf->buf.nr -= b;
- memmove(buf->buf.data,
- buf->buf.data + b,
- buf->buf.nr);
- }
- spin_unlock_irq(&buf->lock);
- }
-
- return copied ?: ret;
-}
-
-static int thread_with_stdio_release(struct inode *inode, struct file *file)
-{
- struct thread_with_stdio *thr =
- container_of(file->private_data, struct thread_with_stdio, thr);
-
- thread_with_stdio_done(thr);
- bch2_thread_with_file_exit(&thr->thr);
- darray_exit(&thr->stdio.input.buf);
- darray_exit(&thr->stdio.output.buf);
- thr->ops->exit(thr);
- return 0;
-}
-
-static ssize_t thread_with_stdio_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *ppos)
-{
- struct thread_with_stdio *thr =
- container_of(file->private_data, struct thread_with_stdio, thr);
- struct stdio_buf *buf = &thr->stdio.input;
- size_t copied = 0;
- ssize_t ret = 0;
-
- while (len) {
- if (thr->thr.done) {
- ret = -EPIPE;
- break;
- }
-
- size_t b = len - fault_in_readable(ubuf, len);
- if (!b) {
- ret = -EFAULT;
- break;
- }
-
- spin_lock(&buf->lock);
- size_t makeroom = b;
- if (!buf->waiting_for_line || memchr(buf->buf.data, '\n', buf->buf.nr))
- makeroom = min_t(ssize_t, makeroom,
- max_t(ssize_t, STDIO_REDIRECT_BUFSIZE - buf->buf.nr,
- 0));
- darray_make_room_gfp(&buf->buf, makeroom, GFP_NOWAIT);
-
- b = min(len, darray_room(buf->buf));
-
- if (b && !copy_from_user_nofault(&darray_top(buf->buf), ubuf, b)) {
- buf->buf.nr += b;
- ubuf += b;
- len -= b;
- copied += b;
- }
- spin_unlock(&buf->lock);
-
- if (b) {
- wake_up(&buf->wait);
- } else {
- if ((file->f_flags & O_NONBLOCK)) {
- ret = -EAGAIN;
- break;
- }
-
- ret = wait_event_interruptible(buf->wait,
- stdio_redirect_has_input_space(&thr->stdio));
- if (ret)
- break;
- }
- }
-
- return copied ?: ret;
-}
-
-static __poll_t thread_with_stdio_poll(struct file *file, struct poll_table_struct *wait)
-{
- struct thread_with_stdio *thr =
- container_of(file->private_data, struct thread_with_stdio, thr);
-
- poll_wait(file, &thr->stdio.output.wait, wait);
- poll_wait(file, &thr->stdio.input.wait, wait);
-
- __poll_t mask = 0;
-
- if (stdio_redirect_has_output(&thr->stdio))
- mask |= EPOLLIN;
- if (stdio_redirect_has_input_space(&thr->stdio))
- mask |= EPOLLOUT;
- if (thr->thr.done)
- mask |= EPOLLHUP|EPOLLERR;
- return mask;
-}
-
-static __poll_t thread_with_stdout_poll(struct file *file, struct poll_table_struct *wait)
-{
- struct thread_with_stdio *thr =
- container_of(file->private_data, struct thread_with_stdio, thr);
-
- poll_wait(file, &thr->stdio.output.wait, wait);
-
- __poll_t mask = 0;
-
- if (stdio_redirect_has_output(&thr->stdio))
- mask |= EPOLLIN;
- if (thr->thr.done)
- mask |= EPOLLHUP|EPOLLERR;
- return mask;
-}
-
-static int thread_with_stdio_flush(struct file *file, fl_owner_t id)
-{
- struct thread_with_stdio *thr =
- container_of(file->private_data, struct thread_with_stdio, thr);
-
- return thr->thr.ret;
-}
-
-static long thread_with_stdio_ioctl(struct file *file, unsigned int cmd, unsigned long p)
-{
- struct thread_with_stdio *thr =
- container_of(file->private_data, struct thread_with_stdio, thr);
-
- if (thr->ops->unlocked_ioctl)
- return thr->ops->unlocked_ioctl(thr, cmd, p);
- return -ENOTTY;
-}
-
-static const struct file_operations thread_with_stdio_fops = {
- .read = thread_with_stdio_read,
- .write = thread_with_stdio_write,
- .poll = thread_with_stdio_poll,
- .flush = thread_with_stdio_flush,
- .release = thread_with_stdio_release,
- .unlocked_ioctl = thread_with_stdio_ioctl,
-};
-
-static const struct file_operations thread_with_stdout_fops = {
- .read = thread_with_stdio_read,
- .poll = thread_with_stdout_poll,
- .flush = thread_with_stdio_flush,
- .release = thread_with_stdio_release,
- .unlocked_ioctl = thread_with_stdio_ioctl,
-};
-
-static int thread_with_stdio_fn(void *arg)
-{
- struct thread_with_stdio *thr = arg;
-
- thr->thr.ret = thr->ops->fn(thr);
-
- thread_with_stdio_done(thr);
- return 0;
-}
-
-void bch2_thread_with_stdio_init(struct thread_with_stdio *thr,
- const struct thread_with_stdio_ops *ops)
-{
- stdio_buf_init(&thr->stdio.input);
- stdio_buf_init(&thr->stdio.output);
- thr->ops = ops;
-}
-
-int __bch2_run_thread_with_stdio(struct thread_with_stdio *thr)
-{
- return bch2_run_thread_with_file(&thr->thr, &thread_with_stdio_fops, thread_with_stdio_fn);
-}
-
-int bch2_run_thread_with_stdio(struct thread_with_stdio *thr,
- const struct thread_with_stdio_ops *ops)
-{
- bch2_thread_with_stdio_init(thr, ops);
-
- return __bch2_run_thread_with_stdio(thr);
-}
-
-int bch2_run_thread_with_stdout(struct thread_with_stdio *thr,
- const struct thread_with_stdio_ops *ops)
-{
- stdio_buf_init(&thr->stdio.input);
- stdio_buf_init(&thr->stdio.output);
- thr->ops = ops;
-
- return bch2_run_thread_with_file(&thr->thr, &thread_with_stdout_fops, thread_with_stdio_fn);
-}
-EXPORT_SYMBOL_GPL(bch2_run_thread_with_stdout);
-
-int bch2_stdio_redirect_read(struct stdio_redirect *stdio, char *ubuf, size_t len)
-{
- struct stdio_buf *buf = &stdio->input;
-
- /*
- * we're waiting on user input (or for the file descriptor to be
-	 * closed), so we don't want a hung task warning:
- */
- do {
- wait_event_timeout(buf->wait, stdio_redirect_has_input(stdio),
- sysctl_hung_task_timeout_secs * HZ / 2);
- } while (!stdio_redirect_has_input(stdio));
-
- if (stdio->done)
- return -1;
-
- spin_lock(&buf->lock);
- int ret = min(len, buf->buf.nr);
- buf->buf.nr -= ret;
- memcpy(ubuf, buf->buf.data, ret);
- memmove(buf->buf.data,
- buf->buf.data + ret,
- buf->buf.nr);
- spin_unlock(&buf->lock);
-
- wake_up(&buf->wait);
- return ret;
-}
-
-int bch2_stdio_redirect_readline_timeout(struct stdio_redirect *stdio,
- darray_char *line,
- unsigned long timeout)
-{
- unsigned long until = jiffies + timeout, t;
- struct stdio_buf *buf = &stdio->input;
- size_t seen = 0;
-again:
- t = timeout != MAX_SCHEDULE_TIMEOUT
- ? max_t(long, until - jiffies, 0)
- : timeout;
-
- t = min(t, sysctl_hung_task_timeout_secs * HZ / 2);
-
- wait_event_timeout(buf->wait, stdio_redirect_has_more_input(stdio, seen), t);
-
- if (stdio->done)
- return -1;
-
- spin_lock(&buf->lock);
- seen = buf->buf.nr;
- char *n = memchr(buf->buf.data, '\n', seen);
-
- if (!n && timeout != MAX_SCHEDULE_TIMEOUT && time_after_eq(jiffies, until)) {
- spin_unlock(&buf->lock);
- return -ETIME;
- }
-
- if (!n) {
- buf->waiting_for_line = true;
- spin_unlock(&buf->lock);
- goto again;
- }
-
- size_t b = n + 1 - buf->buf.data;
- if (b > line->size) {
- spin_unlock(&buf->lock);
- int ret = darray_resize(line, b);
- if (ret)
- return ret;
- seen = 0;
- goto again;
- }
-
- buf->buf.nr -= b;
- memcpy(line->data, buf->buf.data, b);
- memmove(buf->buf.data,
- buf->buf.data + b,
- buf->buf.nr);
- line->nr = b;
-
- buf->waiting_for_line = false;
- spin_unlock(&buf->lock);
-
- wake_up(&buf->wait);
- return 0;
-}
-
-int bch2_stdio_redirect_readline(struct stdio_redirect *stdio, darray_char *line)
-{
- return bch2_stdio_redirect_readline_timeout(stdio, line, MAX_SCHEDULE_TIMEOUT);
-}
-
-__printf(3, 0)
-static ssize_t bch2_darray_vprintf(darray_char *out, gfp_t gfp, const char *fmt, va_list args)
-{
- ssize_t ret;
-
- do {
- va_list args2;
- size_t len;
-
- va_copy(args2, args);
- len = vsnprintf(out->data + out->nr, darray_room(*out), fmt, args2);
- va_end(args2);
-
- if (len + 1 <= darray_room(*out)) {
- out->nr += len;
- return len;
- }
-
- ret = darray_make_room_gfp(out, len + 1, gfp);
- } while (ret == 0);
-
- return ret;
-}
-
-ssize_t bch2_stdio_redirect_vprintf(struct stdio_redirect *stdio, bool nonblocking,
- const char *fmt, va_list args)
-{
- struct stdio_buf *buf = &stdio->output;
- unsigned long flags;
- ssize_t ret;
-
-again:
- spin_lock_irqsave(&buf->lock, flags);
- ret = bch2_darray_vprintf(&buf->buf, GFP_NOWAIT, fmt, args);
- spin_unlock_irqrestore(&buf->lock, flags);
-
- if (ret < 0) {
- if (nonblocking)
- return -EAGAIN;
-
- ret = wait_event_interruptible(buf->wait,
- stdio_redirect_has_output_space(stdio));
- if (ret)
- return ret;
- goto again;
- }
-
- wake_up(&buf->wait);
- return ret;
-}
-
-ssize_t bch2_stdio_redirect_printf(struct stdio_redirect *stdio, bool nonblocking,
- const char *fmt, ...)
-{
- va_list args;
- ssize_t ret;
-
- va_start(args, fmt);
- ret = bch2_stdio_redirect_vprintf(stdio, nonblocking, fmt, args);
- va_end(args);
-
- return ret;
-}
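
The nonblocking flag is the only difference between the two modes: when the output buffer has no room, a blocking caller sleeps in wait_event_interruptible() until the reader drains it, while a nonblocking caller gets -EAGAIN and the whole message is dropped (messages are never split). A brief sketch, where stdio and pct are assumed to come from the surrounding code:

/* blocking: waits for the fd reader to make room if the buffer is full */
bch2_stdio_redirect_printf(stdio, false, "progress: %u%%\n", pct);

/* nonblocking: may drop the message rather than sleep */
if (bch2_stdio_redirect_printf(stdio, true, "debug: pass %u\n", pct) == -EAGAIN)
	pr_debug("stdio output buffer full, message dropped\n");
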
-
-#endif /* NO_BCACHEFS_FS */
diff --git a/fs/bcachefs/thread_with_file.h b/fs/bcachefs/thread_with_file.h
deleted file mode 100644
index 72497b921911..000000000000
--- a/fs/bcachefs/thread_with_file.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_THREAD_WITH_FILE_H
-#define _BCACHEFS_THREAD_WITH_FILE_H
-
-#include "thread_with_file_types.h"
-
-/*
- * Thread with file: Run a kthread and connect it to a file descriptor, so that
- * it can be interacted with via fd read/write methods, and closing the file
- * descriptor stops the kthread.
- *
- * We have two different APIs:
- *
- * thread_with_file, the low level version.
- * You get to define the full file_operations, including your release function,
- * which means that you must call bch2_thread_with_file_exit() from your
- * .release method
- *
- * thread_with_stdio, the higher level version
- * This implements full piping of input and output, including .poll.
- *
- * Notes on behaviour:
- * - kthread shutdown behaves like writing or reading from a pipe that has been
- * closed
- * - Input and output buffers are 4096 bytes, although buffers may in some
- * situations slightly exceed that limit so as to avoid chopping off a
- * message in the middle in nonblocking mode.
- * - Input/output buffers are lazily allocated, with GFP_NOWAIT allocations -
- * should be fine but might change in future revisions.
- * - Output buffer may grow past 4096 bytes to deal with messages that are
- * bigger than 4096 bytes
- * - Writing may be done blocking or nonblocking; in nonblocking mode, we only
- * drop entire messages.
- *
- * To write, use stdio_redirect_printf()
- * To read, use stdio_redirect_read() or stdio_redirect_readline()
- */
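
As a concrete illustration of the thread_with_stdio flavour, here is a minimal, hypothetical caller: it allocates the structure, fills in thread_with_stdio_ops, and starts the kthread. Everything beyond the declarations in this header is a stand-in - example_fn, example_exit, the assumption that the return value is the new file descriptor, and the assumption that ->exit is not called if startup fails:

#include <linux/slab.h>

#include "thread_with_file.h"

static int example_fn(struct thread_with_stdio *thr)
{
	bch2_stdio_redirect_printf(&thr->stdio, false, "hello from the kthread\n");
	return 0;
}

static void example_exit(struct thread_with_stdio *thr)
{
	kfree(thr);			/* allocated in example_start() below */
}

static const struct thread_with_stdio_ops example_ops = {
	.fn	= example_fn,
	.exit	= example_exit,
};

static int example_start(void)
{
	struct thread_with_stdio *thr = kzalloc(sizeof(*thr), GFP_KERNEL);

	if (!thr)
		return -ENOMEM;

	/* assumed: returns the fd connected to the thread on success */
	int ret = bch2_run_thread_with_stdio(thr, &example_ops);
	if (ret < 0)
		kfree(thr);		/* assumed: ->exit isn't called if startup fails */
	return ret;
}
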
-
-struct task_struct;
-
-struct thread_with_file {
- struct task_struct *task;
- int ret;
- bool done;
-};
-
-void bch2_thread_with_file_exit(struct thread_with_file *);
-int bch2_run_thread_with_file(struct thread_with_file *,
- const struct file_operations *,
- int (*fn)(void *));
-
-struct thread_with_stdio;
-
-struct thread_with_stdio_ops {
- void (*exit)(struct thread_with_stdio *);
- int (*fn)(struct thread_with_stdio *);
- long (*unlocked_ioctl)(struct thread_with_stdio *, unsigned int, unsigned long);
-};
-
-struct thread_with_stdio {
- struct thread_with_file thr;
- struct stdio_redirect stdio;
- const struct thread_with_stdio_ops *ops;
-};
-
-void bch2_thread_with_stdio_init(struct thread_with_stdio *,
- const struct thread_with_stdio_ops *);
-int __bch2_run_thread_with_stdio(struct thread_with_stdio *);
-int bch2_run_thread_with_stdio(struct thread_with_stdio *,
- const struct thread_with_stdio_ops *);
-int bch2_run_thread_with_stdout(struct thread_with_stdio *,
- const struct thread_with_stdio_ops *);
-int bch2_stdio_redirect_read(struct stdio_redirect *, char *, size_t);
-
-int bch2_stdio_redirect_readline_timeout(struct stdio_redirect *, darray_char *, unsigned long);
-int bch2_stdio_redirect_readline(struct stdio_redirect *, darray_char *);
-
-__printf(3, 0) ssize_t bch2_stdio_redirect_vprintf(struct stdio_redirect *, bool, const char *, va_list);
-__printf(3, 4) ssize_t bch2_stdio_redirect_printf(struct stdio_redirect *, bool, const char *, ...);
-
-#endif /* _BCACHEFS_THREAD_WITH_FILE_H */
diff --git a/fs/bcachefs/thread_with_file_types.h b/fs/bcachefs/thread_with_file_types.h
deleted file mode 100644
index f4d484d44f63..000000000000
--- a/fs/bcachefs/thread_with_file_types.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_THREAD_WITH_FILE_TYPES_H
-#define _BCACHEFS_THREAD_WITH_FILE_TYPES_H
-
-#include "darray.h"
-
-struct stdio_buf {
- spinlock_t lock;
- wait_queue_head_t wait;
- darray_char buf;
- bool waiting_for_line;
-};
-
-struct stdio_redirect {
- struct stdio_buf input;
- struct stdio_buf output;
- bool done;
-};
-
-#endif /* _BCACHEFS_THREAD_WITH_FILE_TYPES_H */
diff --git a/fs/bcachefs/time_stats.c b/fs/bcachefs/time_stats.c
deleted file mode 100644
index 3fe82757f93a..000000000000
--- a/fs/bcachefs/time_stats.c
+++ /dev/null
@@ -1,179 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/preempt.h>
-#include <linux/time.h>
-#include <linux/spinlock.h>
-
-#include "eytzinger.h"
-#include "time_stats.h"
-
-static const struct time_unit time_units[] = {
- { "ns", 1 },
- { "us", NSEC_PER_USEC },
- { "ms", NSEC_PER_MSEC },
- { "s", NSEC_PER_SEC },
- { "m", (u64) NSEC_PER_SEC * 60},
- { "h", (u64) NSEC_PER_SEC * 3600},
- { "d", (u64) NSEC_PER_SEC * 3600 * 24},
- { "w", (u64) NSEC_PER_SEC * 3600 * 24 * 7},
- { "y", (u64) NSEC_PER_SEC * ((3600 * 24 * 7 * 365) + (3600 * (24 / 4) * 7))}, /* 365.25d */
- { "eon", U64_MAX },
-};
-
-const struct time_unit *bch2_pick_time_units(u64 ns)
-{
- const struct time_unit *u;
-
- for (u = time_units;
- u + 1 < time_units + ARRAY_SIZE(time_units) &&
- ns >= u[1].nsecs << 1;
- u++)
- ;
-
- return u;
-}
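
For example, 90,000,000 ns is at least twice one millisecond but less than twice one second, so the loop above stops at "ms"; the caller then divides by the chosen unit for display. A small sketch (the actual printing is left as a comment, since the print helpers live elsewhere):

static u64 example_scale(u64 ns)
{
	const struct time_unit *u = bch2_pick_time_units(ns);

	/* for ns == 90 * NSEC_PER_MSEC, u->name is "ms" and this returns 90 */
	return div64_u64(ns, u->nsecs);
}
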
-
-static void quantiles_update(struct quantiles *q, u64 v)
-{
- unsigned i = 0;
-
- while (i < ARRAY_SIZE(q->entries)) {
- struct quantile_entry *e = q->entries + i;
-
- if (unlikely(!e->step)) {
- e->m = v;
- e->step = max_t(unsigned, v / 2, 1024);
- } else if (e->m > v) {
- e->m = e->m >= e->step
- ? e->m - e->step
- : 0;
- } else if (e->m < v) {
- e->m = e->m + e->step > e->m
- ? e->m + e->step
- : U32_MAX;
- }
-
- if ((e->m > v ? e->m - v : v - e->m) < e->step)
- e->step = max_t(unsigned, e->step / 2, 1);
-
- if (v >= e->m)
- break;
-
- i = eytzinger0_child(i, v > e->m);
- }
-}
-
-static inline void time_stats_update_one(struct bch2_time_stats *stats,
- u64 start, u64 end)
-{
- u64 duration, freq;
- bool initted = stats->last_event != 0;
-
- if (time_after64(end, start)) {
- struct quantiles *quantiles = time_stats_to_quantiles(stats);
-
- duration = end - start;
- mean_and_variance_update(&stats->duration_stats, duration);
- mean_and_variance_weighted_update(&stats->duration_stats_weighted,
- duration, initted, TIME_STATS_MV_WEIGHT);
- stats->max_duration = max(stats->max_duration, duration);
- stats->min_duration = min(stats->min_duration, duration);
- stats->total_duration += duration;
-
- if (quantiles)
- quantiles_update(quantiles, duration);
- }
-
- if (stats->last_event && time_after64(end, stats->last_event)) {
- freq = end - stats->last_event;
- mean_and_variance_update(&stats->freq_stats, freq);
- mean_and_variance_weighted_update(&stats->freq_stats_weighted,
- freq, initted, TIME_STATS_MV_WEIGHT);
- stats->max_freq = max(stats->max_freq, freq);
- stats->min_freq = min(stats->min_freq, freq);
- }
-
- stats->last_event = end;
-}
-
-void __bch2_time_stats_clear_buffer(struct bch2_time_stats *stats,
- struct time_stat_buffer *b)
-{
- for (struct time_stat_buffer_entry *i = b->entries;
- i < b->entries + ARRAY_SIZE(b->entries);
- i++)
- time_stats_update_one(stats, i->start, i->end);
- b->nr = 0;
-}
-
-static noinline void time_stats_clear_buffer(struct bch2_time_stats *stats,
- struct time_stat_buffer *b)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&stats->lock, flags);
- __bch2_time_stats_clear_buffer(stats, b);
- spin_unlock_irqrestore(&stats->lock, flags);
-}
-
-void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end)
-{
- unsigned long flags;
-
- if (!stats->buffer) {
- spin_lock_irqsave(&stats->lock, flags);
- time_stats_update_one(stats, start, end);
-
- if (mean_and_variance_weighted_get_mean(stats->freq_stats_weighted, TIME_STATS_MV_WEIGHT) < 32 &&
- stats->duration_stats.n > 1024)
- stats->buffer =
- alloc_percpu_gfp(struct time_stat_buffer,
- GFP_ATOMIC);
- spin_unlock_irqrestore(&stats->lock, flags);
- } else {
- struct time_stat_buffer *b;
-
- preempt_disable();
- b = this_cpu_ptr(stats->buffer);
-
- BUG_ON(b->nr >= ARRAY_SIZE(b->entries));
- b->entries[b->nr++] = (struct time_stat_buffer_entry) {
- .start = start,
- .end = end
- };
-
- if (unlikely(b->nr == ARRAY_SIZE(b->entries)))
- time_stats_clear_buffer(stats, b);
- preempt_enable();
- }
-}
-
-void bch2_time_stats_reset(struct bch2_time_stats *stats)
-{
- spin_lock_irq(&stats->lock);
- unsigned offset = offsetof(struct bch2_time_stats, min_duration);
- memset((void *) stats + offset, 0, sizeof(*stats) - offset);
-
- if (stats->buffer) {
- int cpu;
- for_each_possible_cpu(cpu)
- per_cpu_ptr(stats->buffer, cpu)->nr = 0;
- }
- spin_unlock_irq(&stats->lock);
-}
-
-void bch2_time_stats_exit(struct bch2_time_stats *stats)
-{
- free_percpu(stats->buffer);
-}
-
-void bch2_time_stats_init(struct bch2_time_stats *stats)
-{
- memset(stats, 0, sizeof(*stats));
- stats->min_duration = U64_MAX;
- stats->min_freq = U64_MAX;
- spin_lock_init(&stats->lock);
-}
diff --git a/fs/bcachefs/time_stats.h b/fs/bcachefs/time_stats.h
deleted file mode 100644
index dc6493f7bbab..000000000000
--- a/fs/bcachefs/time_stats.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * bch2_time_stats - collect statistics on events that have a duration, with nicely
- * formatted textual output on demand
- *
- * - percpu buffering of event collection: cheap enough to shotgun
- * everywhere without worrying about overhead
- *
- * tracks:
- * - number of events
- * - maximum event duration ever seen
- * - sum of all event durations
- * - average event duration, standard and weighted
- * - standard deviation of event durations, standard and weighted
- * and analogous statistics for the frequency of events
- *
- * We provide both mean and weighted mean (exponentially weighted), and standard
- * deviation and weighted standard deviation, to give an efficient-to-compute
- * view of current behaviour versus average behaviour - "did this event source
- * just become wonky, or is this typical?".
- *
- * Particularly useful for tracking down latency issues.
- */
-#ifndef _BCACHEFS_TIME_STATS_H
-#define _BCACHEFS_TIME_STATS_H
-
-#include <linux/sched/clock.h>
-#include <linux/spinlock_types.h>
-#include <linux/string.h>
-
-#include "mean_and_variance.h"
-
-struct time_unit {
- const char *name;
- u64 nsecs;
-};
-
-/*
- * given a nanosecond value, pick the preferred time units for printing:
- */
-const struct time_unit *bch2_pick_time_units(u64 ns);
-
-/*
- * quantiles - do not use:
- *
- * Only enabled if ->have_quantiles has been set (via bch2_time_stats_quantiles_init()) -
- * don't use in new code.
- */
-
-#define NR_QUANTILES 15
-#define QUANTILE_IDX(i) inorder_to_eytzinger0(i, NR_QUANTILES)
-#define QUANTILE_FIRST eytzinger0_first(NR_QUANTILES)
-#define QUANTILE_LAST eytzinger0_last(NR_QUANTILES)
-
-struct quantiles {
- struct quantile_entry {
- u64 m;
- u64 step;
- } entries[NR_QUANTILES];
-};
-
-struct time_stat_buffer {
- unsigned nr;
- struct time_stat_buffer_entry {
- u64 start;
- u64 end;
- } entries[31];
-};
-
-struct bch2_time_stats {
- spinlock_t lock;
- bool have_quantiles;
- struct time_stat_buffer __percpu *buffer;
- /* all fields are in nanoseconds */
- u64 min_duration;
- u64 max_duration;
- u64 total_duration;
- u64 max_freq;
- u64 min_freq;
- u64 last_event;
- u64 last_event_start;
-
- struct mean_and_variance duration_stats;
- struct mean_and_variance freq_stats;
-
-/* default weight for weighted mean and variance calculations */
-#define TIME_STATS_MV_WEIGHT 8
-
- struct mean_and_variance_weighted duration_stats_weighted;
- struct mean_and_variance_weighted freq_stats_weighted;
-};
-
-struct bch2_time_stats_quantiles {
- struct bch2_time_stats stats;
- struct quantiles quantiles;
-};
-
-static inline struct quantiles *time_stats_to_quantiles(struct bch2_time_stats *stats)
-{
- return stats->have_quantiles
- ? &container_of(stats, struct bch2_time_stats_quantiles, stats)->quantiles
- : NULL;
-}
-
-void __bch2_time_stats_clear_buffer(struct bch2_time_stats *, struct time_stat_buffer *);
-void __bch2_time_stats_update(struct bch2_time_stats *stats, u64, u64);
-
-/**
- * bch2_time_stats_update - collect a new event being tracked
- *
- * @stats: bch2_time_stats to update
- * @start: start time of the event, recorded with local_clock()
- *
- * The end time of the event is taken to be the current time
- */
-static inline void bch2_time_stats_update(struct bch2_time_stats *stats, u64 start)
-{
- __bch2_time_stats_update(stats, start, local_clock());
-}
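
In practice a caller records local_clock() before the operation and passes that start time here once the operation completes; a small sketch, where example_stats is assumed to have been set up with bch2_time_stats_init() and do_the_work() is a hypothetical workload:

static struct bch2_time_stats example_stats;	/* bch2_time_stats_init()ed at startup */

static void example_timed_op(void)
{
	u64 start = local_clock();

	do_the_work();				/* hypothetical */

	bch2_time_stats_update(&example_stats, start);
}
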
-
-/**
- * track_event_change - track state change events
- *
- * @stats: bch2_time_stats to update
- * @v: new state, true or false
- *
- * Use this when tracking time stats for state changes, e.g. resource X becoming
- * blocked/unblocked.
- */
-static inline bool track_event_change(struct bch2_time_stats *stats, bool v)
-{
- if (v != !!stats->last_event_start) {
- if (!v) {
- bch2_time_stats_update(stats, stats->last_event_start);
- stats->last_event_start = 0;
- } else {
- stats->last_event_start = local_clock() ?: 1;
- return true;
- }
- }
-
- return false;
-}
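
Each true/false transition pair thus becomes one recorded event whose duration is the time the state was held. A sketch of tracking how long a hypothetical resource stays blocked, with example_blocked_stats assumed to be a bch2_time_stats_init()ed object:

static void example_set_blocked(bool blocked)
{
	/* true starts timing a blocked period, false ends it and records it */
	track_event_change(&example_blocked_stats, blocked);
}
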
-
-void bch2_time_stats_reset(struct bch2_time_stats *);
-void bch2_time_stats_exit(struct bch2_time_stats *);
-void bch2_time_stats_init(struct bch2_time_stats *);
-
-static inline void bch2_time_stats_quantiles_exit(struct bch2_time_stats_quantiles *statq)
-{
- bch2_time_stats_exit(&statq->stats);
-}
-static inline void bch2_time_stats_quantiles_init(struct bch2_time_stats_quantiles *statq)
-{
- bch2_time_stats_init(&statq->stats);
- statq->stats.have_quantiles = true;
- memset(&statq->quantiles, 0, sizeof(statq->quantiles));
-}
-
-#endif /* _BCACHEFS_TIME_STATS_H */
diff --git a/fs/bcachefs/trace.c b/fs/bcachefs/trace.c
deleted file mode 100644
index dfad1d06633d..000000000000
--- a/fs/bcachefs/trace.c
+++ /dev/null
@@ -1,18 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "bcachefs.h"
-#include "alloc_types.h"
-#include "buckets.h"
-#include "btree_cache.h"
-#include "btree_iter.h"
-#include "btree_key_cache.h"
-#include "btree_locking.h"
-#include "btree_update_interior.h"
-#include "keylist.h"
-#include "move_types.h"
-#include "opts.h"
-#include "six.h"
-
-#include <linux/blktrace_api.h>
-
-#define CREATE_TRACE_POINTS
-#include "trace.h"
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
deleted file mode 100644
index 5597b9d6297f..000000000000
--- a/fs/bcachefs/trace.h
+++ /dev/null
@@ -1,1905 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM bcachefs
-
-#if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
-
-#include <linux/tracepoint.h>
-
-#define TRACE_BPOS_entries(name) \
- __field(u64, name##_inode ) \
- __field(u64, name##_offset ) \
- __field(u32, name##_snapshot )
-
-#define TRACE_BPOS_assign(dst, src) \
- __entry->dst##_inode = (src).inode; \
- __entry->dst##_offset = (src).offset; \
- __entry->dst##_snapshot = (src).snapshot
-
-DECLARE_EVENT_CLASS(bpos,
- TP_PROTO(const struct bpos *p),
- TP_ARGS(p),
-
- TP_STRUCT__entry(
- TRACE_BPOS_entries(p)
- ),
-
- TP_fast_assign(
- TRACE_BPOS_assign(p, *p);
- ),
-
- TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
-);
-
-DECLARE_EVENT_CLASS(fs_str,
- TP_PROTO(struct bch_fs *c, const char *str),
- TP_ARGS(c, str),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __string(str, str )
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- __assign_str(str);
- ),
-
- TP_printk("%d,%d\n%s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
-);
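
The event classes above only define the record layout; each DEFINE_EVENT(class, name, ...) further down generates an actual tracepoint that the filesystem code fires as trace_name(). A hedged sketch of a call site for one of the fs_str events defined later in this file, with the message text a placeholder:

/* for DEFINE_EVENT(fs_str, journal_entry_full, ...) below */
if (trace_journal_entry_full_enabled())
	trace_journal_entry_full(c, "example journal-entry description");
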
-
-DECLARE_EVENT_CLASS(trans_str,
- TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
- TP_ARGS(trans, caller_ip, str),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __array(char, trans_fn, 32 )
- __field(unsigned long, caller_ip )
- __string(str, str )
- ),
-
- TP_fast_assign(
- __entry->dev = trans->c->dev;
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- __entry->caller_ip = caller_ip;
- __assign_str(str);
- ),
-
- TP_printk("%d,%d %s %pS %s",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
-);
-
-DECLARE_EVENT_CLASS(trans_str_nocaller,
- TP_PROTO(struct btree_trans *trans, const char *str),
- TP_ARGS(trans, str),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __array(char, trans_fn, 32 )
- __string(str, str )
- ),
-
- TP_fast_assign(
- __entry->dev = trans->c->dev;
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- __assign_str(str);
- ),
-
- TP_printk("%d,%d %s %s",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->trans_fn, __get_str(str))
-);
-
-DECLARE_EVENT_CLASS(btree_node_nofs,
- TP_PROTO(struct bch_fs *c, struct btree *b),
- TP_ARGS(c, b),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(u8, level )
- __field(u8, btree_id )
- TRACE_BPOS_entries(pos)
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- __entry->level = b->c.level;
- __entry->btree_id = b->c.btree_id;
- TRACE_BPOS_assign(pos, b->key.k.p);
- ),
-
- TP_printk("%d,%d %u %s %llu:%llu:%u",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->level,
- bch2_btree_id_str(__entry->btree_id),
- __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
-);
-
-DECLARE_EVENT_CLASS(btree_node,
- TP_PROTO(struct btree_trans *trans, struct btree *b),
- TP_ARGS(trans, b),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __array(char, trans_fn, 32 )
- __field(u8, level )
- __field(u8, btree_id )
- TRACE_BPOS_entries(pos)
- ),
-
- TP_fast_assign(
- __entry->dev = trans->c->dev;
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- __entry->level = b->c.level;
- __entry->btree_id = b->c.btree_id;
- TRACE_BPOS_assign(pos, b->key.k.p);
- ),
-
- TP_printk("%d,%d %s %u %s %llu:%llu:%u",
- MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn,
- __entry->level,
- bch2_btree_id_str(__entry->btree_id),
- __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
-);
-
-DECLARE_EVENT_CLASS(bch_fs,
- TP_PROTO(struct bch_fs *c),
- TP_ARGS(c),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- ),
-
- TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
-);
-
-DECLARE_EVENT_CLASS(btree_trans,
- TP_PROTO(struct btree_trans *trans),
- TP_ARGS(trans),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __array(char, trans_fn, 32 )
- ),
-
- TP_fast_assign(
- __entry->dev = trans->c->dev;
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- ),
-
- TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn)
-);
-
-DECLARE_EVENT_CLASS(bio,
- TP_PROTO(struct bio *bio),
- TP_ARGS(bio),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(sector_t, sector )
- __field(unsigned int, nr_sector )
- __array(char, rwbs, 6 )
- ),
-
- TP_fast_assign(
- __entry->dev = bio->bi_bdev ? bio_dev(bio) : 0;
- __entry->sector = bio->bi_iter.bi_sector;
- __entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
- ),
-
- TP_printk("%d,%d %s %llu + %u",
- MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
- (unsigned long long)__entry->sector, __entry->nr_sector)
-);
-
-/* fs.c: */
-TRACE_EVENT(bch2_sync_fs,
- TP_PROTO(struct super_block *sb, int wait),
-
- TP_ARGS(sb, wait),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, wait )
-
- ),
-
- TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->wait = wait;
- ),
-
- TP_printk("dev %d,%d wait %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->wait)
-);
-
-/* fs-io.c: */
-TRACE_EVENT(bch2_fsync,
- TP_PROTO(struct file *file, int datasync),
-
- TP_ARGS(file, datasync),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( ino_t, parent )
- __field( int, datasync )
- ),
-
- TP_fast_assign(
- struct dentry *dentry = file->f_path.dentry;
-
- __entry->dev = dentry->d_sb->s_dev;
- __entry->ino = d_inode(dentry)->i_ino;
- __entry->parent = d_inode(dentry->d_parent)->i_ino;
- __entry->datasync = datasync;
- ),
-
- TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long) __entry->parent, __entry->datasync)
-);
-
-/* super-io.c: */
-TRACE_EVENT(write_super,
- TP_PROTO(struct bch_fs *c, unsigned long ip),
- TP_ARGS(c, ip),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(unsigned long, ip )
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- __entry->ip = ip;
- ),
-
- TP_printk("%d,%d for %pS",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (void *) __entry->ip)
-);
-
-/* io.c: */
-
-DEFINE_EVENT(bio, read_promote,
- TP_PROTO(struct bio *bio),
- TP_ARGS(bio)
-);
-
-TRACE_EVENT(read_nopromote,
- TP_PROTO(struct bch_fs *c, int ret),
- TP_ARGS(c, ret),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __array(char, ret, 32 )
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
- ),
-
- TP_printk("%d,%d ret %s",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->ret)
-);
-
-DEFINE_EVENT(bio, read_bounce,
- TP_PROTO(struct bio *bio),
- TP_ARGS(bio)
-);
-
-DEFINE_EVENT(bio, read_split,
- TP_PROTO(struct bio *bio),
- TP_ARGS(bio)
-);
-
-DEFINE_EVENT(bio, read_retry,
- TP_PROTO(struct bio *bio),
- TP_ARGS(bio)
-);
-
-DEFINE_EVENT(bio, read_reuse_race,
- TP_PROTO(struct bio *bio),
- TP_ARGS(bio)
-);
-
-/* Journal */
-
-DEFINE_EVENT(bch_fs, journal_full,
- TP_PROTO(struct bch_fs *c),
- TP_ARGS(c)
-);
-
-DEFINE_EVENT(fs_str, journal_entry_full,
- TP_PROTO(struct bch_fs *c, const char *str),
- TP_ARGS(c, str)
-);
-
-DEFINE_EVENT(fs_str, journal_entry_close,
- TP_PROTO(struct bch_fs *c, const char *str),
- TP_ARGS(c, str)
-);
-
-DEFINE_EVENT(bio, journal_write,
- TP_PROTO(struct bio *bio),
- TP_ARGS(bio)
-);
-
-TRACE_EVENT(journal_reclaim_start,
- TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
- u64 min_nr, u64 min_key_cache,
- u64 btree_cache_dirty, u64 btree_cache_total,
- u64 btree_key_cache_dirty, u64 btree_key_cache_total),
- TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
- btree_cache_dirty, btree_cache_total,
- btree_key_cache_dirty, btree_key_cache_total),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(bool, direct )
- __field(bool, kicked )
- __field(u64, min_nr )
- __field(u64, min_key_cache )
- __field(u64, btree_cache_dirty )
- __field(u64, btree_cache_total )
- __field(u64, btree_key_cache_dirty )
- __field(u64, btree_key_cache_total )
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- __entry->direct = direct;
- __entry->kicked = kicked;
- __entry->min_nr = min_nr;
- __entry->min_key_cache = min_key_cache;
- __entry->btree_cache_dirty = btree_cache_dirty;
- __entry->btree_cache_total = btree_cache_total;
- __entry->btree_key_cache_dirty = btree_key_cache_dirty;
- __entry->btree_key_cache_total = btree_key_cache_total;
- ),
-
- TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu key cache %llu/%llu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->direct,
- __entry->kicked,
- __entry->min_nr,
- __entry->min_key_cache,
- __entry->btree_cache_dirty,
- __entry->btree_cache_total,
- __entry->btree_key_cache_dirty,
- __entry->btree_key_cache_total)
-);
-
-TRACE_EVENT(journal_reclaim_finish,
- TP_PROTO(struct bch_fs *c, u64 nr_flushed),
- TP_ARGS(c, nr_flushed),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(u64, nr_flushed )
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- __entry->nr_flushed = nr_flushed;
- ),
-
- TP_printk("%d,%d flushed %llu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->nr_flushed)
-);
-
-/* bset.c: */
-
-DEFINE_EVENT(bpos, bkey_pack_pos_fail,
- TP_PROTO(const struct bpos *p),
- TP_ARGS(p)
-);
-
-/* Btree cache: */
-
-TRACE_EVENT(btree_cache_scan,
- TP_PROTO(long nr_to_scan, long can_free, long ret),
- TP_ARGS(nr_to_scan, can_free, ret),
-
- TP_STRUCT__entry(
- __field(long, nr_to_scan )
- __field(long, can_free )
- __field(long, ret )
- ),
-
- TP_fast_assign(
- __entry->nr_to_scan = nr_to_scan;
- __entry->can_free = can_free;
- __entry->ret = ret;
- ),
-
- TP_printk("scanned for %li nodes, can free %li, ret %li",
- __entry->nr_to_scan, __entry->can_free, __entry->ret)
-);
-
-DEFINE_EVENT(btree_node_nofs, btree_cache_reap,
- TP_PROTO(struct bch_fs *c, struct btree *b),
- TP_ARGS(c, b)
-);
-
-DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock_fail,
- TP_PROTO(struct btree_trans *trans),
- TP_ARGS(trans)
-);
-
-DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock,
- TP_PROTO(struct btree_trans *trans),
- TP_ARGS(trans)
-);
-
-DEFINE_EVENT(btree_trans, btree_cache_cannibalize,
- TP_PROTO(struct btree_trans *trans),
- TP_ARGS(trans)
-);
-
-DEFINE_EVENT(btree_trans, btree_cache_cannibalize_unlock,
- TP_PROTO(struct btree_trans *trans),
- TP_ARGS(trans)
-);
-
-/* Btree */
-
-DEFINE_EVENT(btree_node, btree_node_read,
- TP_PROTO(struct btree_trans *trans, struct btree *b),
- TP_ARGS(trans, b)
-);
-
-TRACE_EVENT(btree_node_write,
- TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
- TP_ARGS(b, bytes, sectors),
-
- TP_STRUCT__entry(
- __field(enum btree_node_type, type)
- __field(unsigned, bytes )
- __field(unsigned, sectors )
- ),
-
- TP_fast_assign(
- __entry->type = btree_node_type(b);
- __entry->bytes = bytes;
- __entry->sectors = sectors;
- ),
-
- TP_printk("bkey type %u bytes %u sectors %u",
- __entry->type, __entry->bytes, __entry->sectors)
-);
-
-DEFINE_EVENT(btree_node, btree_node_alloc,
- TP_PROTO(struct btree_trans *trans, struct btree *b),
- TP_ARGS(trans, b)
-);
-
-DEFINE_EVENT(btree_node, btree_node_free,
- TP_PROTO(struct btree_trans *trans, struct btree *b),
- TP_ARGS(trans, b)
-);
-
-TRACE_EVENT(btree_reserve_get_fail,
- TP_PROTO(const char *trans_fn,
- unsigned long caller_ip,
- size_t required,
- int ret),
- TP_ARGS(trans_fn, caller_ip, required, ret),
-
- TP_STRUCT__entry(
- __array(char, trans_fn, 32 )
- __field(unsigned long, caller_ip )
- __field(size_t, required )
- __array(char, ret, 32 )
- ),
-
- TP_fast_assign(
- strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
- __entry->caller_ip = caller_ip;
- __entry->required = required;
- strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
- ),
-
- TP_printk("%s %pS required %zu ret %s",
- __entry->trans_fn,
- (void *) __entry->caller_ip,
- __entry->required,
- __entry->ret)
-);
-
-DEFINE_EVENT(btree_node, btree_node_compact,
- TP_PROTO(struct btree_trans *trans, struct btree *b),
- TP_ARGS(trans, b)
-);
-
-DEFINE_EVENT(btree_node, btree_node_merge,
- TP_PROTO(struct btree_trans *trans, struct btree *b),
- TP_ARGS(trans, b)
-);
-
-DEFINE_EVENT(btree_node, btree_node_split,
- TP_PROTO(struct btree_trans *trans, struct btree *b),
- TP_ARGS(trans, b)
-);
-
-DEFINE_EVENT(btree_node, btree_node_rewrite,
- TP_PROTO(struct btree_trans *trans, struct btree *b),
- TP_ARGS(trans, b)
-);
-
-DEFINE_EVENT(btree_node, btree_node_set_root,
- TP_PROTO(struct btree_trans *trans, struct btree *b),
- TP_ARGS(trans, b)
-);
-
-TRACE_EVENT(btree_path_relock_fail,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_path *path,
- unsigned level),
- TP_ARGS(trans, caller_ip, path, level),
-
- TP_STRUCT__entry(
- __array(char, trans_fn, 32 )
- __field(unsigned long, caller_ip )
- __field(u8, btree_id )
- __field(u8, level )
- __field(u8, path_idx)
- TRACE_BPOS_entries(pos)
- __array(char, node, 24 )
- __field(u8, self_read_count )
- __field(u8, self_intent_count)
- __field(u8, read_count )
- __field(u8, intent_count )
- __field(u32, iter_lock_seq )
- __field(u32, node_lock_seq )
- ),
-
- TP_fast_assign(
- struct btree *b = btree_path_node(path, level);
- struct six_lock_count c;
-
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- __entry->caller_ip = caller_ip;
- __entry->btree_id = path->btree_id;
- __entry->level = level;
- __entry->path_idx = path - trans->paths;
- TRACE_BPOS_assign(pos, path->pos);
-
- c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
- __entry->self_read_count = c.n[SIX_LOCK_read];
- __entry->self_intent_count = c.n[SIX_LOCK_intent];
-
- if (IS_ERR(b)) {
- strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
- } else {
- c = six_lock_counts(&path->l[level].b->c.lock);
- __entry->read_count = c.n[SIX_LOCK_read];
- __entry->intent_count = c.n[SIX_LOCK_intent];
- scnprintf(__entry->node, sizeof(__entry->node), "%px", &b->c);
- }
- __entry->iter_lock_seq = path->l[level].lock_seq;
- __entry->node_lock_seq = is_btree_node(path, level)
- ? six_lock_seq(&path->l[level].b->c.lock)
- : 0;
- ),
-
- TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
- __entry->trans_fn,
- (void *) __entry->caller_ip,
- __entry->path_idx,
- bch2_btree_id_str(__entry->btree_id),
- __entry->pos_inode,
- __entry->pos_offset,
- __entry->pos_snapshot,
- __entry->level,
- __entry->node,
- __entry->self_read_count,
- __entry->self_intent_count,
- __entry->read_count,
- __entry->intent_count,
- __entry->iter_lock_seq,
- __entry->node_lock_seq)
-);
-
-TRACE_EVENT(btree_path_upgrade_fail,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_path *path,
- unsigned level),
- TP_ARGS(trans, caller_ip, path, level),
-
- TP_STRUCT__entry(
- __array(char, trans_fn, 32 )
- __field(unsigned long, caller_ip )
- __field(u8, btree_id )
- __field(u8, level )
- __field(u8, path_idx)
- TRACE_BPOS_entries(pos)
- __field(u8, locked )
- __field(u8, self_read_count )
- __field(u8, self_intent_count)
- __field(u8, read_count )
- __field(u8, intent_count )
- __field(u32, iter_lock_seq )
- __field(u32, node_lock_seq )
- ),
-
- TP_fast_assign(
- struct six_lock_count c;
-
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- __entry->caller_ip = caller_ip;
- __entry->btree_id = path->btree_id;
- __entry->level = level;
- __entry->path_idx = path - trans->paths;
- TRACE_BPOS_assign(pos, path->pos);
- __entry->locked = btree_node_locked(path, level);
-
- c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
- __entry->self_read_count = c.n[SIX_LOCK_read];
- __entry->self_intent_count = c.n[SIX_LOCK_intent];
- c = six_lock_counts(&path->l[level].b->c.lock);
- __entry->read_count = c.n[SIX_LOCK_read];
- __entry->intent_count = c.n[SIX_LOCK_intent];
- __entry->iter_lock_seq = path->l[level].lock_seq;
- __entry->node_lock_seq = is_btree_node(path, level)
- ? six_lock_seq(&path->l[level].b->c.lock)
- : 0;
- ),
-
- TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
- __entry->trans_fn,
- (void *) __entry->caller_ip,
- __entry->path_idx,
- bch2_btree_id_str(__entry->btree_id),
- __entry->pos_inode,
- __entry->pos_offset,
- __entry->pos_snapshot,
- __entry->level,
- __entry->locked,
- __entry->self_read_count,
- __entry->self_intent_count,
- __entry->read_count,
- __entry->intent_count,
- __entry->iter_lock_seq,
- __entry->node_lock_seq)
-);
-
-/* Garbage collection */
-
-DEFINE_EVENT(bch_fs, gc_gens_start,
- TP_PROTO(struct bch_fs *c),
- TP_ARGS(c)
-);
-
-DEFINE_EVENT(bch_fs, gc_gens_end,
- TP_PROTO(struct bch_fs *c),
- TP_ARGS(c)
-);
-
-/* Allocator */
-
-DEFINE_EVENT(fs_str, bucket_alloc,
- TP_PROTO(struct bch_fs *c, const char *str),
- TP_ARGS(c, str)
-);
-
-DEFINE_EVENT(fs_str, bucket_alloc_fail,
- TP_PROTO(struct bch_fs *c, const char *str),
- TP_ARGS(c, str)
-);
-
-TRACE_EVENT(discard_buckets,
- TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
- u64 need_journal_commit, u64 discarded, const char *err),
- TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(u64, seen )
- __field(u64, open )
- __field(u64, need_journal_commit )
- __field(u64, discarded )
- __array(char, err, 16 )
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- __entry->seen = seen;
- __entry->open = open;
- __entry->need_journal_commit = need_journal_commit;
- __entry->discarded = discarded;
- strscpy(__entry->err, err, sizeof(__entry->err));
- ),
-
- TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->seen,
- __entry->open,
- __entry->need_journal_commit,
- __entry->discarded,
- __entry->err)
-);
-
-TRACE_EVENT(bucket_invalidate,
- TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
- TP_ARGS(c, dev, bucket, sectors),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(u32, dev_idx )
- __field(u32, sectors )
- __field(u64, bucket )
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- __entry->dev_idx = dev;
- __entry->sectors = sectors;
- __entry->bucket = bucket;
- ),
-
- TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->dev_idx, __entry->bucket,
- __entry->sectors)
-);
-
-/* Moving IO */
-
-TRACE_EVENT(bucket_evacuate,
- TP_PROTO(struct bch_fs *c, struct bpos *bucket),
- TP_ARGS(c, bucket),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(u32, dev_idx )
- __field(u64, bucket )
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- __entry->dev_idx = bucket->inode;
- __entry->bucket = bucket->offset;
- ),
-
- TP_printk("%d:%d %u:%llu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->dev_idx, __entry->bucket)
-);
-
-DEFINE_EVENT(fs_str, move_extent,
- TP_PROTO(struct bch_fs *c, const char *str),
- TP_ARGS(c, str)
-);
-
-DEFINE_EVENT(fs_str, move_extent_read,
- TP_PROTO(struct bch_fs *c, const char *str),
- TP_ARGS(c, str)
-);
-
-DEFINE_EVENT(fs_str, move_extent_write,
- TP_PROTO(struct bch_fs *c, const char *str),
- TP_ARGS(c, str)
-);
-
-DEFINE_EVENT(fs_str, move_extent_finish,
- TP_PROTO(struct bch_fs *c, const char *str),
- TP_ARGS(c, str)
-);
-
-DEFINE_EVENT(fs_str, move_extent_fail,
- TP_PROTO(struct bch_fs *c, const char *str),
- TP_ARGS(c, str)
-);
-
-DEFINE_EVENT(fs_str, move_extent_start_fail,
- TP_PROTO(struct bch_fs *c, const char *str),
- TP_ARGS(c, str)
-);
-
-TRACE_EVENT(move_data,
- TP_PROTO(struct bch_fs *c,
- struct bch_move_stats *stats),
- TP_ARGS(c, stats),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(u64, keys_moved )
- __field(u64, keys_raced )
- __field(u64, sectors_seen )
- __field(u64, sectors_moved )
- __field(u64, sectors_raced )
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- __entry->keys_moved = atomic64_read(&stats->keys_moved);
- __entry->keys_raced = atomic64_read(&stats->keys_raced);
- __entry->sectors_seen = atomic64_read(&stats->sectors_seen);
- __entry->sectors_moved = atomic64_read(&stats->sectors_moved);
- __entry->sectors_raced = atomic64_read(&stats->sectors_raced);
- ),
-
- TP_printk("%d,%d keys moved %llu raced %llu"
- "sectors seen %llu moved %llu raced %llu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->keys_moved,
- __entry->keys_raced,
- __entry->sectors_seen,
- __entry->sectors_moved,
- __entry->sectors_raced)
-);
-
-TRACE_EVENT(evacuate_bucket,
- TP_PROTO(struct bch_fs *c, struct bpos *bucket,
- unsigned sectors, unsigned bucket_size,
- u64 fragmentation, int ret),
- TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(u64, member )
- __field(u64, bucket )
- __field(u32, sectors )
- __field(u32, bucket_size )
- __field(u64, fragmentation )
- __field(int, ret )
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- __entry->member = bucket->inode;
- __entry->bucket = bucket->offset;
- __entry->sectors = sectors;
- __entry->bucket_size = bucket_size;
- __entry->fragmentation = fragmentation;
- __entry->ret = ret;
- ),
-
- TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->member, __entry->bucket,
- __entry->sectors, __entry->bucket_size,
- __entry->fragmentation, __entry->ret)
-);
-
-TRACE_EVENT(copygc,
- TP_PROTO(struct bch_fs *c,
- u64 sectors_moved, u64 sectors_not_moved,
- u64 buckets_moved, u64 buckets_not_moved),
- TP_ARGS(c,
- sectors_moved, sectors_not_moved,
- buckets_moved, buckets_not_moved),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(u64, sectors_moved )
- __field(u64, sectors_not_moved )
- __field(u64, buckets_moved )
- __field(u64, buckets_not_moved )
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- __entry->sectors_moved = sectors_moved;
- __entry->sectors_not_moved = sectors_not_moved;
- __entry->buckets_moved = buckets_moved;
- __entry->buckets_not_moved = buckets_not_moved;
- ),
-
- TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->sectors_moved, __entry->sectors_not_moved,
- __entry->buckets_moved, __entry->buckets_not_moved)
-);
-
-TRACE_EVENT(copygc_wait,
- TP_PROTO(struct bch_fs *c,
- u64 wait_amount, u64 until),
- TP_ARGS(c, wait_amount, until),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(u64, wait_amount )
- __field(u64, until )
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- __entry->wait_amount = wait_amount;
- __entry->until = until;
- ),
-
- TP_printk("%d,%u waiting for %llu sectors until %llu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->wait_amount, __entry->until)
-);
-
-/* btree transactions: */
-
-DECLARE_EVENT_CLASS(transaction_event,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip),
- TP_ARGS(trans, caller_ip),
-
- TP_STRUCT__entry(
- __array(char, trans_fn, 32 )
- __field(unsigned long, caller_ip )
- ),
-
- TP_fast_assign(
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- __entry->caller_ip = caller_ip;
- ),
-
- TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
-);
-
-DEFINE_EVENT(transaction_event, transaction_commit,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip),
- TP_ARGS(trans, caller_ip)
-);
-
-DEFINE_EVENT(transaction_event, trans_restart_injected,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip),
- TP_ARGS(trans, caller_ip)
-);
-
-TRACE_EVENT(trans_restart_split_race,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree *b),
- TP_ARGS(trans, caller_ip, b),
-
- TP_STRUCT__entry(
- __array(char, trans_fn, 32 )
- __field(unsigned long, caller_ip )
- __field(u8, level )
- __field(u16, written )
- __field(u16, blocks )
- __field(u16, u64s_remaining )
- ),
-
- TP_fast_assign(
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- __entry->caller_ip = caller_ip;
- __entry->level = b->c.level;
- __entry->written = b->written;
- __entry->blocks = btree_blocks(trans->c);
- __entry->u64s_remaining = bch2_btree_keys_u64s_remaining(b);
- ),
-
- TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
- __entry->trans_fn, (void *) __entry->caller_ip,
- __entry->level,
- __entry->written, __entry->blocks,
- __entry->u64s_remaining)
-);
-
-TRACE_EVENT(trans_blocked_journal_reclaim,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip),
- TP_ARGS(trans, caller_ip),
-
- TP_STRUCT__entry(
- __array(char, trans_fn, 32 )
- __field(unsigned long, caller_ip )
-
- __field(unsigned long, key_cache_nr_keys )
- __field(unsigned long, key_cache_nr_dirty )
- __field(long, must_wait )
- ),
-
- TP_fast_assign(
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- __entry->caller_ip = caller_ip;
- __entry->key_cache_nr_keys = atomic_long_read(&trans->c->btree_key_cache.nr_keys);
- __entry->key_cache_nr_dirty = atomic_long_read(&trans->c->btree_key_cache.nr_dirty);
- __entry->must_wait = __bch2_btree_key_cache_must_wait(trans->c);
- ),
-
- TP_printk("%s %pS key cache keys %lu dirty %lu must_wait %li",
- __entry->trans_fn, (void *) __entry->caller_ip,
- __entry->key_cache_nr_keys,
- __entry->key_cache_nr_dirty,
- __entry->must_wait)
-);
-
-TRACE_EVENT(trans_restart_journal_preres_get,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- unsigned flags),
- TP_ARGS(trans, caller_ip, flags),
-
- TP_STRUCT__entry(
- __array(char, trans_fn, 32 )
- __field(unsigned long, caller_ip )
- __field(unsigned, flags )
- ),
-
- TP_fast_assign(
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- __entry->caller_ip = caller_ip;
- __entry->flags = flags;
- ),
-
- TP_printk("%s %pS %x", __entry->trans_fn,
- (void *) __entry->caller_ip,
- __entry->flags)
-);
-
-DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip),
- TP_ARGS(trans, caller_ip)
-);
-
-DEFINE_EVENT(transaction_event, trans_traverse_all,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip),
- TP_ARGS(trans, caller_ip)
-);
-
-DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip),
- TP_ARGS(trans, caller_ip)
-);
-
-DEFINE_EVENT(trans_str, trans_restart_too_many_iters,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- const char *paths),
- TP_ARGS(trans, caller_ip, paths)
-);
-
-DECLARE_EVENT_CLASS(transaction_restart_iter,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_path *path),
- TP_ARGS(trans, caller_ip, path),
-
- TP_STRUCT__entry(
- __array(char, trans_fn, 32 )
- __field(unsigned long, caller_ip )
- __field(u8, btree_id )
- TRACE_BPOS_entries(pos)
- ),
-
- TP_fast_assign(
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- __entry->caller_ip = caller_ip;
- __entry->btree_id = path->btree_id;
- TRACE_BPOS_assign(pos, path->pos)
- ),
-
- TP_printk("%s %pS btree %s pos %llu:%llu:%u",
- __entry->trans_fn,
- (void *) __entry->caller_ip,
- bch2_btree_id_str(__entry->btree_id),
- __entry->pos_inode,
- __entry->pos_offset,
- __entry->pos_snapshot)
-);
-
-DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_reused,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_path *path),
- TP_ARGS(trans, caller_ip, path)
-);
-
-DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_path *path),
- TP_ARGS(trans, caller_ip, path)
-);
-
-TRACE_EVENT(trans_restart_upgrade,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_path *path,
- unsigned old_locks_want,
- unsigned new_locks_want,
- struct get_locks_fail *f),
- TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),
-
- TP_STRUCT__entry(
- __array(char, trans_fn, 32 )
- __field(unsigned long, caller_ip )
- __field(u8, btree_id )
- __field(u8, old_locks_want )
- __field(u8, new_locks_want )
- __field(u8, level )
- __field(u32, path_seq )
- __field(u32, node_seq )
- TRACE_BPOS_entries(pos)
- ),
-
- TP_fast_assign(
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- __entry->caller_ip = caller_ip;
- __entry->btree_id = path->btree_id;
- __entry->old_locks_want = old_locks_want;
- __entry->new_locks_want = new_locks_want;
- __entry->level = f->l;
- __entry->path_seq = path->l[f->l].lock_seq;
- __entry->node_seq = IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
- TRACE_BPOS_assign(pos, path->pos)
- ),
-
- TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u",
- __entry->trans_fn,
- (void *) __entry->caller_ip,
- bch2_btree_id_str(__entry->btree_id),
- __entry->pos_inode,
- __entry->pos_offset,
- __entry->pos_snapshot,
- __entry->old_locks_want,
- __entry->new_locks_want,
- __entry->level,
- __entry->path_seq,
- __entry->node_seq)
-);
-
-DEFINE_EVENT(trans_str, trans_restart_relock,
- TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
- TP_ARGS(trans, caller_ip, str)
-);
-
-DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_next_node,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_path *path),
- TP_ARGS(trans, caller_ip, path)
-);
-
-DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_parent_for_fill,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_path *path),
- TP_ARGS(trans, caller_ip, path)
-);
-
-DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_after_fill,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_path *path),
- TP_ARGS(trans, caller_ip, path)
-);
-
-DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip),
- TP_ARGS(trans, caller_ip)
-);
-
-DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_key_cache_fill,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_path *path),
- TP_ARGS(trans, caller_ip, path)
-);
-
-DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_path *path),
- TP_ARGS(trans, caller_ip, path)
-);
-
-DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path_intent,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_path *path),
- TP_ARGS(trans, caller_ip, path)
-);
-
-DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_path *path),
- TP_ARGS(trans, caller_ip, path)
-);
-
-DEFINE_EVENT(transaction_restart_iter, trans_restart_memory_allocation_failure,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_path *path),
- TP_ARGS(trans, caller_ip, path)
-);
-
-DEFINE_EVENT(trans_str_nocaller, trans_restart_would_deadlock,
- TP_PROTO(struct btree_trans *trans,
- const char *cycle),
- TP_ARGS(trans, cycle)
-);
-
-DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip),
- TP_ARGS(trans, caller_ip)
-);
-
-TRACE_EVENT(trans_restart_would_deadlock_write,
- TP_PROTO(struct btree_trans *trans),
- TP_ARGS(trans),
-
- TP_STRUCT__entry(
- __array(char, trans_fn, 32 )
- ),
-
- TP_fast_assign(
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- ),
-
- TP_printk("%s", __entry->trans_fn)
-);
-
-TRACE_EVENT(trans_restart_mem_realloced,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- unsigned long bytes),
- TP_ARGS(trans, caller_ip, bytes),
-
- TP_STRUCT__entry(
- __array(char, trans_fn, 32 )
- __field(unsigned long, caller_ip )
- __field(unsigned long, bytes )
- ),
-
- TP_fast_assign(
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- __entry->caller_ip = caller_ip;
- __entry->bytes = bytes;
- ),
-
- TP_printk("%s %pS bytes %lu",
- __entry->trans_fn,
- (void *) __entry->caller_ip,
- __entry->bytes)
-);
-
-TRACE_EVENT(trans_restart_key_cache_key_realloced,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_path *path,
- unsigned old_u64s,
- unsigned new_u64s),
- TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),
-
- TP_STRUCT__entry(
- __array(char, trans_fn, 32 )
- __field(unsigned long, caller_ip )
- __field(enum btree_id, btree_id )
- TRACE_BPOS_entries(pos)
- __field(u32, old_u64s )
- __field(u32, new_u64s )
- ),
-
- TP_fast_assign(
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- __entry->caller_ip = caller_ip;
-
- __entry->btree_id = path->btree_id;
- TRACE_BPOS_assign(pos, path->pos);
- __entry->old_u64s = old_u64s;
- __entry->new_u64s = new_u64s;
- ),
-
- TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
- __entry->trans_fn,
- (void *) __entry->caller_ip,
- bch2_btree_id_str(__entry->btree_id),
- __entry->pos_inode,
- __entry->pos_offset,
- __entry->pos_snapshot,
- __entry->old_u64s,
- __entry->new_u64s)
-);
-
-TRACE_EVENT(path_downgrade,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_path *path,
- unsigned old_locks_want),
- TP_ARGS(trans, caller_ip, path, old_locks_want),
-
- TP_STRUCT__entry(
- __array(char, trans_fn, 32 )
- __field(unsigned long, caller_ip )
- __field(unsigned, old_locks_want )
- __field(unsigned, new_locks_want )
- __field(unsigned, btree )
- TRACE_BPOS_entries(pos)
- ),
-
- TP_fast_assign(
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- __entry->caller_ip = caller_ip;
- __entry->old_locks_want = old_locks_want;
- __entry->new_locks_want = path->locks_want;
- __entry->btree = path->btree_id;
- TRACE_BPOS_assign(pos, path->pos);
- ),
-
- TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
- __entry->trans_fn,
- (void *) __entry->caller_ip,
- __entry->old_locks_want,
- __entry->new_locks_want,
- bch2_btree_id_str(__entry->btree),
- __entry->pos_inode,
- __entry->pos_offset,
- __entry->pos_snapshot)
-);
-
-DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip),
- TP_ARGS(trans, caller_ip)
-);
-
-TRACE_EVENT(write_buffer_flush,
- TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
- TP_ARGS(trans, nr, skipped, fast, size),
-
- TP_STRUCT__entry(
- __field(size_t, nr )
- __field(size_t, skipped )
- __field(size_t, fast )
- __field(size_t, size )
- ),
-
- TP_fast_assign(
- __entry->nr = nr;
- __entry->skipped = skipped;
- __entry->fast = fast;
- __entry->size = size;
- ),
-
- TP_printk("%zu/%zu skipped %zu fast %zu",
- __entry->nr, __entry->size, __entry->skipped, __entry->fast)
-);
-
-TRACE_EVENT(write_buffer_flush_sync,
- TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
- TP_ARGS(trans, caller_ip),
-
- TP_STRUCT__entry(
- __array(char, trans_fn, 32 )
- __field(unsigned long, caller_ip )
- ),
-
- TP_fast_assign(
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- __entry->caller_ip = caller_ip;
- ),
-
- TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
-);
-
-TRACE_EVENT(write_buffer_flush_slowpath,
- TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total),
- TP_ARGS(trans, slowpath, total),
-
- TP_STRUCT__entry(
- __field(size_t, slowpath )
- __field(size_t, total )
- ),
-
- TP_fast_assign(
- __entry->slowpath = slowpath;
- __entry->total = total;
- ),
-
- TP_printk("%zu/%zu", __entry->slowpath, __entry->total)
-);
-
-DEFINE_EVENT(fs_str, rebalance_extent,
- TP_PROTO(struct bch_fs *c, const char *str),
- TP_ARGS(c, str)
-);
-
-DEFINE_EVENT(fs_str, data_update,
- TP_PROTO(struct bch_fs *c, const char *str),
- TP_ARGS(c, str)
-);
-
-TRACE_EVENT(error_downcast,
- TP_PROTO(int bch_err, int std_err, unsigned long ip),
- TP_ARGS(bch_err, std_err, ip),
-
- TP_STRUCT__entry(
- __array(char, bch_err, 32 )
- __array(char, std_err, 32 )
- __array(char, ip, 32 )
- ),
-
- TP_fast_assign(
- strscpy(__entry->bch_err, bch2_err_str(bch_err), sizeof(__entry->bch_err));
- strscpy(__entry->std_err, bch2_err_str(std_err), sizeof(__entry->std_err));
- snprintf(__entry->ip, sizeof(__entry->ip), "%ps", (void *) ip);
- ),
-
- TP_printk("%s -> %s %s", __entry->bch_err, __entry->std_err, __entry->ip)
-);
-
-#ifdef CONFIG_BCACHEFS_PATH_TRACEPOINTS
-
-TRACE_EVENT(update_by_path,
- TP_PROTO(struct btree_trans *trans, struct btree_path *path,
- struct btree_insert_entry *i, bool overwrite),
- TP_ARGS(trans, path, i, overwrite),
-
- TP_STRUCT__entry(
- __array(char, trans_fn, 32 )
- __field(btree_path_idx_t, path_idx )
- __field(u8, btree_id )
- TRACE_BPOS_entries(pos)
- __field(u8, overwrite )
- __field(btree_path_idx_t, update_idx )
- __field(btree_path_idx_t, nr_updates )
- ),
-
- TP_fast_assign(
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- __entry->path_idx = path - trans->paths;
- __entry->btree_id = path->btree_id;
- TRACE_BPOS_assign(pos, path->pos);
- __entry->overwrite = overwrite;
- __entry->update_idx = i - trans->updates;
- __entry->nr_updates = trans->nr_updates;
- ),
-
- TP_printk("%s path %3u btree %s pos %llu:%llu:%u overwrite %u update %u/%u",
- __entry->trans_fn,
- __entry->path_idx,
- bch2_btree_id_str(__entry->btree_id),
- __entry->pos_inode,
- __entry->pos_offset,
- __entry->pos_snapshot,
- __entry->overwrite,
- __entry->update_idx,
- __entry->nr_updates)
-);
-
-TRACE_EVENT(btree_path_lock,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_bkey_cached_common *b),
- TP_ARGS(trans, caller_ip, b),
-
- TP_STRUCT__entry(
- __array(char, trans_fn, 32 )
- __field(unsigned long, caller_ip )
- __field(u8, btree_id )
- __field(u8, level )
- __array(char, node, 24 )
- __field(u32, lock_seq )
- ),
-
- TP_fast_assign(
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
- __entry->caller_ip = caller_ip;
- __entry->btree_id = b->btree_id;
- __entry->level = b->level;
-
- scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
- __entry->lock_seq = six_lock_seq(&b->lock);
- ),
-
- TP_printk("%s %pS\nbtree %s level %u node %s lock seq %u",
- __entry->trans_fn,
- (void *) __entry->caller_ip,
- bch2_btree_id_str(__entry->btree_id),
- __entry->level,
- __entry->node,
- __entry->lock_seq)
-);
-
-DECLARE_EVENT_CLASS(btree_path_ev,
- TP_PROTO(struct btree_trans *trans, struct btree_path *path),
- TP_ARGS(trans, path),
-
- TP_STRUCT__entry(
- __field(u16, idx )
- __field(u8, ref )
- __field(u8, btree_id )
- TRACE_BPOS_entries(pos)
- ),
-
- TP_fast_assign(
- __entry->idx = path - trans->paths;
- __entry->ref = path->ref;
- __entry->btree_id = path->btree_id;
- TRACE_BPOS_assign(pos, path->pos);
- ),
-
- TP_printk("path %3u ref %u btree %s pos %llu:%llu:%u",
- __entry->idx, __entry->ref,
- bch2_btree_id_str(__entry->btree_id),
- __entry->pos_inode,
- __entry->pos_offset,
- __entry->pos_snapshot)
-);
-
-DEFINE_EVENT(btree_path_ev, btree_path_get_ll,
- TP_PROTO(struct btree_trans *trans, struct btree_path *path),
- TP_ARGS(trans, path)
-);
-
-DEFINE_EVENT(btree_path_ev, btree_path_put_ll,
- TP_PROTO(struct btree_trans *trans, struct btree_path *path),
- TP_ARGS(trans, path)
-);
-
-DEFINE_EVENT(btree_path_ev, btree_path_should_be_locked,
- TP_PROTO(struct btree_trans *trans, struct btree_path *path),
- TP_ARGS(trans, path)
-);
-
-TRACE_EVENT(btree_path_alloc,
- TP_PROTO(struct btree_trans *trans, struct btree_path *path),
- TP_ARGS(trans, path),
-
- TP_STRUCT__entry(
- __field(btree_path_idx_t, idx )
- __field(u8, locks_want )
- __field(u8, btree_id )
- TRACE_BPOS_entries(pos)
- ),
-
- TP_fast_assign(
- __entry->idx = path - trans->paths;
- __entry->locks_want = path->locks_want;
- __entry->btree_id = path->btree_id;
- TRACE_BPOS_assign(pos, path->pos);
- ),
-
- TP_printk("path %3u btree %s locks_want %u pos %llu:%llu:%u",
- __entry->idx,
- bch2_btree_id_str(__entry->btree_id),
- __entry->locks_want,
- __entry->pos_inode,
- __entry->pos_offset,
- __entry->pos_snapshot)
-);
-
-TRACE_EVENT(btree_path_get,
- TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos),
- TP_ARGS(trans, path, new_pos),
-
- TP_STRUCT__entry(
- __field(btree_path_idx_t, idx )
- __field(u8, ref )
- __field(u8, preserve )
- __field(u8, locks_want )
- __field(u8, btree_id )
- TRACE_BPOS_entries(old_pos)
- TRACE_BPOS_entries(new_pos)
- ),
-
- TP_fast_assign(
- __entry->idx = path - trans->paths;
- __entry->ref = path->ref;
- __entry->preserve = path->preserve;
- __entry->locks_want = path->locks_want;
- __entry->btree_id = path->btree_id;
- TRACE_BPOS_assign(old_pos, path->pos);
- TRACE_BPOS_assign(new_pos, *new_pos);
- ),
-
- TP_printk(" path %3u ref %u preserve %u btree %s locks_want %u pos %llu:%llu:%u -> %llu:%llu:%u",
- __entry->idx,
- __entry->ref,
- __entry->preserve,
- bch2_btree_id_str(__entry->btree_id),
- __entry->locks_want,
- __entry->old_pos_inode,
- __entry->old_pos_offset,
- __entry->old_pos_snapshot,
- __entry->new_pos_inode,
- __entry->new_pos_offset,
- __entry->new_pos_snapshot)
-);
-
-DECLARE_EVENT_CLASS(btree_path_clone,
- TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
- TP_ARGS(trans, path, new),
-
- TP_STRUCT__entry(
- __field(btree_path_idx_t, idx )
- __field(u8, new_idx )
- __field(u8, btree_id )
- __field(u8, ref )
- __field(u8, preserve )
- TRACE_BPOS_entries(pos)
- ),
-
- TP_fast_assign(
- __entry->idx = path - trans->paths;
- __entry->new_idx = new - trans->paths;
- __entry->btree_id = path->btree_id;
- __entry->ref = path->ref;
- __entry->preserve = path->preserve;
- TRACE_BPOS_assign(pos, path->pos);
- ),
-
- TP_printk(" path %3u ref %u preserve %u btree %s %llu:%llu:%u -> %u",
- __entry->idx,
- __entry->ref,
- __entry->preserve,
- bch2_btree_id_str(__entry->btree_id),
- __entry->pos_inode,
- __entry->pos_offset,
- __entry->pos_snapshot,
- __entry->new_idx)
-);
-
-DEFINE_EVENT(btree_path_clone, btree_path_clone,
- TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
- TP_ARGS(trans, path, new)
-);
-
-DEFINE_EVENT(btree_path_clone, btree_path_save_pos,
- TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
- TP_ARGS(trans, path, new)
-);
-
-DECLARE_EVENT_CLASS(btree_path_traverse,
- TP_PROTO(struct btree_trans *trans,
- struct btree_path *path),
- TP_ARGS(trans, path),
-
- TP_STRUCT__entry(
- __array(char, trans_fn, 32 )
- __field(btree_path_idx_t, idx )
- __field(u8, ref )
- __field(u8, preserve )
- __field(u8, should_be_locked )
- __field(u8, btree_id )
- __field(u8, level )
- TRACE_BPOS_entries(pos)
- __field(u8, locks_want )
- __field(u8, nodes_locked )
- __array(char, node0, 24 )
- __array(char, node1, 24 )
- __array(char, node2, 24 )
- __array(char, node3, 24 )
- ),
-
- TP_fast_assign(
- strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
-
- __entry->idx = path - trans->paths;
- __entry->ref = path->ref;
- __entry->preserve = path->preserve;
- __entry->btree_id = path->btree_id;
- __entry->level = path->level;
- TRACE_BPOS_assign(pos, path->pos);
-
- __entry->locks_want = path->locks_want;
- __entry->nodes_locked = path->nodes_locked;
- struct btree *b = path->l[0].b;
- if (IS_ERR(b))
- strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
- else
- scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
-		b = path->l[1].b;
-		if (IS_ERR(b))
-			strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
-		else
-			scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
-		b = path->l[2].b;
-		if (IS_ERR(b))
-			strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
-		else
-			scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
-		b = path->l[3].b;
-		if (IS_ERR(b))
-			strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
-		else
-			scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
- ),
-
- TP_printk("%s\npath %3u ref %u preserve %u btree %s %llu:%llu:%u level %u locks_want %u\n"
- "locks %u %u %u %u node %s %s %s %s",
- __entry->trans_fn,
- __entry->idx,
- __entry->ref,
- __entry->preserve,
- bch2_btree_id_str(__entry->btree_id),
- __entry->pos_inode,
- __entry->pos_offset,
- __entry->pos_snapshot,
- __entry->level,
- __entry->locks_want,
- (__entry->nodes_locked >> 6) & 3,
- (__entry->nodes_locked >> 4) & 3,
- (__entry->nodes_locked >> 2) & 3,
- (__entry->nodes_locked >> 0) & 3,
- __entry->node3,
- __entry->node2,
- __entry->node1,
- __entry->node0)
-);
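
The TP_printk above unpacks the nodes_locked byte two bits at a time, one field per btree level (shifts 6, 4, 2, 0). A minimal standalone sketch of that decoding follows; the lock-type names are illustrative placeholders, not the real bcachefs enum.

/* Decode a nodes_locked byte the way the tracepoint output does: two bits
 * per btree level, level 0 in the low bits. Names below are placeholders. */
#include <stdio.h>
#include <stdint.h>

static const char *lock_type_str[] = { "unlocked", "read", "intent", "write" };

static void decode_nodes_locked(uint8_t nodes_locked)
{
	for (int level = 3; level >= 0; level--)
		printf("level %d: %s\n", level,
		       lock_type_str[(nodes_locked >> (2 * level)) & 3]);
}

int main(void)
{
	decode_nodes_locked(0x09);	/* levels 0 and 1 held, 2 and 3 not */
	return 0;
}
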
-
-DEFINE_EVENT(btree_path_traverse, btree_path_traverse_start,
- TP_PROTO(struct btree_trans *trans,
- struct btree_path *path),
- TP_ARGS(trans, path)
-);
-
-DEFINE_EVENT(btree_path_traverse, btree_path_traverse_end,
- TP_PROTO(struct btree_trans *trans, struct btree_path *path),
- TP_ARGS(trans, path)
-);
-
-TRACE_EVENT(btree_path_set_pos,
- TP_PROTO(struct btree_trans *trans,
- struct btree_path *path,
- struct bpos *new_pos),
- TP_ARGS(trans, path, new_pos),
-
- TP_STRUCT__entry(
- __field(btree_path_idx_t, idx )
- __field(u8, ref )
- __field(u8, preserve )
- __field(u8, btree_id )
- TRACE_BPOS_entries(old_pos)
- TRACE_BPOS_entries(new_pos)
- __field(u8, locks_want )
- __field(u8, nodes_locked )
- __array(char, node0, 24 )
- __array(char, node1, 24 )
- __array(char, node2, 24 )
- __array(char, node3, 24 )
- ),
-
- TP_fast_assign(
- __entry->idx = path - trans->paths;
- __entry->ref = path->ref;
- __entry->preserve = path->preserve;
- __entry->btree_id = path->btree_id;
- TRACE_BPOS_assign(old_pos, path->pos);
- TRACE_BPOS_assign(new_pos, *new_pos);
-
- __entry->nodes_locked = path->nodes_locked;
- struct btree *b = path->l[0].b;
- if (IS_ERR(b))
- strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
- else
- scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
-		b = path->l[1].b;
-		if (IS_ERR(b))
-			strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
-		else
-			scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
-		b = path->l[2].b;
-		if (IS_ERR(b))
-			strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
-		else
-			scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
-		b = path->l[3].b;
-		if (IS_ERR(b))
-			strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
-		else
-			scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
- ),
-
- TP_printk("\npath %3u ref %u preserve %u btree %s %llu:%llu:%u -> %llu:%llu:%u\n"
- "locks %u %u %u %u node %s %s %s %s",
- __entry->idx,
- __entry->ref,
- __entry->preserve,
- bch2_btree_id_str(__entry->btree_id),
- __entry->old_pos_inode,
- __entry->old_pos_offset,
- __entry->old_pos_snapshot,
- __entry->new_pos_inode,
- __entry->new_pos_offset,
- __entry->new_pos_snapshot,
- (__entry->nodes_locked >> 6) & 3,
- (__entry->nodes_locked >> 4) & 3,
- (__entry->nodes_locked >> 2) & 3,
- (__entry->nodes_locked >> 0) & 3,
- __entry->node3,
- __entry->node2,
- __entry->node1,
- __entry->node0)
-);
-
-TRACE_EVENT(btree_path_free,
- TP_PROTO(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup),
- TP_ARGS(trans, path, dup),
-
- TP_STRUCT__entry(
- __field(btree_path_idx_t, idx )
- __field(u8, preserve )
- __field(u8, should_be_locked)
- __field(s8, dup )
- __field(u8, dup_locked )
- ),
-
- TP_fast_assign(
- __entry->idx = path;
- __entry->preserve = trans->paths[path].preserve;
- __entry->should_be_locked = trans->paths[path].should_be_locked;
- __entry->dup = dup ? dup - trans->paths : -1;
- __entry->dup_locked = dup ? btree_node_locked(dup, dup->level) : 0;
- ),
-
- TP_printk(" path %3u %c %c dup %2i locked %u", __entry->idx,
- __entry->preserve ? 'P' : ' ',
- __entry->should_be_locked ? 'S' : ' ',
- __entry->dup,
- __entry->dup_locked)
-);
-
-TRACE_EVENT(btree_path_free_trans_begin,
- TP_PROTO(btree_path_idx_t path),
- TP_ARGS(path),
-
- TP_STRUCT__entry(
- __field(btree_path_idx_t, idx )
- ),
-
- TP_fast_assign(
- __entry->idx = path;
- ),
-
- TP_printk(" path %3u", __entry->idx)
-);
-
-#else /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
-#ifndef _TRACE_BCACHEFS_H
-
-static inline void trace_update_by_path(struct btree_trans *trans, struct btree_path *path,
- struct btree_insert_entry *i, bool overwrite) {}
-static inline void trace_btree_path_lock(struct btree_trans *trans, unsigned long caller_ip, struct btree_bkey_cached_common *b) {}
-static inline void trace_btree_path_get_ll(struct btree_trans *trans, struct btree_path *path) {}
-static inline void trace_btree_path_put_ll(struct btree_trans *trans, struct btree_path *path) {}
-static inline void trace_btree_path_should_be_locked(struct btree_trans *trans, struct btree_path *path) {}
-static inline void trace_btree_path_alloc(struct btree_trans *trans, struct btree_path *path) {}
-static inline void trace_btree_path_get(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
-static inline void trace_btree_path_clone(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
-static inline void trace_btree_path_save_pos(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
-static inline void trace_btree_path_traverse_start(struct btree_trans *trans, struct btree_path *path) {}
-static inline void trace_btree_path_traverse_end(struct btree_trans *trans, struct btree_path *path) {}
-static inline void trace_btree_path_set_pos(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
-static inline void trace_btree_path_free(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup) {}
-static inline void trace_btree_path_free_trans_begin(btree_path_idx_t path) {}
-
-#endif
-#endif /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
-
-#define _TRACE_BCACHEFS_H
-#endif /* _TRACE_BCACHEFS_H */
-
-/* This part must be outside protection */
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../fs/bcachefs
-
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE trace
-
-#include <trace/define_trace.h>
diff --git a/fs/bcachefs/two_state_shared_lock.c b/fs/bcachefs/two_state_shared_lock.c
deleted file mode 100644
index 9764c2e6a910..000000000000
--- a/fs/bcachefs/two_state_shared_lock.c
+++ /dev/null
@@ -1,8 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "two_state_shared_lock.h"
-
-void __bch2_two_state_lock(two_state_lock_t *lock, int s)
-{
- __wait_event(lock->wait, bch2_two_state_trylock(lock, s));
-}
diff --git a/fs/bcachefs/two_state_shared_lock.h b/fs/bcachefs/two_state_shared_lock.h
deleted file mode 100644
index 7f647846b511..000000000000
--- a/fs/bcachefs/two_state_shared_lock.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_TWO_STATE_LOCK_H
-#define _BCACHEFS_TWO_STATE_LOCK_H
-
-#include <linux/atomic.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-
-#include "util.h"
-
-/*
- * Two-state lock - can be taken for add or block - both states are shared,
- * like the read side of an rwsem, but each state conflicts with the other:
- */
-typedef struct {
- atomic_long_t v;
- wait_queue_head_t wait;
-} two_state_lock_t;
-
-static inline void two_state_lock_init(two_state_lock_t *lock)
-{
- atomic_long_set(&lock->v, 0);
- init_waitqueue_head(&lock->wait);
-}
-
-static inline void bch2_two_state_unlock(two_state_lock_t *lock, int s)
-{
- long i = s ? 1 : -1;
-
- EBUG_ON(atomic_long_read(&lock->v) == 0);
-
- if (atomic_long_sub_return_release(i, &lock->v) == 0)
- wake_up_all(&lock->wait);
-}
-
-static inline bool bch2_two_state_trylock(two_state_lock_t *lock, int s)
-{
- long i = s ? 1 : -1;
- long old;
-
- old = atomic_long_read(&lock->v);
- do {
- if (i > 0 ? old < 0 : old > 0)
- return false;
- } while (!atomic_long_try_cmpxchg_acquire(&lock->v, &old, old + i));
-
- return true;
-}
-
-void __bch2_two_state_lock(two_state_lock_t *, int);
-
-static inline void bch2_two_state_lock(two_state_lock_t *lock, int s)
-{
- if (!bch2_two_state_trylock(lock, s))
- __bch2_two_state_lock(lock, s);
-}
-
-#endif /* _BCACHEFS_TWO_STATE_LOCK_H */
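
The two-state lock removed above admits any number of holders in one state while excluding the other state entirely, tracked by the sign of a single atomic counter. Below is a hedged userspace re-implementation of the same counting scheme using C11 atomics; the kernel's waitqueue is replaced by a plain spin, and the names are this sketch's, not the bcachefs API.

/* Userspace sketch of the two-state counting scheme: state 1 holders push the
 * counter positive, state 0 holders push it negative; a trylock fails while
 * the counter has the opposite sign. Spinning stands in for the waitqueue. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct { atomic_long v; } two_state_lock;

static bool two_state_trylock(two_state_lock *lock, int s)
{
	long i = s ? 1 : -1;
	long old = atomic_load(&lock->v);

	do {
		if (i > 0 ? old < 0 : old > 0)
			return false;		/* other state currently held */
	} while (!atomic_compare_exchange_weak(&lock->v, &old, old + i));

	return true;
}

static void two_state_lock_blocking(two_state_lock *lock, int s)
{
	while (!two_state_trylock(lock, s))
		;				/* the kernel version sleeps on a waitqueue here */
}

static void two_state_unlock(two_state_lock *lock, int s)
{
	atomic_fetch_sub(&lock->v, s ? 1 : -1);
}

int main(void)
{
	two_state_lock lock = { 0 };

	two_state_lock_blocking(&lock, 1);
	printf("trylock other state: %d (expect 0)\n", two_state_trylock(&lock, 0));
	two_state_unlock(&lock, 1);
	printf("trylock other state: %d (expect 1)\n", two_state_trylock(&lock, 0));
	return 0;
}
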
diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c
deleted file mode 100644
index e0a876cbaa6b..000000000000
--- a/fs/bcachefs/util.c
+++ /dev/null
@@ -1,887 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * random utility code, for bcache but in theory not specific to bcache
- *
- * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
- * Copyright 2012 Google, Inc.
- */
-
-#include <linux/bio.h>
-#include <linux/blkdev.h>
-#include <linux/console.h>
-#include <linux/ctype.h>
-#include <linux/debugfs.h>
-#include <linux/freezer.h>
-#include <linux/kthread.h>
-#include <linux/log2.h>
-#include <linux/math64.h>
-#include <linux/percpu.h>
-#include <linux/preempt.h>
-#include <linux/random.h>
-#include <linux/seq_file.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/sched/clock.h>
-
-#include "eytzinger.h"
-#include "mean_and_variance.h"
-#include "util.h"
-
-static const char si_units[] = "?kMGTPEZY";
-
-/* string_get_size units: */
-static const char *const units_2[] = {
- "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"
-};
-static const char *const units_10[] = {
- "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"
-};
-
-static int parse_u64(const char *cp, u64 *res)
-{
- const char *start = cp;
- u64 v = 0;
-
- if (!isdigit(*cp))
- return -EINVAL;
-
- do {
- if (v > U64_MAX / 10)
- return -ERANGE;
- v *= 10;
- if (v > U64_MAX - (*cp - '0'))
- return -ERANGE;
- v += *cp - '0';
- cp++;
- } while (isdigit(*cp));
-
- *res = v;
- return cp - start;
-}
-
-static int bch2_pow(u64 n, u64 p, u64 *res)
-{
- *res = 1;
-
- while (p--) {
- if (*res > div64_u64(U64_MAX, n))
- return -ERANGE;
- *res *= n;
- }
- return 0;
-}
-
-static int parse_unit_suffix(const char *cp, u64 *res)
-{
- const char *start = cp;
- u64 base = 1024;
- unsigned u;
- int ret;
-
- if (*cp == ' ')
- cp++;
-
- for (u = 1; u < strlen(si_units); u++)
- if (*cp == si_units[u]) {
- cp++;
- goto got_unit;
- }
-
- for (u = 0; u < ARRAY_SIZE(units_2); u++)
- if (!strncmp(cp, units_2[u], strlen(units_2[u]))) {
- cp += strlen(units_2[u]);
- goto got_unit;
- }
-
- for (u = 0; u < ARRAY_SIZE(units_10); u++)
- if (!strncmp(cp, units_10[u], strlen(units_10[u]))) {
- cp += strlen(units_10[u]);
- base = 1000;
- goto got_unit;
- }
-
- *res = 1;
- return 0;
-got_unit:
- ret = bch2_pow(base, u, res);
- if (ret)
- return ret;
-
- return cp - start;
-}
-
-#define parse_or_ret(cp, _f) \
-do { \
- int _ret = _f; \
- if (_ret < 0) \
- return _ret; \
- cp += _ret; \
-} while (0)
-
-static int __bch2_strtou64_h(const char *cp, u64 *res)
-{
- const char *start = cp;
- u64 v = 0, b, f_n = 0, f_d = 1;
- int ret;
-
- parse_or_ret(cp, parse_u64(cp, &v));
-
- if (*cp == '.') {
- cp++;
- ret = parse_u64(cp, &f_n);
- if (ret < 0)
- return ret;
- cp += ret;
-
- ret = bch2_pow(10, ret, &f_d);
- if (ret)
- return ret;
- }
-
- parse_or_ret(cp, parse_unit_suffix(cp, &b));
-
- if (v > div64_u64(U64_MAX, b))
- return -ERANGE;
- v *= b;
-
- if (f_n > div64_u64(U64_MAX, b))
- return -ERANGE;
-
- f_n = div64_u64(f_n * b, f_d);
- if (v + f_n < v)
- return -ERANGE;
- v += f_n;
-
- *res = v;
- return cp - start;
-}
-
-static int __bch2_strtoh(const char *cp, u64 *res,
- u64 t_max, bool t_signed)
-{
- bool positive = *cp != '-';
- u64 v = 0;
-
- if (*cp == '+' || *cp == '-')
- cp++;
-
- parse_or_ret(cp, __bch2_strtou64_h(cp, &v));
-
- if (*cp == '\n')
- cp++;
- if (*cp)
- return -EINVAL;
-
- if (positive) {
- if (v > t_max)
- return -ERANGE;
- } else {
- if (v && !t_signed)
- return -ERANGE;
-
- if (v > t_max + 1)
- return -ERANGE;
- v = -v;
- }
-
- *res = v;
- return 0;
-}
-
-#define STRTO_H(name, type) \
-int bch2_ ## name ## _h(const char *cp, type *res) \
-{ \
- u64 v = 0; \
- int ret = __bch2_strtoh(cp, &v, ANYSINT_MAX(type), \
- ANYSINT_MAX(type) != ((type) ~0ULL)); \
- *res = v; \
- return ret; \
-}
-
-STRTO_H(strtoint, int)
-STRTO_H(strtouint, unsigned int)
-STRTO_H(strtoll, long long)
-STRTO_H(strtoull, unsigned long long)
-STRTO_H(strtou64, u64)
-
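
The helpers above parse a human-readable number with an optional decimal fraction and an SI or IEC suffix ("k", "MiB", "GB", ...), scaling by 1024 or 1000 as appropriate. The userspace sketch below reduces this to "<integer><optional IEC suffix>" with base-1024 scaling only; parse_size() is a name invented for the demo, not the bcachefs API, and overflow checking is omitted.

/* Sketch of the human-readable size parsing above, whole numbers plus an
 * optional IEC suffix, scaled by powers of 1024. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <ctype.h>

static int parse_size(const char *s, uint64_t *res)
{
	static const char *const units[] = { "B", "KiB", "MiB", "GiB", "TiB" };
	uint64_t v = 0;
	unsigned u;

	if (!isdigit((unsigned char) *s))
		return -1;
	while (isdigit((unsigned char) *s))
		v = v * 10 + (*s++ - '0');	/* no overflow check in this sketch */

	if (!*s) {				/* no suffix: plain integer */
		*res = v;
		return 0;
	}

	for (u = 0; u < sizeof(units) / sizeof(units[0]); u++)
		if (!strcmp(s, units[u])) {
			*res = v << (10 * u);	/* scale by 1024^u */
			return 0;
		}
	return -1;
}

int main(void)
{
	uint64_t v;

	if (!parse_size("16MiB", &v))
		printf("16MiB -> %llu\n", (unsigned long long) v);	/* 16777216 */
	return 0;
}
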
-u64 bch2_read_flag_list(const char *opt, const char * const list[])
-{
- u64 ret = 0;
- char *p, *s, *d = kstrdup(opt, GFP_KERNEL);
-
- if (!d)
- return -ENOMEM;
-
- s = strim(d);
-
- while ((p = strsep(&s, ",;"))) {
- int flag = match_string(list, -1, p);
-
- if (flag < 0) {
- ret = -1;
- break;
- }
-
- ret |= BIT_ULL(flag);
- }
-
- kfree(d);
-
- return ret;
-}
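
bch2_read_flag_list() splits the option string on ',' or ';' and ORs together one bit per recognized name. The sketch below reproduces that shape in userspace with strsep(); the flag names are placeholders, not a real bcachefs option list.

/* Sketch of comma/semicolon-separated flag parsing into a bitmask. */
#define _GNU_SOURCE			/* strsep, strdup */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static const char *const flag_names[] = { "alpha", "beta", "gamma", NULL };

static int64_t read_flag_list(const char *opt)
{
	char *d = strdup(opt), *s = d, *p;
	int64_t ret = 0;

	if (!d)
		return -1;

	while ((p = strsep(&s, ",;"))) {
		int flag = -1;

		for (int i = 0; flag_names[i]; i++)
			if (!strcmp(p, flag_names[i]))
				flag = i;

		if (flag < 0) {			/* unrecognized name */
			ret = -1;
			break;
		}
		ret |= 1LL << flag;
	}

	free(d);
	return ret;
}

int main(void)
{
	printf("alpha,gamma -> %#llx\n",
	       (unsigned long long) read_flag_list("alpha,gamma"));	/* 0x5 */
	return 0;
}
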
-
-bool bch2_is_zero(const void *_p, size_t n)
-{
- const char *p = _p;
- size_t i;
-
- for (i = 0; i < n; i++)
- if (p[i])
- return false;
- return true;
-}
-
-void bch2_prt_u64_base2_nbits(struct printbuf *out, u64 v, unsigned nr_bits)
-{
- while (nr_bits)
- prt_char(out, '0' + ((v >> --nr_bits) & 1));
-}
-
-void bch2_prt_u64_base2(struct printbuf *out, u64 v)
-{
- bch2_prt_u64_base2_nbits(out, v, fls64(v) ?: 1);
-}
-
-static void __bch2_print_string_as_lines(const char *prefix, const char *lines,
- bool nonblocking)
-{
- bool locked = false;
- const char *p;
-
- if (!lines) {
- printk("%s (null)\n", prefix);
- return;
- }
-
- if (!nonblocking) {
- console_lock();
- locked = true;
- } else {
- locked = console_trylock();
- }
-
- while (1) {
- p = strchrnul(lines, '\n');
- printk("%s%.*s\n", prefix, (int) (p - lines), lines);
- if (!*p)
- break;
- lines = p + 1;
- }
- if (locked)
- console_unlock();
-}
-
-void bch2_print_string_as_lines(const char *prefix, const char *lines)
-{
- return __bch2_print_string_as_lines(prefix, lines, false);
-}
-
-void bch2_print_string_as_lines_nonblocking(const char *prefix, const char *lines)
-{
- return __bch2_print_string_as_lines(prefix, lines, true);
-}
-
-int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task, unsigned skipnr,
- gfp_t gfp)
-{
-#ifdef CONFIG_STACKTRACE
- unsigned nr_entries = 0;
-
- stack->nr = 0;
- int ret = darray_make_room_gfp(stack, 32, gfp);
- if (ret)
- return ret;
-
- if (!down_read_trylock(&task->signal->exec_update_lock))
- return -1;
-
- do {
- nr_entries = stack_trace_save_tsk(task, stack->data, stack->size, skipnr + 1);
- } while (nr_entries == stack->size &&
- !(ret = darray_make_room_gfp(stack, stack->size * 2, gfp)));
-
- stack->nr = nr_entries;
- up_read(&task->signal->exec_update_lock);
-
- return ret;
-#else
- return 0;
-#endif
-}
-
-void bch2_prt_backtrace(struct printbuf *out, bch_stacktrace *stack)
-{
- darray_for_each(*stack, i) {
- prt_printf(out, "[<0>] %pB", (void *) *i);
- prt_newline(out);
- }
-}
-
-int bch2_prt_task_backtrace(struct printbuf *out, struct task_struct *task, unsigned skipnr, gfp_t gfp)
-{
- bch_stacktrace stack = { 0 };
- int ret = bch2_save_backtrace(&stack, task, skipnr + 1, gfp);
-
- bch2_prt_backtrace(out, &stack);
- darray_exit(&stack);
- return ret;
-}
-
-#ifndef __KERNEL__
-#include <time.h>
-void bch2_prt_datetime(struct printbuf *out, time64_t sec)
-{
- time_t t = sec;
- char buf[64];
- ctime_r(&t, buf);
- strim(buf);
- prt_str(out, buf);
-}
-#else
-void bch2_prt_datetime(struct printbuf *out, time64_t sec)
-{
- char buf[64];
- snprintf(buf, sizeof(buf), "%ptT", &sec);
- prt_u64(out, sec);
-}
-#endif
-
-void bch2_pr_time_units(struct printbuf *out, u64 ns)
-{
- const struct time_unit *u = bch2_pick_time_units(ns);
-
- prt_printf(out, "%llu %s", div64_u64(ns, u->nsecs), u->name);
-}
-
-static void bch2_pr_time_units_aligned(struct printbuf *out, u64 ns)
-{
- const struct time_unit *u = bch2_pick_time_units(ns);
-
- prt_printf(out, "%llu \r%s", div64_u64(ns, u->nsecs), u->name);
-}
-
-static inline void pr_name_and_units(struct printbuf *out, const char *name, u64 ns)
-{
- prt_printf(out, "%s\t", name);
- bch2_pr_time_units_aligned(out, ns);
- prt_newline(out);
-}
-
-#define TABSTOP_SIZE 12
-
-void bch2_time_stats_to_text(struct printbuf *out, struct bch2_time_stats *stats)
-{
- struct quantiles *quantiles = time_stats_to_quantiles(stats);
- s64 f_mean = 0, d_mean = 0;
- u64 f_stddev = 0, d_stddev = 0;
-
- if (stats->buffer) {
- int cpu;
-
- spin_lock_irq(&stats->lock);
- for_each_possible_cpu(cpu)
- __bch2_time_stats_clear_buffer(stats, per_cpu_ptr(stats->buffer, cpu));
- spin_unlock_irq(&stats->lock);
- }
-
- /*
- * avoid divide by zero
- */
- if (stats->freq_stats.n) {
- f_mean = mean_and_variance_get_mean(stats->freq_stats);
- f_stddev = mean_and_variance_get_stddev(stats->freq_stats);
- d_mean = mean_and_variance_get_mean(stats->duration_stats);
- d_stddev = mean_and_variance_get_stddev(stats->duration_stats);
- }
-
- printbuf_tabstop_push(out, out->indent + TABSTOP_SIZE);
- prt_printf(out, "count:\t%llu\n", stats->duration_stats.n);
- printbuf_tabstop_pop(out);
-
- printbuf_tabstops_reset(out);
-
- printbuf_tabstop_push(out, out->indent + 20);
- printbuf_tabstop_push(out, TABSTOP_SIZE + 2);
- printbuf_tabstop_push(out, 0);
- printbuf_tabstop_push(out, TABSTOP_SIZE + 2);
-
- prt_printf(out, "\tsince mount\r\trecent\r\n");
-
- printbuf_tabstops_reset(out);
- printbuf_tabstop_push(out, out->indent + 20);
- printbuf_tabstop_push(out, TABSTOP_SIZE);
- printbuf_tabstop_push(out, 2);
- printbuf_tabstop_push(out, TABSTOP_SIZE);
-
- prt_printf(out, "duration of events\n");
- printbuf_indent_add(out, 2);
-
- pr_name_and_units(out, "min:", stats->min_duration);
- pr_name_and_units(out, "max:", stats->max_duration);
- pr_name_and_units(out, "total:", stats->total_duration);
-
- prt_printf(out, "mean:\t");
- bch2_pr_time_units_aligned(out, d_mean);
- prt_tab(out);
- bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_mean(stats->duration_stats_weighted, TIME_STATS_MV_WEIGHT));
- prt_newline(out);
-
- prt_printf(out, "stddev:\t");
- bch2_pr_time_units_aligned(out, d_stddev);
- prt_tab(out);
- bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_stddev(stats->duration_stats_weighted, TIME_STATS_MV_WEIGHT));
-
- printbuf_indent_sub(out, 2);
- prt_newline(out);
-
- prt_printf(out, "time between events\n");
- printbuf_indent_add(out, 2);
-
- pr_name_and_units(out, "min:", stats->min_freq);
- pr_name_and_units(out, "max:", stats->max_freq);
-
- prt_printf(out, "mean:\t");
- bch2_pr_time_units_aligned(out, f_mean);
- prt_tab(out);
- bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_mean(stats->freq_stats_weighted, TIME_STATS_MV_WEIGHT));
- prt_newline(out);
-
- prt_printf(out, "stddev:\t");
- bch2_pr_time_units_aligned(out, f_stddev);
- prt_tab(out);
- bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_stddev(stats->freq_stats_weighted, TIME_STATS_MV_WEIGHT));
-
- printbuf_indent_sub(out, 2);
- prt_newline(out);
-
- printbuf_tabstops_reset(out);
-
- if (quantiles) {
- int i = eytzinger0_first(NR_QUANTILES);
- const struct time_unit *u =
- bch2_pick_time_units(quantiles->entries[i].m);
- u64 last_q = 0;
-
- prt_printf(out, "quantiles (%s):\t", u->name);
- eytzinger0_for_each(i, NR_QUANTILES) {
- bool is_last = eytzinger0_next(i, NR_QUANTILES) == -1;
-
- u64 q = max(quantiles->entries[i].m, last_q);
- prt_printf(out, "%llu ", div64_u64(q, u->nsecs));
- if (is_last)
- prt_newline(out);
- last_q = q;
- }
- }
-}
-
-/* ratelimit: */
-
-/**
- * bch2_ratelimit_delay() - return how long to delay until the next time to do
- * some work
- * @d: the struct bch_ratelimit to update
- * Returns: the amount of time to delay by, in jiffies
- */
-u64 bch2_ratelimit_delay(struct bch_ratelimit *d)
-{
- u64 now = local_clock();
-
- return time_after64(d->next, now)
- ? nsecs_to_jiffies(d->next - now)
- : 0;
-}
-
-/**
- * bch2_ratelimit_increment() - increment @d by the amount of work done
- * @d: the struct bch_ratelimit to update
- * @done: the amount of work done, in arbitrary units
- */
-void bch2_ratelimit_increment(struct bch_ratelimit *d, u64 done)
-{
- u64 now = local_clock();
-
- d->next += div_u64(done * NSEC_PER_SEC, d->rate);
-
- if (time_before64(now + NSEC_PER_SEC, d->next))
- d->next = now + NSEC_PER_SEC;
-
- if (time_after64(now - NSEC_PER_SEC * 2, d->next))
- d->next = now - NSEC_PER_SEC * 2;
-}
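
The ratelimit above is a deadline scheme: each unit of completed work pushes `next` forward by 1/rate seconds, and the delay is simply how far `next` lies in the future, with the schedule clamped so it neither accumulates more than a second of pending delay nor builds up more than two seconds of burst credit. A self-contained sketch with the clock passed in explicitly instead of local_clock():

/* Sketch of the deadline-based ratelimit above; all times in nanoseconds. */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

struct ratelimit {
	uint64_t next;		/* next time we may do work */
	uint64_t rate;		/* units of work per second */
};

static uint64_t ratelimit_delay(struct ratelimit *d, uint64_t now)
{
	return d->next > now ? d->next - now : 0;
}

static void ratelimit_increment(struct ratelimit *d, uint64_t done, uint64_t now)
{
	d->next += done * NSEC_PER_SEC / d->rate;

	/* keep the schedule within [now - 2s, now + 1s], as the original does */
	if (d->next > now + NSEC_PER_SEC)
		d->next = now + NSEC_PER_SEC;
	if (d->next + NSEC_PER_SEC * 2 < now)
		d->next = now - NSEC_PER_SEC * 2;
}

int main(void)
{
	struct ratelimit d = { .next = 0, .rate = 1000 };	/* 1000 units/sec */
	uint64_t now = 0;

	ratelimit_increment(&d, 500, now);	/* 500 units -> 0.5s of budget */
	printf("delay: %llu ns\n", (unsigned long long) ratelimit_delay(&d, now));
	return 0;
}
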
-
-/* pd controller: */
-
-/*
- * Updates pd_controller. Attempts to scale input values to units per second.
- * @target: desired value
- * @actual: current value
- *
- * @sign: 1 or -1; 1 if increasing the rate makes actual go up, -1 if increasing
- * it makes actual go down.
- */
-void bch2_pd_controller_update(struct bch_pd_controller *pd,
- s64 target, s64 actual, int sign)
-{
- s64 proportional, derivative, change;
-
- unsigned long seconds_since_update = (jiffies - pd->last_update) / HZ;
-
- if (seconds_since_update == 0)
- return;
-
- pd->last_update = jiffies;
-
- proportional = actual - target;
- proportional *= seconds_since_update;
- proportional = div_s64(proportional, pd->p_term_inverse);
-
- derivative = actual - pd->last_actual;
- derivative = div_s64(derivative, seconds_since_update);
- derivative = ewma_add(pd->smoothed_derivative, derivative,
- (pd->d_term / seconds_since_update) ?: 1);
- derivative = derivative * pd->d_term;
- derivative = div_s64(derivative, pd->p_term_inverse);
-
- change = proportional + derivative;
-
- /* Don't increase rate if not keeping up */
- if (change > 0 &&
- pd->backpressure &&
- time_after64(local_clock(),
- pd->rate.next + NSEC_PER_MSEC))
- change = 0;
-
- change *= (sign * -1);
-
- pd->rate.rate = clamp_t(s64, (s64) pd->rate.rate + change,
- 1, UINT_MAX);
-
- pd->last_actual = actual;
- pd->last_derivative = derivative;
- pd->last_proportional = proportional;
- pd->last_change = change;
- pd->last_target = target;
-}
-
-void bch2_pd_controller_init(struct bch_pd_controller *pd)
-{
- pd->rate.rate = 1024;
- pd->last_update = jiffies;
- pd->p_term_inverse = 6000;
- pd->d_term = 30;
- pd->d_smooth = pd->d_term;
- pd->backpressure = 1;
-}
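
The controller above combines a proportional term (current error, scaled down by p_term_inverse) with a derivative term (rate of change of the error), flips the sign as requested, and nudges the rate by the result. The sketch below is a deliberately simplified userspace version of one update step: the EWMA smoothing of the derivative and the backpressure check are omitted, and the struct and function names are this demo's, not the kernel's.

/* Simplified sketch of one PD-controller step in the style of
 * bch2_pd_controller_update(). */
#include <stdio.h>
#include <stdint.h>
#include <limits.h>

struct pd_controller {
	int64_t rate;			/* units per second */
	int64_t last_actual;
	unsigned p_term_inverse;	/* larger -> gentler response */
	unsigned d_term;
};

static void pd_update(struct pd_controller *pd, int64_t target, int64_t actual,
		      int sign, unsigned seconds)
{
	int64_t proportional = (actual - target) * seconds / pd->p_term_inverse;
	int64_t derivative = (actual - pd->last_actual) / seconds;
	int64_t change;

	derivative = derivative * pd->d_term / pd->p_term_inverse;
	change = (proportional + derivative) * -sign;

	pd->rate += change;
	if (pd->rate < 1)
		pd->rate = 1;
	if (pd->rate > UINT_MAX)
		pd->rate = UINT_MAX;
	pd->last_actual = actual;
}

int main(void)
{
	struct pd_controller pd = {
		.rate = 1024, .p_term_inverse = 6000, .d_term = 30,
	};

	/* actual above target, sign = -1 (raising the rate lowers actual):
	 * the rate should rise */
	pd_update(&pd, 1000, 61000, -1, 1);
	printf("rate: %lld\n", (long long) pd.rate);
	return 0;
}
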
-
-void bch2_pd_controller_debug_to_text(struct printbuf *out, struct bch_pd_controller *pd)
-{
- if (!out->nr_tabstops)
- printbuf_tabstop_push(out, 20);
-
- prt_printf(out, "rate:\t");
- prt_human_readable_s64(out, pd->rate.rate);
- prt_newline(out);
-
- prt_printf(out, "target:\t");
- prt_human_readable_u64(out, pd->last_target);
- prt_newline(out);
-
- prt_printf(out, "actual:\t");
- prt_human_readable_u64(out, pd->last_actual);
- prt_newline(out);
-
- prt_printf(out, "proportional:\t");
- prt_human_readable_s64(out, pd->last_proportional);
- prt_newline(out);
-
- prt_printf(out, "derivative:\t");
- prt_human_readable_s64(out, pd->last_derivative);
- prt_newline(out);
-
- prt_printf(out, "change:\t");
- prt_human_readable_s64(out, pd->last_change);
- prt_newline(out);
-
- prt_printf(out, "next io:\t%llims\n", div64_s64(pd->rate.next - local_clock(), NSEC_PER_MSEC));
-}
-
-/* misc: */
-
-void bch2_bio_map(struct bio *bio, void *base, size_t size)
-{
- while (size) {
- struct page *page = is_vmalloc_addr(base)
- ? vmalloc_to_page(base)
- : virt_to_page(base);
- unsigned offset = offset_in_page(base);
- unsigned len = min_t(size_t, PAGE_SIZE - offset, size);
-
- BUG_ON(!bio_add_page(bio, page, len, offset));
- size -= len;
- base += len;
- }
-}
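
bch2_bio_map() walks an arbitrary kernel buffer and adds it to the bio one page at a time, with a shorter first chunk when the buffer is not page aligned. The standalone sketch below shows only the chunking arithmetic, printing the chunks instead of calling bio_add_page(); PAGE_SIZE is fixed at 4096 for the demo.

/* Page-chunking loop in the style of bch2_bio_map(). */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	uintptr_t base = 0x1000f00;	/* deliberately not page aligned */
	size_t size = 10000;

	while (size) {
		size_t offset = base & (PAGE_SIZE - 1);
		size_t len = PAGE_SIZE - offset < size ? PAGE_SIZE - offset : size;

		printf("page %#lx offset %4zu len %4zu\n",
		       (unsigned long) (base & ~(PAGE_SIZE - 1)), offset, len);

		size -= len;
		base += len;
	}
	return 0;
}
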
-
-int bch2_bio_alloc_pages(struct bio *bio, size_t size, gfp_t gfp_mask)
-{
- while (size) {
- struct page *page = alloc_pages(gfp_mask, 0);
- unsigned len = min_t(size_t, PAGE_SIZE, size);
-
- if (!page)
- return -ENOMEM;
-
- if (unlikely(!bio_add_page(bio, page, len, 0))) {
- __free_page(page);
- break;
- }
-
- size -= len;
- }
-
- return 0;
-}
-
-size_t bch2_rand_range(size_t max)
-{
- size_t rand;
-
- if (!max)
- return 0;
-
- do {
- rand = get_random_long();
- rand &= roundup_pow_of_two(max) - 1;
- } while (rand >= max);
-
- return rand;
-}
-
-void memcpy_to_bio(struct bio *dst, struct bvec_iter dst_iter, const void *src)
-{
- struct bio_vec bv;
- struct bvec_iter iter;
-
- __bio_for_each_segment(bv, dst, iter, dst_iter) {
- void *dstp = kmap_local_page(bv.bv_page);
-
- memcpy(dstp + bv.bv_offset, src, bv.bv_len);
- kunmap_local(dstp);
-
- src += bv.bv_len;
- }
-}
-
-void memcpy_from_bio(void *dst, struct bio *src, struct bvec_iter src_iter)
-{
- struct bio_vec bv;
- struct bvec_iter iter;
-
- __bio_for_each_segment(bv, src, iter, src_iter) {
- void *srcp = kmap_local_page(bv.bv_page);
-
- memcpy(dst, srcp + bv.bv_offset, bv.bv_len);
- kunmap_local(srcp);
-
- dst += bv.bv_len;
- }
-}
-
-#if 0
-void eytzinger1_test(void)
-{
- unsigned inorder, eytz, size;
-
- pr_info("1 based eytzinger test:");
-
- for (size = 2;
- size < 65536;
- size++) {
- unsigned extra = eytzinger1_extra(size);
-
- if (!(size % 4096))
- pr_info("tree size %u", size);
-
- BUG_ON(eytzinger1_prev(0, size) != eytzinger1_last(size));
- BUG_ON(eytzinger1_next(0, size) != eytzinger1_first(size));
-
- BUG_ON(eytzinger1_prev(eytzinger1_first(size), size) != 0);
- BUG_ON(eytzinger1_next(eytzinger1_last(size), size) != 0);
-
- inorder = 1;
- eytzinger1_for_each(eytz, size) {
- BUG_ON(__inorder_to_eytzinger1(inorder, size, extra) != eytz);
- BUG_ON(__eytzinger1_to_inorder(eytz, size, extra) != inorder);
- BUG_ON(eytz != eytzinger1_last(size) &&
- eytzinger1_prev(eytzinger1_next(eytz, size), size) != eytz);
-
- inorder++;
- }
- }
-}
-
-void eytzinger0_test(void)
-{
-
- unsigned inorder, eytz, size;
-
- pr_info("0 based eytzinger test:");
-
- for (size = 1;
- size < 65536;
- size++) {
- unsigned extra = eytzinger0_extra(size);
-
- if (!(size % 4096))
- pr_info("tree size %u", size);
-
- BUG_ON(eytzinger0_prev(-1, size) != eytzinger0_last(size));
- BUG_ON(eytzinger0_next(-1, size) != eytzinger0_first(size));
-
- BUG_ON(eytzinger0_prev(eytzinger0_first(size), size) != -1);
- BUG_ON(eytzinger0_next(eytzinger0_last(size), size) != -1);
-
- inorder = 0;
- eytzinger0_for_each(eytz, size) {
- BUG_ON(__inorder_to_eytzinger0(inorder, size, extra) != eytz);
- BUG_ON(__eytzinger0_to_inorder(eytz, size, extra) != inorder);
- BUG_ON(eytz != eytzinger0_last(size) &&
- eytzinger0_prev(eytzinger0_next(eytz, size), size) != eytz);
-
- inorder++;
- }
- }
-}
-
-static inline int cmp_u16(const void *_l, const void *_r, size_t size)
-{
- const u16 *l = _l, *r = _r;
-
-	return (*l > *r) - (*l < *r);
-}
-
-static void eytzinger0_find_test_val(u16 *test_array, unsigned nr, u16 search)
-{
- int i, c1 = -1, c2 = -1;
- ssize_t r;
-
- r = eytzinger0_find_le(test_array, nr,
- sizeof(test_array[0]),
- cmp_u16, &search);
- if (r >= 0)
- c1 = test_array[r];
-
- for (i = 0; i < nr; i++)
- if (test_array[i] <= search && test_array[i] > c2)
- c2 = test_array[i];
-
- if (c1 != c2) {
- eytzinger0_for_each(i, nr)
- pr_info("[%3u] = %12u", i, test_array[i]);
- pr_info("find_le(%2u) -> [%2zi] = %2i should be %2i",
- i, r, c1, c2);
- }
-}
-
-void eytzinger0_find_test(void)
-{
- unsigned i, nr, allocated = 1 << 12;
- u16 *test_array = kmalloc_array(allocated, sizeof(test_array[0]), GFP_KERNEL);
-
- for (nr = 1; nr < allocated; nr++) {
- pr_info("testing %u elems", nr);
-
- get_random_bytes(test_array, nr * sizeof(test_array[0]));
- eytzinger0_sort(test_array, nr, sizeof(test_array[0]), cmp_u16, NULL);
-
- /* verify array is sorted correctly: */
- eytzinger0_for_each(i, nr)
- BUG_ON(i != eytzinger0_last(nr) &&
- test_array[i] > test_array[eytzinger0_next(i, nr)]);
-
- for (i = 0; i < U16_MAX; i += 1 << 12)
- eytzinger0_find_test_val(test_array, nr, i);
-
- for (i = 0; i < nr; i++) {
- eytzinger0_find_test_val(test_array, nr, test_array[i] - 1);
- eytzinger0_find_test_val(test_array, nr, test_array[i]);
- eytzinger0_find_test_val(test_array, nr, test_array[i] + 1);
- }
- }
-
- kfree(test_array);
-}
-#endif
-
-/*
- * Accumulate percpu counters onto one cpu's copy - only valid when concurrent
- * access to the percpu counters is excluded by other locking
- */
-u64 *bch2_acc_percpu_u64s(u64 __percpu *p, unsigned nr)
-{
- u64 *ret;
- int cpu;
-
- /* access to pcpu vars has to be blocked by other locking */
- preempt_disable();
- ret = this_cpu_ptr(p);
- preempt_enable();
-
- for_each_possible_cpu(cpu) {
- u64 *i = per_cpu_ptr(p, cpu);
-
- if (i != ret) {
- acc_u64s(ret, i, nr);
- memset(i, 0, nr * sizeof(u64));
- }
- }
-
- return ret;
-}
-
-void bch2_darray_str_exit(darray_str *d)
-{
- darray_for_each(*d, i)
- kfree(*i);
- darray_exit(d);
-}
-
-int bch2_split_devs(const char *_dev_name, darray_str *ret)
-{
- darray_init(ret);
-
- char *dev_name, *s, *orig;
-
- dev_name = orig = kstrdup(_dev_name, GFP_KERNEL);
- if (!dev_name)
- return -ENOMEM;
-
- while ((s = strsep(&dev_name, ":"))) {
- char *p = kstrdup(s, GFP_KERNEL);
- if (!p)
- goto err;
-
- if (darray_push(ret, p)) {
- kfree(p);
- goto err;
- }
- }
-
- kfree(orig);
- return 0;
-err:
- bch2_darray_str_exit(ret);
- kfree(orig);
- return -ENOMEM;
-}
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
deleted file mode 100644
index fb02c1c36004..000000000000
--- a/fs/bcachefs/util.h
+++ /dev/null
@@ -1,699 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_UTIL_H
-#define _BCACHEFS_UTIL_H
-
-#include <linux/bio.h>
-#include <linux/blkdev.h>
-#include <linux/closure.h>
-#include <linux/errno.h>
-#include <linux/freezer.h>
-#include <linux/kernel.h>
-#include <linux/min_heap.h>
-#include <linux/sched/clock.h>
-#include <linux/llist.h>
-#include <linux/log2.h>
-#include <linux/percpu.h>
-#include <linux/preempt.h>
-#include <linux/ratelimit.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/workqueue.h>
-
-#include "mean_and_variance.h"
-
-#include "darray.h"
-#include "time_stats.h"
-
-struct closure;
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-#define EBUG_ON(cond) BUG_ON(cond)
-#else
-#define EBUG_ON(cond)
-#endif
-
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-#define CPU_BIG_ENDIAN 0
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-#define CPU_BIG_ENDIAN 1
-#endif
-
-/* type hackery */
-
-#define type_is_exact(_val, _type) \
- __builtin_types_compatible_p(typeof(_val), _type)
-
-#define type_is(_val, _type) \
- (__builtin_types_compatible_p(typeof(_val), _type) || \
- __builtin_types_compatible_p(typeof(_val), const _type))
-
-/* Userspace doesn't align allocations as nicely as the kernel allocators: */
-static inline size_t buf_pages(void *p, size_t len)
-{
- return DIV_ROUND_UP(len +
- ((unsigned long) p & (PAGE_SIZE - 1)),
- PAGE_SIZE);
-}
-
-#define init_heap(heap, _size, gfp) \
-({ \
- (heap)->nr = 0; \
- (heap)->size = (_size); \
- (heap)->data = kvmalloc((heap)->size * sizeof((heap)->data[0]),\
- (gfp)); \
-})
-
-#define free_heap(heap) \
-do { \
- kvfree((heap)->data); \
- (heap)->data = NULL; \
-} while (0)
-
-#define ANYSINT_MAX(t) \
- ((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
-
-#include "printbuf.h"
-
-#define prt_vprintf(_out, ...) bch2_prt_vprintf(_out, __VA_ARGS__)
-#define prt_printf(_out, ...) bch2_prt_printf(_out, __VA_ARGS__)
-#define printbuf_str(_buf) bch2_printbuf_str(_buf)
-#define printbuf_exit(_buf) bch2_printbuf_exit(_buf)
-
-#define printbuf_tabstops_reset(_buf) bch2_printbuf_tabstops_reset(_buf)
-#define printbuf_tabstop_pop(_buf) bch2_printbuf_tabstop_pop(_buf)
-#define printbuf_tabstop_push(_buf, _n) bch2_printbuf_tabstop_push(_buf, _n)
-
-#define printbuf_indent_add(_out, _n) bch2_printbuf_indent_add(_out, _n)
-#define printbuf_indent_sub(_out, _n) bch2_printbuf_indent_sub(_out, _n)
-
-#define prt_newline(_out) bch2_prt_newline(_out)
-#define prt_tab(_out) bch2_prt_tab(_out)
-#define prt_tab_rjust(_out) bch2_prt_tab_rjust(_out)
-
-#define prt_bytes_indented(...) bch2_prt_bytes_indented(__VA_ARGS__)
-#define prt_u64(_out, _v) prt_printf(_out, "%llu", (u64) (_v))
-#define prt_human_readable_u64(...) bch2_prt_human_readable_u64(__VA_ARGS__)
-#define prt_human_readable_s64(...) bch2_prt_human_readable_s64(__VA_ARGS__)
-#define prt_units_u64(...) bch2_prt_units_u64(__VA_ARGS__)
-#define prt_units_s64(...) bch2_prt_units_s64(__VA_ARGS__)
-#define prt_string_option(...) bch2_prt_string_option(__VA_ARGS__)
-#define prt_bitflags(...) bch2_prt_bitflags(__VA_ARGS__)
-#define prt_bitflags_vector(...) bch2_prt_bitflags_vector(__VA_ARGS__)
-
-void bch2_pr_time_units(struct printbuf *, u64);
-void bch2_prt_datetime(struct printbuf *, time64_t);
-
-#ifdef __KERNEL__
-static inline void uuid_unparse_lower(u8 *uuid, char *out)
-{
- sprintf(out, "%pUb", uuid);
-}
-#else
-#include <uuid/uuid.h>
-#endif
-
-static inline void pr_uuid(struct printbuf *out, u8 *uuid)
-{
- char uuid_str[40];
-
- uuid_unparse_lower(uuid, uuid_str);
- prt_printf(out, "%s", uuid_str);
-}
-
-int bch2_strtoint_h(const char *, int *);
-int bch2_strtouint_h(const char *, unsigned int *);
-int bch2_strtoll_h(const char *, long long *);
-int bch2_strtoull_h(const char *, unsigned long long *);
-int bch2_strtou64_h(const char *, u64 *);
-
-static inline int bch2_strtol_h(const char *cp, long *res)
-{
-#if BITS_PER_LONG == 32
- return bch2_strtoint_h(cp, (int *) res);
-#else
- return bch2_strtoll_h(cp, (long long *) res);
-#endif
-}
-
-static inline int bch2_strtoul_h(const char *cp, long *res)
-{
-#if BITS_PER_LONG == 32
- return bch2_strtouint_h(cp, (unsigned int *) res);
-#else
- return bch2_strtoull_h(cp, (unsigned long long *) res);
-#endif
-}
-
-#define strtoi_h(cp, res) \
- ( type_is(*res, int) ? bch2_strtoint_h(cp, (void *) res)\
- : type_is(*res, long) ? bch2_strtol_h(cp, (void *) res)\
- : type_is(*res, long long) ? bch2_strtoll_h(cp, (void *) res)\
- : type_is(*res, unsigned) ? bch2_strtouint_h(cp, (void *) res)\
- : type_is(*res, unsigned long) ? bch2_strtoul_h(cp, (void *) res)\
- : type_is(*res, unsigned long long) ? bch2_strtoull_h(cp, (void *) res)\
- : -EINVAL)
-
-#define strtoul_safe(cp, var) \
-({ \
- unsigned long _v; \
- int _r = kstrtoul(cp, 10, &_v); \
- if (!_r) \
- var = _v; \
- _r; \
-})
-
-#define strtoul_safe_clamp(cp, var, min, max) \
-({ \
- unsigned long _v; \
- int _r = kstrtoul(cp, 10, &_v); \
- if (!_r) \
- var = clamp_t(typeof(var), _v, min, max); \
- _r; \
-})
-
-#define strtoul_safe_restrict(cp, var, min, max) \
-({ \
- unsigned long _v; \
- int _r = kstrtoul(cp, 10, &_v); \
- if (!_r && _v >= min && _v <= max) \
- var = _v; \
- else \
- _r = -EINVAL; \
- _r; \
-})
-
-#define snprint(out, var) \
- prt_printf(out, \
- type_is(var, int) ? "%i\n" \
- : type_is(var, unsigned) ? "%u\n" \
- : type_is(var, long) ? "%li\n" \
- : type_is(var, unsigned long) ? "%lu\n" \
- : type_is(var, s64) ? "%lli\n" \
- : type_is(var, u64) ? "%llu\n" \
- : type_is(var, char *) ? "%s\n" \
- : "%i\n", var)
-
-bool bch2_is_zero(const void *, size_t);
-
-u64 bch2_read_flag_list(const char *, const char * const[]);
-
-void bch2_prt_u64_base2_nbits(struct printbuf *, u64, unsigned);
-void bch2_prt_u64_base2(struct printbuf *, u64);
-
-void bch2_print_string_as_lines(const char *prefix, const char *lines);
-void bch2_print_string_as_lines_nonblocking(const char *prefix, const char *lines);
-
-typedef DARRAY(unsigned long) bch_stacktrace;
-int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned, gfp_t);
-void bch2_prt_backtrace(struct printbuf *, bch_stacktrace *);
-int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *, unsigned, gfp_t);
-
-static inline void prt_bdevname(struct printbuf *out, struct block_device *bdev)
-{
-#ifdef __KERNEL__
- prt_printf(out, "%pg", bdev);
-#else
- prt_str(out, bdev->name);
-#endif
-}
-
-void bch2_time_stats_to_text(struct printbuf *, struct bch2_time_stats *);
-
-#define ewma_add(ewma, val, weight) \
-({ \
- typeof(ewma) _ewma = (ewma); \
- typeof(weight) _weight = (weight); \
- \
- (((_ewma << _weight) - _ewma) + (val)) >> _weight; \
-})
-
-struct bch_ratelimit {
- /* Next time we want to do some work, in nanoseconds */
- u64 next;
-
- /*
- * Rate at which we want to do work, in units per nanosecond
- * The units here correspond to the units passed to
- * bch2_ratelimit_increment()
- */
- unsigned rate;
-};
-
-static inline void bch2_ratelimit_reset(struct bch_ratelimit *d)
-{
- d->next = local_clock();
-}
-
-u64 bch2_ratelimit_delay(struct bch_ratelimit *);
-void bch2_ratelimit_increment(struct bch_ratelimit *, u64);
-
-struct bch_pd_controller {
- struct bch_ratelimit rate;
- unsigned long last_update;
-
- s64 last_actual;
- s64 smoothed_derivative;
-
- unsigned p_term_inverse;
- unsigned d_smooth;
- unsigned d_term;
-
- /* for exporting to sysfs (no effect on behavior) */
- s64 last_derivative;
- s64 last_proportional;
- s64 last_change;
- s64 last_target;
-
- /*
- * If true, the rate will not increase if bch2_ratelimit_delay()
- * is not being called often enough.
- */
- bool backpressure;
-};
-
-void bch2_pd_controller_update(struct bch_pd_controller *, s64, s64, int);
-void bch2_pd_controller_init(struct bch_pd_controller *);
-void bch2_pd_controller_debug_to_text(struct printbuf *, struct bch_pd_controller *);
-
-#define sysfs_pd_controller_attribute(name) \
- rw_attribute(name##_rate); \
- rw_attribute(name##_rate_bytes); \
- rw_attribute(name##_rate_d_term); \
- rw_attribute(name##_rate_p_term_inverse); \
- read_attribute(name##_rate_debug)
-
-#define sysfs_pd_controller_files(name) \
- &sysfs_##name##_rate, \
- &sysfs_##name##_rate_bytes, \
- &sysfs_##name##_rate_d_term, \
- &sysfs_##name##_rate_p_term_inverse, \
- &sysfs_##name##_rate_debug
-
-#define sysfs_pd_controller_show(name, var) \
-do { \
- sysfs_hprint(name##_rate, (var)->rate.rate); \
- sysfs_print(name##_rate_bytes, (var)->rate.rate); \
- sysfs_print(name##_rate_d_term, (var)->d_term); \
- sysfs_print(name##_rate_p_term_inverse, (var)->p_term_inverse); \
- \
- if (attr == &sysfs_##name##_rate_debug) \
- bch2_pd_controller_debug_to_text(out, var); \
-} while (0)
-
-#define sysfs_pd_controller_store(name, var) \
-do { \
- sysfs_strtoul_clamp(name##_rate, \
- (var)->rate.rate, 1, UINT_MAX); \
- sysfs_strtoul_clamp(name##_rate_bytes, \
- (var)->rate.rate, 1, UINT_MAX); \
- sysfs_strtoul(name##_rate_d_term, (var)->d_term); \
- sysfs_strtoul_clamp(name##_rate_p_term_inverse, \
- (var)->p_term_inverse, 1, INT_MAX); \
-} while (0)
-
-#define container_of_or_null(ptr, type, member) \
-({ \
- typeof(ptr) _ptr = ptr; \
- _ptr ? container_of(_ptr, type, member) : NULL; \
-})
-
-/* Does linear interpolation between powers of two */
-static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
-{
- unsigned fract = x & ~(~0 << fract_bits);
-
- x >>= fract_bits;
- x = 1 << x;
- x += (x * fract) >> fract_bits;
-
- return x;
-}
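
fract_exp_two() treats x as a fixed-point exponent with fract_bits fractional bits and returns roughly 2^x, linearly interpolating between the two neighbouring powers of two. A quick standalone check of that behaviour:

/* Standalone copy of fract_exp_two() plus a few spot checks: with 8
 * fractional bits, 0x180 is an exponent of 1.5, and linear interpolation
 * between 2^1 and 2^2 gives 3. */
#include <stdio.h>

static unsigned fract_exp_two(unsigned x, unsigned fract_bits)
{
	unsigned fract = x & ~(~0U << fract_bits);

	x >>= fract_bits;
	x = 1U << x;
	x += (x * fract) >> fract_bits;

	return x;
}

int main(void)
{
	printf("2^1.0  ~ %u\n", fract_exp_two(0x100, 8));	/* 2 */
	printf("2^1.5  ~ %u\n", fract_exp_two(0x180, 8));	/* 3 */
	printf("2^3.25 ~ %u\n", fract_exp_two(0x340, 8));	/* 10 */
	return 0;
}
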
-
-void bch2_bio_map(struct bio *bio, void *base, size_t);
-int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);
-
-#define closure_bio_submit(bio, cl) \
-do { \
- closure_get(cl); \
- submit_bio(bio); \
-} while (0)
-
-#define kthread_wait(cond) \
-({ \
- int _ret = 0; \
- \
- while (1) { \
- set_current_state(TASK_INTERRUPTIBLE); \
- if (kthread_should_stop()) { \
- _ret = -1; \
- break; \
- } \
- \
- if (cond) \
- break; \
- \
- schedule(); \
- } \
- set_current_state(TASK_RUNNING); \
- _ret; \
-})
-
-#define kthread_wait_freezable(cond) \
-({ \
- int _ret = 0; \
- while (1) { \
- set_current_state(TASK_INTERRUPTIBLE); \
- if (kthread_should_stop()) { \
- _ret = -1; \
- break; \
- } \
- \
- if (cond) \
- break; \
- \
- schedule(); \
- try_to_freeze(); \
- } \
- set_current_state(TASK_RUNNING); \
- _ret; \
-})
-
-size_t bch2_rand_range(size_t);
-
-void memcpy_to_bio(struct bio *, struct bvec_iter, const void *);
-void memcpy_from_bio(void *, struct bio *, struct bvec_iter);
-
-static inline void memcpy_u64s_small(void *dst, const void *src,
- unsigned u64s)
-{
- u64 *d = dst;
- const u64 *s = src;
-
- while (u64s--)
- *d++ = *s++;
-}
-
-static inline void __memcpy_u64s(void *dst, const void *src,
- unsigned u64s)
-{
-#ifdef CONFIG_X86_64
- long d0, d1, d2;
-
- asm volatile("rep ; movsq"
- : "=&c" (d0), "=&D" (d1), "=&S" (d2)
- : "0" (u64s), "1" (dst), "2" (src)
- : "memory");
-#else
- u64 *d = dst;
- const u64 *s = src;
-
- while (u64s--)
- *d++ = *s++;
-#endif
-}
-
-static inline void memcpy_u64s(void *dst, const void *src,
- unsigned u64s)
-{
- EBUG_ON(!(dst >= src + u64s * sizeof(u64) ||
- dst + u64s * sizeof(u64) <= src));
-
- __memcpy_u64s(dst, src, u64s);
-}
-
-static inline void __memmove_u64s_down(void *dst, const void *src,
- unsigned u64s)
-{
- __memcpy_u64s(dst, src, u64s);
-}
-
-static inline void memmove_u64s_down(void *dst, const void *src,
- unsigned u64s)
-{
- EBUG_ON(dst > src);
-
- __memmove_u64s_down(dst, src, u64s);
-}
-
-static inline void __memmove_u64s_down_small(void *dst, const void *src,
- unsigned u64s)
-{
- memcpy_u64s_small(dst, src, u64s);
-}
-
-static inline void memmove_u64s_down_small(void *dst, const void *src,
- unsigned u64s)
-{
- EBUG_ON(dst > src);
-
- __memmove_u64s_down_small(dst, src, u64s);
-}
-
-static inline void __memmove_u64s_up_small(void *_dst, const void *_src,
- unsigned u64s)
-{
- u64 *dst = (u64 *) _dst + u64s;
- u64 *src = (u64 *) _src + u64s;
-
- while (u64s--)
- *--dst = *--src;
-}
-
-static inline void memmove_u64s_up_small(void *dst, const void *src,
- unsigned u64s)
-{
- EBUG_ON(dst < src);
-
- __memmove_u64s_up_small(dst, src, u64s);
-}
-
-static inline void __memmove_u64s_up(void *_dst, const void *_src,
- unsigned u64s)
-{
- u64 *dst = (u64 *) _dst + u64s - 1;
- u64 *src = (u64 *) _src + u64s - 1;
-
-#ifdef CONFIG_X86_64
- long d0, d1, d2;
-
- asm volatile("std ;\n"
- "rep ; movsq\n"
- "cld ;\n"
- : "=&c" (d0), "=&D" (d1), "=&S" (d2)
- : "0" (u64s), "1" (dst), "2" (src)
- : "memory");
-#else
- while (u64s--)
- *dst-- = *src--;
-#endif
-}
-
-static inline void memmove_u64s_up(void *dst, const void *src,
- unsigned u64s)
-{
- EBUG_ON(dst < src);
-
- __memmove_u64s_up(dst, src, u64s);
-}
-
-static inline void memmove_u64s(void *dst, const void *src,
- unsigned u64s)
-{
- if (dst < src)
- __memmove_u64s_down(dst, src, u64s);
- else
- __memmove_u64s_up(dst, src, u64s);
-}
-
-/* Set the last few bytes up to a u64 boundary given an offset into a buffer. */
-static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
-{
- unsigned rem = round_up(bytes, sizeof(u64)) - bytes;
-
- memset(s + bytes, c, rem);
-}
-
-/* just the memmove, doesn't update @_nr */
-#define __array_insert_item(_array, _nr, _pos) \
- memmove(&(_array)[(_pos) + 1], \
- &(_array)[(_pos)], \
- sizeof((_array)[0]) * ((_nr) - (_pos)))
-
-#define array_insert_item(_array, _nr, _pos, _new_item) \
-do { \
- __array_insert_item(_array, _nr, _pos); \
- (_nr)++; \
- (_array)[(_pos)] = (_new_item); \
-} while (0)
-
-#define array_remove_items(_array, _nr, _pos, _nr_to_remove) \
-do { \
- (_nr) -= (_nr_to_remove); \
- memmove(&(_array)[(_pos)], \
- &(_array)[(_pos) + (_nr_to_remove)], \
- sizeof((_array)[0]) * ((_nr) - (_pos))); \
-} while (0)
-
-#define array_remove_item(_array, _nr, _pos) \
- array_remove_items(_array, _nr, _pos, 1)
-
-static inline void __move_gap(void *array, size_t element_size,
- size_t nr, size_t size,
- size_t old_gap, size_t new_gap)
-{
- size_t gap_end = old_gap + size - nr;
-
- if (new_gap < old_gap) {
- size_t move = old_gap - new_gap;
-
- memmove(array + element_size * (gap_end - move),
- array + element_size * (old_gap - move),
- element_size * move);
- } else if (new_gap > old_gap) {
- size_t move = new_gap - old_gap;
-
- memmove(array + element_size * old_gap,
- array + element_size * gap_end,
- element_size * move);
- }
-}
-
-/* Move the gap in a gap buffer: */
-#define move_gap(_d, _new_gap) \
-do { \
- BUG_ON(_new_gap > (_d)->nr); \
- BUG_ON((_d)->gap > (_d)->nr); \
- \
- __move_gap((_d)->data, sizeof((_d)->data[0]), \
- (_d)->nr, (_d)->size, (_d)->gap, _new_gap); \
- (_d)->gap = _new_gap; \
-} while (0)
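
__move_gap() implements the classic gap-buffer move: the live elements sit in [0, gap) and [gap_end, size), and moving the gap shifts the elements in between with a single memmove in whichever direction the gap travels. A small standalone demonstration on an int array, with the darray bookkeeping from move_gap() replaced by explicit nr/size/gap arguments:

/* Standalone demo of the gap-buffer move used by __move_gap(). */
#include <stdio.h>
#include <stddef.h>
#include <string.h>

static void move_gap(int *array, size_t nr, size_t size,
		     size_t old_gap, size_t new_gap)
{
	size_t gap_end = old_gap + size - nr;

	if (new_gap < old_gap) {
		size_t move = old_gap - new_gap;

		memmove(array + gap_end - move, array + old_gap - move,
			move * sizeof(*array));
	} else if (new_gap > old_gap) {
		size_t move = new_gap - old_gap;

		memmove(array + old_gap, array + gap_end,
			move * sizeof(*array));
	}
}

int main(void)
{
	/* 6 live elements in a buffer of 8, gap currently at index 3 */
	int buf[8] = { 1, 2, 3, 0, 0, 4, 5, 6 };

	move_gap(buf, 6, 8, 3, 1);	/* move the gap to index 1 */

	for (int i = 0; i < 8; i++)
		printf("%d ", buf[i]);
	printf("\n");	/* 1 2 3 2 3 4 5 6: indices 1-2 are now the (stale) gap */
	return 0;
}
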
-
-#define bubble_sort(_base, _nr, _cmp) \
-do { \
- ssize_t _i, _last; \
- bool _swapped = true; \
- \
- for (_last= (ssize_t) (_nr) - 1; _last > 0 && _swapped; --_last) {\
- _swapped = false; \
- for (_i = 0; _i < _last; _i++) \
- if (_cmp((_base)[_i], (_base)[_i + 1]) > 0) { \
- swap((_base)[_i], (_base)[_i + 1]); \
- _swapped = true; \
- } \
- } \
-} while (0)
-
-#define per_cpu_sum(_p) \
-({ \
- typeof(*_p) _ret = 0; \
- \
- int cpu; \
- for_each_possible_cpu(cpu) \
- _ret += *per_cpu_ptr(_p, cpu); \
- _ret; \
-})
-
-static inline u64 percpu_u64_get(u64 __percpu *src)
-{
- return per_cpu_sum(src);
-}
-
-static inline void percpu_u64_set(u64 __percpu *dst, u64 src)
-{
- int cpu;
-
- for_each_possible_cpu(cpu)
- *per_cpu_ptr(dst, cpu) = 0;
- this_cpu_write(*dst, src);
-}
-
-static inline void acc_u64s(u64 *acc, const u64 *src, unsigned nr)
-{
- for (unsigned i = 0; i < nr; i++)
- acc[i] += src[i];
-}
-
-static inline void acc_u64s_percpu(u64 *acc, const u64 __percpu *src,
- unsigned nr)
-{
- int cpu;
-
- for_each_possible_cpu(cpu)
- acc_u64s(acc, per_cpu_ptr(src, cpu), nr);
-}
-
-static inline void percpu_memset(void __percpu *p, int c, size_t bytes)
-{
- int cpu;
-
- for_each_possible_cpu(cpu)
- memset(per_cpu_ptr(p, cpu), c, bytes);
-}
-
-u64 *bch2_acc_percpu_u64s(u64 __percpu *, unsigned);
-
-#define cmp_int(l, r) ((l > r) - (l < r))
-
-static inline int u8_cmp(u8 l, u8 r)
-{
- return cmp_int(l, r);
-}
-
-static inline int cmp_le32(__le32 l, __le32 r)
-{
- return cmp_int(le32_to_cpu(l), le32_to_cpu(r));
-}
-
-#include <linux/uuid.h>
-
-#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
-
-static inline bool qstr_eq(const struct qstr l, const struct qstr r)
-{
- return l.len == r.len && !memcmp(l.name, r.name, l.len);
-}
-
-void bch2_darray_str_exit(darray_str *);
-int bch2_split_devs(const char *, darray_str *);
-
-#ifdef __KERNEL__
-
-__must_check
-static inline int copy_to_user_errcode(void __user *to, const void *from, unsigned long n)
-{
- return copy_to_user(to, from, n) ? -EFAULT : 0;
-}
-
-__must_check
-static inline int copy_from_user_errcode(void *to, const void __user *from, unsigned long n)
-{
- return copy_from_user(to, from, n) ? -EFAULT : 0;
-}
-
-#endif
-
-static inline void mod_bit(long nr, volatile unsigned long *addr, bool v)
-{
- if (v)
- set_bit(nr, addr);
- else
- clear_bit(nr, addr);
-}
-
-static inline void __set_bit_le64(size_t bit, __le64 *addr)
-{
- addr[bit / 64] |= cpu_to_le64(BIT_ULL(bit % 64));
-}
-
-static inline void __clear_bit_le64(size_t bit, __le64 *addr)
-{
- addr[bit / 64] &= ~cpu_to_le64(BIT_ULL(bit % 64));
-}
-
-static inline bool test_bit_le64(size_t bit, __le64 *addr)
-{
- return (addr[bit / 64] & cpu_to_le64(BIT_ULL(bit % 64))) != 0;
-}
-
-#endif /* _BCACHEFS_UTIL_H */
diff --git a/fs/bcachefs/varint.c b/fs/bcachefs/varint.c
deleted file mode 100644
index 6a78553d9b0c..000000000000
--- a/fs/bcachefs/varint.c
+++ /dev/null
@@ -1,129 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/bitops.h>
-#include <linux/math.h>
-#include <linux/string.h>
-#include <linux/unaligned.h>
-
-#ifdef CONFIG_VALGRIND
-#include <valgrind/memcheck.h>
-#endif
-
-#include "varint.h"
-
-/**
- * bch2_varint_encode - encode a variable length integer
- * @out: destination to encode to
- * @v: unsigned integer to encode
- * Returns: size in bytes of the encoded integer - at most 9 bytes
- */
-int bch2_varint_encode(u8 *out, u64 v)
-{
- unsigned bits = fls64(v|1);
- unsigned bytes = DIV_ROUND_UP(bits, 7);
- __le64 v_le;
-
- if (likely(bytes < 9)) {
- v <<= bytes;
- v |= ~(~0 << (bytes - 1));
- v_le = cpu_to_le64(v);
- memcpy(out, &v_le, bytes);
- } else {
- *out++ = 255;
- bytes = 9;
- put_unaligned_le64(v, out);
- }
-
- return bytes;
-}
-
-/**
- * bch2_varint_decode - decode a variable length integer
- * @in: varint to decode
- * @end: end of buffer to decode from
- * @out: on success, decoded integer
- * Returns: size in bytes of the decoded integer - or -1 on failure (would
- * have read past the end of the buffer)
- */
-int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out)
-{
- unsigned bytes = likely(in < end)
- ? ffz(*in & 255) + 1
- : 1;
- u64 v;
-
- if (unlikely(in + bytes > end))
- return -1;
-
- if (likely(bytes < 9)) {
- __le64 v_le = 0;
-
- memcpy(&v_le, in, bytes);
- v = le64_to_cpu(v_le);
- v >>= bytes;
- } else {
- v = get_unaligned_le64(++in);
- }
-
- *out = v;
- return bytes;
-}
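
The encoding above stores a value in ceil(bits/7) bytes, little endian: the value is shifted left by the byte count and the low (bytes - 1) bits are set to one, so the position of the first zero bit in the first byte tells the decoder how many bytes to read. A userspace round-trip of the same scheme, omitting the 9-byte escape (first byte 0xff) used for very large values:

/* Userspace round-trip of the prefix-varint scheme above. Assumes a
 * little-endian host; the 9-byte escape path is left out for brevity. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static unsigned varint_encode(uint8_t *out, uint64_t v)
{
	unsigned bits = 64 - __builtin_clzll(v | 1);
	unsigned bytes = (bits + 6) / 7;		/* at most 8 here */
	uint64_t enc = (v << bytes) | ((1ULL << (bytes - 1)) - 1);

	memcpy(out, &enc, bytes);
	return bytes;
}

static unsigned varint_decode(const uint8_t *in, uint64_t *v)
{
	/* ffz(first byte) + 1; safe because the encoder always leaves a zero
	 * bit somewhere in the first byte when bytes <= 8 */
	unsigned bytes = __builtin_ctz(~(unsigned) *in) + 1;
	uint64_t enc = 0;

	memcpy(&enc, in, bytes);
	*v = enc >> bytes;
	return bytes;
}

int main(void)
{
	uint8_t buf[8];
	uint64_t v;
	unsigned n = varint_encode(buf, 300);

	printf("300 encodes to %u bytes\n", n);		/* 2 */
	varint_decode(buf, &v);
	printf("decodes back to %llu\n", (unsigned long long) v);
	return 0;
}
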
-
-/**
- * bch2_varint_encode_fast - fast version of bch2_varint_encode
- * @out: destination to encode to
- * @v: unsigned integer to encode
- * Returns: size in bytes of the encoded integer - at most 9 bytes
- *
- * This version assumes it's always safe to write 8 bytes to @out, even if the
- * encoded integer would be smaller.
- */
-int bch2_varint_encode_fast(u8 *out, u64 v)
-{
- unsigned bits = fls64(v|1);
- unsigned bytes = DIV_ROUND_UP(bits, 7);
-
- if (likely(bytes < 9)) {
- v <<= bytes;
- v |= ~(~0U << (bytes - 1));
- } else {
- *out++ = 255;
- bytes = 9;
- }
-
- put_unaligned_le64(v, out);
- return bytes;
-}
-
-/**
- * bch2_varint_decode_fast - fast version of bch2_varint_decode
- * @in: varint to decode
- * @end: end of buffer to decode from
- * @out: on success, decoded integer
- * Returns: size in bytes of the decoded integer - or -1 on failure (would
- * have read past the end of the buffer)
- *
- * This version assumes that it is safe to read at most 8 bytes past the end of
- * @end (we still return an error if the varint extends past @end).
- */
-int bch2_varint_decode_fast(const u8 *in, const u8 *end, u64 *out)
-{
-#ifdef CONFIG_VALGRIND
- VALGRIND_MAKE_MEM_DEFINED(in, 8);
-#endif
- u64 v = get_unaligned_le64(in);
- unsigned bytes = ffz(*in) + 1;
-
- if (unlikely(in + bytes > end))
- return -1;
-
- if (likely(bytes < 9)) {
- v >>= bytes;
- v &= ~(~0ULL << (7 * bytes));
- } else {
- v = get_unaligned_le64(++in);
- }
-
- *out = v;
- return bytes;
-}
diff --git a/fs/bcachefs/varint.h b/fs/bcachefs/varint.h
deleted file mode 100644
index 92a182fb3d7a..000000000000
--- a/fs/bcachefs/varint.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_VARINT_H
-#define _BCACHEFS_VARINT_H
-
-int bch2_varint_encode(u8 *, u64);
-int bch2_varint_decode(const u8 *, const u8 *, u64 *);
-
-int bch2_varint_encode_fast(u8 *, u64);
-int bch2_varint_decode_fast(const u8 *, const u8 *, u64 *);
-
-#endif /* _BCACHEFS_VARINT_H */
diff --git a/fs/bcachefs/vstructs.h b/fs/bcachefs/vstructs.h
deleted file mode 100644
index 2ad338e282da..000000000000
--- a/fs/bcachefs/vstructs.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _VSTRUCTS_H
-#define _VSTRUCTS_H
-
-#include "util.h"
-
-/*
- * NOTE: we can't differentiate between __le64 and u64 with type_is - this
- * assumes u64 is little endian:
- */
-#define __vstruct_u64s(_s) \
-({ \
- ( type_is((_s)->u64s, u64) ? le64_to_cpu((__force __le64) (_s)->u64s) \
- : type_is((_s)->u64s, u32) ? le32_to_cpu((__force __le32) (_s)->u64s) \
- : type_is((_s)->u64s, u16) ? le16_to_cpu((__force __le16) (_s)->u64s) \
- : ((__force u8) ((_s)->u64s))); \
-})
-
-#define __vstruct_bytes(_type, _u64s) \
-({ \
- BUILD_BUG_ON(offsetof(_type, _data) % sizeof(u64)); \
- \
- (size_t) (offsetof(_type, _data) + (_u64s) * sizeof(u64)); \
-})
-
-#define vstruct_bytes(_s) \
- __vstruct_bytes(typeof(*(_s)), __vstruct_u64s(_s))
-
-#define __vstruct_blocks(_type, _sector_block_bits, _u64s) \
- (round_up(__vstruct_bytes(_type, _u64s), \
- 512 << (_sector_block_bits)) >> (9 + (_sector_block_bits)))
-
-#define vstruct_blocks(_s, _sector_block_bits) \
- __vstruct_blocks(typeof(*(_s)), _sector_block_bits, __vstruct_u64s(_s))
-
-#define vstruct_blocks_plus(_s, _sector_block_bits, _u64s) \
- __vstruct_blocks(typeof(*(_s)), _sector_block_bits, \
- __vstruct_u64s(_s) + (_u64s))
-
-#define vstruct_sectors(_s, _sector_block_bits) \
- (round_up(vstruct_bytes(_s), 512 << (_sector_block_bits)) >> 9)
-
-#define vstruct_next(_s) \
- ((typeof(_s)) ((u64 *) (_s)->_data + __vstruct_u64s(_s)))
-#define vstruct_last(_s) \
- ((typeof(&(_s)->start[0])) ((u64 *) (_s)->_data + __vstruct_u64s(_s)))
-#define vstruct_end(_s) \
- ((void *) ((u64 *) (_s)->_data + __vstruct_u64s(_s)))
-
-#define vstruct_for_each(_s, _i) \
- for (typeof(&(_s)->start[0]) _i = (_s)->start; \
- _i < vstruct_last(_s); \
- _i = vstruct_next(_i))
-
-#define vstruct_for_each_safe(_s, _i) \
- for (typeof(&(_s)->start[0]) _next, _i = (_s)->start; \
- _i < vstruct_last(_s) && (_next = vstruct_next(_i), true); \
- _i = _next)
-
-#define vstruct_idx(_s, _idx) \
- ((typeof(&(_s)->start[0])) ((_s)->_data + (_idx)))
-
-#endif /* _VSTRUCTS_H */
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
deleted file mode 100644
index 952aca400faf..000000000000
--- a/fs/bcachefs/xattr.c
+++ /dev/null
@@ -1,638 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "bcachefs.h"
-#include "acl.h"
-#include "bkey_methods.h"
-#include "btree_update.h"
-#include "extents.h"
-#include "fs.h"
-#include "rebalance.h"
-#include "str_hash.h"
-#include "xattr.h"
-
-#include <linux/dcache.h>
-#include <linux/posix_acl_xattr.h>
-#include <linux/xattr.h>
-
-static const struct xattr_handler *bch2_xattr_type_to_handler(unsigned);
-
-static u64 bch2_xattr_hash(const struct bch_hash_info *info,
- const struct xattr_search_key *key)
-{
- struct bch_str_hash_ctx ctx;
-
- bch2_str_hash_init(&ctx, info);
- bch2_str_hash_update(&ctx, info, &key->type, sizeof(key->type));
- bch2_str_hash_update(&ctx, info, key->name.name, key->name.len);
-
- return bch2_str_hash_end(&ctx, info);
-}
-
-static u64 xattr_hash_key(const struct bch_hash_info *info, const void *key)
-{
- return bch2_xattr_hash(info, key);
-}
-
-static u64 xattr_hash_bkey(const struct bch_hash_info *info, struct bkey_s_c k)
-{
- struct bkey_s_c_xattr x = bkey_s_c_to_xattr(k);
-
- return bch2_xattr_hash(info,
- &X_SEARCH(x.v->x_type, x.v->x_name, x.v->x_name_len));
-}
-
-static bool xattr_cmp_key(struct bkey_s_c _l, const void *_r)
-{
- struct bkey_s_c_xattr l = bkey_s_c_to_xattr(_l);
- const struct xattr_search_key *r = _r;
-
- return l.v->x_type != r->type ||
- l.v->x_name_len != r->name.len ||
- memcmp(l.v->x_name, r->name.name, r->name.len);
-}
-
-static bool xattr_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r)
-{
- struct bkey_s_c_xattr l = bkey_s_c_to_xattr(_l);
- struct bkey_s_c_xattr r = bkey_s_c_to_xattr(_r);
-
- return l.v->x_type != r.v->x_type ||
- l.v->x_name_len != r.v->x_name_len ||
- memcmp(l.v->x_name, r.v->x_name, r.v->x_name_len);
-}
-
-const struct bch_hash_desc bch2_xattr_hash_desc = {
- .btree_id = BTREE_ID_xattrs,
- .key_type = KEY_TYPE_xattr,
- .hash_key = xattr_hash_key,
- .hash_bkey = xattr_hash_bkey,
- .cmp_key = xattr_cmp_key,
- .cmp_bkey = xattr_cmp_bkey,
-};
-
-int bch2_xattr_validate(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags)
-{
- struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
- unsigned val_u64s = xattr_val_u64s(xattr.v->x_name_len,
- le16_to_cpu(xattr.v->x_val_len));
- int ret = 0;
-
- bkey_fsck_err_on(bkey_val_u64s(k.k) < val_u64s,
- c, xattr_val_size_too_small,
- "value too small (%zu < %u)",
- bkey_val_u64s(k.k), val_u64s);
-
- /* XXX why +4 ? */
- val_u64s = xattr_val_u64s(xattr.v->x_name_len,
- le16_to_cpu(xattr.v->x_val_len) + 4);
-
- bkey_fsck_err_on(bkey_val_u64s(k.k) > val_u64s,
- c, xattr_val_size_too_big,
- "value too big (%zu > %u)",
- bkey_val_u64s(k.k), val_u64s);
-
- bkey_fsck_err_on(!bch2_xattr_type_to_handler(xattr.v->x_type),
- c, xattr_invalid_type,
- "invalid type (%u)", xattr.v->x_type);
-
- bkey_fsck_err_on(memchr(xattr.v->x_name, '\0', xattr.v->x_name_len),
- c, xattr_name_invalid_chars,
- "xattr name has invalid characters");
-fsck_err:
- return ret;
-}
-
-void bch2_xattr_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- const struct xattr_handler *handler;
- struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
-
- handler = bch2_xattr_type_to_handler(xattr.v->x_type);
- if (handler && handler->prefix)
- prt_printf(out, "%s", handler->prefix);
- else if (handler)
- prt_printf(out, "(type %u)", xattr.v->x_type);
- else
- prt_printf(out, "(unknown type %u)", xattr.v->x_type);
-
- unsigned name_len = xattr.v->x_name_len;
- unsigned val_len = le16_to_cpu(xattr.v->x_val_len);
- unsigned max_name_val_bytes = bkey_val_bytes(xattr.k) -
- offsetof(struct bch_xattr, x_name);
-
- val_len = min_t(int, val_len, max_name_val_bytes - name_len);
- name_len = min(name_len, max_name_val_bytes);
-
- prt_printf(out, "%.*s:%.*s",
- name_len, xattr.v->x_name,
- val_len, (char *) xattr_val(xattr.v));
-
- if (xattr.v->x_type == KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS ||
- xattr.v->x_type == KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT) {
- prt_char(out, ' ');
- bch2_acl_to_text(out, xattr_val(xattr.v),
- le16_to_cpu(xattr.v->x_val_len));
- }
-}
-
-static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info *inode,
- const char *name, void *buffer, size_t size, int type)
-{
- struct bch_hash_info hash = bch2_hash_info_init(trans->c, &inode->ei_inode);
- struct xattr_search_key search = X_SEARCH(type, name, strlen(name));
- struct btree_iter iter;
- struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc, &hash,
- inode_inum(inode), &search, 0);
- int ret = bkey_err(k);
- if (ret)
- return ret;
-
- struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
- ret = le16_to_cpu(xattr.v->x_val_len);
- if (buffer) {
- if (ret > size)
- ret = -ERANGE;
- else
- memcpy(buffer, xattr_val(xattr.v), ret);
- }
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
- struct bch_inode_unpacked *inode_u,
- const struct bch_hash_info *hash_info,
- const char *name, const void *value, size_t size,
- int type, int flags)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter inode_iter = { NULL };
- int ret;
-
- ret = bch2_subvol_is_ro_trans(trans, inum.subvol) ?:
- bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_intent);
- if (ret)
- return ret;
-
- inode_u->bi_ctime = bch2_current_time(c);
-
- ret = bch2_inode_write(trans, &inode_iter, inode_u);
- bch2_trans_iter_exit(trans, &inode_iter);
-
- if (ret)
- return ret;
-
- if (value) {
- struct bkey_i_xattr *xattr;
- unsigned namelen = strlen(name);
- unsigned u64s = BKEY_U64s +
- xattr_val_u64s(namelen, size);
-
- if (u64s > U8_MAX)
- return -ERANGE;
-
- xattr = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
- if (IS_ERR(xattr))
- return PTR_ERR(xattr);
-
- bkey_xattr_init(&xattr->k_i);
- xattr->k.u64s = u64s;
- xattr->v.x_type = type;
- xattr->v.x_name_len = namelen;
- xattr->v.x_val_len = cpu_to_le16(size);
- memcpy(xattr->v.x_name, name, namelen);
- memcpy(xattr_val(&xattr->v), value, size);
-
- ret = bch2_hash_set(trans, bch2_xattr_hash_desc, hash_info,
- inum, &xattr->k_i,
- (flags & XATTR_CREATE ? STR_HASH_must_create : 0)|
- (flags & XATTR_REPLACE ? STR_HASH_must_replace : 0));
- } else {
- struct xattr_search_key search =
- X_SEARCH(type, name, strlen(name));
-
- ret = bch2_hash_delete(trans, bch2_xattr_hash_desc,
- hash_info, inum, &search);
- }
-
- if (bch2_err_matches(ret, ENOENT))
- ret = flags & XATTR_REPLACE ? -ENODATA : 0;
-
- return ret;
-}
-
-struct xattr_buf {
- char *buf;
- size_t len;
- size_t used;
-};
-
-static int __bch2_xattr_emit(const char *prefix,
- const char *name, size_t name_len,
- struct xattr_buf *buf)
-{
- const size_t prefix_len = strlen(prefix);
- const size_t total_len = prefix_len + name_len + 1;
-
- if (buf->buf) {
- if (buf->used + total_len > buf->len)
- return -ERANGE;
-
- memcpy(buf->buf + buf->used, prefix, prefix_len);
- memcpy(buf->buf + buf->used + prefix_len,
- name, name_len);
- buf->buf[buf->used + prefix_len + name_len] = '\0';
- }
-
- buf->used += total_len;
- return 0;
-}
-
-static inline const char *bch2_xattr_prefix(unsigned type, struct dentry *dentry)
-{
- const struct xattr_handler *handler = bch2_xattr_type_to_handler(type);
-
- if (!xattr_handler_can_list(handler, dentry))
- return NULL;
-
- return xattr_prefix(handler);
-}
-
-static int bch2_xattr_emit(struct dentry *dentry,
- const struct bch_xattr *xattr,
- struct xattr_buf *buf)
-{
- const char *prefix;
-
- prefix = bch2_xattr_prefix(xattr->x_type, dentry);
- if (!prefix)
- return 0;
-
- return __bch2_xattr_emit(prefix, xattr->x_name, xattr->x_name_len, buf);
-}
-
-static int bch2_xattr_list_bcachefs(struct bch_fs *c,
- struct bch_inode_unpacked *inode,
- struct xattr_buf *buf,
- bool all)
-{
- const char *prefix = all ? "bcachefs_effective." : "bcachefs.";
- unsigned id;
- int ret = 0;
- u64 v;
-
- for (id = 0; id < Inode_opt_nr; id++) {
- v = bch2_inode_opt_get(inode, id);
- if (!v)
- continue;
-
- if (!all &&
- !(inode->bi_fields_set & (1 << id)))
- continue;
-
- ret = __bch2_xattr_emit(prefix, bch2_inode_opts[id],
- strlen(bch2_inode_opts[id]), buf);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
-{
- struct bch_fs *c = dentry->d_sb->s_fs_info;
- struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
- struct xattr_buf buf = { .buf = buffer, .len = buffer_size };
- u64 offset = 0, inum = inode->ei_inode.bi_inum;
-
- int ret = bch2_trans_run(c,
- for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_xattrs,
- POS(inum, offset),
- POS(inum, U64_MAX),
- inode->ei_inum.subvol, 0, k, ({
- if (k.k->type != KEY_TYPE_xattr)
- continue;
-
- bch2_xattr_emit(dentry, bkey_s_c_to_xattr(k).v, &buf);
- }))) ?:
- bch2_xattr_list_bcachefs(c, &inode->ei_inode, &buf, false) ?:
- bch2_xattr_list_bcachefs(c, &inode->ei_inode, &buf, true);
-
- return ret ? bch2_err_class(ret) : buf.used;
-}
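
The xattr_buf accounting above follows the usual listxattr(2) size-query convention: when userspace asks only for the required size, the handler sees a NULL buffer (buf->buf is NULL) and merely accumulates buf.used. A minimal userspace sketch of that two-pass pattern, generic rather than bcachefs-specific:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";

	/* First call: NULL buffer, size 0; the kernel only reports the size needed. */
	ssize_t len = listxattr(path, NULL, 0);
	if (len < 0) {
		perror("listxattr");
		return 1;
	}
	if (!len)
		return 0;	/* no extended attributes */

	char *names = malloc(len);
	if (!names)
		return 1;

	/* Second call: copy the NUL-separated attribute names. */
	len = listxattr(path, names, len);
	if (len < 0) {
		perror("listxattr");
		free(names);
		return 1;
	}
	for (ssize_t off = 0; off < len; off += strlen(names + off) + 1)
		printf("%s\n", names + off);
	free(names);
	return 0;
}
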
-
-static int bch2_xattr_get_handler(const struct xattr_handler *handler,
- struct dentry *dentry, struct inode *vinode,
- const char *name, void *buffer, size_t size)
-{
- struct bch_inode_info *inode = to_bch_ei(vinode);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- int ret = bch2_trans_do(c,
- bch2_xattr_get_trans(trans, inode, name, buffer, size, handler->flags));
-
- if (ret < 0 && bch2_err_matches(ret, ENOENT))
- ret = -ENODATA;
-
- return bch2_err_class(ret);
-}
-
-static int bch2_xattr_set_handler(const struct xattr_handler *handler,
- struct mnt_idmap *idmap,
- struct dentry *dentry, struct inode *vinode,
- const char *name, const void *value,
- size_t size, int flags)
-{
- struct bch_inode_info *inode = to_bch_ei(vinode);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
- struct bch_inode_unpacked inode_u;
- int ret;
-
- ret = bch2_trans_run(c,
- commit_do(trans, NULL, NULL, 0,
- bch2_xattr_set(trans, inode_inum(inode), &inode_u,
- &hash, name, value, size,
- handler->flags, flags)) ?:
- (bch2_inode_update_after_write(trans, inode, &inode_u, ATTR_CTIME), 0));
-
- return bch2_err_class(ret);
-}
-
-static const struct xattr_handler bch_xattr_user_handler = {
- .prefix = XATTR_USER_PREFIX,
- .get = bch2_xattr_get_handler,
- .set = bch2_xattr_set_handler,
- .flags = KEY_TYPE_XATTR_INDEX_USER,
-};
-
-static bool bch2_xattr_trusted_list(struct dentry *dentry)
-{
- return capable(CAP_SYS_ADMIN);
-}
-
-static const struct xattr_handler bch_xattr_trusted_handler = {
- .prefix = XATTR_TRUSTED_PREFIX,
- .list = bch2_xattr_trusted_list,
- .get = bch2_xattr_get_handler,
- .set = bch2_xattr_set_handler,
- .flags = KEY_TYPE_XATTR_INDEX_TRUSTED,
-};
-
-static const struct xattr_handler bch_xattr_security_handler = {
- .prefix = XATTR_SECURITY_PREFIX,
- .get = bch2_xattr_get_handler,
- .set = bch2_xattr_set_handler,
- .flags = KEY_TYPE_XATTR_INDEX_SECURITY,
-};
-
-#ifndef NO_BCACHEFS_FS
-
-static int opt_to_inode_opt(int id)
-{
- switch (id) {
-#define x(name, ...) \
- case Opt_##name: return Inode_opt_##name;
- BCH_INODE_OPTS()
-#undef x
- default:
- return -1;
- }
-}
-
-static int __bch2_xattr_bcachefs_get(const struct xattr_handler *handler,
- struct dentry *dentry, struct inode *vinode,
- const char *name, void *buffer, size_t size,
- bool all)
-{
- struct bch_inode_info *inode = to_bch_ei(vinode);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_opts opts =
- bch2_inode_opts_to_opts(&inode->ei_inode);
- const struct bch_option *opt;
- int id, inode_opt_id;
- struct printbuf out = PRINTBUF;
- int ret;
- u64 v;
-
- id = bch2_opt_lookup(name);
- if (id < 0 || !bch2_opt_is_inode_opt(id))
- return -EINVAL;
-
- inode_opt_id = opt_to_inode_opt(id);
- if (inode_opt_id < 0)
- return -EINVAL;
-
- opt = bch2_opt_table + id;
-
- if (!bch2_opt_defined_by_id(&opts, id))
- return -ENODATA;
-
- if (!all &&
- !(inode->ei_inode.bi_fields_set & (1 << inode_opt_id)))
- return -ENODATA;
-
- v = bch2_opt_get_by_id(&opts, id);
- bch2_opt_to_text(&out, c, c->disk_sb.sb, opt, v, 0);
-
- ret = out.pos;
-
- if (out.allocation_failure) {
- ret = -ENOMEM;
- } else if (buffer) {
- if (out.pos > size)
- ret = -ERANGE;
- else
- memcpy(buffer, out.buf, out.pos);
- }
-
- printbuf_exit(&out);
- return ret;
-}
-
-static int bch2_xattr_bcachefs_get(const struct xattr_handler *handler,
- struct dentry *dentry, struct inode *vinode,
- const char *name, void *buffer, size_t size)
-{
- return __bch2_xattr_bcachefs_get(handler, dentry, vinode,
- name, buffer, size, false);
-}
-
-struct inode_opt_set {
- int id;
- u64 v;
- bool defined;
-};
-
-static int inode_opt_set_fn(struct btree_trans *trans,
- struct bch_inode_info *inode,
- struct bch_inode_unpacked *bi,
- void *p)
-{
- struct inode_opt_set *s = p;
-
- if (s->defined)
- bi->bi_fields_set |= 1U << s->id;
- else
- bi->bi_fields_set &= ~(1U << s->id);
-
- bch2_inode_opt_set(bi, s->id, s->v);
-
- return 0;
-}
-
-static int bch2_xattr_bcachefs_set(const struct xattr_handler *handler,
- struct mnt_idmap *idmap,
- struct dentry *dentry, struct inode *vinode,
- const char *name, const void *value,
- size_t size, int flags)
-{
- struct bch_inode_info *inode = to_bch_ei(vinode);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- const struct bch_option *opt;
- char *buf;
- struct inode_opt_set s;
- int opt_id, inode_opt_id, ret;
-
- opt_id = bch2_opt_lookup(name);
- if (opt_id < 0)
- return -EINVAL;
-
- opt = bch2_opt_table + opt_id;
-
- inode_opt_id = opt_to_inode_opt(opt_id);
- if (inode_opt_id < 0)
- return -EINVAL;
-
- s.id = inode_opt_id;
-
- if (value) {
- u64 v = 0;
-
- buf = kmalloc(size + 1, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- memcpy(buf, value, size);
- buf[size] = '\0';
-
- ret = bch2_opt_parse(c, opt, buf, &v, NULL);
- kfree(buf);
-
- if (ret < 0)
- goto err_class_exit;
-
- ret = bch2_opt_check_may_set(c, opt_id, v);
- if (ret < 0)
- goto err_class_exit;
-
- s.v = v + 1;
- s.defined = true;
- } else {
- /*
- * Check if this option was set on the parent - if so, switched
- * back to inheriting from the parent:
- *
- * rename() also has to deal with keeping inherited options up
- * to date - see bch2_reinherit_attrs()
- */
- spin_lock(&dentry->d_lock);
- if (!IS_ROOT(dentry)) {
- struct bch_inode_info *dir =
- to_bch_ei(d_inode(dentry->d_parent));
-
- s.v = bch2_inode_opt_get(&dir->ei_inode, inode_opt_id);
- } else {
- s.v = 0;
- }
- spin_unlock(&dentry->d_lock);
-
- s.defined = false;
- }
-
- mutex_lock(&inode->ei_update_lock);
- if (inode_opt_id == Inode_opt_project) {
- /*
- * inode fields accessible via the xattr interface are stored
- * with a +1 bias, so that 0 means unset:
- */
- ret = bch2_set_projid(c, inode, s.v ? s.v - 1 : 0);
- if (ret)
- goto err;
- }
-
- ret = bch2_write_inode(c, inode, inode_opt_set_fn, &s, 0);
-err:
- mutex_unlock(&inode->ei_update_lock);
-
- if (value &&
- (opt_id == Opt_background_target ||
- opt_id == Opt_background_compression ||
- (opt_id == Opt_compression && !inode_opt_get(c, &inode->ei_inode, background_compression))))
- bch2_set_rebalance_needs_scan(c, inode->ei_inode.bi_inum);
-
-err_class_exit:
- return bch2_err_class(ret);
-}
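
The comments in bch2_xattr_bcachefs_set() above describe two conventions: option values travel with a +1 bias so that a stored 0 means "unset", and an unset option falls back to the parent directory's value. A tiny standalone illustration of that encoding; the names are invented for the example and are not bcachefs API:

#include <stdio.h>

/* Illustrative only: a stored value of 0 means "not set here, inherit". */
static unsigned long long opt_encode(unsigned long long v)
{
	return v + 1;
}

static int opt_decode(unsigned long long stored, unsigned long long parent,
		      unsigned long long *out)
{
	if (!stored) {
		*out = parent;	/* unset: use the parent directory's effective value */
		return 0;
	}
	*out = stored - 1;	/* strip the +1 bias */
	return 1;		/* explicitly set on this inode */
}

int main(void)
{
	unsigned long long v;
	int set;

	set = opt_decode(opt_encode(4), 7, &v);
	printf("set=%d value=%llu\n", set, v);	/* set=1 value=4 */

	set = opt_decode(0, 7, &v);
	printf("set=%d value=%llu\n", set, v);	/* set=0 value=7 */
	return 0;
}
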
-
-static const struct xattr_handler bch_xattr_bcachefs_handler = {
- .prefix = "bcachefs.",
- .get = bch2_xattr_bcachefs_get,
- .set = bch2_xattr_bcachefs_set,
-};
-
-static int bch2_xattr_bcachefs_get_effective(
- const struct xattr_handler *handler,
- struct dentry *dentry, struct inode *vinode,
- const char *name, void *buffer, size_t size)
-{
- return __bch2_xattr_bcachefs_get(handler, dentry, vinode,
- name, buffer, size, true);
-}
-
-/* Noop - xattrs in the bcachefs_effective namespace are inherited */
-static int bch2_xattr_bcachefs_set_effective(const struct xattr_handler *handler,
- struct mnt_idmap *idmap,
- struct dentry *dentry, struct inode *vinode,
- const char *name, const void *value,
- size_t size, int flags)
-{
- return 0;
-}
-
-static const struct xattr_handler bch_xattr_bcachefs_effective_handler = {
- .prefix = "bcachefs_effective.",
- .get = bch2_xattr_bcachefs_get_effective,
- .set = bch2_xattr_bcachefs_set_effective,
-};
-
-#endif /* NO_BCACHEFS_FS */
-
-const struct xattr_handler *bch2_xattr_handlers[] = {
- &bch_xattr_user_handler,
- &bch_xattr_trusted_handler,
- &bch_xattr_security_handler,
-#ifndef NO_BCACHEFS_FS
- &bch_xattr_bcachefs_handler,
- &bch_xattr_bcachefs_effective_handler,
-#endif
- NULL
-};
-
-static const struct xattr_handler *bch_xattr_handler_map[] = {
- [KEY_TYPE_XATTR_INDEX_USER] = &bch_xattr_user_handler,
- [KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS] =
- &nop_posix_acl_access,
- [KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT] =
- &nop_posix_acl_default,
- [KEY_TYPE_XATTR_INDEX_TRUSTED] = &bch_xattr_trusted_handler,
- [KEY_TYPE_XATTR_INDEX_SECURITY] = &bch_xattr_security_handler,
-};
-
-static const struct xattr_handler *bch2_xattr_type_to_handler(unsigned type)
-{
- return type < ARRAY_SIZE(bch_xattr_handler_map)
- ? bch_xattr_handler_map[type]
- : NULL;
-}
diff --git a/fs/bcachefs/xattr.h b/fs/bcachefs/xattr.h
deleted file mode 100644
index c188a5ad64ce..000000000000
--- a/fs/bcachefs/xattr.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_XATTR_H
-#define _BCACHEFS_XATTR_H
-
-#include "str_hash.h"
-
-extern const struct bch_hash_desc bch2_xattr_hash_desc;
-
-int bch2_xattr_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-void bch2_xattr_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-
-#define bch2_bkey_ops_xattr ((struct bkey_ops) { \
- .key_validate = bch2_xattr_validate, \
- .val_to_text = bch2_xattr_to_text, \
- .min_val_size = 8, \
-})
-
-static inline unsigned xattr_val_u64s(unsigned name_len, unsigned val_len)
-{
- return DIV_ROUND_UP(offsetof(struct bch_xattr, x_name) +
- name_len + val_len, sizeof(u64));
-}
-
-#define xattr_val(_xattr) \
- ((void *) (_xattr)->x_name + (_xattr)->x_name_len)
-
-struct xattr_search_key {
- u8 type;
- struct qstr name;
-};
-
-#define X_SEARCH(_type, _name, _len) ((struct xattr_search_key) \
- { .type = _type, .name = QSTR_INIT(_name, _len) })
-
-struct dentry;
-struct xattr_handler;
-struct bch_hash_info;
-struct bch_inode_info;
-
-/* Exported for cmd_migrate.c in tools: */
-int bch2_xattr_set(struct btree_trans *, subvol_inum,
- struct bch_inode_unpacked *, const struct bch_hash_info *,
- const char *, const void *, size_t, int, int);
-
-ssize_t bch2_xattr_list(struct dentry *, char *, size_t);
-
-extern const struct xattr_handler *bch2_xattr_handlers[];
-
-#endif /* _BCACHEFS_XATTR_H */
diff --git a/fs/bcachefs/xattr_format.h b/fs/bcachefs/xattr_format.h
deleted file mode 100644
index c7916011ef34..000000000000
--- a/fs/bcachefs/xattr_format.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_XATTR_FORMAT_H
-#define _BCACHEFS_XATTR_FORMAT_H
-
-#define KEY_TYPE_XATTR_INDEX_USER 0
-#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS 1
-#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT 2
-#define KEY_TYPE_XATTR_INDEX_TRUSTED 3
-#define KEY_TYPE_XATTR_INDEX_SECURITY 4
-
-struct bch_xattr {
- struct bch_val v;
- __u8 x_type;
- __u8 x_name_len;
- __le16 x_val_len;
- __u8 x_name[] __counted_by(x_name_len);
-} __packed __aligned(8);
-
-#endif /* _BCACHEFS_XATTR_FORMAT_H */
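
For reference, the size arithmetic that xattr_val_u64s() in xattr.h applies to this on-disk layout can be reproduced standalone. The mirror struct below assumes struct bch_val contributes no bytes, so the fixed header is 4 bytes before the name; that assumption is mine, not stated in the format header:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Local mirror of the fixed part of the on-disk record (no trailing name bytes). */
struct xattr_hdr {
	uint8_t  x_type;
	uint8_t  x_name_len;
	uint16_t x_val_len;	/* little-endian on disk */
};

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Same shape as xattr_val_u64s(): header + name + value, rounded up to u64 words. */
static unsigned xattr_u64s(unsigned name_len, unsigned val_len)
{
	return DIV_ROUND_UP(sizeof(struct xattr_hdr) + name_len + val_len,
			    sizeof(uint64_t));
}

int main(void)
{
	/* A 7-byte name ("comment", the handler prefix is encoded in x_type) and
	 * a 5-byte value: 4 + 7 + 5 = 16 bytes, i.e. 2 u64 words. */
	printf("%u u64 words\n", xattr_u64s(7, 5));
	return 0;
}
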
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 8f430ff8e445..9fcfdd6b8189 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -307,7 +307,7 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
befs_ino = BEFS_I(inode);
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index fa66a09e496a..d33d6bde992b 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -27,7 +27,7 @@ const struct file_operations bfs_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
+ .mmap_prepare = generic_file_mmap_prepare,
.splice_read = filemap_splice_read,
};
@@ -170,9 +170,10 @@ static void bfs_write_failed(struct address_space *mapping, loff_t to)
truncate_pagecache(inode, inode->i_size);
}
-static int bfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct folio **foliop, void **fsdata)
+static int bfs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
int ret;
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index db81570c9637..ce6f83234b67 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -17,6 +17,7 @@
#include <linux/writeback.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
+#include <linux/fs_context.h>
#include "bfs.h"
MODULE_AUTHOR("Tigran Aivazian <aivazian.tigran@gmail.com>");
@@ -41,7 +42,7 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
if ((ino < BFS_ROOT_INO) || (ino > BFS_SB(inode->i_sb)->si_lasti)) {
@@ -60,7 +61,19 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
off = (ino - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK;
di = (struct bfs_inode *)bh->b_data + off;
- inode->i_mode = 0x0000FFFF & le32_to_cpu(di->i_mode);
+ /*
+ * https://martin.hinner.info/fs/bfs/bfs-structure.html explains that
+ * BFS in the SCO UnixWare environment used only the lower 9 bits of di->i_mode

+ * value. This means that, although bfs_write_inode() saves whole
+ * inode->i_mode bits (which include S_IFMT bits and S_IS{UID,GID,VTX}
+ * bits), middle 7 bits of di->i_mode value can be garbage when these
+ * bits were not saved by bfs_write_inode().
+ * Since we can't tell whether middle 7 bits are garbage, use only
+ * lower 12 bits (i.e. tolerate S_IS{UID,GID,VTX} bits possibly being
+ * garbage) and reconstruct S_IFMT bits for Linux environment from
+ * di->i_vtype value.
+ */
+ inode->i_mode = 0x00000FFF & le32_to_cpu(di->i_mode);
if (le32_to_cpu(di->i_vtype) == BFS_VDIR) {
inode->i_mode |= S_IFDIR;
inode->i_op = &bfs_dir_inops;
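
A standalone sketch of the mode reconstruction the comment above describes: keep only the low 12 bits of the on-disk mode and rebuild the file-type bits from the separately stored vtype. The vtype constants here are illustrative stand-ins rather than the real BFS on-disk values:

#include <stdio.h>
#include <stdint.h>
#include <sys/stat.h>

enum { VTYPE_REG = 1, VTYPE_DIR = 2 };	/* illustrative, not the on-disk values */

static uint32_t bfs_mode_to_linux(uint32_t disk_mode, uint32_t vtype)
{
	/* Keep permissions plus setuid/setgid/sticky, drop the untrusted middle bits. */
	uint32_t mode = disk_mode & 0x00000FFF;

	/* Rebuild the S_IFMT bits from the separately stored vtype. */
	if (vtype == VTYPE_DIR)
		mode |= S_IFDIR;
	else if (vtype == VTYPE_REG)
		mode |= S_IFREG;
	return mode;
}

int main(void)
{
	/* High bits are garbage, the low 12 bits hold 0755, vtype says "directory". */
	printf("%o\n", (unsigned)bfs_mode_to_linux(0x123401ED, VTYPE_DIR));	/* 40755 */
	return 0;
}
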
@@ -70,6 +83,11 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
inode->i_op = &bfs_file_inops;
inode->i_fop = &bfs_file_operations;
inode->i_mapping->a_ops = &bfs_aops;
+ } else {
+ brelse(bh);
+ printf("Unknown vtype=%u %s:%08lx\n",
+ le32_to_cpu(di->i_vtype), inode->i_sb->s_id, ino);
+ goto error;
}
BFS_I(inode)->i_sblock = le32_to_cpu(di->i_sblock);
@@ -305,7 +323,7 @@ void bfs_dump_imap(const char *prefix, struct super_block *s)
#endif
}
-static int bfs_fill_super(struct super_block *s, void *data, int silent)
+static int bfs_fill_super(struct super_block *s, struct fs_context *fc)
{
struct buffer_head *bh, *sbh;
struct bfs_super_block *bfs_sb;
@@ -314,6 +332,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
struct bfs_sb_info *info;
int ret = -EINVAL;
unsigned long i_sblock, i_eblock, i_eoff, s_size;
+ int silent = fc->sb_flags & SB_SILENT;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
@@ -446,18 +465,28 @@ out:
return ret;
}
-static struct dentry *bfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int bfs_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, bfs_fill_super);
+}
+
+static const struct fs_context_operations bfs_context_ops = {
+ .get_tree = bfs_get_tree,
+};
+
+static int bfs_init_fs_context(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, bfs_fill_super);
+ fc->ops = &bfs_context_ops;
+
+ return 0;
}
static struct file_system_type bfs_fs_type = {
- .owner = THIS_MODULE,
- .name = "bfs",
- .mount = bfs_mount,
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
+ .owner = THIS_MODULE,
+ .name = "bfs",
+ .init_fs_context = bfs_init_fs_context,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("bfs");
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 106f0e8af177..3eb734c192e9 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -46,7 +46,7 @@
#include <linux/cred.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
-#include <linux/rseq.h>
+#include <uapi/linux/rseq.h>
#include <asm/param.h>
#include <asm/page.h>
@@ -68,12 +68,6 @@
static int load_elf_binary(struct linux_binprm *bprm);
-#ifdef CONFIG_USELIB
-static int load_elf_library(struct file *);
-#else
-#define load_elf_library NULL
-#endif
-
/*
* If we don't support core dumping, then supply a NULL so we
* don't even try.
@@ -101,7 +95,6 @@ static int elf_core_dump(struct coredump_params *cprm);
static struct linux_binfmt elf_format = {
.module = THIS_MODULE,
.load_binary = load_elf_binary,
- .load_shlib = load_elf_library,
#ifdef CONFIG_COREDUMP
.core_dump = elf_core_dump,
.min_coredump = ELF_EXEC_PAGESIZE,
@@ -110,6 +103,21 @@ static struct linux_binfmt elf_format = {
#define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))
+static inline void elf_coredump_set_mm_eflags(struct mm_struct *mm, u32 flags)
+{
+#ifdef CONFIG_ARCH_HAS_ELF_CORE_EFLAGS
+ mm->saved_e_flags = flags;
+#endif
+}
+
+static inline u32 elf_coredump_get_mm_eflags(struct mm_struct *mm, u32 flags)
+{
+#ifdef CONFIG_ARCH_HAS_ELF_CORE_EFLAGS
+ flags = mm->saved_e_flags;
+#endif
+ return flags;
+}
+
/*
* We need to explicitly zero any trailing portion of the page that follows
* p_filesz when it ends before the page ends (e.g. bss), otherwise this
@@ -526,7 +534,7 @@ static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
/* Sanity check the number of program headers... */
/* ...and their total size. */
size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
- if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
+ if (size == 0 || size > 65536)
goto out;
elf_phdata = kmalloc(size, GFP_KERNEL);
@@ -653,7 +661,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
if (!elf_check_arch(interp_elf_ex) ||
elf_check_fdpic(interp_elf_ex))
goto out;
- if (!interpreter->f_op->mmap)
+ if (!can_mmap_file(interpreter))
goto out;
total_size = total_mapping_size(interp_elf_phdata,
@@ -762,8 +770,7 @@ static int parse_elf_property(const char *data, size_t *off, size_t datasz,
}
#define NOTE_DATA_SZ SZ_1K
-#define GNU_PROPERTY_TYPE_0_NAME "GNU"
-#define NOTE_NAME_SZ (sizeof(GNU_PROPERTY_TYPE_0_NAME))
+#define NOTE_NAME_SZ (sizeof(NN_GNU_PROPERTY_TYPE_0))
static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
struct arch_elf_state *arch)
@@ -800,7 +807,7 @@ static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 ||
note.nhdr.n_namesz != NOTE_NAME_SZ ||
strncmp(note.data + sizeof(note.nhdr),
- GNU_PROPERTY_TYPE_0_NAME, n - sizeof(note.nhdr)))
+ NN_GNU_PROPERTY_TYPE_0, n - sizeof(note.nhdr)))
return -ENOEXEC;
off = round_up(sizeof(note.nhdr) + NOTE_NAME_SZ,
@@ -831,6 +838,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
struct elf_phdr *elf_property_phdata = NULL;
unsigned long elf_brk;
+ bool brk_moved = false;
int retval, i;
unsigned long elf_entry;
unsigned long e_entry;
@@ -855,7 +863,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
goto out;
if (elf_check_fdpic(elf_ex))
goto out;
- if (!bprm->file->f_op->mmap)
+ if (!can_mmap_file(bprm->file))
goto out;
elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
@@ -1098,15 +1106,19 @@ out_free_interp:
/* Calculate any requested alignment. */
alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
- /*
- * There are effectively two types of ET_DYN
- * binaries: programs (i.e. PIE: ET_DYN with PT_INTERP)
- * and loaders (ET_DYN without PT_INTERP, since they
- * _are_ the ELF interpreter). The loaders must
- * be loaded away from programs since the program
- * may otherwise collide with the loader (especially
- * for ET_EXEC which does not have a randomized
- * position). For example to handle invocations of
+ /**
+ * DOC: PIE handling
+ *
+ * There are effectively two types of ET_DYN ELF
+ * binaries: programs (i.e. PIE: ET_DYN with
+ * PT_INTERP) and loaders (i.e. static PIE: ET_DYN
+ * without PT_INTERP, usually the ELF interpreter
+ * itself). Loaders must be loaded away from programs
+ * since the program may otherwise collide with the
+ * loader (especially for ET_EXEC which does not have
+ * a randomized position).
+ *
+ * For example, to handle invocations of
* "./ld.so someprog" to test out a new version of
* the loader, the subsequent program that the
* loader loads must avoid the loader itself, so
@@ -1119,6 +1131,9 @@ out_free_interp:
* ELF_ET_DYN_BASE and loaders are loaded into the
* independently randomized mmap region (0 load_bias
* without MAP_FIXED nor MAP_FIXED_NOREPLACE).
+ *
+ * See below for "brk" handling details, which is
+ * also affected by program vs loader and ASLR.
*/
if (interpreter) {
/* On ET_DYN with PT_INTERP, we do the ASLR. */
@@ -1235,8 +1250,6 @@ out_free_interp:
start_data += load_bias;
end_data += load_bias;
- current->mm->start_brk = current->mm->brk = ELF_PAGEALIGN(elf_brk);
-
if (interpreter) {
elf_entry = load_elf_interp(interp_elf_ex,
interpreter,
@@ -1257,7 +1270,7 @@ out_free_interp:
}
reloc_func_desc = interp_load_addr;
- allow_write_access(interpreter);
+ exe_file_allow_write_access(interpreter);
fput(interpreter);
kfree(interp_elf_ex);
@@ -1292,27 +1305,46 @@ out_free_interp:
mm->end_data = end_data;
mm->start_stack = bprm->p;
- if ((current->flags & PF_RANDOMIZE) && (snapshot_randomize_va_space > 1)) {
+ elf_coredump_set_mm_eflags(mm, elf_ex->e_flags);
+
+ /**
+ * DOC: "brk" handling
+ *
+ * For architectures with ELF randomization, when executing a
+ * loader directly (i.e. static PIE: ET_DYN without PT_INTERP),
+ * move the brk area out of the mmap region and into the unused
+ * ELF_ET_DYN_BASE region. Since "brk" grows up it may collide
+ * early with the stack growing down or other regions being put
+ * into the mmap region by the kernel (e.g. vdso).
+ *
+ * In the CONFIG_COMPAT_BRK case, though, everything is turned
+ * off because we're not allowed to move the brk at all.
+ */
+ if (!IS_ENABLED(CONFIG_COMPAT_BRK) &&
+ IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
+ elf_ex->e_type == ET_DYN && !interpreter) {
+ elf_brk = ELF_ET_DYN_BASE;
+ /* This counts as moving the brk, so let brk(2) know. */
+ brk_moved = true;
+ }
+ mm->start_brk = mm->brk = ELF_PAGEALIGN(elf_brk);
+
+ if ((current->flags & PF_RANDOMIZE) && snapshot_randomize_va_space > 1) {
/*
- * For architectures with ELF randomization, when executing
- * a loader directly (i.e. no interpreter listed in ELF
- * headers), move the brk area out of the mmap region
- * (since it grows up, and may collide early with the stack
- * growing down), and into the unused ELF_ET_DYN_BASE region.
+ * If we didn't move the brk to ELF_ET_DYN_BASE (above),
+ * leave a gap between .bss and brk.
*/
- if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
- elf_ex->e_type == ET_DYN && !interpreter) {
- mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
- } else {
- /* Otherwise leave a gap between .bss and brk. */
+ if (!brk_moved)
mm->brk = mm->start_brk = mm->brk + PAGE_SIZE;
- }
mm->brk = mm->start_brk = arch_randomize_brk(mm);
+ brk_moved = true;
+ }
+
#ifdef compat_brk_randomized
+ if (brk_moved)
current->brk_randomized = 1;
#endif
- }
if (current->personality & MMAP_PAGE_ZERO) {
/* Why this, you ask??? Well SVr4 maps page 0 as read-only,
@@ -1354,7 +1386,7 @@ out_free_dentry:
kfree(interp_elf_ex);
kfree(interp_elf_phdata);
out_free_file:
- allow_write_access(interpreter);
+ exe_file_allow_write_access(interpreter);
if (interpreter)
fput(interpreter);
out_free_ph:
@@ -1362,75 +1394,6 @@ out_free_ph:
goto out;
}
-#ifdef CONFIG_USELIB
-/* This is really simpleminded and specialized - we are loading an
- a.out library that is given an ELF header. */
-static int load_elf_library(struct file *file)
-{
- struct elf_phdr *elf_phdata;
- struct elf_phdr *eppnt;
- int retval, error, i, j;
- struct elfhdr elf_ex;
-
- error = -ENOEXEC;
- retval = elf_read(file, &elf_ex, sizeof(elf_ex), 0);
- if (retval < 0)
- goto out;
-
- if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
- goto out;
-
- /* First of all, some simple consistency checks */
- if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
- !elf_check_arch(&elf_ex) || !file->f_op->mmap)
- goto out;
- if (elf_check_fdpic(&elf_ex))
- goto out;
-
- /* Now read in all of the header information */
-
- j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
- /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
-
- error = -ENOMEM;
- elf_phdata = kmalloc(j, GFP_KERNEL);
- if (!elf_phdata)
- goto out;
-
- eppnt = elf_phdata;
- error = -ENOEXEC;
- retval = elf_read(file, eppnt, j, elf_ex.e_phoff);
- if (retval < 0)
- goto out_free_ph;
-
- for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
- if ((eppnt + i)->p_type == PT_LOAD)
- j++;
- if (j != 1)
- goto out_free_ph;
-
- while (eppnt->p_type != PT_LOAD)
- eppnt++;
-
- /* Now use mmap to map the library into memory. */
- error = elf_load(file, ELF_PAGESTART(eppnt->p_vaddr),
- eppnt,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_FIXED_NOREPLACE | MAP_PRIVATE,
- 0);
-
- if (error != ELF_PAGESTART(eppnt->p_vaddr))
- goto out_free_ph;
-
- error = 0;
-
-out_free_ph:
- kfree(elf_phdata);
-out:
- return error;
-}
-#endif /* #ifdef CONFIG_USELIB */
-
#ifdef CONFIG_ELF_CORE
/*
* ELF core dumper
@@ -1504,8 +1467,8 @@ static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
phdr->p_align = 4;
}
-static void fill_note(struct memelfnote *note, const char *name, int type,
- unsigned int sz, void *data)
+static void __fill_note(struct memelfnote *note, const char *name, int type,
+ unsigned int sz, void *data)
{
note->name = name;
note->type = type;
@@ -1513,6 +1476,9 @@ static void fill_note(struct memelfnote *note, const char *name, int type,
note->data = data;
}
+#define fill_note(note, type, sz, data) \
+ __fill_note(note, NN_ ## type, NT_ ## type, sz, data)
+
/*
* fill up all the fields in prstatus from the given task struct, except
* registers which need to be filled up separately.
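
The fill_note() wrapper added in the hunk above uses token pasting so that a single argument names both the note-name string (NN_ prefix) and the numeric note type (NT_ prefix). A self-contained sketch of the idiom, with stand-in definitions in place of the UAPI ELF header constants:

#include <stdio.h>

/* Stand-in pairs; the kernel's real constants live in include/uapi/linux/elf.h. */
#define NN_PRSTATUS	"CORE"
#define NT_PRSTATUS	1
#define NN_AUXV		"CORE"
#define NT_AUXV		6

struct note_desc {
	const char *name;
	int type;
};

static void __fill_note(struct note_desc *note, const char *name, int type)
{
	note->name = name;
	note->type = type;
}

/* One argument expands to both the name string and the numeric type. */
#define fill_note(note, type)	__fill_note(note, NN_ ## type, NT_ ## type)

int main(void)
{
	struct note_desc n;

	fill_note(&n, PRSTATUS);
	printf("%s %d\n", n.name, n.type);	/* CORE 1 */
	fill_note(&n, AUXV);
	printf("%s %d\n", n.name, n.type);	/* CORE 6 */
	return 0;
}
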
@@ -1603,14 +1569,14 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
do
i += 2;
while (auxv[i - 2] != AT_NULL);
- fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
+ fill_note(note, AUXV, i * sizeof(elf_addr_t), auxv);
}
static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
const kernel_siginfo_t *siginfo)
{
copy_siginfo_to_external(csigdata, siginfo);
- fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
+ fill_note(note, SIGINFO, sizeof(*csigdata), csigdata);
}
/*
@@ -1706,7 +1672,7 @@ static int fill_files_note(struct memelfnote *note, struct coredump_params *cprm
}
size = name_curpos - (char *)data;
- fill_note(note, "CORE", NT_FILE, size, data);
+ fill_note(note, FILE, size, data);
return 0;
}
@@ -1767,8 +1733,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
regset_get(t->task, &view->regsets[0],
sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg);
- fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
- PRSTATUS_SIZE, &t->prstatus);
+ fill_note(&t->notes[0], PRSTATUS, PRSTATUS_SIZE, &t->prstatus);
info->size += notesize(&t->notes[0]);
do_thread_regset_writeback(t->task, &view->regsets[0]);
@@ -1781,6 +1746,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (view_iter = 1; view_iter < view->n; ++view_iter) {
const struct user_regset *regset = &view->regsets[view_iter];
int note_type = regset->core_note_type;
+ const char *note_name = regset->core_note_name;
bool is_fpreg = note_type == NT_PRFPREG;
void *data;
int ret;
@@ -1801,8 +1767,16 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
if (is_fpreg)
SET_PR_FPVALID(&t->prstatus);
- fill_note(&t->notes[note_iter], is_fpreg ? "CORE" : "LINUX",
- note_type, ret, data);
+ /* There should be a note name, but if not, guess: */
+ if (WARN_ON_ONCE(!note_name))
+ note_name = "LINUX";
+ else
+ /* Warn on non-legacy-compatible names, for now. */
+ WARN_ON_ONCE(strcmp(note_name,
+ is_fpreg ? "CORE" : "LINUX"));
+
+ __fill_note(&t->notes[note_iter], note_name, note_type,
+ ret, data);
info->size += notesize(&t->notes[note_iter]);
note_iter++;
@@ -1821,8 +1795,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
fill_prstatus(&t->prstatus.common, p, signr);
elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
- fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
- &(t->prstatus));
+ fill_note(&t->notes[0], PRSTATUS, sizeof(t->prstatus), &t->prstatus);
info->size += notesize(&t->notes[0]);
fpu = kzalloc(sizeof(elf_fpregset_t), GFP_KERNEL);
@@ -1832,7 +1805,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
}
t->prstatus.pr_fpvalid = 1;
- fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
+ fill_note(&t->notes[1], PRFPREG, sizeof(*fpu), fpu);
info->size += notesize(&t->notes[1]);
return 1;
@@ -1848,11 +1821,13 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
struct elf_thread_core_info *t;
struct elf_prpsinfo *psinfo;
struct core_thread *ct;
+ u16 machine;
+ u32 flags;
psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
if (!psinfo)
return 0;
- fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+ fill_note(&info->psinfo, PRPSINFO, sizeof(*psinfo), psinfo);
#ifdef CORE_DUMP_USE_REGSET
view = task_user_regset_view(dump_task);
@@ -1875,30 +1850,37 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
return 0;
}
- /*
- * Initialize the ELF file header.
- */
- fill_elf_header(elf, phdrs,
- view->e_machine, view->e_flags);
+ machine = view->e_machine;
+ flags = view->e_flags;
#else
view = NULL;
info->thread_notes = 2;
- fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);
+ machine = ELF_ARCH;
+ flags = ELF_CORE_EFLAGS;
#endif
/*
+ * Override ELF e_flags with value taken from process,
+ * if arch needs that.
+ */
+ flags = elf_coredump_get_mm_eflags(dump_task->mm, flags);
+
+ /*
+ * Initialize the ELF file header.
+ */
+ fill_elf_header(elf, phdrs, machine, flags);
+
+ /*
* Allocate a structure for each thread.
*/
- info->thread = kzalloc(offsetof(struct elf_thread_core_info,
- notes[info->thread_notes]),
- GFP_KERNEL);
+ info->thread = kzalloc(struct_size(info->thread, notes, info->thread_notes),
+ GFP_KERNEL);
if (unlikely(!info->thread))
return 0;
info->thread->task = dump_task;
for (ct = dump_task->signal->core_state->dumper.next; ct; ct = ct->next) {
- t = kzalloc(offsetof(struct elf_thread_core_info,
- notes[info->thread_notes]),
+ t = kzalloc(struct_size(t, notes, info->thread_notes),
GFP_KERNEL);
if (unlikely(!t))
return 0;
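
The struct_size() conversion above sizes an allocation that holds a fixed header followed by a flexible array of notes. A standalone example of the equivalent arithmetic; the kernel helper computes the same sum but additionally guards against integer overflow:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct note_slot {
	const char *name;
	int type;
};

struct thread_notes {
	int task_id;
	struct note_slot notes[];	/* flexible array member */
};

int main(void)
{
	size_t n = 3;

	/*
	 * Two equivalent sizes for one allocation holding the header plus n
	 * array elements; struct_size() in the kernel does the same sum with
	 * overflow protection.
	 */
	size_t a = offsetof(struct thread_notes, notes) + n * sizeof(struct note_slot);
	size_t b = sizeof(struct thread_notes) + n * sizeof(struct note_slot);

	printf("%zu %zu\n", a, b);

	struct thread_notes *t = calloc(1, b);
	if (!t)
		return 1;
	t->notes[n - 1].type = 4;	/* the last element fits inside the allocation */
	free(t);
	return 0;
}
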
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index f1a7c4875c4a..48fd2de3bca0 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -109,7 +109,7 @@ static int is_elf(struct elfhdr *hdr, struct file *file)
return 0;
if (!elf_check_arch(hdr))
return 0;
- if (!file->f_op->mmap)
+ if (!can_mmap_file(file))
return 0;
return 1;
}
@@ -394,7 +394,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
goto error;
}
- allow_write_access(interpreter);
+ exe_file_allow_write_access(interpreter);
fput(interpreter);
interpreter = NULL;
}
@@ -467,7 +467,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
error:
if (interpreter) {
- allow_write_access(interpreter);
+ exe_file_allow_write_access(interpreter);
fput(interpreter);
}
kfree(interpreter_name);
@@ -1024,7 +1024,7 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
/* deal with each load segment separately */
phdr = params->phdrs;
for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
- unsigned long maddr, disp, excess, excess1;
+ unsigned long maddr, disp, excess;
int prot = 0, flags;
if (phdr->p_type != PT_LOAD)
@@ -1120,9 +1120,10 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
* extant in the file
*/
excess = phdr->p_memsz - phdr->p_filesz;
- excess1 = PAGE_SIZE - ((maddr + phdr->p_filesz) & ~PAGE_MASK);
#ifdef CONFIG_MMU
+ unsigned long excess1
+ = PAGE_SIZE - ((maddr + phdr->p_filesz) & ~PAGE_MASK);
if (excess > excess1) {
unsigned long xaddr = maddr + phdr->p_filesz + excess1;
unsigned long xmaddr;
@@ -1274,8 +1275,8 @@ static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offs
return;
}
-static inline void fill_note(struct memelfnote *note, const char *name, int type,
- unsigned int sz, void *data)
+static inline void __fill_note(struct memelfnote *note, const char *name, int type,
+ unsigned int sz, void *data)
{
note->name = name;
note->type = type;
@@ -1284,6 +1285,9 @@ static inline void fill_note(struct memelfnote *note, const char *name, int type
return;
}
+#define fill_note(note, type, sz, data) \
+ __fill_note(note, NN_ ## type, NT_ ## type, sz, data)
+
/*
* fill up all the fields in prstatus from the given task struct, except
* registers which need to be filled up separately.
@@ -1397,8 +1401,7 @@ static struct elf_thread_status *elf_dump_thread_status(long signr, struct task_
regset_get(p, &view->regsets[0],
sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg);
- fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
- &t->prstatus);
+ fill_note(&t->notes[0], PRSTATUS, sizeof(t->prstatus), &t->prstatus);
t->num_notes++;
*sz += notesize(&t->notes[0]);
@@ -1415,8 +1418,7 @@ static struct elf_thread_status *elf_dump_thread_status(long signr, struct task_
}
if (t->prstatus.pr_fpvalid) {
- fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
- &t->fpu);
+ fill_note(&t->notes[1], PRFPREG, sizeof(t->fpu), &t->fpu);
t->num_notes++;
*sz += notesize(&t->notes[1]);
}
@@ -1530,7 +1532,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
*/
fill_psinfo(psinfo, current->group_leader, current->mm);
- fill_note(&psinfo_note, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+ fill_note(&psinfo_note, PRPSINFO, sizeof(*psinfo), psinfo);
thread_status_size += notesize(&psinfo_note);
auxv = (elf_addr_t *) current->mm->saved_auxv;
@@ -1538,7 +1540,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
do
i += 2;
while (auxv[i - 2] != AT_NULL);
- fill_note(&auxv_note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
+ fill_note(&auxv_note, AUXV, i * sizeof(elf_addr_t), auxv);
thread_status_size += notesize(&auxv_note);
offset = sizeof(*elf); /* ELF header */
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 390808ce935d..b5b5ca1a44f7 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -478,7 +478,7 @@ static int load_flat_file(struct linux_binprm *bprm,
* 28 bits (256 MB) is way more than reasonable in this case.
* If some top bits are set we have probable binary corruption.
*/
- if ((text_len | data_len | bss_len | stack_len | full_data) >> 28) {
+ if ((text_len | data_len | bss_len | stack_len | relocs | full_data) >> 28) {
pr_err("bad header\n");
ret = -ENOEXEC;
goto err;
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 6a3a16f91051..8cb1a94339b8 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -675,44 +675,6 @@ static void bm_evict_inode(struct inode *inode)
}
/**
- * unlink_binfmt_dentry - remove the dentry for the binary type handler
- * @dentry: dentry associated with the binary type handler
- *
- * Do the actual filesystem work to remove a dentry for a registered binary
- * type handler. Since binfmt_misc only allows simple files to be created
- * directly under the root dentry of the filesystem we ensure that we are
- * indeed passed a dentry directly beneath the root dentry, that the inode
- * associated with the root dentry is locked, and that it is a regular file we
- * are asked to remove.
- */
-static void unlink_binfmt_dentry(struct dentry *dentry)
-{
- struct dentry *parent = dentry->d_parent;
- struct inode *inode, *parent_inode;
-
- /* All entries are immediate descendants of the root dentry. */
- if (WARN_ON_ONCE(dentry->d_sb->s_root != parent))
- return;
-
- /* We only expect to be called on regular files. */
- inode = d_inode(dentry);
- if (WARN_ON_ONCE(!S_ISREG(inode->i_mode)))
- return;
-
- /* The parent inode must be locked. */
- parent_inode = d_inode(parent);
- if (WARN_ON_ONCE(!inode_is_locked(parent_inode)))
- return;
-
- if (simple_positive(dentry)) {
- dget(dentry);
- simple_unlink(parent_inode, dentry);
- d_delete(dentry);
- dput(dentry);
- }
-}
-
-/**
* remove_binfmt_handler - remove a binary type handler
* @misc: handle to binfmt_misc instance
* @e: binary type handler to remove
@@ -729,7 +691,7 @@ static void remove_binfmt_handler(struct binfmt_misc *misc, Node *e)
write_lock(&misc->entries_lock);
list_del_init(&e->list);
write_unlock(&misc->entries_lock);
- unlink_binfmt_dentry(e->dentry);
+ locked_recursive_removal(e->dentry, NULL);
}
/* /<entry> */
@@ -772,7 +734,7 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
case 3:
/* Delete this handler. */
inode = d_inode(inode->i_sb->s_root);
- inode_lock(inode);
+ inode_lock_nested(inode, I_MUTEX_PARENT);
/*
* In order to add new element or remove elements from the list
@@ -803,14 +765,41 @@ static const struct file_operations bm_entry_operations = {
/* /register */
+/* add to filesystem */
+static int add_entry(Node *e, struct super_block *sb)
+{
+ struct dentry *dentry = simple_start_creating(sb->s_root, e->name);
+ struct inode *inode;
+ struct binfmt_misc *misc;
+
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ inode = bm_get_inode(sb, S_IFREG | 0644);
+ if (unlikely(!inode)) {
+ simple_done_creating(dentry);
+ return -ENOMEM;
+ }
+
+ refcount_set(&e->users, 1);
+ e->dentry = dentry;
+ inode->i_private = e;
+ inode->i_fop = &bm_entry_operations;
+
+ d_make_persistent(dentry, inode);
+ misc = i_binfmt_misc(inode);
+ write_lock(&misc->entries_lock);
+ list_add(&e->list, &misc->entries);
+ write_unlock(&misc->entries_lock);
+ simple_done_creating(dentry);
+ return 0;
+}
+
static ssize_t bm_register_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
Node *e;
- struct inode *inode;
struct super_block *sb = file_inode(file)->i_sb;
- struct dentry *root = sb->s_root, *dentry;
- struct binfmt_misc *misc;
int err = 0;
struct file *f = NULL;
@@ -820,8 +809,6 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
return PTR_ERR(e);
if (e->flags & MISC_FMT_OPEN_FILE) {
- const struct cred *old_cred;
-
/*
* Now that we support unprivileged binfmt_misc mounts make
* sure we use the credentials that the register @file was
@@ -829,9 +816,8 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
* didn't matter much as only a privileged process could open
* the register file.
*/
- old_cred = override_creds(file->f_cred);
- f = open_exec(e->interpreter);
- revert_creds(old_cred);
+ scoped_with_creds(file->f_cred)
+ f = open_exec(e->interpreter);
if (IS_ERR(f)) {
pr_notice("register: failed to install interpreter file %s\n",
e->interpreter);
@@ -841,42 +827,12 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
e->interp_file = f;
}
- inode_lock(d_inode(root));
- dentry = lookup_one_len(e->name, root, strlen(e->name));
- err = PTR_ERR(dentry);
- if (IS_ERR(dentry))
- goto out;
-
- err = -EEXIST;
- if (d_really_is_positive(dentry))
- goto out2;
-
- inode = bm_get_inode(sb, S_IFREG | 0644);
-
- err = -ENOMEM;
- if (!inode)
- goto out2;
-
- refcount_set(&e->users, 1);
- e->dentry = dget(dentry);
- inode->i_private = e;
- inode->i_fop = &bm_entry_operations;
-
- d_instantiate(dentry, inode);
- misc = i_binfmt_misc(inode);
- write_lock(&misc->entries_lock);
- list_add(&e->list, &misc->entries);
- write_unlock(&misc->entries_lock);
-
- err = 0;
-out2:
- dput(dentry);
-out:
- inode_unlock(d_inode(root));
-
+ err = add_entry(e, sb);
if (err) {
- if (f)
+ if (f) {
+ exe_file_allow_write_access(f);
filp_close(f, NULL);
+ }
kfree(e);
return err;
}
@@ -922,7 +878,7 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer,
case 3:
/* Delete all handlers. */
inode = d_inode(file_inode(file)->i_sb->s_root);
- inode_lock(inode);
+ inode_lock_nested(inode, I_MUTEX_PARENT);
/*
* In order to add new element or remove elements from the list
@@ -1001,7 +957,7 @@ static int bm_fill_super(struct super_block *sb, struct fs_context *fc)
/*
* If it turns out that most user namespaces actually want to
* register their own binary type handler and therefore all
- * create their own separate binfm_misc mounts we should
+ * create their own separate binfmt_misc mounts we should
* consider turning this into a kmem cache.
*/
misc = kzalloc(sizeof(struct binfmt_misc), GFP_KERNEL);
@@ -1066,7 +1022,7 @@ static struct file_system_type bm_fs_type = {
.name = "binfmt_misc",
.init_fs_context = bm_init_fs_context,
.fs_flags = FS_USERNS_MOUNT,
- .kill_sb = kill_litter_super,
+ .kill_sb = kill_anon_super,
};
MODULE_ALIAS_FS("binfmt_misc");
diff --git a/fs/bpf_fs_kfuncs.c b/fs/bpf_fs_kfuncs.c
index 3fe9f59ef867..5ace2511fec5 100644
--- a/fs/bpf_fs_kfuncs.c
+++ b/fs/bpf_fs_kfuncs.c
@@ -2,11 +2,14 @@
/* Copyright (c) 2024 Google LLC. */
#include <linux/bpf.h>
+#include <linux/bpf_lsm.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/dcache.h>
#include <linux/fs.h>
+#include <linux/fsnotify.h>
#include <linux/file.h>
+#include <linux/kernfs.h>
#include <linux/mm.h>
#include <linux/xattr.h>
@@ -76,7 +79,7 @@ __bpf_kfunc void bpf_put_file(struct file *file)
* pathname in *buf*, including the NUL termination character. On error, a
* negative integer is returned.
*/
-__bpf_kfunc int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz)
+__bpf_kfunc int bpf_path_d_path(const struct path *path, char *buf, size_t buf__sz)
{
int len;
char *ret;
@@ -93,6 +96,24 @@ __bpf_kfunc int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz)
return len;
}
+static bool match_security_bpf_prefix(const char *name__str)
+{
+ return !strncmp(name__str, XATTR_NAME_BPF_LSM, XATTR_NAME_BPF_LSM_LEN);
+}
+
+static int bpf_xattr_read_permission(const char *name, struct inode *inode)
+{
+ if (WARN_ON(!inode))
+ return -EINVAL;
+
+ /* Allow reading xattr with user. and security.bpf. prefix */
+ if (strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
+ !match_security_bpf_prefix(name))
+ return -EPERM;
+
+ return inode_permission(&nop_mnt_idmap, inode, MAY_READ);
+}
+
/**
* bpf_get_dentry_xattr - get xattr of a dentry
* @dentry: dentry to get xattr from
@@ -101,9 +122,10 @@ __bpf_kfunc int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz)
*
* Get xattr *name__str* of *dentry* and store the output in *value_ptr*.
*
- * For security reasons, only *name__str* with prefix "user." is allowed.
+ * For security reasons, only *name__str* with prefixes "user." or
+ * "security.bpf." are allowed.
*
- * Return: 0 on success, a negative value on error.
+ * Return: length of the xattr value on success, a negative value on error.
*/
__bpf_kfunc int bpf_get_dentry_xattr(struct dentry *dentry, const char *name__str,
struct bpf_dynptr *value_p)
@@ -114,18 +136,12 @@ __bpf_kfunc int bpf_get_dentry_xattr(struct dentry *dentry, const char *name__st
void *value;
int ret;
- if (WARN_ON(!inode))
- return -EINVAL;
-
- if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
- return -EPERM;
-
value_len = __bpf_dynptr_size(value_ptr);
value = __bpf_dynptr_data_rw(value_ptr, value_len);
if (!value)
return -EINVAL;
- ret = inode_permission(&nop_mnt_idmap, inode, MAY_READ);
+ ret = bpf_xattr_read_permission(name__str, inode);
if (ret)
return ret;
return __vfs_getxattr(dentry, inode, name__str, value, value_len);
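
A minimal model of the name-prefix gate introduced above, using the prefixes quoted in the comments ("user." readable, "security.bpf." readable and writable). The real kfuncs additionally call inode_permission(), which this sketch leaves out:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* Prefix strings quoted in the kernel-doc comments above. */
#define USER_PREFIX		"user."
#define SECURITY_BPF_PREFIX	"security.bpf."

/* Reads are allowed for both namespaces. */
static bool xattr_readable_by_bpf(const char *name)
{
	return !strncmp(name, USER_PREFIX, strlen(USER_PREFIX)) ||
	       !strncmp(name, SECURITY_BPF_PREFIX, strlen(SECURITY_BPF_PREFIX));
}

/* Writes (set/remove) are restricted to the BPF LSM namespace. */
static bool xattr_writable_by_bpf(const char *name)
{
	return !strncmp(name, SECURITY_BPF_PREFIX, strlen(SECURITY_BPF_PREFIX));
}

int main(void)
{
	const char *names[] = { "user.comment", "security.bpf.tag", "security.selinux" };

	for (unsigned i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-20s read:%d write:%d\n", names[i],
		       xattr_readable_by_bpf(names[i]), xattr_writable_by_bpf(names[i]));
	return 0;
}
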
@@ -139,9 +155,10 @@ __bpf_kfunc int bpf_get_dentry_xattr(struct dentry *dentry, const char *name__st
*
* Get xattr *name__str* of *file* and store the output in *value_ptr*.
*
- * For security reasons, only *name__str* with prefix "user." is allowed.
+ * For security reasons, only *name__str* with prefixes "user." or
+ * "security.bpf." are allowed.
*
- * Return: 0 on success, a negative value on error.
+ * Return: length of the xattr value on success, a negative value on error.
*/
__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
struct bpf_dynptr *value_p)
@@ -154,6 +171,193 @@ __bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
__bpf_kfunc_end_defs();
+static int bpf_xattr_write_permission(const char *name, struct inode *inode)
+{
+ if (WARN_ON(!inode))
+ return -EINVAL;
+
+ /* Only allow setting and removing security.bpf. xattrs */
+ if (!match_security_bpf_prefix(name))
+ return -EPERM;
+
+ return inode_permission(&nop_mnt_idmap, inode, MAY_WRITE);
+}
+
+/**
+ * bpf_set_dentry_xattr_locked - set a xattr of a dentry
+ * @dentry: dentry to set the xattr on
+ * @name__str: name of the xattr
+ * @value_p: xattr value
+ * @flags: flags to pass into filesystem operations
+ *
+ * Set xattr *name__str* of *dentry* to the value in *value_ptr*.
+ *
+ * For security reasons, only *name__str* with prefix "security.bpf."
+ * is allowed.
+ *
+ * The caller already locked dentry->d_inode.
+ *
+ * Return: 0 on success, a negative value on error.
+ */
+int bpf_set_dentry_xattr_locked(struct dentry *dentry, const char *name__str,
+ const struct bpf_dynptr *value_p, int flags)
+{
+
+ struct bpf_dynptr_kern *value_ptr = (struct bpf_dynptr_kern *)value_p;
+ struct inode *inode = d_inode(dentry);
+ const void *value;
+ u32 value_len;
+ int ret;
+
+ value_len = __bpf_dynptr_size(value_ptr);
+ value = __bpf_dynptr_data(value_ptr, value_len);
+ if (!value)
+ return -EINVAL;
+
+ ret = bpf_xattr_write_permission(name__str, inode);
+ if (ret)
+ return ret;
+
+ ret = __vfs_setxattr(&nop_mnt_idmap, dentry, inode, name__str,
+ value, value_len, flags);
+ if (!ret) {
+ fsnotify_xattr(dentry);
+
+ /* This xattr is set by BPF LSM, so we do not call
+ * security_inode_post_setxattr. Otherwise, we would
+ * risk deadlocks by calling back to the same kfunc.
+ *
+ * This is the same as security_inode_setsecurity().
+ */
+ }
+ return ret;
+}
+
+/**
+ * bpf_remove_dentry_xattr_locked - remove a xattr of a dentry
+ * @dentry: dentry to remove the xattr from
+ * @name__str: name of the xattr
+ *
+ * Remove xattr *name__str* from *dentry*.
+ *
+ * For security reasons, only *name__str* with prefix "security.bpf."
+ * is allowed.
+ *
+ * The caller already locked dentry->d_inode.
+ *
+ * Return: 0 on success, a negative value on error.
+ */
+int bpf_remove_dentry_xattr_locked(struct dentry *dentry, const char *name__str)
+{
+ struct inode *inode = d_inode(dentry);
+ int ret;
+
+ ret = bpf_xattr_write_permission(name__str, inode);
+ if (ret)
+ return ret;
+
+ ret = __vfs_removexattr(&nop_mnt_idmap, dentry, name__str);
+ if (!ret) {
+ fsnotify_xattr(dentry);
+
+ /* This xattr is removed by BPF LSM, so we do not call
+ * security_inode_post_removexattr. Otherwise, we would
+ * risk deadlocks by calling back to the same kfunc.
+ */
+ }
+ return ret;
+}
+
+__bpf_kfunc_start_defs();
+
+/**
+ * bpf_set_dentry_xattr - set a xattr of a dentry
+ * @dentry: dentry to set the xattr on
+ * @name__str: name of the xattr
+ * @value_p: xattr value
+ * @flags: flags to pass into filesystem operations
+ *
+ * Set xattr *name__str* of *dentry* to the value in *value_ptr*.
+ *
+ * For security reasons, only *name__str* with prefix "security.bpf."
+ * is allowed.
+ *
+ * The caller has not locked dentry->d_inode.
+ *
+ * Return: 0 on success, a negative value on error.
+ */
+__bpf_kfunc int bpf_set_dentry_xattr(struct dentry *dentry, const char *name__str,
+ const struct bpf_dynptr *value_p, int flags)
+{
+ struct inode *inode = d_inode(dentry);
+ int ret;
+
+ inode_lock(inode);
+ ret = bpf_set_dentry_xattr_locked(dentry, name__str, value_p, flags);
+ inode_unlock(inode);
+ return ret;
+}
+
+/**
+ * bpf_remove_dentry_xattr - remove a xattr of a dentry
+ * @dentry: dentry to remove the xattr from
+ * @name__str: name of the xattr
+ *
+ * Remove xattr *name__str* from *dentry*.
+ *
+ * For security reasons, only *name__str* with prefix "security.bpf."
+ * is allowed.
+ *
+ * The caller has not locked dentry->d_inode.
+ *
+ * Return: 0 on success, a negative value on error.
+ */
+__bpf_kfunc int bpf_remove_dentry_xattr(struct dentry *dentry, const char *name__str)
+{
+ struct inode *inode = d_inode(dentry);
+ int ret;
+
+ inode_lock(inode);
+ ret = bpf_remove_dentry_xattr_locked(dentry, name__str);
+ inode_unlock(inode);
+ return ret;
+}
+
+#ifdef CONFIG_CGROUPS
+/**
+ * bpf_cgroup_read_xattr - read xattr of a cgroup's node in cgroupfs
+ * @cgroup: cgroup to get xattr from
+ * @name__str: name of the xattr
+ * @value_p: output buffer of the xattr value
+ *
+ * Get xattr *name__str* of *cgroup* and store the output in *value_ptr*.
+ *
+ * For security reasons, only *name__str* with prefix "user." is allowed.
+ *
+ * Return: length of the xattr value on success, a negative value on error.
+ */
+__bpf_kfunc int bpf_cgroup_read_xattr(struct cgroup *cgroup, const char *name__str,
+ struct bpf_dynptr *value_p)
+{
+ struct bpf_dynptr_kern *value_ptr = (struct bpf_dynptr_kern *)value_p;
+ u32 value_len;
+ void *value;
+
+ /* Only allow reading "user.*" xattrs */
+ if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+ return -EPERM;
+
+ value_len = __bpf_dynptr_size(value_ptr);
+ value = __bpf_dynptr_data_rw(value_ptr, value_len);
+ if (!value)
+ return -EINVAL;
+
+ return kernfs_xattr_get(cgroup->kn, name__str, value, value_len);
+}
+#endif /* CONFIG_CGROUPS */
+
+__bpf_kfunc_end_defs();
+
BTF_KFUNCS_START(bpf_fs_kfunc_set_ids)
BTF_ID_FLAGS(func, bpf_get_task_exe_file,
KF_ACQUIRE | KF_TRUSTED_ARGS | KF_RET_NULL)
@@ -161,6 +365,8 @@ BTF_ID_FLAGS(func, bpf_put_file, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_path_d_path, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_get_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_set_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_remove_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
BTF_KFUNCS_END(bpf_fs_kfunc_set_ids)
static int bpf_fs_kfuncs_filter(const struct bpf_prog *prog, u32 kfunc_id)
@@ -171,6 +377,37 @@ static int bpf_fs_kfuncs_filter(const struct bpf_prog *prog, u32 kfunc_id)
return -EACCES;
}
+/* bpf_[set|remove]_dentry_xattr.* hooks have KF_TRUSTED_ARGS and
+ * KF_SLEEPABLE, so they are only available to sleepable hooks with
+ * dentry arguments.
+ *
+ * Setting and removing xattr requires exclusive lock on dentry->d_inode.
+ * Some hooks already locked d_inode, while some hooks have not locked
+ * d_inode. Therefore, we need different kfuncs for different hooks.
+ * Specifically, hooks in the following list (d_inode_locked_hooks)
+ * should call bpf_[set|remove]_dentry_xattr_locked; while other hooks
+ * should call bpf_[set|remove]_dentry_xattr.
+ */
+BTF_SET_START(d_inode_locked_hooks)
+BTF_ID(func, bpf_lsm_inode_post_removexattr)
+BTF_ID(func, bpf_lsm_inode_post_setattr)
+BTF_ID(func, bpf_lsm_inode_post_setxattr)
+BTF_ID(func, bpf_lsm_inode_removexattr)
+BTF_ID(func, bpf_lsm_inode_rmdir)
+BTF_ID(func, bpf_lsm_inode_setattr)
+BTF_ID(func, bpf_lsm_inode_setxattr)
+BTF_ID(func, bpf_lsm_inode_unlink)
+#ifdef CONFIG_SECURITY_PATH
+BTF_ID(func, bpf_lsm_path_unlink)
+BTF_ID(func, bpf_lsm_path_rmdir)
+#endif /* CONFIG_SECURITY_PATH */
+BTF_SET_END(d_inode_locked_hooks)
+
+bool bpf_lsm_has_d_inode_locked(const struct bpf_prog *prog)
+{
+ return btf_id_set_contains(&d_inode_locked_hooks, prog->aux->attach_btf_id);
+}
+
static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
.owner = THIS_MODULE,
.set = &bpf_fs_kfunc_set_ids,
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index fa8515598341..4438637c8900 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -3,9 +3,9 @@
config BTRFS_FS
tristate "Btrfs filesystem support"
select BLK_CGROUP_PUNT_BIO
+ select CRC32
select CRYPTO
select CRYPTO_CRC32C
- select LIBCRC32C
select CRYPTO_XXHASH
select CRYPTO_SHA256
select CRYPTO_BLAKE2B
@@ -52,20 +52,24 @@ config BTRFS_FS_RUN_SANITY_TESTS
bool "Btrfs will run sanity tests upon loading"
depends on BTRFS_FS
help
- This will run some basic sanity tests on the free space cache
- code to make sure it is acting as it should. These are mostly
- regression tests and are only really interesting to btrfs
- developers.
+ This will run sanity tests for core functionality like free space,
+ extent maps, extent io, extent buffers, inodes, qgroups and others,
+ at module load time. These are mostly regression tests and are only
+ interesting to developers.
If unsure, say N.
config BTRFS_DEBUG
bool "Btrfs debugging support"
depends on BTRFS_FS
+ select REF_TRACKER if STACKTRACE_SUPPORT
help
- Enable run-time debugging support for the btrfs filesystem. This may
- enable additional and expensive checks with negative impact on
- performance, or export extra information via sysfs.
+ Enable run-time debugging support for the btrfs filesystem.
+
+ This enables additional, potentially expensive checks, debugging
+ functionality and sysfs-exported information, such as leak checks of
+ internal objects, optional forced space fragmentation and
+ /sys/fs/btrfs/debug. This has a negative impact on performance.
If unsure, say N.
@@ -73,8 +77,10 @@ config BTRFS_ASSERT
bool "Btrfs assert support"
depends on BTRFS_FS
help
- Enable run-time assertion checking. This will result in panics if
- any of the assertions trip. This is meant for btrfs developers only.
+ Enable run-time assertion checking. Additional safety checks are
+ performed; they are simple enough not to affect performance but they
+ verify invariants and assumptions the code relies on. This may result
+ in panics, and is meant for developers, but can be enabled in general.
If unsure, say N.
@@ -89,7 +95,14 @@ config BTRFS_EXPERIMENTAL
Current list:
- - extent map shrinker - performance problems with too frequent shrinks
+ - COW fixup worker warning - last warning before removing the
+ functionality catching out-of-band page
+ dirtying, not necessary since 5.8
+
+ - RAID mirror read policy - additional read policies for balancing
+ reading from redundant block group
+ profiles (currently: pid, round-robin,
+ fixed devid)
- send stream protocol v3 - fs-verity support
@@ -102,15 +115,6 @@ config BTRFS_EXPERIMENTAL
- extent tree v2 - complex rework of extent tracking
- If unsure, say N.
-
-config BTRFS_FS_REF_VERIFY
- bool "Btrfs with the ref verify tool compiled in"
- depends on BTRFS_FS
- default n
- help
- Enable run-time extent reference verification instrumentation. This
- is meant to be used by btrfs developers for tracking down extent
- reference problems or verifying they didn't break something.
+ - large folio support
If unsure, say N.
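
For quick reference, an illustrative .config fragment (values chosen here, not taken from the patch) enabling the debug-oriented options described above; note that ref-verify is now built when BTRFS_DEBUG is set, instead of the removed BTRFS_FS_REF_VERIFY option:

CONFIG_BTRFS_FS=m
CONFIG_BTRFS_DEBUG=y
CONFIG_BTRFS_ASSERT=y
CONFIG_BTRFS_FS_RUN_SANITY_TESTS=y
CONFIG_BTRFS_EXPERIMENTAL=y
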
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 3cfc440c636c..743d7677b175 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -36,7 +36,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
lru_cache.o raid-stripe-tree.o fiemap.o direct-io.o
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
-btrfs-$(CONFIG_BTRFS_FS_REF_VERIFY) += ref-verify.o
+btrfs-$(CONFIG_BTRFS_DEBUG) += ref-verify.o
btrfs-$(CONFIG_BLK_DEV_ZONED) += zoned.o
btrfs-$(CONFIG_FS_VERITY) += verity.o
@@ -44,4 +44,4 @@ btrfs-$(CONFIG_BTRFS_FS_RUN_SANITY_TESTS) += tests/free-space-tests.o \
tests/extent-buffer-tests.o tests/btrfs-tests.o \
tests/extent-io-tests.o tests/inode-tests.o tests/qgroup-tests.o \
tests/free-space-tree-tests.o tests/extent-map-tests.o \
- tests/raid-stripe-tree-tests.o
+ tests/raid-stripe-tree-tests.o tests/delayed-refs-tests.o
diff --git a/fs/btrfs/accessors.c b/fs/btrfs/accessors.c
index e3716516ca38..1248aa2535d3 100644
--- a/fs/btrfs/accessors.c
+++ b/fs/btrfs/accessors.c
@@ -9,27 +9,24 @@
#include "fs.h"
#include "accessors.h"
-static bool check_setget_bounds(const struct extent_buffer *eb,
- const void *ptr, unsigned off, int size)
+static void __cold report_setget_bounds(const struct extent_buffer *eb,
+ const void *ptr, unsigned off, int size)
{
- const unsigned long member_offset = (unsigned long)ptr + off;
+ unsigned long member_offset = (unsigned long)ptr + off;
- if (unlikely(member_offset + size > eb->len)) {
- btrfs_warn(eb->fs_info,
- "bad eb member %s: ptr 0x%lx start %llu member offset %lu size %d",
- (member_offset > eb->len ? "start" : "end"),
- (unsigned long)ptr, eb->start, member_offset, size);
- return false;
- }
-
- return true;
+ btrfs_warn(eb->fs_info,
+ "bad eb member %s: ptr 0x%lx start %llu member offset %lu size %d",
+ (member_offset > eb->len ? "start" : "end"),
+ (unsigned long)ptr, eb->start, member_offset, size);
}
-void btrfs_init_map_token(struct btrfs_map_token *token, struct extent_buffer *eb)
+/* Copy bytes from @src1 and @src2 to @dest. */
+static __always_inline void memcpy_split_src(char *dest, const char *src1,
+ const char *src2, const size_t len1,
+ const size_t total)
{
- token->eb = eb;
- token->kaddr = folio_address(eb->folios[0]);
- token->offset = 0;
+ memcpy(dest, src1, len1);
+ memcpy(dest + len1, src2, total - len1);
}
/*
@@ -41,134 +38,77 @@ void btrfs_init_map_token(struct btrfs_map_token *token, struct extent_buffer *e
* - btrfs_set_8 (for 8/16/32/64)
* - btrfs_get_8 (for 8/16/32/64)
*
- * Generic helpers with a token (cached address of the most recently accessed
- * page):
- * - btrfs_set_token_8 (for 8/16/32/64)
- * - btrfs_get_token_8 (for 8/16/32/64)
- *
* The set/get functions handle data spanning two pages transparently, in case
* metadata block size is larger than page. Every pointer to metadata items is
* an offset into the extent buffer page array, cast to a specific type. This
* gives us all the type checking.
*
* The extent buffer pages stored in the array folios may not form a contiguous
- * phyusical range, but the API functions assume the linear offset to the range
+ * physical range, but the API functions assume the linear offset to the range
* from 0 to metadata node size.
*/
#define DEFINE_BTRFS_SETGET_BITS(bits) \
-u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \
- const void *ptr, unsigned long off) \
-{ \
- const unsigned long member_offset = (unsigned long)ptr + off; \
- const unsigned long idx = get_eb_folio_index(token->eb, member_offset); \
- const unsigned long oil = get_eb_offset_in_folio(token->eb, \
- member_offset);\
- const int unit_size = token->eb->folio_size; \
- const int unit_shift = token->eb->folio_shift; \
- const int size = sizeof(u##bits); \
- u8 lebytes[sizeof(u##bits)]; \
- const int part = unit_size - oil; \
- \
- ASSERT(token); \
- ASSERT(token->kaddr); \
- ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \
- if (token->offset <= member_offset && \
- member_offset + size <= token->offset + unit_size) { \
- return get_unaligned_le##bits(token->kaddr + oil); \
- } \
- token->kaddr = folio_address(token->eb->folios[idx]); \
- token->offset = idx << unit_shift; \
- if (INLINE_EXTENT_BUFFER_PAGES == 1 || oil + size <= unit_size) \
- return get_unaligned_le##bits(token->kaddr + oil); \
- \
- memcpy(lebytes, token->kaddr + oil, part); \
- token->kaddr = folio_address(token->eb->folios[idx + 1]); \
- token->offset = (idx + 1) << unit_shift; \
- memcpy(lebytes + part, token->kaddr, size - part); \
- return get_unaligned_le##bits(lebytes); \
-} \
u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
const void *ptr, unsigned long off) \
{ \
const unsigned long member_offset = (unsigned long)ptr + off; \
const unsigned long idx = get_eb_folio_index(eb, member_offset);\
- const unsigned long oil = get_eb_offset_in_folio(eb, \
- member_offset);\
- const int unit_size = eb->folio_size; \
- char *kaddr = folio_address(eb->folios[idx]); \
- const int size = sizeof(u##bits); \
- const int part = unit_size - oil; \
- u8 lebytes[sizeof(u##bits)]; \
- \
- ASSERT(check_setget_bounds(eb, ptr, off, size)); \
- if (INLINE_EXTENT_BUFFER_PAGES == 1 || oil + size <= unit_size) \
- return get_unaligned_le##bits(kaddr + oil); \
- \
- memcpy(lebytes, kaddr + oil, part); \
- kaddr = folio_address(eb->folios[idx + 1]); \
- memcpy(lebytes + part, kaddr, size - part); \
- return get_unaligned_le##bits(lebytes); \
-} \
-void btrfs_set_token_##bits(struct btrfs_map_token *token, \
- const void *ptr, unsigned long off, \
- u##bits val) \
-{ \
- const unsigned long member_offset = (unsigned long)ptr + off; \
- const unsigned long idx = get_eb_folio_index(token->eb, member_offset); \
- const unsigned long oil = get_eb_offset_in_folio(token->eb, \
+ const unsigned long oif = get_eb_offset_in_folio(eb, \
member_offset);\
- const int unit_size = token->eb->folio_size; \
- const int unit_shift = token->eb->folio_shift; \
- const int size = sizeof(u##bits); \
+ char *kaddr = folio_address(eb->folios[idx]) + oif; \
+ const int part = eb->folio_size - oif; \
u8 lebytes[sizeof(u##bits)]; \
- const int part = unit_size - oil; \
\
- ASSERT(token); \
- ASSERT(token->kaddr); \
- ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \
- if (token->offset <= member_offset && \
- member_offset + size <= token->offset + unit_size) { \
- put_unaligned_le##bits(val, token->kaddr + oil); \
- return; \
+ if (unlikely(member_offset + sizeof(u##bits) > eb->len)) { \
+ report_setget_bounds(eb, ptr, off, sizeof(u##bits)); \
+ return 0; \
} \
- token->kaddr = folio_address(token->eb->folios[idx]); \
- token->offset = idx << unit_shift; \
- if (INLINE_EXTENT_BUFFER_PAGES == 1 || \
- oil + size <= unit_size) { \
- put_unaligned_le##bits(val, token->kaddr + oil); \
- return; \
+ if (INLINE_EXTENT_BUFFER_PAGES == 1 || sizeof(u##bits) == 1 || \
+ likely(sizeof(u##bits) <= part)) \
+ return get_unaligned_le##bits(kaddr); \
+ \
+ if (sizeof(u##bits) == 2) { \
+ lebytes[0] = *kaddr; \
+ kaddr = folio_address(eb->folios[idx + 1]); \
+ lebytes[1] = *kaddr; \
+ } else { \
+ memcpy_split_src(lebytes, kaddr, \
+ folio_address(eb->folios[idx + 1]), \
+ part, sizeof(u##bits)); \
} \
- put_unaligned_le##bits(val, lebytes); \
- memcpy(token->kaddr + oil, lebytes, part); \
- token->kaddr = folio_address(token->eb->folios[idx + 1]); \
- token->offset = (idx + 1) << unit_shift; \
- memcpy(token->kaddr, lebytes + part, size - part); \
+ return get_unaligned_le##bits(lebytes); \
} \
void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \
unsigned long off, u##bits val) \
{ \
const unsigned long member_offset = (unsigned long)ptr + off; \
const unsigned long idx = get_eb_folio_index(eb, member_offset);\
- const unsigned long oil = get_eb_offset_in_folio(eb, \
+ const unsigned long oif = get_eb_offset_in_folio(eb, \
member_offset);\
- const int unit_size = eb->folio_size; \
- char *kaddr = folio_address(eb->folios[idx]); \
- const int size = sizeof(u##bits); \
- const int part = unit_size - oil; \
+ char *kaddr = folio_address(eb->folios[idx]) + oif; \
+ const int part = eb->folio_size - oif; \
u8 lebytes[sizeof(u##bits)]; \
\
- ASSERT(check_setget_bounds(eb, ptr, off, size)); \
- if (INLINE_EXTENT_BUFFER_PAGES == 1 || \
- oil + size <= unit_size) { \
- put_unaligned_le##bits(val, kaddr + oil); \
+ if (unlikely(member_offset + sizeof(u##bits) > eb->len)) { \
+ report_setget_bounds(eb, ptr, off, sizeof(u##bits)); \
+ return; \
+ } \
+ if (INLINE_EXTENT_BUFFER_PAGES == 1 || sizeof(u##bits) == 1 || \
+ likely(sizeof(u##bits) <= part)) { \
+ put_unaligned_le##bits(val, kaddr); \
return; \
} \
- \
put_unaligned_le##bits(val, lebytes); \
- memcpy(kaddr + oil, lebytes, part); \
- kaddr = folio_address(eb->folios[idx + 1]); \
- memcpy(kaddr, lebytes + part, size - part); \
+ if (sizeof(u##bits) == 2) { \
+ *kaddr = lebytes[0]; \
+ kaddr = folio_address(eb->folios[idx + 1]); \
+ *kaddr = lebytes[1]; \
+ } else { \
+ memcpy(kaddr, lebytes, part); \
+ kaddr = folio_address(eb->folios[idx + 1]); \
+ memcpy(kaddr, lebytes + part, sizeof(u##bits) - part); \
+ } \
}
DEFINE_BTRFS_SETGET_BITS(8)
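
To make the split-copy behaviour concrete, here is a small standalone sketch (plain userspace C, not btrfs code) that reconstructs a little-endian u32 whose bytes straddle two non-contiguous buffers, the same idea memcpy_split_src() implements for members crossing a folio boundary:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy @total bytes to @dest: the first @len1 from @src1, the rest from @src2. */
static void memcpy_split_src(char *dest, const char *src1,
			     const char *src2, size_t len1, size_t total)
{
	memcpy(dest, src1, len1);
	memcpy(dest + len1, src2, total - len1);
}

static uint32_t get_le32(const uint8_t b[4])
{
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	/* 0x11223344 stored little-endian, split 3+1 across two "folios". */
	uint8_t folio_a[] = { 0x44, 0x33, 0x22 };
	uint8_t folio_b[] = { 0x11 };
	uint8_t lebytes[4];

	memcpy_split_src((char *)lebytes, (const char *)folio_a,
			 (const char *)folio_b, sizeof(folio_a), sizeof(lebytes));
	printf("0x%08x\n", get_le32(lebytes));	/* prints 0x11223344 */
	return 0;
}
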
diff --git a/fs/btrfs/accessors.h b/fs/btrfs/accessors.h
index 7a7e0ef69973..78721412951c 100644
--- a/fs/btrfs/accessors.h
+++ b/fs/btrfs/accessors.h
@@ -12,17 +12,11 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <uapi/linux/btrfs_tree.h>
+#include "fs.h"
+#include "extent_io.h"
struct extent_buffer;
-struct btrfs_map_token {
- struct extent_buffer *eb;
- char *kaddr;
- unsigned long offset;
-};
-
-void btrfs_init_map_token(struct btrfs_map_token *token, struct extent_buffer *eb);
-
/*
* Some macros to generate set/get functions for the struct fields. This
* assumes there is a lefoo_to_cpu for every type, so lets make a simple one
@@ -55,11 +49,6 @@ static inline void put_unaligned_le8(u8 val, void *p)
sizeof_field(type, member)))
#define DECLARE_BTRFS_SETGET_BITS(bits) \
-u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \
- const void *ptr, unsigned long off); \
-void btrfs_set_token_##bits(struct btrfs_map_token *token, \
- const void *ptr, unsigned long off, \
- u##bits val); \
u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
const void *ptr, unsigned long off); \
void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \
@@ -82,18 +71,6 @@ static inline void btrfs_set_##name(const struct extent_buffer *eb, type *s, \
{ \
static_assert(sizeof(u##bits) == sizeof_field(type, member)); \
btrfs_set_##bits(eb, s, offsetof(type, member), val); \
-} \
-static inline u##bits btrfs_token_##name(struct btrfs_map_token *token, \
- const type *s) \
-{ \
- static_assert(sizeof(u##bits) == sizeof_field(type, member)); \
- return btrfs_get_token_##bits(token, s, offsetof(type, member));\
-} \
-static inline void btrfs_set_token_##name(struct btrfs_map_token *token,\
- type *s, u##bits val) \
-{ \
- static_assert(sizeof(u##bits) == sizeof_field(type, member)); \
- btrfs_set_token_##bits(token, s, offsetof(type, member), val); \
}
#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
@@ -478,18 +455,6 @@ static inline void btrfs_set_item_##member(const struct extent_buffer *eb, \
int slot, u32 val) \
{ \
btrfs_set_raw_item_##member(eb, btrfs_item_nr(eb, slot), val); \
-} \
-static inline u32 btrfs_token_item_##member(struct btrfs_map_token *token, \
- int slot) \
-{ \
- struct btrfs_item *item = btrfs_item_nr(token->eb, slot); \
- return btrfs_token_raw_item_##member(token, item); \
-} \
-static inline void btrfs_set_token_item_##member(struct btrfs_map_token *token, \
- int slot, u32 val) \
-{ \
- struct btrfs_item *item = btrfs_item_nr(token->eb, slot); \
- btrfs_set_token_raw_item_##member(token, item, val); \
}
BTRFS_ITEM_SETGET_FUNCS(offset)
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index e0ba00d64ea0..c336e2ab7f8a 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -14,12 +14,13 @@
#include "ctree.h"
#include "xattr.h"
#include "acl.h"
+#include "misc.h"
struct posix_acl *btrfs_get_acl(struct inode *inode, int type, bool rcu)
{
int size;
const char *name;
- char *value = NULL;
+ char AUTO_KFREE(value);
struct posix_acl *acl;
if (rcu)
@@ -49,7 +50,6 @@ struct posix_acl *btrfs_get_acl(struct inode *inode, int type, bool rcu)
acl = NULL;
else
acl = ERR_PTR(size);
- kfree(value);
return acl;
}
@@ -59,7 +59,7 @@ int __btrfs_set_acl(struct btrfs_trans_handle *trans, struct inode *inode,
{
int ret, size = 0;
const char *name;
- char *value = NULL;
+ char AUTO_KFREE(value);
switch (type) {
case ACL_TYPE_ACCESS:
@@ -85,28 +85,23 @@ int __btrfs_set_acl(struct btrfs_trans_handle *trans, struct inode *inode,
nofs_flag = memalloc_nofs_save();
value = kmalloc(size, GFP_KERNEL);
memalloc_nofs_restore(nofs_flag);
- if (!value) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!value)
+ return -ENOMEM;
ret = posix_acl_to_xattr(&init_user_ns, acl, value, size);
if (ret < 0)
- goto out;
+ return ret;
}
if (trans)
ret = btrfs_setxattr(trans, inode, name, value, size, 0);
else
ret = btrfs_setxattr_trans(inode, name, value, size, 0);
+ if (ret < 0)
+ return ret;
-out:
- kfree(value);
-
- if (!ret)
- set_cached_acl(inode, type, acl);
-
- return ret;
+ set_cached_acl(inode, type, acl);
+ return 0;
}
int btrfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
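
AUTO_KFREE() is presumably a btrfs wrapper around the scope-based cleanup support in <linux/cleanup.h>; as a hedged sketch of the underlying pattern (an assumption about the macro, not its actual definition):

#include <linux/cleanup.h>
#include <linux/slab.h>

/* Sketch only: the buffer is kfree()d automatically on every return path,
 * which is what lets the patch drop the "out:" label and explicit kfree(). */
static int example_get_value(size_t size)
{
	char *value __free(kfree) = kmalloc(size, GFP_KERNEL);

	if (!value)
		return -ENOMEM;
	/* ... fill and use value ... */
	return 0;
}
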
diff --git a/fs/btrfs/acl.h b/fs/btrfs/acl.h
index 48b9ddae4a46..0458cd51ed48 100644
--- a/fs/btrfs/acl.h
+++ b/fs/btrfs/acl.h
@@ -3,6 +3,8 @@
#ifndef BTRFS_ACL_H
#define BTRFS_ACL_H
+#include <linux/types.h>
+
struct posix_acl;
struct inode;
struct btrfs_trans_handle;
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 361a866c1995..6c6f3bb58f4e 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -18,7 +18,7 @@ enum {
};
#define NO_THRESHOLD (-1)
-#define DFT_THRESHOLD (32)
+#define DEFAULT_THRESHOLD (32)
struct btrfs_workqueue {
struct workqueue_struct *normal_wq;
@@ -94,9 +94,9 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
ret->limit_active = limit_active;
if (thresh == 0)
- thresh = DFT_THRESHOLD;
+ thresh = DEFAULT_THRESHOLD;
/* For low threshold, disabling threshold is a better choice */
- if (thresh < DFT_THRESHOLD) {
+ if (thresh < DEFAULT_THRESHOLD) {
ret->current_active = limit_active;
ret->thresh = NO_THRESHOLD;
} else {
@@ -168,7 +168,7 @@ static inline void thresh_exec_hook(struct btrfs_workqueue *wq)
{
int new_current_active;
long pending;
- int need_change = 0;
+ bool need_change = false;
if (wq->thresh == NO_THRESHOLD)
return;
@@ -196,15 +196,14 @@ static inline void thresh_exec_hook(struct btrfs_workqueue *wq)
new_current_active--;
new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
if (new_current_active != wq->current_active) {
- need_change = 1;
+ need_change = true;
wq->current_active = new_current_active;
}
out:
spin_unlock(&wq->thres_lock);
- if (need_change) {
+ if (need_change)
workqueue_set_max_active(wq->normal_wq, wq->current_active);
- }
}
static void run_ordered_work(struct btrfs_workqueue *wq,
@@ -220,8 +219,7 @@ static void run_ordered_work(struct btrfs_workqueue *wq,
spin_lock_irqsave(lock, flags);
if (list_empty(list))
break;
- work = list_entry(list->next, struct btrfs_work,
- ordered_list);
+ work = list_first_entry(list, struct btrfs_work, ordered_list);
if (!test_bit(WORK_DONE_BIT, &work->flags))
break;
/*
@@ -296,7 +294,7 @@ static void btrfs_work_helper(struct work_struct *normal_work)
struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
normal_work);
struct btrfs_workqueue *wq = work->wq;
- int need_order = 0;
+ bool need_order = false;
/*
* We should not touch things inside work in the following cases:
@@ -307,7 +305,7 @@ static void btrfs_work_helper(struct work_struct *normal_work)
* So we save the needed things here.
*/
if (work->ordered_func)
- need_order = 1;
+ need_order = true;
trace_btrfs_work_sched(work);
thresh_exec_hook(wq);
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 04f53ca548e1..78da47a3d00e 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -250,6 +250,21 @@ static int prelim_ref_compare(const struct prelim_ref *ref1,
return 0;
}
+static int prelim_ref_rb_add_cmp(const struct rb_node *new,
+ const struct rb_node *exist)
+{
+ const struct prelim_ref *ref_new =
+ rb_entry(new, struct prelim_ref, rbnode);
+ const struct prelim_ref *ref_exist =
+ rb_entry(exist, struct prelim_ref, rbnode);
+
+ /*
+	 * prelim_ref_compare() expects the existing ref as its first parameter,
+	 * which is the opposite of the rb_find_add_cached() argument order.
+ */
+ return prelim_ref_compare(ref_exist, ref_new);
+}
+
static void update_share_count(struct share_check *sc, int oldcount,
int newcount, const struct prelim_ref *newref)
{
@@ -278,55 +293,39 @@ static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
struct share_check *sc)
{
struct rb_root_cached *root;
- struct rb_node **p;
- struct rb_node *parent = NULL;
- struct prelim_ref *ref;
- int result;
- bool leftmost = true;
+ struct rb_node *exist;
root = &preftree->root;
- p = &root->rb_root.rb_node;
+ exist = rb_find_add_cached(&newref->rbnode, root, prelim_ref_rb_add_cmp);
+ if (exist) {
+ struct prelim_ref *ref = rb_entry(exist, struct prelim_ref, rbnode);
+ /* Identical refs, merge them and free @newref */
+ struct extent_inode_elem *eie = ref->inode_list;
- while (*p) {
- parent = *p;
- ref = rb_entry(parent, struct prelim_ref, rbnode);
- result = prelim_ref_compare(ref, newref);
- if (result < 0) {
- p = &(*p)->rb_left;
- } else if (result > 0) {
- p = &(*p)->rb_right;
- leftmost = false;
- } else {
- /* Identical refs, merge them and free @newref */
- struct extent_inode_elem *eie = ref->inode_list;
-
- while (eie && eie->next)
- eie = eie->next;
+ while (eie && eie->next)
+ eie = eie->next;
- if (!eie)
- ref->inode_list = newref->inode_list;
- else
- eie->next = newref->inode_list;
- trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
- preftree->count);
- /*
- * A delayed ref can have newref->count < 0.
- * The ref->count is updated to follow any
- * BTRFS_[ADD|DROP]_DELAYED_REF actions.
- */
- update_share_count(sc, ref->count,
- ref->count + newref->count, newref);
- ref->count += newref->count;
- free_pref(newref);
- return;
- }
+ if (!eie)
+ ref->inode_list = newref->inode_list;
+ else
+ eie->next = newref->inode_list;
+ trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
+ preftree->count);
+ /*
+ * A delayed ref can have newref->count < 0.
+ * The ref->count is updated to follow any
+ * BTRFS_[ADD|DROP]_DELAYED_REF actions.
+ */
+ update_share_count(sc, ref->count,
+ ref->count + newref->count, newref);
+ ref->count += newref->count;
+ free_pref(newref);
+ return;
}
update_share_count(sc, 0, newref->count, newref);
preftree->count++;
trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
- rb_link_node(&newref->rbnode, parent, p);
- rb_insert_color_cached(&newref->rbnode, root, leftmost);
}
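
Since the comparator argument order is the subtle part of this conversion, here is a generic, hedged sketch of the rb_find_add_cached() pattern with simplified example types (not btrfs code):

#include <linux/rbtree.h>
#include <linux/types.h>

struct item {
	struct rb_node rbnode;
	u64 key;
};

/* rb_find_add_cached() passes the node being inserted first and the
 * existing tree node second; return <0 to go left, >0 to go right,
 * 0 when the keys match. */
static int item_add_cmp(const struct rb_node *new, const struct rb_node *exist)
{
	const struct item *a = rb_entry(new, struct item, rbnode);
	const struct item *b = rb_entry(exist, struct item, rbnode);

	if (a->key < b->key)
		return -1;
	if (a->key > b->key)
		return 1;
	return 0;
}

static bool item_insert(struct rb_root_cached *root, struct item *item)
{
	/* Returns the colliding node if one exists, NULL when inserted. */
	return rb_find_add_cached(&item->rbnode, root, item_add_cmp) == NULL;
}
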
/*
@@ -667,10 +666,9 @@ static int resolve_indirect_ref(struct btrfs_backref_walk_ctx *ctx,
ret = btrfs_search_old_slot(root, &search_key, path, ctx->time_seq);
btrfs_debug(ctx->fs_info,
- "search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
- ref->root_id, level, ref->count, ret,
- ref->key_for_search.objectid, ref->key_for_search.type,
- ref->key_for_search.offset);
+"search slot in root %llu (level %d, ref count %d) returned %d for key " BTRFS_KEY_FMT,
+ ref->root_id, level, ref->count, ret,
+ BTRFS_KEY_FMT_VALUE(&ref->key_for_search));
if (ret < 0)
goto out;
@@ -734,7 +732,6 @@ static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
struct preftrees *preftrees,
struct share_check *sc)
{
- int err;
int ret = 0;
struct ulist *parents;
struct ulist_node *node;
@@ -753,6 +750,7 @@ static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
*/
while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
struct prelim_ref *ref;
+ int ret2;
ref = rb_entry(rnode, struct prelim_ref, rbnode);
if (WARN(ref->parent,
@@ -774,18 +772,18 @@ static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
ret = BACKREF_FOUND_SHARED;
goto out;
}
- err = resolve_indirect_ref(ctx, path, preftrees, ref, parents);
+ ret2 = resolve_indirect_ref(ctx, path, preftrees, ref, parents);
/*
* we can only tolerate ENOENT,otherwise,we should catch error
* and return directly.
*/
- if (err == -ENOENT) {
+ if (ret2 == -ENOENT) {
prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref,
NULL);
continue;
- } else if (err) {
+ } else if (ret2) {
free_pref(ref);
- ret = err;
+ ret = ret2;
goto out;
}
@@ -860,7 +858,7 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info,
free_pref(ref);
return PTR_ERR(eb);
}
- if (!extent_buffer_uptodate(eb)) {
+ if (unlikely(!extent_buffer_uptodate(eb))) {
free_pref(ref);
free_extent_buffer(eb);
return -EIO;
@@ -1063,7 +1061,7 @@ static int add_inline_refs(struct btrfs_backref_walk_ctx *ctx,
iref = (struct btrfs_extent_inline_ref *)ptr;
type = btrfs_get_extent_inline_ref_type(leaf, iref,
BTRFS_REF_TYPE_ANY);
- if (type == BTRFS_REF_TYPE_INVALID)
+ if (unlikely(type == BTRFS_REF_TYPE_INVALID))
return -EUCLEAN;
offset = btrfs_extent_inline_ref_offset(leaf, iref);
@@ -1400,22 +1398,22 @@ static int find_parent_nodes(struct btrfs_backref_walk_ctx *ctx,
ASSERT(ctx->roots == NULL);
key.objectid = ctx->bytenr;
- key.offset = (u64)-1;
if (btrfs_fs_incompat(ctx->fs_info, SKINNY_METADATA))
key.type = BTRFS_METADATA_ITEM_KEY;
else
key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = (u64)-1;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
if (!ctx->trans) {
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
}
if (ctx->time_seq == BTRFS_SEQ_LAST)
- path->skip_locking = 1;
+ path->skip_locking = true;
again:
head = NULL;
@@ -1423,7 +1421,7 @@ again:
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
- if (ret == 0) {
+ if (unlikely(ret == 0)) {
/*
* Key with offset -1 found, there would have to exist an extent
* item with such offset, but this is out of the valid range.
@@ -1562,7 +1560,7 @@ again:
btrfs_release_path(path);
- ret = add_missing_keys(ctx->fs_info, &preftrees, path->skip_locking == 0);
+ ret = add_missing_keys(ctx->fs_info, &preftrees, !path->skip_locking);
if (ret)
goto out;
@@ -1615,7 +1613,7 @@ again:
ret = PTR_ERR(eb);
goto out;
}
- if (!extent_buffer_uptodate(eb)) {
+ if (unlikely(!extent_buffer_uptodate(eb))) {
free_extent_buffer(eb);
ret = -EIO;
goto out;
@@ -1653,7 +1651,7 @@ again:
* case.
*/
ASSERT(eie);
- if (!eie) {
+ if (unlikely(!eie)) {
ret = -EUCLEAN;
goto out;
}
@@ -1691,7 +1689,7 @@ out:
* @ctx->bytenr and @ctx->extent_item_pos. The bytenr of the found leaves are
* added to the ulist at @ctx->refs, and that ulist is allocated by this
* function. The caller should free the ulist with free_leaf_list() if
- * @ctx->ignore_extent_item_pos is false, otherwise a fimple ulist_free() is
+ * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is
* enough.
*
* Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
@@ -2202,22 +2200,21 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
int ret;
u64 flags;
u64 size = 0;
- u32 item_size;
const struct extent_buffer *eb;
struct btrfs_extent_item *ei;
struct btrfs_key key;
+ key.objectid = logical;
if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
key.type = BTRFS_METADATA_ITEM_KEY;
else
key.type = BTRFS_EXTENT_ITEM_KEY;
- key.objectid = logical;
key.offset = (u64)-1;
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
if (ret < 0)
return ret;
- if (ret == 0) {
+ if (unlikely(ret == 0)) {
/*
* Key with offset -1 found, there would have to exist an extent
* item with such offset, but this is out of the valid range.
@@ -2245,7 +2242,6 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
}
eb = path->nodes[0];
- item_size = btrfs_item_size(eb, path->slots[0]);
ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
flags = btrfs_extent_flags(eb, ei);
@@ -2253,7 +2249,7 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
btrfs_debug(fs_info,
"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
logical, logical - found_key->objectid, found_key->objectid,
- found_key->offset, flags, item_size);
+ found_key->offset, flags, btrfs_item_size(eb, path->slots[0]));
WARN_ON(!flags_ret);
if (flags_ret) {
@@ -2315,7 +2311,7 @@ static int get_extent_inline_ref(unsigned long *ptr,
*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
BTRFS_REF_TYPE_ANY);
- if (*out_type == BTRFS_REF_TYPE_INVALID)
+ if (unlikely(*out_type == BTRFS_REF_TYPE_INVALID))
return -EUCLEAN;
*ptr += btrfs_extent_inline_ref_size(*out_type);
@@ -2549,17 +2545,20 @@ static int build_ino_list(u64 inum, u64 offset, u64 num_bytes, u64 root, void *c
}
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
void *ctx, bool ignore_offset)
{
struct btrfs_backref_walk_ctx walk_ctx = { 0 };
int ret;
u64 flags = 0;
struct btrfs_key found_key;
- int search_commit_root = path->search_commit_root;
+ struct btrfs_path *path;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
- btrfs_release_path(path);
+ btrfs_free_path(path);
if (ret < 0)
return ret;
if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
@@ -2572,8 +2571,7 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
walk_ctx.extent_item_pos = logical - found_key.objectid;
walk_ctx.fs_info = fs_info;
- return iterate_extent_inodes(&walk_ctx, search_commit_root,
- build_ino_list, ctx);
+ return iterate_extent_inodes(&walk_ctx, false, build_ino_list, ctx);
}
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
@@ -2787,7 +2785,7 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
* allocates space to return multiple file system paths for an inode.
* total_bytes to allocate are passed, note that space usable for actual path
* information will be total_bytes - sizeof(struct inode_fs_paths).
- * the returned pointer must be freed with free_ipath() in the end.
+ * the returned pointer must be freed with __free_inode_fs_paths() in the end.
*/
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
struct btrfs_path *path)
@@ -2812,14 +2810,6 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
return ifp;
}
-void free_ipath(struct inode_fs_paths *ipath)
-{
- if (!ipath)
- return;
- kvfree(ipath->fspath);
- kfree(ipath);
-}
-
struct btrfs_backref_iter *btrfs_backref_iter_alloc(struct btrfs_fs_info *fs_info)
{
struct btrfs_backref_iter *ret;
@@ -2835,8 +2825,8 @@ struct btrfs_backref_iter *btrfs_backref_iter_alloc(struct btrfs_fs_info *fs_inf
}
/* Current backref iterator only supports iteration in commit root */
- ret->path->search_commit_root = 1;
- ret->path->skip_locking = 1;
+ ret->path->search_commit_root = true;
+ ret->path->skip_locking = true;
ret->fs_info = fs_info;
return ret;
@@ -2869,7 +2859,7 @@ int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
if (ret < 0)
return ret;
- if (ret == 0) {
+ if (unlikely(ret == 0)) {
/*
* Key with offset -1 found, there would have to exist an extent
* item with such offset, but this is out of the valid range.
@@ -2877,8 +2867,8 @@ int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
ret = -EUCLEAN;
goto release;
}
- if (path->slots[0] == 0) {
- WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ if (unlikely(path->slots[0] == 0)) {
+ DEBUG_WARN();
ret = -EUCLEAN;
goto release;
}
@@ -3022,9 +3012,6 @@ void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
cache->rb_root = RB_ROOT;
for (i = 0; i < BTRFS_MAX_LEVEL; i++)
INIT_LIST_HEAD(&cache->pending[i]);
- INIT_LIST_HEAD(&cache->changed);
- INIT_LIST_HEAD(&cache->detached);
- INIT_LIST_HEAD(&cache->leaves);
INIT_LIST_HEAD(&cache->pending_edge);
INIT_LIST_HEAD(&cache->useless_node);
cache->fs_info = fs_info;
@@ -3132,29 +3119,17 @@ void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
struct btrfs_backref_node *node)
{
- struct btrfs_backref_node *upper;
struct btrfs_backref_edge *edge;
if (!node)
return;
- BUG_ON(!node->lowest && !node->detached);
while (!list_empty(&node->upper)) {
- edge = list_entry(node->upper.next, struct btrfs_backref_edge,
- list[LOWER]);
- upper = edge->node[UPPER];
+ edge = list_first_entry(&node->upper, struct btrfs_backref_edge,
+ list[LOWER]);
list_del(&edge->list[LOWER]);
list_del(&edge->list[UPPER]);
btrfs_backref_free_edge(cache, edge);
-
- /*
- * Add the node to leaf node list if no other child block
- * cached.
- */
- if (list_empty(&upper->lower)) {
- list_add_tail(&upper->lower, &cache->leaves);
- upper->lowest = 1;
- }
}
btrfs_backref_drop_node(cache, node);
@@ -3166,49 +3141,25 @@ void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
{
struct btrfs_backref_node *node;
- int i;
- while (!list_empty(&cache->detached)) {
- node = list_entry(cache->detached.next,
- struct btrfs_backref_node, list);
+ while ((node = rb_entry_safe(rb_first(&cache->rb_root),
+ struct btrfs_backref_node, rb_node)))
btrfs_backref_cleanup_node(cache, node);
- }
- while (!list_empty(&cache->leaves)) {
- node = list_entry(cache->leaves.next,
- struct btrfs_backref_node, lower);
- btrfs_backref_cleanup_node(cache, node);
- }
-
- for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
- while (!list_empty(&cache->pending[i])) {
- node = list_first_entry(&cache->pending[i],
- struct btrfs_backref_node,
- list);
- btrfs_backref_cleanup_node(cache, node);
- }
- }
ASSERT(list_empty(&cache->pending_edge));
ASSERT(list_empty(&cache->useless_node));
- ASSERT(list_empty(&cache->changed));
- ASSERT(list_empty(&cache->detached));
- ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
ASSERT(!cache->nr_nodes);
ASSERT(!cache->nr_edges);
}
-void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
- struct btrfs_backref_node *lower,
- struct btrfs_backref_node *upper,
- int link_which)
+static void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
+ struct btrfs_backref_node *lower,
+ struct btrfs_backref_node *upper)
{
ASSERT(upper && lower && upper->level == lower->level + 1);
edge->node[LOWER] = lower;
edge->node[UPPER] = upper;
- if (link_which & LINK_LOWER)
- list_add_tail(&edge->list[LOWER], &lower->upper);
- if (link_which & LINK_UPPER)
- list_add_tail(&edge->list[UPPER], &upper->lower);
+ list_add_tail(&edge->list[LOWER], &lower->upper);
}
/*
* Handle direct tree backref
@@ -3278,7 +3229,7 @@ static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
ASSERT(upper->checked);
INIT_LIST_HEAD(&edge->list[UPPER]);
}
- btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
+ btrfs_backref_link_edge(edge, cur, upper);
return 0;
}
@@ -3316,8 +3267,12 @@ static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
if (IS_ERR(root))
return PTR_ERR(root);
- if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
- cur->cowonly = 1;
+
+ /* We shouldn't be using backref cache for non-shareable roots. */
+ if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) {
+ btrfs_put_root(root);
+ return -EUCLEAN;
+ }
if (btrfs_root_level(&root->root_item) == cur->level) {
/* Tree root */
@@ -3344,8 +3299,8 @@ static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
level = cur->level + 1;
/* Search the tree to find parent blocks referring to the block */
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
path->lowest_level = level;
ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
path->lowest_level = 0;
@@ -3359,9 +3314,9 @@ static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
eb = path->nodes[level];
if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
btrfs_err(fs_info,
-"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
+"couldn't find block (%llu) (level %d) in tree (%llu) with key " BTRFS_KEY_FMT,
cur->bytenr, level - 1, btrfs_root_id(root),
- tree_key->objectid, tree_key->type, tree_key->offset);
+ BTRFS_KEY_FMT_VALUE(tree_key));
btrfs_put_root(root);
ret = -ENOENT;
goto out;
@@ -3403,8 +3358,15 @@ static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
goto out;
}
upper->owner = btrfs_header_owner(eb);
- if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
- upper->cowonly = 1;
+
+ /* We shouldn't be using backref cache for non-shareable roots. */
+ if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) {
+ btrfs_put_root(root);
+ btrfs_backref_free_edge(cache, edge);
+ btrfs_backref_free_node(cache, upper);
+ ret = -EUCLEAN;
+ goto out;
+ }
/*
* If we know the block isn't shared we can avoid
@@ -3437,7 +3399,7 @@ static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
if (!upper->owner)
upper->owner = btrfs_header_owner(eb);
}
- btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
+ btrfs_backref_link_edge(edge, lower, upper);
if (rb_node) {
btrfs_put_root(root);
@@ -3486,7 +3448,7 @@ int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
if (ret < 0)
goto out;
/* No extra backref? This means the tree block is corrupted */
- if (ret > 0) {
+ if (unlikely(ret > 0)) {
ret = -EUCLEAN;
goto out;
}
@@ -3498,8 +3460,8 @@ int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
* type BTRFS_TREE_BLOCK_REF_KEY
*/
ASSERT(list_is_singular(&cur->upper));
- edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
- list[LOWER]);
+ edge = list_first_entry(&cur->upper, struct btrfs_backref_edge,
+ list[LOWER]);
ASSERT(list_empty(&edge->list[UPPER]));
exist = edge->node[UPPER];
/*
@@ -3529,7 +3491,7 @@ int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
((unsigned long)iter->cur_ptr);
type = btrfs_get_extent_inline_ref_type(eb, iref,
BTRFS_REF_TYPE_BLOCK);
- if (type == BTRFS_REF_TYPE_INVALID) {
+ if (unlikely(type == BTRFS_REF_TYPE_INVALID)) {
ret = -EUCLEAN;
goto out;
}
@@ -3595,15 +3557,9 @@ int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
ASSERT(start->checked);
- /* Insert this node to cache if it's not COW-only */
- if (!start->cowonly) {
- rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
- &start->rb_node);
- if (rb_node)
- btrfs_backref_panic(cache->fs_info, start->bytenr,
- -EEXIST);
- list_add_tail(&start->lower, &cache->leaves);
- }
+ rb_node = rb_simple_insert(&cache->rb_root, &start->simple_node);
+ if (rb_node)
+ btrfs_backref_panic(cache->fs_info, start->bytenr, -EEXIST);
/*
* Use breadth first search to iterate all related edges.
@@ -3642,38 +3598,22 @@ int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
* parents have already been linked.
*/
if (!RB_EMPTY_NODE(&upper->rb_node)) {
- if (upper->lowest) {
- list_del_init(&upper->lower);
- upper->lowest = 0;
- }
-
list_add_tail(&edge->list[UPPER], &upper->lower);
continue;
}
/* Sanity check, we shouldn't have any unchecked nodes */
- if (!upper->checked) {
- ASSERT(0);
+ if (unlikely(!upper->checked)) {
+ DEBUG_WARN("we should not have any unchecked nodes");
return -EUCLEAN;
}
- /* Sanity check, COW-only node has non-COW-only parent */
- if (start->cowonly != upper->cowonly) {
- ASSERT(0);
+ rb_node = rb_simple_insert(&cache->rb_root, &upper->simple_node);
+ if (unlikely(rb_node)) {
+ btrfs_backref_panic(cache->fs_info, upper->bytenr, -EEXIST);
return -EUCLEAN;
}
- /* Only cache non-COW-only (subvolume trees) tree blocks */
- if (!upper->cowonly) {
- rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
- &upper->rb_node);
- if (rb_node) {
- btrfs_backref_panic(cache->fs_info,
- upper->bytenr, -EEXIST);
- return -EUCLEAN;
- }
- }
-
list_add_tail(&edge->list[UPPER], &upper->lower);
/*
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index e8c22cccb5c1..1d009b0f4c69 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -190,7 +190,7 @@ struct btrfs_backref_share_check_ctx {
* It's very common to have several file extent items that point to the
* same extent (bytenr) but with different offsets and lengths. This
* typically happens for COW writes, partial writes into prealloc
- * extents, NOCOW writes after snapshoting a root, hole punching or
+ * extents, NOCOW writes after snapshotting a root, hole punching or
* reflinking within the same file (less common perhaps).
* So keep a small cache with the lookup results for the extent pointed
* by the last few file extent items. This cache is checked, with a
@@ -226,8 +226,7 @@ int iterate_extent_inodes(struct btrfs_backref_walk_ctx *ctx,
iterate_extent_inodes_t *iterate, void *user_ctx);
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
- struct btrfs_path *path, void *ctx,
- bool ignore_offset);
+ void *ctx, bool ignore_offset);
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);
@@ -242,7 +241,12 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
struct btrfs_data_container *init_data_container(u32 total_bytes);
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
struct btrfs_path *path);
-void free_ipath(struct inode_fs_paths *ipath);
+
+DEFINE_FREE(inode_fs_paths, struct inode_fs_paths *,
+ if (_T) {
+ kvfree(_T->fspath);
+ kfree(_T);
+ })
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
u64 start_off, struct btrfs_path *path,
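
A hedged usage sketch for the new cleanup-based free (caller code assumed, not shown in this hunk); note the NULL reset before returning an error, since the cleanup body only checks for a non-NULL pointer, not for an ERR_PTR:

struct inode_fs_paths *ipath __free(inode_fs_paths) =
	init_ipath(4096, fs_root, path);

if (IS_ERR(ipath)) {
	int ret = PTR_ERR(ipath);

	ipath = NULL;	/* keep the cleanup from touching an ERR_PTR */
	return ret;
}
/* ... use ipath->fspath ...; both allocations are freed on scope exit */
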
@@ -313,11 +317,22 @@ int btrfs_backref_iter_next(struct btrfs_backref_iter *iter);
* Represent a tree block in the backref cache
*/
struct btrfs_backref_node {
- struct {
- struct rb_node rb_node;
- u64 bytenr;
- }; /* Use rb_simple_node for search/insert */
+ union {
+ /* Use rb_simple_node for search/insert */
+ struct {
+ struct rb_node rb_node;
+ u64 bytenr;
+ };
+
+ struct rb_simple_node simple_node;
+ };
+ /*
+ * This is a sanity check: whenever we COW a block we update
+ * new_bytenr with its current location, and we check this in
+ * various places to validate that the cache makes sense. It
+ * shouldn't be used for anything else.
+ */
u64 new_bytenr;
/* Objectid of tree block owner, can be not uptodate */
u64 owner;
@@ -335,10 +350,6 @@ struct btrfs_backref_node {
struct extent_buffer *eb;
/* Level of the tree block */
unsigned int level:8;
- /* Is the block in a non-shareable tree */
- unsigned int cowonly:1;
- /* 1 if no child node is in the cache */
- unsigned int lowest:1;
/* Is the extent buffer locked */
unsigned int locked:1;
/* Has the block been processed */
@@ -391,12 +402,6 @@ struct btrfs_backref_cache {
* level blocks may not reflect the new location
*/
struct list_head pending[BTRFS_MAX_LEVEL];
- /* List of backref nodes with no child node */
- struct list_head leaves;
- /* List of blocks that have been COWed in current transaction */
- struct list_head changed;
- /* List of detached backref node. */
- struct list_head detached;
u64 last_trans;
@@ -414,7 +419,7 @@ struct btrfs_backref_cache {
/*
* Whether this cache is for relocation
*
- * Reloction backref cache require more info for reloc root compared
+ * Relocation backref cache requires more info for reloc roots compared
* to generic backref cache.
*/
bool is_reloc;
@@ -427,13 +432,6 @@ struct btrfs_backref_node *btrfs_backref_alloc_node(
struct btrfs_backref_edge *btrfs_backref_alloc_edge(
struct btrfs_backref_cache *cache);
-#define LINK_LOWER (1 << 0)
-#define LINK_UPPER (1 << 1)
-
-void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
- struct btrfs_backref_node *lower,
- struct btrfs_backref_node *upper,
- int link_which);
void btrfs_backref_free_node(struct btrfs_backref_cache *cache,
struct btrfs_backref_node *node);
void btrfs_backref_free_edge(struct btrfs_backref_cache *cache,
diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
index 1f216d07eff6..fa1d321a2fb8 100644
--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -27,12 +27,12 @@ struct btrfs_failed_bio {
};
/* Is this a data path I/O that needs storage layer checksum and repair? */
-static inline bool is_data_bbio(struct btrfs_bio *bbio)
+static inline bool is_data_bbio(const struct btrfs_bio *bbio)
{
return bbio->inode && is_data_inode(bbio->inode);
}
-static bool bbio_has_ordered_extent(struct btrfs_bio *bbio)
+static bool bbio_has_ordered_extent(const struct btrfs_bio *bbio)
{
return is_data_bbio(bbio) && btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE;
}
@@ -41,13 +41,17 @@ static bool bbio_has_ordered_extent(struct btrfs_bio *bbio)
* Initialize a btrfs_bio structure. This skips the embedded bio itself as it
* is already initialized by the block layer.
*/
-void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info,
+void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode, u64 file_offset,
btrfs_bio_end_io_t end_io, void *private)
{
+ /* @inode parameter is mandatory. */
+ ASSERT(inode);
+
memset(bbio, 0, offsetof(struct btrfs_bio, bio));
- bbio->fs_info = fs_info;
+ bbio->inode = inode;
bbio->end_io = end_io;
bbio->private = private;
+ bbio->file_offset = file_offset;
atomic_set(&bbio->pending_ios, 1);
WRITE_ONCE(bbio->status, BLK_STS_OK);
}
@@ -60,7 +64,7 @@ void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info,
* a mempool.
*/
struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
- struct btrfs_fs_info *fs_info,
+ struct btrfs_inode *inode, u64 file_offset,
btrfs_bio_end_io_t end_io, void *private)
{
struct btrfs_bio *bbio;
@@ -68,7 +72,7 @@ struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);
bbio = btrfs_bio(bio);
- btrfs_bio_init(bbio, fs_info, end_io, private);
+ btrfs_bio_init(bbio, inode, file_offset, end_io, private);
return bbio;
}
@@ -81,46 +85,40 @@ static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT, GFP_NOFS,
&btrfs_clone_bioset);
+ if (IS_ERR(bio))
+ return ERR_CAST(bio);
+
bbio = btrfs_bio(bio);
- btrfs_bio_init(bbio, fs_info, NULL, orig_bbio);
- bbio->inode = orig_bbio->inode;
- bbio->file_offset = orig_bbio->file_offset;
+ btrfs_bio_init(bbio, orig_bbio->inode, orig_bbio->file_offset, NULL, orig_bbio);
orig_bbio->file_offset += map_length;
if (bbio_has_ordered_extent(bbio)) {
refcount_inc(&orig_bbio->ordered->refs);
bbio->ordered = orig_bbio->ordered;
+ bbio->orig_logical = orig_bbio->orig_logical;
+ orig_bbio->orig_logical += map_length;
}
+ bbio->csum_search_commit_root = orig_bbio->csum_search_commit_root;
atomic_inc(&orig_bbio->pending_ios);
return bbio;
}
-/* Free a bio that was never submitted to the underlying device. */
-static void btrfs_cleanup_bio(struct btrfs_bio *bbio)
+void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
{
- if (bbio_has_ordered_extent(bbio))
- btrfs_put_ordered_extent(bbio->ordered);
- bio_put(&bbio->bio);
-}
+ /* Make sure we're already in task context. */
+ ASSERT(in_task());
-static void __btrfs_bio_end_io(struct btrfs_bio *bbio)
-{
- if (bbio_has_ordered_extent(bbio)) {
- struct btrfs_ordered_extent *ordered = bbio->ordered;
+ if (bbio->async_csum)
+ wait_for_completion(&bbio->csum_done);
- bbio->end_io(bbio);
- btrfs_put_ordered_extent(ordered);
- } else {
- bbio->end_io(bbio);
- }
-}
-
-void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
-{
bbio->bio.bi_status = status;
if (bbio->bio.bi_pool == &btrfs_clone_bioset) {
struct btrfs_bio *orig_bbio = bbio->private;
- btrfs_cleanup_bio(bbio);
+ /* Free bio that was never submitted to the underlying device. */
+ if (bbio_has_ordered_extent(bbio))
+ btrfs_put_ordered_extent(bbio->ordered);
+ bio_put(&bbio->bio);
+
bbio = orig_bbio;
}
@@ -135,18 +133,26 @@ void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
/* Load split bio's error which might be set above. */
if (status == BLK_STS_OK)
bbio->bio.bi_status = READ_ONCE(bbio->status);
- __btrfs_bio_end_io(bbio);
+
+ if (bbio_has_ordered_extent(bbio)) {
+ struct btrfs_ordered_extent *ordered = bbio->ordered;
+
+ bbio->end_io(bbio);
+ btrfs_put_ordered_extent(ordered);
+ } else {
+ bbio->end_io(bbio);
+ }
}
}
-static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
+static int next_repair_mirror(const struct btrfs_failed_bio *fbio, int cur_mirror)
{
if (cur_mirror == fbio->num_copies)
return cur_mirror + 1 - fbio->num_copies;
return cur_mirror + 1;
}
-static int prev_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
+static int prev_repair_mirror(const struct btrfs_failed_bio *fbio, int cur_mirror)
{
if (cur_mirror == 1)
return fbio->num_copies;
@@ -167,17 +173,30 @@ static void btrfs_end_repair_bio(struct btrfs_bio *repair_bbio,
struct btrfs_failed_bio *fbio = repair_bbio->private;
struct btrfs_inode *inode = repair_bbio->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct bio_vec *bv = bio_first_bvec_all(&repair_bbio->bio);
- int mirror = repair_bbio->mirror_num;
-
/*
- * We can only trigger this for data bio, which doesn't support larger
- * folios yet.
+ * We can not move forward the saved_iter, as it will be later
+ * utilized by repair_bbio again.
*/
- ASSERT(folio_order(page_folio(bv->bv_page)) == 0);
+ struct bvec_iter saved_iter = repair_bbio->saved_iter;
+ const u32 step = min(fs_info->sectorsize, PAGE_SIZE);
+ const u64 logical = repair_bbio->saved_iter.bi_sector << SECTOR_SHIFT;
+ const u32 nr_steps = repair_bbio->saved_iter.bi_size / step;
+ int mirror = repair_bbio->mirror_num;
+ phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
+ phys_addr_t paddr;
+ unsigned int slot = 0;
+
+ /* Repair bbio should be eaxctly one block sized. */
+ ASSERT(repair_bbio->saved_iter.bi_size == fs_info->sectorsize);
+
+ btrfs_bio_for_each_block(paddr, &repair_bbio->bio, &saved_iter, step) {
+ ASSERT(slot < nr_steps);
+ paddrs[slot] = paddr;
+ slot++;
+ }
if (repair_bbio->bio.bi_status ||
- !btrfs_data_csum_ok(repair_bbio, dev, 0, bv)) {
+ !btrfs_data_csum_ok(repair_bbio, dev, 0, paddrs)) {
bio_reset(&repair_bbio->bio, NULL, REQ_OP_READ);
repair_bbio->bio.bi_iter = repair_bbio->saved_iter;
@@ -196,8 +215,7 @@ static void btrfs_end_repair_bio(struct btrfs_bio *repair_bbio,
mirror = prev_repair_mirror(fbio, mirror);
btrfs_repair_io_failure(fs_info, btrfs_ino(inode),
repair_bbio->file_offset, fs_info->sectorsize,
- repair_bbio->saved_iter.bi_sector << SECTOR_SHIFT,
- page_folio(bv->bv_page), bv->bv_offset, mirror);
+ logical, paddrs, step, mirror);
} while (mirror != fbio->bbio->mirror_num);
done:
@@ -214,13 +232,20 @@ done:
*/
static struct btrfs_failed_bio *repair_one_sector(struct btrfs_bio *failed_bbio,
u32 bio_offset,
- struct bio_vec *bv,
+ phys_addr_t paddrs[],
struct btrfs_failed_bio *fbio)
{
struct btrfs_inode *inode = failed_bbio->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
const u32 sectorsize = fs_info->sectorsize;
- const u64 logical = (failed_bbio->saved_iter.bi_sector << SECTOR_SHIFT);
+ const u32 step = min(fs_info->sectorsize, PAGE_SIZE);
+ const u32 nr_steps = sectorsize / step;
+ /*
+ * For bs > ps cases, the saved_iter can be partially moved forward.
+ * In that case we should round it down to the block boundary.
+ */
+ const u64 logical = round_down(failed_bbio->saved_iter.bi_sector << SECTOR_SHIFT,
+ sectorsize);
struct btrfs_bio *repair_bbio;
struct bio *repair_bio;
int num_copies;
@@ -245,15 +270,22 @@ static struct btrfs_failed_bio *repair_one_sector(struct btrfs_bio *failed_bbio,
atomic_inc(&fbio->repair_count);
- repair_bio = bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS,
+ repair_bio = bio_alloc_bioset(NULL, nr_steps, REQ_OP_READ, GFP_NOFS,
&btrfs_repair_bioset);
- repair_bio->bi_iter.bi_sector = failed_bbio->saved_iter.bi_sector;
- __bio_add_page(repair_bio, bv->bv_page, bv->bv_len, bv->bv_offset);
+ repair_bio->bi_iter.bi_sector = logical >> SECTOR_SHIFT;
+ for (int i = 0; i < nr_steps; i++) {
+ int ret;
+
+ ASSERT(offset_in_page(paddrs[i]) + step <= PAGE_SIZE);
+
+ ret = bio_add_page(repair_bio, phys_to_page(paddrs[i]), step,
+ offset_in_page(paddrs[i]));
+ ASSERT(ret == step);
+ }
repair_bbio = btrfs_bio(repair_bio);
- btrfs_bio_init(repair_bbio, fs_info, NULL, fbio);
- repair_bbio->inode = failed_bbio->inode;
- repair_bbio->file_offset = failed_bbio->file_offset + bio_offset;
+ btrfs_bio_init(repair_bbio, failed_bbio->inode, failed_bbio->file_offset + bio_offset,
+ NULL, fbio);
mirror = next_repair_mirror(fbio, failed_bbio->mirror_num);
btrfs_debug(fs_info, "submitting repair read to mirror %d", mirror);
@@ -265,10 +297,14 @@ static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *de
{
struct btrfs_inode *inode = bbio->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- u32 sectorsize = fs_info->sectorsize;
+ const u32 sectorsize = fs_info->sectorsize;
+ const u32 step = min(sectorsize, PAGE_SIZE);
+ const u32 nr_steps = sectorsize / step;
struct bvec_iter *iter = &bbio->saved_iter;
blk_status_t status = bbio->bio.bi_status;
struct btrfs_failed_bio *fbio = NULL;
+ phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
+ phys_addr_t paddr;
u32 offset = 0;
/* Read-repair requires the inode field to be set by the submitter. */
@@ -286,19 +322,19 @@ static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *de
/* Clear the I/O error. A failed repair will reset it. */
bbio->bio.bi_status = BLK_STS_OK;
- while (iter->bi_size) {
- struct bio_vec bv = bio_iter_iovec(&bbio->bio, *iter);
+ btrfs_bio_for_each_block(paddr, &bbio->bio, iter, step) {
+ paddrs[(offset / step) % nr_steps] = paddr;
+ offset += step;
- bv.bv_len = min(bv.bv_len, sectorsize);
- if (status || !btrfs_data_csum_ok(bbio, dev, offset, &bv))
- fbio = repair_one_sector(bbio, offset, &bv, fbio);
-
- bio_advance_iter_single(&bbio->bio, iter, sectorsize);
- offset += sectorsize;
+ if (IS_ALIGNED(offset, sectorsize)) {
+ if (status ||
+ !btrfs_data_csum_ok(bbio, dev, offset - sectorsize, paddrs))
+ fbio = repair_one_sector(bbio, offset - sectorsize,
+ paddrs, fbio);
+ }
}
-
if (bbio->csum != bbio->csum_inline)
- kfree(bbio->csum);
+ kvfree(bbio->csum);
if (fbio)
btrfs_repair_done(fbio);
@@ -306,7 +342,7 @@ static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *de
btrfs_bio_end_io(bbio, bbio->bio.bi_status);
}
-static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
+static void btrfs_log_dev_io_error(const struct bio *bio, struct btrfs_device *dev)
{
if (!dev || !dev->bdev)
return;
@@ -321,44 +357,43 @@ static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS);
}
-static struct workqueue_struct *btrfs_end_io_wq(struct btrfs_fs_info *fs_info,
- struct bio *bio)
+static struct workqueue_struct *btrfs_end_io_wq(const struct btrfs_fs_info *fs_info,
+ const struct bio *bio)
{
if (bio->bi_opf & REQ_META)
return fs_info->endio_meta_workers;
return fs_info->endio_workers;
}
-static void btrfs_end_bio_work(struct work_struct *work)
+static void simple_end_io_work(struct work_struct *work)
{
struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work);
+ struct bio *bio = &bbio->bio;
- /* Metadata reads are checked and repaired by the submitter. */
- if (is_data_bbio(bbio))
- btrfs_check_read_bio(bbio, bbio->bio.bi_private);
- else
- btrfs_bio_end_io(bbio, bbio->bio.bi_status);
+ if (bio_op(bio) == REQ_OP_READ) {
+ /* Metadata reads are checked and repaired by the submitter. */
+ if (is_data_bbio(bbio))
+ return btrfs_check_read_bio(bbio, bbio->bio.bi_private);
+ return btrfs_bio_end_io(bbio, bbio->bio.bi_status);
+ }
+ if (bio_is_zone_append(bio) && !bio->bi_status)
+ btrfs_record_physical_zoned(bbio);
+ btrfs_bio_end_io(bbio, bbio->bio.bi_status);
}
static void btrfs_simple_end_io(struct bio *bio)
{
struct btrfs_bio *bbio = btrfs_bio(bio);
struct btrfs_device *dev = bio->bi_private;
- struct btrfs_fs_info *fs_info = bbio->fs_info;
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
btrfs_bio_counter_dec(fs_info);
if (bio->bi_status)
btrfs_log_dev_io_error(bio, dev);
- if (bio_op(bio) == REQ_OP_READ) {
- INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
- queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work);
- } else {
- if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
- btrfs_record_physical_zoned(bbio);
- btrfs_bio_end_io(bbio, bbio->bio.bi_status);
- }
+ INIT_WORK(&bbio->end_io_work, simple_end_io_work);
+ queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work);
}
static void btrfs_raid56_end_io(struct bio *bio)
@@ -366,6 +401,9 @@ static void btrfs_raid56_end_io(struct bio *bio)
struct btrfs_io_context *bioc = bio->bi_private;
struct btrfs_bio *bbio = btrfs_bio(bio);
+ /* RAID56 endio is always handled in workqueue. */
+ ASSERT(in_task());
+
btrfs_bio_counter_dec(bioc->fs_info);
bbio->mirror_num = bioc->mirror_num;
if (bio_op(bio) == REQ_OP_READ && is_data_bbio(bbio))
@@ -376,11 +414,12 @@ static void btrfs_raid56_end_io(struct bio *bio)
btrfs_put_bioc(bioc);
}
-static void btrfs_orig_write_end_io(struct bio *bio)
+static void orig_write_end_io_work(struct work_struct *work)
{
+ struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work);
+ struct bio *bio = &bbio->bio;
struct btrfs_io_stripe *stripe = bio->bi_private;
struct btrfs_io_context *bioc = stripe->bioc;
- struct btrfs_bio *bbio = btrfs_bio(bio);
btrfs_bio_counter_dec(bioc->fs_info);
@@ -398,21 +437,31 @@ static void btrfs_orig_write_end_io(struct bio *bio)
else
bio->bi_status = BLK_STS_OK;
- if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
+ if (bio_is_zone_append(bio) && !bio->bi_status)
stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
btrfs_bio_end_io(bbio, bbio->bio.bi_status);
btrfs_put_bioc(bioc);
}
-static void btrfs_clone_write_end_io(struct bio *bio)
+static void btrfs_orig_write_end_io(struct bio *bio)
{
+ struct btrfs_bio *bbio = btrfs_bio(bio);
+
+ INIT_WORK(&bbio->end_io_work, orig_write_end_io_work);
+ queue_work(btrfs_end_io_wq(bbio->inode->root->fs_info, bio), &bbio->end_io_work);
+}
+
+static void clone_write_end_io_work(struct work_struct *work)
+{
+ struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work);
+ struct bio *bio = &bbio->bio;
struct btrfs_io_stripe *stripe = bio->bi_private;
if (bio->bi_status) {
atomic_inc(&stripe->bioc->error);
btrfs_log_dev_io_error(bio, stripe->dev);
- } else if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+ } else if (bio_is_zone_append(bio)) {
stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
}
@@ -421,6 +470,14 @@ static void btrfs_clone_write_end_io(struct bio *bio)
bio_put(bio);
}
+static void btrfs_clone_write_end_io(struct bio *bio)
+{
+ struct btrfs_bio *bbio = btrfs_bio(bio);
+
+ INIT_WORK(&bbio->end_io_work, clone_write_end_io_work);
+ queue_work(btrfs_end_io_wq(bbio->inode->root->fs_info, bio), &bbio->end_io_work);
+}
+
static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
{
if (!dev || !dev->bdev ||
@@ -444,12 +501,20 @@ static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
ASSERT(btrfs_dev_is_sequential(dev, physical));
bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
}
- btrfs_debug_in_rcu(dev->fs_info,
+ btrfs_debug(dev->fs_info,
"%s: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
__func__, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
(unsigned long)dev->bdev->bd_dev, btrfs_dev_name(dev),
dev->devid, bio->bi_iter.bi_size);
+ /*
+ * Track reads if tracking is enabled; ignore I/O operations before the
+ * filesystem is fully initialized.
+ */
+ if (dev->fs_devices->collect_fs_stats && bio_op(bio) == REQ_OP_READ && dev->fs_info)
+ percpu_counter_add(&dev->fs_info->stats_read_blocks,
+ bio->bi_iter.bi_size >> dev->fs_info->sectorsize_bits);
+
if (bio->bi_opf & REQ_BTRFS_CGROUP_PUNT)
blkcg_punt_bio_submit(bio);
else
@@ -459,6 +524,7 @@ static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr)
{
struct bio *orig_bio = bioc->orig_bio, *bio;
+ struct btrfs_bio *orig_bbio = btrfs_bio(orig_bio);
ASSERT(bio_op(orig_bio) != REQ_OP_READ);
@@ -467,8 +533,11 @@ static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr)
bio = orig_bio;
bio->bi_end_io = btrfs_orig_write_end_io;
} else {
- bio = bio_alloc_clone(NULL, orig_bio, GFP_NOFS, &fs_bio_set);
+ /* We need to use endio_work to run end_io in task context. */
+ bio = bio_alloc_clone(NULL, orig_bio, GFP_NOFS, &btrfs_bioset);
bio_inc_remaining(orig_bio);
+ btrfs_bio_init(btrfs_bio(bio), orig_bbio->inode,
+ orig_bbio->file_offset, NULL, NULL);
bio->bi_end_io = btrfs_clone_write_end_io;
}
@@ -509,11 +578,15 @@ static void btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc,
}
}
-static blk_status_t btrfs_bio_csum(struct btrfs_bio *bbio)
+static int btrfs_bio_csum(struct btrfs_bio *bbio)
{
if (bbio->bio.bi_opf & REQ_META)
return btree_csum_one_bio(bbio);
- return btrfs_csum_one_bio(bbio);
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ return btrfs_csum_one_bio(bbio, true);
+#else
+ return btrfs_csum_one_bio(bbio, false);
+#endif
}
/*
@@ -540,11 +613,11 @@ static void run_one_async_start(struct btrfs_work *work)
{
struct async_submit_bio *async =
container_of(work, struct async_submit_bio, work);
- blk_status_t ret;
+ int ret;
ret = btrfs_bio_csum(async->bbio);
if (ret)
- async->bbio->bio.bi_status = ret;
+ async->bbio->bio.bi_status = errno_to_blk_status(ret);
}
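These hunks are part of a wider switch to plain int/errno returns inside btrfs, converting to blk_status_t only when the value is stored in a bio. A small sketch of that boundary conversion, assuming the stock block-layer helpers (my_prepare_write() and my_finish() are illustrative names):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Internal helpers keep returning 0 or a negative errno. */
static int my_prepare_write(void)
{
	return -ENOMEM;		/* illustrative failure */
}

/* Convert to blk_status_t only when the result lands in the bio. */
static void my_finish(struct bio *bio)
{
	int ret = my_prepare_write();

	/* -ENOMEM typically maps to BLK_STS_RESOURCE. */
	bio->bi_status = errno_to_blk_status(ret);
	bio_endio(bio);
}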
/*
@@ -570,7 +643,7 @@ static void run_one_async_done(struct btrfs_work *work, bool do_free)
/* If an error occurred we just want to clean up the bio and move on. */
if (bio->bi_status) {
- btrfs_bio_end_io(async->bbio, async->bbio->bio.bi_status);
+ btrfs_bio_end_io(async->bbio, bio->bi_status);
return;
}
@@ -585,20 +658,25 @@ static void run_one_async_done(struct btrfs_work *work, bool do_free)
static bool should_async_write(struct btrfs_bio *bbio)
{
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
bool auto_csum_mode = true;
#ifdef CONFIG_BTRFS_EXPERIMENTAL
- struct btrfs_fs_devices *fs_devices = bbio->fs_info->fs_devices;
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
enum btrfs_offload_csum_mode csum_mode = READ_ONCE(fs_devices->offload_csum_mode);
- if (csum_mode == BTRFS_OFFLOAD_CSUM_FORCE_OFF)
- return false;
-
- auto_csum_mode = (csum_mode == BTRFS_OFFLOAD_CSUM_AUTO);
+ if (csum_mode == BTRFS_OFFLOAD_CSUM_FORCE_ON)
+ return true;
+ /*
+ * Write bios calculate the checksum and submit the bio in one go.
+ * Unless explicitly required, don't offload that serial csum calculation
+ * and bio submission to a workqueue.
+ */
+ return false;
#endif
/* Submit synchronously if the checksum implementation is fast. */
- if (auto_csum_mode && test_bit(BTRFS_FS_CSUM_IMPL_FAST, &bbio->fs_info->flags))
+ if (auto_csum_mode && test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags))
return false;
/*
@@ -609,7 +687,7 @@ static bool should_async_write(struct btrfs_bio *bbio)
return false;
/* Zoned devices require I/O to be submitted in order. */
- if ((bbio->bio.bi_opf & REQ_META) && btrfs_is_zoned(bbio->fs_info))
+ if ((bbio->bio.bi_opf & REQ_META) && btrfs_is_zoned(fs_info))
return false;
return true;
@@ -624,7 +702,7 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
struct btrfs_io_context *bioc,
struct btrfs_io_stripe *smap, int mirror_num)
{
- struct btrfs_fs_info *fs_info = bbio->fs_info;
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
struct async_submit_bio *async;
async = kmalloc(sizeof(*async), GFP_NOFS);
@@ -643,21 +721,28 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
static u64 btrfs_append_map_length(struct btrfs_bio *bbio, u64 map_length)
{
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
unsigned int nr_segs;
int sector_offset;
- map_length = min(map_length, bbio->fs_info->max_zone_append_size);
- sector_offset = bio_split_rw_at(&bbio->bio, &bbio->fs_info->limits,
+ map_length = min(map_length, fs_info->max_zone_append_size);
+ sector_offset = bio_split_rw_at(&bbio->bio, &fs_info->limits,
&nr_segs, map_length);
- if (sector_offset)
- return sector_offset << SECTOR_SHIFT;
+ if (sector_offset) {
+ /*
+ * bio_split_rw_at() could split at a size smaller than our
+ * sectorsize and thus cause unaligned I/Os. Fix that by
+ * always rounding down to the nearest boundary.
+ */
+ return ALIGN_DOWN(sector_offset << SECTOR_SHIFT, fs_info->sectorsize);
+ }
return map_length;
}
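The rounding above relies on ALIGN_DOWN() keeping the split point a multiple of the filesystem sector size. A short illustration of the arithmetic (values made up, 4K sectorsize assumed):

#include <linux/align.h>
#include <linux/types.h>

/*
 * ALIGN_DOWN(7168, 4096) == 4096   (unaligned tail is dropped)
 * ALIGN_DOWN(8192, 4096) == 8192   (already aligned, unchanged)
 */
static u64 round_split_to_sectorsize(u64 split_bytes, u32 sectorsize)
{
	return ALIGN_DOWN(split_bytes, sectorsize);
}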
static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
{
struct btrfs_inode *inode = bbio->inode;
- struct btrfs_fs_info *fs_info = bbio->fs_info;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct bio *bio = &bbio->bio;
u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
u64 length = bio->bi_iter.bi_size;
@@ -665,28 +750,45 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
bool use_append = btrfs_use_zone_append(bbio);
struct btrfs_io_context *bioc = NULL;
struct btrfs_io_stripe smap;
- blk_status_t ret;
- int error;
+ blk_status_t status;
+ int ret;
- if (!bbio->inode || btrfs_is_data_reloc_root(inode->root))
+ if (bbio->is_scrub || btrfs_is_data_reloc_root(inode->root))
smap.rst_search_commit_root = true;
else
smap.rst_search_commit_root = false;
btrfs_bio_counter_inc_blocked(fs_info);
- error = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
- &bioc, &smap, &mirror_num);
- if (error) {
- ret = errno_to_blk_status(error);
- goto fail;
+ ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
+ &bioc, &smap, &mirror_num);
+ if (ret) {
+ status = errno_to_blk_status(ret);
+ btrfs_bio_counter_dec(fs_info);
+ goto end_bbio;
}
+ /*
+ * For fscrypt writes we will get the encrypted bio after we've remapped
+ * our bio to the physical disk location, so we need to save the
+ * original bytenr so we know what we're checksumming.
+ */
+ if (bio_op(bio) == REQ_OP_WRITE && is_data_bbio(bbio))
+ bbio->orig_logical = logical;
+
map_length = min(map_length, length);
if (use_append)
map_length = btrfs_append_map_length(bbio, map_length);
if (map_length < length) {
- bbio = btrfs_split_bio(fs_info, bbio, map_length);
+ struct btrfs_bio *split;
+
+ split = btrfs_split_bio(fs_info, bbio, map_length);
+ if (IS_ERR(split)) {
+ status = errno_to_blk_status(PTR_ERR(split));
+ btrfs_bio_counter_dec(fs_info);
+ goto end_bbio;
+ }
+ bbio = split;
bio = &bbio->bio;
}
@@ -697,7 +799,8 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
if (bio_op(bio) == REQ_OP_READ && is_data_bbio(bbio)) {
bbio->saved_iter = bio->bi_iter;
ret = btrfs_lookup_bio_sums(bbio);
- if (ret)
+ status = errno_to_blk_status(ret);
+ if (status)
goto fail;
}
@@ -707,8 +810,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
bio->bi_opf |= REQ_OP_ZONE_APPEND;
}
- if (is_data_bbio(bbio) && bioc &&
- btrfs_need_stripe_tree_update(bioc->fs_info, bioc->map_type)) {
+ if (is_data_bbio(bbio) && bioc && bioc->use_rst) {
/*
* No locking for the list update, as we only add to
* the list in the I/O submission path, and list
@@ -723,7 +825,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
* Csum items for reloc roots have already been cloned at this
* point, so they are handled as part of the no-checksum case.
*/
- if (inode && !(inode->flags & BTRFS_INODE_NODATASUM) &&
+ if (!(inode->flags & BTRFS_INODE_NODATASUM) &&
!test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state) &&
!btrfs_is_data_reloc_root(inode->root)) {
if (should_async_write(bbio) &&
@@ -731,13 +833,15 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
goto done;
ret = btrfs_bio_csum(bbio);
- if (ret)
+ status = errno_to_blk_status(ret);
+ if (status)
goto fail;
} else if (use_append ||
(btrfs_is_zoned(fs_info) && inode &&
inode->flags & BTRFS_INODE_NODATASUM)) {
ret = btrfs_alloc_dummy_sum(bbio);
- if (ret)
+ status = errno_to_blk_status(ret);
+ if (status)
goto fail;
}
}
@@ -758,18 +862,48 @@ fail:
ASSERT(bbio->bio.bi_pool == &btrfs_clone_bioset);
ASSERT(remaining);
- btrfs_bio_end_io(remaining, ret);
+ btrfs_bio_end_io(remaining, status);
}
- btrfs_bio_end_io(bbio, ret);
+end_bbio:
+ btrfs_bio_end_io(bbio, status);
/* Do not submit another chunk */
return true;
}
+static void assert_bbio_alignment(struct btrfs_bio *bbio)
+{
+#ifdef CONFIG_BTRFS_ASSERT
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ const u32 blocksize = fs_info->sectorsize;
+ const u32 alignment = min(blocksize, PAGE_SIZE);
+ const u64 logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
+ const u32 length = bbio->bio.bi_iter.bi_size;
+
+ /* The logical and length should still be aligned to blocksize. */
+ ASSERT(IS_ALIGNED(logical, blocksize) && IS_ALIGNED(length, blocksize) &&
+ length != 0, "root=%llu inode=%llu logical=%llu length=%u",
+ btrfs_root_id(bbio->inode->root),
+ btrfs_ino(bbio->inode), logical, length);
+
+ bio_for_each_bvec(bvec, &bbio->bio, iter)
+ ASSERT(IS_ALIGNED(bvec.bv_offset, alignment) &&
+ IS_ALIGNED(bvec.bv_len, alignment),
+ "root=%llu inode=%llu logical=%llu length=%u index=%u bv_offset=%u bv_len=%u",
+ btrfs_root_id(bbio->inode->root),
+ btrfs_ino(bbio->inode), logical, length, iter.bi_idx,
+ bvec.bv_offset, bvec.bv_len);
+#endif
+}
+
void btrfs_submit_bbio(struct btrfs_bio *bbio, int mirror_num)
{
/* If bbio->inode is not populated, its file_offset must be 0. */
ASSERT(bbio->inode || bbio->file_offset == 0);
+ assert_bbio_alignment(bbio);
+
while (!btrfs_submit_chunk(bbio, mirror_num))
;
}
@@ -783,19 +917,36 @@ void btrfs_submit_bbio(struct btrfs_bio *bbio, int mirror_num)
*
* The I/O is issued synchronously to block the repair read completion from
* freeing the bio.
+ *
+ * @ino: Offending inode number
+ * @fileoff: File offset inside the inode
+ * @length: Length of the repair write
+ * @logical: Logical address of the range
+ * @paddrs: Physical address array of the content
+ * @step: Length of the data at each paddrs entry
+ * @mirror_num: Mirror number to write to. Must not be zero
*/
-int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
- u64 length, u64 logical, struct folio *folio,
- unsigned int folio_offset, int mirror_num)
+int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 fileoff,
+ u32 length, u64 logical, const phys_addr_t paddrs[],
+ unsigned int step, int mirror_num)
{
+ const u32 nr_steps = DIV_ROUND_UP_POW2(length, step);
struct btrfs_io_stripe smap = { 0 };
- struct bio_vec bvec;
- struct bio bio;
+ struct bio *bio = NULL;
int ret = 0;
ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
BUG_ON(!mirror_num);
+ /* Basic alignment checks. */
+ ASSERT(IS_ALIGNED(logical, fs_info->sectorsize));
+ ASSERT(IS_ALIGNED(length, fs_info->sectorsize));
+ ASSERT(IS_ALIGNED(fileoff, fs_info->sectorsize));
+ /* Either it's a single data or metadata block. */
+ ASSERT(length <= BTRFS_MAX_BLOCKSIZE);
+ ASSERT(step <= length);
+ ASSERT(is_power_of_2(step));
+
if (btrfs_repair_one_zone(fs_info, logical))
return 0;
@@ -809,31 +960,33 @@ int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
if (ret < 0)
goto out_counter_dec;
- if (!smap.dev->bdev ||
- !test_bit(BTRFS_DEV_STATE_WRITEABLE, &smap.dev->dev_state)) {
+ if (unlikely(!smap.dev->bdev ||
+ !test_bit(BTRFS_DEV_STATE_WRITEABLE, &smap.dev->dev_state))) {
ret = -EIO;
goto out_counter_dec;
}
- bio_init(&bio, smap.dev->bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC);
- bio.bi_iter.bi_sector = smap.physical >> SECTOR_SHIFT;
- ret = bio_add_folio(&bio, folio, length, folio_offset);
- ASSERT(ret);
- ret = submit_bio_wait(&bio);
+ bio = bio_alloc(smap.dev->bdev, nr_steps, REQ_OP_WRITE | REQ_SYNC, GFP_NOFS);
+ bio->bi_iter.bi_sector = smap.physical >> SECTOR_SHIFT;
+ for (int i = 0; i < nr_steps; i++) {
+ ret = bio_add_page(bio, phys_to_page(paddrs[i]), step, offset_in_page(paddrs[i]));
+ /* We should have allocated enough slots to contain all the different pages. */
+ ASSERT(ret == step);
+ }
+ ret = submit_bio_wait(bio);
+ bio_put(bio);
if (ret) {
/* try to remap that extent elsewhere? */
btrfs_dev_stat_inc_and_print(smap.dev, BTRFS_DEV_STAT_WRITE_ERRS);
- goto out_bio_uninit;
+ goto out_counter_dec;
}
- btrfs_info_rl_in_rcu(fs_info,
+ btrfs_info_rl(fs_info,
"read error corrected: ino %llu off %llu (dev %s sector %llu)",
- ino, start, btrfs_dev_name(smap.dev),
+ ino, fileoff, btrfs_dev_name(smap.dev),
smap.physical >> SECTOR_SHIFT);
ret = 0;
-out_bio_uninit:
- bio_uninit(&bio);
out_counter_dec:
btrfs_bio_counter_dec(fs_info);
return ret;
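The repair path switches from a single on-stack bio (bio_init()/bio_uninit(), one bvec) to a heap-allocated bio (bio_alloc()/bio_put()) that can carry one page per paddrs entry. A hedged sketch contrasting the two synchronous-write shapes; write_one_page_sync() and write_pages_sync() are illustrative names, not btrfs functions:

#include <linux/bio.h>

/* Old shape: on-stack bio, at most one bvec, no allocation. */
static int write_one_page_sync(struct block_device *bdev, sector_t sector,
			       struct page *page, unsigned int len)
{
	struct bio_vec bvec;
	struct bio bio;
	int ret;

	bio_init(&bio, bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC);
	bio.bi_iter.bi_sector = sector;
	__bio_add_page(&bio, page, len, 0);
	ret = submit_bio_wait(&bio);
	bio_uninit(&bio);
	return ret;
}

/* New shape: heap bio with nr slots, one page added per slot. */
static int write_pages_sync(struct block_device *bdev, sector_t sector,
			    struct page **pages, unsigned int nr, unsigned int len)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(bdev, nr, REQ_OP_WRITE | REQ_SYNC, GFP_NOFS);
	bio->bi_iter.bi_sector = sector;
	for (unsigned int i = 0; i < nr; i++)
		__bio_add_page(bio, pages[i], len, 0);	/* len <= PAGE_SIZE assumed */
	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}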
@@ -846,16 +999,16 @@ out_counter_dec:
*/
void btrfs_submit_repair_write(struct btrfs_bio *bbio, int mirror_num, bool dev_replace)
{
- struct btrfs_fs_info *fs_info = bbio->fs_info;
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
u64 logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
u64 length = bbio->bio.bi_iter.bi_size;
struct btrfs_io_stripe smap = { 0 };
int ret;
- ASSERT(fs_info);
ASSERT(mirror_num > 0);
ASSERT(btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE);
- ASSERT(!bbio->inode);
+ ASSERT(!is_data_inode(bbio->inode));
+ ASSERT(bbio->is_scrub);
btrfs_bio_counter_inc_blocked(fs_info);
ret = btrfs_map_repair_block(fs_info, &smap, logical, length, mirror_num);
@@ -882,22 +1035,18 @@ int __init btrfs_bioset_init(void)
return -ENOMEM;
if (bioset_init(&btrfs_clone_bioset, BIO_POOL_SIZE,
offsetof(struct btrfs_bio, bio), 0))
- goto out_free_bioset;
+ goto out;
if (bioset_init(&btrfs_repair_bioset, BIO_POOL_SIZE,
offsetof(struct btrfs_bio, bio),
BIOSET_NEED_BVECS))
- goto out_free_clone_bioset;
+ goto out;
if (mempool_init_kmalloc_pool(&btrfs_failed_bio_pool, BIO_POOL_SIZE,
sizeof(struct btrfs_failed_bio)))
- goto out_free_repair_bioset;
+ goto out;
return 0;
-out_free_repair_bioset:
- bioset_exit(&btrfs_repair_bioset);
-out_free_clone_bioset:
- bioset_exit(&btrfs_clone_bioset);
-out_free_bioset:
- bioset_exit(&btrfs_bioset);
+out:
+ btrfs_bioset_exit();
return -ENOMEM;
}
diff --git a/fs/btrfs/bio.h b/fs/btrfs/bio.h
index e2fe16074ad6..1be74209f0b8 100644
--- a/fs/btrfs/bio.h
+++ b/fs/btrfs/bio.h
@@ -18,13 +18,6 @@ struct btrfs_inode;
#define BTRFS_BIO_INLINE_CSUM_SIZE 64
-/*
- * Maximum number of sectors for a single bio to limit the size of the
- * checksum array. This matches the number of bio_vecs per bio and thus the
- * I/O size for buffered I/O.
- */
-#define BTRFS_MAX_BIO_SECTORS (256)
-
typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *bbio);
/*
@@ -34,7 +27,10 @@ typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *bbio);
struct btrfs_bio {
/*
* Inode and offset into it that this I/O operates on.
- * Only set for data I/O.
+ *
+ * For a data inode, csum verification and read-repair are done
+ * automatically.
+ * For a metadata inode, everything is handled by the caller.
*/
struct btrfs_inode *inode;
u64 file_offset;
@@ -56,11 +52,16 @@ struct btrfs_bio {
* - pointer to the checksums for this bio
* - original physical address from the allocator
* (for zone append only)
+ * - original logical address, used for checksumming fscrypt bios
*/
struct {
struct btrfs_ordered_extent *ordered;
struct btrfs_ordered_sum *sums;
+ struct work_struct csum_work;
+ struct completion csum_done;
+ struct bvec_iter csum_saved_iter;
u64 orig_physical;
+ u64 orig_logical;
};
/* For metadata reads: parentness verification. */
@@ -76,12 +77,21 @@ struct btrfs_bio {
atomic_t pending_ios;
struct work_struct end_io_work;
- /* File system that this I/O operates on. */
- struct btrfs_fs_info *fs_info;
-
/* Save the first error status of split bio. */
blk_status_t status;
+ /* Use the commit root to look up csums (data read bio only). */
+ bool csum_search_commit_root;
+
+ /*
+ * Since scrub reuses the btree inode, this flag is needed to distinguish
+ * scrub bios.
+ */
+ bool is_scrub;
+
+ /* Whether the csum generation for data write is async. */
+ bool async_csum;
+
/*
* This member must come last, bio_alloc_bioset will allocate enough
* bytes for entire btrfs_bio but relies on bio being last.
@@ -97,10 +107,10 @@ static inline struct btrfs_bio *btrfs_bio(struct bio *bio)
int __init btrfs_bioset_init(void);
void __cold btrfs_bioset_exit(void);
-void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info,
+void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode, u64 file_offset,
btrfs_bio_end_io_t end_io, void *private);
struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
- struct btrfs_fs_info *fs_info,
+ struct btrfs_inode *inode, u64 file_offset,
btrfs_bio_end_io_t end_io, void *private);
void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status);
@@ -109,8 +119,8 @@ void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status);
void btrfs_submit_bbio(struct btrfs_bio *bbio, int mirror_num);
void btrfs_submit_repair_write(struct btrfs_bio *bbio, int mirror_num, bool dev_replace);
-int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
- u64 length, u64 logical, struct folio *folio,
- unsigned int folio_offset, int mirror_num);
+int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 fileoff,
+ u32 length, u64 logical, const phys_addr_t paddrs[],
+ unsigned int step, int mirror_num);
#endif
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 4427c1b835e8..08b14449fabe 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -34,6 +34,19 @@ int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group
}
#endif
+static inline bool has_unwritten_metadata(struct btrfs_block_group *block_group)
+{
+ /* The meta_write_pointer is available only on the zoned setup. */
+ if (!btrfs_is_zoned(block_group->fs_info))
+ return false;
+
+ if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
+ return false;
+
+ return block_group->start + block_group->alloc_offset >
+ block_group->meta_write_pointer;
+}
+
/*
* Return target flags in extended format or 0 if restripe for this chunk_type
* is not in progress
@@ -173,43 +186,41 @@ void btrfs_put_block_group(struct btrfs_block_group *cache)
}
}
+static int btrfs_bg_start_cmp(const struct rb_node *new,
+ const struct rb_node *exist)
+{
+ const struct btrfs_block_group *new_bg =
+ rb_entry(new, struct btrfs_block_group, cache_node);
+ const struct btrfs_block_group *exist_bg =
+ rb_entry(exist, struct btrfs_block_group, cache_node);
+
+ if (new_bg->start < exist_bg->start)
+ return -1;
+ if (new_bg->start > exist_bg->start)
+ return 1;
+ return 0;
+}
+
/*
* This adds the block group to the fs_info rb tree for the block group cache
*/
-static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
- struct btrfs_block_group *block_group)
+static int btrfs_add_block_group_cache(struct btrfs_block_group *block_group)
{
- struct rb_node **p;
- struct rb_node *parent = NULL;
- struct btrfs_block_group *cache;
- bool leftmost = true;
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ struct rb_node *exist;
+ int ret = 0;
ASSERT(block_group->length != 0);
- write_lock(&info->block_group_cache_lock);
- p = &info->block_group_cache_tree.rb_root.rb_node;
-
- while (*p) {
- parent = *p;
- cache = rb_entry(parent, struct btrfs_block_group, cache_node);
- if (block_group->start < cache->start) {
- p = &(*p)->rb_left;
- } else if (block_group->start > cache->start) {
- p = &(*p)->rb_right;
- leftmost = false;
- } else {
- write_unlock(&info->block_group_cache_lock);
- return -EEXIST;
- }
- }
-
- rb_link_node(&block_group->cache_node, parent, p);
- rb_insert_color_cached(&block_group->cache_node,
- &info->block_group_cache_tree, leftmost);
+ write_lock(&fs_info->block_group_cache_lock);
- write_unlock(&info->block_group_cache_lock);
+ exist = rb_find_add_cached(&block_group->cache_node,
+ &fs_info->block_group_cache_tree, btrfs_bg_start_cmp);
+ if (exist)
+ ret = -EEXIST;
+ write_unlock(&fs_info->block_group_cache_lock);
- return 0;
+ return ret;
}
/*
@@ -527,10 +538,9 @@ int btrfs_add_new_free_space(struct btrfs_block_group *block_group, u64 start,
*total_added_ret = 0;
while (start < end) {
- if (!find_first_extent_bit(&info->excluded_extents, start,
- &extent_start, &extent_end,
- EXTENT_DIRTY | EXTENT_UPTODATE,
- NULL))
+ if (!btrfs_find_first_extent_bit(&info->excluded_extents, start,
+ &extent_start, &extent_end,
+ EXTENT_DIRTY, NULL))
break;
if (extent_start <= start) {
@@ -586,7 +596,7 @@ static int sample_block_group_extent_item(struct btrfs_caching_control *caching_
struct btrfs_root *extent_root;
u64 search_offset;
u64 search_end = block_group->start + block_group->length;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key search_key;
int ret = 0;
@@ -603,8 +613,8 @@ static int sample_block_group_extent_item(struct btrfs_caching_control *caching_
extent_root = btrfs_extent_root(fs_info, max_t(u64, block_group->start,
BTRFS_SUPER_INFO_OFFSET));
- path->skip_locking = 1;
- path->search_commit_root = 1;
+ path->skip_locking = true;
+ path->search_commit_root = true;
path->reada = READA_FORWARD;
search_offset = index * div_u64(block_group->length, max_index);
@@ -628,7 +638,6 @@ static int sample_block_group_extent_item(struct btrfs_caching_control *caching_
lockdep_assert_held(&caching_ctl->mutex);
lockdep_assert_held_read(&fs_info->commit_root_sem);
- btrfs_free_path(path);
return ret;
}
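BTRFS_PATH_AUTO_FREE() lets the path be released automatically when it goes out of scope, which is why the explicit btrfs_free_path() calls disappear in these hunks. A minimal sketch of the underlying scope-based cleanup pattern from <linux/cleanup.h>, assuming the usual DEFINE_FREE()/__free() helpers (my_buf is a made-up type):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_buf {
	void *data;
};

static void my_buf_free(struct my_buf *buf)
{
	if (!buf)
		return;
	kfree(buf->data);
	kfree(buf);
}

/* Teach the cleanup machinery how to free a struct my_buf pointer. */
DEFINE_FREE(my_buf_free, struct my_buf *, if (_T) my_buf_free(_T))

static int use_buf(void)
{
	struct my_buf *buf __free(my_buf_free) = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... use buf; it is freed automatically on every return path ... */
	return 0;
}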
@@ -704,7 +713,7 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
struct btrfs_block_group *block_group = caching_ctl->block_group;
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_root *extent_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_key key;
u64 total_found = 0;
@@ -735,13 +744,13 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
* root to add free space. So we skip locking and search the commit
* root, since it's read-only
*/
- path->skip_locking = 1;
- path->search_commit_root = 1;
+ path->skip_locking = true;
+ path->search_commit_root = true;
path->reada = READA_FORWARD;
key.objectid = last;
- key.offset = 0;
key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = 0;
next:
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
@@ -787,8 +796,8 @@ next:
if (key.objectid < last) {
key.objectid = last;
- key.offset = 0;
key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = 0;
btrfs_release_path(path);
goto next;
}
@@ -831,14 +840,13 @@ next:
block_group->start + block_group->length,
NULL);
out:
- btrfs_free_path(path);
return ret;
}
static inline void btrfs_free_excluded_extents(const struct btrfs_block_group *bg)
{
- clear_extent_bits(&bg->fs_info->excluded_extents, bg->start,
- bg->start + bg->length - 1, EXTENT_UPTODATE);
+ btrfs_clear_extent_bit(&bg->fs_info->excluded_extents, bg->start,
+ bg->start + bg->length - 1, EXTENT_DIRTY, NULL);
}
static noinline void caching_thread(struct btrfs_work *work)
@@ -882,7 +890,7 @@ static noinline void caching_thread(struct btrfs_work *work)
*/
if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
!(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
- ret = load_free_space_tree(caching_ctl);
+ ret = btrfs_load_free_space_tree(caching_ctl);
else
ret = load_extent_tree_free(caching_ctl);
done:
@@ -1057,7 +1065,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
struct btrfs_chunk_map *map)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_block_group *block_group;
struct btrfs_free_cluster *cluster;
struct inode *inode;
@@ -1223,7 +1231,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
block_group->space_info->total_bytes -= block_group->length;
block_group->space_info->bytes_readonly -=
(block_group->length - block_group->zone_unusable);
- btrfs_space_info_update_bytes_zone_unusable(fs_info, block_group->space_info,
+ btrfs_space_info_update_bytes_zone_unusable(block_group->space_info,
-block_group->zone_unusable);
block_group->space_info->disk_total -= block_group->length * factor;
@@ -1240,7 +1248,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
* another task to attempt to create another block group with the same
* item key (and failing with -EEXIST and a transaction abort).
*/
- ret = remove_block_group_free_space(trans, block_group);
+ ret = btrfs_remove_block_group_free_space(trans, block_group);
if (ret)
goto out;
@@ -1249,6 +1257,15 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
goto out;
spin_lock(&block_group->lock);
+ /*
+ * Hitting this WARN means we removed a block group with an unwritten
+ * region. It will cause "unable to find chunk map for logical" errors.
+ */
+ if (WARN_ON(has_unwritten_metadata(block_group)))
+ btrfs_warn(fs_info,
+ "block group %llu is removed before metadata write out",
+ block_group->start);
+
set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);
/*
@@ -1288,7 +1305,6 @@ out:
btrfs_put_block_group(block_group);
if (remove_rsv)
btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
- btrfs_free_path(path);
return ret;
}
@@ -1341,7 +1357,7 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
* data in this block group. That check should be done by relocation routine,
* not this function.
*/
-static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
+static int inc_block_group_ro(struct btrfs_block_group *cache, bool force)
{
struct btrfs_space_info *sinfo = cache->space_info;
u64 num_bytes;
@@ -1386,8 +1402,7 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
* BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
* leeway to allow us to mark this block group as read only.
*/
- if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
- BTRFS_RESERVE_NO_FLUSH))
+ if (btrfs_can_overcommit(sinfo, num_bytes, BTRFS_RESERVE_NO_FLUSH))
ret = 0;
}
@@ -1396,8 +1411,7 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
if (btrfs_is_zoned(cache->fs_info)) {
/* Migrate zone_unusable bytes to readonly */
sinfo->bytes_readonly += cache->zone_unusable;
- btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
- -cache->zone_unusable);
+ btrfs_space_info_update_bytes_zone_unusable(sinfo, -cache->zone_unusable);
cache->zone_unusable = 0;
}
cache->ro++;
@@ -1409,7 +1423,7 @@ out:
if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
btrfs_info(cache->fs_info,
"unable to make block group %llu ro", cache->start);
- btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
+ btrfs_dump_space_info(cache->space_info, 0, false);
}
return ret;
}
@@ -1424,9 +1438,8 @@ static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
int ret;
spin_lock(&fs_info->trans_lock);
- if (trans->transaction->list.prev != &fs_info->trans_list) {
- prev_trans = list_last_entry(&trans->transaction->list,
- struct btrfs_transaction, list);
+ if (!list_is_first(&trans->transaction->list, &fs_info->trans_list)) {
+ prev_trans = list_prev_entry(trans->transaction, list);
refcount_inc(&prev_trans->use_count);
}
spin_unlock(&fs_info->trans_lock);
@@ -1443,14 +1456,14 @@ static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
*/
mutex_lock(&fs_info->unused_bg_unpin_mutex);
if (prev_trans) {
- ret = clear_extent_bits(&prev_trans->pinned_extents, start, end,
- EXTENT_DIRTY);
+ ret = btrfs_clear_extent_bit(&prev_trans->pinned_extents, start, end,
+ EXTENT_DIRTY, NULL);
if (ret)
goto out;
}
- ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end,
- EXTENT_DIRTY);
+ ret = btrfs_clear_extent_bit(&trans->transaction->pinned_extents, start, end,
+ EXTENT_DIRTY, NULL);
out:
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
if (prev_trans)
@@ -1460,6 +1473,32 @@ out:
}
/*
+ * Link the block_group to a list via bg_list.
+ *
+ * @bg: The block_group to link to the list.
+ * @list: The list to link it to.
+ *
+ * Use this rather than list_add_tail() directly so that locking and
+ * refcounting are handled correctly.
+ *
+ * Returns: true if the bg was linked with a refcount bump and false otherwise.
+ */
+static bool btrfs_link_bg_list(struct btrfs_block_group *bg, struct list_head *list)
+{
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+ bool added = false;
+
+ spin_lock(&fs_info->unused_bgs_lock);
+ if (list_empty(&bg->bg_list)) {
+ btrfs_get_block_group(bg);
+ list_add_tail(&bg->bg_list, list);
+ added = true;
+ }
+ spin_unlock(&fs_info->unused_bgs_lock);
+ return added;
+}
+
+/*
* Process the unused_bgs list and remove any that don't have any allocated
* space inside of them.
*/
@@ -1567,15 +1606,15 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
* needing to allocate extents from the block group.
*/
used = btrfs_space_info_used(space_info, true);
- if (space_info->total_bytes - block_group->length < used &&
- block_group->zone_unusable < block_group->length) {
+ if ((space_info->total_bytes - block_group->length < used &&
+ block_group->zone_unusable < block_group->length) ||
+ has_unwritten_metadata(block_group)) {
/*
* Add a reference for the list, compensate for the ref
* drop under the "next" label for the
* fs_info->unused_bgs list.
*/
- btrfs_get_block_group(block_group);
- list_add_tail(&block_group->bg_list, &retry_list);
+ btrfs_link_bg_list(block_group, &retry_list);
trace_btrfs_skip_unused_block_group(block_group);
spin_unlock(&block_group->lock);
@@ -1598,8 +1637,10 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
ret = btrfs_zone_finish(block_group);
if (ret < 0) {
btrfs_dec_block_group_ro(block_group);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN) {
+ btrfs_link_bg_list(block_group, &retry_list);
ret = 0;
+ }
goto next;
}
@@ -1645,8 +1686,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
spin_lock(&space_info->lock);
spin_lock(&block_group->lock);
- btrfs_space_info_update_bytes_pinned(fs_info, space_info,
- -block_group->pinned);
+ btrfs_space_info_update_bytes_pinned(space_info, -block_group->pinned);
space_info->bytes_readonly += block_group->pinned;
block_group->pinned = 0;
@@ -1753,7 +1793,14 @@ static int reclaim_bgs_cmp(void *unused, const struct list_head *a,
bg1 = list_entry(a, struct btrfs_block_group, bg_list);
bg2 = list_entry(b, struct btrfs_block_group, bg_list);
- return bg1->used > bg2->used;
+ /*
+ * Some other task may be updating the ->used field concurrently, but it
+ * is not serious if we get a stale value or load/store tearing issues,
+ * as sorting the list of block groups to reclaim is not critical and an
+ * occasional imperfect order is ok. So silence KCSAN and avoid the
+ * overhead of locking or any other synchronization.
+ */
+ return data_race(bg1->used > bg2->used);
}
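data_race() (from <linux/compiler.h>) marks this lockless read as an intentional, benign race so KCSAN does not report it. A tiny sketch of the annotation; my_counter is illustrative:

#include <linux/compiler.h>
#include <linux/types.h>

struct my_counter {
	u64 used;	/* updated under a lock by other tasks */
};

/* Heuristic-only read: a stale or torn value is acceptable here. */
static u64 my_counter_peek(const struct my_counter *c)
{
	return data_race(c->used);
}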
static inline bool btrfs_should_reclaim(const struct btrfs_fs_info *fs_info)
@@ -1801,12 +1848,10 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
if (!btrfs_should_reclaim(fs_info))
return;
- sb_start_write(fs_info->sb);
+ guard(super_write)(fs_info->sb);
- if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
- sb_end_write(fs_info->sb);
+ if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
return;
- }
/*
* Long running balances can keep us blocked here for eternity, so
@@ -1814,7 +1859,6 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
*/
if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) {
btrfs_exclop_finish(fs_info);
- sb_end_write(fs_info->sb);
return;
}
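Replacing the sb_start_write()/sb_end_write() pair with guard(super_write) means every return path, including the early ones above, drops the superblock write access automatically. A generic sketch of the guard() idiom from <linux/cleanup.h>, shown here with the stock mutex guard since the super_write guard class definition is not part of this diff:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);
static int my_state;

static int update_state(int value)
{
	/* The mutex is released automatically when the scope is left. */
	guard(mutex)(&my_lock);

	if (value < 0)
		return -EINVAL;		/* early return still unlocks */

	my_state = value;
	return 0;
}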
@@ -1826,8 +1870,8 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
*/
list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp);
while (!list_empty(&fs_info->reclaim_bgs)) {
- u64 zone_unusable;
- u64 reclaimed;
+ u64 used;
+ u64 reserved;
int ret = 0;
bg = list_first_entry(&fs_info->reclaim_bgs,
@@ -1891,13 +1935,14 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
up_write(&space_info->groups_sem);
goto next;
}
+
spin_unlock(&bg->lock);
spin_unlock(&space_info->lock);
/*
* Get out fast, in case we're read-only or unmounting the
* filesystem. It is OK to drop block groups from the list even
- * for the read-only case. As we did sb_start_write(),
+ * for the read-only case. As we did take the super write lock,
* "mount -o remount,ro" won't happen and read-only filesystem
* means it is forced read-only due to a fatal error. So, it
* never gets back to read-write to let us reclaim again.
@@ -1907,31 +1952,41 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
goto next;
}
- /*
- * Cache the zone_unusable value before turning the block group
- * to read only. As soon as the blog group is read only it's
- * zone_unusable value gets moved to the block group's read-only
- * bytes and isn't available for calculations anymore.
- */
- zone_unusable = bg->zone_unusable;
ret = inc_block_group_ro(bg, 0);
up_write(&space_info->groups_sem);
if (ret < 0)
goto next;
- btrfs_info(fs_info,
- "reclaiming chunk %llu with %llu%% used %llu%% unusable",
- bg->start,
- div64_u64(bg->used * 100, bg->length),
- div64_u64(zone_unusable * 100, bg->length));
+ /*
+ * The amount of bytes reclaimed corresponds to the sum of the
+ * "used" and "reserved" counters. We have set the block group
+ * to RO above, which prevents reservations from happening but
+ * we may have existing reservations for which allocation has
+ * not yet been done - btrfs_update_block_group() was not yet
+ * called, which is where we will transfer a reserved extent's
+ * size from the "reserved" counter to the "used" counter - this
+ * happens when running delayed references. When we relocate the
+ * chunk below, relocation first flushes delalloc, waits for
+ * ordered extent completion (which is where we create delayed
+ * references for data extents) and commits the current
+ * transaction (which runs delayed references), and only after
+ * it does the actual work to move extents out of the block
+ * group. So the reported amount of reclaimed bytes is
+ * effectively the sum of the 'used' and 'reserved' counters.
+ */
+ spin_lock(&bg->lock);
+ used = bg->used;
+ reserved = bg->reserved;
+ spin_unlock(&bg->lock);
+
trace_btrfs_reclaim_block_group(bg);
- reclaimed = bg->used;
- ret = btrfs_relocate_chunk(fs_info, bg->start);
+ ret = btrfs_relocate_chunk(fs_info, bg->start, false);
if (ret) {
btrfs_dec_block_group_ro(bg);
btrfs_err(fs_info, "error relocating chunk %llu",
bg->start);
- reclaimed = 0;
+ used = 0;
+ reserved = 0;
spin_lock(&space_info->lock);
space_info->reclaim_errors++;
if (READ_ONCE(space_info->periodic_reclaim))
@@ -1940,24 +1995,13 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
}
spin_lock(&space_info->lock);
space_info->reclaim_count++;
- space_info->reclaim_bytes += reclaimed;
+ space_info->reclaim_bytes += used;
+ space_info->reclaim_bytes += reserved;
spin_unlock(&space_info->lock);
next:
- if (ret && !READ_ONCE(space_info->periodic_reclaim)) {
- /* Refcount held by the reclaim_bgs list after splice. */
- spin_lock(&fs_info->unused_bgs_lock);
- /*
- * This block group might be added to the unused list
- * during the above process. Move it back to the
- * reclaim list otherwise.
- */
- if (list_empty(&bg->bg_list)) {
- btrfs_get_block_group(bg);
- list_add_tail(&bg->bg_list, &retry_list);
- }
- spin_unlock(&fs_info->unused_bgs_lock);
- }
+ if (ret && !READ_ONCE(space_info->periodic_reclaim))
+ btrfs_link_bg_list(bg, &retry_list);
btrfs_put_block_group(bg);
mutex_unlock(&fs_info->reclaim_bgs_lock);
@@ -1981,7 +2025,6 @@ end:
list_splice_tail(&retry_list, &fs_info->reclaim_bgs);
spin_unlock(&fs_info->unused_bgs_lock);
btrfs_exclop_finish(fs_info);
- sb_end_write(fs_info->sb);
}
void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info)
@@ -1989,7 +2032,7 @@ void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info)
btrfs_reclaim_sweep(fs_info);
spin_lock(&fs_info->unused_bgs_lock);
if (!list_empty(&fs_info->reclaim_bgs))
- queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work);
+ queue_work(system_dfl_wq, &fs_info->reclaim_bgs_work);
spin_unlock(&fs_info->unused_bgs_lock);
}
@@ -1997,13 +2040,8 @@ void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg)
{
struct btrfs_fs_info *fs_info = bg->fs_info;
- spin_lock(&fs_info->unused_bgs_lock);
- if (list_empty(&bg->bg_list)) {
- btrfs_get_block_group(bg);
+ if (btrfs_link_bg_list(bg, &fs_info->reclaim_bgs))
trace_btrfs_add_reclaim_block_group(bg);
- list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs);
- }
- spin_unlock(&fs_info->unused_bgs_lock);
}
static int read_bg_from_eb(struct btrfs_fs_info *fs_info, const struct btrfs_key *key,
@@ -2027,7 +2065,7 @@ static int read_bg_from_eb(struct btrfs_fs_info *fs_info, const struct btrfs_key
return -ENOENT;
}
- if (map->start != key->objectid || map->chunk_len != key->offset) {
+ if (unlikely(map->start != key->objectid || map->chunk_len != key->offset)) {
btrfs_err(fs_info,
"block group %llu len %llu mismatch with chunk %llu len %llu",
key->objectid, key->offset, map->start, map->chunk_len);
@@ -2040,7 +2078,7 @@ static int read_bg_from_eb(struct btrfs_fs_info *fs_info, const struct btrfs_key
flags = btrfs_stack_block_group_flags(&bg) &
BTRFS_BLOCK_GROUP_TYPE_MASK;
- if (flags != (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
+ if (unlikely(flags != (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK))) {
btrfs_err(fs_info,
"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
key->objectid, key->offset, flags,
@@ -2186,9 +2224,9 @@ static int exclude_super_stripes(struct btrfs_block_group *cache)
if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
cache->bytes_super += stripe_len;
- ret = set_extent_bit(&fs_info->excluded_extents, cache->start,
- cache->start + stripe_len - 1,
- EXTENT_UPTODATE, NULL);
+ ret = btrfs_set_extent_bit(&fs_info->excluded_extents, cache->start,
+ cache->start + stripe_len - 1,
+ EXTENT_DIRTY, NULL);
if (ret)
return ret;
}
@@ -2201,7 +2239,7 @@ static int exclude_super_stripes(struct btrfs_block_group *cache)
return ret;
/* Shouldn't have super stripes in sequential zones */
- if (zoned && nr) {
+ if (unlikely(zoned && nr)) {
kfree(logical);
btrfs_err(fs_info,
"zoned: block group %llu must not contain super block",
@@ -2214,9 +2252,9 @@ static int exclude_super_stripes(struct btrfs_block_group *cache)
cache->start + cache->length - logical[nr]);
cache->bytes_super += len;
- ret = set_extent_bit(&fs_info->excluded_extents, logical[nr],
- logical[nr] + len - 1,
- EXTENT_UPTODATE, NULL);
+ ret = btrfs_set_extent_bit(&fs_info->excluded_extents,
+ logical[nr], logical[nr] + len - 1,
+ EXTENT_DIRTY, NULL);
if (ret) {
kfree(logical);
return ret;
@@ -2292,7 +2330,7 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
break;
bg = btrfs_lookup_block_group(fs_info, map->start);
- if (!bg) {
+ if (unlikely(!bg)) {
btrfs_err(fs_info,
"chunk start=%llu len=%llu doesn't have corresponding block group",
map->start, map->chunk_len);
@@ -2300,9 +2338,9 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
btrfs_free_chunk_map(map);
break;
}
- if (bg->start != map->start || bg->length != map->chunk_len ||
- (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
- (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
+ if (unlikely(bg->start != map->start || bg->length != map->chunk_len ||
+ (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
+ (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK))) {
btrfs_err(fs_info,
"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
map->start, map->chunk_len,
@@ -2341,8 +2379,9 @@ static int read_one_block_group(struct btrfs_fs_info *info,
cache->commit_used = cache->used;
cache->flags = btrfs_stack_block_group_flags(bgi);
cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi);
+ cache->space_info = btrfs_find_space_info(info, cache->flags);
- set_free_space_tree_thresholds(cache);
+ btrfs_set_free_space_tree_thresholds(cache);
if (need_clear) {
/*
@@ -2414,11 +2453,12 @@ static int read_one_block_group(struct btrfs_fs_info *info,
goto error;
}
- ret = btrfs_add_block_group_cache(info, cache);
+ ret = btrfs_add_block_group_cache(cache);
if (ret) {
btrfs_remove_free_space_cache(cache);
goto error;
}
+
trace_btrfs_add_block_group(info, cache, 0);
btrfs_add_bg_to_space_info(info, cache);
@@ -2463,7 +2503,8 @@ static int fill_dummy_bgs(struct btrfs_fs_info *fs_info)
bg->cached = BTRFS_CACHE_FINISHED;
bg->used = map->chunk_len;
bg->flags = map->type;
- ret = btrfs_add_block_group_cache(fs_info, bg);
+ bg->space_info = btrfs_find_space_info(fs_info, bg->flags);
+ ret = btrfs_add_block_group_cache(bg);
/*
* We may have some valid block group cache added already, in
* that case we skip to the next one.
@@ -2513,8 +2554,8 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
return fill_dummy_bgs(info);
key.objectid = 0;
- key.offset = 0;
key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ key.offset = 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -2645,7 +2686,7 @@ static int insert_dev_extent(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = device->fs_info;
struct btrfs_root *root = fs_info->dev_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_dev_extent *extent;
struct extent_buffer *leaf;
struct btrfs_key key;
@@ -2662,7 +2703,7 @@ static int insert_dev_extent(struct btrfs_trans_handle *trans,
key.offset = start;
ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent));
if (ret)
- goto out;
+ return ret;
leaf = path->nodes[0];
extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
@@ -2670,11 +2711,8 @@ static int insert_dev_extent(struct btrfs_trans_handle *trans,
btrfs_set_dev_extent_chunk_objectid(leaf, extent,
BTRFS_FIRST_CHUNK_TREE_OBJECTID);
btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
-
btrfs_set_dev_extent_length(leaf, extent, num_bytes);
- btrfs_mark_buffer_dirty(trans, leaf);
-out:
- btrfs_free_path(path);
+
return ret;
}
@@ -2762,7 +2800,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
block_group->length);
if (ret)
btrfs_abort_transaction(trans, ret);
- add_block_group_free_space(trans, block_group);
+ btrfs_add_block_group_free_space(trans, block_group);
/*
* If we restriped during balance, we may have added a new raid
@@ -2776,8 +2814,12 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
/* Already aborted the transaction if it failed. */
next:
btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
+
+ spin_lock(&fs_info->unused_bgs_lock);
list_del_init(&block_group->bg_list);
clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags);
+ btrfs_put_block_group(block_group);
+ spin_unlock(&fs_info->unused_bgs_lock);
/*
* If the block group is still unused, add it to the list of
@@ -2791,7 +2833,7 @@ next:
* space or none at all (due to no need to COW, extent buffers
* were already COWed in the current transaction and still
* unwritten, tree heights lower than the maximum possible
- * height, etc). For data we generally reserve the axact amount
+ * height, etc). For data we generally reserve the exact amount
* of space we are going to allocate later, the exception is
* when using compression, as we must reserve space based on the
* uncompressed data size, because the compression is only done
@@ -2835,8 +2877,8 @@ static u64 calculate_global_root_id(const struct btrfs_fs_info *fs_info, u64 off
}
struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
- u64 type,
- u64 chunk_offset, u64 size)
+ struct btrfs_space_info *space_info,
+ u64 type, u64 chunk_offset, u64 size)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_group *cache;
@@ -2856,7 +2898,7 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags);
cache->length = size;
- set_free_space_tree_thresholds(cache);
+ btrfs_set_free_space_tree_thresholds(cache);
cache->flags = type;
cache->cached = BTRFS_CACHE_FINISHED;
cache->global_root_id = calculate_global_root_id(fs_info, cache->start);
@@ -2890,10 +2932,10 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
* assigned to our block group. We want our bg to be added to the rbtree
* with its ->space_info set.
*/
- cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
+ cache->space_info = space_info;
ASSERT(cache->space_info);
- ret = btrfs_add_block_group_cache(fs_info, cache);
+ ret = btrfs_add_block_group_cache(cache);
if (ret) {
btrfs_remove_free_space_cache(cache);
btrfs_put_block_group(cache);
@@ -2915,7 +2957,7 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
}
#endif
- list_add_tail(&cache->bg_list, &trans->new_bgs);
+ btrfs_link_bg_list(cache, &trans->new_bgs);
btrfs_inc_delayed_refs_rsv_bg_inserts(fs_info);
set_avail_alloc_bits(fs_info, type);
@@ -2935,6 +2977,7 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
bool do_chunk_alloc)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
+ struct btrfs_space_info *space_info = cache->space_info;
struct btrfs_trans_handle *trans;
struct btrfs_root *root = btrfs_block_group_root(fs_info);
u64 alloc_flags;
@@ -2987,7 +3030,7 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
*/
alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
if (alloc_flags != cache->flags) {
- ret = btrfs_chunk_alloc(trans, alloc_flags,
+ ret = btrfs_chunk_alloc(trans, space_info, alloc_flags,
CHUNK_ALLOC_FORCE);
/*
* ENOSPC is allowed here, we may have enough space
@@ -3015,15 +3058,15 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
(cache->flags & BTRFS_BLOCK_GROUP_SYSTEM))
goto unlock_out;
- alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
- ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
+ alloc_flags = btrfs_get_alloc_profile(fs_info, space_info->flags);
+ ret = btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE);
if (ret < 0)
goto out;
/*
* We have allocated a new chunk. We also need to activate that chunk to
* grant metadata tickets for zoned filesystem.
*/
- ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true);
+ ret = btrfs_zoned_activate_one_bg(space_info, true);
if (ret < 0)
goto out;
@@ -3060,8 +3103,7 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
(cache->alloc_offset - cache->used - cache->pinned -
cache->reserved) +
(cache->length - cache->zone_capacity);
- btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
- cache->zone_unusable);
+ btrfs_space_info_update_bytes_zone_unusable(sinfo, cache->zone_unusable);
sinfo->bytes_readonly -= cache->zone_unusable;
}
num_bytes = cache->length - cache->reserved -
@@ -3123,7 +3165,6 @@ static int update_block_group_item(struct btrfs_trans_handle *trans,
cache->global_root_id);
btrfs_set_stack_block_group_flags(&bgi, cache->flags);
write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
- btrfs_mark_buffer_dirty(trans, leaf);
fail:
btrfs_release_path(path);
/*
@@ -3201,7 +3242,7 @@ again:
*/
BTRFS_I(inode)->generation = 0;
ret = btrfs_update_inode(trans, BTRFS_I(inode));
- if (ret) {
+ if (unlikely(ret)) {
/*
* So theoretically we could recover from this, simply set the
* super cache generation to 0 so we know to invalidate the
@@ -3313,7 +3354,7 @@ int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_group *cache, *tmp;
struct btrfs_transaction *cur_trans = trans->transaction;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
if (list_empty(&cur_trans->dirty_bgs) ||
!btrfs_test_opt(fs_info, SPACE_CACHE))
@@ -3330,7 +3371,6 @@ int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
cache_save_setup(cache, trans, path);
}
- btrfs_free_path(path);
return 0;
}
@@ -3353,7 +3393,7 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
struct btrfs_transaction *cur_trans = trans->transaction;
int ret = 0;
int should_put;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
LIST_HEAD(dirty);
struct list_head *io = &cur_trans->io_bgs;
int loops = 0;
@@ -3508,7 +3548,6 @@ out:
btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
}
- btrfs_free_path(path);
return ret;
}
@@ -3519,7 +3558,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
struct btrfs_transaction *cur_trans = trans->transaction;
int ret = 0;
int should_put;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct list_head *io = &cur_trans->io_bgs;
path = btrfs_alloc_path();
@@ -3606,9 +3645,11 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
wait_event(cur_trans->writer_wait,
atomic_read(&cur_trans->num_writers) == 1);
ret = update_block_group_item(trans, path, cache);
- }
- if (ret)
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
+ } else if (ret) {
btrfs_abort_transaction(trans, ret);
+ }
}
/* If it's not on the io list, we need to put the block group */
@@ -3631,7 +3672,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
btrfs_put_block_group(cache);
}
- btrfs_free_path(path);
return ret;
}
@@ -3699,7 +3739,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
old_val -= num_bytes;
cache->used = old_val;
cache->pinned += num_bytes;
- btrfs_space_info_update_bytes_pinned(info, space_info, num_bytes);
+ btrfs_space_info_update_bytes_pinned(space_info, num_bytes);
space_info->bytes_used -= num_bytes;
space_info->disk_used -= num_bytes * factor;
if (READ_ONCE(space_info->periodic_reclaim))
@@ -3710,8 +3750,8 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&cache->lock);
spin_unlock(&space_info->lock);
- set_extent_bit(&trans->transaction->pinned_extents, bytenr,
- bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
+ btrfs_set_extent_bit(&trans->transaction->pinned_extents, bytenr,
+ bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
}
spin_lock(&trans->transaction->dirty_bgs_lock);
@@ -3757,7 +3797,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
* reservation and return -EAGAIN, otherwise this function always succeeds.
*/
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
- u64 ram_bytes, u64 num_bytes, int delalloc,
+ u64 ram_bytes, u64 num_bytes, bool delalloc,
bool force_wrong_size_class)
{
struct btrfs_space_info *space_info = cache->space_info;
@@ -3768,31 +3808,38 @@ int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
spin_lock(&cache->lock);
if (cache->ro) {
ret = -EAGAIN;
- goto out;
+ goto out_error;
}
if (btrfs_block_group_should_use_size_class(cache)) {
size_class = btrfs_calc_block_group_size_class(num_bytes);
ret = btrfs_use_block_group_size_class(cache, size_class, force_wrong_size_class);
if (ret)
- goto out;
+ goto out_error;
}
+
cache->reserved += num_bytes;
- space_info->bytes_reserved += num_bytes;
- trace_btrfs_space_reservation(cache->fs_info, "space_info",
- space_info->flags, num_bytes, 1);
- btrfs_space_info_update_bytes_may_use(cache->fs_info,
- space_info, -ram_bytes);
if (delalloc)
cache->delalloc_bytes += num_bytes;
+ trace_btrfs_space_reservation(cache->fs_info, "space_info",
+ space_info->flags, num_bytes, 1);
+ spin_unlock(&cache->lock);
+
+ space_info->bytes_reserved += num_bytes;
+ btrfs_space_info_update_bytes_may_use(space_info, -ram_bytes);
+
/*
* Compression can use less space than we reserved, so wake tickets if
* that happens.
*/
if (num_bytes < ram_bytes)
- btrfs_try_granting_tickets(cache->fs_info, space_info);
-out:
+ btrfs_try_granting_tickets(space_info);
+ spin_unlock(&space_info->lock);
+
+ return 0;
+
+out_error:
spin_unlock(&cache->lock);
spin_unlock(&space_info->lock);
return ret;
@@ -3801,35 +3848,38 @@ out:
/*
* Update the block_group and space info counters.
*
- * @cache: The cache we are manipulating
- * @num_bytes: The number of bytes in question
- * @delalloc: The blocks are allocated for the delalloc write
+ * @cache: The cache we are manipulating.
+ * @num_bytes: The number of bytes in question.
+ * @is_delalloc: Whether the blocks are allocated for a delalloc write.
*
* This is called by somebody who is freeing space that was never actually used
* on disk. For example if you reserve some space for a new leaf in transaction
* A and before transaction A commits you free that leaf, you call this with
* reserve set to 0 in order to clear the reservation.
*/
-void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
- u64 num_bytes, int delalloc)
+void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, u64 num_bytes,
+ bool is_delalloc)
{
struct btrfs_space_info *space_info = cache->space_info;
+ bool bg_ro;
spin_lock(&space_info->lock);
spin_lock(&cache->lock);
- if (cache->ro)
+ bg_ro = cache->ro;
+ cache->reserved -= num_bytes;
+ if (is_delalloc)
+ cache->delalloc_bytes -= num_bytes;
+ spin_unlock(&cache->lock);
+
+ if (bg_ro)
space_info->bytes_readonly += num_bytes;
else if (btrfs_is_zoned(cache->fs_info))
space_info->bytes_zone_unusable += num_bytes;
- cache->reserved -= num_bytes;
+
space_info->bytes_reserved -= num_bytes;
space_info->max_extent_size = 0;
- if (delalloc)
- cache->delalloc_bytes -= num_bytes;
- spin_unlock(&cache->lock);
-
- btrfs_try_granting_tickets(cache->fs_info, space_info);
+ btrfs_try_granting_tickets(space_info);
spin_unlock(&space_info->lock);
}
@@ -3844,14 +3894,14 @@ static void force_metadata_allocation(struct btrfs_fs_info *info)
}
}
-static int should_alloc_chunk(const struct btrfs_fs_info *fs_info,
- const struct btrfs_space_info *sinfo, int force)
+static bool should_alloc_chunk(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo, int force)
{
u64 bytes_used = btrfs_space_info_used(sinfo, false);
u64 thresh;
if (force == CHUNK_ALLOC_FORCE)
- return 1;
+ return true;
/*
* in limited mode, we want to have some free space up to
@@ -3862,22 +3912,31 @@ static int should_alloc_chunk(const struct btrfs_fs_info *fs_info,
thresh = max_t(u64, SZ_64M, mult_perc(thresh, 1));
if (sinfo->total_bytes - bytes_used < thresh)
- return 1;
+ return true;
}
if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80))
- return 0;
- return 1;
+ return false;
+ return true;
}
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
{
u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
+ struct btrfs_space_info *space_info;
+
+ space_info = btrfs_find_space_info(trans->fs_info, type);
+ if (!space_info) {
+ DEBUG_WARN();
+ return -EINVAL;
+ }
- return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
+ return btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE);
}
-static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
+static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans,
+ struct btrfs_space_info *space_info,
+ u64 flags)
{
struct btrfs_block_group *bg;
int ret;
@@ -3890,7 +3949,7 @@ static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans
*/
check_system_chunk(trans, flags);
- bg = btrfs_create_chunk(trans, flags);
+ bg = btrfs_create_chunk(trans, space_info, flags);
if (IS_ERR(bg)) {
ret = PTR_ERR(bg);
goto out;
@@ -3938,8 +3997,16 @@ static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans
if (ret == -ENOSPC) {
const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info);
struct btrfs_block_group *sys_bg;
+ struct btrfs_space_info *sys_space_info;
+
+ sys_space_info = btrfs_find_space_info(trans->fs_info, sys_flags);
+ if (unlikely(!sys_space_info)) {
+ ret = -EINVAL;
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
- sys_bg = btrfs_create_chunk(trans, sys_flags);
+ sys_bg = btrfs_create_chunk(trans, sys_space_info, sys_flags);
if (IS_ERR(sys_bg)) {
ret = PTR_ERR(sys_bg);
btrfs_abort_transaction(trans, ret);
@@ -3947,17 +4014,17 @@ static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans
}
ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
- } else if (ret) {
+ } else if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -4070,6 +4137,8 @@ out:
*
* This function, btrfs_chunk_alloc(), belongs to phase 1.
*
+ * @space_info: specify which space_info the new chunk should belong to.
+ *
* If @force is CHUNK_ALLOC_FORCE:
* - return 1 if it successfully allocates a chunk,
* - return errors including -ENOSPC otherwise.
@@ -4078,11 +4147,11 @@ out:
* - return 1 if it successfully allocates a chunk,
* - return errors including -ENOSPC otherwise.
*/
-int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
+int btrfs_chunk_alloc(struct btrfs_trans_handle *trans,
+ struct btrfs_space_info *space_info, u64 flags,
enum btrfs_chunk_alloc_enum force)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_space_info *space_info;
struct btrfs_block_group *ret_bg;
bool wait_for_alloc = false;
bool should_alloc = false;
@@ -4121,9 +4190,6 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
return -ENOSPC;
- space_info = btrfs_find_space_info(fs_info, flags);
- ASSERT(space_info);
-
do {
spin_lock(&space_info->lock);
if (force < space_info->force_alloc)
@@ -4131,11 +4197,11 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
should_alloc = should_alloc_chunk(fs_info, space_info, force);
if (space_info->full) {
/* No more free physical space */
+ spin_unlock(&space_info->lock);
if (should_alloc)
ret = -ENOSPC;
else
ret = 0;
- spin_unlock(&space_info->lock);
return ret;
} else if (!should_alloc) {
spin_unlock(&space_info->lock);
@@ -4147,16 +4213,16 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
* recheck if we should continue with our allocation
* attempt.
*/
+ spin_unlock(&space_info->lock);
wait_for_alloc = true;
force = CHUNK_ALLOC_NO_FORCE;
- spin_unlock(&space_info->lock);
mutex_lock(&fs_info->chunk_mutex);
mutex_unlock(&fs_info->chunk_mutex);
} else {
/* Proceed with allocation */
- space_info->chunk_alloc = 1;
- wait_for_alloc = false;
+ space_info->chunk_alloc = true;
spin_unlock(&space_info->lock);
+ wait_for_alloc = false;
}
cond_resched();
@@ -4184,7 +4250,7 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
force_metadata_allocation(fs_info);
}
- ret_bg = do_chunk_alloc(trans, flags);
+ ret_bg = do_chunk_alloc(trans, space_info, flags);
trans->allocating_chunk = false;
if (IS_ERR(ret_bg)) {
@@ -4203,7 +4269,7 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
spin_lock(&space_info->lock);
if (ret < 0) {
if (ret == -ENOSPC)
- space_info->full = 1;
+ space_info->full = true;
else
goto out;
} else {
@@ -4213,7 +4279,7 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
- space_info->chunk_alloc = 0;
+ space_info->chunk_alloc = false;
spin_unlock(&space_info->lock);
mutex_unlock(&fs_info->chunk_mutex);
@@ -4254,12 +4320,16 @@ static void reserve_chunk_space(struct btrfs_trans_handle *trans,
if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
left, bytes, type);
- btrfs_dump_space_info(fs_info, info, 0, 0);
+ btrfs_dump_space_info(info, 0, false);
}
if (left < bytes) {
u64 flags = btrfs_system_alloc_profile(fs_info);
struct btrfs_block_group *bg;
+ struct btrfs_space_info *space_info;
+
+ space_info = btrfs_find_space_info(fs_info, flags);
+ ASSERT(space_info);
/*
* Ignore failure to create system chunk. We might end up not
@@ -4267,7 +4337,7 @@ static void reserve_chunk_space(struct btrfs_trans_handle *trans,
* the paths we visit in the chunk tree (they were already COWed
* or created in the current transaction for example).
*/
- bg = btrfs_create_chunk(trans, flags);
+ bg = btrfs_create_chunk(trans, space_info, flags);
if (IS_ERR(bg)) {
ret = PTR_ERR(bg);
} else {
@@ -4275,7 +4345,7 @@ static void reserve_chunk_space(struct btrfs_trans_handle *trans,
* We have a new chunk. We also need to activate it for
* zoned filesystem.
*/
- ret = btrfs_zoned_activate_one_bg(fs_info, info, true);
+ ret = btrfs_zoned_activate_one_bg(info, true);
if (ret < 0)
return;
@@ -4375,6 +4445,43 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
}
}
+static void check_removing_space_info(struct btrfs_space_info *space_info)
+{
+ struct btrfs_fs_info *info = space_info->fs_info;
+
+ if (space_info->subgroup_id == BTRFS_SUB_GROUP_PRIMARY) {
+ /* This is a top space_info, proceed with its children first. */
+ for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) {
+ if (space_info->sub_group[i]) {
+ check_removing_space_info(space_info->sub_group[i]);
+ kfree(space_info->sub_group[i]);
+ space_info->sub_group[i] = NULL;
+ }
+ }
+ }
+
+ /*
+ * Do not hide this behind enospc_debug, this is actually important and
+ * indicates a real bug if this happens.
+ */
+ if (WARN_ON(space_info->bytes_pinned > 0 || space_info->bytes_may_use > 0))
+ btrfs_dump_space_info(space_info, 0, false);
+
+ /*
+ * If there was a failure to cleanup a log tree, very likely due to an
+ * IO failure on a writeback attempt of one or more of its extent
+ * buffers, we could not do proper (and cheap) unaccounting of their
+ * reserved space, so don't warn on bytes_reserved > 0 in that case.
+ */
+ if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) ||
+ !BTRFS_FS_LOG_CLEANUP_ERROR(info)) {
+ if (WARN_ON(space_info->bytes_reserved > 0))
+ btrfs_dump_space_info(space_info, 0, false);
+ }
+
+ WARN_ON(space_info->reclaim_size > 0);
+}
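
The helper above walks a primary space_info's sub_group array before handling the parent itself, so leaked sub-group accounting is reported before the children are freed. A minimal standalone C model of that teardown order (not kernel code; names, sizes and the leak check are illustrative only):

#include <stdio.h>
#include <stdlib.h>

#define SUB_GROUP_MAX 2

struct si_node {
	const char *name;
	int is_primary;
	struct si_node *sub_group[SUB_GROUP_MAX];
	unsigned long long bytes_pinned;
};

static void check_and_free(struct si_node *si)
{
	if (si->is_primary) {
		/* Children first, like the kernel helper. */
		for (int i = 0; i < SUB_GROUP_MAX; i++) {
			if (si->sub_group[i]) {
				check_and_free(si->sub_group[i]);
				free(si->sub_group[i]);
				si->sub_group[i] = NULL;
			}
		}
	}
	if (si->bytes_pinned)
		fprintf(stderr, "leak in %s: %llu pinned bytes\n",
			si->name, si->bytes_pinned);
}

int main(void)
{
	struct si_node *treelog = calloc(1, sizeof(*treelog));
	struct si_node metadata = { .name = "metadata", .is_primary = 1 };

	treelog->name = "metadata-treelog";
	metadata.sub_group[0] = treelog;
	check_and_free(&metadata);	/* checks treelog first, then metadata */
	return 0;
}
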
+
/*
* Must be called only after stopping all workers, since we could have block
* group caching kthreads running, and therefore they could race with us if we
@@ -4400,8 +4507,8 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
write_lock(&info->block_group_cache_lock);
while (!list_empty(&info->caching_block_groups)) {
- caching_ctl = list_entry(info->caching_block_groups.next,
- struct btrfs_caching_control, list);
+ caching_ctl = list_first_entry(&info->caching_block_groups,
+ struct btrfs_caching_control, list);
list_del(&caching_ctl->list);
btrfs_put_caching_control(caching_ctl);
}
@@ -4472,32 +4579,10 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
btrfs_release_global_block_rsv(info);
while (!list_empty(&info->space_info)) {
- space_info = list_entry(info->space_info.next,
- struct btrfs_space_info,
- list);
-
- /*
- * Do not hide this behind enospc_debug, this is actually
- * important and indicates a real bug if this happens.
- */
- if (WARN_ON(space_info->bytes_pinned > 0 ||
- space_info->bytes_may_use > 0))
- btrfs_dump_space_info(info, space_info, 0, 0);
-
- /*
- * If there was a failure to cleanup a log tree, very likely due
- * to an IO failure on a writeback attempt of one or more of its
- * extent buffers, we could not do proper (and cheap) unaccounting
- * of their reserved space, so don't warn on bytes_reserved > 0 in
- * that case.
- */
- if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) ||
- !BTRFS_FS_LOG_CLEANUP_ERROR(info)) {
- if (WARN_ON(space_info->bytes_reserved > 0))
- btrfs_dump_space_info(info, space_info, 0, 0);
- }
+ space_info = list_first_entry(&info->space_info,
+ struct btrfs_space_info, list);
- WARN_ON(space_info->reclaim_size > 0);
+ check_removing_space_info(space_info);
list_del(&space_info->list);
btrfs_sysfs_remove_space_info(space_info);
}
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 36937eeab9b8..5f933455118c 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -63,7 +63,7 @@ enum btrfs_discard_state {
* CHUNK_ALLOC_FORCE means it must try to allocate one
*
* CHUNK_ALLOC_FORCE_FOR_EXTENT like CHUNK_ALLOC_FORCE but called from
- * find_free_extent() that also activaes the zone
+ * find_free_extent() that also activates the zone
*/
enum btrfs_chunk_alloc_enum {
CHUNK_ALLOC_NO_FORCE,
@@ -83,6 +83,8 @@ enum btrfs_block_group_flags {
BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
/* Does the block group need to be added to the free space tree? */
BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE,
+ /* Set after we add a new block group to the free space tree. */
+ BLOCK_GROUP_FLAG_FREE_SPACE_ADDED,
/* Indicate that the block group is placed on a sequential zone */
BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE,
/*
@@ -244,6 +246,11 @@ struct btrfs_block_group {
/* Lock for free space tree operations. */
struct mutex free_space_lock;
+ /* Protected by @free_space_lock. */
+ bool using_free_space_bitmaps;
+ /* Protected by @free_space_lock. */
+ bool using_free_space_bitmaps_cached;
+
/*
* Number of extents in this block group used for swap files.
* All accesses protected by the spinlock 'lock'.
@@ -326,8 +333,8 @@ void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
- u64 type,
- u64 chunk_offset, u64 size);
+ struct btrfs_space_info *space_info,
+ u64 type, u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
bool do_chunk_alloc);
@@ -338,11 +345,12 @@ int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, bool alloc);
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
- u64 ram_bytes, u64 num_bytes, int delalloc,
+ u64 ram_bytes, u64 num_bytes, bool delalloc,
bool force_wrong_size_class);
-void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
- u64 num_bytes, int delalloc);
-int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
+void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, u64 num_bytes,
+ bool is_delalloc);
+int btrfs_chunk_alloc(struct btrfs_trans_handle *trans,
+ struct btrfs_space_info *space_info, u64 flags,
enum btrfs_chunk_alloc_enum force);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
index a07b9594dc70..96cf7a162987 100644
--- a/fs/btrfs/block-rsv.c
+++ b/fs/btrfs/block-rsv.c
@@ -150,9 +150,7 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
spin_unlock(&dest->lock);
}
if (num_bytes)
- btrfs_space_info_free_bytes_may_use(fs_info,
- space_info,
- num_bytes);
+ btrfs_space_info_free_bytes_may_use(space_info, num_bytes);
}
if (qgroup_to_release_ret)
*qgroup_to_release_ret = qgroup_to_release;
@@ -220,8 +218,7 @@ int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
if (num_bytes == 0)
return 0;
- ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
- num_bytes, flush);
+ ret = btrfs_reserve_metadata_bytes(block_rsv->space_info, num_bytes, flush);
if (!ret)
btrfs_block_rsv_add_bytes(block_rsv, num_bytes, true);
@@ -261,8 +258,7 @@ int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
if (!ret)
return 0;
- ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
- num_bytes, flush);
+ ret = btrfs_reserve_metadata_bytes(block_rsv->space_info, num_bytes, flush);
if (!ret) {
btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
return 0;
@@ -383,15 +379,13 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
if (block_rsv->reserved < block_rsv->size) {
num_bytes = block_rsv->size - block_rsv->reserved;
- btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
- num_bytes);
+ btrfs_space_info_update_bytes_may_use(sinfo, num_bytes);
block_rsv->reserved = block_rsv->size;
} else if (block_rsv->reserved > block_rsv->size) {
num_bytes = block_rsv->reserved - block_rsv->size;
- btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
- -num_bytes);
+ btrfs_space_info_update_bytes_may_use(sinfo, -num_bytes);
block_rsv->reserved = block_rsv->size;
- btrfs_try_granting_tickets(fs_info, sinfo);
+ btrfs_try_granting_tickets(sinfo);
}
block_rsv->full = (block_rsv->reserved == block_rsv->size);
@@ -422,6 +416,9 @@ void btrfs_init_root_block_rsv(struct btrfs_root *root)
case BTRFS_CHUNK_TREE_OBJECTID:
root->block_rsv = &fs_info->chunk_block_rsv;
break;
+ case BTRFS_TREE_LOG_OBJECTID:
+ root->block_rsv = &fs_info->treelog_rsv;
+ break;
default:
root->block_rsv = NULL;
break;
@@ -442,6 +439,14 @@ void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info)
fs_info->delayed_block_rsv.space_info = space_info;
fs_info->delayed_refs_rsv.space_info = space_info;
+ /* The treelog_rsv uses a dedicated space_info in zoned mode. */
+ if (!btrfs_is_zoned(fs_info)) {
+ fs_info->treelog_rsv.space_info = space_info;
+ } else {
+ ASSERT(space_info->sub_group[0]->subgroup_id == BTRFS_SUB_GROUP_TREELOG);
+ fs_info->treelog_rsv.space_info = space_info->sub_group[0];
+ }
+
btrfs_update_global_block_rsv(fs_info);
}
@@ -523,8 +528,8 @@ again:
block_rsv->type, ret);
}
try_reserve:
- ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
- blocksize, BTRFS_RESERVE_NO_FLUSH);
+ ret = btrfs_reserve_metadata_bytes(block_rsv->space_info, blocksize,
+ BTRFS_RESERVE_NO_FLUSH);
if (!ret)
return block_rsv;
/*
@@ -545,7 +550,7 @@ try_reserve:
* one last time to force a reservation if there's enough actual space
* on disk to make the reservation.
*/
- ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info, blocksize,
+ ret = btrfs_reserve_metadata_bytes(block_rsv->space_info, blocksize,
BTRFS_RESERVE_FLUSH_EMERGENCY);
if (!ret)
return block_rsv;
diff --git a/fs/btrfs/block-rsv.h b/fs/btrfs/block-rsv.h
index d12b1fac5c74..79ae9d05cd91 100644
--- a/fs/btrfs/block-rsv.h
+++ b/fs/btrfs/block-rsv.h
@@ -24,6 +24,7 @@ enum btrfs_rsv_type {
BTRFS_BLOCK_RSV_CHUNK,
BTRFS_BLOCK_RSV_DELOPS,
BTRFS_BLOCK_RSV_DELREFS,
+ BTRFS_BLOCK_RSV_TREELOG,
BTRFS_BLOCK_RSV_EMPTY,
BTRFS_BLOCK_RSV_TEMP,
};
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index aa1f55cd81b7..73602ee8de3f 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -18,20 +18,20 @@
#include <linux/lockdep.h>
#include <uapi/linux/btrfs_tree.h>
#include <trace/events/btrfs.h>
+#include "ctree.h"
#include "block-rsv.h"
#include "extent_map.h"
-#include "extent_io.h"
#include "extent-io-tree.h"
-#include "ordered-data.h"
-#include "delayed-inode.h"
-struct extent_state;
struct posix_acl;
struct iov_iter;
struct writeback_control;
struct btrfs_root;
struct btrfs_fs_info;
struct btrfs_trans_handle;
+struct btrfs_bio;
+struct btrfs_file_extent;
+struct btrfs_delayed_node;
/*
* Since we search a directory based on f_pos (struct dir_context::pos) we have
@@ -145,6 +145,7 @@ struct btrfs_inode {
* different from prop_compress and takes precedence if set.
*/
u8 defrag_compress;
+ s8 defrag_compress_level;
/*
* Lock for counters and all fields used to determine if the inode is in
@@ -247,7 +248,7 @@ struct btrfs_inode {
u64 new_delalloc_bytes;
/*
* The offset of the last dir index key that was logged.
- * This is used only for directories.
+ * This is used only for directories. Protected by 'log_mutex'.
*/
u64 last_dir_index_offset;
};
@@ -337,6 +338,11 @@ struct btrfs_inode {
struct list_head delayed_iput;
struct rw_semaphore i_mmap_lock;
+
+#ifdef CONFIG_FS_VERITY
+ struct fsverity_info *i_verity_info;
+#endif
+
struct inode vfs_inode;
};
@@ -516,17 +522,38 @@ static inline void btrfs_assert_inode_locked(struct btrfs_inode *inode)
lockdep_assert_held(&inode->vfs_inode.i_rwsem);
}
-/* Array of bytes with variable length, hexadecimal format 0x1234 */
-#define CSUM_FMT "0x%*phN"
-#define CSUM_FMT_VALUE(size, bytes) size, bytes
+static inline void btrfs_update_inode_mapping_flags(struct btrfs_inode *inode)
+{
+ if (inode->flags & BTRFS_INODE_NODATASUM)
+ mapping_clear_stable_writes(inode->vfs_inode.i_mapping);
+ else
+ mapping_set_stable_writes(inode->vfs_inode.i_mapping);
+}
+
+static inline void btrfs_set_inode_mapping_order(struct btrfs_inode *inode)
+{
+ /* Metadata inode should not reach here. */
+ ASSERT(is_data_inode(inode));
+
+ /* We only allow BITS_PER_LONG blocks for each bitmap. */
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ mapping_set_folio_order_range(inode->vfs_inode.i_mapping,
+ inode->root->fs_info->block_min_order,
+ inode->root->fs_info->block_max_order);
+#endif
+}
-int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
- u32 pgoff, u8 *csum, const u8 * const csum_expected);
+void btrfs_calculate_block_csum_folio(struct btrfs_fs_info *fs_info,
+ const phys_addr_t paddr, u8 *dest);
+void btrfs_calculate_block_csum_pages(struct btrfs_fs_info *fs_info,
+ const phys_addr_t paddrs[], u8 *dest);
+int btrfs_check_block_csum(struct btrfs_fs_info *fs_info, phys_addr_t paddr, u8 *csum,
+ const u8 * const csum_expected);
bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
- u32 bio_offset, struct bio_vec *bv);
-noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
+ u32 bio_offset, const phys_addr_t paddrs[]);
+noinline int can_nocow_extent(struct btrfs_inode *inode, u64 offset, u64 *len,
struct btrfs_file_extent *file_extent,
- bool nowait, bool strict);
+ bool nowait);
void btrfs_del_delalloc_inode(struct btrfs_inode *inode);
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
@@ -536,10 +563,9 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
const struct fscrypt_str *name);
int btrfs_add_link(struct btrfs_trans_handle *trans,
struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
- const struct fscrypt_str *name, int add_backref, u64 index);
+ const struct fscrypt_str *name, bool add_backref, u64 index);
int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry);
-int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
- int front);
+int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 end);
int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context);
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
@@ -584,9 +610,9 @@ void btrfs_free_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
int __init btrfs_init_cachep(void);
void __cold btrfs_destroy_cachep(void);
-struct inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
- struct btrfs_path *path);
-struct inode *btrfs_iget(u64 ino, struct btrfs_root *root);
+struct btrfs_inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
+ struct btrfs_path *path);
+struct btrfs_inode *btrfs_iget(u64 ino, struct btrfs_root *root);
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
struct folio *folio, u64 start, u64 len);
int btrfs_update_inode(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 0c4d486c3048..6b3357287b42 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -67,9 +67,7 @@ static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode,
bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op,
GFP_NOFS, &btrfs_compressed_bioset));
- btrfs_bio_init(bbio, inode->root->fs_info, end_io, NULL);
- bbio->inode = inode;
- bbio->file_offset = start;
+ btrfs_bio_init(bbio, inode, start, end_io, NULL);
return to_compressed_bio(bbio);
}
@@ -90,19 +88,19 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len)
}
static int compression_compress_pages(int type, struct list_head *ws,
- struct address_space *mapping, u64 start,
+ struct btrfs_inode *inode, u64 start,
struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out)
{
switch (type) {
case BTRFS_COMPRESS_ZLIB:
- return zlib_compress_folios(ws, mapping, start, folios,
+ return zlib_compress_folios(ws, inode, start, folios,
out_folios, total_in, total_out);
case BTRFS_COMPRESS_LZO:
- return lzo_compress_folios(ws, mapping, start, folios,
+ return lzo_compress_folios(ws, inode, start, folios,
out_folios, total_in, total_out);
case BTRFS_COMPRESS_ZSTD:
- return zstd_compress_folios(ws, mapping, start, folios,
+ return zstd_compress_folios(ws, inode, start, folios,
out_folios, total_in, total_out);
case BTRFS_COMPRESS_NONE:
default:
@@ -194,15 +192,13 @@ static unsigned long btrfs_compr_pool_count(struct shrinker *sh, struct shrink_c
static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_control *sc)
{
- struct list_head remove;
+ LIST_HEAD(remove);
struct list_head *tmp, *next;
int freed;
if (compr_pool.count == 0)
return SHRINK_STOP;
- INIT_LIST_HEAD(&remove);
-
/* For now, just simply drain the whole list. */
spin_lock(&compr_pool.lock);
list_splice_init(&compr_pool.list, &remove);
@@ -223,10 +219,14 @@ static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_co
/*
* Common wrappers for page allocation from compression wrappers
*/
-struct folio *btrfs_alloc_compr_folio(void)
+struct folio *btrfs_alloc_compr_folio(struct btrfs_fs_info *fs_info)
{
struct folio *folio = NULL;
+ /* For bs > ps cases, no cached folio pool for now. */
+ if (fs_info->block_min_order)
+ goto alloc;
+
spin_lock(&compr_pool.lock);
if (compr_pool.count > 0) {
folio = list_first_entry(&compr_pool.list, struct folio, lru);
@@ -238,13 +238,18 @@ struct folio *btrfs_alloc_compr_folio(void)
if (folio)
return folio;
- return folio_alloc(GFP_NOFS, 0);
+alloc:
+ return folio_alloc(GFP_NOFS, fs_info->block_min_order);
}
void btrfs_free_compr_folio(struct folio *folio)
{
bool do_free = false;
+ /* The folio is from bs > ps fs, no cached pool for now. */
+ if (folio_order(folio))
+ goto free;
+
spin_lock(&compr_pool.lock);
if (compr_pool.count > compr_pool.thresh) {
do_free = true;
@@ -257,6 +262,7 @@ void btrfs_free_compr_folio(struct folio *folio)
if (!do_free)
return;
+free:
ASSERT(folio_ref_count(folio) == 1);
folio_put(folio);
}
@@ -282,15 +288,15 @@ static noinline void end_compressed_writeback(const struct compressed_bio *cb)
{
struct inode *inode = &cb->bbio.inode->vfs_inode;
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- unsigned long index = cb->start >> PAGE_SHIFT;
- unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
+ pgoff_t index = cb->start >> PAGE_SHIFT;
+ const pgoff_t end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
struct folio_batch fbatch;
- const int error = blk_status_to_errno(cb->bbio.bio.bi_status);
int i;
int ret;
- if (error)
- mapping_set_error(inode->i_mapping, error);
+ ret = blk_status_to_errno(cb->bbio.bio.bi_status);
+ if (ret)
+ mapping_set_error(inode->i_mapping, ret);
folio_batch_init(&fbatch);
while (index <= end_index) {
@@ -311,22 +317,6 @@ static noinline void end_compressed_writeback(const struct compressed_bio *cb)
/* the inode may be gone now */
}
-static void btrfs_finish_compressed_write_work(struct work_struct *work)
-{
- struct compressed_bio *cb =
- container_of(work, struct compressed_bio, write_end_work);
-
- btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
- cb->bbio.bio.bi_status == BLK_STS_OK);
-
- if (cb->writeback)
- end_compressed_writeback(cb);
- /* Note, our inode could be gone now */
-
- btrfs_free_compressed_folios(cb);
- bio_put(&cb->bbio.bio);
-}
-
/*
* Do the cleanup once all the compressed pages hit the disk. This will clear
* writeback on the file pages and free the compressed pages.
@@ -337,25 +327,33 @@ static void btrfs_finish_compressed_write_work(struct work_struct *work)
static void end_bbio_compressed_write(struct btrfs_bio *bbio)
{
struct compressed_bio *cb = to_compressed_bio(bbio);
- struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
- queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
+ btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
+ cb->bbio.bio.bi_status == BLK_STS_OK);
+
+ if (cb->writeback)
+ end_compressed_writeback(cb);
+ /* Note, our inode could be gone now. */
+ btrfs_free_compressed_folios(cb);
+ bio_put(&cb->bbio.bio);
}
static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
{
struct bio *bio = &cb->bbio.bio;
u32 offset = 0;
+ unsigned int findex = 0;
while (offset < cb->compressed_len) {
+ struct folio *folio = cb->compressed_folios[findex];
+ u32 len = min_t(u32, cb->compressed_len - offset, folio_size(folio));
int ret;
- u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE);
/* Maximum compressed extent is smaller than bio size limit. */
- ret = bio_add_folio(bio, cb->compressed_folios[offset >> PAGE_SHIFT],
- len, 0);
+ ret = bio_add_folio(bio, folio, len, 0);
ASSERT(ret);
offset += len;
+ findex++;
}
}
@@ -389,7 +387,6 @@ void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
cb->compressed_folios = compressed_folios;
cb->compressed_len = ordered->disk_num_bytes;
cb->writeback = writeback;
- INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
cb->nr_folios = nr_folios;
cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
cb->bbio.ordered = ordered;
@@ -415,7 +412,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
int *memstall, unsigned long *pflags)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- unsigned long end_index;
+ pgoff_t end_index;
struct bio *orig_bio = &cb->orig_bbio->bio;
u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
u64 isize = i_size_read(inode);
@@ -443,11 +440,15 @@ static noinline int add_ra_bio_pages(struct inode *inode,
if (fs_info->sectorsize < PAGE_SIZE)
return 0;
+ /* For bs > ps cases, we don't support readahead for compressed folios for now. */
+ if (fs_info->block_min_order)
+ return 0;
+
end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
while (cur < compressed_end) {
- u64 page_end;
- u64 pg_index = cur >> PAGE_SHIFT;
+ pgoff_t page_end;
+ pgoff_t pg_index = cur >> PAGE_SHIFT;
u32 add_size;
if (pg_index > end_index)
@@ -474,8 +475,8 @@ static noinline int add_ra_bio_pages(struct inode *inode,
continue;
}
- folio = filemap_alloc_folio(mapping_gfp_constraint(mapping,
- ~__GFP_FS), 0);
+ folio = filemap_alloc_folio(mapping_gfp_constraint(mapping, ~__GFP_FS),
+ 0, NULL);
if (!folio)
break;
@@ -499,9 +500,9 @@ static noinline int add_ra_bio_pages(struct inode *inode,
}
page_end = (pg_index << PAGE_SHIFT) + folio_size(folio) - 1;
- lock_extent(tree, cur, page_end, NULL);
+ btrfs_lock_extent(tree, cur, page_end, NULL);
read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
+ em = btrfs_lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
read_unlock(&em_tree->lock);
/*
@@ -510,20 +511,20 @@ static noinline int add_ra_bio_pages(struct inode *inode,
* to this compressed extent on disk.
*/
if (!em || cur < em->start ||
- (cur + fs_info->sectorsize > extent_map_end(em)) ||
- (extent_map_block_start(em) >> SECTOR_SHIFT) !=
+ (cur + fs_info->sectorsize > btrfs_extent_map_end(em)) ||
+ (btrfs_extent_map_block_start(em) >> SECTOR_SHIFT) !=
orig_bio->bi_iter.bi_sector) {
- free_extent_map(em);
- unlock_extent(tree, cur, page_end, NULL);
+ btrfs_free_extent_map(em);
+ btrfs_unlock_extent(tree, cur, page_end, NULL);
folio_unlock(folio);
folio_put(folio);
break;
}
add_size = min(em->start + em->len, page_end + 1) - cur;
- free_extent_map(em);
- unlock_extent(tree, cur, page_end, NULL);
+ btrfs_free_extent_map(em);
+ btrfs_unlock_extent(tree, cur, page_end, NULL);
- if (folio->index == end_index) {
+ if (folio_contains(folio, end_index)) {
size_t zero_offset = offset_in_folio(folio, isize);
if (zero_offset) {
@@ -576,19 +577,19 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
struct extent_map *em;
unsigned long pflags;
int memstall = 0;
- blk_status_t ret;
- int ret2;
+ blk_status_t status;
+ int ret;
/* we need the actual starting offset of this extent in the file */
read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
+ em = btrfs_lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
read_unlock(&em_tree->lock);
if (!em) {
- ret = BLK_STS_IOERR;
+ status = BLK_STS_IOERR;
goto out;
}
- ASSERT(extent_map_is_compressed(em));
+ ASSERT(btrfs_extent_map_is_compressed(em));
compressed_len = em->disk_num_bytes;
cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ,
@@ -600,21 +601,23 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
cb->len = bbio->bio.bi_iter.bi_size;
cb->compressed_len = compressed_len;
- cb->compress_type = extent_map_compression(em);
+ cb->compress_type = btrfs_extent_map_compression(em);
cb->orig_bbio = bbio;
+ cb->bbio.csum_search_commit_root = bbio->csum_search_commit_root;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- cb->nr_folios = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
- cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct page *), GFP_NOFS);
+ cb->nr_folios = DIV_ROUND_UP(compressed_len, btrfs_min_folio_size(fs_info));
+ cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct folio *), GFP_NOFS);
if (!cb->compressed_folios) {
- ret = BLK_STS_RESOURCE;
+ status = BLK_STS_RESOURCE;
goto out_free_bio;
}
- ret2 = btrfs_alloc_folio_array(cb->nr_folios, cb->compressed_folios);
- if (ret2) {
- ret = BLK_STS_RESOURCE;
+ ret = btrfs_alloc_folio_array(cb->nr_folios, fs_info->block_min_order,
+ cb->compressed_folios);
+ if (ret) {
+ status = BLK_STS_RESOURCE;
goto out_free_compressed_pages;
}
@@ -637,7 +640,7 @@ out_free_compressed_pages:
out_free_bio:
bio_put(&cb->bbio.bio);
out:
- btrfs_bio_end_io(bbio, ret);
+ btrfs_bio_end_io(bbio, status);
}
/*
@@ -687,8 +690,6 @@ struct heuristic_ws {
struct list_head list;
};
-static struct workspace_manager heuristic_wsm;
-
static void free_heuristic_ws(struct list_head *ws)
{
struct heuristic_ws *workspace;
@@ -701,7 +702,7 @@ static void free_heuristic_ws(struct list_head *ws)
kfree(workspace);
}
-static struct list_head *alloc_heuristic_ws(void)
+static struct list_head *alloc_heuristic_ws(struct btrfs_fs_info *fs_info)
{
struct heuristic_ws *ws;
@@ -728,11 +729,9 @@ fail:
return ERR_PTR(-ENOMEM);
}
-const struct btrfs_compress_op btrfs_heuristic_compress = {
- .workspace_manager = &heuristic_wsm,
-};
+const struct btrfs_compress_levels btrfs_heuristic_compress = { 0 };
-static const struct btrfs_compress_op * const btrfs_compress_op[] = {
+static const struct btrfs_compress_levels * const btrfs_compress_levels[] = {
/* The heuristic is represented as compression type 0 */
&btrfs_heuristic_compress,
&btrfs_zlib_compress,
@@ -740,13 +739,13 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = {
&btrfs_zstd_compress,
};
-static struct list_head *alloc_workspace(int type, unsigned int level)
+static struct list_head *alloc_workspace(struct btrfs_fs_info *fs_info, int type, int level)
{
switch (type) {
- case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws();
- case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
- case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace();
- case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
+ case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(fs_info);
+ case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(fs_info, level);
+ case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(fs_info);
+ case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(fs_info, level);
default:
/*
* This can't happen, the type is validated several times
@@ -772,44 +771,58 @@ static void free_workspace(int type, struct list_head *ws)
}
}
-static void btrfs_init_workspace_manager(int type)
+static int alloc_workspace_manager(struct btrfs_fs_info *fs_info,
+ enum btrfs_compression_type type)
{
- struct workspace_manager *wsm;
+ struct workspace_manager *gwsm;
struct list_head *workspace;
- wsm = btrfs_compress_op[type]->workspace_manager;
- INIT_LIST_HEAD(&wsm->idle_ws);
- spin_lock_init(&wsm->ws_lock);
- atomic_set(&wsm->total_ws, 0);
- init_waitqueue_head(&wsm->ws_wait);
+ ASSERT(fs_info->compr_wsm[type] == NULL);
+ gwsm = kzalloc(sizeof(*gwsm), GFP_KERNEL);
+ if (!gwsm)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&gwsm->idle_ws);
+ spin_lock_init(&gwsm->ws_lock);
+ atomic_set(&gwsm->total_ws, 0);
+ init_waitqueue_head(&gwsm->ws_wait);
+ fs_info->compr_wsm[type] = gwsm;
/*
* Preallocate one workspace for each compression type so we can
* guarantee forward progress in the worst case
*/
- workspace = alloc_workspace(type, 0);
+ workspace = alloc_workspace(fs_info, type, 0);
if (IS_ERR(workspace)) {
- pr_warn(
- "BTRFS: cannot preallocate compression workspace, will try later\n");
+ btrfs_warn(fs_info,
+ "cannot preallocate compression workspace for %s, will try later",
+ btrfs_compress_type2str(type));
} else {
- atomic_set(&wsm->total_ws, 1);
- wsm->free_ws = 1;
- list_add(workspace, &wsm->idle_ws);
+ atomic_set(&gwsm->total_ws, 1);
+ gwsm->free_ws = 1;
+ list_add(workspace, &gwsm->idle_ws);
}
+ return 0;
}
-static void btrfs_cleanup_workspace_manager(int type)
+static void free_workspace_manager(struct btrfs_fs_info *fs_info,
+ enum btrfs_compression_type type)
{
- struct workspace_manager *wsman;
struct list_head *ws;
+ struct workspace_manager *gwsm = fs_info->compr_wsm[type];
- wsman = btrfs_compress_op[type]->workspace_manager;
- while (!list_empty(&wsman->idle_ws)) {
- ws = wsman->idle_ws.next;
+ /* ZSTD uses its own workspace manager, it should not enter here. */
+ ASSERT(type != BTRFS_COMPRESS_ZSTD && type < BTRFS_NR_COMPRESS_TYPES);
+ if (!gwsm)
+ return;
+ fs_info->compr_wsm[type] = NULL;
+ while (!list_empty(&gwsm->idle_ws)) {
+ ws = gwsm->idle_ws.next;
list_del(ws);
free_workspace(type, ws);
- atomic_dec(&wsman->total_ws);
+ atomic_dec(&gwsm->total_ws);
}
+ kfree(gwsm);
}
/*
@@ -818,9 +831,9 @@ static void btrfs_cleanup_workspace_manager(int type)
 * Preallocation makes a forward progress guarantee and we do not return
* errors.
*/
-struct list_head *btrfs_get_workspace(int type, unsigned int level)
+struct list_head *btrfs_get_workspace(struct btrfs_fs_info *fs_info, int type, int level)
{
- struct workspace_manager *wsm;
+ struct workspace_manager *wsm = fs_info->compr_wsm[type];
struct list_head *workspace;
int cpus = num_online_cpus();
unsigned nofs_flag;
@@ -830,7 +843,7 @@ struct list_head *btrfs_get_workspace(int type, unsigned int level)
wait_queue_head_t *ws_wait;
int *free_ws;
- wsm = btrfs_compress_op[type]->workspace_manager;
+ ASSERT(wsm);
idle_ws = &wsm->idle_ws;
ws_lock = &wsm->ws_lock;
total_ws = &wsm->total_ws;
@@ -866,7 +879,7 @@ again:
* context of btrfs_compress_bio/btrfs_compress_pages
*/
nofs_flag = memalloc_nofs_save();
- workspace = alloc_workspace(type, level);
+ workspace = alloc_workspace(fs_info, type, level);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(workspace)) {
@@ -888,22 +901,22 @@ again:
/* once per minute */ 60 * HZ,
/* no burst */ 1);
- if (__ratelimit(&_rs)) {
- pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
- }
+ if (__ratelimit(&_rs))
+ btrfs_warn(fs_info,
+ "no compression workspaces, low memory, retrying");
}
goto again;
}
return workspace;
}
-static struct list_head *get_workspace(int type, int level)
+static struct list_head *get_workspace(struct btrfs_fs_info *fs_info, int type, int level)
{
switch (type) {
- case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
- case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
- case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(type, level);
- case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
+ case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(fs_info, type, level);
+ case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(fs_info, level);
+ case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(fs_info, type, level);
+ case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(fs_info, level);
default:
/*
* This can't happen, the type is validated several times
@@ -917,21 +930,21 @@ static struct list_head *get_workspace(int type, int level)
* put a workspace struct back on the list or free it if we have enough
* idle ones sitting around
*/
-void btrfs_put_workspace(int type, struct list_head *ws)
+void btrfs_put_workspace(struct btrfs_fs_info *fs_info, int type, struct list_head *ws)
{
- struct workspace_manager *wsm;
+ struct workspace_manager *gwsm = fs_info->compr_wsm[type];
struct list_head *idle_ws;
spinlock_t *ws_lock;
atomic_t *total_ws;
wait_queue_head_t *ws_wait;
int *free_ws;
- wsm = btrfs_compress_op[type]->workspace_manager;
- idle_ws = &wsm->idle_ws;
- ws_lock = &wsm->ws_lock;
- total_ws = &wsm->total_ws;
- ws_wait = &wsm->ws_wait;
- free_ws = &wsm->free_ws;
+ ASSERT(gwsm);
+ idle_ws = &gwsm->idle_ws;
+ ws_lock = &gwsm->ws_lock;
+ total_ws = &gwsm->total_ws;
+ ws_wait = &gwsm->ws_wait;
+ free_ws = &gwsm->free_ws;
spin_lock(ws_lock);
if (*free_ws <= num_online_cpus()) {
@@ -948,13 +961,13 @@ wake:
cond_wake_up(ws_wait);
}
-static void put_workspace(int type, struct list_head *ws)
+static void put_workspace(struct btrfs_fs_info *fs_info, int type, struct list_head *ws)
{
switch (type) {
- case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
- case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
- case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws);
- case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
+ case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(fs_info, type, ws);
+ case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(fs_info, type, ws);
+ case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(fs_info, type, ws);
+ case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(fs_info, ws);
default:
/*
* This can't happen, the type is validated several times
@@ -968,18 +981,28 @@ static void put_workspace(int type, struct list_head *ws)
* Adjust @level according to the limits of the compression algorithm or
* fallback to default
*/
-static unsigned int btrfs_compress_set_level(int type, unsigned level)
+static int btrfs_compress_set_level(unsigned int type, int level)
{
- const struct btrfs_compress_op *ops = btrfs_compress_op[type];
+ const struct btrfs_compress_levels *levels = btrfs_compress_levels[type];
if (level == 0)
- level = ops->default_level;
+ level = levels->default_level;
else
- level = min(level, ops->max_level);
+ level = clamp(level, levels->min_level, levels->max_level);
return level;
}
+/*
+ * Check whether the @level is within the valid range for the given type.
+ */
+bool btrfs_compress_level_valid(unsigned int type, int level)
+{
+ const struct btrfs_compress_levels *levels = btrfs_compress_levels[type];
+
+ return levels->min_level <= level && level <= levels->max_level;
+}
+
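
The two helpers above treat level 0 as "use the algorithm's default" and clamp everything else into its [min_level, max_level] window; the switch from unsigned to signed levels leaves room for negative minimums. A standalone C sketch of the clamping (not kernel code; the zstd-like numbers below are made up):

#include <stdio.h>

struct levels_model {
	int min_level;
	int max_level;
	int default_level;
};

static int set_level(const struct levels_model *l, int level)
{
	if (level == 0)
		return l->default_level;
	if (level < l->min_level)
		return l->min_level;
	if (level > l->max_level)
		return l->max_level;
	return level;
}

int main(void)
{
	const struct levels_model zstd_like = { .min_level = -7, .max_level = 22,
						.default_level = 3 };

	printf("%d %d %d %d\n",
	       set_level(&zstd_like, 0),	/* 3, the default */
	       set_level(&zstd_like, 40),	/* 22, clamped down */
	       set_level(&zstd_like, -15),	/* -7, clamped up */
	       set_level(&zstd_like, 5));	/* 5, already valid */
	return 0;
}
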
/* Wrapper around find_get_page(), with extra error message. */
int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
struct folio **in_folio_ret)
@@ -1012,46 +1035,46 @@ int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
* - compression algo are 0-3
* - the level are bits 4-7
*
- * @out_pages is an in/out parameter, holds maximum number of pages to allocate
- * and returns number of actually allocated pages
+ * @out_folios is an in/out parameter, holds maximum number of folios to allocate
+ * and returns number of actually allocated folios
*
* @total_in is used to return the number of bytes actually read. It
* may be smaller than the input length if we had to exit early because we
- * ran out of room in the pages array or because we cross the
+ * ran out of room in the folios array or because we cross the
* max_out threshold.
*
* @total_out is an in/out parameter, must be set to the input length and will
* be also used to return the total number of compressed bytes
*/
-int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping,
+int btrfs_compress_folios(unsigned int type, int level, struct btrfs_inode *inode,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out)
{
- int type = btrfs_compress_type(type_level);
- int level = btrfs_compress_level(type_level);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
const unsigned long orig_len = *total_out;
struct list_head *workspace;
int ret;
level = btrfs_compress_set_level(type, level);
- workspace = get_workspace(type, level);
- ret = compression_compress_pages(type, workspace, mapping, start, folios,
+ workspace = get_workspace(fs_info, type, level);
+ ret = compression_compress_pages(type, workspace, inode, start, folios,
out_folios, total_in, total_out);
/* The total read-in bytes should be no larger than the input. */
ASSERT(*total_in <= orig_len);
- put_workspace(type, workspace);
+ put_workspace(fs_info, type, workspace);
return ret;
}
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
+ struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
struct list_head *workspace;
int ret;
int type = cb->compress_type;
- workspace = get_workspace(type, 0);
+ workspace = get_workspace(fs_info, type, 0);
ret = compression_decompress_bio(workspace, cb);
- put_workspace(type, workspace);
+ put_workspace(fs_info, type, workspace);
if (!ret)
zero_fill_bio(&cb->orig_bbio->bio);
@@ -1061,7 +1084,8 @@ static int btrfs_decompress_bio(struct compressed_bio *cb)
/*
* a less complex decompression routine. Our compressed data fits in a
* single page, and we want to read a single page out of it.
- * start_byte tells us the offset into the compressed data we're interested in
+ * dest_pgoff tells us the offset into the destination folio where we write the
+ * decompressed data.
*/
int btrfs_decompress(int type, const u8 *data_in, struct folio *dest_folio,
unsigned long dest_pgoff, size_t srclen, size_t destlen)
@@ -1072,20 +1096,50 @@ int btrfs_decompress(int type, const u8 *data_in, struct folio *dest_folio,
int ret;
/*
- * The full destination page range should not exceed the page size.
+ * The full destination folio range should not exceed the folio size.
* And the @destlen should not exceed sectorsize, as this is only called for
* inline file extents, which should not exceed sectorsize.
*/
- ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize);
+ ASSERT(dest_pgoff + destlen <= folio_size(dest_folio) && destlen <= sectorsize);
- workspace = get_workspace(type, 0);
+ workspace = get_workspace(fs_info, type, 0);
ret = compression_decompress(type, workspace, data_in, dest_folio,
dest_pgoff, srclen, destlen);
- put_workspace(type, workspace);
+ put_workspace(fs_info, type, workspace);
return ret;
}
+int btrfs_alloc_compress_wsm(struct btrfs_fs_info *fs_info)
+{
+ int ret;
+
+ ret = alloc_workspace_manager(fs_info, BTRFS_COMPRESS_NONE);
+ if (ret < 0)
+ goto error;
+ ret = alloc_workspace_manager(fs_info, BTRFS_COMPRESS_ZLIB);
+ if (ret < 0)
+ goto error;
+ ret = alloc_workspace_manager(fs_info, BTRFS_COMPRESS_LZO);
+ if (ret < 0)
+ goto error;
+ ret = zstd_alloc_workspace_manager(fs_info);
+ if (ret < 0)
+ goto error;
+ return 0;
+error:
+ btrfs_free_compress_wsm(fs_info);
+ return ret;
+}
+
+void btrfs_free_compress_wsm(struct btrfs_fs_info *fs_info)
+{
+ free_workspace_manager(fs_info, BTRFS_COMPRESS_NONE);
+ free_workspace_manager(fs_info, BTRFS_COMPRESS_ZLIB);
+ free_workspace_manager(fs_info, BTRFS_COMPRESS_LZO);
+ zstd_free_workspace_manager(fs_info);
+}
+
int __init btrfs_init_compress(void)
{
if (bioset_init(&btrfs_compressed_bioset, BIO_POOL_SIZE,
@@ -1097,11 +1151,6 @@ int __init btrfs_init_compress(void)
if (!compr_pool.shrinker)
return -ENOMEM;
- btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
- btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
- btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
- zstd_init_workspace_manager();
-
spin_lock_init(&compr_pool.lock);
INIT_LIST_HEAD(&compr_pool.list);
compr_pool.count = 0;
@@ -1122,14 +1171,26 @@ void __cold btrfs_exit_compress(void)
btrfs_compr_pool_scan(NULL, NULL);
shrinker_free(compr_pool.shrinker);
- btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
- btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
- btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
- zstd_cleanup_workspace_manager();
bioset_exit(&btrfs_compressed_bioset);
}
/*
+ * The bvec is a single page bvec from a bio that contains folios from a filemap.
+ *
+ * Since the folio may be a large one, and if the bv_page is not a head page of
+ * a large folio, then page->index is unreliable.
+ *
+ * Thus we need this helper to grab the proper file offset.
+ */
+static u64 file_offset_from_bvec(const struct bio_vec *bvec)
+{
+ const struct page *page = bvec->bv_page;
+ const struct folio *folio = page_folio(page);
+
+ return (page_pgoff(folio, page) << PAGE_SHIFT) + bvec->bv_offset;
+}
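
In plain arithmetic, the helper above computes (folio->index + the page's offset within the folio) << PAGE_SHIFT, plus bv_offset. A standalone C sketch with made-up numbers (not kernel code):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long folio_index = 32;		/* folio starts at file offset 32 << 12 = 128K */
	unsigned long page_in_folio = 4;	/* bv_page is the 5th page of a large folio */
	unsigned int bv_offset = 0x300;

	/* page_pgoff(folio, page) == folio->index + (page - first page of folio) */
	unsigned long long file_offset =
		((unsigned long long)(folio_index + page_in_folio) << PAGE_SHIFT) + bv_offset;

	printf("0x%llx\n", file_offset);	/* 0x24300 */
	return 0;
}
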
+
+/*
* Copy decompressed data from working buffer to pages.
*
* @buf: The decompressed data buffer
@@ -1174,13 +1235,14 @@ int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
u32 copy_start;
/* Offset inside the full decompressed extent */
u32 bvec_offset;
+ void *kaddr;
bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
/*
* cb->start may underflow, but subtracting that value can still
* give us correct offset inside the full decompressed extent.
*/
- bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;
+ bvec_offset = file_offset_from_bvec(&bvec) - cb->start;
/* Haven't reached the bvec range, exit */
if (decompressed + buf_len <= bvec_offset)
@@ -1196,10 +1258,12 @@ int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
* @buf + @buf_len.
*/
ASSERT(copy_start - decompressed < buf_len);
- memcpy_to_page(bvec.bv_page, bvec.bv_offset,
- buf + copy_start - decompressed, copy_len);
- cur_offset += copy_len;
+ kaddr = bvec_kmap_local(&bvec);
+ memcpy(kaddr, buf + copy_start - decompressed, copy_len);
+ kunmap_local(kaddr);
+
+ cur_offset += copy_len;
bio_advance(orig_bio, copy_len);
/* Finished the bio */
if (!orig_bio->bi_iter.bi_size)
@@ -1229,7 +1293,7 @@ int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
#define ENTROPY_LVL_HIGH (80)
/*
- * For increasead precision in shannon_entropy calculation,
+ * For increased precision in shannon_entropy calculation,
* let's do pow(n, M) to save more digits after comma:
*
* - maximum int bit length is 64
@@ -1455,7 +1519,7 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
struct heuristic_ws *ws)
{
struct page *page;
- u64 index, index_end;
+ pgoff_t index, index_end;
u32 i, curr_sample_pos;
u8 *in_data;
@@ -1515,7 +1579,8 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
*/
int btrfs_compress_heuristic(struct btrfs_inode *inode, u64 start, u64 end)
{
- struct list_head *ws_list = get_workspace(0, 0);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct list_head *ws_list = get_workspace(fs_info, 0, 0);
struct heuristic_ws *ws;
u32 i;
u8 byte;
@@ -1584,29 +1649,34 @@ int btrfs_compress_heuristic(struct btrfs_inode *inode, u64 start, u64 end)
}
out:
- put_workspace(0, ws_list);
+ put_workspace(fs_info, 0, ws_list);
return ret;
}
/*
- * Convert the compression suffix (eg. after "zlib" starting with ":") to
- * level, unrecognized string will set the default level
+ * Convert the compression suffix (eg. after "zlib" starting with ":") to level.
+ *
+ * If the resulting level exceeds the algo's supported levels, it will be clamped.
+ *
+ * Return <0 if no valid string can be found.
+ * Return 0 if everything is fine.
*/
-unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
+int btrfs_compress_str2level(unsigned int type, const char *str, int *level_ret)
{
- unsigned int level = 0;
+ int level = 0;
int ret;
- if (!type)
+ if (!type) {
+ *level_ret = btrfs_compress_set_level(type, level);
return 0;
+ }
if (str[0] == ':') {
- ret = kstrtouint(str + 1, 10, &level);
+ ret = kstrtoint(str + 1, 10, &level);
if (ret)
- level = 0;
+ return ret;
}
- level = btrfs_compress_set_level(type, level);
-
- return level;
+ *level_ret = btrfs_compress_set_level(type, level);
+ return 0;
}
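
The new contract above is: parse the ":N" suffix strictly, return the parse error instead of silently falling back to level 0, and clamp whatever level results. A standalone C sketch of that behaviour (not kernel code; it uses strtol in place of kstrtoint and a made-up clamp range):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int str2level_model(const char *str, int min, int max, int def, int *level_ret)
{
	long level = 0;

	if (str[0] == ':') {
		char *end;

		errno = 0;
		level = strtol(str + 1, &end, 10);
		if (errno || end == str + 1 || *end != '\0')
			return -1;	/* the kernel propagates the kstrtoint() error */
	}

	if (level == 0)
		level = def;
	else if (level < min)
		level = min;
	else if (level > max)
		level = max;

	*level_ret = (int)level;
	return 0;
}

int main(void)
{
	int level;

	printf("%d %d\n", str2level_model(":15", 1, 9, 3, &level), level);	/* 0 9 */
	printf("%d %d\n", str2level_model("", 1, 9, 3, &level), level);		/* 0 3 */
	printf("%d\n", str2level_model(":abc", 1, 9, 3, &level));		/* -1  */
	return 0;
}
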
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 954034086d0d..e0228017e861 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -11,14 +11,15 @@
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
+#include <linux/pagemap.h>
#include "bio.h"
+#include "fs.h"
+#include "btrfs_inode.h"
struct address_space;
-struct page;
struct inode;
struct btrfs_inode;
struct btrfs_ordered_extent;
-struct btrfs_bio;
/*
* We want to make sure that amount of RAM required to uncompress an extent is
@@ -62,42 +63,39 @@ struct compressed_bio {
/* Whether this is a write for writeback. */
bool writeback;
- union {
- /* For reads, this is the bio we are copying the data into */
- struct btrfs_bio *orig_bbio;
- struct work_struct write_end_work;
- };
+ /* For reads, this is the bio we are copying the data into. */
+ struct btrfs_bio *orig_bbio;
/* Must be last. */
struct btrfs_bio bbio;
};
-static inline unsigned int btrfs_compress_type(unsigned int type_level)
+static inline struct btrfs_fs_info *cb_to_fs_info(const struct compressed_bio *cb)
{
- return (type_level & 0xF);
-}
-
-static inline unsigned int btrfs_compress_level(unsigned int type_level)
-{
- return ((type_level & 0xF0) >> 4);
+ return cb->bbio.inode->root->fs_info;
}
/* @range_end must be exclusive. */
-static inline u32 btrfs_calc_input_length(u64 range_end, u64 cur)
+static inline u32 btrfs_calc_input_length(struct folio *folio, u64 range_end, u64 cur)
{
- u64 page_end = round_down(cur, PAGE_SIZE) + PAGE_SIZE;
-
- return min(range_end, page_end) - cur;
+ /* @cur must be inside the folio. */
+ ASSERT(folio_pos(folio) <= cur);
+ ASSERT(cur < folio_next_pos(folio));
+ return umin(range_end, folio_next_pos(folio)) - cur;
}
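
The length returned above is simply "from @cur to whichever ends first, the range or the folio". A standalone C sketch with made-up numbers assuming a 64K folio (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long long folio_pos = 64 * 1024;	/* folio covers [64K, 128K) */
	unsigned long long folio_next = folio_pos + 64 * 1024;
	unsigned long long cur = 90 * 1024;

	unsigned long long range_end = 100 * 1024;	/* range ends inside the folio */
	unsigned long long len1 = (range_end < folio_next ? range_end : folio_next) - cur;

	range_end = 200 * 1024;				/* range extends past the folio */
	unsigned long long len2 = (range_end < folio_next ? range_end : folio_next) - cur;

	printf("%llu %llu\n", len1, len2);	/* 10240 38912 */
	return 0;
}
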
+int btrfs_alloc_compress_wsm(struct btrfs_fs_info *fs_info);
+void btrfs_free_compress_wsm(struct btrfs_fs_info *fs_info);
+
int __init btrfs_init_compress(void);
void __cold btrfs_exit_compress(void);
-int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping,
+bool btrfs_compress_level_valid(unsigned int type, int level);
+int btrfs_compress_folios(unsigned int type, int level, struct btrfs_inode *inode,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
int btrfs_decompress(int type, const u8 *data_in, struct folio *dest_folio,
- unsigned long start_byte, size_t srclen, size_t destlen);
+ unsigned long dest_pgoff, size_t srclen, size_t destlen);
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
struct compressed_bio *cb, u32 decompressed);
@@ -107,19 +105,11 @@ void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
bool writeback);
void btrfs_submit_compressed_read(struct btrfs_bio *bbio);
-unsigned int btrfs_compress_str2level(unsigned int type, const char *str);
+int btrfs_compress_str2level(unsigned int type, const char *str, int *level_ret);
-struct folio *btrfs_alloc_compr_folio(void);
+struct folio *btrfs_alloc_compr_folio(struct btrfs_fs_info *fs_info);
void btrfs_free_compr_folio(struct folio *folio);
-enum btrfs_compression_type {
- BTRFS_COMPRESS_NONE = 0,
- BTRFS_COMPRESS_ZLIB = 1,
- BTRFS_COMPRESS_LZO = 2,
- BTRFS_COMPRESS_ZSTD = 3,
- BTRFS_NR_COMPRESS_TYPES = 4,
-};
-
struct workspace_manager {
struct list_head idle_ws;
spinlock_t ws_lock;
@@ -131,23 +121,23 @@ struct workspace_manager {
wait_queue_head_t ws_wait;
};
-struct list_head *btrfs_get_workspace(int type, unsigned int level);
-void btrfs_put_workspace(int type, struct list_head *ws);
+struct list_head *btrfs_get_workspace(struct btrfs_fs_info *fs_info, int type, int level);
+void btrfs_put_workspace(struct btrfs_fs_info *fs_info, int type, struct list_head *ws);
-struct btrfs_compress_op {
- struct workspace_manager *workspace_manager;
+struct btrfs_compress_levels {
/* Maximum level supported by the compression algorithm */
- unsigned int max_level;
- unsigned int default_level;
+ int min_level;
+ int max_level;
+ int default_level;
};
/* The heuristic workspaces are managed via the 0th workspace manager */
#define BTRFS_NR_WORKSPACE_MANAGERS BTRFS_NR_COMPRESS_TYPES
-extern const struct btrfs_compress_op btrfs_heuristic_compress;
-extern const struct btrfs_compress_op btrfs_zlib_compress;
-extern const struct btrfs_compress_op btrfs_lzo_compress;
-extern const struct btrfs_compress_op btrfs_zstd_compress;
+extern const struct btrfs_compress_levels btrfs_heuristic_compress;
+extern const struct btrfs_compress_levels btrfs_zlib_compress;
+extern const struct btrfs_compress_levels btrfs_lzo_compress;
+extern const struct btrfs_compress_levels btrfs_zstd_compress;
const char* btrfs_compress_type2str(enum btrfs_compression_type type);
bool btrfs_compress_is_valid_type(const char *str, size_t len);
@@ -157,39 +147,39 @@ int btrfs_compress_heuristic(struct btrfs_inode *inode, u64 start, u64 end);
int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
struct folio **in_folio_ret);
-int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
+int zlib_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zlib_decompress(struct list_head *ws, const u8 *data_in,
struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen);
-struct list_head *zlib_alloc_workspace(unsigned int level);
+struct list_head *zlib_alloc_workspace(struct btrfs_fs_info *fs_info, unsigned int level);
void zlib_free_workspace(struct list_head *ws);
-struct list_head *zlib_get_workspace(unsigned int level);
+struct list_head *zlib_get_workspace(struct btrfs_fs_info *fs_info, unsigned int level);
-int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
+int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int lzo_decompress(struct list_head *ws, const u8 *data_in,
struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen);
-struct list_head *lzo_alloc_workspace(void);
+struct list_head *lzo_alloc_workspace(struct btrfs_fs_info *fs_info);
void lzo_free_workspace(struct list_head *ws);
-int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
+int zstd_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zstd_decompress(struct list_head *ws, const u8 *data_in,
struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen);
-void zstd_init_workspace_manager(void);
-void zstd_cleanup_workspace_manager(void);
-struct list_head *zstd_alloc_workspace(unsigned int level);
+int zstd_alloc_workspace_manager(struct btrfs_fs_info *fs_info);
+void zstd_free_workspace_manager(struct btrfs_fs_info *fs_info);
+struct list_head *zstd_alloc_workspace(struct btrfs_fs_info *fs_info, int level);
void zstd_free_workspace(struct list_head *ws);
-struct list_head *zstd_get_workspace(unsigned int level);
-void zstd_put_workspace(struct list_head *ws);
+struct list_head *zstd_get_workspace(struct btrfs_fs_info *fs_info, int level);
+void zstd_put_workspace(struct btrfs_fs_info *fs_info, struct list_head *ws);
#endif
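
The compression.h hunks above replace the per-algorithm btrfs_compress_op tables with btrfs_compress_levels (now carrying signed min/default/max levels) and thread a struct btrfs_fs_info * through every workspace helper, so workspaces become per-filesystem rather than global. A minimal caller-side sketch of the reworked zstd API follows; the wrapper name and the clamping policy are assumptions made for illustration, not code from this patch.

/*
 * Hypothetical caller of the reworked zstd workspace API. The wrapper name
 * and the level-clamping choices are illustrative only.
 */
static struct list_head *example_get_zstd_ws(struct btrfs_fs_info *fs_info,
                                             int requested_level)
{
        const struct btrfs_compress_levels *levels = &btrfs_zstd_compress;
        int level = requested_level ? requested_level : levels->default_level;

        if (level < levels->min_level)
                level = levels->min_level;
        if (level > levels->max_level)
                level = levels->max_level;

        /* Workspaces now hang off fs_info instead of a global manager. */
        return zstd_get_workspace(fs_info, level);
}

A matching release would go through zstd_put_workspace(fs_info, ws), which in the new API also takes the filesystem handle.
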
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 148648ea1c8b..a48b4befbee7 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -30,26 +30,13 @@ static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
const struct btrfs_key *ins_key, struct btrfs_path *path,
- int data_size, int extend);
+ int data_size, bool extend);
static int push_node_left(struct btrfs_trans_handle *trans,
struct extent_buffer *dst,
- struct extent_buffer *src, int empty);
+ struct extent_buffer *src, bool empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
struct extent_buffer *dst_buf,
struct extent_buffer *src_buf);
-
-static const struct btrfs_csums {
- u16 size;
- const char name[10];
- const char driver[12];
-} btrfs_csums[] = {
- [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
- [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
- [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
- [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
- .driver = "blake2b-256" },
-};
-
/*
* The leaf data grows from end-to-front in the node. this returns the address
* of the start of the last item, which is the stop of the leaf data stack.
@@ -148,44 +135,6 @@ static inline void copy_leaf_items(const struct extent_buffer *dst,
nr_items * sizeof(struct btrfs_item));
}
-/* This exists for btrfs-progs usages. */
-u16 btrfs_csum_type_size(u16 type)
-{
- return btrfs_csums[type].size;
-}
-
-int btrfs_super_csum_size(const struct btrfs_super_block *s)
-{
- u16 t = btrfs_super_csum_type(s);
- /*
- * csum type is validated at mount time
- */
- return btrfs_csum_type_size(t);
-}
-
-const char *btrfs_super_csum_name(u16 csum_type)
-{
- /* csum type is validated at mount time */
- return btrfs_csums[csum_type].name;
-}
-
-/*
- * Return driver name if defined, otherwise the name that's also a valid driver
- * name
- */
-const char *btrfs_super_csum_driver(u16 csum_type)
-{
- /* csum type is validated at mount time */
- return btrfs_csums[csum_type].driver[0] ?
- btrfs_csums[csum_type].driver :
- btrfs_csums[csum_type].name;
-}
-
-size_t __attribute_const__ btrfs_get_num_csums(void)
-{
- return ARRAY_SIZE(btrfs_csums);
-}
-
struct btrfs_path *btrfs_alloc_path(void)
{
might_sleep();
@@ -226,22 +175,6 @@ noinline void btrfs_release_path(struct btrfs_path *p)
}
/*
- * We want the transaction abort to print stack trace only for errors where the
- * cause could be a bug, eg. due to ENOSPC, and not for common errors that are
- * caused by external factors.
- */
-bool __cold abort_should_print_stack(int error)
-{
- switch (error) {
- case -EIO:
- case -EROFS:
- case -ENOMEM:
- return false;
- }
- return true;
-}
-
-/*
* safely gets a reference on the root node of a tree. A lock
* is not taken, so a concurrent writer may put a different node
* at the root of the tree. See btrfs_lock_root_node for the
@@ -265,7 +198,7 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
* the inc_not_zero dance and if it doesn't work then
* synchronize_rcu and try again.
*/
- if (atomic_inc_not_zero(&eb->refs)) {
+ if (refcount_inc_not_zero(&eb->refs)) {
rcu_read_unlock();
break;
}
@@ -350,15 +283,26 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
- WARN_ON(btrfs_header_generation(buf) > trans->transid);
- if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
+ if (unlikely(btrfs_header_generation(buf) > trans->transid)) {
+ btrfs_tree_unlock(cow);
+ free_extent_buffer(cow);
+ ret = -EUCLEAN;
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+
+ if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
ret = btrfs_inc_ref(trans, root, cow, 1);
- else
+ if (unlikely(ret))
+ btrfs_abort_transaction(trans, ret);
+ } else {
ret = btrfs_inc_ref(trans, root, cow, 0);
+ if (unlikely(ret))
+ btrfs_abort_transaction(trans, ret);
+ }
if (ret) {
btrfs_tree_unlock(cow);
free_extent_buffer(cow);
- btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -370,9 +314,9 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
/*
* check if the tree block can be shared by multiple trees
*/
-bool btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct extent_buffer *buf)
+bool btrfs_block_can_be_shared(const struct btrfs_trans_handle *trans,
+ const struct btrfs_root *root,
+ const struct extent_buffer *buf)
{
const u64 buf_gen = btrfs_header_generation(buf);
@@ -592,14 +536,14 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto error_unlock_cow;
}
if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
ret = btrfs_reloc_cow_block(trans, root, buf, cow);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto error_unlock_cow;
}
@@ -612,18 +556,18 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
parent_start = buf->start;
ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto error_unlock_cow;
}
- atomic_inc(&cow->refs);
+ refcount_inc(&cow->refs);
rcu_assign_pointer(root->node, cow);
ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
parent_start, last_ref);
free_extent_buffer(buf);
add_root_to_dirty_list(root);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto error_unlock_cow;
}
@@ -631,7 +575,7 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
WARN_ON(trans->transid != btrfs_header_generation(parent));
ret = btrfs_tree_mod_log_insert_key(parent, parent_slot,
BTRFS_MOD_LOG_KEY_REPLACE);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto error_unlock_cow;
}
@@ -642,18 +586,20 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(trans, parent);
if (last_ref) {
ret = btrfs_tree_mod_log_free_eb(buf);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto error_unlock_cow;
}
}
ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
parent_start, last_ref);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto error_unlock_cow;
}
}
+
+ trace_btrfs_cow_block(root, buf, cow);
if (unlock_orig)
btrfs_tree_unlock(buf);
free_extent_buffer_stale(buf);
@@ -667,15 +613,12 @@ error_unlock_cow:
return ret;
}
-static inline int should_cow_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct extent_buffer *buf)
+static inline bool should_cow_block(const struct btrfs_trans_handle *trans,
+ const struct btrfs_root *root,
+ const struct extent_buffer *buf)
{
if (btrfs_is_testing(root->fs_info))
- return 0;
-
- /* Ensure we can see the FORCE_COW bit */
- smp_mb__before_atomic();
+ return false;
/*
* We do not need to cow a block if
@@ -688,13 +631,25 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
* after we've finished copying src root, we must COW the shared
* block to ensure the metadata consistency.
*/
- if (btrfs_header_generation(buf) == trans->transid &&
- !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
- !(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
- btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
- !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
- return 0;
- return 1;
+
+ if (btrfs_header_generation(buf) != trans->transid)
+ return true;
+
+ if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN))
+ return true;
+
+ /* Ensure we can see the FORCE_COW bit. */
+ smp_mb__before_atomic();
+ if (test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
+ return true;
+
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
+ return false;
+
+ if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))
+ return true;
+
+ return false;
}
/*
@@ -710,7 +665,6 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = root->fs_info;
u64 search_start;
- int ret;
if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
btrfs_abort_transaction(trans, -EUCLEAN);
@@ -751,12 +705,8 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans,
* Also We don't care about the error, as it's handled internally.
*/
btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
- ret = btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
- cow_ret, search_start, 0, nest);
-
- trace_btrfs_cow_block(root, buf, *cow_ret);
-
- return ret;
+ return btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
+ cow_ret, search_start, 0, nest);
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
@@ -794,7 +744,7 @@ int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_ke
* Slot may point to the total number of items (i.e. one position beyond the last
* key) if the key is bigger than the last key in the extent buffer.
*/
-int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
+int btrfs_bin_search(const struct extent_buffer *eb, int first_slot,
const struct btrfs_key *key, int *slot)
{
unsigned long p;
@@ -903,7 +853,7 @@ struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
&check);
if (IS_ERR(eb))
return eb;
- if (!extent_buffer_uptodate(eb)) {
+ if (unlikely(!extent_buffer_uptodate(eb))) {
free_extent_buffer(eb);
return ERR_PTR(-EIO);
}
@@ -912,6 +862,75 @@ struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
}
/*
+ * Promote a child node to become the new tree root.
+ *
+ * @trans: Transaction handle
+ * @root: Tree root structure to update
+ * @path: Path holding nodes and locks
+ * @level: Level of the parent (old root)
+ * @parent: The parent (old root) with exactly one item
+ *
+ * This helper is called during rebalancing when the root node contains only
+ * a single item (nritems == 1). We can reduce the tree height by promoting
+ * that child to become the new root and freeing the old root node. The path
+ * locks and references are updated accordingly.
+ *
+ * Return: 0 on success, negative errno on failure. The transaction is aborted
+ * on critical errors.
+ */
+static int promote_child_to_root(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ int level, struct extent_buffer *parent)
+{
+ struct extent_buffer *child;
+ int ret;
+
+ ASSERT(btrfs_header_nritems(parent) == 1);
+
+ child = btrfs_read_node_slot(parent, 0);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ btrfs_tree_lock(child);
+ ret = btrfs_cow_block(trans, root, child, parent, 0, &child, BTRFS_NESTING_COW);
+ if (ret) {
+ btrfs_tree_unlock(child);
+ free_extent_buffer(child);
+ return ret;
+ }
+
+ ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
+ if (unlikely(ret < 0)) {
+ btrfs_tree_unlock(child);
+ free_extent_buffer(child);
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+ rcu_assign_pointer(root->node, child);
+
+ add_root_to_dirty_list(root);
+ btrfs_tree_unlock(child);
+
+ path->locks[level] = 0;
+ path->nodes[level] = NULL;
+ btrfs_clear_buffer_dirty(trans, parent);
+ btrfs_tree_unlock(parent);
+ /* Once for the path. */
+ free_extent_buffer(parent);
+
+ root_sub_used_bytes(root);
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(root), parent, 0, 1);
+ /* Once for the root ptr. */
+ free_extent_buffer_stale(parent);
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
* node level balancing, used to make sure nodes are in proper order for
* item deletion. We balance from the top down, so we have to make sure
* that a deletion won't leave an node completely empty later on.
@@ -950,55 +969,10 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
* by promoting the node below to a root
*/
if (!parent) {
- struct extent_buffer *child;
-
if (btrfs_header_nritems(mid) != 1)
return 0;
- /* promote the child to a root */
- child = btrfs_read_node_slot(mid, 0);
- if (IS_ERR(child)) {
- ret = PTR_ERR(child);
- goto out;
- }
-
- btrfs_tree_lock(child);
- ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
- BTRFS_NESTING_COW);
- if (ret) {
- btrfs_tree_unlock(child);
- free_extent_buffer(child);
- goto out;
- }
-
- ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
- if (ret < 0) {
- btrfs_tree_unlock(child);
- free_extent_buffer(child);
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
- rcu_assign_pointer(root->node, child);
-
- add_root_to_dirty_list(root);
- btrfs_tree_unlock(child);
-
- path->locks[level] = 0;
- path->nodes[level] = NULL;
- btrfs_clear_buffer_dirty(trans, mid);
- btrfs_tree_unlock(mid);
- /* once for the path */
- free_extent_buffer(mid);
-
- root_sub_used_bytes(root);
- ret = btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
- /* once for the root ptr */
- free_extent_buffer_stale(mid);
- if (ret < 0) {
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
- return 0;
+ return promote_child_to_root(trans, root, path, level, mid);
}
if (btrfs_header_nritems(mid) >
BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
@@ -1069,7 +1043,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
right, 0, 1);
free_extent_buffer_stale(right);
right = NULL;
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -1078,7 +1052,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
btrfs_node_key(right, &right_key, 0);
ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
BTRFS_MOD_LOG_KEY_REPLACE);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -1130,7 +1104,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
ret = btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
free_extent_buffer_stale(mid);
mid = NULL;
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -1140,7 +1114,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
btrfs_node_key(mid, &mid_key, 0);
ret = btrfs_tree_mod_log_insert_key(parent, pslot,
BTRFS_MOD_LOG_KEY_REPLACE);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -1151,11 +1125,12 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
/* update the path */
if (left) {
if (btrfs_header_nritems(left) > orig_slot) {
- atomic_inc(&left->refs);
/* left was locked after cow */
path->nodes[level] = left;
path->slots[level + 1] -= 1;
path->slots[level] = orig_slot;
+ /* Left is now owned by path. */
+ left = NULL;
if (mid) {
btrfs_tree_unlock(mid);
free_extent_buffer(mid);
@@ -1175,8 +1150,7 @@ out:
free_extent_buffer(right);
}
if (left) {
- if (path->nodes[level] != left)
- btrfs_tree_unlock(left);
+ btrfs_tree_unlock(left);
free_extent_buffer(left);
}
return ret;
@@ -1245,7 +1219,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
btrfs_node_key(mid, &disk_key, 0);
ret = btrfs_tree_mod_log_insert_key(parent, pslot,
BTRFS_MOD_LOG_KEY_REPLACE);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_tree_unlock(left);
free_extent_buffer(left);
btrfs_abort_transaction(trans, ret);
@@ -1305,7 +1279,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
btrfs_node_key(right, &disk_key, 0);
ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
BTRFS_MOD_LOG_KEY_REPLACE);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_tree_unlock(right);
free_extent_buffer(right);
btrfs_abort_transaction(trans, ret);
@@ -1338,7 +1312,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
* to the block in 'slot', and triggering ra on them.
*/
static void reada_for_search(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
+ const struct btrfs_path *path,
int level, int slot, u64 objectid)
{
struct extent_buffer *node;
@@ -1420,7 +1394,7 @@ static void reada_for_search(struct btrfs_fs_info *fs_info,
}
}
-static noinline void reada_for_balance(struct btrfs_path *path, int level)
+static noinline void reada_for_balance(const struct btrfs_path *path, int level)
{
struct extent_buffer *parent;
int slot;
@@ -1485,8 +1459,8 @@ static noinline void unlock_up(struct btrfs_path *path, int level,
}
if (i >= lowest_unlock && i > skip_level) {
- check_skip = false;
btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
+ check_skip = false;
path->locks[i] = 0;
if (write_lock_level &&
i > min_write_lock_level &&
@@ -1516,8 +1490,8 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
u64 blocknr;
struct extent_buffer *tmp = NULL;
int ret = 0;
+ int ret2;
int parent_level;
- int err;
bool read_tmp = false;
bool tmp_locked = false;
bool path_released = false;
@@ -1543,13 +1517,13 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
reada_for_search(fs_info, p, parent_level, slot, key->objectid);
/* first we do an atomic uptodate check */
- if (btrfs_buffer_uptodate(tmp, check.transid, 1) > 0) {
+ if (btrfs_buffer_uptodate(tmp, check.transid, true) > 0) {
/*
* Do extra check for first_key, eb can be stale due to
* being cached, read from scrub, or have multiple
* parents (shared tree blocks).
*/
- if (btrfs_verify_level_key(tmp, &check)) {
+ if (unlikely(btrfs_verify_level_key(tmp, &check))) {
ret = -EUCLEAN;
goto out;
}
@@ -1566,6 +1540,7 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
if (!p->skip_locking) {
btrfs_unlock_up_safe(p, parent_level + 1);
+ btrfs_maybe_reset_lockdep_class(root, tmp);
tmp_locked = true;
btrfs_tree_read_lock(tmp);
btrfs_release_path(p);
@@ -1574,9 +1549,9 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
}
/* Now we're allowed to do a blocking uptodate check. */
- err = btrfs_read_extent_buffer(tmp, &check);
- if (err) {
- ret = err;
+ ret2 = btrfs_read_extent_buffer(tmp, &check);
+ if (ret2) {
+ ret = ret2;
goto out;
}
@@ -1609,6 +1584,7 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
if (!p->skip_locking) {
ASSERT(ret == -EAGAIN);
+ btrfs_maybe_reset_lockdep_class(root, tmp);
tmp_locked = true;
btrfs_tree_read_lock(tmp);
btrfs_release_path(p);
@@ -1616,9 +1592,9 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
}
/* Now we're allowed to do a blocking uptodate check. */
- err = btrfs_read_extent_buffer(tmp, &check);
- if (err) {
- ret = err;
+ ret2 = btrfs_read_extent_buffer(tmp, &check);
+ if (ret2) {
+ ret = ret2;
goto out;
}
@@ -1628,7 +1604,7 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
* and give up so that our caller doesn't loop forever
* on our EAGAINs.
*/
- if (!extent_buffer_uptodate(tmp)) {
+ if (unlikely(!extent_buffer_uptodate(tmp))) {
ret = -EIO;
goto out;
}
@@ -1753,13 +1729,13 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
if (p->search_commit_root) {
b = root->commit_root;
- atomic_inc(&b->refs);
+ refcount_inc(&b->refs);
level = btrfs_header_level(b);
/*
* Ensure that all callers have set skip_locking when
- * p->search_commit_root = 1.
+ * p->search_commit_root is true.
*/
- ASSERT(p->skip_locking == 1);
+ ASSERT(p->skip_locking);
goto out;
}
@@ -1809,7 +1785,7 @@ out:
* The root may have failed to write out at some point, and thus is no
* longer valid, return an error in this case.
*/
- if (!extent_buffer_uptodate(b)) {
+ if (unlikely(!extent_buffer_uptodate(b))) {
if (root_lock)
btrfs_tree_unlock_rw(b, root_lock);
free_extent_buffer(b);
@@ -1862,7 +1838,7 @@ static int finish_need_commit_sem_search(struct btrfs_path *path)
return 0;
}
-static inline int search_for_key_slot(struct extent_buffer *eb,
+static inline int search_for_key_slot(const struct extent_buffer *eb,
int search_low_slot,
const struct btrfs_key *key,
int prev_cmp,
@@ -1996,15 +1972,14 @@ static int search_leaf(struct btrfs_trans_handle *trans,
ASSERT(leaf_free_space >= 0);
if (leaf_free_space < ins_len) {
- int err;
-
- err = split_leaf(trans, root, key, path, ins_len,
- (ret == 0));
- ASSERT(err <= 0);
- if (WARN_ON(err > 0))
- err = -EUCLEAN;
- if (err)
- ret = err;
+ int ret2;
+
+ ret2 = split_leaf(trans, root, key, path, ins_len, (ret == 0));
+ ASSERT(ret2 <= 0);
+ if (WARN_ON(ret2 > 0))
+ ret2 = -EUCLEAN;
+ if (ret2)
+ ret = ret2;
}
}
@@ -2046,11 +2021,10 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
const struct btrfs_key *key, struct btrfs_path *p,
int ins_len, int cow)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_fs_info *fs_info;
struct extent_buffer *b;
int slot;
int ret;
- int err;
int level;
int lowest_unlock = 1;
/* everything at write_lock_level or lower must be write locked */
@@ -2059,6 +2033,10 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int min_write_lock_level;
int prev_cmp;
+ if (!root)
+ return -EINVAL;
+
+ fs_info = root->fs_info;
might_sleep();
lowest_level = p->lowest_level;
@@ -2117,6 +2095,7 @@ again:
while (b) {
int dec = 0;
+ int ret2;
level = btrfs_header_level(b);
@@ -2145,16 +2124,15 @@ again:
}
if (last_level)
- err = btrfs_cow_block(trans, root, b, NULL, 0,
- &b,
- BTRFS_NESTING_COW);
+ ret2 = btrfs_cow_block(trans, root, b, NULL, 0,
+ &b, BTRFS_NESTING_COW);
else
- err = btrfs_cow_block(trans, root, b,
- p->nodes[level + 1],
- p->slots[level + 1], &b,
- BTRFS_NESTING_COW);
- if (err) {
- ret = err;
+ ret2 = btrfs_cow_block(trans, root, b,
+ p->nodes[level + 1],
+ p->slots[level + 1], &b,
+ BTRFS_NESTING_COW);
+ if (ret2) {
+ ret = ret2;
goto done;
}
}
@@ -2202,12 +2180,12 @@ cow_done:
slot--;
}
p->slots[level] = slot;
- err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
- &write_lock_level);
- if (err == -EAGAIN)
+ ret2 = setup_nodes_for_search(trans, root, p, b, level, ins_len,
+ &write_lock_level);
+ if (ret2 == -EAGAIN)
goto again;
- if (err) {
- ret = err;
+ if (ret2) {
+ ret = ret2;
goto done;
}
b = p->nodes[level];
@@ -2233,11 +2211,11 @@ cow_done:
goto done;
}
- err = read_block_for_search(root, p, &b, slot, key);
- if (err == -EAGAIN && !p->nowait)
+ ret2 = read_block_for_search(root, p, &b, slot, key);
+ if (ret2 == -EAGAIN && !p->nowait)
goto again;
- if (err) {
- ret = err;
+ if (ret2) {
+ ret = ret2;
goto done;
}
@@ -2300,7 +2278,6 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
struct extent_buffer *b;
int slot;
int ret;
- int err;
int level;
int lowest_unlock = 1;
u8 lowest_level = 0;
@@ -2316,7 +2293,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
again:
b = btrfs_get_old_root(root, time_seq);
- if (!b) {
+ if (unlikely(!b)) {
ret = -EIO;
goto done;
}
@@ -2325,6 +2302,7 @@ again:
while (b) {
int dec = 0;
+ int ret2;
level = btrfs_header_level(b);
p->nodes[level] = b;
@@ -2360,11 +2338,11 @@ again:
goto done;
}
- err = read_block_for_search(root, p, &b, slot, key);
- if (err == -EAGAIN && !p->nowait)
+ ret2 = read_block_for_search(root, p, &b, slot, key);
+ if (ret2 == -EAGAIN && !p->nowait)
goto again;
- if (err) {
- ret = err;
+ if (ret2) {
+ ret = ret2;
goto done;
}
@@ -2645,12 +2623,11 @@ void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
if (unlikely(btrfs_comp_keys(&disk_key, new_key) >= 0)) {
btrfs_print_leaf(eb);
btrfs_crit(fs_info,
- "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
+ "slot %u key " BTRFS_KEY_FMT " new key " BTRFS_KEY_FMT,
slot, btrfs_disk_key_objectid(&disk_key),
btrfs_disk_key_type(&disk_key),
btrfs_disk_key_offset(&disk_key),
- new_key->objectid, new_key->type,
- new_key->offset);
+ BTRFS_KEY_FMT_VALUE(new_key));
BUG();
}
}
@@ -2659,12 +2636,11 @@ void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
if (unlikely(btrfs_comp_keys(&disk_key, new_key) <= 0)) {
btrfs_print_leaf(eb);
btrfs_crit(fs_info,
- "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
+ "slot %u key " BTRFS_KEY_FMT " new key " BTRFS_KEY_FMT,
slot, btrfs_disk_key_objectid(&disk_key),
btrfs_disk_key_type(&disk_key),
btrfs_disk_key_offset(&disk_key),
- new_key->objectid, new_key->type,
- new_key->offset);
+ BTRFS_KEY_FMT_VALUE(new_key));
BUG();
}
}
@@ -2723,10 +2699,9 @@ static bool check_sibling_keys(const struct extent_buffer *left,
btrfs_crit(left->fs_info, "right extent buffer:");
btrfs_print_tree(right, false);
btrfs_crit(left->fs_info,
-"bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)",
- left_last.objectid, left_last.type,
- left_last.offset, right_first.objectid,
- right_first.type, right_first.offset);
+"bad key order, sibling blocks, left last " BTRFS_KEY_FMT " right first " BTRFS_KEY_FMT,
+ BTRFS_KEY_FMT_VALUE(&left_last),
+ BTRFS_KEY_FMT_VALUE(&right_first));
return true;
}
return false;
@@ -2741,7 +2716,7 @@ static bool check_sibling_keys(const struct extent_buffer *left,
*/
static int push_node_left(struct btrfs_trans_handle *trans,
struct extent_buffer *dst,
- struct extent_buffer *src, int empty)
+ struct extent_buffer *src, bool empty)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
int push_items = 0;
@@ -2777,13 +2752,13 @@ static int push_node_left(struct btrfs_trans_handle *trans,
push_items = min(src_nritems - 8, push_items);
/* dst is the left eb, src is the middle eb */
- if (check_sibling_keys(dst, src)) {
+ if (unlikely(check_sibling_keys(dst, src))) {
ret = -EUCLEAN;
btrfs_abort_transaction(trans, ret);
return ret;
}
ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -2851,7 +2826,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
push_items = max_push;
/* dst is the right eb, src is the middle eb */
- if (check_sibling_keys(src, dst)) {
+ if (unlikely(check_sibling_keys(src, dst))) {
ret = -EUCLEAN;
btrfs_abort_transaction(trans, ret);
return ret;
@@ -2868,7 +2843,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
push_items);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -2936,8 +2911,9 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
if (ret < 0) {
int ret2;
+ btrfs_clear_buffer_dirty(trans, c);
ret2 = btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1);
- if (ret2 < 0)
+ if (unlikely(ret2 < 0))
btrfs_abort_transaction(trans, ret2);
btrfs_tree_unlock(c);
free_extent_buffer(c);
@@ -2949,7 +2925,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
free_extent_buffer(old);
add_root_to_dirty_list(root);
- atomic_inc(&c->refs);
+ refcount_inc(&c->refs);
path->nodes[level] = c;
path->locks[level] = BTRFS_WRITE_LOCK;
path->slots[level] = 0;
@@ -2982,7 +2958,7 @@ static int insert_ptr(struct btrfs_trans_handle *trans,
if (level) {
ret = btrfs_tree_mod_log_insert_move(lower, slot + 1,
slot, nritems - slot);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -2995,7 +2971,7 @@ static int insert_ptr(struct btrfs_trans_handle *trans,
if (level) {
ret = btrfs_tree_mod_log_insert_key(lower, slot,
BTRFS_MOD_LOG_KEY_ADD);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -3071,7 +3047,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
ASSERT(btrfs_header_level(c) == level);
ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_tree_unlock(split);
free_extent_buffer(split);
btrfs_abort_transaction(trans, ret);
@@ -3140,7 +3116,7 @@ int btrfs_leaf_free_space(const struct extent_buffer *leaf)
int ret;
ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_crit(fs_info,
"leaf free space ret %d, leaf data size %lu, used %d nritems %d",
ret,
@@ -3156,7 +3132,7 @@ int btrfs_leaf_free_space(const struct extent_buffer *leaf)
*/
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
- int data_size, int empty,
+ int data_size, bool empty,
struct extent_buffer *right,
int free_space, u32 left_nritems,
u32 min_slot)
@@ -3164,7 +3140,6 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = right->fs_info;
struct extent_buffer *left = path->nodes[0];
struct extent_buffer *upper = path->nodes[1];
- struct btrfs_map_token token;
struct btrfs_disk_key disk_key;
int slot;
u32 i;
@@ -3238,13 +3213,12 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
copy_leaf_items(right, left, 0, left_nritems - push_items, push_items);
/* update the item pointers */
- btrfs_init_map_token(&token, right);
right_nritems += push_items;
btrfs_set_header_nritems(right, right_nritems);
push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
for (i = 0; i < right_nritems; i++) {
- push_space -= btrfs_token_item_size(&token, i);
- btrfs_set_token_item_offset(&token, i, push_space);
+ push_space -= btrfs_item_size(right, i);
+ btrfs_set_item_offset(right, i, push_space);
}
left_nritems -= push_items;
@@ -3264,10 +3238,8 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
/* then fixup the leaf pointer in the path */
if (path->slots[0] >= left_nritems) {
path->slots[0] -= left_nritems;
- if (btrfs_header_nritems(path->nodes[0]) == 0)
- btrfs_clear_buffer_dirty(trans, path->nodes[0]);
- btrfs_tree_unlock(path->nodes[0]);
- free_extent_buffer(path->nodes[0]);
+ btrfs_tree_unlock(left);
+ free_extent_buffer(left);
path->nodes[0] = right;
path->slots[1] += 1;
} else {
@@ -3295,7 +3267,7 @@ out_unlock:
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_path *path,
int min_data_size, int data_size,
- int empty, u32 min_slot)
+ bool empty, u32 min_slot)
{
struct extent_buffer *left = path->nodes[0];
struct extent_buffer *right;
@@ -3334,7 +3306,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
if (left_nritems == 0)
goto out_unlock;
- if (check_sibling_keys(left, right)) {
+ if (unlikely(check_sibling_keys(left, right))) {
ret = -EUCLEAN;
btrfs_abort_transaction(trans, ret);
btrfs_tree_unlock(right);
@@ -3372,7 +3344,7 @@ out_unlock:
*/
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
struct btrfs_path *path, int data_size,
- int empty, struct extent_buffer *left,
+ bool empty, struct extent_buffer *left,
int free_space, u32 right_nritems,
u32 max_slot)
{
@@ -3387,7 +3359,6 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
int ret = 0;
u32 this_item_size;
u32 old_left_item_size;
- struct btrfs_map_token token;
if (empty)
nr = min(right_nritems, max_slot);
@@ -3435,21 +3406,24 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
old_left_nritems = btrfs_header_nritems(left);
BUG_ON(old_left_nritems <= 0);
- btrfs_init_map_token(&token, left);
old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1);
for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
u32 ioff;
- ioff = btrfs_token_item_offset(&token, i);
- btrfs_set_token_item_offset(&token, i,
+ ioff = btrfs_item_offset(left, i);
+ btrfs_set_item_offset(left, i,
ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size));
}
btrfs_set_header_nritems(left, old_left_nritems + push_items);
/* fixup right node */
- if (push_items > right_nritems)
- WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
- right_nritems);
+ if (unlikely(push_items > right_nritems)) {
+ ret = -EUCLEAN;
+ btrfs_abort_transaction(trans, ret);
+ btrfs_crit(fs_info, "push items (%d) > right leaf items (%u)",
+ push_items, right_nritems);
+ goto out;
+ }
if (push_items < right_nritems) {
push_space = btrfs_item_offset(right, push_items - 1) -
@@ -3462,13 +3436,12 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
btrfs_header_nritems(right) - push_items);
}
- btrfs_init_map_token(&token, right);
right_nritems -= push_items;
btrfs_set_header_nritems(right, right_nritems);
push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
for (i = 0; i < right_nritems; i++) {
- push_space = push_space - btrfs_token_item_size(&token, i);
- btrfs_set_token_item_offset(&token, i, push_space);
+ push_space = push_space - btrfs_item_size(right, i);
+ btrfs_set_item_offset(right, i, push_space);
}
btrfs_mark_buffer_dirty(trans, left);
@@ -3483,8 +3456,8 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
/* then fixup the leaf pointer in the path */
if (path->slots[0] < push_items) {
path->slots[0] += old_left_nritems;
- btrfs_tree_unlock(path->nodes[0]);
- free_extent_buffer(path->nodes[0]);
+ btrfs_tree_unlock(right);
+ free_extent_buffer(right);
path->nodes[0] = left;
path->slots[1] -= 1;
} else {
@@ -3553,7 +3526,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
goto out;
}
- if (check_sibling_keys(left, right)) {
+ if (unlikely(check_sibling_keys(left, right))) {
ret = -EUCLEAN;
btrfs_abort_transaction(trans, ret);
goto out;
@@ -3582,7 +3555,6 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
int i;
int ret;
struct btrfs_disk_key disk_key;
- struct btrfs_map_token token;
nritems = nritems - mid;
btrfs_set_header_nritems(right, nritems);
@@ -3595,12 +3567,11 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid);
- btrfs_init_map_token(&token, right);
for (i = 0; i < nritems; i++) {
u32 ioff;
- ioff = btrfs_token_item_offset(&token, i);
- btrfs_set_token_item_offset(&token, i, ioff + rt_data_off);
+ ioff = btrfs_item_offset(right, i);
+ btrfs_set_item_offset(right, i, ioff + rt_data_off);
}
btrfs_set_header_nritems(l, mid);
@@ -3703,7 +3674,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
const struct btrfs_key *ins_key,
struct btrfs_path *path, int data_size,
- int extend)
+ bool extend)
{
struct btrfs_disk_key disk_key;
struct extent_buffer *l;
@@ -3899,6 +3870,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
+ key.type != BTRFS_RAID_STRIPE_KEY &&
key.type != BTRFS_EXTENT_CSUM_KEY);
if (btrfs_leaf_free_space(leaf) >= ins_len)
@@ -3912,10 +3884,10 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
- path->keep_locks = 1;
- path->search_for_split = 1;
+ path->keep_locks = true;
+ path->search_for_split = true;
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
- path->search_for_split = 0;
+ path->search_for_split = false;
if (ret > 0)
ret = -EAGAIN;
if (ret < 0)
@@ -3942,11 +3914,11 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
if (ret)
goto err;
- path->keep_locks = 0;
+ path->keep_locks = false;
btrfs_unlock_up_safe(path, 1);
return 0;
err:
- path->keep_locks = 0;
+ path->keep_locks = false;
return ret;
}
@@ -4065,7 +4037,6 @@ void btrfs_truncate_item(struct btrfs_trans_handle *trans,
unsigned int old_size;
unsigned int size_diff;
int i;
- struct btrfs_map_token token;
leaf = path->nodes[0];
slot = path->slots[0];
@@ -4088,12 +4059,11 @@ void btrfs_truncate_item(struct btrfs_trans_handle *trans,
* item0..itemN ... dataN.offset..dataN.size .. data0.size
*/
/* first correct the data pointers */
- btrfs_init_map_token(&token, leaf);
for (i = slot; i < nritems; i++) {
u32 ioff;
- ioff = btrfs_token_item_offset(&token, i);
- btrfs_set_token_item_offset(&token, i, ioff + size_diff);
+ ioff = btrfs_item_offset(leaf, i);
+ btrfs_set_item_offset(leaf, i, ioff + size_diff);
}
/* shift the data */
@@ -4137,7 +4107,7 @@ void btrfs_truncate_item(struct btrfs_trans_handle *trans,
btrfs_set_item_size(leaf, slot, new_size);
btrfs_mark_buffer_dirty(trans, leaf);
- if (btrfs_leaf_free_space(leaf) < 0) {
+ if (unlikely(btrfs_leaf_free_space(leaf) < 0)) {
btrfs_print_leaf(leaf);
BUG();
}
@@ -4156,14 +4126,13 @@ void btrfs_extend_item(struct btrfs_trans_handle *trans,
unsigned int old_data;
unsigned int old_size;
int i;
- struct btrfs_map_token token;
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
data_end = leaf_data_end(leaf);
- if (btrfs_leaf_free_space(leaf) < data_size) {
+ if (unlikely(btrfs_leaf_free_space(leaf) < data_size)) {
btrfs_print_leaf(leaf);
BUG();
}
@@ -4171,7 +4140,7 @@ void btrfs_extend_item(struct btrfs_trans_handle *trans,
old_data = btrfs_item_data_end(leaf, slot);
BUG_ON(slot < 0);
- if (slot >= nritems) {
+ if (unlikely(slot >= nritems)) {
btrfs_print_leaf(leaf);
btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
slot, nritems);
@@ -4182,24 +4151,22 @@ void btrfs_extend_item(struct btrfs_trans_handle *trans,
* item0..itemN ... dataN.offset..dataN.size .. data0.size
*/
/* first correct the data pointers */
- btrfs_init_map_token(&token, leaf);
for (i = slot; i < nritems; i++) {
u32 ioff;
- ioff = btrfs_token_item_offset(&token, i);
- btrfs_set_token_item_offset(&token, i, ioff - data_size);
+ ioff = btrfs_item_offset(leaf, i);
+ btrfs_set_item_offset(leaf, i, ioff - data_size);
}
/* shift the data */
memmove_leaf_data(leaf, data_end - data_size, data_end,
old_data - data_end);
- data_end = old_data;
old_size = btrfs_item_size(leaf, slot);
btrfs_set_item_size(leaf, slot, old_size + data_size);
btrfs_mark_buffer_dirty(trans, leaf);
- if (btrfs_leaf_free_space(leaf) < 0) {
+ if (unlikely(btrfs_leaf_free_space(leaf) < 0)) {
btrfs_print_leaf(leaf);
BUG();
}
@@ -4227,7 +4194,6 @@ static void setup_items_for_insert(struct btrfs_trans_handle *trans,
struct btrfs_disk_key disk_key;
struct extent_buffer *leaf;
int slot;
- struct btrfs_map_token token;
u32 total_size;
/*
@@ -4248,18 +4214,17 @@ static void setup_items_for_insert(struct btrfs_trans_handle *trans,
data_end = leaf_data_end(leaf);
total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
- if (btrfs_leaf_free_space(leaf) < total_size) {
+ if (unlikely(btrfs_leaf_free_space(leaf) < total_size)) {
btrfs_print_leaf(leaf);
btrfs_crit(fs_info, "not enough freespace need %u have %d",
total_size, btrfs_leaf_free_space(leaf));
BUG();
}
- btrfs_init_map_token(&token, leaf);
if (slot != nritems) {
unsigned int old_data = btrfs_item_data_end(leaf, slot);
- if (old_data < data_end) {
+ if (unlikely(old_data < data_end)) {
btrfs_print_leaf(leaf);
btrfs_crit(fs_info,
"item at slot %d with data offset %u beyond data end of leaf %u",
@@ -4273,8 +4238,8 @@ static void setup_items_for_insert(struct btrfs_trans_handle *trans,
for (i = slot; i < nritems; i++) {
u32 ioff;
- ioff = btrfs_token_item_offset(&token, i);
- btrfs_set_token_item_offset(&token, i,
+ ioff = btrfs_item_offset(leaf, i);
+ btrfs_set_item_offset(leaf, i,
ioff - batch->total_data_size);
}
/* shift the items */
@@ -4291,14 +4256,14 @@ static void setup_items_for_insert(struct btrfs_trans_handle *trans,
btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]);
btrfs_set_item_key(leaf, &disk_key, slot + i);
data_end -= batch->data_sizes[i];
- btrfs_set_token_item_offset(&token, slot + i, data_end);
- btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]);
+ btrfs_set_item_offset(leaf, slot + i, data_end);
+ btrfs_set_item_size(leaf, slot + i, batch->data_sizes[i]);
}
btrfs_set_header_nritems(leaf, nritems + batch->nr);
btrfs_mark_buffer_dirty(trans, leaf);
- if (btrfs_leaf_free_space(leaf) < 0) {
+ if (unlikely(btrfs_leaf_free_space(leaf) < 0)) {
btrfs_print_leaf(leaf);
BUG();
}
@@ -4369,7 +4334,7 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
u32 data_size)
{
int ret = 0;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
unsigned long ptr;
@@ -4383,7 +4348,6 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
write_extent_buffer(leaf, data, ptr, data_size);
btrfs_mark_buffer_dirty(trans, leaf);
}
- btrfs_free_path(path);
return ret;
}
@@ -4441,7 +4405,7 @@ int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (level) {
ret = btrfs_tree_mod_log_insert_move(parent, slot,
slot + 1, nritems - slot - 1);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -4454,7 +4418,7 @@ int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
} else if (level) {
ret = btrfs_tree_mod_log_insert_key(parent, slot,
BTRFS_MOD_LOG_KEY_REMOVE);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -4506,7 +4470,7 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
root_sub_used_bytes(root);
- atomic_inc(&leaf->refs);
+ refcount_inc(&leaf->refs);
ret = btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
free_extent_buffer_stale(leaf);
if (ret < 0)
@@ -4533,7 +4497,6 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (slot + nr != nritems) {
const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1);
const int data_end = leaf_data_end(leaf);
- struct btrfs_map_token token;
u32 dsize = 0;
int i;
@@ -4543,12 +4506,11 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
memmove_leaf_data(leaf, data_end + dsize, data_end,
last_off - data_end);
- btrfs_init_map_token(&token, leaf);
for (i = slot + nr; i < nritems; i++) {
u32 ioff;
- ioff = btrfs_token_item_offset(&token, i);
- btrfs_set_token_item_offset(&token, i, ioff + dsize);
+ ioff = btrfs_item_offset(leaf, i);
+ btrfs_set_item_offset(leaf, i, ioff + dsize);
}
memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr);
@@ -4558,9 +4520,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
/* delete the leaf if we've emptied it */
if (nritems == 0) {
- if (leaf == root->node) {
- btrfs_set_header_level(leaf, 0);
- } else {
+ if (leaf != root->node) {
btrfs_clear_buffer_dirty(trans, leaf);
ret = btrfs_del_leaf(trans, root, path, leaf);
if (ret < 0)
@@ -4591,7 +4551,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
* for possible call to btrfs_del_ptr below
*/
slot = path->slots[1];
- atomic_inc(&leaf->refs);
+ refcount_inc(&leaf->refs);
/*
* We want to be able to at least push one item to the
* left neighbour leaf, and that's the first item.
@@ -4626,10 +4586,9 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (btrfs_header_nritems(leaf) == 0) {
path->slots[1] = slot;
ret = btrfs_del_leaf(trans, root, path, leaf);
+ free_extent_buffer(leaf);
if (ret < 0)
return ret;
- free_extent_buffer(leaf);
- ret = 0;
} else {
/* if we're still in the path, make sure
* we're dirty. Otherwise, one of the
@@ -4649,16 +4608,13 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
/*
* A helper function to walk down the tree starting at min_key, and looking
- * for nodes or leaves that are have a minimum transaction id.
+ * for leaves that have a minimum transaction id.
* This is used by the btree defrag code, and tree logging
*
* This does not cow, but it does stuff the starting key it finds back
* into min_key, so you can call btrfs_search_slot with cow=1 on the
* key and get a writable path.
*
- * This honors path->lowest_level to prevent descent past a given level
- * of the tree.
- *
* min_trans indicates the oldest transaction that you are interested
* in walking through. Any nodes or leaves older than min_trans are
* skipped over (without reading them).
@@ -4671,16 +4627,16 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
u64 min_trans)
{
struct extent_buffer *cur;
- struct btrfs_key found_key;
int slot;
int sret;
u32 nritems;
int level;
int ret = 1;
- int keep_locks = path->keep_locks;
+ const bool keep_locks = path->keep_locks;
ASSERT(!path->nowait);
- path->keep_locks = 1;
+ ASSERT(path->lowest_level == 0);
+ path->keep_locks = true;
again:
cur = btrfs_read_lock_root_node(root);
level = btrfs_header_level(cur);
@@ -4701,13 +4657,14 @@ again:
goto out;
}
- /* at the lowest level, we're done, setup the path and exit */
- if (level == path->lowest_level) {
+ /* At level 0 we're done, setup the path and exit. */
+ if (level == 0) {
if (slot >= nritems)
goto find_next_key;
ret = 0;
path->slots[level] = slot;
- btrfs_item_key_to_cpu(cur, &found_key, slot);
+ /* Save our key for returning back. */
+ btrfs_item_key_to_cpu(cur, min_key, slot);
goto out;
}
if (sret && slot > 0)
@@ -4731,8 +4688,8 @@ find_next_key:
* we didn't find a candidate key in this node, walk forward
* and find another one
*/
+ path->slots[level] = slot;
if (slot >= nritems) {
- path->slots[level] = slot;
sret = btrfs_find_next_key(root, path, min_key, level,
min_trans);
if (sret == 0) {
@@ -4742,13 +4699,6 @@ find_next_key:
goto out;
}
}
- /* save our key for returning back */
- btrfs_node_key_to_cpu(cur, &found_key, slot);
- path->slots[level] = slot;
- if (level == path->lowest_level) {
- ret = 0;
- goto out;
- }
cur = btrfs_read_node_slot(cur, slot);
if (IS_ERR(cur)) {
ret = PTR_ERR(cur);
@@ -4763,10 +4713,8 @@ find_next_key:
}
out:
path->keep_locks = keep_locks;
- if (ret == 0) {
- btrfs_unlock_up_safe(path, path->lowest_level + 1);
- memcpy(min_key, &found_key, sizeof(found_key));
- }
+ if (ret == 0)
+ btrfs_unlock_up_safe(path, 1);
return ret;
}
@@ -4778,7 +4726,7 @@ out:
* 0 is returned if another key is found, < 0 if there are any errors
* and 1 is returned if there are no higher keys in the tree
*
- * path->keep_locks should be set to 1 on the search made before
+ * path->keep_locks should be set to true on the search made before
* calling this function.
*/
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
@@ -4877,13 +4825,13 @@ again:
next = NULL;
btrfs_release_path(path);
- path->keep_locks = 1;
+ path->keep_locks = true;
if (time_seq) {
ret = btrfs_search_old_slot(root, &key, path, time_seq);
} else {
if (path->need_commit_sem) {
- path->need_commit_sem = 0;
+ path->need_commit_sem = false;
need_commit_sem = true;
if (path->nowait) {
if (!down_read_trylock(&fs_info->commit_root_sem)) {
@@ -4896,41 +4844,30 @@ again:
}
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
}
- path->keep_locks = 0;
+ path->keep_locks = false;
if (ret < 0)
goto done;
nritems = btrfs_header_nritems(path->nodes[0]);
/*
- * by releasing the path above we dropped all our locks. A balance
- * could have added more items next to the key that used to be
- * at the very end of the block. So, check again here and
- * advance the path if there are now more items available.
- */
- if (nritems > 0 && path->slots[0] < nritems - 1) {
- if (ret == 0)
- path->slots[0]++;
- ret = 0;
- goto done;
- }
- /*
- * So the above check misses one case:
- * - after releasing the path above, someone has removed the item that
- * used to be at the very end of the block, and balance between leafs
- * gets another one with bigger key.offset to replace it.
+ * By releasing the path above we dropped all our locks. A balance
+ * could have happened and
*
- * This one should be returned as well, or we can get leaf corruption
- * later(esp. in __btrfs_drop_extents()).
+ * 1. added more items after the previous last item
+ * 2. deleted the previous last item
*
- * And a bit more explanation about this check,
- * with ret > 0, the key isn't found, the path points to the slot
- * where it should be inserted, so the path->slots[0] item must be the
- * bigger one.
+ * So, check again here and advance the path if there are now more
+ * items available.
*/
- if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
- ret = 0;
- goto done;
+ if (nritems > 0 && path->slots[0] <= nritems - 1) {
+ if (ret == 0 && path->slots[0] != nritems - 1) {
+ path->slots[0]++;
+ goto done;
+ } else if (ret > 0) {
+ ret = 0;
+ goto done;
+ }
}
while (level < BTRFS_MAX_LEVEL) {
@@ -5035,7 +4972,7 @@ done:
if (need_commit_sem) {
int ret2;
- path->need_commit_sem = 1;
+ path->need_commit_sem = true;
ret2 = finish_need_commit_sem_search(path);
up_read(&fs_info->commit_root_sem);
if (ret2)
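
Several ctree.c hunks above convert extent_buffer reference counting from atomic_t to refcount_t (refcount_inc(), refcount_inc_not_zero()). The sketch below shows the generic RCU lookup pattern that conversion leans on, with invented names (struct obj, lookup_live); it is not btrfs code, only the shape of the btrfs_root_node() retry loop under the stated assumption that the final reference drop frees the object after an RCU grace period.

#include <linux/rcupdate.h>
#include <linux/refcount.h>

struct obj {
        refcount_t refs;
        struct rcu_head rcu;
};

/* Take a reference only while the object is still live; retry on races. */
static struct obj *lookup_live(struct obj __rcu **slot)
{
        struct obj *o;

        while (1) {
                rcu_read_lock();
                o = rcu_dereference(*slot);
                if (!o) {
                        rcu_read_unlock();
                        return NULL;
                }
                /* refcount_inc_not_zero() refuses 0 -> 1, so a dying object stays dead. */
                if (refcount_inc_not_zero(&o->refs)) {
                        rcu_read_unlock();
                        return o;
                }
                rcu_read_unlock();
                /* Lost the race with the final put; wait out the grace period and retry. */
                synchronize_rcu();
        }
}
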
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 307dedf95c70..692370fc07b2 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -6,8 +6,7 @@
#ifndef BTRFS_CTREE_H
#define BTRFS_CTREE_H
-#include "linux/cleanup.h"
-#include <linux/pagemap.h>
+#include <linux/cleanup.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
@@ -18,9 +17,7 @@
#include <linux/refcount.h>
#include <uapi/linux/btrfs_tree.h>
#include "locking.h"
-#include "fs.h"
#include "accessors.h"
-#include "extent-io-tree.h"
struct extent_buffer;
struct btrfs_block_rsv;
@@ -62,27 +59,27 @@ struct btrfs_path {
/* if there is real range locking, this locks field will change */
u8 locks[BTRFS_MAX_LEVEL];
u8 reada;
- /* keep some upper locks as we walk down */
u8 lowest_level;
/*
* set by btrfs_split_item, tells search_slot to keep all locks
* and to force calls to keep space in the nodes
*/
- unsigned int search_for_split:1;
- unsigned int keep_locks:1;
- unsigned int skip_locking:1;
- unsigned int search_commit_root:1;
- unsigned int need_commit_sem:1;
- unsigned int skip_release_on_error:1;
+ bool search_for_split:1;
+ /* Keep some upper locks as we walk down. */
+ bool keep_locks:1;
+ bool skip_locking:1;
+ bool search_commit_root:1;
+ bool need_commit_sem:1;
+ bool skip_release_on_error:1;
/*
* Indicate that new item (btrfs_search_slot) is extending already
* existing item and ins_len contains only the data size and not item
* header (ie. sizeof(struct btrfs_item) is not included).
*/
- unsigned int search_for_extension:1;
+ bool search_for_extension:1;
/* Stop search if any locks need to be taken (for read) */
- unsigned int nowait:1;
+ bool nowait:1;
};
#define BTRFS_PATH_AUTO_FREE(path_name) \
@@ -225,16 +222,10 @@ struct btrfs_root {
struct list_head root_list;
- /*
- * Xarray that keeps track of in-memory inodes, protected by the lock
- * @inode_lock.
- */
+ /* Xarray that keeps track of in-memory inodes. */
struct xarray inodes;
- /*
- * Xarray that keeps track of delayed nodes of every inode, protected
- * by @inode_lock.
- */
+ /* Xarray that keeps track of delayed nodes of every inode. */
struct xarray delayed_nodes;
/*
* right now this just gets used so that a root has its own devid
@@ -371,6 +362,25 @@ static inline void btrfs_set_root_last_trans(struct btrfs_root *root, u64 transi
}
/*
+ * Return the generation this root started with.
+ *
+ * Every normal root is created with root->root_key.offset set to its
+ * originating generation. If it is a snapshot it is the generation when the
+ * snapshot was created.
+ *
+ * However, for TREE_RELOC roots, root_key.offset is the objectid of the owning
+ * tree root. Thankfully we copy the root item of the owning tree root, which
+ * has its last_snapshot set to what we would have root_key.offset set to, so
+ * return that if this is a TREE_RELOC root.
+ */
+static inline u64 btrfs_root_origin_generation(const struct btrfs_root *root)
+{
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
+ return btrfs_root_last_snapshot(&root->root_item);
+ return root->root_key.offset;
+}
+
+/*
* Structure that conveys information about an extent that is going to replace
* all the extents in a file range.
*/
@@ -487,24 +497,10 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item);
}
-#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
- ((bytes) >> (fs_info)->sectorsize_bits)
-
-static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
-{
- return mapping_gfp_constraint(mapping, ~__GFP_FS);
-}
-
-void btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end);
-int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 num_bytes, u64 *actual_bytes);
-int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
-
-/* ctree.c */
int __init btrfs_ctree_init(void);
void __cold btrfs_ctree_exit(void);
-int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
+int btrfs_bin_search(const struct extent_buffer *eb, int first_slot,
const struct btrfs_key *key, int *slot);
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
@@ -572,9 +568,9 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
struct extent_buffer **cow_ret, u64 new_root_objectid);
-bool btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct extent_buffer *buf);
+bool btrfs_block_can_be_shared(const struct btrfs_trans_handle *trans,
+ const struct btrfs_root *root,
+ const struct extent_buffer *buf);
int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int level, int slot);
void btrfs_extend_item(struct btrfs_trans_handle *trans,
@@ -723,13 +719,18 @@ static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
}
int btrfs_leaf_free_space(const struct extent_buffer *leaf);
-static inline int is_fstree(u64 rootid)
+static inline bool btrfs_is_fstree(u64 rootid)
{
- if (rootid == BTRFS_FS_TREE_OBJECTID ||
- ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID &&
- !btrfs_qgroup_level(rootid)))
- return 1;
- return 0;
+ if (rootid == BTRFS_FS_TREE_OBJECTID)
+ return true;
+
+ if ((s64)rootid < (s64)BTRFS_FIRST_FREE_OBJECTID)
+ return false;
+
+ if (btrfs_qgroup_level(rootid) != 0)
+ return false;
+
+ return true;
}
static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
@@ -737,18 +738,4 @@ static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;
}
-u16 btrfs_csum_type_size(u16 type);
-int btrfs_super_csum_size(const struct btrfs_super_block *s);
-const char *btrfs_super_csum_name(u16 csum_type);
-const char *btrfs_super_csum_driver(u16 csum_type);
-size_t __attribute_const__ btrfs_get_num_csums(void);
-
-/*
- * We use folio flag owner_2 to indicate there is an ordered extent with
- * unfinished IO.
- */
-#define folio_test_ordered(folio) folio_test_owner_2(folio)
-#define folio_set_ordered(folio) folio_set_owner_2(folio)
-#define folio_clear_ordered(folio) folio_clear_owner_2(folio)
-
#endif
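
The ctree.h hunk above switches the header to <linux/cleanup.h>, and the ctree.c hunk for btrfs_insert_item() drops the manual btrfs_free_path() in favor of BTRFS_PATH_AUTO_FREE(). Below is a rough sketch of how such a scope-based cleanup macro is typically built on DEFINE_FREE()/__free(); the EXAMPLE_* names are invented, and the real btrfs definition (only partially visible in the hunk) may differ.

#include <linux/cleanup.h>

/* Free the path automatically when the variable goes out of scope. */
DEFINE_FREE(example_free_path, struct btrfs_path *, if (_T) btrfs_free_path(_T))

#define EXAMPLE_PATH_AUTO_FREE(name) \
        struct btrfs_path *name __free(example_free_path) = btrfs_alloc_path()

static int example_lookup(struct btrfs_root *root, const struct btrfs_key *key)
{
        EXAMPLE_PATH_AUTO_FREE(path);

        if (!path)
                return -ENOMEM;

        /* Every return below releases the path without an explicit free. */
        return btrfs_search_slot(NULL, root, key, path, 0, 0);
}

With this pattern the early-error returns in btrfs_insert_item() no longer need an explicit btrfs_free_path() call, which is what the BTRFS_PATH_AUTO_FREE(path) conversion relies on.
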
diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index 968dae953948..b81e224d4a27 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -15,6 +15,7 @@
#include "defrag.h"
#include "file-item.h"
#include "super.h"
+#include "compression.h"
static struct kmem_cache *btrfs_inode_defrag_cachep;
@@ -60,6 +61,14 @@ static int compare_inode_defrag(const struct inode_defrag *defrag1,
return 0;
}
+static int inode_defrag_cmp(struct rb_node *new, const struct rb_node *existing)
+{
+ const struct inode_defrag *new_defrag = rb_entry(new, struct inode_defrag, rb_node);
+ const struct inode_defrag *existing_defrag = rb_entry(existing, struct inode_defrag, rb_node);
+
+ return compare_inode_defrag(new_defrag, existing_defrag);
+}
+
/*
* Insert a record for an inode into the defrag tree. The lock must be held
* already.
@@ -71,49 +80,35 @@ static int btrfs_insert_inode_defrag(struct btrfs_inode *inode,
struct inode_defrag *defrag)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct inode_defrag *entry;
- struct rb_node **p;
- struct rb_node *parent = NULL;
- int ret;
+ struct rb_node *node;
- p = &fs_info->defrag_inodes.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct inode_defrag, rb_node);
+ node = rb_find_add(&defrag->rb_node, &fs_info->defrag_inodes, inode_defrag_cmp);
+ if (node) {
+ struct inode_defrag *entry;
- ret = compare_inode_defrag(defrag, entry);
- if (ret < 0)
- p = &parent->rb_left;
- else if (ret > 0)
- p = &parent->rb_right;
- else {
- /*
- * If we're reinserting an entry for an old defrag run,
- * make sure to lower the transid of our existing
- * record.
- */
- if (defrag->transid < entry->transid)
- entry->transid = defrag->transid;
- entry->extent_thresh = min(defrag->extent_thresh,
- entry->extent_thresh);
- return -EEXIST;
- }
+ entry = rb_entry(node, struct inode_defrag, rb_node);
+ /*
+ * If we're reinserting an entry for an old defrag run, make
+ * sure to lower the transid of our existing record.
+ */
+ if (defrag->transid < entry->transid)
+ entry->transid = defrag->transid;
+ entry->extent_thresh = min(defrag->extent_thresh, entry->extent_thresh);
+ return -EEXIST;
}
set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
- rb_link_node(&defrag->rb_node, parent, p);
- rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
return 0;
}
-static inline int need_auto_defrag(struct btrfs_fs_info *fs_info)
+static inline bool need_auto_defrag(struct btrfs_fs_info *fs_info)
{
if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
- return 0;
+ return false;
if (btrfs_fs_closing(fs_info))
- return 0;
+ return false;
- return 1;
+ return true;
}
/*
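The hunk above replaces the open-coded rbtree walk in btrfs_insert_inode_defrag() with the generic rb_find_add() helper from <linux/rbtree.h>. As a rough sketch of that pattern outside btrfs (the struct name and fields below are invented for illustration and are not part of the patch):

#include <linux/rbtree.h>
#include <linux/types.h>

/* Hypothetical node type keyed by a single u64. */
struct demo_entry {
        struct rb_node rb_node;
        u64 key;
};

/* rb_find_add() comparator: the new node vs. an existing node. */
static int demo_entry_cmp(struct rb_node *new, const struct rb_node *existing)
{
        const struct demo_entry *a = rb_entry(new, struct demo_entry, rb_node);
        const struct demo_entry *b = rb_entry(existing, struct demo_entry, rb_node);

        if (a->key < b->key)
                return -1;
        if (a->key > b->key)
                return 1;
        return 0;
}

/* Returns NULL when @ins was linked in, or the already-present entry. */
static struct demo_entry *demo_insert(struct rb_root *root, struct demo_entry *ins)
{
        struct rb_node *exist = rb_find_add(&ins->rb_node, root, demo_entry_cmp);

        return rb_entry_safe(exist, struct demo_entry, rb_node);
}

The delayed-item change later in this series uses the same comparator shape with rb_find_add_cached() on an rb_root_cached.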
@@ -159,7 +154,7 @@ void btrfs_add_inode_defrag(struct btrfs_inode *inode, u32 extent_thresh)
}
/*
- * Pick the defragable inode that we want, if it doesn't exist, we will get the
+ * Pick the defraggable inode that we want, if it doesn't exist, we will get the
* next one.
*/
static struct inode_defrag *btrfs_pick_defrag_inode(
@@ -191,10 +186,7 @@ static struct inode_defrag *btrfs_pick_defrag_inode(
if (parent && compare_inode_defrag(&tmp, entry) > 0) {
parent = rb_next(parent);
- if (parent)
- entry = rb_entry(parent, struct inode_defrag, rb_node);
- else
- entry = NULL;
+ entry = rb_entry_safe(parent, struct inode_defrag, rb_node);
}
out:
if (entry)
@@ -225,7 +217,7 @@ static int btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
struct file_ra_state *ra)
{
struct btrfs_root *inode_root;
- struct inode *inode;
+ struct btrfs_inode *inode;
struct btrfs_ioctl_defrag_range_args range;
int ret = 0;
u64 cur = 0;
@@ -250,24 +242,23 @@ again:
goto cleanup;
}
- if (cur >= i_size_read(inode)) {
- iput(inode);
+ if (cur >= i_size_read(&inode->vfs_inode)) {
+ iput(&inode->vfs_inode);
goto cleanup;
}
/* Do a chunk of defrag */
- clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
+ clear_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
memset(&range, 0, sizeof(range));
range.len = (u64)-1;
range.start = cur;
range.extent_thresh = defrag->extent_thresh;
- file_ra_state_init(ra, inode->i_mapping);
+ file_ra_state_init(ra, inode->vfs_inode.i_mapping);
- sb_start_write(fs_info->sb);
- ret = btrfs_defrag_file(inode, ra, &range, defrag->transid,
- BTRFS_DEFRAG_BATCH);
- sb_end_write(fs_info->sb);
- iput(inode);
+ scoped_guard(super_write, fs_info->sb)
+ ret = btrfs_defrag_file(inode, ra, &range,
+ defrag->transid, BTRFS_DEFRAG_BATCH);
+ iput(&inode->vfs_inode);
if (ret < 0)
goto cleanup;
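The sb_start_write()/sb_end_write() pair around btrfs_defrag_file() is converted to scoped_guard(super_write, ...), so write access on the superblock is dropped automatically when the scope is left (the super_write guard class is assumed to be defined elsewhere in this series). A minimal sketch of the same scope-based pattern using the stock mutex guard from <linux/cleanup.h>, purely for illustration:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_counter;

/* The guard releases demo_lock on every exit path, including early returns. */
static int demo_update(int value)
{
        scoped_guard(mutex, &demo_lock) {
                if (value < 0)
                        return -EINVAL;
                demo_counter += value;
        }
        return 0;
}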
@@ -480,7 +471,7 @@ static int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
memcpy(&key, &root->defrag_progress, sizeof(key));
}
- path->keep_locks = 1;
+ path->keep_locks = true;
ret = btrfs_search_forward(root, &key, path, BTRFS_OLDEST_GENERATION);
if (ret < 0)
@@ -523,7 +514,7 @@ static int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
/*
* Now that we reallocated the node we can find the next key. Note that
* btrfs_find_next_key() can release our path and do another search
- * without COWing, this is because even with path->keep_locks = 1,
+ * without COWing, this is because even with path->keep_locks == true,
 * btrfs_search_slot() / ctree.c:unlock_up() does not keep a lock on a
* node when path->slots[node_level - 1] does not point to the last
* item or a slot beyond the last item (ctree.c:unlock_up()). Therefore
@@ -624,7 +615,7 @@ static struct extent_map *defrag_get_extent(struct btrfs_inode *inode,
u64 ino = btrfs_ino(inode);
int ret;
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
ret = -ENOMEM;
goto err;
@@ -734,12 +725,12 @@ next:
not_found:
btrfs_release_path(&path);
- free_extent_map(em);
+ btrfs_free_extent_map(em);
return NULL;
err:
btrfs_release_path(&path);
- free_extent_map(em);
+ btrfs_free_extent_map(em);
return ERR_PTR(ret);
}
@@ -756,7 +747,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
* full extent lock.
*/
read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, start, sectorsize);
+ em = btrfs_lookup_extent_mapping(em_tree, start, sectorsize);
read_unlock(&em_tree->lock);
/*
@@ -769,7 +760,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
* file extent items in the inode's subvolume tree).
*/
if (em && (em->flags & EXTENT_FLAG_MERGED)) {
- free_extent_map(em);
+ btrfs_free_extent_map(em);
em = NULL;
}
@@ -779,10 +770,10 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
/* Get the big lock and read metadata off disk. */
if (!locked)
- lock_extent(io_tree, start, end, &cached);
+ btrfs_lock_extent(io_tree, start, end, &cached);
em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
if (!locked)
- unlock_extent(io_tree, start, end, &cached);
+ btrfs_unlock_extent(io_tree, start, end, &cached);
if (IS_ERR(em))
return NULL;
@@ -794,7 +785,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
static u32 get_extent_max_capacity(const struct btrfs_fs_info *fs_info,
const struct extent_map *em)
{
- if (extent_map_is_compressed(em))
+ if (btrfs_extent_map_is_compressed(em))
return BTRFS_MAX_COMPRESSED;
return fs_info->max_extent_size;
}
@@ -837,7 +828,7 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
ret = true;
out:
- free_extent_map(next);
+ btrfs_free_extent_map(next);
return ret;
}
@@ -857,13 +848,14 @@ static struct folio *defrag_prepare_one_folio(struct btrfs_inode *inode, pgoff_t
{
struct address_space *mapping = inode->vfs_inode.i_mapping;
gfp_t mask = btrfs_alloc_write_mask(mapping);
- u64 page_start = (u64)index << PAGE_SHIFT;
- u64 page_end = page_start + PAGE_SIZE - 1;
+ u64 lock_start;
+ u64 lock_end;
struct extent_state *cached_state = NULL;
struct folio *folio;
int ret;
again:
+ /* TODO: Add fgp order flags when large folios are fully enabled. */
folio = __filemap_get_folio(mapping, index,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
if (IS_ERR(folio))
@@ -871,13 +863,16 @@ again:
/*
* Since we can defragment files opened read-only, we can encounter
- * transparent huge pages here (see CONFIG_READ_ONLY_THP_FOR_FS). We
- * can't do I/O using huge pages yet, so return an error for now.
+ * transparent huge pages here (see CONFIG_READ_ONLY_THP_FOR_FS).
+ *
+ * The IO for such large folios is not fully tested, thus return
+ * an error to reject such folios unless it's an experimental build.
+ *
* Filesystem transparent huge pages are typically only used for
* executables that explicitly enable them, so this isn't very
* restrictive.
*/
- if (folio_test_large(folio)) {
+ if (!IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL) && folio_test_large(folio)) {
folio_unlock(folio);
folio_put(folio);
return ERR_PTR(-ETXTBSY);
@@ -890,14 +885,15 @@ again:
return ERR_PTR(ret);
}
+ lock_start = folio_pos(folio);
+ lock_end = folio_next_pos(folio) - 1;
/* Wait for any existing ordered extent in the range */
while (1) {
struct btrfs_ordered_extent *ordered;
- lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
- ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
- unlock_extent(&inode->io_tree, page_start, page_end,
- &cached_state);
+ btrfs_lock_extent(&inode->io_tree, lock_start, lock_end, &cached_state);
+ ordered = btrfs_lookup_ordered_range(inode, lock_start, folio_size(folio));
+ btrfs_unlock_extent(&inode->io_tree, lock_start, lock_end, &cached_state);
if (!ordered)
break;
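With defrag_prepare_one_folio() now handling possibly-large folios, the locked byte range is taken from the folio itself rather than derived from PAGE_SIZE. A small sketch of that computation using the long-standing folio_pos()/folio_size() helpers (folio_next_pos() in the hunk above is assumed to be folio_pos() + folio_size()):

#include <linux/pagemap.h>
#include <linux/types.h>

/* Inclusive byte range covered by a folio, regardless of its order. */
static void demo_folio_byte_range(struct folio *folio, u64 *start, u64 *end)
{
        *start = folio_pos(folio);
        *end = folio_pos(folio) + folio_size(folio) - 1;
}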
@@ -928,7 +924,7 @@ again:
folio_put(folio);
goto again;
}
- if (!folio_test_uptodate(folio)) {
+ if (unlikely(!folio_test_uptodate(folio))) {
folio_unlock(folio);
folio_put(folio);
return ERR_PTR(-EIO);
@@ -951,7 +947,7 @@ struct defrag_target_range {
* @extent_thresh: file extent size threshold, any extent size >= this value
* will be ignored
* @newer_than: only defrag extents newer than this value
- * @do_compress: whether the defrag is doing compression
+ * @do_compress: whether the defrag is doing compression or forced no-compression
* if true, @extent_thresh will be ignored and all regular
* file extents meeting @newer_than will be targets.
* @locked: if the range has already held extent lock
@@ -1027,8 +1023,8 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
* very likely resulting in a larger extent after writeback is
* triggered (except in a case of free space fragmentation).
*/
- if (test_range_bit_exists(&inode->io_tree, cur, cur + range_len - 1,
- EXTENT_DELALLOC))
+ if (btrfs_test_range_bit_exists(&inode->io_tree, cur, cur + range_len - 1,
+ EXTENT_DELALLOC))
goto next;
/*
@@ -1066,8 +1062,8 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
/* Empty target list, no way to merge with last entry */
if (list_empty(target_list))
goto next;
- last = list_entry(target_list->prev,
- struct defrag_target_range, list);
+ last = list_last_entry(target_list,
+ struct defrag_target_range, list);
/* Not mergeable with last entry */
if (last->start + last->len != cur)
goto next;
@@ -1077,7 +1073,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
add:
last_is_target = true;
- range_len = min(extent_map_end(em), start + len) - cur;
+ range_len = min(btrfs_extent_map_end(em), start + len) - cur;
/*
* This one is a good target, check if it can be merged into
* last range of the target list.
@@ -1085,8 +1081,8 @@ add:
if (!list_empty(target_list)) {
struct defrag_target_range *last;
- last = list_entry(target_list->prev,
- struct defrag_target_range, list);
+ last = list_last_entry(target_list,
+ struct defrag_target_range, list);
ASSERT(last->start + last->len <= cur);
if (last->start + last->len == cur) {
/* Mergeable, enlarge the last entry */
@@ -1099,7 +1095,7 @@ add:
/* Allocate new defrag_target_range */
new = kmalloc(sizeof(*new), GFP_NOFS);
if (!new) {
- free_extent_map(em);
+ btrfs_free_extent_map(em);
ret = -ENOMEM;
break;
}
@@ -1108,8 +1104,8 @@ add:
list_add_tail(&new->list, target_list);
next:
- cur = extent_map_end(em);
- free_extent_map(em);
+ cur = btrfs_extent_map_end(em);
+ btrfs_free_extent_map(em);
}
if (ret < 0) {
struct defrag_target_range *entry;
@@ -1162,27 +1158,31 @@ static int defrag_one_locked_target(struct btrfs_inode *inode,
struct extent_changeset *data_reserved = NULL;
const u64 start = target->start;
const u64 len = target->len;
- unsigned long last_index = (start + len - 1) >> PAGE_SHIFT;
- unsigned long start_index = start >> PAGE_SHIFT;
- unsigned long first_index = folios[0]->index;
int ret = 0;
- int i;
-
- ASSERT(last_index - first_index + 1 <= nr_pages);
ret = btrfs_delalloc_reserve_space(inode, &data_reserved, start, len);
if (ret < 0)
return ret;
- clear_extent_bit(&inode->io_tree, start, start + len - 1,
- EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
- EXTENT_DEFRAG, cached_state);
- set_extent_bit(&inode->io_tree, start, start + len - 1,
- EXTENT_DELALLOC | EXTENT_DEFRAG, cached_state);
-
- /* Update the page status */
- for (i = start_index - first_index; i <= last_index - first_index; i++) {
- folio_clear_checked(folios[i]);
- btrfs_folio_clamp_set_dirty(fs_info, folios[i], start, len);
+ btrfs_clear_extent_bit(&inode->io_tree, start, start + len - 1,
+ EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
+ EXTENT_DEFRAG, cached_state);
+ btrfs_set_extent_bit(&inode->io_tree, start, start + len - 1,
+ EXTENT_DELALLOC | EXTENT_DEFRAG, cached_state);
+
+ /*
+ * Update the page status.
+ * Due to possible large folios, we have to check all folios one by one.
+ */
+ for (int i = 0; i < nr_pages && folios[i]; i++) {
+ struct folio *folio = folios[i];
+
+ if (!folio)
+ break;
+ if (start >= folio_next_pos(folio) ||
+ start + len <= folio_pos(folio))
+ continue;
+ btrfs_folio_clamp_clear_checked(fs_info, folio, start, len);
+ btrfs_folio_clamp_set_dirty(fs_info, folio, start, len);
}
btrfs_delalloc_release_extents(inode, len);
extent_changeset_free(data_reserved);
@@ -1200,11 +1200,10 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
LIST_HEAD(target_list);
struct folio **folios;
const u32 sectorsize = inode->root->fs_info->sectorsize;
- u64 last_index = (start + len - 1) >> PAGE_SHIFT;
- u64 start_index = start >> PAGE_SHIFT;
- unsigned int nr_pages = last_index - start_index + 1;
+ u64 cur = start;
+ const unsigned int nr_pages = ((start + len - 1) >> PAGE_SHIFT) -
+ (start >> PAGE_SHIFT) + 1;
int ret = 0;
- int i;
ASSERT(nr_pages <= CLUSTER_SIZE / PAGE_SIZE);
ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(len, sectorsize));
@@ -1214,21 +1213,25 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
return -ENOMEM;
/* Prepare all pages */
- for (i = 0; i < nr_pages; i++) {
- folios[i] = defrag_prepare_one_folio(inode, start_index + i);
+ for (int i = 0; cur < start + len && i < nr_pages; i++) {
+ folios[i] = defrag_prepare_one_folio(inode, cur >> PAGE_SHIFT);
if (IS_ERR(folios[i])) {
ret = PTR_ERR(folios[i]);
- nr_pages = i;
+ folios[i] = NULL;
goto free_folios;
}
+ cur = folio_next_pos(folios[i]);
}
- for (i = 0; i < nr_pages; i++)
+ for (int i = 0; i < nr_pages; i++) {
+ if (!folios[i])
+ break;
folio_wait_writeback(folios[i]);
+ }
+ /* We should get at least one folio. */
+ ASSERT(folios[0]);
/* Lock the pages range */
- lock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
- (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
- &cached_state);
+ btrfs_lock_extent(&inode->io_tree, folio_pos(folios[0]), cur - 1, &cached_state);
/*
* Now we have a consistent view about the extent map, re-check
* which range really needs to be defragged.
@@ -1254,11 +1257,11 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
kfree(entry);
}
unlock_extent:
- unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
- (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
- &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, folio_pos(folios[0]), cur - 1, &cached_state);
free_folios:
- for (i = 0; i < nr_pages; i++) {
+ for (int i = 0; i < nr_pages; i++) {
+ if (!folios[i])
+ break;
folio_unlock(folios[i]);
folio_put(folios[i]);
}
@@ -1352,17 +1355,19 @@ out:
* (Mostly for autodefrag, which sets @max_to_defrag thus we may exit early without
* defragging all the range).
*/
-int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
+int btrfs_defrag_file(struct btrfs_inode *inode, struct file_ra_state *ra,
struct btrfs_ioctl_defrag_range_args *range,
u64 newer_than, unsigned long max_to_defrag)
{
- struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
unsigned long sectors_defragged = 0;
- u64 isize = i_size_read(inode);
+ u64 isize = i_size_read(&inode->vfs_inode);
u64 cur;
u64 last_byte;
bool do_compress = (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS);
+ bool no_compress = (range->flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS);
int compress_type = BTRFS_COMPRESS_ZLIB;
+ int compress_level = 0;
int ret = 0;
u32 extent_thresh = range->extent_thresh;
pgoff_t start_index;
@@ -1376,10 +1381,24 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
return -EINVAL;
if (do_compress) {
- if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
- return -EINVAL;
- if (range->compress_type)
- compress_type = range->compress_type;
+ if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS_LEVEL) {
+ if (range->compress.type >= BTRFS_NR_COMPRESS_TYPES)
+ return -EINVAL;
+ if (range->compress.type) {
+ compress_type = range->compress.type;
+ compress_level = range->compress.level;
+ if (!btrfs_compress_level_valid(compress_type, compress_level))
+ return -EINVAL;
+ }
+ } else {
+ if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
+ return -EINVAL;
+ if (range->compress_type)
+ compress_type = range->compress_type;
+ }
+ } else if (range->flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS) {
+ compress_type = BTRFS_DEFRAG_DONT_COMPRESS;
+ compress_level = 1;
}
if (extent_thresh == 0)
@@ -1402,8 +1421,8 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
* defrag range can be written sequentially.
*/
start_index = cur >> PAGE_SHIFT;
- if (start_index < inode->i_mapping->writeback_index)
- inode->i_mapping->writeback_index = start_index;
+ if (start_index < inode->vfs_inode.i_mapping->writeback_index)
+ inode->vfs_inode.i_mapping->writeback_index = start_index;
while (cur < last_byte) {
const unsigned long prev_sectors_defragged = sectors_defragged;
@@ -1420,27 +1439,30 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
(SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1;
cluster_end = min(cluster_end, last_byte);
- btrfs_inode_lock(BTRFS_I(inode), 0);
- if (IS_SWAPFILE(inode)) {
+ btrfs_inode_lock(inode, 0);
+ if (IS_SWAPFILE(&inode->vfs_inode)) {
ret = -ETXTBSY;
- btrfs_inode_unlock(BTRFS_I(inode), 0);
+ btrfs_inode_unlock(inode, 0);
break;
}
- if (!(inode->i_sb->s_flags & SB_ACTIVE)) {
- btrfs_inode_unlock(BTRFS_I(inode), 0);
+ if (!(inode->vfs_inode.i_sb->s_flags & SB_ACTIVE)) {
+ btrfs_inode_unlock(inode, 0);
break;
}
- if (do_compress)
- BTRFS_I(inode)->defrag_compress = compress_type;
- ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
+ if (do_compress || no_compress) {
+ inode->defrag_compress = compress_type;
+ inode->defrag_compress_level = compress_level;
+ }
+ ret = defrag_one_cluster(inode, ra, cur,
cluster_end + 1 - cur, extent_thresh,
- newer_than, do_compress, &sectors_defragged,
+ newer_than, do_compress || no_compress,
+ &sectors_defragged,
max_to_defrag, &last_scanned);
if (sectors_defragged > prev_sectors_defragged)
- balance_dirty_pages_ratelimited(inode->i_mapping);
+ balance_dirty_pages_ratelimited(inode->vfs_inode.i_mapping);
- btrfs_inode_unlock(BTRFS_I(inode), 0);
+ btrfs_inode_unlock(inode, 0);
if (ret < 0)
break;
cur = max(cluster_end + 1, last_scanned);
@@ -1462,10 +1484,10 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
* need to be written back immediately.
*/
if (range->flags & BTRFS_DEFRAG_RANGE_START_IO) {
- filemap_flush(inode->i_mapping);
+ filemap_flush(inode->vfs_inode.i_mapping);
if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
- &BTRFS_I(inode)->runtime_flags))
- filemap_flush(inode->i_mapping);
+ &inode->runtime_flags))
+ filemap_flush(inode->vfs_inode.i_mapping);
}
if (range->compress_type == BTRFS_COMPRESS_LZO)
btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
@@ -1473,10 +1495,10 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
ret = sectors_defragged;
}
- if (do_compress) {
- btrfs_inode_lock(BTRFS_I(inode), 0);
- BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
- btrfs_inode_unlock(BTRFS_I(inode), 0);
+ if (do_compress || no_compress) {
+ btrfs_inode_lock(inode, 0);
+ inode->defrag_compress = BTRFS_COMPRESS_NONE;
+ btrfs_inode_unlock(inode, 0);
}
return ret;
}
diff --git a/fs/btrfs/defrag.h b/fs/btrfs/defrag.h
index 6b7596c4f0dc..a7f917a38dbf 100644
--- a/fs/btrfs/defrag.h
+++ b/fs/btrfs/defrag.h
@@ -6,14 +6,14 @@
#include <linux/types.h>
#include <linux/compiler_types.h>
-struct inode;
struct file_ra_state;
+struct btrfs_inode;
struct btrfs_fs_info;
struct btrfs_root;
struct btrfs_trans_handle;
struct btrfs_ioctl_defrag_range_args;
-int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
+int btrfs_defrag_file(struct btrfs_inode *inode, struct file_ra_state *ra,
struct btrfs_ioctl_defrag_range_args *range,
u64 newer_than, unsigned long max_to_defrag);
int __init btrfs_auto_defrag_init(void);
diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
index 7aa8a395d838..0970799d0aa4 100644
--- a/fs/btrfs/delalloc-space.c
+++ b/fs/btrfs/delalloc-space.c
@@ -111,6 +111,18 @@
* making error handling and cleanup easier.
*/
+static inline struct btrfs_space_info *data_sinfo_for_inode(const struct btrfs_inode *inode)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+
+ if (btrfs_is_zoned(fs_info) && btrfs_is_data_reloc_root(inode->root)) {
+ ASSERT(fs_info->data_sinfo->sub_group[0]->subgroup_id ==
+ BTRFS_SUB_GROUP_DATA_RELOC);
+ return fs_info->data_sinfo->sub_group[0];
+ }
+ return fs_info->data_sinfo;
+}
+
int btrfs_alloc_data_chunk_ondemand(const struct btrfs_inode *inode, u64 bytes)
{
struct btrfs_root *root = inode->root;
@@ -123,7 +135,7 @@ int btrfs_alloc_data_chunk_ondemand(const struct btrfs_inode *inode, u64 bytes)
if (btrfs_is_free_space_inode(inode))
flush = BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE;
- return btrfs_reserve_data_bytes(fs_info, bytes, flush);
+ return btrfs_reserve_data_bytes(data_sinfo_for_inode(inode), bytes, flush);
}
int btrfs_check_data_free_space(struct btrfs_inode *inode,
@@ -144,14 +156,14 @@ int btrfs_check_data_free_space(struct btrfs_inode *inode,
else if (btrfs_is_free_space_inode(inode))
flush = BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE;
- ret = btrfs_reserve_data_bytes(fs_info, len, flush);
+ ret = btrfs_reserve_data_bytes(data_sinfo_for_inode(inode), len, flush);
if (ret < 0)
return ret;
/* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
if (ret < 0) {
- btrfs_free_reserved_data_space_noquota(fs_info, len);
+ btrfs_free_reserved_data_space_noquota(inode, len);
extent_changeset_free(*reserved);
*reserved = NULL;
} else {
@@ -168,15 +180,13 @@ int btrfs_check_data_free_space(struct btrfs_inode *inode,
* which we can't sleep and is sure it won't affect qgroup reserved space.
* Like clear_bit_hook().
*/
-void btrfs_free_reserved_data_space_noquota(struct btrfs_fs_info *fs_info,
- u64 len)
+void btrfs_free_reserved_data_space_noquota(struct btrfs_inode *inode, u64 len)
{
- struct btrfs_space_info *data_sinfo;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
ASSERT(IS_ALIGNED(len, fs_info->sectorsize));
- data_sinfo = fs_info->data_sinfo;
- btrfs_space_info_free_bytes_may_use(fs_info, data_sinfo, len);
+ btrfs_space_info_free_bytes_may_use(data_sinfo_for_inode(inode), len);
}
/*
@@ -196,7 +206,7 @@ void btrfs_free_reserved_data_space(struct btrfs_inode *inode,
round_down(start, fs_info->sectorsize);
start = round_down(start, fs_info->sectorsize);
- btrfs_free_reserved_data_space_noquota(fs_info, len);
+ btrfs_free_reserved_data_space_noquota(inode, len);
btrfs_qgroup_free_data(inode, reserved, start, len, NULL);
}
@@ -348,8 +358,8 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
noflush);
if (ret)
return ret;
- ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
- meta_reserve, flush);
+ ret = btrfs_reserve_metadata_bytes(block_rsv->space_info, meta_reserve,
+ flush);
if (ret) {
btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
return ret;
@@ -439,6 +449,29 @@ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes)
btrfs_inode_rsv_release(inode, true);
}
+/* Shrink a previously reserved extent to a new length. */
+void btrfs_delalloc_shrink_extents(struct btrfs_inode *inode, u64 reserved_len, u64 new_len)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ const u32 reserved_num_extents = count_max_extents(fs_info, reserved_len);
+ const u32 new_num_extents = count_max_extents(fs_info, new_len);
+ const int diff_num_extents = new_num_extents - reserved_num_extents;
+
+ ASSERT(new_len <= reserved_len);
+ if (new_num_extents == reserved_num_extents)
+ return;
+
+ spin_lock(&inode->lock);
+ btrfs_mod_outstanding_extents(inode, diff_num_extents);
+ btrfs_calculate_inode_block_rsv_size(fs_info, inode);
+ spin_unlock(&inode->lock);
+
+ if (btrfs_is_testing(fs_info))
+ return;
+
+ btrfs_inode_rsv_release(inode, true);
+}
+
/*
* Reserve data and metadata space for delalloc
*
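The new btrfs_delalloc_shrink_extents() gives back the outstanding-extent metadata accounting when a delalloc range ends up shorter than what was reserved. A hedged sketch of a caller (the surrounding function and lengths are invented for illustration and are not part of this patch):

#include "btrfs_inode.h"
#include "delalloc-space.h"

/*
 * Sketch only: reserve for the worst case, then shrink the accounting
 * once the real length of the write is known.
 */
static void demo_shrink_reservation(struct btrfs_inode *inode,
                                    u64 reserved_len, u64 written_len)
{
        if (written_len < reserved_len)
                btrfs_delalloc_shrink_extents(inode, reserved_len, written_len);
}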
diff --git a/fs/btrfs/delalloc-space.h b/fs/btrfs/delalloc-space.h
index 3f32953c0a80..6119c0d3f883 100644
--- a/fs/btrfs/delalloc-space.h
+++ b/fs/btrfs/delalloc-space.h
@@ -18,8 +18,7 @@ void btrfs_free_reserved_data_space(struct btrfs_inode *inode,
void btrfs_delalloc_release_space(struct btrfs_inode *inode,
struct extent_changeset *reserved,
u64 start, u64 len, bool qgroup_free);
-void btrfs_free_reserved_data_space_noquota(struct btrfs_fs_info *fs_info,
- u64 len);
+void btrfs_free_reserved_data_space_noquota(struct btrfs_inode *inode, u64 len);
void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
bool qgroup_free);
int btrfs_delalloc_reserve_space(struct btrfs_inode *inode,
@@ -27,5 +26,6 @@ int btrfs_delalloc_reserve_space(struct btrfs_inode *inode,
int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
u64 disk_num_bytes, bool noflush);
void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes);
+void btrfs_delalloc_shrink_extents(struct btrfs_inode *inode, u64 reserved_len, u64 new_len);
#endif /* BTRFS_DELALLOC_SPACE_H */
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 508bdbae29a0..ce6e9f8812e0 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -57,6 +57,7 @@ static inline void btrfs_init_delayed_node(
delayed_node->root = root;
delayed_node->inode_id = inode_id;
refcount_set(&delayed_node->refs, 0);
+ btrfs_delayed_node_ref_tracker_dir_init(delayed_node);
delayed_node->ins_root = RB_ROOT_CACHED;
delayed_node->del_root = RB_ROOT_CACHED;
mutex_init(&delayed_node->mutex);
@@ -65,7 +66,8 @@ static inline void btrfs_init_delayed_node(
}
static struct btrfs_delayed_node *btrfs_get_delayed_node(
- struct btrfs_inode *btrfs_inode)
+ struct btrfs_inode *btrfs_inode,
+ struct btrfs_ref_tracker *tracker)
{
struct btrfs_root *root = btrfs_inode->root;
u64 ino = btrfs_ino(btrfs_inode);
@@ -74,6 +76,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
node = READ_ONCE(btrfs_inode->delayed_node);
if (node) {
refcount_inc(&node->refs);
+ btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_NOFS);
return node;
}
@@ -83,6 +86,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
if (node) {
if (btrfs_inode->delayed_node) {
refcount_inc(&node->refs); /* can be accessed */
+ btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_ATOMIC);
BUG_ON(btrfs_inode->delayed_node != node);
xa_unlock(&root->delayed_nodes);
return node;
@@ -106,6 +110,9 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
*/
if (refcount_inc_not_zero(&node->refs)) {
refcount_inc(&node->refs);
+ btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_ATOMIC);
+ btrfs_delayed_node_ref_tracker_alloc(node, &node->inode_cache_tracker,
+ GFP_ATOMIC);
btrfs_inode->delayed_node = node;
} else {
node = NULL;
@@ -119,9 +126,15 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
return NULL;
}
-/* Will return either the node or PTR_ERR(-ENOMEM) */
+/*
+ * Look up an existing delayed node associated with @btrfs_inode or create a new
+ * one and insert it into the delayed nodes of the root.
+ *
+ * Return the delayed node, or error pointer on failure.
+ */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
- struct btrfs_inode *btrfs_inode)
+ struct btrfs_inode *btrfs_inode,
+ struct btrfs_ref_tracker *tracker)
{
struct btrfs_delayed_node *node;
struct btrfs_root *root = btrfs_inode->root;
@@ -130,7 +143,7 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
void *ptr;
again:
- node = btrfs_get_delayed_node(btrfs_inode);
+ node = btrfs_get_delayed_node(btrfs_inode, tracker);
if (node)
return node;
@@ -139,12 +152,10 @@ again:
return ERR_PTR(-ENOMEM);
btrfs_init_delayed_node(node, root, ino);
- /* Cached in the inode and can be accessed. */
- refcount_set(&node->refs, 2);
-
/* Allocate and reserve the slot, from now it can return a NULL from xa_load(). */
ret = xa_reserve(&root->delayed_nodes, ino, GFP_NOFS);
if (ret == -ENOMEM) {
+ btrfs_delayed_node_ref_tracker_dir_exit(node);
kmem_cache_free(delayed_node_cache, node);
return ERR_PTR(-ENOMEM);
}
@@ -153,6 +164,7 @@ again:
if (ptr) {
/* Somebody inserted it, go back and read it. */
xa_unlock(&root->delayed_nodes);
+ btrfs_delayed_node_ref_tracker_dir_exit(node);
kmem_cache_free(delayed_node_cache, node);
node = NULL;
goto again;
@@ -161,6 +173,12 @@ again:
ASSERT(xa_err(ptr) != -EINVAL);
ASSERT(xa_err(ptr) != -ENOMEM);
ASSERT(ptr == NULL);
+
+ /* Cached in the inode and can be accessed. */
+ refcount_set(&node->refs, 2);
+ btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_ATOMIC);
+ btrfs_delayed_node_ref_tracker_alloc(node, &node->inode_cache_tracker, GFP_ATOMIC);
+
btrfs_inode->delayed_node = node;
xa_unlock(&root->delayed_nodes);
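The btrfs_delayed_node_ref_tracker_*() helpers threaded through these functions are introduced elsewhere in this series; they appear to be thin wrappers around the generic reference-tracking infrastructure in <linux/ref_tracker.h>, which records one tracker per taken reference so leaks and double puts can be attributed. A minimal sketch of that generic API on an invented object:

#include <linux/ref_tracker.h>
#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical refcounted object instrumented with a tracker directory. */
struct demo_obj {
        refcount_t refs;
        struct ref_tracker_dir dir;
};

static void demo_obj_init(struct demo_obj *obj)
{
        refcount_set(&obj->refs, 1);
        /* Keep up to 16 freed trackers quarantined to catch double puts. */
        ref_tracker_dir_init(&obj->dir, 16, "demo_obj");
}

static void demo_obj_get(struct demo_obj *obj, struct ref_tracker **tracker)
{
        refcount_inc(&obj->refs);
        ref_tracker_alloc(&obj->dir, tracker, GFP_KERNEL);
}

static void demo_obj_put(struct demo_obj *obj, struct ref_tracker **tracker)
{
        ref_tracker_free(&obj->dir, tracker);
        if (refcount_dec_and_test(&obj->refs)) {
                ref_tracker_dir_exit(&obj->dir);
                kfree(obj);
        }
}

When CONFIG_REF_TRACKER is not enabled these calls fall back to no-op stubs, so the tracking adds no overhead to production builds.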
@@ -186,6 +204,8 @@ static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
list_add_tail(&node->n_list, &root->node_list);
list_add_tail(&node->p_list, &root->prepare_list);
refcount_inc(&node->refs); /* inserted into list */
+ btrfs_delayed_node_ref_tracker_alloc(node, &node->node_list_tracker,
+ GFP_ATOMIC);
root->nodes++;
set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
}
@@ -199,6 +219,7 @@ static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
spin_lock(&root->lock);
if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
root->nodes--;
+ btrfs_delayed_node_ref_tracker_free(node, &node->node_list_tracker);
refcount_dec(&node->refs); /* not in the list */
list_del_init(&node->n_list);
if (!list_empty(&node->p_list))
@@ -209,26 +230,26 @@ static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
}
static struct btrfs_delayed_node *btrfs_first_delayed_node(
- struct btrfs_delayed_root *delayed_root)
+ struct btrfs_delayed_root *delayed_root,
+ struct btrfs_ref_tracker *tracker)
{
- struct list_head *p;
- struct btrfs_delayed_node *node = NULL;
+ struct btrfs_delayed_node *node;
spin_lock(&delayed_root->lock);
- if (list_empty(&delayed_root->node_list))
- goto out;
-
- p = delayed_root->node_list.next;
- node = list_entry(p, struct btrfs_delayed_node, n_list);
- refcount_inc(&node->refs);
-out:
+ node = list_first_entry_or_null(&delayed_root->node_list,
+ struct btrfs_delayed_node, n_list);
+ if (node) {
+ refcount_inc(&node->refs);
+ btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_ATOMIC);
+ }
spin_unlock(&delayed_root->lock);
return node;
}
static struct btrfs_delayed_node *btrfs_next_delayed_node(
- struct btrfs_delayed_node *node)
+ struct btrfs_delayed_node *node,
+ struct btrfs_ref_tracker *tracker)
{
struct btrfs_delayed_root *delayed_root;
struct list_head *p;
@@ -248,6 +269,7 @@ static struct btrfs_delayed_node *btrfs_next_delayed_node(
next = list_entry(p, struct btrfs_delayed_node, n_list);
refcount_inc(&next->refs);
+ btrfs_delayed_node_ref_tracker_alloc(next, tracker, GFP_ATOMIC);
out:
spin_unlock(&delayed_root->lock);
@@ -256,7 +278,7 @@ out:
static void __btrfs_release_delayed_node(
struct btrfs_delayed_node *delayed_node,
- int mod)
+ int mod, struct btrfs_ref_tracker *tracker)
{
struct btrfs_delayed_root *delayed_root;
@@ -272,6 +294,7 @@ static void __btrfs_release_delayed_node(
btrfs_dequeue_delayed_node(delayed_root, delayed_node);
mutex_unlock(&delayed_node->mutex);
+ btrfs_delayed_node_ref_tracker_free(delayed_node, tracker);
if (refcount_dec_and_test(&delayed_node->refs)) {
struct btrfs_root *root = delayed_node->root;
@@ -281,39 +304,41 @@ static void __btrfs_release_delayed_node(
* back up. We can delete it now.
*/
ASSERT(refcount_read(&delayed_node->refs) == 0);
+ btrfs_delayed_node_ref_tracker_dir_exit(delayed_node);
kmem_cache_free(delayed_node_cache, delayed_node);
}
}
-static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
+static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node,
+ struct btrfs_ref_tracker *tracker)
{
- __btrfs_release_delayed_node(node, 0);
+ __btrfs_release_delayed_node(node, 0, tracker);
}
static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
- struct btrfs_delayed_root *delayed_root)
+ struct btrfs_delayed_root *delayed_root,
+ struct btrfs_ref_tracker *tracker)
{
- struct list_head *p;
- struct btrfs_delayed_node *node = NULL;
+ struct btrfs_delayed_node *node;
spin_lock(&delayed_root->lock);
- if (list_empty(&delayed_root->prepare_list))
- goto out;
-
- p = delayed_root->prepare_list.next;
- list_del_init(p);
- node = list_entry(p, struct btrfs_delayed_node, p_list);
- refcount_inc(&node->refs);
-out:
+ node = list_first_entry_or_null(&delayed_root->prepare_list,
+ struct btrfs_delayed_node, p_list);
+ if (node) {
+ list_del_init(&node->p_list);
+ refcount_inc(&node->refs);
+ btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_ATOMIC);
+ }
spin_unlock(&delayed_root->lock);
return node;
}
static inline void btrfs_release_prepared_delayed_node(
- struct btrfs_delayed_node *node)
+ struct btrfs_delayed_node *node,
+ struct btrfs_ref_tracker *tracker)
{
- __btrfs_release_delayed_node(node, 1);
+ __btrfs_release_delayed_node(node, 1, tracker);
}
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
@@ -336,6 +361,20 @@ static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
return item;
}
+static int delayed_item_index_cmp(const void *key, const struct rb_node *node)
+{
+ const u64 *index = key;
+ const struct btrfs_delayed_item *delayed_item = rb_entry(node,
+ struct btrfs_delayed_item, rb_node);
+
+ if (delayed_item->index < *index)
+ return 1;
+ else if (delayed_item->index > *index)
+ return -1;
+
+ return 0;
+}
+
/*
* Look up the delayed item by key.
*
@@ -349,57 +388,35 @@ static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
struct rb_root *root,
u64 index)
{
- struct rb_node *node = root->rb_node;
- struct btrfs_delayed_item *delayed_item = NULL;
+ struct rb_node *node;
- while (node) {
- delayed_item = rb_entry(node, struct btrfs_delayed_item,
- rb_node);
- if (delayed_item->index < index)
- node = node->rb_right;
- else if (delayed_item->index > index)
- node = node->rb_left;
- else
- return delayed_item;
- }
+ node = rb_find(&index, root, delayed_item_index_cmp);
+ return rb_entry_safe(node, struct btrfs_delayed_item, rb_node);
+}
- return NULL;
+static int btrfs_delayed_item_cmp(const struct rb_node *new,
+ const struct rb_node *exist)
+{
+ const struct btrfs_delayed_item *new_item =
+ rb_entry(new, struct btrfs_delayed_item, rb_node);
+
+ return delayed_item_index_cmp(&new_item->index, exist);
}
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
struct btrfs_delayed_item *ins)
{
- struct rb_node **p, *node;
- struct rb_node *parent_node = NULL;
struct rb_root_cached *root;
- struct btrfs_delayed_item *item;
- bool leftmost = true;
+ struct rb_node *exist;
if (ins->type == BTRFS_DELAYED_INSERTION_ITEM)
root = &delayed_node->ins_root;
else
root = &delayed_node->del_root;
- p = &root->rb_root.rb_node;
- node = &ins->rb_node;
-
- while (*p) {
- parent_node = *p;
- item = rb_entry(parent_node, struct btrfs_delayed_item,
- rb_node);
-
- if (item->index < ins->index) {
- p = &(*p)->rb_right;
- leftmost = false;
- } else if (item->index > ins->index) {
- p = &(*p)->rb_left;
- } else {
- return -EEXIST;
- }
- }
-
- rb_link_node(node, parent_node, p);
- rb_insert_color_cached(node, root, leftmost);
+ exist = rb_find_add_cached(&ins->rb_node, root, btrfs_delayed_item_cmp);
+ if (exist)
+ return -EEXIST;
if (ins->type == BTRFS_DELAYED_INSERTION_ITEM &&
ins->index >= delayed_node->index_cnt)
@@ -459,40 +476,25 @@ static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
struct btrfs_delayed_node *delayed_node)
{
- struct rb_node *p;
- struct btrfs_delayed_item *item = NULL;
-
- p = rb_first_cached(&delayed_node->ins_root);
- if (p)
- item = rb_entry(p, struct btrfs_delayed_item, rb_node);
+ struct rb_node *p = rb_first_cached(&delayed_node->ins_root);
- return item;
+ return rb_entry_safe(p, struct btrfs_delayed_item, rb_node);
}
static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
struct btrfs_delayed_node *delayed_node)
{
- struct rb_node *p;
- struct btrfs_delayed_item *item = NULL;
-
- p = rb_first_cached(&delayed_node->del_root);
- if (p)
- item = rb_entry(p, struct btrfs_delayed_item, rb_node);
+ struct rb_node *p = rb_first_cached(&delayed_node->del_root);
- return item;
+ return rb_entry_safe(p, struct btrfs_delayed_item, rb_node);
}
static struct btrfs_delayed_item *__btrfs_next_delayed_item(
struct btrfs_delayed_item *item)
{
- struct rb_node *p;
- struct btrfs_delayed_item *next = NULL;
+ struct rb_node *p = rb_next(&item->rb_node);
- p = rb_next(&item->rb_node);
- if (p)
- next = rb_entry(p, struct btrfs_delayed_item, rb_node);
-
- return next;
+ return rb_entry_safe(p, struct btrfs_delayed_item, rb_node);
}
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
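__btrfs_lookup_delayed_item() above switches to rb_find(), whose comparator receives the search key as a void pointer, and the first/next helpers now lean on rb_entry_safe() to fold the NULL check. A sketch of that key-based lookup pattern on an invented type:

#include <linux/rbtree.h>
#include <linux/types.h>

/* Hypothetical node type keyed by a u64 index. */
struct demo_item {
        struct rb_node rb_node;
        u64 index;
};

/* rb_find() comparator: @key points at the index being looked up. */
static int demo_index_cmp(const void *key, const struct rb_node *node)
{
        const u64 *index = key;
        const struct demo_item *item = rb_entry(node, struct demo_item, rb_node);

        if (item->index < *index)
                return 1;
        if (item->index > *index)
                return -1;
        return 0;
}

static struct demo_item *demo_lookup(struct rb_root *root, u64 index)
{
        struct rb_node *node = rb_find(&index, root, demo_index_cmp);

        return rb_entry_safe(node, struct demo_item, rb_node);
}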
@@ -666,7 +668,7 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
struct btrfs_key first_key;
const u32 first_data_size = first_item->data_len;
int total_size;
- char *ins_data = NULL;
+ char AUTO_KFREE(ins_data);
int ret;
bool continuous_keys_only = false;
@@ -736,12 +738,10 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
u32 *ins_sizes;
int i = 0;
- ins_data = kmalloc(batch.nr * sizeof(u32) +
- batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
- if (!ins_data) {
- ret = -ENOMEM;
- goto out;
- }
+ ins_data = kmalloc_array(batch.nr,
+ sizeof(u32) + sizeof(struct btrfs_key), GFP_NOFS);
+ if (!ins_data)
+ return -ENOMEM;
ins_sizes = (u32 *)ins_data;
ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
batch.keys = ins_keys;
@@ -757,7 +757,7 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_items(trans, root, path, &batch);
if (ret)
- goto out;
+ return ret;
list_for_each_entry(curr, &item_list, tree_list) {
char *data_ptr;
@@ -812,9 +812,8 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
list_del(&curr->tree_list);
btrfs_release_delayed_item(curr);
}
-out:
- kfree(ins_data);
- return ret;
+
+ return 0;
}
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
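The AUTO_KFREE() annotation used for ins_data above is a helper introduced elsewhere in this series, presumably built on the scope-based __free(kfree) attribute from <linux/cleanup.h> and <linux/slab.h>; that is what removes the kfree()-and-goto-out error path. A minimal sketch of the underlying pattern:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

/* @buf is kfree()d automatically on every return path below. */
static int demo_copy_name(const char *src, size_t len)
{
        char *buf __free(kfree) = kmalloc(len + 1, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        memcpy(buf, src, len);
        buf[len] = '\0';

        if (buf[0] == '\0')
                return -EINVAL;         /* early return, buf still freed */

        return 0;
}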
@@ -1030,15 +1029,22 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
ret = btrfs_lookup_inode(trans, root, path, &key, mod);
if (ret > 0)
ret = -ENOENT;
- if (ret < 0)
+ if (ret < 0) {
+ /*
+ * If we fail to update the delayed inode we need to abort the
+ * transaction, because we could leave the inode with the
+ * improper counts behind.
+ */
+ if (unlikely(ret != -ENOENT))
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
leaf = path->nodes[0];
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
sizeof(struct btrfs_inode_item));
- btrfs_mark_buffer_dirty(trans, leaf);
if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
goto out;
@@ -1057,8 +1063,10 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret < 0)
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
goto err_out;
+ }
ASSERT(ret > 0);
ASSERT(path->slots[0] > 0);
ret = 0;
@@ -1080,21 +1088,14 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
* in the same item doesn't exist.
*/
ret = btrfs_del_item(trans, root, path);
+ if (ret < 0)
+ btrfs_abort_transaction(trans, ret);
out:
btrfs_release_delayed_iref(node);
btrfs_release_path(path);
err_out:
btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
btrfs_release_delayed_inode(node);
-
- /*
- * If we fail to update the delayed inode we need to abort the
- * transaction, because we could leave the inode with the improper
- * counts behind.
- */
- if (ret && ret != -ENOENT)
- btrfs_abort_transaction(trans, ret);
-
return ret;
}
@@ -1149,6 +1150,7 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_root *delayed_root;
struct btrfs_delayed_node *curr_node, *prev_node;
+ struct btrfs_ref_tracker curr_delayed_node_tracker, prev_delayed_node_tracker;
struct btrfs_path *path;
struct btrfs_block_rsv *block_rsv;
int ret = 0;
@@ -1166,17 +1168,18 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
delayed_root = fs_info->delayed_root;
- curr_node = btrfs_first_delayed_node(delayed_root);
+ curr_node = btrfs_first_delayed_node(delayed_root, &curr_delayed_node_tracker);
while (curr_node && (!count || nr--)) {
ret = __btrfs_commit_inode_delayed_items(trans, path,
curr_node);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
prev_node = curr_node;
- curr_node = btrfs_next_delayed_node(curr_node);
+ prev_delayed_node_tracker = curr_delayed_node_tracker;
+ curr_node = btrfs_next_delayed_node(curr_node, &curr_delayed_node_tracker);
/*
* See the comment below about releasing path before releasing
* node. If the commit of delayed items was successful the path
@@ -1184,7 +1187,7 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
* point to locked extent buffers (a leaf at the very least).
*/
ASSERT(path->nodes[0] == NULL);
- btrfs_release_delayed_node(prev_node);
+ btrfs_release_delayed_node(prev_node, &prev_delayed_node_tracker);
}
/*
@@ -1197,7 +1200,7 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
btrfs_free_path(path);
if (curr_node)
- btrfs_release_delayed_node(curr_node);
+ btrfs_release_delayed_node(curr_node, &curr_delayed_node_tracker);
trans->block_rsv = block_rsv;
return ret;
@@ -1216,8 +1219,10 @@ int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode)
{
- struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
- struct btrfs_path *path;
+ struct btrfs_ref_tracker delayed_node_tracker;
+ struct btrfs_delayed_node *delayed_node =
+ btrfs_get_delayed_node(inode, &delayed_node_tracker);
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_block_rsv *block_rsv;
int ret;
@@ -1227,14 +1232,14 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
mutex_lock(&delayed_node->mutex);
if (!delayed_node->count) {
mutex_unlock(&delayed_node->mutex);
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return 0;
}
mutex_unlock(&delayed_node->mutex);
path = btrfs_alloc_path();
if (!path) {
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return -ENOMEM;
}
@@ -1243,8 +1248,7 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
- btrfs_release_delayed_node(delayed_node);
- btrfs_free_path(path);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
trans->block_rsv = block_rsv;
return ret;
@@ -1254,18 +1258,20 @@ int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_trans_handle *trans;
- struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
+ struct btrfs_ref_tracker delayed_node_tracker;
+ struct btrfs_delayed_node *delayed_node;
struct btrfs_path *path;
struct btrfs_block_rsv *block_rsv;
int ret;
+ delayed_node = btrfs_get_delayed_node(inode, &delayed_node_tracker);
if (!delayed_node)
return 0;
mutex_lock(&delayed_node->mutex);
if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
mutex_unlock(&delayed_node->mutex);
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return 0;
}
mutex_unlock(&delayed_node->mutex);
@@ -1299,7 +1305,7 @@ trans_out:
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
out:
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return ret;
}
@@ -1313,7 +1319,8 @@ void btrfs_remove_delayed_node(struct btrfs_inode *inode)
return;
inode->delayed_node = NULL;
- btrfs_release_delayed_node(delayed_node);
+
+ btrfs_release_delayed_node(delayed_node, &delayed_node->inode_cache_tracker);
}
struct btrfs_async_delayed_work {
@@ -1329,6 +1336,7 @@ static void btrfs_async_run_delayed_root(struct btrfs_work *work)
struct btrfs_trans_handle *trans;
struct btrfs_path *path;
struct btrfs_delayed_node *delayed_node = NULL;
+ struct btrfs_ref_tracker delayed_node_tracker;
struct btrfs_root *root;
struct btrfs_block_rsv *block_rsv;
int total_done = 0;
@@ -1345,7 +1353,8 @@ static void btrfs_async_run_delayed_root(struct btrfs_work *work)
BTRFS_DELAYED_BACKGROUND / 2)
break;
- delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
+ delayed_node = btrfs_first_prepared_delayed_node(delayed_root,
+ &delayed_node_tracker);
if (!delayed_node)
break;
@@ -1354,7 +1363,8 @@ static void btrfs_async_run_delayed_root(struct btrfs_work *work)
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
btrfs_release_path(path);
- btrfs_release_prepared_delayed_node(delayed_node);
+ btrfs_release_prepared_delayed_node(delayed_node,
+ &delayed_node_tracker);
total_done++;
continue;
}
@@ -1369,7 +1379,8 @@ static void btrfs_async_run_delayed_root(struct btrfs_work *work)
btrfs_btree_balance_dirty_nodelay(root->fs_info);
btrfs_release_path(path);
- btrfs_release_prepared_delayed_node(delayed_node);
+ btrfs_release_prepared_delayed_node(delayed_node,
+ &delayed_node_tracker);
total_done++;
} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
@@ -1401,20 +1412,28 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
- WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
+ struct btrfs_ref_tracker delayed_node_tracker;
+ struct btrfs_delayed_node *node;
+
+ node = btrfs_first_delayed_node(fs_info->delayed_root, &delayed_node_tracker);
+ if (WARN_ON(node)) {
+ btrfs_delayed_node_ref_tracker_free(node,
+ &delayed_node_tracker);
+ refcount_dec(&node->refs);
+ }
}
-static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
+static bool could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
int val = atomic_read(&delayed_root->items_seq);
if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
- return 1;
+ return true;
if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
- return 1;
+ return true;
- return 0;
+ return false;
}
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
@@ -1475,13 +1494,14 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = trans->fs_info;
const unsigned int leaf_data_size = BTRFS_LEAF_DATA_SIZE(fs_info);
struct btrfs_delayed_node *delayed_node;
+ struct btrfs_ref_tracker delayed_node_tracker;
struct btrfs_delayed_item *delayed_item;
struct btrfs_dir_item *dir_item;
bool reserve_leaf_space;
u32 data_len;
int ret;
- delayed_node = btrfs_get_or_create_delayed_node(dir);
+ delayed_node = btrfs_get_or_create_delayed_node(dir, &delayed_node_tracker);
if (IS_ERR(delayed_node))
return PTR_ERR(delayed_node);
@@ -1557,13 +1577,12 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
mutex_unlock(&delayed_node->mutex);
release_node:
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return ret;
}
-static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
- struct btrfs_delayed_node *node,
- u64 index)
+static bool btrfs_delete_delayed_insertion_item(struct btrfs_delayed_node *node,
+ u64 index)
{
struct btrfs_delayed_item *item;
@@ -1571,7 +1590,7 @@ static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, index);
if (!item) {
mutex_unlock(&node->mutex);
- return 1;
+ return false;
}
/*
@@ -1606,23 +1625,25 @@ static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
}
mutex_unlock(&node->mutex);
- return 0;
+ return true;
}
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
struct btrfs_inode *dir, u64 index)
{
struct btrfs_delayed_node *node;
+ struct btrfs_ref_tracker delayed_node_tracker;
struct btrfs_delayed_item *item;
int ret;
- node = btrfs_get_or_create_delayed_node(dir);
+ node = btrfs_get_or_create_delayed_node(dir, &delayed_node_tracker);
if (IS_ERR(node))
return PTR_ERR(node);
- ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node, index);
- if (!ret)
+ if (btrfs_delete_delayed_insertion_item(node, index)) {
+ ret = 0;
goto end;
+ }
item = btrfs_alloc_delayed_item(0, node, BTRFS_DELAYED_DELETION_ITEM);
if (!item) {
@@ -1639,7 +1660,8 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
*/
if (ret < 0) {
btrfs_err(trans->fs_info,
-"metadata reservation failed for delayed dir item deltiona, should have been reserved");
+"metadata reservation failed for delayed dir item deletion, index: %llu, root: %llu, inode: %llu, error: %d",
+ index, btrfs_root_id(node->root), node->inode_id, ret);
btrfs_release_delayed_item(item);
goto end;
}
@@ -1648,22 +1670,23 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
ret = __btrfs_add_delayed_item(node, item);
if (unlikely(ret)) {
btrfs_err(trans->fs_info,
- "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
- index, btrfs_root_id(node->root),
- node->inode_id, ret);
+"failed to add delayed dir index item, root: %llu, inode: %llu, index: %llu, error: %d",
+ btrfs_root_id(node->root), node->inode_id, index, ret);
btrfs_delayed_item_release_metadata(dir->root, item);
btrfs_release_delayed_item(item);
}
mutex_unlock(&node->mutex);
end:
- btrfs_release_delayed_node(node);
+ btrfs_release_delayed_node(node, &delayed_node_tracker);
return ret;
}
int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
- struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
+ struct btrfs_ref_tracker delayed_node_tracker;
+ struct btrfs_delayed_node *delayed_node;
+ delayed_node = btrfs_get_delayed_node(inode, &delayed_node_tracker);
if (!delayed_node)
return -ENOENT;
@@ -1673,12 +1696,12 @@ int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
* is updated now. So we needn't lock the delayed node.
*/
if (!delayed_node->index_cnt) {
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return -EINVAL;
}
inode->index_cnt = delayed_node->index_cnt;
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return 0;
}
@@ -1689,8 +1712,9 @@ bool btrfs_readdir_get_delayed_items(struct btrfs_inode *inode,
{
struct btrfs_delayed_node *delayed_node;
struct btrfs_delayed_item *item;
+ struct btrfs_ref_tracker delayed_node_tracker;
- delayed_node = btrfs_get_delayed_node(inode);
+ delayed_node = btrfs_get_delayed_node(inode, &delayed_node_tracker);
if (!delayed_node)
return false;
@@ -1725,6 +1749,7 @@ bool btrfs_readdir_get_delayed_items(struct btrfs_inode *inode,
* insert/delete delayed items in this period. So we also needn't
* requeue or dequeue this delayed node.
*/
+ btrfs_delayed_node_ref_tracker_free(delayed_node, &delayed_node_tracker);
refcount_dec(&delayed_node->refs);
return true;
@@ -1755,17 +1780,16 @@ void btrfs_readdir_put_delayed_items(struct btrfs_inode *inode,
downgrade_write(&inode->vfs_inode.i_rwsem);
}
-int btrfs_should_delete_dir_index(const struct list_head *del_list,
- u64 index)
+bool btrfs_should_delete_dir_index(const struct list_head *del_list, u64 index)
{
struct btrfs_delayed_item *curr;
- int ret = 0;
+ bool ret = false;
list_for_each_entry(curr, del_list, readdir_list) {
if (curr->index > index)
break;
if (curr->index == index) {
- ret = 1;
+ ret = true;
break;
}
}
@@ -1775,15 +1799,14 @@ int btrfs_should_delete_dir_index(const struct list_head *del_list,
/*
* Read dir info stored in the delayed tree.
*/
-int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
- const struct list_head *ins_list)
+bool btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
+ const struct list_head *ins_list)
{
struct btrfs_dir_item *di;
struct btrfs_delayed_item *curr, *next;
struct btrfs_key location;
char *name;
int name_len;
- int over = 0;
unsigned char d_type;
/*
@@ -1792,6 +1815,8 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
* directory, nobody can delete any directory indexes now.
*/
list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
+ bool over;
+
list_del(&curr->readdir_list);
if (curr->index < ctx->pos) {
@@ -1809,116 +1834,112 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
d_type = fs_ftype_to_dtype(btrfs_dir_flags_to_ftype(di->type));
btrfs_disk_key_to_cpu(&location, &di->location);
- over = !dir_emit(ctx, name, name_len,
- location.objectid, d_type);
+ over = !dir_emit(ctx, name, name_len, location.objectid, d_type);
if (refcount_dec_and_test(&curr->refs))
kfree(curr);
if (over)
- return 1;
+ return true;
ctx->pos++;
}
- return 0;
+ return false;
}
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode_item *inode_item,
- struct inode *inode)
+ struct btrfs_inode *inode)
{
+ struct inode *vfs_inode = &inode->vfs_inode;
u64 flags;
- btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
- btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
- btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
- btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
- btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
- btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
- btrfs_set_stack_inode_generation(inode_item,
- BTRFS_I(inode)->generation);
+ btrfs_set_stack_inode_uid(inode_item, i_uid_read(vfs_inode));
+ btrfs_set_stack_inode_gid(inode_item, i_gid_read(vfs_inode));
+ btrfs_set_stack_inode_size(inode_item, inode->disk_i_size);
+ btrfs_set_stack_inode_mode(inode_item, vfs_inode->i_mode);
+ btrfs_set_stack_inode_nlink(inode_item, vfs_inode->i_nlink);
+ btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(vfs_inode));
+ btrfs_set_stack_inode_generation(inode_item, inode->generation);
btrfs_set_stack_inode_sequence(inode_item,
- inode_peek_iversion(inode));
+ inode_peek_iversion(vfs_inode));
btrfs_set_stack_inode_transid(inode_item, trans->transid);
- btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
- flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
- BTRFS_I(inode)->ro_flags);
+ btrfs_set_stack_inode_rdev(inode_item, vfs_inode->i_rdev);
+ flags = btrfs_inode_combine_flags(inode->flags, inode->ro_flags);
btrfs_set_stack_inode_flags(inode_item, flags);
btrfs_set_stack_inode_block_group(inode_item, 0);
btrfs_set_stack_timespec_sec(&inode_item->atime,
- inode_get_atime_sec(inode));
+ inode_get_atime_sec(vfs_inode));
btrfs_set_stack_timespec_nsec(&inode_item->atime,
- inode_get_atime_nsec(inode));
+ inode_get_atime_nsec(vfs_inode));
btrfs_set_stack_timespec_sec(&inode_item->mtime,
- inode_get_mtime_sec(inode));
+ inode_get_mtime_sec(vfs_inode));
btrfs_set_stack_timespec_nsec(&inode_item->mtime,
- inode_get_mtime_nsec(inode));
+ inode_get_mtime_nsec(vfs_inode));
btrfs_set_stack_timespec_sec(&inode_item->ctime,
- inode_get_ctime_sec(inode));
+ inode_get_ctime_sec(vfs_inode));
btrfs_set_stack_timespec_nsec(&inode_item->ctime,
- inode_get_ctime_nsec(inode));
+ inode_get_ctime_nsec(vfs_inode));
- btrfs_set_stack_timespec_sec(&inode_item->otime, BTRFS_I(inode)->i_otime_sec);
- btrfs_set_stack_timespec_nsec(&inode_item->otime, BTRFS_I(inode)->i_otime_nsec);
+ btrfs_set_stack_timespec_sec(&inode_item->otime, inode->i_otime_sec);
+ btrfs_set_stack_timespec_nsec(&inode_item->otime, inode->i_otime_nsec);
}
-int btrfs_fill_inode(struct inode *inode, u32 *rdev)
+int btrfs_fill_inode(struct btrfs_inode *inode, u32 *rdev)
{
- struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
struct btrfs_delayed_node *delayed_node;
+ struct btrfs_ref_tracker delayed_node_tracker;
struct btrfs_inode_item *inode_item;
+ struct inode *vfs_inode = &inode->vfs_inode;
- delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
+ delayed_node = btrfs_get_delayed_node(inode, &delayed_node_tracker);
if (!delayed_node)
return -ENOENT;
mutex_lock(&delayed_node->mutex);
if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
mutex_unlock(&delayed_node->mutex);
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return -ENOENT;
}
inode_item = &delayed_node->inode_item;
- i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
- i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
- btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
- btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
- round_up(i_size_read(inode), fs_info->sectorsize));
- inode->i_mode = btrfs_stack_inode_mode(inode_item);
- set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
- inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
- BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
- BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
-
- inode_set_iversion_queried(inode,
- btrfs_stack_inode_sequence(inode_item));
- inode->i_rdev = 0;
+ i_uid_write(vfs_inode, btrfs_stack_inode_uid(inode_item));
+ i_gid_write(vfs_inode, btrfs_stack_inode_gid(inode_item));
+ btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
+ vfs_inode->i_mode = btrfs_stack_inode_mode(inode_item);
+ set_nlink(vfs_inode, btrfs_stack_inode_nlink(inode_item));
+ inode_set_bytes(vfs_inode, btrfs_stack_inode_nbytes(inode_item));
+ inode->generation = btrfs_stack_inode_generation(inode_item);
+ inode->last_trans = btrfs_stack_inode_transid(inode_item);
+
+ inode_set_iversion_queried(vfs_inode, btrfs_stack_inode_sequence(inode_item));
+ vfs_inode->i_rdev = 0;
*rdev = btrfs_stack_inode_rdev(inode_item);
btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
- &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
+ &inode->flags, &inode->ro_flags);
- inode_set_atime(inode, btrfs_stack_timespec_sec(&inode_item->atime),
+ inode_set_atime(vfs_inode, btrfs_stack_timespec_sec(&inode_item->atime),
btrfs_stack_timespec_nsec(&inode_item->atime));
- inode_set_mtime(inode, btrfs_stack_timespec_sec(&inode_item->mtime),
+ inode_set_mtime(vfs_inode, btrfs_stack_timespec_sec(&inode_item->mtime),
btrfs_stack_timespec_nsec(&inode_item->mtime));
- inode_set_ctime(inode, btrfs_stack_timespec_sec(&inode_item->ctime),
+ inode_set_ctime(vfs_inode, btrfs_stack_timespec_sec(&inode_item->ctime),
btrfs_stack_timespec_nsec(&inode_item->ctime));
- BTRFS_I(inode)->i_otime_sec = btrfs_stack_timespec_sec(&inode_item->otime);
- BTRFS_I(inode)->i_otime_nsec = btrfs_stack_timespec_nsec(&inode_item->otime);
+ inode->i_otime_sec = btrfs_stack_timespec_sec(&inode_item->otime);
+ inode->i_otime_nsec = btrfs_stack_timespec_nsec(&inode_item->otime);
- inode->i_generation = BTRFS_I(inode)->generation;
- if (S_ISDIR(inode->i_mode))
- BTRFS_I(inode)->index_cnt = (u64)-1;
+ vfs_inode->i_generation = inode->generation;
+ if (S_ISDIR(vfs_inode->i_mode))
+ inode->index_cnt = (u64)-1;
mutex_unlock(&delayed_node->mutex);
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return 0;
}
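
For context, the hunks above convert these helpers to take struct btrfs_inode directly and reach the VFS inode through the embedded vfs_inode member rather than calling BTRFS_I() at every use. A minimal sketch of the relationship being relied on (fields trimmed; not part of this patch):

struct btrfs_inode {
	/* ... btrfs-specific fields used above: flags, generation, i_otime_sec, ... */
	struct inode vfs_inode;		/* embedded VFS inode */
};

static inline struct btrfs_inode *BTRFS_I(const struct inode *inode)
{
	return container_of(inode, struct btrfs_inode, vfs_inode);
}
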
@@ -1927,16 +1948,16 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
{
struct btrfs_root *root = inode->root;
struct btrfs_delayed_node *delayed_node;
+ struct btrfs_ref_tracker delayed_node_tracker;
int ret = 0;
- delayed_node = btrfs_get_or_create_delayed_node(inode);
+ delayed_node = btrfs_get_or_create_delayed_node(inode, &delayed_node_tracker);
if (IS_ERR(delayed_node))
return PTR_ERR(delayed_node);
mutex_lock(&delayed_node->mutex);
if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
- fill_stack_inode_item(trans, &delayed_node->inode_item,
- &inode->vfs_inode);
+ fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
goto release_node;
}
@@ -1944,13 +1965,13 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
if (ret)
goto release_node;
- fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
+ fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
delayed_node->count++;
atomic_inc(&root->fs_info->delayed_root->items);
release_node:
mutex_unlock(&delayed_node->mutex);
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return ret;
}
@@ -1958,6 +1979,7 @@ int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_delayed_node *delayed_node;
+ struct btrfs_ref_tracker delayed_node_tracker;
/*
* we don't do delayed inode updates during log recovery because it
@@ -1967,7 +1989,7 @@ int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
return -EAGAIN;
- delayed_node = btrfs_get_or_create_delayed_node(inode);
+ delayed_node = btrfs_get_or_create_delayed_node(inode, &delayed_node_tracker);
if (IS_ERR(delayed_node))
return PTR_ERR(delayed_node);
@@ -1986,15 +2008,12 @@ int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
* It is very rare.
*/
mutex_lock(&delayed_node->mutex);
- if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
- goto release_node;
-
- set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
- delayed_node->count++;
- atomic_inc(&fs_info->delayed_root->items);
-release_node:
+ if (!test_and_set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
+ delayed_node->count++;
+ atomic_inc(&fs_info->delayed_root->items);
+ }
mutex_unlock(&delayed_node->mutex);
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return 0;
}
@@ -2038,19 +2057,21 @@ static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
struct btrfs_delayed_node *delayed_node;
+ struct btrfs_ref_tracker delayed_node_tracker;
- delayed_node = btrfs_get_delayed_node(inode);
+ delayed_node = btrfs_get_delayed_node(inode, &delayed_node_tracker);
if (!delayed_node)
return;
__btrfs_kill_delayed_node(delayed_node);
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
}
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
unsigned long index = 0;
struct btrfs_delayed_node *delayed_nodes[8];
+ struct btrfs_ref_tracker delayed_node_trackers[8];
while (1) {
struct btrfs_delayed_node *node;
@@ -2069,6 +2090,9 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
* about to be removed from the tree in the loop below
*/
if (refcount_inc_not_zero(&node->refs)) {
+ btrfs_delayed_node_ref_tracker_alloc(node,
+ &delayed_node_trackers[count],
+ GFP_ATOMIC);
delayed_nodes[count] = node;
count++;
}
@@ -2080,7 +2104,9 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
for (int i = 0; i < count; i++) {
__btrfs_kill_delayed_node(delayed_nodes[i]);
- btrfs_release_delayed_node(delayed_nodes[i]);
+ btrfs_delayed_node_ref_tracker_dir_print(delayed_nodes[i]);
+ btrfs_release_delayed_node(delayed_nodes[i],
+ &delayed_node_trackers[i]);
}
}
}
@@ -2088,14 +2114,17 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
struct btrfs_delayed_node *curr_node, *prev_node;
+ struct btrfs_ref_tracker curr_delayed_node_tracker, prev_delayed_node_tracker;
- curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
+ curr_node = btrfs_first_delayed_node(fs_info->delayed_root,
+ &curr_delayed_node_tracker);
while (curr_node) {
__btrfs_kill_delayed_node(curr_node);
prev_node = curr_node;
- curr_node = btrfs_next_delayed_node(curr_node);
- btrfs_release_delayed_node(prev_node);
+ prev_delayed_node_tracker = curr_delayed_node_tracker;
+ curr_node = btrfs_next_delayed_node(curr_node, &curr_delayed_node_tracker);
+ btrfs_release_delayed_node(prev_node, &prev_delayed_node_tracker);
}
}
@@ -2105,8 +2134,9 @@ void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
{
struct btrfs_delayed_node *node;
struct btrfs_delayed_item *item;
+ struct btrfs_ref_tracker delayed_node_tracker;
- node = btrfs_get_delayed_node(inode);
+ node = btrfs_get_delayed_node(inode, &delayed_node_tracker);
if (!node)
return;
@@ -2164,6 +2194,7 @@ void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
* delete delayed items.
*/
ASSERT(refcount_read(&node->refs) > 1);
+ btrfs_delayed_node_ref_tracker_free(node, &delayed_node_tracker);
refcount_dec(&node->refs);
}
@@ -2174,8 +2205,9 @@ void btrfs_log_put_delayed_items(struct btrfs_inode *inode,
struct btrfs_delayed_node *node;
struct btrfs_delayed_item *item;
struct btrfs_delayed_item *next;
+ struct btrfs_ref_tracker delayed_node_tracker;
- node = btrfs_get_delayed_node(inode);
+ node = btrfs_get_delayed_node(inode, &delayed_node_tracker);
if (!node)
return;
@@ -2207,5 +2239,6 @@ void btrfs_log_put_delayed_items(struct btrfs_inode *inode,
* delete delayed items.
*/
ASSERT(refcount_read(&node->refs) > 1);
+ btrfs_delayed_node_ref_tracker_free(node, &delayed_node_tracker);
refcount_dec(&node->refs);
}
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index f4d9feac0d0e..b09d4ec8c77d 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -16,6 +16,7 @@
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
+#include <linux/ref_tracker.h>
#include "ctree.h"
struct btrfs_disk_key;
@@ -44,6 +45,22 @@ struct btrfs_delayed_root {
wait_queue_head_t wait;
};
+struct btrfs_ref_tracker_dir {
+#ifdef CONFIG_BTRFS_DEBUG
+ struct ref_tracker_dir dir;
+#else
+ struct {} tracker;
+#endif
+};
+
+struct btrfs_ref_tracker {
+#ifdef CONFIG_BTRFS_DEBUG
+ struct ref_tracker *tracker;
+#else
+ struct {} tracker;
+#endif
+};
+
#define BTRFS_DELAYED_NODE_IN_LIST 0
#define BTRFS_DELAYED_NODE_INODE_DIRTY 1
#define BTRFS_DELAYED_NODE_DEL_IREF 2
@@ -78,6 +95,12 @@ struct btrfs_delayed_node {
* actual number of leaves we end up using. Protected by @mutex.
*/
u32 index_item_leaves;
+ /* Track all references to this delayed node. */
+ struct btrfs_ref_tracker_dir ref_dir;
+ /* Track delayed node reference stored in node list. */
+ struct btrfs_ref_tracker node_list_tracker;
+ /* Track delayed node reference stored in inode cache. */
+ struct btrfs_ref_tracker inode_cache_tracker;
};
struct btrfs_delayed_item {
@@ -133,7 +156,7 @@ int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode);
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode);
-int btrfs_fill_inode(struct inode *inode, u32 *rdev);
+int btrfs_fill_inode(struct btrfs_inode *inode, u32 *rdev);
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode);
/* Used for drop dead root */
@@ -150,10 +173,9 @@ bool btrfs_readdir_get_delayed_items(struct btrfs_inode *inode,
void btrfs_readdir_put_delayed_items(struct btrfs_inode *inode,
struct list_head *ins_list,
struct list_head *del_list);
-int btrfs_should_delete_dir_index(const struct list_head *del_list,
- u64 index);
-int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
- const struct list_head *ins_list);
+bool btrfs_should_delete_dir_index(const struct list_head *del_list, u64 index);
+bool btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
+ const struct list_head *ins_list);
/* Used during directory logging. */
void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
@@ -170,4 +192,81 @@ void __cold btrfs_delayed_inode_exit(void);
/* for debugging */
void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info);
+#define BTRFS_DELAYED_NODE_REF_TRACKER_QUARANTINE_COUNT 16
+#define BTRFS_DELAYED_NODE_REF_TRACKER_DISPLAY_LIMIT 16
+
+#ifdef CONFIG_BTRFS_DEBUG
+static inline void btrfs_delayed_node_ref_tracker_dir_init(struct btrfs_delayed_node *node)
+{
+ if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER))
+ return;
+
+ ref_tracker_dir_init(&node->ref_dir.dir,
+ BTRFS_DELAYED_NODE_REF_TRACKER_QUARANTINE_COUNT,
+ "delayed_node");
+}
+
+static inline void btrfs_delayed_node_ref_tracker_dir_exit(struct btrfs_delayed_node *node)
+{
+ if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER))
+ return;
+
+ ref_tracker_dir_exit(&node->ref_dir.dir);
+}
+
+static inline void btrfs_delayed_node_ref_tracker_dir_print(struct btrfs_delayed_node *node)
+{
+ if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER))
+ return;
+
+ /*
+ * Only print if there are leaked references. The caller is
+ * holding one reference, so if refs == 1 there is no leak.
+ */
+ if (refcount_read(&node->refs) == 1)
+ return;
+
+ ref_tracker_dir_print(&node->ref_dir.dir,
+ BTRFS_DELAYED_NODE_REF_TRACKER_DISPLAY_LIMIT);
+}
+
+static inline int btrfs_delayed_node_ref_tracker_alloc(struct btrfs_delayed_node *node,
+ struct btrfs_ref_tracker *tracker,
+ gfp_t gfp)
+{
+ if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER))
+ return 0;
+
+ return ref_tracker_alloc(&node->ref_dir.dir, &tracker->tracker, gfp);
+}
+
+static inline int btrfs_delayed_node_ref_tracker_free(struct btrfs_delayed_node *node,
+ struct btrfs_ref_tracker *tracker)
+{
+ if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER))
+ return 0;
+
+ return ref_tracker_free(&node->ref_dir.dir, &tracker->tracker);
+}
+#else
+static inline void btrfs_delayed_node_ref_tracker_dir_init(struct btrfs_delayed_node *node) { }
+
+static inline void btrfs_delayed_node_ref_tracker_dir_exit(struct btrfs_delayed_node *node) { }
+
+static inline void btrfs_delayed_node_ref_tracker_dir_print(struct btrfs_delayed_node *node) { }
+
+static inline int btrfs_delayed_node_ref_tracker_alloc(struct btrfs_delayed_node *node,
+ struct btrfs_ref_tracker *tracker,
+ gfp_t gfp)
+{
+ return 0;
+}
+
+static inline int btrfs_delayed_node_ref_tracker_free(struct btrfs_delayed_node *node,
+ struct btrfs_ref_tracker *tracker)
+{
+ return 0;
+}
+#endif
+
#endif
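
The helpers above are no-ops unless CONFIG_BTRFS_DEBUG and the REF_TRACKER mount option are both enabled. A usage sketch of the expected pairing, mirroring the delayed-inode.c call sites converted earlier in this patch (the function name is illustrative only):

static int example_use_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_ref_tracker tracker;

	/* Takes a reference and registers it with the node's ref_dir. */
	node = btrfs_get_delayed_node(inode, &tracker);
	if (!node)
		return -ENOENT;

	/* ... work with the node, typically under node->mutex ... */

	/* Drops the reference and frees the tracker filled in above. */
	btrfs_release_delayed_node(node, &tracker);
	return 0;
}
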
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 0d878dbbabba..e8bc37453336 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -93,6 +93,9 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
u64 num_bytes;
u64 reserved_bytes;
+ if (btrfs_is_testing(fs_info))
+ return;
+
num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, trans->delayed_ref_updates);
num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info,
trans->delayed_ref_csum_deletions);
@@ -225,7 +228,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
if (!num_bytes)
return 0;
- ret = btrfs_reserve_metadata_bytes(fs_info, space_info, num_bytes, flush);
+ ret = btrfs_reserve_metadata_bytes(space_info, num_bytes, flush);
if (ret)
return ret;
@@ -254,7 +257,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
spin_unlock(&block_rsv->lock);
if (to_free > 0)
- btrfs_space_info_free_bytes_may_use(fs_info, space_info, to_free);
+ btrfs_space_info_free_bytes_may_use(space_info, to_free);
if (refilled_bytes > 0)
trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0,
@@ -265,8 +268,8 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
/*
* compare two delayed data backrefs with same bytenr and type
*/
-static int comp_data_refs(struct btrfs_delayed_ref_node *ref1,
- struct btrfs_delayed_ref_node *ref2)
+static int comp_data_refs(const struct btrfs_delayed_ref_node *ref1,
+ const struct btrfs_delayed_ref_node *ref2)
{
if (ref1->data_ref.objectid < ref2->data_ref.objectid)
return -1;
@@ -279,8 +282,8 @@ static int comp_data_refs(struct btrfs_delayed_ref_node *ref1,
return 0;
}
-static int comp_refs(struct btrfs_delayed_ref_node *ref1,
- struct btrfs_delayed_ref_node *ref2,
+static int comp_refs(const struct btrfs_delayed_ref_node *ref1,
+ const struct btrfs_delayed_ref_node *ref2,
bool check_seq)
{
int ret = 0;
@@ -314,35 +317,23 @@ static int comp_refs(struct btrfs_delayed_ref_node *ref1,
return 0;
}
+static int cmp_refs_node(const struct rb_node *new, const struct rb_node *exist)
+{
+ const struct btrfs_delayed_ref_node *new_node =
+ rb_entry(new, struct btrfs_delayed_ref_node, ref_node);
+ const struct btrfs_delayed_ref_node *exist_node =
+ rb_entry(exist, struct btrfs_delayed_ref_node, ref_node);
+
+ return comp_refs(new_node, exist_node, true);
+}
+
static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
struct btrfs_delayed_ref_node *ins)
{
- struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *node = &ins->ref_node;
- struct rb_node *parent_node = NULL;
- struct btrfs_delayed_ref_node *entry;
- bool leftmost = true;
-
- while (*p) {
- int comp;
-
- parent_node = *p;
- entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
- ref_node);
- comp = comp_refs(ins, entry, true);
- if (comp < 0) {
- p = &(*p)->rb_left;
- } else if (comp > 0) {
- p = &(*p)->rb_right;
- leftmost = false;
- } else {
- return entry;
- }
- }
+ struct rb_node *exist = rb_find_add_cached(node, root, cmp_refs_node);
- rb_link_node(node, parent_node, p);
- rb_insert_color_cached(node, root, leftmost);
- return NULL;
+ return rb_entry_safe(exist, struct btrfs_delayed_ref_node, ref_node);
}
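
rb_find_add_cached() performs the walk, link and recolor internally and returns the colliding node, or NULL when the insert succeeded. A minimal sketch of the same pattern with a hypothetical struct, assuming the <linux/rbtree.h> helper with the (new, exist) comparator shape used above:

struct demo_item {
	u64 key;
	struct rb_node rb;
};

static int demo_cmp(const struct rb_node *new, const struct rb_node *exist)
{
	const struct demo_item *a = rb_entry(new, struct demo_item, rb);
	const struct demo_item *b = rb_entry(exist, struct demo_item, rb);

	if (a->key < b->key)
		return -1;
	if (a->key > b->key)
		return 1;
	return 0;
}

/* Returns an already-inserted duplicate, or NULL if @item was linked. */
static struct demo_item *demo_insert(struct rb_root_cached *root, struct demo_item *item)
{
	return rb_entry_safe(rb_find_add_cached(&item->rb, root, demo_cmp),
			     struct demo_item, rb);
}
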
static struct btrfs_delayed_ref_head *find_first_ref_head(
@@ -555,6 +546,32 @@ void btrfs_delete_ref_head(const struct btrfs_fs_info *fs_info,
delayed_refs->num_heads_ready--;
}
+struct btrfs_delayed_ref_node *btrfs_select_delayed_ref(struct btrfs_delayed_ref_head *head)
+{
+ struct btrfs_delayed_ref_node *ref;
+
+ lockdep_assert_held(&head->mutex);
+ lockdep_assert_held(&head->lock);
+
+ if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
+ return NULL;
+
+ /*
+ * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
+ * This is to prevent a ref count from going down to zero, which deletes
+ * the extent item from the extent tree, while there are still references
+ * to add, which would then fail because they would not find the extent item.
+ */
+ if (!list_empty(&head->ref_add_list))
+ return list_first_entry(&head->ref_add_list,
+ struct btrfs_delayed_ref_node, add_list);
+
+ ref = rb_entry(rb_first_cached(&head->ref_tree),
+ struct btrfs_delayed_ref_node, ref_node);
+ ASSERT(list_empty(&ref->add_list));
+ return ref;
+}
+
/*
* Helper to insert the ref_node to the tail or merge with tail.
*
@@ -781,9 +798,13 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
}
/*
- * helper function to actually insert a head node into the rbtree.
- * this does all the dirty work in terms of maintaining the correct
- * overall modification count.
+ * Helper function to actually insert a head node into the xarray. This does all
+ * the dirty work in terms of maintaining the correct overall modification
+ * count.
+ *
+ * The caller is responsible for calling kfree() on @qrecord. More specifically,
+ * if this function reports through @qrecord_inserted_ret that it did not
+ * insert the record, then it is safe to call kfree() on it.
*
* Returns an error pointer in case of an error.
*/
@@ -797,7 +818,14 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *existing;
struct btrfs_delayed_ref_root *delayed_refs;
const unsigned long index = (head_ref->bytenr >> fs_info->sectorsize_bits);
- bool qrecord_inserted = false;
+
+ /*
+ * If 'qrecord_inserted_ret' is provided, the first thing we need to do
+ * is initialize it to false, in case we exit before trying to insert
+ * the record.
+ */
+ if (qrecord_inserted_ret)
+ *qrecord_inserted_ret = false;
delayed_refs = &trans->transaction->delayed_refs;
lockdep_assert_held(&delayed_refs->lock);
@@ -816,6 +844,12 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
/* Record qgroup extent info if provided */
if (qrecord) {
+ /*
+ * Setting 'qrecord' but not 'qrecord_inserted_ret' will likely
+ * result in a memory leak.
+ */
+ ASSERT(qrecord_inserted_ret != NULL);
+
int ret;
ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, qrecord,
@@ -823,12 +857,10 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
if (ret) {
/* Clean up if insertion fails or item exists. */
xa_release(&delayed_refs->dirty_extents, index);
- /* Caller responsible for freeing qrecord on error. */
if (ret < 0)
return ERR_PTR(ret);
- kfree(qrecord);
- } else {
- qrecord_inserted = true;
+ } else if (qrecord_inserted_ret) {
+ *qrecord_inserted_ret = true;
}
}
@@ -871,14 +903,12 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
delayed_refs->num_heads++;
delayed_refs->num_heads_ready++;
}
- if (qrecord_inserted_ret)
- *qrecord_inserted_ret = qrecord_inserted;
return head_ref;
}
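
Condensed, the @qrecord ownership contract described above looks like this on the caller side (mirroring the add_delayed_ref() hunk later in this patch; error paths trimmed):

	bool qrecord_inserted = false;

	/*
	 * add_delayed_ref_head(..., &qrecord_inserted) only takes ownership
	 * of 'record' when it sets qrecord_inserted to true.
	 */
	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(trans, record, generic_ref->bytenr);

	/* Not inserted: the caller still owns the record and must free it. */
	kfree(record);
	return 0;
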
/*
- * Initialize the structure which represents a modification to a an extent.
+ * Initialize the structure which represents a modification to an extent.
*
* @fs_info: Internal to the mounted filesystem mount structure.
*
@@ -911,7 +941,7 @@ static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
if (action == BTRFS_ADD_DELAYED_EXTENT)
action = BTRFS_ADD_DELAYED_REF;
- if (is_fstree(generic_ref->ref_root))
+ if (btrfs_is_fstree(generic_ref->ref_root))
seq = atomic64_read(&fs_info->tree_mod_seq);
refcount_set(&ref->refs, 1);
@@ -935,14 +965,14 @@ static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 mod_root,
bool skip_qgroup)
{
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
+#ifdef CONFIG_BTRFS_DEBUG
/* If @real_root not set, use @root as fallback */
generic_ref->real_root = mod_root ?: generic_ref->ref_root;
#endif
generic_ref->tree_ref.level = level;
generic_ref->type = BTRFS_REF_METADATA;
- if (skip_qgroup || !(is_fstree(generic_ref->ref_root) &&
- (!mod_root || is_fstree(mod_root))))
+ if (skip_qgroup || !(btrfs_is_fstree(generic_ref->ref_root) &&
+ (!mod_root || btrfs_is_fstree(mod_root))))
generic_ref->skip_qgroup = true;
else
generic_ref->skip_qgroup = false;
@@ -952,15 +982,15 @@ void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 mod_root,
void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ino, u64 offset,
u64 mod_root, bool skip_qgroup)
{
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
+#ifdef CONFIG_BTRFS_DEBUG
/* If @real_root not set, use @root as fallback */
generic_ref->real_root = mod_root ?: generic_ref->ref_root;
#endif
generic_ref->data_ref.objectid = ino;
generic_ref->data_ref.offset = offset;
generic_ref->type = BTRFS_REF_DATA;
- if (skip_qgroup || !(is_fstree(generic_ref->ref_root) &&
- (!mod_root || is_fstree(mod_root))))
+ if (skip_qgroup || !(btrfs_is_fstree(generic_ref->ref_root) &&
+ (!mod_root || btrfs_is_fstree(mod_root))))
generic_ref->skip_qgroup = true;
else
generic_ref->skip_qgroup = false;
@@ -1032,6 +1062,14 @@ static int add_delayed_ref(struct btrfs_trans_handle *trans,
xa_release(&delayed_refs->head_refs, index);
spin_unlock(&delayed_refs->lock);
ret = PTR_ERR(new_head_ref);
+
+ /*
+ * It's only safe to call kfree() on 'qrecord' if
+ * add_delayed_ref_head() has _not_ inserted it for qgroup
+ * tracing, so skip freeing the record when it was inserted.
+ */
+ if (!qrecord_reserved || qrecord_inserted)
+ goto free_head_ref;
goto free_record;
}
head_ref = new_head_ref;
@@ -1054,6 +1092,8 @@ static int add_delayed_ref(struct btrfs_trans_handle *trans,
if (qrecord_inserted)
return btrfs_qgroup_trace_extent_post(trans, record, generic_ref->bytenr);
+
+ kfree(record);
return 0;
free_record:
@@ -1263,7 +1303,7 @@ void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans)
spin_unlock(&delayed_refs->lock);
mutex_unlock(&head->mutex);
- if (pin_bytes) {
+ if (!btrfs_is_testing(fs_info) && pin_bytes) {
struct btrfs_block_group *bg;
bg = btrfs_lookup_block_group(fs_info, head->bytenr);
@@ -1281,8 +1321,7 @@ void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans)
spin_lock(&bg->space_info->lock);
spin_lock(&bg->lock);
bg->pinned += head->num_bytes;
- btrfs_space_info_update_bytes_pinned(fs_info,
- bg->space_info,
+ btrfs_space_info_update_bytes_pinned(bg->space_info,
head->num_bytes);
bg->reserved -= head->num_bytes;
bg->space_info->bytes_reserved -= head->num_bytes;
@@ -1295,12 +1334,15 @@ void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans)
btrfs_error_unpin_extent_range(fs_info, head->bytenr,
head->bytenr + head->num_bytes - 1);
}
- btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
+ if (!btrfs_is_testing(fs_info))
+ btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
btrfs_put_delayed_ref_head(head);
cond_resched();
spin_lock(&delayed_refs->lock);
}
- btrfs_qgroup_destroy_extent_records(trans);
+
+ if (!btrfs_is_testing(fs_info))
+ btrfs_qgroup_destroy_extent_records(trans);
spin_unlock(&delayed_refs->lock);
}
@@ -1316,7 +1358,7 @@ int __init btrfs_delayed_ref_init(void)
{
btrfs_delayed_ref_head_cachep = KMEM_CACHE(btrfs_delayed_ref_head, 0);
if (!btrfs_delayed_ref_head_cachep)
- goto fail;
+ return -ENOMEM;
btrfs_delayed_ref_node_cachep = KMEM_CACHE(btrfs_delayed_ref_node, 0);
if (!btrfs_delayed_ref_node_cachep)
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 611fb3388f82..5ce940532144 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -14,6 +14,8 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <uapi/linux/btrfs_tree.h>
+#include "fs.h"
+#include "messages.h"
struct btrfs_trans_handle;
struct btrfs_fs_info;
@@ -260,7 +262,6 @@ enum btrfs_ref_type {
BTRFS_REF_NOT_SET,
BTRFS_REF_DATA,
BTRFS_REF_METADATA,
- BTRFS_REF_LAST,
} __packed;
struct btrfs_ref {
@@ -275,10 +276,6 @@ struct btrfs_ref {
*/
bool skip_qgroup;
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
- /* Through which root is this modification. */
- u64 real_root;
-#endif
u64 bytenr;
u64 num_bytes;
u64 owning_root;
@@ -295,6 +292,11 @@ struct btrfs_ref {
struct btrfs_data_ref data_ref;
struct btrfs_tree_ref tree_ref;
};
+
+#ifdef CONFIG_BTRFS_DEBUG
+ /* Through which root is this modification. */
+ u64 real_root;
+#endif
};
extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
@@ -402,6 +404,7 @@ struct btrfs_delayed_ref_head *btrfs_select_ref_head(
struct btrfs_delayed_ref_root *delayed_refs);
void btrfs_unselect_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head);
+struct btrfs_delayed_ref_node *btrfs_select_delayed_ref(struct btrfs_delayed_ref_head *head);
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);
@@ -418,7 +421,7 @@ bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head,
u64 root, u64 parent);
void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans);
-static inline u64 btrfs_delayed_ref_owner(struct btrfs_delayed_ref_node *node)
+static inline u64 btrfs_delayed_ref_owner(const struct btrfs_delayed_ref_node *node)
{
if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
node->type == BTRFS_SHARED_DATA_REF_KEY)
@@ -426,7 +429,7 @@ static inline u64 btrfs_delayed_ref_owner(struct btrfs_delayed_ref_node *node)
return node->tree_ref.level;
}
-static inline u64 btrfs_delayed_ref_offset(struct btrfs_delayed_ref_node *node)
+static inline u64 btrfs_delayed_ref_offset(const struct btrfs_delayed_ref_node *node)
{
if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
node->type == BTRFS_SHARED_DATA_REF_KEY)
@@ -434,7 +437,7 @@ static inline u64 btrfs_delayed_ref_offset(struct btrfs_delayed_ref_node *node)
return 0;
}
-static inline u8 btrfs_ref_type(struct btrfs_ref *ref)
+static inline u8 btrfs_ref_type(const struct btrfs_ref *ref)
{
ASSERT(ref->type == BTRFS_REF_DATA || ref->type == BTRFS_REF_METADATA);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index ac8e97ed13f7..b6c7da8e1bc8 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -76,7 +76,7 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
struct extent_buffer *eb;
int slot;
int ret = 0;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
int item_size;
struct btrfs_dev_replace_item *ptr;
u64 src_devid;
@@ -85,10 +85,8 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
return 0;
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!path)
+ return -ENOMEM;
key.objectid = 0;
key.type = BTRFS_DEV_REPLACE_KEY;
@@ -100,13 +98,11 @@ no_valid_dev_replace_entry_found:
* We don't have a replace item or it's corrupted. If there is
* a replace target, fail the mount.
*/
- if (btrfs_find_device(fs_info->fs_devices, &args)) {
+ if (unlikely(btrfs_find_device(fs_info->fs_devices, &args))) {
btrfs_err(fs_info,
"found replace target device without a valid replace item");
- ret = -EUCLEAN;
- goto out;
+ return -EUCLEAN;
}
- ret = 0;
dev_replace->replace_state =
BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
dev_replace->cont_reading_from_srcdev_mode =
@@ -123,7 +119,7 @@ no_valid_dev_replace_entry_found:
dev_replace->tgtdev = NULL;
dev_replace->is_valid = 0;
dev_replace->item_needs_writeback = 0;
- goto out;
+ return 0;
}
slot = path->slots[0];
eb = path->nodes[0];
@@ -162,7 +158,7 @@ no_valid_dev_replace_entry_found:
* We don't have an active replace item but if there is a
* replace target, fail the mount.
*/
- if (btrfs_find_device(fs_info->fs_devices, &args)) {
+ if (unlikely(btrfs_find_device(fs_info->fs_devices, &args))) {
btrfs_err(fs_info,
"replace without active item, run 'device scan --forget' on the target device");
ret = -EUCLEAN;
@@ -181,8 +177,7 @@ no_valid_dev_replace_entry_found:
* allow 'btrfs dev replace_cancel' if src/tgt device is
* missing
*/
- if (!dev_replace->srcdev &&
- !btrfs_test_opt(fs_info, DEGRADED)) {
+ if (unlikely(!dev_replace->srcdev && !btrfs_test_opt(fs_info, DEGRADED))) {
ret = -EIO;
btrfs_warn(fs_info,
"cannot mount because device replace operation is ongoing and");
@@ -190,8 +185,7 @@ no_valid_dev_replace_entry_found:
"srcdev (devid %llu) is missing, need to run 'btrfs dev scan'?",
src_devid);
}
- if (!dev_replace->tgtdev &&
- !btrfs_test_opt(fs_info, DEGRADED)) {
+ if (unlikely(!dev_replace->tgtdev && !btrfs_test_opt(fs_info, DEGRADED))) {
ret = -EIO;
btrfs_warn(fs_info,
"cannot mount because device replace operation is ongoing and");
@@ -226,8 +220,6 @@ no_valid_dev_replace_entry_found:
break;
}
-out:
- btrfs_free_path(path);
return ret;
}
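
BTRFS_PATH_AUTO_FREE() lets the early returns above drop the manual btrfs_free_path() calls; its exact definition is not part of this hunk, but it presumably builds on the kernel's scope-based cleanup helpers. A self-contained sketch of that general pattern using the stock __free(kfree) from <linux/slab.h>:

#include <linux/cleanup.h>
#include <linux/slab.h>

static int demo_scoped_alloc(void)
{
	/* kfree() runs automatically when 'buf' goes out of scope, on every return path. */
	void *buf __free(kfree) = kmalloc(64, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... use buf; no explicit kfree() before early returns ... */
	return 0;
}
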
@@ -256,7 +248,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
}
bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE,
- fs_info->bdev_holder, NULL);
+ fs_info->sb, &fs_holder_ops);
if (IS_ERR(bdev_file)) {
btrfs_err(fs_info, "target device %s is invalid!", device_path);
return PTR_ERR(bdev_file);
@@ -333,7 +325,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
return 0;
error:
- fput(bdev_file);
+ bdev_fput(bdev_file);
return ret;
}
@@ -346,7 +338,7 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
struct btrfs_root *dev_root = fs_info->dev_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *eb;
struct btrfs_dev_replace_item *ptr;
@@ -365,16 +357,15 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
key.offset = 0;
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!path)
+ return -ENOMEM;
+
ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
if (ret < 0) {
btrfs_warn(fs_info,
"error %d while searching for dev_replace item!",
ret);
- goto out;
+ return ret;
}
if (ret == 0 &&
@@ -395,7 +386,7 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
btrfs_warn(fs_info,
"delete too small dev_replace item failed %d!",
ret);
- goto out;
+ return ret;
}
ret = 1;
}
@@ -408,7 +399,7 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
if (ret < 0) {
btrfs_warn(fs_info,
"insert dev_replace item failed %d!", ret);
- goto out;
+ return ret;
}
}
@@ -441,11 +432,6 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
dev_replace->item_needs_writeback = 0;
up_write(&dev_replace->rwsem);
- btrfs_mark_buffer_dirty(trans, eb);
-
-out:
- btrfs_free_path(path);
-
return ret;
}
@@ -503,8 +489,8 @@ static int mark_block_group_to_copy(struct btrfs_fs_info *fs_info,
}
path->reada = READA_FORWARD;
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
key.objectid = src_dev->devid;
key.type = BTRFS_DEV_EXTENT_KEY;
@@ -612,7 +598,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
return PTR_ERR(src_device);
if (btrfs_pinned_by_swapfile(fs_info, src_device)) {
- btrfs_warn_in_rcu(fs_info,
+ btrfs_warn(fs_info,
"cannot replace device %s (devid %llu) due to active swapfile",
btrfs_dev_name(src_device), src_device->devid);
return -ETXTBSY;
@@ -649,7 +635,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
break;
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
- ASSERT(0);
+ DEBUG_WARN("unexpected STARTED or SUSPENDED dev-replace state");
ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED;
up_write(&dev_replace->rwsem);
goto leave;
@@ -659,7 +645,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
dev_replace->srcdev = src_device;
dev_replace->tgtdev = tgt_device;
- btrfs_info_in_rcu(fs_info,
+ btrfs_info(fs_info,
"dev_replace from %s (devid %llu) to %s started",
btrfs_dev_name(src_device),
src_device->devid,
@@ -806,17 +792,17 @@ static int btrfs_set_target_alloc_state(struct btrfs_device *srcdev,
lockdep_assert_held(&srcdev->fs_info->chunk_mutex);
- while (find_first_extent_bit(&srcdev->alloc_state, start,
- &found_start, &found_end,
- CHUNK_ALLOCATED, &cached_state)) {
- ret = set_extent_bit(&tgtdev->alloc_state, found_start,
- found_end, CHUNK_ALLOCATED, NULL);
+ while (btrfs_find_first_extent_bit(&srcdev->alloc_state, start,
+ &found_start, &found_end,
+ CHUNK_ALLOCATED, &cached_state)) {
+ ret = btrfs_set_extent_bit(&tgtdev->alloc_state, found_start,
+ found_end, CHUNK_ALLOCATED, NULL);
if (ret)
break;
start = found_end + 1;
}
- free_extent_state(cached_state);
+ btrfs_free_extent_state(cached_state);
return ret;
}
@@ -955,7 +941,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
tgt_device);
} else {
if (scrub_ret != -ECANCELED)
- btrfs_err_in_rcu(fs_info,
+ btrfs_err(fs_info,
"btrfs_scrub_dev(%s, %llu, %s) failed %d",
btrfs_dev_name(src_device),
src_device->devid,
@@ -973,7 +959,7 @@ error:
return scrub_ret;
}
- btrfs_info_in_rcu(fs_info,
+ btrfs_info(fs_info,
"dev_replace from %s (devid %llu) to %s finished",
btrfs_dev_name(src_device),
src_device->devid,
@@ -1121,7 +1107,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
* btrfs_dev_replace_finishing() will handle the
* cleanup part
*/
- btrfs_info_in_rcu(fs_info,
+ btrfs_info(fs_info,
"dev_replace from %s (devid %llu) to %s canceled",
btrfs_dev_name(src_device), src_device->devid,
btrfs_dev_name(tgt_device));
@@ -1155,7 +1141,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
ret = btrfs_commit_transaction(trans);
WARN_ON(ret);
- btrfs_info_in_rcu(fs_info,
+ btrfs_info(fs_info,
"suspended dev_replace from %s (devid %llu) to %s canceled",
btrfs_dev_name(src_device), src_device->devid,
btrfs_dev_name(tgt_device));
@@ -1259,7 +1245,7 @@ static int btrfs_dev_replace_kthread(void *data)
progress = btrfs_dev_replace_progress(fs_info);
progress = div_u64(progress, 10);
- btrfs_info_in_rcu(fs_info,
+ btrfs_info(fs_info,
"continuing dev_replace from %s (devid %llu) to target %s @%u%%",
btrfs_dev_name(dev_replace->srcdev),
dev_replace->srcdev->devid,
@@ -1277,16 +1263,16 @@ static int btrfs_dev_replace_kthread(void *data)
return 0;
}
-int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
+bool __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
{
if (!dev_replace->is_valid)
- return 0;
+ return false;
switch (dev_replace->replace_state) {
case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
- return 0;
+ return false;
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
/*
@@ -1301,7 +1287,7 @@ int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
*/
break;
}
- return 1;
+ return true;
}
void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount)
diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h
index 23e480efe5e6..b35cecf388f2 100644
--- a/fs/btrfs/dev-replace.h
+++ b/fs/btrfs/dev-replace.h
@@ -25,7 +25,7 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info);
void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info);
int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info);
-int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace);
+bool __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace);
bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
struct btrfs_block_group *cache,
u64 physical);
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 1ea5d8fcfbf7..085a83ae9e62 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -9,6 +9,7 @@
#include "transaction.h"
#include "accessors.h"
#include "dir-item.h"
+#include "delayed-inode.h"
/*
* insert a name into a directory, doing overflow properly if there is a hash
@@ -92,7 +93,6 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
write_extent_buffer(leaf, name, name_ptr, name_len);
write_extent_buffer(leaf, data, data_ptr, data_len);
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
return ret;
}
@@ -112,7 +112,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
int ret = 0;
int ret2 = 0;
struct btrfs_root *root = dir->root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_dir_item *dir_item;
struct extent_buffer *leaf;
unsigned long name_ptr;
@@ -152,7 +152,6 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
name_ptr = (unsigned long)(dir_item + 1);
write_extent_buffer(leaf, name->name, name_ptr, name->len);
- btrfs_mark_buffer_dirty(trans, leaf);
second_insert:
/* FIXME, use some real flag for selecting the extra index */
@@ -165,7 +164,6 @@ second_insert:
ret2 = btrfs_insert_delayed_dir_index(trans, name->name, name->len, dir,
&disk_key, type, index);
out_free:
- btrfs_free_path(path);
if (ret)
return ret;
if (ret2)
@@ -229,7 +227,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
return di;
}
-int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
+int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir_ino,
const struct fscrypt_str *name)
{
int ret;
@@ -238,13 +236,13 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
int data_size;
struct extent_buffer *leaf;
int slot;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- key.objectid = dir;
+ key.objectid = dir_ino;
key.type = BTRFS_DIR_ITEM_KEY;
key.offset = btrfs_name_hash(name->name, name->len);
@@ -253,20 +251,17 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
if (IS_ERR(di)) {
ret = PTR_ERR(di);
/* Nothing found, we're safe */
- if (ret == -ENOENT) {
- ret = 0;
- goto out;
- }
+ if (ret == -ENOENT)
+ return 0;
if (ret < 0)
- goto out;
+ return ret;
}
/* we found an item, look for our name in the item */
if (di) {
/* our exact name was found */
- ret = -EEXIST;
- goto out;
+ return -EEXIST;
}
/* See if there is room in the item to insert this name. */
@@ -275,14 +270,11 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
slot = path->slots[0];
if (data_size + btrfs_item_size(leaf, slot) +
sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root->fs_info)) {
- ret = -EOVERFLOW;
- } else {
- /* plenty of insertion room */
- ret = 0;
+ return -EOVERFLOW;
}
-out:
- btrfs_free_path(path);
- return ret;
+
+ /* Plenty of insertion room. */
+ return 0;
}
/*
diff --git a/fs/btrfs/dir-item.h b/fs/btrfs/dir-item.h
index 28d69970bc70..e52174a8baf9 100644
--- a/fs/btrfs/dir-item.h
+++ b/fs/btrfs/dir-item.h
@@ -10,10 +10,11 @@ struct fscrypt_str;
struct btrfs_fs_info;
struct btrfs_key;
struct btrfs_path;
+struct btrfs_inode;
struct btrfs_root;
struct btrfs_trans_handle;
-int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
+int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir_ino,
const struct fscrypt_str *name);
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
const struct fscrypt_str *name, struct btrfs_inode *dir,
diff --git a/fs/btrfs/direct-io.c b/fs/btrfs/direct-io.c
index a7c3e221378d..07e19e88ba4b 100644
--- a/fs/btrfs/direct-io.c
+++ b/fs/btrfs/direct-io.c
@@ -10,6 +10,8 @@
#include "fs.h"
#include "transaction.h"
#include "volumes.h"
+#include "bio.h"
+#include "ordered-data.h"
struct btrfs_dio_data {
ssize_t submitted;
@@ -42,21 +44,21 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
/* Direct lock must be taken before the extent lock. */
if (nowait) {
- if (!try_lock_dio_extent(io_tree, lockstart, lockend, cached_state))
+ if (!btrfs_try_lock_dio_extent(io_tree, lockstart, lockend, cached_state))
return -EAGAIN;
} else {
- lock_dio_extent(io_tree, lockstart, lockend, cached_state);
+ btrfs_lock_dio_extent(io_tree, lockstart, lockend, cached_state);
}
while (1) {
if (nowait) {
- if (!try_lock_extent(io_tree, lockstart, lockend,
- cached_state)) {
+ if (!btrfs_try_lock_extent(io_tree, lockstart, lockend,
+ cached_state)) {
ret = -EAGAIN;
break;
}
} else {
- lock_extent(io_tree, lockstart, lockend, cached_state);
+ btrfs_lock_extent(io_tree, lockstart, lockend, cached_state);
}
/*
* We're concerned with the entire range that we're going to be
@@ -78,7 +80,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
lockstart, lockend)))
break;
- unlock_extent(io_tree, lockstart, lockend, cached_state);
+ btrfs_unlock_extent(io_tree, lockstart, lockend, cached_state);
if (ordered) {
if (nowait) {
@@ -131,7 +133,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
}
if (ret)
- unlock_dio_extent(io_tree, lockstart, lockend, cached_state);
+ btrfs_unlock_dio_extent(io_tree, lockstart, lockend, cached_state);
return ret;
}
@@ -151,11 +153,11 @@ static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
}
ordered = btrfs_alloc_ordered_extent(inode, start, file_extent,
- (1 << type) |
- (1 << BTRFS_ORDERED_DIRECT));
+ (1U << type) |
+ (1U << BTRFS_ORDERED_DIRECT));
if (IS_ERR(ordered)) {
if (em) {
- free_extent_map(em);
+ btrfs_free_extent_map(em);
btrfs_drop_extent_map_range(inode, start,
start + file_extent->num_bytes - 1, false);
}
@@ -184,7 +186,7 @@ static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
alloc_hint = btrfs_get_extent_allocation_hint(inode, start, len);
again:
ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
- 0, alloc_hint, &ins, 1, 1);
+ 0, alloc_hint, &ins, true, true);
if (ret == -EAGAIN) {
ASSERT(btrfs_is_zoned(fs_info));
wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH,
@@ -204,8 +206,7 @@ again:
BTRFS_ORDERED_REGULAR);
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
if (IS_ERR(em))
- btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
- 1);
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
return em;
}
@@ -246,10 +247,10 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
else
type = BTRFS_ORDERED_NOCOW;
len = min(len, em->len - (start - em->start));
- block_start = extent_map_block_start(em) + (start - em->start);
+ block_start = btrfs_extent_map_block_start(em) + (start - em->start);
- if (can_nocow_extent(inode, start, &len,
- &file_extent, false, false) == 1) {
+ if (can_nocow_extent(BTRFS_I(inode), start, &len, &file_extent,
+ false) == 1) {
bg = btrfs_inc_nocow_writers(fs_info, block_start);
if (bg)
can_nocow = true;
@@ -265,7 +266,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
nowait);
if (ret < 0) {
/* Our caller expects us to free the input extent map. */
- free_extent_map(em);
+ btrfs_free_extent_map(em);
*map = NULL;
btrfs_dec_nocow_writers(bg);
if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
@@ -278,7 +279,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
&file_extent, type);
btrfs_dec_nocow_writers(bg);
if (type == BTRFS_ORDERED_PREALLOC) {
- free_extent_map(em);
+ btrfs_free_extent_map(em);
*map = em2;
em = em2;
}
@@ -291,7 +292,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
dio_data->nocow_done = true;
} else {
/* Our caller expects us to free the input extent map. */
- free_extent_map(em);
+ btrfs_free_extent_map(em);
*map = NULL;
if (nowait) {
@@ -386,7 +387,7 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
* to allocate a contiguous array for the checksums.
*/
if (!write)
- len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS);
+ len = min_t(u64, len, fs_info->sectorsize * BIO_MAX_VECS);
lockstart = start;
lockend = start + len - 1;
@@ -440,8 +441,8 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
start, data_alloc_len, false);
if (!ret)
dio_data->data_space_reserved = true;
- else if (ret && !(BTRFS_I(inode)->flags &
- (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
+ else if (!(BTRFS_I(inode)->flags &
+ (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
goto err;
}
@@ -474,8 +475,8 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
* to buffered IO. Don't blame me, this is the price we pay for using
* the generic code.
*/
- if (extent_map_is_compressed(em) || em->disk_bytenr == EXTENT_MAP_INLINE) {
- free_extent_map(em);
+ if (btrfs_extent_map_is_compressed(em) || em->disk_bytenr == EXTENT_MAP_INLINE) {
+ btrfs_free_extent_map(em);
/*
* If we are in a NOWAIT context, return -EAGAIN in order to
* fallback to buffered IO. This is not only because we can
@@ -516,7 +517,7 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
* after we have submitted bios for all the extents in the range.
*/
if ((flags & IOMAP_NOWAIT) && len < length) {
- free_extent_map(em);
+ btrfs_free_extent_map(em);
ret = -EAGAIN;
goto unlock_err;
}
@@ -558,13 +559,13 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
iomap->addr = IOMAP_NULL_ADDR;
iomap->type = IOMAP_HOLE;
} else {
- iomap->addr = extent_map_block_start(em) + (start - em->start);
+ iomap->addr = btrfs_extent_map_block_start(em) + (start - em->start);
iomap->type = IOMAP_MAPPED;
}
iomap->offset = start;
iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
iomap->length = len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/*
* Reads will hold the EXTENT_DIO_LOCKED bit until the io is completed,
@@ -575,13 +576,13 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
if (write)
unlock_bits |= EXTENT_DIO_LOCKED;
- clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- unlock_bits, &cached_state);
+ btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ unlock_bits, &cached_state);
/* We didn't use everything, unlock the dio extent for the remainder. */
if (!write && (start + len) < lockend)
- unlock_dio_extent(&BTRFS_I(inode)->io_tree, start + len,
- lockend, NULL);
+ btrfs_unlock_dio_extent(&BTRFS_I(inode)->io_tree, start + len,
+ lockend, NULL);
return 0;
@@ -591,8 +592,8 @@ unlock_err:
* to update this, be explicit that we expect EXTENT_LOCKED and
* EXTENT_DIO_LOCKED to be set here, and so that's what we're clearing.
*/
- clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- EXTENT_LOCKED | EXTENT_DIO_LOCKED, &cached_state);
+ btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ EXTENT_LOCKED | EXTENT_DIO_LOCKED, &cached_state);
err:
if (dio_data->data_space_reserved) {
btrfs_free_reserved_data_space(BTRFS_I(inode),
@@ -615,8 +616,8 @@ static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
if (!write && (iomap->type == IOMAP_HOLE)) {
/* If reading from a hole, unlock and return */
- unlock_dio_extent(&BTRFS_I(inode)->io_tree, pos,
- pos + length - 1, NULL);
+ btrfs_unlock_dio_extent(&BTRFS_I(inode)->io_tree, pos,
+ pos + length - 1, NULL);
return 0;
}
@@ -627,8 +628,8 @@ static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
btrfs_finish_ordered_extent(dio_data->ordered, NULL,
pos, length, false);
else
- unlock_dio_extent(&BTRFS_I(inode)->io_tree, pos,
- pos + length - 1, NULL);
+ btrfs_unlock_dio_extent(&BTRFS_I(inode)->io_tree, pos,
+ pos + length - 1, NULL);
ret = -ENOTBLK;
}
if (write) {
@@ -660,8 +661,8 @@ static void btrfs_dio_end_io(struct btrfs_bio *bbio)
dip->file_offset, dip->bytes,
!bio->bi_status);
} else {
- unlock_dio_extent(&inode->io_tree, dip->file_offset,
- dip->file_offset + dip->bytes - 1, NULL);
+ btrfs_unlock_dio_extent(&inode->io_tree, dip->file_offset,
+ dip->file_offset + dip->bytes - 1, NULL);
}
bbio->bio.bi_private = bbio->private;
@@ -692,9 +693,9 @@ static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio,
* a pre-existing one.
*/
if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
- ret = split_extent_map(bbio->inode, bbio->file_offset,
- ordered->num_bytes, len,
- ordered->disk_bytenr);
+ ret = btrfs_split_extent_map(bbio->inode, bbio->file_offset,
+ ordered->num_bytes, len,
+ ordered->disk_bytenr);
if (ret)
return ret;
}
@@ -714,10 +715,8 @@ static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
container_of(bbio, struct btrfs_dio_private, bbio);
struct btrfs_dio_data *dio_data = iter->private;
- btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info,
+ btrfs_bio_init(bbio, BTRFS_I(iter->inode), file_offset,
btrfs_dio_end_io, bio->bi_private);
- bbio->inode = BTRFS_I(iter->inode);
- bbio->file_offset = file_offset;
dip->file_offset = file_offset;
dip->bytes = bio->bi_iter.bi_size;
@@ -787,6 +786,18 @@ static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
if (iov_iter_alignment(iter) & blocksize_mask)
return -EINVAL;
+ /*
+ * For bs > ps support, we heavily rely on large folios to make sure no
+ * block will cross large folio boundaries.
+ *
+ * But memory provided by direct IO is only virtually contiguous, not
+ * physically contiguous, which breaks btrfs' large folio requirement.
+ *
+ * So for bs > ps support, all direct IO should fall back to buffered IO.
+ */
+ if (fs_info->sectorsize > PAGE_SIZE)
+ return -EINVAL;
+
return 0;
}
@@ -856,6 +867,22 @@ relock:
btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
goto buffered;
}
+ /*
+ * We can't control the folios being passed in; applications can write
+ * to them while a direct IO write is in progress. This means the
+ * content might change after we have calculated the data checksum,
+ * so we can end up storing a checksum that doesn't match the
+ * persisted data.
+ *
+ * To be extra safe and avoid false data checksum mismatches, if the
+ * inode requires data checksums, just fall back to buffered IO.
+ * For buffered IO we have full control of the page cache and can ensure
+ * no one is modifying the content during writeback.
+ */
+ if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+ goto buffered;
+ }
/*
* The iov_iter can be mapped to the same file range we are writing to.
diff --git a/fs/btrfs/direct-io.h b/fs/btrfs/direct-io.h
index 3dc3ea926afe..df5d45ee6de7 100644
--- a/fs/btrfs/direct-io.h
+++ b/fs/btrfs/direct-io.h
@@ -5,6 +5,8 @@
#include <linux/types.h>
+struct kiocb;
+
int __init btrfs_init_dio(void);
void __cold btrfs_destroy_dio(void);
diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
index e815d165cccc..89fe85778115 100644
--- a/fs/btrfs/discard.c
+++ b/fs/btrfs/discard.c
@@ -94,8 +94,6 @@ static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
struct btrfs_block_group *block_group)
{
lockdep_assert_held(&discard_ctl->lock);
- if (!btrfs_run_discard_work(discard_ctl))
- return;
if (list_empty(&block_group->discard_list) ||
block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED) {
@@ -118,6 +116,9 @@ static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
if (!btrfs_is_block_group_data_only(block_group))
return;
+ if (!btrfs_run_discard_work(discard_ctl))
+ return;
+
spin_lock(&discard_ctl->lock);
__add_to_discard_list(discard_ctl, block_group);
spin_unlock(&discard_ctl->lock);
@@ -167,13 +168,7 @@ static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl,
block_group->discard_eligible_time = 0;
queued = !list_empty(&block_group->discard_list);
list_del_init(&block_group->discard_list);
- /*
- * If the block group is currently running in the discard workfn, we
- * don't want to deref it, since it's still being used by the workfn.
- * The workfn will notice this case and deref the block group when it is
- * finished.
- */
- if (queued && !running)
+ if (queued)
btrfs_put_block_group(block_group);
spin_unlock(&discard_ctl->lock);
@@ -250,6 +245,20 @@ again:
block_group->used != 0) {
if (btrfs_is_block_group_data_only(block_group)) {
__add_to_discard_list(discard_ctl, block_group);
+ /*
+ * The block group must have been moved to another
+ * discard list even if discard was disabled in
+ * the meantime or a transaction abort happened,
+ * otherwise we can end up in an infinite loop,
+ * always jumping to the 'again' label and getting
+ * this block group over and over in case there
+ * are no other block groups in the discard lists.
+ */
+ ASSERT(block_group->discard_index !=
+ BTRFS_DISCARD_INDEX_UNUSED,
+ "discard_index=%d",
+ block_group->discard_index);
} else {
list_del_init(&block_group->discard_list);
btrfs_put_block_group(block_group);
@@ -260,9 +269,10 @@ again:
block_group->discard_cursor = block_group->start;
block_group->discard_state = BTRFS_DISCARD_EXTENTS;
}
- discard_ctl->block_group = block_group;
}
if (block_group) {
+ btrfs_get_block_group(block_group);
+ discard_ctl->block_group = block_group;
*discard_state = block_group->discard_state;
*discard_index = block_group->discard_index;
}
@@ -493,9 +503,20 @@ static void btrfs_discard_workfn(struct work_struct *work)
block_group = peek_discard_list(discard_ctl, &discard_state,
&discard_index, now);
- if (!block_group || !btrfs_run_discard_work(discard_ctl))
+ if (!block_group)
+ return;
+ if (!btrfs_run_discard_work(discard_ctl)) {
+ spin_lock(&discard_ctl->lock);
+ btrfs_put_block_group(block_group);
+ discard_ctl->block_group = NULL;
+ spin_unlock(&discard_ctl->lock);
return;
+ }
if (now < block_group->discard_eligible_time) {
+ spin_lock(&discard_ctl->lock);
+ btrfs_put_block_group(block_group);
+ discard_ctl->block_group = NULL;
+ spin_unlock(&discard_ctl->lock);
btrfs_discard_schedule_work(discard_ctl, false);
return;
}
@@ -547,15 +568,7 @@ static void btrfs_discard_workfn(struct work_struct *work)
spin_lock(&discard_ctl->lock);
discard_ctl->prev_discard = trimmed;
discard_ctl->prev_discard_time = now;
- /*
- * If the block group was removed from the discard list while it was
- * running in this workfn, then we didn't deref it, since this function
- * still owned that reference. But we set the discard_ctl->block_group
- * back to NULL, so we can use that condition to know that now we need
- * to deref the block_group.
- */
- if (discard_ctl->block_group == NULL)
- btrfs_put_block_group(block_group);
+ btrfs_put_block_group(block_group);
discard_ctl->block_group = NULL;
__btrfs_discard_schedule_work(discard_ctl, now, false);
spin_unlock(&discard_ctl->lock);
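
Schematically, the discard.c changes above reduce the block group lifetime rule to a plain get/put pairing: the peek side publishes discard_ctl->block_group only with its own reference held, and every exit of the workfn drops exactly that reference (snippet condensed from the hunks above):

	/* peek_discard_list(): publish only with a reference held. */
	btrfs_get_block_group(block_group);
	discard_ctl->block_group = block_group;

	/* btrfs_discard_workfn(), on every exit path: */
	spin_lock(&discard_ctl->lock);
	btrfs_put_block_group(block_group);
	discard_ctl->block_group = NULL;
	spin_unlock(&discard_ctl->lock);
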
diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h
index dddb0f9101ba..2c5e85394092 100644
--- a/fs/btrfs/discard.h
+++ b/fs/btrfs/discard.h
@@ -3,6 +3,7 @@
#ifndef BTRFS_DISCARD_H
#define BTRFS_DISCARD_H
+#include <linux/types.h>
#include <linux/sizes.h>
struct btrfs_fs_info;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 814320948645..89149fac804c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -50,6 +50,7 @@
#include "relocation.h"
#include "scrub.h"
#include "super.h"
+#include "delayed-inode.h"
#define BTRFS_SUPER_FLAG_SUPP (BTRFS_HEADER_FLAG_WRITTEN |\
BTRFS_HEADER_FLAG_RELOC |\
@@ -116,7 +117,7 @@ static void csum_tree_block(struct extent_buffer *buf, u8 *result)
* detect blocks that either didn't get written at all or got written
* in the wrong place.
*/
-int btrfs_buffer_uptodate(struct extent_buffer *eb, u64 parent_transid, int atomic)
+int btrfs_buffer_uptodate(struct extent_buffer *eb, u64 parent_transid, bool atomic)
{
if (!extent_buffer_uptodate(eb))
return 0;
@@ -182,26 +183,33 @@ static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb,
int mirror_num)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
- int num_folios = num_extent_folios(eb);
+ const u32 step = min(fs_info->nodesize, PAGE_SIZE);
+ const u32 nr_steps = eb->len / step;
+ phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
int ret = 0;
if (sb_rdonly(fs_info->sb))
return -EROFS;
- for (int i = 0; i < num_folios; i++) {
+ for (int i = 0; i < num_extent_pages(eb); i++) {
struct folio *folio = eb->folios[i];
- u64 start = max_t(u64, eb->start, folio_pos(folio));
- u64 end = min_t(u64, eb->start + eb->len,
- folio_pos(folio) + eb->folio_size);
- u32 len = end - start;
-
- ret = btrfs_repair_io_failure(fs_info, 0, start, len,
- start, folio, offset_in_folio(folio, start),
- mirror_num);
- if (ret)
- break;
+
+ /* No large folio support yet. */
+ ASSERT(folio_order(folio) == 0);
+ ASSERT(i < nr_steps);
+
+ /*
+ * For nodesize < page size, there is just one paddr, with some
+ * offset inside the page.
+ *
+ * For nodesize >= page size, it's one or more paddrs, and eb->start
+ * must be aligned to page boundary.
+ */
+ paddrs[i] = page_to_phys(&folio->page) + offset_in_page(eb->start);
}
+ ret = btrfs_repair_io_failure(fs_info, 0, eb->start, eb->len, eb->start,
+ paddrs, step, mirror_num);
return ret;
}
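The rewritten repair path hands btrfs_repair_io_failure() an array of physical addresses instead of folios. The following standalone C program roughly mirrors the address arithmetic of the loop above; PAGE_SIZE_SKETCH and the page_phys[] values are made up for illustration, where the kernel code would use page_to_phys():

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_SKETCH 4096u

/* page_phys[] stands in for page_to_phys() of each backing page. */
static void fill_paddrs(uint64_t eb_start, uint32_t eb_len, uint32_t nodesize,
                        const uint64_t *page_phys, uint64_t *paddrs)
{
        uint32_t step = nodesize < PAGE_SIZE_SKETCH ? nodesize : PAGE_SIZE_SKETCH;
        uint32_t nr_steps = eb_len / step;

        for (uint32_t i = 0; i < nr_steps; i++) {
                /*
                 * nodesize < page size: a single entry with an offset inside
                 * the page.  nodesize >= page size: eb_start is page aligned,
                 * the offset is zero and each page contributes one entry.
                 */
                paddrs[i] = page_phys[i] + (eb_start & (PAGE_SIZE_SKETCH - 1));
        }
}

int main(void)
{
        const uint64_t pages[2] = { 0x100000, 0x200000 };
        uint64_t paddrs[2];

        /* 8K nodes on 4K pages: offset is zero, one entry per page. */
        fill_paddrs(0x8000, 8192, 8192, pages, paddrs);
        printf("%#llx %#llx\n", (unsigned long long)paddrs[0],
               (unsigned long long)paddrs[1]);
        return 0;
}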
@@ -225,8 +233,7 @@ int btrfs_read_extent_buffer(struct extent_buffer *eb,
ASSERT(check);
while (1) {
- clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
- ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num, check);
+ ret = read_extent_buffer_pages(eb, mirror_num, check);
if (!ret)
break;
@@ -257,7 +264,7 @@ int btrfs_read_extent_buffer(struct extent_buffer *eb,
/*
* Checksum a dirty tree block before IO.
*/
-blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
+int btree_csum_one_bio(struct btrfs_bio *bbio)
{
struct extent_buffer *eb = bbio->private;
struct btrfs_fs_info *fs_info = eb->fs_info;
@@ -268,9 +275,9 @@ blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
/* Btree blocks are always contiguous on disk. */
if (WARN_ON_ONCE(bbio->file_offset != eb->start))
- return BLK_STS_IOERR;
+ return -EIO;
if (WARN_ON_ONCE(bbio->bio.bi_iter.bi_size != eb->len))
- return BLK_STS_IOERR;
+ return -EIO;
/*
* If an extent_buffer is marked as EXTENT_BUFFER_ZONED_ZEROOUT, don't
@@ -279,14 +286,13 @@ blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
*/
if (test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)) {
memzero_extent_buffer(eb, 0, eb->len);
- return BLK_STS_OK;
+ return 0;
}
if (WARN_ON_ONCE(found_start != eb->start))
- return BLK_STS_IOERR;
- if (WARN_ON(!btrfs_folio_test_uptodate(fs_info, eb->folios[0],
- eb->start, eb->len)))
- return BLK_STS_IOERR;
+ return -EIO;
+ if (WARN_ON(!btrfs_meta_folio_test_uptodate(eb->folios[0], eb)))
+ return -EIO;
ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
offsetof(struct btrfs_header, fsid),
@@ -314,7 +320,7 @@ blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
goto error;
}
write_extent_buffer(eb, result, 0, fs_info->csum_size);
- return BLK_STS_OK;
+ return 0;
error:
btrfs_print_tree(eb, 0);
@@ -328,7 +334,7 @@ error:
*/
WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID);
- return errno_to_blk_status(ret);
+ return ret;
}
static bool check_tree_block_fsid(struct extent_buffer *eb)
@@ -372,21 +378,21 @@ int btrfs_validate_extent_buffer(struct extent_buffer *eb,
ASSERT(check);
found_start = btrfs_header_bytenr(eb);
- if (found_start != eb->start) {
+ if (unlikely(found_start != eb->start)) {
btrfs_err_rl(fs_info,
"bad tree block start, mirror %u want %llu have %llu",
eb->read_mirror, eb->start, found_start);
ret = -EIO;
goto out;
}
- if (check_tree_block_fsid(eb)) {
+ if (unlikely(check_tree_block_fsid(eb))) {
btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u",
eb->start, eb->read_mirror);
ret = -EIO;
goto out;
}
found_level = btrfs_header_level(eb);
- if (found_level >= BTRFS_MAX_LEVEL) {
+ if (unlikely(found_level >= BTRFS_MAX_LEVEL)) {
btrfs_err(fs_info,
"bad tree block level, mirror %u level %d on logical %llu",
eb->read_mirror, btrfs_header_level(eb), eb->start);
@@ -400,19 +406,19 @@ int btrfs_validate_extent_buffer(struct extent_buffer *eb,
if (memcmp(result, header_csum, csum_size) != 0) {
btrfs_warn_rl(fs_info,
-"checksum verify failed on logical %llu mirror %u wanted " CSUM_FMT " found " CSUM_FMT " level %d%s",
+"checksum verify failed on logical %llu mirror %u wanted " BTRFS_CSUM_FMT " found " BTRFS_CSUM_FMT " level %d%s",
eb->start, eb->read_mirror,
- CSUM_FMT_VALUE(csum_size, header_csum),
- CSUM_FMT_VALUE(csum_size, result),
+ BTRFS_CSUM_FMT_VALUE(csum_size, header_csum),
+ BTRFS_CSUM_FMT_VALUE(csum_size, result),
btrfs_header_level(eb),
ignore_csum ? ", ignored" : "");
- if (!ignore_csum) {
+ if (unlikely(!ignore_csum)) {
ret = -EUCLEAN;
goto out;
}
}
- if (found_level != check->level) {
+ if (unlikely(found_level != check->level)) {
btrfs_err(fs_info,
"level verify failed on logical %llu mirror %u wanted %u found %u",
eb->start, eb->read_mirror, check->level, found_level);
@@ -454,15 +460,9 @@ int btrfs_validate_extent_buffer(struct extent_buffer *eb,
goto out;
}
- /*
- * If this is a leaf block and it is corrupt, set the corrupt bit so
- * that we don't try and read the other copies of this block, just
- * return -EIO.
- */
- if (found_level == 0 && btrfs_check_leaf(eb)) {
- set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
+ /* If this is a leaf block and it is corrupt, just return -EIO. */
+ if (found_level == 0 && btrfs_check_leaf(eb))
ret = -EIO;
- }
if (found_level > 0 && btrfs_check_node(eb))
ret = -EIO;
@@ -643,25 +643,19 @@ struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
}
-static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
- u64 objectid)
+static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
+ u64 objectid, gfp_t flags)
{
- bool dummy = btrfs_is_testing(fs_info);
+ struct btrfs_root *root;
+
+ root = kzalloc(sizeof(*root), flags);
+ if (!root)
+ return NULL;
- memset(&root->root_key, 0, sizeof(root->root_key));
- memset(&root->root_item, 0, sizeof(root->root_item));
- memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
root->fs_info = fs_info;
root->root_key.objectid = objectid;
- root->node = NULL;
- root->commit_root = NULL;
- root->state = 0;
RB_CLEAR_NODE(&root->rb_node);
- btrfs_set_root_last_trans(root, 0);
- root->free_objectid = 0;
- root->nr_delalloc_inodes = 0;
- root->nr_ordered_extents = 0;
xa_init(&root->inodes);
xa_init(&root->delayed_nodes);
@@ -695,15 +689,12 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
refcount_set(&root->refs, 1);
atomic_set(&root->snapshot_force_cow, 0);
atomic_set(&root->nr_swapfiles, 0);
- btrfs_set_root_log_transid(root, 0);
root->log_transid_committed = -1;
- btrfs_set_root_last_log_commit(root, 0);
- root->anon_dev = 0;
- if (!dummy) {
- extent_io_tree_init(fs_info, &root->dirty_log_pages,
- IO_TREE_ROOT_DIRTY_LOG_PAGES);
- extent_io_tree_init(fs_info, &root->log_csum_range,
- IO_TREE_LOG_CSUM_RANGE);
+ if (!btrfs_is_testing(fs_info)) {
+ btrfs_extent_io_tree_init(fs_info, &root->dirty_log_pages,
+ IO_TREE_ROOT_DIRTY_LOG_PAGES);
+ btrfs_extent_io_tree_init(fs_info, &root->log_csum_range,
+ IO_TREE_LOG_CSUM_RANGE);
}
spin_lock_init(&root->root_item_lock);
@@ -714,14 +705,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
list_add_tail(&root->leak_list, &fs_info->allocated_roots);
spin_unlock(&fs_info->fs_roots_radix_lock);
#endif
-}
-static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
- u64 objectid, gfp_t flags)
-{
- struct btrfs_root *root = kzalloc(sizeof(*root), flags);
- if (root)
- __setup_root(root, fs_info, objectid);
return root;
}
@@ -894,7 +878,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
btrfs_set_root_used(&root->root_item, leaf->len);
btrfs_set_root_last_snapshot(&root->root_item, 0);
btrfs_set_root_dirid(&root->root_item, 0);
- if (is_fstree(objectid))
+ if (btrfs_is_fstree(objectid))
generate_random_guid(root->root_item.uuid);
else
export_guid(root->root_item.uuid, &guid_null);
@@ -1057,7 +1041,7 @@ static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
root->node = NULL;
goto fail;
}
- if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
+ if (unlikely(!btrfs_buffer_uptodate(root->node, generation, false))) {
ret = -EIO;
goto fail;
}
@@ -1066,10 +1050,10 @@ static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
* For real fs, and not log/reloc trees, root owner must
* match its root node owner
*/
- if (!btrfs_is_testing(fs_info) &&
- btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
- btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
- btrfs_root_id(root) != btrfs_header_owner(root->node)) {
+ if (unlikely(!btrfs_is_testing(fs_info) &&
+ btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
+ btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
+ btrfs_root_id(root) != btrfs_header_owner(root->node))) {
btrfs_crit(fs_info,
"root=%llu block=%llu, tree root owner mismatch, have %llu expect %llu",
btrfs_root_id(root), root->node->start,
@@ -1089,21 +1073,22 @@ struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
const struct btrfs_key *key)
{
struct btrfs_root *root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
path = btrfs_alloc_path();
if (!path)
return ERR_PTR(-ENOMEM);
root = read_tree_root_path(tree_root, path, key);
- btrfs_free_path(path);
return root;
}
/*
- * Initialize subvolume root in-memory structure
+ * Initialize subvolume root in-memory structure.
*
* @anon_dev: anonymous device to attach to the root, if zero, allocate new
+ *
+ * In case of failure the caller is responsible for calling btrfs_free_fs_root()

*/
static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
{
@@ -1113,7 +1098,7 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
if (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
!btrfs_is_data_reloc_root(root) &&
- is_fstree(btrfs_root_id(root))) {
+ btrfs_is_fstree(btrfs_root_id(root))) {
set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
btrfs_check_and_init_root_item(&root->root_item);
}
@@ -1122,12 +1107,12 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
* Don't assign anonymous block device to roots that are not exposed to
* userspace, the id pool is limited to 1M
*/
- if (is_fstree(btrfs_root_id(root)) &&
+ if (btrfs_is_fstree(btrfs_root_id(root)) &&
btrfs_root_refs(&root->root_item) > 0) {
if (!anon_dev) {
ret = get_anon_bdev(&root->anon_dev);
if (ret)
- goto fail;
+ return ret;
} else {
root->anon_dev = anon_dev;
}
@@ -1137,7 +1122,7 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
ret = btrfs_init_root_free_objectid(root);
if (ret) {
mutex_unlock(&root->objectid_mutex);
- goto fail;
+ return ret;
}
ASSERT(root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
@@ -1145,9 +1130,6 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
mutex_unlock(&root->objectid_mutex);
return 0;
-fail:
- /* The caller is responsible to call btrfs_free_fs_root */
- return ret;
}
static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
@@ -1258,6 +1240,10 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
{
struct percpu_counter *em_counter = &fs_info->evictable_extent_maps;
+ if (fs_info->fs_devices)
+ btrfs_close_devices(fs_info->fs_devices);
+ btrfs_free_compress_wsm(fs_info);
+ percpu_counter_destroy(&fs_info->stats_read_blocks);
percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
percpu_counter_destroy(&fs_info->delalloc_bytes);
percpu_counter_destroy(&fs_info->ordered_bytes);
@@ -1326,7 +1312,7 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
* This is namely for free-space-tree and quota tree, which can change
* at runtime and should only be grabbed from fs_info.
*/
- if (!is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID)
+ if (!btrfs_is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID)
return ERR_PTR(-ENOENT);
again:
root = btrfs_lookup_fs_root(fs_info, objectid);
@@ -1567,7 +1553,7 @@ static int transaction_kthread(void *arg)
do {
cannot_commit = false;
- delay = msecs_to_jiffies(fs_info->commit_interval * 1000);
+ delay = secs_to_jiffies(fs_info->commit_interval);
mutex_lock(&fs_info->transaction_kthread_mutex);
spin_lock(&fs_info->trans_lock);
@@ -1582,9 +1568,9 @@ static int transaction_kthread(void *arg)
cur->state < TRANS_STATE_COMMIT_PREP &&
delta < fs_info->commit_interval) {
spin_unlock(&fs_info->trans_lock);
- delay -= msecs_to_jiffies((delta - 1) * 1000);
+ delay -= secs_to_jiffies(delta - 1);
delay = min(delay,
- msecs_to_jiffies(fs_info->commit_interval * 1000));
+ secs_to_jiffies(fs_info->commit_interval));
goto sleep;
}
transid = cur->transid;
@@ -1782,8 +1768,6 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
destroy_workqueue(fs_info->endio_workers);
if (fs_info->rmw_workers)
destroy_workqueue(fs_info->rmw_workers);
- if (fs_info->compressed_write_workers)
- destroy_workqueue(fs_info->compressed_write_workers);
btrfs_destroy_workqueue(fs_info->endio_write_workers);
btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
btrfs_destroy_workqueue(fs_info->delayed_workers);
@@ -1846,6 +1830,8 @@ void btrfs_put_root(struct btrfs_root *root)
if (refcount_dec_and_test(&root->refs)) {
if (WARN_ON(!xa_empty(&root->inodes)))
xa_destroy(&root->inodes);
+ if (WARN_ON(!xa_empty(&root->delayed_nodes)))
+ xa_destroy(&root->delayed_nodes);
WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
if (root->anon_dev)
free_anon_bdev(root->anon_dev);
@@ -1866,8 +1852,8 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
int i;
while (!list_empty(&fs_info->dead_roots)) {
- gang[0] = list_entry(fs_info->dead_roots.next,
- struct btrfs_root, root_list);
+ gang[0] = list_first_entry(&fs_info->dead_roots,
+ struct btrfs_root, root_list);
list_del(&gang[0]->root_list);
if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state))
@@ -1930,13 +1916,14 @@ static int btrfs_init_btree_inode(struct super_block *sb)
inode->i_mapping->a_ops = &btree_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
- extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
- IO_TREE_BTREE_INODE_IO);
- extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
+ btrfs_extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
+ IO_TREE_BTREE_INODE_IO);
+ btrfs_extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
__insert_inode_hash(inode, hash);
+ set_bit(AS_KERNEL_FILE, &inode->i_mapping->flags);
fs_info->btree_inode = inode;
return 0;
@@ -1956,7 +1943,6 @@ static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
fs_info->qgroup_tree = RB_ROOT;
INIT_LIST_HEAD(&fs_info->dirty_qgroups);
fs_info->qgroup_seq = 1;
- fs_info->qgroup_ulist = NULL;
fs_info->qgroup_rescan_running = false;
fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT;
mutex_init(&fs_info->qgroup_rescan_lock);
@@ -1966,7 +1952,7 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
{
u32 max_active = fs_info->thread_pool_size;
unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
- unsigned int ordered_flags = WQ_MEM_RECLAIM | WQ_FREEZABLE;
+ unsigned int ordered_flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_PERCPU;
fs_info->workers =
btrfs_alloc_workqueue(fs_info, "worker", flags, max_active, 16);
@@ -1993,8 +1979,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
fs_info->endio_write_workers =
btrfs_alloc_workqueue(fs_info, "endio-write", flags,
max_active, 2);
- fs_info->compressed_write_workers =
- alloc_workqueue("btrfs-compressed-write", flags, max_active);
fs_info->endio_freespace_worker =
btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
max_active, 0);
@@ -2005,12 +1989,11 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
btrfs_alloc_ordered_workqueue(fs_info, "qgroup-rescan",
ordered_flags);
fs_info->discard_ctl.discard_workers =
- alloc_ordered_workqueue("btrfs_discard", WQ_FREEZABLE);
+ alloc_ordered_workqueue("btrfs-discard", WQ_FREEZABLE);
if (!(fs_info->workers &&
fs_info->delalloc_workers && fs_info->flush_workers &&
fs_info->endio_workers && fs_info->endio_meta_workers &&
- fs_info->compressed_write_workers &&
fs_info->endio_write_workers &&
fs_info->endio_freespace_worker && fs_info->rmw_workers &&
fs_info->caching_workers && fs_info->fixup_workers &&
@@ -2037,14 +2020,10 @@ static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
fs_info->csum_shash = csum_shash;
- /*
- * Check if the checksum implementation is a fast accelerated one.
- * As-is this is a bit of a hack and should be replaced once the csum
- * implementations provide that information themselves.
- */
+ /* Check if the checksum implementation is a fast accelerated one. */
switch (csum_type) {
case BTRFS_CSUM_TYPE_CRC32:
- if (!strstr(crypto_shash_driver_name(csum_shash), "generic"))
+ if (crc32_optimizations() & CRC32C_OPTIMIZATION)
set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
break;
case BTRFS_CSUM_TYPE_XXHASH:
@@ -2070,7 +2049,7 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
u64 bytenr = btrfs_super_log_root(disk_super);
int level = btrfs_super_log_root_level(disk_super);
- if (fs_devices->rw_devices == 0) {
+ if (unlikely(fs_devices->rw_devices == 0)) {
btrfs_warn(fs_info, "log replay required on RO media");
return -EIO;
}
@@ -2091,7 +2070,7 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
btrfs_put_root(log_tree_root);
return ret;
}
- if (!extent_buffer_uptodate(log_tree_root->node)) {
+ if (unlikely(!extent_buffer_uptodate(log_tree_root->node))) {
btrfs_err(fs_info, "failed to read log tree");
btrfs_put_root(log_tree_root);
return -EIO;
@@ -2099,10 +2078,10 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
/* returns with log_tree_root freed on success */
ret = btrfs_recover_log_trees(log_tree_root);
+ btrfs_put_root(log_tree_root);
if (ret) {
btrfs_handle_fs_error(fs_info, ret,
"Failed to recover log tree");
- btrfs_put_root(log_tree_root);
return ret;
}
@@ -2167,8 +2146,7 @@ static int load_global_roots_objectid(struct btrfs_root *tree_root,
found = true;
root = read_tree_root_path(tree_root, path, &key);
if (IS_ERR(root)) {
- if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
- ret = PTR_ERR(root);
+ ret = PTR_ERR(root);
break;
}
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
@@ -2199,8 +2177,8 @@ static int load_global_roots_objectid(struct btrfs_root *tree_root,
static int load_global_roots(struct btrfs_root *tree_root)
{
- struct btrfs_path *path;
- int ret = 0;
+ BTRFS_PATH_AUTO_FREE(path);
+ int ret;
path = btrfs_alloc_path();
if (!path)
@@ -2209,18 +2187,17 @@ static int load_global_roots(struct btrfs_root *tree_root)
ret = load_global_roots_objectid(tree_root, path,
BTRFS_EXTENT_TREE_OBJECTID, "extent");
if (ret)
- goto out;
+ return ret;
ret = load_global_roots_objectid(tree_root, path,
BTRFS_CSUM_TREE_OBJECTID, "csum");
if (ret)
- goto out;
+ return ret;
if (!btrfs_fs_compat_ro(tree_root->fs_info, FREE_SPACE_TREE))
- goto out;
+ return ret;
ret = load_global_roots_objectid(tree_root, path,
BTRFS_FREE_SPACE_TREE_OBJECTID,
"free space");
-out:
- btrfs_free_path(path);
+
return ret;
}
@@ -2327,6 +2304,71 @@ out:
return ret;
}
+static int validate_sys_chunk_array(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_super_block *sb)
+{
+ unsigned int cur = 0; /* Offset inside the sys chunk array */
+ /*
+ * At sb read time, fs_info is not fully initialized. Thus we have
+ * to use super block sectorsize, which should have been validated.
+ */
+ const u32 sectorsize = btrfs_super_sectorsize(sb);
+ u32 sys_array_size = btrfs_super_sys_array_size(sb);
+
+ if (unlikely(sys_array_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)) {
+ btrfs_err(fs_info, "system chunk array too big %u > %u",
+ sys_array_size, BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
+ return -EUCLEAN;
+ }
+
+ while (cur < sys_array_size) {
+ struct btrfs_disk_key *disk_key;
+ struct btrfs_chunk *chunk;
+ struct btrfs_key key;
+ u64 type;
+ u16 num_stripes;
+ u32 len;
+ int ret;
+
+ disk_key = (struct btrfs_disk_key *)(sb->sys_chunk_array + cur);
+ len = sizeof(*disk_key);
+
+ if (unlikely(cur + len > sys_array_size))
+ goto short_read;
+ cur += len;
+
+ btrfs_disk_key_to_cpu(&key, disk_key);
+ if (unlikely(key.type != BTRFS_CHUNK_ITEM_KEY)) {
+ btrfs_err(fs_info,
+ "unexpected item type %u in sys_array at offset %u",
+ key.type, cur);
+ return -EUCLEAN;
+ }
+ chunk = (struct btrfs_chunk *)(sb->sys_chunk_array + cur);
+ num_stripes = btrfs_stack_chunk_num_stripes(chunk);
+ if (unlikely(cur + btrfs_chunk_item_size(num_stripes) > sys_array_size))
+ goto short_read;
+ type = btrfs_stack_chunk_type(chunk);
+ if (unlikely(!(type & BTRFS_BLOCK_GROUP_SYSTEM))) {
+ btrfs_err(fs_info,
+ "invalid chunk type %llu in sys_array at offset %u",
+ type, cur);
+ return -EUCLEAN;
+ }
+ ret = btrfs_check_chunk_valid(fs_info, NULL, chunk, key.offset,
+ sectorsize);
+ if (ret < 0)
+ return ret;
+ cur += btrfs_chunk_item_size(num_stripes);
+ }
+ return 0;
+short_read:
+ btrfs_err(fs_info,
+ "super block sys chunk array short read, cur=%u sys_array_size=%u",
+ cur, sys_array_size);
+ return -EUCLEAN;
+}
+
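validate_sys_chunk_array() above walks the packed array as alternating (disk key, chunk item) records, where the chunk item's size depends on its stripe count. A userspace sketch of just the cursor arithmetic follows; the sizes are assumptions for illustration, not the real btrfs on-disk layout:

#include <stdint.h>

/* Assumed record sizes, for illustration only. */
#define DISK_KEY_SIZE   17u
#define CHUNK_SIZE      80u     /* assumed to embed one stripe */
#define STRIPE_SIZE     32u

/* Assumes num_stripes >= 1, which the real chunk validation enforces. */
static uint32_t chunk_item_size(uint16_t num_stripes)
{
        return CHUNK_SIZE + (uint32_t)(num_stripes - 1) * STRIPE_SIZE;
}

/*
 * Return 0 when every (key, chunk) pair fits inside the array, -1 on a
 * short read -- the same two bounds checks the patch performs before
 * advancing the cursor past the key and past the chunk item.
 */
static int walk_sys_array(const uint16_t *stripes_per_chunk, int nr_chunks,
                          uint32_t sys_array_size)
{
        uint32_t cur = 0;

        for (int i = 0; i < nr_chunks; i++) {
                if (cur + DISK_KEY_SIZE > sys_array_size)
                        return -1;
                cur += DISK_KEY_SIZE;

                if (cur + chunk_item_size(stripes_per_chunk[i]) > sys_array_size)
                        return -1;
                cur += chunk_item_size(stripes_per_chunk[i]);
        }
        return 0;
}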
/*
* Real super block validation
* NOTE: super csum type and incompat features will not be checked here.
@@ -2381,21 +2423,13 @@ int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
* Check sectorsize and nodesize first, other check will need it.
* Check all possible sectorsize(4K, 8K, 16K, 32K, 64K) here.
*/
- if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
+ if (!is_power_of_2(sectorsize) || sectorsize < BTRFS_MIN_BLOCKSIZE ||
sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
ret = -EINVAL;
}
- /*
- * We only support at most two sectorsizes: 4K and PAGE_SIZE.
- *
- * We can support 16K sectorsize with 64K page size without problem,
- * but such sectorsize/pagesize combination doesn't make much sense.
- * 4K will be our future standard, PAGE_SIZE is supported from the very
- * beginning.
- */
- if (sectorsize > PAGE_SIZE || (sectorsize != SZ_4K && sectorsize != PAGE_SIZE)) {
+ if (!btrfs_supported_blocksize(sectorsize)) {
btrfs_err(fs_info,
"sectorsize %llu not yet supported for page size %lu",
sectorsize, PAGE_SIZE);
@@ -2495,6 +2529,11 @@ int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
ret = -EINVAL;
}
+ if (ret)
+ return ret;
+
+ ret = validate_sys_chunk_array(fs_info, sb);
+
/*
* Obvious sys_chunk_array corruptions, it must hold at least one key
* and one chunk
@@ -2557,13 +2596,13 @@ static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
ret = btrfs_validate_super(fs_info, sb, -1);
if (ret < 0)
goto out;
- if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) {
+ if (unlikely(!btrfs_supported_super_csum(btrfs_super_csum_type(sb)))) {
ret = -EUCLEAN;
btrfs_err(fs_info, "invalid csum type, has %u want %u",
btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
goto out;
}
- if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
+ if (unlikely(btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP)) {
ret = -EUCLEAN;
btrfs_err(fs_info,
"invalid incompat flags, has 0x%llx valid mask 0x%llx",
@@ -2593,7 +2632,7 @@ static int load_super_root(struct btrfs_root *root, u64 bytenr, u64 gen, int lev
root->node = NULL;
return ret;
}
- if (!extent_buffer_uptodate(root->node)) {
+ if (unlikely(!extent_buffer_uptodate(root->node))) {
free_extent_buffer(root->node);
root->node = NULL;
return -EIO;
@@ -2697,10 +2736,21 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
return ret;
}
+/*
+ * Lockdep gets confused between our buffer_tree which requires IRQ locking because
+ * we modify marks in the IRQ context, and our delayed inode xarray which doesn't
+ * have these requirements. Use a class key so lockdep doesn't get them mixed up.
+ */
+static struct lock_class_key buffer_xa_class;
+
void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
{
INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
- INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
+
+ /* Use the same flags as mapping->i_pages. */
+ xa_init_flags(&fs_info->buffer_tree, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
+ lockdep_set_class(&fs_info->buffer_tree.xa_lock, &buffer_xa_class);
+
INIT_LIST_HEAD(&fs_info->trans_list);
INIT_LIST_HEAD(&fs_info->dead_roots);
INIT_LIST_HEAD(&fs_info->delayed_iputs);
@@ -2712,7 +2762,6 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
spin_lock_init(&fs_info->delayed_iput_lock);
spin_lock_init(&fs_info->defrag_inodes_lock);
spin_lock_init(&fs_info->super_lock);
- spin_lock_init(&fs_info->buffer_lock);
spin_lock_init(&fs_info->unused_bgs_lock);
spin_lock_init(&fs_info->treelog_bg_lock);
spin_lock_init(&fs_info->zone_active_bgs_lock);
@@ -2757,6 +2806,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
BTRFS_BLOCK_RSV_GLOBAL);
btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
+ btrfs_init_block_rsv(&fs_info->treelog_rsv, BTRFS_BLOCK_RSV_TREELOG);
btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
BTRFS_BLOCK_RSV_DELOPS);
@@ -2790,8 +2840,8 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
rwlock_init(&fs_info->block_group_cache_lock);
fs_info->block_group_cache_tree = RB_ROOT_CACHED;
- extent_io_tree_init(fs_info, &fs_info->excluded_extents,
- IO_TREE_FS_EXCLUDED_EXTENTS);
+ btrfs_extent_io_tree_init(fs_info, &fs_info->excluded_extents,
+ IO_TREE_FS_EXCLUDED_EXTENTS);
mutex_init(&fs_info->ordered_operations_mutex);
mutex_init(&fs_info->tree_log_mutex);
@@ -2856,6 +2906,10 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
if (ret)
return ret;
+ ret = percpu_counter_init(&fs_info->stats_read_blocks, 0, GFP_KERNEL);
+ if (ret)
+ return ret;
+
fs_info->dirty_metadata_batch = PAGE_SIZE *
(1 + ilog2(nr_cpu_ids));
@@ -3179,13 +3233,13 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
}
/*
- * Subpage runtime limitation on v1 cache.
+ * Subpage/bs > ps runtime limitation on v1 cache.
*
- * V1 space cache still has some hard codeed PAGE_SIZE usage, while
+ * V1 space cache still has some hard coded PAGE_SIZE usage, while
* we're already defaulting to v2 cache, no need to bother v1 as it's
* going to be deprecated anyway.
*/
- if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
+ if (fs_info->sectorsize != PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
btrfs_warn(fs_info,
"v1 space cache is not supported for page size %lu with sectorsize %u",
PAGE_SIZE, fs_info->sectorsize);
@@ -3239,7 +3293,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
/*
* Read super block and check the signature bytes only
*/
- disk_super = btrfs_read_dev_super(fs_devices->latest_dev->bdev);
+ disk_super = btrfs_read_disk_super(fs_devices->latest_dev->bdev, 0, false);
if (IS_ERR(disk_super)) {
ret = PTR_ERR(disk_super);
goto fail_alloc;
@@ -3316,12 +3370,19 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
fs_info->nodesize = nodesize;
+ fs_info->nodesize_bits = ilog2(nodesize);
fs_info->sectorsize = sectorsize;
fs_info->sectorsize_bits = ilog2(sectorsize);
- fs_info->sectors_per_page = (PAGE_SIZE >> fs_info->sectorsize_bits);
+ fs_info->block_min_order = ilog2(round_up(sectorsize, PAGE_SIZE) >> PAGE_SHIFT);
+ fs_info->block_max_order = ilog2((BITS_PER_LONG << fs_info->sectorsize_bits) >> PAGE_SHIFT);
fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
fs_info->stripesize = stripesize;
+ fs_info->fs_devices->fs_info = fs_info;
+ if (fs_info->sectorsize > PAGE_SIZE)
+ btrfs_warn(fs_info,
+ "support for block size %u with page size %lu is experimental, some features may be missing",
+ fs_info->sectorsize, PAGE_SIZE);
/*
* Handle the space caching options appropriately now that we have the
* super block loaded and validated.
@@ -3343,11 +3404,9 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
*/
fs_info->max_inline = min_t(u64, fs_info->max_inline, fs_info->sectorsize);
- if (sectorsize < PAGE_SIZE)
- btrfs_warn(fs_info,
- "read-write for sector size %u with page size %lu is experimental",
- sectorsize, PAGE_SIZE);
-
+ ret = btrfs_alloc_compress_wsm(fs_info);
+ if (ret)
+ goto fail_sb_buffer;
ret = btrfs_init_workqueues(fs_info);
if (ret)
goto fail_sb_buffer;
@@ -3395,7 +3454,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
* below in btrfs_init_dev_replace().
*/
btrfs_free_extra_devids(fs_devices);
- if (!fs_devices->latest_dev->bdev) {
+ if (unlikely(!fs_devices->latest_dev->bdev)) {
btrfs_err(fs_info, "failed to read devices");
ret = -EIO;
goto fail_tree_roots;
@@ -3486,6 +3545,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
goto fail_sysfs;
}
+ btrfs_zoned_reserve_data_reloc_bg(fs_info);
btrfs_free_zone_cache(fs_info);
btrfs_check_active_zone_reservation(fs_info);
@@ -3606,7 +3666,6 @@ fail_alloc:
iput(fs_info->btree_inode);
fail:
- btrfs_close_devices(fs_info->fs_devices);
ASSERT(ret < 0);
return ret;
}
@@ -3619,7 +3678,7 @@ static void btrfs_end_super_write(struct bio *bio)
bio_for_each_folio_all(fi, bio) {
if (bio->bi_status) {
- btrfs_warn_rl_in_rcu(device->fs_info,
+ btrfs_warn_rl(device->fs_info,
"lost super block write due to IO error on %s (%d)",
btrfs_dev_name(device),
blk_status_to_errno(bio->bi_status));
@@ -3639,85 +3698,6 @@ static void btrfs_end_super_write(struct bio *bio)
bio_put(bio);
}
-struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
- int copy_num, bool drop_cache)
-{
- struct btrfs_super_block *super;
- struct page *page;
- u64 bytenr, bytenr_orig;
- struct address_space *mapping = bdev->bd_mapping;
- int ret;
-
- bytenr_orig = btrfs_sb_offset(copy_num);
- ret = btrfs_sb_log_location_bdev(bdev, copy_num, READ, &bytenr);
- if (ret == -ENOENT)
- return ERR_PTR(-EINVAL);
- else if (ret)
- return ERR_PTR(ret);
-
- if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev))
- return ERR_PTR(-EINVAL);
-
- if (drop_cache) {
- /* This should only be called with the primary sb. */
- ASSERT(copy_num == 0);
-
- /*
- * Drop the page of the primary superblock, so later read will
- * always read from the device.
- */
- invalidate_inode_pages2_range(mapping,
- bytenr >> PAGE_SHIFT,
- (bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT);
- }
-
- page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
- if (IS_ERR(page))
- return ERR_CAST(page);
-
- super = page_address(page);
- if (btrfs_super_magic(super) != BTRFS_MAGIC) {
- btrfs_release_disk_super(super);
- return ERR_PTR(-ENODATA);
- }
-
- if (btrfs_super_bytenr(super) != bytenr_orig) {
- btrfs_release_disk_super(super);
- return ERR_PTR(-EINVAL);
- }
-
- return super;
-}
-
-
-struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev)
-{
- struct btrfs_super_block *super, *latest = NULL;
- int i;
- u64 transid = 0;
-
- /* we would like to check all the supers, but that would make
- * a btrfs mount succeed after a mkfs from a different FS.
- * So, we need to add a special mount option to scan for
- * later supers, using BTRFS_SUPER_MIRROR_MAX instead
- */
- for (i = 0; i < 1; i++) {
- super = btrfs_read_dev_one_super(bdev, i, false);
- if (IS_ERR(super))
- continue;
-
- if (!latest || btrfs_super_generation(super) > transid) {
- if (latest)
- btrfs_release_disk_super(super);
-
- latest = super;
- transid = btrfs_super_generation(super);
- }
- }
-
- return super;
-}
-
/*
* Write superblock @sb to the @device. Do not wait for completion, all the
* folios we use for writing are locked.
@@ -3757,8 +3737,8 @@ static int write_dev_supers(struct btrfs_device *device,
continue;
} else if (ret < 0) {
btrfs_err(device->fs_info,
- "couldn't get super block location for mirror %d",
- i);
+ "couldn't get super block location for mirror %d error %d",
+ i, ret);
atomic_inc(&device->sb_write_errors);
continue;
}
@@ -3777,12 +3757,11 @@ static int write_dev_supers(struct btrfs_device *device,
GFP_NOFS);
if (IS_ERR(folio)) {
btrfs_err(device->fs_info,
- "couldn't get super block page for bytenr %llu",
- bytenr);
+ "couldn't get super block page for bytenr %llu error %ld",
+ bytenr, PTR_ERR(folio));
atomic_inc(&device->sb_write_errors);
continue;
}
- ASSERT(folio_order(folio) == 0);
offset = offset_in_folio(folio, bytenr);
disk_super = folio_address(folio) + offset;
@@ -3855,7 +3834,6 @@ static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
/* If the folio has been removed, then we know it completed. */
if (IS_ERR(folio))
continue;
- ASSERT(folio_order(folio) == 0);
/* Folio will be unlocked once the write completes. */
folio_wait_locked(folio);
@@ -3970,7 +3948,7 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
* Checks last_flush_error of disks in order to determine the device
* state.
*/
- if (errors_wait && !btrfs_check_rw_degradable(info, NULL))
+ if (unlikely(errors_wait && !btrfs_check_rw_degradable(info, NULL)))
return -EIO;
return 0;
@@ -3998,7 +3976,7 @@ int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
}
if (min_tolerated == INT_MAX) {
- pr_warn("BTRFS: unknown raid flag: %llu", flags);
+ btrfs_warn(NULL, "unknown raid flag: %llu", flags);
min_tolerated = 0;
}
@@ -4072,7 +4050,7 @@ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
ret = btrfs_validate_write_super(fs_info, sb);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
btrfs_handle_fs_error(fs_info, -EUCLEAN,
"unexpected superblock corruption detected");
@@ -4083,7 +4061,7 @@ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
if (ret)
total_errors++;
}
- if (total_errors > max_errors) {
+ if (unlikely(total_errors > max_errors)) {
btrfs_err(fs_info, "%d errors while writing supers",
total_errors);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
@@ -4108,7 +4086,7 @@ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
total_errors++;
}
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- if (total_errors > max_errors) {
+ if (unlikely(total_errors > max_errors)) {
btrfs_handle_fs_error(fs_info, -EIO,
"%d errors while writing supers",
total_errors);
@@ -4175,8 +4153,9 @@ static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info)
u64 found_end;
found = true;
- while (find_first_extent_bit(&trans->dirty_pages, cur,
- &found_start, &found_end, EXTENT_DIRTY, &cached)) {
+ while (btrfs_find_first_extent_bit(&trans->dirty_pages, cur,
+ &found_start, &found_end,
+ EXTENT_DIRTY, &cached)) {
dirty_bytes += found_end + 1 - found_start;
cur = found_end + 1;
}
@@ -4253,6 +4232,14 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
btrfs_cleanup_defrag_inodes(fs_info);
/*
+ * Handle the error fs first, as it will flush and wait for all ordered
+ * extents. This will generate delayed iputs, thus we want to handle
+ * it first.
+ */
+ if (unlikely(BTRFS_FS_ERROR(fs_info)))
+ btrfs_error_commit_super(fs_info);
+
+ /*
* Wait for any fixup workers to complete.
* If we don't wait for them here and they are still running by the time
* we call kthread_stop() against the cleaner kthread further below, we
@@ -4262,6 +4249,40 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
* already the cleaner, but below we run all pending delayed iputs.
*/
btrfs_flush_workqueue(fs_info->fixup_workers);
+ /*
+ * Similar case here, we have to wait for delalloc workers before we
+ * proceed below and stop the cleaner kthread, otherwise we trigger a
+ * use-after-free on the cleaner kthread task_struct when a delalloc
+ * worker running submit_compressed_extents() adds a delayed iput, which
+ * does a wake up on the cleaner kthread, which was already freed below
+ * when we call kthread_stop().
+ */
+ btrfs_flush_workqueue(fs_info->delalloc_workers);
+
+ /*
+ * We can have ordered extents getting their last reference dropped from
+ * the fs_info->workers queue because for async writes for data bios we
+ * queue a work for that queue, at btrfs_wq_submit_bio(), that runs
+ * run_one_async_done() which calls btrfs_bio_end_io() in case the bio
+ * has an error, and that later function can do the final
+ * btrfs_put_ordered_extent() on the ordered extent attached to the bio,
+ * which adds a delayed iput for the inode. So we must flush the queue
+ * so that we don't have delayed iputs after committing the current
+ * transaction below and stopping the cleaner and transaction kthreads.
+ */
+ btrfs_flush_workqueue(fs_info->workers);
+
+ /*
+ * When finishing a compressed write bio we schedule a work queue item
+ * to finish an ordered extent - end_bbio_compressed_write()
+ * calls btrfs_finish_ordered_extent() which in turn does a call to
+ * btrfs_queue_ordered_fn(), and that queues the ordered extent
+ * completion either in the endio_write_workers work queue or in the
+ * fs_info->endio_freespace_worker work queue. We flush those queues
+ * below, so before we flush them we must flush this queue for the
+ * workers of compressed writes.
+ */
+ flush_workqueue(fs_info->endio_workers);
/*
* After we parked the cleaner kthread, ordered extents may have
@@ -4274,8 +4295,8 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
*
* So wait for all ongoing ordered extents to complete and then run
* delayed iputs. This works because once we reach this point no one
- * can either create new ordered extents nor create delayed iputs
- * through some other means.
+ * can create new ordered extents, but delayed iputs can still be added
+ * by a reclaim worker (see comments further below).
*
* Also note that btrfs_wait_ordered_roots() is not safe here, because
* it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
@@ -4286,6 +4307,10 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
btrfs_flush_workqueue(fs_info->endio_write_workers);
/* Ordered extents for free space inodes. */
btrfs_flush_workqueue(fs_info->endio_freespace_worker);
+ /*
+ * Run delayed iputs in case an async reclaim worker is waiting for them
+ * to be run as mentioned above.
+ */
btrfs_run_delayed_iputs(fs_info);
cancel_work_sync(&fs_info->async_reclaim_work);
@@ -4293,6 +4318,18 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
cancel_work_sync(&fs_info->preempt_reclaim_work);
cancel_work_sync(&fs_info->em_shrinker_work);
+ /*
+ * Run delayed iputs again because an async reclaim worker may have
+ * added new ones if it was flushing delalloc:
+ *
+ * shrink_delalloc() -> btrfs_start_delalloc_roots() ->
+ * start_delalloc_inodes() -> btrfs_add_delayed_iput()
+ */
+ btrfs_run_delayed_iputs(fs_info);
+
+ /* There should be no more workload to generate new delayed iputs. */
+ set_bit(BTRFS_FS_STATE_NO_DELAYED_IPUT, &fs_info->fs_state);
+
/* Cancel or finish ongoing discard work */
btrfs_discard_cleanup(fs_info);
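The comments above describe an ordering contract: every worker that can still queue a delayed iput is flushed first, the queued iputs are drained, and only then is the "no more delayed iputs" state set. A compressed userspace sketch of that contract follows, with made-up names (the real code flushes several distinct btrfs workqueues):

#include <assert.h>
#include <stdbool.h>

struct teardown_sketch {
        int pending_iputs;      /* stand-in for the delayed iput list */
        bool no_more_iputs;     /* stand-in for BTRFS_FS_STATE_NO_DELAYED_IPUT */
};

/* A producer must never queue work after the flag is set. */
static void add_delayed_iput(struct teardown_sketch *t)
{
        assert(!t->no_more_iputs);
        t->pending_iputs++;
}

static void run_delayed_iputs(struct teardown_sketch *t)
{
        t->pending_iputs = 0;
}

static void close_sequence(struct teardown_sketch *t,
                           void (*flush_producers)(struct teardown_sketch *))
{
        flush_producers(t);      /* like flushing the delalloc/endio workqueues */
        run_delayed_iputs(t);    /* drain whatever those producers generated */
        t->no_more_iputs = true; /* from here on, add_delayed_iput() is a bug */
}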
@@ -4321,9 +4358,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
btrfs_err(fs_info, "commit super ret %d", ret);
}
- if (BTRFS_FS_ERROR(fs_info))
- btrfs_error_commit_super(fs_info);
-
kthread_stop(fs_info->transaction_kthread);
kthread_stop(fs_info->cleaner_kthread);
@@ -4331,7 +4365,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
if (btrfs_check_quota_leak(fs_info)) {
- WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ DEBUG_WARN("qgroup reserved space leaked");
btrfs_err(fs_info, "qgroup reserved space leaked");
}
@@ -4378,7 +4412,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
iput(fs_info->btree_inode);
btrfs_mapping_tree_free(fs_info);
- btrfs_close_devices(fs_info->fs_devices);
}
void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
@@ -4446,10 +4479,6 @@ static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
/* cleanup FS via transaction */
btrfs_cleanup_transaction(fs_info);
- mutex_lock(&fs_info->cleaner_mutex);
- btrfs_run_delayed_iputs(fs_info);
- mutex_unlock(&fs_info->cleaner_mutex);
-
down_write(&fs_info->cleanup_work_sem);
up_write(&fs_info->cleanup_work_sem);
}
@@ -4592,9 +4621,9 @@ static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
u64 start = 0;
u64 end;
- while (find_first_extent_bit(dirty_pages, start, &start, &end,
- mark, NULL)) {
- clear_extent_bits(dirty_pages, start, end, mark);
+ while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end,
+ mark, NULL)) {
+ btrfs_clear_extent_bit(dirty_pages, start, end, mark, NULL);
while (start <= end) {
eb = find_extent_buffer(fs_info, start);
start += fs_info->nodesize;
@@ -4627,14 +4656,14 @@ static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
* the same extent range.
*/
mutex_lock(&fs_info->unused_bg_unpin_mutex);
- if (!find_first_extent_bit(unpin, 0, &start, &end,
- EXTENT_DIRTY, &cached_state)) {
+ if (!btrfs_find_first_extent_bit(unpin, 0, &start, &end,
+ EXTENT_DIRTY, &cached_state)) {
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
break;
}
- clear_extent_dirty(unpin, start, end, &cached_state);
- free_extent_state(cached_state);
+ btrfs_clear_extent_dirty(unpin, start, end, &cached_state);
+ btrfs_free_extent_state(cached_state);
btrfs_error_unpin_extent_range(fs_info, start, end);
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
cond_resched();
@@ -4820,7 +4849,7 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
int btrfs_init_root_free_objectid(struct btrfs_root *root)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
struct extent_buffer *l;
struct btrfs_key search_key;
@@ -4836,14 +4865,13 @@ int btrfs_init_root_free_objectid(struct btrfs_root *root)
search_key.offset = (u64)-1;
ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
if (ret < 0)
- goto error;
- if (ret == 0) {
+ return ret;
+ if (unlikely(ret == 0)) {
/*
* Key with offset -1 found, there would have to exist a root
* with such id, but this is out of valid range.
*/
- ret = -EUCLEAN;
- goto error;
+ return -EUCLEAN;
}
if (path->slots[0] > 0) {
slot = path->slots[0] - 1;
@@ -4854,10 +4882,8 @@ int btrfs_init_root_free_objectid(struct btrfs_root *root)
} else {
root->free_objectid = BTRFS_FIRST_FREE_OBJECTID;
}
- ret = 0;
-error:
- btrfs_free_path(path);
- return ret;
+
+ return 0;
}
int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid)
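Several hunks above (btrfs_read_tree_root(), load_global_roots(), btrfs_init_root_free_objectid(), btrfs_get_name()) replace explicit btrfs_free_path() calls with BTRFS_PATH_AUTO_FREE(), which frees the path when it goes out of scope. The following plain C sketch illustrates the underlying scope-based cleanup idea; the kernel presumably builds its macro on its __free()/cleanup helpers, and all names below are placeholders:

#include <stdlib.h>

struct path_sketch {
        int slots[8];
};

static void free_path_sketch(struct path_sketch **p)
{
        free(*p);       /* free(NULL) is a no-op, like btrfs_free_path(NULL) */
}

#define PATH_AUTO_FREE(name) \
        struct path_sketch *name __attribute__((cleanup(free_path_sketch))) = NULL

static int search_something(void)
{
        PATH_AUTO_FREE(path);

        path = malloc(sizeof(*path));
        if (!path)
                return -1;

        /* Early returns below no longer need an explicit free. */
        return 0;
}       /* free_path_sketch(&path) runs automatically here */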
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index a7051e2570c1..5320da83d0cf 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -9,7 +9,8 @@
#include <linux/sizes.h>
#include <linux/compiler_types.h>
#include "ctree.h"
-#include "fs.h"
+#include "bio.h"
+#include "ordered-data.h"
struct block_device;
struct super_block;
@@ -58,9 +59,6 @@ int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
const struct btrfs_super_block *sb, int mirror_num);
int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount);
int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors);
-struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev);
-struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
- int copy_num, bool drop_cache);
int btrfs_commit_super(struct btrfs_fs_info *fs_info);
struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
const struct btrfs_key *key);
@@ -96,9 +94,6 @@ struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info);
/*
* This function is used to grab the root, and avoid it is freed when we
* access it. But it doesn't ensure that the tree is not dropped.
- *
- * If you want to ensure the whole tree is safe, you should use
- * fs_info->subvol_srcu
*/
static inline struct btrfs_root *btrfs_grab_root(struct btrfs_root *root)
{
@@ -112,12 +107,11 @@ static inline struct btrfs_root *btrfs_grab_root(struct btrfs_root *root)
void btrfs_put_root(struct btrfs_root *root);
void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
struct extent_buffer *buf);
-int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
- int atomic);
+int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, bool atomic);
int btrfs_read_extent_buffer(struct extent_buffer *buf,
const struct btrfs_tree_parent_check *check);
-blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio);
+int btree_csum_one_bio(struct btrfs_bio *bbio);
int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index e2b22bea348a..230d9326b685 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -23,7 +23,11 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
int type;
if (parent && (len < BTRFS_FID_SIZE_CONNECTABLE)) {
- *max_len = BTRFS_FID_SIZE_CONNECTABLE;
+ if (btrfs_root_id(BTRFS_I(inode)->root) !=
+ btrfs_root_id(BTRFS_I(parent)->root))
+ *max_len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
+ else
+ *max_len = BTRFS_FID_SIZE_CONNECTABLE;
return FILEID_INVALID;
} else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) {
*max_len = BTRFS_FID_SIZE_NON_CONNECTABLE;
@@ -45,6 +49,8 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
parent_root_id = btrfs_root_id(BTRFS_I(parent)->root);
if (parent_root_id != fid->root_objectid) {
+ if (*max_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT)
+ return FILEID_INVALID;
fid->parent_root_objectid = parent_root_id;
len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
type = FILEID_BTRFS_WITH_PARENT_ROOT;
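The export.c fix above makes the "buffer too small" path report the size that will actually be needed: when the parent lives in a different subvolume the handle must carry the parent root id, so the larger connectable-root size is requested. A sketch of that length negotiation with placeholder constants (the real BTRFS_FID_SIZE_* values come from the on-disk fid layout):

#include <stdbool.h>

/* Placeholder handle sizes, in 32-bit words, for illustration only. */
#define FID_NON_CONNECTABLE     2
#define FID_CONNECTABLE         4
#define FID_CONNECTABLE_ROOT    5
#define FILEID_INVALID_SKETCH   0xff

static int encode_fh_sketch(bool want_parent, bool parent_in_other_root,
                            int *max_len)
{
        int need;

        if (!want_parent)
                need = FID_NON_CONNECTABLE;
        else if (parent_in_other_root)
                need = FID_CONNECTABLE_ROOT;
        else
                need = FID_CONNECTABLE;

        if (*max_len < need) {
                /* Tell the caller how much room a retry actually needs. */
                *max_len = need;
                return FILEID_INVALID_SKETCH;
        }

        *max_len = need;
        return 0;       /* the real code fills the handle and returns its type */
}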
@@ -75,7 +81,7 @@ struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
{
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_root *root;
- struct inode *inode;
+ struct btrfs_inode *inode;
if (objectid < BTRFS_FIRST_FREE_OBJECTID)
return ERR_PTR(-ESTALE);
@@ -89,12 +95,12 @@ struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
if (IS_ERR(inode))
return ERR_CAST(inode);
- if (generation != 0 && generation != inode->i_generation) {
- iput(inode);
+ if (generation != 0 && generation != inode->vfs_inode.i_generation) {
+ iput(&inode->vfs_inode);
return ERR_PTR(-ESTALE);
}
- return d_obtain_alias(inode);
+ return d_obtain_alias(&inode->vfs_inode);
}
static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh,
@@ -145,9 +151,10 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
struct dentry *btrfs_get_parent(struct dentry *child)
{
- struct inode *dir = d_inode(child);
- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
- struct btrfs_root *root = BTRFS_I(dir)->root;
+ struct btrfs_inode *dir = BTRFS_I(d_inode(child));
+ struct btrfs_inode *inode;
+ struct btrfs_root *root = dir->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_root_ref *ref;
@@ -159,13 +166,13 @@ struct dentry *btrfs_get_parent(struct dentry *child)
if (!path)
return ERR_PTR(-ENOMEM);
- if (btrfs_ino(BTRFS_I(dir)) == BTRFS_FIRST_FREE_OBJECTID) {
+ if (btrfs_ino(dir) == BTRFS_FIRST_FREE_OBJECTID) {
key.objectid = btrfs_root_id(root);
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1;
root = fs_info->tree_root;
} else {
- key.objectid = btrfs_ino(BTRFS_I(dir));
+ key.objectid = btrfs_ino(dir);
key.type = BTRFS_INODE_REF_KEY;
key.offset = (u64)-1;
}
@@ -173,7 +180,7 @@ struct dentry *btrfs_get_parent(struct dentry *child)
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto fail;
- if (ret == 0) {
+ if (unlikely(ret == 0)) {
/*
* Key with offset of -1 found, there would have to exist an
* inode with such number or a root with such id.
@@ -210,7 +217,11 @@ struct dentry *btrfs_get_parent(struct dentry *child)
found_key.offset, 0);
}
- return d_obtain_alias(btrfs_iget(key.objectid, root));
+ inode = btrfs_iget(key.objectid, root);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
+ return d_obtain_alias(&inode->vfs_inode);
fail:
btrfs_free_path(path);
return ERR_PTR(ret);
@@ -219,11 +230,11 @@ fail:
static int btrfs_get_name(struct dentry *parent, char *name,
struct dentry *child)
{
- struct inode *inode = d_inode(child);
- struct inode *dir = d_inode(parent);
- struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- struct btrfs_path *path;
- struct btrfs_root *root = BTRFS_I(dir)->root;
+ struct btrfs_inode *inode = BTRFS_I(d_inode(child));
+ struct btrfs_inode *dir = BTRFS_I(d_inode(parent));
+ struct btrfs_root *root = dir->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_inode_ref *iref;
struct btrfs_root_ref *rref;
struct extent_buffer *leaf;
@@ -233,37 +244,34 @@ static int btrfs_get_name(struct dentry *parent, char *name,
int ret;
u64 ino;
- if (!S_ISDIR(dir->i_mode))
+ if (!S_ISDIR(dir->vfs_inode.i_mode))
return -EINVAL;
- ino = btrfs_ino(BTRFS_I(inode));
+ ino = btrfs_ino(inode);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
if (ino == BTRFS_FIRST_FREE_OBJECTID) {
- key.objectid = btrfs_root_id(BTRFS_I(inode)->root);
+ key.objectid = btrfs_root_id(inode->root);
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1;
root = fs_info->tree_root;
} else {
key.objectid = ino;
- key.offset = btrfs_ino(BTRFS_I(dir));
key.type = BTRFS_INODE_REF_KEY;
+ key.offset = btrfs_ino(dir);
}
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0) {
- btrfs_free_path(path);
return ret;
} else if (ret > 0) {
- if (ino == BTRFS_FIRST_FREE_OBJECTID) {
+ if (ino == BTRFS_FIRST_FREE_OBJECTID)
path->slots[0]--;
- } else {
- btrfs_free_path(path);
+ else
return -ENOENT;
- }
}
leaf = path->nodes[0];
@@ -280,7 +288,6 @@ static int btrfs_get_name(struct dentry *parent, char *name,
}
read_extent_buffer(leaf, name, name_ptr, name_len);
- btrfs_free_path(path);
/*
* have to add the null termination to make sure that reconnect_path
diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
index 6d08c100b01d..bb2ca1c9c7b0 100644
--- a/fs/btrfs/extent-io-tree.c
+++ b/fs/btrfs/extent-io-tree.c
@@ -42,8 +42,9 @@ static inline void btrfs_extent_state_leak_debug_check(void)
struct extent_state *state;
while (!list_empty(&states)) {
- state = list_entry(states.next, struct extent_state, leak_list);
- pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
+ state = list_first_entry(&states, struct extent_state, leak_list);
+ btrfs_err(NULL,
+ "state leak: start %llu end %llu state %u in tree %d refs %d",
state->start, state->end, state->state,
extent_state_in_tree(state),
refcount_read(&state->refs));
@@ -59,13 +60,12 @@ static inline void __btrfs_debug_check_extent_io_range(const char *caller,
struct extent_io_tree *tree,
u64 start, u64 end)
{
- const struct btrfs_inode *inode;
+ const struct btrfs_inode *inode = tree->inode;
u64 isize;
if (tree->owner != IO_TREE_INODE_IO)
return;
- inode = extent_io_tree_to_inode_const(tree);
isize = i_size_read(&inode->vfs_inode);
if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
btrfs_debug_rl(inode->root->fs_info,
@@ -80,25 +80,8 @@ static inline void __btrfs_debug_check_extent_io_range(const char *caller,
#define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0)
#endif
-
-/*
- * The only tree allowed to set the inode is IO_TREE_INODE_IO.
- */
-static bool is_inode_io_tree(const struct extent_io_tree *tree)
-{
- return tree->owner == IO_TREE_INODE_IO;
-}
-
-/* Return the inode if it's valid for the given tree, otherwise NULL. */
-struct btrfs_inode *extent_io_tree_to_inode(struct extent_io_tree *tree)
-{
- if (tree->owner == IO_TREE_INODE_IO)
- return tree->inode;
- return NULL;
-}
-
/* Read-only access to the inode. */
-const struct btrfs_inode *extent_io_tree_to_inode_const(const struct extent_io_tree *tree)
+const struct btrfs_inode *btrfs_extent_io_tree_to_inode(const struct extent_io_tree *tree)
{
if (tree->owner == IO_TREE_INODE_IO)
return tree->inode;
@@ -106,15 +89,15 @@ const struct btrfs_inode *extent_io_tree_to_inode_const(const struct extent_io_t
}
/* For read-only access to fs_info. */
-const struct btrfs_fs_info *extent_io_tree_to_fs_info(const struct extent_io_tree *tree)
+const struct btrfs_fs_info *btrfs_extent_io_tree_to_fs_info(const struct extent_io_tree *tree)
{
if (tree->owner == IO_TREE_INODE_IO)
return tree->inode->root->fs_info;
return tree->fs_info;
}
-void extent_io_tree_init(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *tree, unsigned int owner)
+void btrfs_extent_io_tree_init(struct btrfs_fs_info *fs_info,
+ struct extent_io_tree *tree, unsigned int owner)
{
tree->state = RB_ROOT;
spin_lock_init(&tree->lock);
@@ -129,7 +112,7 @@ void extent_io_tree_init(struct btrfs_fs_info *fs_info,
* aren't any waiters on any extent state record (EXTENT_LOCK_BITS are never
* set on any extent state when calling this function).
*/
-void extent_io_tree_release(struct extent_io_tree *tree)
+void btrfs_extent_io_tree_release(struct extent_io_tree *tree)
{
struct rb_root root;
struct extent_state *state;
@@ -148,7 +131,7 @@ void extent_io_tree_release(struct extent_io_tree *tree)
* (see wait_extent_bit()).
*/
ASSERT(!waitqueue_active(&state->wq));
- free_extent_state(state);
+ btrfs_free_extent_state(state);
cond_resched_lock(&tree->lock);
}
/*
@@ -176,7 +159,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
btrfs_leak_debug_add_state(state);
refcount_set(&state->refs, 1);
init_waitqueue_head(&state->wq);
- trace_alloc_extent_state(state, mask, _RET_IP_);
+ trace_btrfs_alloc_extent_state(state, mask, _RET_IP_);
return state;
}
@@ -188,14 +171,14 @@ static struct extent_state *alloc_extent_state_atomic(struct extent_state *preal
return prealloc;
}
-void free_extent_state(struct extent_state *state)
+void btrfs_free_extent_state(struct extent_state *state)
{
if (!state)
return;
if (refcount_dec_and_test(&state->refs)) {
WARN_ON(extent_state_in_tree(state));
btrfs_leak_debug_del_state(state);
- trace_free_extent_state(state, _RET_IP_);
+ trace_btrfs_free_extent_state(state, _RET_IP_);
kmem_cache_free(extent_state_cache, state);
}
}
@@ -222,38 +205,34 @@ static inline struct extent_state *next_state(struct extent_state *state)
{
struct rb_node *next = rb_next(&state->rb_node);
- if (next)
- return rb_entry(next, struct extent_state, rb_node);
- else
- return NULL;
+ return rb_entry_safe(next, struct extent_state, rb_node);
}
static inline struct extent_state *prev_state(struct extent_state *state)
{
struct rb_node *next = rb_prev(&state->rb_node);
- if (next)
- return rb_entry(next, struct extent_state, rb_node);
- else
- return NULL;
+ return rb_entry_safe(next, struct extent_state, rb_node);
}
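next_state() and prev_state() above collapse their NULL checks into rb_entry_safe(), which tolerates a NULL node. A userspace sketch of the idea using a plain container_of; the kernel macro is spelled differently (and also avoids evaluating its argument twice), so treat the names here as stand-ins:

#include <stddef.h>

struct rb_node_sketch {
        struct rb_node_sketch *left, *right;
};

struct state_sketch {
        unsigned long start, end;
        struct rb_node_sketch rb_node;
};

#define container_of_sketch(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/*
 * Like rb_entry()/container_of(), but a NULL node yields NULL instead of
 * doing pointer arithmetic on NULL, so callers can drop the if/else.
 */
#define rb_entry_safe_sketch(ptr, type, member) \
        ((ptr) ? container_of_sketch(ptr, type, member) : (type *)NULL)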
/*
- * Search @tree for an entry that contains @offset. Such entry would have
- * entry->start <= offset && entry->end >= offset.
+ * Search @tree for an entry that contains @offset or if none exists for the
+ * first entry that starts and ends after that offset.
*
* @tree: the tree to search
- * @offset: offset that should fall within an entry in @tree
+ * @offset: search offset
* @node_ret: pointer where new node should be anchored (used when inserting an
* entry in the tree)
* @parent_ret: points to entry which would have been the parent of the entry,
* containing @offset
*
- * Return a pointer to the entry that contains @offset byte address and don't change
- * @node_ret and @parent_ret.
+ * Return a pointer to the entry that contains @offset byte address.
+ *
+ * If no such entry exists, return the first entry that starts and ends after
+ * @offset if one exists, otherwise NULL.
*
- * If no such entry exists, return pointer to entry that ends before @offset
- * and fill parameters @node_ret and @parent_ret, ie. does not return NULL.
+ * If the returned entry starts at @offset, then @node_ret and @parent_ret
+ * aren't changed.
*/
static inline struct extent_state *tree_search_for_insert(struct extent_io_tree *tree,
u64 offset,
@@ -282,7 +261,11 @@ static inline struct extent_state *tree_search_for_insert(struct extent_io_tree
if (parent_ret)
*parent_ret = prev;
- /* Search neighbors until we find the first one past the end */
+ /*
+ * Return either the current entry if it contains offset (it ends after
+ * or at offset) or the first entry that starts and ends after offset if
+ * one exists, or NULL.
+ */
while (entry && offset > entry->end)
entry = next_state(entry);
@@ -346,12 +329,12 @@ static inline struct extent_state *tree_search(struct extent_io_tree *tree, u64
return tree_search_for_insert(tree, offset, NULL, NULL);
}
-static void extent_io_tree_panic(const struct extent_io_tree *tree,
- const struct extent_state *state,
- const char *opname,
- int err)
+static void __cold extent_io_tree_panic(const struct extent_io_tree *tree,
+ const struct extent_state *state,
+ const char *opname,
+ int err)
{
- btrfs_panic(extent_io_tree_to_fs_info(tree), err,
+ btrfs_panic(btrfs_extent_io_tree_to_fs_info(tree), err,
"extent io tree error on %s state start %llu end %llu",
opname, state->start, state->end);
}
@@ -362,13 +345,12 @@ static void merge_prev_state(struct extent_io_tree *tree, struct extent_state *s
prev = prev_state(state);
if (prev && prev->end == state->start - 1 && prev->state == state->state) {
- if (is_inode_io_tree(tree))
- btrfs_merge_delalloc_extent(extent_io_tree_to_inode(tree),
- state, prev);
+ if (tree->owner == IO_TREE_INODE_IO)
+ btrfs_merge_delalloc_extent(tree->inode, state, prev);
state->start = prev->start;
rb_erase(&prev->rb_node, &tree->state);
RB_CLEAR_NODE(&prev->rb_node);
- free_extent_state(prev);
+ btrfs_free_extent_state(prev);
}
}
@@ -378,13 +360,12 @@ static void merge_next_state(struct extent_io_tree *tree, struct extent_state *s
next = next_state(state);
if (next && next->start == state->end + 1 && next->state == state->state) {
- if (is_inode_io_tree(tree))
- btrfs_merge_delalloc_extent(extent_io_tree_to_inode(tree),
- state, next);
+ if (tree->owner == IO_TREE_INODE_IO)
+ btrfs_merge_delalloc_extent(tree->inode, state, next);
state->end = next->end;
rb_erase(&next->rb_node, &tree->state);
RB_CLEAR_NODE(&next->rb_node);
- free_extent_state(next);
+ btrfs_free_extent_state(next);
}
}
@@ -413,8 +394,8 @@ static void set_state_bits(struct extent_io_tree *tree,
u32 bits_to_set = bits & ~EXTENT_CTLBITS;
int ret;
- if (is_inode_io_tree(tree))
- btrfs_set_delalloc_extent(extent_io_tree_to_inode(tree), state, bits);
+ if (tree->owner == IO_TREE_INODE_IO)
+ btrfs_set_delalloc_extent(tree->inode, state, bits);
ret = add_extent_changeset(state, bits_to_set, changeset, 1);
BUG_ON(ret < 0);
@@ -459,10 +440,9 @@ static struct extent_state *insert_state(struct extent_io_tree *tree,
if (state->end < entry->start) {
if (try_merge && end == entry->start &&
state->state == entry->state) {
- if (is_inode_io_tree(tree))
- btrfs_merge_delalloc_extent(
- extent_io_tree_to_inode(tree),
- state, entry);
+ if (tree->owner == IO_TREE_INODE_IO)
+ btrfs_merge_delalloc_extent(tree->inode,
+ state, entry);
entry->start = state->start;
merge_prev_state(tree, entry);
state->state = 0;
@@ -472,10 +452,9 @@ static struct extent_state *insert_state(struct extent_io_tree *tree,
} else if (state->end > entry->end) {
if (try_merge && entry->end == start &&
state->state == entry->state) {
- if (is_inode_io_tree(tree))
- btrfs_merge_delalloc_extent(
- extent_io_tree_to_inode(tree),
- state, entry);
+ if (tree->owner == IO_TREE_INODE_IO)
+ btrfs_merge_delalloc_extent(tree->inode,
+ state, entry);
entry->end = state->end;
merge_next_state(tree, entry);
state->state = 0;
@@ -527,9 +506,8 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
struct rb_node *parent = NULL;
struct rb_node **node;
- if (is_inode_io_tree(tree))
- btrfs_split_delalloc_extent(extent_io_tree_to_inode(tree), orig,
- split);
+ if (tree->owner == IO_TREE_INODE_IO)
+ btrfs_split_delalloc_extent(tree->inode, orig, split);
prealloc->start = orig->start;
prealloc->end = split - 1;
@@ -549,7 +527,7 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
} else if (prealloc->end > entry->end) {
node = &(*node)->rb_right;
} else {
- free_extent_state(prealloc);
+ btrfs_free_extent_state(prealloc);
return -EEXIST;
}
}
@@ -561,6 +539,18 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
}
/*
+ * Use this during tree iteration to avoid doing a next node search when it's
+ * not needed (the current record ends at or after the target range's end).
+ */
+static inline struct extent_state *next_search_state(struct extent_state *state, u64 end)
+{
+ if (state->end < end)
+ return next_state(state);
+
+ return NULL;
+}
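
The conversions below use this helper roughly as in the following sketch (illustrative only; walk_range is a made-up name written as if it lived next to the other static helpers in this file). The walk over [start, end] stops without touching the rb-tree once the current record already covers the end of the target range:

/* Illustrative only: visit every extent state overlapping [start, end]. */
static void walk_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	struct extent_state *state;

	spin_lock(&tree->lock);
	state = tree_search(tree, start);
	while (state && state->start <= end) {
		/* ... process 'state' ... */

		/* Returns NULL once state->end >= end, avoiding a pointless rb_next(). */
		state = next_search_state(state, end);
	}
	spin_unlock(&tree->lock);
}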
+
+/*
* Utility function to clear some bits in an extent state struct. It will
* optionally wake up anyone waiting on this state (wake == 1).
*
@@ -569,16 +559,15 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
*/
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
struct extent_state *state,
- u32 bits, int wake,
+ u32 bits, int wake, u64 end,
struct extent_changeset *changeset)
{
struct extent_state *next;
u32 bits_to_clear = bits & ~EXTENT_CTLBITS;
int ret;
- if (is_inode_io_tree(tree))
- btrfs_clear_delalloc_extent(extent_io_tree_to_inode(tree), state,
- bits);
+ if (tree->owner == IO_TREE_INODE_IO)
+ btrfs_clear_delalloc_extent(tree->inode, state, bits);
ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
BUG_ON(ret < 0);
@@ -586,17 +575,17 @@ static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
if (wake)
wake_up(&state->wq);
if (state->state == 0) {
- next = next_state(state);
+ next = next_search_state(state, end);
if (extent_state_in_tree(state)) {
rb_erase(&state->rb_node, &tree->state);
RB_CLEAR_NODE(&state->rb_node);
- free_extent_state(state);
+ btrfs_free_extent_state(state);
} else {
WARN_ON(1);
}
} else {
merge_state(tree, state);
- next = next_state(state);
+ next = next_search_state(state, end);
}
return next;
}
@@ -620,18 +609,18 @@ static void set_gfp_mask_from_bits(u32 *bits, gfp_t *mask)
*
* This takes the tree lock, and returns 0 on success and < 0 on error.
*/
-int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, struct extent_state **cached_state,
- struct extent_changeset *changeset)
+int btrfs_clear_extent_bit_changeset(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_state **cached_state,
+ struct extent_changeset *changeset)
{
struct extent_state *state;
struct extent_state *cached;
struct extent_state *prealloc = NULL;
u64 last_end;
- int err;
- int clear = 0;
- int wake;
- int delete = (bits & EXTENT_CLEAR_ALL_BITS);
+ int ret = 0;
+ bool clear;
+ bool wake;
+ const bool delete = (bits & EXTENT_CLEAR_ALL_BITS);
gfp_t mask;
set_gfp_mask_from_bits(&bits, &mask);
@@ -644,9 +633,8 @@ int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
if (bits & EXTENT_DELALLOC)
bits |= EXTENT_NORESERVE;
- wake = ((bits & EXTENT_LOCK_BITS) ? 1 : 0);
- if (bits & (EXTENT_LOCK_BITS | EXTENT_BOUNDARY))
- clear = 1;
+ wake = (bits & EXTENT_LOCK_BITS);
+ clear = (bits & (EXTENT_LOCK_BITS | EXTENT_BOUNDARY));
again:
if (!prealloc) {
/*
@@ -676,7 +664,7 @@ again:
goto hit_next;
}
if (clear)
- free_extent_state(cached);
+ btrfs_free_extent_state(cached);
}
/* This search will find the extents that end after our range starts. */
@@ -691,7 +679,7 @@ hit_next:
/* The state doesn't have the wanted bits, go ahead. */
if (!(state->state & bits)) {
- state = next_state(state);
+ state = next_search_state(state, end);
goto next;
}
@@ -714,18 +702,24 @@ hit_next:
prealloc = alloc_extent_state_atomic(prealloc);
if (!prealloc)
goto search_again;
- err = split_state(tree, state, prealloc, start);
- if (err)
- extent_io_tree_panic(tree, state, "split", err);
-
+ ret = split_state(tree, state, prealloc, start);
prealloc = NULL;
- if (err)
+ if (ret) {
+ extent_io_tree_panic(tree, state, "split", ret);
goto out;
+ }
if (state->end <= end) {
- state = clear_state_bit(tree, state, bits, wake, changeset);
+ state = clear_state_bit(tree, state, bits, wake, end,
+ changeset);
goto next;
}
- goto search_again;
+ if (need_resched())
+ goto search_again;
+ /*
+ * Fallthrough and try atomic extent state allocation if needed.
+	 * If it fails we'll jump to 'search_again', retry the allocation
+ * in non-atomic mode and start the search again.
+ */
}
/*
* | ---- desired range ---- |
@@ -736,30 +730,31 @@ hit_next:
prealloc = alloc_extent_state_atomic(prealloc);
if (!prealloc)
goto search_again;
- err = split_state(tree, state, prealloc, end + 1);
- if (err)
- extent_io_tree_panic(tree, state, "split", err);
+ ret = split_state(tree, state, prealloc, end + 1);
+ if (ret) {
+ extent_io_tree_panic(tree, state, "split", ret);
+ prealloc = NULL;
+ goto out;
+ }
if (wake)
wake_up(&state->wq);
- clear_state_bit(tree, prealloc, bits, wake, changeset);
+ clear_state_bit(tree, prealloc, bits, wake, end, changeset);
prealloc = NULL;
goto out;
}
- state = clear_state_bit(tree, state, bits, wake, changeset);
+ state = clear_state_bit(tree, state, bits, wake, end, changeset);
next:
- if (last_end == (u64)-1)
+ if (last_end >= end)
goto out;
start = last_end + 1;
- if (start <= end && state && !need_resched())
+ if (state && !need_resched())
goto hit_next;
search_again:
- if (start > end)
- goto out;
spin_unlock(&tree->lock);
if (gfpflags_allow_blocking(mask))
cond_resched();
@@ -767,10 +762,9 @@ search_again:
out:
spin_unlock(&tree->lock);
- if (prealloc)
- free_extent_state(prealloc);
+ btrfs_free_extent_state(prealloc);
- return 0;
+ return ret;
}
@@ -820,7 +814,7 @@ process_node:
schedule();
spin_lock(&tree->lock);
finish_wait(&state->wq, &wait);
- free_extent_state(state);
+ btrfs_free_extent_state(state);
goto again;
}
start = state->end + 1;
@@ -838,7 +832,7 @@ out:
if (cached_state && *cached_state) {
state = *cached_state;
*cached_state = NULL;
- free_extent_state(state);
+ btrfs_free_extent_state(state);
}
spin_unlock(&tree->lock);
}
@@ -877,7 +871,7 @@ static struct extent_state *find_first_extent_bit_state(struct extent_io_tree *t
*/
state = tree_search(tree, start);
while (state) {
- if (state->end >= start && (state->state & bits))
+ if (state->state & bits)
return state;
state = next_state(state);
}
@@ -892,9 +886,9 @@ static struct extent_state *find_first_extent_bit_state(struct extent_io_tree *t
* Return true if we find something, and update @start_ret and @end_ret.
* Return false if we found nothing.
*/
-bool find_first_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, u32 bits,
- struct extent_state **cached_state)
+bool btrfs_find_first_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, u32 bits,
+ struct extent_state **cached_state)
{
struct extent_state *state;
bool ret = false;
@@ -914,13 +908,13 @@ bool find_first_extent_bit(struct extent_io_tree *tree, u64 start,
* again. If we haven't found any, clear as well since
* it's now useless.
*/
- free_extent_state(*cached_state);
+ btrfs_free_extent_state(*cached_state);
*cached_state = NULL;
if (state)
goto got_it;
goto out;
}
- free_extent_state(*cached_state);
+ btrfs_free_extent_state(*cached_state);
*cached_state = NULL;
}
@@ -952,14 +946,17 @@ out:
* contiguous area for given bits. We will search to the first bit we find, and
* then walk down the tree until we find a non-contiguous area. The area
* returned will be the full contiguous area with the bits set.
+ *
+ * Returns true if we found a range with the given bits set, in which case
+ * @start_ret and @end_ret are updated, or false if no range was found.
*/
-int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, u32 bits)
+bool btrfs_find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, u32 bits)
{
struct extent_state *state;
- int ret = 1;
+ bool ret = false;
- ASSERT(!btrfs_fs_incompat(extent_io_tree_to_fs_info(tree), NO_HOLES));
+ ASSERT(!btrfs_fs_incompat(btrfs_extent_io_tree_to_fs_info(tree), NO_HOLES));
spin_lock(&tree->lock);
state = find_first_extent_bit_state(tree, start, bits);
@@ -971,7 +968,7 @@ int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
break;
*end_ret = state->end;
}
- ret = 0;
+ ret = true;
}
spin_unlock(&tree->lock);
return ret;
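
A hypothetical caller of the now bool-returning helper could look like the sketch below (report_dirty_area is a made-up name, not part of the patch; the function used to return 0 when a range was found and now returns true):

/* Sketch: probe for a contiguous dirty area starting at 'cursor'. */
static void report_dirty_area(struct extent_io_tree *tree, u64 cursor)
{
	u64 found_start, found_end;

	if (btrfs_find_contiguous_extent_bit(tree, cursor, &found_start,
					     &found_end, EXTENT_DIRTY))
		pr_info("dirty area [%llu, %llu]\n", found_start, found_end);
	else
		pr_info("no dirty area at or after %llu\n", cursor);
}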
@@ -1046,11 +1043,11 @@ out:
*
 * [start, end] is inclusive. This takes the tree lock.
*/
-static int __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, u64 *failed_start,
- struct extent_state **failed_state,
- struct extent_state **cached_state,
- struct extent_changeset *changeset)
+static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, u64 *failed_start,
+ struct extent_state **failed_state,
+ struct extent_state **cached_state,
+ struct extent_changeset *changeset)
{
struct extent_state *state;
struct extent_state *prealloc = NULL;
@@ -1129,12 +1126,11 @@ hit_next:
set_state_bits(tree, state, bits, changeset);
cache_state(state, cached_state);
merge_state(tree, state);
- if (last_end == (u64)-1)
+ if (last_end >= end)
goto out;
start = last_end + 1;
state = next_state(state);
- if (start < end && state && state->start == start &&
- !need_resched())
+ if (state && state->start == start && !need_resched())
goto hit_next;
goto search_again;
}
@@ -1186,12 +1182,11 @@ hit_next:
set_state_bits(tree, state, bits, changeset);
cache_state(state, cached_state);
merge_state(tree, state);
- if (last_end == (u64)-1)
+ if (last_end >= end)
goto out;
start = last_end + 1;
state = next_state(state);
- if (start < end && state && state->start == start &&
- !need_resched())
+ if (state && state->start == start && !need_resched())
goto hit_next;
}
goto search_again;
@@ -1204,14 +1199,8 @@ hit_next:
* extent we found.
*/
if (state->start > start) {
- u64 this_end;
struct extent_state *inserted_state;
- if (end < last_start)
- this_end = end;
- else
- this_end = last_start - 1;
-
prealloc = alloc_extent_state_atomic(prealloc);
if (!prealloc)
goto search_again;
@@ -1221,17 +1210,38 @@ hit_next:
* extent.
*/
prealloc->start = start;
- prealloc->end = this_end;
+ if (end < last_start)
+ prealloc->end = end;
+ else
+ prealloc->end = last_start - 1;
+
inserted_state = insert_state(tree, prealloc, bits, changeset);
if (IS_ERR(inserted_state)) {
ret = PTR_ERR(inserted_state);
extent_io_tree_panic(tree, prealloc, "insert", ret);
+ goto out;
}
cache_state(inserted_state, cached_state);
if (inserted_state == prealloc)
prealloc = NULL;
- start = this_end + 1;
+ start = inserted_state->end + 1;
+
+ /* Beyond target range, stop. */
+ if (start > end)
+ goto out;
+
+ if (need_resched())
+ goto search_again;
+
+ state = next_search_state(inserted_state, end);
+ /*
+ * If there's a next state, whether contiguous or not, we don't
+	 * need to unlock and start the search again. If it's not contiguous
+	 * we will end up back here, allocate a prealloc state and insert it.
+ */
+ if (state)
+ goto hit_next;
goto search_again;
}
/*
@@ -1252,8 +1262,11 @@ hit_next:
if (!prealloc)
goto search_again;
ret = split_state(tree, state, prealloc, end + 1);
- if (ret)
+ if (ret) {
extent_io_tree_panic(tree, state, "split", ret);
+ prealloc = NULL;
+ goto out;
+ }
set_state_bits(tree, prealloc, bits, changeset);
cache_state(prealloc, cached_state);
@@ -1272,18 +1285,16 @@ search_again:
out:
spin_unlock(&tree->lock);
- if (prealloc)
- free_extent_state(prealloc);
+ btrfs_free_extent_state(prealloc);
return ret;
}
-int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, struct extent_state **cached_state)
+int btrfs_set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_state **cached_state)
{
- return __set_extent_bit(tree, start, end, bits, NULL, NULL,
- cached_state, NULL);
+ return set_extent_bit(tree, start, end, bits, NULL, NULL, cached_state, NULL);
}
/*
@@ -1304,9 +1315,9 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
*
* All allocations are done with GFP_NOFS.
*/
-int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, u32 clear_bits,
- struct extent_state **cached_state)
+int btrfs_convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, u32 clear_bits,
+ struct extent_state **cached_state)
{
struct extent_state *state;
struct extent_state *prealloc = NULL;
@@ -1374,12 +1385,11 @@ hit_next:
if (state->start == start && state->end <= end) {
set_state_bits(tree, state, bits, NULL);
cache_state(state, cached_state);
- state = clear_state_bit(tree, state, clear_bits, 0, NULL);
- if (last_end == (u64)-1)
+ state = clear_state_bit(tree, state, clear_bits, 0, end, NULL);
+ if (last_end >= end)
goto out;
start = last_end + 1;
- if (start < end && state && state->start == start &&
- !need_resched())
+ if (state && state->start == start && !need_resched())
goto hit_next;
goto search_again;
}
@@ -1406,20 +1416,19 @@ hit_next:
goto out;
}
ret = split_state(tree, state, prealloc, start);
- if (ret)
- extent_io_tree_panic(tree, state, "split", ret);
prealloc = NULL;
- if (ret)
+ if (ret) {
+ extent_io_tree_panic(tree, state, "split", ret);
goto out;
+ }
if (state->end <= end) {
set_state_bits(tree, state, bits, NULL);
cache_state(state, cached_state);
- state = clear_state_bit(tree, state, clear_bits, 0, NULL);
- if (last_end == (u64)-1)
+ state = clear_state_bit(tree, state, clear_bits, 0, end, NULL);
+ if (last_end >= end)
goto out;
start = last_end + 1;
- if (start < end && state && state->start == start &&
- !need_resched())
+ if (state && state->start == start && !need_resched())
goto hit_next;
}
goto search_again;
@@ -1432,14 +1441,8 @@ hit_next:
* extent we found.
*/
if (state->start > start) {
- u64 this_end;
struct extent_state *inserted_state;
- if (end < last_start)
- this_end = end;
- else
- this_end = last_start - 1;
-
prealloc = alloc_extent_state_atomic(prealloc);
if (!prealloc) {
ret = -ENOMEM;
@@ -1451,16 +1454,37 @@ hit_next:
* extent.
*/
prealloc->start = start;
- prealloc->end = this_end;
+ if (end < last_start)
+ prealloc->end = end;
+ else
+ prealloc->end = last_start - 1;
+
inserted_state = insert_state(tree, prealloc, bits, NULL);
if (IS_ERR(inserted_state)) {
ret = PTR_ERR(inserted_state);
extent_io_tree_panic(tree, prealloc, "insert", ret);
+ goto out;
}
cache_state(inserted_state, cached_state);
if (inserted_state == prealloc)
prealloc = NULL;
- start = this_end + 1;
+ start = inserted_state->end + 1;
+
+ /* Beyond target range, stop. */
+ if (start > end)
+ goto out;
+
+ if (need_resched())
+ goto search_again;
+
+ state = next_search_state(inserted_state, end);
+ /*
+ * If there's a next state, whether contiguous or not, we don't
+	 * need to unlock and start the search again. If it's not contiguous
+	 * we will end up back here, allocate a prealloc state and insert it.
+ */
+ if (state)
+ goto hit_next;
goto search_again;
}
/*
@@ -1477,12 +1501,15 @@ hit_next:
}
ret = split_state(tree, state, prealloc, end + 1);
- if (ret)
+ if (ret) {
extent_io_tree_panic(tree, state, "split", ret);
+ prealloc = NULL;
+ goto out;
+ }
set_state_bits(tree, prealloc, bits, NULL);
cache_state(prealloc, cached_state);
- clear_state_bit(tree, prealloc, clear_bits, 0, NULL);
+ clear_state_bit(tree, prealloc, clear_bits, 0, end, NULL);
prealloc = NULL;
goto out;
}
@@ -1497,8 +1524,7 @@ search_again:
out:
spin_unlock(&tree->lock);
- if (prealloc)
- free_extent_state(prealloc);
+ btrfs_free_extent_state(prealloc);
return ret;
}
@@ -1518,8 +1544,8 @@ out:
* spans (last_range_end, end of device]. In this case it's up to the caller to
* trim @end_ret to the appropriate size.
*/
-void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, u32 bits)
+void btrfs_find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, u32 bits)
{
struct extent_state *state;
struct extent_state *prev = NULL, *next = NULL;
@@ -1636,10 +1662,10 @@ out:
* all given bits set. If the returned number of bytes is greater than zero
* then @start is updated with the offset of the first byte with the bits set.
*/
-u64 count_range_bits(struct extent_io_tree *tree,
- u64 *start, u64 search_end, u64 max_bytes,
- u32 bits, int contig,
- struct extent_state **cached_state)
+u64 btrfs_count_range_bits(struct extent_io_tree *tree,
+ u64 *start, u64 search_end, u64 max_bytes,
+ u32 bits, bool contig,
+ struct extent_state **cached_state)
{
struct extent_state *state = NULL;
struct extent_state *cached;
@@ -1710,7 +1736,7 @@ search:
}
if (cached_state) {
- free_extent_state(*cached_state);
+ btrfs_free_extent_state(*cached_state);
*cached_state = state;
if (state)
refcount_inc(&state->refs);
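
As a rough usage sketch for the updated signature (count_delalloc is a made-up helper, not part of the patch): 'contig' is now a bool, and any state cached through @cached_state carries a reference that the caller must drop:

/* Sketch: count delalloc bytes anywhere in [range_start, range_end]. */
static u64 count_delalloc(struct extent_io_tree *tree, u64 range_start, u64 range_end)
{
	struct extent_state *cached = NULL;
	u64 start = range_start;
	u64 bytes;

	/* false: the counted bytes do not have to form one contiguous area. */
	bytes = btrfs_count_range_bits(tree, &start, range_end, U64_MAX,
				       EXTENT_DELALLOC, false, &cached);
	/* NULL-safe, and 'cached' stays NULL if nothing was found in the range. */
	btrfs_free_extent_state(cached);
	return bytes;
}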
@@ -1724,16 +1750,16 @@ search:
/*
* Check if the single @bit exists in the given range.
*/
-bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit)
+bool btrfs_test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit)
{
- struct extent_state *state = NULL;
+ struct extent_state *state;
bool bitset = false;
ASSERT(is_power_of_2(bit));
spin_lock(&tree->lock);
state = tree_search(tree, start);
- while (state && start <= end) {
+ while (state) {
if (state->start > end)
break;
@@ -1742,9 +1768,7 @@ bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32
break;
}
- /* If state->end is (u64)-1, start will overflow to 0 */
- start = state->end + 1;
- if (start > end || start == 0)
+ if (state->end >= end)
break;
state = next_state(state);
}
@@ -1752,16 +1776,51 @@ bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32
return bitset;
}
+void btrfs_get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits,
+ struct extent_state **cached_state)
+{
+ struct extent_state *state;
+
+ /*
+ * The cached state is currently mandatory and not used to start the
+ * search, only to cache the first state record found in the range.
+ */
+ ASSERT(cached_state != NULL);
+ ASSERT(*cached_state == NULL);
+
+ *bits = 0;
+
+ spin_lock(&tree->lock);
+ state = tree_search(tree, start);
+ if (state && state->start < end) {
+ *cached_state = state;
+ refcount_inc(&state->refs);
+ }
+ while (state) {
+ if (state->start > end)
+ break;
+
+ *bits |= state->state;
+
+ if (state->end >= end)
+ break;
+
+ state = next_state(state);
+ }
+ spin_unlock(&tree->lock);
+}
+
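
A caller of the new helper might look roughly like this (range_has_delalloc is a made-up name; per the assertions above, the cached state pointer is mandatory, must point to NULL on entry, and on return holds a reference that the caller drops):

/* Sketch: check which bits are set anywhere in [start, end]. */
static bool range_has_delalloc(struct extent_io_tree *tree, u64 start, u64 end)
{
	struct extent_state *cached = NULL;
	u32 range_bits;

	btrfs_get_range_bits(tree, start, end, &range_bits, &cached);
	/* Drop the reference, if any, taken on the first state in the range. */
	btrfs_free_extent_state(cached);

	return range_bits & EXTENT_DELALLOC;
}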
/*
* Check if the whole range [@start,@end] contains the single @bit set.
*/
-bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
- struct extent_state *cached)
+bool btrfs_test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
+ struct extent_state *cached)
{
- struct extent_state *state = NULL;
+ struct extent_state *state;
bool bitset = true;
ASSERT(is_power_of_2(bit));
+ ASSERT(start < end);
spin_lock(&tree->lock);
if (cached && extent_state_in_tree(cached) && cached->start <= start &&
@@ -1769,30 +1828,22 @@ bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
state = cached;
else
state = tree_search(tree, start);
- while (state && start <= end) {
+ while (state) {
if (state->start > start) {
bitset = false;
break;
}
- if (state->start > end)
- break;
-
if ((state->state & bit) == 0) {
bitset = false;
break;
}
- if (state->end == (u64)-1)
+ if (state->end >= end)
break;
- /*
- * Last entry (if state->end is (u64)-1 and overflow happens),
- * or next entry starts after the range.
- */
+	/* The next state, if any, must start right after this one ends. */
start = state->end + 1;
- if (start > end || start == 0)
- break;
state = next_state(state);
}
@@ -1804,8 +1855,8 @@ bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
}
/* Wrappers around set/clear extent bit */
-int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, struct extent_changeset *changeset)
+int btrfs_set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_changeset *changeset)
{
/*
* We don't support EXTENT_LOCK_BITS yet, as current changeset will
@@ -1814,11 +1865,11 @@ int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
*/
ASSERT(!(bits & EXTENT_LOCK_BITS));
- return __set_extent_bit(tree, start, end, bits, NULL, NULL, NULL, changeset);
+ return set_extent_bit(tree, start, end, bits, NULL, NULL, NULL, changeset);
}
-int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, struct extent_changeset *changeset)
+int btrfs_clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_changeset *changeset)
{
/*
* Don't support EXTENT_LOCK_BITS case, same reason as
@@ -1826,20 +1877,20 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
*/
ASSERT(!(bits & EXTENT_LOCK_BITS));
- return __clear_extent_bit(tree, start, end, bits, NULL, changeset);
+ return btrfs_clear_extent_bit_changeset(tree, start, end, bits, NULL, changeset);
}
-bool __try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
- struct extent_state **cached)
+bool btrfs_try_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_state **cached)
{
- int err;
+ int ret;
u64 failed_start;
- err = __set_extent_bit(tree, start, end, bits, &failed_start,
- NULL, cached, NULL);
- if (err == -EEXIST) {
+ ret = set_extent_bit(tree, start, end, bits, &failed_start, NULL, cached, NULL);
+ if (ret == -EEXIST) {
if (failed_start > start)
- clear_extent_bit(tree, start, failed_start - 1, bits, cached);
+ btrfs_clear_extent_bit(tree, start, failed_start - 1,
+ bits, cached);
return 0;
}
return 1;
@@ -1849,35 +1900,54 @@ bool __try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits
 * Either insert or lock the state struct between start and end, waiting until
 * the range can be locked if some other task currently holds it locked.
*/
-int __lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
- struct extent_state **cached_state)
+int btrfs_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
+ struct extent_state **cached_state)
{
struct extent_state *failed_state = NULL;
- int err;
+ int ret;
u64 failed_start;
- err = __set_extent_bit(tree, start, end, bits, &failed_start,
- &failed_state, cached_state, NULL);
- while (err == -EEXIST) {
+ ret = set_extent_bit(tree, start, end, bits, &failed_start,
+ &failed_state, cached_state, NULL);
+ while (ret == -EEXIST) {
if (failed_start != start)
- clear_extent_bit(tree, start, failed_start - 1,
- bits, cached_state);
+ btrfs_clear_extent_bit(tree, start, failed_start - 1,
+ bits, cached_state);
wait_extent_bit(tree, failed_start, end, bits, &failed_state);
- err = __set_extent_bit(tree, start, end, bits,
- &failed_start, &failed_state,
- cached_state, NULL);
+ ret = set_extent_bit(tree, start, end, bits, &failed_start,
+ &failed_state, cached_state, NULL);
}
- return err;
+ return ret;
+}
+
+/*
+ * Get the extent state that follows the given extent state.
+ * This is meant to be used in a context where we know no other tasks can
+ * concurrently modify the tree.
+ */
+struct extent_state *btrfs_next_extent_state(struct extent_io_tree *tree,
+ struct extent_state *state)
+{
+ struct extent_state *next;
+
+ spin_lock(&tree->lock);
+ ASSERT(extent_state_in_tree(state));
+ next = next_state(state);
+ if (next)
+ refcount_inc(&next->refs);
+ spin_unlock(&tree->lock);
+
+ return next;
}
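
A possible iteration pattern built on this helper, assuming the caller already holds a reference on the starting state and that nothing else modifies the tree meanwhile (walk_states is a made-up name, not part of the patch):

/* Sketch: walk forward from 'state', consuming the caller's reference on it. */
static void walk_states(struct extent_io_tree *tree, struct extent_state *state)
{
	while (state) {
		struct extent_state *next;

		/* ... inspect 'state' ... */

		next = btrfs_next_extent_state(tree, state);	/* takes a ref on 'next' */
		btrfs_free_extent_state(state);			/* drop our ref on 'state' */
		state = next;
	}
}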
-void __cold extent_state_free_cachep(void)
+void __cold btrfs_extent_state_free_cachep(void)
{
btrfs_extent_state_leak_debug_check();
kmem_cache_destroy(extent_state_cache);
}
-int __init extent_state_init_cachep(void)
+int __init btrfs_extent_state_init_cachep(void)
{
extent_state_cache = kmem_cache_create("btrfs_extent_state",
sizeof(struct extent_state), 0, 0,
diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
index 6ffef1cd37c1..6f07b965e8da 100644
--- a/fs/btrfs/extent-io-tree.h
+++ b/fs/btrfs/extent-io-tree.h
@@ -17,10 +17,10 @@ struct btrfs_inode;
/* Bits for the extent state */
enum {
ENUM_BIT(EXTENT_DIRTY),
- ENUM_BIT(EXTENT_UPTODATE),
ENUM_BIT(EXTENT_LOCKED),
ENUM_BIT(EXTENT_DIO_LOCKED),
- ENUM_BIT(EXTENT_NEW),
+ ENUM_BIT(EXTENT_DIRTY_LOG1),
+ ENUM_BIT(EXTENT_DIRTY_LOG2),
ENUM_BIT(EXTENT_DELALLOC),
ENUM_BIT(EXTENT_DEFRAG),
ENUM_BIT(EXTENT_BOUNDARY),
@@ -39,6 +39,11 @@ enum {
*/
ENUM_BIT(EXTENT_DELALLOC_NEW),
/*
+ * Mark that a range is being locked for finishing an ordered extent.
+ * Used together with EXTENT_LOCKED.
+ */
+ ENUM_BIT(EXTENT_FINISHING_ORDERED),
+ /*
* When an ordered extent successfully completes for a region marked as
* a new delalloc range, use this flag when clearing a new delalloc
* range to indicate that the VFS' inode number of bytes should be
@@ -130,117 +135,110 @@ struct extent_state {
#endif
};
-struct btrfs_inode *extent_io_tree_to_inode(struct extent_io_tree *tree);
-const struct btrfs_inode *extent_io_tree_to_inode_const(const struct extent_io_tree *tree);
-const struct btrfs_fs_info *extent_io_tree_to_fs_info(const struct extent_io_tree *tree);
+const struct btrfs_inode *btrfs_extent_io_tree_to_inode(const struct extent_io_tree *tree);
+const struct btrfs_fs_info *btrfs_extent_io_tree_to_fs_info(const struct extent_io_tree *tree);
-void extent_io_tree_init(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *tree, unsigned int owner);
-void extent_io_tree_release(struct extent_io_tree *tree);
-int __lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
- struct extent_state **cached);
-bool __try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
- struct extent_state **cached);
+void btrfs_extent_io_tree_init(struct btrfs_fs_info *fs_info,
+ struct extent_io_tree *tree, unsigned int owner);
+void btrfs_extent_io_tree_release(struct extent_io_tree *tree);
+int btrfs_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
+ struct extent_state **cached);
+bool btrfs_try_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_state **cached);
-static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
- struct extent_state **cached)
+static inline int btrfs_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+ struct extent_state **cached)
{
- return __lock_extent(tree, start, end, EXTENT_LOCKED, cached);
+ return btrfs_lock_extent_bits(tree, start, end, EXTENT_LOCKED, cached);
}
-static inline bool try_lock_extent(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached)
+static inline bool btrfs_try_lock_extent(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
{
- return __try_lock_extent(tree, start, end, EXTENT_LOCKED, cached);
+ return btrfs_try_lock_extent_bits(tree, start, end, EXTENT_LOCKED, cached);
}
-int __init extent_state_init_cachep(void);
-void __cold extent_state_free_cachep(void);
-
-u64 count_range_bits(struct extent_io_tree *tree,
- u64 *start, u64 search_end,
- u64 max_bytes, u32 bits, int contig,
- struct extent_state **cached_state);
-
-void free_extent_state(struct extent_state *state);
-bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
- struct extent_state *cached_state);
-bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
-int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, struct extent_changeset *changeset);
-int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, struct extent_state **cached,
- struct extent_changeset *changeset);
-
-static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 end, u32 bits,
- struct extent_state **cached)
-{
- return __clear_extent_bit(tree, start, end, bits, cached, NULL);
-}
+int __init btrfs_extent_state_init_cachep(void);
+void __cold btrfs_extent_state_free_cachep(void);
-static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
- struct extent_state **cached)
-{
- return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached, NULL);
-}
+u64 btrfs_count_range_bits(struct extent_io_tree *tree,
+ u64 *start, u64 search_end,
+ u64 max_bytes, u32 bits, bool contig,
+ struct extent_state **cached_state);
-static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
- u64 end, u32 bits)
+void btrfs_free_extent_state(struct extent_state *state);
+bool btrfs_test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
+ struct extent_state *cached_state);
+bool btrfs_test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
+void btrfs_get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits,
+ struct extent_state **cached_state);
+int btrfs_clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_changeset *changeset);
+int btrfs_clear_extent_bit_changeset(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_state **cached,
+ struct extent_changeset *changeset);
+
+static inline int btrfs_clear_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 end, u32 bits,
+ struct extent_state **cached)
{
- return clear_extent_bit(tree, start, end, bits, NULL);
+ return btrfs_clear_extent_bit_changeset(tree, start, end, bits, cached, NULL);
}
-int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, struct extent_changeset *changeset);
-int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, struct extent_state **cached_state);
-
-static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached_state)
+static inline int btrfs_unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+ struct extent_state **cached)
{
- return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
- cached_state, NULL);
+ return btrfs_clear_extent_bit_changeset(tree, start, end, EXTENT_LOCKED,
+ cached, NULL);
}
-static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached)
+int btrfs_set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_changeset *changeset);
+int btrfs_set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_state **cached_state);
+
+static inline int btrfs_clear_extent_dirty(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
{
- return clear_extent_bit(tree, start, end,
- EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, cached);
+ return btrfs_clear_extent_bit(tree, start, end,
+ EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING, cached);
}
-int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, u32 clear_bits,
- struct extent_state **cached_state);
-
-bool find_first_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, u32 bits,
- struct extent_state **cached_state);
-void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, u32 bits);
-int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, u32 bits);
+int btrfs_convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, u32 clear_bits,
+ struct extent_state **cached_state);
+
+bool btrfs_find_first_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, u32 bits,
+ struct extent_state **cached_state);
+void btrfs_find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, u32 bits);
+bool btrfs_find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, u32 bits);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
u64 *end, u64 max_bytes,
struct extent_state **cached_state);
-static inline int lock_dio_extent(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached)
+static inline int btrfs_lock_dio_extent(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
{
- return __lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached);
+ return btrfs_lock_extent_bits(tree, start, end, EXTENT_DIO_LOCKED, cached);
}
-static inline bool try_lock_dio_extent(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached)
+static inline bool btrfs_try_lock_dio_extent(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
{
- return __try_lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached);
+ return btrfs_try_lock_extent_bits(tree, start, end, EXTENT_DIO_LOCKED, cached);
}
-static inline int unlock_dio_extent(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached)
+static inline int btrfs_unlock_dio_extent(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
{
- return __clear_extent_bit(tree, start, end, EXTENT_DIO_LOCKED, cached, NULL);
+ return btrfs_clear_extent_bit_changeset(tree, start, end, EXTENT_DIO_LOCKED,
+ cached, NULL);
}
+struct extent_state *btrfs_next_extent_state(struct extent_io_tree *tree,
+ struct extent_state *state);
+
#endif /* BTRFS_EXTENT_IO_TREE_H */
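
For reference, the renamed wrappers keep the usual lock/operate/unlock pattern, roughly as in this sketch (with_range_locked is a made-up name; handing the cached state back to the unlock call lets it skip a second tree search):

/* Sketch: run some work on [start, end] with the range locked. */
static int with_range_locked(struct extent_io_tree *tree, u64 start, u64 end)
{
	struct extent_state *cached = NULL;
	int ret;

	ret = btrfs_lock_extent(tree, start, end, &cached);
	if (ret)
		return ret;

	/* ... operate on [start, end] while it is locked ... */

	/* Clears the lock bits and drops the cached state's reference. */
	return btrfs_unlock_extent(tree, start, end, &cached);
}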
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 412e318e4a22..e4cae34620d1 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -40,13 +40,14 @@
#include "orphan.h"
#include "tree-checker.h"
#include "raid-stripe-tree.h"
+#include "delayed-inode.h"
#undef SCRAMBLE_DELAYED_REFS
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *href,
- struct btrfs_delayed_ref_node *node,
+ const struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
struct extent_buffer *leaf,
@@ -56,12 +57,12 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
u64 flags, u64 owner, u64 offset,
struct btrfs_key *ins, int ref_mod, u64 oref_root);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_node *node,
+ const struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op);
-static int find_next_key(struct btrfs_path *path, int level,
+static int find_next_key(const struct btrfs_path *path, int level,
struct btrfs_key *key);
-static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
+static int block_group_bits(const struct btrfs_block_group *cache, u64 bits)
{
return (cache->flags & bits) == bits;
}
@@ -70,20 +71,17 @@ static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
struct btrfs_root *root = btrfs_extent_root(fs_info, start);
- int ret;
struct btrfs_key key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
key.objectid = start;
- key.offset = len;
key.type = BTRFS_EXTENT_ITEM_KEY;
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- btrfs_free_path(path);
- return ret;
+ key.offset = len;
+ return btrfs_search_slot(NULL, root, &key, path, 0, 0);
}
/*
@@ -103,7 +101,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root;
struct btrfs_delayed_ref_head *head;
struct btrfs_delayed_ref_root *delayed_refs;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
u64 num_refs;
u64 extent_flags;
@@ -125,16 +123,16 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
search_again:
key.objectid = bytenr;
- key.offset = offset;
if (metadata)
key.type = BTRFS_METADATA_ITEM_KEY;
else
key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = offset;
extent_root = btrfs_extent_root(fs_info, bytenr);
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
if (ret < 0)
- goto out_free;
+ return ret;
if (ret > 0 && key.type == BTRFS_METADATA_ITEM_KEY) {
if (path->slots[0]) {
@@ -159,7 +157,7 @@ search_again:
"unexpected extent item size, has %u expect >= %zu",
item_size, sizeof(*ei));
btrfs_abort_transaction(trans, ret);
- goto out_free;
+ return ret;
}
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
@@ -167,10 +165,10 @@ search_again:
if (unlikely(num_refs == 0)) {
ret = -EUCLEAN;
btrfs_err(fs_info,
- "unexpected zero reference count for extent item (%llu %u %llu)",
- key.objectid, key.type, key.offset);
+ "unexpected zero reference count for extent item " BTRFS_KEY_FMT,
+ BTRFS_KEY_FMT_VALUE(&key));
btrfs_abort_transaction(trans, ret);
- goto out_free;
+ return ret;
}
extent_flags = btrfs_extent_flags(leaf, ei);
owner = btrfs_get_extent_owner_root(fs_info, leaf, path->slots[0]);
@@ -216,8 +214,7 @@ search_again:
*flags = extent_flags;
if (owning_root)
*owning_root = owner;
-out_free:
- btrfs_free_path(path);
+
return ret;
}
@@ -329,11 +326,11 @@ out_free:
/*
* is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
- * is_data == BTRFS_REF_TYPE_DATA, data type is requiried,
+ * is_data == BTRFS_REF_TYPE_DATA, data type is required,
* is_data == BTRFS_REF_TYPE_ANY, either type is OK.
*/
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
- struct btrfs_extent_inline_ref *iref,
+ const struct btrfs_extent_inline_ref *iref,
enum btrfs_inline_ref_type is_data)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
@@ -405,23 +402,23 @@ u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
return ((u64)high_crc << 31) ^ (u64)low_crc;
}
-static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
- struct btrfs_extent_data_ref *ref)
+static u64 hash_extent_data_ref_item(const struct extent_buffer *leaf,
+ const struct btrfs_extent_data_ref *ref)
{
return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
btrfs_extent_data_ref_objectid(leaf, ref),
btrfs_extent_data_ref_offset(leaf, ref));
}
-static int match_extent_data_ref(struct extent_buffer *leaf,
- struct btrfs_extent_data_ref *ref,
- u64 root_objectid, u64 owner, u64 offset)
+static bool match_extent_data_ref(const struct extent_buffer *leaf,
+ const struct btrfs_extent_data_ref *ref,
+ u64 root_objectid, u64 owner, u64 offset)
{
if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
btrfs_extent_data_ref_offset(leaf, ref) != offset)
- return 0;
- return 1;
+ return false;
+ return true;
}
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
@@ -501,7 +498,7 @@ fail:
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
- struct btrfs_delayed_ref_node *node,
+ const struct btrfs_delayed_ref_node *node,
u64 bytenr)
{
struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
@@ -570,7 +567,6 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
}
}
- btrfs_mark_buffer_dirty(trans, leaf);
ret = 0;
fail:
btrfs_release_path(path);
@@ -602,8 +598,8 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
num_refs = btrfs_shared_data_ref_count(leaf, ref2);
} else {
btrfs_err(trans->fs_info,
- "unrecognized backref key (%llu %u %llu)",
- key.objectid, key.type, key.offset);
+ "unrecognized backref key " BTRFS_KEY_FMT,
+ BTRFS_KEY_FMT_VALUE(&key));
btrfs_abort_transaction(trans, -EUCLEAN);
return -EUCLEAN;
}
@@ -618,18 +614,17 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
- btrfs_mark_buffer_dirty(trans, leaf);
}
return ret;
}
-static noinline u32 extent_data_ref_count(struct btrfs_path *path,
- struct btrfs_extent_inline_ref *iref)
+static noinline u32 extent_data_ref_count(const struct btrfs_path *path,
+ const struct btrfs_extent_inline_ref *iref)
{
struct btrfs_key key;
struct extent_buffer *leaf;
- struct btrfs_extent_data_ref *ref1;
- struct btrfs_shared_data_ref *ref2;
+ const struct btrfs_extent_data_ref *ref1;
+ const struct btrfs_shared_data_ref *ref2;
u32 num_refs = 0;
int type;
@@ -644,10 +639,10 @@ static noinline u32 extent_data_ref_count(struct btrfs_path *path,
type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
ASSERT(type != BTRFS_REF_TYPE_INVALID);
if (type == BTRFS_EXTENT_DATA_REF_KEY) {
- ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
+ ref1 = (const struct btrfs_extent_data_ref *)(&iref->offset);
num_refs = btrfs_extent_data_ref_count(leaf, ref1);
} else {
- ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
+ ref2 = (const struct btrfs_shared_data_ref *)(iref + 1);
num_refs = btrfs_shared_data_ref_count(leaf, ref2);
}
} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
@@ -690,7 +685,7 @@ static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
- struct btrfs_delayed_ref_node *node,
+ const struct btrfs_delayed_ref_node *node,
u64 bytenr)
{
struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
@@ -728,7 +723,7 @@ static inline int extent_ref_type(u64 parent, u64 owner)
return type;
}
-static int find_next_key(struct btrfs_path *path, int level,
+static int find_next_key(const struct btrfs_path *path, int level,
struct btrfs_key *key)
{
@@ -794,7 +789,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
want = extent_ref_type(parent, owner);
if (insert) {
extra_size = btrfs_extent_inline_ref_size(want);
- path->search_for_extension = 1;
+ path->search_for_extension = true;
} else
extra_size = -1;
@@ -885,7 +880,7 @@ again:
ptr += btrfs_extent_inline_ref_size(type);
continue;
}
- if (type == BTRFS_REF_TYPE_INVALID) {
+ if (unlikely(type == BTRFS_REF_TYPE_INVALID)) {
ret = -EUCLEAN;
goto out;
}
@@ -960,7 +955,7 @@ again:
if (!path->keep_locks) {
btrfs_release_path(path);
- path->keep_locks = 1;
+ path->keep_locks = true;
goto again;
}
@@ -981,11 +976,11 @@ out_no_entry:
*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
if (path->keep_locks) {
- path->keep_locks = 0;
+ path->keep_locks = false;
btrfs_unlock_up_safe(path, 1);
}
if (insert)
- path->search_for_extension = 0;
+ path->search_for_extension = false;
return ret;
}
@@ -1050,7 +1045,6 @@ void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
} else {
btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
}
- btrfs_mark_buffer_dirty(trans, leaf);
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
@@ -1195,7 +1189,6 @@ static noinline_for_stack int update_inline_extent_backref(
item_size -= size;
btrfs_truncate_item(trans, path, item_size, 1);
}
- btrfs_mark_buffer_dirty(trans, leaf);
return 0;
}
@@ -1218,7 +1211,7 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
* We're adding refs to a tree block we already own, this
* should not happen at all.
*/
- if (owner < BTRFS_FIRST_FREE_OBJECTID) {
+ if (unlikely(owner < BTRFS_FIRST_FREE_OBJECTID)) {
btrfs_print_leaf(path->nodes[0]);
btrfs_crit(trans->fs_info,
"adding refs to an existing tree ref, bytenr %llu num_bytes %llu root_objectid %llu slot %u",
@@ -1260,12 +1253,12 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
{
int j, ret = 0;
u64 bytes_left, end;
- u64 aligned_start = ALIGN(start, 1 << SECTOR_SHIFT);
+ u64 aligned_start = ALIGN(start, SECTOR_SIZE);
/* Adjust the range to be aligned to 512B sectors if necessary. */
if (start != aligned_start) {
len -= aligned_start - start;
- len = round_down(len, 1 << SECTOR_SHIFT);
+ len = round_down(len, SECTOR_SIZE);
start = aligned_start;
}
@@ -1488,10 +1481,10 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
*
*/
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_node *node,
+ const struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_extent_item *item;
struct btrfs_key key;
@@ -1512,7 +1505,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
node->parent, node->ref_root, owner,
offset, refs_to_add, extent_op);
if ((ret < 0 && ret != -EAGAIN) || !ret)
- goto out;
+ return ret;
/*
* Ok we had -EAGAIN which means we didn't have space to insert and
@@ -1527,24 +1520,24 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
if (extent_op)
__run_delayed_extent_op(extent_op, leaf, item);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
/* now insert the actual backref */
- if (owner < BTRFS_FIRST_FREE_OBJECTID)
+ if (owner < BTRFS_FIRST_FREE_OBJECTID) {
ret = insert_tree_block_ref(trans, path, node, bytenr);
- else
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
+ } else {
ret = insert_extent_data_ref(trans, path, node, bytenr);
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
+ }
- if (ret)
- btrfs_abort_transaction(trans, ret);
-out:
- btrfs_free_path(path);
return ret;
}
static void free_head_ref_squota_rsv(struct btrfs_fs_info *fs_info,
- struct btrfs_delayed_ref_head *href)
+ const struct btrfs_delayed_ref_head *href)
{
u64 root = href->owning_root;
@@ -1553,7 +1546,7 @@ static void free_head_ref_squota_rsv(struct btrfs_fs_info *fs_info,
* where it has already been unset.
*/
if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE ||
- !href->is_data || !is_fstree(root))
+ !href->is_data || !btrfs_is_fstree(root))
return;
btrfs_qgroup_free_refroot(fs_info, root, href->reserved_bytes,
@@ -1562,7 +1555,7 @@ static void free_head_ref_squota_rsv(struct btrfs_fs_info *fs_info,
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *href,
- struct btrfs_delayed_ref_node *node,
+ const struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
bool insert_reserved)
{
@@ -1630,13 +1623,13 @@ static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
}
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_head *head,
+ const struct btrfs_delayed_ref_head *head,
struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root;
struct btrfs_key key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_extent_item *ei;
struct extent_buffer *leaf;
u32 item_size;
@@ -1667,7 +1660,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
again:
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret < 0) {
- goto out;
+ return ret;
} else if (ret > 0) {
if (metadata) {
if (path->slots[0] > 0) {
@@ -1684,8 +1677,8 @@ again:
metadata = 0;
key.objectid = head->bytenr;
- key.offset = head->num_bytes;
key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = head->num_bytes;
goto again;
}
} else {
@@ -1693,7 +1686,7 @@ again:
btrfs_err(fs_info,
"missing extent item for extent %llu num_bytes %llu level %d",
head->bytenr, head->num_bytes, head->level);
- goto out;
+ return ret;
}
}
@@ -1706,21 +1699,18 @@ again:
"unexpected extent item size, has %u expect >= %zu",
item_size, sizeof(*ei));
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
__run_delayed_extent_op(extent_op, leaf, ei);
- btrfs_mark_buffer_dirty(trans, leaf);
-out:
- btrfs_free_path(path);
return ret;
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *href,
- struct btrfs_delayed_ref_node *node,
+ const struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
bool insert_reserved)
{
@@ -1767,7 +1757,7 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *href,
- struct btrfs_delayed_ref_node *node,
+ const struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
bool insert_reserved)
{
@@ -1775,7 +1765,7 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
if (TRANS_ABORTED(trans)) {
if (insert_reserved) {
- btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
+ btrfs_pin_extent(trans, node->bytenr, node->num_bytes);
free_head_ref_squota_rsv(trans->fs_info, href);
}
return 0;
@@ -1794,7 +1784,7 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
else
BUG();
if (ret && insert_reserved)
- btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
+ btrfs_pin_extent(trans, node->bytenr, node->num_bytes);
if (ret < 0)
btrfs_err(trans->fs_info,
"failed to run delayed ref for logical %llu num_bytes %llu type %u action %u ref_mod %d: %d",
@@ -1803,30 +1793,6 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
return ret;
}
-static inline struct btrfs_delayed_ref_node *
-select_delayed_ref(struct btrfs_delayed_ref_head *head)
-{
- struct btrfs_delayed_ref_node *ref;
-
- if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
- return NULL;
-
- /*
- * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
- * This is to prevent a ref count from going down to zero, which deletes
- * the extent item from the extent tree, when there still are references
- * to add, which would fail because they would not find the extent item.
- */
- if (!list_empty(&head->ref_add_list))
- return list_first_entry(&head->ref_add_list,
- struct btrfs_delayed_ref_node, add_list);
-
- ref = rb_entry(rb_first_cached(&head->ref_tree),
- struct btrfs_delayed_ref_node, ref_node);
- ASSERT(list_empty(&ref->add_list));
- return ref;
-}
-
static struct btrfs_delayed_extent_op *cleanup_extent_op(
struct btrfs_delayed_ref_head *head)
{
@@ -1925,7 +1891,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
spin_unlock(&delayed_refs->lock);
if (head->must_insert_reserved) {
- btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1);
+ btrfs_pin_extent(trans, head->bytenr, head->num_bytes);
if (head->is_data) {
struct btrfs_root *csum_root;
@@ -1959,7 +1925,7 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
lockdep_assert_held(&locked_ref->mutex);
lockdep_assert_held(&locked_ref->lock);
- while ((ref = select_delayed_ref(locked_ref))) {
+ while ((ref = btrfs_select_delayed_ref(locked_ref))) {
if (ref->seq &&
btrfs_check_delayed_seq(fs_info, ref->seq)) {
spin_unlock(&locked_ref->lock);
@@ -2043,7 +2009,12 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
delayed_refs = &trans->transaction->delayed_refs;
if (min_bytes == 0) {
- max_count = delayed_refs->num_heads_ready;
+ /*
+ * We may be subject to a harmless race if some task is
+ * concurrently adding or removing a delayed ref, so silence
+ * KCSAN and similar tools.
+ */
+ max_count = data_race(delayed_refs->num_heads_ready);
min_bytes = U64_MAX;
}
@@ -2187,7 +2158,7 @@ again:
delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif
ret = __btrfs_run_delayed_refs(trans, min_bytes);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -2230,10 +2201,11 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
return ret;
}
-static noinline int check_delayed_ref(struct btrfs_root *root,
+static noinline int check_delayed_ref(struct btrfs_inode *inode,
struct btrfs_path *path,
- u64 objectid, u64 offset, u64 bytenr)
+ u64 offset, u64 bytenr)
{
+ struct btrfs_root *root = inode->root;
struct btrfs_delayed_ref_head *head;
struct btrfs_delayed_ref_node *ref;
struct btrfs_delayed_ref_root *delayed_refs;
@@ -2307,7 +2279,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
* then we have a cross reference.
*/
if (ref->ref_root != btrfs_root_id(root) ||
- ref_owner != objectid || ref_offset != offset) {
+ ref_owner != btrfs_ino(inode) || ref_offset != offset) {
ret = 1;
break;
}
@@ -2318,11 +2290,53 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
return ret;
}
-static noinline int check_committed_ref(struct btrfs_root *root,
+/*
+ * Check if there are references for a data extent other than the one belonging
+ * to the given inode and offset.
+ *
+ * @inode: The only inode we expect to find associated with the data extent.
+ * @path: A path to use for searching the extent tree.
+ * @offset: The only offset we expect to find associated with the data extent.
+ * @bytenr: The logical address of the data extent.
+ *
+ * When the extent does not have any other references other than the one we
+ * expect to find, we always return a value of 0 with the path having a locked
+ * leaf that contains the extent's extent item - this is necessary to ensure
+ * we don't race with a task running delayed references, and our caller must
+ * have such a path when calling check_delayed_ref() - it must lock a delayed
+ * ref head while holding the leaf locked. In case the extent item is not found
+ * in the extent tree, we return -ENOENT with the path having the leaf (locked)
+ * where the extent item should be, in order to prevent races with another task
+ * running delayed references, so that we don't miss any reference when calling
+ * check_delayed_ref().
+ *
+ * Note: this may return false positives, and this is because we want to be
+ * quick here as we're called in write paths (when flushing delalloc and
+ * in the direct IO write path). For example we can have an extent with
+ * a single reference but that reference is not inlined, or we may have
+ * many references in the extent tree but we also have delayed references
+ * that cancel all the references except the one for our inode and offset,
+ * but it would be expensive to do such checks and complex due to all
+ * locking to avoid races between the checks and flushing delayed refs,
+ * plus non-inline references may be located on leaves other than the one
+ * that contains the extent item in the extent tree. The important thing
+ * here is to not return false negatives and that the false positives are
+ * not very common.
+ *
+ * Returns: 0 if there are no cross references and with the path having a locked
+ * leaf from the extent tree that contains the extent's extent item.
+ *
+ * 1 if there are cross references (false positives can happen).
+ *
+ * < 0 in case of an error. In case of -ENOENT the leaf in the extent
+ * tree where the extent item should be located is read locked and
+ * accessible in the given path.
+ */
+static noinline int check_committed_ref(struct btrfs_inode *inode,
struct btrfs_path *path,
- u64 objectid, u64 offset, u64 bytenr,
- bool strict)
+ u64 offset, u64 bytenr)
{
+ struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
struct extent_buffer *leaf;
@@ -2336,40 +2350,37 @@ static noinline int check_committed_ref(struct btrfs_root *root,
int ret;
key.objectid = bytenr;
- key.offset = (u64)-1;
key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = (u64)-1;
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
if (ret < 0)
- goto out;
- if (ret == 0) {
+ return ret;
+ if (unlikely(ret == 0)) {
/*
* Key with offset -1 found, there would have to exist an extent
* item with such offset, but this is out of the valid range.
*/
- ret = -EUCLEAN;
- goto out;
+ return -EUCLEAN;
}
- ret = -ENOENT;
if (path->slots[0] == 0)
- goto out;
+ return -ENOENT;
path->slots[0]--;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
- goto out;
+ return -ENOENT;
- ret = 1;
item_size = btrfs_item_size(leaf, path->slots[0]);
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
expected_size = sizeof(*ei) + btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY);
/* No inline refs; we need to bail before checking for owner ref. */
if (item_size == sizeof(*ei))
- goto out;
+ return 1;
/* Check for an owner ref; skip over it to the real inline refs. */
iref = (struct btrfs_extent_inline_ref *)(ei + 1);
@@ -2377,56 +2388,69 @@ static noinline int check_committed_ref(struct btrfs_root *root,
if (btrfs_fs_incompat(fs_info, SIMPLE_QUOTA) && type == BTRFS_EXTENT_OWNER_REF_KEY) {
expected_size += btrfs_extent_inline_ref_size(BTRFS_EXTENT_OWNER_REF_KEY);
iref = (struct btrfs_extent_inline_ref *)(iref + 1);
+ type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
}
/* If extent item has more than 1 inline ref then it's shared */
if (item_size != expected_size)
- goto out;
-
- /*
- * If extent created before last snapshot => it's shared unless the
- * snapshot has been deleted. Use the heuristic if strict is false.
- */
- if (!strict &&
- (btrfs_extent_generation(leaf, ei) <=
- btrfs_root_last_snapshot(&root->root_item)))
- goto out;
+ return 1;
/* If this extent has SHARED_DATA_REF then it's shared */
- type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
if (type != BTRFS_EXTENT_DATA_REF_KEY)
- goto out;
+ return 1;
ref = (struct btrfs_extent_data_ref *)(&iref->offset);
if (btrfs_extent_refs(leaf, ei) !=
btrfs_extent_data_ref_count(leaf, ref) ||
btrfs_extent_data_ref_root(leaf, ref) != btrfs_root_id(root) ||
- btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
+ btrfs_extent_data_ref_objectid(leaf, ref) != btrfs_ino(inode) ||
btrfs_extent_data_ref_offset(leaf, ref) != offset)
- goto out;
+ return 1;
- ret = 0;
-out:
- return ret;
+ return 0;
}
-int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
- u64 bytenr, bool strict, struct btrfs_path *path)
+int btrfs_cross_ref_exist(struct btrfs_inode *inode, u64 offset,
+ u64 bytenr, struct btrfs_path *path)
{
int ret;
do {
- ret = check_committed_ref(root, path, objectid,
- offset, bytenr, strict);
+ ret = check_committed_ref(inode, path, offset, bytenr);
if (ret && ret != -ENOENT)
goto out;
- ret = check_delayed_ref(root, path, objectid, offset, bytenr);
- } while (ret == -EAGAIN);
+ /*
+ * The path must have a locked leaf from the extent tree where
+ * the extent item for our extent is located, in case it exists,
+ * or where it should be located in case it doesn't exist yet
+ * because it's new and its delayed ref was not yet flushed.
+ * We need to lock the delayed ref head at check_delayed_ref(),
+ * if one exists, while holding the leaf locked in order to not
+ * race with delayed ref flushing, missing references and
+ * incorrectly reporting that the extent is not shared.
+ */
+ if (IS_ENABLED(CONFIG_BTRFS_ASSERT)) {
+ struct extent_buffer *leaf = path->nodes[0];
+
+ ASSERT(leaf != NULL);
+ btrfs_assert_tree_read_locked(leaf);
+
+ if (ret != -ENOENT) {
+ struct btrfs_key key;
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ ASSERT(key.objectid == bytenr);
+ ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY);
+ }
+ }
+
+ ret = check_delayed_ref(inode, path, offset, bytenr);
+ } while (ret == -EAGAIN && !path->nowait);
out:
btrfs_release_path(path);
- if (btrfs_is_data_reloc_root(root))
+ if (btrfs_is_data_reloc_root(inode->root))
WARN_ON(ret > 0);
return ret;
}
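To make the locking and return contract described above concrete, here is a caller-side sketch. The helper name and its surrounding logic are assumptions for illustration; only btrfs_cross_ref_exist(), btrfs_alloc_path(), BTRFS_PATH_AUTO_FREE() and path->nowait come from the code in this patch:

    /*
     * Return true if the data extent at @disk_bytenr is referenced only by
     * (@inode, @file_offset), i.e. it may be safe to write to it in place.
     */
    static bool extent_is_exclusive(struct btrfs_inode *inode, u64 file_offset,
                                    u64 disk_bytenr, bool nowait)
    {
            BTRFS_PATH_AUTO_FREE(path);

            path = btrfs_alloc_path();
            if (!path)
                    return false;
            path->nowait = nowait;

            /* 0 means no other reference was found; 1 or an error forces COW. */
            return btrfs_cross_ref_exist(inode, file_offset, disk_bytenr, path) == 0;
    }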
@@ -2434,7 +2458,7 @@ out:
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
- int full_backref, int inc)
+ bool full_backref, bool inc)
{
struct btrfs_fs_info *fs_info = root->fs_info;
u64 parent;
@@ -2520,15 +2544,15 @@ fail:
}
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *buf, int full_backref)
+ struct extent_buffer *buf, bool full_backref)
{
- return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
+ return __btrfs_mod_ref(trans, root, buf, full_backref, true);
}
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *buf, int full_backref)
+ struct extent_buffer *buf, bool full_backref)
{
- return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
+ return __btrfs_mod_ref(trans, root, buf, full_backref, false);
}
static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
@@ -2568,37 +2592,34 @@ static u64 first_logical_byte(struct btrfs_fs_info *fs_info)
}
static int pin_down_extent(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *cache,
- u64 bytenr, u64 num_bytes, int reserved)
+ struct btrfs_block_group *bg,
+ u64 bytenr, u64 num_bytes, bool reserved)
{
- struct btrfs_fs_info *fs_info = cache->fs_info;
-
- spin_lock(&cache->space_info->lock);
- spin_lock(&cache->lock);
- cache->pinned += num_bytes;
- btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info,
- num_bytes);
- if (reserved) {
- cache->reserved -= num_bytes;
- cache->space_info->bytes_reserved -= num_bytes;
- }
- spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
-
- set_extent_bit(&trans->transaction->pinned_extents, bytenr,
- bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
+ struct btrfs_space_info *space_info = bg->space_info;
+ const u64 reserved_bytes = (reserved ? num_bytes : 0);
+
+ spin_lock(&space_info->lock);
+ spin_lock(&bg->lock);
+ bg->pinned += num_bytes;
+ bg->reserved -= reserved_bytes;
+ spin_unlock(&bg->lock);
+ space_info->bytes_reserved -= reserved_bytes;
+ btrfs_space_info_update_bytes_pinned(space_info, num_bytes);
+ spin_unlock(&space_info->lock);
+
+ btrfs_set_extent_bit(&trans->transaction->pinned_extents, bytenr,
+ bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
return 0;
}
-int btrfs_pin_extent(struct btrfs_trans_handle *trans,
- u64 bytenr, u64 num_bytes, int reserved)
+int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes)
{
struct btrfs_block_group *cache;
cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
BUG_ON(!cache); /* Logic error */
- pin_down_extent(trans, cache, bytenr, num_bytes, reserved);
+ pin_down_extent(trans, cache, bytenr, num_bytes, true);
btrfs_put_block_group(cache);
return 0;
@@ -2622,7 +2643,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
if (ret)
goto out;
- pin_down_extent(trans, cache, eb->start, eb->len, 0);
+ pin_down_extent(trans, cache, eb->start, eb->len, false);
/* remove us from the free space cache (if we're there at all) */
ret = btrfs_remove_free_space(cache, eb->start, eb->len);
@@ -2724,26 +2745,23 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
{
struct btrfs_block_group *cache = NULL;
struct btrfs_space_info *space_info;
- struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
struct btrfs_free_cluster *cluster = NULL;
- u64 len;
u64 total_unpinned = 0;
u64 empty_cluster = 0;
- bool readonly;
- int ret = 0;
while (start <= end) {
- readonly = false;
+ u64 len;
+ bool readonly;
+
if (!cache ||
start >= cache->start + cache->length) {
if (cache)
btrfs_put_block_group(cache);
total_unpinned = 0;
cache = btrfs_lookup_block_group(fs_info, start);
- if (cache == NULL) {
+ if (unlikely(cache == NULL)) {
/* Logic error, something removed the block group. */
- ret = -EUCLEAN;
- goto out;
+ return -EUCLEAN;
}
cluster = fetch_cluster_info(fs_info,
@@ -2777,45 +2795,28 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
spin_lock(&space_info->lock);
spin_lock(&cache->lock);
+ readonly = cache->ro;
cache->pinned -= len;
- btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len);
+ spin_unlock(&cache->lock);
+
+ btrfs_space_info_update_bytes_pinned(space_info, -len);
space_info->max_extent_size = 0;
- if (cache->ro) {
+
+ if (readonly) {
space_info->bytes_readonly += len;
- readonly = true;
} else if (btrfs_is_zoned(fs_info)) {
/* Need reset before reusing in a zoned block group */
- btrfs_space_info_update_bytes_zone_unusable(fs_info, space_info,
- len);
- readonly = true;
+ btrfs_space_info_update_bytes_zone_unusable(space_info, len);
+ } else if (return_free_space) {
+ btrfs_return_free_space(space_info, len);
}
- spin_unlock(&cache->lock);
- if (!readonly && return_free_space &&
- global_rsv->space_info == space_info) {
- spin_lock(&global_rsv->lock);
- if (!global_rsv->full) {
- u64 to_add = min(len, global_rsv->size -
- global_rsv->reserved);
-
- global_rsv->reserved += to_add;
- btrfs_space_info_update_bytes_may_use(fs_info,
- space_info, to_add);
- if (global_rsv->reserved >= global_rsv->size)
- global_rsv->full = 1;
- len -= to_add;
- }
- spin_unlock(&global_rsv->lock);
- }
- /* Add to any tickets we may have */
- if (!readonly && return_free_space && len)
- btrfs_try_granting_tickets(fs_info, space_info);
spin_unlock(&space_info->lock);
}
if (cache)
btrfs_put_block_group(cache);
-out:
- return ret;
+
+ return 0;
}
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
@@ -2823,34 +2824,63 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_group *block_group, *tmp;
struct list_head *deleted_bgs;
- struct extent_io_tree *unpin;
+ struct extent_io_tree *unpin = &trans->transaction->pinned_extents;
+ struct extent_state *cached_state = NULL;
u64 start;
u64 end;
+ int unpin_error = 0;
int ret;
- unpin = &trans->transaction->pinned_extents;
-
- while (!TRANS_ABORTED(trans)) {
- struct extent_state *cached_state = NULL;
+ mutex_lock(&fs_info->unused_bg_unpin_mutex);
+ btrfs_find_first_extent_bit(unpin, 0, &start, &end, EXTENT_DIRTY, &cached_state);
- mutex_lock(&fs_info->unused_bg_unpin_mutex);
- if (!find_first_extent_bit(unpin, 0, &start, &end,
- EXTENT_DIRTY, &cached_state)) {
- mutex_unlock(&fs_info->unused_bg_unpin_mutex);
- break;
- }
+ while (!TRANS_ABORTED(trans) && cached_state) {
+ struct extent_state *next_state;
if (btrfs_test_opt(fs_info, DISCARD_SYNC))
ret = btrfs_discard_extent(fs_info, start,
end + 1 - start, NULL);
- clear_extent_dirty(unpin, start, end, &cached_state);
+ next_state = btrfs_next_extent_state(unpin, cached_state);
+ btrfs_clear_extent_dirty(unpin, start, end, &cached_state);
ret = unpin_extent_range(fs_info, start, end, true);
- BUG_ON(ret);
- mutex_unlock(&fs_info->unused_bg_unpin_mutex);
- free_extent_state(cached_state);
- cond_resched();
+ /*
+ * If we get an error unpinning an extent range, store the first
+ * error to return later after trying to unpin all ranges and do
+ * the sync discards. Our caller will abort the transaction
+ * (which already wrote new superblocks) and on the next mount
+ * the space will be available as it was pinned by in-memory
+ * only structures in this phase.
+ */
+ if (ret) {
+ btrfs_err_rl(fs_info,
+"failed to unpin extent range [%llu, %llu] when committing transaction %llu: %s (%d)",
+ start, end, trans->transid,
+ btrfs_decode_error(ret), ret);
+ if (!unpin_error)
+ unpin_error = ret;
+ }
+
+ btrfs_free_extent_state(cached_state);
+
+ if (need_resched()) {
+ btrfs_free_extent_state(next_state);
+ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
+ cond_resched();
+ cached_state = NULL;
+ mutex_lock(&fs_info->unused_bg_unpin_mutex);
+ btrfs_find_first_extent_bit(unpin, 0, &start, &end,
+ EXTENT_DIRTY, &cached_state);
+ } else {
+ cached_state = next_state;
+ if (cached_state) {
+ start = cached_state->start;
+ end = cached_state->end;
+ }
+ }
}
+ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
+ btrfs_free_extent_state(cached_state);
if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) {
btrfs_discard_calc_delay(&fs_info->discard_ctl);
@@ -2864,16 +2894,20 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
*/
deleted_bgs = &trans->transaction->deleted_bgs;
list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
- u64 trimmed = 0;
-
ret = -EROFS;
if (!TRANS_ABORTED(trans))
- ret = btrfs_discard_extent(fs_info,
- block_group->start,
- block_group->length,
- &trimmed);
+ ret = btrfs_discard_extent(fs_info, block_group->start,
+ block_group->length, NULL);
+ /*
+ * Not strictly necessary to lock, as the block_group should be
+ * read-only from btrfs_delete_unused_bgs().
+ */
+ ASSERT(block_group->ro);
+ spin_lock(&fs_info->unused_bgs_lock);
list_del_init(&block_group->bg_list);
+ spin_unlock(&fs_info->unused_bgs_lock);
+
btrfs_unfreeze_block_group(block_group);
btrfs_put_block_group(block_group);
@@ -2885,7 +2919,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
}
}
- return 0;
+ return unpin_error;
}
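The error handling added above follows a common "remember the first failure, keep going" shape: later failures are still logged, but only the first one is propagated, so a single bad range does not stop the remaining ranges from being unpinned and discarded. A generic sketch of that shape (struct item and process_one() are made up for illustration):

    static int process_all(struct item *items, int nr_items)
    {
            int first_err = 0;

            for (int i = 0; i < nr_items; i++) {
                    int ret = process_one(&items[i]);

                    /* Keep going, but report only the first failure. */
                    if (ret && !first_err)
                            first_err = ret;
            }
            return first_err;
    }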
/*
@@ -2947,26 +2981,26 @@ static int do_free_extent_accounting(struct btrfs_trans_handle *trans,
csum_root = btrfs_csum_root(trans->fs_info, bytenr);
ret = btrfs_del_csums(trans, csum_root, bytenr, num_bytes);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
ret = btrfs_delete_raid_extent(trans, bytenr, num_bytes);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
}
ret = btrfs_record_squota_delta(trans->fs_info, delta);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
- ret = add_to_free_space_tree(trans, bytenr, num_bytes);
- if (ret) {
+ ret = btrfs_add_to_free_space_tree(trans, bytenr, num_bytes);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -3046,12 +3080,12 @@ static int do_free_extent_accounting(struct btrfs_trans_handle *trans,
*/
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *href,
- struct btrfs_delayed_ref_node *node,
+ const struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_key key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *extent_root;
struct extent_buffer *leaf;
struct btrfs_extent_item *ei;
@@ -3080,13 +3114,13 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
- if (!is_data && refs_to_drop != 1) {
+ if (unlikely(!is_data && refs_to_drop != 1)) {
btrfs_crit(info,
"invalid refs_to_drop, dropping more than 1 refs for tree block %llu refs_to_drop %u",
node->bytenr, refs_to_drop);
ret = -EINVAL;
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
if (is_data)
@@ -3127,19 +3161,18 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
}
if (!found_extent) {
- if (iref) {
+ if (unlikely(iref)) {
abort_and_dump(trans, path,
"invalid iref slot %u, no EXTENT/METADATA_ITEM found but has inline extent ref",
path->slots[0]);
- ret = -EUCLEAN;
- goto out;
+ return -EUCLEAN;
}
/* Must be SHARED_* item, remove the backref first */
ret = remove_extent_backref(trans, extent_root, path,
NULL, refs_to_drop, is_data);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
btrfs_release_path(path);
@@ -3186,9 +3219,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
"umm, got %d back from search, was looking for %llu, slot %d",
ret, bytenr, path->slots[0]);
}
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
extent_slot = path->slots[0];
}
@@ -3197,10 +3230,10 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
"unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu slot %d",
bytenr, node->parent, node->ref_root, owner_objectid,
owner_offset, path->slots[0]);
- goto out;
+ return ret;
} else {
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
leaf = path->nodes[0];
@@ -3211,7 +3244,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
"unexpected extent item size, has %u expect >= %zu",
item_size, sizeof(*ei));
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
ei = btrfs_item_ptr(leaf, extent_slot,
struct btrfs_extent_item);
@@ -3219,26 +3252,24 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
key.type == BTRFS_EXTENT_ITEM_KEY) {
struct btrfs_tree_block_info *bi;
- if (item_size < sizeof(*ei) + sizeof(*bi)) {
+ if (unlikely(item_size < sizeof(*ei) + sizeof(*bi))) {
abort_and_dump(trans, path,
"invalid extent item size for key (%llu, %u, %llu) slot %u owner %llu, has %u expect >= %zu",
key.objectid, key.type, key.offset,
path->slots[0], owner_objectid, item_size,
sizeof(*ei) + sizeof(*bi));
- ret = -EUCLEAN;
- goto out;
+ return -EUCLEAN;
}
bi = (struct btrfs_tree_block_info *)(ei + 1);
WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
}
refs = btrfs_extent_refs(leaf, ei);
- if (refs < refs_to_drop) {
+ if (unlikely(refs < refs_to_drop)) {
abort_and_dump(trans, path,
"trying to drop %d refs but we only have %llu for bytenr %llu slot %u",
refs_to_drop, refs, bytenr, path->slots[0]);
- ret = -EUCLEAN;
- goto out;
+ return -EUCLEAN;
}
refs -= refs_to_drop;
@@ -3250,23 +3281,21 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
* be updated by remove_extent_backref
*/
if (iref) {
- if (!found_extent) {
+ if (unlikely(!found_extent)) {
abort_and_dump(trans, path,
"invalid iref, got inlined extent ref but no EXTENT/METADATA_ITEM found, slot %u",
path->slots[0]);
- ret = -EUCLEAN;
- goto out;
+ return -EUCLEAN;
}
} else {
btrfs_set_extent_refs(leaf, ei, refs);
- btrfs_mark_buffer_dirty(trans, leaf);
}
if (found_extent) {
ret = remove_extent_backref(trans, extent_root, path,
iref, refs_to_drop, is_data);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
}
} else {
@@ -3280,23 +3309,21 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
/* In this branch refs == 1 */
if (found_extent) {
- if (is_data && refs_to_drop !=
- extent_data_ref_count(path, iref)) {
+ if (unlikely(is_data && refs_to_drop !=
+ extent_data_ref_count(path, iref))) {
abort_and_dump(trans, path,
"invalid refs_to_drop, current refs %u refs_to_drop %u slot %u",
extent_data_ref_count(path, iref),
refs_to_drop, path->slots[0]);
- ret = -EUCLEAN;
- goto out;
+ return -EUCLEAN;
}
if (iref) {
- if (path->slots[0] != extent_slot) {
+ if (unlikely(path->slots[0] != extent_slot)) {
abort_and_dump(trans, path,
-"invalid iref, extent item key (%llu %u %llu) slot %u doesn't have wanted iref",
- key.objectid, key.type,
- key.offset, path->slots[0]);
- ret = -EUCLEAN;
- goto out;
+"invalid iref, extent item key " BTRFS_KEY_FMT " slot %u doesn't have wanted iref",
+ BTRFS_KEY_FMT_VALUE(&key),
+ path->slots[0]);
+ return -EUCLEAN;
}
} else {
/*
@@ -3305,12 +3332,11 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
* | extent_slot ||extent_slot + 1|
* [ EXTENT/METADATA_ITEM ][ SHARED_* ITEM ]
*/
- if (path->slots[0] != extent_slot + 1) {
+ if (unlikely(path->slots[0] != extent_slot + 1)) {
abort_and_dump(trans, path,
"invalid SHARED_* item slot %u, previous item is not EXTENT/METADATA_ITEM",
path->slots[0]);
- ret = -EUCLEAN;
- goto out;
+ return -EUCLEAN;
}
path->slots[0] = extent_slot;
num_to_del = 2;
@@ -3329,9 +3355,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
num_to_del);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
btrfs_release_path(path);
@@ -3339,8 +3365,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
-out:
- btrfs_free_path(path);
return ret;
}
@@ -3449,7 +3473,7 @@ int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
bg = btrfs_lookup_block_group(fs_info, buf->start);
if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
- pin_down_extent(trans, bg, buf->start, buf->len, 1);
+ pin_down_extent(trans, bg, buf->start, buf->len, true);
btrfs_put_block_group(bg);
goto out;
}
@@ -3473,7 +3497,7 @@ int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
if (test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)
|| btrfs_is_zoned(fs_info)) {
- pin_down_extent(trans, bg, buf->start, buf->len, 1);
+ pin_down_extent(trans, bg, buf->start, buf->len, true);
btrfs_put_block_group(bg);
goto out;
}
@@ -3481,17 +3505,11 @@ int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
btrfs_add_free_space(bg, buf->start, buf->len);
- btrfs_free_reserved_bytes(bg, buf->len, 0);
+ btrfs_free_reserved_bytes(bg, buf->len, false);
btrfs_put_block_group(bg);
trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
out:
-
- /*
- * Deleting the buffer, clear the corrupt flag since it doesn't
- * matter anymore.
- */
- clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
return 0;
}
@@ -3509,7 +3527,7 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
* tree, just update pinning info and exit early.
*/
if (ref->ref_root == BTRFS_TREE_LOG_OBJECTID) {
- btrfs_pin_extent(trans, ref->bytenr, ref->num_bytes, 1);
+ btrfs_pin_extent(trans, ref->bytenr, ref->num_bytes);
ret = 0;
} else if (ref->type == BTRFS_REF_METADATA) {
ret = btrfs_add_delayed_tree_ref(trans, ref, NULL);
@@ -3560,15 +3578,14 @@ enum btrfs_loop_type {
};
static inline void
-btrfs_lock_block_group(struct btrfs_block_group *cache,
- int delalloc)
+btrfs_lock_block_group(struct btrfs_block_group *cache, bool delalloc)
{
if (delalloc)
down_read(&cache->data_rwsem);
}
static inline void btrfs_grab_block_group(struct btrfs_block_group *cache,
- int delalloc)
+ bool delalloc)
{
btrfs_get_block_group(cache);
if (delalloc)
@@ -3578,7 +3595,7 @@ static inline void btrfs_grab_block_group(struct btrfs_block_group *cache,
static struct btrfs_block_group *btrfs_lock_cluster(
struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
- int delalloc)
+ bool delalloc)
__acquires(&cluster->refill_lock)
{
struct btrfs_block_group *used_bg = NULL;
@@ -3615,14 +3632,28 @@ static struct btrfs_block_group *btrfs_lock_cluster(
}
static inline void
-btrfs_release_block_group(struct btrfs_block_group *cache,
- int delalloc)
+btrfs_release_block_group(struct btrfs_block_group *cache, bool delalloc)
{
if (delalloc)
up_read(&cache->data_rwsem);
btrfs_put_block_group(cache);
}
+static bool find_free_extent_check_size_class(const struct find_free_extent_ctl *ffe_ctl,
+ const struct btrfs_block_group *bg)
+{
+ if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED)
+ return true;
+ if (!btrfs_block_group_should_use_size_class(bg))
+ return true;
+ if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS)
+ return true;
+ if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS &&
+ bg->size_class == BTRFS_BG_SZ_NONE)
+ return true;
+ return ffe_ctl->size_class == bg->size_class;
+}
+
/*
* Helper function for find_free_extent().
*
@@ -3644,7 +3675,8 @@ static int find_free_extent_clustered(struct btrfs_block_group *bg,
if (!cluster_bg)
goto refill_cluster;
if (cluster_bg != bg && (cluster_bg->ro ||
- !block_group_bits(cluster_bg, ffe_ctl->flags)))
+ !block_group_bits(cluster_bg, ffe_ctl->flags) ||
+ !find_free_extent_check_size_class(ffe_ctl, cluster_bg)))
goto release_cluster;
offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
@@ -3990,7 +4022,7 @@ static int do_allocation(struct btrfs_block_group *block_group,
static void release_block_group(struct btrfs_block_group *block_group,
struct find_free_extent_ctl *ffe_ctl,
- int delalloc)
+ bool delalloc)
{
switch (ffe_ctl->policy) {
case BTRFS_EXTENT_ALLOC_CLUSTERED:
@@ -4109,6 +4141,7 @@ static int can_allocate_chunk(struct btrfs_fs_info *fs_info,
static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
struct btrfs_key *ins,
struct find_free_extent_ctl *ffe_ctl,
+ struct btrfs_space_info *space_info,
bool full_search)
{
struct btrfs_root *root = fs_info->chunk_root;
@@ -4163,7 +4196,7 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
return ret;
}
- ret = btrfs_chunk_alloc(trans, ffe_ctl->flags,
+ ret = btrfs_chunk_alloc(trans, space_info, ffe_ctl->flags,
CHUNK_ALLOC_FORCE_FOR_EXTENT);
/* Do not bail out on ENOSPC since we can do more. */
@@ -4200,21 +4233,6 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
return -ENOSPC;
}
-static bool find_free_extent_check_size_class(struct find_free_extent_ctl *ffe_ctl,
- struct btrfs_block_group *bg)
-{
- if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED)
- return true;
- if (!btrfs_block_group_should_use_size_class(bg))
- return true;
- if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS)
- return true;
- if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS &&
- bg->size_class == BTRFS_BG_SZ_NONE)
- return true;
- return ffe_ctl->size_class == bg->size_class;
-}
-
static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
struct find_free_extent_ctl *ffe_ctl,
struct btrfs_space_info *space_info,
@@ -4267,7 +4285,8 @@ static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
}
static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
- struct find_free_extent_ctl *ffe_ctl)
+ struct find_free_extent_ctl *ffe_ctl,
+ struct btrfs_space_info *space_info)
{
if (ffe_ctl->for_treelog) {
spin_lock(&fs_info->treelog_bg_lock);
@@ -4285,12 +4304,13 @@ static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
spin_lock(&fs_info->zone_active_bgs_lock);
list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
/*
- * No lock is OK here because avail is monotinically
+ * No lock is OK here because avail is monotonically
* decreasing, and this is just a hint.
*/
u64 avail = block_group->zone_capacity - block_group->alloc_offset;
if (block_group_bits(block_group, ffe_ctl->flags) &&
+ block_group->space_info == space_info &&
avail >= ffe_ctl->num_bytes) {
ffe_ctl->hint_byte = block_group->start;
break;
@@ -4312,7 +4332,7 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
return prepare_allocation_clustered(fs_info, ffe_ctl,
space_info, ins);
case BTRFS_EXTENT_ALLOC_ZONED:
- return prepare_allocation_zoned(fs_info, ffe_ctl);
+ return prepare_allocation_zoned(fs_info, ffe_ctl, space_info);
default:
BUG();
}
@@ -4380,11 +4400,22 @@ static noinline int find_free_extent(struct btrfs_root *root,
ins->objectid = 0;
ins->offset = 0;
- trace_find_free_extent(root, ffe_ctl);
+ trace_btrfs_find_free_extent(root, ffe_ctl);
space_info = btrfs_find_space_info(fs_info, ffe_ctl->flags);
+ if (btrfs_is_zoned(fs_info) && space_info) {
+ /* Use dedicated sub-space_info for dedicated block group users. */
+ if (ffe_ctl->for_data_reloc) {
+ space_info = space_info->sub_group[0];
+ ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC);
+ } else if (ffe_ctl->for_treelog) {
+ space_info = space_info->sub_group[0];
+ ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_TREELOG);
+ }
+ }
if (!space_info) {
- btrfs_err(fs_info, "No space info for %llu", ffe_ctl->flags);
+ btrfs_err(fs_info, "no space info for %llu, tree-log %d, relocation %d",
+ ffe_ctl->flags, ffe_ctl->for_treelog, ffe_ctl->for_data_reloc);
return -ENOSPC;
}
@@ -4406,6 +4437,7 @@ static noinline int find_free_extent(struct btrfs_root *root,
* picked out then we don't care that the block group is cached.
*/
if (block_group && block_group_bits(block_group, ffe_ctl->flags) &&
+ block_group->space_info == space_info &&
block_group->cached != BTRFS_CACHE_NO) {
down_read(&space_info->groups_sem);
if (list_empty(&block_group->list) ||
@@ -4431,7 +4463,7 @@ static noinline int find_free_extent(struct btrfs_root *root,
}
}
search:
- trace_find_free_extent_search_loop(root, ffe_ctl);
+ trace_btrfs_find_free_extent_search_loop(root, ffe_ctl);
ffe_ctl->have_caching_bg = false;
if (ffe_ctl->index == btrfs_bg_flags_to_raid_index(ffe_ctl->flags) ||
ffe_ctl->index == 0)
@@ -4483,7 +4515,7 @@ search:
}
have_block_group:
- trace_find_free_extent_have_block_group(root, ffe_ctl, block_group);
+ trace_btrfs_find_free_extent_have_block_group(root, ffe_ctl, block_group);
ffe_ctl->cached = btrfs_block_group_done(block_group);
if (unlikely(!ffe_ctl->cached)) {
ffe_ctl->have_caching_bg = true;
@@ -4576,7 +4608,8 @@ loop:
}
up_read(&space_info->groups_sem);
- ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, full_search);
+ ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, space_info,
+ full_search);
if (ret > 0)
goto search;
@@ -4645,7 +4678,7 @@ loop:
int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
u64 num_bytes, u64 min_alloc_size,
u64 empty_size, u64 hint_byte,
- struct btrfs_key *ins, int is_data, int delalloc)
+ struct btrfs_key *ins, bool is_data, bool delalloc)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct find_free_extent_ctl ffe_ctl = {};
@@ -4690,16 +4723,15 @@ again:
"allocation failed flags %llu, wanted %llu tree-log %d, relocation: %d",
flags, num_bytes, for_treelog, for_data_reloc);
if (sinfo)
- btrfs_dump_space_info(fs_info, sinfo,
- num_bytes, 1);
+ btrfs_dump_space_info(sinfo, num_bytes, 1);
}
}
return ret;
}
-int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
- u64 start, u64 len, int delalloc)
+int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len,
+ bool is_delalloc)
{
struct btrfs_block_group *cache;
@@ -4711,7 +4743,7 @@ int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
}
btrfs_add_free_space(cache, start, len);
- btrfs_free_reserved_bytes(cache, len, delalloc);
+ btrfs_free_reserved_bytes(cache, len, is_delalloc);
trace_btrfs_reserved_extent_free(fs_info, start, len);
btrfs_put_block_group(cache);
@@ -4731,7 +4763,7 @@ int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans,
return -ENOSPC;
}
- ret = pin_down_extent(trans, cache, eb->start, eb->len, 1);
+ ret = pin_down_extent(trans, cache, eb->start, eb->len, true);
btrfs_put_block_group(cache);
return ret;
}
@@ -4742,7 +4774,7 @@ static int alloc_reserved_extent(struct btrfs_trans_handle *trans, u64 bytenr,
struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
- ret = remove_from_free_space_tree(trans, bytenr, num_bytes);
+ ret = btrfs_remove_from_free_space_tree(trans, bytenr, num_bytes);
if (ret)
return ret;
@@ -4827,14 +4859,13 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
}
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_free_path(path);
return alloc_reserved_extent(trans, ins->objectid, ins->offset);
}
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_node *node,
+ const struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -4902,7 +4933,6 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
btrfs_set_extent_inline_ref_offset(leaf, iref, node->ref_root);
}
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_free_path(path);
return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize);
@@ -4923,7 +4953,7 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
ASSERT(generic_ref.ref_root != BTRFS_TREE_LOG_OBJECTID);
- if (btrfs_is_data_reloc_root(root) && is_fstree(root->relocation_src_root))
+ if (btrfs_is_data_reloc_root(root) && btrfs_is_fstree(root->relocation_src_root))
generic_ref.owning_root = root->relocation_src_root;
btrfs_init_data_ref(&generic_ref, owner, offset, 0, false);
@@ -4945,7 +4975,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
int ret;
struct btrfs_block_group *block_group;
struct btrfs_space_info *space_info;
- struct btrfs_squota_delta delta = {
+ const struct btrfs_squota_delta delta = {
.root = root_objectid,
.num_bytes = ins->offset,
.generation = trans->transid,
@@ -4979,7 +5009,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
offset, ins, 1, root_objectid);
if (ret)
- btrfs_pin_extent(trans, ins->objectid, ins->offset, 1);
+ btrfs_pin_extent(trans, ins->objectid, ins->offset);
ret = btrfs_record_squota_delta(fs_info, &delta);
btrfs_put_block_group(block_group);
return ret;
@@ -5020,7 +5050,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (IS_ERR(buf))
return buf;
- if (check_eb_lock_owner(buf)) {
+ if (unlikely(check_eb_lock_owner(buf))) {
free_extent_buffer(buf);
return ERR_PTR(-EUCLEAN);
}
@@ -5071,17 +5101,17 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
* EXTENT bit to differentiate dirty pages.
*/
if (buf->log_index == 0)
- set_extent_bit(&root->dirty_log_pages, buf->start,
- buf->start + buf->len - 1,
- EXTENT_DIRTY, NULL);
+ btrfs_set_extent_bit(&root->dirty_log_pages, buf->start,
+ buf->start + buf->len - 1,
+ EXTENT_DIRTY_LOG1, NULL);
else
- set_extent_bit(&root->dirty_log_pages, buf->start,
- buf->start + buf->len - 1,
- EXTENT_NEW, NULL);
+ btrfs_set_extent_bit(&root->dirty_log_pages, buf->start,
+ buf->start + buf->len - 1,
+ EXTENT_DIRTY_LOG2, NULL);
} else {
buf->log_index = -1;
- set_extent_bit(&trans->transaction->dirty_pages, buf->start,
- buf->start + buf->len - 1, EXTENT_DIRTY, NULL);
+ btrfs_set_extent_bit(&trans->transaction->dirty_pages, buf->start,
+ buf->start + buf->len - 1, EXTENT_DIRTY, NULL);
}
/* this returns a buffer locked for blocking */
return buf;
@@ -5125,7 +5155,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
return ERR_CAST(block_rsv);
ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
- empty_size, hint, &ins, 0, 0);
+ empty_size, hint, &ins, false, false);
if (ret)
goto out_unuse;
@@ -5187,7 +5217,7 @@ out_free_buf:
btrfs_tree_unlock(buf);
free_extent_buffer(buf);
out_free_reserved:
- btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, false);
out_unuse:
btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize);
return ERR_PTR(ret);
@@ -5285,7 +5315,7 @@ static bool visit_node_for_delete(struct btrfs_root *root, struct walk_control *
* reference to it.
*/
generation = btrfs_node_ptr_generation(eb, slot);
- if (!wc->update_ref || generation <= root->root_key.offset)
+ if (!wc->update_ref || generation <= btrfs_root_origin_generation(root))
return false;
/*
@@ -5340,7 +5370,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
goto reada;
if (wc->stage == UPDATE_BACKREF &&
- generation <= root->root_key.offset)
+ generation <= btrfs_root_origin_generation(root))
continue;
/* We don't lock the tree block, it's OK to be racy here */
@@ -5429,17 +5459,17 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
if (!(wc->flags[level] & flag)) {
ASSERT(path->locks[level]);
ret = btrfs_inc_ref(trans, root, eb, 1);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
ret = btrfs_dec_ref(trans, root, eb, 0);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
ret = btrfs_set_disk_extent_flags(trans, eb, flag);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -5467,7 +5497,7 @@ static int check_ref_exists(struct btrfs_trans_handle *trans,
{
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_head *head;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_extent_inline_ref *iref;
int ret;
bool exists = false;
@@ -5484,7 +5514,6 @@ again:
* If we get 0 then we found our reference, return 1, else
* return the error if it's not -ENOENT;
*/
- btrfs_free_path(path);
return (ret < 0 ) ? ret : 1;
}
@@ -5515,11 +5544,10 @@ again:
goto again;
}
- exists = btrfs_find_delayed_tree_ref(head, root->root_key.objectid, parent);
+ exists = btrfs_find_delayed_tree_ref(head, btrfs_root_id(root), parent);
mutex_unlock(&head->mutex);
out:
spin_unlock(&delayed_refs->lock);
- btrfs_free_path(path);
return exists ? 1 : 0;
}
@@ -5543,7 +5571,7 @@ static int check_next_block_uptodate(struct btrfs_trans_handle *trans,
generation = btrfs_node_ptr_generation(path->nodes[level], path->slots[level]);
- if (btrfs_buffer_uptodate(next, generation, 0))
+ if (btrfs_buffer_uptodate(next, generation, false))
return 0;
check.level = level - 1;
@@ -5572,7 +5600,7 @@ static int check_next_block_uptodate(struct btrfs_trans_handle *trans,
* If we are UPDATE_BACKREF then we will not, we need to update our backrefs.
*
* If we are DROP_REFERENCE this will figure out if we need to drop our current
- * reference, skipping it if we dropped it from a previous incompleted drop, or
+ * reference, skipping it if we dropped it from a previous uncompleted drop, or
* dropping it if we still have a reference to it.
*/
static int maybe_drop_reference(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -5597,7 +5625,7 @@ static int maybe_drop_reference(struct btrfs_trans_handle *trans, struct btrfs_r
ref.parent = path->nodes[level]->start;
} else {
ASSERT(btrfs_root_id(root) == btrfs_header_owner(path->nodes[level]));
- if (btrfs_root_id(root) != btrfs_header_owner(path->nodes[level])) {
+ if (unlikely(btrfs_root_id(root) != btrfs_header_owner(path->nodes[level]))) {
btrfs_err(root->fs_info, "mismatched block owner");
return -EIO;
}
@@ -5683,7 +5711,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
* for the subtree
*/
if (wc->stage == UPDATE_BACKREF &&
- generation <= root->root_key.offset) {
+ generation <= btrfs_root_origin_generation(root)) {
wc->lookup_info = 1;
return 1;
}
@@ -5719,7 +5747,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
/*
* We have to walk down into this node, and if we're currently at the
- * DROP_REFERNCE stage and this block is shared then we need to switch
+ * DROP_REFERENCE stage and this block is shared then we need to switch
* to the UPDATE_BACKREF stage in order to convert to FULL_BACKREF.
*/
if (wc->stage == DROP_REFERENCE && wc->refs[level - 1] > 1) {
@@ -5733,7 +5761,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
level--;
ASSERT(level == btrfs_header_level(next));
- if (level != btrfs_header_level(next)) {
+ if (unlikely(level != btrfs_header_level(next))) {
btrfs_err(root->fs_info, "mismatched level");
ret = -EIO;
goto out_unlock;
@@ -5836,15 +5864,20 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
if (wc->refs[level] == 1) {
if (level == 0) {
- if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+ if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
ret = btrfs_dec_ref(trans, root, eb, 1);
- else
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+ } else {
ret = btrfs_dec_ref(trans, root, eb, 0);
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- return ret;
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
}
- if (is_fstree(btrfs_root_id(root))) {
+ if (btrfs_is_fstree(btrfs_root_id(root))) {
ret = btrfs_qgroup_trace_leaf_items(trans, eb);
if (ret) {
btrfs_err_rl(fs_info,
@@ -5864,13 +5897,13 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
if (eb == root->node) {
if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
parent = eb->start;
- else if (btrfs_root_id(root) != btrfs_header_owner(eb))
+ else if (unlikely(btrfs_root_id(root) != btrfs_header_owner(eb)))
goto owner_mismatch;
} else {
if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
parent = path->nodes[level + 1]->start;
- else if (btrfs_root_id(root) !=
- btrfs_header_owner(path->nodes[level + 1]))
+ else if (unlikely(btrfs_root_id(root) !=
+ btrfs_header_owner(path->nodes[level + 1])))
goto owner_mismatch;
}
@@ -6005,9 +6038,9 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
* also make sure backrefs for the shared block and all lower level
* blocks are properly updated.
*
- * If called with for_reloc == 0, may exit early with -EAGAIN
+ * If called with for_reloc not set, may exit early with -EAGAIN
*/
-int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
+int btrfs_drop_snapshot(struct btrfs_root *root, bool update_ref, bool for_reloc)
{
const bool is_reloc_root = (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID);
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -6015,7 +6048,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
struct btrfs_trans_handle *trans;
struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_root_item *root_item = &root->root_item;
- struct walk_control *wc;
+ struct walk_control AUTO_KFREE(wc);
struct btrfs_key key;
const u64 rootid = btrfs_root_id(root);
int ret = 0;
@@ -6033,9 +6066,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
wc = kzalloc(sizeof(*wc), GFP_NOFS);
if (!wc) {
- btrfs_free_path(path);
ret = -ENOMEM;
- goto out;
+ goto out_free;
}
/*
@@ -6134,13 +6166,13 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
while (1) {
ret = walk_down_tree(trans, root, path, wc);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
break;
}
ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -6167,7 +6199,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
ret = btrfs_update_root(trans, tree_root,
&root->root_key,
root_item);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
@@ -6203,7 +6235,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
goto out_end_trans;
ret = btrfs_del_root(trans, &root->root_key);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
@@ -6211,7 +6243,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
if (!is_reloc_root) {
ret = btrfs_find_root(tree_root, &root->root_key, path,
NULL, NULL);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
} else if (ret > 0) {
@@ -6245,7 +6277,6 @@ out_end_trans:
btrfs_end_transaction_throttle(trans);
out_free:
- kfree(wc);
btrfs_free_path(path);
out:
if (!ret && root_dropped) {
@@ -6287,8 +6318,8 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
struct extent_buffer *parent)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_path *path;
- struct walk_control *wc;
+ BTRFS_PATH_AUTO_FREE(path);
+ struct walk_control AUTO_KFREE(wc);
int level;
int parent_level;
int ret = 0;
@@ -6300,14 +6331,12 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
return -ENOMEM;
wc = kzalloc(sizeof(*wc), GFP_NOFS);
- if (!wc) {
- btrfs_free_path(path);
+ if (!wc)
return -ENOMEM;
- }
btrfs_assert_tree_write_locked(parent);
parent_level = btrfs_header_level(parent);
- atomic_inc(&parent->refs);
+ refcount_inc(&parent->refs);
path->nodes[parent_level] = parent;
path->slots[parent_level] = btrfs_header_nritems(parent);
@@ -6329,19 +6358,17 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
while (1) {
ret = walk_down_tree(trans, root, path, wc);
if (ret < 0)
- break;
+ return ret;
ret = walk_up_tree(trans, root, path, wc, parent_level);
if (ret) {
- if (ret > 0)
- ret = 0;
+ if (ret < 0)
+ return ret;
break;
}
}
- kfree(wc);
- btrfs_free_path(path);
- return ret;
+ return 0;
}
/*
@@ -6402,14 +6429,14 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
if (ret)
break;
- find_first_clear_extent_bit(&device->alloc_state, start,
- &start, &end,
- CHUNK_TRIMMED | CHUNK_ALLOCATED);
+ btrfs_find_first_clear_extent_bit(&device->alloc_state, start,
+ &start, &end,
+ CHUNK_TRIMMED | CHUNK_ALLOCATED);
/* Check if there are any CHUNK_* bits left */
if (start > device->total_bytes) {
- WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
- btrfs_warn_in_rcu(fs_info,
+ DEBUG_WARN();
+ btrfs_warn(fs_info,
"ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu",
start, end - start + 1,
btrfs_dev_name(device),
@@ -6441,8 +6468,8 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
ret = btrfs_issue_discard(device->bdev, start, len,
&bytes);
if (!ret)
- set_extent_bit(&device->alloc_state, start,
- start + bytes - 1, CHUNK_TRIMMED, NULL);
+ btrfs_set_extent_bit(&device->alloc_state, start,
+ start + bytes - 1, CHUNK_TRIMMED, NULL);
mutex_unlock(&fs_info->chunk_mutex);
if (ret)
diff --git a/fs/btrfs/extent-tree.h b/fs/btrfs/extent-tree.h
index 2ad51130c037..71bb8109c969 100644
--- a/fs/btrfs/extent-tree.h
+++ b/fs/btrfs/extent-tree.h
@@ -4,7 +4,6 @@
#define BTRFS_EXTENT_TREE_H
#include <linux/types.h>
-#include "misc.h"
#include "block-group.h"
#include "locking.h"
@@ -31,7 +30,6 @@ struct find_free_extent_ctl {
u64 min_alloc_size;
u64 empty_size;
u64 flags;
- int delalloc;
/* Where to start the search inside the bg */
u64 search_start;
@@ -41,6 +39,7 @@ struct find_free_extent_ctl {
struct btrfs_free_cluster *last_ptr;
bool use_cluster;
+ bool delalloc;
bool have_caching_bg;
bool orig_have_caching_bg;
@@ -50,6 +49,16 @@ struct find_free_extent_ctl {
/* Allocation is called for data relocation */
bool for_data_reloc;
+ /*
+ * Set to true if we're retrying the allocation on this block group
+ * after waiting for caching progress, this is so that we retry only
+ * once before moving on to another block group.
+ */
+ bool retry_uncached;
+
+ /* Whether or not the allocator is currently following a hint. */
+ bool hinted;
+
/* RAID index, converted from flags */
int index;
@@ -58,13 +67,6 @@ struct find_free_extent_ctl {
*/
int loop;
- /*
- * Set to true if we're retrying the allocation on this block group
- * after waiting for caching progress, this is so that we retry only
- * once before moving on to another block group.
- */
- bool retry_uncached;
-
/* If current block group is cached */
int cached;
@@ -83,9 +85,6 @@ struct find_free_extent_ctl {
/* Allocation policy */
enum btrfs_extent_allocation_policy policy;
- /* Whether or not the allocator is currently following a hint */
- bool hinted;
-
/* Size class of block groups to prefer in early loops */
enum btrfs_block_group_size_class size_class;
};
@@ -98,7 +97,7 @@ enum btrfs_inline_ref_type {
};
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
- struct btrfs_extent_inline_ref *iref,
+ const struct btrfs_extent_inline_ref *iref,
enum btrfs_inline_ref_type is_data);
u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset);
@@ -111,13 +110,11 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
u64 offset, int metadata, u64 *refs, u64 *flags,
u64 *owner_root);
-int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num,
- int reserved);
+int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num);
int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
const struct extent_buffer *eb);
int btrfs_exclude_logged_extents(struct extent_buffer *eb);
-int btrfs_cross_ref_exist(struct btrfs_root *root,
- u64 objectid, u64 offset, u64 bytenr, bool strict,
+int btrfs_cross_ref_exist(struct btrfs_inode *inode, u64 offset, u64 bytenr,
struct btrfs_path *path);
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -140,28 +137,31 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_key *ins);
int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
u64 min_alloc_size, u64 empty_size, u64 hint_byte,
- struct btrfs_key *ins, int is_data, int delalloc);
+ struct btrfs_key *ins, bool is_data, bool delalloc);
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *buf, int full_backref);
+ struct extent_buffer *buf, bool full_backref);
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *buf, int full_backref);
+ struct extent_buffer *buf, bool full_backref);
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
struct extent_buffer *eb, u64 flags);
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref);
u64 btrfs_get_extent_owner_root(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf, int slot);
-int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
- u64 start, u64 len, int delalloc);
+int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len,
+ bool is_delalloc);
int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans,
const struct extent_buffer *eb);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, struct btrfs_ref *generic_ref);
-int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref,
- int for_reloc);
+int btrfs_drop_snapshot(struct btrfs_root *root, bool update_ref, bool for_reloc);
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *node,
struct extent_buffer *parent);
+void btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end);
+int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
+ u64 num_bytes, u64 *actual_bytes);
+int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
#endif
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index b923d0cec61c..629fd5af4286 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -75,9 +75,9 @@ void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
while (!list_empty(&fs_info->allocated_ebs)) {
eb = list_first_entry(&fs_info->allocated_ebs,
struct extent_buffer, leak_list);
- pr_err(
- "BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
- eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
+ btrfs_err(fs_info,
+ "buffer leak start %llu len %u refs %d bflags %lu owner %llu",
+ eb->start, eb->len, refcount_read(&eb->refs), eb->bflags,
btrfs_header_owner(eb));
list_del(&eb->leak_list);
WARN_ON_ONCE(1);
@@ -96,9 +96,31 @@ void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
*/
struct btrfs_bio_ctrl {
struct btrfs_bio *bbio;
+ /* Last byte contained in bbio + 1. */
+ loff_t next_file_offset;
enum btrfs_compression_type compress_type;
u32 len_to_oe_boundary;
blk_opf_t opf;
+ /*
+ * For data read bios, we attempt to optimize csum lookups if the extent
+ * generation is older than the current one. To make this possible, we
+ * need to track the maximum generation of an extent in a bio_ctrl to
+ * make the decision when submitting the bio.
+ *
+ * The pattern between do_readpage(), submit_one_bio() and
+ * submit_extent_folio() is quite subtle, so tracking this is tricky.
+ *
+ * As we process extent E, we might submit a bio with existing built up
+ * extents before adding E to a new bio, or we might just add E to the
+ * bio. As a result, E's generation could apply to the current bio or
+ * to the next one, so we need to be careful to update the bio_ctrl's
+ * generation with E's only when we are sure E is added to bio_ctrl->bbio
+ * in submit_extent_folio().
+ *
+ * See the comment in btrfs_lookup_bio_sums() for more detail on the
+ * need for this optimization.
+ */
+ u64 generation;
btrfs_bio_end_io_t end_io_func;
struct writeback_control *wbc;
@@ -108,8 +130,47 @@ struct btrfs_bio_ctrl {
* This is to avoid touching ranges covered by compression/inline.
*/
unsigned long submit_bitmap;
+ struct readahead_control *ractl;
+
+ /*
+ * The start offset of the last used extent map by a read operation.
+ *
+ * This is for proper compressed read merge.
+ * U64_MAX means we are starting the read and have made no progress yet.
+ *
+ * The current btrfs_bio_is_contig() only uses disk_bytenr as
+ * the condition to check if the read can be merged with the previous
+ * bio, which is not correct. E.g. two file extents pointing to the
+ * same extent but with different offset.
+ *
+ * So here we need to do extra checks to only merge reads that are
+ * covered by the same extent map.
+ * Just extent_map::start will be enough, as they are unique
+ * inside the same inode.
+ */
+ u64 last_em_start;
};
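The generation tracking described in the struct comment boils down to remembering the maximum extent generation that went into the bio currently owned by bio_ctrl, and folding a new extent's generation in only once that extent has really been added to bio_ctrl->bbio. A sketch of that update rule under those assumptions (the helper name is hypothetical; per the comment, the real update happens in submit_extent_folio()):

    static void account_extent_generation(struct btrfs_bio_ctrl *bio_ctrl,
                                          u64 em_generation)
    {
            /*
             * submit_one_bio() resets ->generation to 0 after submitting, so
             * the max() below starts tracking afresh for the next bio.
             */
            bio_ctrl->generation = max(bio_ctrl->generation, em_generation);
    }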
+/*
+ * Helper to set the csum search commit root option for a bio_ctrl's bbio
+ * before submitting the bio.
+ *
+ * Only for use by submit_one_bio().
+ */
+static void bio_set_csum_search_commit_root(struct btrfs_bio_ctrl *bio_ctrl)
+{
+ struct btrfs_bio *bbio = bio_ctrl->bbio;
+
+ ASSERT(bbio);
+
+ if (!(btrfs_op(&bbio->bio) == BTRFS_MAP_READ && is_data_inode(bbio->inode)))
+ return;
+
+ bio_ctrl->bbio->csum_search_commit_root =
+ (bio_ctrl->generation &&
+ bio_ctrl->generation < btrfs_get_fs_generation(bbio->inode->root->fs_info));
+}
+
static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
{
struct btrfs_bio *bbio = bio_ctrl->bbio;
@@ -120,6 +181,8 @@ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
/* Caller should ensure the bio has at least some range added */
ASSERT(bbio->bio.bi_iter.bi_size);
+ bio_set_csum_search_commit_root(bio_ctrl);
+
if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
btrfs_submit_compressed_read(bbio);
@@ -128,6 +191,12 @@ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
/* The bbio is owned by the end_io handler now */
bio_ctrl->bbio = NULL;
+ /*
+ * We used the generation to decide whether to lookup csums in the
+ * commit_root or not when we called bio_set_csum_search_commit_root()
+ * above. Now, reset the generation for the next bio.
+ */
+ bio_ctrl->generation = 0;
}
/*
@@ -198,9 +267,8 @@ static void __process_folios_contig(struct address_space *mapping,
u64 end, unsigned long page_ops)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
- pgoff_t start_index = start >> PAGE_SHIFT;
+ pgoff_t index = start >> PAGE_SHIFT;
pgoff_t end_index = end >> PAGE_SHIFT;
- pgoff_t index = start_index;
struct folio_batch fbatch;
int i;
@@ -221,36 +289,27 @@ static void __process_folios_contig(struct address_space *mapping,
}
}
-static noinline void __unlock_for_delalloc(const struct inode *inode,
- const struct folio *locked_folio,
+static noinline void unlock_delalloc_folio(const struct inode *inode,
+ struct folio *locked_folio,
u64 start, u64 end)
{
- unsigned long index = start >> PAGE_SHIFT;
- unsigned long end_index = end >> PAGE_SHIFT;
-
ASSERT(locked_folio);
- if (index == locked_folio->index && end_index == index)
- return;
__process_folios_contig(inode->i_mapping, locked_folio, start, end,
PAGE_UNLOCK);
}
static noinline int lock_delalloc_folios(struct inode *inode,
- const struct folio *locked_folio,
+ struct folio *locked_folio,
u64 start, u64 end)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct address_space *mapping = inode->i_mapping;
- pgoff_t start_index = start >> PAGE_SHIFT;
+ pgoff_t index = start >> PAGE_SHIFT;
pgoff_t end_index = end >> PAGE_SHIFT;
- pgoff_t index = start_index;
u64 processed_end = start;
struct folio_batch fbatch;
- if (index == locked_folio->index && index == end_index)
- return 0;
-
folio_batch_init(&fbatch);
while (index <= end_index) {
unsigned int found_folios, i;
@@ -274,8 +333,7 @@ static noinline int lock_delalloc_folios(struct inode *inode,
goto out;
}
range_start = max_t(u64, folio_pos(folio), start);
- range_len = min_t(u64, folio_pos(folio) + folio_size(folio),
- end + 1) - range_start;
+ range_len = min_t(u64, folio_next_pos(folio), end + 1) - range_start;
btrfs_folio_set_lock(fs_info, folio, range_start, range_len);
processed_end = range_start + range_len - 1;
@@ -288,8 +346,7 @@ static noinline int lock_delalloc_folios(struct inode *inode,
out:
folio_batch_release(&fbatch);
if (processed_end > start)
- __unlock_for_delalloc(inode, locked_folio, start,
- processed_end);
+ unlock_delalloc_folio(inode, locked_folio, start, processed_end);
return -EAGAIN;
}
@@ -317,8 +374,7 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
const u64 orig_start = *start;
const u64 orig_end = *end;
- /* The sanity tests may not set a valid fs_info. */
- u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
+ u64 max_bytes = fs_info->max_extent_size;
u64 delalloc_start;
u64 delalloc_end;
bool found;
@@ -330,12 +386,19 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
ASSERT(orig_end > orig_start);
/* The range should at least cover part of the folio */
- ASSERT(!(orig_start >= folio_pos(locked_folio) + folio_size(locked_folio) ||
+ ASSERT(!(orig_start >= folio_next_pos(locked_folio) ||
orig_end <= folio_pos(locked_folio)));
again:
/* step one, find a bunch of delalloc bytes starting at start */
delalloc_start = *start;
delalloc_end = 0;
+
+ /*
+ * If @max_bytes is smaller than a block, btrfs_find_delalloc_range() can
+ * return early without handling any dirty ranges.
+ */
+ ASSERT(max_bytes >= fs_info->sectorsize);
+
found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
max_bytes, &cached_state);
if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
@@ -343,7 +406,7 @@ again:
/* @delalloc_end can be -1, never go beyond @orig_end */
*end = min(delalloc_end, orig_end);
- free_extent_state(cached_state);
+ btrfs_free_extent_state(cached_state);
return false;
}
@@ -361,18 +424,19 @@ again:
if (delalloc_end + 1 - delalloc_start > max_bytes)
delalloc_end = delalloc_start + max_bytes - 1;
- /* step two, lock all the folioss after the folios that has start */
+ /* step two, lock all the folios after the folio that has start */
ret = lock_delalloc_folios(inode, locked_folio, delalloc_start,
delalloc_end);
ASSERT(!ret || ret == -EAGAIN);
if (ret == -EAGAIN) {
- /* some of the folios are gone, lets avoid looping by
- * shortening the size of the delalloc range we're searching
+ /*
+ * Some of the folios are gone, let's avoid looping by
+ * shortening the size of the delalloc range we're searching.
*/
- free_extent_state(cached_state);
+ btrfs_free_extent_state(cached_state);
cached_state = NULL;
if (!loops) {
- max_bytes = PAGE_SIZE;
+ max_bytes = fs_info->sectorsize;
loops = 1;
goto again;
} else {
@@ -382,15 +446,15 @@ again:
}
/* step three, lock the state bits for the whole range */
- lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
+ btrfs_lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
/* then test to make sure it is all still delalloc */
- ret = test_range_bit(tree, delalloc_start, delalloc_end,
- EXTENT_DELALLOC, cached_state);
+ ret = btrfs_test_range_bit(tree, delalloc_start, delalloc_end,
+ EXTENT_DELALLOC, cached_state);
- unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
+ btrfs_unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
if (!ret) {
- __unlock_for_delalloc(inode, locked_folio, delalloc_start,
+ unlock_delalloc_folio(inode, locked_folio, delalloc_start,
delalloc_end);
cond_resched();
goto again;
@@ -406,7 +470,7 @@ void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
struct extent_state **cached,
u32 clear_bits, unsigned long page_ops)
{
- clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);
+ btrfs_clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);
__process_folios_contig(inode->vfs_inode.i_mapping, locked_folio, start,
end, page_ops);
@@ -428,14 +492,14 @@ static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 le
struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
ASSERT(folio_pos(folio) <= start &&
- start + len <= folio_pos(folio) + PAGE_SIZE);
+ start + len <= folio_next_pos(folio));
if (uptodate && btrfs_verify_folio(folio, start, len))
btrfs_folio_set_uptodate(fs_info, folio, start, len);
else
btrfs_folio_clear_uptodate(fs_info, folio, start, len);
- if (!btrfs_is_subpage(fs_info, folio->mapping))
+ if (!btrfs_is_subpage(fs_info, folio))
folio_unlock(folio);
else
btrfs_folio_end_lock(fs_info, folio, start, len);
@@ -453,7 +517,7 @@ static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 le
*/
static void end_bbio_data_write(struct btrfs_bio *bbio)
{
- struct btrfs_fs_info *fs_info = bbio->fs_info;
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
struct bio *bio = &bbio->bio;
int error = blk_status_to_errno(bio->bi_status);
struct folio_iter fi;
@@ -465,9 +529,6 @@ static void end_bbio_data_write(struct btrfs_bio *bbio)
u64 start = folio_pos(folio) + fi.offset;
u32 len = fi.length;
- /* Only order 0 (single page) folios are allowed for data. */
- ASSERT(folio_order(folio) == 0);
-
/* Our read/write should always be sector aligned. */
if (!IS_ALIGNED(fi.offset, sectorsize))
btrfs_err(fs_info,
@@ -491,11 +552,11 @@ static void end_bbio_data_write(struct btrfs_bio *bbio)
static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
{
ASSERT(folio_test_locked(folio));
- if (!btrfs_is_subpage(fs_info, folio->mapping))
+ if (!btrfs_is_subpage(fs_info, folio))
return;
ASSERT(folio_test_private(folio));
- btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), PAGE_SIZE);
+ btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), folio_size(folio));
}
/*
@@ -512,50 +573,26 @@ static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
*/
static void end_bbio_data_read(struct btrfs_bio *bbio)
{
- struct btrfs_fs_info *fs_info = bbio->fs_info;
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
struct bio *bio = &bbio->bio;
struct folio_iter fi;
- const u32 sectorsize = fs_info->sectorsize;
ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_folio_all(fi, &bbio->bio) {
bool uptodate = !bio->bi_status;
struct folio *folio = fi.folio;
struct inode *inode = folio->mapping->host;
- u64 start;
- u64 end;
- u32 len;
+ u64 start = folio_pos(folio) + fi.offset;
- /* For now only order 0 folios are supported for data. */
- ASSERT(folio_order(folio) == 0);
btrfs_debug(fs_info,
"%s: bi_sector=%llu, err=%d, mirror=%u",
__func__, bio->bi_iter.bi_sector, bio->bi_status,
bbio->mirror_num);
- /*
- * We always issue full-sector reads, but if some block in a
- * folio fails to read, blk_update_request() will advance
- * bv_offset and adjust bv_len to compensate. Print a warning
- * for unaligned offsets, and an error if they don't add up to
- * a full sector.
- */
- if (!IS_ALIGNED(fi.offset, sectorsize))
- btrfs_err(fs_info,
- "partial page read in btrfs with offset %zu and length %zu",
- fi.offset, fi.length);
- else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize))
- btrfs_info(fs_info,
- "incomplete page read with offset %zu and length %zu",
- fi.offset, fi.length);
-
- start = folio_pos(folio) + fi.offset;
- end = start + fi.length - 1;
- len = fi.length;
if (likely(uptodate)) {
+ u64 end = start + fi.length - 1;
loff_t i_size = i_size_read(inode);
- pgoff_t end_index = i_size >> folio_shift(folio);
/*
* Zero out the remaining part if this range straddles
@@ -564,9 +601,11 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
* Here we should only zero the range inside the folio,
* not touch anything else.
*
- * NOTE: i_size is exclusive while end is inclusive.
+ * NOTE: i_size is exclusive while end is inclusive and
+ * folio_contains() takes PAGE_SIZE units.
*/
- if (folio_index(folio) == end_index && i_size <= end) {
+ if (folio_contains(folio, i_size >> PAGE_SHIFT) &&
+ i_size <= end) {
u32 zero_start = max(offset_in_folio(folio, i_size),
offset_in_folio(folio, start));
u32 zero_len = offset_in_folio(folio, end) + 1 -
@@ -577,7 +616,7 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
}
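/*
 * Worked example for the zeroing above (assuming a 16K folio at file
 * offset 0, i_size == 10000 and a read covering [8192, 16383]):
 * zero_start = max(10000, 8192) = 10000 and
 * zero_len = 16384 - 10000 = 6384, so only the tail beyond i_size is
 * zeroed.
 */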
/* Update page status and unlock. */
- end_folio_read(folio, uptodate, start, len);
+ end_folio_read(folio, uptodate, start, fi.length);
}
bio_put(bio);
}
@@ -586,6 +625,7 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
* Populate every free slot in a provided array with folios using GFP_NOFS.
*
* @nr_folios: number of folios to allocate
+ * @order: the order of the folios to be allocated
* @folio_array: the array to fill with folios; any existing non-NULL entries in
* the array will be skipped
*
@@ -593,12 +633,13 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
* -ENOMEM otherwise, the partially allocated folios would be freed and
* the array slots zeroed
*/
-int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array)
+int btrfs_alloc_folio_array(unsigned int nr_folios, unsigned int order,
+ struct folio **folio_array)
{
for (int i = 0; i < nr_folios; i++) {
if (folio_array[i])
continue;
- folio_array[i] = folio_alloc(GFP_NOFS, 0);
+ folio_array[i] = folio_alloc(GFP_NOFS, order);
if (!folio_array[i])
goto error;
}
@@ -607,6 +648,7 @@ error:
for (int i = 0; i < nr_folios; i++) {
if (folio_array[i])
folio_put(folio_array[i]);
+ folio_array[i] = NULL;
}
return -ENOMEM;
}
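/*
 * A minimal usage sketch (hypothetical caller): allocate four order-0
 * folios, bailing out on memory pressure:
 *
 *	struct folio *folios[4] = { NULL };
 *
 *	if (btrfs_alloc_folio_array(4, 0, folios))
 *		return -ENOMEM;
 */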
@@ -632,7 +674,7 @@ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
for (allocated = 0; allocated < nr_pages;) {
unsigned int last = allocated;
- allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
+ allocated = alloc_pages_bulk(gfp, nr_pages, page_array);
if (unlikely(allocated == last)) {
/* No progress, fail and do cleanup. */
for (int i = 0; i < allocated; i++) {
@@ -668,13 +710,10 @@ static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail)
}
static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
- struct folio *folio, u64 disk_bytenr,
- unsigned int pg_offset)
+ u64 disk_bytenr, loff_t file_offset)
{
struct bio *bio = &bio_ctrl->bbio->bio;
- struct bio_vec *bvec = bio_last_bvec_all(bio);
const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
- struct folio *bv_folio = page_folio(bvec->bv_page);
if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
/*
@@ -685,19 +724,11 @@ static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
}
/*
- * The contig check requires the following conditions to be met:
- *
- * 1) The folios are belonging to the same inode
- * This is implied by the call chain.
- *
- * 2) The range has adjacent logical bytenr
- *
- * 3) The range has adjacent file offset
- * This is required for the usage of btrfs_bio->file_offset.
+ * To merge into a bio both the disk sector and the logical offset in
+ * the file need to be contiguous.
*/
- return bio_end_sector(bio) == sector &&
- folio_pos(bv_folio) + bvec->bv_offset + bvec->bv_len ==
- folio_pos(folio) + pg_offset;
+ return bio_ctrl->next_file_offset == file_offset &&
+ bio_end_sector(bio) == sector;
}
static void alloc_new_bio(struct btrfs_inode *inode,
@@ -707,13 +738,13 @@ static void alloc_new_bio(struct btrfs_inode *inode,
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_bio *bbio;
- bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
- bio_ctrl->end_io_func, NULL);
+ bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, inode,
+ file_offset, bio_ctrl->end_io_func, NULL);
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
- bbio->inode = inode;
- bbio->file_offset = file_offset;
+ bbio->bio.bi_write_hint = inode->vfs_inode.i_write_hint;
bio_ctrl->bbio = bbio;
bio_ctrl->len_to_oe_boundary = U32_MAX;
+ bio_ctrl->next_file_offset = file_offset;
/* Limit data write bios to the ordered boundary. */
if (bio_ctrl->wbc) {
@@ -744,33 +775,35 @@ static void alloc_new_bio(struct btrfs_inode *inode,
* @size: portion of page that we want to write to
* @pg_offset: offset of the new bio or to check whether we are adding
* a contiguous page to the previous one
+ * @read_em_generation: generation of the extent_map we are submitting
+ * (only used for read)
*
* This will either add the page into the existing @bio_ctrl->bbio, or allocate a
* new one in @bio_ctrl->bbio.
- * The mirror number for this IO should already be initizlied in
+ * The mirror number for this IO should already be initialized in
* @bio_ctrl->mirror_num.
*/
static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
u64 disk_bytenr, struct folio *folio,
- size_t size, unsigned long pg_offset)
+ size_t size, unsigned long pg_offset,
+ u64 read_em_generation)
{
struct btrfs_inode *inode = folio_to_inode(folio);
+ loff_t file_offset = folio_pos(folio) + pg_offset;
- ASSERT(pg_offset + size <= PAGE_SIZE);
+ ASSERT(pg_offset + size <= folio_size(folio));
ASSERT(bio_ctrl->end_io_func);
if (bio_ctrl->bbio &&
- !btrfs_bio_is_contig(bio_ctrl, folio, disk_bytenr, pg_offset))
+ !btrfs_bio_is_contig(bio_ctrl, disk_bytenr, file_offset))
submit_one_bio(bio_ctrl);
do {
u32 len = size;
/* Allocate new bio if needed */
- if (!bio_ctrl->bbio) {
- alloc_new_bio(inode, bio_ctrl, disk_bytenr,
- folio_pos(folio) + pg_offset);
- }
+ if (!bio_ctrl->bbio)
+ alloc_new_bio(inode, bio_ctrl, disk_bytenr, file_offset);
/* Cap to the current ordered extent boundary if there is one. */
if (len > bio_ctrl->len_to_oe_boundary) {
@@ -784,14 +817,20 @@ static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
submit_one_bio(bio_ctrl);
continue;
}
+ /*
+ * Now that the range is definitely added to the bio, include the
+ * extent map's generation in the max generation calculation.
+ */
+ bio_ctrl->generation = max(bio_ctrl->generation, read_em_generation);
+ bio_ctrl->next_file_offset += len;
if (bio_ctrl->wbc)
- wbc_account_cgroup_owner(bio_ctrl->wbc, folio,
- len);
+ wbc_account_cgroup_owner(bio_ctrl->wbc, folio, len);
size -= len;
pg_offset += len;
disk_bytenr += len;
+ file_offset += len;
/*
* len_to_oe_boundary defaults to U32_MAX, which isn't folio or
@@ -825,7 +864,7 @@ static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
static int attach_extent_buffer_folio(struct extent_buffer *eb,
struct folio *folio,
- struct btrfs_subpage *prealloc)
+ struct btrfs_folio_state *prealloc)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
int ret = 0;
@@ -839,7 +878,7 @@ static int attach_extent_buffer_folio(struct extent_buffer *eb,
if (folio->mapping)
lockdep_assert_held(&folio->mapping->i_private_lock);
- if (fs_info->nodesize >= PAGE_SIZE) {
+ if (!btrfs_meta_is_subpage(fs_info)) {
if (!folio_test_private(folio))
folio_attach_private(folio, eb);
else
@@ -849,7 +888,7 @@ static int attach_extent_buffer_folio(struct extent_buffer *eb,
/* Already mapped, just free prealloc */
if (folio_test_private(folio)) {
- btrfs_free_subpage(prealloc);
+ btrfs_free_folio_state(prealloc);
return 0;
}
@@ -858,15 +897,10 @@ static int attach_extent_buffer_folio(struct extent_buffer *eb,
folio_attach_private(folio, prealloc);
else
/* Do new allocation to attach subpage */
- ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
+ ret = btrfs_attach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA);
return ret;
}
-int set_page_extent_mapped(struct page *page)
-{
- return set_folio_extent_mapped(page_folio(page));
-}
-
int set_folio_extent_mapped(struct folio *folio)
{
struct btrfs_fs_info *fs_info;
@@ -878,8 +912,8 @@ int set_folio_extent_mapped(struct folio *folio)
fs_info = folio_to_fs_info(folio);
- if (btrfs_is_subpage(fs_info, folio->mapping))
- return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
+ if (btrfs_is_subpage(fs_info, folio))
+ return btrfs_attach_folio_state(fs_info, folio, BTRFS_SUBPAGE_DATA);
folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
return 0;
@@ -895,44 +929,60 @@ void clear_folio_extent_mapped(struct folio *folio)
return;
fs_info = folio_to_fs_info(folio);
- if (btrfs_is_subpage(fs_info, folio->mapping))
- return btrfs_detach_subpage(fs_info, folio);
+ if (btrfs_is_subpage(fs_info, folio))
+ return btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_DATA);
folio_detach_private(folio);
}
-static struct extent_map *__get_extent_map(struct inode *inode,
- struct folio *folio, u64 start,
- u64 len, struct extent_map **em_cached)
+static struct extent_map *get_extent_map(struct btrfs_inode *inode,
+ struct folio *folio, u64 start,
+ u64 len, struct extent_map **em_cached)
{
struct extent_map *em;
- struct extent_state *cached_state = NULL;
ASSERT(em_cached);
if (*em_cached) {
em = *em_cached;
- if (extent_map_in_tree(em) && start >= em->start &&
- start < extent_map_end(em)) {
+ if (btrfs_extent_map_in_tree(em) && start >= em->start &&
+ start < btrfs_extent_map_end(em)) {
refcount_inc(&em->refs);
return em;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
*em_cached = NULL;
}
- btrfs_lock_and_flush_ordered_range(BTRFS_I(inode), start, start + len - 1, &cached_state);
- em = btrfs_get_extent(BTRFS_I(inode), folio, start, len);
+ em = btrfs_get_extent(inode, folio, start, len);
if (!IS_ERR(em)) {
BUG_ON(*em_cached);
refcount_inc(&em->refs);
*em_cached = em;
}
- unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len - 1, &cached_state);
return em;
}
+
+static void btrfs_readahead_expand(struct readahead_control *ractl,
+ const struct extent_map *em)
+{
+ const u64 ra_pos = readahead_pos(ractl);
+ const u64 ra_end = ra_pos + readahead_length(ractl);
+ const u64 em_end = em->start + em->len;
+
+ /* No expansion for holes and inline extents. */
+ if (em->disk_bytenr > EXTENT_MAP_LAST_BYTE)
+ return;
+
+ ASSERT(em_end >= ra_pos,
+ "extent_map %llu %llu ends before current readahead position %llu",
+ em->start, em->len, ra_pos);
+ if (em_end > ra_end)
+ readahead_expand(ractl, ra_pos, em_end - ra_pos);
+}
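/*
 * Worked example: with readahead_pos() == 1 MiB, readahead_length() ==
 * 256K and a compressed extent map covering [1 MiB, 2 MiB), em_end
 * (2 MiB) is beyond ra_end (1.25 MiB), so the readahead window is
 * expanded to cover em_end - ra_pos = 1 MiB starting at ra_pos.
 */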
+
/*
* basic readpage implementation. Locked extent state structs are inserted
* into the tree that are removed when the IO is done (by the end_io
@@ -941,21 +991,17 @@ static struct extent_map *__get_extent_map(struct inode *inode,
* return 0 on success, otherwise return error
*/
static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
- struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
+ struct btrfs_bio_ctrl *bio_ctrl)
{
struct inode *inode = folio->mapping->host;
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
u64 start = folio_pos(folio);
- const u64 end = start + PAGE_SIZE - 1;
- u64 cur = start;
+ const u64 end = start + folio_size(folio) - 1;
u64 extent_offset;
u64 last_byte = i_size_read(inode);
- u64 block_start;
struct extent_map *em;
int ret = 0;
- size_t pg_offset = 0;
- size_t iosize;
- size_t blocksize = fs_info->sectorsize;
+ const size_t blocksize = fs_info->sectorsize;
ret = set_folio_extent_mapped(folio);
if (ret < 0) {
@@ -963,49 +1009,63 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
return ret;
}
- if (folio->index == last_byte >> folio_shift(folio)) {
+ if (folio_contains(folio, last_byte >> PAGE_SHIFT)) {
size_t zero_offset = offset_in_folio(folio, last_byte);
- if (zero_offset) {
- iosize = folio_size(folio) - zero_offset;
- folio_zero_range(folio, zero_offset, iosize);
- }
+ if (zero_offset)
+ folio_zero_range(folio, zero_offset,
+ folio_size(folio) - zero_offset);
}
bio_ctrl->end_io_func = end_bbio_data_read;
begin_folio_read(fs_info, folio);
- while (cur <= end) {
+ for (u64 cur = start; cur <= end; cur += blocksize) {
enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
+ unsigned long pg_offset = offset_in_folio(folio, cur);
bool force_bio_submit = false;
u64 disk_bytenr;
+ u64 block_start;
+ u64 em_gen;
ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
if (cur >= last_byte) {
- iosize = folio_size(folio) - pg_offset;
- folio_zero_range(folio, pg_offset, iosize);
- end_folio_read(folio, true, cur, iosize);
+ folio_zero_range(folio, pg_offset, end - cur + 1);
+ end_folio_read(folio, true, cur, end - cur + 1);
break;
}
- em = __get_extent_map(inode, folio, cur, end - cur + 1,
- em_cached);
+ if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) {
+ end_folio_read(folio, true, cur, blocksize);
+ continue;
+ }
+ em = get_extent_map(BTRFS_I(inode), folio, cur, end - cur + 1, em_cached);
if (IS_ERR(em)) {
end_folio_read(folio, false, cur, end + 1 - cur);
return PTR_ERR(em);
}
extent_offset = cur - em->start;
- BUG_ON(extent_map_end(em) <= cur);
+ BUG_ON(btrfs_extent_map_end(em) <= cur);
BUG_ON(end < cur);
- compress_type = extent_map_compression(em);
+ compress_type = btrfs_extent_map_compression(em);
+
+ /*
+ * Only expand readahead for extents whose pages are already being
+ * created anyway by add_ra_bio_pages(), i.e. compressed extents
+ * in the non-subpage case.
+ */
+ if (bio_ctrl->ractl &&
+ !btrfs_is_subpage(fs_info, folio) &&
+ compress_type != BTRFS_COMPRESS_NONE)
+ btrfs_readahead_expand(bio_ctrl->ractl, em);
- iosize = min(extent_map_end(em) - cur, end - cur + 1);
- iosize = ALIGN(iosize, blocksize);
if (compress_type != BTRFS_COMPRESS_NONE)
disk_bytenr = em->disk_bytenr;
else
- disk_bytenr = extent_map_block_start(em) + extent_offset;
- block_start = extent_map_block_start(em);
+ disk_bytenr = btrfs_extent_map_block_start(em) + extent_offset;
+
if (em->flags & EXTENT_FLAG_PREALLOC)
block_start = EXTENT_MAP_HOLE;
+ else
+ block_start = btrfs_extent_map_block_start(em);
/*
* If we have a file range that points to a compressed extent
@@ -1042,30 +1102,25 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
* non-optimal behavior (submitting 2 bios for the same extent).
*/
if (compress_type != BTRFS_COMPRESS_NONE &&
- prev_em_start && *prev_em_start != (u64)-1 &&
- *prev_em_start != em->start)
+ bio_ctrl->last_em_start != U64_MAX &&
+ bio_ctrl->last_em_start != em->start)
force_bio_submit = true;
- if (prev_em_start)
- *prev_em_start = em->start;
+ bio_ctrl->last_em_start = em->start;
- free_extent_map(em);
+ em_gen = em->generation;
+ btrfs_free_extent_map(em);
em = NULL;
/* we've found a hole, just zero and go on */
if (block_start == EXTENT_MAP_HOLE) {
- folio_zero_range(folio, pg_offset, iosize);
-
- end_folio_read(folio, true, cur, iosize);
- cur = cur + iosize;
- pg_offset += iosize;
+ folio_zero_range(folio, pg_offset, blocksize);
+ end_folio_read(folio, true, cur, blocksize);
continue;
}
/* the get_extent function already copied the data into the folio */
if (block_start == EXTENT_MAP_INLINE) {
- end_folio_read(folio, true, cur, iosize);
- cur = cur + iosize;
- pg_offset += iosize;
+ end_folio_read(folio, true, cur, blocksize);
continue;
}
@@ -1076,23 +1131,208 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
if (force_bio_submit)
submit_one_bio(bio_ctrl);
- submit_extent_folio(bio_ctrl, disk_bytenr, folio, iosize,
- pg_offset);
- cur = cur + iosize;
- pg_offset += iosize;
+ submit_extent_folio(bio_ctrl, disk_bytenr, folio, blocksize,
+ pg_offset, em_gen);
}
-
return 0;
}
+/*
+ * Check if we can skip waiting for the @ordered extent covering the block at @fileoff.
+ *
+ * @fileoff: Both input and output.
+ * As input, the file offset where the check should start.
+ * As output, where the next check should start,
+ * if the function returns true.
+ *
+ * Return true if we can skip up to the returned @fileoff. The caller needs to
+ * check the new @fileoff value to make sure it covers the full range before
+ * skipping the full OE.
+ *
+ * Return false if we must wait for the ordered extent.
+ */
+static bool can_skip_one_ordered_range(struct btrfs_inode *inode,
+ struct btrfs_ordered_extent *ordered,
+ u64 *fileoff)
+{
+ const struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct folio *folio;
+ const u32 blocksize = fs_info->sectorsize;
+ u64 cur = *fileoff;
+ bool ret;
+
+ folio = filemap_get_folio(inode->vfs_inode.i_mapping, cur >> PAGE_SHIFT);
+
+ /*
+ * We should have locked the folio(s) for range [start, end], thus
+ * there must be a folio and it must be locked.
+ */
+ ASSERT(!IS_ERR(folio));
+ ASSERT(folio_test_locked(folio));
+
+ /*
+ * There are several cases for the folio and OE combination:
+ *
+ * 1) Folio has no private flag
+ * The OE has all its IO done but not yet finished, and folio got
+ * invalidated.
+ *
+ * Have we have to wait for the OE to finish, as it may contain the
+ * to-be-inserted data checksum.
+ * Without the data checksum inserted into the csum tree, read will
+ * just fail with missing csum.
+ */
+ if (!folio_test_private(folio)) {
+ ret = false;
+ goto out;
+ }
+
+ /*
+ * 2) The first block is DIRTY.
+ *
+ * This means the OE is created by some other folios whose file pos is
+ * before this one. And since we are holding the folio lock, the writeback
+ * of this folio cannot start.
+ *
+ * We must skip the whole OE, because it will never start until we
+ * finished our folio read and unlocked the folio.
+ */
+ if (btrfs_folio_test_dirty(fs_info, folio, cur, blocksize)) {
+ u64 range_len = umin(folio_next_pos(folio),
+ ordered->file_offset + ordered->num_bytes) - cur;
+
+ ret = true;
+ /*
+ * At least inside the folio, all the remaining blocks should
+ * also be dirty.
+ */
+ ASSERT(btrfs_folio_test_dirty(fs_info, folio, cur, range_len));
+ *fileoff = ordered->file_offset + ordered->num_bytes;
+ goto out;
+ }
+
+ /*
+ * 3) The first block is uptodate.
+ *
+ * At least the first block can be skipped, but we are still not fully
+ * sure, e.g. the OE may cover other folios in the range that cannot
+ * be skipped.
+ * So we return true and update @fileoff to the OE/folio boundary.
+ */
+ if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) {
+ u64 range_len = umin(folio_next_pos(folio),
+ ordered->file_offset + ordered->num_bytes) - cur;
+
+ /*
+ * The whole range to the OE end or folio boundary should also
+ * be uptodate.
+ */
+ ASSERT(btrfs_folio_test_uptodate(fs_info, folio, cur, range_len));
+ ret = true;
+ *fileoff = cur + range_len;
+ goto out;
+ }
+
+ /*
+ * 4) The first block is not uptodate.
+ *
+ * This means the folio was invalidated after the writeback finished,
+ * but was later re-inserted into the filemap by some other operation
+ * (e.g. a block aligned buffered write).
+ * Very much the same as case 1).
+ */
+ ret = false;
+out:
+ folio_put(folio);
+ return ret;
+}
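/*
 * Illustrative example (assuming a 16K folio at file offset 0, a 4K
 * block size and an ordered extent covering [0, 1 MiB)):
 * - if the first block is dirty (case 2), *fileoff jumps to 1 MiB,
 *   the end of the ordered extent;
 * - if the first block is uptodate (case 3), *fileoff advances to 16K,
 *   the folio boundary, and the caller re-checks from there.
 */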
+
+static bool can_skip_ordered_extent(struct btrfs_inode *inode,
+ struct btrfs_ordered_extent *ordered,
+ u64 start, u64 end)
+{
+ const u64 range_end = min(end, ordered->file_offset + ordered->num_bytes - 1);
+ u64 cur = max(start, ordered->file_offset);
+
+ while (cur < range_end) {
+ bool can_skip;
+
+ can_skip = can_skip_one_ordered_range(inode, ordered, &cur);
+ if (!can_skip)
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Locking helper to make sure we get a stable view of extent maps for the
+ * involved range.
+ *
+ * This is for folio read paths (read and readahead), thus the involved range
+ * should have all the folios locked.
+ */
+static void lock_extents_for_read(struct btrfs_inode *inode, u64 start, u64 end,
+ struct extent_state **cached_state)
+{
+ u64 cur_pos;
+
+ /* Caller must provide a valid @cached_state. */
+ ASSERT(cached_state);
+
+ /* The range must at least be page aligned, as all read paths are folio based. */
+ ASSERT(IS_ALIGNED(start, PAGE_SIZE));
+ ASSERT(IS_ALIGNED(end + 1, PAGE_SIZE));
+
+again:
+ btrfs_lock_extent(&inode->io_tree, start, end, cached_state);
+ cur_pos = start;
+ while (cur_pos < end) {
+ struct btrfs_ordered_extent *ordered;
+
+ ordered = btrfs_lookup_ordered_range(inode, cur_pos,
+ end - cur_pos + 1);
+ /*
+ * No ordered extents in the range. Since we hold the extent lock,
+ * no one can modify the extent maps in the range, so we're safe to return.
+ */
+ if (!ordered)
+ break;
+
+ /* Check if we can skip waiting for the whole OE. */
+ if (can_skip_ordered_extent(inode, ordered, start, end)) {
+ cur_pos = min(ordered->file_offset + ordered->num_bytes,
+ end + 1);
+ btrfs_put_ordered_extent(ordered);
+ continue;
+ }
+
+ /* Now wait for the OE to finish. */
+ btrfs_unlock_extent(&inode->io_tree, start, end, cached_state);
+ btrfs_start_ordered_extent_nowriteback(ordered, start, end + 1 - start);
+ btrfs_put_ordered_extent(ordered);
+ /* We have unlocked the whole range, restart from the beginning. */
+ goto again;
+ }
+}
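/*
 * Typical usage (a sketch of the pattern in btrfs_read_folio() below):
 *
 *	struct extent_state *cached_state = NULL;
 *
 *	lock_extents_for_read(inode, start, end, &cached_state);
 *	ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl);
 *	btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
 */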
+
int btrfs_read_folio(struct file *file, struct folio *folio)
{
- struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
+ struct btrfs_inode *inode = folio_to_inode(folio);
+ const u64 start = folio_pos(folio);
+ const u64 end = start + folio_size(folio) - 1;
+ struct extent_state *cached_state = NULL;
+ struct btrfs_bio_ctrl bio_ctrl = {
+ .opf = REQ_OP_READ,
+ .last_em_start = U64_MAX,
+ };
struct extent_map *em_cached = NULL;
int ret;
- ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
- free_extent_map(em_cached);
+ lock_extents_for_read(inode, start, end, &cached_state);
+ ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl);
+ btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
+
+ btrfs_free_extent_map(em_cached);
/*
* If btrfs_do_readpage() failed we will want to submit the assembled
@@ -1110,7 +1350,7 @@ static void set_delalloc_bitmap(struct folio *folio, unsigned long *delalloc_bit
unsigned int start_bit;
unsigned int nbits;
- ASSERT(start >= folio_start && start + len <= folio_start + PAGE_SIZE);
+ ASSERT(start >= folio_start && start + len <= folio_start + folio_size(folio));
start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
nbits = len >> fs_info->sectorsize_bits;
ASSERT(bitmap_test_range_all_zero(delalloc_bitmap, start_bit, nbits));
@@ -1123,12 +1363,12 @@ static bool find_next_delalloc_bitmap(struct folio *folio,
{
struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
const u64 folio_start = folio_pos(folio);
- const unsigned int bitmap_size = fs_info->sectors_per_page;
+ const unsigned int bitmap_size = btrfs_blocks_per_folio(fs_info, folio);
unsigned int start_bit;
unsigned int first_zero;
unsigned int first_set;
- ASSERT(start >= folio_start && start < folio_start + PAGE_SIZE);
+ ASSERT(start >= folio_start && start < folio_start + folio_size(folio));
start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
first_set = find_next_bit(delalloc_bitmap, bitmap_size, start_bit);
@@ -1142,14 +1382,19 @@ static bool find_next_delalloc_bitmap(struct folio *folio,
}
/*
- * helper for extent_writepage(), doing all of the delayed allocation setup.
+ * Do all of the delayed allocation setup.
*
- * This returns 1 if btrfs_run_delalloc_range function did all the work required
- * to write the page (copy into inline extent). In this case the IO has
- * been started and the page is already unlocked.
+ * Return >0 if all the dirty blocks are submitted asynchronously (compression) or inlined.
+ * The @folio should no longer be touched (treat it as already unlocked).
*
- * This returns 0 if all went well (page still locked)
- * This returns < 0 if there were errors (page still locked)
+ * Return 0 if there are still dirty blocks that need to be submitted through
+ * extent_writepage_io().
+ * bio_ctrl->submit_bitmap will indicate which blocks of the folio should be
+ * submitted, and @folio is still kept locked.
+ *
+ * Return <0 if there is any error hit.
+ * Any allocated ordered extent range covering this folio will be marked
+ * finished (IOERR), and @folio is still kept locked.
*/
static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
struct folio *folio,
@@ -1157,9 +1402,10 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(&inode->vfs_inode);
struct writeback_control *wbc = bio_ctrl->wbc;
- const bool is_subpage = btrfs_is_subpage(fs_info, folio->mapping);
+ const bool is_subpage = btrfs_is_subpage(fs_info, folio);
const u64 page_start = folio_pos(folio);
const u64 page_end = page_start + folio_size(folio) - 1;
+ const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
unsigned long delalloc_bitmap = 0;
/*
* Save the last found delalloc end. As the delalloc end can go beyond
@@ -1167,6 +1413,16 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
* last delalloc end.
*/
u64 last_delalloc_end = 0;
+ /*
+ * The range end (exclusive) of the last successfully finished delalloc
+ * range.
+ * Any range covered by an ordered extent must either be manually marked
+ * finished (error handling), or have IO submitted (and finish the
+ * ordered extent normally).
+ *
+ * This records the end of ordered extent cleanup if we hit an error.
+ */
+ u64 last_finished_delalloc_end = page_start;
u64 delalloc_start = page_start;
u64 delalloc_end = page_end;
u64 delalloc_to_write = 0;
@@ -1174,14 +1430,14 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
int bit;
/* Save the dirty bitmap as our submission bitmap will be a subset of it. */
- if (btrfs_is_subpage(fs_info, inode->vfs_inode.i_mapping)) {
- ASSERT(fs_info->sectors_per_page > 1);
+ if (btrfs_is_subpage(fs_info, folio)) {
+ ASSERT(blocks_per_folio > 1);
btrfs_get_subpage_dirty_bitmap(fs_info, folio, &bio_ctrl->submit_bitmap);
} else {
bio_ctrl->submit_bitmap = 1;
}
- for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
+ for_each_set_bit(bit, &bio_ctrl->submit_bitmap, blocks_per_folio) {
u64 start = page_start + (bit << fs_info->sectorsize_bits);
btrfs_folio_set_lock(fs_info, folio, start, fs_info->sectorsize);
@@ -1235,19 +1491,36 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
found_len = last_delalloc_end + 1 - found_start;
if (ret >= 0) {
+ /*
+ * Some delalloc ranges may have been created by previous folios.
+ * Thus we still need to clean up this range during error
+ * handling.
+ */
+ last_finished_delalloc_end = found_start;
/* No errors hit so far, run the current delalloc range. */
ret = btrfs_run_delalloc_range(inode, folio,
found_start,
found_start + found_len - 1,
wbc);
+ if (ret >= 0)
+ last_finished_delalloc_end = found_start + found_len;
+ if (unlikely(ret < 0))
+ btrfs_err_rl(fs_info,
+"failed to run delalloc range, root=%lld ino=%llu folio=%llu submit_bitmap=%*pbl start=%llu len=%u: %d",
+ btrfs_root_id(inode->root),
+ btrfs_ino(inode),
+ folio_pos(folio),
+ blocks_per_folio,
+ &bio_ctrl->submit_bitmap,
+ found_start, found_len, ret);
} else {
/*
* We've hit an error during previous delalloc range,
* have to cleanup the remaining locked ranges.
*/
- unlock_extent(&inode->io_tree, found_start,
- found_start + found_len - 1, NULL);
- __unlock_for_delalloc(&inode->vfs_inode, folio,
+ btrfs_unlock_extent(&inode->io_tree, found_start,
+ found_start + found_len - 1, NULL);
+ unlock_delalloc_folio(&inode->vfs_inode, folio,
found_start,
found_start + found_len - 1);
}
@@ -1274,8 +1547,22 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
delalloc_start = found_start + found_len;
}
- if (ret < 0)
+ /*
+ * It's possible we had some ordered extents created before we hit
+ * an error; clean up the non-async, successfully created delalloc ranges.
+ */
+ if (unlikely(ret < 0)) {
+ unsigned int bitmap_size = min(
+ (last_finished_delalloc_end - page_start) >>
+ fs_info->sectorsize_bits,
+ blocks_per_folio);
+
+ for_each_set_bit(bit, &bio_ctrl->submit_bitmap, bitmap_size)
+ btrfs_mark_ordered_io_finished(inode, folio,
+ page_start + (bit << fs_info->sectorsize_bits),
+ fs_info->sectorsize, false);
return ret;
+ }
out:
if (last_delalloc_end)
delalloc_end = last_delalloc_end;
@@ -1283,7 +1570,7 @@ out:
delalloc_end = page_end;
/*
* delalloc_end is already one less than the total length, so
- * we don't subtract one from PAGE_SIZE
+ * we don't subtract one from PAGE_SIZE.
*/
delalloc_to_write +=
DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
@@ -1292,7 +1579,7 @@ out:
* If all ranges are submitted asynchronously, we just need to account
* for them here.
*/
- if (bitmap_empty(&bio_ctrl->submit_bitmap, fs_info->sectors_per_page)) {
+ if (bitmap_empty(&bio_ctrl->submit_bitmap, blocks_per_folio)) {
wbc->nr_to_write -= delalloc_to_write;
return 1;
}
@@ -1311,7 +1598,7 @@ out:
/*
* Return 0 if we have submitted or queued the sector for submission.
- * Return <0 for critical errors.
+ * Return <0 for critical errors, and the sector will have its dirty flag cleared.
*
* Caller should make sure filepos < i_size and handle filepos >= i_size case.
*/
@@ -1334,23 +1621,32 @@ static int submit_one_sector(struct btrfs_inode *inode,
ASSERT(filepos < i_size);
em = btrfs_get_extent(inode, NULL, filepos, sectorsize);
- if (IS_ERR(em))
- return PTR_ERR_OR_ZERO(em);
+ if (IS_ERR(em)) {
+ /*
+ * When submission fails, we should still clear the folio dirty bit.
+ * Otherwise the folio will be written back again but without any
+ * ordered extent.
+ */
+ btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
+ btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize);
+ btrfs_folio_clear_writeback(fs_info, folio, filepos, sectorsize);
+ return PTR_ERR(em);
+ }
extent_offset = filepos - em->start;
- em_end = extent_map_end(em);
+ em_end = btrfs_extent_map_end(em);
ASSERT(filepos <= em_end);
ASSERT(IS_ALIGNED(em->start, sectorsize));
ASSERT(IS_ALIGNED(em->len, sectorsize));
- block_start = extent_map_block_start(em);
- disk_bytenr = extent_map_block_start(em) + extent_offset;
+ block_start = btrfs_extent_map_block_start(em);
+ disk_bytenr = btrfs_extent_map_block_start(em) + extent_offset;
- ASSERT(!extent_map_is_compressed(em));
+ ASSERT(!btrfs_extent_map_is_compressed(em));
ASSERT(block_start != EXTENT_MAP_HOLE);
ASSERT(block_start != EXTENT_MAP_INLINE);
- free_extent_map(em);
+ btrfs_free_extent_map(em);
em = NULL;
/*
@@ -1370,7 +1666,7 @@ static int submit_one_sector(struct btrfs_inode *inode,
ASSERT(folio_test_writeback(folio));
submit_extent_folio(bio_ctrl, disk_bytenr, folio,
- sectorsize, filepos - folio_pos(folio));
+ sectorsize, filepos - folio_pos(folio), 0);
return 0;
}
@@ -1391,35 +1687,62 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
struct btrfs_fs_info *fs_info = inode->root->fs_info;
unsigned long range_bitmap = 0;
bool submitted_io = false;
+ int found_error = 0;
+ const u64 end = start + len;
const u64 folio_start = folio_pos(folio);
+ const u64 folio_end = folio_start + folio_size(folio);
+ const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
u64 cur;
int bit;
int ret = 0;
- ASSERT(start >= folio_start &&
- start + len <= folio_start + folio_size(folio));
+ ASSERT(start >= folio_start, "start=%llu folio_start=%llu", start, folio_start);
+ ASSERT(end <= folio_end, "start=%llu len=%u folio_start=%llu folio_size=%zu",
+ start, len, folio_start, folio_size(folio));
ret = btrfs_writepage_cow_fixup(folio);
- if (ret) {
+ if (ret == -EAGAIN) {
/* Fixup worker will requeue */
folio_redirty_for_writepage(bio_ctrl->wbc, folio);
folio_unlock(folio);
return 1;
}
+ if (ret < 0) {
+ btrfs_folio_clear_dirty(fs_info, folio, start, len);
+ btrfs_folio_set_writeback(fs_info, folio, start, len);
+ btrfs_folio_clear_writeback(fs_info, folio, start, len);
+ return ret;
+ }
- for (cur = start; cur < start + len; cur += fs_info->sectorsize)
+ for (cur = start; cur < end; cur += fs_info->sectorsize)
set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap);
bitmap_and(&bio_ctrl->submit_bitmap, &bio_ctrl->submit_bitmap, &range_bitmap,
- fs_info->sectors_per_page);
+ blocks_per_folio);
bio_ctrl->end_io_func = end_bbio_data_write;
- for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
+ for_each_set_bit(bit, &bio_ctrl->submit_bitmap, blocks_per_folio) {
cur = folio_pos(folio) + (bit << fs_info->sectorsize_bits);
if (cur >= i_size) {
+ struct btrfs_ordered_extent *ordered;
+
+ ordered = btrfs_lookup_first_ordered_range(inode, cur,
+ folio_end - cur);
+ /*
+ * We have just run delalloc before getting here, so
+ * there must be an ordered extent.
+ */
+ ASSERT(ordered != NULL);
+ spin_lock(&inode->ordered_tree_lock);
+ set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
+ ordered->truncated_len = min(ordered->truncated_len,
+ cur - ordered->file_offset);
+ spin_unlock(&inode->ordered_tree_lock);
+ btrfs_put_ordered_extent(ordered);
+
btrfs_mark_ordered_io_finished(inode, folio, cur,
- start + len - cur, true);
+ end - cur, true);
/*
* This range is beyond i_size, thus we don't need to
* bother writing back.
@@ -1428,16 +1751,31 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
* writeback the sectors with subpage dirty bits,
* causing writeback without ordered extent.
*/
- btrfs_folio_clear_dirty(fs_info, folio, cur,
- start + len - cur);
+ btrfs_folio_clear_dirty(fs_info, folio, cur, end - cur);
break;
}
ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
- if (ret < 0)
- goto out;
+ if (unlikely(ret < 0)) {
+ /*
+ * bio_ctrl may contain a bio crossing several folios.
+ * Submit it immediately so that the bio has a chance
+ * to finish normally, other than marked as error.
+ */
+ submit_one_bio(bio_ctrl);
+ /*
+ * Failed to grab the extent map, which should be very rare.
+ * Since there is no bio submitted to finish the ordered
+ * extent, we have to manually finish this sector.
+ */
+ btrfs_mark_ordered_io_finished(inode, folio, cur,
+ fs_info->sectorsize, false);
+ if (!found_error)
+ found_error = ret;
+ continue;
+ }
submitted_io = true;
}
-out:
+
/*
* If we didn't submit any sector (>= i_size), the folio dirty flag gets
* cleared but PAGECACHE_TAG_DIRTY is not cleared (only cleared
@@ -1445,12 +1783,15 @@ out:
*
* Here we set writeback and clear for the range. If the full folio
* is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.
+ *
+ * If we hit any error, the corresponding sector will have its dirty
+ * flag cleared and writeback finished, thus no need to handle the error case.
*/
- if (!submitted_io) {
+ if (!submitted_io && !found_error) {
btrfs_folio_set_writeback(fs_info, folio, start, len);
btrfs_folio_clear_writeback(fs_info, folio, start, len);
}
- return ret;
+ return found_error;
}
/*
@@ -1464,15 +1805,15 @@ out:
*/
static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl)
{
- struct inode *inode = folio->mapping->host;
- struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- const u64 page_start = folio_pos(folio);
+ struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
int ret;
size_t pg_offset;
- loff_t i_size = i_size_read(inode);
- unsigned long end_index = i_size >> PAGE_SHIFT;
+ loff_t i_size = i_size_read(&inode->vfs_inode);
+ const pgoff_t end_index = i_size >> PAGE_SHIFT;
+ const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
- trace_extent_writepage(folio, inode, bio_ctrl->wbc);
+ trace_extent_writepage(folio, &inode->vfs_inode, bio_ctrl->wbc);
WARN_ON(!folio_test_locked(folio));
@@ -1484,7 +1825,7 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl
return 0;
}
- if (folio->index == end_index)
+ if (folio_contains(folio, end_index))
folio_zero_range(folio, pg_offset, folio_size(folio) - pg_offset);
/*
@@ -1492,30 +1833,56 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl
* The proper bitmap can only be initialized until writepage_delalloc().
*/
bio_ctrl->submit_bitmap = (unsigned long)-1;
+
+ /*
+ * If the page is dirty but without private set, it's marked dirty
+ * without informing the fs.
+ * Nowadays that is a bug, since the introduction of
+ * pin_user_pages*().
+ *
+ * So here we check if the page has private set to rule out such
+ * a case.
+ * But we also have a long history of relying on the COW fixup,
+ * so here we only enable this check for experimental builds until
+ * we're sure it's safe.
+ */
+ if (IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL) &&
+ unlikely(!folio_test_private(folio))) {
+ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ btrfs_err_rl(fs_info,
+ "root %lld ino %llu folio %llu is marked dirty without notifying the fs",
+ btrfs_root_id(inode->root),
+ btrfs_ino(inode), folio_pos(folio));
+ ret = -EUCLEAN;
+ goto done;
+ }
+
ret = set_folio_extent_mapped(folio);
if (ret < 0)
goto done;
- ret = writepage_delalloc(BTRFS_I(inode), folio, bio_ctrl);
+ ret = writepage_delalloc(inode, folio, bio_ctrl);
if (ret == 1)
return 0;
if (ret)
goto done;
- ret = extent_writepage_io(BTRFS_I(inode), folio, folio_pos(folio),
- PAGE_SIZE, bio_ctrl, i_size);
+ ret = extent_writepage_io(inode, folio, folio_pos(folio),
+ folio_size(folio), bio_ctrl, i_size);
if (ret == 1)
return 0;
+ if (unlikely(ret < 0))
+ btrfs_err_rl(fs_info,
+"failed to submit blocks, root=%lld inode=%llu folio=%llu submit_bitmap=%*pbl: %d",
+ btrfs_root_id(inode->root), btrfs_ino(inode),
+ folio_pos(folio), blocks_per_folio,
+ &bio_ctrl->submit_bitmap, ret);
bio_ctrl->wbc->nr_to_write--;
done:
- if (ret) {
- btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
- page_start, PAGE_SIZE, !ret);
+ if (ret < 0)
mapping_set_error(folio->mapping, ret);
- }
-
/*
* Only unlock ranges that are submitted. As there can be some async
* submitted ranges inside the folio.
@@ -1525,12 +1892,6 @@ done:
return ret;
}
-void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
-{
- wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
- TASK_UNINTERRUPTIBLE);
-}
-
/*
* Lock extent buffer status and pages for writeback.
*
@@ -1560,8 +1921,19 @@ static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *e
*/
spin_lock(&eb->refs_lock);
if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
+ XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits);
+ unsigned long flags;
+
set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
spin_unlock(&eb->refs_lock);
+
+ xas_lock_irqsave(&xas, flags);
+ xas_load(&xas);
+ xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
+ xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
+ xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
+ xas_unlock_irqrestore(&xas, flags);
+
btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
-eb->len,
@@ -1647,50 +2019,167 @@ static void set_btree_ioerr(struct extent_buffer *eb)
}
}
+static void buffer_tree_set_mark(const struct extent_buffer *eb, xa_mark_t mark)
+{
+ struct btrfs_fs_info *fs_info = eb->fs_info;
+ XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits);
+ unsigned long flags;
+
+ xas_lock_irqsave(&xas, flags);
+ xas_load(&xas);
+ xas_set_mark(&xas, mark);
+ xas_unlock_irqrestore(&xas, flags);
+}
+
+static void buffer_tree_clear_mark(const struct extent_buffer *eb, xa_mark_t mark)
+{
+ struct btrfs_fs_info *fs_info = eb->fs_info;
+ XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits);
+ unsigned long flags;
+
+ xas_lock_irqsave(&xas, flags);
+ xas_load(&xas);
+ xas_clear_mark(&xas, mark);
+ xas_unlock_irqrestore(&xas, flags);
+}
+
+static void buffer_tree_tag_for_writeback(struct btrfs_fs_info *fs_info,
+ unsigned long start, unsigned long end)
+{
+ XA_STATE(xas, &fs_info->buffer_tree, start);
+ unsigned int tagged = 0;
+ void *eb;
+
+ xas_lock_irq(&xas);
+ xas_for_each_marked(&xas, eb, end, PAGECACHE_TAG_DIRTY) {
+ xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
+ if (++tagged % XA_CHECK_SCHED)
+ continue;
+ xas_pause(&xas);
+ xas_unlock_irq(&xas);
+ cond_resched();
+ xas_lock_irq(&xas);
+ }
+ xas_unlock_irq(&xas);
+}
+
+struct eb_batch {
+ unsigned int nr;
+ unsigned int cur;
+ struct extent_buffer *ebs[PAGEVEC_SIZE];
+};
+
+static inline bool eb_batch_add(struct eb_batch *batch, struct extent_buffer *eb)
+{
+ batch->ebs[batch->nr++] = eb;
+ return (batch->nr < PAGEVEC_SIZE);
+}
+
+static inline void eb_batch_init(struct eb_batch *batch)
+{
+ batch->nr = 0;
+ batch->cur = 0;
+}
+
+static inline struct extent_buffer *eb_batch_next(struct eb_batch *batch)
+{
+ if (batch->cur >= batch->nr)
+ return NULL;
+ return batch->ebs[batch->cur++];
+}
+
+static inline void eb_batch_release(struct eb_batch *batch)
+{
+ for (unsigned int i = 0; i < batch->nr; i++)
+ free_extent_buffer(batch->ebs[i]);
+ eb_batch_init(batch);
+}
+
+static inline struct extent_buffer *find_get_eb(struct xa_state *xas, unsigned long max,
+ xa_mark_t mark)
+{
+ struct extent_buffer *eb;
+
+retry:
+ eb = xas_find_marked(xas, max, mark);
+
+ if (xas_retry(xas, eb))
+ goto retry;
+
+ if (!eb)
+ return NULL;
+
+ if (!refcount_inc_not_zero(&eb->refs)) {
+ xas_reset(xas);
+ goto retry;
+ }
+
+ if (unlikely(eb != xas_reload(xas))) {
+ free_extent_buffer(eb);
+ xas_reset(xas);
+ goto retry;
+ }
+
+ return eb;
+}
+
+static unsigned int buffer_tree_get_ebs_tag(struct btrfs_fs_info *fs_info,
+ unsigned long *start,
+ unsigned long end, xa_mark_t tag,
+ struct eb_batch *batch)
+{
+ XA_STATE(xas, &fs_info->buffer_tree, *start);
+ struct extent_buffer *eb;
+
+ rcu_read_lock();
+ while ((eb = find_get_eb(&xas, end, tag)) != NULL) {
+ if (!eb_batch_add(batch, eb)) {
+ *start = ((eb->start + eb->len) >> fs_info->nodesize_bits);
+ goto out;
+ }
+ }
+ if (end == ULONG_MAX)
+ *start = ULONG_MAX;
+ else
+ *start = end + 1;
+out:
+ rcu_read_unlock();
+
+ return batch->nr;
+}
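/*
 * A minimal sketch of the intended iteration pattern (mirroring
 * btrfs_btree_wait_writeback_range() below):
 *
 *	struct eb_batch batch;
 *	struct extent_buffer *eb;
 *
 *	eb_batch_init(&batch);
 *	while (buffer_tree_get_ebs_tag(fs_info, &index, end, tag, &batch)) {
 *		while ((eb = eb_batch_next(&batch)) != NULL)
 *			wait_on_extent_buffer_writeback(eb);
 *		eb_batch_release(&batch);
 *		cond_resched();
 *	}
 */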
+
/*
* The endio specific version which won't touch any unsafe spinlock in endio
* context.
*/
static struct extent_buffer *find_extent_buffer_nolock(
- const struct btrfs_fs_info *fs_info, u64 start)
+ struct btrfs_fs_info *fs_info, u64 start)
{
struct extent_buffer *eb;
+ unsigned long index = (start >> fs_info->nodesize_bits);
rcu_read_lock();
- eb = radix_tree_lookup(&fs_info->buffer_radix,
- start >> fs_info->sectorsize_bits);
- if (eb && atomic_inc_not_zero(&eb->refs)) {
- rcu_read_unlock();
- return eb;
- }
+ eb = xa_load(&fs_info->buffer_tree, index);
+ if (eb && !refcount_inc_not_zero(&eb->refs))
+ eb = NULL;
rcu_read_unlock();
- return NULL;
+ return eb;
}
static void end_bbio_meta_write(struct btrfs_bio *bbio)
{
struct extent_buffer *eb = bbio->private;
- struct btrfs_fs_info *fs_info = eb->fs_info;
- bool uptodate = !bbio->bio.bi_status;
struct folio_iter fi;
- u32 bio_offset = 0;
- if (!uptodate)
+ if (bbio->bio.bi_status != BLK_STS_OK)
set_btree_ioerr(eb);
bio_for_each_folio_all(fi, &bbio->bio) {
- u64 start = eb->start + bio_offset;
- struct folio *folio = fi.folio;
- u32 len = fi.length;
-
- btrfs_folio_clear_writeback(fs_info, folio, start, len);
- bio_offset += len;
+ btrfs_meta_folio_clear_writeback(fi.folio, eb);
}
- clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
- smp_mb__after_atomic();
- wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
-
+ buffer_tree_clear_mark(eb, PAGECACHE_TAG_WRITEBACK);
+ clear_and_wake_up_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
bio_put(&bbio->bio);
}
@@ -1732,205 +2221,69 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
- eb->fs_info, end_bbio_meta_write, eb);
+ BTRFS_I(fs_info->btree_inode), eb->start,
+ end_bbio_meta_write, eb);
bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
wbc_init_bio(wbc, &bbio->bio);
- bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
- bbio->file_offset = eb->start;
- if (fs_info->nodesize < PAGE_SIZE) {
- struct folio *folio = eb->folios[0];
- bool ret;
+ for (int i = 0; i < num_extent_folios(eb); i++) {
+ struct folio *folio = eb->folios[i];
+ u64 range_start = max_t(u64, eb->start, folio_pos(folio));
+ u32 range_len = min_t(u64, folio_next_pos(folio),
+ eb->start + eb->len) - range_start;
folio_lock(folio);
- btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
- if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
- eb->len)) {
- folio_clear_dirty_for_io(folio);
- wbc->nr_to_write--;
- }
- ret = bio_add_folio(&bbio->bio, folio, eb->len,
- eb->start - folio_pos(folio));
- ASSERT(ret);
- wbc_account_cgroup_owner(wbc, folio, eb->len);
- folio_unlock(folio);
- } else {
- int num_folios = num_extent_folios(eb);
-
- for (int i = 0; i < num_folios; i++) {
- struct folio *folio = eb->folios[i];
- bool ret;
-
- folio_lock(folio);
- folio_clear_dirty_for_io(folio);
- folio_start_writeback(folio);
- ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
- ASSERT(ret);
- wbc_account_cgroup_owner(wbc, folio, eb->folio_size);
+ btrfs_meta_folio_clear_dirty(folio, eb);
+ btrfs_meta_folio_set_writeback(folio, eb);
+ if (!folio_test_dirty(folio))
wbc->nr_to_write -= folio_nr_pages(folio);
- folio_unlock(folio);
- }
+ bio_add_folio_nofail(&bbio->bio, folio, range_len,
+ offset_in_folio(folio, range_start));
+ wbc_account_cgroup_owner(wbc, folio, range_len);
+ folio_unlock(folio);
+ }
+ /*
+ * If the fs is already in error status, do not submit any writeback
+ * but immediately finish it.
+ */
+ if (unlikely(BTRFS_FS_ERROR(fs_info))) {
+ btrfs_bio_end_io(bbio, errno_to_blk_status(BTRFS_FS_ERROR(fs_info)));
+ return;
}
btrfs_submit_bbio(bbio, 0);
}
/*
- * Submit one subpage btree page.
- *
- * The main difference to submit_eb_page() is:
- * - Page locking
- * For subpage, we don't rely on page locking at all.
- *
- * - Flush write bio
- * We only flush bio if we may be unable to fit current extent buffers into
- * current bio.
+ * Wait for all eb writeback in the given range to finish.
*
- * Return >=0 for the number of submitted extent buffers.
- * Return <0 for fatal error.
+ * @fs_info: The fs_info for this file system.
+ * @start: The offset of the range to start waiting on writeback.
+ * @end: The end of the range, inclusive. This is meant to be used in
+ * conjunction with wait_marked_extents, so this will usually be
+ * the next eb->start - 1.
*/
-static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc)
+void btrfs_btree_wait_writeback_range(struct btrfs_fs_info *fs_info, u64 start,
+ u64 end)
{
- struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
- int submitted = 0;
- u64 folio_start = folio_pos(folio);
- int bit_start = 0;
- int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
-
- /* Lock and write each dirty extent buffers in the range */
- while (bit_start < fs_info->sectors_per_page) {
- struct btrfs_subpage *subpage = folio_get_private(folio);
+ struct eb_batch batch;
+ unsigned long start_index = (start >> fs_info->nodesize_bits);
+ unsigned long end_index = (end >> fs_info->nodesize_bits);
+
+ eb_batch_init(&batch);
+ while (start_index <= end_index) {
struct extent_buffer *eb;
- unsigned long flags;
- u64 start;
+ unsigned int nr_ebs;
- /*
- * Take private lock to ensure the subpage won't be detached
- * in the meantime.
- */
- spin_lock(&folio->mapping->i_private_lock);
- if (!folio_test_private(folio)) {
- spin_unlock(&folio->mapping->i_private_lock);
+ nr_ebs = buffer_tree_get_ebs_tag(fs_info, &start_index, end_index,
+ PAGECACHE_TAG_WRITEBACK, &batch);
+ if (!nr_ebs)
break;
- }
- spin_lock_irqsave(&subpage->lock, flags);
- if (!test_bit(bit_start + btrfs_bitmap_nr_dirty * fs_info->sectors_per_page,
- subpage->bitmaps)) {
- spin_unlock_irqrestore(&subpage->lock, flags);
- spin_unlock(&folio->mapping->i_private_lock);
- bit_start++;
- continue;
- }
-
- start = folio_start + bit_start * fs_info->sectorsize;
- bit_start += sectors_per_node;
-
- /*
- * Here we just want to grab the eb without touching extra
- * spin locks, so call find_extent_buffer_nolock().
- */
- eb = find_extent_buffer_nolock(fs_info, start);
- spin_unlock_irqrestore(&subpage->lock, flags);
- spin_unlock(&folio->mapping->i_private_lock);
-
- /*
- * The eb has already reached 0 refs thus find_extent_buffer()
- * doesn't return it. We don't need to write back such eb
- * anyway.
- */
- if (!eb)
- continue;
-
- if (lock_extent_buffer_for_io(eb, wbc)) {
- write_one_eb(eb, wbc);
- submitted++;
- }
- free_extent_buffer(eb);
- }
- return submitted;
-}
-
-/*
- * Submit all page(s) of one extent buffer.
- *
- * @page: the page of one extent buffer
- * @eb_context: to determine if we need to submit this page, if current page
- * belongs to this eb, we don't need to submit
- *
- * The caller should pass each page in their bytenr order, and here we use
- * @eb_context to determine if we have submitted pages of one extent buffer.
- *
- * If we have, we just skip until we hit a new page that doesn't belong to
- * current @eb_context.
- *
- * If not, we submit all the page(s) of the extent buffer.
- *
- * Return >0 if we have submitted the extent buffer successfully.
- * Return 0 if we don't need to submit the page, as it's already submitted by
- * previous call.
- * Return <0 for fatal error.
- */
-static int submit_eb_page(struct folio *folio, struct btrfs_eb_write_context *ctx)
-{
- struct writeback_control *wbc = ctx->wbc;
- struct address_space *mapping = folio->mapping;
- struct extent_buffer *eb;
- int ret;
-
- if (!folio_test_private(folio))
- return 0;
-
- if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
- return submit_eb_subpage(folio, wbc);
-
- spin_lock(&mapping->i_private_lock);
- if (!folio_test_private(folio)) {
- spin_unlock(&mapping->i_private_lock);
- return 0;
- }
-
- eb = folio_get_private(folio);
-
- /*
- * Shouldn't happen and normally this would be a BUG_ON but no point
- * crashing the machine for something we can survive anyway.
- */
- if (WARN_ON(!eb)) {
- spin_unlock(&mapping->i_private_lock);
- return 0;
- }
-
- if (eb == ctx->eb) {
- spin_unlock(&mapping->i_private_lock);
- return 0;
- }
- ret = atomic_inc_not_zero(&eb->refs);
- spin_unlock(&mapping->i_private_lock);
- if (!ret)
- return 0;
-
- ctx->eb = eb;
- ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
- if (ret) {
- if (ret == -EBUSY)
- ret = 0;
- free_extent_buffer(eb);
- return ret;
- }
-
- if (!lock_extent_buffer_for_io(eb, wbc)) {
- free_extent_buffer(eb);
- return 0;
- }
- /* Implies write in zoned mode. */
- if (ctx->zoned_bg) {
- /* Mark the last eb in the block group. */
- btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
- ctx->zoned_bg->meta_write_pointer += eb->len;
+ while ((eb = eb_batch_next(&batch)) != NULL)
+ wait_on_extent_buffer_writeback(eb);
+ eb_batch_release(&batch);
+ cond_resched();
}
- write_one_eb(eb, wbc);
- free_extent_buffer(eb);
- return 1;
}
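
The function above walks the new buffer_tree, which is keyed by eb->start >> fs_info->nodesize_bits, so a byte range [start, end] maps to the slot range [start >> nodesize_bits, end >> nodesize_bits]. A minimal standalone sketch of that index arithmetic (plain C, not btrfs code; the 16K nodesize is an assumed example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int nodesize_bits = 14;		/* assumed log2(16K) nodesize */
	const uint64_t nodesize = 1ULL << nodesize_bits;
	const uint64_t start = 1ULL << 20;		/* 1 MiB */
	const uint64_t end = start + 4 * nodesize - 1;	/* four tree blocks, inclusive end */

	/* One slot per nodesize-aligned tree block: prints "slots 64..67". */
	printf("slots %llu..%llu\n",
	       (unsigned long long)(start >> nodesize_bits),
	       (unsigned long long)(end >> nodesize_bits));
	return 0;
}
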
int btree_write_cache_pages(struct address_space *mapping,
@@ -1941,25 +2294,27 @@ int btree_write_cache_pages(struct address_space *mapping,
int ret = 0;
int done = 0;
int nr_to_write_done = 0;
- struct folio_batch fbatch;
- unsigned int nr_folios;
- pgoff_t index;
- pgoff_t end; /* Inclusive */
+ struct eb_batch batch;
+ unsigned int nr_ebs;
+ unsigned long index;
+ unsigned long end;
int scanned = 0;
xa_mark_t tag;
- folio_batch_init(&fbatch);
+ eb_batch_init(&batch);
if (wbc->range_cyclic) {
- index = mapping->writeback_index; /* Start from prev offset */
+ index = ((mapping->writeback_index << PAGE_SHIFT) >> fs_info->nodesize_bits);
end = -1;
+
/*
* Start from the beginning does not need to cycle over the
* range, mark it as scanned.
*/
scanned = (index == 0);
} else {
- index = wbc->range_start >> PAGE_SHIFT;
- end = wbc->range_end >> PAGE_SHIFT;
+ index = (wbc->range_start >> fs_info->nodesize_bits);
+ end = (wbc->range_end >> fs_info->nodesize_bits);
+
scanned = 1;
}
if (wbc->sync_mode == WB_SYNC_ALL)
@@ -1969,31 +2324,39 @@ int btree_write_cache_pages(struct address_space *mapping,
btrfs_zoned_meta_io_lock(fs_info);
retry:
if (wbc->sync_mode == WB_SYNC_ALL)
- tag_pages_for_writeback(mapping, index, end);
+ buffer_tree_tag_for_writeback(fs_info, index, end);
while (!done && !nr_to_write_done && (index <= end) &&
- (nr_folios = filemap_get_folios_tag(mapping, &index, end,
- tag, &fbatch))) {
- unsigned i;
+ (nr_ebs = buffer_tree_get_ebs_tag(fs_info, &index, end, tag, &batch))) {
+ struct extent_buffer *eb;
- for (i = 0; i < nr_folios; i++) {
- struct folio *folio = fbatch.folios[i];
+ while ((eb = eb_batch_next(&batch)) != NULL) {
+ ctx.eb = eb;
+
+ ret = btrfs_check_meta_write_pointer(eb->fs_info, &ctx);
+ if (ret) {
+ if (ret == -EBUSY)
+ ret = 0;
- ret = submit_eb_page(folio, &ctx);
- if (ret == 0)
+ if (ret) {
+ done = 1;
+ break;
+ }
continue;
- if (ret < 0) {
- done = 1;
- break;
}
- /*
- * the filesystem may choose to bump up nr_to_write.
- * We have to make sure to honor the new nr_to_write
- * at any time
- */
- nr_to_write_done = wbc->nr_to_write <= 0;
+ if (!lock_extent_buffer_for_io(eb, wbc))
+ continue;
+
+ /* Implies write in zoned mode. */
+ if (ctx.zoned_bg) {
+ /* Mark the last eb in the block group. */
+ btrfs_schedule_zone_finish_bg(ctx.zoned_bg, eb);
+ ctx.zoned_bg->meta_write_pointer += eb->len;
+ }
+ write_one_eb(eb, wbc);
}
- folio_batch_release(&fbatch);
+ nr_to_write_done = (wbc->nr_to_write <= 0);
+ eb_batch_release(&batch);
cond_resched();
}
if (!scanned && !done) {
@@ -2119,10 +2482,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
&BTRFS_I(inode)->runtime_flags))
wbc->tagged_writepages = 1;
- if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
- tag = PAGECACHE_TAG_TOWRITE;
- else
- tag = PAGECACHE_TAG_DIRTY;
+ tag = wbc_to_tag(wbc);
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, index, end);
@@ -2138,10 +2498,8 @@ retry:
done_index = folio_next_index(folio);
/*
* At this point we hold neither the i_pages lock nor
- * the page lock: the page may be truncated or
- * invalidated (changing page->mapping to NULL),
- * or even swizzled back from swapper_space to
- * tmpfs file mapping
+ * the folio lock: the folio may be truncated or
+ * invalidated (changing folio->mapping to NULL).
*/
if (!folio_trylock(folio)) {
submit_write_bio(bio_ctrl, 0);
@@ -2168,7 +2526,7 @@ retry:
* In above case, [32K, 96K) is asynchronously submitted
* for compression, and [124K, 128K) needs to be written back.
*
- * If we didn't wait wrtiteback for page 64K, [128K, 128K)
+ * If we didn't wait for writeback of page 64K, [124K, 128K)
* won't be submitted as the page still has writeback flag
* and will be skipped in the next check.
*
@@ -2179,7 +2537,7 @@ retry:
* regular submission.
*/
if (wbc->sync_mode != WB_SYNC_NONE ||
- btrfs_is_subpage(inode_to_fs_info(inode), mapping)) {
+ btrfs_is_subpage(inode_to_fs_info(inode), folio)) {
if (folio_test_writeback(folio))
submit_write_bio(bio_ctrl, 0);
folio_wait_writeback(folio);
@@ -2260,8 +2618,8 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
while (cur <= end) {
- u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
- u32 cur_len = cur_end + 1 - cur;
+ u64 cur_end;
+ u32 cur_len;
struct folio *folio;
folio = filemap_get_folio(mapping, cur >> PAGE_SHIFT);
@@ -2271,13 +2629,18 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
* code is just in case, but shouldn't actually be run.
*/
if (IS_ERR(folio)) {
+ cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
+ cur_len = cur_end + 1 - cur;
btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL,
cur, cur_len, false);
mapping_set_error(mapping, PTR_ERR(folio));
- cur = cur_end + 1;
+ cur = cur_end + 1;
continue;
}
+ cur_end = min_t(u64, folio_next_pos(folio) - 1, end);
+ cur_len = cur_end + 1 - cur;
+
ASSERT(folio_test_locked(folio));
if (pages_dirty && folio != locked_folio)
ASSERT(folio_test_dirty(folio));
@@ -2292,11 +2655,8 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
if (ret == 1)
goto next_page;
- if (ret) {
- btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
- cur, cur_len, !ret);
+ if (ret)
mapping_set_error(mapping, ret);
- }
btrfs_folio_end_lock(fs_info, folio, cur, cur_len);
if (ret < 0)
found_error = true;
@@ -2330,16 +2690,27 @@ int btrfs_writepages(struct address_space *mapping, struct writeback_control *wb
void btrfs_readahead(struct readahead_control *rac)
{
- struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
+ struct btrfs_bio_ctrl bio_ctrl = {
+ .opf = REQ_OP_READ | REQ_RAHEAD,
+ .ractl = rac,
+ .last_em_start = U64_MAX,
+ };
struct folio *folio;
+ struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
+ const u64 start = readahead_pos(rac);
+ const u64 end = start + readahead_length(rac) - 1;
+ struct extent_state *cached_state = NULL;
struct extent_map *em_cached = NULL;
- u64 prev_em_start = (u64)-1;
+
+ lock_extents_for_read(inode, start, end, &cached_state);
while ((folio = readahead_folio(rac)) != NULL)
- btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
+ btrfs_do_readpage(folio, &em_cached, &bio_ctrl);
+
+ btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
if (em_cached)
- free_extent_map(em_cached);
+ btrfs_free_extent_map(em_cached);
submit_one_bio(&bio_ctrl);
}
@@ -2363,7 +2734,7 @@ int extent_invalidate_folio(struct extent_io_tree *tree,
if (start > end)
return 0;
- lock_extent(tree, start, end, &cached_state);
+ btrfs_lock_extent(tree, start, end, &cached_state);
folio_wait_writeback(folio);
/*
@@ -2371,46 +2742,54 @@ int extent_invalidate_folio(struct extent_io_tree *tree,
* so here we only need to unlock the extent range to free any
* existing extent state.
*/
- unlock_extent(tree, start, end, &cached_state);
+ btrfs_unlock_extent(tree, start, end, &cached_state);
return 0;
}
/*
- * a helper for release_folio, this tests for areas of the page that
- * are locked or under IO and drops the related state bits if it is safe
- * to drop the page.
+ * A helper for struct address_space_operations::release_folio, this tests for
+ * areas of the folio that are locked or under IO and drops the related state
+ * bits if it is safe to drop the folio.
*/
static bool try_release_extent_state(struct extent_io_tree *tree,
struct folio *folio)
{
+ struct extent_state *cached_state = NULL;
u64 start = folio_pos(folio);
- u64 end = start + PAGE_SIZE - 1;
- bool ret;
+ u64 end = start + folio_size(folio) - 1;
+ u32 range_bits;
+ u32 clear_bits;
+ bool ret = false;
+ int ret2;
- if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
- ret = false;
- } else {
- u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
- EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
- EXTENT_QGROUP_RESERVED);
- int ret2;
+ btrfs_get_range_bits(tree, start, end, &range_bits, &cached_state);
- /*
- * At this point we can safely clear everything except the
- * locked bit, the nodatasum bit and the delalloc new bit.
- * The delalloc new bit will be cleared by ordered extent
- * completion.
- */
- ret2 = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
+ /*
+ * We can release the folio if it's locked only for ordered extent
+ * completion, since that doesn't require using the folio.
+ */
+ if ((range_bits & EXTENT_LOCKED) &&
+ !(range_bits & EXTENT_FINISHING_ORDERED))
+ goto out;
+
+ clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW |
+ EXTENT_CTLBITS | EXTENT_QGROUP_RESERVED |
+ EXTENT_FINISHING_ORDERED);
+ /*
+ * At this point we can safely clear everything except the locked,
+ * nodatasum, delalloc new and finishing ordered bits. The delalloc new
+ * bit will be cleared by ordered extent completion.
+ */
+ ret2 = btrfs_clear_extent_bit(tree, start, end, clear_bits, &cached_state);
+ /*
+ * If btrfs_clear_extent_bit() failed due to ENOMEM, we can't allow the
+ * release to continue.
+ */
+ if (ret2 == 0)
+ ret = true;
+out:
+ btrfs_free_extent_state(cached_state);
- /* if clear_extent_bit failed for enomem reasons,
- * we can't allow the release to continue.
- */
- if (ret2 < 0)
- ret = false;
- else
- ret = true;
- }
return ret;
}
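
The release decision above reduces to a bit test on the sampled range bits: a range locked for anything other than ordered extent completion cannot be released. A hedged standalone sketch of that predicate (plain C, not btrfs code; the bit values are illustrative, not the real EXTENT_* definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_EXTENT_LOCKED			(1u << 0)
#define SKETCH_EXTENT_FINISHING_ORDERED		(1u << 1)

/* Locked only for ordered extent completion is fine, since that does not
 * need the folio; locked for anything else blocks the release. */
static bool range_is_releasable(uint32_t range_bits)
{
	if ((range_bits & SKETCH_EXTENT_LOCKED) &&
	    !(range_bits & SKETCH_EXTENT_FINISHING_ORDERED))
		return false;
	return true;
}

int main(void)
{
	printf("%d %d\n",
	       range_is_releasable(SKETCH_EXTENT_LOCKED),
	       range_is_releasable(SKETCH_EXTENT_LOCKED |
				   SKETCH_EXTENT_FINISHING_ORDERED));
	return 0;	/* prints "0 1" */
}
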
@@ -2422,7 +2801,7 @@ static bool try_release_extent_state(struct extent_io_tree *tree,
bool try_release_extent_mapping(struct folio *folio, gfp_t mask)
{
u64 start = folio_pos(folio);
- u64 end = start + PAGE_SIZE - 1;
+ u64 end = start + folio_size(folio) - 1;
struct btrfs_inode *inode = folio_to_inode(folio);
struct extent_io_tree *io_tree = &inode->io_tree;
@@ -2433,18 +2812,19 @@ bool try_release_extent_mapping(struct folio *folio, gfp_t mask)
struct extent_map *em;
write_lock(&extent_tree->lock);
- em = lookup_extent_mapping(extent_tree, start, len);
+ em = btrfs_lookup_extent_mapping(extent_tree, start, len);
if (!em) {
write_unlock(&extent_tree->lock);
break;
}
if ((em->flags & EXTENT_FLAG_PINNED) || em->start != start) {
write_unlock(&extent_tree->lock);
- free_extent_map(em);
+ btrfs_free_extent_map(em);
break;
}
- if (test_range_bit_exists(io_tree, em->start,
- extent_map_end(em) - 1, EXTENT_LOCKED))
+ if (btrfs_test_range_bit_exists(io_tree, em->start,
+ btrfs_extent_map_end(em) - 1,
+ EXTENT_LOCKED))
goto next;
/*
* If it's not in the list of modified extents, used by a fast
@@ -2471,15 +2851,15 @@ remove_em:
* fsync performance for workloads with a data size that exceeds
* or is close to the system's memory).
*/
- remove_extent_mapping(inode, em);
+ btrfs_remove_extent_mapping(inode, em);
/* Once for the inode's extent map tree. */
- free_extent_map(em);
+ btrfs_free_extent_map(em);
next:
- start = extent_map_end(em);
+ start = btrfs_extent_map_end(em);
write_unlock(&extent_tree->lock);
/* Once for us, for the lookup_extent_mapping() reference. */
- free_extent_map(em);
+ btrfs_free_extent_map(em);
if (need_resched()) {
/*
@@ -2495,11 +2875,6 @@ next:
return try_release_extent_state(io_tree, folio);
}
-static void __free_extent_buffer(struct extent_buffer *eb)
-{
- kmem_cache_free(extent_buffer_cache, eb);
-}
-
static int extent_buffer_under_io(const struct extent_buffer *eb)
{
return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
@@ -2508,13 +2883,13 @@ static int extent_buffer_under_io(const struct extent_buffer *eb)
static bool folio_range_has_eb(struct folio *folio)
{
- struct btrfs_subpage *subpage;
+ struct btrfs_folio_state *bfs;
lockdep_assert_held(&folio->mapping->i_private_lock);
if (folio_test_private(folio)) {
- subpage = folio_get_private(folio);
- if (atomic_read(&subpage->eb_refs))
+ bfs = folio_get_private(folio);
+ if (atomic_read(&bfs->eb_refs))
return true;
}
return false;
@@ -2523,6 +2898,7 @@ static bool folio_range_has_eb(struct folio *folio)
static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
+ struct address_space *mapping = folio->mapping;
const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
/*
@@ -2530,21 +2906,20 @@ static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct fo
* be done under the i_private_lock.
*/
if (mapped)
- spin_lock(&folio->mapping->i_private_lock);
+ spin_lock(&mapping->i_private_lock);
if (!folio_test_private(folio)) {
if (mapped)
- spin_unlock(&folio->mapping->i_private_lock);
+ spin_unlock(&mapping->i_private_lock);
return;
}
- if (fs_info->nodesize >= PAGE_SIZE) {
+ if (!btrfs_meta_is_subpage(fs_info)) {
/*
- * We do this since we'll remove the pages after we've
- * removed the eb from the radix tree, so we could race
- * and have this page now attached to the new eb. So
- * only clear folio if it's still connected to
- * this eb.
+ * We do this since we'll remove the pages after we've removed
+ * the eb from the xarray, so we could race and have this page
+ * now attached to the new eb. So only clear folio if it's
+ * still connected to this eb.
*/
if (folio_test_private(folio) && folio_get_private(folio) == eb) {
BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
@@ -2554,7 +2929,7 @@ static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct fo
folio_detach_private(folio);
}
if (mapped)
- spin_unlock(&folio->mapping->i_private_lock);
+ spin_unlock(&mapping->i_private_lock);
return;
}
@@ -2564,7 +2939,7 @@ static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct fo
* attached to one dummy eb, no sharing.
*/
if (!mapped) {
- btrfs_detach_subpage(fs_info, folio);
+ btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA);
return;
}
@@ -2575,13 +2950,13 @@ static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct fo
* page range and no unfinished IO.
*/
if (!folio_range_has_eb(folio))
- btrfs_detach_subpage(fs_info, folio);
+ btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA);
- spin_unlock(&folio->mapping->i_private_lock);
+ spin_unlock(&mapping->i_private_lock);
}
-/* Release all pages attached to the extent buffer */
-static void btrfs_release_extent_buffer_pages(const struct extent_buffer *eb)
+/* Release all folios attached to the extent buffer */
+static void btrfs_release_extent_buffer_folios(const struct extent_buffer *eb)
{
ASSERT(!extent_buffer_under_io(eb));
@@ -2592,9 +2967,6 @@ static void btrfs_release_extent_buffer_pages(const struct extent_buffer *eb)
continue;
detach_extent_buffer_folio(eb, folio);
-
- /* One for when we allocated the folio. */
- folio_put(folio);
}
}
@@ -2603,40 +2975,57 @@ static void btrfs_release_extent_buffer_pages(const struct extent_buffer *eb)
*/
static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
{
- btrfs_release_extent_buffer_pages(eb);
+ btrfs_release_extent_buffer_folios(eb);
btrfs_leak_debug_del_eb(eb);
- __free_extent_buffer(eb);
+ kmem_cache_free(extent_buffer_cache, eb);
}
-static struct extent_buffer *
-__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
- unsigned long len)
+static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *fs_info,
+ u64 start)
{
struct extent_buffer *eb = NULL;
eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
eb->start = start;
- eb->len = len;
+ eb->len = fs_info->nodesize;
eb->fs_info = fs_info;
init_rwsem(&eb->lock);
btrfs_leak_debug_add_eb(eb);
spin_lock_init(&eb->refs_lock);
- atomic_set(&eb->refs, 1);
+ refcount_set(&eb->refs, 1);
- ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
+ ASSERT(eb->len <= BTRFS_MAX_METADATA_BLOCKSIZE);
return eb;
}
+/*
+ * For use in eb allocation error cleanup paths, as btrfs_release_extent_buffer()
+ * does not call folio_put(), and we need to set the folios to NULL so that
+ * btrfs_release_extent_buffer() will not detach them a second time.
+ */
+static void cleanup_extent_buffer_folios(struct extent_buffer *eb)
+{
+ const int num_folios = num_extent_folios(eb);
+
+ /* We cannot use num_extent_folios() as loop bound as eb->folios changes. */
+ for (int i = 0; i < num_folios; i++) {
+ ASSERT(eb->folios[i]);
+ detach_extent_buffer_folio(eb, eb->folios[i]);
+ folio_put(eb->folios[i]);
+ eb->folios[i] = NULL;
+ }
+}
+
struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
{
struct extent_buffer *new;
- int num_folios = num_extent_folios(src);
+ int num_folios;
int ret;
- new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
+ new = __alloc_extent_buffer(src->fs_info, src->start);
if (new == NULL)
return NULL;
@@ -2648,78 +3037,78 @@ struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
ret = alloc_eb_folio_array(new, false);
- if (ret) {
- btrfs_release_extent_buffer(new);
- return NULL;
- }
+ if (ret)
+ goto release_eb;
+ ASSERT(num_extent_folios(src) == num_extent_folios(new),
+ "%d != %d", num_extent_folios(src), num_extent_folios(new));
+ /* Explicitly use the cached num_extent_folios() value from now on. */
+ num_folios = num_extent_folios(src);
for (int i = 0; i < num_folios; i++) {
struct folio *folio = new->folios[i];
ret = attach_extent_buffer_folio(new, folio, NULL);
- if (ret < 0) {
- btrfs_release_extent_buffer(new);
- return NULL;
- }
+ if (ret < 0)
+ goto cleanup_folios;
WARN_ON(folio_test_dirty(folio));
}
+ for (int i = 0; i < num_folios; i++)
+ folio_put(new->folios[i]);
+
copy_extent_buffer_full(new, src);
set_extent_buffer_uptodate(new);
return new;
+
+cleanup_folios:
+ cleanup_extent_buffer_folios(new);
+release_eb:
+ btrfs_release_extent_buffer(new);
+ return NULL;
}
-struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start, unsigned long len)
+struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
+ u64 start)
{
struct extent_buffer *eb;
- int num_folios = 0;
int ret;
- eb = __alloc_extent_buffer(fs_info, start, len);
+ eb = __alloc_extent_buffer(fs_info, start);
if (!eb)
return NULL;
ret = alloc_eb_folio_array(eb, false);
if (ret)
- goto err;
+ goto release_eb;
- num_folios = num_extent_folios(eb);
- for (int i = 0; i < num_folios; i++) {
+ for (int i = 0; i < num_extent_folios(eb); i++) {
ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
if (ret < 0)
- goto err;
+ goto cleanup_folios;
}
+ for (int i = 0; i < num_extent_folios(eb); i++)
+ folio_put(eb->folios[i]);
set_extent_buffer_uptodate(eb);
btrfs_set_header_nritems(eb, 0);
set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
return eb;
-err:
- for (int i = 0; i < num_folios; i++) {
- if (eb->folios[i]) {
- detach_extent_buffer_folio(eb, eb->folios[i]);
- folio_put(eb->folios[i]);
- }
- }
- __free_extent_buffer(eb);
- return NULL;
-}
-struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start)
-{
- return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
+cleanup_folios:
+ cleanup_extent_buffer_folios(eb);
+release_eb:
+ btrfs_release_extent_buffer(eb);
+ return NULL;
}
static void check_buffer_tree_ref(struct extent_buffer *eb)
{
int refs;
/*
- * The TREE_REF bit is first set when the extent_buffer is added
- * to the radix tree. It is also reset, if unset, when a new reference
- * is created by find_extent_buffer.
+ * The TREE_REF bit is first set when the extent_buffer is added to the
+ * xarray. It is also reset, if unset, when a new reference is created
+ * by find_extent_buffer.
*
* It is only cleared in two cases: freeing the last non-tree
* reference to the extent_buffer when its STALE bit is set or
@@ -2731,31 +3120,28 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
* conditions between the calls to check_buffer_tree_ref in those
* codepaths and clearing TREE_REF in try_release_extent_buffer.
*
- * The actual lifetime of the extent_buffer in the radix tree is
- * adequately protected by the refcount, but the TREE_REF bit and
- * its corresponding reference are not. To protect against this
- * class of races, we call check_buffer_tree_ref from the codepaths
- * which trigger io. Note that once io is initiated, TREE_REF can no
- * longer be cleared, so that is the moment at which any such race is
- * best fixed.
+ * The actual lifetime of the extent_buffer in the xarray is adequately
+ * protected by the refcount, but the TREE_REF bit and its corresponding
+ * reference are not. To protect against this class of races, we call
+ * check_buffer_tree_ref() from the code paths which trigger io. Note that
+ * once io is initiated, TREE_REF can no longer be cleared, so that is
+ * the moment at which any such race is best fixed.
*/
- refs = atomic_read(&eb->refs);
+ refs = refcount_read(&eb->refs);
if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
return;
spin_lock(&eb->refs_lock);
if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
- atomic_inc(&eb->refs);
+ refcount_inc(&eb->refs);
spin_unlock(&eb->refs_lock);
}
static void mark_extent_buffer_accessed(struct extent_buffer *eb)
{
- int num_folios= num_extent_folios(eb);
-
check_buffer_tree_ref(eb);
- for (int i = 0; i < num_folios; i++)
+ for (int i = 0; i < num_extent_folios(eb); i++)
folio_mark_accessed(eb->folios[i]);
}
@@ -2788,10 +3174,10 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
return eb;
}
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start)
{
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct extent_buffer *eb, *exists = NULL;
int ret;
@@ -2803,47 +3189,48 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
return ERR_PTR(-ENOMEM);
eb->fs_info = fs_info;
again:
- ret = radix_tree_preload(GFP_NOFS);
- if (ret) {
- exists = ERR_PTR(ret);
- goto free_eb;
+ xa_lock_irq(&fs_info->buffer_tree);
+ exists = __xa_cmpxchg(&fs_info->buffer_tree, start >> fs_info->nodesize_bits,
+ NULL, eb, GFP_NOFS);
+ if (xa_is_err(exists)) {
+ ret = xa_err(exists);
+ xa_unlock_irq(&fs_info->buffer_tree);
+ btrfs_release_extent_buffer(eb);
+ return ERR_PTR(ret);
}
- spin_lock(&fs_info->buffer_lock);
- ret = radix_tree_insert(&fs_info->buffer_radix,
- start >> fs_info->sectorsize_bits, eb);
- spin_unlock(&fs_info->buffer_lock);
- radix_tree_preload_end();
- if (ret == -EEXIST) {
- exists = find_extent_buffer(fs_info, start);
- if (exists)
- goto free_eb;
- else
+ if (exists) {
+ if (!refcount_inc_not_zero(&exists->refs)) {
+ /* The extent buffer is being freed, retry. */
+ xa_unlock_irq(&fs_info->buffer_tree);
goto again;
+ }
+ xa_unlock_irq(&fs_info->buffer_tree);
+ btrfs_release_extent_buffer(eb);
+ return exists;
}
+ xa_unlock_irq(&fs_info->buffer_tree);
check_buffer_tree_ref(eb);
- set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
return eb;
-free_eb:
- btrfs_release_extent_buffer(eb);
- return exists;
-}
+#else
+ /* Stub to avoid linker error when compiled with optimizations turned off. */
+ return NULL;
#endif
+}
-static struct extent_buffer *grab_extent_buffer(
- struct btrfs_fs_info *fs_info, struct page *page)
+static struct extent_buffer *grab_extent_buffer(struct btrfs_fs_info *fs_info,
+ struct folio *folio)
{
- struct folio *folio = page_folio(page);
struct extent_buffer *exists;
- lockdep_assert_held(&page->mapping->i_private_lock);
+ lockdep_assert_held(&folio->mapping->i_private_lock);
/*
- * For subpage case, we completely rely on radix tree to ensure we
- * don't try to insert two ebs for the same bytenr. So here we always
- * return NULL and just continue.
+ * For subpage case, we completely rely on xarray to ensure we don't try
+ * to insert two ebs for the same bytenr. So here we always return NULL
+ * and just continue.
*/
- if (fs_info->nodesize < PAGE_SIZE)
+ if (btrfs_meta_is_subpage(fs_info))
return NULL;
/* Page not yet attached to an extent buffer */
@@ -2851,51 +3238,53 @@ static struct extent_buffer *grab_extent_buffer(
return NULL;
/*
- * We could have already allocated an eb for this page and attached one
+ * We could have already allocated an eb for this folio and attached one
* so lets see if we can get a ref on the existing eb, and if we can we
* know it's good and we can just return that one, else we know we can
* just overwrite folio private.
*/
exists = folio_get_private(folio);
- if (atomic_inc_not_zero(&exists->refs))
+ if (refcount_inc_not_zero(&exists->refs))
return exists;
- WARN_ON(PageDirty(page));
+ WARN_ON(folio_test_dirty(folio));
folio_detach_private(folio);
return NULL;
}
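
grab_extent_buffer() now relies on refcount_inc_not_zero() to take a reference only while the buffer is still live. A standalone analogue of that increment-unless-zero pattern with C11 atomics (a sketch, not the kernel refcount_t implementation):

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only if the count has not already dropped to zero,
 * i.e. the object is not in the middle of being freed. */
static bool ref_inc_not_zero(atomic_uint *refs)
{
	unsigned int old = atomic_load(refs);

	do {
		if (old == 0)
			return false;
	} while (!atomic_compare_exchange_weak(refs, &old, old + 1));

	return true;
}

int main(void)
{
	atomic_uint refs = 1;

	return ref_inc_not_zero(&refs) ? 0 : 1;	/* succeeds: refs becomes 2 */
}
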
-static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
+/*
+ * Validate alignment constraints of eb at logical address @start.
+ *
+ * Return true if the alignment is invalid (an error is logged), false otherwise.
+ */
+static bool check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
{
- if (!IS_ALIGNED(start, fs_info->sectorsize)) {
+ const u32 nodesize = fs_info->nodesize;
+
+ if (unlikely(!IS_ALIGNED(start, fs_info->sectorsize))) {
btrfs_err(fs_info, "bad tree block start %llu", start);
- return -EINVAL;
+ return true;
}
- if (fs_info->nodesize < PAGE_SIZE &&
- offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
+ if (unlikely(nodesize < PAGE_SIZE && !IS_ALIGNED(start, nodesize))) {
btrfs_err(fs_info,
- "tree block crosses page boundary, start %llu nodesize %u",
- start, fs_info->nodesize);
- return -EINVAL;
+ "tree block is not nodesize aligned, start %llu nodesize %u",
+ start, nodesize);
+ return true;
}
- if (fs_info->nodesize >= PAGE_SIZE &&
- !PAGE_ALIGNED(start)) {
+ if (unlikely(nodesize >= PAGE_SIZE && !PAGE_ALIGNED(start))) {
btrfs_err(fs_info,
"tree block is not page aligned, start %llu nodesize %u",
- start, fs_info->nodesize);
- return -EINVAL;
+ start, nodesize);
+ return true;
}
- if (!IS_ALIGNED(start, fs_info->nodesize) &&
- !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
+ if (unlikely(!IS_ALIGNED(start, nodesize) &&
+ !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags))) {
btrfs_warn(fs_info,
"tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
- start, fs_info->nodesize);
+ start, nodesize);
}
- return 0;
+ return false;
}
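
check_eb_alignment() above returns true when the start offset is misaligned. A standalone sketch of the same predicates with the alignment test open-coded (plain C, not btrfs code; the sizes passed in are whatever the caller's filesystem uses):

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_ALIGNED(x, a)	((((uint64_t)(x)) & ((uint64_t)(a) - 1)) == 0)

/* Returns true if @start violates the alignment rules, mirroring the
 * error paths above (sector, subpage-nodesize and page alignment). */
static bool eb_start_is_misaligned(uint64_t start, uint32_t sectorsize,
				   uint32_t nodesize, uint32_t page_size)
{
	if (!SKETCH_ALIGNED(start, sectorsize))
		return true;
	if (nodesize < page_size && !SKETCH_ALIGNED(start, nodesize))
		return true;
	if (nodesize >= page_size && !SKETCH_ALIGNED(start, page_size))
		return true;
	return false;
}

int main(void)
{
	/* 4K sectors, 16K nodes, 64K pages: start must be nodesize aligned. */
	return eb_start_is_misaligned(3 * 16384, 4096, 16384, 65536) ? 1 : 0;
}
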
-
/*
* Return 0 if eb->folios[i] is attached to btree inode successfully.
* Return >0 if there is already another extent buffer for the range,
@@ -2905,14 +3294,14 @@ static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
* The caller needs to free the existing folios and retry using the same order.
*/
static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
- struct btrfs_subpage *prealloc,
+ struct btrfs_folio_state *prealloc,
struct extent_buffer **found_eb_ret)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
struct address_space *mapping = fs_info->btree_inode->i_mapping;
- const unsigned long index = eb->start >> PAGE_SHIFT;
- struct folio *existing_folio = NULL;
+ const pgoff_t index = eb->start >> PAGE_SHIFT;
+ struct folio *existing_folio;
int ret;
ASSERT(found_eb_ret);
@@ -2921,6 +3310,7 @@ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
ASSERT(eb->folios[i]);
retry:
+ existing_folio = NULL;
ret = filemap_add_folio(mapping, eb->folios[i], index + i,
GFP_NOFS | __GFP_NOFAIL);
if (!ret)
@@ -2928,10 +3318,8 @@ retry:
existing_folio = filemap_lock_folio(mapping, index + i);
/* The page cache only exists for a very short time, just retry. */
- if (IS_ERR(existing_folio)) {
- existing_folio = NULL;
+ if (IS_ERR(existing_folio))
goto retry;
- }
/* For now, we should only have single-page folios for btree inode. */
ASSERT(folio_nr_pages(existing_folio) == 1);
@@ -2944,15 +3332,14 @@ retry:
finish:
spin_lock(&mapping->i_private_lock);
- if (existing_folio && fs_info->nodesize < PAGE_SIZE) {
+ if (existing_folio && btrfs_meta_is_subpage(fs_info)) {
/* We're going to reuse the existing page, can drop our folio now. */
__free_page(folio_page(eb->folios[i], 0));
eb->folios[i] = existing_folio;
} else if (existing_folio) {
struct extent_buffer *existing_eb;
- existing_eb = grab_extent_buffer(fs_info,
- folio_page(existing_folio, 0));
+ existing_eb = grab_extent_buffer(fs_info, existing_folio);
if (existing_eb) {
/* The extent buffer still exists, we can use it directly. */
*found_eb_ret = existing_eb;
@@ -2973,7 +3360,7 @@ finish:
/*
* To inform we have an extra eb under allocation, so that
* detach_extent_buffer_page() won't release the folio private when the
- * eb hasn't been inserted into radix tree yet.
+ * eb hasn't been inserted into the xarray yet.
*
* The ref will be decreased when the eb releases the page, in
* detach_extent_buffer_page(). Thus needs no special handling in the
@@ -2987,12 +3374,10 @@ finish:
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, u64 owner_root, int level)
{
- unsigned long len = fs_info->nodesize;
- int num_folios;
int attached = 0;
struct extent_buffer *eb;
struct extent_buffer *existing_eb = NULL;
- struct btrfs_subpage *prealloc = NULL;
+ struct btrfs_folio_state *prealloc = NULL;
u64 lockdep_owner = owner_root;
bool page_contig = true;
int uptodate = 1;
@@ -3016,7 +3401,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
if (eb)
return eb;
- eb = __alloc_extent_buffer(fs_info, start, len);
+ eb = __alloc_extent_buffer(fs_info, start);
if (!eb)
return ERR_PTR(-ENOMEM);
@@ -3036,8 +3421,8 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
* The memory will be freed by attach_extent_buffer_page() or freed
* manually if we exit earlier.
*/
- if (fs_info->nodesize < PAGE_SIZE) {
- prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
+ if (btrfs_meta_is_subpage(fs_info)) {
+ prealloc = btrfs_alloc_folio_state(fs_info, PAGE_SIZE, BTRFS_SUBPAGE_METADATA);
if (IS_ERR(prealloc)) {
ret = PTR_ERR(prealloc);
goto out;
@@ -3048,13 +3433,12 @@ reallocate:
/* Allocate all pages first. */
ret = alloc_eb_folio_array(eb, true);
if (ret < 0) {
- btrfs_free_subpage(prealloc);
+ btrfs_free_folio_state(prealloc);
goto out;
}
- num_folios = num_extent_folios(eb);
/* Attach all pages to the filemap. */
- for (int i = 0; i < num_folios; i++) {
+ for (int i = 0; i < num_extent_folios(eb); i++) {
struct folio *folio;
ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
@@ -3083,7 +3467,7 @@ reallocate:
* using 0-order folios.
*/
if (unlikely(ret == -EAGAIN)) {
- ASSERT(0);
+ DEBUG_WARN("folio order mismatch between new eb and filemap");
goto reallocate;
}
attached++;
@@ -3094,7 +3478,7 @@ reallocate:
* and free the allocated page.
*/
folio = eb->folios[i];
- WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
+ WARN_ON(btrfs_meta_folio_test_dirty(folio, eb));
/*
* Check if the current page is physically contiguous with previous eb
@@ -3105,15 +3489,14 @@ reallocate:
if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
page_contig = false;
- if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
+ if (!btrfs_meta_folio_test_uptodate(folio, eb))
uptodate = 0;
/*
* We can't unlock the pages just yet since the extent buffer
- * hasn't been properly inserted in the radix tree, this
- * opens a race with btree_release_folio which can free a page
- * while we are still filling in all pages for the buffer and
- * we could crash.
+ * hasn't been properly inserted into the xarray, this opens a
+ * race with btree_release_folio() which can free a page while we
+ * are still filling in all pages for the buffer and we could crash.
*/
}
if (uptodate)
@@ -3122,38 +3505,46 @@ reallocate:
if (page_contig)
eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
again:
- ret = radix_tree_preload(GFP_NOFS);
- if (ret)
+ xa_lock_irq(&fs_info->buffer_tree);
+ existing_eb = __xa_cmpxchg(&fs_info->buffer_tree,
+ start >> fs_info->nodesize_bits, NULL, eb,
+ GFP_NOFS);
+ if (xa_is_err(existing_eb)) {
+ ret = xa_err(existing_eb);
+ xa_unlock_irq(&fs_info->buffer_tree);
goto out;
-
- spin_lock(&fs_info->buffer_lock);
- ret = radix_tree_insert(&fs_info->buffer_radix,
- start >> fs_info->sectorsize_bits, eb);
- spin_unlock(&fs_info->buffer_lock);
- radix_tree_preload_end();
- if (ret == -EEXIST) {
- ret = 0;
- existing_eb = find_extent_buffer(fs_info, start);
- if (existing_eb)
- goto out;
- else
+ }
+ if (existing_eb) {
+ if (!refcount_inc_not_zero(&existing_eb->refs)) {
+ xa_unlock_irq(&fs_info->buffer_tree);
goto again;
+ }
+ xa_unlock_irq(&fs_info->buffer_tree);
+ goto out;
}
+ xa_unlock_irq(&fs_info->buffer_tree);
+
/* add one reference for the tree */
check_buffer_tree_ref(eb);
- set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
/*
* Now it's safe to unlock the pages because any calls to
* btree_release_folio will correctly detect that a page belongs to a
* live buffer and won't free them prematurely.
*/
- for (int i = 0; i < num_folios; i++)
- unlock_page(folio_page(eb->folios[i], 0));
+ for (int i = 0; i < num_extent_folios(eb); i++) {
+ folio_unlock(eb->folios[i]);
+ /*
+ * A folio that has been added to an address_space mapping
+ * should not continue holding the refcount from its original
+ * allocation indefinitely.
+ */
+ folio_put(eb->folios[i]);
+ }
return eb;
out:
- WARN_ON(!atomic_dec_and_test(&eb->refs));
+ WARN_ON(!refcount_dec_and_test(&eb->refs));
/*
* Any attached folios need to be detached before we unlock them. This
@@ -3163,26 +3554,22 @@ out:
* want that to grab this eb, as we're getting ready to free it. So we
* have to detach it first and then unlock it.
*
- * We have to drop our reference and NULL it out here because in the
- * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
- * Below when we call btrfs_release_extent_buffer() we will call
- * detach_extent_buffer_folio() on our remaining pages in the !subpage
- * case. If we left eb->folios[i] populated in the subpage case we'd
- * double put our reference and be super sad.
+ * Note: the loop bound is num_extent_pages() as we need to go through all slots.
*/
- for (int i = 0; i < attached; i++) {
- ASSERT(eb->folios[i]);
- detach_extent_buffer_folio(eb, eb->folios[i]);
- unlock_page(folio_page(eb->folios[i], 0));
- folio_put(eb->folios[i]);
+ for (int i = 0; i < num_extent_pages(eb); i++) {
+ struct folio *folio = eb->folios[i];
+
+ if (i < attached) {
+ ASSERT(folio);
+ detach_extent_buffer_folio(eb, folio);
+ folio_unlock(folio);
+ } else if (!folio) {
+ continue;
+ }
+
+ folio_put(folio);
eb->folios[i] = NULL;
}
- /*
- * Now all pages of that extent buffer is unmapped, set UNMAPPED flag,
- * so it can be cleaned up without utilizing page->mapping.
- */
- set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
-
btrfs_release_extent_buffer(eb);
if (ret < 0)
return ERR_PTR(ret);
@@ -3195,7 +3582,7 @@ static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
struct extent_buffer *eb =
container_of(head, struct extent_buffer, rcu_head);
- __free_extent_buffer(eb);
+ kmem_cache_free(extent_buffer_cache, eb);
}
static int release_extent_buffer(struct extent_buffer *eb)
@@ -3203,27 +3590,35 @@ static int release_extent_buffer(struct extent_buffer *eb)
{
lockdep_assert_held(&eb->refs_lock);
- WARN_ON(atomic_read(&eb->refs) == 0);
- if (atomic_dec_and_test(&eb->refs)) {
- if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
- struct btrfs_fs_info *fs_info = eb->fs_info;
+ if (refcount_dec_and_test(&eb->refs)) {
+ struct btrfs_fs_info *fs_info = eb->fs_info;
- spin_unlock(&eb->refs_lock);
+ spin_unlock(&eb->refs_lock);
- spin_lock(&fs_info->buffer_lock);
- radix_tree_delete(&fs_info->buffer_radix,
- eb->start >> fs_info->sectorsize_bits);
- spin_unlock(&fs_info->buffer_lock);
- } else {
- spin_unlock(&eb->refs_lock);
- }
+ /*
+ * We're erasing, so in theory there will be no allocations and
+ * GFP_ATOMIC is sufficient.
+ *
+ * We use cmpxchg instead of erase because we do not know if
+ * this eb is actually in the tree or not, we could be cleaning
+ * up an eb that we allocated but never inserted into the tree.
+ * Thus use cmpxchg to remove it from the tree if it is there,
+ * or leave the other entry if this isn't in the tree.
+ *
+ * The documentation says that putting a NULL value is the same
+ * as erase as long as XA_FLAGS_ALLOC is not set, which it isn't
+ * in this case.
+ */
+ xa_cmpxchg_irq(&fs_info->buffer_tree,
+ eb->start >> fs_info->nodesize_bits, eb, NULL,
+ GFP_ATOMIC);
btrfs_leak_debug_del_eb(eb);
- /* Should be safe to release our pages at this point */
- btrfs_release_extent_buffer_pages(eb);
+ /* Should be safe to release folios at this point. */
+ btrfs_release_extent_buffer_folios(eb);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
- __free_extent_buffer(eb);
+ kmem_cache_free(extent_buffer_cache, eb);
return 1;
}
#endif
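
The comment above motivates removing the entry with a compare-and-exchange rather than a plain erase: only delete the slot if it still holds this eb. A standalone analogue of that idea with a C11 atomic pointer slot (a sketch, not the xarray API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct obj {
	int payload;
};

/* Clear the slot only if it still points at @ours; any other entry
 * (or an already-empty slot) is left untouched. */
static bool slot_remove_if_ours(_Atomic(struct obj *) *slot, struct obj *ours)
{
	struct obj *expected = ours;

	return atomic_compare_exchange_strong(slot, &expected, NULL);
}

int main(void)
{
	struct obj mine = { .payload = 1 };
	_Atomic(struct obj *) slot = &mine;

	return slot_remove_if_ours(&slot, &mine) ? 0 : 1;	/* slot is now NULL */
}
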
@@ -3241,22 +3636,26 @@ void free_extent_buffer(struct extent_buffer *eb)
if (!eb)
return;
- refs = atomic_read(&eb->refs);
+ refs = refcount_read(&eb->refs);
while (1) {
- if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
- || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
- refs == 1))
+ if (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags)) {
+ if (refs == 1)
+ break;
+ } else if (refs <= 3) {
break;
- if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
+ }
+
+ /* Optimization to avoid locking eb->refs_lock. */
+ if (atomic_try_cmpxchg(&eb->refs.refs, &refs, refs - 1))
return;
}
spin_lock(&eb->refs_lock);
- if (atomic_read(&eb->refs) == 2 &&
+ if (refcount_read(&eb->refs) == 2 &&
test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
!extent_buffer_under_io(eb) &&
test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
- atomic_dec(&eb->refs);
+ refcount_dec(&eb->refs);
/*
* I know this is terrible, but it's temporary until we stop tracking
@@ -3273,44 +3672,27 @@ void free_extent_buffer_stale(struct extent_buffer *eb)
spin_lock(&eb->refs_lock);
set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
- if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
+ if (refcount_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
- atomic_dec(&eb->refs);
+ refcount_dec(&eb->refs);
release_extent_buffer(eb);
}
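
free_extent_buffer() above drops references on a lock-free fast path with atomic_try_cmpxchg() and only takes refs_lock once the count is low enough that the buffer might need tearing down. A standalone analogue of that decrement-unless-near-threshold loop with C11 atomics (a sketch; the threshold of 3 is illustrative):

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true if a reference was dropped on the fast path, false if the
 * count is at/below the threshold and the caller must fall back to the
 * locked slow path. */
static bool put_ref_fast(atomic_uint *refs)
{
	unsigned int old = atomic_load(refs);

	while (old > 3) {
		/* On failure, 'old' is reloaded with the current value. */
		if (atomic_compare_exchange_weak(refs, &old, old - 1))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_uint refs = 5;

	(void)put_ref_fast(&refs);		/* drops to 4 without any lock */
	return put_ref_fast(&refs) ? 0 : 1;	/* drops to 3; a further call would return false */
}
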
-static void btree_clear_folio_dirty(struct folio *folio)
+static void btree_clear_folio_dirty_tag(struct folio *folio)
{
- ASSERT(folio_test_dirty(folio));
+ ASSERT(!folio_test_dirty(folio));
ASSERT(folio_test_locked(folio));
- folio_clear_dirty_for_io(folio);
xa_lock_irq(&folio->mapping->i_pages);
if (!folio_test_dirty(folio))
- __xa_clear_mark(&folio->mapping->i_pages,
- folio_index(folio), PAGECACHE_TAG_DIRTY);
+ __xa_clear_mark(&folio->mapping->i_pages, folio->index,
+ PAGECACHE_TAG_DIRTY);
xa_unlock_irq(&folio->mapping->i_pages);
}
-static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
-{
- struct btrfs_fs_info *fs_info = eb->fs_info;
- struct folio *folio = eb->folios[0];
- bool last;
-
- /* btree_clear_folio_dirty() needs page locked. */
- folio_lock(folio);
- last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
- if (last)
- btree_clear_folio_dirty(folio);
- folio_unlock(folio);
- WARN_ON(atomic_read(&eb->refs) == 0);
-}
-
void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
struct extent_buffer *eb)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
- int num_folios;
btrfs_assert_tree_write_locked(eb);
@@ -3334,129 +3716,99 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
return;
+ buffer_tree_clear_mark(eb, PAGECACHE_TAG_DIRTY);
percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
fs_info->dirty_metadata_batch);
- if (eb->fs_info->nodesize < PAGE_SIZE)
- return clear_subpage_extent_buffer_dirty(eb);
-
- num_folios = num_extent_folios(eb);
- for (int i = 0; i < num_folios; i++) {
+ for (int i = 0; i < num_extent_folios(eb); i++) {
struct folio *folio = eb->folios[i];
+ bool last;
if (!folio_test_dirty(folio))
continue;
folio_lock(folio);
- btree_clear_folio_dirty(folio);
+ last = btrfs_meta_folio_clear_and_test_dirty(folio, eb);
+ if (last)
+ btree_clear_folio_dirty_tag(folio);
folio_unlock(folio);
}
- WARN_ON(atomic_read(&eb->refs) == 0);
+ WARN_ON(refcount_read(&eb->refs) == 0);
}
void set_extent_buffer_dirty(struct extent_buffer *eb)
{
- int num_folios;
bool was_dirty;
check_buffer_tree_ref(eb);
was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
- num_folios = num_extent_folios(eb);
- WARN_ON(atomic_read(&eb->refs) == 0);
+ WARN_ON(refcount_read(&eb->refs) == 0);
WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
if (!was_dirty) {
- bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
+ bool subpage = btrfs_meta_is_subpage(eb->fs_info);
/*
* For subpage case, we can have other extent buffers in the
- * same page, and in clear_subpage_extent_buffer_dirty() we
+ * same page, and in btrfs_clear_buffer_dirty() we
* have to clear page dirty without subpage lock held.
* This can cause race where our page gets dirty cleared after
* we just set it.
*
- * Thankfully, clear_subpage_extent_buffer_dirty() has locked
+ * Thankfully, btrfs_clear_buffer_dirty() has locked
* its page for other reasons, we can use page lock to prevent
* the above race.
*/
if (subpage)
- lock_page(folio_page(eb->folios[0], 0));
- for (int i = 0; i < num_folios; i++)
- btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
- eb->start, eb->len);
+ folio_lock(eb->folios[0]);
+ for (int i = 0; i < num_extent_folios(eb); i++)
+ btrfs_meta_folio_set_dirty(eb->folios[i], eb);
+ buffer_tree_set_mark(eb, PAGECACHE_TAG_DIRTY);
if (subpage)
- unlock_page(folio_page(eb->folios[0], 0));
+ folio_unlock(eb->folios[0]);
percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
eb->len,
eb->fs_info->dirty_metadata_batch);
}
#ifdef CONFIG_BTRFS_DEBUG
- for (int i = 0; i < num_folios; i++)
+ for (int i = 0; i < num_extent_folios(eb); i++)
ASSERT(folio_test_dirty(eb->folios[i]));
#endif
}
void clear_extent_buffer_uptodate(struct extent_buffer *eb)
{
- struct btrfs_fs_info *fs_info = eb->fs_info;
- int num_folios = num_extent_folios(eb);
clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
- for (int i = 0; i < num_folios; i++) {
+ for (int i = 0; i < num_extent_folios(eb); i++) {
struct folio *folio = eb->folios[i];
if (!folio)
continue;
- /*
- * This is special handling for metadata subpage, as regular
- * btrfs_is_subpage() can not handle cloned/dummy metadata.
- */
- if (fs_info->nodesize >= PAGE_SIZE)
- folio_clear_uptodate(folio);
- else
- btrfs_subpage_clear_uptodate(fs_info, folio,
- eb->start, eb->len);
+ btrfs_meta_folio_clear_uptodate(folio, eb);
}
}
void set_extent_buffer_uptodate(struct extent_buffer *eb)
{
- struct btrfs_fs_info *fs_info = eb->fs_info;
- int num_folios = num_extent_folios(eb);
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
- for (int i = 0; i < num_folios; i++) {
- struct folio *folio = eb->folios[i];
-
- /*
- * This is special handling for metadata subpage, as regular
- * btrfs_is_subpage() can not handle cloned/dummy metadata.
- */
- if (fs_info->nodesize >= PAGE_SIZE)
- folio_mark_uptodate(folio);
- else
- btrfs_subpage_set_uptodate(fs_info, folio,
- eb->start, eb->len);
- }
+ for (int i = 0; i < num_extent_folios(eb); i++)
+ btrfs_meta_folio_set_uptodate(eb->folios[i], eb);
}
static void clear_extent_buffer_reading(struct extent_buffer *eb)
{
- clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
- smp_mb__after_atomic();
- wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
+ clear_and_wake_up_bit(EXTENT_BUFFER_READING, &eb->bflags);
}
static void end_bbio_meta_read(struct btrfs_bio *bbio)
{
struct extent_buffer *eb = bbio->private;
- struct btrfs_fs_info *fs_info = eb->fs_info;
bool uptodate = !bbio->bio.bi_status;
- struct folio_iter fi;
- u32 bio_offset = 0;
/*
* If the extent buffer is marked UPTODATE before the read operation
@@ -3471,25 +3823,10 @@ static void end_bbio_meta_read(struct btrfs_bio *bbio)
btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
uptodate = false;
- if (uptodate) {
+ if (uptodate)
set_extent_buffer_uptodate(eb);
- } else {
+ else
clear_extent_buffer_uptodate(eb);
- set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
- }
-
- bio_for_each_folio_all(fi, &bbio->bio) {
- struct folio *folio = fi.folio;
- u64 start = eb->start + bio_offset;
- u32 len = fi.length;
-
- if (uptodate)
- btrfs_folio_set_uptodate(fs_info, folio, start, len);
- else
- btrfs_folio_clear_uptodate(fs_info, folio, start, len);
-
- bio_offset += len;
- }
clear_extent_buffer_reading(eb);
free_extent_buffer(eb);
@@ -3497,11 +3834,11 @@ static void end_bbio_meta_read(struct btrfs_bio *bbio)
bio_put(&bbio->bio);
}
-int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
- const struct btrfs_tree_parent_check *check)
+int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
+ const struct btrfs_tree_parent_check *check)
{
+ struct btrfs_fs_info *fs_info = eb->fs_info;
struct btrfs_bio *bbio;
- bool ret;
if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 0;
@@ -3516,7 +3853,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
/* Someone else is already reading the buffer, just wait for it. */
if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
- goto done;
+ return 0;
/*
* Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
@@ -3529,41 +3866,40 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
return 0;
}
- clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
eb->read_mirror = 0;
check_buffer_tree_ref(eb);
- atomic_inc(&eb->refs);
+ refcount_inc(&eb->refs);
bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
- REQ_OP_READ | REQ_META, eb->fs_info,
- end_bbio_meta_read, eb);
+ REQ_OP_READ | REQ_META, BTRFS_I(fs_info->btree_inode),
+ eb->start, end_bbio_meta_read, eb);
bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
- bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
- bbio->file_offset = eb->start;
memcpy(&bbio->parent_check, check, sizeof(*check));
- if (eb->fs_info->nodesize < PAGE_SIZE) {
- ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len,
- eb->start - folio_pos(eb->folios[0]));
- ASSERT(ret);
- } else {
- int num_folios = num_extent_folios(eb);
-
- for (int i = 0; i < num_folios; i++) {
- struct folio *folio = eb->folios[i];
+ for (int i = 0; i < num_extent_folios(eb); i++) {
+ struct folio *folio = eb->folios[i];
+ u64 range_start = max_t(u64, eb->start, folio_pos(folio));
+ u32 range_len = min_t(u64, folio_next_pos(folio),
+ eb->start + eb->len) - range_start;
- ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
- ASSERT(ret);
- }
+ bio_add_folio_nofail(&bbio->bio, folio, range_len,
+ offset_in_folio(folio, range_start));
}
btrfs_submit_bbio(bbio, mirror_num);
+ return 0;
+}
-done:
- if (wait == WAIT_COMPLETE) {
- wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
- if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
- return -EIO;
- }
+int read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,
+ const struct btrfs_tree_parent_check *check)
+{
+ int ret;
+
+ ret = read_extent_buffer_pages_nowait(eb, mirror_num, check);
+ if (ret < 0)
+ return ret;
+ wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
+ if (unlikely(!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)))
+ return -EIO;
return 0;
}
@@ -3573,7 +3909,7 @@ static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
btrfs_warn(eb->fs_info,
"access to eb bytenr %llu len %u out of range start %lu len %lu",
eb->start, eb->len, start, len);
- WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ DEBUG_WARN();
return true;
}
@@ -3735,7 +4071,7 @@ static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
return;
- if (fs_info->nodesize < PAGE_SIZE) {
+ if (btrfs_meta_is_subpage(fs_info)) {
folio = eb->folios[0];
ASSERT(i == 0);
if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
@@ -3921,8 +4257,8 @@ static inline void eb_bitmap_offset(const struct extent_buffer *eb,
* @start: offset of the bitmap item in the extent buffer
* @nr: bit number to test
*/
-int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
- unsigned long nr)
+bool extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
+ unsigned long nr)
{
unsigned long i;
size_t offset;
@@ -4109,82 +4445,29 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
}
}
-#define GANG_LOOKUP_SIZE 16
-static struct extent_buffer *get_next_extent_buffer(
- const struct btrfs_fs_info *fs_info, struct folio *folio, u64 bytenr)
-{
- struct extent_buffer *gang[GANG_LOOKUP_SIZE];
- struct extent_buffer *found = NULL;
- u64 folio_start = folio_pos(folio);
- u64 cur = folio_start;
-
- ASSERT(in_range(bytenr, folio_start, PAGE_SIZE));
- lockdep_assert_held(&fs_info->buffer_lock);
-
- while (cur < folio_start + PAGE_SIZE) {
- int ret;
- int i;
-
- ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
- (void **)gang, cur >> fs_info->sectorsize_bits,
- min_t(unsigned int, GANG_LOOKUP_SIZE,
- PAGE_SIZE / fs_info->nodesize));
- if (ret == 0)
- goto out;
- for (i = 0; i < ret; i++) {
- /* Already beyond page end */
- if (gang[i]->start >= folio_start + PAGE_SIZE)
- goto out;
- /* Found one */
- if (gang[i]->start >= bytenr) {
- found = gang[i];
- goto out;
- }
- }
- cur = gang[ret - 1]->start + gang[ret - 1]->len;
- }
-out:
- return found;
-}
-
static int try_release_subpage_extent_buffer(struct folio *folio)
{
struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
- u64 cur = folio_pos(folio);
- const u64 end = cur + PAGE_SIZE;
+ struct extent_buffer *eb;
+ unsigned long start = (folio_pos(folio) >> fs_info->nodesize_bits);
+ unsigned long index = start;
+ unsigned long end = index + (PAGE_SIZE >> fs_info->nodesize_bits) - 1;
int ret;
- while (cur < end) {
- struct extent_buffer *eb = NULL;
-
- /*
- * Unlike try_release_extent_buffer() which uses folio private
- * to grab buffer, for subpage case we rely on radix tree, thus
- * we need to ensure radix tree consistency.
- *
- * We also want an atomic snapshot of the radix tree, thus go
- * with spinlock rather than RCU.
- */
- spin_lock(&fs_info->buffer_lock);
- eb = get_next_extent_buffer(fs_info, folio, cur);
- if (!eb) {
- /* No more eb in the page range after or at cur */
- spin_unlock(&fs_info->buffer_lock);
- break;
- }
- cur = eb->start + eb->len;
-
+ rcu_read_lock();
+ xa_for_each_range(&fs_info->buffer_tree, index, eb, start, end) {
/*
* The same as try_release_extent_buffer(), to ensure the eb
* won't disappear out from under us.
*/
spin_lock(&eb->refs_lock);
- if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
+ rcu_read_unlock();
+
+ if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
spin_unlock(&eb->refs_lock);
- spin_unlock(&fs_info->buffer_lock);
- break;
+ rcu_read_lock();
+ continue;
}
- spin_unlock(&fs_info->buffer_lock);
/*
* If tree ref isn't set then we know the ref on this eb is a
@@ -4202,7 +4485,10 @@ static int try_release_subpage_extent_buffer(struct folio *folio)
* release_extent_buffer() will release the refs_lock.
*/
release_extent_buffer(eb);
+ rcu_read_lock();
}
+ rcu_read_unlock();
+
/*
* Finally to check if we have cleared folio private, as if we have
* released all ebs in the page, the folio private should be cleared now.
@@ -4214,14 +4500,13 @@ static int try_release_subpage_extent_buffer(struct folio *folio)
ret = 0;
spin_unlock(&folio->mapping->i_private_lock);
return ret;
-
}
int try_release_extent_buffer(struct folio *folio)
{
struct extent_buffer *eb;
- if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
+ if (btrfs_meta_is_subpage(folio_to_fs_info(folio)))
return try_release_subpage_extent_buffer(folio);
/*
@@ -4243,7 +4528,7 @@ int try_release_extent_buffer(struct folio *folio)
* this page.
*/
spin_lock(&eb->refs_lock);
- if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
+ if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
spin_unlock(&eb->refs_lock);
spin_unlock(&folio->mapping->i_private_lock);
return 0;
@@ -4289,12 +4574,12 @@ void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
if (IS_ERR(eb))
return;
- if (btrfs_buffer_uptodate(eb, gen, 1)) {
+ if (btrfs_buffer_uptodate(eb, gen, true)) {
free_extent_buffer(eb);
return;
}
- ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
+ ret = read_extent_buffer_pages_nowait(eb, 0, &check);
if (ret < 0)
free_extent_buffer_stale(eb);
else
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 8a36117ed453..02ebb2f238af 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -12,7 +12,6 @@
#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include "compression.h"
#include "messages.h"
#include "ulist.h"
#include "misc.h"
@@ -38,16 +37,10 @@ struct btrfs_tree_parent_check;
enum {
EXTENT_BUFFER_UPTODATE,
EXTENT_BUFFER_DIRTY,
- EXTENT_BUFFER_CORRUPT,
- /* this got triggered by readahead */
- EXTENT_BUFFER_READAHEAD,
EXTENT_BUFFER_TREE_REF,
EXTENT_BUFFER_STALE,
EXTENT_BUFFER_WRITEBACK,
- /* read IO error */
- EXTENT_BUFFER_READ_ERR,
EXTENT_BUFFER_UNMAPPED,
- EXTENT_BUFFER_IN_TREE,
/* write IO error */
EXTENT_BUFFER_WRITE_ERR,
/* Indicate the extent buffer is written zeroed out (for zoned) */
@@ -79,7 +72,7 @@ enum {
* single word in a bitmap may straddle two pages in the extent buffer.
*/
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
-#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
+#define BYTE_MASK ((1U << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
@@ -104,7 +97,7 @@ struct extent_buffer {
void *addr;
spinlock_t refs_lock;
- atomic_t refs;
+ refcount_t refs;
int read_mirror;
/* >= 0 if eb belongs to a log tree, -1 otherwise */
s8 log_index;
@@ -246,15 +239,13 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc);
+void btrfs_btree_wait_writeback_range(struct btrfs_fs_info *fs_info, u64 start, u64 end);
void btrfs_readahead(struct readahead_control *rac);
int set_folio_extent_mapped(struct folio *folio);
-int set_page_extent_mapped(struct page *page);
void clear_folio_extent_mapped(struct folio *folio);
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, u64 owner_root, int level);
-struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src);
@@ -262,17 +253,23 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
-#define WAIT_NONE 0
-#define WAIT_COMPLETE 1
-#define WAIT_PAGE_LOCK 2
-int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
+int read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,
const struct btrfs_tree_parent_check *parent_check);
-void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
+int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
+ const struct btrfs_tree_parent_check *parent_check);
+
+static inline void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
+{
+ wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
+ TASK_UNINTERRUPTIBLE);
+}
+
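/*
 * Editor's note (not part of the patch): the new inline helper above sleeps in
 * wait_on_bit_io() until EXTENT_BUFFER_WRITEBACK is cleared. The completion
 * side is expected to pair it with the usual clear/wake sequence, roughly as
 * sketched here (end_eb_writeback_example() is a hypothetical name):
 */
static void end_eb_writeback_example(struct extent_buffer *eb)
{
	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
	/* Order the bit clear before waking any waiters. */
	smp_mb__after_atomic();
	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
}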
void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 owner_root, u64 gen, int level);
void btrfs_readahead_node_child(struct extent_buffer *node, int slot);
-static inline int num_extent_pages(const struct extent_buffer *eb)
+/* Note: this can be used in for loops without caching the value in a variable. */
+static inline int __pure num_extent_pages(const struct extent_buffer *eb)
{
/*
* For sectorsize == PAGE_SIZE case, since nodesize is always aligned to
@@ -290,9 +287,13 @@ static inline int num_extent_pages(const struct extent_buffer *eb)
* As we can have either one large folio covering the whole eb
* (either nodesize <= PAGE_SIZE, or high order folio), or multiple
* single-paged folios.
+ *
+ * Note: this can be used in for loops without caching the value in a variable.
*/
-static inline int num_extent_folios(const struct extent_buffer *eb)
+static inline int __pure num_extent_folios(const struct extent_buffer *eb)
{
+ if (!eb->folios[0])
+ return 0;
if (folio_order(eb->folios[0]))
return 1;
return num_extent_pages(eb);
@@ -343,8 +344,8 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
unsigned long len);
void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
unsigned long len);
-int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
- unsigned long pos);
+bool extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
+ unsigned long pos);
void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
@@ -364,7 +365,8 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
bool nofail);
-int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array);
+int btrfs_alloc_folio_array(unsigned int nr_folios, unsigned int order,
+ struct folio **folio_array);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 67ce85ff0ae2..7e38c23a0c1c 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -13,7 +13,7 @@
static struct kmem_cache *extent_map_cache;
-int __init extent_map_init(void)
+int __init btrfs_extent_map_init(void)
{
extent_map_cache = kmem_cache_create("btrfs_extent_map",
sizeof(struct extent_map), 0, 0, NULL);
@@ -22,7 +22,7 @@ int __init extent_map_init(void)
return 0;
}
-void __cold extent_map_exit(void)
+void __cold btrfs_extent_map_exit(void)
{
kmem_cache_destroy(extent_map_cache);
}
@@ -31,7 +31,7 @@ void __cold extent_map_exit(void)
* Initialize the extent tree @tree. Should be called for each new inode or
* other user of the extent_map interface.
*/
-void extent_map_tree_init(struct extent_map_tree *tree)
+void btrfs_extent_map_tree_init(struct extent_map_tree *tree)
{
tree->root = RB_ROOT;
INIT_LIST_HEAD(&tree->modified_extents);
@@ -42,7 +42,7 @@ void extent_map_tree_init(struct extent_map_tree *tree)
* Allocate a new extent_map structure. The new structure is returned with a
* reference count of one and needs to be freed using free_extent_map()
*/
-struct extent_map *alloc_extent_map(void)
+struct extent_map *btrfs_alloc_extent_map(void)
{
struct extent_map *em;
em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
@@ -58,12 +58,12 @@ struct extent_map *alloc_extent_map(void)
* Drop the reference out on @em by one and free the structure if the reference
* count hits zero.
*/
-void free_extent_map(struct extent_map *em)
+void btrfs_free_extent_map(struct extent_map *em)
{
if (!em)
return;
if (refcount_dec_and_test(&em->refs)) {
- WARN_ON(extent_map_in_tree(em));
+ WARN_ON(btrfs_extent_map_in_tree(em));
WARN_ON(!list_empty(&em->list));
kmem_cache_free(extent_map_cache, em);
}
@@ -84,7 +84,7 @@ static void remove_em(struct btrfs_inode *inode, struct extent_map *em)
rb_erase(&em->rb_node, &inode->extent_tree.root);
RB_CLEAR_NODE(&em->rb_node);
- if (!btrfs_is_testing(fs_info) && is_fstree(btrfs_root_id(inode->root)))
+ if (!btrfs_is_testing(fs_info) && btrfs_is_fstree(btrfs_root_id(inode->root)))
percpu_counter_dec(&fs_info->evictable_extent_maps);
}
@@ -102,19 +102,19 @@ static int tree_insert(struct rb_root *root, struct extent_map *em)
if (em->start < entry->start)
p = &(*p)->rb_left;
- else if (em->start >= extent_map_end(entry))
+ else if (em->start >= btrfs_extent_map_end(entry))
p = &(*p)->rb_right;
else
return -EEXIST;
}
orig_parent = parent;
- while (parent && em->start >= extent_map_end(entry)) {
+ while (parent && em->start >= btrfs_extent_map_end(entry)) {
parent = rb_next(parent);
entry = rb_entry(parent, struct extent_map, rb_node);
}
if (parent)
- if (end > entry->start && em->start < extent_map_end(entry))
+ if (end > entry->start && em->start < btrfs_extent_map_end(entry))
return -EEXIST;
parent = orig_parent;
@@ -124,7 +124,7 @@ static int tree_insert(struct rb_root *root, struct extent_map *em)
entry = rb_entry(parent, struct extent_map, rb_node);
}
if (parent)
- if (end > entry->start && em->start < extent_map_end(entry))
+ if (end > entry->start && em->start < btrfs_extent_map_end(entry))
return -EEXIST;
rb_link_node(&em->rb_node, orig_parent, p);
@@ -136,8 +136,8 @@ static int tree_insert(struct rb_root *root, struct extent_map *em)
* Search through the tree for an extent_map with a given offset. If it can't
* be found, try to find some neighboring extents
*/
-static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
- struct rb_node **prev_or_next_ret)
+static struct rb_node *tree_search(struct rb_root *root, u64 offset,
+ struct rb_node **prev_or_next_ret)
{
struct rb_node *n = root->rb_node;
struct rb_node *prev = NULL;
@@ -154,14 +154,14 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
if (offset < entry->start)
n = n->rb_left;
- else if (offset >= extent_map_end(entry))
+ else if (offset >= btrfs_extent_map_end(entry))
n = n->rb_right;
else
return n;
}
orig_prev = prev;
- while (prev && offset >= extent_map_end(prev_entry)) {
+ while (prev && offset >= btrfs_extent_map_end(prev_entry)) {
prev = rb_next(prev);
prev_entry = rb_entry(prev, struct extent_map, rb_node);
}
@@ -188,14 +188,14 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
static inline u64 extent_map_block_len(const struct extent_map *em)
{
- if (extent_map_is_compressed(em))
+ if (btrfs_extent_map_is_compressed(em))
return em->disk_num_bytes;
return em->len;
}
static inline u64 extent_map_block_end(const struct extent_map *em)
{
- const u64 block_start = extent_map_block_start(em);
+ const u64 block_start = btrfs_extent_map_block_start(em);
const u64 block_end = block_start + extent_map_block_len(em);
if (block_end < block_start)
@@ -210,7 +210,7 @@ static bool can_merge_extent_map(const struct extent_map *em)
return false;
/* Don't merge compressed extents, we need to know their actual size. */
- if (extent_map_is_compressed(em))
+ if (btrfs_extent_map_is_compressed(em))
return false;
if (em->flags & EXTENT_FLAG_LOGGING)
@@ -230,7 +230,7 @@ static bool can_merge_extent_map(const struct extent_map *em)
/* Check to see if two extent_map structs are adjacent and safe to merge. */
static bool mergeable_maps(const struct extent_map *prev, const struct extent_map *next)
{
- if (extent_map_end(prev) != next->start)
+ if (btrfs_extent_map_end(prev) != next->start)
return false;
/*
@@ -242,7 +242,7 @@ static bool mergeable_maps(const struct extent_map *prev, const struct extent_ma
return false;
if (next->disk_bytenr < EXTENT_MAP_LAST_BYTE - 1)
- return extent_map_block_start(next) == extent_map_block_end(prev);
+ return btrfs_extent_map_block_start(next) == extent_map_block_end(prev);
/* HOLES and INLINE extents. */
return next->disk_bytenr == prev->disk_bytenr;
@@ -270,8 +270,8 @@ static void merge_ondisk_extents(const struct extent_map *prev, const struct ext
u64 new_offset;
/* @prev and @next should not be compressed. */
- ASSERT(!extent_map_is_compressed(prev));
- ASSERT(!extent_map_is_compressed(next));
+ ASSERT(!btrfs_extent_map_is_compressed(prev));
+ ASSERT(!btrfs_extent_map_is_compressed(next));
/*
* There are two different cases where @prev and @next can be merged.
@@ -327,9 +327,9 @@ static void validate_extent_map(struct btrfs_fs_info *fs_info, struct extent_map
if (em->offset + em->len > em->ram_bytes)
dump_extent_map(fs_info, "ram_bytes too small", em);
if (em->offset + em->len > em->disk_num_bytes &&
- !extent_map_is_compressed(em))
+ !btrfs_extent_map_is_compressed(em))
dump_extent_map(fs_info, "disk_num_bytes too small", em);
- if (!extent_map_is_compressed(em) &&
+ if (!btrfs_extent_map_is_compressed(em) &&
em->ram_bytes != em->disk_num_bytes)
dump_extent_map(fs_info,
"ram_bytes mismatch with disk_num_bytes for non-compressed em",
@@ -361,8 +361,8 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
if (em->start != 0) {
rb = rb_prev(&em->rb_node);
- if (rb)
- merge = rb_entry(rb, struct extent_map, rb_node);
+ merge = rb_entry_safe(rb, struct extent_map, rb_node);
+
if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) {
em->start = merge->start;
em->len += merge->len;
@@ -374,13 +374,13 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
validate_extent_map(fs_info, em);
remove_em(inode, merge);
- free_extent_map(merge);
+ btrfs_free_extent_map(merge);
}
}
rb = rb_next(&em->rb_node);
- if (rb)
- merge = rb_entry(rb, struct extent_map, rb_node);
+ merge = rb_entry_safe(rb, struct extent_map, rb_node);
+
if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) {
em->len += merge->len;
if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
@@ -389,7 +389,7 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
em->generation = max(em->generation, merge->generation);
em->flags |= EXTENT_FLAG_MERGED;
remove_em(inode, merge);
- free_extent_map(merge);
+ btrfs_free_extent_map(merge);
}
}
@@ -409,7 +409,7 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
* -ENOENT when the extent is not found in the tree
* -EUCLEAN if the found extent does not match the expected start
*/
-int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
+int btrfs_unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_map_tree *tree = &inode->extent_tree;
@@ -417,7 +417,7 @@ int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
struct extent_map *em;
write_lock(&tree->lock);
- em = lookup_extent_mapping(tree, start, len);
+ em = btrfs_lookup_extent_mapping(tree, start, len);
if (WARN_ON(!em)) {
btrfs_warn(fs_info,
@@ -444,23 +444,23 @@ int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
out:
write_unlock(&tree->lock);
- free_extent_map(em);
+ btrfs_free_extent_map(em);
return ret;
}
-void clear_em_logging(struct btrfs_inode *inode, struct extent_map *em)
+void btrfs_clear_em_logging(struct btrfs_inode *inode, struct extent_map *em)
{
lockdep_assert_held_write(&inode->extent_tree.lock);
em->flags &= ~EXTENT_FLAG_LOGGING;
- if (extent_map_in_tree(em))
+ if (btrfs_extent_map_in_tree(em))
try_merge_map(inode, em);
}
static inline void setup_extent_mapping(struct btrfs_inode *inode,
struct extent_map *em,
- int modified)
+ bool modified)
{
refcount_inc(&em->refs);
@@ -486,7 +486,7 @@ static inline void setup_extent_mapping(struct btrfs_inode *inode,
* taken, or a reference dropped if the merge attempt was successful.
*/
static int add_extent_mapping(struct btrfs_inode *inode,
- struct extent_map *em, int modified)
+ struct extent_map *em, bool modified)
{
struct extent_map_tree *tree = &inode->extent_tree;
struct btrfs_root *root = inode->root;
@@ -502,22 +502,21 @@ static int add_extent_mapping(struct btrfs_inode *inode,
setup_extent_mapping(inode, em, modified);
- if (!btrfs_is_testing(fs_info) && is_fstree(btrfs_root_id(root)))
+ if (!btrfs_is_testing(fs_info) && btrfs_is_fstree(btrfs_root_id(root)))
percpu_counter_inc(&fs_info->evictable_extent_maps);
return 0;
}
-static struct extent_map *
-__lookup_extent_mapping(struct extent_map_tree *tree,
- u64 start, u64 len, int strict)
+static struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len, bool strict)
{
struct extent_map *em;
struct rb_node *rb_node;
struct rb_node *prev_or_next = NULL;
u64 end = range_end(start, len);
- rb_node = __tree_search(&tree->root, start, &prev_or_next);
+ rb_node = tree_search(&tree->root, start, &prev_or_next);
if (!rb_node) {
if (prev_or_next)
rb_node = prev_or_next;
@@ -527,7 +526,7 @@ __lookup_extent_mapping(struct extent_map_tree *tree,
em = rb_entry(rb_node, struct extent_map, rb_node);
- if (strict && !(end > em->start && start < extent_map_end(em)))
+ if (strict && !(end > em->start && start < btrfs_extent_map_end(em)))
return NULL;
refcount_inc(&em->refs);
@@ -546,10 +545,10 @@ __lookup_extent_mapping(struct extent_map_tree *tree,
* intersect, so check the object returned carefully to make sure that no
* additional lookups are needed.
*/
-struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
- u64 start, u64 len)
+struct extent_map *btrfs_lookup_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len)
{
- return __lookup_extent_mapping(tree, start, len, 1);
+ return lookup_extent_mapping(tree, start, len, true);
}
/*
@@ -564,10 +563,10 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
*
* If one can't be found, any nearby extent may be returned
*/
-struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
- u64 start, u64 len)
+struct extent_map *btrfs_search_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len)
{
- return __lookup_extent_mapping(tree, start, len, 0);
+ return lookup_extent_mapping(tree, start, len, false);
}
/*
@@ -579,7 +578,7 @@ struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
* Remove @em from the extent tree of @inode. No reference counts are dropped,
* and no checks are done to see if the range is in use.
*/
-void remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em)
+void btrfs_remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em)
{
struct extent_map_tree *tree = &inode->extent_tree;
@@ -595,7 +594,7 @@ void remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em)
static void replace_extent_mapping(struct btrfs_inode *inode,
struct extent_map *cur,
struct extent_map *new,
- int modified)
+ bool modified)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_map_tree *tree = &inode->extent_tree;
@@ -605,7 +604,7 @@ static void replace_extent_mapping(struct btrfs_inode *inode,
validate_extent_map(fs_info, new);
WARN_ON(cur->flags & EXTENT_FLAG_PINNED);
- ASSERT(extent_map_in_tree(cur));
+ ASSERT(btrfs_extent_map_in_tree(cur));
if (!(cur->flags & EXTENT_FLAG_LOGGING))
list_del_init(&cur->list);
rb_replace_node(&cur->rb_node, &new->rb_node, &tree->root);
@@ -651,7 +650,7 @@ static noinline int merge_extent_mapping(struct btrfs_inode *inode,
u64 end;
u64 start_diff;
- if (map_start < em->start || map_start >= extent_map_end(em))
+ if (map_start < em->start || map_start >= btrfs_extent_map_end(em))
return -EINVAL;
if (existing->start > map_start) {
@@ -662,16 +661,16 @@ static noinline int merge_extent_mapping(struct btrfs_inode *inode,
next = next_extent_map(prev);
}
- start = prev ? extent_map_end(prev) : em->start;
+ start = prev ? btrfs_extent_map_end(prev) : em->start;
start = max_t(u64, start, em->start);
- end = next ? next->start : extent_map_end(em);
- end = min_t(u64, end, extent_map_end(em));
+ end = next ? next->start : btrfs_extent_map_end(em);
+ end = min_t(u64, end, btrfs_extent_map_end(em));
start_diff = start - em->start;
em->start = start;
em->len = end - start;
if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
em->offset += start_diff;
- return add_extent_mapping(inode, em, 0);
+ return add_extent_mapping(inode, em, false);
}
/*
@@ -708,7 +707,7 @@ int btrfs_add_extent_mapping(struct btrfs_inode *inode,
if (em->disk_bytenr == EXTENT_MAP_INLINE)
ASSERT(em->start == 0);
- ret = add_extent_mapping(inode, em, 0);
+ ret = add_extent_mapping(inode, em, false);
/* it is possible that someone inserted the extent into the tree
* while we had the lock dropped. It is also possible that
* an overlapping map exists in the tree
@@ -716,7 +715,7 @@ int btrfs_add_extent_mapping(struct btrfs_inode *inode,
if (ret == -EEXIST) {
struct extent_map *existing;
- existing = search_extent_mapping(&inode->extent_tree, start, len);
+ existing = btrfs_search_extent_mapping(&inode->extent_tree, start, len);
trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);
@@ -725,8 +724,8 @@ int btrfs_add_extent_mapping(struct btrfs_inode *inode,
* extent causing the -EEXIST.
*/
if (start >= existing->start &&
- start < extent_map_end(existing)) {
- free_extent_map(em);
+ start < btrfs_extent_map_end(existing)) {
+ btrfs_free_extent_map(em);
*em_in = existing;
ret = 0;
} else {
@@ -739,14 +738,14 @@ int btrfs_add_extent_mapping(struct btrfs_inode *inode,
*/
ret = merge_extent_mapping(inode, existing, em, start);
if (WARN_ON(ret)) {
- free_extent_map(em);
+ btrfs_free_extent_map(em);
*em_in = NULL;
btrfs_warn(fs_info,
"extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu",
- existing->start, extent_map_end(existing),
+ existing->start, btrfs_extent_map_end(existing),
orig_start, orig_start + orig_len, start);
}
- free_extent_map(existing);
+ btrfs_free_extent_map(existing);
}
}
@@ -772,8 +771,8 @@ static void drop_all_extent_maps_fast(struct btrfs_inode *inode)
em = rb_entry(node, struct extent_map, rb_node);
em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
- remove_extent_mapping(inode, em);
- free_extent_map(em);
+ btrfs_remove_extent_mapping(inode, em);
+ btrfs_free_extent_map(em);
if (cond_resched_rwlock_write(&tree->lock))
node = rb_first(&tree->root);
@@ -826,15 +825,15 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
* range ends after our range (and they might be the same extent map),
* because we need to split those two extent maps at the boundaries.
*/
- split = alloc_extent_map();
- split2 = alloc_extent_map();
+ split = btrfs_alloc_extent_map();
+ split2 = btrfs_alloc_extent_map();
write_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, start, len);
+ em = btrfs_lookup_extent_mapping(em_tree, start, len);
while (em) {
/* extent_map_end() returns exclusive value (last byte + 1). */
- const u64 em_end = extent_map_end(em);
+ const u64 em_end = btrfs_extent_map_end(em);
struct extent_map *next_em = NULL;
u64 gen;
unsigned long flags;
@@ -898,7 +897,7 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
split->generation = gen;
split->flags = flags;
replace_extent_mapping(inode, em, split, modified);
- free_extent_map(split);
+ btrfs_free_extent_map(split);
split = split2;
split2 = NULL;
}
@@ -925,7 +924,7 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
split->ram_bytes = split->len;
}
- if (extent_map_in_tree(em)) {
+ if (btrfs_extent_map_in_tree(em)) {
replace_extent_mapping(inode, em, split, modified);
} else {
int ret;
@@ -936,11 +935,11 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
if (WARN_ON(ret != 0) && modified)
btrfs_set_inode_full_sync(inode);
}
- free_extent_map(split);
+ btrfs_free_extent_map(split);
split = NULL;
}
remove_em:
- if (extent_map_in_tree(em)) {
+ if (btrfs_extent_map_in_tree(em)) {
/*
* If the extent map is still in the tree it means that
* either of the following is true:
@@ -965,25 +964,25 @@ remove_em:
ASSERT(!split);
btrfs_set_inode_full_sync(inode);
}
- remove_extent_mapping(inode, em);
+ btrfs_remove_extent_mapping(inode, em);
}
/*
* Once for the tree reference (we replaced or removed the
* extent map from the tree).
*/
- free_extent_map(em);
+ btrfs_free_extent_map(em);
next:
/* Once for us (for our lookup reference). */
- free_extent_map(em);
+ btrfs_free_extent_map(em);
em = next_em;
}
write_unlock(&em_tree->lock);
- free_extent_map(split);
- free_extent_map(split2);
+ btrfs_free_extent_map(split);
+ btrfs_free_extent_map(split2);
}
/*
@@ -1007,7 +1006,7 @@ int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
struct extent_map_tree *tree = &inode->extent_tree;
int ret;
- ASSERT(!extent_map_in_tree(new_em));
+ ASSERT(!btrfs_extent_map_in_tree(new_em));
/*
* The caller has locked an appropriate file range in the inode's io
@@ -1033,8 +1032,8 @@ int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
*
* This function is used when an ordered_extent needs to be split.
*/
-int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
- u64 new_logical)
+int btrfs_split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
+ u64 new_logical)
{
struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
@@ -1046,25 +1045,25 @@ int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
ASSERT(pre != 0);
ASSERT(pre < len);
- split_pre = alloc_extent_map();
+ split_pre = btrfs_alloc_extent_map();
if (!split_pre)
return -ENOMEM;
- split_mid = alloc_extent_map();
+ split_mid = btrfs_alloc_extent_map();
if (!split_mid) {
ret = -ENOMEM;
goto out_free_pre;
}
- lock_extent(&inode->io_tree, start, start + len - 1, NULL);
+ btrfs_lock_extent(&inode->io_tree, start, start + len - 1, NULL);
write_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, start, len);
- if (!em) {
+ em = btrfs_lookup_extent_mapping(em_tree, start, len);
+ if (unlikely(!em)) {
ret = -EIO;
goto out_unlock;
}
ASSERT(em->len == len);
- ASSERT(!extent_map_is_compressed(em));
+ ASSERT(!btrfs_extent_map_is_compressed(em));
ASSERT(em->disk_bytenr < EXTENT_MAP_LAST_BYTE);
ASSERT(em->flags & EXTENT_FLAG_PINNED);
ASSERT(!(em->flags & EXTENT_FLAG_LOGGING));
@@ -1083,7 +1082,7 @@ int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
split_pre->flags = flags;
split_pre->generation = em->generation;
- replace_extent_mapping(inode, em, split_pre, 1);
+ replace_extent_mapping(inode, em, split_pre, true);
/*
* Now we only have an extent_map at:
@@ -1093,25 +1092,25 @@ int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
/* Insert the middle extent_map. */
split_mid->start = em->start + pre;
split_mid->len = em->len - pre;
- split_mid->disk_bytenr = extent_map_block_start(em) + pre;
+ split_mid->disk_bytenr = btrfs_extent_map_block_start(em) + pre;
split_mid->disk_num_bytes = split_mid->len;
split_mid->offset = 0;
split_mid->ram_bytes = split_mid->len;
split_mid->flags = flags;
split_mid->generation = em->generation;
- add_extent_mapping(inode, split_mid, 1);
+ add_extent_mapping(inode, split_mid, true);
/* Once for us */
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* Once for the tree */
- free_extent_map(em);
+ btrfs_free_extent_map(em);
out_unlock:
write_unlock(&em_tree->lock);
- unlock_extent(&inode->io_tree, start, start + len - 1, NULL);
- free_extent_map(split_mid);
+ btrfs_unlock_extent(&inode->io_tree, start, start + len - 1, NULL);
+ btrfs_free_extent_map(split_mid);
out_free_pre:
- free_extent_map(split_pre);
+ btrfs_free_extent_map(split_pre);
return ret;
}
@@ -1128,6 +1127,8 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c
long nr_dropped = 0;
struct rb_node *node;
+ lockdep_assert_held_write(&tree->lock);
+
/*
* Take the mmap lock so that we serialize with the inode logging phase
* of fsync because we may need to set the full sync flag on the inode,
@@ -1139,28 +1140,12 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c
* to find new extents, which may not be there yet because ordered
* extents haven't completed yet.
*
- * We also do a try lock because otherwise we could deadlock. This is
- * because the shrinker for this filesystem may be invoked while we are
- * in a path that is holding the mmap lock in write mode. For example in
- * a reflink operation while COWing an extent buffer, when allocating
- * pages for a new extent buffer and under memory pressure, the shrinker
- * may be invoked, and therefore we would deadlock by attempting to read
- * lock the mmap lock while we are holding already a write lock on it.
+ * We also do a try lock because we don't want to block for too long and
+ * we are holding the extent map tree's lock in write mode.
*/
if (!down_read_trylock(&inode->i_mmap_lock))
return 0;
- /*
- * We want to be fast so if the lock is busy we don't want to spend time
- * waiting for it - either some task is about to do IO for the inode or
- * we may have another task shrinking extent maps, here in this code, so
- * skip this inode.
- */
- if (!write_trylock(&tree->lock)) {
- up_read(&inode->i_mmap_lock);
- return 0;
- }
-
node = rb_first(&tree->root);
while (node) {
struct rb_node *next = rb_next(node);
@@ -1182,10 +1167,10 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c
if (!list_empty(&em->list) && em->generation >= cur_fs_gen)
btrfs_set_inode_full_sync(inode);
- remove_extent_mapping(inode, em);
+ btrfs_remove_extent_mapping(inode, em);
trace_btrfs_extent_map_shrinker_remove_em(inode, em);
/* Drop the reference for the tree. */
- free_extent_map(em);
+ btrfs_free_extent_map(em);
nr_dropped++;
next:
if (ctx->scanned >= ctx->nr_to_scan)
@@ -1201,12 +1186,61 @@ next:
break;
node = next;
}
- write_unlock(&tree->lock);
up_read(&inode->i_mmap_lock);
return nr_dropped;
}
+static struct btrfs_inode *find_first_inode_to_shrink(struct btrfs_root *root,
+ u64 min_ino)
+{
+ struct btrfs_inode *inode;
+ unsigned long from = min_ino;
+
+ xa_lock(&root->inodes);
+ while (true) {
+ struct extent_map_tree *tree;
+
+ inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
+ if (!inode)
+ break;
+
+ tree = &inode->extent_tree;
+
+ /*
+ * We want to be fast so if the lock is busy we don't want to
+ * spend time waiting for it (some task is about to do IO for
+ * the inode).
+ */
+ if (!write_trylock(&tree->lock))
+ goto next;
+
+ /*
+ * Skip inode if it doesn't have loaded extent maps, so we avoid
+ * getting a reference and doing an iput later. This includes
+ * cases like files that were opened for things like stat(2), or
+ * files with all extent maps previously released through the
+ * release folio callback (btrfs_release_folio()) or released in
+ * a previous run, or directories which never have extent maps.
+ */
+ if (RB_EMPTY_ROOT(&tree->root)) {
+ write_unlock(&tree->lock);
+ goto next;
+ }
+
+ if (igrab(&inode->vfs_inode))
+ break;
+
+ write_unlock(&tree->lock);
+next:
+ from = btrfs_ino(inode) + 1;
+ cond_resched_lock(&root->inodes.xa_lock);
+ }
+ xa_unlock(&root->inodes);
+
+ return inode;
+}
+
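/*
 * Editor's note (not part of the patch): with this change the extent map tree
 * lock is taken inside find_first_inode_to_shrink() above and released by the
 * caller after scanning, so the calling contract becomes roughly the sketch
 * below (scan_one_inode_example() is a hypothetical distillation of the loop
 * in btrfs_scan_root() that follows):
 */
static long scan_one_inode_example(struct btrfs_root *root, u64 min_ino,
				   struct btrfs_em_shrink_ctx *ctx)
{
	struct btrfs_inode *inode;
	long dropped;

	/* On success: inode refcount held and extent_tree.lock held for write. */
	inode = find_first_inode_to_shrink(root, min_ino);
	if (!inode)
		return 0;

	dropped = btrfs_scan_inode(inode, ctx);	/* asserts the lock is held */
	write_unlock(&inode->extent_tree.lock);
	iput(&inode->vfs_inode);
	return dropped;
}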
static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx)
{
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -1214,21 +1248,21 @@ static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx
long nr_dropped = 0;
u64 min_ino = fs_info->em_shrinker_last_ino + 1;
- inode = btrfs_find_first_inode(root, min_ino);
+ inode = find_first_inode_to_shrink(root, min_ino);
while (inode) {
nr_dropped += btrfs_scan_inode(inode, ctx);
+ write_unlock(&inode->extent_tree.lock);
min_ino = btrfs_ino(inode) + 1;
fs_info->em_shrinker_last_ino = btrfs_ino(inode);
- btrfs_add_delayed_iput(inode);
+ iput(&inode->vfs_inode);
- if (ctx->scanned >= ctx->nr_to_scan ||
- btrfs_fs_closing(inode->root->fs_info))
+ if (ctx->scanned >= ctx->nr_to_scan || btrfs_fs_closing(fs_info))
break;
cond_resched();
- inode = btrfs_find_first_inode(root, min_ino);
+ inode = find_first_inode_to_shrink(root, min_ino);
}
if (inode) {
@@ -1303,7 +1337,7 @@ static void btrfs_extent_map_shrinker_worker(struct work_struct *work)
if (!root)
continue;
- if (is_fstree(btrfs_root_id(root)))
+ if (btrfs_is_fstree(btrfs_root_id(root)))
nr_dropped += btrfs_scan_root(root, &ctx);
btrfs_put_root(root);
@@ -1338,7 +1372,7 @@ void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
if (atomic64_cmpxchg(&fs_info->em_shrinker_nr_to_scan, 0, nr_to_scan) != 0)
return;
- queue_work(system_unbound_wq, &fs_info->em_shrinker_work);
+ queue_work(system_dfl_wq, &fs_info->em_shrinker_work);
}
void btrfs_init_extent_map_shrinker_work(struct btrfs_fs_info *fs_info)
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index cd123b266b64..6f685f3c9327 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -8,8 +8,7 @@
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/refcount.h>
-#include "misc.h"
-#include "compression.h"
+#include "fs.h"
struct btrfs_inode;
struct btrfs_fs_info;
@@ -108,8 +107,8 @@ struct extent_map_tree {
struct btrfs_inode;
-static inline void extent_map_set_compression(struct extent_map *em,
- enum btrfs_compression_type type)
+static inline void btrfs_extent_map_set_compression(struct extent_map *em,
+ enum btrfs_compression_type type)
{
if (type == BTRFS_COMPRESS_ZLIB)
em->flags |= EXTENT_FLAG_COMPRESS_ZLIB;
@@ -119,7 +118,8 @@ static inline void extent_map_set_compression(struct extent_map *em,
em->flags |= EXTENT_FLAG_COMPRESS_ZSTD;
}
-static inline enum btrfs_compression_type extent_map_compression(const struct extent_map *em)
+static inline enum btrfs_compression_type btrfs_extent_map_compression(
+ const struct extent_map *em)
{
if (em->flags & EXTENT_FLAG_COMPRESS_ZLIB)
return BTRFS_COMPRESS_ZLIB;
@@ -137,50 +137,50 @@ static inline enum btrfs_compression_type extent_map_compression(const struct ex
* More efficient way to determine if extent is compressed, instead of using
* 'extent_map_compression() != BTRFS_COMPRESS_NONE'.
*/
-static inline bool extent_map_is_compressed(const struct extent_map *em)
+static inline bool btrfs_extent_map_is_compressed(const struct extent_map *em)
{
return (em->flags & (EXTENT_FLAG_COMPRESS_ZLIB |
EXTENT_FLAG_COMPRESS_LZO |
EXTENT_FLAG_COMPRESS_ZSTD)) != 0;
}
-static inline int extent_map_in_tree(const struct extent_map *em)
+static inline int btrfs_extent_map_in_tree(const struct extent_map *em)
{
return !RB_EMPTY_NODE(&em->rb_node);
}
-static inline u64 extent_map_block_start(const struct extent_map *em)
+static inline u64 btrfs_extent_map_block_start(const struct extent_map *em)
{
if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
- if (extent_map_is_compressed(em))
+ if (btrfs_extent_map_is_compressed(em))
return em->disk_bytenr;
return em->disk_bytenr + em->offset;
}
return em->disk_bytenr;
}
-static inline u64 extent_map_end(const struct extent_map *em)
+static inline u64 btrfs_extent_map_end(const struct extent_map *em)
{
if (em->start + em->len < em->start)
return (u64)-1;
return em->start + em->len;
}
-void extent_map_tree_init(struct extent_map_tree *tree);
-struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
- u64 start, u64 len);
-void remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em);
-int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
- u64 new_logical);
-
-struct extent_map *alloc_extent_map(void);
-void free_extent_map(struct extent_map *em);
-int __init extent_map_init(void);
-void __cold extent_map_exit(void);
-int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen);
-void clear_em_logging(struct btrfs_inode *inode, struct extent_map *em);
-struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
- u64 start, u64 len);
+void btrfs_extent_map_tree_init(struct extent_map_tree *tree);
+struct extent_map *btrfs_lookup_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len);
+void btrfs_remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em);
+int btrfs_split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
+ u64 new_logical);
+
+struct extent_map *btrfs_alloc_extent_map(void);
+void btrfs_free_extent_map(struct extent_map *em);
+int __init btrfs_extent_map_init(void);
+void __cold btrfs_extent_map_exit(void);
+int btrfs_unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen);
+void btrfs_clear_em_logging(struct btrfs_inode *inode, struct extent_map *em);
+struct extent_map *btrfs_search_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len);
int btrfs_add_extent_mapping(struct btrfs_inode *inode,
struct extent_map **em_in, u64 start, u64 len);
void btrfs_drop_extent_map_range(struct btrfs_inode *inode,
diff --git a/fs/btrfs/fiemap.c b/fs/btrfs/fiemap.c
index b80c07ad8c5e..f2eaaef8422b 100644
--- a/fs/btrfs/fiemap.c
+++ b/fs/btrfs/fiemap.c
@@ -153,7 +153,7 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
if (cache_end > offset) {
if (offset == cache->offset) {
/*
- * We cached a dealloc range (found in the io tree) for
+ * We cached a delalloc range (found in the io tree) for
* a hole or prealloc extent and we have now found a
* file extent item for the same offset. What we have
* now is more recent and up to date, so discard what
@@ -320,7 +320,7 @@ static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *p
* the cost of allocating a new one.
*/
ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED, &clone->bflags));
- atomic_inc(&clone->refs);
+ refcount_inc(&clone->refs);
ret = btrfs_next_leaf(inode->root, path);
if (ret != 0)
@@ -634,7 +634,7 @@ static int extent_fiemap(struct btrfs_inode *inode,
const u64 ino = btrfs_ino(inode);
struct extent_state *cached_state = NULL;
struct extent_state *delalloc_cached_state = NULL;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct fiemap_cache cache = { 0 };
struct btrfs_backref_share_check_ctx *backref_ctx;
u64 last_extent_end = 0;
@@ -661,7 +661,7 @@ restart:
range_end = round_up(start + len, sectorsize);
prev_extent_end = range_start;
- lock_extent(&inode->io_tree, range_start, range_end, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, range_start, range_end, &cached_state);
ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
if (ret < 0)
@@ -841,7 +841,7 @@ check_eof_delalloc:
}
out_unlock:
- unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);
if (ret == BTRFS_FIEMAP_FLUSH_CACHE) {
btrfs_release_path(path);
@@ -871,10 +871,9 @@ out_unlock:
ret = emit_last_fiemap_cache(fieinfo, &cache);
out:
- free_extent_state(delalloc_cached_state);
+ btrfs_free_extent_state(delalloc_cached_state);
kfree(cache.entries);
btrfs_free_backref_share_ctx(backref_ctx);
- btrfs_free_path(path);
return ret;
}
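/*
 * Editor's note (not part of the patch): BTRFS_PATH_AUTO_FREE() used in the
 * fiemap and file-item changes replaces manual btrfs_free_path() calls with
 * scope-based cleanup. It is presumably built on linux/cleanup.h along these
 * lines (illustrative only, not the exact in-tree definition):
 */
DEFINE_FREE(btrfs_free_path, struct btrfs_path *, if (_T) btrfs_free_path(_T))

#define BTRFS_PATH_AUTO_FREE(path_name) \
	struct btrfs_path *path_name __free(btrfs_free_path) = NULL

/* A caller can then return early without a free/out label: */
static int example_lookup(struct btrfs_root *root, const struct btrfs_key *key)
{
	BTRFS_PATH_AUTO_FREE(path);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	return btrfs_search_slot(NULL, root, key, path, 0, 0);
}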
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 886749b39672..14e5257f0f04 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -18,6 +18,7 @@
#include "fs.h"
#include "accessors.h"
#include "file-item.h"
+#include "volumes.h"
#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
sizeof(struct btrfs_item) * 2) / \
@@ -46,7 +47,7 @@
void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size)
{
u64 start, end, i_size;
- int ret;
+ bool found;
spin_lock(&inode->lock);
i_size = new_i_size ?: i_size_read(&inode->vfs_inode);
@@ -55,9 +56,9 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_siz
goto out_unlock;
}
- ret = find_contiguous_extent_bit(inode->file_extent_tree, 0, &start,
- &end, EXTENT_DIRTY);
- if (!ret && start == 0)
+ found = btrfs_find_contiguous_extent_bit(inode->file_extent_tree, 0, &start,
+ &end, EXTENT_DIRTY);
+ if (found && start == 0)
i_size = min(i_size, end + 1);
else
i_size = 0;
@@ -91,8 +92,8 @@ int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start,
ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize));
- return set_extent_bit(inode->file_extent_tree, start, start + len - 1,
- EXTENT_DIRTY, NULL);
+ return btrfs_set_extent_bit(inode->file_extent_tree, start, start + len - 1,
+ EXTENT_DIRTY, NULL);
}
/*
@@ -121,8 +122,8 @@ int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start,
ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize) ||
len == (u64)-1);
- return clear_extent_bit(inode->file_extent_tree, start,
- start + len - 1, EXTENT_DIRTY, NULL);
+ return btrfs_clear_extent_bit(inode->file_extent_tree, start,
+ start + len - 1, EXTENT_DIRTY, NULL);
}
static size_t bytes_to_csum_size(const struct btrfs_fs_info *fs_info, u32 bytes)
@@ -163,20 +164,21 @@ int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
int ret = 0;
struct btrfs_file_extent_item *item;
struct btrfs_key file_key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
+
file_key.objectid = objectid;
- file_key.offset = pos;
file_key.type = BTRFS_EXTENT_DATA_KEY;
+ file_key.offset = pos;
ret = btrfs_insert_empty_item(trans, root, path, &file_key,
sizeof(*item));
if (ret < 0)
- goto out;
+ return ret;
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
@@ -191,9 +193,6 @@ int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_encryption(leaf, item, 0);
btrfs_set_file_extent_other_encoding(leaf, item, 0);
- btrfs_mark_buffer_dirty(trans, leaf);
-out:
- btrfs_free_path(path);
return ret;
}
@@ -214,8 +213,8 @@ btrfs_lookup_csum(struct btrfs_trans_handle *trans,
int csums_in_item;
file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
- file_key.offset = bytenr;
file_key.type = BTRFS_EXTENT_CSUM_KEY;
+ file_key.offset = bytenr;
ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
if (ret < 0)
goto fail;
@@ -261,8 +260,8 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
int cow = mod != 0;
file_key.objectid = objectid;
- file_key.offset = offset;
file_key.type = BTRFS_EXTENT_DATA_KEY;
+ file_key.offset = offset;
return btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
}
@@ -338,23 +337,23 @@ out:
*
* Return: BLK_STS_RESOURCE if allocating memory fails, BLK_STS_OK otherwise.
*/
-blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
+int btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
{
struct btrfs_inode *inode = bbio->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct bio *bio = &bbio->bio;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
const u32 sectorsize = fs_info->sectorsize;
const u32 csum_size = fs_info->csum_size;
u32 orig_len = bio->bi_iter.bi_size;
u64 orig_disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
const unsigned int nblocks = orig_len >> fs_info->sectorsize_bits;
- blk_status_t ret = BLK_STS_OK;
+ int ret = 0;
u32 bio_offset = 0;
if ((inode->flags & BTRFS_INODE_NODATASUM) ||
test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state))
- return BLK_STS_OK;
+ return 0;
/*
* This function is only called for read bio.
@@ -371,14 +370,12 @@ blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
ASSERT(bio_op(bio) == REQ_OP_READ);
path = btrfs_alloc_path();
if (!path)
- return BLK_STS_RESOURCE;
+ return -ENOMEM;
if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
- bbio->csum = kmalloc_array(nblocks, csum_size, GFP_NOFS);
- if (!bbio->csum) {
- btrfs_free_path(path);
- return BLK_STS_RESOURCE;
- }
+ bbio->csum = kvcalloc(nblocks, csum_size, GFP_NOFS);
+ if (!bbio->csum)
+ return -ENOMEM;
} else {
bbio->csum = bbio->csum_inline;
}
@@ -397,8 +394,38 @@ blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
* between reading the free space cache and updating the csum tree.
*/
if (btrfs_is_free_space_inode(inode)) {
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
+ }
+
+ /*
+ * If we are searching for a csum of an extent from a past
+ * transaction, we can search in the commit root and reduce
+ * lock contention on the csum tree extent buffers.
+ *
+ * This is important because that lock is an rwsem which gets
+ * pretty heavy write load under memory pressure and sustained
+ * csum overwrites, unlike the commit_root_sem. (Memory pressure
+ * makes us writeback the nodes multiple times per transaction,
+ * which makes us cow them each time, taking the write lock.)
+ *
+ * Due to how rwsem is implemented, there is a possible
+ * priority inversion where the readers holding the lock don't
+ * get scheduled (say they're in a cgroup stuck in heavy reclaim)
+ * which then blocks writers, including transaction commit. By
+ * using a semaphore with fewer writers (only a commit switching
+ * the roots), we make this issue less likely.
+ *
+ * Note that we don't rely on btrfs_search_slot to lock the
+ * commit root csum. We call search_slot multiple times, which would
+ * create a potential race where a commit comes in between searches
+ * while we are not holding the commit_root_sem, and we get csums
+ * from across transactions.
+ */
+ if (bbio->csum_search_commit_root) {
+ path->search_commit_root = true;
+ path->skip_locking = true;
+ down_read(&fs_info->commit_root_sem);
}
while (bio_offset < orig_len) {
@@ -410,9 +437,9 @@ blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
count = search_csum_tree(fs_info, path, cur_disk_bytenr,
orig_len - bio_offset, csum_dst);
if (count < 0) {
- ret = errno_to_blk_status(count);
+ ret = count;
if (bbio->csum != bbio->csum_inline)
- kfree(bbio->csum);
+ kvfree(bbio->csum);
bbio->csum = NULL;
break;
}
@@ -431,12 +458,12 @@ blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
memset(csum_dst, 0, csum_size);
count = 1;
- if (btrfs_root_id(inode->root) == BTRFS_DATA_RELOC_TREE_OBJECTID) {
+ if (btrfs_is_data_reloc_root(inode->root)) {
u64 file_offset = bbio->file_offset + bio_offset;
- set_extent_bit(&inode->io_tree, file_offset,
- file_offset + sectorsize - 1,
- EXTENT_NODATASUM, NULL);
+ btrfs_set_extent_bit(&inode->io_tree, file_offset,
+ file_offset + sectorsize - 1,
+ EXTENT_NODATASUM, NULL);
} else {
btrfs_warn_rl(fs_info,
"csum hole found for disk bytenr range [%llu, %llu)",
@@ -446,7 +473,8 @@ blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
bio_offset += count * sectorsize;
}
- btrfs_free_path(path);
+ if (bbio->csum_search_commit_root)
+ up_read(&fs_info->commit_root_sem);
return ret;
}
@@ -486,8 +514,8 @@ int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
path->nowait = nowait;
key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
- key.offset = start;
key.type = BTRFS_EXTENT_CSUM_KEY;
+ key.offset = start;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
@@ -737,23 +765,55 @@ fail:
return ret;
}
+static void csum_one_bio(struct btrfs_bio *bbio, struct bvec_iter *src)
+{
+ struct btrfs_inode *inode = bbio->inode;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
+ struct bio *bio = &bbio->bio;
+ struct btrfs_ordered_sum *sums = bbio->sums;
+ struct bvec_iter iter = *src;
+ phys_addr_t paddr;
+ const u32 blocksize = fs_info->sectorsize;
+ const u32 step = min(blocksize, PAGE_SIZE);
+ const u32 nr_steps = blocksize / step;
+ phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
+ u32 offset = 0;
+ int index = 0;
+
+ shash->tfm = fs_info->csum_shash;
+
+ btrfs_bio_for_each_block(paddr, bio, &iter, step) {
+ paddrs[(offset / step) % nr_steps] = paddr;
+ offset += step;
+
+ if (IS_ALIGNED(offset, blocksize)) {
+ btrfs_calculate_block_csum_pages(fs_info, paddrs, sums->sums + index);
+ index += fs_info->csum_size;
+ }
+ }
+}
+
+static void csum_one_bio_work(struct work_struct *work)
+{
+ struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, csum_work);
+
+ ASSERT(btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE);
+ ASSERT(bbio->async_csum == true);
+ csum_one_bio(bbio, &bbio->csum_saved_iter);
+ complete(&bbio->csum_done);
+}
+
/*
* Calculate checksums of the data contained inside a bio.
*/
-blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio)
+int btrfs_csum_one_bio(struct btrfs_bio *bbio, bool async)
{
struct btrfs_ordered_extent *ordered = bbio->ordered;
struct btrfs_inode *inode = bbio->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
struct bio *bio = &bbio->bio;
struct btrfs_ordered_sum *sums;
- char *data;
- struct bvec_iter iter;
- struct bio_vec bvec;
- int index;
- unsigned int blockcount;
- int i;
unsigned nofs_flag;
nofs_flag = memalloc_nofs_save();
@@ -762,35 +822,23 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio)
memalloc_nofs_restore(nofs_flag);
if (!sums)
- return BLK_STS_RESOURCE;
+ return -ENOMEM;
+ sums->logical = bbio->orig_logical;
sums->len = bio->bi_iter.bi_size;
INIT_LIST_HEAD(&sums->list);
-
- sums->logical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
- index = 0;
-
- shash->tfm = fs_info->csum_shash;
-
- bio_for_each_segment(bvec, bio, iter) {
- blockcount = BTRFS_BYTES_TO_BLKS(fs_info,
- bvec.bv_len + fs_info->sectorsize
- - 1);
-
- for (i = 0; i < blockcount; i++) {
- data = bvec_kmap_local(&bvec);
- crypto_shash_digest(shash,
- data + (i * fs_info->sectorsize),
- fs_info->sectorsize,
- sums->sums + index);
- kunmap_local(data);
- index += fs_info->csum_size;
- }
-
- }
-
bbio->sums = sums;
btrfs_add_ordered_sum(ordered, sums);
+
+ if (!async) {
+ csum_one_bio(bbio, &bbio->bio.bi_iter);
+ return 0;
+ }
+ init_completion(&bbio->csum_done);
+ bbio->async_csum = true;
+ bbio->csum_saved_iter = bbio->bio.bi_iter;
+ INIT_WORK(&bbio->csum_work, csum_one_bio_work);
+ schedule_work(&bbio->csum_work);
return 0;
}
@@ -799,11 +847,11 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio)
* record the updated logical address on Zone Append completion.
* Allocate just the structure with an empty sums array here for that case.
*/
-blk_status_t btrfs_alloc_dummy_sum(struct btrfs_bio *bbio)
+int btrfs_alloc_dummy_sum(struct btrfs_bio *bbio)
{
bbio->sums = kmalloc(sizeof(*bbio->sums), GFP_NOFS);
if (!bbio->sums)
- return BLK_STS_RESOURCE;
+ return -ENOMEM;
bbio->sums->len = bbio->bio.bi_iter.bi_size;
bbio->sums->logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
btrfs_add_ordered_sum(bbio->ordered, bbio->sums);
@@ -876,7 +924,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr, u64 len)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
u64 end_byte = bytenr + len;
u64 csum_end;
@@ -894,8 +942,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
while (1) {
key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
- key.offset = end_byte - 1;
key.type = BTRFS_EXTENT_CSUM_KEY;
+ key.offset = end_byte - 1;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0) {
@@ -998,7 +1046,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
* item changed size or key
*/
ret = btrfs_split_item(trans, root, path, &key, offset);
- if (ret && ret != -EAGAIN) {
+ if (unlikely(ret && ret != -EAGAIN)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -1012,7 +1060,6 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
}
- btrfs_free_path(path);
return ret;
}
@@ -1054,7 +1101,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key file_key;
struct btrfs_key found_key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_csum_item *item;
struct btrfs_csum_item *item_end;
struct extent_buffer *leaf = NULL;
@@ -1076,8 +1123,8 @@ again:
found_next = 0;
bytenr = sums->logical + total_bytes;
file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
- file_key.offset = bytenr;
file_key.type = BTRFS_EXTENT_CSUM_KEY;
+ file_key.offset = bytenr;
item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
if (!IS_ERR(item)) {
@@ -1130,10 +1177,10 @@ again:
}
btrfs_release_path(path);
- path->search_for_extension = 1;
+ path->search_for_extension = true;
ret = btrfs_search_slot(trans, root, &file_key, path,
csum_size, 1);
- path->search_for_extension = 0;
+ path->search_for_extension = false;
if (ret < 0)
goto out;
@@ -1259,14 +1306,12 @@ found:
ins_size /= csum_size;
total_bytes += ins_size * fs_info->sectorsize;
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
if (total_bytes < sums->len) {
btrfs_release_path(path);
cond_resched();
goto again;
}
out:
- btrfs_free_path(path);
return ret;
}
@@ -1304,7 +1349,7 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
em->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
em->offset = btrfs_file_extent_offset(leaf, fi);
if (compress_type != BTRFS_COMPRESS_NONE) {
- extent_map_set_compression(em, compress_type);
+ btrfs_extent_map_set_compression(em, compress_type);
} else {
/*
* Older kernels can create regular non-hole data
@@ -1324,7 +1369,7 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
em->start = 0;
em->len = fs_info->sectorsize;
em->offset = 0;
- extent_map_set_compression(em, compress_type);
+ btrfs_extent_map_set_compression(em, compress_type);
} else {
btrfs_err(fs_info,
"unknown file extent item type %d, inode %llu, offset %llu, "
diff --git a/fs/btrfs/file-item.h b/fs/btrfs/file-item.h
index 0e13661a71f3..5645c5e3abdb 100644
--- a/fs/btrfs/file-item.h
+++ b/fs/btrfs/file-item.h
@@ -3,9 +3,11 @@
#ifndef BTRFS_FILE_ITEM_H
#define BTRFS_FILE_ITEM_H
+#include <linux/blk_types.h>
#include <linux/list.h>
#include <uapi/linux/btrfs_tree.h>
-#include "accessors.h"
+#include "ctree.h"
+#include "ordered-data.h"
struct extent_map;
struct btrfs_file_extent_item;
@@ -51,7 +53,7 @@ static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize)
int btrfs_del_csums(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr, u64 len);
-blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio);
+int btrfs_lookup_bio_sums(struct btrfs_bio *bbio);
int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 objectid, u64 pos,
u64 num_bytes);
@@ -62,8 +64,8 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_ordered_sum *sums);
-blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio);
-blk_status_t btrfs_alloc_dummy_sum(struct btrfs_bio *bbio);
+int btrfs_csum_one_bio(struct btrfs_bio *bbio, bool async);
+int btrfs_alloc_dummy_sum(struct btrfs_bio *bbio);
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct list_head *list, int search_commit,
bool nowait);
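/*
 * Editor's note (not part of the patch): with btrfs_lookup_bio_sums(),
 * btrfs_csum_one_bio() and btrfs_alloc_dummy_sum() now returning negative
 * errnos instead of blk_status_t, a bio-submission caller would presumably
 * translate only at the point where the bio is completed, roughly as below
 * (fail_bbio_example() is a hypothetical helper):
 */
static void fail_bbio_example(struct btrfs_bio *bbio, int ret)
{
	/* Map the errno back to a block-layer status only when ending the bio. */
	bbio->bio.bi_status = errno_to_blk_status(ret);
	bio_endio(&bbio->bio);
}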
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 588c353d2969..7a501e73d880 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -36,52 +36,7 @@
#include "ioctl.h"
#include "file.h"
#include "super.h"
-
-/*
- * Helper to fault in page and copy. This should go away and be replaced with
- * calls into generic code.
- */
-static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
- struct folio *folio, struct iov_iter *i)
-{
- size_t copied = 0;
- size_t total_copied = 0;
- int offset = offset_in_page(pos);
-
- while (write_bytes > 0) {
- size_t count = min_t(size_t, PAGE_SIZE - offset, write_bytes);
- /*
- * Copy data from userspace to the current page
- */
- copied = copy_folio_from_iter_atomic(folio, offset, count, i);
-
- /* Flush processor's dcache for this page */
- flush_dcache_folio(folio);
-
- /*
- * if we get a partial write, we can end up with
- * partially up to date page. These add
- * a lot of complexity, so make sure they don't
- * happen by forcing this copy to be retried.
- *
- * The rest of the btrfs_file_write code will fall
- * back to page at a time copies after we return 0.
- */
- if (unlikely(copied < count)) {
- if (!folio_test_uptodate(folio)) {
- iov_iter_revert(i, copied);
- copied = 0;
- }
- if (!copied)
- break;
- }
-
- write_bytes -= copied;
- total_copied += copied;
- offset += copied;
- }
- return total_copied;
-}
+#include "print-tree.h"
/*
* Unlock folio after btrfs_file_write() is done with it.
@@ -106,7 +61,7 @@ static void btrfs_drop_folio(struct btrfs_fs_info *fs_info, struct folio *folio,
}
/*
- * After btrfs_copy_from_user(), update the following things for delalloc:
+ * After copy_folio_from_iter_atomic(), update the following things for delalloc:
* - Mark newly dirtied folio as DELALLOC in the io tree.
* Used to advise which range is to be written back.
* - Mark modified folio as Uptodate/Dirty and not needing COW fixup
@@ -120,7 +75,7 @@ int btrfs_dirty_folio(struct btrfs_inode *inode, struct folio *folio, loff_t pos
u64 num_bytes;
u64 start_pos;
u64 end_of_last_block;
- u64 end_pos = pos + write_bytes;
+ const u64 end_pos = pos + write_bytes;
loff_t isize = i_size_read(&inode->vfs_inode);
unsigned int extra_bits = 0;
@@ -131,11 +86,9 @@ int btrfs_dirty_folio(struct btrfs_inode *inode, struct folio *folio, loff_t pos
extra_bits |= EXTENT_NORESERVE;
start_pos = round_down(pos, fs_info->sectorsize);
- num_bytes = round_up(write_bytes + pos - start_pos,
- fs_info->sectorsize);
+ num_bytes = round_up(end_pos - start_pos, fs_info->sectorsize);
ASSERT(num_bytes <= U32_MAX);
- ASSERT(folio_pos(folio) <= pos &&
- folio_pos(folio) + folio_size(folio) >= pos + write_bytes);
+ ASSERT(folio_pos(folio) <= pos && folio_next_pos(folio) >= end_pos);
end_of_last_block = start_pos + num_bytes - 1;
@@ -143,9 +96,9 @@ int btrfs_dirty_folio(struct btrfs_inode *inode, struct folio *folio, loff_t pos
* The pages may have already been dirty, clear out old accounting so
* we can set things up properly
*/
- clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
- EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- cached);
+ btrfs_clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
+ EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
+ cached);
ret = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
extra_bits, cached);
@@ -224,7 +177,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
if (args->drop_cache)
btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false);
- if (args->start >= inode->disk_i_size && !args->replace_extent)
+ if (data_race(args->start >= inode->disk_i_size) && !args->replace_extent)
modify_tree = 0;
update_refs = (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID);
@@ -245,7 +198,11 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
next_slot:
leaf = path->nodes[0];
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
- BUG_ON(del_nr > 0);
+ if (WARN_ON(del_nr > 0)) {
+ btrfs_print_leaf(leaf);
+ ret = -EINVAL;
+ break;
+ }
ret = btrfs_next_leaf(root, path);
if (ret < 0)
break;
@@ -321,7 +278,11 @@ next_slot:
* | -------- extent -------- |
*/
if (args->start > key.offset && args->end < extent_end) {
- BUG_ON(del_nr > 0);
+ if (WARN_ON(del_nr > 0)) {
+ btrfs_print_leaf(leaf);
+ ret = -EINVAL;
+ break;
+ }
if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
ret = -EOPNOTSUPP;
break;
@@ -351,7 +312,6 @@ next_slot:
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - args->start);
- btrfs_mark_buffer_dirty(trans, leaf);
if (update_refs && disk_bytenr > 0) {
struct btrfs_ref ref = {
@@ -366,7 +326,7 @@ next_slot:
args->start - extent_offset,
0, false);
ret = btrfs_inc_extent_ref(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -397,7 +357,6 @@ next_slot:
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - args->end);
- btrfs_mark_buffer_dirty(trans, leaf);
if (update_refs && disk_bytenr > 0)
args->bytes_found += args->end - key.offset;
break;
@@ -409,7 +368,11 @@ next_slot:
* | -------- extent -------- |
*/
if (args->start > key.offset && args->end >= extent_end) {
- BUG_ON(del_nr > 0);
+ if (WARN_ON(del_nr > 0)) {
+ btrfs_print_leaf(leaf);
+ ret = -EINVAL;
+ break;
+ }
if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
ret = -EOPNOTSUPP;
break;
@@ -417,7 +380,6 @@ next_slot:
btrfs_set_file_extent_num_bytes(leaf, fi,
args->start - key.offset);
- btrfs_mark_buffer_dirty(trans, leaf);
if (update_refs && disk_bytenr > 0)
args->bytes_found += extent_end - args->start;
if (args->end == extent_end)
@@ -437,7 +399,11 @@ delete_extent_item:
del_slot = path->slots[0];
del_nr = 1;
} else {
- BUG_ON(del_slot + del_nr != path->slots[0]);
+ if (WARN_ON(del_slot + del_nr != path->slots[0])) {
+ btrfs_print_leaf(leaf);
+ ret = -EINVAL;
+ break;
+ }
del_nr++;
}
@@ -459,7 +425,7 @@ delete_extent_item:
key.offset - extent_offset,
0, false);
ret = btrfs_free_extent(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -476,7 +442,7 @@ delete_extent_item:
ret = btrfs_del_items(trans, root, path, del_slot,
del_nr);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -540,20 +506,19 @@ out:
return ret;
}
-static int extent_mergeable(struct extent_buffer *leaf, int slot,
- u64 objectid, u64 bytenr, u64 orig_offset,
- u64 *start, u64 *end)
+static bool extent_mergeable(struct extent_buffer *leaf, int slot, u64 objectid,
+ u64 bytenr, u64 orig_offset, u64 *start, u64 *end)
{
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
u64 extent_end;
if (slot < 0 || slot >= btrfs_header_nritems(leaf))
- return 0;
+ return false;
btrfs_item_key_to_cpu(leaf, &key, slot);
if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
- return 0;
+ return false;
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
@@ -562,15 +527,15 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
btrfs_file_extent_compression(leaf, fi) ||
btrfs_file_extent_encryption(leaf, fi) ||
btrfs_file_extent_other_encoding(leaf, fi))
- return 0;
+ return false;
extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
if ((*start && *start != key.offset) || (*end && *end != extent_end))
- return 0;
+ return false;
*start = key.offset;
*end = extent_end;
- return 1;
+ return true;
}
/*
@@ -585,7 +550,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
{
struct btrfs_root *root = inode->root;
struct extent_buffer *leaf;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_file_extent_item *fi;
struct btrfs_ref ref = { 0 };
struct btrfs_key key;
@@ -621,21 +586,20 @@ again:
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- if (key.objectid != ino ||
- key.type != BTRFS_EXTENT_DATA_KEY) {
+ if (unlikely(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)) {
ret = -EINVAL;
btrfs_abort_transaction(trans, ret);
goto out;
}
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
+ if (unlikely(btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC)) {
ret = -EINVAL;
btrfs_abort_transaction(trans, ret);
goto out;
}
extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
- if (key.offset > start || extent_end < end) {
+ if (unlikely(key.offset > start || extent_end < end)) {
ret = -EINVAL;
btrfs_abort_transaction(trans, ret);
goto out;
@@ -668,7 +632,6 @@ again:
trans->transid);
btrfs_set_file_extent_num_bytes(leaf, fi,
end - other_start);
- btrfs_mark_buffer_dirty(trans, leaf);
goto out;
}
}
@@ -697,7 +660,6 @@ again:
other_end - start);
btrfs_set_file_extent_offset(leaf, fi,
start - orig_offset);
- btrfs_mark_buffer_dirty(trans, leaf);
goto out;
}
}
@@ -712,7 +674,7 @@ again:
btrfs_release_path(path);
goto again;
}
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -731,7 +693,6 @@ again:
btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - split);
- btrfs_mark_buffer_dirty(trans, leaf);
ref.action = BTRFS_ADD_DELAYED_REF;
ref.bytenr = bytenr;
@@ -741,7 +702,7 @@ again:
ref.ref_root = btrfs_root_id(root);
btrfs_init_data_ref(&ref, ino, orig_offset, 0, false);
ret = btrfs_inc_extent_ref(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -749,7 +710,7 @@ again:
if (split == start) {
key.offset = start;
} else {
- if (start != key.offset) {
+ if (unlikely(start != key.offset)) {
ret = -EINVAL;
btrfs_abort_transaction(trans, ret);
goto out;
@@ -781,7 +742,7 @@ again:
del_slot = path->slots[0] + 1;
del_nr++;
ret = btrfs_free_extent(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -799,7 +760,7 @@ again:
del_slot = path->slots[0];
del_nr++;
ret = btrfs_free_extent(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -810,7 +771,6 @@ again:
btrfs_set_file_extent_type(leaf, fi,
BTRFS_FILE_EXTENT_REG);
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
- btrfs_mark_buffer_dirty(trans, leaf);
} else {
fi = btrfs_item_ptr(leaf, del_slot - 1,
struct btrfs_file_extent_item);
@@ -819,16 +779,14 @@ again:
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - key.offset);
- btrfs_mark_buffer_dirty(trans, leaf);
ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
}
out:
- btrfs_free_path(path);
return ret;
}
@@ -837,25 +795,25 @@ out:
* On success return a locked folio and 0
*/
static int prepare_uptodate_folio(struct inode *inode, struct folio *folio, u64 pos,
- u64 len, bool force_uptodate)
+ u64 len)
{
u64 clamp_start = max_t(u64, pos, folio_pos(folio));
- u64 clamp_end = min_t(u64, pos + len, folio_pos(folio) + folio_size(folio));
+ u64 clamp_end = min_t(u64, pos + len, folio_next_pos(folio));
+ const u32 blocksize = inode_to_fs_info(inode)->sectorsize;
int ret = 0;
if (folio_test_uptodate(folio))
return 0;
- if (!force_uptodate &&
- IS_ALIGNED(clamp_start, PAGE_SIZE) &&
- IS_ALIGNED(clamp_end, PAGE_SIZE))
+ if (IS_ALIGNED(clamp_start, blocksize) &&
+ IS_ALIGNED(clamp_end, blocksize))
return 0;
ret = btrfs_read_folio(NULL, folio);
if (ret)
return ret;
folio_lock(folio);
- if (!folio_test_uptodate(folio)) {
+ if (unlikely(!folio_test_uptodate(folio))) {
folio_unlock(folio);
return -EIO;
}
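The decision above, reading the folio only when the clamped write range is not block aligned, can be sketched in isolation. The helper below is a simplified userspace model (the name, the 4 KiB block size and the 16 KiB folio size are assumptions for the example; the uptodate check is omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_aligned(uint64_t x, uint64_t a) { return (x % a) == 0; }

/* A read is only needed when the write, clamped to the folio, does not both
 * start and end on block boundaries. */
static bool needs_read(uint64_t pos, uint64_t len,
                       uint64_t folio_start, uint64_t folio_size,
                       uint64_t blocksize)
{
        uint64_t folio_end = folio_start + folio_size;
        uint64_t clamp_start = pos > folio_start ? pos : folio_start;
        uint64_t clamp_end = (pos + len) < folio_end ? (pos + len) : folio_end;

        return !(is_aligned(clamp_start, blocksize) && is_aligned(clamp_end, blocksize));
}

int main(void)
{
        /* Block-aligned write: no read needed. */
        printf("%d\n", needs_read(4096, 8192, 0, 16384, 4096));   /* prints 0 */
        /* Sub-block write: the folio must be read first. */
        printf("%d\n", needs_read(4100, 100, 0, 16384, 4096));    /* prints 1 */
        return 0;
}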
@@ -894,32 +852,27 @@ static gfp_t get_prepare_gfp_flags(struct inode *inode, bool nowait)
*/
static noinline int prepare_one_folio(struct inode *inode, struct folio **folio_ret,
loff_t pos, size_t write_bytes,
- bool force_uptodate, bool nowait)
+ bool nowait)
{
- unsigned long index = pos >> PAGE_SHIFT;
+ const pgoff_t index = pos >> PAGE_SHIFT;
gfp_t mask = get_prepare_gfp_flags(inode, nowait);
- fgf_t fgp_flags = (nowait ? FGP_WRITEBEGIN | FGP_NOWAIT : FGP_WRITEBEGIN);
+ fgf_t fgp_flags = (nowait ? FGP_WRITEBEGIN | FGP_NOWAIT : FGP_WRITEBEGIN) |
+ fgf_set_order(write_bytes);
struct folio *folio;
int ret = 0;
again:
folio = __filemap_get_folio(inode->i_mapping, index, fgp_flags, mask);
- if (IS_ERR(folio)) {
- if (nowait)
- ret = -EAGAIN;
- else
- ret = PTR_ERR(folio);
- return ret;
- }
- /* Only support page sized folio yet. */
- ASSERT(folio_order(folio) == 0);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
ret = set_folio_extent_mapped(folio);
if (ret < 0) {
folio_unlock(folio);
folio_put(folio);
return ret;
}
- ret = prepare_uptodate_folio(inode, folio, pos, write_bytes, force_uptodate);
+ ret = prepare_uptodate_folio(inode, folio, pos, write_bytes);
if (ret) {
/* The folio is already unlocked. */
folio_put(folio);
@@ -960,14 +913,15 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio,
struct btrfs_ordered_extent *ordered;
if (nowait) {
- if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
- cached_state)) {
+ if (!btrfs_try_lock_extent(&inode->io_tree, start_pos,
+ last_pos, cached_state)) {
folio_unlock(folio);
folio_put(folio);
return -EAGAIN;
}
} else {
- lock_extent(&inode->io_tree, start_pos, last_pos, cached_state);
+ btrfs_lock_extent(&inode->io_tree, start_pos, last_pos,
+ cached_state);
}
ordered = btrfs_lookup_ordered_range(inode, start_pos,
@@ -975,8 +929,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio,
if (ordered &&
ordered->file_offset + ordered->num_bytes > start_pos &&
ordered->file_offset <= last_pos) {
- unlock_extent(&inode->io_tree, start_pos, last_pos,
- cached_state);
+ btrfs_unlock_extent(&inode->io_tree, start_pos, last_pos,
+ cached_state);
folio_unlock(folio);
folio_put(folio);
btrfs_start_ordered_extent(ordered);
@@ -1006,6 +960,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio,
* @pos: File offset.
* @write_bytes: The length to write, will be updated to the nocow writeable
* range.
+ * @nowait: Indicate whether we are in a non-blocking IO context and must not block.

*
* This function will flush ordered extents in the range to ensure proper
* nocow checks.
@@ -1013,8 +968,9 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio,
* Return:
* > 0 If we can nocow, and updates @write_bytes.
* 0 If we can't do a nocow write.
- * -EAGAIN If we can't do a nocow write because snapshoting of the inode's
- * root is in progress.
+ * -EAGAIN If we can't do a nocow write because snapshotting of the inode's
+ * root is in progress or because we are in a non-blocking IO
+ * context and need to block (@nowait is true).
* < 0 If an error happened.
*
* NOTE: Callers need to call btrfs_check_nocow_unlock() if we return > 0.
@@ -1026,8 +982,8 @@ int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
struct btrfs_root *root = inode->root;
struct extent_state *cached_state = NULL;
u64 lockstart, lockend;
- u64 num_bytes;
- int ret;
+ u64 cur_offset;
+ int ret = 0;
if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
return 0;
@@ -1038,7 +994,6 @@ int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
lockstart = round_down(pos, fs_info->sectorsize);
lockend = round_up(pos + *write_bytes,
fs_info->sectorsize) - 1;
- num_bytes = lockend - lockstart + 1;
if (nowait) {
if (!btrfs_try_lock_ordered_range(inode, lockstart, lockend,
@@ -1050,14 +1005,35 @@ int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend,
&cached_state);
}
- ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
- NULL, nowait, false);
- if (ret <= 0)
- btrfs_drew_write_unlock(&root->snapshot_lock);
- else
- *write_bytes = min_t(size_t, *write_bytes ,
- num_bytes - pos + lockstart);
- unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+
+ cur_offset = lockstart;
+ while (cur_offset < lockend) {
+ u64 num_bytes = lockend - cur_offset + 1;
+
+ ret = can_nocow_extent(inode, cur_offset, &num_bytes, NULL, nowait);
+ if (ret <= 0) {
+ /*
+ * If cur_offset == lockstart it means we haven't found
+ * any extent against which we can NOCOW, so unlock the
+ * snapshot lock.
+ */
+ if (cur_offset == lockstart)
+ btrfs_drew_write_unlock(&root->snapshot_lock);
+ break;
+ }
+ cur_offset += num_bytes;
+ }
+
+ btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+
+ /*
+ * cur_offset > lockstart means there's at least a partial range we can
+ * NOCOW, and that range can cover one or more extents.
+ */
+ if (cur_offset > lockstart) {
+ *write_bytes = min_t(size_t, *write_bytes, cur_offset - pos);
+ return 1;
+ }
return ret;
}
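The new loop walks the locked range forward and accumulates the contiguous prefix that can be written without COW. A minimal standalone sketch of that pattern, with can_nocow_extent() replaced by a stub, might look like this:

#include <stdint.h>
#include <stdio.h>

/* Stub standing in for can_nocow_extent(): pretend only the first 8 KiB of
 * the file can be written without COW. */
static int can_nocow(uint64_t offset, uint64_t *num_bytes)
{
        if (offset >= 8192)
                return 0;                    /* would have to COW from here on */
        if (offset + *num_bytes > 8192)
                *num_bytes = 8192 - offset;  /* trim to the NOCOW-able extent */
        return 1;
}

int main(void)
{
        uint64_t lockstart = 0, lockend = 16383;   /* a 16 KiB range, inclusive end */
        uint64_t cur = lockstart;

        while (cur < lockend) {
                uint64_t num_bytes = lockend - cur + 1;

                if (can_nocow(cur, &num_bytes) <= 0)
                        break;
                cur += num_bytes;
        }
        if (cur > lockstart)
                printf("NOCOW-able prefix: %llu bytes\n",
                       (unsigned long long)(cur - lockstart));
        else
                printf("nothing to NOCOW, fall back to COW\n");
        return 0;
}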
@@ -1075,7 +1051,6 @@ int btrfs_write_check(struct kiocb *iocb, size_t count)
loff_t pos = iocb->ki_pos;
int ret;
loff_t oldsize;
- loff_t start_pos;
/*
* Quickly bail out on NOWAIT writes if we don't have the nodatacow or
@@ -1102,9 +1077,8 @@ int btrfs_write_check(struct kiocb *iocb, size_t count)
inode_inc_iversion(inode);
}
- start_pos = round_down(pos, fs_info->sectorsize);
oldsize = i_size_read(inode);
- if (start_pos > oldsize) {
+ if (pos > oldsize) {
/* Expand hole size to cover write data, preventing empty gap */
loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
@@ -1116,218 +1090,306 @@ int btrfs_write_check(struct kiocb *iocb, size_t count)
return 0;
}
-ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
+static void release_space(struct btrfs_inode *inode, struct extent_changeset *data_reserved,
+ u64 start, u64 len, bool only_release_metadata)
{
- struct file *file = iocb->ki_filp;
- loff_t pos;
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- struct extent_changeset *data_reserved = NULL;
- u64 release_bytes = 0;
- u64 lockstart;
- u64 lockend;
- size_t num_written = 0;
- ssize_t ret;
- loff_t old_isize = i_size_read(inode);
- unsigned int ilock_flags = 0;
- const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
- unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);
- bool only_release_metadata = false;
+ if (len == 0)
+ return;
- if (nowait)
- ilock_flags |= BTRFS_ILOCK_TRY;
+ if (only_release_metadata) {
+ btrfs_check_nocow_unlock(inode);
+ btrfs_delalloc_release_metadata(inode, len, true);
+ } else {
+ const struct btrfs_fs_info *fs_info = inode->root->fs_info;
- ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
- if (ret < 0)
- return ret;
+ btrfs_delalloc_release_space(inode, data_reserved,
+ round_down(start, fs_info->sectorsize),
+ len, true);
+ }
+}
- ret = generic_write_checks(iocb, i);
- if (ret <= 0)
- goto out;
+/*
+ * Reserve data and metadata space for this buffered write range.
+ *
+ * Return >0 for the number of bytes reserved, which is always block aligned.
+ * Return <0 for error.
+ */
+static ssize_t reserve_space(struct btrfs_inode *inode,
+ struct extent_changeset **data_reserved,
+ u64 start, size_t *len, bool nowait,
+ bool *only_release_metadata)
+{
+ const struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ const unsigned int block_offset = (start & (fs_info->sectorsize - 1));
+ size_t reserve_bytes;
+ int ret;
- ret = btrfs_write_check(iocb, ret);
- if (ret < 0)
- goto out;
+ ret = btrfs_check_data_free_space(inode, data_reserved, start, *len, nowait);
+ if (ret < 0) {
+ int can_nocow;
- pos = iocb->ki_pos;
- while (iov_iter_count(i) > 0) {
- struct extent_state *cached_state = NULL;
- size_t offset = offset_in_page(pos);
- size_t sector_offset;
- size_t write_bytes = min(iov_iter_count(i), PAGE_SIZE - offset);
- size_t reserve_bytes;
- size_t copied;
- size_t dirty_sectors;
- size_t num_sectors;
- struct folio *folio = NULL;
- int extents_locked;
- bool force_page_uptodate = false;
+ if (nowait && (ret == -ENOSPC || ret == -EAGAIN))
+ return -EAGAIN;
/*
- * Fault pages before locking them in prepare_one_folio()
- * to avoid recursive lock
+ * If we don't have to COW at the offset, reserve metadata only.
+ * write_bytes may get smaller than requested here.
*/
- if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
- ret = -EFAULT;
- break;
- }
+ can_nocow = btrfs_check_nocow_lock(inode, start, len, nowait);
+ if (can_nocow < 0)
+ ret = can_nocow;
+ if (can_nocow > 0)
+ ret = 0;
+ if (ret)
+ return ret;
+ *only_release_metadata = true;
+ }
- only_release_metadata = false;
- sector_offset = pos & (fs_info->sectorsize - 1);
+ reserve_bytes = round_up(*len + block_offset, fs_info->sectorsize);
+ WARN_ON(reserve_bytes == 0);
+ ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes,
+ reserve_bytes, nowait);
+ if (ret) {
+ if (!*only_release_metadata)
+ btrfs_free_reserved_data_space(inode, *data_reserved,
+ start, *len);
+ else
+ btrfs_check_nocow_unlock(inode);
- extent_changeset_release(data_reserved);
- ret = btrfs_check_data_free_space(BTRFS_I(inode),
- &data_reserved, pos,
- write_bytes, nowait);
- if (ret < 0) {
- int can_nocow;
+ if (nowait && ret == -ENOSPC)
+ ret = -EAGAIN;
+ return ret;
+ }
+ return reserve_bytes;
+}
- if (nowait && (ret == -ENOSPC || ret == -EAGAIN)) {
- ret = -EAGAIN;
- break;
- }
+/* Shrink the reserved data and metadata space from @reserved_len to @new_len. */
+static void shrink_reserved_space(struct btrfs_inode *inode,
+ struct extent_changeset *data_reserved,
+ u64 reserved_start, u64 reserved_len,
+ u64 new_len, bool only_release_metadata)
+{
+ const u64 diff = reserved_len - new_len;
- /*
- * If we don't have to COW at the offset, reserve
- * metadata only. write_bytes may get smaller than
- * requested here.
- */
- can_nocow = btrfs_check_nocow_lock(BTRFS_I(inode), pos,
- &write_bytes, nowait);
- if (can_nocow < 0)
- ret = can_nocow;
- if (can_nocow > 0)
- ret = 0;
- if (ret)
- break;
- only_release_metadata = true;
- }
+ ASSERT(new_len <= reserved_len);
+ btrfs_delalloc_shrink_extents(inode, reserved_len, new_len);
+ if (only_release_metadata)
+ btrfs_delalloc_release_metadata(inode, diff, true);
+ else
+ btrfs_delalloc_release_space(inode, data_reserved,
+ reserved_start + new_len, diff, true);
+}
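The arithmetic is small but easy to misread: only the tail of the reservation is returned, starting at reserved_start + new_len. A trivial standalone sketch with example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t reserved_start = 65536;   /* example values only */
        uint64_t reserved_len = 16384;
        uint64_t new_len = 4096;
        uint64_t diff = reserved_len - new_len;

        /* The released tail is [reserved_start + new_len, reserved_start + reserved_len). */
        printf("release %llu bytes at offset %llu\n",
               (unsigned long long)diff,
               (unsigned long long)(reserved_start + new_len));
        return 0;
}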
- reserve_bytes = round_up(write_bytes + sector_offset,
- fs_info->sectorsize);
- WARN_ON(reserve_bytes == 0);
- ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
- reserve_bytes,
- reserve_bytes, nowait);
- if (ret) {
- if (!only_release_metadata)
- btrfs_free_reserved_data_space(BTRFS_I(inode),
- data_reserved, pos,
- write_bytes);
- else
- btrfs_check_nocow_unlock(BTRFS_I(inode));
+/* Calculate the maximum amount of bytes we can write into one folio. */
+static size_t calc_write_bytes(const struct btrfs_inode *inode,
+ const struct iov_iter *iter, u64 start)
+{
+ const size_t max_folio_size = mapping_max_folio_size(inode->vfs_inode.i_mapping);
- if (nowait && ret == -ENOSPC)
- ret = -EAGAIN;
- break;
- }
+ return min(max_folio_size - (start & (max_folio_size - 1)),
+ iov_iter_count(iter));
+}
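A standalone illustration of the cap computed above, assuming a 64 KiB maximum folio size (the real value comes from mapping_max_folio_size()):

#include <stdint.h>
#include <stdio.h>

#define MAX_FOLIO_SIZE 65536u

/* The copy never crosses a max-folio-size boundary. */
static uint64_t calc_write_bytes(uint64_t start, uint64_t iov_remaining)
{
        uint64_t to_boundary = MAX_FOLIO_SIZE - (start & (MAX_FOLIO_SIZE - 1));

        return to_boundary < iov_remaining ? to_boundary : iov_remaining;
}

int main(void)
{
        /* 4 KiB into a folio, 1 MiB left in the iov: capped at 60 KiB. */
        printf("%llu\n", (unsigned long long)calc_write_bytes(4096, 1 << 20));  /* 61440 */
        /* Less data than the cap: take it all. */
        printf("%llu\n", (unsigned long long)calc_write_bytes(4096, 512));      /* 512 */
        return 0;
}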
+
+/*
+ * Do the heavy-lifting work to copy one range into one folio of the page cache.
+ *
+ * Return > 0 in case we copied all bytes or just some of them.
+ * Return 0 if no bytes were copied, in which case the caller should retry.
+ * Return <0 on error.
+ */
+static int copy_one_range(struct btrfs_inode *inode, struct iov_iter *iter,
+ struct extent_changeset **data_reserved, u64 start,
+ bool nowait)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct extent_state *cached_state = NULL;
+ size_t write_bytes = calc_write_bytes(inode, iter, start);
+ size_t copied;
+ const u64 reserved_start = round_down(start, fs_info->sectorsize);
+ u64 reserved_len;
+ struct folio *folio = NULL;
+ int extents_locked;
+ u64 lockstart;
+ u64 lockend;
+ bool only_release_metadata = false;
+ const unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);
+ int ret;
+
+ /*
+ * Fault all pages before locking them in prepare_one_folio() to avoid
+ * recursive lock.
+ */
+ if (unlikely(fault_in_iov_iter_readable(iter, write_bytes)))
+ return -EFAULT;
+ extent_changeset_release(*data_reserved);
+ ret = reserve_space(inode, data_reserved, start, &write_bytes, nowait,
+ &only_release_metadata);
+ if (ret < 0)
+ return ret;
+ reserved_len = ret;
+ /* Write range must be inside the reserved range. */
+ ASSERT(reserved_start <= start);
+ ASSERT(start + write_bytes <= reserved_start + reserved_len);
- release_bytes = reserve_bytes;
again:
- ret = balance_dirty_pages_ratelimited_flags(inode->i_mapping, bdp_flags);
- if (ret) {
- btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
- break;
- }
+ ret = balance_dirty_pages_ratelimited_flags(inode->vfs_inode.i_mapping,
+ bdp_flags);
+ if (ret) {
+ btrfs_delalloc_release_extents(inode, reserved_len);
+ release_space(inode, *data_reserved, reserved_start, reserved_len,
+ only_release_metadata);
+ return ret;
+ }
- ret = prepare_one_folio(inode, &folio, pos, write_bytes,
- force_page_uptodate, false);
- if (ret) {
- btrfs_delalloc_release_extents(BTRFS_I(inode),
- reserve_bytes);
- break;
- }
+ ret = prepare_one_folio(&inode->vfs_inode, &folio, start, write_bytes, false);
+ if (ret) {
+ btrfs_delalloc_release_extents(inode, reserved_len);
+ release_space(inode, *data_reserved, reserved_start, reserved_len,
+ only_release_metadata);
+ return ret;
+ }
- extents_locked = lock_and_cleanup_extent_if_need(BTRFS_I(inode),
- folio, pos, write_bytes, &lockstart,
- &lockend, nowait, &cached_state);
- if (extents_locked < 0) {
- if (!nowait && extents_locked == -EAGAIN)
- goto again;
+ /*
+ * The reserved range goes beyond the current folio; shrink the reserved
+ * space to the folio boundary.
+ */
+ if (reserved_start + reserved_len > folio_next_pos(folio)) {
+ const u64 last_block = folio_next_pos(folio);
+
+ shrink_reserved_space(inode, *data_reserved, reserved_start,
+ reserved_len, last_block - reserved_start,
+ only_release_metadata);
+ write_bytes = last_block - start;
+ reserved_len = last_block - reserved_start;
+ }
+
+ extents_locked = lock_and_cleanup_extent_if_need(inode, folio, start,
+ write_bytes, &lockstart,
+ &lockend, nowait,
+ &cached_state);
+ if (extents_locked < 0) {
+ if (!nowait && extents_locked == -EAGAIN)
+ goto again;
- btrfs_delalloc_release_extents(BTRFS_I(inode),
- reserve_bytes);
- ret = extents_locked;
- break;
- }
+ btrfs_delalloc_release_extents(inode, reserved_len);
+ release_space(inode, *data_reserved, reserved_start, reserved_len,
+ only_release_metadata);
+ ret = extents_locked;
+ return ret;
+ }
- copied = btrfs_copy_from_user(pos, write_bytes, folio, i);
+ copied = copy_folio_from_iter_atomic(folio, offset_in_folio(folio, start),
+ write_bytes, iter);
+ flush_dcache_folio(folio);
- num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
- dirty_sectors = round_up(copied + sector_offset,
- fs_info->sectorsize);
- dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
+ if (unlikely(copied < write_bytes)) {
+ u64 last_block;
- if (copied == 0) {
- force_page_uptodate = true;
- dirty_sectors = 0;
- } else {
- force_page_uptodate = false;
+ /*
+ * The original write range doesn't need an uptodate folio as
+ * the range is block aligned. But now a short copy happened.
+ * We cannot handle it without an uptodate folio.
+ *
+ * So just revert the range and we will retry.
+ */
+ if (!folio_test_uptodate(folio)) {
+ iov_iter_revert(iter, copied);
+ copied = 0;
}
- if (num_sectors > dirty_sectors) {
- /* release everything except the sectors we dirtied */
- release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
- if (only_release_metadata) {
- btrfs_delalloc_release_metadata(BTRFS_I(inode),
- release_bytes, true);
- } else {
- u64 release_start = round_up(pos + copied,
- fs_info->sectorsize);
- btrfs_delalloc_release_space(BTRFS_I(inode),
- data_reserved, release_start,
- release_bytes, true);
- }
+ /* No copied bytes, unlock, release reserved space and exit. */
+ if (copied == 0) {
+ if (extents_locked)
+ btrfs_unlock_extent(&inode->io_tree, lockstart, lockend,
+ &cached_state);
+ else
+ btrfs_free_extent_state(cached_state);
+ btrfs_delalloc_release_extents(inode, reserved_len);
+ release_space(inode, *data_reserved, reserved_start, reserved_len,
+ only_release_metadata);
+ btrfs_drop_folio(fs_info, folio, start, copied);
+ return 0;
}
- release_bytes = round_up(copied + sector_offset,
- fs_info->sectorsize);
+ /* Release the reserved space beyond the last block. */
+ last_block = round_up(start + copied, fs_info->sectorsize);
- ret = btrfs_dirty_folio(BTRFS_I(inode), folio, pos, copied,
- &cached_state, only_release_metadata);
+ shrink_reserved_space(inode, *data_reserved, reserved_start,
+ reserved_len, last_block - reserved_start,
+ only_release_metadata);
+ reserved_len = last_block - reserved_start;
+ }
- /*
- * If we have not locked the extent range, because the range's
- * start offset is >= i_size, we might still have a non-NULL
- * cached extent state, acquired while marking the extent range
- * as delalloc through btrfs_dirty_page(). Therefore free any
- * possible cached extent state to avoid a memory leak.
- */
- if (extents_locked)
- unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, &cached_state);
- else
- free_extent_state(cached_state);
+ ret = btrfs_dirty_folio(inode, folio, start, copied, &cached_state,
+ only_release_metadata);
+ /*
+ * If we have not locked the extent range, because the range's start
+ * offset is >= i_size, we might still have a non-NULL cached extent
+ * state, acquired while marking the extent range as delalloc through
+ * btrfs_dirty_page(). Therefore free any possible cached extent state
+ * to avoid a memory leak.
+ */
+ if (extents_locked)
+ btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+ else
+ btrfs_free_extent_state(cached_state);
- btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
- if (ret) {
- btrfs_drop_folio(fs_info, folio, pos, copied);
- break;
- }
+ btrfs_delalloc_release_extents(inode, reserved_len);
+ if (ret) {
+ btrfs_drop_folio(fs_info, folio, start, copied);
+ release_space(inode, *data_reserved, reserved_start, reserved_len,
+ only_release_metadata);
+ return ret;
+ }
+ if (only_release_metadata)
+ btrfs_check_nocow_unlock(inode);
- release_bytes = 0;
- if (only_release_metadata)
- btrfs_check_nocow_unlock(BTRFS_I(inode));
+ btrfs_drop_folio(fs_info, folio, start, copied);
+ return copied;
+}
- btrfs_drop_folio(fs_info, folio, pos, copied);
+ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct file *file = iocb->ki_filp;
+ loff_t pos;
+ struct inode *inode = file_inode(file);
+ struct extent_changeset *data_reserved = NULL;
+ size_t num_written = 0;
+ ssize_t ret;
+ loff_t old_isize;
+ unsigned int ilock_flags = 0;
+ const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
- cond_resched();
+ if (nowait)
+ ilock_flags |= BTRFS_ILOCK_TRY;
- pos += copied;
- num_written += copied;
- }
+ ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
+ if (ret < 0)
+ return ret;
- if (release_bytes) {
- if (only_release_metadata) {
- btrfs_check_nocow_unlock(BTRFS_I(inode));
- btrfs_delalloc_release_metadata(BTRFS_I(inode),
- release_bytes, true);
- } else {
- btrfs_delalloc_release_space(BTRFS_I(inode),
- data_reserved,
- round_down(pos, fs_info->sectorsize),
- release_bytes, true);
- }
+ /*
+ * We can only trust the isize with the inode lock held, otherwise it can
+ * race with other buffered writes and cause an incorrect call to
+ * pagecache_isize_extended() that overwrites existing data.
+ */
+ old_isize = i_size_read(inode);
+
+ ret = generic_write_checks(iocb, iter);
+ if (ret <= 0)
+ goto out;
+
+ ret = btrfs_write_check(iocb, ret);
+ if (ret < 0)
+ goto out;
+
+ pos = iocb->ki_pos;
+ while (iov_iter_count(iter) > 0) {
+ ret = copy_one_range(BTRFS_I(inode), iter, &data_reserved, pos, nowait);
+ if (ret < 0)
+ break;
+ pos += ret;
+ num_written += ret;
+ cond_resched();
}
extent_changeset_free(data_reserved);
@@ -1378,6 +1440,8 @@ ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
struct btrfs_inode *inode = BTRFS_I(file_inode(file));
ssize_t num_written, num_sync;
+ if (unlikely(btrfs_is_shutdown(inode->root->fs_info)))
+ return -EIO;
/*
* If the fs flips readonly due to some impossible error, although we
* have opened a file as writable, we have to stop this write operation
@@ -1422,7 +1486,7 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
if (private) {
kfree(private->filldir_buf);
- free_extent_state(private->llseek_cached_state);
+ btrfs_free_extent_state(private->llseek_cached_state);
kfree(private);
filp->private_data = NULL;
}
@@ -1790,27 +1854,25 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct folio *folio = page_folio(page);
- struct inode *inode = file_inode(vmf->vma->vm_file);
- struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ struct btrfs_inode *inode = BTRFS_I(file_inode(vmf->vma->vm_file));
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct extent_io_tree *io_tree = &inode->io_tree;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
struct extent_changeset *data_reserved = NULL;
unsigned long zero_start;
loff_t size;
- vm_fault_t ret;
- int ret2;
- int reserved = 0;
+ size_t fsize = folio_size(folio);
+ int ret;
+ bool only_release_metadata = false;
u64 reserved_space;
u64 page_start;
u64 page_end;
u64 end;
- ASSERT(folio_order(folio) == 0);
+ reserved_space = fsize;
- reserved_space = PAGE_SIZE;
-
- sb_start_pagefault(inode->i_sb);
+ sb_start_pagefault(inode->vfs_inode.i_sb);
page_start = folio_pos(folio);
page_end = page_start + folio_size(folio) - 1;
end = page_end;
@@ -1823,38 +1885,53 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
* end up waiting indefinitely to get a lock on the page currently
* being processed by btrfs_page_mkwrite() function.
*/
- ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
- page_start, reserved_space);
- if (!ret2) {
- ret2 = file_update_time(vmf->vma->vm_file);
- reserved = 1;
- }
- if (ret2) {
- ret = vmf_error(ret2);
- if (reserved)
- goto out;
+ ret = btrfs_check_data_free_space(inode, &data_reserved, page_start,
+ reserved_space, false);
+ if (ret < 0) {
+ size_t write_bytes = reserved_space;
+
+ if (btrfs_check_nocow_lock(inode, page_start, &write_bytes, false) <= 0)
+ goto out_noreserve;
+
+ only_release_metadata = true;
+
+ /*
+ * We can't write the whole range: there may be shared extents or
+ * holes in it. Bail out with @only_release_metadata set to true so
+ * that we unlock the nocow lock before returning the error.
+ */
+ if (write_bytes < reserved_space)
+ goto out_noreserve;
+ }
+ ret = btrfs_delalloc_reserve_metadata(inode, reserved_space,
+ reserved_space, false);
+ if (ret < 0) {
+ if (!only_release_metadata)
+ btrfs_free_reserved_data_space(inode, data_reserved,
+ page_start, reserved_space);
goto out_noreserve;
}
- /* Make the VM retry the fault. */
- ret = VM_FAULT_NOPAGE;
+ ret = file_update_time(vmf->vma->vm_file);
+ if (ret < 0)
+ goto out;
again:
- down_read(&BTRFS_I(inode)->i_mmap_lock);
+ down_read(&inode->i_mmap_lock);
folio_lock(folio);
- size = i_size_read(inode);
+ size = i_size_read(&inode->vfs_inode);
- if ((folio->mapping != inode->i_mapping) ||
+ if ((folio->mapping != inode->vfs_inode.i_mapping) ||
(page_start >= size)) {
/* Page got truncated out from underneath us. */
goto out_unlock;
}
folio_wait_writeback(folio);
- lock_extent(io_tree, page_start, page_end, &cached_state);
- ret2 = set_folio_extent_mapped(folio);
- if (ret2 < 0) {
- ret = vmf_error(ret2);
- unlock_extent(io_tree, page_start, page_end, &cached_state);
+ btrfs_lock_extent(io_tree, page_start, page_end, &cached_state);
+ ret = set_folio_extent_mapped(folio);
+ if (ret < 0) {
+ btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
goto out_unlock;
}
@@ -1862,23 +1939,27 @@ again:
* We can't set the delalloc bits if there are pending ordered
* extents. Drop our locks and wait for them to finish.
*/
- ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, PAGE_SIZE);
+ ordered = btrfs_lookup_ordered_range(inode, page_start, fsize);
if (ordered) {
- unlock_extent(io_tree, page_start, page_end, &cached_state);
+ btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
folio_unlock(folio);
- up_read(&BTRFS_I(inode)->i_mmap_lock);
+ up_read(&inode->i_mmap_lock);
btrfs_start_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
goto again;
}
- if (folio->index == ((size - 1) >> PAGE_SHIFT)) {
+ if (folio_contains(folio, (size - 1) >> PAGE_SHIFT)) {
reserved_space = round_up(size - page_start, fs_info->sectorsize);
- if (reserved_space < PAGE_SIZE) {
+ if (reserved_space < fsize) {
+ const u64 to_free = fsize - reserved_space;
+
end = page_start + reserved_space - 1;
- btrfs_delalloc_release_space(BTRFS_I(inode),
- data_reserved, page_start,
- PAGE_SIZE - reserved_space, true);
+ if (only_release_metadata)
+ btrfs_delalloc_release_metadata(inode, to_free, true);
+ else
+ btrfs_delalloc_release_space(inode, data_reserved,
+ end + 1, to_free, true);
}
}
@@ -1889,15 +1970,13 @@ again:
* clear any delalloc bits within this page range since we have to
* reserve data&meta space before lock_page() (see above comments).
*/
- clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
- EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
- EXTENT_DEFRAG, &cached_state);
-
- ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
- &cached_state);
- if (ret2) {
- unlock_extent(io_tree, page_start, page_end, &cached_state);
- ret = VM_FAULT_SIGBUS;
+ btrfs_clear_extent_bit(io_tree, page_start, end,
+ EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
+ EXTENT_DEFRAG, &cached_state);
+
+ ret = btrfs_set_extent_delalloc(inode, page_start, end, 0, &cached_state);
+ if (ret < 0) {
+ btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
goto out_unlock;
}
@@ -1905,36 +1984,53 @@ again:
if (page_start + folio_size(folio) > size)
zero_start = offset_in_folio(folio, size);
else
- zero_start = PAGE_SIZE;
+ zero_start = fsize;
- if (zero_start != PAGE_SIZE)
+ if (zero_start != fsize)
folio_zero_range(folio, zero_start, folio_size(folio) - zero_start);
- btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
+ btrfs_folio_clear_checked(fs_info, folio, page_start, fsize);
btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start);
btrfs_folio_set_uptodate(fs_info, folio, page_start, end + 1 - page_start);
- btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
+ btrfs_set_inode_last_sub_trans(inode);
- unlock_extent(io_tree, page_start, page_end, &cached_state);
- up_read(&BTRFS_I(inode)->i_mmap_lock);
+ if (only_release_metadata)
+ btrfs_set_extent_bit(io_tree, page_start, end, EXTENT_NORESERVE,
+ &cached_state);
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
- sb_end_pagefault(inode->i_sb);
+ btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
+ up_read(&inode->i_mmap_lock);
+
+ btrfs_delalloc_release_extents(inode, fsize);
+ if (only_release_metadata)
+ btrfs_check_nocow_unlock(inode);
+ sb_end_pagefault(inode->vfs_inode.i_sb);
extent_changeset_free(data_reserved);
return VM_FAULT_LOCKED;
out_unlock:
folio_unlock(folio);
- up_read(&BTRFS_I(inode)->i_mmap_lock);
+ up_read(&inode->i_mmap_lock);
out:
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
- btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
- reserved_space, (ret != 0));
-out_noreserve:
- sb_end_pagefault(inode->i_sb);
+ btrfs_delalloc_release_extents(inode, fsize);
+ if (only_release_metadata)
+ btrfs_delalloc_release_metadata(inode, reserved_space, true);
+ else
+ btrfs_delalloc_release_space(inode, data_reserved, page_start,
+ reserved_space, true);
extent_changeset_free(data_reserved);
- return ret;
+out_noreserve:
+ if (only_release_metadata)
+ btrfs_check_nocow_unlock(inode);
+
+ sb_end_pagefault(inode->vfs_inode.i_sb);
+
+ if (ret < 0)
+ return vmf_error(ret);
+
+ /* Make the VM retry the fault. */
+ return VM_FAULT_NOPAGE;
}
static const struct vm_operations_struct btrfs_file_vm_ops = {
@@ -1943,46 +2039,49 @@ static const struct vm_operations_struct btrfs_file_vm_ops = {
.page_mkwrite = btrfs_page_mkwrite,
};
-static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
+static int btrfs_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *filp = desc->file;
struct address_space *mapping = filp->f_mapping;
+ if (unlikely(btrfs_is_shutdown(inode_to_fs_info(file_inode(filp)))))
+ return -EIO;
if (!mapping->a_ops->read_folio)
return -ENOEXEC;
file_accessed(filp);
- vma->vm_ops = &btrfs_file_vm_ops;
+ desc->vm_ops = &btrfs_file_vm_ops;
return 0;
}
-static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
- int slot, u64 start, u64 end)
+static bool hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
+ int slot, u64 start, u64 end)
{
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
if (slot < 0 || slot >= btrfs_header_nritems(leaf))
- return 0;
+ return false;
btrfs_item_key_to_cpu(leaf, &key, slot);
if (key.objectid != btrfs_ino(inode) ||
key.type != BTRFS_EXTENT_DATA_KEY)
- return 0;
+ return false;
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
- return 0;
+ return false;
if (btrfs_file_extent_disk_bytenr(leaf, fi))
- return 0;
+ return false;
if (key.offset == end)
- return 1;
+ return true;
if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
- return 1;
- return 0;
+ return true;
+ return false;
}
static int fill_holes(struct btrfs_trans_handle *trans,
@@ -2028,7 +2127,6 @@ static int fill_holes(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_offset(leaf, fi, 0);
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
- btrfs_mark_buffer_dirty(trans, leaf);
goto out;
}
@@ -2045,7 +2143,6 @@ static int fill_holes(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_offset(leaf, fi, 0);
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
- btrfs_mark_buffer_dirty(trans, leaf);
goto out;
}
btrfs_release_path(path);
@@ -2058,7 +2155,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
out:
btrfs_release_path(path);
- hole_em = alloc_extent_map();
+ hole_em = btrfs_alloc_extent_map();
if (!hole_em) {
btrfs_drop_extent_map_range(inode, offset, end - 1, false);
btrfs_set_inode_full_sync(inode);
@@ -2072,7 +2169,7 @@ out:
hole_em->generation = trans->transid;
ret = btrfs_replace_extent_map_range(inode, hole_em, true);
- free_extent_map(hole_em);
+ btrfs_free_extent_map(hole_em);
if (ret)
btrfs_set_inode_full_sync(inode);
}
@@ -2105,15 +2202,33 @@ static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
0 : *start + *len - em->start - em->len;
*start = em->start + em->len;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
return ret;
}
-static void btrfs_punch_hole_lock_range(struct inode *inode,
- const u64 lockstart,
- const u64 lockend,
- struct extent_state **cached_state)
+/*
+ * Check if there is no folio in the range.
+ *
+ * We cannot utilize filemap_range_has_page() in a filemap with large folios
+ * as we can hit the following false positive:
+ *
+ *       start                           end
+ *       |                               |
+ *  |//|//|//|//|   |   |   |   |   |   |//|//|
+ *   \          /                        \     /
+ *     Folio A                            Folio B
+ *
+ * Large folios A and B cover the start and end indexes.
+ * In that case filemap_range_has_page() will always return true, but the above
+ * case is fine for btrfs_punch_hole_lock_range() usage.
+ *
+ * So here we only ensure that no other folio is in the range, excluding the
+ * head/tail large folios.
+ */
+static bool check_range_has_page(struct inode *inode, u64 start, u64 end)
{
+ struct folio_batch fbatch;
+ bool ret = false;
/*
* For subpage case, if the range is not at page boundary, we could
* have pages at the leading/tailing part of the range.
@@ -2121,15 +2236,48 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
* will always return true.
* So here we need to do extra page alignment for
* filemap_range_has_page().
+ *
+ * And do not decrease page_lockend right now, as it can be 0.
*/
- const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
- const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
+ const u64 page_lockstart = round_up(start, PAGE_SIZE);
+ const u64 page_lockend = round_down(end + 1, PAGE_SIZE);
+ const pgoff_t start_index = page_lockstart >> PAGE_SHIFT;
+ const pgoff_t end_index = (page_lockend - 1) >> PAGE_SHIFT;
+ pgoff_t tmp = start_index;
+ int found_folios;
+
+ /* The same page or adjacent pages. */
+ if (page_lockend <= page_lockstart)
+ return false;
+
+ folio_batch_init(&fbatch);
+ found_folios = filemap_get_folios(inode->i_mapping, &tmp, end_index, &fbatch);
+ for (int i = 0; i < found_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
+
+ /* A large folio begins before the start. Not a target. */
+ if (folio->index < start_index)
+ continue;
+ /* A large folio extends beyond the end. Not a target. */
+ if (folio_next_index(folio) > end_index)
+ continue;
+ /* A folio doesn't cover the head/tail index. Found a target. */
+ ret = true;
+ break;
+ }
+ folio_batch_release(&fbatch);
+ return ret;
+}
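The head/tail exclusion boils down to rounding start up and end + 1 down to page boundaries and checking whether anything remains in between. A standalone sketch of just that arithmetic, assuming 4 KiB pages:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_EX 4096u

static uint64_t round_up_ex(uint64_t x, uint64_t a)   { return ((x + a - 1) / a) * a; }
static uint64_t round_down_ex(uint64_t x, uint64_t a) { return (x / a) * a; }

/* Only page indexes strictly between the head and tail pages are candidates;
 * when the rounded bounds meet or cross there is nothing to check. */
static bool has_interior_pages(uint64_t start, uint64_t end /* inclusive */)
{
        uint64_t page_lockstart = round_up_ex(start, PAGE_SIZE_EX);
        uint64_t page_lockend = round_down_ex(end + 1, PAGE_SIZE_EX);

        /* Same page or adjacent pages: no interior page index exists. */
        return page_lockend > page_lockstart;
}

int main(void)
{
        printf("%d\n", has_interior_pages(100, 200));    /* 0: inside one page */
        printf("%d\n", has_interior_pages(100, 9000));   /* 1: the page at 4096 is interior */
        printf("%d\n", has_interior_pages(4000, 4200));  /* 0: adjacent pages only */
        return 0;
}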
+static void btrfs_punch_hole_lock_range(struct inode *inode,
+ const u64 lockstart, const u64 lockend,
+ struct extent_state **cached_state)
+{
while (1) {
truncate_pagecache_range(inode, lockstart, lockend);
- lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- cached_state);
+ btrfs_lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ cached_state);
/*
* We can't have ordered extents in the range, nor dirty/writeback
* pages, because we have locked the inode's VFS lock in exclusive
@@ -2140,12 +2288,11 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
* locking the range check if we have pages in the range, and if
* we do, unlock the range and retry.
*/
- if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
- page_lockend))
+ if (!check_range_has_page(inode, lockstart, lockend))
break;
- unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ cached_state);
}
btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend);
@@ -2193,7 +2340,6 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
if (extent_info->is_new_extent)
btrfs_set_file_extent_generation(leaf, extent, trans->transid);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
@@ -2259,7 +2405,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
struct btrfs_trans_handle *trans = NULL;
- struct btrfs_block_rsv *rsv;
+ struct btrfs_block_rsv rsv;
unsigned int rsv_count;
u64 cur_offset;
u64 len = end - start;
@@ -2268,13 +2414,9 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
if (end <= start)
return -EINVAL;
- rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
- if (!rsv) {
- ret = -ENOMEM;
- goto out;
- }
- rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
- rsv->failfast = true;
+ btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
+ rsv.size = btrfs_calc_insert_metadata_size(fs_info, 1);
+ rsv.failfast = true;
/*
* 1 - update the inode
@@ -2291,14 +2433,14 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
- goto out_free;
+ goto out_release;
}
- ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
+ ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, &rsv,
min_size, false);
if (WARN_ON(ret))
goto out_trans;
- trans->block_rsv = rsv;
+ trans->block_rsv = &rsv;
cur_offset = start;
drop_args.path = path;
@@ -2320,9 +2462,9 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
* got EOPNOTSUPP via prealloc then we messed up and
* need to abort.
*/
- if (ret &&
- (ret != -EOPNOTSUPP ||
- (extent_info && extent_info->is_new_extent)))
+ if (unlikely(ret &&
+ (ret != -EOPNOTSUPP ||
+ (extent_info && extent_info->is_new_extent))))
btrfs_abort_transaction(trans, ret);
break;
}
@@ -2333,7 +2475,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
cur_offset < ino_size) {
ret = fill_holes(trans, inode, path, cur_offset,
drop_args.drop_end);
- if (ret) {
+ if (unlikely(ret)) {
/*
* If we failed then we didn't insert our hole
* entries for the area we dropped, so now the
@@ -2353,7 +2495,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
ret = btrfs_inode_clear_file_extent_range(inode,
cur_offset,
drop_args.drop_end - cur_offset);
- if (ret) {
+ if (unlikely(ret)) {
/*
* We couldn't clear our area, so we could
* presumably adjust up and corrupt the fs, so
@@ -2372,7 +2514,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
ret = btrfs_insert_replace_extent(trans, inode, path,
extent_info, replace_len,
drop_args.bytes_found);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -2414,10 +2556,10 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
}
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
- rsv, min_size, false);
+ &rsv, min_size, false);
if (WARN_ON(ret))
break;
- trans->block_rsv = rsv;
+ trans->block_rsv = &rsv;
cur_offset = drop_args.drop_end;
len = end - cur_offset;
@@ -2467,7 +2609,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
cur_offset < drop_args.drop_end) {
ret = fill_holes(trans, inode, path, cur_offset,
drop_args.drop_end);
- if (ret) {
+ if (unlikely(ret)) {
/* Same comment as above. */
btrfs_abort_transaction(trans, ret);
goto out_trans;
@@ -2476,7 +2618,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
/* See the comment in the loop above for the reasoning here. */
ret = btrfs_inode_clear_file_extent_range(inode, cur_offset,
drop_args.drop_end - cur_offset);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_trans;
}
@@ -2486,7 +2628,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
ret = btrfs_insert_replace_extent(trans, inode, path,
extent_info, extent_info->data_len,
drop_args.bytes_found);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_trans;
}
@@ -2494,16 +2636,15 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
out_trans:
if (!trans)
- goto out_free;
+ goto out_release;
trans->block_rsv = &fs_info->trans_block_rsv;
if (ret)
btrfs_end_transaction(trans);
else
*trans_out = trans;
-out_free:
- btrfs_free_block_rsv(fs_info, rsv);
-out:
+out_release:
+ btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
return ret;
}
@@ -2519,7 +2660,8 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
u64 lockend;
u64 tail_start;
u64 tail_len;
- u64 orig_start = offset;
+ const u64 orig_start = offset;
+ const u64 orig_end = offset + len - 1;
int ret = 0;
bool same_block;
u64 ino_size;
@@ -2551,18 +2693,14 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
/*
- * We needn't truncate any block which is beyond the end of the file
- * because we are sure there is no data there.
- */
- /*
* Only do this if we are in the same block and we aren't doing the
* entire block.
*/
if (same_block && len < fs_info->sectorsize) {
if (offset < ino_size) {
truncated_block = true;
- ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
- 0);
+ ret = btrfs_truncate_block(BTRFS_I(inode), offset + len - 1,
+ orig_start, orig_end);
} else {
ret = 0;
}
@@ -2572,7 +2710,7 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
/* zero back part of the first block */
if (offset < ino_size) {
truncated_block = true;
- ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
+ ret = btrfs_truncate_block(BTRFS_I(inode), offset, orig_start, orig_end);
if (ret) {
btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
return ret;
@@ -2609,8 +2747,8 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
if (tail_start + tail_len < ino_size) {
truncated_block = true;
ret = btrfs_truncate_block(BTRFS_I(inode),
- tail_start + tail_len,
- 0, 1);
+ tail_start + tail_len - 1,
+ orig_start, orig_end);
if (ret)
goto out_only_mutex;
}
@@ -2644,8 +2782,8 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
out:
- unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ &cached_state);
out_only_mutex:
if (!updated_inode && truncated_block && !ret) {
/*
@@ -2719,12 +2857,22 @@ static int btrfs_fallocate_update_isize(struct inode *inode,
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
+ u64 range_start;
+ u64 range_end;
int ret;
int ret2;
if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
return 0;
+ range_start = round_down(i_size_read(inode), root->fs_info->sectorsize);
+ range_end = round_up(end, root->fs_info->sectorsize);
+
+ ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), range_start,
+ range_end - range_start);
+ if (ret)
+ return ret;
+
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans))
return PTR_ERR(trans);
@@ -2763,7 +2911,7 @@ static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
else
ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
return ret;
}
@@ -2778,6 +2926,8 @@ static int btrfs_zero_range(struct inode *inode,
int ret;
u64 alloc_hint = 0;
const u64 sectorsize = fs_info->sectorsize;
+ const u64 orig_start = offset;
+ const u64 orig_end = offset + len - 1;
u64 alloc_start = round_down(offset, sectorsize);
u64 alloc_end = round_up(offset + len, sectorsize);
u64 bytes_to_reserve = 0;
@@ -2807,7 +2957,7 @@ static int btrfs_zero_range(struct inode *inode,
* do nothing except updating the inode's i_size if
* needed.
*/
- free_extent_map(em);
+ btrfs_free_extent_map(em);
ret = btrfs_fallocate_update_isize(inode, offset + len,
mode);
goto out;
@@ -2820,9 +2970,9 @@ static int btrfs_zero_range(struct inode *inode,
ASSERT(IS_ALIGNED(alloc_start, sectorsize));
len = offset + len - alloc_start;
offset = alloc_start;
- alloc_hint = extent_map_block_start(em) + em->len;
+ alloc_hint = btrfs_extent_map_block_start(em) + em->len;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
@@ -2833,22 +2983,22 @@ static int btrfs_zero_range(struct inode *inode,
}
if (em->flags & EXTENT_FLAG_PREALLOC) {
- free_extent_map(em);
+ btrfs_free_extent_map(em);
ret = btrfs_fallocate_update_isize(inode, offset + len,
mode);
goto out;
}
if (len < sectorsize && em->disk_bytenr != EXTENT_MAP_HOLE) {
- free_extent_map(em);
- ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
- 0);
+ btrfs_free_extent_map(em);
+ ret = btrfs_truncate_block(BTRFS_I(inode), offset + len - 1,
+ orig_start, orig_end);
if (!ret)
ret = btrfs_fallocate_update_isize(inode,
offset + len,
mode);
return ret;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
alloc_start = round_down(offset, sectorsize);
alloc_end = alloc_start + sectorsize;
goto reserve_space;
@@ -2872,7 +3022,8 @@ static int btrfs_zero_range(struct inode *inode,
alloc_start = round_down(offset, sectorsize);
ret = 0;
} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
- ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
+ ret = btrfs_truncate_block(BTRFS_I(inode), offset,
+ orig_start, orig_end);
if (ret)
goto out;
} else {
@@ -2889,8 +3040,8 @@ static int btrfs_zero_range(struct inode *inode,
alloc_end = round_up(offset + len, sectorsize);
ret = 0;
} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
- ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
- 0, 1);
+ ret = btrfs_truncate_block(BTRFS_I(inode), offset + len - 1,
+ orig_start, orig_end);
if (ret)
goto out;
} else {
@@ -2915,16 +3066,16 @@ reserve_space:
ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
alloc_start, bytes_to_reserve);
if (ret) {
- unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, &cached_state);
goto out;
}
ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
alloc_end - alloc_start,
fs_info->sectorsize,
offset + len, &alloc_hint);
- unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ &cached_state);
/* btrfs_prealloc_file_range releases reserved space on error */
if (ret) {
space_reserved = false;
@@ -2964,6 +3115,9 @@ static long btrfs_fallocate(struct file *file, int mode,
int blocksize = BTRFS_I(inode)->root->fs_info->sectorsize;
int ret;
+ if (unlikely(btrfs_is_shutdown(inode_to_fs_info(inode))))
+ return -EIO;
+
/* Do not allow fallocate in ZONED mode */
if (btrfs_is_zoned(inode_to_fs_info(inode)))
return -EOPNOTSUPP;
@@ -3010,7 +3164,8 @@ static long btrfs_fallocate(struct file *file, int mode,
* need to zero out the end of the block if i_size lands in the
* middle of a block.
*/
- ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
+ ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size,
+ inode->i_size, (u64)-1);
if (ret)
goto out;
}
@@ -3035,8 +3190,8 @@ static long btrfs_fallocate(struct file *file, int mode,
}
locked_end = alloc_end - 1;
- lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
- &cached_state);
+ btrfs_lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+ &cached_state);
btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
@@ -3048,8 +3203,8 @@ static long btrfs_fallocate(struct file *file, int mode,
ret = PTR_ERR(em);
break;
}
- last_byte = min(extent_map_end(em), alloc_end);
- actual_end = min_t(u64, extent_map_end(em), offset + len);
+ last_byte = min(btrfs_extent_map_end(em), alloc_end);
+ actual_end = min_t(u64, btrfs_extent_map_end(em), offset + len);
last_byte = ALIGN(last_byte, blocksize);
if (em->disk_bytenr == EXTENT_MAP_HOLE ||
(cur_offset >= inode->i_size &&
@@ -3058,19 +3213,19 @@ static long btrfs_fallocate(struct file *file, int mode,
ret = add_falloc_range(&reserve_list, cur_offset, range_len);
if (ret < 0) {
- free_extent_map(em);
+ btrfs_free_extent_map(em);
break;
}
ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
&data_reserved, cur_offset, range_len);
if (ret < 0) {
- free_extent_map(em);
+ btrfs_free_extent_map(em);
break;
}
qgroup_reserved += range_len;
data_space_needed += range_len;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
cur_offset = last_byte;
}
@@ -3124,8 +3279,8 @@ static long btrfs_fallocate(struct file *file, int mode,
*/
ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
out_unlock:
- unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
- &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+ &cached_state);
out:
btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
extent_changeset_free(data_reserved);
@@ -3159,10 +3314,10 @@ static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end
if (inode->delalloc_bytes > 0) {
spin_unlock(&inode->lock);
*delalloc_start_ret = start;
- delalloc_len = count_range_bits(&inode->io_tree,
- delalloc_start_ret, end,
- len, EXTENT_DELALLOC, 1,
- cached_state);
+ delalloc_len = btrfs_count_range_bits(&inode->io_tree,
+ delalloc_start_ret, end,
+ len, EXTENT_DELALLOC, 1,
+ cached_state);
} else {
spin_unlock(&inode->lock);
}
@@ -3205,7 +3360,7 @@ static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end
* We could also use the extent map tree to find such delalloc that is
* being flushed, but using the ordered extents tree is more efficient
* because it's usually much smaller as ordered extents are removed from
- * the tree once they complete. With the extent maps, we mau have them
+ * the tree once they complete. With the extent maps, we may have them
* in the extent map tree for a very long time, and they were either
* created by previous writes or loaded by read operations.
*/
@@ -3471,7 +3626,7 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
last_extent_end = lockstart;
- lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0) {
@@ -3617,7 +3772,7 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
}
out:
- unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
btrfs_free_path(path);
if (ret < 0)
@@ -3654,6 +3809,9 @@ static int btrfs_file_open(struct inode *inode, struct file *filp)
{
int ret;
+ if (unlikely(btrfs_is_shutdown(inode_to_fs_info(inode))))
+ return -EIO;
+
filp->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT;
ret = fsverity_file_open(inode, filp);
@@ -3666,6 +3824,9 @@ static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
ssize_t ret = 0;
+ if (unlikely(btrfs_is_shutdown(inode_to_fs_info(file_inode(iocb->ki_filp)))))
+ return -EIO;
+
if (iocb->ki_flags & IOCB_DIRECT) {
ret = btrfs_direct_read(iocb, to);
if (ret < 0 || !iov_iter_count(to) ||
@@ -3676,13 +3837,23 @@ static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
return filemap_read(iocb, to, ret);
}
+static ssize_t btrfs_file_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags)
+{
+ if (unlikely(btrfs_is_shutdown(inode_to_fs_info(file_inode(in)))))
+ return -EIO;
+
+ return filemap_splice_read(in, ppos, pipe, len, flags);
+}
+
const struct file_operations btrfs_file_operations = {
.llseek = btrfs_file_llseek,
.read_iter = btrfs_file_read_iter,
- .splice_read = filemap_splice_read,
+ .splice_read = btrfs_file_splice_read,
.write_iter = btrfs_file_write_iter,
.splice_write = iter_file_splice_write,
- .mmap = btrfs_file_mmap,
+ .mmap_prepare = btrfs_file_mmap_prepare,
.open = btrfs_file_open,
.release = btrfs_release_file,
.get_unmapped_area = thp_get_unmapped_area,
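The three hunks above gate btrfs_file_open(), btrfs_file_read_iter() and the new btrfs_file_splice_read() behind a shutdown check so no new work starts on a filesystem that has been shut down. A minimal sketch of that guard idiom, with invented names (fs_ctx, fs_is_shutdown and demo_open are not btrfs APIs; the patch itself uses btrfs_is_shutdown(inode_to_fs_info(...))):

#include <errno.h>
#include <stdatomic.h>

struct fs_ctx {
        _Atomic int shutdown;
};

static int fs_is_shutdown(struct fs_ctx *fs)
{
        return atomic_load(&fs->shutdown);
}

/* Every entry point re-checks the flag and refuses new work with -EIO. */
static int demo_open(struct fs_ctx *fs)
{
        if (fs_is_shutdown(fs))
                return -EIO;
        /* ... normal open path ... */
        return 0;
}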
diff --git a/fs/btrfs/file.h b/fs/btrfs/file.h
index de89e644be29..d7df81388cbe 100644
--- a/fs/btrfs/file.h
+++ b/fs/btrfs/file.h
@@ -9,6 +9,8 @@ struct file;
struct extent_state;
struct kiocb;
struct iov_iter;
+struct inode;
+struct folio;
struct page;
struct btrfs_ioctl_encoded_io_args;
struct btrfs_drop_extents_args;
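The header hunk above only adds forward declarations. A forward declaration is enough when a header merely passes pointers to a type around, so the full definition (and its include) can stay out of the header; a minimal illustration (demo_prepare is an invented name):

struct folio;                           /* declaration only, no #include needed */
int demo_prepare(struct folio *folio);  /* a pointer parameter needs no full type */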
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index cfa52ef40b06..f0f72850fab2 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -12,7 +12,7 @@
#include <linux/error-injection.h>
#include <linux/sched/mm.h>
#include <linux/string_choices.h>
-#include "ctree.h"
+#include "extent-tree.h"
#include "fs.h"
#include "messages.h"
#include "misc.h"
@@ -88,13 +88,13 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
struct btrfs_disk_key disk_key;
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
- struct inode *inode = NULL;
+ struct btrfs_inode *inode;
unsigned nofs_flag;
int ret;
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = offset;
key.type = 0;
+ key.offset = offset;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
@@ -120,13 +120,13 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
btrfs_release_path(path);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(inode))
- return inode;
+ return ERR_CAST(inode);
- mapping_set_gfp_mask(inode->i_mapping,
- mapping_gfp_constraint(inode->i_mapping,
+ mapping_set_gfp_mask(inode->vfs_inode.i_mapping,
+ mapping_gfp_constraint(inode->vfs_inode.i_mapping,
~(__GFP_FS | __GFP_HIGHMEM)));
- return inode;
+ return &inode->vfs_inode;
}
struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
@@ -198,12 +198,11 @@ static int __create_free_space_inode(struct btrfs_root *root,
btrfs_set_inode_nlink(leaf, inode_item, 1);
btrfs_set_inode_transid(leaf, inode_item, trans->transid);
btrfs_set_inode_block_group(leaf, inode_item, offset);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = offset;
key.type = 0;
+ key.offset = offset;
ret = btrfs_insert_empty_item(trans, root, path, &key,
sizeof(struct btrfs_free_space_header));
if (ret < 0) {
@@ -216,7 +215,6 @@ static int __create_free_space_inode(struct btrfs_root *root,
struct btrfs_free_space_header);
memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
btrfs_set_free_space_key(leaf, header, &disk_key);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
return 0;
@@ -246,7 +244,7 @@ int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
struct inode *inode,
struct btrfs_block_group *block_group)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
int ret = 0;
@@ -259,12 +257,12 @@ int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
if (IS_ERR(inode)) {
if (PTR_ERR(inode) != -ENOENT)
ret = PTR_ERR(inode);
- goto out;
+ return ret;
}
ret = btrfs_orphan_add(trans, BTRFS_I(inode));
if (ret) {
btrfs_add_delayed_iput(BTRFS_I(inode));
- goto out;
+ return ret;
}
clear_nlink(inode);
/* One for the block groups ref */
@@ -287,12 +285,9 @@ int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
if (ret) {
if (ret > 0)
ret = 0;
- goto out;
+ return ret;
}
- ret = btrfs_del_item(trans, trans->fs_info->tree_root, path);
-out:
- btrfs_free_path(path);
- return ret;
+ return btrfs_del_item(trans, trans->fs_info->tree_root, path);
}
int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
@@ -313,8 +308,9 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
bool locked = false;
if (block_group) {
- struct btrfs_path *path = btrfs_alloc_path();
+ BTRFS_PATH_AUTO_FREE(path);
+ path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto fail;
@@ -335,13 +331,12 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
spin_lock(&block_group->lock);
block_group->disk_cache_state = BTRFS_DC_CLEAR;
spin_unlock(&block_group->lock);
- btrfs_free_path(path);
}
btrfs_i_size_write(inode, 0);
truncate_pagecache(vfs_inode, 0);
- lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
/*
@@ -353,7 +348,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
- unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
if (ret)
goto fail;
@@ -371,7 +366,7 @@ fail:
static void readahead_cache(struct inode *inode)
{
struct file_ra_state ra;
- unsigned long last_index;
+ pgoff_t last_index;
file_ra_state_init(&ra, inode->i_mapping);
last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
@@ -449,7 +444,7 @@ static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
{
- struct page *page;
+ struct folio *folio;
struct inode *inode = io_ctl->inode;
gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
int i;
@@ -457,31 +452,33 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
for (i = 0; i < io_ctl->num_pages; i++) {
int ret;
- page = find_or_create_page(inode->i_mapping, i, mask);
- if (!page) {
+ folio = __filemap_get_folio(inode->i_mapping, i,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ mask);
+ if (IS_ERR(folio)) {
io_ctl_drop_pages(io_ctl);
- return -ENOMEM;
+ return PTR_ERR(folio);
}
- ret = set_page_extent_mapped(page);
+ ret = set_folio_extent_mapped(folio);
if (ret < 0) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
io_ctl_drop_pages(io_ctl);
return ret;
}
- io_ctl->pages[i] = page;
- if (uptodate && !PageUptodate(page)) {
- btrfs_read_folio(NULL, page_folio(page));
- lock_page(page);
- if (page->mapping != inode->i_mapping) {
+ io_ctl->pages[i] = &folio->page;
+ if (uptodate && !folio_test_uptodate(folio)) {
+ btrfs_read_folio(NULL, folio);
+ folio_lock(folio);
+ if (folio->mapping != inode->i_mapping) {
btrfs_err(BTRFS_I(inode)->root->fs_info,
"free space cache page truncated");
io_ctl_drop_pages(io_ctl);
return -EIO;
}
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
btrfs_err(BTRFS_I(inode)->root->fs_info,
"error reading free space cache");
io_ctl_drop_pages(io_ctl);
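The hunk above is part of the page-to-folio conversion of io_ctl_prepare_pages(): find_or_create_page(), which signalled failure with NULL, is replaced by __filemap_get_folio(), which reports failure as an ERR_PTR()-encoded errno, hence the switch from a NULL test to IS_ERR()/PTR_ERR(). A self-contained userspace sketch of that pointer-encoded-error convention (demo_get_object is invented; the helpers mirror the kernel's ERR_PTR family):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Encode a small negative errno at the top of the address space. */
static inline void *ERR_PTR(long error)   { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static int backing_object;

/* Invented lookup following the same convention as __filemap_get_folio(). */
static void *demo_get_object(int fail)
{
        if (fail)
                return ERR_PTR(-ENOMEM);        /* failure: errno, not NULL */
        return &backing_object;
}

int main(void)
{
        void *obj = demo_get_object(1);

        if (IS_ERR(obj))
                printf("lookup failed: %ld\n", PTR_ERR(obj));
        return 0;
}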
@@ -755,8 +752,8 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
return 0;
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = offset;
key.type = 0;
+ key.offset = offset;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
@@ -971,8 +968,8 @@ int load_free_space_cache(struct btrfs_block_group *block_group)
path = btrfs_alloc_path();
if (!path)
return 0;
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
/*
* We must pass a path with search_commit_root set to btrfs_iget in
@@ -1083,9 +1080,8 @@ int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
/* Get the cluster for this block_group if it exists */
if (block_group && !list_empty(&block_group->cluster_list)) {
- cluster = list_entry(block_group->cluster_list.next,
- struct btrfs_free_cluster,
- block_group_list);
+ cluster = list_first_entry(&block_group->cluster_list,
+ struct btrfs_free_cluster, block_group_list);
}
if (!node && cluster) {
@@ -1158,13 +1154,13 @@ update_cache_item(struct btrfs_trans_handle *trans,
int ret;
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = offset;
key.type = 0;
+ key.offset = offset;
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret < 0) {
- clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
- EXTENT_DELALLOC, NULL);
+ btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
+ EXTENT_DELALLOC, NULL);
goto fail;
}
leaf = path->nodes[0];
@@ -1175,9 +1171,9 @@ update_cache_item(struct btrfs_trans_handle *trans,
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
found_key.offset != offset) {
- clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
- inode->i_size - 1, EXTENT_DELALLOC,
- NULL);
+ btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
+ inode->i_size - 1, EXTENT_DELALLOC,
+ NULL);
btrfs_release_path(path);
goto fail;
}
@@ -1189,7 +1185,6 @@ update_cache_item(struct btrfs_trans_handle *trans,
btrfs_set_free_space_entries(leaf, header, entries);
btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
btrfs_set_free_space_generation(leaf, header, trans->transid);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
return 0;
@@ -1223,9 +1218,9 @@ static noinline_for_stack int write_pinned_extent_entries(
start = block_group->start;
while (start < block_group->start + block_group->length) {
- if (!find_first_extent_bit(unpin, start,
- &extent_start, &extent_end,
- EXTENT_DIRTY, NULL))
+ if (!btrfs_find_first_extent_bit(unpin, start,
+ &extent_start, &extent_end,
+ EXTENT_DIRTY, NULL))
return 0;
/* This pinned extent is out of our range */
@@ -1271,8 +1266,8 @@ static int flush_dirty_cache(struct inode *inode)
ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
if (ret)
- clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
- EXTENT_DELALLOC, NULL);
+ btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
+ EXTENT_DELALLOC, NULL);
return ret;
}
@@ -1292,8 +1287,8 @@ cleanup_write_cache_enospc(struct inode *inode,
struct extent_state **cached_state)
{
io_ctl_drop_pages(io_ctl);
- unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
- cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+ cached_state);
}
static int __btrfs_wait_cache_io(struct btrfs_root *root,
@@ -1418,8 +1413,8 @@ static int __btrfs_write_out_cache(struct inode *inode,
if (ret)
goto out_unlock;
- lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
- &cached_state);
+ btrfs_lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+ &cached_state);
io_ctl_set_generation(io_ctl, trans->transid);
@@ -1479,8 +1474,8 @@ static int __btrfs_write_out_cache(struct inode *inode,
io_ctl_drop_pages(io_ctl);
io_ctl_free(io_ctl);
- unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
- &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+ &cached_state);
/*
* at this point the pages are under IO and we're happy,
@@ -2287,7 +2282,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
* If this block group has some small extents we don't want to
* use up all of our free slots in the cache with them, we want
* to reserve them to larger extents, however if we have plenty
- * of cache left then go ahead an dadd them, no sense in adding
+ * of cache left then go ahead and add them, no sense in adding
* the overhead of a bitmap if we don't have to.
*/
if (info->bytes <= fs_info->sectorsize * 8) {
@@ -2346,9 +2341,8 @@ again:
struct rb_node *node;
struct btrfs_free_space *entry;
- cluster = list_entry(block_group->cluster_list.next,
- struct btrfs_free_cluster,
- block_group_list);
+ cluster = list_first_entry(&block_group->cluster_list,
+ struct btrfs_free_cluster, block_group_list);
spin_lock(&cluster->lock);
node = rb_first(&cluster->root);
if (!node) {
@@ -3198,7 +3192,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group,
u64 *max_extent_size)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
- int err;
+ int ret2;
u64 search_start = cluster->window_start;
u64 search_bytes = bytes;
u64 ret = 0;
@@ -3206,8 +3200,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group,
search_start = min_start;
search_bytes = bytes;
- err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
- if (err) {
+ ret2 = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
+ if (ret2) {
*max_extent_size = max(get_max_extent_size(entry),
*max_extent_size);
return 0;
@@ -3662,7 +3656,7 @@ static int do_trimming(struct btrfs_block_group *block_group,
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
int ret;
- int update = 0;
+ bool bg_ro;
const u64 end = start + bytes;
const u64 reserved_end = reserved_start + reserved_bytes;
enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
@@ -3670,12 +3664,14 @@ static int do_trimming(struct btrfs_block_group *block_group,
spin_lock(&space_info->lock);
spin_lock(&block_group->lock);
- if (!block_group->ro) {
+ bg_ro = block_group->ro;
+ if (!bg_ro) {
block_group->reserved += reserved_bytes;
+ spin_unlock(&block_group->lock);
space_info->bytes_reserved += reserved_bytes;
- update = 1;
+ } else {
+ spin_unlock(&block_group->lock);
}
- spin_unlock(&block_group->lock);
spin_unlock(&space_info->lock);
ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
@@ -3696,14 +3692,16 @@ static int do_trimming(struct btrfs_block_group *block_group,
list_del(&trim_entry->list);
mutex_unlock(&ctl->cache_writeout_mutex);
- if (update) {
+ if (!bg_ro) {
spin_lock(&space_info->lock);
spin_lock(&block_group->lock);
- if (block_group->ro)
- space_info->bytes_readonly += reserved_bytes;
+ bg_ro = block_group->ro;
block_group->reserved -= reserved_bytes;
- space_info->bytes_reserved -= reserved_bytes;
spin_unlock(&block_group->lock);
+
+ space_info->bytes_reserved -= reserved_bytes;
+ if (bg_ro)
+ space_info->bytes_readonly += reserved_bytes;
spin_unlock(&space_info->lock);
}
@@ -3835,7 +3833,7 @@ out_unlock:
/*
* If we break out of trimming a bitmap prematurely, we should reset the
- * trimming bit. In a rather contrieved case, it's possible to race here so
+ * trimming bit. In a rather contrived case, it's possible to race here so
* reset the state to BTRFS_TRIM_STATE_UNTRIMMED.
*
* start = start of bitmap
@@ -4148,7 +4146,7 @@ int btrfs_set_free_space_cache_v1_active(struct btrfs_fs_info *fs_info, bool act
if (!active) {
set_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags);
ret = cleanup_free_space_cache_v1(fs_info, trans);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
goto out;
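The pattern above, wrapping the error check in unlikely() and calling btrfs_abort_transaction() right at the failing call instead of at a shared out: label, recurs throughout the rest of this series. unlikely() is purely a branch-prediction hint and changes no behaviour. A reduced sketch of the shape, with invented names (txn, txn_abort, step_one, step_two):

#include <errno.h>

#define unlikely(x) __builtin_expect(!!(x), 0)  /* hint only */

/* Invented stand-ins for the transaction handle and abort helper. */
struct txn { int aborted; };
static void txn_abort(struct txn *t, int err) { t->aborted = err; }

static int step_one(struct txn *t) { (void)t; return 0; }
static int step_two(struct txn *t) { (void)t; return -EIO; }

/* Abort at the failing call site and return; no shared "out:" label. */
static int demo(struct txn *t)
{
        int ret;

        ret = step_one(t);
        if (unlikely(ret)) {
                txn_abort(t, ret);
                return ret;
        }

        ret = step_two(t);
        if (unlikely(ret)) {
                txn_abort(t, ret);
                return ret;
        }

        return 0;
}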
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index 7ba50e133921..1ad2ad384b9e 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -35,7 +35,7 @@ static struct btrfs_root *btrfs_free_space_root(
return btrfs_global_root(block_group->fs_info, &key);
}
-void set_free_space_tree_thresholds(struct btrfs_block_group *cache)
+void btrfs_set_free_space_tree_thresholds(struct btrfs_block_group *cache)
{
u32 bitmap_range;
size_t bitmap_size;
@@ -82,23 +82,19 @@ static int add_new_free_space_info(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
if (ret)
- goto out;
+ return ret;
leaf = path->nodes[0];
info = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_free_space_info);
btrfs_set_free_space_extent_count(leaf, info, 0);
btrfs_set_free_space_flags(leaf, info, 0);
- btrfs_mark_buffer_dirty(trans, leaf);
-
- ret = 0;
-out:
btrfs_release_path(path);
- return ret;
+ return 0;
}
EXPORT_FOR_TESTS
-struct btrfs_free_space_info *search_free_space_info(
+struct btrfs_free_space_info *btrfs_search_free_space_info(
struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group,
struct btrfs_path *path, int cow)
@@ -118,7 +114,7 @@ struct btrfs_free_space_info *search_free_space_info(
if (ret != 0) {
btrfs_warn(fs_info, "missing free space info for %llu",
block_group->start);
- ASSERT(0);
+ DEBUG_WARN();
return ERR_PTR(-ENOENT);
}
@@ -141,13 +137,13 @@ static int btrfs_search_prev_slot(struct btrfs_trans_handle *trans,
if (ret < 0)
return ret;
- if (ret == 0) {
- ASSERT(0);
+ if (unlikely(ret == 0)) {
+ DEBUG_WARN();
return -EIO;
}
- if (p->slots[0] == 0) {
- ASSERT(0);
+ if (unlikely(p->slots[0] == 0)) {
+ DEBUG_WARN("no previous slot found");
return -EIO;
}
p->slots[0]--;
@@ -169,11 +165,9 @@ static unsigned long *alloc_bitmap(u32 bitmap_size)
/*
* GFP_NOFS doesn't work with kvmalloc(), but we really can't recurse
- * into the filesystem as the free space bitmap can be modified in the
- * critical section of a transaction commit.
- *
- * TODO: push the memalloc_nofs_{save,restore}() to the caller where we
- * know that recursion is unsafe.
+ * into the filesystem here. All callers hold a transaction handle
+ * open, so if a GFP_KERNEL allocation recurses into the filesystem
+ * and triggers a transaction commit, we would deadlock.
*/
nofs_flag = memalloc_nofs_save();
ret = kvzalloc(bitmap_rounded_size, GFP_KERNEL);
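The reworded comment above spells out why the allocation sits inside a NOFS scope: every caller holds a transaction handle, kvmalloc() cannot be used with GFP_NOFS directly, and a GFP_KERNEL allocation that recursed into the filesystem and forced a transaction commit would deadlock. A minimal sketch of the wrapper, assuming only memalloc_nofs_save()/memalloc_nofs_restore() from <linux/sched/mm.h> and kvzalloc()/kvfree() from <linux/mm.h> (alloc_scratch_nofs is an invented name):

#include <linux/mm.h>           /* kvzalloc(), kvfree() */
#include <linux/sched/mm.h>     /* memalloc_nofs_save(), memalloc_nofs_restore() */

/*
 * Allocate a scratch buffer while a transaction is held open.  The NOFS
 * scope makes any reclaim triggered by this allocation skip filesystem
 * reclaim, so the allocation cannot re-enter the filesystem.
 */
static void *alloc_scratch_nofs(size_t size)
{
        unsigned int nofs_flag;
        void *buf;

        nofs_flag = memalloc_nofs_save();
        buf = kvzalloc(size, GFP_KERNEL);
        memalloc_nofs_restore(nofs_flag);

        return buf;     /* caller releases with kvfree() */
}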
@@ -202,9 +196,9 @@ static void le_bitmap_set(unsigned long *map, unsigned int start, int len)
}
EXPORT_FOR_TESTS
-int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path)
+int btrfs_convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = btrfs_free_space_root(block_group);
@@ -222,10 +216,8 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
bitmap_size = free_space_bitmap_size(fs_info, block_group->length);
bitmap = alloc_bitmap(bitmap_size);
- if (!bitmap) {
- ret = -ENOMEM;
- goto out;
- }
+ if (unlikely(!bitmap))
+ return 0;
start = block_group->start;
end = block_group->start + block_group->length;
@@ -236,8 +228,10 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
while (!done) {
ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
- if (ret)
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
leaf = path->nodes[0];
nr = 0;
@@ -272,31 +266,35 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
}
ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
- if (ret)
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
btrfs_release_path(path);
}
- info = search_free_space_info(trans, block_group, path, 1);
+ info = btrfs_search_free_space_info(trans, block_group, path, 1);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
leaf = path->nodes[0];
flags = btrfs_free_space_flags(leaf, info);
flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
+ block_group->using_free_space_bitmaps = true;
+ block_group->using_free_space_bitmaps_cached = true;
btrfs_set_free_space_flags(leaf, info, flags);
expected_extent_count = btrfs_free_space_extent_count(leaf, info);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
- if (extent_count != expected_extent_count) {
+ if (unlikely(extent_count != expected_extent_count)) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
block_group->start, extent_count,
expected_extent_count);
- ASSERT(0);
ret = -EIO;
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -317,14 +315,15 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, root, path, &key,
data_size);
- if (ret)
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
leaf = path->nodes[0];
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
write_extent_buffer(leaf, bitmap_cursor, ptr,
data_size);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
i += extent_size;
@@ -334,15 +333,13 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
ret = 0;
out:
kvfree(bitmap);
- if (ret)
- btrfs_abort_transaction(trans, ret);
return ret;
}
EXPORT_FOR_TESTS
-int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path)
+int btrfs_convert_free_space_to_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = btrfs_free_space_root(block_group);
@@ -359,10 +356,8 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
bitmap_size = free_space_bitmap_size(fs_info, block_group->length);
bitmap = alloc_bitmap(bitmap_size);
- if (!bitmap) {
- ret = -ENOMEM;
- goto out;
- }
+ if (unlikely(!bitmap))
+ return 0;
start = block_group->start;
end = block_group->start + block_group->length;
@@ -373,8 +368,10 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
while (!done) {
ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
- if (ret)
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
leaf = path->nodes[0];
nr = 0;
@@ -403,50 +400,56 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
data_size = free_space_bitmap_size(fs_info,
found_key.offset);
- ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
+ path->slots[0]--;
+ ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
read_extent_buffer(leaf, bitmap_cursor, ptr,
data_size);
nr++;
- path->slots[0]--;
} else {
ASSERT(0);
}
}
ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
- if (ret)
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
btrfs_release_path(path);
}
- info = search_free_space_info(trans, block_group, path, 1);
+ info = btrfs_search_free_space_info(trans, block_group, path, 1);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
leaf = path->nodes[0];
flags = btrfs_free_space_flags(leaf, info);
flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
+ block_group->using_free_space_bitmaps = false;
+ block_group->using_free_space_bitmaps_cached = true;
btrfs_set_free_space_flags(leaf, info, flags);
expected_extent_count = btrfs_free_space_extent_count(leaf, info);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
- nrbits = block_group->length >> block_group->fs_info->sectorsize_bits;
+ nrbits = block_group->length >> fs_info->sectorsize_bits;
start_bit = find_next_bit_le(bitmap, nrbits, 0);
while (start_bit < nrbits) {
end_bit = find_next_zero_bit_le(bitmap, nrbits, start_bit);
ASSERT(start_bit < end_bit);
- key.objectid = start + start_bit * block_group->fs_info->sectorsize;
+ key.objectid = start + start_bit * fs_info->sectorsize;
key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
- key.offset = (end_bit - start_bit) * block_group->fs_info->sectorsize;
+ key.offset = (end_bit - start_bit) * fs_info->sectorsize;
ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
- if (ret)
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
btrfs_release_path(path);
extent_count++;
@@ -454,21 +457,19 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
start_bit = find_next_bit_le(bitmap, nrbits, end_bit);
}
- if (extent_count != expected_extent_count) {
+ if (unlikely(extent_count != expected_extent_count)) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
block_group->start, extent_count,
expected_extent_count);
- ASSERT(0);
ret = -EIO;
+ btrfs_abort_transaction(trans, ret);
goto out;
}
ret = 0;
out:
kvfree(bitmap);
- if (ret)
- btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -485,34 +486,31 @@ static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
if (new_extents == 0)
return 0;
- info = search_free_space_info(trans, block_group, path, 1);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto out;
- }
+ info = btrfs_search_free_space_info(trans, block_group, path, 1);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
flags = btrfs_free_space_flags(path->nodes[0], info);
extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
extent_count += new_extents;
btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_release_path(path);
if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
extent_count > block_group->bitmap_high_thresh) {
- ret = convert_free_space_to_bitmaps(trans, block_group, path);
+ ret = btrfs_convert_free_space_to_bitmaps(trans, block_group, path);
} else if ((flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
extent_count < block_group->bitmap_low_thresh) {
- ret = convert_free_space_to_extents(trans, block_group, path);
+ ret = btrfs_convert_free_space_to_extents(trans, block_group, path);
}
-out:
return ret;
}
EXPORT_FOR_TESTS
-int free_space_test_bit(struct btrfs_block_group *block_group,
- struct btrfs_path *path, u64 offset)
+bool btrfs_free_space_test_bit(struct btrfs_block_group *block_group,
+ struct btrfs_path *path, u64 offset)
{
struct extent_buffer *leaf;
struct btrfs_key key;
@@ -530,13 +528,13 @@ int free_space_test_bit(struct btrfs_block_group *block_group,
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
i = div_u64(offset - found_start,
block_group->fs_info->sectorsize);
- return !!extent_buffer_test_bit(leaf, ptr, i);
+ return extent_buffer_test_bit(leaf, ptr, i);
}
-static void free_space_set_bits(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path, u64 *start, u64 *size,
- int bit)
+static void free_space_modify_bits(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path, u64 *start, u64 *size,
+ bool set_bits)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct extent_buffer *leaf;
@@ -560,7 +558,7 @@ static void free_space_set_bits(struct btrfs_trans_handle *trans,
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
first = (*start - found_start) >> fs_info->sectorsize_bits;
last = (end - found_start) >> fs_info->sectorsize_bits;
- if (bit)
+ if (set_bits)
extent_buffer_bitmap_set(leaf, ptr, first, last - first);
else
extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
@@ -604,13 +602,14 @@ static int free_space_next_bitmap(struct btrfs_trans_handle *trans,
static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group,
struct btrfs_path *path,
- u64 start, u64 size, int remove)
+ u64 start, u64 size, bool remove)
{
struct btrfs_root *root = btrfs_free_space_root(block_group);
struct btrfs_key key;
u64 end = start + size;
u64 cur_start, cur_size;
- int prev_bit, next_bit;
+ bool prev_bit_set = false;
+ bool next_bit_set = false;
int new_extents;
int ret;
@@ -627,16 +626,16 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
if (ret)
- goto out;
+ return ret;
- prev_bit = free_space_test_bit(block_group, path, prev_block);
+ prev_bit_set = btrfs_free_space_test_bit(block_group, path, prev_block);
/* The previous block may have been in the previous bitmap. */
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
if (start >= key.objectid + key.offset) {
ret = free_space_next_bitmap(trans, root, path);
if (ret)
- goto out;
+ return ret;
}
} else {
key.objectid = start;
@@ -645,9 +644,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
if (ret)
- goto out;
-
- prev_bit = -1;
+ return ret;
}
/*
@@ -657,13 +654,13 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
cur_start = start;
cur_size = size;
while (1) {
- free_space_set_bits(trans, block_group, path, &cur_start, &cur_size,
- !remove);
+ free_space_modify_bits(trans, block_group, path, &cur_start,
+ &cur_size, !remove);
if (cur_size == 0)
break;
ret = free_space_next_bitmap(trans, root, path);
if (ret)
- goto out;
+ return ret;
}
/*
@@ -676,42 +673,36 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
if (end >= key.objectid + key.offset) {
ret = free_space_next_bitmap(trans, root, path);
if (ret)
- goto out;
+ return ret;
}
- next_bit = free_space_test_bit(block_group, path, end);
- } else {
- next_bit = -1;
+ next_bit_set = btrfs_free_space_test_bit(block_group, path, end);
}
if (remove) {
new_extents = -1;
- if (prev_bit == 1) {
+ if (prev_bit_set) {
/* Leftover on the left. */
new_extents++;
}
- if (next_bit == 1) {
+ if (next_bit_set) {
/* Leftover on the right. */
new_extents++;
}
} else {
new_extents = 1;
- if (prev_bit == 1) {
+ if (prev_bit_set) {
/* Merging with neighbor on the left. */
new_extents--;
}
- if (next_bit == 1) {
+ if (next_bit_set) {
/* Merging with neighbor on the right. */
new_extents--;
}
}
btrfs_release_path(path);
- ret = update_free_space_extent_count(trans, block_group, path,
- new_extents);
-
-out:
- return ret;
+ return update_free_space_extent_count(trans, block_group, path, new_extents);
}
static int remove_free_space_extent(struct btrfs_trans_handle *trans,
@@ -732,7 +723,7 @@ static int remove_free_space_extent(struct btrfs_trans_handle *trans,
ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
if (ret)
- goto out;
+ return ret;
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
@@ -764,7 +755,7 @@ static int remove_free_space_extent(struct btrfs_trans_handle *trans,
/* Delete the existing key (cases 1-4). */
ret = btrfs_del_item(trans, root, path);
if (ret)
- goto out;
+ return ret;
/* Add a key for leftovers at the beginning (cases 3 and 4). */
if (start > found_start) {
@@ -775,7 +766,7 @@ static int remove_free_space_extent(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
if (ret)
- goto out;
+ return ret;
new_extents++;
}
@@ -788,81 +779,89 @@ static int remove_free_space_extent(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
if (ret)
- goto out;
+ return ret;
new_extents++;
}
btrfs_release_path(path);
- ret = update_free_space_extent_count(trans, block_group, path,
- new_extents);
-
-out:
- return ret;
+ return update_free_space_extent_count(trans, block_group, path, new_extents);
}
-EXPORT_FOR_TESTS
-int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path, u64 start, u64 size)
+static int using_bitmaps(struct btrfs_block_group *bg, struct btrfs_path *path)
{
struct btrfs_free_space_info *info;
u32 flags;
- int ret;
- if (test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags)) {
- ret = __add_block_group_free_space(trans, block_group, path);
- if (ret)
- return ret;
- }
+ if (bg->using_free_space_bitmaps_cached)
+ return bg->using_free_space_bitmaps;
- info = search_free_space_info(NULL, block_group, path, 0);
+ info = btrfs_search_free_space_info(NULL, bg, path, 0);
if (IS_ERR(info))
return PTR_ERR(info);
flags = btrfs_free_space_flags(path->nodes[0], info);
btrfs_release_path(path);
- if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
+ bg->using_free_space_bitmaps = (flags & BTRFS_FREE_SPACE_USING_BITMAPS);
+ bg->using_free_space_bitmaps_cached = true;
+
+ return bg->using_free_space_bitmaps;
+}
+
+EXPORT_FOR_TESTS
+int __btrfs_remove_from_free_space_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path, u64 start, u64 size)
+{
+ int ret;
+
+ ret = __add_block_group_free_space(trans, block_group, path);
+ if (ret)
+ return ret;
+
+ ret = using_bitmaps(block_group, path);
+ if (ret < 0)
+ return ret;
+
+ if (ret)
return modify_free_space_bitmap(trans, block_group, path,
- start, size, 1);
- } else {
- return remove_free_space_extent(trans, block_group, path,
- start, size);
- }
+ start, size, true);
+
+ return remove_free_space_extent(trans, block_group, path, start, size);
}
-int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
- u64 start, u64 size)
+int btrfs_remove_from_free_space_tree(struct btrfs_trans_handle *trans,
+ u64 start, u64 size)
{
struct btrfs_block_group *block_group;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
return 0;
path = btrfs_alloc_path();
- if (!path) {
+ if (unlikely(!path)) {
ret = -ENOMEM;
- goto out;
+ btrfs_abort_transaction(trans, ret);
+ return ret;
}
block_group = btrfs_lookup_block_group(trans->fs_info, start);
- if (!block_group) {
- ASSERT(0);
+ if (unlikely(!block_group)) {
+ DEBUG_WARN("no block group found for start=%llu", start);
ret = -ENOENT;
- goto out;
+ btrfs_abort_transaction(trans, ret);
+ return ret;
}
mutex_lock(&block_group->free_space_lock);
- ret = __remove_from_free_space_tree(trans, block_group, path, start,
- size);
+ ret = __btrfs_remove_from_free_space_tree(trans, block_group, path, start, size);
mutex_unlock(&block_group->free_space_lock);
-
- btrfs_put_block_group(block_group);
-out:
- btrfs_free_path(path);
if (ret)
btrfs_abort_transaction(trans, ret);
+
+ btrfs_put_block_group(block_group);
+
return ret;
}
@@ -909,7 +908,7 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans,
ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
if (ret)
- goto out;
+ return ret;
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
@@ -932,7 +931,7 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans,
if (found_end == start) {
ret = btrfs_del_item(trans, root, path);
if (ret)
- goto out;
+ return ret;
new_key.objectid = found_start;
new_key.offset += key.offset;
new_extents--;
@@ -949,7 +948,7 @@ right:
ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
if (ret)
- goto out;
+ return ret;
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
@@ -973,7 +972,7 @@ right:
if (found_start == end) {
ret = btrfs_del_item(trans, root, path);
if (ret)
- goto out;
+ return ret;
new_key.offset += key.offset;
new_extents--;
}
@@ -983,78 +982,67 @@ insert:
/* Insert the new key (cases 1-4). */
ret = btrfs_insert_empty_item(trans, root, path, &new_key, 0);
if (ret)
- goto out;
+ return ret;
btrfs_release_path(path);
- ret = update_free_space_extent_count(trans, block_group, path,
- new_extents);
-
-out:
- return ret;
+ return update_free_space_extent_count(trans, block_group, path, new_extents);
}
EXPORT_FOR_TESTS
-int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path, u64 start, u64 size)
+int __btrfs_add_to_free_space_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path, u64 start, u64 size)
{
- struct btrfs_free_space_info *info;
- u32 flags;
int ret;
- if (test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags)) {
- ret = __add_block_group_free_space(trans, block_group, path);
- if (ret)
- return ret;
- }
+ ret = __add_block_group_free_space(trans, block_group, path);
+ if (ret)
+ return ret;
- info = search_free_space_info(NULL, block_group, path, 0);
- if (IS_ERR(info))
- return PTR_ERR(info);
- flags = btrfs_free_space_flags(path->nodes[0], info);
- btrfs_release_path(path);
+ ret = using_bitmaps(block_group, path);
+ if (ret < 0)
+ return ret;
- if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
+ if (ret)
return modify_free_space_bitmap(trans, block_group, path,
- start, size, 0);
- } else {
- return add_free_space_extent(trans, block_group, path, start,
- size);
- }
+ start, size, false);
+
+ return add_free_space_extent(trans, block_group, path, start, size);
}
-int add_to_free_space_tree(struct btrfs_trans_handle *trans,
- u64 start, u64 size)
+int btrfs_add_to_free_space_tree(struct btrfs_trans_handle *trans,
+ u64 start, u64 size)
{
struct btrfs_block_group *block_group;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
return 0;
path = btrfs_alloc_path();
- if (!path) {
+ if (unlikely(!path)) {
ret = -ENOMEM;
- goto out;
+ btrfs_abort_transaction(trans, ret);
+ return ret;
}
block_group = btrfs_lookup_block_group(trans->fs_info, start);
- if (!block_group) {
- ASSERT(0);
+ if (unlikely(!block_group)) {
+ DEBUG_WARN("no block group found for start=%llu", start);
ret = -ENOENT;
- goto out;
+ btrfs_abort_transaction(trans, ret);
+ return ret;
}
mutex_lock(&block_group->free_space_lock);
- ret = __add_to_free_space_tree(trans, block_group, path, start, size);
+ ret = __btrfs_add_to_free_space_tree(trans, block_group, path, start, size);
mutex_unlock(&block_group->free_space_lock);
-
- btrfs_put_block_group(block_group);
-out:
- btrfs_free_path(path);
if (ret)
btrfs_abort_transaction(trans, ret);
+
+ btrfs_put_block_group(block_group);
+
return ret;
}
@@ -1067,7 +1055,8 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group)
{
struct btrfs_root *extent_root;
- struct btrfs_path *path, *path2;
+ BTRFS_PATH_AUTO_FREE(path);
+ BTRFS_PATH_AUTO_FREE(path2);
struct btrfs_key key;
u64 start, end;
int ret;
@@ -1075,17 +1064,16 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- path->reada = READA_FORWARD;
path2 = btrfs_alloc_path();
- if (!path2) {
- btrfs_free_path(path);
+ if (!path2)
return -ENOMEM;
- }
+
+ path->reada = READA_FORWARD;
ret = add_new_free_space_info(trans, block_group, path2);
if (ret)
- goto out;
+ return ret;
mutex_lock(&block_group->free_space_lock);
@@ -1104,11 +1092,22 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0);
if (ret < 0)
goto out_locked;
- ASSERT(ret == 0);
-
+ /*
+ * If ret is 1 (no key found), it means this is an empty block group,
+ * without any extents allocated from it and there's no block group
+ * item (key BTRFS_BLOCK_GROUP_ITEM_KEY) located in the extent tree
+ * because we are using the block group tree feature (so block group
+ * items are stored in the block group tree) or this is a new block
+ * group created in the current transaction and its block group item
+ * was not yet inserted in the extent tree (that happens in
+ * btrfs_create_pending_block_groups() -> insert_block_group_item()).
+ * It also means there are no extents allocated for block groups with a
+ * start offset beyond this block group's end offset (this is the last,
+ * highest, block group).
+ */
start = block_group->start;
end = block_group->start + block_group->length;
- while (1) {
+ while (ret == 0) {
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
if (key.type == BTRFS_EXTENT_ITEM_KEY ||
@@ -1117,11 +1116,11 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
break;
if (start < key.objectid) {
- ret = __add_to_free_space_tree(trans,
- block_group,
- path2, start,
- key.objectid -
- start);
+ ret = __btrfs_add_to_free_space_tree(trans,
+ block_group,
+ path2, start,
+ key.objectid -
+ start);
if (ret)
goto out_locked;
}
@@ -1138,12 +1137,10 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
ret = btrfs_next_item(extent_root, path);
if (ret < 0)
goto out_locked;
- if (ret)
- break;
}
if (start < end) {
- ret = __add_to_free_space_tree(trans, block_group, path2,
- start, end - start);
+ ret = __btrfs_add_to_free_space_tree(trans, block_group, path2,
+ start, end - start);
if (ret)
goto out_locked;
}
@@ -1151,9 +1148,7 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
ret = 0;
out_locked:
mutex_unlock(&block_group->free_space_lock);
-out:
- btrfs_free_path(path2);
- btrfs_free_path(path);
+
return ret;
}
@@ -1181,7 +1176,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
goto out_clear;
}
ret = btrfs_global_root_insert(free_space_root);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_put_root(free_space_root);
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
@@ -1193,7 +1188,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
block_group = rb_entry(node, struct btrfs_block_group,
cache_node);
ret = populate_free_space_tree(trans, block_group);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
goto out_clear;
@@ -1222,8 +1217,9 @@ out_clear:
static int clear_free_space_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
+ struct rb_node *node;
int nr;
int ret;
@@ -1238,7 +1234,7 @@ static int clear_free_space_tree(struct btrfs_trans_handle *trans,
while (1) {
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0)
- goto out;
+ return ret;
nr = btrfs_header_nritems(path->nodes[0]);
if (!nr)
@@ -1247,15 +1243,22 @@ static int clear_free_space_tree(struct btrfs_trans_handle *trans,
path->slots[0] = 0;
ret = btrfs_del_items(trans, root, path, 0, nr);
if (ret)
- goto out;
+ return ret;
btrfs_release_path(path);
}
- ret = 0;
-out:
- btrfs_free_path(path);
- return ret;
+ node = rb_first_cached(&trans->fs_info->block_group_cache_tree);
+ while (node) {
+ struct btrfs_block_group *bg;
+
+ bg = rb_entry(node, struct btrfs_block_group, cache_node);
+ clear_bit(BLOCK_GROUP_FLAG_FREE_SPACE_ADDED, &bg->runtime_flags);
+ node = rb_next(node);
+ cond_resched();
+ }
+
+ return 0;
}
int btrfs_delete_free_space_tree(struct btrfs_fs_info *fs_info)
@@ -1278,14 +1281,14 @@ int btrfs_delete_free_space_tree(struct btrfs_fs_info *fs_info)
btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
ret = clear_free_space_tree(trans, free_space_root);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
return ret;
}
ret = btrfs_del_root(trans, &free_space_root->root_key);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
return ret;
@@ -1303,7 +1306,7 @@ int btrfs_delete_free_space_tree(struct btrfs_fs_info *fs_info)
ret = btrfs_free_tree_block(trans, btrfs_root_id(free_space_root),
free_space_root->node, 0, 1);
btrfs_put_root(free_space_root);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
return ret;
@@ -1332,7 +1335,7 @@ int btrfs_rebuild_free_space_tree(struct btrfs_fs_info *fs_info)
set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
ret = clear_free_space_tree(trans, free_space_root);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
return ret;
@@ -1344,12 +1347,24 @@ int btrfs_rebuild_free_space_tree(struct btrfs_fs_info *fs_info)
block_group = rb_entry(node, struct btrfs_block_group,
cache_node);
+
+ if (test_bit(BLOCK_GROUP_FLAG_FREE_SPACE_ADDED,
+ &block_group->runtime_flags))
+ goto next;
+
ret = populate_free_space_tree(trans, block_group);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
return ret;
}
+next:
+ if (btrfs_should_end_transaction(trans)) {
+ btrfs_end_transaction(trans);
+ trans = btrfs_start_transaction(free_space_root, 1);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+ }
node = rb_next(node);
}
@@ -1366,54 +1381,82 @@ static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
+ bool own_path = false;
int ret;
- clear_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags);
+ if (!test_and_clear_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE,
+ &block_group->runtime_flags))
+ return 0;
+
+ /*
+ * While rebuilding the free space tree we may allocate new metadata
+ * block groups while modifying the free space tree.
+ *
+ * Because during the rebuild (at btrfs_rebuild_free_space_tree()) we
+ * can use multiple transactions, every time btrfs_end_transaction() is
+ * called at btrfs_rebuild_free_space_tree() we finish the creation of
+ * new block groups by calling btrfs_create_pending_block_groups(), and
+ * that in turn calls us, through add_block_group_free_space(), to add
+ * a free space info item and a free space extent item for the block
+ * group.
+ *
+ * Then later btrfs_rebuild_free_space_tree() may find such new block
+ * groups and processes them with populate_free_space_tree(), which can
+ * fail with EEXIST since there are already items for the block group in
+ * the free space tree. Notice that we say "may find" because a new
+ * block group may be added to the block groups rbtree in a node before
+ * or after the block group currently being processed by the rebuild
+ * process. So signal the rebuild process to skip such new block groups
+ * if it finds them.
+ */
+ set_bit(BLOCK_GROUP_FLAG_FREE_SPACE_ADDED, &block_group->runtime_flags);
+
+ if (!path) {
+ path = btrfs_alloc_path();
+ if (unlikely(!path)) {
+ btrfs_abort_transaction(trans, -ENOMEM);
+ return -ENOMEM;
+ }
+ own_path = true;
+ }
ret = add_new_free_space_info(trans, block_group, path);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+
+ ret = __btrfs_add_to_free_space_tree(trans, block_group, path,
+ block_group->start, block_group->length);
if (ret)
- return ret;
+ btrfs_abort_transaction(trans, ret);
+
+out:
+ if (own_path)
+ btrfs_free_path(path);
- return __add_to_free_space_tree(trans, block_group, path,
- block_group->start,
- block_group->length);
+ return ret;
}
-int add_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group)
+int btrfs_add_block_group_free_space(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_path *path = NULL;
- int ret = 0;
+ int ret;
- if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
+ if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
return 0;
mutex_lock(&block_group->free_space_lock);
- if (!test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags))
- goto out;
-
- path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = __add_block_group_free_space(trans, block_group, path);
-
-out:
- btrfs_free_path(path);
+ ret = __add_block_group_free_space(trans, block_group, NULL);
mutex_unlock(&block_group->free_space_lock);
- if (ret)
- btrfs_abort_transaction(trans, ret);
return ret;
}
-int remove_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group)
+int btrfs_remove_block_group_free_space(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group)
{
struct btrfs_root *root = btrfs_free_space_root(block_group);
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key, found_key;
struct extent_buffer *leaf;
u64 start, end;
@@ -1429,9 +1472,10 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans,
}
path = btrfs_alloc_path();
- if (!path) {
+ if (unlikely(!path)) {
ret = -ENOMEM;
- goto out;
+ btrfs_abort_transaction(trans, ret);
+ return ret;
}
start = block_group->start;
@@ -1443,8 +1487,10 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans,
while (!done) {
ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
- if (ret)
- goto out;
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
leaf = path->nodes[0];
nr = 0;
@@ -1472,16 +1518,15 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans,
}
ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
- if (ret)
- goto out;
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
btrfs_release_path(path);
}
ret = 0;
-out:
- btrfs_free_path(path);
- if (ret)
- btrfs_abort_transaction(trans, ret);
+
return ret;
}
@@ -1493,7 +1538,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
struct btrfs_fs_info *fs_info;
struct btrfs_root *root;
struct btrfs_key key;
- int prev_bit = 0, bit;
+ bool prev_bit_set = false;
/* Initialize to silence GCC. */
u64 extent_start = 0;
u64 end, offset;
@@ -1510,7 +1555,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
while (1) {
ret = btrfs_next_item(root, path);
if (ret < 0)
- goto out;
+ return ret;
if (ret)
break;
@@ -1524,10 +1569,12 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
offset = key.objectid;
while (offset < key.objectid + key.offset) {
- bit = free_space_test_bit(block_group, path, offset);
- if (prev_bit == 0 && bit == 1) {
+ bool bit_set;
+
+ bit_set = btrfs_free_space_test_bit(block_group, path, offset);
+ if (!prev_bit_set && bit_set) {
extent_start = offset;
- } else if (prev_bit == 1 && bit == 0) {
+ } else if (prev_bit_set && !bit_set) {
u64 space_added;
ret = btrfs_add_new_free_space(block_group,
@@ -1535,7 +1582,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
offset,
&space_added);
if (ret)
- goto out;
+ return ret;
total_found += space_added;
if (total_found > CACHING_CTL_WAKE_UP) {
total_found = 0;
@@ -1543,30 +1590,27 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
}
extent_count++;
}
- prev_bit = bit;
+ prev_bit_set = bit_set;
offset += fs_info->sectorsize;
}
}
- if (prev_bit == 1) {
+ if (prev_bit_set) {
ret = btrfs_add_new_free_space(block_group, extent_start, end, NULL);
if (ret)
- goto out;
+ return ret;
extent_count++;
}
- if (extent_count != expected_extent_count) {
+ if (unlikely(extent_count != expected_extent_count)) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
block_group->start, extent_count,
expected_extent_count);
- ASSERT(0);
- ret = -EIO;
- goto out;
+ DEBUG_WARN();
+ return -EIO;
}
- ret = 0;
-out:
- return ret;
+ return 0;
}
static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
@@ -1593,7 +1637,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
ret = btrfs_next_item(root, path);
if (ret < 0)
- goto out;
+ return ret;
if (ret)
break;
@@ -1609,7 +1653,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
key.objectid + key.offset,
&space_added);
if (ret)
- goto out;
+ return ret;
total_found += space_added;
if (total_found > CACHING_CTL_WAKE_UP) {
total_found = 0;
@@ -1618,28 +1662,24 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
extent_count++;
}
- if (extent_count != expected_extent_count) {
+ if (unlikely(extent_count != expected_extent_count)) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
block_group->start, extent_count,
expected_extent_count);
- ASSERT(0);
- ret = -EIO;
- goto out;
+ DEBUG_WARN();
+ return -EIO;
}
- ret = 0;
-out:
- return ret;
+ return 0;
}
-int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
+int btrfs_load_free_space_tree(struct btrfs_caching_control *caching_ctl)
{
struct btrfs_block_group *block_group;
struct btrfs_free_space_info *info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
u32 extent_count, flags;
- int ret;
block_group = caching_ctl->block_group;
@@ -1651,15 +1691,14 @@ int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
* Just like caching_thread() doesn't want to deadlock on the extent
* tree, we don't want to deadlock on the free space tree.
*/
- path->skip_locking = 1;
- path->search_commit_root = 1;
+ path->skip_locking = true;
+ path->search_commit_root = true;
path->reada = READA_FORWARD;
- info = search_free_space_info(NULL, block_group, path, 0);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto out;
- }
+ info = btrfs_search_free_space_info(NULL, block_group, path, 0);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
flags = btrfs_free_space_flags(path->nodes[0], info);
@@ -1669,11 +1708,7 @@ int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
* there.
*/
if (flags & BTRFS_FREE_SPACE_USING_BITMAPS)
- ret = load_free_space_bitmaps(caching_ctl, path, extent_count);
+ return load_free_space_bitmaps(caching_ctl, path, extent_count);
else
- ret = load_free_space_extents(caching_ctl, path, extent_count);
-
-out:
- btrfs_free_path(path);
- return ret;
+ return load_free_space_extents(caching_ctl, path, extent_count);
}
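Most of the functions rewritten above drop their out: label and manual btrfs_free_path() in favour of BTRFS_PATH_AUTO_FREE(path), which presumably builds on the compiler's cleanup attribute, as the kernel's other scope-based cleanup helpers do, so the path is released automatically on every return path. A runnable userspace sketch of the underlying idiom, with invented names (free_charp, AUTO_FREE_CHARP):

#include <stdio.h>
#include <stdlib.h>

/* Cleanup handler: runs when the annotated variable goes out of scope. */
static void free_charp(char **p)
{
        free(*p);
}
#define AUTO_FREE_CHARP __attribute__((cleanup(free_charp)))

int main(void)
{
        AUTO_FREE_CHARP char *buf = malloc(64);

        if (!buf)
                return 1;       /* free(NULL) in the handler is harmless */
        snprintf(buf, 64, "released automatically on every return path");
        puts(buf);
        return 0;               /* no explicit free() needed here either */
}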
diff --git a/fs/btrfs/free-space-tree.h b/fs/btrfs/free-space-tree.h
index e6c6d6f4f221..3d9a5d4477fc 100644
--- a/fs/btrfs/free-space-tree.h
+++ b/fs/btrfs/free-space-tree.h
@@ -22,39 +22,39 @@ struct btrfs_trans_handle;
#define BTRFS_FREE_SPACE_BITMAP_SIZE 256
#define BTRFS_FREE_SPACE_BITMAP_BITS (BTRFS_FREE_SPACE_BITMAP_SIZE * BITS_PER_BYTE)
-void set_free_space_tree_thresholds(struct btrfs_block_group *block_group);
+void btrfs_set_free_space_tree_thresholds(struct btrfs_block_group *block_group);
int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info);
int btrfs_delete_free_space_tree(struct btrfs_fs_info *fs_info);
int btrfs_rebuild_free_space_tree(struct btrfs_fs_info *fs_info);
-int load_free_space_tree(struct btrfs_caching_control *caching_ctl);
-int add_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group);
-int remove_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group);
-int add_to_free_space_tree(struct btrfs_trans_handle *trans,
- u64 start, u64 size);
-int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
- u64 start, u64 size);
+int btrfs_load_free_space_tree(struct btrfs_caching_control *caching_ctl);
+int btrfs_add_block_group_free_space(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group);
+int btrfs_remove_block_group_free_space(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group);
+int btrfs_add_to_free_space_tree(struct btrfs_trans_handle *trans,
+ u64 start, u64 size);
+int btrfs_remove_from_free_space_tree(struct btrfs_trans_handle *trans,
+ u64 start, u64 size);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct btrfs_free_space_info *
-search_free_space_info(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path, int cow);
-int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
+btrfs_search_free_space_info(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group,
- struct btrfs_path *path, u64 start, u64 size);
-int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path, u64 start, u64 size);
-int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path);
-int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path);
-int free_space_test_bit(struct btrfs_block_group *block_group,
- struct btrfs_path *path, u64 offset);
+ struct btrfs_path *path, int cow);
+int __btrfs_add_to_free_space_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path, u64 start, u64 size);
+int __btrfs_remove_from_free_space_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path, u64 start, u64 size);
+int btrfs_convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path);
+int btrfs_convert_free_space_to_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path);
+bool btrfs_free_space_test_bit(struct btrfs_block_group *block_group,
+ struct btrfs_path *path, u64 offset);
#endif
#endif
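The prototypes above pair with the new using_free_space_bitmaps/using_free_space_bitmaps_cached fields used in free-space-tree.c: the first query of whether a block group stores its free space as bitmaps reads the on-disk BTRFS_FREE_SPACE_USING_BITMAPS flag and caches the answer, and later queries are answered from memory. A reduced sketch of that cached-flag pattern (group, read_flag_from_disk and group_using_bitmaps are invented names):

#include <stdbool.h>

struct group {
        bool using_bitmaps;             /* cached answer */
        bool using_bitmaps_cached;      /* is the cached answer valid? */
};

/* Stand-in for looking up the free space info item's flags. */
static bool read_flag_from_disk(const struct group *g)
{
        (void)g;
        return true;
}

static bool group_using_bitmaps(struct group *g)
{
        if (g->using_bitmaps_cached)
                return g->using_bitmaps;

        g->using_bitmaps = read_flag_from_disk(g);
        g->using_bitmaps_cached = true;
        return g->using_bitmaps;
}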
diff --git a/fs/btrfs/fs.c b/fs/btrfs/fs.c
index 31c1648bc0b4..feb0a2faa837 100644
--- a/fs/btrfs/fs.c
+++ b/fs/btrfs/fs.c
@@ -1,9 +1,186 @@
// SPDX-License-Identifier: GPL-2.0
#include "messages.h"
-#include "ctree.h"
#include "fs.h"
#include "accessors.h"
+#include "volumes.h"
+
+static const struct btrfs_csums {
+ u16 size;
+ const char name[10];
+ const char driver[12];
+} btrfs_csums[] = {
+ [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
+ [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
+ [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
+ [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
+ .driver = "blake2b-256" },
+};
+
+/* This exists for btrfs-progs usages. */
+u16 btrfs_csum_type_size(u16 type)
+{
+ return btrfs_csums[type].size;
+}
+
+int btrfs_super_csum_size(const struct btrfs_super_block *s)
+{
+ u16 t = btrfs_super_csum_type(s);
+
+ /* csum type is validated at mount time. */
+ return btrfs_csum_type_size(t);
+}
+
+const char *btrfs_super_csum_name(u16 csum_type)
+{
+ /* csum type is validated at mount time. */
+ return btrfs_csums[csum_type].name;
+}
+
+/*
+ * Return driver name if defined, otherwise the name that's also a valid driver
+ * name.
+ */
+const char *btrfs_super_csum_driver(u16 csum_type)
+{
+ /* csum type is validated at mount time */
+ return btrfs_csums[csum_type].driver[0] ?
+ btrfs_csums[csum_type].driver :
+ btrfs_csums[csum_type].name;
+}
+
+size_t __attribute_const__ btrfs_get_num_csums(void)
+{
+ return ARRAY_SIZE(btrfs_csums);
+}
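Editor's note: the btrfs_csums[] table above maps each checksum type to its on-disk size, display name and, when it differs, the crypto driver name; because the type is validated at mount time the helpers can index the array directly. A minimal user-space sketch of the same lookup pattern follows (the enum values and names are stand-ins for illustration, not the kernel definitions):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for the kernel's BTRFS_CSUM_TYPE_* values. */
enum { CSUM_CRC32 = 0, CSUM_XXHASH = 1, CSUM_SHA256 = 2, CSUM_BLAKE2 = 3 };

static const struct {
	uint16_t size;
	const char name[10];
	const char driver[12];
} csums[] = {
	[CSUM_CRC32]  = { .size = 4,  .name = "crc32c" },
	[CSUM_XXHASH] = { .size = 8,  .name = "xxhash64" },
	[CSUM_SHA256] = { .size = 32, .name = "sha256" },
	[CSUM_BLAKE2] = { .size = 32, .name = "blake2b", .driver = "blake2b-256" },
};

/* Fall back to the display name when no dedicated driver string is set. */
static const char *csum_driver(int type)
{
	return csums[type].driver[0] ? csums[type].driver : csums[type].name;
}

int main(void)
{
	for (int t = CSUM_CRC32; t <= CSUM_BLAKE2; t++)
		printf("type %d: %u bytes, name=%s, driver=%s\n",
		       t, (unsigned)csums[t].size, csums[t].name, csum_driver(t));
	return 0;
}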
+
+/*
+ * We support the following block sizes for all systems:
+ *
+ * - 4K
+ *   This is the most common block size. For PAGE_SIZE > 4K cases the subpage
+ * mode is used.
+ *
+ * - PAGE_SIZE
+ * The straightforward block size to support.
+ *
+ * And extra support for the following block sizes based on the kernel config:
+ *
+ * - MIN_BLOCKSIZE
+ * This is either 4K (regular builds) or 2K (debug builds)
+ * This allows testing subpage routines on x86_64.
+ */
+bool __attribute_const__ btrfs_supported_blocksize(u32 blocksize)
+{
+ /* @blocksize should be validated first. */
+ ASSERT(is_power_of_2(blocksize) && blocksize >= BTRFS_MIN_BLOCKSIZE &&
+ blocksize <= BTRFS_MAX_BLOCKSIZE);
+
+ if (blocksize == PAGE_SIZE || blocksize == SZ_4K || blocksize == BTRFS_MIN_BLOCKSIZE)
+ return true;
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ /*
+	 * Support for bs > ps is done by specifying a minimal folio order for
+	 * the filemap, thus implying large data folios.
+	 * On HIGHMEM systems we cannot always access the content of a (large)
+	 * folio in one go, but have to go through it page by page.
+	 *
+	 * A lot of features don't implement a proper PAGE-sized loop for large
+	 * folios; this includes:
+ *
+ * - compression
+ * - verity
+ * - encoded write
+ *
+ * Considering HIGHMEM is such a pain to deal with and it's going
+ * to be deprecated eventually, just reject HIGHMEM && bs > ps cases.
+ */
+ if (IS_ENABLED(CONFIG_HIGHMEM) && blocksize > PAGE_SIZE)
+ return false;
+ return true;
+#endif
+ return false;
+}
+
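Editor's note: btrfs_supported_blocksize() accepts 4K, PAGE_SIZE and BTRFS_MIN_BLOCKSIZE unconditionally, and block sizes above the page size only on experimental, non-HIGHMEM builds. A rough user-space sketch of that decision logic is shown below; the constants are assumptions for a 4K-page, non-debug, experimental build, and the kernel ASSERT()s the range check instead of returning false:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE_ASSUMED 4096u   /* assumption: 4K pages */
#define MIN_BLOCKSIZE     4096u   /* 2048 on CONFIG_BTRFS_DEBUG builds */
#define MAX_BLOCKSIZE     65536u
#define EXPERIMENTAL      1       /* stand-in for CONFIG_BTRFS_EXPERIMENTAL */

static bool is_power_of_2(unsigned int x)
{
	return x && !(x & (x - 1));
}

static bool supported_blocksize(unsigned int bs)
{
	/* The kernel ASSERT()s this precondition rather than returning. */
	if (!is_power_of_2(bs) || bs < MIN_BLOCKSIZE || bs > MAX_BLOCKSIZE)
		return false;
	if (bs == PAGE_SIZE_ASSUMED || bs == 4096u || bs == MIN_BLOCKSIZE)
		return true;
	/* bs > page size is only taken on experimental, non-HIGHMEM builds. */
	return EXPERIMENTAL;
}

int main(void)
{
	unsigned int sizes[] = { 2048, 4096, 8192, 16384, 65536, 131072 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%6u -> %s\n", sizes[i],
		       supported_blocksize(sizes[i]) ? "supported" : "rejected");
	return 0;
}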
+/*
+ * Start exclusive operation @type, return true on success.
+ */
+bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
+ enum btrfs_exclusive_operation type)
+{
+ bool ret = false;
+
+ spin_lock(&fs_info->super_lock);
+ if (fs_info->exclusive_operation == BTRFS_EXCLOP_NONE) {
+ fs_info->exclusive_operation = type;
+ ret = true;
+ }
+ spin_unlock(&fs_info->super_lock);
+
+ return ret;
+}
+
+/*
+ * Conditionally allow entering the exclusive operation @type if it's compatible
+ * with the running one. This must be paired with btrfs_exclop_start_unlock()
+ * and btrfs_exclop_finish().
+ *
+ * Compatibility:
+ * - the same type is already running
+ * - when trying to add a device and balance has been paused
+ * - not BTRFS_EXCLOP_NONE - this is intentionally incompatible and the caller
+ * must check the condition first that would allow none -> @type
+ */
+bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
+ enum btrfs_exclusive_operation type)
+{
+ spin_lock(&fs_info->super_lock);
+ if (fs_info->exclusive_operation == type ||
+ (fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED &&
+ type == BTRFS_EXCLOP_DEV_ADD))
+ return true;
+
+ spin_unlock(&fs_info->super_lock);
+ return false;
+}
+
+void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info)
+{
+ spin_unlock(&fs_info->super_lock);
+}
+
+void btrfs_exclop_finish(struct btrfs_fs_info *fs_info)
+{
+ spin_lock(&fs_info->super_lock);
+ WRITE_ONCE(fs_info->exclusive_operation, BTRFS_EXCLOP_NONE);
+ spin_unlock(&fs_info->super_lock);
+ sysfs_notify(&fs_info->fs_devices->fsid_kobj, NULL, "exclusive_operation");
+}
+
+void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
+ enum btrfs_exclusive_operation op)
+{
+ switch (op) {
+ case BTRFS_EXCLOP_BALANCE_PAUSED:
+ spin_lock(&fs_info->super_lock);
+ ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE ||
+ fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD ||
+ fs_info->exclusive_operation == BTRFS_EXCLOP_NONE ||
+ fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
+ fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE_PAUSED;
+ spin_unlock(&fs_info->super_lock);
+ break;
+ case BTRFS_EXCLOP_BALANCE:
+ spin_lock(&fs_info->super_lock);
+ ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
+ fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
+ spin_unlock(&fs_info->super_lock);
+ break;
+ default:
+ btrfs_warn(fs_info,
+ "invalid exclop balance operation %d requested", op);
+ }
+}
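Editor's note: taken together, the exclop helpers above form a small state machine guarded by super_lock: only one exclusive operation runs at a time, device add is additionally allowed while a balance is paused, and btrfs_exclop_start_try_lock() returns with the lock still held on success so the caller can re-check state before committing. A simplified user-space model of that try-lock pattern, with a pthread mutex standing in for the spinlock (names are illustrative, not the kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum exclop { EXCLOP_NONE, EXCLOP_BALANCE, EXCLOP_BALANCE_PAUSED, EXCLOP_DEV_ADD };

static pthread_mutex_t super_lock = PTHREAD_MUTEX_INITIALIZER;
static enum exclop current_op = EXCLOP_NONE;

/* Begin @type only if nothing exclusive is running. */
static bool exclop_start(enum exclop type)
{
	bool ok = false;

	pthread_mutex_lock(&super_lock);
	if (current_op == EXCLOP_NONE) {
		current_op = type;
		ok = true;
	}
	pthread_mutex_unlock(&super_lock);
	return ok;
}

/* On success the lock stays held; the caller must unlock after its check. */
static bool exclop_start_try_lock(enum exclop type)
{
	pthread_mutex_lock(&super_lock);
	if (current_op == type ||
	    (current_op == EXCLOP_BALANCE_PAUSED && type == EXCLOP_DEV_ADD))
		return true;
	pthread_mutex_unlock(&super_lock);
	return false;
}

int main(void)
{
	printf("start balance: %d\n", exclop_start(EXCLOP_BALANCE));
	printf("start dev add: %d\n", exclop_start(EXCLOP_DEV_ADD)); /* rejected */
	current_op = EXCLOP_BALANCE_PAUSED;	/* demo only: pause the balance */
	if (exclop_start_try_lock(EXCLOP_DEV_ADD)) {
		printf("dev add allowed while balance is paused\n");
		pthread_mutex_unlock(&super_lock);
	}
	return 0;
}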
void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
const char *name)
diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
index 79a1a3d6f04d..0f7e1ef27891 100644
--- a/fs/btrfs/fs.h
+++ b/fs/btrfs/fs.h
@@ -14,10 +14,10 @@
#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
-#include <linux/rwlock_types.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/list.h>
+#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
@@ -29,6 +29,7 @@
#include "extent-io-tree.h"
#include "async-thread.h"
#include "block-rsv.h"
+#include "messages.h"
struct inode;
struct super_block;
@@ -47,6 +48,20 @@ struct btrfs_subpage_info;
struct btrfs_stripe_hash_table;
struct btrfs_space_info;
+/*
+ * Minimum data and metadata block size.
+ *
+ * Normally it's 4K, but for testing subpage block size on 4K page systems, we
+ * allow DEBUG builds to accept a 2K block size.
+ */
+#ifdef CONFIG_BTRFS_DEBUG
+#define BTRFS_MIN_BLOCKSIZE (SZ_2K)
+#else
+#define BTRFS_MIN_BLOCKSIZE (SZ_4K)
+#endif
+
+#define BTRFS_MAX_BLOCKSIZE (SZ_64K)
+
#define BTRFS_MAX_EXTENT_SIZE SZ_128M
#define BTRFS_OLDEST_GENERATION 0ULL
@@ -59,6 +74,13 @@ struct btrfs_space_info;
#define BTRFS_SUPER_INFO_SIZE 4096
static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);
+/* Array of bytes with variable length, hexadecimal format 0x1234 */
+#define BTRFS_CSUM_FMT "0x%*phN"
+#define BTRFS_CSUM_FMT_VALUE(size, bytes) size, bytes
+
+#define BTRFS_KEY_FMT "(%llu %u %llu)"
+#define BTRFS_KEY_FMT_VALUE(key) (key)->objectid, (key)->type, (key)->offset
+
/*
* Number of metadata items necessary for an unlink operation:
*
@@ -90,6 +112,8 @@ enum {
BTRFS_FS_STATE_RO,
/* Track if a transaction abort has been reported on this filesystem */
BTRFS_FS_STATE_TRANS_ABORTED,
+ /* Track if log replay has failed. */
+ BTRFS_FS_STATE_LOG_REPLAY_ABORTED,
/*
* Bio operations should be blocked on this filesystem because a source
* or target device is being destroyed as part of a device replace
@@ -105,6 +129,15 @@ enum {
/* Indicates there was an error cleaning up a log tree. */
BTRFS_FS_STATE_LOG_CLEANUP_ERROR,
+ /* No more delayed iput can be queued. */
+ BTRFS_FS_STATE_NO_DELAYED_IPUT,
+
+ /*
+	 * Emergency shutdown, a step further than a transaction abort: all
+	 * operations are rejected.
+ */
+ BTRFS_FS_STATE_EMERGENCY_SHUTDOWN,
+
BTRFS_FS_STATE_COUNT
};
@@ -228,6 +261,7 @@ enum {
BTRFS_MOUNT_NOSPACECACHE = (1ULL << 30),
BTRFS_MOUNT_IGNOREMETACSUMS = (1ULL << 31),
BTRFS_MOUNT_IGNORESUPERFLAGS = (1ULL << 32),
+ BTRFS_MOUNT_REF_TRACKER = (1ULL << 33),
};
/*
@@ -265,7 +299,7 @@ enum {
#ifdef CONFIG_BTRFS_EXPERIMENTAL
/*
- * Features under developmen like Extent tree v2 support is enabled
+ * Features under development, like Extent tree v2 support, are enabled
* only under CONFIG_BTRFS_EXPERIMENTAL
*/
#define BTRFS_FEATURE_INCOMPAT_SUPP \
@@ -285,8 +319,19 @@ enum {
#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR 0ULL
#define BTRFS_DEFAULT_COMMIT_INTERVAL (30)
+#define BTRFS_WARNING_COMMIT_INTERVAL (300)
#define BTRFS_DEFAULT_MAX_INLINE (2048)
+enum btrfs_compression_type {
+ BTRFS_COMPRESS_NONE = 0,
+ BTRFS_COMPRESS_ZLIB = 1,
+ BTRFS_COMPRESS_LZO = 2,
+ BTRFS_COMPRESS_ZSTD = 3,
+ BTRFS_NR_COMPRESS_TYPES = 4,
+
+ BTRFS_DEFRAG_DONT_COMPRESS,
+};
+
struct btrfs_dev_replace {
/* See #define above */
u64 replace_state;
@@ -404,6 +449,8 @@ struct btrfs_commit_stats {
u64 last_commit_dur;
/* The total commit duration in ns */
u64 total_commit_dur;
+ /* Start of the last critical section in ns. */
+ u64 critical_section_start_time;
};
struct btrfs_fs_info {
@@ -456,6 +503,8 @@ struct btrfs_fs_info {
struct btrfs_block_rsv delayed_block_rsv;
/* Block reservation for delayed refs */
struct btrfs_block_rsv delayed_refs_rsv;
+ /* Block reservation for treelog tree */
+ struct btrfs_block_rsv treelog_rsv;
struct btrfs_block_rsv empty_block_rsv;
@@ -485,8 +534,11 @@ struct btrfs_fs_info {
u64 last_trans_log_full_commit;
unsigned long long mount_opt;
- unsigned long compress_type:4;
- unsigned int compress_level;
+ /* Compress related structures. */
+ void *compr_wsm[BTRFS_NR_COMPRESS_TYPES];
+
+ int compress_type;
+ int compress_level;
u32 commit_interval;
/*
* It is a suggestive number, the read side is safe even it gets a
@@ -606,7 +658,6 @@ struct btrfs_fs_info {
struct workqueue_struct *endio_workers;
struct workqueue_struct *endio_meta_workers;
struct workqueue_struct *rmw_workers;
- struct workqueue_struct *compressed_write_workers;
struct btrfs_workqueue *endio_write_workers;
struct btrfs_workqueue *endio_freespace_worker;
struct btrfs_workqueue *caching_workers;
@@ -627,6 +678,9 @@ struct btrfs_fs_info {
struct kobject *qgroups_kobj;
struct kobject *discard_kobj;
+ /* Track the number of blocks (sectors) read by the filesystem. */
+ struct percpu_counter stats_read_blocks;
+
/* Used to keep from writing metadata until there is a nice batch */
struct percpu_counter dirty_metadata_bytes;
struct percpu_counter delalloc_bytes;
@@ -692,8 +746,6 @@ struct btrfs_fs_info {
u32 data_chunk_allocations;
u32 metadata_ratio;
- void *bdev_holder;
-
/* Private scrub information */
struct mutex scrub_lock;
atomic_t scrubs_running;
@@ -706,7 +758,6 @@ struct btrfs_fs_info {
* running.
*/
refcount_t scrub_workers_refcnt;
- u32 sectors_per_page;
struct workqueue_struct *scrub_workers;
struct btrfs_discard_ctl discard_ctl;
@@ -719,12 +770,6 @@ struct btrfs_fs_info {
spinlock_t qgroup_lock;
/*
- * Used to avoid frequently calling ulist_alloc()/ulist_free()
- * when doing qgroup accounting, it must be protected by qgroup_lock.
- */
- struct ulist *qgroup_ulist;
-
- /*
* Protect user change for quota operations. If a transaction is needed,
* it must be started before locking this lock.
*/
@@ -759,10 +804,8 @@ struct btrfs_fs_info {
struct btrfs_delayed_root *delayed_root;
- /* Extent buffer radix tree */
- spinlock_t buffer_lock;
- /* Entries are eb->start / sectorsize */
- struct radix_tree_root buffer_radix;
+ /* Entries are eb->start >> nodesize_bits */
+ struct xarray buffer_tree;
/* Next backup root to be overwritten */
int backup_root_index;
@@ -793,9 +836,12 @@ struct btrfs_fs_info {
/* Cached block sizes */
u32 nodesize;
+ u32 nodesize_bits;
u32 sectorsize;
/* ilog2 of sectorsize, use to avoid 64bit division */
u32 sectorsize_bits;
+ u32 block_min_order;
+ u32 block_max_order;
u32 csum_size;
u32 csums_per_leaf;
u32 stripesize;
@@ -865,12 +911,10 @@ struct btrfs_fs_info {
struct lockdep_map btrfs_trans_pending_ordered_map;
struct lockdep_map btrfs_ordered_extent_map;
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
+#ifdef CONFIG_BTRFS_DEBUG
spinlock_t ref_verify_lock;
struct rb_root block_tree;
-#endif
-#ifdef CONFIG_BTRFS_DEBUG
struct kobject *debug_kobj;
struct list_head allocated_roots;
@@ -887,6 +931,17 @@ struct btrfs_fs_info {
#define inode_to_fs_info(_inode) (BTRFS_I(_Generic((_inode), \
struct inode *: (_inode)))->root->fs_info)
+static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
+{
+ return mapping_gfp_constraint(mapping, ~__GFP_FS);
+}
+
+/* Return the minimal folio size of the fs. */
+static inline unsigned int btrfs_min_folio_size(struct btrfs_fs_info *fs_info)
+{
+ return 1U << (PAGE_SHIFT + fs_info->block_min_order);
+}
+
static inline u64 btrfs_get_fs_generation(const struct btrfs_fs_info *fs_info)
{
return READ_ONCE(fs_info->generation);
@@ -953,6 +1008,8 @@ static inline u64 btrfs_calc_metadata_size(const struct btrfs_fs_info *fs_info,
#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
sizeof(struct btrfs_item))
+#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) ((bytes) >> (fs_info)->sectorsize_bits)
+
static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
{
return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && fs_info->zone_size > 0;
@@ -971,6 +1028,13 @@ static inline u32 count_max_extents(const struct btrfs_fs_info *fs_info, u64 siz
return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
}
+static inline unsigned int btrfs_blocks_per_folio(const struct btrfs_fs_info *fs_info,
+ const struct folio *folio)
+{
+ return folio_size(folio) >> fs_info->sectorsize_bits;
+}
+
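Editor's note: the new size helpers all avoid 64-bit division by caching log2 values: BTRFS_BYTES_TO_BLKS() shifts a byte count right by sectorsize_bits, btrfs_blocks_per_folio() applies the same shift to folio_size(), and btrfs_min_folio_size() is 1 << (PAGE_SHIFT + block_min_order). A small stand-alone sketch of that arithmetic with assumed values (4K blocks, order-0 minimum folios, one 64K large folio):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT_ASSUMED 12u	/* assumption: 4K pages */

int main(void)
{
	uint32_t sectorsize = 4096, sectorsize_bits = 12; /* log2(sectorsize) */
	uint32_t block_min_order = 0;	/* 0: minimal folio is a single page */
	uint64_t bytes = 1u << 20;	/* a 1 MiB extent */
	uint64_t folio_size = 64 * 1024;/* a 64K large folio */

	/* BTRFS_BYTES_TO_BLKS(): bytes >> sectorsize_bits, no division. */
	printf("%llu bytes = %llu blocks of %u\n",
	       (unsigned long long)bytes,
	       (unsigned long long)(bytes >> sectorsize_bits), sectorsize);

	/* btrfs_blocks_per_folio(): folio_size >> sectorsize_bits. */
	printf("a %llu-byte folio holds %llu blocks\n",
	       (unsigned long long)folio_size,
	       (unsigned long long)(folio_size >> sectorsize_bits));

	/* btrfs_min_folio_size(): 1 << (PAGE_SHIFT + block_min_order). */
	printf("minimal folio size = %u bytes\n",
	       1u << (PAGE_SHIFT_ASSUMED + block_min_order));
	return 0;
}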
+bool __attribute_const__ btrfs_supported_blocksize(u32 blocksize);
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
enum btrfs_exclusive_operation type);
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
@@ -982,6 +1046,17 @@ void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
int btrfs_check_ioctl_vol_args_path(const struct btrfs_ioctl_vol_args *vol_args);
+u16 btrfs_csum_type_size(u16 type);
+int btrfs_super_csum_size(const struct btrfs_super_block *s);
+const char *btrfs_super_csum_name(u16 csum_type);
+const char *btrfs_super_csum_driver(u16 csum_type);
+size_t __attribute_const__ btrfs_get_num_csums(void);
+
+static inline bool btrfs_is_empty_uuid(const u8 *uuid)
+{
+ return uuid_is_null((const uuid_t *)uuid);
+}
+
/* Compatibility and incompatibility defines */
void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
const char *name);
@@ -1058,13 +1133,42 @@ static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
(unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR, \
&(fs_info)->fs_state)))
+static inline bool btrfs_is_shutdown(struct btrfs_fs_info *fs_info)
+{
+ return test_bit(BTRFS_FS_STATE_EMERGENCY_SHUTDOWN, &fs_info->fs_state);
+}
+
+static inline void btrfs_force_shutdown(struct btrfs_fs_info *fs_info)
+{
+ /*
+ * Here we do not want to use handle_fs_error(), which will mark the fs
+ * read-only.
+	 * Some call sites, like the shutdown ioctl, may mark the fs shut down
+	 * while the fs is frozen, and the thaw path handles RO and RW
+	 * filesystems differently.
+ *
+ * So here we only mark the fs error without flipping it RO.
+ */
+ WRITE_ONCE(fs_info->fs_error, -EIO);
+ if (!test_and_set_bit(BTRFS_FS_STATE_EMERGENCY_SHUTDOWN, &fs_info->fs_state))
+ btrfs_crit(fs_info, "emergency shutdown");
+}
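Editor's note: btrfs_force_shutdown() records the error unconditionally but gates the log message on test_and_set_bit(), so concurrent callers produce exactly one "emergency shutdown" line. The same log-once idiom in plain C11, with an atomic flag standing in for the kernel bit operations (names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag shutdown_reported = ATOMIC_FLAG_INIT;
static _Atomic int fs_error;

static void force_shutdown_sketch(void)
{
	/* Record the error unconditionally... */
	atomic_store(&fs_error, -5 /* -EIO */);

	/* ...but only the first caller to flip the flag prints the message. */
	if (!atomic_flag_test_and_set(&shutdown_reported))
		fprintf(stderr, "emergency shutdown\n");
}

int main(void)
{
	force_shutdown_sketch();
	force_shutdown_sketch();	/* second call stays silent */
	return 0;
}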
+
+/*
+ * We use folio flag owner_2 to indicate there is an ordered extent with
+ * unfinished IO.
+ */
+#define folio_test_ordered(folio) folio_test_owner_2(folio)
+#define folio_set_ordered(folio) folio_set_owner_2(folio)
+#define folio_clear_ordered(folio) folio_clear_owner_2(folio)
+
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
#define EXPORT_FOR_TESTS
-static inline int btrfs_is_testing(const struct btrfs_fs_info *fs_info)
+static inline bool btrfs_is_testing(const struct btrfs_fs_info *fs_info)
{
- return test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
+ return unlikely(test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state));
}
void btrfs_test_destroy_inode(struct inode *inode);
@@ -1073,9 +1177,9 @@ void btrfs_test_destroy_inode(struct inode *inode);
#define EXPORT_FOR_TESTS static
-static inline int btrfs_is_testing(const struct btrfs_fs_info *fs_info)
+static inline bool btrfs_is_testing(const struct btrfs_fs_info *fs_info)
{
- return 0;
+ return false;
}
#endif
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 29572dfaf878..b73e1dd97208 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -78,13 +78,10 @@ struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
}
/* Returns NULL if no extref found */
-struct btrfs_inode_extref *
-btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- const struct fscrypt_str *name,
- u64 inode_objectid, u64 ref_objectid, int ins_len,
- int cow)
+struct btrfs_inode_extref *btrfs_lookup_inode_extref(struct btrfs_root *root,
+ struct btrfs_path *path,
+ const struct fscrypt_str *name,
+ u64 inode_objectid, u64 ref_objectid)
{
int ret;
struct btrfs_key key;
@@ -93,7 +90,7 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
key.type = BTRFS_INODE_EXTREF_KEY;
key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);
- ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
return ERR_PTR(ret);
if (ret > 0)
@@ -109,7 +106,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
u64 inode_objectid, u64 ref_objectid,
u64 *index)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_inode_extref *extref;
struct extent_buffer *leaf;
@@ -129,9 +126,9 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0)
- ret = -ENOENT;
+ return -ENOENT;
if (ret < 0)
- goto out;
+ return ret;
/*
* Sanity check - did we find the right item for this name?
@@ -140,10 +137,9 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
*/
extref = btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
ref_objectid, name);
- if (!extref) {
+ if (unlikely(!extref)) {
btrfs_abort_transaction(trans, -ENOENT);
- ret = -ENOENT;
- goto out;
+ return -ENOENT;
}
leaf = path->nodes[0];
@@ -152,12 +148,8 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
*index = btrfs_inode_extref_index(leaf, extref);
if (del_len == item_size) {
- /*
- * Common case only one ref in the item, remove the
- * whole item.
- */
- ret = btrfs_del_item(trans, root, path);
- goto out;
+ /* Common case only one ref in the item, remove the whole item. */
+ return btrfs_del_item(trans, root, path);
}
ptr = (unsigned long)extref;
@@ -168,9 +160,6 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
btrfs_truncate_item(trans, path, item_size - del_len, 1);
-out:
- btrfs_free_path(path);
-
return ret;
}
@@ -191,8 +180,8 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
int del_len = name->len + sizeof(*ref);
key.objectid = inode_objectid;
- key.offset = ref_objectid;
key.type = BTRFS_INODE_REF_KEY;
+ key.offset = ref_objectid;
path = btrfs_alloc_path();
if (!path)
@@ -260,7 +249,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
int ret;
int ins_len = name->len + sizeof(*extref);
unsigned long ptr;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *leaf;
@@ -279,13 +268,13 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
path->slots[0],
ref_objectid,
name))
- goto out;
+ return ret;
btrfs_extend_item(trans, path, ins_len);
ret = 0;
}
if (ret < 0)
- goto out;
+ return ret;
leaf = path->nodes[0];
ptr = (unsigned long)btrfs_item_ptr(leaf, path->slots[0], char);
@@ -298,11 +287,8 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
ptr = (unsigned long)&extref->name;
write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
/* Will return 0, -ENOMEM, -EMLINK, or -EEXIST or anything from the CoW path */
@@ -319,14 +305,14 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
int ins_len = name->len + sizeof(*ref);
key.objectid = inode_objectid;
- key.offset = ref_objectid;
key.type = BTRFS_INODE_REF_KEY;
+ key.offset = ref_objectid;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- path->skip_release_on_error = 1;
+ path->skip_release_on_error = true;
ret = btrfs_insert_empty_item(trans, root, path, &key,
ins_len);
if (ret == -EEXIST) {
@@ -363,8 +349,6 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
ptr = (unsigned long)(ref + 1);
}
write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
-
out:
btrfs_free_path(path);
@@ -460,7 +444,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_truncate_control *control)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
@@ -497,8 +481,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
path->reada = READA_BACK;
key.objectid = control->ino;
- key.offset = (u64)-1;
key.type = (u8)-1;
+ key.offset = (u64)-1;
search_again:
/*
@@ -590,7 +574,6 @@ search_again:
num_dec = (orig_num_bytes - extent_num_bytes);
if (extent_start != 0)
control->sub_bytes += num_dec;
- btrfs_mark_buffer_dirty(trans, leaf);
} else {
extent_num_bytes =
btrfs_file_extent_disk_num_bytes(leaf, fi);
@@ -644,7 +627,7 @@ delete:
if (control->clear_extent_range) {
ret = btrfs_inode_clear_file_extent_range(control->inode,
clear_start, clear_len);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -683,7 +666,7 @@ delete:
btrfs_init_data_ref(&ref, control->ino, extent_offset,
btrfs_root_id(root), false);
ret = btrfs_free_extent(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -701,7 +684,7 @@ delete:
ret = btrfs_del_items(trans, root, path,
pending_del_slot,
pending_del_nr);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -734,13 +717,12 @@ delete:
}
out:
if (ret >= 0 && pending_del_nr) {
- int err;
+ int ret2;
- err = btrfs_del_items(trans, root, path, pending_del_slot,
- pending_del_nr);
- if (err) {
- btrfs_abort_transaction(trans, err);
- ret = err;
+ ret2 = btrfs_del_items(trans, root, path, pending_del_slot, pending_del_nr);
+ if (unlikely(ret2)) {
+ btrfs_abort_transaction(trans, ret2);
+ ret = ret2;
}
}
@@ -748,6 +730,5 @@ out:
if (!ret && control->last_size > new_size)
control->last_size = new_size;
- btrfs_free_path(path);
return ret;
}
diff --git a/fs/btrfs/inode-item.h b/fs/btrfs/inode-item.h
index c11b97fdccc4..6d9f5ad20646 100644
--- a/fs/btrfs/inode-item.h
+++ b/fs/btrfs/inode-item.h
@@ -101,13 +101,10 @@ int btrfs_lookup_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_key *location, int mod);
-struct btrfs_inode_extref *btrfs_lookup_inode_extref(
- struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- const struct fscrypt_str *name,
- u64 inode_objectid, u64 ref_objectid, int ins_len,
- int cow);
+struct btrfs_inode_extref *btrfs_lookup_inode_extref(struct btrfs_root *root,
+ struct btrfs_path *path,
+ const struct fscrypt_str *name,
+ u64 inode_objectid, u64 ref_objectid);
struct btrfs_inode_ref *btrfs_find_name_in_backref(const struct extent_buffer *leaf,
int slot,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 03fe0de2cd0d..c4bee47829ed 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9,6 +9,7 @@
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/fs_struct.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
@@ -71,6 +72,10 @@
#include "backref.h"
#include "raid-stripe-tree.h"
#include "fiemap.h"
+#include "delayed-inode.h"
+
+#define COW_FILE_RANGE_KEEP_LOCKED (1UL << 0)
+#define COW_FILE_RANGE_NO_INLINE (1UL << 1)
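Editor's note: these two bits replace the old pair of bool parameters (keep_locked, no_inline) of cow_file_range() with a single flags word, which keeps call sites self-documenting and leaves room for more options. A tiny sketch of the pattern with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

#define RANGE_KEEP_LOCKED (1UL << 0)
#define RANGE_NO_INLINE   (1UL << 1)

/* One flags argument instead of a train of opaque bools at the call site. */
static void cow_range_sketch(unsigned long start, unsigned long len,
			     unsigned long flags)
{
	bool keep_locked = flags & RANGE_KEEP_LOCKED;
	bool no_inline = flags & RANGE_NO_INLINE;

	printf("range [%lu, %lu): keep_locked=%d no_inline=%d\n",
	       start, start + len, keep_locked, no_inline);
}

int main(void)
{
	/* The flag names document intent, unlike bare "true, false" arguments. */
	cow_range_sketch(0, 4096, RANGE_KEEP_LOCKED);
	cow_range_sketch(4096, 8192, RANGE_KEEP_LOCKED | RANGE_NO_INLINE);
	return 0;
}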
struct btrfs_iget_args {
u64 ino;
@@ -127,7 +132,7 @@ static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
struct btrfs_fs_info *fs_info = warn->fs_info;
struct extent_buffer *eb;
struct btrfs_inode_item *inode_item;
- struct inode_fs_paths *ipath = NULL;
+ struct inode_fs_paths *ipath __free(inode_fs_paths) = NULL;
struct btrfs_root *local_root;
struct btrfs_key key;
unsigned int nofs_flag;
@@ -174,8 +179,10 @@ static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
return ret;
}
ret = paths_from_inode(inum, ipath);
- if (ret < 0)
+ if (ret < 0) {
+ btrfs_put_root(local_root);
goto err;
+ }
/*
* We deliberately ignore the bit ipath might have been too small to
@@ -190,7 +197,6 @@ static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
}
btrfs_put_root(local_root);
- free_ipath(ipath);
return 0;
err:
@@ -198,7 +204,6 @@ err:
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
warn->logical, warn->mirror_num, root, inum, offset, ret);
- free_ipath(ipath);
return ret;
}
@@ -230,21 +235,21 @@ static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off
if (logical == U64_MAX) {
btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
btrfs_warn_rl(fs_info,
-"csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
+"csum failed root %lld ino %llu off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
btrfs_root_id(inode->root), btrfs_ino(inode), file_off,
- CSUM_FMT_VALUE(csum_size, csum),
- CSUM_FMT_VALUE(csum_size, csum_expected),
+ BTRFS_CSUM_FMT_VALUE(csum_size, csum),
+ BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
mirror_num);
return;
}
logical += file_off;
btrfs_warn_rl(fs_info,
-"csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
+"csum failed root %lld ino %llu off %llu logical %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
btrfs_root_id(inode->root),
btrfs_ino(inode), file_off, logical,
- CSUM_FMT_VALUE(csum_size, csum),
- CSUM_FMT_VALUE(csum_size, csum_expected),
+ BTRFS_CSUM_FMT_VALUE(csum_size, csum),
+ BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
mirror_num);
ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
@@ -308,26 +313,26 @@ static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
const u32 csum_size = root->fs_info->csum_size;
/* For data reloc tree, it's better to do a backref lookup instead. */
- if (btrfs_root_id(root) == BTRFS_DATA_RELOC_TREE_OBJECTID)
+ if (btrfs_is_data_reloc_root(root))
return print_data_reloc_error(inode, logical_start, csum,
csum_expected, mirror_num);
/* Output without objectid, which is more meaningful */
if (btrfs_root_id(root) >= BTRFS_LAST_FREE_OBJECTID) {
btrfs_warn_rl(root->fs_info,
-"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
+"csum failed root %lld ino %lld off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
btrfs_root_id(root), btrfs_ino(inode),
logical_start,
- CSUM_FMT_VALUE(csum_size, csum),
- CSUM_FMT_VALUE(csum_size, csum_expected),
+ BTRFS_CSUM_FMT_VALUE(csum_size, csum),
+ BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
mirror_num);
} else {
btrfs_warn_rl(root->fs_info,
-"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
+"csum failed root %llu ino %llu off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
btrfs_root_id(root), btrfs_ino(inode),
logical_start,
- CSUM_FMT_VALUE(csum_size, csum),
- CSUM_FMT_VALUE(csum_size, csum_expected),
+ BTRFS_CSUM_FMT_VALUE(csum_size, csum),
+ BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
mirror_num);
}
}
@@ -367,7 +372,7 @@ int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
}
/*
- * Unock inode i_rwsem.
+ * Unlock inode i_rwsem.
*
* ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
* to decide whether the lock acquired is shared or exclusive.
@@ -393,39 +398,20 @@ void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
* extent (btrfs_finish_ordered_io()).
*/
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
- struct folio *locked_folio,
u64 offset, u64 bytes)
{
- unsigned long index = offset >> PAGE_SHIFT;
- unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
- u64 page_start = 0, page_end = 0;
+ pgoff_t index = offset >> PAGE_SHIFT;
+ const pgoff_t end_index = (offset + bytes - 1) >> PAGE_SHIFT;
struct folio *folio;
- if (locked_folio) {
- page_start = folio_pos(locked_folio);
- page_end = page_start + folio_size(locked_folio) - 1;
- }
-
while (index <= end_index) {
- /*
- * For locked page, we will call btrfs_mark_ordered_io_finished
- * through btrfs_mark_ordered_io_finished() on it
- * in run_delalloc_range() for the error handling, which will
- * clear page Ordered and run the ordered extent accounting.
- *
- * Here we can't just clear the Ordered bit, or
- * btrfs_mark_ordered_io_finished() would skip the accounting
- * for the page range, and the ordered extent will never finish.
- */
- if (locked_folio && index == (page_start >> PAGE_SHIFT)) {
+ folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
+ if (IS_ERR(folio)) {
index++;
continue;
}
- folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
- index++;
- if (IS_ERR(folio))
- continue;
+ index = folio_next_index(folio);
/*
* Here we just clear all Ordered bits for every page in the
* range, then btrfs_mark_ordered_io_finished() will handle
@@ -436,23 +422,6 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
folio_put(folio);
}
- if (locked_folio) {
- /* The locked page covers the full range, nothing needs to be done */
- if (bytes + offset <= page_start + folio_size(locked_folio))
- return;
- /*
- * In case this page belongs to the delalloc range being
- * instantiated then skip it, since the first page of a range is
- * going to be properly cleaned up by the caller of
- * run_delalloc_range
- */
- if (page_start >= offset && page_end <= (offset + bytes - 1)) {
- bytes = offset + bytes - folio_pos(locked_folio) -
- folio_size(locked_folio);
- offset = folio_pos(locked_folio) + folio_size(locked_folio);
- }
- }
-
return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}
@@ -461,18 +430,18 @@ static int btrfs_dirty_inode(struct btrfs_inode *inode);
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
struct btrfs_new_inode_args *args)
{
- int err;
+ int ret;
if (args->default_acl) {
- err = __btrfs_set_acl(trans, args->inode, args->default_acl,
+ ret = __btrfs_set_acl(trans, args->inode, args->default_acl,
ACL_TYPE_DEFAULT);
- if (err)
- return err;
+ if (ret)
+ return ret;
}
if (args->acl) {
- err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
- if (err)
- return err;
+ ret = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
+ if (ret)
+ return ret;
}
if (!args->default_acl && !args->acl)
cache_no_acl(args->inode);
@@ -527,8 +496,8 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
size_t datasize;
key.objectid = btrfs_ino(inode);
- key.offset = 0;
key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = 0;
datasize = btrfs_file_extent_calc_inline_size(cur_size);
ret = btrfs_insert_empty_item(trans, root, path, &key,
@@ -564,7 +533,6 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
kunmap_local(kaddr);
folio_put(folio);
}
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
/*
@@ -605,23 +573,14 @@ static bool can_cow_file_range_inline(struct btrfs_inode *inode,
if (offset != 0)
return false;
- /*
- * Due to the page size limit, for subpage we can only trigger the
- * writeback for the dirty sectors of page, that means data writeback
- * is doing more writeback than what we want.
- *
- * This is especially unexpected for some call sites like fallocate,
- * where we only increase i_size after everything is done.
- * This means we can trigger inline extent even if we didn't want to.
- * So here we skip inline extent creation completely.
- */
- if (fs_info->sectorsize != PAGE_SIZE)
- return false;
-
/* Inline extents are limited to sectorsize. */
if (size > fs_info->sectorsize)
return false;
+ /* We do not allow a non-compressed extent to be as large as block size. */
+ if (data_len >= fs_info->sectorsize)
+ return false;
+
/* We cannot exceed the maximum inline data size. */
if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
return false;
@@ -634,6 +593,10 @@ static bool can_cow_file_range_inline(struct btrfs_inode *inode,
if (size < i_size_read(&inode->vfs_inode))
return false;
+ /* Encrypted file cannot be inlined. */
+ if (IS_ENCRYPTED(&inode->vfs_inode))
+ return false;
+
return true;
}
@@ -677,7 +640,7 @@ static noinline int __cow_file_range_inline(struct btrfs_inode *inode,
drop_args.replace_extent = true;
drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
ret = btrfs_drop_extents(trans, root, inode, &drop_args);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -685,7 +648,7 @@ static noinline int __cow_file_range_inline(struct btrfs_inode *inode,
ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
size, compressed_size, compress_type,
compressed_folio, update_i_size);
- if (ret && ret != -ENOSPC) {
+ if (unlikely(ret && ret != -ENOSPC)) {
btrfs_abort_transaction(trans, ret);
goto out;
} else if (ret == -ENOSPC) {
@@ -695,7 +658,7 @@ static noinline int __cow_file_range_inline(struct btrfs_inode *inode,
btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
ret = btrfs_update_inode(trans, inode);
- if (ret && ret != -ENOSPC) {
+ if (unlikely(ret && ret != -ENOSPC)) {
btrfs_abort_transaction(trans, ret);
goto out;
} else if (ret == -ENOSPC) {
@@ -711,7 +674,7 @@ out:
* And at reserve time, it's always aligned to page size, so
* just free one page here.
*/
- btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE, NULL);
+ btrfs_qgroup_free_data(inode, NULL, 0, fs_info->sectorsize, NULL);
btrfs_free_path(path);
btrfs_end_transaction(trans);
return ret;
@@ -734,12 +697,12 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode,
if (!can_cow_file_range_inline(inode, offset, size, compressed_size))
return 1;
- lock_extent(&inode->io_tree, offset, end, &cached);
+ btrfs_lock_extent(&inode->io_tree, offset, end, &cached);
ret = __cow_file_range_inline(inode, size, compressed_size,
compress_type, compressed_folio,
update_i_size);
if (ret > 0) {
- unlock_extent(&inode->io_tree, offset, end, &cached);
+ btrfs_unlock_extent(&inode->io_tree, offset, end, &cached);
return ret;
}
@@ -825,33 +788,19 @@ static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
struct btrfs_fs_info *fs_info = inode->root->fs_info;
if (!btrfs_inode_can_compress(inode)) {
- WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
- KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
- btrfs_ino(inode));
+ DEBUG_WARN("BTRFS: unexpected compression for ino %llu", btrfs_ino(inode));
return 0;
}
- /*
- * Only enable sector perfect compression for experimental builds.
- *
- * This is a big feature change for subpage cases, and can hit
- * different corner cases, so only limit this feature for
- * experimental build for now.
- *
- * ETA for moving this out of experimental builds is 6.15.
- */
- if (fs_info->sectorsize < PAGE_SIZE &&
- !IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL)) {
- if (!PAGE_ALIGNED(start) ||
- !PAGE_ALIGNED(end + 1))
- return 0;
- }
+ /* Defrag ioctl takes precedence over mount options and properties. */
+ if (inode->defrag_compress == BTRFS_DEFRAG_DONT_COMPRESS)
+ return 0;
+ if (BTRFS_COMPRESS_NONE < inode->defrag_compress &&
+ inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES)
+ return 1;
/* force compress */
if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
return 1;
- /* defrag ioctl */
- if (inode->defrag_compress)
- return 1;
/* bad compression ratios */
if (inode->flags & BTRFS_INODE_NOCOMPRESS)
return 0;
@@ -871,21 +820,20 @@ static inline void inode_should_defrag(struct btrfs_inode *inode,
btrfs_add_inode_defrag(inode, small_write);
}
-static int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
+static int extent_range_clear_dirty_for_io(struct btrfs_inode *inode, u64 start, u64 end)
{
- unsigned long end_index = end >> PAGE_SHIFT;
+ const pgoff_t end_index = end >> PAGE_SHIFT;
struct folio *folio;
int ret = 0;
- for (unsigned long index = start >> PAGE_SHIFT;
- index <= end_index; index++) {
- folio = filemap_get_folio(inode->i_mapping, index);
+ for (pgoff_t index = start >> PAGE_SHIFT; index <= end_index; index++) {
+ folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
if (IS_ERR(folio)) {
if (!ret)
ret = PTR_ERR(folio);
continue;
}
- btrfs_folio_clamp_clear_dirty(inode_to_fs_info(inode), folio, start,
+ btrfs_folio_clamp_clear_dirty(inode->root->fs_info, folio, start,
end + 1 - start);
folio_put(folio);
}
@@ -912,19 +860,25 @@ static void compress_file_range(struct btrfs_work *work)
struct btrfs_inode *inode = async_chunk->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct address_space *mapping = inode->vfs_inode.i_mapping;
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
+ const u32 min_folio_size = btrfs_min_folio_size(fs_info);
u64 blocksize = fs_info->sectorsize;
u64 start = async_chunk->start;
u64 end = async_chunk->end;
u64 actual_end;
u64 i_size;
int ret = 0;
- struct folio **folios;
+ struct folio **folios = NULL;
unsigned long nr_folios;
unsigned long total_compressed = 0;
unsigned long total_in = 0;
- unsigned int poff;
+ unsigned int loff;
int i;
int compress_type = fs_info->compress_type;
+ int compress_level = fs_info->compress_level;
+
+ if (unlikely(btrfs_is_shutdown(fs_info)))
+ goto cleanup_and_bail_uncompressed;
inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);
@@ -933,7 +887,7 @@ static void compress_file_range(struct btrfs_work *work)
* Otherwise applications with the file mmap'd can wander in and change
* the page contents while we are compressing them.
*/
- ret = extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
+ ret = extent_range_clear_dirty_for_io(inode, start, end);
/*
* All the folios should have been locked thus no failure.
@@ -959,8 +913,8 @@ static void compress_file_range(struct btrfs_work *work)
actual_end = min_t(u64, i_size, end + 1);
again:
folios = NULL;
- nr_folios = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
- nr_folios = min_t(unsigned long, nr_folios, BTRFS_MAX_COMPRESSED_PAGES);
+ nr_folios = (end >> min_folio_shift) - (start >> min_folio_shift) + 1;
+ nr_folios = min_t(unsigned long, nr_folios, BTRFS_MAX_COMPRESSED >> min_folio_shift);
/*
* we don't want to send crud past the end of i_size through
@@ -1007,25 +961,27 @@ again:
goto cleanup_and_bail_uncompressed;
}
- if (inode->defrag_compress)
+ if (0 < inode->defrag_compress && inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES) {
compress_type = inode->defrag_compress;
- else if (inode->prop_compress)
+ compress_level = inode->defrag_compress_level;
+ } else if (inode->prop_compress) {
compress_type = inode->prop_compress;
+ }
/* Compression level is applied here. */
- ret = btrfs_compress_folios(compress_type | (fs_info->compress_level << 4),
- mapping, start, folios, &nr_folios, &total_in,
+ ret = btrfs_compress_folios(compress_type, compress_level,
+ inode, start, folios, &nr_folios, &total_in,
&total_compressed);
if (ret)
goto mark_incompressible;
/*
- * Zero the tail end of the last page, as we might be sending it down
+ * Zero the tail end of the last folio, as we might be sending it down
* to disk.
*/
- poff = offset_in_page(total_compressed);
- if (poff)
- folio_zero_range(folios[nr_folios - 1], poff, PAGE_SIZE - poff);
+ loff = (total_compressed & (min_folio_size - 1));
+ if (loff)
+ folio_zero_range(folios[nr_folios - 1], loff, min_folio_size - loff);
/*
* Try to create an inline extent.
@@ -1129,19 +1085,13 @@ static void submit_uncompressed_range(struct btrfs_inode *inode,
&wbc, false);
wbc_detach_inode(&wbc);
if (ret < 0) {
- btrfs_cleanup_ordered_extents(inode, locked_folio,
- start, end - start + 1);
- if (locked_folio) {
- const u64 page_start = folio_pos(locked_folio);
-
- folio_start_writeback(locked_folio);
- folio_end_writeback(locked_folio);
- btrfs_mark_ordered_io_finished(inode, locked_folio,
- page_start, PAGE_SIZE,
- !ret);
- mapping_set_error(locked_folio->mapping, ret);
- folio_unlock(locked_folio);
- }
+ if (locked_folio)
+ btrfs_folio_end_lock(inode->root->fs_info, locked_folio,
+ start, async_extent->ram_size);
+ btrfs_err_rl(inode->root->fs_info,
+ "%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
+ __func__, btrfs_root_id(inode->root),
+ btrfs_ino(inode), start, async_extent->ram_size, ret);
}
}
@@ -1160,6 +1110,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
struct extent_state *cached = NULL;
struct extent_map *em;
int ret = 0;
+ bool free_pages = false;
u64 start = async_extent->start;
u64 end = async_extent->start + async_extent->ram_size - 1;
@@ -1180,14 +1131,17 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
}
if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
+ ASSERT(!async_extent->folios);
+ ASSERT(async_extent->nr_folios == 0);
submit_uncompressed_range(inode, async_extent, locked_folio);
+ free_pages = true;
goto done;
}
ret = btrfs_reserve_extent(root, async_extent->ram_size,
async_extent->compressed_size,
async_extent->compressed_size,
- 0, *alloc_hint, &ins, 1, 1);
+ 0, *alloc_hint, &ins, true, true);
if (ret) {
/*
* We can't reserve contiguous space for the compressed size.
@@ -1196,10 +1150,11 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
* fall back to uncompressed.
*/
submit_uncompressed_range(inode, async_extent, locked_folio);
+ free_pages = true;
goto done;
}
- lock_extent(io_tree, start, end, &cached);
+ btrfs_lock_extent(io_tree, start, end, &cached);
/* Here we're doing allocation and writeback of the compressed pages */
file_extent.disk_bytenr = ins.objectid;
@@ -1214,10 +1169,10 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
ret = PTR_ERR(em);
goto out_free_reserve;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
- 1 << BTRFS_ORDERED_COMPRESSED);
+ 1U << BTRFS_ORDERED_COMPRESSED);
if (IS_ERR(ordered)) {
btrfs_drop_extent_map_range(inode, start, end, false);
ret = PTR_ERR(ordered);
@@ -1237,12 +1192,14 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
done:
if (async_chunk->blkcg_css)
kthread_associate_blkcg(NULL);
+ if (free_pages)
+ free_async_extent_pages(async_extent);
kfree(async_extent);
return;
out_free_reserve:
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
- btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
extent_clear_unlock_delalloc(inode, start, end,
NULL, &cached,
@@ -1269,7 +1226,7 @@ u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
u64 alloc_hint = 0;
read_lock(&em_tree->lock);
- em = search_extent_mapping(em_tree, start, num_bytes);
+ em = btrfs_search_extent_mapping(em_tree, start, num_bytes);
if (em) {
/*
* if block start isn't an actual block number then find the
@@ -1277,15 +1234,15 @@ u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
* block is also bogus then just don't worry about it.
*/
if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
- free_extent_map(em);
- em = search_extent_mapping(em_tree, 0, 0);
+ btrfs_free_extent_map(em);
+ em = btrfs_search_extent_mapping(em_tree, 0, 0);
if (em && em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
- alloc_hint = extent_map_block_start(em);
+ alloc_hint = btrfs_extent_map_block_start(em);
if (em)
- free_extent_map(em);
+ btrfs_free_extent_map(em);
} else {
- alloc_hint = extent_map_block_start(em);
- free_extent_map(em);
+ alloc_hint = btrfs_extent_map_block_start(em);
+ btrfs_free_extent_map(em);
}
}
read_unlock(&em_tree->lock);
@@ -1302,29 +1259,26 @@ u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
* locked_folio is the folio that writepage had locked already. We use
* it to make sure we don't do extra locks or unlocks.
*
- * When this function fails, it unlocks all pages except @locked_folio.
+ * When this function fails, it unlocks all folios except @locked_folio.
*
* When this function successfully creates an inline extent, it returns 1 and
- * unlocks all pages including locked_folio and starts I/O on them.
- * (In reality inline extents are limited to a single page, so locked_folio is
- * the only page handled anyway).
+ * unlocks all folios including locked_folio and starts I/O on them.
+ * (In reality inline extents are limited to a single block, so locked_folio is
+ * the only folio handled anyway).
*
- * When this function succeed and creates a normal extent, the page locking
+ * When this function succeeds and creates a normal extent, the folio locking
* status depends on the passed in flags:
*
- * - If @keep_locked is set, all pages are kept locked.
- * - Else all pages except for @locked_folio are unlocked.
+ * - If COW_FILE_RANGE_KEEP_LOCKED flag is set, all folios are kept locked.
+ * - Else all folios except for @locked_folio are unlocked.
*
* When a failure happens in the second or later iteration of the
- * while-loop, the ordered extents created in previous iterations are kept
- * intact. So, the caller must clean them up by calling
- * btrfs_cleanup_ordered_extents(). See btrfs_run_delalloc_range() for
- * example.
+ * while-loop, the ordered extents created in previous iterations are cleaned up.
*/
static noinline int cow_file_range(struct btrfs_inode *inode,
struct folio *locked_folio, u64 start,
u64 end, u64 *done_offset,
- bool keep_locked, bool no_inline)
+ unsigned long flags)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -1341,6 +1295,11 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
unsigned long page_ops;
int ret = 0;
+ if (unlikely(btrfs_is_shutdown(fs_info))) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
if (btrfs_is_free_space_inode(inode)) {
ret = -EINVAL;
goto out_unlock;
@@ -1352,7 +1311,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
inode_should_defrag(inode, start, end, num_bytes, SZ_64K);
- if (!no_inline) {
+ if (!(flags & COW_FILE_RANGE_NO_INLINE)) {
/* lets try to make an inline extent */
ret = cow_file_range_inline(inode, locked_folio, start, end, 0,
BTRFS_COMPRESS_NONE, NULL, false);
@@ -1373,6 +1332,17 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes);
/*
+ * We're not doing compressed IO, don't unlock the first page (which
+ * the caller expects to stay locked), don't clear any dirty bits and
+ * don't set any writeback bits.
+ *
+ * Do set the Ordered (Private2) bit so we know this page was properly
+	 * set up for writepage.
+ */
+ page_ops = ((flags & COW_FILE_RANGE_KEEP_LOCKED) ? 0 : PAGE_UNLOCK);
+ page_ops |= PAGE_SET_ORDERED;
+
+ /*
* Relocation relies on the relocated extents to have exactly the same
* size as the original extents. Normally writeback for relocation data
* extents follows a NOCOW path because relocation preallocates the
@@ -1394,7 +1364,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
ret = btrfs_reserve_extent(root, num_bytes, num_bytes,
min_alloc_size, 0, alloc_hint,
- &ins, 1, 1);
+ &ins, true, true);
if (ret == -EAGAIN) {
/*
* btrfs_reserve_extent only returns -EAGAIN for zoned
@@ -1415,8 +1385,13 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
continue;
}
if (done_offset) {
- *done_offset = start - 1;
- return 0;
+ /*
+ * Move @end to the end of the processed range,
+ * and exit the loop to unlock the processed extents.
+ */
+ end = start - 1;
+ ret = 0;
+ break;
}
ret = -ENOSPC;
}
@@ -1431,24 +1406,28 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
file_extent.offset = 0;
file_extent.compression = BTRFS_COMPRESS_NONE;
- lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1,
- &cached);
+ /*
+ * Locked range will be released either during error clean up or
+ * after the whole range is finished.
+ */
+ btrfs_lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1,
+ &cached);
em = btrfs_create_io_em(inode, start, &file_extent,
BTRFS_ORDERED_REGULAR);
if (IS_ERR(em)) {
- unlock_extent(&inode->io_tree, start,
- start + cur_alloc_size - 1, &cached);
+ btrfs_unlock_extent(&inode->io_tree, start,
+ start + cur_alloc_size - 1, &cached);
ret = PTR_ERR(em);
goto out_reserve;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
- 1 << BTRFS_ORDERED_REGULAR);
+ 1U << BTRFS_ORDERED_REGULAR);
if (IS_ERR(ordered)) {
- unlock_extent(&inode->io_tree, start,
- start + cur_alloc_size - 1, &cached);
+ btrfs_unlock_extent(&inode->io_tree, start,
+ start + cur_alloc_size - 1, &cached);
ret = PTR_ERR(ordered);
goto out_drop_extent_cache;
}
@@ -1476,21 +1455,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
- /*
- * We're not doing compressed IO, don't unlock the first page
- * (which the caller expects to stay locked), don't clear any
- * dirty bits and don't set any writeback bits
- *
- * Do set the Ordered flag so we know this page was
- * properly setup for writepage.
- */
- page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
- page_ops |= PAGE_SET_ORDERED;
-
- extent_clear_unlock_delalloc(inode, start, start + cur_alloc_size - 1,
- locked_folio, &cached,
- EXTENT_LOCKED | EXTENT_DELALLOC,
- page_ops);
if (num_bytes < cur_alloc_size)
num_bytes = 0;
else
@@ -1507,6 +1471,8 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
if (ret)
goto out_unlock;
}
+ extent_clear_unlock_delalloc(inode, orig_start, end, locked_folio, &cached,
+ EXTENT_LOCKED | EXTENT_DELALLOC, page_ops);
done:
if (done_offset)
*done_offset = end;
@@ -1516,7 +1482,7 @@ out_drop_extent_cache:
btrfs_drop_extent_map_range(inode, start, start + cur_alloc_size - 1, false);
out_reserve:
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
- btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
out_unlock:
/*
* Now, we have three regions to clean up:
@@ -1527,35 +1493,30 @@ out_unlock:
* We process each region below.
*/
- clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
- EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
- page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
-
/*
* For the range (1). We have already instantiated the ordered extents
- * for this region. They are cleaned up by
- * btrfs_cleanup_ordered_extents() in e.g,
- * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
- * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
- * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
- * function.
+	 * for this region, thus we need to clean up those ordered extents.
+ * EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV
+ * are also handled by the ordered extents cleanup.
*
- * However, in case of @keep_locked, we still need to unlock the pages
- * (except @locked_folio) to ensure all the pages are unlocked.
+	 * So here we only clear the EXTENT_LOCKED and EXTENT_DELALLOC flags, and
+	 * finish the writeback of the involved folios, which will never be submitted.
*/
- if (keep_locked && orig_start < start) {
+ if (orig_start < start) {
+ clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
+ page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
+
if (!locked_folio)
mapping_set_error(inode->vfs_inode.i_mapping, ret);
+
+ btrfs_cleanup_ordered_extents(inode, orig_start, start - orig_start);
extent_clear_unlock_delalloc(inode, orig_start, start - 1,
- locked_folio, NULL, 0, page_ops);
+ locked_folio, NULL, clear_bits, page_ops);
}
- /*
- * At this point we're unlocked, we want to make sure we're only
- * clearing these flags under the extent lock, so lock the rest of the
- * range and clear everything up.
- */
- lock_extent(&inode->io_tree, start, end, NULL);
+ clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+ EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
+ page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
/*
* For the range (2). If we reserved an extent for our delalloc range
@@ -1589,6 +1550,11 @@ out_unlock:
btrfs_qgroup_free_data(inode, NULL, start + cur_alloc_size,
end - start - cur_alloc_size + 1, NULL);
}
+ btrfs_err(fs_info,
+"%s failed, root=%llu inode=%llu start=%llu len=%llu cur_offset=%llu cur_alloc_size=%llu: %d",
+ __func__, btrfs_root_id(inode->root),
+ btrfs_ino(inode), orig_start, end + 1 - orig_start,
+ start, cur_alloc_size, ret);
return ret;
}
@@ -1626,8 +1592,8 @@ static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_
PAGE_SHIFT;
while (!list_empty(&async_chunk->extents)) {
- async_extent = list_entry(async_chunk->extents.next,
- struct async_extent, list);
+ async_extent = list_first_entry(&async_chunk->extents,
+ struct async_extent, list);
list_del(&async_extent->list);
submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
}
@@ -1741,7 +1707,7 @@ static noinline int run_delalloc_cow(struct btrfs_inode *inode,
while (start <= end) {
ret = cow_file_range(inode, locked_folio, start, end,
- &done_offset, true, false);
+ &done_offset, COW_FILE_RANGE_KEEP_LOCKED);
if (ret)
return ret;
extent_write_locked_range(&inode->vfs_inode, locked_folio,
@@ -1797,9 +1763,9 @@ static int fallback_to_cow(struct btrfs_inode *inode,
* group that contains that extent to RO mode and therefore force COW
* when starting writeback.
*/
- lock_extent(io_tree, start, end, &cached_state);
- count = count_range_bits(io_tree, &range_start, end, range_bytes,
- EXTENT_NORESERVE, 0, NULL);
+ btrfs_lock_extent(io_tree, start, end, &cached_state);
+ count = btrfs_count_range_bits(io_tree, &range_start, end, range_bytes,
+ EXTENT_NORESERVE, 0, NULL);
if (count > 0 || is_space_ino || is_reloc_ino) {
u64 bytes = count;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
@@ -1809,22 +1775,28 @@ static int fallback_to_cow(struct btrfs_inode *inode,
bytes = range_bytes;
spin_lock(&sinfo->lock);
- btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
+ btrfs_space_info_update_bytes_may_use(sinfo, bytes);
spin_unlock(&sinfo->lock);
if (count > 0)
- clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
- NULL);
+ btrfs_clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
+ &cached_state);
}
- unlock_extent(io_tree, start, end, &cached_state);
+ btrfs_unlock_extent(io_tree, start, end, &cached_state);
/*
* Don't try to create inline extents, as a mix of inline extent that
* is written out and unlocked directly and a normal NOCOW extent
* doesn't work.
+ *
+	 * Also, here we do not unlock the folios after a successful run.
+ * The folios will be unlocked after everything is finished, or by error handling.
+ *
+ * This is to ensure error handling won't need to clear dirty/ordered flags without
+ * a locked folio, which can race with writeback.
*/
- ret = cow_file_range(inode, locked_folio, start, end, NULL, false,
- true);
+ ret = cow_file_range(inode, locked_folio, start, end, NULL,
+ COW_FILE_RANGE_NO_INLINE | COW_FILE_RANGE_KEEP_LOCKED);
ASSERT(ret != 1);
return ret;
}
@@ -1837,7 +1809,6 @@ struct can_nocow_file_extent_args {
/* End file offset (inclusive) of the range we want to NOCOW. */
u64 end;
bool writeback_path;
- bool strict;
/*
* Free the path passed to can_nocow_file_extent() once it's not needed
* anymore.
@@ -1892,8 +1863,7 @@ static int can_nocow_file_extent(struct btrfs_path *path,
* for its subvolume was created, then this implies the extent is shared,
* hence we must COW.
*/
- if (!args->strict &&
- btrfs_file_extent_generation(leaf, fi) <=
+ if (btrfs_file_extent_generation(leaf, fi) <=
btrfs_root_last_snapshot(&root->root_item))
goto out;
@@ -1922,9 +1892,8 @@ static int can_nocow_file_extent(struct btrfs_path *path,
*/
btrfs_release_path(path);
- ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
- key->offset - args->file_extent.offset,
- args->file_extent.disk_bytenr, args->strict, path);
+ ret = btrfs_cross_ref_exist(inode, key->offset - args->file_extent.offset,
+ args->file_extent.disk_bytenr, path);
WARN_ON_ONCE(ret > 0 && is_freespace_inode);
if (ret != 0)
goto out;
@@ -1970,8 +1939,74 @@ static int can_nocow_file_extent(struct btrfs_path *path,
return ret < 0 ? ret : can_nocow;
}
+static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio,
+ struct extent_state **cached,
+ struct can_nocow_file_extent_args *nocow_args,
+ u64 file_pos, bool is_prealloc)
+{
+ struct btrfs_ordered_extent *ordered;
+ const u64 len = nocow_args->file_extent.num_bytes;
+ const u64 end = file_pos + len - 1;
+ int ret = 0;
+
+ btrfs_lock_extent(&inode->io_tree, file_pos, end, cached);
+
+ if (is_prealloc) {
+ struct extent_map *em;
+
+ em = btrfs_create_io_em(inode, file_pos, &nocow_args->file_extent,
+ BTRFS_ORDERED_PREALLOC);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto error;
+ }
+ btrfs_free_extent_map(em);
+ }
+
+ ordered = btrfs_alloc_ordered_extent(inode, file_pos, &nocow_args->file_extent,
+ is_prealloc
+ ? (1U << BTRFS_ORDERED_PREALLOC)
+ : (1U << BTRFS_ORDERED_NOCOW));
+ if (IS_ERR(ordered)) {
+ if (is_prealloc)
+ btrfs_drop_extent_map_range(inode, file_pos, end, false);
+ ret = PTR_ERR(ordered);
+ goto error;
+ }
+
+ if (btrfs_is_data_reloc_root(inode->root))
+ /*
+ * Errors are handled later, as we must prevent
+		 * extent_clear_unlock_delalloc() in the error handler from freeing
+		 * the metadata of the created ordered extent.
+ */
+ ret = btrfs_reloc_clone_csums(ordered);
+ btrfs_put_ordered_extent(ordered);
+
+ if (ret < 0)
+ goto error;
+ extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
+ EXTENT_LOCKED | EXTENT_DELALLOC |
+ EXTENT_CLEAR_DATA_RESV,
+ PAGE_SET_ORDERED);
+ return ret;
+
+error:
+ btrfs_cleanup_ordered_extents(inode, file_pos, len);
+ extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
+ EXTENT_LOCKED | EXTENT_DELALLOC |
+ EXTENT_CLEAR_DATA_RESV,
+ PAGE_UNLOCK | PAGE_START_WRITEBACK |
+ PAGE_END_WRITEBACK);
+ btrfs_err(inode->root->fs_info,
+ "%s failed, root=%lld inode=%llu start=%llu len=%llu: %d",
+ __func__, btrfs_root_id(inode->root), btrfs_ino(inode),
+ file_pos, len, ret);
+ return ret;
+}
+
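
The new nocow_one_range() helper above either completes its whole step or undoes what it created before returning. The following is a small self-contained sketch of that all-or-nothing helper shape only; every name below is invented for the example and this is not the kernel logic itself.

#include <stdio.h>

/*
 * Sketch of an "all or nothing" step: each resource acquired inside the
 * helper is released on its own error path, so the caller only has to
 * track whether the step as a whole succeeded.
 */
static int setup_mapping(int fail)	{ return fail ? -1 : 0; }
static void teardown_mapping(void)	{ printf("  undo mapping\n"); }
static int setup_ordered(int fail)	{ return fail ? -1 : 0; }

static int one_range_step(int fail_at)
{
	int ret;

	ret = setup_mapping(fail_at == 1);
	if (ret)
		goto error;

	ret = setup_ordered(fail_at == 2);
	if (ret) {
		teardown_mapping();
		goto error;
	}

	printf("  step fully committed\n");
	return 0;

error:
	printf("  step failed, nothing left half-done\n");
	return ret;
}

int main(void)
{
	for (int fail_at = 0; fail_at <= 2; fail_at++) {
		printf("fail_at=%d:\n", fail_at);
		one_range_step(fail_at);
	}
	return 0;
}
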
/*
- * when nowcow writeback call back. This checks for snapshots or COW copies
+ * When nocow writeback calls back. This checks for snapshots or COW copies
* of the extents that exist in the file, and COWs the file as required.
*
* If no cow copies or snapshots exist, we write directly to the existing
@@ -1983,13 +2018,28 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_root *root = inode->root;
- struct btrfs_path *path;
+ struct btrfs_path *path = NULL;
u64 cow_start = (u64)-1;
+ /*
+ * If not 0, represents the inclusive end of the last fallback_to_cow()
+ * range. Only for error handling.
+ *
+	 * The same goes for nocow_end: it avoids cleaning up again a range
+	 * that nocow_one_range() has already cleaned up.
+ */
+ u64 cow_end = 0;
+ u64 nocow_end = 0;
u64 cur_offset = start;
int ret;
bool check_prev = true;
u64 ino = btrfs_ino(inode);
struct can_nocow_file_extent_args nocow_args = { 0 };
+ /* The range that has ordered extent(s). */
+ u64 oe_cleanup_start;
+ u64 oe_cleanup_len = 0;
+ /* The range that is untouched. */
+ u64 untouched_start;
+ u64 untouched_len = 0;
/*
* Normally on a zoned device we're only doing COW writes, but in case
@@ -1998,6 +2048,10 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
*/
ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root));
+ if (unlikely(btrfs_is_shutdown(fs_info))) {
+ ret = -EIO;
+ goto error;
+ }
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
@@ -2009,15 +2063,12 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
while (cur_offset <= end) {
struct btrfs_block_group *nocow_bg = NULL;
- struct btrfs_ordered_extent *ordered;
struct btrfs_key found_key;
struct btrfs_file_extent_item *fi;
struct extent_buffer *leaf;
struct extent_state *cached_state = NULL;
u64 extent_end;
- u64 nocow_end;
int extent_type;
- bool is_prealloc;
ret = btrfs_lookup_file_extent(NULL, root, path, ino,
cur_offset, 0);
@@ -2072,12 +2123,13 @@ next_slot:
/*
* If the found extent starts after requested offset, then
- * adjust extent_end to be right before this extent begins
+		 * COW the gap and continue from where this extent begins.
*/
if (found_key.offset > cur_offset) {
- extent_end = found_key.offset;
- extent_type = 0;
- goto must_cow;
+ if (cow_start == (u64)-1)
+ cow_start = cur_offset;
+ cur_offset = found_key.offset;
+ goto next_slot;
}
/*
@@ -2143,74 +2195,23 @@ must_cow:
if (cow_start != (u64)-1) {
ret = fallback_to_cow(inode, locked_folio, cow_start,
found_key.offset - 1);
- cow_start = (u64)-1;
if (ret) {
+ cow_end = found_key.offset - 1;
btrfs_dec_nocow_writers(nocow_bg);
goto error;
}
+ cow_start = (u64)-1;
}
- nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1;
- lock_extent(&inode->io_tree, cur_offset, nocow_end, &cached_state);
-
- is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC;
- if (is_prealloc) {
- struct extent_map *em;
-
- em = btrfs_create_io_em(inode, cur_offset,
- &nocow_args.file_extent,
- BTRFS_ORDERED_PREALLOC);
- if (IS_ERR(em)) {
- unlock_extent(&inode->io_tree, cur_offset,
- nocow_end, &cached_state);
- btrfs_dec_nocow_writers(nocow_bg);
- ret = PTR_ERR(em);
- goto error;
- }
- free_extent_map(em);
- }
-
- ordered = btrfs_alloc_ordered_extent(inode, cur_offset,
- &nocow_args.file_extent,
- is_prealloc
- ? (1 << BTRFS_ORDERED_PREALLOC)
- : (1 << BTRFS_ORDERED_NOCOW));
+ ret = nocow_one_range(inode, locked_folio, &cached_state,
+ &nocow_args, cur_offset,
+ extent_type == BTRFS_FILE_EXTENT_PREALLOC);
btrfs_dec_nocow_writers(nocow_bg);
- if (IS_ERR(ordered)) {
- if (is_prealloc) {
- btrfs_drop_extent_map_range(inode, cur_offset,
- nocow_end, false);
- }
- unlock_extent(&inode->io_tree, cur_offset,
- nocow_end, &cached_state);
- ret = PTR_ERR(ordered);
+ if (ret < 0) {
+ nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1;
goto error;
}
-
- if (btrfs_is_data_reloc_root(root))
- /*
- * Error handled later, as we must prevent
- * extent_clear_unlock_delalloc() in error handler
- * from freeing metadata of created ordered extent.
- */
- ret = btrfs_reloc_clone_csums(ordered);
- btrfs_put_ordered_extent(ordered);
-
- extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
- locked_folio, &cached_state,
- EXTENT_LOCKED | EXTENT_DELALLOC |
- EXTENT_CLEAR_DATA_RESV,
- PAGE_UNLOCK | PAGE_SET_ORDERED);
-
cur_offset = extent_end;
-
- /*
- * btrfs_reloc_clone_csums() error, now we're OK to call error
- * handler, as metadata for created ordered extent will only
- * be freed by btrfs_finish_ordered_io().
- */
- if (ret)
- goto error;
}
btrfs_release_path(path);
@@ -2218,43 +2219,113 @@ must_cow:
cow_start = cur_offset;
if (cow_start != (u64)-1) {
- cur_offset = end;
ret = fallback_to_cow(inode, locked_folio, cow_start, end);
- cow_start = (u64)-1;
- if (ret)
+ if (ret) {
+ cow_end = end;
goto error;
+ }
+ cow_start = (u64)-1;
}
+ /*
+	 * Everything finished without an error, so we can unlock the folios now.
+ *
+ * No need to touch the io tree range nor set folio ordered flag, as
+ * fallback_to_cow() and nocow_one_range() have already handled them.
+ */
+ extent_clear_unlock_delalloc(inode, start, end, locked_folio, NULL, 0, PAGE_UNLOCK);
+
btrfs_free_path(path);
return 0;
error:
- /*
- * If an error happened while a COW region is outstanding, cur_offset
- * needs to be reset to cow_start to ensure the COW region is unlocked
- * as well.
- */
- if (cow_start != (u64)-1)
- cur_offset = cow_start;
+ if (cow_start == (u64)-1) {
+ /*
+ * case a)
+ * start cur_offset end
+ * | OE cleanup | Untouched |
+ *
+ * We finished a fallback_to_cow() or nocow_one_range() call,
+ * but failed to check the next range.
+ *
+ * or
+ * start cur_offset nocow_end end
+ * | OE cleanup | Skip | Untouched |
+ *
+ * nocow_one_range() failed, the range [cur_offset, nocow_end] is
+ * already cleaned up.
+ */
+ oe_cleanup_start = start;
+ oe_cleanup_len = cur_offset - start;
+ if (nocow_end)
+ untouched_start = nocow_end + 1;
+ else
+ untouched_start = cur_offset;
+ untouched_len = end + 1 - untouched_start;
+ } else if (cow_start != (u64)-1 && cow_end == 0) {
+ /*
+ * case b)
+ * start cow_start cur_offset end
+ * | OE cleanup | Untouched |
+ *
+		 * We have a pending range that needs COW, but the error happened
+		 * before we hit the next NOCOW range, thus [cow_start, cur_offset)
+		 * doesn't yet have any OE.
+ */
+ oe_cleanup_start = start;
+ oe_cleanup_len = cow_start - start;
+ untouched_start = cow_start;
+ untouched_len = end + 1 - untouched_start;
+ } else {
+ /*
+ * case c)
+ * start cow_start cow_end end
+ * | OE cleanup | Skip | Untouched |
+ *
+		 * fallback_to_cow() failed, and since fallback_to_cow() does the
+		 * cleanup for its own range, we shouldn't touch the range
+		 * [cow_start, cow_end].
+ */
+ ASSERT(cow_start != (u64)-1 && cow_end != 0);
+ oe_cleanup_start = start;
+ oe_cleanup_len = cow_start - start;
+ untouched_start = cow_end + 1;
+ untouched_len = end + 1 - untouched_start;
+ }
+
+ if (oe_cleanup_len) {
+ const u64 oe_cleanup_end = oe_cleanup_start + oe_cleanup_len - 1;
+ btrfs_cleanup_ordered_extents(inode, oe_cleanup_start, oe_cleanup_len);
+ extent_clear_unlock_delalloc(inode, oe_cleanup_start, oe_cleanup_end,
+ locked_folio, NULL,
+ EXTENT_LOCKED | EXTENT_DELALLOC,
+ PAGE_UNLOCK | PAGE_START_WRITEBACK |
+ PAGE_END_WRITEBACK);
+ }
- /*
- * We need to lock the extent here because we're clearing DELALLOC and
- * we're not locked at this point.
- */
- if (cur_offset < end) {
+ if (untouched_len) {
struct extent_state *cached = NULL;
+ const u64 untouched_end = untouched_start + untouched_len - 1;
- lock_extent(&inode->io_tree, cur_offset, end, &cached);
- extent_clear_unlock_delalloc(inode, cur_offset, end,
+ /*
+ * We need to lock the extent here because we're clearing DELALLOC and
+ * we're not locked at this point.
+ */
+ btrfs_lock_extent(&inode->io_tree, untouched_start, untouched_end, &cached);
+ extent_clear_unlock_delalloc(inode, untouched_start, untouched_end,
locked_folio, &cached,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DEFRAG |
EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
- btrfs_qgroup_free_data(inode, NULL, cur_offset, end - cur_offset + 1, NULL);
+ btrfs_qgroup_free_data(inode, NULL, untouched_start, untouched_len, NULL);
}
btrfs_free_path(path);
+ btrfs_err(fs_info,
+"%s failed, root=%llu inode=%llu start=%llu len=%llu cur_offset=%llu oe_cleanup=%llu oe_cleanup_len=%llu untouched_start=%llu untouched_len=%llu: %d",
+ __func__, btrfs_root_id(inode->root), btrfs_ino(inode),
+ start, end + 1 - start, cur_offset, oe_cleanup_start, oe_cleanup_len,
+ untouched_start, untouched_len, ret);
return ret;
}
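
The three error cases documented above split [start, end] into an "ordered extent cleanup" range, an optional already-cleaned range, and an "untouched" range. Below is a self-contained userspace model of just that arithmetic, useful for convincing oneself the ranges cover [start, end] without overlap; the variable names echo the comments, but everything else (UNSET, the sample offsets) is invented and this is not the kernel code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define UNSET UINT64_MAX

/*
 * Model of the error-path partitioning: given the progress markers, compute
 * the range that needs ordered-extent cleanup and the range that was never
 * touched.  The "skip" range (already cleaned) is whatever lies in between.
 */
static void split_ranges(uint64_t start, uint64_t end, uint64_t cur_offset,
			 uint64_t cow_start, uint64_t cow_end, uint64_t nocow_end,
			 uint64_t *oe_len, uint64_t *untouched_start,
			 uint64_t *untouched_len)
{
	if (cow_start == UNSET) {
		/* case a): a nocow/cow step finished (or was partially cleaned). */
		*oe_len = cur_offset - start;
		*untouched_start = nocow_end ? nocow_end + 1 : cur_offset;
	} else if (cow_end == 0) {
		/* case b): a pending COW range never got an ordered extent. */
		*oe_len = cow_start - start;
		*untouched_start = cow_start;
	} else {
		/* case c): the COW fallback cleaned [cow_start, cow_end] itself. */
		*oe_len = cow_start - start;
		*untouched_start = cow_end + 1;
	}
	*untouched_len = end + 1 - *untouched_start;
}

int main(void)
{
	uint64_t oe_len, u_start, u_len;

	/* case a) with an already-cleaned nocow range [8192, 12287]. */
	split_ranges(0, 16383, 8192, UNSET, 0, 12287, &oe_len, &u_start, &u_len);
	printf("oe=[0,%llu) untouched=[%llu,%llu]\n",
	       (unsigned long long)oe_len, (unsigned long long)u_start,
	       (unsigned long long)(u_start + u_len - 1));
	assert(oe_len == 8192 && u_start == 12288 && u_len == 4096);

	/* case b): COW range pending from 4096, nothing submitted yet. */
	split_ranges(0, 16383, 8192, 4096, 0, 0, &oe_len, &u_start, &u_len);
	assert(oe_len == 4096 && u_start == 4096 && u_len == 12288);
	return 0;
}
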
@@ -2262,7 +2333,7 @@ static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
{
if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
if (inode->defrag_bytes &&
- test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
+ btrfs_test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
return false;
return true;
}
@@ -2284,11 +2355,11 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_fol
* can confuse the caller.
*/
ASSERT(!(end <= folio_pos(locked_folio) ||
- start >= folio_pos(locked_folio) + folio_size(locked_folio)));
+ start >= folio_next_pos(locked_folio)));
if (should_nocow(inode, start, end)) {
ret = run_delalloc_nocow(inode, locked_folio, start, end);
- goto out;
+ return ret;
}
if (btrfs_inode_can_compress(inode) &&
@@ -2300,13 +2371,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_fol
ret = run_delalloc_cow(inode, locked_folio, start, end, wbc,
true);
else
- ret = cow_file_range(inode, locked_folio, start, end, NULL,
- false, false);
-
-out:
- if (ret < 0)
- btrfs_cleanup_ordered_extents(inode, locked_folio, start,
- end - start + 1);
+ ret = cow_file_range(inode, locked_folio, start, end, NULL, 0);
return ret;
}
@@ -2556,7 +2621,7 @@ void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
!btrfs_is_free_space_inode(inode) &&
!(state->state & EXTENT_NORESERVE) &&
(bits & EXTENT_CLEAR_DATA_RESV))
- btrfs_free_reserved_data_space_noquota(fs_info, len);
+ btrfs_free_reserved_data_space_noquota(inode, len);
percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
fs_info->delalloc_batch);
@@ -2640,12 +2705,12 @@ static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
if (em_len > search_len)
em_len = search_len;
- ret = set_extent_bit(&inode->io_tree, search_start,
- search_start + em_len - 1,
- EXTENT_DELALLOC_NEW, cached_state);
+ ret = btrfs_set_extent_bit(&inode->io_tree, search_start,
+ search_start + em_len - 1,
+ EXTENT_DELALLOC_NEW, cached_state);
next:
- search_start = extent_map_end(em);
- free_extent_map(em);
+ search_start = btrfs_extent_map_end(em);
+ btrfs_free_extent_map(em);
if (ret)
return ret;
}
@@ -2675,8 +2740,8 @@ int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
return ret;
}
- return set_extent_bit(&inode->io_tree, start, end,
- EXTENT_DELALLOC | extra_bits, cached_state);
+ return btrfs_set_extent_bit(&inode->io_tree, start, end,
+ EXTENT_DELALLOC | extra_bits, cached_state);
}
/* see btrfs_writepage_start_hook for details on why this is required */
@@ -2697,7 +2762,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
struct btrfs_inode *inode = fixup->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 page_start = folio_pos(folio);
- u64 page_end = folio_pos(folio) + folio_size(folio) - 1;
+ u64 page_end = folio_next_pos(folio) - 1;
int ret = 0;
bool free_delalloc_space = true;
@@ -2751,7 +2816,7 @@ again:
if (ret)
goto out_page;
- lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
/* already ordered? We're done */
if (folio_test_ordered(folio))
@@ -2759,8 +2824,8 @@ again:
ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
if (ordered) {
- unlock_extent(&inode->io_tree, page_start, page_end,
- &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, page_start, page_end,
+ &cached_state);
folio_unlock(folio);
btrfs_start_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
@@ -2786,7 +2851,7 @@ out_reserved:
if (free_delalloc_space)
btrfs_delalloc_release_space(inode, data_reserved, page_start,
PAGE_SIZE, true);
- unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
out_page:
if (ret) {
/*
@@ -2833,6 +2898,21 @@ int btrfs_writepage_cow_fixup(struct folio *folio)
return 0;
/*
+	 * For experimental builds, we error out instead of returning -EAGAIN.
+ *
+ * We should not hit such out-of-band dirty folios anymore.
+ */
+ if (IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL)) {
+ DEBUG_WARN();
+ btrfs_err_rl(fs_info,
+ "root %lld ino %llu folio %llu is marked dirty without notifying the fs",
+ btrfs_root_id(BTRFS_I(inode)->root),
+ btrfs_ino(BTRFS_I(inode)),
+ folio_pos(folio));
+ return -EUCLEAN;
+ }
+
+ /*
* folio_checked is set below when we create a fixup worker for this
* folio, don't try to create another one if we're already
* folio_test_checked.
@@ -2851,7 +2931,7 @@ int btrfs_writepage_cow_fixup(struct folio *folio)
* We are already holding a reference to this inode from
* write_cache_pages. We need to hold it because the space reservation
* takes place outside of the folio lock, and we can't trust
- * page->mapping outside of the folio lock.
+ * folio->mapping outside of the folio lock.
*/
ihold(inode);
btrfs_folio_set_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
@@ -2872,7 +2952,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
{
struct btrfs_root *root = inode->root;
const u64 sectorsize = root->fs_info->sectorsize;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_key ins;
u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
@@ -2907,8 +2987,8 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
if (!drop_args.extent_inserted) {
ins.objectid = btrfs_ino(inode);
- ins.offset = file_pos;
ins.type = BTRFS_EXTENT_DATA_KEY;
+ ins.offset = file_pos;
ret = btrfs_insert_empty_item(trans, root, path, &ins,
sizeof(*stack_fi));
@@ -2921,14 +3001,13 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
btrfs_item_ptr_offset(leaf, path->slots[0]),
sizeof(struct btrfs_file_extent_item));
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
/*
* If we dropped an inline extent here, we know the range where it is
* was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
* number of bytes only for that range containing the inline extent.
- * The remaining of the range will be processed when clearning the
+	 * The rest of the range will be processed when clearing the
* EXTENT_DELALLOC_BIT bit through the ordered extent completion.
*/
if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
@@ -2944,8 +3023,8 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
ins.objectid = disk_bytenr;
- ins.offset = disk_num_bytes;
ins.type = BTRFS_EXTENT_ITEM_KEY;
+ ins.offset = disk_num_bytes;
ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
if (ret)
@@ -2955,8 +3034,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
file_pos - offset,
qgroup_reserved, &ins);
out:
- btrfs_free_path(path);
-
return ret;
}
@@ -3046,14 +3123,15 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
if (!freespace_inode)
btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);
- if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
+ if (unlikely(test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags))) {
ret = -EIO;
goto out;
}
- if (btrfs_is_zoned(fs_info))
- btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
- ordered_extent->disk_num_bytes);
+ ret = btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
+ ordered_extent->disk_num_bytes);
+ if (ret)
+ goto out;
if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
truncated = true;
@@ -3063,6 +3141,21 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
goto out;
}
+ /*
+ * If it's a COW write we need to lock the extent range as we will be
+ * inserting/replacing file extent items and unpinning an extent map.
+ * This must be taken before joining a transaction, as it's a higher
+ * level lock (like the inode's VFS lock), otherwise we can run into an
+ * ABBA deadlock with other tasks (transactions work like a lock,
+ * depending on their current state).
+ */
+ if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
+ clear_bits |= EXTENT_LOCKED | EXTENT_FINISHING_ORDERED;
+ btrfs_lock_extent_bits(io_tree, start, end,
+ EXTENT_LOCKED | EXTENT_FINISHING_ORDERED,
+ &cached_state);
+ }
+
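
The comment above is the usual lock-ordering rule: take the higher-level lock (the extent range) before the lower-level one (the transaction) on every path, so two tasks can never hold them in opposite orders. A minimal pthread sketch of that rule, completely separate from the kernel code (build with -pthread; both mutexes are invented stand-ins):

#include <pthread.h>
#include <stdio.h>

/*
 * If every path takes the "outer" lock before the "inner" one, the ABBA
 * deadlock described above cannot happen.  The two mutexes stand in for
 * the extent range lock and the transaction.
 */
static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER; /* e.g. extent range */
static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER; /* e.g. transaction */

static void *worker(void *arg)
{
	/* Always outer first, inner second - never the reverse. */
	pthread_mutex_lock(&outer_lock);
	pthread_mutex_lock(&inner_lock);
	printf("worker %ld did its critical section\n", (long)arg);
	pthread_mutex_unlock(&inner_lock);
	pthread_mutex_unlock(&outer_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)1L);
	pthread_create(&b, NULL, worker, (void *)2L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
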
if (freespace_inode)
trans = btrfs_join_transaction_spacecache(root);
else
@@ -3076,7 +3169,7 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
trans->block_rsv = &inode->block_rsv;
ret = btrfs_insert_raid_extent(trans, ordered_extent);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -3084,7 +3177,7 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
/* Logic error */
ASSERT(list_empty(&ordered_extent->list));
- if (!list_empty(&ordered_extent->list)) {
+ if (unlikely(!list_empty(&ordered_extent->list))) {
ret = -EINVAL;
btrfs_abort_transaction(trans, ret);
goto out;
@@ -3092,16 +3185,13 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
btrfs_inode_safe_disk_i_size_write(inode, 0);
ret = btrfs_update_inode_fallback(trans, inode);
- if (ret) {
+ if (unlikely(ret)) {
/* -ENOMEM or corruption */
btrfs_abort_transaction(trans, ret);
}
goto out;
}
- clear_bits |= EXTENT_LOCKED;
- lock_extent(io_tree, start, end, &cached_state);
-
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
compress_type = ordered_extent->compress_type;
if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
@@ -3122,20 +3212,20 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
ordered_extent->disk_num_bytes);
}
}
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
- ret = unpin_extent_cache(inode, ordered_extent->file_offset,
- ordered_extent->num_bytes, trans->transid);
- if (ret < 0) {
+ ret = btrfs_unpin_extent_cache(inode, ordered_extent->file_offset,
+ ordered_extent->num_bytes, trans->transid);
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
ret = add_pending_csums(trans, &ordered_extent->list);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -3147,26 +3237,24 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
*/
if ((clear_bits & EXTENT_DELALLOC_NEW) &&
!test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
- clear_extent_bit(&inode->io_tree, start, end,
- EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
- &cached_state);
+ btrfs_clear_extent_bit(&inode->io_tree, start, end,
+ EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
+ &cached_state);
btrfs_inode_safe_disk_i_size_write(inode, 0);
ret = btrfs_update_inode_fallback(trans, inode);
- if (ret) { /* -ENOMEM or corruption */
+ if (unlikely(ret)) { /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, ret);
goto out;
}
out:
- clear_extent_bit(&inode->io_tree, start, end, clear_bits,
- &cached_state);
+ btrfs_clear_extent_bit(&inode->io_tree, start, end, clear_bits,
+ &cached_state);
if (trans)
btrfs_end_transaction(trans);
if (ret || truncated) {
- u64 unwritten_start = start;
-
/*
* If we failed to finish this ordered extent for any reason we
* need to make sure BTRFS_ORDERED_IOERR is set on the ordered
@@ -3178,10 +3266,6 @@ out:
if (ret)
btrfs_mark_ordered_extent_error(ordered_extent);
- if (truncated)
- unwritten_start += logical_len;
- clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
-
/*
* Drop extent maps for the part of the extent we didn't write.
*
@@ -3196,9 +3280,15 @@ out:
* we don't mess with the extent map tree in the NOCOW case, but
* for now simply skip this if we are the free space inode.
*/
- if (!btrfs_is_free_space_inode(inode))
+ if (!btrfs_is_free_space_inode(inode)) {
+ u64 unwritten_start = start;
+
+ if (truncated)
+ unwritten_start += logical_len;
+
btrfs_drop_extent_map_range(inode, unwritten_start,
end, false);
+ }
/*
* If the ordered extent had an IOERR or something else went
@@ -3225,7 +3315,7 @@ out:
NULL);
btrfs_free_reserved_extent(fs_info,
ordered_extent->disk_bytenr,
- ordered_extent->disk_num_bytes, 1);
+ ordered_extent->disk_num_bytes, true);
/*
* Actually free the qgroup rsv which was released when
* the ordered extent was created.
@@ -3260,35 +3350,89 @@ int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
}
/*
- * Verify the checksum for a single sector without any extra action that depend
- * on the type of I/O.
+ * Calculate the checksum of an fs block at physical memory address @paddr,
+ * and save the result to @dest.
+ *
+ * The folio containing @paddr must be large enough to contain a full fs block.
*/
-int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
- u32 pgoff, u8 *csum, const u8 * const csum_expected)
+void btrfs_calculate_block_csum_folio(struct btrfs_fs_info *fs_info,
+ const phys_addr_t paddr, u8 *dest)
{
- SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
- char *kaddr;
+ struct folio *folio = page_folio(phys_to_page(paddr));
+ const u32 blocksize = fs_info->sectorsize;
+ const u32 step = min(blocksize, PAGE_SIZE);
+ const u32 nr_steps = blocksize / step;
+ phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
- ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE);
+ /* The full block must be inside the folio. */
+ ASSERT(offset_in_folio(folio, paddr) + blocksize <= folio_size(folio));
- shash->tfm = fs_info->csum_shash;
+ for (int i = 0; i < nr_steps; i++) {
+ u32 pindex = offset_in_folio(folio, paddr + i * step) >> PAGE_SHIFT;
- kaddr = kmap_local_page(page) + pgoff;
- crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
- kunmap_local(kaddr);
+ /*
+		 * For bs <= ps cases, we will only run the loop once, so the offset
+		 * inside the page is only added to paddrs[0].
+ *
+ * For bs > ps cases, the block must be page aligned, thus offset
+ * inside the page will always be 0.
+ */
+ paddrs[i] = page_to_phys(folio_page(folio, pindex)) + offset_in_page(paddr);
+ }
+ return btrfs_calculate_block_csum_pages(fs_info, paddrs, dest);
+}
+
+/*
+ * Calculate the checksum of a fs block backed by multiple noncontiguous pages
+ * at @paddrs[] and save the result to @dest.
+ *
+ * @paddrs[] must provide one entry for every page-sized piece of the
+ * block (a single entry if the block fits within one page).
+ */
+void btrfs_calculate_block_csum_pages(struct btrfs_fs_info *fs_info,
+ const phys_addr_t paddrs[], u8 *dest)
+{
+ const u32 blocksize = fs_info->sectorsize;
+ const u32 step = min(blocksize, PAGE_SIZE);
+ const u32 nr_steps = blocksize / step;
+ SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
- if (memcmp(csum, csum_expected, fs_info->csum_size))
+ shash->tfm = fs_info->csum_shash;
+ crypto_shash_init(shash);
+ for (int i = 0; i < nr_steps; i++) {
+ const phys_addr_t paddr = paddrs[i];
+ void *kaddr;
+
+ ASSERT(offset_in_page(paddr) + step <= PAGE_SIZE);
+ kaddr = kmap_local_page(phys_to_page(paddr)) + offset_in_page(paddr);
+ crypto_shash_update(shash, kaddr, step);
+ kunmap_local(kaddr);
+ }
+ crypto_shash_final(shash, dest);
+}
+
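
The helper above hashes a block one page-sized chunk at a time through an init/update/final cycle. Here is a self-contained userspace sketch of the same pattern with FNV-1a as a stand-in digest; the kernel uses its configured checksum algorithm through crypto_shash, and every name and constant below is invented purely for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy incremental digest (FNV-1a); stands in for crypto_shash_*(). */
struct toy_hash { uint64_t state; };

static void toy_init(struct toy_hash *h)	{ h->state = 0xcbf29ce484222325ULL; }
static void toy_update(struct toy_hash *h, const void *buf, size_t len)
{
	const unsigned char *p = buf;

	for (size_t i = 0; i < len; i++) {
		h->state ^= p[i];
		h->state *= 0x100000001b3ULL;
	}
}
static uint64_t toy_final(const struct toy_hash *h)	{ return h->state; }

#define PAGE_SIZE_EX	4096u	/* invented page size for the sketch */

/*
 * Hash a "block" scattered over several non-contiguous page-sized buffers,
 * feeding one chunk per step, mirroring the init/update/final structure.
 */
static uint64_t hash_scattered_block(const void *chunks[], unsigned int nr_chunks,
				     size_t chunk_len)
{
	struct toy_hash h;

	toy_init(&h);
	for (unsigned int i = 0; i < nr_chunks; i++)
		toy_update(&h, chunks[i], chunk_len);
	return toy_final(&h);
}

int main(void)
{
	static unsigned char page_a[PAGE_SIZE_EX], page_b[PAGE_SIZE_EX];
	static unsigned char contiguous[2 * PAGE_SIZE_EX];
	const void *chunks[] = { page_a, page_b };
	struct toy_hash whole;

	memset(page_a, 0xaa, sizeof(page_a));
	memset(page_b, 0x55, sizeof(page_b));
	memcpy(contiguous, page_a, PAGE_SIZE_EX);
	memcpy(contiguous + PAGE_SIZE_EX, page_b, PAGE_SIZE_EX);

	toy_init(&whole);
	toy_update(&whole, contiguous, sizeof(contiguous));

	/* Chunked and contiguous hashing must agree. */
	printf("chunked=%016llx contiguous=%016llx\n",
	       (unsigned long long)hash_scattered_block(chunks, 2, PAGE_SIZE_EX),
	       (unsigned long long)toy_final(&whole));
	return 0;
}
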
+/*
+ * Verify the checksum for a single sector without any extra action that
+ * depends on the type of I/O.
+ *
+ * @paddr must point into a folio large enough to hold the full fs block.
+ */
+int btrfs_check_block_csum(struct btrfs_fs_info *fs_info, phys_addr_t paddr, u8 *csum,
+ const u8 * const csum_expected)
+{
+ btrfs_calculate_block_csum_folio(fs_info, paddr, csum);
+ if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0))
return -EIO;
return 0;
}
/*
- * Verify the checksum of a single data sector.
+ * Verify the checksum of a single data sector, which can be scattered
+ * across noncontiguous pages.
*
* @bbio: btrfs_io_bio which contains the csum
* @dev: device the sector is on
* @bio_offset: offset to the beginning of the bio (in bytes)
- * @bv: bio_vec to check
+ * @paddrs: physical addresses which back the fs block
*
* Check if the checksum on a data block is valid. When a checksum mismatch is
* detected, report the error and fill the corrupted range with zero.
@@ -3296,33 +3440,34 @@ int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
* Return %true if the sector is ok or had no checksum to start with, else %false.
*/
bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
- u32 bio_offset, struct bio_vec *bv)
+ u32 bio_offset, const phys_addr_t paddrs[])
{
struct btrfs_inode *inode = bbio->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ const u32 blocksize = fs_info->sectorsize;
+ const u32 step = min(blocksize, PAGE_SIZE);
+ const u32 nr_steps = blocksize / step;
u64 file_offset = bbio->file_offset + bio_offset;
- u64 end = file_offset + bv->bv_len - 1;
+ u64 end = file_offset + blocksize - 1;
u8 *csum_expected;
u8 csum[BTRFS_CSUM_SIZE];
- ASSERT(bv->bv_len == fs_info->sectorsize);
-
if (!bbio->csum)
return true;
if (btrfs_is_data_reloc_root(inode->root) &&
- test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
- NULL)) {
+ btrfs_test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
+ NULL)) {
/* Skip the range without csum for data reloc inode */
- clear_extent_bits(&inode->io_tree, file_offset, end,
- EXTENT_NODATASUM);
+ btrfs_clear_extent_bit(&inode->io_tree, file_offset, end,
+ EXTENT_NODATASUM, NULL);
return true;
}
csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
fs_info->csum_size;
- if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum,
- csum_expected))
+ btrfs_calculate_block_csum_pages(fs_info, paddrs, csum);
+ if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0))
goto zeroit;
return true;
@@ -3331,7 +3476,8 @@ zeroit:
bbio->mirror_num);
if (dev)
btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
- memzero_bvec(bv);
+ for (int i = 0; i < nr_steps; i++)
+ memzero_page(phys_to_page(paddrs[i]), offset_in_page(paddrs[i]), step);
return false;
}
@@ -3353,6 +3499,7 @@ void btrfs_add_delayed_iput(struct btrfs_inode *inode)
if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
return;
+ WARN_ON_ONCE(test_bit(BTRFS_FS_STATE_NO_DELAYED_IPUT, &fs_info->fs_state));
atomic_inc(&fs_info->nr_delayed_iputs);
/*
* Need to be irq safe here because we can be called from either an irq
@@ -3444,7 +3591,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans,
int ret;
ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
- if (ret && ret != -EEXIST) {
+ if (unlikely(ret && ret != -EEXIST)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -3469,11 +3616,10 @@ static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_key key, found_key;
struct btrfs_trans_handle *trans;
- struct inode *inode;
u64 last_objectid = 0;
int ret = 0, nr_unlink = 0;
@@ -3492,6 +3638,8 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
key.offset = (u64)-1;
while (1) {
+ struct btrfs_inode *inode;
+
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
@@ -3615,10 +3763,10 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
* deleted but wasn't. The inode number may have been reused,
* but either way, we can delete the orphan item.
*/
- if (!inode || inode->i_nlink) {
+ if (!inode || inode->vfs_inode.i_nlink) {
if (inode) {
- ret = btrfs_drop_verity_items(BTRFS_I(inode));
- iput(inode);
+ ret = btrfs_drop_verity_items(inode);
+ iput(&inode->vfs_inode);
inode = NULL;
if (ret)
goto out;
@@ -3641,7 +3789,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
nr_unlink++;
/* this will do delete_inode and everything for us */
- iput(inode);
+ iput(&inode->vfs_inode);
}
/* release the path since we're done with it */
btrfs_release_path(path);
@@ -3658,19 +3806,22 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
out:
if (ret)
btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
- btrfs_free_path(path);
return ret;
}
/*
- * very simple check to peek ahead in the leaf looking for xattrs. If we
- * don't find any xattrs, we know there can't be any acls.
+ * Look ahead in the leaf for xattrs. If we don't find any then we know there
+ * can't be any ACLs.
*
- * slot is the slot the inode is in, objectid is the objectid of the inode
+ * @leaf:	the extent buffer leaf to search
+ * @slot: the slot the inode is in
+ * @objectid: the objectid of the inode
+ *
+ * Return true if there is an xattr/ACL item, false otherwise.
*/
-static noinline int acls_after_inode_item(struct extent_buffer *leaf,
- int slot, u64 objectid,
- int *first_xattr_slot)
+static noinline bool acls_after_inode_item(struct extent_buffer *leaf,
+ int slot, u64 objectid,
+ int *first_xattr_slot)
{
u32 nritems = btrfs_header_nritems(leaf);
struct btrfs_key found_key;
@@ -3690,45 +3841,50 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
while (slot < nritems) {
btrfs_item_key_to_cpu(leaf, &found_key, slot);
- /* we found a different objectid, there must not be acls */
+ /* We found a different objectid, there must be no ACLs. */
if (found_key.objectid != objectid)
- return 0;
+ return false;
- /* we found an xattr, assume we've got an acl */
+ /* We found an xattr, assume we've got an ACL. */
if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
if (*first_xattr_slot == -1)
*first_xattr_slot = slot;
if (found_key.offset == xattr_access ||
found_key.offset == xattr_default)
- return 1;
+ return true;
}
/*
- * we found a key greater than an xattr key, there can't
- * be any acls later on
+ * We found a key greater than an xattr key, there can't be any
+ * ACLs later on.
*/
if (found_key.type > BTRFS_XATTR_ITEM_KEY)
- return 0;
+ return false;
slot++;
scanned++;
/*
- * it goes inode, inode backrefs, xattrs, extents,
- * so if there are a ton of hard links to an inode there can
- * be a lot of backrefs. Don't waste time searching too hard,
- * this is just an optimization
+ * The item order goes like:
+ * - inode
+ * - inode backrefs
+ * - xattrs
+		 * - extents
+ *
+ * so if there are lots of hard links to an inode there can be
+ * a lot of backrefs. Don't waste time searching too hard,
+ * this is just an optimization.
*/
if (scanned >= 8)
break;
}
- /* we hit the end of the leaf before we found an xattr or
- * something larger than an xattr. We have to assume the inode
- * has acls
+ /*
+ * We hit the end of the leaf before we found an xattr or something
+ * larger than an xattr. We have to assume the inode has ACLs.
*/
if (*first_xattr_slot == -1)
*first_xattr_slot = slot;
- return 1;
+ return true;
}
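
The function above scans a bounded number of slots after the inode item looking for a POSIX-ACL xattr key. Below is a simplified standalone model of that lookahead over a sorted key array; the struct, the type constants and the ACL hash are invented for the sketch (only the scan limit of 8 mirrors the comment), so this is not the on-disk btrfs layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for item types, invented for the sketch. */
enum { KEY_INODE = 1, KEY_INODE_REF = 12, KEY_XATTR = 24, KEY_EXTENT = 108 };

struct demo_key {
	uint64_t objectid;	/* inode number the item belongs to */
	uint8_t type;		/* item type, sorted after objectid */
	uint64_t offset;	/* e.g. hash of the xattr name */
};

#define ACL_HASH	0x1234	/* pretend hash of the ACL xattr name */
#define SCAN_LIMIT	8	/* don't search too hard, same idea as above */

/* Look ahead in the sorted array for an ACL xattr of @objectid. */
static bool has_acl_after(const struct demo_key *keys, int nr, int slot,
			  uint64_t objectid)
{
	int scanned = 0;

	while (slot < nr && scanned < SCAN_LIMIT) {
		const struct demo_key *key = &keys[slot];

		if (key->objectid != objectid)
			return false;		/* different inode: no ACLs */
		if (key->type == KEY_XATTR && key->offset == ACL_HASH)
			return true;		/* found an ACL xattr */
		if (key->type > KEY_XATTR)
			return false;		/* past the xattr range */
		slot++;
		scanned++;
	}
	/* Ran off the leaf or hit the limit: assume ACLs may exist. */
	return true;
}

int main(void)
{
	const struct demo_key leaf[] = {
		{ 257, KEY_INODE,     0 },
		{ 257, KEY_INODE_REF, 256 },
		{ 257, KEY_XATTR,     ACL_HASH },
		{ 257, KEY_EXTENT,    0 },
	};

	printf("inode 257 has ACL: %s\n",
	       has_acl_after(leaf, 4, 1, 257) ? "yes" : "no");
	return 0;
}
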
static int btrfs_init_file_extent_tree(struct btrfs_inode *inode)
@@ -3748,7 +3904,8 @@ static int btrfs_init_file_extent_tree(struct btrfs_inode *inode)
if (!inode->file_extent_tree)
return -ENOMEM;
- extent_io_tree_init(fs_info, inode->file_extent_tree, IO_TREE_INODE_FILE_EXTENT);
+ btrfs_extent_io_tree_init(fs_info, inode->file_extent_tree,
+ IO_TREE_INODE_FILE_EXTENT);
/* Lockdep class is set only for the file extent tree. */
lockdep_set_class(&inode->file_extent_tree->lock, &file_extent_tree_class);
@@ -3779,7 +3936,7 @@ static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
ASSERT(ret != -ENOMEM);
return ret;
} else if (existing) {
- WARN_ON(!(existing->vfs_inode.i_state & (I_WILL_FREE | I_FREEING)));
+ WARN_ON(!(inode_state_read_once(&existing->vfs_inode) & (I_WILL_FREE | I_FREEING)));
}
return 0;
@@ -3791,12 +3948,13 @@ static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
*
* On failure clean up the inode.
*/
-static int btrfs_read_locked_inode(struct inode *inode, struct btrfs_path *path)
+static int btrfs_read_locked_inode(struct btrfs_inode *inode, struct btrfs_path *path)
{
- struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
struct btrfs_inode_item *inode_item;
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct inode *vfs_inode = &inode->vfs_inode;
struct btrfs_key location;
unsigned long ptr;
int maybe_acls;
@@ -3805,17 +3963,13 @@ static int btrfs_read_locked_inode(struct inode *inode, struct btrfs_path *path)
bool filled = false;
int first_xattr_slot;
- ret = btrfs_init_file_extent_tree(BTRFS_I(inode));
- if (ret)
- goto out;
-
ret = btrfs_fill_inode(inode, &rdev);
if (!ret)
filled = true;
ASSERT(path);
- btrfs_get_inode_key(BTRFS_I(inode), &location);
+ btrfs_get_inode_key(inode, &location);
ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
if (ret) {
@@ -3835,43 +3989,47 @@ static int btrfs_read_locked_inode(struct inode *inode, struct btrfs_path *path)
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
- inode->i_mode = btrfs_inode_mode(leaf, inode_item);
- set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
- i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
- i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
- btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
- btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
- round_up(i_size_read(inode), fs_info->sectorsize));
-
- inode_set_atime(inode, btrfs_timespec_sec(leaf, &inode_item->atime),
+ vfs_inode->i_mode = btrfs_inode_mode(leaf, inode_item);
+ set_nlink(vfs_inode, btrfs_inode_nlink(leaf, inode_item));
+ i_uid_write(vfs_inode, btrfs_inode_uid(leaf, inode_item));
+ i_gid_write(vfs_inode, btrfs_inode_gid(leaf, inode_item));
+ btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
+
+ inode_set_atime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->atime),
btrfs_timespec_nsec(leaf, &inode_item->atime));
- inode_set_mtime(inode, btrfs_timespec_sec(leaf, &inode_item->mtime),
+ inode_set_mtime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->mtime),
btrfs_timespec_nsec(leaf, &inode_item->mtime));
- inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
+ inode_set_ctime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
btrfs_timespec_nsec(leaf, &inode_item->ctime));
- BTRFS_I(inode)->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime);
- BTRFS_I(inode)->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);
+ inode->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime);
+ inode->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);
- inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
- BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
- BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
+ inode_set_bytes(vfs_inode, btrfs_inode_nbytes(leaf, inode_item));
+ inode->generation = btrfs_inode_generation(leaf, inode_item);
+ inode->last_trans = btrfs_inode_transid(leaf, inode_item);
- inode_set_iversion_queried(inode,
- btrfs_inode_sequence(leaf, inode_item));
- inode->i_generation = BTRFS_I(inode)->generation;
- inode->i_rdev = 0;
+ inode_set_iversion_queried(vfs_inode, btrfs_inode_sequence(leaf, inode_item));
+ vfs_inode->i_generation = inode->generation;
+ vfs_inode->i_rdev = 0;
rdev = btrfs_inode_rdev(leaf, inode_item);
- if (S_ISDIR(inode->i_mode))
- BTRFS_I(inode)->index_cnt = (u64)-1;
+ if (S_ISDIR(vfs_inode->i_mode))
+ inode->index_cnt = (u64)-1;
btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
- &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
+ &inode->flags, &inode->ro_flags);
+ btrfs_update_inode_mapping_flags(inode);
+ btrfs_set_inode_mapping_order(inode);
cache_index:
+ ret = btrfs_init_file_extent_tree(inode);
+ if (ret)
+ goto out;
+ btrfs_inode_set_file_extent_range(inode, 0,
+ round_up(i_size_read(vfs_inode), fs_info->sectorsize));
/*
* If we were modified in the current generation and evicted from memory
* and then re-read we need to do a full sync since we don't have any
@@ -3881,9 +4039,8 @@ cache_index:
* This is required for both inode re-read from disk and delayed inode
* in the delayed_nodes xarray.
*/
- if (BTRFS_I(inode)->last_trans == btrfs_get_fs_generation(fs_info))
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags);
+ if (inode->last_trans == btrfs_get_fs_generation(fs_info))
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
/*
* We don't persist the id of the transaction where an unlink operation
@@ -3912,7 +4069,7 @@ cache_index:
* transaction commits on fsync if our inode is a directory, or if our
* inode is not a directory, logging its parent unnecessarily.
*/
- BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
+ inode->last_unlink_trans = inode->last_trans;
/*
* Same logic as for last_unlink_trans. We don't persist the generation
@@ -3920,15 +4077,15 @@ cache_index:
* operation, so after eviction and reloading the inode we must be
* pessimistic and assume the last transaction that modified the inode.
*/
- BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;
+ inode->last_reflink_trans = inode->last_trans;
path->slots[0]++;
- if (inode->i_nlink != 1 ||
+ if (vfs_inode->i_nlink != 1 ||
path->slots[0] >= btrfs_header_nritems(leaf))
goto cache_acl;
btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
- if (location.objectid != btrfs_ino(BTRFS_I(inode)))
+ if (location.objectid != btrfs_ino(inode))
goto cache_acl;
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
@@ -3936,13 +4093,12 @@ cache_index:
struct btrfs_inode_ref *ref;
ref = (struct btrfs_inode_ref *)ptr;
- BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
+ inode->dir_index = btrfs_inode_ref_index(leaf, ref);
} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
struct btrfs_inode_extref *extref;
extref = (struct btrfs_inode_extref *)ptr;
- BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
- extref);
+ inode->dir_index = btrfs_inode_extref_index(leaf, extref);
}
cache_acl:
/*
@@ -3950,50 +4106,49 @@ cache_acl:
* any xattrs or acls
*/
maybe_acls = acls_after_inode_item(leaf, path->slots[0],
- btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
+ btrfs_ino(inode), &first_xattr_slot);
if (first_xattr_slot != -1) {
path->slots[0] = first_xattr_slot;
ret = btrfs_load_inode_props(inode, path);
if (ret)
btrfs_err(fs_info,
"error loading props for ino %llu (root %llu): %d",
- btrfs_ino(BTRFS_I(inode)),
- btrfs_root_id(root), ret);
+ btrfs_ino(inode), btrfs_root_id(root), ret);
}
if (!maybe_acls)
- cache_no_acl(inode);
+ cache_no_acl(vfs_inode);
- switch (inode->i_mode & S_IFMT) {
+ switch (vfs_inode->i_mode & S_IFMT) {
case S_IFREG:
- inode->i_mapping->a_ops = &btrfs_aops;
- inode->i_fop = &btrfs_file_operations;
- inode->i_op = &btrfs_file_inode_operations;
+ vfs_inode->i_mapping->a_ops = &btrfs_aops;
+ vfs_inode->i_fop = &btrfs_file_operations;
+ vfs_inode->i_op = &btrfs_file_inode_operations;
break;
case S_IFDIR:
- inode->i_fop = &btrfs_dir_file_operations;
- inode->i_op = &btrfs_dir_inode_operations;
+ vfs_inode->i_fop = &btrfs_dir_file_operations;
+ vfs_inode->i_op = &btrfs_dir_inode_operations;
break;
case S_IFLNK:
- inode->i_op = &btrfs_symlink_inode_operations;
- inode_nohighmem(inode);
- inode->i_mapping->a_ops = &btrfs_aops;
+ vfs_inode->i_op = &btrfs_symlink_inode_operations;
+ inode_nohighmem(vfs_inode);
+ vfs_inode->i_mapping->a_ops = &btrfs_aops;
break;
default:
- inode->i_op = &btrfs_special_inode_operations;
- init_special_inode(inode, inode->i_mode, rdev);
+ vfs_inode->i_op = &btrfs_special_inode_operations;
+ init_special_inode(vfs_inode, vfs_inode->i_mode, rdev);
break;
}
btrfs_sync_inode_flags_to_i_flags(inode);
- ret = btrfs_add_inode_to_root(BTRFS_I(inode), true);
+ ret = btrfs_add_inode_to_root(inode, true);
if (ret)
goto out;
return 0;
out:
- iget_failed(inode);
+ iget_failed(vfs_inode);
return ret;
}
@@ -4005,45 +4160,35 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode_item *item,
struct inode *inode)
{
- struct btrfs_map_token token;
u64 flags;
- btrfs_init_map_token(&token, leaf);
-
- btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
- btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
- btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
- btrfs_set_token_inode_mode(&token, item, inode->i_mode);
- btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
-
- btrfs_set_token_timespec_sec(&token, &item->atime,
- inode_get_atime_sec(inode));
- btrfs_set_token_timespec_nsec(&token, &item->atime,
- inode_get_atime_nsec(inode));
-
- btrfs_set_token_timespec_sec(&token, &item->mtime,
- inode_get_mtime_sec(inode));
- btrfs_set_token_timespec_nsec(&token, &item->mtime,
- inode_get_mtime_nsec(inode));
-
- btrfs_set_token_timespec_sec(&token, &item->ctime,
- inode_get_ctime_sec(inode));
- btrfs_set_token_timespec_nsec(&token, &item->ctime,
- inode_get_ctime_nsec(inode));
-
- btrfs_set_token_timespec_sec(&token, &item->otime, BTRFS_I(inode)->i_otime_sec);
- btrfs_set_token_timespec_nsec(&token, &item->otime, BTRFS_I(inode)->i_otime_nsec);
-
- btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
- btrfs_set_token_inode_generation(&token, item,
- BTRFS_I(inode)->generation);
- btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
- btrfs_set_token_inode_transid(&token, item, trans->transid);
- btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
+ btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
+ btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
+ btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
+ btrfs_set_inode_mode(leaf, item, inode->i_mode);
+ btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
+
+ btrfs_set_timespec_sec(leaf, &item->atime, inode_get_atime_sec(inode));
+ btrfs_set_timespec_nsec(leaf, &item->atime, inode_get_atime_nsec(inode));
+
+ btrfs_set_timespec_sec(leaf, &item->mtime, inode_get_mtime_sec(inode));
+ btrfs_set_timespec_nsec(leaf, &item->mtime, inode_get_mtime_nsec(inode));
+
+ btrfs_set_timespec_sec(leaf, &item->ctime, inode_get_ctime_sec(inode));
+ btrfs_set_timespec_nsec(leaf, &item->ctime, inode_get_ctime_nsec(inode));
+
+ btrfs_set_timespec_sec(leaf, &item->otime, BTRFS_I(inode)->i_otime_sec);
+ btrfs_set_timespec_nsec(leaf, &item->otime, BTRFS_I(inode)->i_otime_nsec);
+
+ btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
+ btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
+ btrfs_set_inode_sequence(leaf, item, inode_peek_iversion(inode));
+ btrfs_set_inode_transid(leaf, item, trans->transid);
+ btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
BTRFS_I(inode)->ro_flags);
- btrfs_set_token_inode_flags(&token, item, flags);
- btrfs_set_token_inode_block_group(&token, item, 0);
+ btrfs_set_inode_flags(leaf, item, flags);
+ btrfs_set_inode_block_group(leaf, item, 0);
}
/*
@@ -4053,7 +4198,7 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode)
{
struct btrfs_inode_item *inode_item;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_key key;
int ret;
@@ -4067,7 +4212,7 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
if (ret) {
if (ret > 0)
ret = -ENOENT;
- goto failed;
+ return ret;
}
leaf = path->nodes[0];
@@ -4075,12 +4220,8 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode_item);
fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_set_inode_last_trans(trans, inode);
- ret = 0;
-failed:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
/*
@@ -4125,6 +4266,23 @@ int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
return ret;
}
+static void update_time_after_link_or_unlink(struct btrfs_inode *dir)
+{
+ struct timespec64 now;
+
+ /*
+ * If we are replaying a log tree, we do not want to update the mtime
+ * and ctime of the parent directory with the current time, since the
+ * log replay procedure is responsible for setting them to their correct
+ * values (the ones it had when the fsync was done).
+ */
+ if (test_bit(BTRFS_FS_LOG_RECOVERING, &dir->root->fs_info->flags))
+ return;
+
+ now = inode_set_ctime_current(&dir->vfs_inode);
+ inode_set_mtime_to_ts(&dir->vfs_inode, now);
+}
+
/*
* unlink helper that gets used here in inode.c and in the tree logging
* recovery code. It remove a link in a directory with a given name, and
@@ -4146,20 +4304,22 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
u64 dir_ino = btrfs_ino(dir);
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!path)
+ return -ENOMEM;
di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
if (IS_ERR_OR_NULL(di)) {
- ret = di ? PTR_ERR(di) : -ENOENT;
- goto err;
+ btrfs_free_path(path);
+ return di ? PTR_ERR(di) : -ENOENT;
}
ret = btrfs_delete_one_dir_name(trans, root, path, di);
+ /*
+	/*
+	 * Further down the call chain we'll also need to allocate a path, so
+	 * there's no need to hold on to this one for longer than necessary.
+ */
+ btrfs_free_path(path);
if (ret)
- goto err;
- btrfs_release_path(path);
+ return ret;
/*
* If we don't have dir index, we have to get it by looking up
@@ -4180,21 +4340,21 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
}
ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
- if (ret) {
- btrfs_info(fs_info,
- "failed to delete reference to %.*s, inode %llu parent %llu",
- name->len, name->name, ino, dir_ino);
+ if (unlikely(ret)) {
+ btrfs_crit(fs_info,
+ "failed to delete reference to %.*s, root %llu inode %llu parent %llu",
+ name->len, name->name, btrfs_root_id(root), ino, dir_ino);
btrfs_abort_transaction(trans, ret);
- goto err;
+ return ret;
}
skip_backref:
if (rename_ctx)
rename_ctx->index = index;
ret = btrfs_delete_delayed_dir_index(trans, dir, index);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- goto err;
+ return ret;
}
/*
@@ -4204,8 +4364,8 @@ skip_backref:
* operations on the log tree, increasing latency for applications.
*/
if (!rename_ctx) {
- btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino);
- btrfs_del_dir_entries_in_log(trans, root, name, dir, index);
+ btrfs_del_inode_ref_in_log(trans, name, inode, dir);
+ btrfs_del_dir_entries_in_log(trans, name, dir, index);
}
/*
@@ -4218,19 +4378,14 @@ skip_backref:
* holding.
*/
btrfs_run_delayed_iput(fs_info, inode);
-err:
- btrfs_free_path(path);
- if (ret)
- goto out;
btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
inode_inc_iversion(&inode->vfs_inode);
inode_set_ctime_current(&inode->vfs_inode);
inode_inc_iversion(&dir->vfs_inode);
- inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
- ret = btrfs_update_inode(trans, dir);
-out:
- return ret;
+ update_time_after_link_or_unlink(dir);
+
+ return btrfs_update_inode(trans, dir);
}
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
@@ -4309,7 +4464,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
{
struct btrfs_root *root = dir->root;
struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_dir_item *di;
struct btrfs_key key;
@@ -4352,7 +4507,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
btrfs_dir_item_key_to_cpu(leaf, di, &key);
WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
ret = btrfs_delete_one_dir_name(trans, root, path, di);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -4383,14 +4538,14 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
ret = btrfs_del_root_ref(trans, objectid,
btrfs_root_id(root), dir_ino,
&index, &fname.disk_name);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
}
ret = btrfs_delete_delayed_dir_index(trans, dir, index);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -4402,7 +4557,6 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
if (ret)
btrfs_abort_transaction(trans, ret);
out:
- btrfs_free_path(path);
fscrypt_free_filename(&fname);
return ret;
}
@@ -4414,7 +4568,7 @@ out:
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_dir_item *di;
struct btrfs_key key;
struct fscrypt_str name = FSTR_INIT("default", 7);
@@ -4436,7 +4590,7 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
btrfs_err(fs_info,
"deleting default subvolume %llu is not allowed",
key.objectid);
- goto out;
+ return ret;
}
btrfs_release_path(path);
}
@@ -4447,14 +4601,13 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
if (ret < 0)
- goto out;
- if (ret == 0) {
+ return ret;
+ if (unlikely(ret == 0)) {
/*
* Key with offset -1 found, there would have to exist a root
* with such id, but this is out of valid range.
*/
- ret = -EUCLEAN;
- goto out;
+ return -EUCLEAN;
}
ret = 0;
@@ -4464,8 +4617,7 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
if (key.objectid == btrfs_root_id(root) && key.type == BTRFS_ROOT_REF_KEY)
ret = -ENOTEMPTY;
}
-out:
- btrfs_free_path(path);
+
return ret;
}
@@ -4481,7 +4633,7 @@ static void btrfs_prune_dentries(struct btrfs_root *root)
inode = btrfs_find_first_inode(root, min_ino);
while (inode) {
- if (atomic_read(&inode->vfs_inode.i_count) > 1)
+ if (icount_read(&inode->vfs_inode) > 1)
d_prune_aliases(&inode->vfs_inode);
min_ino = btrfs_ino(inode) + 1;
@@ -4564,13 +4716,13 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
btrfs_record_snapshot_destroy(trans, dir);
ret = btrfs_unlink_subvol(trans, dir, dentry);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
ret = btrfs_record_root_in_trans(trans, dest);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
@@ -4584,7 +4736,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
ret = btrfs_insert_orphan_item(trans,
fs_info->tree_root,
btrfs_root_id(dest));
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
@@ -4592,7 +4744,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
BTRFS_UUID_KEY_SUBVOL, btrfs_root_id(dest));
- if (ret && ret != -ENOENT) {
+ if (unlikely(ret && ret != -ENOENT)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
@@ -4601,7 +4753,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
dest->root_item.received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
btrfs_root_id(dest));
- if (ret && ret != -ENOENT) {
+ if (unlikely(ret && ret != -ENOENT)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
@@ -4637,68 +4789,68 @@ out_up_write:
return ret;
}
-static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
+static int btrfs_rmdir(struct inode *vfs_dir, struct dentry *dentry)
{
- struct inode *inode = d_inode(dentry);
- struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+ struct btrfs_inode *dir = BTRFS_I(vfs_dir);
+ struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
int ret = 0;
struct btrfs_trans_handle *trans;
- u64 last_unlink_trans;
struct fscrypt_name fname;
- if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
+ if (inode->vfs_inode.i_size > BTRFS_EMPTY_DIR_SIZE)
return -ENOTEMPTY;
- if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) {
+ if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
btrfs_err(fs_info,
"extent tree v2 doesn't support snapshot deletion yet");
return -EOPNOTSUPP;
}
- return btrfs_delete_subvolume(BTRFS_I(dir), dentry);
+ return btrfs_delete_subvolume(dir, dentry);
}
- ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
+ ret = fscrypt_setup_filename(vfs_dir, &dentry->d_name, 1, &fname);
if (ret)
return ret;
/* This needs to handle no-key deletions later on */
- trans = __unlink_start_trans(BTRFS_I(dir));
+ trans = __unlink_start_trans(dir);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_notrans;
}
- if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
- ret = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry);
+ /*
+ * Propagate the last_unlink_trans value of the deleted dir to its
+ * parent directory. This is to prevent an unrecoverable log tree in the
+ * case we do something like this:
+ * 1) create dir foo
+ * 2) create snapshot under dir foo
+ * 3) delete the snapshot
+ * 4) rmdir foo
+ * 5) mkdir foo
+ * 6) fsync foo or some file inside foo
+ *
+ * This is because we can't unlink other roots when replaying the dir
+ * deletes for directory foo.
+ */
+ if (inode->last_unlink_trans >= trans->transid)
+ btrfs_record_snapshot_destroy(trans, dir);
+
+ if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
+ ret = btrfs_unlink_subvol(trans, dir, dentry);
goto out;
}
- ret = btrfs_orphan_add(trans, BTRFS_I(inode));
+ ret = btrfs_orphan_add(trans, inode);
if (ret)
goto out;
- last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
-
/* now the directory is empty */
- ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
- &fname.disk_name);
- if (!ret) {
- btrfs_i_size_write(BTRFS_I(inode), 0);
- /*
- * Propagate the last_unlink_trans value of the deleted dir to
- * its parent directory. This is to prevent an unrecoverable
- * log tree in the case we do something like this:
- * 1) create dir foo
- * 2) create snapshot under dir foo
- * 3) delete the snapshot
- * 4) rmdir foo
- * 5) mkdir foo
- * 6) fsync foo or some file inside foo
- */
- if (last_unlink_trans >= trans->transid)
- BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
- }
+ ret = btrfs_unlink_inode(trans, dir, inode, &fname.disk_name);
+ if (!ret)
+ btrfs_i_size_write(inode, 0);
out:
btrfs_end_transaction(trans);
out_notrans:
@@ -4708,20 +4860,80 @@ out_notrans:
return ret;
}
+static bool is_inside_block(u64 bytenr, u64 blockstart, u32 blocksize)
+{
+ ASSERT(IS_ALIGNED(blockstart, blocksize), "blockstart=%llu blocksize=%u",
+ blockstart, blocksize);
+
+ if (blockstart <= bytenr && bytenr <= blockstart + blocksize - 1)
+ return true;
+ return false;
+}
+
+static int truncate_block_zero_beyond_eof(struct btrfs_inode *inode, u64 start)
+{
+ const pgoff_t index = (start >> PAGE_SHIFT);
+ struct address_space *mapping = inode->vfs_inode.i_mapping;
+ struct folio *folio;
+ u64 zero_start;
+ u64 zero_end;
+ int ret = 0;
+
+again:
+ folio = filemap_lock_folio(mapping, index);
+ /* No folio present. */
+ if (IS_ERR(folio))
+ return 0;
+
+ if (!folio_test_uptodate(folio)) {
+ ret = btrfs_read_folio(NULL, folio);
+ folio_lock(folio);
+ if (folio->mapping != mapping) {
+ folio_unlock(folio);
+ folio_put(folio);
+ goto again;
+ }
+ if (unlikely(!folio_test_uptodate(folio))) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+ }
+ folio_wait_writeback(folio);
+
+ /*
+ * We do not need to lock extents nor wait for OE, as it's already
+ * beyond EOF.
+ */
+
+ zero_start = max_t(u64, folio_pos(folio), start);
+ zero_end = folio_next_pos(folio);
+ folio_zero_range(folio, zero_start - folio_pos(folio),
+ zero_end - zero_start);
+
+out_unlock:
+ folio_unlock(folio);
+ folio_put(folio);
+ return ret;
+}
+
/*
- * Read, zero a chunk and write a block.
+ * Handle the truncation of a fs block.
+ *
+ * @inode - inode that we're zeroing
+ * @offset - the file offset of the block to truncate
+ * The value must be inside [@start, @end], and the function will do
+ * extra checks to determine whether the block that covers @offset needs
+ * to be zeroed.
+ * @start - the start file offset of the range we want to zero
+ * @end - the end (inclusive) file offset of the range we want to zero.
*
- * @inode - inode that we're zeroing
- * @from - the offset to start zeroing
- * @len - the length to zero, 0 to zero the entire range respective to the
- * offset
- * @front - zero up to the offset instead of from the offset on
+ * If the range is not block aligned, read out the folio that covers @offset,
+ * and, if needed, zero the blocks that are inside the folio and covered by
+ * [@start, @end].
+ * If @start or @end + 1 lands inside a block, that block will be marked dirty
+ * for writeback.
*
- * This will find the block for the "from" offset and cow the block and zero the
- * part we want to zero. This is used with truncate and hole punching.
+ * This is utilized by hole punch, zero range, file expansion.
*/
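/*
 * Sketch of how a caller might use the interface described above; the
 * hole-punch shape, variable names and error handling here are assumed,
 * not taken from this change. For an inclusive range [start, end], the
 * partial head block and, if different, the partial tail block are
 * zeroed by passing the same range twice with different @offset values.
 */
ret = btrfs_truncate_block(inode, start, start, end);
if (ret == 0 && round_down(start, blocksize) != round_down(end, blocksize))
	ret = btrfs_truncate_block(inode, end, start, end);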
-int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
- int front)
+int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 end)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct address_space *mapping = inode->vfs_inode.i_mapping;
@@ -4731,27 +4943,66 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
struct extent_changeset *data_reserved = NULL;
bool only_release_metadata = false;
u32 blocksize = fs_info->sectorsize;
- pgoff_t index = from >> PAGE_SHIFT;
- unsigned offset = from & (blocksize - 1);
+ pgoff_t index = (offset >> PAGE_SHIFT);
struct folio *folio;
gfp_t mask = btrfs_alloc_write_mask(mapping);
- size_t write_bytes = blocksize;
int ret = 0;
+ const bool in_head_block = is_inside_block(offset, round_down(start, blocksize),
+ blocksize);
+ const bool in_tail_block = is_inside_block(offset, round_down(end, blocksize),
+ blocksize);
+ bool need_truncate_head = false;
+ bool need_truncate_tail = false;
+ u64 zero_start;
+ u64 zero_end;
u64 block_start;
u64 block_end;
- if (IS_ALIGNED(offset, blocksize) &&
- (!len || IS_ALIGNED(len, blocksize)))
+ /* @offset should be inside the range. */
+ ASSERT(start <= offset && offset <= end, "offset=%llu start=%llu end=%llu",
+ offset, start, end);
+
+ /* The range is aligned at both ends. */
+ if (IS_ALIGNED(start, blocksize) && IS_ALIGNED(end + 1, blocksize)) {
+ /*
+ * For the block size < page size case, we may have polluted blocks
+ * beyond EOF, so we also need to zero them out.
+ */
+ if (end == (u64)-1 && blocksize < PAGE_SIZE)
+ ret = truncate_block_zero_beyond_eof(inode, start);
+ goto out;
+ }
+
+ /*
+ * @offset may be inside neither the head nor the tail block. In that
+ * case we don't need to do anything.
+ */
+ if (!in_head_block && !in_tail_block)
goto out;
- block_start = round_down(from, blocksize);
+ /*
+ * Skip the truncation if the range within the target block is already aligned.
+ * The seemingly complex checks below also handle the case where the head and
+ * tail land in the same block.
+ */
+ if (in_head_block && !IS_ALIGNED(start, blocksize))
+ need_truncate_head = true;
+ if (in_tail_block && !IS_ALIGNED(end + 1, blocksize))
+ need_truncate_tail = true;
+ if (!need_truncate_head && !need_truncate_tail)
+ goto out;
+
+ block_start = round_down(offset, blocksize);
block_end = block_start + blocksize - 1;
ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
blocksize, false);
if (ret < 0) {
+ size_t write_bytes = blocksize;
+
if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
- /* For nocow case, no need to reserve data space */
+ /* For nocow case, no need to reserve data space. */
+ ASSERT(write_bytes == blocksize, "write_bytes=%zu blocksize=%u",
+ write_bytes, blocksize);
only_release_metadata = true;
} else {
goto out;
@@ -4768,10 +5019,13 @@ again:
folio = __filemap_get_folio(mapping, index,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
if (IS_ERR(folio)) {
- btrfs_delalloc_release_space(inode, data_reserved, block_start,
- blocksize, true);
+ if (only_release_metadata)
+ btrfs_delalloc_release_metadata(inode, blocksize, true);
+ else
+ btrfs_delalloc_release_space(inode, data_reserved,
+ block_start, blocksize, true);
btrfs_delalloc_release_extents(inode, blocksize);
- ret = -ENOMEM;
+ ret = PTR_ERR(folio);
goto out;
}
@@ -4783,7 +5037,7 @@ again:
folio_put(folio);
goto again;
}
- if (!folio_test_uptodate(folio)) {
+ if (unlikely(!folio_test_uptodate(folio))) {
ret = -EIO;
goto out_unlock;
}
@@ -4801,11 +5055,11 @@ again:
folio_wait_writeback(folio);
- lock_extent(io_tree, block_start, block_end, &cached_state);
+ btrfs_lock_extent(io_tree, block_start, block_end, &cached_state);
ordered = btrfs_lookup_ordered_extent(inode, block_start);
if (ordered) {
- unlock_extent(io_tree, block_start, block_end, &cached_state);
+ btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
folio_unlock(folio);
folio_put(folio);
btrfs_start_ordered_extent(ordered);
@@ -4813,37 +5067,46 @@ again:
goto again;
}
- clear_extent_bit(&inode->io_tree, block_start, block_end,
- EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- &cached_state);
+ btrfs_clear_extent_bit(&inode->io_tree, block_start, block_end,
+ EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
+ &cached_state);
ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
&cached_state);
if (ret) {
- unlock_extent(io_tree, block_start, block_end, &cached_state);
+ btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
goto out_unlock;
}
- if (offset != blocksize) {
- if (!len)
- len = blocksize - offset;
- if (front)
- folio_zero_range(folio, block_start - folio_pos(folio),
- offset);
- else
- folio_zero_range(folio,
- (block_start - folio_pos(folio)) + offset,
- len);
+ if (end == (u64)-1) {
+ /*
+ * We're truncating beyond EOF, so the remaining blocks are normally
+ * already holes and there is no need to zero them again. However, for
+ * fs block size < page size cases, memory-mapped writes may have
+ * polluted ranges beyond EOF.
+ *
+ * In that case, although such polluted blocks beyond EOF will not
+ * reach disk, they still affect our page cache.
+ */
+ zero_start = max_t(u64, folio_pos(folio), start);
+ zero_end = min_t(u64, folio_next_pos(folio) - 1, end);
+ } else {
+ zero_start = max_t(u64, block_start, start);
+ zero_end = min_t(u64, block_end, end);
}
+ folio_zero_range(folio, zero_start - folio_pos(folio),
+ zero_end - zero_start + 1);
+
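/*
 * Worked example for the end == (u64)-1 branch above, assuming 4K fs
 * blocks inside a 64K folio that starts at file offset 0 and a
 * truncation to i_size = 10K (so start = 10K):
 *   zero_start = max(folio_pos(folio), start)        = max(0, 10K)      = 10K
 *   zero_end   = min(folio_next_pos(folio) - 1, end) = min(64K - 1, ~0) = 64K - 1
 * so folio_zero_range() wipes bytes 10K..64K-1, clearing any blocks
 * beyond EOF that memory-mapped writes may have dirtied.
 */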
btrfs_folio_clear_checked(fs_info, folio, block_start,
block_end + 1 - block_start);
btrfs_folio_set_dirty(fs_info, folio, block_start,
block_end + 1 - block_start);
- unlock_extent(io_tree, block_start, block_end, &cached_state);
if (only_release_metadata)
- set_extent_bit(&inode->io_tree, block_start, block_end,
- EXTENT_NORESERVE, NULL);
+ btrfs_set_extent_bit(&inode->io_tree, block_start, block_end,
+ EXTENT_NORESERVE, &cached_state);
+
+ btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
out_unlock:
if (ret) {
@@ -4894,7 +5157,7 @@ static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len)
drop_args.drop_cache = true;
ret = btrfs_drop_extents(trans, root, inode, &drop_args);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
return ret;
@@ -4936,7 +5199,7 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
* rest of the block before we expand the i_size, otherwise we could
* expose stale data.
*/
- ret = btrfs_truncate_block(inode, oldsize, 0, 0);
+ ret = btrfs_truncate_block(inode, oldsize, oldsize, -1);
if (ret)
return ret;
@@ -4953,7 +5216,7 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
em = NULL;
break;
}
- last_byte = min(extent_map_end(em), block_end);
+ last_byte = min(btrfs_extent_map_end(em), block_end);
last_byte = ALIGN(last_byte, fs_info->sectorsize);
hole_size = last_byte - cur_offset;
@@ -4969,7 +5232,7 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
if (ret)
break;
- hole_em = alloc_extent_map();
+ hole_em = btrfs_alloc_extent_map();
if (!hole_em) {
btrfs_drop_extent_map_range(inode, cur_offset,
cur_offset + hole_size - 1,
@@ -4986,7 +5249,7 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
hole_em->generation = btrfs_get_fs_generation(fs_info);
ret = btrfs_replace_extent_map_range(inode, hole_em, true);
- free_extent_map(hole_em);
+ btrfs_free_extent_map(hole_em);
} else {
ret = btrfs_inode_set_file_extent_range(inode,
cur_offset, hole_size);
@@ -4994,14 +5257,14 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
break;
}
next:
- free_extent_map(em);
+ btrfs_free_extent_map(em);
em = NULL;
cur_offset = last_byte;
if (cur_offset >= block_end)
break;
}
- free_extent_map(em);
- unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
+ btrfs_free_extent_map(em);
+ btrfs_unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
return ret;
}
@@ -5081,7 +5344,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize);
if (ret && inode->i_nlink) {
- int err;
+ int ret2;
/*
* Truncate failed, so fix up the in-memory size. We
@@ -5089,9 +5352,9 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
* wait for disk_i_size to be stable and then update the
* in-memory size to match.
*/
- err = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
- if (err)
- return err;
+ ret2 = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
+ if (ret2)
+ return ret2;
i_size_write(inode, BTRFS_I(inode)->disk_i_size);
}
}
@@ -5104,31 +5367,31 @@ static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
{
struct inode *inode = d_inode(dentry);
struct btrfs_root *root = BTRFS_I(inode)->root;
- int err;
+ int ret;
if (btrfs_root_readonly(root))
return -EROFS;
- err = setattr_prepare(idmap, dentry, attr);
- if (err)
- return err;
+ ret = setattr_prepare(idmap, dentry, attr);
+ if (ret)
+ return ret;
if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
- err = btrfs_setsize(inode, attr);
- if (err)
- return err;
+ ret = btrfs_setsize(inode, attr);
+ if (ret)
+ return ret;
}
if (attr->ia_valid) {
setattr_copy(idmap, inode, attr);
inode_inc_iversion(inode);
- err = btrfs_dirty_inode(BTRFS_I(inode));
+ ret = btrfs_dirty_inode(BTRFS_I(inode));
- if (!err && attr->ia_valid & ATTR_MODE)
- err = posix_acl_chmod(idmap, dentry, inode->i_mode);
+ if (!ret && attr->ia_valid & ATTR_MODE)
+ ret = posix_acl_chmod(idmap, dentry, inode->i_mode);
}
- return err;
+ return ret;
}
/*
@@ -5149,7 +5412,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct rb_node *node;
- ASSERT(inode->i_state & I_FREEING);
+ ASSERT(inode_state_read_once(inode) & I_FREEING);
truncate_inode_pages_final(&inode->i_data);
btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
@@ -5185,7 +5448,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
state_flags = state->state;
spin_unlock(&io_tree->lock);
- lock_extent(io_tree, start, end, &cached_state);
+ btrfs_lock_extent(io_tree, start, end, &cached_state);
/*
* If still has DELALLOC flag, the extent didn't reach disk,
@@ -5199,9 +5462,9 @@ static void evict_inode_truncate_pages(struct inode *inode)
btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
end - start + 1, NULL);
- clear_extent_bit(io_tree, start, end,
- EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
- &cached_state);
+ btrfs_clear_extent_bit(io_tree, start, end,
+ EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
+ &cached_state);
cond_resched();
spin_lock(&io_tree->lock);
@@ -5262,7 +5525,7 @@ void btrfs_evict_inode(struct inode *inode)
struct btrfs_fs_info *fs_info;
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_block_rsv *rsv = NULL;
+ struct btrfs_block_rsv rsv;
int ret;
trace_btrfs_inode_evict(inode);
@@ -5310,11 +5573,9 @@ void btrfs_evict_inode(struct inode *inode)
*/
btrfs_kill_delayed_inode_items(BTRFS_I(inode));
- rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
- if (!rsv)
- goto out;
- rsv->size = btrfs_calc_metadata_size(fs_info, 1);
- rsv->failfast = true;
+ btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
+ rsv.size = btrfs_calc_metadata_size(fs_info, 1);
+ rsv.failfast = true;
btrfs_i_size_write(BTRFS_I(inode), 0);
@@ -5326,11 +5587,11 @@ void btrfs_evict_inode(struct inode *inode)
.min_type = 0,
};
- trans = evict_refill_and_join(root, rsv);
+ trans = evict_refill_and_join(root, &rsv);
if (IS_ERR(trans))
- goto out;
+ goto out_release;
- trans->block_rsv = rsv;
+ trans->block_rsv = &rsv;
ret = btrfs_truncate_inode_items(trans, root, &control);
trans->block_rsv = &fs_info->trans_block_rsv;
@@ -5342,7 +5603,7 @@ void btrfs_evict_inode(struct inode *inode)
*/
btrfs_btree_balance_dirty_nodelay(fs_info);
if (ret && ret != -ENOSPC && ret != -EAGAIN)
- goto out;
+ goto out_release;
else if (!ret)
break;
}
@@ -5356,16 +5617,17 @@ void btrfs_evict_inode(struct inode *inode)
* If it turns out that we are dropping too many of these, we might want
* to add a mechanism for retrying these after a commit.
*/
- trans = evict_refill_and_join(root, rsv);
+ trans = evict_refill_and_join(root, &rsv);
if (!IS_ERR(trans)) {
- trans->block_rsv = rsv;
+ trans->block_rsv = &rsv;
btrfs_orphan_del(trans, BTRFS_I(inode));
trans->block_rsv = &fs_info->trans_block_rsv;
btrfs_end_transaction(trans);
}
+out_release:
+ btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
out:
- btrfs_free_block_rsv(fs_info, rsv);
/*
* If we didn't successfully delete, the orphan item will still be in
* the tree and we'll retry on the next mount. Again, we might also want
@@ -5387,7 +5649,7 @@ static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
struct btrfs_key *location, u8 *type)
{
struct btrfs_dir_item *di;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *root = dir->root;
int ret = 0;
struct fscrypt_name fname;
@@ -5398,7 +5660,7 @@ static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
if (ret < 0)
- goto out;
+ return ret;
/*
* fscrypt_setup_filename() should never return a positive value, but
* gcc on sparc/parisc thinks it can, so assert that doesn't happen.
@@ -5415,19 +5677,18 @@ static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
}
btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
- if (location->type != BTRFS_INODE_ITEM_KEY &&
- location->type != BTRFS_ROOT_ITEM_KEY) {
+ if (unlikely(location->type != BTRFS_INODE_ITEM_KEY &&
+ location->type != BTRFS_ROOT_ITEM_KEY)) {
ret = -EUCLEAN;
btrfs_warn(root->fs_info,
-"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
+"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location " BTRFS_KEY_FMT ")",
__func__, fname.disk_name.name, btrfs_ino(dir),
- location->objectid, location->type, location->offset);
+ BTRFS_KEY_FMT_VALUE(location));
}
if (!ret)
*type = btrfs_dir_ftype(path->nodes[0], di);
out:
fscrypt_free_filename(&fname);
- btrfs_free_path(path);
return ret;
}
@@ -5442,7 +5703,7 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
struct btrfs_key *location,
struct btrfs_root **sub_root)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *new_root;
struct btrfs_root_ref *ref;
struct extent_buffer *leaf;
@@ -5498,7 +5759,6 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
location->offset = 0;
err = 0;
out:
- btrfs_free_path(path);
fscrypt_free_filename(&fname);
return err;
}
@@ -5512,7 +5772,17 @@ static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
bool empty = false;
xa_lock(&root->inodes);
- entry = __xa_erase(&root->inodes, btrfs_ino(inode));
+ /*
+ * This btrfs_inode is being freed and has already been unhashed at this
+ * point. It's possible that another btrfs_inode has already been
+ * allocated for the same inode and inserted itself into the root, so
+ * don't delete it in that case.
+ *
+ * Note that this shouldn't need to allocate memory, so the gfp flags
+ * don't really matter.
+ */
+ entry = __xa_cmpxchg(&root->inodes, btrfs_ino(inode), inode, NULL,
+ GFP_ATOMIC);
if (entry == inode)
empty = xa_empty(&root->inodes);
xa_unlock(&root->inodes);
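/*
 * Illustrative interleaving that the cmpxchg above guards against; task
 * names are assumed:
 *   T1: evicts the old btrfs_inode for ino N, which is unhashed but not
 *       yet removed from root->inodes
 *   T2: allocates a new btrfs_inode for ino N and stores it at the same
 *       index in root->inodes
 *   T1: a plain __xa_erase(N) would now drop T2's entry; the cmpxchg
 *       only clears the slot if it still holds T1's inode.
 */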
@@ -5549,7 +5819,7 @@ static int btrfs_find_actor(struct inode *inode, void *opaque)
args->root == BTRFS_I(inode)->root;
}
-static struct inode *btrfs_iget_locked(u64 ino, struct btrfs_root *root)
+static struct btrfs_inode *btrfs_iget_locked(u64 ino, struct btrfs_root *root)
{
struct inode *inode;
struct btrfs_iget_args args;
@@ -5561,40 +5831,42 @@ static struct inode *btrfs_iget_locked(u64 ino, struct btrfs_root *root)
inode = iget5_locked_rcu(root->fs_info->sb, hashval, btrfs_find_actor,
btrfs_init_locked_inode,
(void *)&args);
- return inode;
+ if (!inode)
+ return NULL;
+ return BTRFS_I(inode);
}
/*
* Get an inode object given its inode number and corresponding root. Path is
* preallocated to prevent recursing back to iget through allocator.
*/
-struct inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
- struct btrfs_path *path)
+struct btrfs_inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
+ struct btrfs_path *path)
{
- struct inode *inode;
+ struct btrfs_inode *inode;
int ret;
inode = btrfs_iget_locked(ino, root);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW))
return inode;
ret = btrfs_read_locked_inode(inode, path);
if (ret)
return ERR_PTR(ret);
- unlock_new_inode(inode);
+ unlock_new_inode(&inode->vfs_inode);
return inode;
}
/*
* Get an inode object given its inode number and corresponding root.
*/
-struct inode *btrfs_iget(u64 ino, struct btrfs_root *root)
+struct btrfs_inode *btrfs_iget(u64 ino, struct btrfs_root *root)
{
- struct inode *inode;
+ struct btrfs_inode *inode;
struct btrfs_path *path;
int ret;
@@ -5602,55 +5874,62 @@ struct inode *btrfs_iget(u64 ino, struct btrfs_root *root)
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW))
return inode;
path = btrfs_alloc_path();
- if (!path)
+ if (!path) {
+ iget_failed(&inode->vfs_inode);
return ERR_PTR(-ENOMEM);
+ }
ret = btrfs_read_locked_inode(inode, path);
btrfs_free_path(path);
if (ret)
return ERR_PTR(ret);
- unlock_new_inode(inode);
+ if (S_ISDIR(inode->vfs_inode.i_mode))
+ inode->vfs_inode.i_opflags |= IOP_FASTPERM_MAY_EXEC;
+ unlock_new_inode(&inode->vfs_inode);
return inode;
}
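/*
 * Sketch of the calling convention after the return type change above;
 * the caller, "ino" and "root" are assumed. btrfs_iget() now returns a
 * struct btrfs_inode *, so callers reach the VFS inode through
 * ->vfs_inode instead of wrapping the result with BTRFS_I().
 */
struct btrfs_inode *inode;

inode = btrfs_iget(ino, root);
if (IS_ERR(inode))
	return PTR_ERR(inode);
/* ... work with inode ... */
iput(&inode->vfs_inode);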
-static struct inode *new_simple_dir(struct inode *dir,
- struct btrfs_key *key,
- struct btrfs_root *root)
+static struct btrfs_inode *new_simple_dir(struct inode *dir,
+ struct btrfs_key *key,
+ struct btrfs_root *root)
{
struct timespec64 ts;
- struct inode *inode = new_inode(dir->i_sb);
+ struct inode *vfs_inode;
+ struct btrfs_inode *inode;
- if (!inode)
+ vfs_inode = new_inode(dir->i_sb);
+ if (!vfs_inode)
return ERR_PTR(-ENOMEM);
- BTRFS_I(inode)->root = btrfs_grab_root(root);
- BTRFS_I(inode)->ref_root_id = key->objectid;
- set_bit(BTRFS_INODE_ROOT_STUB, &BTRFS_I(inode)->runtime_flags);
- set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
+ inode = BTRFS_I(vfs_inode);
+ inode->root = btrfs_grab_root(root);
+ inode->ref_root_id = key->objectid;
+ set_bit(BTRFS_INODE_ROOT_STUB, &inode->runtime_flags);
+ set_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags);
- btrfs_set_inode_number(BTRFS_I(inode), BTRFS_EMPTY_SUBVOL_DIR_OBJECTID);
+ btrfs_set_inode_number(inode, BTRFS_EMPTY_SUBVOL_DIR_OBJECTID);
/*
* We only need lookup, the rest is read-only and there's no inode
* associated with the dentry
*/
- inode->i_op = &simple_dir_inode_operations;
- inode->i_opflags &= ~IOP_XATTR;
- inode->i_fop = &simple_dir_operations;
- inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
+ vfs_inode->i_op = &simple_dir_inode_operations;
+ vfs_inode->i_opflags &= ~IOP_XATTR;
+ vfs_inode->i_fop = &simple_dir_operations;
+ vfs_inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
- ts = inode_set_ctime_current(inode);
- inode_set_mtime_to_ts(inode, ts);
- inode_set_atime_to_ts(inode, inode_get_atime(dir));
- BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
- BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
+ ts = inode_set_ctime_current(vfs_inode);
+ inode_set_mtime_to_ts(vfs_inode, ts);
+ inode_set_atime_to_ts(vfs_inode, inode_get_atime(dir));
+ inode->i_otime_sec = ts.tv_sec;
+ inode->i_otime_nsec = ts.tv_nsec;
- inode->i_uid = dir->i_uid;
- inode->i_gid = dir->i_gid;
+ vfs_inode->i_uid = dir->i_uid;
+ vfs_inode->i_gid = dir->i_gid;
return inode;
}
@@ -5664,15 +5943,15 @@ static_assert(BTRFS_FT_FIFO == FT_FIFO);
static_assert(BTRFS_FT_SOCK == FT_SOCK);
static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);
-static inline u8 btrfs_inode_type(struct inode *inode)
+static inline u8 btrfs_inode_type(const struct btrfs_inode *inode)
{
- return fs_umode_to_ftype(inode->i_mode);
+ return fs_umode_to_ftype(inode->vfs_inode.i_mode);
}
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
- struct inode *inode;
+ struct btrfs_inode *inode;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *sub_root = root;
struct btrfs_key location = { 0 };
@@ -5689,18 +5968,18 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
if (location.type == BTRFS_INODE_ITEM_KEY) {
inode = btrfs_iget(location.objectid, root);
if (IS_ERR(inode))
- return inode;
+ return ERR_CAST(inode);
/* Do extra check against inode mode with di_type */
- if (btrfs_inode_type(inode) != di_type) {
+ if (unlikely(btrfs_inode_type(inode) != di_type)) {
btrfs_crit(fs_info,
"inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
- inode->i_mode, btrfs_inode_type(inode),
+ inode->vfs_inode.i_mode, btrfs_inode_type(inode),
di_type);
- iput(inode);
+ iput(&inode->vfs_inode);
return ERR_PTR(-EUCLEAN);
}
- return inode;
+ return &inode->vfs_inode;
}
ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry,
@@ -5715,19 +5994,22 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
btrfs_put_root(sub_root);
if (IS_ERR(inode))
- return inode;
+ return ERR_CAST(inode);
down_read(&fs_info->cleanup_work_sem);
- if (!sb_rdonly(inode->i_sb))
+ if (!sb_rdonly(inode->vfs_inode.i_sb))
ret = btrfs_orphan_cleanup(sub_root);
up_read(&fs_info->cleanup_work_sem);
if (ret) {
- iput(inode);
+ iput(&inode->vfs_inode);
inode = ERR_PTR(ret);
}
}
- return inode;
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
+ return &inode->vfs_inode;
}
static int btrfs_dentry_delete(const struct dentry *dentry)
@@ -5767,7 +6049,7 @@ static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
{
struct btrfs_root *root = inode->root;
struct btrfs_key key, found_key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
int ret;
@@ -5781,15 +6063,14 @@ static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
/* FIXME: we should be able to handle this */
if (ret == 0)
- goto out;
- ret = 0;
+ return ret;
if (path->slots[0] == 0) {
inode->index_cnt = BTRFS_DIR_START_INDEX;
- goto out;
+ return 0;
}
path->slots[0]--;
@@ -5800,13 +6081,12 @@ static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
if (found_key.objectid != btrfs_ino(inode) ||
found_key.type != BTRFS_DIR_INDEX_KEY) {
inode->index_cnt = BTRFS_DIR_START_INDEX;
- goto out;
+ return 0;
}
inode->index_cnt = found_key.offset + 1;
-out:
- btrfs_free_path(path);
- return ret;
+
+ return 0;
}
static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
@@ -5909,7 +6189,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
struct btrfs_dir_item *di;
struct btrfs_key key;
struct btrfs_key found_key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
void *addr;
LIST_HEAD(ins_list);
LIST_HEAD(del_list);
@@ -5992,8 +6272,7 @@ again:
if (ret)
goto nopos;
- ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
- if (ret)
+ if (btrfs_readdir_delayed_dir_index(ctx, &ins_list))
goto nopos;
/*
@@ -6022,7 +6301,6 @@ nopos:
err:
if (put)
btrfs_readdir_put_delayed_items(BTRFS_I(inode), &ins_list, &del_list);
- btrfs_free_path(path);
return ret;
}
@@ -6064,8 +6342,8 @@ static int btrfs_dirty_inode(struct btrfs_inode *inode)
}
/*
- * This is a copy of file_update_time. We need this so we can return error on
- * ENOSPC for updating the inode in the case of file write and mmap writes.
+ * We need our own ->update_time so that we can return an ENOSPC error
+ * from updating the inode in the case of file writes and mmap writes.
*/
static int btrfs_update_time(struct inode *inode, int flags)
{
@@ -6200,7 +6478,7 @@ static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *
inode->flags |= BTRFS_INODE_NODATASUM;
}
- btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
+ btrfs_sync_inode_flags_to_i_flags(inode);
}
int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
@@ -6280,12 +6558,14 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
if (!args->subvol)
btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));
+ btrfs_set_inode_mapping_order(BTRFS_I(inode));
if (S_ISREG(inode->i_mode)) {
if (btrfs_test_opt(fs_info, NODATASUM))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
if (btrfs_test_opt(fs_info, NODATACOW))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
BTRFS_INODE_NODATASUM;
+ btrfs_update_inode_mapping_flags(BTRFS_I(inode));
}
ret = btrfs_insert_inode_locked(inode);
@@ -6332,7 +6612,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
batch.nr = args->orphan ? 1 : 2;
ret = btrfs_insert_empty_items(trans, root, path, &batch);
- if (ret != 0) {
+ if (unlikely(ret != 0)) {
btrfs_abort_transaction(trans, ret);
goto discard;
}
@@ -6370,7 +6650,6 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
}
}
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
/*
* We don't need the path anymore, plus inheriting properties, adding
* ACLs, security xattrs, orphan item or adding the link, will result in
@@ -6380,7 +6659,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
path = NULL;
if (args->subvol) {
- struct inode *parent;
+ struct btrfs_inode *parent;
/*
* Subvolumes inherit properties from their parent subvolume,
@@ -6390,11 +6669,13 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
if (IS_ERR(parent)) {
ret = PTR_ERR(parent);
} else {
- ret = btrfs_inode_inherit_props(trans, inode, parent);
- iput(parent);
+ ret = btrfs_inode_inherit_props(trans, BTRFS_I(inode),
+ parent);
+ iput(&parent->vfs_inode);
}
} else {
- ret = btrfs_inode_inherit_props(trans, inode, dir);
+ ret = btrfs_inode_inherit_props(trans, BTRFS_I(inode),
+ BTRFS_I(dir));
}
if (ret) {
btrfs_err(fs_info,
@@ -6408,7 +6689,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
*/
if (!args->subvol) {
ret = btrfs_init_inode_security(trans, args);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto discard;
}
@@ -6428,13 +6709,17 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
if (args->orphan) {
ret = btrfs_orphan_add(trans, BTRFS_I(inode));
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto discard;
+ }
} else {
ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
0, BTRFS_I(inode)->dir_index);
- }
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto discard;
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto discard;
+ }
}
return 0;
@@ -6462,7 +6747,7 @@ out:
*/
int btrfs_add_link(struct btrfs_trans_handle *trans,
struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
- const struct fscrypt_str *name, int add_backref, u64 index)
+ const struct fscrypt_str *name, bool add_backref, u64 index)
{
int ret = 0;
struct btrfs_key key;
@@ -6492,10 +6777,10 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
return ret;
ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
- btrfs_inode_type(&inode->vfs_inode), index);
+ btrfs_inode_type(inode), index);
if (ret == -EEXIST || ret == -EOVERFLOW)
goto fail_dir_item;
- else if (ret) {
+ else if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -6503,15 +6788,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
name->len * 2);
inode_inc_iversion(&parent_inode->vfs_inode);
- /*
- * If we are replaying a log tree, we do not want to update the mtime
- * and ctime of the parent directory with the current time, since the
- * log replay procedure is responsible for setting them to their correct
- * values (the ones it had when the fsync was done).
- */
- if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
- inode_set_mtime_to_ts(&parent_inode->vfs_inode,
- inode_set_ctime_current(&parent_inode->vfs_inode));
+ update_time_after_link_or_unlink(parent_inode);
ret = btrfs_update_inode(trans, parent_inode);
if (ret)
@@ -6521,20 +6798,18 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
fail_dir_item:
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
u64 local_index;
- int err;
- err = btrfs_del_root_ref(trans, key.objectid,
- btrfs_root_id(root), parent_ino,
- &local_index, name);
- if (err)
- btrfs_abort_transaction(trans, err);
+ int ret2;
+
+ ret2 = btrfs_del_root_ref(trans, key.objectid, btrfs_root_id(root),
+ parent_ino, &local_index, name);
+ if (ret2)
+ btrfs_abort_transaction(trans, ret2);
} else if (add_backref) {
- u64 local_index;
- int err;
+ int ret2;
- err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino,
- &local_index);
- if (err)
- btrfs_abort_transaction(trans, err);
+ ret2 = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, NULL);
+ if (ret2)
+ btrfs_abort_transaction(trans, ret2);
}
/* Return the original error code */
@@ -6553,30 +6828,33 @@ static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
};
unsigned int trans_num_items;
struct btrfs_trans_handle *trans;
- int err;
+ int ret;
- err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
- if (err)
+ ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
+ if (ret)
goto out_inode;
trans = btrfs_start_transaction(root, trans_num_items);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
goto out_new_inode_args;
}
- err = btrfs_create_new_inode(trans, &new_inode_args);
- if (!err)
+ ret = btrfs_create_new_inode(trans, &new_inode_args);
+ if (!ret) {
+ if (S_ISDIR(inode->i_mode))
+ inode->i_opflags |= IOP_FASTPERM_MAY_EXEC;
d_instantiate_new(dentry, inode);
+ }
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
- if (err)
+ if (ret)
iput(inode);
- return err;
+ return ret;
}
static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
@@ -6617,8 +6895,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct fscrypt_name fname;
u64 index;
- int err;
- int drop_inode = 0;
+ int ret;
/* do not allow sys_link's with other subvols of the same device */
if (btrfs_root_id(root) != btrfs_root_id(BTRFS_I(inode)->root))
@@ -6627,12 +6904,12 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
if (inode->i_nlink >= BTRFS_LINK_MAX)
return -EMLINK;
- err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
- if (err)
+ ret = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
+ if (ret)
goto fail;
- err = btrfs_set_inode_index(BTRFS_I(dir), &index);
- if (err)
+ ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
+ if (ret)
goto fail;
/*
@@ -6643,67 +6920,66 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
*/
trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
trans = NULL;
goto fail;
}
/* There are several dir indexes for this inode, clear the cache. */
BTRFS_I(inode)->dir_index = 0ULL;
- inc_nlink(inode);
inode_inc_iversion(inode);
inode_set_ctime_current(inode);
- ihold(inode);
- set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
- err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
+ ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
&fname.disk_name, 1, index);
+ if (ret)
+ goto fail;
- if (err) {
- drop_inode = 1;
- } else {
- struct dentry *parent = dentry->d_parent;
+ /* Link added now we update the inode item with the new link count. */
+ inc_nlink(inode);
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto fail;
+ }
- err = btrfs_update_inode(trans, BTRFS_I(inode));
- if (err)
+ if (inode->i_nlink == 1) {
+ /*
+ * If the new hard link count is 1, it's a file created with the
+ * open(2) O_TMPFILE flag.
+ */
+ ret = btrfs_orphan_del(trans, BTRFS_I(inode));
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto fail;
- if (inode->i_nlink == 1) {
- /*
- * If new hard link count is 1, it's a file created
- * with open(2) O_TMPFILE flag.
- */
- err = btrfs_orphan_del(trans, BTRFS_I(inode));
- if (err)
- goto fail;
}
- d_instantiate(dentry, inode);
- btrfs_log_new_name(trans, old_dentry, NULL, 0, parent);
}
+ /* Grab a reference for the new dentry passed to d_instantiate(). */
+ ihold(inode);
+ d_instantiate(dentry, inode);
+ btrfs_log_new_name(trans, old_dentry, NULL, 0, dentry->d_parent);
+
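/*
 * Userspace pattern that reaches the i_nlink == 1 branch above; paths
 * are assumed and error handling is omitted. An O_TMPFILE file starts
 * with zero links plus an orphan item, and gains its first link through
 * linkat(), at which point the orphan item has to be removed.
 */
int fd = open("/mnt", O_TMPFILE | O_WRONLY, 0644);
char path[64];

snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
linkat(AT_FDCWD, path, AT_FDCWD, "/mnt/file", AT_SYMLINK_FOLLOW);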
fail:
fscrypt_free_filename(&fname);
if (trans)
btrfs_end_transaction(trans);
- if (drop_inode) {
- inode_dec_link_count(inode);
- iput(inode);
- }
btrfs_btree_balance_dirty(fs_info);
- return err;
+ return ret;
}
-static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode *inode;
inode = new_inode(dir->i_sb);
if (!inode)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
inode->i_op = &btrfs_dir_inode_operations;
inode->i_fop = &btrfs_dir_file_operations;
- return btrfs_create_common(dir, dentry, inode);
+ return ERR_PTR(btrfs_create_common(dir, dentry, inode));
}
static noinline int uncompress_inline(struct btrfs_path *path,
@@ -6712,6 +6988,7 @@ static noinline int uncompress_inline(struct btrfs_path *path,
{
int ret;
struct extent_buffer *leaf = path->nodes[0];
+ const u32 blocksize = leaf->fs_info->sectorsize;
char *tmp;
size_t max_size;
unsigned long inline_size;
@@ -6728,7 +7005,7 @@ static noinline int uncompress_inline(struct btrfs_path *path,
read_extent_buffer(leaf, tmp, ptr, inline_size);
- max_size = min_t(unsigned long, PAGE_SIZE, max_size);
+ max_size = min_t(unsigned long, blocksize, max_size);
ret = btrfs_decompress(compress_type, tmp, folio, 0, inline_size,
max_size);
@@ -6740,14 +7017,15 @@ static noinline int uncompress_inline(struct btrfs_path *path,
* cover that region here.
*/
- if (max_size < PAGE_SIZE)
- folio_zero_range(folio, max_size, PAGE_SIZE - max_size);
+ if (max_size < blocksize)
+ folio_zero_range(folio, max_size, blocksize - max_size);
kfree(tmp);
return ret;
}
static int read_inline_extent(struct btrfs_path *path, struct folio *folio)
{
+ const u32 blocksize = path->nodes[0]->fs_info->sectorsize;
struct btrfs_file_extent_item *fi;
void *kaddr;
size_t copy_size;
@@ -6762,14 +7040,14 @@ static int read_inline_extent(struct btrfs_path *path, struct folio *folio)
if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
return uncompress_inline(path, folio, fi);
- copy_size = min_t(u64, PAGE_SIZE,
+ copy_size = min_t(u64, blocksize,
btrfs_file_extent_ram_bytes(path->nodes[0], fi));
kaddr = kmap_local_folio(folio, 0);
read_extent_buffer(path->nodes[0], kaddr,
btrfs_file_extent_inline_start(fi), copy_size);
kunmap_local(kaddr);
- if (copy_size < PAGE_SIZE)
- folio_zero_range(folio, copy_size, PAGE_SIZE - copy_size);
+ if (copy_size < blocksize)
+ folio_zero_range(folio, copy_size, blocksize - copy_size);
return 0;
}
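/*
 * Worked example for the blocksize-based copy above, assuming a 4K
 * sectorsize on a 16K page system and an inline extent with
 * ram_bytes = 3000: copy_size = min(4096, 3000) = 3000, so 3000 bytes
 * are copied from the leaf and bytes 3000..4095 of the folio are
 * zeroed. Only the first fs block is touched instead of zeroing up to
 * the old PAGE_SIZE bound of 16K.
 */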
@@ -6808,18 +7086,18 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
struct extent_map_tree *em_tree = &inode->extent_tree;
read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, start, len);
+ em = btrfs_lookup_extent_mapping(em_tree, start, len);
read_unlock(&em_tree->lock);
if (em) {
if (em->start > start || em->start + em->len <= start)
- free_extent_map(em);
+ btrfs_free_extent_map(em);
else if (em->disk_bytenr == EXTENT_MAP_INLINE && folio)
- free_extent_map(em);
+ btrfs_free_extent_map(em);
else
goto out;
}
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
ret = -ENOMEM;
goto out;
@@ -6843,8 +7121,8 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
* point the commit_root has everything we need.
*/
if (btrfs_is_free_space_inode(inode)) {
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
}
ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
@@ -6879,7 +7157,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
if (extent_type == BTRFS_FILE_EXTENT_REG ||
extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
/* Only regular file could have regular/prealloc extent */
- if (!S_ISREG(inode->vfs_inode.i_mode)) {
+ if (unlikely(!S_ISREG(inode->vfs_inode.i_mode))) {
ret = -EUCLEAN;
btrfs_crit(fs_info,
"regular/prealloc extent found for non-regular inode %llu",
@@ -6956,7 +7234,7 @@ not_found:
insert:
ret = 0;
btrfs_release_path(path);
- if (em->start > start || extent_map_end(em) <= start) {
+ if (unlikely(em->start > start || btrfs_extent_map_end(em) <= start)) {
btrfs_err(fs_info,
"bad extent! em: [%llu %llu] passed [%llu %llu]",
em->start, em->len, start, len);
@@ -6973,7 +7251,7 @@ out:
trace_btrfs_get_extent(root, inode, em);
if (ret) {
- free_extent_map(em);
+ btrfs_free_extent_map(em);
return ERR_PTR(ret);
}
return em;
@@ -7001,8 +7279,6 @@ static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
* @orig_start: (optional) Return the original file offset of the file extent
* @orig_len: (optional) Return the original on-disk length of the file extent
* @ram_bytes: (optional) Return the ram_bytes of the file extent
- * @strict: if true, omit optimizations that might force us into unnecessary
- * cow. e.g., don't trust generation number.
*
* Return:
* >0 and update @len if we can do nocow write
@@ -7012,17 +7288,17 @@ static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
* NOTE: This only checks the file extents, caller is responsible to wait for
* any ordered extents.
*/
-noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
+noinline int can_nocow_extent(struct btrfs_inode *inode, u64 offset, u64 *len,
struct btrfs_file_extent *file_extent,
- bool nowait, bool strict)
+ bool nowait)
{
- struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct can_nocow_file_extent_args nocow_args = { 0 };
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
struct extent_buffer *leaf;
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ struct extent_io_tree *io_tree = &inode->io_tree;
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
int found_type;
@@ -7032,81 +7308,74 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
return -ENOMEM;
path->nowait = nowait;
- ret = btrfs_lookup_file_extent(NULL, root, path,
- btrfs_ino(BTRFS_I(inode)), offset, 0);
+ ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
+ offset, 0);
if (ret < 0)
- goto out;
+ return ret;
if (ret == 1) {
if (path->slots[0] == 0) {
- /* can't find the item, must cow */
- ret = 0;
- goto out;
+ /* Can't find the item, must COW. */
+ return 0;
}
path->slots[0]--;
}
ret = 0;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
+ if (key.objectid != btrfs_ino(inode) ||
key.type != BTRFS_EXTENT_DATA_KEY) {
- /* not our file or wrong item type, must cow */
- goto out;
+ /* Not our file or wrong item type, must COW. */
+ return 0;
}
if (key.offset > offset) {
- /* Wrong offset, must cow */
- goto out;
+ /* Wrong offset, must COW. */
+ return 0;
}
if (btrfs_file_extent_end(path) <= offset)
- goto out;
+ return 0;
fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
found_type = btrfs_file_extent_type(leaf, fi);
nocow_args.start = offset;
nocow_args.end = offset + *len - 1;
- nocow_args.strict = strict;
nocow_args.free_path = true;
- ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args);
+ ret = can_nocow_file_extent(path, &key, inode, &nocow_args);
/* can_nocow_file_extent() has freed the path. */
path = NULL;
if (ret != 1) {
/* Treat errors as not being able to NOCOW. */
- ret = 0;
- goto out;
+ return 0;
}
- ret = 0;
if (btrfs_extent_readonly(fs_info,
nocow_args.file_extent.disk_bytenr +
nocow_args.file_extent.offset))
- goto out;
+ return 0;
- if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
+ if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
found_type == BTRFS_FILE_EXTENT_PREALLOC) {
u64 range_end;
range_end = round_up(offset + nocow_args.file_extent.num_bytes,
root->fs_info->sectorsize) - 1;
- ret = test_range_bit_exists(io_tree, offset, range_end, EXTENT_DELALLOC);
- if (ret) {
- ret = -EAGAIN;
- goto out;
- }
+ ret = btrfs_test_range_bit_exists(io_tree, offset, range_end,
+ EXTENT_DELALLOC);
+ if (ret)
+ return -EAGAIN;
}
if (file_extent)
memcpy(file_extent, &nocow_args.file_extent, sizeof(*file_extent));
*len = nocow_args.file_extent.num_bytes;
- ret = 1;
-out:
- btrfs_free_path(path);
- return ret;
+
+ return 1;
}
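/*
 * Sketch of the return convention documented above; the caller shape,
 * the surrounding variables and the cow_the_range() fallback are
 * hypothetical, only can_nocow_extent() itself comes from this file.
 * A positive return means up to the updated "len" bytes at "offset" can
 * be written NOCOW, zero means fall back to COW, negative is an error.
 */
u64 len = end - offset + 1;
int ret;

ret = can_nocow_extent(inode, offset, &len, &file_extent, nowait);
if (ret < 0)
	return ret;
if (ret == 0)
	return cow_the_range(inode, offset, end);   /* hypothetical COW fallback */
/* A NOCOW write of "len" bytes starting at "offset" is allowed here. */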
/* The callers of this must take lock_extent() */
@@ -7154,7 +7423,7 @@ struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
break;
}
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em)
return ERR_PTR(-ENOMEM);
@@ -7167,15 +7436,15 @@ struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
em->offset = file_extent->offset;
em->flags |= EXTENT_FLAG_PINNED;
if (type == BTRFS_ORDERED_COMPRESSED)
- extent_map_set_compression(em, file_extent->compression);
+ btrfs_extent_map_set_compression(em, file_extent->compression);
ret = btrfs_replace_extent_map_range(inode, em, true);
if (ret) {
- free_extent_map(em);
+ btrfs_free_extent_map(em);
return ERR_PTR(ret);
}
- /* em got 2 refs now, callers needs to do free_extent_map once. */
+ /* The em has 2 refs now; callers need to do btrfs_free_extent_map() once. */
return em;
}
@@ -7189,13 +7458,13 @@ struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
static void wait_subpage_spinlock(struct folio *folio)
{
struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
- struct btrfs_subpage *subpage;
+ struct btrfs_folio_state *bfs;
- if (!btrfs_is_subpage(fs_info, folio->mapping))
+ if (!btrfs_is_subpage(fs_info, folio))
return;
ASSERT(folio_test_private(folio) && folio_get_private(folio));
- subpage = folio_get_private(folio);
+ bfs = folio_get_private(folio);
/*
* This may look insane as we just acquire the spinlock and release it,
@@ -7208,14 +7477,14 @@ static void wait_subpage_spinlock(struct folio *folio)
* Here we just acquire the spinlock so that all existing callers
* should exit and we're safe to release/invalidate the page.
*/
- spin_lock_irq(&subpage->lock);
- spin_unlock_irq(&subpage->lock);
+ spin_lock_irq(&bfs->lock);
+ spin_unlock_irq(&bfs->lock);
}
static int btrfs_launder_folio(struct folio *folio)
{
return btrfs_qgroup_free_data(folio_to_inode(folio), NULL, folio_pos(folio),
- PAGE_SIZE, NULL);
+ folio_size(folio), NULL);
}
static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
@@ -7242,7 +7511,7 @@ static int btrfs_migrate_folio(struct address_space *mapping,
{
int ret = filemap_migrate_folio(mapping, dst, src, mode);
- if (ret != MIGRATEPAGE_SUCCESS)
+ if (ret)
return ret;
if (folio_test_ordered(src)) {
@@ -7250,7 +7519,7 @@ static int btrfs_migrate_folio(struct address_space *mapping,
folio_set_ordered(dst);
}
- return MIGRATEPAGE_SUCCESS;
+ return 0;
}
#else
#define btrfs_migrate_folio NULL
@@ -7266,7 +7535,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
u64 page_start = folio_pos(folio);
u64 page_end = page_start + folio_size(folio) - 1;
u64 cur;
- int inode_evicting = inode->vfs_inode.i_state & I_FREEING;
+ int inode_evicting = inode_state_read_once(&inode->vfs_inode) & I_FREEING;
/*
* We have folio locked so no new ordered extent can be created on this
@@ -7302,7 +7571,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
}
if (!inode_evicting)
- lock_extent(tree, page_start, page_end, &cached_state);
+ btrfs_lock_extent(tree, page_start, page_end, &cached_state);
cur = page_start;
while (cur < page_end) {
@@ -7358,16 +7627,16 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
* btrfs_finish_ordered_io().
*/
if (!inode_evicting)
- clear_extent_bit(tree, cur, range_end,
- EXTENT_DELALLOC |
- EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
- EXTENT_DEFRAG, &cached_state);
+ btrfs_clear_extent_bit(tree, cur, range_end,
+ EXTENT_DELALLOC |
+ EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
+ EXTENT_DEFRAG, &cached_state);
- spin_lock_irq(&inode->ordered_tree_lock);
+ spin_lock(&inode->ordered_tree_lock);
set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
ordered->truncated_len = min(ordered->truncated_len,
cur - ordered->file_offset);
- spin_unlock_irq(&inode->ordered_tree_lock);
+ spin_unlock(&inode->ordered_tree_lock);
/*
* If the ordered extent has finished, we're safe to delete all
@@ -7403,12 +7672,11 @@ next:
* Since the IO will never happen for this page.
*/
btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
- if (!inode_evicting) {
- clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
- EXTENT_DELALLOC | EXTENT_UPTODATE |
- EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG |
- extra_flags, &cached_state);
- }
+ if (!inode_evicting)
+ btrfs_clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
+ EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
+ EXTENT_DEFRAG | extra_flags,
+ &cached_state);
cur = range_end + 1;
}
/*
@@ -7430,19 +7698,22 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
.ino = btrfs_ino(inode),
.min_type = BTRFS_EXTENT_DATA_KEY,
.clear_extent_range = true,
+ .new_size = inode->vfs_inode.i_size,
};
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_block_rsv *rsv;
+ struct btrfs_block_rsv rsv;
int ret;
struct btrfs_trans_handle *trans;
- u64 mask = fs_info->sectorsize - 1;
const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
+ const u64 lock_start = round_down(inode->vfs_inode.i_size, fs_info->sectorsize);
+ const u64 i_size_up = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
+
+ /* Our inode is locked and the i_size can't be changed concurrently. */
+ btrfs_assert_inode_locked(inode);
if (!skip_writeback) {
- ret = btrfs_wait_ordered_range(inode,
- inode->vfs_inode.i_size & (~mask),
- (u64)-1);
+ ret = btrfs_wait_ordered_range(inode, lock_start, (u64)-1);
if (ret)
return ret;
}
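/*
 * Worked example for the rounding above, assuming a 4K sectorsize (and
 * 4K pages) with a truncation to i_size = 6000:
 *   lock_start = round_down(6000, 4096) = 4096
 *   i_size_up  = round_up(6000, 4096)   = 8192
 * Ordered extents are waited for from offset 4096 onwards, extent maps
 * are dropped from 8192 onwards, and btrfs_truncate_block() later
 * zeroes the tail of the block containing the new EOF, bytes 6000..8191.
 */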
@@ -7475,11 +7746,9 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
* 2) fs_info->trans_block_rsv - this will have 1 items worth left for
* updating the inode.
*/
- rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
- if (!rsv)
- return -ENOMEM;
- rsv->size = min_size;
- rsv->failfast = true;
+ btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
+ rsv.size = min_size;
+ rsv.failfast = true;
/*
* 1 for the truncate slack space
@@ -7492,7 +7761,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
}
/* Migrate the slack space for the truncate to our reserve */
- ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
+ ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, &rsv,
min_size, false);
/*
* We have reserved 2 metadata units when we started the transaction and
@@ -7504,30 +7773,25 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
goto out;
}
- trans->block_rsv = rsv;
+ trans->block_rsv = &rsv;
while (1) {
struct extent_state *cached_state = NULL;
- const u64 new_size = inode->vfs_inode.i_size;
- const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
- control.new_size = new_size;
- lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
/*
* We want to drop from the next block forward in case this new
* size is not block aligned since we will be keeping the last
* block of the extent just the way it is.
*/
- btrfs_drop_extent_map_range(inode,
- ALIGN(new_size, fs_info->sectorsize),
- (u64)-1, false);
+ btrfs_drop_extent_map_range(inode, i_size_up, (u64)-1, false);
ret = btrfs_truncate_inode_items(trans, root, &control);
inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
- unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
trans->block_rsv = &fs_info->trans_block_rsv;
if (ret != -ENOSPC && ret != -EAGAIN)
@@ -7547,9 +7811,9 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
break;
}
- btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
+ btrfs_block_rsv_release(fs_info, &rsv, -1, NULL);
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
- rsv, min_size, false);
+ &rsv, min_size, false);
/*
* We have reserved 2 metadata units when we started the
* transaction and min_size matches 1 unit, so this should never
@@ -7558,7 +7822,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
if (WARN_ON(ret))
break;
- trans->block_rsv = rsv;
+ trans->block_rsv = &rsv;
}
/*
@@ -7571,7 +7835,8 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
- ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0);
+ ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size,
+ inode->vfs_inode.i_size, (u64)-1);
if (ret)
goto out;
trans = btrfs_start_transaction(root, 1);
@@ -7596,7 +7861,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
btrfs_btree_balance_dirty(fs_info);
}
out:
- btrfs_free_block_rsv(fs_info, rsv);
+ btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
/*
* So if we truncate and then write and fsync we normally would just
* write the extents that changed, which is a problem if we need to
@@ -7652,6 +7917,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->last_sub_trans = 0;
ei->logged_trans = 0;
ei->delalloc_bytes = 0;
+ /* new_delalloc_bytes and last_dir_index_offset are in a union. */
ei->new_delalloc_bytes = 0;
ei->defrag_bytes = 0;
ei->disk_i_size = 0;
@@ -7683,10 +7949,10 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->i_otime_nsec = 0;
inode = &ei->vfs_inode;
- extent_map_tree_init(&ei->extent_tree);
+ btrfs_extent_map_tree_init(&ei->extent_tree);
/* This io tree sets the valid inode. */
- extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
+ btrfs_extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
ei->io_tree.inode = ei;
ei->file_extent_tree = NULL;
@@ -7786,7 +8052,7 @@ int btrfs_drop_inode(struct inode *inode)
if (btrfs_root_refs(&root->root_item) == 0)
return 1;
else
- return generic_drop_inode(inode);
+ return inode_generic_drop(inode);
}
static void init_once(void *foo)
@@ -7794,6 +8060,9 @@ static void init_once(void *foo)
struct btrfs_inode *ei = foo;
inode_init_once(&ei->vfs_inode);
+#ifdef CONFIG_FS_VERITY
+ ei->i_verity_info = NULL;
+#endif
}
void __cold btrfs_destroy_cachep(void)
@@ -7851,7 +8120,7 @@ static int btrfs_getattr(struct mnt_idmap *idmap,
generic_fillattr(idmap, request_mask, inode, stat);
stat->dev = BTRFS_I(inode)->root->anon_dev;
- stat->subvol = BTRFS_I(inode)->root->root_key.objectid;
+ stat->subvol = btrfs_root_id(BTRFS_I(inode)->root);
stat->result_mask |= STATX_SUBVOL;
spin_lock(&BTRFS_I(inode)->lock);
@@ -7884,6 +8153,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
int ret;
int ret2;
bool need_abort = false;
+ bool logs_pinned = false;
struct fscrypt_name old_fname, new_fname;
struct fscrypt_str *old_name, *new_name;
@@ -7994,7 +8264,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
btrfs_ino(BTRFS_I(old_dir)),
new_idx);
if (ret) {
- if (need_abort)
+ if (unlikely(need_abort))
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -8007,6 +8277,31 @@ static int btrfs_rename_exchange(struct inode *old_dir,
inode_inc_iversion(new_inode);
simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
+ if (old_ino != BTRFS_FIRST_FREE_OBJECTID &&
+ new_ino != BTRFS_FIRST_FREE_OBJECTID) {
+ /*
+ * If we are renaming in the same directory (and it's not for
+ * root entries) pin the log early to prevent any concurrent
+ * task from logging the directory after we removed the old
+ * entries and before we add the new entries, otherwise that
+ * task can sync a log without any entry for the inodes we are
+ * renaming; replaying that log after a power failure would then
+ * result in deleting the inodes.
+ *
+ * If the rename affects two different directories, we want to
+ * make sure that there's no log commit that contains
+ * updates for only one of the directories but not for the
+ * other.
+ *
+ * If we are renaming an entry for a root, we don't care about
+ * log updates since we called btrfs_set_log_full_commit().
+ */
+ btrfs_pin_log_trans(root);
+ btrfs_pin_log_trans(dest);
+ logs_pinned = true;
+ }
+
if (old_dentry->d_parent != new_dentry->d_parent) {
btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
BTRFS_I(old_inode), true);
@@ -8017,43 +8312,57 @@ static int btrfs_rename_exchange(struct inode *old_dir,
/* src is a subvolume */
if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
} else { /* src is an inode */
ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
BTRFS_I(old_dentry->d_inode),
old_name, &old_rename_ctx);
- if (!ret)
- ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
- }
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out_fail;
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
+ ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
}
/* dest is a subvolume */
if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
} else { /* dest is an inode */
ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
BTRFS_I(new_dentry->d_inode),
new_name, &new_rename_ctx);
- if (!ret)
- ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
- }
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out_fail;
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
+ ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
}
ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
new_name, 0, old_idx);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
old_name, 0, new_idx);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -8064,30 +8373,23 @@ static int btrfs_rename_exchange(struct inode *old_dir,
BTRFS_I(new_inode)->dir_index = new_idx;
/*
- * Now pin the logs of the roots. We do it to ensure that no other task
- * can sync the logs while we are in progress with the rename, because
- * that could result in an inconsistency in case any of the inodes that
- * are part of this rename operation were logged before.
+ * Do the log updates for all inodes.
+ *
+ * If either entry is for a root we don't need to update the logs since
+ * we've called btrfs_set_log_full_commit() before.
*/
- if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
- btrfs_pin_log_trans(root);
- if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
- btrfs_pin_log_trans(dest);
-
- /* Do the log updates for all inodes. */
- if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
+ if (logs_pinned) {
btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
old_rename_ctx.index, new_dentry->d_parent);
- if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
new_rename_ctx.index, old_dentry->d_parent);
+ }
- /* Now unpin the logs. */
- if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
+out_fail:
+ if (logs_pinned) {
btrfs_end_log_trans(root);
- if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
btrfs_end_log_trans(dest);
-out_fail:
+ }
ret2 = btrfs_end_transaction(trans);
ret = ret ? ret : ret2;
out_notrans:
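
Both rename paths in this patch converge on the same shape: pin the log trees of both roots before touching any directory entries, remember that in a logs_pinned flag, and drop the pins from the common exit label so every error path unwinds them as well. A minimal plain-C illustration of that flag-plus-single-exit idiom follows; pin()/unpin() merely stand in for btrfs_pin_log_trans()/btrfs_end_log_trans() and the error value is arbitrary.

#include <stdbool.h>
#include <stdio.h>

static void pin(const char *what)   { printf("pin %s log\n", what); }
static void unpin(const char *what) { printf("unpin %s log\n", what); }

/* Models the control flow above: pin early, flag it, unpin on all exits. */
static int do_rename(bool fail_midway)
{
	bool logs_pinned = false;
	int ret = 0;

	pin("src root");
	pin("dst root");
	logs_pinned = true;

	if (fail_midway) {		/* e.g. an add-link step failing */
		ret = -1;
		goto out_fail;
	}
	printf("log new names\n");	/* the btrfs_log_new_name() step */

out_fail:
	if (logs_pinned) {
		unpin("src root");
		unpin("dst root");
	}
	return ret;
}

int main(void)
{
	printf("success path -> %d\n", do_rename(false));
	printf("error path   -> %d\n", do_rename(true));
	return 0;
}
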
@@ -8137,6 +8439,7 @@ static int btrfs_rename(struct mnt_idmap *idmap,
int ret2;
u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
struct fscrypt_name old_fname, new_fname;
+ bool logs_pinned = false;
if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return -EPERM;
@@ -8271,22 +8574,52 @@ static int btrfs_rename(struct mnt_idmap *idmap,
inode_inc_iversion(old_inode);
simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
+ if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
+ /*
+ * If we are renaming in the same directory (and it's not a
+ * root entry) pin the log to prevent any concurrent task from
+ * logging the directory after we removed the old entry and
+ * before we add the new entry, otherwise that task can sync
+ * a log without any entry for the inode we are renaming and
+ * therefore replaying that log, if a power failure happens
+ * after syncing the log, would result in deleting the inode.
+ *
+ * If the rename affects two different directories, we want to
+ * make sure that there's no log commit that contains
+ * updates for only one of the directories but not for the
+ * other.
+ *
+ * If we are renaming an entry for a root, we don't care about
+ * log updates since we called btrfs_set_log_full_commit().
+ */
+ btrfs_pin_log_trans(root);
+ btrfs_pin_log_trans(dest);
+ logs_pinned = true;
+ }
+
if (old_dentry->d_parent != new_dentry->d_parent)
btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
BTRFS_I(old_inode), true);
if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
} else {
ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
BTRFS_I(d_inode(old_dentry)),
&old_fname.disk_name, &rename_ctx);
- if (!ret)
- ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
- }
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out_fail;
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
+ ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
}
if (new_inode) {
@@ -8294,24 +8627,33 @@ static int btrfs_rename(struct mnt_idmap *idmap,
if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
BUG_ON(new_inode->i_nlink == 0);
} else {
ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
BTRFS_I(d_inode(new_dentry)),
&new_fname.disk_name);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
}
- if (!ret && new_inode->i_nlink == 0)
+ if (new_inode->i_nlink == 0) {
ret = btrfs_orphan_add(trans,
BTRFS_I(d_inode(new_dentry)));
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out_fail;
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
}
}
ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
&new_fname.disk_name, 0, index);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -8319,13 +8661,13 @@ static int btrfs_rename(struct mnt_idmap *idmap,
if (old_inode->i_nlink == 1)
BTRFS_I(old_inode)->dir_index = index;
- if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
+ if (logs_pinned)
btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
rename_ctx.index, new_dentry->d_parent);
if (flags & RENAME_WHITEOUT) {
ret = btrfs_create_new_inode(trans, &whiteout_args);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
} else {
@@ -8335,6 +8677,10 @@ static int btrfs_rename(struct mnt_idmap *idmap,
}
}
out_fail:
+ if (logs_pinned) {
+ btrfs_end_log_trans(root);
+ btrfs_end_log_trans(dest);
+ }
ret2 = btrfs_end_transaction(trans);
ret = ret ? ret : ret2;
out_notrans:
@@ -8416,46 +8762,42 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
* some fairly slow code that needs optimization. This walks the list
* of all the inodes with pending delalloc and forces them to disk.
*/
-static int start_delalloc_inodes(struct btrfs_root *root,
- struct writeback_control *wbc, bool snapshot,
- bool in_reclaim_context)
+static int start_delalloc_inodes(struct btrfs_root *root, long *nr_to_write,
+ bool snapshot, bool in_reclaim_context)
{
- struct btrfs_inode *binode;
- struct inode *inode;
struct btrfs_delalloc_work *work, *next;
LIST_HEAD(works);
LIST_HEAD(splice);
int ret = 0;
- bool full_flush = wbc->nr_to_write == LONG_MAX;
mutex_lock(&root->delalloc_mutex);
spin_lock(&root->delalloc_lock);
list_splice_init(&root->delalloc_inodes, &splice);
while (!list_empty(&splice)) {
- binode = list_entry(splice.next, struct btrfs_inode,
- delalloc_inodes);
+ struct btrfs_inode *inode;
+ struct inode *tmp_inode;
+
+ inode = list_first_entry(&splice, struct btrfs_inode, delalloc_inodes);
- list_move_tail(&binode->delalloc_inodes,
- &root->delalloc_inodes);
+ list_move_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
if (in_reclaim_context &&
- test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
+ test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags))
continue;
- inode = igrab(&binode->vfs_inode);
- if (!inode) {
+ tmp_inode = igrab(&inode->vfs_inode);
+ if (!tmp_inode) {
cond_resched_lock(&root->delalloc_lock);
continue;
}
spin_unlock(&root->delalloc_lock);
if (snapshot)
- set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
- &binode->runtime_flags);
- if (full_flush) {
- work = btrfs_alloc_delalloc_work(inode);
+ set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, &inode->runtime_flags);
+ if (nr_to_write == NULL) {
+ work = btrfs_alloc_delalloc_work(tmp_inode);
if (!work) {
- iput(inode);
+ iput(tmp_inode);
ret = -ENOMEM;
goto out;
}
@@ -8463,9 +8805,11 @@ static int start_delalloc_inodes(struct btrfs_root *root,
btrfs_queue_work(root->fs_info->flush_workers,
&work->work);
} else {
- ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc);
- btrfs_add_delayed_iput(BTRFS_I(inode));
- if (ret || wbc->nr_to_write <= 0)
+ ret = filemap_flush_nr(tmp_inode->i_mapping,
+ nr_to_write);
+ btrfs_add_delayed_iput(inode);
+
+ if (ret || *nr_to_write <= 0)
goto out;
}
cond_resched();
@@ -8491,29 +8835,17 @@ out:
int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
{
- struct writeback_control wbc = {
- .nr_to_write = LONG_MAX,
- .sync_mode = WB_SYNC_NONE,
- .range_start = 0,
- .range_end = LLONG_MAX,
- };
struct btrfs_fs_info *fs_info = root->fs_info;
if (BTRFS_FS_ERROR(fs_info))
return -EROFS;
-
- return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
+ return start_delalloc_inodes(root, NULL, true, in_reclaim_context);
}
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
bool in_reclaim_context)
{
- struct writeback_control wbc = {
- .nr_to_write = nr,
- .sync_mode = WB_SYNC_NONE,
- .range_start = 0,
- .range_end = LLONG_MAX,
- };
+ long *nr_to_write = nr == LONG_MAX ? NULL : &nr;
struct btrfs_root *root;
LIST_HEAD(splice);
int ret;
@@ -8525,13 +8857,6 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
spin_lock(&fs_info->delalloc_root_lock);
list_splice_init(&fs_info->delalloc_roots, &splice);
while (!list_empty(&splice)) {
- /*
- * Reset nr_to_write here so we know that we're doing a full
- * flush.
- */
- if (nr == LONG_MAX)
- wbc.nr_to_write = LONG_MAX;
-
root = list_first_entry(&splice, struct btrfs_root,
delalloc_root);
root = btrfs_grab_root(root);
@@ -8540,9 +8865,10 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
&fs_info->delalloc_roots);
spin_unlock(&fs_info->delalloc_root_lock);
- ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
+ ret = start_delalloc_inodes(root, nr_to_write, false,
+ in_reclaim_context);
btrfs_put_root(root);
- if (ret < 0 || wbc.nr_to_write <= 0)
+ if (ret < 0 || nr <= 0)
goto out;
spin_lock(&fs_info->delalloc_root_lock);
}
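
start_delalloc_inodes() now takes a long *nr_to_write instead of a writeback_control: a NULL pointer means a full flush via queued work, while a non-NULL pointer is a shared page budget that the flush helper is expected to consume and that both loops test against zero. A toy model of that calling convention, with made-up names and a fixed per-inode page count:

#include <stdio.h>

/* NULL budget = flush everything; otherwise consume the caller's counter. */
static void flush_one(const char *name, long *budget)
{
	long wrote = 3;			/* pretend three pages went out */

	if (budget)
		*budget -= wrote;
	printf("%s: wrote %ld page(s)\n", name, wrote);
}

int main(void)
{
	const char *inodes[] = { "ino 257", "ino 258", "ino 259" };
	long nr = 5;
	long *budget = &nr;		/* pass NULL for a full flush */

	for (int i = 0; i < 3; i++) {
		flush_one(inodes[i], budget);
		if (budget && *budget <= 0)	/* mirrors "*nr_to_write <= 0" */
			break;
	}
	return 0;
}
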
@@ -8573,7 +8899,7 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
.dentry = dentry,
};
unsigned int trans_num_items;
- int err;
+ int ret;
int name_len;
int datasize;
unsigned long ptr;
@@ -8581,7 +8907,12 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct extent_buffer *leaf;
name_len = strlen(symname);
- if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
+ /*
+ * Symlinks use uncompressed inline extent data, which must not reach
+ * the block size.
+ */
+ if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
+ name_len >= fs_info->sectorsize)
return -ENAMETOOLONG;
inode = new_inode(dir->i_sb);
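
The extra condition added above means a symlink target now has to satisfy two bounds: it must fit within the inline-data limit and stay strictly below the block size, because the target is stored as an uncompressed inline extent. A quick standalone check of those bounds; the 3955/4096 limits are made-up stand-ins rather than values read from a real filesystem:

#include <stdio.h>
#include <string.h>

static int symlink_len_ok(size_t len, size_t max_inline, size_t sectorsize)
{
	/* Same shape as the check above: exceeding either bound fails. */
	return len <= max_inline && len < sectorsize;
}

int main(void)
{
	const size_t max_inline = 3955, sectorsize = 4096;
	const char *target = "/some/symlink/target";

	printf("len %zu ok: %d\n", strlen(target),
	       symlink_len_ok(strlen(target), max_inline, sectorsize));
	printf("len 4000 ok: %d\n", symlink_len_ok(4000, max_inline, sectorsize));
	return 0;
}
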
@@ -8595,38 +8926,37 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
inode_set_bytes(inode, name_len);
new_inode_args.inode = inode;
- err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
- if (err)
+ ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
+ if (ret)
goto out_inode;
/* 1 additional item for the inline extent */
trans_num_items++;
trans = btrfs_start_transaction(root, trans_num_items);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
goto out_new_inode_args;
}
- err = btrfs_create_new_inode(trans, &new_inode_args);
- if (err)
+ ret = btrfs_create_new_inode(trans, &new_inode_args);
+ if (ret)
goto out;
path = btrfs_alloc_path();
- if (!path) {
- err = -ENOMEM;
- btrfs_abort_transaction(trans, err);
+ if (unlikely(!path)) {
+ ret = -ENOMEM;
+ btrfs_abort_transaction(trans, ret);
discard_new_inode(inode);
inode = NULL;
goto out;
}
key.objectid = btrfs_ino(BTRFS_I(inode));
- key.offset = 0;
key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = 0;
datasize = btrfs_file_extent_calc_inline_size(name_len);
- err = btrfs_insert_empty_item(trans, root, path, &key,
- datasize);
- if (err) {
- btrfs_abort_transaction(trans, err);
+ ret = btrfs_insert_empty_item(trans, root, path, &key, datasize);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
btrfs_free_path(path);
discard_new_inode(inode);
inode = NULL;
@@ -8645,20 +8975,19 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
ptr = btrfs_file_extent_inline_start(ei);
write_extent_buffer(leaf, symname, ptr, name_len);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_free_path(path);
d_instantiate_new(dentry, inode);
- err = 0;
+ ret = 0;
out:
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
- if (err)
+ if (ret)
iput(inode);
- return err;
+ return ret;
}
static struct btrfs_trans_handle *insert_prealloc_file_extent(
@@ -8769,7 +9098,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
*/
cur_bytes = min(cur_bytes, last_alloc);
ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
- min_size, 0, *alloc_hint, &ins, 1, 0);
+ min_size, 0, *alloc_hint, &ins, true, false);
if (ret)
break;
@@ -8795,11 +9124,11 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
btrfs_free_reserved_extent(fs_info, ins.objectid,
- ins.offset, 0);
+ ins.offset, false);
break;
}
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
cur_offset + ins.offset - 1, false);
@@ -8817,7 +9146,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
em->generation = trans->transid;
ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
- free_extent_map(em);
+ btrfs_free_extent_map(em);
next:
num_bytes -= ins.offset;
cur_offset += ins.offset;
@@ -8839,7 +9168,7 @@ next:
ret = btrfs_update_inode(trans, BTRFS_I(inode));
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
if (own_trans)
btrfs_end_transaction(trans);
@@ -8875,6 +9204,11 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
min_size, actual_len, alloc_hint, trans);
}
+/*
+ * NOTE: in case you are adding MAY_EXEC check for directories:
+ * we are marking them with IOP_FASTPERM_MAY_EXEC, allowing path lookup to
+ * elide calls here.
+ */
static int btrfs_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask)
{
@@ -8989,7 +9323,7 @@ static ssize_t btrfs_encoded_read_inline(
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *io_tree = &inode->io_tree;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_file_extent_item *item;
u64 ram_bytes;
@@ -8999,21 +9333,19 @@ static ssize_t btrfs_encoded_read_inline(
const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!path)
+ return -ENOMEM;
path->nowait = nowait;
ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
extent_start, 0);
if (ret) {
- if (ret > 0) {
+ if (unlikely(ret > 0)) {
/* The extent item disappeared? */
- ret = -EIO;
+ return -EIO;
}
- goto out;
+ return ret;
}
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
@@ -9026,17 +9358,16 @@ static ssize_t btrfs_encoded_read_inline(
ret = btrfs_encoded_io_compression_from_extent(fs_info,
btrfs_file_extent_compression(leaf, item));
if (ret < 0)
- goto out;
+ return ret;
encoded->compression = ret;
if (encoded->compression) {
size_t inline_size;
inline_size = btrfs_file_extent_inline_item_len(leaf,
path->slots[0]);
- if (inline_size > count) {
- ret = -ENOBUFS;
- goto out;
- }
+ if (inline_size > count)
+ return -ENOBUFS;
+
count = inline_size;
encoded->unencoded_len = ram_bytes;
encoded->unencoded_offset = iocb->ki_pos - extent_start;
@@ -9048,13 +9379,12 @@ static ssize_t btrfs_encoded_read_inline(
}
tmp = kmalloc(count, GFP_NOFS);
- if (!tmp) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!tmp)
+ return -ENOMEM;
+
read_extent_buffer(leaf, tmp, ptr, count);
btrfs_release_path(path);
- unlock_extent(io_tree, start, lockend, cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
*unlocked = true;
@@ -9062,15 +9392,14 @@ static ssize_t btrfs_encoded_read_inline(
if (ret != count)
ret = -EFAULT;
kfree(tmp);
-out:
- btrfs_free_path(path);
+
return ret;
}
struct btrfs_encoded_read_private {
- wait_queue_head_t wait;
+ struct completion *sync_reads;
void *uring_ctx;
- atomic_t pending;
+ refcount_t pending_refs;
blk_status_t status;
};
@@ -9080,23 +9409,22 @@ static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
if (bbio->bio.bi_status) {
/*
- * The memory barrier implied by the atomic_dec_return() here
- * pairs with the memory barrier implied by the
- * atomic_dec_return() or io_wait_event() in
- * btrfs_encoded_read_regular_fill_pages() to ensure that this
- * write is observed before the load of status in
+ * The memory barrier implied by the refcount_dec_and_test() here
+ * pairs with the memory barrier implied by the refcount_dec_and_test()
+ * in btrfs_encoded_read_regular_fill_pages() to ensure that
+ * this write is observed before the load of status in
* btrfs_encoded_read_regular_fill_pages().
*/
WRITE_ONCE(priv->status, bbio->bio.bi_status);
}
- if (atomic_dec_return(&priv->pending) == 0) {
+ if (refcount_dec_and_test(&priv->pending_refs)) {
int err = blk_status_to_errno(READ_ONCE(priv->status));
if (priv->uring_ctx) {
btrfs_uring_read_extent_endio(priv->uring_ctx, err);
kfree(priv);
} else {
- wake_up(&priv->wait);
+ complete(priv->sync_reads);
}
}
bio_put(&bbio->bio);
@@ -9106,37 +9434,44 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
u64 disk_bytenr, u64 disk_io_size,
struct page **pages, void *uring_ctx)
{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct btrfs_encoded_read_private *priv;
+ struct btrfs_encoded_read_private *priv, sync_priv;
+ struct completion sync_reads;
unsigned long i = 0;
struct btrfs_bio *bbio;
int ret;
- priv = kmalloc(sizeof(struct btrfs_encoded_read_private), GFP_NOFS);
- if (!priv)
- return -ENOMEM;
+ /*
+ * Fast path for synchronous reads, which complete within this call;
+ * io_uring needs the private data to outlive it.
+ */
+ if (uring_ctx) {
+ priv = kmalloc(sizeof(struct btrfs_encoded_read_private), GFP_NOFS);
+ if (!priv)
+ return -ENOMEM;
+ } else {
+ priv = &sync_priv;
+ init_completion(&sync_reads);
+ priv->sync_reads = &sync_reads;
+ }
- init_waitqueue_head(&priv->wait);
- atomic_set(&priv->pending, 1);
+ refcount_set(&priv->pending_refs, 1);
priv->status = 0;
priv->uring_ctx = uring_ctx;
- bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
+ bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
btrfs_encoded_read_endio, priv);
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
- bbio->inode = inode;
do {
size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
- atomic_inc(&priv->pending);
+ refcount_inc(&priv->pending_refs);
btrfs_submit_bbio(bbio, 0);
- bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
+ bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
btrfs_encoded_read_endio, priv);
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
- bbio->inode = inode;
continue;
}
@@ -9145,11 +9480,11 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
disk_io_size -= bytes;
} while (disk_io_size);
- atomic_inc(&priv->pending);
+ refcount_inc(&priv->pending_refs);
btrfs_submit_bbio(bbio, 0);
if (uring_ctx) {
- if (atomic_dec_return(&priv->pending) == 0) {
+ if (refcount_dec_and_test(&priv->pending_refs)) {
ret = blk_status_to_errno(READ_ONCE(priv->status));
btrfs_uring_read_extent_endio(uring_ctx, ret);
kfree(priv);
@@ -9158,12 +9493,10 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
return -EIOCBQUEUED;
} else {
- if (atomic_dec_return(&priv->pending) != 0)
- io_wait_event(priv->wait, !atomic_read(&priv->pending));
+ if (!refcount_dec_and_test(&priv->pending_refs))
+ wait_for_completion_io(&sync_reads);
/* See btrfs_encoded_read_endio() for ordering. */
- ret = blk_status_to_errno(READ_ONCE(priv->status));
- kfree(priv);
- return ret;
+ return blk_status_to_errno(READ_ONCE(priv->status));
}
}
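
The rework above drops the kmalloc'd private data and wait queue for synchronous encoded reads: the counter becomes a refcount_t that starts at 1 for the submitter and gains one reference per submitted bio, and the last dropper signals a stack-allocated completion that the synchronous caller waits on. The same pattern can be modeled in userspace as below, with pthreads standing in for bios and an atomic counter plus a semaphore standing in for refcount_t and struct completion (all names illustrative):

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

struct read_private {
	atomic_int pending;	/* plays the role of pending_refs */
	sem_t done;		/* plays the role of the completion */
};

static void *fake_bio_endio(void *arg)
{
	struct read_private *priv = arg;

	/* last one out signals the waiter, like refcount_dec_and_test() */
	if (atomic_fetch_sub(&priv->pending, 1) == 1)
		sem_post(&priv->done);
	return NULL;
}

int main(void)
{
	struct read_private priv;
	pthread_t bios[4];

	atomic_init(&priv.pending, 1);		/* submitter's reference */
	sem_init(&priv.done, 0, 0);

	for (int i = 0; i < 4; i++) {
		atomic_fetch_add(&priv.pending, 1);	/* one ref per "bio" */
		pthread_create(&bios[i], NULL, fake_bio_endio, &priv);
	}

	/* drop the submitter's reference; wait only if I/O is still pending */
	if (atomic_fetch_sub(&priv.pending, 1) != 1)
		sem_wait(&priv.done);
	puts("all reads completed");

	for (int i = 0; i < 4; i++)
		pthread_join(bios[i], NULL);
	return 0;
}
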
@@ -9196,7 +9529,7 @@ ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
if (ret)
goto out;
- unlock_extent(io_tree, start, lockend, cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
*unlocked = true;
@@ -9273,7 +9606,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
goto out_unlock_inode;
}
- if (!try_lock_extent(io_tree, start, lockend, cached_state)) {
+ if (!btrfs_try_lock_extent(io_tree, start, lockend, cached_state)) {
ret = -EAGAIN;
goto out_unlock_inode;
}
@@ -9282,7 +9615,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
lockend - start + 1);
if (ordered) {
btrfs_put_ordered_extent(ordered);
- unlock_extent(io_tree, start, lockend, cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
ret = -EAGAIN;
goto out_unlock_inode;
}
@@ -9295,13 +9628,13 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
if (ret)
goto out_unlock_inode;
- lock_extent(io_tree, start, lockend, cached_state);
+ btrfs_lock_extent(io_tree, start, lockend, cached_state);
ordered = btrfs_lookup_ordered_range(inode, start,
lockend - start + 1);
if (!ordered)
break;
btrfs_put_ordered_extent(ordered);
- unlock_extent(io_tree, start, lockend, cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
cond_resched();
}
}
@@ -9319,7 +9652,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
* For inline extents we get everything we need out of the
* extent item.
*/
- free_extent_map(em);
+ btrfs_free_extent_map(em);
em = NULL;
ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
cached_state, extent_start,
@@ -9331,7 +9664,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
* We only want to return up to EOF even if the extent extends beyond
* that.
*/
- encoded->len = min_t(u64, extent_map_end(em),
+ encoded->len = min_t(u64, btrfs_extent_map_end(em),
inode->vfs_inode.i_size) - iocb->ki_pos;
if (em->disk_bytenr == EXTENT_MAP_HOLE ||
(em->flags & EXTENT_FLAG_PREALLOC)) {
@@ -9339,7 +9672,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
count = min_t(u64, count, encoded->len);
encoded->len = count;
encoded->unencoded_len = count;
- } else if (extent_map_is_compressed(em)) {
+ } else if (btrfs_extent_map_is_compressed(em)) {
*disk_bytenr = em->disk_bytenr;
/*
* Bail if the buffer isn't large enough to return the whole
@@ -9354,12 +9687,12 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
encoded->unencoded_len = em->ram_bytes;
encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset);
ret = btrfs_encoded_io_compression_from_extent(fs_info,
- extent_map_compression(em));
+ btrfs_extent_map_compression(em));
if (ret < 0)
goto out_em;
encoded->compression = ret;
} else {
- *disk_bytenr = extent_map_block_start(em) + (start - em->start);
+ *disk_bytenr = btrfs_extent_map_block_start(em) + (start - em->start);
if (encoded->len > count)
encoded->len = count;
/*
@@ -9372,11 +9705,11 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
encoded->unencoded_len = count;
*disk_io_size = ALIGN(*disk_io_size, fs_info->sectorsize);
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
em = NULL;
if (*disk_bytenr == EXTENT_MAP_HOLE) {
- unlock_extent(io_tree, start, lockend, cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
unlocked = true;
ret = iov_iter_zero(count, iter);
@@ -9388,11 +9721,11 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
}
out_em:
- free_extent_map(em);
+ btrfs_free_extent_map(em);
out_unlock_extent:
/* Leave inode and extent locked if we need to do a read. */
if (!unlocked && ret != -EIOCBQUEUED)
- unlock_extent(io_tree, start, lockend, cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
out_unlock_inode:
if (!unlocked && ret != -EIOCBQUEUED)
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
@@ -9529,8 +9862,6 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
}
for (;;) {
- struct btrfs_ordered_extent *ordered;
-
ret = btrfs_wait_ordered_range(inode, start, num_bytes);
if (ret)
goto out_folios;
@@ -9539,14 +9870,14 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
end >> PAGE_SHIFT);
if (ret)
goto out_folios;
- lock_extent(io_tree, start, end, &cached_state);
+ btrfs_lock_extent(io_tree, start, end, &cached_state);
ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
if (!ordered &&
!filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
break;
if (ordered)
btrfs_put_ordered_extent(ordered);
- unlock_extent(io_tree, start, end, &cached_state);
+ btrfs_unlock_extent(io_tree, start, end, &cached_state);
cond_resched();
}
@@ -9580,7 +9911,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
}
ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
- disk_num_bytes, 0, 0, &ins, 1, 1);
+ disk_num_bytes, 0, 0, &ins, true, true);
if (ret)
goto out_delalloc_release;
extent_reserved = true;
@@ -9596,11 +9927,11 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
ret = PTR_ERR(em);
goto out_free_reserved;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
- (1 << BTRFS_ORDERED_ENCODED) |
- (1 << BTRFS_ORDERED_COMPRESSED));
+ (1U << BTRFS_ORDERED_ENCODED) |
+ (1U << BTRFS_ORDERED_COMPRESSED));
if (IS_ERR(ordered)) {
btrfs_drop_extent_map_range(inode, start, end, false);
ret = PTR_ERR(ordered);
@@ -9611,7 +9942,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
if (start + encoded->len > inode->vfs_inode.i_size)
i_size_write(&inode->vfs_inode, start + encoded->len);
- unlock_extent(io_tree, start, end, &cached_state);
+ btrfs_unlock_extent(io_tree, start, end, &cached_state);
btrfs_delalloc_release_extents(inode, num_bytes);
@@ -9621,7 +9952,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
out_free_reserved:
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
- btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
out_delalloc_release:
btrfs_delalloc_release_extents(inode, num_bytes);
btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
@@ -9634,9 +9965,9 @@ out_free_data_space:
* bytes_may_use.
*/
if (!extent_reserved)
- btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
+ btrfs_free_reserved_data_space_noquota(inode, disk_num_bytes);
out_unlock:
- unlock_extent(io_tree, start, end, &cached_state);
+ btrfs_unlock_extent(io_tree, start, end, &cached_state);
out_folios:
for (i = 0; i < nr_folios; i++) {
if (folios[i])
@@ -9789,15 +10120,25 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_state *cached_state = NULL;
- struct extent_map *em = NULL;
struct btrfs_chunk_map *map = NULL;
struct btrfs_device *device = NULL;
struct btrfs_swap_info bsi = {
.lowest_ppage = (sector_t)-1ULL,
};
+ struct btrfs_backref_share_check_ctx *backref_ctx = NULL;
+ struct btrfs_path *path = NULL;
int ret = 0;
u64 isize;
- u64 start;
+ u64 prev_extent_end = 0;
+
+ /*
+ * Acquire the inode's mmap lock to prevent races with memory mapped
+ * writes, as they could happen after we flush delalloc below and before
+ * we lock the extent range further below. The inode itself was already
+ * locked higher up in the call chain.
+ */
+ btrfs_assert_inode_locked(BTRFS_I(inode));
+ down_write(&BTRFS_I(inode)->i_mmap_lock);
/*
* If the swap file was just created, make sure delalloc is done. If the
@@ -9806,22 +10147,32 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
*/
ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
if (ret)
- return ret;
+ goto out_unlock_mmap;
/*
* The inode is locked, so these flags won't change after we check them.
*/
if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
btrfs_warn(fs_info, "swapfile must not be compressed");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
}
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
btrfs_warn(fs_info, "swapfile must not be copy-on-write");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
}
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
btrfs_warn(fs_info, "swapfile must not be checksummed");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
+ }
+
+ path = btrfs_alloc_path();
+ backref_ctx = btrfs_alloc_backref_share_check_ctx();
+ if (!path || !backref_ctx) {
+ ret = -ENOMEM;
+ goto out_unlock_mmap;
}
/*
@@ -9836,7 +10187,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
btrfs_warn(fs_info,
"cannot activate swapfile while exclusive operation is running");
- return -EBUSY;
+ ret = -EBUSY;
+ goto out_unlock_mmap;
}
/*
@@ -9850,7 +10202,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
btrfs_exclop_finish(fs_info);
btrfs_warn(fs_info,
"cannot activate swapfile because snapshot creation is in progress");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
}
/*
* Snapshots can create extents which require COW even if NODATACOW is
@@ -9866,36 +10219,53 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
if (btrfs_root_dead(root)) {
spin_unlock(&root->root_item_lock);
+ btrfs_drew_write_unlock(&root->snapshot_lock);
btrfs_exclop_finish(fs_info);
btrfs_warn(fs_info,
"cannot activate swapfile because subvolume %llu is being deleted",
btrfs_root_id(root));
- return -EPERM;
+ ret = -EPERM;
+ goto out_unlock_mmap;
}
atomic_inc(&root->nr_swapfiles);
spin_unlock(&root->root_item_lock);
isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
- lock_extent(io_tree, 0, isize - 1, &cached_state);
- start = 0;
- while (start < isize) {
- u64 logical_block_start, physical_block_start;
+ btrfs_lock_extent(io_tree, 0, isize - 1, &cached_state);
+ while (prev_extent_end < isize) {
+ struct btrfs_key key;
+ struct extent_buffer *leaf;
+ struct btrfs_file_extent_item *ei;
struct btrfs_block_group *bg;
- u64 len = isize - start;
+ u64 logical_block_start;
+ u64 physical_block_start;
+ u64 extent_gen;
+ u64 disk_bytenr;
+ u64 len;
- em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
+ key.objectid = btrfs_ino(BTRFS_I(inode));
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = prev_extent_end;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
goto out;
- }
- if (em->disk_bytenr == EXTENT_MAP_HOLE) {
+ /*
+ * If key not found it means we have an implicit hole (NO_HOLES
+ * is enabled).
+ */
+ if (ret > 0) {
btrfs_warn(fs_info, "swapfile must not have holes");
ret = -EINVAL;
goto out;
}
- if (em->disk_bytenr == EXTENT_MAP_INLINE) {
+
+ leaf = path->nodes[0];
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
+
+ if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
/*
* It's unlikely we'll ever actually find ourselves
* here, as a file small enough to fit inline won't be
@@ -9907,23 +10277,45 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
ret = -EINVAL;
goto out;
}
- if (extent_map_is_compressed(em)) {
+
+ if (btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) {
btrfs_warn(fs_info, "swapfile must not be compressed");
ret = -EINVAL;
goto out;
}
- logical_block_start = extent_map_block_start(em) + (start - em->start);
- len = min(len, em->len - (start - em->start));
- free_extent_map(em);
- em = NULL;
+ disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
+ if (disk_bytenr == 0) {
+ btrfs_warn(fs_info, "swapfile must not have holes");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ logical_block_start = disk_bytenr + btrfs_file_extent_offset(leaf, ei);
+ extent_gen = btrfs_file_extent_generation(leaf, ei);
+ prev_extent_end = btrfs_file_extent_end(path);
+
+ if (prev_extent_end > isize)
+ len = isize - key.offset;
+ else
+ len = btrfs_file_extent_num_bytes(leaf, ei);
- ret = can_nocow_extent(inode, start, &len, NULL, false, true);
+ backref_ctx->curr_leaf_bytenr = leaf->start;
+
+ /*
+ * Don't need the path anymore, release to avoid deadlocks when
+ * calling btrfs_is_data_extent_shared() because when joining a
+ * transaction it can block waiting for the current one's commit
+ * which in turn may be trying to lock the same leaf to flush
+ * delayed items for example.
+ */
+ btrfs_release_path(path);
+
+ ret = btrfs_is_data_extent_shared(BTRFS_I(inode), disk_bytenr,
+ extent_gen, backref_ctx);
if (ret < 0) {
goto out;
- } else if (ret) {
- ret = 0;
- } else {
+ } else if (ret > 0) {
btrfs_warn(fs_info,
"swapfile must not be copy-on-write");
ret = -EINVAL;
@@ -9958,7 +10350,6 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
physical_block_start = (map->stripes[0].physical +
(logical_block_start - map->start));
- len = min(len, map->chunk_len - (logical_block_start - map->start));
btrfs_free_chunk_map(map);
map = NULL;
@@ -9999,24 +10390,27 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
if (ret)
goto out;
}
- bsi.start = start;
+ bsi.start = key.offset;
bsi.block_start = physical_block_start;
bsi.block_len = len;
}
- start += len;
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
+
+ cond_resched();
}
if (bsi.block_len)
ret = btrfs_add_swap_extent(sis, &bsi);
out:
- if (!IS_ERR_OR_NULL(em))
- free_extent_map(em);
if (!IS_ERR_OR_NULL(map))
btrfs_free_chunk_map(map);
- unlock_extent(io_tree, 0, isize - 1, &cached_state);
+ btrfs_unlock_extent(io_tree, 0, isize - 1, &cached_state);
if (ret)
btrfs_swap_deactivate(file);
@@ -10025,6 +10419,10 @@ out:
btrfs_exclop_finish(fs_info);
+out_unlock_mmap:
+ up_write(&BTRFS_I(inode)->i_mmap_lock);
+ btrfs_free_backref_share_ctx(backref_ctx);
+ btrfs_free_path(path);
if (ret)
return ret;
@@ -10033,7 +10431,6 @@ out:
*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
sis->max = bsi.nr_pages;
sis->pages = bsi.nr_pages - 1;
- sis->highest_bit = bsi.nr_pages - 1;
return bsi.nr_extents;
}
#else
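
The swap-activation loop above no longer goes through the extent map cache; it walks the file extent items directly by file offset, treats a missing item (with NO_HOLES) or a zero disk_bytenr as a hole, and advances prev_extent_end until it reaches i_size, rejecting shared extents along the way. A simplified in-memory model of just the hole-detection part of that walk, with made-up offsets and no btree search:

#include <stdio.h>

struct file_extent {
	unsigned long long offset;	/* file offset (key.offset) */
	unsigned long long num_bytes;
	unsigned long long disk_bytenr;	/* 0 marks a hole extent item */
};

/* Returns the extent starting exactly at @off, or NULL for an implicit hole. */
static const struct file_extent *lookup(const struct file_extent *items,
					int n, unsigned long long off)
{
	for (int i = 0; i < n; i++)
		if (items[i].offset == off)
			return &items[i];
	return NULL;
}

int main(void)
{
	const struct file_extent items[] = {
		{ 0, 8192, 1048576 }, { 8192, 4096, 2097152 },
	};
	unsigned long long isize = 16384, prev_extent_end = 0;

	while (prev_extent_end < isize) {
		const struct file_extent *fe = lookup(items, 2, prev_extent_end);

		if (!fe || fe->disk_bytenr == 0) {
			printf("swapfile must not have holes (offset %llu)\n",
			       prev_extent_end);
			return 1;
		}
		prev_extent_end = fe->offset + fe->num_bytes;
	}
	puts("no holes up to i_size");
	return 0;
}
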
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index c9302d193187..acb484546b1d 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -118,8 +118,8 @@ struct btrfs_ioctl_encoded_io_args_32 {
#endif
/* Mask out flags that are inappropriate for the given type of inode. */
-static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode,
- unsigned int flags)
+static unsigned int btrfs_mask_fsflags_for_type(const struct inode *inode,
+ unsigned int flags)
{
if (S_ISDIR(inode->i_mode))
return flags;
@@ -133,11 +133,11 @@ static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode,
* Export internal inode flags to the format expected by the FS_IOC_GETFLAGS
* ioctl.
*/
-static unsigned int btrfs_inode_flags_to_fsflags(struct btrfs_inode *binode)
+static unsigned int btrfs_inode_flags_to_fsflags(const struct btrfs_inode *inode)
{
unsigned int iflags = 0;
- u32 flags = binode->flags;
- u32 ro_flags = binode->ro_flags;
+ u32 flags = inode->flags;
+ u32 ro_flags = inode->ro_flags;
if (flags & BTRFS_INODE_SYNC)
iflags |= FS_SYNC_FL;
@@ -167,25 +167,24 @@ static unsigned int btrfs_inode_flags_to_fsflags(struct btrfs_inode *binode)
/*
* Update inode->i_flags based on the btrfs internal flags.
*/
-void btrfs_sync_inode_flags_to_i_flags(struct inode *inode)
+void btrfs_sync_inode_flags_to_i_flags(struct btrfs_inode *inode)
{
- struct btrfs_inode *binode = BTRFS_I(inode);
unsigned int new_fl = 0;
- if (binode->flags & BTRFS_INODE_SYNC)
+ if (inode->flags & BTRFS_INODE_SYNC)
new_fl |= S_SYNC;
- if (binode->flags & BTRFS_INODE_IMMUTABLE)
+ if (inode->flags & BTRFS_INODE_IMMUTABLE)
new_fl |= S_IMMUTABLE;
- if (binode->flags & BTRFS_INODE_APPEND)
+ if (inode->flags & BTRFS_INODE_APPEND)
new_fl |= S_APPEND;
- if (binode->flags & BTRFS_INODE_NOATIME)
+ if (inode->flags & BTRFS_INODE_NOATIME)
new_fl |= S_NOATIME;
- if (binode->flags & BTRFS_INODE_DIRSYNC)
+ if (inode->flags & BTRFS_INODE_DIRSYNC)
new_fl |= S_DIRSYNC;
- if (binode->ro_flags & BTRFS_INODE_RO_VERITY)
+ if (inode->ro_flags & BTRFS_INODE_RO_VERITY)
new_fl |= S_VERITY;
- set_mask_bits(&inode->i_flags,
+ set_mask_bits(&inode->vfs_inode.i_flags,
S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC |
S_VERITY, new_fl);
}
@@ -219,7 +218,7 @@ static int check_fsflags(unsigned int old_flags, unsigned int flags)
return 0;
}
-static int check_fsflags_compatible(struct btrfs_fs_info *fs_info,
+static int check_fsflags_compatible(const struct btrfs_fs_info *fs_info,
unsigned int flags)
{
if (btrfs_is_zoned(fs_info) && (flags & FS_NOCOW_FL))
@@ -246,26 +245,25 @@ static int btrfs_check_ioctl_vol_args2_subvol_name(const struct btrfs_ioctl_vol_
* Set flags/xflags from the internal inode flags. The remaining items of
* fsxattr are zeroed.
*/
-int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int btrfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
- struct btrfs_inode *binode = BTRFS_I(d_inode(dentry));
+ const struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
- fileattr_fill_flags(fa, btrfs_inode_flags_to_fsflags(binode));
+ fileattr_fill_flags(fa, btrfs_inode_flags_to_fsflags(inode));
return 0;
}
int btrfs_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa)
+ struct dentry *dentry, struct file_kattr *fa)
{
- struct inode *inode = d_inode(dentry);
- struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- struct btrfs_inode *binode = BTRFS_I(inode);
- struct btrfs_root *root = binode->root;
+ struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
unsigned int fsflags, old_fsflags;
int ret;
const char *comp = NULL;
- u32 binode_flags;
+ u32 inode_flags;
if (btrfs_root_readonly(root))
return -EROFS;
@@ -273,8 +271,8 @@ int btrfs_fileattr_set(struct mnt_idmap *idmap,
if (fileattr_has_fsx(fa))
return -EOPNOTSUPP;
- fsflags = btrfs_mask_fsflags_for_type(inode, fa->flags);
- old_fsflags = btrfs_inode_flags_to_fsflags(binode);
+ fsflags = btrfs_mask_fsflags_for_type(&inode->vfs_inode, fa->flags);
+ old_fsflags = btrfs_inode_flags_to_fsflags(inode);
ret = check_fsflags(old_fsflags, fsflags);
if (ret)
return ret;
@@ -283,27 +281,27 @@ int btrfs_fileattr_set(struct mnt_idmap *idmap,
if (ret)
return ret;
- binode_flags = binode->flags;
+ inode_flags = inode->flags;
if (fsflags & FS_SYNC_FL)
- binode_flags |= BTRFS_INODE_SYNC;
+ inode_flags |= BTRFS_INODE_SYNC;
else
- binode_flags &= ~BTRFS_INODE_SYNC;
+ inode_flags &= ~BTRFS_INODE_SYNC;
if (fsflags & FS_IMMUTABLE_FL)
- binode_flags |= BTRFS_INODE_IMMUTABLE;
+ inode_flags |= BTRFS_INODE_IMMUTABLE;
else
- binode_flags &= ~BTRFS_INODE_IMMUTABLE;
+ inode_flags &= ~BTRFS_INODE_IMMUTABLE;
if (fsflags & FS_APPEND_FL)
- binode_flags |= BTRFS_INODE_APPEND;
+ inode_flags |= BTRFS_INODE_APPEND;
else
- binode_flags &= ~BTRFS_INODE_APPEND;
+ inode_flags &= ~BTRFS_INODE_APPEND;
if (fsflags & FS_NODUMP_FL)
- binode_flags |= BTRFS_INODE_NODUMP;
+ inode_flags |= BTRFS_INODE_NODUMP;
else
- binode_flags &= ~BTRFS_INODE_NODUMP;
+ inode_flags &= ~BTRFS_INODE_NODUMP;
if (fsflags & FS_NOATIME_FL)
- binode_flags |= BTRFS_INODE_NOATIME;
+ inode_flags |= BTRFS_INODE_NOATIME;
else
- binode_flags &= ~BTRFS_INODE_NOATIME;
+ inode_flags &= ~BTRFS_INODE_NOATIME;
/* If coming from FS_IOC_FSSETXATTR then skip unconverted flags */
if (!fa->flags_valid) {
@@ -315,32 +313,32 @@ int btrfs_fileattr_set(struct mnt_idmap *idmap,
}
if (fsflags & FS_DIRSYNC_FL)
- binode_flags |= BTRFS_INODE_DIRSYNC;
+ inode_flags |= BTRFS_INODE_DIRSYNC;
else
- binode_flags &= ~BTRFS_INODE_DIRSYNC;
+ inode_flags &= ~BTRFS_INODE_DIRSYNC;
if (fsflags & FS_NOCOW_FL) {
- if (S_ISREG(inode->i_mode)) {
+ if (S_ISREG(inode->vfs_inode.i_mode)) {
/*
* It's safe to turn csums off here, no extents exist.
* Otherwise we want the flag to reflect the real COW
* status of the file and will not set it.
*/
- if (inode->i_size == 0)
- binode_flags |= BTRFS_INODE_NODATACOW |
- BTRFS_INODE_NODATASUM;
+ if (inode->vfs_inode.i_size == 0)
+ inode_flags |= BTRFS_INODE_NODATACOW |
+ BTRFS_INODE_NODATASUM;
} else {
- binode_flags |= BTRFS_INODE_NODATACOW;
+ inode_flags |= BTRFS_INODE_NODATACOW;
}
} else {
/*
* Revert back under same assumptions as above
*/
- if (S_ISREG(inode->i_mode)) {
- if (inode->i_size == 0)
- binode_flags &= ~(BTRFS_INODE_NODATACOW |
- BTRFS_INODE_NODATASUM);
+ if (S_ISREG(inode->vfs_inode.i_mode)) {
+ if (inode->vfs_inode.i_size == 0)
+ inode_flags &= ~(BTRFS_INODE_NODATACOW |
+ BTRFS_INODE_NODATASUM);
} else {
- binode_flags &= ~BTRFS_INODE_NODATACOW;
+ inode_flags &= ~BTRFS_INODE_NODATACOW;
}
}
@@ -350,21 +348,21 @@ int btrfs_fileattr_set(struct mnt_idmap *idmap,
* things smaller.
*/
if (fsflags & FS_NOCOMP_FL) {
- binode_flags &= ~BTRFS_INODE_COMPRESS;
- binode_flags |= BTRFS_INODE_NOCOMPRESS;
+ inode_flags &= ~BTRFS_INODE_COMPRESS;
+ inode_flags |= BTRFS_INODE_NOCOMPRESS;
} else if (fsflags & FS_COMPR_FL) {
- if (IS_SWAPFILE(inode))
+ if (IS_SWAPFILE(&inode->vfs_inode))
return -ETXTBSY;
- binode_flags |= BTRFS_INODE_COMPRESS;
- binode_flags &= ~BTRFS_INODE_NOCOMPRESS;
+ inode_flags |= BTRFS_INODE_COMPRESS;
+ inode_flags &= ~BTRFS_INODE_NOCOMPRESS;
comp = btrfs_compress_type2str(fs_info->compress_type);
if (!comp || comp[0] == 0)
comp = btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB);
} else {
- binode_flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
+ inode_flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
}
/*
@@ -376,114 +374,34 @@ int btrfs_fileattr_set(struct mnt_idmap *idmap,
return PTR_ERR(trans);
if (comp) {
- ret = btrfs_set_prop(trans, BTRFS_I(inode), "btrfs.compression",
+ ret = btrfs_set_prop(trans, inode, "btrfs.compression",
comp, strlen(comp), 0);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
} else {
- ret = btrfs_set_prop(trans, BTRFS_I(inode), "btrfs.compression",
- NULL, 0, 0);
- if (ret && ret != -ENODATA) {
+ ret = btrfs_set_prop(trans, inode, "btrfs.compression", NULL, 0, 0);
+ if (unlikely(ret && ret != -ENODATA)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
}
update_flags:
- binode->flags = binode_flags;
+ inode->flags = inode_flags;
+ btrfs_update_inode_mapping_flags(inode);
btrfs_sync_inode_flags_to_i_flags(inode);
- inode_inc_iversion(inode);
- inode_set_ctime_current(inode);
- ret = btrfs_update_inode(trans, BTRFS_I(inode));
+ inode_inc_iversion(&inode->vfs_inode);
+ inode_set_ctime_current(&inode->vfs_inode);
+ ret = btrfs_update_inode(trans, inode);
out_end_trans:
btrfs_end_transaction(trans);
return ret;
}
-/*
- * Start exclusive operation @type, return true on success
- */
-bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
- enum btrfs_exclusive_operation type)
-{
- bool ret = false;
-
- spin_lock(&fs_info->super_lock);
- if (fs_info->exclusive_operation == BTRFS_EXCLOP_NONE) {
- fs_info->exclusive_operation = type;
- ret = true;
- }
- spin_unlock(&fs_info->super_lock);
-
- return ret;
-}
-
-/*
- * Conditionally allow to enter the exclusive operation in case it's compatible
- * with the running one. This must be paired with btrfs_exclop_start_unlock and
- * btrfs_exclop_finish.
- *
- * Compatibility:
- * - the same type is already running
- * - when trying to add a device and balance has been paused
- * - not BTRFS_EXCLOP_NONE - this is intentionally incompatible and the caller
- * must check the condition first that would allow none -> @type
- */
-bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
- enum btrfs_exclusive_operation type)
-{
- spin_lock(&fs_info->super_lock);
- if (fs_info->exclusive_operation == type ||
- (fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED &&
- type == BTRFS_EXCLOP_DEV_ADD))
- return true;
-
- spin_unlock(&fs_info->super_lock);
- return false;
-}
-
-void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info)
-{
- spin_unlock(&fs_info->super_lock);
-}
-
-void btrfs_exclop_finish(struct btrfs_fs_info *fs_info)
-{
- spin_lock(&fs_info->super_lock);
- WRITE_ONCE(fs_info->exclusive_operation, BTRFS_EXCLOP_NONE);
- spin_unlock(&fs_info->super_lock);
- sysfs_notify(&fs_info->fs_devices->fsid_kobj, NULL, "exclusive_operation");
-}
-
-void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
- enum btrfs_exclusive_operation op)
-{
- switch (op) {
- case BTRFS_EXCLOP_BALANCE_PAUSED:
- spin_lock(&fs_info->super_lock);
- ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE ||
- fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD ||
- fs_info->exclusive_operation == BTRFS_EXCLOP_NONE ||
- fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
- fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE_PAUSED;
- spin_unlock(&fs_info->super_lock);
- break;
- case BTRFS_EXCLOP_BALANCE:
- spin_lock(&fs_info->super_lock);
- ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
- fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
- spin_unlock(&fs_info->super_lock);
- break;
- default:
- btrfs_warn(fs_info,
- "invalid exclop balance operation %d requested", op);
- }
-}
-
-static int btrfs_ioctl_getversion(struct inode *inode, int __user *arg)
+static int btrfs_ioctl_getversion(const struct inode *inode, int __user *arg)
{
return put_user(inode->i_generation, arg);
}
@@ -551,22 +469,11 @@ static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
return ret;
}
-int __pure btrfs_is_empty_uuid(const u8 *uuid)
-{
- int i;
-
- for (i = 0; i < BTRFS_UUID_SIZE; i++) {
- if (uuid[i])
- return 0;
- }
- return 1;
-}
-
/*
* Calculate the number of transaction items to reserve for creating a subvolume
* or snapshot, not including the inode, directory entries, or parent directory.
*/
-static unsigned int create_subvol_num_items(struct btrfs_qgroup_inherit *inherit)
+static unsigned int create_subvol_num_items(const struct btrfs_qgroup_inherit *inherit)
{
/*
* 1 to add root block
@@ -596,7 +503,7 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
struct btrfs_trans_handle *trans;
struct btrfs_key key;
- struct btrfs_root_item *root_item;
+ struct btrfs_root_item AUTO_KFREE(root_item);
struct btrfs_inode_item *inode_item;
struct extent_buffer *leaf;
struct btrfs_root *root = BTRFS_I(dir)->root;
@@ -620,20 +527,18 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
ret = btrfs_get_free_objectid(fs_info->tree_root, &objectid);
if (ret)
- goto out_root_item;
+ return ret;
/*
* Don't create subvolume whose level is not zero. Or qgroup will be
* screwed up since it assumes subvolume qgroup's level to be 0.
*/
- if (btrfs_qgroup_level(objectid)) {
- ret = -ENOSPC;
- goto out_root_item;
- }
+ if (btrfs_qgroup_level(objectid))
+ return -ENOSPC;
ret = get_anon_bdev(&anon_dev);
if (ret < 0)
- goto out_root_item;
+ return ret;
new_inode_args.inode = btrfs_new_subvol_inode(idmap, dir);
if (!new_inode_args.inode) {
@@ -708,8 +613,8 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
btrfs_set_root_dirid(root_item, BTRFS_FIRST_FREE_OBJECTID);
key.objectid = objectid;
- key.offset = 0;
key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = 0;
ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
root_item);
if (ret) {
@@ -726,7 +631,7 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
btrfs_clear_buffer_dirty(trans, leaf);
btrfs_tree_unlock(leaf);
ret2 = btrfs_free_tree_block(trans, objectid, leaf, 0, 1);
- if (ret2 < 0)
+ if (unlikely(ret2 < 0))
btrfs_abort_transaction(trans, ret2);
free_extent_buffer(leaf);
goto out;
@@ -747,26 +652,26 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
/* ... and new_root is owned by new_inode_args.inode now. */
ret = btrfs_record_root_in_trans(trans, new_root);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
ret = btrfs_uuid_tree_add(trans, root_item->uuid,
BTRFS_UUID_KEY_SUBVOL, objectid);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
+ btrfs_record_new_subvolume(trans, BTRFS_I(dir));
+
ret = btrfs_create_new_inode(trans, &new_inode_args);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
- btrfs_record_new_subvolume(trans, BTRFS_I(dir));
-
d_instantiate_new(dentry, new_inode_args.inode);
new_inode_args.inode = NULL;
@@ -785,8 +690,7 @@ out_inode:
out_anon_dev:
if (anon_dev)
free_anon_bdev(anon_dev);
-out_root_item:
- kfree(root_item);
+
return ret;
}
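
The AUTO_KFREE(root_item) declaration above, like the BTRFS_PATH_AUTO_FREE(path) used elsewhere in this diff, is what allows the out_root_item: label and its kfree() to go away: the object is released automatically when the variable leaves scope. A minimal userspace analogue of that scope-based cleanup, using the compiler cleanup attribute such helpers are presumably built on (illustrative names, not the kernel macros):

#include <stdio.h>
#include <stdlib.h>

static void free_buf(char **p)
{
	free(*p);		/* free(NULL) is a no-op, so early exits are safe */
}

int main(void)
{
	/* Freed automatically when 'buf' goes out of scope, no label needed. */
	char *buf __attribute__((cleanup(free_buf))) = malloc(64);

	if (!buf)
		return 1;
	snprintf(buf, 64, "cleanup runs at scope exit");
	puts(buf);
	return 0;
}
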
@@ -934,7 +838,7 @@ free_pending:
static int btrfs_may_delete(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *victim, int isdir)
{
- int error;
+ int ret;
if (d_really_is_negative(victim))
return -ENOENT;
@@ -944,9 +848,9 @@ static int btrfs_may_delete(struct mnt_idmap *idmap,
return -EINVAL;
audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
- error = inode_permission(idmap, dir, MAY_WRITE | MAY_EXEC);
- if (error)
- return error;
+ ret = inode_permission(idmap, dir, MAY_WRITE | MAY_EXEC);
+ if (ret)
+ return ret;
if (IS_APPEND(dir))
return -EPERM;
if (check_sticky(idmap, dir, d_inode(victim)) ||
@@ -969,7 +873,7 @@ static int btrfs_may_delete(struct mnt_idmap *idmap,
/* copy of may_create in fs/namei.c() */
static inline int btrfs_may_create(struct mnt_idmap *idmap,
- struct inode *dir, struct dentry *child)
+ struct inode *dir, const struct dentry *child)
{
if (d_really_is_positive(child))
return -EEXIST;
@@ -985,39 +889,32 @@ static inline int btrfs_may_create(struct mnt_idmap *idmap,
* sys_mkdirat and vfs_mkdir, but we only do a single component lookup
* inside this filesystem so it's quite a bit simpler.
*/
-static noinline int btrfs_mksubvol(const struct path *parent,
+static noinline int btrfs_mksubvol(struct dentry *parent,
struct mnt_idmap *idmap,
- const char *name, int namelen,
- struct btrfs_root *snap_src,
+ struct qstr *qname, struct btrfs_root *snap_src,
bool readonly,
struct btrfs_qgroup_inherit *inherit)
{
- struct inode *dir = d_inode(parent->dentry);
+ struct inode *dir = d_inode(parent);
struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
struct dentry *dentry;
- struct fscrypt_str name_str = FSTR_INIT((char *)name, namelen);
- int error;
-
- error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
- if (error == -EINTR)
- return error;
+ struct fscrypt_str name_str = FSTR_INIT((char *)qname->name, qname->len);
+ int ret;
- dentry = lookup_one(idmap, name, parent->dentry, namelen);
- error = PTR_ERR(dentry);
+ dentry = start_creating_killable(idmap, parent, qname);
if (IS_ERR(dentry))
- goto out_unlock;
+ return PTR_ERR(dentry);
- error = btrfs_may_create(idmap, dir, dentry);
- if (error)
+ ret = btrfs_may_create(idmap, dir, dentry);
+ if (ret)
goto out_dput;
/*
* even if this name doesn't exist, we may get hash collisions.
* check for them now when we can safely fail
*/
- error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
- dir->i_ino, &name_str);
- if (error)
+ ret = btrfs_check_dir_item_collision(BTRFS_I(dir)->root, dir->i_ino, &name_str);
+ if (ret)
goto out_dput;
down_read(&fs_info->subvol_sem);
@@ -1026,24 +923,22 @@ static noinline int btrfs_mksubvol(const struct path *parent,
goto out_up_read;
if (snap_src)
- error = create_snapshot(snap_src, dir, dentry, readonly, inherit);
+ ret = create_snapshot(snap_src, dir, dentry, readonly, inherit);
else
- error = create_subvol(idmap, dir, dentry, inherit);
+ ret = create_subvol(idmap, dir, dentry, inherit);
- if (!error)
+ if (!ret)
fsnotify_mkdir(dir, dentry);
out_up_read:
up_read(&fs_info->subvol_sem);
out_dput:
- dput(dentry);
-out_unlock:
- btrfs_inode_unlock(BTRFS_I(dir), 0);
- return error;
+ end_creating(dentry);
+ return ret;
}
-static noinline int btrfs_mksnapshot(const struct path *parent,
+static noinline int btrfs_mksnapshot(struct dentry *parent,
struct mnt_idmap *idmap,
- const char *name, int namelen,
+ struct qstr *qname,
struct btrfs_root *root,
bool readonly,
struct btrfs_qgroup_inherit *inherit)
@@ -1052,7 +947,7 @@ static noinline int btrfs_mksnapshot(const struct path *parent,
/*
* Force new buffered writes to reserve space even when NOCOW is
- * possible. This is to avoid later writeback (running dealloc) to
+ * possible. This is to avoid later writeback (running delalloc) to
* fallback to COW mode and unexpectedly fail with ENOSPC.
*/
btrfs_drew_read_lock(&root->snapshot_lock);
@@ -1070,8 +965,8 @@ static noinline int btrfs_mksnapshot(const struct path *parent,
btrfs_wait_ordered_extents(root, U64_MAX, NULL);
- ret = btrfs_mksubvol(parent, idmap, name, namelen,
- root, readonly, inherit);
+ ret = btrfs_mksubvol(parent, idmap, qname, root, readonly, inherit);
+
atomic_dec(&root->snapshot_force_cow);
out:
btrfs_drew_read_unlock(&root->snapshot_lock);
@@ -1124,17 +1019,14 @@ static noinline int btrfs_ioctl_resize(struct file *file,
void __user *arg)
{
BTRFS_DEV_LOOKUP_ARGS(args);
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 new_size;
u64 old_size;
u64 devid = 1;
- struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_vol_args *vol_args;
- struct btrfs_trans_handle *trans;
struct btrfs_device *device = NULL;
char *sizestr;
- char *retptr;
char *devstr = NULL;
int ret = 0;
int mod = 0;
@@ -1202,6 +1094,8 @@ static noinline int btrfs_ioctl_resize(struct file *file,
if (!strcmp(sizestr, "max"))
new_size = bdev_nr_bytes(device->bdev);
else {
+ char *retptr;
+
if (sizestr[0] == '-') {
mod = -1;
sizestr++;
@@ -1249,6 +1143,8 @@ static noinline int btrfs_ioctl_resize(struct file *file,
new_size = round_down(new_size, fs_info->sectorsize);
if (new_size > old_size) {
+ struct btrfs_trans_handle *trans;
+
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
@@ -1261,7 +1157,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
} /* equal, nothing need to do */
if (ret == 0 && new_size != old_size)
- btrfs_info_in_rcu(fs_info,
+ btrfs_info(fs_info,
"resize device %s (devid %llu) from %llu to %llu",
btrfs_dev_name(device), device->devid,
old_size, new_size);
@@ -1276,12 +1172,12 @@ out_drop:
static noinline int __btrfs_ioctl_snap_create(struct file *file,
struct mnt_idmap *idmap,
- const char *name, unsigned long fd, int subvol,
+ const char *name, unsigned long fd, bool subvol,
bool readonly,
struct btrfs_qgroup_inherit *inherit)
{
- int namelen;
int ret = 0;
+ struct qstr qname = QSTR_INIT(name, strlen(name));
if (!S_ISDIR(file_inode(file)->i_mode))
return -ENOTDIR;
@@ -1290,21 +1186,20 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file,
if (ret)
goto out;
- namelen = strlen(name);
if (strchr(name, '/')) {
ret = -EINVAL;
goto out_drop_write;
}
- if (name[0] == '.' &&
- (namelen == 1 || (name[1] == '.' && namelen == 2))) {
+ if (qname.name[0] == '.' &&
+ (qname.len == 1 || (qname.name[1] == '.' && qname.len == 2))) {
ret = -EEXIST;
goto out_drop_write;
}
if (subvol) {
- ret = btrfs_mksubvol(&file->f_path, idmap, name,
- namelen, NULL, readonly, inherit);
+ ret = btrfs_mksubvol(file_dentry(file), idmap, &qname, NULL,
+ readonly, inherit);
} else {
CLASS(fd, src)(fd);
struct inode *src_inode;
@@ -1334,8 +1229,7 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file,
*/
ret = -EINVAL;
} else {
- ret = btrfs_mksnapshot(&file->f_path, idmap,
- name, namelen,
+ ret = btrfs_mksnapshot(file_dentry(file), idmap, &qname,
BTRFS_I(src_inode)->root,
readonly, inherit);
}
@@ -1347,7 +1241,7 @@ out:
}
static noinline int btrfs_ioctl_snap_create(struct file *file,
- void __user *arg, int subvol)
+ void __user *arg, bool subvol)
{
struct btrfs_ioctl_vol_args *vol_args;
int ret;
@@ -1372,7 +1266,7 @@ out:
}
static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
- void __user *arg, int subvol)
+ void __user *arg, bool subvol)
{
struct btrfs_ioctl_vol_args_v2 *vol_args;
int ret;
@@ -1427,15 +1321,15 @@ free_args:
return ret;
}
-static noinline int btrfs_ioctl_subvol_getflags(struct inode *inode,
+static noinline int btrfs_ioctl_subvol_getflags(struct btrfs_inode *inode,
void __user *arg)
{
- struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
u64 flags = 0;
- if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID)
+ if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
return -EINVAL;
down_read(&fs_info->subvol_sem);
@@ -1538,8 +1432,8 @@ out:
return ret;
}
-static noinline int key_in_sk(struct btrfs_key *key,
- struct btrfs_ioctl_search_key *sk)
+static noinline bool key_in_sk(const struct btrfs_key *key,
+ const struct btrfs_ioctl_search_key *sk)
{
struct btrfs_key test;
int ret;
@@ -1550,7 +1444,7 @@ static noinline int key_in_sk(struct btrfs_key *key,
ret = btrfs_comp_cpu_keys(key, &test);
if (ret < 0)
- return 0;
+ return false;
test.objectid = sk->max_objectid;
test.type = sk->max_type;
@@ -1558,13 +1452,13 @@ static noinline int key_in_sk(struct btrfs_key *key,
ret = btrfs_comp_cpu_keys(key, &test);
if (ret > 0)
- return 0;
- return 1;
+ return false;
+ return true;
}
static noinline int copy_to_sk(struct btrfs_path *path,
struct btrfs_key *key,
- struct btrfs_ioctl_search_key *sk,
+ const struct btrfs_ioctl_search_key *sk,
u64 *buf_size,
char __user *ubuf,
unsigned long *sk_offset,
@@ -1621,8 +1515,8 @@ static noinline int copy_to_sk(struct btrfs_path *path,
}
sh.objectid = key->objectid;
- sh.offset = key->offset;
sh.type = key->type;
+ sh.offset = key->offset;
sh.len = item_len;
sh.transid = found_transid;
@@ -1695,15 +1589,14 @@ out:
return ret;
}
-static noinline int search_ioctl(struct inode *inode,
+static noinline int search_ioctl(struct btrfs_root *root,
struct btrfs_ioctl_search_key *sk,
u64 *buf_size,
char __user *ubuf)
{
- struct btrfs_fs_info *info = inode_to_fs_info(inode);
- struct btrfs_root *root;
+ struct btrfs_fs_info *info = root->fs_info;
struct btrfs_key key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
int num_found = 0;
unsigned long sk_offset = 0;
@@ -1718,14 +1611,13 @@ static noinline int search_ioctl(struct inode *inode,
return -ENOMEM;
if (sk->tree_id == 0) {
- /* search the root of the inode that was passed */
- root = btrfs_grab_root(BTRFS_I(inode)->root);
+ /* Search the root that we got passed. */
+ root = btrfs_grab_root(root);
} else {
+ /* Look up the root from the arguments. */
root = btrfs_get_fs_root(info, sk->tree_id, true);
- if (IS_ERR(root)) {
- btrfs_free_path(path);
+ if (IS_ERR(root))
return PTR_ERR(root);
- }
}
key.objectid = sk->min_objectid;
@@ -1733,21 +1625,19 @@ static noinline int search_ioctl(struct inode *inode,
key.offset = sk->min_offset;
while (1) {
- ret = -EFAULT;
/*
* Ensure that the whole user buffer is faulted in at sub-page
* granularity, otherwise the loop may live-lock.
*/
- if (fault_in_subpage_writeable(ubuf + sk_offset,
- *buf_size - sk_offset))
+ if (fault_in_subpage_writeable(ubuf + sk_offset, *buf_size - sk_offset)) {
+ ret = -EFAULT;
break;
+ }
ret = btrfs_search_forward(root, &key, path, sk->min_transid);
- if (ret != 0) {
- if (ret > 0)
- ret = 0;
- goto err;
- }
+ if (ret)
+ break;
+
ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
&sk_offset, &num_found);
btrfs_release_path(path);
@@ -1755,16 +1645,16 @@ static noinline int search_ioctl(struct inode *inode,
break;
}
+ /* Normalize return values from btrfs_search_forward() and copy_to_sk(). */
if (ret > 0)
ret = 0;
-err:
+
sk->nr_items = num_found;
btrfs_put_root(root);
- btrfs_free_path(path);
return ret;
}
-static noinline int btrfs_ioctl_tree_search(struct inode *inode,
+static noinline int btrfs_ioctl_tree_search(struct btrfs_root *root,
void __user *argp)
{
struct btrfs_ioctl_search_args __user *uargs = argp;
@@ -1780,7 +1670,7 @@ static noinline int btrfs_ioctl_tree_search(struct inode *inode,
buf_size = sizeof(uargs->buf);
- ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);
+ ret = search_ioctl(root, &sk, &buf_size, uargs->buf);
/*
 * In the original implementation an overflow is handled by returning a
@@ -1794,7 +1684,7 @@ static noinline int btrfs_ioctl_tree_search(struct inode *inode,
return ret;
}
-static noinline int btrfs_ioctl_tree_search_v2(struct inode *inode,
+static noinline int btrfs_ioctl_tree_search_v2(struct btrfs_root *root,
void __user *argp)
{
struct btrfs_ioctl_search_args_v2 __user *uarg = argp;
@@ -1816,7 +1706,7 @@ static noinline int btrfs_ioctl_tree_search_v2(struct inode *inode,
if (buf_size > buf_limit)
buf_size = buf_limit;
- ret = search_ioctl(inode, &args.key, &buf_size,
+ ret = search_ioctl(root, &args.key, &buf_size,
(char __user *)(&uarg->buf[0]));
if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
ret = -EFAULT;
@@ -1843,7 +1733,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
int total_len = 0;
struct btrfs_inode_ref *iref;
struct extent_buffer *l;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
name[0]='\0';
@@ -1904,7 +1794,6 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
ret = 0;
out:
btrfs_put_root(root);
- btrfs_free_path(path);
return ret;
}
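
BTRFS_PATH_AUTO_FREE, AUTO_KFREE and the __free(inode_fs_paths) annotation used in these hunks rely on scope-based cleanup, which is why the converted error paths can return directly instead of jumping to a label that frees the path. A rough userspace illustration of the underlying compiler feature (__attribute__((cleanup)) in GCC and Clang), with a made-up AUTO_FREE helper rather than the kernel macros:

#include <stdio.h>
#include <stdlib.h>

/* Runs automatically when the annotated variable goes out of scope. */
static void auto_free(char **pp)
{
	free(*pp);
	*pp = NULL;
}

#define AUTO_FREE __attribute__((cleanup(auto_free)))

static int work(int fail_early)
{
	AUTO_FREE char *buf = malloc(64);

	if (!buf)
		return -1;
	if (fail_early)
		return -2;	/* buf is freed on return, no goto/label needed */
	return 0;
}

int main(void)
{
	printf("%d %d\n", work(1), work(0));
	return 0;
}

The kernel's DEFINE_FREE()/__free() machinery wraps the same attribute, which is what conversions like these typically build on.
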
@@ -1921,10 +1810,9 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap,
struct btrfs_inode_ref *iref;
struct btrfs_root_ref *rref;
struct btrfs_root *root = NULL;
- struct btrfs_path *path;
- struct btrfs_key key, key2;
+ BTRFS_PATH_AUTO_FREE(path);
+ struct btrfs_key key;
struct extent_buffer *leaf;
- struct inode *temp_inode;
char *ptr;
int slot;
int len;
@@ -1943,15 +1831,15 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap,
ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1];
root = btrfs_get_fs_root(fs_info, treeid, true);
- if (IS_ERR(root)) {
- ret = PTR_ERR(root);
- goto out;
- }
+ if (IS_ERR(root))
+ return PTR_ERR(root);
key.objectid = dirid;
key.type = BTRFS_INODE_REF_KEY;
key.offset = (u64)-1;
while (1) {
+ struct btrfs_inode *temp_inode;
+
ret = btrfs_search_backwards(root, &key, path);
if (ret < 0)
goto out_put;
@@ -1976,24 +1864,6 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap,
read_extent_buffer(leaf, ptr,
(unsigned long)(iref + 1), len);
- /* Check the read+exec permission of this directory */
- ret = btrfs_previous_item(root, path, dirid,
- BTRFS_INODE_ITEM_KEY);
- if (ret < 0) {
- goto out_put;
- } else if (ret > 0) {
- ret = -ENOENT;
- goto out_put;
- }
-
- leaf = path->nodes[0];
- slot = path->slots[0];
- btrfs_item_key_to_cpu(leaf, &key2, slot);
- if (key2.objectid != dirid) {
- ret = -ENOENT;
- goto out_put;
- }
-
/*
* We don't need the path anymore, so release it and
* avoid deadlocks and lockdep warnings in case
@@ -2001,18 +1871,17 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap,
* btree and lock the same leaf.
*/
btrfs_release_path(path);
- temp_inode = btrfs_iget(key2.objectid, root);
+ temp_inode = btrfs_iget(key.offset, root);
if (IS_ERR(temp_inode)) {
ret = PTR_ERR(temp_inode);
goto out_put;
}
- ret = inode_permission(idmap, temp_inode,
+ /* Check the read+exec permission of this directory. */
+ ret = inode_permission(idmap, &temp_inode->vfs_inode,
MAY_READ | MAY_EXEC);
- iput(temp_inode);
- if (ret) {
- ret = -EACCES;
+ iput(&temp_inode->vfs_inode);
+ if (ret)
goto out_put;
- }
if (key.offset == upper_limit)
break;
@@ -2038,12 +1907,10 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap,
key.type = BTRFS_ROOT_REF_KEY;
key.offset = args->treeid;
ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
- if (ret < 0) {
- goto out;
- } else if (ret > 0) {
- ret = -ENOENT;
- goto out;
- }
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ return -ENOENT;
leaf = path->nodes[0];
slot = path->slots[0];
@@ -2053,10 +1920,8 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap,
item_len = btrfs_item_size(leaf, slot);
/* Check if dirid in ROOT_REF corresponds to passed dirid */
rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
- if (args->dirid != btrfs_root_ref_dirid(leaf, rref)) {
- ret = -EINVAL;
- goto out;
- }
+ if (args->dirid != btrfs_root_ref_dirid(leaf, rref))
+ return -EINVAL;
/* Copy subvolume's name */
item_off += sizeof(struct btrfs_root_ref);
@@ -2066,8 +1931,7 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap,
out_put:
btrfs_put_root(root);
-out:
- btrfs_free_path(path);
+
return ret;
}
@@ -2229,7 +2093,7 @@ static int btrfs_ioctl_get_subvol_info(struct inode *inode, void __user *argp)
ret = btrfs_next_leaf(fs_info->tree_root, path);
if (ret < 0) {
goto out;
- } else if (ret > 0) {
+ } else if (unlikely(ret > 0)) {
ret = -EUCLEAN;
goto out;
}
@@ -2312,7 +2176,7 @@ static int btrfs_ioctl_get_subvol_rootref(struct btrfs_root *root,
ret = btrfs_next_leaf(root, path);
if (ret < 0) {
goto out;
- } else if (ret > 0) {
+ } else if (unlikely(ret > 0)) {
ret = -EUCLEAN;
goto out;
}
@@ -2341,7 +2205,7 @@ static int btrfs_ioctl_get_subvol_rootref(struct btrfs_root *root,
ret = btrfs_next_item(root, path);
if (ret < 0) {
goto out;
- } else if (ret > 0) {
+ } else if (unlikely(ret > 0)) {
ret = -EUCLEAN;
goto out;
}
@@ -2380,7 +2244,6 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
struct btrfs_ioctl_vol_args_v2 *vol_args2 = NULL;
struct mnt_idmap *idmap = file_mnt_idmap(file);
char *subvol_name, *subvol_name_ptr = NULL;
- int subvol_namelen;
int ret = 0;
bool destroy_parent = false;
@@ -2503,10 +2366,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
goto out;
}
- subvol_namelen = strlen(subvol_name);
-
if (strchr(subvol_name, '/') ||
- strncmp(subvol_name, "..", subvol_namelen) == 0) {
+ strcmp(subvol_name, "..") == 0) {
ret = -EINVAL;
goto free_subvol_name;
}
@@ -2516,18 +2377,10 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
goto free_subvol_name;
}
- ret = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
- if (ret == -EINTR)
- goto free_subvol_name;
- dentry = lookup_one(idmap, subvol_name, parent, subvol_namelen);
+ dentry = start_removing_killable(idmap, parent, &QSTR(subvol_name));
if (IS_ERR(dentry)) {
ret = PTR_ERR(dentry);
- goto out_unlock_dir;
- }
-
- if (d_really_is_negative(dentry)) {
- ret = -ENOENT;
- goto out_dput;
+ goto out_end_removing;
}
inode = d_inode(dentry);
@@ -2548,7 +2401,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
*/
ret = -EPERM;
if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
- goto out_dput;
+ goto out_end_removing;
/*
* Do not allow deletion if the parent dir is the same
@@ -2559,21 +2412,21 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
*/
ret = -EINVAL;
if (root == dest)
- goto out_dput;
+ goto out_end_removing;
ret = inode_permission(idmap, inode, MAY_WRITE | MAY_EXEC);
if (ret)
- goto out_dput;
+ goto out_end_removing;
}
/* check if subvolume may be deleted by a user */
ret = btrfs_may_delete(idmap, dir, dentry, 1);
if (ret)
- goto out_dput;
+ goto out_end_removing;
if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
ret = -EINVAL;
- goto out_dput;
+ goto out_end_removing;
}
btrfs_inode_lock(BTRFS_I(inode), 0);
@@ -2582,10 +2435,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
if (!ret)
d_delete_notify(dir, dentry);
-out_dput:
- dput(dentry);
-out_unlock_dir:
- btrfs_inode_unlock(BTRFS_I(dir), 0);
+out_end_removing:
+ end_removing(dentry);
free_subvol_name:
kfree(subvol_name_ptr);
free_parent:
@@ -2635,6 +2486,15 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
goto out;
}
+ /*
+ * Don't allow defrag on pre-content watched files, as it could
+ * populate the page cache with 0's via readahead.
+ */
+ if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (argp) {
if (copy_from_user(&range, argp, sizeof(range))) {
ret = -EFAULT;
@@ -2644,8 +2504,14 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
ret = -EOPNOTSUPP;
goto out;
}
- /* compression requires us to start the IO */
- if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
+ if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS) &&
+ (range.flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS)) {
+ ret = -EINVAL;
+ goto out;
+ }
+		/* Compression or no-compression requires us to start the IO. */
+ if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS) ||
+ (range.flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS)) {
range.flags |= BTRFS_DEFRAG_RANGE_START_IO;
range.extent_thresh = (u32)-1;
}
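
The added defrag checks treat BTRFS_DEFRAG_RANGE_COMPRESS and BTRFS_DEFRAG_RANGE_NOCOMPRESS as mutually exclusive, and either one forces BTRFS_DEFRAG_RANGE_START_IO. A small sketch of that style of flag validation, with invented bit values used purely for illustration (the real flags live in the btrfs UAPI header):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit values only, not the real BTRFS_DEFRAG_RANGE_* flags. */
#define RANGE_COMPRESS   (1U << 0)
#define RANGE_NOCOMPRESS (1U << 1)
#define RANGE_START_IO   (1U << 2)

static int check_defrag_flags(uint32_t *flags)
{
	/* Asking for both compression and no-compression makes no sense. */
	if ((*flags & RANGE_COMPRESS) && (*flags & RANGE_NOCOMPRESS))
		return -EINVAL;

	/* Either option requires the IO to be started. */
	if (*flags & (RANGE_COMPRESS | RANGE_NOCOMPRESS))
		*flags |= RANGE_START_IO;

	return 0;
}

int main(void)
{
	uint32_t ok = RANGE_COMPRESS;
	uint32_t bad = RANGE_COMPRESS | RANGE_NOCOMPRESS;

	printf("%d flags=%#x\n", check_defrag_flags(&ok), ok);	/* 0 flags=0x5 */
	printf("%d\n", check_defrag_flags(&bad));		/* -22 */
	return 0;
}
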
@@ -2653,7 +2519,7 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
/* the rest are all set to zero by kzalloc */
range.len = (u64)-1;
}
- ret = btrfs_defrag_file(file_inode(file), &file->f_ra,
+ ret = btrfs_defrag_file(BTRFS_I(file_inode(file)), &file->f_ra,
&range, BTRFS_OLDEST_GENERATION, 0);
if (ret > 0)
ret = 0;
@@ -2786,7 +2652,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
err_drop:
mnt_drop_write_file(file);
if (bdev_file)
- fput(bdev_file);
+ bdev_fput(bdev_file);
out:
btrfs_put_dev_args_from_path(&args);
kfree(vol_args);
@@ -2837,7 +2703,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
mnt_drop_write_file(file);
if (bdev_file)
- fput(bdev_file);
+ bdev_fput(bdev_file);
out:
btrfs_put_dev_args_from_path(&args);
out_free:
@@ -2845,7 +2711,7 @@ out_free:
return ret;
}
-static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
+static long btrfs_ioctl_fs_info(const struct btrfs_fs_info *fs_info,
void __user *arg)
{
struct btrfs_ioctl_fs_info_args *fi_args;
@@ -2899,7 +2765,7 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
return ret;
}
-static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
+static long btrfs_ioctl_dev_info(const struct btrfs_fs_info *fs_info,
void __user *arg)
{
BTRFS_DEV_LOOKUP_ARGS(args);
@@ -2976,7 +2842,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
ret = PTR_ERR(new_root);
goto out;
}
- if (!is_fstree(btrfs_root_id(new_root))) {
+ if (!btrfs_is_fstree(btrfs_root_id(new_root))) {
ret = -ENOENT;
goto out_free;
}
@@ -3007,7 +2873,6 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_release_path(path);
btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
@@ -3041,7 +2906,7 @@ static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_space_args space_args = { 0 };
struct btrfs_ioctl_space_info space;
struct btrfs_ioctl_space_info *dest;
- struct btrfs_ioctl_space_info *dest_orig;
+ struct btrfs_ioctl_space_info AUTO_KFREE(dest_orig);
struct btrfs_ioctl_space_info __user *user_dest;
struct btrfs_space_info *info;
static const u64 types[] = {
@@ -3162,9 +3027,8 @@ static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
(arg + sizeof(struct btrfs_ioctl_space_args));
if (copy_to_user(user_dest, dest_orig, alloc_size))
- ret = -EFAULT;
+ return -EFAULT;
- kfree(dest_orig);
out:
if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
ret = -EFAULT;
@@ -3226,7 +3090,7 @@ static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
return -EPERM;
if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
- btrfs_err(fs_info, "scrub is not supported on extent tree v2 yet");
+ btrfs_err(fs_info, "scrub: extent tree v2 not yet supported");
return -EINVAL;
}
@@ -3383,7 +3247,7 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
u64 rel_ptr;
int size;
struct btrfs_ioctl_ino_path_args *ipa = NULL;
- struct inode_fs_paths *ipath = NULL;
+ struct inode_fs_paths *ipath __free(inode_fs_paths) = NULL;
struct btrfs_path *path;
if (!capable(CAP_DAC_READ_SEARCH))
@@ -3431,7 +3295,6 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
out:
btrfs_free_path(path);
- free_ipath(ipath);
kfree(ipa);
return ret;
@@ -3444,7 +3307,6 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
int size;
struct btrfs_ioctl_logical_ino_args *loi;
struct btrfs_data_container *inodes = NULL;
- struct btrfs_path *path = NULL;
bool ignore_offset;
if (!capable(CAP_SYS_ADMIN))
@@ -3478,14 +3340,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
goto out_loi;
}
- path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
- ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
- inodes, ignore_offset);
- btrfs_free_path(path);
+ ret = iterate_inodes_from_logical(loi->logical, fs_info, inodes, ignore_offset);
if (ret == -EINVAL)
ret = -ENOENT;
if (ret < 0)
@@ -3704,7 +3559,7 @@ static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
void __user *arg)
{
- struct btrfs_ioctl_balance_args *bargs;
+ struct btrfs_ioctl_balance_args AUTO_KFREE(bargs);
int ret = 0;
if (!capable(CAP_SYS_ADMIN))
@@ -3726,8 +3581,6 @@ static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
if (copy_to_user(arg, bargs, sizeof(*bargs)))
ret = -EFAULT;
-
- kfree(bargs);
out:
mutex_unlock(&fs_info->balance_mutex);
return ret;
@@ -3802,22 +3655,6 @@ drop_write:
return ret;
}
-/*
- * Quick check for ioctl handlers if quotas are enabled. Proper locking must be
- * done before any operations.
- */
-static bool qgroup_enabled(struct btrfs_fs_info *fs_info)
-{
- bool ret = true;
-
- mutex_lock(&fs_info->qgroup_ioctl_lock);
- if (!fs_info->quota_root)
- ret = false;
- mutex_unlock(&fs_info->qgroup_ioctl_lock);
-
- return ret;
-}
-
static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
@@ -3832,7 +3669,7 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!qgroup_enabled(root->fs_info))
+ if (!btrfs_qgroup_enabled(fs_info))
return -ENOTCONN;
ret = mnt_want_write_file(file);
@@ -3849,7 +3686,7 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
if (!prealloc) {
ret = -ENOMEM;
- goto drop_write;
+ goto out;
}
}
@@ -3902,7 +3739,7 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!qgroup_enabled(root->fs_info))
+ if (!btrfs_qgroup_enabled(root->fs_info))
return -ENOTCONN;
ret = mnt_want_write_file(file);
@@ -3920,7 +3757,7 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
goto out;
}
- if (sa->create && is_fstree(sa->qgroupid)) {
+ if (sa->create && btrfs_is_fstree(sa->qgroupid)) {
ret = -EINVAL;
goto out;
}
@@ -3961,7 +3798,7 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!qgroup_enabled(root->fs_info))
+ if (!btrfs_qgroup_enabled(root->fs_info))
return -ENOTCONN;
ret = mnt_want_write_file(file);
@@ -4009,7 +3846,7 @@ static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!qgroup_enabled(fs_info))
+ if (!btrfs_qgroup_enabled(fs_info))
return -ENOTCONN;
ret = mnt_want_write_file(file);
@@ -4117,7 +3954,7 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
btrfs_root_id(root));
- if (ret && ret != -ENOENT) {
+ if (unlikely(ret && ret != -ENOENT)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
goto out;
@@ -4141,7 +3978,7 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
ret = btrfs_uuid_tree_add(trans, sa->uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
btrfs_root_id(root));
- if (ret < 0 && ret != -EEXIST) {
+ if (unlikely(ret < 0 && ret != -EEXIST)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
goto out;
@@ -4287,7 +4124,7 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
}
spin_lock(&fs_info->super_lock);
- strcpy(super_block->label, label);
+ strscpy(super_block->label, label);
spin_unlock(&fs_info->super_lock);
ret = btrfs_commit_transaction(trans);
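
The strcpy() to strscpy() switch above copies the new label into the fixed-size superblock field using the destination array's size, so the copy is bounded and always NUL-terminated. strscpy() itself is a kernel helper; a hedged userspace approximation of its contract could look like this:

#include <stdio.h>
#include <string.h>

/*
 * Rough stand-in for the kernel's strscpy(): copy at most size - 1 bytes,
 * always NUL-terminate, and return -1 (the kernel returns -E2BIG) when the
 * source had to be truncated.
 */
static long bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len;

	if (size == 0)
		return -1;

	len = strnlen(src, size);
	if (len == size) {			/* does not fit: truncate */
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -1;
	}
	memcpy(dst, src, len + 1);		/* fits, including the NUL */
	return (long)len;
}

int main(void)
{
	char label[8];

	printf("%ld '%s'\n", bounded_copy(label, "data", sizeof(label)), label);
	printf("%ld '%s'\n", bounded_copy(label, "a-very-long-label", sizeof(label)), label);
	return 0;
}
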
@@ -4331,13 +4168,13 @@ static int btrfs_ioctl_get_features(struct btrfs_fs_info *fs_info,
return 0;
}
-static int check_feature_bits(struct btrfs_fs_info *fs_info,
+static int check_feature_bits(const struct btrfs_fs_info *fs_info,
enum btrfs_feature_set set,
u64 change_mask, u64 flags, u64 supported_flags,
u64 safe_set, u64 safe_clear)
{
const char *type = btrfs_feature_set_name(set);
- char *names;
+ const char AUTO_KFREE(names);
u64 disallowed, unsupported;
u64 set_mask = flags & change_mask;
u64 clear_mask = ~flags & change_mask;
@@ -4345,12 +4182,11 @@ static int check_feature_bits(struct btrfs_fs_info *fs_info,
unsupported = set_mask & ~supported_flags;
if (unsupported) {
names = btrfs_printable_features(set, unsupported);
- if (names) {
+ if (names)
btrfs_warn(fs_info,
"this kernel does not support the %s feature bit%s",
names, strchr(names, ',') ? "s" : "");
- kfree(names);
- } else
+ else
btrfs_warn(fs_info,
"this kernel does not support %s bits 0x%llx",
type, unsupported);
@@ -4360,12 +4196,11 @@ static int check_feature_bits(struct btrfs_fs_info *fs_info,
disallowed = set_mask & ~safe_set;
if (disallowed) {
names = btrfs_printable_features(set, disallowed);
- if (names) {
+ if (names)
btrfs_warn(fs_info,
"can't set the %s feature bit%s while mounted",
names, strchr(names, ',') ? "s" : "");
- kfree(names);
- } else
+ else
btrfs_warn(fs_info,
"can't set %s bits 0x%llx while mounted",
type, disallowed);
@@ -4375,12 +4210,11 @@ static int check_feature_bits(struct btrfs_fs_info *fs_info,
disallowed = clear_mask & ~safe_clear;
if (disallowed) {
names = btrfs_printable_features(set, disallowed);
- if (names) {
+ if (names)
btrfs_warn(fs_info,
"can't clear the %s feature bit%s while mounted",
names, strchr(names, ',') ? "s" : "");
- kfree(names);
- } else
+ else
btrfs_warn(fs_info,
"can't clear %s bits 0x%llx while mounted",
type, disallowed);
@@ -4467,7 +4301,7 @@ out_drop_write:
return ret;
}
-static int _btrfs_ioctl_send(struct btrfs_inode *inode, void __user *argp, bool compat)
+static int _btrfs_ioctl_send(struct btrfs_root *root, void __user *argp, bool compat)
{
struct btrfs_ioctl_send_args *arg;
int ret;
@@ -4498,7 +4332,7 @@ static int _btrfs_ioctl_send(struct btrfs_inode *inode, void __user *argp, bool
if (IS_ERR(arg))
return PTR_ERR(arg);
}
- ret = btrfs_ioctl_send(inode, arg);
+ ret = btrfs_ioctl_send(root, arg);
kfree(arg);
return ret;
}
@@ -4594,7 +4428,7 @@ static int btrfs_ioctl_encoded_read(struct file *file, void __user *argp,
args.compression, &unlocked);
if (!unlocked) {
- unlock_extent(io_tree, start, lockend, &cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
}
}
@@ -4716,6 +4550,13 @@ out_acct:
return ret;
}
+struct btrfs_uring_encoded_data {
+ struct btrfs_ioctl_encoded_io_args args;
+ struct iovec iovstack[UIO_FASTIOV];
+ struct iovec *iov;
+ struct iov_iter iter;
+};
+
/*
* Context that's attached to an encoded read io_uring command, in cmd->pdu. It
* contains the fields in btrfs_uring_read_extent that are necessary to finish
@@ -4737,20 +4578,25 @@ struct btrfs_uring_priv {
};
struct io_btrfs_cmd {
+ struct btrfs_uring_encoded_data *data;
struct btrfs_uring_priv *priv;
};
-static void btrfs_uring_read_finished(struct io_uring_cmd *cmd, unsigned int issue_flags)
+static void btrfs_uring_read_finished(struct io_tw_req tw_req, io_tw_token_t tw)
{
+ struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
struct io_btrfs_cmd *bc = io_uring_cmd_to_pdu(cmd, struct io_btrfs_cmd);
struct btrfs_uring_priv *priv = bc->priv;
struct btrfs_inode *inode = BTRFS_I(file_inode(priv->iocb.ki_filp));
struct extent_io_tree *io_tree = &inode->io_tree;
- unsigned long index;
+ pgoff_t index;
u64 cur;
size_t page_offset;
ssize_t ret;
+ /* The inode lock has already been acquired in btrfs_uring_read_extent. */
+ btrfs_lockdep_inode_acquire(inode, i_rwsem);
+
if (priv->err) {
ret = priv->err;
goto out;
@@ -4780,10 +4626,10 @@ static void btrfs_uring_read_finished(struct io_uring_cmd *cmd, unsigned int iss
ret = priv->count;
out:
- unlock_extent(io_tree, priv->start, priv->lockend, &priv->cached_state);
+ btrfs_unlock_extent(io_tree, priv->start, priv->lockend, &priv->cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
- io_uring_cmd_done(cmd, ret, 0, issue_flags);
+ io_uring_cmd_done(cmd, ret, IO_URING_CMD_TASK_WORK_ISSUE_FLAGS);
add_rchar(current, ret);
for (index = 0; index < priv->nr_pages; index++)
@@ -4792,6 +4638,7 @@ out:
kfree(priv->pages);
kfree(priv->iov);
kfree(priv);
+ kfree(bc->data);
}
void btrfs_uring_read_extent_endio(void *ctx, int err)
@@ -4859,10 +4706,17 @@ static int btrfs_uring_read_extent(struct kiocb *iocb, struct iov_iter *iter,
* and inode and freeing the allocations.
*/
+ /*
+ * We're returning to userspace with the inode lock held, and that's
+ * okay - it'll get unlocked in a worker thread. Call
+ * btrfs_lockdep_inode_release() to avoid confusing lockdep.
+ */
+ btrfs_lockdep_inode_release(inode, i_rwsem);
+
return -EIOCBQUEUED;
out_fail:
- unlock_extent(io_tree, start, lockend, &cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
kfree(priv);
return ret;
@@ -4870,73 +4724,92 @@ out_fail:
static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
+ struct file *file = cmd->file;
+ struct btrfs_inode *inode = BTRFS_I(file->f_inode);
+ struct extent_io_tree *io_tree = &inode->io_tree;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
size_t copy_end_kernel = offsetofend(struct btrfs_ioctl_encoded_io_args, flags);
size_t copy_end;
- struct btrfs_ioctl_encoded_io_args args = { 0 };
int ret;
u64 disk_bytenr, disk_io_size;
- struct file *file;
- struct btrfs_inode *inode;
- struct btrfs_fs_info *fs_info;
- struct extent_io_tree *io_tree;
- struct iovec iovstack[UIO_FASTIOV];
- struct iovec *iov = iovstack;
- struct iov_iter iter;
loff_t pos;
struct kiocb kiocb;
struct extent_state *cached_state = NULL;
u64 start, lockend;
void __user *sqe_addr;
+ struct io_btrfs_cmd *bc = io_uring_cmd_to_pdu(cmd, struct io_btrfs_cmd);
+ struct btrfs_uring_encoded_data *data = NULL;
+
+ if (cmd->flags & IORING_URING_CMD_REISSUE)
+ data = bc->data;
if (!capable(CAP_SYS_ADMIN)) {
ret = -EPERM;
goto out_acct;
}
- file = cmd->file;
- inode = BTRFS_I(file->f_inode);
- fs_info = inode->root->fs_info;
- io_tree = &inode->io_tree;
sqe_addr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr));
if (issue_flags & IO_URING_F_COMPAT) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
- struct btrfs_ioctl_encoded_io_args_32 args32;
-
copy_end = offsetofend(struct btrfs_ioctl_encoded_io_args_32, flags);
- if (copy_from_user(&args32, sqe_addr, copy_end)) {
- ret = -EFAULT;
- goto out_acct;
- }
- args.iov = compat_ptr(args32.iov);
- args.iovcnt = args32.iovcnt;
- args.offset = args32.offset;
- args.flags = args32.flags;
#else
- return -ENOTTY;
+ ret = -ENOTTY;
+ goto out_acct;
#endif
} else {
copy_end = copy_end_kernel;
- if (copy_from_user(&args, sqe_addr, copy_end)) {
- ret = -EFAULT;
+ }
+
+ if (!data) {
+ data = kzalloc(sizeof(*data), GFP_NOFS);
+ if (!data) {
+ ret = -ENOMEM;
goto out_acct;
}
- }
- if (args.flags != 0)
- return -EINVAL;
+ bc->data = data;
- ret = import_iovec(ITER_DEST, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
- &iov, &iter);
- if (ret < 0)
- goto out_acct;
+ if (issue_flags & IO_URING_F_COMPAT) {
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ struct btrfs_ioctl_encoded_io_args_32 args32;
- if (iov_iter_count(&iter) == 0) {
- ret = 0;
- goto out_free;
+ if (copy_from_user(&args32, sqe_addr, copy_end)) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+
+ data->args.iov = compat_ptr(args32.iov);
+ data->args.iovcnt = args32.iovcnt;
+ data->args.offset = args32.offset;
+ data->args.flags = args32.flags;
+#endif
+ } else {
+ if (copy_from_user(&data->args, sqe_addr, copy_end)) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+ }
+
+ if (data->args.flags != 0) {
+ ret = -EINVAL;
+ goto out_acct;
+ }
+
+ data->iov = data->iovstack;
+ ret = import_iovec(ITER_DEST, data->args.iov, data->args.iovcnt,
+ ARRAY_SIZE(data->iovstack), &data->iov,
+ &data->iter);
+ if (ret < 0)
+ goto out_acct;
+
+ if (iov_iter_count(&data->iter) == 0) {
+ ret = 0;
+ goto out_free;
+ }
}
- pos = args.offset;
- ret = rw_verify_area(READ, file, &pos, args.len);
+ pos = data->args.offset;
+ ret = rw_verify_area(READ, file, &pos, data->args.len);
if (ret < 0)
goto out_free;
@@ -4949,17 +4822,20 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
start = ALIGN_DOWN(pos, fs_info->sectorsize);
lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
- ret = btrfs_encoded_read(&kiocb, &iter, &args, &cached_state,
+ ret = btrfs_encoded_read(&kiocb, &data->iter, &data->args, &cached_state,
&disk_bytenr, &disk_io_size);
+ if (ret == -EAGAIN)
+ goto out_acct;
if (ret < 0 && ret != -EIOCBQUEUED)
goto out_free;
file_accessed(file);
- if (copy_to_user(sqe_addr + copy_end, (const char *)&args + copy_end_kernel,
- sizeof(args) - copy_end_kernel)) {
+ if (copy_to_user(sqe_addr + copy_end,
+ (const char *)&data->args + copy_end_kernel,
+ sizeof(data->args) - copy_end_kernel)) {
if (ret == -EIOCBQUEUED) {
- unlock_extent(io_tree, start, lockend, &cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
}
ret = -EFAULT;
@@ -4967,57 +4843,178 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
}
if (ret == -EIOCBQUEUED) {
- u64 count;
+ u64 count = min_t(u64, iov_iter_count(&data->iter), disk_io_size);
- /*
- * If we've optimized things by storing the iovecs on the stack,
- * undo this.
- */
- if (!iov) {
- iov = kmalloc(sizeof(struct iovec) * args.iovcnt, GFP_NOFS);
- if (!iov) {
- unlock_extent(io_tree, start, lockend, &cached_state);
- btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
- ret = -ENOMEM;
+ /* Match ioctl by not returning past EOF if uncompressed. */
+ if (!data->args.compression)
+ count = min_t(u64, count, data->args.len);
+
+ ret = btrfs_uring_read_extent(&kiocb, &data->iter, start, lockend,
+ cached_state, disk_bytenr, disk_io_size,
+ count, data->args.compression,
+ data->iov, cmd);
+
+ goto out_acct;
+ }
+
+out_free:
+ kfree(data->iov);
+
+out_acct:
+ if (ret > 0)
+ add_rchar(current, ret);
+ inc_syscr(current);
+
+ if (ret != -EIOCBQUEUED && ret != -EAGAIN)
+ kfree(data);
+
+ return ret;
+}
+
+static int btrfs_uring_encoded_write(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ struct file *file = cmd->file;
+ loff_t pos;
+ struct kiocb kiocb;
+ ssize_t ret;
+ void __user *sqe_addr;
+ struct io_btrfs_cmd *bc = io_uring_cmd_to_pdu(cmd, struct io_btrfs_cmd);
+ struct btrfs_uring_encoded_data *data = NULL;
+
+ if (cmd->flags & IORING_URING_CMD_REISSUE)
+ data = bc->data;
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto out_acct;
+ }
+ sqe_addr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr));
+
+ if (!(file->f_mode & FMODE_WRITE)) {
+ ret = -EBADF;
+ goto out_acct;
+ }
+
+ if (!data) {
+ data = kzalloc(sizeof(*data), GFP_NOFS);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out_acct;
+ }
+
+ bc->data = data;
+
+ if (issue_flags & IO_URING_F_COMPAT) {
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ struct btrfs_ioctl_encoded_io_args_32 args32;
+
+ if (copy_from_user(&args32, sqe_addr, sizeof(args32))) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+ data->args.iov = compat_ptr(args32.iov);
+ data->args.iovcnt = args32.iovcnt;
+ data->args.offset = args32.offset;
+ data->args.flags = args32.flags;
+ data->args.len = args32.len;
+ data->args.unencoded_len = args32.unencoded_len;
+ data->args.unencoded_offset = args32.unencoded_offset;
+ data->args.compression = args32.compression;
+ data->args.encryption = args32.encryption;
+ memcpy(data->args.reserved, args32.reserved,
+ sizeof(data->args.reserved));
+#else
+ ret = -ENOTTY;
+ goto out_acct;
+#endif
+ } else {
+ if (copy_from_user(&data->args, sqe_addr, sizeof(data->args))) {
+ ret = -EFAULT;
goto out_acct;
}
-
- memcpy(iov, iovstack, sizeof(struct iovec) * args.iovcnt);
}
- count = min_t(u64, iov_iter_count(&iter), disk_io_size);
+ ret = -EINVAL;
+ if (data->args.flags != 0)
+ goto out_acct;
+ if (memchr_inv(data->args.reserved, 0, sizeof(data->args.reserved)))
+ goto out_acct;
+ if (data->args.compression == BTRFS_ENCODED_IO_COMPRESSION_NONE &&
+ data->args.encryption == BTRFS_ENCODED_IO_ENCRYPTION_NONE)
+ goto out_acct;
+ if (data->args.compression >= BTRFS_ENCODED_IO_COMPRESSION_TYPES ||
+ data->args.encryption >= BTRFS_ENCODED_IO_ENCRYPTION_TYPES)
+ goto out_acct;
+ if (data->args.unencoded_offset > data->args.unencoded_len)
+ goto out_acct;
+ if (data->args.len > data->args.unencoded_len - data->args.unencoded_offset)
+ goto out_acct;
- /* Match ioctl by not returning past EOF if uncompressed. */
- if (!args.compression)
- count = min_t(u64, count, args.len);
+ data->iov = data->iovstack;
+ ret = import_iovec(ITER_SOURCE, data->args.iov, data->args.iovcnt,
+ ARRAY_SIZE(data->iovstack), &data->iov,
+ &data->iter);
+ if (ret < 0)
+ goto out_acct;
- ret = btrfs_uring_read_extent(&kiocb, &iter, start, lockend,
- cached_state, disk_bytenr,
- disk_io_size, count,
- args.compression, iov, cmd);
+ if (iov_iter_count(&data->iter) == 0) {
+ ret = 0;
+ goto out_iov;
+ }
+ }
+ if (issue_flags & IO_URING_F_NONBLOCK) {
+ ret = -EAGAIN;
goto out_acct;
}
-out_free:
- kfree(iov);
+ pos = data->args.offset;
+ ret = rw_verify_area(WRITE, file, &pos, data->args.len);
+ if (ret < 0)
+ goto out_iov;
+
+ init_sync_kiocb(&kiocb, file);
+ ret = kiocb_set_rw_flags(&kiocb, 0, WRITE);
+ if (ret)
+ goto out_iov;
+ kiocb.ki_pos = pos;
+
+ file_start_write(file);
+
+ ret = btrfs_do_write_iter(&kiocb, &data->iter, &data->args);
+ if (ret > 0)
+ fsnotify_modify(file);
+ file_end_write(file);
+out_iov:
+ kfree(data->iov);
out_acct:
if (ret > 0)
- add_rchar(current, ret);
- inc_syscr(current);
+ add_wchar(current, ret);
+ inc_syscw(current);
+ if (ret != -EAGAIN)
+ kfree(data);
return ret;
}
int btrfs_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
+ if (unlikely(btrfs_is_shutdown(inode_to_fs_info(file_inode(cmd->file)))))
+ return -EIO;
+
switch (cmd->cmd_op) {
case BTRFS_IOC_ENCODED_READ:
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
case BTRFS_IOC_ENCODED_READ_32:
#endif
return btrfs_uring_encoded_read(cmd, issue_flags);
+
+ case BTRFS_IOC_ENCODED_WRITE:
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ case BTRFS_IOC_ENCODED_WRITE_32:
+#endif
+ return btrfs_uring_encoded_write(cmd, issue_flags);
}
return -EINVAL;
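
btrfs_uring_encoded_read() and btrfs_uring_encoded_write() above allocate a btrfs_uring_encoded_data once, hang it off the command's pdu, and only re-parse the userspace arguments when IORING_URING_CMD_REISSUE is not set, so a nonblocking attempt that returns -EAGAIN is retried without re-reading (or re-validating) user memory. A simplified, self-contained sketch of that parse-once/retry pattern, using invented types and field names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins for the io_uring command and its per-command pdu. */
struct parsed_args {
	char payload[32];
	int iovcnt;
};

struct cmd {
	int reissue;			/* mimics IORING_URING_CMD_REISSUE */
	struct parsed_args *cached;	/* mimics the data pointer kept in the pdu */
};

static int handle(struct cmd *cmd, const char *user_args, int nonblock)
{
	struct parsed_args *data = cmd->reissue ? cmd->cached : NULL;

	if (!data) {
		/* First pass: parse the arguments and cache them. */
		data = calloc(1, sizeof(*data));
		if (!data)
			return -ENOMEM;
		snprintf(data->payload, sizeof(data->payload), "%s", user_args);
		data->iovcnt = 1;
		cmd->cached = data;
	}

	if (nonblock) {
		/* Would block: keep the cached arguments and ask to be reissued. */
		cmd->reissue = 1;
		return -EAGAIN;
	}

	printf("doing IO for '%s'\n", data->payload);
	free(data);
	cmd->cached = NULL;
	return 0;
}

int main(void)
{
	struct cmd cmd = { 0 };

	printf("%d\n", handle(&cmd, "encoded-read", 1));	/* -EAGAIN, args cached */
	printf("%d\n", handle(&cmd, "ignored on retry", 0));	/* retry reuses the cache */
	return 0;
}
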
@@ -5149,6 +5146,43 @@ static int btrfs_ioctl_subvol_sync(struct btrfs_fs_info *fs_info, void __user *a
return 0;
}
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+static int btrfs_ioctl_shutdown(struct btrfs_fs_info *fs_info, unsigned long arg)
+{
+ int ret = 0;
+ u32 flags;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (get_user(flags, (u32 __user *)arg))
+ return -EFAULT;
+
+ if (flags >= BTRFS_SHUTDOWN_FLAGS_LAST)
+ return -EINVAL;
+
+ if (btrfs_is_shutdown(fs_info))
+ return 0;
+
+ switch (flags) {
+ case BTRFS_SHUTDOWN_FLAGS_LOGFLUSH:
+ case BTRFS_SHUTDOWN_FLAGS_DEFAULT:
+ ret = freeze_super(fs_info->sb, FREEZE_HOLDER_KERNEL, NULL);
+ if (ret)
+ return ret;
+ btrfs_force_shutdown(fs_info);
+ ret = thaw_super(fs_info->sb, FREEZE_HOLDER_KERNEL, NULL);
+ if (ret)
+ return ret;
+ break;
+ case BTRFS_SHUTDOWN_FLAGS_NOLOGFLUSH:
+ btrfs_force_shutdown(fs_info);
+ break;
+ }
+ return ret;
+}
+#endif
+
long btrfs_ioctl(struct file *file, unsigned int
cmd, unsigned long arg)
{
@@ -5167,19 +5201,19 @@ long btrfs_ioctl(struct file *file, unsigned int
case FITRIM:
return btrfs_ioctl_fitrim(fs_info, argp);
case BTRFS_IOC_SNAP_CREATE:
- return btrfs_ioctl_snap_create(file, argp, 0);
+ return btrfs_ioctl_snap_create(file, argp, false);
case BTRFS_IOC_SNAP_CREATE_V2:
- return btrfs_ioctl_snap_create_v2(file, argp, 0);
+ return btrfs_ioctl_snap_create_v2(file, argp, false);
case BTRFS_IOC_SUBVOL_CREATE:
- return btrfs_ioctl_snap_create(file, argp, 1);
+ return btrfs_ioctl_snap_create(file, argp, true);
case BTRFS_IOC_SUBVOL_CREATE_V2:
- return btrfs_ioctl_snap_create_v2(file, argp, 1);
+ return btrfs_ioctl_snap_create_v2(file, argp, true);
case BTRFS_IOC_SNAP_DESTROY:
return btrfs_ioctl_snap_destroy(file, argp, false);
case BTRFS_IOC_SNAP_DESTROY_V2:
return btrfs_ioctl_snap_destroy(file, argp, true);
case BTRFS_IOC_SUBVOL_GETFLAGS:
- return btrfs_ioctl_subvol_getflags(inode, argp);
+ return btrfs_ioctl_subvol_getflags(BTRFS_I(inode), argp);
case BTRFS_IOC_SUBVOL_SETFLAGS:
return btrfs_ioctl_subvol_setflags(file, argp);
case BTRFS_IOC_DEFAULT_SUBVOL:
@@ -5201,9 +5235,9 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_DEV_INFO:
return btrfs_ioctl_dev_info(fs_info, argp);
case BTRFS_IOC_TREE_SEARCH:
- return btrfs_ioctl_tree_search(inode, argp);
+ return btrfs_ioctl_tree_search(root, argp);
case BTRFS_IOC_TREE_SEARCH_V2:
- return btrfs_ioctl_tree_search_v2(inode, argp);
+ return btrfs_ioctl_tree_search_v2(root, argp);
case BTRFS_IOC_INO_LOOKUP:
return btrfs_ioctl_ino_lookup(root, argp);
case BTRFS_IOC_INO_PATHS:
@@ -5251,10 +5285,10 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
case BTRFS_IOC_SEND:
- return _btrfs_ioctl_send(BTRFS_I(inode), argp, false);
+ return _btrfs_ioctl_send(root, argp, false);
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
case BTRFS_IOC_SEND_32:
- return _btrfs_ioctl_send(BTRFS_I(inode), argp, true);
+ return _btrfs_ioctl_send(root, argp, true);
#endif
case BTRFS_IOC_GET_DEV_STATS:
return btrfs_ioctl_get_dev_stats(fs_info, argp);
@@ -5290,6 +5324,8 @@ long btrfs_ioctl(struct file *file, unsigned int
return fsverity_ioctl_enable(file, (const void __user *)argp);
case FS_IOC_MEASURE_VERITY:
return fsverity_ioctl_measure(file, argp);
+ case FS_IOC_READ_VERITY_METADATA:
+ return fsverity_ioctl_read_metadata(file, argp);
case BTRFS_IOC_ENCODED_READ:
return btrfs_ioctl_encoded_read(file, argp, false);
case BTRFS_IOC_ENCODED_WRITE:
@@ -5302,6 +5338,10 @@ long btrfs_ioctl(struct file *file, unsigned int
#endif
case BTRFS_IOC_SUBVOL_SYNC_WAIT:
return btrfs_ioctl_subvol_sync(fs_info, argp);
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ case BTRFS_IOC_SHUTDOWN:
+ return btrfs_ioctl_shutdown(fs_info, arg);
+#endif
}
return -ENOTTY;
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index 2b760c8778f8..ccf6bed9cc24 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -8,18 +8,19 @@
struct file;
struct dentry;
struct mnt_idmap;
-struct fileattr;
+struct file_kattr;
+struct io_uring_cmd;
+struct btrfs_inode;
struct btrfs_fs_info;
struct btrfs_ioctl_balance_args;
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+int btrfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
int btrfs_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa);
+ struct dentry *dentry, struct file_kattr *fa);
int btrfs_ioctl_get_supported_features(void __user *arg);
-void btrfs_sync_inode_flags_to_i_flags(struct inode *inode);
-int __pure btrfs_is_empty_uuid(const u8 *uuid);
+void btrfs_sync_inode_flags_to_i_flags(struct btrfs_inode *inode);
void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_balance_args *bargs);
int btrfs_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 9a7a7b723305..0035851d72b0 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -9,7 +9,6 @@
#include <linux/page-flags.h>
#include <asm/bug.h>
#include <trace/events/btrfs.h>
-#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"
@@ -150,15 +149,15 @@ void btrfs_tree_read_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesti
/*
* Try-lock for read.
*
- * Return 1 if the rwlock has been taken, 0 otherwise
+ * Return true if the rwlock has been taken, false otherwise
*/
-int btrfs_try_tree_read_lock(struct extent_buffer *eb)
+bool btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
if (down_read_trylock(&eb->lock)) {
trace_btrfs_try_tree_read_lock(eb);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/*
@@ -362,7 +361,7 @@ void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
atomic_inc(&lock->readers);
/*
- * Ensure the pending reader count is perceieved BEFORE this reader
+ * Ensure the pending reader count is perceived BEFORE this reader
* goes to sleep in case of active writers. This guarantees new writers
* won't be allowed and that the current reader will be woken up when
* the last active writer finishes its jobs.
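
btrfs_try_tree_read_lock() now returns bool rather than the 0/1 int it used before, matching how every caller treats the result. A comparable userspace wrapper around pthread_rwlock_tryrdlock(), shown only to illustrate the try-lock-returning-bool shape (build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

/* Return true if the read lock was taken, false otherwise. */
static bool try_read_lock(pthread_rwlock_t *l)
{
	return pthread_rwlock_tryrdlock(l) == 0;
}

int main(void)
{
	if (try_read_lock(&lock)) {
		printf("read lock taken\n");
		pthread_rwlock_unlock(&lock);
	}

	pthread_rwlock_wrlock(&lock);	/* a writer now blocks try-readers */
	printf("try under writer: %d\n", try_read_lock(&lock));	/* 0 */
	pthread_rwlock_unlock(&lock);
	return 0;
}
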
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 46c8be2afab1..a4673e7d95d7 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -74,7 +74,7 @@ enum btrfs_lock_nesting {
BTRFS_NESTING_NEW_ROOT,
/*
- * We are limited to MAX_LOCKDEP_SUBLCLASSES number of subclasses, so
+ * We are limited to MAX_LOCKDEP_SUBCLASSES number of subclasses, so
* add this in here and add a static_assert to keep us from going over
* the limit. As of this writing we're limited to 8, and we're
* definitely using 8, hence this check to keep us from messing up in
@@ -129,6 +129,16 @@ enum btrfs_lockdep_trans_states {
rwsem_release(&owner->lock##_map, _THIS_IP_)
/*
+ * Used to account for the fact that when doing io_uring encoded I/O, we can
+ * return to userspace with the inode lock still held.
+ */
+#define btrfs_lockdep_inode_acquire(owner, lock) \
+ rwsem_acquire_read(&owner->vfs_inode.lock.dep_map, 0, 0, _THIS_IP_)
+
+#define btrfs_lockdep_inode_release(owner, lock) \
+ rwsem_release(&owner->vfs_inode.lock.dep_map, _THIS_IP_)
+
+/*
* Macros for the transaction states wait events, similar to the generic wait
* event macros.
*/
@@ -179,7 +189,7 @@ static inline void btrfs_tree_read_lock(struct extent_buffer *eb)
}
void btrfs_tree_read_unlock(struct extent_buffer *eb);
-int btrfs_try_tree_read_lock(struct extent_buffer *eb);
+bool btrfs_try_tree_read_lock(struct extent_buffer *eb);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root);
@@ -189,8 +199,13 @@ static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb)
{
lockdep_assert_held_write(&eb->lock);
}
+static inline void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
+{
+ lockdep_assert_held_read(&eb->lock);
+}
#else
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb) { }
+static inline void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
#endif
void btrfs_unlock_up_safe(struct btrfs_path *path, int level);
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index a45bc11f8665..4758f66da449 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -58,9 +58,6 @@
* 0x1000 | SegHdr N+1| Data payload N+1 ... |
*/
-#define WORKSPACE_BUF_LENGTH (lzo1x_worst_compress(PAGE_SIZE))
-#define WORKSPACE_CBUF_LENGTH (lzo1x_worst_compress(PAGE_SIZE))
-
struct workspace {
void *mem;
void *buf; /* where decompressed data goes */
@@ -68,7 +65,14 @@ struct workspace {
struct list_head list;
};
-static struct workspace_manager wsm;
+static u32 workspace_buf_length(const struct btrfs_fs_info *fs_info)
+{
+ return lzo1x_worst_compress(fs_info->sectorsize);
+}
+static u32 workspace_cbuf_length(const struct btrfs_fs_info *fs_info)
+{
+ return lzo1x_worst_compress(fs_info->sectorsize);
+}
void lzo_free_workspace(struct list_head *ws)
{
@@ -80,7 +84,7 @@ void lzo_free_workspace(struct list_head *ws)
kfree(workspace);
}
-struct list_head *lzo_alloc_workspace(void)
+struct list_head *lzo_alloc_workspace(struct btrfs_fs_info *fs_info)
{
struct workspace *workspace;
@@ -89,8 +93,8 @@ struct list_head *lzo_alloc_workspace(void)
return ERR_PTR(-ENOMEM);
workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN);
- workspace->buf = kvmalloc(WORKSPACE_BUF_LENGTH, GFP_KERNEL | __GFP_NOWARN);
- workspace->cbuf = kvmalloc(WORKSPACE_CBUF_LENGTH, GFP_KERNEL | __GFP_NOWARN);
+ workspace->buf = kvmalloc(workspace_buf_length(fs_info), GFP_KERNEL | __GFP_NOWARN);
+ workspace->cbuf = kvmalloc(workspace_cbuf_length(fs_info), GFP_KERNEL | __GFP_NOWARN);
if (!workspace->mem || !workspace->buf || !workspace->cbuf)
goto fail;
@@ -128,19 +132,21 @@ static inline size_t read_compress_length(const char *buf)
*
* Will allocate new pages when needed.
*/
-static int copy_compressed_data_to_page(char *compressed_data,
+static int copy_compressed_data_to_page(struct btrfs_fs_info *fs_info,
+ char *compressed_data,
size_t compressed_size,
struct folio **out_folios,
unsigned long max_nr_folio,
- u32 *cur_out,
- const u32 sectorsize)
+ u32 *cur_out)
{
+ const u32 sectorsize = fs_info->sectorsize;
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
u32 sector_bytes_left;
u32 orig_out;
struct folio *cur_folio;
char *kaddr;
- if ((*cur_out / PAGE_SIZE) >= max_nr_folio)
+ if ((*cur_out >> min_folio_shift) >= max_nr_folio)
return -E2BIG;
/*
@@ -149,18 +155,17 @@ static int copy_compressed_data_to_page(char *compressed_data,
*/
ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);
- cur_folio = out_folios[*cur_out / PAGE_SIZE];
+ cur_folio = out_folios[*cur_out >> min_folio_shift];
/* Allocate a new page */
if (!cur_folio) {
- cur_folio = btrfs_alloc_compr_folio();
+ cur_folio = btrfs_alloc_compr_folio(fs_info);
if (!cur_folio)
return -ENOMEM;
- out_folios[*cur_out / PAGE_SIZE] = cur_folio;
+ out_folios[*cur_out >> min_folio_shift] = cur_folio;
}
- kaddr = kmap_local_folio(cur_folio, 0);
- write_compress_length(kaddr + offset_in_page(*cur_out),
- compressed_size);
+ kaddr = kmap_local_folio(cur_folio, offset_in_folio(cur_folio, *cur_out));
+ write_compress_length(kaddr, compressed_size);
*cur_out += LZO_LEN;
orig_out = *cur_out;
@@ -172,20 +177,20 @@ static int copy_compressed_data_to_page(char *compressed_data,
kunmap_local(kaddr);
- if ((*cur_out / PAGE_SIZE) >= max_nr_folio)
+ if ((*cur_out >> min_folio_shift) >= max_nr_folio)
return -E2BIG;
- cur_folio = out_folios[*cur_out / PAGE_SIZE];
+ cur_folio = out_folios[*cur_out >> min_folio_shift];
/* Allocate a new page */
if (!cur_folio) {
- cur_folio = btrfs_alloc_compr_folio();
+ cur_folio = btrfs_alloc_compr_folio(fs_info);
if (!cur_folio)
return -ENOMEM;
- out_folios[*cur_out / PAGE_SIZE] = cur_folio;
+ out_folios[*cur_out >> min_folio_shift] = cur_folio;
}
kaddr = kmap_local_folio(cur_folio, 0);
- memcpy(kaddr + offset_in_page(*cur_out),
+ memcpy(kaddr + offset_in_folio(cur_folio, *cur_out),
compressed_data + *cur_out - orig_out, copy_len);
*cur_out += copy_len;
@@ -209,12 +214,15 @@ out:
return 0;
}
-int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
+int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out)
{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct workspace *workspace = list_entry(ws, struct workspace, list);
- const u32 sectorsize = inode_to_fs_info(mapping->host)->sectorsize;
+ const u32 sectorsize = fs_info->sectorsize;
+ const u32 min_folio_size = btrfs_min_folio_size(fs_info);
+ struct address_space *mapping = inode->vfs_inode.i_mapping;
struct folio *folio_in = NULL;
char *sizes_ptr;
const unsigned long max_nr_folio = *out_folios;
@@ -252,9 +260,8 @@ int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
/* Compress at most one sector of data each time */
in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
ASSERT(in_len);
- data_in = kmap_local_folio(folio_in, 0);
- ret = lzo1x_1_compress(data_in +
- offset_in_page(cur_in), in_len,
+ data_in = kmap_local_folio(folio_in, offset_in_folio(folio_in, cur_in));
+ ret = lzo1x_1_compress(data_in, in_len,
workspace->cbuf, &out_len,
workspace->mem);
kunmap_local(data_in);
@@ -264,9 +271,9 @@ int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
goto out;
}
- ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
+ ret = copy_compressed_data_to_page(fs_info, workspace->cbuf, out_len,
folios, max_nr_folio,
- &cur_out, sectorsize);
+ &cur_out);
if (ret < 0)
goto out;
@@ -281,8 +288,8 @@ int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
goto out;
}
- /* Check if we have reached page boundary */
- if (PAGE_ALIGNED(cur_in)) {
+ /* Check if we have reached folio boundary. */
+ if (IS_ALIGNED(cur_in, min_folio_size)) {
folio_put(folio_in);
folio_in = NULL;
}
@@ -299,7 +306,7 @@ int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
out:
if (folio_in)
folio_put(folio_in);
- *out_folios = DIV_ROUND_UP(cur_out, PAGE_SIZE);
+ *out_folios = DIV_ROUND_UP(cur_out, min_folio_size);
return ret;
}
@@ -311,15 +318,16 @@ out:
static void copy_compressed_segment(struct compressed_bio *cb,
char *dest, u32 len, u32 *cur_in)
{
+ struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
u32 orig_in = *cur_in;
while (*cur_in < orig_in + len) {
- struct folio *cur_folio;
- u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
- orig_in + len - *cur_in);
+ struct folio *cur_folio = cb->compressed_folios[*cur_in >> min_folio_shift];
+ u32 copy_len = min_t(u32, orig_in + len - *cur_in,
+ folio_size(cur_folio) - offset_in_folio(cur_folio, *cur_in));
ASSERT(copy_len);
- cur_folio = cb->compressed_folios[*cur_in / PAGE_SIZE];
memcpy_from_folio(dest + *cur_in - orig_in, cur_folio,
offset_in_folio(cur_folio, *cur_in), copy_len);
@@ -333,6 +341,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
struct workspace *workspace = list_entry(ws, struct workspace, list);
const struct btrfs_fs_info *fs_info = cb->bbio.inode->root->fs_info;
const u32 sectorsize = fs_info->sectorsize;
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
char *kaddr;
int ret;
/* Compressed data length, can be unaligned */
@@ -379,14 +388,14 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
*/
ASSERT(cur_in / sectorsize ==
(cur_in + LZO_LEN - 1) / sectorsize);
- cur_folio = cb->compressed_folios[cur_in / PAGE_SIZE];
+ cur_folio = cb->compressed_folios[cur_in >> min_folio_shift];
ASSERT(cur_folio);
kaddr = kmap_local_folio(cur_folio, 0);
- seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
+ seg_len = read_compress_length(kaddr + offset_in_folio(cur_folio, cur_in));
kunmap_local(kaddr);
cur_in += LZO_LEN;
- if (unlikely(seg_len > WORKSPACE_CBUF_LENGTH)) {
+ if (unlikely(seg_len > workspace_cbuf_length(fs_info))) {
struct btrfs_inode *inode = cb->bbio.inode;
/*
@@ -446,19 +455,19 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
const u32 sectorsize = fs_info->sectorsize;
size_t in_len;
size_t out_len;
- size_t max_segment_len = WORKSPACE_BUF_LENGTH;
+ size_t max_segment_len = workspace_buf_length(fs_info);
int ret = 0;
- if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
+ if (unlikely(srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2))
return -EUCLEAN;
in_len = read_compress_length(data_in);
- if (in_len != srclen)
+ if (unlikely(in_len != srclen))
return -EUCLEAN;
data_in += LZO_LEN;
in_len = read_compress_length(data_in);
- if (in_len != srclen - LZO_LEN * 2) {
+ if (unlikely(in_len != srclen - LZO_LEN * 2)) {
ret = -EUCLEAN;
goto out;
}
@@ -488,8 +497,7 @@ out:
return ret;
}
-const struct btrfs_compress_op btrfs_lzo_compress = {
- .workspace_manager = &wsm,
+const struct btrfs_compress_levels btrfs_lzo_compress = {
.max_level = 1,
.default_level = 1,
};
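
The lzo.c conversion stops assuming PAGE_SIZE folios: a byte offset into the compressed stream is mapped to a folio index with offset >> min_folio_shift and to a position inside that folio with offset_in_folio(), where min_folio_shift is PAGE_SHIFT plus the filesystem's block_min_order. A worked arithmetic sketch of that indexing, with assumed values (4 KiB pages and a block_min_order of 2, i.e. 16 KiB minimum folios):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12				/* assumed: 4 KiB pages */
#define BLOCK_MIN_ORDER	2				/* assumed: min folio = 4 pages */
#define MIN_FOLIO_SHIFT	(PAGE_SHIFT + BLOCK_MIN_ORDER)
#define MIN_FOLIO_SIZE	(1UL << MIN_FOLIO_SHIFT)	/* 16 KiB */

int main(void)
{
	const uint64_t offsets[] = { 0, 4096, 16384, 40000 };

	for (unsigned int i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		uint64_t off = offsets[i];
		uint64_t folio_index = off >> MIN_FOLIO_SHIFT;	/* which folio in the array */
		uint64_t in_folio = off & (MIN_FOLIO_SIZE - 1);	/* offset_in_folio() analogue */

		printf("offset %6llu -> folio %llu, offset in folio %5llu\n",
		       (unsigned long long)off,
		       (unsigned long long)folio_index,
		       (unsigned long long)in_folio);
	}
	return 0;
}
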
diff --git a/fs/btrfs/messages.c b/fs/btrfs/messages.c
index 363fd28c0268..2f853de44473 100644
--- a/fs/btrfs/messages.c
+++ b/fs/btrfs/messages.c
@@ -18,11 +18,13 @@ static const char fs_state_chars[] = {
[BTRFS_FS_STATE_REMOUNTING] = 'M',
[BTRFS_FS_STATE_RO] = 0,
[BTRFS_FS_STATE_TRANS_ABORTED] = 'A',
+ [BTRFS_FS_STATE_LOG_REPLAY_ABORTED] = 'O',
[BTRFS_FS_STATE_DEV_REPLACING] = 'R',
[BTRFS_FS_STATE_DUMMY_FS_INFO] = 0,
[BTRFS_FS_STATE_NO_DATA_CSUMS] = 'C',
[BTRFS_FS_STATE_SKIP_META_CSUMS] = 'S',
[BTRFS_FS_STATE_LOG_CLEANUP_ERROR] = 'L',
+ [BTRFS_FS_STATE_EMERGENCY_SHUTDOWN] = 'E',
};
static void btrfs_state_to_string(const struct btrfs_fs_info *info, char *buf)
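
The messages.c hunk extends the per-bit state table ('O' for an aborted log replay, 'E' for an emergency shutdown) that btrfs_state_to_string() uses to render the currently set filesystem state bits alongside log messages. A small standalone sketch of that bit-to-character rendering, with invented state bits and arbitrarily chosen letters:

#include <stdio.h>

/* Invented state bits standing in for BTRFS_FS_STATE_*. */
enum fs_state {
	STATE_ERROR,
	STATE_TRANS_ABORTED,
	STATE_LOG_REPLAY_ABORTED,
	STATE_EMERGENCY_SHUTDOWN,
	STATE_LAST,
};

static const char state_chars[STATE_LAST] = {
	[STATE_ERROR]			= 'E',
	[STATE_TRANS_ABORTED]		= 'A',
	[STATE_LOG_REPLAY_ABORTED]	= 'O',
	[STATE_EMERGENCY_SHUTDOWN]	= 'S',
};

/* Emit one character per set bit that has a mapping; buf needs STATE_LAST + 1 bytes. */
static void state_to_string(unsigned long state, char *buf)
{
	char *p = buf;

	for (int bit = 0; bit < STATE_LAST; bit++) {
		if ((state & (1UL << bit)) && state_chars[bit])
			*p++ = state_chars[bit];
	}
	*p = '\0';
}

int main(void)
{
	char buf[STATE_LAST + 1];

	state_to_string((1UL << STATE_ERROR) | (1UL << STATE_LOG_REPLAY_ABORTED), buf);
	printf("state: %s\n", buf);	/* "EO" */
	return 0;
}
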
diff --git a/fs/btrfs/messages.h b/fs/btrfs/messages.h
index 08a9272399d2..d8c0bd17dcda 100644
--- a/fs/btrfs/messages.h
+++ b/fs/btrfs/messages.h
@@ -36,106 +36,46 @@ void _btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...);
btrfs_no_printk(fs_info, fmt, ##args)
#endif
-#define btrfs_emerg(fs_info, fmt, args...) \
- btrfs_printk(fs_info, KERN_EMERG fmt, ##args)
-#define btrfs_alert(fs_info, fmt, args...) \
- btrfs_printk(fs_info, KERN_ALERT fmt, ##args)
-#define btrfs_crit(fs_info, fmt, args...) \
- btrfs_printk(fs_info, KERN_CRIT fmt, ##args)
-#define btrfs_err(fs_info, fmt, args...) \
- btrfs_printk(fs_info, KERN_ERR fmt, ##args)
-#define btrfs_warn(fs_info, fmt, args...) \
- btrfs_printk(fs_info, KERN_WARNING fmt, ##args)
-#define btrfs_notice(fs_info, fmt, args...) \
- btrfs_printk(fs_info, KERN_NOTICE fmt, ##args)
-#define btrfs_info(fs_info, fmt, args...) \
- btrfs_printk(fs_info, KERN_INFO fmt, ##args)
-
/*
- * Wrappers that use printk_in_rcu
+ * Print a message with filesystem info, enclosed in RCU protection.
*/
-#define btrfs_emerg_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_in_rcu(fs_info, KERN_EMERG fmt, ##args)
-#define btrfs_alert_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_in_rcu(fs_info, KERN_ALERT fmt, ##args)
-#define btrfs_crit_in_rcu(fs_info, fmt, args...) \
+#define btrfs_crit(fs_info, fmt, args...) \
btrfs_printk_in_rcu(fs_info, KERN_CRIT fmt, ##args)
-#define btrfs_err_in_rcu(fs_info, fmt, args...) \
+#define btrfs_err(fs_info, fmt, args...) \
btrfs_printk_in_rcu(fs_info, KERN_ERR fmt, ##args)
-#define btrfs_warn_in_rcu(fs_info, fmt, args...) \
+#define btrfs_warn(fs_info, fmt, args...) \
btrfs_printk_in_rcu(fs_info, KERN_WARNING fmt, ##args)
-#define btrfs_notice_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_in_rcu(fs_info, KERN_NOTICE fmt, ##args)
-#define btrfs_info_in_rcu(fs_info, fmt, args...) \
+#define btrfs_info(fs_info, fmt, args...) \
btrfs_printk_in_rcu(fs_info, KERN_INFO fmt, ##args)
/*
- * Wrappers that use a ratelimited printk_in_rcu
- */
-#define btrfs_emerg_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_rl_in_rcu(fs_info, KERN_EMERG fmt, ##args)
-#define btrfs_alert_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_rl_in_rcu(fs_info, KERN_ALERT fmt, ##args)
-#define btrfs_crit_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_rl_in_rcu(fs_info, KERN_CRIT fmt, ##args)
-#define btrfs_err_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_rl_in_rcu(fs_info, KERN_ERR fmt, ##args)
-#define btrfs_warn_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_rl_in_rcu(fs_info, KERN_WARNING fmt, ##args)
-#define btrfs_notice_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_rl_in_rcu(fs_info, KERN_NOTICE fmt, ##args)
-#define btrfs_info_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_rl_in_rcu(fs_info, KERN_INFO fmt, ##args)
-
-/*
* Wrappers that use a ratelimited printk
*/
-#define btrfs_emerg_rl(fs_info, fmt, args...) \
- btrfs_printk_ratelimited(fs_info, KERN_EMERG fmt, ##args)
-#define btrfs_alert_rl(fs_info, fmt, args...) \
- btrfs_printk_ratelimited(fs_info, KERN_ALERT fmt, ##args)
#define btrfs_crit_rl(fs_info, fmt, args...) \
- btrfs_printk_ratelimited(fs_info, KERN_CRIT fmt, ##args)
+ btrfs_printk_rl_in_rcu(fs_info, KERN_CRIT fmt, ##args)
#define btrfs_err_rl(fs_info, fmt, args...) \
- btrfs_printk_ratelimited(fs_info, KERN_ERR fmt, ##args)
+ btrfs_printk_rl_in_rcu(fs_info, KERN_ERR fmt, ##args)
#define btrfs_warn_rl(fs_info, fmt, args...) \
- btrfs_printk_ratelimited(fs_info, KERN_WARNING fmt, ##args)
-#define btrfs_notice_rl(fs_info, fmt, args...) \
- btrfs_printk_ratelimited(fs_info, KERN_NOTICE fmt, ##args)
+ btrfs_printk_rl_in_rcu(fs_info, KERN_WARNING fmt, ##args)
#define btrfs_info_rl(fs_info, fmt, args...) \
- btrfs_printk_ratelimited(fs_info, KERN_INFO fmt, ##args)
+ btrfs_printk_rl_in_rcu(fs_info, KERN_INFO fmt, ##args)
#if defined(CONFIG_DYNAMIC_DEBUG)
#define btrfs_debug(fs_info, fmt, args...) \
- _dynamic_func_call_no_desc(fmt, btrfs_printk, \
- fs_info, KERN_DEBUG fmt, ##args)
-#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
_dynamic_func_call_no_desc(fmt, btrfs_printk_in_rcu, \
fs_info, KERN_DEBUG fmt, ##args)
-#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
- _dynamic_func_call_no_desc(fmt, btrfs_printk_rl_in_rcu, \
- fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_rl(fs_info, fmt, args...) \
- _dynamic_func_call_no_desc(fmt, btrfs_printk_ratelimited, \
+ _dynamic_func_call_no_desc(fmt, btrfs_printk_rl_in_rcu, \
fs_info, KERN_DEBUG fmt, ##args)
#elif defined(DEBUG)
#define btrfs_debug(fs_info, fmt, args...) \
- btrfs_printk(fs_info, KERN_DEBUG fmt, ##args)
-#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
btrfs_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
-#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_rl_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_rl(fs_info, fmt, args...) \
- btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt, ##args)
+ btrfs_printk_rl_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
#else
-#define btrfs_debug(fs_info, fmt, args...) \
- btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
-#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
- btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
-#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
-#define btrfs_debug_rl(fs_info, fmt, args...) \
- btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
+/* When printk() is no_printk(), expand to no-op. */
+#define btrfs_debug(fs_info, fmt, args...) do { (void)(fs_info); } while(0)
+#define btrfs_debug_rl(fs_info, fmt, args...) do { (void)(fs_info); } while(0)
#endif
#define btrfs_printk_in_rcu(fs_info, fmt, args...) \
@@ -145,40 +85,98 @@ do { \
rcu_read_unlock(); \
} while (0)
-#define btrfs_no_printk_in_rcu(fs_info, fmt, args...) \
-do { \
- rcu_read_lock(); \
- btrfs_no_printk(fs_info, fmt, ##args); \
- rcu_read_unlock(); \
-} while (0)
-
-#define btrfs_printk_ratelimited(fs_info, fmt, args...) \
+#define btrfs_printk_rl_in_rcu(fs_info, fmt, args...) \
do { \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
+ \
+ rcu_read_lock(); \
if (__ratelimit(&_rs)) \
btrfs_printk(fs_info, fmt, ##args); \
-} while (0)
-
-#define btrfs_printk_rl_in_rcu(fs_info, fmt, args...) \
-do { \
- rcu_read_lock(); \
- btrfs_printk_ratelimited(fs_info, fmt, ##args); \
rcu_read_unlock(); \
} while (0)
#ifdef CONFIG_BTRFS_ASSERT
-#define btrfs_assertfail(expr, file, line) ({ \
- pr_err("assertion failed: %s, in %s:%d\n", (expr), (file), (line)); \
- BUG(); \
-})
+__printf(1, 2)
+static inline void verify_assert_printk_format(const char *fmt, ...) {
+ /* Stub to verify the assertion format string. */
+}
+
+/* Take the first token if any. */
+#define __FIRST_ARG(_, ...) _
+/*
+ * Skip the first token and return the rest; if the rest is empty, the comma
+ * is dropped. As ##__VA_ARGS__ cannot be at the beginning of the macro,
+ * __VA_OPT__ is needed; it is supported since GCC 8 and Clang 12.
+ */
+#define __REST_ARGS(_, ... ) __VA_OPT__(,) __VA_ARGS__
+
+#if defined(CONFIG_CC_IS_CLANG) || GCC_VERSION >= 80000
+/*
+ * Assertion with optional printk() format.
+ *
+ * Accepted syntax:
+ * ASSERT(condition);
+ * ASSERT(condition, "string");
+ * ASSERT(condition, "variable=%d", variable);
+ *
+ * How it works:
+ * - if there's no format string, ""[0] evaluates at compile time to 0 and the
+ * true branch is executed
+ * - any non-empty format string with the "" prefix evaluates to != 0 at
+ * compile time and the false branch is executed
+ * - stringified condition is printed as %s so we don't accidentally mix format
+ * strings (the % operator)
+ * - there can be only one printk() call, so the format strings and arguments are
+ * spliced together:
+ * DEFAULT_FMT [USER_FMT], DEFAULT_ARGS [, USER_ARGS]
+ * - comma between DEFAULT_ARGS and USER_ARGS is handled by preprocessor
+ * (requires __VA_OPT__ support)
+ * - otherwise we could use __VA_OPT__(,) __VA_ARGS__ for the 2nd+ arguments of args
+ */
+#define ASSERT(cond, args...) \
+do { \
+ verify_assert_printk_format("check the format string" args); \
+ if (!likely(cond)) { \
+ if (("" __FIRST_ARG(args) [0]) == 0) { \
+ pr_err("assertion failed: %s :: %ld, in %s:%d\n", \
+ #cond, (long)(cond), __FILE__, __LINE__); \
+ } else { \
+ pr_err("assertion failed: %s :: %ld, in %s:%d (" __FIRST_ARG(args) ")\n", \
+ #cond, (long)(cond), __FILE__, __LINE__ __REST_ARGS(args)); \
+ } \
+ BUG(); \
+ } \
+} while(0)
+
+#else
+
+/* For GCC < 8.x, only the simple output is provided. */
+
+#define ASSERT(cond, args...) \
+do { \
+ verify_assert_printk_format("check the format string" args); \
+ if (!likely(cond)) { \
+ pr_err("assertion failed: %s :: %ld, in %s:%d\n", \
+ #cond, (long)(cond), __FILE__, __LINE__); \
+ BUG(); \
+ } \
+} while(0)
+
+#endif
+
+#else
+/* Compile check the @cond expression but don't generate any code. */
+#define ASSERT(cond, args...) BUILD_BUG_ON_INVALID(cond)
+#endif
-#define ASSERT(expr) \
- (likely(expr) ? (void)0 : btrfs_assertfail(#expr, __FILE__, __LINE__))
+#ifdef CONFIG_BTRFS_DEBUG
+/* Verbose warning only under debug build. */
+#define DEBUG_WARN(args...) WARN(1, KERN_ERR args)
#else
-#define ASSERT(expr) (void)(expr)
+#define DEBUG_WARN(...) do {} while(0)
#endif
__printf(5, 6)
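The new ASSERT() combines two preprocessor tricks: a "" prefix plus string-literal concatenation to decide at compile time whether a user format string was supplied, and __VA_OPT__ to drop the comma when the argument list is empty. A minimal userspace sketch of the same idea (hypothetical macro names, not part of the patch; needs gcc >= 8 or clang >= 12):

#include <stdio.h>

#define FIRST_ARG(_, ...) _
#define REST_ARGS(_, ...) __VA_OPT__(,) __VA_ARGS__

/* Same structure as the new ASSERT(), minus BUG() and the kernel printk. */
#define MY_ASSERT(cond, args...)                                              \
do {                                                                          \
        if (!(cond)) {                                                        \
                if (("" FIRST_ARG(args) [0]) == 0)                            \
                        printf("assertion failed: %s\n", #cond);              \
                else                                                          \
                        printf("assertion failed: %s (" FIRST_ARG(args) ")\n",\
                               #cond REST_ARGS(args));                        \
        }                                                                     \
} while (0)

int main(void)
{
        int nr = 3;

        MY_ASSERT(nr == 0);              /* prints: assertion failed: nr == 0 */
        MY_ASSERT(nr == 0, "nr=%d", nr); /* prints: assertion failed: nr == 0 (nr=3) */
        return 0;
}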
diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h
index 0d599fd847c9..12c5a9d6564f 100644
--- a/fs/btrfs/misc.h
+++ b/fs/btrfs/misc.h
@@ -7,8 +7,18 @@
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/wait.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
#include <linux/math64.h>
#include <linux/rbtree.h>
+#include <linux/bio.h>
+
+/*
+ * Convenience macros to define a pointer with the __free(kfree) and
+ * __free(kvfree) cleanup attributes and initialized to NULL.
+ */
+#define AUTO_KFREE(name) *name __free(kfree) = NULL
+#define AUTO_KVFREE(name) *name __free(kvfree) = NULL
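For illustration, a hypothetical user of these helpers (not part of the patch); the buffer is freed automatically when it goes out of scope, including on the early return:

static int fill_buffer(size_t len)
{
        u8 AUTO_KFREE(buf);

        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* ... use buf ...; no kfree() needed on any return path. */
        return 0;
}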
/*
* Enumerate bits using enum autoincrement. Define the @name as the n-th bit.
@@ -18,6 +28,54 @@
name = (1U << __ ## name ## _BIT), \
__ ## name ## _SEQ = __ ## name ## _BIT
+static inline phys_addr_t bio_iter_phys(struct bio *bio, struct bvec_iter *iter)
+{
+ struct bio_vec bv = bio_iter_iovec(bio, *iter);
+
+ return bvec_phys(&bv);
+}
+
+/*
+ * Iterate a bio using the btrfs block size.
+ *
+ * This handles large folios and highmem.
+ *
+ * @paddr:     Physical memory address of each iteration.
+ * @bio:       The bio to iterate.
+ * @iter:      The bvec_iter (pointer) to use.
+ * @blocksize: The block size to advance by on each iteration.
+ *
+ * This requires all folios in the bio to cover at least one block.
+ */
+#define btrfs_bio_for_each_block(paddr, bio, iter, blocksize) \
+ for (; (iter)->bi_size && \
+ (paddr = bio_iter_phys((bio), (iter)), 1); \
+ bio_advance_iter_single((bio), (iter), (blocksize)))
+
+/* Initialize a bvec_iter to the size of the specified bio. */
+static inline struct bvec_iter init_bvec_iter_for_bio(struct bio *bio)
+{
+ struct bio_vec *bvec;
+ u32 bio_size = 0;
+ int i;
+
+ bio_for_each_bvec_all(bvec, bio, i)
+ bio_size += bvec->bv_len;
+
+ return (struct bvec_iter) {
+ .bi_sector = 0,
+ .bi_size = bio_size,
+ .bi_idx = 0,
+ .bi_bvec_done = 0,
+ };
+}
+
+#define btrfs_bio_for_each_block_all(paddr, bio, blocksize) \
+ for (struct bvec_iter iter = init_bvec_iter_for_bio(bio); \
+ (iter).bi_size && \
+ (paddr = bio_iter_phys((bio), &(iter)), 1); \
+ bio_advance_iter_single((bio), &(iter), (blocksize)))
+
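A hypothetical walker (not from the patch) showing the intended use: the loop visits the bio one filesystem block at a time and yields the physical address of each block.

static void walk_bio_blocks(struct bio *bio, u32 blocksize)
{
        struct bvec_iter iter = bio->bi_iter;
        phys_addr_t paddr;

        btrfs_bio_for_each_block(paddr, bio, &iter, blocksize)
                pr_debug("block at physical address %pa\n", &paddr);
}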
static inline void cond_wake_up(struct wait_queue_head *wq)
{
/*
@@ -119,28 +177,23 @@ static inline struct rb_node *rb_simple_search_first(const struct rb_root *root,
return ret;
}
-static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr,
- struct rb_node *node)
+static int rb_simple_node_bytenr_cmp(struct rb_node *new, const struct rb_node *existing)
{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
- struct rb_simple_node *entry;
+ struct rb_simple_node *new_entry = rb_entry(new, struct rb_simple_node, rb_node);
+ struct rb_simple_node *existing_entry = rb_entry(existing, struct rb_simple_node, rb_node);
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct rb_simple_node, rb_node);
+ if (new_entry->bytenr < existing_entry->bytenr)
+ return -1;
+ else if (new_entry->bytenr > existing_entry->bytenr)
+ return 1;
- if (bytenr < entry->bytenr)
- p = &(*p)->rb_left;
- else if (bytenr > entry->bytenr)
- p = &(*p)->rb_right;
- else
- return parent;
- }
+ return 0;
+}
- rb_link_node(node, parent, p);
- rb_insert_color(node, root);
- return NULL;
+static inline struct rb_node *rb_simple_insert(struct rb_root *root,
+ struct rb_simple_node *simple_node)
+{
+ return rb_find_add(&simple_node->rb_node, root, rb_simple_node_bytenr_cmp);
}
static inline bool bitmap_test_range_all_set(const unsigned long *addr,
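With the comparator-based rewrite the bytenr is taken from the node itself, so a hypothetical caller (illustrative names only) reduces to:

static bool track_block(struct rb_root *root, struct rb_simple_node *sn, u64 bytenr)
{
        sn->bytenr = bytenr;
        /* rb_find_add() returns the colliding node, or NULL when inserted. */
        return rb_simple_insert(root, sn) == NULL;
}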
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 30eceaf829a7..5df02c707aee 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -153,25 +153,30 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
struct btrfs_ordered_extent *entry;
int ret;
u64 qgroup_rsv = 0;
+ const bool is_nocow = (flags &
+ ((1U << BTRFS_ORDERED_NOCOW) | (1U << BTRFS_ORDERED_PREALLOC)));
- if (flags &
- ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
- /* For nocow write, we can release the qgroup rsv right now */
+ /*
+ * For a NOCOW write we can free the qgroup reserve right now. For a COW
+ * one we transfer the reserved space from the inode's iotree into the
+ * ordered extent by calling btrfs_qgroup_release_data() and tracking
+ * the qgroup reserved amount in the ordered extent, so that later after
+ * completing the ordered extent, when running the data delayed ref it
+ * creates, we free the reserved data with btrfs_qgroup_free_refroot().
+ */
+ if (is_nocow)
ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
- if (ret < 0)
- return ERR_PTR(ret);
- } else {
- /*
- * The ordered extent has reserved qgroup space, release now
- * and pass the reserved number for qgroup_record to free.
- */
+ else
ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
- if (ret < 0)
- return ERR_PTR(ret);
- }
+
+ if (ret < 0)
+ return ERR_PTR(ret);
+
entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
- if (!entry)
- return ERR_PTR(-ENOMEM);
+ if (!entry) {
+ entry = ERR_PTR(-ENOMEM);
+ goto out;
+ }
entry->file_offset = file_offset;
entry->num_bytes = num_bytes;
@@ -180,7 +185,12 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
entry->disk_num_bytes = disk_num_bytes;
entry->offset = offset;
entry->bytes_left = num_bytes;
- entry->inode = BTRFS_I(igrab(&inode->vfs_inode));
+ if (WARN_ON_ONCE(!igrab(&inode->vfs_inode))) {
+ kmem_cache_free(btrfs_ordered_extent_cache, entry);
+ entry = ERR_PTR(-ESTALE);
+ goto out;
+ }
+ entry->inode = inode;
entry->compress_type = compress_type;
entry->truncated_len = (u64)-1;
entry->qgroup_rsv = qgroup_rsv;
@@ -203,6 +213,12 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
btrfs_mod_outstanding_extents(inode, 1);
spin_unlock(&inode->lock);
+out:
+ if (IS_ERR(entry) && !is_nocow)
+ btrfs_qgroup_free_refroot(inode->root->fs_info,
+ btrfs_root_id(inode->root),
+ qgroup_rsv, BTRFS_QGROUP_RSV_DATA);
+
return entry;
}
@@ -221,14 +237,14 @@ static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
/* One ref for the tree. */
refcount_inc(&entry->refs);
- spin_lock_irq(&inode->ordered_tree_lock);
+ spin_lock(&inode->ordered_tree_lock);
node = tree_insert(&inode->ordered_tree, entry->file_offset,
&entry->rb_node);
if (unlikely(node))
btrfs_panic(fs_info, -EEXIST,
"inconsistency in ordered tree at offset %llu",
entry->file_offset);
- spin_unlock_irq(&inode->ordered_tree_lock);
+ spin_unlock(&inode->ordered_tree_lock);
spin_lock(&root->ordered_extent_lock);
list_add_tail(&entry->root_extent_list,
@@ -253,7 +269,7 @@ static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
* @disk_bytenr: Offset of extent on disk.
* @disk_num_bytes: Size of extent on disk.
* @offset: Offset into unencoded data where file data starts.
- * @flags: Flags specifying type of extent (1 << BTRFS_ORDERED_*).
+ * @flags: Flags specifying type of extent (1U << BTRFS_ORDERED_*).
* @compress_type: Compression algorithm used for data.
*
* Most of these parameters correspond to &struct btrfs_file_extent_item. The
@@ -312,9 +328,9 @@ void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
{
struct btrfs_inode *inode = entry->inode;
- spin_lock_irq(&inode->ordered_tree_lock);
+ spin_lock(&inode->ordered_tree_lock);
list_add_tail(&sum->list, &entry->list);
- spin_unlock_irq(&inode->ordered_tree_lock);
+ spin_unlock(&inode->ordered_tree_lock);
}
void btrfs_mark_ordered_extent_error(struct btrfs_ordered_extent *ordered)
@@ -343,7 +359,7 @@ static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
if (folio) {
ASSERT(folio->mapping);
ASSERT(folio_pos(folio) <= file_offset);
- ASSERT(file_offset + len <= folio_pos(folio) + folio_size(folio));
+ ASSERT(file_offset + len <= folio_next_pos(folio));
/*
* Ordered flag indicates whether we still have
@@ -401,15 +417,14 @@ void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
bool uptodate)
{
struct btrfs_inode *inode = ordered->inode;
- unsigned long flags;
bool ret;
trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);
- spin_lock_irqsave(&inode->ordered_tree_lock, flags);
+ spin_lock(&inode->ordered_tree_lock);
ret = can_finish_ordered_extent(ordered, folio, file_offset, len,
uptodate);
- spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
+ spin_unlock(&inode->ordered_tree_lock);
/*
* If this is a COW write it means we created new extent maps for the
@@ -465,18 +480,16 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
{
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
- unsigned long flags;
u64 cur = file_offset;
+ const u64 end = file_offset + num_bytes;
- trace_btrfs_writepage_end_io_hook(inode, file_offset,
- file_offset + num_bytes - 1,
- uptodate);
+ trace_btrfs_writepage_end_io_hook(inode, file_offset, end - 1, uptodate);
- spin_lock_irqsave(&inode->ordered_tree_lock, flags);
- while (cur < file_offset + num_bytes) {
+ spin_lock(&inode->ordered_tree_lock);
+ while (cur < end) {
u64 entry_end;
- u64 end;
- u32 len;
+ u64 this_end;
+ u64 len;
node = ordered_tree_search(inode, cur);
/* No ordered extents at all */
@@ -519,19 +532,18 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
* |
* cur
*/
- end = min(entry->file_offset + entry->num_bytes,
- file_offset + num_bytes) - 1;
- ASSERT(end + 1 - cur < U32_MAX);
- len = end + 1 - cur;
+ this_end = min(entry_end, end);
+ len = this_end - cur;
+ ASSERT(len < U32_MAX);
if (can_finish_ordered_extent(entry, folio, cur, len, uptodate)) {
- spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
+ spin_unlock(&inode->ordered_tree_lock);
btrfs_queue_ordered_fn(entry);
- spin_lock_irqsave(&inode->ordered_tree_lock, flags);
+ spin_lock(&inode->ordered_tree_lock);
}
cur += len;
}
- spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
+ spin_unlock(&inode->ordered_tree_lock);
}
/*
@@ -557,10 +569,9 @@ bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
{
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
- unsigned long flags;
bool finished = false;
- spin_lock_irqsave(&inode->ordered_tree_lock, flags);
+ spin_lock(&inode->ordered_tree_lock);
if (cached && *cached) {
entry = *cached;
goto have_entry;
@@ -597,7 +608,7 @@ out:
refcount_inc(&entry->refs);
trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
}
- spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
+ spin_unlock(&inode->ordered_tree_lock);
return finished;
}
@@ -607,23 +618,18 @@ out:
*/
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
- struct list_head *cur;
- struct btrfs_ordered_sum *sum;
-
trace_btrfs_ordered_extent_put(entry->inode, entry);
if (refcount_dec_and_test(&entry->refs)) {
+ struct btrfs_ordered_sum *sum;
+ struct btrfs_ordered_sum *tmp;
+
ASSERT(list_empty(&entry->root_extent_list));
ASSERT(list_empty(&entry->log_list));
ASSERT(RB_EMPTY_NODE(&entry->rb_node));
- if (entry->inode)
- btrfs_add_delayed_iput(entry->inode);
- while (!list_empty(&entry->list)) {
- cur = entry->list.next;
- sum = list_entry(cur, struct btrfs_ordered_sum, list);
- list_del(&sum->list);
+ btrfs_add_delayed_iput(entry->inode);
+ list_for_each_entry_safe(sum, tmp, &entry->list, list)
kvfree(sum);
- }
kmem_cache_free(btrfs_ordered_extent_cache, entry);
}
}
@@ -667,7 +673,7 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
fs_info->delalloc_batch);
- spin_lock_irq(&btrfs_inode->ordered_tree_lock);
+ spin_lock(&btrfs_inode->ordered_tree_lock);
node = &entry->rb_node;
rb_erase(node, &btrfs_inode->ordered_tree);
RB_CLEAR_NODE(node);
@@ -675,7 +681,7 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
btrfs_inode->ordered_tree_last = NULL;
set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
- spin_unlock_irq(&btrfs_inode->ordered_tree_lock);
+ spin_unlock(&btrfs_inode->ordered_tree_lock);
/*
* The current running transaction is waiting on us, we need to let it
@@ -842,10 +848,12 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
/*
* Start IO and wait for a given ordered extent to finish.
*
- * Wait on page writeback for all the pages in the extent and the IO completion
- * code to insert metadata into the btree corresponding to the extent.
+ * Wait on page writeback for all the pages in the extent, except those in the
+ * range [@nowriteback_start, @nowriteback_start + @nowriteback_len), and wait
+ * for the IO completion code to insert metadata into the btree corresponding
+ * to the extent.
*/
-void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
+void btrfs_start_ordered_extent_nowriteback(struct btrfs_ordered_extent *entry,
+ u64 nowriteback_start, u32 nowriteback_len)
{
u64 start = entry->file_offset;
u64 end = start + entry->num_bytes - 1;
@@ -865,8 +873,19 @@ void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
* start IO on any dirty ones so the wait doesn't stall waiting
* for the flusher thread to find them
*/
- if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
- filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
+ if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags)) {
+ if (!nowriteback_len) {
+ filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
+ } else {
+ if (start < nowriteback_start)
+ filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start,
+ nowriteback_start - 1);
+ if (nowriteback_start + nowriteback_len < end)
+ filemap_fdatawrite_range(inode->vfs_inode.i_mapping,
+ nowriteback_start + nowriteback_len,
+ end);
+ }
+ }
if (!freespace_inode)
btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
@@ -947,9 +966,8 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *ino
{
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
- unsigned long flags;
- spin_lock_irqsave(&inode->ordered_tree_lock, flags);
+ spin_lock(&inode->ordered_tree_lock);
node = ordered_tree_search(inode, file_offset);
if (!node)
goto out;
@@ -962,7 +980,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *ino
trace_btrfs_ordered_extent_lookup(inode, entry);
}
out:
- spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
+ spin_unlock(&inode->ordered_tree_lock);
return entry;
}
@@ -975,7 +993,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
- spin_lock_irq(&inode->ordered_tree_lock);
+ spin_lock(&inode->ordered_tree_lock);
node = ordered_tree_search(inode, file_offset);
if (!node) {
node = ordered_tree_search(inode, file_offset + len);
@@ -1002,7 +1020,7 @@ out:
refcount_inc(&entry->refs);
trace_btrfs_ordered_extent_lookup_range(inode, entry);
}
- spin_unlock_irq(&inode->ordered_tree_lock);
+ spin_unlock(&inode->ordered_tree_lock);
return entry;
}
@@ -1017,7 +1035,7 @@ void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
btrfs_assert_inode_locked(inode);
- spin_lock_irq(&inode->ordered_tree_lock);
+ spin_lock(&inode->ordered_tree_lock);
for (n = rb_first(&inode->ordered_tree); n; n = rb_next(n)) {
struct btrfs_ordered_extent *ordered;
@@ -1031,7 +1049,7 @@ void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
refcount_inc(&ordered->refs);
trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
}
- spin_unlock_irq(&inode->ordered_tree_lock);
+ spin_unlock(&inode->ordered_tree_lock);
}
/*
@@ -1044,7 +1062,7 @@ btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
- spin_lock_irq(&inode->ordered_tree_lock);
+ spin_lock(&inode->ordered_tree_lock);
node = ordered_tree_search(inode, file_offset);
if (!node)
goto out;
@@ -1053,7 +1071,7 @@ btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
refcount_inc(&entry->refs);
trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
- spin_unlock_irq(&inode->ordered_tree_lock);
+ spin_unlock(&inode->ordered_tree_lock);
return entry;
}
@@ -1075,7 +1093,7 @@ struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
struct rb_node *next;
struct btrfs_ordered_extent *entry = NULL;
- spin_lock_irq(&inode->ordered_tree_lock);
+ spin_lock(&inode->ordered_tree_lock);
node = inode->ordered_tree.rb_node;
/*
* Here we don't want to use tree_search() which will use tree->last
@@ -1130,7 +1148,7 @@ out:
trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
}
- spin_unlock_irq(&inode->ordered_tree_lock);
+ spin_unlock(&inode->ordered_tree_lock);
return entry;
}
@@ -1160,7 +1178,7 @@ void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
cachedp = cached_state;
while (1) {
- lock_extent(&inode->io_tree, start, end, cachedp);
+ btrfs_lock_extent(&inode->io_tree, start, end, cachedp);
ordered = btrfs_lookup_ordered_range(inode, start,
end - start + 1);
if (!ordered) {
@@ -1173,7 +1191,7 @@ void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
refcount_dec(&cache->refs);
break;
}
- unlock_extent(&inode->io_tree, start, end, cachedp);
+ btrfs_unlock_extent(&inode->io_tree, start, end, cachedp);
btrfs_start_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
}
@@ -1191,7 +1209,7 @@ bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
{
struct btrfs_ordered_extent *ordered;
- if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
+ if (!btrfs_try_lock_extent(&inode->io_tree, start, end, cached_state))
return false;
ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
@@ -1199,7 +1217,7 @@ bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
return true;
btrfs_put_ordered_extent(ordered);
- unlock_extent(&inode->io_tree, start, end, cached_state);
+ btrfs_unlock_extent(&inode->io_tree, start, end, cached_state);
return false;
}
@@ -1229,6 +1247,18 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent(
*/
if (WARN_ON_ONCE(len >= ordered->num_bytes))
return ERR_PTR(-EINVAL);
+ /*
+ * If our ordered extent had an error there's no point in continuing.
+ * The error may have come from a transaction abort done either by this
+ * task or some other concurrent task, and the transaction abort path
+ * iterates over all existing ordered extents and sets the flag
+ * BTRFS_ORDERED_IOERR on them.
+ */
+ if (unlikely(flags & (1U << BTRFS_ORDERED_IOERR))) {
+ const int fs_error = BTRFS_FS_ERROR(fs_info);
+
+ return fs_error ? ERR_PTR(fs_error) : ERR_PTR(-EIO);
+ }
/* We cannot split partially completed ordered extents. */
if (ordered->bytes_left) {
ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
@@ -1250,9 +1280,7 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent(
/*
* Take the root's ordered_extent_lock to avoid a race with
* btrfs_wait_ordered_extents() when updating the disk_bytenr and
- * disk_num_bytes fields of the ordered extent below. And we disable
- * IRQs because the inode's ordered_tree_lock is used in IRQ context
- * elsewhere.
+ * disk_num_bytes fields of the ordered extent below.
*
* There's no concern about a previous caller of
* btrfs_wait_ordered_extents() getting the trimmed ordered extent
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 4e152736d06c..1e6b0b182b29 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -17,6 +17,7 @@
struct inode;
struct page;
struct extent_state;
+struct btrfs_block_group;
struct btrfs_inode;
struct btrfs_root;
struct btrfs_fs_info;
@@ -191,7 +192,13 @@ void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
struct btrfs_ordered_sum *sum);
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
u64 file_offset);
-void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry);
+void btrfs_start_ordered_extent_nowriteback(struct btrfs_ordered_extent *entry,
+ u64 nowriteback_start, u32 nowriteback_len);
+static inline void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
+{
+ return btrfs_start_ordered_extent_nowriteback(entry, 0, 0);
+}
+
int btrfs_wait_ordered_range(struct btrfs_inode *inode, u64 start, u64 len);
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset);
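For illustration, a hypothetical call site (variables ordered and folio assumed): a caller that already holds a folio locked can skip writeback for just that range while still waiting on the rest of the extent, and the plain wrapper keeps the old write-back-everything behaviour.

        /* Hypothetical: skip writeback only for the locked folio's range. */
        btrfs_start_ordered_extent_nowriteback(ordered, folio_pos(folio), folio_size(folio));

        /* Same as the pre-patch btrfs_start_ordered_extent() behaviour. */
        btrfs_start_ordered_extent(ordered);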
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index fc821aa446f0..f189bf09ce6a 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -6,12 +6,19 @@
#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
+#include "file-item.h"
#include "print-tree.h"
#include "accessors.h"
#include "tree-checker.h"
#include "volumes.h"
#include "raid-stripe-tree.h"
+/*
+ * Large enough buffer size for the stringification of any key type yet short
+ * enough to use the stack and avoid allocations.
+ */
+#define KEY_TYPE_BUF_SIZE 32
+
struct root_name_map {
u64 id;
const char *name;
@@ -124,7 +131,7 @@ static void print_extent_item(const struct extent_buffer *eb, int slot, int type
struct btrfs_tree_block_info *info;
info = (struct btrfs_tree_block_info *)(ei + 1);
btrfs_tree_block_key(eb, info, &key);
- pr_info("\t\ttree block key (%llu %u %llu) level %d\n",
+ pr_info("\t\ttree block key " BTRFS_KEY_FMT " level %d\n",
btrfs_disk_key_objectid(&key), key.type,
btrfs_disk_key_offset(&key),
btrfs_tree_block_level(eb, info));
@@ -190,7 +197,7 @@ static void print_uuid_item(const struct extent_buffer *l, unsigned long offset,
u32 item_size)
{
if (!IS_ALIGNED(item_size, sizeof(u64))) {
- pr_warn("BTRFS: uuid item with illegal size %lu!\n",
+ btrfs_warn(l->fs_info, "uuid item with illegal size %lu",
(unsigned long)item_size);
return;
}
@@ -223,25 +230,212 @@ static void print_eb_refs_lock(const struct extent_buffer *eb)
{
#ifdef CONFIG_BTRFS_DEBUG
btrfs_info(eb->fs_info, "refs %u lock_owner %u current %u",
- atomic_read(&eb->refs), eb->lock_owner, current->pid);
+ refcount_read(&eb->refs), eb->lock_owner, current->pid);
#endif
}
+static void print_timespec(const struct extent_buffer *eb,
+ struct btrfs_timespec *timespec,
+ const char *prefix, const char *suffix)
+{
+ const u64 secs = btrfs_timespec_sec(eb, timespec);
+ const u32 nsecs = btrfs_timespec_nsec(eb, timespec);
+
+ pr_info("%s%llu.%u%s", prefix, secs, nsecs, suffix);
+}
+
+static void print_inode_item(const struct extent_buffer *eb, int i)
+{
+ struct btrfs_inode_item *ii = btrfs_item_ptr(eb, i, struct btrfs_inode_item);
+
+ pr_info("\t\tinode generation %llu transid %llu size %llu nbytes %llu\n",
+ btrfs_inode_generation(eb, ii), btrfs_inode_transid(eb, ii),
+ btrfs_inode_size(eb, ii), btrfs_inode_nbytes(eb, ii));
+ pr_info("\t\tblock group %llu mode %o links %u uid %u gid %u\n",
+ btrfs_inode_block_group(eb, ii), btrfs_inode_mode(eb, ii),
+ btrfs_inode_nlink(eb, ii), btrfs_inode_uid(eb, ii),
+ btrfs_inode_gid(eb, ii));
+ pr_info("\t\trdev %llu sequence %llu flags 0x%llx\n",
+ btrfs_inode_rdev(eb, ii), btrfs_inode_sequence(eb, ii),
+ btrfs_inode_flags(eb, ii));
+ print_timespec(eb, &ii->atime, "\t\tatime ", "\n");
+ print_timespec(eb, &ii->ctime, "\t\tctime ", "\n");
+ print_timespec(eb, &ii->mtime, "\t\tmtime ", "\n");
+ print_timespec(eb, &ii->otime, "\t\totime ", "\n");
+}
+
+static void print_dir_item(const struct extent_buffer *eb, int i)
+{
+ const u32 size = btrfs_item_size(eb, i);
+ struct btrfs_dir_item *di = btrfs_item_ptr(eb, i, struct btrfs_dir_item);
+ u32 cur = 0;
+
+ while (cur < size) {
+ const u32 name_len = btrfs_dir_name_len(eb, di);
+ const u32 data_len = btrfs_dir_data_len(eb, di);
+ const u32 len = sizeof(*di) + name_len + data_len;
+ struct btrfs_key location;
+
+ btrfs_dir_item_key_to_cpu(eb, di, &location);
+ pr_info("\t\tlocation key " BTRFS_KEY_FMT " type %d\n",
+ BTRFS_KEY_FMT_VALUE(&location), btrfs_dir_ftype(eb, di));
+ pr_info("\t\ttransid %llu data_len %u name_len %u\n",
+ btrfs_dir_transid(eb, di), data_len, name_len);
+ di = (struct btrfs_dir_item *)((char *)di + len);
+ cur += len;
+ }
+}
+
+static void print_inode_ref_item(const struct extent_buffer *eb, int i)
+{
+ const u32 size = btrfs_item_size(eb, i);
+ struct btrfs_inode_ref *ref = btrfs_item_ptr(eb, i, struct btrfs_inode_ref);
+ u32 cur = 0;
+
+ while (cur < size) {
+ const u64 index = btrfs_inode_ref_index(eb, ref);
+ const u32 name_len = btrfs_inode_ref_name_len(eb, ref);
+ const u32 len = sizeof(*ref) + name_len;
+
+ pr_info("\t\tindex %llu name_len %u\n", index, name_len);
+ ref = (struct btrfs_inode_ref *)((char *)ref + len);
+ cur += len;
+ }
+}
+
+static void print_inode_extref_item(const struct extent_buffer *eb, int i)
+{
+ const u32 size = btrfs_item_size(eb, i);
+ struct btrfs_inode_extref *extref;
+ u32 cur = 0;
+
+ extref = btrfs_item_ptr(eb, i, struct btrfs_inode_extref);
+ while (cur < size) {
+ const u64 index = btrfs_inode_extref_index(eb, extref);
+ const u32 name_len = btrfs_inode_extref_name_len(eb, extref);
+ const u64 parent = btrfs_inode_extref_parent(eb, extref);
+ const u32 len = sizeof(*extref) + name_len;
+
+ pr_info("\t\tindex %llu parent %llu name_len %u\n",
+ index, parent, name_len);
+ extref = (struct btrfs_inode_extref *)((char *)extref + len);
+ cur += len;
+ }
+}
+
+static void print_dir_log_index_item(const struct extent_buffer *eb, int i)
+{
+ struct btrfs_dir_log_item *dlog;
+
+ dlog = btrfs_item_ptr(eb, i, struct btrfs_dir_log_item);
+ pr_info("\t\tdir log end %llu\n", btrfs_dir_log_end(eb, dlog));
+}
+
+static void print_extent_csum(const struct extent_buffer *eb, int i)
+{
+ const struct btrfs_fs_info *fs_info = eb->fs_info;
+ const u32 size = btrfs_item_size(eb, i);
+ const u32 csum_bytes = (size / fs_info->csum_size) * fs_info->sectorsize;
+ struct btrfs_key key;
+
+ btrfs_item_key_to_cpu(eb, &key, i);
+ pr_info("\t\trange start %llu end %llu length %u\n",
+ key.offset, key.offset + csum_bytes, csum_bytes);
+}
+
+static void print_file_extent_item(const struct extent_buffer *eb, int i)
+{
+ struct btrfs_file_extent_item *fi;
+
+ fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
+ pr_info("\t\tgeneration %llu type %hhu\n",
+ btrfs_file_extent_generation(eb, fi),
+ btrfs_file_extent_type(eb, fi));
+
+ if (btrfs_file_extent_type(eb, fi) == BTRFS_FILE_EXTENT_INLINE) {
+ pr_info("\t\tinline extent data size %u ram_bytes %llu compression %hhu\n",
+ btrfs_file_extent_inline_item_len(eb, i),
+ btrfs_file_extent_ram_bytes(eb, fi),
+ btrfs_file_extent_compression(eb, fi));
+ return;
+ }
+
+ pr_info("\t\textent data disk bytenr %llu nr %llu\n",
+ btrfs_file_extent_disk_bytenr(eb, fi),
+ btrfs_file_extent_disk_num_bytes(eb, fi));
+ pr_info("\t\textent data offset %llu nr %llu ram %llu\n",
+ btrfs_file_extent_offset(eb, fi),
+ btrfs_file_extent_num_bytes(eb, fi),
+ btrfs_file_extent_ram_bytes(eb, fi));
+ pr_info("\t\textent compression %hhu\n",
+ btrfs_file_extent_compression(eb, fi));
+}
+
+static void key_type_string(const struct btrfs_key *key, char *buf, int buf_size)
+{
+ static const char *key_to_str[256] = {
+ [BTRFS_INODE_ITEM_KEY] = "INODE_ITEM",
+ [BTRFS_INODE_REF_KEY] = "INODE_REF",
+ [BTRFS_INODE_EXTREF_KEY] = "INODE_EXTREF",
+ [BTRFS_DIR_ITEM_KEY] = "DIR_ITEM",
+ [BTRFS_DIR_INDEX_KEY] = "DIR_INDEX",
+ [BTRFS_DIR_LOG_ITEM_KEY] = "DIR_LOG_ITEM",
+ [BTRFS_DIR_LOG_INDEX_KEY] = "DIR_LOG_INDEX",
+ [BTRFS_XATTR_ITEM_KEY] = "XATTR_ITEM",
+ [BTRFS_VERITY_DESC_ITEM_KEY] = "VERITY_DESC_ITEM",
+ [BTRFS_VERITY_MERKLE_ITEM_KEY] = "VERITY_MERKLE_ITEM",
+ [BTRFS_ORPHAN_ITEM_KEY] = "ORPHAN_ITEM",
+ [BTRFS_ROOT_ITEM_KEY] = "ROOT_ITEM",
+ [BTRFS_ROOT_REF_KEY] = "ROOT_REF",
+ [BTRFS_ROOT_BACKREF_KEY] = "ROOT_BACKREF",
+ [BTRFS_EXTENT_ITEM_KEY] = "EXTENT_ITEM",
+ [BTRFS_METADATA_ITEM_KEY] = "METADATA_ITEM",
+ [BTRFS_TREE_BLOCK_REF_KEY] = "TREE_BLOCK_REF",
+ [BTRFS_SHARED_BLOCK_REF_KEY] = "SHARED_BLOCK_REF",
+ [BTRFS_EXTENT_DATA_REF_KEY] = "EXTENT_DATA_REF",
+ [BTRFS_SHARED_DATA_REF_KEY] = "SHARED_DATA_REF",
+ [BTRFS_EXTENT_OWNER_REF_KEY] = "EXTENT_OWNER_REF",
+ [BTRFS_EXTENT_CSUM_KEY] = "EXTENT_CSUM",
+ [BTRFS_EXTENT_DATA_KEY] = "EXTENT_DATA",
+ [BTRFS_BLOCK_GROUP_ITEM_KEY] = "BLOCK_GROUP_ITEM",
+ [BTRFS_FREE_SPACE_INFO_KEY] = "FREE_SPACE_INFO",
+ [BTRFS_FREE_SPACE_EXTENT_KEY] = "FREE_SPACE_EXTENT",
+ [BTRFS_FREE_SPACE_BITMAP_KEY] = "FREE_SPACE_BITMAP",
+ [BTRFS_CHUNK_ITEM_KEY] = "CHUNK_ITEM",
+ [BTRFS_DEV_ITEM_KEY] = "DEV_ITEM",
+ [BTRFS_DEV_EXTENT_KEY] = "DEV_EXTENT",
+ [BTRFS_TEMPORARY_ITEM_KEY] = "TEMPORARY_ITEM",
+ [BTRFS_DEV_REPLACE_KEY] = "DEV_REPLACE",
+ [BTRFS_STRING_ITEM_KEY] = "STRING_ITEM",
+ [BTRFS_QGROUP_STATUS_KEY] = "QGROUP_STATUS",
+ [BTRFS_QGROUP_RELATION_KEY] = "QGROUP_RELATION",
+ [BTRFS_QGROUP_INFO_KEY] = "QGROUP_INFO",
+ [BTRFS_QGROUP_LIMIT_KEY] = "QGROUP_LIMIT",
+ [BTRFS_PERSISTENT_ITEM_KEY] = "PERSISTENT_ITEM",
+ [BTRFS_UUID_KEY_SUBVOL] = "UUID_KEY_SUBVOL",
+ [BTRFS_UUID_KEY_RECEIVED_SUBVOL] = "UUID_KEY_RECEIVED_SUBVOL",
+ [BTRFS_RAID_STRIPE_KEY] = "RAID_STRIPE",
+ };
+
+ if (key->type == 0 && key->objectid == BTRFS_FREE_SPACE_OBJECTID)
+ scnprintf(buf, buf_size, "UNTYPED");
+ else if (key_to_str[key->type])
+ scnprintf(buf, buf_size, "%s", key_to_str[key->type]);
+ else
+ scnprintf(buf, buf_size, "UNKNOWN.%d", key->type);
+}
+
void btrfs_print_leaf(const struct extent_buffer *l)
{
struct btrfs_fs_info *fs_info;
int i;
u32 type, nr;
struct btrfs_root_item *ri;
- struct btrfs_dir_item *di;
- struct btrfs_inode_item *ii;
struct btrfs_block_group_item *bi;
- struct btrfs_file_extent_item *fi;
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
struct btrfs_dev_extent *dev_extent;
struct btrfs_key key;
- struct btrfs_key found_key;
if (!l)
return;
@@ -255,25 +449,35 @@ void btrfs_print_leaf(const struct extent_buffer *l)
btrfs_leaf_free_space(l), btrfs_header_owner(l));
print_eb_refs_lock(l);
for (i = 0 ; i < nr ; i++) {
+ char key_buf[KEY_TYPE_BUF_SIZE];
+
btrfs_item_key_to_cpu(l, &key, i);
type = key.type;
- pr_info("\titem %d key (%llu %u %llu) itemoff %d itemsize %d\n",
- i, key.objectid, type, key.offset,
+ key_type_string(&key, key_buf, KEY_TYPE_BUF_SIZE);
+
+ pr_info("\titem %d key (%llu %s %llu) itemoff %d itemsize %d\n",
+ i, key.objectid, key_buf, key.offset,
btrfs_item_offset(l, i), btrfs_item_size(l, i));
switch (type) {
case BTRFS_INODE_ITEM_KEY:
- ii = btrfs_item_ptr(l, i, struct btrfs_inode_item);
- pr_info("\t\tinode generation %llu size %llu mode %o\n",
- btrfs_inode_generation(l, ii),
- btrfs_inode_size(l, ii),
- btrfs_inode_mode(l, ii));
+ print_inode_item(l, i);
+ break;
+ case BTRFS_INODE_REF_KEY:
+ print_inode_ref_item(l, i);
+ break;
+ case BTRFS_INODE_EXTREF_KEY:
+ print_inode_extref_item(l, i);
break;
case BTRFS_DIR_ITEM_KEY:
- di = btrfs_item_ptr(l, i, struct btrfs_dir_item);
- btrfs_dir_item_key_to_cpu(l, di, &found_key);
- pr_info("\t\tdir oid %llu flags %u\n",
- found_key.objectid,
- btrfs_dir_flags(l, di));
+ case BTRFS_DIR_INDEX_KEY:
+ case BTRFS_XATTR_ITEM_KEY:
+ print_dir_item(l, i);
+ break;
+ case BTRFS_DIR_LOG_INDEX_KEY:
+ print_dir_log_index_item(l, i);
+ break;
+ case BTRFS_EXTENT_CSUM_KEY:
+ print_extent_csum(l, i);
break;
case BTRFS_ROOT_ITEM_KEY:
ri = btrfs_item_ptr(l, i, struct btrfs_root_item);
@@ -303,24 +507,7 @@ void btrfs_print_leaf(const struct extent_buffer *l)
btrfs_shared_data_ref_count(l, sref));
break;
case BTRFS_EXTENT_DATA_KEY:
- fi = btrfs_item_ptr(l, i,
- struct btrfs_file_extent_item);
- pr_info("\t\tgeneration %llu type %hhu\n",
- btrfs_file_extent_generation(l, fi),
- btrfs_file_extent_type(l, fi));
- if (btrfs_file_extent_type(l, fi) ==
- BTRFS_FILE_EXTENT_INLINE) {
- pr_info("\t\tinline extent data size %llu\n",
- btrfs_file_extent_ram_bytes(l, fi));
- break;
- }
- pr_info("\t\textent data disk bytenr %llu nr %llu\n",
- btrfs_file_extent_disk_bytenr(l, fi),
- btrfs_file_extent_disk_num_bytes(l, fi));
- pr_info("\t\textent data offset %llu nr %llu ram %llu\n",
- btrfs_file_extent_offset(l, fi),
- btrfs_file_extent_num_bytes(l, fi),
- btrfs_file_extent_ram_bytes(l, fi));
+ print_file_extent_item(l, i);
break;
case BTRFS_BLOCK_GROUP_ITEM_KEY:
bi = btrfs_item_ptr(l, i,
@@ -410,10 +597,9 @@ void btrfs_print_tree(const struct extent_buffer *c, bool follow)
print_eb_refs_lock(c);
for (i = 0; i < nr; i++) {
btrfs_node_key_to_cpu(c, &key, i);
- pr_info("\tkey %d (%llu %u %llu) block %llu gen %llu\n",
- i, key.objectid, key.type, key.offset,
- btrfs_node_blockptr(c, i),
- btrfs_node_ptr_generation(c, i));
+ pr_info("\tkey %d " BTRFS_KEY_FMT " block %llu gen %llu\n",
+ i, BTRFS_KEY_FMT_VALUE(&key), btrfs_node_blockptr(c, i),
+ btrfs_node_ptr_generation(c, i));
}
if (!follow)
return;
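A hypothetical debugging snippet (assuming a populated struct btrfs_key named key) shows how the stringified type is meant to be used:

        char type_buf[KEY_TYPE_BUF_SIZE];

        key_type_string(&key, type_buf, sizeof(type_buf));
        pr_info("key (%llu %s %llu)\n", key.objectid, type_buf, key.offset);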
diff --git a/fs/btrfs/print-tree.h b/fs/btrfs/print-tree.h
index 8504bf1702c7..d0e620bf5f5a 100644
--- a/fs/btrfs/print-tree.h
+++ b/fs/btrfs/print-tree.h
@@ -6,6 +6,8 @@
#ifndef BTRFS_PRINT_TREE_H
#define BTRFS_PRINT_TREE_H
+#include <linux/types.h>
+
/* Buffer size to contain tree name and possibly additional data (offset) */
#define BTRFS_ROOT_NAME_BUF_LEN 48
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index b8fa34e16abb..adc956432d2f 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -26,8 +26,8 @@ struct prop_handler {
const char *xattr_name;
int (*validate)(const struct btrfs_inode *inode, const char *value,
size_t len);
- int (*apply)(struct inode *inode, const char *value, size_t len);
- const char *(*extract)(const struct inode *inode);
+ int (*apply)(struct btrfs_inode *inode, const char *value, size_t len);
+ const char *(*extract)(const struct btrfs_inode *inode);
bool (*ignore)(const struct btrfs_inode *inode);
int inheritable;
};
@@ -121,7 +121,7 @@ int btrfs_set_prop(struct btrfs_trans_handle *trans, struct btrfs_inode *inode,
if (ret)
return ret;
- ret = handler->apply(&inode->vfs_inode, NULL, 0);
+ ret = handler->apply(inode, NULL, 0);
ASSERT(ret == 0);
return ret;
@@ -131,7 +131,7 @@ int btrfs_set_prop(struct btrfs_trans_handle *trans, struct btrfs_inode *inode,
value_len, flags);
if (ret)
return ret;
- ret = handler->apply(&inode->vfs_inode, value, value_len);
+ ret = handler->apply(inode, value, value_len);
if (ret) {
btrfs_setxattr(trans, &inode->vfs_inode, handler->xattr_name, NULL,
0, flags);
@@ -263,7 +263,7 @@ static void inode_prop_iterator(void *ctx,
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
- ret = handler->apply(inode, value, len);
+ ret = handler->apply(BTRFS_I(inode), value, len);
if (unlikely(ret))
btrfs_warn(root->fs_info,
"error applying prop %s to ino %llu (root %llu): %d",
@@ -273,12 +273,13 @@ static void inode_prop_iterator(void *ctx,
set_bit(BTRFS_INODE_HAS_PROPS, &BTRFS_I(inode)->runtime_flags);
}
-int btrfs_load_inode_props(struct inode *inode, struct btrfs_path *path)
+int btrfs_load_inode_props(struct btrfs_inode *inode, struct btrfs_path *path)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
- u64 ino = btrfs_ino(BTRFS_I(inode));
+ struct btrfs_root *root = inode->root;
+ u64 ino = btrfs_ino(inode);
- return iterate_object_props(root, path, ino, inode_prop_iterator, inode);
+ return iterate_object_props(root, path, ino, inode_prop_iterator,
+ &inode->vfs_inode);
}
static int prop_compression_validate(const struct btrfs_inode *inode,
@@ -300,26 +301,26 @@ static int prop_compression_validate(const struct btrfs_inode *inode,
return -EINVAL;
}
-static int prop_compression_apply(struct inode *inode, const char *value,
+static int prop_compression_apply(struct btrfs_inode *inode, const char *value,
size_t len)
{
- struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
int type;
/* Reset to defaults */
if (len == 0) {
- BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
- BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
- BTRFS_I(inode)->prop_compress = BTRFS_COMPRESS_NONE;
+ inode->flags &= ~BTRFS_INODE_COMPRESS;
+ inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
+ inode->prop_compress = BTRFS_COMPRESS_NONE;
return 0;
}
/* Set NOCOMPRESS flag */
if ((len == 2 && strncmp("no", value, 2) == 0) ||
(len == 4 && strncmp("none", value, 4) == 0)) {
- BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
- BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
- BTRFS_I(inode)->prop_compress = BTRFS_COMPRESS_NONE;
+ inode->flags |= BTRFS_INODE_NOCOMPRESS;
+ inode->flags &= ~BTRFS_INODE_COMPRESS;
+ inode->prop_compress = BTRFS_COMPRESS_NONE;
return 0;
}
@@ -336,9 +337,9 @@ static int prop_compression_apply(struct inode *inode, const char *value,
return -EINVAL;
}
- BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
- BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
- BTRFS_I(inode)->prop_compress = type;
+ inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
+ inode->flags |= BTRFS_INODE_COMPRESS;
+ inode->prop_compress = type;
return 0;
}
@@ -359,13 +360,13 @@ static bool prop_compression_ignore(const struct btrfs_inode *inode)
return false;
}
-static const char *prop_compression_extract(const struct inode *inode)
+static const char *prop_compression_extract(const struct btrfs_inode *inode)
{
- switch (BTRFS_I(inode)->prop_compress) {
+ switch (inode->prop_compress) {
case BTRFS_COMPRESS_ZLIB:
case BTRFS_COMPRESS_LZO:
case BTRFS_COMPRESS_ZSTD:
- return btrfs_compress_type2str(BTRFS_I(inode)->prop_compress);
+ return btrfs_compress_type2str(inode->prop_compress);
default:
break;
}
@@ -385,16 +386,16 @@ static struct prop_handler prop_handlers[] = {
};
int btrfs_inode_inherit_props(struct btrfs_trans_handle *trans,
- struct inode *inode, const struct inode *parent)
+ struct btrfs_inode *inode,
+ const struct btrfs_inode *parent)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
int i;
bool need_reserve = false;
- if (!test_bit(BTRFS_INODE_HAS_PROPS,
- &BTRFS_I(parent)->runtime_flags))
+ if (!test_bit(BTRFS_INODE_HAS_PROPS, &parent->runtime_flags))
return 0;
for (i = 0; i < ARRAY_SIZE(prop_handlers); i++) {
@@ -405,7 +406,7 @@ int btrfs_inode_inherit_props(struct btrfs_trans_handle *trans,
if (!h->inheritable)
continue;
- if (h->ignore(BTRFS_I(inode)))
+ if (h->ignore(inode))
continue;
value = h->extract(parent);
@@ -416,7 +417,7 @@ int btrfs_inode_inherit_props(struct btrfs_trans_handle *trans,
* This is not strictly necessary as the property should be
* valid, but in case it isn't, don't propagate it further.
*/
- ret = h->validate(BTRFS_I(inode), value, strlen(value));
+ ret = h->validate(inode, value, strlen(value));
if (ret)
continue;
@@ -436,16 +437,15 @@ int btrfs_inode_inherit_props(struct btrfs_trans_handle *trans,
return ret;
}
- ret = btrfs_setxattr(trans, inode, h->xattr_name, value,
+ ret = btrfs_setxattr(trans, &inode->vfs_inode, h->xattr_name, value,
strlen(value), 0);
if (!ret) {
ret = h->apply(inode, value, strlen(value));
if (ret)
- btrfs_setxattr(trans, inode, h->xattr_name,
+ btrfs_setxattr(trans, &inode->vfs_inode, h->xattr_name,
NULL, 0, 0);
else
- set_bit(BTRFS_INODE_HAS_PROPS,
- &BTRFS_I(inode)->runtime_flags);
+ set_bit(BTRFS_INODE_HAS_PROPS, &inode->runtime_flags);
}
if (need_reserve) {
diff --git a/fs/btrfs/props.h b/fs/btrfs/props.h
index 63546d0a9444..15d9a025c923 100644
--- a/fs/btrfs/props.h
+++ b/fs/btrfs/props.h
@@ -6,9 +6,9 @@
#ifndef BTRFS_PROPS_H
#define BTRFS_PROPS_H
+#include <linux/types.h>
#include <linux/compiler_types.h>
-struct inode;
struct btrfs_inode;
struct btrfs_path;
struct btrfs_trans_handle;
@@ -22,10 +22,10 @@ int btrfs_validate_prop(const struct btrfs_inode *inode, const char *name,
const char *value, size_t value_len);
bool btrfs_ignore_prop(const struct btrfs_inode *inode, const char *name);
-int btrfs_load_inode_props(struct inode *inode, struct btrfs_path *path);
+int btrfs_load_inode_props(struct btrfs_inode *inode, struct btrfs_path *path);
int btrfs_inode_inherit_props(struct btrfs_trans_handle *trans,
- struct inode *inode,
- const struct inode *dir);
+ struct btrfs_inode *inode,
+ const struct btrfs_inode *dir);
#endif
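To show the effect of the conversion, a hypothetical handler written against the new signatures (illustrative only, not one of the real prop_handlers[] entries):

static int prop_example_apply(struct btrfs_inode *inode, const char *value, size_t len)
{
        /* Handlers now touch btrfs_inode fields directly, no BTRFS_I() needed. */
        if (len == 0)
                inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
        return 0;
}

static const char *prop_example_extract(const struct btrfs_inode *inode)
{
        return NULL;
}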
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index a6f92836c9b1..9e2b53e90dcb 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -83,7 +83,7 @@ static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *qgroup, u64 num_bytes,
enum btrfs_qgroup_rsv_type type)
{
- trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
+ trace_btrfs_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
qgroup->rsv.values[type] += num_bytes;
}
@@ -91,7 +91,7 @@ static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *qgroup, u64 num_bytes,
enum btrfs_qgroup_rsv_type type)
{
- trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
+ trace_btrfs_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
if (qgroup->rsv.values[type] >= num_bytes) {
qgroup->rsv.values[type] -= num_bytes;
return;
@@ -160,23 +160,34 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
+static int btrfs_qgroup_qgroupid_key_cmp(const void *key, const struct rb_node *node)
+{
+ const u64 *qgroupid = key;
+ const struct btrfs_qgroup *qgroup = rb_entry(node, struct btrfs_qgroup, node);
+
+ if (qgroup->qgroupid < *qgroupid)
+ return -1;
+ else if (qgroup->qgroupid > *qgroupid)
+ return 1;
+
+ return 0;
+}
+
/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(const struct btrfs_fs_info *fs_info,
u64 qgroupid)
{
- struct rb_node *n = fs_info->qgroup_tree.rb_node;
- struct btrfs_qgroup *qgroup;
+ struct rb_node *node;
- while (n) {
- qgroup = rb_entry(n, struct btrfs_qgroup, node);
- if (qgroup->qgroupid < qgroupid)
- n = n->rb_left;
- else if (qgroup->qgroupid > qgroupid)
- n = n->rb_right;
- else
- return qgroup;
- }
- return NULL;
+ node = rb_find(&qgroupid, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_key_cmp);
+ return rb_entry_safe(node, struct btrfs_qgroup, node);
+}
+
+static int btrfs_qgroup_qgroupid_cmp(struct rb_node *new, const struct rb_node *existing)
+{
+ const struct btrfs_qgroup *new_qgroup = rb_entry(new, struct btrfs_qgroup, node);
+
+ return btrfs_qgroup_qgroupid_key_cmp(&new_qgroup->qgroupid, existing);
}
/*
@@ -191,39 +202,25 @@ static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *prealloc,
u64 qgroupid)
{
- struct rb_node **p = &fs_info->qgroup_tree.rb_node;
- struct rb_node *parent = NULL;
- struct btrfs_qgroup *qgroup;
+ struct rb_node *node;
/* Caller must have pre-allocated @prealloc. */
ASSERT(prealloc);
- while (*p) {
- parent = *p;
- qgroup = rb_entry(parent, struct btrfs_qgroup, node);
-
- if (qgroup->qgroupid < qgroupid) {
- p = &(*p)->rb_left;
- } else if (qgroup->qgroupid > qgroupid) {
- p = &(*p)->rb_right;
- } else {
- kfree(prealloc);
- return qgroup;
- }
+ prealloc->qgroupid = qgroupid;
+ node = rb_find_add(&prealloc->node, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_cmp);
+ if (node) {
+ kfree(prealloc);
+ return rb_entry(node, struct btrfs_qgroup, node);
}
- qgroup = prealloc;
- qgroup->qgroupid = qgroupid;
- INIT_LIST_HEAD(&qgroup->groups);
- INIT_LIST_HEAD(&qgroup->members);
- INIT_LIST_HEAD(&qgroup->dirty);
- INIT_LIST_HEAD(&qgroup->iterator);
- INIT_LIST_HEAD(&qgroup->nested_iterator);
+ INIT_LIST_HEAD(&prealloc->groups);
+ INIT_LIST_HEAD(&prealloc->members);
+ INIT_LIST_HEAD(&prealloc->dirty);
+ INIT_LIST_HEAD(&prealloc->iterator);
+ INIT_LIST_HEAD(&prealloc->nested_iterator);
- rb_link_node(&qgroup->node, parent, p);
- rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
-
- return qgroup;
+ return prealloc;
}
static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
@@ -349,13 +346,27 @@ int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid
}
#endif
-static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info)
+__printf(2, 3)
+static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
+ const u64 old_flags = fs_info->qgroup_flags;
+
if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
return;
fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
+ if (!(old_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_warn_rl(fs_info, "qgroup marked inconsistent, %pV", &vaf);
+ va_end(args);
+ }
}
static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info,
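qgroup_mark_inconsistent() forwards the caller's reason through a single printk using struct va_format and the %pV specifier, the usual kernel pattern for printf-like wrappers. A generic sketch of that pattern, with a hypothetical wrapper name and message:

__printf(1, 2)
static void warn_with_reason(const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        /* The caller's format string and arguments expand inside %pV. */
        pr_warn("operation failed, %pV\n", &vaf);
        va_end(args);
}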
@@ -386,12 +397,6 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
if (!fs_info->quota_root)
return 0;
- fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
- if (!fs_info->qgroup_ulist) {
- ret = -ENOMEM;
- goto out;
- }
-
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
@@ -434,13 +439,10 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
goto out;
}
fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr);
- if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) {
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
qgroup_read_enable_gen(fs_info, l, slot, ptr);
- } else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation) {
- qgroup_mark_inconsistent(fs_info);
- btrfs_err(fs_info,
- "qgroup generation mismatch, marked as inconsistent");
- }
+ else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation)
+ qgroup_mark_inconsistent(fs_info, "qgroup generation mismatch");
rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
goto next1;
}
@@ -451,10 +453,8 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
qgroup = find_qgroup_rb(fs_info, found_key.offset);
if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
- (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
- btrfs_err(fs_info, "inconsistent qgroup config");
- qgroup_mark_inconsistent(fs_info);
- }
+ (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY))
+ qgroup_mark_inconsistent(fs_info, "inconsistent qgroup config");
if (!qgroup) {
struct btrfs_qgroup *prealloc;
struct btrfs_root *tree_root = fs_info->tree_root;
@@ -476,7 +476,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
* during mount before we start doing things like creating
* subvolumes.
*/
- if (is_fstree(qgroup->qgroupid) &&
+ if (btrfs_is_fstree(qgroup->qgroupid) &&
qgroup->qgroupid > tree_root->free_objectid)
/*
* Don't need to check against BTRFS_LAST_FREE_OBJECTID,
@@ -581,8 +581,6 @@ out:
if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
} else {
- ulist_free(fs_info->qgroup_ulist);
- fs_info->qgroup_ulist = NULL;
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
btrfs_sysfs_del_qgroups(fs_info);
}
@@ -630,29 +628,30 @@ bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info)
/*
* This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
- * first two are in single-threaded paths.And for the third one, we have set
- * quota_root to be null with qgroup_lock held before, so it is safe to clean
- * up the in-memory structures without qgroup_lock held.
+ * the first two of which are in single-threaded paths.
*/
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
struct rb_node *n;
struct btrfs_qgroup *qgroup;
+ /*
+ * btrfs_quota_disable() can be called concurrently with
+ * btrfs_qgroup_rescan() -> qgroup_rescan_zero_tracking(), so take the
+ * lock.
+ */
+ spin_lock(&fs_info->qgroup_lock);
while ((n = rb_first(&fs_info->qgroup_tree))) {
qgroup = rb_entry(n, struct btrfs_qgroup, node);
rb_erase(n, &fs_info->qgroup_tree);
__del_qgroup_rb(qgroup);
+ spin_unlock(&fs_info->qgroup_lock);
btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
kfree(qgroup);
+ spin_lock(&fs_info->qgroup_lock);
}
- /*
- * We call btrfs_free_qgroup_config() when unmounting
- * filesystem and disabling quota, so we set qgroup_ulist
- * to be null here to avoid double free.
- */
- ulist_free(fs_info->qgroup_ulist);
- fs_info->qgroup_ulist = NULL;
+ spin_unlock(&fs_info->qgroup_lock);
+
btrfs_sysfs_del_qgroups(fs_info);
}
@@ -661,7 +660,7 @@ static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
{
int ret;
struct btrfs_root *quota_root = trans->fs_info->quota_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
path = btrfs_alloc_path();
@@ -673,10 +672,6 @@ static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
key.offset = dst;
ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
-
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
-
- btrfs_free_path(path);
return ret;
}
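The BTRFS_PATH_AUTO_FREE() conversions in the rest of this file all follow the same pattern; a minimal hypothetical helper shows it, assuming the macro declares a btrfs_path pointer with a __free(btrfs_free_path) cleanup attribute, as its uses here imply:

static int lookup_key(struct btrfs_root *root, const struct btrfs_key *key)
{
        BTRFS_PATH_AUTO_FREE(path);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /* No explicit btrfs_free_path(): the cleanup handler runs on return. */
        return btrfs_search_slot(NULL, root, key, path, 0, 0);
}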
@@ -685,7 +680,7 @@ static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
{
int ret;
struct btrfs_root *quota_root = trans->fs_info->quota_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
path = btrfs_alloc_path();
@@ -698,24 +693,19 @@ static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
if (ret < 0)
- goto out;
+ return ret;
- if (ret > 0) {
- ret = -ENOENT;
- goto out;
- }
+ if (ret > 0)
+ return -ENOENT;
- ret = btrfs_del_item(trans, quota_root, path);
-out:
- btrfs_free_path(path);
- return ret;
+ return btrfs_del_item(trans, quota_root, path);
}
static int add_qgroup_item(struct btrfs_trans_handle *trans,
struct btrfs_root *quota_root, u64 qgroupid)
{
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_qgroup_info_item *qgroup_info;
struct btrfs_qgroup_limit_item *qgroup_limit;
struct extent_buffer *leaf;
@@ -741,7 +731,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
sizeof(*qgroup_info));
if (ret && ret != -EEXIST)
- goto out;
+ return ret;
leaf = path->nodes[0];
qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
@@ -752,15 +742,13 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
- btrfs_mark_buffer_dirty(trans, leaf);
-
btrfs_release_path(path);
key.type = BTRFS_QGROUP_LIMIT_KEY;
ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
sizeof(*qgroup_limit));
if (ret && ret != -EEXIST)
- goto out;
+ return ret;
leaf = path->nodes[0];
qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
@@ -771,19 +759,14 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
- btrfs_mark_buffer_dirty(trans, leaf);
-
- ret = 0;
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
int ret;
struct btrfs_root *quota_root = trans->fs_info->quota_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
path = btrfs_alloc_path();
@@ -795,33 +778,27 @@ static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
key.offset = qgroupid;
ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
if (ret < 0)
- goto out;
+ return ret;
- if (ret > 0) {
- ret = -ENOENT;
- goto out;
- }
+ if (ret > 0)
+ return -ENOENT;
ret = btrfs_del_item(trans, quota_root, path);
if (ret)
- goto out;
+ return ret;
btrfs_release_path(path);
key.type = BTRFS_QGROUP_LIMIT_KEY;
ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
if (ret < 0)
- goto out;
+ return ret;
- if (ret > 0) {
- ret = -ENOENT;
- goto out;
- }
+ if (ret > 0)
+ return -ENOENT;
ret = btrfs_del_item(trans, quota_root, path);
-out:
- btrfs_free_path(path);
return ret;
}
@@ -829,7 +806,7 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
struct btrfs_qgroup *qgroup)
{
struct btrfs_root *quota_root = trans->fs_info->quota_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *l;
struct btrfs_qgroup_limit_item *qgroup_limit;
@@ -849,7 +826,7 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
ret = -ENOENT;
if (ret)
- goto out;
+ return ret;
l = path->nodes[0];
slot = path->slots[0];
@@ -860,10 +837,6 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
- btrfs_mark_buffer_dirty(trans, l);
-
-out:
- btrfs_free_path(path);
return ret;
}
@@ -872,7 +845,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root = fs_info->quota_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *l;
struct btrfs_qgroup_info_item *qgroup_info;
@@ -895,7 +868,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
ret = -ENOENT;
if (ret)
- goto out;
+ return ret;
l = path->nodes[0];
slot = path->slots[0];
@@ -906,10 +879,6 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
- btrfs_mark_buffer_dirty(trans, l);
-
-out:
- btrfs_free_path(path);
return ret;
}
@@ -917,7 +886,7 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root = fs_info->quota_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *l;
struct btrfs_qgroup_status_item *ptr;
@@ -937,7 +906,7 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
ret = -ENOENT;
if (ret)
- goto out;
+ return ret;
l = path->nodes[0];
slot = path->slots[0];
@@ -948,10 +917,6 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
btrfs_set_qgroup_status_rescan(l, ptr,
fs_info->qgroup_rescan_progress.objectid);
- btrfs_mark_buffer_dirty(trans, l);
-
-out:
- btrfs_free_path(path);
return ret;
}
@@ -961,7 +926,7 @@ out:
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *leaf = NULL;
int ret;
@@ -972,13 +937,13 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
return -ENOMEM;
key.objectid = 0;
- key.offset = 0;
key.type = 0;
+ key.offset = 0;
while (1) {
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0)
- goto out;
+ return ret;
leaf = path->nodes[0];
nr = btrfs_header_nritems(leaf);
if (!nr)
@@ -991,14 +956,12 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
path->slots[0] = 0;
ret = btrfs_del_items(trans, root, path, 0, nr);
if (ret)
- goto out;
+ return ret;
btrfs_release_path(path);
}
- ret = 0;
-out:
- btrfs_free_path(path);
- return ret;
+
+ return 0;
}
int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
@@ -1014,7 +977,6 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *qgroup = NULL;
struct btrfs_qgroup *prealloc = NULL;
struct btrfs_trans_handle *trans = NULL;
- struct ulist *ulist = NULL;
const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA);
int ret = 0;
int slot;
@@ -1037,12 +999,6 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
if (fs_info->quota_root)
goto out;
- ulist = ulist_alloc(GFP_KERNEL);
- if (!ulist) {
- ret = -ENOMEM;
- goto out;
- }
-
ret = btrfs_sysfs_add_qgroups(fs_info);
if (ret < 0)
goto out;
@@ -1082,9 +1038,6 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
if (fs_info->quota_root)
goto out;
- fs_info->qgroup_ulist = ulist;
- ulist = NULL;
-
/*
* initially create the quota tree
*/
@@ -1096,7 +1049,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
}
path = btrfs_alloc_path();
- if (!path) {
+ if (unlikely(!path)) {
ret = -ENOMEM;
btrfs_abort_transaction(trans, ret);
goto out_free_root;
@@ -1108,7 +1061,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
sizeof(*ptr));
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
@@ -1121,6 +1074,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
if (simple) {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
+ btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
} else {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
@@ -1129,8 +1083,6 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
BTRFS_QGROUP_STATUS_FLAGS_MASK);
btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
- btrfs_mark_buffer_dirty(trans, leaf);
-
key.objectid = 0;
key.type = BTRFS_ROOT_REF_KEY;
key.offset = 0;
@@ -1139,7 +1091,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
if (ret > 0)
goto out_add_root;
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
@@ -1157,7 +1109,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
/* We should not have a stray @prealloc pointer. */
ASSERT(prealloc == NULL);
prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
- if (!prealloc) {
+ if (unlikely(!prealloc)) {
ret = -ENOMEM;
btrfs_abort_transaction(trans, ret);
goto out_free_path;
@@ -1165,26 +1117,21 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
ret = add_qgroup_item(trans, quota_root,
found_key.offset);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
prealloc = NULL;
- if (IS_ERR(qgroup)) {
- ret = PTR_ERR(qgroup);
- btrfs_abort_transaction(trans, ret);
- goto out_free_path;
- }
ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
ret = btrfs_search_slot_for_read(tree_root, &found_key,
path, 1, 0);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
@@ -1198,7 +1145,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
}
}
ret = btrfs_next_item(tree_root, path);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
@@ -1209,7 +1156,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
out_add_root:
btrfs_release_path(path);
ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
@@ -1223,7 +1170,7 @@ out_add_root:
qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID);
prealloc = NULL;
ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
@@ -1254,8 +1201,6 @@ out_add_root:
spin_lock(&fs_info->qgroup_lock);
fs_info->quota_root = quota_root;
set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
- if (simple)
- btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
spin_unlock(&fs_info->qgroup_lock);
/* Skip rescan for simple qgroups. */
@@ -1291,18 +1236,21 @@ out_free_root:
if (ret)
btrfs_put_root(quota_root);
out:
- if (ret) {
- ulist_free(fs_info->qgroup_ulist);
- fs_info->qgroup_ulist = NULL;
+ if (ret)
btrfs_sysfs_del_qgroups(fs_info);
- }
mutex_unlock(&fs_info->qgroup_ioctl_lock);
if (ret && trans)
btrfs_end_transaction(trans);
else if (trans)
ret = btrfs_end_transaction(trans);
- ulist_free(ulist);
- kfree(prealloc);
+
+ /*
+ * At this point we either failed to allocate prealloc, or we
+ * succeeded and passed its ownership to add_qgroup_rb(). Either way,
+ * prealloc must be NULL here or something is wrong.
+ */
+ ASSERT(prealloc == NULL);
+
return ret;
}
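/*
 * Aside: the ASSERT(prealloc == NULL) above encodes an ownership rule — once
 * the preallocated qgroup has been handed to add_qgroup_rb(), the caller
 * clears its local pointer and never frees it. A minimal userspace sketch of
 * that convention (hypothetical names, not the kernel code):
 */
#include <assert.h>
#include <stdlib.h>

struct qgroup { unsigned long long id; };

static struct qgroup *tree_head;	/* stand-in for the rbtree */

/* Consumes @prealloc unconditionally; the "tree" now owns it. */
static struct qgroup *insert_qgroup(struct qgroup *prealloc, unsigned long long id)
{
	prealloc->id = id;
	tree_head = prealloc;
	return prealloc;
}

int main(void)
{
	struct qgroup *prealloc = calloc(1, sizeof(*prealloc));

	if (!prealloc)
		return 1;

	insert_qgroup(prealloc, 5);
	prealloc = NULL;		/* ownership moved, drop the local reference */

	assert(prealloc == NULL);	/* mirrors the ASSERT in the patch */
	free(tree_head);		/* the owner frees it */
	return 0;
}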
@@ -1373,11 +1321,14 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
/*
* We have nothing held here and no trans handle, just return the error
- * if there is one.
+ * if there is one, and set the quota enabled bit back since we didn't
+ * actually disable quotas.
*/
ret = flush_reservations(fs_info);
- if (ret)
+ if (ret) {
+ set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
return ret;
+ }
/*
* 1 For the root item
@@ -1412,13 +1363,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
btrfs_free_qgroup_config(fs_info);
ret = btrfs_clean_quota_tree(trans, quota_root);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
ret = btrfs_del_root(trans, &quota_root->root_key);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -1489,9 +1440,9 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
struct btrfs_qgroup *src, int sign)
{
struct btrfs_qgroup *qgroup;
- struct btrfs_qgroup *cur;
LIST_HEAD(qgroup_list);
u64 num_bytes = src->excl;
+ u64 num_bytes_cmpr = src->excl_cmpr;
int ret = 0;
qgroup = find_qgroup_rb(fs_info, ref_root);
@@ -1499,15 +1450,16 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
goto out;
qgroup_iterator_add(&qgroup_list, qgroup);
- list_for_each_entry(cur, &qgroup_list, iterator) {
+ list_for_each_entry(qgroup, &qgroup_list, iterator) {
struct btrfs_qgroup_list *glist;
qgroup->rfer += sign * num_bytes;
- qgroup->rfer_cmpr += sign * num_bytes;
+ qgroup->rfer_cmpr += sign * num_bytes_cmpr;
WARN_ON(sign < 0 && qgroup->excl < num_bytes);
+ WARN_ON(sign < 0 && qgroup->excl_cmpr < num_bytes_cmpr);
qgroup->excl += sign * num_bytes;
- qgroup->excl_cmpr += sign * num_bytes;
+ qgroup->excl_cmpr += sign * num_bytes_cmpr;
if (sign > 0)
qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
@@ -1574,8 +1526,10 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst
ASSERT(prealloc);
/* Check the level of src and dst first */
- if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
+ if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) {
+ kfree(prealloc);
return -EINVAL;
+ }
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root) {
@@ -1698,9 +1652,6 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
struct btrfs_qgroup *prealloc = NULL;
int ret = 0;
- if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
- return 0;
-
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root) {
ret = -ENOTCONN;
@@ -1731,7 +1682,12 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
- kfree(prealloc);
+ /*
+ * At this point we either failed to allocate prealloc, or we
+ * succeeded and passed its ownership to add_qgroup_rb(). Either way,
+ * prealloc must be NULL here or something is wrong.
+ */
+ ASSERT(prealloc == NULL);
return ret;
}
@@ -1743,8 +1699,7 @@ out:
static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup)
{
struct btrfs_key key;
- struct btrfs_path *path;
- int ret;
+ BTRFS_PATH_AUTO_FREE(path);
/*
* Squota would never be inconsistent, but there can still be case
@@ -1777,13 +1732,11 @@ static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup
if (!path)
return -ENOMEM;
- ret = btrfs_find_root(fs_info->tree_root, &key, path, NULL, NULL);
- btrfs_free_path(path);
/*
* The @ret from btrfs_find_root() exactly matches our definition for
* the return value, thus can be returned directly.
*/
- return ret;
+ return btrfs_find_root(fs_info->tree_root, &key, path, NULL, NULL);
}
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
@@ -1839,9 +1792,19 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
* Thus its reserved space should all be zero, no matter if qgroup
* is consistent or the mode.
*/
- WARN_ON(qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
- qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
- qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
+ if (qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]) {
+ DEBUG_WARN();
+ btrfs_warn_rl(fs_info,
+"to be deleted qgroup %u/%llu has non-zero numbers, data %llu meta prealloc %llu meta pertrans %llu",
+ btrfs_qgroup_level(qgroup->qgroupid),
+ btrfs_qgroup_subvolid(qgroup->qgroupid),
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA],
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC],
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
+ }
/*
* The same for rfer/excl numbers, but that's only if our qgroup is
* consistent and if it's in regular qgroup mode.
@@ -1850,15 +1813,15 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
*/
if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL &&
!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
- if (WARN_ON(qgroup->rfer || qgroup->excl ||
- qgroup->rfer_cmpr || qgroup->excl_cmpr)) {
- btrfs_warn_rl(fs_info,
-"to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu",
- btrfs_qgroup_level(qgroup->qgroupid),
- btrfs_qgroup_subvolid(qgroup->qgroupid),
- qgroup->rfer, qgroup->rfer_cmpr,
- qgroup->excl, qgroup->excl_cmpr);
- qgroup_mark_inconsistent(fs_info);
+ if (qgroup->rfer || qgroup->excl ||
+ qgroup->rfer_cmpr || qgroup->excl_cmpr) {
+ DEBUG_WARN();
+ qgroup_mark_inconsistent(fs_info,
+ "to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu",
+ btrfs_qgroup_level(qgroup->qgroupid),
+ btrfs_qgroup_subvolid(qgroup->qgroupid),
+ qgroup->rfer, qgroup->rfer_cmpr,
+ qgroup->excl, qgroup->excl_cmpr);
}
}
del_qgroup_rb(fs_info, qgroupid);
@@ -1881,18 +1844,15 @@ int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 su
struct btrfs_trans_handle *trans;
int ret;
- if (!is_fstree(subvolid) || !btrfs_qgroup_enabled(fs_info) || !fs_info->quota_root)
+ if (!btrfs_is_fstree(subvolid) || !btrfs_qgroup_enabled(fs_info) ||
+ !fs_info->quota_root)
return 0;
/*
* Commit current transaction to make sure all the rfer/excl numbers
* get updated.
*/
- trans = btrfs_start_transaction(fs_info->quota_root, 0);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
-
- ret = btrfs_commit_transaction(trans);
+ ret = btrfs_commit_current_transaction(fs_info->quota_root);
if (ret < 0)
return ret;
@@ -1905,8 +1865,11 @@ int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 su
/*
* It's squota and the subvolume still has numbers needed for future
* accounting, in this case we can not delete it. Just skip it.
+ *
+ * Or the qgroup has already been removed by a qgroup rescan. In both
+ * cases it is safe to ignore the error.
*/
- if (ret == -EBUSY)
+ if (ret == -EBUSY || ret == -ENOENT)
ret = 0;
return ret;
}
@@ -1977,11 +1940,8 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
spin_unlock(&fs_info->qgroup_lock);
ret = update_qgroup_limit_item(trans, qgroup);
- if (ret) {
- qgroup_mark_inconsistent(fs_info);
- btrfs_info(fs_info, "unable to update quota limit for %llu",
- qgroupid);
- }
+ if (ret)
+ qgroup_mark_inconsistent(fs_info, "qgroup item update error %d", ret);
out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
@@ -2036,7 +1996,7 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
ret = __xa_store(&delayed_refs->dirty_extents, index, record, GFP_ATOMIC);
xa_unlock(&delayed_refs->dirty_extents);
if (xa_is_err(ret)) {
- qgroup_mark_inconsistent(fs_info);
+ qgroup_mark_inconsistent(fs_info, "xarray insert error: %d", xa_err(ret));
return xa_err(ret);
}
@@ -2103,10 +2063,8 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
ret = btrfs_find_all_roots(&ctx, true);
if (ret < 0) {
- qgroup_mark_inconsistent(fs_info);
- btrfs_warn(fs_info,
-"error accounting new delayed refs extent (err code: %d), quota inconsistent",
- ret);
+ qgroup_mark_inconsistent(fs_info,
+ "error accounting new delayed refs extent: %d", ret);
return 0;
}
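/*
 * Aside: several hunks in this file give qgroup_mark_inconsistent() a
 * printf-style signature so callers can pass the reason inline. A minimal
 * userspace sketch of such a variadic forwarding helper (hypothetical
 * function; a kernel implementation would route the va_list through
 * vsnprintf or %pV rather than vfprintf):
 */
#include <stdarg.h>
#include <stdio.h>

static int inconsistent;

static void __attribute__((format(printf, 1, 2)))
mark_inconsistent(const char *fmt, ...)
{
	va_list args;

	inconsistent = 1;

	va_start(args, fmt);
	fprintf(stderr, "qgroup marked inconsistent: ");
	vfprintf(stderr, fmt, args);
	fputc('\n', stderr);
	va_end(args);
}

int main(void)
{
	mark_inconsistent("qgroup item update error %d", -5);
	return inconsistent ? 0 : 1;
}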
@@ -2327,7 +2285,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
bool trace_leaf)
{
struct btrfs_key key;
- struct btrfs_path *src_path;
+ BTRFS_PATH_AUTO_FREE(src_path);
struct btrfs_fs_info *fs_info = trans->fs_info;
u32 nodesize = fs_info->nodesize;
int cur_level = root_level;
@@ -2339,10 +2297,8 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
return -EINVAL;
src_path = btrfs_alloc_path();
- if (!src_path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!src_path)
+ return -ENOMEM;
if (dst_level)
btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
@@ -2350,7 +2306,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
/* For src_path */
- atomic_inc(&src_eb->refs);
+ refcount_inc(&src_eb->refs);
src_path->nodes[root_level] = src_eb;
src_path->slots[root_level] = dst_path->slots[root_level];
src_path->locks[root_level] = 0;
@@ -2368,10 +2324,8 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
parent_slot = src_path->slots[cur_level + 1];
eb = btrfs_read_node_slot(eb, parent_slot);
- if (IS_ERR(eb)) {
- ret = PTR_ERR(eb);
- goto out;
- }
+ if (IS_ERR(eb))
+ return PTR_ERR(eb);
src_path->nodes[cur_level] = eb;
@@ -2392,10 +2346,8 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
&src_key, src_path->slots[cur_level]);
}
/* Content mismatch, something went wrong */
- if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
- ret = -ENOENT;
- goto out;
- }
+ if (btrfs_comp_cpu_keys(&dst_key, &src_key))
+ return -ENOENT;
cur_level--;
}
@@ -2406,21 +2358,20 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
nodesize);
if (ret < 0)
- goto out;
+ return ret;
ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start,
nodesize);
if (ret < 0)
- goto out;
+ return ret;
/* Record leaf file extents */
if (dst_level == 0 && trace_leaf) {
ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
if (ret < 0)
- goto out;
+ return ret;
ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
}
-out:
- btrfs_free_path(src_path);
+
return ret;
}
@@ -2459,9 +2410,9 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
int i;
/* Level sanity check */
- if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
- root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
- root_level < cur_level) {
+ if (unlikely(cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
+ root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
+ root_level < cur_level)) {
btrfs_err_rl(fs_info,
"%s: bad levels, cur_level=%d root_level=%d",
__func__, cur_level, root_level);
@@ -2477,7 +2428,7 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
* dst_path->nodes[root_level] must be initialized before
* calling this function.
*/
- if (cur_level == root_level) {
+ if (unlikely(cur_level == root_level)) {
btrfs_err_rl(fs_info,
"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
__func__, root_level, root_level, cur_level);
@@ -2563,7 +2514,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
return 0;
/* Wrong parameter order */
- if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
+ if (unlikely(btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb))) {
btrfs_err_rl(fs_info,
"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
btrfs_header_generation(src_eb),
@@ -2571,7 +2522,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
return -EUCLEAN;
}
- if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
+ if (unlikely(!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb))) {
ret = -EIO;
goto out;
}
@@ -2583,7 +2534,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
goto out;
}
/* For dst_path */
- atomic_inc(&dst_eb->refs);
+ refcount_inc(&dst_eb->refs);
dst_path->nodes[level] = dst_eb;
dst_path->slots[level] = 0;
dst_path->locks[level] = 0;
@@ -2598,7 +2549,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
out:
btrfs_free_path(dst_path);
if (ret < 0)
- qgroup_mark_inconsistent(fs_info);
+ qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret);
return ret;
}
@@ -2621,7 +2572,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
int level;
u8 drop_subptree_thres;
struct extent_buffer *eb = root_eb;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
ASSERT(0 <= root_level && root_level < BTRFS_MAX_LEVEL);
ASSERT(root_eb != NULL);
@@ -2642,7 +2593,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
* mark qgroup inconsistent.
*/
if (root_level >= drop_subptree_thres) {
- qgroup_mark_inconsistent(fs_info);
+ qgroup_mark_inconsistent(fs_info, "subtree level reached threshold");
return 0;
}
@@ -2654,12 +2605,12 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
ret = btrfs_read_extent_buffer(root_eb, &check);
if (ret)
- goto out;
+ return ret;
}
if (root_level == 0) {
ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
- goto out;
+ return ret;
}
path = btrfs_alloc_path();
@@ -2675,7 +2626,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
* walk back up the tree (adjusting slot pointers as we go)
* and restart the search process.
*/
- atomic_inc(&root_eb->refs); /* For path */
+ refcount_inc(&root_eb->refs); /* For path */
path->nodes[root_level] = root_eb;
path->slots[root_level] = 0;
path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
@@ -2695,10 +2646,8 @@ walk_down:
child_bytenr = btrfs_node_blockptr(eb, parent_slot);
eb = btrfs_read_node_slot(eb, parent_slot);
- if (IS_ERR(eb)) {
- ret = PTR_ERR(eb);
- goto out;
- }
+ if (IS_ERR(eb))
+ return PTR_ERR(eb);
path->nodes[level] = eb;
path->slots[level] = 0;
@@ -2709,14 +2658,14 @@ walk_down:
ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
fs_info->nodesize);
if (ret)
- goto out;
+ return ret;
}
if (level == 0) {
ret = btrfs_qgroup_trace_leaf_items(trans,
path->nodes[level]);
if (ret)
- goto out;
+ return ret;
/* Nonzero return here means we completed our search */
ret = adjust_slots_upwards(path, root_level);
@@ -2730,11 +2679,7 @@ walk_down:
level--;
}
- ret = 0;
-out:
- btrfs_free_path(path);
-
- return ret;
+ return 0;
}
static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup)
@@ -2762,7 +2707,7 @@ static void qgroup_iterator_nested_clean(struct list_head *head)
*/
static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
struct ulist *roots, struct list_head *qgroups,
- u64 seq, int update_old)
+ u64 seq, bool update_old)
{
struct ulist_node *unode;
struct ulist_iterator uiter;
@@ -2846,8 +2791,8 @@ static void qgroup_update_counters(struct btrfs_fs_info *fs_info,
cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
- trace_qgroup_update_counters(fs_info, qg, cur_old_count,
- cur_new_count);
+ trace_btrfs_qgroup_update_counters(fs_info, qg, cur_old_count,
+ cur_new_count);
/* Rfer update part */
if (cur_old_count == 0 && cur_new_count > 0) {
@@ -2941,7 +2886,7 @@ static int maybe_fs_roots(struct ulist *roots)
* trees.
* If it contains a non-fs tree, it won't be shared with fs/subvol trees.
*/
- return is_fstree(unode->val);
+ return btrfs_is_fstree(unode->val);
}
int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
@@ -3109,8 +3054,7 @@ cleanup:
kfree(record);
}
- trace_qgroup_num_dirty_extents(fs_info, trans->transid,
- num_dirty_extents);
+ trace_btrfs_qgroup_num_dirty_extents(fs_info, trans->transid, num_dirty_extents);
return ret;
}
@@ -3143,10 +3087,12 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
spin_unlock(&fs_info->qgroup_lock);
ret = update_qgroup_info_item(trans, qgroup);
if (ret)
- qgroup_mark_inconsistent(fs_info);
+ qgroup_mark_inconsistent(fs_info,
+ "qgroup info item update error %d", ret);
ret = update_qgroup_limit_item(trans, qgroup);
if (ret)
- qgroup_mark_inconsistent(fs_info);
+ qgroup_mark_inconsistent(fs_info,
+ "qgroup limit item update error %d", ret);
spin_lock(&fs_info->qgroup_lock);
}
if (btrfs_qgroup_enabled(fs_info))
@@ -3157,7 +3103,8 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
ret = update_qgroup_status_item(trans);
if (ret)
- qgroup_mark_inconsistent(fs_info);
+ qgroup_mark_inconsistent(fs_info,
+ "qgroup status item update error %d", ret);
return ret;
}
@@ -3332,13 +3279,16 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
struct btrfs_root *quota_root;
struct btrfs_qgroup *srcgroup;
struct btrfs_qgroup *dstgroup;
- struct btrfs_qgroup *prealloc;
+ struct btrfs_qgroup *prealloc = NULL;
struct btrfs_qgroup_list **qlist_prealloc = NULL;
bool free_inherit = false;
bool need_rescan = false;
u32 level_size = 0;
u64 nums;
+ if (!btrfs_qgroup_enabled(fs_info))
+ return 0;
+
prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
if (!prealloc)
return -ENOMEM;
@@ -3362,8 +3312,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
if (!committing)
mutex_lock(&fs_info->qgroup_ioctl_lock);
- if (!btrfs_qgroup_enabled(fs_info))
- goto out;
quota_root = fs_info->quota_root;
if (!quota_root) {
@@ -3564,7 +3512,7 @@ out:
if (!committing)
mutex_unlock(&fs_info->qgroup_ioctl_lock);
if (need_rescan)
- qgroup_mark_inconsistent(fs_info);
+ qgroup_mark_inconsistent(fs_info, "qgroup inherit needs a rescan");
if (qlist_prealloc) {
for (int i = 0; i < inherit->num_qgroups; i++)
kfree(qlist_prealloc[i]);
@@ -3572,7 +3520,14 @@ out:
}
if (free_inherit)
kfree(inherit);
- kfree(prealloc);
+
+ /*
+ * At this point we either failed to allocate prealloc, or we
+ * succeeded and passed its ownership to add_qgroup_rb(). Either way,
+ * prealloc must be NULL here or something is wrong.
+ */
+ ASSERT(prealloc == NULL);
+
return ret;
}
@@ -3598,7 +3553,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
int ret = 0;
LIST_HEAD(qgroup_list);
- if (!is_fstree(ref_root))
+ if (!btrfs_is_fstree(ref_root))
return 0;
if (num_bytes == 0)
@@ -3658,7 +3613,7 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *qgroup;
LIST_HEAD(qgroup_list);
- if (!is_fstree(ref_root))
+ if (!btrfs_is_fstree(ref_root))
return;
if (num_bytes == 0)
@@ -3740,10 +3695,8 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
path, 1, 0);
btrfs_debug(fs_info,
- "current progress key (%llu %u %llu), search_slot ret %d",
- fs_info->qgroup_rescan_progress.objectid,
- fs_info->qgroup_rescan_progress.type,
- fs_info->qgroup_rescan_progress.offset, ret);
+ "current progress key " BTRFS_KEY_FMT ", search_slot ret %d",
+ BTRFS_KEY_FMT_VALUE(&fs_info->qgroup_rescan_progress), ret);
if (ret) {
/*
@@ -3845,8 +3798,8 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
* Rescan should only search for commit root, and any later difference
* should be recorded by qgroup
*/
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
while (!ret && !(stopped = rescan_should_stop(fs_info))) {
trans = btrfs_start_transaction(fs_info->fs_root, 0);
@@ -4046,12 +3999,21 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
qgroup_rescan_zero_tracking(fs_info);
mutex_lock(&fs_info->qgroup_rescan_lock);
- fs_info->qgroup_rescan_running = true;
- btrfs_queue_work(fs_info->qgroup_rescan_workers,
- &fs_info->qgroup_rescan_work);
+ /*
+ * The rescan worker is only for full accounting qgroups; check that it is
+ * enabled, as queueing the worker otherwise is pointless. A concurrent quota
+ * disable may also have just cleared BTRFS_FS_QUOTA_ENABLED.
+ */
+ if (btrfs_qgroup_full_accounting(fs_info)) {
+ fs_info->qgroup_rescan_running = true;
+ btrfs_queue_work(fs_info->qgroup_rescan_workers,
+ &fs_info->qgroup_rescan_work);
+ } else {
+ ret = -ENOTCONN;
+ }
mutex_unlock(&fs_info->qgroup_rescan_lock);
- return 0;
+ return ret;
}
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
@@ -4138,8 +4100,8 @@ static int qgroup_unreserve_range(struct btrfs_inode *inode,
* Now the entry is in [start, start + len), revert the
* EXTENT_QGROUP_RESERVED bit.
*/
- clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
- entry_end, EXTENT_QGROUP_RESERVED);
+ clear_ret = btrfs_clear_extent_bit(&inode->io_tree, entry_start, entry_end,
+ EXTENT_QGROUP_RESERVED, NULL);
if (!ret && clear_ret < 0)
ret = clear_ret;
@@ -4226,7 +4188,7 @@ static int qgroup_reserve_data(struct btrfs_inode *inode,
int ret;
if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
- !is_fstree(btrfs_root_id(root)) || len == 0)
+ !btrfs_is_fstree(btrfs_root_id(root)) || len == 0)
return 0;
/* @reserved parameter is mandatory for qgroup */
@@ -4241,8 +4203,9 @@ static int qgroup_reserve_data(struct btrfs_inode *inode,
reserved = *reserved_ret;
/* Record already reserved space */
orig_reserved = reserved->bytes_changed;
- ret = set_record_extent_bits(&inode->io_tree, start,
- start + len -1, EXTENT_QGROUP_RESERVED, reserved);
+ ret = btrfs_set_record_extent_bits(&inode->io_tree, start,
+ start + len - 1, EXTENT_QGROUP_RESERVED,
+ reserved);
/* Newly reserved space */
to_reserve = reserved->bytes_changed - orig_reserved;
@@ -4335,9 +4298,10 @@ static int qgroup_free_reserved_data(struct btrfs_inode *inode,
* EXTENT_QGROUP_RESERVED, we won't double free.
* So not need to rush.
*/
- ret = clear_record_extent_bits(&inode->io_tree, free_start,
- free_start + free_len - 1,
- EXTENT_QGROUP_RESERVED, &changeset);
+ ret = btrfs_clear_record_extent_bits(&inode->io_tree, free_start,
+ free_start + free_len - 1,
+ EXTENT_QGROUP_RESERVED,
+ &changeset);
if (ret < 0)
goto out;
freed += changeset.bytes_changed;
@@ -4361,9 +4325,9 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
int ret;
if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
- return clear_record_extent_bits(&inode->io_tree, start,
- start + len - 1,
- EXTENT_QGROUP_RESERVED, NULL);
+ return btrfs_clear_record_extent_bits(&inode->io_tree, start,
+ start + len - 1,
+ EXTENT_QGROUP_RESERVED, NULL);
}
/* In release case, we shouldn't have @reserved */
@@ -4371,8 +4335,8 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
if (free && reserved)
return qgroup_free_reserved_data(inode, reserved, start, len, released);
extent_changeset_init(&changeset);
- ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1,
- EXTENT_QGROUP_RESERVED, &changeset);
+ ret = btrfs_clear_record_extent_bits(&inode->io_tree, start, start + len - 1,
+ EXTENT_QGROUP_RESERVED, &changeset);
if (ret < 0)
goto out;
@@ -4477,11 +4441,11 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
int ret;
if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
- !is_fstree(btrfs_root_id(root)) || num_bytes == 0)
+ !btrfs_is_fstree(btrfs_root_id(root)) || num_bytes == 0)
return 0;
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
- trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
+ trace_btrfs_qgroup_meta_reserve(root, (s64)num_bytes, type);
ret = qgroup_reserve(root, num_bytes, enforce, type);
if (ret < 0)
return ret;
@@ -4522,11 +4486,11 @@ void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
struct btrfs_fs_info *fs_info = root->fs_info;
if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
- !is_fstree(btrfs_root_id(root)))
+ !btrfs_is_fstree(btrfs_root_id(root)))
return;
/* TODO: Update trace point to handle such free */
- trace_qgroup_meta_free_all_pertrans(root);
+ trace_btrfs_qgroup_meta_free_all_pertrans(root);
/* Special value -1 means to free all reserved space */
btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), (u64)-1,
BTRFS_QGROUP_RSV_META_PERTRANS);
@@ -4538,7 +4502,7 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
struct btrfs_fs_info *fs_info = root->fs_info;
if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
- !is_fstree(btrfs_root_id(root)))
+ !btrfs_is_fstree(btrfs_root_id(root)))
return;
/*
@@ -4548,7 +4512,7 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
*/
num_bytes = sub_root_meta_rsv(root, num_bytes, type);
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
- trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
+ trace_btrfs_qgroup_meta_reserve(root, -(s64)num_bytes, type);
btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), num_bytes, type);
}
@@ -4597,12 +4561,12 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
struct btrfs_fs_info *fs_info = root->fs_info;
if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
- !is_fstree(btrfs_root_id(root)))
+ !btrfs_is_fstree(btrfs_root_id(root)))
return;
/* Same as btrfs_qgroup_free_meta_prealloc() */
num_bytes = sub_root_meta_rsv(root, num_bytes,
BTRFS_QGROUP_RSV_META_PREALLOC);
- trace_qgroup_meta_convert(root, num_bytes);
+ trace_btrfs_qgroup_meta_convert(root, num_bytes);
qgroup_convert_meta(fs_info, btrfs_root_id(root), num_bytes);
if (!sb_rdonly(fs_info->sb))
add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
@@ -4620,8 +4584,8 @@ void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
int ret;
extent_changeset_init(&changeset);
- ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
- EXTENT_QGROUP_RESERVED, &changeset);
+ ret = btrfs_clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
+ EXTENT_QGROUP_RESERVED, &changeset);
WARN_ON(ret < 0);
if (WARN_ON(changeset.bytes_changed)) {
@@ -4681,6 +4645,28 @@ out:
spin_unlock(&swapped_blocks->lock);
}
+static int qgroup_swapped_block_bytenr_key_cmp(const void *key, const struct rb_node *node)
+{
+ const u64 *bytenr = key;
+ const struct btrfs_qgroup_swapped_block *block = rb_entry(node,
+ struct btrfs_qgroup_swapped_block, node);
+
+ if (block->subvol_bytenr < *bytenr)
+ return -1;
+ else if (block->subvol_bytenr > *bytenr)
+ return 1;
+
+ return 0;
+}
+
+static int qgroup_swapped_block_bytenr_cmp(struct rb_node *new, const struct rb_node *existing)
+{
+ const struct btrfs_qgroup_swapped_block *new_block = rb_entry(new,
+ struct btrfs_qgroup_swapped_block, node);
+
+ return qgroup_swapped_block_bytenr_key_cmp(&new_block->subvol_bytenr, existing);
+}
+
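/*
 * Aside: the two helpers above follow the <linux/rbtree.h> convention of
 * pairing a key comparator (for rb_find()) with a node comparator (for
 * rb_find_add()). rb_find_add() inserts the new node and returns NULL, or
 * returns the already-existing node on a duplicate key, which is what the
 * duplicate handling further below relies on. A sketch with a hypothetical
 * container type (kernel context assumed):
 */
struct demo_item {
	struct rb_node node;
	u64 bytenr;
};

/* cmp(key, node): negative when the key sorts before the node. */
static int demo_key_cmp(const void *key, const struct rb_node *node)
{
	const u64 *bytenr = key;
	const struct demo_item *item = rb_entry(node, struct demo_item, node);

	if (*bytenr < item->bytenr)
		return -1;
	if (*bytenr > item->bytenr)
		return 1;
	return 0;
}

static int demo_node_cmp(struct rb_node *new, const struct rb_node *existing)
{
	const struct demo_item *new_item = rb_entry(new, struct demo_item, node);

	return demo_key_cmp(&new_item->bytenr, existing);
}

/* Returns NULL on success, or the existing item when @item is a duplicate. */
static struct demo_item *demo_insert(struct rb_root *root, struct demo_item *item)
{
	struct rb_node *node = rb_find_add(&item->node, root, demo_node_cmp);

	return node ? rb_entry(node, struct demo_item, node) : NULL;
}

/* Returns NULL when no item with @bytenr is present. */
static struct demo_item *demo_lookup(struct rb_root *root, u64 bytenr)
{
	struct rb_node *node = rb_find(&bytenr, root, demo_key_cmp);

	return node ? rb_entry(node, struct demo_item, node) : NULL;
}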
/*
* Add subtree roots record into @subvol_root.
*
@@ -4700,16 +4686,15 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root,
struct btrfs_fs_info *fs_info = subvol_root->fs_info;
struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
struct btrfs_qgroup_swapped_block *block;
- struct rb_node **cur;
- struct rb_node *parent = NULL;
+ struct rb_node *node;
int level = btrfs_header_level(subvol_parent) - 1;
int ret = 0;
if (!btrfs_qgroup_full_accounting(fs_info))
return 0;
- if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
- btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
+ if (unlikely(btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
+ btrfs_node_ptr_generation(reloc_parent, reloc_slot))) {
btrfs_err_rl(fs_info,
"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
__func__,
@@ -4750,46 +4735,32 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root,
/* Insert @block into @blocks */
spin_lock(&blocks->lock);
- cur = &blocks->blocks[level].rb_node;
- while (*cur) {
+ node = rb_find_add(&block->node, &blocks->blocks[level], qgroup_swapped_block_bytenr_cmp);
+ if (node) {
struct btrfs_qgroup_swapped_block *entry;
- parent = *cur;
- entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
- node);
+ entry = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
- if (entry->subvol_bytenr < block->subvol_bytenr) {
- cur = &(*cur)->rb_left;
- } else if (entry->subvol_bytenr > block->subvol_bytenr) {
- cur = &(*cur)->rb_right;
- } else {
- if (entry->subvol_generation !=
- block->subvol_generation ||
- entry->reloc_bytenr != block->reloc_bytenr ||
- entry->reloc_generation !=
- block->reloc_generation) {
- /*
- * Duplicated but mismatch entry found.
- * Shouldn't happen.
- *
- * Marking qgroup inconsistent should be enough
- * for end users.
- */
- WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
- ret = -EEXIST;
- }
- kfree(block);
- goto out_unlock;
+ if (entry->subvol_generation != block->subvol_generation ||
+ entry->reloc_bytenr != block->reloc_bytenr ||
+ entry->reloc_generation != block->reloc_generation) {
+ /*
+ * Duplicated but mismatched entry found. Shouldn't happen.
+ * Marking qgroup inconsistent should be enough for end
+ * users.
+ */
+ DEBUG_WARN("duplicated but mismatched entry found");
+ ret = -EEXIST;
}
+ kfree(block);
+ goto out_unlock;
}
- rb_link_node(&block->node, parent, cur);
- rb_insert_color(&block->node, &blocks->blocks[level]);
blocks->swapped = true;
out_unlock:
spin_unlock(&blocks->lock);
out:
if (ret < 0)
- qgroup_mark_inconsistent(fs_info);
+ qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret);
return ret;
}
@@ -4806,10 +4777,9 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_tree_parent_check check = { 0 };
struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
- struct btrfs_qgroup_swapped_block *block;
+ struct btrfs_qgroup_swapped_block AUTO_KFREE(block);
struct extent_buffer *reloc_eb = NULL;
struct rb_node *node;
- bool found = false;
bool swapped = false;
int level = btrfs_header_level(subvol_eb);
int ret = 0;
@@ -4817,7 +4787,7 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
if (!btrfs_qgroup_full_accounting(fs_info))
return 0;
- if (!is_fstree(btrfs_root_id(root)) || !root->reloc_root)
+ if (!btrfs_is_fstree(btrfs_root_id(root)) || !root->reloc_root)
return 0;
spin_lock(&blocks->lock);
@@ -4825,23 +4795,14 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
spin_unlock(&blocks->lock);
return 0;
}
- node = blocks->blocks[level].rb_node;
-
- while (node) {
- block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
- if (block->subvol_bytenr < subvol_eb->start) {
- node = node->rb_left;
- } else if (block->subvol_bytenr > subvol_eb->start) {
- node = node->rb_right;
- } else {
- found = true;
- break;
- }
- }
- if (!found) {
+ node = rb_find(&subvol_eb->start, &blocks->blocks[level],
+ qgroup_swapped_block_bytenr_key_cmp);
+ if (!node) {
spin_unlock(&blocks->lock);
goto out;
}
+ block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
+
/* Found one, remove it from @blocks first and update blocks->swapped */
rb_erase(&block->node, &blocks->blocks[level]);
for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
@@ -4865,7 +4826,7 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
reloc_eb = NULL;
goto free_out;
}
- if (!extent_buffer_uptodate(reloc_eb)) {
+ if (unlikely(!extent_buffer_uptodate(reloc_eb))) {
ret = -EIO;
goto free_out;
}
@@ -4873,14 +4834,12 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
block->last_snapshot, block->trace_leaf);
free_out:
- kfree(block);
free_extent_buffer(reloc_eb);
out:
if (ret < 0) {
- btrfs_err_rl(fs_info,
- "failed to account subtree at bytenr %llu: %d",
- subvol_eb->start, ret);
- qgroup_mark_inconsistent(fs_info);
+ qgroup_mark_inconsistent(fs_info,
+ "failed to account subtree at bytenr %llu: %d",
+ subvol_eb->start, ret);
}
return ret;
}
@@ -4911,7 +4870,7 @@ int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
return 0;
- if (!is_fstree(root))
+ if (!btrfs_is_fstree(root))
return 0;
/* If the extent predates enabling quotas, don't count it. */
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index e233cc79af18..a979fd59a4da 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -22,6 +22,9 @@ struct btrfs_ioctl_quota_ctl_args;
struct btrfs_trans_handle;
struct btrfs_delayed_ref_root;
struct btrfs_inode;
+struct btrfs_transaction;
+struct btrfs_block_group;
+struct btrfs_qgroup_swapped_blocks;
/*
* Btrfs qgroup overview
diff --git a/fs/btrfs/raid-stripe-tree.c b/fs/btrfs/raid-stripe-tree.c
index 9ffc79f250fb..2987cb7c686e 100644
--- a/fs/btrfs/raid-stripe-tree.c
+++ b/fs/btrfs/raid-stripe-tree.c
@@ -13,12 +13,13 @@
#include "volumes.h"
#include "print-tree.h"
-static void btrfs_partially_delete_raid_extent(struct btrfs_trans_handle *trans,
+static int btrfs_partially_delete_raid_extent(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
const struct btrfs_key *oldkey,
u64 newlen, u64 frontpad)
{
- struct btrfs_stripe_extent *extent;
+ struct btrfs_root *stripe_root = trans->fs_info->stripe_root;
+ struct btrfs_stripe_extent *extent, AUTO_KFREE(newitem);
struct extent_buffer *leaf;
int slot;
size_t item_size;
@@ -27,30 +28,42 @@ static void btrfs_partially_delete_raid_extent(struct btrfs_trans_handle *trans,
.type = BTRFS_RAID_STRIPE_KEY,
.offset = newlen,
};
+ int ret;
+ ASSERT(newlen > 0);
ASSERT(oldkey->type == BTRFS_RAID_STRIPE_KEY);
leaf = path->nodes[0];
slot = path->slots[0];
item_size = btrfs_item_size(leaf, slot);
+
+ newitem = kzalloc(item_size, GFP_NOFS);
+ if (!newitem)
+ return -ENOMEM;
+
extent = btrfs_item_ptr(leaf, slot, struct btrfs_stripe_extent);
for (int i = 0; i < btrfs_num_raid_stripes(item_size); i++) {
struct btrfs_raid_stride *stride = &extent->strides[i];
u64 phys;
- phys = btrfs_raid_stride_physical(leaf, stride);
- btrfs_set_raid_stride_physical(leaf, stride, phys + frontpad);
+ phys = btrfs_raid_stride_physical(leaf, stride) + frontpad;
+ btrfs_set_stack_raid_stride_physical(&newitem->strides[i], phys);
}
- btrfs_set_item_key_safe(trans, path, &newkey);
+ ret = btrfs_del_item(trans, stripe_root, path);
+ if (ret)
+ return ret;
+
+ btrfs_release_path(path);
+ return btrfs_insert_item(trans, stripe_root, &newkey, newitem, item_size);
}
int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 length)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *stripe_root = fs_info->stripe_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *leaf;
u64 found_start;
@@ -59,9 +72,22 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le
int slot;
int ret;
- if (!stripe_root)
+ if (!btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE) || !stripe_root)
return 0;
+ if (!btrfs_is_testing(fs_info)) {
+ struct btrfs_chunk_map *map;
+ bool use_rst;
+
+ map = btrfs_find_chunk_map(fs_info, start, length);
+ if (!map)
+ return -EINVAL;
+ use_rst = btrfs_need_stripe_tree_update(fs_info, map->type);
+ btrfs_free_chunk_map(map);
+ if (!use_rst)
+ return 0;
+ }
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -85,6 +111,37 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le
found_end = found_start + key.offset;
ret = 0;
+ /*
+ * The stripe extent starts before the range we want to delete,
+ * but the range spans more than one stripe extent:
+ *
+ * |--- RAID Stripe Extent ---||--- RAID Stripe Extent ---|
+ * |--- keep ---|--- drop ---|
+ *
+ * This means we have to get the previous item, truncate its
+ * length and then restart the search.
+ */
+ if (found_start > start) {
+ if (slot == 0) {
+ ret = btrfs_previous_item(stripe_root, path, start,
+ BTRFS_RAID_STRIPE_KEY);
+ if (ret) {
+ if (ret > 0)
+ ret = -ENOENT;
+ break;
+ }
+ } else {
+ path->slots[0]--;
+ }
+
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ found_start = key.objectid;
+ found_end = found_start + key.offset;
+ ASSERT(found_start <= start);
+ }
+
if (key.type != BTRFS_RAID_STRIPE_KEY)
break;
@@ -96,6 +153,54 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le
found_start, found_end);
/*
+ * The stripe extent starts before the range we want to delete
+ * and ends after the range we want to delete, i.e. we're
+ * punching a hole in the stripe extent:
+ *
+ * |--- RAID Stripe Extent ---|
+ * | keep |--- drop ---| keep |
+ *
+ * This means we need to a) truncate the existing item and b)
+ * create a second item for the remaining range.
+ */
+ if (found_start < start && found_end > end) {
+ size_t item_size;
+ u64 diff_start = start - found_start;
+ u64 diff_end = found_end - end;
+ struct btrfs_stripe_extent *extent;
+ struct btrfs_key newkey = {
+ .objectid = end,
+ .type = BTRFS_RAID_STRIPE_KEY,
+ .offset = diff_end,
+ };
+
+ /* The "right" item. */
+ ret = btrfs_duplicate_item(trans, stripe_root, path, &newkey);
+ if (ret)
+ break;
+
+ item_size = btrfs_item_size(leaf, path->slots[0]);
+ extent = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_stripe_extent);
+
+ for (int i = 0; i < btrfs_num_raid_stripes(item_size); i++) {
+ struct btrfs_raid_stride *stride = &extent->strides[i];
+ u64 phys;
+
+ phys = btrfs_raid_stride_physical(leaf, stride);
+ phys += diff_start + length;
+ btrfs_set_raid_stride_physical(leaf, stride, phys);
+ }
+
+ /* The "left" item. */
+ path->slots[0]--;
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ btrfs_partially_delete_raid_extent(trans, path, &key,
+ diff_start, 0);
+ break;
+ }
+
+ /*
* The stripe extent starts before the range we want to delete:
*
* |--- RAID Stripe Extent ---|
@@ -105,11 +210,18 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le
* length to the new size and then re-insert the item.
*/
if (found_start < start) {
- u64 diff = start - found_start;
+ u64 diff_start = start - found_start;
btrfs_partially_delete_raid_extent(trans, path, &key,
- diff, 0);
- break;
+ diff_start, 0);
+
+ start += (key.offset - diff_start);
+ length -= (key.offset - diff_start);
+ if (length == 0)
+ break;
+
+ btrfs_release_path(path);
+ continue;
}
/*
@@ -122,13 +234,16 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le
* length to the new size and then re-insert the item.
*/
if (found_end > end) {
- u64 diff = found_end - end;
+ u64 diff_end = found_end - end;
btrfs_partially_delete_raid_extent(trans, path, &key,
- diff, diff);
+ key.offset - length,
+ length);
+ ASSERT(key.offset - diff_end == length);
break;
}
+ /* Finally we can delete the whole item, no more special cases. */
ret = btrfs_del_item(trans, stripe_root, path);
if (ret)
break;
@@ -141,7 +256,6 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le
btrfs_release_path(path);
}
- btrfs_free_path(path);
return ret;
}
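/*
 * Aside: btrfs_delete_raid_extent() above distinguishes four ways a stored
 * stripe extent [found_start, found_end) can overlap the deletion range
 * [start, end), once it has stepped back to an item with found_start <= start.
 * A minimal sketch of that classification, in the order the cases are checked
 * (hypothetical helper, plain C):
 */
enum stripe_overlap {
	OVERLAP_HOLE,	/* keep | drop | keep: split into two items */
	OVERLAP_FRONT,	/* keep | drop: truncate, then continue with the rest */
	OVERLAP_TAIL,	/* drop | keep: shift the strides and shorten the item */
	OVERLAP_WHOLE,	/* fully covered: delete the item outright */
};

static enum stripe_overlap classify_overlap(unsigned long long found_start,
					    unsigned long long found_end,
					    unsigned long long start,
					    unsigned long long end)
{
	if (found_start < start && found_end > end)
		return OVERLAP_HOLE;
	if (found_start < start)
		return OVERLAP_FRONT;
	if (found_end > end)
		return OVERLAP_TAIL;
	return OVERLAP_WHOLE;
}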
@@ -150,7 +264,7 @@ static int update_raid_extent_item(struct btrfs_trans_handle *trans,
struct btrfs_stripe_extent *stripe_extent,
const size_t item_size)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
int ret;
int slot;
@@ -169,8 +283,6 @@ static int update_raid_extent_item(struct btrfs_trans_handle *trans,
write_extent_buffer(leaf, stripe_extent, btrfs_item_ptr_offset(leaf, slot),
item_size);
- btrfs_mark_buffer_dirty(trans, leaf);
- btrfs_free_path(path);
return ret;
}
@@ -183,12 +295,12 @@ int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans,
struct btrfs_key stripe_key;
struct btrfs_root *stripe_root = fs_info->stripe_root;
const int num_stripes = btrfs_bg_type_to_factor(bioc->map_type);
- struct btrfs_stripe_extent *stripe_extent;
+ struct btrfs_stripe_extent AUTO_KFREE(stripe_extent);
const size_t item_size = struct_size(stripe_extent, strides, num_stripes);
int ret;
stripe_extent = kzalloc(item_size, GFP_NOFS);
- if (!stripe_extent) {
+ if (unlikely(!stripe_extent)) {
btrfs_abort_transaction(trans, -ENOMEM);
btrfs_end_transaction(trans);
return -ENOMEM;
@@ -199,12 +311,8 @@ int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans,
for (int i = 0; i < num_stripes; i++) {
u64 devid = bioc->stripes[i].dev->devid;
u64 physical = bioc->stripes[i].physical;
- u64 length = bioc->stripes[i].length;
struct btrfs_raid_stride *raid_stride = &stripe_extent->strides[i];
- if (length == 0)
- length = bioc->size;
-
btrfs_set_stack_raid_stride_devid(raid_stride, devid);
btrfs_set_stack_raid_stride_physical(raid_stride, physical);
}
@@ -215,13 +323,14 @@ int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans,
ret = btrfs_insert_item(trans, stripe_root, &stripe_key, stripe_extent,
item_size);
- if (ret == -EEXIST)
+ if (ret == -EEXIST) {
ret = update_raid_extent_item(trans, &stripe_key, stripe_extent,
item_size);
- if (ret)
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
+ } else if (ret) {
btrfs_abort_transaction(trans, ret);
-
- kfree(stripe_extent);
+ }
return ret;
}
@@ -259,7 +368,7 @@ int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info,
struct btrfs_stripe_extent *stripe_extent;
struct btrfs_key stripe_key;
struct btrfs_key found_key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
const u64 end = logical + *length;
int num_stripes;
@@ -279,13 +388,13 @@ int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info,
return -ENOMEM;
if (stripe->rst_search_commit_root) {
- path->skip_locking = 1;
- path->search_commit_root = 1;
+ path->skip_locking = true;
+ path->search_commit_root = true;
}
ret = btrfs_search_slot(NULL, stripe_root, &stripe_key, path, 0, 0);
if (ret < 0)
- goto free_path;
+ return ret;
if (ret) {
if (path->slots[0] != 0)
path->slots[0]--;
@@ -342,8 +451,7 @@ int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info,
trace_btrfs_get_raid_extent_offset(fs_info, logical, *length,
stripe->physical, devid);
- ret = 0;
- goto free_path;
+ return 0;
}
/* If we're here, we haven't found the requested devid in the stripe. */
@@ -357,8 +465,6 @@ out:
logical, logical + *length, stripe->dev->devid,
btrfs_bg_type_to_raid_name(map_type));
}
-free_path:
- btrfs_free_path(path);
return ret;
}
diff --git a/fs/btrfs/raid-stripe-tree.h b/fs/btrfs/raid-stripe-tree.h
index 541836421778..69942ad43140 100644
--- a/fs/btrfs/raid-stripe-tree.h
+++ b/fs/btrfs/raid-stripe-tree.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <uapi/linux/btrfs_tree.h>
#include "fs.h"
+#include "accessors.h"
#define BTRFS_RST_SUPP_BLOCK_GROUP_MASK (BTRFS_BLOCK_GROUP_DUP | \
BTRFS_BLOCK_GROUP_RAID1_MASK | \
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index cdd373c27784..f38d8305e46d 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -66,10 +66,10 @@ static void btrfs_dump_rbio(const struct btrfs_fs_info *fs_info,
dump_bioc(fs_info, rbio->bioc);
btrfs_crit(fs_info,
-"rbio flags=0x%lx nr_sectors=%u nr_data=%u real_stripes=%u stripe_nsectors=%u scrubp=%u dbitmap=0x%lx",
+"rbio flags=0x%lx nr_sectors=%u nr_data=%u real_stripes=%u stripe_nsectors=%u sector_nsteps=%u scrubp=%u dbitmap=0x%lx",
rbio->flags, rbio->nr_sectors, rbio->nr_data,
rbio->real_stripes, rbio->stripe_nsectors,
- rbio->scrubp, rbio->dbitmap);
+ rbio->sector_nsteps, rbio->scrubp, rbio->dbitmap);
}
#define ASSERT_RBIO(expr, rbio) \
@@ -134,15 +134,10 @@ struct btrfs_stripe_hash_table {
};
/*
- * A bvec like structure to present a sector inside a page.
- *
- * Unlike bvec we don't need bvlen, as it's fixed to sectorsize.
+ * The PFN may still be valid, but our paddrs should always be block size
+ * aligned, thus such -1 paddr is definitely not a valid one.
*/
-struct sector_ptr {
- struct page *page;
- unsigned int pgoff:24;
- unsigned int uptodate:8;
-};
+#define INVALID_PADDR (~(phys_addr_t)0)
static void rmw_rbio_work(struct work_struct *work);
static void rmw_rbio_work_locked(struct work_struct *work);
@@ -156,8 +151,8 @@ static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio)
{
bitmap_free(rbio->error_bitmap);
kfree(rbio->stripe_pages);
- kfree(rbio->bio_sectors);
- kfree(rbio->stripe_sectors);
+ kfree(rbio->bio_paddrs);
+ kfree(rbio->stripe_paddrs);
kfree(rbio->finish_pointers);
}
@@ -200,8 +195,7 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
struct btrfs_stripe_hash_table *x;
struct btrfs_stripe_hash *cur;
struct btrfs_stripe_hash *h;
- int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
- int i;
+ unsigned int num_entries = 1U << BTRFS_STRIPE_HASH_TABLE_BITS;
if (info->stripe_hash_table)
return 0;
@@ -222,7 +216,7 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
h = table->table;
- for (i = 0; i < num_entries; i++) {
+ for (unsigned int i = 0; i < num_entries; i++) {
cur = h + i;
INIT_LIST_HEAD(&cur->hash_list);
spin_lock_init(&cur->lock);
@@ -233,6 +227,24 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
return 0;
}
+static void memcpy_from_bio_to_stripe(struct btrfs_raid_bio *rbio, unsigned int sector_nr)
+{
+ const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE);
+
+ ASSERT(sector_nr < rbio->nr_sectors);
+ for (int i = 0; i < rbio->sector_nsteps; i++) {
+ unsigned int index = sector_nr * rbio->sector_nsteps + i;
+ phys_addr_t dst = rbio->stripe_paddrs[index];
+ phys_addr_t src = rbio->bio_paddrs[index];
+
+ ASSERT(dst != INVALID_PADDR);
+ ASSERT(src != INVALID_PADDR);
+
+ memcpy_page(phys_to_page(dst), offset_in_page(dst),
+ phys_to_page(src), offset_in_page(src), step);
+ }
+}
+
/*
* caching an rbio means to copy anything from the
* bio_sectors array into the stripe_pages array. We
@@ -253,24 +265,19 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
for (i = 0; i < rbio->nr_sectors; i++) {
/* Some range not covered by bio (partial write), skip it */
- if (!rbio->bio_sectors[i].page) {
+ if (rbio->bio_paddrs[i * rbio->sector_nsteps] == INVALID_PADDR) {
/*
* Even if the sector is not covered by bio, if it is
* a data sector it should still be uptodate as it is
* read from disk.
*/
if (i < rbio->nr_data * rbio->stripe_nsectors)
- ASSERT(rbio->stripe_sectors[i].uptodate);
+ ASSERT(test_bit(i, rbio->stripe_uptodate_bitmap));
continue;
}
- ASSERT(rbio->stripe_sectors[i].page);
- memcpy_page(rbio->stripe_sectors[i].page,
- rbio->stripe_sectors[i].pgoff,
- rbio->bio_sectors[i].page,
- rbio->bio_sectors[i].pgoff,
- rbio->bioc->fs_info->sectorsize);
- rbio->stripe_sectors[i].uptodate = 1;
+ memcpy_from_bio_to_stripe(rbio, i);
+ set_bit(i, rbio->stripe_uptodate_bitmap);
}
set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}
@@ -293,19 +300,48 @@ static int rbio_bucket(struct btrfs_raid_bio *rbio)
return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
-static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
- unsigned int page_nr)
+/* Get the sector number of the first sector covered by @page_nr. */
+static u32 page_nr_to_sector_nr(struct btrfs_raid_bio *rbio, unsigned int page_nr)
{
- const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
- const u32 sectors_per_page = PAGE_SIZE / sectorsize;
+ u32 sector_nr;
+
+ ASSERT(page_nr < rbio->nr_pages);
+
+ sector_nr = (page_nr << PAGE_SHIFT) >> rbio->bioc->fs_info->sectorsize_bits;
+ ASSERT(sector_nr < rbio->nr_sectors);
+ return sector_nr;
+}
+
+/*
+ * Get the number of sectors covered by @page_nr.
+ *
+ * For bs > ps cases, the result will always be 1.
+ * For bs <= ps cases, the result will be ps / bs.
+ */
+static u32 page_nr_to_num_sectors(struct btrfs_raid_bio *rbio, unsigned int page_nr)
+{
+ struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
+ u32 nr_sectors;
+
+ ASSERT(page_nr < rbio->nr_pages);
+
+ nr_sectors = round_up(PAGE_SIZE, fs_info->sectorsize) >> fs_info->sectorsize_bits;
+ ASSERT(nr_sectors > 0);
+ return nr_sectors;
+}
+
+static __maybe_unused bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
+ unsigned int page_nr)
+{
+ const u32 sector_nr = page_nr_to_sector_nr(rbio, page_nr);
+ const u32 nr_bits = page_nr_to_num_sectors(rbio, page_nr);
int i;
ASSERT(page_nr < rbio->nr_pages);
+ ASSERT(sector_nr + nr_bits <= rbio->nr_sectors);
- for (i = sectors_per_page * page_nr;
- i < sectors_per_page * page_nr + sectors_per_page;
- i++) {
- if (!rbio->stripe_sectors[i].uptodate)
+ for (i = sector_nr; i < sector_nr + nr_bits; i++) {
+ if (!test_bit(i, rbio->stripe_uptodate_bitmap))
return false;
}
return true;
@@ -318,41 +354,44 @@ static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
*/
static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
{
- const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
+ const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE);
u32 offset;
int i;
- for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
+ for (i = 0, offset = 0; i < rbio->nr_sectors * rbio->sector_nsteps;
+ i++, offset += step) {
int page_index = offset >> PAGE_SHIFT;
ASSERT(page_index < rbio->nr_pages);
- rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
- rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
+ if (!rbio->stripe_pages[page_index])
+ continue;
+
+ rbio->stripe_paddrs[i] = page_to_phys(rbio->stripe_pages[page_index]) +
+ offset_in_page(offset);
}
}
static void steal_rbio_page(struct btrfs_raid_bio *src,
struct btrfs_raid_bio *dest, int page_nr)
{
- const u32 sectorsize = src->bioc->fs_info->sectorsize;
- const u32 sectors_per_page = PAGE_SIZE / sectorsize;
- int i;
+ const u32 sector_nr = page_nr_to_sector_nr(src, page_nr);
+ const u32 nr_bits = page_nr_to_num_sectors(src, page_nr);
+
+ ASSERT(page_nr < src->nr_pages);
+ ASSERT(sector_nr + nr_bits <= src->nr_sectors);
if (dest->stripe_pages[page_nr])
__free_page(dest->stripe_pages[page_nr]);
dest->stripe_pages[page_nr] = src->stripe_pages[page_nr];
src->stripe_pages[page_nr] = NULL;
- /* Also update the sector->uptodate bits. */
- for (i = sectors_per_page * page_nr;
- i < sectors_per_page * page_nr + sectors_per_page; i++)
- dest->stripe_sectors[i].uptodate = true;
+ /* Also update the stripe_uptodate_bitmap bits. */
+ bitmap_set(dest->stripe_uptodate_bitmap, sector_nr, nr_bits);
}
static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr)
{
- const int sector_nr = (page_nr << PAGE_SHIFT) >>
- rbio->bioc->fs_info->sectorsize_bits;
+ const int sector_nr = page_nr_to_sector_nr(rbio, page_nr);
/*
* We have ensured PAGE_SIZE is aligned with sectorsize, thus
@@ -507,9 +546,8 @@ static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
spin_lock(&table->cache_lock);
while (!list_empty(&table->stripe_cache)) {
- rbio = list_entry(table->stripe_cache.next,
- struct btrfs_raid_bio,
- stripe_cache);
+ rbio = list_first_entry(&table->stripe_cache,
+ struct btrfs_raid_bio, stripe_cache);
__remove_rbio_from_cache(rbio);
}
spin_unlock(&table->cache_lock);
@@ -567,9 +605,9 @@ static void cache_rbio(struct btrfs_raid_bio *rbio)
if (table->cache_size > RBIO_CACHE_SIZE) {
struct btrfs_raid_bio *found;
- found = list_entry(table->stripe_cache.prev,
- struct btrfs_raid_bio,
- stripe_cache);
+ found = list_last_entry(&table->stripe_cache,
+ struct btrfs_raid_bio,
+ stripe_cache);
if (found != rbio)
__remove_rbio_from_cache(found);
@@ -667,39 +705,62 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
return 1;
}
-static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
- unsigned int stripe_nr,
- unsigned int sector_nr)
+/* Return the sector index for @stripe_nr and @sector_nr. */
+static unsigned int rbio_sector_index(const struct btrfs_raid_bio *rbio,
+ unsigned int stripe_nr,
+ unsigned int sector_nr)
{
+ unsigned int ret;
+
ASSERT_RBIO_STRIPE(stripe_nr < rbio->real_stripes, rbio, stripe_nr);
ASSERT_RBIO_SECTOR(sector_nr < rbio->stripe_nsectors, rbio, sector_nr);
- return stripe_nr * rbio->stripe_nsectors + sector_nr;
+ ret = stripe_nr * rbio->stripe_nsectors + sector_nr;
+ ASSERT(ret < rbio->nr_sectors);
+ return ret;
+}
+
+/* Return the paddr array index for @stripe_nr, @sector_nr and @step_nr. */
+static unsigned int rbio_paddr_index(const struct btrfs_raid_bio *rbio,
+ unsigned int stripe_nr,
+ unsigned int sector_nr,
+ unsigned int step_nr)
+{
+ unsigned int ret;
+
+ ASSERT_RBIO_SECTOR(step_nr < rbio->sector_nsteps, rbio, step_nr);
+
+ ret = rbio_sector_index(rbio, stripe_nr, sector_nr) * rbio->sector_nsteps + step_nr;
+ ASSERT(ret < rbio->nr_sectors * rbio->sector_nsteps);
+ return ret;
}
-/* Return a sector from rbio->stripe_sectors, not from the bio list */
-static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
- unsigned int stripe_nr,
- unsigned int sector_nr)
+static phys_addr_t rbio_stripe_paddr(const struct btrfs_raid_bio *rbio,
+ unsigned int stripe_nr, unsigned int sector_nr,
+ unsigned int step_nr)
{
- return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
- sector_nr)];
+ return rbio->stripe_paddrs[rbio_paddr_index(rbio, stripe_nr, sector_nr, step_nr)];
}
-/* Grab a sector inside P stripe */
-static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
- unsigned int sector_nr)
+static phys_addr_t rbio_pstripe_paddr(const struct btrfs_raid_bio *rbio,
+ unsigned int sector_nr, unsigned int step_nr)
{
- return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
+ return rbio_stripe_paddr(rbio, rbio->nr_data, sector_nr, step_nr);
}
-/* Grab a sector inside Q stripe, return NULL if not RAID6 */
-static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
- unsigned int sector_nr)
+static phys_addr_t rbio_qstripe_paddr(const struct btrfs_raid_bio *rbio,
+ unsigned int sector_nr, unsigned int step_nr)
{
if (rbio->nr_data + 1 == rbio->real_stripes)
- return NULL;
- return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
+ return INVALID_PADDR;
+ return rbio_stripe_paddr(rbio, rbio->nr_data + 1, sector_nr, step_nr);
+}
+
+/* Return a paddr pointer into the rbio::stripe_paddrs[] for the specified sector. */
+static phys_addr_t *rbio_stripe_paddrs(const struct btrfs_raid_bio *rbio,
+ unsigned int stripe_nr, unsigned int sector_nr)
+{
+ return &rbio->stripe_paddrs[rbio_paddr_index(rbio, stripe_nr, sector_nr, 0)];
}
/*
@@ -882,14 +943,14 @@ done_nolock:
remove_rbio_from_cache(rbio);
}
-static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
+static void rbio_endio_bio_list(struct bio *cur, blk_status_t status)
{
struct bio *next;
while (cur) {
next = cur->bi_next;
cur->bi_next = NULL;
- cur->bi_status = err;
+ cur->bi_status = status;
bio_endio(cur);
cur = next;
}
@@ -899,7 +960,7 @@ static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
* this frees the rbio and runs through all the bios in the
* bio_list and calls end_io on them
*/
-static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
+static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t status)
{
struct bio *cur = bio_list_get(&rbio->bio_list);
struct bio *extra;
@@ -928,13 +989,13 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
extra = bio_list_get(&rbio->bio_list);
free_raid_bio(rbio);
- rbio_endio_bio_list(cur, err);
+ rbio_endio_bio_list(cur, status);
if (extra)
- rbio_endio_bio_list(extra, err);
+ rbio_endio_bio_list(extra, status);
}
/*
- * Get a sector pointer specified by its @stripe_nr and @sector_nr.
+ * Get paddr pointer for the sector specified by its @stripe_nr and @sector_nr.
*
* @rbio: The raid bio
* @stripe_nr: Stripe number, valid range [0, real_stripe)
@@ -944,34 +1005,52 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
*
* The read/modify/write code wants to reuse the original bio page as much
- * as possible, and only use stripe_sectors as fallback.
+ * as possible, and only use stripe_paddrs[] as a fallback.
+ *
+ * Return NULL if bio_list_only is set but the specified sector has no
+ * corresponding bio.
*/
-static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
- int stripe_nr, int sector_nr,
- bool bio_list_only)
+static phys_addr_t *sector_paddrs_in_rbio(struct btrfs_raid_bio *rbio,
+ int stripe_nr, int sector_nr,
+ bool bio_list_only)
{
- struct sector_ptr *sector;
- int index;
+ phys_addr_t *ret = NULL;
+ const int index = rbio_paddr_index(rbio, stripe_nr, sector_nr, 0);
- ASSERT_RBIO_STRIPE(stripe_nr >= 0 && stripe_nr < rbio->real_stripes,
- rbio, stripe_nr);
- ASSERT_RBIO_SECTOR(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors,
- rbio, sector_nr);
+ ASSERT(index >= 0 && index < rbio->nr_sectors * rbio->sector_nsteps);
- index = stripe_nr * rbio->stripe_nsectors + sector_nr;
- ASSERT(index >= 0 && index < rbio->nr_sectors);
-
- spin_lock(&rbio->bio_list_lock);
- sector = &rbio->bio_sectors[index];
- if (sector->page || bio_list_only) {
- /* Don't return sector without a valid page pointer */
- if (!sector->page)
- sector = NULL;
- spin_unlock(&rbio->bio_list_lock);
- return sector;
+ scoped_guard(spinlock, &rbio->bio_list_lock) {
+ if (rbio->bio_paddrs[index] != INVALID_PADDR || bio_list_only) {
+ /* Don't return sector without a valid page pointer */
+ if (rbio->bio_paddrs[index] != INVALID_PADDR)
+ ret = &rbio->bio_paddrs[index];
+ return ret;
+ }
}
- spin_unlock(&rbio->bio_list_lock);
+ return &rbio->stripe_paddrs[index];
+}
- return &rbio->stripe_sectors[index];
+/*
+ * Similar to sector_paddrs_in_rbio(), but with extra consideration for
+ * bs > ps cases, where we can have multiple steps for a fs block.
+ */
+static phys_addr_t sector_paddr_in_rbio(struct btrfs_raid_bio *rbio,
+ int stripe_nr, int sector_nr, int step_nr,
+ bool bio_list_only)
+{
+ phys_addr_t ret = INVALID_PADDR;
+ const int index = rbio_paddr_index(rbio, stripe_nr, sector_nr, step_nr);
+
+ ASSERT(index >= 0 && index < rbio->nr_sectors * rbio->sector_nsteps);
+
+ scoped_guard(spinlock, &rbio->bio_list_lock) {
+ if (rbio->bio_paddrs[index] != INVALID_PADDR || bio_list_only) {
+ /* Don't return sector without a valid page pointer */
+ if (rbio->bio_paddrs[index] != INVALID_PADDR)
+ ret = rbio->bio_paddrs[index];
+ return ret;
+ }
+ }
+ return rbio->stripe_paddrs[index];
}
/*
@@ -987,10 +1066,16 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
const unsigned int stripe_nsectors =
BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
const unsigned int num_sectors = stripe_nsectors * real_stripes;
+ const unsigned int step = min(fs_info->sectorsize, PAGE_SIZE);
+ const unsigned int sector_nsteps = fs_info->sectorsize / step;
struct btrfs_raid_bio *rbio;
- /* PAGE_SIZE must also be aligned to sectorsize for subpage support */
- ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize));
+ /*
+ * For bs <= ps cases, ps must be aligned to bs.
+ * For bs > ps cases, bs must be aligned to ps.
+ */
+ ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize) ||
+ IS_ALIGNED(fs_info->sectorsize, PAGE_SIZE));
/*
* Our current stripe len should be fixed to 64k thus stripe_nsectors
* (at most 16) should be no larger than BITS_PER_LONG.
@@ -1009,19 +1094,22 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
return ERR_PTR(-ENOMEM);
rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *),
GFP_NOFS);
- rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
- GFP_NOFS);
- rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
- GFP_NOFS);
+ rbio->bio_paddrs = kcalloc(num_sectors * sector_nsteps, sizeof(phys_addr_t), GFP_NOFS);
+ rbio->stripe_paddrs = kcalloc(num_sectors * sector_nsteps, sizeof(phys_addr_t), GFP_NOFS);
rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS);
rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);
+ rbio->stripe_uptodate_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);
- if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors ||
- !rbio->finish_pointers || !rbio->error_bitmap) {
+ if (!rbio->stripe_pages || !rbio->bio_paddrs || !rbio->stripe_paddrs ||
+ !rbio->finish_pointers || !rbio->error_bitmap || !rbio->stripe_uptodate_bitmap) {
free_raid_bio_pointers(rbio);
kfree(rbio);
return ERR_PTR(-ENOMEM);
}
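+ /* No page is attached nor bio indexed yet, mark every paddr entry invalid. */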
+ for (int i = 0; i < num_sectors * sector_nsteps; i++) {
+ rbio->stripe_paddrs[i] = INVALID_PADDR;
+ rbio->bio_paddrs[i] = INVALID_PADDR;
+ }
bio_list_init(&rbio->bio_list);
init_waitqueue_head(&rbio->io_wait);
@@ -1036,6 +1124,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
rbio->real_stripes = real_stripes;
rbio->stripe_npages = stripe_npages;
rbio->stripe_nsectors = stripe_nsectors;
+ rbio->sector_nsteps = sector_nsteps;
refcount_set(&rbio->refs, 1);
atomic_set(&rbio->stripes_pending, 0);
@@ -1080,8 +1169,8 @@ static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
* @faila and @failb will also be updated to the first and second stripe
* number of the errors.
*/
-static int get_rbio_veritical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
- int *faila, int *failb)
+static int get_rbio_vertical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
+ int *faila, int *failb)
{
int stripe_nr;
int found_errors = 0;
@@ -1113,20 +1202,41 @@ static int get_rbio_veritical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
return found_errors;
}
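+/*
+ * Add all @nr_steps steps of one fs block (physical addresses in @paddrs) to @bio.
+ *
+ * Return the number of bytes added on success, or 0 if any step could not be
+ * added, in which case the bio size is restored.
+ */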
+static int bio_add_paddrs(struct bio *bio, phys_addr_t *paddrs, unsigned int nr_steps,
+ unsigned int step)
+{
+ int added = 0;
+ int ret;
+
+ for (int i = 0; i < nr_steps; i++) {
+ ret = bio_add_page(bio, phys_to_page(paddrs[i]), step,
+ offset_in_page(paddrs[i]));
+ if (ret != step)
+ goto revert;
+ added += ret;
+ }
+ return added;
+revert:
+ /*
+ * We don't need to revert the bvecs, as the bio will be submitted immediately;
+ * as long as bi_size is reduced the extra bvecs will not be accessed.
+ */
+ bio->bi_iter.bi_size -= added;
+ return 0;
+}
+
/*
- * Add a single sector @sector into our list of bios for IO.
+ * Add a single sector (all steps in @paddrs) into our list of bios for IO.
 *
 * Return 0 if everything went well.
- * Return <0 for error.
+ * Return <0 for error, in which case no bytes will have been added for @rbio.
*/
-static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
- struct bio_list *bio_list,
- struct sector_ptr *sector,
- unsigned int stripe_nr,
- unsigned int sector_nr,
- enum req_op op)
+static int rbio_add_io_paddrs(struct btrfs_raid_bio *rbio, struct bio_list *bio_list,
+ phys_addr_t *paddrs, unsigned int stripe_nr,
+ unsigned int sector_nr, enum req_op op)
{
const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
+ const u32 step = min(sectorsize, PAGE_SIZE);
struct bio *last = bio_list->tail;
int ret;
struct bio *bio;
@@ -1142,7 +1252,7 @@ static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
rbio, stripe_nr);
ASSERT_RBIO_SECTOR(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors,
rbio, sector_nr);
- ASSERT(sector->page);
+ ASSERT(paddrs != NULL);
stripe = &rbio->bioc->stripes[stripe_nr];
disk_start = stripe->physical + sector_nr * sectorsize;
@@ -1155,9 +1265,9 @@ static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
rbio->error_bitmap);
/* Check if we have reached tolerance early. */
- found_errors = get_rbio_veritical_errors(rbio, sector_nr,
- NULL, NULL);
- if (found_errors > rbio->bioc->max_errors)
+ found_errors = get_rbio_vertical_errors(rbio, sector_nr,
+ NULL, NULL);
+ if (unlikely(found_errors > rbio->bioc->max_errors))
return -EIO;
return 0;
}
@@ -1173,8 +1283,7 @@ static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
*/
if (last_end == disk_start && !last->bi_status &&
last->bi_bdev == stripe->dev->bdev) {
- ret = bio_add_page(last, sector->page, sectorsize,
- sector->pgoff);
+ ret = bio_add_paddrs(last, paddrs, rbio->sector_nsteps, step);
if (ret == sectorsize)
return 0;
}
@@ -1187,31 +1296,27 @@ static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
bio->bi_iter.bi_sector = disk_start >> SECTOR_SHIFT;
bio->bi_private = rbio;
- __bio_add_page(bio, sector->page, sectorsize, sector->pgoff);
+ ret = bio_add_paddrs(bio, paddrs, rbio->sector_nsteps, step);
+ ASSERT(ret == sectorsize);
bio_list_add(bio_list, bio);
return 0;
}
static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
{
- const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
- struct bio_vec bvec;
- struct bvec_iter iter;
+ struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
+ const u32 step = min(fs_info->sectorsize, PAGE_SIZE);
+ const u32 step_bits = min(fs_info->sectorsize_bits, PAGE_SHIFT);
+ struct bvec_iter iter = bio->bi_iter;
+ phys_addr_t paddr;
u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
rbio->bioc->full_stripe_logical;
- bio_for_each_segment(bvec, bio, iter) {
- u32 bvec_offset;
+ btrfs_bio_for_each_block(paddr, bio, &iter, step) {
+ unsigned int index = (offset >> step_bits);
- for (bvec_offset = 0; bvec_offset < bvec.bv_len;
- bvec_offset += sectorsize, offset += sectorsize) {
- int index = offset / sectorsize;
- struct sector_ptr *sector = &rbio->bio_sectors[index];
-
- sector->page = bvec.bv_page;
- sector->pgoff = bvec.bv_offset + bvec_offset;
- ASSERT(sector->pgoff < PAGE_SIZE);
- }
+ rbio->bio_paddrs[index] = paddr;
+ offset += step;
}
}
@@ -1289,49 +1394,64 @@ static void assert_rbio(struct btrfs_raid_bio *rbio)
ASSERT_RBIO(rbio->nr_data < rbio->real_stripes, rbio);
}
-/* Generate PQ for one vertical stripe. */
-static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr)
+static inline void *kmap_local_paddr(phys_addr_t paddr)
+{
+ /* The physical address must be valid, i.e. backed by a mapped page. */
+ ASSERT(paddr != INVALID_PADDR);
+
+ return kmap_local_page(phys_to_page(paddr)) + offset_in_page(paddr);
+}
+
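+/*
+ * Generate P/Q for one step (min(sectorsize, PAGE_SIZE) bytes) of the
+ * vertical stripe at @sector_nr.
+ */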
+static void generate_pq_vertical_step(struct btrfs_raid_bio *rbio, unsigned int sector_nr,
+ unsigned int step_nr)
{
void **pointers = rbio->finish_pointers;
- const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
- struct sector_ptr *sector;
+ const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE);
int stripe;
const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6;
/* First collect one sector from each data stripe */
- for (stripe = 0; stripe < rbio->nr_data; stripe++) {
- sector = sector_in_rbio(rbio, stripe, sectornr, 0);
- pointers[stripe] = kmap_local_page(sector->page) +
- sector->pgoff;
- }
+ for (stripe = 0; stripe < rbio->nr_data; stripe++)
+ pointers[stripe] = kmap_local_paddr(
+ sector_paddr_in_rbio(rbio, stripe, sector_nr, step_nr, 0));
/* Then add the parity stripe */
- sector = rbio_pstripe_sector(rbio, sectornr);
- sector->uptodate = 1;
- pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff;
+ pointers[stripe++] = kmap_local_paddr(rbio_pstripe_paddr(rbio, sector_nr, step_nr));
if (has_qstripe) {
/*
* RAID6, add the qstripe and call the library function
* to fill in our p/q
*/
- sector = rbio_qstripe_sector(rbio, sectornr);
- sector->uptodate = 1;
- pointers[stripe++] = kmap_local_page(sector->page) +
- sector->pgoff;
+ pointers[stripe++] = kmap_local_paddr(
+ rbio_qstripe_paddr(rbio, sector_nr, step_nr));
assert_rbio(rbio);
- raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
- pointers);
+ raid6_call.gen_syndrome(rbio->real_stripes, step, pointers);
} else {
/* raid5 */
- memcpy(pointers[rbio->nr_data], pointers[0], sectorsize);
- run_xor(pointers + 1, rbio->nr_data - 1, sectorsize);
+ memcpy(pointers[rbio->nr_data], pointers[0], step);
+ run_xor(pointers + 1, rbio->nr_data - 1, step);
}
for (stripe = stripe - 1; stripe >= 0; stripe--)
kunmap_local(pointers[stripe]);
}
+/* Generate PQ for one vertical stripe. */
+static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr)
+{
+ const bool has_qstripe = (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6);
+
+ for (int i = 0; i < rbio->sector_nsteps; i++)
+ generate_pq_vertical_step(rbio, sectornr, i);
+
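+ /* All steps are generated, mark the P (and Q) blocks uptodate. */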
+ set_bit(rbio_sector_index(rbio, rbio->nr_data, sectornr),
+ rbio->stripe_uptodate_bitmap);
+ if (has_qstripe)
+ set_bit(rbio_sector_index(rbio, rbio->nr_data + 1, sectornr),
+ rbio->stripe_uptodate_bitmap);
+}
+
static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
struct bio_list *bio_list)
{
@@ -1358,7 +1478,7 @@ static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
*/
for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
total_sector_nr++) {
- struct sector_ptr *sector;
+ phys_addr_t *paddrs;
stripe = total_sector_nr / rbio->stripe_nsectors;
sectornr = total_sector_nr % rbio->stripe_nsectors;
@@ -1368,14 +1488,14 @@ static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
continue;
if (stripe < rbio->nr_data) {
- sector = sector_in_rbio(rbio, stripe, sectornr, 1);
- if (!sector)
+ paddrs = sector_paddrs_in_rbio(rbio, stripe, sectornr, 1);
+ if (paddrs == NULL)
continue;
} else {
- sector = rbio_stripe_sector(rbio, stripe, sectornr);
+ paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr);
}
- ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
+ ret = rbio_add_io_paddrs(rbio, bio_list, paddrs, stripe,
sectornr, REQ_OP_WRITE);
if (ret)
goto error;
@@ -1393,7 +1513,7 @@ static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
total_sector_nr++) {
- struct sector_ptr *sector;
+ phys_addr_t *paddrs;
stripe = total_sector_nr / rbio->stripe_nsectors;
sectornr = total_sector_nr % rbio->stripe_nsectors;
@@ -1418,14 +1538,14 @@ static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
continue;
if (stripe < rbio->nr_data) {
- sector = sector_in_rbio(rbio, stripe, sectornr, 1);
- if (!sector)
+ paddrs = sector_paddrs_in_rbio(rbio, stripe, sectornr, 1);
+ if (paddrs == NULL)
continue;
} else {
- sector = rbio_stripe_sector(rbio, stripe, sectornr);
+ paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr);
}
- ret = rbio_add_io_sector(rbio, bio_list, sector,
+ ret = rbio_add_io_paddrs(rbio, bio_list, paddrs,
rbio->real_stripes,
sectornr, REQ_OP_WRITE);
if (ret)
@@ -1473,22 +1593,17 @@ static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio)
}
/*
- * For subpage case, we can no longer set page Up-to-date directly for
- * stripe_pages[], thus we need to locate the sector.
+ * Return the index inside the rbio->stripe_sectors[] array.
+ *
+ * Return -1 if not found.
*/
-static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
- struct page *page,
- unsigned int pgoff)
+static int find_stripe_sector_nr(struct btrfs_raid_bio *rbio, phys_addr_t paddr)
{
- int i;
-
- for (i = 0; i < rbio->nr_sectors; i++) {
- struct sector_ptr *sector = &rbio->stripe_sectors[i];
-
- if (sector->page == page && sector->pgoff == pgoff)
- return sector;
+ for (int i = 0; i < rbio->nr_sectors; i++) {
+ if (rbio->stripe_paddrs[i * rbio->sector_nsteps] == paddr)
+ return i;
}
- return NULL;
+ return -1;
}
/*
@@ -1498,38 +1613,34 @@ static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
{
const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ const u32 step = min(sectorsize, PAGE_SIZE);
+ u32 offset = 0;
+ phys_addr_t paddr;
ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct sector_ptr *sector;
- int pgoff;
+ btrfs_bio_for_each_block_all(paddr, bio, step) {
+ /* Hitting the first step of a sector. */
+ if (IS_ALIGNED(offset, sectorsize)) {
+ int sector_nr = find_stripe_sector_nr(rbio, paddr);
- for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len;
- pgoff += sectorsize) {
- sector = find_stripe_sector(rbio, bvec->bv_page, pgoff);
- ASSERT(sector);
- if (sector)
- sector->uptodate = 1;
+ ASSERT(sector_nr >= 0);
+ if (sector_nr >= 0)
+ set_bit(sector_nr, rbio->stripe_uptodate_bitmap);
}
+ offset += step;
}
}
static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio)
{
- struct bio_vec *bv = bio_first_bvec_all(bio);
+ phys_addr_t bvec_paddr = bvec_phys(bio_first_bvec_all(bio));
int i;
for (i = 0; i < rbio->nr_sectors; i++) {
- struct sector_ptr *sector;
-
- sector = &rbio->stripe_sectors[i];
- if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
+ if (rbio->stripe_paddrs[i * rbio->sector_nsteps] == bvec_paddr)
break;
- sector = &rbio->bio_sectors[i];
- if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
+ if (rbio->bio_paddrs[i * rbio->sector_nsteps] == bvec_paddr)
break;
}
ASSERT(i < rbio->nr_sectors);
@@ -1562,9 +1673,12 @@ static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
struct bio *bio)
{
struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
+ const u32 step = min(fs_info->sectorsize, PAGE_SIZE);
+ const u32 nr_steps = rbio->sector_nsteps;
int total_sector_nr = get_bio_sector_nr(rbio, bio);
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ u32 offset = 0;
+ phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
+ phys_addr_t paddr;
/* No data csum for the whole stripe, no need to verify. */
if (!rbio->csum_bitmap || !rbio->csum_buf)
@@ -1574,26 +1688,26 @@ static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors)
return;
- bio_for_each_segment_all(bvec, bio, iter_all) {
- int bv_offset;
+ btrfs_bio_for_each_block_all(paddr, bio, step) {
+ u8 csum_buf[BTRFS_CSUM_SIZE];
+ u8 *expected_csum;
- for (bv_offset = bvec->bv_offset;
- bv_offset < bvec->bv_offset + bvec->bv_len;
- bv_offset += fs_info->sectorsize, total_sector_nr++) {
- u8 csum_buf[BTRFS_CSUM_SIZE];
- u8 *expected_csum = rbio->csum_buf +
- total_sector_nr * fs_info->csum_size;
- int ret;
+ paddrs[(offset / step) % nr_steps] = paddr;
+ offset += step;
- /* No csum for this sector, skip to the next sector. */
- if (!test_bit(total_sector_nr, rbio->csum_bitmap))
- continue;
+ /* Not yet covering the full fs block, continue to the next step. */
+ if (!IS_ALIGNED(offset, fs_info->sectorsize))
+ continue;
- ret = btrfs_check_sector_csum(fs_info, bvec->bv_page,
- bv_offset, csum_buf, expected_csum);
- if (ret < 0)
- set_bit(total_sector_nr, rbio->error_bitmap);
- }
+ /* No csum for this sector, skip to the next sector. */
+ if (!test_bit(total_sector_nr, rbio->csum_bitmap)) {
+ total_sector_nr++;
+ continue;
+ }
+
+ expected_csum = rbio->csum_buf + total_sector_nr * fs_info->csum_size;
+ btrfs_calculate_block_csum_pages(fs_info, paddrs, csum_buf);
+ if (unlikely(memcmp(csum_buf, expected_csum, fs_info->csum_size) != 0))
+ set_bit(total_sector_nr, rbio->error_bitmap);
+ total_sector_nr++;
}
}
@@ -1689,8 +1803,8 @@ static void raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
list_sort(NULL, &plug->rbio_list, plug_cmp);
while (!list_empty(&plug->rbio_list)) {
- cur = list_entry(plug->rbio_list.next,
- struct btrfs_raid_bio, plug_list);
+ cur = list_first_entry(&plug->rbio_list,
+ struct btrfs_raid_bio, plug_list);
list_del_init(&cur->plug_list);
if (rbio_is_full(cur)) {
@@ -1788,10 +1902,9 @@ static int verify_one_sector(struct btrfs_raid_bio *rbio,
int stripe_nr, int sector_nr)
{
struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
- struct sector_ptr *sector;
+ phys_addr_t *paddrs;
u8 csum_buf[BTRFS_CSUM_SIZE];
u8 *csum_expected;
- int ret;
if (!rbio->csum_bitmap || !rbio->csum_buf)
return 0;
@@ -1804,57 +1917,32 @@ static int verify_one_sector(struct btrfs_raid_bio *rbio,
* bio list if possible.
*/
if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
- sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
+ paddrs = sector_paddrs_in_rbio(rbio, stripe_nr, sector_nr, 0);
} else {
- sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
+ paddrs = rbio_stripe_paddrs(rbio, stripe_nr, sector_nr);
}
- ASSERT(sector->page);
-
csum_expected = rbio->csum_buf +
(stripe_nr * rbio->stripe_nsectors + sector_nr) *
fs_info->csum_size;
- ret = btrfs_check_sector_csum(fs_info, sector->page, sector->pgoff,
- csum_buf, csum_expected);
- return ret;
+ btrfs_calculate_block_csum_pages(fs_info, paddrs, csum_buf);
+ if (unlikely(memcmp(csum_buf, csum_expected, fs_info->csum_size) != 0))
+ return -EIO;
+ return 0;
}
-/*
- * Recover a vertical stripe specified by @sector_nr.
- * @*pointers are the pre-allocated pointers by the caller, so we don't
- * need to allocate/free the pointers again and again.
- */
-static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
- void **pointers, void **unmap_array)
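+/*
+ * Rebuild one step (min(sectorsize, PAGE_SIZE) bytes) of the vertical stripe
+ * at @sector_nr, with @faila/@failb being the failed stripe numbers already
+ * determined by the caller.
+ */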
+static void recover_vertical_step(struct btrfs_raid_bio *rbio,
+ unsigned int sector_nr,
+ unsigned int step_nr,
+ int faila, int failb,
+ void **pointers, void **unmap_array)
{
struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
- struct sector_ptr *sector;
- const u32 sectorsize = fs_info->sectorsize;
- int found_errors;
- int faila;
- int failb;
+ const u32 step = min(fs_info->sectorsize, PAGE_SIZE);
int stripe_nr;
- int ret = 0;
- /*
- * Now we just use bitmap to mark the horizontal stripes in
- * which we have data when doing parity scrub.
- */
- if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
- !test_bit(sector_nr, &rbio->dbitmap))
- return 0;
-
- found_errors = get_rbio_veritical_errors(rbio, sector_nr, &faila,
- &failb);
- /*
- * No errors in the vertical stripe, skip it. Can happen for recovery
- * which only part of a stripe failed csum check.
- */
- if (!found_errors)
- return 0;
-
- if (found_errors > rbio->bioc->max_errors)
- return -EIO;
+ ASSERT(step_nr < rbio->sector_nsteps);
+ ASSERT(sector_nr < rbio->stripe_nsectors);
/*
* Setup our array of pointers with sectors from each stripe
@@ -1863,18 +1951,18 @@ static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
* pointer order.
*/
for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
+ phys_addr_t paddr;
+
/*
* If we're rebuilding a read, we have to use pages from the
* bio list if possible.
*/
if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
- sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
+ paddr = sector_paddr_in_rbio(rbio, stripe_nr, sector_nr, step_nr, 0);
} else {
- sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
+ paddr = rbio_stripe_paddr(rbio, stripe_nr, sector_nr, step_nr);
}
- ASSERT(sector->page);
- pointers[stripe_nr] = kmap_local_page(sector->page) +
- sector->pgoff;
+ pointers[stripe_nr] = kmap_local_paddr(paddr);
unmap_array[stripe_nr] = pointers[stripe_nr];
}
@@ -1920,10 +2008,10 @@ static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
}
if (failb == rbio->real_stripes - 2) {
- raid6_datap_recov(rbio->real_stripes, sectorsize,
+ raid6_datap_recov(rbio->real_stripes, step,
faila, pointers);
} else {
- raid6_2data_recov(rbio->real_stripes, sectorsize,
+ raid6_2data_recov(rbio->real_stripes, step,
faila, failb, pointers);
}
} else {
@@ -1933,7 +2021,7 @@ static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
ASSERT(failb == -1);
pstripe:
/* Copy parity block into failed block to start with */
- memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize);
+ memcpy(pointers[faila], pointers[rbio->nr_data], step);
/* Rearrange the pointer array */
p = pointers[faila];
@@ -1943,40 +2031,66 @@ pstripe:
pointers[rbio->nr_data - 1] = p;
/* Xor in the rest */
- run_xor(pointers, rbio->nr_data - 1, sectorsize);
-
+ run_xor(pointers, rbio->nr_data - 1, step);
}
+cleanup:
+ for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
+ kunmap_local(unmap_array[stripe_nr]);
+}
+
+/*
+ * Recover a vertical stripe specified by @sector_nr.
+ * @*pointers are the pre-allocated pointers by the caller, so we don't
+ * need to allocate/free the pointers again and again.
+ */
+static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
+ void **pointers, void **unmap_array)
+{
+ int found_errors;
+ int faila;
+ int failb;
+ int ret = 0;
+
/*
- * No matter if this is a RMW or recovery, we should have all
- * failed sectors repaired in the vertical stripe, thus they are now
- * uptodate.
- * Especially if we determine to cache the rbio, we need to
- * have at least all data sectors uptodate.
- *
- * If possible, also check if the repaired sector matches its data
- * checksum.
+ * Now we just use bitmap to mark the horizontal stripes in
+ * which we have data when doing parity scrub.
*/
+ if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
+ !test_bit(sector_nr, &rbio->dbitmap))
+ return 0;
+
+ found_errors = get_rbio_vertical_errors(rbio, sector_nr, &faila,
+ &failb);
+ /*
+ * No errors in the vertical stripe, skip it. Can happen for recovery
+ * which only part of a stripe failed csum check.
+ */
+ if (!found_errors)
+ return 0;
+
+ if (unlikely(found_errors > rbio->bioc->max_errors))
+ return -EIO;
+
+ for (int i = 0; i < rbio->sector_nsteps; i++)
+ recover_vertical_step(rbio, sector_nr, i, faila, failb,
+ pointers, unmap_array);
if (faila >= 0) {
ret = verify_one_sector(rbio, faila, sector_nr);
if (ret < 0)
- goto cleanup;
+ return ret;
- sector = rbio_stripe_sector(rbio, faila, sector_nr);
- sector->uptodate = 1;
+ set_bit(rbio_sector_index(rbio, faila, sector_nr),
+ rbio->stripe_uptodate_bitmap);
}
if (failb >= 0) {
ret = verify_one_sector(rbio, failb, sector_nr);
if (ret < 0)
- goto cleanup;
+ return ret;
- sector = rbio_stripe_sector(rbio, failb, sector_nr);
- sector->uptodate = 1;
+ set_bit(rbio_sector_index(rbio, failb, sector_nr),
+ rbio->stripe_uptodate_bitmap);
}
-
-cleanup:
- for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
- kunmap_local(unmap_array[stripe_nr]);
return ret;
}
@@ -2051,7 +2165,7 @@ static void recover_rbio(struct btrfs_raid_bio *rbio)
total_sector_nr++) {
int stripe = total_sector_nr / rbio->stripe_nsectors;
int sectornr = total_sector_nr % rbio->stripe_nsectors;
- struct sector_ptr *sector;
+ phys_addr_t *paddrs;
/*
* Skip the range which has error. It can be a range which is
@@ -2068,8 +2182,8 @@ static void recover_rbio(struct btrfs_raid_bio *rbio)
continue;
}
- sector = rbio_stripe_sector(rbio, stripe, sectornr);
- ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
+ paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr);
+ ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, stripe,
sectornr, REQ_OP_READ);
if (ret < 0) {
bio_list_put(&bio_list);
@@ -2114,7 +2228,7 @@ static void set_rbio_raid6_extra_error(struct btrfs_raid_bio *rbio, int mirror_n
int faila;
int failb;
- found_errors = get_rbio_veritical_errors(rbio, sector_nr,
+ found_errors = get_rbio_vertical_errors(rbio, sector_nr,
&faila, &failb);
/* This vertical stripe doesn't have errors. */
if (!found_errors)
@@ -2258,13 +2372,13 @@ static int rmw_read_wait_recover(struct btrfs_raid_bio *rbio)
*/
for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
total_sector_nr++) {
- struct sector_ptr *sector;
int stripe = total_sector_nr / rbio->stripe_nsectors;
int sectornr = total_sector_nr % rbio->stripe_nsectors;
+ phys_addr_t *paddrs;
- sector = rbio_stripe_sector(rbio, stripe, sectornr);
- ret = rbio_add_io_sector(rbio, &bio_list, sector,
- stripe, sectornr, REQ_OP_READ);
+ paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr);
+ ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, stripe,
+ sectornr, REQ_OP_READ);
if (ret) {
bio_list_put(&bio_list);
return ret;
@@ -2282,9 +2396,8 @@ static int rmw_read_wait_recover(struct btrfs_raid_bio *rbio)
static void raid_wait_write_end_io(struct bio *bio)
{
struct btrfs_raid_bio *rbio = bio->bi_private;
- blk_status_t err = bio->bi_status;
- if (err)
+ if (bio->bi_status)
rbio_update_error_bitmap(rbio, bio);
bio_put(bio);
if (atomic_dec_and_test(&rbio->stripes_pending))
@@ -2319,14 +2432,15 @@ static bool need_read_stripe_sectors(struct btrfs_raid_bio *rbio)
int i;
for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) {
- struct sector_ptr *sector = &rbio->stripe_sectors[i];
+ phys_addr_t paddr = rbio->stripe_paddrs[i * rbio->sector_nsteps];
/*
* We have a sector which doesn't have page nor uptodate,
* thus this rbio can not be cached one, as cached one must
* have all its data sectors present and uptodate.
*/
- if (!sector->page || !sector->uptodate)
+ if (paddr == INVALID_PADDR ||
+ !test_bit(i, rbio->stripe_uptodate_bitmap))
return true;
}
return false;
@@ -2407,8 +2521,8 @@ static void rmw_rbio(struct btrfs_raid_bio *rbio)
for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
int found_errors;
- found_errors = get_rbio_veritical_errors(rbio, sectornr, NULL, NULL);
- if (found_errors > rbio->bioc->max_errors) {
+ found_errors = get_rbio_vertical_errors(rbio, sectornr, NULL, NULL);
+ if (unlikely(found_errors > rbio->bioc->max_errors)) {
ret = -EIO;
break;
}
@@ -2478,46 +2592,121 @@ struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
return rbio;
}
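+/* Allocate any missing stripe pages backing the fs block at @sector_nr. */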
+static int alloc_rbio_sector_pages(struct btrfs_raid_bio *rbio,
+ int sector_nr)
+{
+ const u32 step = min(PAGE_SIZE, rbio->bioc->fs_info->sectorsize);
+ const u32 base = sector_nr * rbio->sector_nsteps;
+
+ for (int i = base; i < base + rbio->sector_nsteps; i++) {
+ const unsigned int page_index = (i * step) >> PAGE_SHIFT;
+ struct page *page;
+
+ if (rbio->stripe_pages[page_index])
+ continue;
+ page = alloc_page(GFP_NOFS);
+ if (!page)
+ return -ENOMEM;
+ rbio->stripe_pages[page_index] = page;
+ }
+ return 0;
+}
+
/*
* We just scrub the parity that we have correct data on the same horizontal,
* so we needn't allocate all pages for all the stripes.
*/
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
- const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
int total_sector_nr;
for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
total_sector_nr++) {
- struct page *page;
int sectornr = total_sector_nr % rbio->stripe_nsectors;
- int index = (total_sector_nr * sectorsize) >> PAGE_SHIFT;
+ int ret;
if (!test_bit(sectornr, &rbio->dbitmap))
continue;
- if (rbio->stripe_pages[index])
- continue;
- page = alloc_page(GFP_NOFS);
- if (!page)
- return -ENOMEM;
- rbio->stripe_pages[index] = page;
+ ret = alloc_rbio_sector_pages(rbio, total_sector_nr);
+ if (ret < 0)
+ return ret;
}
index_stripe_sectors(rbio);
return 0;
}
+/* Return true if the content of the step matches the calculated one. */
+static bool verify_one_parity_step(struct btrfs_raid_bio *rbio,
+ void *pointers[], unsigned int sector_nr,
+ unsigned int step_nr)
+{
+ const unsigned int nr_data = rbio->nr_data;
+ const bool has_qstripe = (rbio->real_stripes - rbio->nr_data == 2);
+ const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE);
+ void *parity;
+ bool ret = false;
+
+ ASSERT(step_nr < rbio->sector_nsteps);
+
+ /* First collect one page from each data stripe. */
+ for (int stripe = 0; stripe < nr_data; stripe++)
+ pointers[stripe] = kmap_local_paddr(
+ sector_paddr_in_rbio(rbio, stripe, sector_nr,
+ step_nr, 0));
+
+ if (has_qstripe) {
+ assert_rbio(rbio);
+ /* RAID6, call the library function to fill in our P/Q. */
+ raid6_call.gen_syndrome(rbio->real_stripes, step, pointers);
+ } else {
+ /* RAID5. */
+ memcpy(pointers[nr_data], pointers[0], step);
+ run_xor(pointers + 1, nr_data - 1, step);
+ }
+
+ /* Check scrubbing parity and repair it. */
+ parity = kmap_local_paddr(rbio_stripe_paddr(rbio, rbio->scrubp, sector_nr, step_nr));
+ if (memcmp(parity, pointers[rbio->scrubp], step) != 0)
+ memcpy(parity, pointers[rbio->scrubp], step);
+ else
+ ret = true;
+ kunmap_local(parity);
+
+ for (int stripe = nr_data - 1; stripe >= 0; stripe--)
+ kunmap_local(pointers[stripe]);
+ return ret;
+}
+
+/*
+ * The @pointers array should have the P/Q parity already mapped.
+ */
+static void verify_one_parity_sector(struct btrfs_raid_bio *rbio,
+ void *pointers[], unsigned int sector_nr)
+{
+ bool found_error = false;
+
+ for (int step_nr = 0; step_nr < rbio->sector_nsteps; step_nr++) {
+ bool match;
+
+ match = verify_one_parity_step(rbio, pointers, sector_nr, step_nr);
+ if (!match)
+ found_error = true;
+ }
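+ /* The whole parity block is correct, no need to write it back. */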
+ if (!found_error)
+ bitmap_clear(&rbio->dbitmap, sector_nr, 1);
+}
+
static int finish_parity_scrub(struct btrfs_raid_bio *rbio)
{
struct btrfs_io_context *bioc = rbio->bioc;
- const u32 sectorsize = bioc->fs_info->sectorsize;
void **pointers = rbio->finish_pointers;
unsigned long *pbitmap = &rbio->finish_pbitmap;
int nr_data = rbio->nr_data;
- int stripe;
int sectornr;
bool has_qstripe;
- struct sector_ptr p_sector = { 0 };
- struct sector_ptr q_sector = { 0 };
+ struct page *page;
+ phys_addr_t p_paddr = INVALID_PADDR;
+ phys_addr_t q_paddr = INVALID_PADDR;
struct bio_list bio_list;
int is_replace = 0;
int ret;
@@ -2547,73 +2736,39 @@ static int finish_parity_scrub(struct btrfs_raid_bio *rbio)
*/
clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
- p_sector.page = alloc_page(GFP_NOFS);
- if (!p_sector.page)
+ page = alloc_page(GFP_NOFS);
+ if (!page)
return -ENOMEM;
- p_sector.pgoff = 0;
- p_sector.uptodate = 1;
+ p_paddr = page_to_phys(page);
+ page = NULL;
+ pointers[nr_data] = kmap_local_paddr(p_paddr);
if (has_qstripe) {
/* RAID6, allocate and map temp space for the Q stripe */
- q_sector.page = alloc_page(GFP_NOFS);
- if (!q_sector.page) {
- __free_page(p_sector.page);
- p_sector.page = NULL;
+ page = alloc_page(GFP_NOFS);
+ if (!page) {
+ __free_page(phys_to_page(p_paddr));
+ p_paddr = INVALID_PADDR;
return -ENOMEM;
}
- q_sector.pgoff = 0;
- q_sector.uptodate = 1;
- pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
+ q_paddr = page_to_phys(page);
+ page = NULL;
+ pointers[rbio->real_stripes - 1] = kmap_local_paddr(q_paddr);
}
bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
/* Map the parity stripe just once */
- pointers[nr_data] = kmap_local_page(p_sector.page);
-
- for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
- struct sector_ptr *sector;
- void *parity;
-
- /* first collect one page from each data stripe */
- for (stripe = 0; stripe < nr_data; stripe++) {
- sector = sector_in_rbio(rbio, stripe, sectornr, 0);
- pointers[stripe] = kmap_local_page(sector->page) +
- sector->pgoff;
- }
-
- if (has_qstripe) {
- assert_rbio(rbio);
- /* RAID6, call the library function to fill in our P/Q */
- raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
- pointers);
- } else {
- /* raid5 */
- memcpy(pointers[nr_data], pointers[0], sectorsize);
- run_xor(pointers + 1, nr_data - 1, sectorsize);
- }
- /* Check scrubbing parity and repair it */
- sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
- parity = kmap_local_page(sector->page) + sector->pgoff;
- if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
- memcpy(parity, pointers[rbio->scrubp], sectorsize);
- else
- /* Parity is right, needn't writeback */
- bitmap_clear(&rbio->dbitmap, sectornr, 1);
- kunmap_local(parity);
-
- for (stripe = nr_data - 1; stripe >= 0; stripe--)
- kunmap_local(pointers[stripe]);
- }
+ for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors)
+ verify_one_parity_sector(rbio, pointers, sectornr);
kunmap_local(pointers[nr_data]);
- __free_page(p_sector.page);
- p_sector.page = NULL;
- if (q_sector.page) {
- kunmap_local(pointers[rbio->real_stripes - 1]);
- __free_page(q_sector.page);
- q_sector.page = NULL;
+ __free_page(phys_to_page(p_paddr));
+ p_paddr = INVALID_PADDR;
+ if (q_paddr != INVALID_PADDR) {
+ kunmap_local(pointers[rbio->real_stripes - 1]);
+ __free_page(phys_to_page(q_paddr));
+ q_paddr = INVALID_PADDR;
}
/*
@@ -2622,10 +2777,10 @@ static int finish_parity_scrub(struct btrfs_raid_bio *rbio)
* everything else.
*/
for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
- struct sector_ptr *sector;
+ phys_addr_t *paddrs;
- sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
- ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp,
+ paddrs = rbio_stripe_paddrs(rbio, rbio->scrubp, sectornr);
+ ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, rbio->scrubp,
sectornr, REQ_OP_WRITE);
if (ret)
goto cleanup;
@@ -2640,11 +2795,10 @@ static int finish_parity_scrub(struct btrfs_raid_bio *rbio)
*/
ASSERT_RBIO(rbio->bioc->replace_stripe_src >= 0, rbio);
for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
- struct sector_ptr *sector;
+ phys_addr_t *paddrs;
- sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
- ret = rbio_add_io_sector(rbio, &bio_list, sector,
- rbio->real_stripes,
+ paddrs = rbio_stripe_paddrs(rbio, rbio->scrubp, sectornr);
+ ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, rbio->real_stripes,
sectornr, REQ_OP_WRITE);
if (ret)
goto cleanup;
@@ -2692,9 +2846,9 @@ static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
int failb;
int found_errors;
- found_errors = get_rbio_veritical_errors(rbio, sector_nr,
+ found_errors = get_rbio_vertical_errors(rbio, sector_nr,
&faila, &failb);
- if (found_errors > rbio->bioc->max_errors) {
+ if (unlikely(found_errors > rbio->bioc->max_errors)) {
ret = -EIO;
goto out;
}
@@ -2718,7 +2872,7 @@ static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
* data, so the capability of the repair is declined. (In the
* case of RAID5, we can not repair anything.)
*/
- if (dfail > rbio->bioc->max_errors - 1) {
+ if (unlikely(dfail > rbio->bioc->max_errors - 1)) {
ret = -EIO;
goto out;
}
@@ -2735,7 +2889,7 @@ static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
* scrubbing parity, luckily, use the other one to repair the
* data, or we can not repair the data stripe.
*/
- if (failp != rbio->scrubp) {
+ if (unlikely(failp != rbio->scrubp)) {
ret = -EIO;
goto out;
}
@@ -2761,7 +2915,7 @@ static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio)
total_sector_nr++) {
int sectornr = total_sector_nr % rbio->stripe_nsectors;
int stripe = total_sector_nr / rbio->stripe_nsectors;
- struct sector_ptr *sector;
+ phys_addr_t *paddrs;
/* No data in the vertical stripe, no need to read. */
if (!test_bit(sectornr, &rbio->dbitmap))
@@ -2769,22 +2923,23 @@ static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio)
/*
* We want to find all the sectors missing from the rbio and
- * read them from the disk. If sector_in_rbio() finds a sector
+ * read them from the disk. If sector_paddr_in_rbio() finds a sector
* in the bio list we don't need to read it off the stripe.
*/
- sector = sector_in_rbio(rbio, stripe, sectornr, 1);
- if (sector)
+ paddrs = sector_paddrs_in_rbio(rbio, stripe, sectornr, 1);
+ if (paddrs == NULL)
continue;
- sector = rbio_stripe_sector(rbio, stripe, sectornr);
+ paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr);
/*
* The bio cache may have handed us an uptodate sector. If so,
* use it.
*/
- if (sector->uptodate)
+ if (test_bit(rbio_sector_index(rbio, stripe, sectornr),
+ rbio->stripe_uptodate_bitmap))
continue;
- ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
+ ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, stripe,
sectornr, REQ_OP_READ);
if (ret) {
bio_list_put(&bio_list);
@@ -2825,8 +2980,8 @@ static void scrub_rbio(struct btrfs_raid_bio *rbio)
for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
int found_errors;
- found_errors = get_rbio_veritical_errors(rbio, sector_nr, NULL, NULL);
- if (found_errors > rbio->bioc->max_errors) {
+ found_errors = get_rbio_vertical_errors(rbio, sector_nr, NULL, NULL);
+ if (unlikely(found_errors > rbio->bioc->max_errors)) {
ret = -EIO;
break;
}
@@ -2850,17 +3005,17 @@ void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
* This is for scrub call sites where we already have correct data contents.
* This allows us to avoid reading data stripes again.
*
- * Unfortunately here we have to do page copy, other than reusing the pages.
+ * Unfortunately here we have to do a folio copy, rather than reusing the pages.
* This is due to the fact rbio has its own page management for its cache.
*/
-void raid56_parity_cache_data_pages(struct btrfs_raid_bio *rbio,
- struct page **data_pages, u64 data_logical)
+void raid56_parity_cache_data_folios(struct btrfs_raid_bio *rbio,
+ struct folio **data_folios, u64 data_logical)
{
+ struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
const u64 offset_in_full_stripe = data_logical -
rbio->bioc->full_stripe_logical;
- const int page_index = offset_in_full_stripe >> PAGE_SHIFT;
- const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
- const u32 sectors_per_page = PAGE_SIZE / sectorsize;
+ unsigned int findex = 0;
+ unsigned int foffset = 0;
int ret;
/*
@@ -2879,14 +3034,24 @@ void raid56_parity_cache_data_pages(struct btrfs_raid_bio *rbio,
ASSERT(IS_ALIGNED(offset_in_full_stripe, BTRFS_STRIPE_LEN));
ASSERT(offset_in_full_stripe < (rbio->nr_data << BTRFS_STRIPE_LEN_SHIFT));
- for (int page_nr = 0; page_nr < (BTRFS_STRIPE_LEN >> PAGE_SHIFT); page_nr++) {
- struct page *dst = rbio->stripe_pages[page_nr + page_index];
- struct page *src = data_pages[page_nr];
-
- memcpy_page(dst, 0, src, 0, PAGE_SIZE);
- for (int sector_nr = sectors_per_page * page_index;
- sector_nr < sectors_per_page * (page_index + 1);
- sector_nr++)
- rbio->stripe_sectors[sector_nr].uptodate = true;
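+ /*
+ * Copy the data from the source folios into the internal stripe pages,
+ * one page at a time, advancing to the next folio once the current one
+ * is exhausted.
+ */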
+ for (unsigned int cur_off = offset_in_full_stripe;
+ cur_off < offset_in_full_stripe + BTRFS_STRIPE_LEN;
+ cur_off += PAGE_SIZE) {
+ const unsigned int pindex = cur_off >> PAGE_SHIFT;
+ void *kaddr;
+
+ kaddr = kmap_local_page(rbio->stripe_pages[pindex]);
+ memcpy_from_folio(kaddr, data_folios[findex], foffset, PAGE_SIZE);
+ kunmap_local(kaddr);
+
+ foffset += PAGE_SIZE;
+ ASSERT(foffset <= folio_size(data_folios[findex]));
+ if (foffset == folio_size(data_folios[findex])) {
+ findex++;
+ foffset = 0;
+ }
}
+ bitmap_set(rbio->stripe_uptodate_bitmap,
+ offset_in_full_stripe >> fs_info->sectorsize_bits,
+ BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
}
diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h
index 0d7b4c2fb6ae..1f463ecf7e41 100644
--- a/fs/btrfs/raid56.h
+++ b/fs/btrfs/raid56.h
@@ -16,7 +16,6 @@
#include "volumes.h"
struct page;
-struct sector_ptr;
struct btrfs_fs_info;
enum btrfs_rbio_ops {
@@ -25,6 +24,84 @@ enum btrfs_rbio_ops {
BTRFS_RBIO_PARITY_SCRUB,
};
+/*
+ * Overview of btrfs_raid_bio.
+ *
+ * One btrfs_raid_bio represents a full stripe of RAID56, including both data
+ * and P/Q stripes. For now, each data and P/Q stripe is of a fixed length (64K).
+ *
+ * One btrfs_raid_bio can have one or more bios from higher layer, covering
+ * part or all of the data stripes.
+ *
+ * [PAGES FROM HIGHER LAYER BIOS]
+ * Higher layer bios are in the btrfs_raid_bio::bio_list.
+ *
+ * Pages from the bio_list are represented like the following:
+ *
+ * bio_list: |<- Bio 1 ->| |<- Bio 2 ->| ...
+ * bio_paddrs: [0] [1] [2] [3] [4] [5] ...
+ *
+ * If there is a bio covering a sector (one btrfs fs block), the corresponding
+ * entries in btrfs_raid_bio::bio_paddrs[] will hold the physical addresses
+ * (including the offset inside the page) of that sector's data in the bio.
+ *
+ * If there is no bio covering a sector, then btrfs_raid_bio::bio_paddrs[i] will
+ * be INVALID_PADDR.
+ *
+ * The length of each entry in bio_paddrs[] is a step (aka, min(sectorsize, PAGE_SIZE)).
+ *
+ * [PAGES FOR INTERNAL USAGES]
+ * Pages not covered by any bio or belonging to P/Q stripes are stored in
+ * btrfs_raid_bio::stripe_pages[] and stripe_paddrs[], like the following:
+ *
+ * stripe_pages: |<- Page 0 ->|<- Page 1 ->| ...
+ * stripe_paddrs: [0] [1] [2] [3] [4] ...
+ *
+ * stripe_pages[] array stores all the pages covering the full stripe, including
+ * data and P/Q pages.
+ * stripe_pages[0] is the first page of the first data stripe.
+ * stripe_pages[BTRFS_STRIPE_LEN / PAGE_SIZE] is the first page of the second
+ * data stripe.
+ *
+ * Some pointers inside stripe_pages[] can be NULL. E.g. for a full stripe
+ * write (the bios cover all data stripes) there is no need to allocate pages
+ * for the data stripes, as their content can be grabbed from bio_paddrs[].
+ *
+ * If the corresponding page of stripe_paddrs[i] is not allocated, the value of
+ * stripe_paddrs[i] will be INVALID_PADDR.
+ *
+ * The length of each entry in stripe_paddrs[] is a step.
+ *
+ * [LOCATING A SECTOR]
+ * To locate a sector for IO, we need the following info:
+ *
+ * - stripe_nr
+ * Starts from 0 (representing the first data stripe), ends at
+ * @nr_data (RAID5, P stripe) or @nr_data + 1 (RAID6, Q stripe).
+ *
+ * - sector_nr
+ * Starts from 0 (representing the first sector of the stripe), ends
+ * at BTRFS_STRIPE_LEN / sectorsize - 1.
+ *
+ * - step_nr
+ * A step is min(sector_size, PAGE_SIZE).
+ *
+ * Starts from 0 (representing the first step of the sector), ends
+ * at @sector_nsteps - 1.
+ *
+ * Most call sites do not need to bother with this parameter. It is only
+ * needed for bs > ps support and for vertical stripe related work
+ * (e.g. RMW/recover).
+ *
+ * - from which array
+ * Whether grabbing from stripe_paddrs[] (aka, internal pages) or from the
+ * bio_paddrs[] (aka, from the higher layer bios).
+ *
+ * For IO, a physical address is returned, from which we can extract the page
+ * and the offset inside the page.
+ * The special value INVALID_PADDR means the physical address is invalid,
+ * normally because there is no page allocated for the specified sector.
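+ *
+ * E.g. with a 16K block size on 4K pages (bs > ps), a step is 4K and
+ * sector_nsteps is 4; with 64K stripes stripe_nsectors is 4, so the step at
+ * stripe_nr = 1, sector_nr = 2, step_nr = 3 lives at index
+ * (1 * 4 + 2) * 4 + 3 = 27 of stripe_paddrs[] (or bio_paddrs[]).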
+ */
struct btrfs_raid_bio {
struct btrfs_io_context *bioc;
@@ -82,6 +159,14 @@ struct btrfs_raid_bio {
/* How many sectors there are for each stripe */
u8 stripe_nsectors;
+ /*
+ * How many steps there are for one sector.
+ *
+ * For bs > ps cases, it's sectorsize / PAGE_SIZE.
+ * For bs <= ps cases, it's always 1.
+ */
+ u8 sector_nsteps;
+
/* Stripe number that we're scrubbing */
u8 scrubp;
@@ -116,13 +201,13 @@ struct btrfs_raid_bio {
struct page **stripe_pages;
/* Pointers to the sectors in the bio_list, for faster lookup */
- struct sector_ptr *bio_sectors;
+ phys_addr_t *bio_paddrs;
- /*
- * For subpage support, we need to map each sector to above
- * stripe_pages.
- */
- struct sector_ptr *stripe_sectors;
+ /* Pointers to the sectors in the stripe_pages[]. */
+ phys_addr_t *stripe_paddrs;
+
+ /* Each set bit means the corresponding sector in stripe_paddrs[] is uptodate. */
+ unsigned long *stripe_uptodate_bitmap;
/* Allocated with real_stripes-many pointers for finish_*() calls */
void **finish_pointers;
@@ -131,10 +216,6 @@ struct btrfs_raid_bio {
* The bitmap recording where IO errors happened.
- * Each bit is corresponding to one sector in either bio_sectors[] or
- * stripe_sectors[] array.
+ * Each bit corresponds to one sector in either the bio_paddrs[] or
+ * stripe_paddrs[] array.
- *
- * The reason we don't use another bit in sector_ptr is, we have two
- * arrays of sectors, and a lot of IO can use sectors in both arrays.
- * Thus making it much harder to iterate.
*/
unsigned long *error_bitmap;
@@ -201,8 +282,8 @@ struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
unsigned long *dbitmap, int stripe_nsectors);
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
-void raid56_parity_cache_data_pages(struct btrfs_raid_bio *rbio,
- struct page **data_pages, u64 data_logical);
+void raid56_parity_cache_data_folios(struct btrfs_raid_bio *rbio,
+ struct folio **data_folios, u64 data_logical);
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info);
diff --git a/fs/btrfs/rcu-string.h b/fs/btrfs/rcu-string.h
deleted file mode 100644
index 1c2d7cb1fe6f..000000000000
--- a/fs/btrfs/rcu-string.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2012 Red Hat. All rights reserved.
- */
-
-#ifndef BTRFS_RCU_STRING_H
-#define BTRFS_RCU_STRING_H
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/rcupdate.h>
-#include <linux/printk.h>
-
-struct rcu_string {
- struct rcu_head rcu;
- char str[];
-};
-
-static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask)
-{
- size_t len = strlen(src) + 1;
- struct rcu_string *ret = kzalloc(sizeof(struct rcu_string) +
- (len * sizeof(char)), mask);
- if (!ret)
- return ret;
- /* Warn if the source got unexpectedly truncated. */
- if (WARN_ON(strscpy(ret->str, src, len) < 0)) {
- kfree(ret);
- return NULL;
- }
- return ret;
-}
-
-static inline void rcu_string_free(struct rcu_string *str)
-{
- if (str)
- kfree_rcu(str, rcu);
-}
-
-#define printk_in_rcu(fmt, ...) do { \
- rcu_read_lock(); \
- printk(fmt, __VA_ARGS__); \
- rcu_read_unlock(); \
-} while (0)
-
-#define printk_ratelimited_in_rcu(fmt, ...) do { \
- rcu_read_lock(); \
- printk_ratelimited(fmt, __VA_ARGS__); \
- rcu_read_unlock(); \
-} while (0)
-
-#define rcu_str_deref(rcu_str) ({ \
- struct rcu_string *__str = rcu_dereference(rcu_str); \
- __str->str; \
-})
-
-#endif
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index 9522a8b79d22..e9224145d754 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -75,69 +75,70 @@ struct block_entry {
struct list_head actions;
};
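+/*
+ * Comparators for rb_find()/rb_find_add(): return a negative value if the key
+ * (or new node) sorts before the existing node, a positive value if it sorts
+ * after it, and 0 if they are equal.
+ */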
+static int block_entry_bytenr_key_cmp(const void *key, const struct rb_node *node)
+{
+ const u64 *bytenr = key;
+ const struct block_entry *entry = rb_entry(node, struct block_entry, node);
+
+ if (entry->bytenr < *bytenr)
+ return 1;
+ else if (entry->bytenr > *bytenr)
+ return -1;
+
+ return 0;
+}
+
+static int block_entry_bytenr_cmp(struct rb_node *new, const struct rb_node *existing)
+{
+ const struct block_entry *new_entry = rb_entry(new, struct block_entry, node);
+
+ return block_entry_bytenr_key_cmp(&new_entry->bytenr, existing);
+}
+
static struct block_entry *insert_block_entry(struct rb_root *root,
struct block_entry *be)
{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent_node = NULL;
- struct block_entry *entry;
-
- while (*p) {
- parent_node = *p;
- entry = rb_entry(parent_node, struct block_entry, node);
- if (entry->bytenr > be->bytenr)
- p = &(*p)->rb_left;
- else if (entry->bytenr < be->bytenr)
- p = &(*p)->rb_right;
- else
- return entry;
- }
+ struct rb_node *node;
- rb_link_node(&be->node, parent_node, p);
- rb_insert_color(&be->node, root);
- return NULL;
+ node = rb_find_add(&be->node, root, block_entry_bytenr_cmp);
+ return rb_entry_safe(node, struct block_entry, node);
}
static struct block_entry *lookup_block_entry(struct rb_root *root, u64 bytenr)
{
- struct rb_node *n;
- struct block_entry *entry = NULL;
+ struct rb_node *node;
- n = root->rb_node;
- while (n) {
- entry = rb_entry(n, struct block_entry, node);
- if (entry->bytenr < bytenr)
- n = n->rb_right;
- else if (entry->bytenr > bytenr)
- n = n->rb_left;
- else
- return entry;
- }
- return NULL;
+ node = rb_find(&bytenr, root, block_entry_bytenr_key_cmp);
+ return rb_entry_safe(node, struct block_entry, node);
+}
+
+static int root_entry_root_objectid_key_cmp(const void *key, const struct rb_node *node)
+{
+ const u64 *objectid = key;
+ const struct root_entry *entry = rb_entry(node, struct root_entry, node);
+
+ if (entry->root_objectid < *objectid)
+ return 1;
+ else if (entry->root_objectid > *objectid)
+ return -1;
+
+ return 0;
+}
+
+static int root_entry_root_objectid_cmp(struct rb_node *new, const struct rb_node *existing)
+{
+ const struct root_entry *new_entry = rb_entry(new, struct root_entry, node);
+
+ return root_entry_root_objectid_key_cmp(&new_entry->root_objectid, existing);
}
static struct root_entry *insert_root_entry(struct rb_root *root,
struct root_entry *re)
{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent_node = NULL;
- struct root_entry *entry;
-
- while (*p) {
- parent_node = *p;
- entry = rb_entry(parent_node, struct root_entry, node);
- if (entry->root_objectid > re->root_objectid)
- p = &(*p)->rb_left;
- else if (entry->root_objectid < re->root_objectid)
- p = &(*p)->rb_right;
- else
- return entry;
- }
-
- rb_link_node(&re->node, parent_node, p);
- rb_insert_color(&re->node, root);
- return NULL;
+ struct rb_node *node;
+ node = rb_find_add(&re->node, root, root_entry_root_objectid_cmp);
+ return rb_entry_safe(node, struct root_entry, node);
}
static int comp_refs(struct ref_entry *ref1, struct ref_entry *ref2)
@@ -161,48 +162,29 @@ static int comp_refs(struct ref_entry *ref1, struct ref_entry *ref2)
return 0;
}
+static int ref_entry_cmp(struct rb_node *new, const struct rb_node *existing)
+{
+ struct ref_entry *new_entry = rb_entry(new, struct ref_entry, node);
+ struct ref_entry *existing_entry = rb_entry(existing, struct ref_entry, node);
+
+ return comp_refs(new_entry, existing_entry);
+}
+
static struct ref_entry *insert_ref_entry(struct rb_root *root,
struct ref_entry *ref)
{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent_node = NULL;
- struct ref_entry *entry;
- int cmp;
-
- while (*p) {
- parent_node = *p;
- entry = rb_entry(parent_node, struct ref_entry, node);
- cmp = comp_refs(entry, ref);
- if (cmp > 0)
- p = &(*p)->rb_left;
- else if (cmp < 0)
- p = &(*p)->rb_right;
- else
- return entry;
- }
-
- rb_link_node(&ref->node, parent_node, p);
- rb_insert_color(&ref->node, root);
- return NULL;
+ struct rb_node *node;
+ node = rb_find_add(&ref->node, root, ref_entry_cmp);
+ return rb_entry_safe(node, struct ref_entry, node);
}
static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid)
{
- struct rb_node *n;
- struct root_entry *entry = NULL;
+ struct rb_node *node;
- n = root->rb_node;
- while (n) {
- entry = rb_entry(n, struct root_entry, node);
- if (entry->root_objectid < objectid)
- n = n->rb_right;
- else if (entry->root_objectid > objectid)
- n = n->rb_left;
- else
- return entry;
- }
- return NULL;
+ node = rb_find(&objectid, root, root_entry_root_objectid_key_cmp);
+ return rb_entry_safe(node, struct root_entry, node);
}
#ifdef CONFIG_STACKTRACE
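The ref-verify hunks above replace three hand-rolled red-black-tree walks with rb_find()/rb_find_add() driven by small comparators: a key comparator for lookups and a node comparator for inserts, both returning negative/zero/positive, with rb_find_add() handing back the already-present node on a key collision. The kernel rbtree is not available in userspace, so the sketch below reproduces only the find-or-insert contract on a plain binary search tree (bst_find_add and friends are illustrative names, not btrfs or rbtree API):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct bst_node {
    struct bst_node *left, *right;
    uint64_t bytenr;
};

/* Same convention as the kernel comparators: negative means "new sorts
 * before existing", positive means after, zero means the keys collide.
 */
static int bytenr_cmp(const struct bst_node *new_node, const struct bst_node *existing)
{
    if (new_node->bytenr < existing->bytenr)
        return -1;
    if (new_node->bytenr > existing->bytenr)
        return 1;
    return 0;
}

/* Find-or-insert, mirroring rb_find_add(): returns the existing node on a
 * collision, or NULL after linking the new node into the tree.
 */
static struct bst_node *bst_find_add(struct bst_node **root, struct bst_node *new_node,
                                     int (*cmp)(const struct bst_node *,
                                                const struct bst_node *))
{
    struct bst_node **link = root;

    while (*link) {
        int c = cmp(new_node, *link);

        if (c < 0)
            link = &(*link)->left;
        else if (c > 0)
            link = &(*link)->right;
        else
            return *link;   /* caller frees its duplicate and reuses this one */
    }
    new_node->left = NULL;
    new_node->right = NULL;
    *link = new_node;
    return NULL;
}

int main(void)
{
    struct bst_node *root = NULL;
    struct bst_node a = { .bytenr = 4096 };
    struct bst_node b = { .bytenr = 8192 };
    struct bst_node dup = { .bytenr = 4096 };

    bst_find_add(&root, &a, bytenr_cmp);
    bst_find_add(&root, &b, bytenr_cmp);
    printf("duplicate detected: %d\n", bst_find_add(&root, &dup, bytenr_cmp) == &a);
    return 0;
}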
@@ -668,7 +650,7 @@ static void dump_block_entry(struct btrfs_fs_info *fs_info,
* our sanity checks pass as they are no longer needed.
*/
int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
- struct btrfs_ref *generic_ref)
+ const struct btrfs_ref *generic_ref)
{
struct ref_entry *ref = NULL, *exist;
struct ref_action *ra = NULL;
@@ -857,6 +839,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
"dropping a ref for a root that doesn't have a ref on the block");
dump_block_entry(fs_info, be);
dump_ref_action(fs_info, ra);
+ rb_erase(&ref->node, &be->refs);
kfree(ref);
kfree(ra);
goto out_unlock;
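The single added rb_erase() above is a fix on an error path of btrfs_ref_tree_mod(): insert_ref_entry() had already linked the ref into be->refs, so freeing it without unlinking left a dangling node in the tree. The rule generalizes to any intrusive container: unlink first, free second. A tiny runnable illustration with a singly linked list (the list helpers are made up for the sketch):

#include <stdio.h>
#include <stdlib.h>

struct entry {
    struct entry *next;
    int key;
};

/* Insert at the head; mirrors "entry already linked before validation". */
static void list_insert(struct entry **head, struct entry *e)
{
    e->next = *head;
    *head = e;
}

static void list_unlink(struct entry **head, struct entry *e)
{
    for (struct entry **p = head; *p; p = &(*p)->next) {
        if (*p == e) {
            *p = e->next;
            return;
        }
    }
}

int main(void)
{
    struct entry *head = NULL;
    struct entry *e = malloc(sizeof(*e));

    if (!e)
        return 1;
    e->key = 42;
    list_insert(&head, e);

    /* Later validation fails: unlink before freeing, never the reverse. */
    list_unlink(&head, e);
    free(e);
    printf("list empty: %d\n", head == NULL);
    return 0;
}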
@@ -988,7 +971,7 @@ void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *extent_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *eb;
int tree_block_level = 0;
u64 bytenr = 0, num_bytes = 0;
@@ -997,11 +980,18 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
if (!btrfs_test_opt(fs_info, REF_VERIFY))
return 0;
+ extent_root = btrfs_extent_root(fs_info, 0);
+ /* If the extent tree is damaged we cannot ignore it (IGNOREBADROOTS). */
+ if (!extent_root) {
+ btrfs_warn(fs_info, "ref-verify: extent tree not available, disabling");
+ btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
+ return 0;
+ }
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- extent_root = btrfs_extent_root(fs_info, 0);
eb = btrfs_read_lock_root_node(extent_root);
level = btrfs_header_level(eb);
path->nodes[level] = eb;
@@ -1031,6 +1021,5 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
btrfs_free_ref_cache(fs_info);
btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
}
- btrfs_free_path(path);
return ret;
}
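btrfs_build_ref_tree() also moves to BTRFS_PATH_AUTO_FREE(), which is why the trailing btrfs_free_path() disappears: the path is released automatically when the variable leaves scope, on every return path. The same scope-based cleanup can be approximated in userspace with the GCC/Clang cleanup attribute; the AUTO_FREE_CHARP macro below is an illustration of the pattern, not the btrfs macro:

#include <stdio.h>
#include <stdlib.h>

static void free_charp(char **p)
{
    free(*p);   /* free(NULL) is a no-op, so early returns stay simple */
}

/* Rough analogue of BTRFS_PATH_AUTO_FREE()/AUTO_KVFREE(): the variable is
 * freed automatically when it goes out of scope, on every return path.
 */
#define AUTO_FREE_CHARP(name) char *name __attribute__((cleanup(free_charp))) = NULL

static int work(int fail_early)
{
    AUTO_FREE_CHARP(buf);

    buf = malloc(128);
    if (!buf)
        return -1;
    if (fail_early)
        return -2;      /* no explicit free() needed here either */
    snprintf(buf, 128, "hello");
    printf("%s\n", buf);
    return 0;
}

int main(void)
{
    int ret1 = work(1);
    int ret2 = work(0);

    printf("%d %d\n", ret1, ret2);
    return 0;
}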
diff --git a/fs/btrfs/ref-verify.h b/fs/btrfs/ref-verify.h
index 3511e1a5c96b..1ce544d53cc5 100644
--- a/fs/btrfs/ref-verify.h
+++ b/fs/btrfs/ref-verify.h
@@ -12,14 +12,14 @@
struct btrfs_fs_info;
struct btrfs_ref;
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
+#ifdef CONFIG_BTRFS_DEBUG
#include <linux/spinlock.h>
int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info);
void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info);
int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
- struct btrfs_ref *generic_ref);
+ const struct btrfs_ref *generic_ref);
void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
u64 len);
@@ -39,7 +39,7 @@ static inline void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info)
}
static inline int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
- struct btrfs_ref *generic_ref)
+ const struct btrfs_ref *generic_ref)
{
return 0;
}
@@ -53,6 +53,6 @@ static inline void btrfs_init_ref_verify(struct btrfs_fs_info *fs_info)
{
}
-#endif /* CONFIG_BTRFS_FS_REF_VERIFY */
+#endif /* CONFIG_BTRFS_DEBUG */
#endif
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index f0824c948cb7..b5fe95baf92e 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/blkdev.h>
+#include <linux/fscrypt.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "fs.h"
@@ -23,7 +24,7 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
u64 endoff,
const u64 destoff,
const u64 olen,
- int no_time_update)
+ bool no_time_update)
{
int ret;
@@ -43,14 +44,12 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
}
ret = btrfs_update_inode(trans, BTRFS_I(inode));
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
- goto out;
+ return ret;
}
- ret = btrfs_end_transaction(trans);
-out:
- return ret;
+ return btrfs_end_transaction(trans);
}
static int copy_inline_to_page(struct btrfs_inode *inode,
@@ -87,7 +86,7 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
btrfs_alloc_write_mask(mapping));
if (IS_ERR(folio)) {
- ret = -ENOMEM;
+ ret = PTR_ERR(folio);
goto out_unlock;
}
@@ -95,9 +94,8 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
if (ret < 0)
goto out_unlock;
- clear_extent_bit(&inode->io_tree, file_offset, range_end,
- EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- NULL);
+ btrfs_clear_extent_bit(&inode->io_tree, file_offset, range_end,
+ EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, NULL);
ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
if (ret)
goto out_unlock;
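copy_inline_to_page() now returns PTR_ERR(folio) instead of collapsing every __filemap_get_folio() failure into -ENOMEM, so callers see the real error code. The kernel encodes that error inside the pointer itself; below is a simplified userspace sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() idiom (the helpers are reduced stand-ins, not the kernel definitions):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>

/* Simplified ERR_PTR()/IS_ERR()/PTR_ERR(): small negative errno values map
 * into the last, never-mappable page of the address space, so one pointer
 * can carry either a valid object or an error code.
 */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)(intptr_t)err; }
static inline long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static inline int IS_ERR(const void *p)
{
    return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *get_folio(int simulate_enomem)
{
    if (simulate_enomem)
        return ERR_PTR(-ENOMEM);
    return malloc(64);      /* stands in for a locked folio */
}

int main(void)
{
    void *folio = get_folio(1);

    if (IS_ERR(folio)) {
        /* Propagate the real error instead of hard-coding -ENOMEM. */
        printf("error %ld\n", PTR_ERR(folio));
        return 1;
    }
    free(folio);
    return 0;
}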
@@ -165,7 +163,7 @@ out:
* the source inode to destination inode when possible. When not possible we
* copy the inline extent's data into the respective page of the inode.
*/
-static int clone_copy_inline_extent(struct inode *dst,
+static int clone_copy_inline_extent(struct btrfs_inode *inode,
struct btrfs_path *path,
struct btrfs_key *new_key,
const u64 drop_start,
@@ -175,8 +173,8 @@ static int clone_copy_inline_extent(struct inode *dst,
char *inline_data,
struct btrfs_trans_handle **trans_out)
{
- struct btrfs_fs_info *fs_info = inode_to_fs_info(dst);
- struct btrfs_root *root = BTRFS_I(dst)->root;
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
const u64 aligned_end = ALIGN(new_key->offset + datal,
fs_info->sectorsize);
struct btrfs_trans_handle *trans = NULL;
@@ -185,12 +183,12 @@ static int clone_copy_inline_extent(struct inode *dst,
struct btrfs_key key;
if (new_key->offset > 0) {
- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
+ ret = copy_inline_to_page(inode, new_key->offset,
inline_data, size, datal, comp_type);
goto out;
}
- key.objectid = btrfs_ino(BTRFS_I(dst));
+ key.objectid = btrfs_ino(inode);
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = 0;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -205,7 +203,7 @@ static int clone_copy_inline_extent(struct inode *dst,
goto copy_inline_extent;
}
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
- if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
+ if (key.objectid == btrfs_ino(inode) &&
key.type == BTRFS_EXTENT_DATA_KEY) {
/*
* There's an implicit hole at file offset 0, copy the
@@ -214,7 +212,7 @@ static int clone_copy_inline_extent(struct inode *dst,
ASSERT(key.offset > 0);
goto copy_to_page;
}
- } else if (i_size_read(dst) <= datal) {
+ } else if (i_size_read(&inode->vfs_inode) <= datal) {
struct btrfs_file_extent_item *ei;
ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
@@ -236,7 +234,7 @@ copy_inline_extent:
* We have no extent items, or we have an extent at offset 0 which may
* or may not be inlined. All these cases are dealt the same way.
*/
- if (i_size_read(dst) > datal) {
+ if (i_size_read(&inode->vfs_inode) > datal) {
/*
* At the destination offset 0 we have either a hole, a regular
* extent or an inline extent larger then the one we want to
* extent or an inline extent larger than the one we want to
@@ -270,20 +268,26 @@ copy_inline_extent:
drop_args.start = drop_start;
drop_args.end = aligned_end;
drop_args.drop_cache = true;
- ret = btrfs_drop_extents(trans, root, BTRFS_I(dst), &drop_args);
- if (ret)
+ ret = btrfs_drop_extents(trans, root, inode, &drop_args);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
- if (ret)
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
write_extent_buffer(path->nodes[0], inline_data,
btrfs_item_ptr_offset(path->nodes[0],
path->slots[0]),
size);
- btrfs_update_inode_bytes(BTRFS_I(dst), datal, drop_args.bytes_found);
- btrfs_set_inode_full_sync(BTRFS_I(dst));
- ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
+ btrfs_update_inode_bytes(inode, datal, drop_args.bytes_found);
+ btrfs_set_inode_full_sync(inode);
+ ret = btrfs_inode_set_file_extent_range(inode, 0, aligned_end);
+ if (unlikely(ret))
+ btrfs_abort_transaction(trans, ret);
out:
if (!ret && !trans) {
/*
@@ -298,10 +302,8 @@ out:
trans = NULL;
}
}
- if (ret && trans) {
- btrfs_abort_transaction(trans, ret);
+ if (ret && trans)
btrfs_end_transaction(trans);
- }
if (!ret)
*trans_out = trans;
@@ -318,7 +320,7 @@ copy_to_page:
*/
btrfs_release_path(path);
- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
+ ret = copy_inline_to_page(inode, new_key->offset,
inline_data, size, datal, comp_type);
goto out;
}
@@ -336,13 +338,13 @@ copy_to_page:
*/
static int btrfs_clone(struct inode *src, struct inode *inode,
const u64 off, const u64 olen, const u64 olen_aligned,
- const u64 destoff, int no_time_update)
+ const u64 destoff, bool no_time_update)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_trans_handle *trans;
- char *buf = NULL;
+ char AUTO_KVFREE(buf);
struct btrfs_key key;
u32 nritems;
int slot;
@@ -357,10 +359,8 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
return ret;
path = btrfs_alloc_path();
- if (!path) {
- kvfree(buf);
+ if (!path)
return ret;
- }
path->reada = READA_FORWARD;
/* Clone data */
@@ -526,7 +526,7 @@ process_slot:
goto out;
}
- ret = clone_copy_inline_extent(inode, path, &new_key,
+ ret = clone_copy_inline_extent(BTRFS_I(inode), path, &new_key,
drop_start, datal, size,
comp, buf, &trans);
if (ret)
@@ -610,33 +610,31 @@ process_slot:
}
out:
- btrfs_free_path(path);
- kvfree(buf);
clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);
return ret;
}
-static void btrfs_double_mmap_lock(struct inode *inode1, struct inode *inode2)
+static void btrfs_double_mmap_lock(struct btrfs_inode *inode1, struct btrfs_inode *inode2)
{
if (inode1 < inode2)
swap(inode1, inode2);
- down_write(&BTRFS_I(inode1)->i_mmap_lock);
- down_write_nested(&BTRFS_I(inode2)->i_mmap_lock, SINGLE_DEPTH_NESTING);
+ down_write(&inode1->i_mmap_lock);
+ down_write_nested(&inode2->i_mmap_lock, SINGLE_DEPTH_NESTING);
}
-static void btrfs_double_mmap_unlock(struct inode *inode1, struct inode *inode2)
+static void btrfs_double_mmap_unlock(struct btrfs_inode *inode1, struct btrfs_inode *inode2)
{
- up_write(&BTRFS_I(inode1)->i_mmap_lock);
- up_write(&BTRFS_I(inode2)->i_mmap_lock);
+ up_write(&inode1->i_mmap_lock);
+ up_write(&inode2->i_mmap_lock);
}
-static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
- struct inode *dst, u64 dst_loff)
+static int btrfs_extent_same_range(struct btrfs_inode *src, u64 loff, u64 len,
+ struct btrfs_inode *dst, u64 dst_loff)
{
const u64 end = dst_loff + len - 1;
struct extent_state *cached_state = NULL;
- struct btrfs_fs_info *fs_info = BTRFS_I(src)->root->fs_info;
+ struct btrfs_fs_info *fs_info = src->root->fs_info;
const u64 bs = fs_info->sectorsize;
int ret;
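btrfs_double_mmap_lock()/unlock() now take struct btrfs_inode directly, but the locking discipline is unchanged: take the two i_mmap_locks in one consistent order chosen by pointer value, with the second acquisition annotated SINGLE_DEPTH_NESTING for lockdep, so two tasks remapping between the same pair of inodes can never deadlock ABBA-style. A hedged pthread sketch of the same ordering rule (this one locks the lower address first; the kernel helper picks the opposite but equally consistent order):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct obj {
    pthread_mutex_t lock;
    int value;
};

/* Lock two objects in a globally consistent (address) order. */
static void double_lock(struct obj *a, struct obj *b)
{
    if ((uintptr_t)a > (uintptr_t)b) {
        struct obj *tmp = a;

        a = b;
        b = tmp;
    }
    pthread_mutex_lock(&a->lock);
    pthread_mutex_lock(&b->lock);
}

static void double_unlock(struct obj *a, struct obj *b)
{
    pthread_mutex_unlock(&a->lock);
    pthread_mutex_unlock(&b->lock);
}

int main(void)
{
    struct obj x = { PTHREAD_MUTEX_INITIALIZER, 1 };
    struct obj y = { PTHREAD_MUTEX_INITIALIZER, 2 };

    /* Both call orders acquire x and y in the same internal order. */
    double_lock(&x, &y);
    double_unlock(&x, &y);
    double_lock(&y, &x);
    double_unlock(&y, &x);
    printf("no deadlock possible with consistent ordering\n");
    return 0;
}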
@@ -646,9 +644,10 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
* because we have already locked the inode's i_mmap_lock in exclusive
* mode.
*/
- lock_extent(&BTRFS_I(dst)->io_tree, dst_loff, end, &cached_state);
- ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
- unlock_extent(&BTRFS_I(dst)->io_tree, dst_loff, end, &cached_state);
+ btrfs_lock_extent(&dst->io_tree, dst_loff, end, &cached_state);
+ ret = btrfs_clone(&src->vfs_inode, &dst->vfs_inode, loff, len,
+ ALIGN(len, bs), dst_loff, 1);
+ btrfs_unlock_extent(&dst->io_tree, dst_loff, end, &cached_state);
btrfs_btree_balance_dirty(fs_info);
@@ -678,8 +677,8 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);
for (i = 0; i < chunk_count; i++) {
- ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
- dst, dst_loff);
+ ret = btrfs_extent_same_range(BTRFS_I(src), loff, BTRFS_MAX_DEDUPE_LEN,
+ BTRFS_I(dst), dst_loff);
if (ret)
goto out;
@@ -688,7 +687,8 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
}
if (tail_len > 0)
- ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff);
+ ret = btrfs_extent_same_range(BTRFS_I(src), loff, tail_len,
+ BTRFS_I(dst), dst_loff);
out:
spin_lock(&root_dst->root_item_lock);
root_dst->dedupe_in_progress--;
@@ -747,9 +747,9 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
* mode.
*/
end = destoff + len - 1;
- lock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
+ btrfs_lock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
- unlock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
/*
* We may have copied an inline extent into a page of the destination
@@ -775,24 +775,28 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t *len, unsigned int remap_flags)
{
- struct inode *inode_in = file_inode(file_in);
- struct inode *inode_out = file_inode(file_out);
- u64 bs = BTRFS_I(inode_out)->root->fs_info->sectorsize;
+ struct btrfs_inode *inode_in = BTRFS_I(file_inode(file_in));
+ struct btrfs_inode *inode_out = BTRFS_I(file_inode(file_out));
+ u64 bs = inode_out->root->fs_info->sectorsize;
u64 wb_len;
int ret;
if (!(remap_flags & REMAP_FILE_DEDUP)) {
- struct btrfs_root *root_out = BTRFS_I(inode_out)->root;
+ struct btrfs_root *root_out = inode_out->root;
if (btrfs_root_readonly(root_out))
return -EROFS;
- ASSERT(inode_in->i_sb == inode_out->i_sb);
+ ASSERT(inode_in->vfs_inode.i_sb == inode_out->vfs_inode.i_sb);
}
+ /* Can only reflink encrypted files if both files are encrypted. */
+ if (IS_ENCRYPTED(&inode_in->vfs_inode) != IS_ENCRYPTED(&inode_out->vfs_inode))
+ return -EINVAL;
+
/* Don't make the dst file partly checksummed */
- if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
- (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
+ if ((inode_in->flags & BTRFS_INODE_NODATASUM) !=
+ (inode_out->flags & BTRFS_INODE_NODATASUM)) {
return -EINVAL;
}
@@ -811,7 +815,7 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
* to complete so that new file extent items are in the fs tree.
*/
if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
- wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
+ wb_len = ALIGN(inode_in->vfs_inode.i_size, bs) - ALIGN_DOWN(pos_in, bs);
else
wb_len = ALIGN(*len, bs);
@@ -832,16 +836,14 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
* Also we don't need to check ASYNC_EXTENT, as async extent will be
* CoWed anyway, not affecting nocow part.
*/
- ret = filemap_flush(inode_in->i_mapping);
+ ret = filemap_flush(inode_in->vfs_inode.i_mapping);
if (ret < 0)
return ret;
- ret = btrfs_wait_ordered_range(BTRFS_I(inode_in), ALIGN_DOWN(pos_in, bs),
- wb_len);
+ ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs), wb_len);
if (ret < 0)
return ret;
- ret = btrfs_wait_ordered_range(BTRFS_I(inode_out), ALIGN_DOWN(pos_out, bs),
- wb_len);
+ ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs), wb_len);
if (ret < 0)
return ret;
@@ -863,18 +865,21 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
struct file *dst_file, loff_t destoff, loff_t len,
unsigned int remap_flags)
{
- struct inode *src_inode = file_inode(src_file);
- struct inode *dst_inode = file_inode(dst_file);
+ struct btrfs_inode *src_inode = BTRFS_I(file_inode(src_file));
+ struct btrfs_inode *dst_inode = BTRFS_I(file_inode(dst_file));
bool same_inode = dst_inode == src_inode;
int ret;
+ if (unlikely(btrfs_is_shutdown(inode_to_fs_info(file_inode(src_file)))))
+ return -EIO;
+
if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
return -EINVAL;
if (same_inode) {
- btrfs_inode_lock(BTRFS_I(src_inode), BTRFS_ILOCK_MMAP);
+ btrfs_inode_lock(src_inode, BTRFS_ILOCK_MMAP);
} else {
- lock_two_nondirectories(src_inode, dst_inode);
+ lock_two_nondirectories(&src_inode->vfs_inode, &dst_inode->vfs_inode);
btrfs_double_mmap_lock(src_inode, dst_inode);
}
@@ -884,16 +889,18 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
goto out_unlock;
if (remap_flags & REMAP_FILE_DEDUP)
- ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
+ ret = btrfs_extent_same(&src_inode->vfs_inode, off, len,
+ &dst_inode->vfs_inode, destoff);
else
ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);
out_unlock:
if (same_inode) {
- btrfs_inode_unlock(BTRFS_I(src_inode), BTRFS_ILOCK_MMAP);
+ btrfs_inode_unlock(src_inode, BTRFS_ILOCK_MMAP);
} else {
btrfs_double_mmap_unlock(src_inode, dst_inode);
- unlock_two_nondirectories(src_inode, dst_inode);
+ unlock_two_nondirectories(&src_inode->vfs_inode,
+ &dst_inode->vfs_inode);
}
/*
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index bf267bdfa8f8..5bfefc3e9c06 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -90,10 +90,15 @@
* map address of tree root to tree
*/
struct mapping_node {
- struct {
- struct rb_node rb_node;
- u64 bytenr;
- }; /* Use rb_simle_node for search/insert */
+ union {
+ /* Use rb_simple_node for search/insert */
+ struct {
+ struct rb_node rb_node;
+ u64 bytenr;
+ };
+
+ struct rb_simple_node simple_node;
+ };
void *data;
};
@@ -106,10 +111,15 @@ struct mapping_tree {
* present a tree block to process
*/
struct tree_block {
- struct {
- struct rb_node rb_node;
- u64 bytenr;
- }; /* Use rb_simple_node for search/insert */
+ union {
+ /* Use rb_simple_node for search/insert */
+ struct {
+ struct rb_node rb_node;
+ u64 bytenr;
+ };
+
+ struct rb_simple_node simple_node;
+ };
u64 owner;
struct btrfs_key key;
u8 level;
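Both mapping_node and tree_block now wrap their {rb_node, bytenr} pair in a union with struct rb_simple_node, so rb_simple_insert() can be handed the aggregate while existing code keeps addressing ->rb_node and ->bytenr directly; this only works because the two views share an identical layout. A standalone illustration of that layout-sharing trick (all types below are invented for the sketch):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <assert.h>

struct fake_rb_node { void *parent, *left, *right; };

/* Shared "header" type that generic helpers can take directly. */
struct simple_node {
    struct fake_rb_node rb_node;
    uint64_t bytenr;
};

struct my_mapping_node {
    union {
        struct {                        /* legacy, field-by-field view */
            struct fake_rb_node rb_node;
            uint64_t bytenr;
        };
        struct simple_node simple;      /* aggregate view for helpers */
    };
    void *data;
};

int main(void)
{
    struct my_mapping_node m = { .bytenr = 12345 };

    /* Both views alias the same storage at the same offsets. */
    static_assert(offsetof(struct my_mapping_node, bytenr) ==
                  offsetof(struct my_mapping_node, simple.bytenr),
                  "layout must match");
    printf("%llu == %llu\n",
           (unsigned long long)m.bytenr,
           (unsigned long long)m.simple.bytenr);
    return 0;
}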
@@ -178,8 +188,9 @@ static void mark_block_processed(struct reloc_control *rc,
in_range(node->bytenr, rc->block_group->start,
rc->block_group->length)) {
blocksize = rc->extent_root->fs_info->nodesize;
- set_extent_bit(&rc->processed_blocks, node->bytenr,
- node->bytenr + blocksize - 1, EXTENT_DIRTY, NULL);
+ btrfs_set_extent_bit(&rc->processed_blocks, node->bytenr,
+ node->bytenr + blocksize - 1, EXTENT_DIRTY,
+ NULL);
}
node->processed = 1;
}
@@ -195,8 +206,8 @@ static struct btrfs_backref_node *walk_up_backref(
int idx = *index;
while (!list_empty(&node->upper)) {
- edge = list_entry(node->upper.next,
- struct btrfs_backref_edge, list[LOWER]);
+ edge = list_first_entry(&node->upper, struct btrfs_backref_edge,
+ list[LOWER]);
edges[idx++] = edge;
node = edge->node[UPPER];
}
@@ -222,8 +233,8 @@ static struct btrfs_backref_node *walk_down_backref(
idx--;
continue;
}
- edge = list_entry(edge->list[LOWER].next,
- struct btrfs_backref_edge, list[LOWER]);
+ edge = list_first_entry(&edge->list[LOWER], struct btrfs_backref_edge,
+ list[LOWER]);
edges[idx - 1] = edge;
*index = idx;
return edge->node[UPPER];
@@ -342,19 +353,13 @@ static bool handle_useless_nodes(struct reloc_control *rc,
if (cur == node)
ret = true;
- /* The node is the lowest node */
- if (cur->lowest) {
- list_del_init(&cur->lower);
- cur->lowest = 0;
- }
-
/* Cleanup the lower edges */
while (!list_empty(&cur->lower)) {
struct btrfs_backref_edge *edge;
struct btrfs_backref_node *lower;
- edge = list_entry(cur->lower.next,
- struct btrfs_backref_edge, list[UPPER]);
+ edge = list_first_entry(&cur->lower, struct btrfs_backref_edge,
+ list[UPPER]);
list_del(&edge->list[UPPER]);
list_del(&edge->list[LOWER]);
lower = edge->node[LOWER];
@@ -373,7 +378,6 @@ static bool handle_useless_nodes(struct reloc_control *rc,
* cache to avoid unnecessary backref lookup.
*/
if (cur->level > 0) {
- list_add(&cur->list, &cache->detached);
cur->detached = 1;
} else {
rb_erase(&cur->rb_node, &cache->rb_root);
@@ -426,7 +430,6 @@ static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
goto out;
}
- node->lowest = 1;
cur = node;
/* Breadth-first search to build backref cache */
@@ -470,92 +473,6 @@ out:
}
/*
- * helper to add backref node for the newly created snapshot.
- * the backref node is created by cloning backref node that
- * corresponds to root of source tree
- */
-static int clone_backref_node(struct btrfs_trans_handle *trans,
- struct reloc_control *rc,
- const struct btrfs_root *src,
- struct btrfs_root *dest)
-{
- struct btrfs_root *reloc_root = src->reloc_root;
- struct btrfs_backref_cache *cache = &rc->backref_cache;
- struct btrfs_backref_node *node = NULL;
- struct btrfs_backref_node *new_node;
- struct btrfs_backref_edge *edge;
- struct btrfs_backref_edge *new_edge;
- struct rb_node *rb_node;
-
- rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
- if (rb_node) {
- node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
- if (node->detached)
- node = NULL;
- else
- BUG_ON(node->new_bytenr != reloc_root->node->start);
- }
-
- if (!node) {
- rb_node = rb_simple_search(&cache->rb_root,
- reloc_root->commit_root->start);
- if (rb_node) {
- node = rb_entry(rb_node, struct btrfs_backref_node,
- rb_node);
- BUG_ON(node->detached);
- }
- }
-
- if (!node)
- return 0;
-
- new_node = btrfs_backref_alloc_node(cache, dest->node->start,
- node->level);
- if (!new_node)
- return -ENOMEM;
-
- new_node->lowest = node->lowest;
- new_node->checked = 1;
- new_node->root = btrfs_grab_root(dest);
- ASSERT(new_node->root);
-
- if (!node->lowest) {
- list_for_each_entry(edge, &node->lower, list[UPPER]) {
- new_edge = btrfs_backref_alloc_edge(cache);
- if (!new_edge)
- goto fail;
-
- btrfs_backref_link_edge(new_edge, edge->node[LOWER],
- new_node, LINK_UPPER);
- }
- } else {
- list_add_tail(&new_node->lower, &cache->leaves);
- }
-
- rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
- &new_node->rb_node);
- if (rb_node)
- btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);
-
- if (!new_node->lowest) {
- list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
- list_add_tail(&new_edge->list[LOWER],
- &new_edge->node[LOWER]->upper);
- }
- }
- return 0;
-fail:
- while (!list_empty(&new_node->lower)) {
- new_edge = list_entry(new_node->lower.next,
- struct btrfs_backref_edge, list[UPPER]);
- list_del(&new_edge->list[UPPER]);
- btrfs_backref_free_edge(cache, new_edge);
- }
- btrfs_backref_free_node(cache, new_node);
- return -ENOMEM;
-}
-
-/*
* helper to add 'address of tree root -> reloc tree' mapping
*/
static int __add_reloc_root(struct btrfs_root *root)
@@ -573,8 +490,7 @@ static int __add_reloc_root(struct btrfs_root *root)
node->data = root;
spin_lock(&rc->reloc_root_tree.lock);
- rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
- node->bytenr, &node->rb_node);
+ rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, &node->simple_node);
spin_unlock(&rc->reloc_root_tree.lock);
if (rb_node) {
btrfs_err(fs_info,
@@ -595,7 +511,7 @@ static void __del_reloc_root(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *rb_node;
- struct mapping_node *node = NULL;
+ struct mapping_node AUTO_KFREE(node);
struct reloc_control *rc = fs_info->reloc_ctl;
bool put_ref = false;
@@ -628,7 +544,6 @@ static void __del_reloc_root(struct btrfs_root *root)
spin_unlock(&fs_info->trans_lock);
if (put_ref)
btrfs_put_root(root);
- kfree(node);
}
/*
@@ -657,8 +572,7 @@ static int __update_reloc_root(struct btrfs_root *root)
spin_lock(&rc->reloc_root_tree.lock);
node->bytenr = root->node->start;
- rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
- node->bytenr, &node->rb_node);
+ rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, &node->simple_node);
spin_unlock(&rc->reloc_root_tree.lock);
if (rb_node)
btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
@@ -671,10 +585,9 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *reloc_root;
struct extent_buffer *eb;
- struct btrfs_root_item *root_item;
+ struct btrfs_root_item AUTO_KFREE(root_item);
struct btrfs_key root_key;
int ret = 0;
- bool must_abort = false;
root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
if (!root_item)
@@ -687,11 +600,29 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
if (btrfs_root_id(root) == objectid) {
u64 commit_root_gen;
+ /*
+ * Relocation will wait for cleaner thread, and any half-dropped
+ * subvolume will be fully cleaned up at mount time.
+ * So here we shouldn't hit a subvolume with non-zero drop_progress.
+ *
+ * If this isn't the case, error out since it can make us attempt to
+ * drop references for extents that were already dropped before.
+ */
+ if (unlikely(btrfs_disk_key_objectid(&root->root_item.drop_progress))) {
+ struct btrfs_key cpu_key;
+
+ btrfs_disk_key_to_cpu(&cpu_key, &root->root_item.drop_progress);
+ btrfs_err(fs_info,
+ "cannot relocate partially dropped subvolume %llu, drop progress key " BTRFS_KEY_FMT,
+ objectid, BTRFS_KEY_FMT_VALUE(&cpu_key));
+ return ERR_PTR(-EUCLEAN);
+ }
+
/* called by btrfs_init_reloc_root */
ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
BTRFS_TREE_RELOC_OBJECTID);
if (ret)
- goto fail;
+ return ERR_PTR(ret);
/*
* Set the last_snapshot field to the generation of the commit
@@ -714,14 +645,13 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
ret = btrfs_copy_root(trans, root, root->node, &eb,
BTRFS_TREE_RELOC_OBJECTID);
if (ret)
- goto fail;
+ return ERR_PTR(ret);
}
/*
* We have changed references at this point, we must abort the
- * transaction if anything fails.
+ * transaction if anything fails (i.e. 'goto abort').
*/
- must_abort = true;
memcpy(root_item, &root->root_item, sizeof(*root_item));
btrfs_set_root_bytenr(root_item, eb->start);
@@ -741,9 +671,7 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
ret = btrfs_insert_root(trans, fs_info->tree_root,
&root_key, root_item);
if (ret)
- goto fail;
-
- kfree(root_item);
+ goto abort;
reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
if (IS_ERR(reloc_root)) {
@@ -753,11 +681,9 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
btrfs_set_root_last_trans(reloc_root, trans->transid);
return reloc_root;
-fail:
- kfree(root_item);
+
abort:
- if (must_abort)
- btrfs_abort_transaction(trans, ret);
+ btrfs_abort_transaction(trans, ret);
return ERR_PTR(ret);
}
@@ -887,7 +813,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
u64 bytenr, u64 num_bytes)
{
struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_file_extent_item *fi;
struct extent_buffer *leaf;
int ret;
@@ -900,11 +826,9 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
ret = btrfs_lookup_file_extent(NULL, root, path,
btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
if (ret < 0)
- goto out;
- if (ret > 0) {
- ret = -ENOENT;
- goto out;
- }
+ return ret;
+ if (ret > 0)
+ return -ENOENT;
leaf = path->nodes[0];
fi = btrfs_item_ptr(leaf, path->slots[0],
@@ -915,16 +839,11 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
btrfs_file_extent_encryption(leaf, fi) ||
btrfs_file_extent_other_encoding(leaf, fi));
- if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
- ret = -EINVAL;
- goto out;
- }
+ if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi))
+ return -EINVAL;
*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
- ret = 0;
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
/*
@@ -950,7 +869,6 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
u32 i;
int ret = 0;
int first = 1;
- int dirty = 0;
if (rc->stage != UPDATE_DATA_PTRS)
return 0;
@@ -1005,16 +923,16 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
/* Take mmap lock to serialize with reflinks. */
if (!down_read_trylock(&inode->i_mmap_lock))
continue;
- ret = try_lock_extent(&inode->io_tree, key.offset,
- end, &cached_state);
+ ret = btrfs_try_lock_extent(&inode->io_tree, key.offset,
+ end, &cached_state);
if (!ret) {
up_read(&inode->i_mmap_lock);
continue;
}
btrfs_drop_extent_map_range(inode, key.offset, end, true);
- unlock_extent(&inode->io_tree, key.offset, end,
- &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, key.offset, end,
+ &cached_state);
up_read(&inode->i_mmap_lock);
}
}
@@ -1030,7 +948,6 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
}
btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
- dirty = 1;
key.offset -= btrfs_file_extent_offset(leaf, fi);
ref.action = BTRFS_ADD_DELAYED_REF;
@@ -1042,7 +959,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
btrfs_init_data_ref(&ref, key.objectid, key.offset,
btrfs_root_id(root), false);
ret = btrfs_inc_extent_ref(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -1056,13 +973,11 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
btrfs_init_data_ref(&ref, key.objectid, key.offset,
btrfs_root_id(root), false);
ret = btrfs_free_extent(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
}
- if (dirty)
- btrfs_mark_buffer_dirty(trans, leaf);
if (inode)
btrfs_add_delayed_iput(inode);
return ret;
@@ -1255,13 +1170,11 @@ again:
*/
btrfs_set_node_blockptr(parent, slot, new_bytenr);
btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
- btrfs_mark_buffer_dirty(trans, parent);
btrfs_set_node_blockptr(path->nodes[level],
path->slots[level], old_bytenr);
btrfs_set_node_ptr_generation(path->nodes[level],
path->slots[level], old_ptr_gen);
- btrfs_mark_buffer_dirty(trans, path->nodes[level]);
ref.action = BTRFS_ADD_DELAYED_REF;
ref.bytenr = old_bytenr;
@@ -1271,7 +1184,7 @@ again:
ref.ref_root = btrfs_root_id(src);
btrfs_init_tree_ref(&ref, level - 1, 0, true);
ret = btrfs_inc_extent_ref(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -1284,7 +1197,7 @@ again:
ref.ref_root = btrfs_root_id(dest);
btrfs_init_tree_ref(&ref, level - 1, 0, true);
ret = btrfs_inc_extent_ref(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -1298,7 +1211,7 @@ again:
ref.ref_root = btrfs_root_id(src);
btrfs_init_tree_ref(&ref, level - 1, 0, true);
ret = btrfs_free_extent(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -1312,7 +1225,7 @@ again:
ref.ref_root = btrfs_root_id(dest);
btrfs_init_tree_ref(&ref, level - 1, 0, true);
ret = btrfs_free_extent(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -1478,9 +1391,9 @@ static int invalidate_extent_cache(struct btrfs_root *root,
}
/* the lock_extent waits for read_folio to complete */
- lock_extent(&inode->io_tree, start, end, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
btrfs_drop_extent_map_range(inode, start, end, true);
- unlock_extent(&inode->io_tree, start, end, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
}
return 0;
}
@@ -1562,7 +1475,7 @@ static int clean_dirty_subvols(struct reloc_control *rc)
* ->reloc_root. If it fails however we must
* drop the ref ourselves.
*/
- ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
+ ret2 = btrfs_drop_snapshot(reloc_root, false, true);
if (ret2 < 0) {
btrfs_put_root(reloc_root);
if (!ret)
@@ -1572,7 +1485,7 @@ static int clean_dirty_subvols(struct reloc_control *rc)
btrfs_put_root(root);
} else {
/* Orphan reloc tree, just clean it up */
- ret2 = btrfs_drop_snapshot(root, 0, 1);
+ ret2 = btrfs_drop_snapshot(root, false, true);
if (ret2 < 0) {
btrfs_put_root(root);
if (!ret)
@@ -1615,7 +1528,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
level = btrfs_root_level(root_item);
- atomic_inc(&reloc_root->node->refs);
+ refcount_inc(&reloc_root->node->refs);
path->nodes[level] = reloc_root->node;
path->slots[level] = 0;
} else {
@@ -1797,8 +1710,8 @@ again:
rc->merge_reloc_tree = true;
while (!list_empty(&rc->reloc_roots)) {
- reloc_root = list_entry(rc->reloc_roots.next,
- struct btrfs_root, root_list);
+ reloc_root = list_first_entry(&rc->reloc_roots,
+ struct btrfs_root, root_list);
list_del_init(&reloc_root->root_list);
root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
@@ -1863,7 +1776,7 @@ again:
list_add(&reloc_root->root_list, &reloc_roots);
btrfs_put_root(root);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
if (!err)
err = ret;
@@ -1913,8 +1826,7 @@ again:
while (!list_empty(&reloc_roots)) {
found = 1;
- reloc_root = list_entry(reloc_roots.next,
- struct btrfs_root, root_list);
+ reloc_root = list_first_entry(&reloc_roots, struct btrfs_root, root_list);
root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
false);
@@ -2030,11 +1942,11 @@ static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
* reloc root without a corresponding root this could return ENOENT.
*/
if (IS_ERR(root)) {
- ASSERT(0);
+ DEBUG_WARN("error %ld reading root for reloc root", PTR_ERR(root));
return PTR_ERR(root);
}
- if (root->reloc_root != reloc_root) {
- ASSERT(0);
+ if (unlikely(root->reloc_root != reloc_root)) {
+ DEBUG_WARN("unexpected reloc root found");
btrfs_err(fs_info,
"root %llu has two reloc roots associated with it",
reloc_root->root_key.offset);
@@ -2058,100 +1970,72 @@ struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
int index = 0;
int ret;
- next = node;
- while (1) {
- cond_resched();
- next = walk_up_backref(next, edges, &index);
- root = next->root;
+ next = walk_up_backref(node, edges, &index);
+ root = next->root;
- /*
- * If there is no root, then our references for this block are
- * incomplete, as we should be able to walk all the way up to a
- * block that is owned by a root.
- *
- * This path is only for SHAREABLE roots, so if we come upon a
- * non-SHAREABLE root then we have backrefs that resolve
- * improperly.
- *
- * Both of these cases indicate file system corruption, or a bug
- * in the backref walking code.
- */
- if (!root) {
- ASSERT(0);
- btrfs_err(trans->fs_info,
- "bytenr %llu doesn't have a backref path ending in a root",
- node->bytenr);
- return ERR_PTR(-EUCLEAN);
- }
- if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
- ASSERT(0);
- btrfs_err(trans->fs_info,
- "bytenr %llu has multiple refs with one ending in a non-shareable root",
- node->bytenr);
- return ERR_PTR(-EUCLEAN);
- }
-
- if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
- ret = record_reloc_root_in_trans(trans, root);
- if (ret)
- return ERR_PTR(ret);
- break;
- }
+ /*
+ * If there is no root, then our references for this block are
+ * incomplete, as we should be able to walk all the way up to a block
+ * that is owned by a root.
+ *
+ * This path is only for SHAREABLE roots, so if we come upon a
+ * non-SHAREABLE root then we have backrefs that resolve improperly.
+ *
+ * Both of these cases indicate file system corruption, or a bug in the
+ * backref walking code.
+ */
+ if (unlikely(!root)) {
+ btrfs_err(trans->fs_info,
+ "bytenr %llu doesn't have a backref path ending in a root",
+ node->bytenr);
+ return ERR_PTR(-EUCLEAN);
+ }
+ if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) {
+ btrfs_err(trans->fs_info,
+ "bytenr %llu has multiple refs with one ending in a non-shareable root",
+ node->bytenr);
+ return ERR_PTR(-EUCLEAN);
+ }
- ret = btrfs_record_root_in_trans(trans, root);
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
+ ret = record_reloc_root_in_trans(trans, root);
if (ret)
return ERR_PTR(ret);
- root = root->reloc_root;
-
- /*
- * We could have raced with another thread which failed, so
- * root->reloc_root may not be set, return ENOENT in this case.
- */
- if (!root)
- return ERR_PTR(-ENOENT);
+ goto found;
+ }
- if (next->new_bytenr != root->node->start) {
- /*
- * We just created the reloc root, so we shouldn't have
- * ->new_bytenr set and this shouldn't be in the changed
- * list. If it is then we have multiple roots pointing
- * at the same bytenr which indicates corruption, or
- * we've made a mistake in the backref walking code.
- */
- ASSERT(next->new_bytenr == 0);
- ASSERT(list_empty(&next->list));
- if (next->new_bytenr || !list_empty(&next->list)) {
- btrfs_err(trans->fs_info,
- "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
- node->bytenr, next->bytenr);
- return ERR_PTR(-EUCLEAN);
- }
+ ret = btrfs_record_root_in_trans(trans, root);
+ if (ret)
+ return ERR_PTR(ret);
+ root = root->reloc_root;
- next->new_bytenr = root->node->start;
- btrfs_put_root(next->root);
- next->root = btrfs_grab_root(root);
- ASSERT(next->root);
- list_add_tail(&next->list,
- &rc->backref_cache.changed);
- mark_block_processed(rc, next);
- break;
- }
+ /*
+ * We could have raced with another thread which failed, so
+ * root->reloc_root may not be set, return ENOENT in this case.
+ */
+ if (!root)
+ return ERR_PTR(-ENOENT);
- WARN_ON(1);
- root = NULL;
- next = walk_down_backref(edges, &index);
- if (!next || next->level <= node->level)
- break;
- }
- if (!root) {
+ if (unlikely(next->new_bytenr)) {
/*
- * This can happen if there's fs corruption or if there's a bug
- * in the backref lookup code.
+ * We just created the reloc root, so we shouldn't have
+ * ->new_bytenr set yet. If it is then we have multiple roots
+ * pointing at the same bytenr which indicates corruption, or
+ * we've made a mistake in the backref walking code.
*/
- ASSERT(0);
- return ERR_PTR(-ENOENT);
+ ASSERT(next->new_bytenr == 0);
+ btrfs_err(trans->fs_info,
+ "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
+ node->bytenr, next->bytenr);
+ return ERR_PTR(-EUCLEAN);
}
+ next->new_bytenr = root->node->start;
+ btrfs_put_root(next->root);
+ next->root = btrfs_grab_root(root);
+ ASSERT(next->root);
+ mark_block_processed(rc, next);
+found:
next = node;
/* setup backref node path for btrfs_reloc_cow_block */
while (1) {
@@ -2191,7 +2075,7 @@ struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
* This can occur if we have incomplete extent refs leading all
* the way up a particular path, in this case return -EUCLEAN.
*/
- if (!root)
+ if (unlikely(!root))
return ERR_PTR(-EUCLEAN);
/* No other choice for non-shareable tree */
@@ -2237,8 +2121,8 @@ static noinline_for_stack u64 calcu_metadata_size(struct reloc_control *rc,
if (list_empty(&next->upper))
break;
- edge = list_entry(next->upper.next,
- struct btrfs_backref_edge, list[LOWER]);
+ edge = list_first_entry(&next->upper, struct btrfs_backref_edge,
+ list[LOWER]);
edges[index++] = edge;
next = edge->node[UPPER];
}
@@ -2247,17 +2131,11 @@ static noinline_for_stack u64 calcu_metadata_size(struct reloc_control *rc,
return num_bytes;
}
-static int reserve_metadata_space(struct btrfs_trans_handle *trans,
- struct reloc_control *rc,
- struct btrfs_backref_node *node)
+static int refill_metadata_space(struct btrfs_trans_handle *trans,
+ struct reloc_control *rc, u64 num_bytes)
{
- struct btrfs_root *root = rc->extent_root;
- struct btrfs_fs_info *fs_info = root->fs_info;
- u64 num_bytes;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
- u64 tmp;
-
- num_bytes = calcu_metadata_size(rc, node) * 2;
trans->block_rsv = rc->block_rsv;
rc->reserved_bytes += num_bytes;
@@ -2270,7 +2148,8 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes,
BTRFS_RESERVE_FLUSH_LIMIT);
if (ret) {
- tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
+ u64 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
+
while (tmp <= rc->reserved_bytes)
tmp <<= 1;
/*
@@ -2288,6 +2167,16 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
return 0;
}
+static int reserve_metadata_space(struct btrfs_trans_handle *trans,
+ struct reloc_control *rc,
+ struct btrfs_backref_node *node)
+{
+ u64 num_bytes;
+
+ num_bytes = calcu_metadata_size(rc, node) * 2;
+ return refill_metadata_space(trans, rc, num_bytes);
+}
+
/*
* relocate a block tree, and then update pointers in upper level
* blocks that reference the block to point to the new location.
@@ -2373,7 +2262,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
bytenr = btrfs_node_blockptr(upper->eb, slot);
if (lowest) {
- if (bytenr != node->bytenr) {
+ if (unlikely(bytenr != node->bytenr)) {
btrfs_err(root->fs_info,
"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
bytenr, node->bytenr, slot,
@@ -2428,7 +2317,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
if (!ret)
ret = btrfs_drop_subtree(trans, root, eb,
upper->eb);
- if (ret)
+ if (unlikely(ret))
btrfs_abort_transaction(trans, ret);
}
next:
@@ -2442,7 +2331,7 @@ next:
if (!ret && node->pending) {
btrfs_backref_drop_node_buffer(node);
- list_move_tail(&node->list, &rc->backref_cache.changed);
+ list_del_init(&node->list);
node->pending = 0;
}
@@ -2479,8 +2368,8 @@ static int finish_pending_nodes(struct btrfs_trans_handle *trans,
for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
while (!list_empty(&cache->pending[level])) {
- node = list_entry(cache->pending[level].next,
- struct btrfs_backref_node, list);
+ node = list_first_entry(&cache->pending[level],
+ struct btrfs_backref_node, list);
list_move_tail(&node->list, &list);
BUG_ON(!node->pending);
@@ -2518,8 +2407,8 @@ static void update_processed_blocks(struct reloc_control *rc,
if (list_empty(&next->upper))
break;
- edge = list_entry(next->upper.next,
- struct btrfs_backref_edge, list[LOWER]);
+ edge = list_first_entry(&next->upper, struct btrfs_backref_edge,
+ list[LOWER]);
edges[index++] = edge;
next = edge->node[UPPER];
}
@@ -2531,8 +2420,8 @@ static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
{
u32 blocksize = rc->extent_root->fs_info->nodesize;
- if (test_range_bit(&rc->processed_blocks, bytenr,
- bytenr + blocksize - 1, EXTENT_DIRTY, NULL))
+ if (btrfs_test_range_bit(&rc->processed_blocks, bytenr,
+ bytenr + blocksize - 1, EXTENT_DIRTY, NULL))
return 1;
return 0;
}
@@ -2550,7 +2439,7 @@ static int get_tree_block_key(struct btrfs_fs_info *fs_info,
eb = read_tree_block(fs_info, block->bytenr, &check);
if (IS_ERR(eb))
return PTR_ERR(eb);
- if (!extent_buffer_uptodate(eb)) {
+ if (unlikely(!extent_buffer_uptodate(eb))) {
free_extent_buffer(eb);
return -EIO;
}
@@ -2605,8 +2494,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
/*
* This block was the root block of a root, and this is
* the first time we're processing the block and thus it
- * should not have had the ->new_bytenr modified and
- * should have not been included on the changed list.
+ * should not have had the ->new_bytenr modified.
*
* However in the case of corruption we could have
* multiple refs pointing to the same block improperly,
@@ -2616,8 +2504,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
* normal user in the case of corruption.
*/
ASSERT(node->new_bytenr == 0);
- ASSERT(list_empty(&node->list));
- if (node->new_bytenr || !list_empty(&node->list)) {
+ if (unlikely(node->new_bytenr)) {
btrfs_err(root->fs_info,
"bytenr %llu has improper references to it",
node->bytenr);
@@ -2640,17 +2527,12 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
btrfs_put_root(node->root);
node->root = btrfs_grab_root(root);
ASSERT(node->root);
- list_add_tail(&node->list, &rc->backref_cache.changed);
} else {
- path->lowest_level = node->level;
- if (root == root->fs_info->chunk_root)
- btrfs_reserve_chunk_metadata(trans, false);
- ret = btrfs_search_slot(trans, root, key, path, 0, 1);
- btrfs_release_path(path);
- if (root == root->fs_info->chunk_root)
- btrfs_trans_release_chunk_metadata(trans);
- if (ret > 0)
- ret = 0;
+ btrfs_err(root->fs_info,
+ "bytenr %llu resolved to a non-shareable root",
+ node->bytenr);
+ ret = -EUCLEAN;
+ goto out;
}
if (!ret)
update_processed_blocks(rc, node);
@@ -2658,11 +2540,50 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
ret = do_relocation(trans, rc, node, key, path, 1);
}
out:
- if (ret || node->level == 0 || node->cowonly)
+ if (ret || node->level == 0)
btrfs_backref_cleanup_node(&rc->backref_cache, node);
return ret;
}
+static int relocate_cowonly_block(struct btrfs_trans_handle *trans,
+ struct reloc_control *rc, struct tree_block *block,
+ struct btrfs_path *path)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *root;
+ u64 num_bytes;
+ int nr_levels;
+ int ret;
+
+ root = btrfs_get_fs_root(fs_info, block->owner, true);
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ nr_levels = max(btrfs_header_level(root->node) - block->level, 0) + 1;
+
+ num_bytes = fs_info->nodesize * nr_levels;
+ ret = refill_metadata_space(trans, rc, num_bytes);
+ if (ret) {
+ btrfs_put_root(root);
+ return ret;
+ }
+ path->lowest_level = block->level;
+ if (root == root->fs_info->chunk_root)
+ btrfs_reserve_chunk_metadata(trans, false);
+
+ ret = btrfs_search_slot(trans, root, &block->key, path, 0, 1);
+ path->lowest_level = 0;
+ btrfs_release_path(path);
+
+ if (root == root->fs_info->chunk_root)
+ btrfs_trans_release_chunk_metadata(trans);
+ if (ret > 0)
+ ret = 0;
+ btrfs_put_root(root);
+
+ return ret;
+}
+
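The new relocate_cowonly_block() above covers blocks owned by non-subvolume trees and the data reloc tree: no backref tree is built, the code just reserves enough metadata for one root-to-block path and lets btrfs_search_slot() with lowest_level set COW its way down. The reservation is nodesize multiplied by the number of levels on that path; a tiny sketch of the arithmetic (values are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Worst-case metadata needed to COW one path from the root down to 'block_level'. */
static uint64_t cow_path_reservation(uint32_t nodesize, int root_level, int block_level)
{
    int nr_levels = root_level - block_level;

    if (nr_levels < 0)
        nr_levels = 0;      /* clamp, mirroring max(..., 0) */
    nr_levels += 1;         /* include the target block itself */
    return (uint64_t)nodesize * nr_levels;
}

int main(void)
{
    /* e.g. 16K nodes, root at level 2, relocating a level-0 leaf */
    printf("%llu bytes\n",
           (unsigned long long)cow_path_reservation(16384, 2, 0));
    return 0;
}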
/*
* relocate a list of blocks
*/
@@ -2702,6 +2623,20 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
/* Do tree relocation */
rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
+ /*
+ * For COWonly blocks, or the data reloc tree, we only need to
+ * COW down to the block, there's no need to generate a backref
+ * tree.
+ */
+ if (block->owner &&
+ (!btrfs_is_fstree(block->owner) ||
+ block->owner == BTRFS_DATA_RELOC_TREE_OBJECTID)) {
+ ret = relocate_cowonly_block(trans, rc, block, path);
+ if (ret)
+ break;
+ continue;
+ }
+
node = build_backref_tree(trans, rc, &block->key,
block->level, block->bytenr);
if (IS_ERR(node)) {
@@ -2735,69 +2670,24 @@ static noinline_for_stack int prealloc_file_extent_cluster(struct reloc_control
u64 num_bytes;
int nr;
int ret = 0;
- u64 i_size = i_size_read(&inode->vfs_inode);
u64 prealloc_start = cluster->start - offset;
u64 prealloc_end = cluster->end - offset;
u64 cur_offset = prealloc_start;
/*
- * For subpage case, previous i_size may not be aligned to PAGE_SIZE.
- * This means the range [i_size, PAGE_END + 1) is filled with zeros by
- * btrfs_do_readpage() call of previously relocated file cluster.
+ * For blocksize < folio size case (either bs < page size or large folios),
+ * beyond i_size, all blocks are filled with zero.
*
- * If the current cluster starts in the above range, btrfs_do_readpage()
+ * If the current cluster covers the above range, btrfs_do_readpage()
* will skip the read, and relocate_one_folio() will later writeback
* the padding zeros as new data, causing data corruption.
*
- * Here we have to manually invalidate the range (i_size, PAGE_END + 1).
+ * Here we have to invalidate the cache covering our cluster.
*/
- if (!PAGE_ALIGNED(i_size)) {
- struct address_space *mapping = inode->vfs_inode.i_mapping;
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- const u32 sectorsize = fs_info->sectorsize;
- struct folio *folio;
-
- ASSERT(sectorsize < PAGE_SIZE);
- ASSERT(IS_ALIGNED(i_size, sectorsize));
-
- /*
- * Subpage can't handle page with DIRTY but without UPTODATE
- * bit as it can lead to the following deadlock:
- *
- * btrfs_read_folio()
- * | Page already *locked*
- * |- btrfs_lock_and_flush_ordered_range()
- * |- btrfs_start_ordered_extent()
- * |- extent_write_cache_pages()
- * |- lock_page()
- * We try to lock the page we already hold.
- *
- * Here we just writeback the whole data reloc inode, so that
- * we will be ensured to have no dirty range in the page, and
- * are safe to clear the uptodate bits.
- *
- * This shouldn't cause too much overhead, as we need to write
- * the data back anyway.
- */
- ret = filemap_write_and_wait(mapping);
- if (ret < 0)
- return ret;
-
- clear_extent_bits(&inode->io_tree, i_size,
- round_up(i_size, PAGE_SIZE) - 1,
- EXTENT_UPTODATE);
- folio = filemap_lock_folio(mapping, i_size >> PAGE_SHIFT);
- /*
- * If page is freed we don't need to do anything then, as we
- * will re-read the whole page anyway.
- */
- if (!IS_ERR(folio)) {
- btrfs_subpage_clear_uptodate(fs_info, folio, i_size,
- round_up(i_size, PAGE_SIZE) - i_size);
- folio_unlock(folio);
- folio_put(folio);
- }
- }
+ ret = filemap_invalidate_inode(&inode->vfs_inode, true, prealloc_start,
+ prealloc_end);
+ if (ret < 0)
+ return ret;
BUG_ON(cluster->start != cluster->boundary[0]);
ret = btrfs_alloc_data_chunk_ondemand(inode,
@@ -2815,21 +2705,21 @@ static noinline_for_stack int prealloc_file_extent_cluster(struct reloc_control
else
end = cluster->end - offset;
- lock_extent(&inode->io_tree, start, end, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
num_bytes = end + 1 - start;
ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
num_bytes, num_bytes,
end + 1, &alloc_hint);
cur_offset = end + 1;
- unlock_extent(&inode->io_tree, start, end, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
if (ret)
break;
}
btrfs_inode_unlock(inode, 0);
if (cur_offset < prealloc_end)
- btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
- prealloc_end + 1 - cur_offset);
+ btrfs_free_reserved_data_space_noquota(inode,
+ prealloc_end + 1 - cur_offset);
return ret;
}
@@ -2843,7 +2733,7 @@ static noinline_for_stack int setup_relocation_extent_mapping(struct reloc_contr
u64 end = rc->cluster.end - offset;
int ret = 0;
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em)
return -ENOMEM;
@@ -2854,10 +2744,10 @@ static noinline_for_stack int setup_relocation_extent_mapping(struct reloc_contr
em->ram_bytes = em->len;
em->flags |= EXTENT_FLAG_PINNED;
- lock_extent(&inode->io_tree, start, end, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
ret = btrfs_replace_extent_map_range(inode, em, false);
- unlock_extent(&inode->io_tree, start, end, &cached_state);
- free_extent_map(em);
+ btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
+ btrfs_free_extent_map(em);
return ret;
}
@@ -2886,13 +2776,15 @@ static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
static int relocate_one_folio(struct reloc_control *rc,
struct file_ra_state *ra,
- int *cluster_nr, unsigned long index)
+ int *cluster_nr, u64 *file_offset_ret)
{
const struct file_extent_cluster *cluster = &rc->cluster;
struct inode *inode = rc->data_inode;
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ const u64 orig_file_offset = *file_offset_ret;
u64 offset = BTRFS_I(inode)->reloc_block_group_start;
- const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
+ const pgoff_t last_index = (cluster->end - offset) >> PAGE_SHIFT;
+ const pgoff_t index = orig_file_offset >> PAGE_SHIFT;
gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
struct folio *folio;
u64 folio_start;
@@ -2902,6 +2794,7 @@ static int relocate_one_folio(struct reloc_control *rc,
const bool use_rst = btrfs_need_stripe_tree_update(fs_info, rc->block_group->flags);
ASSERT(index <= last_index);
+again:
folio = filemap_lock_folio(inode->i_mapping, index);
if (IS_ERR(folio)) {
@@ -2924,8 +2817,6 @@ static int relocate_one_folio(struct reloc_control *rc,
return PTR_ERR(folio);
}
- WARN_ON(folio_order(folio));
-
if (folio_test_readahead(folio) && !use_rst)
page_cache_async_readahead(inode->i_mapping, ra, NULL,
folio, last_index + 1 - index);
@@ -2933,15 +2824,20 @@ static int relocate_one_folio(struct reloc_control *rc,
if (!folio_test_uptodate(folio)) {
btrfs_read_folio(NULL, folio);
folio_lock(folio);
- if (!folio_test_uptodate(folio)) {
+ if (unlikely(!folio_test_uptodate(folio))) {
ret = -EIO;
goto release_folio;
}
+ if (folio->mapping != inode->i_mapping) {
+ folio_unlock(folio);
+ folio_put(folio);
+ goto again;
+ }
}
/*
* We could have lost folio private when we dropped the lock to read the
- * folio above, make sure we set_page_extent_mapped here so we have any
+ * folio above, make sure we set_folio_extent_mapped() here so we have any
* of the subpage blocksize stuff we need in place.
*/
ret = set_folio_extent_mapped(folio);
@@ -2949,7 +2845,7 @@ static int relocate_one_folio(struct reloc_control *rc,
goto release_folio;
folio_start = folio_pos(folio);
- folio_end = folio_start + PAGE_SIZE - 1;
+ folio_end = folio_start + folio_size(folio) - 1;
/*
* Start from the cluster, as for subpage case, the cluster can start
@@ -2973,15 +2869,15 @@ static int relocate_one_folio(struct reloc_control *rc,
goto release_folio;
/* Mark the range delalloc and dirty for later writeback */
- lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
- &cached_state);
+ btrfs_lock_extent(&BTRFS_I(inode)->io_tree, clamped_start,
+ clamped_end, &cached_state);
ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
clamped_end, 0, &cached_state);
if (ret) {
- clear_extent_bit(&BTRFS_I(inode)->io_tree,
- clamped_start, clamped_end,
- EXTENT_LOCKED | EXTENT_BOUNDARY,
- &cached_state);
+ btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree,
+ clamped_start, clamped_end,
+ EXTENT_LOCKED | EXTENT_BOUNDARY,
+ &cached_state);
btrfs_delalloc_release_metadata(BTRFS_I(inode),
clamped_len, true);
btrfs_delalloc_release_extents(BTRFS_I(inode),
@@ -2997,18 +2893,19 @@ static int relocate_one_folio(struct reloc_control *rc,
* EXTENT_BOUNDARY bit prevents current extent from being merged
* with previous extent.
*/
- if (in_range(cluster->boundary[*cluster_nr] - offset, folio_start, PAGE_SIZE)) {
+ if (in_range(cluster->boundary[*cluster_nr] - offset,
+ folio_start, folio_size(folio))) {
u64 boundary_start = cluster->boundary[*cluster_nr] -
offset;
u64 boundary_end = boundary_start +
fs_info->sectorsize - 1;
- set_extent_bit(&BTRFS_I(inode)->io_tree,
- boundary_start, boundary_end,
- EXTENT_BOUNDARY, NULL);
+ btrfs_set_extent_bit(&BTRFS_I(inode)->io_tree,
+ boundary_start, boundary_end,
+ EXTENT_BOUNDARY, NULL);
}
- unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
- &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
+ &cached_state);
btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
cur += clamped_len;
@@ -3027,6 +2924,7 @@ static int relocate_one_folio(struct reloc_control *rc,
btrfs_throttle(fs_info);
if (btrfs_should_cancel_balance(fs_info))
ret = -ECANCELED;
+ *file_offset_ret = folio_end + 1;
return ret;
release_folio:
@@ -3040,9 +2938,8 @@ static int relocate_file_extent_cluster(struct reloc_control *rc)
struct inode *inode = rc->data_inode;
const struct file_extent_cluster *cluster = &rc->cluster;
u64 offset = BTRFS_I(inode)->reloc_block_group_start;
- unsigned long index;
- unsigned long last_index;
- struct file_ra_state *ra;
+ u64 cur_file_offset = cluster->start - offset;
+ struct file_ra_state AUTO_KFREE(ra);
int cluster_nr = 0;
int ret = 0;
@@ -3055,22 +2952,21 @@ static int relocate_file_extent_cluster(struct reloc_control *rc)
ret = prealloc_file_extent_cluster(rc);
if (ret)
- goto out;
+ return ret;
file_ra_state_init(ra, inode->i_mapping);
ret = setup_relocation_extent_mapping(rc);
if (ret)
- goto out;
+ return ret;
- last_index = (cluster->end - offset) >> PAGE_SHIFT;
- for (index = (cluster->start - offset) >> PAGE_SHIFT;
- index <= last_index && !ret; index++)
- ret = relocate_one_folio(rc, ra, &cluster_nr, index);
+ while (cur_file_offset < cluster->end - offset) {
+ ret = relocate_one_folio(rc, ra, &cluster_nr, &cur_file_offset);
+ if (ret)
+ break;
+ }
if (ret == 0)
WARN_ON(cluster_nr != cluster->nr);
-out:
- kfree(ra);
return ret;
}
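
The rewritten loop above no longer steps by a fixed page index: relocate_one_folio() reports, via *file_offset_ret, the first byte after the folio it handled (folio_end + 1), and the caller simply resumes from that offset. A minimal standalone C sketch of the same cursor-driven pattern follows; process_one_chunk() and chunk_size() are hypothetical helpers, not btrfs code.

#include <stdio.h>

/* Pretend chunks (folios) at different offsets have different sizes. */
static unsigned long chunk_size(unsigned long offset)
{
	return (offset % 3 == 0) ? 8192 : 4096;
}

/* Process one chunk and report the first byte after it via *next. */
static int process_one_chunk(unsigned long offset, unsigned long *next)
{
	unsigned long len = chunk_size(offset);

	printf("processed [%lu, %lu)\n", offset, offset + len);
	*next = offset + len;
	return 0;
}

int main(void)
{
	unsigned long cur = 0;
	const unsigned long end = 40960;
	int ret = 0;

	/* Same shape as the new while loop: stop on error or once cur reaches end. */
	while (cur < end) {
		ret = process_one_chunk(cur, &cur);
		if (ret)
			break;
	}
	return ret;
}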
@@ -3229,7 +3125,7 @@ static int add_tree_block(struct reloc_control *rc,
block->key_ready = false;
block->owner = owner;
- rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
+ rb_node = rb_simple_insert(blocks, &block->simple_node);
if (rb_node)
btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
-EEXIST);
@@ -3245,7 +3141,7 @@ static int __add_tree_block(struct reloc_control *rc,
struct rb_root *blocks)
{
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
int ret;
bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
@@ -3269,11 +3165,11 @@ again:
key.offset = blocksize;
}
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
if (ret > 0 && skinny) {
if (path->slots[0]) {
@@ -3300,31 +3196,29 @@ again:
"tree block extent item (%llu) is not found in extent tree",
bytenr);
WARN_ON(1);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
- ret = add_tree_block(rc, &key, path, blocks);
-out:
- btrfs_free_path(path);
- return ret;
+ return add_tree_block(rc, &key, path, blocks);
}
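
__add_tree_block() above switches from a manually freed path plus a goto label to BTRFS_PATH_AUTO_FREE(), so every early return releases the path automatically. A standalone sketch of the same scope-based cleanup idea, using the compiler's cleanup attribute (a GCC/Clang extension); the macro and helper names below are illustrative, not the kernel definitions.

#include <stdio.h>
#include <stdlib.h>

/* Run free() on the pointed-to pointer when the variable leaves scope. */
static void free_charp(char **p)
{
	free(*p);
}
#define AUTO_FREE_CHARP __attribute__((cleanup(free_charp)))

/* Hypothetical lookup that fills the buffer, or fails. */
static int do_search(char *buf, size_t len, int fail)
{
	if (fail)
		return -2;
	snprintf(buf, len, "root item 256");
	return 0;
}

int main(void)
{
	AUTO_FREE_CHARP char *path = malloc(64);
	int ret;

	if (!path)
		return -1;

	/* Early returns no longer need a "goto out; free(path);" tail. */
	ret = do_search(path, 64, 0);
	if (ret < 0)
		return ret;

	printf("found: %s\n", path);
	return 0;	/* path is freed automatically here as well */
}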
-static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group *block_group,
+static int delete_block_group_cache(struct btrfs_block_group *block_group,
struct inode *inode,
u64 ino)
{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_root *root = fs_info->tree_root;
struct btrfs_trans_handle *trans;
+ struct btrfs_inode *btrfs_inode;
int ret = 0;
if (inode)
goto truncate;
- inode = btrfs_iget(ino, root);
- if (IS_ERR(inode))
+ btrfs_inode = btrfs_iget(ino, root);
+ if (IS_ERR(btrfs_inode))
return -ENOENT;
+ inode = &btrfs_inode->vfs_inode;
truncate:
ret = btrfs_check_trunc_cache_free_space(fs_info,
@@ -3384,8 +3278,7 @@ static int delete_v1_space_cache(struct extent_buffer *leaf,
}
if (!found)
return -ENOENT;
- ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
- space_cache_ino);
+ ret = delete_block_group_cache(block_group, NULL, space_cache_ino);
return ret;
}
@@ -3465,8 +3358,8 @@ int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = 0;
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
0, 0);
if (ret < 0)
@@ -3505,9 +3398,9 @@ next:
goto next;
}
- block_found = find_first_extent_bit(&rc->processed_blocks,
- key.objectid, &start, &end,
- EXTENT_DIRTY, NULL);
+ block_found = btrfs_find_first_extent_bit(&rc->processed_blocks,
+ key.objectid, &start, &end,
+ EXTENT_DIRTY, NULL);
if (block_found && start <= key.objectid) {
btrfs_release_path(path);
@@ -3596,7 +3489,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
struct rb_root blocks = RB_ROOT;
struct btrfs_key key;
struct btrfs_trans_handle *trans = NULL;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_extent_item *ei;
u64 flags;
int ret;
@@ -3716,7 +3609,7 @@ restart:
}
btrfs_release_path(path);
- clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
+ btrfs_clear_extent_bit(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, NULL);
if (trans) {
btrfs_end_transaction_throttle(trans);
@@ -3765,14 +3658,13 @@ out_free:
if (ret < 0 && !err)
err = ret;
btrfs_free_block_rsv(fs_info, rc->block_rsv);
- btrfs_free_path(path);
return err;
}
static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 objectid)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_inode_item *item;
struct extent_buffer *leaf;
int ret;
@@ -3783,7 +3675,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_inode(trans, root, path, objectid);
if (ret)
- goto out;
+ return ret;
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
@@ -3793,16 +3685,13 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
BTRFS_INODE_PREALLOC);
- btrfs_mark_buffer_dirty(trans, leaf);
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
static void delete_orphan_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 objectid)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
int ret = 0;
@@ -3825,7 +3714,6 @@ static void delete_orphan_inode(struct btrfs_trans_handle *trans,
out:
if (ret)
btrfs_abort_transaction(trans, ret);
- btrfs_free_path(path);
}
/*
@@ -3833,10 +3721,10 @@ out:
* the inode is in data relocation tree and its link count is 0
*/
static noinline_for_stack struct inode *create_reloc_inode(
- struct btrfs_fs_info *fs_info,
const struct btrfs_block_group *group)
{
- struct inode *inode = NULL;
+ struct btrfs_fs_info *fs_info = group->fs_info;
+ struct btrfs_inode *inode = NULL;
struct btrfs_trans_handle *trans;
struct btrfs_root *root;
u64 objectid;
@@ -3864,23 +3752,25 @@ static noinline_for_stack struct inode *create_reloc_inode(
inode = NULL;
goto out;
}
- BTRFS_I(inode)->reloc_block_group_start = group->start;
+ inode->reloc_block_group_start = group->start;
- ret = btrfs_orphan_add(trans, BTRFS_I(inode));
+ ret = btrfs_orphan_add(trans, inode);
out:
btrfs_put_root(root);
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
if (ret) {
- iput(inode);
- inode = ERR_PTR(ret);
+ if (inode)
+ iput(&inode->vfs_inode);
+ return ERR_PTR(ret);
}
- return inode;
+ return &inode->vfs_inode;
}
/*
* Mark start of chunk relocation that is cancellable. Check if the cancellation
* has been requested meanwhile and don't start in that case.
+ * NOTE: if this returns an error, reloc_chunk_end() must not be called.
*
* Return:
* 0 success
@@ -3897,10 +3787,8 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
btrfs_info(fs_info, "chunk relocation canceled on start");
- /*
- * On cancel, clear all requests but let the caller mark
- * the end after cleanup operations.
- */
+ /* On cancel, clear all requests. */
+ clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
atomic_set(&fs_info->reloc_cancel_req, 0);
return -ECANCELED;
}
@@ -3909,9 +3797,11 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
/*
* Mark end of chunk relocation that is cancellable and wake any waiters.
+ * NOTE: call only if a previous call to reloc_chunk_start() succeeded.
*/
static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
{
+ ASSERT(test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags));
/* Requested after start, clear bit first so any waiters can continue */
if (atomic_read(&fs_info->reloc_cancel_req) > 0)
btrfs_info(fs_info, "chunk relocation canceled during operation");
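
The comments added to reloc_chunk_start()/reloc_chunk_end() pin down a pairing contract: a failed start now clears BTRFS_FS_RELOC_RUNNING itself, so the end helper may only run after a successful start. A small standalone sketch of that contract; op_start()/op_end() are hypothetical stand-ins, not the btrfs helpers.

#include <stdbool.h>
#include <stdio.h>

static bool running;

static int op_start(bool cancel_requested)
{
	running = true;
	if (cancel_requested) {
		/* Undo our own state before failing, so op_end() is never needed. */
		running = false;
		return -1;
	}
	return 0;
}

static void op_end(void)
{
	/* Only valid after a successful op_start(). */
	if (!running)
		fprintf(stderr, "bug: end without start\n");
	running = false;
}

int main(void)
{
	if (op_start(false) == 0) {
		/* ... do the cancellable work ... */
		op_end();
	}

	/* A failed start cleans up after itself; the caller skips op_end(). */
	if (op_start(true) == 0)
		op_end();

	return 0;
}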
@@ -3932,7 +3822,7 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
btrfs_backref_init_cache(fs_info, &rc->backref_cache, true);
rc->reloc_root_tree.rb_root = RB_ROOT;
spin_lock_init(&rc->reloc_root_tree.lock);
- extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
+ btrfs_extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
return rc;
}
@@ -3953,7 +3843,7 @@ static void free_reloc_control(struct reloc_control *rc)
*/
static void describe_relocation(struct btrfs_block_group *block_group)
{
- char buf[128] = {'\0'};
+ char buf[128] = "NONE";
btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
@@ -3973,7 +3863,8 @@ static const char *stage_to_string(enum reloc_stage stage)
/*
* function to relocate all extents in a block group.
*/
-int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
+int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start,
+ bool verbose)
{
struct btrfs_block_group *bg;
struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start);
@@ -3981,8 +3872,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
struct inode *inode;
struct btrfs_path *path;
int ret;
- int rw = 0;
- int err = 0;
+ bool bg_is_ro = false;
/*
* This only gets set if we had a half-deleted snapshot on mount. We
@@ -4024,24 +3914,20 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
}
ret = reloc_chunk_start(fs_info);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out_put_bg;
- }
rc->extent_root = extent_root;
rc->block_group = bg;
ret = btrfs_inc_block_group_ro(rc->block_group, true);
- if (ret) {
- err = ret;
+ if (ret)
goto out;
- }
- rw = 1;
+ bg_is_ro = true;
path = btrfs_alloc_path();
if (!path) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
@@ -4049,23 +3935,22 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
btrfs_free_path(path);
if (!IS_ERR(inode))
- ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
+ ret = delete_block_group_cache(rc->block_group, inode, 0);
else
ret = PTR_ERR(inode);
- if (ret && ret != -ENOENT) {
- err = ret;
+ if (ret && ret != -ENOENT)
goto out;
- }
- rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
+ rc->data_inode = create_reloc_inode(rc->block_group);
if (IS_ERR(rc->data_inode)) {
- err = PTR_ERR(rc->data_inode);
+ ret = PTR_ERR(rc->data_inode);
rc->data_inode = NULL;
goto out;
}
- describe_relocation(rc->block_group);
+ if (verbose)
+ describe_relocation(rc->block_group);
btrfs_wait_block_group_reservations(rc->block_group);
btrfs_wait_nocow_writers(rc->block_group);
@@ -4080,8 +3965,6 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
mutex_lock(&fs_info->cleaner_mutex);
ret = relocate_block_group(rc);
mutex_unlock(&fs_info->cleaner_mutex);
- if (ret < 0)
- err = ret;
finishes_stage = rc->stage;
/*
@@ -4094,37 +3977,41 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
* out of the loop if we hit an error.
*/
if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
- ret = btrfs_wait_ordered_range(BTRFS_I(rc->data_inode), 0,
- (u64)-1);
- if (ret)
- err = ret;
+ int wb_ret;
+
+ wb_ret = btrfs_wait_ordered_range(BTRFS_I(rc->data_inode), 0,
+ (u64)-1);
+ if (wb_ret && ret == 0)
+ ret = wb_ret;
invalidate_mapping_pages(rc->data_inode->i_mapping,
0, -1);
rc->stage = UPDATE_DATA_PTRS;
}
- if (err < 0)
+ if (ret < 0)
goto out;
if (rc->extents_found == 0)
break;
- btrfs_info(fs_info, "found %llu extents, stage: %s",
- rc->extents_found, stage_to_string(finishes_stage));
+ if (verbose)
+ btrfs_info(fs_info, "found %llu extents, stage: %s",
+ rc->extents_found,
+ stage_to_string(finishes_stage));
}
WARN_ON(rc->block_group->pinned > 0);
WARN_ON(rc->block_group->reserved > 0);
WARN_ON(rc->block_group->used > 0);
out:
- if (err && rw)
+ if (ret && bg_is_ro)
btrfs_dec_block_group_ro(rc->block_group);
iput(rc->data_inode);
+ reloc_chunk_end(fs_info);
out_put_bg:
btrfs_put_block_group(bg);
- reloc_chunk_end(fs_info);
free_reloc_control(rc);
- return err;
+ return ret;
}
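
btrfs_relocate_block_group() above drops the separate err variable and keeps a single ret, with the ordered-range wait using a local wb_ret so a later failure cannot overwrite the first error. A standalone sketch of that keep-the-first-error convention; the step names are made up.

#include <stdio.h>

static int do_work(void)  { return -5; }	/* pretend the main work fails */
static int do_flush(void) { return -7; }	/* pretend cleanup also fails */

int main(void)
{
	int ret = do_work();
	int flush_ret;

	/* Cleanup always runs, but must not clobber the first error. */
	flush_ret = do_flush();
	if (flush_ret && ret == 0)
		ret = flush_ret;

	printf("final ret = %d\n", ret);	/* prints -5, not -7 */
	return ret ? 1 : 0;
}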
static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
@@ -4255,8 +4142,7 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
rc->merge_reloc_tree = true;
while (!list_empty(&reloc_roots)) {
- reloc_root = list_entry(reloc_roots.next,
- struct btrfs_root, root_list);
+ reloc_root = list_first_entry(&reloc_roots, struct btrfs_root, root_list);
list_del(&reloc_root->root_list);
if (btrfs_root_refs(&reloc_root->root_item) == 0) {
@@ -4306,8 +4192,8 @@ out_clean:
ret = ret2;
out_unset:
unset_reloc_control(rc);
-out_end:
reloc_chunk_end(fs_info);
+out_end:
free_reloc_control(rc);
out:
free_reloc_roots(&reloc_roots);
@@ -4349,7 +4235,7 @@ int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered)
while (!list_empty(&list)) {
struct btrfs_ordered_sum *sums =
- list_entry(list.next, struct btrfs_ordered_sum, list);
+ list_first_entry(&list, struct btrfs_ordered_sum, list);
list_del_init(&sums->list);
@@ -4399,11 +4285,21 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
WARN_ON(!first_cow && level == 0);
node = rc->backref_cache.path[level];
- BUG_ON(node->bytenr != buf->start &&
- node->new_bytenr != buf->start);
+
+ /*
+ * If node->bytenr != buf->start and node->new_bytenr != buf->start,
+ * then we've got the wrong backref node for what we expected to see
+ * here and the cache is incorrect.
+ */
+ if (unlikely(node->bytenr != buf->start && node->new_bytenr != buf->start)) {
+ btrfs_err(fs_info,
+"bytenr %llu was found but our backref cache was expecting %llu or %llu",
+ buf->start, node->bytenr, node->new_bytenr);
+ return -EUCLEAN;
+ }
btrfs_backref_drop_node_buffer(node);
- atomic_inc(&cow->refs);
+ refcount_inc(&cow->refs);
node->eb = cow;
node->new_bytenr = cow->start;
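
btrfs_reloc_cow_block() above replaces the BUG_ON() with a message plus -EUCLEAN, turning an inconsistent backref cache into a recoverable error for the caller. A standalone sketch of the same report-and-return style; the errno value and names here are illustrative.

#include <stdio.h>

#define EUCLEAN 117	/* illustrative errno value */

/* Report the mismatch and hand an error back instead of crashing. */
static int check_cached(unsigned long long expected, unsigned long long seen)
{
	if (expected != seen) {
		fprintf(stderr,
			"bytenr %llu was found but the cache expected %llu\n",
			seen, expected);
		return -EUCLEAN;
	}
	return 0;
}

int main(void)
{
	int ret;

	ret = check_cached(1048576, 1048576);	/* consistent: returns 0 */
	if (ret)
		return 1;

	ret = check_cached(1048576, 2097152);	/* mismatch: returns -EUCLEAN */
	printf("second check returned %d\n", ret);
	return 0;
}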
@@ -4500,10 +4396,7 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
return ret;
}
new_root->reloc_root = btrfs_grab_root(reloc_root);
-
- if (rc->create_reloc_tree)
- ret = clone_backref_node(trans, rc, root, reloc_root);
- return ret;
+ return 0;
}
/*
diff --git a/fs/btrfs/relocation.h b/fs/btrfs/relocation.h
index 788c86d8633a..5c36b3f84b57 100644
--- a/fs/btrfs/relocation.h
+++ b/fs/btrfs/relocation.h
@@ -12,7 +12,8 @@ struct btrfs_trans_handle;
struct btrfs_ordered_extent;
struct btrfs_pending_snapshot;
-int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start);
+int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start,
+ bool verbose);
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, struct btrfs_root *root);
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 33962671a96c..6a7e297ab0a7 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -85,7 +85,7 @@ int btrfs_find_root(struct btrfs_root *root, const struct btrfs_key *search_key,
* Key with offset -1 found, there would have to exist a root
* with such id, but this is out of the valid range.
*/
- if (ret == 0) {
+ if (unlikely(ret == 0)) {
ret = -EUCLEAN;
goto out;
}
@@ -130,7 +130,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
*item)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *l;
int ret;
int slot;
@@ -143,15 +143,15 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
ret = btrfs_search_slot(trans, root, key, path, 0, 1);
if (ret < 0)
- goto out;
+ return ret;
- if (ret > 0) {
+ if (unlikely(ret > 0)) {
btrfs_crit(fs_info,
- "unable to find root key (%llu %u %llu) in tree %llu",
- key->objectid, key->type, key->offset, btrfs_root_id(root));
+ "unable to find root key " BTRFS_KEY_FMT " in tree %llu",
+ BTRFS_KEY_FMT_VALUE(key), btrfs_root_id(root));
ret = -EUCLEAN;
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
l = path->nodes[0];
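
btrfs_update_root() above switches the message to the BTRFS_KEY_FMT / BTRFS_KEY_FMT_VALUE pair, which keeps the format string and its argument list for an (objectid, type, offset) key in one place. A standalone sketch of that format-macro pairing; the KEY_FMT macros below are illustrative, not the btrfs definitions.

#include <stdio.h>

struct key {
	unsigned long long objectid;
	unsigned char type;
	unsigned long long offset;
};

/* Keep the format string and its argument list for a key in sync. */
#define KEY_FMT			"(%llu %u %llu)"
#define KEY_FMT_VALUE(k)	(k)->objectid, (unsigned int)(k)->type, (k)->offset

int main(void)
{
	struct key k = { 256, 132, 0 };

	printf("unable to find root key " KEY_FMT " in tree %llu\n",
	       KEY_FMT_VALUE(&k), 1ULL);
	return 0;
}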
@@ -168,22 +168,22 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_release_path(path);
ret = btrfs_search_slot(trans, root, key, path,
-1, 1);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
ret = btrfs_del_item(trans, root, path);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
btrfs_release_path(path);
ret = btrfs_insert_empty_item(trans, root, path,
key, sizeof(*item));
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
l = path->nodes[0];
slot = path->slots[0];
@@ -197,9 +197,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_set_root_generation_v2(item, btrfs_root_generation(item));
write_extent_buffer(l, item, ptr, sizeof(*item));
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
-out:
- btrfs_free_path(path);
return ret;
}
@@ -217,7 +214,7 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *tree_root = fs_info->tree_root;
struct extent_buffer *leaf;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_root *root;
int err = 0;
@@ -310,7 +307,6 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
btrfs_put_root(root);
}
- btrfs_free_path(path);
return err;
}
@@ -319,7 +315,7 @@ int btrfs_del_root(struct btrfs_trans_handle *trans,
const struct btrfs_key *key)
{
struct btrfs_root *root = trans->fs_info->tree_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
path = btrfs_alloc_path();
@@ -327,17 +323,12 @@ int btrfs_del_root(struct btrfs_trans_handle *trans,
return -ENOMEM;
ret = btrfs_search_slot(trans, root, key, path, -1, 1);
if (ret < 0)
- goto out;
- if (ret != 0) {
+ return ret;
+ if (unlikely(ret > 0))
/* The root must exist but we did not find it by the key. */
- ret = -EUCLEAN;
- goto out;
- }
+ return -EUCLEAN;
- ret = btrfs_del_item(trans, root, path);
-out:
- btrfs_free_path(path);
- return ret;
+ return btrfs_del_item(trans, root, path);
}
int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
@@ -345,7 +336,7 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
const struct fscrypt_str *name)
{
struct btrfs_root *tree_root = trans->fs_info->tree_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root_ref *ref;
struct extent_buffer *leaf;
struct btrfs_key key;
@@ -362,7 +353,7 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
again:
ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
if (ret < 0) {
- goto out;
+ return ret;
} else if (ret == 0) {
leaf = path->nodes[0];
ref = btrfs_item_ptr(leaf, path->slots[0],
@@ -370,18 +361,16 @@ again:
ptr = (unsigned long)(ref + 1);
if ((btrfs_root_ref_dirid(leaf, ref) != dirid) ||
(btrfs_root_ref_name_len(leaf, ref) != name->len) ||
- memcmp_extent_buffer(leaf, name->name, ptr, name->len)) {
- ret = -ENOENT;
- goto out;
- }
+ memcmp_extent_buffer(leaf, name->name, ptr, name->len))
+ return -ENOENT;
+
*sequence = btrfs_root_ref_sequence(leaf, ref);
ret = btrfs_del_item(trans, tree_root, path);
if (ret)
- goto out;
+ return ret;
} else {
- ret = -ENOENT;
- goto out;
+ return -ENOENT;
}
if (key.type == BTRFS_ROOT_BACKREF_KEY) {
@@ -392,8 +381,6 @@ again:
goto again;
}
-out:
- btrfs_free_path(path);
return ret;
}
@@ -419,7 +406,7 @@ int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
struct btrfs_root *tree_root = trans->fs_info->tree_root;
struct btrfs_key key;
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root_ref *ref;
struct extent_buffer *leaf;
unsigned long ptr;
@@ -434,9 +421,8 @@ int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
again:
ret = btrfs_insert_empty_item(trans, tree_root, path, &key,
sizeof(*ref) + name->len);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- btrfs_free_path(path);
return ret;
}
@@ -447,7 +433,6 @@ again:
btrfs_set_root_ref_name_len(leaf, ref, name->len);
ptr = (unsigned long)(ref + 1);
write_extent_buffer(leaf, name->name, ptr, name->len);
- btrfs_mark_buffer_dirty(trans, leaf);
if (key.type == BTRFS_ROOT_BACKREF_KEY) {
btrfs_release_path(path);
@@ -457,7 +442,6 @@ again:
goto again;
}
- btrfs_free_path(path);
return 0;
}
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 204c928beaf9..a40ee41f42c6 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -66,8 +66,6 @@ struct scrub_ctx;
/* Represent one sector and its needed info to verify the content. */
struct scrub_sector_verification {
- bool is_metadata;
-
union {
/*
* Csum pointer for data csum verification. Should point to a
@@ -100,7 +98,39 @@ enum scrub_stripe_flags {
SCRUB_STRIPE_FLAG_NO_REPORT,
};
-#define SCRUB_STRIPE_PAGES (BTRFS_STRIPE_LEN / PAGE_SIZE)
+/*
+ * We have multiple bitmaps for one scrub_stripe.
+ * However, each bitmap has at most (BTRFS_STRIPE_LEN / blocksize) bits,
+ * which is normally 16 and thus much smaller than BITS_PER_LONG (32 or 64).
+ *
+ * So to reduce the memory usage of each scrub_stripe, we pack those bitmaps
+ * into a larger one.
+ *
+ * These enum values record where each sub-bitmap sits inside the larger one.
+ * Each sub-bitmap starts at bit (scrub_bitmap_nr_##name * nr_sectors).
+ */
+enum {
+ /* Which blocks are covered by extent items. */
+ scrub_bitmap_nr_has_extent = 0,
+
+ /* Which blocks are metadata. */
+ scrub_bitmap_nr_is_metadata,
+
+ /*
+ * Which blocks have errors, including IO, csum, and metadata
+ * errors.
+ * This sub-bitmap is the OR result of the next few error-related
+ * sub-bitmaps.
+ */
+ scrub_bitmap_nr_error,
+ scrub_bitmap_nr_io_error,
+ scrub_bitmap_nr_csum_error,
+ scrub_bitmap_nr_meta_error,
+ scrub_bitmap_nr_meta_gen_error,
+ scrub_bitmap_nr_last,
+};
+
+#define SCRUB_STRIPE_MAX_FOLIOS (BTRFS_STRIPE_LEN / PAGE_SIZE)
/*
* Represent one contiguous range with a length of BTRFS_STRIPE_LEN.
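
The comment above describes packing several per-sector sub-bitmaps into one array, each sub-bitmap starting at scrub_bitmap_nr_##name * nr_sectors. A standalone sketch of that packed layout with plain bit operations; the constants and names are illustrative only.

#include <stdio.h>

/* Sub-bitmap indices; the names and count here are illustrative. */
enum { BM_HAS_EXTENT = 0, BM_IS_METADATA, BM_ERROR, BM_NR_LAST };

#define NR_BLOCKS 16u	/* e.g. a 64K stripe of 4K blocks */

/* All sub-bitmaps packed into one word: BM_NR_LAST * NR_BLOCKS = 48 bits. */
static unsigned long long packed;

static unsigned int start_bit(unsigned int which, unsigned int block_nr)
{
	return which * NR_BLOCKS + block_nr;
}

static void set_packed(unsigned int which, unsigned int block_nr)
{
	packed |= 1ULL << start_bit(which, block_nr);
}

static int test_packed(unsigned int which, unsigned int block_nr)
{
	return (packed >> start_bit(which, block_nr)) & 1;
}

int main(void)
{
	set_packed(BM_IS_METADATA, 4);
	set_packed(BM_ERROR, 4);

	printf("block 4: has_extent=%d is_metadata=%d error=%d\n",
	       test_packed(BM_HAS_EXTENT, 4),
	       test_packed(BM_IS_METADATA, 4),
	       test_packed(BM_ERROR, 4));
	return 0;
}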
@@ -109,7 +139,7 @@ struct scrub_stripe {
struct scrub_ctx *sctx;
struct btrfs_block_group *bg;
- struct page *pages[SCRUB_STRIPE_PAGES];
+ struct folio *folios[SCRUB_STRIPE_MAX_FOLIOS];
struct scrub_sector_verification *sectors;
struct btrfs_device *dev;
@@ -138,36 +168,15 @@ struct scrub_stripe {
*/
unsigned long state;
- /* Indicate which sectors are covered by extent items. */
- unsigned long extent_sector_bitmap;
+ /* The large bitmap contains all the sub-bitmaps. */
+ unsigned long bitmaps[BITS_TO_LONGS(scrub_bitmap_nr_last *
+ (BTRFS_STRIPE_LEN / BTRFS_MIN_BLOCKSIZE))];
/*
- * The errors hit during the initial read of the stripe.
- *
- * Would be utilized for error reporting and repair.
- *
- * The remaining init_nr_* records the number of errors hit, only used
- * by error reporting.
+ * For writeback (repair or replace) error reporting.
+ * This one is protected by a spinlock, thus cannot be packed into
+ * the larger bitmap.
*/
- unsigned long init_error_bitmap;
- unsigned int init_nr_io_errors;
- unsigned int init_nr_csum_errors;
- unsigned int init_nr_meta_errors;
-
- /*
- * The following error bitmaps are all for the current status.
- * Every time we submit a new read, these bitmaps may be updated.
- *
- * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap;
- *
- * IO and csum errors can happen for both metadata and data.
- */
- unsigned long error_bitmap;
- unsigned long io_error_bitmap;
- unsigned long csum_error_bitmap;
- unsigned long meta_error_bitmap;
-
- /* For writeback (repair or replace) error reporting. */
unsigned long write_error_bitmap;
/* Writeback can be concurrent, thus we need to protect the bitmap. */
@@ -197,7 +206,7 @@ struct scrub_ctx {
ktime_t throttle_deadline;
u64 throttle_sent;
- int is_dev_replace;
+ bool is_dev_replace;
u64 write_pointer;
struct mutex wr_lock;
@@ -219,6 +228,90 @@ struct scrub_ctx {
refcount_t refs;
};
+#define scrub_calc_start_bit(stripe, name, block_nr) \
+({ \
+ unsigned int __start_bit; \
+ \
+ ASSERT(block_nr < stripe->nr_sectors, \
+ "nr_sectors=%u block_nr=%u", stripe->nr_sectors, block_nr); \
+ __start_bit = scrub_bitmap_nr_##name * stripe->nr_sectors + block_nr; \
+ __start_bit; \
+})
+
+#define IMPLEMENT_SCRUB_BITMAP_OPS(name) \
+static inline void scrub_bitmap_set_##name(struct scrub_stripe *stripe, \
+ unsigned int block_nr, \
+ unsigned int nr_blocks) \
+{ \
+ const unsigned int start_bit = scrub_calc_start_bit(stripe, \
+ name, block_nr); \
+ \
+ bitmap_set(stripe->bitmaps, start_bit, nr_blocks); \
+} \
+static inline void scrub_bitmap_clear_##name(struct scrub_stripe *stripe, \
+ unsigned int block_nr, \
+ unsigned int nr_blocks) \
+{ \
+ const unsigned int start_bit = scrub_calc_start_bit(stripe, name, \
+ block_nr); \
+ \
+ bitmap_clear(stripe->bitmaps, start_bit, nr_blocks); \
+} \
+static inline bool scrub_bitmap_test_bit_##name(struct scrub_stripe *stripe, \
+ unsigned int block_nr) \
+{ \
+ const unsigned int start_bit = scrub_calc_start_bit(stripe, name, \
+ block_nr); \
+ \
+ return test_bit(start_bit, stripe->bitmaps); \
+} \
+static inline void scrub_bitmap_set_bit_##name(struct scrub_stripe *stripe, \
+ unsigned int block_nr) \
+{ \
+ const unsigned int start_bit = scrub_calc_start_bit(stripe, name, \
+ block_nr); \
+ \
+ set_bit(start_bit, stripe->bitmaps); \
+} \
+static inline void scrub_bitmap_clear_bit_##name(struct scrub_stripe *stripe, \
+ unsigned int block_nr) \
+{ \
+ const unsigned int start_bit = scrub_calc_start_bit(stripe, name, \
+ block_nr); \
+ \
+ clear_bit(start_bit, stripe->bitmaps); \
+} \
+static inline unsigned long scrub_bitmap_read_##name(struct scrub_stripe *stripe) \
+{ \
+ const unsigned int nr_blocks = stripe->nr_sectors; \
+ \
+ ASSERT(nr_blocks > 0 && nr_blocks <= BITS_PER_LONG, \
+ "nr_blocks=%u BITS_PER_LONG=%u", \
+ nr_blocks, BITS_PER_LONG); \
+ \
+ return bitmap_read(stripe->bitmaps, nr_blocks * scrub_bitmap_nr_##name, \
+ stripe->nr_sectors); \
+} \
+static inline bool scrub_bitmap_empty_##name(struct scrub_stripe *stripe) \
+{ \
+ unsigned long bitmap = scrub_bitmap_read_##name(stripe); \
+ \
+ return bitmap_empty(&bitmap, stripe->nr_sectors); \
+} \
+static inline unsigned int scrub_bitmap_weight_##name(struct scrub_stripe *stripe) \
+{ \
+ unsigned long bitmap = scrub_bitmap_read_##name(stripe); \
+ \
+ return bitmap_weight(&bitmap, stripe->nr_sectors); \
+}
+IMPLEMENT_SCRUB_BITMAP_OPS(has_extent);
+IMPLEMENT_SCRUB_BITMAP_OPS(is_metadata);
+IMPLEMENT_SCRUB_BITMAP_OPS(error);
+IMPLEMENT_SCRUB_BITMAP_OPS(io_error);
+IMPLEMENT_SCRUB_BITMAP_OPS(csum_error);
+IMPLEMENT_SCRUB_BITMAP_OPS(meta_error);
+IMPLEMENT_SCRUB_BITMAP_OPS(meta_gen_error);
+
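
IMPLEMENT_SCRUB_BITMAP_OPS() stamps out the whole set/clear/test/read/weight family once per sub-bitmap name. A standalone sketch of the same macro code-generation idea on a trivial flags word; the names below are made up.

#include <stdio.h>

/* One macro stamps out a matching set/test pair per flag name. */
#define IMPLEMENT_FLAG_OPS(name, bit)				\
static void set_##name(unsigned int *flags)			\
{								\
	*flags |= (1u << (bit));				\
}								\
static int test_##name(const unsigned int *flags)		\
{								\
	return (*flags >> (bit)) & 1;				\
}

IMPLEMENT_FLAG_OPS(error, 0)
IMPLEMENT_FLAG_OPS(metadata, 1)

int main(void)
{
	unsigned int flags = 0;

	set_error(&flags);
	if (test_error(&flags))
		set_metadata(&flags);

	printf("error=%d metadata=%d\n",
	       test_error(&flags), test_metadata(&flags));
	return 0;
}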
struct scrub_warning {
struct btrfs_path *path;
u64 extent_item_size;
@@ -228,15 +321,28 @@ struct scrub_warning {
struct btrfs_device *dev;
};
+struct scrub_error_records {
+ /*
+ * Bitmap recording which blocks hit errors (IO/csum/...) during the
+ * initial read.
+ */
+ unsigned long init_error_bitmap;
+
+ unsigned int nr_io_errors;
+ unsigned int nr_csum_errors;
+ unsigned int nr_meta_errors;
+ unsigned int nr_meta_gen_errors;
+};
+
static void release_scrub_stripe(struct scrub_stripe *stripe)
{
if (!stripe)
return;
- for (int i = 0; i < SCRUB_STRIPE_PAGES; i++) {
- if (stripe->pages[i])
- __free_page(stripe->pages[i]);
- stripe->pages[i] = NULL;
+ for (int i = 0; i < SCRUB_STRIPE_MAX_FOLIOS; i++) {
+ if (stripe->folios[i])
+ folio_put(stripe->folios[i]);
+ stripe->folios[i] = NULL;
}
kfree(stripe->sectors);
kfree(stripe->csums);
@@ -249,6 +355,7 @@ static void release_scrub_stripe(struct scrub_stripe *stripe)
static int init_scrub_stripe(struct btrfs_fs_info *fs_info,
struct scrub_stripe *stripe)
{
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
int ret;
memset(stripe, 0, sizeof(*stripe));
@@ -261,7 +368,9 @@ static int init_scrub_stripe(struct btrfs_fs_info *fs_info,
atomic_set(&stripe->pending_io, 0);
spin_lock_init(&stripe->write_error_lock);
- ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages, false);
+ ASSERT(BTRFS_STRIPE_LEN >> min_folio_shift <= SCRUB_STRIPE_MAX_FOLIOS);
+ ret = btrfs_alloc_folio_array(BTRFS_STRIPE_LEN >> min_folio_shift,
+ fs_info->block_min_order, stripe->folios);
if (ret < 0)
goto error;
@@ -340,7 +449,7 @@ static void scrub_put_ctx(struct scrub_ctx *sctx)
}
static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
- struct btrfs_fs_info *fs_info, int is_dev_replace)
+ struct btrfs_fs_info *fs_info, bool is_dev_replace)
{
struct scrub_ctx *sctx;
int i;
@@ -354,10 +463,10 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
refcount_set(&sctx->refs, 1);
sctx->is_dev_replace = is_dev_replace;
sctx->fs_info = fs_info;
- sctx->extent_path.search_commit_root = 1;
- sctx->extent_path.skip_locking = 1;
- sctx->csum_path.search_commit_root = 1;
- sctx->csum_path.skip_locking = 1;
+ sctx->extent_path.search_commit_root = true;
+ sctx->extent_path.skip_locking = true;
+ sctx->csum_path.search_commit_root = true;
+ sctx->csum_path.skip_locking = true;
for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) {
int ret;
@@ -396,7 +505,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
struct btrfs_inode_item *inode_item;
struct scrub_warning *swarn = warn_ctx;
struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
- struct inode_fs_paths *ipath = NULL;
+ struct inode_fs_paths *ipath __free(inode_fs_paths) = NULL;
struct btrfs_root *local_root;
struct btrfs_key key;
@@ -450,8 +559,8 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
* hold all of the paths here
*/
for (i = 0; i < ipath->fspath->elem_cnt; ++i)
- btrfs_warn_in_rcu(fs_info,
-"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
+ btrfs_warn(fs_info,
+"scrub: %s at logical %llu on dev %s, physical %llu root %llu inode %llu offset %llu length %u links %u (path: %s)",
swarn->errstr, swarn->logical,
btrfs_dev_name(swarn->dev),
swarn->physical,
@@ -460,18 +569,16 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
(char *)(unsigned long)ipath->fspath->val[i]);
btrfs_put_root(local_root);
- free_ipath(ipath);
return 0;
err:
- btrfs_warn_in_rcu(fs_info,
- "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
+ btrfs_warn(fs_info,
+ "scrub: %s at logical %llu on dev %s, physical %llu root %llu inode %llu offset %llu: path resolving failed with ret=%d",
swarn->errstr, swarn->logical,
btrfs_dev_name(swarn->dev),
swarn->physical,
root, inum, offset, ret);
- free_ipath(ipath);
return 0;
}
@@ -479,7 +586,7 @@ static void scrub_print_common_warning(const char *errstr, struct btrfs_device *
bool is_super, u64 logical, u64 physical)
{
struct btrfs_fs_info *fs_info = dev->fs_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key found_key;
struct extent_buffer *eb;
struct btrfs_extent_item *ei;
@@ -490,7 +597,7 @@ static void scrub_print_common_warning(const char *errstr, struct btrfs_device *
/* Super block error, no need to search extent tree. */
if (is_super) {
- btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
+ btrfs_warn(fs_info, "scrub: %s on device %s, physical %llu",
errstr, btrfs_dev_name(dev), physical);
return;
}
@@ -506,7 +613,7 @@ static void scrub_print_common_warning(const char *errstr, struct btrfs_device *
ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
&flags);
if (ret < 0)
- goto out;
+ return;
swarn.extent_item_size = found_key.offset;
@@ -525,14 +632,14 @@ static void scrub_print_common_warning(const char *errstr, struct btrfs_device *
&ref_level);
if (ret < 0) {
btrfs_warn(fs_info,
- "failed to resolve tree backref for logical %llu: %d",
- swarn.logical, ret);
+ "scrub: failed to resolve tree backref for logical %llu: %d",
+ swarn.logical, ret);
break;
}
if (ret > 0)
break;
- btrfs_warn_in_rcu(fs_info,
-"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
+ btrfs_warn(fs_info,
+"scrub: %s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
errstr, swarn.logical, btrfs_dev_name(dev),
swarn.physical, (ref_level ? "node" : "leaf"),
ref_level, ref_root);
@@ -552,9 +659,6 @@ static void scrub_print_common_warning(const char *errstr, struct btrfs_device *
iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
}
-
-out:
- btrfs_free_path(path);
}
static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
@@ -579,20 +683,32 @@ static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
return ret;
}
-static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr)
+static void *scrub_stripe_get_kaddr(struct scrub_stripe *stripe, int sector_nr)
{
struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
- int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT;
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
+ u32 offset = (sector_nr << fs_info->sectorsize_bits);
+ const struct folio *folio = stripe->folios[offset >> min_folio_shift];
- return stripe->pages[page_index];
+ /* stripe->folios[] is allocated by us and no highmem is allowed. */
+ ASSERT(folio);
+ ASSERT(!folio_test_highmem(folio));
+ return folio_address(folio) + offset_in_folio(folio, offset);
}
-static unsigned int scrub_stripe_get_page_offset(struct scrub_stripe *stripe,
- int sector_nr)
+static phys_addr_t scrub_stripe_get_paddr(struct scrub_stripe *stripe, int sector_nr)
{
struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
+ u32 offset = (sector_nr << fs_info->sectorsize_bits);
+ const struct folio *folio = stripe->folios[offset >> min_folio_shift];
- return offset_in_page(sector_nr << fs_info->sectorsize_bits);
+ /* stripe->folios[] is allocated by us and no highmem is allowed. */
+ ASSERT(folio);
+ ASSERT(!folio_test_highmem(folio));
+ /* And the range must be contained inside the folio. */
+ ASSERT(offset_in_folio(folio, offset) + fs_info->sectorsize <= folio_size(folio));
+ return page_to_phys(folio_page(folio, 0)) + offset_in_folio(folio, offset);
}
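
scrub_stripe_get_kaddr()/get_paddr() above derive the folio index from (sector_nr << sectorsize_bits) >> min_folio_shift and then add the offset inside that folio. A standalone sketch of the same index/offset arithmetic with fixed illustrative sizes (4K blocks, 16K "folios"); not kernel code.

#include <stdio.h>

#define SECTORSIZE_BITS	12u			/* 4K fs blocks */
#define FOLIO_SHIFT	14u			/* pretend folios are 16K */
#define FOLIO_SIZE	(1u << FOLIO_SHIFT)
#define NR_FOLIOS	4u			/* covers a 64K stripe */

static char folios[NR_FOLIOS][FOLIO_SIZE];

/* Map a sector number to a byte pointer inside the right "folio". */
static char *sector_addr(unsigned int sector_nr)
{
	unsigned int offset = sector_nr << SECTORSIZE_BITS;
	unsigned int index = offset >> FOLIO_SHIFT;		/* which folio */
	unsigned int in_folio = offset & (FOLIO_SIZE - 1);	/* offset in it */

	return &folios[index][in_folio];
}

int main(void)
{
	unsigned int sector = 5;
	unsigned int offset = sector << SECTORSIZE_BITS;

	*sector_addr(sector) = 'x';
	printf("sector %u -> folio %u, offset %u, byte '%c'\n",
	       sector, offset >> FOLIO_SHIFT, offset & (FOLIO_SIZE - 1),
	       *sector_addr(sector));
	return 0;
}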
static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr)
@@ -600,46 +716,44 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr
struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits);
- const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr);
- const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr);
+ void *first_kaddr = scrub_stripe_get_kaddr(stripe, sector_nr);
+ struct btrfs_header *header = first_kaddr;
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
u8 on_disk_csum[BTRFS_CSUM_SIZE];
u8 calculated_csum[BTRFS_CSUM_SIZE];
- struct btrfs_header *header;
/*
* Here we don't have a good way to attach the pages (and subpages)
* to a dummy extent buffer, thus we have to directly grab the members
* from pages.
*/
- header = (struct btrfs_header *)(page_address(first_page) + first_off);
memcpy(on_disk_csum, header->csum, fs_info->csum_size);
if (logical != btrfs_stack_header_bytenr(header)) {
- bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
- bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
+ scrub_bitmap_set_meta_error(stripe, sector_nr, sectors_per_tree);
+ scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree);
btrfs_warn_rl(fs_info,
- "tree block %llu mirror %u has bad bytenr, has %llu want %llu",
+ "scrub: tree block %llu mirror %u has bad bytenr, has %llu want %llu",
logical, stripe->mirror_num,
btrfs_stack_header_bytenr(header), logical);
return;
}
if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid,
BTRFS_FSID_SIZE) != 0) {
- bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
- bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
+ scrub_bitmap_set_meta_error(stripe, sector_nr, sectors_per_tree);
+ scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree);
btrfs_warn_rl(fs_info,
- "tree block %llu mirror %u has bad fsid, has %pU want %pU",
+ "scrub: tree block %llu mirror %u has bad fsid, has %pU want %pU",
logical, stripe->mirror_num,
header->fsid, fs_info->fs_devices->fsid);
return;
}
if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
BTRFS_UUID_SIZE) != 0) {
- bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
- bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
+ scrub_bitmap_set_meta_error(stripe, sector_nr, sectors_per_tree);
+ scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree);
btrfs_warn_rl(fs_info,
- "tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
+ "scrub: tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
logical, stripe->mirror_num,
header->chunk_tree_uuid, fs_info->chunk_tree_uuid);
return;
@@ -648,42 +762,40 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr
/* Now check tree block csum. */
shash->tfm = fs_info->csum_shash;
crypto_shash_init(shash);
- crypto_shash_update(shash, page_address(first_page) + first_off +
- BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE);
+ crypto_shash_update(shash, first_kaddr + BTRFS_CSUM_SIZE,
+ fs_info->sectorsize - BTRFS_CSUM_SIZE);
for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) {
- struct page *page = scrub_stripe_get_page(stripe, i);
- unsigned int page_off = scrub_stripe_get_page_offset(stripe, i);
-
- crypto_shash_update(shash, page_address(page) + page_off,
+ crypto_shash_update(shash, scrub_stripe_get_kaddr(stripe, i),
fs_info->sectorsize);
}
crypto_shash_final(shash, calculated_csum);
if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) {
- bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
- bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
+ scrub_bitmap_set_meta_error(stripe, sector_nr, sectors_per_tree);
+ scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree);
btrfs_warn_rl(fs_info,
- "tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
+"scrub: tree block %llu mirror %u has bad csum, has " BTRFS_CSUM_FMT " want " BTRFS_CSUM_FMT,
logical, stripe->mirror_num,
- CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
- CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
+ BTRFS_CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
+ BTRFS_CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
return;
}
if (stripe->sectors[sector_nr].generation !=
btrfs_stack_header_generation(header)) {
- bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
- bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
+ scrub_bitmap_set_meta_gen_error(stripe, sector_nr, sectors_per_tree);
+ scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree);
btrfs_warn_rl(fs_info,
- "tree block %llu mirror %u has bad generation, has %llu want %llu",
+ "scrub: tree block %llu mirror %u has bad generation, has %llu want %llu",
logical, stripe->mirror_num,
btrfs_stack_header_generation(header),
stripe->sectors[sector_nr].generation);
return;
}
- bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree);
- bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
- bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
+ scrub_bitmap_clear_error(stripe, sector_nr, sectors_per_tree);
+ scrub_bitmap_clear_csum_error(stripe, sector_nr, sectors_per_tree);
+ scrub_bitmap_clear_meta_error(stripe, sector_nr, sectors_per_tree);
+ scrub_bitmap_clear_meta_gen_error(stripe, sector_nr, sectors_per_tree);
}
static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
@@ -691,23 +803,22 @@ static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
struct scrub_sector_verification *sector = &stripe->sectors[sector_nr];
const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
- struct page *page = scrub_stripe_get_page(stripe, sector_nr);
- unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
+ phys_addr_t paddr = scrub_stripe_get_paddr(stripe, sector_nr);
u8 csum_buf[BTRFS_CSUM_SIZE];
int ret;
ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors);
/* Sector not utilized, skip it. */
- if (!test_bit(sector_nr, &stripe->extent_sector_bitmap))
+ if (!scrub_bitmap_test_bit_has_extent(stripe, sector_nr))
return;
/* IO error, no need to check. */
- if (test_bit(sector_nr, &stripe->io_error_bitmap))
+ if (scrub_bitmap_test_bit_io_error(stripe, sector_nr))
return;
/* Metadata, verify the full tree block. */
- if (sector->is_metadata) {
+ if (scrub_bitmap_test_bit_is_metadata(stripe, sector_nr)) {
/*
* Check if the tree block crosses the stripe boundary. If
* crossed the boundary, we cannot verify it but only give a
@@ -718,7 +829,7 @@ static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
*/
if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) {
btrfs_warn_rl(fs_info,
- "tree block at %llu crosses stripe boundary %llu",
+ "scrub: tree block at %llu crosses stripe boundary %llu",
stripe->logical +
(sector_nr << fs_info->sectorsize_bits),
stripe->logical);
@@ -733,17 +844,17 @@ static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
* cases without csum, we have no other choice but to trust it.
*/
if (!sector->csum) {
- clear_bit(sector_nr, &stripe->error_bitmap);
+ scrub_bitmap_clear_bit_error(stripe, sector_nr);
return;
}
- ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum);
+ ret = btrfs_check_block_csum(fs_info, paddr, csum_buf, sector->csum);
if (ret < 0) {
- set_bit(sector_nr, &stripe->csum_error_bitmap);
- set_bit(sector_nr, &stripe->error_bitmap);
+ scrub_bitmap_set_bit_csum_error(stripe, sector_nr);
+ scrub_bitmap_set_bit_error(stripe, sector_nr);
} else {
- clear_bit(sector_nr, &stripe->csum_error_bitmap);
- clear_bit(sector_nr, &stripe->error_bitmap);
+ scrub_bitmap_clear_bit_csum_error(stripe, sector_nr);
+ scrub_bitmap_clear_bit_error(stripe, sector_nr);
}
}
@@ -756,7 +867,7 @@ static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long b
for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) {
scrub_verify_one_sector(stripe, sector_nr);
- if (stripe->sectors[sector_nr].is_metadata)
+ if (scrub_bitmap_test_bit_is_metadata(stripe, sector_nr))
sector_nr += sectors_per_tree - 1;
}
}
@@ -766,8 +877,7 @@ static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first
int i;
for (i = 0; i < stripe->nr_sectors; i++) {
- if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page &&
- scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset)
+ if (scrub_stripe_get_kaddr(stripe, i) == bvec_virt(first_bvec))
break;
}
ASSERT(i < stripe->nr_sectors);
@@ -795,13 +905,13 @@ static void scrub_repair_read_endio(struct btrfs_bio *bbio)
bio_size += bvec->bv_len;
if (bbio->bio.bi_status) {
- bitmap_set(&stripe->io_error_bitmap, sector_nr,
- bio_size >> fs_info->sectorsize_bits);
- bitmap_set(&stripe->error_bitmap, sector_nr,
- bio_size >> fs_info->sectorsize_bits);
+ scrub_bitmap_set_io_error(stripe, sector_nr,
+ bio_size >> fs_info->sectorsize_bits);
+ scrub_bitmap_set_error(stripe, sector_nr,
+ bio_size >> fs_info->sectorsize_bits);
} else {
- bitmap_clear(&stripe->io_error_bitmap, sector_nr,
- bio_size >> fs_info->sectorsize_bits);
+ scrub_bitmap_clear_io_error(stripe, sector_nr,
+ bio_size >> fs_info->sectorsize_bits);
}
bio_put(&bbio->bio);
if (atomic_dec_and_test(&stripe->pending_io))
@@ -814,27 +924,55 @@ static int calc_next_mirror(int mirror, int num_copies)
return (mirror + 1 > num_copies) ? 1 : mirror + 1;
}
+static void scrub_bio_add_sector(struct btrfs_bio *bbio, struct scrub_stripe *stripe,
+ int sector_nr)
+{
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
+ void *kaddr = scrub_stripe_get_kaddr(stripe, sector_nr);
+ int ret;
+
+ ret = bio_add_page(&bbio->bio, virt_to_page(kaddr), fs_info->sectorsize,
+ offset_in_page(kaddr));
+ /*
+ * The caller should ensure the bbio has enough size.
+ * We cannot use __bio_add_page(), which doesn't do any merging.
+ *
+ * Meanwhile scrub_submit_initial_read() fully relies on the merging
+ * to create the minimal number of bio vectors, for the fs block size <
+ * page size case.
+ */
+ ASSERT(ret == fs_info->sectorsize);
+}
+
+static struct btrfs_bio *alloc_scrub_bbio(struct btrfs_fs_info *fs_info,
+ unsigned int nr_vecs, blk_opf_t opf,
+ u64 logical,
+ btrfs_bio_end_io_t end_io, void *private)
+{
+ struct btrfs_bio *bbio;
+
+ bbio = btrfs_bio_alloc(nr_vecs, opf, BTRFS_I(fs_info->btree_inode),
+ logical, end_io, private);
+ bbio->is_scrub = true;
+ bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
+ return bbio;
+}
+
static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
int mirror, int blocksize, bool wait)
{
struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
struct btrfs_bio *bbio = NULL;
- const unsigned long old_error_bitmap = stripe->error_bitmap;
+ const unsigned long old_error_bitmap = scrub_bitmap_read_error(stripe);
int i;
- ASSERT(stripe->mirror_num >= 1);
- ASSERT(atomic_read(&stripe->pending_io) == 0);
+ ASSERT(stripe->mirror_num >= 1, "stripe->mirror_num=%d", stripe->mirror_num);
+ ASSERT(atomic_read(&stripe->pending_io) == 0,
+ "atomic_read(&stripe->pending_io)=%d", atomic_read(&stripe->pending_io));
for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) {
- struct page *page;
- int pgoff;
- int ret;
-
- page = scrub_stripe_get_page(stripe, i);
- pgoff = scrub_stripe_get_page_offset(stripe, i);
-
/* The current sector cannot be merged, submit the bio. */
- if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) ||
+ if (bbio && ((i > 0 && !test_bit(i - 1, &old_error_bitmap)) ||
bbio->bio.bi_iter.bi_size >= blocksize)) {
ASSERT(bbio->bio.bi_iter.bi_size);
atomic_inc(&stripe->pending_io);
@@ -844,15 +982,12 @@ static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
bbio = NULL;
}
- if (!bbio) {
- bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
- fs_info, scrub_repair_read_endio, stripe);
- bbio->bio.bi_iter.bi_sector = (stripe->logical +
- (i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT;
- }
+ if (!bbio)
+ bbio = alloc_scrub_bbio(fs_info, stripe->nr_sectors, REQ_OP_READ,
+ stripe->logical + (i << fs_info->sectorsize_bits),
+ scrub_repair_read_endio, stripe);
- ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
- ASSERT(ret == fs_info->sectorsize);
+ scrub_bio_add_sector(bbio, stripe, i);
}
if (bbio) {
ASSERT(bbio->bio.bi_iter.bi_size);
@@ -864,12 +999,15 @@ static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
}
static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
- struct scrub_stripe *stripe)
+ struct scrub_stripe *stripe,
+ const struct scrub_error_records *errors)
{
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_device *dev = NULL;
+ const unsigned long extent_bitmap = scrub_bitmap_read_has_extent(stripe);
+ const unsigned long error_bitmap = scrub_bitmap_read_error(stripe);
u64 physical = 0;
int nr_data_sectors = 0;
int nr_meta_sectors = 0;
@@ -886,14 +1024,14 @@ static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
* Although our scrub_stripe infrastructure is mostly based on btrfs_submit_bio()
* thus no need for dev/physical, error reporting still needs dev and physical.
*/
- if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) {
+ if (!bitmap_empty(&errors->init_error_bitmap, stripe->nr_sectors)) {
u64 mapped_len = fs_info->sectorsize;
struct btrfs_io_context *bioc = NULL;
int stripe_index = stripe->mirror_num - 1;
int ret;
/* For scrub, our mirror_num should always start at 1. */
- ASSERT(stripe->mirror_num >= 1);
+ ASSERT(stripe->mirror_num >= 1, "stripe->mirror_num=%d", stripe->mirror_num);
ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
stripe->logical, &mapped_len, &bioc,
NULL, NULL);
@@ -909,10 +1047,10 @@ static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
}
skip:
- for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
+ for_each_set_bit(sector_nr, &extent_bitmap, stripe->nr_sectors) {
bool repaired = false;
- if (stripe->sectors[sector_nr].is_metadata) {
+ if (scrub_bitmap_test_bit_is_metadata(stripe, sector_nr)) {
nr_meta_sectors++;
} else {
nr_data_sectors++;
@@ -920,14 +1058,14 @@ skip:
nr_nodatacsum_sectors++;
}
- if (test_bit(sector_nr, &stripe->init_error_bitmap) &&
- !test_bit(sector_nr, &stripe->error_bitmap)) {
+ if (test_bit(sector_nr, &errors->init_error_bitmap) &&
+ !test_bit(sector_nr, &error_bitmap)) {
nr_repaired_sectors++;
repaired = true;
}
/* Good sector from the beginning, nothing need to be done. */
- if (!test_bit(sector_nr, &stripe->init_error_bitmap))
+ if (!test_bit(sector_nr, &errors->init_error_bitmap))
continue;
/*
@@ -936,13 +1074,13 @@ skip:
*/
if (repaired) {
if (dev) {
- btrfs_err_rl_in_rcu(fs_info,
- "fixed up error at logical %llu on dev %s physical %llu",
+ btrfs_err_rl(fs_info,
+ "scrub: fixed up error at logical %llu on dev %s physical %llu",
stripe->logical, btrfs_dev_name(dev),
physical);
} else {
- btrfs_err_rl_in_rcu(fs_info,
- "fixed up error at logical %llu on mirror %u",
+ btrfs_err_rl(fs_info,
+ "scrub: fixed up error at logical %llu on mirror %u",
stripe->logical, stripe->mirror_num);
}
continue;
@@ -950,41 +1088,56 @@ skip:
/* The remaining are all for unrepaired. */
if (dev) {
- btrfs_err_rl_in_rcu(fs_info,
- "unable to fixup (regular) error at logical %llu on dev %s physical %llu",
+ btrfs_err_rl(fs_info,
+"scrub: unable to fixup (regular) error at logical %llu on dev %s physical %llu",
stripe->logical, btrfs_dev_name(dev),
physical);
} else {
- btrfs_err_rl_in_rcu(fs_info,
- "unable to fixup (regular) error at logical %llu on mirror %u",
+ btrfs_err_rl(fs_info,
+ "scrub: unable to fixup (regular) error at logical %llu on mirror %u",
stripe->logical, stripe->mirror_num);
}
- if (test_bit(sector_nr, &stripe->io_error_bitmap))
+ if (scrub_bitmap_test_bit_io_error(stripe, sector_nr))
if (__ratelimit(&rs) && dev)
scrub_print_common_warning("i/o error", dev, false,
stripe->logical, physical);
- if (test_bit(sector_nr, &stripe->csum_error_bitmap))
+ if (scrub_bitmap_test_bit_csum_error(stripe, sector_nr))
if (__ratelimit(&rs) && dev)
scrub_print_common_warning("checksum error", dev, false,
stripe->logical, physical);
- if (test_bit(sector_nr, &stripe->meta_error_bitmap))
+ if (scrub_bitmap_test_bit_meta_error(stripe, sector_nr))
if (__ratelimit(&rs) && dev)
scrub_print_common_warning("header error", dev, false,
stripe->logical, physical);
+ if (scrub_bitmap_test_bit_meta_gen_error(stripe, sector_nr))
+ if (__ratelimit(&rs) && dev)
+ scrub_print_common_warning("generation error", dev, false,
+ stripe->logical, physical);
}
+ /* Update the device stats. */
+ for (int i = 0; i < errors->nr_io_errors; i++)
+ btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_READ_ERRS);
+ for (int i = 0; i < errors->nr_csum_errors; i++)
+ btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
+ /* Generation mismatch errors are counted per metadata (tree) block, not per fs block. */
+ for (int i = 0; i < errors->nr_meta_gen_errors;
+ i += (fs_info->nodesize >> fs_info->sectorsize_bits))
+ btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_GENERATION_ERRS);
+
spin_lock(&sctx->stat_lock);
sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
sctx->stat.no_csum += nr_nodatacsum_sectors;
- sctx->stat.read_errors += stripe->init_nr_io_errors;
- sctx->stat.csum_errors += stripe->init_nr_csum_errors;
- sctx->stat.verify_errors += stripe->init_nr_meta_errors;
+ sctx->stat.read_errors += errors->nr_io_errors;
+ sctx->stat.csum_errors += errors->nr_csum_errors;
+ sctx->stat.verify_errors += errors->nr_meta_errors +
+ errors->nr_meta_gen_errors;
sctx->stat.uncorrectable_errors +=
- bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors);
+ bitmap_weight(&error_bitmap, stripe->nr_sectors);
sctx->stat.corrected_errors += nr_repaired_sectors;
spin_unlock(&sctx->stat_lock);
}
@@ -1010,26 +1163,26 @@ static void scrub_stripe_read_repair_worker(struct work_struct *work)
struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work);
struct scrub_ctx *sctx = stripe->sctx;
struct btrfs_fs_info *fs_info = sctx->fs_info;
+ struct scrub_error_records errors = { 0 };
int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
stripe->bg->length);
unsigned long repaired;
+ unsigned long error;
int mirror;
int i;
- ASSERT(stripe->mirror_num > 0);
+ ASSERT(stripe->mirror_num >= 1, "stripe->mirror_num=%d", stripe->mirror_num);
wait_scrub_stripe_io(stripe);
- scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap);
+ scrub_verify_one_stripe(stripe, scrub_bitmap_read_has_extent(stripe));
/* Save the initial failed bitmap for later repair and report usage. */
- stripe->init_error_bitmap = stripe->error_bitmap;
- stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap,
- stripe->nr_sectors);
- stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap,
- stripe->nr_sectors);
- stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap,
- stripe->nr_sectors);
-
- if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors))
+ errors.init_error_bitmap = scrub_bitmap_read_error(stripe);
+ errors.nr_io_errors = scrub_bitmap_weight_io_error(stripe);
+ errors.nr_csum_errors = scrub_bitmap_weight_csum_error(stripe);
+ errors.nr_meta_errors = scrub_bitmap_weight_meta_error(stripe);
+ errors.nr_meta_gen_errors = scrub_bitmap_weight_meta_gen_error(stripe);
+
+ if (bitmap_empty(&errors.init_error_bitmap, stripe->nr_sectors))
goto out;
/*
@@ -1041,13 +1194,13 @@ static void scrub_stripe_read_repair_worker(struct work_struct *work)
for (mirror = calc_next_mirror(stripe->mirror_num, num_copies);
mirror != stripe->mirror_num;
mirror = calc_next_mirror(mirror, num_copies)) {
- const unsigned long old_error_bitmap = stripe->error_bitmap;
+ const unsigned long old_error_bitmap = scrub_bitmap_read_error(stripe);
scrub_stripe_submit_repair_read(stripe, mirror,
BTRFS_STRIPE_LEN, false);
wait_scrub_stripe_io(stripe);
scrub_verify_one_stripe(stripe, old_error_bitmap);
- if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
+ if (scrub_bitmap_empty_error(stripe))
goto out;
}
@@ -1065,21 +1218,22 @@ static void scrub_stripe_read_repair_worker(struct work_struct *work)
for (i = 0, mirror = stripe->mirror_num;
i < num_copies;
i++, mirror = calc_next_mirror(mirror, num_copies)) {
- const unsigned long old_error_bitmap = stripe->error_bitmap;
+ const unsigned long old_error_bitmap = scrub_bitmap_read_error(stripe);
scrub_stripe_submit_repair_read(stripe, mirror,
fs_info->sectorsize, true);
wait_scrub_stripe_io(stripe);
scrub_verify_one_stripe(stripe, old_error_bitmap);
- if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
+ if (scrub_bitmap_empty_error(stripe))
goto out;
}
out:
+ error = scrub_bitmap_read_error(stripe);
/*
* Submit the repaired sectors. For zoned case, we cannot do repair
* in-place, but queue the bg to be relocated.
*/
- bitmap_andnot(&repaired, &stripe->init_error_bitmap, &stripe->error_bitmap,
+ bitmap_andnot(&repaired, &errors.init_error_bitmap, &error,
stripe->nr_sectors);
if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) {
if (btrfs_is_zoned(fs_info)) {
@@ -1090,7 +1244,7 @@ out:
}
}
- scrub_stripe_report_errors(sctx, stripe);
+ scrub_stripe_report_errors(sctx, stripe, &errors);
set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state);
wake_up(&stripe->repair_wait);
}
@@ -1110,10 +1264,10 @@ static void scrub_read_endio(struct btrfs_bio *bbio)
num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;
if (bbio->bio.bi_status) {
- bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors);
- bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors);
+ scrub_bitmap_set_io_error(stripe, sector_nr, num_sectors);
+ scrub_bitmap_set_error(stripe, sector_nr, num_sectors);
} else {
- bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors);
+ scrub_bitmap_clear_io_error(stripe, sector_nr, num_sectors);
}
bio_put(&bbio->bio);
if (atomic_dec_and_test(&stripe->pending_io)) {
@@ -1142,6 +1296,9 @@ static void scrub_write_endio(struct btrfs_bio *bbio)
bitmap_set(&stripe->write_error_bitmap, sector_nr,
bio_size >> fs_info->sectorsize_bits);
spin_unlock_irqrestore(&stripe->write_error_lock, flags);
+ for (i = 0; i < (bio_size >> fs_info->sectorsize_bits); i++)
+ btrfs_dev_stat_inc_and_print(stripe->dev,
+ BTRFS_DEV_STAT_WRITE_ERRS);
}
bio_put(&bbio->bio);
@@ -1199,27 +1356,19 @@ static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *str
int sector_nr;
for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) {
- struct page *page = scrub_stripe_get_page(stripe, sector_nr);
- unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
- int ret;
-
/* We should only writeback sectors covered by an extent. */
- ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap));
+ ASSERT(scrub_bitmap_test_bit_has_extent(stripe, sector_nr));
/* Cannot merge with previous sector, submit the current one. */
if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) {
scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
bbio = NULL;
}
- if (!bbio) {
- bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE,
- fs_info, scrub_write_endio, stripe);
- bbio->bio.bi_iter.bi_sector = (stripe->logical +
- (sector_nr << fs_info->sectorsize_bits)) >>
- SECTOR_SHIFT;
- }
- ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
- ASSERT(ret == fs_info->sectorsize);
+ if (!bbio)
+ bbio = alloc_scrub_bbio(fs_info, stripe->nr_sectors, REQ_OP_WRITE,
+ stripe->logical + (sector_nr << fs_info->sectorsize_bits),
+ scrub_write_endio, stripe);
+ scrub_bio_add_sector(bbio, stripe, sector_nr);
}
if (bbio)
scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
@@ -1246,8 +1395,7 @@ static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *d
* Slice is divided into intervals when the IO is submitted, adjust by
* bwlimit and maximum of 64 intervals.
*/
- div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
- div = min_t(u32, 64, div);
+ div = clamp(bwlimit / (16 * 1024 * 1024), 1, 64);
/* Start new epoch, set deadline */
now = ktime_get();
@@ -1339,7 +1487,7 @@ static int compare_extent_item_range(struct btrfs_path *path,
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
- key.type == BTRFS_METADATA_ITEM_KEY);
+ key.type == BTRFS_METADATA_ITEM_KEY, "key.type=%u", key.type);
if (key.type == BTRFS_METADATA_ITEM_KEY)
len = fs_info->nodesize;
else
@@ -1380,17 +1528,17 @@ static int find_first_extent_item(struct btrfs_root *extent_root,
if (path->nodes[0])
goto search_forward;
+ key.objectid = search_start;
if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
key.type = BTRFS_METADATA_ITEM_KEY;
else
key.type = BTRFS_EXTENT_ITEM_KEY;
- key.objectid = search_start;
key.offset = (u64)-1;
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
if (ret < 0)
return ret;
- if (ret == 0) {
+ if (unlikely(ret == 0)) {
/*
* Key with offset -1 found, there would have to exist an extent
* item with such offset, but this is out of the valid range.
@@ -1444,7 +1592,7 @@ static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
- key.type == BTRFS_EXTENT_ITEM_KEY);
+ key.type == BTRFS_EXTENT_ITEM_KEY, "key.type=%u", key.type);
*extent_start_ret = key.objectid;
if (key.type == BTRFS_METADATA_ITEM_KEY)
*size_ret = path->nodes[0]->fs_info->nodesize;
@@ -1470,8 +1618,7 @@ static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
physical,
sctx->write_pointer);
if (ret)
- btrfs_err(fs_info,
- "zoned: failed to recover write pointer");
+ btrfs_err(fs_info, "scrub: zoned: failed to recover write pointer");
}
mutex_unlock(&sctx->wr_lock);
btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
@@ -1493,9 +1640,9 @@ static void fill_one_extent_info(struct btrfs_fs_info *fs_info,
struct scrub_sector_verification *sector =
&stripe->sectors[nr_sector];
- set_bit(nr_sector, &stripe->extent_sector_bitmap);
+ scrub_bitmap_set_bit_has_extent(stripe, nr_sector);
if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
- sector->is_metadata = true;
+ scrub_bitmap_set_bit_is_metadata(stripe, nr_sector);
sector->generation = extent_gen;
}
}
@@ -1503,15 +1650,8 @@ static void fill_one_extent_info(struct btrfs_fs_info *fs_info,
static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
{
- stripe->extent_sector_bitmap = 0;
- stripe->init_error_bitmap = 0;
- stripe->init_nr_io_errors = 0;
- stripe->init_nr_csum_errors = 0;
- stripe->init_nr_meta_errors = 0;
- stripe->error_bitmap = 0;
- stripe->io_error_bitmap = 0;
- stripe->csum_error_bitmap = 0;
- stripe->meta_error_bitmap = 0;
+ ASSERT(stripe->nr_sectors);
+ bitmap_zero(stripe->bitmaps, scrub_bitmap_nr_last * stripe->nr_sectors);
}
/*
@@ -1541,12 +1681,18 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
u64 extent_gen;
int ret;
+ if (unlikely(!extent_root || !csum_root)) {
+ btrfs_err(fs_info, "scrub: no valid extent or csum root found");
+ return -EUCLEAN;
+ }
memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
stripe->nr_sectors);
scrub_stripe_reset_bitmaps(stripe);
/* The range must be inside the bg. */
- ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
+ ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length,
+ "bg->start=%llu logical_start=%llu logical_end=%llu end=%llu",
+ bg->start, logical_start, logical_end, bg->start + bg->length);
ret = find_first_extent_item(extent_root, extent_path, logical_start,
logical_len);
@@ -1642,7 +1788,6 @@ static void scrub_reset_stripe(struct scrub_stripe *stripe)
stripe->state = 0;
for (int i = 0; i < stripe->nr_sectors; i++) {
- stripe->sectors[i].is_metadata = false;
stripe->sectors[i].csum = NULL;
stripe->sectors[i].generation = 0;
}
@@ -1661,24 +1806,21 @@ static void scrub_submit_extent_sector_read(struct scrub_stripe *stripe)
struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
struct btrfs_bio *bbio = NULL;
unsigned int nr_sectors = stripe_length(stripe) >> fs_info->sectorsize_bits;
+ const unsigned long has_extent = scrub_bitmap_read_has_extent(stripe);
u64 stripe_len = BTRFS_STRIPE_LEN;
int mirror = stripe->mirror_num;
int i;
atomic_inc(&stripe->pending_io);
- for_each_set_bit(i, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
- struct page *page = scrub_stripe_get_page(stripe, i);
- unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i);
-
+ for_each_set_bit(i, &has_extent, stripe->nr_sectors) {
/* We're beyond the chunk boundary, no need to read anymore. */
if (i >= nr_sectors)
break;
/* The current sector cannot be merged, submit the bio. */
if (bbio &&
- ((i > 0 &&
- !test_bit(i - 1, &stripe->extent_sector_bitmap)) ||
+ ((i > 0 && !test_bit(i - 1, &has_extent)) ||
bbio->bio.bi_iter.bi_size >= stripe_len)) {
ASSERT(bbio->bio.bi_iter.bi_size);
atomic_inc(&stripe->pending_io);
@@ -1691,7 +1833,7 @@ static void scrub_submit_extent_sector_read(struct scrub_stripe *stripe)
struct btrfs_io_context *bioc = NULL;
const u64 logical = stripe->logical +
(i << fs_info->sectorsize_bits);
- int err;
+ int ret;
io_stripe.rst_search_commit_root = true;
stripe_len = (nr_sectors - i) << fs_info->sectorsize_bits;
@@ -1699,11 +1841,11 @@ static void scrub_submit_extent_sector_read(struct scrub_stripe *stripe)
* For RST cases, we need to manually split the bbio to
* follow the RST boundary.
*/
- err = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
&stripe_len, &bioc, &io_stripe, &mirror);
btrfs_put_bioc(bioc);
- if (err < 0) {
- if (err != -ENODATA) {
+ if (ret < 0) {
+ if (ret != -ENODATA) {
/*
* Earlier btrfs_get_raid_extent_offset()
* returned -ENODATA, which means there's
@@ -1712,18 +1854,17 @@ static void scrub_submit_extent_sector_read(struct scrub_stripe *stripe)
* the extent tree, then it's a preallocated
* extent and not an error.
*/
- set_bit(i, &stripe->io_error_bitmap);
- set_bit(i, &stripe->error_bitmap);
+ scrub_bitmap_set_bit_io_error(stripe, i);
+ scrub_bitmap_set_bit_error(stripe, i);
}
continue;
}
- bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
- fs_info, scrub_read_endio, stripe);
- bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
+ bbio = alloc_scrub_bbio(fs_info, stripe->nr_sectors, REQ_OP_READ,
+ logical, scrub_read_endio, stripe);
}
- __bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
+ scrub_bio_add_sector(bbio, stripe, i);
}
if (bbio) {
@@ -1744,6 +1885,7 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
{
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_bio *bbio;
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
unsigned int nr_sectors = stripe_length(stripe) >> fs_info->sectorsize_bits;
int mirror = stripe->mirror_num;
@@ -1756,20 +1898,11 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
return;
}
- bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
- scrub_read_endio, stripe);
-
- bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
+ bbio = alloc_scrub_bbio(fs_info, BTRFS_STRIPE_LEN >> min_folio_shift, REQ_OP_READ,
+ stripe->logical, scrub_read_endio, stripe);
/* Read the whole range inside the chunk boundary. */
- for (unsigned int cur = 0; cur < nr_sectors; cur++) {
- struct page *page = scrub_stripe_get_page(stripe, cur);
- unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
- int ret;
-
- ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
- /* We should have allocated enough bio vectors. */
- ASSERT(ret == fs_info->sectorsize);
- }
+ for (unsigned int cur = 0; cur < nr_sectors; cur++)
+ scrub_bio_add_sector(bbio, stripe, cur);
atomic_inc(&stripe->pending_io);
/*
@@ -1790,14 +1923,15 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
static bool stripe_has_metadata_error(struct scrub_stripe *stripe)
{
+ const unsigned long error = scrub_bitmap_read_error(stripe);
int i;
- for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) {
- if (stripe->sectors[i].is_metadata) {
+ for_each_set_bit(i, &error, stripe->nr_sectors) {
+ if (scrub_bitmap_test_bit_is_metadata(stripe, i)) {
struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
btrfs_err(fs_info,
- "stripe %llu has unrepaired metadata sector at %llu",
+ "scrub: stripe %llu has unrepaired metadata sector at logical %llu",
stripe->logical,
stripe->logical + (i << fs_info->sectorsize_bits));
return true;
@@ -1861,20 +1995,23 @@ static int flush_scrub_stripes(struct scrub_ctx *sctx)
* metadata, we should immediately abort.
*/
for (int i = 0; i < nr_stripes; i++) {
- if (stripe_has_metadata_error(&sctx->stripes[i])) {
+ if (unlikely(stripe_has_metadata_error(&sctx->stripes[i]))) {
ret = -EIO;
goto out;
}
}
for (int i = 0; i < nr_stripes; i++) {
unsigned long good;
+ unsigned long has_extent;
+ unsigned long error;
stripe = &sctx->stripes[i];
ASSERT(stripe->dev == fs_info->dev_replace.srcdev);
- bitmap_andnot(&good, &stripe->extent_sector_bitmap,
- &stripe->error_bitmap, stripe->nr_sectors);
+ has_extent = scrub_bitmap_read_has_extent(stripe);
+ error = scrub_bitmap_read_error(stripe);
+ bitmap_andnot(&good, &has_extent, &error, stripe->nr_sectors);
scrub_write_sectors(sctx, stripe, good, true);
}
}
@@ -1940,37 +2077,135 @@ static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *
return 0;
}
+/*
+ * Return 0 if we should not cancel the scrub.
+ * Return <0 if we need to cancel the scrub; the returned value
+ * indicates the reason:
+ * - -ECANCELED - Being explicitly canceled through ioctl.
+ * - -EINTR - Being interrupted by signal or fs/process freezing.
+ */
+static int should_cancel_scrub(const struct scrub_ctx *sctx)
+{
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+
+ if (atomic_read(&fs_info->scrub_cancel_req) ||
+ atomic_read(&sctx->cancel_req))
+ return -ECANCELED;
+
+ /*
+ * The user (e.g. fsfreeze command) or power management (PM)
+ * suspend/hibernate can freeze the fs, and PM suspend/hibernate will
+ * also freeze all user processes.
+ *
+ * A user process can only be frozen when it is in user space, thus we
+ * have to cancel the run so that the process can return to user
+ * space.
+ *
+ * Furthermore we have to check both filesystem and process freezing,
+ * as PM can be configured to freeze the filesystems before processes.
+ *
+ * If we only check fs freezing, then suspend without fs freezing
+ * will time out, as the process is still in kernel space.
+ *
+ * If we only check process freezing, then suspend with fs freezing
+ * will time out, as the running scrub will prevent the fs from being frozen.
+ */
+ if (fs_info->sb->s_writers.frozen > SB_UNFROZEN ||
+ freezing(current) || signal_pending(current))
+ return -EINTR;
+ return 0;
+}
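A condensed sketch of the caller pattern, for illustration only and not part of the patch: the per-stripe loops later in this diff (scrub_simple_mirror() and scrub_raid56_parity_stripe()) consult this helper between units of work, roughly:

	ret = should_cancel_scrub(sctx);
	if (ret < 0)
		break;	/* -ECANCELED or -EINTR, stop scrubbing */
	if (atomic_read(&fs_info->scrub_pause_req))
		scrub_blocked_if_needed(fs_info);
	/* ... scrub the next stripe or full stripe ... */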
+
+static int scrub_raid56_cached_parity(struct scrub_ctx *sctx,
+ struct btrfs_device *scrub_dev,
+ struct btrfs_chunk_map *map,
+ u64 full_stripe_start,
+ unsigned long *extent_bitmap)
+{
+ DECLARE_COMPLETION_ONSTACK(io_done);
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ struct btrfs_io_context *bioc = NULL;
+ struct btrfs_raid_bio *rbio;
+ struct bio bio;
+ const int data_stripes = nr_data_stripes(map);
+ u64 length = btrfs_stripe_nr_to_offset(data_stripes);
+ int ret;
+
+ bio_init(&bio, NULL, NULL, 0, REQ_OP_READ);
+ bio.bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
+ bio.bi_private = &io_done;
+ bio.bi_end_io = raid56_scrub_wait_endio;
+
+ btrfs_bio_counter_inc_blocked(fs_info);
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
+ &length, &bioc, NULL, NULL);
+ if (ret < 0)
+ goto out;
+ /* For RAID56 write there must be an @bioc allocated. */
+ ASSERT(bioc);
+ rbio = raid56_parity_alloc_scrub_rbio(&bio, bioc, scrub_dev, extent_bitmap,
+ BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
+ btrfs_put_bioc(bioc);
+ if (!rbio) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* Use the recovered stripes as cache to avoid reading them from disk again. */
+ for (int i = 0; i < data_stripes; i++) {
+ struct scrub_stripe *stripe = &sctx->raid56_data_stripes[i];
+
+ raid56_parity_cache_data_folios(rbio, stripe->folios,
+ full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
+ }
+ raid56_parity_submit_scrub_rbio(rbio);
+ wait_for_completion_io(&io_done);
+ ret = blk_status_to_errno(bio.bi_status);
+out:
+ btrfs_bio_counter_dec(fs_info);
+ bio_uninit(&bio);
+ return ret;
+}
+
static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
struct btrfs_device *scrub_dev,
struct btrfs_block_group *bg,
struct btrfs_chunk_map *map,
u64 full_stripe_start)
{
- DECLARE_COMPLETION_ONSTACK(io_done);
struct btrfs_fs_info *fs_info = sctx->fs_info;
- struct btrfs_raid_bio *rbio;
- struct btrfs_io_context *bioc = NULL;
struct btrfs_path extent_path = { 0 };
struct btrfs_path csum_path = { 0 };
- struct bio *bio;
struct scrub_stripe *stripe;
bool all_empty = true;
const int data_stripes = nr_data_stripes(map);
unsigned long extent_bitmap = 0;
- u64 length = btrfs_stripe_nr_to_offset(data_stripes);
int ret;
ASSERT(sctx->raid56_data_stripes);
+ ret = should_cancel_scrub(sctx);
+ if (ret < 0)
+ return ret;
+
+ if (atomic_read(&fs_info->scrub_pause_req))
+ scrub_blocked_if_needed(fs_info);
+
+ spin_lock(&bg->lock);
+ if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
+ spin_unlock(&bg->lock);
+ return 0;
+ }
+ spin_unlock(&bg->lock);
+
/*
* For data stripe search, we cannot reuse the same extent/csum paths,
* as the data stripe bytenr may be smaller than previous extent. Thus
* we have to use our own extent/csum paths.
*/
- extent_path.search_commit_root = 1;
- extent_path.skip_locking = 1;
- csum_path.search_commit_root = 1;
- csum_path.skip_locking = 1;
+ extent_path.search_commit_root = true;
+ extent_path.skip_locking = true;
+ csum_path.search_commit_root = true;
+ csum_path.skip_locking = true;
for (int i = 0; i < data_stripes; i++) {
int stripe_index;
@@ -2008,7 +2243,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
/* Check if all data stripes are empty. */
for (int i = 0; i < data_stripes; i++) {
stripe = &sctx->raid56_data_stripes[i];
- if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) {
+ if (!scrub_bitmap_empty_has_extent(stripe)) {
all_empty = false;
break;
}
@@ -2040,65 +2275,36 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
*/
for (int i = 0; i < data_stripes; i++) {
unsigned long error;
+ unsigned long has_extent;
stripe = &sctx->raid56_data_stripes[i];
+ error = scrub_bitmap_read_error(stripe);
+ has_extent = scrub_bitmap_read_has_extent(stripe);
+
/*
* We should only check the errors where there is an extent.
* As we may hit an empty data stripe while it's missing.
*/
- bitmap_and(&error, &stripe->error_bitmap,
- &stripe->extent_sector_bitmap, stripe->nr_sectors);
- if (!bitmap_empty(&error, stripe->nr_sectors)) {
+ bitmap_and(&error, &error, &has_extent, stripe->nr_sectors);
+ if (unlikely(!bitmap_empty(&error, stripe->nr_sectors))) {
btrfs_err(fs_info,
-"unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
+"scrub: unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
full_stripe_start, i, stripe->nr_sectors,
&error);
ret = -EIO;
goto out;
}
- bitmap_or(&extent_bitmap, &extent_bitmap,
- &stripe->extent_sector_bitmap, stripe->nr_sectors);
+ bitmap_or(&extent_bitmap, &extent_bitmap, &has_extent,
+ stripe->nr_sectors);
}
/* Now we can check and regenerate the P/Q stripe. */
- bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS);
- bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
- bio->bi_private = &io_done;
- bio->bi_end_io = raid56_scrub_wait_endio;
-
- btrfs_bio_counter_inc_blocked(fs_info);
- ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
- &length, &bioc, NULL, NULL);
- if (ret < 0) {
- btrfs_put_bioc(bioc);
- btrfs_bio_counter_dec(fs_info);
- goto out;
- }
- rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev, &extent_bitmap,
- BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
- btrfs_put_bioc(bioc);
- if (!rbio) {
- ret = -ENOMEM;
- btrfs_bio_counter_dec(fs_info);
- goto out;
- }
- /* Use the recovered stripes as cache to avoid read them from disk again. */
- for (int i = 0; i < data_stripes; i++) {
- stripe = &sctx->raid56_data_stripes[i];
-
- raid56_parity_cache_data_pages(rbio, stripe->pages,
- full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
- }
- raid56_parity_submit_scrub_rbio(rbio);
- wait_for_completion_io(&io_done);
- ret = blk_status_to_errno(bio->bi_status);
- bio_put(bio);
- btrfs_bio_counter_dec(fs_info);
-
+ ret = scrub_raid56_cached_parity(sctx, scrub_dev, map, full_stripe_start,
+ &extent_bitmap);
+out:
btrfs_release_path(&extent_path);
btrfs_release_path(&csum_path);
-out:
return ret;
}
@@ -2129,18 +2335,13 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
u64 found_logical = U64_MAX;
u64 cur_physical = physical + cur_logical - logical_start;
- /* Canceled? */
- if (atomic_read(&fs_info->scrub_cancel_req) ||
- atomic_read(&sctx->cancel_req)) {
- ret = -ECANCELED;
+ ret = should_cancel_scrub(sctx);
+ if (ret < 0)
break;
- }
- /* Paused? */
- if (atomic_read(&fs_info->scrub_pause_req)) {
- /* Push queued extents */
+
+ if (atomic_read(&fs_info->scrub_pause_req))
scrub_blocked_if_needed(fs_info);
- }
- /* Block group removed? */
+
spin_lock(&bg->lock);
if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
spin_unlock(&bg->lock);
@@ -2395,8 +2596,6 @@ out:
}
if (sctx->is_dev_replace && ret >= 0) {
- int ret2;
-
ret2 = sync_write_pointer_for_zoned(sctx,
chunk_logical + offset,
map->stripes[stripe_index].physical,
@@ -2471,7 +2670,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
struct btrfs_device *scrub_dev, u64 start, u64 end)
{
struct btrfs_dev_extent *dev_extent = NULL;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_root *root = fs_info->dev_root;
u64 chunk_offset;
@@ -2489,12 +2688,12 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
return -ENOMEM;
path->reada = READA_FORWARD;
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
key.objectid = scrub_dev->devid;
- key.offset = 0ull;
key.type = BTRFS_DEV_EXTENT_KEY;
+ key.offset = 0ull;
while (1) {
u64 dev_extent_len;
@@ -2673,14 +2872,14 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
ro_set = 0;
} else if (ret == -ETXTBSY) {
btrfs_warn(fs_info,
- "skipping scrub of block group %llu due to active swapfile",
+ "scrub: skipping scrub of block group %llu due to active swapfile",
cache->start);
scrub_pause_off(fs_info);
ret = 0;
goto skip_unfreeze;
} else {
- btrfs_warn(fs_info,
- "failed setting block group ro: %d", ret);
+ btrfs_warn(fs_info, "scrub: failed setting block group ro: %d",
+ ret);
btrfs_unfreeze_block_group(cache);
btrfs_put_block_group(cache);
scrub_pause_off(fs_info);
@@ -2743,8 +2942,8 @@ skip_unfreeze:
btrfs_put_block_group(cache);
if (ret)
break;
- if (sctx->is_dev_replace &&
- atomic64_read(&dev_replace->num_write_errors) > 0) {
+ if (unlikely(sctx->is_dev_replace &&
+ atomic64_read(&dev_replace->num_write_errors) > 0)) {
ret = -EIO;
break;
}
@@ -2757,8 +2956,6 @@ skip:
btrfs_release_path(path);
}
- btrfs_free_path(path);
-
return ret;
}
@@ -2766,29 +2963,23 @@ static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev,
struct page *page, u64 physical, u64 generation)
{
struct btrfs_fs_info *fs_info = sctx->fs_info;
- struct bio_vec bvec;
- struct bio bio;
struct btrfs_super_block *sb = page_address(page);
int ret;
- bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ);
- bio.bi_iter.bi_sector = physical >> SECTOR_SHIFT;
- __bio_add_page(&bio, page, BTRFS_SUPER_INFO_SIZE, 0);
- ret = submit_bio_wait(&bio);
- bio_uninit(&bio);
-
+ ret = bdev_rw_virt(dev->bdev, physical >> SECTOR_SHIFT, sb,
+ BTRFS_SUPER_INFO_SIZE, REQ_OP_READ);
if (ret < 0)
return ret;
ret = btrfs_check_super_csum(fs_info, sb);
- if (ret != 0) {
+ if (unlikely(ret != 0)) {
btrfs_err_rl(fs_info,
- "super block at physical %llu devid %llu has bad csum",
+ "scrub: super block at physical %llu devid %llu has bad csum",
physical, dev->devid);
return -EIO;
}
- if (btrfs_super_generation(sb) != generation) {
+ if (unlikely(btrfs_super_generation(sb) != generation)) {
btrfs_err_rl(fs_info,
-"super block at physical %llu devid %llu has bad generation %llu expect %llu",
+"scrub: super block at physical %llu devid %llu has bad generation %llu expect %llu",
physical, dev->devid,
btrfs_super_generation(sb), generation);
return -EUCLEAN;
@@ -2904,7 +3095,7 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info)
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
u64 end, struct btrfs_scrub_progress *progress,
- int readonly, int is_dev_replace)
+ bool readonly, bool is_dev_replace)
{
struct btrfs_dev_lookup_args args = { .devid = devid };
struct scrub_ctx *sctx;
@@ -2913,6 +3104,10 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
unsigned int nofs_flag;
bool need_commit = false;
+ /* Set the basic fallback @last_physical before we have an sctx. */
+ if (progress)
+ progress->last_physical = start;
+
if (btrfs_fs_closing(fs_info))
return -EAGAIN;
@@ -2931,6 +3126,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
sctx = scrub_setup_ctx(fs_info, is_dev_replace);
if (IS_ERR(sctx))
return PTR_ERR(sctx);
+ sctx->stat.last_physical = start;
ret = scrub_workers_get(fs_info);
if (ret)
@@ -2948,16 +3144,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
if (!is_dev_replace && !readonly &&
!test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- btrfs_err_in_rcu(fs_info,
- "scrub on devid %llu: filesystem on %s is not writable",
+ btrfs_err(fs_info,
+ "scrub: devid %llu: filesystem on %s is not writable",
devid, btrfs_dev_name(dev));
ret = -EROFS;
goto out;
}
mutex_lock(&fs_info->scrub_lock);
- if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
- test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
+ if (unlikely(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
+ test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state))) {
mutex_unlock(&fs_info->scrub_lock);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
ret = -EIO;
diff --git a/fs/btrfs/scrub.h b/fs/btrfs/scrub.h
index f0df597b75c7..aa68b6ebaf55 100644
--- a/fs/btrfs/scrub.h
+++ b/fs/btrfs/scrub.h
@@ -11,7 +11,7 @@ struct btrfs_scrub_progress;
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
u64 end, struct btrfs_scrub_progress *progress,
- int readonly, int is_dev_replace);
+ bool readonly, bool is_dev_replace);
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info);
void btrfs_scrub_continue(struct btrfs_fs_info *fs_info);
int btrfs_scrub_cancel(struct btrfs_fs_info *info);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 7254279c3cc9..2522faa97478 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4,6 +4,7 @@
*/
#include <linux/bsearch.h>
+#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sort.h>
@@ -16,7 +17,6 @@
#include <linux/compat.h>
#include <linux/crc32c.h>
#include <linux/fsverity.h>
-
#include "send.h"
#include "ctree.h"
#include "backref.h"
@@ -47,28 +47,30 @@
* It allows fast adding of path elements on the right side (normal path) and
* fast adding to the left side (reversed path). A reversed path can also be
* unreversed if needed.
+ *
+ * The definition of struct fs_path relies on -fms-extensions to allow
+ * including a tagged struct as an anonymous member.
*/
+struct __fs_path {
+ char *start;
+ char *end;
+
+ char *buf;
+ unsigned short buf_len:15;
+ unsigned short reversed:1;
+};
+static_assert(sizeof(struct __fs_path) < 256);
struct fs_path {
- union {
- struct {
- char *start;
- char *end;
-
- char *buf;
- unsigned short buf_len:15;
- unsigned short reversed:1;
- char inline_buf[];
- };
- /*
- * Average path length does not exceed 200 bytes, we'll have
- * better packing in the slab and higher chance to satisfy
- * an allocation later during send.
- */
- char pad[256];
- };
+ struct __fs_path;
+ /*
+ * Average path length does not exceed 200 bytes, we'll have
+ * better packing in the slab and higher chance to satisfy
+ * an allocation later during send.
+ */
+ char inline_buf[256 - sizeof(struct __fs_path)];
};
#define FS_PATH_INLINE_SIZE \
- (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
+ sizeof_field(struct fs_path, inline_buf)
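A standalone userspace sketch of the layout trick described above (illustration only, not from the patch; the demo_* names are invented). It compiles with gcc -std=c11 -fms-extensions and checks that the anonymous tagged-struct member plus the sized inline buffer keep the whole structure at 256 bytes:

	#include <assert.h>

	struct __demo_path {
		char *start;
		char *end;
		char *buf;
		unsigned short buf_len:15;
		unsigned short reversed:1;
	};

	struct demo_path {
		struct __demo_path;	/* anonymous tagged-struct member, needs -fms-extensions */
		char inline_buf[256 - sizeof(struct __demo_path)];
	};

	/* The split keeps the structure at the slab-friendly 256-byte size. */
	static_assert(sizeof(struct demo_path) == 256, "demo_path must stay at 256 bytes");

	int main(void)
	{
		struct demo_path p;

		p.buf = p.inline_buf;	/* anonymous members are accessed directly */
		p.buf_len = sizeof(p.inline_buf);
		return 0;
	}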
/* reused for each extent */
@@ -304,6 +306,8 @@ struct send_ctx {
struct btrfs_lru_cache dir_created_cache;
struct btrfs_lru_cache dir_utimes_cache;
+
+ struct fs_path cur_inode_path;
};
struct pending_dir_move {
@@ -383,11 +387,11 @@ static void inconsistent_snapshot_error(struct send_ctx *sctx,
result_string = "updated";
break;
case BTRFS_COMPARE_TREE_SAME:
- ASSERT(0);
+ DEBUG_WARN("no change between trees");
result_string = "unchanged";
break;
default:
- ASSERT(0);
+ DEBUG_WARN("unexpected comparison result %d", result);
result_string = "unexpected";
}
@@ -425,15 +429,21 @@ static int need_send_hole(struct send_ctx *sctx)
static void fs_path_reset(struct fs_path *p)
{
- if (p->reversed) {
+ if (p->reversed)
p->start = p->buf + p->buf_len - 1;
- p->end = p->start;
- *p->start = 0;
- } else {
+ else
p->start = p->buf;
- p->end = p->start;
- *p->start = 0;
- }
+
+ p->end = p->start;
+ *p->start = 0;
+}
+
+static void init_path(struct fs_path *p)
+{
+ p->reversed = 0;
+ p->buf = p->inline_buf;
+ p->buf_len = FS_PATH_INLINE_SIZE;
+ fs_path_reset(p);
}
static struct fs_path *fs_path_alloc(void)
@@ -443,10 +453,7 @@ static struct fs_path *fs_path_alloc(void)
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return NULL;
- p->reversed = 0;
- p->buf = p->inline_buf;
- p->buf_len = FS_PATH_INLINE_SIZE;
- fs_path_reset(p);
+ init_path(p);
return p;
}
@@ -471,7 +478,7 @@ static void fs_path_free(struct fs_path *p)
kfree(p);
}
-static int fs_path_len(struct fs_path *p)
+static inline int fs_path_len(const struct fs_path *p)
{
return p->end - p->start;
}
@@ -487,12 +494,10 @@ static int fs_path_ensure_buf(struct fs_path *p, int len)
if (p->buf_len >= len)
return 0;
- if (len > PATH_MAX) {
- WARN_ON(1);
- return -ENOMEM;
- }
+ if (WARN_ON(len > PATH_MAX))
+ return -ENAMETOOLONG;
- path_len = p->end - p->start;
+ path_len = fs_path_len(p);
old_buf_len = p->buf_len;
/*
@@ -533,12 +538,12 @@ static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
int ret;
int new_len;
- new_len = p->end - p->start + name_len;
+ new_len = fs_path_len(p) + name_len;
if (p->start != p->end)
new_len++;
ret = fs_path_ensure_buf(p, new_len);
if (ret < 0)
- goto out;
+ return ret;
if (p->reversed) {
if (p->start != p->end)
@@ -553,8 +558,7 @@ static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
*p->end = 0;
}
-out:
- return ret;
+ return 0;
}
static int fs_path_add(struct fs_path *p, const char *name, int name_len)
@@ -564,25 +568,15 @@ static int fs_path_add(struct fs_path *p, const char *name, int name_len)
ret = fs_path_prepare_for_add(p, name_len, &prepared);
if (ret < 0)
- goto out;
+ return ret;
memcpy(prepared, name, name_len);
-out:
- return ret;
+ return 0;
}
-static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
+static inline int fs_path_add_path(struct fs_path *p, const struct fs_path *p2)
{
- int ret;
- char *prepared;
-
- ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
- if (ret < 0)
- goto out;
- memcpy(prepared, p2->start, p2->end - p2->start);
-
-out:
- return ret;
+ return fs_path_add(p, p2->start, fs_path_len(p2));
}
static int fs_path_add_from_extent_buffer(struct fs_path *p,
@@ -594,12 +588,11 @@ static int fs_path_add_from_extent_buffer(struct fs_path *p,
ret = fs_path_prepare_for_add(p, len, &prepared);
if (ret < 0)
- goto out;
+ return ret;
read_extent_buffer(eb, prepared, off, len);
-out:
- return ret;
+ return 0;
}
static int fs_path_copy(struct fs_path *p, struct fs_path *from)
@@ -619,13 +612,21 @@ static void fs_path_unreverse(struct fs_path *p)
return;
tmp = p->start;
- len = p->end - p->start;
+ len = fs_path_len(p);
p->start = p->buf;
p->end = p->start + len;
memmove(p->start, tmp, len + 1);
p->reversed = 0;
}
+static inline bool is_current_inode_path(const struct send_ctx *sctx,
+ const struct fs_path *path)
+{
+ const struct fs_path *cur = &sctx->cur_inode_path;
+
+ return (strncmp(path->start, cur->start, fs_path_len(cur)) == 0);
+}
+
static struct btrfs_path *alloc_path_for_send(void)
{
struct btrfs_path *path;
@@ -633,9 +634,9 @@ static struct btrfs_path *alloc_path_for_send(void)
path = btrfs_alloc_path();
if (!path)
return NULL;
- path->search_commit_root = 1;
- path->skip_locking = 1;
- path->need_commit_sem = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
+ path->need_commit_sem = true;
return path;
}
@@ -648,7 +649,7 @@ static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
ret = kernel_write(filp, buf + pos, len - pos, off);
if (ret < 0)
return ret;
- if (ret == 0)
+ if (unlikely(ret == 0))
return -EIO;
pos += ret;
}
@@ -740,7 +741,7 @@ static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
#define TLV_PUT_PATH(sctx, attrtype, p) \
do { \
ret = tlv_put_string(sctx, attrtype, p->start, \
- p->end - p->start); \
+ fs_path_len((p))); \
if (ret < 0) \
goto tlv_put_failure; \
} while(0)
@@ -761,7 +762,7 @@ static int send_header(struct send_ctx *sctx)
{
struct btrfs_stream_header hdr;
- strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
+ strscpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
hdr.version = cpu_to_le32(sctx->proto);
return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
&sctx->send_off);
@@ -819,14 +820,11 @@ static int send_cmd(struct send_ctx *sctx)
static int send_rename(struct send_ctx *sctx,
struct fs_path *from, struct fs_path *to)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret;
- btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
-
ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
if (ret < 0)
- goto out;
+ return ret;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
@@ -834,7 +832,6 @@ static int send_rename(struct send_ctx *sctx,
ret = send_cmd(sctx);
tlv_put_failure:
-out:
return ret;
}
@@ -844,14 +841,11 @@ out:
static int send_link(struct send_ctx *sctx,
struct fs_path *path, struct fs_path *lnk)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret;
- btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
-
ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
if (ret < 0)
- goto out;
+ return ret;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
@@ -859,7 +853,6 @@ static int send_link(struct send_ctx *sctx,
ret = send_cmd(sctx);
tlv_put_failure:
-out:
return ret;
}
@@ -868,21 +861,17 @@ out:
*/
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret;
- btrfs_debug(fs_info, "send_unlink %s", path->start);
-
ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
if (ret < 0)
- goto out;
+ return ret;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
ret = send_cmd(sctx);
tlv_put_failure:
-out:
return ret;
}
@@ -891,21 +880,17 @@ out:
*/
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret;
- btrfs_debug(fs_info, "send_rmdir %s", path->start);
-
ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
if (ret < 0)
- goto out;
+ return ret;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
ret = send_cmd(sctx);
tlv_put_failure:
-out:
return ret;
}
@@ -927,7 +912,7 @@ static int get_inode_info(struct btrfs_root *root, u64 ino,
struct btrfs_inode_info *info)
{
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_inode_item *ii;
struct btrfs_key key;
@@ -942,11 +927,11 @@ static int get_inode_info(struct btrfs_root *root, u64 ino,
if (ret) {
if (ret > 0)
ret = -ENOENT;
- goto out;
+ return ret;
}
if (!info)
- goto out;
+ return 0;
ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
@@ -963,9 +948,7 @@ static int get_inode_info(struct btrfs_root *root, u64 ino,
*/
info->fileattr = btrfs_inode_flags(path->nodes[0], ii);
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
static int get_inode_gen(struct btrfs_root *root, u64 ino, u64 *gen)
@@ -991,13 +974,13 @@ typedef int (*iterate_inode_ref_t)(u64 dir, struct fs_path *p, void *ctx);
* path must point to the INODE_REF or INODE_EXTREF when called.
*/
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *found_key, int resolve,
+ struct btrfs_key *found_key, bool resolve,
iterate_inode_ref_t iterate, void *ctx)
{
struct extent_buffer *eb = path->nodes[0];
struct btrfs_inode_ref *iref;
struct btrfs_inode_extref *extref;
- struct btrfs_path *tmp_path;
+ BTRFS_PATH_AUTO_FREE(tmp_path);
struct fs_path *p;
u32 cur = 0;
u32 total;
@@ -1071,10 +1054,8 @@ static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
}
if (unlikely(start < p->buf)) {
btrfs_err(root->fs_info,
- "send: path ref buffer underflow for key (%llu %u %llu)",
- found_key->objectid,
- found_key->type,
- found_key->offset);
+ "send: path ref buffer underflow for key " BTRFS_KEY_FMT,
+ BTRFS_KEY_FMT_VALUE(found_key));
ret = -EINVAL;
goto out;
}
@@ -1094,7 +1075,6 @@ static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
}
out:
- btrfs_free_path(tmp_path);
fs_path_free(p);
return ret;
}
@@ -1155,12 +1135,12 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
btrfs_dir_item_key_to_cpu(eb, di, &di_key);
if (btrfs_dir_ftype(eb, di) == BTRFS_FT_XATTR) {
- if (name_len > XATTR_NAME_MAX) {
+ if (unlikely(name_len > XATTR_NAME_MAX)) {
ret = -ENAMETOOLONG;
goto out;
}
- if (name_len + data_len >
- BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
+ if (unlikely(name_len + data_len >
+ BTRFS_MAX_XATTR_SIZE(root->fs_info))) {
ret = -E2BIG;
goto out;
}
@@ -1168,7 +1148,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
/*
* Path too long
*/
- if (name_len + data_len > PATH_MAX) {
+ if (unlikely(name_len + data_len > PATH_MAX)) {
ret = -ENAMETOOLONG;
goto out;
}
@@ -1242,7 +1222,7 @@ static int get_inode_path(struct btrfs_root *root,
{
int ret;
struct btrfs_key key, found_key;
- struct btrfs_path *p;
+ BTRFS_PATH_AUTO_FREE(p);
p = alloc_path_for_send();
if (!p)
@@ -1256,28 +1236,20 @@ static int get_inode_path(struct btrfs_root *root,
ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
if (ret < 0)
- goto out;
- if (ret) {
- ret = 1;
- goto out;
- }
+ return ret;
+ if (ret)
+ return 1;
+
btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
if (found_key.objectid != ino ||
(found_key.type != BTRFS_INODE_REF_KEY &&
- found_key.type != BTRFS_INODE_EXTREF_KEY)) {
- ret = -ENOENT;
- goto out;
- }
+ found_key.type != BTRFS_INODE_EXTREF_KEY))
+ return -ENOENT;
- ret = iterate_inode_ref(root, p, &found_key, 1,
- __copy_first_ref, path);
+ ret = iterate_inode_ref(root, p, &found_key, true, __copy_first_ref, path);
if (ret < 0)
- goto out;
- ret = 0;
-
-out:
- btrfs_free_path(p);
- return ret;
+ return ret;
+ return 0;
}
struct backref_ctx {
@@ -1407,7 +1379,7 @@ static bool lookup_backref_cache(u64 leaf_bytenr, void *ctx,
struct backref_ctx *bctx = ctx;
struct send_ctx *sctx = bctx->sctx;
struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
- const u64 key = leaf_bytenr >> fs_info->sectorsize_bits;
+ const u64 key = leaf_bytenr >> fs_info->nodesize_bits;
struct btrfs_lru_cache_entry *raw_entry;
struct backref_cache_entry *entry;
@@ -1462,7 +1434,7 @@ static void store_backref_cache(u64 leaf_bytenr, const struct ulist *root_ids,
if (!new_entry)
return;
- new_entry->entry.key = leaf_bytenr >> fs_info->sectorsize_bits;
+ new_entry->entry.key = leaf_bytenr >> fs_info->nodesize_bits;
new_entry->entry.gen = 0;
new_entry->num_roots = 0;
ULIST_ITER_INIT(&uiter);
@@ -1580,7 +1552,6 @@ static int find_extent_clone(struct send_ctx *sctx,
struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret;
int extent_type;
- u64 logical;
u64 disk_byte;
u64 num_bytes;
struct btrfs_file_extent_item *fi;
@@ -1611,7 +1582,6 @@ static int find_extent_clone(struct send_ctx *sctx,
compressed = btrfs_file_extent_compression(eb, fi);
num_bytes = btrfs_file_extent_num_bytes(eb, fi);
- logical = disk_byte + btrfs_file_extent_offset(eb, fi);
/*
* Setup the clone roots.
@@ -1693,14 +1663,8 @@ static int find_extent_clone(struct send_ctx *sctx,
}
up_read(&fs_info->commit_root_sem);
- btrfs_debug(fs_info,
- "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
- data_offset, ino, num_bytes, logical);
-
- if (!backref_ctx.found) {
- btrfs_debug(fs_info, "no clones found");
+ if (!backref_ctx.found)
return -ENOENT;
- }
cur_clone_root = NULL;
for (i = 0; i < sctx->clone_roots_cnt; i++) {
@@ -1742,7 +1706,7 @@ static int read_symlink(struct btrfs_root *root,
struct fs_path *dest)
{
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_file_extent_item *ei;
u8 type;
@@ -1759,21 +1723,20 @@ static int read_symlink(struct btrfs_root *root,
key.offset = 0;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
- goto out;
- if (ret) {
+ return ret;
+ if (unlikely(ret)) {
/*
* An empty symlink inode. Can happen in rare error paths when
* creating a symlink (transaction committed before the inode
* eviction handler removed the symlink inode items and a crash
- * happened in between or the subvol was snapshoted in between).
+ * happened in between or the subvol was snapshotted in between).
* Print an informative message to dmesg/syslog so that the user
* can delete the symlink.
*/
btrfs_err(root->fs_info,
"Found empty symlink inode %llu at root %llu",
ino, btrfs_root_id(root));
- ret = -EIO;
- goto out;
+ return -EIO;
}
ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
@@ -1784,7 +1747,7 @@ static int read_symlink(struct btrfs_root *root,
btrfs_crit(root->fs_info,
"send: found symlink extent that is not inline, ino %llu root %llu extent type %d",
ino, btrfs_root_id(root), type);
- goto out;
+ return ret;
}
compression = btrfs_file_extent_compression(path->nodes[0], ei);
if (unlikely(compression != BTRFS_COMPRESS_NONE)) {
@@ -1792,17 +1755,13 @@ static int read_symlink(struct btrfs_root *root,
btrfs_crit(root->fs_info,
"send: found symlink extent with compression, ino %llu root %llu compression type %d",
ino, btrfs_root_id(root), compression);
- goto out;
+ return ret;
}
off = btrfs_file_extent_inline_start(ei);
len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
- ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
-
-out:
- btrfs_free_path(path);
- return ret;
+ return fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
}
/*
@@ -1813,8 +1772,7 @@ static int gen_unique_name(struct send_ctx *sctx,
u64 ino, u64 gen,
struct fs_path *dest)
{
- int ret = 0;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_dir_item *di;
char tmp[64];
int len;
@@ -1831,16 +1789,15 @@ static int gen_unique_name(struct send_ctx *sctx,
ino, gen, idx);
ASSERT(len < sizeof(tmp));
tmp_name.name = tmp;
- tmp_name.len = strlen(tmp);
+ tmp_name.len = len;
di = btrfs_lookup_dir_item(NULL, sctx->send_root,
path, BTRFS_FIRST_FREE_OBJECTID,
&tmp_name, 0);
btrfs_release_path(path);
- if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto out;
- }
+ if (IS_ERR(di))
+ return PTR_ERR(di);
+
if (di) {
/* not unique, try again */
idx++;
@@ -1849,7 +1806,6 @@ static int gen_unique_name(struct send_ctx *sctx,
if (!sctx->parent_root) {
/* unique */
- ret = 0;
break;
}
@@ -1857,10 +1813,9 @@ static int gen_unique_name(struct send_ctx *sctx,
path, BTRFS_FIRST_FREE_OBJECTID,
&tmp_name, 0);
btrfs_release_path(path);
- if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto out;
- }
+ if (IS_ERR(di))
+ return PTR_ERR(di);
+
if (di) {
/* not unique, try again */
idx++;
@@ -1870,11 +1825,7 @@ static int gen_unique_name(struct send_ctx *sctx,
break;
}
- ret = fs_path_add(dest, tmp, strlen(tmp));
-
-out:
- btrfs_free_path(path);
- return ret;
+ return fs_path_add(dest, tmp, len);
}
enum inode_state {
@@ -1897,7 +1848,7 @@ static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen,
ret = get_inode_info(sctx->send_root, ino, &info);
if (ret < 0 && ret != -ENOENT)
- goto out;
+ return ret;
left_ret = (info.nlink == 0) ? -ENOENT : ret;
left_gen = info.gen;
if (send_gen)
@@ -1908,7 +1859,7 @@ static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen,
} else {
ret = get_inode_info(sctx->parent_root, ino, &info);
if (ret < 0 && ret != -ENOENT)
- goto out;
+ return ret;
right_ret = (info.nlink == 0) ? -ENOENT : ret;
right_gen = info.gen;
if (parent_gen)
@@ -1953,7 +1904,6 @@ static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen,
ret = -ENOENT;
}
-out:
return ret;
}
@@ -1967,17 +1917,14 @@ static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen,
ret = get_cur_inode_state(sctx, ino, gen, send_gen, parent_gen);
if (ret < 0)
- goto out;
+ return ret;
if (ret == inode_state_no_change ||
ret == inode_state_did_create ||
ret == inode_state_will_delete)
- ret = 1;
- else
- ret = 0;
+ return 1;
-out:
- return ret;
+ return 0;
}
/*
@@ -1990,7 +1937,7 @@ static int lookup_dir_item_inode(struct btrfs_root *root,
int ret = 0;
struct btrfs_dir_item *di;
struct btrfs_key key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct fscrypt_str name_str = FSTR_INIT((char *)name, name_len);
path = alloc_path_for_send();
@@ -1998,19 +1945,15 @@ static int lookup_dir_item_inode(struct btrfs_root *root,
return -ENOMEM;
di = btrfs_lookup_dir_item(NULL, root, path, dir, &name_str, 0);
- if (IS_ERR_OR_NULL(di)) {
- ret = di ? PTR_ERR(di) : -ENOENT;
- goto out;
- }
+ if (IS_ERR_OR_NULL(di))
+ return di ? PTR_ERR(di) : -ENOENT;
+
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
- if (key.type == BTRFS_ROOT_ITEM_KEY) {
- ret = -ENOENT;
- goto out;
- }
+ if (key.type == BTRFS_ROOT_ITEM_KEY)
+ return -ENOENT;
+
*found_inode = key.objectid;
-out:
- btrfs_free_path(path);
return ret;
}
@@ -2024,7 +1967,7 @@ static int get_first_ref(struct btrfs_root *root, u64 ino,
int ret;
struct btrfs_key key;
struct btrfs_key found_key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int len;
u64 parent_dir;
@@ -2038,16 +1981,14 @@ static int get_first_ref(struct btrfs_root *root, u64 ino,
ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
if (ret < 0)
- goto out;
+ return ret;
if (!ret)
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
path->slots[0]);
if (ret || found_key.objectid != ino ||
(found_key.type != BTRFS_INODE_REF_KEY &&
- found_key.type != BTRFS_INODE_EXTREF_KEY)) {
- ret = -ENOENT;
- goto out;
- }
+ found_key.type != BTRFS_INODE_EXTREF_KEY))
+ return -ENOENT;
if (found_key.type == BTRFS_INODE_REF_KEY) {
struct btrfs_inode_ref *iref;
@@ -2068,19 +2009,17 @@ static int get_first_ref(struct btrfs_root *root, u64 ino,
parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
}
if (ret < 0)
- goto out;
+ return ret;
btrfs_release_path(path);
if (dir_gen) {
ret = get_inode_gen(root, parent_dir, dir_gen);
if (ret < 0)
- goto out;
+ return ret;
}
*dir = parent_dir;
-out:
- btrfs_free_path(path);
return ret;
}
@@ -2326,9 +2265,8 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx,
*parent_gen = nce->parent_gen;
ret = fs_path_add(dest, nce->name, nce->name_len);
if (ret < 0)
- goto out;
- ret = nce->ret;
- goto out;
+ return ret;
+ return nce->ret;
}
}
@@ -2339,12 +2277,12 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx,
*/
ret = is_inode_existent(sctx, ino, gen, NULL, NULL);
if (ret < 0)
- goto out;
+ return ret;
if (!ret) {
ret = gen_unique_name(sctx, ino, gen, dest);
if (ret < 0)
- goto out;
+ return ret;
ret = 1;
goto out_cache;
}
@@ -2360,21 +2298,21 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx,
ret = get_first_ref(sctx->parent_root, ino,
parent_ino, parent_gen, dest);
if (ret < 0)
- goto out;
+ return ret;
/*
* Check if the ref was overwritten by an inode's ref that was processed
* earlier. If yes, treat as orphan and return 1.
*/
ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
- dest->start, dest->end - dest->start);
+ dest->start, fs_path_len(dest));
if (ret < 0)
- goto out;
+ return ret;
if (ret) {
fs_path_reset(dest);
ret = gen_unique_name(sctx, ino, gen, dest);
if (ret < 0)
- goto out;
+ return ret;
ret = 1;
}
@@ -2383,10 +2321,8 @@ out_cache:
* Store the result of the lookup in the name cache.
*/
nce = kmalloc(sizeof(*nce) + fs_path_len(dest), GFP_KERNEL);
- if (!nce) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!nce)
+ return -ENOMEM;
nce->entry.key = ino;
nce->entry.gen = gen;
@@ -2404,10 +2340,9 @@ out_cache:
nce_ret = btrfs_lru_cache_store(&sctx->name_cache, &nce->entry, GFP_KERNEL);
if (nce_ret < 0) {
kfree(nce);
- ret = nce_ret;
+ return nce_ret;
}
-out:
return ret;
}
@@ -2444,6 +2379,14 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
u64 parent_inode = 0;
u64 parent_gen = 0;
int stop = 0;
+ const bool is_cur_inode = (ino == sctx->cur_ino && gen == sctx->cur_inode_gen);
+
+ if (is_cur_inode && fs_path_len(&sctx->cur_inode_path) > 0) {
+ if (dest != &sctx->cur_inode_path)
+ return fs_path_copy(dest, &sctx->cur_inode_path);
+
+ return 0;
+ }
name = fs_path_alloc();
if (!name) {
@@ -2495,8 +2438,12 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
out:
fs_path_free(name);
- if (!ret)
+ if (!ret) {
fs_path_unreverse(dest);
+ if (is_cur_inode && dest != &sctx->cur_inode_path)
+ ret = fs_path_copy(&sctx->cur_inode_path, dest);
+ }
+
return ret;
}
@@ -2508,11 +2455,11 @@ static int send_subvol_begin(struct send_ctx *sctx)
int ret;
struct btrfs_root *send_root = sctx->send_root;
struct btrfs_root *parent_root = sctx->parent_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_root_ref *ref;
struct extent_buffer *leaf;
- char *name = NULL;
+ char AUTO_KFREE(name);
int namelen;
path = btrfs_alloc_path();
@@ -2520,10 +2467,8 @@ static int send_subvol_begin(struct send_ctx *sctx)
return -ENOMEM;
name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
- if (!name) {
- btrfs_free_path(path);
+ if (!name)
return -ENOMEM;
- }
key.objectid = btrfs_root_id(send_root);
key.type = BTRFS_ROOT_BACKREF_KEY;
@@ -2532,18 +2477,15 @@ static int send_subvol_begin(struct send_ctx *sctx)
ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
&key, path, 1, 0);
if (ret < 0)
- goto out;
- if (ret) {
- ret = -ENOENT;
- goto out;
- }
+ return ret;
+ if (ret)
+ return -ENOENT;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.type != BTRFS_ROOT_BACKREF_KEY ||
key.objectid != btrfs_root_id(send_root)) {
- ret = -ENOENT;
- goto out;
+ return -ENOENT;
}
ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
namelen = btrfs_root_ref_name_len(leaf, ref);
@@ -2553,11 +2495,11 @@ static int send_subvol_begin(struct send_ctx *sctx)
if (parent_root) {
ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
if (ret < 0)
- goto out;
+ return ret;
} else {
ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
if (ret < 0)
- goto out;
+ return ret;
}
TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
@@ -2585,31 +2527,63 @@ static int send_subvol_begin(struct send_ctx *sctx)
ret = send_cmd(sctx);
tlv_put_failure:
-out:
- btrfs_free_path(path);
- kfree(name);
return ret;
}
+static struct fs_path *get_cur_inode_path(struct send_ctx *sctx)
+{
+ if (fs_path_len(&sctx->cur_inode_path) == 0) {
+ int ret;
+
+ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
+ &sctx->cur_inode_path);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ }
+
+ return &sctx->cur_inode_path;
+}
+
+static struct fs_path *get_path_for_command(struct send_ctx *sctx, u64 ino, u64 gen)
+{
+ struct fs_path *path;
+ int ret;
+
+ if (ino == sctx->cur_ino && gen == sctx->cur_inode_gen)
+ return get_cur_inode_path(sctx);
+
+ path = fs_path_alloc();
+ if (!path)
+ return ERR_PTR(-ENOMEM);
+
+ ret = get_cur_path(sctx, ino, gen, path);
+ if (ret < 0) {
+ fs_path_free(path);
+ return ERR_PTR(ret);
+ }
+
+ return path;
+}
+
+static void free_path_for_command(const struct send_ctx *sctx, struct fs_path *path)
+{
+ if (path != &sctx->cur_inode_path)
+ fs_path_free(path);
+}
+
static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p;
- btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
-
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
+ p = get_path_for_command(sctx, ino, gen);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
if (ret < 0)
goto out;
- ret = get_cur_path(sctx, ino, gen, p);
- if (ret < 0)
- goto out;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
@@ -2617,29 +2591,23 @@ static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
tlv_put_failure:
out:
- fs_path_free(p);
+ free_path_for_command(sctx, p);
return ret;
}
static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p;
- btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
-
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
+ p = get_path_for_command(sctx, ino, gen);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
if (ret < 0)
goto out;
- ret = get_cur_path(sctx, ino, gen, p);
- if (ret < 0)
- goto out;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
@@ -2647,32 +2615,26 @@ static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
tlv_put_failure:
out:
- fs_path_free(p);
+ free_path_for_command(sctx, p);
return ret;
}
static int send_fileattr(struct send_ctx *sctx, u64 ino, u64 gen, u64 fileattr)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p;
if (sctx->proto < 2)
return 0;
- btrfs_debug(fs_info, "send_fileattr %llu fileattr=%llu", ino, fileattr);
-
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
+ p = get_path_for_command(sctx, ino, gen);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
ret = begin_cmd(sctx, BTRFS_SEND_C_FILEATTR);
if (ret < 0)
goto out;
- ret = get_cur_path(sctx, ino, gen, p);
- if (ret < 0)
- goto out;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
TLV_PUT_U64(sctx, BTRFS_SEND_A_FILEATTR, fileattr);
@@ -2680,30 +2642,23 @@ static int send_fileattr(struct send_ctx *sctx, u64 ino, u64 gen, u64 fileattr)
tlv_put_failure:
out:
- fs_path_free(p);
+ free_path_for_command(sctx, p);
return ret;
}
static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p;
- btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
- ino, uid, gid);
-
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
+ p = get_path_for_command(sctx, ino, gen);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
if (ret < 0)
goto out;
- ret = get_cur_path(sctx, ino, gen, p);
- if (ret < 0)
- goto out;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
@@ -2712,26 +2667,23 @@ static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
tlv_put_failure:
out:
- fs_path_free(p);
+ free_path_for_command(sctx, p);
return ret;
}
static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p = NULL;
struct btrfs_inode_item *ii;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *eb;
struct btrfs_key key;
int slot;
- btrfs_debug(fs_info, "send_utimes %llu", ino);
-
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
+ p = get_path_for_command(sctx, ino, gen);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
path = alloc_path_for_send();
if (!path) {
@@ -2756,9 +2708,6 @@ static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
if (ret < 0)
goto out;
- ret = get_cur_path(sctx, ino, gen, p);
- if (ret < 0)
- goto out;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
@@ -2770,8 +2719,7 @@ static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
tlv_put_failure:
out:
- fs_path_free(p);
- btrfs_free_path(path);
+ free_path_for_command(sctx, p);
return ret;
}
@@ -2781,7 +2729,7 @@ out:
* processing an inode that is a directory and it just got renamed, and existing
* entries in the cache may refer to inodes that have the directory in their
* full path - in which case we would generate outdated paths (pre-rename)
- * for the inodes that the cache entries point to. Instead of prunning the
+ * for the inodes that the cache entries point to. Instead of pruning the
* cache when inserting, do it after we finish processing each inode at
* finish_inode_if_needed().
*/
@@ -2838,7 +2786,6 @@ static int trim_dir_utimes_cache(struct send_ctx *sctx)
*/
static int send_create_inode(struct send_ctx *sctx, u64 ino)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p;
int cmd;
@@ -2847,8 +2794,6 @@ static int send_create_inode(struct send_ctx *sctx, u64 ino)
u64 mode;
u64 rdev;
- btrfs_debug(fs_info, "send_create_inode %llu", ino);
-
p = fs_path_alloc();
if (!p)
return -ENOMEM;
@@ -2945,7 +2890,7 @@ static int did_create_dir(struct send_ctx *sctx, u64 dir)
{
int ret = 0;
int iter_ret = 0;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_key di_key;
@@ -2985,7 +2930,6 @@ static int did_create_dir(struct send_ctx *sctx, u64 dir)
if (iter_ret < 0)
ret = iter_ret;
- btrfs_free_path(path);
return ret;
}
@@ -3075,7 +3019,7 @@ static void __free_recorded_refs(struct list_head *head)
struct recorded_ref *cur;
while (!list_empty(head)) {
- cur = list_entry(head->next, struct recorded_ref, list);
+ cur = list_first_entry(head, struct recorded_ref, list);
recorded_ref_free(cur);
}
}
@@ -3106,6 +3050,11 @@ static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
goto out;
ret = send_rename(sctx, path, orphan);
+ if (ret < 0)
+ goto out;
+
+ if (ino == sctx->cur_ino && gen == sctx->cur_inode_gen)
+ ret = fs_path_copy(&sctx->cur_inode_path, orphan);
out:
fs_path_free(orphan);
@@ -3760,7 +3709,7 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
struct recorded_ref *parent_ref,
const bool is_orphan)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_key di_key;
struct btrfs_dir_item *di;
@@ -3781,19 +3730,15 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
- if (ret < 0) {
- goto out;
- } else if (ret > 0) {
- ret = 0;
- goto out;
- }
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return 0;
di = btrfs_match_dir_item_name(path, parent_ref->name,
parent_ref->name_len);
- if (!di) {
- ret = 0;
- goto out;
- }
+ if (!di)
+ return 0;
/*
* di_key.objectid has the number of the inode that has a dentry in the
* parent directory with the same name that sctx->cur_ino is being
@@ -3803,26 +3748,22 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
* that it happens after that other inode is renamed.
*/
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
- if (di_key.type != BTRFS_INODE_ITEM_KEY) {
- ret = 0;
- goto out;
- }
+ if (di_key.type != BTRFS_INODE_ITEM_KEY)
+ return 0;
ret = get_inode_gen(sctx->parent_root, di_key.objectid, &left_gen);
if (ret < 0)
- goto out;
+ return ret;
ret = get_inode_gen(sctx->send_root, di_key.objectid, &right_gen);
if (ret < 0) {
if (ret == -ENOENT)
ret = 0;
- goto out;
+ return ret;
}
/* Different inode, no need to delay the rename of sctx->cur_ino */
- if (right_gen != left_gen) {
- ret = 0;
- goto out;
- }
+ if (right_gen != left_gen)
+ return 0;
wdm = get_waiting_dir_move(sctx, di_key.objectid);
if (wdm && !wdm->orphanized) {
@@ -3836,8 +3777,6 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
if (!ret)
ret = 1;
}
-out:
- btrfs_free_path(path);
return ret;
}
@@ -3887,7 +3826,7 @@ static int is_ancestor(struct btrfs_root *root,
bool free_fs_path = false;
int ret = 0;
int iter_ret = 0;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
if (!fs_path) {
@@ -3955,7 +3894,6 @@ static int is_ancestor(struct btrfs_root *root,
ret = iter_ret;
out:
- btrfs_free_path(path);
if (free_fs_path)
fs_path_free(fs_path);
return ret;
@@ -4135,7 +4073,7 @@ static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
*/
static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
{
- char *name;
+ char AUTO_KFREE(name);
int ret;
name = kmemdup(ref->name, ref->name_len, GFP_KERNEL);
@@ -4145,17 +4083,75 @@ static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
fs_path_reset(ref->full_path);
ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path);
if (ret < 0)
- goto out;
+ return ret;
ret = fs_path_add(ref->full_path, name, ref->name_len);
if (ret < 0)
- goto out;
+ return ret;
/* Update the reference's base name pointer. */
set_ref_path(ref, ref->full_path);
-out:
- kfree(name);
- return ret;
+
+ return 0;
+}
+
+static int rbtree_check_dir_ref_comp(const void *k, const struct rb_node *node)
+{
+ const struct recorded_ref *data = k;
+ const struct recorded_ref *ref = rb_entry(node, struct recorded_ref, node);
+
+ if (data->dir > ref->dir)
+ return 1;
+ if (data->dir < ref->dir)
+ return -1;
+ if (data->dir_gen > ref->dir_gen)
+ return 1;
+ if (data->dir_gen < ref->dir_gen)
+ return -1;
+ return 0;
+}
+
+static bool rbtree_check_dir_ref_less(struct rb_node *node, const struct rb_node *parent)
+{
+ const struct recorded_ref *entry = rb_entry(node, struct recorded_ref, node);
+
+ return rbtree_check_dir_ref_comp(entry, parent) < 0;
+}
+
+static int record_check_dir_ref_in_tree(struct rb_root *root,
+ struct recorded_ref *ref, struct list_head *list)
+{
+ struct recorded_ref *tmp_ref;
+ int ret;
+
+ if (rb_find(ref, root, rbtree_check_dir_ref_comp))
+ return 0;
+
+ ret = dup_ref(ref, list);
+ if (ret < 0)
+ return ret;
+
+ tmp_ref = list_last_entry(list, struct recorded_ref, list);
+ rb_add(&tmp_ref->node, root, rbtree_check_dir_ref_less);
+ tmp_ref->root = root;
+ return 0;
+}
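/*
 * [Editor's note: record_check_dir_ref_in_tree() deduplicates the entries
 * added to the local check_dirs list by keying an rbtree on (dir, dir_gen)
 * via the generic rb_find()/rb_add() helpers, so each parent directory is
 * handled only once in process_recorded_refs(); this presumably is what
 * allows the old last_dir_ino_rm shortcut to be removed further below.]
 */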
+
+static int rename_current_inode(struct send_ctx *sctx,
+ struct fs_path *current_path,
+ struct fs_path *new_path)
+{
+ int ret;
+
+ ret = send_rename(sctx, current_path, new_path);
+ if (ret < 0)
+ return ret;
+
+ ret = fs_path_copy(&sctx->cur_inode_path, new_path);
+ if (ret < 0)
+ return ret;
+
+ return fs_path_copy(current_path, new_path);
}
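/*
 * [Editor's note: the helpers get_path_for_command(), free_path_for_command()
 * and get_cur_inode_path() used throughout this patch are defined in an
 * earlier hunk of send.c that is not shown here. Judging from the call sites,
 * the central helper is assumed to look roughly like this (sketch, not
 * verbatim):
 *
 *	static struct fs_path *get_cur_inode_path(struct send_ctx *sctx)
 *	{
 *		struct fs_path *p = &sctx->cur_inode_path;
 *
 *		// Build the path of the inode being processed only once and
 *		// cache it; callers get an ERR_PTR() on failure.
 *		if (p->start == p->end) {	// assumed "empty path" check
 *			int ret = get_cur_path(sctx, sctx->cur_ino,
 *					       sctx->cur_inode_gen, p);
 *			if (ret < 0)
 *				return ERR_PTR(ret);
 *		}
 *		return p;
 *	}
 *
 * The cached path is invalidated with fs_path_reset(&sctx->cur_inode_path)
 * whenever a rename or unlink changes it (see process_recorded_refs() below),
 * and free_path_for_command() is expected to skip freeing when it is handed
 * this cached path rather than one obtained from fs_path_alloc().]
 */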
/*
@@ -4168,19 +4164,17 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
struct recorded_ref *cur;
struct recorded_ref *cur2;
LIST_HEAD(check_dirs);
+ struct rb_root rbtree_check_dirs = RB_ROOT;
struct fs_path *valid_path = NULL;
u64 ow_inode = 0;
u64 ow_gen;
u64 ow_mode;
- int did_overwrite = 0;
- int is_orphan = 0;
- u64 last_dir_ino_rm = 0;
+ bool did_overwrite = false;
+ bool is_orphan = false;
bool can_rename = true;
bool orphanized_dir = false;
bool orphanized_ancestor = false;
- btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
-
/*
* This should never happen as the root dir always has the same ref
* which is always '..'
@@ -4216,14 +4210,14 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
if (ret < 0)
goto out;
if (ret)
- did_overwrite = 1;
+ did_overwrite = true;
}
if (sctx->cur_inode_new || did_overwrite) {
ret = gen_unique_name(sctx, sctx->cur_ino,
sctx->cur_inode_gen, valid_path);
if (ret < 0)
goto out;
- is_orphan = 1;
+ is_orphan = true;
} else {
ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
valid_path);
@@ -4348,6 +4342,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
if (ret > 0) {
orphanized_ancestor = true;
fs_path_reset(valid_path);
+ fs_path_reset(&sctx->cur_inode_path);
ret = get_cur_path(sctx, sctx->cur_ino,
sctx->cur_inode_gen,
valid_path);
@@ -4443,13 +4438,10 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
* it depending on the inode mode.
*/
if (is_orphan && can_rename) {
- ret = send_rename(sctx, valid_path, cur->full_path);
- if (ret < 0)
- goto out;
- is_orphan = 0;
- ret = fs_path_copy(valid_path, cur->full_path);
+ ret = rename_current_inode(sctx, valid_path, cur->full_path);
if (ret < 0)
goto out;
+ is_orphan = false;
} else if (can_rename) {
if (S_ISDIR(sctx->cur_inode_mode)) {
/*
@@ -4457,10 +4449,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
* dirs, we always have one new and one deleted
* ref. The deleted ref is ignored later.
*/
- ret = send_rename(sctx, valid_path,
- cur->full_path);
- if (!ret)
- ret = fs_path_copy(valid_path,
+ ret = rename_current_inode(sctx, valid_path,
cur->full_path);
if (ret < 0)
goto out;
@@ -4483,7 +4472,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
goto out;
}
}
- ret = dup_ref(cur, &check_dirs);
+ ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
if (ret < 0)
goto out;
}
@@ -4507,11 +4496,11 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
sctx->cur_inode_gen, valid_path);
if (ret < 0)
goto out;
- is_orphan = 1;
+ is_orphan = true;
}
list_for_each_entry(cur, &sctx->deleted_refs, list) {
- ret = dup_ref(cur, &check_dirs);
+ ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
if (ret < 0)
goto out;
}
@@ -4520,9 +4509,8 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
/*
* We have a moved dir. Add the old parent to check_dirs
*/
- cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
- list);
- ret = dup_ref(cur, &check_dirs);
+ cur = list_first_entry(&sctx->deleted_refs, struct recorded_ref, list);
+ ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
if (ret < 0)
goto out;
} else if (!S_ISDIR(sctx->cur_inode_mode)) {
@@ -4553,8 +4541,10 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
ret = send_unlink(sctx, cur->full_path);
if (ret < 0)
goto out;
+ if (is_current_inode_path(sctx, cur->full_path))
+ fs_path_reset(&sctx->cur_inode_path);
}
- ret = dup_ref(cur, &check_dirs);
+ ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
if (ret < 0)
goto out;
}
@@ -4597,8 +4587,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
ret = cache_dir_utimes(sctx, cur->dir, cur->dir_gen);
if (ret < 0)
goto out;
- } else if (ret == inode_state_did_delete &&
- cur->dir != last_dir_ino_rm) {
+ } else if (ret == inode_state_did_delete) {
ret = can_rmdir(sctx, cur->dir, cur->dir_gen);
if (ret < 0)
goto out;
@@ -4610,7 +4599,6 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
ret = send_rmdir(sctx, valid_path);
if (ret < 0)
goto out;
- last_dir_ino_rm = cur->dir;
}
}
}
@@ -4628,7 +4616,6 @@ static int rbtree_ref_comp(const void *k, const struct rb_node *node)
{
const struct recorded_ref *data = k;
const struct recorded_ref *ref = rb_entry(node, struct recorded_ref, node);
- int result;
if (data->dir > ref->dir)
return 1;
@@ -4642,12 +4629,7 @@ static int rbtree_ref_comp(const void *k, const struct rb_node *node)
return 1;
if (data->name_len < ref->name_len)
return -1;
- result = strcmp(data->name, ref->name);
- if (result > 0)
- return 1;
- if (result < 0)
- return -1;
- return 0;
+ return strcmp(data->name, ref->name);
}
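/*
 * [Editor's note: rb_find() only uses the sign of the comparator's return
 * value, so returning strcmp() directly is equivalent to the removed
 * normalization to -1/0/1.]
 */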
static bool rbtree_ref_less(struct rb_node *node, const struct rb_node *parent)
@@ -4701,7 +4683,7 @@ out:
static int record_new_ref_if_needed(u64 dir, struct fs_path *name, void *ctx)
{
- int ret = 0;
+ int ret;
struct send_ctx *sctx = ctx;
struct rb_node *node = NULL;
struct recorded_ref data;
@@ -4710,7 +4692,7 @@ static int record_new_ref_if_needed(u64 dir, struct fs_path *name, void *ctx)
ret = get_inode_gen(sctx->send_root, dir, &dir_gen);
if (ret < 0)
- goto out;
+ return ret;
data.dir = dir;
data.dir_gen = dir_gen;
@@ -4724,13 +4706,13 @@ static int record_new_ref_if_needed(u64 dir, struct fs_path *name, void *ctx)
&sctx->new_refs, name, dir, dir_gen,
sctx);
}
-out:
+
return ret;
}
static int record_deleted_ref_if_needed(u64 dir, struct fs_path *name, void *ctx)
{
- int ret = 0;
+ int ret;
struct send_ctx *sctx = ctx;
struct rb_node *node = NULL;
struct recorded_ref data;
@@ -4739,7 +4721,7 @@ static int record_deleted_ref_if_needed(u64 dir, struct fs_path *name, void *ctx
ret = get_inode_gen(sctx->parent_root, dir, &dir_gen);
if (ret < 0)
- goto out;
+ return ret;
data.dir = dir;
data.dir_gen = dir_gen;
@@ -4753,7 +4735,7 @@ static int record_deleted_ref_if_needed(u64 dir, struct fs_path *name, void *ctx
&sctx->deleted_refs, name, dir,
dir_gen, sctx);
}
-out:
+
return ret;
}
@@ -4761,47 +4743,40 @@ static int record_new_ref(struct send_ctx *sctx)
{
int ret;
- ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
- sctx->cmp_key, 0, record_new_ref_if_needed, sctx);
+ ret = iterate_inode_ref(sctx->send_root, sctx->left_path, sctx->cmp_key,
+ false, record_new_ref_if_needed, sctx);
if (ret < 0)
- goto out;
- ret = 0;
+ return ret;
-out:
- return ret;
+ return 0;
}
static int record_deleted_ref(struct send_ctx *sctx)
{
int ret;
- ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
- sctx->cmp_key, 0, record_deleted_ref_if_needed,
- sctx);
+ ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, sctx->cmp_key,
+ false, record_deleted_ref_if_needed, sctx);
if (ret < 0)
- goto out;
- ret = 0;
+ return ret;
-out:
- return ret;
+ return 0;
}
static int record_changed_ref(struct send_ctx *sctx)
{
- int ret = 0;
+ int ret;
- ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
- sctx->cmp_key, 0, record_new_ref_if_needed, sctx);
+ ret = iterate_inode_ref(sctx->send_root, sctx->left_path, sctx->cmp_key,
+ false, record_new_ref_if_needed, sctx);
if (ret < 0)
- goto out;
- ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
- sctx->cmp_key, 0, record_deleted_ref_if_needed, sctx);
+ return ret;
+ ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, sctx->cmp_key,
+ false, record_deleted_ref_if_needed, sctx);
if (ret < 0)
- goto out;
- ret = 0;
+ return ret;
-out:
- return ret;
+ return 0;
}
/*
@@ -4814,7 +4789,7 @@ static int process_all_refs(struct send_ctx *sctx,
int ret = 0;
int iter_ret = 0;
struct btrfs_root *root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_key found_key;
iterate_inode_ref_t cb;
@@ -4833,8 +4808,7 @@ static int process_all_refs(struct send_ctx *sctx,
} else {
btrfs_err(sctx->send_root->fs_info,
"Wrong command %d in process_all_refs", cmd);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
key.objectid = sctx->cmp_key->objectid;
@@ -4846,15 +4820,14 @@ static int process_all_refs(struct send_ctx *sctx,
found_key.type != BTRFS_INODE_EXTREF_KEY))
break;
- ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
+ ret = iterate_inode_ref(root, path, &found_key, false, cb, sctx);
if (ret < 0)
- goto out;
+ return ret;
}
/* Catch error found during iteration */
- if (iter_ret < 0) {
- ret = iter_ret;
- goto out;
- }
+ if (iter_ret < 0)
+ return iter_ret;
+
btrfs_release_path(path);
/*
@@ -4862,22 +4835,23 @@ static int process_all_refs(struct send_ctx *sctx,
* re-creating this inode and will be rename'ing it into place once we
* rename the parent directory.
*/
- ret = process_recorded_refs(sctx, &pending_move);
-out:
- btrfs_free_path(path);
- return ret;
+ return process_recorded_refs(sctx, &pending_move);
}
static int send_set_xattr(struct send_ctx *sctx,
- struct fs_path *path,
const char *name, int name_len,
const char *data, int data_len)
{
- int ret = 0;
+ struct fs_path *path;
+ int ret;
+
+ path = get_cur_inode_path(sctx);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
if (ret < 0)
- goto out;
+ return ret;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
@@ -4886,7 +4860,6 @@ static int send_set_xattr(struct send_ctx *sctx,
ret = send_cmd(sctx);
tlv_put_failure:
-out:
return ret;
}
@@ -4894,11 +4867,11 @@ static int send_remove_xattr(struct send_ctx *sctx,
struct fs_path *path,
const char *name, int name_len)
{
- int ret = 0;
+ int ret;
ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
if (ret < 0)
- goto out;
+ return ret;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
@@ -4906,7 +4879,6 @@ static int send_remove_xattr(struct send_ctx *sctx,
ret = send_cmd(sctx);
tlv_put_failure:
-out:
return ret;
}
@@ -4914,19 +4886,13 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key,
const char *name, int name_len, const char *data,
int data_len, void *ctx)
{
- int ret;
struct send_ctx *sctx = ctx;
- struct fs_path *p;
struct posix_acl_xattr_header dummy_acl;
/* Capabilities are emitted by finish_inode_if_needed */
if (!strncmp(name, XATTR_NAME_CAPS, name_len))
return 0;
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
-
/*
* This hack is needed because empty acls are stored as zero byte
* data in xattrs. Problem with that is, that receiving these zero byte
@@ -4943,48 +4909,27 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key,
}
}
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
- if (ret < 0)
- goto out;
-
- ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
-
-out:
- fs_path_free(p);
- return ret;
+ return send_set_xattr(sctx, name, name_len, data, data_len);
}
static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
const char *name, int name_len,
const char *data, int data_len, void *ctx)
{
- int ret;
struct send_ctx *sctx = ctx;
struct fs_path *p;
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
+ p = get_cur_inode_path(sctx);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
- if (ret < 0)
- goto out;
-
- ret = send_remove_xattr(sctx, p, name, name_len);
-
-out:
- fs_path_free(p);
- return ret;
+ return send_remove_xattr(sctx, p, name, name_len);
}
static int process_new_xattr(struct send_ctx *sctx)
{
- int ret = 0;
-
- ret = iterate_dir_item(sctx->send_root, sctx->left_path,
- __process_new_xattr, sctx);
-
- return ret;
+ return iterate_dir_item(sctx->send_root, sctx->left_path,
+ __process_new_xattr, sctx);
}
static int process_deleted_xattr(struct send_ctx *sctx)
@@ -4999,6 +4944,7 @@ struct find_xattr_ctx {
int found_idx;
char *found_data;
int found_data_len;
+ bool copy_data;
};
static int __find_xattr(int num, struct btrfs_key *di_key, const char *name,
@@ -5010,9 +4956,11 @@ static int __find_xattr(int num, struct btrfs_key *di_key, const char *name,
strncmp(name, ctx->name, name_len) == 0) {
ctx->found_idx = num;
ctx->found_data_len = data_len;
- ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
- if (!ctx->found_data)
- return -ENOMEM;
+ if (ctx->copy_data) {
+ ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
+ if (!ctx->found_data)
+ return -ENOMEM;
+ }
return 1;
}
return 0;
@@ -5032,6 +4980,7 @@ static int find_xattr(struct btrfs_root *root,
ctx.found_idx = -1;
ctx.found_data = NULL;
ctx.found_data_len = 0;
+ ctx.copy_data = (data != NULL);
ret = iterate_dir_item(root, path, __find_xattr, &ctx);
if (ret < 0)
@@ -5043,7 +4992,7 @@ static int find_xattr(struct btrfs_root *root,
*data = ctx.found_data;
*data_len = ctx.found_data_len;
} else {
- kfree(ctx.found_data);
+ ASSERT(ctx.found_data == NULL);
}
return ctx.found_idx;
}
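/*
 * [Editor's note: with the new copy_data flag, find_xattr() duplicates the
 * xattr value only when the caller actually asked for it (data != NULL), so
 * lookup-only callers no longer allocate and free a throwaway buffer; the
 * ASSERT() above documents that found_data stays NULL in that case.]
 */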
@@ -5056,8 +5005,8 @@ static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
{
int ret;
struct send_ctx *sctx = ctx;
- char *found_data = NULL;
- int found_data_len = 0;
+ char AUTO_KFREE(found_data);
+ int found_data_len = 0;
ret = find_xattr(sctx->parent_root, sctx->right_path,
sctx->cmp_key, name, name_len, &found_data,
@@ -5075,7 +5024,6 @@ static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
}
}
- kfree(found_data);
return ret;
}
@@ -5100,17 +5048,15 @@ static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
static int process_changed_xattr(struct send_ctx *sctx)
{
- int ret = 0;
+ int ret;
ret = iterate_dir_item(sctx->send_root, sctx->left_path,
__process_changed_new_xattr, sctx);
if (ret < 0)
- goto out;
- ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
- __process_changed_deleted_xattr, sctx);
+ return ret;
-out:
- return ret;
+ return iterate_dir_item(sctx->parent_root, sctx->right_path,
+ __process_changed_deleted_xattr, sctx);
}
static int process_all_new_xattrs(struct send_ctx *sctx)
@@ -5118,7 +5064,7 @@ static int process_all_new_xattrs(struct send_ctx *sctx)
int ret = 0;
int iter_ret = 0;
struct btrfs_root *root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_key found_key;
@@ -5146,7 +5092,6 @@ static int process_all_new_xattrs(struct send_ctx *sctx)
if (iter_ret < 0)
ret = iter_ret;
- btrfs_free_path(path);
return ret;
}
@@ -5157,7 +5102,7 @@ static int send_verity(struct send_ctx *sctx, struct fs_path *path,
ret = begin_cmd(sctx, BTRFS_SEND_C_ENABLE_VERITY);
if (ret < 0)
- goto out;
+ return ret;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
TLV_PUT_U8(sctx, BTRFS_SEND_A_VERITY_ALGORITHM,
@@ -5172,25 +5117,24 @@ static int send_verity(struct send_ctx *sctx, struct fs_path *path,
ret = send_cmd(sctx);
tlv_put_failure:
-out:
return ret;
}
static int process_verity(struct send_ctx *sctx)
{
int ret = 0;
- struct inode *inode;
+ struct btrfs_inode *inode;
struct fs_path *p;
inode = btrfs_iget(sctx->cur_ino, sctx->send_root);
if (IS_ERR(inode))
return PTR_ERR(inode);
- ret = btrfs_get_verity_descriptor(inode, NULL, 0);
+ ret = btrfs_get_verity_descriptor(&inode->vfs_inode, NULL, 0);
if (ret < 0)
goto iput;
- if (ret > FS_VERITY_MAX_DESCRIPTOR_SIZE) {
+ if (unlikely(ret > FS_VERITY_MAX_DESCRIPTOR_SIZE)) {
ret = -EMSGSIZE;
goto iput;
}
@@ -5203,27 +5147,19 @@ static int process_verity(struct send_ctx *sctx)
}
}
- ret = btrfs_get_verity_descriptor(inode, sctx->verity_descriptor, ret);
+ ret = btrfs_get_verity_descriptor(&inode->vfs_inode, sctx->verity_descriptor, ret);
if (ret < 0)
goto iput;
- p = fs_path_alloc();
- if (!p) {
- ret = -ENOMEM;
+ p = get_cur_inode_path(sctx);
+ if (IS_ERR(p)) {
+ ret = PTR_ERR(p);
goto iput;
}
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
- if (ret < 0)
- goto free_path;
ret = send_verity(sctx, p, sctx->verity_descriptor);
- if (ret < 0)
- goto free_path;
-
-free_path:
- fs_path_free(p);
iput:
- iput(inode);
+ iput(&inode->vfs_inode);
return ret;
}
@@ -5242,14 +5178,14 @@ static int put_data_header(struct send_ctx *sctx, u32 len)
* Since v2, the data attribute header doesn't include a length,
 * it implicitly extends to the end of the command.
*/
- if (sctx->send_max_size - sctx->send_size < sizeof(__le16) + len)
+ if (unlikely(sctx->send_max_size - sctx->send_size < sizeof(__le16) + len))
return -EOVERFLOW;
put_unaligned_le16(BTRFS_SEND_A_DATA, sctx->send_buf + sctx->send_size);
sctx->send_size += sizeof(__le16);
} else {
struct btrfs_tlv_header *hdr;
- if (sctx->send_max_size - sctx->send_size < sizeof(*hdr) + len)
+ if (unlikely(sctx->send_max_size - sctx->send_size < sizeof(*hdr) + len))
return -EOVERFLOW;
hdr = (struct btrfs_tlv_header *)(sctx->send_buf + sctx->send_size);
put_unaligned_le16(BTRFS_SEND_A_DATA, &hdr->tlv_type);
@@ -5263,10 +5199,9 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
{
struct btrfs_root *root = sctx->send_root;
struct btrfs_fs_info *fs_info = root->fs_info;
- struct folio *folio;
- pgoff_t index = offset >> PAGE_SHIFT;
- pgoff_t last_index;
- unsigned pg_offset = offset_in_page(offset);
+ u64 cur = offset;
+ const u64 end = offset + len;
+ const pgoff_t last_index = ((end - 1) >> PAGE_SHIFT);
struct address_space *mapping = sctx->cur_inode->i_mapping;
int ret;
@@ -5274,11 +5209,11 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
if (ret)
return ret;
- last_index = (offset + len - 1) >> PAGE_SHIFT;
-
- while (index <= last_index) {
- unsigned cur_len = min_t(unsigned, len,
- PAGE_SIZE - pg_offset);
+ while (cur < end) {
+ pgoff_t index = (cur >> PAGE_SHIFT);
+ unsigned int cur_len;
+ unsigned int pg_offset;
+ struct folio *folio;
folio = filemap_lock_folio(mapping, index);
if (IS_ERR(folio)) {
@@ -5292,8 +5227,8 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
break;
}
}
-
- WARN_ON(folio_order(folio));
+ pg_offset = offset_in_folio(folio, cur);
+ cur_len = min_t(unsigned int, end - cur, folio_size(folio) - pg_offset);
if (folio_test_readahead(folio))
page_cache_async_readahead(mapping, &sctx->ra, NULL, folio,
@@ -5302,7 +5237,7 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
if (!folio_test_uptodate(folio)) {
btrfs_read_folio(NULL, folio);
folio_lock(folio);
- if (!folio_test_uptodate(folio)) {
+ if (unlikely(!folio_test_uptodate(folio))) {
folio_unlock(folio);
btrfs_err(fs_info,
"send: IO error at offset %llu for inode %llu root %llu",
@@ -5312,15 +5247,18 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
ret = -EIO;
break;
}
+ if (folio->mapping != mapping) {
+ folio_unlock(folio);
+ folio_put(folio);
+ continue;
+ }
}
memcpy_from_folio(sctx->send_buf + sctx->send_size, folio,
pg_offset, cur_len);
folio_unlock(folio);
folio_put(folio);
- index++;
- pg_offset = 0;
- len -= cur_len;
+ cur += cur_len;
sctx->send_size += cur_len;
}
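/*
 * [Editor's note: the loop above now iterates by byte offset (cur/end) and
 * derives pg_offset/cur_len from folio_size(), so it handles folios larger
 * than PAGE_SIZE instead of WARNing on them, and it re-checks folio->mapping
 * after locking a freshly read folio, retrying if the folio was truncated or
 * reclaimed in the meantime.]
 */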
@@ -5333,35 +5271,26 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
*/
static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p;
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
-
- btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
+ p = get_cur_inode_path(sctx);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
if (ret < 0)
- goto out;
-
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
- if (ret < 0)
- goto out;
+ return ret;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
ret = put_file_data(sctx, offset, len);
if (ret < 0)
- goto out;
+ return ret;
ret = send_cmd(sctx);
tlv_put_failure:
-out:
- fs_path_free(p);
return ret;
}
@@ -5374,12 +5303,12 @@ static int send_clone(struct send_ctx *sctx,
{
int ret = 0;
struct fs_path *p;
+ struct fs_path *cur_inode_path;
u64 gen;
- btrfs_debug(sctx->send_root->fs_info,
- "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
- offset, len, btrfs_root_id(clone_root->root),
- clone_root->ino, clone_root->offset);
+ cur_inode_path = get_cur_inode_path(sctx);
+ if (IS_ERR(cur_inode_path))
+ return PTR_ERR(cur_inode_path);
p = fs_path_alloc();
if (!p)
@@ -5389,13 +5318,9 @@ static int send_clone(struct send_ctx *sctx,
if (ret < 0)
goto out;
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
- if (ret < 0)
- goto out;
-
TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
- TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, cur_inode_path);
if (clone_root->root == sctx->send_root) {
ret = get_inode_gen(sctx->send_root, clone_root->ino, &gen);
@@ -5446,27 +5371,45 @@ static int send_update_extent(struct send_ctx *sctx,
int ret = 0;
struct fs_path *p;
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
+ p = get_cur_inode_path(sctx);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
if (ret < 0)
- goto out;
+ return ret;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+ return ret;
+}
+
+static int send_fallocate(struct send_ctx *sctx, u32 mode, u64 offset, u64 len)
+{
+ struct fs_path *path;
+ int ret;
+
+ path = get_cur_inode_path(sctx);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_FALLOCATE);
if (ret < 0)
- goto out;
+ return ret;
- TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
+ TLV_PUT_U32(sctx, BTRFS_SEND_A_FALLOCATE_MODE, mode);
TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
ret = send_cmd(sctx);
tlv_put_failure:
-out:
- fs_path_free(p);
return ret;
}
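/*
 * [Editor's note: BTRFS_SEND_C_FALLOCATE (with BTRFS_SEND_A_FALLOCATE_MODE)
 * is a send stream v2 command; send_hole() below uses it with
 * FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, guarded by proto_cmd_ok(), so
 * holes no longer have to be emitted as writes full of zeroes on v2+.]
 */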
@@ -5478,6 +5421,14 @@ static int send_hole(struct send_ctx *sctx, u64 end)
int ret = 0;
/*
+ * Starting with send stream v2 we have fallocate and can use it to
+ * punch holes instead of sending writes full of zeroes.
+ */
+ if (proto_cmd_ok(sctx, BTRFS_SEND_C_FALLOCATE))
+ return send_fallocate(sctx, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ offset, end - offset);
+
+ /*
* A hole that starts at EOF or beyond it. Since we do not yet support
* fallocate (for extent preallocation and hole punching), sending a
* write of zeroes starting at EOF or beyond would later require issuing
@@ -5495,12 +5446,10 @@ static int send_hole(struct send_ctx *sctx, u64 end)
if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
return send_update_extent(sctx, offset, end - offset);
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
- if (ret < 0)
- goto tlv_put_failure;
+ p = get_cur_inode_path(sctx);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
while (offset < end) {
u64 len = min(end - offset, read_size);
@@ -5521,7 +5470,6 @@ static int send_hole(struct send_ctx *sctx, u64 end)
}
sctx->cur_inode_next_write_offset = offset;
tlv_put_failure:
- fs_path_free(p);
return ret;
}
@@ -5529,9 +5477,7 @@ static int send_encoded_inline_extent(struct send_ctx *sctx,
struct btrfs_path *path, u64 offset,
u64 len)
{
- struct btrfs_root *root = sctx->send_root;
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct inode *inode;
+ struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
struct fs_path *fspath;
struct extent_buffer *leaf = path->nodes[0];
struct btrfs_key key;
@@ -5540,23 +5486,13 @@ static int send_encoded_inline_extent(struct send_ctx *sctx,
size_t inline_size;
int ret;
- inode = btrfs_iget(sctx->cur_ino, root);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
-
- fspath = fs_path_alloc();
- if (!fspath) {
- ret = -ENOMEM;
- goto out;
- }
+ fspath = get_cur_inode_path(sctx);
+ if (IS_ERR(fspath))
+ return PTR_ERR(fspath);
ret = begin_cmd(sctx, BTRFS_SEND_C_ENCODED_WRITE);
if (ret < 0)
- goto out;
-
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
- if (ret < 0)
- goto out;
+ return ret;
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
@@ -5572,12 +5508,12 @@ static int send_encoded_inline_extent(struct send_ctx *sctx,
ret = btrfs_encoded_io_compression_from_extent(fs_info,
btrfs_file_extent_compression(leaf, ei));
if (ret < 0)
- goto out;
+ return ret;
TLV_PUT_U32(sctx, BTRFS_SEND_A_COMPRESSION, ret);
ret = put_data_header(sctx, inline_size);
if (ret < 0)
- goto out;
+ return ret;
read_extent_buffer(leaf, sctx->send_buf + sctx->send_size,
btrfs_file_extent_inline_start(ei), inline_size);
sctx->send_size += inline_size;
@@ -5585,9 +5521,6 @@ static int send_encoded_inline_extent(struct send_ctx *sctx,
ret = send_cmd(sctx);
tlv_put_failure:
-out:
- fs_path_free(fspath);
- iput(inode);
return ret;
}
@@ -5596,7 +5529,7 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
{
struct btrfs_root *root = sctx->send_root;
struct btrfs_fs_info *fs_info = root->fs_info;
- struct inode *inode;
+ struct btrfs_inode *inode;
struct fs_path *fspath;
struct extent_buffer *leaf = path->nodes[0];
struct btrfs_key key;
@@ -5611,9 +5544,9 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
if (IS_ERR(inode))
return PTR_ERR(inode);
- fspath = fs_path_alloc();
- if (!fspath) {
- ret = -ENOMEM;
+ fspath = get_cur_inode_path(sctx);
+ if (IS_ERR(fspath)) {
+ ret = PTR_ERR(fspath);
goto out;
}
@@ -5621,10 +5554,6 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
if (ret < 0)
goto out;
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
- if (ret < 0)
- goto out;
-
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
@@ -5656,8 +5585,8 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
* between the beginning of the command and the file data.
*/
data_offset = PAGE_ALIGN(sctx->send_size);
- if (data_offset > sctx->send_max_size ||
- sctx->send_max_size - data_offset < disk_num_bytes) {
+ if (unlikely(data_offset > sctx->send_max_size ||
+ sctx->send_max_size - data_offset < disk_num_bytes)) {
ret = -EOVERFLOW;
goto out;
}
@@ -5666,7 +5595,7 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
* Note that send_buf is a mapping of send_buf_pages, so this is really
* reading into send_buf.
*/
- ret = btrfs_encoded_read_regular_fill_pages(BTRFS_I(inode),
+ ret = btrfs_encoded_read_regular_fill_pages(inode,
disk_bytenr, disk_num_bytes,
sctx->send_buf_pages +
(data_offset >> PAGE_SHIFT),
@@ -5692,8 +5621,7 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
tlv_put_failure:
out:
- fs_path_free(fspath);
- iput(inode);
+ iput(&inode->vfs_inode);
return ret;
}
@@ -5735,15 +5663,14 @@ static int send_extent_data(struct send_ctx *sctx, struct btrfs_path *path,
}
if (sctx->cur_inode == NULL) {
+ struct btrfs_inode *btrfs_inode;
struct btrfs_root *root = sctx->send_root;
- sctx->cur_inode = btrfs_iget(sctx->cur_ino, root);
- if (IS_ERR(sctx->cur_inode)) {
- int err = PTR_ERR(sctx->cur_inode);
+ btrfs_inode = btrfs_iget(sctx->cur_ino, root);
+ if (IS_ERR(btrfs_inode))
+ return PTR_ERR(btrfs_inode);
- sctx->cur_inode = NULL;
- return err;
- }
+ sctx->cur_inode = &btrfs_inode->vfs_inode;
memset(&sctx->ra, 0, sizeof(struct file_ra_state));
file_ra_state_init(&sctx->ra, sctx->cur_inode->i_mapping);
@@ -5822,12 +5749,11 @@ static int send_extent_data(struct send_ctx *sctx, struct btrfs_path *path,
*/
static int send_capabilities(struct send_ctx *sctx)
{
- struct fs_path *fspath = NULL;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_dir_item *di;
struct extent_buffer *leaf;
unsigned long data_ptr;
- char *buf = NULL;
+ char AUTO_KFREE(buf);
int buf_len;
int ret = 0;
@@ -5839,35 +5765,23 @@ static int send_capabilities(struct send_ctx *sctx)
XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0);
if (!di) {
/* There is no xattr for this inode */
- goto out;
+ return 0;
} else if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto out;
+ return PTR_ERR(di);
}
leaf = path->nodes[0];
buf_len = btrfs_dir_data_len(leaf, di);
- fspath = fs_path_alloc();
buf = kmalloc(buf_len, GFP_KERNEL);
- if (!fspath || !buf) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
- if (ret < 0)
- goto out;
+ if (!buf)
+ return -ENOMEM;
data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
read_extent_buffer(leaf, buf, data_ptr, buf_len);
- ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
+ ret = send_set_xattr(sctx, XATTR_NAME_CAPS,
strlen(XATTR_NAME_CAPS), buf, buf_len);
-out:
- kfree(buf);
- fs_path_free(fspath);
- btrfs_free_path(path);
return ret;
}
@@ -5875,7 +5789,7 @@ static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path,
struct clone_root *clone_root, const u64 disk_byte,
u64 data_offset, u64 offset, u64 len)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
int ret;
struct btrfs_inode_info info;
@@ -5911,7 +5825,7 @@ static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path,
ret = get_inode_info(clone_root->root, clone_root->ino, &info);
btrfs_release_path(path);
if (ret < 0)
- goto out;
+ return ret;
clone_src_i_size = info.size;
/*
@@ -5941,7 +5855,7 @@ static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path,
key.offset = clone_root->offset;
ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
if (ret > 0 && path->slots[0] > 0) {
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
if (key.objectid == clone_root->ino &&
@@ -5962,7 +5876,7 @@ static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path,
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(clone_root->root, path);
if (ret < 0)
- goto out;
+ return ret;
else if (ret > 0)
break;
continue;
@@ -5999,7 +5913,7 @@ static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path,
ret = send_extent_data(sctx, dst_path, offset,
hole_len);
if (ret < 0)
- goto out;
+ return ret;
len -= hole_len;
if (len == 0)
@@ -6070,7 +5984,7 @@ static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path,
ret = send_clone(sctx, offset, slen,
clone_root);
if (ret < 0)
- goto out;
+ return ret;
}
ret = send_extent_data(sctx, dst_path,
offset + slen,
@@ -6104,7 +6018,7 @@ static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path,
}
if (ret < 0)
- goto out;
+ return ret;
len -= clone_len;
if (len == 0)
@@ -6135,8 +6049,6 @@ next:
ret = send_extent_data(sctx, dst_path, offset, len);
else
ret = 0;
-out:
- btrfs_free_path(path);
return ret;
}
@@ -6225,7 +6137,7 @@ static int is_extent_unchanged(struct send_ctx *sctx,
{
int ret = 0;
struct btrfs_key key;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *eb;
int slot;
struct btrfs_key found_key;
@@ -6251,10 +6163,9 @@ static int is_extent_unchanged(struct send_ctx *sctx,
ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
left_type = btrfs_file_extent_type(eb, ei);
- if (left_type != BTRFS_FILE_EXTENT_REG) {
- ret = 0;
- goto out;
- }
+ if (left_type != BTRFS_FILE_EXTENT_REG)
+ return 0;
+
left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
left_len = btrfs_file_extent_num_bytes(eb, ei);
left_offset = btrfs_file_extent_offset(eb, ei);
@@ -6286,11 +6197,9 @@ static int is_extent_unchanged(struct send_ctx *sctx,
key.offset = ekey->offset;
ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
if (ret < 0)
- goto out;
- if (ret) {
- ret = 0;
- goto out;
- }
+ return ret;
+ if (ret)
+ return 0;
/*
* Handle special case where the right side has no extents at all.
@@ -6299,11 +6208,9 @@ static int is_extent_unchanged(struct send_ctx *sctx,
slot = path->slots[0];
btrfs_item_key_to_cpu(eb, &found_key, slot);
if (found_key.objectid != key.objectid ||
- found_key.type != key.type) {
+ found_key.type != key.type)
/* If we're a hole then just pretend nothing changed */
- ret = (left_disknr) ? 0 : 1;
- goto out;
- }
+ return (left_disknr ? 0 : 1);
/*
* We're now on 2a, 2b or 7.
@@ -6313,10 +6220,8 @@ static int is_extent_unchanged(struct send_ctx *sctx,
ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
right_type = btrfs_file_extent_type(eb, ei);
if (right_type != BTRFS_FILE_EXTENT_REG &&
- right_type != BTRFS_FILE_EXTENT_INLINE) {
- ret = 0;
- goto out;
- }
+ right_type != BTRFS_FILE_EXTENT_INLINE)
+ return 0;
if (right_type == BTRFS_FILE_EXTENT_INLINE) {
right_len = btrfs_file_extent_ram_bytes(eb, ei);
@@ -6329,11 +6234,9 @@ static int is_extent_unchanged(struct send_ctx *sctx,
* Are we at extent 8? If yes, we know the extent is changed.
* This may only happen on the first iteration.
*/
- if (found_key.offset + right_len <= ekey->offset) {
+ if (found_key.offset + right_len <= ekey->offset)
/* If we're a hole just pretend nothing changed */
- ret = (left_disknr) ? 0 : 1;
- goto out;
- }
+ return (left_disknr ? 0 : 1);
/*
* We just wanted to see if when we have an inline extent, what
@@ -6343,10 +6246,8 @@ static int is_extent_unchanged(struct send_ctx *sctx,
* compressed extent representing data with a size matching
* the page size (currently the same as sector size).
*/
- if (right_type == BTRFS_FILE_EXTENT_INLINE) {
- ret = 0;
- goto out;
- }
+ if (right_type == BTRFS_FILE_EXTENT_INLINE)
+ return 0;
right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
right_offset = btrfs_file_extent_offset(eb, ei);
@@ -6366,17 +6267,15 @@ static int is_extent_unchanged(struct send_ctx *sctx,
*/
if (left_disknr != right_disknr ||
left_offset_fixed != right_offset ||
- left_gen != right_gen) {
- ret = 0;
- goto out;
- }
+ left_gen != right_gen)
+ return 0;
/*
* Go to the next extent.
*/
ret = btrfs_next_item(sctx->parent_root, path);
if (ret < 0)
- goto out;
+ return ret;
if (!ret) {
eb = path->nodes[0];
slot = path->slots[0];
@@ -6387,10 +6286,9 @@ static int is_extent_unchanged(struct send_ctx *sctx,
key.offset += right_len;
break;
}
- if (found_key.offset != key.offset + right_len) {
- ret = 0;
- goto out;
- }
+ if (found_key.offset != key.offset + right_len)
+ return 0;
+
key = found_key;
}
@@ -6403,15 +6301,12 @@ static int is_extent_unchanged(struct send_ctx *sctx,
else
ret = 0;
-
-out:
- btrfs_free_path(path);
return ret;
}
static int get_last_extent(struct send_ctx *sctx, u64 offset)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *root = sctx->send_root;
struct btrfs_key key;
int ret;
@@ -6427,15 +6322,13 @@ static int get_last_extent(struct send_ctx *sctx, u64 offset)
key.offset = offset;
ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
if (ret < 0)
- goto out;
+ return ret;
ret = 0;
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
- goto out;
+ return ret;
sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
-out:
- btrfs_free_path(path);
return ret;
}
@@ -6443,7 +6336,7 @@ static int range_is_hole_in_parent(struct send_ctx *sctx,
const u64 start,
const u64 end)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_root *root = sctx->parent_root;
u64 search_start = start;
@@ -6458,7 +6351,7 @@ static int range_is_hole_in_parent(struct send_ctx *sctx,
key.offset = search_start;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
if (ret > 0 && path->slots[0] > 0)
path->slots[0]--;
@@ -6471,8 +6364,8 @@ static int range_is_hole_in_parent(struct send_ctx *sctx,
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
- goto out;
- else if (ret > 0)
+ return ret;
+ if (ret > 0)
break;
continue;
}
@@ -6494,15 +6387,11 @@ static int range_is_hole_in_parent(struct send_ctx *sctx,
search_start = extent_end;
goto next;
}
- ret = 0;
- goto out;
+ return 0;
next:
path->slots[0]++;
}
- ret = 1;
-out:
- btrfs_free_path(path);
- return ret;
+ return 1;
}
static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
@@ -6610,7 +6499,7 @@ static int process_all_extents(struct send_ctx *sctx)
int ret = 0;
int iter_ret = 0;
struct btrfs_root *root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_key found_key;
@@ -6637,11 +6526,10 @@ static int process_all_extents(struct send_ctx *sctx)
if (iter_ret < 0)
ret = iter_ret;
- btrfs_free_path(path);
return ret;
}
-static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
+static int process_recorded_refs_if_needed(struct send_ctx *sctx, bool at_end,
int *pending_move,
int *refs_processed)
{
@@ -6664,7 +6552,7 @@ out:
return ret;
}
-static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
+static int finish_inode_if_needed(struct send_ctx *sctx, bool at_end)
{
int ret = 0;
struct btrfs_inode_info info;
@@ -6892,6 +6780,7 @@ static int changed_inode(struct send_ctx *sctx,
sctx->cur_inode_last_extent = (u64)-1;
sctx->cur_inode_next_write_offset = 0;
sctx->ignore_cur_inode = false;
+ fs_path_reset(&sctx->cur_inode_path);
/*
* Set send_progress to current inode. This will tell all get_cur_xxx
@@ -7098,7 +6987,7 @@ static int changed_ref(struct send_ctx *sctx,
{
int ret = 0;
- if (sctx->cur_ino != sctx->cmp_key->objectid) {
+ if (unlikely(sctx->cur_ino != sctx->cmp_key->objectid)) {
inconsistent_snapshot_error(sctx, result, "reference");
return -EIO;
}
@@ -7126,7 +7015,7 @@ static int changed_xattr(struct send_ctx *sctx,
{
int ret = 0;
- if (sctx->cur_ino != sctx->cmp_key->objectid) {
+ if (unlikely(sctx->cur_ino != sctx->cmp_key->objectid)) {
inconsistent_snapshot_error(sctx, result, "xattr");
return -EIO;
}
@@ -7253,7 +7142,7 @@ static int changed_cb(struct btrfs_path *left_path,
enum btrfs_compare_tree_result result,
struct send_ctx *sctx)
{
- int ret = 0;
+ int ret;
/*
* We can not hold the commit root semaphore here. This is because in
@@ -7313,7 +7202,6 @@ static int changed_cb(struct btrfs_path *left_path,
return 0;
}
result = BTRFS_COMPARE_TREE_CHANGED;
- ret = 0;
}
sctx->left_path = left_path;
@@ -7367,11 +7255,11 @@ static int search_key_again(const struct send_ctx *sctx,
*/
ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
ASSERT(ret <= 0);
- if (ret > 0) {
+ if (unlikely(ret > 0)) {
btrfs_print_tree(path->nodes[path->lowest_level], false);
btrfs_err(root->fs_info,
-"send: key (%llu %u %llu) not found in %s root %llu, lowest_level %d, slot %d",
- key->objectid, key->type, key->offset,
+"send: key " BTRFS_KEY_FMT" not found in %s root %llu, lowest_level %d, slot %d",
+ BTRFS_KEY_FMT_VALUE(key),
(root == sctx->parent_root ? "parent" : "send"),
btrfs_root_id(root), path->lowest_level,
path->slots[path->lowest_level]);
@@ -7387,7 +7275,7 @@ static int full_send_tree(struct send_ctx *sctx)
struct btrfs_root *send_root = sctx->send_root;
struct btrfs_key key;
struct btrfs_fs_info *fs_info = send_root->fs_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
path = alloc_path_for_send();
if (!path)
@@ -7404,7 +7292,7 @@ static int full_send_tree(struct send_ctx *sctx)
ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
if (ret < 0)
- goto out;
+ return ret;
if (ret)
goto out_finish;
@@ -7414,7 +7302,7 @@ static int full_send_tree(struct send_ctx *sctx)
ret = changed_cb(path, NULL, &key,
BTRFS_COMPARE_TREE_NEW, sctx);
if (ret < 0)
- goto out;
+ return ret;
down_read(&fs_info->commit_root_sem);
if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
@@ -7433,14 +7321,14 @@ static int full_send_tree(struct send_ctx *sctx)
btrfs_release_path(path);
ret = search_key_again(sctx, send_root, path, &key);
if (ret < 0)
- goto out;
+ return ret;
} else {
up_read(&fs_info->commit_root_sem);
}
ret = btrfs_next_item(send_root, path);
if (ret < 0)
- goto out;
+ return ret;
if (ret) {
ret = 0;
break;
@@ -7448,11 +7336,7 @@ static int full_send_tree(struct send_ctx *sctx)
}
out_finish:
- ret = finish_inode_if_needed(sctx, 1);
-
-out:
- btrfs_free_path(path);
- return ret;
+ return finish_inode_if_needed(sctx, 1);
}
static int replace_node_with_clone(struct btrfs_path *path, int level)
@@ -7707,8 +7591,8 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
struct btrfs_fs_info *fs_info = left_root->fs_info;
int ret;
int cmp;
- struct btrfs_path *left_path = NULL;
- struct btrfs_path *right_path = NULL;
+ BTRFS_PATH_AUTO_FREE(left_path);
+ BTRFS_PATH_AUTO_FREE(right_path);
struct btrfs_key left_key;
struct btrfs_key right_key;
char *tmp_buf = NULL;
@@ -7743,10 +7627,10 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
goto out;
}
- left_path->search_commit_root = 1;
- left_path->skip_locking = 1;
- right_path->search_commit_root = 1;
- right_path->skip_locking = 1;
+ left_path->search_commit_root = true;
+ left_path->skip_locking = true;
+ right_path->search_commit_root = true;
+ right_path->skip_locking = true;
/*
* Strategy: Go to the first items of both trees. Then do
@@ -7981,8 +7865,6 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
out_unlock:
up_read(&fs_info->commit_root_sem);
out:
- btrfs_free_path(left_path);
- btrfs_free_path(right_path);
kvfree(tmp_buf);
return ret;
}
@@ -8049,7 +7931,7 @@ static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
}
/*
- * Make sure any existing dellaloc is flushed for any root used by a send
+ * Make sure any existing delalloc is flushed for any root used by a send
* operation so that we do not miss any data and we do not race with writeback
* finishing and changing a tree while send is using the tree. This could
* happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
@@ -8102,10 +7984,9 @@ static void dedupe_in_progress_warn(const struct btrfs_root *root)
btrfs_root_id(root), root->dedupe_in_progress);
}
-long btrfs_ioctl_send(struct btrfs_inode *inode, const struct btrfs_ioctl_send_args *arg)
+long btrfs_ioctl_send(struct btrfs_root *send_root, const struct btrfs_ioctl_send_args *arg)
{
int ret = 0;
- struct btrfs_root *send_root = inode->root;
struct btrfs_fs_info *fs_info = send_root->fs_info;
struct btrfs_root *clone_root;
struct send_ctx *sctx = NULL;
@@ -8168,6 +8049,7 @@ long btrfs_ioctl_send(struct btrfs_inode *inode, const struct btrfs_ioctl_send_a
goto out;
}
+ init_path(&sctx->cur_inode_path);
INIT_LIST_HEAD(&sctx->new_refs);
INIT_LIST_HEAD(&sctx->deleted_refs);
@@ -8444,6 +8326,9 @@ out:
btrfs_lru_cache_clear(&sctx->dir_created_cache);
btrfs_lru_cache_clear(&sctx->dir_utimes_cache);
+ if (sctx->cur_inode_path.buf != sctx->cur_inode_path.inline_buf)
+ kfree(sctx->cur_inode_path.buf);
+
kfree(sctx);
}
diff --git a/fs/btrfs/send.h b/fs/btrfs/send.h
index 9309886c5ea1..652bb28f63d4 100644
--- a/fs/btrfs/send.h
+++ b/fs/btrfs/send.h
@@ -11,7 +11,7 @@
#include <linux/sizes.h>
#include <linux/align.h>
-struct btrfs_inode;
+struct btrfs_root;
struct btrfs_ioctl_send_args;
#define BTRFS_SEND_STREAM_MAGIC "btrfs-stream"
@@ -182,6 +182,6 @@ enum {
__BTRFS_SEND_A_MAX = 35,
};
-long btrfs_ioctl_send(struct btrfs_inode *inode, const struct btrfs_ioctl_send_args *arg);
+long btrfs_ioctl_send(struct btrfs_root *send_root, const struct btrfs_ioctl_send_args *arg);
#endif
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 255e85f78313..6babbe333741 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-#include "linux/spinlock.h"
+#include <linux/spinlock.h>
#include <linux/minmax.h>
#include "misc.h"
#include "ctree.h"
@@ -14,6 +14,8 @@
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
+#include "zoned.h"
+#include "delayed-inode.h"
/*
* HOW DOES SPACE RESERVATION WORK
@@ -49,11 +51,11 @@
* num_bytes we want to reserve.
*
* ->reserve
- * space_info->bytes_may_reserve += num_bytes
+ * space_info->bytes_may_use += num_bytes
*
* ->extent allocation
* Call btrfs_add_reserved_bytes() which does
- * space_info->bytes_may_reserve -= num_bytes
+ * space_info->bytes_may_use -= num_bytes
* space_info->bytes_reserved += extent_bytes
*
* ->insert reference
@@ -66,7 +68,7 @@
* Assume we are unable to simply make the reservation because we do not have
* enough space
*
- * -> __reserve_bytes
+ * -> reserve_bytes
* create a reserve_ticket with ->bytes set to our reservation, add it to
* the tail of space_info->tickets, kick async flush thread
*
@@ -127,6 +129,14 @@
* churn a lot and we can avoid making some extent tree modifications if we
* are able to delay for as long as possible.
*
+ * RESET_ZONES
+ * This state works only for zoned mode. In zoned mode, a region that was
+ * allocated and then freed cannot be reused until its zone is reset, due to
+ * the sequential write requirement of zones. The RESET_ZONES state resets
+ * the zones of an unused block group and lets us reuse the space. Reusing
+ * the zones is faster than removing the block group and allocating another
+ * block group on the zones.
+ *
* ALLOC_CHUNK
* We will skip this the first time through space reservation, because of
* overcommit and we don't want to have a lot of useless metadata space when
@@ -163,15 +173,14 @@
* thing with or without extra unallocated space.
*/
-u64 __pure btrfs_space_info_used(const struct btrfs_space_info *s_info,
- bool may_use_included)
-{
- ASSERT(s_info);
- return s_info->bytes_used + s_info->bytes_reserved +
- s_info->bytes_pinned + s_info->bytes_readonly +
- s_info->bytes_zone_unusable +
- (may_use_included ? s_info->bytes_may_use : 0);
-}
+struct reserve_ticket {
+ u64 bytes;
+ int error;
+ bool steal;
+ struct list_head list;
+ wait_queue_head_t wait;
+ spinlock_t lock;
+};
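/*
 * [Editor's note: struct reserve_ticket, previously not defined in this
 * file, now also carries a per-ticket spinlock, taken below in
 * remove_ticket(), presumably so a ticket's state can be updated safely with
 * respect to the task waiting on it.]
 */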
/*
* after adding space to the filesystem, we need to clear the full flags
@@ -183,7 +192,7 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
struct btrfs_space_info *found;
list_for_each_entry(found, head, list)
- found->full = 0;
+ found->full = false;
}
/*
@@ -202,7 +211,7 @@ static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
if (btrfs_is_zoned(fs_info))
return fs_info->zone_size;
- ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
+ ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK, "flags=%llu", flags);
if (flags & BTRFS_BLOCK_GROUP_DATA)
return BTRFS_MAX_DATA_CHUNK_SIZE;
@@ -225,19 +234,11 @@ void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
WRITE_ONCE(space_info->chunk_size, chunk_size);
}
-static int create_space_info(struct btrfs_fs_info *info, u64 flags)
+static void init_space_info(struct btrfs_fs_info *info,
+ struct btrfs_space_info *space_info, u64 flags)
{
-
- struct btrfs_space_info *space_info;
- int i;
- int ret;
-
- space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
- if (!space_info)
- return -ENOMEM;
-
space_info->fs_info = info;
- for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
+ for (int i = 0; i < BTRFS_NR_RAID_TYPES; i++)
INIT_LIST_HEAD(&space_info->block_groups[i]);
init_rwsem(&space_info->groups_sem);
spin_lock_init(&space_info->lock);
@@ -248,11 +249,67 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
INIT_LIST_HEAD(&space_info->priority_tickets);
space_info->clamp = 1;
btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));
+ space_info->subgroup_id = BTRFS_SUB_GROUP_PRIMARY;
if (btrfs_is_zoned(info))
space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;
+}
+
+static int create_space_info_sub_group(struct btrfs_space_info *parent, u64 flags,
+ enum btrfs_space_info_sub_group id, int index)
+{
+ struct btrfs_fs_info *fs_info = parent->fs_info;
+ struct btrfs_space_info *sub_group;
+ int ret;
+
+ ASSERT(parent->subgroup_id == BTRFS_SUB_GROUP_PRIMARY,
+ "parent->subgroup_id=%d", parent->subgroup_id);
+ ASSERT(id != BTRFS_SUB_GROUP_PRIMARY, "id=%d", id);
+
+ sub_group = kzalloc(sizeof(*sub_group), GFP_NOFS);
+ if (!sub_group)
+ return -ENOMEM;
+
+ init_space_info(fs_info, sub_group, flags);
+ parent->sub_group[index] = sub_group;
+ sub_group->parent = parent;
+ sub_group->subgroup_id = id;
+
+ ret = btrfs_sysfs_add_space_info_type(sub_group);
+ if (ret) {
+ kfree(sub_group);
+ parent->sub_group[index] = NULL;
+ }
+ return ret;
+}
+
+static int create_space_info(struct btrfs_fs_info *info, u64 flags)
+{
+
+ struct btrfs_space_info *space_info;
+ int ret = 0;
+
+ space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
+ if (!space_info)
+ return -ENOMEM;
+
+ init_space_info(info, space_info, flags);
+
+ if (btrfs_is_zoned(info)) {
+ if (flags & BTRFS_BLOCK_GROUP_DATA)
+ ret = create_space_info_sub_group(space_info, flags,
+ BTRFS_SUB_GROUP_DATA_RELOC,
+ 0);
+ else if (flags & BTRFS_BLOCK_GROUP_METADATA)
+ ret = create_space_info_sub_group(space_info, flags,
+ BTRFS_SUB_GROUP_TREELOG,
+ 0);
+
+ if (ret)
+ return ret;
+ }
- ret = btrfs_sysfs_add_space_info_type(info, space_info);
+ ret = btrfs_sysfs_add_space_info_type(space_info);
if (ret)
return ret;
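/*
 * [Editor's note: on zoned filesystems, the data and metadata space_infos now
 * get a dedicated sub-group (BTRFS_SUB_GROUP_DATA_RELOC and
 * BTRFS_SUB_GROUP_TREELOG respectively), each initialized like the primary
 * group, linked via parent/sub_group[] and registered in sysfs with
 * btrfs_sysfs_add_space_info_type().]
 */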
@@ -303,31 +360,29 @@ out:
void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
struct btrfs_block_group *block_group)
{
- struct btrfs_space_info *found;
+ struct btrfs_space_info *space_info = block_group->space_info;
int factor, index;
factor = btrfs_bg_type_to_factor(block_group->flags);
- found = btrfs_find_space_info(info, block_group->flags);
- ASSERT(found);
- spin_lock(&found->lock);
- found->total_bytes += block_group->length;
- found->disk_total += block_group->length * factor;
- found->bytes_used += block_group->used;
- found->disk_used += block_group->used * factor;
- found->bytes_readonly += block_group->bytes_super;
- btrfs_space_info_update_bytes_zone_unusable(info, found, block_group->zone_unusable);
+ spin_lock(&space_info->lock);
+ space_info->total_bytes += block_group->length;
+ space_info->disk_total += block_group->length * factor;
+ space_info->bytes_used += block_group->used;
+ space_info->disk_used += block_group->used * factor;
+ space_info->bytes_readonly += block_group->bytes_super;
+ btrfs_space_info_update_bytes_zone_unusable(space_info, block_group->zone_unusable);
if (block_group->length > 0)
- found->full = 0;
- btrfs_try_granting_tickets(info, found);
- spin_unlock(&found->lock);
+ space_info->full = false;
+ btrfs_try_granting_tickets(space_info);
+ spin_unlock(&space_info->lock);
- block_group->space_info = found;
+ block_group->space_info = space_info;
index = btrfs_bg_flags_to_raid_index(block_group->flags);
- down_write(&found->groups_sem);
- list_add_tail(&block_group->list, &found->block_groups[index]);
- up_write(&found->groups_sem);
+ down_write(&space_info->groups_sem);
+ list_add_tail(&block_group->list, &space_info->block_groups[index]);
+ up_write(&space_info->groups_sem);
}
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
@@ -367,10 +422,10 @@ static u64 calc_effective_data_chunk_size(struct btrfs_fs_info *fs_info)
return min_t(u64, data_chunk_size, SZ_1G);
}
-static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
- const struct btrfs_space_info *space_info,
- enum btrfs_reserve_flush_enum flush)
+static u64 calc_available_free_space(const struct btrfs_space_info *space_info,
+ enum btrfs_reserve_flush_enum flush)
{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
u64 profile;
u64 avail;
u64 data_chunk_size;
@@ -425,7 +480,7 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
/*
* On the zoned mode, we always allocate one zone as one chunk.
- * Returning non-zone size alingned bytes here will result in
+ * Returning non-zone size aligned bytes here will result in
* less pressure for the async metadata reclaim process, and it
* will over-commit too much leading to ENOSPC. Align down to the
* zone size to avoid that.
@@ -436,44 +491,77 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
return avail;
}
-int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
- const struct btrfs_space_info *space_info, u64 bytes,
- enum btrfs_reserve_flush_enum flush)
+static inline bool check_can_overcommit(const struct btrfs_space_info *space_info,
+ u64 space_info_used_bytes, u64 bytes,
+ enum btrfs_reserve_flush_enum flush)
+{
+ const u64 avail = calc_available_free_space(space_info, flush);
+
+ return (space_info_used_bytes + bytes < space_info->total_bytes + avail);
+}
+
+static inline bool can_overcommit(const struct btrfs_space_info *space_info,
+ u64 space_info_used_bytes, u64 bytes,
+ enum btrfs_reserve_flush_enum flush)
+{
+ /* Don't overcommit when in mixed mode. */
+ if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
+ return false;
+
+ return check_can_overcommit(space_info, space_info_used_bytes, bytes, flush);
+}
+
+bool btrfs_can_overcommit(const struct btrfs_space_info *space_info, u64 bytes,
+ enum btrfs_reserve_flush_enum flush)
{
- u64 avail;
u64 used;
/* Don't overcommit when in mixed mode */
if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
- return 0;
+ return false;
used = btrfs_space_info_used(space_info, true);
- avail = calc_available_free_space(fs_info, space_info, flush);
- if (used + bytes < space_info->total_bytes + avail)
- return 1;
- return 0;
+ return check_can_overcommit(space_info, used, bytes, flush);
}
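A worked example of the check above, with illustrative numbers: if total_bytes = 8 GiB, used = 7 GiB and calc_available_free_space() estimates 2 GiB of unallocated device space, a 512 MiB metadata reservation is allowed because 7 GiB + 0.5 GiB < 8 GiB + 2 GiB. The same request against a DATA space_info is refused up front, since data space is never overcommitted.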
static void remove_ticket(struct btrfs_space_info *space_info,
- struct reserve_ticket *ticket)
+ struct reserve_ticket *ticket, int error)
{
+ lockdep_assert_held(&space_info->lock);
+
if (!list_empty(&ticket->list)) {
list_del_init(&ticket->list);
- ASSERT(space_info->reclaim_size >= ticket->bytes);
+ ASSERT(space_info->reclaim_size >= ticket->bytes,
+ "space_info->reclaim_size=%llu ticket->bytes=%llu",
+ space_info->reclaim_size, ticket->bytes);
space_info->reclaim_size -= ticket->bytes;
}
+
+ spin_lock(&ticket->lock);
+ /*
+ * If we are called from a task waiting on the ticket, it may happen
+ * that before it sets an error on the ticket, a reclaim task was able
+ * to satisfy the ticket. In that case ignore the error.
+ */
+ if (error && ticket->bytes > 0)
+ ticket->error = error;
+ else
+ ticket->bytes = 0;
+
+ wake_up(&ticket->wait);
+ spin_unlock(&ticket->lock);
}
/*
* This is for space we already have accounted in space_info->bytes_may_use, so
* basically when we're returning space from block_rsv's.
*/
-void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info)
+void btrfs_try_granting_tickets(struct btrfs_space_info *space_info)
{
struct list_head *head;
enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
+ u64 used = btrfs_space_info_used(space_info, true);
lockdep_assert_held(&space_info->lock);
@@ -481,21 +569,18 @@ void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
again:
while (!list_empty(head)) {
struct reserve_ticket *ticket;
- u64 used = btrfs_space_info_used(space_info, true);
+ u64 used_after;
ticket = list_first_entry(head, struct reserve_ticket, list);
+ used_after = used + ticket->bytes;
/* Check and see if our ticket can be satisfied now. */
- if ((used + ticket->bytes <= space_info->total_bytes) ||
- btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
- flush)) {
- btrfs_space_info_update_bytes_may_use(fs_info,
- space_info,
- ticket->bytes);
- remove_ticket(space_info, ticket);
- ticket->bytes = 0;
+ if (used_after <= space_info->total_bytes ||
+ can_overcommit(space_info, used, ticket->bytes, flush)) {
+ btrfs_space_info_update_bytes_may_use(space_info, ticket->bytes);
+ remove_ticket(space_info, ticket, 0);
space_info->tickets_id++;
- wake_up(&ticket->wait);
+ used = used_after;
} else {
break;
}
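A worked example of the cached accounting in this loop, with illustrative numbers: starting from total_bytes = 10 GiB and used = 6 GiB, a 1 GiB ticket is granted (used_after = 7 GiB is carried forward as used), a following 2 GiB ticket is granted (9 GiB), and a 3 GiB ticket is only granted if can_overcommit() finds enough estimated free space. btrfs_space_info_used() is now computed once before the loop instead of once per ticket.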
@@ -542,15 +627,16 @@ static void dump_global_block_rsv(struct btrfs_fs_info *fs_info)
DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}
-static void __btrfs_dump_space_info(const struct btrfs_fs_info *fs_info,
- const struct btrfs_space_info *info)
+static void __btrfs_dump_space_info(const struct btrfs_space_info *info)
{
+ const struct btrfs_fs_info *fs_info = info->fs_info;
const char *flag_str = space_info_flag_to_str(info);
lockdep_assert_held(&info->lock);
/* The free space could be negative in case of overcommit */
- btrfs_info(fs_info, "space_info %s has %lld free, is %sfull",
- flag_str,
+ btrfs_info(fs_info,
+ "space_info %s (sub-group id %d) has %lld free, is %sfull",
+ flag_str, info->subgroup_id,
(s64)(info->total_bytes - btrfs_space_info_used(info, true)),
info->full ? "" : "not ");
btrfs_info(fs_info,
@@ -560,16 +646,16 @@ static void __btrfs_dump_space_info(const struct btrfs_fs_info *fs_info,
info->bytes_readonly, info->bytes_zone_unusable);
}
-void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *info, u64 bytes,
- int dump_block_groups)
+void btrfs_dump_space_info(struct btrfs_space_info *info, u64 bytes,
+ bool dump_block_groups)
{
+ struct btrfs_fs_info *fs_info = info->fs_info;
struct btrfs_block_group *cache;
u64 total_avail = 0;
int index = 0;
spin_lock(&info->lock);
- __btrfs_dump_space_info(fs_info, info);
+ __btrfs_dump_space_info(info);
dump_global_block_rsv(fs_info);
spin_unlock(&info->lock);
@@ -617,11 +703,11 @@ static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info,
/*
* shrink metadata reservation for delalloc
*/
-static void shrink_delalloc(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
+static void shrink_delalloc(struct btrfs_space_info *space_info,
u64 to_reclaim, bool wait_ordered,
bool for_preempt)
{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
struct btrfs_trans_handle *trans;
u64 delalloc_bytes;
u64 ordered_bytes;
@@ -748,10 +834,10 @@ skip_async:
* and may fail for various reasons. The caller is supposed to examine the
* state of @space_info to detect the outcome.
*/
-static void flush_space(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info, u64 num_bytes,
- enum btrfs_flush_state state, bool for_preempt)
+static void flush_space(struct btrfs_space_info *space_info, u64 num_bytes,
+ enum btrfs_flush_state state, bool for_preempt)
{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
struct btrfs_root *root = fs_info->tree_root;
struct btrfs_trans_handle *trans;
int nr;
@@ -780,7 +866,7 @@ static void flush_space(struct btrfs_fs_info *fs_info,
case FLUSH_DELALLOC_FULL:
if (state == FLUSH_DELALLOC_FULL)
num_bytes = U64_MAX;
- shrink_delalloc(fs_info, space_info, num_bytes,
+ shrink_delalloc(space_info, num_bytes,
state != FLUSH_DELALLOC, for_preempt);
break;
case FLUSH_DELAYED_REFS_NR:
@@ -805,7 +891,7 @@ static void flush_space(struct btrfs_fs_info *fs_info,
ret = PTR_ERR(trans);
break;
}
- ret = btrfs_chunk_alloc(trans,
+ ret = btrfs_chunk_alloc(trans, space_info,
btrfs_get_alloc_profile(fs_info, space_info->flags),
(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
CHUNK_ALLOC_FORCE);
@@ -834,6 +920,9 @@ static void flush_space(struct btrfs_fs_info *fs_info,
*/
ret = btrfs_commit_current_transaction(root);
break;
+ case RESET_ZONES:
+ ret = btrfs_reset_unused_block_groups(space_info, num_bytes);
+ break;
default:
ret = -ENOSPC;
break;
@@ -844,8 +933,7 @@ static void flush_space(struct btrfs_fs_info *fs_info,
return;
}
-static u64 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
- const struct btrfs_space_info *space_info)
+static u64 btrfs_calc_reclaim_metadata_size(const struct btrfs_space_info *space_info)
{
u64 used;
u64 avail;
@@ -853,8 +941,7 @@ static u64 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
lockdep_assert_held(&space_info->lock);
- avail = calc_available_free_space(fs_info, space_info,
- BTRFS_RESERVE_FLUSH_ALL);
+ avail = calc_available_free_space(space_info, BTRFS_RESERVE_FLUSH_ALL);
used = btrfs_space_info_used(space_info, true);
/*
@@ -869,18 +956,25 @@ static u64 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
return to_reclaim;
}
-static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
- const struct btrfs_space_info *space_info)
+static bool need_preemptive_reclaim(const struct btrfs_space_info *space_info)
{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
u64 ordered, delalloc;
u64 thresh;
u64 used;
- thresh = mult_perc(space_info->total_bytes, 90);
-
lockdep_assert_held(&space_info->lock);
+ /*
+ * We have tickets queued, bail so we don't compete with the async
+ * flushers.
+ */
+ if (space_info->reclaim_size)
+ return false;
+
+ thresh = mult_perc(space_info->total_bytes, 90);
+
/* If we're just plain full then async reclaim just slows us down. */
if ((space_info->bytes_used + space_info->bytes_reserved +
global_rsv_size) >= thresh)
@@ -901,13 +995,6 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
return false;
/*
- * We have tickets queued, bail so we don't compete with the async
- * flushers.
- */
- if (space_info->reclaim_size)
- return false;
-
- /*
* If we have over half of the free space occupied by reservations or
* pinned then we want to start flushing.
*
@@ -936,8 +1023,7 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
* much delalloc we need for the background flusher to kick in.
*/
- thresh = calc_available_free_space(fs_info, space_info,
- BTRFS_RESERVE_FLUSH_ALL);
+ thresh = calc_available_free_space(space_info, BTRFS_RESERVE_FLUSH_ALL);
used = space_info->bytes_used + space_info->bytes_reserved +
space_info->bytes_readonly + global_rsv_size;
if (used < space_info->total_bytes)
@@ -981,13 +1067,15 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}
-static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
+static bool steal_from_global_rsv(struct btrfs_space_info *space_info,
struct reserve_ticket *ticket)
{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
u64 min_bytes;
+ lockdep_assert_held(&space_info->lock);
+
if (!ticket->steal)
return false;
@@ -1001,21 +1089,19 @@ static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
return false;
}
global_rsv->reserved -= ticket->bytes;
- remove_ticket(space_info, ticket);
- ticket->bytes = 0;
- wake_up(&ticket->wait);
- space_info->tickets_id++;
if (global_rsv->reserved < global_rsv->size)
- global_rsv->full = 0;
+ global_rsv->full = false;
spin_unlock(&global_rsv->lock);
+ remove_ticket(space_info, ticket, 0);
+ space_info->tickets_id++;
+
return true;
}
/*
* We've exhausted our flushing, start failing tickets.
*
- * @fs_info - fs_info for this fs
* @space_info - the space info we were flushing
*
* We call this when we've exhausted our flushing ability and haven't made
@@ -1028,72 +1114,66 @@ static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
* other tickets, or if it stumbles across a ticket that was smaller than the
* first ticket.
*/
-static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info)
+static bool maybe_fail_all_tickets(struct btrfs_space_info *space_info)
{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
struct reserve_ticket *ticket;
u64 tickets_id = space_info->tickets_id;
- const bool aborted = BTRFS_FS_ERROR(fs_info);
+ const int abort_error = BTRFS_FS_ERROR(fs_info);
trace_btrfs_fail_all_tickets(fs_info, space_info);
if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
- __btrfs_dump_space_info(fs_info, space_info);
+ __btrfs_dump_space_info(space_info);
}
while (!list_empty(&space_info->tickets) &&
tickets_id == space_info->tickets_id) {
ticket = list_first_entry(&space_info->tickets,
struct reserve_ticket, list);
+ if (unlikely(abort_error)) {
+ remove_ticket(space_info, ticket, abort_error);
+ } else {
+ if (steal_from_global_rsv(space_info, ticket))
+ return true;
- if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
- return true;
-
- if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
- btrfs_info(fs_info, "failing ticket with %llu bytes",
- ticket->bytes);
+ if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
+ btrfs_info(fs_info, "failing ticket with %llu bytes",
+ ticket->bytes);
- remove_ticket(space_info, ticket);
- if (aborted)
- ticket->error = -EIO;
- else
- ticket->error = -ENOSPC;
- wake_up(&ticket->wait);
+ remove_ticket(space_info, ticket, -ENOSPC);
- /*
- * We're just throwing tickets away, so more flushing may not
- * trip over btrfs_try_granting_tickets, so we need to call it
- * here to see if we can make progress with the next ticket in
- * the list.
- */
- if (!aborted)
- btrfs_try_granting_tickets(fs_info, space_info);
+ /*
+ * We're just throwing tickets away, so more flushing may
+ * not trip over btrfs_try_granting_tickets, so we need
+ * to call it here to see if we can make progress with
+ * the next ticket in the list.
+ */
+ btrfs_try_granting_tickets(space_info);
+ }
}
return (tickets_id != space_info->tickets_id);
}
-/*
- * This is for normal flushers, we can wait all goddamned day if we want to. We
- * will loop and continuously try to flush as long as we are making progress.
- * We count progress as clearing off tickets each time we have to loop.
- */
-static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
+static void do_async_reclaim_metadata_space(struct btrfs_space_info *space_info)
{
- struct btrfs_fs_info *fs_info;
- struct btrfs_space_info *space_info;
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
u64 to_reclaim;
enum btrfs_flush_state flush_state;
int commit_cycles = 0;
u64 last_tickets_id;
+ enum btrfs_flush_state final_state;
- fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
- space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
+ if (btrfs_is_zoned(fs_info))
+ final_state = RESET_ZONES;
+ else
+ final_state = COMMIT_TRANS;
spin_lock(&space_info->lock);
- to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
+ to_reclaim = btrfs_calc_reclaim_metadata_size(space_info);
if (!to_reclaim) {
- space_info->flush = 0;
+ space_info->flush = false;
spin_unlock(&space_info->lock);
return;
}
@@ -1102,15 +1182,14 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
flush_state = FLUSH_DELAYED_ITEMS_NR;
do {
- flush_space(fs_info, space_info, to_reclaim, flush_state, false);
+ flush_space(space_info, to_reclaim, flush_state, false);
spin_lock(&space_info->lock);
if (list_empty(&space_info->tickets)) {
- space_info->flush = 0;
+ space_info->flush = false;
spin_unlock(&space_info->lock);
return;
}
- to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
- space_info);
+ to_reclaim = btrfs_calc_reclaim_metadata_size(space_info);
if (last_tickets_id == space_info->tickets_id) {
flush_state++;
} else {
@@ -1141,21 +1220,40 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
flush_state++;
- if (flush_state > COMMIT_TRANS) {
+ if (flush_state > final_state) {
commit_cycles++;
if (commit_cycles > 2) {
- if (maybe_fail_all_tickets(fs_info, space_info)) {
+ if (maybe_fail_all_tickets(space_info)) {
flush_state = FLUSH_DELAYED_ITEMS_NR;
commit_cycles--;
} else {
- space_info->flush = 0;
+ space_info->flush = false;
}
} else {
flush_state = FLUSH_DELAYED_ITEMS_NR;
}
}
spin_unlock(&space_info->lock);
- } while (flush_state <= COMMIT_TRANS);
+ } while (flush_state <= final_state);
+}
+
+/*
+ * This is for normal flushers, it can wait as much time as needed. We will
+ * loop and continuously try to flush as long as we are making progress. We
+ * count progress as clearing off tickets each time we have to loop.
+ */
+static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
+{
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_space_info *space_info;
+
+ fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
+ space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
+ do_async_reclaim_metadata_space(space_info);
+ for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) {
+ if (space_info->sub_group[i])
+ do_async_reclaim_metadata_space(space_info->sub_group[i]);
+ }
}
/*
@@ -1185,14 +1283,15 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
trans_rsv = &fs_info->trans_block_rsv;
spin_lock(&space_info->lock);
- while (need_preemptive_reclaim(fs_info, space_info)) {
+ while (need_preemptive_reclaim(space_info)) {
enum btrfs_flush_state flush;
u64 delalloc_size = 0;
u64 to_reclaim, block_rsv_size;
const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv);
+ const u64 bytes_may_use = space_info->bytes_may_use;
+ const u64 bytes_pinned = space_info->bytes_pinned;
- loops++;
-
+ spin_unlock(&space_info->lock);
/*
* We don't have a precise counter for the metadata being
* reserved for delalloc, so we'll approximate it by subtracting
@@ -1204,8 +1303,8 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
btrfs_block_rsv_reserved(delayed_block_rsv) +
btrfs_block_rsv_reserved(delayed_refs_rsv) +
btrfs_block_rsv_reserved(trans_rsv);
- if (block_rsv_size < space_info->bytes_may_use)
- delalloc_size = space_info->bytes_may_use - block_rsv_size;
+ if (block_rsv_size < bytes_may_use)
+ delalloc_size = bytes_may_use - block_rsv_size;
/*
* We don't want to include the global_rsv in our calculation,
@@ -1222,10 +1321,10 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
if (delalloc_size > block_rsv_size) {
to_reclaim = delalloc_size;
flush = FLUSH_DELALLOC;
- } else if (space_info->bytes_pinned >
+ } else if (bytes_pinned >
(btrfs_block_rsv_reserved(delayed_block_rsv) +
btrfs_block_rsv_reserved(delayed_refs_rsv))) {
- to_reclaim = space_info->bytes_pinned;
+ to_reclaim = bytes_pinned;
flush = COMMIT_TRANS;
} else if (btrfs_block_rsv_reserved(delayed_block_rsv) >
btrfs_block_rsv_reserved(delayed_refs_rsv)) {
@@ -1236,7 +1335,7 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
flush = FLUSH_DELAYED_REFS_NR;
}
- spin_unlock(&space_info->lock);
+ loops++;
/*
* We don't want to reclaim everything, just a portion, so scale
@@ -1246,7 +1345,7 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
to_reclaim >>= 2;
if (!to_reclaim)
to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
- flush_space(fs_info, space_info, to_reclaim, flush, true);
+ flush_space(space_info, to_reclaim, flush, true);
cond_resched();
spin_lock(&space_info->lock);
}
@@ -1286,6 +1385,10 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
* This is where we reclaim all of the pinned space generated by running the
* iputs
*
+ * RESET_ZONES
+ * This state works only for zoned mode. We scan the unused block group
+ * list, reset their zones, and reuse the block groups.
+ *
* ALLOC_CHUNK_FORCE
* For data we start with alloc chunk force, however we could have been full
* before, and then the transaction commit could have freed new block groups,
@@ -1295,22 +1398,19 @@ static const enum btrfs_flush_state data_flush_states[] = {
FLUSH_DELALLOC_FULL,
RUN_DELAYED_IPUTS,
COMMIT_TRANS,
+ RESET_ZONES,
ALLOC_CHUNK_FORCE,
};
-static void btrfs_async_reclaim_data_space(struct work_struct *work)
+static void do_async_reclaim_data_space(struct btrfs_space_info *space_info)
{
- struct btrfs_fs_info *fs_info;
- struct btrfs_space_info *space_info;
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
u64 last_tickets_id;
enum btrfs_flush_state flush_state = 0;
- fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
- space_info = fs_info->data_sinfo;
-
spin_lock(&space_info->lock);
if (list_empty(&space_info->tickets)) {
- space_info->flush = 0;
+ space_info->flush = false;
spin_unlock(&space_info->lock);
return;
}
@@ -1318,27 +1418,27 @@ static void btrfs_async_reclaim_data_space(struct work_struct *work)
spin_unlock(&space_info->lock);
while (!space_info->full) {
- flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
+ flush_space(space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
spin_lock(&space_info->lock);
if (list_empty(&space_info->tickets)) {
- space_info->flush = 0;
+ space_info->flush = false;
spin_unlock(&space_info->lock);
return;
}
/* Something happened, fail everything and bail. */
- if (BTRFS_FS_ERROR(fs_info))
+ if (unlikely(BTRFS_FS_ERROR(fs_info)))
goto aborted_fs;
last_tickets_id = space_info->tickets_id;
spin_unlock(&space_info->lock);
}
while (flush_state < ARRAY_SIZE(data_flush_states)) {
- flush_space(fs_info, space_info, U64_MAX,
+ flush_space(space_info, U64_MAX,
data_flush_states[flush_state], false);
spin_lock(&space_info->lock);
if (list_empty(&space_info->tickets)) {
- space_info->flush = 0;
+ space_info->flush = false;
spin_unlock(&space_info->lock);
return;
}
@@ -1352,16 +1452,16 @@ static void btrfs_async_reclaim_data_space(struct work_struct *work)
if (flush_state >= ARRAY_SIZE(data_flush_states)) {
if (space_info->full) {
- if (maybe_fail_all_tickets(fs_info, space_info))
+ if (maybe_fail_all_tickets(space_info))
flush_state = 0;
else
- space_info->flush = 0;
+ space_info->flush = false;
} else {
flush_state = 0;
}
/* Something happened, fail everything and bail. */
- if (BTRFS_FS_ERROR(fs_info))
+ if (unlikely(BTRFS_FS_ERROR(fs_info)))
goto aborted_fs;
}
@@ -1370,11 +1470,24 @@ static void btrfs_async_reclaim_data_space(struct work_struct *work)
return;
aborted_fs:
- maybe_fail_all_tickets(fs_info, space_info);
- space_info->flush = 0;
+ maybe_fail_all_tickets(space_info);
+ space_info->flush = false;
spin_unlock(&space_info->lock);
}
+static void btrfs_async_reclaim_data_space(struct work_struct *work)
+{
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_space_info *space_info;
+
+ fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
+ space_info = fs_info->data_sinfo;
+ do_async_reclaim_data_space(space_info);
+ for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++)
+ if (space_info->sub_group[i])
+ do_async_reclaim_data_space(space_info->sub_group[i]);
+}
+
void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
{
INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
@@ -1386,6 +1499,7 @@ void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
static const enum btrfs_flush_state priority_flush_states[] = {
FLUSH_DELAYED_ITEMS_NR,
FLUSH_DELAYED_ITEMS,
+ RESET_ZONES,
ALLOC_CHUNK,
};
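Note the two traversal schemes: the state tables (priority_flush_states, evict_flush_states, data_flush_states) are consumed by array index, so RESET_ZONES can sit wherever it is most useful within them, whereas do_async_reclaim_metadata_space() advances the enum value itself up to final_state. A simplified sketch of the table-driven walk, omitting the early exit once the ticket is served:

/* Illustrative only; to_reclaim comes from btrfs_calc_reclaim_metadata_size(). */
for (int i = 0; i < ARRAY_SIZE(priority_flush_states); i++)
	flush_space(space_info, to_reclaim, priority_flush_states[i], false);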
@@ -1399,92 +1513,90 @@ static const enum btrfs_flush_state evict_flush_states[] = {
FLUSH_DELALLOC_FULL,
ALLOC_CHUNK,
COMMIT_TRANS,
+ RESET_ZONES,
};
-static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
- struct reserve_ticket *ticket,
- const enum btrfs_flush_state *states,
- int states_nr)
+static bool is_ticket_served(struct reserve_ticket *ticket)
{
+ bool ret;
+
+ spin_lock(&ticket->lock);
+ ret = (ticket->bytes == 0);
+ spin_unlock(&ticket->lock);
+
+ return ret;
+}
+
+static void priority_reclaim_metadata_space(struct btrfs_space_info *space_info,
+ struct reserve_ticket *ticket,
+ const enum btrfs_flush_state *states,
+ int states_nr)
+{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
u64 to_reclaim;
int flush_state = 0;
- spin_lock(&space_info->lock);
- to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
/*
* This is the priority reclaim path, so to_reclaim could be >0 still
* because we may have only satisfied the priority tickets and still
* left non priority tickets on the list. We would then have
* to_reclaim but ->bytes == 0.
*/
- if (ticket->bytes == 0) {
- spin_unlock(&space_info->lock);
+ if (is_ticket_served(ticket))
return;
- }
+
+ spin_lock(&space_info->lock);
+ to_reclaim = btrfs_calc_reclaim_metadata_size(space_info);
+ spin_unlock(&space_info->lock);
while (flush_state < states_nr) {
- spin_unlock(&space_info->lock);
- flush_space(fs_info, space_info, to_reclaim, states[flush_state],
- false);
- flush_state++;
- spin_lock(&space_info->lock);
- if (ticket->bytes == 0) {
- spin_unlock(&space_info->lock);
+ flush_space(space_info, to_reclaim, states[flush_state], false);
+ if (is_ticket_served(ticket))
return;
- }
+ flush_state++;
}
+ spin_lock(&space_info->lock);
/*
* Attempt to steal from the global rsv if we can, except if the fs was
* turned into error mode due to a transaction abort when flushing space
* above, in that case fail with the abort error instead of returning
* success to the caller if we can steal from the global rsv - this is
- * just to have caller fail immeditelly instead of later when trying to
+ * just to have caller fail immediately instead of later when trying to
* modify the fs, making it easier to debug -ENOSPC problems.
*/
- if (BTRFS_FS_ERROR(fs_info)) {
- ticket->error = BTRFS_FS_ERROR(fs_info);
- remove_ticket(space_info, ticket);
- } else if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
- ticket->error = -ENOSPC;
- remove_ticket(space_info, ticket);
- }
+ if (unlikely(BTRFS_FS_ERROR(fs_info)))
+ remove_ticket(space_info, ticket, BTRFS_FS_ERROR(fs_info));
+ else if (!steal_from_global_rsv(space_info, ticket))
+ remove_ticket(space_info, ticket, -ENOSPC);
/*
* We must run try_granting_tickets here because we could be a large
* ticket in front of a smaller ticket that can now be satisfied with
* the available space.
*/
- btrfs_try_granting_tickets(fs_info, space_info);
+ btrfs_try_granting_tickets(space_info);
spin_unlock(&space_info->lock);
}
-static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
+static void priority_reclaim_data_space(struct btrfs_space_info *space_info,
struct reserve_ticket *ticket)
{
- spin_lock(&space_info->lock);
-
/* We could have been granted before we got here. */
- if (ticket->bytes == 0) {
- spin_unlock(&space_info->lock);
+ if (is_ticket_served(ticket))
return;
- }
+ spin_lock(&space_info->lock);
while (!space_info->full) {
spin_unlock(&space_info->lock);
- flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
- spin_lock(&space_info->lock);
- if (ticket->bytes == 0) {
- spin_unlock(&space_info->lock);
+ flush_space(space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
+ if (is_ticket_served(ticket))
return;
- }
+ spin_lock(&space_info->lock);
}
- ticket->error = -ENOSPC;
- remove_ticket(space_info, ticket);
- btrfs_try_granting_tickets(fs_info, space_info);
+ remove_ticket(space_info, ticket, -ENOSPC);
+ btrfs_try_granting_tickets(space_info);
spin_unlock(&space_info->lock);
}
@@ -1493,11 +1605,13 @@ static void wait_reserve_ticket(struct btrfs_space_info *space_info,
{
DEFINE_WAIT(wait);
- int ret = 0;
- spin_lock(&space_info->lock);
+ spin_lock(&ticket->lock);
while (ticket->bytes > 0 && ticket->error == 0) {
+ int ret;
+
ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
+ spin_unlock(&ticket->lock);
if (ret) {
/*
* Delete us from the list. After we unlock the space
@@ -1507,24 +1621,23 @@ static void wait_reserve_ticket(struct btrfs_space_info *space_info,
* despite getting an error, resulting in a space leak
* (bytes_may_use counter of our space_info).
*/
- remove_ticket(space_info, ticket);
- ticket->error = -EINTR;
- break;
+ spin_lock(&space_info->lock);
+ remove_ticket(space_info, ticket, -EINTR);
+ spin_unlock(&space_info->lock);
+ return;
}
- spin_unlock(&space_info->lock);
schedule();
finish_wait(&ticket->wait, &wait);
- spin_lock(&space_info->lock);
+ spin_lock(&ticket->lock);
}
- spin_unlock(&space_info->lock);
+ spin_unlock(&ticket->lock);
}
/*
* Do the appropriate flushing and waiting for a ticket.
*
- * @fs_info: the filesystem
* @space_info: space info for the reservation
* @ticket: ticket for the reservation
* @start_ns: timestamp when the reservation started
@@ -1534,8 +1647,7 @@ static void wait_reserve_ticket(struct btrfs_space_info *space_info,
* This does the work of figuring out how to flush for the ticket, waiting for
* the reservation, and returning the appropriate error if there is one.
*/
-static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
+static int handle_reserve_ticket(struct btrfs_space_info *space_info,
struct reserve_ticket *ticket,
u64 start_ns, u64 orig_bytes,
enum btrfs_reserve_flush_enum flush)
@@ -1549,20 +1661,20 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
wait_reserve_ticket(space_info, ticket);
break;
case BTRFS_RESERVE_FLUSH_LIMIT:
- priority_reclaim_metadata_space(fs_info, space_info, ticket,
+ priority_reclaim_metadata_space(space_info, ticket,
priority_flush_states,
ARRAY_SIZE(priority_flush_states));
break;
case BTRFS_RESERVE_FLUSH_EVICT:
- priority_reclaim_metadata_space(fs_info, space_info, ticket,
+ priority_reclaim_metadata_space(space_info, ticket,
evict_flush_states,
ARRAY_SIZE(evict_flush_states));
break;
case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
- priority_reclaim_data_space(fs_info, space_info, ticket);
+ priority_reclaim_data_space(space_info, ticket);
break;
default:
- ASSERT(0);
+ ASSERT(0, "flush=%d", flush);
break;
}
@@ -1574,9 +1686,10 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
* releasing reserved space (if an error happens the expectation is that
* space wasn't reserved at all).
*/
- ASSERT(!(ticket->bytes == 0 && ticket->error));
- trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
- start_ns, flush, ticket->error);
+ ASSERT(!(ticket->bytes == 0 && ticket->error),
+ "ticket->bytes=%llu ticket->error=%d", ticket->bytes, ticket->error);
+ trace_btrfs_reserve_ticket(space_info->fs_info, space_info->flags,
+ orig_bytes, start_ns, flush, ticket->error);
return ret;
}
@@ -1590,9 +1703,9 @@ static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
(flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
}
-static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info)
+static inline void maybe_clamp_preempt(struct btrfs_space_info *space_info)
{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
@@ -1627,7 +1740,6 @@ static inline bool can_ticket(enum btrfs_reserve_flush_enum flush)
/*
* Try to reserve bytes from the block_rsv's space.
*
- * @fs_info: the filesystem
* @space_info: space info we want to allocate from
* @orig_bytes: number of bytes we want
* @flush: whether or not we can flush to make our reservation
@@ -1639,10 +1751,10 @@ static inline bool can_ticket(enum btrfs_reserve_flush_enum flush)
* regain reservations will be made and this will fail if there is not enough
* space already.
*/
-static int __reserve_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info, u64 orig_bytes,
- enum btrfs_reserve_flush_enum flush)
+static int reserve_bytes(struct btrfs_space_info *space_info, u64 orig_bytes,
+ enum btrfs_reserve_flush_enum flush)
{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
struct work_struct *async_work;
struct reserve_ticket ticket;
u64 start_ns = 0;
@@ -1650,7 +1762,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
int ret = -ENOSPC;
bool pending_tickets;
- ASSERT(orig_bytes);
+ ASSERT(orig_bytes, "orig_bytes=%llu", orig_bytes);
/*
* If have a transaction handle (current->journal_info != NULL), then
* the flush method can not be neither BTRFS_RESERVE_FLUSH_ALL* nor
@@ -1659,9 +1771,9 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
*/
if (current->journal_info) {
/* One assert per line for easier debugging. */
- ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL);
- ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL);
- ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT);
+ ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL, "flush=%d", flush);
+ ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL, "flush=%d", flush);
+ ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT, "flush=%d", flush);
}
if (flush == BTRFS_RESERVE_FLUSH_DATA)
@@ -1689,9 +1801,8 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
*/
if (!pending_tickets &&
((used + orig_bytes <= space_info->total_bytes) ||
- btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
- btrfs_space_info_update_bytes_may_use(fs_info, space_info,
- orig_bytes);
+ can_overcommit(space_info, used, orig_bytes, flush))) {
+ btrfs_space_info_update_bytes_may_use(space_info, orig_bytes);
ret = 0;
}
@@ -1701,10 +1812,9 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
* left to allocate for the block.
*/
if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) {
- used = btrfs_space_info_used(space_info, false);
+ used -= space_info->bytes_may_use;
if (used + orig_bytes <= space_info->total_bytes) {
- btrfs_space_info_update_bytes_may_use(fs_info, space_info,
- orig_bytes);
+ btrfs_space_info_update_bytes_may_use(space_info, orig_bytes);
ret = 0;
}
}
@@ -1721,6 +1831,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
ticket.error = 0;
space_info->reclaim_size += ticket.bytes;
init_waitqueue_head(&ticket.wait);
+ spin_lock_init(&ticket.lock);
ticket.steal = can_steal(flush);
if (trace_btrfs_reserve_ticket_enabled())
start_ns = ktime_get_ns();
@@ -1737,14 +1848,14 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
* preemptive flushing in order to keep up with
* the workload.
*/
- maybe_clamp_preempt(fs_info, space_info);
+ maybe_clamp_preempt(space_info);
- space_info->flush = 1;
+ space_info->flush = true;
trace_btrfs_trigger_flush(fs_info,
space_info->flags,
orig_bytes, flush,
"enospc");
- queue_work(system_unbound_wq, async_work);
+ queue_work(system_dfl_wq, async_work);
}
} else {
list_add_tail(&ticket.list,
@@ -1758,10 +1869,10 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
*/
if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
!work_busy(&fs_info->preempt_reclaim_work) &&
- need_preemptive_reclaim(fs_info, space_info)) {
+ need_preemptive_reclaim(space_info)) {
trace_btrfs_trigger_flush(fs_info, space_info->flags,
orig_bytes, flush, "preempt");
- queue_work(system_unbound_wq,
+ queue_work(system_dfl_wq,
&fs_info->preempt_reclaim_work);
}
}
@@ -1769,14 +1880,12 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
if (!ret || !can_ticket(flush))
return ret;
- return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
- orig_bytes, flush);
+ return handle_reserve_ticket(space_info, &ticket, start_ns, orig_bytes, flush);
}
/*
* Try to reserve metadata bytes from the block_rsv's space.
*
- * @fs_info: the filesystem
* @space_info: the space_info we're allocating for
* @orig_bytes: number of bytes we want
* @flush: whether or not we can flush to make our reservation
@@ -1788,20 +1897,21 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
* regain reservations will be made and this will fail if there is not enough
* space already.
*/
-int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
+int btrfs_reserve_metadata_bytes(struct btrfs_space_info *space_info,
u64 orig_bytes,
enum btrfs_reserve_flush_enum flush)
{
int ret;
- ret = __reserve_bytes(fs_info, space_info, orig_bytes, flush);
+ ret = reserve_bytes(space_info, orig_bytes, flush);
if (ret == -ENOSPC) {
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
+
trace_btrfs_space_reservation(fs_info, "space_info:enospc",
space_info->flags, orig_bytes, 1);
if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
- btrfs_dump_space_info(fs_info, space_info, orig_bytes, 0);
+ btrfs_dump_space_info(space_info, orig_bytes, false);
}
return ret;
}
@@ -1809,30 +1919,32 @@ int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
/*
* Try to reserve data bytes for an allocation.
*
- * @fs_info: the filesystem
+ * @space_info: the space_info we're allocating for
* @bytes: number of bytes we need
* @flush: how we are allowed to flush
*
* This will reserve bytes from the data space info. If there is not enough
* space then we will attempt to flush space as specified by flush.
*/
-int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
+int btrfs_reserve_data_bytes(struct btrfs_space_info *space_info, u64 bytes,
enum btrfs_reserve_flush_enum flush)
{
- struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
int ret;
ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE ||
- flush == BTRFS_RESERVE_NO_FLUSH);
- ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);
+ flush == BTRFS_RESERVE_NO_FLUSH, "flush=%d", flush);
+ ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA,
+ "current->journal_info=0x%lx flush=%d",
+ (unsigned long)current->journal_info, flush);
- ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
+ ret = reserve_bytes(space_info, bytes, flush);
if (ret == -ENOSPC) {
trace_btrfs_space_reservation(fs_info, "space_info:enospc",
- data_sinfo->flags, bytes, 1);
+ space_info->flags, bytes, 1);
if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
- btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
+ btrfs_dump_space_info(space_info, bytes, false);
}
return ret;
}
@@ -1845,7 +1957,7 @@ __cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info)
btrfs_info(fs_info, "dumping space info:");
list_for_each_entry(space_info, &fs_info->space_info, list) {
spin_lock(&space_info->lock);
- __btrfs_dump_space_info(fs_info, space_info);
+ __btrfs_dump_space_info(space_info);
spin_unlock(&space_info->lock);
}
dump_global_block_rsv(fs_info);
@@ -1862,7 +1974,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
int factor;
/* It's df, we don't care if it's racy */
- if (list_empty(&sinfo->ro_bgs))
+ if (data_race(list_empty(&sinfo->ro_bgs)))
return 0;
spin_lock(&sinfo->lock);
@@ -1887,13 +1999,13 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
static u64 calc_pct_ratio(u64 x, u64 y)
{
- int err;
+ int ret;
if (!y)
return 0;
again:
- err = check_mul_overflow(100, x, &x);
- if (err)
+ ret = check_mul_overflow(100, x, &x);
+ if (ret)
goto lose_precision;
return div64_u64(x, y);
lose_precision:
@@ -2053,7 +2165,7 @@ void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool
}
}
-bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info)
+static bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info)
{
bool ret;
@@ -2082,3 +2194,32 @@ void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info)
do_reclaim_sweep(space_info, raid);
}
}
+
+void btrfs_return_free_space(struct btrfs_space_info *space_info, u64 len)
+{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
+ struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+
+ lockdep_assert_held(&space_info->lock);
+
+ /* Prioritize the global reservation to receive the freed space. */
+ if (global_rsv->space_info != space_info)
+ goto grant;
+
+ spin_lock(&global_rsv->lock);
+ if (!global_rsv->full) {
+ u64 to_add = min(len, global_rsv->size - global_rsv->reserved);
+
+ global_rsv->reserved += to_add;
+ btrfs_space_info_update_bytes_may_use(space_info, to_add);
+ if (global_rsv->reserved >= global_rsv->size)
+ global_rsv->full = true;
+ len -= to_add;
+ }
+ spin_unlock(&global_rsv->lock);
+
+grant:
+ /* Add to any tickets we may have. */
+ if (len)
+ btrfs_try_granting_tickets(space_info);
+}
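A worked example for btrfs_return_free_space(), with illustrative numbers: if global_rsv->size = 512 MiB and global_rsv->reserved = 448 MiB, returning len = 100 MiB adds min(100 MiB, 64 MiB) = 64 MiB to the global reservation (marking it full) and hands the remaining 36 MiB to btrfs_try_granting_tickets(). If the global rsv belongs to a different space_info, the whole length goes straight to the waiting tickets.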
diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
index efbecc0c5258..446c0614ad4a 100644
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -79,6 +79,10 @@ enum btrfs_reserve_flush_enum {
BTRFS_RESERVE_FLUSH_EMERGENCY,
};
+/*
+ * Please be aware that the order of enum values will be the order of the reclaim
+ * process in btrfs_async_reclaim_metadata_space().
+ */
enum btrfs_flush_state {
FLUSH_DELAYED_ITEMS_NR = 1,
FLUSH_DELAYED_ITEMS = 2,
@@ -91,10 +95,21 @@ enum btrfs_flush_state {
ALLOC_CHUNK_FORCE = 9,
RUN_DELAYED_IPUTS = 10,
COMMIT_TRANS = 11,
+ RESET_ZONES = 12,
+};
+
+enum btrfs_space_info_sub_group {
+ BTRFS_SUB_GROUP_PRIMARY,
+ BTRFS_SUB_GROUP_DATA_RELOC,
+ BTRFS_SUB_GROUP_TREELOG,
};
+#define BTRFS_SPACE_INFO_SUB_GROUP_MAX 1
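A primary space_info carries at most one sub-group, DATA_RELOC for data or TREELOG for metadata on zoned filesystems, which is why the maximum is 1. Callers that must visit every group can use the same idiom as the async reclaim workers in space-info.c above; a sketch with a hypothetical helper name:

/* Illustrative only: apply @fn to a primary space_info and any sub-groups. */
static void for_each_space_info_group(struct btrfs_space_info *primary,
				      void (*fn)(struct btrfs_space_info *))
{
	fn(primary);
	for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++)
		if (primary->sub_group[i])
			fn(primary->sub_group[i]);
}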
struct btrfs_space_info {
struct btrfs_fs_info *fs_info;
+ struct btrfs_space_info *parent;
+ struct btrfs_space_info *sub_group[BTRFS_SPACE_INFO_SUB_GROUP_MAX];
+ int subgroup_id;
spinlock_t lock;
u64 total_bytes; /* total bytes in the space,
@@ -127,11 +142,11 @@ struct btrfs_space_info {
flushing. The value is >> clamp, so turns
out to be a 2^clamp divisor. */
- unsigned int full:1; /* indicates that we cannot allocate any more
+ bool full; /* indicates that we cannot allocate any more
chunks for this space */
- unsigned int chunk_alloc:1; /* set if we are allocating a chunk */
+ bool chunk_alloc; /* set if we are allocating a chunk */
- unsigned int flush:1; /* set if we are trying to make space */
+ bool flush; /* set if we are trying to make space */
unsigned int force_alloc; /* set if we need to force a chunk
alloc for this space */
@@ -209,14 +224,6 @@ struct btrfs_space_info {
s64 reclaimable_bytes;
};
-struct reserve_ticket {
- u64 bytes;
- int error;
- bool steal;
- struct list_head list;
- wait_queue_head_t wait;
-};
-
static inline bool btrfs_mixed_space_info(const struct btrfs_space_info *space_info)
{
return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) &&
@@ -229,10 +236,10 @@ static inline bool btrfs_mixed_space_info(const struct btrfs_space_info *space_i
*/
#define DECLARE_SPACE_INFO_UPDATE(name, trace_name) \
static inline void \
-btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info, \
- struct btrfs_space_info *sinfo, \
+btrfs_space_info_update_##name(struct btrfs_space_info *sinfo, \
s64 bytes) \
{ \
+ struct btrfs_fs_info *fs_info = sinfo->fs_info; \
const u64 abs_bytes = (bytes < 0) ? -bytes : bytes; \
lockdep_assert_held(&sinfo->lock); \
trace_update_##name(fs_info, sinfo, sinfo->name, bytes); \
@@ -251,6 +258,17 @@ DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info");
DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned");
DECLARE_SPACE_INFO_UPDATE(bytes_zone_unusable, "zone_unusable");
+static inline u64 btrfs_space_info_used(const struct btrfs_space_info *s_info,
+ bool may_use_included)
+{
+ lockdep_assert_held(&s_info->lock);
+
+ return s_info->bytes_used + s_info->bytes_reserved +
+ s_info->bytes_pinned + s_info->bytes_readonly +
+ s_info->bytes_zone_unusable +
+ (may_use_included ? s_info->bytes_may_use : 0);
+}
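A short usage sketch for the now-inline helper, mirroring the reservation path in space-info.c above (illustrative only; the caller must hold the space_info lock, as the lockdep assertion enforces):

u64 used;

spin_lock(&space_info->lock);
used = btrfs_space_info_used(space_info, true);	/* includes bytes_may_use */
if (used + orig_bytes <= space_info->total_bytes)
	btrfs_space_info_update_bytes_may_use(space_info, orig_bytes);
spin_unlock(&space_info->lock);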
+
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
struct btrfs_block_group *block_group);
@@ -258,33 +276,26 @@ void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
u64 chunk_size);
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
u64 flags);
-u64 __pure btrfs_space_info_used(const struct btrfs_space_info *s_info,
- bool may_use_included);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
-void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *info, u64 bytes,
- int dump_block_groups);
-int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
+void btrfs_dump_space_info(struct btrfs_space_info *info, u64 bytes,
+ bool dump_block_groups);
+int btrfs_reserve_metadata_bytes(struct btrfs_space_info *space_info,
u64 orig_bytes,
enum btrfs_reserve_flush_enum flush);
-void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info);
-int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
- const struct btrfs_space_info *space_info, u64 bytes,
- enum btrfs_reserve_flush_enum flush);
+void btrfs_try_granting_tickets(struct btrfs_space_info *space_info);
+bool btrfs_can_overcommit(const struct btrfs_space_info *space_info, u64 bytes,
+ enum btrfs_reserve_flush_enum flush);
static inline void btrfs_space_info_free_bytes_may_use(
- struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
u64 num_bytes)
{
spin_lock(&space_info->lock);
- btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes);
- btrfs_try_granting_tickets(fs_info, space_info);
+ btrfs_space_info_update_bytes_may_use(space_info, -num_bytes);
+ btrfs_try_granting_tickets(space_info);
spin_unlock(&space_info->lock);
}
-int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
+int btrfs_reserve_data_bytes(struct btrfs_space_info *space_info, u64 bytes,
enum btrfs_reserve_flush_enum flush);
void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info);
void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info);
@@ -292,8 +303,8 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes);
void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready);
-bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info);
int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info);
void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info);
+void btrfs_return_free_space(struct btrfs_space_info *space_info, u64 len);
#endif /* BTRFS_SPACE_INFO_H */
diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
index 8c68059ac1b0..f82e71f5d88b 100644
--- a/fs/btrfs/subpage.c
+++ b/fs/btrfs/subpage.c
@@ -2,12 +2,11 @@
#include <linux/slab.h>
#include "messages.h"
-#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"
/*
- * Subpage (sectorsize < PAGE_SIZE) support overview:
+ * Subpage (block size < folio size) support overview:
*
* Limitations:
*
@@ -50,7 +49,7 @@
* Implementation:
*
* - Common
- * Both metadata and data will use a new structure, btrfs_subpage, to
+ * Both metadata and data will use a new structure, btrfs_folio_state, to
* record the status of each sector inside a page. This provides the extra
* granularity needed.
*
@@ -64,34 +63,14 @@
* This means a slightly higher tree locking latency.
*/
-#if PAGE_SIZE > SZ_4K
-bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
+int btrfs_attach_folio_state(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, enum btrfs_folio_type type)
{
- if (fs_info->sectorsize >= PAGE_SIZE)
- return false;
+ struct btrfs_folio_state *bfs;
- /*
- * Only data pages (either through DIO or compression) can have no
- * mapping. And if page->mapping->host is data inode, it's subpage.
- * As we have ruled our sectorsize >= PAGE_SIZE case already.
- */
- if (!mapping || !mapping->host || is_data_inode(BTRFS_I(mapping->host)))
- return true;
-
- /*
- * Now the only remaining case is metadata, which we only go subpage
- * routine if nodesize < PAGE_SIZE.
- */
- if (fs_info->nodesize < PAGE_SIZE)
- return true;
- return false;
-}
-#endif
-
-int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
- struct folio *folio, enum btrfs_subpage_type type)
-{
- struct btrfs_subpage *subpage;
+ /* For metadata we don't support large folio yet. */
+ if (type == BTRFS_SUBPAGE_METADATA)
+ ASSERT(!folio_test_large(folio));
/*
* We have cases like a dummy extent buffer page, which is not mapped
@@ -101,40 +80,50 @@ int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
ASSERT(folio_test_locked(folio));
/* Either not subpage, or the folio already has private attached. */
- if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio))
+ if (folio_test_private(folio))
+ return 0;
+ if (type == BTRFS_SUBPAGE_METADATA && !btrfs_meta_is_subpage(fs_info))
+ return 0;
+ if (type == BTRFS_SUBPAGE_DATA && !btrfs_is_subpage(fs_info, folio))
return 0;
- subpage = btrfs_alloc_subpage(fs_info, type);
- if (IS_ERR(subpage))
- return PTR_ERR(subpage);
+ bfs = btrfs_alloc_folio_state(fs_info, folio_size(folio), type);
+ if (IS_ERR(bfs))
+ return PTR_ERR(bfs);
- folio_attach_private(folio, subpage);
+ folio_attach_private(folio, bfs);
return 0;
}
-void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio)
+void btrfs_detach_folio_state(const struct btrfs_fs_info *fs_info, struct folio *folio,
+ enum btrfs_folio_type type)
{
- struct btrfs_subpage *subpage;
+ struct btrfs_folio_state *bfs;
/* Either not subpage, or the folio already has private attached. */
- if (!btrfs_is_subpage(fs_info, folio->mapping) || !folio_test_private(folio))
+ if (!folio_test_private(folio))
+ return;
+ if (type == BTRFS_SUBPAGE_METADATA && !btrfs_meta_is_subpage(fs_info))
+ return;
+ if (type == BTRFS_SUBPAGE_DATA && !btrfs_is_subpage(fs_info, folio))
return;
- subpage = folio_detach_private(folio);
- ASSERT(subpage);
- btrfs_free_subpage(subpage);
+ bfs = folio_detach_private(folio);
+ ASSERT(bfs);
+ btrfs_free_folio_state(bfs);
}
-struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
- enum btrfs_subpage_type type)
+struct btrfs_folio_state *btrfs_alloc_folio_state(const struct btrfs_fs_info *fs_info,
+ size_t fsize, enum btrfs_folio_type type)
{
- struct btrfs_subpage *ret;
+ struct btrfs_folio_state *ret;
unsigned int real_size;
- ASSERT(fs_info->sectorsize < PAGE_SIZE);
+ ASSERT(fs_info->sectorsize < fsize);
real_size = struct_size(ret, bitmaps,
- BITS_TO_LONGS(btrfs_bitmap_nr_max * fs_info->sectors_per_page));
+ BITS_TO_LONGS(btrfs_bitmap_nr_max *
+ (fsize >> fs_info->sectorsize_bits)));
ret = kzalloc(real_size, GFP_NOFS);
if (!ret)
return ERR_PTR(-ENOMEM);
@@ -147,11 +136,6 @@ struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
return ret;
}
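A worked sizing example for btrfs_alloc_folio_state(), with illustrative numbers: a 64 KiB folio with a 4 KiB sectorsize holds 64K >> 12 = 16 blocks, so the allocation is struct_size() of the header plus BITS_TO_LONGS(btrfs_bitmap_nr_max * 16) unsigned longs of bitmap; a 16 KiB folio needs only 4 blocks' worth of bits per bitmap.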
-void btrfs_free_subpage(struct btrfs_subpage *subpage)
-{
- kfree(subpage);
-}
-
/*
* Increase the eb_refs of current subpage.
*
@@ -163,59 +147,59 @@ void btrfs_free_subpage(struct btrfs_subpage *subpage)
*/
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
- struct btrfs_subpage *subpage;
+ struct btrfs_folio_state *bfs;
- if (!btrfs_is_subpage(fs_info, folio->mapping))
+ if (!btrfs_meta_is_subpage(fs_info))
return;
ASSERT(folio_test_private(folio) && folio->mapping);
lockdep_assert_held(&folio->mapping->i_private_lock);
- subpage = folio_get_private(folio);
- atomic_inc(&subpage->eb_refs);
+ bfs = folio_get_private(folio);
+ atomic_inc(&bfs->eb_refs);
}
void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
- struct btrfs_subpage *subpage;
+ struct btrfs_folio_state *bfs;
- if (!btrfs_is_subpage(fs_info, folio->mapping))
+ if (!btrfs_meta_is_subpage(fs_info))
return;
ASSERT(folio_test_private(folio) && folio->mapping);
lockdep_assert_held(&folio->mapping->i_private_lock);
- subpage = folio_get_private(folio);
- ASSERT(atomic_read(&subpage->eb_refs));
- atomic_dec(&subpage->eb_refs);
+ bfs = folio_get_private(folio);
+ ASSERT(atomic_read(&bfs->eb_refs));
+ atomic_dec(&bfs->eb_refs);
}
static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
- /* For subpage support, the folio must be single page. */
- ASSERT(folio_order(folio) == 0);
-
/* Basic checks */
ASSERT(folio_test_private(folio) && folio_get_private(folio));
ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
- IS_ALIGNED(len, fs_info->sectorsize));
+ IS_ALIGNED(len, fs_info->sectorsize), "start=%llu len=%u", start, len);
/*
* The range check only works for mapped page, we can still have
* unmapped page like dummy extent buffer pages.
*/
if (folio->mapping)
ASSERT(folio_pos(folio) <= start &&
- start + len <= folio_pos(folio) + PAGE_SIZE);
+ start + len <= folio_next_pos(folio),
+ "start=%llu len=%u folio_pos=%llu folio_size=%zu",
+ start, len, folio_pos(folio), folio_size(folio));
}
#define subpage_calc_start_bit(fs_info, folio, name, start, len) \
({ \
- unsigned int __start_bit; \
+ unsigned int __start_bit; \
+ const unsigned int __bpf = btrfs_blocks_per_folio(fs_info, folio); \
\
btrfs_subpage_assert(fs_info, folio, start, len); \
- __start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
- __start_bit += fs_info->sectors_per_page * btrfs_bitmap_nr_##name; \
+ __start_bit = offset_in_folio(folio, start) >> fs_info->sectorsize_bits; \
+ __start_bit += __bpf * btrfs_bitmap_nr_##name; \
__start_bit; \
})
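A worked example for subpage_calc_start_bit(), with illustrative numbers: for the uptodate bitmap in a 64 KiB folio with 4 KiB blocks (16 blocks per folio), a range starting 8 KiB into the folio gives __start_bit = (8K >> 12) + 16 * btrfs_bitmap_nr_uptodate, i.e. bit 2 within the uptodate region of the shared bitmap.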
@@ -233,14 +217,13 @@ static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
if (folio_pos(folio) >= orig_start + orig_len)
*len = 0;
else
- *len = min_t(u64, folio_pos(folio) + PAGE_SIZE,
- orig_start + orig_len) - *start;
+ *len = min_t(u64, folio_next_pos(folio), orig_start + orig_len) - *start;
}
static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = folio_get_private(folio);
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
const int nbits = (len >> fs_info->sectorsize_bits);
unsigned long flags;
@@ -250,7 +233,7 @@ static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,
btrfs_subpage_assert(fs_info, folio, start, len);
- spin_lock_irqsave(&subpage->lock, flags);
+ spin_lock_irqsave(&bfs->lock, flags);
/*
* We have call sites passing @lock_page into
* extent_clear_unlock_delalloc() for compression path.
@@ -258,18 +241,20 @@ static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,
* This @locked_page is locked by plain lock_page(), thus its
* subpage::locked is 0. Handle them in a special way.
*/
- if (atomic_read(&subpage->nr_locked) == 0) {
- spin_unlock_irqrestore(&subpage->lock, flags);
+ if (atomic_read(&bfs->nr_locked) == 0) {
+ spin_unlock_irqrestore(&bfs->lock, flags);
return true;
}
- for_each_set_bit_from(bit, subpage->bitmaps, start_bit + nbits) {
- clear_bit(bit, subpage->bitmaps);
+ for_each_set_bit_from(bit, bfs->bitmaps, start_bit + nbits) {
+ clear_bit(bit, bfs->bitmaps);
cleared++;
}
- ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
- last = atomic_sub_and_test(cleared, &subpage->nr_locked);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ ASSERT(atomic_read(&bfs->nr_locked) >= cleared,
+ "atomic_read(&bfs->nr_locked)=%d cleared=%d",
+ atomic_read(&bfs->nr_locked), cleared);
+ last = atomic_sub_and_test(cleared, &bfs->nr_locked);
+ spin_unlock_irqrestore(&bfs->lock, flags);
return last;
}
@@ -292,11 +277,11 @@ static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,
void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = folio_get_private(folio);
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
ASSERT(folio_test_locked(folio));
- if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
+ if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio)) {
folio_unlock(folio);
return;
}
@@ -308,7 +293,7 @@ void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
* Since we own the page lock, no one else could touch subpage::locked
* and we are safe to do several atomic operations without spinlock.
*/
- if (atomic_read(&subpage->nr_locked) == 0) {
+ if (atomic_read(&bfs->nr_locked) == 0) {
/* No subpage lock, locked by plain lock_page(). */
folio_unlock(folio);
return;
@@ -322,86 +307,97 @@ void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
struct folio *folio, unsigned long bitmap)
{
- struct btrfs_subpage *subpage = folio_get_private(folio);
- const int start_bit = fs_info->sectors_per_page * btrfs_bitmap_nr_locked;
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
+ const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
+ const int start_bit = blocks_per_folio * btrfs_bitmap_nr_locked;
unsigned long flags;
bool last = false;
int cleared = 0;
int bit;
- if (!btrfs_is_subpage(fs_info, folio->mapping)) {
+ if (!btrfs_is_subpage(fs_info, folio)) {
folio_unlock(folio);
return;
}
- if (atomic_read(&subpage->nr_locked) == 0) {
+ if (atomic_read(&bfs->nr_locked) == 0) {
/* No subpage lock, locked by plain lock_page(). */
folio_unlock(folio);
return;
}
- spin_lock_irqsave(&subpage->lock, flags);
- for_each_set_bit(bit, &bitmap, fs_info->sectors_per_page) {
- if (test_and_clear_bit(bit + start_bit, subpage->bitmaps))
+ spin_lock_irqsave(&bfs->lock, flags);
+ for_each_set_bit(bit, &bitmap, blocks_per_folio) {
+ if (test_and_clear_bit(bit + start_bit, bfs->bitmaps))
cleared++;
}
- ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
- last = atomic_sub_and_test(cleared, &subpage->nr_locked);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ ASSERT(atomic_read(&bfs->nr_locked) >= cleared,
+ "atomic_read(&bfs->nr_locked)=%d cleared=%d",
+ atomic_read(&bfs->nr_locked), cleared);
+ last = atomic_sub_and_test(cleared, &bfs->nr_locked);
+ spin_unlock_irqrestore(&bfs->lock, flags);
if (last)
folio_unlock(folio);
}
-#define subpage_test_bitmap_all_set(fs_info, subpage, name) \
- bitmap_test_range_all_set(subpage->bitmaps, \
- fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
- fs_info->sectors_per_page)
+#define subpage_test_bitmap_all_set(fs_info, folio, name) \
+({ \
+ struct btrfs_folio_state *__bfs = folio_get_private(folio); \
+ const unsigned int __bpf = btrfs_blocks_per_folio(fs_info, folio); \
+ \
+ bitmap_test_range_all_set(__bfs->bitmaps, \
+ __bpf * btrfs_bitmap_nr_##name, __bpf); \
+})
-#define subpage_test_bitmap_all_zero(fs_info, subpage, name) \
- bitmap_test_range_all_zero(subpage->bitmaps, \
- fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
- fs_info->sectors_per_page)
+#define subpage_test_bitmap_all_zero(fs_info, folio, name) \
+({ \
+ struct btrfs_folio_state *__bfs = folio_get_private(folio); \
+ const unsigned int __bpf = btrfs_blocks_per_folio(fs_info, folio); \
+ \
+ bitmap_test_range_all_zero(__bfs->bitmaps, \
+ __bpf * btrfs_bitmap_nr_##name, __bpf); \
+})
void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = folio_get_private(folio);
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
uptodate, start, len);
unsigned long flags;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ if (subpage_test_bitmap_all_set(fs_info, folio, uptodate))
folio_mark_uptodate(folio);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = folio_get_private(folio);
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
uptodate, start, len);
unsigned long flags;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
folio_clear_uptodate(folio);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = folio_get_private(folio);
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
dirty, start, len);
unsigned long flags;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ spin_unlock_irqrestore(&bfs->lock, flags);
folio_mark_dirty(folio);
}
@@ -418,17 +414,17 @@ void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = folio_get_private(folio);
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
dirty, start, len);
unsigned long flags;
bool last = false;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ if (subpage_test_bitmap_all_zero(fs_info, folio, dirty))
last = true;
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_unlock_irqrestore(&bfs->lock, flags);
return last;
}
@@ -445,91 +441,100 @@ void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = folio_get_private(folio);
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
writeback, start, len);
unsigned long flags;
+ bool keep_write;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+
+ /*
+ * Don't clear the TOWRITE tag when starting writeback on a still-dirty
+ * folio. Doing so can cause WB_SYNC_ALL writepages() to overlook it,
+	 * assume writeback is complete, and exit too early, breaking the sync
+	 * ordering guarantees.
+ */
+ keep_write = folio_test_dirty(folio);
if (!folio_test_writeback(folio))
- folio_start_writeback(folio);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ __folio_start_writeback(folio, keep_write);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
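
A toy model of the keep_write reasoning above, not the real page-cache tag machinery: if starting writeback dropped the TOWRITE mark while other blocks of the folio were still dirty, a WB_SYNC_ALL-style pass that only revisits tagged folios would miss it. All names below (toy_folio, start_writeback) are made up for the sketch:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of one page-cache entry; not the real xarray tag machinery. */
struct toy_folio {
	bool dirty;
	bool writeback;
	bool towrite;		/* stands in for PAGECACHE_TAG_TOWRITE */
};

static void start_writeback(struct toy_folio *f, bool keep_write)
{
	f->writeback = true;
	if (!keep_write)
		f->towrite = false;	/* what dropping the tag too early would look like */
}

int main(void)
{
	struct toy_folio f = { .dirty = true, .towrite = true };

	/* Subpage writeback starts while other blocks of the folio are still dirty. */
	start_writeback(&f, f.dirty /* keep_write */);

	/* A WB_SYNC_ALL-style pass only revisits TOWRITE-tagged folios. */
	printf("sync pass still sees the folio: %s\n", f.towrite ? "yes" : "no");
	return 0;
}
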
void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = folio_get_private(folio);
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
writeback, start, len);
unsigned long flags;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ if (subpage_test_bitmap_all_zero(fs_info, folio, writeback)) {
ASSERT(folio_test_writeback(folio));
folio_end_writeback(folio);
}
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = folio_get_private(folio);
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
ordered, start, len);
unsigned long flags;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
folio_set_ordered(folio);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = folio_get_private(folio);
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
ordered, start, len);
unsigned long flags;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ if (subpage_test_bitmap_all_zero(fs_info, folio, ordered))
folio_clear_ordered(folio);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = folio_get_private(folio);
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
checked, start, len);
unsigned long flags;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ if (subpage_test_bitmap_all_set(fs_info, folio, checked))
folio_set_checked(folio);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = folio_get_private(folio);
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
checked, start, len);
unsigned long flags;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
folio_clear_checked(folio);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
/*
@@ -540,16 +545,16 @@ void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
- struct btrfs_subpage *subpage = folio_get_private(folio); \
+ struct btrfs_folio_state *bfs = folio_get_private(folio); \
unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, \
name, start, len); \
unsigned long flags; \
bool ret; \
\
- spin_lock_irqsave(&subpage->lock, flags); \
- ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit, \
+ spin_lock_irqsave(&bfs->lock, flags); \
+ ret = bitmap_test_range_all_set(bfs->bitmaps, start_bit, \
len >> fs_info->sectorsize_bits); \
- spin_unlock_irqrestore(&subpage->lock, flags); \
+ spin_unlock_irqrestore(&bfs->lock, flags); \
return ret; \
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
@@ -569,7 +574,7 @@ void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
- !btrfs_is_subpage(fs_info, folio->mapping)) { \
+ !btrfs_is_subpage(fs_info, folio)) { \
folio_set_func(folio); \
return; \
} \
@@ -579,7 +584,7 @@ void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
- !btrfs_is_subpage(fs_info, folio->mapping)) { \
+ !btrfs_is_subpage(fs_info, folio)) { \
folio_clear_func(folio); \
return; \
} \
@@ -589,7 +594,7 @@ bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
- !btrfs_is_subpage(fs_info, folio->mapping)) \
+ !btrfs_is_subpage(fs_info, folio)) \
return folio_test_func(folio); \
return btrfs_subpage_test_##name(fs_info, folio, start, len); \
} \
@@ -597,7 +602,7 @@ void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
- !btrfs_is_subpage(fs_info, folio->mapping)) { \
+ !btrfs_is_subpage(fs_info, folio)) { \
folio_set_func(folio); \
return; \
} \
@@ -608,7 +613,7 @@ void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
- !btrfs_is_subpage(fs_info, folio->mapping)) { \
+ !btrfs_is_subpage(fs_info, folio)) { \
folio_clear_func(folio); \
return; \
} \
@@ -619,10 +624,32 @@ bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
- !btrfs_is_subpage(fs_info, folio->mapping)) \
+ !btrfs_is_subpage(fs_info, folio)) \
return folio_test_func(folio); \
btrfs_subpage_clamp_range(folio, &start, &len); \
return btrfs_subpage_test_##name(fs_info, folio, start, len); \
+} \
+void btrfs_meta_folio_set_##name(struct folio *folio, const struct extent_buffer *eb) \
+{ \
+ if (!btrfs_meta_is_subpage(eb->fs_info)) { \
+ folio_set_func(folio); \
+ return; \
+ } \
+ btrfs_subpage_set_##name(eb->fs_info, folio, eb->start, eb->len); \
+} \
+void btrfs_meta_folio_clear_##name(struct folio *folio, const struct extent_buffer *eb) \
+{ \
+ if (!btrfs_meta_is_subpage(eb->fs_info)) { \
+ folio_clear_func(folio); \
+ return; \
+ } \
+ btrfs_subpage_clear_##name(eb->fs_info, folio, eb->start, eb->len); \
+} \
+bool btrfs_meta_folio_test_##name(struct folio *folio, const struct extent_buffer *eb) \
+{ \
+ if (!btrfs_meta_is_subpage(eb->fs_info)) \
+ return folio_test_func(folio); \
+ return btrfs_subpage_test_##name(eb->fs_info, folio, eb->start, eb->len); \
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate,
folio_test_uptodate);
@@ -635,6 +662,27 @@ IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
folio_test_checked);
+#define GET_SUBPAGE_BITMAP(fs_info, folio, name, dst) \
+{ \
+ const unsigned int __bpf = btrfs_blocks_per_folio(fs_info, folio); \
+ const struct btrfs_folio_state *__bfs = folio_get_private(folio); \
+ \
+ ASSERT(__bpf <= BITS_PER_LONG); \
+ *dst = bitmap_read(__bfs->bitmaps, \
+ __bpf * btrfs_bitmap_nr_##name, __bpf); \
+}
+
+#define SUBPAGE_DUMP_BITMAP(fs_info, folio, name, start, len) \
+{ \
+ unsigned long bitmap; \
+ const unsigned int __bpf = btrfs_blocks_per_folio(fs_info, folio); \
+ \
+ GET_SUBPAGE_BITMAP(fs_info, folio, name, &bitmap); \
+ btrfs_warn(fs_info, \
+ "dumping bitmap start=%llu len=%u folio=%llu " #name "_bitmap=%*pbl", \
+ start, len, folio_pos(folio), __bpf, &bitmap); \
+}
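
GET_SUBPAGE_BITMAP() simply pulls one named slice of blocks_per_folio bits out of the packed bitmap as a single value. A userspace sketch of that extraction, done bit by bit for clarity where the kernel uses bitmap_read(); read_sub_bitmap() and the sample values are illustrative only:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_ULONG (sizeof(unsigned long) * CHAR_BIT)

/* Return bits [bpf * nr, bpf * nr + bpf) of the packed bitmap as one value. */
static unsigned long read_sub_bitmap(const unsigned long *bitmaps,
				     unsigned int bpf, unsigned int nr)
{
	unsigned long val = 0;

	for (unsigned int i = 0; i < bpf; i++) {
		unsigned int bit = bpf * nr + i;

		if (bitmaps[bit / BITS_PER_ULONG] & (1UL << (bit % BITS_PER_ULONG)))
			val |= 1UL << i;
	}
	return val;
}

int main(void)
{
	/* 4 blocks per folio: uptodate=0b1111, dirty=0b0011 packed back to back. */
	unsigned long bitmaps[1] = { 0xfUL | (0x3UL << 4) };

	printf("dirty bitmap: %#lx\n", read_sub_bitmap(bitmaps, 4, 1));	/* 0x3 */
	return 0;
}
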
+
/*
* Make sure not only the page dirty bit is cleared, but also subpage dirty bit
* is cleared.
@@ -642,7 +690,7 @@ IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage;
+ struct btrfs_folio_state *bfs;
unsigned int start_bit;
unsigned int nbits;
unsigned long flags;
@@ -650,18 +698,22 @@ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
return;
- if (!btrfs_is_subpage(fs_info, folio->mapping)) {
+ if (!btrfs_is_subpage(fs_info, folio)) {
ASSERT(!folio_test_dirty(folio));
return;
}
start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len);
nbits = len >> fs_info->sectorsize_bits;
- subpage = folio_get_private(folio);
- ASSERT(subpage);
- spin_lock_irqsave(&subpage->lock, flags);
- ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
- spin_unlock_irqrestore(&subpage->lock, flags);
+ bfs = folio_get_private(folio);
+ ASSERT(bfs);
+ spin_lock_irqsave(&bfs->lock, flags);
+ if (unlikely(!bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits))) {
+ SUBPAGE_DUMP_BITMAP(fs_info, folio, dirty, start, len);
+ ASSERT(bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits));
+ }
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
/*
@@ -674,86 +726,103 @@ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage;
+ struct btrfs_folio_state *bfs;
unsigned long flags;
unsigned int start_bit;
unsigned int nbits;
int ret;
ASSERT(folio_test_locked(folio));
- if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping))
+ if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio))
return;
- subpage = folio_get_private(folio);
+ bfs = folio_get_private(folio);
start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
nbits = len >> fs_info->sectorsize_bits;
- spin_lock_irqsave(&subpage->lock, flags);
+ spin_lock_irqsave(&bfs->lock, flags);
/* Target range should not yet be locked. */
- ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
- bitmap_set(subpage->bitmaps, start_bit, nbits);
- ret = atomic_add_return(nbits, &subpage->nr_locked);
- ASSERT(ret <= fs_info->sectors_per_page);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ if (unlikely(!bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits))) {
+ SUBPAGE_DUMP_BITMAP(fs_info, folio, locked, start, len);
+ ASSERT(bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits));
+ }
+ bitmap_set(bfs->bitmaps, start_bit, nbits);
+ ret = atomic_add_return(nbits, &bfs->nr_locked);
+ ASSERT(ret <= btrfs_blocks_per_folio(fs_info, folio));
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
-#define GET_SUBPAGE_BITMAP(subpage, fs_info, name, dst) \
-{ \
- const int sectors_per_page = fs_info->sectors_per_page; \
- \
- ASSERT(sectors_per_page < BITS_PER_LONG); \
- *dst = bitmap_read(subpage->bitmaps, \
- sectors_per_page * btrfs_bitmap_nr_##name, \
- sectors_per_page); \
+/*
+ * Clear the dirty flag for the folio.
+ *
+ * If the affected folio is no longer dirty, return true. Otherwise return false.
+ */
+bool btrfs_meta_folio_clear_and_test_dirty(struct folio *folio, const struct extent_buffer *eb)
+{
+ bool last;
+
+ if (!btrfs_meta_is_subpage(eb->fs_info)) {
+ folio_clear_dirty_for_io(folio);
+ return true;
+ }
+
+ last = btrfs_subpage_clear_and_test_dirty(eb->fs_info, folio, eb->start, eb->len);
+ if (last) {
+ folio_clear_dirty_for_io(folio);
+ return true;
+ }
+ return false;
}
void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage;
- const u32 sectors_per_page = fs_info->sectors_per_page;
+ struct btrfs_folio_state *bfs;
+ const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
unsigned long uptodate_bitmap;
unsigned long dirty_bitmap;
unsigned long writeback_bitmap;
unsigned long ordered_bitmap;
unsigned long checked_bitmap;
+ unsigned long locked_bitmap;
unsigned long flags;
ASSERT(folio_test_private(folio) && folio_get_private(folio));
- ASSERT(sectors_per_page > 1);
- subpage = folio_get_private(folio);
-
- spin_lock_irqsave(&subpage->lock, flags);
- GET_SUBPAGE_BITMAP(subpage, fs_info, uptodate, &uptodate_bitmap);
- GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, &dirty_bitmap);
- GET_SUBPAGE_BITMAP(subpage, fs_info, writeback, &writeback_bitmap);
- GET_SUBPAGE_BITMAP(subpage, fs_info, ordered, &ordered_bitmap);
- GET_SUBPAGE_BITMAP(subpage, fs_info, checked, &checked_bitmap);
- GET_SUBPAGE_BITMAP(subpage, fs_info, locked, &checked_bitmap);
- spin_unlock_irqrestore(&subpage->lock, flags);
-
- dump_page(folio_page(folio, 0), "btrfs subpage dump");
+ ASSERT(blocks_per_folio > 1);
+ bfs = folio_get_private(folio);
+
+ spin_lock_irqsave(&bfs->lock, flags);
+ GET_SUBPAGE_BITMAP(fs_info, folio, uptodate, &uptodate_bitmap);
+ GET_SUBPAGE_BITMAP(fs_info, folio, dirty, &dirty_bitmap);
+ GET_SUBPAGE_BITMAP(fs_info, folio, writeback, &writeback_bitmap);
+ GET_SUBPAGE_BITMAP(fs_info, folio, ordered, &ordered_bitmap);
+ GET_SUBPAGE_BITMAP(fs_info, folio, checked, &checked_bitmap);
+ GET_SUBPAGE_BITMAP(fs_info, folio, locked, &locked_bitmap);
+ spin_unlock_irqrestore(&bfs->lock, flags);
+
+ dump_page(folio_page(folio, 0), "btrfs folio state dump");
btrfs_warn(fs_info,
-"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
+"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl locked=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
start, len, folio_pos(folio),
- sectors_per_page, &uptodate_bitmap,
- sectors_per_page, &dirty_bitmap,
- sectors_per_page, &writeback_bitmap,
- sectors_per_page, &ordered_bitmap,
- sectors_per_page, &checked_bitmap);
+ blocks_per_folio, &uptodate_bitmap,
+ blocks_per_folio, &dirty_bitmap,
+ blocks_per_folio, &locked_bitmap,
+ blocks_per_folio, &writeback_bitmap,
+ blocks_per_folio, &ordered_bitmap,
+ blocks_per_folio, &checked_bitmap);
}
void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
struct folio *folio,
unsigned long *ret_bitmap)
{
- struct btrfs_subpage *subpage;
+ struct btrfs_folio_state *bfs;
unsigned long flags;
ASSERT(folio_test_private(folio) && folio_get_private(folio));
- ASSERT(fs_info->sectors_per_page > 1);
- subpage = folio_get_private(folio);
+ ASSERT(btrfs_blocks_per_folio(fs_info, folio) > 1);
+ bfs = folio_get_private(folio);
- spin_lock_irqsave(&subpage->lock, flags);
- GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, ret_bitmap);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_lock_irqsave(&bfs->lock, flags);
+ GET_SUBPAGE_BITMAP(fs_info, folio, dirty, ret_bitmap);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h
index 428fa9389fd4..d81a0ade559f 100644
--- a/fs/btrfs/subpage.h
+++ b/fs/btrfs/subpage.h
@@ -6,13 +6,13 @@
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/sizes.h>
+#include "btrfs_inode.h"
struct address_space;
struct folio;
-struct btrfs_fs_info;
/*
- * Extra info for subpapge bitmap.
+ * Extra info for subpage bitmap.
*
* For subpage we pack all uptodate/dirty/writeback/ordered bitmaps into
* one larger bitmap.
@@ -31,9 +31,31 @@ struct btrfs_fs_info;
enum {
btrfs_bitmap_nr_uptodate = 0,
btrfs_bitmap_nr_dirty,
+
+ /*
+ * This can be changed to atomic eventually. But this change will rely
+	 * on the async delalloc range rework for the locked bitmap, as async
+	 * delalloc can unlock its range and mark blocks writeback at an
+	 * arbitrary time.
+ */
btrfs_bitmap_nr_writeback,
+
+ /*
+ * The ordered and checked flags are for COW fixup, already marked
+ * deprecated, and will be removed eventually.
+ */
btrfs_bitmap_nr_ordered,
btrfs_bitmap_nr_checked,
+
+ /*
+	 * The locked bit is for the async delalloc range (compression). Currently
+	 * an async extent is queued with the range locked until the compression
+	 * is done, so an async extent can unlock the range at an arbitrary time.
+ *
+ * This will need a rework on the async extent lifespan (mark writeback
+ * and do compression) before deprecating this flag.
+ */
btrfs_bitmap_nr_locked,
btrfs_bitmap_nr_max
};
@@ -42,7 +64,7 @@ enum {
* Structure to trace status of each sector inside a page, attached to
* page::private for both data and metadata inodes.
*/
-struct btrfs_subpage {
+struct btrfs_folio_state {
/* Common members for both data and metadata pages */
spinlock_t lock;
union {
@@ -50,7 +72,7 @@ struct btrfs_subpage {
* Structures only used by metadata
*
* @eb_refs should only be operated under private_lock, as it
- * manages whether the subpage can be detached.
+ * manages whether the btrfs_folio_state can be detached.
*/
atomic_t eb_refs;
@@ -64,29 +86,44 @@ struct btrfs_subpage {
unsigned long bitmaps[];
};
-enum btrfs_subpage_type {
+enum btrfs_folio_type {
BTRFS_SUBPAGE_METADATA,
BTRFS_SUBPAGE_DATA,
};
-#if PAGE_SIZE > SZ_4K
-bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping);
-#else
+/*
+ * Subpage support for metadata is more complex, as we can have dummy extent
+ * buffers, where folios have no mapping to determine the owning inode.
+ *
+ * Thankfully we only need to check if node size is smaller than page size.
+ * Even with larger folio support, we will only allocate a folio as large as
+ * node size.
+ * Thus if nodesize < PAGE_SIZE, we know metadata needs the subpage routine.
+ */
+static inline bool btrfs_meta_is_subpage(const struct btrfs_fs_info *fs_info)
+{
+ return fs_info->nodesize < PAGE_SIZE;
+}
static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info,
- struct address_space *mapping)
+ struct folio *folio)
{
- return false;
+ if (folio->mapping && folio->mapping->host)
+ ASSERT(is_data_inode(BTRFS_I(folio->mapping->host)));
+ return fs_info->sectorsize < folio_size(folio);
}
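
The two checks differ only in what they compare: metadata can rely on nodesize vs PAGE_SIZE (dummy extent buffers may have no mapping, and eb folios never exceed nodesize), while data compares sectorsize against the actual folio size. A userspace sketch with hypothetical helper names and example geometry:

#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 16384u	/* e.g. a 16K-page kernel */

/* Metadata: dummy ebs may lack a mapping, so only nodesize vs page size matters. */
static bool meta_is_subpage(unsigned int nodesize)
{
	return nodesize < EXAMPLE_PAGE_SIZE;
}

/* Data: compare the block (sector) size against the actual folio size. */
static bool data_is_subpage(unsigned int sectorsize, unsigned long folio_size)
{
	return sectorsize < folio_size;
}

int main(void)
{
	printf("16K nodes on 16K pages -> meta subpage? %d\n", meta_is_subpage(16384));	/* 0 */
	printf("4K blocks in a 16K folio -> data subpage? %d\n", data_is_subpage(4096, 16384));	/* 1 */
	return 0;
}
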
-#endif
-int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
- struct folio *folio, enum btrfs_subpage_type type);
-void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio);
+int btrfs_attach_folio_state(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, enum btrfs_folio_type type);
+void btrfs_detach_folio_state(const struct btrfs_fs_info *fs_info, struct folio *folio,
+ enum btrfs_folio_type type);
/* Allocate additional data where page represents more than one sector */
-struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
- enum btrfs_subpage_type type);
-void btrfs_free_subpage(struct btrfs_subpage *subpage);
+struct btrfs_folio_state *btrfs_alloc_folio_state(const struct btrfs_fs_info *fs_info,
+ size_t fsize, enum btrfs_folio_type type);
+static inline void btrfs_free_folio_state(struct btrfs_folio_state *bfs)
+{
+ kfree(bfs);
+}
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
@@ -110,6 +147,13 @@ void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
* btrfs_folio_clamp_*() are similar to btrfs_folio_*(), except the range doesn't
* need to be inside the page. Those functions will truncate the range
* automatically.
+ *
+ * Both btrfs_folio_*() and btrfs_folio_clamp_*() are for data folios.
+ *
+ * For metadata, one should use btrfs_meta_folio_*() helpers instead, and there
+ * is no clamp version for metadata helpers, as we either go subpage
+ * (nodesize < PAGE_SIZE) or go regular folio helpers (nodesize >= PAGE_SIZE,
+ * and our folio is never larger than nodesize).
*/
#define DECLARE_BTRFS_SUBPAGE_OPS(name) \
void btrfs_subpage_set_##name(const struct btrfs_fs_info *fs_info, \
@@ -129,7 +173,10 @@ void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len); \
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
- struct folio *folio, u64 start, u32 len);
+ struct folio *folio, u64 start, u32 len); \
+void btrfs_meta_folio_set_##name(struct folio *folio, const struct extent_buffer *eb); \
+void btrfs_meta_folio_clear_##name(struct folio *folio, const struct extent_buffer *eb); \
+bool btrfs_meta_folio_test_##name(struct folio *folio, const struct extent_buffer *eb);
DECLARE_BTRFS_SUBPAGE_OPS(uptodate);
DECLARE_BTRFS_SUBPAGE_OPS(dirty);
@@ -137,11 +184,25 @@ DECLARE_BTRFS_SUBPAGE_OPS(writeback);
DECLARE_BTRFS_SUBPAGE_OPS(ordered);
DECLARE_BTRFS_SUBPAGE_OPS(checked);
+/*
+ * Helper for error cleanup, where a folio will have its dirty flag cleared,
+ * with writeback started and finished.
+ */
+static inline void btrfs_folio_clamp_finish_io(struct btrfs_fs_info *fs_info,
+ struct folio *locked_folio,
+ u64 start, u32 len)
+{
+ btrfs_folio_clamp_clear_dirty(fs_info, locked_folio, start, len);
+ btrfs_folio_clamp_set_writeback(fs_info, locked_folio, start, len);
+ btrfs_folio_clamp_clear_writeback(fs_info, locked_folio, start, len);
+}
+
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len);
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len);
+bool btrfs_meta_folio_clear_and_test_dirty(struct folio *folio, const struct extent_buffer *eb);
void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
struct folio *folio,
unsigned long *ret_bitmap);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 97a85d180b61..1999533b52be 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -84,10 +84,13 @@ struct btrfs_fs_context {
u32 thread_pool_size;
unsigned long long mount_opt;
unsigned long compress_type:4;
- unsigned int compress_level;
+ int compress_level;
refcount_t refs;
};
+static void btrfs_emit_options(struct btrfs_fs_info *info,
+ struct btrfs_fs_context *old);
+
enum {
Opt_acl,
Opt_clear_cache,
@@ -125,15 +128,13 @@ enum {
/* Rescue options */
Opt_rescue,
Opt_usebackuproot,
- Opt_nologreplay,
/* Debugging options */
Opt_enospc_debug,
#ifdef CONFIG_BTRFS_DEBUG
Opt_fragment, Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all,
-#endif
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
Opt_ref_verify,
+ Opt_ref_tracker,
#endif
Opt_err,
};
@@ -246,8 +247,6 @@ static const struct fs_parameter_spec btrfs_fs_parameters[] = {
/* Rescue options. */
fsparam_enum("rescue", Opt_rescue, btrfs_parameter_rescue),
- /* Deprecated, with alias rescue=nologreplay */
- __fsparam(NULL, "nologreplay", Opt_nologreplay, fs_param_deprecated, NULL),
/* Deprecated, with alias rescue=usebackuproot */
__fsparam(NULL, "usebackuproot", Opt_usebackuproot, fs_param_deprecated, NULL),
/* For compatibility only, alias for "rescue=nologreplay". */
@@ -257,17 +256,85 @@ static const struct fs_parameter_spec btrfs_fs_parameters[] = {
fsparam_flag_no("enospc_debug", Opt_enospc_debug),
#ifdef CONFIG_BTRFS_DEBUG
fsparam_enum("fragment", Opt_fragment, btrfs_parameter_fragment),
-#endif
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
+ fsparam_flag("ref_tracker", Opt_ref_tracker),
fsparam_flag("ref_verify", Opt_ref_verify),
#endif
{}
};
-/* No support for restricting writes to btrfs devices yet... */
-static inline blk_mode_t btrfs_open_mode(struct fs_context *fc)
+static bool btrfs_match_compress_type(const char *string, const char *type, bool may_have_level)
{
- return sb_open_mode(fc->sb_flags) & ~BLK_OPEN_RESTRICT_WRITES;
+ const int len = strlen(type);
+
+ return (strncmp(string, type, len) == 0) &&
+ ((may_have_level && string[len] == ':') || string[len] == '\0');
+}
+
+static int btrfs_parse_compress(struct btrfs_fs_context *ctx,
+ const struct fs_parameter *param, int opt)
+{
+ const char *string = param->string;
+ int ret;
+
+ /*
+ * Provide the same semantics as older kernels that don't use fs
+ * context, specifying the "compress" option clears "force-compress"
+ * without the need to pass "compress-force=[no|none]" before
+ * specifying "compress".
+ */
+ if (opt != Opt_compress_force && opt != Opt_compress_force_type)
+ btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS);
+
+ if (opt == Opt_compress || opt == Opt_compress_force) {
+ ctx->compress_type = BTRFS_COMPRESS_ZLIB;
+ ctx->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
+ btrfs_set_opt(ctx->mount_opt, COMPRESS);
+ btrfs_clear_opt(ctx->mount_opt, NODATACOW);
+ btrfs_clear_opt(ctx->mount_opt, NODATASUM);
+ } else if (btrfs_match_compress_type(string, "zlib", true)) {
+ ctx->compress_type = BTRFS_COMPRESS_ZLIB;
+ ret = btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, string + 4,
+ &ctx->compress_level);
+ if (ret < 0)
+ goto error;
+ btrfs_set_opt(ctx->mount_opt, COMPRESS);
+ btrfs_clear_opt(ctx->mount_opt, NODATACOW);
+ btrfs_clear_opt(ctx->mount_opt, NODATASUM);
+ } else if (btrfs_match_compress_type(string, "lzo", true)) {
+ ctx->compress_type = BTRFS_COMPRESS_LZO;
+ ret = btrfs_compress_str2level(BTRFS_COMPRESS_LZO, string + 3,
+ &ctx->compress_level);
+ if (ret < 0)
+ goto error;
+ if (string[3] == ':' && string[4])
+ btrfs_warn(NULL, "Compression level ignored for LZO");
+ btrfs_set_opt(ctx->mount_opt, COMPRESS);
+ btrfs_clear_opt(ctx->mount_opt, NODATACOW);
+ btrfs_clear_opt(ctx->mount_opt, NODATASUM);
+ } else if (btrfs_match_compress_type(string, "zstd", true)) {
+ ctx->compress_type = BTRFS_COMPRESS_ZSTD;
+ ret = btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD, string + 4,
+ &ctx->compress_level);
+ if (ret < 0)
+ goto error;
+ btrfs_set_opt(ctx->mount_opt, COMPRESS);
+ btrfs_clear_opt(ctx->mount_opt, NODATACOW);
+ btrfs_clear_opt(ctx->mount_opt, NODATASUM);
+ } else if (btrfs_match_compress_type(string, "no", false) ||
+ btrfs_match_compress_type(string, "none", false)) {
+ ctx->compress_level = 0;
+ ctx->compress_type = 0;
+ btrfs_clear_opt(ctx->mount_opt, COMPRESS);
+ btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS);
+ } else {
+ ret = -EINVAL;
+ goto error;
+ }
+ return 0;
+error:
+ btrfs_err(NULL, "failed to parse compression option '%s'", string);
+ return ret;
+
}
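
The new matcher accepts a type name only when it is followed by end-of-string, or by ':' where a level is allowed, so "zstd:3" matches while a stray suffix no longer matches by prefix as the old strncmp()-only check did. A standalone sketch of just that matching (level parsing omitted); match_compress_type() here is a userspace stand-in:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool match_compress_type(const char *string, const char *type, bool may_have_level)
{
	const size_t len = strlen(type);

	return strncmp(string, type, len) == 0 &&
	       ((may_have_level && string[len] == ':') || string[len] == '\0');
}

int main(void)
{
	printf("%d\n", match_compress_type("zstd:3", "zstd", true));	/* 1 */
	printf("%d\n", match_compress_type("zstdfoo", "zstd", true));	/* 0 */
	printf("%d\n", match_compress_type("lzo", "lzo", true));	/* 1 */
	printf("%d\n", match_compress_type("none", "none", false));	/* 1 */
	return 0;
}
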
static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
@@ -306,10 +373,9 @@ static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
break;
case Opt_device: {
struct btrfs_device *device;
- blk_mode_t mode = btrfs_open_mode(fc);
mutex_lock(&uuid_mutex);
- device = btrfs_scan_one_device(param->string, mode, false);
+ device = btrfs_scan_one_device(param->string, false);
mutex_unlock(&uuid_mutex);
if (IS_ERR(device))
return PTR_ERR(device);
@@ -339,53 +405,8 @@ static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
fallthrough;
case Opt_compress:
case Opt_compress_type:
- /*
- * Provide the same semantics as older kernels that don't use fs
- * context, specifying the "compress" option clears
- * "force-compress" without the need to pass
- * "compress-force=[no|none]" before specifying "compress".
- */
- if (opt != Opt_compress_force && opt != Opt_compress_force_type)
- btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS);
-
- if (opt == Opt_compress || opt == Opt_compress_force) {
- ctx->compress_type = BTRFS_COMPRESS_ZLIB;
- ctx->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
- btrfs_set_opt(ctx->mount_opt, COMPRESS);
- btrfs_clear_opt(ctx->mount_opt, NODATACOW);
- btrfs_clear_opt(ctx->mount_opt, NODATASUM);
- } else if (strncmp(param->string, "zlib", 4) == 0) {
- ctx->compress_type = BTRFS_COMPRESS_ZLIB;
- ctx->compress_level =
- btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB,
- param->string + 4);
- btrfs_set_opt(ctx->mount_opt, COMPRESS);
- btrfs_clear_opt(ctx->mount_opt, NODATACOW);
- btrfs_clear_opt(ctx->mount_opt, NODATASUM);
- } else if (strncmp(param->string, "lzo", 3) == 0) {
- ctx->compress_type = BTRFS_COMPRESS_LZO;
- ctx->compress_level = 0;
- btrfs_set_opt(ctx->mount_opt, COMPRESS);
- btrfs_clear_opt(ctx->mount_opt, NODATACOW);
- btrfs_clear_opt(ctx->mount_opt, NODATASUM);
- } else if (strncmp(param->string, "zstd", 4) == 0) {
- ctx->compress_type = BTRFS_COMPRESS_ZSTD;
- ctx->compress_level =
- btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD,
- param->string + 4);
- btrfs_set_opt(ctx->mount_opt, COMPRESS);
- btrfs_clear_opt(ctx->mount_opt, NODATACOW);
- btrfs_clear_opt(ctx->mount_opt, NODATASUM);
- } else if (strncmp(param->string, "no", 2) == 0) {
- ctx->compress_level = 0;
- ctx->compress_type = 0;
- btrfs_clear_opt(ctx->mount_opt, COMPRESS);
- btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS);
- } else {
- btrfs_err(NULL, "unrecognized compression value %s",
- param->string);
+ if (btrfs_parse_compress(ctx, param, opt))
return -EINVAL;
- }
break;
case Opt_ssd:
if (result.negated) {
@@ -449,11 +470,6 @@ static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
else
btrfs_clear_opt(ctx->mount_opt, NOTREELOG);
break;
- case Opt_nologreplay:
- btrfs_warn(NULL,
- "'nologreplay' is deprecated, use 'rescue=nologreplay' instead");
- btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY);
- break;
case Opt_norecovery:
btrfs_info(NULL,
"'norecovery' is for compatibility only, recommended to use 'rescue=nologreplay'");
@@ -569,6 +585,10 @@ static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
break;
case Opt_commit_interval:
ctx->commit_interval = result.uint_32;
+ if (ctx->commit_interval > BTRFS_WARNING_COMMIT_INTERVAL) {
+ btrfs_warn(NULL, "excessive commit interval %u, use with care",
+ ctx->commit_interval);
+ }
if (ctx->commit_interval == 0)
ctx->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
break;
@@ -624,11 +644,12 @@ static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
return -EINVAL;
}
break;
-#endif
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
case Opt_ref_verify:
btrfs_set_opt(ctx->mount_opt, REF_VERIFY);
break;
+ case Opt_ref_tracker:
+ btrfs_set_opt(ctx->mount_opt, REF_TRACKER);
+ break;
#endif
default:
btrfs_err(NULL, "unrecognized mount option '%s'", param->key);
@@ -693,12 +714,9 @@ bool btrfs_check_options(const struct btrfs_fs_info *info,
if (!test_bit(BTRFS_FS_STATE_REMOUNTING, &info->fs_state)) {
if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) {
- btrfs_info(info, "disk space caching is enabled");
btrfs_warn(info,
"space cache v1 is being deprecated and will be removed in a future release, please use -o space_cache=v2");
}
- if (btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE))
- btrfs_info(info, "using free-space-tree");
}
return ret;
@@ -789,17 +807,15 @@ char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
struct btrfs_root_ref *root_ref;
struct btrfs_inode_ref *inode_ref;
struct btrfs_key key;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
char *name = NULL, *ptr;
u64 dirid;
int len;
int ret;
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!path)
+ return ERR_PTR(-ENOMEM);
name = kmalloc(PATH_MAX, GFP_KERNEL);
if (!name) {
@@ -887,7 +903,6 @@ char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
fs_root = NULL;
}
- btrfs_free_path(path);
if (ptr == name + PATH_MAX - 1) {
name[0] = '/';
name[1] = '\0';
@@ -898,7 +913,6 @@ char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
err:
btrfs_put_root(fs_root);
- btrfs_free_path(path);
kfree(name);
return ERR_PTR(ret);
}
@@ -907,7 +921,7 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objec
{
struct btrfs_root *root = fs_info->tree_root;
struct btrfs_dir_item *di;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key location;
struct fscrypt_str name = FSTR_INIT("default", 7);
u64 dir_id;
@@ -924,7 +938,6 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objec
dir_id = btrfs_super_root_dir(fs_info->super_copy);
di = btrfs_lookup_dir_item(NULL, root, path, dir_id, &name, 0);
if (IS_ERR(di)) {
- btrfs_free_path(path);
return PTR_ERR(di);
}
if (!di) {
@@ -933,13 +946,11 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objec
* it's always been there, but don't freak out, just try and
* mount the top-level subvolume.
*/
- btrfs_free_path(path);
*objectid = BTRFS_FS_TREE_OBJECTID;
return 0;
}
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
- btrfs_free_path(path);
*objectid = location.objectid;
return 0;
}
@@ -947,44 +958,46 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objec
static int btrfs_fill_super(struct super_block *sb,
struct btrfs_fs_devices *fs_devices)
{
- struct inode *inode;
+ struct btrfs_inode *inode;
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
- int err;
+ int ret;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_magic = BTRFS_SUPER_MAGIC;
sb->s_op = &btrfs_super_ops;
- sb->s_d_op = &btrfs_dentry_operations;
+ set_default_d_op(sb, &btrfs_dentry_operations);
sb->s_export_op = &btrfs_export_ops;
#ifdef CONFIG_FS_VERITY
sb->s_vop = &btrfs_verityops;
#endif
sb->s_xattr = btrfs_xattr_handlers;
sb->s_time_gran = 1;
- sb->s_iflags |= SB_I_CGROUPWB;
+ sb->s_iflags |= SB_I_CGROUPWB | SB_I_ALLOW_HSM;
- err = super_setup_bdi(sb);
- if (err) {
+ ret = super_setup_bdi(sb);
+ if (ret) {
btrfs_err(fs_info, "super_setup_bdi failed");
- return err;
+ return ret;
}
- err = open_ctree(sb, fs_devices);
- if (err) {
- btrfs_err(fs_info, "open_ctree failed");
- return err;
+ ret = open_ctree(sb, fs_devices);
+ if (ret) {
+ btrfs_err(fs_info, "open_ctree failed: %d", ret);
+ return ret;
}
+ btrfs_emit_options(fs_info, NULL);
+
inode = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root);
if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- btrfs_handle_fs_error(fs_info, err, NULL);
+ ret = PTR_ERR(inode);
+ btrfs_handle_fs_error(fs_info, ret, NULL);
goto fail_close;
}
- sb->s_root = d_make_root(inode);
+ sb->s_root = d_make_root(&inode->vfs_inode);
if (!sb->s_root) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto fail_close;
}
@@ -993,7 +1006,7 @@ static int btrfs_fill_super(struct super_block *sb,
fail_close:
close_ctree(fs_info);
- return err;
+ return ret;
}
int btrfs_sync_fs(struct super_block *sb, int wait)
@@ -1072,7 +1085,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
seq_printf(seq, ",compress-force=%s", compress_type);
else
seq_printf(seq, ",compress=%s", compress_type);
- if (info->compress_level)
+ if (info->compress_level && info->compress_type != BTRFS_COMPRESS_LZO)
seq_printf(seq, ":%d", info->compress_level);
}
if (btrfs_test_opt(info, NOSSD))
@@ -1135,12 +1148,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
#endif
if (btrfs_test_opt(info, REF_VERIFY))
seq_puts(seq, ",ref_verify");
+ if (btrfs_test_opt(info, REF_TRACKER))
+ seq_puts(seq, ",ref_tracker");
seq_printf(seq, ",subvolid=%llu", btrfs_root_id(BTRFS_I(d_inode(dentry))->root));
subvol_name = btrfs_get_subvol_name_from_objectid(info,
btrfs_root_id(BTRFS_I(d_inode(dentry))->root));
if (!IS_ERR(subvol_name)) {
- seq_puts(seq, ",subvol=");
- seq_escape(seq, subvol_name, " \t\n\\");
+ seq_show_option(seq, "subvol", subvol_name);
kfree(subvol_name);
}
return 0;
@@ -1149,11 +1163,11 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
/*
* subvolumes are identified by ino 256
*/
-static inline int is_subvolume_inode(struct inode *inode)
+static inline bool is_subvolume_inode(struct inode *inode)
{
if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
- return 1;
- return 0;
+ return true;
+ return false;
}
static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
@@ -1262,7 +1276,7 @@ static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
/*
- * We need to cleanup all defragable inodes if the autodefragment is
+ * We need to cleanup all defraggable inodes if the autodefragment is
* close or the filesystem is read only.
*/
if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
@@ -1433,7 +1447,7 @@ static void btrfs_emit_options(struct btrfs_fs_info *info,
{
btrfs_info_if_set(info, old, NODATASUM, "setting nodatasum");
btrfs_info_if_set(info, old, DEGRADED, "allowing degraded mounts");
- btrfs_info_if_set(info, old, NODATASUM, "setting nodatasum");
+ btrfs_info_if_set(info, old, NODATACOW, "setting nodatacow");
btrfs_info_if_set(info, old, SSD, "enabling ssd optimizations");
btrfs_info_if_set(info, old, SSD_SPREAD, "using spread ssd allocation scheme");
btrfs_info_if_set(info, old, NOBARRIER, "turning off barriers");
@@ -1455,10 +1469,11 @@ static void btrfs_emit_options(struct btrfs_fs_info *info,
btrfs_info_if_set(info, old, IGNOREMETACSUMS, "ignoring meta csums");
btrfs_info_if_set(info, old, IGNORESUPERFLAGS, "ignoring unknown super block flags");
+ btrfs_info_if_unset(info, old, NODATASUM, "setting datasum");
btrfs_info_if_unset(info, old, NODATACOW, "setting datacow");
btrfs_info_if_unset(info, old, SSD, "not using ssd optimizations");
btrfs_info_if_unset(info, old, SSD_SPREAD, "not using spread ssd allocation scheme");
- btrfs_info_if_unset(info, old, NOBARRIER, "turning off barriers");
+ btrfs_info_if_unset(info, old, NOBARRIER, "turning on barriers");
btrfs_info_if_unset(info, old, NOTREELOG, "enabling tree log");
btrfs_info_if_unset(info, old, SPACE_CACHE, "disabling disk space caching");
btrfs_info_if_unset(info, old, FREE_SPACE_TREE, "disabling free space tree");
@@ -1595,7 +1610,7 @@ static inline void btrfs_descending_sort_devices(
static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
u64 *free_bytes)
{
- struct btrfs_device_info *devices_info;
+ struct btrfs_device_info AUTO_KFREE(devices_info);
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
u64 type;
@@ -1693,7 +1708,6 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
nr_devices--;
}
- kfree(devices_info);
*free_bytes = avail_space;
return 0;
}
@@ -1831,10 +1845,9 @@ static int btrfs_get_tree_super(struct fs_context *fc)
struct btrfs_fs_info *fs_info = fc->s_fs_info;
struct btrfs_fs_context *ctx = fc->fs_private;
struct btrfs_fs_devices *fs_devices = NULL;
- struct block_device *bdev;
struct btrfs_device *device;
struct super_block *sb;
- blk_mode_t mode = btrfs_open_mode(fc);
+ blk_mode_t mode = sb_open_mode(fc->sb_flags);
int ret;
btrfs_ctx_to_info(fs_info, ctx);
@@ -1844,69 +1857,103 @@ static int btrfs_get_tree_super(struct fs_context *fc)
* With 'true' passed to btrfs_scan_one_device() (mount time) we expect
* either a valid device or an error.
*/
- device = btrfs_scan_one_device(fc->source, mode, true);
+ device = btrfs_scan_one_device(fc->source, true);
ASSERT(device != NULL);
if (IS_ERR(device)) {
mutex_unlock(&uuid_mutex);
return PTR_ERR(device);
}
-
fs_devices = device->fs_devices;
+ /*
+	 * We cannot hold uuid_mutex while calling sget_fc(), as that would
+	 * lead to a locking order reversal with s_umount.
+	 *
+	 * So increase the holding count of fs_devices here, which ensures the
+	 * fs_devices itself won't be freed.
+ */
+ btrfs_fs_devices_inc_holding(fs_devices);
fs_info->fs_devices = fs_devices;
-
- ret = btrfs_open_devices(fs_devices, mode, &btrfs_fs_type);
mutex_unlock(&uuid_mutex);
- if (ret)
- return ret;
-
- if (!(fc->sb_flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
- ret = -EACCES;
- goto error;
- }
- bdev = fs_devices->latest_dev->bdev;
- /*
- * From now on the error handling is not straightforward.
- *
- * If successful, this will transfer the fs_info into the super block,
- * and fc->s_fs_info will be NULL. However if there's an existing
- * super, we'll still have fc->s_fs_info populated. If we error
- * completely out it'll be cleaned up when we drop the fs_context,
- * otherwise it's tied to the lifetime of the super_block.
- */
sb = sget_fc(fc, btrfs_fc_test_super, set_anon_super_fc);
if (IS_ERR(sb)) {
- ret = PTR_ERR(sb);
- goto error;
+ mutex_lock(&uuid_mutex);
+ btrfs_fs_devices_dec_holding(fs_devices);
+ /*
+ * Since the fs_devices is not opened, it can be freed at any
+ * time after unlocking uuid_mutex. We need to avoid double
+ * free through put_fs_context()->btrfs_free_fs_info().
+ * So here we reset fs_info->fs_devices to NULL, and let the
+	 * regular fs_devices reclaim path handle it.
+ *
+ * This applies to all later branches where no fs_devices is
+ * opened.
+ */
+ fs_info->fs_devices = NULL;
+ mutex_unlock(&uuid_mutex);
+ return PTR_ERR(sb);
}
- set_device_specific_options(fs_info);
-
if (sb->s_root) {
- btrfs_close_devices(fs_devices);
- if ((fc->sb_flags ^ sb->s_flags) & SB_RDONLY)
- ret = -EBUSY;
+ /*
+		 * Not the first mount of the fs, thus we got an existing super block.
+ * Will reuse the returned super block, fs_info and fs_devices.
+ *
+ * fc->s_fs_info is not touched and will be later freed by
+ * put_fs_context() through btrfs_free_fs_context().
+ */
+ ASSERT(fc->s_fs_info == fs_info);
+
+ mutex_lock(&uuid_mutex);
+ btrfs_fs_devices_dec_holding(fs_devices);
+ fs_info->fs_devices = NULL;
+ mutex_unlock(&uuid_mutex);
+ /*
+ * At this stage we may have RO flag mismatch between
+ * fc->sb_flags and sb->s_flags. Caller should detect such
+ * mismatch and reconfigure with sb->s_umount rwsem held if
+ * needed.
+ */
} else {
+ struct block_device *bdev;
+
+ /*
+		 * The first mount of the fs, thus a new super block: fc->s_fs_info
+ * must be NULL, and the ownership of our fs_info and fs_devices is
+ * transferred to the super block.
+ */
+ ASSERT(fc->s_fs_info == NULL);
+
+ mutex_lock(&uuid_mutex);
+ btrfs_fs_devices_dec_holding(fs_devices);
+ ret = btrfs_open_devices(fs_devices, mode, sb);
+ if (ret < 0)
+ fs_info->fs_devices = NULL;
+ mutex_unlock(&uuid_mutex);
+ if (ret < 0) {
+ deactivate_locked_super(sb);
+ return ret;
+ }
+ if (!(fc->sb_flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
+ deactivate_locked_super(sb);
+ return -EACCES;
+ }
+ set_device_specific_options(fs_info);
+ bdev = fs_devices->latest_dev->bdev;
snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
shrinker_debugfs_rename(sb->s_shrink, "sb-btrfs:%s", sb->s_id);
- btrfs_sb(sb)->bdev_holder = &btrfs_fs_type;
ret = btrfs_fill_super(sb, fs_devices);
- }
-
- if (ret) {
- deactivate_locked_super(sb);
- return ret;
+ if (ret) {
+ deactivate_locked_super(sb);
+ return ret;
+ }
}
btrfs_clear_oneshot_options(fs_info);
fc->root = dget(sb->s_root);
return 0;
-
-error:
- btrfs_close_devices(fs_devices);
- return ret;
}
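
A compilable sketch of the lifetime pattern described in the comments above: take a hold on the object under the registry lock, drop the lock for the window where sget_fc() (which takes s_umount) must run, then re-take the lock to drop the hold. The toy_* names and pthreads locking are stand-ins, not the btrfs_fs_devices API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;	/* plays uuid_mutex */

struct toy_devices {
	int holders;	/* plays the fs_devices holding count */
};

/* Stand-in for sget_fc(); must not be called with registry_lock held. */
static int toy_sget(void)
{
	return 0;
}

static int toy_get_tree(struct toy_devices *devs)
{
	int ret;

	pthread_mutex_lock(&registry_lock);
	devs->holders++;		/* keep @devs alive across the unlocked window */
	pthread_mutex_unlock(&registry_lock);

	ret = toy_sget();		/* would invert the lock order if done under registry_lock */

	pthread_mutex_lock(&registry_lock);
	devs->holders--;		/* safe to drop the hold again */
	pthread_mutex_unlock(&registry_lock);
	return ret;
}

int main(void)
{
	struct toy_devices devs = { 0 };
	int ret = toy_get_tree(&devs);

	printf("ret=%d holders=%d\n", ret, devs.holders);
	return 0;
}

The real code additionally resets fs_info->fs_devices to NULL on the branches where no fs_devices got opened, so the later fs_context teardown cannot double free it.
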
/*
@@ -1982,39 +2029,14 @@ error:
* btrfs or not, setting the whole super block RO. To make per-subvolume mounting
 * work with different options, we need to keep backward compatibility.
*/
-static struct vfsmount *btrfs_reconfigure_for_mount(struct fs_context *fc)
+static int btrfs_reconfigure_for_mount(struct fs_context *fc)
{
- struct vfsmount *mnt;
- int ret;
- const bool ro2rw = !(fc->sb_flags & SB_RDONLY);
-
- /*
- * We got an EBUSY because our SB_RDONLY flag didn't match the existing
- * super block, so invert our setting here and retry the mount so we
- * can get our vfsmount.
- */
- if (ro2rw)
- fc->sb_flags |= SB_RDONLY;
- else
- fc->sb_flags &= ~SB_RDONLY;
-
- mnt = fc_mount(fc);
- if (IS_ERR(mnt))
- return mnt;
+ int ret = 0;
- if (!ro2rw)
- return mnt;
+ if (!(fc->sb_flags & SB_RDONLY) && (fc->root->d_sb->s_flags & SB_RDONLY))
+ ret = btrfs_reconfigure(fc);
- /* We need to convert to rw, call reconfigure. */
- fc->sb_flags &= ~SB_RDONLY;
- down_write(&mnt->mnt_sb->s_umount);
- ret = btrfs_reconfigure(fc);
- up_write(&mnt->mnt_sb->s_umount);
- if (ret) {
- mntput(mnt);
- return ERR_PTR(ret);
- }
- return mnt;
+ return ret;
}
static int btrfs_get_tree_subvol(struct fs_context *fc)
@@ -2024,6 +2046,7 @@ static int btrfs_get_tree_subvol(struct fs_context *fc)
struct fs_context *dup_fc;
struct dentry *dentry;
struct vfsmount *mnt;
+ int ret = 0;
/*
* Setup a dummy root and fs_info for test/set super. This is because
@@ -2040,7 +2063,13 @@ static int btrfs_get_tree_subvol(struct fs_context *fc)
fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
if (!fs_info->super_copy || !fs_info->super_for_commit) {
- btrfs_free_fs_info(fs_info);
+ /*
+		 * Don't call btrfs_free_fs_info() to free it, as it's still only
+		 * partially initialized.
+ */
+ kfree(fs_info->super_copy);
+ kfree(fs_info->super_for_commit);
+ kvfree(fs_info);
return -ENOMEM;
}
btrfs_init_fs_info(fs_info);
@@ -2057,17 +2086,15 @@ static int btrfs_get_tree_subvol(struct fs_context *fc)
*/
dup_fc->s_fs_info = fs_info;
- /*
- * We'll do the security settings in our btrfs_get_tree_super() mount
- * loop, they were duplicated into dup_fc, we can drop the originals
- * here.
- */
- security_free_mnt_opts(&fc->security);
- fc->security = NULL;
+ ret = btrfs_get_tree_super(dup_fc);
+ if (ret)
+ goto error;
- mnt = fc_mount(dup_fc);
- if (PTR_ERR_OR_ZERO(mnt) == -EBUSY)
- mnt = btrfs_reconfigure_for_mount(dup_fc);
+ ret = btrfs_reconfigure_for_mount(dup_fc);
+ up_write(&dup_fc->root->d_sb->s_umount);
+ if (ret)
+ goto error;
+ mnt = vfs_create_mount(dup_fc);
put_fs_context(dup_fc);
if (IS_ERR(mnt))
return PTR_ERR(mnt);
@@ -2084,25 +2111,15 @@ static int btrfs_get_tree_subvol(struct fs_context *fc)
fc->root = dentry;
return 0;
+error:
+ put_fs_context(dup_fc);
+ return ret;
}
static int btrfs_get_tree(struct fs_context *fc)
{
- /*
- * Since we use mount_subtree to mount the default/specified subvol, we
- * have to do mounts in two steps.
- *
- * First pass through we call btrfs_get_tree_subvol(), this is just a
- * wrapper around fc_mount() to call back into here again, and this time
- * we'll call btrfs_get_tree_super(). This will do the open_ctree() and
- * everything to open the devices and file system. Then we return back
- * with a fully constructed vfsmount in btrfs_get_tree_subvol(), and
- * from there we can do our mount_subvol() call, which will lookup
- * whichever subvol we're mounting and setup this fc with the
- * appropriate dentry for the subvol.
- */
- if (fc->s_fs_info)
- return btrfs_get_tree_super(fc);
+ ASSERT(fc->s_fs_info == NULL);
+
return btrfs_get_tree_subvol(fc);
}
@@ -2234,7 +2251,7 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
* Scanning outside of mount can return NULL which would turn
* into 0 error code.
*/
- device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ, false);
+ device = btrfs_scan_one_device(vol->name, false);
ret = PTR_ERR_OR_ZERO(device);
mutex_unlock(&uuid_mutex);
break;
@@ -2252,13 +2269,10 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
* Scanning outside of mount can return NULL which would turn
* into 0 error code.
*/
- device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ, false);
+ device = btrfs_scan_one_device(vol->name, false);
if (IS_ERR_OR_NULL(device)) {
mutex_unlock(&uuid_mutex);
- if (IS_ERR(device))
- ret = PTR_ERR(device);
- else
- ret = 0;
+ ret = PTR_ERR_OR_ZERO(device);
break;
}
ret = !(device->fs_devices->num_devices ==
@@ -2305,20 +2319,20 @@ static int check_dev_super(struct btrfs_device *dev)
return 0;
/* Only need to check the primary super block. */
- sb = btrfs_read_dev_one_super(dev->bdev, 0, true);
+ sb = btrfs_read_disk_super(dev->bdev, 0, true);
if (IS_ERR(sb))
return PTR_ERR(sb);
/* Verify the checksum. */
csum_type = btrfs_super_csum_type(sb);
- if (csum_type != btrfs_super_csum_type(fs_info->super_copy)) {
+ if (unlikely(csum_type != btrfs_super_csum_type(fs_info->super_copy))) {
btrfs_err(fs_info, "csum type changed, has %u expect %u",
csum_type, btrfs_super_csum_type(fs_info->super_copy));
ret = -EUCLEAN;
goto out;
}
- if (btrfs_check_super_csum(fs_info, sb)) {
+ if (unlikely(btrfs_check_super_csum(fs_info, sb))) {
btrfs_err(fs_info, "csum for on-disk super block no longer matches");
ret = -EUCLEAN;
goto out;
@@ -2330,7 +2344,7 @@ static int check_dev_super(struct btrfs_device *dev)
goto out;
last_trans = btrfs_get_last_trans_committed(fs_info);
- if (btrfs_super_generation(sb) != last_trans) {
+ if (unlikely(btrfs_super_generation(sb) != last_trans)) {
btrfs_err(fs_info, "transid mismatch, has %llu expect %llu",
btrfs_super_generation(sb), last_trans);
ret = -EUCLEAN;
@@ -2411,6 +2425,66 @@ static long btrfs_free_cached_objects(struct super_block *sb, struct shrink_cont
return 0;
}
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+static int btrfs_remove_bdev(struct super_block *sb, struct block_device *bdev)
+{
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+ struct btrfs_device *device;
+ struct btrfs_dev_lookup_args lookup_args = { .devt = bdev->bd_dev };
+ bool can_rw;
+
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ device = btrfs_find_device(fs_info->fs_devices, &lookup_args);
+ if (!device) {
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ /* Device not found, should not affect the running fs, just give a warning. */
+ btrfs_warn(fs_info, "unable to find btrfs device for block device '%pg'", bdev);
+ return 0;
+ }
+ /*
+ * The to-be-removed device is already missing?
+ *
+	 * That's weird, but no special handling is needed; just exit right now.
+ */
+ if (unlikely(test_and_set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))) {
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ btrfs_warn(fs_info, "btrfs device id %llu is already missing", device->devid);
+ return 0;
+ }
+
+ device->fs_devices->missing_devices++;
+ if (test_and_clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
+ list_del_init(&device->dev_alloc_list);
+ WARN_ON(device->fs_devices->rw_devices < 1);
+ device->fs_devices->rw_devices--;
+ }
+ can_rw = btrfs_check_rw_degradable(fs_info, device);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ /*
+ * Now the device is considered missing, so btrfs_device_name() won't give a
+ * meaningful result anymore; only output the devid.
+ */
+ if (unlikely(!can_rw)) {
+ btrfs_crit(fs_info,
+ "btrfs device id %llu has gone missing, can not maintain read-write",
+ device->devid);
+ return -EIO;
+ }
+ btrfs_warn(fs_info,
+ "btrfs device id %llu has gone missing, continue as degraded",
+ device->devid);
+ btrfs_set_opt(fs_info->mount_opt, DEGRADED);
+ return 0;
+}
+
+static void btrfs_shutdown(struct super_block *sb)
+{
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+
+ btrfs_force_shutdown(fs_info);
+}
+#endif
+
static const struct super_operations btrfs_super_ops = {
.drop_inode = btrfs_drop_inode,
.evict_inode = btrfs_evict_inode,
@@ -2426,6 +2500,10 @@ static const struct super_operations btrfs_super_ops = {
.unfreeze_fs = btrfs_unfreeze,
.nr_cached_objects = btrfs_nr_cached_objects,
.free_cached_objects = btrfs_free_cached_objects,
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ .remove_bdev = btrfs_remove_bdev,
+ .shutdown = btrfs_shutdown,
+#endif
};
static const struct file_operations btrfs_ctl_fops = {
@@ -2458,15 +2536,15 @@ static __cold void btrfs_interface_exit(void)
static int __init btrfs_print_mod_info(void)
{
static const char options[] = ""
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ ", experimental=on"
+#endif
#ifdef CONFIG_BTRFS_DEBUG
", debug=on"
#endif
#ifdef CONFIG_BTRFS_ASSERT
", assert=on"
#endif
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
- ", ref-verify=on"
-#endif
#ifdef CONFIG_BLK_DEV_ZONED
", zoned=yes"
#else
@@ -2478,7 +2556,17 @@ static int __init btrfs_print_mod_info(void)
", fsverity=no"
#endif
;
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ if (btrfs_get_mod_read_policy() == NULL)
+ pr_info("Btrfs loaded%s\n", options);
+ else
+ pr_info("Btrfs loaded%s, read_policy=%s\n",
+ options, btrfs_get_mod_read_policy());
+#else
pr_info("Btrfs loaded%s\n", options);
+#endif
+
return 0;
}
@@ -2525,8 +2613,8 @@ static const struct init_sequence mod_init_seq[] = {
.init_func = btrfs_free_space_init,
.exit_func = btrfs_free_space_exit,
}, {
- .init_func = extent_state_init_cachep,
- .exit_func = extent_state_free_cachep,
+ .init_func = btrfs_extent_state_init_cachep,
+ .exit_func = btrfs_extent_state_free_cachep,
}, {
.init_func = extent_buffer_init_cachep,
.exit_func = extent_buffer_free_cachep,
@@ -2534,8 +2622,13 @@ static const struct init_sequence mod_init_seq[] = {
.init_func = btrfs_bioset_init,
.exit_func = btrfs_bioset_exit,
}, {
- .init_func = extent_map_init,
- .exit_func = extent_map_exit,
+ .init_func = btrfs_extent_map_init,
+ .exit_func = btrfs_extent_map_exit,
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ }, {
+ .init_func = btrfs_read_policy_init,
+ .exit_func = NULL,
+#endif
}, {
.init_func = ordered_data_init,
.exit_func = ordered_data_exit,
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index b843308e2bc6..1f64c132b387 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -10,6 +10,7 @@
#include <linux/completion.h>
#include <linux/bug.h>
#include <linux/list.h>
+#include <linux/string_choices.h>
#include <crypto/hash.h>
#include "messages.h"
#include "ctree.h"
@@ -25,6 +26,7 @@
#include "misc.h"
#include "fs.h"
#include "accessors.h"
+#include "zoned.h"
/*
* Structure name Path
@@ -160,8 +162,7 @@ static int can_modify_feature(struct btrfs_feature_attr *fa)
clear = BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR;
break;
default:
- pr_warn("btrfs: sysfs: unknown feature set %d\n",
- fa->feature_set);
+ btrfs_warn(NULL, "sysfs: unknown feature set %d", fa->feature_set);
return 0;
}
@@ -295,7 +296,7 @@ BTRFS_FEAT_ATTR_INCOMPAT(simple_quota, SIMPLE_QUOTA);
#ifdef CONFIG_BLK_DEV_ZONED
BTRFS_FEAT_ATTR_INCOMPAT(zoned, ZONED);
#endif
-#ifdef CONFIG_BTRFS_DEBUG
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
/* Remove once support for extent tree v2 is feature complete */
BTRFS_FEAT_ATTR_INCOMPAT(extent_tree_v2, EXTENT_TREE_V2);
/* Remove once support for raid stripe tree is feature complete. */
@@ -329,7 +330,7 @@ static struct attribute *btrfs_supported_feature_attrs[] = {
#ifdef CONFIG_BLK_DEV_ZONED
BTRFS_FEAT_ATTR_PTR(zoned),
#endif
-#ifdef CONFIG_BTRFS_DEBUG
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
BTRFS_FEAT_ATTR_PTR(extent_tree_v2),
BTRFS_FEAT_ATTR_PTR(raid_stripe_tree),
#endif
@@ -410,12 +411,17 @@ static ssize_t supported_sectorsizes_show(struct kobject *kobj,
char *buf)
{
ssize_t ret = 0;
+ bool has_output = false;
- /* An artificial limit to only support 4K and PAGE_SIZE */
- if (PAGE_SIZE > SZ_4K)
- ret += sysfs_emit_at(buf, ret, "%u ", SZ_4K);
- ret += sysfs_emit_at(buf, ret, "%lu\n", PAGE_SIZE);
-
+ for (u32 cur = BTRFS_MIN_BLOCKSIZE; cur <= BTRFS_MAX_BLOCKSIZE; cur *= 2) {
+ if (!btrfs_supported_blocksize(cur))
+ continue;
+ if (has_output)
+ ret += sysfs_emit_at(buf, ret, " ");
+ ret += sysfs_emit_at(buf, ret, "%u", cur);
+ has_output = true;
+ }
+ ret += sysfs_emit_at(buf, ret, "\n");
return ret;
}
BTRFS_ATTR(static_feature, supported_sectorsizes,
@@ -1118,7 +1124,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return sysfs_emit(buf, "%u\n", fs_info->super_copy->nodesize);
+ return sysfs_emit(buf, "%u\n", fs_info->nodesize);
}
BTRFS_ATTR(, nodesize, btrfs_nodesize_show);
@@ -1128,7 +1134,7 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize);
+ return sysfs_emit(buf, "%u\n", fs_info->sectorsize);
}
BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show);
@@ -1137,13 +1143,21 @@ static ssize_t btrfs_commit_stats_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+ u64 now = ktime_get_ns();
+ u64 start_time = fs_info->commit_stats.critical_section_start_time;
+ u64 pending = 0;
+
+ if (start_time)
+ pending = now - start_time;
return sysfs_emit(buf,
"commits %llu\n"
+ "cur_commit_ms %llu\n"
"last_commit_ms %llu\n"
"max_commit_ms %llu\n"
"total_commit_ms %llu\n",
fs_info->commit_stats.commit_count,
+ div_u64(pending, NSEC_PER_MSEC),
div_u64(fs_info->commit_stats.last_commit_dur, NSEC_PER_MSEC),
div_u64(fs_info->commit_stats.max_commit_dur, NSEC_PER_MSEC),
div_u64(fs_info->commit_stats.total_commit_dur, NSEC_PER_MSEC));
@@ -1175,12 +1189,62 @@ static ssize_t btrfs_commit_stats_store(struct kobject *kobj,
}
BTRFS_ATTR_RW(, commit_stats, btrfs_commit_stats_show, btrfs_commit_stats_store);
+static ssize_t btrfs_zoned_stats_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+ struct btrfs_block_group *bg;
+ size_t ret = 0;
+
+
+ return ret;
+
+ spin_lock(&fs_info->zone_active_bgs_lock);
+ ret += sysfs_emit_at(buf, ret, "active block-groups: %zu\n",
+ list_count_nodes(&fs_info->zone_active_bgs));
+ spin_unlock(&fs_info->zone_active_bgs_lock);
+
+ mutex_lock(&fs_info->reclaim_bgs_lock);
+ spin_lock(&fs_info->unused_bgs_lock);
+ ret += sysfs_emit_at(buf, ret, "\treclaimable: %zu\n",
+ list_count_nodes(&fs_info->reclaim_bgs));
+ ret += sysfs_emit_at(buf, ret, "\tunused: %zu\n",
+ list_count_nodes(&fs_info->unused_bgs));
+ spin_unlock(&fs_info->unused_bgs_lock);
+ mutex_unlock(&fs_info->reclaim_bgs_lock);
+
+ ret += sysfs_emit_at(buf, ret, "\tneed reclaim: %s\n",
+ str_true_false(btrfs_zoned_should_reclaim(fs_info)));
+
+ if (fs_info->data_reloc_bg)
+ ret += sysfs_emit_at(buf, ret,
+ "data relocation block-group: %llu\n",
+ fs_info->data_reloc_bg);
+ if (fs_info->treelog_bg)
+ ret += sysfs_emit_at(buf, ret,
+ "tree-log block-group: %llu\n",
+ fs_info->treelog_bg);
+
+ spin_lock(&fs_info->zone_active_bgs_lock);
+ ret += sysfs_emit_at(buf, ret, "active zones:\n");
+ list_for_each_entry(bg, &fs_info->zone_active_bgs, active_bg_list) {
+ ret += sysfs_emit_at(buf, ret,
+ "\tstart: %llu, wp: %llu used: %llu, reserved: %llu, unusable: %llu\n",
+ bg->start, bg->alloc_offset, bg->used,
+ bg->reserved, bg->zone_unusable);
+ }
+ spin_unlock(&fs_info->zone_active_bgs_lock);
+ return ret;
+}
+BTRFS_ATTR(, zoned_stats, btrfs_zoned_stats_show);
+
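Since zoned_stats is added to btrfs_attrs later in this patch and those attributes are created on the per-filesystem fsid kobject, the report should appear as /sys/fs/btrfs/<UUID>/zoned_stats (a path inferred from the registration code in this file, not stated explicitly by the patch). A trivial userspace sketch for dumping it:

#include <stdio.h>

int main(int argc, char **argv)
{
	char path[256], line[256];
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <fs-uuid>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "/sys/fs/btrfs/%s/zoned_stats", argv[1]);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}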
static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize);
+ return sysfs_emit(buf, "%u\n", fs_info->sectorsize);
}
BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show);
@@ -1201,7 +1265,7 @@ static ssize_t quota_override_store(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
unsigned long knob;
- int err;
+ int ret;
if (!fs_info)
return -EPERM;
@@ -1209,9 +1273,9 @@ static ssize_t quota_override_store(struct kobject *kobj,
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
- err = kstrtoul(buf, 10, &knob);
- if (err)
- return err;
+ ret = kstrtoul(buf, 10, &knob);
+ if (ret)
+ return ret;
if (knob > 1)
return -EINVAL;
@@ -1305,7 +1369,74 @@ static ssize_t btrfs_temp_fsid_show(struct kobject *kobj,
}
BTRFS_ATTR(, temp_fsid, btrfs_temp_fsid_show);
-static const char * const btrfs_read_policy_name[] = { "pid" };
+static const char *btrfs_read_policy_name[] = {
+ "pid",
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ "round-robin",
+ "devid",
+#endif
+};
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+
+/* Global module configuration parameters. */
+static char *read_policy;
+char *btrfs_get_mod_read_policy(void)
+{
+ return read_policy;
+}
+
+/* Set perms to 0 to disable the /sys/module/btrfs/parameters/read_policy interface. */
+module_param(read_policy, charp, 0);
+MODULE_PARM_DESC(read_policy,
+"Global read policy: pid (default), round-robin[:<min_contig_read>], devid[:<devid>]");
+#endif
+
+int btrfs_read_policy_to_enum(const char *str, s64 *value_ret)
+{
+ char param[32];
+ char __maybe_unused *value_str;
+
+ if (!str || strlen(str) == 0)
+ return 0;
+
+ strscpy(param, str);
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ /* Separate value from input in policy:value format. */
+ value_str = strchr(param, ':');
+ if (value_str) {
+ char *retptr;
+
+ *value_str = 0;
+ value_str++;
+ if (!value_ret)
+ return -EINVAL;
+
+ *value_ret = memparse(value_str, &retptr);
+ /* Only trailing whitespace is allowed after the value. */
+ retptr = skip_spaces(retptr);
+ if (*retptr != 0 || *value_ret <= 0)
+ return -EINVAL;
+ }
+#endif
+
+ return sysfs_match_string(btrfs_read_policy_name, param);
+}
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+int __init btrfs_read_policy_init(void)
+{
+ s64 value;
+
+ if (btrfs_read_policy_to_enum(read_policy, &value) == -EINVAL) {
+ btrfs_err(NULL, "invalid read policy or value %s", read_policy);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
static ssize_t btrfs_read_policy_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
@@ -1316,14 +1447,25 @@ static ssize_t btrfs_read_policy_show(struct kobject *kobj,
int i;
for (i = 0; i < BTRFS_NR_READ_POLICY; i++) {
- if (policy == i)
- ret += sysfs_emit_at(buf, ret, "%s[%s]",
- (ret == 0 ? "" : " "),
- btrfs_read_policy_name[i]);
- else
- ret += sysfs_emit_at(buf, ret, "%s%s",
- (ret == 0 ? "" : " "),
- btrfs_read_policy_name[i]);
+ if (ret != 0)
+ ret += sysfs_emit_at(buf, ret, " ");
+
+ if (i == policy)
+ ret += sysfs_emit_at(buf, ret, "[");
+
+ ret += sysfs_emit_at(buf, ret, "%s", btrfs_read_policy_name[i]);
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ if (i == BTRFS_READ_POLICY_RR)
+ ret += sysfs_emit_at(buf, ret, ":%u",
+ READ_ONCE(fs_devices->rr_min_contig_read));
+
+ if (i == BTRFS_READ_POLICY_DEVID)
+ ret += sysfs_emit_at(buf, ret, ":%llu",
+ READ_ONCE(fs_devices->read_devid));
+#endif
+ if (i == policy)
+ ret += sysfs_emit_at(buf, ret, "]");
}
ret += sysfs_emit_at(buf, ret, "\n");
@@ -1336,21 +1478,80 @@ static ssize_t btrfs_read_policy_store(struct kobject *kobj,
const char *buf, size_t len)
{
struct btrfs_fs_devices *fs_devices = to_fs_devs(kobj);
- int i;
+ int index;
+ s64 value = -1;
- for (i = 0; i < BTRFS_NR_READ_POLICY; i++) {
- if (sysfs_streq(buf, btrfs_read_policy_name[i])) {
- if (i != READ_ONCE(fs_devices->read_policy)) {
- WRITE_ONCE(fs_devices->read_policy, i);
- btrfs_info(fs_devices->fs_info,
- "read policy set to '%s'",
- btrfs_read_policy_name[i]);
+ index = btrfs_read_policy_to_enum(buf, &value);
+ if (index < 0)
+ return -EINVAL;
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ /* If moving from RR then disable collecting fs stats. */
+ if (fs_devices->read_policy == BTRFS_READ_POLICY_RR && index != BTRFS_READ_POLICY_RR)
+ fs_devices->collect_fs_stats = false;
+
+ if (index == BTRFS_READ_POLICY_RR) {
+ if (value != -1) {
+ const u32 sectorsize = fs_devices->fs_info->sectorsize;
+
+ if (!IS_ALIGNED(value, sectorsize)) {
+ u64 temp_value = round_up(value, sectorsize);
+
+ btrfs_debug(fs_devices->fs_info,
+"read_policy: min contig read %lld should be multiple of sectorsize %u, rounded to %llu",
+ value, sectorsize, temp_value);
+ value = temp_value;
}
- return len;
+ } else {
+ value = BTRFS_DEFAULT_RR_MIN_CONTIG_READ;
}
+
+ if (index != READ_ONCE(fs_devices->read_policy) ||
+ value != READ_ONCE(fs_devices->rr_min_contig_read)) {
+ WRITE_ONCE(fs_devices->read_policy, index);
+ WRITE_ONCE(fs_devices->rr_min_contig_read, value);
+
+ btrfs_info(fs_devices->fs_info, "read policy set to '%s:%lld'",
+ btrfs_read_policy_name[index], value);
+ }
+
+ fs_devices->collect_fs_stats = true;
+
+ return len;
}
- return -EINVAL;
+ if (index == BTRFS_READ_POLICY_DEVID) {
+ if (value != -1) {
+ BTRFS_DEV_LOOKUP_ARGS(args);
+
+ /* Validate input devid. */
+ args.devid = value;
+ if (btrfs_find_device(fs_devices, &args) == NULL)
+ return -EINVAL;
+ } else {
+ /* Set default devid to the devid of the latest device. */
+ value = fs_devices->latest_dev->devid;
+ }
+
+ if (index != READ_ONCE(fs_devices->read_policy) ||
+ value != READ_ONCE(fs_devices->read_devid)) {
+ WRITE_ONCE(fs_devices->read_policy, index);
+ WRITE_ONCE(fs_devices->read_devid, value);
+
+ btrfs_info(fs_devices->fs_info, "read policy set to '%s:%llu'",
+ btrfs_read_policy_name[index], value);
+ }
+
+ return len;
+ }
+#endif
+ if (index != READ_ONCE(fs_devices->read_policy)) {
+ WRITE_ONCE(fs_devices->read_policy, index);
+ btrfs_info(fs_devices->fs_info, "read policy set to '%s'",
+ btrfs_read_policy_name[index]);
+ }
+
+ return len;
}
BTRFS_ATTR_RW(, read_policy, btrfs_read_policy_show, btrfs_read_policy_store);
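As a rough illustration of the "policy[:value]" syntax accepted by the read_policy module parameter and sysfs attribute above (e.g. "pid", "round-robin:256k", "devid:2"), here is a minimal userspace sketch. It is not the kernel code: parse_policy(), the fixed policy table and the k/m/g-only suffix handling are assumptions made for this example (the kernel uses memparse(), which accepts more suffixes, and validates the devid against the actual device list).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *const policies[] = { "pid", "round-robin", "devid" };

/* Return the policy index or -1 on error; *value stays -1 when no ":value" is given. */
static int parse_policy(const char *str, long long *value)
{
	char buf[32];
	char *sep, *end;

	*value = -1;
	if (!str || strlen(str) >= sizeof(buf))
		return -1;
	strcpy(buf, str);

	sep = strchr(buf, ':');
	if (sep) {
		*sep = '\0';
		*value = strtoll(sep + 1, &end, 10);
		/* Accept an optional k/m/g suffix; memparse() in the kernel handles more. */
		switch (*end) {
		case 'k': case 'K': *value <<= 10; end++; break;
		case 'm': case 'M': *value <<= 20; end++; break;
		case 'g': case 'G': *value <<= 30; end++; break;
		}
		if (*end != '\0' || *value <= 0)
			return -1;
	}

	for (size_t i = 0; i < sizeof(policies) / sizeof(policies[0]); i++)
		if (strcmp(buf, policies[i]) == 0)
			return (int)i;
	return -1;
}

int main(void)
{
	long long value;
	int idx = parse_policy("round-robin:256k", &value);

	/* Prints: policy index 1, value 262144 */
	printf("policy index %d, value %lld\n", idx, value);
	return 0;
}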
@@ -1450,6 +1651,7 @@ static const struct attribute *btrfs_attrs[] = {
BTRFS_ATTR_PTR(, bg_reclaim_threshold),
BTRFS_ATTR_PTR(, commit_stats),
BTRFS_ATTR_PTR(, temp_fsid),
+ BTRFS_ATTR_PTR(, zoned_stats),
#ifdef CONFIG_BTRFS_EXPERIMENTAL
BTRFS_ATTR_PTR(, offload_csum),
#endif
@@ -1792,16 +1994,35 @@ void btrfs_sysfs_remove_space_info(struct btrfs_space_info *space_info)
kobject_put(&space_info->kobj);
}
-static const char *alloc_name(u64 flags)
+static const char *alloc_name(struct btrfs_space_info *space_info)
{
+ u64 flags = space_info->flags;
+
switch (flags) {
case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA:
return "mixed";
case BTRFS_BLOCK_GROUP_METADATA:
- return "metadata";
+ switch (space_info->subgroup_id) {
+ case BTRFS_SUB_GROUP_PRIMARY:
+ return "metadata";
+ case BTRFS_SUB_GROUP_TREELOG:
+ return "metadata-treelog";
+ default:
+ WARN_ON_ONCE(1);
+ return "metadata (unknown sub-group)";
+ }
case BTRFS_BLOCK_GROUP_DATA:
- return "data";
+ switch (space_info->subgroup_id) {
+ case BTRFS_SUB_GROUP_PRIMARY:
+ return "data";
+ case BTRFS_SUB_GROUP_DATA_RELOC:
+ return "data-reloc";
+ default:
+ WARN_ON_ONCE(1);
+ return "data (unknown sub-group)";
+ }
case BTRFS_BLOCK_GROUP_SYSTEM:
+ ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_PRIMARY);
return "system";
default:
WARN_ON(1);
@@ -1813,14 +2034,13 @@ static const char *alloc_name(u64 flags)
* Create a sysfs entry for a space info type at path
* /sys/fs/btrfs/UUID/allocation/TYPE
*/
-int btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info)
+int btrfs_sysfs_add_space_info_type(struct btrfs_space_info *space_info)
{
int ret;
ret = kobject_init_and_add(&space_info->kobj, &space_info_ktype,
- fs_info->space_info_kobj, "%s",
- alloc_name(space_info->flags));
+ space_info->fs_info->space_info_kobj, "%s",
+ alloc_name(space_info));
if (ret) {
kobject_put(&space_info->kobj);
return ret;
@@ -2082,7 +2302,7 @@ void btrfs_kobject_uevent(struct block_device *bdev, enum kobject_action action)
ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
if (ret)
- pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
+ btrfs_warn(NULL, "sending event %d to kobject: '%s' (%p): failed",
action, kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
&disk_to_dev(bdev->bd_disk)->kobj);
}
@@ -2125,15 +2345,15 @@ static struct kset *btrfs_kset;
*/
int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs)
{
- int error;
+ int ret;
init_completion(&fs_devs->kobj_unregister);
fs_devs->fsid_kobj.kset = btrfs_kset;
- error = kobject_init_and_add(&fs_devs->fsid_kobj, &btrfs_ktype, NULL,
- "%pU", fs_devs->fsid);
- if (error) {
+ ret = kobject_init_and_add(&fs_devs->fsid_kobj, &btrfs_ktype, NULL,
+ "%pU", fs_devs->fsid);
+ if (ret) {
kobject_put(&fs_devs->fsid_kobj);
- return error;
+ return ret;
}
fs_devs->devices_kobj = kobject_create_and_add("devices",
@@ -2159,71 +2379,70 @@ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs)
int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info)
{
- int error;
+ int ret;
struct btrfs_fs_devices *fs_devs = fs_info->fs_devices;
struct kobject *fsid_kobj = &fs_devs->fsid_kobj;
- error = btrfs_sysfs_add_fs_devices(fs_devs);
- if (error)
- return error;
+ ret = btrfs_sysfs_add_fs_devices(fs_devs);
+ if (ret)
+ return ret;
- error = sysfs_create_files(fsid_kobj, btrfs_attrs);
- if (error) {
+ ret = sysfs_create_files(fsid_kobj, btrfs_attrs);
+ if (ret) {
btrfs_sysfs_remove_fs_devices(fs_devs);
- return error;
+ return ret;
}
- error = sysfs_create_group(fsid_kobj,
- &btrfs_feature_attr_group);
- if (error)
+ ret = sysfs_create_group(fsid_kobj, &btrfs_feature_attr_group);
+ if (ret)
goto failure;
#ifdef CONFIG_BTRFS_DEBUG
fs_info->debug_kobj = kobject_create_and_add("debug", fsid_kobj);
if (!fs_info->debug_kobj) {
- error = -ENOMEM;
+ ret = -ENOMEM;
goto failure;
}
- error = sysfs_create_files(fs_info->debug_kobj, btrfs_debug_mount_attrs);
- if (error)
+ ret = sysfs_create_files(fs_info->debug_kobj, btrfs_debug_mount_attrs);
+ if (ret)
goto failure;
#endif
/* Discard directory */
fs_info->discard_kobj = kobject_create_and_add("discard", fsid_kobj);
if (!fs_info->discard_kobj) {
- error = -ENOMEM;
+ ret = -ENOMEM;
goto failure;
}
- error = sysfs_create_files(fs_info->discard_kobj, discard_attrs);
- if (error)
+ ret = sysfs_create_files(fs_info->discard_kobj, discard_attrs);
+ if (ret)
goto failure;
- error = addrm_unknown_feature_attrs(fs_info, true);
- if (error)
+ ret = addrm_unknown_feature_attrs(fs_info, true);
+ if (ret)
goto failure;
- error = sysfs_create_link(fsid_kobj, &fs_info->sb->s_bdi->dev->kobj, "bdi");
- if (error)
+ ret = sysfs_create_link(fsid_kobj, &fs_info->sb->s_bdi->dev->kobj, "bdi");
+ if (ret)
goto failure;
fs_info->space_info_kobj = kobject_create_and_add("allocation",
fsid_kobj);
if (!fs_info->space_info_kobj) {
- error = -ENOMEM;
+ ret = -ENOMEM;
goto failure;
}
- error = sysfs_create_files(fs_info->space_info_kobj, allocation_attrs);
- if (error)
+ ret = sysfs_create_files(fs_info->space_info_kobj, allocation_attrs);
+ if (ret)
goto failure;
return 0;
failure:
btrfs_sysfs_remove_mounted(fs_info);
- return error;
+ return ret;
}
static ssize_t qgroup_enabled_show(struct kobject *qgroups_kobj,
diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h
index e6a284c59809..05498e5346c3 100644
--- a/fs/btrfs/sysfs.h
+++ b/fs/btrfs/sysfs.h
@@ -7,6 +7,7 @@
#include <linux/compiler_types.h>
#include <linux/kobject.h>
+struct block_device;
struct btrfs_fs_info;
struct btrfs_device;
struct btrfs_fs_devices;
@@ -36,8 +37,7 @@ void __cold btrfs_exit_sysfs(void);
int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info);
void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info);
void btrfs_sysfs_add_block_group_type(struct btrfs_block_group *cache);
-int btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info);
+int btrfs_sysfs_add_space_info_type(struct btrfs_space_info *space_info);
void btrfs_sysfs_remove_space_info(struct btrfs_space_info *space_info);
void btrfs_sysfs_update_devid(struct btrfs_device *device);
@@ -47,5 +47,11 @@ void btrfs_sysfs_del_qgroups(struct btrfs_fs_info *fs_info);
int btrfs_sysfs_add_qgroups(struct btrfs_fs_info *fs_info);
void btrfs_sysfs_del_one_qgroup(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *qgroup);
+int btrfs_read_policy_to_enum(const char *str, s64 *value);
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+int __init btrfs_read_policy_init(void);
+char *btrfs_get_mod_read_policy(void);
+#endif
#endif
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index e607b5d52fb1..b576897d71cc 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -30,6 +30,7 @@ const char *test_error[] = {
[TEST_ALLOC_EXTENT_MAP] = "cannot allocate extent map",
[TEST_ALLOC_CHUNK_MAP] = "cannot allocate chunk map",
[TEST_ALLOC_IO_CONTEXT] = "cannot allocate io context",
+ [TEST_ALLOC_TRANSACTION] = "cannot allocate transaction",
};
static const struct super_operations btrfs_test_super_ops = {
@@ -101,7 +102,7 @@ struct btrfs_device *btrfs_alloc_dummy_device(struct btrfs_fs_info *fs_info)
if (!dev)
return ERR_PTR(-ENOMEM);
- extent_io_tree_init(fs_info, &dev->alloc_state, 0);
+ btrfs_extent_io_tree_init(fs_info, &dev->alloc_state, 0);
INIT_LIST_HEAD(&dev->dev_list);
list_add(&dev->dev_list, &fs_info->fs_devices->devices);
@@ -110,7 +111,7 @@ struct btrfs_device *btrfs_alloc_dummy_device(struct btrfs_fs_info *fs_info)
static void btrfs_free_dummy_device(struct btrfs_device *dev)
{
- extent_io_tree_release(&dev->alloc_state);
+ btrfs_extent_io_tree_release(&dev->alloc_state);
kfree(dev);
}
@@ -142,6 +143,11 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
fs_info->nodesize = nodesize;
fs_info->sectorsize = sectorsize;
fs_info->sectorsize_bits = ilog2(sectorsize);
+
+ /* CRC32C csum size. */
+ fs_info->csum_size = 4;
+ fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) /
+ fs_info->csum_size;
set_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
test_mnt->mnt_sb->s_fs_info = fs_info;
@@ -151,9 +157,9 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
{
- struct radix_tree_iter iter;
- void **slot;
struct btrfs_device *dev, *tmp;
+ struct extent_buffer *eb;
+ unsigned long index;
if (!fs_info)
return;
@@ -163,25 +169,13 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
test_mnt->mnt_sb->s_fs_info = NULL;
- spin_lock(&fs_info->buffer_lock);
- radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {
- struct extent_buffer *eb;
-
- eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock);
- if (!eb)
- continue;
- /* Shouldn't happen but that kind of thinking creates CVE's */
- if (radix_tree_exception(eb)) {
- if (radix_tree_deref_retry(eb))
- slot = radix_tree_iter_retry(&iter);
- continue;
- }
- slot = radix_tree_iter_resume(slot, &iter);
- spin_unlock(&fs_info->buffer_lock);
- free_extent_buffer_stale(eb);
- spin_lock(&fs_info->buffer_lock);
+ xa_lock_irq(&fs_info->buffer_tree);
+ xa_for_each(&fs_info->buffer_tree, index, eb) {
+ xa_unlock_irq(&fs_info->buffer_tree);
+ free_extent_buffer(eb);
+ xa_lock_irq(&fs_info->buffer_tree);
}
- spin_unlock(&fs_info->buffer_lock);
+ xa_unlock_irq(&fs_info->buffer_tree);
btrfs_mapping_tree_free(fs_info);
list_for_each_entry_safe(dev, tmp, &fs_info->fs_devices->devices,
@@ -247,6 +241,15 @@ void btrfs_free_dummy_block_group(struct btrfs_block_group *cache)
kfree(cache);
}
+void btrfs_init_dummy_transaction(struct btrfs_transaction *trans, struct btrfs_fs_info *fs_info)
+{
+ memset(trans, 0, sizeof(*trans));
+ trans->fs_info = fs_info;
+ xa_init(&trans->delayed_refs.head_refs);
+ xa_init(&trans->delayed_refs.dirty_extents);
+ spin_lock_init(&trans->delayed_refs.lock);
+}
+
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info)
{
@@ -295,6 +298,9 @@ int btrfs_run_sanity_tests(void)
ret = btrfs_test_raid_stripe_tree(sectorsize, nodesize);
if (ret)
goto out;
+ ret = btrfs_test_delayed_refs(sectorsize, nodesize);
+ if (ret)
+ goto out;
}
}
ret = btrfs_test_extent_map();
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h
index b524ecf2f452..4307bdaa6749 100644
--- a/fs/btrfs/tests/btrfs-tests.h
+++ b/fs/btrfs/tests/btrfs-tests.h
@@ -6,6 +6,8 @@
#ifndef BTRFS_TESTS_H
#define BTRFS_TESTS_H
+#include <linux/types.h>
+
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_run_sanity_tests(void);
@@ -25,12 +27,14 @@ enum {
TEST_ALLOC_EXTENT_MAP,
TEST_ALLOC_CHUNK_MAP,
TEST_ALLOC_IO_CONTEXT,
+ TEST_ALLOC_TRANSACTION,
};
extern const char *test_error[];
struct btrfs_root;
struct btrfs_trans_handle;
+struct btrfs_transaction;
int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize);
int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize);
@@ -40,6 +44,7 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
int btrfs_test_raid_stripe_tree(u32 sectorsize, u32 nodesize);
int btrfs_test_extent_map(void);
+int btrfs_test_delayed_refs(u32 sectorsize, u32 nodesize);
struct inode *btrfs_new_test_inode(void);
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize);
void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info);
@@ -49,6 +54,7 @@ btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, unsigned long lengt
void btrfs_free_dummy_block_group(struct btrfs_block_group *cache);
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
+void btrfs_init_dummy_transaction(struct btrfs_transaction *trans, struct btrfs_fs_info *fs_info);
struct btrfs_device *btrfs_alloc_dummy_device(struct btrfs_fs_info *fs_info);
#else
static inline int btrfs_run_sanity_tests(void)
diff --git a/fs/btrfs/tests/delayed-refs-tests.c b/fs/btrfs/tests/delayed-refs-tests.c
new file mode 100644
index 000000000000..e2248acb906b
--- /dev/null
+++ b/fs/btrfs/tests/delayed-refs-tests.c
@@ -0,0 +1,1016 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/sizes.h>
+#include "btrfs-tests.h"
+#include "../transaction.h"
+#include "../delayed-ref.h"
+#include "../extent-tree.h"
+
+#define FAKE_ROOT_OBJECTID 256
+#define FAKE_BYTENR 0
+#define FAKE_LEVEL 1
+#define FAKE_INO 256
+#define FAKE_FILE_OFFSET 0
+#define FAKE_PARENT SZ_1M
+
+struct ref_head_check {
+ u64 bytenr;
+ u64 num_bytes;
+ int ref_mod;
+ int total_ref_mod;
+ int must_insert;
+};
+
+struct ref_node_check {
+ u64 bytenr;
+ u64 num_bytes;
+ int ref_mod;
+ enum btrfs_delayed_ref_action action;
+ u8 type;
+ u64 parent;
+ u64 root;
+ u64 owner;
+ u64 offset;
+};
+
+static enum btrfs_ref_type ref_type_from_disk_ref_type(u8 type)
+{
+ if ((type == BTRFS_TREE_BLOCK_REF_KEY) ||
+ (type == BTRFS_SHARED_BLOCK_REF_KEY))
+ return BTRFS_REF_METADATA;
+ return BTRFS_REF_DATA;
+}
+
+static void delete_delayed_ref_head(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *head)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_ref_root *delayed_refs =
+ &trans->transaction->delayed_refs;
+
+ spin_lock(&delayed_refs->lock);
+ spin_lock(&head->lock);
+ btrfs_delete_ref_head(fs_info, delayed_refs, head);
+ spin_unlock(&head->lock);
+ spin_unlock(&delayed_refs->lock);
+
+ btrfs_delayed_ref_unlock(head);
+ btrfs_put_delayed_ref_head(head);
+}
+
+static void delete_delayed_ref_node(struct btrfs_delayed_ref_head *head,
+ struct btrfs_delayed_ref_node *node)
+{
+ rb_erase_cached(&node->ref_node, &head->ref_tree);
+ RB_CLEAR_NODE(&node->ref_node);
+ if (!list_empty(&node->add_list))
+ list_del_init(&node->add_list);
+ btrfs_put_delayed_ref(node);
+}
+
+static int validate_ref_head(struct btrfs_delayed_ref_head *head,
+ struct ref_head_check *check)
+{
+ if (head->bytenr != check->bytenr) {
+ test_err("invalid bytenr have: %llu want: %llu", head->bytenr,
+ check->bytenr);
+ return -EINVAL;
+ }
+
+ if (head->num_bytes != check->num_bytes) {
+ test_err("invalid num_bytes have: %llu want: %llu",
+ head->num_bytes, check->num_bytes);
+ return -EINVAL;
+ }
+
+ if (head->ref_mod != check->ref_mod) {
+ test_err("invalid ref_mod have: %d want: %d", head->ref_mod,
+ check->ref_mod);
+ return -EINVAL;
+ }
+
+ if (head->total_ref_mod != check->total_ref_mod) {
+ test_err("invalid total_ref_mod have: %d want: %d",
+ head->total_ref_mod, check->total_ref_mod);
+ return -EINVAL;
+ }
+
+ if (head->must_insert_reserved != check->must_insert) {
+ test_err("invalid must_insert have: %d want: %d",
+ head->must_insert_reserved, check->must_insert);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int validate_ref_node(struct btrfs_delayed_ref_node *node,
+ struct ref_node_check *check)
+{
+ if (node->bytenr != check->bytenr) {
+ test_err("invalid bytenr have: %llu want: %llu", node->bytenr,
+ check->bytenr);
+ return -EINVAL;
+ }
+
+ if (node->num_bytes != check->num_bytes) {
+ test_err("invalid num_bytes have: %llu want: %llu",
+ node->num_bytes, check->num_bytes);
+ return -EINVAL;
+ }
+
+ if (node->ref_mod != check->ref_mod) {
+ test_err("invalid ref_mod have: %d want: %d", node->ref_mod,
+ check->ref_mod);
+ return -EINVAL;
+ }
+
+ if (node->action != check->action) {
+ test_err("invalid action have: %d want: %d", node->action,
+ check->action);
+ return -EINVAL;
+ }
+
+ if (node->parent != check->parent) {
+ test_err("invalid parent have: %llu want: %llu", node->parent,
+ check->parent);
+ return -EINVAL;
+ }
+
+ if (node->ref_root != check->root) {
+ test_err("invalid root have: %llu want: %llu", node->ref_root,
+ check->root);
+ return -EINVAL;
+ }
+
+ if (node->type != check->type) {
+ test_err("invalid type have: %d want: %d", node->type,
+ check->type);
+ return -EINVAL;
+ }
+
+ if (btrfs_delayed_ref_owner(node) != check->owner) {
+ test_err("invalid owner have: %llu want: %llu",
+ btrfs_delayed_ref_owner(node), check->owner);
+ return -EINVAL;
+ }
+
+ if (btrfs_delayed_ref_offset(node) != check->offset) {
+ test_err("invalid offset have: %llu want: %llu",
+ btrfs_delayed_ref_offset(node), check->offset);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int simple_test(struct btrfs_trans_handle *trans,
+ struct ref_head_check *head_check,
+ struct ref_node_check *node_check)
+{
+ struct btrfs_delayed_ref_root *delayed_refs =
+ &trans->transaction->delayed_refs;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_ref_head *head;
+ struct btrfs_delayed_ref_node *node;
+ struct btrfs_ref ref = {
+ .type = ref_type_from_disk_ref_type(node_check->type),
+ .action = node_check->action,
+ .parent = node_check->parent,
+ .ref_root = node_check->root,
+ .bytenr = node_check->bytenr,
+ .num_bytes = fs_info->nodesize,
+ };
+ int ret;
+
+ if (ref.type == BTRFS_REF_METADATA)
+ btrfs_init_tree_ref(&ref, node_check->owner, node_check->root,
+ false);
+ else
+ btrfs_init_data_ref(&ref, node_check->owner, node_check->offset,
+ node_check->root, true);
+
+ if (ref.type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ return ret;
+ }
+
+ head = btrfs_select_ref_head(fs_info, delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ return -EINVAL;
+ }
+
+ ret = -EINVAL;
+ if (validate_ref_head(head, head_check))
+ goto out;
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ if (validate_ref_node(node, node_check))
+ goto out;
+ ret = 0;
+out:
+ btrfs_unselect_ref_head(delayed_refs, head);
+ btrfs_destroy_delayed_refs(trans->transaction);
+ return ret;
+}
+
+/*
+ * These are simple tests: make sure that our btrfs_refs get turned into the
+ * appropriate btrfs_delayed_ref_node based on their settings and action.
+ */
+static int simple_tests(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct ref_head_check head_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 1,
+ .total_ref_mod = 1,
+ };
+ struct ref_node_check node_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 1,
+ .action = BTRFS_ADD_DELAYED_REF,
+ .type = BTRFS_TREE_BLOCK_REF_KEY,
+ .parent = 0,
+ .root = FAKE_ROOT_OBJECTID,
+ .owner = FAKE_LEVEL,
+ .offset = 0,
+ };
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single add tree block failed");
+ return -EINVAL;
+ }
+
+ node_check.type = BTRFS_EXTENT_DATA_REF_KEY;
+ node_check.owner = FAKE_INO;
+ node_check.offset = FAKE_FILE_OFFSET;
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single add extent data failed");
+ return -EINVAL;
+ }
+
+ node_check.parent = FAKE_PARENT;
+ node_check.type = BTRFS_SHARED_BLOCK_REF_KEY;
+ node_check.owner = FAKE_LEVEL;
+ node_check.offset = 0;
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single add shared block failed");
+ return -EINVAL;
+ }
+
+ node_check.type = BTRFS_SHARED_DATA_REF_KEY;
+ node_check.owner = FAKE_INO;
+ node_check.offset = FAKE_FILE_OFFSET;
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single add shared data failed");
+ return -EINVAL;
+ }
+
+ head_check.ref_mod = -1;
+ head_check.total_ref_mod = -1;
+ node_check.action = BTRFS_DROP_DELAYED_REF;
+ node_check.type = BTRFS_TREE_BLOCK_REF_KEY;
+ node_check.owner = FAKE_LEVEL;
+ node_check.offset = 0;
+ node_check.parent = 0;
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single drop tree block failed");
+ return -EINVAL;
+ }
+
+ node_check.type = BTRFS_EXTENT_DATA_REF_KEY;
+ node_check.owner = FAKE_INO;
+ node_check.offset = FAKE_FILE_OFFSET;
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single drop extent data failed");
+ return -EINVAL;
+ }
+
+ node_check.parent = FAKE_PARENT;
+ node_check.type = BTRFS_SHARED_BLOCK_REF_KEY;
+ node_check.owner = FAKE_LEVEL;
+ node_check.offset = 0;
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single drop shared block failed");
+ return -EINVAL;
+ }
+
+ node_check.type = BTRFS_SHARED_DATA_REF_KEY;
+ node_check.owner = FAKE_INO;
+ node_check.offset = FAKE_FILE_OFFSET;
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single drop shared data failed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Merge tests: validate that we do delayed ref merging properly, that the ref
+ * counts all end up correct, and that delayed refs are deleted once they're no
+ * longer needed.
+ */
+static int merge_tests(struct btrfs_trans_handle *trans,
+ enum btrfs_ref_type type)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_ref_head *head = NULL;
+ struct btrfs_delayed_ref_node *node;
+ struct btrfs_ref ref = {
+ .type = type,
+ .action = BTRFS_ADD_DELAYED_REF,
+ .parent = 0,
+ .ref_root = FAKE_ROOT_OBJECTID,
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ };
+ struct ref_head_check head_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 0,
+ .total_ref_mod = 0,
+ };
+ struct ref_node_check node_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 2,
+ .action = BTRFS_ADD_DELAYED_REF,
+ .parent = 0,
+ .root = FAKE_ROOT_OBJECTID,
+ };
+ int ret;
+
+ /*
+ * First add a ref and then drop it, make sure we get a head ref with a
+ * 0 total ref mod and no nodes.
+ */
+ if (type == BTRFS_REF_METADATA) {
+ node_check.type = BTRFS_TREE_BLOCK_REF_KEY;
+ node_check.owner = FAKE_LEVEL;
+ btrfs_init_tree_ref(&ref, FAKE_LEVEL, FAKE_ROOT_OBJECTID, false);
+ } else {
+ node_check.type = BTRFS_EXTENT_DATA_REF_KEY;
+ node_check.owner = FAKE_INO;
+ node_check.offset = FAKE_FILE_OFFSET;
+ btrfs_init_data_ref(&ref, FAKE_INO, FAKE_FILE_OFFSET,
+ FAKE_ROOT_OBJECTID, true);
+ }
+
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ return ret;
+ }
+
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ goto out;
+ }
+
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("single add and drop failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /*
+ * Add a ref, then add another ref, make sure we get a head ref with a
+ * 2 total ref mod and 1 node.
+ */
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ goto out;
+ }
+
+ head_check.ref_mod = 2;
+ head_check.total_ref_mod = 2;
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("double add failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /* Add two drop refs, make sure they are merged properly. */
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ goto out;
+ }
+
+ head_check.ref_mod = -2;
+ head_check.total_ref_mod = -2;
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("double drop failed");
+ goto out;
+ }
+
+ node_check.action = BTRFS_DROP_DELAYED_REF;
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /* Add multiple refs, then drop until we go negative again. */
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ for (int i = 0; i < 10; i++) {
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ for (int i = 0; i < 12; i++) {
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ head_check.ref_mod = -2;
+ head_check.total_ref_mod = -2;
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("double drop failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /* Drop multiple refs, then add until we go positive again. */
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ for (int i = 0; i < 10; i++) {
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ for (int i = 0; i < 12; i++) {
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ head_check.ref_mod = 2;
+ head_check.total_ref_mod = 2;
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("add and drop to positive failed");
+ goto out;
+ }
+
+ node_check.action = BTRFS_ADD_DELAYED_REF;
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /*
+ * Add a bunch of refs with different roots and parents, then drop them
+ * all, make sure everything is properly merged.
+ */
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ for (int i = 0; i < 50; i++) {
+ if (!(i % 2)) {
+ ref.parent = 0;
+ ref.ref_root = FAKE_ROOT_OBJECTID + i;
+ } else {
+ ref.parent = FAKE_PARENT + (i * fs_info->nodesize);
+ }
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ for (int i = 0; i < 50; i++) {
+ if (!(i % 2)) {
+ ref.parent = 0;
+ ref.ref_root = FAKE_ROOT_OBJECTID + i;
+ } else {
+ ref.parent = FAKE_PARENT + (i * fs_info->nodesize);
+ }
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ head_check.ref_mod = 0;
+ head_check.total_ref_mod = 0;
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("add and drop multiple failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+ ret = 0;
+out:
+ if (!IS_ERR_OR_NULL(head))
+ btrfs_unselect_ref_head(&trans->transaction->delayed_refs, head);
+ btrfs_destroy_delayed_refs(trans->transaction);
+ return ret;
+}
+
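As a back-of-the-envelope sketch of the merging arithmetic these tests check (not kernel code): adds and drops against the same extent and reference sum into a net modification on the head, and the surviving node carries the absolute value, with the sign deciding whether it is an add or a drop; when the net is zero no node survives at all, as the first sub-test above shows. The snippet below only mirrors the 10-add/12-drop case.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int adds = 10, drops = 12;	/* as in the loops above */
	int net = adds - drops;		/* head->total_ref_mod ends up at -2 */
	const char *action = net >= 0 ? "BTRFS_ADD_DELAYED_REF"
				      : "BTRFS_DROP_DELAYED_REF";

	printf("net %d -> one %s node with ref_mod %d\n", net, action, abs(net));
	return 0;
}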
+/*
+ * Basic test to validate we always get the add operations first followed by any
+ * delete operations.
+ */
+static int select_delayed_refs_test(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_delayed_ref_root *delayed_refs =
+ &trans->transaction->delayed_refs;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_ref_head *head = NULL;
+ struct btrfs_delayed_ref_node *node;
+ struct btrfs_ref ref = {
+ .type = BTRFS_REF_METADATA,
+ .action = BTRFS_DROP_DELAYED_REF,
+ .parent = 0,
+ .ref_root = FAKE_ROOT_OBJECTID,
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ };
+ struct ref_head_check head_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 0,
+ .total_ref_mod = 0,
+ };
+ struct ref_node_check node_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 1,
+ .action = BTRFS_ADD_DELAYED_REF,
+ .type = BTRFS_TREE_BLOCK_REF_KEY,
+ .parent = 0,
+ .owner = FAKE_LEVEL,
+ .offset = 0,
+ };
+ int ret;
+
+ /* Add the drop first. */
+ btrfs_init_tree_ref(&ref, FAKE_LEVEL, FAKE_ROOT_OBJECTID, false);
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ return ret;
+ }
+
+ /*
+ * Now add the add, and make it a different root so it's logically later
+ * in the rb tree.
+ */
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ ref.ref_root = FAKE_ROOT_OBJECTID + 1;
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ head = btrfs_select_ref_head(fs_info, delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ ret = -EINVAL;
+ head = NULL;
+ goto out;
+ }
+
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("head check failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ node_check.root = FAKE_ROOT_OBJECTID + 1;
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ node_check.action = BTRFS_DROP_DELAYED_REF;
+ node_check.root = FAKE_ROOT_OBJECTID;
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+ delete_delayed_ref_node(head, node);
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /*
+ * Now do the same thing again, but this time have an add that gets deleted
+ * because of a merge, and make sure we still end up with another add in
+ * place.
+ */
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ ref.ref_root = FAKE_ROOT_OBJECTID;
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ ref.ref_root = FAKE_ROOT_OBJECTID + 1;
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ ref.ref_root = FAKE_ROOT_OBJECTID + 2;
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ head = btrfs_select_ref_head(fs_info, delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ ret = -EINVAL;
+ head = NULL;
+ goto out;
+ }
+
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("head check failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ node_check.action = BTRFS_ADD_DELAYED_REF;
+ node_check.root = FAKE_ROOT_OBJECTID + 2;
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ node_check.action = BTRFS_DROP_DELAYED_REF;
+ node_check.root = FAKE_ROOT_OBJECTID;
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+ delete_delayed_ref_node(head, node);
+ ret = 0;
+out:
+ if (head)
+ btrfs_unselect_ref_head(delayed_refs, head);
+ btrfs_destroy_delayed_refs(trans->transaction);
+ return ret;
+}
+
+int btrfs_test_delayed_refs(u32 sectorsize, u32 nodesize)
+{
+ struct btrfs_transaction *transaction;
+ struct btrfs_trans_handle trans;
+ struct btrfs_fs_info *fs_info;
+ int ret;
+
+ test_msg("running delayed refs tests");
+
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ return -ENOMEM;
+ }
+ transaction = kmalloc(sizeof(*transaction), GFP_KERNEL);
+ if (!transaction) {
+ test_std_err(TEST_ALLOC_TRANSACTION);
+ ret = -ENOMEM;
+ goto out_free_fs_info;
+ }
+ btrfs_init_dummy_trans(&trans, fs_info);
+ btrfs_init_dummy_transaction(transaction, fs_info);
+ trans.transaction = transaction;
+
+ ret = simple_tests(&trans);
+ if (!ret) {
+ test_msg("running delayed refs merge tests on metadata refs");
+ ret = merge_tests(&trans, BTRFS_REF_METADATA);
+ }
+
+ if (!ret) {
+ test_msg("running delayed refs merge tests on data refs");
+ ret = merge_tests(&trans, BTRFS_REF_DATA);
+ }
+
+ if (!ret)
+ ret = select_delayed_refs_test(&trans);
+
+ kfree(transaction);
+out_free_fs_info:
+ btrfs_free_dummy_fs_info(fs_info);
+ return ret;
+}
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 0a2dbfaaf49e..a0187d6163df 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -14,17 +14,17 @@
#include "../disk-io.h"
#include "../btrfs_inode.h"
-#define PROCESS_UNLOCK (1 << 0)
-#define PROCESS_RELEASE (1 << 1)
-#define PROCESS_TEST_LOCKED (1 << 2)
+#define PROCESS_UNLOCK (1U << 0)
+#define PROCESS_RELEASE (1U << 1)
+#define PROCESS_TEST_LOCKED (1U << 2)
static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
unsigned long flags)
{
int ret;
struct folio_batch fbatch;
- unsigned long index = start >> PAGE_SHIFT;
- unsigned long end_index = end >> PAGE_SHIFT;
+ pgoff_t index = start >> PAGE_SHIFT;
+ pgoff_t end_index = end >> PAGE_SHIFT;
int i;
int count = 0;
int loops = 0;
@@ -74,9 +74,9 @@ static void extent_flag_to_str(const struct extent_state *state, char *dest)
dest[0] = 0;
PRINT_ONE_FLAG(state, dest, cur, DIRTY);
- PRINT_ONE_FLAG(state, dest, cur, UPTODATE);
PRINT_ONE_FLAG(state, dest, cur, LOCKED);
- PRINT_ONE_FLAG(state, dest, cur, NEW);
+ PRINT_ONE_FLAG(state, dest, cur, DIRTY_LOG1);
+ PRINT_ONE_FLAG(state, dest, cur, DIRTY_LOG2);
PRINT_ONE_FLAG(state, dest, cur, DELALLOC);
PRINT_ONE_FLAG(state, dest, cur, DEFRAG);
PRINT_ONE_FLAG(state, dest, cur, BOUNDARY);
@@ -114,7 +114,6 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
struct extent_io_tree *tmp;
struct page *page;
struct page *locked_page = NULL;
- unsigned long index = 0;
/* In this test we need at least 2 file extents at its maximum size */
u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
u64 total_dirty = 2 * max_bytes;
@@ -150,14 +149,14 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
* Passing NULL as we don't have fs_info but tracepoints are not used
* at this point
*/
- extent_io_tree_init(NULL, tmp, IO_TREE_SELFTEST);
+ btrfs_extent_io_tree_init(NULL, tmp, IO_TREE_SELFTEST);
/*
* First go through and create and mark all of our pages dirty, we pin
* everything to make sure our pages don't get evicted and screw up our
* test.
*/
- for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
+ for (pgoff_t index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
if (!page) {
test_err("failed to allocate test page");
@@ -177,7 +176,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
* |--- delalloc ---|
* |--- search ---|
*/
- set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL);
+ btrfs_set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL);
start = 0;
end = start + PAGE_SIZE - 1;
found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
@@ -191,7 +190,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
sectorsize - 1, start, end);
goto out_bits;
}
- unlock_extent(tmp, start, end, NULL);
+ btrfs_unlock_extent(tmp, start, end, NULL);
unlock_page(locked_page);
put_page(locked_page);
@@ -208,7 +207,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
test_err("couldn't find the locked page");
goto out_bits;
}
- set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL);
+ btrfs_set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL);
start = test_start;
end = start + PAGE_SIZE - 1;
found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
@@ -227,7 +226,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
test_err("there were unlocked pages in the range");
goto out_bits;
}
- unlock_extent(tmp, start, end, NULL);
+ btrfs_unlock_extent(tmp, start, end, NULL);
/* locked_page was unlocked above */
put_page(locked_page);
@@ -263,7 +262,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
*
* We are re-using our test_start from above since it works out well.
*/
- set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL);
+ btrfs_set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL);
start = test_start;
end = start + PAGE_SIZE - 1;
found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
@@ -282,7 +281,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
test_err("pages in range were not all locked");
goto out_bits;
}
- unlock_extent(tmp, start, end, NULL);
+ btrfs_unlock_extent(tmp, start, end, NULL);
/*
* Now to test where we run into a page that is no longer dirty in the
@@ -327,7 +326,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
out_bits:
if (ret)
dump_extent_io_tree(tmp);
- clear_extent_bits(tmp, 0, total_dirty - 1, (unsigned)-1);
+ btrfs_clear_extent_bit(tmp, 0, total_dirty - 1, (unsigned)-1, NULL);
out:
if (locked_page)
put_page(locked_page);
@@ -344,11 +343,11 @@ static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb)
unsigned long i;
for (i = 0; i < eb->len * BITS_PER_BYTE; i++) {
- int bit, bit1;
+ bool bit_set, bit1_set;
- bit = !!test_bit(i, bitmap);
- bit1 = !!extent_buffer_test_bit(eb, 0, i);
- if (bit1 != bit) {
+ bit_set = test_bit(i, bitmap);
+ bit1_set = extent_buffer_test_bit(eb, 0, i);
+ if (bit1_set != bit_set) {
u8 has;
u8 expect;
@@ -361,9 +360,9 @@ static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb)
return -EINVAL;
}
- bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
- i % BITS_PER_BYTE);
- if (bit1 != bit) {
+ bit1_set = extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
+ i % BITS_PER_BYTE);
+ if (bit1_set != bit_set) {
u8 has;
u8 expect;
@@ -506,7 +505,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb)
static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
struct btrfs_fs_info *fs_info;
- unsigned long *bitmap = NULL;
+ unsigned long AUTO_KFREE(bitmap);
struct extent_buffer *eb = NULL;
int ret;
@@ -525,7 +524,7 @@ static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
goto out;
}
- eb = __alloc_dummy_extent_buffer(fs_info, 0, nodesize);
+ eb = alloc_dummy_extent_buffer(fs_info, 0);
if (!eb) {
test_std_err(TEST_ALLOC_ROOT);
ret = -ENOMEM;
@@ -542,7 +541,7 @@ static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
* Test again for case where the tree block is sectorsize aligned but
* not nodesize aligned.
*/
- eb = __alloc_dummy_extent_buffer(fs_info, sectorsize, nodesize);
+ eb = alloc_dummy_extent_buffer(fs_info, sectorsize);
if (!eb) {
test_std_err(TEST_ALLOC_ROOT);
ret = -ENOMEM;
@@ -552,7 +551,6 @@ static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
ret = __test_eb_bitmaps(bitmap, eb);
out:
free_extent_buffer(eb);
- kfree(bitmap);
btrfs_free_dummy_fs_info(fs_info);
return ret;
}
@@ -565,10 +563,10 @@ static int test_find_first_clear_extent_bit(void)
test_msg("running find_first_clear_extent_bit test");
- extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST);
+ btrfs_extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST);
/* Test correct handling of empty tree */
- find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED);
+ btrfs_find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED);
if (start != 0 || end != -1) {
test_err(
"error getting a range from completely empty tree: start %llu end %llu",
@@ -579,11 +577,11 @@ static int test_find_first_clear_extent_bit(void)
* Set 1M-4M alloc/discard and 32M-64M thus leaving a hole between
* 4M-32M
*/
- set_extent_bit(&tree, SZ_1M, SZ_4M - 1,
- CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);
+ btrfs_set_extent_bit(&tree, SZ_1M, SZ_4M - 1,
+ CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);
- find_first_clear_extent_bit(&tree, SZ_512K, &start, &end,
- CHUNK_TRIMMED | CHUNK_ALLOCATED);
+ btrfs_find_first_clear_extent_bit(&tree, SZ_512K, &start, &end,
+ CHUNK_TRIMMED | CHUNK_ALLOCATED);
if (start != 0 || end != SZ_1M - 1) {
test_err("error finding beginning range: start %llu end %llu",
@@ -592,14 +590,14 @@ static int test_find_first_clear_extent_bit(void)
}
/* Now add 32M-64M so that we have a hole between 4M-32M */
- set_extent_bit(&tree, SZ_32M, SZ_64M - 1,
- CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);
+ btrfs_set_extent_bit(&tree, SZ_32M, SZ_64M - 1,
+ CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);
/*
* Request first hole starting at 12M, we should get 4M-32M
*/
- find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end,
- CHUNK_TRIMMED | CHUNK_ALLOCATED);
+ btrfs_find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end,
+ CHUNK_TRIMMED | CHUNK_ALLOCATED);
if (start != SZ_4M || end != SZ_32M - 1) {
test_err("error finding trimmed range: start %llu end %llu",
@@ -611,8 +609,8 @@ static int test_find_first_clear_extent_bit(void)
* Search in the middle of allocated range, should get the next one
* available, which happens to be unallocated -> 4M-32M
*/
- find_first_clear_extent_bit(&tree, SZ_2M, &start, &end,
- CHUNK_TRIMMED | CHUNK_ALLOCATED);
+ btrfs_find_first_clear_extent_bit(&tree, SZ_2M, &start, &end,
+ CHUNK_TRIMMED | CHUNK_ALLOCATED);
if (start != SZ_4M || end != SZ_32M - 1) {
test_err("error finding next unalloc range: start %llu end %llu",
@@ -624,9 +622,9 @@ static int test_find_first_clear_extent_bit(void)
* Set 64M-72M with CHUNK_ALLOC flag, then search for CHUNK_TRIMMED flag
* being unset in this range, we should get the entry in range 64M-72M
*/
- set_extent_bit(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED, NULL);
- find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end,
- CHUNK_TRIMMED);
+ btrfs_set_extent_bit(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED, NULL);
+ btrfs_find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end,
+ CHUNK_TRIMMED);
if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
test_err("error finding exact range: start %llu end %llu",
@@ -634,8 +632,8 @@ static int test_find_first_clear_extent_bit(void)
goto out;
}
- find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end,
- CHUNK_TRIMMED);
+ btrfs_find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end,
+ CHUNK_TRIMMED);
/*
* Search in the middle of set range whose immediate neighbour doesn't
@@ -651,7 +649,7 @@ static int test_find_first_clear_extent_bit(void)
* Search beyond any known range, shall return after last known range
* and end should be -1
*/
- find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED);
+ btrfs_find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED);
if (start != SZ_64M + SZ_8M || end != -1) {
test_err(
"error handling beyond end of range search: start %llu end %llu",
@@ -663,7 +661,7 @@ static int test_find_first_clear_extent_bit(void)
out:
if (ret)
dump_extent_io_tree(&tree);
- clear_extent_bits(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED);
+ btrfs_clear_extent_bit(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);
return ret;
}
@@ -730,7 +728,7 @@ static int test_eb_mem_ops(u32 sectorsize, u32 nodesize)
goto out;
}
- eb = __alloc_dummy_extent_buffer(fs_info, SZ_1M, nodesize);
+ eb = alloc_dummy_extent_buffer(fs_info, SZ_1M);
if (!eb) {
test_std_err(TEST_ALLOC_EXTENT_BUFFER);
ret = -ENOMEM;
diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
index 56e61ac1cc64..0b9f25dd1a68 100644
--- a/fs/btrfs/tests/extent-map-tests.c
+++ b/fs/btrfs/tests/extent-map-tests.c
@@ -22,7 +22,7 @@ static int free_extent_map_tree(struct btrfs_inode *inode)
while (!RB_EMPTY_ROOT(&em_tree->root)) {
node = rb_first(&em_tree->root);
em = rb_entry(node, struct extent_map, rb_node);
- remove_extent_mapping(inode, em);
+ btrfs_remove_extent_mapping(inode, em);
#ifdef CONFIG_BTRFS_DEBUG
if (refcount_read(&em->refs) != 1) {
@@ -36,7 +36,7 @@ static int free_extent_map_tree(struct btrfs_inode *inode)
refcount_set(&em->refs, 1);
}
#endif
- free_extent_map(em);
+ btrfs_free_extent_map(em);
}
write_unlock(&em_tree->lock);
@@ -68,7 +68,7 @@ static int test_case_1(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
int ret;
int ret2;
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
return -ENOMEM;
@@ -87,10 +87,10 @@ static int test_case_1(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
test_err("cannot add extent range [0, 16K)");
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* Add [16K, 20K) following [0, 16K) */
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
ret = -ENOMEM;
@@ -109,9 +109,9 @@ static int test_case_1(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
test_err("cannot add extent range [16K, 20K)");
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
ret = -ENOMEM;
@@ -137,7 +137,7 @@ static int test_case_1(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
ret = -ENOENT;
goto out;
}
- if (em->start != 0 || extent_map_end(em) != SZ_16K ||
+ if (em->start != 0 || btrfs_extent_map_end(em) != SZ_16K ||
em->disk_bytenr != 0 || em->disk_num_bytes != SZ_16K) {
test_err(
"case1 [%llu %llu]: ret %d return a wrong em (start %llu len %llu disk_bytenr %llu disk_num_bytes %llu",
@@ -145,7 +145,7 @@ static int test_case_1(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
em->disk_bytenr, em->disk_num_bytes);
ret = -EINVAL;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
out:
ret2 = free_extent_map_tree(inode);
if (ret == 0)
@@ -167,7 +167,7 @@ static int test_case_2(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
int ret;
int ret2;
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
return -ENOMEM;
@@ -186,10 +186,10 @@ static int test_case_2(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
test_err("cannot add extent range [0, 1K)");
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* Add [4K, 8K) following [0, 1K) */
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
ret = -ENOMEM;
@@ -208,9 +208,9 @@ static int test_case_2(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
test_err("cannot add extent range [4K, 8K)");
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
ret = -ENOMEM;
@@ -235,14 +235,14 @@ static int test_case_2(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
ret = -ENOENT;
goto out;
}
- if (em->start != 0 || extent_map_end(em) != SZ_1K ||
+ if (em->start != 0 || btrfs_extent_map_end(em) != SZ_1K ||
em->disk_bytenr != EXTENT_MAP_INLINE) {
test_err(
"case2 [0 1K]: ret %d return a wrong em (start %llu len %llu disk_bytenr %llu",
ret, em->start, em->len, em->disk_bytenr);
ret = -EINVAL;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
out:
ret2 = free_extent_map_tree(inode);
if (ret == 0)
@@ -260,7 +260,7 @@ static int __test_case_3(struct btrfs_fs_info *fs_info,
int ret;
int ret2;
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
return -ENOMEM;
@@ -279,9 +279,9 @@ static int __test_case_3(struct btrfs_fs_info *fs_info,
test_err("cannot add extent range [4K, 8K)");
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
ret = -ENOMEM;
@@ -312,15 +312,15 @@ static int __test_case_3(struct btrfs_fs_info *fs_info,
* Since bytes within em are contiguous, em->block_start is identical to
* em->start.
*/
- if (start < em->start || start + len > extent_map_end(em) ||
- em->start != extent_map_block_start(em)) {
+ if (start < em->start || start + len > btrfs_extent_map_end(em) ||
+ em->start != btrfs_extent_map_block_start(em)) {
test_err(
"case3 [%llu %llu): ret %d em (start %llu len %llu disk_bytenr %llu block_len %llu)",
start, start + len, ret, em->start, em->len,
em->disk_bytenr, em->disk_num_bytes);
ret = -EINVAL;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
out:
ret2 = free_extent_map_tree(inode);
if (ret == 0)
@@ -369,7 +369,7 @@ static int __test_case_4(struct btrfs_fs_info *fs_info,
int ret;
int ret2;
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
return -ENOMEM;
@@ -388,9 +388,9 @@ static int __test_case_4(struct btrfs_fs_info *fs_info,
test_err("cannot add extent range [0, 8K)");
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
ret = -ENOMEM;
@@ -410,9 +410,9 @@ static int __test_case_4(struct btrfs_fs_info *fs_info,
test_err("cannot add extent range [8K, 32K)");
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
ret = -ENOMEM;
@@ -438,14 +438,14 @@ static int __test_case_4(struct btrfs_fs_info *fs_info,
ret = -ENOENT;
goto out;
}
- if (start < em->start || start + len > extent_map_end(em)) {
+ if (start < em->start || start + len > btrfs_extent_map_end(em)) {
test_err(
"case4 [%llu %llu): ret %d, added wrong em (start %llu len %llu disk_bytenr %llu disk_num_bytes %llu)",
start, start + len, ret, em->start, em->len,
em->disk_bytenr, em->disk_num_bytes);
ret = -EINVAL;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
out:
ret2 = free_extent_map_tree(inode);
if (ret == 0)
@@ -498,7 +498,7 @@ static int add_compressed_extent(struct btrfs_inode *inode,
struct extent_map *em;
int ret;
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
return -ENOMEM;
@@ -513,7 +513,7 @@ static int add_compressed_extent(struct btrfs_inode *inode,
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
- free_extent_map(em);
+ btrfs_free_extent_map(em);
if (ret < 0) {
test_err("cannot add extent map [%llu, %llu)", start, start + len);
return ret;
@@ -719,7 +719,7 @@ static int test_case_6(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
if (ret)
goto out;
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
ret = -ENOMEM;
@@ -751,7 +751,7 @@ static int test_case_6(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
}
ret = 0;
out:
- free_extent_map(em);
+ btrfs_free_extent_map(em);
ret2 = free_extent_map_tree(inode);
if (ret == 0)
ret = ret2;
@@ -773,7 +773,7 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
test_msg("Running btrfs_drop_extent_cache with pinned");
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
return -ENOMEM;
@@ -793,9 +793,9 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
test_err("couldn't add extent map");
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
ret = -ENOMEM;
@@ -815,7 +815,7 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
test_err("couldn't add extent map");
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/*
* Drop [0, 36K) This should skip the [0, 4K) extent and then split the
@@ -826,7 +826,7 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
/* Make sure our extent maps look sane. */
ret = -EINVAL;
- em = lookup_extent_mapping(em_tree, 0, SZ_16K);
+ em = btrfs_lookup_extent_mapping(em_tree, 0, SZ_16K);
if (!em) {
test_err("didn't find an em at 0 as expected");
goto out;
@@ -842,10 +842,10 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, SZ_16K, SZ_16K);
+ em = btrfs_lookup_extent_mapping(em_tree, SZ_16K, SZ_16K);
read_unlock(&em_tree->lock);
if (em) {
test_err("found an em when we weren't expecting one");
@@ -853,7 +853,7 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
}
read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, SZ_32K, SZ_16K);
+ em = btrfs_lookup_extent_mapping(em_tree, SZ_32K, SZ_16K);
read_unlock(&em_tree->lock);
if (!em) {
test_err("didn't find an em at 32K as expected");
@@ -870,16 +870,16 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
goto out;
}
- if (extent_map_block_start(em) != SZ_32K + SZ_4K) {
+ if (btrfs_extent_map_block_start(em) != SZ_32K + SZ_4K) {
test_err("em->block_start is %llu, expected 36K",
- extent_map_block_start(em));
+ btrfs_extent_map_block_start(em));
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, 48 * SZ_1K, (u64)-1);
+ em = btrfs_lookup_extent_mapping(em_tree, 48 * SZ_1K, (u64)-1);
read_unlock(&em_tree->lock);
if (em) {
test_err("found an unexpected em above 48K");
@@ -888,9 +888,9 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
ret = 0;
out:
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* Unpin our extent to prevent warning when removing it below. */
- ret2 = unpin_extent_cache(inode, 0, SZ_16K, 0);
+ ret2 = btrfs_unpin_extent_cache(inode, 0, SZ_16K, 0);
if (ret == 0)
ret = ret2;
ret2 = free_extent_map_tree(inode);
@@ -913,7 +913,7 @@ static int test_case_8(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
int ret;
int ret2;
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
return -ENOMEM;
@@ -928,13 +928,13 @@ static int test_case_8(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
- free_extent_map(em);
+ btrfs_free_extent_map(em);
if (ret < 0) {
test_err("couldn't add extent map for range [120K, 128K)");
goto out;
}
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
ret = -ENOMEM;
@@ -967,7 +967,7 @@ static int test_case_8(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, SZ_1K * 140, SZ_4K);
write_unlock(&em_tree->lock);
- free_extent_map(em);
+ btrfs_free_extent_map(em);
if (ret < 0) {
test_err("couldn't add extent map for range [108K, 144K)");
goto out;
@@ -1013,7 +1013,7 @@ static int test_rmap_block(struct btrfs_fs_info *fs_info,
struct rmap_test_vector *test)
{
struct btrfs_chunk_map *map;
- u64 *logical = NULL;
+ u64 AUTO_KFREE(logical);
int i, out_ndaddrs, out_stripe_len;
int ret;
@@ -1045,7 +1045,8 @@ static int test_rmap_block(struct btrfs_fs_info *fs_info,
ret = btrfs_add_chunk_map(fs_info, map);
if (ret) {
test_err("error adding chunk map to mapping tree");
- goto out_free;
+ btrfs_free_chunk_map(map);
+ return ret;
}
ret = btrfs_rmap_block(fs_info, map->start, btrfs_sb_offset(1),
@@ -1078,8 +1079,6 @@ static int test_rmap_block(struct btrfs_fs_info *fs_info,
ret = 0;
out:
btrfs_remove_chunk_map(fs_info, map);
-out_free:
- kfree(logical);
return ret;
}
@@ -1094,7 +1093,7 @@ int btrfs_test_extent_map(void)
/*
* Test a chunk with 2 data stripes one of which
* intersects the physical address of the super block
- * is correctly recognised.
+ * is correctly recognized.
*/
.raid_type = BTRFS_BLOCK_GROUP_RAID1,
.physical_start = SZ_64M - SZ_4M,
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index b61972046feb..c8822edd32e2 100644
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -32,7 +32,7 @@ static int __check_free_space_extents(struct btrfs_trans_handle *trans,
unsigned int i;
int ret;
- info = search_free_space_info(trans, cache, path, 0);
+ info = btrfs_search_free_space_info(trans, cache, path, 0);
if (IS_ERR(info)) {
test_err("could not find free space info");
ret = PTR_ERR(info);
@@ -57,7 +57,7 @@ static int __check_free_space_extents(struct btrfs_trans_handle *trans,
goto invalid;
offset = key.objectid;
while (offset < key.objectid + key.offset) {
- bit = free_space_test_bit(cache, path, offset);
+ bit = btrfs_free_space_test_bit(cache, path, offset);
if (prev_bit == 0 && bit == 1) {
extent_start = offset;
} else if (prev_bit == 1 && bit == 0) {
@@ -115,7 +115,7 @@ static int check_free_space_extents(struct btrfs_trans_handle *trans,
u32 flags;
int ret;
- info = search_free_space_info(trans, cache, path, 0);
+ info = btrfs_search_free_space_info(trans, cache, path, 0);
if (IS_ERR(info)) {
test_err("could not find free space info");
btrfs_release_path(path);
@@ -131,13 +131,13 @@ static int check_free_space_extents(struct btrfs_trans_handle *trans,
/* Flip it to the other format and check that for good measure. */
if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
- ret = convert_free_space_to_extents(trans, cache, path);
+ ret = btrfs_convert_free_space_to_extents(trans, cache, path);
if (ret) {
test_err("could not convert to extents");
return ret;
}
} else {
- ret = convert_free_space_to_bitmaps(trans, cache, path);
+ ret = btrfs_convert_free_space_to_bitmaps(trans, cache, path);
if (ret) {
test_err("could not convert to bitmaps");
return ret;
@@ -170,9 +170,8 @@ static int test_remove_all(struct btrfs_trans_handle *trans,
const struct free_space_extent extents[] = {};
int ret;
- ret = __remove_from_free_space_tree(trans, cache, path,
- cache->start,
- cache->length);
+ ret = __btrfs_remove_from_free_space_tree(trans, cache, path,
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
@@ -193,8 +192,8 @@ static int test_remove_beginning(struct btrfs_trans_handle *trans,
};
int ret;
- ret = __remove_from_free_space_tree(trans, cache, path,
- cache->start, alignment);
+ ret = __btrfs_remove_from_free_space_tree(trans, cache, path,
+ cache->start, alignment);
if (ret) {
test_err("could not remove free space");
return ret;
@@ -216,7 +215,7 @@ static int test_remove_end(struct btrfs_trans_handle *trans,
};
int ret;
- ret = __remove_from_free_space_tree(trans, cache, path,
+ ret = __btrfs_remove_from_free_space_tree(trans, cache, path,
cache->start + cache->length - alignment,
alignment);
if (ret) {
@@ -240,9 +239,9 @@ static int test_remove_middle(struct btrfs_trans_handle *trans,
};
int ret;
- ret = __remove_from_free_space_tree(trans, cache, path,
- cache->start + alignment,
- alignment);
+ ret = __btrfs_remove_from_free_space_tree(trans, cache, path,
+ cache->start + alignment,
+ alignment);
if (ret) {
test_err("could not remove free space");
return ret;
@@ -263,23 +262,22 @@ static int test_merge_left(struct btrfs_trans_handle *trans,
};
int ret;
- ret = __remove_from_free_space_tree(trans, cache, path,
- cache->start, cache->length);
+ ret = __btrfs_remove_from_free_space_tree(trans, cache, path,
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path, cache->start,
- alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path, cache->start,
+ alignment);
if (ret) {
test_err("could not add free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path,
- cache->start + alignment,
- alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path,
+ cache->start + alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
@@ -300,24 +298,23 @@ static int test_merge_right(struct btrfs_trans_handle *trans,
};
int ret;
- ret = __remove_from_free_space_tree(trans, cache, path,
- cache->start, cache->length);
+ ret = __btrfs_remove_from_free_space_tree(trans, cache, path,
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path,
- cache->start + 2 * alignment,
- alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path,
+ cache->start + 2 * alignment,
+ alignment);
if (ret) {
test_err("could not add free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path,
- cache->start + alignment,
- alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path,
+ cache->start + alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
@@ -338,29 +335,29 @@ static int test_merge_both(struct btrfs_trans_handle *trans,
};
int ret;
- ret = __remove_from_free_space_tree(trans, cache, path,
- cache->start, cache->length);
+ ret = __btrfs_remove_from_free_space_tree(trans, cache, path,
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path, cache->start,
- alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path, cache->start,
+ alignment);
if (ret) {
test_err("could not add free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path,
- cache->start + 2 * alignment, alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path,
+ cache->start + 2 * alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path,
- cache->start + alignment, alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path,
+ cache->start + alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
@@ -383,29 +380,29 @@ static int test_merge_none(struct btrfs_trans_handle *trans,
};
int ret;
- ret = __remove_from_free_space_tree(trans, cache, path,
- cache->start, cache->length);
+ ret = __btrfs_remove_from_free_space_tree(trans, cache, path,
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path, cache->start,
- alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path, cache->start,
+ alignment);
if (ret) {
test_err("could not add free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path,
- cache->start + 4 * alignment, alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path,
+ cache->start + 4 * alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path,
- cache->start + 2 * alignment, alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path,
+ cache->start + 2 * alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
@@ -483,14 +480,14 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
goto out;
}
- ret = add_block_group_free_space(&trans, cache);
+ ret = btrfs_add_block_group_free_space(&trans, cache);
if (ret) {
test_err("could not add block group free space");
goto out;
}
if (bitmaps) {
- ret = convert_free_space_to_bitmaps(&trans, cache, path);
+ ret = btrfs_convert_free_space_to_bitmaps(&trans, cache, path);
if (ret) {
test_err("could not convert block group to bitmaps");
goto out;
@@ -501,7 +498,7 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
if (ret)
goto out;
- ret = remove_block_group_free_space(&trans, cache);
+ ret = btrfs_remove_block_group_free_space(&trans, cache);
if (ret) {
test_err("could not remove block group free space");
goto out;
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 3ea3bc2225fe..a4c2b7748b95 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -268,7 +268,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("expected a hole, got %llu", em->disk_bytenr);
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
/*
@@ -314,7 +314,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
* this?
*/
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
@@ -336,7 +336,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* Regular extent */
em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
@@ -363,7 +363,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* The next 3 are split extents */
em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
@@ -389,10 +389,10 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
- disk_bytenr = extent_map_block_start(em);
+ disk_bytenr = btrfs_extent_map_block_start(em);
orig_start = em->start;
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
@@ -414,7 +414,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
@@ -441,13 +441,13 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
disk_bytenr += (em->start - orig_start);
- if (extent_map_block_start(em) != disk_bytenr) {
+ if (btrfs_extent_map_block_start(em) != disk_bytenr) {
test_err("wrong block start, want %llu, have %llu",
- disk_bytenr, extent_map_block_start(em));
+ disk_bytenr, btrfs_extent_map_block_start(em));
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* Prealloc extent */
em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
@@ -475,7 +475,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* The next 3 are a half written prealloc extent */
em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
@@ -502,10 +502,10 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
- disk_bytenr = extent_map_block_start(em);
+ disk_bytenr = btrfs_extent_map_block_start(em);
orig_start = em->start;
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
@@ -531,13 +531,13 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
em->start - orig_start, em->offset);
goto out;
}
- if (extent_map_block_start(em) != disk_bytenr + em->offset) {
+ if (btrfs_extent_map_block_start(em) != disk_bytenr + em->offset) {
test_err("unexpected block start, wanted %llu, have %llu",
- disk_bytenr + em->offset, extent_map_block_start(em));
+ disk_bytenr + em->offset, btrfs_extent_map_block_start(em));
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
@@ -564,13 +564,13 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
em->start, em->offset, orig_start);
goto out;
}
- if (extent_map_block_start(em) != disk_bytenr + em->offset) {
+ if (btrfs_extent_map_block_start(em) != disk_bytenr + em->offset) {
test_err("unexpected block start, wanted %llu, have %llu",
- disk_bytenr + em->offset, extent_map_block_start(em));
+ disk_bytenr + em->offset, btrfs_extent_map_block_start(em));
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* Now for the compressed extent */
em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
@@ -597,13 +597,13 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
- if (extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
+ if (btrfs_extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
test_err("unexpected compress type, wanted %d, got %d",
- BTRFS_COMPRESS_ZLIB, extent_map_compression(em));
+ BTRFS_COMPRESS_ZLIB, btrfs_extent_map_compression(em));
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* Split compressed extent */
em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
@@ -630,15 +630,15 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
- if (extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
+ if (btrfs_extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
test_err("unexpected compress type, wanted %d, got %d",
- BTRFS_COMPRESS_ZLIB, extent_map_compression(em));
+ BTRFS_COMPRESS_ZLIB, btrfs_extent_map_compression(em));
goto out;
}
- disk_bytenr = extent_map_block_start(em);
+ disk_bytenr = btrfs_extent_map_block_start(em);
orig_start = em->start;
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
@@ -664,16 +664,16 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (extent_map_block_start(em) != disk_bytenr) {
+ if (btrfs_extent_map_block_start(em) != disk_bytenr) {
test_err("block start does not match, want %llu got %llu",
- disk_bytenr, extent_map_block_start(em));
+ disk_bytenr, btrfs_extent_map_block_start(em));
goto out;
}
if (em->start != offset || em->len != 2 * sectorsize) {
@@ -692,13 +692,13 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
em->start, em->offset, orig_start);
goto out;
}
- if (extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
+ if (btrfs_extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
test_err("unexpected compress type, wanted %d, got %d",
- BTRFS_COMPRESS_ZLIB, extent_map_compression(em));
+ BTRFS_COMPRESS_ZLIB, btrfs_extent_map_compression(em));
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* A hole between regular extents but no hole extent */
em = btrfs_get_extent(BTRFS_I(inode), NULL, offset + 6, sectorsize);
@@ -725,7 +725,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, SZ_4M);
if (IS_ERR(em)) {
@@ -757,7 +757,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
@@ -785,7 +785,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
ret = 0;
out:
if (!IS_ERR(em))
- free_extent_map(em);
+ btrfs_free_extent_map(em);
iput(inode);
btrfs_free_dummy_root(root);
btrfs_free_dummy_fs_info(fs_info);
@@ -858,15 +858,16 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
em->flags);
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
em = btrfs_get_extent(BTRFS_I(inode), NULL, sectorsize, 2 * sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (extent_map_block_start(em) != sectorsize) {
- test_err("expected a real extent, got %llu", extent_map_block_start(em));
+ if (btrfs_extent_map_block_start(em) != sectorsize) {
+ test_err("expected a real extent, got %llu",
+ btrfs_extent_map_block_start(em));
goto out;
}
if (em->start != sectorsize || em->len != sectorsize) {
@@ -883,7 +884,7 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
ret = 0;
out:
if (!IS_ERR(em))
- free_extent_map(em);
+ btrfs_free_extent_map(em);
iput(inode);
btrfs_free_dummy_root(root);
btrfs_free_dummy_fs_info(fs_info);
@@ -949,11 +950,10 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
}
/* [BTRFS_MAX_EXTENT_SIZE/2][sectorsize HOLE][the rest] */
- ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
- BTRFS_MAX_EXTENT_SIZE >> 1,
- (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
- EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
- EXTENT_UPTODATE, NULL);
+ ret = btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree,
+ BTRFS_MAX_EXTENT_SIZE >> 1,
+ (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW, NULL);
if (ret) {
test_err("clear_extent_bit returned %d", ret);
goto out;
@@ -1017,11 +1017,10 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
}
/* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */
- ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
- BTRFS_MAX_EXTENT_SIZE + sectorsize,
- BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
- EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
- EXTENT_UPTODATE, NULL);
+ ret = btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree,
+ BTRFS_MAX_EXTENT_SIZE + sectorsize,
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW, NULL);
if (ret) {
test_err("clear_extent_bit returned %d", ret);
goto out;
@@ -1052,9 +1051,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
}
/* Empty */
- ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
- EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
- EXTENT_UPTODATE, NULL);
+ ret = btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW, NULL);
if (ret) {
test_err("clear_extent_bit returned %d", ret);
goto out;
@@ -1068,9 +1066,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
ret = 0;
out:
if (ret)
- clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
- EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
- EXTENT_UPTODATE, NULL);
+ btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW, NULL);
iput(inode);
btrfs_free_dummy_root(root);
btrfs_free_dummy_fs_info(fs_info);
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 3fc8dc3fd980..05cfda8af422 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -20,7 +20,7 @@ static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
struct btrfs_extent_item *item;
struct btrfs_extent_inline_ref *iref;
struct btrfs_tree_block_info *block_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_key ins;
u32 size = sizeof(*item) + sizeof(*iref) + sizeof(*block_info);
@@ -41,7 +41,6 @@ static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
ret = btrfs_insert_empty_item(&trans, root, path, &ins, size);
if (ret) {
test_err("couldn't insert ref %d", ret);
- btrfs_free_path(path);
return ret;
}
@@ -61,7 +60,6 @@ static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_TREE_BLOCK_REF_KEY);
btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
}
- btrfs_free_path(path);
return 0;
}
@@ -70,7 +68,7 @@ static int add_tree_ref(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
{
struct btrfs_trans_handle trans;
struct btrfs_extent_item *item;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
u64 refs;
int ret;
@@ -90,7 +88,6 @@ static int add_tree_ref(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
ret = btrfs_search_slot(&trans, root, &key, path, 0, 1);
if (ret) {
test_err("couldn't find extent ref");
- btrfs_free_path(path);
return ret;
}
@@ -112,7 +109,6 @@ static int add_tree_ref(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
ret = btrfs_insert_empty_item(&trans, root, path, &key, 0);
if (ret)
test_err("failed to insert backref");
- btrfs_free_path(path);
return ret;
}
@@ -121,7 +117,7 @@ static int remove_extent_item(struct btrfs_root *root, u64 bytenr,
{
struct btrfs_trans_handle trans;
struct btrfs_key key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
btrfs_init_dummy_trans(&trans, NULL);
@@ -139,11 +135,9 @@ static int remove_extent_item(struct btrfs_root *root, u64 bytenr,
ret = btrfs_search_slot(&trans, root, &key, path, -1, 1);
if (ret) {
test_err("didn't find our key %d", ret);
- btrfs_free_path(path);
return ret;
}
btrfs_del_item(&trans, root, path);
- btrfs_free_path(path);
return 0;
}
@@ -152,7 +146,7 @@ static int remove_extent_ref(struct btrfs_root *root, u64 bytenr,
{
struct btrfs_trans_handle trans;
struct btrfs_extent_item *item;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
u64 refs;
int ret;
@@ -172,7 +166,6 @@ static int remove_extent_ref(struct btrfs_root *root, u64 bytenr,
ret = btrfs_search_slot(&trans, root, &key, path, 0, 1);
if (ret) {
test_err("couldn't find extent ref");
- btrfs_free_path(path);
return ret;
}
@@ -198,7 +191,6 @@ static int remove_extent_ref(struct btrfs_root *root, u64 bytenr,
return ret;
}
btrfs_del_item(&trans, root, path);
- btrfs_free_path(path);
return ret;
}
diff --git a/fs/btrfs/tests/raid-stripe-tree-tests.c b/fs/btrfs/tests/raid-stripe-tree-tests.c
index 30f17eb7b6a8..a7bc58a5c1e2 100644
--- a/fs/btrfs/tests/raid-stripe-tree-tests.c
+++ b/fs/btrfs/tests/raid-stripe-tree-tests.c
@@ -14,6 +14,8 @@
#define RST_TEST_NUM_DEVICES (2)
#define RST_TEST_RAID1_TYPE (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1)
+#define SZ_48K (SZ_32K + SZ_16K)
+
typedef int (*test_func_t)(struct btrfs_trans_handle *trans);
static struct btrfs_device *btrfs_device_by_devid(struct btrfs_fs_devices *fs_devices,
@@ -30,6 +32,613 @@ static struct btrfs_device *btrfs_device_by_devid(struct btrfs_fs_devices *fs_de
}
/*
+ * Test creating a range of three extents and then punching a hole in the middle,
+ * deleting the middle extent entirely and partially deleting the "book ends".
+ */
+static int test_punch_hole_3extents(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical1 = SZ_1M;
+ u64 len1 = SZ_1M;
+ u64 logical2 = logical1 + len1;
+ u64 len2 = SZ_1M;
+ u64 logical3 = logical2 + len2;
+ u64 len3 = SZ_1M;
+ u64 hole_start = logical1 + SZ_256K;
+ u64 hole_len = SZ_2M;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical1, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+
+ /* Prepare for the test, 1st create 3 x 1M extents. */
+ bioc->map_type = map_type;
+ bioc->size = len1;
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical1 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ bioc->logical = logical2;
+ bioc->size = len2;
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical2 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ bioc->logical = logical3;
+ bioc->size = len3;
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical3 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ /*
+ * Delete a range starting at logical1 + 256K and 2M in length. Extent
+ * 1 is truncated to 256K length, extent 2 is completely dropped and
+ * extent 3 is moved 256K to the right.
+ */
+ ret = btrfs_delete_raid_extent(trans, hole_start, hole_len);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ hole_start, hole_start + hole_len);
+ goto out;
+ }
+
+ /* Get the first extent and check its size. */
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1, &len1, map_type,
+ 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed",
+ logical1, logical1 + len1);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical1) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical1, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len1 != SZ_256K) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_256K, len1);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Get the second extent and check it's absent. */
+ ret = btrfs_get_raid_extent_offset(fs_info, logical2, &len2, map_type,
+ 0, &io_stripe);
+ if (ret != -ENODATA) {
+ test_err("lookup of RAID extent [%llu, %llu] succeeded should fail",
+ logical2, logical2 + len2);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Get the third extent and check its size. */
+ logical3 += SZ_256K;
+ ret = btrfs_get_raid_extent_offset(fs_info, logical3, &len3, map_type,
+ 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed",
+ logical3, logical3 + len3);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical3) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical3, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len3 != SZ_1M - SZ_256K) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_1M - SZ_256K, len3);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical1, len1);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ logical1, logical1 + len1);
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical3, len3);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ logical3, logical3 + len3);
+ goto out;
+ }
+
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+static int test_delete_two_extents(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical1 = SZ_1M;
+ u64 len1 = SZ_1M;
+ u64 logical2 = logical1 + len1;
+ u64 len2 = SZ_1M;
+ u64 logical3 = logical2 + len2;
+ u64 len3 = SZ_1M;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical1, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+
+ /* Prepare for the test, 1st create 3 x 1M extents. */
+ bioc->map_type = map_type;
+ bioc->size = len1;
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical1 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ bioc->logical = logical2;
+ bioc->size = len2;
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical2 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ bioc->logical = logical3;
+ bioc->size = len3;
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical3 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ /*
+ * Delete a range starting at logical1 and 2M in length. Extents 1
+ * and 2 are dropped and extent 3 is kept as is.
+ */
+ ret = btrfs_delete_raid_extent(trans, logical1, len1 + len2);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ logical1, logical1 + len1 + len2);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1, &len1, map_type,
+ 0, &io_stripe);
+ if (ret != -ENODATA) {
+ test_err("lookup of RAID extent [%llu, %llu] succeeded, should fail",
+ logical1, len1);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical2, &len2, map_type,
+ 0, &io_stripe);
+ if (ret != -ENODATA) {
+ test_err("lookup of RAID extent [%llu, %llu] succeeded, should fail",
+ logical2, len2);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical3, &len3, map_type,
+ 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed",
+ logical3, len3);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical3) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical3, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len3 != SZ_1M) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_1M, len3);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical3, len3);
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+/* Test punching a hole into a single RAID stripe-extent. */
+static int test_punch_hole(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical1 = SZ_1M;
+ u64 hole_start = logical1 + SZ_32K;
+ u64 hole_len = SZ_64K;
+ u64 logical2 = hole_start + hole_len;
+ u64 len = SZ_1M;
+ u64 len1 = SZ_32K;
+ u64 len2 = len - len1 - hole_len;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical1, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ bioc->map_type = map_type;
+ bioc->size = len;
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical1 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1, &len, map_type, 0,
+ &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical1,
+ logical1 + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical1) {
+ test_err("invalid physical address, expected %llu got %llu",
+ logical1, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_1M) {
+ test_err("invalid stripe length, expected %llu got %llu",
+ (u64)SZ_1M, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, hole_start, hole_len);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ hole_start, hole_start + hole_len);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1, &len1, map_type,
+ 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed",
+ logical1, logical1 + len1);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical1) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical1, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len1 != SZ_32K) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_32K, len1);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical2, &len2, map_type,
+ 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical2,
+ logical2 + len2);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical2) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical2, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len2 != len - len1 - hole_len) {
+ test_err("invalid length, expected %llu, got %llu",
+ len - len1 - hole_len, len2);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Check for the absence of the hole. */
+ ret = btrfs_get_raid_extent_offset(fs_info, hole_start, &hole_len,
+ map_type, 0, &io_stripe);
+ if (ret != -ENODATA) {
+ ret = -EINVAL;
+ test_err("lookup of RAID extent [%llu, %llu] succeeded, should fail",
+ hole_start, hole_start + SZ_64K);
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical1, len1);
+ if (ret)
+ goto out;
+
+ ret = btrfs_delete_raid_extent(trans, logical2, len2);
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+/*
+ * Test a 1M RST write that spans two adjacent RST items on disk and then
+ * delete a portion starting in the first item and spanning into the second
+ * item. This is similar to test_front_delete(), but spanning multiple items.
+ */
+static int test_front_delete_prev_item(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical1 = SZ_1M;
+ u64 logical2 = SZ_2M;
+ u64 len = SZ_1M;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical1, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ bioc->map_type = map_type;
+ bioc->size = len;
+
+ /* Insert RAID extent 1. */
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical1 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ bioc->logical = logical2;
+ /* Insert RAID extent 2, directly adjacent to it. */
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical2 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical1 + SZ_512K, SZ_1M);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ logical1 + SZ_512K, (u64)SZ_1M);
+ goto out;
+ }
+
+ /* Verify item 1 is truncated to 512K. */
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1, &len, map_type, 0,
+ &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical1,
+ logical1 + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical1) {
+ test_err("invalid physical address, expected %llu got %llu",
+ logical1, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_512K) {
+ test_err("invalid stripe length, expected %llu got %llu",
+ (u64)SZ_512K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Verify item 2's start is moved by 512K. */
+ ret = btrfs_get_raid_extent_offset(fs_info, logical2 + SZ_512K, &len,
+ map_type, 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed",
+ logical2 + SZ_512K, logical2 + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical2 + SZ_512K) {
+ test_err("invalid physical address, expected %llu got %llu",
+ logical2 + SZ_512K, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_512K) {
+ test_err("invalid stripe length, expected %llu got %llu",
+ (u64)SZ_512K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Verify there's a hole at [1M+512K, 2M+512K]. */
+ len = SZ_1M;
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1 + SZ_512K, &len,
+ map_type, 0, &io_stripe);
+ if (ret != -ENODATA) {
+ test_err("lookup of RAID [%llu, %llu] succeeded, should fail",
+ logical1 + SZ_512K, logical1 + SZ_512K + len);
+ goto out;
+ }
+
+ /* Clean up after us. */
+ ret = btrfs_delete_raid_extent(trans, logical1, SZ_512K);
+ if (ret)
+ goto out;
+
+ ret = btrfs_delete_raid_extent(trans, logical2 + SZ_512K, SZ_512K);
+
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
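
To make the expected layout of the test above explicit: the two adjacent 1M RAID extents sit at 1M and 2M, and deleting 1M starting at 1M+512K trims the tail of the first item and the head of the second. Below is a standalone sketch of that arithmetic (plain C, not kernel code; the constants simply mirror SZ_512K/SZ_1M/SZ_2M):

#include <assert.h>

int main(void)
{
        const unsigned long long K = 1024, M = 1024 * K;
        const unsigned long long del_start = 1 * M + 512 * K; /* logical1 + SZ_512K */
        const unsigned long long del_len = 1 * M;

        /* Item 1 ([1M, 2M)) keeps only its head: [1M, 1M + 512K). */
        assert(del_start - 1 * M == 512 * K);
        /* Item 2 ([2M, 3M)) keeps only its tail: new start 2M + 512K, 512K long. */
        assert(del_start + del_len == 2 * M + 512 * K);
        assert(3 * M - (del_start + del_len) == 512 * K);
        /* Everything in [1M + 512K, 2M + 512K) must now look up as -ENODATA. */
        return 0;
}
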
+/*
* Test a 64K RST write on a 2 disk RAID1 at a logical address of 1M and then
* delete the 1st 32K, making the new start address 1M+32K.
*/
@@ -94,45 +703,45 @@ static int test_front_delete(struct btrfs_trans_handle *trans)
goto out;
}
- ret = btrfs_delete_raid_extent(trans, logical, SZ_32K);
+ ret = btrfs_delete_raid_extent(trans, logical, SZ_16K);
if (ret) {
test_err("deleting RAID extent [%llu, %llu] failed", logical,
- logical + SZ_32K);
+ logical + SZ_16K);
goto out;
}
- len = SZ_32K;
- ret = btrfs_get_raid_extent_offset(fs_info, logical + SZ_32K, &len,
+ len -= SZ_16K;
+ ret = btrfs_get_raid_extent_offset(fs_info, logical + SZ_16K, &len,
map_type, 0, &io_stripe);
if (ret) {
test_err("lookup of RAID extent [%llu, %llu] failed",
- logical + SZ_32K, logical + SZ_32K + len);
+ logical + SZ_16K, logical + SZ_64K);
goto out;
}
- if (io_stripe.physical != logical + SZ_32K) {
+ if (io_stripe.physical != logical + SZ_16K) {
test_err("invalid physical address, expected %llu, got %llu",
- logical + SZ_32K, io_stripe.physical);
+ logical + SZ_16K, io_stripe.physical);
ret = -EINVAL;
goto out;
}
- if (len != SZ_32K) {
+ if (len != SZ_48K) {
test_err("invalid stripe length, expected %llu, got %llu",
- (u64)SZ_32K, len);
+ (u64)SZ_48K, len);
ret = -EINVAL;
goto out;
}
ret = btrfs_get_raid_extent_offset(fs_info, logical, &len, map_type, 0, &io_stripe);
- if (!ret) {
+ if (ret != -ENODATA) {
ret = -EINVAL;
test_err("lookup of RAID extent [%llu, %llu] succeeded, should fail",
- logical, logical + SZ_32K);
+ logical, logical + SZ_16K);
goto out;
}
- ret = btrfs_delete_raid_extent(trans, logical + SZ_32K, SZ_32K);
+ ret = btrfs_delete_raid_extent(trans, logical + SZ_16K, SZ_48K);
out:
btrfs_put_bioc(bioc);
return ret;
@@ -209,14 +818,14 @@ static int test_tail_delete(struct btrfs_trans_handle *trans)
goto out;
}
- ret = btrfs_delete_raid_extent(trans, logical + SZ_32K, SZ_32K);
+ ret = btrfs_delete_raid_extent(trans, logical + SZ_48K, SZ_16K);
if (ret) {
test_err("deleting RAID extent [%llu, %llu] failed",
- logical + SZ_32K, logical + SZ_64K);
+ logical + SZ_48K, logical + SZ_64K);
goto out;
}
- len = SZ_32K;
+ len = SZ_48K;
ret = btrfs_get_raid_extent_offset(fs_info, logical, &len, map_type, 0, &io_stripe);
if (ret) {
test_err("lookup of RAID extent [%llu, %llu] failed", logical,
@@ -231,9 +840,19 @@ static int test_tail_delete(struct btrfs_trans_handle *trans)
goto out;
}
- if (len != SZ_32K) {
+ if (len != SZ_48K) {
test_err("invalid stripe length, expected %llu, got %llu",
- (u64)SZ_32K, len);
+ (u64)SZ_48K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ len = SZ_16K;
+ ret = btrfs_get_raid_extent_offset(fs_info, logical + SZ_48K, &len,
+ map_type, 0, &io_stripe);
+ if (ret != -ENODATA) {
+ test_err("lookup of RAID extent [%llu, %llu] succeeded should fail",
+ logical + SZ_48K, logical + SZ_64K);
ret = -EINVAL;
goto out;
}
@@ -456,6 +1075,10 @@ static const test_func_t tests[] = {
test_create_update_delete,
test_tail_delete,
test_front_delete,
+ test_front_delete_prev_item,
+ test_punch_hole,
+ test_punch_hole_3extents,
+ test_delete_two_extents,
};
static int run_test(test_func_t test, u32 sectorsize, u32 nodesize)
@@ -478,8 +1101,8 @@ static int run_test(test_func_t test, u32 sectorsize, u32 nodesize)
ret = PTR_ERR(root);
goto out;
}
- btrfs_set_super_compat_ro_flags(root->fs_info->super_copy,
- BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE);
+ btrfs_set_super_incompat_flags(root->fs_info->super_copy,
+ BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE);
root->root_key.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID;
root->root_key.type = BTRFS_ROOT_ITEM_KEY;
root->root_key.offset = 0;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index dc0b837efd5d..05ee4391c83a 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -32,6 +32,8 @@
#include "ioctl.h"
#include "relocation.h"
#include "scrub.h"
+#include "ordered-data.h"
+#include "delayed-inode.h"
static struct kmem_cache *btrfs_trans_handle_cachep;
@@ -103,7 +105,7 @@ static struct kmem_cache *btrfs_trans_handle_cachep;
* | attached to transid N+1. |
* | |
* | To next stage: |
- * | Until all tree blocks are super blocks are |
+ * | Until all tree blocks and super blocks are |
* | written to block devices |
* V |
* Transaction N [[TRANS_STATE_COMPLETED]] V
@@ -138,7 +140,6 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
- WARN_ON(refcount_read(&transaction->use_count) == 0);
if (refcount_dec_and_test(&transaction->use_count)) {
BUG_ON(!list_empty(&transaction->list));
WARN_ON(!xa_empty(&transaction->delayed_refs.head_refs));
@@ -160,7 +161,13 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
cache = list_first_entry(&transaction->deleted_bgs,
struct btrfs_block_group,
bg_list);
+ /*
+ * Not strictly necessary to lock, as no other task will be using a
+ * block_group on the deleted_bgs list during a transaction abort.
+ */
+ spin_lock(&transaction->fs_info->unused_bgs_lock);
list_del_init(&cache->bg_list);
+ spin_unlock(&transaction->fs_info->unused_bgs_lock);
btrfs_unfreeze_block_group(cache);
btrfs_put_block_group(cache);
}
@@ -179,7 +186,8 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
* At this point no one can be using this transaction to modify any tree
* and no one can start another transaction to modify any tree either.
*/
- ASSERT(cur_trans->state == TRANS_STATE_COMMIT_DOING);
+ ASSERT(cur_trans->state == TRANS_STATE_COMMIT_DOING,
+ "cur_trans->state=%d", cur_trans->state);
down_write(&fs_info->commit_root_sem);
@@ -191,7 +199,7 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
list_del_init(&root->dirty_list);
free_extent_buffer(root->commit_root);
root->commit_root = btrfs_root_node(root);
- extent_io_tree_release(&root->dirty_log_pages);
+ btrfs_extent_io_tree_release(&root->dirty_log_pages);
btrfs_qgroup_clean_swapped_blocks(root);
}
@@ -274,8 +282,10 @@ loop:
cur_trans = fs_info->running_transaction;
if (cur_trans) {
if (TRANS_ABORTED(cur_trans)) {
+ const int abort_error = cur_trans->aborted;
+
spin_unlock(&fs_info->trans_lock);
- return cur_trans->aborted;
+ return abort_error;
}
if (btrfs_blocked_trans_types[cur_trans->state] & type) {
spin_unlock(&fs_info->trans_lock);
@@ -375,10 +385,10 @@ loop:
INIT_LIST_HEAD(&cur_trans->deleted_bgs);
spin_lock_init(&cur_trans->dropped_roots_lock);
list_add_tail(&cur_trans->list, &fs_info->trans_list);
- extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
- IO_TREE_TRANS_DIRTY_PAGES);
- extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
- IO_TREE_FS_PINNED_EXTENTS);
+ btrfs_extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
+ IO_TREE_TRANS_DIRTY_PAGES);
+ btrfs_extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
+ IO_TREE_FS_PINNED_EXTENTS);
btrfs_set_fs_generation(fs_info, fs_info->generation + 1);
cur_trans->transid = fs_info->generation;
fs_info->running_transaction = cur_trans;
@@ -396,7 +406,7 @@ loop:
*/
static int record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- int force)
+ bool force)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
@@ -530,15 +540,15 @@ static void wait_current_trans(struct btrfs_fs_info *fs_info)
}
}
-static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
+static bool may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
- return 0;
+ return false;
if (type == TRANS_START)
- return 1;
+ return true;
- return 0;
+ return false;
}
static inline bool need_reserve_reloc_root(struct btrfs_root *root)
@@ -567,7 +577,7 @@ static int btrfs_reserve_trans_metadata(struct btrfs_fs_info *fs_info,
* We want to reserve all the bytes we may need all at once, so we only
* do 1 enospc flushing cycle per transaction start.
*/
- ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
+ ret = btrfs_reserve_metadata_bytes(si, bytes, flush);
/*
* If we are an emergency flush, which can steal from the global block
@@ -577,7 +587,7 @@ static int btrfs_reserve_trans_metadata(struct btrfs_fs_info *fs_info,
if (ret && flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
bytes -= *delayed_refs_bytes;
*delayed_refs_bytes = 0;
- ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
+ ret = btrfs_reserve_metadata_bytes(si, bytes, flush);
}
return ret;
@@ -753,9 +763,10 @@ got_it:
* value here.
*/
if (do_chunk_alloc && num_bytes) {
- u64 flags = h->block_rsv->space_info->flags;
+ struct btrfs_space_info *space_info = h->block_rsv->space_info;
+ u64 flags = space_info->flags;
- btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags),
+ btrfs_chunk_alloc(h, space_info, btrfs_get_alloc_profile(fs_info, flags),
CHUNK_ALLOC_NO_FORCE);
}
@@ -795,8 +806,7 @@ alloc_fail:
if (num_bytes)
btrfs_block_rsv_release(fs_info, trans_rsv, num_bytes, NULL);
if (delayed_refs_bytes)
- btrfs_space_info_free_bytes_may_use(fs_info, trans_rsv->space_info,
- delayed_refs_bytes);
+ btrfs_space_info_free_bytes_may_use(trans_rsv->space_info, delayed_refs_bytes);
reserve_fail:
btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
return ERR_PTR(ret);
@@ -1016,13 +1026,18 @@ static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
struct btrfs_fs_info *fs_info = trans->fs_info;
if (!trans->block_rsv) {
- ASSERT(!trans->bytes_reserved);
- ASSERT(!trans->delayed_refs_bytes_reserved);
+ ASSERT(trans->bytes_reserved == 0,
+ "trans->bytes_reserved=%llu", trans->bytes_reserved);
+ ASSERT(trans->delayed_refs_bytes_reserved == 0,
+ "trans->delayed_refs_bytes_reserved=%llu",
+ trans->delayed_refs_bytes_reserved);
return;
}
if (!trans->bytes_reserved) {
- ASSERT(!trans->delayed_refs_bytes_reserved);
+ ASSERT(trans->delayed_refs_bytes_reserved == 0,
+ "trans->delayed_refs_bytes_reserved=%llu",
+ trans->delayed_refs_bytes_reserved);
return;
}
@@ -1121,13 +1136,13 @@ int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
u64 start = 0;
u64 end;
- while (find_first_extent_bit(dirty_pages, start, &start, &end,
- mark, &cached_state)) {
+ while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end,
+ mark, &cached_state)) {
bool wait_writeback = false;
- ret = convert_extent_bit(dirty_pages, start, end,
- EXTENT_NEED_WAIT,
- mark, &cached_state);
+ ret = btrfs_convert_extent_bit(dirty_pages, start, end,
+ EXTENT_NEED_WAIT,
+ mark, &cached_state);
/*
* convert_extent_bit can return -ENOMEM, which is most of the
* time a temporary error. So when it happens, ignore the error
@@ -1148,8 +1163,8 @@ int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
if (!ret)
ret = filemap_fdatawrite_range(mapping, start, end);
if (!ret && wait_writeback)
- ret = filemap_fdatawait_range(mapping, start, end);
- free_extent_state(cached_state);
+ btrfs_btree_wait_writeback_range(fs_info, start, end);
+ btrfs_free_extent_state(cached_state);
if (ret)
break;
cached_state = NULL;
@@ -1168,14 +1183,13 @@ int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages)
{
- struct address_space *mapping = fs_info->btree_inode->i_mapping;
struct extent_state *cached_state = NULL;
u64 start = 0;
u64 end;
int ret = 0;
- while (find_first_extent_bit(dirty_pages, start, &start, &end,
- EXTENT_NEED_WAIT, &cached_state)) {
+ while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end,
+ EXTENT_NEED_WAIT, &cached_state)) {
/*
* Ignore -ENOMEM errors returned by clear_extent_bit().
* When committing the transaction, we'll remove any entries
@@ -1184,13 +1198,13 @@ static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
* concurrently - we do it only at transaction commit time when
* it's safe to do it (through extent_io_tree_release()).
*/
- ret = clear_extent_bit(dirty_pages, start, end,
- EXTENT_NEED_WAIT, &cached_state);
+ ret = btrfs_clear_extent_bit(dirty_pages, start, end,
+ EXTENT_NEED_WAIT, &cached_state);
if (ret == -ENOMEM)
ret = 0;
if (!ret)
- ret = filemap_fdatawait_range(mapping, start, end);
- free_extent_state(cached_state);
+ btrfs_btree_wait_writeback_range(fs_info, start, end);
+ btrfs_free_extent_state(cached_state);
if (ret)
break;
cached_state = NULL;
@@ -1204,15 +1218,15 @@ static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages)
{
bool errors = false;
- int err;
+ int ret;
- err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
+ ret = __btrfs_wait_marked_extents(fs_info, dirty_pages);
if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
errors = true;
- if (errors && !err)
- err = -EIO;
- return err;
+ if (errors && !ret)
+ ret = -EIO;
+ return ret;
}
int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
@@ -1220,22 +1234,23 @@ int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
struct btrfs_fs_info *fs_info = log_root->fs_info;
struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
bool errors = false;
- int err;
+ int ret;
- ASSERT(btrfs_root_id(log_root) == BTRFS_TREE_LOG_OBJECTID);
+ ASSERT(btrfs_root_id(log_root) == BTRFS_TREE_LOG_OBJECTID,
+ "root_id(log_root)=%llu", btrfs_root_id(log_root));
- err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
- if ((mark & EXTENT_DIRTY) &&
+ ret = __btrfs_wait_marked_extents(fs_info, dirty_pages);
+ if ((mark & EXTENT_DIRTY_LOG1) &&
test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
errors = true;
- if ((mark & EXTENT_NEW) &&
+ if ((mark & EXTENT_DIRTY_LOG2) &&
test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
errors = true;
- if (errors && !err)
- err = -EIO;
- return err;
+ if (errors && !ret)
+ ret = -EIO;
+ return ret;
}
/*
@@ -1258,7 +1273,7 @@ static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
blk_finish_plug(&plug);
ret2 = btrfs_wait_extents(fs_info, dirty_pages);
- extent_io_tree_release(&trans->transaction->dirty_pages);
+ btrfs_extent_io_tree_release(&trans->transaction->dirty_pages);
if (ret)
return ret;
@@ -1320,7 +1335,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
struct btrfs_fs_info *fs_info = trans->fs_info;
struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
struct list_head *io_bgs = &trans->transaction->io_bgs;
- struct list_head *next;
struct extent_buffer *eb;
int ret;
@@ -1328,7 +1342,8 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
* At this point no one can be using this transaction to modify any tree
* and no one can start another transaction to modify any tree either.
*/
- ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING);
+ ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING,
+ "trans->transaction->state=%d", trans->transaction->state);
eb = btrfs_lock_root_node(fs_info->tree_root);
ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
@@ -1356,13 +1371,13 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
again:
while (!list_empty(&fs_info->dirty_cowonly_roots)) {
struct btrfs_root *root;
- next = fs_info->dirty_cowonly_roots.next;
- list_del_init(next);
- root = list_entry(next, struct btrfs_root, dirty_list);
+
+ root = list_first_entry(&fs_info->dirty_cowonly_roots,
+ struct btrfs_root, dirty_list);
clear_bit(BTRFS_ROOT_DIRTY, &root->state);
+ list_move_tail(&root->dirty_list,
+ &trans->transaction->switch_commits);
- list_add_tail(&root->dirty_list,
- &trans->transaction->switch_commits);
ret = update_cowonly_root(trans, root);
if (ret)
return ret;
@@ -1462,7 +1477,8 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
* At this point no one can be using this transaction to modify any tree
* and no one can start another transaction to modify any tree either.
*/
- ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING);
+ ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING,
+ "trans->transaction->state=%d", trans->transaction->state);
spin_lock(&fs_info->fs_roots_radix_lock);
while (1) {
@@ -1480,9 +1496,15 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
* At this point we can neither have tasks logging inodes
* from a root nor trying to commit a log tree.
*/
- ASSERT(atomic_read(&root->log_writers) == 0);
- ASSERT(atomic_read(&root->log_commit[0]) == 0);
- ASSERT(atomic_read(&root->log_commit[1]) == 0);
+ ASSERT(atomic_read(&root->log_writers) == 0,
+ "atomic_read(&root->log_writers)=%d",
+ atomic_read(&root->log_writers));
+ ASSERT(atomic_read(&root->log_commit[0]) == 0,
+ "atomic_read(&root->log_commit[0])=%d",
+ atomic_read(&root->log_commit[0]));
+ ASSERT(atomic_read(&root->log_commit[1]) == 0,
+ "atomic_read(&root->log_commit[1])=%d",
+ atomic_read(&root->log_commit[1]));
radix_tree_tag_clear(&fs_info->fs_roots_radix,
(unsigned long)btrfs_root_id(root),
@@ -1563,7 +1585,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
* qgroup counters could end up wrong.
*/
ret = btrfs_run_delayed_refs(trans, U64_MAX);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -1634,8 +1656,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_root *root = pending->root;
struct btrfs_root *parent_root;
struct btrfs_block_rsv *rsv;
- struct inode *parent_inode = &pending->dir->vfs_inode;
- struct btrfs_path *path;
+ struct btrfs_inode *parent_inode = pending->dir;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_dir_item *dir_item;
struct extent_buffer *tmp;
struct extent_buffer *old;
@@ -1660,7 +1682,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
* filesystem.
*/
nofs_flags = memalloc_nofs_save();
- pending->error = fscrypt_setup_filename(parent_inode,
+ pending->error = fscrypt_setup_filename(&parent_inode->vfs_inode,
&pending->dentry->d_name, 0,
&fname);
memalloc_nofs_restore(nofs_flags);
@@ -1688,34 +1710,30 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
goto clear_skip_qgroup;
}
- key.objectid = objectid;
- key.offset = (u64)-1;
- key.type = BTRFS_ROOT_ITEM_KEY;
-
rsv = trans->block_rsv;
trans->block_rsv = &pending->block_rsv;
trans->bytes_reserved = trans->block_rsv->reserved;
trace_btrfs_space_reservation(fs_info, "transaction",
trans->transid,
trans->bytes_reserved, 1);
- parent_root = BTRFS_I(parent_inode)->root;
+ parent_root = parent_inode->root;
ret = record_root_in_trans(trans, parent_root, 0);
if (ret)
goto fail;
- cur_time = current_time(parent_inode);
+ cur_time = current_time(&parent_inode->vfs_inode);
/*
* insert the directory item
*/
- ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
- if (ret) {
+ ret = btrfs_set_inode_index(parent_inode, &index);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
/* check if there is a file/dir which has the same name. */
dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
- btrfs_ino(BTRFS_I(parent_inode)),
+ btrfs_ino(parent_inode),
&fname.disk_name, 0);
if (dir_item != NULL && !IS_ERR(dir_item)) {
pending->error = -EEXIST;
@@ -1729,8 +1747,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
ret = btrfs_create_qgroup(trans, objectid);
if (ret && ret != -EEXIST) {
- btrfs_abort_transaction(trans, ret);
- goto fail;
+ if (unlikely(ret != -ENOTCONN || btrfs_qgroup_enabled(fs_info))) {
+ btrfs_abort_transaction(trans, ret);
+ goto fail;
+ }
}
/*
@@ -1740,13 +1760,13 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
* snapshot
*/
ret = btrfs_run_delayed_items(trans);
- if (ret) { /* Transaction aborted */
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
ret = record_root_in_trans(trans, root, 0);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1781,7 +1801,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
old = btrfs_lock_root_node(root);
ret = btrfs_cow_block(trans, root, old, NULL, 0, &old,
BTRFS_NESTING_COW);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_tree_unlock(old);
free_extent_buffer(old);
btrfs_abort_transaction(trans, ret);
@@ -1792,21 +1812,23 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
/* clean up in any case */
btrfs_tree_unlock(old);
free_extent_buffer(old);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
/* see comments in should_cow_block() */
set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
- smp_wmb();
+ smp_mb__after_atomic();
btrfs_set_root_node(new_root_item, tmp);
/* record when the snapshot was created in key.offset */
+ key.objectid = objectid;
+ key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = trans->transid;
ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
btrfs_tree_unlock(tmp);
free_extent_buffer(tmp);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1816,9 +1838,9 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
*/
ret = btrfs_add_root_ref(trans, objectid,
btrfs_root_id(parent_root),
- btrfs_ino(BTRFS_I(parent_inode)), index,
+ btrfs_ino(parent_inode), index,
&fname.disk_name);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1833,7 +1855,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
}
ret = btrfs_reloc_post_snapshot(trans, pending);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1854,26 +1876,26 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
goto fail;
ret = btrfs_insert_dir_item(trans, &fname.disk_name,
- BTRFS_I(parent_inode), &key, BTRFS_FT_DIR,
+ parent_inode, &key, BTRFS_FT_DIR,
index);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
- btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
+ btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
fname.disk_name.len * 2);
- inode_set_mtime_to_ts(parent_inode,
- inode_set_ctime_current(parent_inode));
- ret = btrfs_update_inode_fallback(trans, BTRFS_I(parent_inode));
- if (ret) {
+ inode_set_mtime_to_ts(&parent_inode->vfs_inode,
+ inode_set_ctime_current(&parent_inode->vfs_inode));
+ ret = btrfs_update_inode_fallback(trans, parent_inode);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
ret = btrfs_uuid_tree_add(trans, new_root_item->uuid,
BTRFS_UUID_KEY_SUBVOL,
objectid);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1881,7 +1903,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
objectid);
- if (ret && ret != -EEXIST) {
+ if (unlikely(ret && ret != -EEXIST)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1899,7 +1921,6 @@ free_fname:
free_pending:
kfree(new_root_item);
pending->root_item = NULL;
- btrfs_free_path(path);
pending->path = NULL;
return ret;
@@ -2095,7 +2116,14 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
+ /*
+ * Not strictly necessary to lock, as no other task will be using a
+ * block_group on the new_bgs list during a transaction abort.
+ */
+ spin_lock(&fs_info->unused_bgs_lock);
list_del_init(&block_group->bg_list);
+ btrfs_put_block_group(block_group);
+ spin_unlock(&fs_info->unused_bgs_lock);
}
}
@@ -2145,18 +2173,25 @@ static void add_pending_snapshot(struct btrfs_trans_handle *trans)
return;
lockdep_assert_held(&trans->fs_info->trans_lock);
- ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_PREP);
+ ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_PREP,
+ "cur_trans->state=%d", cur_trans->state);
list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
}
-static void update_commit_stats(struct btrfs_fs_info *fs_info, ktime_t interval)
+static void update_commit_stats(struct btrfs_fs_info *fs_info)
{
+ ktime_t now = ktime_get_ns();
+ ktime_t interval = now - fs_info->commit_stats.critical_section_start_time;
+
+ ASSERT(fs_info->commit_stats.critical_section_start_time);
+
fs_info->commit_stats.commit_count++;
fs_info->commit_stats.last_commit_dur = interval;
fs_info->commit_stats.max_commit_dur =
max_t(u64, fs_info->commit_stats.max_commit_dur, interval);
fs_info->commit_stats.total_commit_dur += interval;
+ fs_info->commit_stats.critical_section_start_time = 0;
}
int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
@@ -2165,10 +2200,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_transaction *prev_trans = NULL;
int ret;
- ktime_t start_time;
- ktime_t interval;
- ASSERT(refcount_read(&trans->use_count) == 1);
+ ASSERT(refcount_read(&trans->use_count) == 1,
+ "refcount_read(&trans->use_count)=%d", refcount_read(&trans->use_count));
btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
clear_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags);
@@ -2257,14 +2291,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
wake_up(&fs_info->transaction_blocked_wait);
btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
- if (cur_trans->list.prev != &fs_info->trans_list) {
+ if (!list_is_first(&cur_trans->list, &fs_info->trans_list)) {
enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
if (trans->in_fsync)
want_state = TRANS_STATE_SUPER_COMMITTED;
- prev_trans = list_entry(cur_trans->list.prev,
- struct btrfs_transaction, list);
+ prev_trans = list_prev_entry(cur_trans, list);
if (prev_trans->state < want_state) {
refcount_inc(&prev_trans->use_count);
spin_unlock(&fs_info->trans_lock);
@@ -2300,8 +2333,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
* Get the time spent on the work done by the commit thread and not
* the time spent waiting on a previous commit
*/
- start_time = ktime_get_ns();
-
+ fs_info->commit_stats.critical_section_start_time = ktime_get_ns();
extwriter_counter_dec(cur_trans, trans->type);
ret = btrfs_start_delalloc_flush(fs_info);
@@ -2406,7 +2438,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
* them.
*
* We needn't worry that this operation will corrupt the snapshots,
- * because all the tree which are snapshoted will be forced to COW
+ * because all the trees that are snapshotted will be forced to COW
* the nodes and leaves.
*/
ret = btrfs_run_delayed_items(trans);
@@ -2533,6 +2565,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
if (ret)
goto scrub_continue;
+ update_commit_stats(fs_info);
/*
* We needn't acquire the lock here because there is no other task
* which can change it.
@@ -2541,7 +2574,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
wake_up(&cur_trans->commit_wait);
btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
- btrfs_finish_extent_commit(trans);
+ ret = btrfs_finish_extent_commit(trans);
+ if (ret)
+ goto scrub_continue;
if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
btrfs_clear_space_info_full(fs_info);
@@ -2567,8 +2602,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
trace_btrfs_transaction_commit(fs_info);
- interval = ktime_get_ns() - start_time;
-
btrfs_scrub_continue(fs_info);
if (current->journal_info == trans)
@@ -2576,8 +2609,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
kmem_cache_free(btrfs_trans_handle_cachep, trans);
- update_commit_stats(fs_info, interval);
-
return ret;
unlock_reloc:
@@ -2641,9 +2672,9 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info)
if (btrfs_header_backref_rev(root->node) <
BTRFS_MIXED_BACKREF_REV)
- ret = btrfs_drop_snapshot(root, 0, 0);
+ ret = btrfs_drop_snapshot(root, false, false);
else
- ret = btrfs_drop_snapshot(root, 1, 0);
+ ret = btrfs_drop_snapshot(root, true, false);
btrfs_put_root(root);
return (ret < 0) ? 0 : 1;
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 184fa5c0062a..18ef069197e5 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -14,10 +14,6 @@
#include <linux/wait.h>
#include "btrfs_inode.h"
#include "delayed-ref.h"
-#include "extent-io-tree.h"
-#include "block-rsv.h"
-#include "messages.h"
-#include "misc.h"
struct dentry;
struct inode;
@@ -227,7 +223,21 @@ static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
delayed_refs->qgroup_to_skip = 0;
}
-bool __cold abort_should_print_stack(int error);
+/*
+ * We want the transaction abort to print stack trace only for errors where the
+ * cause could be a bug, eg. due to ENOSPC, and not for common errors that are
+ * caused by external factors.
+ */
+static inline bool btrfs_abort_should_print_stack(int error)
+{
+ switch (error) {
+ case -EIO:
+ case -EROFS:
+ case -ENOMEM:
+ return false;
+ }
+ return true;
+}
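
A few example classifications, read straight off the switch above (expression-level sketch only, to spell out the policy the comment describes; nothing here is new API):

        /* Common/external errors: abort quietly, no stack trace. */
        btrfs_abort_should_print_stack(-EIO);    /* false */
        btrfs_abort_should_print_stack(-EROFS);  /* false */
        btrfs_abort_should_print_stack(-ENOMEM); /* false */
        /* Anything else, e.g. -ENOSPC or -EUCLEAN, may indicate a bug: dump the stack. */
        btrfs_abort_should_print_stack(-ENOSPC); /* true */
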
/*
* Call btrfs_abort_transaction as early as possible when an error condition is
@@ -240,7 +250,7 @@ do { \
if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \
&((trans)->fs_info->fs_state))) { \
__first = true; \
- if (WARN(abort_should_print_stack(error), \
+ if (WARN(btrfs_abort_should_print_stack(error), \
KERN_ERR \
"BTRFS: Transaction aborted (error %d)\n", \
(error))) { \
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 148d8cefa40e..c21c21adf61e 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -183,15 +183,16 @@ static bool check_prev_ino(struct extent_buffer *leaf,
/* Only these key->types needs to be checked */
ASSERT(key->type == BTRFS_XATTR_ITEM_KEY ||
key->type == BTRFS_INODE_REF_KEY ||
+ key->type == BTRFS_INODE_EXTREF_KEY ||
key->type == BTRFS_DIR_INDEX_KEY ||
key->type == BTRFS_DIR_ITEM_KEY ||
- key->type == BTRFS_EXTENT_DATA_KEY);
+ key->type == BTRFS_EXTENT_DATA_KEY, "key->type=%u", key->type);
/*
* Only subvolume trees along with their reloc trees need this check.
* Things like log tree doesn't follow this ino requirement.
*/
- if (!is_fstree(btrfs_header_owner(leaf)))
+ if (!btrfs_is_fstree(btrfs_header_owner(leaf)))
return true;
if (key->objectid == prev_key->objectid)
@@ -475,7 +476,7 @@ static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key,
* to be COWed to be relocated.
*/
if (unlikely(is_root_item && key->objectid == BTRFS_TREE_RELOC_OBJECTID &&
- !is_fstree(key->offset))) {
+ !btrfs_is_fstree(key->offset))) {
generic_err(leaf, slot,
"invalid reloc tree for root %lld, root id is not a subvolume tree",
key->offset);
@@ -493,7 +494,7 @@ static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key,
}
/* DIR_ITEM/INDEX/INODE_REF is not allowed to point to non-fs trees */
- if (unlikely(!is_fstree(key->objectid) && !is_root_item)) {
+ if (unlikely(!btrfs_is_fstree(key->objectid) && !is_root_item)) {
dir_item_err(leaf, slot,
"invalid location key objectid, have %llu expect [%llu, %llu]",
key->objectid, BTRFS_FIRST_FREE_OBJECTID,
@@ -764,22 +765,19 @@ static int check_block_group_item(struct extent_buffer *leaf,
return 0;
}
-__printf(4, 5)
+__printf(5, 6)
__cold
-static void chunk_err(const struct extent_buffer *leaf,
+static void chunk_err(const struct btrfs_fs_info *fs_info,
+ const struct extent_buffer *leaf,
const struct btrfs_chunk *chunk, u64 logical,
const char *fmt, ...)
{
- const struct btrfs_fs_info *fs_info = leaf->fs_info;
- bool is_sb;
+ bool is_sb = !leaf;
struct va_format vaf;
va_list args;
int i;
int slot = -1;
- /* Only superblock eb is able to have such small offset */
- is_sb = (leaf->start == BTRFS_SUPER_INFO_OFFSET);
-
if (!is_sb) {
/*
* Get the slot number by iterating through all slots, this
@@ -812,13 +810,17 @@ static void chunk_err(const struct extent_buffer *leaf,
/*
* The common chunk check which could also work on super block sys chunk array.
*
+ * If @leaf is NULL, then @chunk must be an on-stack chunk item.
+ * (Used for the superblock sys_chunk array, where fs_info->sectorsize is unreliable.)
+ *
* Return -EUCLEAN if anything is corrupted.
* Return 0 if everything is OK.
*/
-int btrfs_check_chunk_valid(struct extent_buffer *leaf,
- struct btrfs_chunk *chunk, u64 logical)
+int btrfs_check_chunk_valid(const struct btrfs_fs_info *fs_info,
+ const struct extent_buffer *leaf,
+ const struct btrfs_chunk *chunk, u64 logical,
+ u32 sectorsize)
{
- struct btrfs_fs_info *fs_info = leaf->fs_info;
u64 length;
u64 chunk_end;
u64 stripe_len;
@@ -826,63 +828,73 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
u16 sub_stripes;
u64 type;
u64 features;
+ u32 chunk_sector_size;
bool mixed = false;
int raid_index;
int nparity;
int ncopies;
- length = btrfs_chunk_length(leaf, chunk);
- stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
- num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
- sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
- type = btrfs_chunk_type(leaf, chunk);
+ if (leaf) {
+ length = btrfs_chunk_length(leaf, chunk);
+ stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
+ num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+ sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+ type = btrfs_chunk_type(leaf, chunk);
+ chunk_sector_size = btrfs_chunk_sector_size(leaf, chunk);
+ } else {
+ length = btrfs_stack_chunk_length(chunk);
+ stripe_len = btrfs_stack_chunk_stripe_len(chunk);
+ num_stripes = btrfs_stack_chunk_num_stripes(chunk);
+ sub_stripes = btrfs_stack_chunk_sub_stripes(chunk);
+ type = btrfs_stack_chunk_type(chunk);
+ chunk_sector_size = btrfs_stack_chunk_sector_size(chunk);
+ }
raid_index = btrfs_bg_flags_to_raid_index(type);
ncopies = btrfs_raid_array[raid_index].ncopies;
nparity = btrfs_raid_array[raid_index].nparity;
if (unlikely(!num_stripes)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk num_stripes, have %u", num_stripes);
return -EUCLEAN;
}
if (unlikely(num_stripes < ncopies)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk num_stripes < ncopies, have %u < %d",
num_stripes, ncopies);
return -EUCLEAN;
}
if (unlikely(nparity && num_stripes == nparity)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk num_stripes == nparity, have %u == %d",
num_stripes, nparity);
return -EUCLEAN;
}
- if (unlikely(!IS_ALIGNED(logical, fs_info->sectorsize))) {
- chunk_err(leaf, chunk, logical,
+ if (unlikely(!IS_ALIGNED(logical, sectorsize))) {
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk logical, have %llu should aligned to %u",
- logical, fs_info->sectorsize);
+ logical, sectorsize);
return -EUCLEAN;
}
- if (unlikely(btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize)) {
- chunk_err(leaf, chunk, logical,
+ if (unlikely(chunk_sector_size != sectorsize)) {
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk sectorsize, have %u expect %u",
- btrfs_chunk_sector_size(leaf, chunk),
- fs_info->sectorsize);
+ chunk_sector_size, sectorsize);
return -EUCLEAN;
}
- if (unlikely(!length || !IS_ALIGNED(length, fs_info->sectorsize))) {
- chunk_err(leaf, chunk, logical,
+ if (unlikely(!length || !IS_ALIGNED(length, sectorsize))) {
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk length, have %llu", length);
return -EUCLEAN;
}
if (unlikely(check_add_overflow(logical, length, &chunk_end))) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk logical start and length, have logical start %llu length %llu",
logical, length);
return -EUCLEAN;
}
if (unlikely(!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk stripe length: %llu",
stripe_len);
return -EUCLEAN;
@@ -896,30 +908,29 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
* Thus it should be a good way to catch obvious bitflips.
*/
if (unlikely(length >= btrfs_stripe_nr_to_offset(U32_MAX))) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"chunk length too large: have %llu limit %llu",
length, btrfs_stripe_nr_to_offset(U32_MAX));
return -EUCLEAN;
}
if (unlikely(type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
BTRFS_BLOCK_GROUP_PROFILE_MASK))) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"unrecognized chunk type: 0x%llx",
~(BTRFS_BLOCK_GROUP_TYPE_MASK |
- BTRFS_BLOCK_GROUP_PROFILE_MASK) &
- btrfs_chunk_type(leaf, chunk));
+ BTRFS_BLOCK_GROUP_PROFILE_MASK) & type);
return -EUCLEAN;
}
if (unlikely(!has_single_bit_set(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
return -EUCLEAN;
}
if (unlikely((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"missing chunk type flag, have 0x%llx one bit must be set in 0x%llx",
type, BTRFS_BLOCK_GROUP_TYPE_MASK);
return -EUCLEAN;
@@ -928,7 +939,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
if (unlikely((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
(type & (BTRFS_BLOCK_GROUP_METADATA |
BTRFS_BLOCK_GROUP_DATA)))) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"system chunk with data or metadata type: 0x%llx",
type);
return -EUCLEAN;
@@ -941,7 +952,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
if (!mixed) {
if (unlikely((type & BTRFS_BLOCK_GROUP_METADATA) &&
(type & BTRFS_BLOCK_GROUP_DATA))) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"mixed chunk type in non-mixed mode: 0x%llx", type);
return -EUCLEAN;
}
@@ -963,7 +974,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
num_stripes != btrfs_raid_array[BTRFS_RAID_DUP].dev_stripes) ||
((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
num_stripes != btrfs_raid_array[BTRFS_RAID_SINGLE].dev_stripes))) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid num_stripes:sub_stripes %u:%u for profile %llu",
num_stripes, sub_stripes,
type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
@@ -983,14 +994,15 @@ static int check_leaf_chunk_item(struct extent_buffer *leaf,
struct btrfs_chunk *chunk,
struct btrfs_key *key, int slot)
{
+ struct btrfs_fs_info *fs_info = leaf->fs_info;
int num_stripes;
if (unlikely(btrfs_item_size(leaf, slot) < sizeof(struct btrfs_chunk))) {
- chunk_err(leaf, chunk, key->offset,
+ chunk_err(fs_info, leaf, chunk, key->offset,
"invalid chunk item size: have %u expect [%zu, %u)",
btrfs_item_size(leaf, slot),
sizeof(struct btrfs_chunk),
- BTRFS_LEAF_DATA_SIZE(leaf->fs_info));
+ BTRFS_LEAF_DATA_SIZE(fs_info));
return -EUCLEAN;
}
@@ -1001,14 +1013,15 @@ static int check_leaf_chunk_item(struct extent_buffer *leaf,
if (unlikely(btrfs_chunk_item_size(num_stripes) !=
btrfs_item_size(leaf, slot))) {
- chunk_err(leaf, chunk, key->offset,
+ chunk_err(fs_info, leaf, chunk, key->offset,
"invalid chunk item size: have %u expect %lu",
btrfs_item_size(leaf, slot),
btrfs_chunk_item_size(num_stripes));
return -EUCLEAN;
}
out:
- return btrfs_check_chunk_valid(leaf, chunk, key->offset);
+ return btrfs_check_chunk_valid(fs_info, leaf, chunk, key->offset,
+ fs_info->sectorsize);
}
__printf(3, 4)
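
With the reworked signature, a caller that only has an on-stack chunk (the superblock sys_chunk array case) passes a NULL leaf and an explicit sector size instead of relying on fs_info->sectorsize. A hypothetical call site, purely illustrative since the real sys_chunk caller is not part of this hunk; chunk, logical and sectorsize are placeholder names:

        /* chunk points at a stack copy taken from the superblock sys_chunk array. */
        ret = btrfs_check_chunk_valid(fs_info, NULL, chunk, logical, sectorsize);
        if (ret < 0)
                return ret;
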
@@ -1197,7 +1210,7 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
/*
* For legacy root item, the members starting at generation_v2 will be
* all filled with 0.
- * And since we allow geneartion_v2 as 0, it will still pass the check.
+ * And since we allow generation_v2 as 0, it will still pass the check.
*/
read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot),
btrfs_item_size(leaf, slot));
@@ -1299,7 +1312,7 @@ static bool is_valid_dref_root(u64 rootid)
* - tree root
* For v1 space cache
*/
- return is_fstree(rootid) || rootid == BTRFS_DATA_RELOC_TREE_OBJECTID ||
+ return btrfs_is_fstree(rootid) || rootid == BTRFS_DATA_RELOC_TREE_OBJECTID ||
rootid == BTRFS_ROOT_TREE_OBJECTID;
}
@@ -1527,6 +1540,11 @@ static int check_extent_item(struct extent_buffer *leaf,
dref_offset, fs_info->sectorsize);
return -EUCLEAN;
}
+ if (unlikely(btrfs_extent_data_ref_count(leaf, dref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid data ref count, should have non-zero value");
+ return -EUCLEAN;
+ }
inline_refs += btrfs_extent_data_ref_count(leaf, dref);
break;
/* Contains parent bytenr and ref count */
@@ -1539,6 +1557,11 @@ static int check_extent_item(struct extent_buffer *leaf,
inline_offset, fs_info->sectorsize);
return -EUCLEAN;
}
+ if (unlikely(btrfs_shared_data_ref_count(leaf, sref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid shared data ref count, should have non-zero value");
+ return -EUCLEAN;
+ }
inline_refs += btrfs_shared_data_ref_count(leaf, sref);
break;
case BTRFS_EXTENT_OWNER_REF_KEY:
@@ -1549,7 +1572,7 @@ static int check_extent_item(struct extent_buffer *leaf,
inline_type);
return -EUCLEAN;
}
- if (inline_type < last_type) {
+ if (unlikely(inline_type < last_type)) {
extent_err(leaf, slot,
"inline ref out-of-order: has type %u, prev type %u",
inline_type, last_type);
@@ -1558,7 +1581,7 @@ static int check_extent_item(struct extent_buffer *leaf,
/* Type changed, allow the sequence starts from U64_MAX again. */
if (inline_type > last_type)
last_seq = U64_MAX;
- if (seq > last_seq) {
+ if (unlikely(seq > last_seq)) {
extent_err(leaf, slot,
"inline ref out-of-order: has type %u offset %llu seq 0x%llx, prev type %u seq 0x%llx",
inline_type, inline_offset, seq,
@@ -1595,10 +1618,9 @@ static int check_extent_item(struct extent_buffer *leaf,
if (unlikely(prev_end > key->objectid)) {
extent_err(leaf, slot,
- "previous extent [%llu %u %llu] overlaps current extent [%llu %u %llu]",
- prev_key->objectid, prev_key->type,
- prev_key->offset, key->objectid, key->type,
- key->offset);
+ "previous extent " BTRFS_KEY_FMT " overlaps current extent " BTRFS_KEY_FMT,
+ BTRFS_KEY_FMT_VALUE(prev_key),
+ BTRFS_KEY_FMT_VALUE(key));
return -EUCLEAN;
}
}
@@ -1611,8 +1633,18 @@ static int check_simple_keyed_refs(struct extent_buffer *leaf,
{
u32 expect_item_size = 0;
- if (key->type == BTRFS_SHARED_DATA_REF_KEY)
+ if (key->type == BTRFS_SHARED_DATA_REF_KEY) {
+ struct btrfs_shared_data_ref *sref;
+
+ sref = btrfs_item_ptr(leaf, slot, struct btrfs_shared_data_ref);
+ if (unlikely(btrfs_shared_data_ref_count(leaf, sref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid shared data backref count, should have non-zero value");
+ return -EUCLEAN;
+ }
+
expect_item_size = sizeof(struct btrfs_shared_data_ref);
+ }
if (unlikely(btrfs_item_size(leaf, slot) != expect_item_size)) {
generic_err(leaf, slot,
@@ -1689,6 +1721,11 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
offset, leaf->fs_info->sectorsize);
return -EUCLEAN;
}
+ if (unlikely(btrfs_extent_data_ref_count(leaf, dref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid extent data backref count, should have non-zero value");
+ return -EUCLEAN;
+ }
}
return 0;
}
@@ -1719,10 +1756,10 @@ static int check_inode_ref(struct extent_buffer *leaf,
while (ptr < end) {
u16 namelen;
- if (unlikely(ptr + sizeof(iref) > end)) {
+ if (unlikely(ptr + sizeof(*iref) > end)) {
inode_ref_err(leaf, slot,
"inode ref overflow, ptr %lu end %lu inode_ref_size %zu",
- ptr, end, sizeof(iref));
+ ptr, end, sizeof(*iref));
return -EUCLEAN;
}
@@ -1745,6 +1782,39 @@ static int check_inode_ref(struct extent_buffer *leaf,
return 0;
}
+static int check_inode_extref(struct extent_buffer *leaf,
+ struct btrfs_key *key, struct btrfs_key *prev_key,
+ int slot)
+{
+ unsigned long ptr = btrfs_item_ptr_offset(leaf, slot);
+ unsigned long end = ptr + btrfs_item_size(leaf, slot);
+
+ if (unlikely(!check_prev_ino(leaf, key, slot, prev_key)))
+ return -EUCLEAN;
+
+ while (ptr < end) {
+ struct btrfs_inode_extref *extref = (struct btrfs_inode_extref *)ptr;
+ u16 namelen;
+
+ if (unlikely(ptr + sizeof(*extref) > end)) {
+ inode_ref_err(leaf, slot,
+ "inode extref overflow, ptr %lu end %lu inode_extref size %zu",
+ ptr, end, sizeof(*extref));
+ return -EUCLEAN;
+ }
+
+ namelen = btrfs_inode_extref_name_len(leaf, extref);
+ if (unlikely(ptr + sizeof(*extref) + namelen > end)) {
+ inode_ref_err(leaf, slot,
+ "inode extref overflow, ptr %lu end %lu namelen %u",
+ ptr, end, namelen);
+ return -EUCLEAN;
+ }
+ ptr += sizeof(*extref) + namelen;
+ }
+ return 0;
+}
+
static int check_raid_stripe_extent(const struct extent_buffer *leaf,
const struct btrfs_key *key, int slot)
{
@@ -1856,6 +1926,9 @@ static enum btrfs_tree_block_status check_leaf_item(struct extent_buffer *leaf,
case BTRFS_INODE_REF_KEY:
ret = check_inode_ref(leaf, key, prev_key, slot);
break;
+ case BTRFS_INODE_EXTREF_KEY:
+ ret = check_inode_extref(leaf, key, prev_key, slot);
+ break;
case BTRFS_BLOCK_GROUP_ITEM_KEY:
ret = check_block_group_item(leaf, key, slot);
break;
@@ -1892,7 +1965,7 @@ static enum btrfs_tree_block_status check_leaf_item(struct extent_buffer *leaf,
break;
}
- if (ret)
+ if (unlikely(ret))
return BTRFS_TREE_BLOCK_INVALID_ITEM;
return BTRFS_TREE_BLOCK_CLEAN;
}
@@ -1986,10 +2059,9 @@ enum btrfs_tree_block_status __btrfs_check_leaf(struct extent_buffer *leaf)
/* Make sure the keys are in the right order */
if (unlikely(btrfs_comp_cpu_keys(&prev_key, &key) >= 0)) {
generic_err(leaf, slot,
- "bad key order, prev (%llu %u %llu) current (%llu %u %llu)",
- prev_key.objectid, prev_key.type,
- prev_key.offset, key.objectid, key.type,
- key.offset);
+ "bad key order, prev " BTRFS_KEY_FMT " current " BTRFS_KEY_FMT,
+ BTRFS_KEY_FMT_VALUE(&prev_key),
+ BTRFS_KEY_FMT_VALUE(&key));
return BTRFS_TREE_BLOCK_BAD_KEY_ORDER;
}
@@ -2107,10 +2179,9 @@ enum btrfs_tree_block_status __btrfs_check_node(struct extent_buffer *node)
if (unlikely(btrfs_comp_cpu_keys(&key, &next_key) >= 0)) {
generic_err(node, slot,
- "bad key order, current (%llu %u %llu) next (%llu %u %llu)",
- key.objectid, key.type, key.offset,
- next_key.objectid, next_key.type,
- next_key.offset);
+ "bad key order, current " BTRFS_KEY_FMT " next " BTRFS_KEY_FMT,
+ BTRFS_KEY_FMT_VALUE(&key),
+ BTRFS_KEY_FMT_VALUE(&next_key));
return BTRFS_TREE_BLOCK_BAD_KEY_ORDER;
}
}
@@ -2130,7 +2201,7 @@ ALLOW_ERROR_INJECTION(btrfs_check_node, ERRNO);
int btrfs_check_eb_owner(const struct extent_buffer *eb, u64 root_owner)
{
- const bool is_subvol = is_fstree(root_owner);
+ const bool is_subvol = btrfs_is_fstree(root_owner);
const u64 eb_owner = btrfs_header_owner(eb);
/*
@@ -2172,7 +2243,7 @@ int btrfs_check_eb_owner(const struct extent_buffer *eb, u64 root_owner)
* For subvolume trees, owners can mismatch, but they should all belong
* to subvolume trees.
*/
- if (unlikely(is_subvol != is_fstree(eb_owner))) {
+ if (unlikely(is_subvol != btrfs_is_fstree(eb_owner))) {
btrfs_crit(eb->fs_info,
"corrupted %s, root=%llu block=%llu owner mismatch, have %llu expect [%llu, %llu]",
btrfs_header_level(eb) == 0 ? "leaf" : "node",
@@ -2192,13 +2263,12 @@ int btrfs_verify_level_key(struct extent_buffer *eb,
int ret;
found_level = btrfs_header_level(eb);
- if (found_level != check->level) {
- WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
- KERN_ERR "BTRFS: tree level check failed\n");
+ if (unlikely(found_level != check->level)) {
+ DEBUG_WARN();
btrfs_err(fs_info,
"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
eb->start, check->level, found_level);
- return -EIO;
+ return -EUCLEAN;
}
if (!check->has_first_key)
@@ -2214,11 +2284,11 @@ int btrfs_verify_level_key(struct extent_buffer *eb,
return 0;
/* We have @first_key, so this @eb must have at least one item */
- if (btrfs_header_nritems(eb) == 0) {
+ if (unlikely(btrfs_header_nritems(eb) == 0)) {
btrfs_err(fs_info,
"invalid tree nritems, bytenr=%llu nritems=0 expect >0",
eb->start);
- WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ DEBUG_WARN();
return -EUCLEAN;
}
@@ -2226,11 +2296,10 @@ int btrfs_verify_level_key(struct extent_buffer *eb,
btrfs_node_key_to_cpu(eb, &found_key, 0);
else
btrfs_item_key_to_cpu(eb, &found_key, 0);
- ret = btrfs_comp_cpu_keys(&check->first_key, &found_key);
- if (ret) {
- WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
- KERN_ERR "BTRFS: tree first key check failed\n");
+ ret = btrfs_comp_cpu_keys(&check->first_key, &found_key);
+ if (unlikely(ret)) {
+ DEBUG_WARN();
btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
eb->start, check->transid, check->first_key.objectid,
diff --git a/fs/btrfs/tree-checker.h b/fs/btrfs/tree-checker.h
index db67f96cbe4b..eb201f4ec3c7 100644
--- a/fs/btrfs/tree-checker.h
+++ b/fs/btrfs/tree-checker.h
@@ -10,6 +10,7 @@
#include <uapi/linux/btrfs_tree.h>
struct extent_buffer;
+struct btrfs_fs_info;
struct btrfs_chunk;
struct btrfs_key;
@@ -66,8 +67,10 @@ enum btrfs_tree_block_status __btrfs_check_node(struct extent_buffer *node);
int btrfs_check_leaf(struct extent_buffer *leaf);
int btrfs_check_node(struct extent_buffer *node);
-int btrfs_check_chunk_valid(struct extent_buffer *leaf,
- struct btrfs_chunk *chunk, u64 logical);
+int btrfs_check_chunk_valid(const struct btrfs_fs_info *fs_info,
+ const struct extent_buffer *leaf,
+ const struct btrfs_chunk *chunk, u64 logical,
+ u32 sectorsize);
int btrfs_check_eb_owner(const struct extent_buffer *eb, u64 root_owner);
int btrfs_verify_level_key(struct extent_buffer *eb,
const struct btrfs_tree_parent_check *check);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index c8d6587688b3..fff37c8d96a4 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -27,7 +27,9 @@
#include "file-item.h"
#include "file.h"
#include "orphan.h"
+#include "print-tree.h"
#include "tree-checker.h"
+#include "delayed-inode.h"
#define MAX_CONFLICT_INODES 10
@@ -101,18 +103,135 @@ enum {
LOG_WALK_REPLAY_ALL,
};
+/*
+ * The walk control struct is used to pass state down the chain when processing
+ * the log tree. The stage field tells us which part of the log tree processing
+ * we are currently doing.
+ */
+struct walk_control {
+ /*
+ * Signal that we are freeing the metadata extents of a log tree.
+ * This is used at transaction commit time while freeing a log tree.
+ */
+ bool free;
+
+ /*
+ * Signal that we are pinning the metadata extents of a log tree and the
+ * data extents its leaves point to (if using mixed block groups).
+ * This happens in the first stage of log replay to ensure that during
+ * replay, while we are modifying subvolume trees, we don't overwrite
+ * the metadata extents of log trees.
+ */
+ bool pin;
+
+ /* What stage of the replay code we're currently in. */
+ int stage;
+
+ /*
+ * Ignore any items from the inode currently being processed. Needs
+ * to be set every time we find a BTRFS_INODE_ITEM_KEY.
+ */
+ bool ignore_cur_inode;
+
+ /*
+ * The root we are currently replaying to. This is NULL for the replay
+ * stage LOG_WALK_PIN_ONLY.
+ */
+ struct btrfs_root *root;
+
+ /* The log tree we are currently processing (not NULL for any stage). */
+ struct btrfs_root *log;
+
+ /* The transaction handle used for replaying all log trees. */
+ struct btrfs_trans_handle *trans;
+
+ /*
+ * The function that gets used to process blocks we find in the tree.
+ * Note the extent_buffer might not be up to date when it is passed in,
+ * and it must be checked or read if you need the data inside it.
+ */
+ int (*process_func)(struct extent_buffer *eb,
+ struct walk_control *wc, u64 gen, int level);
+
+ /*
+ * The following are used only when stage is >= LOG_WALK_REPLAY_INODES
+ * and by the replay_one_buffer() callback.
+ */
+
+ /* The current log leaf being processed. */
+ struct extent_buffer *log_leaf;
+ /* The key being processed of the current log leaf. */
+ struct btrfs_key log_key;
+ /* The slot being processed of the current log leaf. */
+ int log_slot;
+
+ /* A path used for searches and modifications to subvolume trees. */
+ struct btrfs_path *subvol_path;
+};
+
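
To make the field descriptions above concrete, here is a minimal sketch of how the pin-only first pass of log replay might fill the structure in (field names are taken from the struct above; the actual initialization lives in the replay entry point, which is outside this hunk):

        struct walk_control wc = {
                .pin = true,                        /* pin log extents, nothing is replayed yet */
                .stage = LOG_WALK_PIN_ONLY,
                .trans = trans,                     /* the shared replay transaction handle */
                .log = log_root,                    /* log tree currently being walked */
                .root = NULL,                       /* no subvolume root at this stage */
                .process_func = process_one_buffer,
        };
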
+static void do_abort_log_replay(struct walk_control *wc, const char *function,
+ unsigned int line, int error, const char *fmt, ...)
+{
+ struct btrfs_fs_info *fs_info = wc->trans->fs_info;
+ struct va_format vaf;
+ va_list args;
+
+ /*
+ * Do nothing if we already aborted, to avoid dumping leaves again which
+ * can be verbose. Furthermore, only the first call is useful since it
+ * is where we have a problem. Note that we do not use the flag
+ * BTRFS_FS_STATE_TRANS_ABORTED because log replay calls functions that
+ * are outside of tree-log.c that can abort transactions (such as
+ * btrfs_add_link() for example), so if that happens we still want to
+ * dump all log replay specific information below.
+ */
+ if (test_and_set_bit(BTRFS_FS_STATE_LOG_REPLAY_ABORTED, &fs_info->fs_state))
+ return;
+
+ btrfs_abort_transaction(wc->trans, error);
+
+ if (wc->subvol_path->nodes[0]) {
+ btrfs_crit(fs_info,
+ "subvolume (root %llu) leaf currently being processed:",
+ btrfs_root_id(wc->root));
+ btrfs_print_leaf(wc->subvol_path->nodes[0]);
+ }
+
+ if (wc->log_leaf) {
+ btrfs_crit(fs_info,
+"log tree (for root %llu) leaf currently being processed (slot %d key " BTRFS_KEY_FMT "):",
+ btrfs_root_id(wc->root), wc->log_slot,
+ BTRFS_KEY_FMT_VALUE(&wc->log_key));
+ btrfs_print_leaf(wc->log_leaf);
+ }
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(fs_info,
+ "log replay failed in %s:%u for root %llu, stage %d, with error %d: %pV",
+ function, line, btrfs_root_id(wc->root), wc->stage, error, &vaf);
+
+ va_end(args);
+}
+
+/*
+ * Use this for aborting a transaction during log replay while we are down the
+ * call chain of replay_one_buffer(), so that we get a lot more useful
+ * information for debugging issues when compared to a plain call to
+ * btrfs_abort_transaction().
+ */
+#define btrfs_abort_log_replay(wc, error, fmt, args...) \
+ do_abort_log_replay((wc), __func__, __LINE__, (error), fmt, ##args)
+
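
And a hypothetical call site for the new helper, used the way the comment above suggests, from deep inside replay_one_buffer()'s call chain (the surrounding insert and message are made up for illustration; only the macro itself comes from this patch):

        ret = btrfs_insert_empty_item(wc->trans, root, wc->subvol_path,
                                      &wc->log_key, item_size);
        if (ret) {
                btrfs_abort_log_replay(wc, ret,
                                       "failed to insert item for inode %llu",
                                       wc->log_key.objectid);
                return ret;
        }
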
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode,
int inode_only,
struct btrfs_log_ctx *ctx);
-static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path, u64 objectid);
-static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_root *log,
- struct btrfs_path *path,
- u64 dirid, int del_all);
+static int link_to_fixup_dir(struct walk_control *wc, u64 objectid);
+static noinline int replay_dir_deletes(struct walk_control *wc,
+ u64 dirid, bool del_all);
static void wait_log_commit(struct btrfs_root *root, int transid);
/*
@@ -138,10 +257,13 @@ static void wait_log_commit(struct btrfs_root *root, int transid);
* and once to do all the other items.
*/
-static struct inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root)
+static struct btrfs_inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root)
{
unsigned int nofs_flag;
- struct inode *inode;
+ struct btrfs_inode *inode;
+
+ /* Only meant to be called for subvolume roots and not for log roots. */
+ ASSERT(btrfs_is_fstree(btrfs_root_id(root)), "root_id=%llu", btrfs_root_id(root));
/*
* We're holding a transaction handle whether we are logging or
@@ -297,54 +419,13 @@ void btrfs_end_log_trans(struct btrfs_root *root)
}
/*
- * the walk control struct is used to pass state down the chain when
- * processing the log tree. The stage field tells us which part
- * of the log tree processing we are currently doing. The others
- * are state fields used for that specific part
- */
-struct walk_control {
- /* should we free the extent on disk when done? This is used
- * at transaction commit time while freeing a log tree
- */
- int free;
-
- /* pin only walk, we record which extents on disk belong to the
- * log trees
- */
- int pin;
-
- /* what stage of the replay code we're currently in */
- int stage;
-
- /*
- * Ignore any items from the inode currently being processed. Needs
- * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
- * the LOG_WALK_REPLAY_INODES stage.
- */
- bool ignore_cur_inode;
-
- /* the root we are currently replaying */
- struct btrfs_root *replay_dest;
-
- /* the trans handle for the current replay */
- struct btrfs_trans_handle *trans;
-
- /* the function that gets used to process blocks we find in the
- * tree. Note the extent_buffer might not be up to date when it is
- * passed in, and it must be checked or read if you need the data
- * inside it
- */
- int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
- struct walk_control *wc, u64 gen, int level);
-};
-
-/*
* process_func used to pin down extents, write them or wait on them
*/
-static int process_one_buffer(struct btrfs_root *log,
- struct extent_buffer *eb,
+static int process_one_buffer(struct extent_buffer *eb,
struct walk_control *wc, u64 gen, int level)
{
+ struct btrfs_root *log = wc->log;
+ struct btrfs_trans_handle *trans = wc->trans;
struct btrfs_fs_info *fs_info = log->fs_info;
int ret = 0;
@@ -359,29 +440,40 @@ static int process_one_buffer(struct btrfs_root *log,
};
ret = btrfs_read_extent_buffer(eb, &check);
- if (ret)
+ if (unlikely(ret)) {
+ if (trans)
+ btrfs_abort_transaction(trans, ret);
+ else
+ btrfs_handle_fs_error(fs_info, ret, NULL);
return ret;
+ }
}
if (wc->pin) {
- ret = btrfs_pin_extent_for_log_replay(wc->trans, eb);
- if (ret)
+ ASSERT(trans != NULL);
+ ret = btrfs_pin_extent_for_log_replay(trans, eb);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
return ret;
+ }
- if (btrfs_buffer_uptodate(eb, gen, 0) &&
- btrfs_header_level(eb) == 0)
+ if (btrfs_buffer_uptodate(eb, gen, false) && level == 0) {
ret = btrfs_exclude_logged_extents(eb);
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
+ }
}
return ret;
}
/*
- * Item overwrite used by replay and tree logging. eb, slot and key all refer
- * to the src data we are copying out.
+ * Item overwrite used by log replay. The given log tree leaf, slot and key
+ * from the walk_control structure all refer to the source data we are copying
+ * out.
*
- * root is the tree we are copying into, and path is a scratch
- * path for use in this function (it should be released on entry and
- * will be released on exit).
+ * The given root is for the tree we are copying into, and path is a scratch
+ * path for use in this function (it should be released on entry and will be
+ * released on exit).
*
* If the key is already in the destination tree the existing item is
* overwritten. If the existing item isn't big enough, it is extended.
@@ -389,19 +481,19 @@ static int process_one_buffer(struct btrfs_root *log,
*
* If the key isn't in the destination yet, a new item is inserted.
*/
-static int overwrite_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct extent_buffer *eb, int slot,
- struct btrfs_key *key)
+static int overwrite_item(struct walk_control *wc)
{
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = wc->root;
int ret;
u32 item_size;
u64 saved_i_size = 0;
int save_old_i_size = 0;
unsigned long src_ptr;
unsigned long dst_ptr;
- bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
+ struct extent_buffer *dst_eb;
+ int dst_slot;
+ const bool is_inode_item = (wc->log_key.type == BTRFS_INODE_ITEM_KEY);
/*
* This is only used during log replay, so the root is always from a
@@ -410,45 +502,46 @@ static int overwrite_item(struct btrfs_trans_handle *trans,
* the leaf before writing into the log tree. See the comments at
* copy_items() for more details.
*/
- ASSERT(btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID);
+ ASSERT(btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID, "root_id=%llu", btrfs_root_id(root));
- item_size = btrfs_item_size(eb, slot);
- src_ptr = btrfs_item_ptr_offset(eb, slot);
+ item_size = btrfs_item_size(wc->log_leaf, wc->log_slot);
+ src_ptr = btrfs_item_ptr_offset(wc->log_leaf, wc->log_slot);
/* Look for the key in the destination tree. */
- ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
- if (ret < 0)
+ ret = btrfs_search_slot(NULL, root, &wc->log_key, wc->subvol_path, 0, 0);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to search subvolume tree for key " BTRFS_KEY_FMT " root %llu",
+ BTRFS_KEY_FMT_VALUE(&wc->log_key),
+ btrfs_root_id(root));
return ret;
+ }
+
+ dst_eb = wc->subvol_path->nodes[0];
+ dst_slot = wc->subvol_path->slots[0];
if (ret == 0) {
char *src_copy;
- char *dst_copy;
- u32 dst_size = btrfs_item_size(path->nodes[0],
- path->slots[0]);
+ const u32 dst_size = btrfs_item_size(dst_eb, dst_slot);
+
if (dst_size != item_size)
goto insert;
if (item_size == 0) {
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
return 0;
}
- dst_copy = kmalloc(item_size, GFP_NOFS);
src_copy = kmalloc(item_size, GFP_NOFS);
- if (!dst_copy || !src_copy) {
- btrfs_release_path(path);
- kfree(dst_copy);
- kfree(src_copy);
+ if (!src_copy) {
+ btrfs_abort_log_replay(wc, -ENOMEM,
+ "failed to allocate memory for log leaf item");
return -ENOMEM;
}
- read_extent_buffer(eb, src_copy, src_ptr, item_size);
+ read_extent_buffer(wc->log_leaf, src_copy, src_ptr, item_size);
+ dst_ptr = btrfs_item_ptr_offset(dst_eb, dst_slot);
+ ret = memcmp_extent_buffer(dst_eb, src_copy, dst_ptr, item_size);
- dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
- read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
- item_size);
- ret = memcmp(dst_copy, src_copy, item_size);
-
- kfree(dst_copy);
kfree(src_copy);
/*
* they have the same contents, just return, this saves
@@ -457,7 +550,7 @@ static int overwrite_item(struct btrfs_trans_handle *trans,
* sync
*/
if (ret == 0) {
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
return 0;
}
@@ -465,28 +558,28 @@ static int overwrite_item(struct btrfs_trans_handle *trans,
* We need to load the old nbytes into the inode so when we
* replay the extents we've logged we get the right nbytes.
*/
- if (inode_item) {
+ if (is_inode_item) {
struct btrfs_inode_item *item;
u64 nbytes;
u32 mode;
- item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ item = btrfs_item_ptr(dst_eb, dst_slot,
struct btrfs_inode_item);
- nbytes = btrfs_inode_nbytes(path->nodes[0], item);
- item = btrfs_item_ptr(eb, slot,
+ nbytes = btrfs_inode_nbytes(dst_eb, item);
+ item = btrfs_item_ptr(wc->log_leaf, wc->log_slot,
struct btrfs_inode_item);
- btrfs_set_inode_nbytes(eb, item, nbytes);
+ btrfs_set_inode_nbytes(wc->log_leaf, item, nbytes);
/*
* If this is a directory we need to reset the i_size to
* 0 so that we can set it up properly when replaying
* the rest of the items in this log.
*/
- mode = btrfs_inode_mode(eb, item);
+ mode = btrfs_inode_mode(wc->log_leaf, item);
if (S_ISDIR(mode))
- btrfs_set_inode_size(eb, item, 0);
+ btrfs_set_inode_size(wc->log_leaf, item, 0);
}
- } else if (inode_item) {
+ } else if (is_inode_item) {
struct btrfs_inode_item *item;
u32 mode;
@@ -494,40 +587,43 @@ static int overwrite_item(struct btrfs_trans_handle *trans,
* New inode, set nbytes to 0 so that the nbytes comes out
* properly when we replay the extents.
*/
- item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
- btrfs_set_inode_nbytes(eb, item, 0);
+ item = btrfs_item_ptr(wc->log_leaf, wc->log_slot, struct btrfs_inode_item);
+ btrfs_set_inode_nbytes(wc->log_leaf, item, 0);
/*
* If this is a directory we need to reset the i_size to 0 so
* that we can set it up properly when replaying the rest of
* the items in this log.
*/
- mode = btrfs_inode_mode(eb, item);
+ mode = btrfs_inode_mode(wc->log_leaf, item);
if (S_ISDIR(mode))
- btrfs_set_inode_size(eb, item, 0);
+ btrfs_set_inode_size(wc->log_leaf, item, 0);
}
insert:
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
/* try to insert the key into the destination tree */
- path->skip_release_on_error = 1;
- ret = btrfs_insert_empty_item(trans, root, path,
- key, item_size);
- path->skip_release_on_error = 0;
+ wc->subvol_path->skip_release_on_error = true;
+ ret = btrfs_insert_empty_item(trans, root, wc->subvol_path, &wc->log_key, item_size);
+ wc->subvol_path->skip_release_on_error = false;
+
+ dst_eb = wc->subvol_path->nodes[0];
+ dst_slot = wc->subvol_path->slots[0];
/* make sure any existing item is the correct size */
if (ret == -EEXIST || ret == -EOVERFLOW) {
- u32 found_size;
- found_size = btrfs_item_size(path->nodes[0],
- path->slots[0]);
+ const u32 found_size = btrfs_item_size(dst_eb, dst_slot);
+
if (found_size > item_size)
- btrfs_truncate_item(trans, path, item_size, 1);
+ btrfs_truncate_item(trans, wc->subvol_path, item_size, 1);
else if (found_size < item_size)
- btrfs_extend_item(trans, path, item_size - found_size);
+ btrfs_extend_item(trans, wc->subvol_path, item_size - found_size);
} else if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to insert item for key " BTRFS_KEY_FMT,
+ BTRFS_KEY_FMT_VALUE(&wc->log_key));
return ret;
}
- dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
- path->slots[0]);
+ dst_ptr = btrfs_item_ptr_offset(dst_eb, dst_slot);
/* don't overwrite an existing inode if the generation number
* was logged as zero. This is done when the tree logging code
@@ -538,16 +634,15 @@ insert:
* state of the tree found in the subvolume, and i_size is modified
* as it goes
*/
- if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
+ if (is_inode_item && ret == -EEXIST) {
struct btrfs_inode_item *src_item;
struct btrfs_inode_item *dst_item;
src_item = (struct btrfs_inode_item *)src_ptr;
dst_item = (struct btrfs_inode_item *)dst_ptr;
- if (btrfs_inode_generation(eb, src_item) == 0) {
- struct extent_buffer *dst_eb = path->nodes[0];
- const u64 ino_size = btrfs_inode_size(eb, src_item);
+ if (btrfs_inode_generation(wc->log_leaf, src_item) == 0) {
+ const u64 ino_size = btrfs_inode_size(wc->log_leaf, src_item);
/*
* For regular files an ino_size == 0 is used only when
@@ -556,42 +651,39 @@ insert:
* case don't set the size of the inode in the fs/subvol
* tree, otherwise we would be throwing valid data away.
*/
- if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
+ if (S_ISREG(btrfs_inode_mode(wc->log_leaf, src_item)) &&
S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
ino_size != 0)
btrfs_set_inode_size(dst_eb, dst_item, ino_size);
goto no_copy;
}
- if (S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
- S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
+ if (S_ISDIR(btrfs_inode_mode(wc->log_leaf, src_item)) &&
+ S_ISDIR(btrfs_inode_mode(dst_eb, dst_item))) {
save_old_i_size = 1;
- saved_i_size = btrfs_inode_size(path->nodes[0],
- dst_item);
+ saved_i_size = btrfs_inode_size(dst_eb, dst_item);
}
}
- copy_extent_buffer(path->nodes[0], eb, dst_ptr,
- src_ptr, item_size);
+ copy_extent_buffer(dst_eb, wc->log_leaf, dst_ptr, src_ptr, item_size);
if (save_old_i_size) {
struct btrfs_inode_item *dst_item;
+
dst_item = (struct btrfs_inode_item *)dst_ptr;
- btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
+ btrfs_set_inode_size(dst_eb, dst_item, saved_i_size);
}
/* make sure the generation is filled in */
- if (key->type == BTRFS_INODE_ITEM_KEY) {
+ if (is_inode_item) {
struct btrfs_inode_item *dst_item;
+
dst_item = (struct btrfs_inode_item *)dst_ptr;
- if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
- btrfs_set_inode_generation(path->nodes[0], dst_item,
- trans->transid);
- }
+ if (btrfs_inode_generation(dst_eb, dst_item) == 0)
+ btrfs_set_inode_generation(dst_eb, dst_item, trans->transid);
}
no_copy:
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
return 0;
}
@@ -610,21 +702,6 @@ static int read_alloc_one_name(struct extent_buffer *eb, void *start, int len,
return 0;
}
-/*
- * simple helper to read an inode off the disk from a given root
- * This can only be called for subvolume roots and not for the log
- */
-static noinline struct inode *read_one_inode(struct btrfs_root *root,
- u64 objectid)
-{
- struct inode *inode;
-
- inode = btrfs_iget_logging(objectid, root);
- if (IS_ERR(inode))
- inode = NULL;
- return inode;
-}
-
/* replays a single extent in 'eb' at 'slot' with 'key' into the
* subvolume 'root'. path is released on entry and should be released
* on exit.
@@ -637,51 +714,53 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root,
* The extent is inserted into the file, dropping any existing extents
* from the file that overlap the new one.
*/
-static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct extent_buffer *eb, int slot,
- struct btrfs_key *key)
+static noinline int replay_one_extent(struct walk_control *wc)
{
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = wc->root;
struct btrfs_drop_extents_args drop_args = { 0 };
struct btrfs_fs_info *fs_info = root->fs_info;
int found_type;
u64 extent_end;
- u64 start = key->offset;
+ const u64 start = wc->log_key.offset;
u64 nbytes = 0;
+ u64 csum_start;
+ u64 csum_end;
+ LIST_HEAD(ordered_sums);
+ u64 offset;
+ unsigned long dest_offset;
+ struct btrfs_key ins;
struct btrfs_file_extent_item *item;
- struct inode *inode = NULL;
- unsigned long size;
+ struct btrfs_inode *inode = NULL;
int ret = 0;
- item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
- found_type = btrfs_file_extent_type(eb, item);
+ item = btrfs_item_ptr(wc->log_leaf, wc->log_slot, struct btrfs_file_extent_item);
+ found_type = btrfs_file_extent_type(wc->log_leaf, item);
if (found_type == BTRFS_FILE_EXTENT_REG ||
found_type == BTRFS_FILE_EXTENT_PREALLOC) {
- nbytes = btrfs_file_extent_num_bytes(eb, item);
- extent_end = start + nbytes;
-
- /*
- * We don't add to the inodes nbytes if we are prealloc or a
- * hole.
- */
- if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
- nbytes = 0;
+ extent_end = start + btrfs_file_extent_num_bytes(wc->log_leaf, item);
+ /* Holes don't take up space. */
+ if (btrfs_file_extent_disk_bytenr(wc->log_leaf, item) != 0)
+ nbytes = btrfs_file_extent_num_bytes(wc->log_leaf, item);
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
- size = btrfs_file_extent_ram_bytes(eb, item);
- nbytes = btrfs_file_extent_ram_bytes(eb, item);
- extent_end = ALIGN(start + size,
- fs_info->sectorsize);
+ nbytes = btrfs_file_extent_ram_bytes(wc->log_leaf, item);
+ extent_end = ALIGN(start + nbytes, fs_info->sectorsize);
} else {
- ret = 0;
- goto out;
+ btrfs_abort_log_replay(wc, -EUCLEAN,
+ "unexpected extent type=%d root=%llu inode=%llu offset=%llu",
+ found_type, btrfs_root_id(root),
+ wc->log_key.objectid, wc->log_key.offset);
+ return -EUCLEAN;
}
- inode = read_one_inode(root, key->objectid);
- if (!inode) {
- ret = -EIO;
- goto out;
+ inode = btrfs_iget_logging(wc->log_key.objectid, root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to get inode %llu for root %llu",
+ wc->log_key.objectid, btrfs_root_id(root));
+ return ret;
}
/*
@@ -689,249 +768,300 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* file. This must be done before the btrfs_drop_extents run
* so we don't try to drop this extent.
*/
- ret = btrfs_lookup_file_extent(trans, root, path,
- btrfs_ino(BTRFS_I(inode)), start, 0);
+ ret = btrfs_lookup_file_extent(trans, root, wc->subvol_path,
+ btrfs_ino(inode), start, 0);
if (ret == 0 &&
(found_type == BTRFS_FILE_EXTENT_REG ||
found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
- struct btrfs_file_extent_item cmp1;
- struct btrfs_file_extent_item cmp2;
- struct btrfs_file_extent_item *existing;
- struct extent_buffer *leaf;
-
- leaf = path->nodes[0];
- existing = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
+ struct extent_buffer *leaf = wc->subvol_path->nodes[0];
+ struct btrfs_file_extent_item existing;
+ unsigned long ptr;
- read_extent_buffer(eb, &cmp1, (unsigned long)item,
- sizeof(cmp1));
- read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
- sizeof(cmp2));
+ ptr = btrfs_item_ptr_offset(leaf, wc->subvol_path->slots[0]);
+ read_extent_buffer(leaf, &existing, ptr, sizeof(existing));
/*
* we already have a pointer to this exact extent,
* we don't have to do anything
*/
- if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
- btrfs_release_path(path);
+ if (memcmp_extent_buffer(wc->log_leaf, &existing, (unsigned long)item,
+ sizeof(existing)) == 0) {
+ btrfs_release_path(wc->subvol_path);
goto out;
}
}
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
/* drop any overlapping extents */
drop_args.start = start;
drop_args.end = extent_end;
drop_args.drop_cache = true;
- ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args);
- if (ret)
+ drop_args.path = wc->subvol_path;
+ ret = btrfs_drop_extents(trans, root, inode, &drop_args);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to drop extents for inode %llu range [%llu, %llu) root %llu",
+ wc->log_key.objectid, start, extent_end,
+ btrfs_root_id(root));
goto out;
+ }
- if (found_type == BTRFS_FILE_EXTENT_REG ||
- found_type == BTRFS_FILE_EXTENT_PREALLOC) {
- u64 offset;
- unsigned long dest_offset;
- struct btrfs_key ins;
-
- if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
- btrfs_fs_incompat(fs_info, NO_HOLES))
- goto update_inode;
-
- ret = btrfs_insert_empty_item(trans, root, path, key,
- sizeof(*item));
+ if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+ /* inline extents are easy, we just overwrite them */
+ ret = overwrite_item(wc);
if (ret)
goto out;
- dest_offset = btrfs_item_ptr_offset(path->nodes[0],
- path->slots[0]);
- copy_extent_buffer(path->nodes[0], eb, dest_offset,
- (unsigned long)item, sizeof(*item));
+ goto update_inode;
+ }
- ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
- ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
- ins.type = BTRFS_EXTENT_ITEM_KEY;
- offset = key->offset - btrfs_file_extent_offset(eb, item);
+ /*
+ * If not an inline extent, it can only be a regular or prealloc one.
+ * We have checked that above and returned -EUCLEAN if not.
+ */
- /*
- * Manually record dirty extent, as here we did a shallow
- * file extent item copy and skip normal backref update,
- * but modifying extent tree all by ourselves.
- * So need to manually record dirty extent for qgroup,
- * as the owner of the file extent changed from log tree
- * (doesn't affect qgroup) to fs/file tree(affects qgroup)
- */
- ret = btrfs_qgroup_trace_extent(trans,
- btrfs_file_extent_disk_bytenr(eb, item),
- btrfs_file_extent_disk_num_bytes(eb, item));
- if (ret < 0)
- goto out;
+ /* A hole and NO_HOLES feature enabled, nothing else to do. */
+ if (btrfs_file_extent_disk_bytenr(wc->log_leaf, item) == 0 &&
+ btrfs_fs_incompat(fs_info, NO_HOLES))
+ goto update_inode;
- if (ins.objectid > 0) {
- u64 csum_start;
- u64 csum_end;
- LIST_HEAD(ordered_sums);
+ ret = btrfs_insert_empty_item(trans, root, wc->subvol_path,
+ &wc->log_key, sizeof(*item));
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to insert item with key " BTRFS_KEY_FMT " root %llu",
+ BTRFS_KEY_FMT_VALUE(&wc->log_key),
+ btrfs_root_id(root));
+ goto out;
+ }
+ dest_offset = btrfs_item_ptr_offset(wc->subvol_path->nodes[0],
+ wc->subvol_path->slots[0]);
+ copy_extent_buffer(wc->subvol_path->nodes[0], wc->log_leaf, dest_offset,
+ (unsigned long)item, sizeof(*item));
- /*
- * is this extent already allocated in the extent
- * allocation tree? If so, just add a reference
- */
- ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
- ins.offset);
- if (ret < 0) {
- goto out;
- } else if (ret == 0) {
- struct btrfs_ref ref = {
- .action = BTRFS_ADD_DELAYED_REF,
- .bytenr = ins.objectid,
- .num_bytes = ins.offset,
- .owning_root = btrfs_root_id(root),
- .ref_root = btrfs_root_id(root),
- };
- btrfs_init_data_ref(&ref, key->objectid, offset,
- 0, false);
- ret = btrfs_inc_extent_ref(trans, &ref);
- if (ret)
- goto out;
- } else {
- /*
- * insert the extent pointer in the extent
- * allocation tree
- */
- ret = btrfs_alloc_logged_file_extent(trans,
- btrfs_root_id(root),
- key->objectid, offset, &ins);
- if (ret)
- goto out;
- }
- btrfs_release_path(path);
+ /*
+ * We have an explicit hole and NO_HOLES is not enabled. We have added
+ * the hole file extent item to the subvolume tree, so we don't have
+ * anything else to do other than update the file extent item range and
+ * update the inode item.
+ */
+ if (btrfs_file_extent_disk_bytenr(wc->log_leaf, item) == 0) {
+ btrfs_release_path(wc->subvol_path);
+ goto update_inode;
+ }
- if (btrfs_file_extent_compression(eb, item)) {
- csum_start = ins.objectid;
- csum_end = csum_start + ins.offset;
- } else {
- csum_start = ins.objectid +
- btrfs_file_extent_offset(eb, item);
- csum_end = csum_start +
- btrfs_file_extent_num_bytes(eb, item);
- }
+ ins.objectid = btrfs_file_extent_disk_bytenr(wc->log_leaf, item);
+ ins.type = BTRFS_EXTENT_ITEM_KEY;
+ ins.offset = btrfs_file_extent_disk_num_bytes(wc->log_leaf, item);
+ offset = wc->log_key.offset - btrfs_file_extent_offset(wc->log_leaf, item);
- ret = btrfs_lookup_csums_list(root->log_root,
- csum_start, csum_end - 1,
- &ordered_sums, false);
- if (ret < 0)
- goto out;
- ret = 0;
- /*
- * Now delete all existing cums in the csum root that
- * cover our range. We do this because we can have an
- * extent that is completely referenced by one file
- * extent item and partially referenced by another
- * file extent item (like after using the clone or
- * extent_same ioctls). In this case if we end up doing
- * the replay of the one that partially references the
- * extent first, and we do not do the csum deletion
- * below, we can get 2 csum items in the csum tree that
- * overlap each other. For example, imagine our log has
- * the two following file extent items:
- *
- * key (257 EXTENT_DATA 409600)
- * extent data disk byte 12845056 nr 102400
- * extent data offset 20480 nr 20480 ram 102400
- *
- * key (257 EXTENT_DATA 819200)
- * extent data disk byte 12845056 nr 102400
- * extent data offset 0 nr 102400 ram 102400
- *
- * Where the second one fully references the 100K extent
- * that starts at disk byte 12845056, and the log tree
- * has a single csum item that covers the entire range
- * of the extent:
- *
- * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
- *
- * After the first file extent item is replayed, the
- * csum tree gets the following csum item:
- *
- * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
- *
- * Which covers the 20K sub-range starting at offset 20K
- * of our extent. Now when we replay the second file
- * extent item, if we do not delete existing csum items
- * that cover any of its blocks, we end up getting two
- * csum items in our csum tree that overlap each other:
- *
- * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
- * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
- *
- * Which is a problem, because after this anyone trying
- * to lookup up for the checksum of any block of our
- * extent starting at an offset of 40K or higher, will
- * end up looking at the second csum item only, which
- * does not contain the checksum for any block starting
- * at offset 40K or higher of our extent.
- */
- while (!list_empty(&ordered_sums)) {
- struct btrfs_ordered_sum *sums;
- struct btrfs_root *csum_root;
-
- sums = list_entry(ordered_sums.next,
- struct btrfs_ordered_sum,
- list);
- csum_root = btrfs_csum_root(fs_info,
- sums->logical);
- if (!ret)
- ret = btrfs_del_csums(trans, csum_root,
- sums->logical,
- sums->len);
- if (!ret)
- ret = btrfs_csum_file_blocks(trans,
- csum_root,
- sums);
- list_del(&sums->list);
- kfree(sums);
- }
- if (ret)
- goto out;
- } else {
- btrfs_release_path(path);
+ /*
+ * Manually record dirty extent, as here we did a shallow file extent
+ * item copy and skip normal backref update, but modifying extent tree
+ * all by ourselves. So need to manually record dirty extent for qgroup,
+ * as the owner of the file extent changed from log tree (doesn't affect
+ * qgroup) to fs/file tree (affects qgroup).
+ */
+ ret = btrfs_qgroup_trace_extent(trans, ins.objectid, ins.offset);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to trace extent for bytenr %llu disk_num_bytes %llu inode %llu root %llu",
+ ins.objectid, ins.offset,
+ wc->log_key.objectid, btrfs_root_id(root));
+ goto out;
+ }
+
+ /*
+ * Is this extent already allocated in the extent tree?
+ * If so, just add a reference.
+ */
+ ret = btrfs_lookup_data_extent(fs_info, ins.objectid, ins.offset);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to lookup data extent for bytenr %llu disk_num_bytes %llu inode %llu root %llu",
+ ins.objectid, ins.offset,
+ wc->log_key.objectid, btrfs_root_id(root));
+ goto out;
+ } else if (ret == 0) {
+ struct btrfs_ref ref = {
+ .action = BTRFS_ADD_DELAYED_REF,
+ .bytenr = ins.objectid,
+ .num_bytes = ins.offset,
+ .owning_root = btrfs_root_id(root),
+ .ref_root = btrfs_root_id(root),
+ };
+
+ btrfs_init_data_ref(&ref, wc->log_key.objectid, offset, 0, false);
+ ret = btrfs_inc_extent_ref(trans, &ref);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to increment data extent for bytenr %llu disk_num_bytes %llu inode %llu root %llu",
+ ins.objectid, ins.offset,
+ wc->log_key.objectid,
+ btrfs_root_id(root));
+ goto out;
}
- } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
- /* inline extents are easy, we just overwrite them */
- ret = overwrite_item(trans, root, path, eb, slot, key);
- if (ret)
+ } else {
+ /* Insert the extent pointer in the extent tree. */
+ ret = btrfs_alloc_logged_file_extent(trans, btrfs_root_id(root),
+ wc->log_key.objectid, offset, &ins);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to allocate logged data extent for bytenr %llu disk_num_bytes %llu offset %llu inode %llu root %llu",
+ ins.objectid, ins.offset, offset,
+ wc->log_key.objectid, btrfs_root_id(root));
goto out;
+ }
+ }
+
+ btrfs_release_path(wc->subvol_path);
+
+ if (btrfs_file_extent_compression(wc->log_leaf, item)) {
+ csum_start = ins.objectid;
+ csum_end = csum_start + ins.offset;
+ } else {
+ csum_start = ins.objectid + btrfs_file_extent_offset(wc->log_leaf, item);
+ csum_end = csum_start + btrfs_file_extent_num_bytes(wc->log_leaf, item);
}
- ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start,
- extent_end - start);
+ ret = btrfs_lookup_csums_list(root->log_root, csum_start, csum_end - 1,
+ &ordered_sums, false);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookups csums for range [%llu, %llu) inode %llu root %llu",
+ csum_start, csum_end, wc->log_key.objectid,
+ btrfs_root_id(root));
+ goto out;
+ }
+ ret = 0;
+ /*
+ * Now delete all existing csums in the csum root that cover our range.
+ * We do this because we can have an extent that is completely
+ * referenced by one file extent item and partially referenced by
+ * another file extent item (like after using the clone or extent_same
+ * ioctls). In this case if we end up doing the replay of the one that
+ * partially references the extent first, and we do not do the csum
+ * deletion below, we can get 2 csum items in the csum tree that overlap
+ * each other. For example, imagine our log has the two following file
+ * extent items:
+ *
+ * key (257 EXTENT_DATA 409600)
+ * extent data disk byte 12845056 nr 102400
+ * extent data offset 20480 nr 20480 ram 102400
+ *
+ * key (257 EXTENT_DATA 819200)
+ * extent data disk byte 12845056 nr 102400
+ * extent data offset 0 nr 102400 ram 102400
+ *
+ * Where the second one fully references the 100K extent that starts at
+ * disk byte 12845056, and the log tree has a single csum item that
+ * covers the entire range of the extent:
+ *
+ * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
+ *
+ * After the first file extent item is replayed, the csum tree gets the
+ * following csum item:
+ *
+ * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
+ *
+ * Which covers the 20K sub-range starting at offset 20K of our extent.
+ * Now when we replay the second file extent item, if we do not delete
+ * existing csum items that cover any of its blocks, we end up getting
+ * two csum items in our csum tree that overlap each other:
+ *
+ * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
+ * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
+ *
+ * Which is a problem, because after this anyone trying to look up
+ * the checksum of any block of our extent starting at an offset of 40K
+ * or higher, will end up looking at the second csum item only, which
+ * does not contain the checksum for any block starting at offset 40K or
+ * higher of our extent.
+ */
+ while (!list_empty(&ordered_sums)) {
+ struct btrfs_ordered_sum *sums;
+ struct btrfs_root *csum_root;
+
+ sums = list_first_entry(&ordered_sums, struct btrfs_ordered_sum, list);
+ csum_root = btrfs_csum_root(fs_info, sums->logical);
+ if (!ret) {
+ ret = btrfs_del_csums(trans, csum_root, sums->logical,
+ sums->len);
+ if (ret)
+ btrfs_abort_log_replay(wc, ret,
+ "failed to delete csums for range [%llu, %llu) inode %llu root %llu",
+ sums->logical,
+ sums->logical + sums->len,
+ wc->log_key.objectid,
+ btrfs_root_id(root));
+ }
+ if (!ret) {
+ ret = btrfs_csum_file_blocks(trans, csum_root, sums);
+ if (ret)
+ btrfs_abort_log_replay(wc, ret,
+ "failed to add csums for range [%llu, %llu) inode %llu root %llu",
+ sums->logical,
+ sums->logical + sums->len,
+ wc->log_key.objectid,
+ btrfs_root_id(root));
+ }
+ list_del(&sums->list);
+ kfree(sums);
+ }
if (ret)
goto out;
update_inode:
- btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found);
- ret = btrfs_update_inode(trans, BTRFS_I(inode));
+ ret = btrfs_inode_set_file_extent_range(inode, start, extent_end - start);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to set file extent range [%llu, %llu) inode %llu root %llu",
+ start, extent_end, wc->log_key.objectid,
+ btrfs_root_id(root));
+ goto out;
+ }
+
+ btrfs_update_inode_bytes(inode, nbytes, drop_args.bytes_found);
+ ret = btrfs_update_inode(trans, inode);
+ if (ret)
+ btrfs_abort_log_replay(wc, ret,
+ "failed to update inode %llu root %llu",
+ wc->log_key.objectid, btrfs_root_id(root));
out:
- iput(inode);
+ iput(&inode->vfs_inode);
return ret;
}
-static int unlink_inode_for_log_replay(struct btrfs_trans_handle *trans,
+static int unlink_inode_for_log_replay(struct walk_control *wc,
struct btrfs_inode *dir,
struct btrfs_inode *inode,
const struct fscrypt_str *name)
{
+ struct btrfs_trans_handle *trans = wc->trans;
int ret;
ret = btrfs_unlink_inode(trans, dir, inode, name);
- if (ret)
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to unlink inode %llu parent dir %llu name %.*s root %llu",
+ btrfs_ino(inode), btrfs_ino(dir), name->len,
+ name->name, btrfs_root_id(inode->root));
return ret;
+ }
/*
* Whenever we need to check if a name exists or not, we check the
* fs/subvolume tree. So after an unlink we must run delayed items, so
* that future checks for a name during log replay see that the name
 * does not exist anymore.
*/
- return btrfs_run_delayed_items(trans);
+ ret = btrfs_run_delayed_items(trans);
+ if (ret)
+ btrfs_abort_log_replay(wc, ret,
+"failed to run delayed items current inode %llu parent dir %llu name %.*s root %llu",
+ btrfs_ino(inode), btrfs_ino(dir), name->len,
+ name->name, btrfs_root_id(inode->root));
+
+ return ret;
}
/*
@@ -942,41 +1072,48 @@ static int unlink_inode_for_log_replay(struct btrfs_trans_handle *trans,
* This is a helper function to do the unlink of a specific directory
* item
*/
-static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
+static noinline int drop_one_dir_item(struct walk_control *wc,
struct btrfs_inode *dir,
struct btrfs_dir_item *di)
{
struct btrfs_root *root = dir->root;
- struct inode *inode;
+ struct btrfs_inode *inode;
struct fscrypt_str name;
- struct extent_buffer *leaf;
+ struct extent_buffer *leaf = wc->subvol_path->nodes[0];
struct btrfs_key location;
int ret;
- leaf = path->nodes[0];
-
btrfs_dir_item_key_to_cpu(leaf, di, &location);
ret = read_alloc_one_name(leaf, di + 1, btrfs_dir_name_len(leaf, di), &name);
- if (ret)
- return -ENOMEM;
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to allocate name for dir %llu root %llu",
+ btrfs_ino(dir), btrfs_root_id(root));
+ return ret;
+ }
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
- inode = read_one_inode(root, location.objectid);
- if (!inode) {
- ret = -EIO;
+ inode = btrfs_iget_logging(location.objectid, root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to open inode %llu parent dir %llu name %.*s root %llu",
+ location.objectid, btrfs_ino(dir),
+ name.len, name.name, btrfs_root_id(root));
+ inode = NULL;
goto out;
}
- ret = link_to_fixup_dir(trans, root, path, location.objectid);
+ ret = link_to_fixup_dir(wc, location.objectid);
if (ret)
goto out;
- ret = unlink_inode_for_log_replay(trans, dir, BTRFS_I(inode), &name);
+ ret = unlink_inode_for_log_replay(wc, dir, inode, &name);
out:
kfree(name.name);
- iput(inode);
+ if (inode)
+ iput(&inode->vfs_inode);
return ret;
}
@@ -1039,7 +1176,7 @@ static noinline int backref_in_log(struct btrfs_root *log,
u64 ref_objectid,
const struct fscrypt_str *name)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
path = btrfs_alloc_path();
@@ -1047,12 +1184,10 @@ static noinline int backref_in_log(struct btrfs_root *log,
return -ENOMEM;
ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
- if (ret < 0) {
- goto out;
- } else if (ret == 1) {
- ret = 0;
- goto out;
- }
+ if (ret < 0)
+ return ret;
+ if (ret == 1)
+ return 0;
if (key->type == BTRFS_INODE_EXTREF_KEY)
ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
@@ -1061,172 +1196,224 @@ static noinline int backref_in_log(struct btrfs_root *log,
else
ret = !!btrfs_find_name_in_backref(path->nodes[0],
path->slots[0], name);
-out:
- btrfs_free_path(path);
return ret;
}
-static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_root *log_root,
+static int unlink_refs_not_in_log(struct walk_control *wc,
+ struct btrfs_key *search_key,
struct btrfs_inode *dir,
- struct btrfs_inode *inode,
- u64 inode_objectid, u64 parent_objectid,
- u64 ref_index, struct fscrypt_str *name)
+ struct btrfs_inode *inode)
{
- int ret;
- struct extent_buffer *leaf;
- struct btrfs_dir_item *di;
- struct btrfs_key search_key;
- struct btrfs_inode_extref *extref;
+ struct extent_buffer *leaf = wc->subvol_path->nodes[0];
+ unsigned long ptr;
+ unsigned long ptr_end;
-again:
- /* Search old style refs */
- search_key.objectid = inode_objectid;
- search_key.type = BTRFS_INODE_REF_KEY;
- search_key.offset = parent_objectid;
- ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
- if (ret == 0) {
+ /*
+ * Check all the names in this back reference to see if they are in the
+ * log. If so, we allow them to stay otherwise they must be unlinked as
+ * a conflict.
+ */
+ ptr = btrfs_item_ptr_offset(leaf, wc->subvol_path->slots[0]);
+ ptr_end = ptr + btrfs_item_size(leaf, wc->subvol_path->slots[0]);
+ while (ptr < ptr_end) {
+ struct fscrypt_str victim_name;
struct btrfs_inode_ref *victim_ref;
- unsigned long ptr;
- unsigned long ptr_end;
-
- leaf = path->nodes[0];
-
- /* are we trying to overwrite a back ref for the root directory
- * if so, just jump out, we're done
- */
- if (search_key.objectid == search_key.offset)
- return 1;
-
- /* check all the names in this back reference to see
- * if they are in the log. if so, we allow them to stay
- * otherwise they must be unlinked as a conflict
- */
- ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
- ptr_end = ptr + btrfs_item_size(leaf, path->slots[0]);
- while (ptr < ptr_end) {
- struct fscrypt_str victim_name;
+ int ret;
- victim_ref = (struct btrfs_inode_ref *)ptr;
- ret = read_alloc_one_name(leaf, (victim_ref + 1),
- btrfs_inode_ref_name_len(leaf, victim_ref),
- &victim_name);
- if (ret)
- return ret;
+ victim_ref = (struct btrfs_inode_ref *)ptr;
+ ret = read_alloc_one_name(leaf, (victim_ref + 1),
+ btrfs_inode_ref_name_len(leaf, victim_ref),
+ &victim_name);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to allocate name for inode %llu parent dir %llu root %llu",
+ btrfs_ino(inode), btrfs_ino(dir),
+ btrfs_root_id(inode->root));
+ return ret;
+ }
- ret = backref_in_log(log_root, &search_key,
- parent_objectid, &victim_name);
+ ret = backref_in_log(wc->log, search_key, btrfs_ino(dir), &victim_name);
+ if (ret) {
if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to check if backref is in log tree for inode %llu parent dir %llu name %.*s root %llu",
+ btrfs_ino(inode), btrfs_ino(dir),
+ victim_name.len, victim_name.name,
+ btrfs_root_id(inode->root));
kfree(victim_name.name);
return ret;
- } else if (!ret) {
- inc_nlink(&inode->vfs_inode);
- btrfs_release_path(path);
-
- ret = unlink_inode_for_log_replay(trans, dir, inode,
- &victim_name);
- kfree(victim_name.name);
- if (ret)
- return ret;
- goto again;
}
kfree(victim_name.name);
-
ptr = (unsigned long)(victim_ref + 1) + victim_name.len;
+ continue;
}
- }
- btrfs_release_path(path);
- /* Same search but for extended refs */
- extref = btrfs_lookup_inode_extref(NULL, root, path, name,
- inode_objectid, parent_objectid, 0,
- 0);
- if (IS_ERR(extref)) {
- return PTR_ERR(extref);
- } else if (extref) {
- u32 item_size;
- u32 cur_offset = 0;
- unsigned long base;
- struct inode *victim_parent;
+ inc_nlink(&inode->vfs_inode);
+ btrfs_release_path(wc->subvol_path);
- leaf = path->nodes[0];
+ ret = unlink_inode_for_log_replay(wc, dir, inode, &victim_name);
+ kfree(victim_name.name);
+ if (ret)
+ return ret;
+ return -EAGAIN;
+ }
- item_size = btrfs_item_size(leaf, path->slots[0]);
- base = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ return 0;
+}
- while (cur_offset < item_size) {
- struct fscrypt_str victim_name;
+static int unlink_extrefs_not_in_log(struct walk_control *wc,
+ struct btrfs_key *search_key,
+ struct btrfs_inode *dir,
+ struct btrfs_inode *inode)
+{
+ struct extent_buffer *leaf = wc->subvol_path->nodes[0];
+ const unsigned long base = btrfs_item_ptr_offset(leaf, wc->subvol_path->slots[0]);
+ const u32 item_size = btrfs_item_size(leaf, wc->subvol_path->slots[0]);
+ u32 cur_offset = 0;
- extref = (struct btrfs_inode_extref *)(base + cur_offset);
+ while (cur_offset < item_size) {
+ struct btrfs_root *log_root = wc->log;
+ struct btrfs_inode_extref *extref;
+ struct fscrypt_str victim_name;
+ int ret;
- if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
- goto next;
+ extref = (struct btrfs_inode_extref *)(base + cur_offset);
+ victim_name.len = btrfs_inode_extref_name_len(leaf, extref);
- ret = read_alloc_one_name(leaf, &extref->name,
- btrfs_inode_extref_name_len(leaf, extref),
- &victim_name);
- if (ret)
- return ret;
+ if (btrfs_inode_extref_parent(leaf, extref) != btrfs_ino(dir))
+ goto next;
- search_key.objectid = inode_objectid;
- search_key.type = BTRFS_INODE_EXTREF_KEY;
- search_key.offset = btrfs_extref_hash(parent_objectid,
- victim_name.name,
- victim_name.len);
- ret = backref_in_log(log_root, &search_key,
- parent_objectid, &victim_name);
+ ret = read_alloc_one_name(leaf, &extref->name, victim_name.len,
+ &victim_name);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to allocate name for inode %llu parent dir %llu root %llu",
+ btrfs_ino(inode), btrfs_ino(dir),
+ btrfs_root_id(inode->root));
+ return ret;
+ }
+
+ search_key->objectid = btrfs_ino(inode);
+ search_key->type = BTRFS_INODE_EXTREF_KEY;
+ search_key->offset = btrfs_extref_hash(btrfs_ino(dir),
+ victim_name.name,
+ victim_name.len);
+ ret = backref_in_log(log_root, search_key, btrfs_ino(dir), &victim_name);
+ if (ret) {
if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to check if backref is in log tree for inode %llu parent dir %llu name %.*s root %llu",
+ btrfs_ino(inode), btrfs_ino(dir),
+ victim_name.len, victim_name.name,
+ btrfs_root_id(inode->root));
kfree(victim_name.name);
return ret;
- } else if (!ret) {
- ret = -ENOENT;
- victim_parent = read_one_inode(root,
- parent_objectid);
- if (victim_parent) {
- inc_nlink(&inode->vfs_inode);
- btrfs_release_path(path);
-
- ret = unlink_inode_for_log_replay(trans,
- BTRFS_I(victim_parent),
- inode, &victim_name);
- }
- iput(victim_parent);
- kfree(victim_name.name);
- if (ret)
- return ret;
- goto again;
}
kfree(victim_name.name);
next:
cur_offset += victim_name.len + sizeof(*extref);
+ continue;
}
+
+ inc_nlink(&inode->vfs_inode);
+ btrfs_release_path(wc->subvol_path);
+
+ ret = unlink_inode_for_log_replay(wc, dir, inode, &victim_name);
+ kfree(victim_name.name);
+ if (ret)
+ return ret;
+ return -EAGAIN;
}
- btrfs_release_path(path);
+
+ return 0;
+}
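+
+/*
+ * Note: a return of -EAGAIN from the two helpers above is not an error, it
+ * only tells the caller that a conflicting name was unlinked and the ref
+ * search must be restarted (see the "goto again" handling in __add_inode_ref()
+ * below).
+ */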
+
+static inline int __add_inode_ref(struct walk_control *wc,
+ struct btrfs_inode *dir,
+ struct btrfs_inode *inode,
+ u64 ref_index, struct fscrypt_str *name)
+{
+ int ret;
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = wc->root;
+ struct btrfs_dir_item *di;
+ struct btrfs_key search_key;
+ struct btrfs_inode_extref *extref;
+
+again:
+ /* Search old style refs */
+ search_key.objectid = btrfs_ino(inode);
+ search_key.type = BTRFS_INODE_REF_KEY;
+ search_key.offset = btrfs_ino(dir);
+ ret = btrfs_search_slot(NULL, root, &search_key, wc->subvol_path, 0, 0);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to search subvolume tree for key " BTRFS_KEY_FMT " root %llu",
+ BTRFS_KEY_FMT_VALUE(&search_key),
+ btrfs_root_id(root));
+ return ret;
+ } else if (ret == 0) {
+ /*
+ * Are we trying to overwrite a back ref for the root directory?
+ * If so, we're done.
+ */
+ if (search_key.objectid == search_key.offset)
+ return 1;
+
+ ret = unlink_refs_not_in_log(wc, &search_key, dir, inode);
+ if (ret == -EAGAIN)
+ goto again;
+ else if (ret)
+ return ret;
+ }
+ btrfs_release_path(wc->subvol_path);
+
+ /* Same search but for extended refs */
+ extref = btrfs_lookup_inode_extref(root, wc->subvol_path, name,
+ btrfs_ino(inode), btrfs_ino(dir));
+ if (IS_ERR(extref)) {
+ return PTR_ERR(extref);
+ } else if (extref) {
+ ret = unlink_extrefs_not_in_log(wc, &search_key, dir, inode);
+ if (ret == -EAGAIN)
+ goto again;
+ else if (ret)
+ return ret;
+ }
+ btrfs_release_path(wc->subvol_path);
/* look for a conflicting sequence number */
- di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
+ di = btrfs_lookup_dir_index_item(trans, root, wc->subvol_path, btrfs_ino(dir),
ref_index, name, 0);
if (IS_ERR(di)) {
- return PTR_ERR(di);
+ ret = PTR_ERR(di);
+ btrfs_abort_log_replay(wc, ret,
+"failed to lookup dir index item for dir %llu ref_index %llu name %.*s root %llu",
+ btrfs_ino(dir), ref_index, name->len,
+ name->name, btrfs_root_id(root));
+ return ret;
} else if (di) {
- ret = drop_one_dir_item(trans, path, dir, di);
+ ret = drop_one_dir_item(wc, dir, di);
if (ret)
return ret;
}
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
/* look for a conflicting name */
- di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir), name, 0);
+ di = btrfs_lookup_dir_item(trans, root, wc->subvol_path, btrfs_ino(dir), name, 0);
if (IS_ERR(di)) {
- return PTR_ERR(di);
+ ret = PTR_ERR(di);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir item for dir %llu name %.*s root %llu",
+ btrfs_ino(dir), name->len, name->name,
+ btrfs_root_id(root));
+ return ret;
} else if (di) {
- ret = drop_one_dir_item(trans, path, dir, di);
+ ret = drop_one_dir_item(wc, dir, di);
if (ret)
return ret;
}
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
return 0;
}
@@ -1279,66 +1466,81 @@ static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
* proper unlink of that name (that is, remove its entry from the inode
* reference item and both dir index keys).
*/
-static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_inode *inode,
- struct extent_buffer *log_eb,
- int log_slot,
- struct btrfs_key *key)
+static int unlink_old_inode_refs(struct walk_control *wc, struct btrfs_inode *inode)
{
+ struct btrfs_root *root = wc->root;
int ret;
unsigned long ref_ptr;
unsigned long ref_end;
struct extent_buffer *eb;
again:
- btrfs_release_path(path);
- ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
+ btrfs_release_path(wc->subvol_path);
+ ret = btrfs_search_slot(NULL, root, &wc->log_key, wc->subvol_path, 0, 0);
if (ret > 0) {
ret = 0;
goto out;
}
- if (ret < 0)
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to search subvolume tree for key " BTRFS_KEY_FMT " root %llu",
+ BTRFS_KEY_FMT_VALUE(&wc->log_key),
+ btrfs_root_id(root));
goto out;
+ }
- eb = path->nodes[0];
- ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
- ref_end = ref_ptr + btrfs_item_size(eb, path->slots[0]);
+ eb = wc->subvol_path->nodes[0];
+ ref_ptr = btrfs_item_ptr_offset(eb, wc->subvol_path->slots[0]);
+ ref_end = ref_ptr + btrfs_item_size(eb, wc->subvol_path->slots[0]);
while (ref_ptr < ref_end) {
struct fscrypt_str name;
u64 parent_id;
- if (key->type == BTRFS_INODE_EXTREF_KEY) {
+ if (wc->log_key.type == BTRFS_INODE_EXTREF_KEY) {
ret = extref_get_fields(eb, ref_ptr, &name,
NULL, &parent_id);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to get extref details for inode %llu root %llu",
+ btrfs_ino(inode),
+ btrfs_root_id(root));
+ goto out;
+ }
} else {
- parent_id = key->offset;
+ parent_id = wc->log_key.offset;
ret = ref_get_fields(eb, ref_ptr, &name, NULL);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to get ref details for inode %llu parent_id %llu root %llu",
+ btrfs_ino(inode), parent_id,
+ btrfs_root_id(root));
+ goto out;
+ }
}
- if (ret)
- goto out;
- if (key->type == BTRFS_INODE_EXTREF_KEY)
- ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
+ if (wc->log_key.type == BTRFS_INODE_EXTREF_KEY)
+ ret = !!btrfs_find_name_in_ext_backref(wc->log_leaf, wc->log_slot,
parent_id, &name);
else
- ret = !!btrfs_find_name_in_backref(log_eb, log_slot, &name);
+ ret = !!btrfs_find_name_in_backref(wc->log_leaf, wc->log_slot,
+ &name);
if (!ret) {
- struct inode *dir;
+ struct btrfs_inode *dir;
- btrfs_release_path(path);
- dir = read_one_inode(root, parent_id);
- if (!dir) {
- ret = -ENOENT;
+ btrfs_release_path(wc->subvol_path);
+ dir = btrfs_iget_logging(parent_id, root);
+ if (IS_ERR(dir)) {
+ ret = PTR_ERR(dir);
kfree(name.name);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir inode %llu root %llu",
+ parent_id, btrfs_root_id(root));
goto out;
}
- ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir),
- inode, &name);
+ ret = unlink_inode_for_log_replay(wc, dir, inode, &name);
kfree(name.name);
- iput(dir);
+ iput(&dir->vfs_inode);
if (ret)
goto out;
goto again;
@@ -1346,57 +1548,51 @@ again:
kfree(name.name);
ref_ptr += name.len;
- if (key->type == BTRFS_INODE_EXTREF_KEY)
+ if (wc->log_key.type == BTRFS_INODE_EXTREF_KEY)
ref_ptr += sizeof(struct btrfs_inode_extref);
else
ref_ptr += sizeof(struct btrfs_inode_ref);
}
ret = 0;
out:
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
return ret;
}
/*
- * replay one inode back reference item found in the log tree.
- * eb, slot and key refer to the buffer and key found in the log tree.
- * root is the destination we are replaying into, and path is for temp
- * use by this function. (it should be released on return).
+ * Replay one inode back reference item found in the log tree.
+ * Path is for temporary use by this function (it should be released on return).
*/
-static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_root *log,
- struct btrfs_path *path,
- struct extent_buffer *eb, int slot,
- struct btrfs_key *key)
+static noinline int add_inode_ref(struct walk_control *wc)
{
- struct inode *dir = NULL;
- struct inode *inode = NULL;
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = wc->root;
+ struct btrfs_inode *dir = NULL;
+ struct btrfs_inode *inode = NULL;
unsigned long ref_ptr;
unsigned long ref_end;
struct fscrypt_str name = { 0 };
int ret;
- int log_ref_ver = 0;
+ const bool is_extref_item = (wc->log_key.type == BTRFS_INODE_EXTREF_KEY);
u64 parent_objectid;
u64 inode_objectid;
u64 ref_index = 0;
int ref_struct_size;
- ref_ptr = btrfs_item_ptr_offset(eb, slot);
- ref_end = ref_ptr + btrfs_item_size(eb, slot);
+ ref_ptr = btrfs_item_ptr_offset(wc->log_leaf, wc->log_slot);
+ ref_end = ref_ptr + btrfs_item_size(wc->log_leaf, wc->log_slot);
- if (key->type == BTRFS_INODE_EXTREF_KEY) {
+ if (is_extref_item) {
struct btrfs_inode_extref *r;
ref_struct_size = sizeof(struct btrfs_inode_extref);
- log_ref_ver = 1;
r = (struct btrfs_inode_extref *)ref_ptr;
- parent_objectid = btrfs_inode_extref_parent(eb, r);
+ parent_objectid = btrfs_inode_extref_parent(wc->log_leaf, r);
} else {
ref_struct_size = sizeof(struct btrfs_inode_ref);
- parent_objectid = key->offset;
+ parent_objectid = wc->log_key.offset;
}
- inode_objectid = key->objectid;
+ inode_objectid = wc->log_key.objectid;
/*
* it is possible that we didn't log all the parent directories
@@ -1404,41 +1600,93 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
* copy the back ref in. The link count fixup code will take
* care of the rest
*/
- dir = read_one_inode(root, parent_objectid);
- if (!dir) {
- ret = -ENOENT;
+ dir = btrfs_iget_logging(parent_objectid, root);
+ if (IS_ERR(dir)) {
+ ret = PTR_ERR(dir);
+ if (ret == -ENOENT)
+ ret = 0;
+ else
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir inode %llu root %llu",
+ parent_objectid, btrfs_root_id(root));
+ dir = NULL;
goto out;
}
- inode = read_one_inode(root, inode_objectid);
- if (!inode) {
- ret = -EIO;
+ inode = btrfs_iget_logging(inode_objectid, root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup inode %llu root %llu",
+ inode_objectid, btrfs_root_id(root));
+ inode = NULL;
goto out;
}
while (ref_ptr < ref_end) {
- if (log_ref_ver) {
- ret = extref_get_fields(eb, ref_ptr, &name,
+ if (is_extref_item) {
+ ret = extref_get_fields(wc->log_leaf, ref_ptr, &name,
&ref_index, &parent_objectid);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to get extref details for inode %llu root %llu",
+ btrfs_ino(inode),
+ btrfs_root_id(root));
+ goto out;
+ }
/*
* parent object can change from one array
* item to another.
*/
- if (!dir)
- dir = read_one_inode(root, parent_objectid);
if (!dir) {
- ret = -ENOENT;
- goto out;
+ dir = btrfs_iget_logging(parent_objectid, root);
+ if (IS_ERR(dir)) {
+ ret = PTR_ERR(dir);
+ dir = NULL;
+ /*
+ * A new parent dir may have not been
+ * logged and not exist in the subvolume
+ * tree, see the comment above before
+ * the loop when getting the first
+ * parent dir.
+ */
+ if (ret == -ENOENT) {
+ /*
+ * The next extref may refer to
+ * another parent dir that
+ * exists, so continue.
+ */
+ ret = 0;
+ goto next;
+ } else {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir inode %llu root %llu",
+ parent_objectid,
+ btrfs_root_id(root));
+ }
+ goto out;
+ }
}
} else {
- ret = ref_get_fields(eb, ref_ptr, &name, &ref_index);
+ ret = ref_get_fields(wc->log_leaf, ref_ptr, &name, &ref_index);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to get ref details for inode %llu parent_objectid %llu root %llu",
+ btrfs_ino(inode),
+ parent_objectid,
+ btrfs_root_id(root));
+ goto out;
+ }
}
- if (ret)
- goto out;
- ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
- btrfs_ino(BTRFS_I(inode)), ref_index, &name);
+ ret = inode_in_dir(root, wc->subvol_path, btrfs_ino(dir),
+ btrfs_ino(inode), ref_index, &name);
if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to check if inode %llu is in dir %llu ref_index %llu name %.*s root %llu",
+ btrfs_ino(inode), btrfs_ino(dir),
+ ref_index, name.len, name.name,
+ btrfs_root_id(root));
goto out;
} else if (ret == 0) {
/*
@@ -1448,10 +1696,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
* overwrite any existing back reference, and we don't
* want to create dangling pointers in the directory.
*/
- ret = __add_inode_ref(trans, root, path, log,
- BTRFS_I(dir), BTRFS_I(inode),
- inode_objectid, parent_objectid,
- ref_index, &name);
+ ret = __add_inode_ref(wc, dir, inode, ref_index, &name);
if (ret) {
if (ret == 1)
ret = 0;
@@ -1459,22 +1704,34 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
}
/* insert our name */
- ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
- &name, 0, ref_index);
- if (ret)
+ ret = btrfs_add_link(trans, dir, inode, &name, 0, ref_index);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to add link for inode %llu in dir %llu ref_index %llu name %.*s root %llu",
+ btrfs_ino(inode),
+ btrfs_ino(dir), ref_index,
+ name.len, name.name,
+ btrfs_root_id(root));
goto out;
+ }
- ret = btrfs_update_inode(trans, BTRFS_I(inode));
- if (ret)
+ ret = btrfs_update_inode(trans, inode);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to update inode %llu root %llu",
+ btrfs_ino(inode),
+ btrfs_root_id(root));
goto out;
+ }
}
/* Else, ret == 1, we already have a perfect match, we're done. */
+next:
ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + name.len;
kfree(name.name);
name.name = NULL;
- if (log_ref_ver) {
- iput(dir);
+ if (is_extref_item && dir) {
+ iput(&dir->vfs_inode);
dir = NULL;
}
}
@@ -1487,18 +1744,19 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
* dir index entries exist for a name but there is no inode reference
* item with the same name.
*/
- ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
- key);
+ ret = unlink_old_inode_refs(wc, inode);
if (ret)
goto out;
/* finally write the back reference in the inode */
- ret = overwrite_item(trans, root, path, eb, slot, key);
+ ret = overwrite_item(wc);
out:
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
kfree(name.name);
- iput(dir);
- iput(inode);
+ if (dir)
+ iput(&dir->vfs_inode);
+ if (inode)
+ iput(&inode->vfs_inode);
return ret;
}
@@ -1611,26 +1869,22 @@ process_slot:
* number of back refs found. If it goes down to zero, the iput
* will free the inode.
*/
-static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
- struct inode *inode)
+static noinline int fixup_inode_link_count(struct walk_control *wc,
+ struct btrfs_inode *inode)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_path *path;
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = inode->root;
int ret;
u64 nlink = 0;
- u64 ino = btrfs_ino(BTRFS_I(inode));
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
+ const u64 ino = btrfs_ino(inode);
- ret = count_inode_refs(BTRFS_I(inode), path);
+ ret = count_inode_refs(inode, wc->subvol_path);
if (ret < 0)
goto out;
nlink = ret;
- ret = count_inode_extrefs(BTRFS_I(inode), path);
+ ret = count_inode_extrefs(inode, wc->subvol_path);
if (ret < 0)
goto out;
@@ -1638,19 +1892,18 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
ret = 0;
- if (nlink != inode->i_nlink) {
- set_nlink(inode, nlink);
- ret = btrfs_update_inode(trans, BTRFS_I(inode));
+ if (nlink != inode->vfs_inode.i_nlink) {
+ set_nlink(&inode->vfs_inode, nlink);
+ ret = btrfs_update_inode(trans, inode);
if (ret)
goto out;
}
- if (S_ISDIR(inode->i_mode))
- BTRFS_I(inode)->index_cnt = (u64)-1;
+ if (S_ISDIR(inode->vfs_inode.i_mode))
+ inode->index_cnt = (u64)-1;
- if (inode->i_nlink == 0) {
- if (S_ISDIR(inode->i_mode)) {
- ret = replay_dir_deletes(trans, root, NULL, path,
- ino, 1);
+ if (inode->vfs_inode.i_nlink == 0) {
+ if (S_ISDIR(inode->vfs_inode.i_mode)) {
+ ret = replay_dir_deletes(wc, ino, true);
if (ret)
goto out;
}
@@ -1660,62 +1913,63 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
}
out:
- btrfs_free_path(path);
+ btrfs_release_path(wc->subvol_path);
return ret;
}
-static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path)
+static noinline int fixup_inode_link_counts(struct walk_control *wc)
{
int ret;
struct btrfs_key key;
- struct inode *inode;
key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
key.type = BTRFS_ORPHAN_ITEM_KEY;
key.offset = (u64)-1;
while (1) {
- ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = wc->root;
+ struct btrfs_inode *inode;
+
+ ret = btrfs_search_slot(trans, root, &key, wc->subvol_path, -1, 1);
if (ret < 0)
break;
if (ret == 1) {
ret = 0;
- if (path->slots[0] == 0)
+ if (wc->subvol_path->slots[0] == 0)
break;
- path->slots[0]--;
+ wc->subvol_path->slots[0]--;
}
- btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ btrfs_item_key_to_cpu(wc->subvol_path->nodes[0], &key, wc->subvol_path->slots[0]);
if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
key.type != BTRFS_ORPHAN_ITEM_KEY)
break;
- ret = btrfs_del_item(trans, root, path);
+ ret = btrfs_del_item(trans, root, wc->subvol_path);
if (ret)
break;
- btrfs_release_path(path);
- inode = read_one_inode(root, key.offset);
- if (!inode) {
- ret = -EIO;
+ btrfs_release_path(wc->subvol_path);
+ inode = btrfs_iget_logging(key.offset, root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
break;
}
- ret = fixup_inode_link_count(trans, inode);
- iput(inode);
+ ret = fixup_inode_link_count(wc, inode);
+ iput(&inode->vfs_inode);
if (ret)
break;
/*
* fixup on a directory may create new entries,
- * make sure we always look for the highset possible
+ * make sure we always look for the highest possible
* offset
*/
key.offset = (u64)-1;
}
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
return ret;
}
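
The helpers converted above and below now thread everything through a single walk_control argument instead of separate trans/root/path/eb/slot/key parameters. The struct itself is declared in an earlier hunk of this patch, so purely as a reading aid, the members these call sites depend on can be reconstructed from their usage; the sketch below is a hedged reconstruction, not the exact definition, and the real struct very likely carries additional fields.

struct walk_control {
        bool free;                              /* free/unaccount log buffers while walking */
        bool ignore_cur_inode;                  /* skip remaining items of an unlinked inode */
        int stage;                              /* LOG_WALK_REPLAY_INODES, LOG_WALK_REPLAY_DIR_INDEX, ... */
        struct btrfs_trans_handle *trans;       /* transaction used during replay */
        struct btrfs_root *root;                /* subvolume root being replayed into */
        struct btrfs_root *log;                 /* log tree being walked */
        struct btrfs_path *subvol_path;         /* scratch path for subvolume tree searches */
        struct extent_buffer *log_leaf;         /* log tree leaf currently being processed */
        int log_slot;                           /* slot of the current item in log_leaf */
        struct btrfs_key log_key;               /* key of the current log item */
        int (*process_func)(struct extent_buffer *eb, struct walk_control *wc,
                            u64 gen, int level);
};
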
@@ -1725,36 +1979,50 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
* count when replay is done. The link count is incremented here
* so the inode won't go away until we check it
*/
-static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- u64 objectid)
+static noinline int link_to_fixup_dir(struct walk_control *wc, u64 objectid)
{
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = wc->root;
struct btrfs_key key;
int ret = 0;
- struct inode *inode;
+ struct btrfs_inode *inode;
+ struct inode *vfs_inode;
- inode = read_one_inode(root, objectid);
- if (!inode)
- return -EIO;
+ inode = btrfs_iget_logging(objectid, root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup inode %llu root %llu",
+ objectid, btrfs_root_id(root));
+ return ret;
+ }
+ vfs_inode = &inode->vfs_inode;
key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
key.type = BTRFS_ORPHAN_ITEM_KEY;
key.offset = objectid;
- ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
+ ret = btrfs_insert_empty_item(trans, root, wc->subvol_path, &key, 0);
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
if (ret == 0) {
- if (!inode->i_nlink)
- set_nlink(inode, 1);
+ if (!vfs_inode->i_nlink)
+ set_nlink(vfs_inode, 1);
else
- inc_nlink(inode);
- ret = btrfs_update_inode(trans, BTRFS_I(inode));
+ inc_nlink(vfs_inode);
+ ret = btrfs_update_inode(trans, inode);
+ if (ret)
+ btrfs_abort_log_replay(wc, ret,
+ "failed to update inode %llu root %llu",
+ objectid, btrfs_root_id(root));
} else if (ret == -EEXIST) {
ret = 0;
+ } else {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to insert fixup item for inode %llu root %llu",
+ objectid, btrfs_root_id(root));
}
- iput(inode);
+ iput(vfs_inode);
return ret;
}
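
link_to_fixup_dir() above is typical of the new error handling: every failure path now calls btrfs_abort_log_replay() with a formatted message. That helper is introduced in an earlier part of this patch and its body is not visible here; the sketch below only illustrates the shape its call sites imply (the name context printed and the exact body are assumptions), roughly a printf-style report followed by a transaction abort.

__printf(3, 4)
static void btrfs_abort_log_replay(struct walk_control *wc, int error,
                                   const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        /* Hypothetical body: report the message plus leaf/slot context, then abort. */
        btrfs_crit(wc->log->fs_info, "log replay failure (leaf %llu slot %d): %pV",
                   wc->log_leaf ? wc->log_leaf->start : 0, wc->log_slot, &vaf);
        va_end(args);
        btrfs_abort_transaction(wc->trans, error);
}
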
@@ -1770,33 +2038,31 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans,
const struct fscrypt_str *name,
struct btrfs_key *location)
{
- struct inode *inode;
- struct inode *dir;
+ struct btrfs_inode *inode;
+ struct btrfs_inode *dir;
int ret;
- inode = read_one_inode(root, location->objectid);
- if (!inode)
- return -ENOENT;
+ inode = btrfs_iget_logging(location->objectid, root);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
- dir = read_one_inode(root, dirid);
- if (!dir) {
- iput(inode);
- return -EIO;
+ dir = btrfs_iget_logging(dirid, root);
+ if (IS_ERR(dir)) {
+ iput(&inode->vfs_inode);
+ return PTR_ERR(dir);
}
- ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
- 1, index);
+ ret = btrfs_add_link(trans, dir, inode, name, 1, index);
/* FIXME, put inode into FIXUP list */
- iput(inode);
- iput(dir);
+ iput(&inode->vfs_inode);
+ iput(&dir->vfs_inode);
return ret;
}
-static int delete_conflicting_dir_entry(struct btrfs_trans_handle *trans,
+static int delete_conflicting_dir_entry(struct walk_control *wc,
struct btrfs_inode *dir,
- struct btrfs_path *path,
struct btrfs_dir_item *dst_di,
const struct btrfs_key *log_key,
u8 log_flags,
@@ -1804,12 +2070,12 @@ static int delete_conflicting_dir_entry(struct btrfs_trans_handle *trans,
{
struct btrfs_key found_key;
- btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
+ btrfs_dir_item_key_to_cpu(wc->subvol_path->nodes[0], dst_di, &found_key);
/* The existing dentry points to the same inode, don't delete it. */
if (found_key.objectid == log_key->objectid &&
found_key.type == log_key->type &&
found_key.offset == log_key->offset &&
- btrfs_dir_flags(path->nodes[0], dst_di) == log_flags)
+ btrfs_dir_flags(wc->subvol_path->nodes[0], dst_di) == log_flags)
return 1;
/*
@@ -1819,7 +2085,7 @@ static int delete_conflicting_dir_entry(struct btrfs_trans_handle *trans,
if (!exists)
return 0;
- return drop_one_dir_item(trans, path, dir, dst_di);
+ return drop_one_dir_item(wc, dir, dst_di);
}
/*
@@ -1838,13 +2104,10 @@ static int delete_conflicting_dir_entry(struct btrfs_trans_handle *trans,
* Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
* non-existing inode) and 1 if the name was replayed.
*/
-static noinline int replay_one_name(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct extent_buffer *eb,
- struct btrfs_dir_item *di,
- struct btrfs_key *key)
+static noinline int replay_one_name(struct walk_control *wc, struct btrfs_dir_item *di)
{
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = wc->root;
struct fscrypt_str name = { 0 };
struct btrfs_dir_item *dir_dst_di;
struct btrfs_dir_item *index_dst_di;
@@ -1852,62 +2115,92 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
bool index_dst_matches = false;
struct btrfs_key log_key;
struct btrfs_key search_key;
- struct inode *dir;
+ struct btrfs_inode *dir;
u8 log_flags;
bool exists;
int ret;
bool update_size = true;
bool name_added = false;
- dir = read_one_inode(root, key->objectid);
- if (!dir)
- return -EIO;
+ dir = btrfs_iget_logging(wc->log_key.objectid, root);
+ if (IS_ERR(dir)) {
+ ret = PTR_ERR(dir);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir inode %llu root %llu",
+ wc->log_key.objectid, btrfs_root_id(root));
+ return ret;
+ }
- ret = read_alloc_one_name(eb, di + 1, btrfs_dir_name_len(eb, di), &name);
- if (ret)
+ ret = read_alloc_one_name(wc->log_leaf, di + 1,
+ btrfs_dir_name_len(wc->log_leaf, di), &name);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to allocate name for dir %llu root %llu",
+ btrfs_ino(dir), btrfs_root_id(root));
goto out;
+ }
- log_flags = btrfs_dir_flags(eb, di);
- btrfs_dir_item_key_to_cpu(eb, di, &log_key);
- ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
- btrfs_release_path(path);
- if (ret < 0)
+ log_flags = btrfs_dir_flags(wc->log_leaf, di);
+ btrfs_dir_item_key_to_cpu(wc->log_leaf, di, &log_key);
+ ret = btrfs_lookup_inode(trans, root, wc->subvol_path, &log_key, 0);
+ btrfs_release_path(wc->subvol_path);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup inode %llu root %llu",
+ log_key.objectid, btrfs_root_id(root));
goto out;
+ }
exists = (ret == 0);
ret = 0;
- dir_dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
- &name, 1);
+ dir_dst_di = btrfs_lookup_dir_item(trans, root, wc->subvol_path,
+ wc->log_key.objectid, &name, 1);
if (IS_ERR(dir_dst_di)) {
ret = PTR_ERR(dir_dst_di);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir item for dir %llu name %.*s root %llu",
+ wc->log_key.objectid, name.len, name.name,
+ btrfs_root_id(root));
goto out;
} else if (dir_dst_di) {
- ret = delete_conflicting_dir_entry(trans, BTRFS_I(dir), path,
- dir_dst_di, &log_key,
- log_flags, exists);
- if (ret < 0)
+ ret = delete_conflicting_dir_entry(wc, dir, dir_dst_di,
+ &log_key, log_flags, exists);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to delete conflicting entry for dir %llu name %.*s root %llu",
+ btrfs_ino(dir), name.len, name.name,
+ btrfs_root_id(root));
goto out;
+ }
dir_dst_matches = (ret == 1);
}
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
- index_dst_di = btrfs_lookup_dir_index_item(trans, root, path,
- key->objectid, key->offset,
- &name, 1);
+ index_dst_di = btrfs_lookup_dir_index_item(trans, root, wc->subvol_path,
+ wc->log_key.objectid,
+ wc->log_key.offset, &name, 1);
if (IS_ERR(index_dst_di)) {
ret = PTR_ERR(index_dst_di);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir index item for dir %llu name %.*s root %llu",
+ wc->log_key.objectid, name.len, name.name,
+ btrfs_root_id(root));
goto out;
} else if (index_dst_di) {
- ret = delete_conflicting_dir_entry(trans, BTRFS_I(dir), path,
- index_dst_di, &log_key,
- log_flags, exists);
- if (ret < 0)
+ ret = delete_conflicting_dir_entry(wc, dir, index_dst_di,
+ &log_key, log_flags, exists);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to delete conflicting entry for dir %llu name %.*s root %llu",
+ btrfs_ino(dir), name.len, name.name,
+ btrfs_root_id(root));
goto out;
+ }
index_dst_matches = (ret == 1);
}
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
if (dir_dst_matches && index_dst_matches) {
ret = 0;
@@ -1921,9 +2214,13 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
*/
search_key.objectid = log_key.objectid;
search_key.type = BTRFS_INODE_REF_KEY;
- search_key.offset = key->objectid;
+ search_key.offset = wc->log_key.objectid;
ret = backref_in_log(root->log_root, &search_key, 0, &name);
if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to check if ref item is logged for inode %llu dir %llu name %.*s root %llu",
+ search_key.objectid, btrfs_ino(dir),
+ name.len, name.name, btrfs_root_id(root));
goto out;
} else if (ret) {
/* The dentry will be added later. */
@@ -1934,9 +2231,13 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
search_key.objectid = log_key.objectid;
search_key.type = BTRFS_INODE_EXTREF_KEY;
- search_key.offset = key->objectid;
- ret = backref_in_log(root->log_root, &search_key, key->objectid, &name);
+ search_key.offset = btrfs_extref_hash(wc->log_key.objectid, name.name, name.len);
+ ret = backref_in_log(root->log_root, &search_key, wc->log_key.objectid, &name);
if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to check if extref item is logged for inode %llu dir %llu name %.*s root %llu",
+ search_key.objectid, btrfs_ino(dir),
+ name.len, name.name, btrfs_root_id(root));
goto out;
} else if (ret) {
/* The dentry will be added later. */
@@ -1944,11 +2245,15 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
update_size = false;
goto out;
}
- btrfs_release_path(path);
- ret = insert_one_name(trans, root, key->objectid, key->offset,
+ ret = insert_one_name(trans, root, wc->log_key.objectid, wc->log_key.offset,
&name, &log_key);
- if (ret && ret != -ENOENT && ret != -EEXIST)
+ if (ret && ret != -ENOENT && ret != -EEXIST) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to insert name %.*s for inode %llu dir %llu root %llu",
+ name.len, name.name, log_key.objectid,
+ btrfs_ino(dir), btrfs_root_id(root));
goto out;
+ }
if (!ret)
name_added = true;
update_size = false;
@@ -1956,31 +2261,32 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
out:
if (!ret && update_size) {
- btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name.len * 2);
- ret = btrfs_update_inode(trans, BTRFS_I(dir));
+ btrfs_i_size_write(dir, dir->vfs_inode.i_size + name.len * 2);
+ ret = btrfs_update_inode(trans, dir);
+ if (ret)
+ btrfs_abort_log_replay(wc, ret,
+ "failed to update dir inode %llu root %llu",
+ btrfs_ino(dir), btrfs_root_id(root));
}
kfree(name.name);
- iput(dir);
+ iput(&dir->vfs_inode);
if (!ret && name_added)
ret = 1;
return ret;
}
/* Replay one dir item from a BTRFS_DIR_INDEX_KEY key. */
-static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct extent_buffer *eb, int slot,
- struct btrfs_key *key)
+static noinline int replay_one_dir_item(struct walk_control *wc)
{
int ret;
struct btrfs_dir_item *di;
/* We only log dir index keys, which only contain a single dir item. */
- ASSERT(key->type == BTRFS_DIR_INDEX_KEY);
+ ASSERT(wc->log_key.type == BTRFS_DIR_INDEX_KEY,
+ "wc->log_key.type=%u", wc->log_key.type);
- di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
- ret = replay_one_name(trans, root, path, eb, di, key);
+ di = btrfs_item_ptr(wc->log_leaf, wc->log_slot, struct btrfs_dir_item);
+ ret = replay_one_name(wc, di);
if (ret < 0)
return ret;
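
Several of the assertions converted in these hunks now pass a formatted message along with the condition, e.g. ASSERT(wc->log_key.type == BTRFS_DIR_INDEX_KEY, "wc->log_key.type=%u", ...). The real macro lives in the btrfs headers and is gated on CONFIG_BTRFS_ASSERT; purely as an illustration of how an optional-message ASSERT can be built (this is a sketch, not the actual definition):

#ifdef CONFIG_BTRFS_ASSERT
/* Accepts either ASSERT(cond) or ASSERT(cond, "fmt", args...); the ""
 * concatenation makes the message optional. */
#define ASSERT(cond, args...)                                   \
do {                                                            \
        if (unlikely(!(cond))) {                                \
                pr_err("assertion failed: %s\n", #cond);        \
                pr_err("" args);                                \
                BUG();                                          \
        }                                                       \
} while (0)
#else
#define ASSERT(cond, args...)   (void)(cond)
#endif
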
@@ -2010,17 +2316,11 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
* to ever delete the parent directory as it would result in stale

* dentries that can never be deleted.
*/
- if (ret == 1 && btrfs_dir_ftype(eb, di) != BTRFS_FT_DIR) {
- struct btrfs_path *fixup_path;
+ if (ret == 1 && btrfs_dir_ftype(wc->log_leaf, di) != BTRFS_FT_DIR) {
struct btrfs_key di_key;
- fixup_path = btrfs_alloc_path();
- if (!fixup_path)
- return -ENOMEM;
-
- btrfs_dir_item_key_to_cpu(eb, di, &di_key);
- ret = link_to_fixup_dir(trans, root, fixup_path, di_key.objectid);
- btrfs_free_path(fixup_path);
+ btrfs_dir_item_key_to_cpu(wc->log_leaf, di, &di_key);
+ ret = link_to_fixup_dir(wc, di_key.objectid);
}
return ret;
@@ -2113,20 +2413,20 @@ out:
* item is not in the log, the item is removed and the inode it points
* to is unlinked
*/
-static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
- struct btrfs_root *log,
- struct btrfs_path *path,
+static noinline int check_item_in_log(struct walk_control *wc,
struct btrfs_path *log_path,
- struct inode *dir,
- struct btrfs_key *dir_key)
+ struct btrfs_inode *dir,
+ struct btrfs_key *dir_key,
+ bool force_remove)
{
- struct btrfs_root *root = BTRFS_I(dir)->root;
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = dir->root;
int ret;
struct extent_buffer *eb;
int slot;
struct btrfs_dir_item *di;
struct fscrypt_str name = { 0 };
- struct inode *inode = NULL;
+ struct btrfs_inode *inode = NULL;
struct btrfs_key location;
/*
@@ -2135,23 +2435,33 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
* we need to do is process the dir index keys, we (and our caller) can
* safely ignore dir item keys (key type BTRFS_DIR_ITEM_KEY).
*/
- ASSERT(dir_key->type == BTRFS_DIR_INDEX_KEY);
+ ASSERT(dir_key->type == BTRFS_DIR_INDEX_KEY, "dir_key->type=%u", dir_key->type);
- eb = path->nodes[0];
- slot = path->slots[0];
+ eb = wc->subvol_path->nodes[0];
+ slot = wc->subvol_path->slots[0];
di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
ret = read_alloc_one_name(eb, di + 1, btrfs_dir_name_len(eb, di), &name);
- if (ret)
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to allocate name for dir %llu index %llu root %llu",
+ btrfs_ino(dir), dir_key->offset,
+ btrfs_root_id(root));
goto out;
+ }
- if (log) {
+ if (!force_remove) {
struct btrfs_dir_item *log_di;
- log_di = btrfs_lookup_dir_index_item(trans, log, log_path,
+ log_di = btrfs_lookup_dir_index_item(trans, wc->log, log_path,
dir_key->objectid,
dir_key->offset, &name, 0);
if (IS_ERR(log_di)) {
ret = PTR_ERR(log_di);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir index item for dir %llu index %llu name %.*s root %llu",
+ btrfs_ino(dir), dir_key->offset,
+ name.len, name.name,
+ btrfs_root_id(root));
goto out;
} else if (log_di) {
/* The dentry exists in the log, we have nothing to do. */
@@ -2161,87 +2471,99 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
}
btrfs_dir_item_key_to_cpu(eb, di, &location);
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
btrfs_release_path(log_path);
- inode = read_one_inode(root, location.objectid);
- if (!inode) {
- ret = -EIO;
+ inode = btrfs_iget_logging(location.objectid, root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ inode = NULL;
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup inode %llu root %llu",
+ location.objectid, btrfs_root_id(root));
goto out;
}
- ret = link_to_fixup_dir(trans, root, path, location.objectid);
+ ret = link_to_fixup_dir(wc, location.objectid);
if (ret)
goto out;
- inc_nlink(inode);
- ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir), BTRFS_I(inode),
- &name);
+ inc_nlink(&inode->vfs_inode);
+ ret = unlink_inode_for_log_replay(wc, dir, inode, &name);
/*
* Unlike dir item keys, dir index keys can only have one name (entry) in
* them, as there are no key collisions since each key has a unique offset
* (an index number), so we're done.
*/
out:
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
btrfs_release_path(log_path);
kfree(name.name);
- iput(inode);
+ if (inode)
+ iput(&inode->vfs_inode);
return ret;
}
-static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_root *log,
- struct btrfs_path *path,
- const u64 ino)
+static int replay_xattr_deletes(struct walk_control *wc)
{
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = wc->root;
+ struct btrfs_root *log = wc->log;
struct btrfs_key search_key;
- struct btrfs_path *log_path;
- int i;
+ BTRFS_PATH_AUTO_FREE(log_path);
+ const u64 ino = wc->log_key.objectid;
int nritems;
int ret;
log_path = btrfs_alloc_path();
- if (!log_path)
+ if (!log_path) {
+ btrfs_abort_log_replay(wc, -ENOMEM, "failed to allocate path");
return -ENOMEM;
+ }
search_key.objectid = ino;
search_key.type = BTRFS_XATTR_ITEM_KEY;
search_key.offset = 0;
again:
- ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
- if (ret < 0)
+ ret = btrfs_search_slot(NULL, root, &search_key, wc->subvol_path, 0, 0);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to search xattrs for inode %llu root %llu",
+ ino, btrfs_root_id(root));
goto out;
+ }
process_leaf:
- nritems = btrfs_header_nritems(path->nodes[0]);
- for (i = path->slots[0]; i < nritems; i++) {
+ nritems = btrfs_header_nritems(wc->subvol_path->nodes[0]);
+ for (int i = wc->subvol_path->slots[0]; i < nritems; i++) {
struct btrfs_key key;
struct btrfs_dir_item *di;
struct btrfs_dir_item *log_di;
u32 total_size;
u32 cur;
- btrfs_item_key_to_cpu(path->nodes[0], &key, i);
+ btrfs_item_key_to_cpu(wc->subvol_path->nodes[0], &key, i);
if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
ret = 0;
goto out;
}
- di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
- total_size = btrfs_item_size(path->nodes[0], i);
+ di = btrfs_item_ptr(wc->subvol_path->nodes[0], i, struct btrfs_dir_item);
+ total_size = btrfs_item_size(wc->subvol_path->nodes[0], i);
cur = 0;
while (cur < total_size) {
- u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
- u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
+ u16 name_len = btrfs_dir_name_len(wc->subvol_path->nodes[0], di);
+ u16 data_len = btrfs_dir_data_len(wc->subvol_path->nodes[0], di);
u32 this_len = sizeof(*di) + name_len + data_len;
char *name;
name = kmalloc(name_len, GFP_NOFS);
if (!name) {
ret = -ENOMEM;
+ btrfs_abort_log_replay(wc, ret,
+ "failed to allocate memory for name of length %u",
+ name_len);
goto out;
}
- read_extent_buffer(path->nodes[0], name,
+ read_extent_buffer(wc->subvol_path->nodes[0], name,
(unsigned long)(di + 1), name_len);
log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
@@ -2249,40 +2571,59 @@ process_leaf:
btrfs_release_path(log_path);
if (!log_di) {
/* Doesn't exist in log tree, so delete it. */
- btrfs_release_path(path);
- di = btrfs_lookup_xattr(trans, root, path, ino,
+ btrfs_release_path(wc->subvol_path);
+ di = btrfs_lookup_xattr(trans, root, wc->subvol_path, ino,
name, name_len, -1);
- kfree(name);
if (IS_ERR(di)) {
ret = PTR_ERR(di);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup xattr with name %.*s for inode %llu root %llu",
+ name_len, name, ino,
+ btrfs_root_id(root));
+ kfree(name);
goto out;
}
ASSERT(di);
ret = btrfs_delete_one_dir_name(trans, root,
- path, di);
- if (ret)
+ wc->subvol_path, di);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to delete xattr with name %.*s for inode %llu root %llu",
+ name_len, name, ino,
+ btrfs_root_id(root));
+ kfree(name);
goto out;
- btrfs_release_path(path);
+ }
+ btrfs_release_path(wc->subvol_path);
+ kfree(name);
search_key = key;
goto again;
}
- kfree(name);
if (IS_ERR(log_di)) {
ret = PTR_ERR(log_di);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup xattr in log tree with name %.*s for inode %llu root %llu",
+ name_len, name, ino,
+ btrfs_root_id(root));
+ kfree(name);
goto out;
}
+ kfree(name);
cur += this_len;
di = (struct btrfs_dir_item *)((char *)di + this_len);
}
}
- ret = btrfs_next_leaf(root, path);
+ ret = btrfs_next_leaf(root, wc->subvol_path);
if (ret > 0)
ret = 0;
else if (ret == 0)
goto process_leaf;
+ else
+ btrfs_abort_log_replay(wc, ret,
+ "failed to get next leaf in subvolume root %llu",
+ btrfs_root_id(root));
out:
- btrfs_free_path(log_path);
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
return ret;
}
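
replay_xattr_deletes() above (and several functions further down, such as replay_dir_deletes(), walk_log_tree(), flush_dir_items_batch() and copy_items()) switch from explicit btrfs_free_path()/kfree() calls on every exit path to scope-based cleanup declarations like BTRFS_PATH_AUTO_FREE() and AUTO_KFREE(). Those macros are defined in btrfs headers outside this diff; assuming they follow the usual linux/cleanup.h pattern, a minimal sketch of the idea (the exact definitions may differ):

#include <linux/cleanup.h>

/* Run btrfs_free_path() automatically when the variable goes out of scope. */
DEFINE_FREE(btrfs_free_path, struct btrfs_path *, if (_T) btrfs_free_path(_T))

#define BTRFS_PATH_AUTO_FREE(path_name) \
        struct btrfs_path *path_name __free(btrfs_free_path) = NULL

/* Likewise for heap buffers released with kfree(); kfree already has a
 * DEFINE_FREE() in the slab headers, so __free(kfree) can be used directly.
 * Inferred from the "char AUTO_KFREE(ins_data);" usage further below. */
#define AUTO_KFREE(name)        *name __free(kfree) = NULL
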
@@ -2297,34 +2638,41 @@ out:
* Anything we don't find in the log is unlinked and removed from the
* directory.
*/
-static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_root *log,
- struct btrfs_path *path,
- u64 dirid, int del_all)
+static noinline int replay_dir_deletes(struct walk_control *wc,
+ u64 dirid, bool del_all)
{
+ struct btrfs_root *root = wc->root;
+ struct btrfs_root *log = (del_all ? NULL : wc->log);
u64 range_start;
u64 range_end;
int ret = 0;
struct btrfs_key dir_key;
struct btrfs_key found_key;
- struct btrfs_path *log_path;
- struct inode *dir;
+ BTRFS_PATH_AUTO_FREE(log_path);
+ struct btrfs_inode *dir;
dir_key.objectid = dirid;
dir_key.type = BTRFS_DIR_INDEX_KEY;
log_path = btrfs_alloc_path();
- if (!log_path)
+ if (!log_path) {
+ btrfs_abort_log_replay(wc, -ENOMEM, "failed to allocate path");
return -ENOMEM;
+ }
- dir = read_one_inode(root, dirid);
- /* it isn't an error if the inode isn't there, that can happen
- * because we replay the deletes before we copy in the inode item
- * from the log
+ dir = btrfs_iget_logging(dirid, root);
+ /*
+ * It isn't an error if the inode isn't there, that can happen because
+ * we replay the deletes before we copy in the inode item from the log.
*/
- if (!dir) {
- btrfs_free_path(log_path);
- return 0;
+ if (IS_ERR(dir)) {
+ ret = PTR_ERR(dir);
+ if (ret == -ENOENT)
+ ret = 0;
+ else
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir inode %llu root %llu",
+ dirid, btrfs_root_id(root));
+ return ret;
}
range_start = 0;
@@ -2333,32 +2681,45 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
if (del_all)
range_end = (u64)-1;
else {
- ret = find_dir_range(log, path, dirid,
+ ret = find_dir_range(log, wc->subvol_path, dirid,
&range_start, &range_end);
- if (ret < 0)
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to find range for dir %llu in log tree root %llu",
+ dirid, btrfs_root_id(root));
goto out;
- else if (ret > 0)
+ } else if (ret > 0) {
break;
+ }
}
dir_key.offset = range_start;
while (1) {
int nritems;
- ret = btrfs_search_slot(NULL, root, &dir_key, path,
- 0, 0);
- if (ret < 0)
+ ret = btrfs_search_slot(NULL, root, &dir_key,
+ wc->subvol_path, 0, 0);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to search root %llu for key " BTRFS_KEY_FMT,
+ btrfs_root_id(root),
+ BTRFS_KEY_FMT_VALUE(&dir_key));
goto out;
+ }
- nritems = btrfs_header_nritems(path->nodes[0]);
- if (path->slots[0] >= nritems) {
- ret = btrfs_next_leaf(root, path);
- if (ret == 1)
+ nritems = btrfs_header_nritems(wc->subvol_path->nodes[0]);
+ if (wc->subvol_path->slots[0] >= nritems) {
+ ret = btrfs_next_leaf(root, wc->subvol_path);
+ if (ret == 1) {
break;
- else if (ret < 0)
+ } else if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to get next leaf in subvolume root %llu",
+ btrfs_root_id(root));
goto out;
+ }
}
- btrfs_item_key_to_cpu(path->nodes[0], &found_key,
- path->slots[0]);
+ btrfs_item_key_to_cpu(wc->subvol_path->nodes[0], &found_key,
+ wc->subvol_path->slots[0]);
if (found_key.objectid != dirid ||
found_key.type != dir_key.type) {
ret = 0;
@@ -2368,25 +2729,22 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
if (found_key.offset > range_end)
break;
- ret = check_item_in_log(trans, log, path,
- log_path, dir,
- &found_key);
+ ret = check_item_in_log(wc, log_path, dir, &found_key, del_all);
if (ret)
goto out;
if (found_key.offset == (u64)-1)
break;
dir_key.offset = found_key.offset + 1;
}
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
if (range_end == (u64)-1)
break;
range_start = range_end + 1;
}
ret = 0;
out:
- btrfs_release_path(path);
- btrfs_free_path(log_path);
- iput(dir);
+ btrfs_release_path(wc->subvol_path);
+ iput(&dir->vfs_inode);
return ret;
}
@@ -2401,7 +2759,7 @@ out:
* only in the log (references come from either directory items or inode
* back refs).
*/
-static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
+static int replay_one_buffer(struct extent_buffer *eb,
struct walk_control *wc, u64 gen, int level)
{
int nritems;
@@ -2409,44 +2767,62 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
.transid = gen,
.level = level
};
- struct btrfs_path *path;
- struct btrfs_root *root = wc->replay_dest;
- struct btrfs_key key;
- int i;
+ struct btrfs_root *root = wc->root;
+ struct btrfs_trans_handle *trans = wc->trans;
int ret;
- ret = btrfs_read_extent_buffer(eb, &check);
- if (ret)
- return ret;
-
- level = btrfs_header_level(eb);
-
if (level != 0)
return 0;
- path = btrfs_alloc_path();
- if (!path)
+ /*
+ * Set to NULL since it was not yet read and in case we abort log replay
+ * on error, we have no valid log tree leaf to dump.
+ */
+ wc->log_leaf = NULL;
+ ret = btrfs_read_extent_buffer(eb, &check);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to read log tree leaf %llu for root %llu",
+ eb->start, btrfs_root_id(root));
+ return ret;
+ }
+
+ ASSERT(wc->subvol_path == NULL);
+ wc->subvol_path = btrfs_alloc_path();
+ if (!wc->subvol_path) {
+ btrfs_abort_log_replay(wc, -ENOMEM, "failed to allocate path");
return -ENOMEM;
+ }
+
+ wc->log_leaf = eb;
nritems = btrfs_header_nritems(eb);
- for (i = 0; i < nritems; i++) {
- btrfs_item_key_to_cpu(eb, &key, i);
+ for (wc->log_slot = 0; wc->log_slot < nritems; wc->log_slot++) {
+ struct btrfs_inode_item *inode_item;
- /* inode keys are done during the first stage */
- if (key.type == BTRFS_INODE_ITEM_KEY &&
- wc->stage == LOG_WALK_REPLAY_INODES) {
- struct btrfs_inode_item *inode_item;
- u32 mode;
+ btrfs_item_key_to_cpu(eb, &wc->log_key, wc->log_slot);
- inode_item = btrfs_item_ptr(eb, i,
- struct btrfs_inode_item);
+ if (wc->log_key.type == BTRFS_INODE_ITEM_KEY) {
+ inode_item = btrfs_item_ptr(eb, wc->log_slot,
+ struct btrfs_inode_item);
/*
- * If we have a tmpfile (O_TMPFILE) that got fsync'ed
- * and never got linked before the fsync, skip it, as
- * replaying it is pointless since it would be deleted
- * later. We skip logging tmpfiles, but it's always
- * possible we are replaying a log created with a kernel
- * that used to log tmpfiles.
+ * An inode with no links is either:
+ *
+ * 1) A tmpfile (O_TMPFILE) that got fsync'ed and never
+ * got linked before the fsync, skip it, as replaying
+ * it is pointless since it would be deleted later.
+ * We skip logging tmpfiles, but it's always possible
+ * we are replaying a log created with a kernel that
+ * used to log tmpfiles;
+ *
+ * 2) A non-tmpfile which got its last link deleted
+ * while holding an open fd on it and later got
+ * fsynced through that fd. We always log the
+ * parent inodes when inode->last_unlink_trans is
+ * set to the current transaction, so ignore all the
+ * inode items for this inode. We will delete the
+ * inode when processing the parent directory with
+ * replay_dir_deletes().
*/
if (btrfs_inode_nlink(eb, inode_item) == 0) {
wc->ignore_cur_inode = true;
@@ -2454,19 +2830,23 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
} else {
wc->ignore_cur_inode = false;
}
- ret = replay_xattr_deletes(wc->trans, root, log,
- path, key.objectid);
+ }
+
+ /* Inode keys are done during the first stage. */
+ if (wc->log_key.type == BTRFS_INODE_ITEM_KEY &&
+ wc->stage == LOG_WALK_REPLAY_INODES) {
+ u32 mode;
+
+ ret = replay_xattr_deletes(wc);
if (ret)
break;
mode = btrfs_inode_mode(eb, inode_item);
if (S_ISDIR(mode)) {
- ret = replay_dir_deletes(wc->trans,
- root, log, path, key.objectid, 0);
+ ret = replay_dir_deletes(wc, wc->log_key.objectid, false);
if (ret)
break;
}
- ret = overwrite_item(wc->trans, root, path,
- eb, i, &key);
+ ret = overwrite_item(wc);
if (ret)
break;
@@ -2480,36 +2860,48 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
*/
if (S_ISREG(mode)) {
struct btrfs_drop_extents_args drop_args = { 0 };
- struct inode *inode;
+ struct btrfs_inode *inode;
u64 from;
- inode = read_one_inode(root, key.objectid);
- if (!inode) {
- ret = -EIO;
+ inode = btrfs_iget_logging(wc->log_key.objectid, root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup inode %llu root %llu",
+ wc->log_key.objectid,
+ btrfs_root_id(root));
break;
}
- from = ALIGN(i_size_read(inode),
+ from = ALIGN(i_size_read(&inode->vfs_inode),
root->fs_info->sectorsize);
drop_args.start = from;
drop_args.end = (u64)-1;
drop_args.drop_cache = true;
- ret = btrfs_drop_extents(wc->trans, root,
- BTRFS_I(inode),
- &drop_args);
- if (!ret) {
- inode_sub_bytes(inode,
+ drop_args.path = wc->subvol_path;
+ ret = btrfs_drop_extents(trans, root, inode, &drop_args);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to drop extents for inode %llu root %llu offset %llu",
+ btrfs_ino(inode),
+ btrfs_root_id(root),
+ from);
+ } else {
+ inode_sub_bytes(&inode->vfs_inode,
drop_args.bytes_found);
/* Update the inode's nbytes. */
- ret = btrfs_update_inode(wc->trans,
- BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, inode);
+ if (ret)
+ btrfs_abort_log_replay(wc, ret,
+ "failed to update inode %llu root %llu",
+ btrfs_ino(inode),
+ btrfs_root_id(root));
}
- iput(inode);
+ iput(&inode->vfs_inode);
if (ret)
break;
}
- ret = link_to_fixup_dir(wc->trans, root,
- path, key.objectid);
+ ret = link_to_fixup_dir(wc, wc->log_key.objectid);
if (ret)
break;
}
@@ -2517,10 +2909,9 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
if (wc->ignore_cur_inode)
continue;
- if (key.type == BTRFS_DIR_INDEX_KEY &&
+ if (wc->log_key.type == BTRFS_DIR_INDEX_KEY &&
wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
- ret = replay_one_dir_item(wc->trans, root, path,
- eb, i, &key);
+ ret = replay_one_dir_item(wc);
if (ret)
break;
}
@@ -2529,21 +2920,17 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
continue;
/* these keys are simply copied */
- if (key.type == BTRFS_XATTR_ITEM_KEY) {
- ret = overwrite_item(wc->trans, root, path,
- eb, i, &key);
+ if (wc->log_key.type == BTRFS_XATTR_ITEM_KEY) {
+ ret = overwrite_item(wc);
if (ret)
break;
- } else if (key.type == BTRFS_INODE_REF_KEY ||
- key.type == BTRFS_INODE_EXTREF_KEY) {
- ret = add_inode_ref(wc->trans, root, log, path,
- eb, i, &key);
- if (ret && ret != -ENOENT)
+ } else if (wc->log_key.type == BTRFS_INODE_REF_KEY ||
+ wc->log_key.type == BTRFS_INODE_EXTREF_KEY) {
+ ret = add_inode_ref(wc);
+ if (ret)
break;
- ret = 0;
- } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
- ret = replay_one_extent(wc->trans, root, path,
- eb, i, &key);
+ } else if (wc->log_key.type == BTRFS_EXTENT_DATA_KEY) {
+ ret = replay_one_extent(wc);
if (ret)
break;
}
@@ -2554,37 +2941,16 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
* older kernel with such keys, ignore them.
*/
}
- btrfs_free_path(path);
+ btrfs_free_path(wc->subvol_path);
+ wc->subvol_path = NULL;
return ret;
}
-/*
- * Correctly adjust the reserved bytes occupied by a log tree extent buffer
- */
-static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start)
-{
- struct btrfs_block_group *cache;
-
- cache = btrfs_lookup_block_group(fs_info, start);
- if (!cache) {
- btrfs_err(fs_info, "unable to find block group for %llu", start);
- return;
- }
-
- spin_lock(&cache->space_info->lock);
- spin_lock(&cache->lock);
- cache->reserved -= fs_info->nodesize;
- cache->space_info->bytes_reserved -= fs_info->nodesize;
- spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
-
- btrfs_put_block_group(cache);
-}
-
static int clean_log_buffer(struct btrfs_trans_handle *trans,
struct extent_buffer *eb)
{
- int ret;
+ struct btrfs_fs_info *fs_info = eb->fs_info;
+ struct btrfs_block_group *bg;
btrfs_tree_lock(eb);
btrfs_clear_buffer_dirty(trans, eb);
@@ -2592,22 +2958,38 @@ static int clean_log_buffer(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(eb);
if (trans) {
+ int ret;
+
ret = btrfs_pin_reserved_extent(trans, eb);
if (ret)
- return ret;
- } else {
- unaccount_log_buffer(eb->fs_info, eb->start);
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+
+ bg = btrfs_lookup_block_group(fs_info, eb->start);
+ if (!bg) {
+ btrfs_err(fs_info, "unable to find block group for %llu", eb->start);
+ btrfs_handle_fs_error(fs_info, -ENOENT, NULL);
+ return -ENOENT;
}
+ spin_lock(&bg->space_info->lock);
+ spin_lock(&bg->lock);
+ bg->reserved -= fs_info->nodesize;
+ bg->space_info->bytes_reserved -= fs_info->nodesize;
+ spin_unlock(&bg->lock);
+ spin_unlock(&bg->space_info->lock);
+
+ btrfs_put_block_group(bg);
+
return 0;
}
-static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path, int *level,
- struct walk_control *wc)
+static noinline int walk_down_log_tree(struct btrfs_path *path, int *level,
+ struct walk_control *wc)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_fs_info *fs_info = wc->log->fs_info;
u64 bytenr;
u64 ptr_gen;
struct extent_buffer *next;
@@ -2635,12 +3017,17 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
next = btrfs_find_create_tree_block(fs_info, bytenr,
btrfs_header_owner(cur),
*level - 1);
- if (IS_ERR(next))
- return PTR_ERR(next);
+ if (IS_ERR(next)) {
+ ret = PTR_ERR(next);
+ if (trans)
+ btrfs_abort_transaction(trans, ret);
+ else
+ btrfs_handle_fs_error(fs_info, ret, NULL);
+ return ret;
+ }
if (*level == 1) {
- ret = wc->process_func(root, next, wc, ptr_gen,
- *level - 1);
+ ret = wc->process_func(next, wc, ptr_gen, *level - 1);
if (ret) {
free_extent_buffer(next);
return ret;
@@ -2651,6 +3038,10 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
ret = btrfs_read_extent_buffer(next, &check);
if (ret) {
free_extent_buffer(next);
+ if (trans)
+ btrfs_abort_transaction(trans, ret);
+ else
+ btrfs_handle_fs_error(fs_info, ret, NULL);
return ret;
}
@@ -2666,6 +3057,10 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
ret = btrfs_read_extent_buffer(next, &check);
if (ret) {
free_extent_buffer(next);
+ if (trans)
+ btrfs_abort_transaction(trans, ret);
+ else
+ btrfs_handle_fs_error(fs_info, ret, NULL);
return ret;
}
@@ -2682,10 +3077,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
return 0;
}
-static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path, int *level,
- struct walk_control *wc)
+static noinline int walk_up_log_tree(struct btrfs_path *path, int *level,
+ struct walk_control *wc)
{
int i;
int slot;
@@ -2699,14 +3092,14 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
WARN_ON(*level == 0);
return 0;
} else {
- ret = wc->process_func(root, path->nodes[*level], wc,
+ ret = wc->process_func(path->nodes[*level], wc,
btrfs_header_generation(path->nodes[*level]),
*level);
if (ret)
return ret;
if (wc->free) {
- ret = clean_log_buffer(trans, path->nodes[*level]);
+ ret = clean_log_buffer(wc->trans, path->nodes[*level]);
if (ret)
return ret;
}
@@ -2723,13 +3116,13 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
* the tree freeing any blocks that have a ref count of zero after being
* decremented.
*/
-static int walk_log_tree(struct btrfs_trans_handle *trans,
- struct btrfs_root *log, struct walk_control *wc)
+static int walk_log_tree(struct walk_control *wc)
{
+ struct btrfs_root *log = wc->log;
int ret = 0;
int wret;
int level;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int orig_level;
path = btrfs_alloc_path();
@@ -2739,40 +3132,34 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
level = btrfs_header_level(log->node);
orig_level = level;
path->nodes[level] = log->node;
- atomic_inc(&log->node->refs);
+ refcount_inc(&log->node->refs);
path->slots[level] = 0;
while (1) {
- wret = walk_down_log_tree(trans, log, path, &level, wc);
+ wret = walk_down_log_tree(path, &level, wc);
if (wret > 0)
break;
- if (wret < 0) {
- ret = wret;
- goto out;
- }
+ if (wret < 0)
+ return wret;
- wret = walk_up_log_tree(trans, log, path, &level, wc);
+ wret = walk_up_log_tree(path, &level, wc);
if (wret > 0)
break;
- if (wret < 0) {
- ret = wret;
- goto out;
- }
+ if (wret < 0)
+ return wret;
}
/* was the root node processed? if not, catch it here */
if (path->nodes[orig_level]) {
- ret = wc->process_func(log, path->nodes[orig_level], wc,
+ ret = wc->process_func(path->nodes[orig_level], wc,
btrfs_header_generation(path->nodes[orig_level]),
orig_level);
if (ret)
- goto out;
+ return ret;
if (wc->free)
- ret = clean_log_buffer(trans, path->nodes[orig_level]);
+ ret = clean_log_buffer(wc->trans, path->nodes[orig_level]);
}
-out:
- btrfs_free_path(path);
return ret;
}
@@ -2951,7 +3338,8 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_unlock(&root->log_mutex);
return ctx->log_ret;
}
- ASSERT(log_transid == root->log_transid);
+ ASSERT(log_transid == root->log_transid,
+ "log_transid=%d root->log_transid=%d", log_transid, root->log_transid);
atomic_set(&root->log_commit[index1], 1);
/* wait for previous tree log sync to complete */
@@ -2980,9 +3368,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
}
if (log_transid % 2 == 0)
- mark = EXTENT_DIRTY;
+ mark = EXTENT_DIRTY_LOG1;
else
- mark = EXTENT_NEW;
+ mark = EXTENT_DIRTY_LOG2;
/* we start IO on all the marked extents here, but we don't actually
* wait for them until later.
@@ -3091,7 +3479,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
ret = root_log_ctx.log_ret;
goto out;
}
- ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
+ ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid,
+ "root_log_ctx.log_transid=%d log_root_tree->log_transid=%d",
+ root_log_ctx.log_transid, log_root_tree->log_transid);
atomic_set(&log_root_tree->log_commit[index2], 1);
if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
@@ -3113,7 +3503,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
ret = btrfs_write_marked_extents(fs_info,
&log_root_tree->dirty_log_pages,
- EXTENT_DIRTY | EXTENT_NEW);
+ EXTENT_DIRTY_LOG1 | EXTENT_DIRTY_LOG2);
blk_finish_plug(&plug);
/*
* As described above, -EAGAIN indicates a hole in the extents. We
@@ -3133,7 +3523,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
ret = btrfs_wait_tree_log_extents(log, mark);
if (!ret)
ret = btrfs_wait_tree_log_extents(log_root_tree,
- EXTENT_NEW | EXTENT_DIRTY);
+ EXTENT_DIRTY_LOG1 | EXTENT_DIRTY_LOG2);
if (ret) {
btrfs_set_log_full_commit(trans);
mutex_unlock(&log_root_tree->log_mutex);
@@ -3181,7 +3571,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
ret = write_all_supers(fs_info, 1);
mutex_unlock(&fs_info->tree_log_mutex);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_set_log_full_commit(trans);
btrfs_abort_transaction(trans, ret);
goto out_wake_log_root;
@@ -3195,7 +3585,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* someone else already started it. We use <= and not < because the
* first log transaction has an ID of 0.
*/
- ASSERT(btrfs_get_root_last_log_commit(root) <= log_transid);
+ ASSERT(btrfs_get_root_last_log_commit(root) <= log_transid,
+ "last_log_commit(root)=%d log_transid=%d",
+ btrfs_get_root_last_log_commit(root), log_transid);
btrfs_set_root_last_log_commit(root, log_transid);
out_wake_log_root:
@@ -3233,12 +3625,14 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
{
int ret;
struct walk_control wc = {
- .free = 1,
- .process_func = process_one_buffer
+ .free = true,
+ .process_func = process_one_buffer,
+ .log = log,
+ .trans = trans,
};
if (log->node) {
- ret = walk_log_tree(trans, log, &wc);
+ ret = walk_log_tree(&wc);
if (ret) {
/*
* We weren't able to traverse the entire log tree, the
@@ -3259,9 +3653,9 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
*/
btrfs_write_marked_extents(log->fs_info,
&log->dirty_log_pages,
- EXTENT_DIRTY | EXTENT_NEW);
+ EXTENT_DIRTY_LOG1 | EXTENT_DIRTY_LOG2);
btrfs_wait_tree_log_extents(log,
- EXTENT_DIRTY | EXTENT_NEW);
+ EXTENT_DIRTY_LOG1 | EXTENT_DIRTY_LOG2);
if (trans)
btrfs_abort_transaction(trans, ret);
@@ -3270,8 +3664,8 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
}
}
- extent_io_tree_release(&log->dirty_log_pages);
- extent_io_tree_release(&log->log_csum_range);
+ btrfs_extent_io_tree_release(&log->dirty_log_pages);
+ btrfs_extent_io_tree_release(&log->log_csum_range);
btrfs_put_root(log);
}
@@ -3301,6 +3695,31 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
return 0;
}
+static bool mark_inode_as_not_logged(const struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode)
+{
+ bool ret = false;
+
+ /*
+ * Do this only if ->logged_trans is still 0 to prevent races with
+ * concurrent logging as we may see the inode not logged when
+ * inode_logged() is called but it gets logged after inode_logged() did
+ * not find it in the log tree and we end up setting ->logged_trans to a
+ * value less than trans->transid after the concurrent logging task has
+ * set it to trans->transid. As a consequence, subsequent rename, unlink
+ * and link operations may end up not logging new names and removing old
+ * names from the log.
+ */
+ spin_lock(&inode->lock);
+ if (inode->logged_trans == 0)
+ inode->logged_trans = trans->transid - 1;
+ else if (inode->logged_trans == trans->transid)
+ ret = true;
+ spin_unlock(&inode->lock);
+
+ return ret;
+}
+
/*
* Check if an inode was logged in the current transaction. This correctly deals
* with the case where the inode was logged but has a logged_trans of 0, which
@@ -3318,15 +3737,32 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
struct btrfs_key key;
int ret;
- if (inode->logged_trans == trans->transid)
+ /*
+ * Quick lockless check, since once ->logged_trans is set to the current
+ * transaction, we never set it to a lower value anywhere else.
+ */
+ if (data_race(inode->logged_trans) == trans->transid)
return 1;
/*
- * If logged_trans is not 0, then we know the inode logged was not logged
- * in this transaction, so we can return false right away.
+ * If logged_trans is not 0 and not trans->transid, then we know the
+ * inode was not logged in this transaction, so we can return false
+ * right away. We take the lock to avoid a race caused by load/store
+ * tearing with a concurrent btrfs_log_inode() call or a concurrent task
+ * in this function further below - an update to trans->transid can be
+ * torn into two 32-bit updates for example, in which case we could
+ * see a positive value that is not trans->transid and assume the inode
+ * was not logged when it was.
*/
- if (inode->logged_trans > 0)
+ spin_lock(&inode->lock);
+ if (inode->logged_trans == trans->transid) {
+ spin_unlock(&inode->lock);
+ return 1;
+ } else if (inode->logged_trans > 0) {
+ spin_unlock(&inode->lock);
return 0;
+ }
+ spin_unlock(&inode->lock);
/*
* If no log tree was created for this root in this transaction, then
@@ -3335,10 +3771,8 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
* transaction's ID, to avoid the search below in a future call in case
* a log tree gets created after this.
*/
- if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state)) {
- inode->logged_trans = trans->transid - 1;
- return 0;
- }
+ if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state))
+ return mark_inode_as_not_logged(trans, inode);
/*
* We have a log tree and the inode's logged_trans is 0. We can't tell
@@ -3392,29 +3826,17 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
* Set logged_trans to a value greater than 0 and less than the
* current transaction to avoid doing the search in future calls.
*/
- inode->logged_trans = trans->transid - 1;
- return 0;
+ return mark_inode_as_not_logged(trans, inode);
}
/*
* The inode was previously logged and then evicted, set logged_trans to
- * the current transacion's ID, to avoid future tree searches as long as
+ * the current transaction's ID, to avoid future tree searches as long as
* the inode is not evicted again.
*/
+ spin_lock(&inode->lock);
inode->logged_trans = trans->transid;
-
- /*
- * If it's a directory, then we must set last_dir_index_offset to the
- * maximum possible value, so that the next attempt to log the inode does
- * not skip checking if dir index keys found in modified subvolume tree
- * leaves have been logged before, otherwise it would result in attempts
- * to insert duplicate dir index keys in the log tree. This must be done
- * because last_dir_index_offset is an in-memory only field, not persisted
- * in the inode item or any other on-disk structure, so its value is lost
- * once the inode is evicted.
- */
- if (S_ISDIR(inode->vfs_inode.i_mode))
- inode->last_dir_index_offset = (u64)-1;
+ spin_unlock(&inode->lock);
return 1;
}
@@ -3451,7 +3873,7 @@ static int del_logged_dentry(struct btrfs_trans_handle *trans,
* inode item because on log replay we update the field to reflect
* all existing entries in the directory (see overwrite_item()).
*/
- return btrfs_delete_one_dir_name(trans, log, path, di);
+ return btrfs_del_item(trans, log, path);
}
/*
@@ -3476,37 +3898,36 @@ static int del_logged_dentry(struct btrfs_trans_handle *trans,
* or the entire directory.
*/
void btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
const struct fscrypt_str *name,
struct btrfs_inode *dir, u64 index)
{
- struct btrfs_path *path;
+ struct btrfs_root *root = dir->root;
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
ret = inode_logged(trans, dir, NULL);
if (ret == 0)
return;
- else if (ret < 0) {
+ if (ret < 0) {
+ btrfs_set_log_full_commit(trans);
+ return;
+ }
+
+ path = btrfs_alloc_path();
+ if (!path) {
btrfs_set_log_full_commit(trans);
return;
}
ret = join_running_log_trans(root);
- if (ret)
+ ASSERT(ret == 0, "join_running_log_trans() ret=%d", ret);
+ if (WARN_ON(ret))
return;
mutex_lock(&dir->log_mutex);
- path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
ret = del_logged_dentry(trans, root->log_root, path, btrfs_ino(dir),
name, index);
- btrfs_free_path(path);
-out_unlock:
mutex_unlock(&dir->log_mutex);
if (ret < 0)
btrfs_set_log_full_commit(trans);
@@ -3515,12 +3936,11 @@ out_unlock:
/* see comments for btrfs_del_dir_entries_in_log */
void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
const struct fscrypt_str *name,
- struct btrfs_inode *inode, u64 dirid)
+ struct btrfs_inode *inode,
+ struct btrfs_inode *dir)
{
- struct btrfs_root *log;
- u64 index;
+ struct btrfs_root *root = dir->root;
int ret;
ret = inode_logged(trans, inode, NULL);
@@ -3532,13 +3952,13 @@ void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
}
ret = join_running_log_trans(root);
- if (ret)
+ ASSERT(ret == 0, "join_running_log_trans() ret=%d", ret);
+ if (WARN_ON(ret))
return;
- log = root->log_root;
mutex_lock(&inode->log_mutex);
- ret = btrfs_del_inode_ref(trans, log, name, btrfs_ino(inode),
- dirid, &index);
+ ret = btrfs_del_inode_ref(trans, root->log_root, name, btrfs_ino(inode),
+ btrfs_ino(dir), NULL);
mutex_unlock(&inode->log_mutex);
if (ret < 0 && ret != -ENOENT)
btrfs_set_log_full_commit(trans);
@@ -3561,8 +3981,8 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
struct btrfs_dir_log_item *item;
key.objectid = dirid;
- key.offset = first_offset;
key.type = BTRFS_DIR_LOG_INDEX_KEY;
+ key.offset = first_offset;
ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
/*
* -EEXIST is fine and can happen sporadically when we are logging a
@@ -3588,7 +4008,6 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
last_offset = max(last_offset, curr_end);
}
btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_release_path(path);
return 0;
}
@@ -3601,7 +4020,7 @@ static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
int count)
{
struct btrfs_root *log = inode->root->log_root;
- char *ins_data = NULL;
+ char AUTO_KFREE(ins_data);
struct btrfs_item_batch batch;
struct extent_buffer *dst;
unsigned long src_offset;
@@ -3612,7 +4031,7 @@ static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
int ret;
int i;
- ASSERT(count > 0);
+ ASSERT(count > 0, "count=%d", count);
batch.nr = count;
if (count == 1) {
@@ -3625,8 +4044,7 @@ static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
struct btrfs_key *ins_keys;
u32 *ins_sizes;
- ins_data = kmalloc(count * sizeof(u32) +
- count * sizeof(struct btrfs_key), GFP_NOFS);
+ ins_data = kmalloc_array(count, sizeof(u32) + sizeof(struct btrfs_key), GFP_NOFS);
if (!ins_data)
return -ENOMEM;
@@ -3647,7 +4065,7 @@ static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_items(trans, log, dst_path, &batch);
if (ret)
- goto out;
+ return ret;
dst = dst_path->nodes[0];
/*
@@ -3666,7 +4084,9 @@ static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
btrfs_release_path(dst_path);
last_index = batch.keys[count - 1].offset;
- ASSERT(last_index > inode->last_dir_index_offset);
+ ASSERT(last_index > inode->last_dir_index_offset,
+ "last_index=%llu inode->last_dir_index_offset=%llu",
+ last_index, inode->last_dir_index_offset);
/*
* If for some unexpected reason the last item's index is not greater
@@ -3679,8 +4099,6 @@ static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
if (btrfs_get_first_dir_index_to_log(inode) == 0)
btrfs_set_first_dir_index_to_log(inode, batch.keys[0].offset);
-out:
- kfree(ins_data);
return ret;
}
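
One small but repeated change in flush_dir_items_batch() above and in copy_items() further down: the single allocation backing both the key array and the size array moves from an open-coded kmalloc(count * a + count * b) to kmalloc_array(count, a + b), which returns NULL instead of silently wrapping if the multiplication would overflow. In isolation the conversion looks like this (illustrative only, mirroring the hunks above):

/* Before: two multiplications, neither checked for overflow. */
ins_data = kmalloc(count * sizeof(u32) +
                   count * sizeof(struct btrfs_key), GFP_NOFS);

/* After: one overflow-checked multiplication over the combined element size. */
ins_data = kmalloc_array(count, sizeof(u32) + sizeof(struct btrfs_key), GFP_NOFS);
if (!ins_data)
        return -ENOMEM;
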
@@ -3704,7 +4122,7 @@ static int clone_leaf(struct btrfs_path *path, struct btrfs_log_ctx *ctx)
* Add extra ref to scratch eb so that it is not freed when callers
* release the path, so we can reuse it later if needed.
*/
- atomic_inc(&ctx->scratch_eb->refs);
+ refcount_inc(&ctx->scratch_eb->refs);
return 0;
}
@@ -3739,7 +4157,6 @@ static int process_dir_items_leaf(struct btrfs_trans_handle *trans,
for (int i = path->slots[0]; i < nritems; i++) {
struct btrfs_dir_item *di;
struct btrfs_key key;
- int ret;
btrfs_item_key_to_cpu(src, &key, i);
@@ -3809,8 +4226,6 @@ static int process_dir_items_leaf(struct btrfs_trans_handle *trans,
}
if (batch_size > 0) {
- int ret;
-
ret = flush_dir_items_batch(trans, inode, src, dst_path,
batch_start, batch_size);
if (ret < 0)
@@ -3995,7 +4410,9 @@ done:
* change in the current transaction), then we don't need to log
* a range, last_old_dentry_offset is == to last_offset.
*/
- ASSERT(last_old_dentry_offset <= last_offset);
+ ASSERT(last_old_dentry_offset <= last_offset,
+ "last_old_dentry_offset=%llu last_offset=%llu",
+ last_old_dentry_offset, last_offset);
if (last_old_dentry_offset < last_offset)
ret = insert_dir_log_key(trans, log, path, ino,
last_old_dentry_offset + 1,
@@ -4007,7 +4424,7 @@ done:
/*
* If the inode was logged before and it was evicted, then its
- * last_dir_index_offset is (u64)-1, so we don't the value of the last index
+ * last_dir_index_offset is 0, so we don't know the value of the last index
* key offset. If that's the case, search for it and update the inode. This
* is to avoid lookups in the log tree every time we try to insert a dir index
* key from a leaf changed in the current transaction, and to allow us to always
@@ -4023,7 +4440,7 @@ static int update_last_dir_index_offset(struct btrfs_inode *inode,
lockdep_assert_held(&inode->log_mutex);
- if (inode->last_dir_index_offset != (u64)-1)
+ if (inode->last_dir_index_offset != 0)
return 0;
if (!ctx->logged_before) {
@@ -4189,47 +4606,40 @@ static int truncate_inode_items(struct btrfs_trans_handle *trans,
static void fill_inode_item(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf,
struct btrfs_inode_item *item,
- struct inode *inode, int log_inode_only,
+ struct inode *inode, bool log_inode_only,
u64 logged_isize)
{
- struct btrfs_map_token token;
u64 flags;
- btrfs_init_map_token(&token, leaf);
-
if (log_inode_only) {
/* set the generation to zero so the recovery code
* can tell the difference between a logging
* just to say 'this inode exists' and a logging
* to say 'update this inode with these values'
*/
- btrfs_set_token_inode_generation(&token, item, 0);
- btrfs_set_token_inode_size(&token, item, logged_isize);
+ btrfs_set_inode_generation(leaf, item, 0);
+ btrfs_set_inode_size(leaf, item, logged_isize);
} else {
- btrfs_set_token_inode_generation(&token, item,
- BTRFS_I(inode)->generation);
- btrfs_set_token_inode_size(&token, item, inode->i_size);
+ btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
+ btrfs_set_inode_size(leaf, item, inode->i_size);
}
- btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
- btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
- btrfs_set_token_inode_mode(&token, item, inode->i_mode);
- btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
+ btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
+ btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
+ btrfs_set_inode_mode(leaf, item, inode->i_mode);
+ btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
- btrfs_set_token_timespec_sec(&token, &item->atime,
- inode_get_atime_sec(inode));
- btrfs_set_token_timespec_nsec(&token, &item->atime,
- inode_get_atime_nsec(inode));
+ btrfs_set_timespec_sec(leaf, &item->atime, inode_get_atime_sec(inode));
+ btrfs_set_timespec_nsec(leaf, &item->atime, inode_get_atime_nsec(inode));
- btrfs_set_token_timespec_sec(&token, &item->mtime,
- inode_get_mtime_sec(inode));
- btrfs_set_token_timespec_nsec(&token, &item->mtime,
- inode_get_mtime_nsec(inode));
+ btrfs_set_timespec_sec(leaf, &item->mtime, inode_get_mtime_sec(inode));
+ btrfs_set_timespec_nsec(leaf, &item->mtime, inode_get_mtime_nsec(inode));
- btrfs_set_token_timespec_sec(&token, &item->ctime,
- inode_get_ctime_sec(inode));
- btrfs_set_token_timespec_nsec(&token, &item->ctime,
- inode_get_ctime_nsec(inode));
+ btrfs_set_timespec_sec(leaf, &item->ctime, inode_get_ctime_sec(inode));
+ btrfs_set_timespec_nsec(leaf, &item->ctime, inode_get_ctime_nsec(inode));
+
+ btrfs_set_timespec_sec(leaf, &item->otime, BTRFS_I(inode)->i_otime_sec);
+ btrfs_set_timespec_nsec(leaf, &item->otime, BTRFS_I(inode)->i_otime_nsec);
/*
* We do not need to set the nbytes field, in fact during a fast fsync
@@ -4240,13 +4650,13 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
* inode item in subvolume tree as needed (see overwrite_item()).
*/
- btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
- btrfs_set_token_inode_transid(&token, item, trans->transid);
- btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
+ btrfs_set_inode_sequence(leaf, item, inode_peek_iversion(inode));
+ btrfs_set_inode_transid(leaf, item, trans->transid);
+ btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
BTRFS_I(inode)->ro_flags);
- btrfs_set_token_inode_flags(&token, item, flags);
- btrfs_set_token_inode_block_group(&token, item, 0);
+ btrfs_set_inode_flags(leaf, item, flags);
+ btrfs_set_inode_block_group(leaf, item, 0);
}
static int log_inode_item(struct btrfs_trans_handle *trans,
@@ -4292,7 +4702,7 @@ static int log_inode_item(struct btrfs_trans_handle *trans,
inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
- 0, 0);
+ false, 0);
btrfs_release_path(path);
return 0;
}
@@ -4320,8 +4730,8 @@ static int log_csums(struct btrfs_trans_handle *trans,
* file which happens to refer to the same extent as well. Such races
* can leave checksum items in the log with overlapping ranges.
*/
- ret = lock_extent(&log_root->log_csum_range, sums->logical, lock_end,
- &cached_state);
+ ret = btrfs_lock_extent(&log_root->log_csum_range, sums->logical, lock_end,
+ &cached_state);
if (ret)
return ret;
/*
@@ -4337,8 +4747,8 @@ static int log_csums(struct btrfs_trans_handle *trans,
if (!ret)
ret = btrfs_csum_file_blocks(trans, log_root, sums);
- unlock_extent(&log_root->log_csum_range, sums->logical, lock_end,
- &cached_state);
+ btrfs_unlock_extent(&log_root->log_csum_range, sums->logical, lock_end,
+ &cached_state);
return ret;
}
@@ -4357,7 +4767,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
struct btrfs_key *ins_keys;
u32 *ins_sizes;
struct btrfs_item_batch batch;
- char *ins_data;
+ char AUTO_KFREE(ins_data);
int dst_index;
const bool skip_csum = (inode->flags & BTRFS_INODE_NODATASUM);
const u64 i_size = i_size_read(&inode->vfs_inode);
@@ -4396,8 +4806,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
src = src_path->nodes[0];
- ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
- nr * sizeof(u32), GFP_NOFS);
+ ins_data = kmalloc_array(nr, sizeof(struct btrfs_key) + sizeof(u32), GFP_NOFS);
if (!ins_data)
return -ENOMEM;
@@ -4486,7 +4895,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
disk_bytenr + extent_num_bytes - 1,
&ordered_sums, false);
if (ret < 0)
- goto out;
+ return ret;
ret = 0;
list_for_each_entry_safe(sums, sums_next, &ordered_sums, list) {
@@ -4496,7 +4905,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
kfree(sums);
}
if (ret)
- goto out;
+ return ret;
add_to_batch:
ins_sizes[dst_index] = btrfs_item_size(src, src_slot);
@@ -4510,11 +4919,11 @@ add_to_batch:
* so we don't need to do anything.
*/
if (batch.nr == 0)
- goto out;
+ return 0;
ret = btrfs_insert_empty_items(trans, log, dst_path, &batch);
if (ret)
- goto out;
+ return ret;
dst_index = 0;
for (int i = 0; i < nr; i++) {
@@ -4566,10 +4975,7 @@ copy_item:
dst_index++;
}
- btrfs_mark_buffer_dirty(trans, dst_path->nodes[0]);
btrfs_release_path(dst_path);
-out:
- kfree(ins_data);
return ret;
}
@@ -4669,7 +5075,7 @@ static int log_extent_csums(struct btrfs_trans_handle *trans,
return 0;
/* If we're compressed we have to save the entire range of csums. */
- if (extent_map_is_compressed(em)) {
+ if (btrfs_extent_map_is_compressed(em)) {
csum_offset = 0;
csum_len = em->disk_num_bytes;
} else {
@@ -4678,7 +5084,7 @@ static int log_extent_csums(struct btrfs_trans_handle *trans,
}
/* block start is already adjusted for the file extent offset. */
- block_start = extent_map_block_start(em);
+ block_start = btrfs_extent_map_block_start(em);
csum_root = btrfs_csum_root(trans->fs_info, block_start);
ret = btrfs_lookup_csums_list(csum_root, block_start + csum_offset,
block_start + csum_offset + csum_len - 1,
@@ -4688,9 +5094,9 @@ static int log_extent_csums(struct btrfs_trans_handle *trans,
ret = 0;
while (!list_empty(&ordered_sums)) {
- struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
- struct btrfs_ordered_sum,
- list);
+ struct btrfs_ordered_sum *sums = list_first_entry(&ordered_sums,
+ struct btrfs_ordered_sum,
+ list);
if (!ret)
ret = log_csums(trans, inode, log_root, sums);
list_del(&sums->list);
@@ -4713,7 +5119,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
struct btrfs_key key;
enum btrfs_compression_type compress_type;
u64 extent_offset = em->offset;
- u64 block_start = extent_map_block_start(em);
+ u64 block_start = btrfs_extent_map_block_start(em);
u64 block_len;
int ret;
@@ -4724,7 +5130,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
btrfs_set_stack_file_extent_type(&fi, BTRFS_FILE_EXTENT_REG);
block_len = em->disk_num_bytes;
- compress_type = extent_map_compression(em);
+ compress_type = btrfs_extent_map_compression(em);
if (compress_type != BTRFS_COMPRESS_NONE) {
btrfs_set_stack_file_extent_disk_bytenr(&fi, block_start);
btrfs_set_stack_file_extent_disk_num_bytes(&fi, block_len);
@@ -4776,7 +5182,6 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
write_extent_buffer(leaf, &fi,
btrfs_item_ptr_offset(leaf, path->slots[0]),
sizeof(fi));
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
@@ -4800,7 +5205,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
struct btrfs_key key;
const u64 i_size = i_size_read(&inode->vfs_inode);
const u64 ino = btrfs_ino(inode);
- struct btrfs_path *dst_path = NULL;
+ BTRFS_PATH_AUTO_FREE(dst_path);
bool dropped_extents = false;
u64 truncate_offset = i_size;
struct extent_buffer *leaf;
@@ -4918,7 +5323,6 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
start_slot, ins_nr, 1, 0, ctx);
out:
btrfs_release_path(path);
- btrfs_free_path(dst_path);
return ret;
}
@@ -4969,7 +5373,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
list_sort(NULL, &extents, extent_cmp);
process:
while (!list_empty(&extents)) {
- em = list_entry(extents.next, struct extent_map, list);
+ em = list_first_entry(&extents, struct extent_map, list);
list_del_init(&em->list);
@@ -4978,8 +5382,8 @@ process:
* private list.
*/
if (ret) {
- clear_em_logging(inode, em);
- free_extent_map(em);
+ btrfs_clear_em_logging(inode, em);
+ btrfs_free_extent_map(em);
continue;
}
@@ -4987,8 +5391,8 @@ process:
ret = log_one_extent(trans, inode, em, path, ctx);
write_lock(&tree->lock);
- clear_em_logging(inode, em);
- free_extent_map(em);
+ btrfs_clear_em_logging(inode, em);
+ btrfs_free_extent_map(em);
}
WARN_ON(!list_empty(&extents));
write_unlock(&tree->lock);
@@ -5010,12 +5414,12 @@ process:
set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags);
if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
- spin_lock_irq(&inode->ordered_tree_lock);
+ spin_lock(&inode->ordered_tree_lock);
if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
atomic_inc(&trans->transaction->pending_ordered);
}
- spin_unlock_irq(&inode->ordered_tree_lock);
+ spin_unlock(&inode->ordered_tree_lock);
}
btrfs_put_ordered_extent(ordered);
}
@@ -5290,9 +5694,8 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
struct btrfs_inode *inode,
u64 *other_ino, u64 *other_parent)
{
- int ret;
- struct btrfs_path *search_path;
- char *name = NULL;
+ BTRFS_PATH_AUTO_FREE(search_path);
+ char AUTO_KFREE(name);
u32 name_len = 0;
u32 item_size = btrfs_item_size(eb, slot);
u32 cur_offset = 0;
@@ -5301,8 +5704,8 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
search_path = btrfs_alloc_path();
if (!search_path)
return -ENOMEM;
- search_path->search_commit_root = 1;
- search_path->skip_locking = 1;
+ search_path->search_commit_root = true;
+ search_path->skip_locking = true;
while (cur_offset < item_size) {
u64 parent;
@@ -5335,10 +5738,8 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
char *new_name;
new_name = krealloc(name, this_name_len, GFP_NOFS);
- if (!new_name) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!new_name)
+ return -ENOMEM;
name_len = this_name_len;
name = new_name;
}
@@ -5356,29 +5757,24 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
di, &di_key);
if (di_key.type == BTRFS_INODE_ITEM_KEY) {
if (di_key.objectid != key->objectid) {
- ret = 1;
*other_ino = di_key.objectid;
*other_parent = parent;
+ return 1;
} else {
- ret = 0;
+ return 0;
}
} else {
- ret = -EAGAIN;
+ return -EAGAIN;
}
- goto out;
} else if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto out;
+ return PTR_ERR(di);
}
btrfs_release_path(search_path);
cur_offset += this_len;
}
- ret = 0;
-out:
- btrfs_free_path(search_path);
- kfree(name);
- return ret;
+
+ return 0;
}
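
The BTRFS_PATH_AUTO_FREE() and AUTO_KFREE() declarations above are what allow the old "goto out" cleanup labels to be dropped: the free is tied to the variable's scope instead. Their exact definitions are outside this diff, but they build on the kernel's scope-based cleanup attributes from <linux/cleanup.h>; a minimal sketch of the underlying __free() pattern, with an illustrative helper that is not part of the patch:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Illustration only: every return path frees @buf automatically. */
static int example_copy_check(const char *src, size_t len)
{
	char *buf __free(kfree) = kmalloc(len, GFP_NOFS);

	if (!buf)
		return -ENOMEM;

	memcpy(buf, src, len);
	if (buf[0] != '/')	/* arbitrary early-exit condition */
		return -EINVAL;

	return 0;		/* no kfree() or "goto out" needed */
}
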
/*
@@ -5426,7 +5822,7 @@ struct btrfs_dir_list {
* See process_dir_items_leaf() for details about why it is needed.
* This is a recursive operation - if an existing dentry corresponds to a
* directory, that directory's new entries are logged too (same behaviour as
- * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
+ * ext3/4, xfs, f2fs, nilfs2). Note that when logging the inodes
* the dentries point to we do not acquire their VFS lock, otherwise lockdep
* complains about the following circular lock dependency / possible deadlock:
*
@@ -5485,7 +5881,6 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
ihold(&curr_inode->vfs_inode);
while (true) {
- struct inode *vfs_inode;
struct btrfs_key key;
struct btrfs_key found_key;
u64 next_index;
@@ -5501,7 +5896,7 @@ again:
struct extent_buffer *leaf = path->nodes[0];
struct btrfs_dir_item *di;
struct btrfs_key di_key;
- struct inode *di_inode;
+ struct btrfs_inode *di_inode;
int log_mode = LOG_INODE_EXISTS;
int type;
@@ -5528,17 +5923,16 @@ again:
goto out;
}
- if (!need_log_inode(trans, BTRFS_I(di_inode))) {
- btrfs_add_delayed_iput(BTRFS_I(di_inode));
+ if (!need_log_inode(trans, di_inode)) {
+ btrfs_add_delayed_iput(di_inode);
break;
}
ctx->log_new_dentries = false;
if (type == BTRFS_FT_DIR)
log_mode = LOG_INODE_ALL;
- ret = btrfs_log_inode(trans, BTRFS_I(di_inode),
- log_mode, ctx);
- btrfs_add_delayed_iput(BTRFS_I(di_inode));
+ ret = btrfs_log_inode(trans, di_inode, log_mode, ctx);
+ btrfs_add_delayed_iput(di_inode);
if (ret)
goto out;
if (ctx->log_new_dentries) {
@@ -5580,14 +5974,13 @@ again:
kfree(dir_elem);
btrfs_add_delayed_iput(curr_inode);
- curr_inode = NULL;
- vfs_inode = btrfs_iget_logging(ino, root);
- if (IS_ERR(vfs_inode)) {
- ret = PTR_ERR(vfs_inode);
+ curr_inode = btrfs_iget_logging(ino, root);
+ if (IS_ERR(curr_inode)) {
+ ret = PTR_ERR(curr_inode);
+ curr_inode = NULL;
break;
}
- curr_inode = BTRFS_I(vfs_inode);
}
out:
btrfs_free_path(path);
@@ -5631,8 +6024,8 @@ static int conflicting_inode_is_dir(struct btrfs_root *root, u64 ino,
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (WARN_ON_ONCE(ret > 0)) {
@@ -5652,8 +6045,8 @@ static int conflicting_inode_is_dir(struct btrfs_root *root, u64 ino,
}
btrfs_release_path(path);
- path->search_commit_root = 0;
- path->skip_locking = 0;
+ path->search_commit_root = false;
+ path->skip_locking = false;
return ret;
}
@@ -5665,7 +6058,7 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans,
struct btrfs_log_ctx *ctx)
{
struct btrfs_ino_list *ino_elem;
- struct inode *inode;
+ struct btrfs_inode *inode;
/*
* It's rare to have a lot of conflicting inodes, in practice it is not
@@ -5756,12 +6149,12 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans,
* inode in LOG_INODE_EXISTS mode and rename operations update the log,
* so that the log ends up with the new name and without the old name.
*/
- if (!need_log_inode(trans, BTRFS_I(inode))) {
- btrfs_add_delayed_iput(BTRFS_I(inode));
+ if (!need_log_inode(trans, inode)) {
+ btrfs_add_delayed_iput(inode);
return 0;
}
- btrfs_add_delayed_iput(BTRFS_I(inode));
+ btrfs_add_delayed_iput(inode);
ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
if (!ino_elem)
@@ -5797,7 +6190,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
*/
while (!list_empty(&ctx->conflict_inodes)) {
struct btrfs_ino_list *curr;
- struct inode *inode;
+ struct btrfs_inode *inode;
u64 ino;
u64 parent;
@@ -5833,9 +6226,8 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
* dir index key range logged for the directory. So we
* must make sure the deletion is recorded.
*/
- ret = btrfs_log_inode(trans, BTRFS_I(inode),
- LOG_INODE_ALL, ctx);
- btrfs_add_delayed_iput(BTRFS_I(inode));
+ ret = btrfs_log_inode(trans, inode, LOG_INODE_ALL, ctx);
+ btrfs_add_delayed_iput(inode);
if (ret)
break;
continue;
@@ -5851,8 +6243,8 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
* it again because if some other task logged the inode after
* that, we can avoid doing it again.
*/
- if (!need_log_inode(trans, BTRFS_I(inode))) {
- btrfs_add_delayed_iput(BTRFS_I(inode));
+ if (!need_log_inode(trans, inode)) {
+ btrfs_add_delayed_iput(inode);
continue;
}
@@ -5863,8 +6255,8 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
* well because during a rename we pin the log and update the
* log with the new name before we unpin it.
*/
- ret = btrfs_log_inode(trans, BTRFS_I(inode), LOG_INODE_EXISTS, ctx);
- btrfs_add_delayed_iput(BTRFS_I(inode));
+ ret = btrfs_log_inode(trans, inode, LOG_INODE_EXISTS, ctx);
+ btrfs_add_delayed_iput(inode);
if (ret)
break;
}
@@ -6108,8 +6500,7 @@ static int log_delayed_insertion_items(struct btrfs_trans_handle *trans,
if (!first)
return 0;
- ins_data = kmalloc(max_batch_size * sizeof(u32) +
- max_batch_size * sizeof(struct btrfs_key), GFP_NOFS);
+ ins_data = kmalloc_array(max_batch_size, sizeof(u32) + sizeof(struct btrfs_key), GFP_NOFS);
if (!ins_data)
return -ENOMEM;
ins_sizes = (u32 *)ins_data;
@@ -6145,7 +6536,7 @@ static int log_delayed_insertion_items(struct btrfs_trans_handle *trans,
curr = list_next_entry(curr, log_list);
}
- ASSERT(batch.nr >= 1);
+ ASSERT(batch.nr >= 1, "batch.nr=%d", batch.nr);
ret = insert_delayed_items_batch(trans, log, path, &batch, first);
curr = list_last_entry(delayed_ins_list, struct btrfs_delayed_item,
@@ -6189,7 +6580,9 @@ static int log_delayed_deletions_full(struct btrfs_trans_handle *trans,
}
last_dir_index = curr->index;
- ASSERT(last_dir_index >= first_dir_index);
+ ASSERT(last_dir_index >= first_dir_index,
+ "last_dir_index=%llu first_dir_index=%llu",
+ last_dir_index, first_dir_index);
ret = insert_dir_log_key(trans, inode->root->log_root, path,
ino, first_dir_index, last_dir_index);
@@ -6283,7 +6676,9 @@ static int log_delayed_deletions_incremental(struct btrfs_trans_handle *trans,
goto next_batch;
last_dir_index = last->index;
- ASSERT(last_dir_index >= first_dir_index);
+ ASSERT(last_dir_index >= first_dir_index,
+ "last_dir_index=%llu first_dir_index=%llu",
+ last_dir_index, first_dir_index);
/*
* If this range starts right after where the previous one ends,
* then we want to reuse the previous range item and change its
@@ -6350,12 +6745,13 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
*/
lockdep_assert_not_held(&inode->log_mutex);
- ASSERT(!ctx->logging_new_delayed_dentries);
+ ASSERT(!ctx->logging_new_delayed_dentries,
+ "ctx->logging_new_delayed_dentries=%d", ctx->logging_new_delayed_dentries);
ctx->logging_new_delayed_dentries = true;
list_for_each_entry(item, delayed_ins_list, log_list) {
struct btrfs_dir_item *dir_item;
- struct inode *di_inode;
+ struct btrfs_inode *di_inode;
struct btrfs_key key;
int log_mode = LOG_INODE_EXISTS;
@@ -6371,8 +6767,8 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
break;
}
- if (!need_log_inode(trans, BTRFS_I(di_inode))) {
- btrfs_add_delayed_iput(BTRFS_I(di_inode));
+ if (!need_log_inode(trans, di_inode)) {
+ btrfs_add_delayed_iput(di_inode);
continue;
}
@@ -6380,12 +6776,12 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
log_mode = LOG_INODE_ALL;
ctx->log_new_dentries = false;
- ret = btrfs_log_inode(trans, BTRFS_I(di_inode), log_mode, ctx);
+ ret = btrfs_log_inode(trans, di_inode, log_mode, ctx);
if (!ret && ctx->log_new_dentries)
- ret = log_new_dir_dentries(trans, BTRFS_I(di_inode), ctx);
+ ret = log_new_dir_dentries(trans, di_inode, ctx);
- btrfs_add_delayed_iput(BTRFS_I(di_inode));
+ btrfs_add_delayed_iput(di_inode);
if (ret)
break;
@@ -6609,6 +7005,19 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
btrfs_log_get_delayed_items(inode, &delayed_ins_list,
&delayed_del_list);
+ /*
+ * If we are fsyncing a file with 0 hard links, then commit the delayed
+ * inode because the last inode ref (or extref) item may still be in the
+ * subvolume tree, and if we log it the file will still exist after a log
+ * replay. So commit the delayed inode to delete that last ref and then
+ * skip logging the inode.
+ */
+ if (inode->vfs_inode.i_nlink == 0) {
+ ret = btrfs_commit_inode_delayed_inode(inode);
+ if (ret)
+ goto out_unlock;
+ }
+
ret = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
path, dst_path, logged_isize,
inode_only, ctx,
@@ -6711,7 +7120,7 @@ log_extents:
* a power failure unless the log was synced as part of an fsync
* against any other unrelated inode.
*/
- if (inode_only != LOG_INODE_EXISTS)
+ if (!ctx->logging_new_name && inode_only != LOG_INODE_EXISTS)
inode->last_log_commit = inode->last_sub_trans;
spin_unlock(&inode->lock);
@@ -6750,7 +7159,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
struct btrfs_log_ctx *ctx)
{
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_root *root = inode->root;
const u64 ino = btrfs_ino(inode);
@@ -6758,15 +7167,15 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- path->skip_locking = 1;
- path->search_commit_root = 1;
+ path->skip_locking = true;
+ path->search_commit_root = true;
key.objectid = ino;
key.type = BTRFS_INODE_REF_KEY;
key.offset = 0;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
while (true) {
struct extent_buffer *leaf = path->nodes[0];
@@ -6778,8 +7187,8 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
- goto out;
- else if (ret > 0)
+ return ret;
+ if (ret > 0)
break;
continue;
}
@@ -6792,28 +7201,24 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
item_size = btrfs_item_size(leaf, slot);
ptr = btrfs_item_ptr_offset(leaf, slot);
while (cur_offset < item_size) {
- struct btrfs_key inode_key;
- struct inode *dir_inode;
-
- inode_key.type = BTRFS_INODE_ITEM_KEY;
- inode_key.offset = 0;
+ u64 dir_id;
+ struct btrfs_inode *dir_inode;
if (key.type == BTRFS_INODE_EXTREF_KEY) {
struct btrfs_inode_extref *extref;
extref = (struct btrfs_inode_extref *)
(ptr + cur_offset);
- inode_key.objectid = btrfs_inode_extref_parent(
- leaf, extref);
+ dir_id = btrfs_inode_extref_parent(leaf, extref);
cur_offset += sizeof(*extref);
cur_offset += btrfs_inode_extref_name_len(leaf,
extref);
} else {
- inode_key.objectid = key.offset;
+ dir_id = key.offset;
cur_offset = item_size;
}
- dir_inode = btrfs_iget_logging(inode_key.objectid, root);
+ dir_inode = btrfs_iget_logging(dir_id, root);
/*
* If the parent inode was deleted, return an error to
* fallback to a transaction commit. This is to prevent
@@ -6837,32 +7242,25 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
* at both parents and the old parent B would still
* exist.
*/
- if (IS_ERR(dir_inode)) {
- ret = PTR_ERR(dir_inode);
- goto out;
- }
+ if (IS_ERR(dir_inode))
+ return PTR_ERR(dir_inode);
- if (!need_log_inode(trans, BTRFS_I(dir_inode))) {
- btrfs_add_delayed_iput(BTRFS_I(dir_inode));
+ if (!need_log_inode(trans, dir_inode)) {
+ btrfs_add_delayed_iput(dir_inode);
continue;
}
ctx->log_new_dentries = false;
- ret = btrfs_log_inode(trans, BTRFS_I(dir_inode),
- LOG_INODE_ALL, ctx);
+ ret = btrfs_log_inode(trans, dir_inode, LOG_INODE_ALL, ctx);
if (!ret && ctx->log_new_dentries)
- ret = log_new_dir_dentries(trans,
- BTRFS_I(dir_inode), ctx);
- btrfs_add_delayed_iput(BTRFS_I(dir_inode));
+ ret = log_new_dir_dentries(trans, dir_inode, ctx);
+ btrfs_add_delayed_iput(dir_inode);
if (ret)
- goto out;
+ return ret;
}
path->slots[0]++;
}
- ret = 0;
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
static int log_new_ancestors(struct btrfs_trans_handle *trans,
@@ -6878,7 +7276,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
int slot;
struct btrfs_key search_key;
- struct inode *inode;
+ struct btrfs_inode *inode;
u64 ino;
int ret = 0;
@@ -6893,11 +7291,10 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
if (IS_ERR(inode))
return PTR_ERR(inode);
- if (BTRFS_I(inode)->generation >= trans->transid &&
- need_log_inode(trans, BTRFS_I(inode)))
- ret = btrfs_log_inode(trans, BTRFS_I(inode),
- LOG_INODE_EXISTS, ctx);
- btrfs_add_delayed_iput(BTRFS_I(inode));
+ if (inode->generation >= trans->transid &&
+ need_log_inode(trans, inode))
+ ret = btrfs_log_inode(trans, inode, LOG_INODE_EXISTS, ctx);
+ btrfs_add_delayed_iput(inode);
if (ret)
return ret;
@@ -6974,7 +7371,7 @@ static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
{
struct btrfs_root *root = inode->root;
const u64 ino = btrfs_ino(inode);
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key search_key;
int ret;
@@ -6995,7 +7392,7 @@ static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
again:
ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
if (ret == 0)
path->slots[0]++;
@@ -7007,8 +7404,8 @@ again:
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
- goto out;
- else if (ret > 0)
+ return ret;
+ if (ret > 0)
break;
continue;
}
@@ -7025,10 +7422,8 @@ again:
* this loop, etc). So just return some error to fallback to
* a transaction commit.
*/
- if (found_key.type == BTRFS_INODE_EXTREF_KEY) {
- ret = -EMLINK;
- goto out;
- }
+ if (found_key.type == BTRFS_INODE_EXTREF_KEY)
+ return -EMLINK;
/*
* Logging ancestors needs to do more searches on the fs/subvol
@@ -7040,14 +7435,11 @@ again:
ret = log_new_ancestors(trans, root, path, ctx);
if (ret)
- goto out;
+ return ret;
btrfs_release_path(path);
goto again;
}
- ret = 0;
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
/*
@@ -7065,42 +7457,29 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
- bool log_dentries = false;
+ bool log_dentries;
- if (btrfs_test_opt(fs_info, NOTREELOG)) {
- ret = BTRFS_LOG_FORCE_COMMIT;
- goto end_no_trans;
- }
+ if (btrfs_test_opt(fs_info, NOTREELOG))
+ return BTRFS_LOG_FORCE_COMMIT;
- if (btrfs_root_refs(&root->root_item) == 0) {
- ret = BTRFS_LOG_FORCE_COMMIT;
- goto end_no_trans;
- }
+ if (btrfs_root_refs(&root->root_item) == 0)
+ return BTRFS_LOG_FORCE_COMMIT;
/*
* If we're logging an inode from a subvolume created in the current
* transaction we must force a commit since the root is not persisted.
*/
- if (btrfs_root_generation(&root->root_item) == trans->transid) {
- ret = BTRFS_LOG_FORCE_COMMIT;
- goto end_no_trans;
- }
+ if (btrfs_root_generation(&root->root_item) == trans->transid)
+ return BTRFS_LOG_FORCE_COMMIT;
- /*
- * Skip already logged inodes or inodes corresponding to tmpfiles
- * (since logging them is pointless, a link count of 0 means they
- * will never be accessible).
- */
- if ((btrfs_inode_in_log(inode, trans->transid) &&
- list_empty(&ctx->ordered_extents)) ||
- inode->vfs_inode.i_nlink == 0) {
- ret = BTRFS_NO_LOG_SYNC;
- goto end_no_trans;
- }
+ /* Skip inodes that were already logged and have no new extents. */
+ if (btrfs_inode_in_log(inode, trans->transid) &&
+ list_empty(&ctx->ordered_extents))
+ return BTRFS_NO_LOG_SYNC;
ret = start_log_trans(trans, root, ctx);
if (ret)
- goto end_no_trans;
+ return ret;
ret = btrfs_log_inode(trans, inode, inode_only, ctx);
if (ret)
@@ -7119,8 +7498,11 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
goto end_trans;
}
- if (S_ISDIR(inode->vfs_inode.i_mode) && ctx->log_new_dentries)
- log_dentries = true;
+ /*
+ * Track if we need to log dentries because ctx->log_new_dentries can
+ * be modified in the call chains below.
+ */
+ log_dentries = ctx->log_new_dentries;
/*
* On unlink we must make sure all our current and old parent directory
@@ -7175,8 +7557,6 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
if (log_dentries)
ret = log_new_dir_dentries(trans, inode, ctx);
- else
- ret = 0;
end_trans:
if (ret < 0) {
btrfs_set_log_full_commit(trans);
@@ -7186,7 +7566,7 @@ end_trans:
if (ret)
btrfs_remove_log_ctx(root, ctx);
btrfs_end_log_trans(root);
-end_no_trans:
+
return ret;
}
@@ -7220,8 +7600,6 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
struct btrfs_path *path;
struct btrfs_trans_handle *trans;
struct btrfs_key key;
- struct btrfs_key found_key;
- struct btrfs_root *log;
struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
struct walk_control wc = {
.process_func = process_one_buffer,
@@ -7241,23 +7619,27 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
}
wc.trans = trans;
- wc.pin = 1;
+ wc.pin = true;
+ wc.log = log_root_tree;
- ret = walk_log_tree(trans, log_root_tree, &wc);
- if (ret) {
+ ret = walk_log_tree(&wc);
+ wc.log = NULL;
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto error;
}
again:
key.objectid = BTRFS_TREE_LOG_OBJECTID;
- key.offset = (u64)-1;
key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = (u64)-1;
while (1) {
+ struct btrfs_key found_key;
+
ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto error;
}
@@ -7272,17 +7654,22 @@ again:
if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
break;
- log = btrfs_read_tree_root(log_root_tree, &found_key);
- if (IS_ERR(log)) {
- ret = PTR_ERR(log);
+ wc.log = btrfs_read_tree_root(log_root_tree, &found_key);
+ if (IS_ERR(wc.log)) {
+ ret = PTR_ERR(wc.log);
+ wc.log = NULL;
btrfs_abort_transaction(trans, ret);
goto error;
}
- wc.replay_dest = btrfs_get_fs_root(fs_info, found_key.offset,
- true);
- if (IS_ERR(wc.replay_dest)) {
- ret = PTR_ERR(wc.replay_dest);
+ wc.root = btrfs_get_fs_root(fs_info, found_key.offset, true);
+ if (IS_ERR(wc.root)) {
+ ret = PTR_ERR(wc.root);
+ wc.root = NULL;
+ if (unlikely(ret != -ENOENT)) {
+ btrfs_abort_transaction(trans, ret);
+ goto error;
+ }
/*
* We didn't find the subvol, likely because it was
@@ -7295,36 +7682,37 @@ again:
* block from being modified, and we'll just bail for
* each subsequent pass.
*/
- if (ret == -ENOENT)
- ret = btrfs_pin_extent_for_log_replay(trans, log->node);
- btrfs_put_root(log);
+ ret = btrfs_pin_extent_for_log_replay(trans, wc.log->node);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto error;
+ }
+ goto next;
+ }
- if (!ret)
- goto next;
+ wc.root->log_root = wc.log;
+ ret = btrfs_record_root_in_trans(trans, wc.root);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- goto error;
+ goto next;
}
- wc.replay_dest->log_root = log;
- ret = btrfs_record_root_in_trans(trans, wc.replay_dest);
- if (ret)
- /* The loop needs to continue due to the root refs */
+ ret = walk_log_tree(&wc);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- else
- ret = walk_log_tree(trans, log, &wc);
-
- if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
- ret = fixup_inode_link_counts(trans, wc.replay_dest,
- path);
- if (ret)
- btrfs_abort_transaction(trans, ret);
+ goto next;
}
- if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
- struct btrfs_root *root = wc.replay_dest;
-
- btrfs_release_path(path);
+ if (wc.stage == LOG_WALK_REPLAY_ALL) {
+ struct btrfs_root *root = wc.root;
+ wc.subvol_path = path;
+ ret = fixup_inode_link_counts(&wc);
+ wc.subvol_path = NULL;
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto next;
+ }
/*
* We have just replayed everything, and the highest
* objectid of fs roots probably has changed in case
@@ -7334,17 +7722,21 @@ again:
* could only happen during mount.
*/
ret = btrfs_init_root_free_objectid(root);
- if (ret)
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
+ goto next;
+ }
}
-
- wc.replay_dest->log_root = NULL;
- btrfs_put_root(wc.replay_dest);
- btrfs_put_root(log);
+next:
+ if (wc.root) {
+ wc.root->log_root = NULL;
+ btrfs_put_root(wc.root);
+ }
+ btrfs_put_root(wc.log);
+ wc.log = NULL;
if (ret)
goto error;
-next:
if (found_key.offset == 0)
break;
key.offset = found_key.offset - 1;
@@ -7353,7 +7745,7 @@ next:
/* step one is to pin it all, step two is to replay just inodes */
if (wc.pin) {
- wc.pin = 0;
+ wc.pin = false;
wc.process_func = replay_one_buffer;
wc.stage = LOG_WALK_REPLAY_INODES;
goto again;
@@ -7371,14 +7763,13 @@ next:
if (ret)
return ret;
- log_root_tree->log_root = NULL;
clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
- btrfs_put_root(log_root_tree);
return 0;
error:
if (wc.trans)
btrfs_end_transaction(wc.trans);
+ btrfs_put_root(wc.log);
clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
btrfs_free_path(path);
return ret;
@@ -7475,6 +7866,8 @@ void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
* full log sync.
* Also we don't need to worry with renames, since btrfs_rename() marks the log
* for full commit when renaming a subvolume.
+ *
+ * Must be called before creating the subvolume entry in its parent directory.
*/
void btrfs_record_new_subvolume(const struct btrfs_trans_handle *trans,
struct btrfs_inode *dir)
@@ -7511,6 +7904,12 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
bool log_pinned = false;
int ret;
+ /* The inode has a new name (ref/extref), so make sure we log it. */
+ set_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags);
+
+ btrfs_init_log_ctx(&ctx, inode);
+ ctx.logging_new_name = true;
+
/*
* this will force the logging code to walk the dentry chain
* up for the file
@@ -7542,6 +7941,13 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
ret = 0;
/*
+ * Now that we know we need to update the log, allocate the scratch eb
+ * for the context before joining a log transaction below, as this can
+ * take time and therefore we could delay log commits from other tasks.
+ */
+ btrfs_init_log_ctx_scratch_eb(&ctx);
+
+ /*
* If we are doing a rename (old_dir is not NULL) from a directory that
* was previously logged, make sure that on log replay we get the old
* dir entry deleted. This is needed because we will also log the new
@@ -7553,12 +7959,21 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
struct btrfs_path *path;
struct fscrypt_name fname;
- ASSERT(old_dir_index >= BTRFS_DIR_START_INDEX);
+ ASSERT(old_dir_index >= BTRFS_DIR_START_INDEX,
+ "old_dir_index=%llu", old_dir_index);
ret = fscrypt_setup_filename(&old_dir->vfs_inode,
&old_dentry->d_name, 0, &fname);
if (ret)
goto out;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ fscrypt_free_filename(&fname);
+ goto out;
+ }
+
/*
* We have two inodes to update in the log, the old directory and
* the inode that got renamed, so we must pin the log to prevent
@@ -7572,19 +7987,13 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
* mark the log for a full commit.
*/
if (WARN_ON_ONCE(ret < 0)) {
+ btrfs_free_path(path);
fscrypt_free_filename(&fname);
goto out;
}
log_pinned = true;
- path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- fscrypt_free_filename(&fname);
- goto out;
- }
-
/*
* Other concurrent task might be logging the old directory,
* as it can be triggered when logging other inode that had or
@@ -7616,9 +8025,6 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
goto out;
}
- btrfs_init_log_ctx(&ctx, inode);
- ctx.logging_new_name = true;
- btrfs_init_log_ctx_scratch_eb(&ctx);
/*
* We don't care about the return value. If we fail to log the new name
* then we know the next attempt to sync the log will fallback to a full
@@ -7627,7 +8033,6 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
* inconsistent state after a rename operation.
*/
btrfs_log_inode_parent(trans, inode, parent, LOG_INODE_EXISTS, &ctx);
- free_extent_buffer(ctx.scratch_eb);
ASSERT(list_empty(&ctx.conflict_inodes));
out:
/*
@@ -7640,5 +8045,6 @@ out:
btrfs_set_log_full_commit(trans);
if (log_pinned)
btrfs_end_log_trans(root);
+ free_extent_buffer(ctx.scratch_eb);
}
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index dc313e6bb2fa..41e47fda036d 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -8,8 +8,7 @@
#include <linux/list.h>
#include <linux/fs.h>
-#include "messages.h"
-#include "ctree.h"
+#include <linux/fscrypt.h>
#include "transaction.h"
struct inode;
@@ -80,13 +79,12 @@ int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
struct dentry *dentry,
struct btrfs_log_ctx *ctx);
void btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
const struct fscrypt_str *name,
struct btrfs_inode *dir, u64 index);
void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
const struct fscrypt_str *name,
- struct btrfs_inode *inode, u64 dirid);
+ struct btrfs_inode *inode,
+ struct btrfs_inode *dir);
void btrfs_end_log_trans(struct btrfs_root *root);
void btrfs_pin_log_trans(struct btrfs_root *root);
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/tree-mod-log.c b/fs/btrfs/tree-mod-log.c
index 1ac2678fc4ca..9e8cb3b7c064 100644
--- a/fs/btrfs/tree-mod-log.c
+++ b/fs/btrfs/tree-mod-log.c
@@ -27,18 +27,29 @@ struct tree_mod_elem {
/* This is used for BTRFS_MOD_LOG_KEY* and BTRFS_MOD_LOG_ROOT_REPLACE. */
u64 generation;
- /* Those are used for op == BTRFS_MOD_LOG_KEY_{REPLACE,REMOVE}. */
- struct btrfs_disk_key key;
- u64 blockptr;
-
- /* This is used for op == BTRFS_MOD_LOG_MOVE_KEYS. */
- struct {
- int dst_slot;
- int nr_items;
- } move;
-
- /* This is used for op == BTRFS_MOD_LOG_ROOT_REPLACE. */
- struct tree_mod_root old_root;
+ union {
+ /*
+ * This is used for the following op types:
+ *
+ * BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING
+ * BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING
+ * BTRFS_MOD_LOG_KEY_REMOVE
+ * BTRFS_MOD_LOG_KEY_REPLACE
+ */
+ struct {
+ struct btrfs_disk_key key;
+ u64 blockptr;
+ } slot_change;
+
+ /* This is used for op == BTRFS_MOD_LOG_MOVE_KEYS. */
+ struct {
+ int dst_slot;
+ int nr_items;
+ } move;
+
+ /* This is used for op == BTRFS_MOD_LOG_ROOT_REPLACE. */
+ struct tree_mod_root old_root;
+ };
};
/*
@@ -164,6 +175,30 @@ static noinline int tree_mod_log_insert(struct btrfs_fs_info *fs_info,
return 0;
}
+static inline bool skip_eb_logging(const struct extent_buffer *eb)
+{
+ const u64 owner = btrfs_header_owner(eb);
+
+ if (btrfs_header_level(eb) == 0)
+ return true;
+
+ /*
+ * Tree mod logging exists so that there's a consistent view of the
+ * extents and backrefs of inodes even if, while a task is iterating over
+ * them, other tasks are modifying subvolume trees and the extent tree
+ * (including running delayed refs). So we only need to log extent
+ * buffers from the extent tree and subvolume trees.
+ */
+
+ if (owner == BTRFS_EXTENT_TREE_OBJECTID)
+ return false;
+
+ if (btrfs_is_fstree(owner))
+ return false;
+
+ return true;
+}
+
/*
* Determines if logging can be omitted. Returns true if it can. Otherwise, it
* returns false with the tree_mod_log_lock acquired. The caller must hold
@@ -174,7 +209,7 @@ static bool tree_mod_dont_log(struct btrfs_fs_info *fs_info, const struct extent
{
if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
return true;
- if (eb && btrfs_header_level(eb) == 0)
+ if (eb && skip_eb_logging(eb))
return true;
write_lock(&fs_info->tree_mod_log_lock);
@@ -192,7 +227,7 @@ static bool tree_mod_need_log(const struct btrfs_fs_info *fs_info,
{
if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
return false;
- if (eb && btrfs_header_level(eb) == 0)
+ if (eb && skip_eb_logging(eb))
return false;
return true;
@@ -204,15 +239,17 @@ static struct tree_mod_elem *alloc_tree_mod_elem(const struct extent_buffer *eb,
{
struct tree_mod_elem *tm;
+ /* Can't be one of these types, due to union in struct tree_mod_elem. */
+ ASSERT(op != BTRFS_MOD_LOG_MOVE_KEYS);
+ ASSERT(op != BTRFS_MOD_LOG_ROOT_REPLACE);
+
tm = kzalloc(sizeof(*tm), GFP_NOFS);
if (!tm)
return NULL;
tm->logical = eb->start;
- if (op != BTRFS_MOD_LOG_KEY_ADD) {
- btrfs_node_key(eb, &tm->key, slot);
- tm->blockptr = btrfs_node_blockptr(eb, slot);
- }
+ btrfs_node_key(eb, &tm->slot_change.key, slot);
+ tm->slot_change.blockptr = btrfs_node_blockptr(eb, slot);
tm->op = op;
tm->slot = slot;
tm->generation = btrfs_node_ptr_generation(eb, slot);
@@ -830,8 +867,8 @@ static void tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
fallthrough;
case BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING:
case BTRFS_MOD_LOG_KEY_REMOVE:
- btrfs_set_node_key(eb, &tm->key, tm->slot);
- btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
+ btrfs_set_node_key(eb, &tm->slot_change.key, tm->slot);
+ btrfs_set_node_blockptr(eb, tm->slot, tm->slot_change.blockptr);
btrfs_set_node_ptr_generation(eb, tm->slot,
tm->generation);
n++;
@@ -840,8 +877,8 @@ static void tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
break;
case BTRFS_MOD_LOG_KEY_REPLACE:
BUG_ON(tm->slot >= n);
- btrfs_set_node_key(eb, &tm->key, tm->slot);
- btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
+ btrfs_set_node_key(eb, &tm->slot_change.key, tm->slot);
+ btrfs_set_node_blockptr(eb, tm->slot, tm->slot_change.blockptr);
btrfs_set_node_ptr_generation(eb, tm->slot,
tm->generation);
break;
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index fc59b57257d6..7e16a253fb35 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -129,21 +129,25 @@ void ulist_free(struct ulist *ulist)
kfree(ulist);
}
+static int ulist_node_val_key_cmp(const void *key, const struct rb_node *node)
+{
+ const u64 *val = key;
+ const struct ulist_node *unode = rb_entry(node, struct ulist_node, rb_node);
+
+ if (unode->val < *val)
+ return 1;
+ else if (unode->val > *val)
+ return -1;
+
+ return 0;
+}
+
static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
{
- struct rb_node *n = ulist->root.rb_node;
- struct ulist_node *u = NULL;
-
- while (n) {
- u = rb_entry(n, struct ulist_node, rb_node);
- if (u->val < val)
- n = n->rb_right;
- else if (u->val > val)
- n = n->rb_left;
- else
- return u;
- }
- return NULL;
+ struct rb_node *node;
+
+ node = rb_find(&val, &ulist->root, ulist_node_val_key_cmp);
+ return rb_entry_safe(node, struct ulist_node, rb_node);
}
static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node)
@@ -155,25 +159,20 @@ static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node)
ulist->nnodes--;
}
+static int ulist_node_val_cmp(struct rb_node *new, const struct rb_node *existing)
+{
+ const struct ulist_node *unode = rb_entry(new, struct ulist_node, rb_node);
+
+ return ulist_node_val_key_cmp(&unode->val, existing);
+}
+
static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins)
{
- struct rb_node **p = &ulist->root.rb_node;
- struct rb_node *parent = NULL;
- struct ulist_node *cur = NULL;
-
- while (*p) {
- parent = *p;
- cur = rb_entry(parent, struct ulist_node, rb_node);
-
- if (cur->val < ins->val)
- p = &(*p)->rb_right;
- else if (cur->val > ins->val)
- p = &(*p)->rb_left;
- else
- return -EEXIST;
- }
- rb_link_node(&ins->rb_node, parent, p);
- rb_insert_color(&ins->rb_node, &ulist->root);
+ struct rb_node *node;
+
+ node = rb_find_add(&ins->rb_node, &ulist->root, ulist_node_val_cmp);
+ if (node)
+ return -EEXIST;
return 0;
}
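
The open-coded rbtree walks above are replaced with the generic rb_find()/rb_find_add() helpers from <linux/rbtree.h>, so the ordering rule lives in two small comparators instead of two hand-rolled loops. A minimal sketch of the same pattern for a hypothetical value-keyed node (the names below are illustrative, not from the patch):

#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/types.h>

struct val_node {
	struct rb_node rb;
	u64 val;
};

/* Key comparator used by rb_find() for lookups. */
static int val_key_cmp(const void *key, const struct rb_node *node)
{
	const u64 *val = key;
	const struct val_node *entry = rb_entry(node, struct val_node, rb);

	if (entry->val < *val)
		return 1;
	if (entry->val > *val)
		return -1;
	return 0;
}

/* Node comparator used by rb_find_add() for insertions. */
static int val_node_cmp(struct rb_node *new, const struct rb_node *existing)
{
	const struct val_node *entry = rb_entry(new, struct val_node, rb);

	return val_key_cmp(&entry->val, existing);
}

static struct val_node *val_lookup(struct rb_root *root, u64 val)
{
	return rb_entry_safe(rb_find(&val, root, val_key_cmp),
			     struct val_node, rb);
}

static int val_insert(struct rb_root *root, struct val_node *ins)
{
	/* rb_find_add() returns the existing node when the key is a duplicate. */
	return rb_find_add(&ins->rb, root, val_node_cmp) ? -EEXIST : 0;
}
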
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index aca2861f2187..e3a1310fa7d5 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -27,32 +27,26 @@ static int btrfs_uuid_tree_lookup(struct btrfs_root *uuid_root, const u8 *uuid,
u8 type, u64 subid)
{
int ret;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *eb;
int slot;
u32 item_size;
unsigned long offset;
struct btrfs_key key;
- if (WARN_ON_ONCE(!uuid_root)) {
- ret = -ENOENT;
- goto out;
- }
+ if (WARN_ON_ONCE(!uuid_root))
+ return -ENOENT;
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!path)
+ return -ENOMEM;
btrfs_uuid_to_key(uuid, type, &key);
ret = btrfs_search_slot(NULL, uuid_root, &key, path, 0, 0);
- if (ret < 0) {
- goto out;
- } else if (ret > 0) {
- ret = -ENOENT;
- goto out;
- }
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return -ENOENT;
eb = path->nodes[0];
slot = path->slots[0];
@@ -64,7 +58,7 @@ static int btrfs_uuid_tree_lookup(struct btrfs_root *uuid_root, const u8 *uuid,
btrfs_warn(uuid_root->fs_info,
"uuid item with illegal size %lu!",
(unsigned long)item_size);
- goto out;
+ return ret;
}
while (item_size) {
__le64 data;
@@ -78,8 +72,6 @@ static int btrfs_uuid_tree_lookup(struct btrfs_root *uuid_root, const u8 *uuid,
item_size -= sizeof(data);
}
-out:
- btrfs_free_path(path);
return ret;
}
@@ -89,7 +81,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, const u8 *uuid, u8 typ
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *uuid_root = fs_info->uuid_root;
int ret;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *eb;
int slot;
@@ -100,18 +92,14 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, const u8 *uuid, u8 typ
if (ret != -ENOENT)
return ret;
- if (WARN_ON_ONCE(!uuid_root)) {
- ret = -EINVAL;
- goto out;
- }
+ if (WARN_ON_ONCE(!uuid_root))
+ return -EINVAL;
btrfs_uuid_to_key(uuid, type, &key);
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!path)
+ return -ENOMEM;
ret = btrfs_insert_empty_item(trans, uuid_root, path, &key,
sizeof(subid_le));
@@ -134,17 +122,12 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, const u8 *uuid, u8 typ
btrfs_warn(fs_info,
"insert uuid item failed %d (0x%016llx, 0x%016llx) type %u!",
ret, key.objectid, key.offset, type);
- goto out;
+ return ret;
}
- ret = 0;
subid_le = cpu_to_le64(subid_cpu);
write_extent_buffer(eb, &subid_le, offset, sizeof(subid_le));
- btrfs_mark_buffer_dirty(trans, eb);
-
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8 type,
@@ -153,7 +136,7 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *uuid_root = fs_info->uuid_root;
int ret;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *eb;
int slot;
@@ -163,29 +146,23 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8
unsigned long move_src;
unsigned long move_len;
- if (WARN_ON_ONCE(!uuid_root)) {
- ret = -EINVAL;
- goto out;
- }
+ if (WARN_ON_ONCE(!uuid_root))
+ return -EINVAL;
btrfs_uuid_to_key(uuid, type, &key);
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!path)
+ return -ENOMEM;
ret = btrfs_search_slot(trans, uuid_root, &key, path, -1, 1);
if (ret < 0) {
btrfs_warn(fs_info, "error %d while searching for uuid item!",
ret);
- goto out;
- }
- if (ret > 0) {
- ret = -ENOENT;
- goto out;
+ return ret;
}
+ if (ret > 0)
+ return -ENOENT;
eb = path->nodes[0];
slot = path->slots[0];
@@ -194,8 +171,7 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8
if (!IS_ALIGNED(item_size, sizeof(u64))) {
btrfs_warn(fs_info, "uuid item with illegal size %lu!",
(unsigned long)item_size);
- ret = -ENOENT;
- goto out;
+ return -ENOENT;
}
while (item_size) {
__le64 read_subid;
@@ -207,16 +183,12 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8
item_size -= sizeof(read_subid);
}
- if (!item_size) {
- ret = -ENOENT;
- goto out;
- }
+ if (!item_size)
+ return -ENOENT;
item_size = btrfs_item_size(eb, slot);
- if (item_size == sizeof(subid)) {
- ret = btrfs_del_item(trans, uuid_root, path);
- goto out;
- }
+ if (item_size == sizeof(subid))
+ return btrfs_del_item(trans, uuid_root, path);
move_dst = offset;
move_src = offset + sizeof(subid);
@@ -224,9 +196,7 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8
memmove_extent_buffer(eb, move_dst, move_src, move_len);
btrfs_truncate_item(trans, path, item_size - sizeof(subid), 1);
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
static int btrfs_uuid_iter_rem(struct btrfs_root *uuid_root, u8 *uuid, u8 type,
@@ -295,7 +265,7 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root = fs_info->uuid_root;
struct btrfs_key key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int ret = 0;
struct extent_buffer *leaf;
int slot;
@@ -303,10 +273,8 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info)
unsigned long offset;
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!path)
+ return -ENOMEM;
key.objectid = 0;
key.type = 0;
@@ -314,17 +282,15 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info)
again_search_slot:
ret = btrfs_search_forward(root, &key, path, BTRFS_OLDEST_GENERATION);
- if (ret) {
- if (ret > 0)
- ret = 0;
- goto out;
- }
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return 0;
while (1) {
- if (btrfs_fs_closing(fs_info)) {
- ret = -EINTR;
- goto out;
- }
+ if (btrfs_fs_closing(fs_info))
+ return -EINTR;
+
cond_resched();
leaf = path->nodes[0];
slot = path->slots[0];
@@ -355,7 +321,7 @@ again_search_slot:
ret = btrfs_check_uuid_tree_entry(fs_info, uuid,
key.type, subid_cpu);
if (ret < 0)
- goto out;
+ return ret;
if (ret > 0) {
btrfs_release_path(path);
ret = btrfs_uuid_iter_rem(root, uuid, key.type,
@@ -371,7 +337,7 @@ again_search_slot:
goto again_search_slot;
}
if (ret < 0 && ret != -ENOENT)
- goto out;
+ return ret;
key.offset++;
goto again_search_slot;
}
@@ -388,8 +354,6 @@ skip:
break;
}
-out:
- btrfs_free_path(path);
return ret;
}
diff --git a/fs/btrfs/verity.c b/fs/btrfs/verity.c
index e97ad824ae16..a2ac3fb68bc8 100644
--- a/fs/btrfs/verity.c
+++ b/fs/btrfs/verity.c
@@ -109,7 +109,7 @@ static int drop_verity_items(struct btrfs_inode *inode, u8 key_type)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = inode->root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
int count = 0;
int ret;
@@ -121,10 +121,8 @@ static int drop_verity_items(struct btrfs_inode *inode, u8 key_type)
while (1) {
/* 1 for the item being dropped */
trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- goto out;
- }
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
/*
* Walk backwards through all the items until we find one that
@@ -143,7 +141,7 @@ static int drop_verity_items(struct btrfs_inode *inode, u8 key_type)
path->slots[0]--;
} else if (ret < 0) {
btrfs_end_transaction(trans);
- goto out;
+ return ret;
}
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
@@ -161,17 +159,14 @@ static int drop_verity_items(struct btrfs_inode *inode, u8 key_type)
ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
if (ret) {
btrfs_end_transaction(trans);
- goto out;
+ return ret;
}
count++;
btrfs_release_path(path);
btrfs_end_transaction(trans);
}
- ret = count;
btrfs_end_transaction(trans);
-out:
- btrfs_free_path(path);
- return ret;
+ return count;
}
/*
@@ -217,7 +212,7 @@ static int write_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
const char *src, u64 len)
{
struct btrfs_trans_handle *trans;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *root = inode->root;
struct extent_buffer *leaf;
struct btrfs_key key;
@@ -233,10 +228,8 @@ static int write_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
while (len > 0) {
/* 1 for the new item being inserted */
trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- break;
- }
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
key.objectid = btrfs_ino(inode);
key.type = key_type;
@@ -267,7 +260,6 @@ static int write_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
btrfs_end_transaction(trans);
}
- btrfs_free_path(path);
return ret;
}
@@ -296,7 +288,7 @@ static int write_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
static int read_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
char *dest, u64 len, struct folio *dest_folio)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *root = inode->root;
struct extent_buffer *leaf;
struct btrfs_key key;
@@ -404,7 +396,6 @@ static int read_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
}
}
out:
- btrfs_free_path(path);
if (!ret)
ret = copied;
return ret;
@@ -485,14 +476,14 @@ static int rollback_verity(struct btrfs_inode *inode)
goto out;
}
inode->ro_flags &= ~BTRFS_INODE_RO_VERITY;
- btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
+ btrfs_sync_inode_flags_to_i_flags(inode);
ret = btrfs_update_inode(trans, inode);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
ret = del_orphan(trans, inode);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -552,7 +543,7 @@ static int finish_verity(struct btrfs_inode *inode, const void *desc,
goto out;
}
inode->ro_flags |= BTRFS_INODE_RO_VERITY;
- btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
+ btrfs_sync_inode_flags_to_i_flags(inode);
ret = btrfs_update_inode(trans, inode);
if (ret)
goto end_trans;
@@ -587,6 +578,9 @@ static int btrfs_begin_enable_verity(struct file *filp)
btrfs_assert_inode_locked(inode);
+ if (IS_ENCRYPTED(&inode->vfs_inode))
+ return -EOPNOTSUPP;
+
if (test_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags))
return -EBUSY;
@@ -676,11 +670,11 @@ int btrfs_get_verity_descriptor(struct inode *inode, void *buf, size_t buf_size)
if (ret < 0)
return ret;
- if (item.reserved[0] != 0 || item.reserved[1] != 0)
+ if (unlikely(item.reserved[0] != 0 || item.reserved[1] != 0))
return -EUCLEAN;
true_size = btrfs_stack_verity_descriptor_size(&item);
- if (true_size > INT_MAX)
+ if (unlikely(true_size > INT_MAX))
return -EUCLEAN;
if (buf_size == 0)
@@ -742,7 +736,7 @@ again:
}
folio = filemap_alloc_folio(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS),
- 0);
+ 0, NULL);
if (!folio)
return ERR_PTR(-ENOMEM);
@@ -802,6 +796,8 @@ static int btrfs_write_merkle_tree_block(struct inode *inode, const void *buf,
}
const struct fsverity_operations btrfs_verityops = {
+ .inode_info_offs = (int)offsetof(struct btrfs_inode, i_verity_info) -
+ (int)offsetof(struct btrfs_inode, vfs_inode),
.begin_enable_verity = btrfs_begin_enable_verity,
.end_enable_verity = btrfs_end_enable_verity,
.get_verity_descriptor = btrfs_get_verity_descriptor,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 1cccaf9c2b0d..ae1742a35e76 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -13,12 +13,11 @@
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
-#include "ctree.h"
#include "disk-io.h"
+#include "extent-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
-#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
@@ -48,6 +47,7 @@ struct btrfs_io_geometry {
u64 raid56_full_stripe_start;
int max_errors;
enum btrfs_map_op op;
+ bool use_rst;
};
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
@@ -213,10 +213,8 @@ void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
u64 flags = bg_flags;
u32 size_bp = size_buf;
- if (!flags) {
- strcpy(bp, "NONE");
+ if (!flags)
return;
- }
#define DESCRIBE_FLAG(flag, desc) \
do { \
@@ -402,8 +400,12 @@ static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
static void btrfs_free_device(struct btrfs_device *device)
{
WARN_ON(!list_empty(&device->post_commit_list));
- rcu_string_free(device->name);
- extent_io_tree_release(&device->alloc_state);
+ /*
+ * No need to call kfree_rcu() nor do RCU lock/unlock, nothing is
+ * reading the device name.
+ */
+ kfree(rcu_dereference_raw(device->name));
+ btrfs_extent_io_tree_release(&device->alloc_state);
btrfs_destroy_dev_zone_info(device);
kfree(device);
}
@@ -413,9 +415,10 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
struct btrfs_device *device;
WARN_ON(fs_devices->opened);
+ WARN_ON(fs_devices->holding);
while (!list_empty(&fs_devices->devices)) {
- device = list_entry(fs_devices->devices.next,
- struct btrfs_device, dev_list);
+ device = list_first_entry(&fs_devices->devices,
+ struct btrfs_device, dev_list);
list_del(&device->dev_list);
btrfs_free_device(device);
}
@@ -427,8 +430,8 @@ void __exit btrfs_cleanup_fs_uuids(void)
struct btrfs_fs_devices *fs_devices;
while (!list_empty(&fs_uuids)) {
- fs_devices = list_entry(fs_uuids.next,
- struct btrfs_fs_devices, fs_list);
+ fs_devices = list_first_entry(&fs_uuids, struct btrfs_fs_devices,
+ fs_list);
list_del(&fs_devices->fs_list);
free_fs_devices(fs_devices);
}
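
This hunk, like several earlier ones, converts list_entry(head->next, ...) to list_first_entry(), which spells out the "take the first element" intent without changing behaviour. A small illustrative drain loop using the same helper (the struct and function are made up for the example):

#include <linux/list.h>
#include <linux/slab.h>

struct demo_item {
	struct list_head link;
	int value;
};

/* Pop and free items head-first until the list is empty. */
static void demo_drain(struct list_head *head)
{
	while (!list_empty(head)) {
		struct demo_item *item = list_first_entry(head, struct demo_item, link);

		list_del(&item->link);
		kfree(item);
	}
}
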
@@ -472,7 +475,7 @@ btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
struct block_device *bdev;
int ret;
- *bdev_file = bdev_file_open_by_path(device_path, flags, holder, NULL);
+ *bdev_file = bdev_file_open_by_path(device_path, flags, holder, &fs_holder_ops);
if (IS_ERR(*bdev_file)) {
ret = PTR_ERR(*bdev_file);
@@ -487,15 +490,15 @@ btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
if (holder) {
ret = set_blocksize(*bdev_file, BTRFS_BDEV_BLOCKSIZE);
if (ret) {
- fput(*bdev_file);
+ bdev_fput(*bdev_file);
goto error;
}
}
invalidate_bdev(bdev);
- *disk_super = btrfs_read_dev_super(bdev);
+ *disk_super = btrfs_read_disk_super(bdev, 0, false);
if (IS_ERR(*disk_super)) {
ret = PTR_ERR(*disk_super);
- fput(*bdev_file);
+ bdev_fput(*bdev_file);
goto error;
}
@@ -540,7 +543,7 @@ static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device
continue;
if (devt && devt != device->devt)
continue;
- if (fs_devices->opened) {
+ if (fs_devices->opened || fs_devices->holding) {
if (devt)
ret = -EBUSY;
break;
@@ -656,7 +659,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
if (!device->name)
return -EINVAL;
- ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
+ ret = btrfs_get_bdev_and_sb(rcu_dereference_raw(device->name), flags, holder, 1,
&bdev_file, &disk_super);
if (ret)
return ret;
@@ -673,8 +676,8 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
if (btrfs_super_incompat_flags(disk_super) &
BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
- pr_err(
- "BTRFS: Invalid seeding and uuid-changed device detected\n");
+ btrfs_err(NULL,
+ "invalid seeding and uuid-changed device detected");
goto error_free_page;
}
@@ -700,7 +703,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
if (device->devt != device->bdev->bd_dev) {
btrfs_warn(NULL,
"device %s maj:min changed from %d:%d to %d:%d",
- device->name->str, MAJOR(device->devt),
+ rcu_dereference_raw(device->name), MAJOR(device->devt),
MINOR(device->devt), MAJOR(device->bdev->bd_dev),
MINOR(device->bdev->bd_dev));
@@ -719,7 +722,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
error_free_page:
btrfs_release_disk_super(disk_super);
- fput(bdev_file);
+ bdev_fput(bdev_file);
return -EINVAL;
}
@@ -732,83 +735,11 @@ const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb)
return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
}
-/*
- * We can have very weird soft links passed in.
- * One example is "/proc/self/fd/<fd>", which can be a soft link to
- * a block device.
- *
- * But it's never a good idea to use those weird names.
- * Here we check if the path (not following symlinks) is a good one inside
- * "/dev/".
- */
-static bool is_good_dev_path(const char *dev_path)
-{
- struct path path = { .mnt = NULL, .dentry = NULL };
- char *path_buf = NULL;
- char *resolved_path;
- bool is_good = false;
- int ret;
-
- if (!dev_path)
- goto out;
-
- path_buf = kmalloc(PATH_MAX, GFP_KERNEL);
- if (!path_buf)
- goto out;
-
- /*
- * Do not follow soft link, just check if the original path is inside
- * "/dev/".
- */
- ret = kern_path(dev_path, 0, &path);
- if (ret)
- goto out;
- resolved_path = d_path(&path, path_buf, PATH_MAX);
- if (IS_ERR(resolved_path))
- goto out;
- if (strncmp(resolved_path, "/dev/", strlen("/dev/")))
- goto out;
- is_good = true;
-out:
- kfree(path_buf);
- path_put(&path);
- return is_good;
-}
-
-static int get_canonical_dev_path(const char *dev_path, char *canonical)
-{
- struct path path = { .mnt = NULL, .dentry = NULL };
- char *path_buf = NULL;
- char *resolved_path;
- int ret;
-
- if (!dev_path) {
- ret = -EINVAL;
- goto out;
- }
-
- path_buf = kmalloc(PATH_MAX, GFP_KERNEL);
- if (!path_buf) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = kern_path(dev_path, LOOKUP_FOLLOW, &path);
- if (ret)
- goto out;
- resolved_path = d_path(&path, path_buf, PATH_MAX);
- ret = strscpy(canonical, resolved_path, PATH_MAX);
-out:
- kfree(path_buf);
- path_put(&path);
- return ret;
-}
-
static bool is_same_device(struct btrfs_device *device, const char *new_path)
{
struct path old = { .mnt = NULL, .dentry = NULL };
struct path new = { .mnt = NULL, .dentry = NULL };
- char *old_path = NULL;
+ char AUTO_KFREE(old_path);
bool is_same = false;
int ret;
@@ -820,7 +751,7 @@ static bool is_same_device(struct btrfs_device *device, const char *new_path)
goto out;
rcu_read_lock();
- ret = strscpy(old_path, rcu_str_deref(device->name), PATH_MAX);
+ ret = strscpy(old_path, rcu_dereference(device->name), PATH_MAX);
rcu_read_unlock();
if (ret < 0)
goto out;
@@ -834,7 +765,6 @@ static bool is_same_device(struct btrfs_device *device, const char *new_path)
if (path_equal(&old, &new))
is_same = true;
out:
- kfree(old_path);
path_put(&old);
path_put(&new);
return is_same;
@@ -853,11 +783,11 @@ static noinline struct btrfs_device *device_list_add(const char *path,
{
struct btrfs_device *device;
struct btrfs_fs_devices *fs_devices = NULL;
- struct rcu_string *name;
+ const char *name;
u64 found_transid = btrfs_super_generation(disk_super);
u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
dev_t path_devt;
- int error;
+ int ret;
bool same_fsid_diff_dev = false;
bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
@@ -869,11 +799,11 @@ static noinline struct btrfs_device *device_list_add(const char *path,
return ERR_PTR(-EAGAIN);
}
- error = lookup_bdev(path, &path_devt);
- if (error) {
+ ret = lookup_bdev(path, &path_devt);
+ if (ret) {
btrfs_err(NULL, "failed to lookup block device for path %s: %d",
- path, error);
- return ERR_PTR(error);
+ path, ret);
+ return ERR_PTR(ret);
}
fs_devices = find_fsid_by_device(disk_super, path_devt, &same_fsid_diff_dev);
@@ -890,7 +820,7 @@ static noinline struct btrfs_device *device_list_add(const char *path,
if (same_fsid_diff_dev) {
generate_random_uuid(fs_devices->fsid);
fs_devices->temp_fsid = true;
- pr_info("BTRFS: device %s (%d:%d) using temp-fsid %pU\n",
+ btrfs_info(NULL, "device %s (%d:%d) using temp-fsid %pU",
path, MAJOR(path_devt), MINOR(path_devt),
fs_devices->fsid);
}
@@ -961,6 +891,8 @@ static noinline struct btrfs_device *device_list_add(const char *path,
current->comm, task_pid_nr(current));
} else if (!device->name || !is_same_device(device, path)) {
+ const char *old_name;
+
/*
* When FS is already mounted.
* 1. If you are here and if the device->name is NULL that
@@ -1014,27 +946,31 @@ static noinline struct btrfs_device *device_list_add(const char *path,
if (device->bdev) {
if (device->devt != path_devt) {
mutex_unlock(&fs_devices->device_list_mutex);
- btrfs_warn_in_rcu(NULL,
+ btrfs_warn(NULL,
"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
path, devid, found_transid,
current->comm,
task_pid_nr(current));
return ERR_PTR(-EEXIST);
}
- btrfs_info_in_rcu(NULL,
+ btrfs_info(NULL,
"devid %llu device path %s changed to %s scanned by %s (%d)",
devid, btrfs_dev_name(device),
path, current->comm,
task_pid_nr(current));
}
- name = rcu_string_strdup(path, GFP_NOFS);
+ name = kstrdup(path, GFP_NOFS);
if (!name) {
mutex_unlock(&fs_devices->device_list_mutex);
return ERR_PTR(-ENOMEM);
}
- rcu_string_free(device->name);
+ rcu_read_lock();
+ old_name = rcu_dereference(device->name);
+ rcu_read_unlock();
rcu_assign_pointer(device->name, name);
+ kfree_rcu_mightsleep(old_name);
+
if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
fs_devices->missing_devices--;
clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
@@ -1083,7 +1019,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
* uuid mutex so nothing we touch in here is going to disappear.
*/
if (orig_dev->name)
- dev_path = orig_dev->name->str;
+ dev_path = rcu_dereference_raw(orig_dev->name);
device = btrfs_alloc_device(NULL, &orig_dev->devid,
orig_dev->uuid, dev_path);
@@ -1141,7 +1077,7 @@ static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
continue;
if (device->bdev_file) {
- fput(device->bdev_file);
+ bdev_fput(device->bdev_file);
device->bdev = NULL;
device->bdev_file = NULL;
fs_devices->open_devices--;
@@ -1188,7 +1124,7 @@ static void btrfs_close_bdev(struct btrfs_device *device)
invalidate_bdev(device->bdev);
}
- fput(device->bdev_file);
+ bdev_fput(device->bdev_file);
}
static void btrfs_close_one_device(struct btrfs_device *device)
@@ -1220,7 +1156,7 @@ static void btrfs_close_one_device(struct btrfs_device *device)
device->fs_info = NULL;
atomic_set(&device->dev_stats_ccnt, 0);
- extent_io_tree_release(&device->alloc_state);
+ btrfs_extent_io_tree_release(&device->alloc_state);
/*
* Reset the flush error record. We might have a transient flush error
@@ -1268,7 +1204,7 @@ void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
mutex_lock(&uuid_mutex);
close_fs_devices(fs_devices);
- if (!fs_devices->opened) {
+ if (!fs_devices->opened && !fs_devices->holding) {
list_splice_init(&fs_devices->seed_list, &list);
/*
@@ -1298,6 +1234,7 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
struct btrfs_device *device;
struct btrfs_device *latest_dev = NULL;
struct btrfs_device *tmp_device;
+ s64 __maybe_unused value = 0;
int ret = 0;
list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
@@ -1327,7 +1264,23 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
fs_devices->latest_dev = latest_dev;
fs_devices->total_rw_bytes = 0;
fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ fs_devices->rr_min_contig_read = BTRFS_DEFAULT_RR_MIN_CONTIG_READ;
+ fs_devices->read_devid = latest_dev->devid;
+ fs_devices->read_policy = btrfs_read_policy_to_enum(btrfs_get_mod_read_policy(),
+ &value);
+ if (fs_devices->read_policy == BTRFS_READ_POLICY_RR)
+ fs_devices->collect_fs_stats = true;
+
+ if (value) {
+ if (fs_devices->read_policy == BTRFS_READ_POLICY_RR)
+ fs_devices->rr_min_contig_read = value;
+ if (fs_devices->read_policy == BTRFS_READ_POLICY_DEVID)
+ fs_devices->read_devid = value;
+ }
+#else
fs_devices->read_policy = BTRFS_READ_POLICY_PID;
+#endif
return 0;
}
@@ -1379,48 +1332,58 @@ void btrfs_release_disk_super(struct btrfs_super_block *super)
put_page(page);
}
-static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
- u64 bytenr, u64 bytenr_orig)
+struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
+ int copy_num, bool drop_cache)
{
- struct btrfs_super_block *disk_super;
+ struct btrfs_super_block *super;
struct page *page;
- void *p;
- pgoff_t index;
+ u64 bytenr, bytenr_orig;
+ struct address_space *mapping = bdev->bd_mapping;
+ int ret;
- /* make sure our super fits in the device */
- if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
- return ERR_PTR(-EINVAL);
+ bytenr_orig = btrfs_sb_offset(copy_num);
+ ret = btrfs_sb_log_location_bdev(bdev, copy_num, READ, &bytenr);
+ if (ret < 0) {
+ if (ret == -ENOENT)
+ ret = -EINVAL;
+ return ERR_PTR(ret);
+ }
- /* make sure our super fits in the page */
- if (sizeof(*disk_super) > PAGE_SIZE)
+ if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev))
return ERR_PTR(-EINVAL);
- /* make sure our super doesn't straddle pages on disk */
- index = bytenr >> PAGE_SHIFT;
- if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
- return ERR_PTR(-EINVAL);
+ if (drop_cache) {
+ /* This should only be called with the primary sb. */
+ ASSERT(copy_num == 0);
- /* pull in the page with our super */
- page = read_cache_page_gfp(bdev->bd_mapping, index, GFP_KERNEL);
+ /*
+ * Drop the page of the primary superblock, so later read will
+ * always read from the device.
+ */
+ invalidate_inode_pages2_range(mapping, bytenr >> PAGE_SHIFT,
+ (bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT);
+ }
+ page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
if (IS_ERR(page))
return ERR_CAST(page);
- p = page_address(page);
-
- /* align our pointer to the offset of the super block */
- disk_super = p + offset_in_page(bytenr);
-
- if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
- btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
- btrfs_release_disk_super(p);
+ super = page_address(page);
+ if (btrfs_super_magic(super) != BTRFS_MAGIC ||
+ btrfs_super_bytenr(super) != bytenr_orig) {
+ btrfs_release_disk_super(super);
return ERR_PTR(-EINVAL);
}
- if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
- disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;
+ /*
+ * Make sure the last byte of label is properly NUL terminated. We use
+ * '%s' to print the label; if it is not properly NUL terminated we can
+ * access beyond the label.
+ */
+ if (super->label[0] && super->label[BTRFS_LABEL_SIZE - 1])
+ super->label[BTRFS_LABEL_SIZE - 1] = 0;
- return disk_super;
+ return super;
}
int btrfs_forget_devices(dev_t devt)
@@ -1458,7 +1421,7 @@ static bool btrfs_skip_registration(struct btrfs_super_block *disk_super,
list_for_each_entry(device, &fs_devices->devices, dev_list) {
if (device->bdev && (device->bdev->bd_dev == devt) &&
- strcmp(device->name->str, path) != 0) {
+ strcmp(rcu_dereference_raw(device->name), path) != 0) {
mutex_unlock(&fs_devices->device_list_mutex);
/* Do not skip registration. */
@@ -1484,30 +1447,17 @@ static bool btrfs_skip_registration(struct btrfs_super_block *disk_super,
* the device or return an error. Multi-device and seeding devices are registered
* in both cases.
*/
-struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
+struct btrfs_device *btrfs_scan_one_device(const char *path,
bool mount_arg_dev)
{
struct btrfs_super_block *disk_super;
bool new_device_added = false;
struct btrfs_device *device = NULL;
struct file *bdev_file;
- char *canonical_path = NULL;
- u64 bytenr;
dev_t devt;
- int ret;
lockdep_assert_held(&uuid_mutex);
- if (!is_good_dev_path(path)) {
- canonical_path = kmalloc(PATH_MAX, GFP_KERNEL);
- if (canonical_path) {
- ret = get_canonical_dev_path(path, canonical_path);
- if (ret < 0) {
- kfree(canonical_path);
- canonical_path = NULL;
- }
- }
- }
/*
* Avoid an exclusive open here, as the systemd-udev may initiate the
* device scan which may race with the user's mount or mkfs command,
@@ -1518,24 +1468,11 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
* values temporarily, as the device paths of the fsid are the only
* required information for assembling the volume.
*/
- bdev_file = bdev_file_open_by_path(path, flags, NULL, NULL);
+ bdev_file = bdev_file_open_by_path(path, BLK_OPEN_READ, NULL, NULL);
if (IS_ERR(bdev_file))
return ERR_CAST(bdev_file);
- /*
- * We would like to check all the super blocks, but doing so would
- * allow a mount to succeed after a mkfs from a different filesystem.
- * Currently, recovery from a bad primary btrfs superblock is done
- * using the userspace command 'btrfs check --super'.
- */
- ret = btrfs_sb_log_location_bdev(file_bdev(bdev_file), 0, READ, &bytenr);
- if (ret) {
- device = ERR_PTR(ret);
- goto error_bdev_put;
- }
-
- disk_super = btrfs_read_disk_super(file_bdev(bdev_file), bytenr,
- btrfs_sb_offset(0));
+ disk_super = btrfs_read_disk_super(file_bdev(bdev_file), 0, false);
if (IS_ERR(disk_super)) {
device = ERR_CAST(disk_super);
goto error_bdev_put;
@@ -1543,7 +1480,7 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
devt = file_bdev(bdev_file)->bd_dev;
if (btrfs_skip_registration(disk_super, path, devt, mount_arg_dev)) {
- pr_debug("BTRFS: skip registering single non-seed device %s (%d:%d)\n",
+ btrfs_debug(NULL, "skip registering single non-seed device %s (%d:%d)",
path, MAJOR(devt), MINOR(devt));
btrfs_free_stale_devices(devt, NULL);
@@ -1552,8 +1489,7 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
goto free_disk_super;
}
- device = device_list_add(canonical_path ? : path, disk_super,
- &new_device_added);
+ device = device_list_add(path, disk_super, &new_device_added);
if (!IS_ERR(device) && new_device_added)
btrfs_free_stale_devices(device->devt, device);
@@ -1561,8 +1497,7 @@ free_disk_super:
btrfs_release_disk_super(disk_super);
error_bdev_put:
- fput(bdev_file);
- kfree(canonical_path);
+ bdev_fput(bdev_file);
return device;
}
@@ -1578,9 +1513,9 @@ static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
lockdep_assert_held(&device->fs_info->chunk_mutex);
- if (find_first_extent_bit(&device->alloc_state, *start,
- &physical_start, &physical_end,
- CHUNK_ALLOCATED, NULL)) {
+ if (btrfs_find_first_extent_bit(&device->alloc_state, *start,
+ &physical_start, &physical_end,
+ CHUNK_ALLOCATED, NULL)) {
if (in_range(physical_start, *start, len) ||
in_range(*start, physical_start,
@@ -1595,6 +1530,9 @@ static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
static u64 dev_extent_search_start(struct btrfs_device *device)
{
switch (device->fs_devices->chunk_alloc_policy) {
+ default:
+ btrfs_warn_unknown_chunk_allocation(device->fs_devices->chunk_alloc_policy);
+ fallthrough;
case BTRFS_CHUNK_ALLOC_REGULAR:
return BTRFS_DEVICE_RANGE_RESERVED;
case BTRFS_CHUNK_ALLOC_ZONED:
@@ -1604,8 +1542,6 @@ static u64 dev_extent_search_start(struct btrfs_device *device)
* for superblock logging.
*/
return 0;
- default:
- BUG();
}
}
@@ -1618,7 +1554,8 @@ static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
int ret;
bool changed = false;
- ASSERT(IS_ALIGNED(*hole_start, zone_size));
+ ASSERT(IS_ALIGNED(*hole_start, zone_size),
+ "hole_start=%llu zone_size=%llu", *hole_start, zone_size);
while (*hole_size > 0) {
pos = btrfs_find_allocatable_zones(device, *hole_start,
@@ -1684,6 +1621,9 @@ static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
}
switch (device->fs_devices->chunk_alloc_policy) {
+ default:
+ btrfs_warn_unknown_chunk_allocation(device->fs_devices->chunk_alloc_policy);
+ fallthrough;
case BTRFS_CHUNK_ALLOC_REGULAR:
/* No extra check */
break;
@@ -1698,8 +1638,6 @@ static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
continue;
}
break;
- default:
- BUG();
}
break;
@@ -1742,7 +1680,7 @@ static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
struct btrfs_root *root = fs_info->dev_root;
struct btrfs_key key;
struct btrfs_dev_extent *dev_extent;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
u64 search_start;
u64 hole_size;
u64 max_hole_start;
@@ -1772,12 +1710,12 @@ again:
}
path->reada = READA_FORWARD;
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
key.objectid = device->devid;
- key.offset = search_start;
key.type = BTRFS_DEV_EXTENT_KEY;
+ key.offset = search_start;
ret = btrfs_search_backwards(root, &key, path);
if (ret < 0)
@@ -1869,9 +1807,10 @@ next:
else
ret = 0;
- ASSERT(max_hole_start + max_hole_size <= search_end);
+ ASSERT(max_hole_start + max_hole_size <= search_end,
+ "max_hole_start=%llu max_hole_size=%llu search_end=%llu",
+ max_hole_start, max_hole_size, search_end);
out:
- btrfs_free_path(path);
*start = max_hole_start;
if (len)
*len = max_hole_size;
@@ -1885,7 +1824,7 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = device->fs_info;
struct btrfs_root *root = fs_info->dev_root;
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_key found_key;
struct extent_buffer *leaf = NULL;
@@ -1896,15 +1835,15 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
return -ENOMEM;
key.objectid = device->devid;
- key.offset = start;
key.type = BTRFS_DEV_EXTENT_KEY;
+ key.offset = start;
again:
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0) {
ret = btrfs_previous_item(root, path, key.objectid,
BTRFS_DEV_EXTENT_KEY);
if (ret)
- goto out;
+ return ret;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
extent = btrfs_item_ptr(leaf, path->slots[0],
@@ -1919,7 +1858,7 @@ again:
extent = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_dev_extent);
} else {
- goto out;
+ return ret;
}
*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
@@ -1927,8 +1866,6 @@ again:
ret = btrfs_del_item(trans, root, path);
if (ret == 0)
set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
-out:
- btrfs_free_path(path);
return ret;
}
@@ -1956,7 +1893,7 @@ static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
int ret;
struct btrfs_key key;
struct btrfs_key found_key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
path = btrfs_alloc_path();
if (!path)
@@ -1968,13 +1905,12 @@ static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
if (ret < 0)
- goto error;
+ return ret;
- if (ret == 0) {
+ if (unlikely(ret == 0)) {
/* Corruption */
btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
- ret = -EUCLEAN;
- goto error;
+ return -EUCLEAN;
}
ret = btrfs_previous_item(fs_info->chunk_root, path,
@@ -1987,10 +1923,7 @@ static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
path->slots[0]);
*devid_ret = found_key.offset + 1;
}
- ret = 0;
-error:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
/*
@@ -2001,7 +1934,7 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
struct btrfs_device *device)
{
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_dev_item *dev_item;
struct extent_buffer *leaf;
struct btrfs_key key;
@@ -2020,7 +1953,7 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
&key, sizeof(*dev_item));
btrfs_trans_release_chunk_metadata(trans);
if (ret)
- goto out;
+ return ret;
leaf = path->nodes[0];
dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
@@ -2045,12 +1978,8 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
ptr = btrfs_device_fsid(dev_item);
write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
ptr, BTRFS_FSID_SIZE);
- btrfs_mark_buffer_dirty(trans, leaf);
- ret = 0;
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
/*
@@ -2062,14 +1991,11 @@ out:
static void update_dev_time(const char *device_path)
{
struct path path;
- int ret;
-
- ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
- if (ret)
- return;
- inode_update_time(d_inode(path.dentry), S_MTIME | S_CTIME | S_VERSION);
- path_put(&path);
+ if (!kern_path(device_path, LOOKUP_FOLLOW, &path)) {
+ vfs_utimes(&path, NULL);
+ path_put(&path);
+ }
}
static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
@@ -2077,7 +2003,7 @@ static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
{
struct btrfs_root *root = device->fs_info->chunk_root;
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
path = btrfs_alloc_path();
@@ -2091,16 +2017,12 @@ static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
btrfs_reserve_chunk_metadata(trans, false);
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
btrfs_trans_release_chunk_metadata(trans);
- if (ret) {
- if (ret > 0)
- ret = -ENOENT;
- goto out;
- }
+ if (ret > 0)
+ return -ENOENT;
+ if (ret < 0)
+ return ret;
- ret = btrfs_del_item(trans, root, path);
-out:
- btrfs_free_path(path);
- return ret;
+ return btrfs_del_item(trans, root, path);
}
/*
@@ -2183,7 +2105,7 @@ static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
down_read(&fs_info->dev_replace.rwsem);
if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
- ASSERT(num_devices > 1);
+ ASSERT(num_devices > 1, "num_devices=%llu", num_devices);
num_devices--;
}
up_read(&fs_info->dev_replace.rwsem);
@@ -2199,7 +2121,7 @@ static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info,
const u64 bytenr = btrfs_sb_offset(copy_num);
int ret;
- disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr);
+ disk_super = btrfs_read_disk_super(bdev, copy_num, false);
if (IS_ERR(disk_super))
return;
@@ -2232,7 +2154,7 @@ void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, struct btrfs_devic
btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
/* Update ctime/mtime for device path for libblkid */
- update_dev_time(device->name->str);
+ update_dev_time(rcu_dereference_raw(device->name));
}
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
@@ -2272,7 +2194,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
}
if (btrfs_pinned_by_swapfile(fs_info, device)) {
- btrfs_warn_in_rcu(fs_info,
+ btrfs_warn(fs_info,
"cannot remove device %s (devid %llu) due to active swapfile",
btrfs_dev_name(device), device->devid);
return -ETXTBSY;
@@ -2303,7 +2225,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
}
ret = btrfs_rm_dev_item(trans, device);
- if (ret) {
+ if (unlikely(ret)) {
/* Any error in dev item removal is critical */
btrfs_crit(fs_info,
"failed to remove device item for devid %llu: %d",
@@ -2362,7 +2284,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
* free the device.
*
* We cannot call btrfs_close_bdev() here because we're holding the sb
- * write lock, and fput() on the block device will pull in the
+ * write lock, and bdev_fput() on the block device will pull in the
* ->open_mutex on the block device and its dependencies. Instead
* just flush the device and let the caller do the final bdev_release.
*/
@@ -2387,7 +2309,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
*/
if (cur_devices->num_devices == 0) {
list_del_init(&cur_devices->seed_list);
- ASSERT(cur_devices->opened == 1);
+ ASSERT(cur_devices->opened == 1, "opened=%d", cur_devices->opened);
cur_devices->opened--;
free_fs_devices(cur_devices);
}
@@ -2541,7 +2463,7 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
else
memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
btrfs_release_disk_super(disk_super);
- fput(bdev_file);
+ bdev_fput(bdev_file);
return 0;
}
@@ -2686,7 +2608,7 @@ static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
BTRFS_DEV_LOOKUP_ARGS(args);
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = fs_info->chunk_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_dev_item *dev_item;
struct btrfs_device *device;
@@ -2700,15 +2622,15 @@ static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
return -ENOMEM;
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
- key.offset = 0;
key.type = BTRFS_DEV_ITEM_KEY;
+ key.offset = 0;
while (1) {
btrfs_reserve_chunk_metadata(trans, false);
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
btrfs_trans_release_chunk_metadata(trans);
if (ret < 0)
- goto error;
+ return ret;
leaf = path->nodes[0];
next_slot:
@@ -2717,7 +2639,7 @@ next_slot:
if (ret > 0)
break;
if (ret < 0)
- goto error;
+ return ret;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
btrfs_release_path(path);
@@ -2741,19 +2663,14 @@ next_slot:
device = btrfs_find_device(fs_info->fs_devices, &args);
BUG_ON(!device); /* Logic error */
- if (device->fs_devices->seeding) {
+ if (device->fs_devices->seeding)
btrfs_set_device_generation(leaf, dev_item,
device->generation);
- btrfs_mark_buffer_dirty(trans, leaf);
- }
path->slots[0]++;
goto next_slot;
}
- ret = 0;
-error:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
@@ -2775,7 +2692,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
return -EROFS;
bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE,
- fs_info->bdev_holder, NULL);
+ fs_info->sb, &fs_holder_ops);
if (IS_ERR(bdev_file))
return PTR_ERR(bdev_file);
@@ -2784,6 +2701,11 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
goto error;
}
+ if (bdev_nr_bytes(file_bdev(bdev_file)) <= BTRFS_DEVICE_RANGE_RESERVED) {
+ ret = -EINVAL;
+ goto error;
+ }
+
if (fs_devices->seeding) {
seeding_dev = true;
down_write(&sb->s_umount);
@@ -2900,21 +2822,21 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
mutex_lock(&fs_info->chunk_mutex);
ret = init_first_rw_device(trans);
mutex_unlock(&fs_info->chunk_mutex);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto error_sysfs;
}
}
ret = btrfs_add_dev_item(trans, device);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto error_sysfs;
}
if (seeding_dev) {
ret = btrfs_finish_sprout(trans);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto error_sysfs;
}
@@ -2991,7 +2913,7 @@ error_free_zone:
error_free_device:
btrfs_free_device(device);
error:
- fput(bdev_file);
+ bdev_fput(bdev_file);
if (locked) {
mutex_unlock(&uuid_mutex);
up_write(&sb->s_umount);
@@ -3003,7 +2925,7 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device)
{
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *root = device->fs_info->chunk_root;
struct btrfs_dev_item *dev_item;
struct extent_buffer *leaf;
@@ -3019,12 +2941,10 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret < 0)
- goto out;
+ return ret;
- if (ret > 0) {
- ret = -ENOENT;
- goto out;
- }
+ if (ret > 0)
+ return -ENOENT;
leaf = path->nodes[0];
dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
@@ -3038,10 +2958,6 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
btrfs_device_get_disk_total_bytes(device));
btrfs_set_device_bytes_used(leaf, dev_item,
btrfs_device_get_bytes_used(device));
- btrfs_mark_buffer_dirty(trans, leaf);
-
-out:
- btrfs_free_path(path);
return ret;
}
@@ -3094,7 +3010,7 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = fs_info->chunk_root;
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
path = btrfs_alloc_path();
@@ -3102,28 +3018,26 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
return -ENOMEM;
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
- key.offset = chunk_offset;
key.type = BTRFS_CHUNK_ITEM_KEY;
+ key.offset = chunk_offset;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0)
- goto out;
- else if (ret > 0) { /* Logic error or corruption */
+ return ret;
+ if (unlikely(ret > 0)) {
+ /* Logic error or corruption */
btrfs_err(fs_info, "failed to lookup chunk %llu when freeing",
chunk_offset);
btrfs_abort_transaction(trans, -ENOENT);
- ret = -EUCLEAN;
- goto out;
+ return -EUCLEAN;
}
ret = btrfs_del_item(trans, root, path);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_err(fs_info, "failed to delete chunk %llu item", chunk_offset);
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
-out:
- btrfs_free_path(path);
return ret;
}
@@ -3321,7 +3235,8 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
* user having built with ASSERT enabled, so if ASSERT doesn't
* do anything we still error out.
*/
- ASSERT(0);
+ DEBUG_WARN("errr %ld reading chunk map at offset %llu",
+ PTR_ERR(map), chunk_offset);
return PTR_ERR(map);
}
@@ -3341,7 +3256,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
ret = btrfs_free_dev_extent(trans, device,
map->stripes[i].physical,
&dev_extent_len);
- if (ret) {
+ if (unlikely(ret)) {
mutex_unlock(&fs_devices->device_list_mutex);
btrfs_abort_transaction(trans, ret);
goto out;
@@ -3353,6 +3268,12 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
device->bytes_used - dev_extent_len);
atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
btrfs_clear_space_info_full(fs_info);
+
+ if (list_empty(&device->post_commit_list)) {
+ list_add_tail(&device->post_commit_list,
+ &trans->transaction->dev_update_list);
+ }
+
mutex_unlock(&fs_info->chunk_mutex);
}
}
@@ -3402,8 +3323,16 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
if (ret == -ENOSPC) {
const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
struct btrfs_block_group *sys_bg;
+ struct btrfs_space_info *space_info;
- sys_bg = btrfs_create_chunk(trans, sys_flags);
+ space_info = btrfs_find_space_info(fs_info, sys_flags);
+ if (unlikely(!space_info)) {
+ ret = -EINVAL;
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+
+ sys_bg = btrfs_create_chunk(trans, space_info, sys_flags);
if (IS_ERR(sys_bg)) {
ret = PTR_ERR(sys_bg);
btrfs_abort_transaction(trans, ret);
@@ -3411,17 +3340,17 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
}
ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
ret = remove_chunk_item(trans, map, chunk_offset);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
- } else if (ret) {
+ } else if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -3430,7 +3359,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -3446,7 +3375,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
btrfs_trans_release_chunk_metadata(trans);
ret = btrfs_remove_block_group(trans, map);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -3461,7 +3390,8 @@ out:
return ret;
}
-int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
+int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset,
+ bool verbose)
{
struct btrfs_root *root = fs_info->chunk_root;
struct btrfs_trans_handle *trans;
@@ -3491,7 +3421,7 @@ int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
/* step one, relocate all the extents inside this chunk */
btrfs_scrub_pause(fs_info);
- ret = btrfs_relocate_block_group(fs_info, chunk_offset);
+ ret = btrfs_relocate_block_group(fs_info, chunk_offset, verbose);
btrfs_scrub_continue(fs_info);
if (ret) {
/*
@@ -3544,7 +3474,7 @@ int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *chunk_root = fs_info->chunk_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_chunk *chunk;
struct btrfs_key key;
@@ -3560,17 +3490,17 @@ static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
again:
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
- key.offset = (u64)-1;
key.type = BTRFS_CHUNK_ITEM_KEY;
+ key.offset = (u64)-1;
while (1) {
mutex_lock(&fs_info->reclaim_bgs_lock);
ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
if (ret < 0) {
mutex_unlock(&fs_info->reclaim_bgs_lock);
- goto error;
+ return ret;
}
- if (ret == 0) {
+ if (unlikely(ret == 0)) {
/*
* On the first search we would find chunk tree with
* offset -1, which is not possible. On subsequent
@@ -3578,9 +3508,8 @@ again:
* offset (one less than the previous one, wrong
* alignment and size).
*/
- ret = -EUCLEAN;
mutex_unlock(&fs_info->reclaim_bgs_lock);
- goto error;
+ return -EUCLEAN;
}
ret = btrfs_previous_item(chunk_root, path, key.objectid,
@@ -3588,7 +3517,7 @@ again:
if (ret)
mutex_unlock(&fs_info->reclaim_bgs_lock);
if (ret < 0)
- goto error;
+ return ret;
if (ret > 0)
break;
@@ -3601,7 +3530,8 @@ again:
btrfs_release_path(path);
if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
- ret = btrfs_relocate_chunk(fs_info, found_key.offset);
+ ret = btrfs_relocate_chunk(fs_info, found_key.offset,
+ true);
if (ret == -ENOSPC)
failed++;
else
@@ -3621,8 +3551,6 @@ again:
} else if (WARN_ON(failed && retried)) {
ret = -ENOSPC;
}
-error:
- btrfs_free_path(path);
return ret;
}
@@ -3748,10 +3676,7 @@ static int insert_balance_item(struct btrfs_fs_info *fs_info,
btrfs_set_balance_meta(leaf, item, &disk_bargs);
btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
btrfs_set_balance_sys(leaf, item, &disk_bargs);
-
btrfs_set_balance_flags(leaf, item, bctl->flags);
-
- btrfs_mark_buffer_dirty(trans, leaf);
out:
btrfs_free_path(path);
err = btrfs_commit_transaction(trans);
@@ -3866,26 +3791,25 @@ static void reset_balance_state(struct btrfs_fs_info *fs_info)
* Balance filters. Return 1 if chunk should be filtered out
* (should not be balanced).
*/
-static int chunk_profiles_filter(u64 chunk_type,
- struct btrfs_balance_args *bargs)
+static bool chunk_profiles_filter(u64 chunk_type, struct btrfs_balance_args *bargs)
{
chunk_type = chunk_to_extended(chunk_type) &
BTRFS_EXTENDED_PROFILE_MASK;
if (bargs->profiles & chunk_type)
- return 0;
+ return false;
- return 1;
+ return true;
}
-static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
- struct btrfs_balance_args *bargs)
+static bool chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
+ struct btrfs_balance_args *bargs)
{
struct btrfs_block_group *cache;
u64 chunk_used;
u64 user_thresh_min;
u64 user_thresh_max;
- int ret = 1;
+ bool ret = true;
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
chunk_used = cache->used;
@@ -3903,18 +3827,18 @@ static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_off
user_thresh_max = mult_perc(cache->length, bargs->usage_max);
if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
- ret = 0;
+ ret = false;
btrfs_put_block_group(cache);
return ret;
}
-static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
- u64 chunk_offset, struct btrfs_balance_args *bargs)
+static bool chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
+ struct btrfs_balance_args *bargs)
{
struct btrfs_block_group *cache;
u64 chunk_used, user_thresh;
- int ret = 1;
+ bool ret = true;
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
chunk_used = cache->used;
@@ -3927,15 +3851,14 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
user_thresh = mult_perc(cache->length, bargs->usage);
if (chunk_used < user_thresh)
- ret = 0;
+ ret = false;
btrfs_put_block_group(cache);
return ret;
}
-static int chunk_devid_filter(struct extent_buffer *leaf,
- struct btrfs_chunk *chunk,
- struct btrfs_balance_args *bargs)
+static bool chunk_devid_filter(struct extent_buffer *leaf, struct btrfs_chunk *chunk,
+ struct btrfs_balance_args *bargs)
{
struct btrfs_stripe *stripe;
int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
@@ -3944,10 +3867,10 @@ static int chunk_devid_filter(struct extent_buffer *leaf,
for (i = 0; i < num_stripes; i++) {
stripe = btrfs_stripe_nr(chunk, i);
if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static u64 calc_data_stripes(u64 type, int num_stripes)
@@ -3960,9 +3883,8 @@ static u64 calc_data_stripes(u64 type, int num_stripes)
}
/* [pstart, pend) */
-static int chunk_drange_filter(struct extent_buffer *leaf,
- struct btrfs_chunk *chunk,
- struct btrfs_balance_args *bargs)
+static bool chunk_drange_filter(struct extent_buffer *leaf, struct btrfs_chunk *chunk,
+ struct btrfs_balance_args *bargs)
{
struct btrfs_stripe *stripe;
int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
@@ -3973,7 +3895,7 @@ static int chunk_drange_filter(struct extent_buffer *leaf,
int i;
if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
- return 0;
+ return false;
type = btrfs_chunk_type(leaf, chunk);
factor = calc_data_stripes(type, num_stripes);
@@ -3989,56 +3911,53 @@ static int chunk_drange_filter(struct extent_buffer *leaf,
if (stripe_offset < bargs->pend &&
stripe_offset + stripe_length > bargs->pstart)
- return 0;
+ return false;
}
- return 1;
+ return true;
}
/* [vstart, vend) */
-static int chunk_vrange_filter(struct extent_buffer *leaf,
- struct btrfs_chunk *chunk,
- u64 chunk_offset,
- struct btrfs_balance_args *bargs)
+static bool chunk_vrange_filter(struct extent_buffer *leaf, struct btrfs_chunk *chunk,
+ u64 chunk_offset, struct btrfs_balance_args *bargs)
{
if (chunk_offset < bargs->vend &&
chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
/* at least part of the chunk is inside this vrange */
- return 0;
+ return false;
- return 1;
+ return true;
}
-static int chunk_stripes_range_filter(struct extent_buffer *leaf,
- struct btrfs_chunk *chunk,
- struct btrfs_balance_args *bargs)
+static bool chunk_stripes_range_filter(struct extent_buffer *leaf,
+ struct btrfs_chunk *chunk,
+ struct btrfs_balance_args *bargs)
{
int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
if (bargs->stripes_min <= num_stripes
&& num_stripes <= bargs->stripes_max)
- return 0;
+ return false;
- return 1;
+ return true;
}
-static int chunk_soft_convert_filter(u64 chunk_type,
- struct btrfs_balance_args *bargs)
+static bool chunk_soft_convert_filter(u64 chunk_type, struct btrfs_balance_args *bargs)
{
if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
- return 0;
+ return false;
chunk_type = chunk_to_extended(chunk_type) &
BTRFS_EXTENDED_PROFILE_MASK;
if (bargs->target == chunk_type)
- return 1;
+ return true;
- return 0;
+ return false;
}
-static int should_balance_chunk(struct extent_buffer *leaf,
- struct btrfs_chunk *chunk, u64 chunk_offset)
+static bool should_balance_chunk(struct extent_buffer *leaf, struct btrfs_chunk *chunk,
+ u64 chunk_offset)
{
struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_balance_control *bctl = fs_info->balance_ctl;
@@ -4048,7 +3967,7 @@ static int should_balance_chunk(struct extent_buffer *leaf,
/* type filter */
if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
(bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
- return 0;
+ return false;
}
if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
@@ -4061,46 +3980,46 @@ static int should_balance_chunk(struct extent_buffer *leaf,
/* profiles filter */
if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
chunk_profiles_filter(chunk_type, bargs)) {
- return 0;
+ return false;
}
/* usage filter */
if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
chunk_usage_filter(fs_info, chunk_offset, bargs)) {
- return 0;
+ return false;
} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
- return 0;
+ return false;
}
/* devid filter */
if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
chunk_devid_filter(leaf, chunk, bargs)) {
- return 0;
+ return false;
}
/* drange filter, makes sense only with devid filter */
if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
chunk_drange_filter(leaf, chunk, bargs)) {
- return 0;
+ return false;
}
/* vrange filter */
if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
- return 0;
+ return false;
}
/* stripes filter */
if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
chunk_stripes_range_filter(leaf, chunk, bargs)) {
- return 0;
+ return false;
}
/* soft profile changing mode */
if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
chunk_soft_convert_filter(chunk_type, bargs)) {
- return 0;
+ return false;
}
/*
@@ -4108,7 +4027,7 @@ static int should_balance_chunk(struct extent_buffer *leaf,
*/
if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
if (bargs->limit == 0)
- return 0;
+ return false;
else
bargs->limit--;
} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
@@ -4118,12 +4037,12 @@ static int should_balance_chunk(struct extent_buffer *leaf,
* about the count of all chunks that satisfy the filters.
*/
if (bargs->limit_max == 0)
- return 0;
+ return false;
else
bargs->limit_max--;
}
- return 1;
+ return true;
}
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
@@ -4132,7 +4051,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
struct btrfs_root *chunk_root = fs_info->chunk_root;
u64 chunk_type;
struct btrfs_chunk *chunk;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_key found_key;
struct extent_buffer *leaf;
@@ -4170,8 +4089,8 @@ again:
bctl->sys.limit = limit_sys;
}
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
- key.offset = (u64)-1;
key.type = BTRFS_CHUNK_ITEM_KEY;
+ key.offset = (u64)-1;
while (1) {
if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
@@ -4275,7 +4194,7 @@ again:
}
}
- ret = btrfs_relocate_chunk(fs_info, found_key.offset);
+ ret = btrfs_relocate_chunk(fs_info, found_key.offset, true);
mutex_unlock(&fs_info->reclaim_bgs_lock);
if (ret == -ENOSPC) {
enospc_errors++;
@@ -4303,7 +4222,6 @@ loop:
goto again;
}
error:
- btrfs_free_path(path);
if (enospc_errors) {
btrfs_info(fs_info, "%d enospc errors during balance",
enospc_errors);
@@ -4320,7 +4238,7 @@ error:
* @flags: profile to validate
* @extended: if true @flags is treated as an extended profile
*/
-static int alloc_profile_is_valid(u64 flags, int extended)
+static int alloc_profile_is_valid(u64 flags, bool extended)
{
u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
BTRFS_BLOCK_GROUP_PROFILE_MASK);
@@ -4461,7 +4379,7 @@ static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
{
u32 size_buf = 1024;
char tmp_buf[192] = {'\0'};
- char *buf;
+ char AUTO_KFREE(buf);
char *bp;
u32 size_bp = size_buf;
int ret;
@@ -4509,12 +4427,10 @@ out_overflow:
btrfs_info(fs_info, "balance: %s %s",
(bctl->flags & BTRFS_BALANCE_RESUME) ?
"resume" : "start", buf);
-
- kfree(buf);
}
/*
- * Should be called with balance mutexe held
+ * Should be called with balance mutex held
*/
int btrfs_balance(struct btrfs_fs_info *fs_info,
struct btrfs_balance_control *bctl,
@@ -4711,12 +4627,12 @@ static int balance_kthread(void *data)
struct btrfs_fs_info *fs_info = data;
int ret = 0;
- sb_start_write(fs_info->sb);
+ guard(super_write)(fs_info->sb);
+
mutex_lock(&fs_info->balance_mutex);
if (fs_info->balance_ctl)
ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
mutex_unlock(&fs_info->balance_mutex);
- sb_end_write(fs_info->sb);
return ret;
}
@@ -4738,7 +4654,8 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
}
spin_lock(&fs_info->super_lock);
- ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
+ ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED,
+ "exclusive_operation=%d", fs_info->exclusive_operation);
fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
spin_unlock(&fs_info->super_lock);
/*
@@ -4759,7 +4676,7 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
struct btrfs_balance_control *bctl;
struct btrfs_balance_item *item;
struct btrfs_disk_balance_args disk_bargs;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_key key;
int ret;
@@ -4774,17 +4691,14 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
if (ret > 0) { /* ret = -ENOENT; */
- ret = 0;
- goto out;
+ return 0;
}
bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
- if (!bctl) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!bctl)
+ return -ENOMEM;
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
@@ -4821,8 +4735,6 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
fs_info->balance_ctl = bctl;
spin_unlock(&fs_info->balance_lock);
mutex_unlock(&fs_info->balance_mutex);
-out:
- btrfs_free_path(path);
return ret;
}
@@ -4987,8 +4899,8 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
again:
key.objectid = device->devid;
- key.offset = (u64)-1;
key.type = BTRFS_DEV_EXTENT_KEY;
+ key.offset = (u64)-1;
do {
mutex_lock(&fs_info->reclaim_bgs_lock);
@@ -5042,7 +4954,7 @@ again:
goto done;
}
- ret = btrfs_relocate_chunk(fs_info, chunk_offset);
+ ret = btrfs_relocate_chunk(fs_info, chunk_offset, true);
mutex_unlock(&fs_info->reclaim_bgs_lock);
if (ret == -ENOSPC) {
failed++;
@@ -5074,8 +4986,8 @@ again:
mutex_lock(&fs_info->chunk_mutex);
/* Clear all state bits beyond the shrunk device size */
- clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
- CHUNK_STATE_MASK);
+ btrfs_clear_extent_bit(&device->alloc_state, new_size, (u64)-1,
+ CHUNK_STATE_MASK, NULL);
btrfs_device_set_disk_total_bytes(device, new_size);
if (list_empty(&device->post_commit_list))
@@ -5091,7 +5003,7 @@ again:
/* Now btrfs_update_device() will change the on-disk size. */
ret = btrfs_update_device(trans, device);
btrfs_trans_release_chunk_metadata(trans);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
} else {
@@ -5202,6 +5114,8 @@ struct alloc_chunk_ctl {
u64 stripe_size;
u64 chunk_size;
int ndevs;
+ /* Space_info the block group is going to belong to. */
+ struct btrfs_space_info *space_info;
};
static void init_alloc_chunk_ctl_policy_regular(
@@ -5275,14 +5189,15 @@ static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
ctl->ndevs = 0;
switch (fs_devices->chunk_alloc_policy) {
+ default:
+ btrfs_warn_unknown_chunk_allocation(fs_devices->chunk_alloc_policy);
+ fallthrough;
case BTRFS_CHUNK_ALLOC_REGULAR:
init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
break;
case BTRFS_CHUNK_ALLOC_ZONED:
init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
break;
- default:
- BUG();
}
}
@@ -5421,7 +5336,9 @@ static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
* It should hold because:
* dev_extent_min == dev_extent_want == zone_size * dev_stripes
*/
- ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
+ ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min,
+ "ndevs=%d max_avail=%llu dev_extent_min=%llu", ctl->ndevs,
+ devices_info[ctl->ndevs - 1].max_avail, ctl->dev_extent_min);
ctl->stripe_size = zone_size;
ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
@@ -5434,7 +5351,9 @@ static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
ctl->dev_stripes);
ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
- ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
+ ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size,
+ "stripe_size=%llu data_stripes=%d max_chunk_size=%llu",
+ ctl->stripe_size, data_stripes, ctl->max_chunk_size);
}
ctl->chunk_size = ctl->stripe_size * data_stripes;
@@ -5467,12 +5386,13 @@ static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
switch (fs_devices->chunk_alloc_policy) {
+ default:
+ btrfs_warn_unknown_chunk_allocation(fs_devices->chunk_alloc_policy);
+ fallthrough;
case BTRFS_CHUNK_ALLOC_REGULAR:
return decide_stripe_size_regular(ctl, devices_info);
case BTRFS_CHUNK_ALLOC_ZONED:
return decide_stripe_size_zoned(ctl, devices_info);
- default:
- BUG();
}
}
@@ -5482,9 +5402,9 @@ static void chunk_map_device_set_bits(struct btrfs_chunk_map *map, unsigned int
struct btrfs_io_stripe *stripe = &map->stripes[i];
struct btrfs_device *device = stripe->dev;
- set_extent_bit(&device->alloc_state, stripe->physical,
- stripe->physical + map->stripe_size - 1,
- bits | EXTENT_NOWAIT, NULL);
+ btrfs_set_extent_bit(&device->alloc_state, stripe->physical,
+ stripe->physical + map->stripe_size - 1,
+ bits | EXTENT_NOWAIT, NULL);
}
}
@@ -5494,10 +5414,9 @@ static void chunk_map_device_clear_bits(struct btrfs_chunk_map *map, unsigned in
struct btrfs_io_stripe *stripe = &map->stripes[i];
struct btrfs_device *device = stripe->dev;
- __clear_extent_bit(&device->alloc_state, stripe->physical,
- stripe->physical + map->stripe_size - 1,
- bits | EXTENT_NOWAIT,
- NULL, NULL);
+ btrfs_clear_extent_bit(&device->alloc_state, stripe->physical,
+ stripe->physical + map->stripe_size - 1,
+ bits | EXTENT_NOWAIT, NULL);
}
}
@@ -5513,33 +5432,34 @@ void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_ma
btrfs_free_chunk_map(map);
}
+static int btrfs_chunk_map_cmp(const struct rb_node *new,
+ const struct rb_node *exist)
+{
+ const struct btrfs_chunk_map *new_map =
+ rb_entry(new, struct btrfs_chunk_map, rb_node);
+ const struct btrfs_chunk_map *exist_map =
+ rb_entry(exist, struct btrfs_chunk_map, rb_node);
+
+ if (new_map->start == exist_map->start)
+ return 0;
+ if (new_map->start < exist_map->start)
+ return -1;
+ return 1;
+}
+
EXPORT_FOR_TESTS
int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
{
- struct rb_node **p;
- struct rb_node *parent = NULL;
- bool leftmost = true;
+ struct rb_node *exist;
write_lock(&fs_info->mapping_tree_lock);
- p = &fs_info->mapping_tree.rb_root.rb_node;
- while (*p) {
- struct btrfs_chunk_map *entry;
-
- parent = *p;
- entry = rb_entry(parent, struct btrfs_chunk_map, rb_node);
-
- if (map->start < entry->start) {
- p = &(*p)->rb_left;
- } else if (map->start > entry->start) {
- p = &(*p)->rb_right;
- leftmost = false;
- } else {
- write_unlock(&fs_info->mapping_tree_lock);
- return -EEXIST;
- }
+ exist = rb_find_add_cached(&map->rb_node, &fs_info->mapping_tree,
+ btrfs_chunk_map_cmp);
+
+ if (exist) {
+ write_unlock(&fs_info->mapping_tree_lock);
+ return -EEXIST;
}
- rb_link_node(&map->rb_node, parent, p);
- rb_insert_color_cached(&map->rb_node, &fs_info->mapping_tree, leftmost);
chunk_map_device_set_bits(map, CHUNK_ALLOCATED);
chunk_map_device_clear_bits(map, CHUNK_TRIMMED);
write_unlock(&fs_info->mapping_tree_lock);
@@ -5603,7 +5523,8 @@ static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
return ERR_PTR(ret);
}
- block_group = btrfs_make_block_group(trans, type, start, ctl->chunk_size);
+ block_group = btrfs_make_block_group(trans, ctl->space_info, type, start,
+ ctl->chunk_size);
if (IS_ERR(block_group)) {
btrfs_remove_chunk_map(info, map);
return block_group;
@@ -5629,19 +5550,19 @@ static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
}
struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
- u64 type)
+ struct btrfs_space_info *space_info,
+ u64 type)
{
struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_fs_devices *fs_devices = info->fs_devices;
- struct btrfs_device_info *devices_info = NULL;
+ struct btrfs_device_info AUTO_KFREE(devices_info);
struct alloc_chunk_ctl ctl;
- struct btrfs_block_group *block_group;
int ret;
lockdep_assert_held(&info->chunk_mutex);
if (!alloc_profile_is_valid(type, 0)) {
- ASSERT(0);
+ DEBUG_WARN("invalid alloc profile for type %llu", type);
return ERR_PTR(-EINVAL);
}
@@ -5653,12 +5574,13 @@ struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
btrfs_err(info, "invalid chunk type 0x%llx requested", type);
- ASSERT(0);
+ DEBUG_WARN();
return ERR_PTR(-EINVAL);
}
ctl.start = find_next_chunk(info);
ctl.type = type;
+ ctl.space_info = space_info;
init_alloc_chunk_ctl(fs_devices, &ctl);
devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
@@ -5667,22 +5589,14 @@ struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
return ERR_PTR(-ENOMEM);
ret = gather_device_info(fs_devices, &ctl, devices_info);
- if (ret < 0) {
- block_group = ERR_PTR(ret);
- goto out;
- }
+ if (ret < 0)
+ return ERR_PTR(ret);
ret = decide_stripe_size(fs_devices, &ctl, devices_info);
- if (ret < 0) {
- block_group = ERR_PTR(ret);
- goto out;
- }
-
- block_group = create_chunk(trans, &ctl, devices_info);
+ if (ret < 0)
+ return ERR_PTR(ret);
-out:
- kfree(devices_info);
- return block_group;
+ return create_chunk(trans, &ctl, devices_info);
}
/*
@@ -5740,7 +5654,7 @@ int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
item_size = btrfs_chunk_item_size(map->num_stripes);
chunk = kzalloc(item_size, GFP_NOFS);
- if (!chunk) {
+ if (unlikely(!chunk)) {
ret = -ENOMEM;
btrfs_abort_transaction(trans, ret);
goto out;
@@ -5802,7 +5716,9 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
struct btrfs_fs_info *fs_info = trans->fs_info;
u64 alloc_profile;
struct btrfs_block_group *meta_bg;
+ struct btrfs_space_info *meta_space_info;
struct btrfs_block_group *sys_bg;
+ struct btrfs_space_info *sys_space_info;
/*
* When adding a new device for sprouting, the seed device is read-only
@@ -5826,12 +5742,22 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
*/
alloc_profile = btrfs_metadata_alloc_profile(fs_info);
- meta_bg = btrfs_create_chunk(trans, alloc_profile);
+ meta_space_info = btrfs_find_space_info(fs_info, alloc_profile);
+ if (!meta_space_info) {
+ DEBUG_WARN();
+ return -EINVAL;
+ }
+ meta_bg = btrfs_create_chunk(trans, meta_space_info, alloc_profile);
if (IS_ERR(meta_bg))
return PTR_ERR(meta_bg);
alloc_profile = btrfs_system_alloc_profile(fs_info);
- sys_bg = btrfs_create_chunk(trans, alloc_profile);
+ sys_space_info = btrfs_find_space_info(fs_info, alloc_profile);
+ if (!sys_space_info) {
+ DEBUG_WARN();
+ return -EINVAL;
+ }
+ sys_bg = btrfs_create_chunk(trans, sys_space_info, alloc_profile);
if (IS_ERR(sys_bg))
return PTR_ERR(sys_bg);
@@ -5959,9 +5885,79 @@ unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
return len;
}
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+static int btrfs_read_preferred(struct btrfs_chunk_map *map, int first, int num_stripes)
+{
+ for (int index = first; index < first + num_stripes; index++) {
+ const struct btrfs_device *device = map->stripes[index].dev;
+
+ if (device->devid == READ_ONCE(device->fs_devices->read_devid))
+ return index;
+ }
+
+ /* If no read-preferred device is set use the first stripe. */
+ return first;
+}
+
+struct stripe_mirror {
+ u64 devid;
+ int num;
+};
+
+static int btrfs_cmp_devid(const void *a, const void *b)
+{
+ const struct stripe_mirror *s1 = (const struct stripe_mirror *)a;
+ const struct stripe_mirror *s2 = (const struct stripe_mirror *)b;
+
+ if (s1->devid < s2->devid)
+ return -1;
+ if (s1->devid > s2->devid)
+ return 1;
+ return 0;
+}
+
+/*
+ * Select a stripe for reading using the round-robin algorithm.
+ *
+ * 1. Compute the read cycle as the total sectors read divided by the minimum
+ * sectors per device.
+ * 2. Determine the stripe number for the current read by taking the modulus
+ * of the read cycle with the total number of stripes:
+ *
+ * stripe index = (total sectors / min sectors per dev) % num stripes
+ *
+ * The calculated stripe index is then used to select the corresponding device
+ * from the list of devices, which is ordered by devid.
+ */
+static int btrfs_read_rr(const struct btrfs_chunk_map *map, int first, int num_stripes)
+{
+ struct stripe_mirror stripes[BTRFS_RAID1_MAX_MIRRORS] = { 0 };
+ struct btrfs_device *device = map->stripes[first].dev;
+ struct btrfs_fs_info *fs_info = device->fs_devices->fs_info;
+ unsigned int read_cycle;
+ unsigned int total_reads;
+ unsigned int min_reads_per_dev;
+
+ total_reads = percpu_counter_sum(&fs_info->stats_read_blocks);
+ min_reads_per_dev = READ_ONCE(fs_info->fs_devices->rr_min_contig_read) >>
+ fs_info->sectorsize_bits;
+
+ for (int index = 0, i = first; i < first + num_stripes; i++) {
+ stripes[index].devid = map->stripes[i].dev->devid;
+ stripes[index].num = i;
+ index++;
+ }
+ sort(stripes, num_stripes, sizeof(struct stripe_mirror),
+ btrfs_cmp_devid, NULL);
+
+ read_cycle = total_reads / min_reads_per_dev;
+ return stripes[read_cycle % num_stripes].num;
+}
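+
+/*
+ * Illustrative worked example of the round-robin formula above, using
+ * hypothetical values (not taken from this patch): with
+ * rr_min_contig_read = 256KiB and a 4KiB sector size,
+ * min_reads_per_dev = 256KiB >> 12 = 64 sectors. If the filesystem has
+ * read 1000 sectors in total, read_cycle = 1000 / 64 = 15; on a two-copy
+ * RAID1 chunk (num_stripes = 2) the selected index is 15 % 2 = 1, i.e.
+ * the stripe whose device has the second-lowest devid.
+ */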
+#endif
+
static int find_live_mirror(struct btrfs_fs_info *fs_info,
struct btrfs_chunk_map *map, int first,
- int dev_replace_is_ongoing)
+ bool dev_replace_is_ongoing)
{
const enum btrfs_read_policy policy = READ_ONCE(fs_info->fs_devices->read_policy);
int i;
@@ -5970,8 +5966,8 @@ static int find_live_mirror(struct btrfs_fs_info *fs_info,
int tolerance;
struct btrfs_device *srcdev;
- ASSERT((map->type &
- (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
+ ASSERT((map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)),
+ "type=%llu", map->type);
if (map->type & BTRFS_BLOCK_GROUP_RAID10)
num_stripes = map->sub_stripes;
@@ -5988,6 +5984,14 @@ static int find_live_mirror(struct btrfs_fs_info *fs_info,
case BTRFS_READ_POLICY_PID:
preferred_mirror = first + (current->pid % num_stripes);
break;
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ case BTRFS_READ_POLICY_RR:
+ preferred_mirror = btrfs_read_rr(map, first, num_stripes);
+ break;
+ case BTRFS_READ_POLICY_DEVID:
+ preferred_mirror = btrfs_read_preferred(map, first, num_stripes);
+ break;
+#endif
}
if (dev_replace_is_ongoing &&
@@ -6025,12 +6029,7 @@ struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
{
struct btrfs_io_context *bioc;
- bioc = kzalloc(
- /* The size of btrfs_io_context */
- sizeof(struct btrfs_io_context) +
- /* Plus the variable array for the stripes */
- sizeof(struct btrfs_io_stripe) * (total_stripes),
- GFP_NOFS);
+ bioc = kzalloc(struct_size(bioc, stripes, total_stripes), GFP_NOFS);
if (!bioc)
return NULL;
@@ -6264,7 +6263,7 @@ static void handle_ops_on_dev_replace(struct btrfs_io_context *bioc,
}
/* We can only have at most 2 extra nr_stripes (for DUP). */
- ASSERT(nr_extra_stripes <= 2);
+ ASSERT(nr_extra_stripes <= 2, "nr_extra_stripes=%d", nr_extra_stripes);
/*
* For GET_READ_MIRRORS, we can only return at most 1 extra stripe for
* replace.
@@ -6275,7 +6274,8 @@ static void handle_ops_on_dev_replace(struct btrfs_io_context *bioc,
struct btrfs_io_stripe *second = &bioc->stripes[num_stripes + 1];
/* Only DUP can have two extra stripes. */
- ASSERT(bioc->map_type & BTRFS_BLOCK_GROUP_DUP);
+ ASSERT(bioc->map_type & BTRFS_BLOCK_GROUP_DUP,
+ "map_type=%llu", bioc->map_type);
/*
* Swap the last stripe stripes and reduce @nr_extra_stripes.
@@ -6302,7 +6302,8 @@ static u64 btrfs_max_io_len(struct btrfs_chunk_map *map, u64 offset,
*/
io_geom->stripe_offset = offset & BTRFS_STRIPE_LEN_MASK;
io_geom->stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;
- ASSERT(io_geom->stripe_offset < U32_MAX);
+ ASSERT(io_geom->stripe_offset < U32_MAX,
+ "stripe_offset=%llu", io_geom->stripe_offset);
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
unsigned long full_stripe_len =
@@ -6320,8 +6321,12 @@ static u64 btrfs_max_io_len(struct btrfs_chunk_map *map, u64 offset,
io_geom->raid56_full_stripe_start = btrfs_stripe_nr_to_offset(
rounddown(io_geom->stripe_nr, nr_data_stripes(map)));
- ASSERT(io_geom->raid56_full_stripe_start + full_stripe_len > offset);
- ASSERT(io_geom->raid56_full_stripe_start <= offset);
+ ASSERT(io_geom->raid56_full_stripe_start + full_stripe_len > offset,
+ "raid56_full_stripe_start=%llu full_stripe_len=%lu offset=%llu",
+ io_geom->raid56_full_stripe_start, full_stripe_len, offset);
+ ASSERT(io_geom->raid56_full_stripe_start <= offset,
+ "raid56_full_stripe_start=%llu offset=%llu",
+ io_geom->raid56_full_stripe_start, offset);
/*
* For writes to RAID56, allow to write a full stripe set, but
* no straddling of stripe sets.
@@ -6346,8 +6351,7 @@ static int set_io_stripe(struct btrfs_fs_info *fs_info, u64 logical,
{
dst->dev = map->stripes[io_geom->stripe_index].dev;
- if (io_geom->op == BTRFS_MAP_READ &&
- btrfs_need_stripe_tree_update(fs_info, map->type))
+ if (io_geom->op == BTRFS_MAP_READ && io_geom->use_rst)
return btrfs_get_raid_extent_offset(fs_info, logical, length,
map->type,
io_geom->stripe_index, dst);
@@ -6362,7 +6366,7 @@ static bool is_single_device_io(struct btrfs_fs_info *fs_info,
const struct btrfs_io_stripe *smap,
const struct btrfs_chunk_map *map,
int num_alloc_stripes,
- enum btrfs_map_op op, int mirror_num)
+ struct btrfs_io_geometry *io_geom)
{
if (!smap)
return false;
@@ -6370,10 +6374,10 @@ static bool is_single_device_io(struct btrfs_fs_info *fs_info,
if (num_alloc_stripes != 1)
return false;
- if (btrfs_need_stripe_tree_update(fs_info, map->type) && op != BTRFS_MAP_READ)
+ if (io_geom->use_rst && io_geom->op != BTRFS_MAP_READ)
return false;
- if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1)
+ if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && io_geom->mirror_num > 1)
return false;
return true;
@@ -6488,7 +6492,7 @@ static void map_blocks_raid56_read(struct btrfs_chunk_map *map,
{
int data_stripes = nr_data_stripes(map);
- ASSERT(io_geom->mirror_num <= 1);
+ ASSERT(io_geom->mirror_num <= 1, "mirror_num=%d", io_geom->mirror_num);
/* Just grab the data stripe directly. */
io_geom->stripe_index = io_geom->stripe_nr % data_stripes;
io_geom->stripe_nr /= data_stripes;
@@ -6556,7 +6560,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
int num_copies;
struct btrfs_io_context *bioc = NULL;
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
- int dev_replace_is_ongoing = 0;
+ bool dev_replace_is_ongoing = false;
u16 num_alloc_stripes;
u64 max_len;
@@ -6579,6 +6583,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
io_geom.raid56_full_stripe_start = (u64)-1;
max_len = btrfs_max_io_len(map, map_offset, &io_geom);
*length = min_t(u64, map->chunk_len - map_offset, max_len);
+ io_geom.use_rst = btrfs_need_stripe_tree_update(fs_info, map->type);
if (dev_replace->replace_task != current)
down_read(&dev_replace->rwsem);
@@ -6647,8 +6652,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
* physical block information on the stack instead of allocating an
* I/O context structure.
*/
- if (is_single_device_io(fs_info, smap, map, num_alloc_stripes, op,
- io_geom.mirror_num)) {
+ if (is_single_device_io(fs_info, smap, map, num_alloc_stripes, &io_geom)) {
ret = set_io_stripe(fs_info, logical, length, smap, map, &io_geom);
if (mirror_num_ret)
*mirror_num_ret = io_geom.mirror_num;
@@ -6662,6 +6666,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
goto out;
}
bioc->map_type = map->type;
+ bioc->use_rst = io_geom.use_rst;
/*
* For RAID56 full map, we need to make sure the stripes[] follows the
@@ -6750,6 +6755,8 @@ static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args,
const struct btrfs_device *device)
{
+ if (args->devt)
+ return device->devt == args->devt;
if (args->missing) {
if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
!device->bdev)
@@ -6860,7 +6867,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
atomic_set(&dev->dev_stats_ccnt, 0);
btrfs_device_data_ordered_init(dev);
- extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE);
+ btrfs_extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE);
if (devid)
tmp = *devid;
@@ -6881,9 +6888,9 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
generate_random_uuid(dev->uuid);
if (path) {
- struct rcu_string *name;
+ const char *name;
- name = rcu_string_strdup(path, GFP_KERNEL);
+ name = kstrdup(path, GFP_KERNEL);
if (!name) {
btrfs_free_device(dev);
return ERR_PTR(-ENOMEM);
@@ -7002,16 +7009,6 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
warn_32bit_meta_chunk(fs_info, logical, length, type);
#endif
- /*
- * Only need to verify chunk item if we're reading from sys chunk array,
- * as chunk item in tree block is already verified by tree-checker.
- */
- if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
- ret = btrfs_check_chunk_valid(leaf, chunk, logical);
- if (ret)
- return ret;
- }
-
map = btrfs_find_chunk_map(fs_info, logical, 1);
/* already mapped? */
@@ -7072,6 +7069,7 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
btrfs_err(fs_info,
"failed to add chunk map, start=%llu len=%llu: %d",
map->start, map->chunk_len, ret);
+ btrfs_free_chunk_map(map);
}
return ret;
@@ -7117,8 +7115,12 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
fs_devices = find_fsid(fsid, NULL);
if (!fs_devices) {
- if (!btrfs_test_opt(fs_info, DEGRADED))
+ if (!btrfs_test_opt(fs_info, DEGRADED)) {
+ btrfs_err(fs_info,
+ "failed to find fsid %pU when attempting to open seed devices",
+ fsid);
return ERR_PTR(-ENOENT);
+ }
fs_devices = alloc_fs_devices(fsid);
if (IS_ERR(fs_devices))
@@ -7137,7 +7139,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
if (IS_ERR(fs_devices))
return fs_devices;
- ret = open_fs_devices(fs_devices, BLK_OPEN_READ, fs_info->bdev_holder);
+ ret = open_fs_devices(fs_devices, BLK_OPEN_READ, fs_info->sb);
if (ret) {
free_fs_devices(fs_devices);
return ERR_PTR(ret);
@@ -7269,16 +7271,11 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
struct btrfs_super_block *super_copy = fs_info->super_copy;
struct extent_buffer *sb;
- struct btrfs_disk_key *disk_key;
- struct btrfs_chunk *chunk;
u8 *array_ptr;
unsigned long sb_array_offset;
int ret = 0;
- u32 num_stripes;
u32 array_size;
- u32 len = 0;
u32 cur_offset;
- u64 type;
struct btrfs_key key;
ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
@@ -7301,10 +7298,15 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
cur_offset = 0;
while (cur_offset < array_size) {
- disk_key = (struct btrfs_disk_key *)array_ptr;
- len = sizeof(*disk_key);
- if (cur_offset + len > array_size)
- goto out_short_read;
+ struct btrfs_chunk *chunk;
+ struct btrfs_disk_key *disk_key = (struct btrfs_disk_key *)array_ptr;
+ u32 len = sizeof(*disk_key);
+
+ /*
+ * The sys_chunk_array has already been verified at super block
+ * read time. Only do ASSERT()s for basic checks.
+ */
+ ASSERT(cur_offset + len <= array_size);
btrfs_disk_key_to_cpu(&key, disk_key);
@@ -7312,44 +7314,14 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
sb_array_offset += len;
cur_offset += len;
- if (key.type != BTRFS_CHUNK_ITEM_KEY) {
- btrfs_err(fs_info,
- "unexpected item type %u in sys_array at offset %u",
- (u32)key.type, cur_offset);
- ret = -EIO;
- break;
- }
+ ASSERT(key.type == BTRFS_CHUNK_ITEM_KEY);
chunk = (struct btrfs_chunk *)sb_array_offset;
- /*
- * At least one btrfs_chunk with one stripe must be present,
- * exact stripe count check comes afterwards
- */
- len = btrfs_chunk_item_size(1);
- if (cur_offset + len > array_size)
- goto out_short_read;
-
- num_stripes = btrfs_chunk_num_stripes(sb, chunk);
- if (!num_stripes) {
- btrfs_err(fs_info,
- "invalid number of stripes %u in sys_array at offset %u",
- num_stripes, cur_offset);
- ret = -EIO;
- break;
- }
+ ASSERT(btrfs_chunk_type(sb, chunk) & BTRFS_BLOCK_GROUP_SYSTEM);
- type = btrfs_chunk_type(sb, chunk);
- if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
- btrfs_err(fs_info,
- "invalid chunk type %llu in sys_array at offset %u",
- type, cur_offset);
- ret = -EIO;
- break;
- }
+ len = btrfs_chunk_item_size(btrfs_chunk_num_stripes(sb, chunk));
- len = btrfs_chunk_item_size(num_stripes);
- if (cur_offset + len > array_size)
- goto out_short_read;
+ ASSERT(cur_offset + len <= array_size);
ret = read_one_chunk(&key, sb, chunk);
if (ret)
@@ -7362,13 +7334,6 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
clear_extent_buffer_uptodate(sb);
free_extent_buffer_stale(sb);
return ret;
-
-out_short_read:
- btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
- len, cur_offset);
- clear_extent_buffer_uptodate(sb);
- free_extent_buffer_stale(sb);
- return -EIO;
}
/*
@@ -7440,7 +7405,7 @@ static void readahead_tree_node_children(struct extent_buffer *node)
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root = fs_info->chunk_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_key key;
struct btrfs_key found_key;
@@ -7471,7 +7436,7 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
/*
* Lockdep complains about possible circular locking dependency between
* a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
- * used for freeze procection of a fs (struct super_block.s_writers),
+ * used for freeze protection of a fs (struct super_block.s_writers),
* which we take when starting a transaction, and extent buffers of the
* chunk tree if we call read_one_dev() while holding a lock on an
* extent buffer of the chunk tree. Since we are mounting the filesystem
@@ -7479,7 +7444,7 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
* chunk tree, to keep it simple, just skip locking on the chunk tree.
*/
ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
- path->skip_locking = 1;
+ path->skip_locking = true;
/*
* Read all device items, and then all the chunk items. All
@@ -7488,8 +7453,8 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
* item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
*/
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
- key.offset = 0;
key.type = 0;
+ key.offset = 0;
btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
struct extent_buffer *node = path->nodes[1];
@@ -7557,8 +7522,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
ret = 0;
error:
mutex_unlock(&uuid_mutex);
-
- btrfs_free_path(path);
return ret;
}
@@ -7568,8 +7531,6 @@ int btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
struct btrfs_device *device;
int ret = 0;
- fs_devices->fs_info = fs_info;
-
mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry(device, &fs_devices->devices, dev_list)
device->fs_info = fs_info;
@@ -7660,7 +7621,7 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
struct btrfs_device *device;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
int ret = 0;
path = btrfs_alloc_path();
@@ -7682,8 +7643,6 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
}
out:
mutex_unlock(&fs_devices->device_list_mutex);
-
- btrfs_free_path(path);
return ret;
}
@@ -7692,7 +7651,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *dev_root = fs_info->dev_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *eb;
struct btrfs_dev_stats_item *ptr;
@@ -7708,10 +7667,10 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
return -ENOMEM;
ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
if (ret < 0) {
- btrfs_warn_in_rcu(fs_info,
+ btrfs_warn(fs_info,
"error %d while searching for dev_stats item for device %s",
ret, btrfs_dev_name(device));
- goto out;
+ return ret;
}
if (ret == 0 &&
@@ -7719,10 +7678,10 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
/* need to delete old one and insert a new one */
ret = btrfs_del_item(trans, dev_root, path);
if (ret != 0) {
- btrfs_warn_in_rcu(fs_info,
+ btrfs_warn(fs_info,
"delete too small dev_stats item for device %s failed %d",
btrfs_dev_name(device), ret);
- goto out;
+ return ret;
}
ret = 1;
}
@@ -7733,10 +7692,10 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, dev_root, path,
&key, sizeof(*ptr));
if (ret < 0) {
- btrfs_warn_in_rcu(fs_info,
+ btrfs_warn(fs_info,
"insert dev_stats item for device %s failed %d",
btrfs_dev_name(device), ret);
- goto out;
+ return ret;
}
}
@@ -7745,10 +7704,6 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
btrfs_set_dev_stats_value(eb, ptr, i,
btrfs_dev_stat_read(device, i));
- btrfs_mark_buffer_dirty(trans, eb);
-
-out:
- btrfs_free_path(path);
return ret;
}
@@ -7798,7 +7753,7 @@ void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
if (!dev->dev_stats_valid)
return;
- btrfs_err_rl_in_rcu(dev->fs_info,
+ btrfs_err_rl(dev->fs_info,
"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
btrfs_dev_name(dev),
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
@@ -7818,7 +7773,7 @@ static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
if (i == BTRFS_DEV_STAT_VALUES_MAX)
return; /* all values == 0, suppress message */
- btrfs_info_in_rcu(dev->fs_info,
+ btrfs_info(dev->fs_info,
"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
btrfs_dev_name(dev),
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
@@ -7878,7 +7833,7 @@ void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
struct btrfs_device *curr, *next;
- ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
+ ASSERT(trans->state == TRANS_STATE_COMMIT_DOING, "state=%d", trans->state);
if (list_empty(&trans->dev_update_list))
return;
@@ -7908,8 +7863,6 @@ int btrfs_bg_type_to_factor(u64 flags)
return btrfs_raid_array[index].ncopies;
}
-
-
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
u64 chunk_offset, u64 devid,
u64 physical_offset, u64 physical_len)
@@ -7923,7 +7876,7 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
int i;
map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
- if (!map) {
+ if (unlikely(!map)) {
btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
physical_offset, devid);
@@ -7932,7 +7885,7 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
}
stripe_len = btrfs_calc_stripe_length(map);
- if (physical_len != stripe_len) {
+ if (unlikely(physical_len != stripe_len)) {
btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
physical_offset, devid, map->start, physical_len,
@@ -7942,7 +7895,7 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
}
/*
* Very old mkfs.btrfs (before v4.1) will not respect the reserved
* space. Although kernel can handle it without problem, better to warn
* the users.
*/
@@ -7952,8 +7905,8 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
devid, physical_offset, physical_len);
for (i = 0; i < map->num_stripes; i++) {
if (map->stripes[i].dev->devid == devid &&
map->stripes[i].physical == physical_offset) {
found = true;
- if (map->verified_stripes >= map->num_stripes) {
+ if (unlikely(map->verified_stripes >= map->num_stripes)) {
btrfs_err(fs_info,
@@ -7966,7 +7919,7 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
break;
}
}
- if (!found) {
+ if (unlikely(!found)) {
btrfs_err(fs_info,
"dev extent physical offset %llu devid %llu has no corresponding chunk",
physical_offset, devid);
@@ -7975,13 +7928,13 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
/* Make sure no dev extent is beyond device boundary */
dev = btrfs_find_device(fs_info->fs_devices, &args);
- if (!dev) {
+ if (unlikely(!dev)) {
btrfs_err(fs_info, "failed to find devid %llu", devid);
ret = -EUCLEAN;
goto out;
}
- if (physical_offset + physical_len > dev->disk_total_bytes) {
+ if (unlikely(physical_offset + physical_len > dev->disk_total_bytes)) {
btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
devid, physical_offset, physical_len,
@@ -7993,8 +7946,8 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
if (dev->zone_info) {
u64 zone_size = dev->zone_info->zone_size;
- if (!IS_ALIGNED(physical_offset, zone_size) ||
- !IS_ALIGNED(physical_len, zone_size)) {
+ if (unlikely(!IS_ALIGNED(physical_offset, zone_size) ||
+ !IS_ALIGNED(physical_len, zone_size))) {
btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
devid, physical_offset, physical_len);
@@ -8018,7 +7971,7 @@ static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
struct btrfs_chunk_map *map;
map = rb_entry(node, struct btrfs_chunk_map, rb_node);
- if (map->num_stripes != map->verified_stripes) {
+ if (unlikely(map->num_stripes != map->verified_stripes)) {
btrfs_err(fs_info,
"chunk %llu has missing dev extent, have %d expect %d",
map->start, map->verified_stripes, map->num_stripes);
@@ -8040,7 +7993,7 @@ out:
*/
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *root = fs_info->dev_root;
struct btrfs_key key;
u64 prev_devid = 0;
@@ -8071,17 +8024,15 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
path->reada = READA_FORWARD;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
- goto out;
+ return ret;
/* No dev extents at all? Not good */
- if (ret > 0) {
- ret = -EUCLEAN;
- goto out;
- }
+ if (unlikely(ret > 0))
+ return -EUCLEAN;
}
while (1) {
struct extent_buffer *leaf = path->nodes[0];
@@ -8103,24 +8054,23 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
physical_len = btrfs_dev_extent_length(leaf, dext);
/* Check if this dev extent overlaps with the previous one */
- if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
+ if (unlikely(devid == prev_devid && physical_offset < prev_dev_ext_end)) {
btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
devid, physical_offset, prev_dev_ext_end);
- ret = -EUCLEAN;
- goto out;
+ return -EUCLEAN;
}
ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
physical_offset, physical_len);
if (ret < 0)
- goto out;
+ return ret;
prev_devid = devid;
prev_dev_ext_end = physical_offset + physical_len;
ret = btrfs_next_item(root, path);
if (ret < 0)
- goto out;
+ return ret;
if (ret > 0) {
ret = 0;
break;
@@ -8128,10 +8078,7 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
}
/* Ensure all chunks have corresponding dev extents */
- ret = verify_chunk_dev_extent_mapping(fs_info);
-out:
- btrfs_free_path(path);
- return ret;
+ return verify_chunk_dev_extent_mapping(fs_info);
}
/*
@@ -8168,12 +8115,12 @@ static int relocating_repair_kthread(void *data)
target = cache->start;
btrfs_put_block_group(cache);
- sb_start_write(fs_info->sb);
+ guard(super_write)(fs_info->sb);
+
if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
btrfs_info(fs_info,
"zoned: skip relocating block group %llu to repair: EBUSY",
target);
- sb_end_write(fs_info->sb);
return -EBUSY;
}
@@ -8194,14 +8141,13 @@ static int relocating_repair_kthread(void *data)
btrfs_info(fs_info,
"zoned: relocating block group %llu to repair IO failure",
target);
- ret = btrfs_relocate_chunk(fs_info, target);
+ ret = btrfs_relocate_chunk(fs_info, target, true);
out:
if (cache)
btrfs_put_block_group(cache);
mutex_unlock(&fs_info->reclaim_bgs_lock);
btrfs_exclop_finish(fs_info);
- sb_end_write(fs_info->sb);
return ret;
}
@@ -8247,7 +8193,7 @@ static void map_raid56_repair_block(struct btrfs_io_context *bioc,
logical < stripe_start + BTRFS_STRIPE_LEN)
break;
}
- ASSERT(i < data_stripes);
+ ASSERT(i < data_stripes, "i=%d data_stripes=%d", i, data_stripes);
smap->dev = bioc->stripes[i].dev;
smap->physical = bioc->stripes[i].physical +
((logical - bioc->full_stripe_logical) &
@@ -8276,7 +8222,7 @@ int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
int mirror_ret = mirror_num;
int ret;
- ASSERT(mirror_num > 0);
+ ASSERT(mirror_num > 0, "mirror_num=%d", mirror_num);
ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length,
&bioc, smap, &mirror_ret);
@@ -8284,7 +8230,7 @@ int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
return ret;
/* The map range should not cross stripe boundary. */
- ASSERT(map_length >= length);
+ ASSERT(map_length >= length, "map_length=%llu length=%u", map_length, length);
/* Already mapped to single stripe. */
if (!bioc)
@@ -8296,7 +8242,8 @@ int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
goto out;
}
- ASSERT(mirror_num <= bioc->num_stripes);
+ ASSERT(mirror_num <= bioc->num_stripes,
+ "mirror_num=%d num_stripes=%d", mirror_num, bioc->num_stripes);
smap->dev = bioc->stripes[mirror_num - 1].dev;
smap->physical = bioc->stripes[mirror_num - 1].physical;
out:
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 3a416b1bc24c..34b854c1a303 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -7,6 +7,7 @@
#define BTRFS_VOLUMES_H
#include <linux/blk_types.h>
+#include <linux/blkdev.h>
#include <linux/sizes.h>
#include <linux/atomic.h>
#include <linux/sort.h>
@@ -18,20 +19,22 @@
#include <linux/completion.h>
#include <linux/rbtree.h>
#include <uapi/linux/btrfs.h>
+#include <uapi/linux/btrfs_tree.h>
#include "messages.h"
-#include "rcu-string.h"
+#include "extent-io-tree.h"
struct block_device;
struct bdev_handle;
struct btrfs_fs_info;
struct btrfs_block_group;
struct btrfs_trans_handle;
+struct btrfs_transaction;
struct btrfs_zoned_device_info;
#define BTRFS_MAX_DATA_CHUNK_SIZE (10ULL * SZ_1G)
/*
- * Arbitratry maximum size of one discard request to limit potentially long time
+ * Arbitrary maximum size of one discard request to limit potentially long time
* spent in blkdev_issue_discard().
*/
#define BTRFS_MAX_DISCARD_CHUNK_SIZE (SZ_1G)
@@ -42,7 +45,7 @@ extern struct mutex uuid_mutex;
#define BTRFS_STRIPE_LEN_SHIFT (16)
#define BTRFS_STRIPE_LEN_MASK (BTRFS_STRIPE_LEN - 1)
-static_assert(const_ilog2(BTRFS_STRIPE_LEN) == BTRFS_STRIPE_LEN_SHIFT);
+static_assert(ilog2(BTRFS_STRIPE_LEN) == BTRFS_STRIPE_LEN_SHIFT);
/* Used by sanity check for btrfs_raid_types. */
#define const_ffs(n) (__builtin_ctzll(n) + 1)
@@ -55,8 +58,7 @@ static_assert(const_ilog2(BTRFS_STRIPE_LEN) == BTRFS_STRIPE_LEN_SHIFT);
*/
static_assert(const_ffs(BTRFS_BLOCK_GROUP_RAID0) <
const_ffs(BTRFS_BLOCK_GROUP_PROFILE_MASK & ~BTRFS_BLOCK_GROUP_RAID0));
-static_assert(const_ilog2(BTRFS_BLOCK_GROUP_RAID0) >
- ilog2(BTRFS_BLOCK_GROUP_TYPE_MASK));
+static_assert(ilog2(BTRFS_BLOCK_GROUP_RAID0) > ilog2(BTRFS_BLOCK_GROUP_TYPE_MASK));
/* ilog2() can handle both constants and variables */
#define BTRFS_BG_FLAG_TO_INDEX(profile) \
@@ -110,7 +112,8 @@ struct btrfs_device {
struct btrfs_fs_devices *fs_devices;
struct btrfs_fs_info *fs_info;
- struct rcu_string __rcu *name;
+ /* Device path or NULL if missing. */
+ const char __rcu *name;
u64 generation;
@@ -296,6 +299,9 @@ enum btrfs_chunk_allocation_policy {
BTRFS_CHUNK_ALLOC_ZONED,
};
+#define BTRFS_DEFAULT_RR_MIN_CONTIG_READ (SZ_256K)
+/* Keep in sync with raid_attr table, current maximum is RAID1C4. */
+#define BTRFS_RAID1_MAX_MIRRORS (4)
/*
* Read policies for mirrored block group profiles, read picks the stripe based
* on these policies.
@@ -303,6 +309,12 @@ enum btrfs_chunk_allocation_policy {
enum btrfs_read_policy {
/* Use process PID to choose the stripe */
BTRFS_READ_POLICY_PID,
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ /* Balancing RAID1 reads across all striped devices (round-robin). */
+ BTRFS_READ_POLICY_RR,
+ /* Read from a specific device. */
+ BTRFS_READ_POLICY_DEVID,
+#endif
BTRFS_NR_READ_POLICY,
};
@@ -409,6 +421,16 @@ struct btrfs_fs_devices {
/* Count fs-devices opened. */
int opened;
+ /*
+ * Counter of the processes that are holding this fs_devices but have
+ * not yet opened it.
+ * This is for mount handling, as we can only open the fs_devices
+ * after a super block is created. But we cannot take uuid_mutex
+ * during sget_fc(), thus we have to hold the fs_devices (meaning it
+ * cannot be released) until a super block is returned.
+ */
+ int holding;
+
/* Set when we find or add a device that doesn't have the nonrot flag set. */
bool rotating;
/* Devices support TRIM/discard commands. */
@@ -417,6 +439,8 @@ struct btrfs_fs_devices {
bool seeding;
/* The mount needs to use a randomly generated fsid. */
bool temp_fsid;
+ /* Enable/disable the filesystem stats tracking. */
+ bool collect_fs_stats;
struct btrfs_fs_info *fs_info;
/* sysfs kobjects */
@@ -431,6 +455,15 @@ struct btrfs_fs_devices {
enum btrfs_read_policy read_policy;
#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ /*
+ * Minimum amount of contiguous data (in bytes) to read from one device
+ * before switching to the next one.
+ */
+ u32 rr_min_contig_read;
+
+ /* Device to be used for reading in case of RAID1. */
+ u64 read_devid;
+
/* Checksum mode - offload it or do it synchronously. */
enum btrfs_offload_csum_mode offload_csum_mode;
#endif
@@ -449,7 +482,6 @@ struct btrfs_io_stripe {
struct btrfs_device *dev;
/* Block mapping. */
u64 physical;
- u64 length;
bool rst_search_commit_root;
/* For the endio handler. */
struct btrfs_io_context *bioc;
@@ -462,7 +494,7 @@ struct btrfs_discard_stripe {
};
/*
- * Context for IO subsmission for device stripe.
+ * Context for IO submission for device stripe.
*
* - Track the unfinished mirrors for mirror based profiles
* Mirror based profiles are SINGLE/DUP/RAID1/RAID10.
@@ -485,6 +517,7 @@ struct btrfs_io_context {
struct bio *orig_bio;
atomic_t error;
u16 max_errors;
+ bool use_rst;
u64 logical;
u64 size;
@@ -628,6 +661,11 @@ struct btrfs_dev_lookup_args {
u64 devid;
u8 *uuid;
u8 *fsid;
+ /*
+ * If devt is specified, all other members will be ignored as it is
+ * enough to uniquely locate a device.
+ */
+ dev_t devt;
bool missing;
};
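
/*
 * Hypothetical usage sketch (not taken from this change): with the new devt
 * member, a caller could locate a device purely by dev_t and leave the other
 * keys unset, e.g.:
 *
 *	struct btrfs_dev_lookup_args args = { .devt = devt };
 *	struct btrfs_device *dev = btrfs_find_device(fs_devices, &args);
 *
 * The devid/uuid/fsid fields are ignored once devt is non-zero.
 */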
@@ -643,7 +681,7 @@ enum btrfs_map_op {
BTRFS_MAP_GET_READ_MIRRORS,
};
-static inline enum btrfs_map_op btrfs_op(struct bio *bio)
+static inline enum btrfs_map_op btrfs_op(const struct bio *bio)
{
switch (bio_op(bio)) {
case REQ_OP_WRITE:
@@ -690,12 +728,12 @@ struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
- u64 type);
+ struct btrfs_space_info *space_info,
+ u64 type);
void btrfs_mapping_tree_free(struct btrfs_fs_info *fs_info);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
blk_mode_t flags, void *holder);
-struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
- bool mount_arg_dev);
+struct btrfs_device *btrfs_scan_one_device(const char *path, bool mount_arg_dev);
int btrfs_forget_devices(dev_t devt);
void btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices);
@@ -729,7 +767,8 @@ void btrfs_describe_block_groups(u64 flags, char *buf, u32 size_buf);
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
-int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset);
+int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset,
+ bool verbose);
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
@@ -761,6 +800,8 @@ struct btrfs_chunk_map *btrfs_find_chunk_map_nolock(struct btrfs_fs_info *fs_inf
struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
u64 logical, u64 length);
void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map);
+struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
+ int copy_num, bool drop_cache);
void btrfs_release_disk_super(struct btrfs_super_block *super);
static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
@@ -819,7 +860,26 @@ static inline const char *btrfs_dev_name(const struct btrfs_device *device)
if (!device || test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
return "<missing disk>";
else
- return rcu_str_deref(device->name);
+ return rcu_dereference(device->name);
+}
+
+static inline void btrfs_warn_unknown_chunk_allocation(enum btrfs_chunk_allocation_policy pol)
+{
+ WARN_ONCE(1, "unknown allocation policy %d, fallback to regular", pol);
+}
+
+static inline void btrfs_fs_devices_inc_holding(struct btrfs_fs_devices *fs_devices)
+{
+ lockdep_assert_held(&uuid_mutex);
+ ASSERT(fs_devices->holding >= 0);
+ fs_devices->holding++;
+}
+
+static inline void btrfs_fs_devices_dec_holding(struct btrfs_fs_devices *fs_devices)
+{
+ lockdep_assert_held(&uuid_mutex);
+ ASSERT(fs_devices->holding > 0);
+ fs_devices->holding--;
}
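
/*
 * Illustrative pairing, not part of this header: per the comment on
 * btrfs_fs_devices::holding, the mount path is expected to pin the
 * fs_devices across sget_fc() roughly like this (the flow is a sketch,
 * not the actual call sites):
 *
 *	mutex_lock(&uuid_mutex);
 *	btrfs_fs_devices_inc_holding(fs_devices);
 *	mutex_unlock(&uuid_mutex);
 *
 *	sb = sget_fc(fc, test, set);	// uuid_mutex must not be held here
 *
 *	mutex_lock(&uuid_mutex);
 *	btrfs_fs_devices_dec_holding(fs_devices);
 *	// open or release fs_devices depending on the sget_fc() result
 *	mutex_unlock(&uuid_mutex);
 */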
void btrfs_commit_device_sizes(struct btrfs_transaction *trans);
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index bc18710d1dcf..ab55d10bd71f 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -29,9 +29,8 @@ int btrfs_getxattr(const struct inode *inode, const char *name,
{
struct btrfs_dir_item *di;
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
- int ret = 0;
unsigned long data_ptr;
path = btrfs_alloc_path();
@@ -41,26 +40,19 @@ int btrfs_getxattr(const struct inode *inode, const char *name,
/* lookup the xattr by name */
di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(BTRFS_I(inode)),
name, strlen(name), 0);
- if (!di) {
- ret = -ENODATA;
- goto out;
- } else if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto out;
- }
+ if (!di)
+ return -ENODATA;
+ if (IS_ERR(di))
+ return PTR_ERR(di);
leaf = path->nodes[0];
/* if size is 0, that means we want the size of the attr */
- if (!size) {
- ret = btrfs_dir_data_len(leaf, di);
- goto out;
- }
+ if (!size)
+ return btrfs_dir_data_len(leaf, di);
/* now get the data out of our dir_item */
- if (btrfs_dir_data_len(leaf, di) > size) {
- ret = -ERANGE;
- goto out;
- }
+ if (btrfs_dir_data_len(leaf, di) > size)
+ return -ERANGE;
/*
* The way things are packed into the leaf is like this
@@ -73,11 +65,7 @@ int btrfs_getxattr(const struct inode *inode, const char *name,
btrfs_dir_name_len(leaf, di));
read_extent_buffer(leaf, buffer, data_ptr,
btrfs_dir_data_len(leaf, di));
- ret = btrfs_dir_data_len(leaf, di);
-
-out:
- btrfs_free_path(path);
- return ret;
+ return btrfs_dir_data_len(leaf, di);
}
int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
@@ -85,7 +73,7 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
{
struct btrfs_dir_item *di = NULL;
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
size_t name_len = strlen(name);
int ret = 0;
@@ -97,7 +85,7 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- path->skip_release_on_error = 1;
+ path->skip_release_on_error = true;
if (!value) {
di = btrfs_lookup_xattr(trans, root, path,
@@ -204,7 +192,6 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
btrfs_set_dir_data_len(leaf, di, size);
data_ptr = ((unsigned long)(di + 1)) + name_len;
write_extent_buffer(leaf, value, data_ptr, size);
- btrfs_mark_buffer_dirty(trans, leaf);
} else {
/*
* Insert, and we had space for the xattr, so path->slots[0] is
@@ -213,7 +200,6 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
*/
}
out:
- btrfs_free_path(path);
if (!ret) {
set_bit(BTRFS_INODE_COPY_EVERYTHING,
&BTRFS_I(inode)->runtime_flags);
@@ -279,7 +265,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
struct btrfs_key key;
struct inode *inode = d_inode(dentry);
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int iter_ret = 0;
int ret = 0;
size_t total_size = 0, size_left = size;
@@ -355,8 +341,6 @@ next:
else
ret = total_size;
- btrfs_free_path(path);
-
return ret;
}
@@ -511,14 +495,15 @@ static int btrfs_initxattrs(struct inode *inode,
*/
nofs_flag = memalloc_nofs_save();
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
- name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
- strlen(xattr->name) + 1, GFP_KERNEL);
+ const size_t name_len = XATTR_SECURITY_PREFIX_LEN +
+ strlen(xattr->name) + 1;
+
+ name = kmalloc(name_len, GFP_KERNEL);
if (!name) {
ret = -ENOMEM;
break;
}
- strcpy(name, XATTR_SECURITY_PREFIX);
- strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);
+ scnprintf(name, name_len, "%s%s", XATTR_SECURITY_PREFIX, xattr->name);
if (strcmp(name, XATTR_NAME_CAPS) == 0)
clear_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);
diff --git a/fs/btrfs/xattr.h b/fs/btrfs/xattr.h
index 8dc4cf49f6f0..0ce10e4ec836 100644
--- a/fs/btrfs/xattr.h
+++ b/fs/btrfs/xattr.h
@@ -6,6 +6,8 @@
#ifndef BTRFS_XATTR_H
#define BTRFS_XATTR_H
+#include <linux/types.h>
+
struct dentry;
struct inode;
struct qstr;
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index ddf0d5a448a7..6caba8be7c84 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -34,11 +34,9 @@ struct workspace {
int level;
};
-static struct workspace_manager wsm;
-
-struct list_head *zlib_get_workspace(unsigned int level)
+struct list_head *zlib_get_workspace(struct btrfs_fs_info *fs_info, unsigned int level)
{
- struct list_head *ws = btrfs_get_workspace(BTRFS_COMPRESS_ZLIB, level);
+ struct list_head *ws = btrfs_get_workspace(fs_info, BTRFS_COMPRESS_ZLIB, level);
struct workspace *workspace = list_entry(ws, struct workspace, list);
workspace->level = level;
@@ -55,8 +53,25 @@ void zlib_free_workspace(struct list_head *ws)
kfree(workspace);
}
-struct list_head *zlib_alloc_workspace(unsigned int level)
+/*
+ * For s390 hardware acceleration, the buffer size should be at least
+ * ZLIB_DFLTCC_BUF_SIZE to achieve the best performance.
+ *
+ * But if bs > ps we can have folios large enough to satisfy the s390
+ * hardware requirement on their own.
+ */
+static bool need_special_buffer(struct btrfs_fs_info *fs_info)
+{
+ if (!zlib_deflate_dfltcc_enabled())
+ return false;
+ if (btrfs_min_folio_size(fs_info) >= ZLIB_DFLTCC_BUF_SIZE)
+ return false;
+ return true;
+}
+
+struct list_head *zlib_alloc_workspace(struct btrfs_fs_info *fs_info, unsigned int level)
{
+ const u32 blocksize = fs_info->sectorsize;
struct workspace *workspace;
int workspacesize;
@@ -69,19 +84,15 @@ struct list_head *zlib_alloc_workspace(unsigned int level)
workspace->strm.workspace = kvzalloc(workspacesize, GFP_KERNEL | __GFP_NOWARN);
workspace->level = level;
workspace->buf = NULL;
- /*
- * In case of s390 zlib hardware support, allocate lager workspace
- * buffer. If allocator fails, fall back to a single page buffer.
- */
- if (zlib_deflate_dfltcc_enabled()) {
+ if (need_special_buffer(fs_info)) {
workspace->buf = kmalloc(ZLIB_DFLTCC_BUF_SIZE,
__GFP_NOMEMALLOC | __GFP_NORETRY |
__GFP_NOWARN | GFP_NOIO);
workspace->buf_size = ZLIB_DFLTCC_BUF_SIZE;
}
if (!workspace->buf) {
- workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- workspace->buf_size = PAGE_SIZE;
+ workspace->buf = kmalloc(blocksize, GFP_KERNEL);
+ workspace->buf_size = blocksize;
}
if (!workspace->strm.workspace || !workspace->buf)
goto fail;
@@ -94,22 +105,64 @@ fail:
return ERR_PTR(-ENOMEM);
}
-int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
+/*
+ * Helper for S390x with hardware zlib compression support.
+ *
+ * That hardware acceleration requires a buffer size larger than a single page
+ * to get ideal performance, thus we need to do the memory copy rather than
+ * use the page cache directly as input buffer.
+ */
+static int copy_data_into_buffer(struct address_space *mapping,
+ struct workspace *workspace, u64 filepos,
+ unsigned long length)
+{
+ u64 cur = filepos;
+
+ /* It's only for hardware accelerated zlib code. */
+ ASSERT(zlib_deflate_dfltcc_enabled());
+
+ while (cur < filepos + length) {
+ struct folio *folio;
+ void *data_in;
+ unsigned int offset;
+ unsigned long copy_length;
+ int ret;
+
+ ret = btrfs_compress_filemap_get_folio(mapping, cur, &folio);
+ if (ret < 0)
+ return ret;
+
+ offset = offset_in_folio(folio, cur);
+ copy_length = min(folio_size(folio) - offset,
+ filepos + length - cur);
+
+ data_in = kmap_local_folio(folio, offset);
+ memcpy(workspace->buf + cur - filepos, data_in, copy_length);
+ kunmap_local(data_in);
+ cur += copy_length;
+ }
+ return 0;
+}
+
+int zlib_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out)
{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct workspace *workspace = list_entry(ws, struct workspace, list);
+ struct address_space *mapping = inode->vfs_inode.i_mapping;
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
+ const u32 min_folio_size = btrfs_min_folio_size(fs_info);
int ret;
char *data_in = NULL;
char *cfolio_out;
int nr_folios = 0;
struct folio *in_folio = NULL;
struct folio *out_folio = NULL;
- unsigned long bytes_left;
- unsigned int in_buf_folios;
unsigned long len = *total_out;
unsigned long nr_dest_folios = *out_folios;
- const unsigned long max_out = nr_dest_folios * PAGE_SIZE;
+ const unsigned long max_out = nr_dest_folios << min_folio_shift;
+ const u32 blocksize = fs_info->sectorsize;
const u64 orig_end = start + len;
*out_folios = 0;
@@ -118,9 +171,7 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
ret = zlib_deflateInit(&workspace->strm, workspace->level);
if (unlikely(ret != Z_OK)) {
- struct btrfs_inode *inode = BTRFS_I(mapping->host);
-
- btrfs_err(inode->root->fs_info,
+ btrfs_err(fs_info,
"zlib compression init failed, error %d root %llu inode %llu offset %llu",
ret, btrfs_root_id(inode->root), btrfs_ino(inode), start);
ret = -EIO;
@@ -130,7 +181,7 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
workspace->strm.total_in = 0;
workspace->strm.total_out = 0;
- out_folio = btrfs_alloc_compr_folio();
+ out_folio = btrfs_alloc_compr_folio(fs_info);
if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
@@ -142,7 +193,7 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
workspace->strm.next_in = workspace->buf;
workspace->strm.avail_in = 0;
workspace->strm.next_out = cfolio_out;
- workspace->strm.avail_out = PAGE_SIZE;
+ workspace->strm.avail_out = min_folio_size;
while (workspace->strm.total_in < len) {
/*
@@ -150,36 +201,23 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
* the workspace buffer if required.
*/
if (workspace->strm.avail_in == 0) {
- bytes_left = len - workspace->strm.total_in;
- in_buf_folios = min(DIV_ROUND_UP(bytes_left, PAGE_SIZE),
- workspace->buf_size / PAGE_SIZE);
- if (in_buf_folios > 1) {
- int i;
-
- /* S390 hardware acceleration path, not subpage. */
- ASSERT(!btrfs_is_subpage(
- inode_to_fs_info(mapping->host),
- mapping));
- for (i = 0; i < in_buf_folios; i++) {
- if (data_in) {
- kunmap_local(data_in);
- folio_put(in_folio);
- data_in = NULL;
- }
- ret = btrfs_compress_filemap_get_folio(mapping,
- start, &in_folio);
- if (ret < 0)
- goto out;
- data_in = kmap_local_folio(in_folio, 0);
- copy_page(workspace->buf + i * PAGE_SIZE,
- data_in);
- start += PAGE_SIZE;
- workspace->strm.avail_in =
- (in_buf_folios << PAGE_SHIFT);
- }
+ unsigned long bytes_left = len - workspace->strm.total_in;
+ unsigned int copy_length = min(bytes_left, workspace->buf_size);
+
+ /*
+ * For s390 hardware accelerated zlib, when our folio is smaller
+ * than the copy_length, we need to fill the buffer so that
+ * we can take full advantage of hardware acceleration.
+ */
+ if (need_special_buffer(fs_info)) {
+ ret = copy_data_into_buffer(mapping, workspace,
+ start, copy_length);
+ if (ret < 0)
+ goto out;
+ start += copy_length;
workspace->strm.next_in = workspace->buf;
+ workspace->strm.avail_in = copy_length;
} else {
- unsigned int pg_off;
unsigned int cur_len;
if (data_in) {
@@ -191,9 +229,9 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
start, &in_folio);
if (ret < 0)
goto out;
- pg_off = offset_in_page(start);
- cur_len = btrfs_calc_input_length(orig_end, start);
- data_in = kmap_local_folio(in_folio, pg_off);
+ cur_len = btrfs_calc_input_length(in_folio, orig_end, start);
+ data_in = kmap_local_folio(in_folio,
+ offset_in_folio(in_folio, start));
start += cur_len;
workspace->strm.next_in = data_in;
workspace->strm.avail_in = cur_len;
@@ -202,9 +240,7 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
if (unlikely(ret != Z_OK)) {
- struct btrfs_inode *inode = BTRFS_I(mapping->host);
-
- btrfs_warn(inode->root->fs_info,
+ btrfs_warn(fs_info,
"zlib compression failed, error %d root %llu inode %llu offset %llu",
ret, btrfs_root_id(inode->root), btrfs_ino(inode),
start);
@@ -214,7 +250,7 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
}
/* we're making it bigger, give up */
- if (workspace->strm.total_in > 8192 &&
+ if (workspace->strm.total_in > blocksize * 2 &&
workspace->strm.total_in <
workspace->strm.total_out) {
ret = -E2BIG;
@@ -229,7 +265,7 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
ret = -E2BIG;
goto out;
}
- out_folio = btrfs_alloc_compr_folio();
+ out_folio = btrfs_alloc_compr_folio(fs_info);
if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
@@ -237,7 +273,7 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
cfolio_out = folio_address(out_folio);
folios[nr_folios] = out_folio;
nr_folios++;
- workspace->strm.avail_out = PAGE_SIZE;
+ workspace->strm.avail_out = min_folio_size;
workspace->strm.next_out = cfolio_out;
}
/* we're all done */
@@ -255,7 +291,7 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
ret = zlib_deflate(&workspace->strm, Z_FINISH);
if (ret == Z_STREAM_END)
break;
- if (ret != Z_OK && ret != Z_BUF_ERROR) {
+ if (unlikely(ret != Z_OK && ret != Z_BUF_ERROR)) {
zlib_deflateEnd(&workspace->strm);
ret = -EIO;
goto out;
@@ -265,7 +301,7 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
ret = -E2BIG;
goto out;
}
- out_folio = btrfs_alloc_compr_folio();
+ out_folio = btrfs_alloc_compr_folio(fs_info);
if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
@@ -273,7 +309,7 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
cfolio_out = folio_address(out_folio);
folios[nr_folios] = out_folio;
nr_folios++;
- workspace->strm.avail_out = PAGE_SIZE;
+ workspace->strm.avail_out = min_folio_size;
workspace->strm.next_out = cfolio_out;
}
}
@@ -299,20 +335,22 @@ out:
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
+ struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
struct workspace *workspace = list_entry(ws, struct workspace, list);
+ const u32 min_folio_size = btrfs_min_folio_size(fs_info);
int ret = 0, ret2;
int wbits = MAX_WBITS;
char *data_in;
size_t total_out = 0;
unsigned long folio_in_index = 0;
size_t srclen = cb->compressed_len;
- unsigned long total_folios_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
+ unsigned long total_folios_in = DIV_ROUND_UP(srclen, min_folio_size);
unsigned long buf_start;
struct folio **folios_in = cb->compressed_folios;
data_in = kmap_local_folio(folios_in[folio_in_index], 0);
workspace->strm.next_in = data_in;
- workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
+ workspace->strm.avail_in = min_t(size_t, srclen, min_folio_size);
workspace->strm.total_in = 0;
workspace->strm.total_out = 0;
@@ -373,7 +411,7 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
data_in = kmap_local_folio(folios_in[folio_in_index], 0);
workspace->strm.next_in = data_in;
tmp = srclen - workspace->strm.total_in;
- workspace->strm.avail_in = min(tmp, PAGE_SIZE);
+ workspace->strm.avail_in = min(tmp, min_folio_size);
}
}
if (unlikely(ret != Z_STREAM_END)) {
@@ -461,8 +499,8 @@ out:
return ret;
}
-const struct btrfs_compress_op btrfs_zlib_compress = {
- .workspace_manager = &wsm,
+const struct btrfs_compress_levels btrfs_zlib_compress = {
+ .min_level = 1,
.max_level = 9,
.default_level = BTRFS_ZLIB_DEFAULT_LEVEL,
};
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 11ed523e528e..359a98e6de85 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -9,7 +9,6 @@
#include "ctree.h"
#include "volumes.h"
#include "zoned.h"
-#include "rcu-string.h"
#include "disk-io.h"
#include "block-group.h"
#include "dev-replace.h"
@@ -17,6 +16,8 @@
#include "fs.h"
#include "accessors.h"
#include "bio.h"
+#include "transaction.h"
+#include "sysfs.h"
/* Maximum number of zones to report per blkdev_report_zones() call */
#define BTRFS_REPORT_NR_ZONES 4096
@@ -36,12 +37,15 @@
#define BTRFS_SB_LOG_FIRST_OFFSET (512ULL * SZ_1G)
#define BTRFS_SB_LOG_SECOND_OFFSET (4096ULL * SZ_1G)
-#define BTRFS_SB_LOG_FIRST_SHIFT const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
-#define BTRFS_SB_LOG_SECOND_SHIFT const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)
+#define BTRFS_SB_LOG_FIRST_SHIFT ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
+#define BTRFS_SB_LOG_SECOND_SHIFT ilog2(BTRFS_SB_LOG_SECOND_OFFSET)
/* Number of superblock log zones */
#define BTRFS_NR_SB_LOG_ZONES 2
+/* Default number of max active zones when the device has no limits. */
+#define BTRFS_DEFAULT_MAX_ACTIVE_ZONES 128
+
/*
* Minimum of active zones we need:
*
@@ -89,7 +93,8 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
sector_t sector;
for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
- ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL);
+ ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL,
+ "zones[%d].type=%d", i, zones[i].type);
empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
full[i] = sb_zone_is_full(&zones[i]);
}
@@ -162,14 +167,14 @@ static inline u32 sb_zone_number(int shift, int mirror)
{
u64 zone = U64_MAX;
- ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
+ ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX, "mirror=%d", mirror);
switch (mirror) {
case 0: zone = 0; break;
case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
}
- ASSERT(zone <= U32_MAX);
+ ASSERT(zone <= U32_MAX, "zone=%llu", zone);
return (u32)zone;
}
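
/*
 * Worked example (illustrative only): for a zoned device with 256M zones the
 * shift passed in is ilog2(SZ_256M) = 28, so the superblock log zones land at
 *
 *	mirror 0 -> zone 0
 *	mirror 1 -> zone 1ULL << (39 - 28) = 2048	(512G  / 256M)
 *	mirror 2 -> zone 1ULL << (42 - 28) = 16384	(4096G / 256M)
 *
 * i.e. fixed byte offsets of 0, 512G and 4096G regardless of the zone size.
 */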
@@ -236,7 +241,8 @@ static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
unsigned int i;
u32 zno;
- ASSERT(IS_ALIGNED(pos, zinfo->zone_size));
+ ASSERT(IS_ALIGNED(pos, zinfo->zone_size),
+ "pos=%llu zinfo->zone_size=%llu", pos, zinfo->zone_size);
zno = pos >> zinfo->zone_size_shift;
/*
* We cannot report zones beyond the zone end. So, it is OK to
@@ -260,17 +266,17 @@ static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
}
}
- ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
- copy_zone_info_cb, zones);
+ ret = blkdev_report_zones_cached(device->bdev, pos >> SECTOR_SHIFT,
+ *nr_zones, copy_zone_info_cb, zones);
if (ret < 0) {
- btrfs_err_in_rcu(device->fs_info,
+ btrfs_err(device->fs_info,
"zoned: failed to read zone %llu on %s (devid %llu)",
- pos, rcu_str_deref(device->name),
+ pos, rcu_dereference(device->name),
device->devid);
return ret;
}
*nr_zones = ret;
- if (!ret)
+ if (unlikely(!ret))
return -EIO;
/* Populate cache */
@@ -311,7 +317,7 @@ static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
if (ret < 0)
return ret;
/* No dev extents at all? Not good */
- if (ret > 0)
+ if (unlikely(ret > 0))
return -EUCLEAN;
}
@@ -395,16 +401,16 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
/* We reject devices with a zone size larger than 8GB */
if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
- btrfs_err_in_rcu(fs_info,
+ btrfs_err(fs_info,
"zoned: %s: zone size %llu larger than supported maximum %llu",
- rcu_str_deref(device->name),
+ rcu_dereference(device->name),
zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
ret = -EINVAL;
goto out;
} else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) {
- btrfs_err_in_rcu(fs_info,
+ btrfs_err(fs_info,
"zoned: %s: zone size %llu smaller than supported minimum %u",
- rcu_str_deref(device->name),
+ rcu_dereference(device->name),
zone_info->zone_size, BTRFS_MIN_ZONE_SIZE);
ret = -EINVAL;
goto out;
@@ -416,11 +422,14 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
if (!IS_ALIGNED(nr_sectors, zone_sectors))
zone_info->nr_zones++;
- max_active_zones = bdev_max_active_zones(bdev);
+ max_active_zones = min_not_zero(bdev_max_active_zones(bdev),
+ bdev_max_open_zones(bdev));
+ if (!max_active_zones && zone_info->nr_zones > BTRFS_DEFAULT_MAX_ACTIVE_ZONES)
+ max_active_zones = BTRFS_DEFAULT_MAX_ACTIVE_ZONES;
if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {
- btrfs_err_in_rcu(fs_info,
+ btrfs_err(fs_info,
"zoned: %s: max active zones %u is too small, need at least %u active zones",
- rcu_str_deref(device->name), max_active_zones,
+ rcu_dereference(device->name), max_active_zones,
BTRFS_MIN_ACTIVE_ZONES);
ret = -EINVAL;
goto out;
@@ -460,9 +469,9 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
zone_info->zone_cache = vcalloc(zone_info->nr_zones,
sizeof(struct blk_zone));
if (!zone_info->zone_cache) {
- btrfs_err_in_rcu(device->fs_info,
+ btrfs_err(device->fs_info,
"zoned: failed to allocate zone cache for %s",
- rcu_str_deref(device->name));
+ rcu_dereference(device->name));
ret = -ENOMEM;
goto out;
}
@@ -487,6 +496,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
case BLK_ZONE_COND_IMP_OPEN:
case BLK_ZONE_COND_EXP_OPEN:
case BLK_ZONE_COND_CLOSED:
+ case BLK_ZONE_COND_ACTIVE:
__set_bit(nreported, zone_info->active_zones);
nactive++;
break;
@@ -496,20 +506,25 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
}
- if (nreported != zone_info->nr_zones) {
- btrfs_err_in_rcu(device->fs_info,
+ if (unlikely(nreported != zone_info->nr_zones)) {
+ btrfs_err(device->fs_info,
"inconsistent number of zones on %s (%u/%u)",
- rcu_str_deref(device->name), nreported,
+ rcu_dereference(device->name), nreported,
zone_info->nr_zones);
ret = -EIO;
goto out;
}
if (max_active_zones) {
- if (nactive > max_active_zones) {
- btrfs_err_in_rcu(device->fs_info,
+ if (unlikely(nactive > max_active_zones)) {
+ if (bdev_max_active_zones(bdev) == 0) {
+ max_active_zones = 0;
+ zone_info->max_active_zones = 0;
+ goto validate;
+ }
+ btrfs_err(device->fs_info,
"zoned: %u active zones on %s exceeds max_active_zones %u",
- nactive, rcu_str_deref(device->name),
+ nactive, rcu_dereference(device->name),
max_active_zones);
ret = -EIO;
goto out;
@@ -519,6 +534,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
}
+validate:
/* Validate superblock log */
nr_zones = BTRFS_NR_SB_LOG_ZONES;
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
@@ -537,8 +553,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
if (ret)
goto out;
- if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
- btrfs_err_in_rcu(device->fs_info,
+ if (unlikely(nr_zones != BTRFS_NR_SB_LOG_ZONES)) {
+ btrfs_err(device->fs_info,
"zoned: failed to read super block log zone info at devid %llu zone %u",
device->devid, sb_zone);
ret = -EUCLEAN;
@@ -555,8 +571,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
ret = sb_write_pointer(device->bdev,
&zone_info->sb_zones[sb_pos], &sb_wp);
- if (ret != -ENOENT && ret) {
- btrfs_err_in_rcu(device->fs_info,
+ if (unlikely(ret != -ENOENT && ret)) {
+ btrfs_err(device->fs_info,
"zoned: super block log zone corrupted devid %llu zone %u",
device->devid, sb_zone);
ret = -EUCLEAN;
@@ -575,9 +591,9 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
emulated = "emulated ";
}
- btrfs_info_in_rcu(fs_info,
+ btrfs_info(fs_info,
"%s block device %s, %u %szones of %llu bytes",
- model, rcu_str_deref(device->name), zone_info->nr_zones,
+ model, rcu_dereference(device->name), zone_info->nr_zones,
emulated, zone_info->zone_size);
return 0;
@@ -748,8 +764,9 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
(u64)lim->max_segments << PAGE_SHIFT),
fs_info->sectorsize);
fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
- if (fs_info->max_zone_append_size < fs_info->max_extent_size)
- fs_info->max_extent_size = fs_info->max_zone_append_size;
+
+ fs_info->max_extent_size = min_not_zero(fs_info->max_extent_size,
+ fs_info->max_zone_append_size);
/*
* Check mount options here, because we might change fs_info->zoned
@@ -882,12 +899,12 @@ int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
if (sb_zone + 1 >= nr_zones)
return -ENOENT;
- ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
- BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
- zones);
+ ret = blkdev_report_zones_cached(bdev, zone_start_sector(sb_zone, bdev),
+ BTRFS_NR_SB_LOG_ZONES,
+ copy_zone_info_cb, zones);
if (ret < 0)
return ret;
- if (ret != BTRFS_NR_SB_LOG_ZONES)
+ if (unlikely(ret != BTRFS_NR_SB_LOG_ZONES))
return -EIO;
return sb_log_location(bdev, zones, rw, bytenr_ret);
@@ -988,7 +1005,7 @@ int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
}
/* All the zones are FULL. Should not reach here. */
- ASSERT(0);
+ DEBUG_WARN("unexpected state, all zones full");
return -EIO;
}
@@ -1041,8 +1058,10 @@ u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
bool have_sb;
int i;
- ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
- ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));
+ ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size),
+ "hole_start=%llu zinfo->zone_size=%llu", hole_start, zinfo->zone_size);
+ ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size),
+ "num_bytes=%llu zinfo->zone_size=%llu", num_bytes, zinfo->zone_size);
while (pos < hole_end) {
begin = pos >> shift;
@@ -1158,8 +1177,10 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
u64 pos;
int ret;
- ASSERT(IS_ALIGNED(start, zinfo->zone_size));
- ASSERT(IS_ALIGNED(size, zinfo->zone_size));
+ ASSERT(IS_ALIGNED(start, zinfo->zone_size),
+ "start=%llu, zinfo->zone_size=%llu", start, zinfo->zone_size);
+ ASSERT(IS_ALIGNED(size, zinfo->zone_size),
+ "size=%llu, zinfo->zone_size=%llu", size, zinfo->zone_size);
if (begin + nbits > zinfo->nr_zones)
return -ERANGE;
@@ -1181,10 +1202,10 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
continue;
/* Free regions should be empty */
- btrfs_warn_in_rcu(
+ btrfs_warn(
device->fs_info,
"zoned: resetting device %s (devid %llu) zone %llu for allocation",
- rcu_str_deref(device->name), device->devid, pos >> shift);
+ rcu_dereference(device->name), device->devid, pos >> shift);
WARN_ON_ONCE(1);
ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
@@ -1239,7 +1260,7 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
root = btrfs_extent_root(fs_info, key.objectid);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
/* We should not find the exact match */
- if (!ret)
+ if (unlikely(!ret))
ret = -EUCLEAN;
if (ret < 0)
return ret;
@@ -1260,8 +1281,8 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
else
length = fs_info->nodesize;
- if (!(found_key.objectid >= cache->start &&
- found_key.objectid + length <= cache->start + cache->length)) {
+ if (unlikely(!(found_key.objectid >= cache->start &&
+ found_key.objectid + length <= cache->start + cache->length))) {
return -EUCLEAN;
}
*offset_ret = found_key.objectid + length - cache->start;
@@ -1276,7 +1297,7 @@ struct zone_info {
static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
struct zone_info *info, unsigned long *active,
- struct btrfs_chunk_map *map)
+ struct btrfs_chunk_map *map, bool new)
{
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
struct btrfs_device *device;
@@ -1303,9 +1324,12 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
if (!btrfs_dev_is_sequential(device, info->physical)) {
up_read(&dev_replace->rwsem);
info->alloc_offset = WP_CONVENTIONAL;
+ info->capacity = device->zone_info->zone_size;
return 0;
}
+ ASSERT(!new || btrfs_dev_is_empty_zone(device, info->physical));
+
/* This zone will be used for allocation, so mark this zone non-empty. */
btrfs_dev_clear_zone_empty(device, info->physical);
@@ -1318,6 +1342,18 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
* to determine the allocation offset within the zone.
*/
WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
+
+ if (new) {
+ sector_t capacity;
+
+ capacity = bdev_zone_capacity(device->bdev, info->physical >> SECTOR_SHIFT);
+ up_read(&dev_replace->rwsem);
+ info->alloc_offset = 0;
+ info->capacity = capacity << SECTOR_SHIFT;
+
+ return 0;
+ }
+
nofs_flag = memalloc_nofs_save();
ret = btrfs_get_dev_zone(device, info->physical, &zone);
memalloc_nofs_restore(nofs_flag);
@@ -1329,10 +1365,10 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
return 0;
}
- if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
- btrfs_err_in_rcu(fs_info,
+ if (unlikely(zone.type == BLK_ZONE_TYPE_CONVENTIONAL)) {
+ btrfs_err(fs_info,
"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
- zone.start << SECTOR_SHIFT, rcu_str_deref(device->name),
+ zone.start << SECTOR_SHIFT, rcu_dereference(device->name),
device->devid);
up_read(&dev_replace->rwsem);
return -EIO;
@@ -1343,10 +1379,10 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
switch (zone.cond) {
case BLK_ZONE_COND_OFFLINE:
case BLK_ZONE_COND_READONLY:
- btrfs_err_in_rcu(fs_info,
+ btrfs_err(fs_info,
"zoned: offline/readonly zone %llu on device %s (devid %llu)",
(info->physical >> device->zone_info->zone_size_shift),
- rcu_str_deref(device->name), device->devid);
+ rcu_dereference(device->name), device->devid);
info->alloc_offset = WP_MISSING_DEV;
break;
case BLK_ZONE_COND_EMPTY:
@@ -1371,7 +1407,7 @@ static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
struct zone_info *info,
unsigned long *active)
{
- if (info->alloc_offset == WP_MISSING_DEV) {
+ if (unlikely(info->alloc_offset == WP_MISSING_DEV)) {
btrfs_err(bg->fs_info,
"zoned: cannot recover write pointer for zone %llu",
info->physical);
@@ -1388,7 +1424,8 @@ static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
struct btrfs_chunk_map *map,
struct zone_info *zone_info,
- unsigned long *active)
+ unsigned long *active,
+ u64 last_alloc)
{
struct btrfs_fs_info *fs_info = bg->fs_info;
@@ -1399,26 +1436,33 @@ static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);
- if (zone_info[0].alloc_offset == WP_MISSING_DEV) {
+ if (unlikely(zone_info[0].alloc_offset == WP_MISSING_DEV)) {
btrfs_err(bg->fs_info,
"zoned: cannot recover write pointer for zone %llu",
zone_info[0].physical);
return -EIO;
}
- if (zone_info[1].alloc_offset == WP_MISSING_DEV) {
+ if (unlikely(zone_info[1].alloc_offset == WP_MISSING_DEV)) {
btrfs_err(bg->fs_info,
"zoned: cannot recover write pointer for zone %llu",
zone_info[1].physical);
return -EIO;
}
- if (zone_info[0].alloc_offset != zone_info[1].alloc_offset) {
+
+ if (zone_info[0].alloc_offset == WP_CONVENTIONAL)
+ zone_info[0].alloc_offset = last_alloc;
+
+ if (zone_info[1].alloc_offset == WP_CONVENTIONAL)
+ zone_info[1].alloc_offset = last_alloc;
+
+ if (unlikely(zone_info[0].alloc_offset != zone_info[1].alloc_offset)) {
btrfs_err(bg->fs_info,
"zoned: write pointer offset mismatch of zones in DUP profile");
return -EIO;
}
if (test_bit(0, active) != test_bit(1, active)) {
- if (!btrfs_zone_activate(bg))
+ if (unlikely(!btrfs_zone_activate(bg)))
return -EIO;
} else if (test_bit(0, active)) {
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
@@ -1431,7 +1475,8 @@ static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
struct btrfs_chunk_map *map,
struct zone_info *zone_info,
- unsigned long *active)
+ unsigned long *active,
+ u64 last_alloc)
{
struct btrfs_fs_info *fs_info = bg->fs_info;
int i;
@@ -1446,20 +1491,22 @@ static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);
for (i = 0; i < map->num_stripes; i++) {
- if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
- zone_info[i].alloc_offset == WP_CONVENTIONAL)
+ if (zone_info[i].alloc_offset == WP_MISSING_DEV)
continue;
- if ((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
- !btrfs_test_opt(fs_info, DEGRADED)) {
+ if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
+ zone_info[i].alloc_offset = last_alloc;
+
+ if (unlikely((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
+ !btrfs_test_opt(fs_info, DEGRADED))) {
btrfs_err(fs_info,
"zoned: write pointer offset mismatch of zones in %s profile",
btrfs_bg_type_to_raid_name(map->type));
return -EIO;
}
if (test_bit(0, active) != test_bit(i, active)) {
- if (!btrfs_test_opt(fs_info, DEGRADED) &&
- !btrfs_zone_activate(bg)) {
+ if (unlikely(!btrfs_test_opt(fs_info, DEGRADED) &&
+ !btrfs_zone_activate(bg))) {
return -EIO;
}
} else {
@@ -1479,9 +1526,12 @@ static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
struct btrfs_chunk_map *map,
struct zone_info *zone_info,
- unsigned long *active)
+ unsigned long *active,
+ u64 last_alloc)
{
struct btrfs_fs_info *fs_info = bg->fs_info;
+ u64 stripe_nr = 0, stripe_offset = 0;
+ u32 stripe_index = 0;
if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
@@ -1489,13 +1539,30 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
return -EINVAL;
}
+ if (last_alloc) {
+ u32 factor = map->num_stripes;
+
+ stripe_nr = last_alloc >> BTRFS_STRIPE_LEN_SHIFT;
+ stripe_offset = last_alloc & BTRFS_STRIPE_LEN_MASK;
+ stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
+ }
+
for (int i = 0; i < map->num_stripes; i++) {
- if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
- zone_info[i].alloc_offset == WP_CONVENTIONAL)
+ if (zone_info[i].alloc_offset == WP_MISSING_DEV)
continue;
+ if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
+ zone_info[i].alloc_offset = btrfs_stripe_nr_to_offset(stripe_nr);
+
+ if (stripe_index > i)
+ zone_info[i].alloc_offset += BTRFS_STRIPE_LEN;
+ else if (stripe_index == i)
+ zone_info[i].alloc_offset += stripe_offset;
+ }
+
if (test_bit(0, active) != test_bit(i, active)) {
- if (!btrfs_zone_activate(bg))
+ if (unlikely(!btrfs_zone_activate(bg)))
return -EIO;
} else {
if (test_bit(0, active))
@@ -1511,9 +1578,12 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
struct btrfs_chunk_map *map,
struct zone_info *zone_info,
- unsigned long *active)
+ unsigned long *active,
+ u64 last_alloc)
{
struct btrfs_fs_info *fs_info = bg->fs_info;
+ u64 stripe_nr = 0, stripe_offset = 0;
+ u32 stripe_index = 0;
if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
@@ -1521,19 +1591,35 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
return -EINVAL;
}
+ if (last_alloc) {
+ u32 factor = map->num_stripes / map->sub_stripes;
+
+ stripe_nr = last_alloc >> BTRFS_STRIPE_LEN_SHIFT;
+ stripe_offset = last_alloc & BTRFS_STRIPE_LEN_MASK;
+ stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
+ }
+
for (int i = 0; i < map->num_stripes; i++) {
- if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
- zone_info[i].alloc_offset == WP_CONVENTIONAL)
+ if (zone_info[i].alloc_offset == WP_MISSING_DEV)
continue;
if (test_bit(0, active) != test_bit(i, active)) {
- if (!btrfs_zone_activate(bg))
+ if (unlikely(!btrfs_zone_activate(bg)))
return -EIO;
} else {
if (test_bit(0, active))
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
}
+ if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
+ zone_info[i].alloc_offset = btrfs_stripe_nr_to_offset(stripe_nr);
+
+ if (stripe_index > (i / map->sub_stripes))
+ zone_info[i].alloc_offset += BTRFS_STRIPE_LEN;
+ else if (stripe_index == (i / map->sub_stripes))
+ zone_info[i].alloc_offset += stripe_offset;
+ }
+
if ((i % map->sub_stripes) == 0) {
bg->zone_capacity += zone_info[i].capacity;
bg->alloc_offset += zone_info[i].alloc_offset;
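For illustration, the RAID0 and RAID10 hunks above rebuild each stripe's write pointer from the single logical offset (last_alloc) computed over the conventional zones: last_alloc is split into whole stripe rotations plus a partial stripe, and stripes below stripe_index carry one extra full stripe. A minimal user-space sketch of that arithmetic, assuming BTRFS_STRIPE_LEN is 64 KiB and using a made-up three-device RAID0 layout (the RAID10 path divides by num_stripes / sub_stripes and compares against i / sub_stripes instead):

#include <stdint.h>
#include <stdio.h>

#define STRIPE_LEN        (64ULL * 1024)        /* assumed BTRFS_STRIPE_LEN */
#define STRIPE_LEN_SHIFT  16                    /* assumed BTRFS_STRIPE_LEN_SHIFT */
#define STRIPE_LEN_MASK   (STRIPE_LEN - 1)

int main(void)
{
	const uint32_t num_stripes = 3;                     /* hypothetical RAID0 layout */
	uint64_t last_alloc = 5 * STRIPE_LEN + 4096;        /* logical bytes already allocated */
	uint64_t stripe_nr = last_alloc >> STRIPE_LEN_SHIFT;
	uint64_t stripe_offset = last_alloc & STRIPE_LEN_MASK;
	uint32_t stripe_index = stripe_nr % num_stripes;    /* stripe currently being filled */

	stripe_nr /= num_stripes;                           /* full rotations over all stripes */

	for (uint32_t i = 0; i < num_stripes; i++) {
		uint64_t off = stripe_nr * STRIPE_LEN;      /* btrfs_stripe_nr_to_offset() */

		if (stripe_index > i)
			off += STRIPE_LEN;                  /* already holds one extra full stripe */
		else if (stripe_index == i)
			off += stripe_offset;               /* partially filled stripe */
		printf("stripe %u: write pointer at %llu bytes\n",
		       i, (unsigned long long)off);
	}
	return 0;
}

Summing the per-stripe offsets gives back last_alloc, which is the invariant the reconstruction relies on.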
@@ -1549,7 +1635,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
struct btrfs_chunk_map *map;
u64 logical = cache->start;
u64 length = cache->length;
- struct zone_info *zone_info = NULL;
+ struct zone_info AUTO_KFREE(zone_info);
int ret;
int i;
unsigned long *active = NULL;
@@ -1561,7 +1647,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
return 0;
/* Sanity check */
- if (!IS_ALIGNED(length, fs_info->zone_size)) {
+ if (unlikely(!IS_ALIGNED(length, fs_info->zone_size))) {
btrfs_err(fs_info,
"zoned: block group %llu len %llu unaligned to zone size %llu",
logical, length, fs_info->zone_size);
@@ -1587,7 +1673,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
}
for (i = 0; i < map->num_stripes; i++) {
- ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map);
+ ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map, new);
if (ret)
goto out;
@@ -1601,8 +1687,6 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
if (num_conventional > 0) {
- /* Zone capacity is always zone size in emulation */
- cache->zone_capacity = cache->length;
ret = calculate_alloc_pointer(cache, &last_alloc, new);
if (ret) {
btrfs_err(fs_info,
@@ -1611,6 +1695,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
goto out;
} else if (map->num_stripes == num_conventional) {
cache->alloc_offset = last_alloc;
+ cache->zone_capacity = cache->length;
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
goto out;
}
@@ -1622,18 +1707,22 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
ret = btrfs_load_block_group_single(cache, &zone_info[0], active);
break;
case BTRFS_BLOCK_GROUP_DUP:
- ret = btrfs_load_block_group_dup(cache, map, zone_info, active);
+ ret = btrfs_load_block_group_dup(cache, map, zone_info, active,
+ last_alloc);
break;
case BTRFS_BLOCK_GROUP_RAID1:
case BTRFS_BLOCK_GROUP_RAID1C3:
case BTRFS_BLOCK_GROUP_RAID1C4:
- ret = btrfs_load_block_group_raid1(cache, map, zone_info, active);
+ ret = btrfs_load_block_group_raid1(cache, map, zone_info,
+ active, last_alloc);
break;
case BTRFS_BLOCK_GROUP_RAID0:
- ret = btrfs_load_block_group_raid0(cache, map, zone_info, active);
+ ret = btrfs_load_block_group_raid0(cache, map, zone_info,
+ active, last_alloc);
break;
case BTRFS_BLOCK_GROUP_RAID10:
- ret = btrfs_load_block_group_raid10(cache, map, zone_info, active);
+ ret = btrfs_load_block_group_raid10(cache, map, zone_info,
+ active, last_alloc);
break;
case BTRFS_BLOCK_GROUP_RAID5:
case BTRFS_BLOCK_GROUP_RAID6:
@@ -1658,7 +1747,6 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
* stripe.
*/
cache->alloc_offset = cache->zone_capacity;
- ret = 0;
}
out:
@@ -1668,10 +1756,10 @@ out:
!fs_info->stripe_root) {
btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
btrfs_bg_type_to_raid_name(map->type));
- return -EINVAL;
+ ret = -EINVAL;
}
- if (cache->alloc_offset > cache->zone_capacity) {
+ if (unlikely(cache->alloc_offset > cache->zone_capacity)) {
btrfs_err(fs_info,
"zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
cache->alloc_offset, cache->zone_capacity,
@@ -1701,7 +1789,6 @@ out:
cache->physical_map = NULL;
}
bitmap_free(active);
- kfree(zone_info);
return ret;
}
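The AUTO_KFREE() conversion above (and the matching removal of the manual kfree(zone_info) at the end of the function) ties the allocation's lifetime to the variable's scope. The exact AUTO_KFREE definition is not visible in this diff; it presumably builds on the kernel's scope-based cleanup helpers (__free(kfree) from <linux/cleanup.h>). A user-space sketch of the underlying compiler mechanism, with names made up for the sketch:

#include <stdio.h>
#include <stdlib.h>

/* Cleanup handler: receives a pointer to the annotated variable. */
static void free_ptr(void *p)
{
	free(*(void **)p);
}

/* Rough analogue of __free(kfree); AUTO_FREE is a hypothetical name. */
#define AUTO_FREE __attribute__((cleanup(free_ptr)))

int main(void)
{
	AUTO_FREE int *zone_info = calloc(4, sizeof(*zone_info));

	if (!zone_info)
		return 1;
	zone_info[0] = 42;
	printf("%d\n", zone_info[0]);
	return 0;	/* no explicit free(): the cleanup handler runs on scope exit */
}

Because the handler also runs on early returns, dropping the explicit kfree() in the function's exit path stays safe.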
@@ -1728,14 +1815,14 @@ bool btrfs_use_zone_append(struct btrfs_bio *bbio)
{
u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT);
struct btrfs_inode *inode = bbio->inode;
- struct btrfs_fs_info *fs_info = bbio->fs_info;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_block_group *cache;
bool ret = false;
if (!btrfs_is_zoned(fs_info))
return false;
- if (!inode || !is_data_inode(inode))
+ if (!is_data_inode(inode))
return false;
if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE)
@@ -1783,12 +1870,12 @@ static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered,
ordered->disk_bytenr = logical;
write_lock(&em_tree->lock);
- em = search_extent_mapping(em_tree, ordered->file_offset,
- ordered->num_bytes);
+ em = btrfs_search_extent_mapping(em_tree, ordered->file_offset,
+ ordered->num_bytes);
/* The em should be a new COW extent, thus it should not have an offset. */
- ASSERT(em->offset == 0);
+ ASSERT(em->offset == 0, "em->offset=%llu", em->offset);
em->disk_bytenr = logical;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
write_unlock(&em_tree->lock);
}
@@ -1798,8 +1885,8 @@ static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered,
struct btrfs_ordered_extent *new;
if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
- split_extent_map(ordered->inode, ordered->file_offset,
- ordered->num_bytes, len, logical))
+ btrfs_split_extent_map(ordered->inode, ordered->file_offset,
+ ordered->num_bytes, len, logical))
return false;
new = btrfs_split_ordered_extent(ordered, len);
@@ -2002,7 +2089,7 @@ static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
&mapped_length, &bioc, NULL, NULL);
- if (ret || !bioc || mapped_length < PAGE_SIZE) {
+ if (unlikely(ret || !bioc || mapped_length < PAGE_SIZE)) {
ret = -EIO;
goto out_put_bioc;
}
@@ -2060,7 +2147,7 @@ int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
if (physical_pos == wp)
return 0;
- if (physical_pos > wp)
+ if (unlikely(physical_pos > wp))
return -EUCLEAN;
length = wp - physical_pos;
@@ -2096,10 +2183,15 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
goto out_unlock;
}
- /* No space left */
- if (btrfs_zoned_bg_is_full(block_group)) {
- ret = false;
- goto out_unlock;
+ if (block_group->flags & BTRFS_BLOCK_GROUP_DATA) {
+ /* The caller should check if the block group is full. */
+ if (WARN_ON_ONCE(btrfs_zoned_bg_is_full(block_group))) {
+ ret = false;
+ goto out_unlock;
+ }
+ } else {
+ /* Since it is already written, it should have been active. */
+ WARN_ON_ONCE(block_group->meta_write_pointer != block_group->start);
}
for (i = 0; i < map->num_stripes; i++) {
@@ -2110,6 +2202,9 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
physical = map->stripes[i].physical;
zinfo = device->zone_info;
+ if (!device->bdev)
+ continue;
+
if (zinfo->max_active_zones == 0)
continue;
@@ -2154,27 +2249,15 @@ static void wait_eb_writebacks(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
const u64 end = block_group->start + block_group->length;
- struct radix_tree_iter iter;
struct extent_buffer *eb;
- void __rcu **slot;
+ unsigned long index, start = (block_group->start >> fs_info->nodesize_bits);
rcu_read_lock();
- radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter,
- block_group->start >> fs_info->sectorsize_bits) {
- eb = radix_tree_deref_slot(slot);
- if (!eb)
- continue;
- if (radix_tree_deref_retry(eb)) {
- slot = radix_tree_iter_retry(&iter);
- continue;
- }
-
+ xa_for_each_start(&fs_info->buffer_tree, index, eb, start) {
if (eb->start < block_group->start)
continue;
if (eb->start >= end)
break;
-
- slot = radix_tree_iter_resume(slot, &iter);
rcu_read_unlock();
wait_on_extent_buffer_writeback(eb);
rcu_read_lock();
@@ -2182,6 +2265,40 @@ static void wait_eb_writebacks(struct btrfs_block_group *block_group)
rcu_read_unlock();
}
+static int call_zone_finish(struct btrfs_block_group *block_group,
+ struct btrfs_io_stripe *stripe)
+{
+ struct btrfs_device *device = stripe->dev;
+ const u64 physical = stripe->physical;
+ struct btrfs_zoned_device_info *zinfo = device->zone_info;
+ int ret;
+
+ if (!device->bdev)
+ return 0;
+
+ if (zinfo->max_active_zones == 0)
+ return 0;
+
+ if (btrfs_dev_is_sequential(device, physical)) {
+ unsigned int nofs_flags;
+
+ nofs_flags = memalloc_nofs_save();
+ ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
+ physical >> SECTOR_SHIFT,
+ zinfo->zone_size >> SECTOR_SHIFT);
+ memalloc_nofs_restore(nofs_flags);
+
+ if (ret)
+ return ret;
+ }
+
+ if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
+ zinfo->reserved_active_zones++;
+ btrfs_dev_clear_active_zone(device, physical);
+
+ return 0;
+}
+
static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
@@ -2266,28 +2383,12 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
down_read(&dev_replace->rwsem);
map = block_group->physical_map;
for (i = 0; i < map->num_stripes; i++) {
- struct btrfs_device *device = map->stripes[i].dev;
- const u64 physical = map->stripes[i].physical;
- struct btrfs_zoned_device_info *zinfo = device->zone_info;
- unsigned int nofs_flags;
-
- if (zinfo->max_active_zones == 0)
- continue;
-
- nofs_flags = memalloc_nofs_save();
- ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
- physical >> SECTOR_SHIFT,
- zinfo->zone_size >> SECTOR_SHIFT);
- memalloc_nofs_restore(nofs_flags);
+ ret = call_zone_finish(block_group, &map->stripes[i]);
if (ret) {
up_read(&dev_replace->rwsem);
return ret;
}
-
- if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
- zinfo->reserved_active_zones++;
- btrfs_dev_clear_active_zone(device, physical);
}
up_read(&dev_replace->rwsem);
@@ -2324,6 +2425,9 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
if (!btrfs_is_zoned(fs_info))
return true;
+ if (test_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags))
+ return false;
+
/* Check if there is a device with active zones left */
mutex_lock(&fs_info->chunk_mutex);
spin_lock(&fs_info->zone_active_bgs_lock);
@@ -2362,16 +2466,17 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
return ret;
}
-void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
+int btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
{
struct btrfs_block_group *block_group;
u64 min_alloc_bytes;
if (!btrfs_is_zoned(fs_info))
- return;
+ return 0;
block_group = btrfs_lookup_block_group(fs_info, logical);
- ASSERT(block_group);
+ if (WARN_ON_ONCE(!block_group))
+ return -ENOENT;
/* No MIXED_BG on zoned btrfs. */
if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
@@ -2388,16 +2493,21 @@ void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 len
out:
btrfs_put_block_group(block_group);
+ return 0;
}
static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
{
+ int ret;
struct btrfs_block_group *bg =
container_of(work, struct btrfs_block_group, zone_finish_work);
wait_on_extent_buffer_writeback(bg->last_eb);
free_extent_buffer(bg->last_eb);
- btrfs_zone_finish_endio(bg->fs_info, bg->start, bg->length);
+ ret = do_zone_finish(bg, true);
+ if (ret)
+ btrfs_handle_fs_error(bg->fs_info, ret,
+ "Failed to finish block-group's zone");
btrfs_put_block_group(bg);
}
@@ -2416,10 +2526,10 @@ void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
/* For the work */
btrfs_get_block_group(bg);
- atomic_inc(&eb->refs);
+ refcount_inc(&eb->refs);
bg->last_eb = eb;
INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
- queue_work(system_unbound_wq, &bg->zone_finish_work);
+ queue_work(system_dfl_wq, &bg->zone_finish_work);
}
void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
@@ -2432,6 +2542,106 @@ void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
spin_unlock(&fs_info->relocation_bg_lock);
}
+void btrfs_zoned_reserve_data_reloc_bg(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
+ struct btrfs_space_info *space_info = data_sinfo;
+ struct btrfs_trans_handle *trans;
+ struct btrfs_block_group *bg;
+ struct list_head *bg_list;
+ u64 alloc_flags;
+ bool first = true;
+ bool did_chunk_alloc = false;
+ int index;
+ int ret;
+
+ if (!btrfs_is_zoned(fs_info))
+ return;
+
+ if (fs_info->data_reloc_bg)
+ return;
+
+ if (sb_rdonly(fs_info->sb))
+ return;
+
+ alloc_flags = btrfs_get_alloc_profile(fs_info, space_info->flags);
+ index = btrfs_bg_flags_to_raid_index(alloc_flags);
+
+ /* Scan the data space_info to find empty block groups. Take the second one. */
+again:
+ bg_list = &space_info->block_groups[index];
+ list_for_each_entry(bg, bg_list, list) {
+ if (bg->alloc_offset != 0)
+ continue;
+
+ if (first) {
+ first = false;
+ continue;
+ }
+
+ if (space_info == data_sinfo) {
+ /* Migrate the block group to the data relocation space_info. */
+ struct btrfs_space_info *reloc_sinfo = data_sinfo->sub_group[0];
+ int factor;
+
+ ASSERT(reloc_sinfo->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC,
+ "reloc_sinfo->subgroup_id=%d", reloc_sinfo->subgroup_id);
+ factor = btrfs_bg_type_to_factor(bg->flags);
+
+ down_write(&space_info->groups_sem);
+ list_del_init(&bg->list);
+ /* We can assume this as we choose the second empty one. */
+ ASSERT(!list_empty(&space_info->block_groups[index]));
+ up_write(&space_info->groups_sem);
+
+ spin_lock(&space_info->lock);
+ space_info->total_bytes -= bg->length;
+ space_info->disk_total -= bg->length * factor;
+ space_info->disk_total -= bg->zone_unusable;
+ /* No allocation has ever happened here. */
+ ASSERT(bg->used == 0, "bg->used=%llu", bg->used);
+ /* No super block in a block group on the zoned setup. */
+ ASSERT(bg->bytes_super == 0, "bg->bytes_super=%llu", bg->bytes_super);
+ spin_unlock(&space_info->lock);
+
+ bg->space_info = reloc_sinfo;
+ if (reloc_sinfo->block_group_kobjs[index] == NULL)
+ btrfs_sysfs_add_block_group_type(bg);
+
+ btrfs_add_bg_to_space_info(fs_info, bg);
+ }
+
+ fs_info->data_reloc_bg = bg->start;
+ set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &bg->runtime_flags);
+ btrfs_zone_activate(bg);
+
+ return;
+ }
+
+ if (did_chunk_alloc)
+ return;
+
+ trans = btrfs_join_transaction(fs_info->tree_root);
+ if (IS_ERR(trans))
+ return;
+
+ /* Allocate new BG in the data relocation space_info. */
+ space_info = data_sinfo->sub_group[0];
+ ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC,
+ "space_info->subgroup_id=%d", space_info->subgroup_id);
+ ret = btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE);
+ btrfs_end_transaction(trans);
+ if (ret == 1) {
+ /*
+ * We allocated a new block group in the data relocation space_info. We
+ * can take that one.
+ */
+ first = false;
+ did_chunk_alloc = true;
+ goto again;
+ }
+}
+
void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
@@ -2454,8 +2664,8 @@ bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
+ u64 total = btrfs_super_total_bytes(fs_info->super_copy);
u64 used = 0;
- u64 total = 0;
u64 factor;
ASSERT(btrfs_is_zoned(fs_info));
@@ -2468,7 +2678,6 @@ bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info)
if (!device->bdev)
continue;
- total += device->disk_total_bytes;
used += device->bytes_used;
}
mutex_unlock(&fs_devices->device_list_mutex);
@@ -2522,7 +2731,7 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
spin_lock(&block_group->lock);
if (block_group->reserved || block_group->alloc_offset == 0 ||
- (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) ||
+ !(block_group->flags & BTRFS_BLOCK_GROUP_DATA) ||
test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
spin_unlock(&block_group->lock);
continue;
@@ -2549,10 +2758,9 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
return ret < 0 ? ret : 1;
}
-int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
- bool do_finish)
+int btrfs_zoned_activate_one_bg(struct btrfs_space_info *space_info, bool do_finish)
{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
struct btrfs_block_group *bg;
int index;
@@ -2651,3 +2859,128 @@ void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
}
spin_unlock(&fs_info->zone_active_bgs_lock);
}
+
+/*
+ * Reset the zones of unused block groups from @space_info->bytes_zone_unusable.
+ *
+ * @space_info: the space to work on
+ * @num_bytes: number of bytes to reclaim
+ *
+ * This one resets the zones of a block group, so we can reuse the region
+ * without removing the block group. On the other hand, btrfs_delete_unused_bgs()
+ * just removes a block group and frees up the underlying zones. So, we still
+ * need to allocate a new block group to reuse the zones.
+ *
+ * Resetting is faster than deleting/recreating a block group. It is similar
+ * to freeing the logical space on the regular mode. However, we cannot change
+ * the block group's profile with this operation.
+ */
+int btrfs_reset_unused_block_groups(struct btrfs_space_info *space_info, u64 num_bytes)
+{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
+ const sector_t zone_size_sectors = fs_info->zone_size >> SECTOR_SHIFT;
+
+ if (!btrfs_is_zoned(fs_info))
+ return 0;
+
+ while (num_bytes > 0) {
+ struct btrfs_chunk_map *map;
+ struct btrfs_block_group *bg = NULL;
+ bool found = false;
+ u64 reclaimed = 0;
+
+ /*
+ * Here, we choose a fully zone_unusable block group. It's
+ * technically possible to reset a partly zone_unusable block
+ * group, which still has some free space left. However,
+ * handling that needs to cope with the allocation side, which
+ * makes the logic more complex. So, let's handle the easy case
+ * for now.
+ */
+ spin_lock(&fs_info->unused_bgs_lock);
+ list_for_each_entry(bg, &fs_info->unused_bgs, bg_list) {
+ if ((bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != space_info->flags)
+ continue;
+
+ /*
+ * Use trylock to avoid locking order violation. In
+ * btrfs_reclaim_bgs_work(), the lock order is
+ * &bg->lock -> &fs_info->unused_bgs_lock. We skip a
+ * block group if we cannot take its lock.
+ */
+ if (!spin_trylock(&bg->lock))
+ continue;
+ if (btrfs_is_block_group_used(bg) || bg->zone_unusable < bg->length) {
+ spin_unlock(&bg->lock);
+ continue;
+ }
+ spin_unlock(&bg->lock);
+ found = true;
+ break;
+ }
+ if (!found) {
+ spin_unlock(&fs_info->unused_bgs_lock);
+ return 0;
+ }
+
+ list_del_init(&bg->bg_list);
+ btrfs_put_block_group(bg);
+ spin_unlock(&fs_info->unused_bgs_lock);
+
+ /*
+ * Since the block group is fully zone_unusable and we cannot
+ * allocate from this block group anymore, we don't need to set
+ * this block group read-only.
+ */
+
+ down_read(&fs_info->dev_replace.rwsem);
+ map = bg->physical_map;
+ for (int i = 0; i < map->num_stripes; i++) {
+ struct btrfs_io_stripe *stripe = &map->stripes[i];
+ unsigned int nofs_flags;
+ int ret;
+
+ nofs_flags = memalloc_nofs_save();
+ ret = blkdev_zone_mgmt(stripe->dev->bdev, REQ_OP_ZONE_RESET,
+ stripe->physical >> SECTOR_SHIFT,
+ zone_size_sectors);
+ memalloc_nofs_restore(nofs_flags);
+
+ if (ret) {
+ up_read(&fs_info->dev_replace.rwsem);
+ return ret;
+ }
+ }
+ up_read(&fs_info->dev_replace.rwsem);
+
+ spin_lock(&space_info->lock);
+ spin_lock(&bg->lock);
+ ASSERT(!btrfs_is_block_group_used(bg));
+ if (bg->ro) {
+ spin_unlock(&bg->lock);
+ spin_unlock(&space_info->lock);
+ continue;
+ }
+
+ reclaimed = bg->alloc_offset;
+ bg->zone_unusable = bg->length - bg->zone_capacity;
+ bg->alloc_offset = 0;
+ /*
+ * This holds because we currently only reset block groups that were
+ * fully used and then freed.
+ */
+ ASSERT(reclaimed == bg->zone_capacity,
+ "reclaimed=%llu bg->zone_capacity=%llu", reclaimed, bg->zone_capacity);
+ bg->free_space_ctl->free_space += reclaimed;
+ space_info->bytes_zone_unusable -= reclaimed;
+ spin_unlock(&bg->lock);
+ btrfs_return_free_space(space_info, reclaimed);
+ spin_unlock(&space_info->lock);
+
+ if (num_bytes <= reclaimed)
+ break;
+ num_bytes -= reclaimed;
+ }
+
+ return 0;
+}
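btrfs_reset_unused_block_groups() above rewinds each stripe's write pointer with blkdev_zone_mgmt(REQ_OP_ZONE_RESET, ...), so the region becomes writable again without deleting and re-creating the block group. For reference, the same zone reset can be issued from user space through the BLKRESETZONE ioctl; the device path and zone geometry below are placeholders, not values taken from the patch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(void)
{
	int fd = open("/dev/nullb0", O_RDWR);	/* hypothetical zoned block device */
	struct blk_zone_range range = {
		.sector = 0,					/* first zone */
		.nr_sectors = (256ULL * 1024 * 1024) / 512,	/* assumed 256 MiB zone, in 512 B sectors */
	};

	if (fd < 0)
		return 1;
	if (ioctl(fd, BLKRESETZONE, &range))
		perror("BLKRESETZONE");
	close(fd);
	return 0;
}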
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index 7612e6572605..5cefdeb08b7b 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -15,7 +15,6 @@
#include "disk-io.h"
#include "block-group.h"
#include "btrfs_inode.h"
-#include "fs.h"
struct block_device;
struct extent_buffer;
@@ -83,19 +82,20 @@ int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
bool btrfs_zone_activate(struct btrfs_block_group *block_group);
int btrfs_zone_finish(struct btrfs_block_group *block_group);
bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags);
-void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical,
+int btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical,
u64 length);
void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
struct extent_buffer *eb);
void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg);
+void btrfs_zoned_reserve_data_reloc_bg(struct btrfs_fs_info *fs_info);
void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info);
bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info);
void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
u64 length);
int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info);
-int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info, bool do_finish);
+int btrfs_zoned_activate_one_bg(struct btrfs_space_info *space_info, bool do_finish);
void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info);
+int btrfs_reset_unused_block_groups(struct btrfs_space_info *space_info, u64 num_bytes);
#else /* CONFIG_BLK_DEV_ZONED */
static inline int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
@@ -232,14 +232,19 @@ static inline bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices,
return true;
}
-static inline void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info,
- u64 logical, u64 length) { }
+static inline int btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 length)
+{
+ return 0;
+}
static inline void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
struct extent_buffer *eb) { }
static inline void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg) { }
+static inline void btrfs_zoned_reserve_data_reloc_bg(struct btrfs_fs_info *fs_info) { }
+
static inline void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info) { }
static inline bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info)
@@ -255,8 +260,7 @@ static inline int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
return 1;
}
-static inline int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
+static inline int btrfs_zoned_activate_one_bg(struct btrfs_space_info *space_info,
bool do_finish)
{
/* Consider all the block groups are active */
@@ -265,6 +269,12 @@ static inline int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
static inline void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info) { }
+static inline int btrfs_reset_unused_block_groups(struct btrfs_space_info *space_info,
+ u64 num_bytes)
+{
+ return 0;
+}
+
#endif
static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 5232b56d5892..c9cddcfa337b 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -24,13 +24,14 @@
#include "super.h"
#define ZSTD_BTRFS_MAX_WINDOWLOG 17
-#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG)
+#define ZSTD_BTRFS_MAX_INPUT (1U << ZSTD_BTRFS_MAX_WINDOWLOG)
#define ZSTD_BTRFS_DEFAULT_LEVEL 3
+#define ZSTD_BTRFS_MIN_LEVEL -15
#define ZSTD_BTRFS_MAX_LEVEL 15
/* 307s to avoid pathologically clashing with transaction commit */
#define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)
-static zstd_parameters zstd_get_btrfs_parameters(unsigned int level,
+static zstd_parameters zstd_get_btrfs_parameters(int level,
size_t src_len)
{
zstd_parameters params = zstd_get_params(level, src_len);
@@ -45,13 +46,14 @@ struct workspace {
void *mem;
size_t size;
char *buf;
- unsigned int level;
- unsigned int req_level;
+ int level;
+ int req_level;
unsigned long last_used; /* jiffies */
struct list_head list;
struct list_head lru_list;
zstd_in_buffer in_buf;
zstd_out_buffer out_buf;
+ zstd_parameters params;
};
/*
@@ -75,7 +77,6 @@ struct workspace {
*/
struct zstd_workspace_manager {
- const struct btrfs_compress_op *ops;
spinlock_t lock;
struct list_head lru_list;
struct list_head idle_ws[ZSTD_BTRFS_MAX_LEVEL];
@@ -84,8 +85,6 @@ struct zstd_workspace_manager {
struct timer_list timer;
};
-static struct zstd_workspace_manager wsm;
-
static size_t zstd_ws_mem_sizes[ZSTD_BTRFS_MAX_LEVEL];
static inline struct workspace *list_to_workspace(struct list_head *list)
@@ -93,8 +92,10 @@ static inline struct workspace *list_to_workspace(struct list_head *list)
return container_of(list, struct workspace, list);
}
-void zstd_free_workspace(struct list_head *ws);
-struct list_head *zstd_alloc_workspace(unsigned int level);
+static inline int clip_level(int level)
+{
+ return max(0, level - 1);
+}
/*
* Timer callback to free unused workspaces.
@@ -108,22 +109,22 @@ struct list_head *zstd_alloc_workspace(unsigned int level);
*/
static void zstd_reclaim_timer_fn(struct timer_list *timer)
{
+ struct zstd_workspace_manager *zwsm =
+ container_of(timer, struct zstd_workspace_manager, timer);
unsigned long reclaim_threshold = jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES;
struct list_head *pos, *next;
- ASSERT(timer == &wsm.timer);
-
- spin_lock(&wsm.lock);
+ spin_lock(&zwsm->lock);
- if (list_empty(&wsm.lru_list)) {
- spin_unlock(&wsm.lock);
+ if (list_empty(&zwsm->lru_list)) {
+ spin_unlock(&zwsm->lock);
return;
}
- list_for_each_prev_safe(pos, next, &wsm.lru_list) {
+ list_for_each_prev_safe(pos, next, &zwsm->lru_list) {
struct workspace *victim = container_of(pos, struct workspace,
lru_list);
- unsigned int level;
+ int level;
if (time_after(victim->last_used, reclaim_threshold))
break;
@@ -137,15 +138,15 @@ static void zstd_reclaim_timer_fn(struct timer_list *timer)
list_del(&victim->list);
zstd_free_workspace(&victim->list);
- if (list_empty(&wsm.idle_ws[level - 1]))
- clear_bit(level - 1, &wsm.active_map);
+ if (list_empty(&zwsm->idle_ws[level]))
+ clear_bit(level, &zwsm->active_map);
}
- if (!list_empty(&wsm.lru_list))
- mod_timer(&wsm.timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
+ if (!list_empty(&zwsm->lru_list))
+ mod_timer(&zwsm->timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
- spin_unlock(&wsm.lock);
+ spin_unlock(&zwsm->lock);
}
/*
@@ -160,9 +161,11 @@ static void zstd_reclaim_timer_fn(struct timer_list *timer)
static void zstd_calc_ws_mem_sizes(void)
{
size_t max_size = 0;
- unsigned int level;
+ int level;
- for (level = 1; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
+ for (level = ZSTD_BTRFS_MIN_LEVEL; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
+ if (level == 0)
+ continue;
zstd_parameters params =
zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
size_t level_size =
@@ -171,54 +174,61 @@ static void zstd_calc_ws_mem_sizes(void)
zstd_dstream_workspace_bound(ZSTD_BTRFS_MAX_INPUT));
max_size = max_t(size_t, max_size, level_size);
- zstd_ws_mem_sizes[level - 1] = max_size;
+ /* Use level 1 workspace size for all the fast mode negative levels. */
+ zstd_ws_mem_sizes[clip_level(level)] = max_size;
}
}
-void zstd_init_workspace_manager(void)
+int zstd_alloc_workspace_manager(struct btrfs_fs_info *fs_info)
{
+ struct zstd_workspace_manager *zwsm;
struct list_head *ws;
- int i;
+ ASSERT(fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD] == NULL);
+ zwsm = kzalloc(sizeof(*zwsm), GFP_KERNEL);
+ if (!zwsm)
+ return -ENOMEM;
zstd_calc_ws_mem_sizes();
+ spin_lock_init(&zwsm->lock);
+ init_waitqueue_head(&zwsm->wait);
+ timer_setup(&zwsm->timer, zstd_reclaim_timer_fn, 0);
- wsm.ops = &btrfs_zstd_compress;
- spin_lock_init(&wsm.lock);
- init_waitqueue_head(&wsm.wait);
- timer_setup(&wsm.timer, zstd_reclaim_timer_fn, 0);
-
- INIT_LIST_HEAD(&wsm.lru_list);
- for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++)
- INIT_LIST_HEAD(&wsm.idle_ws[i]);
+ INIT_LIST_HEAD(&zwsm->lru_list);
+ for (int i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++)
+ INIT_LIST_HEAD(&zwsm->idle_ws[i]);
+ fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD] = zwsm;
- ws = zstd_alloc_workspace(ZSTD_BTRFS_MAX_LEVEL);
+ ws = zstd_alloc_workspace(fs_info, ZSTD_BTRFS_MAX_LEVEL);
if (IS_ERR(ws)) {
- pr_warn(
- "BTRFS: cannot preallocate zstd compression workspace\n");
+ btrfs_warn(NULL, "cannot preallocate zstd compression workspace");
} else {
- set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &wsm.active_map);
- list_add(ws, &wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
+ set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &zwsm->active_map);
+ list_add(ws, &zwsm->idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
}
+ return 0;
}
-void zstd_cleanup_workspace_manager(void)
+void zstd_free_workspace_manager(struct btrfs_fs_info *fs_info)
{
+ struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
struct workspace *workspace;
- int i;
- spin_lock_bh(&wsm.lock);
- for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) {
- while (!list_empty(&wsm.idle_ws[i])) {
- workspace = container_of(wsm.idle_ws[i].next,
+ if (!zwsm)
+ return;
+ fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD] = NULL;
+ spin_lock_bh(&zwsm->lock);
+ for (int i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) {
+ while (!list_empty(&zwsm->idle_ws[i])) {
+ workspace = container_of(zwsm->idle_ws[i].next,
struct workspace, list);
list_del(&workspace->list);
list_del(&workspace->lru_list);
zstd_free_workspace(&workspace->list);
}
}
- spin_unlock_bh(&wsm.lock);
-
- del_timer_sync(&wsm.timer);
+ spin_unlock_bh(&zwsm->lock);
+ timer_delete_sync(&zwsm->timer);
+ kfree(zwsm);
}
/*
@@ -233,29 +243,31 @@ void zstd_cleanup_workspace_manager(void)
* offer the opportunity to reclaim the workspace in favor of allocating an
* appropriately sized one in the future.
*/
-static struct list_head *zstd_find_workspace(unsigned int level)
+static struct list_head *zstd_find_workspace(struct btrfs_fs_info *fs_info, int level)
{
+ struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
struct list_head *ws;
struct workspace *workspace;
- int i = level - 1;
+ int i = clip_level(level);
- spin_lock_bh(&wsm.lock);
- for_each_set_bit_from(i, &wsm.active_map, ZSTD_BTRFS_MAX_LEVEL) {
- if (!list_empty(&wsm.idle_ws[i])) {
- ws = wsm.idle_ws[i].next;
+ ASSERT(zwsm);
+ spin_lock_bh(&zwsm->lock);
+ for_each_set_bit_from(i, &zwsm->active_map, ZSTD_BTRFS_MAX_LEVEL) {
+ if (!list_empty(&zwsm->idle_ws[i])) {
+ ws = zwsm->idle_ws[i].next;
workspace = list_to_workspace(ws);
list_del_init(ws);
/* keep its place if it's a lower level using this */
workspace->req_level = level;
- if (level == workspace->level)
+ if (clip_level(level) == workspace->level)
list_del(&workspace->lru_list);
- if (list_empty(&wsm.idle_ws[i]))
- clear_bit(i, &wsm.active_map);
- spin_unlock_bh(&wsm.lock);
+ if (list_empty(&zwsm->idle_ws[i]))
+ clear_bit(i, &zwsm->active_map);
+ spin_unlock_bh(&zwsm->lock);
return ws;
}
}
- spin_unlock_bh(&wsm.lock);
+ spin_unlock_bh(&zwsm->lock);
return NULL;
}
@@ -270,30 +282,33 @@ static struct list_head *zstd_find_workspace(unsigned int level)
* attempt to allocate a new workspace. If we fail to allocate one due to
* memory pressure, go to sleep waiting for the max level workspace to free up.
*/
-struct list_head *zstd_get_workspace(unsigned int level)
+struct list_head *zstd_get_workspace(struct btrfs_fs_info *fs_info, int level)
{
+ struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
struct list_head *ws;
unsigned int nofs_flag;
+ ASSERT(zwsm);
+
/* level == 0 means we can use any workspace */
if (!level)
level = 1;
again:
- ws = zstd_find_workspace(level);
+ ws = zstd_find_workspace(fs_info, level);
if (ws)
return ws;
nofs_flag = memalloc_nofs_save();
- ws = zstd_alloc_workspace(level);
+ ws = zstd_alloc_workspace(fs_info, level);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(ws)) {
DEFINE_WAIT(wait);
- prepare_to_wait(&wsm.wait, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(&zwsm->wait, &wait, TASK_UNINTERRUPTIBLE);
schedule();
- finish_wait(&wsm.wait, &wait);
+ finish_wait(&zwsm->wait, &wait);
goto again;
}
@@ -312,34 +327,36 @@ again:
* isn't set, it is also set here. Only the max level workspace tries and wakes
* up waiting workspaces.
*/
-void zstd_put_workspace(struct list_head *ws)
+void zstd_put_workspace(struct btrfs_fs_info *fs_info, struct list_head *ws)
{
+ struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
struct workspace *workspace = list_to_workspace(ws);
- spin_lock_bh(&wsm.lock);
+ ASSERT(zwsm);
+ spin_lock_bh(&zwsm->lock);
/* A node is only taken off the lru if we are the corresponding level */
- if (workspace->req_level == workspace->level) {
+ if (clip_level(workspace->req_level) == workspace->level) {
/* Hide a max level workspace from reclaim */
- if (list_empty(&wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
+ if (list_empty(&zwsm->idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
INIT_LIST_HEAD(&workspace->lru_list);
} else {
workspace->last_used = jiffies;
- list_add(&workspace->lru_list, &wsm.lru_list);
- if (!timer_pending(&wsm.timer))
- mod_timer(&wsm.timer,
+ list_add(&workspace->lru_list, &zwsm->lru_list);
+ if (!timer_pending(&zwsm->timer))
+ mod_timer(&zwsm->timer,
jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
}
}
- set_bit(workspace->level - 1, &wsm.active_map);
- list_add(&workspace->list, &wsm.idle_ws[workspace->level - 1]);
+ set_bit(workspace->level, &zwsm->active_map);
+ list_add(&workspace->list, &zwsm->idle_ws[workspace->level]);
workspace->req_level = 0;
- spin_unlock_bh(&wsm.lock);
+ spin_unlock_bh(&zwsm->lock);
- if (workspace->level == ZSTD_BTRFS_MAX_LEVEL)
- cond_wake_up(&wsm.wait);
+ if (workspace->level == clip_level(ZSTD_BTRFS_MAX_LEVEL))
+ cond_wake_up(&zwsm->wait);
}
void zstd_free_workspace(struct list_head *ws)
@@ -351,20 +368,22 @@ void zstd_free_workspace(struct list_head *ws)
kfree(workspace);
}
-struct list_head *zstd_alloc_workspace(unsigned int level)
+struct list_head *zstd_alloc_workspace(struct btrfs_fs_info *fs_info, int level)
{
+ const u32 blocksize = fs_info->sectorsize;
struct workspace *workspace;
workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
if (!workspace)
return ERR_PTR(-ENOMEM);
- workspace->size = zstd_ws_mem_sizes[level - 1];
- workspace->level = level;
+ /* Use level 1 workspace size for all the fast mode negative levels. */
+ workspace->size = zstd_ws_mem_sizes[clip_level(level)];
+ workspace->level = clip_level(level);
workspace->req_level = level;
workspace->last_used = jiffies;
workspace->mem = kvmalloc(workspace->size, GFP_KERNEL | __GFP_NOWARN);
- workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ workspace->buf = kmalloc(blocksize, GFP_KERNEL);
if (!workspace->mem || !workspace->buf)
goto fail;
@@ -377,11 +396,13 @@ fail:
return ERR_PTR(-ENOMEM);
}
-int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
+int zstd_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out)
{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct workspace *workspace = list_entry(ws, struct workspace, list);
+ struct address_space *mapping = inode->vfs_inode.i_mapping;
zstd_cstream *stream;
int ret = 0;
int nr_folios = 0;
@@ -392,23 +413,21 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
unsigned long len = *total_out;
const unsigned long nr_dest_folios = *out_folios;
const u64 orig_end = start + len;
- unsigned long max_out = nr_dest_folios * PAGE_SIZE;
- unsigned int pg_off;
+ const u32 blocksize = fs_info->sectorsize;
+ const u32 min_folio_size = btrfs_min_folio_size(fs_info);
+ unsigned long max_out = nr_dest_folios * min_folio_size;
unsigned int cur_len;
- zstd_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
- len);
+ workspace->params = zstd_get_btrfs_parameters(workspace->req_level, len);
*out_folios = 0;
*total_out = 0;
*total_in = 0;
/* Initialize the stream */
- stream = zstd_init_cstream(&params, len, workspace->mem,
+ stream = zstd_init_cstream(&workspace->params, len, workspace->mem,
workspace->size);
if (unlikely(!stream)) {
- struct btrfs_inode *inode = BTRFS_I(mapping->host);
-
- btrfs_err(inode->root->fs_info,
+ btrfs_err(fs_info,
"zstd compression init level %d failed, root %llu inode %llu offset %llu",
workspace->req_level, btrfs_root_id(inode->root),
btrfs_ino(inode), start);
@@ -420,14 +439,13 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
if (ret < 0)
goto out;
- pg_off = offset_in_page(start);
- cur_len = btrfs_calc_input_length(orig_end, start);
- workspace->in_buf.src = kmap_local_folio(in_folio, pg_off);
+ cur_len = btrfs_calc_input_length(in_folio, orig_end, start);
+ workspace->in_buf.src = kmap_local_folio(in_folio, offset_in_folio(in_folio, start));
workspace->in_buf.pos = 0;
workspace->in_buf.size = cur_len;
/* Allocate and map in the output buffer */
- out_folio = btrfs_alloc_compr_folio();
+ out_folio = btrfs_alloc_compr_folio(fs_info);
if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
@@ -435,7 +453,7 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
folios[nr_folios++] = out_folio;
workspace->out_buf.dst = folio_address(out_folio);
workspace->out_buf.pos = 0;
- workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
+ workspace->out_buf.size = min_t(size_t, max_out, min_folio_size);
while (1) {
size_t ret2;
@@ -443,9 +461,7 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
ret2 = zstd_compress_stream(stream, &workspace->out_buf,
&workspace->in_buf);
if (unlikely(zstd_is_error(ret2))) {
- struct btrfs_inode *inode = BTRFS_I(mapping->host);
-
- btrfs_warn(inode->root->fs_info,
+ btrfs_warn(fs_info,
"zstd compression level %d failed, error %d root %llu inode %llu offset %llu",
workspace->req_level, zstd_get_error_code(ret2),
btrfs_root_id(inode->root), btrfs_ino(inode),
@@ -455,7 +471,7 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
}
/* Check to see if we are making it bigger */
- if (tot_in + workspace->in_buf.pos > 8192 &&
+ if (tot_in + workspace->in_buf.pos > blocksize * 2 &&
tot_in + workspace->in_buf.pos <
tot_out + workspace->out_buf.pos) {
ret = -E2BIG;
@@ -471,13 +487,13 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
/* Check if we need more output space */
if (workspace->out_buf.pos == workspace->out_buf.size) {
- tot_out += PAGE_SIZE;
- max_out -= PAGE_SIZE;
+ tot_out += min_folio_size;
+ max_out -= min_folio_size;
if (nr_folios == nr_dest_folios) {
ret = -E2BIG;
goto out;
}
- out_folio = btrfs_alloc_compr_folio();
+ out_folio = btrfs_alloc_compr_folio(fs_info);
if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
@@ -485,8 +501,7 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
folios[nr_folios++] = out_folio;
workspace->out_buf.dst = folio_address(out_folio);
workspace->out_buf.pos = 0;
- workspace->out_buf.size = min_t(size_t, max_out,
- PAGE_SIZE);
+ workspace->out_buf.size = min_t(size_t, max_out, min_folio_size);
}
/* We've reached the end of the input */
@@ -506,9 +521,9 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
if (ret < 0)
goto out;
- pg_off = offset_in_page(start);
- cur_len = btrfs_calc_input_length(orig_end, start);
- workspace->in_buf.src = kmap_local_folio(in_folio, pg_off);
+ cur_len = btrfs_calc_input_length(in_folio, orig_end, start);
+ workspace->in_buf.src = kmap_local_folio(in_folio,
+ offset_in_folio(in_folio, start));
workspace->in_buf.pos = 0;
workspace->in_buf.size = cur_len;
}
@@ -518,9 +533,7 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
ret2 = zstd_end_stream(stream, &workspace->out_buf);
if (unlikely(zstd_is_error(ret2))) {
- struct btrfs_inode *inode = BTRFS_I(mapping->host);
-
- btrfs_err(inode->root->fs_info,
+ btrfs_err(fs_info,
"zstd compression end level %d failed, error %d root %llu inode %llu offset %llu",
workspace->req_level, zstd_get_error_code(ret2),
btrfs_root_id(inode->root), btrfs_ino(inode),
@@ -538,13 +551,13 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
goto out;
}
- tot_out += PAGE_SIZE;
- max_out -= PAGE_SIZE;
+ tot_out += min_folio_size;
+ max_out -= min_folio_size;
if (nr_folios == nr_dest_folios) {
ret = -E2BIG;
goto out;
}
- out_folio = btrfs_alloc_compr_folio();
+ out_folio = btrfs_alloc_compr_folio(fs_info);
if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
@@ -552,7 +565,7 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
folios[nr_folios++] = out_folio;
workspace->out_buf.dst = folio_address(out_folio);
workspace->out_buf.pos = 0;
- workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
+ workspace->out_buf.size = min_t(size_t, max_out, min_folio_size);
}
if (tot_out >= tot_in) {
@@ -574,13 +587,16 @@ out:
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
+ struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
struct workspace *workspace = list_entry(ws, struct workspace, list);
struct folio **folios_in = cb->compressed_folios;
size_t srclen = cb->compressed_len;
zstd_dstream *stream;
int ret = 0;
+ const u32 blocksize = fs_info->sectorsize;
+ const unsigned int min_folio_size = btrfs_min_folio_size(fs_info);
unsigned long folio_in_index = 0;
- unsigned long total_folios_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
+ unsigned long total_folios_in = DIV_ROUND_UP(srclen, min_folio_size);
unsigned long buf_start;
unsigned long total_out = 0;
@@ -598,11 +614,11 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
workspace->in_buf.src = kmap_local_folio(folios_in[folio_in_index], 0);
workspace->in_buf.pos = 0;
- workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
+ workspace->in_buf.size = min_t(size_t, srclen, min_folio_size);
workspace->out_buf.dst = workspace->buf;
workspace->out_buf.pos = 0;
- workspace->out_buf.size = PAGE_SIZE;
+ workspace->out_buf.size = blocksize;
while (1) {
size_t ret2;
@@ -638,16 +654,16 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
if (workspace->in_buf.pos == workspace->in_buf.size) {
kunmap_local(workspace->in_buf.src);
folio_in_index++;
- if (folio_in_index >= total_folios_in) {
+ if (unlikely(folio_in_index >= total_folios_in)) {
workspace->in_buf.src = NULL;
ret = -EIO;
goto done;
}
- srclen -= PAGE_SIZE;
+ srclen -= min_folio_size;
workspace->in_buf.src =
kmap_local_folio(folios_in[folio_in_index], 0);
workspace->in_buf.pos = 0;
- workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
+ workspace->in_buf.size = min_t(size_t, srclen, min_folio_size);
}
}
ret = 0;
@@ -714,9 +730,8 @@ finish:
return ret;
}
-const struct btrfs_compress_op btrfs_zstd_compress = {
- /* ZSTD uses own workspace manager */
- .workspace_manager = NULL,
+const struct btrfs_compress_levels btrfs_zstd_compress = {
+ .min_level = ZSTD_BTRFS_MIN_LEVEL,
.max_level = ZSTD_BTRFS_MAX_LEVEL,
.default_level = ZSTD_BTRFS_DEFAULT_LEVEL,
};
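With the new ZSTD_BTRFS_MIN_LEVEL of -15, the workspace manager still keeps only ZSTD_BTRFS_MAX_LEVEL idle lists: clip_level() folds level 1 and every negative fast level onto slot 0, while levels 2..15 map to slots 1..14. A small standalone sketch of that mapping, reusing the constants and the clip_level() logic from the hunks above:

#include <stdio.h>

#define ZSTD_BTRFS_MIN_LEVEL (-15)
#define ZSTD_BTRFS_MAX_LEVEL 15

/* Same behaviour as the clip_level() helper added in the patch. */
static int clip_level(int level)
{
	return level - 1 > 0 ? level - 1 : 0;
}

int main(void)
{
	for (int level = ZSTD_BTRFS_MIN_LEVEL; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
		if (level == 0)
			continue;	/* 0 means "default level", skipped as in zstd_calc_ws_mem_sizes() */
		printf("level %3d -> idle_ws[%2d]\n", level, clip_level(level));
	}
	return 0;
}

This shared slot 0 is why the patch comments note that the fast (negative) levels reuse the level 1 workspace size.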
diff --git a/fs/buffer.c b/fs/buffer.c
index cc8452f60251..838c0c571022 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -157,8 +157,8 @@ static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
*/
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
- __end_buffer_read_notouch(bh, uptodate);
put_bh(bh);
+ __end_buffer_read_notouch(bh, uptodate);
}
EXPORT_SYMBOL(end_buffer_read_sync);
@@ -176,18 +176,8 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
}
EXPORT_SYMBOL(end_buffer_write_sync);
-/*
- * Various filesystems appear to want __find_get_block to be non-blocking.
- * But it's the page lock which protects the buffers. To get around this,
- * we get exclusion from try_to_free_buffers with the blockdev mapping's
- * i_private_lock.
- *
- * Hack idea: for the blockdev mapping, i_private_lock contention
- * may be quite high. This code could TryLock the page, and if that
- * succeeds, there is no need to take i_private_lock.
- */
static struct buffer_head *
-__find_get_block_slow(struct block_device *bdev, sector_t block)
+__find_get_block_slow(struct block_device *bdev, sector_t block, bool atomic)
{
struct address_space *bd_mapping = bdev->bd_mapping;
const int blkbits = bd_mapping->host->i_blkbits;
@@ -204,10 +194,28 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
if (IS_ERR(folio))
goto out;
- spin_lock(&bd_mapping->i_private_lock);
+ /*
+ * Folio lock protects the buffers. Callers that cannot block
+ * will fall back to serializing against try_to_free_buffers() via
+ * the i_private_lock.
+ */
+ if (atomic)
+ spin_lock(&bd_mapping->i_private_lock);
+ else
+ folio_lock(folio);
+
head = folio_buffers(folio);
if (!head)
goto out_unlock;
+ /*
+ * Upon a noref migration, the folio lock serializes here;
+ * otherwise bail.
+ */
+ if (test_bit_acquire(BH_Migrate, &head->b_state)) {
+ WARN_ON(!atomic);
+ goto out_unlock;
+ }
+
bh = head;
do {
if (!buffer_mapped(bh))
@@ -236,7 +244,10 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
1 << blkbits);
}
out_unlock:
- spin_unlock(&bd_mapping->i_private_lock);
+ if (atomic)
+ spin_unlock(&bd_mapping->i_private_lock);
+ else
+ folio_unlock(folio);
folio_put(folio);
out:
return ret;
@@ -286,7 +297,6 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
still_busy:
spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
- return;
}
struct postprocess_bh_ctx {
@@ -411,7 +421,6 @@ static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
still_busy:
spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
- return;
}
/*
@@ -602,9 +611,9 @@ int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
return err;
ret = sync_mapping_buffers(inode->i_mapping);
- if (!(inode->i_state & I_DIRTY_ALL))
+ if (!(inode_state_read_once(inode) & I_DIRTY_ALL))
goto out;
- if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+ if (datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC))
goto out;
err = sync_inode_metadata(inode, 1);
@@ -656,7 +665,9 @@ EXPORT_SYMBOL(generic_buffers_fsync);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize)
{
- struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
+ struct buffer_head *bh;
+
+ bh = __find_get_block_nonatomic(bdev, bblock + 1, blocksize);
if (bh) {
if (buffer_dirty(bh))
write_dirty_buffer(bh, 0);
@@ -1109,27 +1120,26 @@ static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
unsigned size, gfp_t gfp)
{
- /* Size must be multiple of hard sectorsize */
- if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
- (size < 512 || size > PAGE_SIZE))) {
- printk(KERN_ERR "getblk(): invalid block size %d requested\n",
- size);
- printk(KERN_ERR "logical block size: %d\n",
- bdev_logical_block_size(bdev));
+ bool blocking = gfpflags_allow_blocking(gfp);
- dump_stack();
+ if (WARN_ON_ONCE(!IS_ALIGNED(size, bdev_logical_block_size(bdev)))) {
+ printk(KERN_ERR "getblk(): block size %d not aligned to logical block size %d\n",
+ size, bdev_logical_block_size(bdev));
return NULL;
}
for (;;) {
struct buffer_head *bh;
- bh = __find_get_block(bdev, block, size);
- if (bh)
- return bh;
-
if (!grow_buffers(bdev, block, size, gfp))
return NULL;
+
+ if (blocking)
+ bh = __find_get_block_nonatomic(bdev, block, size);
+ else
+ bh = __find_get_block(bdev, block, size);
+ if (bh)
+ return bh;
}
}
@@ -1207,10 +1217,8 @@ void mark_buffer_write_io_error(struct buffer_head *bh)
/* FIXME: do we need to set this in both places? */
if (bh->b_folio && bh->b_folio->mapping)
mapping_set_error(bh->b_folio->mapping, -EIO);
- if (bh->b_assoc_map) {
+ if (bh->b_assoc_map)
mapping_set_error(bh->b_assoc_map, -EIO);
- errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
- }
}
EXPORT_SYMBOL(mark_buffer_write_io_error);
@@ -1386,16 +1394,18 @@ lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
/*
* Perform a pagecache lookup for the matching buffer. If it's there, refresh
* it in the LRU and mark it as accessed. If it is not present then return
- * NULL
+ * NULL. In atomic context, NULL may also be returned if the buffer is being
+ * migrated; in that case the page is not marked accessed either.
*/
-struct buffer_head *
-__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
+static struct buffer_head *
+find_get_block_common(struct block_device *bdev, sector_t block,
+ unsigned size, bool atomic)
{
struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
if (bh == NULL) {
/* __find_get_block_slow will mark the page accessed */
- bh = __find_get_block_slow(bdev, block);
+ bh = __find_get_block_slow(bdev, block, atomic);
if (bh)
bh_lru_install(bh);
} else
@@ -1403,8 +1413,23 @@ __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
return bh;
}
+
+struct buffer_head *
+__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
+{
+ return find_get_block_common(bdev, block, size, true);
+}
EXPORT_SYMBOL(__find_get_block);
+/* same as __find_get_block(), but for callers that are allowed to sleep */
+struct buffer_head *
+__find_get_block_nonatomic(struct block_device *bdev, sector_t block,
+ unsigned size)
+{
+ return find_get_block_common(bdev, block, size, false);
+}
+EXPORT_SYMBOL(__find_get_block_nonatomic);
+
/**
* bdev_getblk - Get a buffer_head in a block device's buffer cache.
* @bdev: The block device.
@@ -1422,7 +1447,12 @@ EXPORT_SYMBOL(__find_get_block);
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
unsigned size, gfp_t gfp)
{
- struct buffer_head *bh = __find_get_block(bdev, block, size);
+ struct buffer_head *bh;
+
+ if (gfpflags_allow_blocking(gfp))
+ bh = __find_get_block_nonatomic(bdev, block, size);
+ else
+ bh = __find_get_block(bdev, block, size);
might_alloc(gfp);
if (bh)
@@ -1578,8 +1608,8 @@ static void discard_buffer(struct buffer_head * bh)
bh->b_bdev = NULL;
b_state = READ_ONCE(bh->b_state);
do {
- } while (!try_cmpxchg(&bh->b_state, &b_state,
- b_state & ~BUFFER_FLAGS_DISCARD));
+ } while (!try_cmpxchg_relaxed(&bh->b_state, &b_state,
+ b_state & ~BUFFER_FLAGS_DISCARD));
unlock_buffer(bh);
}
@@ -1644,7 +1674,6 @@ void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
filemap_release_folio(folio, 0);
out:
folio_clear_mappedtodisk(folio);
- return;
}
EXPORT_SYMBOL(block_invalidate_folio);
@@ -2166,7 +2195,7 @@ int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
}
EXPORT_SYMBOL(__block_write_begin);
-static void __block_commit_write(struct folio *folio, size_t from, size_t to)
+void block_commit_write(struct folio *folio, size_t from, size_t to)
{
size_t block_start, block_end;
bool partial = false;
@@ -2204,6 +2233,7 @@ static void __block_commit_write(struct folio *folio, size_t from, size_t to)
if (!partial)
folio_mark_uptodate(folio);
}
+EXPORT_SYMBOL(block_commit_write);
/*
* block_write_begin takes care of the basic task of block allocation and
@@ -2235,9 +2265,8 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
}
EXPORT_SYMBOL(block_write_begin);
-int block_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct folio *folio, void *fsdata)
+int block_write_end(loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio)
{
size_t start = pos - folio_pos(folio);
@@ -2262,21 +2291,21 @@ int block_write_end(struct file *file, struct address_space *mapping,
flush_dcache_folio(folio);
/* This could be a short (even 0-length) commit */
- __block_commit_write(folio, start, start + copied);
+ block_commit_write(folio, start, start + copied);
return copied;
}
EXPORT_SYMBOL(block_write_end);
-int generic_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct folio *folio, void *fsdata)
+int generic_write_end(const struct kiocb *iocb, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
loff_t old_size = inode->i_size;
bool i_size_changed = false;
- copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
+ copied = block_write_end(pos, len, copied, folio);
/*
* No need to use i_size_read() here, the i_size cannot change under us
@@ -2361,9 +2390,8 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
{
struct inode *inode = folio->mapping->host;
sector_t iblock, lblock;
- struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
+ struct buffer_head *bh, *head, *prev = NULL;
size_t blocksize;
- int nr, i;
int fully_mapped = 1;
bool page_error = false;
loff_t limit = i_size_read(inode);
@@ -2372,16 +2400,12 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
limit = inode->i_sb->s_maxbytes;
- VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
-
head = folio_create_buffers(folio, inode, 0);
blocksize = head->b_size;
iblock = div_u64(folio_pos(folio), blocksize);
lblock = div_u64(limit + blocksize - 1, blocksize);
bh = head;
- nr = 0;
- i = 0;
do {
if (buffer_uptodate(bh))
@@ -2398,7 +2422,7 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
page_error = true;
}
if (!buffer_mapped(bh)) {
- folio_zero_range(folio, i * blocksize,
+ folio_zero_range(folio, bh_offset(bh),
blocksize);
if (!err)
set_buffer_uptodate(bh);
@@ -2411,40 +2435,33 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
if (buffer_uptodate(bh))
continue;
}
- arr[nr++] = bh;
- } while (i++, iblock++, (bh = bh->b_this_page) != head);
- if (fully_mapped)
- folio_set_mappedtodisk(folio);
-
- if (!nr) {
- /*
- * All buffers are uptodate or get_block() returned an
- * error when trying to map them - we can finish the read.
- */
- folio_end_read(folio, !page_error);
- return 0;
- }
-
- /* Stage two: lock the buffers */
- for (i = 0; i < nr; i++) {
- bh = arr[i];
lock_buffer(bh);
+ if (buffer_uptodate(bh)) {
+ unlock_buffer(bh);
+ continue;
+ }
+
mark_buffer_async_read(bh);
- }
+ if (prev)
+ submit_bh(REQ_OP_READ, prev);
+ prev = bh;
+ } while (iblock++, (bh = bh->b_this_page) != head);
+
+ if (fully_mapped)
+ folio_set_mappedtodisk(folio);
/*
- * Stage 3: start the IO. Check for uptodateness
- * inside the buffer lock in case another process reading
- * the underlying blockdev brought it uptodate (the sct fix).
+ * All buffers are uptodate or get_block() returned an error
+ * when trying to map them - we must finish the read because
+ * end_buffer_async_read() will never be called on any buffer
+ * in this folio.
*/
- for (i = 0; i < nr; i++) {
- bh = arr[i];
- if (buffer_uptodate(bh))
- end_buffer_async_read(bh, 1);
- else
- submit_bh(REQ_OP_READ, bh);
- }
+ if (prev)
+ submit_bh(REQ_OP_READ, prev);
+ else
+ folio_end_read(folio, !page_error);
+
return 0;
}
EXPORT_SYMBOL(block_read_full_folio);
@@ -2477,7 +2494,8 @@ out:
}
EXPORT_SYMBOL(generic_cont_expand_simple);
-static int cont_expand_zero(struct file *file, struct address_space *mapping,
+static int cont_expand_zero(const struct kiocb *iocb,
+ struct address_space *mapping,
loff_t pos, loff_t *bytes)
{
struct inode *inode = mapping->host;
@@ -2501,12 +2519,12 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
}
len = PAGE_SIZE - zerofrom;
- err = aops->write_begin(file, mapping, curpos, len,
+ err = aops->write_begin(iocb, mapping, curpos, len,
&folio, &fsdata);
if (err)
goto out;
folio_zero_range(folio, offset_in_folio(folio, curpos), len);
- err = aops->write_end(file, mapping, curpos, len, len,
+ err = aops->write_end(iocb, mapping, curpos, len, len,
folio, fsdata);
if (err < 0)
goto out;
@@ -2534,12 +2552,12 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
}
len = offset - zerofrom;
- err = aops->write_begin(file, mapping, curpos, len,
+ err = aops->write_begin(iocb, mapping, curpos, len,
&folio, &fsdata);
if (err)
goto out;
folio_zero_range(folio, offset_in_folio(folio, curpos), len);
- err = aops->write_end(file, mapping, curpos, len, len,
+ err = aops->write_end(iocb, mapping, curpos, len, len,
folio, fsdata);
if (err < 0)
goto out;
@@ -2554,17 +2572,16 @@ out:
* For moronic filesystems that do not allow holes in file.
* We may have to extend the file.
*/
-int cont_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct folio **foliop, void **fsdata,
- get_block_t *get_block, loff_t *bytes)
+int cont_write_begin(const struct kiocb *iocb, struct address_space *mapping,
+ loff_t pos, unsigned len, struct folio **foliop,
+ void **fsdata, get_block_t *get_block, loff_t *bytes)
{
struct inode *inode = mapping->host;
unsigned int blocksize = i_blocksize(inode);
unsigned int zerofrom;
int err;
- err = cont_expand_zero(file, mapping, pos, bytes);
+ err = cont_expand_zero(iocb, mapping, pos, bytes);
if (err)
return err;
@@ -2578,13 +2595,6 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
}
EXPORT_SYMBOL(cont_write_begin);
-void block_commit_write(struct page *page, unsigned from, unsigned to)
-{
- struct folio *folio = page_folio(page);
- __block_commit_write(folio, from, to);
-}
-EXPORT_SYMBOL(block_commit_write);
-
/*
* block_page_mkwrite() is not allowed to change the file size as it gets
* called from a page fault handler when a page is first dirtied. Hence we must
@@ -2593,7 +2603,7 @@ EXPORT_SYMBOL(block_commit_write);
* holes and correct delalloc and unwritten extent mapping on filesystems that
* support these features.
*
- * We are not allowed to take the i_mutex here so we have to play games to
+ * We are not allowed to take the i_rwsem here so we have to play games to
* protect against truncate races as the page could now be beyond EOF. Because
* truncate writes the inode size before removing pages, once we have the
* page lock we can determine safely if the page is beyond EOF. If it is not
@@ -2630,7 +2640,7 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
if (unlikely(ret))
goto out_unlock;
- __block_commit_write(folio, 0, end);
+ block_commit_write(folio, 0, end);
folio_mark_dirty(folio);
folio_wait_stable(folio);
@@ -2713,7 +2723,7 @@ unlock:
EXPORT_SYMBOL(block_truncate_page);
/*
- * The generic ->writepage function for buffer-backed address_spaces
+ * The generic write folio function for buffer-backed address_spaces
*/
int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
void *get_block)
@@ -2722,7 +2732,7 @@ int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
loff_t i_size = i_size_read(inode);
/* Is the folio fully inside i_size? */
- if (folio_pos(folio) + folio_size(folio) <= i_size)
+ if (folio_next_pos(folio) <= i_size)
return __block_write_full_folio(inode, folio, get_block, wbc);
/* Is the folio fully outside i_size? (truncate in progress) */
@@ -2733,7 +2743,7 @@ int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
/*
* The folio straddles i_size. It must be zeroed out on each and every
- * writepage invocation because it may be mmapped. "A file is mapped
+ * writeback invocation because it may be mmapped. "A file is mapped
* in multiples of the page size. For a file that is not a multiple of
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
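The fs/buffer.c hunks above split the pagecache buffer lookup into an atomic variant, __find_get_block(), which serializes against try_to_free_buffers() via i_private_lock, and a sleeping variant, __find_get_block_nonatomic(), which takes the folio lock instead; bdev_getblk() picks between them based on whether its gfp mask allows blocking. A minimal caller sketch of that same dispatch, assuming only the interfaces shown in the hunks (the helper name example_find_block() is hypothetical, not part of the patch):

/*
 * Sketch only: choose the lookup variant from the gfp flags, exactly as
 * the bdev_getblk() hunk above does. example_find_block() is a
 * hypothetical helper, not something added by this patch.
 */
static struct buffer_head *example_find_block(struct block_device *bdev,
					      sector_t block, unsigned size,
					      gfp_t gfp)
{
	if (gfpflags_allow_blocking(gfp))
		return __find_get_block_nonatomic(bdev, block, size);
	return __find_get_block(bdev, block, size);
}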
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 89b11336a836..1806bff8e59b 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -15,6 +15,7 @@
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/mount.h>
+#include <linux/security.h>
#include <linux/statfs.h>
#include <linux/ctype.h>
#include <linux/string.h>
@@ -576,7 +577,7 @@ static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
*/
static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
{
- char *secctx;
+ int err;
_enter(",%s", args);
@@ -585,16 +586,16 @@ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
return -EINVAL;
}
- if (cache->secctx) {
+ if (cache->have_secid) {
pr_err("Second security context specified\n");
return -EINVAL;
}
- secctx = kstrdup(args, GFP_KERNEL);
- if (!secctx)
- return -ENOMEM;
+ err = security_secctx_to_secid(args, strlen(args), &cache->secid);
+ if (err)
+ return err;
- cache->secctx = secctx;
+ cache->have_secid = true;
return 0;
}
@@ -820,7 +821,6 @@ static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
put_cred(cache->cache_cred);
kfree(cache->rootdirname);
- kfree(cache->secctx);
kfree(cache->tag);
_leave("");
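The daemon.c hunk above stops caching the raw LSM context string and instead resolves it to a secid once, when the "secctx" command is processed; the security.c hunk further below then applies that stored secid with set_security_override() while preparing the cache credentials. A condensed sketch of that two-step flow, using only the calls visible in these hunks (example_apply_secctx() is hypothetical):

/*
 * Sketch only: resolve a context string to a secid up front, then apply
 * it to an already-prepared credential, as the cachefiles hunks do in
 * two places. example_apply_secctx() is hypothetical.
 */
static int example_apply_secctx(struct cred *new, const char *ctx)
{
	u32 secid;
	int err;

	err = security_secctx_to_secid(ctx, strlen(ctx), &secid);
	if (err)
		return err;

	return set_security_override(new, secid);
}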
diff --git a/fs/cachefiles/error_inject.c b/fs/cachefiles/error_inject.c
index 1715d5ca2b2d..e341ade47dd8 100644
--- a/fs/cachefiles/error_inject.c
+++ b/fs/cachefiles/error_inject.c
@@ -11,7 +11,7 @@
unsigned int cachefiles_error_injection_state;
static struct ctl_table_header *cachefiles_sysctl;
-static struct ctl_table cachefiles_sysctls[] = {
+static const struct ctl_table cachefiles_sysctls[] = {
{
.procname = "error_injection",
.data = &cachefiles_error_injection_state,
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 3e63cfe15874..a08250d244ea 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -9,6 +9,7 @@
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/file.h>
+#include <linux/namei.h>
#include <linux/falloc.h>
#include <trace/events/fscache.h>
#include "internal.h"
@@ -428,11 +429,13 @@ static bool cachefiles_invalidate_cookie(struct fscache_cookie *cookie)
if (!old_tmpfile) {
struct cachefiles_volume *volume = object->volume;
struct dentry *fan = volume->fanout[(u8)cookie->key_hash];
+ struct dentry *obj;
- inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
- cachefiles_bury_object(volume->cache, object, fan,
- old_file->f_path.dentry,
- FSCACHE_OBJECT_INVALIDATED);
+ obj = start_removing_dentry(fan, old_file->f_path.dentry);
+ if (!IS_ERR(obj))
+ cachefiles_bury_object(volume->cache, object,
+ fan, obj,
+ FSCACHE_OBJECT_INVALIDATED);
}
fput(old_file);
}
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 7b99bd98de75..b62cd3e9a18e 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -71,7 +71,6 @@ struct cachefiles_object {
int debug_id;
spinlock_t lock;
refcount_t ref;
- u8 d_name_len; /* Length of filename */
enum cachefiles_content content_info:8; /* Info about content presence */
unsigned long flags;
#define CACHEFILES_OBJECT_USING_TMPFILE 0 /* Have an unlinked tmpfile */
@@ -122,7 +121,6 @@ struct cachefiles_cache {
#define CACHEFILES_STATE_CHANGED 3 /* T if state changed (poll trigger) */
#define CACHEFILES_ONDEMAND_MODE 4 /* T if in on-demand read mode */
char *rootdirname; /* name of cache root directory */
- char *secctx; /* LSM security context */
char *tag; /* cache binding tag */
refcount_t unbind_pincount;/* refcount to do daemon unbind */
struct xarray reqs; /* xarray of pending on-demand requests */
@@ -130,6 +128,8 @@ struct cachefiles_cache {
struct xarray ondemand_ids; /* xarray for ondemand_id allocation */
u32 ondemand_id_next;
u32 msg_id_next;
+ u32 secid; /* LSM security id */
+ bool have_secid; /* whether "secid" was set */
};
static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index 6a821a959b59..3e0576d9db1d 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -13,6 +13,7 @@
#include <linux/falloc.h>
#include <linux/sched/mm.h>
#include <trace/events/fscache.h>
+#include <trace/events/netfs.h>
#include "internal.h"
struct cachefiles_kiocb {
@@ -62,7 +63,7 @@ static void cachefiles_read_complete(struct kiocb *iocb, long ret)
ret = -ESTALE;
}
- ki->term_func(ki->term_func_priv, ret, ki->was_async);
+ ki->term_func(ki->term_func_priv, ret);
}
cachefiles_put_kiocb(ki);
@@ -187,7 +188,7 @@ in_progress:
presubmission_error:
if (term_func)
- term_func(term_func_priv, ret < 0 ? ret : skipped, false);
+ term_func(term_func_priv, ret < 0 ? ret : skipped);
return ret;
}
@@ -270,7 +271,7 @@ static void cachefiles_write_complete(struct kiocb *iocb, long ret)
atomic_long_sub(ki->b_writing, &object->volume->cache->b_writing);
set_bit(FSCACHE_COOKIE_HAVE_DATA, &object->cookie->flags);
if (ki->term_func)
- ki->term_func(ki->term_func_priv, ret, ki->was_async);
+ ki->term_func(ki->term_func_priv, ret);
cachefiles_put_kiocb(ki);
}
@@ -300,7 +301,7 @@ int __cachefiles_write(struct cachefiles_object *object,
ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
if (!ki) {
if (term_func)
- term_func(term_func_priv, -ENOMEM, false);
+ term_func(term_func_priv, -ENOMEM);
return -ENOMEM;
}
@@ -346,8 +347,6 @@ int __cachefiles_write(struct cachefiles_object *object,
default:
ki->was_async = false;
cachefiles_write_complete(&ki->iocb, ret);
- if (ret > 0)
- ret = 0;
break;
}
@@ -365,7 +364,8 @@ static int cachefiles_write(struct netfs_cache_resources *cres,
{
if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE)) {
if (term_func)
- term_func(term_func_priv, -ENOBUFS, false);
+ term_func(term_func_priv, -ENOBUFS);
+ trace_netfs_sreq(term_func_priv, netfs_sreq_trace_cache_nowrite);
return -ENOBUFS;
}
@@ -663,7 +663,7 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
pre = CACHEFILES_DIO_BLOCK_SIZE - off;
if (pre >= len) {
fscache_count_dio_misfit();
- netfs_write_subrequest_terminated(subreq, len, false);
+ netfs_write_subrequest_terminated(subreq, len);
return;
}
subreq->transferred += pre;
@@ -689,21 +689,23 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
len -= post;
if (len == 0) {
fscache_count_dio_misfit();
- netfs_write_subrequest_terminated(subreq, post, false);
+ netfs_write_subrequest_terminated(subreq, post);
return;
}
iov_iter_truncate(&subreq->io_iter, len);
}
+ trace_netfs_sreq(subreq, netfs_sreq_trace_cache_prepare);
cachefiles_begin_secure(cache, &saved_cred);
ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres),
&start, &len, len, true);
cachefiles_end_secure(cache, saved_cred);
if (ret < 0) {
- netfs_write_subrequest_terminated(subreq, ret, false);
+ netfs_write_subrequest_terminated(subreq, ret);
return;
}
+ trace_netfs_sreq(subreq, netfs_sreq_trace_cache_write);
cachefiles_write(&subreq->rreq->cache_resources,
subreq->start, &subreq->io_iter,
netfs_write_subrequest_terminated, subreq);
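The io.c hunk above drops the was_async argument from the cachefiles termination callbacks and from netfs_write_subrequest_terminated(); completion handlers are now invoked as term_func(priv, ret). A sketch of a handler written against that narrower shape, with the prototype inferred purely from the call sites in this hunk (struct example_req and example_write_done() are hypothetical and assume <linux/completion.h>):

/*
 * Sketch only: a termination handler matching the two-argument
 * term_func(priv, ret) shape used above. The struct and function are
 * hypothetical; the prototype is inferred from the call sites.
 */
struct example_req {
	struct completion done;
	ssize_t result;
};

static void example_write_done(void *priv, ssize_t ret)
{
	struct example_req *req = priv;

	req->result = ret;
	complete(&req->done);
}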
diff --git a/fs/cachefiles/key.c b/fs/cachefiles/key.c
index bf935e25bdbe..aae86af48ed5 100644
--- a/fs/cachefiles/key.c
+++ b/fs/cachefiles/key.c
@@ -8,7 +8,7 @@
#include <linux/slab.h>
#include "internal.h"
-static const char cachefiles_charmap[64] =
+static const char cachefiles_charmap[64] __nonstring =
"0123456789" /* 0 - 9 */
"abcdefghijklmnopqrstuvwxyz" /* 10 - 35 */
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" /* 36 - 61 */
@@ -132,7 +132,6 @@ bool cachefiles_cook_key(struct cachefiles_object *object)
success:
name[len] = 0;
object->d_name = name;
- object->d_name_len = len;
_leave(" = %s", object->d_name);
return true;
}
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 7cf59713f0f7..e5ec90dccc27 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -93,12 +93,11 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
_enter(",,%s", dirname);
/* search the current directory for the element name */
- inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
retry:
ret = cachefiles_inject_read_error();
if (ret == 0)
- subdir = lookup_one_len(dirname, dir, strlen(dirname));
+ subdir = start_creating(&nop_mnt_idmap, dir, &QSTR(dirname));
else
subdir = ERR_PTR(ret);
trace_cachefiles_lookup(NULL, dir, subdir);
@@ -129,17 +128,21 @@ retry:
if (ret < 0)
goto mkdir_error;
ret = cachefiles_inject_write_error();
- if (ret == 0)
- ret = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), subdir, 0700);
- if (ret < 0) {
+ if (ret == 0) {
+ subdir = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), subdir, 0700, NULL);
+ } else {
+ end_creating(subdir);
+ subdir = ERR_PTR(ret);
+ }
+ if (IS_ERR(subdir)) {
trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
cachefiles_trace_mkdir_error);
goto mkdir_error;
}
trace_cachefiles_mkdir(dir, subdir);
- if (unlikely(d_unhashed(subdir))) {
- cachefiles_put_directory(subdir);
+ if (unlikely(d_unhashed(subdir) || d_is_negative(subdir))) {
+ end_creating(subdir);
goto retry;
}
ASSERT(d_backing_inode(subdir));
@@ -152,7 +155,7 @@ retry:
/* Tell rmdir() it's not allowed to delete the subdir */
inode_lock(d_inode(subdir));
- inode_unlock(d_inode(dir));
+ end_creating_keep(subdir);
if (!__cachefiles_mark_inode_in_use(NULL, d_inode(subdir))) {
pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
@@ -194,13 +197,11 @@ mark_error:
return ERR_PTR(-EBUSY);
mkdir_error:
- inode_unlock(d_inode(dir));
- dput(subdir);
+ end_creating(subdir);
pr_err("mkdir %s failed with error %d\n", dirname, ret);
return ERR_PTR(ret);
lookup_error:
- inode_unlock(d_inode(dir));
ret = PTR_ERR(subdir);
pr_err("Lookup %s failed with error %d\n", dirname, ret);
return ERR_PTR(ret);
@@ -260,6 +261,8 @@ static int cachefiles_unlink(struct cachefiles_cache *cache,
* - File backed objects are unlinked
* - Directory backed objects are stuffed into the graveyard for userspace to
* delete
+ * On entry dir must be locked. It will be unlocked on exit.
+ * On entry there must be at least 2 refs on rep; one will be dropped on exit.
*/
int cachefiles_bury_object(struct cachefiles_cache *cache,
struct cachefiles_object *object,
@@ -275,27 +278,23 @@ int cachefiles_bury_object(struct cachefiles_cache *cache,
_enter(",'%pd','%pd'", dir, rep);
if (rep->d_parent != dir) {
- inode_unlock(d_inode(dir));
+ end_removing(rep);
_leave(" = -ESTALE");
return -ESTALE;
}
/* non-directories can just be unlinked */
if (!d_is_dir(rep)) {
- dget(rep); /* Stop the dentry being negated if it's only pinned
- * by a file struct.
- */
ret = cachefiles_unlink(cache, object, dir, rep, why);
- dput(rep);
+ end_removing(rep);
- inode_unlock(d_inode(dir));
_leave(" = %d", ret);
return ret;
}
/* directories have to be moved to the graveyard */
_debug("move stale object to graveyard");
- inode_unlock(d_inode(dir));
+ end_removing(rep);
try_again:
/* first step is to make up a grave dentry in the graveyard */
@@ -335,7 +334,7 @@ try_again:
return -EIO;
}
- grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
+ grave = lookup_one(&nop_mnt_idmap, &QSTR(nbuffer), cache->graveyard);
if (IS_ERR(grave)) {
unlock_rename(cache->graveyard, dir);
trace_cachefiles_vfs_error(object, d_inode(cache->graveyard),
@@ -384,11 +383,10 @@ try_again:
cachefiles_io_error(cache, "Rename security error %d", ret);
} else {
struct renamedata rd = {
- .old_mnt_idmap = &nop_mnt_idmap,
- .old_dir = d_inode(dir),
+ .mnt_idmap = &nop_mnt_idmap,
+ .old_parent = dir,
.old_dentry = rep,
- .new_mnt_idmap = &nop_mnt_idmap,
- .new_dir = d_inode(cache->graveyard),
+ .new_parent = cache->graveyard,
.new_dentry = grave,
};
trace_cachefiles_rename(object, d_inode(rep)->i_ino, why);
@@ -423,13 +421,12 @@ int cachefiles_delete_object(struct cachefiles_object *object,
_enter(",OBJ%x{%pD}", object->debug_id, object->file);
- /* Stop the dentry being negated if it's only pinned by a file struct. */
- dget(dentry);
-
- inode_lock_nested(d_backing_inode(fan), I_MUTEX_PARENT);
- ret = cachefiles_unlink(volume->cache, object, fan, dentry, why);
- inode_unlock(d_backing_inode(fan));
- dput(dentry);
+ dentry = start_removing_dentry(fan, dentry);
+ if (IS_ERR(dentry))
+ ret = PTR_ERR(dentry);
+ else
+ ret = cachefiles_unlink(volume->cache, object, fan, dentry, why);
+ end_removing(dentry);
return ret;
}
@@ -627,8 +624,8 @@ bool cachefiles_look_up_object(struct cachefiles_object *object)
/* Look up path "cache/vol/fanout/file". */
ret = cachefiles_inject_read_error();
if (ret == 0)
- dentry = lookup_positive_unlocked(object->d_name, fan,
- object->d_name_len);
+ dentry = lookup_one_positive_unlocked(&nop_mnt_idmap,
+ &QSTR(object->d_name), fan);
else
dentry = ERR_PTR(ret);
trace_cachefiles_lookup(object, fan, dentry);
@@ -642,9 +639,13 @@ bool cachefiles_look_up_object(struct cachefiles_object *object)
if (!d_is_reg(dentry)) {
pr_err("%pd is not a file\n", dentry);
- inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
- ret = cachefiles_bury_object(volume->cache, object, fan, dentry,
- FSCACHE_OBJECT_IS_WEIRD);
+ struct dentry *de = start_removing_dentry(fan, dentry);
+ if (IS_ERR(de))
+ ret = PTR_ERR(de);
+ else
+ ret = cachefiles_bury_object(volume->cache, object,
+ fan, de,
+ FSCACHE_OBJECT_IS_WEIRD);
dput(dentry);
if (ret < 0)
return false;
@@ -677,36 +678,41 @@ bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
_enter(",%pD", object->file);
- inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
ret = cachefiles_inject_read_error();
if (ret == 0)
- dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
+ dentry = start_creating(&nop_mnt_idmap, fan, &QSTR(object->d_name));
else
dentry = ERR_PTR(ret);
if (IS_ERR(dentry)) {
trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
cachefiles_trace_lookup_error);
_debug("lookup fail %ld", PTR_ERR(dentry));
- goto out_unlock;
+ goto out;
}
- if (!d_is_negative(dentry)) {
+ /*
+ * This loop will only execute more than once if some other thread
+ * races to create the object we are trying to create.
+ */
+ while (!d_is_negative(dentry)) {
ret = cachefiles_unlink(volume->cache, object, fan, dentry,
FSCACHE_OBJECT_IS_STALE);
if (ret < 0)
- goto out_dput;
+ goto out_end;
+
+ end_creating(dentry);
- dput(dentry);
ret = cachefiles_inject_read_error();
if (ret == 0)
- dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
+ dentry = start_creating(&nop_mnt_idmap, fan,
+ &QSTR(object->d_name));
else
dentry = ERR_PTR(ret);
if (IS_ERR(dentry)) {
trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
cachefiles_trace_lookup_error);
_debug("lookup fail %ld", PTR_ERR(dentry));
- goto out_unlock;
+ goto out;
}
}
@@ -727,10 +733,9 @@ bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
success = true;
}
-out_dput:
- dput(dentry);
-out_unlock:
- inode_unlock(d_inode(fan));
+out_end:
+ end_creating(dentry);
+out:
_leave(" = %u", success);
return success;
}
@@ -746,26 +751,20 @@ static struct dentry *cachefiles_lookup_for_cull(struct cachefiles_cache *cache,
struct dentry *victim;
int ret = -ENOENT;
- inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
+ victim = start_removing(&nop_mnt_idmap, dir, &QSTR(filename));
- victim = lookup_one_len(filename, dir, strlen(filename));
if (IS_ERR(victim))
goto lookup_error;
- if (d_is_negative(victim))
- goto lookup_put;
if (d_inode(victim)->i_flags & S_KERNEL_FILE)
goto lookup_busy;
return victim;
lookup_busy:
ret = -EBUSY;
-lookup_put:
- inode_unlock(d_inode(dir));
- dput(victim);
+ end_removing(victim);
return ERR_PTR(ret);
lookup_error:
- inode_unlock(d_inode(dir));
ret = PTR_ERR(victim);
if (ret == -ENOENT)
return ERR_PTR(-ESTALE); /* Probably got retired by the netfs */
@@ -813,18 +812,17 @@ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
ret = cachefiles_bury_object(cache, NULL, dir, victim,
FSCACHE_OBJECT_WAS_CULLED);
+ dput(victim);
if (ret < 0)
goto error;
fscache_count_culled();
- dput(victim);
_leave(" = 0");
return 0;
error_unlock:
- inode_unlock(d_inode(dir));
+ end_removing(victim);
error:
- dput(victim);
if (ret == -ENOENT)
return -ESTALE; /* Probably got retired by the netfs */
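The namei.c conversion above replaces the open-coded inode_lock_nested() + lookup_one_len()/dget() + inode_unlock()/dput() sequences with the start_creating()/start_removing()/start_removing_dentry() helpers and their end_*() counterparts, which bundle the parent locking and child pinning. A sketch of the removal pattern as cachefiles_delete_object() now uses it, with the helper semantics taken only from the hunks above (example_remove_child() is hypothetical, and the actual work on the victim is elided):

/*
 * Sketch only: the removal pattern used by cachefiles_delete_object()
 * above. start_removing_dentry() is assumed to lock the parent and pin
 * the child (or return an ERR_PTR), and end_removing() to undo both, as
 * the hunks suggest. example_remove_child() is hypothetical.
 */
static int example_remove_child(struct dentry *parent, struct dentry *child)
{
	struct dentry *victim;
	int ret = 0;

	victim = start_removing_dentry(parent, child);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	/* ... unlink or bury the locked, pinned victim here ... */

	end_removing(victim);
	return ret;
}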
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index fe3de9ad57bf..a7ed86fa98bb 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -83,10 +83,8 @@ static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len);
ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
- if (!ret) {
- ret = len;
+ if (ret > 0)
kiocb->ki_pos += ret;
- }
out:
fput(file);
@@ -317,8 +315,9 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
goto err_free_id;
}
- anon_file->file = anon_inode_getfile("[cachefiles]",
- &cachefiles_ondemand_fd_fops, object, O_WRONLY);
+ anon_file->file = anon_inode_getfile_fmode("[cachefiles]",
+ &cachefiles_ondemand_fd_fops, object,
+ O_WRONLY, FMODE_PWRITE | FMODE_LSEEK);
if (IS_ERR(anon_file->file)) {
ret = PTR_ERR(anon_file->file);
goto err_put_fd;
@@ -333,8 +332,6 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
goto err_put_file;
}
- anon_file->file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
-
load = (void *)req->msg.data;
load->fd = anon_file->fd;
object->ondemand->ondemand_id = object_id;
diff --git a/fs/cachefiles/security.c b/fs/cachefiles/security.c
index fe777164f1d8..fc6611886b3b 100644
--- a/fs/cachefiles/security.c
+++ b/fs/cachefiles/security.c
@@ -18,7 +18,7 @@ int cachefiles_get_security_ID(struct cachefiles_cache *cache)
struct cred *new;
int ret;
- _enter("{%s}", cache->secctx);
+ _enter("{%u}", cache->have_secid ? cache->secid : 0);
new = prepare_kernel_cred(current);
if (!new) {
@@ -26,8 +26,8 @@ int cachefiles_get_security_ID(struct cachefiles_cache *cache)
goto error;
}
- if (cache->secctx) {
- ret = set_security_override_from_ctx(new, cache->secctx);
+ if (cache->have_secid) {
+ ret = set_security_override(new, cache->secid);
if (ret < 0) {
put_cred(new);
pr_err("Security denies permission to nominate security context: error %d\n",
diff --git a/fs/cachefiles/volume.c b/fs/cachefiles/volume.c
index 781aac4ef274..90ba926f488e 100644
--- a/fs/cachefiles/volume.c
+++ b/fs/cachefiles/volume.c
@@ -7,6 +7,7 @@
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/namei.h>
#include "internal.h"
#include <trace/events/fscache.h>
@@ -58,9 +59,11 @@ retry:
if (ret < 0) {
if (ret != -ESTALE)
goto error_dir;
- inode_lock_nested(d_inode(cache->store), I_MUTEX_PARENT);
- cachefiles_bury_object(cache, NULL, cache->store, vdentry,
- FSCACHE_VOLUME_IS_WEIRD);
+ vdentry = start_removing_dentry(cache->store, vdentry);
+ if (!IS_ERR(vdentry))
+ cachefiles_bury_object(cache, NULL, cache->store,
+ vdentry,
+ FSCACHE_VOLUME_IS_WEIRD);
cachefiles_put_directory(volume->dentry);
cond_resched();
goto retry;
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index 7c6f260a3be5..52383b1d0ba6 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -77,6 +77,7 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object)
trace_cachefiles_vfs_error(object, file_inode(file), ret,
cachefiles_trace_setxattr_error);
trace_cachefiles_coherency(object, file_inode(file)->i_ino,
+ be64_to_cpup((__be64 *)buf->data),
buf->content,
cachefiles_coherency_set_fail);
if (ret != -ENOMEM)
@@ -85,6 +86,7 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object)
"Failed to set xattr with error %d", ret);
} else {
trace_cachefiles_coherency(object, file_inode(file)->i_ino,
+ be64_to_cpup((__be64 *)buf->data),
buf->content,
cachefiles_coherency_set_ok);
}
@@ -126,7 +128,10 @@ int cachefiles_check_auxdata(struct cachefiles_object *object, struct file *file
object,
"Failed to read aux with error %zd", xlen);
why = cachefiles_coherency_check_xattr;
- } else if (buf->type != CACHEFILES_COOKIE_TYPE_DATA) {
+ goto out;
+ }
+
+ if (buf->type != CACHEFILES_COOKIE_TYPE_DATA) {
why = cachefiles_coherency_check_type;
} else if (memcmp(buf->data, p, len) != 0) {
why = cachefiles_coherency_check_aux;
@@ -141,7 +146,9 @@ int cachefiles_check_auxdata(struct cachefiles_object *object, struct file *file
ret = 0;
}
+out:
trace_cachefiles_coherency(object, file_inode(file)->i_ino,
+ be64_to_cpup((__be64 *)buf->data),
buf->content, why);
kfree(buf);
return ret;
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
index 7249d70e1a43..3e7def3d31c1 100644
--- a/fs/ceph/Kconfig
+++ b/fs/ceph/Kconfig
@@ -3,7 +3,7 @@ config CEPH_FS
tristate "Ceph distributed file system"
depends on INET
select CEPH_LIB
- select LIBCRC32C
+ select CRC32
select CRYPTO_AES
select CRYPTO
select NETFS_SUPPORT
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 85936f6d2bf7..63b75d214210 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -82,6 +82,7 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
{
struct inode *inode = mapping->host;
struct ceph_client *cl = ceph_inode_to_client(inode);
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
struct ceph_inode_info *ci;
struct ceph_snap_context *snapc;
@@ -92,6 +93,8 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
return false;
}
+ atomic64_inc(&mdsc->dirty_folios);
+
ci = ceph_inode(inode);
/* dirty the head */
@@ -223,15 +226,19 @@ static void finish_netfs_read(struct ceph_osd_request *req)
subreq->len, i_size_read(req->r_inode));
/* no object means success but no data */
- if (err == -ENOENT)
+ if (err == -ENOENT) {
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
err = 0;
- else if (err == -EBLOCKLISTED)
+ } else if (err == -EBLOCKLISTED) {
fsc->blocklisted = true;
+ }
if (err >= 0) {
if (sparse && err > 0)
err = ceph_sparse_ext_map_end(op);
if (err < subreq->len &&
+ subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
subreq->rreq->origin != NETFS_DIO_READ)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
if (IS_ENCRYPTED(inode) && err > 0) {
@@ -242,6 +249,8 @@ static void finish_netfs_read(struct ceph_osd_request *req)
if (err > subreq->len)
err = subreq->len;
}
+ if (err > 0)
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
}
if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
@@ -253,8 +262,9 @@ static void finish_netfs_read(struct ceph_osd_request *req)
subreq->transferred = err;
err = 0;
}
+ subreq->error = err;
trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
- netfs_read_subreq_terminated(subreq, err, false);
+ netfs_read_subreq_terminated(subreq);
iput(req->r_inode);
ceph_dec_osd_stopping_blocker(fsc->mdsc);
}
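With the netfs change reflected here, finish_netfs_read() no longer passes the result to netfs_read_subreq_terminated(); it records the outcome on the subrequest (subreq->transferred and subreq->error) and then calls the argument-less terminator. A sketch of that completion shape, using only the fields and calls visible in this hunk (example_complete_read() is hypothetical):

/*
 * Sketch only: record the outcome on the subrequest, then terminate it,
 * as finish_netfs_read() does above. example_complete_read() is
 * hypothetical.
 */
static void example_complete_read(struct netfs_io_subrequest *subreq,
				  ssize_t result)
{
	if (result > 0) {
		subreq->transferred = result;
		result = 0;
	}
	subreq->error = result;
	netfs_read_subreq_terminated(subreq);
}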
@@ -272,7 +282,8 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
size_t len;
int mode;
- if (rreq->origin != NETFS_DIO_READ)
+ if (rreq->origin != NETFS_UNBUFFERED_READ &&
+ rreq->origin != NETFS_DIO_READ)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
@@ -314,7 +325,9 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
ceph_mdsc_put_request(req);
out:
- netfs_read_subreq_terminated(subreq, err, false);
+ subreq->error = err;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
+ netfs_read_subreq_terminated(subreq);
return true;
}
@@ -396,6 +409,15 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
struct page **pages;
size_t page_off;
+ /*
+ * FIXME: io_iter.count needs to be corrected to aligned
+ * length. Otherwise, iov_iter_get_pages_alloc2() operates
+ * with the initial unaligned length value. As a result,
+	 * ceph_msg_data_cursor_init() triggers BUG_ON() when
+	 * msg->sparse_read_total > msg->data_length.
+ */
+ subreq->io_iter.count = len;
+
err = iov_iter_get_pages_alloc2(&subreq->io_iter, &pages, len, &page_off);
if (err < 0) {
doutc(cl, "%llx.%llx failed to allocate pages, %d\n",
@@ -426,8 +448,10 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
ceph_osdc_start_request(req->r_osdc, req);
out:
ceph_osdc_put_request(req);
- if (err)
- netfs_read_subreq_terminated(subreq, err, false);
+ if (err) {
+ subreq->error = err;
+ netfs_read_subreq_terminated(subreq);
+ }
doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err);
}
@@ -526,7 +550,7 @@ static void ceph_set_page_fscache(struct page *page)
folio_start_private_2(page_folio(page)); /* [DEPRECATED] */
}
-static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
+static void ceph_fscache_write_terminated(void *priv, ssize_t error)
{
struct inode *inode = priv;
@@ -558,7 +582,36 @@ struct ceph_writeback_ctl
u64 truncate_size;
u32 truncate_seq;
bool size_stable;
+
bool head_snapc;
+ struct ceph_snap_context *snapc;
+ struct ceph_snap_context *last_snapc;
+
+ bool done;
+ bool should_loop;
+ bool range_whole;
+ pgoff_t start_index;
+ pgoff_t index;
+ pgoff_t end;
+ xa_mark_t tag;
+
+ pgoff_t strip_unit_end;
+ unsigned int wsize;
+ unsigned int nr_folios;
+ unsigned int max_pages;
+ unsigned int locked_pages;
+
+ int op_idx;
+ int num_ops;
+ u64 offset;
+ u64 len;
+
+ struct folio_batch fbatch;
+ unsigned int processed_in_fbatch;
+
+ bool from_pool;
+ struct page **pages;
+ struct page **data_pages;
};
/*
@@ -656,22 +709,23 @@ static u64 get_writepages_data_length(struct inode *inode,
}
/*
- * Write a single page, but leave the page locked.
+ * Write a folio, but leave it locked.
*
* If we get a write error, mark the mapping for error, but still adjust the
- * dirty page accounting (i.e., page is no longer dirty).
+ * dirty page accounting (i.e., folio is no longer dirty).
*/
-static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
+static int write_folio_nounlock(struct folio *folio,
+ struct writeback_control *wbc)
{
- struct folio *folio = page_folio(page);
- struct inode *inode = page->mapping->host;
+ struct page *page = &folio->page;
+ struct inode *inode = folio->mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_client *cl = fsc->client;
struct ceph_snap_context *snapc, *oldest;
- loff_t page_off = page_offset(page);
+ loff_t page_off = folio_pos(folio);
int err;
- loff_t len = thp_size(page);
+ loff_t len = folio_size(folio);
loff_t wlen;
struct ceph_writeback_ctl ceph_wbc;
struct ceph_osd_client *osdc = &fsc->client->osdc;
@@ -679,27 +733,27 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
bool caching = ceph_is_cache_enabled(inode);
struct page *bounce_page = NULL;
- doutc(cl, "%llx.%llx page %p idx %lu\n", ceph_vinop(inode), page,
- page->index);
+ doutc(cl, "%llx.%llx folio %p idx %lu\n", ceph_vinop(inode), folio,
+ folio->index);
if (ceph_inode_is_shutdown(inode))
return -EIO;
/* verify this is a writeable snap context */
- snapc = page_snap_context(page);
+ snapc = page_snap_context(&folio->page);
if (!snapc) {
- doutc(cl, "%llx.%llx page %p not dirty?\n", ceph_vinop(inode),
- page);
+ doutc(cl, "%llx.%llx folio %p not dirty?\n", ceph_vinop(inode),
+ folio);
return 0;
}
oldest = get_oldest_context(inode, &ceph_wbc, snapc);
if (snapc->seq > oldest->seq) {
- doutc(cl, "%llx.%llx page %p snapc %p not writeable - noop\n",
- ceph_vinop(inode), page, snapc);
+ doutc(cl, "%llx.%llx folio %p snapc %p not writeable - noop\n",
+ ceph_vinop(inode), folio, snapc);
/* we should only noop if called by kswapd */
WARN_ON(!(current->flags & PF_MEMALLOC));
ceph_put_snap_context(oldest);
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
return 0;
}
ceph_put_snap_context(oldest);
@@ -716,8 +770,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
len = ceph_wbc.i_size - page_off;
wlen = IS_ENCRYPTED(inode) ? round_up(len, CEPH_FSCRYPT_BLOCK_SIZE) : len;
- doutc(cl, "%llx.%llx page %p index %lu on %llu~%llu snapc %p seq %lld\n",
- ceph_vinop(inode), page, page->index, page_off, wlen, snapc,
+ doutc(cl, "%llx.%llx folio %p index %lu on %llu~%llu snapc %p seq %lld\n",
+ ceph_vinop(inode), folio, folio->index, page_off, wlen, snapc,
snapc->seq);
if (atomic_long_inc_return(&fsc->writeback_count) >
@@ -730,32 +784,32 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
ceph_wbc.truncate_seq,
ceph_wbc.truncate_size, true);
if (IS_ERR(req)) {
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
return PTR_ERR(req);
}
if (wlen < len)
len = wlen;
- set_page_writeback(page);
+ folio_start_writeback(folio);
if (caching)
- ceph_set_page_fscache(page);
+ ceph_set_page_fscache(&folio->page);
ceph_fscache_write_to_cache(inode, page_off, len, caching);
if (IS_ENCRYPTED(inode)) {
- bounce_page = fscrypt_encrypt_pagecache_blocks(page,
+ bounce_page = fscrypt_encrypt_pagecache_blocks(folio,
CEPH_FSCRYPT_BLOCK_SIZE, 0,
GFP_NOFS);
if (IS_ERR(bounce_page)) {
- redirty_page_for_writepage(wbc, page);
- end_page_writeback(page);
+ folio_redirty_for_writepage(wbc, folio);
+ folio_end_writeback(folio);
ceph_osdc_put_request(req);
return PTR_ERR(bounce_page);
}
}
/* it may be a short write due to an object boundary */
- WARN_ON_ONCE(len > thp_size(page));
+ WARN_ON_ONCE(len > folio_size(folio));
osd_req_op_extent_osd_data_pages(req, 0,
bounce_page ? &bounce_page : &page, wlen, 0,
false, false);
@@ -781,25 +835,25 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
if (err == -ERESTARTSYS) {
/* killed by SIGKILL */
doutc(cl, "%llx.%llx interrupted page %p\n",
- ceph_vinop(inode), page);
- redirty_page_for_writepage(wbc, page);
- end_page_writeback(page);
+ ceph_vinop(inode), folio);
+ folio_redirty_for_writepage(wbc, folio);
+ folio_end_writeback(folio);
return err;
}
if (err == -EBLOCKLISTED)
fsc->blocklisted = true;
- doutc(cl, "%llx.%llx setting page/mapping error %d %p\n",
- ceph_vinop(inode), err, page);
+ doutc(cl, "%llx.%llx setting mapping error %d %p\n",
+ ceph_vinop(inode), err, folio);
mapping_set_error(&inode->i_data, err);
wbc->pages_skipped++;
} else {
doutc(cl, "%llx.%llx cleaned page %p\n",
- ceph_vinop(inode), page);
+ ceph_vinop(inode), folio);
err = 0; /* vfs expects us to return 0 */
}
- oldest = detach_page_private(page);
+ oldest = folio_detach_private(folio);
WARN_ON_ONCE(oldest != snapc);
- end_page_writeback(page);
+ folio_end_writeback(folio);
ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
ceph_put_snap_context(snapc); /* page's reference */
@@ -810,32 +864,6 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
return err;
}
-static int ceph_writepage(struct page *page, struct writeback_control *wbc)
-{
- int err;
- struct inode *inode = page->mapping->host;
- BUG_ON(!inode);
- ihold(inode);
-
- if (wbc->sync_mode == WB_SYNC_NONE &&
- ceph_inode_to_fs_client(inode)->write_congested) {
- redirty_page_for_writepage(wbc, page);
- return AOP_WRITEPAGE_ACTIVATE;
- }
-
- folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */
-
- err = writepage_nounlock(page, wbc);
- if (err == -ERESTARTSYS) {
- /* direct memory reclaimer was killed by SIGKILL. return 0
- * to prevent caller from setting mapping/page error */
- err = 0;
- }
- unlock_page(page);
- iput(inode);
- return err;
-}
-
/*
* async writeback completion handler.
*
@@ -855,6 +883,7 @@ static void writepages_finish(struct ceph_osd_request *req)
struct ceph_snap_context *snapc = req->r_snapc;
struct address_space *mapping = inode->i_mapping;
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
unsigned int len = 0;
bool remove_page;
@@ -910,6 +939,12 @@ static void writepages_finish(struct ceph_osd_request *req)
ceph_put_snap_context(detach_page_private(page));
end_page_writeback(page);
+
+ if (atomic64_dec_return(&mdsc->dirty_folios) <= 0) {
+ wake_up_all(&mdsc->flush_end_wq);
+ WARN_ON(atomic64_read(&mdsc->dirty_folios) < 0);
+ }
+
doutc(cl, "unlocking %p\n", page);
if (remove_page)
@@ -939,36 +974,13 @@ static void writepages_finish(struct ceph_osd_request *req)
ceph_dec_osd_stopping_blocker(fsc->mdsc);
}
-/*
- * initiate async writeback
- */
-static int ceph_writepages_start(struct address_space *mapping,
- struct writeback_control *wbc)
+static inline
+bool is_forced_umount(struct address_space *mapping)
{
struct inode *inode = mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_client *cl = fsc->client;
- struct ceph_vino vino = ceph_vino(inode);
- pgoff_t index, start_index, end = -1;
- struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
- struct folio_batch fbatch;
- int rc = 0;
- unsigned int wsize = i_blocksize(inode);
- struct ceph_osd_request *req = NULL;
- struct ceph_writeback_ctl ceph_wbc;
- bool should_loop, range_whole = false;
- bool done = false;
- bool caching = ceph_is_cache_enabled(inode);
- xa_mark_t tag;
-
- if (wbc->sync_mode == WB_SYNC_NONE &&
- fsc->write_congested)
- return 0;
-
- doutc(cl, "%llx.%llx (mode=%s)\n", ceph_vinop(inode),
- wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
- (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
if (ceph_inode_is_shutdown(inode)) {
if (ci->i_wrbuffer_ref > 0) {
@@ -977,388 +989,730 @@ static int ceph_writepages_start(struct address_space *mapping,
ceph_vinop(inode), ceph_ino(inode));
}
mapping_set_error(mapping, -EIO);
- return -EIO; /* we're in a forced umount, don't write! */
+ return true;
}
+
+ return false;
+}
+
+static inline
+unsigned int ceph_define_write_size(struct address_space *mapping)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ unsigned int wsize = i_blocksize(inode);
+
if (fsc->mount_options->wsize < wsize)
wsize = fsc->mount_options->wsize;
- folio_batch_init(&fbatch);
+ return wsize;
+}
- start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
- index = start_index;
+static inline
+void ceph_folio_batch_init(struct ceph_writeback_ctl *ceph_wbc)
+{
+ folio_batch_init(&ceph_wbc->fbatch);
+ ceph_wbc->processed_in_fbatch = 0;
+}
+
+static inline
+void ceph_folio_batch_reinit(struct ceph_writeback_ctl *ceph_wbc)
+{
+ folio_batch_release(&ceph_wbc->fbatch);
+ ceph_folio_batch_init(ceph_wbc);
+}
+
+static inline
+void ceph_init_writeback_ctl(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc)
+{
+ ceph_wbc->snapc = NULL;
+ ceph_wbc->last_snapc = NULL;
+
+ ceph_wbc->strip_unit_end = 0;
+ ceph_wbc->wsize = ceph_define_write_size(mapping);
+
+ ceph_wbc->nr_folios = 0;
+ ceph_wbc->max_pages = 0;
+ ceph_wbc->locked_pages = 0;
+
+ ceph_wbc->done = false;
+ ceph_wbc->should_loop = false;
+ ceph_wbc->range_whole = false;
+
+ ceph_wbc->start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
+ ceph_wbc->index = ceph_wbc->start_index;
+ ceph_wbc->end = -1;
+
+ ceph_wbc->tag = wbc_to_tag(wbc);
+
+ ceph_wbc->op_idx = -1;
+ ceph_wbc->num_ops = 0;
+ ceph_wbc->offset = 0;
+ ceph_wbc->len = 0;
+ ceph_wbc->from_pool = false;
+
+ ceph_folio_batch_init(ceph_wbc);
+
+ ceph_wbc->pages = NULL;
+ ceph_wbc->data_pages = NULL;
+}
+
+static inline
+int ceph_define_writeback_range(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
- if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
- tag = PAGECACHE_TAG_TOWRITE;
- } else {
- tag = PAGECACHE_TAG_DIRTY;
- }
-retry:
/* find oldest snap context with dirty data */
- snapc = get_oldest_context(inode, &ceph_wbc, NULL);
- if (!snapc) {
+ ceph_wbc->snapc = get_oldest_context(inode, ceph_wbc, NULL);
+ if (!ceph_wbc->snapc) {
/* hmm, why does writepages get called when there
is no dirty data? */
doutc(cl, " no snap context with dirty data?\n");
- goto out;
+ return -ENODATA;
}
- doutc(cl, " oldest snapc is %p seq %lld (%d snaps)\n", snapc,
- snapc->seq, snapc->num_snaps);
- should_loop = false;
- if (ceph_wbc.head_snapc && snapc != last_snapc) {
+ doutc(cl, " oldest snapc is %p seq %lld (%d snaps)\n",
+ ceph_wbc->snapc, ceph_wbc->snapc->seq,
+ ceph_wbc->snapc->num_snaps);
+
+ ceph_wbc->should_loop = false;
+
+ if (ceph_wbc->head_snapc && ceph_wbc->snapc != ceph_wbc->last_snapc) {
/* where to start/end? */
if (wbc->range_cyclic) {
- index = start_index;
- end = -1;
- if (index > 0)
- should_loop = true;
- doutc(cl, " cyclic, start at %lu\n", index);
+ ceph_wbc->index = ceph_wbc->start_index;
+ ceph_wbc->end = -1;
+ if (ceph_wbc->index > 0)
+ ceph_wbc->should_loop = true;
+ doutc(cl, " cyclic, start at %lu\n", ceph_wbc->index);
} else {
- index = wbc->range_start >> PAGE_SHIFT;
- end = wbc->range_end >> PAGE_SHIFT;
+ ceph_wbc->index = wbc->range_start >> PAGE_SHIFT;
+ ceph_wbc->end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
- range_whole = true;
- doutc(cl, " not cyclic, %lu to %lu\n", index, end);
+ ceph_wbc->range_whole = true;
+ doutc(cl, " not cyclic, %lu to %lu\n",
+ ceph_wbc->index, ceph_wbc->end);
}
- } else if (!ceph_wbc.head_snapc) {
+ } else if (!ceph_wbc->head_snapc) {
/* Do not respect wbc->range_{start,end}. Dirty pages
* in that range can be associated with newer snapc.
* They are not writeable until we write all dirty pages
* associated with 'snapc' get written */
- if (index > 0)
- should_loop = true;
+ if (ceph_wbc->index > 0)
+ ceph_wbc->should_loop = true;
doutc(cl, " non-head snapc, range whole\n");
}
- if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
- tag_pages_for_writeback(mapping, index, end);
+ ceph_put_snap_context(ceph_wbc->last_snapc);
+ ceph_wbc->last_snapc = ceph_wbc->snapc;
- ceph_put_snap_context(last_snapc);
- last_snapc = snapc;
+ return 0;
+}
- while (!done && index <= end) {
- int num_ops = 0, op_idx;
- unsigned i, nr_folios, max_pages, locked_pages = 0;
- struct page **pages = NULL, **data_pages;
- struct page *page;
- pgoff_t strip_unit_end = 0;
- u64 offset = 0, len = 0;
- bool from_pool = false;
+static inline
+bool has_writeback_done(struct ceph_writeback_ctl *ceph_wbc)
+{
+ return ceph_wbc->done && ceph_wbc->index > ceph_wbc->end;
+}
- max_pages = wsize >> PAGE_SHIFT;
+static inline
+bool can_next_page_be_processed(struct ceph_writeback_ctl *ceph_wbc,
+ unsigned index)
+{
+ return index < ceph_wbc->nr_folios &&
+ ceph_wbc->locked_pages < ceph_wbc->max_pages;
+}
-get_more_pages:
- nr_folios = filemap_get_folios_tag(mapping, &index,
- end, tag, &fbatch);
- doutc(cl, "pagevec_lookup_range_tag got %d\n", nr_folios);
- if (!nr_folios && !locked_pages)
- break;
- for (i = 0; i < nr_folios && locked_pages < max_pages; i++) {
- struct folio *folio = fbatch.folios[i];
+static
+int ceph_check_page_before_write(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc,
+ struct folio *folio)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct ceph_snap_context *pgsnapc;
- page = &folio->page;
- doutc(cl, "? %p idx %lu\n", page, page->index);
- if (locked_pages == 0)
- lock_page(page); /* first page */
- else if (!trylock_page(page))
- break;
+ /* only dirty folios, or our accounting breaks */
+ if (unlikely(!folio_test_dirty(folio) || folio->mapping != mapping)) {
+ doutc(cl, "!dirty or !mapping %p\n", folio);
+ return -ENODATA;
+ }
- /* only dirty pages, or our accounting breaks */
- if (unlikely(!PageDirty(page)) ||
- unlikely(page->mapping != mapping)) {
- doutc(cl, "!dirty or !mapping %p\n", page);
- unlock_page(page);
- continue;
- }
- /* only if matching snap context */
- pgsnapc = page_snap_context(page);
- if (pgsnapc != snapc) {
- doutc(cl, "page snapc %p %lld != oldest %p %lld\n",
- pgsnapc, pgsnapc->seq, snapc, snapc->seq);
- if (!should_loop &&
- !ceph_wbc.head_snapc &&
- wbc->sync_mode != WB_SYNC_NONE)
- should_loop = true;
- unlock_page(page);
- continue;
+ /* only if matching snap context */
+ pgsnapc = page_snap_context(&folio->page);
+ if (pgsnapc != ceph_wbc->snapc) {
+ doutc(cl, "folio snapc %p %lld != oldest %p %lld\n",
+ pgsnapc, pgsnapc->seq,
+ ceph_wbc->snapc, ceph_wbc->snapc->seq);
+
+ if (!ceph_wbc->should_loop && !ceph_wbc->head_snapc &&
+ wbc->sync_mode != WB_SYNC_NONE)
+ ceph_wbc->should_loop = true;
+
+ return -ENODATA;
+ }
+
+ if (folio_pos(folio) >= ceph_wbc->i_size) {
+ doutc(cl, "folio at %lu beyond eof %llu\n",
+ folio->index, ceph_wbc->i_size);
+
+ if ((ceph_wbc->size_stable ||
+ folio_pos(folio) >= i_size_read(inode)) &&
+ folio_clear_dirty_for_io(folio))
+ folio_invalidate(folio, 0, folio_size(folio));
+
+ return -ENODATA;
+ }
+
+ if (ceph_wbc->strip_unit_end &&
+ (folio->index > ceph_wbc->strip_unit_end)) {
+ doutc(cl, "end of strip unit %p\n", folio);
+ return -E2BIG;
+ }
+
+ return 0;
+}
+
+static inline
+void __ceph_allocate_page_array(struct ceph_writeback_ctl *ceph_wbc,
+ unsigned int max_pages)
+{
+ ceph_wbc->pages = kmalloc_array(max_pages,
+ sizeof(*ceph_wbc->pages),
+ GFP_NOFS);
+ if (!ceph_wbc->pages) {
+ ceph_wbc->from_pool = true;
+ ceph_wbc->pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
+ BUG_ON(!ceph_wbc->pages);
+ }
+}
+
+static inline
+void ceph_allocate_page_array(struct address_space *mapping,
+ struct ceph_writeback_ctl *ceph_wbc,
+ struct folio *folio)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ u64 objnum;
+ u64 objoff;
+ u32 xlen;
+
+ /* prepare async write request */
+ ceph_wbc->offset = (u64)folio_pos(folio);
+ ceph_calc_file_object_mapping(&ci->i_layout,
+ ceph_wbc->offset, ceph_wbc->wsize,
+ &objnum, &objoff, &xlen);
+
+ ceph_wbc->num_ops = 1;
+ ceph_wbc->strip_unit_end = folio->index + ((xlen - 1) >> PAGE_SHIFT);
+
+ BUG_ON(ceph_wbc->pages);
+ ceph_wbc->max_pages = calc_pages_for(0, (u64)xlen);
+ __ceph_allocate_page_array(ceph_wbc, ceph_wbc->max_pages);
+
+ ceph_wbc->len = 0;
+}
+
+static inline
+bool is_folio_index_contiguous(const struct ceph_writeback_ctl *ceph_wbc,
+ const struct folio *folio)
+{
+ return folio->index == (ceph_wbc->offset + ceph_wbc->len) >> PAGE_SHIFT;
+}
+
+static inline
+bool is_num_ops_too_big(struct ceph_writeback_ctl *ceph_wbc)
+{
+ return ceph_wbc->num_ops >=
+ (ceph_wbc->from_pool ? CEPH_OSD_SLAB_OPS : CEPH_OSD_MAX_OPS);
+}
+
+static inline
+bool is_write_congestion_happened(struct ceph_fs_client *fsc)
+{
+ return atomic_long_inc_return(&fsc->writeback_count) >
+ CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb);
+}
+
+static inline int move_dirty_folio_in_page_array(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc, struct folio *folio)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct page **pages = ceph_wbc->pages;
+ unsigned int index = ceph_wbc->locked_pages;
+ gfp_t gfp_flags = ceph_wbc->locked_pages ? GFP_NOWAIT : GFP_NOFS;
+
+ if (IS_ENCRYPTED(inode)) {
+ pages[index] = fscrypt_encrypt_pagecache_blocks(folio,
+ PAGE_SIZE,
+ 0,
+ gfp_flags);
+ if (IS_ERR(pages[index])) {
+ int err = PTR_ERR(pages[index]);
+
+ if (err == -EINVAL) {
+ pr_err_client(cl, "inode->i_blkbits=%hhu\n",
+ inode->i_blkbits);
}
- if (page_offset(page) >= ceph_wbc.i_size) {
- doutc(cl, "folio at %lu beyond eof %llu\n",
- folio->index, ceph_wbc.i_size);
- if ((ceph_wbc.size_stable ||
- folio_pos(folio) >= i_size_read(inode)) &&
- folio_clear_dirty_for_io(folio))
- folio_invalidate(folio, 0,
- folio_size(folio));
+
+ /* better not fail on first page! */
+ BUG_ON(ceph_wbc->locked_pages == 0);
+
+ pages[index] = NULL;
+ return err;
+ }
+ } else {
+ pages[index] = &folio->page;
+ }
+
+ ceph_wbc->locked_pages++;
+
+ return 0;
+}
+
+static
+int ceph_process_folio_batch(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct folio *folio = NULL;
+ unsigned i;
+ int rc = 0;
+
+ for (i = 0; can_next_page_be_processed(ceph_wbc, i); i++) {
+ folio = ceph_wbc->fbatch.folios[i];
+
+ if (!folio)
+ continue;
+
+ doutc(cl, "? %p idx %lu, folio_test_writeback %#x, "
+ "folio_test_dirty %#x, folio_test_locked %#x\n",
+ folio, folio->index, folio_test_writeback(folio),
+ folio_test_dirty(folio),
+ folio_test_locked(folio));
+
+ if (folio_test_writeback(folio) ||
+ folio_test_private_2(folio) /* [DEPRECATED] */) {
+ doutc(cl, "waiting on writeback %p\n", folio);
+ folio_wait_writeback(folio);
+ folio_wait_private_2(folio); /* [DEPRECATED] */
+ continue;
+ }
+
+ if (ceph_wbc->locked_pages == 0)
+ folio_lock(folio);
+ else if (!folio_trylock(folio))
+ break;
+
+ rc = ceph_check_page_before_write(mapping, wbc,
+ ceph_wbc, folio);
+ if (rc == -ENODATA) {
+ rc = 0;
+ folio_unlock(folio);
+ ceph_wbc->fbatch.folios[i] = NULL;
+ continue;
+ } else if (rc == -E2BIG) {
+ rc = 0;
+ folio_unlock(folio);
+ ceph_wbc->fbatch.folios[i] = NULL;
+ break;
+ }
+
+ if (!folio_clear_dirty_for_io(folio)) {
+ doutc(cl, "%p !folio_clear_dirty_for_io\n", folio);
+ folio_unlock(folio);
+ ceph_wbc->fbatch.folios[i] = NULL;
+ continue;
+ }
+
+ /*
+ * We have something to write. If this is
+ * the first locked page this time through,
+ * calculate max possible write size and
+ * allocate a page array
+ */
+ if (ceph_wbc->locked_pages == 0) {
+ ceph_allocate_page_array(mapping, ceph_wbc, folio);
+ } else if (!is_folio_index_contiguous(ceph_wbc, folio)) {
+ if (is_num_ops_too_big(ceph_wbc)) {
+ folio_redirty_for_writepage(wbc, folio);
folio_unlock(folio);
- continue;
- }
- if (strip_unit_end && (page->index > strip_unit_end)) {
- doutc(cl, "end of strip unit %p\n", page);
- unlock_page(page);
break;
}
- if (folio_test_writeback(folio) ||
- folio_test_private_2(folio) /* [DEPRECATED] */) {
- if (wbc->sync_mode == WB_SYNC_NONE) {
- doutc(cl, "%p under writeback\n", folio);
- folio_unlock(folio);
- continue;
- }
- doutc(cl, "waiting on writeback %p\n", folio);
- folio_wait_writeback(folio);
- folio_wait_private_2(folio); /* [DEPRECATED] */
- }
- if (!clear_page_dirty_for_io(page)) {
- doutc(cl, "%p !clear_page_dirty_for_io\n", page);
- unlock_page(page);
- continue;
- }
+ ceph_wbc->num_ops++;
+ ceph_wbc->offset = (u64)folio_pos(folio);
+ ceph_wbc->len = 0;
+ }
- /*
- * We have something to write. If this is
- * the first locked page this time through,
- * calculate max possinle write size and
- * allocate a page array
- */
- if (locked_pages == 0) {
- u64 objnum;
- u64 objoff;
- u32 xlen;
-
- /* prepare async write request */
- offset = (u64)page_offset(page);
- ceph_calc_file_object_mapping(&ci->i_layout,
- offset, wsize,
- &objnum, &objoff,
- &xlen);
- len = xlen;
-
- num_ops = 1;
- strip_unit_end = page->index +
- ((len - 1) >> PAGE_SHIFT);
-
- BUG_ON(pages);
- max_pages = calc_pages_for(0, (u64)len);
- pages = kmalloc_array(max_pages,
- sizeof(*pages),
- GFP_NOFS);
- if (!pages) {
- from_pool = true;
- pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
- BUG_ON(!pages);
- }
-
- len = 0;
- } else if (page->index !=
- (offset + len) >> PAGE_SHIFT) {
- if (num_ops >= (from_pool ? CEPH_OSD_SLAB_OPS :
- CEPH_OSD_MAX_OPS)) {
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- break;
- }
-
- num_ops++;
- offset = (u64)page_offset(page);
- len = 0;
- }
+ /* note position of first page in fbatch */
+ doutc(cl, "%llx.%llx will write folio %p idx %lu\n",
+ ceph_vinop(inode), folio, folio->index);
- /* note position of first page in fbatch */
- doutc(cl, "%llx.%llx will write page %p idx %lu\n",
- ceph_vinop(inode), page, page->index);
-
- if (atomic_long_inc_return(&fsc->writeback_count) >
- CONGESTION_ON_THRESH(
- fsc->mount_options->congestion_kb))
- fsc->write_congested = true;
-
- if (IS_ENCRYPTED(inode)) {
- pages[locked_pages] =
- fscrypt_encrypt_pagecache_blocks(page,
- PAGE_SIZE, 0,
- locked_pages ? GFP_NOWAIT : GFP_NOFS);
- if (IS_ERR(pages[locked_pages])) {
- if (PTR_ERR(pages[locked_pages]) == -EINVAL)
- pr_err_client(cl,
- "inode->i_blkbits=%hhu\n",
- inode->i_blkbits);
- /* better not fail on first page! */
- BUG_ON(locked_pages == 0);
- pages[locked_pages] = NULL;
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- break;
- }
- ++locked_pages;
- } else {
- pages[locked_pages++] = page;
- }
+ fsc->write_congested = is_write_congestion_happened(fsc);
- fbatch.folios[i] = NULL;
- len += thp_size(page);
+ rc = move_dirty_folio_in_page_array(mapping, wbc, ceph_wbc,
+ folio);
+ if (rc) {
+ folio_redirty_for_writepage(wbc, folio);
+ folio_unlock(folio);
+ break;
}
- /* did we get anything? */
- if (!locked_pages)
- goto release_folios;
- if (i) {
- unsigned j, n = 0;
- /* shift unused page to beginning of fbatch */
- for (j = 0; j < nr_folios; j++) {
- if (!fbatch.folios[j])
- continue;
- if (n < j)
- fbatch.folios[n] = fbatch.folios[j];
- n++;
- }
- fbatch.nr = n;
+ ceph_wbc->fbatch.folios[i] = NULL;
+ ceph_wbc->len += folio_size(folio);
+ }
- if (nr_folios && i == nr_folios &&
- locked_pages < max_pages) {
- doutc(cl, "reached end fbatch, trying for more\n");
- folio_batch_release(&fbatch);
- goto get_more_pages;
- }
+ ceph_wbc->processed_in_fbatch = i;
+
+ return rc;
+}
+
+static inline
+void ceph_shift_unused_folios_left(struct folio_batch *fbatch)
+{
+ unsigned j, n = 0;
+
+ /* shift unused page to beginning of fbatch */
+ for (j = 0; j < folio_batch_count(fbatch); j++) {
+ if (!fbatch->folios[j])
+ continue;
+
+ if (n < j) {
+ fbatch->folios[n] = fbatch->folios[j];
}
+ n++;
+ }
+
+ fbatch->nr = n;
+}
+
+static
+int ceph_submit_write(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct ceph_vino vino = ceph_vino(inode);
+ struct ceph_osd_request *req = NULL;
+ struct page *page = NULL;
+ bool caching = ceph_is_cache_enabled(inode);
+ u64 offset;
+ u64 len;
+ unsigned i;
+
new_request:
- offset = ceph_fscrypt_page_offset(pages[0]);
- len = wsize;
+ offset = ceph_fscrypt_page_offset(ceph_wbc->pages[0]);
+ len = ceph_wbc->wsize;
+ req = ceph_osdc_new_request(&fsc->client->osdc,
+ &ci->i_layout, vino,
+ offset, &len, 0, ceph_wbc->num_ops,
+ CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
+ ceph_wbc->snapc, ceph_wbc->truncate_seq,
+ ceph_wbc->truncate_size, false);
+ if (IS_ERR(req)) {
req = ceph_osdc_new_request(&fsc->client->osdc,
- &ci->i_layout, vino,
- offset, &len, 0, num_ops,
- CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
- snapc, ceph_wbc.truncate_seq,
- ceph_wbc.truncate_size, false);
- if (IS_ERR(req)) {
- req = ceph_osdc_new_request(&fsc->client->osdc,
- &ci->i_layout, vino,
- offset, &len, 0,
- min(num_ops,
- CEPH_OSD_SLAB_OPS),
- CEPH_OSD_OP_WRITE,
- CEPH_OSD_FLAG_WRITE,
- snapc, ceph_wbc.truncate_seq,
- ceph_wbc.truncate_size, true);
- BUG_ON(IS_ERR(req));
+ &ci->i_layout, vino,
+ offset, &len, 0,
+ min(ceph_wbc->num_ops,
+ CEPH_OSD_SLAB_OPS),
+ CEPH_OSD_OP_WRITE,
+ CEPH_OSD_FLAG_WRITE,
+ ceph_wbc->snapc,
+ ceph_wbc->truncate_seq,
+ ceph_wbc->truncate_size,
+ true);
+ BUG_ON(IS_ERR(req));
+ }
+
+ page = ceph_wbc->pages[ceph_wbc->locked_pages - 1];
+ BUG_ON(len < ceph_fscrypt_page_offset(page) + thp_size(page) - offset);
+
+ if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
+ for (i = 0; i < folio_batch_count(&ceph_wbc->fbatch); i++) {
+ struct folio *folio = ceph_wbc->fbatch.folios[i];
+
+ if (!folio)
+ continue;
+
+ page = &folio->page;
+ redirty_page_for_writepage(wbc, page);
+ unlock_page(page);
}
- BUG_ON(len < ceph_fscrypt_page_offset(pages[locked_pages - 1]) +
- thp_size(pages[locked_pages - 1]) - offset);
- if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
- rc = -EIO;
- goto release_folios;
+ for (i = 0; i < ceph_wbc->locked_pages; i++) {
+ page = ceph_fscrypt_pagecache_page(ceph_wbc->pages[i]);
+
+ if (!page)
+ continue;
+
+ redirty_page_for_writepage(wbc, page);
+ unlock_page(page);
}
- req->r_callback = writepages_finish;
- req->r_inode = inode;
-
- /* Format the osd request message and submit the write */
- len = 0;
- data_pages = pages;
- op_idx = 0;
- for (i = 0; i < locked_pages; i++) {
- struct page *page = ceph_fscrypt_pagecache_page(pages[i]);
-
- u64 cur_offset = page_offset(page);
- /*
- * Discontinuity in page range? Ceph can handle that by just passing
- * multiple extents in the write op.
- */
- if (offset + len != cur_offset) {
- /* If it's full, stop here */
- if (op_idx + 1 == req->r_num_ops)
- break;
-
- /* Kick off an fscache write with what we have so far. */
- ceph_fscache_write_to_cache(inode, offset, len, caching);
-
- /* Start a new extent */
- osd_req_op_extent_dup_last(req, op_idx,
- cur_offset - offset);
- doutc(cl, "got pages at %llu~%llu\n", offset,
- len);
- osd_req_op_extent_osd_data_pages(req, op_idx,
- data_pages, len, 0,
- from_pool, false);
- osd_req_op_extent_update(req, op_idx, len);
-
- len = 0;
- offset = cur_offset;
- data_pages = pages + i;
- op_idx++;
- }
- set_page_writeback(page);
- if (caching)
- ceph_set_page_fscache(page);
- len += thp_size(page);
+ ceph_osdc_put_request(req);
+ return -EIO;
+ }
+
+ req->r_callback = writepages_finish;
+ req->r_inode = inode;
+
+ /* Format the osd request message and submit the write */
+ len = 0;
+ ceph_wbc->data_pages = ceph_wbc->pages;
+ ceph_wbc->op_idx = 0;
+ for (i = 0; i < ceph_wbc->locked_pages; i++) {
+ u64 cur_offset;
+
+ page = ceph_fscrypt_pagecache_page(ceph_wbc->pages[i]);
+ cur_offset = page_offset(page);
+
+ /*
+ * Discontinuity in page range? Ceph can handle that by just passing
+ * multiple extents in the write op.
+ */
+ if (offset + len != cur_offset) {
+ /* If it's full, stop here */
+ if (ceph_wbc->op_idx + 1 == req->r_num_ops)
+ break;
+
+ /* Kick off an fscache write with what we have so far. */
+ ceph_fscache_write_to_cache(inode, offset, len, caching);
+
+ /* Start a new extent */
+ osd_req_op_extent_dup_last(req, ceph_wbc->op_idx,
+ cur_offset - offset);
+
+ doutc(cl, "got pages at %llu~%llu\n", offset, len);
+
+ osd_req_op_extent_osd_data_pages(req, ceph_wbc->op_idx,
+ ceph_wbc->data_pages,
+ len, 0,
+ ceph_wbc->from_pool,
+ false);
+ osd_req_op_extent_update(req, ceph_wbc->op_idx, len);
+
+ len = 0;
+ offset = cur_offset;
+ ceph_wbc->data_pages = ceph_wbc->pages + i;
+ ceph_wbc->op_idx++;
}
- ceph_fscache_write_to_cache(inode, offset, len, caching);
-
- if (ceph_wbc.size_stable) {
- len = min(len, ceph_wbc.i_size - offset);
- } else if (i == locked_pages) {
- /* writepages_finish() clears writeback pages
- * according to the data length, so make sure
- * data length covers all locked pages */
- u64 min_len = len + 1 - thp_size(page);
- len = get_writepages_data_length(inode, pages[i - 1],
- offset);
- len = max(len, min_len);
+
+ set_page_writeback(page);
+
+ if (caching)
+ ceph_set_page_fscache(page);
+
+ len += thp_size(page);
+ }
+
+ ceph_fscache_write_to_cache(inode, offset, len, caching);
+
+ if (ceph_wbc->size_stable) {
+ len = min(len, ceph_wbc->i_size - offset);
+ } else if (i == ceph_wbc->locked_pages) {
+ /* writepages_finish() clears writeback pages
+ * according to the data length, so make sure
+ * data length covers all locked pages */
+ u64 min_len = len + 1 - thp_size(page);
+ len = get_writepages_data_length(inode,
+ ceph_wbc->pages[i - 1],
+ offset);
+ len = max(len, min_len);
+ }
+
+ if (IS_ENCRYPTED(inode))
+ len = round_up(len, CEPH_FSCRYPT_BLOCK_SIZE);
+
+ doutc(cl, "got pages at %llu~%llu\n", offset, len);
+
+ if (IS_ENCRYPTED(inode) &&
+ ((offset | len) & ~CEPH_FSCRYPT_BLOCK_MASK)) {
+ pr_warn_client(cl,
+ "bad encrypted write offset=%lld len=%llu\n",
+ offset, len);
+ }
+
+ osd_req_op_extent_osd_data_pages(req, ceph_wbc->op_idx,
+ ceph_wbc->data_pages, len,
+ 0, ceph_wbc->from_pool, false);
+ osd_req_op_extent_update(req, ceph_wbc->op_idx, len);
+
+ BUG_ON(ceph_wbc->op_idx + 1 != req->r_num_ops);
+
+ ceph_wbc->from_pool = false;
+ if (i < ceph_wbc->locked_pages) {
+ BUG_ON(ceph_wbc->num_ops <= req->r_num_ops);
+ ceph_wbc->num_ops -= req->r_num_ops;
+ ceph_wbc->locked_pages -= i;
+
+ /* allocate new pages array for next request */
+ ceph_wbc->data_pages = ceph_wbc->pages;
+ __ceph_allocate_page_array(ceph_wbc, ceph_wbc->locked_pages);
+ memcpy(ceph_wbc->pages, ceph_wbc->data_pages + i,
+ ceph_wbc->locked_pages * sizeof(*ceph_wbc->pages));
+ memset(ceph_wbc->data_pages + i, 0,
+ ceph_wbc->locked_pages * sizeof(*ceph_wbc->pages));
+ } else {
+ BUG_ON(ceph_wbc->num_ops != req->r_num_ops);
+ /* request message now owns the pages array */
+ ceph_wbc->pages = NULL;
+ }
+
+ req->r_mtime = inode_get_mtime(inode);
+ ceph_osdc_start_request(&fsc->client->osdc, req);
+ req = NULL;
+
+ wbc->nr_to_write -= i;
+ if (ceph_wbc->pages)
+ goto new_request;
+
+ return 0;
+}
+
+static
+void ceph_wait_until_current_writes_complete(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc)
+{
+ struct page *page;
+ unsigned i, nr;
+
+ if (wbc->sync_mode != WB_SYNC_NONE &&
+ ceph_wbc->start_index == 0 && /* all dirty pages were checked */
+ !ceph_wbc->head_snapc) {
+ ceph_wbc->index = 0;
+
+ while ((ceph_wbc->index <= ceph_wbc->end) &&
+ (nr = filemap_get_folios_tag(mapping,
+ &ceph_wbc->index,
+ (pgoff_t)-1,
+ PAGECACHE_TAG_WRITEBACK,
+ &ceph_wbc->fbatch))) {
+ for (i = 0; i < nr; i++) {
+ page = &ceph_wbc->fbatch.folios[i]->page;
+ if (page_snap_context(page) != ceph_wbc->snapc)
+ continue;
+ wait_on_page_writeback(page);
+ }
+
+ folio_batch_release(&ceph_wbc->fbatch);
+ cond_resched();
}
- if (IS_ENCRYPTED(inode))
- len = round_up(len, CEPH_FSCRYPT_BLOCK_SIZE);
+ }
+}
- doutc(cl, "got pages at %llu~%llu\n", offset, len);
+/*
+ * initiate async writeback
+ */
+static int ceph_writepages_start(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct ceph_writeback_ctl ceph_wbc;
+ int rc = 0;
- if (IS_ENCRYPTED(inode) &&
- ((offset | len) & ~CEPH_FSCRYPT_BLOCK_MASK))
- pr_warn_client(cl,
- "bad encrypted write offset=%lld len=%llu\n",
- offset, len);
-
- osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
- 0, from_pool, false);
- osd_req_op_extent_update(req, op_idx, len);
-
- BUG_ON(op_idx + 1 != req->r_num_ops);
-
- from_pool = false;
- if (i < locked_pages) {
- BUG_ON(num_ops <= req->r_num_ops);
- num_ops -= req->r_num_ops;
- locked_pages -= i;
-
- /* allocate new pages array for next request */
- data_pages = pages;
- pages = kmalloc_array(locked_pages, sizeof(*pages),
- GFP_NOFS);
- if (!pages) {
- from_pool = true;
- pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
- BUG_ON(!pages);
+ if (wbc->sync_mode == WB_SYNC_NONE && fsc->write_congested)
+ return 0;
+
+ doutc(cl, "%llx.%llx (mode=%s)\n", ceph_vinop(inode),
+ wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
+ (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
+
+ if (is_forced_umount(mapping)) {
+ /* we're in a forced umount, don't write! */
+ return -EIO;
+ }
+
+ ceph_init_writeback_ctl(mapping, wbc, &ceph_wbc);
+
+ if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
+ rc = -EIO;
+ goto out;
+ }
+
+retry:
+ rc = ceph_define_writeback_range(mapping, wbc, &ceph_wbc);
+ if (rc == -ENODATA) {
+ /* hmm, why does writepages get called when there
+ is no dirty data? */
+ rc = 0;
+ goto dec_osd_stopping_blocker;
+ }
+
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+ tag_pages_for_writeback(mapping, ceph_wbc.index, ceph_wbc.end);
+
+ while (!has_writeback_done(&ceph_wbc)) {
+ ceph_wbc.locked_pages = 0;
+ ceph_wbc.max_pages = ceph_wbc.wsize >> PAGE_SHIFT;
+
+get_more_pages:
+ ceph_folio_batch_reinit(&ceph_wbc);
+
+ ceph_wbc.nr_folios = filemap_get_folios_tag(mapping,
+ &ceph_wbc.index,
+ ceph_wbc.end,
+ ceph_wbc.tag,
+ &ceph_wbc.fbatch);
+ doutc(cl, "pagevec_lookup_range_tag for tag %#x got %d\n",
+ ceph_wbc.tag, ceph_wbc.nr_folios);
+
+ if (!ceph_wbc.nr_folios && !ceph_wbc.locked_pages)
+ break;
+
+process_folio_batch:
+ rc = ceph_process_folio_batch(mapping, wbc, &ceph_wbc);
+ ceph_shift_unused_folios_left(&ceph_wbc.fbatch);
+ if (rc)
+ goto release_folios;
+
+ /* did we get anything? */
+ if (!ceph_wbc.locked_pages)
+ goto release_folios;
+
+ if (ceph_wbc.processed_in_fbatch) {
+ if (folio_batch_count(&ceph_wbc.fbatch) == 0 &&
+ ceph_wbc.locked_pages < ceph_wbc.max_pages) {
+ doutc(cl, "reached end fbatch, trying for more\n");
+ goto get_more_pages;
}
- memcpy(pages, data_pages + i,
- locked_pages * sizeof(*pages));
- memset(data_pages + i, 0,
- locked_pages * sizeof(*pages));
- } else {
- BUG_ON(num_ops != req->r_num_ops);
- index = pages[i - 1]->index + 1;
- /* request message now owns the pages array */
- pages = NULL;
}
- req->r_mtime = inode_get_mtime(inode);
- ceph_osdc_start_request(&fsc->client->osdc, req);
- req = NULL;
+ rc = ceph_submit_write(mapping, wbc, &ceph_wbc);
+ if (rc)
+ goto release_folios;
+
+ ceph_wbc.locked_pages = 0;
+ ceph_wbc.strip_unit_end = 0;
- wbc->nr_to_write -= i;
- if (pages)
- goto new_request;
+ if (folio_batch_count(&ceph_wbc.fbatch) > 0) {
+ ceph_wbc.nr_folios =
+ folio_batch_count(&ceph_wbc.fbatch);
+ goto process_folio_batch;
+ }
/*
* We stop writing back only if we are not doing
@@ -1367,61 +1721,44 @@ new_request:
* we tagged for writeback prior to entering this loop.
*/
if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
- done = true;
+ ceph_wbc.done = true;
release_folios:
doutc(cl, "folio_batch release on %d folios (%p)\n",
- (int)fbatch.nr, fbatch.nr ? fbatch.folios[0] : NULL);
- folio_batch_release(&fbatch);
+ (int)ceph_wbc.fbatch.nr,
+ ceph_wbc.fbatch.nr ? ceph_wbc.fbatch.folios[0] : NULL);
+ folio_batch_release(&ceph_wbc.fbatch);
}
- if (should_loop && !done) {
+ if (ceph_wbc.should_loop && !ceph_wbc.done) {
/* more to do; loop back to beginning of file */
doutc(cl, "looping back to beginning of file\n");
- end = start_index - 1; /* OK even when start_index == 0 */
+ /* OK even when start_index == 0 */
+ ceph_wbc.end = ceph_wbc.start_index - 1;
/* to write dirty pages associated with next snapc,
* we need to wait until current writes complete */
- if (wbc->sync_mode != WB_SYNC_NONE &&
- start_index == 0 && /* all dirty pages were checked */
- !ceph_wbc.head_snapc) {
- struct page *page;
- unsigned i, nr;
- index = 0;
- while ((index <= end) &&
- (nr = filemap_get_folios_tag(mapping, &index,
- (pgoff_t)-1,
- PAGECACHE_TAG_WRITEBACK,
- &fbatch))) {
- for (i = 0; i < nr; i++) {
- page = &fbatch.folios[i]->page;
- if (page_snap_context(page) != snapc)
- continue;
- wait_on_page_writeback(page);
- }
- folio_batch_release(&fbatch);
- cond_resched();
- }
- }
+ ceph_wait_until_current_writes_complete(mapping, wbc, &ceph_wbc);
- start_index = 0;
- index = 0;
+ ceph_wbc.start_index = 0;
+ ceph_wbc.index = 0;
goto retry;
}
- if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
- mapping->writeback_index = index;
+ if (wbc->range_cyclic || (ceph_wbc.range_whole && wbc->nr_to_write > 0))
+ mapping->writeback_index = ceph_wbc.index;
+
+dec_osd_stopping_blocker:
+ ceph_dec_osd_stopping_blocker(fsc->mdsc);
out:
- ceph_osdc_put_request(req);
- ceph_put_snap_context(last_snapc);
+ ceph_put_snap_context(ceph_wbc.last_snapc);
doutc(cl, "%llx.%llx dend - startone, rc = %d\n", ceph_vinop(inode),
rc);
+
return rc;
}
-
-
/*
* See if a given @snapc is either writeable, or already written.
*/
@@ -1437,56 +1774,56 @@ static int context_is_writeable_or_written(struct inode *inode,
/**
* ceph_find_incompatible - find an incompatible context and return it
- * @page: page being dirtied
+ * @folio: folio being dirtied
*
- * We are only allowed to write into/dirty a page if the page is
+ * We are only allowed to write into/dirty a folio if the folio is
* clean, or already dirty within the same snap context. Returns a
* conflicting context if there is one, NULL if there isn't, or a
* negative error code on other errors.
*
- * Must be called with page lock held.
+ * Must be called with folio lock held.
*/
static struct ceph_snap_context *
-ceph_find_incompatible(struct page *page)
+ceph_find_incompatible(struct folio *folio)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
if (ceph_inode_is_shutdown(inode)) {
- doutc(cl, " %llx.%llx page %p is shutdown\n",
- ceph_vinop(inode), page);
+ doutc(cl, " %llx.%llx folio %p is shutdown\n",
+ ceph_vinop(inode), folio);
return ERR_PTR(-ESTALE);
}
for (;;) {
struct ceph_snap_context *snapc, *oldest;
- wait_on_page_writeback(page);
+ folio_wait_writeback(folio);
- snapc = page_snap_context(page);
+ snapc = page_snap_context(&folio->page);
if (!snapc || snapc == ci->i_head_snapc)
break;
/*
- * this page is already dirty in another (older) snap
+ * this folio is already dirty in another (older) snap
* context! is it writeable now?
*/
oldest = get_oldest_context(inode, NULL, NULL);
if (snapc->seq > oldest->seq) {
/* not writeable -- return it for the caller to deal with */
ceph_put_snap_context(oldest);
- doutc(cl, " %llx.%llx page %p snapc %p not current or oldest\n",
- ceph_vinop(inode), page, snapc);
+ doutc(cl, " %llx.%llx folio %p snapc %p not current or oldest\n",
+ ceph_vinop(inode), folio, snapc);
return ceph_get_snap_context(snapc);
}
ceph_put_snap_context(oldest);
- /* yay, writeable, do it now (without dropping page lock) */
- doutc(cl, " %llx.%llx page %p snapc %p not current, but oldest\n",
- ceph_vinop(inode), page, snapc);
- if (clear_page_dirty_for_io(page)) {
- int r = writepage_nounlock(page, NULL);
+ /* yay, writeable, do it now (without dropping folio lock) */
+ doutc(cl, " %llx.%llx folio %p snapc %p not current, but oldest\n",
+ ceph_vinop(inode), folio, snapc);
+ if (folio_clear_dirty_for_io(folio)) {
+ int r = write_folio_nounlock(folio, NULL);
if (r < 0)
return ERR_PTR(r);
}
@@ -1501,7 +1838,7 @@ static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_snap_context *snapc;
- snapc = ceph_find_incompatible(folio_page(*foliop, 0));
+ snapc = ceph_find_incompatible(*foliop);
if (snapc) {
int r;
@@ -1524,10 +1861,12 @@ static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned
* We are only allowed to write into/dirty the page if the page is
* clean, or already dirty within the same snap context.
*/
-static int ceph_write_begin(struct file *file, struct address_space *mapping,
+static int ceph_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
loff_t pos, unsigned len,
struct folio **foliop, void **fsdata)
{
+ struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
int r;
@@ -1545,10 +1884,12 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
* we don't do anything in here that simple_write_end doesn't do
* except adjust dirty page accounting
*/
-static int ceph_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
+static int ceph_write_end(const struct kiocb *iocb,
+ struct address_space *mapping, loff_t pos,
+ unsigned len, unsigned copied,
struct folio *folio, void *fsdata)
{
+ struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct ceph_client *cl = ceph_inode_to_client(inode);
bool check_cap = false;
@@ -1584,7 +1925,6 @@ out:
const struct address_space_operations ceph_aops = {
.read_folio = netfs_read_folio,
.readahead = netfs_readahead,
- .writepage = ceph_writepage,
.writepages = ceph_writepages_start,
.write_begin = ceph_write_begin,
.write_end = ceph_write_end,
@@ -1592,6 +1932,7 @@ const struct address_space_operations ceph_aops = {
.invalidate_folio = ceph_invalidate_folio,
.release_folio = netfs_release_folio,
.direct_IO = noop_direct_IO,
+ .migrate_folio = filemap_migrate_folio,
};
static void ceph_block_sigs(sigset_t *oldset)
@@ -1708,8 +2049,8 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_info *fi = vma->vm_file->private_data;
struct ceph_cap_flush *prealloc_cf;
- struct page *page = vmf->page;
- loff_t off = page_offset(page);
+ struct folio *folio = page_folio(vmf->page);
+ loff_t off = folio_pos(folio);
loff_t size = i_size_read(inode);
size_t len;
int want, got, err;
@@ -1726,10 +2067,10 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
sb_start_pagefault(inode->i_sb);
ceph_block_sigs(&oldset);
- if (off + thp_size(page) <= size)
- len = thp_size(page);
+ if (off + folio_size(folio) <= size)
+ len = folio_size(folio);
else
- len = offset_in_thp(page, size);
+ len = offset_in_folio(folio, size);
doutc(cl, "%llx.%llx %llu~%zd getting caps i_size %llu\n",
ceph_vinop(inode), off, len, size);
@@ -1746,30 +2087,30 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
doutc(cl, "%llx.%llx %llu~%zd got cap refs on %s\n", ceph_vinop(inode),
off, len, ceph_cap_string(got));
- /* Update time before taking page lock */
+ /* Update time before taking folio lock */
file_update_time(vma->vm_file);
inode_inc_iversion_raw(inode);
do {
struct ceph_snap_context *snapc;
- lock_page(page);
+ folio_lock(folio);
- if (page_mkwrite_check_truncate(page, inode) < 0) {
- unlock_page(page);
+ if (folio_mkwrite_check_truncate(folio, inode) < 0) {
+ folio_unlock(folio);
ret = VM_FAULT_NOPAGE;
break;
}
- snapc = ceph_find_incompatible(page);
+ snapc = ceph_find_incompatible(folio);
if (!snapc) {
- /* success. we'll keep the page locked. */
- set_page_dirty(page);
+ /* success. we'll keep the folio locked. */
+ folio_mark_dirty(folio);
ret = VM_FAULT_LOCKED;
break;
}
- unlock_page(page);
+ folio_unlock(folio);
if (IS_ERR(snapc)) {
ret = VM_FAULT_SIGBUS;
@@ -1990,13 +2331,13 @@ static const struct vm_operations_struct ceph_vmops = {
.page_mkwrite = ceph_page_mkwrite,
};
-int ceph_mmap(struct file *file, struct vm_area_struct *vma)
+int ceph_mmap_prepare(struct vm_area_desc *desc)
{
- struct address_space *mapping = file->f_mapping;
+ struct address_space *mapping = desc->file->f_mapping;
if (!mapping->a_ops->read_folio)
return -ENOEXEC;
- vma->vm_ops = &ceph_vmops;
+ desc->vm_ops = &ceph_vmops;
return 0;
}
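
Before moving on to the smaller files, a note on the addr.c refactor above: ceph_submit_write() packs the locked pages into a single OSD request and starts a new extent op whenever the next page is not contiguous with the current extent (offset + len != cur_offset), stopping once req->r_num_ops is exhausted. The standalone sketch below illustrates just that packing rule; the page size, op limit, and helper name are assumptions for the example, not kernel code.

#include <stdio.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096ULL   /* assumed page size for the example */
#define SKETCH_MAX_OPS   3         /* stand-in for req->r_num_ops */

/*
 * Count how many extent ops a batch of page offsets would need under the
 * rule used in ceph_submit_write(): a page that is not contiguous with the
 * current extent (offset + len != cur_offset) opens a new op, and packing
 * stops once the op budget is spent.  Returns the number of pages consumed.
 */
static unsigned int pack_extents(const uint64_t *page_off, unsigned int npages,
                                 unsigned int *ops_used)
{
        uint64_t offset, len = 0;
        unsigned int i, op_idx = 0;

        if (!npages) {
                *ops_used = 0;
                return 0;
        }

        offset = page_off[0];
        for (i = 0; i < npages; i++) {
                uint64_t cur = page_off[i];

                if (offset + len != cur) {          /* discontinuity */
                        if (op_idx + 1 == SKETCH_MAX_OPS)
                                break;              /* request is full */
                        op_idx++;                   /* start a new extent */
                        offset = cur;
                        len = 0;
                }
                len += SKETCH_PAGE_SIZE;            /* page joins the extent */
        }
        *ops_used = op_idx + 1;
        return i;                                   /* pages packed so far */
}

int main(void)
{
        /* three runs of pages with gaps between them */
        const uint64_t off[] = { 0, 4096, 8192, 20480, 24576, 40960 };
        unsigned int ops, used;

        used = pack_extents(off, 6, &ops);
        printf("packed %u of 6 pages into %u extent op(s)\n", used, ops);
        return 0;
}

With the offsets above this prints "packed 6 of 6 pages into 3 extent op(s)": each gap opens a new extent, which is why ceph_process_folio_batch() also caps num_ops at CEPH_OSD_MAX_OPS (or CEPH_OSD_SLAB_OPS when the page array came from the mempool).
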
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index 930fbd54d2c8..f678bab189d8 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -26,7 +26,7 @@ void ceph_fscache_register_inode_cookie(struct inode *inode)
return;
/* Only new inodes! */
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return;
WARN_ON_ONCE(ci->netfs.cache);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index a8d8b56cf9d2..b1a8ff612c41 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -4957,24 +4957,20 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
cl = ceph_inode_to_client(dir);
spin_lock(&dentry->d_lock);
if (ret && di->lease_session && di->lease_session->s_mds == mds) {
+ int len = dentry->d_name.len;
doutc(cl, "%p mds%d seq %d\n", dentry, mds,
(int)di->lease_seq);
rel->dname_seq = cpu_to_le32(di->lease_seq);
__ceph_mdsc_drop_dentry_lease(dentry);
+ memcpy(*p, dentry->d_name.name, len);
spin_unlock(&dentry->d_lock);
if (IS_ENCRYPTED(dir) && fscrypt_has_encryption_key(dir)) {
- int ret2 = ceph_encode_encrypted_fname(dir, dentry, *p);
-
- if (ret2 < 0)
- return ret2;
-
- rel->dname_len = cpu_to_le32(ret2);
- *p += ret2;
- } else {
- rel->dname_len = cpu_to_le32(dentry->d_name.len);
- memcpy(*p, dentry->d_name.name, dentry->d_name.len);
- *p += dentry->d_name.len;
+ len = ceph_encode_encrypted_dname(dir, *p, len);
+ if (len < 0)
+ return len;
}
+ rel->dname_len = cpu_to_le32(len);
+ *p += len;
} else {
spin_unlock(&dentry->d_lock);
}
diff --git a/fs/ceph/crypto.c b/fs/ceph/crypto.c
index 3b3c4d8d401e..0ea4db650f85 100644
--- a/fs/ceph/crypto.c
+++ b/fs/ceph/crypto.c
@@ -15,59 +15,6 @@
#include "mds_client.h"
#include "crypto.h"
-/*
- * The base64url encoding used by fscrypt includes the '_' character, which may
- * cause problems in snapshot names (which can not start with '_'). Thus, we
- * used the base64 encoding defined for IMAP mailbox names (RFC 3501) instead,
- * which replaces '-' and '_' by '+' and ','.
- */
-static const char base64_table[65] =
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
-
-int ceph_base64_encode(const u8 *src, int srclen, char *dst)
-{
- u32 ac = 0;
- int bits = 0;
- int i;
- char *cp = dst;
-
- for (i = 0; i < srclen; i++) {
- ac = (ac << 8) | src[i];
- bits += 8;
- do {
- bits -= 6;
- *cp++ = base64_table[(ac >> bits) & 0x3f];
- } while (bits >= 6);
- }
- if (bits)
- *cp++ = base64_table[(ac << (6 - bits)) & 0x3f];
- return cp - dst;
-}
-
-int ceph_base64_decode(const char *src, int srclen, u8 *dst)
-{
- u32 ac = 0;
- int bits = 0;
- int i;
- u8 *bp = dst;
-
- for (i = 0; i < srclen; i++) {
- const char *p = strchr(base64_table, src[i]);
-
- if (p == NULL || src[i] == 0)
- return -1;
- ac = (ac << 6) | (p - base64_table);
- bits += 6;
- if (bits >= 8) {
- bits -= 8;
- *bp++ = (u8)(ac >> bits);
- }
- }
- if (ac & ((1 << bits) - 1))
- return -1;
- return bp - dst;
-}
-
static int ceph_crypt_get_context(struct inode *inode, void *ctx, size_t len)
{
struct ceph_inode_info *ci = ceph_inode(inode);
@@ -133,6 +80,8 @@ static const union fscrypt_policy *ceph_get_dummy_policy(struct super_block *sb)
}
static struct fscrypt_operations ceph_fscrypt_ops = {
+ .inode_info_offs = (int)offsetof(struct ceph_inode_info, i_crypt_info) -
+ (int)offsetof(struct ceph_inode_info, netfs.inode),
.needs_bounce_pages = 1,
.get_context = ceph_crypt_get_context,
.set_context = ceph_crypt_set_context,
@@ -215,35 +164,31 @@ static struct inode *parse_longname(const struct inode *parent,
struct ceph_client *cl = ceph_inode_to_client(parent);
struct inode *dir = NULL;
struct ceph_vino vino = { .snap = CEPH_NOSNAP };
- char *inode_number;
- char *name_end;
- int orig_len = *name_len;
+ char *name_end, *inode_number;
int ret = -EIO;
-
+ /* NUL-terminate */
+ char *str __free(kfree) = kmemdup_nul(name, *name_len, GFP_KERNEL);
+ if (!str)
+ return ERR_PTR(-ENOMEM);
/* Skip initial '_' */
- name++;
- name_end = strrchr(name, '_');
+ str++;
+ name_end = strrchr(str, '_');
if (!name_end) {
- doutc(cl, "failed to parse long snapshot name: %s\n", name);
+ doutc(cl, "failed to parse long snapshot name: %s\n", str);
return ERR_PTR(-EIO);
}
- *name_len = (name_end - name);
+ *name_len = (name_end - str);
if (*name_len <= 0) {
pr_err_client(cl, "failed to parse long snapshot name\n");
return ERR_PTR(-EIO);
}
/* Get the inode number */
- inode_number = kmemdup_nul(name_end + 1,
- orig_len - *name_len - 2,
- GFP_KERNEL);
- if (!inode_number)
- return ERR_PTR(-ENOMEM);
+ inode_number = name_end + 1;
ret = kstrtou64(inode_number, 10, &vino.ino);
if (ret) {
- doutc(cl, "failed to parse inode number: %s\n", name);
- dir = ERR_PTR(ret);
- goto out;
+ doutc(cl, "failed to parse inode number: %s\n", str);
+ return ERR_PTR(ret);
}
/* And finally the inode */
@@ -254,42 +199,29 @@ static struct inode *parse_longname(const struct inode *parent,
if (IS_ERR(dir))
doutc(cl, "can't find inode %s (%s)\n", inode_number, name);
}
-
-out:
- kfree(inode_number);
return dir;
}
-int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name,
- char *buf)
+int ceph_encode_encrypted_dname(struct inode *parent, char *buf, int elen)
{
struct ceph_client *cl = ceph_inode_to_client(parent);
struct inode *dir = parent;
- struct qstr iname;
+ char *p = buf;
u32 len;
- int name_len;
- int elen;
+ int name_len = elen;
int ret;
u8 *cryptbuf = NULL;
- iname.name = d_name->name;
- name_len = d_name->len;
-
/* Handle the special case of snapshot names that start with '_' */
- if ((ceph_snap(dir) == CEPH_SNAPDIR) && (name_len > 0) &&
- (iname.name[0] == '_')) {
- dir = parse_longname(parent, iname.name, &name_len);
+ if (ceph_snap(dir) == CEPH_SNAPDIR && *p == '_') {
+ dir = parse_longname(parent, p, &name_len);
if (IS_ERR(dir))
return PTR_ERR(dir);
- iname.name++; /* skip initial '_' */
+ p++; /* skip initial '_' */
}
- iname.len = name_len;
- if (!fscrypt_has_encryption_key(dir)) {
- memcpy(buf, d_name->name, d_name->len);
- elen = d_name->len;
+ if (!fscrypt_has_encryption_key(dir))
goto out;
- }
/*
* Convert cleartext d_name to ciphertext. If result is longer than
@@ -297,7 +229,7 @@ int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name,
*
* See: fscrypt_setup_filename
*/
- if (!fscrypt_fname_encrypted_size(dir, iname.len, NAME_MAX, &len)) {
+ if (!fscrypt_fname_encrypted_size(dir, name_len, NAME_MAX, &len)) {
elen = -ENAMETOOLONG;
goto out;
}
@@ -310,7 +242,9 @@ int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name,
goto out;
}
- ret = fscrypt_fname_encrypt(dir, &iname, cryptbuf, len);
+ ret = fscrypt_fname_encrypt(dir,
+ &(struct qstr)QSTR_INIT(p, name_len),
+ cryptbuf, len);
if (ret) {
elen = ret;
goto out;
@@ -331,23 +265,18 @@ int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name,
}
/* base64 encode the encrypted name */
- elen = ceph_base64_encode(cryptbuf, len, buf);
- doutc(cl, "base64-encoded ciphertext name = %.*s\n", elen, buf);
+ elen = base64_encode(cryptbuf, len, p, false, BASE64_IMAP);
+ doutc(cl, "base64-encoded ciphertext name = %.*s\n", elen, p);
/* To understand the 240 limit, see CEPH_NOHASH_NAME_MAX comments */
WARN_ON(elen > 240);
- if ((elen > 0) && (dir != parent)) {
- char tmp_buf[NAME_MAX];
-
- elen = snprintf(tmp_buf, sizeof(tmp_buf), "_%.*s_%ld",
- elen, buf, dir->i_ino);
- memcpy(buf, tmp_buf, elen);
- }
+ if (dir != parent) // leading _ is already there; append _<inum>
+ elen += 1 + sprintf(p + elen, "_%ld", dir->i_ino);
out:
kfree(cryptbuf);
if (dir != parent) {
- if ((dir->i_state & I_NEW))
+ if ((inode_state_read_once(dir) & I_NEW))
discard_new_inode(dir);
else
iput(dir);
@@ -355,14 +284,6 @@ out:
return elen;
}
-int ceph_encode_encrypted_fname(struct inode *parent, struct dentry *dentry,
- char *buf)
-{
- WARN_ON_ONCE(!fscrypt_has_encryption_key(parent));
-
- return ceph_encode_encrypted_dname(parent, &dentry->d_name, buf);
-}
-
/**
* ceph_fname_to_usr - convert a filename for userland presentation
* @fname: ceph_fname to be converted
@@ -438,7 +359,8 @@ int ceph_fname_to_usr(const struct ceph_fname *fname, struct fscrypt_str *tname,
tname = &_tname;
}
- declen = ceph_base64_decode(name, name_len, tname->name);
+ declen = base64_decode(name, name_len,
+ tname->name, false, BASE64_IMAP);
if (declen <= 0) {
ret = -EIO;
goto out;
@@ -452,7 +374,7 @@ int ceph_fname_to_usr(const struct ceph_fname *fname, struct fscrypt_str *tname,
ret = fscrypt_fname_disk_to_usr(dir, 0, 0, &iname, oname);
if (!ret && (dir != fname->dir)) {
- char tmp_buf[CEPH_BASE64_CHARS(NAME_MAX)];
+ char tmp_buf[BASE64_CHARS(NAME_MAX)];
name_len = snprintf(tmp_buf, sizeof(tmp_buf), "_%.*s_%ld",
oname->len, oname->name, dir->i_ino);
@@ -464,7 +386,7 @@ out:
fscrypt_fname_free_buffer(&_tname);
out_inode:
if (dir != fname->dir) {
- if ((dir->i_state & I_NEW))
+ if ((inode_state_read_once(dir) & I_NEW))
discard_new_inode(dir);
else
iput(dir);
@@ -516,15 +438,13 @@ int ceph_fscrypt_decrypt_block_inplace(const struct inode *inode,
int ceph_fscrypt_encrypt_block_inplace(const struct inode *inode,
struct page *page, unsigned int len,
- unsigned int offs, u64 lblk_num,
- gfp_t gfp_flags)
+ unsigned int offs, u64 lblk_num)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
doutc(cl, "%p %llx.%llx len %u offs %u blk %llu\n", inode,
ceph_vinop(inode), len, offs, lblk_num);
- return fscrypt_encrypt_block_inplace(inode, page, len, offs, lblk_num,
- gfp_flags);
+ return fscrypt_encrypt_block_inplace(inode, page, len, offs, lblk_num);
}
/**
@@ -642,9 +562,8 @@ int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
* @page: pointer to page array
* @off: offset into the file that the data starts
* @len: max length to encrypt
- * @gfp: gfp flags to use for allocation
*
- * Decrypt an array of cleartext pages and return the amount of
+ * Encrypt an array of cleartext pages and return the amount of
* data encrypted. Any data in the page prior to the start of the
* first complete block in the read is ignored. Any incomplete
* crypto blocks at the end of the array are ignored.
@@ -652,7 +571,7 @@ int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
* Returns the length of the encrypted data or a negative errno.
*/
int ceph_fscrypt_encrypt_pages(struct inode *inode, struct page **page, u64 off,
- int len, gfp_t gfp)
+ int len)
{
int i, num_blocks;
u64 baseblk = off >> CEPH_FSCRYPT_BLOCK_SHIFT;
@@ -673,7 +592,7 @@ int ceph_fscrypt_encrypt_pages(struct inode *inode, struct page **page, u64 off,
fret = ceph_fscrypt_encrypt_block_inplace(inode, page[pgidx],
CEPH_FSCRYPT_BLOCK_SIZE, pgoffs,
- baseblk + i, gfp);
+ baseblk + i);
if (fret < 0) {
if (ret == 0)
ret = fret;
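
The crypto.c change above retires the private ceph_base64_encode()/ceph_base64_decode() pair in favour of the generic base64_encode()/base64_decode() with the BASE64_IMAP table from <linux/base64.h>. The deleted comment explains the variant: base64url's '-' and '_' are swapped for '+' and ',' (the RFC 3501 IMAP mailbox alphabet) so encoded snapshot names cannot start with '_'. A standalone sketch of that encoder follows; the alphabet is copied from the removed table, and it is assumed, not shown here, that the generic BASE64_IMAP helper produces the same unpadded output.

#include <stdio.h>
#include <stdint.h>

/* IMAP-mailbox-name base64 alphabet (RFC 3501): base64url with
 * '-' and '_' replaced by '+' and ',', as in the removed ceph table. */
static const char imap_b64[65] =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";

/* Unpadded encode, mirroring the deleted ceph_base64_encode(). */
static int imap_b64_encode(const uint8_t *src, int srclen, char *dst)
{
        uint32_t ac = 0;
        int bits = 0, i;
        char *cp = dst;

        for (i = 0; i < srclen; i++) {
                ac = (ac << 8) | src[i];
                bits += 8;
                do {
                        bits -= 6;
                        *cp++ = imap_b64[(ac >> bits) & 0x3f];
                } while (bits >= 6);
        }
        if (bits)                       /* flush the partial 6-bit group */
                *cp++ = imap_b64[(ac << (6 - bits)) & 0x3f];
        return cp - dst;
}

int main(void)
{
        char buf[64];
        int n = imap_b64_encode((const uint8_t *)"snap", 4, buf);

        printf("%.*s\n", n, buf);       /* prints "c25hcA": no padding */
        return 0;
}
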
diff --git a/fs/ceph/crypto.h b/fs/ceph/crypto.h
index d0768239a1c9..b748e2060bc9 100644
--- a/fs/ceph/crypto.h
+++ b/fs/ceph/crypto.h
@@ -8,6 +8,7 @@
#include <crypto/sha2.h>
#include <linux/fscrypt.h>
+#include <linux/base64.h>
#define CEPH_FSCRYPT_BLOCK_SHIFT 12
#define CEPH_FSCRYPT_BLOCK_SIZE (_AC(1, UL) << CEPH_FSCRYPT_BLOCK_SHIFT)
@@ -89,11 +90,6 @@ static inline u32 ceph_fscrypt_auth_len(struct ceph_fscrypt_auth *fa)
*/
#define CEPH_NOHASH_NAME_MAX (180 - SHA256_DIGEST_SIZE)
-#define CEPH_BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3)
-
-int ceph_base64_encode(const u8 *src, int srclen, char *dst);
-int ceph_base64_decode(const char *src, int srclen, u8 *dst);
-
void ceph_fscrypt_set_ops(struct super_block *sb);
void ceph_fscrypt_free_dummy_policy(struct ceph_fs_client *fsc);
@@ -102,10 +98,7 @@ int ceph_fscrypt_prepare_context(struct inode *dir, struct inode *inode,
struct ceph_acl_sec_ctx *as);
void ceph_fscrypt_as_ctx_to_req(struct ceph_mds_request *req,
struct ceph_acl_sec_ctx *as);
-int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name,
- char *buf);
-int ceph_encode_encrypted_fname(struct inode *parent, struct dentry *dentry,
- char *buf);
+int ceph_encode_encrypted_dname(struct inode *parent, char *buf, int len);
static inline int ceph_fname_alloc_buffer(struct inode *parent,
struct fscrypt_str *fname)
@@ -155,15 +148,14 @@ int ceph_fscrypt_decrypt_block_inplace(const struct inode *inode,
unsigned int offs, u64 lblk_num);
int ceph_fscrypt_encrypt_block_inplace(const struct inode *inode,
struct page *page, unsigned int len,
- unsigned int offs, u64 lblk_num,
- gfp_t gfp_flags);
+ unsigned int offs, u64 lblk_num);
int ceph_fscrypt_decrypt_pages(struct inode *inode, struct page **page,
u64 off, int len);
int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
u64 off, struct ceph_sparse_extent *map,
u32 ext_cnt);
int ceph_fscrypt_encrypt_pages(struct inode *inode, struct page **page, u64 off,
- int len, gfp_t gfp);
+ int len);
static inline struct page *ceph_fscrypt_pagecache_page(struct page *page)
{
@@ -194,17 +186,10 @@ static inline void ceph_fscrypt_as_ctx_to_req(struct ceph_mds_request *req,
{
}
-static inline int ceph_encode_encrypted_dname(struct inode *parent,
- struct qstr *d_name, char *buf)
-{
- memcpy(buf, d_name->name, d_name->len);
- return d_name->len;
-}
-
-static inline int ceph_encode_encrypted_fname(struct inode *parent,
- struct dentry *dentry, char *buf)
+static inline int ceph_encode_encrypted_dname(struct inode *parent, char *buf,
+ int len)
{
- return -EOPNOTSUPP;
+ return len;
}
static inline int ceph_fname_alloc_buffer(struct inode *parent,
@@ -246,8 +231,7 @@ static inline int ceph_fscrypt_decrypt_block_inplace(const struct inode *inode,
static inline int ceph_fscrypt_encrypt_block_inplace(const struct inode *inode,
struct page *page, unsigned int len,
- unsigned int offs, u64 lblk_num,
- gfp_t gfp_flags)
+ unsigned int offs, u64 lblk_num)
{
return 0;
}
@@ -269,7 +253,7 @@ static inline int ceph_fscrypt_decrypt_extents(struct inode *inode,
static inline int ceph_fscrypt_encrypt_pages(struct inode *inode,
struct page **page, u64 off,
- int len, gfp_t gfp)
+ int len)
{
return 0;
}
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index fdf9dc15eafa..f3fe786b4143 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -55,8 +55,6 @@ static int mdsc_show(struct seq_file *s, void *p)
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
struct rb_node *rp;
- int pathlen = 0;
- u64 pathbase;
char *path;
mutex_lock(&mdsc->mutex);
@@ -81,8 +79,8 @@ static int mdsc_show(struct seq_file *s, void *p)
if (req->r_inode) {
seq_printf(s, " #%llx", ceph_ino(req->r_inode));
} else if (req->r_dentry) {
- path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
- &pathbase, 0);
+ struct ceph_path_info path_info;
+ path = ceph_mdsc_build_path(mdsc, req->r_dentry, &path_info, 0);
if (IS_ERR(path))
path = NULL;
spin_lock(&req->r_dentry->d_lock);
@@ -91,7 +89,7 @@ static int mdsc_show(struct seq_file *s, void *p)
req->r_dentry,
path ? path : "");
spin_unlock(&req->r_dentry->d_lock);
- ceph_mdsc_free_path(path, pathlen);
+ ceph_mdsc_free_path_info(&path_info);
} else if (req->r_path1) {
seq_printf(s, " #%llx/%s", req->r_ino1.ino,
req->r_path1);
@@ -100,8 +98,8 @@ static int mdsc_show(struct seq_file *s, void *p)
}
if (req->r_old_dentry) {
- path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &pathlen,
- &pathbase, 0);
+ struct ceph_path_info path_info;
+ path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &path_info, 0);
if (IS_ERR(path))
path = NULL;
spin_lock(&req->r_old_dentry->d_lock);
@@ -111,7 +109,7 @@ static int mdsc_show(struct seq_file *s, void *p)
req->r_old_dentry,
path ? path : "");
spin_unlock(&req->r_old_dentry->d_lock);
- ceph_mdsc_free_path(path, pathlen);
+ ceph_mdsc_free_path_info(&path_info);
} else if (req->r_path2 && req->r_op != CEPH_MDS_OP_SYMLINK) {
if (req->r_ino2.ino)
seq_printf(s, " #%llx/%s", req->r_ino2.ino,
@@ -412,7 +410,7 @@ void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
{
- char name[100];
+ char name[NAME_MAX];
doutc(fsc->client, "begin\n");
fsc->debugfs_congestion_kb =
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 0bf388e07a02..86d7aa594ea9 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -141,17 +141,18 @@ __dcache_find_get_entry(struct dentry *parent, u64 idx,
if (ptr_pos >= i_size_read(dir))
return NULL;
- if (!cache_ctl->page || ptr_pgoff != cache_ctl->page->index) {
+ if (!cache_ctl->folio || ptr_pgoff != cache_ctl->folio->index) {
ceph_readdir_cache_release(cache_ctl);
- cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
- if (!cache_ctl->page) {
- doutc(cl, " page %lu not found\n", ptr_pgoff);
+ cache_ctl->folio = filemap_lock_folio(&dir->i_data, ptr_pgoff);
+ if (IS_ERR(cache_ctl->folio)) {
+ cache_ctl->folio = NULL;
+ doutc(cl, " folio %lu not found\n", ptr_pgoff);
return ERR_PTR(-EAGAIN);
}
/* reading/filling the cache are serialized by
- i_rwsem, no need to use page lock */
- unlock_page(cache_ctl->page);
- cache_ctl->dentries = kmap(cache_ctl->page);
+ i_rwsem, no need to use folio lock */
+ folio_unlock(cache_ctl->folio);
+ cache_ctl->dentries = kmap_local_folio(cache_ctl->folio, 0);
}
cache_ctl->index = idx & idx_mask;
@@ -422,17 +423,16 @@ more:
req->r_inode_drop = CEPH_CAP_FILE_EXCL;
}
if (dfi->last_name) {
- struct qstr d_name = { .name = dfi->last_name,
- .len = strlen(dfi->last_name) };
+ int len = strlen(dfi->last_name);
req->r_path2 = kzalloc(NAME_MAX + 1, GFP_KERNEL);
if (!req->r_path2) {
ceph_mdsc_put_request(req);
return -ENOMEM;
}
+ memcpy(req->r_path2, dfi->last_name, len);
- err = ceph_encode_encrypted_dname(inode, &d_name,
- req->r_path2);
+ err = ceph_encode_encrypted_dname(inode, req->r_path2, len);
if (err < 0) {
ceph_mdsc_put_request(req);
return err;
@@ -998,13 +998,14 @@ static int prep_encrypted_symlink_target(struct ceph_mds_request *req,
if (err)
goto out;
- req->r_path2 = kmalloc(CEPH_BASE64_CHARS(osd_link.len) + 1, GFP_KERNEL);
+ req->r_path2 = kmalloc(BASE64_CHARS(osd_link.len) + 1, GFP_KERNEL);
if (!req->r_path2) {
err = -ENOMEM;
goto out;
}
- len = ceph_base64_encode(osd_link.name, osd_link.len, req->r_path2);
+ len = base64_encode(osd_link.name, osd_link.len,
+ req->r_path2, false, BASE64_IMAP);
req->r_path2[len] = '\0';
out:
fscrypt_fname_free_buffer(&osd_link);
@@ -1092,19 +1093,20 @@ out:
return err;
}
-static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct ceph_acl_sec_ctx as_ctx = {};
+ struct dentry *ret;
int err;
int op;
err = ceph_wait_on_conflict_unlink(dentry);
if (err)
- return err;
+ return ERR_PTR(err);
if (ceph_snap(dir) == CEPH_SNAPDIR) {
/* mkdir .snap/foo is a MKSNAP */
@@ -1116,32 +1118,32 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
ceph_vinop(dir), dentry, dentry, mode);
op = CEPH_MDS_OP_MKDIR;
} else {
- err = -EROFS;
+ ret = ERR_PTR(-EROFS);
goto out;
}
if (op == CEPH_MDS_OP_MKDIR &&
ceph_quota_is_max_files_exceeded(dir)) {
- err = -EDQUOT;
+ ret = ERR_PTR(-EDQUOT);
goto out;
}
if ((op == CEPH_MDS_OP_MKSNAP) && IS_ENCRYPTED(dir) &&
!fscrypt_has_encryption_key(dir)) {
- err = -ENOKEY;
+ ret = ERR_PTR(-ENOKEY);
goto out;
}
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req)) {
- err = PTR_ERR(req);
+ ret = ERR_CAST(req);
goto out;
}
mode |= S_IFDIR;
req->r_new_inode = ceph_new_inode(dir, dentry, &mode, &as_ctx);
if (IS_ERR(req->r_new_inode)) {
- err = PTR_ERR(req->r_new_inode);
+ ret = ERR_CAST(req->r_new_inode);
req->r_new_inode = NULL;
goto out_req;
}
@@ -1165,15 +1167,22 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
!req->r_reply_info.head->is_target &&
!req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
+ ret = ERR_PTR(err);
out_req:
+ if (!IS_ERR(ret) && req->r_dentry != dentry)
+ /* Some other dentry was spliced in */
+ ret = dget(req->r_dentry);
ceph_mdsc_put_request(req);
out:
- if (!err)
+ if (!IS_ERR(ret)) {
+ if (ret)
+ dentry = ret;
ceph_init_inode_acls(d_inode(dentry), &as_ctx);
- else
+ } else {
d_drop(dentry);
+ }
ceph_release_acl_sec_ctx(&as_ctx);
- return err;
+ return ret;
}
static int ceph_link(struct dentry *old_dentry, struct inode *dir,
@@ -1252,8 +1261,7 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
spin_unlock(&fsc->async_unlink_conflict_lock);
spin_lock(&dentry->d_lock);
- di->flags &= ~CEPH_DENTRY_ASYNC_UNLINK;
- wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_UNLINK_BIT);
+ clear_and_wake_up_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags);
spin_unlock(&dentry->d_lock);
synchronize_rcu();
@@ -1263,10 +1271,8 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
/* If op failed, mark everyone involved for errors */
if (result) {
- int pathlen = 0;
- u64 base = 0;
- char *path = ceph_mdsc_build_path(mdsc, dentry, &pathlen,
- &base, 0);
+ struct ceph_path_info path_info = {0};
+ char *path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
/* mark error on parent + clear complete */
mapping_set_error(req->r_parent->i_mapping, result);
@@ -1280,8 +1286,8 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
mapping_set_error(req->r_old_inode->i_mapping, result);
pr_warn_client(cl, "failure path=(%llx)%s result=%d!\n",
- base, IS_ERR(path) ? "<<bad>>" : path, result);
- ceph_mdsc_free_path(path, pathlen);
+ path_info.vino.ino, IS_ERR(path) ? "<<bad>>" : path, result);
+ ceph_mdsc_free_path_info(&path_info);
}
out:
iput(req->r_old_inode);
@@ -1339,8 +1345,6 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
int err = -EROFS;
int op;
char *path;
- int pathlen;
- u64 pathbase;
if (ceph_snap(dir) == CEPH_SNAPDIR) {
/* rmdir .snap/foo is RMSNAP */
@@ -1359,14 +1363,15 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
if (!dn) {
try_async = false;
} else {
- path = ceph_mdsc_build_path(mdsc, dn, &pathlen, &pathbase, 0);
+ struct ceph_path_info path_info;
+ path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0);
if (IS_ERR(path)) {
try_async = false;
err = 0;
} else {
err = ceph_mds_check_access(mdsc, path, MAY_WRITE);
}
- ceph_mdsc_free_path(path, pathlen);
+ ceph_mdsc_free_path_info(&path_info);
dput(dn);
/* For none EACCES cases will let the MDS do the mds auth check */
@@ -1940,29 +1945,19 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry,
/*
* Check if cached dentry can be trusted.
*/
-static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
+static int ceph_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dentry->d_sb)->mdsc;
struct ceph_client *cl = mdsc->fsc->client;
int valid = 0;
- struct dentry *parent;
- struct inode *dir, *inode;
+ struct inode *inode;
- valid = fscrypt_d_revalidate(dentry, flags);
+ valid = fscrypt_d_revalidate(dir, name, dentry, flags);
if (valid <= 0)
return valid;
- if (flags & LOOKUP_RCU) {
- parent = READ_ONCE(dentry->d_parent);
- dir = d_inode_rcu(parent);
- if (!dir)
- return -ECHILD;
- inode = d_inode_rcu(dentry);
- } else {
- parent = dget_parent(dentry);
- dir = d_inode(parent);
- inode = d_inode(dentry);
- }
+ inode = d_inode_rcu(dentry);
doutc(cl, "%p '%pd' inode %p offset 0x%llx nokey %d\n",
dentry, dentry, inode, ceph_dentry(dentry)->offset,
@@ -2008,6 +2003,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
req->r_parent = dir;
ihold(dir);
+ req->r_dname = name;
+
mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
if (ceph_security_xattr_wanted(dir))
mask |= CEPH_CAP_XATTR_SHARED;
@@ -2038,9 +2035,6 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
doutc(cl, "%p '%pd' %s\n", dentry, dentry, valid ? "valid" : "invalid");
if (!valid)
ceph_dir_clear_complete(dir);
-
- if (!(flags & LOOKUP_RCU))
- dput(parent);
return valid;
}
@@ -2162,7 +2156,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
" rfiles: %20lld\n"
" rsubdirs: %20lld\n"
"rbytes: %20lld\n"
- "rctime: %10lld.%09ld\n",
+ "rctime: %ptSp\n",
ci->i_files + ci->i_subdirs,
ci->i_files,
ci->i_subdirs,
@@ -2170,8 +2164,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
ci->i_rfiles,
ci->i_rsubdirs,
ci->i_rbytes,
- ci->i_rctime.tv_sec,
- ci->i_rctime.tv_nsec);
+ &ci->i_rctime);
}
if (*ppos >= dfi->dir_info_len)
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 150076ced937..b2f2af104679 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -33,12 +33,19 @@ struct ceph_nfs_snapfh {
u32 hash;
} __attribute__ ((packed));
+#define BYTES_PER_U32 (sizeof(u32))
+#define CEPH_FH_BASIC_SIZE \
+ (sizeof(struct ceph_nfs_fh) / BYTES_PER_U32)
+#define CEPH_FH_WITH_PARENT_SIZE \
+ (sizeof(struct ceph_nfs_confh) / BYTES_PER_U32)
+#define CEPH_FH_SNAPPED_INODE_SIZE \
+ (sizeof(struct ceph_nfs_snapfh) / BYTES_PER_U32)
+
static int ceph_encode_snapfh(struct inode *inode, u32 *rawfh, int *max_len,
struct inode *parent_inode)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
- static const int snap_handle_length =
- sizeof(struct ceph_nfs_snapfh) >> 2;
+ static const int snap_handle_length = CEPH_FH_SNAPPED_INODE_SIZE;
struct ceph_nfs_snapfh *sfh = (void *)rawfh;
u64 snapid = ceph_snap(inode);
int ret;
@@ -88,10 +95,8 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
struct inode *parent_inode)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
- static const int handle_length =
- sizeof(struct ceph_nfs_fh) >> 2;
- static const int connected_handle_length =
- sizeof(struct ceph_nfs_confh) >> 2;
+ static const int handle_length = CEPH_FH_BASIC_SIZE;
+ static const int connected_handle_length = CEPH_FH_WITH_PARENT_SIZE;
int type;
if (ceph_snap(inode) != CEPH_NOSNAP)
@@ -308,7 +313,7 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb,
if (fh_type != FILEID_INO32_GEN &&
fh_type != FILEID_INO32_GEN_PARENT)
return NULL;
- if (fh_len < sizeof(*fh) / 4)
+ if (fh_len < sizeof(*fh) / BYTES_PER_U32)
return NULL;
doutc(fsc->client, "%llx\n", fh->ino);
@@ -427,7 +432,7 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
if (fh_type != FILEID_INO32_GEN_PARENT)
return NULL;
- if (fh_len < sizeof(*cfh) / 4)
+ if (fh_len < sizeof(*cfh) / BYTES_PER_U32)
return NULL;
doutc(fsc->client, "%llx\n", cfh->parent_ino);
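
The export.c hunk above only names the arithmetic: NFS file-handle lengths are exchanged in 32-bit words, so each handle struct's size is divided by sizeof(u32) instead of the open-coded '>> 2'. A trivial standalone check of that conversion, using a hypothetical packed handle struct purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* Same idea as the new export.c macros: handle lengths passed through
 * encode_fh()/fh_to_dentry() are counted in u32 words. */
#define BYTES_PER_U32 (sizeof(uint32_t))

/* Hypothetical packed handle, only for the arithmetic demonstration. */
struct demo_confh {
        uint64_t ino;
        uint64_t parent_ino;
} __attribute__((packed));

#define DEMO_FH_WITH_PARENT_SIZE (sizeof(struct demo_confh) / BYTES_PER_U32)

int main(void)
{
        /* 16 bytes -> 4 u32 words, roughly what *max_len is checked against */
        printf("handle length = %zu u32 words\n", DEMO_FH_WITH_PARENT_SIZE);
        return 0;
}
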
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 4b8d59ebda00..983390069f73 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -368,8 +368,6 @@ int ceph_open(struct inode *inode, struct file *file)
int flags, fmode, wanted;
struct dentry *dentry;
char *path;
- int pathlen;
- u64 pathbase;
bool do_sync = false;
int mask = MAY_READ;
@@ -399,14 +397,15 @@ int ceph_open(struct inode *inode, struct file *file)
if (!dentry) {
do_sync = true;
} else {
- path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &pathbase, 0);
+ struct ceph_path_info path_info;
+ path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
if (IS_ERR(path)) {
do_sync = true;
err = 0;
} else {
err = ceph_mds_check_access(mdsc, path, mask);
}
- ceph_mdsc_free_path(path, pathlen);
+ ceph_mdsc_free_path_info(&path_info);
dput(dentry);
/* For none EACCES cases will let the MDS do the mds auth check */
@@ -580,8 +579,7 @@ static void wake_async_create_waiters(struct inode *inode,
spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
- ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
- wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
+ clear_and_wake_up_bit(CEPH_ASYNC_CREATE_BIT, &ci->i_ceph_flags);
if (ci->i_ceph_flags & CEPH_I_ASYNC_CHECK_CAPS) {
ci->i_ceph_flags &= ~CEPH_I_ASYNC_CHECK_CAPS;
@@ -614,15 +612,13 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
mapping_set_error(req->r_parent->i_mapping, result);
if (result) {
- int pathlen = 0;
- u64 base = 0;
- char *path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
- &base, 0);
+ struct ceph_path_info path_info = {0};
+ char *path = ceph_mdsc_build_path(mdsc, req->r_dentry, &path_info, 0);
pr_warn_client(cl,
"async create failure path=(%llx)%s result=%d!\n",
- base, IS_ERR(path) ? "<<bad>>" : path, result);
- ceph_mdsc_free_path(path, pathlen);
+ path_info.vino.ino, IS_ERR(path) ? "<<bad>>" : path, result);
+ ceph_mdsc_free_path_info(&path_info);
ceph_dir_clear_complete(req->r_parent);
if (!d_unhashed(dentry))
@@ -744,7 +740,7 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
vino.ino, ceph_ino(dir), dentry->d_name.name);
ceph_dir_clear_ordered(dir);
ceph_init_inode_acls(inode, as_ctx);
- if (inode->i_state & I_NEW) {
+ if (inode_state_read_once(inode) & I_NEW) {
/*
* If it's not I_NEW, then someone created this before
* we got here. Assume the server is aware of it at
@@ -765,8 +761,7 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
}
spin_lock(&dentry->d_lock);
- di->flags &= ~CEPH_DENTRY_ASYNC_CREATE;
- wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT);
+ clear_and_wake_up_bit(CEPH_DENTRY_ASYNC_CREATE_BIT, &di->flags);
spin_unlock(&dentry->d_lock);
return ret;
@@ -791,8 +786,6 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
int mask;
int err;
char *path;
- int pathlen;
- u64 pathbase;
doutc(cl, "%p %llx.%llx dentry %p '%pd' %s flags %d mode 0%o\n",
dir, ceph_vinop(dir), dentry, dentry,
@@ -814,7 +807,8 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
if (!dn) {
try_async = false;
} else {
- path = ceph_mdsc_build_path(mdsc, dn, &pathlen, &pathbase, 0);
+ struct ceph_path_info path_info;
+ path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0);
if (IS_ERR(path)) {
try_async = false;
err = 0;
@@ -826,7 +820,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
mask |= MAY_WRITE;
err = ceph_mds_check_access(mdsc, path, mask);
}
- ceph_mdsc_free_path(path, pathlen);
+ ceph_mdsc_free_path_info(&path_info);
dput(dn);
/* For none EACCES cases will let the MDS do the mds auth check */
@@ -907,7 +901,7 @@ retry:
new_inode = NULL;
goto out_req;
}
- WARN_ON_ONCE(!(new_inode->i_state & I_NEW));
+ WARN_ON_ONCE(!(inode_state_read_once(new_inode) & I_NEW));
spin_lock(&dentry->d_lock);
di->flags |= CEPH_DENTRY_ASYNC_CREATE;
@@ -1066,7 +1060,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
if (ceph_inode_is_shutdown(inode))
return -EIO;
- if (!len)
+ if (!len || !i_size)
return 0;
/*
* flush any page cache pages in this range. this
@@ -1086,7 +1080,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
int num_pages;
size_t page_off;
bool more;
- int idx;
+ int idx = 0;
size_t left;
struct ceph_osd_req_op *op;
u64 read_off = off;
@@ -1116,6 +1110,16 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
len = read_off + read_len - off;
more = len < iov_iter_count(to);
+ op = &req->r_ops[0];
+ if (sparse) {
+ extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
+ ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+ if (ret) {
+ ceph_osdc_put_request(req);
+ break;
+ }
+ }
+
num_pages = calc_pages_for(read_off, read_len);
page_off = offset_in_page(off);
pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
@@ -1127,17 +1131,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
osd_req_op_extent_osd_data_pages(req, 0, pages, read_len,
offset_in_page(read_off),
- false, false);
-
- op = &req->r_ops[0];
- if (sparse) {
- extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
- ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
- if (ret) {
- ceph_osdc_put_request(req);
- break;
- }
- }
+ false, true);
ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
@@ -1160,7 +1154,14 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
else if (ret == -ENOENT)
ret = 0;
- if (ret > 0 && IS_ENCRYPTED(inode)) {
+ if (ret < 0) {
+ ceph_osdc_put_request(req);
+ if (ret == -EBLOCKLISTED)
+ fsc->blocklisted = true;
+ break;
+ }
+
+ if (IS_ENCRYPTED(inode)) {
int fret;
fret = ceph_fscrypt_decrypt_extents(inode, pages,
@@ -1186,10 +1187,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
ret = min_t(ssize_t, fret, len);
}
- ceph_osdc_put_request(req);
-
/* Short read but not EOF? Zero out the remainder. */
- if (ret >= 0 && ret < len && (off + ret < i_size)) {
+ if (ret < len && (off + ret < i_size)) {
int zlen = min(len - ret, i_size - off - ret);
int zoff = page_off + ret;
@@ -1199,13 +1198,11 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
ret += zlen;
}
- idx = 0;
- if (ret <= 0)
- left = 0;
- else if (off + ret > i_size)
- left = i_size - off;
+ if (off + ret > i_size)
+ left = (i_size > off) ? i_size - off : 0;
else
left = ret;
+
while (left > 0) {
size_t plen, copied;
@@ -1221,13 +1218,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
break;
}
}
- ceph_release_page_vector(pages, num_pages);
- if (ret < 0) {
- if (ret == -EBLOCKLISTED)
- fsc->blocklisted = true;
- break;
- }
+ ceph_osdc_put_request(req);
if (off >= i_size || !more)
break;
@@ -1553,6 +1545,16 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
break;
}
+ op = &req->r_ops[0];
+ if (!write && sparse) {
+ extent_cnt = __ceph_sparse_read_ext_count(inode, size);
+ ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+ if (ret) {
+ ceph_osdc_put_request(req);
+ break;
+ }
+ }
+
len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
if (len < 0) {
ceph_osdc_put_request(req);
@@ -1562,6 +1564,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
if (len != size)
osd_req_op_extent_update(req, 0, len);
+ osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
+
/*
* To simplify error handling, allow AIO when IO within i_size
* or IO can be satisfied by single OSD request.
@@ -1593,17 +1597,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
req->r_mtime = mtime;
}
- osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
- op = &req->r_ops[0];
- if (sparse) {
- extent_cnt = __ceph_sparse_read_ext_count(inode, size);
- ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
- if (ret) {
- ceph_osdc_put_request(req);
- break;
- }
- }
-
if (aio_req) {
aio_req->total_len += len;
aio_req->num_reqs++;
@@ -1993,8 +1986,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
if (IS_ENCRYPTED(inode)) {
ret = ceph_fscrypt_encrypt_pages(inode, pages,
- write_pos, write_len,
- GFP_KERNEL);
+ write_pos, write_len);
if (ret < 0) {
doutc(cl, "encryption failed with %d\n", ret);
ceph_release_page_vector(pages, num_pages);
@@ -2127,10 +2119,10 @@ again:
if (ceph_inode_is_shutdown(inode))
return -ESTALE;
- if (direct_lock)
- ceph_start_io_direct(inode);
- else
- ceph_start_io_read(inode);
+ ret = direct_lock ? ceph_start_io_direct(inode) :
+ ceph_start_io_read(inode);
+ if (ret)
+ return ret;
if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
want |= CEPH_CAP_FILE_CACHE;
@@ -2283,7 +2275,9 @@ static ssize_t ceph_splice_read(struct file *in, loff_t *ppos,
(fi->flags & CEPH_F_SYNC))
return copy_splice_read(in, ppos, pipe, len, flags);
- ceph_start_io_read(inode);
+ ret = ceph_start_io_read(inode);
+ if (ret)
+ return ret;
want = CEPH_CAP_FILE_CACHE;
if (fi->fmode & CEPH_FILE_MODE_LAZY)
@@ -2362,10 +2356,10 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
direct_lock = true;
retry_snap:
- if (direct_lock)
- ceph_start_io_direct(inode);
- else
- ceph_start_io_write(inode);
+ err = direct_lock ? ceph_start_io_direct(inode) :
+ ceph_start_io_write(inode);
+ if (err)
+ goto out_unlocked;
if (iocb->ki_flags & IOCB_APPEND) {
err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
@@ -2531,19 +2525,19 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
return generic_file_llseek(file, offset, whence);
}
-static inline void ceph_zero_partial_page(
- struct inode *inode, loff_t offset, unsigned size)
+static inline void ceph_zero_partial_page(struct inode *inode,
+ loff_t offset, size_t size)
{
- struct page *page;
- pgoff_t index = offset >> PAGE_SHIFT;
+ struct folio *folio;
- page = find_lock_page(inode->i_mapping, index);
- if (page) {
- wait_on_page_writeback(page);
- zero_user(page, offset & (PAGE_SIZE - 1), size);
- unlock_page(page);
- put_page(page);
- }
+ folio = filemap_lock_folio(inode->i_mapping, offset >> PAGE_SHIFT);
+ if (IS_ERR(folio))
+ return;
+
+ folio_wait_writeback(folio);
+ folio_zero_range(folio, offset_in_folio(folio, offset), size);
+ folio_unlock(folio);
+ folio_put(folio);
}
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
@@ -2617,7 +2611,7 @@ static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
s32 stripe_unit = ci->i_layout.stripe_unit;
s32 stripe_count = ci->i_layout.stripe_count;
s32 object_size = ci->i_layout.object_size;
- u64 object_set_size = object_size * stripe_count;
+ u64 object_set_size = (u64) object_size * stripe_count;
u64 nearly, t;
/* round offset up to next period boundary */
@@ -2884,7 +2878,7 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
struct ceph_object_id src_oid, dst_oid;
struct ceph_osd_client *osdc;
struct ceph_osd_request *req;
- size_t bytes = 0;
+ ssize_t bytes = 0;
u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
u32 src_objlen, dst_objlen;
u32 object_size = src_ci->i_layout.object_size;
@@ -2934,7 +2928,7 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
"OSDs don't support copy-from2; disabling copy offload\n");
}
doutc(cl, "returned %d\n", ret);
- if (!bytes)
+ if (bytes <= 0)
bytes = ret;
goto out;
}
@@ -3172,7 +3166,7 @@ const struct file_operations ceph_file_fops = {
.llseek = ceph_llseek,
.read_iter = ceph_read_iter,
.write_iter = ceph_write_iter,
- .mmap = ceph_mmap,
+ .mmap_prepare = ceph_mmap_prepare,
.fsync = ceph_fsync,
.lock = ceph_lock,
.setlease = simple_nosetlease,
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 7dd6c2275085..2966f88310e3 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -55,6 +55,52 @@ static int ceph_set_ino_cb(struct inode *inode, void *data)
return 0;
}
+/*
+ * Check if the parent inode matches the vino from directory reply info
+ */
+static inline bool ceph_vino_matches_parent(struct inode *parent,
+ struct ceph_vino vino)
+{
+ return ceph_ino(parent) == vino.ino && ceph_snap(parent) == vino.snap;
+}
+
+/*
+ * Validate that the directory inode referenced by @req->r_parent matches the
+ * inode number and snapshot id contained in the reply's directory record. If
+ * they do not match – which can theoretically happen if the parent dentry was
+ * moved between the time the request was issued and the reply arrived – fall
+ * back to looking up the correct inode in the inode cache.
+ *
+ * A reference is *always* returned. Callers that receive a different inode
+ * than the original @parent are responsible for dropping the extra reference
+ * once the reply has been processed.
+ */
+static struct inode *ceph_get_reply_dir(struct super_block *sb,
+ struct inode *parent,
+ struct ceph_mds_reply_info_parsed *rinfo)
+{
+ struct ceph_vino vino;
+
+ if (unlikely(!rinfo->diri.in))
+ return parent; /* nothing to compare against */
+
+ /* If we didn't have a cached parent inode to begin with, just bail out. */
+ if (!parent)
+ return NULL;
+
+ vino.ino = le64_to_cpu(rinfo->diri.in->ino);
+ vino.snap = le64_to_cpu(rinfo->diri.in->snapid);
+
+ if (likely(ceph_vino_matches_parent(parent, vino)))
+ return parent; /* matches – use the original reference */
+
+ /* Mismatch – this should be rare. Emit a WARN and obtain the correct inode. */
+ WARN_ONCE(1, "ceph: reply dir mismatch (parent valid %llx.%llx reply %llx.%llx)\n",
+ ceph_ino(parent), ceph_snap(parent), vino.ino, vino.snap);
+
+ return ceph_get_inode(sb, vino, NULL);
+}
+
/**
* ceph_new_inode - allocate a new inode in advance of an expected create
* @dir: parent directory for new inode
@@ -86,7 +132,7 @@ struct inode *ceph_new_inode(struct inode *dir, struct dentry *dentry,
goto out_err;
}
- inode->i_state = 0;
+ inode_state_assign_raw(inode, 0);
inode->i_mode = *mode;
err = ceph_security_init_secctx(dentry, *mode, as_ctx);
@@ -155,7 +201,7 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
doutc(cl, "on %llx=%llx.%llx got %p new %d\n",
ceph_present_inode(inode), ceph_vinop(inode), inode,
- !!(inode->i_state & I_NEW));
+ !!(inode_state_read_once(inode) & I_NEW));
return inode;
}
@@ -182,7 +228,7 @@ struct inode *ceph_get_snapdir(struct inode *parent)
goto err;
}
- if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
+ if (!(inode_state_read_once(inode) & I_NEW) && !S_ISDIR(inode->i_mode)) {
pr_warn_once_client(cl, "bad snapdir inode type (mode=0%o)\n",
inode->i_mode);
goto err;
@@ -215,7 +261,7 @@ struct inode *ceph_get_snapdir(struct inode *parent)
}
}
#endif
- if (inode->i_state & I_NEW) {
+ if (inode_state_read_once(inode) & I_NEW) {
inode->i_op = &ceph_snapdir_iops;
inode->i_fop = &ceph_snapdir_fops;
ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
@@ -224,7 +270,7 @@ struct inode *ceph_get_snapdir(struct inode *parent)
return inode;
err:
- if ((inode->i_state & I_NEW))
+ if ((inode_state_read_once(inode) & I_NEW))
discard_new_inode(inode);
else
iput(inode);
@@ -665,6 +711,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
ci->i_work_mask = 0;
memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
#ifdef CONFIG_FS_ENCRYPTION
+ ci->i_crypt_info = NULL;
ci->fscrypt_auth = NULL;
ci->fscrypt_auth_len = 0;
#endif
@@ -697,7 +744,7 @@ void ceph_evict_inode(struct inode *inode)
netfs_wait_for_outstanding_io(inode);
truncate_inode_pages_final(&inode->i_data);
- if (inode->i_state & I_PINNING_NETFS_WB)
+ if (inode_state_read_once(inode) & I_PINNING_NETFS_WB)
ceph_fscache_unuse_cookie(inode, true);
clear_inode(inode);
@@ -832,7 +879,9 @@ void ceph_fill_file_time(struct inode *inode, int issued,
{
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct timespec64 iatime = inode_get_atime(inode);
struct timespec64 ictime = inode_get_ctime(inode);
+ struct timespec64 imtime = inode_get_mtime(inode);
int warn = 0;
if (issued & (CEPH_CAP_FILE_EXCL|
@@ -842,39 +891,26 @@ void ceph_fill_file_time(struct inode *inode, int issued,
CEPH_CAP_XATTR_EXCL)) {
if (ci->i_version == 0 ||
timespec64_compare(ctime, &ictime) > 0) {
- doutc(cl, "ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
- ictime.tv_sec, ictime.tv_nsec,
- ctime->tv_sec, ctime->tv_nsec);
+ doutc(cl, "ctime %ptSp -> %ptSp inc w/ cap\n", &ictime, ctime);
inode_set_ctime_to_ts(inode, *ctime);
}
if (ci->i_version == 0 ||
ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
/* the MDS did a utimes() */
- doutc(cl, "mtime %lld.%09ld -> %lld.%09ld tw %d -> %d\n",
- inode_get_mtime_sec(inode),
- inode_get_mtime_nsec(inode),
- mtime->tv_sec, mtime->tv_nsec,
- ci->i_time_warp_seq, (int)time_warp_seq);
+ doutc(cl, "mtime %ptSp -> %ptSp tw %d -> %d\n", &imtime, mtime,
+ ci->i_time_warp_seq, (int)time_warp_seq);
inode_set_mtime_to_ts(inode, *mtime);
inode_set_atime_to_ts(inode, *atime);
ci->i_time_warp_seq = time_warp_seq;
} else if (time_warp_seq == ci->i_time_warp_seq) {
- struct timespec64 ts;
-
/* nobody did utimes(); take the max */
- ts = inode_get_mtime(inode);
- if (timespec64_compare(mtime, &ts) > 0) {
- doutc(cl, "mtime %lld.%09ld -> %lld.%09ld inc\n",
- ts.tv_sec, ts.tv_nsec,
- mtime->tv_sec, mtime->tv_nsec);
+ if (timespec64_compare(mtime, &imtime) > 0) {
+ doutc(cl, "mtime %ptSp -> %ptSp inc\n", &imtime, mtime);
inode_set_mtime_to_ts(inode, *mtime);
}
- ts = inode_get_atime(inode);
- if (timespec64_compare(atime, &ts) > 0) {
- doutc(cl, "atime %lld.%09ld -> %lld.%09ld inc\n",
- ts.tv_sec, ts.tv_nsec,
- atime->tv_sec, atime->tv_nsec);
+ if (timespec64_compare(atime, &iatime) > 0) {
+ doutc(cl, "atime %ptSp -> %ptSp inc\n", &iatime, atime);
inode_set_atime_to_ts(inode, *atime);
}
} else if (issued & CEPH_CAP_FILE_EXCL) {
@@ -911,7 +947,7 @@ static int decode_encrypted_symlink(struct ceph_mds_client *mdsc,
if (!sym)
return -ENOMEM;
- declen = ceph_base64_decode(encsym, enclen, sym);
+ declen = base64_decode(encsym, enclen, sym, false, BASE64_IMAP);
if (declen < 0) {
pr_err_client(cl,
"can't decode symlink (%d). Content: %.*s\n",
@@ -966,7 +1002,7 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
le64_to_cpu(info->version), ci->i_version);
/* Once I_NEW is cleared, we can't change type or dev numbers */
- if (inode->i_state & I_NEW) {
+ if (inode_state_read_once(inode) & I_NEW) {
inode->i_mode = mode;
} else {
if (inode_wrong_type(inode, mode)) {
@@ -1043,7 +1079,7 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
#ifdef CONFIG_FS_ENCRYPTION
if (iinfo->fscrypt_auth_len &&
- ((inode->i_state & I_NEW) || (ci->fscrypt_auth_len == 0))) {
+ ((inode_state_read_once(inode) & I_NEW) || (ci->fscrypt_auth_len == 0))) {
kfree(ci->fscrypt_auth);
ci->fscrypt_auth_len = iinfo->fscrypt_auth_len;
ci->fscrypt_auth = iinfo->fscrypt_auth;
@@ -1523,6 +1559,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
struct ceph_vino tvino, dvino;
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
struct ceph_client *cl = fsc->client;
+ struct inode *parent_dir = NULL;
int err = 0;
doutc(cl, "%p is_dentry %d is_target %d\n", req,
@@ -1536,10 +1573,17 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
}
if (rinfo->head->is_dentry) {
- struct inode *dir = req->r_parent;
-
- if (dir) {
- err = ceph_fill_inode(dir, NULL, &rinfo->diri,
+ /*
+ * r_parent may be stale, in cases when R_PARENT_LOCKED is not set,
+ * so we need to get the correct inode
+ */
+ parent_dir = ceph_get_reply_dir(sb, req->r_parent, rinfo);
+ if (unlikely(IS_ERR(parent_dir))) {
+ err = PTR_ERR(parent_dir);
+ goto done;
+ }
+ if (parent_dir) {
+ err = ceph_fill_inode(parent_dir, NULL, &rinfo->diri,
rinfo->dirfrag, session, -1,
&req->r_caps_reservation);
if (err < 0)
@@ -1548,14 +1592,14 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
WARN_ON_ONCE(1);
}
- if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
+ if (parent_dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
bool is_nokey = false;
struct qstr dname;
struct dentry *dn, *parent;
struct fscrypt_str oname = FSTR_INIT(NULL, 0);
- struct ceph_fname fname = { .dir = dir,
+ struct ceph_fname fname = { .dir = parent_dir,
.name = rinfo->dname,
.ctext = rinfo->altname,
.name_len = rinfo->dname_len,
@@ -1564,10 +1608,10 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
BUG_ON(!rinfo->head->is_target);
BUG_ON(req->r_dentry);
- parent = d_find_any_alias(dir);
+ parent = d_find_any_alias(parent_dir);
BUG_ON(!parent);
- err = ceph_fname_alloc_buffer(dir, &oname);
+ err = ceph_fname_alloc_buffer(parent_dir, &oname);
if (err < 0) {
dput(parent);
goto done;
@@ -1576,7 +1620,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
err = ceph_fname_to_usr(&fname, NULL, &oname, &is_nokey);
if (err < 0) {
dput(parent);
- ceph_fname_free_buffer(dir, &oname);
+ ceph_fname_free_buffer(parent_dir, &oname);
goto done;
}
dname.name = oname.name;
@@ -1595,7 +1639,7 @@ retry_lookup:
dname.len, dname.name, dn);
if (!dn) {
dput(parent);
- ceph_fname_free_buffer(dir, &oname);
+ ceph_fname_free_buffer(parent_dir, &oname);
err = -ENOMEM;
goto done;
}
@@ -1610,12 +1654,12 @@ retry_lookup:
ceph_snap(d_inode(dn)) != tvino.snap)) {
doutc(cl, " dn %p points to wrong inode %p\n",
dn, d_inode(dn));
- ceph_dir_clear_ordered(dir);
+ ceph_dir_clear_ordered(parent_dir);
d_delete(dn);
dput(dn);
goto retry_lookup;
}
- ceph_fname_free_buffer(dir, &oname);
+ ceph_fname_free_buffer(parent_dir, &oname);
req->r_dentry = dn;
dput(parent);
@@ -1637,13 +1681,13 @@ retry_lookup:
pr_err_client(cl, "badness %p %llx.%llx\n", in,
ceph_vinop(in));
req->r_target_inode = NULL;
- if (in->i_state & I_NEW)
+ if (inode_state_read_once(in) & I_NEW)
discard_new_inode(in);
else
iput(in);
goto done;
}
- if (in->i_state & I_NEW)
+ if (inode_state_read_once(in) & I_NEW)
unlock_new_inode(in);
}
@@ -1739,6 +1783,11 @@ retry_lookup:
goto done;
}
+ if (unlikely(!in)) {
+ err = -EINVAL;
+ goto done;
+ }
+
/* attach proper inode */
if (d_really_is_negative(dn)) {
ceph_dir_clear_ordered(dir);
@@ -1774,6 +1823,12 @@ retry_lookup:
doutc(cl, " linking snapped dir %p to dn %p\n", in,
req->r_dentry);
ceph_dir_clear_ordered(dir);
+
+ if (unlikely(!in)) {
+ err = -EINVAL;
+ goto done;
+ }
+
ihold(in);
err = splice_dentry(&req->r_dentry, in);
if (err < 0)
@@ -1794,6 +1849,9 @@ retry_lookup:
&dvino, ptvino);
}
done:
+ /* Drop extra ref from ceph_get_reply_dir() if it returned a new inode */
+ if (unlikely(!IS_ERR_OR_NULL(parent_dir) && parent_dir != req->r_parent))
+ iput(parent_dir);
doutc(cl, "done err=%d\n", err);
return err;
}
@@ -1829,11 +1887,11 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
pr_err_client(cl, "inode badness on %p got %d\n", in,
rc);
err = rc;
- if (in->i_state & I_NEW) {
+ if (inode_state_read_once(in) & I_NEW) {
ihold(in);
discard_new_inode(in);
}
- } else if (in->i_state & I_NEW) {
+ } else if (inode_state_read_once(in) & I_NEW) {
unlock_new_inode(in);
}
@@ -1845,10 +1903,9 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
{
- if (ctl->page) {
- kunmap(ctl->page);
- put_page(ctl->page);
- ctl->page = NULL;
+ if (ctl->folio) {
+ folio_release_kmap(ctl->folio, ctl->dentries);
+ ctl->folio = NULL;
}
}
@@ -1862,20 +1919,26 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
unsigned idx = ctl->index % nsize;
pgoff_t pgoff = ctl->index / nsize;
- if (!ctl->page || pgoff != ctl->page->index) {
+ if (!ctl->folio || pgoff != ctl->folio->index) {
ceph_readdir_cache_release(ctl);
+ fgf_t fgf = FGP_LOCK;
+
if (idx == 0)
- ctl->page = grab_cache_page(&dir->i_data, pgoff);
- else
- ctl->page = find_lock_page(&dir->i_data, pgoff);
- if (!ctl->page) {
+ fgf |= FGP_ACCESSED | FGP_CREAT;
+
+ ctl->folio = __filemap_get_folio(&dir->i_data, pgoff,
+ fgf, mapping_gfp_mask(&dir->i_data));
+ if (IS_ERR(ctl->folio)) {
+ int err = PTR_ERR(ctl->folio);
+
+ ctl->folio = NULL;
ctl->index = -1;
- return idx == 0 ? -ENOMEM : 0;
+ return idx == 0 ? err : 0;
}
/* reading/filling the cache are serialized by
- * i_rwsem, no need to use page lock */
- unlock_page(ctl->page);
- ctl->dentries = kmap(ctl->page);
+ * i_rwsem, no need to use folio lock */
+ folio_unlock(ctl->folio);
+ ctl->dentries = kmap_local_folio(ctl->folio, 0);
if (idx == 0)
memset(ctl->dentries, 0, PAGE_SIZE);
}
@@ -2040,7 +2103,7 @@ retry_lookup:
pr_err_client(cl, "badness on %p %llx.%llx\n", in,
ceph_vinop(in));
if (d_really_is_negative(dn)) {
- if (in->i_state & I_NEW) {
+ if (inode_state_read_once(in) & I_NEW) {
ihold(in);
discard_new_inode(in);
}
@@ -2050,7 +2113,7 @@ retry_lookup:
err = ret;
goto next_item;
}
- if (in->i_state & I_NEW)
+ if (inode_state_read_once(in) & I_NEW)
unlock_new_inode(in);
if (d_really_is_negative(dn)) {
@@ -2362,7 +2425,7 @@ static int fill_fscrypt_truncate(struct inode *inode,
/* Try to writeback the dirty pagecaches */
if (issued & (CEPH_CAP_FILE_BUFFER)) {
- loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SHIFT - 1;
+ loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SIZE - 1;
ret = filemap_write_and_wait_range(inode->i_mapping,
orig_pos, lend);
@@ -2431,8 +2494,7 @@ static int fill_fscrypt_truncate(struct inode *inode,
/* encrypt the last block */
ret = ceph_fscrypt_encrypt_block_inplace(inode, page,
CEPH_FSCRYPT_BLOCK_SIZE,
- 0, block,
- GFP_KERNEL);
+ 0, block);
if (ret)
goto out;
}
@@ -2483,22 +2545,21 @@ int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
int truncate_retry = 20; /* The RMW will take around 50ms */
struct dentry *dentry;
char *path;
- int pathlen;
- u64 pathbase;
bool do_sync = false;
dentry = d_find_alias(inode);
if (!dentry) {
do_sync = true;
} else {
- path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &pathbase, 0);
+ struct ceph_path_info path_info;
+ path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
if (IS_ERR(path)) {
do_sync = true;
err = 0;
} else {
err = ceph_mds_check_access(mdsc, path, MAY_WRITE);
}
- ceph_mdsc_free_path(path, pathlen);
+ ceph_mdsc_free_path_info(&path_info);
dput(dentry);
/* For non-EACCES cases we will let the MDS do the auth check */
@@ -2631,10 +2692,8 @@ retry:
if (ia_valid & ATTR_ATIME) {
struct timespec64 atime = inode_get_atime(inode);
- doutc(cl, "%p %llx.%llx atime %lld.%09ld -> %lld.%09ld\n",
- inode, ceph_vinop(inode),
- atime.tv_sec, atime.tv_nsec,
- attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
+ doutc(cl, "%p %llx.%llx atime %ptSp -> %ptSp\n",
+ inode, ceph_vinop(inode), &atime, &attr->ia_atime);
if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
ci->i_time_warp_seq++;
inode_set_atime_to_ts(inode, attr->ia_atime);
@@ -2708,10 +2767,8 @@ retry:
if (ia_valid & ATTR_MTIME) {
struct timespec64 mtime = inode_get_mtime(inode);
- doutc(cl, "%p %llx.%llx mtime %lld.%09ld -> %lld.%09ld\n",
- inode, ceph_vinop(inode),
- mtime.tv_sec, mtime.tv_nsec,
- attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
+ doutc(cl, "%p %llx.%llx mtime %ptSp -> %ptSp\n",
+ inode, ceph_vinop(inode), &mtime, &attr->ia_mtime);
if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
ci->i_time_warp_seq++;
inode_set_mtime_to_ts(inode, attr->ia_mtime);
@@ -2732,13 +2789,11 @@ retry:
/* these do nothing */
if (ia_valid & ATTR_CTIME) {
+ struct timespec64 ictime = inode_get_ctime(inode);
bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
- doutc(cl, "%p %llx.%llx ctime %lld.%09ld -> %lld.%09ld (%s)\n",
- inode, ceph_vinop(inode),
- inode_get_ctime_sec(inode),
- inode_get_ctime_nsec(inode),
- attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
+ doutc(cl, "%p %llx.%llx ctime %ptSp -> %ptSp (%s)\n",
+ inode, ceph_vinop(inode), &ictime, &attr->ia_ctime,
only ? "ctime only" : "ignored");
if (only) {
/*
diff --git a/fs/ceph/io.c b/fs/ceph/io.c
index c456509b31c3..2d10f49c93a9 100644
--- a/fs/ceph/io.c
+++ b/fs/ceph/io.c
@@ -21,14 +21,23 @@
/* Call with exclusively locked inode->i_rwsem */
static void ceph_block_o_direct(struct ceph_inode_info *ci, struct inode *inode)
{
+ bool is_odirect;
+
lockdep_assert_held_write(&inode->i_rwsem);
- if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT) {
- spin_lock(&ci->i_ceph_lock);
- ci->i_ceph_flags &= ~CEPH_I_ODIRECT;
- spin_unlock(&ci->i_ceph_lock);
- inode_dio_wait(inode);
+ spin_lock(&ci->i_ceph_lock);
+ /* ensure that bit state is consistent */
+ smp_mb__before_atomic();
+ is_odirect = READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT;
+ if (is_odirect) {
+ clear_bit(CEPH_I_ODIRECT_BIT, &ci->i_ceph_flags);
+ /* ensure modified bit is visible */
+ smp_mb__after_atomic();
}
+ spin_unlock(&ci->i_ceph_lock);
+
+ if (is_odirect)
+ inode_dio_wait(inode);
}
/**
@@ -47,20 +56,35 @@ static void ceph_block_o_direct(struct ceph_inode_info *ci, struct inode *inode)
* Note that buffered writes and truncates both take a write lock on
* inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
*/
-void
-ceph_start_io_read(struct inode *inode)
+int ceph_start_io_read(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ bool is_odirect;
+ int err;
/* Be an optimist! */
- down_read(&inode->i_rwsem);
- if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT))
- return;
+ err = down_read_killable(&inode->i_rwsem);
+ if (err)
+ return err;
+
+ spin_lock(&ci->i_ceph_lock);
+ /* ensure that bit state is consistent */
+ smp_mb__before_atomic();
+ is_odirect = READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT;
+ spin_unlock(&ci->i_ceph_lock);
+ if (!is_odirect)
+ return 0;
up_read(&inode->i_rwsem);
+
/* Slow path.... */
- down_write(&inode->i_rwsem);
+ err = down_write_killable(&inode->i_rwsem);
+ if (err)
+ return err;
+
ceph_block_o_direct(ci, inode);
downgrade_write(&inode->i_rwsem);
+
+ return 0;
}
/**
@@ -83,11 +107,12 @@ ceph_end_io_read(struct inode *inode)
* Declare that a buffered write operation is about to start, and ensure
* that we block all direct I/O.
*/
-void
-ceph_start_io_write(struct inode *inode)
+int ceph_start_io_write(struct inode *inode)
{
- down_write(&inode->i_rwsem);
- ceph_block_o_direct(ceph_inode(inode), inode);
+ int err = down_write_killable(&inode->i_rwsem);
+ if (!err)
+ ceph_block_o_direct(ceph_inode(inode), inode);
+ return err;
}
/**
@@ -106,12 +131,22 @@ ceph_end_io_write(struct inode *inode)
/* Call with exclusively locked inode->i_rwsem */
static void ceph_block_buffered(struct ceph_inode_info *ci, struct inode *inode)
{
+ bool is_odirect;
+
lockdep_assert_held_write(&inode->i_rwsem);
- if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT)) {
- spin_lock(&ci->i_ceph_lock);
- ci->i_ceph_flags |= CEPH_I_ODIRECT;
- spin_unlock(&ci->i_ceph_lock);
+ spin_lock(&ci->i_ceph_lock);
+ /* ensure that bit state is consistent */
+ smp_mb__before_atomic();
+ is_odirect = READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT;
+ if (!is_odirect) {
+ set_bit(CEPH_I_ODIRECT_BIT, &ci->i_ceph_flags);
+ /* ensure modified bit is visible */
+ smp_mb__after_atomic();
+ }
+ spin_unlock(&ci->i_ceph_lock);
+
+ if (!is_odirect) {
/* FIXME: unmap_mapping_range? */
filemap_write_and_wait(inode->i_mapping);
}
@@ -133,20 +168,35 @@ static void ceph_block_buffered(struct ceph_inode_info *ci, struct inode *inode)
* Note that buffered writes and truncates both take a write lock on
* inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
*/
-void
-ceph_start_io_direct(struct inode *inode)
+int ceph_start_io_direct(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ bool is_odirect;
+ int err;
/* Be an optimist! */
- down_read(&inode->i_rwsem);
- if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT)
- return;
+ err = down_read_killable(&inode->i_rwsem);
+ if (err)
+ return err;
+
+ spin_lock(&ci->i_ceph_lock);
+ /* ensure that bit state is consistent */
+ smp_mb__before_atomic();
+ is_odirect = READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT;
+ spin_unlock(&ci->i_ceph_lock);
+ if (is_odirect)
+ return 0;
up_read(&inode->i_rwsem);
+
/* Slow path.... */
- down_write(&inode->i_rwsem);
+ err = down_write_killable(&inode->i_rwsem);
+ if (err)
+ return err;
+
ceph_block_buffered(ci, inode);
downgrade_write(&inode->i_rwsem);
+
+ return 0;
}
/**
diff --git a/fs/ceph/io.h b/fs/ceph/io.h
index fa594cd77348..79029825e8b8 100644
--- a/fs/ceph/io.h
+++ b/fs/ceph/io.h
@@ -2,11 +2,13 @@
#ifndef _FS_CEPH_IO_H
#define _FS_CEPH_IO_H
-void ceph_start_io_read(struct inode *inode);
+#include <linux/compiler_attributes.h>
+
+int __must_check ceph_start_io_read(struct inode *inode);
void ceph_end_io_read(struct inode *inode);
-void ceph_start_io_write(struct inode *inode);
+int __must_check ceph_start_io_write(struct inode *inode);
void ceph_end_io_write(struct inode *inode);
-void ceph_start_io_direct(struct inode *inode);
+int __must_check ceph_start_io_direct(struct inode *inode);
void ceph_end_io_direct(struct inode *inode);
#endif /* FS_CEPH_IO_H */
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index e861de3c79b9..15cde055f3da 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -246,21 +246,28 @@ static long ceph_ioctl_lazyio(struct file *file)
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
struct ceph_client *cl = mdsc->fsc->client;
+ bool is_file_already_lazy = false;
+ spin_lock(&ci->i_ceph_lock);
if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
- spin_lock(&ci->i_ceph_lock);
fi->fmode |= CEPH_FILE_MODE_LAZY;
ci->i_nr_by_mode[ffs(CEPH_FILE_MODE_LAZY)]++;
__ceph_touch_fmode(ci, mdsc, fi->fmode);
- spin_unlock(&ci->i_ceph_lock);
+ } else {
+ is_file_already_lazy = true;
+ }
+ spin_unlock(&ci->i_ceph_lock);
+
+ if (is_file_already_lazy) {
+ doutc(cl, "file %p %p %llx.%llx already lazy\n", file, inode,
+ ceph_vinop(inode));
+ } else {
doutc(cl, "file %p %p %llx.%llx marked lazy\n", file, inode,
ceph_vinop(inode));
ceph_check_caps(ci, 0);
- } else {
- doutc(cl, "file %p %p %llx.%llx already lazy\n", file, inode,
- ceph_vinop(inode));
}
+
return 0;
}
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index ebf4ac0055dd..dd764f9c64b9 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -221,7 +221,10 @@ static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
if (err && err != -ERESTARTSYS)
return err;
- wait_for_completion_killable(&req->r_safe_completion);
+ err = wait_for_completion_killable(&req->r_safe_completion);
+ if (err)
+ return err;
+
return 0;
}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 219a2cc2bf3c..1740047aef0f 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -979,14 +979,15 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
if (mds >= mdsc->max_sessions) {
int newmax = 1 << get_count_order(mds + 1);
struct ceph_mds_session **sa;
+ size_t ptr_size = sizeof(struct ceph_mds_session *);
doutc(cl, "realloc to %d\n", newmax);
- sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
+ sa = kcalloc(newmax, ptr_size, GFP_NOFS);
if (!sa)
goto fail_realloc;
if (mdsc->sessions) {
memcpy(sa, mdsc->sessions,
- mdsc->max_sessions * sizeof(void *));
+ mdsc->max_sessions * ptr_size);
kfree(mdsc->sessions);
}
mdsc->sessions = sa;
@@ -2221,7 +2222,7 @@ static int trim_caps_cb(struct inode *inode, int mds, void *arg)
int count;
dput(dentry);
d_prune_aliases(inode);
- count = atomic_read(&inode->i_count);
+ count = icount_read(inode);
if (count == 1)
(*remaining)--;
doutc(cl, "%p %llx.%llx cap %p pruned, count now %d\n",
@@ -2532,6 +2533,7 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
size_t size = sizeof(struct ceph_mds_reply_dir_entry);
unsigned int num_entries;
+ u64 bytes_count;
int order;
spin_lock(&ci->i_ceph_lock);
@@ -2540,7 +2542,11 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
num_entries = max(num_entries, 1U);
num_entries = min(num_entries, opt->max_readdir);
- order = get_order(size * num_entries);
+ bytes_count = (u64)size * num_entries;
+ if (unlikely(bytes_count > ULONG_MAX))
+ bytes_count = ULONG_MAX;
+
+ order = get_order((unsigned long)bytes_count);
while (order >= 0) {
rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
__GFP_NOWARN |
@@ -2550,7 +2556,7 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
break;
order--;
}
- if (!rinfo->dir_entries)
+ if (!rinfo->dir_entries || unlikely(order < 0))
return -ENOMEM;
num_entries = (PAGE_SIZE << order) / size;
@@ -2621,6 +2627,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
{
struct inode *dir = req->r_parent;
struct dentry *dentry = req->r_dentry;
+ const struct qstr *name = req->r_dname;
u8 *cryptbuf = NULL;
u32 len = 0;
int ret = 0;
@@ -2641,8 +2648,10 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
if (!fscrypt_has_encryption_key(dir))
goto success;
- if (!fscrypt_fname_encrypted_size(dir, dentry->d_name.len, NAME_MAX,
- &len)) {
+ if (!name)
+ name = &dentry->d_name;
+
+ if (!fscrypt_fname_encrypted_size(dir, name->len, NAME_MAX, &len)) {
WARN_ON_ONCE(1);
return ERR_PTR(-ENAMETOOLONG);
}
@@ -2657,7 +2666,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
if (!cryptbuf)
return ERR_PTR(-ENOMEM);
- ret = fscrypt_fname_encrypt(dir, &dentry->d_name, cryptbuf, len);
+ ret = fscrypt_fname_encrypt(dir, name, cryptbuf, len);
if (ret) {
kfree(cryptbuf);
return ERR_PTR(ret);
@@ -2678,8 +2687,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
* ceph_mdsc_build_path - build a path string to a given dentry
* @mdsc: mds client
* @dentry: dentry to which path should be built
- * @plen: returned length of string
- * @pbase: returned base inode number
+ * @path_info: output path, length, base ino+snap, and freepath ownership flag
* @for_wire: is this path going to be sent to the MDS?
*
* Build a string that represents the path to the dentry. This is mostly called
@@ -2697,7 +2705,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
* foo/.snap/bar -> foo//bar
*/
char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
- int *plen, u64 *pbase, int for_wire)
+ struct ceph_path_info *path_info, int for_wire)
{
struct ceph_client *cl = mdsc->fsc->client;
struct dentry *cur;
@@ -2763,8 +2771,8 @@ retry:
}
if (fscrypt_has_encryption_key(d_inode(parent))) {
- len = ceph_encode_encrypted_fname(d_inode(parent),
- cur, buf);
+ len = ceph_encode_encrypted_dname(d_inode(parent),
+ buf, len);
if (len < 0) {
dput(parent);
dput(cur);
@@ -2800,24 +2808,35 @@ retry:
if (pos < 0) {
/*
- * A rename didn't occur, but somehow we didn't end up where
- * we thought we would. Throw a warning and try again.
+ * The path is longer than PATH_MAX and this function
+ * cannot ever succeed. Creating paths that long is
+ * possible with Ceph, but Linux cannot use them.
*/
- pr_warn_client(cl, "did not end path lookup where expected (pos = %d)\n",
- pos);
- goto retry;
+ return ERR_PTR(-ENAMETOOLONG);
}
- *pbase = base;
- *plen = PATH_MAX - 1 - pos;
+ /* Initialize the output structure */
+ memset(path_info, 0, sizeof(*path_info));
+
+ path_info->vino.ino = base;
+ path_info->pathlen = PATH_MAX - 1 - pos;
+ path_info->path = path + pos;
+ path_info->freepath = true;
+
+ /* Set snap from dentry if available */
+ if (d_inode(dentry))
+ path_info->vino.snap = ceph_snap(d_inode(dentry));
+ else
+ path_info->vino.snap = CEPH_NOSNAP;
+
doutc(cl, "on %p %d built %llx '%.*s'\n", dentry, d_count(dentry),
- base, *plen, path + pos);
+ base, PATH_MAX - 1 - pos, path + pos);
return path + pos;
}
static int build_dentry_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
- struct inode *dir, const char **ppath, int *ppathlen,
- u64 *pino, bool *pfreepath, bool parent_locked)
+ struct inode *dir, struct ceph_path_info *path_info,
+ bool parent_locked)
{
char *path;
@@ -2826,41 +2845,47 @@ static int build_dentry_path(struct ceph_mds_client *mdsc, struct dentry *dentry
dir = d_inode_rcu(dentry->d_parent);
if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP &&
!IS_ENCRYPTED(dir)) {
- *pino = ceph_ino(dir);
+ path_info->vino.ino = ceph_ino(dir);
+ path_info->vino.snap = ceph_snap(dir);
rcu_read_unlock();
- *ppath = dentry->d_name.name;
- *ppathlen = dentry->d_name.len;
+ path_info->path = dentry->d_name.name;
+ path_info->pathlen = dentry->d_name.len;
+ path_info->freepath = false;
return 0;
}
rcu_read_unlock();
- path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
+ path = ceph_mdsc_build_path(mdsc, dentry, path_info, 1);
if (IS_ERR(path))
return PTR_ERR(path);
- *ppath = path;
- *pfreepath = true;
+ /*
+ * ceph_mdsc_build_path already fills path_info, including snap handling.
+ */
return 0;
}
-static int build_inode_path(struct inode *inode,
- const char **ppath, int *ppathlen, u64 *pino,
- bool *pfreepath)
+static int build_inode_path(struct inode *inode, struct ceph_path_info *path_info)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
struct dentry *dentry;
char *path;
if (ceph_snap(inode) == CEPH_NOSNAP) {
- *pino = ceph_ino(inode);
- *ppathlen = 0;
+ path_info->vino.ino = ceph_ino(inode);
+ path_info->vino.snap = ceph_snap(inode);
+ path_info->pathlen = 0;
+ path_info->freepath = false;
return 0;
}
dentry = d_find_alias(inode);
- path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
+ path = ceph_mdsc_build_path(mdsc, dentry, path_info, 1);
dput(dentry);
if (IS_ERR(path))
return PTR_ERR(path);
- *ppath = path;
- *pfreepath = true;
+ /*
+ * ceph_mdsc_build_path already fills path_info, including snap from dentry.
+ * Override with inode's snap since that's what this function is for.
+ */
+ path_info->vino.snap = ceph_snap(inode);
return 0;
}
@@ -2870,26 +2895,32 @@ static int build_inode_path(struct inode *inode,
*/
static int set_request_path_attr(struct ceph_mds_client *mdsc, struct inode *rinode,
struct dentry *rdentry, struct inode *rdiri,
- const char *rpath, u64 rino, const char **ppath,
- int *pathlen, u64 *ino, bool *freepath,
+ const char *rpath, u64 rino,
+ struct ceph_path_info *path_info,
bool parent_locked)
{
struct ceph_client *cl = mdsc->fsc->client;
int r = 0;
+ /* Initialize the output structure */
+ memset(path_info, 0, sizeof(*path_info));
+
if (rinode) {
- r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
+ r = build_inode_path(rinode, path_info);
doutc(cl, " inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
ceph_snap(rinode));
} else if (rdentry) {
- r = build_dentry_path(mdsc, rdentry, rdiri, ppath, pathlen, ino,
- freepath, parent_locked);
- doutc(cl, " dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, *ppath);
+ r = build_dentry_path(mdsc, rdentry, rdiri, path_info, parent_locked);
+ doutc(cl, " dentry %p %llx/%.*s\n", rdentry, path_info->vino.ino,
+ path_info->pathlen, path_info->path);
} else if (rpath || rino) {
- *ino = rino;
- *ppath = rpath;
- *pathlen = rpath ? strlen(rpath) : 0;
- doutc(cl, " path %.*s\n", *pathlen, rpath);
+ path_info->vino.ino = rino;
+ path_info->vino.snap = CEPH_NOSNAP;
+ path_info->path = rpath;
+ path_info->pathlen = rpath ? strlen(rpath) : 0;
+ path_info->freepath = false;
+
+ doutc(cl, " path %.*s\n", path_info->pathlen, rpath);
}
return r;
@@ -2946,12 +2977,12 @@ static struct ceph_mds_request_head_legacy *
find_legacy_request_head(void *p, u64 features)
{
bool legacy = !(features & CEPH_FEATURE_FS_BTIME);
- struct ceph_mds_request_head_old *ohead;
+ struct ceph_mds_request_head *head;
if (legacy)
return (struct ceph_mds_request_head_legacy *)p;
- ohead = (struct ceph_mds_request_head_old *)p;
- return (struct ceph_mds_request_head_legacy *)&ohead->oldest_client_tid;
+ head = (struct ceph_mds_request_head *)p;
+ return (struct ceph_mds_request_head_legacy *)&head->oldest_client_tid;
}
/*
@@ -2966,11 +2997,8 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *msg;
struct ceph_mds_request_head_legacy *lhead;
- const char *path1 = NULL;
- const char *path2 = NULL;
- u64 ino1 = 0, ino2 = 0;
- int pathlen1 = 0, pathlen2 = 0;
- bool freepath1 = false, freepath2 = false;
+ struct ceph_path_info path_info1 = {0};
+ struct ceph_path_info path_info2 = {0};
struct dentry *old_dentry = NULL;
int len;
u16 releases;
@@ -2980,25 +3008,49 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
u16 request_head_version = mds_supported_head_version(session);
kuid_t caller_fsuid = req->r_cred->fsuid;
kgid_t caller_fsgid = req->r_cred->fsgid;
+ bool parent_locked = test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
ret = set_request_path_attr(mdsc, req->r_inode, req->r_dentry,
- req->r_parent, req->r_path1, req->r_ino1.ino,
- &path1, &pathlen1, &ino1, &freepath1,
- test_bit(CEPH_MDS_R_PARENT_LOCKED,
- &req->r_req_flags));
+ req->r_parent, req->r_path1, req->r_ino1.ino,
+ &path_info1, parent_locked);
if (ret < 0) {
msg = ERR_PTR(ret);
goto out;
}
+ /*
+ * When the parent directory's i_rwsem is *not* locked, req->r_parent may
+ * have become stale (e.g. after a concurrent rename) between the time the
+ * dentry was looked up and now. If we detect that the stored r_parent
+ * does not match the inode number we just encoded for the request, switch
+ * to the correct inode so that the MDS receives a valid parent reference.
+ */
+ if (!parent_locked && req->r_parent && path_info1.vino.ino &&
+ ceph_ino(req->r_parent) != path_info1.vino.ino) {
+ struct inode *old_parent = req->r_parent;
+ struct inode *correct_dir = ceph_get_inode(mdsc->fsc->sb, path_info1.vino, NULL);
+ if (!IS_ERR(correct_dir)) {
+ WARN_ONCE(1, "ceph: r_parent mismatch (had %llx wanted %llx) - updating\n",
+ ceph_ino(old_parent), path_info1.vino.ino);
+ /*
+ * Transfer CEPH_CAP_PIN from the old parent to the new one.
+ * The pin was taken earlier in ceph_mdsc_submit_request().
+ */
+ ceph_put_cap_refs(ceph_inode(old_parent), CEPH_CAP_PIN);
+ iput(old_parent);
+ req->r_parent = correct_dir;
+ ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
+ }
+ }
+
/* If r_old_dentry is set, then assume that its parent is locked */
if (req->r_old_dentry &&
!(req->r_old_dentry->d_flags & DCACHE_DISCONNECTED))
old_dentry = req->r_old_dentry;
ret = set_request_path_attr(mdsc, NULL, old_dentry,
- req->r_old_dentry_dir,
- req->r_path2, req->r_ino2.ino,
- &path2, &pathlen2, &ino2, &freepath2, true);
+ req->r_old_dentry_dir,
+ req->r_path2, req->r_ino2.ino,
+ &path_info2, true);
if (ret < 0) {
msg = ERR_PTR(ret);
goto out_free1;
@@ -3021,7 +3073,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
if (legacy)
len = sizeof(struct ceph_mds_request_head_legacy);
else if (request_head_version == 1)
- len = sizeof(struct ceph_mds_request_head_old);
+ len = offsetofend(struct ceph_mds_request_head, args);
else if (request_head_version == 2)
len = offsetofend(struct ceph_mds_request_head, ext_num_fwd);
else
@@ -3029,7 +3081,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
/* filepaths */
len += 2 * (1 + sizeof(u32) + sizeof(u64));
- len += pathlen1 + pathlen2;
+ len += path_info1.pathlen + path_info2.pathlen;
/* cap releases */
len += sizeof(struct ceph_mds_request_release) *
@@ -3037,9 +3089,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
!!req->r_old_inode_drop + !!req->r_old_dentry_drop);
if (req->r_dentry_drop)
- len += pathlen1;
+ len += path_info1.pathlen;
if (req->r_old_dentry_drop)
- len += pathlen2;
+ len += path_info2.pathlen;
/* MClientRequest tail */
@@ -3105,11 +3157,11 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
msg->hdr.version = cpu_to_le16(3);
p = msg->front.iov_base + sizeof(*lhead);
} else if (request_head_version == 1) {
- struct ceph_mds_request_head_old *ohead = msg->front.iov_base;
+ struct ceph_mds_request_head *nhead = msg->front.iov_base;
msg->hdr.version = cpu_to_le16(4);
- ohead->version = cpu_to_le16(1);
- p = msg->front.iov_base + sizeof(*ohead);
+ nhead->version = cpu_to_le16(1);
+ p = msg->front.iov_base + offsetofend(struct ceph_mds_request_head, args);
} else if (request_head_version == 2) {
struct ceph_mds_request_head *nhead = msg->front.iov_base;
@@ -3152,8 +3204,8 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
lhead->ino = cpu_to_le64(req->r_deleg_ino);
lhead->args = req->r_args;
- ceph_encode_filepath(&p, end, ino1, path1);
- ceph_encode_filepath(&p, end, ino2, path2);
+ ceph_encode_filepath(&p, end, path_info1.vino.ino, path_info1.path);
+ ceph_encode_filepath(&p, end, path_info2.vino.ino, path_info2.path);
/* make note of release offset, in case we need to replay */
req->r_request_release_offset = p - msg->front.iov_base;
@@ -3216,11 +3268,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
msg->hdr.data_off = cpu_to_le16(0);
out_free2:
- if (freepath2)
- ceph_mdsc_free_path((char *)path2, pathlen2);
+ ceph_mdsc_free_path_info(&path_info2);
out_free1:
- if (freepath1)
- ceph_mdsc_free_path((char *)path1, pathlen1);
+ ceph_mdsc_free_path_info(&path_info1);
out:
return msg;
out_err:
@@ -3266,7 +3316,7 @@ static int __prepare_send_request(struct ceph_mds_session *session,
* so we limit to retry at most 256 times.
*/
if (req->r_attempts) {
- old_max_retry = sizeof_field(struct ceph_mds_request_head_old,
+ old_max_retry = sizeof_field(struct ceph_mds_request_head,
num_retry);
old_max_retry = 1 << (old_max_retry * BITS_PER_BYTE);
if ((old_version && req->r_attempts >= old_max_retry) ||
@@ -4577,24 +4627,20 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
struct ceph_pagelist *pagelist = recon_state->pagelist;
struct dentry *dentry;
struct ceph_cap *cap;
- char *path;
- int pathlen = 0, err;
- u64 pathbase;
+ struct ceph_path_info path_info = {0};
+ int err;
u64 snap_follows;
dentry = d_find_primary(inode);
if (dentry) {
/* set pathbase to parent dir when msg_version >= 2 */
- path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &pathbase,
+ char *path = ceph_mdsc_build_path(mdsc, dentry, &path_info,
recon_state->msg_version >= 2);
dput(dentry);
if (IS_ERR(path)) {
err = PTR_ERR(path);
goto out_err;
}
- } else {
- path = NULL;
- pathbase = 0;
}
spin_lock(&ci->i_ceph_lock);
@@ -4627,7 +4673,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
rec.v2.issued = cpu_to_le32(cap->issued);
rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
- rec.v2.pathbase = cpu_to_le64(pathbase);
+ rec.v2.pathbase = cpu_to_le64(path_info.vino.ino);
rec.v2.flock_len = (__force __le32)
((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
} else {
@@ -4642,7 +4688,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
ts = inode_get_atime(inode);
ceph_encode_timespec64(&rec.v1.atime, &ts);
rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
- rec.v1.pathbase = cpu_to_le64(pathbase);
+ rec.v1.pathbase = cpu_to_le64(path_info.vino.ino);
}
if (list_empty(&ci->i_cap_snaps)) {
@@ -4704,7 +4750,7 @@ encode_again:
sizeof(struct ceph_filelock);
rec.v2.flock_len = cpu_to_le32(struct_len);
- struct_len += sizeof(u32) + pathlen + sizeof(rec.v2);
+ struct_len += sizeof(u32) + path_info.pathlen + sizeof(rec.v2);
if (struct_v >= 2)
struct_len += sizeof(u64); /* snap_follows */
@@ -4728,7 +4774,7 @@ encode_again:
ceph_pagelist_encode_8(pagelist, 1);
ceph_pagelist_encode_32(pagelist, struct_len);
}
- ceph_pagelist_encode_string(pagelist, path, pathlen);
+ ceph_pagelist_encode_string(pagelist, (char *)path_info.path, path_info.pathlen);
ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
ceph_locks_to_pagelist(flocks, pagelist,
num_fcntl_locks, num_flock_locks);
@@ -4739,17 +4785,17 @@ out_freeflocks:
} else {
err = ceph_pagelist_reserve(pagelist,
sizeof(u64) + sizeof(u32) +
- pathlen + sizeof(rec.v1));
+ path_info.pathlen + sizeof(rec.v1));
if (err)
goto out_err;
ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
- ceph_pagelist_encode_string(pagelist, path, pathlen);
+ ceph_pagelist_encode_string(pagelist, (char *)path_info.path, path_info.pathlen);
ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
}
out_err:
- ceph_mdsc_free_path(path, pathlen);
+ ceph_mdsc_free_path_info(&path_info);
if (!err)
recon_state->nr_caps++;
return err;
@@ -5487,6 +5533,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
spin_lock_init(&mdsc->stopping_lock);
atomic_set(&mdsc->stopping_blockers, 0);
init_completion(&mdsc->stopping_waiter);
+ atomic64_set(&mdsc->dirty_folios, 0);
+ init_waitqueue_head(&mdsc->flush_end_wq);
init_waitqueue_head(&mdsc->session_close_wq);
INIT_LIST_HEAD(&mdsc->waiting_for_map);
mdsc->quotarealms_inodes = RB_ROOT;
@@ -5607,11 +5655,19 @@ static int ceph_mds_auth_match(struct ceph_mds_client *mdsc,
u32 caller_uid = from_kuid(&init_user_ns, cred->fsuid);
u32 caller_gid = from_kgid(&init_user_ns, cred->fsgid);
struct ceph_client *cl = mdsc->fsc->client;
+ const char *fs_name = mdsc->fsc->mount_options->mds_namespace;
const char *spath = mdsc->fsc->mount_options->server_path;
bool gid_matched = false;
u32 gid, tlen, len;
int i, j;
+ doutc(cl, "fsname check fs_name=%s match.fs_name=%s\n",
+ fs_name, auth->match.fs_name ? auth->match.fs_name : "");
+ if (auth->match.fs_name && strcmp(auth->match.fs_name, fs_name)) {
+ /* fsname mismatch, try next one */
+ return 0;
+ }
+
doutc(cl, "match.uid %lld\n", auth->match.uid);
if (auth->match.uid != MDS_AUTH_UID_ANY) {
if (auth->match.uid != caller_uid)
@@ -5691,18 +5747,18 @@ static int ceph_mds_auth_match(struct ceph_mds_client *mdsc,
*
* All the other cases --> mismatch
*/
+ bool path_matched = true;
char *first = strstr(_tpath, auth->match.path);
- if (first != _tpath) {
- if (free_tpath)
- kfree(_tpath);
- return 0;
+ if (first != _tpath ||
+ (tlen > len && _tpath[len] != '/')) {
+ path_matched = false;
}
- if (tlen > len && _tpath[len] != '/') {
- if (free_tpath)
- kfree(_tpath);
+ if (free_tpath)
+ kfree(_tpath);
+
+ if (!path_matched)
return 0;
- }
}
}
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 38bb7e0d2d79..0428a5eaf28c 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -299,6 +299,8 @@ struct ceph_mds_request {
struct inode *r_target_inode; /* resulting inode */
struct inode *r_new_inode; /* new inode (for creates) */
+ const struct qstr *r_dname; /* stable name (for ->d_revalidate) */
+
#define CEPH_MDS_R_DIRECT_IS_HASH (1) /* r_direct_hash is valid */
#define CEPH_MDS_R_ABORTED (2) /* call was aborted */
#define CEPH_MDS_R_GOT_UNSAFE (3) /* got an unsafe reply */
@@ -456,6 +458,9 @@ struct ceph_mds_client {
atomic_t stopping_blockers;
struct completion stopping_waiter;
+ atomic64_t dirty_folios;
+ wait_queue_head_t flush_end_wq;
+
atomic64_t quotarealms_count; /* # realms with quota */
/*
* We keep a list of inodes we don't see in the mountpoint but that we
@@ -612,14 +617,24 @@ extern int ceph_mds_check_access(struct ceph_mds_client *mdsc, char *tpath,
extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);
-static inline void ceph_mdsc_free_path(char *path, int len)
+/*
+ * Structure to group path-related output parameters for build_*_path functions
+ */
+struct ceph_path_info {
+ const char *path;
+ int pathlen;
+ struct ceph_vino vino;
+ bool freepath;
+};
+
+static inline void ceph_mdsc_free_path_info(const struct ceph_path_info *path_info)
{
- if (!IS_ERR_OR_NULL(path))
- __putname(path - (PATH_MAX - 1 - len));
+ if (path_info && path_info->freepath && !IS_ERR_OR_NULL(path_info->path))
+ __putname((char *)path_info->path - (PATH_MAX - 1 - path_info->pathlen));
}
extern char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc,
- struct dentry *dentry, int *plen, u64 *base,
+ struct dentry *dentry, struct ceph_path_info *path_info,
int for_wire);
extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 8109aba66e02..2c7b151a7c95 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -353,10 +353,22 @@ struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p,
__decode_and_drop_type(p, end, u8, bad_ext);
}
if (mdsmap_ev >= 8) {
+ u32 fsname_len;
/* enabled */
ceph_decode_8_safe(p, end, m->m_enabled, bad_ext);
/* fs_name */
- ceph_decode_skip_string(p, end, bad_ext);
+ ceph_decode_32_safe(p, end, fsname_len, bad_ext);
+
+ /* validate fsname against mds_namespace */
+ if (!namespace_equals(mdsc->fsc->mount_options, *p,
+ fsname_len)) {
+ pr_warn_client(cl, "fsname %*pE doesn't match mds_namespace %s\n",
+ (int)fsname_len, (char *)*p,
+ mdsc->fsc->mount_options->mds_namespace);
+ goto bad;
+ }
+ /* skip fsname after validation */
+ ceph_decode_skip_n(p, end, fsname_len, bad);
}
/* damaged */
if (mdsmap_ev >= 9) {
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index 06ee397e0c3a..d90eda19bcc4 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -166,7 +166,7 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
if (IS_ERR(in)) {
doutc(cl, "Can't lookup inode %llx (err: %ld)\n", realm->ino,
PTR_ERR(in));
- qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */
+ qri->timeout = jiffies + secs_to_jiffies(60); /* XXX */
} else {
qri->timeout = 0;
qri->inode = in;
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index de03cd6eb86e..f6bf24b5c683 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -246,20 +246,6 @@ static void canonicalize_path(char *path)
path[j] = '\0';
}
-/*
- * Check if the mds namespace in ceph_mount_options matches
- * the passed in namespace string. First time match (when
- * ->mds_namespace is NULL) is treated specially, since
- * ->mds_namespace needs to be initialized by the caller.
- */
-static int namespace_equals(struct ceph_mount_options *fsopt,
- const char *namespace, size_t len)
-{
- return !(fsopt->mds_namespace &&
- (strlen(fsopt->mds_namespace) != len ||
- strncmp(fsopt->mds_namespace, namespace, len)));
-}
-
static int ceph_parse_old_source(const char *dev_name, const char *dev_name_end,
struct fs_context *fc)
{
@@ -431,6 +417,8 @@ static int ceph_parse_mount_param(struct fs_context *fc,
switch (token) {
case Opt_snapdirname:
+ if (strlen(param->string) > NAME_MAX)
+ return invalfc(fc, "snapdirname too long");
kfree(fsopt->snapdir_name);
fsopt->snapdir_name = param->string;
param->string = NULL;
@@ -860,7 +848,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
fsc->inode_wq = alloc_workqueue("ceph-inode", WQ_UNBOUND, 0);
if (!fsc->inode_wq)
goto fail_client;
- fsc->cap_wq = alloc_workqueue("ceph-cap", 0, 1);
+ fsc->cap_wq = alloc_workqueue("ceph-cap", WQ_PERCPU, 1);
if (!fsc->cap_wq)
goto fail_inode_wq;
@@ -1031,8 +1019,7 @@ void ceph_umount_begin(struct super_block *sb)
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
doutc(fsc->client, "starting forced umount\n");
- if (!fsc)
- return;
+
fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
__ceph_umount_begin(fsc);
}
@@ -1041,7 +1028,7 @@ static const struct super_operations ceph_super_ops = {
.alloc_inode = ceph_alloc_inode,
.free_inode = ceph_free_inode,
.write_inode = ceph_write_inode,
- .drop_inode = generic_delete_inode,
+ .drop_inode = inode_just_drop,
.evict_inode = ceph_evict_inode,
.sync_fs = ceph_sync_fs,
.put_super = ceph_put_super,
@@ -1162,7 +1149,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
const char *path = fsc->mount_options->server_path ?
fsc->mount_options->server_path + 1 : "";
- err = __ceph_open_session(fsc->client, started);
+ err = __ceph_open_session(fsc->client);
if (err < 0)
goto out;
@@ -1218,13 +1205,14 @@ static int ceph_set_super(struct super_block *s, struct fs_context *fc)
fsc->max_file_size = 1ULL << 40; /* temp value until we get mdsmap */
s->s_op = &ceph_super_ops;
- s->s_d_op = &ceph_dentry_ops;
+ set_default_d_op(s, &ceph_dentry_ops);
s->s_export_op = &ceph_export_ops;
s->s_time_gran = 1;
s->s_time_min = 0;
s->s_time_max = U32_MAX;
s->s_flags |= SB_NODIRATIME | SB_NOATIME;
+ s->s_magic = CEPH_SUPER_MAGIC;
ceph_fscrypt_set_ops(s);
@@ -1561,6 +1549,17 @@ static void ceph_kill_sb(struct super_block *s)
*/
sync_filesystem(s);
+ if (atomic64_read(&mdsc->dirty_folios) > 0) {
+ wait_queue_head_t *wq = &mdsc->flush_end_wq;
+ long timeleft = wait_event_killable_timeout(*wq,
+ atomic64_read(&mdsc->dirty_folios) <= 0,
+ fsc->client->options->mount_timeout);
+ if (!timeleft) /* timed out */
+ pr_warn_client(cl, "umount timed out, %ld\n", timeleft);
+ else if (timeleft < 0) /* killed */
+ pr_warn_client(cl, "umount was killed, %ld\n", timeleft);
+ }
+
spin_lock(&mdsc->stopping_lock);
mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHING;
wait = !!atomic_read(&mdsc->stopping_blockers);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index af14ec382246..a1f781c46b41 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -104,6 +104,20 @@ struct ceph_mount_options {
struct fscrypt_dummy_policy dummy_enc_policy;
};
+/*
+ * Check if the mds namespace in ceph_mount_options matches
+ * the passed in namespace string. First time match (when
+ * ->mds_namespace is NULL) is treated specially, since
+ * ->mds_namespace needs to be initialized by the caller.
+ */
+static inline int namespace_equals(struct ceph_mount_options *fsopt,
+ const char *namespace, size_t len)
+{
+ return !(fsopt->mds_namespace &&
+ (strlen(fsopt->mds_namespace) != len ||
+ strncmp(fsopt->mds_namespace, namespace, len)));
+}
+
/* mount state */
enum {
CEPH_MOUNT_MOUNTING,
@@ -463,6 +477,7 @@ struct ceph_inode_info {
unsigned long i_work_mask;
#ifdef CONFIG_FS_ENCRYPTION
+ struct fscrypt_inode_info *i_crypt_info;
u32 fscrypt_auth_len;
u32 fscrypt_file_len;
u8 *fscrypt_auth;
@@ -638,7 +653,8 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
#define CEPH_I_FLUSH_SNAPS (1 << 8) /* need flush snapss */
#define CEPH_I_ERROR_WRITE (1 << 9) /* have seen write errors */
#define CEPH_I_ERROR_FILELOCK (1 << 10) /* have seen file lock errors */
-#define CEPH_I_ODIRECT (1 << 11) /* inode in direct I/O mode */
+#define CEPH_I_ODIRECT_BIT (11) /* inode in direct I/O mode */
+#define CEPH_I_ODIRECT (1 << CEPH_I_ODIRECT_BIT)
#define CEPH_ASYNC_CREATE_BIT (12) /* async create in flight for this */
#define CEPH_I_ASYNC_CREATE (1 << CEPH_ASYNC_CREATE_BIT)
#define CEPH_I_SHUTDOWN (1 << 13) /* inode is no longer usable */
@@ -903,7 +919,7 @@ ceph_find_rw_context(struct ceph_file_info *cf)
}
struct ceph_readdir_cache_control {
- struct page *page;
+ struct folio *folio;
struct dentry **dentries;
int index;
};
@@ -1132,8 +1148,7 @@ struct ceph_acl_sec_ctx {
void *acl;
#endif
#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
- void *sec_ctx;
- u32 sec_ctxlen;
+ struct lsm_context lsmctx;
#endif
#ifdef CONFIG_FS_ENCRYPTION
struct ceph_fscrypt_auth *fscrypt_auth;
@@ -1287,7 +1302,7 @@ extern void __ceph_touch_fmode(struct ceph_inode_info *ci,
/* addr.c */
extern const struct address_space_operations ceph_aops;
extern const struct netfs_request_ops ceph_netfs_ops;
-extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
+int ceph_mmap_prepare(struct vm_area_desc *desc);
extern int ceph_uninline_data(struct file *file);
extern int ceph_pool_perm_check(struct inode *inode, int need);
extern void ceph_pool_perm_destroy(struct ceph_mds_client* mdsc);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 1a9f12204666..ad1f30bea175 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -249,8 +249,7 @@ static ssize_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
static ssize_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
size_t size)
{
- return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_rctime.tv_sec,
- ci->i_rctime.tv_nsec);
+ return ceph_fmt_xattr(val, size, "%ptSp", &ci->i_rctime);
}
/* dir pin */
@@ -307,8 +306,7 @@ static bool ceph_vxattrcb_snap_btime_exists(struct ceph_inode_info *ci)
static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
size_t size)
{
- return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_snap_btime.tv_sec,
- ci->i_snap_btime.tv_nsec);
+ return ceph_fmt_xattr(val, size, "%ptSp", &ci->i_snap_btime);
}
static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
@@ -1383,8 +1381,7 @@ int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
int err;
err = security_dentry_init_security(dentry, mode, &dentry->d_name,
- &name, &as_ctx->sec_ctx,
- &as_ctx->sec_ctxlen);
+ &name, &as_ctx->lsmctx);
if (err < 0) {
WARN_ON_ONCE(err != -EOPNOTSUPP);
err = 0; /* do nothing */
@@ -1409,7 +1406,7 @@ int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
*/
name_len = strlen(name);
err = ceph_pagelist_reserve(pagelist,
- 4 * 2 + name_len + as_ctx->sec_ctxlen);
+ 4 * 2 + name_len + as_ctx->lsmctx.len);
if (err)
goto out;
@@ -1432,8 +1429,9 @@ int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
ceph_pagelist_encode_32(pagelist, name_len);
ceph_pagelist_append(pagelist, name, name_len);
- ceph_pagelist_encode_32(pagelist, as_ctx->sec_ctxlen);
- ceph_pagelist_append(pagelist, as_ctx->sec_ctx, as_ctx->sec_ctxlen);
+ ceph_pagelist_encode_32(pagelist, as_ctx->lsmctx.len);
+ ceph_pagelist_append(pagelist, as_ctx->lsmctx.context,
+ as_ctx->lsmctx.len);
err = 0;
out:
@@ -1451,7 +1449,7 @@ void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx)
posix_acl_release(as_ctx->default_acl);
#endif
#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
- security_release_secctx(as_ctx->sec_ctx, as_ctx->sec_ctxlen);
+ security_release_secctx(&as_ctx->lsmctx);
#endif
#ifdef CONFIG_FS_ENCRYPTION
kfree(as_ctx->fscrypt_auth);
diff --git a/fs/coda/cnode.c b/fs/coda/cnode.c
index 62a3d2565c26..70bb0579b40c 100644
--- a/fs/coda/cnode.c
+++ b/fs/coda/cnode.c
@@ -70,7 +70,7 @@ retry:
if (!inode)
return ERR_PTR(-ENOMEM);
- if (inode->i_state & I_NEW) {
+ if (inode_state_read_once(inode) & I_NEW) {
cii = ITOC(inode);
/* we still need to set i_ino for things like stat(2) */
inode->i_ino = hash;
@@ -148,7 +148,7 @@ struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb)
/* we should never see newly created inodes because we intentionally
* fail in the initialization callback */
- BUG_ON(inode->i_state & I_NEW);
+ BUG_ON(inode_state_read_once(inode) & I_NEW);
return inode;
}
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 4e552ba7bd43..ca9990017265 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -166,8 +166,8 @@ err_out:
return error;
}
-static int coda_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *de, umode_t mode)
+static struct dentry *coda_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *de, umode_t mode)
{
struct inode *inode;
struct coda_vattr attrs;
@@ -177,14 +177,14 @@ static int coda_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct CodaFid newfid;
if (is_root_inode(dir) && coda_iscontrol(name, len))
- return -EPERM;
+ return ERR_PTR(-EPERM);
attrs.va_mode = mode;
- error = venus_mkdir(dir->i_sb, coda_i2f(dir),
+ error = venus_mkdir(dir->i_sb, coda_i2f(dir),
name, len, &newfid, &attrs);
if (error)
goto err_out;
-
+
inode = coda_iget(dir->i_sb, &newfid, &attrs);
if (IS_ERR(inode)) {
error = PTR_ERR(inode);
@@ -195,10 +195,10 @@ static int coda_mkdir(struct mnt_idmap *idmap, struct inode *dir,
coda_dir_inc_nlink(dir);
coda_dir_update_mtime(dir);
d_instantiate(de, inode);
- return 0;
+ return NULL;
err_out:
d_drop(de);
- return error;
+ return ERR_PTR(error);
}
/* try to make de an entry in dir_inodde linked to source_de */
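The coda_mkdir() conversion above (configfs_mkdir() below gets the same treatment) follows the newer ->mkdir() calling convention in which the method returns a dentry rather than an int: ERR_PTR() on failure, NULL when the dentry it was handed has been instantiated, or, for filesystems that look the result up themselves, a different dentry for the caller to use. A minimal sketch of that shape, with example_new_inode() standing in as a hypothetical helper; this is not code from the series:

static struct dentry *example_mkdir(struct mnt_idmap *idmap, struct inode *dir,
                                    struct dentry *dentry, umode_t mode)
{
        struct inode *inode;

        inode = example_new_inode(dir->i_sb, S_IFDIR | mode);   /* hypothetical helper */
        if (IS_ERR(inode)) {
                d_drop(dentry);
                return ERR_CAST(inode);         /* failure: hand the error back */
        }

        d_instantiate(dentry, inode);
        inc_nlink(dir);
        return NULL;                            /* success: keep the caller's dentry */
}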
@@ -429,23 +429,16 @@ static int coda_readdir(struct file *coda_file, struct dir_context *ctx)
cfi = coda_ftoc(coda_file);
host_file = cfi->cfi_container;
- if (host_file->f_op->iterate_shared) {
- struct inode *host_inode = file_inode(host_file);
- ret = -ENOENT;
- if (!IS_DEADDIR(host_inode)) {
- inode_lock_shared(host_inode);
- ret = host_file->f_op->iterate_shared(host_file, ctx);
- file_accessed(host_file);
- inode_unlock_shared(host_inode);
- }
+ ret = iterate_dir(host_file, ctx);
+ if (ret != -ENOTDIR)
return ret;
- }
/* Venus: we must read Venus dirents from a file */
return coda_venus_readdir(coda_file, ctx);
}
/* called when a cache lookup succeeds */
-static int coda_dentry_revalidate(struct dentry *de, unsigned int flags)
+static int coda_dentry_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *de, unsigned int flags)
{
struct inode *inode;
struct coda_inode_info *cii;
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 148856a582a9..a390b5d21196 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -160,7 +160,7 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
size_t count;
int ret;
- if (!host_file->f_op->mmap)
+ if (!can_mmap_file(host_file))
return -ENODEV;
if (WARN_ON(coda_file != vma->vm_file))
@@ -199,10 +199,10 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
spin_unlock(&cii->c_lock);
vma->vm_file = get_file(host_file);
- ret = call_mmap(vma->vm_file, vma);
+ ret = vfs_mmap(vma->vm_file, vma);
if (ret) {
- /* if call_mmap fails, our caller will put host_file so we
+ /* if vfs_mmap fails, our caller will put host_file so we
* should drop the reference to the coda_file that we got.
*/
fput(coda_file);
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 6896fce122e1..08450d006016 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -230,7 +230,7 @@ static int coda_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_blocksize_bits = 12;
sb->s_magic = CODA_SUPER_MAGIC;
sb->s_op = &coda_super_operations;
- sb->s_d_op = &coda_dentry_operations;
+ set_default_d_op(sb, &coda_dentry_operations);
sb->s_time_gran = 1;
sb->s_time_min = S64_MIN;
sb->s_time_max = S64_MAX;
diff --git a/fs/coda/sysctl.c b/fs/coda/sysctl.c
index 9f2d5743e2c8..0df46f09b6cc 100644
--- a/fs/coda/sysctl.c
+++ b/fs/coda/sysctl.c
@@ -14,7 +14,7 @@
static struct ctl_table_header *fs_table_header;
-static struct ctl_table coda_table[] = {
+static const struct ctl_table coda_table[] = {
{
.procname = "timeout",
.data = &coda_timeout,
diff --git a/fs/configfs/Kconfig b/fs/configfs/Kconfig
index 272b64456999..1fcd761fe7be 100644
--- a/fs/configfs/Kconfig
+++ b/fs/configfs/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config CONFIGFS_FS
tristate "Userspace-driven configuration filesystem"
- select SYSFS
help
configfs is a RAM-based filesystem that provides the converse
of sysfs's functionality. Where sysfs is a filesystem-based
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 7d10278db30d..ba95f636a5ab 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -67,7 +67,6 @@ static void configfs_d_iput(struct dentry * dentry,
const struct dentry_operations configfs_dentry_ops = {
.d_iput = configfs_d_iput,
- .d_delete = always_delete_dentry,
};
#ifdef CONFIG_LOCKDEP
@@ -401,8 +400,14 @@ static void remove_dir(struct dentry * d)
configfs_remove_dirent(d);
- if (d_really_is_positive(d))
- simple_rmdir(d_inode(parent),d);
+ if (d_really_is_positive(d)) {
+ if (likely(simple_empty(d))) {
+ __simple_rmdir(d_inode(parent),d);
+ dput(d);
+ } else {
+ pr_warn("remove_dir (%pd): attributes remain", d);
+ }
+ }
pr_debug(" o %pd removing done (%d)\n", d, d_count(d));
@@ -599,7 +604,7 @@ static void detach_attrs(struct config_item * item)
static int populate_attrs(struct config_item *item)
{
const struct config_item_type *t = item->ci_type;
- struct configfs_group_operations *ops;
+ const struct configfs_group_operations *ops;
struct configfs_attribute *attr;
struct configfs_bin_attribute *bin_attr;
int error = 0;
@@ -619,7 +624,7 @@ static int populate_attrs(struct config_item *item)
break;
}
}
- if (t->ct_bin_attrs) {
+ if (!error && t->ct_bin_attrs) {
for (i = 0; (bin_attr = t->ct_bin_attrs[i]) != NULL; i++) {
if (ops && ops->is_bin_visible && !ops->is_bin_visible(item, bin_attr, i))
continue;
@@ -970,7 +975,7 @@ static void configfs_dump_one(struct configfs_dirent *sd, int level)
{
pr_info("%*s\"%s\":\n", level, " ", configfs_get_name(sd));
-#define type_print(_type) if (sd->s_type & _type) pr_info("%*s %s\n", level, " ", #_type);
+#define type_print(_type) if (sd->s_type & _type) pr_info("%*s %s\n", level, " ", #_type)
type_print(CONFIGFS_ROOT);
type_print(CONFIGFS_DIR);
type_print(CONFIGFS_ITEM_ATTR);
@@ -1280,8 +1285,8 @@ out_root_unlock:
}
EXPORT_SYMBOL(configfs_depend_item_unlocked);
-static int configfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *configfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int ret = 0;
int module_got = 0;
@@ -1461,7 +1466,7 @@ out_put:
put_fragment(frag);
out:
- return ret;
+ return ERR_PTR(ret);
}
static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
@@ -1602,10 +1607,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
err = -ENOENT;
if (configfs_dirent_is_ready(parent_sd)) {
file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL);
- if (IS_ERR(file->private_data))
- err = PTR_ERR(file->private_data);
- else
- err = 0;
+ err = PTR_ERR_OR_ZERO(file->private_data);
}
inode_unlock(d_inode(dentry));
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index 0ad32150611e..affe4742bbb5 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -30,7 +30,7 @@ struct configfs_buffer {
size_t count;
loff_t pos;
char * page;
- struct configfs_item_operations * ops;
+ const struct configfs_item_operations *ops;
struct mutex mutex;
int needs_read_fill;
bool read_in_progress;
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 1d2e3a5738d1..bcda3372e141 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -211,7 +211,8 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent)
dget_dlock(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
- simple_unlink(d_inode(parent), dentry);
+ __simple_unlink(d_inode(parent), dentry);
+ dput(dentry);
} else
spin_unlock(&dentry->d_lock);
}
diff --git a/fs/configfs/item.c b/fs/configfs/item.c
index 254170a82aa3..c378b5cbf87d 100644
--- a/fs/configfs/item.c
+++ b/fs/configfs/item.c
@@ -66,7 +66,7 @@ int config_item_set_name(struct config_item *item, const char *fmt, ...)
name = kvasprintf(GFP_KERNEL, fmt, args);
va_end(args);
if (!name)
- return -EFAULT;
+ return -ENOMEM;
}
/* Free the old name, if necessary. */
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index c2d820063ec4..4929f3431189 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -36,7 +36,7 @@ static void configfs_free_inode(struct inode *inode)
static const struct super_operations configfs_ops = {
.statfs = simple_statfs,
- .drop_inode = generic_delete_inode,
+ .drop_inode = inode_just_drop,
.free_inode = configfs_free_inode,
};
@@ -92,7 +92,8 @@ static int configfs_fill_super(struct super_block *sb, struct fs_context *fc)
configfs_root_group.cg_item.ci_dentry = root;
root->d_fsdata = &configfs_root;
sb->s_root = root;
- sb->s_d_op = &configfs_dentry_ops; /* the rest get that */
+ set_default_d_op(sb, &configfs_dentry_ops); /* the rest get that */
+ sb->s_d_flags |= DCACHE_DONTCACHE;
return 0;
}
@@ -115,7 +116,7 @@ static struct file_system_type configfs_fs_type = {
.owner = THIS_MODULE,
.name = "configfs",
.init_fs_context = configfs_init_fs_context,
- .kill_sb = kill_litter_super,
+ .kill_sb = kill_anon_super,
};
MODULE_ALIAS_FS("configfs");
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index 69133ec1fac2..f3f79c67add5 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -114,26 +114,21 @@ static int create_link(struct config_item *parent_item,
}
-static int get_target(const char *symname, struct path *path,
- struct config_item **target, struct super_block *sb)
+static int get_target(const char *symname, struct config_item **target,
+ struct super_block *sb)
{
+ struct path path __free(path_put) = {};
int ret;
- ret = kern_path(symname, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, path);
- if (!ret) {
- if (path->dentry->d_sb == sb) {
- *target = configfs_get_config_item(path->dentry);
- if (!*target) {
- ret = -ENOENT;
- path_put(path);
- }
- } else {
- ret = -EPERM;
- path_put(path);
- }
- }
-
- return ret;
+ ret = kern_path(symname, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &path);
+ if (ret)
+ return ret;
+ if (path.dentry->d_sb != sb)
+ return -EPERM;
+ *target = configfs_get_config_item(path.dentry);
+ if (!*target)
+ return -ENOENT;
+ return 0;
}
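The rewritten get_target() relies on the scope-based cleanup support in linux/cleanup.h: the __free(path_put) annotation attaches a destructor that runs when the variable goes out of scope, so each early return drops the path reference without an unwind ladder. A minimal sketch of the same pattern with kfree(); struct example_buf is a made-up type used only for illustration:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct example_buf {
        size_t len;
        void *data;
};

/* Allocate and fill; every failure path frees automatically at scope exit. */
static struct example_buf *example_alloc(size_t len)
{
        struct example_buf *p __free(kfree) = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return NULL;
        p->data = kzalloc(len, GFP_KERNEL);
        if (!p->data)
                return NULL;                    /* kfree(p) runs automatically here */
        p->len = len;
        return no_free_ptr(p);                  /* success: ownership moves to the caller */
}

The same header provides no_free_ptr(), which the coredump changes further down use to hand cprm->file out of a cleanup scope.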
@@ -141,7 +136,6 @@ int configfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
int ret;
- struct path path;
struct configfs_dirent *sd;
struct config_item *parent_item;
struct config_item *target_item = NULL;
@@ -188,7 +182,7 @@ int configfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
* AV, a thoroughly annoyed bastard.
*/
inode_unlock(dir);
- ret = get_target(symname, &path, &target_item, dentry->d_sb);
+ ret = get_target(symname, &target_item, dentry->d_sb);
inode_lock(dir);
if (ret)
goto out_put;
@@ -210,7 +204,6 @@ int configfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
}
config_item_put(target_item);
- path_put(&path);
out_put:
config_item_put(parent_item);
diff --git a/fs/coredump.c b/fs/coredump.c
index d48edb37bc35..8feb9c1cf83d 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -43,6 +43,15 @@
#include <linux/timekeeping.h>
#include <linux/sysctl.h>
#include <linux/elf.h>
+#include <linux/pidfs.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <net/af_unix.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <uapi/linux/pidfd.h>
+#include <uapi/linux/un.h>
+#include <uapi/linux/coredump.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
@@ -60,16 +69,35 @@ static void free_vma_snapshot(struct coredump_params *cprm);
#define CORE_FILE_NOTE_SIZE_DEFAULT (4*1024*1024)
/* Define a reasonable max cap */
#define CORE_FILE_NOTE_SIZE_MAX (16*1024*1024)
+/*
+ * File descriptor number for the pidfd for the thread-group leader of
+ * the coredumping task installed into the usermode helper's file
+ * descriptor table.
+ */
+#define COREDUMP_PIDFD_NUMBER 3
static int core_uses_pid;
static unsigned int core_pipe_limit;
+static unsigned int core_sort_vma;
static char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;
unsigned int core_file_note_size_limit = CORE_FILE_NOTE_SIZE_DEFAULT;
+static atomic_t core_pipe_count = ATOMIC_INIT(0);
+
+enum coredump_type_t {
+ COREDUMP_FILE = 1,
+ COREDUMP_PIPE = 2,
+ COREDUMP_SOCK = 3,
+ COREDUMP_SOCK_REQ = 4,
+};
struct core_name {
char *corename;
int used, size;
+ unsigned int core_pipe_limit;
+ bool core_dumped;
+ enum coredump_type_t core_type;
+ u64 mask;
};
static int expand_corename(struct core_name *cn, int size)
@@ -200,35 +228,104 @@ put_exe_file:
return ret;
}
-/* format_corename will inspect the pattern parameter, and output a
- * name into corename, which must have space for at least
- * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
+/*
+ * coredump_parse will inspect the pattern parameter, and output a name
+ * into corename, which must have space for at least CORENAME_MAX_SIZE
+ * bytes plus one byte for the zero terminator.
*/
-static int format_corename(struct core_name *cn, struct coredump_params *cprm,
+static bool coredump_parse(struct core_name *cn, struct coredump_params *cprm,
size_t **argv, int *argc)
{
const struct cred *cred = current_cred();
const char *pat_ptr = core_pattern;
- int ispipe = (*pat_ptr == '|');
bool was_space = false;
int pid_in_pattern = 0;
int err = 0;
+ cn->mask = COREDUMP_KERNEL;
+ if (core_pipe_limit)
+ cn->mask |= COREDUMP_WAIT;
cn->used = 0;
cn->corename = NULL;
+ cn->core_pipe_limit = 0;
+ cn->core_dumped = false;
+ if (*pat_ptr == '|')
+ cn->core_type = COREDUMP_PIPE;
+ else if (*pat_ptr == '@')
+ cn->core_type = COREDUMP_SOCK;
+ else
+ cn->core_type = COREDUMP_FILE;
if (expand_corename(cn, core_name_size))
- return -ENOMEM;
+ return false;
cn->corename[0] = '\0';
- if (ispipe) {
+ switch (cn->core_type) {
+ case COREDUMP_PIPE: {
int argvs = sizeof(core_pattern) / 2;
(*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
if (!(*argv))
- return -ENOMEM;
+ return false;
(*argv)[(*argc)++] = 0;
++pat_ptr;
if (!(*pat_ptr))
- return -ENOMEM;
+ return false;
+ break;
+ }
+ case COREDUMP_SOCK: {
+ /* skip the @ */
+ pat_ptr++;
+ if (!(*pat_ptr))
+ return false;
+ if (*pat_ptr == '@') {
+ pat_ptr++;
+ if (!(*pat_ptr))
+ return false;
+
+ cn->core_type = COREDUMP_SOCK_REQ;
+ }
+
+ err = cn_printf(cn, "%s", pat_ptr);
+ if (err)
+ return false;
+
+ /* Require absolute paths. */
+ if (cn->corename[0] != '/')
+ return false;
+
+ /*
+ * Ensure we can use spaces to indicate additional
+ * Ensure we can use spaces to indicate additional
+ * parameters in the future.
+ */
+ if (strchr(cn->corename, ' ')) {
+ coredump_report_failure("Coredump socket may not %s contain spaces", cn->corename);
+ return false;
+ }
+
+ /* Must not contain ".." in the path. */
+ if (name_contains_dotdot(cn->corename)) {
+ coredump_report_failure("Coredump socket may not %s contain '..'", cn->corename);
+ return false;
+ }
+
+ if (strlen(cn->corename) >= UNIX_PATH_MAX) {
+ coredump_report_failure("Coredump socket path %s too long", cn->corename);
+ return false;
+ }
+
+ /*
+ * Currently no need to parse any other options.
+ * Relevant information can be retrieved from the peer
+ * pidfd retrievable via SO_PEERPIDFD by the receiver or
+ * via /proc/<pid>, using the SO_PEERPIDFD to guard
+ * against pid recycling when opening /proc/<pid>.
+ */
+ return true;
+ }
+ case COREDUMP_FILE:
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ return false;
}
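coredump_parse() above selects the dump mode from the first byte of core_pattern: '|' spawns the usermode-helper pipe, '@' connects to a coredump AF_UNIX socket, '@@' additionally runs the request/ack protocol, and anything else writes a plain core file. A hedged userspace sketch that switches a system to the socket mode by writing the sysctl; the socket path is only an example:

#include <stdio.h>

int main(void)
{
        /* "@" selects COREDUMP_SOCK; "@@" selects COREDUMP_SOCK_REQ. */
        FILE *f = fopen("/proc/sys/kernel/core_pattern", "w");
        int ret = 1;

        if (!f)
                return 1;
        if (fputs("@/run/example-coredump.socket", f) != EOF)   /* example path */
                ret = 0;
        if (fclose(f))
                ret = 1;
        return ret;
}

Such a write is rejected later in this patch by check_coredump_socket() if the path is relative, contains "..", or is made from outside the initial mount namespace.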
/* Repeat as long as we have more pattern to process and more output
@@ -238,7 +335,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
* Split on spaces before doing template expansion so that
* %e and %E don't get split if they have spaces in them
*/
- if (ispipe) {
+ if (cn->core_type == COREDUMP_PIPE) {
if (isspace(*pat_ptr)) {
if (cn->used != 0)
was_space = true;
@@ -248,7 +345,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
was_space = false;
err = cn_printf(cn, "%c", '\0');
if (err)
- return err;
+ return false;
(*argv)[(*argc)++] = cn->used;
}
}
@@ -338,6 +435,27 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
case 'C':
err = cn_printf(cn, "%d", cprm->cpu);
break;
+ /* pidfd number */
+ case 'F': {
+ /*
+ * Installing a pidfd only makes sense if
+ * we actually spawn a usermode helper.
+ */
+ if (cn->core_type != COREDUMP_PIPE)
+ break;
+
+ /*
+ * Note that we'll install a pidfd for the
+ * thread-group leader. We know that task
+ * linkage hasn't been removed yet and even if
+ * this @current isn't the actual thread-group
+ * leader we know that the thread-group leader
+ * cannot be reaped until @current has exited.
+ */
+ cprm->pid = task_tgid(current);
+ err = cn_printf(cn, "%d", COREDUMP_PIDFD_NUMBER);
+ break;
+ }
default:
break;
}
@@ -345,7 +463,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
}
if (err)
- return err;
+ return false;
}
out:
@@ -354,12 +472,10 @@ out:
* If core_pattern does not include a %p (as is the default)
* and core_uses_pid is set, then .%pid will be appended to
* the filename. Do not do this for piped commands. */
- if (!ispipe && !pid_in_pattern && core_uses_pid) {
- err = cn_printf(cn, ".%d", task_tgid_vnr(current));
- if (err)
- return err;
- }
- return ispipe;
+ if (cn->core_type == COREDUMP_FILE && !pid_in_pattern && core_uses_pid)
+ return cn_printf(cn, ".%d", task_tgid_vnr(current)) == 0;
+
+ return true;
}
static int zap_process(struct signal_struct *signal, int exit_code)
@@ -492,7 +608,7 @@ static void wait_for_dump_helpers(struct file *file)
}
/*
- * umh_pipe_setup
+ * umh_coredump_setup
* helper function to customize the process used
* to collect the core in userspace. Specifically
* it sets up a pipe and installs it as fd 0 (stdin)
@@ -502,11 +618,34 @@ static void wait_for_dump_helpers(struct file *file)
* is a special value that we use to trap recursive
* core dumps
*/
-static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
+static int umh_coredump_setup(struct subprocess_info *info, struct cred *new)
{
struct file *files[2];
struct coredump_params *cp = (struct coredump_params *)info->data;
- int err = create_pipe_files(files, 0);
+ int err;
+
+ if (cp->pid) {
+ struct file *pidfs_file __free(fput) = NULL;
+
+ pidfs_file = pidfs_alloc_file(cp->pid, 0);
+ if (IS_ERR(pidfs_file))
+ return PTR_ERR(pidfs_file);
+
+ pidfs_coredump(cp);
+
+ /*
+ * Usermode helpers are childen of either
+ * system_dfl_wq or of kthreadd. So we know that
+ * we're starting off with a clean file descriptor
+ * table. So we should always be able to use
+ * COREDUMP_PIDFD_NUMBER as our file descriptor value.
+ */
+ err = replace_fd(COREDUMP_PIDFD_NUMBER, pidfs_file, 0);
+ if (err < 0)
+ return err;
+ }
+
+ err = create_pipe_files(files, 0);
if (err)
return err;
@@ -514,277 +653,552 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
err = replace_fd(0, files[0], 0);
fput(files[0]);
+ if (err < 0)
+ return err;
+
/* and disallow core files too */
current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};
- return err;
+ return 0;
}
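With umh_coredump_setup() above, a '|' pipe handler still receives the core image on stdin, and when core_pattern contains %F it also finds a pidfd for the crashing thread-group leader pre-installed at fd 3 (COREDUMP_PIDFD_NUMBER). A hedged userspace sketch of such a helper; the output path and the pidfd check are illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        char buf[65536];
        ssize_t n;
        int out = open("/tmp/example-core", O_CREAT | O_WRONLY | O_TRUNC, 0600);

        if (out < 0)
                return 1;

        /*
         * With %F in core_pattern, fd 3 (COREDUMP_PIDFD_NUMBER) is a pidfd
         * to the crashing thread-group leader; signal 0 only checks that we
         * can address it (the task cannot be reaped before we exit).
         */
        if (syscall(SYS_pidfd_send_signal, 3, 0, NULL, 0) == 0)
                fprintf(stderr, "pidfd on fd 3 is valid\n");

        /* The core image itself is streamed to us on stdin by the kernel. */
        while ((n = read(0, buf, sizeof(buf))) > 0) {
                if (write(out, buf, (size_t)n) != n)
                        return 1;
        }
        close(out);
        return n < 0;
}

A matching pattern would look like |/usr/local/sbin/example-core-helper %F (the helper path is purely illustrative); %F expands to the installed descriptor number, which is always 3 here.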
-void do_coredump(const kernel_siginfo_t *siginfo)
+#ifdef CONFIG_UNIX
+static bool coredump_sock_connect(struct core_name *cn, struct coredump_params *cprm)
{
- struct core_state core_state;
- struct core_name cn;
- struct mm_struct *mm = current->mm;
- struct linux_binfmt * binfmt;
- const struct cred *old_cred;
- struct cred *cred;
- int retval = 0;
- int ispipe;
- size_t *argv = NULL;
- int argc = 0;
- /* require nonrelative corefile path and be extra careful */
- bool need_suid_safe = false;
- bool core_dumped = false;
- static atomic_t core_dump_count = ATOMIC_INIT(0);
- struct coredump_params cprm = {
- .siginfo = siginfo,
- .limit = rlimit(RLIMIT_CORE),
- /*
- * We must use the same mm->flags while dumping core to avoid
- * inconsistency of bit flags, since this flag is not protected
- * by any locks.
- */
- .mm_flags = mm->flags,
- .vma_meta = NULL,
- .cpu = raw_smp_processor_id(),
+ struct file *file __free(fput) = NULL;
+ struct sockaddr_un addr = {
+ .sun_family = AF_UNIX,
};
+ ssize_t addr_len;
+ int retval;
+ struct socket *socket;
- audit_core_dumps(siginfo->si_signo);
+ addr_len = strscpy(addr.sun_path, cn->corename);
+ if (addr_len < 0)
+ return false;
+ addr_len += offsetof(struct sockaddr_un, sun_path) + 1;
- binfmt = mm->binfmt;
- if (!binfmt || !binfmt->core_dump)
- goto fail;
- if (!__get_dumpable(cprm.mm_flags))
- goto fail;
+ /*
+ * It is possible that the userspace process which is supposed
+ * to handle the coredump and is listening on the AF_UNIX socket
+ * coredumps. Userspace should just mark itself non dumpable.
+ */
+
+ retval = sock_create_kern(&init_net, AF_UNIX, SOCK_STREAM, 0, &socket);
+ if (retval < 0)
+ return false;
+
+ file = sock_alloc_file(socket, 0, NULL);
+ if (IS_ERR(file))
+ return false;
- cred = prepare_creds();
- if (!cred)
- goto fail;
/*
- * We cannot trust fsuid as being the "true" uid of the process
- * nor do we know its entire history. We only know it was tainted
- * so we dump it as root in mode 2, and only into a controlled
- * environment (pipe handler or fully qualified path).
+ * Set the thread-group leader pid which is used for the peer
+ * credentials during connect() below. Then immediately register
+ * it in pidfs...
*/
- if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
- /* Setuid core dump mode */
- cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */
- need_suid_safe = true;
+ cprm->pid = task_tgid(current);
+ retval = pidfs_register_pid(cprm->pid);
+ if (retval)
+ return false;
+
+ /*
+ * ... and set the coredump information so userspace has it
+ * available after connect()...
+ */
+ pidfs_coredump(cprm);
+
+ retval = kernel_connect(socket, (struct sockaddr_unsized *)(&addr), addr_len,
+ O_NONBLOCK | SOCK_COREDUMP);
+
+ if (retval) {
+ if (retval == -EAGAIN)
+ coredump_report_failure("Coredump socket %s receive queue full", addr.sun_path);
+ else
+ coredump_report_failure("Coredump socket connection %s failed %d", addr.sun_path, retval);
+ return false;
}
- retval = coredump_wait(siginfo->si_signo, &core_state);
- if (retval < 0)
- goto fail_creds;
+ /* ... and validate that @sk_peer_pid matches @cprm.pid. */
+ if (WARN_ON_ONCE(unix_peer(socket->sk)->sk_peer_pid != cprm->pid))
+ return false;
- old_cred = override_creds(cred);
+ cprm->limit = RLIM_INFINITY;
+ cprm->file = no_free_ptr(file);
- ispipe = format_corename(&cn, &cprm, &argv, &argc);
+ return true;
+}
- if (ispipe) {
- int argi;
- int dump_count;
- char **helper_argv;
- struct subprocess_info *sub_info;
+static inline bool coredump_sock_recv(struct file *file, struct coredump_ack *ack, size_t size, int flags)
+{
+ struct msghdr msg = {};
+ struct kvec iov = { .iov_base = ack, .iov_len = size };
+ ssize_t ret;
- if (ispipe < 0) {
- coredump_report_failure("format_corename failed, aborting core");
- goto fail_unlock;
- }
+ memset(ack, 0, size);
+ ret = kernel_recvmsg(sock_from_file(file), &msg, &iov, 1, size, flags);
+ return ret == size;
+}
- if (cprm.limit == 1) {
- /* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
- *
- * Normally core limits are irrelevant to pipes, since
- * we're not writing to the file system, but we use
- * cprm.limit of 1 here as a special value, this is a
- * consistent way to catch recursive crashes.
- * We can still crash if the core_pattern binary sets
- * RLIM_CORE = !1, but it runs as root, and can do
- * lots of stupid things.
- *
- * Note that we use task_tgid_vnr here to grab the pid
- * of the process group leader. That way we get the
- * right pid if a thread in a multi-threaded
- * core_pattern process dies.
- */
- coredump_report_failure("RLIMIT_CORE is set to 1, aborting core");
- goto fail_unlock;
- }
- cprm.limit = RLIM_INFINITY;
+static inline bool coredump_sock_send(struct file *file, struct coredump_req *req)
+{
+ struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
+ struct kvec iov = { .iov_base = req, .iov_len = sizeof(*req) };
+ ssize_t ret;
- dump_count = atomic_inc_return(&core_dump_count);
- if (core_pipe_limit && (core_pipe_limit < dump_count)) {
- coredump_report_failure("over core_pipe_limit, skipping core dump");
- goto fail_dropcount;
- }
+ ret = kernel_sendmsg(sock_from_file(file), &msg, &iov, 1, sizeof(*req));
+ return ret == sizeof(*req);
+}
- helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
- GFP_KERNEL);
- if (!helper_argv) {
- coredump_report_failure("%s failed to allocate memory", __func__);
- goto fail_dropcount;
- }
- for (argi = 0; argi < argc; argi++)
- helper_argv[argi] = cn.corename + argv[argi];
- helper_argv[argi] = NULL;
-
- retval = -ENOMEM;
- sub_info = call_usermodehelper_setup(helper_argv[0],
- helper_argv, NULL, GFP_KERNEL,
- umh_pipe_setup, NULL, &cprm);
- if (sub_info)
- retval = call_usermodehelper_exec(sub_info,
- UMH_WAIT_EXEC);
-
- kfree(helper_argv);
- if (retval) {
- coredump_report_failure("|%s pipe failed", cn.corename);
- goto close_fail;
- }
- } else {
- struct mnt_idmap *idmap;
- struct inode *inode;
- int open_flags = O_CREAT | O_WRONLY | O_NOFOLLOW |
- O_LARGEFILE | O_EXCL;
-
- if (cprm.limit < binfmt->min_coredump)
- goto fail_unlock;
-
- if (need_suid_safe && cn.corename[0] != '/') {
- coredump_report_failure(
- "this process can only dump core to a fully qualified path, skipping core dump");
- goto fail_unlock;
- }
+static_assert(sizeof(enum coredump_mark) == sizeof(__u32));
- /*
- * Unlink the file if it exists unless this is a SUID
- * binary - in that case, we're running around with root
- * privs and don't want to unlink another user's coredump.
- */
- if (!need_suid_safe) {
- /*
- * If it doesn't exist, that's fine. If there's some
- * other problem, we'll catch it at the filp_open().
- */
- do_unlinkat(AT_FDCWD, getname_kernel(cn.corename));
- }
+static inline bool coredump_sock_mark(struct file *file, enum coredump_mark mark)
+{
+ struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
+ struct kvec iov = { .iov_base = &mark, .iov_len = sizeof(mark) };
+ ssize_t ret;
+
+ ret = kernel_sendmsg(sock_from_file(file), &msg, &iov, 1, sizeof(mark));
+ return ret == sizeof(mark);
+}
+
+static inline void coredump_sock_wait(struct file *file)
+{
+ ssize_t n;
+
+ /*
+ * We use a simple read to wait for the coredump processing to
+ * finish. Either the socket is closed or we get sent unexpected
+ * data. In both cases, we're done.
+ */
+ n = __kernel_read(file, &(char){ 0 }, 1, NULL);
+ if (n > 0)
+ coredump_report_failure("Coredump socket had unexpected data");
+ else if (n < 0)
+ coredump_report_failure("Coredump socket failed");
+}
+
+static inline void coredump_sock_shutdown(struct file *file)
+{
+ struct socket *socket;
+
+ socket = sock_from_file(file);
+ if (!socket)
+ return;
+
+ /* Let userspace know we're done processing the coredump. */
+ kernel_sock_shutdown(socket, SHUT_WR);
+}
+
+static bool coredump_sock_request(struct core_name *cn, struct coredump_params *cprm)
+{
+ struct coredump_req req = {
+ .size = sizeof(struct coredump_req),
+ .mask = COREDUMP_KERNEL | COREDUMP_USERSPACE |
+ COREDUMP_REJECT | COREDUMP_WAIT,
+ .size_ack = sizeof(struct coredump_ack),
+ };
+ struct coredump_ack ack = {};
+ ssize_t usize;
+
+ if (cn->core_type != COREDUMP_SOCK_REQ)
+ return true;
+
+ /* Let userspace know what we support. */
+ if (!coredump_sock_send(cprm->file, &req))
+ return false;
+
+ /* Peek the size of the coredump_ack. */
+ if (!coredump_sock_recv(cprm->file, &ack, sizeof(ack.size),
+ MSG_PEEK | MSG_WAITALL))
+ return false;
+
+ /* Refuse unknown coredump_ack sizes. */
+ usize = ack.size;
+ if (usize < COREDUMP_ACK_SIZE_VER0) {
+ coredump_sock_mark(cprm->file, COREDUMP_MARK_MINSIZE);
+ return false;
+ }
+ if (usize > sizeof(ack)) {
+ coredump_sock_mark(cprm->file, COREDUMP_MARK_MAXSIZE);
+ return false;
+ }
+
+ /* Now retrieve the coredump_ack. */
+ if (!coredump_sock_recv(cprm->file, &ack, usize, MSG_WAITALL))
+ return false;
+ if (ack.size != usize)
+ return false;
+
+ /* Refuse unknown coredump_ack flags. */
+ if (ack.mask & ~req.mask) {
+ coredump_sock_mark(cprm->file, COREDUMP_MARK_UNSUPPORTED);
+ return false;
+ }
+
+ /* Refuse mutually exclusive options. */
+ if (hweight64(ack.mask & (COREDUMP_USERSPACE | COREDUMP_KERNEL |
+ COREDUMP_REJECT)) != 1) {
+ coredump_sock_mark(cprm->file, COREDUMP_MARK_CONFLICTING);
+ return false;
+ }
+
+ if (ack.spare) {
+ coredump_sock_mark(cprm->file, COREDUMP_MARK_UNSUPPORTED);
+ return false;
+ }
+
+ cn->mask = ack.mask;
+ return coredump_sock_mark(cprm->file, COREDUMP_MARK_REQACK);
+}
+
+static bool coredump_socket(struct core_name *cn, struct coredump_params *cprm)
+{
+ if (!coredump_sock_connect(cn, cprm))
+ return false;
+
+ return coredump_sock_request(cn, cprm);
+}
+#else
+static inline void coredump_sock_wait(struct file *file) { }
+static inline void coredump_sock_shutdown(struct file *file) { }
+static inline bool coredump_socket(struct core_name *cn, struct coredump_params *cprm) { return false; }
+#endif
+
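For the '@' socket mode above, the receiving side is an ordinary SOCK_STREAM AF_UNIX listener in the initial mount and network namespaces: the kernel connect()s to it with SOCK_COREDUMP, the server can identify the crashing process via SO_PEERPIDFD, and it then reads the core image until the kernel shuts the connection down. A hedged userspace sketch of such a server, with minimal error handling and an example socket path; the '@@' request/ack handshake (struct coredump_req / coredump_ack above) would be performed right after accept(), before reading the dump:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

#ifndef SO_PEERPIDFD
#define SO_PEERPIDFD 77         /* assumption: value taken from recent uapi headers */
#endif

int main(void)
{
        struct sockaddr_un addr = { .sun_family = AF_UNIX };
        int srv = socket(AF_UNIX, SOCK_STREAM, 0);

        if (srv < 0)
                return 1;
        /* Path must match core_pattern minus the leading '@'. */
        strncpy(addr.sun_path, "/run/example-coredump.socket", sizeof(addr.sun_path) - 1);
        unlink(addr.sun_path);
        if (bind(srv, (struct sockaddr *)&addr, sizeof(addr)) || listen(srv, 16))
                return 1;

        for (;;) {
                char buf[65536];
                ssize_t n;
                int pidfd = -1;
                socklen_t len = sizeof(pidfd);
                int conn = accept(srv, NULL, NULL);

                if (conn < 0)
                        continue;
                /* Stable handle on the crashing process (guards against pid reuse). */
                getsockopt(conn, SOL_SOCKET, SO_PEERPIDFD, &pidfd, &len);

                /* Read the core image until the kernel shuts the socket down. */
                while ((n = read(conn, buf, sizeof(buf))) > 0)
                        ;       /* a real server would write this to storage */

                if (pidfd >= 0)
                        close(pidfd);
                close(conn);
        }
        return 0;
}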
+/* cprm->mm_flags contains a stable snapshot of dumpability flags. */
+static inline bool coredump_force_suid_safe(const struct coredump_params *cprm)
+{
+ /* Require nonrelative corefile path and be extra careful. */
+ return __get_dumpable(cprm->mm_flags) == SUID_DUMP_ROOT;
+}
+
+static bool coredump_file(struct core_name *cn, struct coredump_params *cprm,
+ const struct linux_binfmt *binfmt)
+{
+ struct mnt_idmap *idmap;
+ struct inode *inode;
+ struct file *file __free(fput) = NULL;
+ int open_flags = O_CREAT | O_WRONLY | O_NOFOLLOW | O_LARGEFILE | O_EXCL;
+
+ if (cprm->limit < binfmt->min_coredump)
+ return false;
+
+ if (coredump_force_suid_safe(cprm) && cn->corename[0] != '/') {
+ coredump_report_failure("this process can only dump core to a fully qualified path, skipping core dump");
+ return false;
+ }
+
+ /*
+ * Unlink the file if it exists unless this is a SUID
+ * binary - in that case, we're running around with root
+ * privs and don't want to unlink another user's coredump.
+ */
+ if (!coredump_force_suid_safe(cprm)) {
/*
- * There is a race between unlinking and creating the
- * file, but if that causes an EEXIST here, that's
- * fine - another process raced with us while creating
- * the corefile, and the other process won. To userspace,
- * what matters is that at least one of the two processes
- * writes its coredump successfully, not which one.
+ * If it doesn't exist, that's fine. If there's some
+ * other problem, we'll catch it at the filp_open().
*/
- if (need_suid_safe) {
- /*
- * Using user namespaces, normal user tasks can change
- * their current->fs->root to point to arbitrary
- * directories. Since the intention of the "only dump
- * with a fully qualified path" rule is to control where
- * coredumps may be placed using root privileges,
- * current->fs->root must not be used. Instead, use the
- * root directory of init_task.
- */
- struct path root;
-
- task_lock(&init_task);
- get_fs_root(init_task.fs, &root);
- task_unlock(&init_task);
- cprm.file = file_open_root(&root, cn.corename,
- open_flags, 0600);
- path_put(&root);
- } else {
- cprm.file = filp_open(cn.corename, open_flags, 0600);
- }
- if (IS_ERR(cprm.file))
- goto fail_unlock;
-
- inode = file_inode(cprm.file);
- if (inode->i_nlink > 1)
- goto close_fail;
- if (d_unhashed(cprm.file->f_path.dentry))
- goto close_fail;
+ do_unlinkat(AT_FDCWD, getname_kernel(cn->corename));
+ }
+
+ /*
+ * There is a race between unlinking and creating the
+ * file, but if that causes an EEXIST here, that's
+ * fine - another process raced with us while creating
+ * the corefile, and the other process won. To userspace,
+ * what matters is that at least one of the two processes
+ * writes its coredump successfully, not which one.
+ */
+ if (coredump_force_suid_safe(cprm)) {
/*
- * AK: actually i see no reason to not allow this for named
- * pipes etc, but keep the previous behaviour for now.
+ * Using user namespaces, normal user tasks can change
+ * their current->fs->root to point to arbitrary
+ * directories. Since the intention of the "only dump
+ * with a fully qualified path" rule is to control where
+ * coredumps may be placed using root privileges,
+ * current->fs->root must not be used. Instead, use the
+ * root directory of init_task.
*/
- if (!S_ISREG(inode->i_mode))
- goto close_fail;
- /*
- * Don't dump core if the filesystem changed owner or mode
- * of the file during file creation. This is an issue when
- * a process dumps core while its cwd is e.g. on a vfat
- * filesystem.
+ struct path root;
+
+ task_lock(&init_task);
+ get_fs_root(init_task.fs, &root);
+ task_unlock(&init_task);
+ file = file_open_root(&root, cn->corename, open_flags, 0600);
+ path_put(&root);
+ } else {
+ file = filp_open(cn->corename, open_flags, 0600);
+ }
+ if (IS_ERR(file))
+ return false;
+
+ inode = file_inode(file);
+ if (inode->i_nlink > 1)
+ return false;
+ if (d_unhashed(file->f_path.dentry))
+ return false;
+ /*
+ * AK: actually i see no reason to not allow this for named
+ * pipes etc, but keep the previous behaviour for now.
+ */
+ if (!S_ISREG(inode->i_mode))
+ return false;
+ /*
+ * Don't dump core if the filesystem changed owner or mode
+ * of the file during file creation. This is an issue when
+ * a process dumps core while its cwd is e.g. on a vfat
+ * filesystem.
+ */
+ idmap = file_mnt_idmap(file);
+ if (!vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode), current_fsuid())) {
+ coredump_report_failure("Core dump to %s aborted: cannot preserve file owner", cn->corename);
+ return false;
+ }
+ if ((inode->i_mode & 0677) != 0600) {
+ coredump_report_failure("Core dump to %s aborted: cannot preserve file permissions", cn->corename);
+ return false;
+ }
+ if (!(file->f_mode & FMODE_CAN_WRITE))
+ return false;
+ if (do_truncate(idmap, file->f_path.dentry, 0, 0, file))
+ return false;
+
+ cprm->file = no_free_ptr(file);
+ return true;
+}
+
+static bool coredump_pipe(struct core_name *cn, struct coredump_params *cprm,
+ size_t *argv, int argc)
+{
+ int argi;
+ char **helper_argv __free(kfree) = NULL;
+ struct subprocess_info *sub_info;
+
+ if (cprm->limit == 1) {
+ /* See umh_coredump_setup() which sets RLIMIT_CORE = 1.
+ *
+ * Normally core limits are irrelevant to pipes, since
+ * we're not writing to the file system, but we use
+ * cprm.limit of 1 here as a special value, this is a
+ * consistent way to catch recursive crashes.
+ * We can still crash if the core_pattern binary sets
+ * RLIM_CORE = !1, but it runs as root, and can do
+ * lots of stupid things.
+ *
+ * Note that we use task_tgid_vnr here to grab the pid
+ * of the process group leader. That way we get the
+ * right pid if a thread in a multi-threaded
+ * core_pattern process dies.
*/
- idmap = file_mnt_idmap(cprm.file);
- if (!vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode),
- current_fsuid())) {
- coredump_report_failure("Core dump to %s aborted: "
- "cannot preserve file owner", cn.corename);
- goto close_fail;
- }
- if ((inode->i_mode & 0677) != 0600) {
- coredump_report_failure("Core dump to %s aborted: "
- "cannot preserve file permissions", cn.corename);
- goto close_fail;
- }
- if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
- goto close_fail;
- if (do_truncate(idmap, cprm.file->f_path.dentry,
- 0, 0, cprm.file))
- goto close_fail;
+ coredump_report_failure("RLIMIT_CORE is set to 1, aborting core");
+ return false;
+ }
+ cprm->limit = RLIM_INFINITY;
+
+ cn->core_pipe_limit = atomic_inc_return(&core_pipe_count);
+ if (core_pipe_limit && (core_pipe_limit < cn->core_pipe_limit)) {
+ coredump_report_failure("over core_pipe_limit, skipping core dump");
+ return false;
+ }
+
+ helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv), GFP_KERNEL);
+ if (!helper_argv) {
+ coredump_report_failure("%s failed to allocate memory", __func__);
+ return false;
+ }
+ for (argi = 0; argi < argc; argi++)
+ helper_argv[argi] = cn->corename + argv[argi];
+ helper_argv[argi] = NULL;
+
+ sub_info = call_usermodehelper_setup(helper_argv[0], helper_argv, NULL,
+ GFP_KERNEL, umh_coredump_setup,
+ NULL, cprm);
+ if (!sub_info)
+ return false;
+
+ if (call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC)) {
+ coredump_report_failure("|%s pipe failed", cn->corename);
+ return false;
+ }
+
+ /*
+ * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
+ * have this set to NULL.
+ */
+ if (!cprm->file) {
+ coredump_report_failure("Core dump to |%s disabled", cn->corename);
+ return false;
+ }
+
+ return true;
+}
+
+static bool coredump_write(struct core_name *cn,
+ struct coredump_params *cprm,
+ const struct linux_binfmt *binfmt)
+{
+
+ if (dump_interrupted())
+ return true;
+
+ if (!dump_vma_snapshot(cprm))
+ return false;
+
+ file_start_write(cprm->file);
+ cn->core_dumped = binfmt->core_dump(cprm);
+ /*
+ * Ensures that file size is big enough to contain the current
+ * file position. This prevents gdb from complaining about
+ * a truncated file if the last "write" to the file was
+ * dump_skip.
+ */
+ if (cprm->to_skip) {
+ cprm->to_skip--;
+ dump_emit(cprm, "", 1);
}
+ file_end_write(cprm->file);
+ free_vma_snapshot(cprm);
+ return true;
+}
+
+static void coredump_cleanup(struct core_name *cn, struct coredump_params *cprm)
+{
+ if (cprm->file)
+ filp_close(cprm->file, NULL);
+ if (cn->core_pipe_limit) {
+ VFS_WARN_ON_ONCE(cn->core_type != COREDUMP_PIPE);
+ atomic_dec(&core_pipe_count);
+ }
+ kfree(cn->corename);
+ coredump_finish(cn->core_dumped);
+}
+
+static inline bool coredump_skip(const struct coredump_params *cprm,
+ const struct linux_binfmt *binfmt)
+{
+ if (!binfmt)
+ return true;
+ if (!binfmt->core_dump)
+ return true;
+ if (!__get_dumpable(cprm->mm_flags))
+ return true;
+ return false;
+}
+
+static void do_coredump(struct core_name *cn, struct coredump_params *cprm,
+ size_t **argv, int *argc, const struct linux_binfmt *binfmt)
+{
+ if (!coredump_parse(cn, cprm, argv, argc)) {
+ coredump_report_failure("format_corename failed, aborting core");
+ return;
+ }
+
+ switch (cn->core_type) {
+ case COREDUMP_FILE:
+ if (!coredump_file(cn, cprm, binfmt))
+ return;
+ break;
+ case COREDUMP_PIPE:
+ if (!coredump_pipe(cn, cprm, *argv, *argc))
+ return;
+ break;
+ case COREDUMP_SOCK_REQ:
+ fallthrough;
+ case COREDUMP_SOCK:
+ if (!coredump_socket(cn, cprm))
+ return;
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ return;
+ }
+
+ /* Don't even generate the coredump. */
+ if (cn->mask & COREDUMP_REJECT)
+ return;
/* get us an unshared descriptor table; almost always a no-op */
/* The cell spufs coredump code reads the file descriptor tables */
- retval = unshare_files();
- if (retval)
- goto close_fail;
- if (!dump_interrupted()) {
- /*
- * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
- * have this set to NULL.
- */
- if (!cprm.file) {
- coredump_report_failure("Core dump to |%s disabled", cn.corename);
- goto close_fail;
+ if (unshare_files())
+ return;
+
+ if ((cn->mask & COREDUMP_KERNEL) && !coredump_write(cn, cprm, binfmt))
+ return;
+
+ coredump_sock_shutdown(cprm->file);
+
+ /* Let the parent know that a coredump was generated. */
+ if (cn->mask & COREDUMP_USERSPACE)
+ cn->core_dumped = true;
+
+ /*
+ * When core_pipe_limit is set we wait for the coredump server
+ * or usermodehelper to finish before exiting so it can e.g.,
+ * inspect /proc/<pid>.
+ */
+ if (cn->mask & COREDUMP_WAIT) {
+ switch (cn->core_type) {
+ case COREDUMP_PIPE:
+ wait_for_dump_helpers(cprm->file);
+ break;
+ case COREDUMP_SOCK_REQ:
+ fallthrough;
+ case COREDUMP_SOCK:
+ coredump_sock_wait(cprm->file);
+ break;
+ default:
+ break;
}
- if (!dump_vma_snapshot(&cprm))
- goto close_fail;
+ }
+}
- file_start_write(cprm.file);
- core_dumped = binfmt->core_dump(&cprm);
+void vfs_coredump(const kernel_siginfo_t *siginfo)
+{
+ size_t *argv __free(kfree) = NULL;
+ struct core_state core_state;
+ struct core_name cn;
+ const struct mm_struct *mm = current->mm;
+ const struct linux_binfmt *binfmt = mm->binfmt;
+ int argc = 0;
+ struct coredump_params cprm = {
+ .siginfo = siginfo,
+ .limit = rlimit(RLIMIT_CORE),
/*
- * Ensures that file size is big enough to contain the current
- * file postion. This prevents gdb from complaining about
- * a truncated file if the last "write" to the file was
- * dump_skip.
+ * We must use the same mm->flags while dumping core to avoid
+ * inconsistency of bit flags, since this flag is not protected
+ * by any locks.
+ *
+ * Note that we only care about MMF_DUMP* flags.
*/
- if (cprm.to_skip) {
- cprm.to_skip--;
- dump_emit(&cprm, "", 1);
- }
- file_end_write(cprm.file);
- free_vma_snapshot(&cprm);
- }
- if (ispipe && core_pipe_limit)
- wait_for_dump_helpers(cprm.file);
-close_fail:
- if (cprm.file)
- filp_close(cprm.file, NULL);
-fail_dropcount:
- if (ispipe)
- atomic_dec(&core_dump_count);
-fail_unlock:
- kfree(argv);
- kfree(cn.corename);
- coredump_finish(core_dumped);
- revert_creds(old_cred);
-fail_creds:
- put_cred(cred);
-fail:
+ .mm_flags = __mm_flags_get_dumpable(mm),
+ .vma_meta = NULL,
+ .cpu = raw_smp_processor_id(),
+ };
+
+ audit_core_dumps(siginfo->si_signo);
+
+ if (coredump_skip(&cprm, binfmt))
+ return;
+
+ CLASS(prepare_creds, cred)();
+ if (!cred)
+ return;
+ /*
+ * We cannot trust fsuid as being the "true" uid of the process
+ * nor do we know its entire history. We only know it was tainted
+ * so we dump it as root in mode 2, and only into a controlled
+ * environment (pipe handler or fully qualified path).
+ */
+ if (coredump_force_suid_safe(&cprm))
+ cred->fsuid = GLOBAL_ROOT_UID;
+
+ if (coredump_wait(siginfo->si_signo, &core_state) < 0)
+ return;
+
+ scoped_with_creds(cred)
+ do_coredump(&cn, &cprm, &argv, &argc, binfmt);
+ coredump_cleanup(&cn, &cprm);
return;
}
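vfs_coredump() above appears to use the guard infrastructure from linux/cleanup.h: CLASS(prepare_creds, cred)() constructs an object whose destructor runs at end of scope, and scoped_with_creds() overrides and reverts credentials around the call. A minimal sketch of the same CLASS() shape using the in-tree 'fd' class from current linux/file.h; it is illustrative only and not part of this patch:

#include <linux/cleanup.h>
#include <linux/file.h>
#include <linux/fs.h>

/* Read one byte from a userspace fd; fdput() runs automatically at scope exit. */
static ssize_t example_read_byte(int ufd, char *out)
{
        CLASS(fd, f)(ufd);

        if (fd_empty(f))
                return -EBADF;
        return kernel_read(fd_file(f), out, 1, NULL);
}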
@@ -798,10 +1212,9 @@ static int __dump_emit(struct coredump_params *cprm, const void *addr, int nr)
struct file *file = cprm->file;
loff_t pos = file->f_pos;
ssize_t n;
+
if (cprm->written + nr > cprm->limit)
return 0;
-
-
if (dump_interrupted())
return 0;
n = __kernel_write(file, addr, nr, &pos);
@@ -818,20 +1231,21 @@ static int __dump_skip(struct coredump_params *cprm, size_t nr)
{
static char zeroes[PAGE_SIZE];
struct file *file = cprm->file;
+
if (file->f_mode & FMODE_LSEEK) {
- if (dump_interrupted() ||
- vfs_llseek(file, nr, SEEK_CUR) < 0)
+ if (dump_interrupted() || vfs_llseek(file, nr, SEEK_CUR) < 0)
return 0;
cprm->pos += nr;
return 1;
- } else {
- while (nr > PAGE_SIZE) {
- if (!__dump_emit(cprm, zeroes, PAGE_SIZE))
- return 0;
- nr -= PAGE_SIZE;
- }
- return __dump_emit(cprm, zeroes, nr);
}
+
+ while (nr > PAGE_SIZE) {
+ if (!__dump_emit(cprm, zeroes, PAGE_SIZE))
+ return 0;
+ nr -= PAGE_SIZE;
+ }
+
+ return __dump_emit(cprm, zeroes, nr);
}
int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
@@ -925,14 +1339,23 @@ int dump_user_range(struct coredump_params *cprm, unsigned long start,
{
unsigned long addr;
struct page *dump_page;
+ int locked, ret;
dump_page = dump_page_alloc();
if (!dump_page)
return 0;
+ ret = 0;
+ locked = 0;
for (addr = start; addr < start + len; addr += PAGE_SIZE) {
struct page *page;
+ if (!locked) {
+ if (mmap_read_lock_killable(current->mm))
+ goto out;
+ locked = 1;
+ }
+
/*
* To avoid having to allocate page tables for virtual address
* ranges that have never been used yet, and also to make it
@@ -940,21 +1363,38 @@ int dump_user_range(struct coredump_params *cprm, unsigned long start,
* NULL when encountering an empty page table entry that would
* otherwise have been filled with the zero page.
*/
- page = get_dump_page(addr);
+ page = get_dump_page(addr, &locked);
if (page) {
+ if (locked) {
+ mmap_read_unlock(current->mm);
+ locked = 0;
+ }
int stop = !dump_emit_page(cprm, dump_page_copy(page, dump_page));
put_page(page);
- if (stop) {
- dump_page_free(dump_page);
- return 0;
- }
+ if (stop)
+ goto out;
} else {
dump_skip(cprm, PAGE_SIZE);
}
+
+ if (dump_interrupted())
+ goto out;
+
+ if (!need_resched())
+ continue;
+ if (locked) {
+ mmap_read_unlock(current->mm);
+ locked = 0;
+ }
cond_resched();
}
+ ret = 1;
+out:
+ if (locked)
+ mmap_read_unlock(current->mm);
+
dump_page_free(dump_page);
- return 1;
+ return ret;
}
#endif
@@ -974,7 +1414,7 @@ EXPORT_SYMBOL(dump_align);
void validate_coredump_safety(void)
{
if (suid_dumpable == SUID_DUMP_ROOT &&
- core_pattern[0] != '/' && core_pattern[0] != '|') {
+ core_pattern[0] != '/' && core_pattern[0] != '|' && core_pattern[0] != '@') {
coredump_report_failure("Unsafe core_pattern used with fs.suid_dumpable=2: "
"pipe handler or fully qualified core dump path required. "
@@ -982,20 +1422,80 @@ void validate_coredump_safety(void)
}
}
+static inline bool check_coredump_socket(void)
+{
+ const char *p;
+
+ if (core_pattern[0] != '@')
+ return true;
+
+ /*
+ * Coredump socket must be located in the initial mount
+ * namespace. Don't give the impression that anything else is
+ * supported right now.
+ */
+ if (current->nsproxy->mnt_ns != init_task.nsproxy->mnt_ns)
+ return false;
+
+ /* Must be an absolute path... */
+ if (core_pattern[1] != '/') {
+ /* ... or the socket request protocol... */
+ if (core_pattern[1] != '@')
+ return false;
+ /* ... and if so must be an absolute path. */
+ if (core_pattern[2] != '/')
+ return false;
+ p = &core_pattern[2];
+ } else {
+ p = &core_pattern[1];
+ }
+
+ /* The path obviously cannot exceed UNIX_PATH_MAX. */
+ if (strlen(p) >= UNIX_PATH_MAX)
+ return false;
+
+ /* Must not contain ".." in the path. */
+ if (name_contains_dotdot(core_pattern))
+ return false;
+
+ return true;
+}
+
static int proc_dostring_coredump(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- int error = proc_dostring(table, write, buffer, lenp, ppos);
+ int error;
+ ssize_t retval;
+ char old_core_pattern[CORENAME_MAX_SIZE];
+
+ if (!write)
+ return proc_dostring(table, write, buffer, lenp, ppos);
+
+ retval = strscpy(old_core_pattern, core_pattern, CORENAME_MAX_SIZE);
+
+ error = proc_dostring(table, write, buffer, lenp, ppos);
+ if (error)
+ return error;
+
+ if (!check_coredump_socket()) {
+ strscpy(core_pattern, old_core_pattern, retval + 1);
+ return -EINVAL;
+ }
- if (!error)
- validate_coredump_safety();
+ validate_coredump_safety();
return error;
}
static const unsigned int core_file_note_size_min = CORE_FILE_NOTE_SIZE_DEFAULT;
static const unsigned int core_file_note_size_max = CORE_FILE_NOTE_SIZE_MAX;
+static char core_modes[] = {
+ "file\npipe"
+#ifdef CONFIG_UNIX
+ "\nsocket"
+#endif
+};
-static struct ctl_table coredump_sysctls[] = {
+static const struct ctl_table coredump_sysctls[] = {
{
.procname = "core_uses_pid",
.data = &core_uses_pid,
@@ -1015,7 +1515,9 @@ static struct ctl_table coredump_sysctls[] = {
.data = &core_pipe_limit,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_INT_MAX,
},
{
.procname = "core_file_note_size_limit",
@@ -1026,6 +1528,22 @@ static struct ctl_table coredump_sysctls[] = {
.extra1 = (unsigned int *)&core_file_note_size_min,
.extra2 = (unsigned int *)&core_file_note_size_max,
},
+ {
+ .procname = "core_sort_vma",
+ .data = &core_sort_vma,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
+ .procname = "core_modes",
+ .data = core_modes,
+ .maxlen = sizeof(core_modes) - 1,
+ .mode = 0444,
+ .proc_handler = proc_dostring,
+ },
};
static int __init init_fs_coredump_sysctls(void)
@@ -1256,8 +1774,9 @@ static bool dump_vma_snapshot(struct coredump_params *cprm)
cprm->vma_data_size += m->dump_size;
}
- sort(cprm->vma_meta, cprm->vma_count, sizeof(*cprm->vma_meta),
- cmp_vma_size, NULL);
+ if (core_sort_vma)
+ sort(cprm->vma_meta, cprm->vma_count, sizeof(*cprm->vma_meta),
+ cmp_vma_size, NULL);
return true;
}
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index b84d1747a020..e54ebe402df7 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -17,7 +17,6 @@
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
-#include <linux/pfn_t.h>
#include <linux/ramfs.h>
#include <linux/init.h>
#include <linux/string.h>
@@ -96,7 +95,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
inode = iget_locked(sb, cramino(cramfs_inode, offset));
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
switch (cramfs_inode->mode & S_IFMT) {
@@ -117,9 +116,18 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
inode_nohighmem(inode);
inode->i_data.a_ops = &cramfs_aops;
break;
- default:
+ case S_IFCHR:
+ case S_IFBLK:
+ case S_IFIFO:
+ case S_IFSOCK:
init_special_inode(inode, cramfs_inode->mode,
old_decode_dev(cramfs_inode->size));
+ break;
+ default:
+ printk(KERN_DEBUG "CRAMFS: Invalid file type 0%04o for inode %lu.\n",
+ inode->i_mode, inode->i_ino);
+ iget_failed(inode);
+ return ERR_PTR(-EIO);
}
inode->i_mode = cramfs_inode->mode;
@@ -412,8 +420,8 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
for (i = 0; i < pages && !ret; i++) {
vm_fault_t vmf;
unsigned long off = i * PAGE_SIZE;
- pfn_t pfn = phys_to_pfn_t(address + off, PFN_DEV);
- vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn);
+ vmf = vmf_insert_mixed(vma, vma->vm_start + off,
+ PHYS_PFN(address + off));
if (vmf & VM_FAULT_ERROR)
ret = vm_fault_to_errno(vmf, 0);
}
diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig
index 5aff5934baa1..464b54610fd3 100644
--- a/fs/crypto/Kconfig
+++ b/fs/crypto/Kconfig
@@ -2,9 +2,9 @@
config FS_ENCRYPTION
bool "FS Encryption (Per-file encryption)"
select CRYPTO
- select CRYPTO_HASH
select CRYPTO_SKCIPHER
select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_SHA512
select KEYS
help
Enable encryption of files and directories. This
@@ -24,20 +24,14 @@ config FS_ENCRYPTION
#
# Also note that this option only pulls in the generic implementations of the
# algorithms, not any per-architecture optimized implementations. It is
-# strongly recommended to enable optimized implementations too. It is safe to
-# disable these generic implementations if corresponding optimized
-# implementations will always be available too; for this reason, these are soft
-# dependencies ('imply' rather than 'select'). Only disable these generic
-# implementations if you're sure they will never be needed, though.
+# strongly recommended to enable optimized implementations too.
config FS_ENCRYPTION_ALGS
tristate
- imply CRYPTO_AES
- imply CRYPTO_CBC
- imply CRYPTO_CTS
- imply CRYPTO_ECB
- imply CRYPTO_HMAC
- imply CRYPTO_SHA512
- imply CRYPTO_XTS
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_CTS
+ select CRYPTO_ECB
+ select CRYPTO_XTS
config FS_ENCRYPTION_INLINE_CRYPT
bool "Enable fscrypt to use inline crypto"
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 0ad8c30b8fa5..5f5599020e94 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -7,10 +7,12 @@
* Copyright (C) 2015, Motorola Mobility
*/
-#include <linux/pagemap.h>
-#include <linux/module.h>
#include <linux/bio.h>
+#include <linux/export.h>
+#include <linux/module.h>
#include <linux/namei.h>
+#include <linux/pagemap.h>
+
#include "fscrypt_private.h"
/**
@@ -111,7 +113,7 @@ out:
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
{
- const struct fscrypt_inode_info *ci = inode->i_crypt_info;
+ const struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(inode);
const unsigned int du_bits = ci->ci_data_unit_bits;
const unsigned int du_size = 1U << du_bits;
const unsigned int du_per_page_bits = PAGE_SHIFT - du_bits;
@@ -146,7 +148,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
*/
for (i = 0; i < nr_pages; i++) {
pages[i] = fscrypt_alloc_bounce_page(i == 0 ? GFP_NOFS :
- GFP_NOWAIT | __GFP_NOWARN);
+ GFP_NOWAIT);
if (!pages[i])
break;
}
@@ -165,8 +167,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
do {
err = fscrypt_crypt_data_unit(ci, FS_ENCRYPT, du_index,
ZERO_PAGE(0), pages[i],
- du_size, offset,
- GFP_NOFS);
+ du_size, offset);
if (err)
goto out;
du_index++;
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 328470d40dec..07f9cbfe3ea4 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -20,12 +20,14 @@
* Special Publication 800-38E and IEEE P1619/D16.
*/
-#include <linux/pagemap.h>
+#include <crypto/skcipher.h>
+#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/module.h>
-#include <linux/scatterlist.h>
+#include <linux/pagemap.h>
#include <linux/ratelimit.h>
-#include <crypto/skcipher.h>
+#include <linux/scatterlist.h>
+
#include "fscrypt_private.h"
static unsigned int num_prealloc_crypto_pages = 32;
@@ -108,15 +110,13 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 index,
int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
fscrypt_direction_t rw, u64 index,
struct page *src_page, struct page *dest_page,
- unsigned int len, unsigned int offs,
- gfp_t gfp_flags)
+ unsigned int len, unsigned int offs)
{
+ struct crypto_sync_skcipher *tfm = ci->ci_enc_key.tfm;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
union fscrypt_iv iv;
- struct skcipher_request *req = NULL;
- DECLARE_CRYPTO_WAIT(wait);
struct scatterlist dst, src;
- struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
- int res = 0;
+ int err;
if (WARN_ON_ONCE(len <= 0))
return -EINVAL;
@@ -125,36 +125,28 @@ int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
fscrypt_generate_iv(&iv, index, ci);
- req = skcipher_request_alloc(tfm, gfp_flags);
- if (!req)
- return -ENOMEM;
-
skcipher_request_set_callback(
req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- crypto_req_done, &wait);
-
+ NULL, NULL);
sg_init_table(&dst, 1);
sg_set_page(&dst, dest_page, len, offs);
sg_init_table(&src, 1);
sg_set_page(&src, src_page, len, offs);
skcipher_request_set_crypt(req, &src, &dst, len, &iv);
if (rw == FS_DECRYPT)
- res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
+ err = crypto_skcipher_decrypt(req);
else
- res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
- skcipher_request_free(req);
- if (res) {
+ err = crypto_skcipher_encrypt(req);
+ if (err)
fscrypt_err(ci->ci_inode,
"%scryption failed for data unit %llu: %d",
- (rw == FS_DECRYPT ? "De" : "En"), index, res);
- return res;
- }
- return 0;
+ (rw == FS_DECRYPT ? "De" : "En"), index, err);
+ return err;
}
/**
- * fscrypt_encrypt_pagecache_blocks() - Encrypt data from a pagecache page
- * @page: the locked pagecache page containing the data to encrypt
+ * fscrypt_encrypt_pagecache_blocks() - Encrypt data from a pagecache folio
+ * @folio: the locked pagecache folio containing the data to encrypt
* @len: size of the data to encrypt, in bytes
* @offs: offset within @page of the data to encrypt, in bytes
* @gfp_flags: memory allocation flags; see details below
@@ -177,23 +169,21 @@ int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
*
* Return: the new encrypted bounce page on success; an ERR_PTR() on failure
*/
-struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
- unsigned int len,
- unsigned int offs,
- gfp_t gfp_flags)
-
+struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
+ size_t len, size_t offs, gfp_t gfp_flags)
{
- const struct inode *inode = page->mapping->host;
- const struct fscrypt_inode_info *ci = inode->i_crypt_info;
+ const struct inode *inode = folio->mapping->host;
+ const struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(inode);
const unsigned int du_bits = ci->ci_data_unit_bits;
const unsigned int du_size = 1U << du_bits;
struct page *ciphertext_page;
- u64 index = ((u64)page->index << (PAGE_SHIFT - du_bits)) +
+ u64 index = ((u64)folio->index << (PAGE_SHIFT - du_bits)) +
(offs >> du_bits);
unsigned int i;
int err;
- if (WARN_ON_ONCE(!PageLocked(page)))
+ VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
+ if (WARN_ON_ONCE(!folio_test_locked(folio)))
return ERR_PTR(-EINVAL);
if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, du_size)))
@@ -205,15 +195,15 @@ struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
for (i = offs; i < offs + len; i += du_size, index++) {
err = fscrypt_crypt_data_unit(ci, FS_ENCRYPT, index,
- page, ciphertext_page,
- du_size, i, gfp_flags);
+ &folio->page, ciphertext_page,
+ du_size, i);
if (err) {
fscrypt_free_bounce_page(ciphertext_page);
return ERR_PTR(err);
}
}
SetPagePrivate(ciphertext_page);
- set_page_private(ciphertext_page, (unsigned long)page);
+ set_page_private(ciphertext_page, (unsigned long)folio);
return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
@@ -227,7 +217,6 @@ EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
* @offs: Byte offset within @page at which the block to encrypt begins
* @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
* number of the block within the file
- * @gfp_flags: Memory allocation flags
*
* Encrypt a possibly-compressed filesystem block that is located in an
* arbitrary page, not necessarily in the original pagecache page. The @inode
@@ -239,13 +228,13 @@ EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
*/
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
- u64 lblk_num, gfp_t gfp_flags)
+ u64 lblk_num)
{
if (WARN_ON_ONCE(inode->i_sb->s_cop->supports_subblock_data_units))
return -EOPNOTSUPP;
- return fscrypt_crypt_data_unit(inode->i_crypt_info, FS_ENCRYPT,
- lblk_num, page, page, len, offs,
- gfp_flags);
+ return fscrypt_crypt_data_unit(fscrypt_get_inode_info_raw(inode),
+ FS_ENCRYPT, lblk_num, page, page, len,
+ offs);
}
EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
@@ -267,7 +256,7 @@ int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
size_t offs)
{
const struct inode *inode = folio->mapping->host;
- const struct fscrypt_inode_info *ci = inode->i_crypt_info;
+ const struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(inode);
const unsigned int du_bits = ci->ci_data_unit_bits;
const unsigned int du_size = 1U << du_bits;
u64 index = ((u64)folio->index << (PAGE_SHIFT - du_bits)) +
@@ -285,8 +274,7 @@ int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
struct page *page = folio_page(folio, i >> PAGE_SHIFT);
err = fscrypt_crypt_data_unit(ci, FS_DECRYPT, index, page,
- page, du_size, i & ~PAGE_MASK,
- GFP_NOFS);
+ page, du_size, i & ~PAGE_MASK);
if (err)
return err;
}
@@ -318,9 +306,9 @@ int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
{
if (WARN_ON_ONCE(inode->i_sb->s_cop->supports_subblock_data_units))
return -EOPNOTSUPP;
- return fscrypt_crypt_data_unit(inode->i_crypt_info, FS_DECRYPT,
- lblk_num, page, page, len, offs,
- GFP_NOFS);
+ return fscrypt_crypt_data_unit(fscrypt_get_inode_info_raw(inode),
+ FS_DECRYPT, lblk_num, page, page, len,
+ offs);
}
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);
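The crypto.c hunks above are the heart of the switch from the asynchronous skcipher interface (heap-allocated request plus crypto_wait_req()) to a synchronous transform with an on-stack request, which is why fscrypt_crypt_data_unit() loses its gfp_t argument and its -ENOMEM path. Distilled from that hunk, the pattern looks roughly like the sketch below; the wrapper name sync_encrypt_page() and its parameter list are illustrative, not part of the patch.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/*
 * Encrypt 'len' bytes at offset 'offs' of 'page' in place with a
 * synchronous skcipher.  The request lives on the stack, so nothing
 * here can fail with -ENOMEM and no gfp flags are needed; the only
 * error source is the cipher itself.
 */
static int sync_encrypt_page(struct crypto_sync_skcipher *tfm,
			     struct page *page, unsigned int len,
			     unsigned int offs, u8 *iv)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
	struct scatterlist sg;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		NULL, NULL);
	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, len, offs);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	return crypto_skcipher_encrypt(req);
}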
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 0ad52fbe51c9..a9a4432d12ba 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -11,11 +11,13 @@
* This has not yet undergone a rigorous security audit.
*/
-#include <linux/namei.h>
-#include <linux/scatterlist.h>
-#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
+#include <linux/export.h>
+#include <linux/namei.h>
+#include <linux/scatterlist.h>
+#include <linux/base64.h>
+
#include "fscrypt_private.h"
/*
@@ -70,7 +72,7 @@ struct fscrypt_nokey_name {
/* Encoded size of max-size no-key name */
#define FSCRYPT_NOKEY_NAME_MAX_ENCODED \
- FSCRYPT_BASE64URL_CHARS(FSCRYPT_NOKEY_NAME_MAX)
+ BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX)
static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
{
@@ -92,13 +94,12 @@ static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
u8 *out, unsigned int olen)
{
- struct skcipher_request *req = NULL;
- DECLARE_CRYPTO_WAIT(wait);
- const struct fscrypt_inode_info *ci = inode->i_crypt_info;
- struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
+ const struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(inode);
+ struct crypto_sync_skcipher *tfm = ci->ci_enc_key.tfm;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
union fscrypt_iv iv;
struct scatterlist sg;
- int res;
+ int err;
/*
* Copy the filename to the output buffer for encrypting in-place and
@@ -109,28 +110,17 @@ int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
memcpy(out, iname->name, iname->len);
memset(out + iname->len, 0, olen - iname->len);
- /* Initialize the IV */
fscrypt_generate_iv(&iv, 0, ci);
- /* Set up the encryption request */
- req = skcipher_request_alloc(tfm, GFP_NOFS);
- if (!req)
- return -ENOMEM;
- skcipher_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- crypto_req_done, &wait);
+ skcipher_request_set_callback(
+ req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
sg_init_one(&sg, out, olen);
skcipher_request_set_crypt(req, &sg, &sg, olen, &iv);
-
- /* Do the encryption */
- res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
- skcipher_request_free(req);
- if (res < 0) {
- fscrypt_err(inode, "Filename encryption failed: %d", res);
- return res;
- }
-
- return 0;
+ err = crypto_skcipher_encrypt(req);
+ if (err)
+ fscrypt_err(inode, "Filename encryption failed: %d", err);
+ return err;
}
EXPORT_SYMBOL_GPL(fscrypt_fname_encrypt);
@@ -148,118 +138,31 @@ static int fname_decrypt(const struct inode *inode,
const struct fscrypt_str *iname,
struct fscrypt_str *oname)
{
- struct skcipher_request *req = NULL;
- DECLARE_CRYPTO_WAIT(wait);
- struct scatterlist src_sg, dst_sg;
- const struct fscrypt_inode_info *ci = inode->i_crypt_info;
- struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
+ const struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(inode);
+ struct crypto_sync_skcipher *tfm = ci->ci_enc_key.tfm;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
union fscrypt_iv iv;
- int res;
-
- /* Allocate request */
- req = skcipher_request_alloc(tfm, GFP_NOFS);
- if (!req)
- return -ENOMEM;
- skcipher_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- crypto_req_done, &wait);
+ struct scatterlist src_sg, dst_sg;
+ int err;
- /* Initialize IV */
fscrypt_generate_iv(&iv, 0, ci);
- /* Create decryption request */
+ skcipher_request_set_callback(
+ req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
sg_init_one(&src_sg, iname->name, iname->len);
sg_init_one(&dst_sg, oname->name, oname->len);
skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, &iv);
- res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
- skcipher_request_free(req);
- if (res < 0) {
- fscrypt_err(inode, "Filename decryption failed: %d", res);
- return res;
+ err = crypto_skcipher_decrypt(req);
+ if (err) {
+ fscrypt_err(inode, "Filename decryption failed: %d", err);
+ return err;
}
oname->len = strnlen(oname->name, iname->len);
return 0;
}
-static const char base64url_table[65] =
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
-
-#define FSCRYPT_BASE64URL_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3)
-
-/**
- * fscrypt_base64url_encode() - base64url-encode some binary data
- * @src: the binary data to encode
- * @srclen: the length of @src in bytes
- * @dst: (output) the base64url-encoded string. Not NUL-terminated.
- *
- * Encodes data using base64url encoding, i.e. the "Base 64 Encoding with URL
- * and Filename Safe Alphabet" specified by RFC 4648. '='-padding isn't used,
- * as it's unneeded and not required by the RFC. base64url is used instead of
- * base64 to avoid the '/' character, which isn't allowed in filenames.
- *
- * Return: the length of the resulting base64url-encoded string in bytes.
- * This will be equal to FSCRYPT_BASE64URL_CHARS(srclen).
- */
-static int fscrypt_base64url_encode(const u8 *src, int srclen, char *dst)
-{
- u32 ac = 0;
- int bits = 0;
- int i;
- char *cp = dst;
-
- for (i = 0; i < srclen; i++) {
- ac = (ac << 8) | src[i];
- bits += 8;
- do {
- bits -= 6;
- *cp++ = base64url_table[(ac >> bits) & 0x3f];
- } while (bits >= 6);
- }
- if (bits)
- *cp++ = base64url_table[(ac << (6 - bits)) & 0x3f];
- return cp - dst;
-}
-
-/**
- * fscrypt_base64url_decode() - base64url-decode a string
- * @src: the string to decode. Doesn't need to be NUL-terminated.
- * @srclen: the length of @src in bytes
- * @dst: (output) the decoded binary data
- *
- * Decodes a string using base64url encoding, i.e. the "Base 64 Encoding with
- * URL and Filename Safe Alphabet" specified by RFC 4648. '='-padding isn't
- * accepted, nor are non-encoding characters such as whitespace.
- *
- * This implementation hasn't been optimized for performance.
- *
- * Return: the length of the resulting decoded binary data in bytes,
- * or -1 if the string isn't a valid base64url string.
- */
-static int fscrypt_base64url_decode(const char *src, int srclen, u8 *dst)
-{
- u32 ac = 0;
- int bits = 0;
- int i;
- u8 *bp = dst;
-
- for (i = 0; i < srclen; i++) {
- const char *p = strchr(base64url_table, src[i]);
-
- if (p == NULL || src[i] == 0)
- return -1;
- ac = (ac << 6) | (p - base64url_table);
- bits += 6;
- if (bits >= 8) {
- bits -= 8;
- *bp++ = (u8)(ac >> bits);
- }
- }
- if (ac & ((1 << bits) - 1))
- return -1;
- return bp - dst;
-}
-
bool __fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
u32 orig_len, u32 max_len,
u32 *encrypted_len_ret)
@@ -293,8 +196,9 @@ bool __fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len,
u32 max_len, u32 *encrypted_len_ret)
{
- return __fscrypt_fname_encrypted_size(&inode->i_crypt_info->ci_policy,
- orig_len, max_len,
+ const struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(inode);
+
+ return __fscrypt_fname_encrypted_size(&ci->ci_policy, orig_len, max_len,
encrypted_len_ret);
}
EXPORT_SYMBOL_GPL(fscrypt_fname_encrypted_size);
@@ -406,8 +310,8 @@ int fscrypt_fname_disk_to_usr(const struct inode *inode,
nokey_name.sha256);
size = FSCRYPT_NOKEY_NAME_MAX;
}
- oname->len = fscrypt_base64url_encode((const u8 *)&nokey_name, size,
- oname->name);
+ oname->len = base64_encode((const u8 *)&nokey_name, size,
+ oname->name, false, BASE64_URLSAFE);
return 0;
}
EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
@@ -486,8 +390,8 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
if (fname->crypto_buf.name == NULL)
return -ENOMEM;
- ret = fscrypt_base64url_decode(iname->name, iname->len,
- fname->crypto_buf.name);
+ ret = base64_decode(iname->name, iname->len,
+ fname->crypto_buf.name, false, BASE64_URLSAFE);
if (ret < (int)offsetof(struct fscrypt_nokey_name, bytes[1]) ||
(ret > offsetof(struct fscrypt_nokey_name, sha256) &&
ret != FSCRYPT_NOKEY_NAME_MAX)) {
@@ -562,7 +466,7 @@ EXPORT_SYMBOL_GPL(fscrypt_match_name);
*/
u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name)
{
- const struct fscrypt_inode_info *ci = dir->i_crypt_info;
+ const struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(dir);
WARN_ON_ONCE(!ci->ci_dirhash_key_initialized);
@@ -574,11 +478,10 @@ EXPORT_SYMBOL_GPL(fscrypt_fname_siphash);
* Validate dentries in encrypted directories to make sure we aren't potentially
* caching stale dentries after a key has been added.
*/
-int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
+int fscrypt_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
- struct dentry *dir;
int err;
- int valid;
/*
* Plaintext names are always valid, since fscrypt doesn't support
@@ -591,30 +494,21 @@ int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
/*
* No-key name; valid if the directory's key is still unavailable.
*
- * Although fscrypt forbids rename() on no-key names, we still must use
- * dget_parent() here rather than use ->d_parent directly. That's
- * because a corrupted fs image may contain directory hard links, which
- * the VFS handles by moving the directory's dentry tree in the dcache
- * each time ->lookup() finds the directory and it already has a dentry
- * elsewhere. Thus ->d_parent can be changing, and we must safely grab
- * a reference to some ->d_parent to prevent it from being freed.
+ * Note in RCU mode we have to bail if we get here -
+ * fscrypt_get_encryption_info() may block.
*/
if (flags & LOOKUP_RCU)
return -ECHILD;
- dir = dget_parent(dentry);
/*
* Pass allow_unsupported=true, so that files with an unsupported
* encryption policy can be deleted.
*/
- err = fscrypt_get_encryption_info(d_inode(dir), true);
- valid = !fscrypt_has_encryption_key(d_inode(dir));
- dput(dir);
-
+ err = fscrypt_get_encryption_info(dir, true);
if (err < 0)
return err;
- return valid;
+ return !fscrypt_has_encryption_key(dir);
}
EXPORT_SYMBOL_GPL(fscrypt_d_revalidate);
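fname.c drops its private base64url implementation in favour of the shared <linux/base64.h> helpers while keeping the convention the removed comments describe: the RFC 4648 URL-safe alphabet with no '=' padding. Below is a rough round-trip sketch using the helpers exactly as the patch calls them; the function nokey_name_roundtrip() and its buffer handling are illustrative only.

#include <linux/base64.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

/*
 * Encode 'len' bytes of binary no-key-name material and decode it back,
 * using the same arguments the patch passes: no '=' padding and the
 * URL-safe alphabet (so the output never contains '/').
 */
static int nokey_name_roundtrip(const u8 *src, int len, char *enc, u8 *dec)
{
	int enclen, declen;

	enclen = base64_encode(src, len, enc, false, BASE64_URLSAFE);
	if (enclen != BASE64_CHARS(len))	/* unpadded: ceil(len * 4 / 3) */
		return -EINVAL;

	declen = base64_decode(enc, enclen, dec, false, BASE64_URLSAFE);
	if (declen != len || memcmp(src, dec, len))
		return -EINVAL;

	return 0;
}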
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 8371e4e1f596..4e8e82a9ccf9 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -11,9 +11,10 @@
#ifndef _FSCRYPT_PRIVATE_H
#define _FSCRYPT_PRIVATE_H
+#include <crypto/sha2.h>
#include <linux/fscrypt.h>
+#include <linux/minmax.h>
#include <linux/siphash.h>
-#include <crypto/hash.h>
#include <linux/blk-crypto.h>
#define CONST_STRLEN(str) (sizeof(str) - 1)
@@ -27,6 +28,41 @@
*/
#define FSCRYPT_MIN_KEY_SIZE 16
+/* Maximum size of a raw fscrypt master key */
+#define FSCRYPT_MAX_RAW_KEY_SIZE 64
+
+/* Maximum size of a hardware-wrapped fscrypt master key */
+#define FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE
+
+/* Maximum size of an fscrypt master key across both key types */
+#define FSCRYPT_MAX_ANY_KEY_SIZE \
+ MAX(FSCRYPT_MAX_RAW_KEY_SIZE, FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE)
+
+/*
+ * FSCRYPT_MAX_KEY_SIZE is defined in the UAPI header, but the addition of
+ * hardware-wrapped keys has made it misleading as it's only for raw keys.
+ * Don't use it in kernel code; use one of the above constants instead.
+ */
+#undef FSCRYPT_MAX_KEY_SIZE
+
+/*
+ * This mask is passed as the third argument to the crypto_alloc_*() functions
+ * to prevent fscrypt from using the Crypto API drivers for non-inline crypto
+ * engines. Those drivers have been problematic for fscrypt. fscrypt users
+ * have reported hangs and even incorrect en/decryption with these drivers.
+ * Since going to the driver, off CPU, and back again is really slow, such
+ * drivers can be over 50 times slower than the CPU-based code for fscrypt's
+ * workload. Even on platforms that lack AES instructions on the CPU, using the
+ * offloads has been shown to be slower, even staying with AES. (Of course,
+ * Adiantum is faster still, and is the recommended option on such platforms...)
+ *
+ * Note that fscrypt also supports inline crypto engines. Those don't use the
+ * Crypto API and work much better than the old-style (non-inline) engines.
+ */
+#define FSCRYPT_CRYPTOAPI_MASK \
+ (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | \
+ CRYPTO_ALG_KERN_DRIVER_ONLY)
+
#define FSCRYPT_CONTEXT_V1 1
#define FSCRYPT_CONTEXT_V2 2
@@ -203,7 +239,7 @@ struct fscrypt_symlink_data {
* Normally only one of the fields will be non-NULL.
*/
struct fscrypt_prepared_key {
- struct crypto_skcipher *tfm;
+ struct crypto_sync_skcipher *tfm;
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
struct blk_crypto_key *blk_key;
#endif
@@ -213,8 +249,8 @@ struct fscrypt_prepared_key {
* fscrypt_inode_info - the "encryption key" for an inode
*
* When an encrypted file's key is made available, an instance of this struct is
- * allocated and stored in ->i_crypt_info. Once created, it remains until the
- * inode is evicted.
+ * allocated and a pointer to it is stored in the file's in-memory inode. Once
+ * created, it remains until the inode is evicted.
*/
struct fscrypt_inode_info {
@@ -301,8 +337,7 @@ int fscrypt_initialize(struct super_block *sb);
int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
fscrypt_direction_t rw, u64 index,
struct page *src_page, struct page *dest_page,
- unsigned int len, unsigned int offs,
- gfp_t gfp_flags);
+ unsigned int len, unsigned int offs);
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags);
void __printf(3, 4) __cold
@@ -346,12 +381,8 @@ bool __fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
u32 *encrypted_len_ret);
/* hkdf.c */
-struct fscrypt_hkdf {
- struct crypto_shash *hmac_tfm;
-};
-
-int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key,
- unsigned int master_key_size);
+void fscrypt_init_hkdf(struct hmac_sha512_key *hkdf, const u8 *master_key,
+ unsigned int master_key_size);
/*
* The list of contexts in which fscrypt uses HKDF. These values are used as
@@ -360,23 +391,24 @@ int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key,
* outputs are unique and cryptographically isolated, i.e. knowledge of one
* output doesn't reveal another.
*/
-#define HKDF_CONTEXT_KEY_IDENTIFIER 1 /* info=<empty> */
+#define HKDF_CONTEXT_KEY_IDENTIFIER_FOR_RAW_KEY 1 /* info=<empty> */
#define HKDF_CONTEXT_PER_FILE_ENC_KEY 2 /* info=file_nonce */
#define HKDF_CONTEXT_DIRECT_KEY 3 /* info=mode_num */
#define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4 /* info=mode_num||fs_uuid */
#define HKDF_CONTEXT_DIRHASH_KEY 5 /* info=file_nonce */
#define HKDF_CONTEXT_IV_INO_LBLK_32_KEY 6 /* info=mode_num||fs_uuid */
#define HKDF_CONTEXT_INODE_HASH_KEY 7 /* info=<empty> */
+#define HKDF_CONTEXT_KEY_IDENTIFIER_FOR_HW_WRAPPED_KEY \
+ 8 /* info=<empty> */
-int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context,
- const u8 *info, unsigned int infolen,
- u8 *okm, unsigned int okmlen);
-
-void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf);
+void fscrypt_hkdf_expand(const struct hmac_sha512_key *hkdf, u8 context,
+ const u8 *info, unsigned int infolen,
+ u8 *okm, unsigned int okmlen);
/* inline_crypt.c */
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
-int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci);
+int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci,
+ bool is_hw_wrapped_key);
static inline bool
fscrypt_using_inline_encryption(const struct fscrypt_inode_info *ci)
@@ -385,12 +417,17 @@ fscrypt_using_inline_encryption(const struct fscrypt_inode_info *ci)
}
int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
- const u8 *raw_key,
+ const u8 *key_bytes, size_t key_size,
+ bool is_hw_wrapped,
const struct fscrypt_inode_info *ci);
void fscrypt_destroy_inline_crypt_key(struct super_block *sb,
struct fscrypt_prepared_key *prep_key);
+int fscrypt_derive_sw_secret(struct super_block *sb,
+ const u8 *wrapped_key, size_t wrapped_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]);
+
/*
* Check whether the crypto transform or blk-crypto key has been allocated in
* @prep_key, depending on which encryption implementation the file will use.
@@ -414,7 +451,8 @@ fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
-static inline int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci)
+static inline int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci,
+ bool is_hw_wrapped_key)
{
return 0;
}
@@ -427,7 +465,8 @@ fscrypt_using_inline_encryption(const struct fscrypt_inode_info *ci)
static inline int
fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
- const u8 *raw_key,
+ const u8 *key_bytes, size_t key_size,
+ bool is_hw_wrapped,
const struct fscrypt_inode_info *ci)
{
WARN_ON_ONCE(1);
@@ -440,6 +479,15 @@ fscrypt_destroy_inline_crypt_key(struct super_block *sb,
{
}
+static inline int
+fscrypt_derive_sw_secret(struct super_block *sb,
+ const u8 *wrapped_key, size_t wrapped_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+ fscrypt_warn(NULL, "kernel doesn't support hardware-wrapped keys");
+ return -EOPNOTSUPP;
+}
+
static inline bool
fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
const struct fscrypt_inode_info *ci)
@@ -456,20 +504,38 @@ fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
struct fscrypt_master_key_secret {
/*
- * For v2 policy keys: HKDF context keyed by this master key.
- * For v1 policy keys: not set (hkdf.hmac_tfm == NULL).
+ * The KDF with which subkeys of this key can be derived.
+ *
+ * For v1 policy keys, this isn't applicable and won't be set.
+ * Otherwise, this KDF will be keyed by this master key if
+ * ->is_hw_wrapped=false, or by the "software secret" that hardware
+ * derived from this master key if ->is_hw_wrapped=true.
*/
- struct fscrypt_hkdf hkdf;
+ struct hmac_sha512_key hkdf;
/*
- * Size of the raw key in bytes. This remains set even if ->raw was
+ * True if this key is a hardware-wrapped key; false if this key is a
+ * raw key (i.e. a "software key"). For v1 policy keys this will always
+ * be false, as v1 policy support is a legacy feature which doesn't
+ * support newer functionality such as hardware-wrapped keys.
+ */
+ bool is_hw_wrapped;
+
+ /*
+ * Size of the key in bytes. This remains set even if ->bytes was
* zeroized due to no longer being needed. I.e. we still remember the
* size of the key even if we don't need to remember the key itself.
*/
u32 size;
- /* For v1 policy keys: the raw key. Wiped for v2 policy keys. */
- u8 raw[FSCRYPT_MAX_KEY_SIZE];
+ /*
+ * The bytes of the key, when still needed. This can be either a raw
+ * key or a hardware-wrapped key, as indicated by ->is_hw_wrapped. In
+ * the case of a raw, v2 policy key, there is no need to remember the
+ * actual key separately from ->hkdf so this field will be zeroized as
+ * soon as ->hkdf is initialized.
+ */
+ u8 bytes[FSCRYPT_MAX_ANY_KEY_SIZE];
} __randomize_layout;
@@ -624,7 +690,7 @@ struct fscrypt_master_key *
fscrypt_find_master_key(struct super_block *sb,
const struct fscrypt_key_specifier *mk_spec);
-int fscrypt_get_test_dummy_key_identifier(
+void fscrypt_get_test_dummy_key_identifier(
u8 key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]);
int fscrypt_add_test_dummy_key(struct super_block *sb,
@@ -660,8 +726,8 @@ void fscrypt_destroy_prepared_key(struct super_block *sb,
int fscrypt_set_per_file_enc_key(struct fscrypt_inode_info *ci,
const u8 *raw_key);
-int fscrypt_derive_dirhash_key(struct fscrypt_inode_info *ci,
- const struct fscrypt_master_key *mk);
+void fscrypt_derive_dirhash_key(struct fscrypt_inode_info *ci,
+ const struct fscrypt_master_key *mk);
void fscrypt_hash_inode_number(struct fscrypt_inode_info *ci,
const struct fscrypt_master_key *mk);
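The new FSCRYPT_CRYPTOAPI_MASK is intended to be passed as the mask argument whenever fscrypt allocates a transform, so that asynchronous, memory-allocating, or driver-only Crypto API implementations are never chosen; the keysetup.c hunks further down use it exactly that way. A minimal sketch of that allocation path, assuming the fscrypt_private.h definitions above; alloc_contents_tfm() is an illustrative name, not a function in the patch.

#include <crypto/skcipher.h>

#include "fscrypt_private.h"

/*
 * Allocate and key a synchronous skcipher while excluding the Crypto API
 * implementations fscrypt wants to avoid: anything asynchronous, anything
 * that allocates memory per request, and anything only reachable through
 * a hardware driver.
 */
static struct crypto_sync_skcipher *
alloc_contents_tfm(const char *cipher_str, const u8 *key, unsigned int keysize)
{
	struct crypto_sync_skcipher *tfm;
	int err;

	tfm = crypto_alloc_sync_skcipher(cipher_str, 0, FSCRYPT_CRYPTOAPI_MASK);
	if (IS_ERR(tfm))
		return tfm;

	crypto_sync_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
	err = crypto_sync_skcipher_setkey(tfm, key, keysize);
	if (err) {
		crypto_free_sync_skcipher(tfm);
		return ERR_PTR(err);
	}
	return tfm;
}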
diff --git a/fs/crypto/hkdf.c b/fs/crypto/hkdf.c
index 5a384dad2c72..706f56d0076e 100644
--- a/fs/crypto/hkdf.c
+++ b/fs/crypto/hkdf.c
@@ -4,14 +4,13 @@
* Function"), aka RFC 5869. See also the original paper (Krawczyk 2010):
* "Cryptographic Extraction and Key Derivation: The HKDF Scheme".
*
- * This is used to derive keys from the fscrypt master keys.
+ * This is used to derive keys from the fscrypt master keys (or from the
+ * "software secrets" which hardware derives from the fscrypt master keys, in
+ * the case that the fscrypt master keys are hardware-wrapped keys).
*
* Copyright 2019 Google LLC
*/
-#include <crypto/hash.h>
-#include <crypto/sha2.h>
-
#include "fscrypt_private.h"
/*
@@ -25,7 +24,6 @@
* HKDF-SHA512 being much faster than HKDF-SHA256, as the longer digest size of
* SHA-512 causes HKDF-Expand to only need to do one iteration rather than two.
*/
-#define HKDF_HMAC_ALG "hmac(sha512)"
#define HKDF_HASHLEN SHA512_DIGEST_SIZE
/*
@@ -44,67 +42,25 @@
* there's no way to persist a random salt per master key from kernel mode.
*/
-/* HKDF-Extract (RFC 5869 section 2.2), unsalted */
-static int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm,
- unsigned int ikmlen, u8 prk[HKDF_HASHLEN])
-{
- static const u8 default_salt[HKDF_HASHLEN];
- int err;
-
- err = crypto_shash_setkey(hmac_tfm, default_salt, HKDF_HASHLEN);
- if (err)
- return err;
-
- return crypto_shash_tfm_digest(hmac_tfm, ikm, ikmlen, prk);
-}
-
/*
- * Compute HKDF-Extract using the given master key as the input keying material,
- * and prepare an HMAC transform object keyed by the resulting pseudorandom key.
- *
- * Afterwards, the keyed HMAC transform object can be used for HKDF-Expand many
- * times without having to recompute HKDF-Extract each time.
+ * Compute HKDF-Extract using 'master_key' as the input keying material, and
+ * prepare the resulting HMAC key in 'hkdf'. Afterwards, 'hkdf' can be used for
+ * HKDF-Expand many times without having to recompute HKDF-Extract each time.
*/
-int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key,
- unsigned int master_key_size)
+void fscrypt_init_hkdf(struct hmac_sha512_key *hkdf, const u8 *master_key,
+ unsigned int master_key_size)
{
- struct crypto_shash *hmac_tfm;
+ static const u8 default_salt[HKDF_HASHLEN];
u8 prk[HKDF_HASHLEN];
- int err;
-
- hmac_tfm = crypto_alloc_shash(HKDF_HMAC_ALG, 0, 0);
- if (IS_ERR(hmac_tfm)) {
- fscrypt_err(NULL, "Error allocating " HKDF_HMAC_ALG ": %ld",
- PTR_ERR(hmac_tfm));
- return PTR_ERR(hmac_tfm);
- }
-
- if (WARN_ON_ONCE(crypto_shash_digestsize(hmac_tfm) != sizeof(prk))) {
- err = -EINVAL;
- goto err_free_tfm;
- }
-
- err = hkdf_extract(hmac_tfm, master_key, master_key_size, prk);
- if (err)
- goto err_free_tfm;
- err = crypto_shash_setkey(hmac_tfm, prk, sizeof(prk));
- if (err)
- goto err_free_tfm;
-
- hkdf->hmac_tfm = hmac_tfm;
- goto out;
-
-err_free_tfm:
- crypto_free_shash(hmac_tfm);
-out:
+ hmac_sha512_usingrawkey(default_salt, sizeof(default_salt),
+ master_key, master_key_size, prk);
+ hmac_sha512_preparekey(hkdf, prk, sizeof(prk));
memzero_explicit(prk, sizeof(prk));
- return err;
}
/*
- * HKDF-Expand (RFC 5869 section 2.3). This expands the pseudorandom key, which
- * was already keyed into 'hkdf->hmac_tfm' by fscrypt_init_hkdf(), into 'okmlen'
+ * HKDF-Expand (RFC 5869 section 2.3). Expand the HMAC key 'hkdf' into 'okmlen'
* bytes of output keying material parameterized by the application-specific
* 'info' of length 'infolen' bytes, prefixed by "fscrypt\0" and the 'context'
* byte. This is thread-safe and may be called by multiple threads in parallel.
@@ -113,70 +69,32 @@ out:
* adds to its application-specific info strings to guarantee that it doesn't
* accidentally repeat an info string when using HKDF for different purposes.)
*/
-int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context,
- const u8 *info, unsigned int infolen,
- u8 *okm, unsigned int okmlen)
+void fscrypt_hkdf_expand(const struct hmac_sha512_key *hkdf, u8 context,
+ const u8 *info, unsigned int infolen,
+ u8 *okm, unsigned int okmlen)
{
- SHASH_DESC_ON_STACK(desc, hkdf->hmac_tfm);
- u8 prefix[9];
- unsigned int i;
- int err;
- const u8 *prev = NULL;
+ struct hmac_sha512_ctx ctx;
u8 counter = 1;
u8 tmp[HKDF_HASHLEN];
- if (WARN_ON_ONCE(okmlen > 255 * HKDF_HASHLEN))
- return -EINVAL;
-
- desc->tfm = hkdf->hmac_tfm;
-
- memcpy(prefix, "fscrypt\0", 8);
- prefix[8] = context;
-
- for (i = 0; i < okmlen; i += HKDF_HASHLEN) {
-
- err = crypto_shash_init(desc);
- if (err)
- goto out;
-
- if (prev) {
- err = crypto_shash_update(desc, prev, HKDF_HASHLEN);
- if (err)
- goto out;
- }
-
- err = crypto_shash_update(desc, prefix, sizeof(prefix));
- if (err)
- goto out;
-
- err = crypto_shash_update(desc, info, infolen);
- if (err)
- goto out;
-
- BUILD_BUG_ON(sizeof(counter) != 1);
+ WARN_ON_ONCE(okmlen > 255 * HKDF_HASHLEN);
+
+ for (unsigned int i = 0; i < okmlen; i += HKDF_HASHLEN) {
+ hmac_sha512_init(&ctx, hkdf);
+ if (i != 0)
+ hmac_sha512_update(&ctx, &okm[i - HKDF_HASHLEN],
+ HKDF_HASHLEN);
+ hmac_sha512_update(&ctx, "fscrypt\0", 8);
+ hmac_sha512_update(&ctx, &context, 1);
+ hmac_sha512_update(&ctx, info, infolen);
+ hmac_sha512_update(&ctx, &counter, 1);
if (okmlen - i < HKDF_HASHLEN) {
- err = crypto_shash_finup(desc, &counter, 1, tmp);
- if (err)
- goto out;
+ hmac_sha512_final(&ctx, tmp);
memcpy(&okm[i], tmp, okmlen - i);
memzero_explicit(tmp, sizeof(tmp));
} else {
- err = crypto_shash_finup(desc, &counter, 1, &okm[i]);
- if (err)
- goto out;
+ hmac_sha512_final(&ctx, &okm[i]);
}
counter++;
- prev = &okm[i];
}
- err = 0;
-out:
- if (unlikely(err))
- memzero_explicit(okm, okmlen); /* so caller doesn't need to */
- shash_desc_zero(desc);
- return err;
-}
-
-void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf)
-{
- crypto_free_shash(hkdf->hmac_tfm);
}
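With hkdf.c reduced to the HMAC-SHA512 library, key derivation becomes a two-step, infallible sequence: fscrypt_init_hkdf() performs HKDF-Extract once, after which fscrypt_hkdf_expand() can be called any number of times with different (context, info) pairs. A condensed sketch of deriving one subkey, mirroring how the keysetup.c hunks below use these helpers; derive_subkey() itself is an illustrative wrapper.

#include "fscrypt_private.h"

/*
 * Derive one 'okmlen'-byte subkey from a master key.  Each distinct
 * (context, info) pair yields an independent subkey; see the
 * HKDF_CONTEXT_* list in fscrypt_private.h.
 */
static void derive_subkey(const u8 *master_key, unsigned int master_key_size,
			  u8 context, const u8 *info, unsigned int infolen,
			  u8 *okm, unsigned int okmlen)
{
	struct hmac_sha512_key hkdf;

	/* HKDF-Extract: done once per master key */
	fscrypt_init_hkdf(&hkdf, master_key, master_key_size);

	/* HKDF-Expand: repeatable, infallible, thread-safe */
	fscrypt_hkdf_expand(&hkdf, context, info, infolen, okm, okmlen);

	memzero_explicit(&hkdf, sizeof(hkdf));
}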
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index d8d5049b8fe1..b97de0d1430f 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -5,6 +5,8 @@
* Encryption hooks for higher-level filesystem operations.
*/
+#include <linux/export.h>
+
#include "fscrypt_private.h"
/**
@@ -197,13 +199,13 @@ int fscrypt_prepare_setflags(struct inode *inode,
err = fscrypt_require_key(inode);
if (err)
return err;
- ci = inode->i_crypt_info;
+ ci = fscrypt_get_inode_info_raw(inode);
if (ci->ci_policy.version != FSCRYPT_POLICY_V2)
return -EINVAL;
mk = ci->ci_master_key;
down_read(&mk->mk_sem);
if (mk->mk_present)
- err = fscrypt_derive_dirhash_key(ci, mk);
+ fscrypt_derive_dirhash_key(ci, mk);
else
err = -ENOKEY;
up_read(&mk->mk_sem);
diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c
index 40de69860dcf..ed6e926226b5 100644
--- a/fs/crypto/inline_crypt.c
+++ b/fs/crypto/inline_crypt.c
@@ -15,6 +15,7 @@
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
+#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/uio.h>
@@ -89,7 +90,8 @@ static void fscrypt_log_blk_crypto_impl(struct fscrypt_mode *mode,
}
/* Enable inline encryption for this file if supported. */
-int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci)
+int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci,
+ bool is_hw_wrapped_key)
{
const struct inode *inode = ci->ci_inode;
struct super_block *sb = inode->i_sb;
@@ -130,6 +132,8 @@ int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci)
crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
crypto_cfg.data_unit_size = 1U << ci->ci_data_unit_bits;
crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
+ crypto_cfg.key_type = is_hw_wrapped_key ?
+ BLK_CRYPTO_KEY_TYPE_HW_WRAPPED : BLK_CRYPTO_KEY_TYPE_RAW;
devs = fscrypt_get_devices(sb, &num_devs);
if (IS_ERR(devs))
@@ -150,12 +154,15 @@ out_free_devs:
}
int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
- const u8 *raw_key,
+ const u8 *key_bytes, size_t key_size,
+ bool is_hw_wrapped,
const struct fscrypt_inode_info *ci)
{
const struct inode *inode = ci->ci_inode;
struct super_block *sb = inode->i_sb;
enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
+ enum blk_crypto_key_type key_type = is_hw_wrapped ?
+ BLK_CRYPTO_KEY_TYPE_HW_WRAPPED : BLK_CRYPTO_KEY_TYPE_RAW;
struct blk_crypto_key *blk_key;
struct block_device **devs;
unsigned int num_devs;
@@ -166,8 +173,8 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
if (!blk_key)
return -ENOMEM;
- err = blk_crypto_init_key(blk_key, raw_key, crypto_mode,
- fscrypt_get_dun_bytes(ci),
+ err = blk_crypto_init_key(blk_key, key_bytes, key_size, key_type,
+ crypto_mode, fscrypt_get_dun_bytes(ci),
1U << ci->ci_data_unit_bits);
if (err) {
fscrypt_err(inode, "error %d initializing blk-crypto key", err);
@@ -226,9 +233,37 @@ void fscrypt_destroy_inline_crypt_key(struct super_block *sb,
kfree_sensitive(blk_key);
}
+/*
+ * Ask the inline encryption hardware to derive the software secret from a
+ * hardware-wrapped key. Returns -EOPNOTSUPP if hardware-wrapped keys aren't
+ * supported on this filesystem or hardware.
+ */
+int fscrypt_derive_sw_secret(struct super_block *sb,
+ const u8 *wrapped_key, size_t wrapped_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+ int err;
+
+ /* The filesystem must be mounted with -o inlinecrypt. */
+ if (!(sb->s_flags & SB_INLINECRYPT)) {
+ fscrypt_warn(NULL,
+ "%s: filesystem not mounted with inlinecrypt\n",
+ sb->s_id);
+ return -EOPNOTSUPP;
+ }
+
+ err = blk_crypto_derive_sw_secret(sb->s_bdev, wrapped_key,
+ wrapped_key_size, sw_secret);
+ if (err == -EOPNOTSUPP)
+ fscrypt_warn(NULL,
+ "%s: block device doesn't support hardware-wrapped keys\n",
+ sb->s_id);
+ return err;
+}
+
bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
- return inode->i_crypt_info->ci_inlinecrypt;
+ return fscrypt_get_inode_info_raw(inode)->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);
@@ -272,7 +307,7 @@ void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
if (!fscrypt_inode_uses_inline_crypto(inode))
return;
- ci = inode->i_crypt_info;
+ ci = fscrypt_get_inode_info_raw(inode);
fscrypt_generate_dun(ci, first_lblk, dun);
bio_crypt_set_ctx(bio, ci->ci_enc_key.blk_key, dun, gfp_mask);
@@ -298,8 +333,7 @@ static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
inode = mapping->host;
*inode_ret = inode;
- *lblk_num_ret = ((u64)folio->index << (PAGE_SHIFT - inode->i_blkbits)) +
- (bh_offset(bh) >> inode->i_blkbits);
+ *lblk_num_ret = (folio_pos(folio) + bh_offset(bh)) >> inode->i_blkbits;
return true;
}
@@ -350,22 +384,24 @@ bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
u64 next_lblk)
{
const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
+ const struct fscrypt_inode_info *ci;
u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
return false;
if (!bc)
return true;
+ ci = fscrypt_get_inode_info_raw(inode);
/*
* Comparing the key pointers is good enough, as all I/O for each key
* uses the same pointer. I.e., there's currently no need to support
* merging requests where the keys are the same but the pointers differ.
*/
- if (bc->bc_key != inode->i_crypt_info->ci_enc_key.blk_key)
+ if (bc->bc_key != ci->ci_enc_key.blk_key)
return false;
- fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
+ fscrypt_generate_dun(ci, next_lblk, next_dun);
return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);
@@ -467,7 +503,7 @@ u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
if (nr_blocks <= 1)
return nr_blocks;
- ci = inode->i_crypt_info;
+ ci = fscrypt_get_inode_info_raw(inode);
if (!(fscrypt_policy_flags(&ci->ci_policy) &
FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
return nr_blocks;
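The inline-crypt changes thread a key type through to blk-crypto: raw and hardware-wrapped keys are now distinguished both when probing device capabilities (fscrypt_select_encryption_impl()) and when initializing the blk_crypto_key. A trimmed sketch of the second step, following the argument order the hunk passes to blk_crypto_init_key(); init_blk_key() is an illustrative wrapper, not a function in the patch.

#include <linux/blk-crypto.h>

/*
 * Initialize a blk-crypto key from either a raw key or a hardware-wrapped
 * key blob.  The key type tells the block layer whether the bytes can be
 * used directly or must first be unwrapped by the inline encryption
 * hardware.
 */
static int init_blk_key(struct blk_crypto_key *blk_key,
			const u8 *key_bytes, size_t key_size,
			bool is_hw_wrapped,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes, unsigned int data_unit_size)
{
	enum blk_crypto_key_type key_type = is_hw_wrapped ?
		BLK_CRYPTO_KEY_TYPE_HW_WRAPPED : BLK_CRYPTO_KEY_TYPE_RAW;

	return blk_crypto_init_key(blk_key, key_bytes, key_size, key_type,
				   crypto_mode, dun_bytes, data_unit_size);
}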
diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c
index 787e9c8938ba..5e939ea3ac28 100644
--- a/fs/crypto/keyring.c
+++ b/fs/crypto/keyring.c
@@ -18,12 +18,13 @@
* information about these ioctls.
*/
-#include <linux/unaligned.h>
#include <crypto/skcipher.h>
+#include <linux/export.h>
#include <linux/key-type.h>
-#include <linux/random.h>
#include <linux/once.h>
+#include <linux/random.h>
#include <linux/seq_file.h>
+#include <linux/unaligned.h>
#include "fscrypt_private.h"
@@ -41,7 +42,6 @@ struct fscrypt_keyring {
static void wipe_master_key_secret(struct fscrypt_master_key_secret *secret)
{
- fscrypt_destroy_hkdf(&secret->hkdf);
memzero_explicit(secret, sizeof(*secret));
}
@@ -149,11 +149,11 @@ static int fscrypt_user_key_instantiate(struct key *key,
struct key_preparsed_payload *prep)
{
/*
- * We just charge FSCRYPT_MAX_KEY_SIZE bytes to the user's key quota for
- * each key, regardless of the exact key size. The amount of memory
+ * We just charge FSCRYPT_MAX_RAW_KEY_SIZE bytes to the user's key quota
+ * for each key, regardless of the exact key size. The amount of memory
* actually used is greater than the size of the raw key anyway.
*/
- return key_payload_reserve(key, FSCRYPT_MAX_KEY_SIZE);
+ return key_payload_reserve(key, FSCRYPT_MAX_RAW_KEY_SIZE);
}
static void fscrypt_user_key_describe(const struct key *key, struct seq_file *m)
@@ -558,41 +558,79 @@ static int add_master_key(struct super_block *sb,
int err;
if (key_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) {
- err = fscrypt_init_hkdf(&secret->hkdf, secret->raw,
- secret->size);
- if (err)
- return err;
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE];
+ u8 *kdf_key = secret->bytes;
+ unsigned int kdf_key_size = secret->size;
+ u8 keyid_kdf_ctx = HKDF_CONTEXT_KEY_IDENTIFIER_FOR_RAW_KEY;
/*
- * Now that the HKDF context is initialized, the raw key is no
- * longer needed.
+ * For raw keys, the fscrypt master key is used directly as the
+ * fscrypt KDF key. For hardware-wrapped keys, we have to pass
+ * the master key to the hardware to derive the KDF key, which
+ * is then only used to derive non-file-contents subkeys.
*/
- memzero_explicit(secret->raw, secret->size);
+ if (secret->is_hw_wrapped) {
+ err = fscrypt_derive_sw_secret(sb, secret->bytes,
+ secret->size, sw_secret);
+ if (err)
+ return err;
+ kdf_key = sw_secret;
+ kdf_key_size = sizeof(sw_secret);
+ /*
+ * To avoid weird behavior if someone manages to
+ * determine sw_secret and add it as a raw key, ensure
+ * that hardware-wrapped keys and raw keys will have
+ * different key identifiers by deriving their key
+ * identifiers using different KDF contexts.
+ */
+ keyid_kdf_ctx =
+ HKDF_CONTEXT_KEY_IDENTIFIER_FOR_HW_WRAPPED_KEY;
+ }
+ fscrypt_init_hkdf(&secret->hkdf, kdf_key, kdf_key_size);
+ /*
+ * Now that the KDF context is initialized, the raw KDF key is
+ * no longer needed.
+ */
+ memzero_explicit(kdf_key, kdf_key_size);
/* Calculate the key identifier */
- err = fscrypt_hkdf_expand(&secret->hkdf,
- HKDF_CONTEXT_KEY_IDENTIFIER, NULL, 0,
- key_spec->u.identifier,
- FSCRYPT_KEY_IDENTIFIER_SIZE);
- if (err)
- return err;
+ fscrypt_hkdf_expand(&secret->hkdf, keyid_kdf_ctx, NULL, 0,
+ key_spec->u.identifier,
+ FSCRYPT_KEY_IDENTIFIER_SIZE);
}
return do_add_master_key(sb, secret, key_spec);
}
+/*
+ * Validate the size of an fscrypt master key being added. Note that this is
+ * just an initial check, as we don't know which ciphers will be used yet.
+ * There is a stricter size check later when the key is actually used by a file.
+ */
+static inline bool fscrypt_valid_key_size(size_t size, u32 add_key_flags)
+{
+ u32 max_size = (add_key_flags & FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) ?
+ FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE :
+ FSCRYPT_MAX_RAW_KEY_SIZE;
+
+ return size >= FSCRYPT_MIN_KEY_SIZE && size <= max_size;
+}
+
static int fscrypt_provisioning_key_preparse(struct key_preparsed_payload *prep)
{
const struct fscrypt_provisioning_key_payload *payload = prep->data;
- if (prep->datalen < sizeof(*payload) + FSCRYPT_MIN_KEY_SIZE ||
- prep->datalen > sizeof(*payload) + FSCRYPT_MAX_KEY_SIZE)
+ if (prep->datalen < sizeof(*payload))
+ return -EINVAL;
+
+ if (!fscrypt_valid_key_size(prep->datalen - sizeof(*payload),
+ payload->flags))
return -EINVAL;
if (payload->type != FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR &&
payload->type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER)
return -EINVAL;
- if (payload->__reserved)
+ if (payload->flags & ~FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED)
return -EINVAL;
prep->payload.data[0] = kmemdup(payload, prep->datalen, GFP_KERNEL);
@@ -636,21 +674,21 @@ static struct key_type key_type_fscrypt_provisioning = {
};
/*
- * Retrieve the raw key from the Linux keyring key specified by 'key_id', and
- * store it into 'secret'.
+ * Retrieve the key from the Linux keyring key specified by 'key_id', and store
+ * it into 'secret'.
*
- * The key must be of type "fscrypt-provisioning" and must have the field
- * fscrypt_provisioning_key_payload::type set to 'type', indicating that it's
- * only usable with fscrypt with the particular KDF version identified by
- * 'type'. We don't use the "logon" key type because there's no way to
- * completely restrict the use of such keys; they can be used by any kernel API
- * that accepts "logon" keys and doesn't require a specific service prefix.
+ * The key must be of type "fscrypt-provisioning" and must have the 'type' and
+ * 'flags' fields of the payload set to the given values, indicating that the key
+ * is intended for use for the specified purpose. We don't use the "logon" key
+ * type because there's no way to completely restrict the use of such keys; they
+ * can be used by any kernel API that accepts "logon" keys and doesn't require a
+ * specific service prefix.
*
* The ability to specify the key via Linux keyring key is intended for cases
* where userspace needs to re-add keys after the filesystem is unmounted and
- * re-mounted. Most users should just provide the raw key directly instead.
+ * re-mounted. Most users should just provide the key directly instead.
*/
-static int get_keyring_key(u32 key_id, u32 type,
+static int get_keyring_key(u32 key_id, u32 type, u32 flags,
struct fscrypt_master_key_secret *secret)
{
key_ref_t ref;
@@ -667,12 +705,16 @@ static int get_keyring_key(u32 key_id, u32 type,
goto bad_key;
payload = key->payload.data[0];
- /* Don't allow fscrypt v1 keys to be used as v2 keys and vice versa. */
- if (payload->type != type)
+ /*
+ * Don't allow fscrypt v1 keys to be used as v2 keys and vice versa.
+ * Similarly, don't allow hardware-wrapped keys to be used as
+ * non-hardware-wrapped keys and vice versa.
+ */
+ if (payload->type != type || payload->flags != flags)
goto bad_key;
secret->size = key->datalen - sizeof(*payload);
- memcpy(secret->raw, payload->raw, secret->size);
+ memcpy(secret->bytes, payload->raw, secret->size);
err = 0;
goto out_put;
@@ -734,19 +776,28 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg)
return -EACCES;
memset(&secret, 0, sizeof(secret));
+
+ if (arg.flags) {
+ if (arg.flags & ~FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED)
+ return -EINVAL;
+ if (arg.key_spec.type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER)
+ return -EINVAL;
+ secret.is_hw_wrapped = true;
+ }
+
if (arg.key_id) {
if (arg.raw_size != 0)
return -EINVAL;
- err = get_keyring_key(arg.key_id, arg.key_spec.type, &secret);
+ err = get_keyring_key(arg.key_id, arg.key_spec.type, arg.flags,
+ &secret);
if (err)
goto out_wipe_secret;
} else {
- if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE ||
- arg.raw_size > FSCRYPT_MAX_KEY_SIZE)
+ if (!fscrypt_valid_key_size(arg.raw_size, arg.flags))
return -EINVAL;
secret.size = arg.raw_size;
err = -EFAULT;
- if (copy_from_user(secret.raw, uarg->raw, secret.size))
+ if (copy_from_user(secret.bytes, uarg->raw, secret.size))
goto out_wipe_secret;
}
@@ -770,32 +821,26 @@ EXPORT_SYMBOL_GPL(fscrypt_ioctl_add_key);
static void
fscrypt_get_test_dummy_secret(struct fscrypt_master_key_secret *secret)
{
- static u8 test_key[FSCRYPT_MAX_KEY_SIZE];
+ static u8 test_key[FSCRYPT_MAX_RAW_KEY_SIZE];
- get_random_once(test_key, FSCRYPT_MAX_KEY_SIZE);
+ get_random_once(test_key, sizeof(test_key));
memset(secret, 0, sizeof(*secret));
- secret->size = FSCRYPT_MAX_KEY_SIZE;
- memcpy(secret->raw, test_key, FSCRYPT_MAX_KEY_SIZE);
+ secret->size = sizeof(test_key);
+ memcpy(secret->bytes, test_key, sizeof(test_key));
}
-int fscrypt_get_test_dummy_key_identifier(
+void fscrypt_get_test_dummy_key_identifier(
u8 key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE])
{
struct fscrypt_master_key_secret secret;
- int err;
fscrypt_get_test_dummy_secret(&secret);
-
- err = fscrypt_init_hkdf(&secret.hkdf, secret.raw, secret.size);
- if (err)
- goto out;
- err = fscrypt_hkdf_expand(&secret.hkdf, HKDF_CONTEXT_KEY_IDENTIFIER,
- NULL, 0, key_identifier,
- FSCRYPT_KEY_IDENTIFIER_SIZE);
-out:
+ fscrypt_init_hkdf(&secret.hkdf, secret.bytes, secret.size);
+ fscrypt_hkdf_expand(&secret.hkdf,
+ HKDF_CONTEXT_KEY_IDENTIFIER_FOR_RAW_KEY, NULL, 0,
+ key_identifier, FSCRYPT_KEY_IDENTIFIER_SIZE);
wipe_master_key_secret(&secret);
- return err;
}
/**
@@ -900,7 +945,7 @@ static void evict_dentries_for_decrypted_inodes(struct fscrypt_master_key *mk)
list_for_each_entry(ci, &mk->mk_decrypted_inodes, ci_master_key_link) {
inode = ci->ci_inode;
spin_lock(&inode->i_lock);
- if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
+ if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) {
spin_unlock(&inode->i_lock);
continue;
}
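add_master_key() above now has two routes to the KDF key: a raw master key keys the KDF directly, while a hardware-wrapped key is first handed to the inline-encryption hardware and its derived software secret keys the KDF instead, with a different HKDF context byte so the two key types can never produce colliding key identifiers. A condensed sketch of that flow under the fscrypt_private.h definitions above; compute_key_identifier() is an illustrative name, not a function in the patch.

#include "fscrypt_private.h"

/*
 * Turn a master key secret into its user-visible key identifier.  For a
 * hardware-wrapped key, the KDF is keyed by the hardware-derived software
 * secret rather than by the wrapped blob itself, and a distinct HKDF
 * context byte keeps the two identifier spaces separate.
 */
static int compute_key_identifier(struct super_block *sb,
				  struct fscrypt_master_key_secret *secret,
				  u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE])
{
	u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE];
	u8 *kdf_key = secret->bytes;
	unsigned int kdf_key_size = secret->size;
	u8 context = HKDF_CONTEXT_KEY_IDENTIFIER_FOR_RAW_KEY;
	int err;

	if (secret->is_hw_wrapped) {
		err = fscrypt_derive_sw_secret(sb, secret->bytes, secret->size,
					       sw_secret);
		if (err)
			return err;
		kdf_key = sw_secret;
		kdf_key_size = sizeof(sw_secret);
		context = HKDF_CONTEXT_KEY_IDENTIFIER_FOR_HW_WRAPPED_KEY;
	}

	fscrypt_init_hkdf(&secret->hkdf, kdf_key, kdf_key_size);
	/* The KDF is keyed now; the input key material is no longer needed. */
	memzero_explicit(kdf_key, kdf_key_size);

	fscrypt_hkdf_expand(&secret->hkdf, context, NULL, 0,
			    identifier, FSCRYPT_KEY_IDENTIFIER_SIZE);
	return 0;
}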
diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
index b4fe01ea4bd4..40fa05688d3a 100644
--- a/fs/crypto/keysetup.c
+++ b/fs/crypto/keysetup.c
@@ -9,6 +9,7 @@
*/
#include <crypto/skcipher.h>
+#include <linux/export.h>
#include <linux/random.h>
#include "fscrypt_private.h"
@@ -96,14 +97,15 @@ select_encryption_mode(const union fscrypt_policy *policy,
}
/* Create a symmetric cipher object for the given encryption mode and key */
-static struct crypto_skcipher *
+static struct crypto_sync_skcipher *
fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key,
const struct inode *inode)
{
- struct crypto_skcipher *tfm;
+ struct crypto_sync_skcipher *tfm;
int err;
- tfm = crypto_alloc_skcipher(mode->cipher_str, 0, 0);
+ tfm = crypto_alloc_sync_skcipher(mode->cipher_str, 0,
+ FSCRYPT_CRYPTOAPI_MASK);
if (IS_ERR(tfm)) {
if (PTR_ERR(tfm) == -ENOENT) {
fscrypt_warn(inode,
@@ -123,21 +125,22 @@ fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key,
* first time a mode is used.
*/
pr_info("fscrypt: %s using implementation \"%s\"\n",
- mode->friendly_name, crypto_skcipher_driver_name(tfm));
+ mode->friendly_name,
+ crypto_skcipher_driver_name(&tfm->base));
}
- if (WARN_ON_ONCE(crypto_skcipher_ivsize(tfm) != mode->ivsize)) {
+ if (WARN_ON_ONCE(crypto_sync_skcipher_ivsize(tfm) != mode->ivsize)) {
err = -EINVAL;
goto err_free_tfm;
}
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
- err = crypto_skcipher_setkey(tfm, raw_key, mode->keysize);
+ crypto_sync_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
+ err = crypto_sync_skcipher_setkey(tfm, raw_key, mode->keysize);
if (err)
goto err_free_tfm;
return tfm;
err_free_tfm:
- crypto_free_skcipher(tfm);
+ crypto_free_sync_skcipher(tfm);
return ERR_PTR(err);
}
@@ -150,10 +153,12 @@ err_free_tfm:
int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
const u8 *raw_key, const struct fscrypt_inode_info *ci)
{
- struct crypto_skcipher *tfm;
+ struct crypto_sync_skcipher *tfm;
if (fscrypt_using_inline_encryption(ci))
- return fscrypt_prepare_inline_crypt_key(prep_key, raw_key, ci);
+ return fscrypt_prepare_inline_crypt_key(prep_key, raw_key,
+ ci->ci_mode->keysize,
+ false, ci);
tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode);
if (IS_ERR(tfm))
@@ -172,7 +177,7 @@ int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
void fscrypt_destroy_prepared_key(struct super_block *sb,
struct fscrypt_prepared_key *prep_key)
{
- crypto_free_skcipher(prep_key->tfm);
+ crypto_free_sync_skcipher(prep_key->tfm);
fscrypt_destroy_inline_crypt_key(sb, prep_key);
memzero_explicit(prep_key, sizeof(*prep_key));
}
@@ -195,14 +200,29 @@ static int setup_per_mode_enc_key(struct fscrypt_inode_info *ci,
struct fscrypt_mode *mode = ci->ci_mode;
const u8 mode_num = mode - fscrypt_modes;
struct fscrypt_prepared_key *prep_key;
- u8 mode_key[FSCRYPT_MAX_KEY_SIZE];
+ u8 mode_key[FSCRYPT_MAX_RAW_KEY_SIZE];
u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)];
unsigned int hkdf_infolen = 0;
+ bool use_hw_wrapped_key = false;
int err;
if (WARN_ON_ONCE(mode_num > FSCRYPT_MODE_MAX))
return -EINVAL;
+ if (mk->mk_secret.is_hw_wrapped && S_ISREG(inode->i_mode)) {
+ /* Using a hardware-wrapped key for file contents encryption */
+ if (!fscrypt_using_inline_encryption(ci)) {
+ if (sb->s_flags & SB_INLINECRYPT)
+ fscrypt_warn(ci->ci_inode,
+ "Hardware-wrapped key required, but no suitable inline encryption capabilities are available");
+ else
+ fscrypt_warn(ci->ci_inode,
+ "Hardware-wrapped keys require inline encryption (-o inlinecrypt)");
+ return -EINVAL;
+ }
+ use_hw_wrapped_key = true;
+ }
+
prep_key = &keys[mode_num];
if (fscrypt_is_key_prepared(prep_key, ci)) {
ci->ci_enc_key = *prep_key;
@@ -214,6 +234,16 @@ static int setup_per_mode_enc_key(struct fscrypt_inode_info *ci,
if (fscrypt_is_key_prepared(prep_key, ci))
goto done_unlock;
+ if (use_hw_wrapped_key) {
+ err = fscrypt_prepare_inline_crypt_key(prep_key,
+ mk->mk_secret.bytes,
+ mk->mk_secret.size, true,
+ ci);
+ if (err)
+ goto out_unlock;
+ goto done_unlock;
+ }
+
BUILD_BUG_ON(sizeof(mode_num) != 1);
BUILD_BUG_ON(sizeof(sb->s_uuid) != 16);
BUILD_BUG_ON(sizeof(hkdf_info) != 17);
@@ -223,11 +253,8 @@ static int setup_per_mode_enc_key(struct fscrypt_inode_info *ci,
sizeof(sb->s_uuid));
hkdf_infolen += sizeof(sb->s_uuid);
}
- err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
- hkdf_context, hkdf_info, hkdf_infolen,
- mode_key, mode->keysize);
- if (err)
- goto out_unlock;
+ fscrypt_hkdf_expand(&mk->mk_secret.hkdf, hkdf_context, hkdf_info,
+ hkdf_infolen, mode_key, mode->keysize);
err = fscrypt_prepare_key(prep_key, mode_key, ci);
memzero_explicit(mode_key, mode->keysize);
if (err)
@@ -248,36 +275,25 @@ out_unlock:
* as a pair of 64-bit words. Therefore, on big endian CPUs we have to do an
* endianness swap in order to get the same results as on little endian CPUs.
*/
-static int fscrypt_derive_siphash_key(const struct fscrypt_master_key *mk,
- u8 context, const u8 *info,
- unsigned int infolen, siphash_key_t *key)
+static void fscrypt_derive_siphash_key(const struct fscrypt_master_key *mk,
+ u8 context, const u8 *info,
+ unsigned int infolen, siphash_key_t *key)
{
- int err;
-
- err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, context, info, infolen,
- (u8 *)key, sizeof(*key));
- if (err)
- return err;
-
+ fscrypt_hkdf_expand(&mk->mk_secret.hkdf, context, info, infolen,
+ (u8 *)key, sizeof(*key));
BUILD_BUG_ON(sizeof(*key) != 16);
BUILD_BUG_ON(ARRAY_SIZE(key->key) != 2);
le64_to_cpus(&key->key[0]);
le64_to_cpus(&key->key[1]);
- return 0;
}
-int fscrypt_derive_dirhash_key(struct fscrypt_inode_info *ci,
- const struct fscrypt_master_key *mk)
+void fscrypt_derive_dirhash_key(struct fscrypt_inode_info *ci,
+ const struct fscrypt_master_key *mk)
{
- int err;
-
- err = fscrypt_derive_siphash_key(mk, HKDF_CONTEXT_DIRHASH_KEY,
- ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
- &ci->ci_dirhash_key);
- if (err)
- return err;
+ fscrypt_derive_siphash_key(mk, HKDF_CONTEXT_DIRHASH_KEY,
+ ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
+ &ci->ci_dirhash_key);
ci->ci_dirhash_key_initialized = true;
- return 0;
}
void fscrypt_hash_inode_number(struct fscrypt_inode_info *ci,
@@ -308,17 +324,12 @@ static int fscrypt_setup_iv_ino_lblk_32_key(struct fscrypt_inode_info *ci,
if (mk->mk_ino_hash_key_initialized)
goto unlock;
- err = fscrypt_derive_siphash_key(mk,
- HKDF_CONTEXT_INODE_HASH_KEY,
- NULL, 0, &mk->mk_ino_hash_key);
- if (err)
- goto unlock;
+ fscrypt_derive_siphash_key(mk, HKDF_CONTEXT_INODE_HASH_KEY,
+ NULL, 0, &mk->mk_ino_hash_key);
/* pairs with smp_load_acquire() above */
smp_store_release(&mk->mk_ino_hash_key_initialized, true);
unlock:
mutex_unlock(&fscrypt_mode_key_setup_mutex);
- if (err)
- return err;
}
/*
@@ -336,6 +347,14 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_inode_info *ci,
{
int err;
+ if (mk->mk_secret.is_hw_wrapped &&
+ !(ci->ci_policy.v2.flags & (FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 |
+ FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))) {
+ fscrypt_warn(ci->ci_inode,
+ "Hardware-wrapped keys are only supported with IV_INO_LBLK policies");
+ return -EINVAL;
+ }
+
if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
/*
* DIRECT_KEY: instead of deriving per-file encryption keys, the
@@ -362,15 +381,12 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_inode_info *ci,
FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) {
err = fscrypt_setup_iv_ino_lblk_32_key(ci, mk);
} else {
- u8 derived_key[FSCRYPT_MAX_KEY_SIZE];
-
- err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
- HKDF_CONTEXT_PER_FILE_ENC_KEY,
- ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
- derived_key, ci->ci_mode->keysize);
- if (err)
- return err;
+ u8 derived_key[FSCRYPT_MAX_RAW_KEY_SIZE];
+ fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
+ HKDF_CONTEXT_PER_FILE_ENC_KEY,
+ ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
+ derived_key, ci->ci_mode->keysize);
err = fscrypt_set_per_file_enc_key(ci, derived_key);
memzero_explicit(derived_key, ci->ci_mode->keysize);
}
@@ -378,11 +394,8 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_inode_info *ci,
return err;
/* Derive a secret dirhash key for directories that need it. */
- if (need_dirhash_key) {
- err = fscrypt_derive_dirhash_key(ci, mk);
- if (err)
- return err;
- }
+ if (need_dirhash_key)
+ fscrypt_derive_dirhash_key(ci, mk);
return 0;
}
@@ -445,10 +458,6 @@ static int setup_file_encryption_key(struct fscrypt_inode_info *ci,
struct fscrypt_master_key *mk;
int err;
- err = fscrypt_select_encryption_impl(ci);
- if (err)
- return err;
-
err = fscrypt_policy_to_key_spec(&ci->ci_policy, &mk_spec);
if (err)
return err;
@@ -476,6 +485,10 @@ static int setup_file_encryption_key(struct fscrypt_inode_info *ci,
if (ci->ci_policy.version != FSCRYPT_POLICY_V1)
return -ENOKEY;
+ err = fscrypt_select_encryption_impl(ci, false);
+ if (err)
+ return err;
+
/*
* As a legacy fallback for v1 policies, search for the key in
* the current task's subscribed keyrings too. Don't move this
@@ -497,9 +510,21 @@ static int setup_file_encryption_key(struct fscrypt_inode_info *ci,
goto out_release_key;
}
+ err = fscrypt_select_encryption_impl(ci, mk->mk_secret.is_hw_wrapped);
+ if (err)
+ goto out_release_key;
+
switch (ci->ci_policy.version) {
case FSCRYPT_POLICY_V1:
- err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.raw);
+ if (WARN_ON_ONCE(mk->mk_secret.is_hw_wrapped)) {
+ /*
+ * This should never happen, as adding a v1 policy key
+ * that is hardware-wrapped isn't allowed.
+ */
+ err = -EINVAL;
+ goto out_release_key;
+ }
+ err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.bytes);
break;
case FSCRYPT_POLICY_V2:
err = fscrypt_setup_v2_file_key(ci, mk, need_dirhash_key);
@@ -592,15 +617,16 @@ fscrypt_setup_encryption_info(struct inode *inode,
goto out;
/*
- * For existing inodes, multiple tasks may race to set ->i_crypt_info.
- * So use cmpxchg_release(). This pairs with the smp_load_acquire() in
- * fscrypt_get_inode_info(). I.e., here we publish ->i_crypt_info with
- * a RELEASE barrier so that other tasks can ACQUIRE it.
+ * For existing inodes, multiple tasks may race to set the inode's
+ * fscrypt info pointer. So use cmpxchg_release(). This pairs with the
+ * smp_load_acquire() in fscrypt_get_inode_info(). I.e., publish the
+ * pointer with a RELEASE barrier so that other tasks can ACQUIRE it.
*/
- if (cmpxchg_release(&inode->i_crypt_info, NULL, crypt_info) == NULL) {
+ if (cmpxchg_release(fscrypt_inode_info_addr(inode), NULL, crypt_info) ==
+ NULL) {
/*
- * We won the race and set ->i_crypt_info to our crypt_info.
- * Now link it into the master key's inode list.
+ * We won the race and set the inode's fscrypt info to our
+ * crypt_info. Now link it into the master key's inode list.
*/
if (mk) {
crypt_info->ci_master_key = mk;
@@ -631,13 +657,13 @@ out:
* %false unless the operation being performed is needed in
* order for files (or directories) to be deleted.
*
- * Set up ->i_crypt_info, if it hasn't already been done.
+ * Set up the inode's encryption key, if it hasn't already been done.
*
- * Note: unless ->i_crypt_info is already set, this isn't %GFP_NOFS-safe. So
+ * Note: unless the key setup was already done, this isn't %GFP_NOFS-safe. So
* generally this shouldn't be called from within a filesystem transaction.
*
- * Return: 0 if ->i_crypt_info was set or was already set, *or* if the
- * encryption key is unavailable. (Use fscrypt_has_encryption_key() to
+ * Return: 0 if the key is now set up, *or* if it couldn't be set up because the
+ * needed master key is absent. (Use fscrypt_has_encryption_key() to
* distinguish these cases.) Also can return another -errno code.
*/
int fscrypt_get_encryption_info(struct inode *inode, bool allow_unsupported)
@@ -691,9 +717,9 @@ int fscrypt_get_encryption_info(struct inode *inode, bool allow_unsupported)
* ->i_ino doesn't need to be set yet.
* @encrypt_ret: (output) set to %true if the new inode will be encrypted
*
- * If the directory is encrypted, set up its ->i_crypt_info in preparation for
+ * If the directory is encrypted, set up its encryption key in preparation for
* encrypting the name of the new file. Also, if the new inode will be
- * encrypted, set up its ->i_crypt_info and set *encrypt_ret=true.
+ * encrypted, set up its encryption key too and set *encrypt_ret=true.
*
* This isn't %GFP_NOFS-safe, and therefore it should be called before starting
* any filesystem transaction to create the inode. For this reason, ->i_ino
@@ -702,8 +728,8 @@ int fscrypt_get_encryption_info(struct inode *inode, bool allow_unsupported)
* This doesn't persist the new inode's encryption context. That still needs to
* be done later by calling fscrypt_set_context().
*
- * Return: 0 on success, -ENOKEY if the encryption key is missing, or another
- * -errno code
+ * Return: 0 on success, -ENOKEY if a key needs to be set up for @dir or @inode
+ * but the needed master key is absent, or another -errno code
*/
int fscrypt_prepare_new_inode(struct inode *dir, struct inode *inode,
bool *encrypt_ret)
@@ -750,8 +776,16 @@ EXPORT_SYMBOL_GPL(fscrypt_prepare_new_inode);
*/
void fscrypt_put_encryption_info(struct inode *inode)
{
- put_crypt_info(inode->i_crypt_info);
- inode->i_crypt_info = NULL;
+ /*
+ * Ideally we'd start with a lightweight IS_ENCRYPTED() check here
+ * before proceeding to retrieve and check the pointer. However, during
+ * inode creation, the fscrypt_inode_info is set before S_ENCRYPTED. If
+ * an error occurs, it needs to be cleaned up regardless.
+ */
+ struct fscrypt_inode_info **ci_addr = fscrypt_inode_info_addr(inode);
+
+ put_crypt_info(*ci_addr);
+ *ci_addr = NULL;
}
EXPORT_SYMBOL(fscrypt_put_encryption_info);
@@ -800,7 +834,7 @@ int fscrypt_drop_inode(struct inode *inode)
* userspace is still using the files, inodes can be dirtied between
* then and now. We mustn't lose any writes, so skip dirty inodes here.
*/
- if (inode->i_state & I_DIRTY_ALL)
+ if (inode_state_read(inode) & I_DIRTY_ALL)
return 0;
/*
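
Editor's note: the rewritten comment in fscrypt_setup_encryption_info() above describes the cmpxchg_release()/smp_load_acquire() pairing used to publish the per-inode fscrypt info. As a hedged illustration only (not part of this diff; the demo_* names are made up), the same publish-or-free idiom looks like this in isolation:

/* Illustrative sketch only -- not part of the patch; demo_* names are hypothetical. */
#include <linux/atomic.h>
#include <linux/slab.h>

struct demo_info { int payload; };

static struct demo_info *demo_slot;	/* hypothetical shared pointer */

/* Reader side: the ACQUIRE load pairs with the RELEASE publish below. */
static struct demo_info *demo_get(void)
{
	return smp_load_acquire(&demo_slot);
}

/* Setup side: only the winner of the cmpxchg keeps its allocation. */
static int demo_publish(void)
{
	struct demo_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return -ENOMEM;
	if (cmpxchg_release(&demo_slot, NULL, info) != NULL)
		kfree(info);	/* lost the race; another task published first */
	return 0;
}
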
diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c
index cf3b58ec32cc..c4d05168522b 100644
--- a/fs/crypto/keysetup_v1.c
+++ b/fs/crypto/keysetup_v1.c
@@ -48,39 +48,30 @@ static int derive_key_aes(const u8 *master_key,
const u8 nonce[FSCRYPT_FILE_NONCE_SIZE],
u8 *derived_key, unsigned int derived_keysize)
{
- int res = 0;
- struct skcipher_request *req = NULL;
- DECLARE_CRYPTO_WAIT(wait);
- struct scatterlist src_sg, dst_sg;
- struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
-
- if (IS_ERR(tfm)) {
- res = PTR_ERR(tfm);
- tfm = NULL;
- goto out;
- }
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
- req = skcipher_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- res = -ENOMEM;
- goto out;
- }
- skcipher_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- crypto_req_done, &wait);
- res = crypto_skcipher_setkey(tfm, nonce, FSCRYPT_FILE_NONCE_SIZE);
- if (res < 0)
- goto out;
+ struct crypto_sync_skcipher *tfm;
+ int err;
- sg_init_one(&src_sg, master_key, derived_keysize);
- sg_init_one(&dst_sg, derived_key, derived_keysize);
- skcipher_request_set_crypt(req, &src_sg, &dst_sg, derived_keysize,
- NULL);
- res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
-out:
- skcipher_request_free(req);
- crypto_free_skcipher(tfm);
- return res;
+ tfm = crypto_alloc_sync_skcipher("ecb(aes)", 0, FSCRYPT_CRYPTOAPI_MASK);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ err = crypto_sync_skcipher_setkey(tfm, nonce, FSCRYPT_FILE_NONCE_SIZE);
+ if (err == 0) {
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ struct scatterlist src_sg, dst_sg;
+
+ skcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
+ sg_init_one(&src_sg, master_key, derived_keysize);
+ sg_init_one(&dst_sg, derived_key, derived_keysize);
+ skcipher_request_set_crypt(req, &src_sg, &dst_sg,
+ derived_keysize, NULL);
+ err = crypto_skcipher_encrypt(req);
+ }
+ crypto_free_sync_skcipher(tfm);
+ return err;
}
/*
@@ -118,7 +109,7 @@ find_and_lock_process_key(const char *prefix,
payload = (const struct fscrypt_key *)ukp->data;
if (ukp->datalen != sizeof(struct fscrypt_key) ||
- payload->size < 1 || payload->size > FSCRYPT_MAX_KEY_SIZE) {
+ payload->size < 1 || payload->size > sizeof(payload->raw)) {
fscrypt_warn(NULL,
"key with description '%s' has invalid payload",
key->description);
@@ -149,7 +140,7 @@ struct fscrypt_direct_key {
const struct fscrypt_mode *dk_mode;
struct fscrypt_prepared_key dk_key;
u8 dk_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
- u8 dk_raw[FSCRYPT_MAX_KEY_SIZE];
+ u8 dk_raw[FSCRYPT_MAX_RAW_KEY_SIZE];
};
static void free_direct_key(struct fscrypt_direct_key *dk)
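
Editor's note: derive_key_aes() above now uses a synchronous skcipher with an on-stack request instead of allocating a request and waiting on a completion. A hedged, self-contained sketch of that one-shot ECB pattern (generic names; not the fscrypt code itself) might look like:

/* Illustrative sketch only -- not taken from the patch. */
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int demo_ecb_aes_encrypt(const u8 *key, unsigned int keylen,
				const u8 *src, u8 *dst, unsigned int len)
{
	struct crypto_sync_skcipher *tfm;
	int err;

	tfm = crypto_alloc_sync_skcipher("ecb(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (err == 0) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
		struct scatterlist src_sg, dst_sg;

		/*
		 * A sync tfm never returns -EINPROGRESS, so no completion is
		 * needed.  Note src/dst must not be stack memory, since they
		 * are mapped through scatterlists.
		 */
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      NULL, NULL);
		sg_init_one(&src_sg, src, len);
		sg_init_one(&dst_sg, dst, len);
		skcipher_request_set_crypt(req, &src_sg, &dst_sg, len, NULL);
		err = crypto_skcipher_encrypt(req);
	}
	crypto_free_sync_skcipher(tfm);
	return err;
}
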
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 701259991277..bbb2f5ced988 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -10,11 +10,13 @@
* Modified by Eric Biggers, 2019 for v2 policy support.
*/
+#include <linux/export.h>
#include <linux/fs_context.h>
+#include <linux/mount.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/string.h>
-#include <linux/mount.h>
+
#include "fscrypt_private.h"
/**
@@ -725,7 +727,7 @@ const union fscrypt_policy *fscrypt_policy_to_inherit(struct inode *dir)
err = fscrypt_require_key(dir);
if (err)
return ERR_PTR(err);
- return &dir->i_crypt_info->ci_policy;
+ return &fscrypt_get_inode_info_raw(dir)->ci_policy;
}
return fscrypt_get_dummy_policy(dir->i_sb);
@@ -744,7 +746,7 @@ const union fscrypt_policy *fscrypt_policy_to_inherit(struct inode *dir)
*/
int fscrypt_context_for_new_inode(void *ctx, struct inode *inode)
{
- struct fscrypt_inode_info *ci = inode->i_crypt_info;
+ struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(inode);
BUILD_BUG_ON(sizeof(union fscrypt_context) !=
FSCRYPT_SET_CONTEXT_MAX_SIZE);
@@ -769,7 +771,7 @@ EXPORT_SYMBOL_GPL(fscrypt_context_for_new_inode);
*/
int fscrypt_set_context(struct inode *inode, void *fs_data)
{
- struct fscrypt_inode_info *ci = inode->i_crypt_info;
+ struct fscrypt_inode_info *ci;
union fscrypt_context ctx;
int ctxsize;
@@ -781,6 +783,7 @@ int fscrypt_set_context(struct inode *inode, void *fs_data)
* This may be the first time the inode number is available, so do any
* delayed key setup that requires the inode number.
*/
+ ci = fscrypt_get_inode_info_raw(inode);
if (ci->ci_policy.version == FSCRYPT_POLICY_V2 &&
(ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
fscrypt_hash_inode_number(ci, ci->ci_master_key);
@@ -824,10 +827,8 @@ int fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param,
policy->version = FSCRYPT_POLICY_V2;
policy->v2.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS;
policy->v2.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS;
- err = fscrypt_get_test_dummy_key_identifier(
+ fscrypt_get_test_dummy_key_identifier(
policy->v2.master_key_identifier);
- if (err)
- goto out;
} else {
err = -EINVAL;
goto out;
diff --git a/fs/d_path.c b/fs/d_path.c
index 5f4da5c8d5db..bb365511066b 100644
--- a/fs/d_path.c
+++ b/fs/d_path.c
@@ -241,9 +241,9 @@ static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
unsigned seq;
do {
- seq = read_seqcount_begin(&fs->seq);
+ seq = read_seqbegin(&fs->seq);
*root = fs->root;
- } while (read_seqcount_retry(&fs->seq, seq));
+ } while (read_seqretry(&fs->seq, seq));
}
/**
@@ -385,10 +385,10 @@ static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
unsigned seq;
do {
- seq = read_seqcount_begin(&fs->seq);
+ seq = read_seqbegin(&fs->seq);
*root = fs->root;
*pwd = fs->pwd;
- } while (read_seqcount_retry(&fs->seq, seq));
+ } while (read_seqretry(&fs->seq, seq));
}
/*
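
Editor's note: the d_path.c hunks above switch the fs->seq readers from the seqcount API to the seqlock API (read_seqbegin()/read_seqretry()). As a hedged, generic illustration of that retry loop (demo_* names are hypothetical, not from this diff):

/* Illustrative sketch only -- not part of the patch. */
#include <linux/seqlock.h>

static DEFINE_SEQLOCK(demo_lock);
static int demo_a, demo_b;

/* Lockless reader: retry until a consistent snapshot of both values is seen. */
static void demo_read_pair(int *a, int *b)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&demo_lock);
		*a = demo_a;
		*b = demo_b;
	} while (read_seqretry(&demo_lock, seq));
}

/* Writer: excludes other writers and forces concurrent readers to retry. */
static void demo_write_pair(int a, int b)
{
	write_seqlock(&demo_lock);
	demo_a = a;
	demo_b = b;
	write_sequnlock(&demo_lock);
}
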
diff --git a/fs/dax.c b/fs/dax.c
index 21b47402b3dc..289e6254aa30 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -20,12 +20,11 @@
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
-#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <linux/rmap.h>
-#include <asm/pgalloc.h>
+#include <linux/pgalloc.h>
#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>
@@ -71,9 +70,14 @@ static unsigned long dax_to_pfn(void *entry)
return xa_to_value(entry) >> DAX_SHIFT;
}
-static void *dax_make_entry(pfn_t pfn, unsigned long flags)
+static struct folio *dax_to_folio(void *entry)
{
- return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
+ return page_folio(pfn_to_page(dax_to_pfn(entry)));
+}
+
+static void *dax_make_entry(unsigned long pfn, unsigned long flags)
+{
+ return xa_mk_value(flags | (pfn << DAX_SHIFT));
}
static bool dax_is_locked(void *entry)
@@ -206,7 +210,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry,
*
* Must be called with the i_pages lock held.
*/
-static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
+static void *get_next_unlocked_entry(struct xa_state *xas, unsigned int order)
{
void *entry;
struct wait_exceptional_entry_queue ewait;
@@ -236,6 +240,37 @@ static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
}
/*
+ * Wait for the given entry to become unlocked. Caller must hold the i_pages
+ * lock and call either put_unlocked_entry() if it did not lock the entry or
+ * dax_unlock_entry() if it did. Returns an unlocked entry if still present.
+ */
+static void *wait_entry_unlocked_exclusive(struct xa_state *xas, void *entry)
+{
+ struct wait_exceptional_entry_queue ewait;
+ wait_queue_head_t *wq;
+
+ init_wait(&ewait.wait);
+ ewait.wait.func = wake_exceptional_entry_func;
+
+ while (unlikely(dax_is_locked(entry))) {
+ wq = dax_entry_waitqueue(xas, entry, &ewait.key);
+ prepare_to_wait_exclusive(wq, &ewait.wait,
+ TASK_UNINTERRUPTIBLE);
+ xas_reset(xas);
+ xas_unlock_irq(xas);
+ schedule();
+ finish_wait(wq, &ewait.wait);
+ xas_lock_irq(xas);
+ entry = xas_load(xas);
+ }
+
+ if (xa_is_internal(entry))
+ return NULL;
+
+ return entry;
+}
+
+/*
* The only thing keeping the address space around is the i_pages lock
* (it's cycled in clear_inode() after removing the entries from i_pages)
* After we call xas_unlock_irq(), we cannot touch xas->xa.
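
Editor's note: the new wait_entry_unlocked_exclusive() above is called with the xa_lock held, and its caller must follow up with either put_unlocked_entry() or dax_lock_entry()/dax_unlock_entry(). A hedged sketch of the calling convention, mirroring the later hunks (the demo_scan() wrapper is hypothetical and only meaningful inside fs/dax.c, where these helpers are static):

/* Illustrative sketch only -- demo_scan() is hypothetical. */
static void demo_scan(struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, 0);
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (!xa_is_value(entry))
			continue;
		/* May drop and re-take the xa_lock while sleeping. */
		entry = wait_entry_unlocked_exclusive(&xas, entry);
		if (!entry)
			continue;	/* entry disappeared while we slept */
		/* ... inspect or modify the unlocked entry here ... */
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
	}
	xas_unlock_irq(&xas);
}
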
@@ -250,7 +285,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
wq = dax_entry_waitqueue(xas, entry, &ewait.key);
/*
- * Unlike get_unlocked_entry() there is no guarantee that this
+ * Unlike get_next_unlocked_entry() there is no guarantee that this
* path ever successfully retrieves an unlocked entry before an
* inode dies. Perform a non-exclusive wait in case this path
* never successfully performs its own wake up.
@@ -307,109 +342,151 @@ static unsigned long dax_entry_size(void *entry)
return PAGE_SIZE;
}
-static unsigned long dax_end_pfn(void *entry)
+/*
+ * A DAX folio is considered shared if it has no mapping set and ->share (which
+ * shares the ->index field) is non-zero. Note this may return false even for a
+ * page that is shared between multiple files, if it has not yet actually been
+ * mapped into multiple address spaces.
+ */
+static inline bool dax_folio_is_shared(struct folio *folio)
{
- return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
+ return !folio->mapping && folio->share;
}
/*
- * Iterate through all mapped pfns represented by an entry, i.e. skip
- * 'empty' and 'zero' entries.
+ * When called from dax_insert_entry(), the shared flag indicates whether this
+ * entry is shared by multiple files. If the page has not previously been
+ * associated with any mapping, the ->mapping and ->index fields are set. If it
+ * has already been associated with a mapping, the mapping is cleared and the
+ * share count is set instead. It is then up to reverse-map users such as
+ * memory_failure() to call back into the filesystem to recover the ->mapping
+ * and ->index information, for example by implementing
+ * dax_holder_operations.
*/
-#define for_each_mapped_pfn(entry, pfn) \
- for (pfn = dax_to_pfn(entry); \
- pfn < dax_end_pfn(entry); pfn++)
-
-static inline bool dax_page_is_shared(struct page *page)
+static void dax_folio_make_shared(struct folio *folio)
{
- return page->mapping == PAGE_MAPPING_DAX_SHARED;
+ /*
+ * folio is not currently shared so mark it as shared by clearing
+ * folio->mapping.
+ */
+ folio->mapping = NULL;
+
+ /*
+ * folio has previously been mapped into one address space so set the
+ * share count.
+ */
+ folio->share = 1;
}
-/*
- * Set the page->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the
- * refcount.
- */
-static inline void dax_page_share_get(struct page *page)
+static inline unsigned long dax_folio_put(struct folio *folio)
{
- if (page->mapping != PAGE_MAPPING_DAX_SHARED) {
+ unsigned long ref;
+ int order, i;
+
+ if (!dax_folio_is_shared(folio))
+ ref = 0;
+ else
+ ref = --folio->share;
+
+ if (ref)
+ return ref;
+
+ folio->mapping = NULL;
+ order = folio_order(folio);
+ if (!order)
+ return 0;
+ folio_reset_order(folio);
+
+ for (i = 0; i < (1UL << order); i++) {
+ struct dev_pagemap *pgmap = page_pgmap(&folio->page);
+ struct page *page = folio_page(folio, i);
+ struct folio *new_folio = (struct folio *)page;
+
+ ClearPageHead(page);
+ clear_compound_head(page);
+
+ new_folio->mapping = NULL;
/*
- * Reset the index if the page was already mapped
- * regularly before.
+ * Reset pgmap, which was overwritten by
+ * prep_compound_page().
*/
- if (page->mapping)
- page->share = 1;
- page->mapping = PAGE_MAPPING_DAX_SHARED;
+ new_folio->pgmap = pgmap;
+ new_folio->share = 0;
+ WARN_ON_ONCE(folio_ref_count(new_folio));
}
- page->share++;
+
+ return ref;
}
-static inline unsigned long dax_page_share_put(struct page *page)
+static void dax_folio_init(void *entry)
{
- return --page->share;
+ struct folio *folio = dax_to_folio(entry);
+ int order = dax_entry_order(entry);
+
+ /*
+ * The folio should have been split back to order-0 pages in
+ * dax_folio_put() when it was removed from its final mapping.
+ */
+ WARN_ON_ONCE(folio_order(folio));
+
+ if (order > 0) {
+ prep_compound_page(&folio->page, order);
+ if (order > 1)
+ INIT_LIST_HEAD(&folio->_deferred_list);
+ WARN_ON_ONCE(folio_ref_count(folio));
+ }
}
-/*
- * When it is called in dax_insert_entry(), the shared flag will indicate that
- * whether this entry is shared by multiple files. If so, set the page->mapping
- * PAGE_MAPPING_DAX_SHARED, and use page->share as refcount.
- */
static void dax_associate_entry(void *entry, struct address_space *mapping,
- struct vm_area_struct *vma, unsigned long address, bool shared)
+ struct vm_area_struct *vma,
+ unsigned long address, bool shared)
{
- unsigned long size = dax_entry_size(entry), pfn, index;
- int i = 0;
+ unsigned long size = dax_entry_size(entry), index;
+ struct folio *folio = dax_to_folio(entry);
- if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
+ if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
return;
index = linear_page_index(vma, address & ~(size - 1));
- for_each_mapped_pfn(entry, pfn) {
- struct page *page = pfn_to_page(pfn);
+ if (shared && (folio->mapping || dax_folio_is_shared(folio))) {
+ if (folio->mapping)
+ dax_folio_make_shared(folio);
- if (shared) {
- dax_page_share_get(page);
- } else {
- WARN_ON_ONCE(page->mapping);
- page->mapping = mapping;
- page->index = index + i++;
- }
+ WARN_ON_ONCE(!folio->share);
+ WARN_ON_ONCE(dax_entry_order(entry) != folio_order(folio));
+ folio->share++;
+ } else {
+ WARN_ON_ONCE(folio->mapping);
+ dax_folio_init(entry);
+ folio = dax_to_folio(entry);
+ folio->mapping = mapping;
+ folio->index = index;
}
}
static void dax_disassociate_entry(void *entry, struct address_space *mapping,
- bool trunc)
+ bool trunc)
{
- unsigned long pfn;
+ struct folio *folio = dax_to_folio(entry);
- if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
+ if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
return;
- for_each_mapped_pfn(entry, pfn) {
- struct page *page = pfn_to_page(pfn);
-
- WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
- if (dax_page_is_shared(page)) {
- /* keep the shared flag if this page is still shared */
- if (dax_page_share_put(page) > 0)
- continue;
- } else
- WARN_ON_ONCE(page->mapping && page->mapping != mapping);
- page->mapping = NULL;
- page->index = 0;
- }
+ dax_folio_put(folio);
}
static struct page *dax_busy_page(void *entry)
{
- unsigned long pfn;
+ struct folio *folio = dax_to_folio(entry);
- for_each_mapped_pfn(entry, pfn) {
- struct page *page = pfn_to_page(pfn);
+ if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
+ return NULL;
- if (page_ref_count(page) > 1)
- return page;
- }
- return NULL;
+ if (folio_ref_count(folio) - folio_mapcount(folio))
+ return &folio->page;
+ else
+ return NULL;
}
/**
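
Editor's note: the rewritten association helpers above track sharing per folio rather than per page: an exclusively owned folio has ->mapping/->index set, while a shared one has ->mapping cleared and ->share used as a count. A hedged sketch summarising that state check (illustration only, not patch code; it assumes the ->share field used throughout this diff):

/* Illustrative sketch only -- summarises the states used above. */
#include <linux/mm_types.h>

static const char *demo_dax_folio_state(const struct folio *folio)
{
	if (folio->mapping)
		return "exclusive";	/* ->mapping and ->index are valid */
	if (folio->share)
		return "shared";	/* ->share counts the sharing files */
	return "unassociated";		/* fresh, or fully released */
}
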
@@ -580,7 +657,7 @@ static void *grab_mapping_entry(struct xa_state *xas,
retry:
pmd_downgrade = false;
xas_lock_irq(xas);
- entry = get_unlocked_entry(xas, order);
+ entry = get_next_unlocked_entry(xas, order);
if (entry) {
if (dax_is_conflict(entry))
@@ -635,7 +712,7 @@ retry:
if (order > 0)
flags |= DAX_PMD;
- entry = dax_make_entry(pfn_to_pfn_t(0), flags);
+ entry = dax_make_entry(0, flags);
dax_lock_entry(xas, entry);
if (xas_error(xas))
goto out_unlock;
@@ -684,13 +761,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
pgoff_t end_idx;
XA_STATE(xas, &mapping->i_pages, start_idx);
- /*
- * In the 'limited' case get_user_pages() for dax is disabled.
- */
- if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
- return NULL;
-
- if (!dax_mapping(mapping) || !mapping_mapped(mapping))
+ if (!dax_mapping(mapping))
return NULL;
/* If end == LLONG_MAX, all pages from start to till end of file */
@@ -716,8 +787,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
xas_for_each(&xas, entry, end_idx) {
if (WARN_ON_ONCE(!xa_is_value(entry)))
continue;
- if (unlikely(dax_is_locked(entry)))
- entry = get_unlocked_entry(&xas, 0);
+ entry = wait_entry_unlocked_exclusive(&xas, entry);
if (entry)
page = dax_busy_page(entry);
put_unlocked_entry(&xas, entry, WAKE_NEXT);
@@ -743,14 +813,14 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
static int __dax_invalidate_entry(struct address_space *mapping,
- pgoff_t index, bool trunc)
+ pgoff_t index, bool trunc)
{
XA_STATE(xas, &mapping->i_pages, index);
int ret = 0;
void *entry;
xas_lock_irq(&xas);
- entry = get_unlocked_entry(&xas, 0);
+ entry = get_next_unlocked_entry(&xas, 0);
if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
goto out;
if (!trunc &&
@@ -776,7 +846,9 @@ static int __dax_clear_dirty_range(struct address_space *mapping,
xas_lock_irq(&xas);
xas_for_each(&xas, entry, end) {
- entry = get_unlocked_entry(&xas, 0);
+ entry = wait_entry_unlocked_exclusive(&xas, entry);
+ if (!entry)
+ continue;
xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
put_unlocked_entry(&xas, entry, WAKE_NEXT);
@@ -813,6 +885,107 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
return ret;
}
+void dax_delete_mapping_range(struct address_space *mapping,
+ loff_t start, loff_t end)
+{
+ void *entry;
+ pgoff_t start_idx = start >> PAGE_SHIFT;
+ pgoff_t end_idx;
+ XA_STATE(xas, &mapping->i_pages, start_idx);
+
+ /* If end == LLONG_MAX, all pages from start till the end of the file */
+ if (end == LLONG_MAX)
+ end_idx = ULONG_MAX;
+ else
+ end_idx = end >> PAGE_SHIFT;
+
+ xas_lock_irq(&xas);
+ xas_for_each(&xas, entry, end_idx) {
+ if (!xa_is_value(entry))
+ continue;
+ entry = wait_entry_unlocked_exclusive(&xas, entry);
+ if (!entry)
+ continue;
+ dax_disassociate_entry(entry, mapping, true);
+ xas_store(&xas, NULL);
+ mapping->nrpages -= 1UL << dax_entry_order(entry);
+ put_unlocked_entry(&xas, entry, WAKE_ALL);
+ }
+ xas_unlock_irq(&xas);
+}
+EXPORT_SYMBOL_GPL(dax_delete_mapping_range);
+
+static int wait_page_idle(struct page *page,
+ void (cb)(struct inode *),
+ struct inode *inode)
+{
+ return ___wait_var_event(page, dax_page_is_idle(page),
+ TASK_INTERRUPTIBLE, 0, 0, cb(inode));
+}
+
+static void wait_page_idle_uninterruptible(struct page *page,
+ struct inode *inode)
+{
+ ___wait_var_event(page, dax_page_is_idle(page),
+ TASK_UNINTERRUPTIBLE, 0, 0, schedule());
+}
+
+/*
+ * Unmaps the inode and waits for any DMA to complete prior to deleting the
+ * DAX mapping entries for the range.
+ *
+ * For NOWAIT behavior, pass @cb as NULL to return early on the first busy
+ * page found.
+ */
+int dax_break_layout(struct inode *inode, loff_t start, loff_t end,
+ void (cb)(struct inode *))
+{
+ struct page *page;
+ int error = 0;
+
+ if (!dax_mapping(inode->i_mapping))
+ return 0;
+
+ do {
+ page = dax_layout_busy_page_range(inode->i_mapping, start, end);
+ if (!page)
+ break;
+ if (!cb) {
+ error = -ERESTARTSYS;
+ break;
+ }
+
+ error = wait_page_idle(page, cb, inode);
+ } while (error == 0);
+
+ if (!page)
+ dax_delete_mapping_range(inode->i_mapping, start, end);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(dax_break_layout);
+
+void dax_break_layout_final(struct inode *inode)
+{
+ struct page *page;
+
+ if (!dax_mapping(inode->i_mapping))
+ return;
+
+ do {
+ page = dax_layout_busy_page_range(inode->i_mapping, 0,
+ LLONG_MAX);
+ if (!page)
+ break;
+
+ wait_page_idle_uninterruptible(page, inode);
+ } while (true);
+
+ if (!page)
+ dax_delete_mapping_range(inode->i_mapping, 0, LLONG_MAX);
+}
+EXPORT_SYMBOL_GPL(dax_break_layout_final);
+
/*
* Invalidate DAX entry if it is clean.
*/
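
Editor's note: dax_break_layout() above expects @cb to drop whatever lock keeps the busy pages pinned, sleep, and re-take it; passing NULL turns the call into a non-blocking check. A hedged sketch of a filesystem-side caller (demo_* helpers are hypothetical, loosely modelled on existing fsdax users):

/* Illustrative sketch only -- demo_* helpers are hypothetical. */
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched.h>

static void demo_wait_dax_page(struct inode *inode)
{
	/* Drop the lock that pins the mapping, let DMA finish, re-take it. */
	filemap_invalidate_unlock(inode->i_mapping);
	schedule();
	filemap_invalidate_lock(inode->i_mapping);
}

static int demo_break_layouts(struct inode *inode, loff_t start, loff_t end)
{
	/* The caller holds the invalidate lock; it is cycled in the callback. */
	return dax_break_layout(inode, start, end, demo_wait_dax_page);
}
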
@@ -867,7 +1040,7 @@ static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
* appropriate.
*/
static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
- const struct iomap_iter *iter, void *entry, pfn_t pfn,
+ const struct iomap_iter *iter, void *entry, unsigned long pfn,
unsigned long flags)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
@@ -895,8 +1068,9 @@ static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
void *old;
dax_disassociate_entry(entry, mapping, false);
- dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
- shared);
+ dax_associate_entry(new_entry, mapping, vmf->vma,
+ vmf->address, shared);
+
/*
* Only swap our new entry into the page cache if the current
* entry is a zero page or an empty entry. If a normal PTE or
@@ -940,7 +1114,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
if (unlikely(dax_is_locked(entry))) {
void *old_entry = entry;
- entry = get_unlocked_entry(xas, 0);
+ entry = get_next_unlocked_entry(xas, 0);
/* Entry got punched out / reallocated? */
if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
@@ -1064,7 +1238,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
- size_t size, void **kaddr, pfn_t *pfnp)
+ size_t size, void **kaddr, unsigned long *pfnp)
{
pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
int id, rc = 0;
@@ -1082,11 +1256,9 @@ static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
rc = -EINVAL;
if (PFN_PHYS(length) < size)
goto out;
- if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
- goto out;
- /* For larger pages we need devmap */
- if (length > 1 && !pfn_t_devmap(*pfnp))
+ if (*pfnp & (PHYS_PFN(size)-1))
goto out;
+
rc = 0;
out_check_addr:
@@ -1188,12 +1360,12 @@ static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
{
struct inode *inode = iter->inode;
unsigned long vaddr = vmf->address;
- pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
+ unsigned long pfn = my_zero_pfn(vaddr);
vm_fault_t ret;
*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
- ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
+ ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), false);
trace_dax_load_hole(inode, vmf, ret);
return ret;
}
@@ -1203,52 +1375,24 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
const struct iomap_iter *iter, void **entry)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
- unsigned long pmd_addr = vmf->address & PMD_MASK;
- struct vm_area_struct *vma = vmf->vma;
struct inode *inode = mapping->host;
- pgtable_t pgtable = NULL;
struct folio *zero_folio;
- spinlock_t *ptl;
- pmd_t pmd_entry;
- pfn_t pfn;
+ vm_fault_t ret;
zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm);
- if (unlikely(!zero_folio))
- goto fallback;
-
- pfn = page_to_pfn_t(&zero_folio->page);
- *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
- DAX_PMD | DAX_ZERO_PAGE);
-
- if (arch_needs_pgtable_deposit()) {
- pgtable = pte_alloc_one(vma->vm_mm);
- if (!pgtable)
- return VM_FAULT_OOM;
- }
-
- ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
- if (!pmd_none(*(vmf->pmd))) {
- spin_unlock(ptl);
- goto fallback;
+ if (unlikely(!zero_folio)) {
+ trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry);
+ return VM_FAULT_FALLBACK;
}
- if (pgtable) {
- pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
- mm_inc_nr_ptes(vma->vm_mm);
- }
- pmd_entry = mk_pmd(&zero_folio->page, vmf->vma->vm_page_prot);
- pmd_entry = pmd_mkhuge(pmd_entry);
- set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
- spin_unlock(ptl);
- trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry);
- return VM_FAULT_NOPAGE;
+ *entry = dax_insert_entry(xas, vmf, iter, *entry, folio_pfn(zero_folio),
+ DAX_PMD | DAX_ZERO_PAGE);
-fallback:
- if (pgtable)
- pte_free(vma->vm_mm, pgtable);
- trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry);
- return VM_FAULT_FALLBACK;
+ ret = vmf_insert_folio_pmd(vmf, zero_folio, false);
+ if (ret == VM_FAULT_NOPAGE)
+ trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry);
+ return ret;
}
#else
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
@@ -1258,7 +1402,7 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
}
#endif /* CONFIG_FS_DAX_PMD */
-static s64 dax_unshare_iter(struct iomap_iter *iter)
+static int dax_unshare_iter(struct iomap_iter *iter)
{
struct iomap *iomap = &iter->iomap;
const struct iomap *srcmap = iomap_iter_srcmap(iter);
@@ -1266,11 +1410,11 @@ static s64 dax_unshare_iter(struct iomap_iter *iter)
u64 copy_len = iomap_length(iter);
u32 mod;
int id = 0;
- s64 ret = 0;
+ s64 ret;
void *daddr = NULL, *saddr = NULL;
if (!iomap_want_unshare_iter(iter))
- return iomap_length(iter);
+ return iomap_iter_advance_full(iter);
/*
* Extend the file range to be aligned to fsblock/pagesize, because
@@ -1300,14 +1444,14 @@ static s64 dax_unshare_iter(struct iomap_iter *iter)
if (ret < 0)
goto out_unlock;
- if (copy_mc_to_kernel(daddr, saddr, copy_len) == 0)
- ret = iomap_length(iter);
- else
+ if (copy_mc_to_kernel(daddr, saddr, copy_len) != 0)
ret = -EIO;
out_unlock:
dax_read_unlock(id);
- return dax_mem2blk_err(ret);
+ if (ret < 0)
+ return dax_mem2blk_err(ret);
+ return iomap_iter_advance_full(iter);
}
int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
@@ -1326,7 +1470,7 @@ int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
iter.len = min(len, size - pos);
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = dax_unshare_iter(&iter);
+ iter.status = dax_unshare_iter(&iter);
return ret;
}
EXPORT_SYMBOL_GPL(dax_file_unshare);
@@ -1354,17 +1498,16 @@ static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
return ret;
}
-static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
+static int dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
const struct iomap *iomap = &iter->iomap;
const struct iomap *srcmap = iomap_iter_srcmap(iter);
- loff_t pos = iter->pos;
u64 length = iomap_length(iter);
- s64 written = 0;
+ int ret;
/* already zeroed? we're done. */
if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
- return length;
+ return iomap_iter_advance(iter, length);
/*
* invalidate the pages whose sharing state is to be changed
@@ -1372,33 +1515,35 @@ static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
*/
if (iomap->flags & IOMAP_F_SHARED)
invalidate_inode_pages2_range(iter->inode->i_mapping,
- pos >> PAGE_SHIFT,
- (pos + length - 1) >> PAGE_SHIFT);
+ iter->pos >> PAGE_SHIFT,
+ (iter->pos + length - 1) >> PAGE_SHIFT);
do {
+ loff_t pos = iter->pos;
unsigned offset = offset_in_page(pos);
- unsigned size = min_t(u64, PAGE_SIZE - offset, length);
pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
- long rc;
int id;
+ length = min_t(u64, PAGE_SIZE - offset, length);
+
id = dax_read_lock();
- if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
- rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
+ if (IS_ALIGNED(pos, PAGE_SIZE) && length == PAGE_SIZE)
+ ret = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
else
- rc = dax_memzero(iter, pos, size);
+ ret = dax_memzero(iter, pos, length);
dax_read_unlock(id);
- if (rc < 0)
- return rc;
- pos += size;
- length -= size;
- written += size;
- } while (length > 0);
+ if (ret < 0)
+ return ret;
+
+ ret = iomap_iter_advance(iter, length);
+ if (ret)
+ return ret;
+ } while ((length = iomap_length(iter)) > 0);
if (did_zero)
*did_zero = true;
- return written;
+ return ret;
}
int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
@@ -1413,7 +1558,7 @@ int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
int ret;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = dax_zero_iter(&iter, did_zero);
+ iter.status = dax_zero_iter(&iter, did_zero);
return ret;
}
EXPORT_SYMBOL_GPL(dax_zero_range);
@@ -1431,8 +1576,7 @@ int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
-static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
- struct iov_iter *iter)
+static int dax_iomap_iter(struct iomap_iter *iomi, struct iov_iter *iter)
{
const struct iomap *iomap = &iomi->iomap;
const struct iomap *srcmap = iomap_iter_srcmap(iomi);
@@ -1451,8 +1595,10 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
if (pos >= end)
return 0;
- if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
- return iov_iter_zero(min(length, end - pos), iter);
+ if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) {
+ done = iov_iter_zero(min(length, end - pos), iter);
+ return iomap_iter_advance(iomi, done);
+ }
}
/*
@@ -1485,7 +1631,7 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
}
id = dax_read_lock();
- while (pos < end) {
+ while ((pos = iomi->pos) < end) {
unsigned offset = pos & (PAGE_SIZE - 1);
const size_t size = ALIGN(length + offset, PAGE_SIZE);
pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
@@ -1535,18 +1681,16 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
map_len, iter);
- pos += xfer;
- length -= xfer;
- done += xfer;
-
- if (xfer == 0)
+ ret = iomap_iter_advance(iomi, xfer);
+ if (!ret && xfer == 0)
ret = -EFAULT;
if (xfer < map_len)
break;
+ length = iomap_length(iomi);
}
dax_read_unlock(id);
- return done ? done : ret;
+ return ret;
}
/**
@@ -1572,13 +1716,16 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
loff_t done = 0;
int ret;
+ if (WARN_ON_ONCE(iocb->ki_flags & IOCB_ATOMIC))
+ return -EIO;
+
if (!iomi.len)
return 0;
if (iov_iter_rw(iter) == WRITE) {
lockdep_assert_held_write(&iomi.inode->i_rwsem);
iomi.flags |= IOMAP_WRITE;
- } else {
+ } else if (!sb_rdonly(iomi.inode->i_sb)) {
lockdep_assert_held(&iomi.inode->i_rwsem);
}
@@ -1586,7 +1733,7 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
iomi.flags |= IOMAP_NOWAIT;
while ((ret = iomap_iter(&iomi, ops)) > 0)
- iomi.processed = dax_iomap_iter(&iomi, iter);
+ iomi.status = dax_iomap_iter(&iomi, iter);
done = iomi.pos - iocb->ki_pos;
iocb->ki_pos = iomi.pos;
@@ -1607,7 +1754,8 @@ static vm_fault_t dax_fault_return(int error)
* insertion for now and return the pfn so that caller can insert it after the
* fsync is done.
*/
-static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
+static vm_fault_t dax_fault_synchronous_pfnp(unsigned long *pfnp,
+ unsigned long pfn)
{
if (WARN_ON_ONCE(!pfnp))
return VM_FAULT_SIGBUS;
@@ -1655,7 +1803,7 @@ static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
* @pmd: distinguish whether it is a pmd fault
*/
static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
- const struct iomap_iter *iter, pfn_t *pfnp,
+ const struct iomap_iter *iter, unsigned long *pfnp,
struct xa_state *xas, void **entry, bool pmd)
{
const struct iomap *iomap = &iter->iomap;
@@ -1664,8 +1812,9 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
bool write = iter->flags & IOMAP_WRITE;
unsigned long entry_flags = pmd ? DAX_PMD : 0;
- int err = 0;
- pfn_t pfn;
+ struct folio *folio;
+ int ret, err = 0;
+ unsigned long pfn;
void *kaddr;
if (!pmd && vmf->cow_page)
@@ -1696,20 +1845,21 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
return dax_fault_return(err);
}
+ folio = dax_to_folio(*entry);
if (dax_fault_is_synchronous(iter, vmf->vma))
return dax_fault_synchronous_pfnp(pfnp, pfn);
- /* insert PMD pfn */
+ folio_ref_inc(folio);
if (pmd)
- return vmf_insert_pfn_pmd(vmf, pfn, write);
+ ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn), write);
+ else
+ ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), write);
+ folio_put(folio);
- /* insert PTE pfn */
- if (write)
- return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
- return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
+ return ret;
}
-static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, unsigned long *pfnp,
int *iomap_errp, const struct iomap_ops *ops)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
@@ -1750,14 +1900,14 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
* the PTE we need to set up. If so just return and the fault will be
* retried.
*/
- if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
+ if (pmd_trans_huge(*vmf->pmd)) {
ret = VM_FAULT_NOPAGE;
goto unlock_entry;
}
while ((error = iomap_iter(&iter, ops)) > 0) {
if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
- iter.processed = -EIO; /* fs corruption? */
+ iter.status = -EIO; /* fs corruption? */
continue;
}
@@ -1770,7 +1920,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
}
if (!(ret & VM_FAULT_ERROR))
- iter.processed = PAGE_SIZE;
+ iter.status = iomap_iter_advance(&iter, PAGE_SIZE);
}
if (iomap_errp)
@@ -1819,7 +1969,7 @@ static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
return false;
}
-static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, unsigned long *pfnp,
const struct iomap_ops *ops)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
@@ -1871,8 +2021,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
* the PMD we need to set up. If so just return and the fault will be
* retried.
*/
- if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
- !pmd_devmap(*vmf->pmd)) {
+ if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd)) {
ret = 0;
goto unlock_entry;
}
@@ -1884,7 +2033,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
if (ret != VM_FAULT_FALLBACK)
- iter.processed = PMD_SIZE;
+ iter.status = iomap_iter_advance(&iter, PMD_SIZE);
}
unlock_entry:
@@ -1899,7 +2048,7 @@ out:
return ret;
}
#else
-static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, unsigned long *pfnp,
const struct iomap_ops *ops)
{
return VM_FAULT_FALLBACK;
@@ -1920,7 +2069,8 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
* successfully.
*/
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
- pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
+ unsigned long *pfnp, int *iomap_errp,
+ const struct iomap_ops *ops)
{
if (order == 0)
return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
@@ -1940,16 +2090,17 @@ EXPORT_SYMBOL_GPL(dax_iomap_fault);
* This function inserts a writeable PTE or PMD entry into the page tables
* for an mmaped DAX file. It also marks the page cache entry as dirty.
*/
-static vm_fault_t
-dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
+static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
+ unsigned long pfn, unsigned int order)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
+ struct folio *folio;
void *entry;
vm_fault_t ret;
xas_lock_irq(&xas);
- entry = get_unlocked_entry(&xas, order);
+ entry = get_next_unlocked_entry(&xas, order);
/* Did we race with someone splitting entry or so? */
if (!entry || dax_is_conflict(entry) ||
(order == 0 && !dax_is_pte_entry(entry))) {
@@ -1962,14 +2113,17 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
dax_lock_entry(&xas, entry);
xas_unlock_irq(&xas);
+ folio = pfn_folio(pfn);
+ folio_ref_inc(folio);
if (order == 0)
- ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
+ ret = vmf_insert_page_mkwrite(vmf, &folio->page, true);
#ifdef CONFIG_FS_DAX_PMD
else if (order == PMD_ORDER)
- ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
+ ret = vmf_insert_folio_pmd(vmf, folio, FAULT_FLAG_WRITE);
#endif
else
ret = VM_FAULT_FALLBACK;
+ folio_put(folio);
dax_unlock_entry(&xas, entry);
trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
return ret;
@@ -1986,7 +2140,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
* table entry.
*/
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order,
- pfn_t pfn)
+ unsigned long pfn)
{
int err;
loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
@@ -1999,7 +2153,7 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order,
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
-static loff_t dax_range_compare_iter(struct iomap_iter *it_src,
+static int dax_range_compare_iter(struct iomap_iter *it_src,
struct iomap_iter *it_dest, u64 len, bool *same)
{
const struct iomap *smap = &it_src->iomap;
@@ -2012,7 +2166,7 @@ static loff_t dax_range_compare_iter(struct iomap_iter *it_src,
if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) {
*same = true;
- return len;
+ goto advance;
}
if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) {
@@ -2035,7 +2189,12 @@ static loff_t dax_range_compare_iter(struct iomap_iter *it_src,
if (!*same)
len = 0;
dax_read_unlock(id);
- return len;
+
+advance:
+ ret = iomap_iter_advance(it_src, len);
+ if (!ret)
+ ret = iomap_iter_advance(it_dest, len);
+ return ret;
out_unlock:
dax_read_unlock(id);
@@ -2058,15 +2217,15 @@ int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
.len = len,
.flags = IOMAP_DAX,
};
- int ret, compared = 0;
+ int ret, status;
while ((ret = iomap_iter(&src_iter, ops)) > 0 &&
(ret = iomap_iter(&dst_iter, ops)) > 0) {
- compared = dax_range_compare_iter(&src_iter, &dst_iter,
+ status = dax_range_compare_iter(&src_iter, &dst_iter,
min(src_iter.len, dst_iter.len), same);
- if (compared < 0)
+ if (status < 0)
return ret;
- src_iter.processed = dst_iter.processed = compared;
+ src_iter.status = dst_iter.status = status;
}
return ret;
}
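
Editor's note: throughout the dax.c hunks, iteration helpers stop returning a byte count through iter.processed and instead return an int status after explicitly advancing the iterator. A hedged sketch of a loop body under that convention (demo_* names are made up; the by-value iomap_iter_advance() signature follows the usage shown in this diff):

/* Illustrative sketch only -- demo_* names are hypothetical. */
#include <linux/iomap.h>

static int demo_iomap_op(struct iomap_iter *iter)
{
	u64 length = iomap_length(iter);

	/* ... operate on iter->iomap for [iter->pos, iter->pos + length) ... */

	/* On success advance the iterator; on failure return a negative errno. */
	return iomap_iter_advance(iter, length);
}

static int demo_run(struct inode *inode, loff_t pos, u64 len,
		    const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= inode,
		.pos	= pos,
		.len	= len,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = demo_iomap_op(&iter);
	return ret;
}
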
diff --git a/fs/dcache.c b/fs/dcache.c
index b4d5e9e1e43d..dc2fff4811d1 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -73,14 +73,21 @@
* If no ancestor relationship:
* arbitrary, since it's serialized on rename_lock
*/
-int sysctl_vfs_cache_pressure __read_mostly = 100;
-EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
+static int sysctl_vfs_cache_pressure __read_mostly = 100;
+static int sysctl_vfs_cache_pressure_denom __read_mostly = 100;
+
+unsigned long vfs_pressure_ratio(unsigned long val)
+{
+ return mult_frac(val, sysctl_vfs_cache_pressure, sysctl_vfs_cache_pressure_denom);
+}
+EXPORT_SYMBOL_GPL(vfs_pressure_ratio);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
EXPORT_SYMBOL(rename_lock);
-static struct kmem_cache *dentry_cache __ro_after_init;
+static struct kmem_cache *__dentry_cache __ro_after_init;
+#define dentry_cache runtime_const_ptr(__dentry_cache)
const struct qstr empty_name = QSTR_INIT("", 0);
EXPORT_SYMBOL(empty_name);
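
Editor's note: with the pressure knob made static, shrinkers scale their object counts through the exported vfs_pressure_ratio() helper, which now computes val * vfs_cache_pressure / vfs_cache_pressure_denom. A minimal usage sketch (illustration only):

/* Illustrative sketch only. With pressure=50 and denom=200, 10000 unused
 * objects are reported to the shrinker core as 10000 * 50 / 200 = 2500. */
static unsigned long demo_count_objects(unsigned long nr_unused)
{
	return vfs_pressure_ratio(nr_unused);
}
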
@@ -192,7 +199,7 @@ static int proc_nr_dentry(const struct ctl_table *table, int write, void *buffer
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
-static struct ctl_table fs_dcache_sysctls[] = {
+static const struct ctl_table fs_dcache_sysctls[] = {
{
.procname = "dentry-state",
.data = &dentry_stat,
@@ -211,8 +218,28 @@ static struct ctl_table fs_dcache_sysctls[] = {
},
};
+static const struct ctl_table vm_dcache_sysctls[] = {
+ {
+ .procname = "vfs_cache_pressure",
+ .data = &sysctl_vfs_cache_pressure,
+ .maxlen = sizeof(sysctl_vfs_cache_pressure),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ },
+ {
+ .procname = "vfs_cache_pressure_denom",
+ .data = &sysctl_vfs_cache_pressure_denom,
+ .maxlen = sizeof(sysctl_vfs_cache_pressure_denom),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE_HUNDRED,
+ },
+};
+
static int __init init_fs_dcache_sysctls(void)
{
+ register_sysctl_init("vm", vm_dcache_sysctls);
register_sysctl_init("fs", fs_dcache_sysctls);
return 0;
}
@@ -295,12 +322,16 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
return dentry_string_cmp(cs, ct, tcount);
}
+/*
+ * Long names are allocated separately from the dentry and are never modified.
+ * They are refcounted; freeing is RCU-delayed. See take_dentry_name_snapshot()
+ * for the reason why ->count and ->head can't be combined into a union.
+ * dentry_string_cmp() relies upon ->name[] being word-aligned.
+ */
struct external_name {
- union {
- atomic_t count;
- struct rcu_head head;
- } u;
- unsigned char name[];
+ atomic_t count;
+ struct rcu_head head;
+ unsigned char name[] __aligned(sizeof(unsigned long));
};
static inline struct external_name *external_name(struct dentry *dentry)
@@ -324,31 +355,45 @@ static void __d_free_external(struct rcu_head *head)
static inline int dname_external(const struct dentry *dentry)
{
- return dentry->d_name.name != dentry->d_iname;
+ return dentry->d_name.name != dentry->d_shortname.string;
}
void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
- spin_lock(&dentry->d_lock);
- name->name = dentry->d_name;
- if (unlikely(dname_external(dentry))) {
- atomic_inc(&external_name(dentry)->u.count);
+ unsigned seq;
+ const unsigned char *s;
+
+ rcu_read_lock();
+retry:
+ seq = read_seqcount_begin(&dentry->d_seq);
+ s = READ_ONCE(dentry->d_name.name);
+ name->name.hash_len = dentry->d_name.hash_len;
+ name->name.name = name->inline_name.string;
+ if (likely(s == dentry->d_shortname.string)) {
+ name->inline_name = dentry->d_shortname;
} else {
- memcpy(name->inline_name, dentry->d_iname,
- dentry->d_name.len + 1);
- name->name.name = name->inline_name;
+ struct external_name *p;
+ p = container_of(s, struct external_name, name[0]);
+ // get a valid reference
+ if (unlikely(!atomic_inc_not_zero(&p->count)))
+ goto retry;
+ name->name.name = s;
}
- spin_unlock(&dentry->d_lock);
+ if (read_seqcount_retry(&dentry->d_seq, seq)) {
+ release_dentry_name_snapshot(name);
+ goto retry;
+ }
+ rcu_read_unlock();
}
EXPORT_SYMBOL(take_dentry_name_snapshot);
void release_dentry_name_snapshot(struct name_snapshot *name)
{
- if (unlikely(name->name.name != name->inline_name)) {
+ if (unlikely(name->name.name != name->inline_name.string)) {
struct external_name *p;
p = container_of(name->name.name, struct external_name, name[0]);
- if (unlikely(atomic_dec_and_test(&p->u.count)))
- kfree_rcu(p, u.head);
+ if (unlikely(atomic_dec_and_test(&p->count)))
+ kfree_rcu(p, head);
}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
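
Editor's note: take_dentry_name_snapshot() above no longer takes d_lock: it samples d_seq, tries atomic_inc_not_zero() on the external name's refcount, and retries if either step loses a race. As a hedged, generic illustration of the grab-or-retry idiom (demo_* names are hypothetical, not from this diff):

/* Illustrative sketch only -- demo_* names are hypothetical. */
#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_obj {
	atomic_t count;
	struct rcu_head rcu;
	/* ... payload ... */
};

static struct demo_obj __rcu *demo_slot;

static struct demo_obj *demo_get_ref(void)
{
	struct demo_obj *obj;

	rcu_read_lock();
	obj = rcu_dereference(demo_slot);
	/*
	 * Only an increment from a non-zero count is a valid reference;
	 * a zero count means the object is already on its way out.
	 */
	if (obj && !atomic_inc_not_zero(&obj->count))
		obj = NULL;
	rcu_read_unlock();
	return obj;
}

static void demo_put_ref(struct demo_obj *obj)
{
	if (atomic_dec_and_test(&obj->count))
		kfree_rcu(obj, rcu);
}
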
@@ -386,7 +431,7 @@ static void dentry_free(struct dentry *dentry)
WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
if (unlikely(dname_external(dentry))) {
struct external_name *p = external_name(dentry);
- if (likely(atomic_dec_and_test(&p->u.count))) {
+ if (likely(atomic_dec_and_test(&p->count))) {
call_rcu(&dentry->d_u.d_rcu, __d_free_external);
return;
}
@@ -750,7 +795,7 @@ void d_mark_dontcache(struct inode *inode)
de->d_flags |= DCACHE_DONTCACHE;
spin_unlock(&de->d_lock);
}
- inode->i_state |= I_DONTCACHE;
+ inode_state_set(inode, I_DONTCACHE);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_mark_dontcache);
@@ -825,6 +870,24 @@ locked:
return false;
}
+static void finish_dput(struct dentry *dentry)
+ __releases(dentry->d_lock)
+ __releases(RCU)
+{
+ while (lock_for_kill(dentry)) {
+ rcu_read_unlock();
+ dentry = __dentry_kill(dentry);
+ if (!dentry)
+ return;
+ if (retain_dentry(dentry, true)) {
+ spin_unlock(&dentry->d_lock);
+ return;
+ }
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+ spin_unlock(&dentry->d_lock);
+}
/*
* This is dput
@@ -862,22 +925,21 @@ void dput(struct dentry *dentry)
rcu_read_unlock();
return;
}
- while (lock_for_kill(dentry)) {
- rcu_read_unlock();
- dentry = __dentry_kill(dentry);
- if (!dentry)
- return;
- if (retain_dentry(dentry, true)) {
- spin_unlock(&dentry->d_lock);
- return;
- }
- rcu_read_lock();
- }
- rcu_read_unlock();
- spin_unlock(&dentry->d_lock);
+ finish_dput(dentry);
}
EXPORT_SYMBOL(dput);
+void d_make_discardable(struct dentry *dentry)
+{
+ spin_lock(&dentry->d_lock);
+ WARN_ON(!(dentry->d_flags & DCACHE_PERSISTENT));
+ dentry->d_flags &= ~DCACHE_PERSISTENT;
+ dentry->d_lockref.count--;
+ rcu_read_lock();
+ finish_dput(dentry);
+}
+EXPORT_SYMBOL(d_make_discardable);
+
static void to_shrink_list(struct dentry *dentry, struct list_head *list)
__must_hold(&dentry->d_lock)
{
@@ -1029,7 +1091,7 @@ struct dentry *d_find_alias_rcu(struct inode *inode)
spin_lock(&inode->i_lock);
// ->i_dentry and ->i_rcu are colocated, but the latter won't be
// used without having I_FREEING set, which means no aliases left
- if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) {
+ if (likely(!(inode_state_read(inode) & I_FREEING) && !hlist_empty(l))) {
if (S_ISDIR(inode->i_mode)) {
de = hlist_entry(l->first, struct dentry, d_u.d_alias);
} else {
@@ -1042,6 +1104,15 @@ struct dentry *d_find_alias_rcu(struct inode *inode)
return de;
}
+void d_dispose_if_unused(struct dentry *dentry, struct list_head *dispose)
+{
+ spin_lock(&dentry->d_lock);
+ if (!dentry->d_lockref.count)
+ to_shrink_list(dentry, dispose);
+ spin_unlock(&dentry->d_lock);
+}
+EXPORT_SYMBOL(d_dispose_if_unused);
+
/*
* Try to kill dentries associated with this inode.
* WARNING: you must own a reference to inode.
@@ -1052,12 +1123,8 @@ void d_prune_aliases(struct inode *inode)
struct dentry *dentry;
spin_lock(&inode->i_lock);
- hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
- spin_lock(&dentry->d_lock);
- if (!dentry->d_lockref.count)
- to_shrink_list(dentry, &dispose);
- spin_unlock(&dentry->d_lock);
- }
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias)
+ d_dispose_if_unused(dentry, &dispose);
spin_unlock(&inode->i_lock);
shrink_dentry_list(&dispose);
}
@@ -1097,6 +1164,7 @@ void shrink_dentry_list(struct list_head *list)
shrink_kill(dentry);
}
}
+EXPORT_SYMBOL(shrink_dentry_list);
static enum lru_status dentry_lru_isolate(struct list_head *item,
struct list_lru_one *lru, void *arg)
@@ -1346,6 +1414,7 @@ struct check_mount {
unsigned int mounted;
};
+/* locks: mount_locked_reader && dentry->d_lock */
static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
{
struct check_mount *info = data;
@@ -1372,9 +1441,8 @@ int path_has_submounts(const struct path *parent)
{
struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
- read_seqlock_excl(&mount_lock);
+ guard(mount_locked_reader)();
d_walk(parent->dentry, &data, path_check_mount);
- read_sequnlock_excl(&mount_lock);
return data.mounted;
}
@@ -1392,7 +1460,7 @@ int d_set_mounted(struct dentry *dentry)
{
struct dentry *p;
int ret = -ENOENT;
- write_seqlock(&rename_lock);
+ read_seqlock_excl(&rename_lock);
for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
/* Need exclusion wrt. d_invalidate() */
spin_lock(&p->d_lock);
@@ -1412,7 +1480,7 @@ int d_set_mounted(struct dentry *dentry)
}
spin_unlock(&dentry->d_lock);
out:
- write_sequnlock(&rename_lock);
+ read_sequnlock_excl(&rename_lock);
return ret;
}
@@ -1467,6 +1535,15 @@ out:
return ret;
}
+static enum d_walk_ret select_collect_umount(void *_data, struct dentry *dentry)
+{
+ if (dentry->d_flags & DCACHE_PERSISTENT) {
+ dentry->d_flags &= ~DCACHE_PERSISTENT;
+ dentry->d_lockref.count--;
+ }
+ return select_collect(_data, dentry);
+}
+
static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
{
struct select_data *data = _data;
@@ -1495,18 +1572,20 @@ out:
}
/**
- * shrink_dcache_parent - prune dcache
+ * shrink_dcache_tree - prune dcache
* @parent: parent of entries to prune
+ * @for_umount: true to also drop the pin on DCACHE_PERSISTENT dentries (umount)
*
* Prune the dcache to remove unused children of the parent dentry.
*/
-void shrink_dcache_parent(struct dentry *parent)
+static void shrink_dcache_tree(struct dentry *parent, bool for_umount)
{
for (;;) {
struct select_data data = {.start = parent};
INIT_LIST_HEAD(&data.dispose);
- d_walk(parent, &data, select_collect);
+ d_walk(parent, &data,
+ for_umount ? select_collect_umount : select_collect);
if (!list_empty(&data.dispose)) {
shrink_dentry_list(&data.dispose);
@@ -1531,6 +1610,11 @@ void shrink_dcache_parent(struct dentry *parent)
shrink_dentry_list(&data.dispose);
}
}
+
+void shrink_dcache_parent(struct dentry *parent)
+{
+ shrink_dcache_tree(parent, false);
+}
EXPORT_SYMBOL(shrink_dcache_parent);
static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
@@ -1557,7 +1641,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
static void do_one_tree(struct dentry *dentry)
{
- shrink_dcache_parent(dentry);
+ shrink_dcache_tree(dentry, true);
d_walk(dentry, dentry, umount_check);
d_drop(dentry);
dput(dentry);
@@ -1654,10 +1738,10 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
* will still always have a NUL at the end, even if we might
* be overwriting an internal NUL character
*/
- dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
+ dentry->d_shortname.string[DNAME_INLINE_LEN-1] = 0;
if (unlikely(!name)) {
name = &slash_name;
- dname = dentry->d_iname;
+ dname = dentry->d_shortname.string;
} else if (name->len > DNAME_INLINE_LEN-1) {
size_t size = offsetof(struct external_name, name[1]);
struct external_name *p = kmalloc(size + name->len,
@@ -1667,35 +1751,34 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
kmem_cache_free(dentry_cache, dentry);
return NULL;
}
- atomic_set(&p->u.count, 1);
+ atomic_set(&p->count, 1);
dname = p->name;
} else {
- dname = dentry->d_iname;
+ dname = dentry->d_shortname.string;
}
- dentry->d_name.len = name->len;
- dentry->d_name.hash = name->hash;
+ dentry->__d_name.len = name->len;
+ dentry->__d_name.hash = name->hash;
memcpy(dname, name->name, name->len);
dname[name->len] = 0;
/* Make sure we always see the terminating NUL character */
- smp_store_release(&dentry->d_name.name, dname); /* ^^^ */
+ smp_store_release(&dentry->__d_name.name, dname); /* ^^^ */
- dentry->d_lockref.count = 1;
dentry->d_flags = 0;
- spin_lock_init(&dentry->d_lock);
+ lockref_init(&dentry->d_lockref);
seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
dentry->d_inode = NULL;
dentry->d_parent = dentry;
dentry->d_sb = sb;
- dentry->d_op = NULL;
+ dentry->d_op = sb->__s_d_op;
+ dentry->d_flags = sb->s_d_flags;
dentry->d_fsdata = NULL;
INIT_HLIST_BL_NODE(&dentry->d_hash);
INIT_LIST_HEAD(&dentry->d_lru);
INIT_HLIST_HEAD(&dentry->d_children);
INIT_HLIST_NODE(&dentry->d_u.d_alias);
INIT_HLIST_NODE(&dentry->d_sib);
- d_set_d_op(dentry, dentry->d_sb->s_d_op);
if (dentry->d_op && dentry->d_op->d_init) {
err = dentry->d_op->d_init(dentry);
@@ -1778,8 +1861,9 @@ struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
struct dentry *dentry = __d_alloc(sb, name);
if (likely(dentry)) {
dentry->d_flags |= DCACHE_NORCU;
- if (!sb->s_d_op)
- d_set_d_op(dentry, &anon_ops);
+ /* d_op_flags(&anon_ops) is 0 */
+ if (!dentry->d_op)
+ dentry->d_op = &anon_ops;
}
return dentry;
}
@@ -1794,35 +1878,50 @@ struct dentry *d_alloc_name(struct dentry *parent, const char *name)
}
EXPORT_SYMBOL(d_alloc_name);
-void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
+#define DCACHE_OP_FLAGS \
+ (DCACHE_OP_HASH | DCACHE_OP_COMPARE | DCACHE_OP_REVALIDATE | \
+ DCACHE_OP_WEAK_REVALIDATE | DCACHE_OP_DELETE | DCACHE_OP_PRUNE | \
+ DCACHE_OP_REAL)
+
+static unsigned int d_op_flags(const struct dentry_operations *op)
+{
+ unsigned int flags = 0;
+ if (op) {
+ if (op->d_hash)
+ flags |= DCACHE_OP_HASH;
+ if (op->d_compare)
+ flags |= DCACHE_OP_COMPARE;
+ if (op->d_revalidate)
+ flags |= DCACHE_OP_REVALIDATE;
+ if (op->d_weak_revalidate)
+ flags |= DCACHE_OP_WEAK_REVALIDATE;
+ if (op->d_delete)
+ flags |= DCACHE_OP_DELETE;
+ if (op->d_prune)
+ flags |= DCACHE_OP_PRUNE;
+ if (op->d_real)
+ flags |= DCACHE_OP_REAL;
+ }
+ return flags;
+}
+
+static void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
+ unsigned int flags = d_op_flags(op);
WARN_ON_ONCE(dentry->d_op);
- WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
- DCACHE_OP_COMPARE |
- DCACHE_OP_REVALIDATE |
- DCACHE_OP_WEAK_REVALIDATE |
- DCACHE_OP_DELETE |
- DCACHE_OP_REAL));
+ WARN_ON_ONCE(dentry->d_flags & DCACHE_OP_FLAGS);
dentry->d_op = op;
- if (!op)
- return;
- if (op->d_hash)
- dentry->d_flags |= DCACHE_OP_HASH;
- if (op->d_compare)
- dentry->d_flags |= DCACHE_OP_COMPARE;
- if (op->d_revalidate)
- dentry->d_flags |= DCACHE_OP_REVALIDATE;
- if (op->d_weak_revalidate)
- dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
- if (op->d_delete)
- dentry->d_flags |= DCACHE_OP_DELETE;
- if (op->d_prune)
- dentry->d_flags |= DCACHE_OP_PRUNE;
- if (op->d_real)
- dentry->d_flags |= DCACHE_OP_REAL;
-
-}
-EXPORT_SYMBOL(d_set_d_op);
+ if (flags)
+ dentry->d_flags |= flags;
+}
+
+void set_default_d_op(struct super_block *s, const struct dentry_operations *ops)
+{
+ unsigned int flags = d_op_flags(ops);
+ s->__s_d_op = ops;
+ s->s_d_flags = (s->s_d_flags & ~DCACHE_OP_FLAGS) | flags;
+}
+EXPORT_SYMBOL(set_default_d_op);
static unsigned d_flags_for_inode(struct inode *inode)
{
@@ -1864,7 +1963,6 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
unsigned add_flags = d_flags_for_inode(inode);
WARN_ON(d_in_lookup(dentry));
- spin_lock(&dentry->d_lock);
/*
* The negative counter only tracks dentries on the LRU. Don't dec if
* d_lru is on another list.
@@ -1877,7 +1975,6 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
__d_set_inode_and_type(dentry, inode, add_flags);
raw_write_seqcount_end(&dentry->d_seq);
fsnotify_update_flags(dentry);
- spin_unlock(&dentry->d_lock);
}
/**
@@ -1901,7 +1998,9 @@ void d_instantiate(struct dentry *entry, struct inode * inode)
if (inode) {
security_d_instantiate(entry, inode);
spin_lock(&inode->i_lock);
+ spin_lock(&entry->d_lock);
__d_instantiate(entry, inode);
+ spin_unlock(&entry->d_lock);
spin_unlock(&inode->i_lock);
}
}
@@ -1920,15 +2019,11 @@ void d_instantiate_new(struct dentry *entry, struct inode *inode)
lockdep_annotate_inode_mutex_key(inode);
security_d_instantiate(entry, inode);
spin_lock(&inode->i_lock);
+ spin_lock(&entry->d_lock);
__d_instantiate(entry, inode);
- WARN_ON(!(inode->i_state & I_NEW));
- inode->i_state &= ~I_NEW & ~I_CREATING;
- /*
- * Pairs with the barrier in prepare_to_wait_event() to make sure
- * ___wait_var_event() either sees the bit cleared or
- * waitqueue_active() check in wake_up_var() sees the waiter.
- */
- smp_mb();
+ spin_unlock(&entry->d_lock);
+ WARN_ON(!(inode_state_read(inode) & I_NEW));
+ inode_state_clear(inode, I_NEW | I_CREATING);
inode_wake_up_bit(inode, __I_NEW);
spin_unlock(&inode->i_lock);
}
With __d_instantiate() no longer taking ->d_lock itself, both callers above now acquire it explicitly, nested inside ->i_lock, which keeps the existing i_lock -> d_lock ordering while letting d_make_persistent() below do extra work under the same d_lock hold.
@@ -2247,11 +2342,20 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent,
seq = raw_seqcount_begin(&dentry->d_seq);
if (dentry->d_parent != parent)
continue;
- if (d_unhashed(dentry))
- continue;
if (dentry->d_name.hash_len != hashlen)
continue;
- if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
+ if (unlikely(dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0))
+ continue;
+ /*
+ * Check for the dentry being unhashed.
+ *
+ * As tempting as it is, we *can't* skip it because of a race window
+ * between us finding the dentry before it gets unhashed and loading
+ * the sequence counter after unhashing is finished.
+ *
+ * We can at least give the branch predictor a hint (hence the unlikely()).
+ */
+ if (unlikely(d_unhashed(dentry)))
continue;
*seqp = seq;
return dentry;
@@ -2378,7 +2482,6 @@ struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
}
return d_lookup(dir, name);
}
-EXPORT_SYMBOL(d_hash_and_lookup);
/*
* When a file is deleted, we have two options:
@@ -2451,8 +2554,8 @@ static inline unsigned start_dir_add(struct inode *dir)
{
preempt_disable_nested();
for (;;) {
- unsigned n = dir->i_dir_seq;
- if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
+ unsigned n = READ_ONCE(dir->i_dir_seq);
+ if (!(n & 1) && try_cmpxchg(&dir->i_dir_seq, &n, n + 1))
return n;
cpu_relax();
}
@@ -2463,7 +2566,8 @@ static inline void end_dir_add(struct inode *dir, unsigned int n,
{
smp_store_release(&dir->i_dir_seq, n + 2);
preempt_enable_nested();
- wake_up_all(d_wait);
+ if (wq_has_sleeper(d_wait))
+ wake_up_all(d_wait);
}
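The wq_has_sleeper() check skips the wakeup (and its lock) when nobody is waiting; it is safe because the waiter queues itself before re-checking the condition, so either the waiter sees the released value or the waker sees the sleeper. A generic sketch of that pairing, not the exact dcache code (example_waiter/example_waker are hypothetical):

    #include <linux/wait.h>

    static void example_waiter(wait_queue_head_t *wq, unsigned int *seq)
    {
            DEFINE_WAIT(wait);

            prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); /* on the queue first */
            if (READ_ONCE(*seq) & 1)                          /* then check */
                    schedule();
            finish_wait(wq, &wait);
    }

    static void example_waker(wait_queue_head_t *wq, unsigned int *seq, unsigned int n)
    {
            smp_store_release(seq, n + 2);  /* publish before looking for sleepers */
            if (wq_has_sleeper(wq))         /* implies the barrier the check needs */
                    wake_up_all(wq);
    }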
static void d_wait_lookup(struct dentry *dentry)
@@ -2487,13 +2591,21 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
unsigned int hash = name->hash;
struct hlist_bl_head *b = in_lookup_hash(parent, hash);
struct hlist_bl_node *node;
- struct dentry *new = d_alloc(parent, name);
+ struct dentry *new = __d_alloc(parent->d_sb, name);
struct dentry *dentry;
unsigned seq, r_seq, d_seq;
if (unlikely(!new))
return ERR_PTR(-ENOMEM);
+ new->d_flags |= DCACHE_PAR_LOOKUP;
+ spin_lock(&parent->d_lock);
+ new->d_parent = dget_dlock(parent);
+ hlist_add_head(&new->d_sib, &parent->d_children);
+ if (parent->d_flags & DCACHE_DISCONNECTED)
+ new->d_flags |= DCACHE_DISCONNECTED;
+ spin_unlock(&parent->d_lock);
+
retry:
rcu_read_lock();
seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
@@ -2577,8 +2689,6 @@ retry:
return dentry;
}
rcu_read_unlock();
- /* we can't take ->d_lock here; it's OK, though. */
- new->d_flags |= DCACHE_PAR_LOOKUP;
new->d_wait = wq;
hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b);
hlist_bl_unlock(b);
@@ -2624,7 +2734,8 @@ EXPORT_SYMBOL(__d_lookup_unhash_wake);
/* inode->i_lock held if inode is non-NULL */
-static inline void __d_add(struct dentry *dentry, struct inode *inode)
+static inline void __d_add(struct dentry *dentry, struct inode *inode,
+ const struct dentry_operations *ops)
{
wait_queue_head_t *d_wait;
struct inode *dir = NULL;
@@ -2635,6 +2746,8 @@ static inline void __d_add(struct dentry *dentry, struct inode *inode)
n = start_dir_add(dir);
d_wait = __d_lookup_unhash(dentry);
}
+ if (unlikely(ops))
+ d_set_d_op(dentry, ops);
if (inode) {
unsigned add_flags = d_flags_for_inode(inode);
hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
@@ -2666,55 +2779,27 @@ void d_add(struct dentry *entry, struct inode *inode)
security_d_instantiate(entry, inode);
spin_lock(&inode->i_lock);
}
- __d_add(entry, inode);
+ __d_add(entry, inode, NULL);
}
EXPORT_SYMBOL(d_add);
-/**
- * d_exact_alias - find and hash an exact unhashed alias
- * @entry: dentry to add
- * @inode: The inode to go with this dentry
- *
- * If an unhashed dentry with the same name/parent and desired
- * inode already exists, hash and return it. Otherwise, return
- * NULL.
- *
- * Parent directory should be locked.
- */
-struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
+struct dentry *d_make_persistent(struct dentry *dentry, struct inode *inode)
{
- struct dentry *alias;
- unsigned int hash = entry->d_name.hash;
-
+ WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
+ WARN_ON(!inode);
+ security_d_instantiate(dentry, inode);
spin_lock(&inode->i_lock);
- hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
- /*
- * Don't need alias->d_lock here, because aliases with
- * d_parent == entry->d_parent are not subject to name or
- * parent changes, because the parent inode i_mutex is held.
- */
- if (alias->d_name.hash != hash)
- continue;
- if (alias->d_parent != entry->d_parent)
- continue;
- if (!d_same_name(alias, entry->d_parent, &entry->d_name))
- continue;
- spin_lock(&alias->d_lock);
- if (!d_unhashed(alias)) {
- spin_unlock(&alias->d_lock);
- alias = NULL;
- } else {
- dget_dlock(alias);
- __d_rehash(alias);
- spin_unlock(&alias->d_lock);
- }
- spin_unlock(&inode->i_lock);
- return alias;
- }
+ spin_lock(&dentry->d_lock);
+ __d_instantiate(dentry, inode);
+ dentry->d_flags |= DCACHE_PERSISTENT;
+ dget_dlock(dentry);
+ if (d_unhashed(dentry))
+ __d_rehash(dentry);
+ spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
- return NULL;
+ return dentry;
}
-EXPORT_SYMBOL(d_exact_alias);
+EXPORT_SYMBOL(d_make_persistent);
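d_make_persistent() bundles what pseudo-filesystems used to open-code: instantiate, take the long-term reference, mark the dentry persistent and (re)hash it, all under one d_lock/i_lock pair. The debugfs and devpts conversions later in this patch use it for every created object; a minimal sketch of a hypothetical pseudo-fs helper built on the same simple_start_creating()/simple_done_creating() pattern:

    static int myfs_add_ctl(struct dentry *parent)
    {
            struct dentry *dentry;
            struct inode *inode;

            dentry = simple_start_creating(parent, "ctl");
            if (IS_ERR(dentry))
                    return PTR_ERR(dentry);
            inode = new_inode(parent->d_sb);
            if (!inode) {
                    simple_done_creating(dentry);
                    return -ENOMEM;
            }
            inode->i_mode = S_IFREG | 0644;
            simple_inode_init_ts(inode);
            /* instantiate + pin (DCACHE_PERSISTENT) + rehash in one call */
            d_make_persistent(dentry, inode);
            simple_done_creating(dentry);
            return 0;
    }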
static void swap_names(struct dentry *dentry, struct dentry *target)
{
@@ -2723,16 +2808,15 @@ static void swap_names(struct dentry *dentry, struct dentry *target)
/*
* Both external: swap the pointers
*/
- swap(target->d_name.name, dentry->d_name.name);
+ swap(target->__d_name.name, dentry->__d_name.name);
} else {
/*
* dentry:internal, target:external. Steal target's
* storage and make target internal.
*/
- memcpy(target->d_iname, dentry->d_name.name,
- dentry->d_name.len + 1);
- dentry->d_name.name = target->d_name.name;
- target->d_name.name = target->d_iname;
+ dentry->__d_name.name = target->__d_name.name;
+ target->d_shortname = dentry->d_shortname;
+ target->__d_name.name = target->d_shortname.string;
}
} else {
if (unlikely(dname_external(dentry))) {
@@ -2740,23 +2824,19 @@ static void swap_names(struct dentry *dentry, struct dentry *target)
* dentry:external, target:internal. Give dentry's
* storage to target and make dentry internal
*/
- memcpy(dentry->d_iname, target->d_name.name,
- target->d_name.len + 1);
- target->d_name.name = dentry->d_name.name;
- dentry->d_name.name = dentry->d_iname;
+ target->__d_name.name = dentry->__d_name.name;
+ dentry->d_shortname = target->d_shortname;
+ dentry->__d_name.name = dentry->d_shortname.string;
} else {
/*
* Both are internal.
*/
- unsigned int i;
- BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
- for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
- swap(((long *) &dentry->d_iname)[i],
- ((long *) &target->d_iname)[i]);
- }
+ for (int i = 0; i < DNAME_INLINE_WORDS; i++)
+ swap(dentry->d_shortname.words[i],
+ target->d_shortname.words[i]);
}
}
- swap(dentry->d_name.hash_len, target->d_name.hash_len);
+ swap(dentry->__d_name.hash_len, target->__d_name.hash_len);
}
static void copy_name(struct dentry *dentry, struct dentry *target)
@@ -2765,16 +2845,15 @@ static void copy_name(struct dentry *dentry, struct dentry *target)
if (unlikely(dname_external(dentry)))
old_name = external_name(dentry);
if (unlikely(dname_external(target))) {
- atomic_inc(&external_name(target)->u.count);
- dentry->d_name = target->d_name;
+ atomic_inc(&external_name(target)->count);
+ dentry->__d_name = target->__d_name;
} else {
- memcpy(dentry->d_iname, target->d_name.name,
- target->d_name.len + 1);
- dentry->d_name.name = dentry->d_iname;
- dentry->d_name.hash_len = target->d_name.hash_len;
+ dentry->d_shortname = target->d_shortname;
+ dentry->__d_name.name = dentry->d_shortname.string;
+ dentry->__d_name.hash_len = target->__d_name.hash_len;
}
- if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
- kfree_rcu(old_name, u.head);
+ if (old_name && likely(atomic_dec_and_test(&old_name->count)))
+ kfree_rcu(old_name, head);
}
/*
@@ -2783,10 +2862,10 @@ static void copy_name(struct dentry *dentry, struct dentry *target)
* @target: new dentry
* @exchange: exchange the two dentries
*
- * Update the dcache to reflect the move of a file name. Negative
- * dcache entries should not be moved in this way. Caller must hold
- * rename_lock, the i_mutex of the source and target directories,
- * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
+ * Update the dcache to reflect the move of a file name. Negative dcache
+ * entries should not be moved in this way. Caller must hold rename_lock, the
+ * i_rwsem of the source and target directories (exclusively), and the sb->
+ * s_vfs_rename_mutex if they differ. See lock_rename().
*/
static void __d_move(struct dentry *dentry, struct dentry *target,
bool exchange)
@@ -2908,6 +2987,7 @@ void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
write_sequnlock(&rename_lock);
}
+EXPORT_SYMBOL(d_exchange);
/**
* d_ancestor - search for an ancestor
@@ -2932,7 +3012,7 @@ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
* This helper attempts to cope with remotely renamed directories
*
* It assumes that the caller is already holding
- * dentry->d_parent->d_inode->i_mutex, and rename_lock
+ * dentry->d_parent->d_inode->i_rwsem, and rename_lock
*
* Note: If ever the locking in lock_rename() changes, then please
* remember to update this too...
@@ -2955,7 +3035,12 @@ static int __d_unalias(struct dentry *dentry, struct dentry *alias)
goto out_err;
m2 = &alias->d_parent->d_inode->i_rwsem;
out_unalias:
+ if (alias->d_op && alias->d_op->d_unalias_trylock &&
+ !alias->d_op->d_unalias_trylock(alias))
+ goto out_err;
__d_move(alias, dentry, false);
+ if (alias->d_op && alias->d_op->d_unalias_unlock)
+ alias->d_op->d_unalias_unlock(alias);
ret = 0;
out_err:
if (m2)
@@ -2965,30 +3050,8 @@ out_err:
return ret;
}
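The two new optional hooks let a filesystem veto or bracket the cross-directory move that __d_unalias() performs on its behalf. A hedged sketch of how a filesystem with its own rename serialization might wire them up; MYFS_SB() and the rename_lock field are hypothetical, and the prototypes are assumed from the call sites above:

    static bool myfs_d_unalias_trylock(const struct dentry *alias)
    {
            return mutex_trylock(&MYFS_SB(alias->d_sb)->rename_lock);
    }

    static void myfs_d_unalias_unlock(const struct dentry *alias)
    {
            mutex_unlock(&MYFS_SB(alias->d_sb)->rename_lock);
    }

    static const struct dentry_operations myfs_dentry_ops = {
            .d_unalias_trylock      = myfs_d_unalias_trylock,
            .d_unalias_unlock       = myfs_d_unalias_unlock,
    };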
-/**
- * d_splice_alias - splice a disconnected dentry into the tree if one exists
- * @inode: the inode which may have a disconnected dentry
- * @dentry: a negative dentry which we want to point to the inode.
- *
- * If inode is a directory and has an IS_ROOT alias, then d_move that in
- * place of the given dentry and return it, else simply d_add the inode
- * to the dentry and return NULL.
- *
- * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
- * we should error out: directories can't have multiple aliases.
- *
- * This is needed in the lookup routine of any filesystem that is exportable
- * (via knfsd) so that we can build dcache paths to directories effectively.
- *
- * If a dentry was found and moved, then it is returned. Otherwise NULL
- * is returned. This matches the expected return value of ->lookup.
- *
- * Cluster filesystems may call this function with a negative, hashed dentry.
- * In that case, we know that the inode will be a regular file, and also this
- * will only occur during atomic_open. So we need to check for the dentry
- * being already hashed only in the final case.
- */
-struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
+struct dentry *d_splice_alias_ops(struct inode *inode, struct dentry *dentry,
+ const struct dentry_operations *ops)
{
if (IS_ERR(inode))
return ERR_CAST(inode);
@@ -3034,9 +3097,37 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
}
}
out:
- __d_add(dentry, inode);
+ __d_add(dentry, inode, ops);
return NULL;
}
+
+/**
+ * d_splice_alias - splice a disconnected dentry into the tree if one exists
+ * @inode: the inode which may have a disconnected dentry
+ * @dentry: a negative dentry which we want to point to the inode.
+ *
+ * If inode is a directory and has an IS_ROOT alias, then d_move that in
+ * place of the given dentry and return it, else simply d_add the inode
+ * to the dentry and return NULL.
+ *
+ * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
+ * we should error out: directories can't have multiple aliases.
+ *
+ * This is needed in the lookup routine of any filesystem that is exportable
+ * (via knfsd) so that we can build dcache paths to directories effectively.
+ *
+ * If a dentry was found and moved, then it is returned. Otherwise NULL
+ * is returned. This matches the expected return value of ->lookup.
+ *
+ * Cluster filesystems may call this function with a negative, hashed dentry.
+ * In that case, we know that the inode will be a regular file, and also this
+ * will only occur during atomic_open. So we need to check for the dentry
+ * being already hashed only in the final case.
+ */
+struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
+{
+ return d_splice_alias_ops(inode, dentry, NULL);
+}
EXPORT_SYMBOL(d_splice_alias);
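In practice an exportable filesystem's ->lookup ends by handing whatever it found to d_splice_alias() and returning the result unchanged. A minimal sketch, with myfs_iget() standing in for the filesystem's real inode lookup:

    static struct dentry *myfs_lookup(struct inode *dir, struct dentry *dentry,
                                      unsigned int flags)
    {
            struct inode *inode = myfs_iget(dir->i_sb, dentry->d_name.name);

            /* inode may be NULL (negative) or an ERR_PTR(); d_splice_alias()
             * copes with both and with a preexisting directory alias */
            return d_splice_alias(inode, dentry);
    }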
/*
@@ -3079,36 +3170,16 @@ bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
}
EXPORT_SYMBOL(is_subdir);
-static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
-{
- struct dentry *root = data;
- if (dentry != root) {
- if (d_unhashed(dentry) || !dentry->d_inode)
- return D_WALK_SKIP;
-
- if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
- dentry->d_flags |= DCACHE_GENOCIDE;
- dentry->d_lockref.count--;
- }
- }
- return D_WALK_CONTINUE;
-}
-
-void d_genocide(struct dentry *parent)
-{
- d_walk(parent, parent, d_genocide_kill);
-}
-
void d_mark_tmpfile(struct file *file, struct inode *inode)
{
struct dentry *dentry = file->f_path.dentry;
- BUG_ON(dentry->d_name.name != dentry->d_iname ||
+ BUG_ON(dname_external(dentry) ||
!hlist_unhashed(&dentry->d_u.d_alias) ||
!d_unlinked(dentry));
spin_lock(&dentry->d_parent->d_lock);
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
- dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
+ dentry->__d_name.len = sprintf(dentry->d_shortname.string, "#%llu",
(unsigned long long)inode->i_ino);
spin_unlock(&dentry->d_lock);
spin_unlock(&dentry->d_parent->d_lock);
@@ -3194,9 +3265,10 @@ static void __init dcache_init(void)
* but it is probably not worth it because of the cache nature
* of the dcache.
*/
- dentry_cache = KMEM_CACHE_USERCOPY(dentry,
+ __dentry_cache = KMEM_CACHE_USERCOPY(dentry,
SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_ACCOUNT,
- d_iname);
+ d_shortname.string);
+ runtime_const_init(ptr, __dentry_cache);
/* Hash may have been set up in dcache_init_early */
if (!hashdist)
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 47dc96dfe386..3ec3324c2060 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -47,39 +47,19 @@ const struct file_operations debugfs_noop_file_operations = {
#define F_DENTRY(filp) ((filp)->f_path.dentry)
-const struct file_operations *debugfs_real_fops(const struct file *filp)
+void *debugfs_get_aux(const struct file *file)
{
- struct debugfs_fsdata *fsd = F_DENTRY(filp)->d_fsdata;
-
- if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) {
- /*
- * Urgh, we've been called w/o a protecting
- * debugfs_file_get().
- */
- WARN_ON(1);
- return NULL;
- }
-
- return fsd->real_fops;
+ return DEBUGFS_I(file_inode(file))->aux;
}
-EXPORT_SYMBOL_GPL(debugfs_real_fops);
+EXPORT_SYMBOL_GPL(debugfs_get_aux);
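debugfs_get_aux() returns the opaque cookie stored in the inode's new aux slot at creation time, separate from the ->i_private data pointer. A sketch of both ends, with my_ctx and the surrounding names being hypothetical:

    struct my_ctx { const char *msg; size_t len; };     /* hypothetical */

    static ssize_t my_read(struct file *file, char __user *buf,
                           size_t len, loff_t *ppos)
    {
            struct my_ctx *ctx = debugfs_get_aux(file);  /* aux, not ->i_private */

            return simple_read_from_buffer(buf, len, ppos, ctx->msg, ctx->len);
    }

    static const struct file_operations my_fops = {
            .owner  = THIS_MODULE,
            .read   = my_read,
    };

    static void my_debugfs_init(struct dentry *parent, void *drvdata,
                                struct my_ctx *ctx)
    {
            /* drvdata lands in ->i_private as before; ctx goes to the aux slot */
            debugfs_create_file_full("greeting", 0444, parent, drvdata, ctx,
                                     &my_fops);
    }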
-/**
- * debugfs_file_get - mark the beginning of file data access
- * @dentry: the dentry object whose data is being accessed.
- *
- * Up to a matching call to debugfs_file_put(), any successive call
- * into the file removing functions debugfs_remove() and
- * debugfs_remove_recursive() will block. Since associated private
- * file data may only get freed after a successful return of any of
- * the removal functions, you may safely access it after a successful
- * call to debugfs_file_get() without worrying about lifetime issues.
- *
- * If -%EIO is returned, the file has already been removed and thus,
- * it is not safe to access any of its data. If, on the other hand,
- * it is allowed to access the file data, zero is returned.
- */
-int debugfs_file_get(struct dentry *dentry)
+enum dbgfs_get_mode {
+ DBGFS_GET_ALREADY,
+ DBGFS_GET_REGULAR,
+ DBGFS_GET_SHORT,
+};
+
+static int __debugfs_file_get(struct dentry *dentry, enum dbgfs_get_mode mode)
{
struct debugfs_fsdata *fsd;
void *d_fsd;
@@ -93,32 +73,55 @@ int debugfs_file_get(struct dentry *dentry)
return -EINVAL;
d_fsd = READ_ONCE(dentry->d_fsdata);
- if (!((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) {
+ if (d_fsd) {
fsd = d_fsd;
} else {
+ struct inode *inode = dentry->d_inode;
+ unsigned int methods = 0;
+
+ if (WARN_ON(mode == DBGFS_GET_ALREADY))
+ return -EINVAL;
+
fsd = kmalloc(sizeof(*fsd), GFP_KERNEL);
if (!fsd)
return -ENOMEM;
- if ((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_SHORT_FOPS_BIT) {
+ if (mode == DBGFS_GET_SHORT) {
+ const struct debugfs_short_fops *ops;
+ ops = fsd->short_fops = DEBUGFS_I(inode)->short_fops;
+ if (ops->llseek)
+ methods |= HAS_LSEEK;
+ if (ops->read)
+ methods |= HAS_READ;
+ if (ops->write)
+ methods |= HAS_WRITE;
fsd->real_fops = NULL;
- fsd->short_fops = (void *)((unsigned long)d_fsd &
- ~(DEBUGFS_FSDATA_IS_REAL_FOPS_BIT |
- DEBUGFS_FSDATA_IS_SHORT_FOPS_BIT));
} else {
- fsd->real_fops = (void *)((unsigned long)d_fsd &
- ~DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
+ const struct file_operations *ops;
+ ops = fsd->real_fops = DEBUGFS_I(inode)->real_fops;
+ if (ops->llseek)
+ methods |= HAS_LSEEK;
+ if (ops->read)
+ methods |= HAS_READ;
+ if (ops->write)
+ methods |= HAS_WRITE;
+ if (ops->unlocked_ioctl)
+ methods |= HAS_IOCTL;
+ if (ops->poll)
+ methods |= HAS_POLL;
fsd->short_fops = NULL;
}
+ fsd->methods = methods;
refcount_set(&fsd->active_users, 1);
init_completion(&fsd->active_users_drained);
INIT_LIST_HEAD(&fsd->cancellations);
mutex_init(&fsd->cancellations_mtx);
- if (cmpxchg(&dentry->d_fsdata, d_fsd, fsd) != d_fsd) {
+ d_fsd = cmpxchg(&dentry->d_fsdata, NULL, fsd);
+ if (d_fsd) {
mutex_destroy(&fsd->cancellations_mtx);
kfree(fsd);
- fsd = READ_ONCE(dentry->d_fsdata);
+ fsd = d_fsd;
}
}
@@ -138,6 +141,26 @@ int debugfs_file_get(struct dentry *dentry)
return 0;
}
+
+/**
+ * debugfs_file_get - mark the beginning of file data access
+ * @dentry: the dentry object whose data is being accessed.
+ *
+ * Up to a matching call to debugfs_file_put(), any successive call
+ * into the file removing functions debugfs_remove() and
+ * debugfs_remove_recursive() will block. Since associated private
+ * file data may only get freed after a successful return of any of
+ * the removal functions, you may safely access it after a successful
+ * call to debugfs_file_get() without worrying about lifetime issues.
+ *
+ * If -%EIO is returned, the file has already been removed and thus,
+ * it is not safe to access any of its data. If, on the other hand,
+ * it is allowed to access the file data, zero is returned.
+ */
+int debugfs_file_get(struct dentry *dentry)
+{
+ return __debugfs_file_get(dentry, DBGFS_GET_ALREADY);
+}
EXPORT_SYMBOL_GPL(debugfs_file_get);
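For users of the "unsafe" creation variant, the contract described above means bracketing every access to file data with a get/put pair. A minimal sketch (my_unsafe_read is hypothetical):

    static ssize_t my_unsafe_read(struct file *file, char __user *buf,
                                  size_t len, loff_t *ppos)
    {
            struct dentry *dentry = file->f_path.dentry;
            ssize_t ret;

            ret = debugfs_file_get(dentry);
            if (ret)
                    return ret;     /* -EIO: the file has already been removed */
            ret = simple_read_from_buffer(buf, len, ppos, "ok\n", 3);
            debugfs_file_put(dentry);
            return ret;
    }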
/**
@@ -195,8 +218,7 @@ void debugfs_enter_cancellation(struct file *file,
return;
fsd = READ_ONCE(dentry->d_fsdata);
- if (WARN_ON(!fsd ||
- ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)))
+ if (WARN_ON(!fsd))
return;
mutex_lock(&fsd->cancellations_mtx);
@@ -227,8 +249,7 @@ void debugfs_leave_cancellation(struct file *file,
return;
fsd = READ_ONCE(dentry->d_fsdata);
- if (WARN_ON(!fsd ||
- ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)))
+ if (WARN_ON(!fsd))
return;
mutex_lock(&fsd->cancellations_mtx);
@@ -264,15 +285,13 @@ static int debugfs_locked_down(struct inode *inode,
static int open_proxy_open(struct inode *inode, struct file *filp)
{
struct dentry *dentry = F_DENTRY(filp);
- const struct file_operations *real_fops = NULL;
+ const struct file_operations *real_fops = DEBUGFS_I(inode)->real_fops;
int r;
- r = debugfs_file_get(dentry);
+ r = __debugfs_file_get(dentry, DBGFS_GET_REGULAR);
if (r)
return r == -EIO ? -ENOENT : r;
- real_fops = debugfs_real_fops(filp);
-
r = debugfs_locked_down(inode, filp, real_fops);
if (r)
goto out;
@@ -309,80 +328,93 @@ const struct file_operations debugfs_open_proxy_file_operations = {
#define PROTO(args...) args
#define ARGS(args...) args
-#define FULL_PROXY_FUNC(name, ret_type, filp, proto, args) \
+#define FULL_PROXY_FUNC(name, ret_type, filp, proto, args, bit, ret) \
static ret_type full_proxy_ ## name(proto) \
{ \
- struct dentry *dentry = F_DENTRY(filp); \
- const struct file_operations *real_fops; \
+ struct dentry *dentry = F_DENTRY(filp); \
+ struct debugfs_fsdata *fsd = dentry->d_fsdata; \
ret_type r; \
\
+ if (!(fsd->methods & bit)) \
+ return ret; \
r = debugfs_file_get(dentry); \
if (unlikely(r)) \
return r; \
- real_fops = debugfs_real_fops(filp); \
- r = real_fops->name(args); \
+ r = fsd->real_fops->name(args); \
debugfs_file_put(dentry); \
return r; \
}
-#define FULL_PROXY_FUNC_BOTH(name, ret_type, filp, proto, args) \
-static ret_type full_proxy_ ## name(proto) \
+#define SHORT_PROXY_FUNC(name, ret_type, filp, proto, args, bit, ret) \
+static ret_type short_proxy_ ## name(proto) \
{ \
struct dentry *dentry = F_DENTRY(filp); \
- struct debugfs_fsdata *fsd; \
+ struct debugfs_fsdata *fsd = dentry->d_fsdata; \
ret_type r; \
\
+ if (!(fsd->methods & bit)) \
+ return ret; \
r = debugfs_file_get(dentry); \
if (unlikely(r)) \
return r; \
- fsd = dentry->d_fsdata; \
- if (fsd->real_fops) \
- r = fsd->real_fops->name(args); \
- else \
- r = fsd->short_fops->name(args); \
+ r = fsd->short_fops->name(args); \
debugfs_file_put(dentry); \
return r; \
}
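For reference, the short-fops variant now guards on the precomputed ->methods bitmask before the usual get/call/put sequence; hand-expanding SHORT_PROXY_FUNC(read, ...) from the invocation below gives roughly the following (an approximation of the preprocessor output, not separate code):

    static ssize_t short_proxy_read(struct file *filp, char __user *buf,
                                    size_t size, loff_t *ppos)
    {
            struct dentry *dentry = F_DENTRY(filp);
            struct debugfs_fsdata *fsd = dentry->d_fsdata;
            ssize_t r;

            if (!(fsd->methods & HAS_READ))
                    return -EINVAL;
            r = debugfs_file_get(dentry);
            if (unlikely(r))
                    return r;
            r = fsd->short_fops->read(filp, buf, size, ppos);
            debugfs_file_put(dentry);
            return r;
    }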
-FULL_PROXY_FUNC_BOTH(llseek, loff_t, filp,
- PROTO(struct file *filp, loff_t offset, int whence),
- ARGS(filp, offset, whence));
+SHORT_PROXY_FUNC(llseek, loff_t, filp,
+ PROTO(struct file *filp, loff_t offset, int whence),
+ ARGS(filp, offset, whence), HAS_LSEEK, -ESPIPE);
+
+FULL_PROXY_FUNC(llseek, loff_t, filp,
+ PROTO(struct file *filp, loff_t offset, int whence),
+ ARGS(filp, offset, whence), HAS_LSEEK, -ESPIPE);
+
+SHORT_PROXY_FUNC(read, ssize_t, filp,
+ PROTO(struct file *filp, char __user *buf, size_t size,
+ loff_t *ppos),
+ ARGS(filp, buf, size, ppos), HAS_READ, -EINVAL);
+
+FULL_PROXY_FUNC(read, ssize_t, filp,
+ PROTO(struct file *filp, char __user *buf, size_t size,
+ loff_t *ppos),
+ ARGS(filp, buf, size, ppos), HAS_READ, -EINVAL);
-FULL_PROXY_FUNC_BOTH(read, ssize_t, filp,
- PROTO(struct file *filp, char __user *buf, size_t size,
- loff_t *ppos),
- ARGS(filp, buf, size, ppos));
+SHORT_PROXY_FUNC(write, ssize_t, filp,
+ PROTO(struct file *filp, const char __user *buf,
+ size_t size, loff_t *ppos),
+ ARGS(filp, buf, size, ppos), HAS_WRITE, -EINVAL);
-FULL_PROXY_FUNC_BOTH(write, ssize_t, filp,
- PROTO(struct file *filp, const char __user *buf,
- size_t size, loff_t *ppos),
- ARGS(filp, buf, size, ppos));
+FULL_PROXY_FUNC(write, ssize_t, filp,
+ PROTO(struct file *filp, const char __user *buf,
+ size_t size, loff_t *ppos),
+ ARGS(filp, buf, size, ppos), HAS_WRITE, -EINVAL);
FULL_PROXY_FUNC(unlocked_ioctl, long, filp,
PROTO(struct file *filp, unsigned int cmd, unsigned long arg),
- ARGS(filp, cmd, arg));
+ ARGS(filp, cmd, arg), HAS_IOCTL, -ENOTTY);
static __poll_t full_proxy_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct dentry *dentry = F_DENTRY(filp);
+ struct debugfs_fsdata *fsd = dentry->d_fsdata;
__poll_t r = 0;
- const struct file_operations *real_fops;
+ if (!(fsd->methods & HAS_POLL))
+ return DEFAULT_POLLMASK;
if (debugfs_file_get(dentry))
return EPOLLHUP;
- real_fops = debugfs_real_fops(filp);
- r = real_fops->poll(filp, wait);
+ r = fsd->real_fops->poll(filp, wait);
debugfs_file_put(dentry);
return r;
}
-static int full_proxy_release(struct inode *inode, struct file *filp)
+static int full_proxy_release(struct inode *inode, struct file *file)
{
- const struct dentry *dentry = F_DENTRY(filp);
- const struct file_operations *real_fops = debugfs_real_fops(filp);
- const struct file_operations *proxy_fops = filp->f_op;
+ struct debugfs_fsdata *fsd = F_DENTRY(file)->d_fsdata;
+ const struct file_operations *real_fops = fsd->real_fops;
int r = 0;
/*
@@ -391,48 +423,21 @@ static int full_proxy_release(struct inode *inode, struct file *filp)
* not to leak any resources. Releasers must not assume that
* ->i_private is still meaningful here.
*/
- if (real_fops && real_fops->release)
- r = real_fops->release(inode, filp);
+ if (real_fops->release)
+ r = real_fops->release(inode, file);
- replace_fops(filp, d_inode(dentry)->i_fop);
- kfree(proxy_fops);
fops_put(real_fops);
return r;
}
-static void __full_proxy_fops_init(struct file_operations *proxy_fops,
- struct debugfs_fsdata *fsd)
-{
- proxy_fops->release = full_proxy_release;
-
- if ((fsd->real_fops && fsd->real_fops->llseek) ||
- (fsd->short_fops && fsd->short_fops->llseek))
- proxy_fops->llseek = full_proxy_llseek;
-
- if ((fsd->real_fops && fsd->real_fops->read) ||
- (fsd->short_fops && fsd->short_fops->read))
- proxy_fops->read = full_proxy_read;
-
- if ((fsd->real_fops && fsd->real_fops->write) ||
- (fsd->short_fops && fsd->short_fops->write))
- proxy_fops->write = full_proxy_write;
-
- if (fsd->real_fops && fsd->real_fops->poll)
- proxy_fops->poll = full_proxy_poll;
-
- if (fsd->real_fops && fsd->real_fops->unlocked_ioctl)
- proxy_fops->unlocked_ioctl = full_proxy_unlocked_ioctl;
-}
-
-static int full_proxy_open(struct inode *inode, struct file *filp)
+static int full_proxy_open_regular(struct inode *inode, struct file *filp)
{
struct dentry *dentry = F_DENTRY(filp);
const struct file_operations *real_fops;
- struct file_operations *proxy_fops = NULL;
struct debugfs_fsdata *fsd;
int r;
- r = debugfs_file_get(dentry);
+ r = __debugfs_file_get(dentry, DBGFS_GET_REGULAR);
if (r)
return r == -EIO ? -ENOENT : r;
@@ -442,7 +447,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
if (r)
goto out;
- if (real_fops && !fops_get(real_fops)) {
+ if (!fops_get(real_fops)) {
#ifdef CONFIG_MODULES
if (real_fops->owner &&
real_fops->owner->state == MODULE_STATE_GOING) {
@@ -458,41 +463,52 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
goto out;
}
- proxy_fops = kzalloc(sizeof(*proxy_fops), GFP_KERNEL);
- if (!proxy_fops) {
- r = -ENOMEM;
- goto free_proxy;
- }
- __full_proxy_fops_init(proxy_fops, fsd);
- replace_fops(filp, proxy_fops);
-
- if (!real_fops || real_fops->open) {
- if (real_fops)
- r = real_fops->open(inode, filp);
- else
- r = simple_open(inode, filp);
+ if (real_fops->open) {
+ r = real_fops->open(inode, filp);
if (r) {
- replace_fops(filp, d_inode(dentry)->i_fop);
- goto free_proxy;
- } else if (filp->f_op != proxy_fops) {
+ fops_put(real_fops);
+ } else if (filp->f_op != &debugfs_full_proxy_file_operations) {
/* No protection against file removal anymore. */
WARN(1, "debugfs file owner replaced proxy fops: %pd",
dentry);
- goto free_proxy;
+ fops_put(real_fops);
}
}
-
- goto out;
-free_proxy:
- kfree(proxy_fops);
- fops_put(real_fops);
out:
debugfs_file_put(dentry);
return r;
}
const struct file_operations debugfs_full_proxy_file_operations = {
- .open = full_proxy_open,
+ .open = full_proxy_open_regular,
+ .release = full_proxy_release,
+ .llseek = full_proxy_llseek,
+ .read = full_proxy_read,
+ .write = full_proxy_write,
+ .poll = full_proxy_poll,
+ .unlocked_ioctl = full_proxy_unlocked_ioctl
+};
+
+static int full_proxy_open_short(struct inode *inode, struct file *filp)
+{
+ struct dentry *dentry = F_DENTRY(filp);
+ int r;
+
+ r = __debugfs_file_get(dentry, DBGFS_GET_SHORT);
+ if (r)
+ return r == -EIO ? -ENOENT : r;
+ r = debugfs_locked_down(inode, filp, NULL);
+ if (!r)
+ r = simple_open(inode, filp);
+ debugfs_file_put(dentry);
+ return r;
+}
+
+const struct file_operations debugfs_full_short_proxy_file_operations = {
+ .open = full_proxy_open_short,
+ .llseek = short_proxy_llseek,
+ .read = short_proxy_read,
+ .write = short_proxy_write,
};
ssize_t debugfs_attr_read(struct file *file, char __user *buf,
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 38a9c7eb97e6..4b263c328ed2 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -35,7 +35,7 @@
static struct vfsmount *debugfs_mount;
static int debugfs_mount_count;
static bool debugfs_registered;
-static unsigned int debugfs_allow __ro_after_init = DEFAULT_DEBUGFS_ALLOW_BITS;
+static bool debugfs_enabled __ro_after_init = IS_ENABLED(CONFIG_DEBUG_FS_ALLOW_ALL);
/*
* Don't allow access attributes to be changed whilst the kernel is locked down
@@ -183,6 +183,9 @@ static int debugfs_reconfigure(struct fs_context *fc)
struct debugfs_fs_info *sb_opts = sb->s_fs_info;
struct debugfs_fs_info *new_opts = fc->s_fs_info;
+ if (!new_opts)
+ return 0;
+
sync_filesystem(sb);
/* structure copy of new mount options to sb */
@@ -208,16 +211,34 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
return 0;
}
+static struct kmem_cache *debugfs_inode_cachep __ro_after_init;
+
+static void init_once(void *foo)
+{
+ struct debugfs_inode_info *info = foo;
+ inode_init_once(&info->vfs_inode);
+}
+
+static struct inode *debugfs_alloc_inode(struct super_block *sb)
+{
+ struct debugfs_inode_info *info;
+ info = alloc_inode_sb(sb, debugfs_inode_cachep, GFP_KERNEL);
+ if (!info)
+ return NULL;
+ return &info->vfs_inode;
+}
+
static void debugfs_free_inode(struct inode *inode)
{
if (S_ISLNK(inode->i_mode))
kfree(inode->i_link);
- free_inode_nonrcu(inode);
+ kmem_cache_free(debugfs_inode_cachep, DEBUGFS_I(inode));
}
static const struct super_operations debugfs_super_operations = {
.statfs = simple_statfs,
.show_options = debugfs_show_options,
+ .alloc_inode = debugfs_alloc_inode,
.free_inode = debugfs_free_inode,
};
@@ -225,27 +246,21 @@ static void debugfs_release_dentry(struct dentry *dentry)
{
struct debugfs_fsdata *fsd = dentry->d_fsdata;
- if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
- return;
-
- /* check it wasn't a dir (no fsdata) or automount (no real_fops) */
- if (fsd && fsd->real_fops) {
+ if (fsd) {
WARN_ON(!list_empty(&fsd->cancellations));
mutex_destroy(&fsd->cancellations_mtx);
}
-
kfree(fsd);
}
static struct vfsmount *debugfs_automount(struct path *path)
{
- struct debugfs_fsdata *fsd = path->dentry->d_fsdata;
+ struct inode *inode = path->dentry->d_inode;
- return fsd->automount(path->dentry, d_inode(path->dentry)->i_private);
+ return DEBUGFS_I(inode)->automount(path->dentry, inode->i_private);
}
static const struct dentry_operations debugfs_dops = {
- .d_delete = always_delete_dentry,
.d_release = debugfs_release_dentry,
.d_automount = debugfs_automount,
};
@@ -260,7 +275,8 @@ static int debugfs_fill_super(struct super_block *sb, struct fs_context *fc)
return err;
sb->s_op = &debugfs_super_operations;
- sb->s_d_op = &debugfs_dops;
+ set_default_d_op(sb, &debugfs_dops);
+ sb->s_d_flags |= DCACHE_DONTCACHE;
debugfs_apply_options(sb);
@@ -269,10 +285,13 @@ static int debugfs_fill_super(struct super_block *sb, struct fs_context *fc)
static int debugfs_get_tree(struct fs_context *fc)
{
- if (!(debugfs_allow & DEBUGFS_ALLOW_API))
- return -EPERM;
+ int err;
- return get_tree_single(fc, debugfs_fill_super);
+ err = get_tree_single(fc, debugfs_fill_super);
+ if (err)
+ return err;
+
+ return debugfs_reconfigure(fc);
}
static void debugfs_free_fc(struct fs_context *fc)
@@ -307,7 +326,7 @@ static struct file_system_type debug_fs_type = {
.name = "debugfs",
.init_fs_context = debugfs_init_fs_context,
.parameters = debugfs_param_specs,
- .kill_sb = kill_litter_super,
+ .kill_sb = kill_anon_super,
};
MODULE_ALIAS_FS("debugfs");
@@ -333,19 +352,20 @@ struct dentry *debugfs_lookup(const char *name, struct dentry *parent)
if (!parent)
parent = debugfs_mount->mnt_root;
- dentry = lookup_positive_unlocked(name, parent, strlen(name));
+ dentry = lookup_noperm_positive_unlocked(&QSTR(name), parent);
if (IS_ERR(dentry))
return NULL;
return dentry;
}
EXPORT_SYMBOL_GPL(debugfs_lookup);
-static struct dentry *start_creating(const char *name, struct dentry *parent)
+static struct dentry *debugfs_start_creating(const char *name,
+ struct dentry *parent)
{
struct dentry *dentry;
int error;
- if (!(debugfs_allow & DEBUGFS_ALLOW_API))
+ if (!debugfs_enabled)
return ERR_PTR(-EPERM);
if (!debugfs_initialized())
@@ -371,46 +391,31 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
if (!parent)
parent = debugfs_mount->mnt_root;
- inode_lock(d_inode(parent));
- if (unlikely(IS_DEADDIR(d_inode(parent))))
- dentry = ERR_PTR(-ENOENT);
- else
- dentry = lookup_one_len(name, parent, strlen(name));
- if (!IS_ERR(dentry) && d_really_is_positive(dentry)) {
- if (d_is_dir(dentry))
- pr_err("Directory '%s' with parent '%s' already present!\n",
- name, parent->d_name.name);
- else
- pr_err("File '%s' in directory '%s' already present!\n",
- name, parent->d_name.name);
- dput(dentry);
- dentry = ERR_PTR(-EEXIST);
- }
-
+ dentry = simple_start_creating(parent, name);
if (IS_ERR(dentry)) {
- inode_unlock(d_inode(parent));
+ if (dentry == ERR_PTR(-EEXIST))
+ pr_err("'%s' already exists in '%pd'\n", name, parent);
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
-
return dentry;
}
-static struct dentry *failed_creating(struct dentry *dentry)
+static struct dentry *debugfs_failed_creating(struct dentry *dentry)
{
- inode_unlock(d_inode(dentry->d_parent));
- dput(dentry);
+ simple_done_creating(dentry);
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
return ERR_PTR(-ENOMEM);
}
-static struct dentry *end_creating(struct dentry *dentry)
+static struct dentry *debugfs_end_creating(struct dentry *dentry)
{
- inode_unlock(d_inode(dentry->d_parent));
- return dentry;
+ simple_done_creating(dentry);
+ return dentry; // borrowed
}
static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
+ const void *aux,
const struct file_operations *proxy_fops,
const void *real_fops)
{
@@ -420,66 +425,52 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
if (!(mode & S_IFMT))
mode |= S_IFREG;
BUG_ON(!S_ISREG(mode));
- dentry = start_creating(name, parent);
+ dentry = debugfs_start_creating(name, parent);
if (IS_ERR(dentry))
return dentry;
- if (!(debugfs_allow & DEBUGFS_ALLOW_API)) {
- failed_creating(dentry);
- return ERR_PTR(-EPERM);
- }
-
inode = debugfs_get_inode(dentry->d_sb);
if (unlikely(!inode)) {
pr_err("out of free dentries, can not create file '%s'\n",
name);
- return failed_creating(dentry);
+ return debugfs_failed_creating(dentry);
}
inode->i_mode = mode;
inode->i_private = data;
inode->i_op = &debugfs_file_inode_operations;
+ if (!real_fops)
+ proxy_fops = &debugfs_noop_file_operations;
inode->i_fop = proxy_fops;
- dentry->d_fsdata = (void *)((unsigned long)real_fops |
- DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
+ DEBUGFS_I(inode)->raw = real_fops;
+ DEBUGFS_I(inode)->aux = (void *)aux;
- d_instantiate(dentry, inode);
+ d_make_persistent(dentry, inode);
fsnotify_create(d_inode(dentry->d_parent), dentry);
- return end_creating(dentry);
+ return debugfs_end_creating(dentry);
}
struct dentry *debugfs_create_file_full(const char *name, umode_t mode,
struct dentry *parent, void *data,
+ const void *aux,
const struct file_operations *fops)
{
- if (WARN_ON((unsigned long)fops &
- (DEBUGFS_FSDATA_IS_SHORT_FOPS_BIT |
- DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)))
- return ERR_PTR(-EINVAL);
-
- return __debugfs_create_file(name, mode, parent, data,
- fops ? &debugfs_full_proxy_file_operations :
- &debugfs_noop_file_operations,
+ return __debugfs_create_file(name, mode, parent, data, aux,
+ &debugfs_full_proxy_file_operations,
fops);
}
EXPORT_SYMBOL_GPL(debugfs_create_file_full);
struct dentry *debugfs_create_file_short(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct debugfs_short_fops *fops)
+ struct dentry *parent, void *data,
+ const void *aux,
+ const struct debugfs_short_fops *fops)
{
- if (WARN_ON((unsigned long)fops &
- (DEBUGFS_FSDATA_IS_SHORT_FOPS_BIT |
- DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)))
- return ERR_PTR(-EINVAL);
-
- return __debugfs_create_file(name, mode, parent, data,
- fops ? &debugfs_full_proxy_file_operations :
- &debugfs_noop_file_operations,
- (const void *)((unsigned long)fops |
- DEBUGFS_FSDATA_IS_SHORT_FOPS_BIT));
+ return __debugfs_create_file(name, mode, parent, data, aux,
+ &debugfs_full_short_proxy_file_operations,
+ fops);
}
EXPORT_SYMBOL_GPL(debugfs_create_file_short);
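With the pointer-tag bits gone, short fops get their own proxy table and the caller just supplies a struct debugfs_short_fops plus an optional aux cookie. A sketch, with my_read and the surrounding init helper being hypothetical:

    static const struct debugfs_short_fops my_short_fops = {
            .read   = my_read,              /* hypothetical read handler */
            .llseek = default_llseek,
    };

    static void my_debugfs_init(struct dentry *parent, void *drvdata)
    {
            /* no aux cookie needed here, so pass NULL */
            debugfs_create_file_short("state", 0444, parent, drvdata, NULL,
                                      &my_short_fops);
    }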
@@ -515,9 +506,8 @@ struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode,
const struct file_operations *fops)
{
- return __debugfs_create_file(name, mode, parent, data,
- fops ? &debugfs_open_proxy_file_operations :
- &debugfs_noop_file_operations,
+ return __debugfs_create_file(name, mode, parent, data, NULL,
+ &debugfs_open_proxy_file_operations,
fops);
}
EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe);
@@ -579,22 +569,17 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size);
*/
struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
{
- struct dentry *dentry = start_creating(name, parent);
+ struct dentry *dentry = debugfs_start_creating(name, parent);
struct inode *inode;
if (IS_ERR(dentry))
return dentry;
- if (!(debugfs_allow & DEBUGFS_ALLOW_API)) {
- failed_creating(dentry);
- return ERR_PTR(-EPERM);
- }
-
inode = debugfs_get_inode(dentry->d_sb);
if (unlikely(!inode)) {
pr_err("out of free dentries, can not create directory '%s'\n",
name);
- return failed_creating(dentry);
+ return debugfs_failed_creating(dentry);
}
inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
@@ -603,10 +588,10 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
- d_instantiate(dentry, inode);
+ d_make_persistent(dentry, inode);
inc_nlink(d_inode(dentry->d_parent));
fsnotify_mkdir(d_inode(dentry->d_parent), dentry);
- return end_creating(dentry);
+ return debugfs_end_creating(dentry);
}
EXPORT_SYMBOL_GPL(debugfs_create_dir);
@@ -626,45 +611,29 @@ struct dentry *debugfs_create_automount(const char *name,
debugfs_automount_t f,
void *data)
{
- struct dentry *dentry = start_creating(name, parent);
- struct debugfs_fsdata *fsd;
+ struct dentry *dentry = debugfs_start_creating(name, parent);
struct inode *inode;
if (IS_ERR(dentry))
return dentry;
- fsd = kzalloc(sizeof(*fsd), GFP_KERNEL);
- if (!fsd) {
- failed_creating(dentry);
- return ERR_PTR(-ENOMEM);
- }
-
- fsd->automount = f;
-
- if (!(debugfs_allow & DEBUGFS_ALLOW_API)) {
- failed_creating(dentry);
- kfree(fsd);
- return ERR_PTR(-EPERM);
- }
-
inode = debugfs_get_inode(dentry->d_sb);
if (unlikely(!inode)) {
pr_err("out of free dentries, can not create automount '%s'\n",
name);
- kfree(fsd);
- return failed_creating(dentry);
+ return debugfs_failed_creating(dentry);
}
make_empty_dir_inode(inode);
inode->i_flags |= S_AUTOMOUNT;
inode->i_private = data;
- dentry->d_fsdata = fsd;
+ DEBUGFS_I(inode)->automount = f;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
- d_instantiate(dentry, inode);
+ d_make_persistent(dentry, inode);
inc_nlink(d_inode(dentry->d_parent));
fsnotify_mkdir(d_inode(dentry->d_parent), dentry);
- return end_creating(dentry);
+ return debugfs_end_creating(dentry);
}
EXPORT_SYMBOL(debugfs_create_automount);
@@ -700,7 +669,7 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
if (!link)
return ERR_PTR(-ENOMEM);
- dentry = start_creating(name, parent);
+ dentry = debugfs_start_creating(name, parent);
if (IS_ERR(dentry)) {
kfree(link);
return dentry;
@@ -711,13 +680,13 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
pr_err("out of free dentries, can not create symlink '%s'\n",
name);
kfree(link);
- return failed_creating(dentry);
+ return debugfs_failed_creating(dentry);
}
inode->i_mode = S_IFLNK | S_IRWXUGO;
inode->i_op = &debugfs_symlink_inode_operations;
inode->i_link = link;
- d_instantiate(dentry, inode);
- return end_creating(dentry);
+ d_make_persistent(dentry, inode);
+ return debugfs_end_creating(dentry);
}
EXPORT_SYMBOL_GPL(debugfs_create_symlink);
@@ -733,7 +702,7 @@ static void __debugfs_file_removed(struct dentry *dentry)
*/
smp_mb();
fsd = READ_ONCE(dentry->d_fsdata);
- if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
+ if (!fsd)
return;
/* if this was the last reference, we're done */
@@ -837,76 +806,66 @@ void debugfs_lookup_and_remove(const char *name, struct dentry *parent)
EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove);
/**
- * debugfs_rename - rename a file/directory in the debugfs filesystem
- * @old_dir: a pointer to the parent dentry for the renamed object. This
- * should be a directory dentry.
- * @old_dentry: dentry of an object to be renamed.
- * @new_dir: a pointer to the parent dentry where the object should be
- * moved. This should be a directory dentry.
- * @new_name: a pointer to a string containing the target name.
+ * debugfs_change_name - rename a file/directory in the debugfs filesystem
+ * @dentry: dentry of an object to be renamed.
+ * @fmt: format for new name
*
* This function renames a file/directory in debugfs. The target must not
* exist for rename to succeed.
*
- * This function will return a pointer to old_dentry (which is updated to
- * reflect renaming) if it succeeds. If an error occurs, ERR_PTR(-ERROR)
- * will be returned.
+ * This function will return 0 on success and -E... on failure.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
*/
-struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
- struct dentry *new_dir, const char *new_name)
+int __printf(2, 3) debugfs_change_name(struct dentry *dentry, const char *fmt, ...)
{
- int error;
- struct dentry *dentry = NULL, *trap;
+ int error = 0;
+ const char *new_name;
struct name_snapshot old_name;
+ struct dentry *target;
+ struct renamedata rd = {};
+ struct inode *dir;
+ va_list ap;
+
+ if (IS_ERR_OR_NULL(dentry))
+ return 0;
+
+ va_start(ap, fmt);
+ new_name = kvasprintf_const(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+ if (!new_name)
+ return -ENOMEM;
+
+ rd.old_parent = dget_parent(dentry);
+ rd.new_parent = rd.old_parent;
+ rd.flags = RENAME_NOREPLACE;
+ target = lookup_noperm_unlocked(&QSTR(new_name), rd.new_parent);
+ if (IS_ERR(target))
+ return PTR_ERR(target);
- if (IS_ERR(old_dir))
- return old_dir;
- if (IS_ERR(new_dir))
- return new_dir;
- if (IS_ERR_OR_NULL(old_dentry))
- return old_dentry;
-
- trap = lock_rename(new_dir, old_dir);
- /* Source or destination directories don't exist? */
- if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
- goto exit;
- /* Source does not exist, cyclic rename, or mountpoint? */
- if (d_really_is_negative(old_dentry) || old_dentry == trap ||
- d_mountpoint(old_dentry))
- goto exit;
- dentry = lookup_one_len(new_name, new_dir, strlen(new_name));
- /* Lookup failed, cyclic rename or target exists? */
- if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry))
- goto exit;
-
- take_dentry_name_snapshot(&old_name, old_dentry);
-
- error = simple_rename(&nop_mnt_idmap, d_inode(old_dir), old_dentry,
- d_inode(new_dir), dentry, 0);
+ error = start_renaming_two_dentries(&rd, dentry, target);
if (error) {
- release_dentry_name_snapshot(&old_name);
- goto exit;
+ if (error == -EEXIST && target == dentry)
+ /* it isn't an error to rename a thing to itself */
+ error = 0;
+ goto out;
}
- d_move(old_dentry, dentry);
- fsnotify_move(d_inode(old_dir), d_inode(new_dir), &old_name.name,
- d_is_dir(old_dentry),
- NULL, old_dentry);
+
+ dir = d_inode(rd.old_parent);
+ take_dentry_name_snapshot(&old_name, dentry);
+ simple_rename_timestamp(dir, dentry, dir, rd.new_dentry);
+ d_move(dentry, rd.new_dentry);
+ fsnotify_move(dir, dir, &old_name.name, d_is_dir(dentry), NULL, dentry);
release_dentry_name_snapshot(&old_name);
- unlock_rename(new_dir, old_dir);
- dput(dentry);
- return old_dentry;
-exit:
- if (dentry && !IS_ERR(dentry))
- dput(dentry);
- unlock_rename(new_dir, old_dir);
- if (IS_ERR(dentry))
- return dentry;
- return ERR_PTR(-EINVAL);
+ end_renaming(&rd);
+out:
+ dput(rd.old_parent);
+ dput(target);
+ kfree_const(new_name);
+ return error;
}
-EXPORT_SYMBOL_GPL(debugfs_rename);
+EXPORT_SYMBOL_GPL(debugfs_change_name);
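Callers that used to build a name and call debugfs_rename() now pass a format string straight to debugfs_change_name() and check the 0/-errno result. A sketch, with the port structure and its fields being hypothetical:

    static int my_port_rename(struct my_port *port)
    {
            /* rename the per-port debugfs directory after the id changes */
            return debugfs_change_name(port->debugfs_dir, "port-%u", port->id);
    }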
/**
* debugfs_initialized - Tells whether debugfs has been registered
@@ -921,33 +880,47 @@ static int __init debugfs_kernel(char *str)
{
if (str) {
if (!strcmp(str, "on"))
- debugfs_allow = DEBUGFS_ALLOW_API | DEBUGFS_ALLOW_MOUNT;
- else if (!strcmp(str, "no-mount"))
- debugfs_allow = DEBUGFS_ALLOW_API;
+ debugfs_enabled = true;
else if (!strcmp(str, "off"))
- debugfs_allow = 0;
+ debugfs_enabled = false;
+ else if (!strcmp(str, "no-mount")) {
+ pr_notice("debugfs=no-mount is a deprecated alias for debugfs=off\n");
+ debugfs_enabled = false;
+ }
}
return 0;
}
early_param("debugfs", debugfs_kernel);
+
static int __init debugfs_init(void)
{
int retval;
- if (!(debugfs_allow & DEBUGFS_ALLOW_MOUNT))
+ if (!debugfs_enabled)
return -EPERM;
retval = sysfs_create_mount_point(kernel_kobj, "debug");
if (retval)
return retval;
- retval = register_filesystem(&debug_fs_type);
- if (retval)
+ debugfs_inode_cachep = kmem_cache_create("debugfs_inode_cache",
+ sizeof(struct debugfs_inode_info), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
+ init_once);
+ if (debugfs_inode_cachep == NULL) {
sysfs_remove_mount_point(kernel_kobj, "debug");
- else
- debugfs_registered = true;
+ return -ENOMEM;
+ }
- return retval;
+ retval = register_filesystem(&debug_fs_type);
+ if (retval) { // Really not going to happen
+ sysfs_remove_mount_point(kernel_kobj, "debug");
+ kmem_cache_destroy(debugfs_inode_cachep);
+ return retval;
+ }
+ debugfs_registered = true;
+ return 0;
}
core_initcall(debugfs_init);
diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h
index a3edfa4f0d8e..c95699b27a56 100644
--- a/fs/debugfs/internal.h
+++ b/fs/debugfs/internal.h
@@ -11,53 +11,48 @@
struct file_operations;
+struct debugfs_inode_info {
+ struct inode vfs_inode;
+ union {
+ const void *raw;
+ const struct file_operations *real_fops;
+ const struct debugfs_short_fops *short_fops;
+ debugfs_automount_t automount;
+ };
+ void *aux;
+};
+
+static inline struct debugfs_inode_info *DEBUGFS_I(struct inode *inode)
+{
+ return container_of(inode, struct debugfs_inode_info, vfs_inode);
+}
+
/* declared over in file.c */
extern const struct file_operations debugfs_noop_file_operations;
extern const struct file_operations debugfs_open_proxy_file_operations;
extern const struct file_operations debugfs_full_proxy_file_operations;
+extern const struct file_operations debugfs_full_short_proxy_file_operations;
struct debugfs_fsdata {
const struct file_operations *real_fops;
const struct debugfs_short_fops *short_fops;
- union {
- /* automount_fn is used when real_fops is NULL */
- debugfs_automount_t automount;
- struct {
- refcount_t active_users;
- struct completion active_users_drained;
-
- /* protect cancellations */
- struct mutex cancellations_mtx;
- struct list_head cancellations;
- };
+ struct {
+ refcount_t active_users;
+ struct completion active_users_drained;
+
+ /* protect cancellations */
+ struct mutex cancellations_mtx;
+ struct list_head cancellations;
+ unsigned int methods;
};
};
-/*
- * A dentry's ->d_fsdata either points to the real fops or to a
- * dynamically allocated debugfs_fsdata instance.
- * In order to distinguish between these two cases, a real fops
- * pointer gets its lowest bit set.
- */
-#define DEBUGFS_FSDATA_IS_REAL_FOPS_BIT BIT(0)
-/*
- * A dentry's ->d_fsdata, when pointing to real fops, is with
- * short fops instead of full fops.
- */
-#define DEBUGFS_FSDATA_IS_SHORT_FOPS_BIT BIT(1)
-
-/* Access BITS */
-#define DEBUGFS_ALLOW_API BIT(0)
-#define DEBUGFS_ALLOW_MOUNT BIT(1)
-
-#ifdef CONFIG_DEBUG_FS_ALLOW_ALL
-#define DEFAULT_DEBUGFS_ALLOW_BITS (DEBUGFS_ALLOW_MOUNT | DEBUGFS_ALLOW_API)
-#endif
-#ifdef CONFIG_DEBUG_FS_DISALLOW_MOUNT
-#define DEFAULT_DEBUGFS_ALLOW_BITS (DEBUGFS_ALLOW_API)
-#endif
-#ifdef CONFIG_DEBUG_FS_ALLOW_NONE
-#define DEFAULT_DEBUGFS_ALLOW_BITS (0)
-#endif
+enum {
+ HAS_READ = 1,
+ HAS_WRITE = 2,
+ HAS_LSEEK = 4,
+ HAS_POLL = 8,
+ HAS_IOCTL = 16
+};
#endif /* _DEBUGFS_INTERNAL_H_ */
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index b20e565b9c5e..9f3de528c358 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -12,6 +12,8 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/sched.h>
#include <linux/namei.h>
#include <linux/slab.h>
@@ -21,7 +23,6 @@
#include <linux/magic.h>
#include <linux/idr.h>
#include <linux/devpts_fs.h>
-#include <linux/parser.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>
@@ -45,7 +46,7 @@ static int pty_limit_min;
static int pty_limit_max = INT_MAX;
static atomic_t pty_count = ATOMIC_INIT(0);
-static struct ctl_table pty_table[] = {
+static const struct ctl_table pty_table[] = {
{
.procname = "max",
.maxlen = sizeof(int),
@@ -87,21 +88,21 @@ enum {
Opt_err
};
-static const match_table_t tokens = {
- {Opt_uid, "uid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_mode, "mode=%o"},
- {Opt_ptmxmode, "ptmxmode=%o"},
- {Opt_newinstance, "newinstance"},
- {Opt_max, "max=%d"},
- {Opt_err, NULL}
+static const struct fs_parameter_spec devpts_param_specs[] = {
+ fsparam_gid ("gid", Opt_gid),
+ fsparam_s32 ("max", Opt_max),
+ fsparam_u32oct ("mode", Opt_mode),
+ fsparam_flag ("newinstance", Opt_newinstance),
+ fsparam_u32oct ("ptmxmode", Opt_ptmxmode),
+ fsparam_uid ("uid", Opt_uid),
+ {}
};
struct pts_fs_info {
struct ida allocated_ptys;
struct pts_mount_opts mount_opts;
struct super_block *sb;
- struct dentry *ptmx_dentry;
+ struct inode *ptmx_inode; // borrowed
};
static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb)
@@ -214,96 +215,50 @@ void devpts_release(struct pts_fs_info *fsi)
deactivate_super(fsi->sb);
}
-#define PARSE_MOUNT 0
-#define PARSE_REMOUNT 1
-
/*
- * parse_mount_options():
- * Set @opts to mount options specified in @data. If an option is not
- * specified in @data, set it to its default value.
- *
- * Note: @data may be NULL (in which case all options are set to default).
+ * devpts_parse_param - Parse mount parameters
*/
-static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
+static int devpts_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- char *p;
- kuid_t uid;
- kgid_t gid;
-
- opts->setuid = 0;
- opts->setgid = 0;
- opts->uid = GLOBAL_ROOT_UID;
- opts->gid = GLOBAL_ROOT_GID;
- opts->mode = DEVPTS_DEFAULT_MODE;
- opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
- opts->max = NR_UNIX98_PTY_MAX;
-
- /* Only allow instances mounted from the initial mount
- * namespace to tap the reserve pool of ptys.
- */
- if (op == PARSE_MOUNT)
- opts->reserve =
- (current->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns);
-
- while ((p = strsep(&data, ",")) != NULL) {
- substring_t args[MAX_OPT_ARGS];
- int token;
- int option;
-
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_uid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(uid))
- return -EINVAL;
- opts->uid = uid;
- opts->setuid = 1;
- break;
- case Opt_gid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(gid))
- return -EINVAL;
- opts->gid = gid;
- opts->setgid = 1;
- break;
- case Opt_mode:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->mode = option & S_IALLUGO;
- break;
- case Opt_ptmxmode:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->ptmxmode = option & S_IALLUGO;
- break;
- case Opt_newinstance:
- break;
- case Opt_max:
- if (match_int(&args[0], &option) ||
- option < 0 || option > NR_UNIX98_PTY_MAX)
- return -EINVAL;
- opts->max = option;
- break;
- default:
- pr_err("called with bogus options\n");
- return -EINVAL;
- }
+ struct pts_fs_info *fsi = fc->s_fs_info;
+ struct pts_mount_opts *opts = &fsi->mount_opts;
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, devpts_param_specs, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_uid:
+ opts->uid = result.uid;
+ opts->setuid = 1;
+ break;
+ case Opt_gid:
+ opts->gid = result.gid;
+ opts->setgid = 1;
+ break;
+ case Opt_mode:
+ opts->mode = result.uint_32 & S_IALLUGO;
+ break;
+ case Opt_ptmxmode:
+ opts->ptmxmode = result.uint_32 & S_IALLUGO;
+ break;
+ case Opt_newinstance:
+ break;
+ case Opt_max:
+ if (result.uint_32 > NR_UNIX98_PTY_MAX)
+ return invalf(fc, "max out of range");
+ opts->max = result.uint_32;
+ break;
}
return 0;
}
-static int mknod_ptmx(struct super_block *sb)
+static int mknod_ptmx(struct super_block *sb, struct fs_context *fc)
{
int mode;
- int rc = -ENOMEM;
struct dentry *dentry;
struct inode *inode;
struct dentry *root = sb->s_root;
@@ -312,18 +267,10 @@ static int mknod_ptmx(struct super_block *sb)
kuid_t ptmx_uid = current_fsuid();
kgid_t ptmx_gid = current_fsgid();
- inode_lock(d_inode(root));
-
- /* If we have already created ptmx node, return */
- if (fsi->ptmx_dentry) {
- rc = 0;
- goto out;
- }
-
- dentry = d_alloc_name(root, "ptmx");
- if (!dentry) {
+ dentry = simple_start_creating(root, "ptmx");
+ if (IS_ERR(dentry)) {
pr_err("Unable to alloc dentry for ptmx node\n");
- goto out;
+ return PTR_ERR(dentry);
}
/*
@@ -331,9 +278,9 @@ static int mknod_ptmx(struct super_block *sb)
*/
inode = new_inode(sb);
if (!inode) {
+ simple_done_creating(dentry);
pr_err("Unable to alloc inode for ptmx node\n");
- dput(dentry);
- goto out;
+ return -ENOMEM;
}
inode->i_ino = 2;
@@ -343,32 +290,37 @@ static int mknod_ptmx(struct super_block *sb)
init_special_inode(inode, mode, MKDEV(TTYAUX_MAJOR, 2));
inode->i_uid = ptmx_uid;
inode->i_gid = ptmx_gid;
+ fsi->ptmx_inode = inode;
- d_add(dentry, inode);
+ d_make_persistent(dentry, inode);
- fsi->ptmx_dentry = dentry;
- rc = 0;
-out:
- inode_unlock(d_inode(root));
- return rc;
+ simple_done_creating(dentry);
+
+ return 0;
}
static void update_ptmx_mode(struct pts_fs_info *fsi)
{
- struct inode *inode;
- if (fsi->ptmx_dentry) {
- inode = d_inode(fsi->ptmx_dentry);
- inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode;
- }
+ fsi->ptmx_inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode;
}
-static int devpts_remount(struct super_block *sb, int *flags, char *data)
+static int devpts_reconfigure(struct fs_context *fc)
{
- int err;
- struct pts_fs_info *fsi = DEVPTS_SB(sb);
- struct pts_mount_opts *opts = &fsi->mount_opts;
+ struct pts_fs_info *fsi = DEVPTS_SB(fc->root->d_sb);
+ struct pts_fs_info *new = fc->s_fs_info;
- err = parse_mount_options(data, PARSE_REMOUNT, opts);
+ /* Apply the revised options.  We don't want to change ->reserve.
+ * Ideally, we'd only update the options that were explicitly specified,
+ * but remount has always reset everything to defaults first, so
+ * preserving unspecified options now would be a UAPI change...
+ */
+ fsi->mount_opts.setuid = new->mount_opts.setuid;
+ fsi->mount_opts.setgid = new->mount_opts.setgid;
+ fsi->mount_opts.uid = new->mount_opts.uid;
+ fsi->mount_opts.gid = new->mount_opts.gid;
+ fsi->mount_opts.mode = new->mount_opts.mode;
+ fsi->mount_opts.ptmxmode = new->mount_opts.ptmxmode;
+ fsi->mount_opts.max = new->mount_opts.max;
/*
* parse_mount_options() restores options to default values
@@ -378,7 +330,7 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data)
*/
update_ptmx_mode(fsi);
- return err;
+ return 0;
}
static int devpts_show_options(struct seq_file *seq, struct dentry *root)
@@ -402,53 +354,26 @@ static int devpts_show_options(struct seq_file *seq, struct dentry *root)
static const struct super_operations devpts_sops = {
.statfs = simple_statfs,
- .remount_fs = devpts_remount,
.show_options = devpts_show_options,
};
-static void *new_pts_fs_info(struct super_block *sb)
-{
- struct pts_fs_info *fsi;
-
- fsi = kzalloc(sizeof(struct pts_fs_info), GFP_KERNEL);
- if (!fsi)
- return NULL;
-
- ida_init(&fsi->allocated_ptys);
- fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE;
- fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
- fsi->sb = sb;
-
- return fsi;
-}
-
-static int
-devpts_fill_super(struct super_block *s, void *data, int silent)
+static int devpts_fill_super(struct super_block *s, struct fs_context *fc)
{
+ struct pts_fs_info *fsi = DEVPTS_SB(s);
struct inode *inode;
- int error;
s->s_iflags &= ~SB_I_NODEV;
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
s->s_magic = DEVPTS_SUPER_MAGIC;
s->s_op = &devpts_sops;
- s->s_d_op = &simple_dentry_operations;
+ s->s_d_flags = DCACHE_DONTCACHE;
s->s_time_gran = 1;
+ fsi->sb = s;
- error = -ENOMEM;
- s->s_fs_info = new_pts_fs_info(s);
- if (!s->s_fs_info)
- goto fail;
-
- error = parse_mount_options(data, PARSE_MOUNT, &DEVPTS_SB(s)->mount_opts);
- if (error)
- goto fail;
-
- error = -ENOMEM;
inode = new_inode(s);
if (!inode)
- goto fail;
+ return -ENOMEM;
inode->i_ino = 1;
simple_inode_init_ts(inode);
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
@@ -459,31 +384,60 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
s->s_root = d_make_root(inode);
if (!s->s_root) {
pr_err("get root dentry failed\n");
- goto fail;
+ return -ENOMEM;
}
- error = mknod_ptmx(s);
- if (error)
- goto fail_dput;
-
- return 0;
-fail_dput:
- dput(s->s_root);
- s->s_root = NULL;
-fail:
- return error;
+ return mknod_ptmx(s, fc);
}
/*
- * devpts_mount()
+ * devpts_get_tree()
*
* Mount a new (private) instance of devpts. PTYs created in this
* instance are independent of the PTYs in other devpts instances.
*/
-static struct dentry *devpts_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int devpts_get_tree(struct fs_context *fc)
+{
+ return get_tree_nodev(fc, devpts_fill_super);
+}
+
+static void devpts_free_fc(struct fs_context *fc)
+{
+ kfree(fc->s_fs_info);
+}
+
+static const struct fs_context_operations devpts_context_ops = {
+ .free = devpts_free_fc,
+ .parse_param = devpts_parse_param,
+ .get_tree = devpts_get_tree,
+ .reconfigure = devpts_reconfigure,
+};
+
+/*
+ * Set up the filesystem mount context.
+ */
+static int devpts_init_fs_context(struct fs_context *fc)
{
- return mount_nodev(fs_type, flags, data, devpts_fill_super);
+ struct pts_fs_info *fsi;
+
+ fsi = kzalloc(sizeof(struct pts_fs_info), GFP_KERNEL);
+ if (!fsi)
+ return -ENOMEM;
+
+ ida_init(&fsi->allocated_ptys);
+ fsi->mount_opts.uid = GLOBAL_ROOT_UID;
+ fsi->mount_opts.gid = GLOBAL_ROOT_GID;
+ fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE;
+ fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
+ fsi->mount_opts.max = NR_UNIX98_PTY_MAX;
+
+ if (fc->purpose == FS_CONTEXT_FOR_MOUNT &&
+ current->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns)
+ fsi->mount_opts.reserve = true;
+
+ fc->s_fs_info = fsi;
+ fc->ops = &devpts_context_ops;
+ return 0;
}
static void devpts_kill_sb(struct super_block *sb)
@@ -493,12 +447,13 @@ static void devpts_kill_sb(struct super_block *sb)
if (fsi)
ida_destroy(&fsi->allocated_ptys);
kfree(fsi);
- kill_litter_super(sb);
+ kill_anon_super(sb);
}
static struct file_system_type devpts_fs_type = {
.name = "devpts",
- .mount = devpts_mount,
+ .init_fs_context = devpts_init_fs_context,
+ .parameters = devpts_param_specs,
.kill_sb = devpts_kill_sb,
.fs_flags = FS_USERNS_MOUNT,
};
@@ -565,16 +520,15 @@ struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
sprintf(s, "%d", index);
dentry = d_alloc_name(root, s);
- if (dentry) {
- dentry->d_fsdata = priv;
- d_add(dentry, inode);
- fsnotify_create(d_inode(root), dentry);
- } else {
+ if (!dentry) {
iput(inode);
- dentry = ERR_PTR(-ENOMEM);
+ return ERR_PTR(-ENOMEM);
}
-
- return dentry;
+ dentry->d_fsdata = priv;
+ d_make_persistent(dentry, inode);
+ fsnotify_create(d_inode(root), dentry);
+ dput(dentry);
+ return dentry; // borrowed
}
/**
@@ -604,7 +558,7 @@ void devpts_pty_kill(struct dentry *dentry)
drop_nlink(dentry->d_inode);
d_drop(dentry);
fsnotify_unlink(d_inode(dentry->d_parent), dentry);
- dput(dentry); /* d_alloc_name() in devpts_pty_new() */
+ d_make_discardable(dentry);
}
static int __init init_devpts_fs(void)
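
The devpts hunks above are a straight conversion from the legacy mount_nodev()/remount_fs path to the fs_context API. As an orientation aid, here is a minimal sketch of that shape for a hypothetical "examplefs" with a single u32 option; every name in it is invented for illustration and the error handling is pared down, but the fs_parser/fs_context calls are the same ones the conversion uses.

#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/slab.h>

struct examplefs_info { unsigned int max; };

enum { Opt_max };

static const struct fs_parameter_spec examplefs_param_specs[] = {
	fsparam_u32("max", Opt_max),
	{}
};

static int examplefs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct examplefs_info *info = fc->s_fs_info;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, examplefs_param_specs, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_max:
		info->max = result.uint_32;
		break;
	}
	return 0;
}

static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	/* set s_op/s_magic, allocate the root inode, d_make_root(), ... */
	return 0;
}

static int examplefs_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, examplefs_fill_super);
}

static void examplefs_free_fc(struct fs_context *fc)
{
	/* NULL (and thus a no-op) once a superblock has taken ownership */
	kfree(fc->s_fs_info);
}

static const struct fs_context_operations examplefs_context_ops = {
	.free		= examplefs_free_fc,
	.parse_param	= examplefs_parse_param,
	.get_tree	= examplefs_get_tree,
};

static int examplefs_init_fs_context(struct fs_context *fc)
{
	struct examplefs_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return -ENOMEM;
	fc->s_fs_info = info;
	fc->ops = &examplefs_context_ops;
	return 0;
}
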
diff --git a/fs/direct-io.c b/fs/direct-io.c
index bbd05f1a2145..2267f5ae7f77 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -996,7 +996,7 @@ do_holes:
dio_unpin_page(dio, page);
goto out;
}
- zero_user(page, from, 1 << blkbits);
+ memzero_page(page, from, 1 << blkbits);
sdio->block_in_file++;
from += 1 << blkbits;
dio->result += 1 << blkbits;
@@ -1083,8 +1083,8 @@ static inline int drop_refcount(struct dio *dio)
* The locking rules are governed by the flags parameter:
* - if the flags value contains DIO_LOCKING we use a fancy locking
* scheme for dumb filesystems.
- * For writes this function is called under i_mutex and returns with
- * i_mutex held, for reads, i_mutex is not held on entry, but it is
+ * For writes this function is called under i_rwsem and returns with
+ * i_rwsem held; for reads, i_rwsem is not held on entry, but it is
* taken and dropped again before returning.
* - if the flags value does NOT contain DIO_LOCKING we don't use any
* internal locking but rather rely on the filesystem to synchronize
@@ -1094,7 +1094,7 @@ static inline int drop_refcount(struct dio *dio)
* counter before starting direct I/O, and decrement it once we are done.
* Truncate can wait for it to reach zero to provide exclusion. It is
* expected that filesystem provide exclusion between new direct I/O
- * and truncates. For DIO_LOCKING filesystems this is done by i_mutex,
+ * and truncates. For DIO_LOCKING filesystems this is done by i_rwsem,
* but other filesystems need to take care of this on their own.
*
* NOTE: if you pass "sdio" to anything by pointer make sure that function
@@ -1279,7 +1279,7 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
/*
* All block lookups have been performed. For READ requests
- * we can let i_mutex go now that its achieved its purpose
+ * we can let i_rwsem go now that it's achieved its purpose
* of protecting us from looking up uninitialized blocks.
*/
if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
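
The zero_user() to memzero_page() change above is purely mechanical; both clear a byte range inside a page through a temporary kernel mapping. A tiny, self-contained illustration of the new helper (the wrapper and its argument names are made up here and merely stand in for the direct-io state):

#include <linux/highmem.h>

/* Clear one block's worth of bytes inside a page, as the hole path above does. */
static void zero_hole_block(struct page *page, unsigned int from,
			    unsigned int blkbits)
{
	memzero_page(page, from, 1 << blkbits);
}
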
diff --git a/fs/dlm/Kconfig b/fs/dlm/Kconfig
index f82a4952769d..b46165df5a91 100644
--- a/fs/dlm/Kconfig
+++ b/fs/dlm/Kconfig
@@ -3,7 +3,6 @@ menuconfig DLM
tristate "Distributed Lock Manager (DLM)"
depends on INET
depends on SYSFS && CONFIGFS_FS && (IPV6 || IPV6=n)
- select IP_SCTP
help
A general purpose distributed lock manager for kernel or userspace
applications.
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index b2f21aa00719..a0d75b5c83c6 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -26,6 +26,7 @@
/*
* /config/dlm/<cluster>/spaces/<space>/nodes/<node>/nodeid (refers to <node>)
* /config/dlm/<cluster>/spaces/<space>/nodes/<node>/weight
+ * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/release_recover
* /config/dlm/<cluster>/comms/<comm>/nodeid (refers to <comm>)
* /config/dlm/<cluster>/comms/<comm>/local
* /config/dlm/<cluster>/comms/<comm>/addr (write only)
@@ -197,6 +198,9 @@ static int dlm_check_protocol_and_dlm_running(unsigned int x)
break;
case 1:
/* SCTP */
+ if (!IS_ENABLED(CONFIG_IP_SCTP))
+ return -EOPNOTSUPP;
+
break;
default:
return -EINVAL;
@@ -264,6 +268,7 @@ enum {
enum {
NODE_ATTR_NODEID = 0,
NODE_ATTR_WEIGHT,
+ NODE_ATTR_RELEASE_RECOVER,
};
struct dlm_clusters {
@@ -277,6 +282,8 @@ struct dlm_spaces {
struct dlm_space {
struct config_group group;
struct list_head members;
+ struct list_head members_gone;
+ int members_gone_count;
struct mutex members_lock;
int members_count;
struct dlm_nodes *nds;
@@ -307,6 +314,14 @@ struct dlm_node {
int weight;
int new;
int comm_seq; /* copy of cm->seq when nd->nodeid is set */
+ unsigned int release_recover;
+};
+
+struct dlm_member_gone {
+ int nodeid;
+ unsigned int release_recover;
+
+ struct list_head list; /* space->members_gone */
};
static struct configfs_group_operations clusters_ops = {
@@ -477,6 +492,7 @@ static struct config_group *make_space(struct config_group *g, const char *name)
configfs_add_default_group(&nds->ns_group, &sp->group);
INIT_LIST_HEAD(&sp->members);
+ INIT_LIST_HEAD(&sp->members_gone);
mutex_init(&sp->members_lock);
sp->members_count = 0;
sp->nds = nds;
@@ -584,10 +600,20 @@ static void drop_node(struct config_group *g, struct config_item *i)
{
struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent);
struct dlm_node *nd = config_item_to_node(i);
+ struct dlm_member_gone *mb_gone;
+
+ mb_gone = kzalloc(sizeof(*mb_gone), GFP_KERNEL);
+ if (!mb_gone)
+ return;
mutex_lock(&sp->members_lock);
list_del(&nd->list);
sp->members_count--;
+
+ mb_gone->nodeid = nd->nodeid;
+ mb_gone->release_recover = nd->release_recover;
+ list_add(&mb_gone->list, &sp->members_gone);
+ sp->members_gone_count++;
mutex_unlock(&sp->members_lock);
config_item_put(i);
@@ -812,12 +838,34 @@ static ssize_t node_weight_store(struct config_item *item, const char *buf,
return len;
}
+static ssize_t node_release_recover_show(struct config_item *item, char *buf)
+{
+ struct dlm_node *n = config_item_to_node(item);
+
+ return sprintf(buf, "%u\n", n->release_recover);
+}
+
+static ssize_t node_release_recover_store(struct config_item *item,
+ const char *buf, size_t len)
+{
+ struct dlm_node *n = config_item_to_node(item);
+ int rc;
+
+ rc = kstrtouint(buf, 0, &n->release_recover);
+ if (rc)
+ return rc;
+
+ return len;
+}
+
CONFIGFS_ATTR(node_, nodeid);
CONFIGFS_ATTR(node_, weight);
+CONFIGFS_ATTR(node_, release_recover);
static struct configfs_attribute *node_attrs[] = {
[NODE_ATTR_NODEID] = &node_attr_nodeid,
[NODE_ATTR_WEIGHT] = &node_attr_weight,
+ [NODE_ATTR_RELEASE_RECOVER] = &node_attr_release_recover,
NULL,
};
@@ -879,9 +927,10 @@ static void put_comm(struct dlm_comm *cm)
int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
int *count_out)
{
+ struct dlm_member_gone *mb_gone, *mb_safe;
+ struct dlm_config_node *nodes, *node;
struct dlm_space *sp;
struct dlm_node *nd;
- struct dlm_config_node *nodes, *node;
int rv, count;
sp = get_space(lsname);
@@ -895,7 +944,7 @@ int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
goto out;
}
- count = sp->members_count;
+ count = sp->members_count + sp->members_gone_count;
nodes = kcalloc(count, sizeof(struct dlm_config_node), GFP_NOFS);
if (!nodes) {
@@ -914,6 +963,20 @@ int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
nd->new = 0;
}
+ /* we delay the removal of nodes until here, as configfs does
+ * not support additional attributes for rmdir().
+ */
+ list_for_each_entry_safe(mb_gone, mb_safe, &sp->members_gone, list) {
+ node->nodeid = mb_gone->nodeid;
+ node->release_recover = mb_gone->release_recover;
+ node->gone = true;
+ node++;
+
+ list_del(&mb_gone->list);
+ sp->members_gone_count--;
+ kfree(mb_gone);
+ }
+
*count_out = count;
*nodes_out = nodes;
rv = 0;
@@ -935,7 +998,7 @@ int dlm_comm_seq(int nodeid, uint32_t *seq, bool locked)
mutex_unlock(&clusters_root.subsys.su_mutex);
}
if (!cm)
- return -EEXIST;
+ return -ENOENT;
*seq = cm->seq;
put_comm(cm);
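
The members_gone bookkeeping added above works around the fact that a configfs rmdir() cannot carry extra arguments: drop_node() records a small tombstone, and dlm_config_nodes() drains the list the next time the member set is read. A stripped-down sketch of that pattern follows; the types and helpers are illustrative only, not the actual dlm structures.

#include <linux/list.h>
#include <linux/slab.h>

struct gone_entry {
	int nodeid;
	unsigned int release_recover;
	struct list_head list;
};

/* Called from the rmdir()-time callback: remember what was removed. */
static void record_gone(struct list_head *gone_list, int nodeid,
			unsigned int release_recover)
{
	struct gone_entry *g = kzalloc(sizeof(*g), GFP_KERNEL);

	if (!g)
		return;	/* best effort, mirroring drop_node() above */
	g->nodeid = nodeid;
	g->release_recover = release_recover;
	list_add(&g->list, gone_list);
}

/* Called when the full node list is next read: consume the tombstones. */
static void drain_gone(struct list_head *gone_list)
{
	struct gone_entry *g, *safe;

	list_for_each_entry_safe(g, safe, gone_list, list) {
		/* hand g->nodeid / g->release_recover to the recovery path here */
		list_del(&g->list);
		kfree(g);
	}
}
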
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index e48c4f9686d3..4ebd45f75276 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -17,13 +17,15 @@
struct dlm_config_node {
int nodeid;
int weight;
+ bool gone;
int new;
uint32_t comm_seq;
+ unsigned int release_recover;
};
extern const struct rhashtable_params dlm_rhash_rsb_params;
-#define DLM_MAX_ADDR_COUNT 3
+#define DLM_MAX_ADDR_COUNT 8
#define DLM_PROTO_TCP 0
#define DLM_PROTO_SCTP 1
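
From userspace, the new per-node attribute is just another configfs file next to nodeid and weight. Below is a hedged sketch of a cluster manager setting it before removing the node's directory; the directory layout follows the comment at the top of config.c, but the actual configfs mount point and paths depend on the deployment.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Write "1" to .../nodes/<node>/release_recover before rmdir()ing <node>. */
static int set_release_recover(const char *node_dir)
{
	char path[4096];
	int fd, ok;

	snprintf(path, sizeof(path), "%s/release_recover", node_dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ok = (write(fd, "1", 1) == 1);
	close(fd);
	return ok ? 0 : -1;
}
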
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index fc1d710166e9..be938fdf17d9 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -509,7 +509,7 @@ static void add_scan(struct dlm_ls *ls, struct dlm_rsb *r)
void dlm_rsb_scan(struct timer_list *timer)
{
- struct dlm_ls *ls = from_timer(ls, timer, ls_scan_timer);
+ struct dlm_ls *ls = timer_container_of(ls, timer, ls_scan_timer);
int our_nodeid = dlm_our_nodeid();
struct dlm_rsb *r;
int rv;
@@ -741,6 +741,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
read_lock_bh(&ls->ls_rsbtbl_lock);
if (!rsb_flag(r, RSB_HASHED)) {
read_unlock_bh(&ls->ls_rsbtbl_lock);
+ error = -EBADR;
goto do_new;
}
@@ -784,6 +785,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
}
} else {
write_unlock_bh(&ls->ls_rsbtbl_lock);
+ error = -EBADR;
goto do_new;
}
@@ -824,9 +826,12 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
r->res_first_lkid = 0;
}
- /* A dir record will not be on the scan list. */
- if (r->res_dir_nodeid != our_nodeid)
- del_scan(ls, r);
+ /* we always deactivate the scan timer for the rsb when
+ * we move it out of the inactive state, as the rsb state
+ * can change and scan timers are only for inactive
+ * rsbs.
+ */
+ del_scan(ls, r);
list_move(&r->res_slow_list, &ls->ls_slow_active);
rsb_clear_flag(r, RSB_INACTIVE);
kref_init(&r->res_ref); /* ref is now used in active state */
@@ -989,10 +994,10 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
r->res_nodeid = 0;
}
+ del_scan(ls, r);
list_move(&r->res_slow_list, &ls->ls_slow_active);
rsb_clear_flag(r, RSB_INACTIVE);
kref_init(&r->res_ref);
- del_scan(ls, r);
write_unlock_bh(&ls->ls_rsbtbl_lock);
goto out;
@@ -1337,9 +1342,13 @@ static int _dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *na
__dlm_master_lookup(ls, r, our_nodeid, from_nodeid, true, flags,
r_nodeid, result);
- /* A dir record rsb should never be on scan list. */
- /* Try to fix this with del_scan? */
- WARN_ON(!list_empty(&r->res_scan_list));
+ /* A dir record rsb should never be on the scan list,
+ * except when we are both the dir and the master node.
+ * This function should only be called by the dir
+ * node.
+ */
+ WARN_ON(!list_empty(&r->res_scan_list) &&
+ r->res_master_nodeid != our_nodeid);
write_unlock_bh(&ls->ls_rsbtbl_lock);
@@ -1430,16 +1439,23 @@ static void deactivate_rsb(struct kref *kref)
list_move(&r->res_slow_list, &ls->ls_slow_inactive);
/*
- * When the rsb becomes unused:
- * - If it's not a dir record for a remote master rsb,
- * then it is put on the scan list to be freed.
- * - If it's a dir record for a remote master rsb,
- * then it is kept in the inactive state until
- * receive_remove() from the master node.
+ * When the rsb becomes unused, there are two possibilities:
+ * 1. Leave the inactive rsb in place (don't remove it).
+ * 2. Add it to the scan list to be removed.
+ *
+ * 1 is done when the rsb is acting as the dir record
+ * for a remotely mastered rsb. The rsb must be left
+ * in place as an inactive rsb to act as the dir record.
+ *
+ * 2 is done when a) the rsb is not the master and not the
+ * dir record, b) the rsb is both the master and the
+ * dir record, or c) the rsb is the master but not the dir record.
+ *
+ * (If no directory is used, the rsb can always be removed.)
*/
- if (!dlm_no_directory(ls) &&
- (r->res_master_nodeid != our_nodeid) &&
- (dlm_dir_nodeid(r) != our_nodeid))
+ if (dlm_no_directory(ls) ||
+ (r->res_master_nodeid == our_nodeid ||
+ dlm_dir_nodeid(r) != our_nodeid))
add_scan(ls, r);
if (r->res_lvbptr) {
@@ -5560,7 +5576,7 @@ static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
if (rl->rl_status == DLM_LKSTS_CONVERT && middle_conversion(lkb)) {
/* We may need to adjust grmode depending on other granted locks. */
- log_limit(ls, "%s %x middle convert gr %d rq %d remote %d %x",
+ log_rinfo(ls, "%s %x middle convert gr %d rq %d remote %d %x",
__func__, lkb->lkb_id, lkb->lkb_grmode,
lkb->lkb_rqmode, lkb->lkb_nodeid, lkb->lkb_remid);
rsb_set_flag(r, RSB_RECOVER_CONVERT);
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 8afac6e2dff0..ddaa76558706 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -186,12 +186,17 @@ static struct kobj_type dlm_ktype = {
static struct kset *dlm_kset;
-static int do_uevent(struct dlm_ls *ls, int in)
+static int do_uevent(struct dlm_ls *ls, int in, unsigned int release_recover)
{
- if (in)
+ char message[512] = {};
+ char *envp[] = { message, NULL };
+
+ if (in) {
kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
- else
- kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
+ } else {
+ snprintf(message, 511, "RELEASE_RECOVER=%u", release_recover);
+ kobject_uevent_env(&ls->ls_kobj, KOBJ_OFFLINE, envp);
+ }
log_rinfo(ls, "%s the lockspace group...", in ? "joining" : "leaving");
@@ -575,8 +580,8 @@ static int new_lockspace(const char *name, const char *cluster,
current lockspace members are (via configfs) and then tells the
lockspace to start running (via sysfs) in dlm_ls_start(). */
- error = do_uevent(ls, 1);
- if (error)
+ error = do_uevent(ls, 1, 0);
+ if (error < 0)
goto out_recoverd;
/* wait until recovery is successful or failed */
@@ -592,7 +597,7 @@ static int new_lockspace(const char *name, const char *cluster,
return 0;
out_members:
- do_uevent(ls, 0);
+ do_uevent(ls, 0, 0);
dlm_clear_members(ls);
kfree(ls->ls_node_array);
out_recoverd:
@@ -671,19 +676,20 @@ int dlm_new_user_lockspace(const char *name, const char *cluster,
This is because there may be LKBs queued as ASTs that have been unlinked
from their RSBs and are pending deletion once the AST has been delivered */
-static int lockspace_busy(struct dlm_ls *ls, int force)
+static int lockspace_busy(struct dlm_ls *ls, unsigned int release_option)
{
struct dlm_lkb *lkb;
unsigned long id;
int rv = 0;
read_lock_bh(&ls->ls_lkbxa_lock);
- if (force == 0) {
+ if (release_option == DLM_RELEASE_NO_LOCKS) {
xa_for_each(&ls->ls_lkbxa, id, lkb) {
rv = 1;
break;
}
- } else if (force == 1) {
+ } else if (release_option == DLM_RELEASE_UNUSED) {
+ /* TODO: handle this UNUSED option as NO_LOCKS in a later patch */
xa_for_each(&ls->ls_lkbxa, id, lkb) {
if (lkb->lkb_nodeid == 0 &&
lkb->lkb_grmode != DLM_LOCK_IV) {
@@ -698,11 +704,11 @@ static int lockspace_busy(struct dlm_ls *ls, int force)
return rv;
}
-static int release_lockspace(struct dlm_ls *ls, int force)
+static int release_lockspace(struct dlm_ls *ls, unsigned int release_option)
{
int busy, rv;
- busy = lockspace_busy(ls, force);
+ busy = lockspace_busy(ls, release_option);
spin_lock_bh(&lslist_lock);
if (ls->ls_create_count == 1) {
@@ -730,8 +736,9 @@ static int release_lockspace(struct dlm_ls *ls, int force)
dlm_device_deregister(ls);
- if (force < 3 && dlm_user_daemon_available())
- do_uevent(ls, 0);
+ if (release_option != DLM_RELEASE_NO_EVENT &&
+ dlm_user_daemon_available())
+ do_uevent(ls, 0, (release_option == DLM_RELEASE_RECOVER));
dlm_recoverd_stop(ls);
@@ -782,25 +789,24 @@ static int release_lockspace(struct dlm_ls *ls, int force)
* lockspace must continue to function as usual, participating in recoveries,
* until this returns.
*
- * Force has 4 possible values:
- * 0 - don't destroy lockspace if it has any LKBs
- * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
- * 2 - destroy lockspace regardless of LKBs
- * 3 - destroy lockspace as part of a forced shutdown
+ * See DLM_RELEASE defines for release_option values and their meaning.
*/
-int dlm_release_lockspace(void *lockspace, int force)
+int dlm_release_lockspace(void *lockspace, unsigned int release_option)
{
struct dlm_ls *ls;
int error;
+ if (release_option > __DLM_RELEASE_MAX)
+ return -EINVAL;
+
ls = dlm_find_lockspace_local(lockspace);
if (!ls)
return -EINVAL;
dlm_put_lockspace(ls);
mutex_lock(&ls_lock);
- error = release_lockspace(ls, force);
+ error = release_lockspace(ls, release_option);
if (!error)
ls_count--;
if (!ls_count)
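
For callers, the numeric "force" argument of dlm_release_lockspace() is replaced by the DLM_RELEASE_* options used above. A hedged sketch of how an in-kernel user might map its old values onto the new ones; the wrapper and its flags are invented, while the constants and the mapping follow the removed comment and the user.c hunks further down.

#include <linux/dlm.h>
#include <linux/types.h>

static int example_release(dlm_lockspace_t *ls, bool force, bool in_shutdown)
{
	unsigned int option;

	if (in_shutdown)
		option = DLM_RELEASE_NO_EVENT;	/* old force == 3 */
	else if (force)
		option = DLM_RELEASE_NORMAL;	/* old force == 2 */
	else
		option = DLM_RELEASE_NO_LOCKS;	/* old force == 0 */

	return dlm_release_lockspace(ls, option);
}
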
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index df40c3fd1070..b3958008ba3f 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -160,6 +160,7 @@ struct dlm_proto_ops {
bool try_new_addr;
const char *name;
int proto;
+ int how;
void (*sockopts)(struct socket *sock);
int (*bind)(struct socket *sock);
@@ -462,7 +463,8 @@ static bool dlm_lowcomms_con_has_addr(const struct connection *con,
int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr)
{
struct connection *con;
- bool ret, idx;
+ bool ret;
+ int idx;
idx = srcu_read_lock(&connections_srcu);
con = nodeid2con(nodeid, GFP_NOFS);
@@ -532,7 +534,7 @@ static void lowcomms_state_change(struct sock *sk)
/* SCTP layer is not calling sk_data_ready when the connection
* is done, so we catch the signal through here.
*/
- if (sk->sk_shutdown == RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
lowcomms_data_ready(sk);
}
@@ -809,7 +811,7 @@ static void shutdown_connection(struct connection *con, bool and_other)
return;
}
- ret = kernel_sock_shutdown(con->sock, SHUT_WR);
+ ret = kernel_sock_shutdown(con->sock, dlm_proto_ops->how);
up_read(&con->sock_lock);
if (ret) {
log_print("Connection %p failed to shutdown: %d will force close",
@@ -1124,7 +1126,7 @@ static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
static int sctp_bind_addrs(struct socket *sock, __be16 port)
{
struct sockaddr_storage localaddr;
- struct sockaddr *addr = (struct sockaddr *)&localaddr;
+ struct sockaddr_unsized *addr = (struct sockaddr_unsized *)&localaddr;
int i, addr_len, result = 0;
for (i = 0; i < dlm_local_count; i++) {
@@ -1597,7 +1599,7 @@ static int dlm_connect(struct connection *con)
log_print_ratelimited("connecting to %d", con->nodeid);
make_sockaddr(&addr, dlm_config.ci_tcp_port, &addr_len);
- result = kernel_connect(sock, (struct sockaddr *)&addr, addr_len, 0);
+ result = kernel_connect(sock, (struct sockaddr_unsized *)&addr, addr_len, 0);
switch (result) {
case -EINPROGRESS:
/* not an error */
@@ -1701,7 +1703,7 @@ static int work_start(void)
return -ENOMEM;
}
- process_workqueue = alloc_workqueue("dlm_process", WQ_HIGHPRI | WQ_BH, 0);
+ process_workqueue = alloc_workqueue("dlm_process", WQ_HIGHPRI | WQ_BH | WQ_PERCPU, 0);
if (!process_workqueue) {
log_print("can't start dlm_process");
destroy_workqueue(io_workqueue);
@@ -1811,7 +1813,7 @@ static int dlm_tcp_bind(struct socket *sock)
memcpy(&src_addr, &dlm_local_addr[0], sizeof(src_addr));
make_sockaddr(&src_addr, 0, &addr_len);
- result = kernel_bind(sock, (struct sockaddr *)&src_addr,
+ result = kernel_bind(sock, (struct sockaddr_unsized *)&src_addr,
addr_len);
if (result < 0) {
/* This *may* not indicate a critical error */
@@ -1825,8 +1827,8 @@ static int dlm_tcp_listen_validate(void)
{
/* We don't support multi-homed hosts */
if (dlm_local_count > 1) {
- log_print("TCP protocol can't handle multi-homed hosts, try SCTP");
- return -EINVAL;
+ log_print("Detect multi-homed hosts but use only the first IP address.");
+ log_print("Try SCTP, if you want to enable multi-link.");
}
return 0;
@@ -1850,13 +1852,14 @@ static int dlm_tcp_listen_bind(struct socket *sock)
/* Bind to our port */
make_sockaddr(&dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
- return kernel_bind(sock, (struct sockaddr *)&dlm_local_addr[0],
+ return kernel_bind(sock, (struct sockaddr_unsized *)&dlm_local_addr[0],
addr_len);
}
static const struct dlm_proto_ops dlm_tcp_ops = {
.name = "TCP",
.proto = IPPROTO_TCP,
+ .how = SHUT_WR,
.sockopts = dlm_tcp_sockopts,
.bind = dlm_tcp_bind,
.listen_validate = dlm_tcp_listen_validate,
@@ -1895,6 +1898,7 @@ static void dlm_sctp_sockopts(struct socket *sock)
static const struct dlm_proto_ops dlm_sctp_ops = {
.name = "SCTP",
.proto = IPPROTO_SCTP,
+ .how = SHUT_RDWR,
.try_new_addr = true,
.sockopts = dlm_sctp_sockopts,
.bind = dlm_sctp_bind,
diff --git a/fs/dlm/main.c b/fs/dlm/main.c
index 4887c8a05318..a44d16da7187 100644
--- a/fs/dlm/main.c
+++ b/fs/dlm/main.c
@@ -52,7 +52,7 @@ static int __init init_dlm(void)
if (error)
goto out_user;
- dlm_wq = alloc_workqueue("dlm_wq", 0, 0);
+ dlm_wq = alloc_workqueue("dlm_wq", WQ_PERCPU, 0);
if (!dlm_wq) {
error = -ENOMEM;
goto out_plock;
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index b0864c93230f..c0f557a80a75 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -478,7 +478,8 @@ static void dlm_lsop_recover_prep(struct dlm_ls *ls)
ls->ls_ops->recover_prep(ls->ls_ops_arg);
}
-static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb)
+static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb,
+ unsigned int release_recover)
{
struct dlm_slot slot;
uint32_t seq;
@@ -495,7 +496,7 @@ static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb)
error = dlm_comm_seq(memb->nodeid, &seq, false);
- if (!error && seq == memb->comm_seq)
+ if (!release_recover && !error && seq == memb->comm_seq)
return;
slot.nodeid = memb->nodeid;
@@ -552,6 +553,7 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
struct dlm_member *memb, *safe;
struct dlm_config_node *node;
int i, error, neg = 0, low = -1;
+ unsigned int release_recover;
/* previously removed members that we've not finished removing need to
* count as a negative change so the "neg" recovery steps will happen
@@ -569,11 +571,21 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
node = find_config_node(rv, memb->nodeid);
- if (node && !node->new)
+ if (!node) {
+ log_error(ls, "remove member %d invalid",
+ memb->nodeid);
+ return -EFAULT;
+ }
+
+ if (!node->new && !node->gone)
continue;
- if (!node) {
- log_rinfo(ls, "remove member %d", memb->nodeid);
+ release_recover = 0;
+
+ if (node->gone) {
+ release_recover = node->release_recover;
+ log_rinfo(ls, "remove member %d%s", memb->nodeid,
+ release_recover ? " (release_recover)" : "");
} else {
/* removed and re-added */
log_rinfo(ls, "remove member %d comm_seq %u %u",
@@ -584,13 +596,16 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
list_move(&memb->list, &ls->ls_nodes_gone);
remove_remote_member(memb->nodeid);
ls->ls_num_nodes--;
- dlm_lsop_recover_slot(ls, memb);
+ dlm_lsop_recover_slot(ls, memb, release_recover);
}
/* add new members to ls_nodes */
for (i = 0; i < rv->nodes_count; i++) {
node = &rv->nodes[i];
+ if (node->gone)
+ continue;
+
if (dlm_is_member(ls, node->nodeid))
continue;
error = dlm_add_member(ls, node);
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index be4240f09abd..3ac020fb8139 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -842,7 +842,7 @@ static void recover_conversion(struct dlm_rsb *r)
*/
if (((lkb->lkb_grmode == DLM_LOCK_PR) && (other_grmode == DLM_LOCK_CW)) ||
((lkb->lkb_grmode == DLM_LOCK_CW) && (other_grmode == DLM_LOCK_PR))) {
- log_limit(ls, "%s %x gr %d rq %d, remote %d %x, other_lkid %u, other gr %d, set gr=NL",
+ log_rinfo(ls, "%s %x gr %d rq %d, remote %d %x, other_lkid %u, other gr %d, set gr=NL",
__func__, lkb->lkb_id, lkb->lkb_grmode,
lkb->lkb_rqmode, lkb->lkb_nodeid,
lkb->lkb_remid, other_lkid, other_grmode);
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 5cb3896be826..51daf4acbe31 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -425,7 +425,7 @@ static int device_create_lockspace(struct dlm_lspace_params *params)
dlm_put_lockspace(ls);
if (error)
- dlm_release_lockspace(lockspace, 0);
+ dlm_release_lockspace(lockspace, DLM_RELEASE_NO_LOCKS);
else
error = ls->ls_device.minor;
@@ -436,7 +436,7 @@ static int device_remove_lockspace(struct dlm_lspace_params *params)
{
dlm_lockspace_t *lockspace;
struct dlm_ls *ls;
- int error, force = 0;
+ int error, force = DLM_RELEASE_NO_LOCKS;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -446,7 +446,7 @@ static int device_remove_lockspace(struct dlm_lspace_params *params)
return -ENOENT;
if (params->flags & DLM_USER_LSFLG_FORCEFREE)
- force = 2;
+ force = DLM_RELEASE_NORMAL;
lockspace = ls;
dlm_put_lockspace(ls);
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index d45ef541d848..49f56a598ecb 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -14,7 +14,7 @@
#include "internal.h"
/* A global variable is a bit ugly, but it keeps the code simple */
-int sysctl_drop_caches;
+static int sysctl_drop_caches;
static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
@@ -28,7 +28,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
* inodes without pages but we deliberately won't in case
* we need to reschedule to avoid softlockups.
*/
- if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
+ if ((inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) ||
(mapping_empty(inode->i_mapping) && !need_resched())) {
spin_unlock(&inode->i_lock);
continue;
@@ -48,7 +48,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
iput(toput_inode);
}
-int drop_caches_sysctl_handler(const struct ctl_table *table, int write,
+static int drop_caches_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
int ret;
@@ -77,3 +77,22 @@ int drop_caches_sysctl_handler(const struct ctl_table *table, int write,
}
return 0;
}
+
+static const struct ctl_table drop_caches_table[] = {
+ {
+ .procname = "drop_caches",
+ .data = &sysctl_drop_caches,
+ .maxlen = sizeof(int),
+ .mode = 0200,
+ .proc_handler = drop_caches_sysctl_handler,
+ .extra1 = SYSCTL_ONE,
+ .extra2 = SYSCTL_FOUR,
+ },
+};
+
+static int __init init_vm_drop_caches_sysctls(void)
+{
+ register_sysctl_init("vm", drop_caches_table);
+ return 0;
+}
+fs_initcall(init_vm_drop_caches_sysctls);
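
The same register_sysctl_init() pattern can keep any such knob private to the file that implements it. A hedged sketch with an invented knob; "example_knob" and everything around it is made up, and only the registration pattern mirrors the hunk above.

#include <linux/sysctl.h>

static int example_knob;

static const struct ctl_table example_table[] = {
	{
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};

static int __init init_example_sysctl(void)
{
	register_sysctl_init("vm", example_table);
	return 0;
}
fs_initcall(init_example_sysctl);
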
diff --git a/fs/ecryptfs/Kconfig b/fs/ecryptfs/Kconfig
index 1bdeaa6d5790..c2f4fb41b4e6 100644
--- a/fs/ecryptfs/Kconfig
+++ b/fs/ecryptfs/Kconfig
@@ -4,7 +4,7 @@ config ECRYPT_FS
depends on KEYS && CRYPTO && (ENCRYPTED_KEYS || ENCRYPTED_KEYS=n)
select CRYPTO_ECB
select CRYPTO_CBC
- select CRYPTO_MD5
+ select CRYPTO_LIB_MD5
help
Encrypted filesystem that operates on the VFS layer. See
<file:Documentation/filesystems/ecryptfs.rst> to learn more about
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 69536cacdea8..260f8a4938b0 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -9,7 +9,6 @@
* Michael C. Thompson <mcthomps@us.ibm.com>
*/
-#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/fs.h>
#include <linux/mount.h>
@@ -48,32 +47,6 @@ void ecryptfs_from_hex(char *dst, char *src, int dst_size)
}
}
-/**
- * ecryptfs_calculate_md5 - calculates the md5 of @src
- * @dst: Pointer to 16 bytes of allocated memory
- * @crypt_stat: Pointer to crypt_stat struct for the current inode
- * @src: Data to be md5'd
- * @len: Length of @src
- *
- * Uses the allocated crypto context that crypt_stat references to
- * generate the MD5 sum of the contents of src.
- */
-static int ecryptfs_calculate_md5(char *dst,
- struct ecryptfs_crypt_stat *crypt_stat,
- char *src, int len)
-{
- int rc = crypto_shash_tfm_digest(crypt_stat->hash_tfm, src, len, dst);
-
- if (rc) {
- printk(KERN_ERR
- "%s: Error computing crypto hash; rc = [%d]\n",
- __func__, rc);
- goto out;
- }
-out:
- return rc;
-}
-
static int ecryptfs_crypto_api_algify_cipher_name(char **algified_name,
char *cipher_name,
char *chaining_modifier)
@@ -104,13 +77,10 @@ out:
*
* Generate the initialization vector from the given root IV and page
* offset.
- *
- * Returns zero on success; non-zero on error.
*/
-int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
- loff_t offset)
+void ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
+ loff_t offset)
{
- int rc = 0;
char dst[MD5_DIGEST_SIZE];
char src[ECRYPTFS_MAX_IV_BYTES + 16];
@@ -129,20 +99,12 @@ int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
ecryptfs_printk(KERN_DEBUG, "source:\n");
ecryptfs_dump_hex(src, (crypt_stat->iv_bytes + 16));
}
- rc = ecryptfs_calculate_md5(dst, crypt_stat, src,
- (crypt_stat->iv_bytes + 16));
- if (rc) {
- ecryptfs_printk(KERN_WARNING, "Error attempting to compute "
- "MD5 while generating IV for a page\n");
- goto out;
- }
+ md5(src, crypt_stat->iv_bytes + 16, dst);
memcpy(iv, dst, crypt_stat->iv_bytes);
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(KERN_DEBUG, "derived iv:\n");
ecryptfs_dump_hex(iv, crypt_stat->iv_bytes);
}
-out:
- return rc;
}
/**
@@ -151,29 +113,14 @@ out:
*
* Initialize the crypt_stat structure.
*/
-int ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
+void ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
{
- struct crypto_shash *tfm;
- int rc;
-
- tfm = crypto_alloc_shash(ECRYPTFS_DEFAULT_HASH, 0, 0);
- if (IS_ERR(tfm)) {
- rc = PTR_ERR(tfm);
- ecryptfs_printk(KERN_ERR, "Error attempting to "
- "allocate crypto context; rc = [%d]\n",
- rc);
- return rc;
- }
-
memset((void *)crypt_stat, 0, sizeof(struct ecryptfs_crypt_stat));
INIT_LIST_HEAD(&crypt_stat->keysig_list);
mutex_init(&crypt_stat->keysig_list_mutex);
mutex_init(&crypt_stat->cs_mutex);
mutex_init(&crypt_stat->cs_tfm_mutex);
- crypt_stat->hash_tfm = tfm;
crypt_stat->flags |= ECRYPTFS_STRUCT_INITIALIZED;
-
- return 0;
}
/**
@@ -187,7 +134,6 @@ void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
struct ecryptfs_key_sig *key_sig, *key_sig_tmp;
crypto_free_skcipher(crypt_stat->tfm);
- crypto_free_shash(crypt_stat->hash_tfm);
list_for_each_entry_safe(key_sig, key_sig_tmp,
&crypt_stat->keysig_list, crypt_stat_list) {
list_del(&key_sig->crypt_stat_list);
@@ -361,14 +307,7 @@ static int crypt_extent(struct ecryptfs_crypt_stat *crypt_stat,
int rc;
extent_base = (((loff_t)page_index) * (PAGE_SIZE / extent_size));
- rc = ecryptfs_derive_iv(extent_iv, crypt_stat,
- (extent_base + extent_offset));
- if (rc) {
- ecryptfs_printk(KERN_ERR, "Error attempting to derive IV for "
- "extent [0x%.16llx]; rc = [%d]\n",
- (unsigned long long)(extent_base + extent_offset), rc);
- goto out;
- }
+ ecryptfs_derive_iv(extent_iv, crypt_stat, extent_base + extent_offset);
sg_init_table(&src_sg, 1);
sg_init_table(&dst_sg, 1);
@@ -609,31 +548,20 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
*/
int ecryptfs_compute_root_iv(struct ecryptfs_crypt_stat *crypt_stat)
{
- int rc = 0;
char dst[MD5_DIGEST_SIZE];
BUG_ON(crypt_stat->iv_bytes > MD5_DIGEST_SIZE);
BUG_ON(crypt_stat->iv_bytes <= 0);
if (!(crypt_stat->flags & ECRYPTFS_KEY_VALID)) {
- rc = -EINVAL;
ecryptfs_printk(KERN_WARNING, "Session key not valid; "
"cannot generate root IV\n");
- goto out;
- }
- rc = ecryptfs_calculate_md5(dst, crypt_stat, crypt_stat->key,
- crypt_stat->key_size);
- if (rc) {
- ecryptfs_printk(KERN_WARNING, "Error attempting to compute "
- "MD5 while generating root IV\n");
- goto out;
- }
- memcpy(crypt_stat->root_iv, dst, crypt_stat->iv_bytes);
-out:
- if (rc) {
memset(crypt_stat->root_iv, 0, crypt_stat->iv_bytes);
crypt_stat->flags |= ECRYPTFS_SECURITY_WARNING;
+ return -EINVAL;
}
- return rc;
+ md5(crypt_stat->key, crypt_stat->key_size, dst);
+ memcpy(crypt_stat->root_iv, dst, crypt_stat->iv_bytes);
+ return 0;
}
static void ecryptfs_generate_new_key(struct ecryptfs_crypt_stat *crypt_stat)
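
The crypto.c hunks above drop the per-inode shash transform in favour of the library one-shot this series switches to: md5() takes the input and its length and writes a 16-byte digest, with no allocation and no error path. A tiny sketch of a caller; the wrapper name and buffers are placeholders.

#include <crypto/md5.h>

static void digest_example(const u8 *src, size_t len,
			   u8 dst[MD5_DIGEST_SIZE])
{
	/* One call replaces crypto_alloc_shash() + crypto_shash_tfm_digest(). */
	md5(src, len, dst);
}
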
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index acaa0825e9bb..6648a924e31a 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -17,7 +17,9 @@
/**
* ecryptfs_d_revalidate - revalidate an ecryptfs dentry
- * @dentry: The ecryptfs dentry
+ * @dir: inode of expected parent
+ * @name: expected name
+ * @dentry: dentry to revalidate
* @flags: lookup flags
*
* Called when the VFS needs to revalidate a dentry. This
@@ -28,7 +30,8 @@
* Returns 1 if valid, 0 otherwise.
*
*/
-static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
+static int ecryptfs_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
int rc = 1;
@@ -36,8 +39,15 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
if (flags & LOOKUP_RCU)
return -ECHILD;
- if (lower_dentry->d_flags & DCACHE_OP_REVALIDATE)
- rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
+ if (lower_dentry->d_flags & DCACHE_OP_REVALIDATE) {
+ struct inode *lower_dir = ecryptfs_inode_to_lower(dir);
+ struct name_snapshot n;
+
+ take_dentry_name_snapshot(&n, lower_dentry);
+ rc = lower_dentry->d_op->d_revalidate(lower_dir, &n.name,
+ lower_dentry, flags);
+ release_dentry_name_snapshot(&n);
+ }
if (d_really_is_positive(dentry)) {
struct inode *inode = d_inode(dentry);
@@ -49,14 +59,6 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
return rc;
}
-struct kmem_cache *ecryptfs_dentry_info_cache;
-
-static void ecryptfs_dentry_free_rcu(struct rcu_head *head)
-{
- kmem_cache_free(ecryptfs_dentry_info_cache,
- container_of(head, struct ecryptfs_dentry_info, rcu));
-}
-
/**
* ecryptfs_d_release
* @dentry: The ecryptfs dentry
@@ -65,11 +67,7 @@ static void ecryptfs_dentry_free_rcu(struct rcu_head *head)
*/
static void ecryptfs_d_release(struct dentry *dentry)
{
- struct ecryptfs_dentry_info *p = dentry->d_fsdata;
- if (p) {
- path_put(&p->lower_path);
- call_rcu(&p->rcu, ecryptfs_dentry_free_rcu);
- }
+ dput(dentry->d_fsdata);
}
const struct dentry_operations ecryptfs_dops = {
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 1f562e75d0e4..62a2ea7f59ed 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -14,6 +14,7 @@
#ifndef ECRYPTFS_KERNEL_H
#define ECRYPTFS_KERNEL_H
+#include <crypto/md5.h>
#include <crypto/skcipher.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
@@ -137,8 +138,6 @@ ecryptfs_get_key_payload_data(struct key *key)
+ MAGIC_ECRYPTFS_MARKER_SIZE_BYTES)
#define ECRYPTFS_DEFAULT_CIPHER "aes"
#define ECRYPTFS_DEFAULT_KEY_BYTES 16
-#define ECRYPTFS_DEFAULT_HASH "md5"
-#define ECRYPTFS_TAG_70_DIGEST ECRYPTFS_DEFAULT_HASH
#define ECRYPTFS_TAG_1_PACKET_TYPE 0x01
#define ECRYPTFS_TAG_3_PACKET_TYPE 0x8C
#define ECRYPTFS_TAG_11_PACKET_TYPE 0xED
@@ -163,8 +162,6 @@ ecryptfs_get_key_payload_data(struct key *key)
* ECRYPTFS_MAX_IV_BYTES */
#define ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES 16
#define ECRYPTFS_NON_NULL 0x42 /* A reasonable substitute for NULL */
-#define MD5_DIGEST_SIZE 16
-#define ECRYPTFS_TAG_70_DIGEST_SIZE MD5_DIGEST_SIZE
#define ECRYPTFS_TAG_70_MIN_METADATA_SIZE (1 + ECRYPTFS_MIN_PKT_LEN_SIZE \
+ ECRYPTFS_SIG_SIZE + 1 + 1)
#define ECRYPTFS_TAG_70_MAX_METADATA_SIZE (1 + ECRYPTFS_MAX_PKT_LEN_SIZE \
@@ -237,8 +234,6 @@ struct ecryptfs_crypt_stat {
unsigned int extent_mask;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
struct crypto_skcipher *tfm;
- struct crypto_shash *hash_tfm; /* Crypto context for generating
- * the initialization vectors */
unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1];
unsigned char key[ECRYPTFS_MAX_KEY_BYTES];
unsigned char root_iv[ECRYPTFS_MAX_IV_BYTES];
@@ -258,13 +253,6 @@ struct ecryptfs_inode_info {
struct ecryptfs_crypt_stat crypt_stat;
};
-/* dentry private data. Each dentry must keep track of a lower
- * vfsmount too. */
-struct ecryptfs_dentry_info {
- struct path lower_path;
- struct rcu_head rcu;
-};
-
/**
* ecryptfs_global_auth_tok - A key used to encrypt all new files under the mountpoint
* @flags: Status flags
@@ -348,6 +336,7 @@ struct ecryptfs_mount_crypt_stat {
/* superblock private data. */
struct ecryptfs_sb_info {
struct super_block *wsi_sb;
+ struct vfsmount *lower_mnt;
struct ecryptfs_mount_crypt_stat mount_crypt_stat;
};
@@ -494,22 +483,25 @@ ecryptfs_set_superblock_lower(struct super_block *sb,
}
static inline void
-ecryptfs_set_dentry_private(struct dentry *dentry,
- struct ecryptfs_dentry_info *dentry_info)
+ecryptfs_set_dentry_lower(struct dentry *dentry,
+ struct dentry *lower_dentry)
{
- dentry->d_fsdata = dentry_info;
+ dentry->d_fsdata = lower_dentry;
}
static inline struct dentry *
ecryptfs_dentry_to_lower(struct dentry *dentry)
{
- return ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path.dentry;
+ return dentry->d_fsdata;
}
-static inline const struct path *
-ecryptfs_dentry_to_lower_path(struct dentry *dentry)
+static inline struct path
+ecryptfs_lower_path(struct dentry *dentry)
{
- return &((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path;
+ return (struct path){
+ .mnt = ecryptfs_superblock_to_private(dentry->d_sb)->lower_mnt,
+ .dentry = ecryptfs_dentry_to_lower(dentry)
+ };
}
#define ecryptfs_printk(type, fmt, arg...) \
@@ -532,7 +524,6 @@ extern unsigned int ecryptfs_number_of_users;
extern struct kmem_cache *ecryptfs_auth_tok_list_item_cache;
extern struct kmem_cache *ecryptfs_file_info_cache;
-extern struct kmem_cache *ecryptfs_dentry_info_cache;
extern struct kmem_cache *ecryptfs_inode_info_cache;
extern struct kmem_cache *ecryptfs_sb_info_cache;
extern struct kmem_cache *ecryptfs_header_cache;
@@ -557,13 +548,12 @@ int ecryptfs_encrypt_and_encode_filename(
size_t *encoded_name_size,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
const char *name, size_t name_size);
-struct dentry *ecryptfs_lower_dentry(struct dentry *this_dentry);
void ecryptfs_dump_hex(char *data, int bytes);
int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
int sg_size);
int ecryptfs_compute_root_iv(struct ecryptfs_crypt_stat *crypt_stat);
void ecryptfs_rotate_iv(unsigned char *iv);
-int ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat);
+void ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat);
void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat);
void ecryptfs_destroy_mount_crypt_stat(
struct ecryptfs_mount_crypt_stat *mount_crypt_stat);
@@ -698,8 +688,8 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
char *data, size_t max_packet_size);
int ecryptfs_set_f_namelen(long *namelen, long lower_namelen,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat);
-int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
- loff_t offset);
+void ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
+ loff_t offset);
extern const struct xattr_handler * const ecryptfs_xattr_handlers[];
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index ce0a3c5ed0ca..7929411837cf 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -33,13 +33,12 @@ static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb,
struct iov_iter *to)
{
ssize_t rc;
- const struct path *path;
struct file *file = iocb->ki_filp;
rc = generic_file_read_iter(iocb, to);
if (rc >= 0) {
- path = ecryptfs_dentry_to_lower_path(file->f_path.dentry);
- touch_atime(path);
+ struct path path = ecryptfs_lower_path(file->f_path.dentry);
+ touch_atime(&path);
}
return rc;
}
@@ -59,12 +58,11 @@ static ssize_t ecryptfs_splice_read_update_atime(struct file *in, loff_t *ppos,
size_t len, unsigned int flags)
{
ssize_t rc;
- const struct path *path;
rc = filemap_splice_read(in, ppos, pipe, len, flags);
if (rc >= 0) {
- path = ecryptfs_dentry_to_lower_path(in->f_path.dentry);
- touch_atime(path);
+ struct path path = ecryptfs_lower_path(in->f_path.dentry);
+ touch_atime(&path);
}
return rc;
}
@@ -193,7 +191,7 @@ static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
* natively. If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
* allows recursive mounting, this will need to be extended.
*/
- if (!lower_file->f_op->mmap)
+ if (!can_mmap_file(lower_file))
return -ENODEV;
return generic_file_mmap(file, vma);
}
@@ -283,6 +281,7 @@ static int ecryptfs_dir_open(struct inode *inode, struct file *file)
* ecryptfs_lookup() */
struct ecryptfs_file_info *file_info;
struct file *lower_file;
+ struct path path;
/* Released in ecryptfs_release or end of function if failure */
file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
@@ -292,8 +291,8 @@ static int ecryptfs_dir_open(struct inode *inode, struct file *file)
"Error attempting to allocate memory\n");
return -ENOMEM;
}
- lower_file = dentry_open(ecryptfs_dentry_to_lower_path(ecryptfs_dentry),
- file->f_flags, current_cred());
+ path = ecryptfs_lower_path(ecryptfs_dentry);
+ lower_file = dentry_open(&path, file->f_flags, current_cred());
if (IS_ERR(lower_file)) {
printk(KERN_ERR "%s: Error attempting to initialize "
"the lower file for the dentry with name "
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index a9819ddb1ab8..3978248247dc 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -24,18 +24,26 @@
#include <linux/unaligned.h>
#include "ecryptfs_kernel.h"
-static int lock_parent(struct dentry *dentry,
- struct dentry **lower_dentry,
- struct inode **lower_dir)
+static struct dentry *ecryptfs_start_creating_dentry(struct dentry *dentry)
{
- struct dentry *lower_dir_dentry;
+ struct dentry *parent = dget_parent(dentry);
+ struct dentry *ret;
- lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent);
- *lower_dir = d_inode(lower_dir_dentry);
- *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+ ret = start_creating_dentry(ecryptfs_dentry_to_lower(parent),
+ ecryptfs_dentry_to_lower(dentry));
+ dput(parent);
+ return ret;
+}
- inode_lock_nested(*lower_dir, I_MUTEX_PARENT);
- return (*lower_dentry)->d_parent == lower_dir_dentry ? 0 : -EINVAL;
+static struct dentry *ecryptfs_start_removing_dentry(struct dentry *dentry)
+{
+ struct dentry *parent = dget_parent(dentry);
+ struct dentry *ret;
+
+ ret = start_removing_dentry(ecryptfs_dentry_to_lower(parent),
+ ecryptfs_dentry_to_lower(dentry));
+ dput(parent);
+ return ret;
}
static int ecryptfs_inode_test(struct inode *inode, void *lower_inode)
@@ -95,7 +103,7 @@ static struct inode *__ecryptfs_get_inode(struct inode *lower_inode,
iput(lower_inode);
return ERR_PTR(-EACCES);
}
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
iput(lower_inode);
return inode;
@@ -106,7 +114,7 @@ struct inode *ecryptfs_get_inode(struct inode *lower_inode,
{
struct inode *inode = __ecryptfs_get_inode(lower_inode, sb);
- if (!IS_ERR(inode) && (inode->i_state & I_NEW))
+ if (!IS_ERR(inode) && (inode_state_read_once(inode) & I_NEW))
unlock_new_inode(inode);
return inode;
@@ -141,15 +149,12 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
struct inode *lower_dir;
int rc;
- rc = lock_parent(dentry, &lower_dentry, &lower_dir);
- dget(lower_dentry); // don't even try to make the lower negative
- if (!rc) {
- if (d_unhashed(lower_dentry))
- rc = -EINVAL;
- else
- rc = vfs_unlink(&nop_mnt_idmap, lower_dir, lower_dentry,
- NULL);
- }
+ lower_dentry = ecryptfs_start_removing_dentry(dentry);
+ if (IS_ERR(lower_dentry))
+ return PTR_ERR(lower_dentry);
+
+ lower_dir = lower_dentry->d_parent->d_inode;
+ rc = vfs_unlink(&nop_mnt_idmap, lower_dir, lower_dentry, NULL);
if (rc) {
printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
goto out_unlock;
@@ -158,8 +163,7 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink);
inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
out_unlock:
- dput(lower_dentry);
- inode_unlock(lower_dir);
+ end_removing(lower_dentry);
if (!rc)
d_drop(dentry);
return rc;
@@ -186,10 +190,11 @@ ecryptfs_do_create(struct inode *directory_inode,
struct inode *lower_dir;
struct inode *inode;
- rc = lock_parent(ecryptfs_dentry, &lower_dentry, &lower_dir);
- if (!rc)
- rc = vfs_create(&nop_mnt_idmap, lower_dir,
- lower_dentry, mode, true);
+ lower_dentry = ecryptfs_start_creating_dentry(ecryptfs_dentry);
+ if (IS_ERR(lower_dentry))
+ return ERR_CAST(lower_dentry);
+ lower_dir = lower_dentry->d_parent->d_inode;
+ rc = vfs_create(&nop_mnt_idmap, lower_dentry, mode, NULL);
if (rc) {
printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
"rc = [%d]\n", __func__, rc);
@@ -205,7 +210,7 @@ ecryptfs_do_create(struct inode *directory_inode,
fsstack_copy_attr_times(directory_inode, lower_dir);
fsstack_copy_inode_size(directory_inode, lower_dir);
out_lock:
- inode_unlock(lower_dir);
+ end_creating(lower_dentry);
return inode;
}
@@ -327,24 +332,15 @@ static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode)
static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
struct dentry *lower_dentry)
{
- const struct path *path = ecryptfs_dentry_to_lower_path(dentry->d_parent);
+ struct dentry *lower_parent = ecryptfs_dentry_to_lower(dentry->d_parent);
struct inode *inode, *lower_inode;
- struct ecryptfs_dentry_info *dentry_info;
int rc = 0;
- dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
- if (!dentry_info) {
- dput(lower_dentry);
- return ERR_PTR(-ENOMEM);
- }
-
fsstack_copy_attr_atime(d_inode(dentry->d_parent),
- d_inode(path->dentry));
+ d_inode(lower_parent));
BUG_ON(!d_count(lower_dentry));
- ecryptfs_set_dentry_private(dentry, dentry_info);
- dentry_info->lower_path.mnt = mntget(path->mnt);
- dentry_info->lower_path.dentry = lower_dentry;
+ ecryptfs_set_dentry_lower(dentry, lower_dentry);
/*
* negative dentry can go positive under us here - its parent is not
@@ -373,7 +369,7 @@ static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
}
}
- if (inode->i_state & I_NEW)
+ if (inode_state_read_once(inode) & I_NEW)
unlock_new_inode(inode);
return d_splice_alias(inode, dentry);
}
@@ -394,8 +390,8 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
char *encrypted_and_encoded_name = NULL;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
struct dentry *lower_dir_dentry, *lower_dentry;
- const char *name = ecryptfs_dentry->d_name.name;
- size_t len = ecryptfs_dentry->d_name.len;
+ struct qstr qname = QSTR_INIT(ecryptfs_dentry->d_name.name,
+ ecryptfs_dentry->d_name.len);
struct dentry *res;
int rc = 0;
@@ -404,23 +400,25 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
mount_crypt_stat = &ecryptfs_superblock_to_private(
ecryptfs_dentry->d_sb)->mount_crypt_stat;
if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
+ size_t len = qname.len;
rc = ecryptfs_encrypt_and_encode_filename(
&encrypted_and_encoded_name, &len,
- mount_crypt_stat, name, len);
+ mount_crypt_stat, qname.name, len);
if (rc) {
printk(KERN_ERR "%s: Error attempting to encrypt and encode "
"filename; rc = [%d]\n", __func__, rc);
return ERR_PTR(rc);
}
- name = encrypted_and_encoded_name;
+ qname.name = encrypted_and_encoded_name;
+ qname.len = len;
}
- lower_dentry = lookup_one_len_unlocked(name, lower_dir_dentry, len);
+ lower_dentry = lookup_noperm_unlocked(&qname, lower_dir_dentry);
if (IS_ERR(lower_dentry)) {
- ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
+ ecryptfs_printk(KERN_DEBUG, "%s: lookup_noperm() returned "
"[%ld] on lower_dentry = [%s]\n", __func__,
PTR_ERR(lower_dentry),
- name);
+ qname.name);
res = ERR_CAST(lower_dentry);
} else {
res = ecryptfs_lookup_interpose(ecryptfs_dentry, lower_dentry);
@@ -440,10 +438,12 @@ static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir,
file_size_save = i_size_read(d_inode(old_dentry));
lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
- rc = lock_parent(new_dentry, &lower_new_dentry, &lower_dir);
- if (!rc)
- rc = vfs_link(lower_old_dentry, &nop_mnt_idmap, lower_dir,
- lower_new_dentry, NULL);
+ lower_new_dentry = ecryptfs_start_creating_dentry(new_dentry);
+ if (IS_ERR(lower_new_dentry))
+ return PTR_ERR(lower_new_dentry);
+ lower_dir = lower_new_dentry->d_parent->d_inode;
+ rc = vfs_link(lower_old_dentry, &nop_mnt_idmap, lower_dir,
+ lower_new_dentry, NULL);
if (rc || d_really_is_negative(lower_new_dentry))
goto out_lock;
rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb);
@@ -455,7 +455,7 @@ static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir,
ecryptfs_inode_to_lower(d_inode(old_dentry))->i_nlink);
i_size_write(d_inode(new_dentry), file_size_save);
out_lock:
- inode_unlock(lower_dir);
+ end_creating(lower_new_dentry);
return rc;
}
@@ -475,9 +475,11 @@ static int ecryptfs_symlink(struct mnt_idmap *idmap,
size_t encoded_symlen;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL;
- rc = lock_parent(dentry, &lower_dentry, &lower_dir);
- if (rc)
- goto out_lock;
+ lower_dentry = ecryptfs_start_creating_dentry(dentry);
+ if (IS_ERR(lower_dentry))
+ return PTR_ERR(lower_dentry);
+ lower_dir = lower_dentry->d_parent->d_inode;
+
mount_crypt_stat = &ecryptfs_superblock_to_private(
dir->i_sb)->mount_crypt_stat;
rc = ecryptfs_encrypt_and_encode_filename(&encoded_symname,
@@ -487,7 +489,7 @@ static int ecryptfs_symlink(struct mnt_idmap *idmap,
if (rc)
goto out_lock;
rc = vfs_symlink(&nop_mnt_idmap, lower_dir, lower_dentry,
- encoded_symname);
+ encoded_symname, NULL);
kfree(encoded_symname);
if (rc || d_really_is_negative(lower_dentry))
goto out_lock;
@@ -497,24 +499,32 @@ static int ecryptfs_symlink(struct mnt_idmap *idmap,
fsstack_copy_attr_times(dir, lower_dir);
fsstack_copy_inode_size(dir, lower_dir);
out_lock:
- inode_unlock(lower_dir);
+ end_creating(lower_dentry);
if (d_really_is_negative(dentry))
d_drop(dentry);
return rc;
}
-static int ecryptfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *ecryptfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int rc;
struct dentry *lower_dentry;
+ struct dentry *lower_dir_dentry;
struct inode *lower_dir;
- rc = lock_parent(dentry, &lower_dentry, &lower_dir);
- if (!rc)
- rc = vfs_mkdir(&nop_mnt_idmap, lower_dir,
- lower_dentry, mode);
- if (rc || d_really_is_negative(lower_dentry))
+ lower_dentry = ecryptfs_start_creating_dentry(dentry);
+ if (IS_ERR(lower_dentry))
+ return lower_dentry;
+ lower_dir_dentry = dget(lower_dentry->d_parent);
+ lower_dir = lower_dir_dentry->d_inode;
+ lower_dentry = vfs_mkdir(&nop_mnt_idmap, lower_dir,
+ lower_dentry, mode, NULL);
+ rc = PTR_ERR(lower_dentry);
+ if (IS_ERR(lower_dentry))
+ goto out;
+ rc = 0;
+ if (d_unhashed(lower_dentry))
goto out;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
if (rc)
@@ -523,10 +533,10 @@ static int ecryptfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
fsstack_copy_inode_size(dir, lower_dir);
set_nlink(dir, lower_dir->i_nlink);
out:
- inode_unlock(lower_dir);
+ end_creating(lower_dentry);
if (d_really_is_negative(dentry))
d_drop(dentry);
- return rc;
+ return ERR_PTR(rc);
}
static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
@@ -535,21 +545,18 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
struct inode *lower_dir;
int rc;
- rc = lock_parent(dentry, &lower_dentry, &lower_dir);
- dget(lower_dentry); // don't even try to make the lower negative
- if (!rc) {
- if (d_unhashed(lower_dentry))
- rc = -EINVAL;
- else
- rc = vfs_rmdir(&nop_mnt_idmap, lower_dir, lower_dentry);
- }
+ lower_dentry = ecryptfs_start_removing_dentry(dentry);
+ if (IS_ERR(lower_dentry))
+ return PTR_ERR(lower_dentry);
+ lower_dir = lower_dentry->d_parent->d_inode;
+
+ rc = vfs_rmdir(&nop_mnt_idmap, lower_dir, lower_dentry, NULL);
if (!rc) {
clear_nlink(d_inode(dentry));
fsstack_copy_attr_times(dir, lower_dir);
set_nlink(dir, lower_dir->i_nlink);
}
- dput(lower_dentry);
- inode_unlock(lower_dir);
+ end_removing(lower_dentry);
if (!rc)
d_drop(dentry);
return rc;
@@ -563,10 +570,12 @@ ecryptfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *lower_dentry;
struct inode *lower_dir;
- rc = lock_parent(dentry, &lower_dentry, &lower_dir);
- if (!rc)
- rc = vfs_mknod(&nop_mnt_idmap, lower_dir,
- lower_dentry, mode, dev);
+ lower_dentry = ecryptfs_start_creating_dentry(dentry);
+ if (IS_ERR(lower_dentry))
+ return PTR_ERR(lower_dentry);
+ lower_dir = lower_dentry->d_parent->d_inode;
+
+ rc = vfs_mknod(&nop_mnt_idmap, lower_dir, lower_dentry, mode, dev, NULL);
if (rc || d_really_is_negative(lower_dentry))
goto out;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
@@ -575,7 +584,7 @@ ecryptfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
fsstack_copy_attr_times(dir, lower_dir);
fsstack_copy_inode_size(dir, lower_dir);
out:
- inode_unlock(lower_dir);
+ end_creating(lower_dentry);
if (d_really_is_negative(dentry))
d_drop(dentry);
return rc;
@@ -591,7 +600,6 @@ ecryptfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *lower_new_dentry;
struct dentry *lower_old_dir_dentry;
struct dentry *lower_new_dir_dentry;
- struct dentry *trap;
struct inode *target_inode;
struct renamedata rd = {};
@@ -606,32 +614,13 @@ ecryptfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
target_inode = d_inode(new_dentry);
- trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
- if (IS_ERR(trap))
- return PTR_ERR(trap);
- dget(lower_new_dentry);
- rc = -EINVAL;
- if (lower_old_dentry->d_parent != lower_old_dir_dentry)
- goto out_lock;
- if (lower_new_dentry->d_parent != lower_new_dir_dentry)
- goto out_lock;
- if (d_unhashed(lower_old_dentry) || d_unhashed(lower_new_dentry))
- goto out_lock;
- /* source should not be ancestor of target */
- if (trap == lower_old_dentry)
- goto out_lock;
- /* target should not be ancestor of source */
- if (trap == lower_new_dentry) {
- rc = -ENOTEMPTY;
- goto out_lock;
- }
+ rd.mnt_idmap = &nop_mnt_idmap;
+ rd.old_parent = lower_old_dir_dentry;
+ rd.new_parent = lower_new_dir_dentry;
+ rc = start_renaming_two_dentries(&rd, lower_old_dentry, lower_new_dentry);
+ if (rc)
+ return rc;
- rd.old_mnt_idmap = &nop_mnt_idmap;
- rd.old_dir = d_inode(lower_old_dir_dentry);
- rd.old_dentry = lower_old_dentry;
- rd.new_mnt_idmap = &nop_mnt_idmap;
- rd.new_dir = d_inode(lower_new_dir_dentry);
- rd.new_dentry = lower_new_dentry;
rc = vfs_rename(&rd);
if (rc)
goto out_lock;
@@ -642,8 +631,7 @@ ecryptfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (new_dir != old_dir)
fsstack_copy_attr_all(old_dir, d_inode(lower_old_dir_dentry));
out_lock:
- dput(lower_new_dentry);
- unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ end_renaming(&rd);
return rc;
}
@@ -905,11 +893,8 @@ static int ecryptfs_setattr(struct mnt_idmap *idmap,
struct ecryptfs_crypt_stat *crypt_stat;
crypt_stat = &ecryptfs_inode_to_private(d_inode(dentry))->crypt_stat;
- if (!(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)) {
- rc = ecryptfs_init_crypt_stat(crypt_stat);
- if (rc)
- return rc;
- }
+ if (!(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
+ ecryptfs_init_crypt_stat(crypt_stat);
inode = d_inode(dentry);
lower_inode = ecryptfs_inode_to_lower(inode);
lower_dentry = ecryptfs_dentry_to_lower(dentry);
@@ -1014,10 +999,10 @@ static int ecryptfs_getattr(struct mnt_idmap *idmap,
{
struct dentry *dentry = path->dentry;
struct kstat lower_stat;
+ struct path lower_path = ecryptfs_lower_path(dentry);
int rc;
- rc = vfs_getattr_nosec(ecryptfs_dentry_to_lower_path(dentry),
- &lower_stat, request_mask, flags);
+ rc = vfs_getattr_nosec(&lower_path, &lower_stat, request_mask, flags);
if (!rc) {
fsstack_copy_attr_all(d_inode(dentry),
ecryptfs_inode_to_lower(d_inode(dentry)));
@@ -1116,13 +1101,13 @@ out:
return rc;
}
-static int ecryptfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+static int ecryptfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
return vfs_fileattr_get(ecryptfs_dentry_to_lower(dentry), fa);
}
static int ecryptfs_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa)
+ struct dentry *dentry, struct file_kattr *fa)
{
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
int rc;
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 7f9f68c00ef6..bbf8603242fa 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -11,7 +11,6 @@
* Trevor S. Highland <trevor.highland@gmail.com>
*/
-#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/string.h>
#include <linux/pagemap.h>
@@ -601,10 +600,7 @@ struct ecryptfs_write_tag_70_packet_silly_stack {
struct crypto_skcipher *skcipher_tfm;
struct skcipher_request *skcipher_req;
char iv[ECRYPTFS_MAX_IV_BYTES];
- char hash[ECRYPTFS_TAG_70_DIGEST_SIZE];
- char tmp_hash[ECRYPTFS_TAG_70_DIGEST_SIZE];
- struct crypto_shash *hash_tfm;
- struct shash_desc *hash_desc;
+ char hash[MD5_DIGEST_SIZE];
};
/*
@@ -741,51 +737,15 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
"password tokens\n", __func__);
goto out_free_unlock;
}
- s->hash_tfm = crypto_alloc_shash(ECRYPTFS_TAG_70_DIGEST, 0, 0);
- if (IS_ERR(s->hash_tfm)) {
- rc = PTR_ERR(s->hash_tfm);
- printk(KERN_ERR "%s: Error attempting to "
- "allocate hash crypto context; rc = [%d]\n",
- __func__, rc);
- goto out_free_unlock;
- }
-
- s->hash_desc = kmalloc(sizeof(*s->hash_desc) +
- crypto_shash_descsize(s->hash_tfm), GFP_KERNEL);
- if (!s->hash_desc) {
- rc = -ENOMEM;
- goto out_release_free_unlock;
- }
- s->hash_desc->tfm = s->hash_tfm;
-
- rc = crypto_shash_digest(s->hash_desc,
- (u8 *)s->auth_tok->token.password.session_key_encryption_key,
- s->auth_tok->token.password.session_key_encryption_key_bytes,
- s->hash);
- if (rc) {
- printk(KERN_ERR
- "%s: Error computing crypto hash; rc = [%d]\n",
- __func__, rc);
- goto out_release_free_unlock;
- }
+ md5(s->auth_tok->token.password.session_key_encryption_key,
+ s->auth_tok->token.password.session_key_encryption_key_bytes,
+ s->hash);
for (s->j = 0; s->j < (s->num_rand_bytes - 1); s->j++) {
s->block_aligned_filename[s->j] =
- s->hash[(s->j % ECRYPTFS_TAG_70_DIGEST_SIZE)];
- if ((s->j % ECRYPTFS_TAG_70_DIGEST_SIZE)
- == (ECRYPTFS_TAG_70_DIGEST_SIZE - 1)) {
- rc = crypto_shash_digest(s->hash_desc, (u8 *)s->hash,
- ECRYPTFS_TAG_70_DIGEST_SIZE,
- s->tmp_hash);
- if (rc) {
- printk(KERN_ERR
- "%s: Error computing crypto hash; "
- "rc = [%d]\n", __func__, rc);
- goto out_release_free_unlock;
- }
- memcpy(s->hash, s->tmp_hash,
- ECRYPTFS_TAG_70_DIGEST_SIZE);
- }
+ s->hash[s->j % MD5_DIGEST_SIZE];
+ if ((s->j % MD5_DIGEST_SIZE) == (MD5_DIGEST_SIZE - 1))
+ md5(s->hash, MD5_DIGEST_SIZE, s->hash);
if (s->block_aligned_filename[s->j] == '\0')
s->block_aligned_filename[s->j] = ECRYPTFS_NON_NULL;
}
@@ -798,7 +758,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
"convert filename memory to scatterlist; rc = [%d]. "
"block_aligned_filename_size = [%zd]\n", __func__, rc,
s->block_aligned_filename_size);
- goto out_release_free_unlock;
+ goto out_free_unlock;
}
rc = virt_to_scatterlist(&dest[s->i], s->block_aligned_filename_size,
s->dst_sg, 2);
@@ -807,7 +767,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
"convert encrypted filename memory to scatterlist; "
"rc = [%d]. block_aligned_filename_size = [%zd]\n",
__func__, rc, s->block_aligned_filename_size);
- goto out_release_free_unlock;
+ goto out_free_unlock;
}
/* The characters in the first block effectively do the job
* of the IV here, so we just use 0's for the IV. Note the
@@ -825,7 +785,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
rc,
s->auth_tok->token.password.session_key_encryption_key,
mount_crypt_stat->global_default_fn_cipher_key_bytes);
- goto out_release_free_unlock;
+ goto out_free_unlock;
}
skcipher_request_set_crypt(s->skcipher_req, s->src_sg, s->dst_sg,
s->block_aligned_filename_size, s->iv);
@@ -833,13 +793,11 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
if (rc) {
printk(KERN_ERR "%s: Error attempting to encrypt filename; "
"rc = [%d]\n", __func__, rc);
- goto out_release_free_unlock;
+ goto out_free_unlock;
}
s->i += s->block_aligned_filename_size;
(*packet_size) = s->i;
(*remaining_bytes) -= (*packet_size);
-out_release_free_unlock:
- crypto_free_shash(s->hash_tfm);
out_free_unlock:
kfree_sensitive(s->block_aligned_filename);
out_unlock:
@@ -850,7 +808,6 @@ out:
key_put(auth_tok_key);
}
skcipher_request_free(s->skcipher_req);
- kfree_sensitive(s->hash_desc);
kfree(s);
return rc;
}
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 8dd1d7189c3b..c12dc680f8fe 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -12,6 +12,7 @@
#include <linux/dcache.h>
#include <linux/file.h>
+#include <linux/fips.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/skbuff.h>
@@ -20,6 +21,7 @@
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/fs_stack.h>
+#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include "ecryptfs_kernel.h"
@@ -105,15 +107,14 @@ static int ecryptfs_init_lower_file(struct dentry *dentry,
struct file **lower_file)
{
const struct cred *cred = current_cred();
- const struct path *path = ecryptfs_dentry_to_lower_path(dentry);
+ struct path path = ecryptfs_lower_path(dentry);
int rc;
- rc = ecryptfs_privileged_open(lower_file, path->dentry, path->mnt,
- cred);
+ rc = ecryptfs_privileged_open(lower_file, path.dentry, path.mnt, cred);
if (rc) {
printk(KERN_ERR "Error opening lower file "
"for lower_dentry [0x%p] and lower_mnt [0x%p]; "
- "rc = [%d]\n", path->dentry, path->mnt, rc);
+ "rc = [%d]\n", path.dentry, path.mnt, rc);
(*lower_file) = NULL;
}
return rc;
@@ -436,7 +437,6 @@ static int ecryptfs_get_tree(struct fs_context *fc)
struct ecryptfs_fs_context *ctx = fc->fs_private;
struct ecryptfs_sb_info *sbi = fc->s_fs_info;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
- struct ecryptfs_dentry_info *root_info;
const char *err = "Getting sb failed";
struct inode *inode;
struct path path;
@@ -455,6 +455,12 @@ static int ecryptfs_get_tree(struct fs_context *fc)
goto out;
}
+ if (fips_enabled) {
+ rc = -EINVAL;
+ err = "eCryptfs support is disabled due to FIPS";
+ goto out;
+ }
+
s = sget_fc(fc, NULL, set_anon_super_fc);
if (IS_ERR(s)) {
rc = PTR_ERR(s);
@@ -471,7 +477,7 @@ static int ecryptfs_get_tree(struct fs_context *fc)
sbi = NULL;
s->s_op = &ecryptfs_sops;
s->s_xattr = ecryptfs_xattr_handlers;
- s->s_d_op = &ecryptfs_dops;
+ set_default_d_op(s, &ecryptfs_dops);
err = "Reading sb failed";
rc = kern_path(fc->source, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
@@ -542,14 +548,8 @@ static int ecryptfs_get_tree(struct fs_context *fc)
goto out_free;
}
- rc = -ENOMEM;
- root_info = kmem_cache_zalloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
- if (!root_info)
- goto out_free;
-
- /* ->kill_sb() will take care of root_info */
- ecryptfs_set_dentry_private(s->s_root, root_info);
- root_info->lower_path = path;
+ ecryptfs_set_dentry_lower(s->s_root, path.dentry);
+ ecryptfs_superblock_to_private(s)->lower_mnt = path.mnt;
s->s_flags |= SB_ACTIVE;
fc->root = dget(s->s_root);
@@ -579,6 +579,7 @@ static void ecryptfs_kill_block_super(struct super_block *sb)
kill_anon_super(sb);
if (!sb_info)
return;
+ mntput(sb_info->lower_mnt);
ecryptfs_destroy_mount_crypt_stat(&sb_info->mount_crypt_stat);
kmem_cache_free(ecryptfs_sb_info_cache, sb_info);
}
@@ -667,11 +668,6 @@ static struct ecryptfs_cache_info {
.size = sizeof(struct ecryptfs_file_info),
},
{
- .cache = &ecryptfs_dentry_info_cache,
- .name = "ecryptfs_dentry_info_cache",
- .size = sizeof(struct ecryptfs_dentry_info),
- },
- {
.cache = &ecryptfs_inode_info_cache,
.name = "ecryptfs_inode_cache",
.size = sizeof(struct ecryptfs_inode_info),
@@ -764,7 +760,7 @@ static struct kobject *ecryptfs_kobj;
static ssize_t version_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buff)
{
- return snprintf(buff, PAGE_SIZE, "%d\n", ECRYPTFS_VERSIONING_MASK);
+ return sysfs_emit(buff, "%d\n", ECRYPTFS_VERSIONING_MASK);
}
static struct kobj_attribute version_attr = __ATTR_RO(version);
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 60f0ac8744b5..2c2b12fedeae 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -228,7 +228,7 @@ out:
/**
* ecryptfs_write_begin
- * @file: The eCryptfs file
+ * @iocb: I/O control block for the eCryptfs file
* @mapping: The eCryptfs object
* @pos: The file offset at which to start writing
* @len: Length of the write
@@ -239,7 +239,7 @@ out:
*
* Returns zero on success; non-zero otherwise
*/
-static int ecryptfs_write_begin(struct file *file,
+static int ecryptfs_write_begin(const struct kiocb *iocb,
struct address_space *mapping,
loff_t pos, unsigned len,
struct folio **foliop, void **fsdata)
@@ -322,7 +322,7 @@ static int ecryptfs_write_begin(struct file *file,
* Note, this will increase i_size. */
if (index != 0) {
if (prev_page_end_size > i_size_read(mapping->host)) {
- rc = ecryptfs_truncate(file->f_path.dentry,
+ rc = ecryptfs_truncate(iocb->ki_filp->f_path.dentry,
prev_page_end_size);
if (rc) {
printk(KERN_ERR "%s: Error on attempt to "
@@ -429,7 +429,7 @@ int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode)
/**
* ecryptfs_write_end
- * @file: The eCryptfs file object
+ * @iocb: I/O control block for the eCryptfs file
* @mapping: The eCryptfs object
* @pos: The file position
* @len: The length of the data (unused)
@@ -437,7 +437,7 @@ int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode)
* @folio: The eCryptfs folio
* @fsdata: The fsdata (unused)
*/
-static int ecryptfs_write_end(struct file *file,
+static int ecryptfs_write_end(const struct kiocb *iocb,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct folio *folio, void *fsdata)
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 0b1c878317ab..3bc21d677564 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -41,10 +41,7 @@ static struct inode *ecryptfs_alloc_inode(struct super_block *sb)
inode_info = alloc_inode_sb(sb, ecryptfs_inode_info_cache, GFP_KERNEL);
if (unlikely(!inode_info))
goto out;
- if (ecryptfs_init_crypt_stat(&inode_info->crypt_stat)) {
- kmem_cache_free(ecryptfs_inode_info_cache, inode_info);
- goto out;
- }
+ ecryptfs_init_crypt_stat(&inode_info->crypt_stat);
mutex_init(&inode_info->lower_file_mutex);
atomic_set(&inode_info->lower_file_count, 0);
inode_info->lower_file = NULL;
@@ -172,7 +169,6 @@ const struct super_operations ecryptfs_sops = {
.destroy_inode = ecryptfs_destroy_inode,
.free_inode = ecryptfs_free_inode,
.statfs = ecryptfs_statfs,
- .remount_fs = NULL,
.evict_inode = ecryptfs_evict_inode,
.show_options = ecryptfs_show_options
};
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index 23c51d62f902..cb1b6d0c3454 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -36,28 +36,41 @@ static ssize_t efivarfs_file_write(struct file *file,
if (IS_ERR(data))
return PTR_ERR(data);
+ inode_lock(inode);
+ if (var->removed) {
+ /*
+ * file got removed; don't allow a set. Caused by an
+ * unsuccessful create or successful delete write
+ * racing with us.
+ */
+ bytes = -EIO;
+ goto out;
+ }
+
bytes = efivar_entry_set_get_size(var, attributes, &datasize,
data, &set);
- if (!set && bytes) {
+ if (!set) {
if (bytes == -ENOENT)
bytes = -EIO;
goto out;
}
if (bytes == -ENOENT) {
- drop_nlink(inode);
- d_delete(file->f_path.dentry);
- dput(file->f_path.dentry);
+ /*
+ * zero size signals to release that the write deleted
+ * the variable
+ */
+ i_size_write(inode, 0);
} else {
- inode_lock(inode);
i_size_write(inode, datasize + sizeof(attributes));
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
- inode_unlock(inode);
}
bytes = count;
out:
+ inode_unlock(inode);
+
kfree(data);
return bytes;
@@ -106,8 +119,36 @@ out_free:
return size;
}
+static int efivarfs_file_release(struct inode *inode, struct file *file)
+{
+ struct efivar_entry *var = inode->i_private;
+
+ inode_lock(inode);
+ var->removed = (--var->open_count == 0 && i_size_read(inode) == 0);
+ inode_unlock(inode);
+
+ if (var->removed)
+ simple_recursive_removal(file->f_path.dentry, NULL);
+
+ return 0;
+}
+
+static int efivarfs_file_open(struct inode *inode, struct file *file)
+{
+ struct efivar_entry *entry = inode->i_private;
+
+ file->private_data = entry;
+
+ inode_lock(inode);
+ entry->open_count++;
+ inode_unlock(inode);
+
+ return 0;
+}
+
const struct file_operations efivarfs_file_operations = {
- .open = simple_open,
- .read = efivarfs_file_read,
- .write = efivarfs_file_write,
+ .open = efivarfs_file_open,
+ .read = efivarfs_file_read,
+ .write = efivarfs_file_write,
+ .release = efivarfs_file_release,
};
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index 586446e02ef7..95dcad83da11 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -51,7 +51,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
*
* VariableName-12345678-1234-1234-1234-1234567891bc
*/
-bool efivarfs_valid_name(const char *str, int len)
+static bool efivarfs_valid_name(const char *str, int len)
{
const char *s = str + len - EFI_VARIABLE_GUID_LEN;
@@ -77,39 +77,34 @@ bool efivarfs_valid_name(const char *str, int len)
static int efivarfs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
- struct efivarfs_fs_info *info = dir->i_sb->s_fs_info;
struct inode *inode = NULL;
struct efivar_entry *var;
int namelen, i = 0, err = 0;
bool is_removable = false;
+ efi_guid_t vendor;
if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
return -EINVAL;
- var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
- if (!var)
- return -ENOMEM;
-
/* length of the variable name itself: remove GUID and separator */
namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
- err = guid_parse(dentry->d_name.name + namelen + 1, &var->var.VendorGuid);
+ err = guid_parse(dentry->d_name.name + namelen + 1, &vendor);
if (err)
- goto out;
- if (guid_equal(&var->var.VendorGuid, &LINUX_EFI_RANDOM_SEED_TABLE_GUID)) {
- err = -EPERM;
- goto out;
- }
+ return err;
+ if (guid_equal(&vendor, &LINUX_EFI_RANDOM_SEED_TABLE_GUID))
+ return -EPERM;
- if (efivar_variable_is_removable(var->var.VendorGuid,
+ if (efivar_variable_is_removable(vendor,
dentry->d_name.name, namelen))
is_removable = true;
inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable);
- if (!inode) {
- err = -ENOMEM;
- goto out;
- }
+ if (!inode)
+ return -ENOMEM;
+ var = efivar_entry(inode);
+
+ var->var.VendorGuid = vendor;
for (i = 0; i < namelen; i++)
var->var.VariableName[i] = dentry->d_name.name[i];
@@ -117,21 +112,10 @@ static int efivarfs_create(struct mnt_idmap *idmap, struct inode *dir,
var->var.VariableName[i] = '\0';
inode->i_private = var;
- kmemleak_ignore(var);
- err = efivar_entry_add(var, &info->efivarfs_list);
- if (err)
- goto out;
-
- d_instantiate(dentry, inode);
- dget(dentry);
-out:
- if (err) {
- kfree(var);
- if (inode)
- iput(inode);
- }
- return err;
+ d_make_persistent(dentry, inode);
+
+ return 0;
}
static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
@@ -141,9 +125,7 @@ static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
if (efivar_entry_delete(var))
return -EINVAL;
- drop_nlink(d_inode(dentry));
- dput(dentry);
- return 0;
+ return simple_unlink(dir, dentry);
};
const struct inode_operations efivarfs_dir_inode_operations = {
@@ -153,7 +135,7 @@ const struct inode_operations efivarfs_dir_inode_operations = {
};
static int
-efivarfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+efivarfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
unsigned int i_flags;
unsigned int flags = 0;
@@ -169,7 +151,7 @@ efivarfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
static int
efivarfs_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa)
+ struct dentry *dentry, struct file_kattr *fa)
{
unsigned int i_flags = 0;
@@ -187,7 +169,24 @@ efivarfs_fileattr_set(struct mnt_idmap *idmap,
return 0;
}
+/* copy of simple_setattr except that it doesn't do i_size updates */
+static int efivarfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *iattr)
+{
+ struct inode *inode = d_inode(dentry);
+ int error;
+
+ error = setattr_prepare(idmap, dentry, iattr);
+ if (error)
+ return error;
+
+ setattr_copy(idmap, inode, iattr);
+ mark_inode_dirty(inode);
+ return 0;
+}
+
static const struct inode_operations efivarfs_file_inode_operations = {
.fileattr_get = efivarfs_fileattr_get,
.fileattr_set = efivarfs_fileattr_set,
+ .setattr = efivarfs_setattr,
};
diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
index d71d2e08422f..f913b6824289 100644
--- a/fs/efivarfs/internal.h
+++ b/fs/efivarfs/internal.h
@@ -6,7 +6,6 @@
#ifndef EFIVAR_FS_INTERNAL_H
#define EFIVAR_FS_INTERNAL_H
-#include <linux/list.h>
#include <linux/efi.h>
struct efivarfs_mount_opts {
@@ -16,7 +15,6 @@ struct efivarfs_mount_opts {
struct efivarfs_fs_info {
struct efivarfs_mount_opts mount_opts;
- struct list_head efivarfs_list;
struct super_block *sb;
struct notifier_block nb;
};
@@ -24,22 +22,23 @@ struct efivarfs_fs_info {
struct efi_variable {
efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
efi_guid_t VendorGuid;
- __u32 Attributes;
};
struct efivar_entry {
struct efi_variable var;
- struct list_head list;
- struct kobject kobj;
+ struct inode vfs_inode;
+ unsigned long open_count;
+ bool removed;
};
-int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *,
- struct list_head *),
- void *data, struct list_head *head);
+static inline struct efivar_entry *efivar_entry(struct inode *inode)
+{
+ return container_of(inode, struct efivar_entry, vfs_inode);
+}
+
+int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+ void *data, bool duplicate_check);
-int efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
-void __efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
-void efivar_entry_remove(struct efivar_entry *entry);
int efivar_entry_delete(struct efivar_entry *entry);
int efivar_entry_size(struct efivar_entry *entry, unsigned long *size);
@@ -50,17 +49,17 @@ int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
unsigned long *size, void *data, bool *set);
-int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data);
bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
unsigned long data_size);
bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
size_t len);
+char *efivar_get_utf8name(const efi_char16_t *name16, efi_guid_t *vendor);
+bool efivarfs_variable_is_present(efi_char16_t *variable_name,
+ efi_guid_t *vendor, void *data);
extern const struct file_operations efivarfs_file_operations;
extern const struct inode_operations efivarfs_dir_inode_operations;
-extern bool efivarfs_valid_name(const char *str, int len);
extern struct inode *efivarfs_get_inode(struct super_block *sb,
const struct inode *dir, int mode, dev_t dev,
bool is_removable);
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index a929f1b613be..9da992925920 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -13,12 +13,15 @@
#include <linux/pagemap.h>
#include <linux/ucs2_string.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include <linux/magic.h>
#include <linux/statfs.h>
#include <linux/notifier.h>
#include <linux/printk.h>
+#include <linux/namei.h>
#include "internal.h"
+#include "../internal.h"
static int efivarfs_ops_notifier(struct notifier_block *nb, unsigned long event,
void *data)
@@ -39,9 +42,24 @@ static int efivarfs_ops_notifier(struct notifier_block *nb, unsigned long event,
return NOTIFY_OK;
}
-static void efivarfs_evict_inode(struct inode *inode)
+static struct inode *efivarfs_alloc_inode(struct super_block *sb)
{
- clear_inode(inode);
+ struct efivar_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+
+ if (!entry)
+ return NULL;
+
+ inode_init_once(&entry->vfs_inode);
+ entry->removed = false;
+
+ return &entry->vfs_inode;
+}
+
+static void efivarfs_free_inode(struct inode *inode)
+{
+ struct efivar_entry *entry = efivar_entry(inode);
+
+ kfree(entry);
}
static int efivarfs_show_options(struct seq_file *m, struct dentry *root)
@@ -103,11 +121,18 @@ static int efivarfs_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
+
+static int efivarfs_freeze_fs(struct super_block *sb);
+static int efivarfs_unfreeze_fs(struct super_block *sb);
+
static const struct super_operations efivarfs_ops = {
.statfs = efivarfs_statfs,
- .drop_inode = generic_delete_inode,
- .evict_inode = efivarfs_evict_inode,
+ .drop_inode = inode_just_drop,
+ .alloc_inode = efivarfs_alloc_inode,
+ .free_inode = efivarfs_free_inode,
.show_options = efivarfs_show_options,
+ .freeze_fs = efivarfs_freeze_fs,
+ .unfreeze_fs = efivarfs_unfreeze_fs,
};
/*
@@ -127,6 +152,10 @@ static int efivarfs_d_compare(const struct dentry *dentry,
{
int guid = len - EFI_VARIABLE_GUID_LEN;
+ /* Parallel lookups may produce a temporary invalid filename */
+ if (guid <= 0)
+ return 1;
+
if (name->len != len)
return 1;
@@ -144,9 +173,6 @@ static int efivarfs_d_hash(const struct dentry *dentry, struct qstr *qstr)
const unsigned char *s = qstr->name;
unsigned int len = qstr->len;
- if (!efivarfs_valid_name(s, len))
- return -EINVAL;
-
while (len-- > EFI_VARIABLE_GUID_LEN)
hash = partial_name_hash(*s++, hash);
@@ -161,7 +187,6 @@ static int efivarfs_d_hash(const struct dentry *dentry, struct qstr *qstr)
static const struct dentry_operations efivarfs_d_ops = {
.d_compare = efivarfs_d_compare,
.d_hash = efivarfs_d_hash,
- .d_delete = always_delete_dentry,
};
static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
@@ -184,56 +209,60 @@ static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
return ERR_PTR(-ENOMEM);
}
-static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
- unsigned long name_size, void *data,
- struct list_head *list)
+bool efivarfs_variable_is_present(efi_char16_t *variable_name,
+ efi_guid_t *vendor, void *data)
+{
+ char *name = efivar_get_utf8name(variable_name, vendor);
+ struct super_block *sb = data;
+ struct dentry *dentry;
+
+ if (!name)
+ /*
+ * If the allocation failed there'll already be an
+ * error in the log (and likely a huge and growing
+ * number of them since they system will be under
+ * extreme memory pressure), so simply assume
+ * collision for safety but don't add to the log
+ * flood.
+ */
+ return true;
+
+ dentry = try_lookup_noperm(&QSTR(name), sb->s_root);
+ kfree(name);
+ if (!IS_ERR_OR_NULL(dentry))
+ dput(dentry);
+
+ return dentry != NULL;
+}
+
+static int efivarfs_create_dentry(struct super_block *sb, efi_char16_t *name16,
+ unsigned long name_size, efi_guid_t vendor,
+ char *name)
{
- struct super_block *sb = (struct super_block *)data;
struct efivar_entry *entry;
- struct inode *inode = NULL;
+ struct inode *inode;
struct dentry *dentry, *root = sb->s_root;
unsigned long size = 0;
- char *name;
int len;
int err = -ENOMEM;
bool is_removable = false;
- if (guid_equal(&vendor, &LINUX_EFI_RANDOM_SEED_TABLE_GUID))
- return 0;
+ /* length of the variable name itself: remove GUID and separator */
+ len = strlen(name) - EFI_VARIABLE_GUID_LEN - 1;
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return err;
-
- memcpy(entry->var.VariableName, name16, name_size);
- memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
-
- len = ucs2_utf8size(entry->var.VariableName);
-
- /* name, plus '-', plus GUID, plus NUL*/
- name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
- if (!name)
- goto fail;
-
- ucs2_as_utf8(name, entry->var.VariableName, len);
-
- if (efivar_variable_is_removable(entry->var.VendorGuid, name, len))
+ if (efivar_variable_is_removable(vendor, name, len))
is_removable = true;
- name[len] = '-';
-
- efi_guid_to_str(&entry->var.VendorGuid, name + len + 1);
-
- name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
-
- /* replace invalid slashes like kobject_set_name_vargs does for /sys/firmware/efi/vars. */
- strreplace(name, '/', '!');
-
inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0,
is_removable);
if (!inode)
goto fail_name;
+ entry = efivar_entry(inode);
+
+ memcpy(entry->var.VariableName, name16, name_size);
+ memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
+
dentry = efivarfs_alloc_dentry(root, name);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
@@ -241,16 +270,16 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
}
__efivar_entry_get(entry, NULL, &size, NULL);
- __efivar_entry_add(entry, list);
/* copied by the above to local storage in the dentry. */
kfree(name);
inode_lock(inode);
inode->i_private = entry;
- i_size_write(inode, size + sizeof(entry->var.Attributes));
+ i_size_write(inode, size + sizeof(__u32)); /* attributes + data */
inode_unlock(inode);
- d_add(dentry, inode);
+ d_make_persistent(dentry, inode);
+ dput(dentry);
return 0;
@@ -258,16 +287,24 @@ fail_inode:
iput(inode);
fail_name:
kfree(name);
-fail:
- kfree(entry);
+
return err;
}
-static int efivarfs_destroy(struct efivar_entry *entry, void *data)
+static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
+ unsigned long name_size, void *data)
{
- efivar_entry_remove(entry);
- kfree(entry);
- return 0;
+ struct super_block *sb = (struct super_block *)data;
+ char *name;
+
+ if (guid_equal(&vendor, &LINUX_EFI_RANDOM_SEED_TABLE_GUID))
+ return 0;
+
+ name = efivar_get_utf8name(name16, &vendor);
+ if (!name)
+ return -ENOMEM;
+
+ return efivarfs_create_dentry(sb, name16, name_size, vendor, name);
}
enum {
@@ -317,7 +354,8 @@ static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = EFIVARFS_MAGIC;
sb->s_op = &efivarfs_ops;
- sb->s_d_op = &efivarfs_d_ops;
+ set_default_d_op(sb, &efivarfs_d_ops);
+ sb->s_d_flags |= DCACHE_DONTCACHE;
sb->s_time_gran = 1;
if (!efivar_supports_writes())
@@ -339,7 +377,7 @@ static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc)
if (err)
return err;
- return efivar_init(efivarfs_callback, sb, &sfi->efivarfs_list);
+ return efivar_init(efivarfs_callback, sb, true);
}
static int efivarfs_get_tree(struct fs_context *fc)
@@ -357,12 +395,109 @@ static int efivarfs_reconfigure(struct fs_context *fc)
return 0;
}
+static void efivarfs_free(struct fs_context *fc)
+{
+ kfree(fc->s_fs_info);
+}
+
static const struct fs_context_operations efivarfs_context_ops = {
.get_tree = efivarfs_get_tree,
.parse_param = efivarfs_parse_param,
.reconfigure = efivarfs_reconfigure,
+ .free = efivarfs_free,
};
+static int efivarfs_check_missing(efi_char16_t *name16, efi_guid_t vendor,
+ unsigned long name_size, void *data)
+{
+ char *name;
+ struct super_block *sb = data;
+ struct dentry *dentry;
+ int err;
+
+ if (guid_equal(&vendor, &LINUX_EFI_RANDOM_SEED_TABLE_GUID))
+ return 0;
+
+ name = efivar_get_utf8name(name16, &vendor);
+ if (!name)
+ return -ENOMEM;
+
+ dentry = try_lookup_noperm(&QSTR(name), sb->s_root);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out;
+ }
+
+ if (!dentry) {
+ /* found missing entry */
+ pr_info("efivarfs: creating variable %s\n", name);
+ return efivarfs_create_dentry(sb, name16, name_size, vendor, name);
+ }
+
+ dput(dentry);
+ err = 0;
+
+ out:
+ kfree(name);
+
+ return err;
+}
+
+static struct file_system_type efivarfs_type;
+
+static int efivarfs_freeze_fs(struct super_block *sb)
+{
+ /* Nothing for us to do. */
+ return 0;
+}
+
+static int efivarfs_unfreeze_fs(struct super_block *sb)
+{
+ struct dentry *child = NULL;
+
+ /*
+ * Unconditionally resync the variable state on a thaw request.
+ * Given the size of efivarfs it really doesn't matter to simply
+ * iterate through all of the entries and resync. Freeze/thaw
+ * requests are rare enough for that to not matter and the
+ * number of entries is pretty low too. So we really don't care.
+ */
+ pr_info("efivarfs: resyncing variable state\n");
+ for (;;) {
+ int err;
+ unsigned long size = 0;
+ struct inode *inode;
+ struct efivar_entry *entry;
+
+ child = find_next_child(sb->s_root, child);
+ if (!child)
+ break;
+
+ inode = d_inode(child);
+ entry = efivar_entry(inode);
+
+ err = efivar_entry_size(entry, &size);
+ if (err)
+ size = 0;
+ else
+ size += sizeof(__u32);
+
+ inode_lock(inode);
+ i_size_write(inode, size);
+ inode_unlock(inode);
+
+ /* The variable doesn't exist anymore, delete it. */
+ if (!size) {
+ pr_info("efivarfs: removing variable %pd\n", child);
+ simple_recursive_removal(child, NULL);
+ }
+ }
+
+ efivar_init(efivarfs_check_missing, sb, false);
+ pr_info("efivarfs: finished resyncing variable state\n");
+ return 0;
+}
+
static int efivarfs_init_fs_context(struct fs_context *fc)
{
struct efivarfs_fs_info *sfi;
@@ -374,13 +509,12 @@ static int efivarfs_init_fs_context(struct fs_context *fc)
if (!sfi)
return -ENOMEM;
- INIT_LIST_HEAD(&sfi->efivarfs_list);
-
sfi->mount_opts.uid = GLOBAL_ROOT_UID;
sfi->mount_opts.gid = GLOBAL_ROOT_GID;
fc->s_fs_info = sfi;
fc->ops = &efivarfs_context_ops;
+
return 0;
}
@@ -389,10 +523,8 @@ static void efivarfs_kill_sb(struct super_block *sb)
struct efivarfs_fs_info *sfi = sb->s_fs_info;
blocking_notifier_chain_unregister(&efivar_ops_nh, &sfi->nb);
- kill_litter_super(sb);
+ kill_anon_super(sb);
- /* Remove all entries and destroy */
- efivar_entry_iter(efivarfs_destroy, &sfi->efivarfs_list, NULL);
kfree(sfi);
}
@@ -402,6 +534,7 @@ static struct file_system_type efivarfs_type = {
.init_fs_context = efivarfs_init_fs_context,
.kill_sb = efivarfs_kill_sb,
.parameters = efivarfs_parameters,
+ .fs_flags = FS_POWER_FREEZE,
};
static __init int efivarfs_init(void)
diff --git a/fs/efivarfs/vars.c b/fs/efivarfs/vars.c
index 3cc89bb624f0..6edc10958ecf 100644
--- a/fs/efivarfs/vars.c
+++ b/fs/efivarfs/vars.c
@@ -22,7 +22,7 @@
#include "internal.h"
-MODULE_IMPORT_NS(EFIVAR);
+MODULE_IMPORT_NS("EFIVAR");
static bool
validate_device_path(efi_char16_t *var_name, int match, u8 *buffer,
@@ -225,6 +225,31 @@ variable_matches(const char *var_name, size_t len, const char *match_name,
}
}
+char *
+efivar_get_utf8name(const efi_char16_t *name16, efi_guid_t *vendor)
+{
+ int len = ucs2_utf8size(name16);
+ char *name;
+
+ /* name, plus '-', plus GUID, plus NUL*/
+ name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
+ if (!name)
+ return NULL;
+
+ ucs2_as_utf8(name, name16, len);
+
+ name[len] = '-';
+
+ efi_guid_to_str(vendor, name + len + 1);
+
+ name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
+
+ /* replace invalid slashes like kobject_set_name_vargs does for /sys/firmware/efi/vars. */
+ strreplace(name, '/', '!');
+
+ return name;
+}
+
bool
efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
unsigned long data_size)
@@ -288,28 +313,6 @@ efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
return found;
}
-static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor,
- struct list_head *head)
-{
- struct efivar_entry *entry, *n;
- unsigned long strsize1, strsize2;
- bool found = false;
-
- strsize1 = ucs2_strsize(variable_name, EFI_VAR_NAME_LEN);
- list_for_each_entry_safe(entry, n, head, list) {
- strsize2 = ucs2_strsize(entry->var.VariableName, EFI_VAR_NAME_LEN);
- if (strsize1 == strsize2 &&
- !memcmp(variable_name, &(entry->var.VariableName),
- strsize2) &&
- !efi_guidcmp(entry->var.VendorGuid,
- *vendor)) {
- found = true;
- break;
- }
- }
- return found;
-}
-
/*
* Returns the size of variable_name, in bytes, including the
* terminating NULL character, or variable_name_size if no NULL
@@ -361,16 +364,15 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
* efivar_init - build the initial list of EFI variables
* @func: callback function to invoke for every variable
* @data: function-specific data to pass to @func
- * @head: initialised head of variable list
+ * @duplicate_check: fail if a duplicate variable is found
*
* Get every EFI variable from the firmware and invoke @func. @func
- * should call efivar_entry_add() to build the list of variables.
+ * should populate the initial dentry and inode tree.
*
* Returns 0 on success, or a kernel error code on failure.
*/
-int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *,
- struct list_head *),
- void *data, struct list_head *head)
+int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+ void *data, bool duplicate_check)
{
unsigned long variable_name_size = 512;
efi_char16_t *variable_name;
@@ -414,14 +416,15 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *,
* we'll ever see a different variable name,
* and may end up looping here forever.
*/
- if (variable_is_present(variable_name, &vendor_guid,
- head)) {
+ if (duplicate_check &&
+ efivarfs_variable_is_present(variable_name,
+ &vendor_guid, data)) {
dup_variable_bug(variable_name, &vendor_guid,
variable_name_size);
status = EFI_NOT_FOUND;
} else {
err = func(variable_name, vendor_guid,
- variable_name_size, data, head);
+ variable_name_size, data);
if (err)
status = EFI_NOT_FOUND;
}
@@ -453,70 +456,12 @@ free:
}
/**
- * efivar_entry_add - add entry to variable list
- * @entry: entry to add to list
- * @head: list head
- *
- * Returns 0 on success, or a kernel error code on failure.
- */
-int efivar_entry_add(struct efivar_entry *entry, struct list_head *head)
-{
- int err;
-
- err = efivar_lock();
- if (err)
- return err;
- list_add(&entry->list, head);
- efivar_unlock();
-
- return 0;
-}
-
-/**
- * __efivar_entry_add - add entry to variable list
- * @entry: entry to add to list
- * @head: list head
- */
-void __efivar_entry_add(struct efivar_entry *entry, struct list_head *head)
-{
- list_add(&entry->list, head);
-}
-
-/**
- * efivar_entry_remove - remove entry from variable list
- * @entry: entry to remove from list
- *
- * Returns 0 on success, or a kernel error code on failure.
- */
-void efivar_entry_remove(struct efivar_entry *entry)
-{
- list_del(&entry->list);
-}
-
-/*
- * efivar_entry_list_del_unlock - remove entry from variable list
- * @entry: entry to remove
- *
- * Remove @entry from the variable list and release the list lock.
- *
- * NOTE: slightly weird locking semantics here - we expect to be
- * called with the efivars lock already held, and we release it before
- * returning. This is because this function is usually called after
- * set_variable() while the lock is still held.
- */
-static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
-{
- list_del(&entry->list);
- efivar_unlock();
-}
-
-/**
- * efivar_entry_delete - delete variable and remove entry from list
+ * efivar_entry_delete - delete variable
* @entry: entry containing variable to delete
*
- * Delete the variable from the firmware and remove @entry from the
- * variable list. It is the caller's responsibility to free @entry
- * once we return.
+ * Delete the variable from the firmware. It is the caller's
+ * responsibility to free @entry (by deleting the dentry/inode) once
+ * we return.
*
* Returns 0 on success, -EINTR if we can't grab the semaphore,
* converted EFI status code if set_variable() fails.
@@ -533,12 +478,10 @@ int efivar_entry_delete(struct efivar_entry *entry)
status = efivar_set_variable_locked(entry->var.VariableName,
&entry->var.VendorGuid,
0, 0, NULL, false);
- if (!(status == EFI_SUCCESS || status == EFI_NOT_FOUND)) {
- efivar_unlock();
+ efivar_unlock();
+ if (!(status == EFI_SUCCESS || status == EFI_NOT_FOUND))
return efi_status_to_err(status);
- }
- efivar_entry_list_del_unlock(entry);
return 0;
}
@@ -632,7 +575,7 @@ int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
* get_variable() fail.
*
* If the EFI variable does not exist when calling set_variable()
- * (EFI_NOT_FOUND), @entry is removed from the variable list.
+ * (EFI_NOT_FOUND).
*/
int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
unsigned long *size, void *data, bool *set)
@@ -648,9 +591,8 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
return -EINVAL;
/*
- * The lock here protects the get_variable call, the conditional
- * set_variable call, and removal of the variable from the efivars
- * list (in the case of an authenticated delete).
+ * The lock here protects the get_variable call and the
+ * conditional set_variable call
*/
err = efivar_lock();
if (err)
@@ -676,10 +618,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
&entry->var.VendorGuid,
NULL, size, NULL);
- if (status == EFI_NOT_FOUND)
- efivar_entry_list_del_unlock(entry);
- else
- efivar_unlock();
+ efivar_unlock();
if (status && status != EFI_BUFFER_TOO_SMALL)
return efi_status_to_err(status);
@@ -691,37 +630,3 @@ out:
return err;
}
-
-/**
- * efivar_entry_iter - iterate over variable list
- * @func: callback function
- * @head: head of variable list
- * @data: function-specific data to pass to callback
- *
- * Iterate over the list of EFI variables and call @func with every
- * entry on the list. It is safe for @func to remove entries in the
- * list via efivar_entry_delete() while iterating.
- *
- * Some notes for the callback function:
- * - a non-zero return value indicates an error and terminates the loop
- * - @func is called from atomic context
- */
-int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data)
-{
- struct efivar_entry *entry, *n;
- int err = 0;
-
- err = efivar_lock();
- if (err)
- return err;
-
- list_for_each_entry_safe(entry, n, head, list) {
- err = func(entry, data);
- if (err)
- break;
- }
- efivar_unlock();
-
- return err;
-}
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index 462619e59766..28407578f83a 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -62,7 +62,7 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino)
inode = iget_locked(super, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
in = INODE_INFO(inode);
diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig
index 6ea60661fa55..d81f3318417d 100644
--- a/fs/erofs/Kconfig
+++ b/fs/erofs/Kconfig
@@ -3,8 +3,18 @@
config EROFS_FS
tristate "EROFS filesystem support"
depends on BLOCK
+ select CACHEFILES if EROFS_FS_ONDEMAND
+ select CRC32
+ select CRYPTO if EROFS_FS_ZIP_ACCEL
+ select CRYPTO_DEFLATE if EROFS_FS_ZIP_ACCEL
select FS_IOMAP
- select LIBCRC32C
+ select LZ4_DECOMPRESS if EROFS_FS_ZIP
+ select NETFS_SUPPORT if EROFS_FS_ONDEMAND
+ select XXHASH if EROFS_FS_XATTR
+ select XZ_DEC if EROFS_FS_ZIP_LZMA
+ select XZ_DEC_MICROLZMA if EROFS_FS_ZIP_LZMA
+ select ZLIB_INFLATE if EROFS_FS_ZIP_DEFLATE
+ select ZSTD_DECOMPRESS if EROFS_FS_ZIP_ZSTD
help
EROFS (Enhanced Read-Only File System) is a lightweight read-only
file system with modern designs (e.g. no buffer heads, inline
@@ -13,12 +23,12 @@ config EROFS_FS
smartphones with Android OS, LiveCDs and high-density hosts with
numerous containers;
- It also provides fixed-sized output compression support in order to
- improve storage density as well as keep relatively higher compression
- ratios and implements in-place decompression to reuse the file page
- for compressed data temporarily with proper strategies, which is
- quite useful to ensure guaranteed end-to-end runtime decompression
- performance under extremely memory pressure without extra cost.
+ It also provides transparent compression and deduplication support to
+ improve storage density and maintain relatively high compression
+ ratios, and it implements in-place decompression to temporarily reuse
+ page cache for compressed data using proper strategies, which is
+ quite useful for ensuring guaranteed end-to-end runtime decompression
+ performance under extreme memory pressure without extra cost.
See the documentation at <file:Documentation/filesystems/erofs.rst>
and the web pages at <https://erofs.docs.kernel.org> for more details.
@@ -38,7 +48,6 @@ config EROFS_FS_DEBUG
config EROFS_FS_XATTR
bool "EROFS extended attributes"
depends on EROFS_FS
- select XXHASH
default y
help
Extended attributes are name:value pairs associated with inodes by
@@ -94,18 +103,15 @@ config EROFS_FS_BACKED_BY_FILE
config EROFS_FS_ZIP
bool "EROFS Data Compression Support"
depends on EROFS_FS
- select LZ4_DECOMPRESS
default y
help
- Enable fixed-sized output compression for EROFS.
+ Enable transparent compression support for EROFS file systems.
If you don't want to enable compression feature, say N.
config EROFS_FS_ZIP_LZMA
bool "EROFS LZMA compressed data support"
depends on EROFS_FS_ZIP
- select XZ_DEC
- select XZ_DEC_MICROLZMA
help
Saying Y here includes support for reading EROFS file systems
containing LZMA compressed data, specifically called microLZMA. It
@@ -117,7 +123,6 @@ config EROFS_FS_ZIP_LZMA
config EROFS_FS_ZIP_DEFLATE
bool "EROFS DEFLATE compressed data support"
depends on EROFS_FS_ZIP
- select ZLIB_INFLATE
help
Saying Y here includes support for reading EROFS file systems
containing DEFLATE compressed data. It gives better compression
@@ -132,7 +137,6 @@ config EROFS_FS_ZIP_DEFLATE
config EROFS_FS_ZIP_ZSTD
bool "EROFS Zstandard compressed data support"
depends on EROFS_FS_ZIP
- select ZSTD_DECOMPRESS
help
Saying Y here includes support for reading EROFS file systems
containing Zstandard compressed data. It gives better compression
@@ -144,12 +148,24 @@ config EROFS_FS_ZIP_ZSTD
If unsure, say N.
+config EROFS_FS_ZIP_ACCEL
+ bool "EROFS hardware decompression support"
+ depends on EROFS_FS_ZIP
+ help
+ Saying Y here includes hardware accelerator support for reading
+ EROFS file systems containing compressed data. It gives better
+ decompression speed than the software-implemented decompression, and
+ incurs lower CPU overhead.
+
+ Hardware accelerator support is an experimental feature for now and
+ file systems are still readable without selecting this option.
+
+ If unsure, say N.
+
config EROFS_FS_ONDEMAND
bool "EROFS fscache-based on-demand read support (deprecated)"
depends on EROFS_FS
- select NETFS_SUPPORT
select FSCACHE
- select CACHEFILES
select CACHEFILES_ONDEMAND
help
This permits EROFS to use fscache-backed data blobs with on-demand
diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile
index 4331d53c7109..549abc424763 100644
--- a/fs/erofs/Makefile
+++ b/fs/erofs/Makefile
@@ -7,5 +7,6 @@ erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o zutil.o
erofs-$(CONFIG_EROFS_FS_ZIP_LZMA) += decompressor_lzma.o
erofs-$(CONFIG_EROFS_FS_ZIP_DEFLATE) += decompressor_deflate.o
erofs-$(CONFIG_EROFS_FS_ZIP_ZSTD) += decompressor_zstd.o
+erofs-$(CONFIG_EROFS_FS_ZIP_ACCEL) += decompressor_crypto.o
erofs-$(CONFIG_EROFS_FS_BACKED_BY_FILE) += fileio.o
erofs-$(CONFIG_EROFS_FS_ONDEMAND) += fscache.o
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
index 7bfe251680ec..84c8e52581f4 100644
--- a/fs/erofs/compress.h
+++ b/fs/erofs/compress.h
@@ -11,6 +11,7 @@
struct z_erofs_decompress_req {
struct super_block *sb;
struct page **in, **out;
+ unsigned int inpages, outpages;
unsigned short pageofs_in, pageofs_out;
unsigned int inputsize, outputsize;
@@ -22,36 +23,15 @@ struct z_erofs_decompress_req {
struct z_erofs_decompressor {
int (*config)(struct super_block *sb, struct erofs_super_block *dsb,
void *data, int size);
- int (*decompress)(struct z_erofs_decompress_req *rq,
- struct page **pagepool);
+ const char *(*decompress)(struct z_erofs_decompress_req *rq,
+ struct page **pagepool);
int (*init)(void);
void (*exit)(void);
char *name;
};
-/* some special page->private (unsigned long, see below) */
#define Z_EROFS_SHORTLIVED_PAGE (-1UL << 2)
-#define Z_EROFS_PREALLOCATED_PAGE (-2UL << 2)
-
-/*
- * For all pages in a pcluster, page->private should be one of
- * Type Last 2bits page->private
- * short-lived page 00 Z_EROFS_SHORTLIVED_PAGE
- * preallocated page (tryalloc) 00 Z_EROFS_PREALLOCATED_PAGE
- * cached/managed page 00 pointer to z_erofs_pcluster
- * online page (file-backed, 01/10/11 sub-index << 2 | count
- * some pages can be used for inplace I/O)
- *
- * page->mapping should be one of
- * Type page->mapping
- * short-lived page NULL
- * preallocated page NULL
- * cached/managed page non-NULL or NULL (invalidated/truncated page)
- * online page non-NULL
- *
- * For all managed pages, PG_private should be set with 1 extra refcount,
- * which is used for page reclaim / migration.
- */
+#define Z_EROFS_PREALLOCATED_FOLIO ((void *)(-2UL << 2))
/*
* Currently, short-lived pages are pages directly from buddy system
@@ -80,7 +60,6 @@ extern const struct z_erofs_decompressor *z_erofs_decomp[];
struct z_erofs_stream_dctx {
struct z_erofs_decompress_req *rq;
- unsigned int inpages, outpages; /* # of {en,de}coded pages */
int no, ni; /* the current {en,de}coded page # */
unsigned int avail_out; /* remaining bytes in the decoded buffer */
@@ -91,10 +70,20 @@ struct z_erofs_stream_dctx {
bool bounced; /* is the bounce buffer used now? */
};
-int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
- void **src, struct page **pgpl);
-int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
- unsigned int padbufsize);
+const char *z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx,
+ void **dst, void **src, struct page **pgpl);
+const char *z_erofs_fixup_insize(struct z_erofs_decompress_req *rq,
+ const char *padbuf, unsigned int padbufsize);
int __init z_erofs_init_decompressor(void);
void z_erofs_exit_decompressor(void);
+int z_erofs_crypto_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pgpl);
+int z_erofs_crypto_enable_engine(const char *name, int len);
+#ifdef CONFIG_EROFS_FS_ZIP_ACCEL
+void z_erofs_crypto_disable_all_engines(void);
+int z_erofs_crypto_show_engines(char *buf, int size, char sep);
+#else
+static inline void z_erofs_crypto_disable_all_engines(void) {}
+static inline int z_erofs_crypto_show_engines(char *buf, int size, char sep) { return 0; }
+#endif
#endif
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index 1c49f8962021..bb13c4cb8455 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -25,10 +25,9 @@ void erofs_put_metabuf(struct erofs_buf *buf)
buf->page = NULL;
}
-void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
- enum erofs_kmap_type type)
+void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, bool need_kmap)
{
- pgoff_t index = offset >> PAGE_SHIFT;
+ pgoff_t index = (buf->off + offset) >> PAGE_SHIFT;
struct folio *folio = NULL;
if (buf->page) {
@@ -43,86 +42,82 @@ void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
return folio;
}
buf->page = folio_file_page(folio, index);
- if (!buf->base && type == EROFS_KMAP)
- buf->base = kmap_local_page(buf->page);
- if (type == EROFS_NO_KMAP)
+ if (!need_kmap)
return NULL;
+ if (!buf->base)
+ buf->base = kmap_local_page(buf->page);
return buf->base + (offset & ~PAGE_MASK);
}
-void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
+int erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb,
+ bool in_metabox)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
buf->file = NULL;
+ if (in_metabox) {
+ if (unlikely(!sbi->metabox_inode))
+ return -EFSCORRUPTED;
+ buf->mapping = sbi->metabox_inode->i_mapping;
+ return 0;
+ }
+ buf->off = sbi->dif0.fsoff;
if (erofs_is_fileio_mode(sbi)) {
- buf->file = sbi->fdev; /* some fs like FUSE needs it */
+ buf->file = sbi->dif0.file; /* some fs like FUSE needs it */
buf->mapping = buf->file->f_mapping;
} else if (erofs_is_fscache_mode(sb))
- buf->mapping = sbi->s_fscache->inode->i_mapping;
+ buf->mapping = sbi->dif0.fscache->inode->i_mapping;
else
buf->mapping = sb->s_bdev->bd_mapping;
+ return 0;
}
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
- erofs_off_t offset, enum erofs_kmap_type type)
+ erofs_off_t offset, bool in_metabox)
{
- erofs_init_metabuf(buf, sb);
- return erofs_bread(buf, offset, type);
-}
+ int err;
-static int erofs_map_blocks_flatmode(struct inode *inode,
- struct erofs_map_blocks *map)
-{
- struct erofs_inode *vi = EROFS_I(inode);
- struct super_block *sb = inode->i_sb;
- bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
- erofs_blk_t lastblk = erofs_iblks(inode) - tailendpacking;
-
- map->m_flags = EROFS_MAP_MAPPED; /* no hole in flat inodes */
- if (map->m_la < erofs_pos(sb, lastblk)) {
- map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
- map->m_plen = erofs_pos(sb, lastblk) - map->m_la;
- } else {
- DBG_BUGON(!tailendpacking);
- map->m_pa = erofs_iloc(inode) + vi->inode_isize +
- vi->xattr_isize + erofs_blkoff(sb, map->m_la);
- map->m_plen = inode->i_size - map->m_la;
-
- /* inline data should be located in the same meta block */
- if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
- erofs_err(sb, "inline data across blocks @ nid %llu", vi->nid);
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
- map->m_flags |= EROFS_MAP_META;
- }
- return 0;
+ err = erofs_init_metabuf(buf, sb, in_metabox);
+ if (err)
+ return ERR_PTR(err);
+ return erofs_bread(buf, offset, true);
}
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
struct super_block *sb = inode->i_sb;
+ unsigned int unit, blksz = sb->s_blocksize;
struct erofs_inode *vi = EROFS_I(inode);
struct erofs_inode_chunk_index *idx;
- struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
- u64 chunknr;
- unsigned int unit;
+ erofs_blk_t startblk, addrmask;
+ bool tailpacking;
erofs_off_t pos;
- void *kaddr;
+ u64 chunknr;
int err = 0;
trace_erofs_map_blocks_enter(inode, map, 0);
map->m_deviceid = 0;
- if (map->m_la >= inode->i_size) {
- /* leave out-of-bound access unmapped */
- map->m_flags = 0;
- map->m_plen = map->m_llen;
+ map->m_flags = 0;
+ if (map->m_la >= inode->i_size)
goto out;
- }
if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
- err = erofs_map_blocks_flatmode(inode, map);
+ tailpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
+ if (!tailpacking && vi->startblk == EROFS_NULL_ADDR)
+ goto out;
+ pos = erofs_pos(sb, erofs_iblks(inode) - tailpacking);
+
+ map->m_flags = EROFS_MAP_MAPPED;
+ if (map->m_la < pos) {
+ map->m_pa = erofs_pos(sb, vi->startblk) + map->m_la;
+ map->m_llen = pos - map->m_la;
+ } else {
+ map->m_pa = erofs_iloc(inode) + vi->inode_isize +
+ vi->xattr_isize + erofs_blkoff(sb, map->m_la);
+ map->m_llen = inode->i_size - map->m_la;
+ map->m_flags |= EROFS_MAP_META;
+ }
goto out;
}
@@ -135,78 +130,67 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
vi->xattr_isize, unit) + unit * chunknr;
- kaddr = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
- if (IS_ERR(kaddr)) {
- err = PTR_ERR(kaddr);
+ idx = erofs_read_metabuf(&buf, sb, pos, erofs_inode_in_metabox(inode));
+ if (IS_ERR(idx)) {
+ err = PTR_ERR(idx);
goto out;
}
map->m_la = chunknr << vi->chunkbits;
- map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
- round_up(inode->i_size - map->m_la, sb->s_blocksize));
-
- /* handle block map */
- if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
- __le32 *blkaddr = kaddr;
-
- if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
- map->m_flags = 0;
- } else {
- map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
+ map->m_llen = min_t(erofs_off_t, 1UL << vi->chunkbits,
+ round_up(inode->i_size - map->m_la, blksz));
+ if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES) {
+ addrmask = (vi->chunkformat & EROFS_CHUNK_FORMAT_48BIT) ?
+ BIT_ULL(48) - 1 : BIT_ULL(32) - 1;
+ startblk = (((u64)le16_to_cpu(idx->startblk_hi) << 32) |
+ le32_to_cpu(idx->startblk_lo)) & addrmask;
+ if ((startblk ^ EROFS_NULL_ADDR) & addrmask) {
+ map->m_deviceid = le16_to_cpu(idx->device_id) &
+ EROFS_SB(sb)->device_id_mask;
+ map->m_pa = erofs_pos(sb, startblk);
+ map->m_flags = EROFS_MAP_MAPPED;
+ }
+ } else {
+ startblk = le32_to_cpu(*(__le32 *)idx);
+ if (startblk != (u32)EROFS_NULL_ADDR) {
+ map->m_pa = erofs_pos(sb, startblk);
map->m_flags = EROFS_MAP_MAPPED;
}
- goto out_unlock;
- }
- /* parse chunk indexes */
- idx = kaddr;
- switch (le32_to_cpu(idx->blkaddr)) {
- case EROFS_NULL_ADDR:
- map->m_flags = 0;
- break;
- default:
- map->m_deviceid = le16_to_cpu(idx->device_id) &
- EROFS_SB(sb)->device_id_mask;
- map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
- map->m_flags = EROFS_MAP_MAPPED;
- break;
}
-out_unlock:
erofs_put_metabuf(&buf);
out:
- if (!err)
- map->m_llen = map->m_plen;
+ if (!err) {
+ map->m_plen = map->m_llen;
+ /* inline data should be located in the same meta block */
+ if ((map->m_flags & EROFS_MAP_META) &&
+ erofs_blkoff(sb, map->m_pa) + map->m_plen > blksz) {
+ erofs_err(sb, "inline data across blocks @ nid %llu", vi->nid);
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+ }
trace_erofs_map_blocks_exit(inode, map, 0, err);
return err;
}
static void erofs_fill_from_devinfo(struct erofs_map_dev *map,
- struct erofs_device_info *dif)
+ struct super_block *sb, struct erofs_device_info *dif)
{
+ map->m_sb = sb;
+ map->m_dif = dif;
map->m_bdev = NULL;
- map->m_fp = NULL;
- if (dif->file) {
- if (S_ISBLK(file_inode(dif->file)->i_mode))
- map->m_bdev = file_bdev(dif->file);
- else
- map->m_fp = dif->file;
- }
- map->m_daxdev = dif->dax_dev;
- map->m_dax_part_off = dif->dax_part_off;
- map->m_fscache = dif->fscache;
+ if (dif->file && S_ISBLK(file_inode(dif->file)->i_mode))
+ map->m_bdev = file_bdev(dif->file);
}
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
struct erofs_device_info *dif;
- erofs_off_t startoff, length;
+ erofs_off_t startoff;
int id;
- map->m_bdev = sb->s_bdev;
- map->m_daxdev = EROFS_SB(sb)->dax_dev;
- map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
- map->m_fscache = EROFS_SB(sb)->s_fscache;
- map->m_fp = EROFS_SB(sb)->fdev;
-
+ erofs_fill_from_devinfo(map, sb, &EROFS_SB(sb)->dif0);
+ map->m_bdev = sb->s_bdev; /* use s_bdev for the primary device */
if (map->m_deviceid) {
down_read(&devs->rwsem);
dif = idr_find(&devs->tree, map->m_deviceid - 1);
@@ -215,24 +199,23 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
return -ENODEV;
}
if (devs->flatdev) {
- map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
+ map->m_pa += erofs_pos(sb, dif->uniaddr);
up_read(&devs->rwsem);
return 0;
}
- erofs_fill_from_devinfo(map, dif);
+ erofs_fill_from_devinfo(map, sb, dif);
up_read(&devs->rwsem);
} else if (devs->extra_devices && !devs->flatdev) {
down_read(&devs->rwsem);
idr_for_each_entry(&devs->tree, dif, id) {
- if (!dif->mapped_blkaddr)
+ if (!dif->uniaddr)
continue;
- startoff = erofs_pos(sb, dif->mapped_blkaddr);
- length = erofs_pos(sb, dif->blocks);
+ startoff = erofs_pos(sb, dif->uniaddr);
if (map->m_pa >= startoff &&
- map->m_pa < startoff + length) {
+ map->m_pa < startoff + erofs_pos(sb, dif->blocks)) {
map->m_pa -= startoff;
- erofs_fill_from_devinfo(map, dif);
+ erofs_fill_from_devinfo(map, sb, dif);
break;
}
}
@@ -243,9 +226,11 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
/*
* bit 30: I/O error occurred on this folio
+ * bit 29: CPU has dirty data in D-cache (needs aliasing handling);
* bit 0 - 29: remaining parts to complete this folio
*/
-#define EROFS_ONLINEFOLIO_EIO (1 << 30)
+#define EROFS_ONLINEFOLIO_EIO 30
+#define EROFS_ONLINEFOLIO_DIRTY 29
void erofs_onlinefolio_init(struct folio *folio)
{
@@ -262,19 +247,23 @@ void erofs_onlinefolio_split(struct folio *folio)
atomic_inc((atomic_t *)&folio->private);
}
-void erofs_onlinefolio_end(struct folio *folio, int err)
+void erofs_onlinefolio_end(struct folio *folio, int err, bool dirty)
{
int orig, v;
do {
orig = atomic_read((atomic_t *)&folio->private);
- v = (orig - 1) | (err ? EROFS_ONLINEFOLIO_EIO : 0);
+ DBG_BUGON(orig <= 0);
+ v = dirty << EROFS_ONLINEFOLIO_DIRTY;
+ v |= (orig - 1) | (!!err << EROFS_ONLINEFOLIO_EIO);
} while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig);
- if (v & ~EROFS_ONLINEFOLIO_EIO)
+ if (v & (BIT(EROFS_ONLINEFOLIO_DIRTY) - 1))
return;
folio->private = 0;
- folio_end_read(folio, !(v & EROFS_ONLINEFOLIO_EIO));
+ if (v & BIT(EROFS_ONLINEFOLIO_DIRTY))
+ flush_dcache_folio(folio);
+ folio_end_read(folio, !(v & BIT(EROFS_ONLINEFOLIO_EIO)));
}
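
The reworked erofs_onlinefolio_end() above packs three things into folio->private: an error latch in bit 30, a "needs D-cache flush" latch in bit 29, and the count of outstanding sub-I/Os in the low 29 bits. A single-threaded sketch of that packing (the atomic cmpxchg loop and folio plumbing are omitted; the init-to-1 behaviour of erofs_onlinefolio_init() is assumed):

#include <stdio.h>
#include <stdbool.h>

#define ONLINEFOLIO_EIO		30
#define ONLINEFOLIO_DIRTY	29
#define PART_COUNT_MASK		((1u << ONLINEFOLIO_DIRTY) - 1)

static unsigned int state;		/* stands in for folio->private */

static void onlinefolio_split(void)	{ state++; }

/* returns true once the last outstanding part has ended */
static bool onlinefolio_end(int err, bool dirty)
{
	/* decrement the count, OR-in the sticky error and dirty latches */
	state = (state - 1) |
		((unsigned int)dirty << ONLINEFOLIO_DIRTY) |
		((unsigned int)(err != 0) << ONLINEFOLIO_EIO);
	if (state & PART_COUNT_MASK)
		return false;			/* parts still pending */
	printf("folio complete: %s%s\n",
	       (state & (1u << ONLINEFOLIO_EIO)) ? "I/O error" : "uptodate",
	       (state & (1u << ONLINEFOLIO_DIRTY)) ? ", flush D-cache" : "");
	return true;
}

int main(void)
{
	state = 1;			/* erofs_onlinefolio_init() */
	onlinefolio_split();		/* folio is served by two sub-I/Os */
	onlinefolio_end(0, true);	/* part 1: ok, written via kernel mapping */
	onlinefolio_end(0, false);	/* part 2: completes the folio */
	return 0;
}
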
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
@@ -287,51 +276,51 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
map.m_la = offset;
map.m_llen = length;
-
ret = erofs_map_blocks(inode, &map);
if (ret < 0)
return ret;
- mdev = (struct erofs_map_dev) {
- .m_deviceid = map.m_deviceid,
- .m_pa = map.m_pa,
- };
- ret = erofs_map_dev(sb, &mdev);
- if (ret)
- return ret;
-
iomap->offset = map.m_la;
- if (flags & IOMAP_DAX)
- iomap->dax_dev = mdev.m_daxdev;
- else
- iomap->bdev = mdev.m_bdev;
iomap->length = map.m_llen;
iomap->flags = 0;
iomap->private = NULL;
-
+ iomap->addr = IOMAP_NULL_ADDR;
if (!(map.m_flags & EROFS_MAP_MAPPED)) {
iomap->type = IOMAP_HOLE;
- iomap->addr = IOMAP_NULL_ADDR;
- if (!iomap->length)
- iomap->length = length;
return 0;
}
+ if (!(map.m_flags & EROFS_MAP_META) || !erofs_inode_in_metabox(inode)) {
+ mdev = (struct erofs_map_dev) {
+ .m_deviceid = map.m_deviceid,
+ .m_pa = map.m_pa,
+ };
+ ret = erofs_map_dev(sb, &mdev);
+ if (ret)
+ return ret;
+
+ if (flags & IOMAP_DAX)
+ iomap->dax_dev = mdev.m_dif->dax_dev;
+ else
+ iomap->bdev = mdev.m_bdev;
+ iomap->addr = mdev.m_dif->fsoff + mdev.m_pa;
+ if (flags & IOMAP_DAX)
+ iomap->addr += mdev.m_dif->dax_part_off;
+ }
+
if (map.m_flags & EROFS_MAP_META) {
void *ptr;
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
iomap->type = IOMAP_INLINE;
- ptr = erofs_read_metabuf(&buf, sb, mdev.m_pa, EROFS_KMAP);
+ ptr = erofs_read_metabuf(&buf, sb, map.m_pa,
+ erofs_inode_in_metabox(inode));
if (IS_ERR(ptr))
return PTR_ERR(ptr);
iomap->inline_data = ptr;
iomap->private = buf.base;
} else {
iomap->type = IOMAP_MAPPED;
- iomap->addr = mdev.m_pa;
- if (flags & IOMAP_DAX)
- iomap->addr += mdev.m_dax_part_off;
}
return 0;
}
@@ -380,12 +369,18 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
*/
static int erofs_read_folio(struct file *file, struct folio *folio)
{
- return iomap_read_folio(folio, &erofs_iomap_ops);
+ trace_erofs_read_folio(folio, true);
+
+ iomap_bio_read_folio(folio, &erofs_iomap_ops);
+ return 0;
}
static void erofs_readahead(struct readahead_control *rac)
{
- return iomap_readahead(rac, &erofs_iomap_ops);
+ trace_erofs_readahead(rac->mapping->host, readahead_index(rac),
+ readahead_count(rac), true);
+
+ iomap_bio_readahead(rac, &erofs_iomap_ops);
}
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
@@ -438,20 +433,20 @@ static const struct vm_operations_struct erofs_dax_vm_ops = {
.huge_fault = erofs_dax_huge_fault,
};
-static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int erofs_file_mmap_prepare(struct vm_area_desc *desc)
{
- if (!IS_DAX(file_inode(file)))
- return generic_file_readonly_mmap(file, vma);
+ if (!IS_DAX(file_inode(desc->file)))
+ return generic_file_readonly_mmap_prepare(desc);
- if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
+ if ((desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE))
return -EINVAL;
- vma->vm_ops = &erofs_dax_vm_ops;
- vm_flags_set(vma, VM_HUGEPAGE);
+ desc->vm_ops = &erofs_dax_vm_ops;
+ desc->vm_flags |= VM_HUGEPAGE;
return 0;
}
#else
-#define erofs_file_mmap generic_file_readonly_mmap
+#define erofs_file_mmap_prepare generic_file_readonly_mmap_prepare
#endif
static loff_t erofs_file_llseek(struct file *file, loff_t offset, int whence)
@@ -481,7 +476,11 @@ static loff_t erofs_file_llseek(struct file *file, loff_t offset, int whence)
const struct file_operations erofs_file_fops = {
.llseek = erofs_file_llseek,
.read_iter = erofs_file_read_iter,
- .mmap = erofs_file_mmap,
+ .unlocked_ioctl = erofs_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = erofs_compat_ioctl,
+#endif
+ .mmap_prepare = erofs_file_mmap_prepare,
.get_unmapped_area = thp_get_unmapped_area,
.splice_read = filemap_splice_read,
};
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index eb318c7ddd80..d5d090276391 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -7,22 +7,7 @@
#include "compress.h"
#include <linux/lz4.h>
-#ifndef LZ4_DISTANCE_MAX /* history window size */
-#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
-#endif
-
#define LZ4_MAX_DISTANCE_PAGES (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
-#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
-#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) (((srcsize) >> 8) + 32)
-#endif
-
-struct z_erofs_lz4_decompress_ctx {
- struct z_erofs_decompress_req *rq;
- /* # of encoded, decoded pages */
- unsigned int inpages, outpages;
- /* decoded block total length (used for in-place decompression) */
- unsigned int oend;
-};
static int z_erofs_load_lz4_config(struct super_block *sb,
struct erofs_super_block *dsb, void *data, int size)
@@ -62,10 +47,9 @@ static int z_erofs_load_lz4_config(struct super_block *sb,
* Fill all gaps with bounce pages if it's a sparse page list. Also check if
* all physical pages are consecutive, which can be seen for moderate CR.
*/
-static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
+static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
struct page **pagepool)
{
- struct z_erofs_decompress_req *rq = ctx->rq;
struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
BITS_PER_LONG)] = { 0 };
@@ -75,7 +59,7 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
unsigned int i, j, top;
top = 0;
- for (i = j = 0; i < ctx->outpages; ++i, ++j) {
+ for (i = j = 0; i < rq->outpages; ++i, ++j) {
struct page *const page = rq->out[i];
struct page *victim;
@@ -121,65 +105,72 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
return kaddr ? 1 : 0;
}
-static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
+static void *z_erofs_lz4_handle_overlap(const struct z_erofs_decompress_req *rq,
void *inpage, void *out, unsigned int *inputmargin,
int *maptype, bool may_inplace)
{
- struct z_erofs_decompress_req *rq = ctx->rq;
- unsigned int omargin, total, i;
+ unsigned int oend, omargin, cnt, i;
struct page **in;
- void *src, *tmp;
+ void *src;
- if (rq->inplace_io) {
- omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
- if (rq->partial_decoding || !may_inplace ||
- omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
- goto docopy;
-
- for (i = 0; i < ctx->inpages; ++i)
- if (rq->out[ctx->outpages - ctx->inpages + i] !=
- rq->in[i])
- goto docopy;
+ /*
+ * If in-place I/O isn't used, for example, the bounce compressed cache
+ * can hold data for incomplete read requests. Just map the compressed
+ * buffer as well and decompress directly.
+ */
+ if (!rq->inplace_io) {
+ if (rq->inpages <= 1) {
+ *maptype = 0;
+ return inpage;
+ }
kunmap_local(inpage);
- *maptype = 3;
- return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT);
+ src = erofs_vm_map_ram(rq->in, rq->inpages);
+ if (!src)
+ return ERR_PTR(-ENOMEM);
+ *maptype = 1;
+ return src;
}
-
- if (ctx->inpages <= 1) {
- *maptype = 0;
- return inpage;
+ /*
+ * Then, deal with in-place I/Os. The reasons why in-place I/O is useful
+ * are: (1) It minimizes memory footprint during the I/O submission,
+ * which is useful for slow storage (including network devices and
+ * low-end HDDs/eMMCs) but with a lot of inflight I/Os; (2) If in-place
+ * decompression can also be applied, it will reuse the unique buffer so
+ * that no extra CPU D-cache is polluted with temporary compressed data
+ * for extreme performance.
+ */
+ oend = rq->pageofs_out + rq->outputsize;
+ omargin = PAGE_ALIGN(oend) - oend;
+ if (!rq->partial_decoding && may_inplace &&
+ omargin >= LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize)) {
+ for (i = 0; i < rq->inpages; ++i)
+ if (rq->out[rq->outpages - rq->inpages + i] !=
+ rq->in[i])
+ break;
+ if (i >= rq->inpages) {
+ kunmap_local(inpage);
+ *maptype = 3;
+ return out + ((rq->outpages - rq->inpages) << PAGE_SHIFT);
+ }
}
- kunmap_local(inpage);
- src = erofs_vm_map_ram(rq->in, ctx->inpages);
- if (!src)
- return ERR_PTR(-ENOMEM);
- *maptype = 1;
- return src;
-
-docopy:
- /* Or copy compressed data which can be overlapped to per-CPU buffer */
- in = rq->in;
- src = z_erofs_get_gbuf(ctx->inpages);
+ /*
+ * If in-place decompression can't be applied, copy compressed data that
+ * may potentially overlap during decompression to a per-CPU buffer.
+ */
+ src = z_erofs_get_gbuf(rq->inpages);
if (!src) {
DBG_BUGON(1);
kunmap_local(inpage);
return ERR_PTR(-EFAULT);
}
- tmp = src;
- total = rq->inputsize;
- while (total) {
- unsigned int page_copycnt =
- min_t(unsigned int, total, PAGE_SIZE - *inputmargin);
-
+ for (i = 0, in = rq->in; i < rq->inputsize; i += cnt, ++in) {
+ cnt = min_t(u32, rq->inputsize - i, PAGE_SIZE - *inputmargin);
if (!inpage)
inpage = kmap_local_page(*in);
- memcpy(tmp, inpage + *inputmargin, page_copycnt);
+ memcpy(src + i, inpage + *inputmargin, cnt);
kunmap_local(inpage);
inpage = NULL;
- tmp += page_copycnt;
- total -= page_copycnt;
- ++in;
*inputmargin = 0;
}
*maptype = 2;
@@ -187,30 +178,29 @@ docopy:
}
/*
- * Get the exact inputsize with zero_padding feature.
- * - For LZ4, it should work if zero_padding feature is on (5.3+);
- * - For MicroLZMA, it'd be enabled all the time.
+ * Get the exact on-disk size of the compressed data:
+ * - For LZ4, it should apply if the zero_padding feature is on (5.3+);
+ * - For others, zero_padding is enabled all the time.
*/
-int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
- unsigned int padbufsize)
+const char *z_erofs_fixup_insize(struct z_erofs_decompress_req *rq,
+ const char *padbuf, unsigned int padbufsize)
{
const char *padend;
padend = memchr_inv(padbuf, 0, padbufsize);
if (!padend)
- return -EFSCORRUPTED;
+ return "compressed data start not found";
rq->inputsize -= padend - padbuf;
rq->pageofs_in += padend - padbuf;
- return 0;
+ return NULL;
}
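
With this change the decompressor helpers return a const char * instead of an int: NULL means success, an ERR_PTR()-encoded pointer carries a regular errno, and any other string is a human-readable corruption reason that callers map to -EFSCORRUPTED (as in the hunks below and in decompressor_crypto.c). A standalone sketch of that tri-state convention, using stand-in ERR_PTR/IS_ERR/PTR_ERR helpers rather than the kernel ones:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* userspace stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((const char *)(intptr_t)(err))
#define IS_ERR(p)	((uintptr_t)(p) >= (uintptr_t)-MAX_ERRNO)
#define PTR_ERR(p)	((long)(intptr_t)(p))
#define EFSCORRUPTED	EUCLEAN		/* same aliasing the kernel uses */

static const char *decompress(int scenario)
{
	if (scenario == 1)
		return "compressed data start not found";	/* corruption */
	if (scenario == 2)
		return ERR_PTR(-ENOMEM);			/* plain errno */
	return NULL;						/* success */
}

int main(void)
{
	for (int s = 0; s <= 2; s++) {
		const char *reason = decompress(s);
		int err = !reason ? 0 :
			  IS_ERR(reason) ? (int)PTR_ERR(reason) : -EFSCORRUPTED;

		printf("scenario %d -> err %d\n", s, err);
	}
	return 0;
}
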
-static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
- u8 *dst)
+static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq, u8 *dst)
{
- struct z_erofs_decompress_req *rq = ctx->rq;
bool support_0padding = false, may_inplace = false;
unsigned int inputmargin;
u8 *out, *headpage, *src;
+ const char *reason;
int ret, maptype;
DBG_BUGON(*rq->in == NULL);
@@ -219,19 +209,19 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
/* LZ4 decompression inplace is only safe if zero_padding is enabled */
if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
support_0padding = true;
- ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
+ reason = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
min_t(unsigned int, rq->inputsize,
rq->sb->s_blocksize - rq->pageofs_in));
- if (ret) {
+ if (reason) {
kunmap_local(headpage);
- return ret;
+ return IS_ERR(reason) ? PTR_ERR(reason) : -EFSCORRUPTED;
}
may_inplace = !((rq->pageofs_in + rq->inputsize) &
(rq->sb->s_blocksize - 1));
}
inputmargin = rq->pageofs_in;
- src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin,
+ src = z_erofs_lz4_handle_overlap(rq, headpage, dst, &inputmargin,
&maptype, may_inplace);
if (IS_ERR(src))
return PTR_ERR(src);
@@ -246,8 +236,6 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
rq->inputsize, rq->outputsize);
if (ret != rq->outputsize) {
- erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
- ret, rq->inputsize, inputmargin, rq->outputsize);
if (ret >= 0)
memset(out + ret, 0, rq->outputsize - ret);
ret = -EFSCORRUPTED;
@@ -258,7 +246,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
if (maptype == 0) {
kunmap_local(headpage);
} else if (maptype == 1) {
- vm_unmap_ram(src, ctx->inpages);
+ vm_unmap_ram(src, rq->inpages);
} else if (maptype == 2) {
z_erofs_put_gbuf(src);
} else if (maptype != 3) {
@@ -268,82 +256,68 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
return ret;
}
-static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
- struct page **pagepool)
+static const char *z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pagepool)
{
- struct z_erofs_lz4_decompress_ctx ctx;
unsigned int dst_maptype;
void *dst;
int ret;
- ctx.rq = rq;
- ctx.oend = rq->pageofs_out + rq->outputsize;
- ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
- ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
-
/* one optimized fast path only for non bigpcluster cases yet */
- if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
+ if (rq->inpages == 1 && rq->outpages == 1 && !rq->inplace_io) {
DBG_BUGON(!*rq->out);
dst = kmap_local_page(*rq->out);
dst_maptype = 0;
- goto dstmap_out;
- }
-
- /* general decoding path which can be used for all cases */
- ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
- if (ret < 0) {
- return ret;
- } else if (ret > 0) {
- dst = page_address(*rq->out);
- dst_maptype = 1;
} else {
- dst = erofs_vm_map_ram(rq->out, ctx.outpages);
- if (!dst)
- return -ENOMEM;
- dst_maptype = 2;
+ /* general decoding path which can be used for all cases */
+ ret = z_erofs_lz4_prepare_dstpages(rq, pagepool);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ if (ret > 0) {
+ dst = page_address(*rq->out);
+ dst_maptype = 1;
+ } else {
+ dst = erofs_vm_map_ram(rq->out, rq->outpages);
+ if (!dst)
+ return ERR_PTR(-ENOMEM);
+ dst_maptype = 2;
+ }
}
-
-dstmap_out:
- ret = z_erofs_lz4_decompress_mem(&ctx, dst);
+ ret = z_erofs_lz4_decompress_mem(rq, dst);
if (!dst_maptype)
kunmap_local(dst);
else if (dst_maptype == 2)
- vm_unmap_ram(dst, ctx.outpages);
- return ret;
+ vm_unmap_ram(dst, rq->outpages);
+ return ERR_PTR(ret);
}
-static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
- struct page **pagepool)
+static const char *z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
+ struct page **pagepool)
{
- const unsigned int nrpages_in =
- PAGE_ALIGN(rq->pageofs_in + rq->inputsize) >> PAGE_SHIFT;
- const unsigned int nrpages_out =
- PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
+ const unsigned int nrpages_in = rq->inpages, nrpages_out = rq->outpages;
const unsigned int bs = rq->sb->s_blocksize;
unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
u8 *kin;
if (rq->outputsize > rq->inputsize)
- return -EOPNOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
cur = bs - (rq->pageofs_out & (bs - 1));
pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
cur = min(cur, rq->outputsize);
if (cur && rq->out[0]) {
kin = kmap_local_page(rq->in[nrpages_in - 1]);
- if (rq->out[0] == rq->in[nrpages_in - 1]) {
+ if (rq->out[0] == rq->in[nrpages_in - 1])
memmove(kin + rq->pageofs_out, kin + pi, cur);
- flush_dcache_page(rq->out[0]);
- } else {
+ else
memcpy_to_page(rq->out[0], rq->pageofs_out,
kin + pi, cur);
- }
kunmap_local(kin);
}
rq->outputsize -= cur;
}
- for (; rq->outputsize; rq->pageofs_in = 0, cur += PAGE_SIZE, ni++) {
+ for (; rq->outputsize; rq->pageofs_in = 0, cur += insz, ni++) {
insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
rq->outputsize -= insz;
if (!rq->in[ni])
@@ -355,35 +329,30 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
DBG_BUGON(no >= nrpages_out);
cnt = min(insz - pi, PAGE_SIZE - po);
- if (rq->out[no] == rq->in[ni]) {
+ if (rq->out[no] == rq->in[ni])
memmove(kin + po,
kin + rq->pageofs_in + pi, cnt);
- flush_dcache_page(rq->out[no]);
- } else if (rq->out[no]) {
+ else if (rq->out[no])
memcpy_to_page(rq->out[no], po,
kin + rq->pageofs_in + pi, cnt);
- }
pi += cnt;
} while (pi < insz);
kunmap_local(kin);
}
DBG_BUGON(ni > nrpages_in);
- return 0;
+ return NULL;
}
-int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
- void **src, struct page **pgpl)
+const char *z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx,
+ void **dst, void **src, struct page **pgpl)
{
struct z_erofs_decompress_req *rq = dctx->rq;
- struct super_block *sb = rq->sb;
struct page **pgo, *tmppage;
unsigned int j;
if (!dctx->avail_out) {
- if (++dctx->no >= dctx->outpages || !rq->outputsize) {
- erofs_err(sb, "insufficient space for decompressed data");
- return -EFSCORRUPTED;
- }
+ if (++dctx->no >= rq->outpages || !rq->outputsize)
+ return "insufficient space for decompressed data";
if (dctx->kout)
kunmap_local(dctx->kout);
@@ -394,7 +363,7 @@ int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
*pgo = erofs_allocpage(pgpl, rq->gfp);
if (!*pgo) {
dctx->kout = NULL;
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
set_page_private(*pgo, Z_EROFS_SHORTLIVED_PAGE);
}
@@ -408,10 +377,8 @@ int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
}
if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) {
- if (++dctx->ni >= dctx->inpages) {
- erofs_err(sb, "invalid compressed data");
- return -EFSCORRUPTED;
- }
+ if (++dctx->ni >= rq->inpages)
+ return "invalid compressed data";
if (dctx->kout) /* unlike kmap(), take care of the orders */
kunmap_local(dctx->kout);
kunmap_local(dctx->kin);
@@ -441,17 +408,17 @@ int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
dctx->bounced = true;
}
- for (j = dctx->ni + 1; j < dctx->inpages; ++j) {
+ for (j = dctx->ni + 1; j < rq->inpages; ++j) {
if (rq->out[dctx->no] != rq->in[j])
continue;
tmppage = erofs_allocpage(pgpl, rq->gfp);
if (!tmppage)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
copy_highpage(tmppage, rq->in[j]);
rq->in[j] = tmppage;
}
- return 0;
+ return NULL;
}
const struct z_erofs_decompressor *z_erofs_decomp[] = {
@@ -501,7 +468,7 @@ int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
return -EOPNOTSUPP;
}
- erofs_init_metabuf(&buf, sb);
+ (void)erofs_init_metabuf(&buf, sb, false);
offset = EROFS_SUPER_OFFSET + sbi->sb_size;
alg = 0;
for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
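
Before leaving decompressor.c: the in-place path in z_erofs_lz4_handle_overlap() above only decompresses on top of the compressed pages when the unused tail of the last output page covers LZ4's worst-case write-ahead. A userspace sketch of that check, reusing the margin formula from the fallback macro this patch removes (LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) = (srcsize >> 8) + 32); the sizes are made up:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
/* matches the fallback definition removed at the top of this file's diff */
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)

int main(void)
{
	unsigned long pageofs_out = 0, outputsize = 16000, inputsize = 6000;
	unsigned long oend = pageofs_out + outputsize;
	unsigned long omargin = PAGE_ALIGN(oend) - oend;
	unsigned long need = LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize);

	/* decompress on top of the compressed pages only if the unused tail
	 * of the last output page absorbs LZ4's worst-case write-ahead */
	if (omargin >= need)
		printf("in-place ok (omargin %lu >= margin %lu)\n", omargin, need);
	else
		printf("fall back to a per-CPU bounce copy (%lu < %lu)\n",
		       omargin, need);
	return 0;
}
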
diff --git a/fs/erofs/decompressor_crypto.c b/fs/erofs/decompressor_crypto.c
new file mode 100644
index 000000000000..5ef6f71d3b7f
--- /dev/null
+++ b/fs/erofs/decompressor_crypto.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/scatterlist.h>
+#include <crypto/acompress.h>
+#include "compress.h"
+
+static int __z_erofs_crypto_decompress(struct z_erofs_decompress_req *rq,
+ struct crypto_acomp *tfm)
+{
+ struct sg_table st_src, st_dst;
+ struct acomp_req *req;
+ struct crypto_wait wait;
+ const char *reason;
+ u8 *headpage;
+ int ret;
+
+ headpage = kmap_local_page(*rq->in);
+ reason = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
+ min_t(unsigned int, rq->inputsize,
+ rq->sb->s_blocksize - rq->pageofs_in));
+ kunmap_local(headpage);
+ if (reason)
+ return IS_ERR(reason) ? PTR_ERR(reason) : -EFSCORRUPTED;
+
+ req = acomp_request_alloc(tfm);
+ if (!req)
+ return -ENOMEM;
+
+ ret = sg_alloc_table_from_pages_segment(&st_src, rq->in, rq->inpages,
+ rq->pageofs_in, rq->inputsize, UINT_MAX, GFP_KERNEL);
+ if (ret < 0)
+ goto failed_src_alloc;
+
+ ret = sg_alloc_table_from_pages_segment(&st_dst, rq->out, rq->outpages,
+ rq->pageofs_out, rq->outputsize, UINT_MAX, GFP_KERNEL);
+ if (ret < 0)
+ goto failed_dst_alloc;
+
+ acomp_request_set_params(req, st_src.sgl,
+ st_dst.sgl, rq->inputsize, rq->outputsize);
+
+ crypto_init_wait(&wait);
+ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
+ if (ret) {
+ erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
+ ret, rq->inputsize, rq->pageofs_in, rq->outputsize);
+ ret = -EIO;
+ }
+
+ sg_free_table(&st_dst);
+failed_dst_alloc:
+ sg_free_table(&st_src);
+failed_src_alloc:
+ acomp_request_free(req);
+ return ret;
+}
+
+struct z_erofs_crypto_engine {
+ char *crypto_name;
+ struct crypto_acomp *tfm;
+};
+
+struct z_erofs_crypto_engine *z_erofs_crypto[Z_EROFS_COMPRESSION_MAX] = {
+ [Z_EROFS_COMPRESSION_LZ4] = (struct z_erofs_crypto_engine[]) {
+ {},
+ },
+ [Z_EROFS_COMPRESSION_LZMA] = (struct z_erofs_crypto_engine[]) {
+ {},
+ },
+ [Z_EROFS_COMPRESSION_DEFLATE] = (struct z_erofs_crypto_engine[]) {
+ { .crypto_name = "qat_deflate", },
+ {},
+ },
+ [Z_EROFS_COMPRESSION_ZSTD] = (struct z_erofs_crypto_engine[]) {
+ {},
+ },
+};
+static DECLARE_RWSEM(z_erofs_crypto_rwsem);
+
+static struct crypto_acomp *z_erofs_crypto_get_engine(int alg)
+{
+ struct z_erofs_crypto_engine *e;
+
+ for (e = z_erofs_crypto[alg]; e->crypto_name; ++e)
+ if (e->tfm)
+ return e->tfm;
+ return NULL;
+}
+
+int z_erofs_crypto_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pgpl)
+{
+ struct crypto_acomp *tfm;
+ int i, err;
+
+ down_read(&z_erofs_crypto_rwsem);
+ tfm = z_erofs_crypto_get_engine(rq->alg);
+ if (!tfm) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ for (i = 0; i < rq->outpages; i++) {
+ struct page *const page = rq->out[i];
+ struct page *victim;
+
+ if (!page) {
+ victim = __erofs_allocpage(pgpl, rq->gfp, true);
+ if (!victim) {
+ err = -ENOMEM;
+ goto out;
+ }
+ set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
+ rq->out[i] = victim;
+ }
+ }
+ err = __z_erofs_crypto_decompress(rq, tfm);
+out:
+ up_read(&z_erofs_crypto_rwsem);
+ return err;
+}
+
+int z_erofs_crypto_enable_engine(const char *name, int len)
+{
+ struct z_erofs_crypto_engine *e;
+ struct crypto_acomp *tfm;
+ int alg;
+
+ down_write(&z_erofs_crypto_rwsem);
+ for (alg = 0; alg < Z_EROFS_COMPRESSION_MAX; ++alg) {
+ for (e = z_erofs_crypto[alg]; e->crypto_name; ++e) {
+ if (!strncmp(name, e->crypto_name, len)) {
+ if (e->tfm)
+ break;
+ tfm = crypto_alloc_acomp(e->crypto_name, 0, 0);
+ if (IS_ERR(tfm)) {
+ up_write(&z_erofs_crypto_rwsem);
+ return -EOPNOTSUPP;
+ }
+ e->tfm = tfm;
+ break;
+ }
+ }
+ }
+ up_write(&z_erofs_crypto_rwsem);
+ return 0;
+}
+
+void z_erofs_crypto_disable_all_engines(void)
+{
+ struct z_erofs_crypto_engine *e;
+ int alg;
+
+ down_write(&z_erofs_crypto_rwsem);
+ for (alg = 0; alg < Z_EROFS_COMPRESSION_MAX; ++alg) {
+ for (e = z_erofs_crypto[alg]; e->crypto_name; ++e) {
+ if (!e->tfm)
+ continue;
+ crypto_free_acomp(e->tfm);
+ e->tfm = NULL;
+ }
+ }
+ up_write(&z_erofs_crypto_rwsem);
+}
+
+int z_erofs_crypto_show_engines(char *buf, int size, char sep)
+{
+ struct z_erofs_crypto_engine *e;
+ int alg, len = 0;
+
+ for (alg = 0; alg < Z_EROFS_COMPRESSION_MAX; ++alg) {
+ for (e = z_erofs_crypto[alg]; e->crypto_name; ++e) {
+ if (!e->tfm)
+ continue;
+ len += scnprintf(buf + len, size - len, "%s%c",
+ e->crypto_name, sep);
+ }
+ }
+ return len;
+}
diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c
index 5070d2fcc737..3fb73000ed27 100644
--- a/fs/erofs/decompressor_deflate.c
+++ b/fs/erofs/decompressor_deflate.c
@@ -97,27 +97,22 @@ failed:
return -ENOMEM;
}
-static int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
- struct page **pgpl)
+static const char *__z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pgpl)
{
struct super_block *sb = rq->sb;
- struct z_erofs_stream_dctx dctx = {
- .rq = rq,
- .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT,
- .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize)
- >> PAGE_SHIFT,
- .no = -1, .ni = 0,
- };
+ struct z_erofs_stream_dctx dctx = { .rq = rq, .no = -1, .ni = 0 };
struct z_erofs_deflate *strm;
- int zerr, err;
+ const char *reason;
+ int zerr;
/* 1. get the exact DEFLATE compressed size */
dctx.kin = kmap_local_page(*rq->in);
- err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
+ reason = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
- if (err) {
+ if (reason) {
kunmap_local(dctx.kin);
- return err;
+ return reason;
}
/* 2. get an available DEFLATE context */
@@ -135,7 +130,7 @@ again:
/* 3. multi-call decompress */
zerr = zlib_inflateInit2(&strm->z, -MAX_WBITS);
if (zerr != Z_OK) {
- err = -EIO;
+ reason = ERR_PTR(-EINVAL);
goto failed_zinit;
}
@@ -149,10 +144,10 @@ again:
while (1) {
dctx.avail_out = strm->z.avail_out;
dctx.inbuf_sz = strm->z.avail_in;
- err = z_erofs_stream_switch_bufs(&dctx,
+ reason = z_erofs_stream_switch_bufs(&dctx,
(void **)&strm->z.next_out,
(void **)&strm->z.next_in, pgpl);
- if (err)
+ if (reason)
break;
strm->z.avail_out = dctx.avail_out;
strm->z.avail_in = dctx.inbuf_sz;
@@ -163,14 +158,14 @@ again:
break;
if (zerr == Z_STREAM_END && !rq->outputsize)
break;
- erofs_err(sb, "failed to decompress %d in[%u] out[%u]",
- zerr, rq->inputsize, rq->outputsize);
- err = -EFSCORRUPTED;
+ reason = (zerr == Z_DATA_ERROR ?
+ "corrupted compressed data" :
+ "unexpected end of stream");
break;
}
}
- if (zlib_inflateEnd(&strm->z) != Z_OK && !err)
- err = -EIO;
+ if (zlib_inflateEnd(&strm->z) != Z_OK && !reason)
+ reason = ERR_PTR(-EIO);
if (dctx.kout)
kunmap_local(dctx.kout);
failed_zinit:
@@ -181,7 +176,23 @@ failed_zinit:
z_erofs_deflate_head = strm;
spin_unlock(&z_erofs_deflate_lock);
wake_up(&z_erofs_deflate_wq);
- return err;
+ return reason;
+}
+
+static const char *z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pgpl)
+{
+#ifdef CONFIG_EROFS_FS_ZIP_ACCEL
+ int err;
+
+ if (!rq->partial_decoding) {
+ err = z_erofs_crypto_decompress(rq, pgpl);
+ if (err != -EOPNOTSUPP)
+ return ERR_PTR(err);
+
+ }
+#endif
+ return __z_erofs_deflate_decompress(rq, pgpl);
}
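
The z_erofs_deflate_decompress() wrapper above tries the crypto acomp engine first for full decodes and only falls back to the software inflater when no engine is configured (-EOPNOTSUPP); any other result, success or failure, is final. A trivial sketch of that fallback shape with stand-in functions:

#include <stdio.h>
#include <errno.h>

/* stand-in results: 0 = accelerator handled it, -EOPNOTSUPP = no engine */
static int hw_decompress(int have_engine) { return have_engine ? 0 : -EOPNOTSUPP; }
static int sw_decompress(void)            { return 0; }

static int decompress(int have_engine, int partial)
{
	if (!partial) {			/* accel path skips partial decodes */
		int err = hw_decompress(have_engine);

		if (err != -EOPNOTSUPP)	/* success or a real error: done */
			return err;
	}
	return sw_decompress();		/* software fallback */
}

int main(void)
{
	printf("no engine, full decode -> %d\n", decompress(0, 0));
	printf("engine, full decode    -> %d\n", decompress(1, 0));
	printf("engine, partial decode -> %d\n", decompress(1, 1));
	return 0;
}
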
const struct z_erofs_decompressor z_erofs_deflate_decomp = {
diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c
index 40666815046f..b4ea6978faae 100644
--- a/fs/erofs/decompressor_lzma.c
+++ b/fs/erofs/decompressor_lzma.c
@@ -146,29 +146,23 @@ again:
return err;
}
-static int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
- struct page **pgpl)
+static const char *z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pgpl)
{
struct super_block *sb = rq->sb;
- struct z_erofs_stream_dctx dctx = {
- .rq = rq,
- .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT,
- .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize)
- >> PAGE_SHIFT,
- .no = -1, .ni = 0,
- };
+ struct z_erofs_stream_dctx dctx = { .rq = rq, .no = -1, .ni = 0 };
struct xz_buf buf = {};
struct z_erofs_lzma *strm;
enum xz_ret xz_err;
- int err;
+ const char *reason;
/* 1. get the exact LZMA compressed size */
dctx.kin = kmap_local_page(*rq->in);
- err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
+ reason = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
- if (err) {
+ if (reason) {
kunmap_local(dctx.kin);
- return err;
+ return reason;
}
/* 2. get an available lzma context */
@@ -194,9 +188,9 @@ again:
dctx.avail_out = buf.out_size - buf.out_pos;
dctx.inbuf_sz = buf.in_size;
dctx.inbuf_pos = buf.in_pos;
- err = z_erofs_stream_switch_bufs(&dctx, (void **)&buf.out,
- (void **)&buf.in, pgpl);
- if (err)
+ reason = z_erofs_stream_switch_bufs(&dctx, (void **)&buf.out,
+ (void **)&buf.in, pgpl);
+ if (reason)
break;
if (buf.out_size == buf.out_pos) {
@@ -213,9 +207,9 @@ again:
if (xz_err != XZ_OK) {
if (xz_err == XZ_STREAM_END && !rq->outputsize)
break;
- erofs_err(sb, "failed to decompress %d in[%u] out[%u]",
- xz_err, rq->inputsize, rq->outputsize);
- err = -EFSCORRUPTED;
+ reason = (xz_err == XZ_DATA_ERROR ?
+ "corrupted compressed data" :
+ "unexpected end of stream");
break;
}
} while (1);
@@ -229,7 +223,7 @@ again:
z_erofs_lzma_head = strm;
spin_unlock(&z_erofs_lzma_lock);
wake_up(&z_erofs_lzma_wq);
- return err;
+ return reason;
}
const struct z_erofs_decompressor z_erofs_lzma_decomp = {
diff --git a/fs/erofs/decompressor_zstd.c b/fs/erofs/decompressor_zstd.c
index 7e177304967e..beae49165c69 100644
--- a/fs/erofs/decompressor_zstd.c
+++ b/fs/erofs/decompressor_zstd.c
@@ -135,30 +135,25 @@ static int z_erofs_load_zstd_config(struct super_block *sb,
return strm ? -ENOMEM : 0;
}
-static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
- struct page **pgpl)
+static const char *z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pgpl)
{
struct super_block *sb = rq->sb;
- struct z_erofs_stream_dctx dctx = {
- .rq = rq,
- .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT,
- .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize)
- >> PAGE_SHIFT,
- .no = -1, .ni = 0,
- };
+ struct z_erofs_stream_dctx dctx = { .rq = rq, .no = -1, .ni = 0 };
zstd_in_buffer in_buf = { NULL, 0, 0 };
zstd_out_buffer out_buf = { NULL, 0, 0 };
struct z_erofs_zstd *strm;
zstd_dstream *stream;
- int zerr, err;
+ const char *reason;
+ int zerr;
/* 1. get the exact compressed size */
dctx.kin = kmap_local_page(*rq->in);
- err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
+ reason = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
- if (err) {
+ if (reason) {
kunmap_local(dctx.kin);
- return err;
+ return reason;
}
/* 2. get an available ZSTD context */
@@ -167,7 +162,7 @@ static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
/* 3. multi-call decompress */
stream = zstd_init_dstream(z_erofs_zstd_max_dictsize, strm->wksp, strm->wkspsz);
if (!stream) {
- err = -EIO;
+ reason = ERR_PTR(-ENOMEM);
goto failed_zinit;
}
@@ -178,12 +173,11 @@ static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
dctx.bounce = strm->bounce;
do {
- dctx.avail_out = out_buf.size - out_buf.pos;
dctx.inbuf_sz = in_buf.size;
dctx.inbuf_pos = in_buf.pos;
- err = z_erofs_stream_switch_bufs(&dctx, &out_buf.dst,
+ reason = z_erofs_stream_switch_bufs(&dctx, &out_buf.dst,
(void **)&in_buf.src, pgpl);
- if (err)
+ if (reason)
break;
if (out_buf.size == out_buf.pos) {
@@ -194,14 +188,15 @@ static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
in_buf.pos = dctx.inbuf_pos;
zerr = zstd_decompress_stream(stream, &out_buf, &in_buf);
- if (zstd_is_error(zerr) || (!zerr && rq->outputsize)) {
- erofs_err(sb, "failed to decompress in[%u] out[%u]: %s",
- rq->inputsize, rq->outputsize,
- zerr ? zstd_get_error_name(zerr) : "unexpected end of stream");
- err = -EFSCORRUPTED;
+ dctx.avail_out = out_buf.size - out_buf.pos;
+ if (zstd_is_error(zerr) ||
+ ((rq->outputsize + dctx.avail_out) && (!zerr || (zerr > 0 &&
+ !(rq->inputsize + in_buf.size - in_buf.pos))))) {
+ reason = zstd_is_error(zerr) ? zstd_get_error_name(zerr) :
+ "unexpected end of stream";
break;
}
- } while (rq->outputsize || out_buf.pos < out_buf.size);
+ } while (rq->outputsize + dctx.avail_out);
if (dctx.kout)
kunmap_local(dctx.kout);
@@ -213,7 +208,7 @@ failed_zinit:
z_erofs_zstd_head = strm;
spin_unlock(&z_erofs_zstd_lock);
wake_up(&z_erofs_zstd_wq);
- return err;
+ return reason;
}
const struct z_erofs_decompressor z_erofs_zstd_decomp = {
diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c
index c3b90abdee37..32b4f5aa60c9 100644
--- a/fs/erofs/dir.c
+++ b/fs/erofs/dir.c
@@ -34,7 +34,8 @@ static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx,
}
if (!dir_emit(ctx, de_name, de_namelen,
- le64_to_cpu(de->nid), d_type))
+ erofs_nid_to_ino64(EROFS_SB(dir->i_sb),
+ le64_to_cpu(de->nid)), d_type))
return 1;
++de;
ctx->pos += sizeof(struct erofs_dirent);
@@ -47,8 +48,12 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
struct inode *dir = file_inode(f);
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
struct super_block *sb = dir->i_sb;
+ struct file_ra_state *ra = &f->f_ra;
unsigned long bsz = sb->s_blocksize;
unsigned int ofs = erofs_blkoff(sb, ctx->pos);
+ pgoff_t ra_pages = DIV_ROUND_UP_POW2(
+ EROFS_I_SB(dir)->dir_ra_bytes, PAGE_SIZE);
+ pgoff_t nr_pages = DIV_ROUND_UP_POW2(dir->i_size, PAGE_SIZE);
int err = 0;
bool initial = true;
@@ -58,9 +63,24 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
struct erofs_dirent *de;
unsigned int nameoff, maxsize;
- de = erofs_bread(&buf, dbstart, EROFS_KMAP);
+ if (fatal_signal_pending(current)) {
+ err = -ERESTARTSYS;
+ break;
+ }
+
+ /* readahead blocks to enhance performance for large directories */
+ if (ra_pages) {
+ pgoff_t idx = DIV_ROUND_UP_POW2(ctx->pos, PAGE_SIZE);
+ pgoff_t pages = min(nr_pages - idx, ra_pages);
+
+ if (pages > 1 && !ra_has_index(ra, idx))
+ page_cache_sync_readahead(dir->i_mapping, ra,
+ f, idx, pages);
+ }
+
+ de = erofs_bread(&buf, dbstart, true);
if (IS_ERR(de)) {
- erofs_err(sb, "fail to readdir of logical block %u of nid %llu",
+ erofs_err(sb, "failed to readdir of logical block %llu of nid %llu",
erofs_blknr(sb, dbstart), EROFS_I(dir)->nid);
err = PTR_ERR(de);
break;
@@ -88,8 +108,14 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
break;
ctx->pos = dbstart + maxsize;
ofs = 0;
+ cond_resched();
}
erofs_put_metabuf(&buf);
+ if (EROFS_I(dir)->dot_omitted && ctx->pos == dir->i_size) {
+ if (!dir_emit_dot(f, ctx))
+ return 0;
+ ++ctx->pos;
+ }
return err < 0 ? err : 0;
}
@@ -97,4 +123,8 @@ const struct file_operations erofs_dir_fops = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate_shared = erofs_readdir,
+ .unlocked_ioctl = erofs_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = erofs_compat_ioctl,
+#endif
};
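
The readahead added to erofs_readdir() above computes a window in pages from the per-sb dir_ra_bytes budget, clamps it to the remaining directory size, and issues a sync readahead when more than one page is left. A sketch of just that arithmetic; dir_ra_bytes, the directory size and the position are made-up values, and DIV_ROUND_UP_POW2 is replaced by a local round-up division:

#include <stdio.h>

#define PAGE_SIZE	4096ULL
/* local equivalent of the kernel helper: round-up division (power-of-2 divisor) */
#define DIV_ROUND_UP_POW2(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long dir_ra_bytes = 16384;	/* made-up readahead budget */
	unsigned long long i_size = 9 * PAGE_SIZE + 100;/* directory size */
	unsigned long long pos = 3 * PAGE_SIZE + 512;	/* current ctx->pos */

	unsigned long long ra_pages = DIV_ROUND_UP_POW2(dir_ra_bytes, PAGE_SIZE);
	unsigned long long nr_pages = DIV_ROUND_UP_POW2(i_size, PAGE_SIZE);
	unsigned long long idx = DIV_ROUND_UP_POW2(pos, PAGE_SIZE);
	unsigned long long pages = nr_pages - idx < ra_pages ?
				   nr_pages - idx : ra_pages;

	if (pages > 1)	/* the kernel additionally checks ra_has_index() */
		printf("sync readahead of %llu pages from index %llu\n",
		       pages, idx);
	return 0;
}
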
diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
index c8f2ae845bd2..e24268acdd62 100644
--- a/fs/erofs/erofs_fs.h
+++ b/fs/erofs/erofs_fs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only OR Apache-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* EROFS (Enhanced ROM File System) on-disk format definition
*
@@ -9,11 +9,15 @@
#ifndef __EROFS_FS_H
#define __EROFS_FS_H
+/* to allow for x86 boot sectors and other oddities. */
#define EROFS_SUPER_OFFSET 1024
-#define EROFS_FEATURE_COMPAT_SB_CHKSUM 0x00000001
-#define EROFS_FEATURE_COMPAT_MTIME 0x00000002
-#define EROFS_FEATURE_COMPAT_XATTR_FILTER 0x00000004
+#define EROFS_FEATURE_COMPAT_SB_CHKSUM 0x00000001
+#define EROFS_FEATURE_COMPAT_MTIME 0x00000002
+#define EROFS_FEATURE_COMPAT_XATTR_FILTER 0x00000004
+#define EROFS_FEATURE_COMPAT_SHARED_EA_IN_METABOX 0x00000008
+#define EROFS_FEATURE_COMPAT_PLAIN_XATTR_PFX 0x00000010
+
/*
* Any bits that aren't in EROFS_ALL_FEATURE_INCOMPAT should
@@ -29,42 +33,38 @@
#define EROFS_FEATURE_INCOMPAT_FRAGMENTS 0x00000020
#define EROFS_FEATURE_INCOMPAT_DEDUPE 0x00000020
#define EROFS_FEATURE_INCOMPAT_XATTR_PREFIXES 0x00000040
+#define EROFS_FEATURE_INCOMPAT_48BIT 0x00000080
+#define EROFS_FEATURE_INCOMPAT_METABOX 0x00000100
#define EROFS_ALL_FEATURE_INCOMPAT \
- (EROFS_FEATURE_INCOMPAT_ZERO_PADDING | \
- EROFS_FEATURE_INCOMPAT_COMPR_CFGS | \
- EROFS_FEATURE_INCOMPAT_BIG_PCLUSTER | \
- EROFS_FEATURE_INCOMPAT_CHUNKED_FILE | \
- EROFS_FEATURE_INCOMPAT_DEVICE_TABLE | \
- EROFS_FEATURE_INCOMPAT_COMPR_HEAD2 | \
- EROFS_FEATURE_INCOMPAT_ZTAILPACKING | \
- EROFS_FEATURE_INCOMPAT_FRAGMENTS | \
- EROFS_FEATURE_INCOMPAT_DEDUPE | \
- EROFS_FEATURE_INCOMPAT_XATTR_PREFIXES)
+ ((EROFS_FEATURE_INCOMPAT_METABOX << 1) - 1)
#define EROFS_SB_EXTSLOT_SIZE 16
struct erofs_deviceslot {
u8 tag[64]; /* digest(sha256), etc. */
- __le32 blocks; /* total fs blocks of this device */
- __le32 mapped_blkaddr; /* map starting at mapped_blkaddr */
- u8 reserved[56];
+ __le32 blocks_lo; /* total blocks count of this device */
+ __le32 uniaddr_lo; /* unified starting block of this device */
+ __le32 blocks_hi; /* total blocks count MSB */
+ __le16 uniaddr_hi; /* unified starting block MSB */
+ u8 reserved[50];
};
#define EROFS_DEVT_SLOT_SIZE sizeof(struct erofs_deviceslot)
-/* erofs on-disk super block (currently 128 bytes) */
+/* erofs on-disk super block (currently 144 bytes at maximum) */
struct erofs_super_block {
__le32 magic; /* file system magic number */
- __le32 checksum; /* crc32c(super_block) */
+ __le32 checksum; /* crc32c to avoid unexpected on-disk overlap */
__le32 feature_compat;
__u8 blkszbits; /* filesystem block size in bit shift */
__u8 sb_extslots; /* superblock size = 128 + sb_extslots * 16 */
-
- __le16 root_nid; /* nid of root directory */
+ union {
+ __le16 rootnid_2b; /* nid of root directory */
+ __le16 blocks_hi; /* (48BIT on) blocks count MSB */
+ } __packed rb;
__le64 inos; /* total valid ino # (== f_files - f_favail) */
-
- __le64 build_time; /* compact inode time derivation */
- __le32 build_time_nsec; /* compact inode time derivation in ns scale */
- __le32 blocks; /* used for statfs */
+ __le64 epoch; /* base seconds used for compact inodes */
+ __le32 fixed_nsec; /* fixed nanoseconds for compact inodes */
+ __le32 blocks_lo; /* blocks count LSB */
__le32 meta_blkaddr; /* start block address of metadata area */
__le32 xattr_blkaddr; /* start block address of shared xattr area */
__u8 uuid[16]; /* 128-bit uuid for volume */
@@ -83,7 +83,12 @@ struct erofs_super_block {
__le32 xattr_prefix_start; /* start of long xattr prefixes */
__le64 packed_nid; /* nid of the special packed inode */
__u8 xattr_filter_reserved; /* reserved for xattr name filter */
- __u8 reserved2[23];
+ __u8 reserved[3];
+ __le32 build_time; /* seconds added to epoch for mkfs time */
+ __le64 rootnid_8b; /* (48BIT on) nid of root directory */
+ __le64 reserved2;
+ __le64 metabox_nid; /* (METABOX on) nid of the metabox inode */
+ __le64 reserved3; /* [align to extslot 1] */
};
/*
@@ -114,19 +119,19 @@ static inline bool erofs_inode_is_data_compressed(unsigned int datamode)
#define EROFS_I_VERSION_MASK 0x01
#define EROFS_I_DATALAYOUT_MASK 0x07
-#define EROFS_I_VERSION_BIT 0
-#define EROFS_I_DATALAYOUT_BIT 1
-#define EROFS_I_ALL_BIT 4
-
-#define EROFS_I_ALL ((1 << EROFS_I_ALL_BIT) - 1)
+#define EROFS_I_VERSION_BIT 0
+#define EROFS_I_DATALAYOUT_BIT 1
+#define EROFS_I_NLINK_1_BIT 4 /* non-directory compact inodes only */
+#define EROFS_I_DOT_OMITTED_BIT 4 /* (directories) omit the `.` dirent */
+#define EROFS_I_ALL ((1 << (EROFS_I_NLINK_1_BIT + 1)) - 1)
/* indicate chunk blkbits, thus 'chunksize = blocksize << chunk blkbits' */
#define EROFS_CHUNK_FORMAT_BLKBITS_MASK 0x001F
-/* with chunk indexes or just a 4-byte blkaddr array */
+/* with chunk indexes or just a 4-byte block array */
#define EROFS_CHUNK_FORMAT_INDEXES 0x0020
+#define EROFS_CHUNK_FORMAT_48BIT 0x0040
-#define EROFS_CHUNK_FORMAT_ALL \
- (EROFS_CHUNK_FORMAT_BLKBITS_MASK | EROFS_CHUNK_FORMAT_INDEXES)
+#define EROFS_CHUNK_FORMAT_ALL ((EROFS_CHUNK_FORMAT_48BIT << 1) - 1)
/* 32-byte on-disk inode */
#define EROFS_INODE_LAYOUT_COMPACT 0
@@ -139,45 +144,40 @@ struct erofs_inode_chunk_info {
};
union erofs_inode_i_u {
- /* total compressed blocks for compressed inodes */
- __le32 compressed_blocks;
-
- /* block address for uncompressed flat inodes */
- __le32 raw_blkaddr;
-
- /* for device files, used to indicate old/new device # */
- __le32 rdev;
-
- /* for chunk-based files, it contains the summary info */
+ __le32 blocks_lo; /* total blocks count (if compressed inodes) */
+ __le32 startblk_lo; /* starting block number (if flat inodes) */
+ __le32 rdev; /* device ID (if special inodes) */
struct erofs_inode_chunk_info c;
};
+union erofs_inode_i_nb {
+ __le16 nlink; /* if EROFS_I_NLINK_1_BIT is unset */
+ __le16 blocks_hi; /* total blocks count MSB */
+ __le16 startblk_hi; /* starting block number MSB */
+} __packed;
+
/* 32-byte reduced form of an ondisk inode */
struct erofs_inode_compact {
__le16 i_format; /* inode format hints */
-
-/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */
__le16 i_xattr_icount;
__le16 i_mode;
- __le16 i_nlink;
+ union erofs_inode_i_nb i_nb;
__le32 i_size;
- __le32 i_reserved;
+ __le32 i_mtime;
union erofs_inode_i_u i_u;
__le32 i_ino; /* only used for 32-bit stat compatibility */
__le16 i_uid;
__le16 i_gid;
- __le32 i_reserved2;
+ __le32 i_reserved;
};
/* 64-byte complete form of an ondisk inode */
struct erofs_inode_extended {
__le16 i_format; /* inode format hints */
-
-/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */
__le16 i_xattr_icount;
__le16 i_mode;
- __le16 i_reserved;
+ union erofs_inode_i_nb i_nb;
__le64 i_size;
union erofs_inode_i_u i_u;
@@ -247,6 +247,7 @@ static inline unsigned int erofs_xattr_ibody_size(__le16 i_xattr_icount)
if (!i_xattr_icount)
return 0;
+ /* 1 header + n-1 * 4 bytes inline xattr to keep continuity */
return sizeof(struct erofs_xattr_ibody_header) +
sizeof(__u32) * (le16_to_cpu(i_xattr_icount) - 1);
}
@@ -265,13 +266,16 @@ static inline unsigned int erofs_xattr_entry_size(struct erofs_xattr_entry *e)
/* 4-byte block address array */
#define EROFS_BLOCK_MAP_ENTRY_SIZE sizeof(__le32)
-/* 8-byte inode chunk indexes */
+/* 8-byte inode chunk index */
struct erofs_inode_chunk_index {
- __le16 advise; /* always 0, don't care for now */
+ __le16 startblk_hi; /* starting block number MSB */
__le16 device_id; /* back-end storage id (with bits masked) */
- __le32 blkaddr; /* start block address of this inode chunk */
+ __le32 startblk_lo; /* starting block number of this chunk */
};
+#define EROFS_DIRENT_NID_METABOX_BIT 63
+#define EROFS_DIRENT_NID_MASK (BIT_ULL(EROFS_DIRENT_NID_METABOX_BIT) - 1)
+
/* dirent sorts in alphabet order, thus we can do binary search */
struct erofs_dirent {
__le64 nid; /* node number */
@@ -336,21 +340,20 @@ struct z_erofs_zstd_cfgs {
#define Z_EROFS_ZSTD_MAX_DICT_SIZE Z_EROFS_PCLUSTER_MAX_SIZE
/*
- * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on)
- * e.g. for 4k logical cluster size, 4B if compacted 2B is off;
- * (4B) + 2B + (4B) if compacted 2B is on.
- * bit 1 : HEAD1 big pcluster (0 - off; 1 - on)
- * bit 2 : HEAD2 big pcluster (0 - off; 1 - on)
- * bit 3 : tailpacking inline pcluster (0 - off; 1 - on)
- * bit 4 : interlaced plain pcluster (0 - off; 1 - on)
- * bit 5 : fragment pcluster (0 - off; 1 - on)
+ * Enable COMPACTED_2B for EROFS_INODE_COMPRESSED_COMPACT inodes:
+ * 4B (disabled) vs 4B+2B+4B (enabled)
*/
#define Z_EROFS_ADVISE_COMPACTED_2B 0x0001
+/* Enable extent metadata for EROFS_INODE_COMPRESSED_FULL inodes */
+#define Z_EROFS_ADVISE_EXTENTS 0x0001
#define Z_EROFS_ADVISE_BIG_PCLUSTER_1 0x0002
#define Z_EROFS_ADVISE_BIG_PCLUSTER_2 0x0004
#define Z_EROFS_ADVISE_INLINE_PCLUSTER 0x0008
#define Z_EROFS_ADVISE_INTERLACED_PCLUSTER 0x0010
#define Z_EROFS_ADVISE_FRAGMENT_PCLUSTER 0x0020
+/* Indicate the record size for each extent if extent metadata is used */
+#define Z_EROFS_ADVISE_EXTRECSZ_BIT 1
+#define Z_EROFS_ADVISE_EXTRECSZ_MASK 0x3
#define Z_EROFS_FRAGMENT_INODE_BIT 7
struct z_erofs_map_header {
@@ -362,45 +365,24 @@ struct z_erofs_map_header {
/* indicates the encoded size of tailpacking data */
__le16 h_idata_size;
};
+ __le32 h_extents_lo; /* extent count LSB */
};
__le16 h_advise;
- /*
- * bit 0-3 : algorithm type of head 1 (logical cluster type 01);
- * bit 4-7 : algorithm type of head 2 (logical cluster type 11).
- */
- __u8 h_algorithmtype;
- /*
- * bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096;
- * bit 3-6 : reserved;
- * bit 7 : move the whole file into packed inode or not.
- */
- __u8 h_clusterbits;
+ union {
+ struct {
+ /* algorithm type (bit 0-3: HEAD1; bit 4-7: HEAD2) */
+ __u8 h_algorithmtype;
+ /*
+ * bit 0-3 : logical cluster bits - blkszbits
+ * bit 4-6 : reserved
+ * bit 7 : pack the whole file into packed inode
+ */
+ __u8 h_clusterbits;
+ } __packed;
+ __le16 h_extents_hi; /* extent count MSB */
+ } __packed;
};
-/*
- * On-disk logical cluster type:
- * 0 - literal (uncompressed) lcluster
- * 1,3 - compressed lcluster (for HEAD lclusters)
- * 2 - compressed lcluster (for NONHEAD lclusters)
- *
- * In detail,
- * 0 - literal (uncompressed) lcluster,
- * di_advise = 0
- * di_clusterofs = the literal data offset of the lcluster
- * di_blkaddr = the blkaddr of the literal pcluster
- *
- * 1,3 - compressed lcluster (for HEAD lclusters)
- * di_advise = 1 or 3
- * di_clusterofs = the decompressed data offset of the lcluster
- * di_blkaddr = the blkaddr of the compressed pcluster
- *
- * 2 - compressed lcluster (for NONHEAD lclusters)
- * di_advise = 2
- * di_clusterofs =
- * the decompressed data offset in its own HEAD lcluster
- * di_u.delta[0] = distance to this HEAD lcluster
- * di_u.delta[1] = distance to the next HEAD lcluster
- */
enum {
Z_EROFS_LCLUSTER_TYPE_PLAIN = 0,
Z_EROFS_LCLUSTER_TYPE_HEAD1 = 1,
@@ -414,11 +396,7 @@ enum {
/* (noncompact only, HEAD) This pcluster refers to partial decompressed data */
#define Z_EROFS_LI_PARTIAL_REF (1 << 15)
-/*
- * D0_CBLKCNT will be marked _only_ at the 1st non-head lcluster to store the
- * compressed block count of a compressed extent (in logical clusters, aka.
- * block count of a pcluster).
- */
+/* Set on 1st non-head lcluster to store compressed block count (in blocks) */
#define Z_EROFS_LI_D0_CBLKCNT (1 << 11)
struct z_erofs_lcluster_index {
@@ -427,19 +405,36 @@ struct z_erofs_lcluster_index {
__le16 di_clusterofs;
union {
- /* for the HEAD lclusters */
- __le32 blkaddr;
+ __le32 blkaddr; /* for the HEAD lclusters */
/*
- * for the NONHEAD lclusters
* [0] - distance to its HEAD lcluster
* [1] - distance to the next HEAD lcluster
*/
- __le16 delta[2];
+ __le16 delta[2]; /* for the NONHEAD lclusters */
} di_u;
};
-#define Z_EROFS_FULL_INDEX_ALIGN(end) \
- (ALIGN(end, 8) + sizeof(struct z_erofs_map_header) + 8)
+#define Z_EROFS_MAP_HEADER_END(end) \
+ (ALIGN(end, 8) + sizeof(struct z_erofs_map_header))
+#define Z_EROFS_FULL_INDEX_START(end) (Z_EROFS_MAP_HEADER_END(end) + 8)
+
+#define Z_EROFS_EXTENT_PLEN_PARTIAL BIT(27)
+#define Z_EROFS_EXTENT_PLEN_FMT_BIT 28
+#define Z_EROFS_EXTENT_PLEN_MASK ((Z_EROFS_PCLUSTER_MAX_SIZE << 1) - 1)
+struct z_erofs_extent {
+ __le32 plen; /* encoded length */
+ __le32 pstart_lo; /* physical offset */
+ __le32 pstart_hi; /* physical offset MSB */
+ __le32 lstart_lo; /* logical offset */
+ __le32 lstart_hi; /* logical offset MSB (>= 4GiB inodes) */
+ __u8 reserved[12]; /* for future use */
+};
+
+static inline int z_erofs_extent_recsize(unsigned int advise)
+{
+ return 4 << ((advise >> Z_EROFS_ADVISE_EXTRECSZ_BIT) &
+ Z_EROFS_ADVISE_EXTRECSZ_MASK);
+}
/* check the EROFS on-disk layout strictly at compile time */
static inline void erofs_check_ondisk_layout_definitions(void)
@@ -448,7 +443,7 @@ static inline void erofs_check_ondisk_layout_definitions(void)
.h_clusterbits = 1 << Z_EROFS_FRAGMENT_INODE_BIT
};
- BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128);
+ BUILD_BUG_ON(sizeof(struct erofs_super_block) != 144);
BUILD_BUG_ON(sizeof(struct erofs_inode_compact) != 32);
BUILD_BUG_ON(sizeof(struct erofs_inode_extended) != 64);
BUILD_BUG_ON(sizeof(struct erofs_xattr_ibody_header) != 12);
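
To make the new extent metadata concrete: z_erofs_extent_recsize() defined above turns the two EXTRECSZ advise bits into a 4/8/16/32-byte on-disk record size. A quick userspace check of that mapping, copying the helper verbatim:

#include <stdio.h>

#define Z_EROFS_ADVISE_EXTRECSZ_BIT	1
#define Z_EROFS_ADVISE_EXTRECSZ_MASK	0x3

static int z_erofs_extent_recsize(unsigned int advise)
{
	return 4 << ((advise >> Z_EROFS_ADVISE_EXTRECSZ_BIT) &
		     Z_EROFS_ADVISE_EXTRECSZ_MASK);
}

int main(void)
{
	for (unsigned int enc = 0; enc < 4; enc++)
		printf("EXTRECSZ encoding %u -> %d-byte extent records\n", enc,
		       z_erofs_extent_recsize(enc << Z_EROFS_ADVISE_EXTRECSZ_BIT));
	return 0;	/* prints 4, 8, 16 and 32 bytes respectively */
}
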
diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c
index 3af96b1e2c2a..932e8b353ba1 100644
--- a/fs/erofs/fileio.c
+++ b/fs/erofs/fileio.c
@@ -6,9 +6,10 @@
#include <trace/events/erofs.h>
struct erofs_fileio_rq {
- struct bio_vec bvecs[BIO_MAX_VECS];
+ struct bio_vec bvecs[16];
struct bio bio;
struct kiocb iocb;
+ struct super_block *sb;
};
struct erofs_fileio {
@@ -31,13 +32,15 @@ static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
ret = 0;
}
if (rq->bio.bi_end_io) {
- rq->bio.bi_end_io(&rq->bio);
+ if (ret < 0 && !rq->bio.bi_status)
+ rq->bio.bi_status = errno_to_blk_status(ret);
} else {
bio_for_each_folio_all(fi, &rq->bio) {
DBG_BUGON(folio_test_uptodate(fi.folio));
- erofs_onlinefolio_end(fi.folio, ret);
+ erofs_onlinefolio_end(fi.folio, ret, false);
}
}
+ bio_endio(&rq->bio);
bio_uninit(&rq->bio);
kfree(rq);
}
@@ -52,11 +55,13 @@ static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq)
rq->iocb.ki_pos = rq->bio.bi_iter.bi_sector << SECTOR_SHIFT;
rq->iocb.ki_ioprio = get_current_ioprio();
rq->iocb.ki_complete = erofs_fileio_ki_complete;
- rq->iocb.ki_flags = (rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT) ?
- IOCB_DIRECT : 0;
+ if (test_opt(&EROFS_SB(rq->sb)->opt, DIRECT_IO) &&
+ rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT)
+ rq->iocb.ki_flags = IOCB_DIRECT;
iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt,
rq->bio.bi_iter.bi_size);
- ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter);
+ scoped_with_creds(rq->iocb.ki_filp->f_cred)
+ ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter);
if (ret != -EIOCBQUEUED)
erofs_fileio_ki_complete(&rq->iocb, ret);
}
@@ -66,8 +71,9 @@ static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev)
struct erofs_fileio_rq *rq = kzalloc(sizeof(*rq),
GFP_KERNEL | __GFP_NOFAIL);
- bio_init(&rq->bio, NULL, rq->bvecs, BIO_MAX_VECS, REQ_OP_READ);
- rq->iocb.ki_filp = mdev->m_fp;
+ bio_init(&rq->bio, NULL, rq->bvecs, ARRAY_SIZE(rq->bvecs), REQ_OP_READ);
+ rq->iocb.ki_filp = mdev->m_dif->file;
+ rq->sb = mdev->m_sb;
return rq;
}
@@ -88,8 +94,6 @@ static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio)
struct erofs_map_blocks *map = &io->map;
unsigned int cur = 0, end = folio_size(folio), len, attached = 0;
loff_t pos = folio_pos(folio), ofs;
- struct iov_iter iter;
- struct bio_vec bv;
int err = 0;
erofs_onlinefolio_init(folio);
@@ -109,18 +113,12 @@ static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio)
void *src;
src = erofs_read_metabuf(&buf, inode->i_sb,
- map->m_pa + ofs, EROFS_KMAP);
+ map->m_pa + ofs, erofs_inode_in_metabox(inode));
if (IS_ERR(src)) {
err = PTR_ERR(src);
break;
}
- bvec_set_folio(&bv, folio, len, cur);
- iov_iter_bvec(&iter, ITER_DEST, &bv, 1, len);
- if (copy_to_iter(src, len, &iter) != len) {
- erofs_put_metabuf(&buf);
- err = -EIO;
- break;
- }
+ memcpy_to_folio(folio, cur, src, len);
erofs_put_metabuf(&buf);
} else if (!(map->m_flags & EROFS_MAP_MAPPED)) {
folio_zero_segment(folio, cur, cur + len);
@@ -142,18 +140,19 @@ io_retry:
if (err)
break;
io->rq = erofs_fileio_rq_alloc(&io->dev);
- io->rq->bio.bi_iter.bi_sector = io->dev.m_pa >> 9;
+ io->rq->bio.bi_iter.bi_sector =
+ (io->dev.m_dif->fsoff + io->dev.m_pa) >> 9;
attached = 0;
}
- if (!attached++)
- erofs_onlinefolio_split(folio);
if (!bio_add_folio(&io->rq->bio, folio, len, cur))
goto io_retry;
+ if (!attached++)
+ erofs_onlinefolio_split(folio);
io->dev.m_pa += len;
}
cur += len;
}
- erofs_onlinefolio_end(folio, err);
+ erofs_onlinefolio_end(folio, err, false);
return err;
}
@@ -175,7 +174,7 @@ static void erofs_fileio_readahead(struct readahead_control *rac)
struct folio *folio;
int err;
- trace_erofs_readpages(inode, readahead_index(rac),
+ trace_erofs_readahead(inode, readahead_index(rac),
readahead_count(rac), true);
while ((folio = readahead_folio(rac))) {
err = erofs_fileio_scan_folio(&io, folio);
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index fda16eedafb5..7a346e20f7b7 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -102,8 +102,7 @@ static void erofs_fscache_req_io_put(struct erofs_fscache_io *io)
erofs_fscache_req_put(req);
}
-static void erofs_fscache_req_end_io(void *priv,
- ssize_t transferred_or_error, bool was_async)
+static void erofs_fscache_req_end_io(void *priv, ssize_t transferred_or_error)
{
struct erofs_fscache_io *io = priv;
struct erofs_fscache_rq *req = io->private;
@@ -180,14 +179,13 @@ struct erofs_fscache_bio {
struct bio_vec bvecs[BIO_MAX_VECS];
};
-static void erofs_fscache_bio_endio(void *priv,
- ssize_t transferred_or_error, bool was_async)
+static void erofs_fscache_bio_endio(void *priv, ssize_t transferred_or_error)
{
struct erofs_fscache_bio *io = priv;
if (IS_ERR_VALUE(transferred_or_error))
io->bio.bi_status = errno_to_blk_status(transferred_or_error);
- io->bio.bi_end_io(&io->bio);
+ bio_endio(&io->bio);
BUILD_BUG_ON(offsetof(struct erofs_fscache_bio, io) != 0);
erofs_fscache_io_put(&io->io);
}
@@ -198,7 +196,7 @@ struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev)
io = kmalloc(sizeof(*io), GFP_KERNEL | __GFP_NOFAIL);
bio_init(&io->bio, NULL, io->bvecs, BIO_MAX_VECS, REQ_OP_READ);
- io->io.private = mdev->m_fscache->cookie;
+ io->io.private = mdev->m_dif->fscache->cookie;
io->io.end_io = erofs_fscache_bio_endio;
refcount_set(&io->io.ref, 1);
return &io->bio;
@@ -218,7 +216,7 @@ void erofs_fscache_submit_bio(struct bio *bio)
if (!ret)
return;
bio->bi_status = errno_to_blk_status(ret);
- bio->bi_end_io(bio);
+ bio_endio(bio);
}
static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
@@ -276,7 +274,8 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_rq *req)
size_t size = map.m_llen;
void *src;
- src = erofs_read_metabuf(&buf, sb, map.m_pa, EROFS_KMAP);
+ src = erofs_read_metabuf(&buf, sb, map.m_pa,
+ erofs_inode_in_metabox(inode));
if (IS_ERR(src))
return PTR_ERR(src);
@@ -316,7 +315,7 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_rq *req)
if (!io)
return -ENOMEM;
iov_iter_xarray(&io->iter, ITER_DEST, &mapping->i_pages, pos, count);
- ret = erofs_fscache_read_io_async(mdev.m_fscache->cookie,
+ ret = erofs_fscache_read_io_async(mdev.m_dif->fscache->cookie,
mdev.m_pa + (pos - map.m_la), io);
erofs_fscache_req_io_put(io);
@@ -657,7 +656,7 @@ int erofs_fscache_register_fs(struct super_block *sb)
if (IS_ERR(fscache))
return PTR_ERR(fscache);
- sbi->s_fscache = fscache;
+ sbi->dif0.fscache = fscache;
return 0;
}
@@ -665,14 +664,14 @@ void erofs_fscache_unregister_fs(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
- erofs_fscache_unregister_cookie(sbi->s_fscache);
+ erofs_fscache_unregister_cookie(sbi->dif0.fscache);
if (sbi->domain)
erofs_fscache_domain_put(sbi->domain);
else
fscache_relinquish_volume(sbi->volume, NULL, false);
- sbi->s_fscache = NULL;
+ sbi->dif0.fscache = NULL;
sbi->volume = NULL;
sbi->domain = NULL;
}
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index d4b89407822a..bce98c845a18 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -5,6 +5,7 @@
* Copyright (C) 2021, Alibaba Cloud
*/
#include "xattr.h"
+#include <linux/compat.h>
#include <trace/events/erofs.h>
static int erofs_fill_symlink(struct inode *inode, void *kaddr,
@@ -27,29 +28,28 @@ static int erofs_fill_symlink(struct inode *inode, void *kaddr,
static int erofs_read_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
+ erofs_blk_t blkaddr = erofs_blknr(sb, erofs_iloc(inode));
+ unsigned int ofs = erofs_blkoff(sb, erofs_iloc(inode));
+ bool in_mbox = erofs_inode_in_metabox(inode);
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
struct erofs_sb_info *sbi = EROFS_SB(sb);
+ erofs_blk_t addrmask = BIT_ULL(48) - 1;
struct erofs_inode *vi = EROFS_I(inode);
- const erofs_off_t inode_loc = erofs_iloc(inode);
- erofs_blk_t blkaddr, nblks = 0;
- void *kaddr;
+ struct erofs_inode_extended *die, copied;
struct erofs_inode_compact *dic;
- struct erofs_inode_extended *die, *copied = NULL;
- union erofs_inode_i_u iu;
- struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
- unsigned int ifmt, ofs;
+ unsigned int ifmt;
+ void *ptr;
int err = 0;
- blkaddr = erofs_blknr(sb, inode_loc);
- ofs = erofs_blkoff(sb, inode_loc);
-
- kaddr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr), EROFS_KMAP);
- if (IS_ERR(kaddr)) {
- erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
- vi->nid, PTR_ERR(kaddr));
- return PTR_ERR(kaddr);
+ ptr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr), in_mbox);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ erofs_err(sb, "failed to read inode meta block (nid: %llu): %d",
+ vi->nid, err);
+ goto err_out;
}
- dic = kaddr + ofs;
+ dic = ptr + ofs;
ifmt = le16_to_cpu(dic->i_format);
if (ifmt & ~EROFS_I_ALL) {
erofs_err(sb, "unsupported i_format %u of nid %llu",
@@ -73,40 +73,34 @@ static int erofs_read_inode(struct inode *inode)
if (ofs + vi->inode_isize <= sb->s_blocksize) {
ofs += vi->inode_isize;
die = (struct erofs_inode_extended *)dic;
+ copied.i_u = die->i_u;
+ copied.i_nb = die->i_nb;
} else {
const unsigned int gotten = sb->s_blocksize - ofs;
- copied = kmalloc(vi->inode_isize, GFP_KERNEL);
- if (!copied) {
- err = -ENOMEM;
+ memcpy(&copied, dic, gotten);
+ ptr = erofs_read_metabuf(&buf, sb,
+ erofs_pos(sb, blkaddr + 1), in_mbox);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ erofs_err(sb, "failed to read inode payload block (nid: %llu): %d",
+ vi->nid, err);
goto err_out;
}
- memcpy(copied, dic, gotten);
- kaddr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr + 1),
- EROFS_KMAP);
- if (IS_ERR(kaddr)) {
- erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
- vi->nid, PTR_ERR(kaddr));
- kfree(copied);
- return PTR_ERR(kaddr);
- }
ofs = vi->inode_isize - gotten;
- memcpy((u8 *)copied + gotten, kaddr, ofs);
- die = copied;
+ memcpy((u8 *)&copied + gotten, ptr, ofs);
+ die = &copied;
}
vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);
inode->i_mode = le16_to_cpu(die->i_mode);
- iu = die->i_u;
i_uid_write(inode, le32_to_cpu(die->i_uid));
i_gid_write(inode, le32_to_cpu(die->i_gid));
set_nlink(inode, le32_to_cpu(die->i_nlink));
- /* each extended inode has its own timestamp */
- inode_set_ctime(inode, le64_to_cpu(die->i_mtime),
+ inode_set_mtime(inode, le64_to_cpu(die->i_mtime),
le32_to_cpu(die->i_mtime_nsec));
inode->i_size = le64_to_cpu(die->i_size);
- kfree(copied);
break;
case EROFS_INODE_LAYOUT_COMPACT:
vi->inode_isize = sizeof(struct erofs_inode_compact);
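The rewritten branch above drops the kmalloc()'d bounce buffer and reassembles an extended inode that straddles two metadata blocks into an on-stack `copied` structure with two memcpy() calls. A standalone sketch of the same pattern, using a toy record and block size rather than the real erofs_inode_extended layout (all names and sizes here are illustrative only):

        #include <stdint.h>
        #include <string.h>
        #include <stdio.h>

        struct record { uint8_t bytes[64]; };   /* toy on-disk record */
        #define BLKSZ 128                       /* toy block size */

        /*
         * Reassemble a record that straddles a block boundary: take what
         * fits in the current block, then the remainder from the start of
         * the next block, mirroring the two-step memcpy() in the hunk.
         */
        static void read_straddling(struct record *out, const uint8_t *blk0,
                                    const uint8_t *blk1, size_t ofs)
        {
                size_t gotten = BLKSZ - ofs;            /* bytes left in blk0 */

                if (gotten >= sizeof(*out)) {           /* fully inside one block */
                        memcpy(out, blk0 + ofs, sizeof(*out));
                        return;
                }
                memcpy(out, blk0 + ofs, gotten);
                memcpy((uint8_t *)out + gotten, blk1, sizeof(*out) - gotten);
        }

        int main(void)
        {
                uint8_t blk0[BLKSZ], blk1[BLKSZ];
                struct record rec;

                memset(blk0, 0xaa, sizeof(blk0));
                memset(blk1, 0xbb, sizeof(blk1));
                read_straddling(&rec, blk0, blk1, BLKSZ - 10);  /* 10 + 54 bytes */
                printf("first=%#x last=%#x\n", rec.bytes[0], rec.bytes[63]);
                return 0;
        }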
@@ -114,12 +108,20 @@ static int erofs_read_inode(struct inode *inode)
vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);
inode->i_mode = le16_to_cpu(dic->i_mode);
- iu = dic->i_u;
+ copied.i_u = dic->i_u;
i_uid_write(inode, le16_to_cpu(dic->i_uid));
i_gid_write(inode, le16_to_cpu(dic->i_gid));
- set_nlink(inode, le16_to_cpu(dic->i_nlink));
- /* use build time for compact inodes */
- inode_set_ctime(inode, sbi->build_time, sbi->build_time_nsec);
+ if (!S_ISDIR(inode->i_mode) &&
+ ((ifmt >> EROFS_I_NLINK_1_BIT) & 1)) {
+ set_nlink(inode, 1);
+ copied.i_nb = dic->i_nb;
+ } else {
+ set_nlink(inode, le16_to_cpu(dic->i_nb.nlink));
+ copied.i_nb.startblk_hi = 0;
+ addrmask = BIT_ULL(32) - 1;
+ }
+ inode_set_mtime(inode, sbi->epoch + le32_to_cpu(dic->i_mtime),
+ sbi->fixed_nsec);
inode->i_size = le32_to_cpu(dic->i_size);
break;
@@ -136,19 +138,26 @@ static int erofs_read_inode(struct inode *inode)
goto err_out;
}
switch (inode->i_mode & S_IFMT) {
- case S_IFREG:
case S_IFDIR:
+ vi->dot_omitted = (ifmt >> EROFS_I_DOT_OMITTED_BIT) & 1;
+ fallthrough;
+ case S_IFREG:
case S_IFLNK:
- vi->raw_blkaddr = le32_to_cpu(iu.raw_blkaddr);
+ vi->startblk = le32_to_cpu(copied.i_u.startblk_lo) |
+ ((u64)le16_to_cpu(copied.i_nb.startblk_hi) << 32);
+ if (vi->datalayout == EROFS_INODE_FLAT_PLAIN &&
+ !((vi->startblk ^ EROFS_NULL_ADDR) & addrmask))
+ vi->startblk = EROFS_NULL_ADDR;
+
if (S_ISLNK(inode->i_mode)) {
- err = erofs_fill_symlink(inode, kaddr, ofs);
+ err = erofs_fill_symlink(inode, ptr, ofs);
if (err)
goto err_out;
}
break;
case S_IFCHR:
case S_IFBLK:
- inode->i_rdev = new_decode_dev(le32_to_cpu(iu.rdev));
+ inode->i_rdev = new_decode_dev(le32_to_cpu(copied.i_u.rdev));
break;
case S_IFIFO:
case S_IFSOCK:
@@ -161,12 +170,15 @@ static int erofs_read_inode(struct inode *inode)
goto err_out;
}
- /* total blocks for compressed files */
- if (erofs_inode_is_data_compressed(vi->datalayout)) {
- nblks = le32_to_cpu(iu.compressed_blocks);
- } else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
+ if (erofs_inode_is_data_compressed(vi->datalayout))
+ inode->i_blocks = le32_to_cpu(copied.i_u.blocks_lo) <<
+ (sb->s_blocksize_bits - 9);
+ else
+ inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;
+
+ if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
/* fill chunked inode summary info */
- vi->chunkformat = le16_to_cpu(iu.c.format);
+ vi->chunkformat = le16_to_cpu(copied.i_u.c.format);
if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
erofs_err(sb, "unsupported chunk format %x of nid %llu",
vi->chunkformat, vi->nid);
@@ -176,22 +188,15 @@ static int erofs_read_inode(struct inode *inode)
vi->chunkbits = sb->s_blocksize_bits +
(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
}
- inode_set_mtime_to_ts(inode,
- inode_set_atime_to_ts(inode, inode_get_ctime(inode)));
+ inode_set_atime_to_ts(inode,
+ inode_set_ctime_to_ts(inode, inode_get_mtime(inode)));
inode->i_flags &= ~S_DAX;
if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
(vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
vi->datalayout == EROFS_INODE_CHUNK_BASED))
inode->i_flags |= S_DAX;
-
- if (!nblks)
- /* measure inode.i_blocks as generic filesystems */
- inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;
- else
- inode->i_blocks = nblks << (sb->s_blocksize_bits - 9);
err_out:
- DBG_BUGON(err);
erofs_put_metabuf(&buf);
return err;
}
@@ -202,20 +207,14 @@ static int erofs_fill_inode(struct inode *inode)
int err;
trace_erofs_fill_inode(inode);
-
- /* read inode base data from disk */
err = erofs_read_inode(inode);
if (err)
return err;
- /* setup the new inode */
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
inode->i_op = &erofs_generic_iops;
- if (erofs_inode_is_data_compressed(vi->datalayout))
- inode->i_fop = &generic_ro_fops;
- else
- inode->i_fop = &erofs_file_fops;
+ inode->i_fop = &erofs_file_fops;
break;
case S_IFDIR:
inode->i_op = &erofs_dir_iops;
@@ -229,15 +228,10 @@ static int erofs_fill_inode(struct inode *inode)
inode->i_op = &erofs_symlink_iops;
inode_nohighmem(inode);
break;
- case S_IFCHR:
- case S_IFBLK:
- case S_IFIFO:
- case S_IFSOCK:
+ default:
inode->i_op = &erofs_generic_iops;
init_special_inode(inode, inode->i_mode, inode->i_rdev);
return 0;
- default:
- return -EFSCORRUPTED;
}
mapping_set_large_folios(inode->i_mapping);
@@ -269,13 +263,13 @@ static int erofs_fill_inode(struct inode *inode)
* ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
* so that it will fit.
*/
-static ino_t erofs_squash_ino(erofs_nid_t nid)
+static ino_t erofs_squash_ino(struct super_block *sb, erofs_nid_t nid)
{
- ino_t ino = (ino_t)nid;
+ u64 ino64 = erofs_nid_to_ino64(EROFS_SB(sb), nid);
if (sizeof(ino_t) < sizeof(erofs_nid_t))
- ino ^= nid >> (sizeof(erofs_nid_t) - sizeof(ino_t)) * 8;
- return ino;
+ ino64 ^= ino64 >> (sizeof(erofs_nid_t) - sizeof(ino_t)) * 8;
+ return (ino_t)ino64;
}
static int erofs_iget5_eq(struct inode *inode, void *opaque)
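On 32-bit architectures ino_t is 4 bytes, so the reworked erofs_squash_ino() XOR-folds the upper half of the 64-bit value into the lower half before truncating, keeping some entropy from the high bits. A minimal userspace restatement of that fold (nothing here beyond the shift arithmetic is taken from the patch):

        #include <stdint.h>
        #include <stdio.h>

        /* Fold a 64-bit inode number into 32 bits, as on a 32-bit kernel. */
        static uint32_t squash_ino32(uint64_t ino64)
        {
                return (uint32_t)(ino64 ^ (ino64 >> 32));
        }

        int main(void)
        {
                uint64_t ino64 = 0x0000000700001234ULL;

                printf("%#llx -> %#x\n",
                       (unsigned long long)ino64, squash_ino32(ino64));
                return 0;
        }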
@@ -287,7 +281,7 @@ static int erofs_iget5_set(struct inode *inode, void *opaque)
{
const erofs_nid_t nid = *(erofs_nid_t *)opaque;
- inode->i_ino = erofs_squash_ino(nid);
+ inode->i_ino = erofs_squash_ino(inode->i_sb, nid);
EROFS_I(inode)->nid = nid;
return 0;
}
@@ -296,12 +290,12 @@ struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid)
{
struct inode *inode;
- inode = iget5_locked(sb, erofs_squash_ino(nid), erofs_iget5_eq,
+ inode = iget5_locked(sb, erofs_squash_ino(sb, nid), erofs_iget5_eq,
erofs_iget5_set, &nid);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (inode->i_state & I_NEW) {
+ if (inode_state_read_once(inode) & I_NEW) {
int err = erofs_fill_inode(inode);
if (err) {
@@ -345,6 +339,40 @@ int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
return 0;
}
+static int erofs_ioctl_get_volume_label(struct inode *inode, void __user *arg)
+{
+ struct erofs_sb_info *sbi = EROFS_I_SB(inode);
+ int ret;
+
+ if (!sbi->volume_name)
+ ret = clear_user(arg, 1);
+ else
+ ret = copy_to_user(arg, sbi->volume_name,
+ strlen(sbi->volume_name));
+ return ret ? -EFAULT : 0;
+}
+
+long erofs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ void __user *argp = (void __user *)arg;
+
+ switch (cmd) {
+ case FS_IOC_GETFSLABEL:
+ return erofs_ioctl_get_volume_label(inode, argp);
+ default:
+ return -ENOTTY;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+long erofs_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return erofs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
const struct inode_operations erofs_generic_iops = {
.getattr = erofs_getattr,
.listxattr = erofs_listxattr,
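With the new erofs_ioctl() handler, the label parsed from the superblock becomes reachable through the generic FS_IOC_GETFSLABEL interface, the same way other filesystems expose it. A minimal userspace caller (any file or directory on the mount works as the path argument; FS_IOC_GETFSLABEL and FSLABEL_MAX come from <linux/fs.h>):

        #include <sys/ioctl.h>
        #include <linux/fs.h>           /* FS_IOC_GETFSLABEL, FSLABEL_MAX */
        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(int argc, char **argv)
        {
                char label[FSLABEL_MAX] = "";   /* zero-filled, stays NUL-terminated */
                int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

                if (fd < 0 || ioctl(fd, FS_IOC_GETFSLABEL, label) < 0) {
                        perror("FS_IOC_GETFSLABEL");
                        return 1;
                }
                printf("label: %s\n", label);
                close(fd);
                return 0;
        }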
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 1c847c30a918..f7f622836198 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -37,18 +37,17 @@ __printf(2, 3) void _erofs_printk(struct super_block *sb, const char *fmt, ...);
typedef u64 erofs_nid_t;
typedef u64 erofs_off_t;
-/* data type for filesystem-wide blocks number */
-typedef u32 erofs_blk_t;
+typedef u64 erofs_blk_t;
struct erofs_device_info {
char *path;
struct erofs_fscache *fscache;
struct file *file;
struct dax_device *dax_dev;
- u64 dax_part_off;
+ u64 fsoff, dax_part_off;
- u32 blocks;
- u32 mapped_blkaddr;
+ erofs_blk_t blocks;
+ erofs_blk_t uniaddr;
};
enum {
@@ -107,6 +106,7 @@ struct erofs_xattr_prefix_item {
};
struct erofs_sb_info {
+ struct erofs_device_info dif0;
struct erofs_mount_opts opt; /* options */
#ifdef CONFIG_EROFS_FS_ZIP
/* list for all registered superblocks, mainly for shrinker */
@@ -124,13 +124,10 @@ struct erofs_sb_info {
struct erofs_sb_lz4_info lz4;
#endif /* CONFIG_EROFS_FS_ZIP */
- struct file *fdev;
struct inode *packed_inode;
+ struct inode *metabox_inode;
struct erofs_dev_context *devs;
- struct dax_device *dax_dev;
- u64 dax_part_off;
u64 total_blocks;
- u32 primarydevice_blocks;
u32 meta_blkaddr;
#ifdef CONFIG_EROFS_FS_XATTR
@@ -146,27 +143,27 @@ struct erofs_sb_info {
unsigned char blkszbits; /* filesystem block size in bit shift */
u32 sb_size; /* total superblock size */
- u32 build_time_nsec;
- u64 build_time;
+ u32 fixed_nsec;
+ s64 epoch;
/* what we really care is nid, rather than ino.. */
erofs_nid_t root_nid;
erofs_nid_t packed_nid;
+ erofs_nid_t metabox_nid;
/* used for statfs, f_files - f_favail */
u64 inos;
- u8 uuid[16]; /* 128-bit uuid for volume */
- u8 volume_name[16]; /* volume name */
+ char *volume_name;
u32 feature_compat;
u32 feature_incompat;
/* sysfs support */
struct kobject s_kobj; /* /sys/fs/erofs/<devname> */
struct completion s_kobj_unregister;
+ erofs_off_t dir_ra_bytes;
/* fscache support */
struct fscache_volume *volume;
- struct erofs_fscache *s_fscache;
struct erofs_domain *domain;
char *fsid;
char *domain_id;
@@ -180,6 +177,7 @@ struct erofs_sb_info {
#define EROFS_MOUNT_POSIX_ACL 0x00000020
#define EROFS_MOUNT_DAX_ALWAYS 0x00000040
#define EROFS_MOUNT_DAX_NEVER 0x00000080
+#define EROFS_MOUNT_DIRECT_IO 0x00000100
#define clear_opt(opt, option) ((opt)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(opt, option) ((opt)->mount_opt |= EROFS_MOUNT_##option)
@@ -187,7 +185,7 @@ struct erofs_sb_info {
static inline bool erofs_is_fileio_mode(struct erofs_sb_info *sbi)
{
- return IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && sbi->fdev;
+ return IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && sbi->dif0.file;
}
static inline bool erofs_is_fscache_mode(struct super_block *sb)
@@ -202,21 +200,17 @@ enum {
EROFS_ZIP_CACHE_READAROUND
};
-enum erofs_kmap_type {
- EROFS_NO_KMAP, /* don't map the buffer */
- EROFS_KMAP, /* use kmap_local_page() to map the buffer */
-};
-
struct erofs_buf {
struct address_space *mapping;
struct file *file;
+ u64 off;
struct page *page;
void *base;
};
#define __EROFS_BUF_INITIALIZER ((struct erofs_buf){ .page = NULL })
-#define erofs_blknr(sb, addr) ((erofs_blk_t)((addr) >> (sb)->s_blocksize_bits))
-#define erofs_blkoff(sb, addr) ((addr) & ((sb)->s_blocksize - 1))
+#define erofs_blknr(sb, pos) ((erofs_blk_t)((pos) >> (sb)->s_blocksize_bits))
+#define erofs_blkoff(sb, pos) ((pos) & ((sb)->s_blocksize - 1))
#define erofs_pos(sb, blk) ((erofs_off_t)(blk) << (sb)->s_blocksize_bits)
#define erofs_iblks(i) (round_up((i)->i_size, i_blocksize(i)) >> (i)->i_blkbits)
@@ -236,8 +230,29 @@ EROFS_FEATURE_FUNCS(ztailpacking, incompat, INCOMPAT_ZTAILPACKING)
EROFS_FEATURE_FUNCS(fragments, incompat, INCOMPAT_FRAGMENTS)
EROFS_FEATURE_FUNCS(dedupe, incompat, INCOMPAT_DEDUPE)
EROFS_FEATURE_FUNCS(xattr_prefixes, incompat, INCOMPAT_XATTR_PREFIXES)
+EROFS_FEATURE_FUNCS(48bit, incompat, INCOMPAT_48BIT)
+EROFS_FEATURE_FUNCS(metabox, incompat, INCOMPAT_METABOX)
EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM)
EROFS_FEATURE_FUNCS(xattr_filter, compat, COMPAT_XATTR_FILTER)
+EROFS_FEATURE_FUNCS(shared_ea_in_metabox, compat, COMPAT_SHARED_EA_IN_METABOX)
+EROFS_FEATURE_FUNCS(plain_xattr_pfx, compat, COMPAT_PLAIN_XATTR_PFX)
+
+static inline u64 erofs_nid_to_ino64(struct erofs_sb_info *sbi, erofs_nid_t nid)
+{
+ if (!erofs_sb_has_metabox(sbi))
+ return nid;
+
+ /*
+ * When metadata compression is enabled, avoid generating excessively
+ * large inode numbers for metadata-compressed inodes. Shift NIDs in
+ * the 31-62 bit range left by one and move the metabox flag to bit 31.
+ *
+ * Note: on-disk NIDs remain unchanged as they are primarily used for
+ * compatibility with non-LFS 32-bit applications.
+ */
+ return ((nid << 1) & GENMASK_ULL(63, 32)) | (nid & GENMASK(30, 0)) |
+ ((nid >> EROFS_DIRENT_NID_METABOX_BIT) << 31);
+}
/* atomic flag definitions */
#define EROFS_I_EA_INITED_BIT 0
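The bit gymnastics in erofs_nid_to_ino64() above are easier to see in isolation. A userspace restatement follows; the metabox flag is taken to be NID bit 63 (which is what shifting the 31..62 range up by one implies), and the GENMASK values are spelled out as plain constants:

        #include <stdint.h>
        #include <stdio.h>

        #define NID_METABOX_BIT 63      /* assumed position of the metabox flag */

        /* Map an on-disk NID to the 64-bit inode number exposed to userspace. */
        static uint64_t nid_to_ino64(uint64_t nid)
        {
                uint64_t mid  = (nid << 1) & 0xffffffff00000000ULL; /* bits 31..62 -> 32..63 */
                uint64_t low  = nid & 0x7fffffffULL;                /* bits 0..30 unchanged  */
                uint64_t flag = (nid >> NID_METABOX_BIT) << 31;     /* metabox flag -> bit 31 */

                return mid | low | flag;
        }

        int main(void)
        {
                uint64_t nid = (1ULL << NID_METABOX_BIT) | 0x12345; /* a metabox inode */

                printf("nid=%#llx ino64=%#llx\n",
                       (unsigned long long)nid,
                       (unsigned long long)nid_to_ino64(nid));
                return 0;
        }

The point of the remapping is that small metabox NIDs still yield inode numbers that fit in 32 bits, which keeps non-LFS 32-bit userspace working.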
@@ -247,6 +262,9 @@ EROFS_FEATURE_FUNCS(xattr_filter, compat, COMPAT_XATTR_FILTER)
#define EROFS_I_BL_XATTR_BIT (BITS_PER_LONG - 1)
#define EROFS_I_BL_Z_BIT (BITS_PER_LONG - 2)
+/* default readahead size of directories */
+#define EROFS_DIR_RA_BYTES 16384
+
struct erofs_inode {
erofs_nid_t nid;
@@ -255,6 +273,7 @@ struct erofs_inode {
unsigned char datalayout;
unsigned char inode_isize;
+ bool dot_omitted;
unsigned int xattr_isize;
unsigned int xattr_name_filter;
@@ -262,7 +281,7 @@ struct erofs_inode {
unsigned int *xattr_shared_xattrs;
union {
- erofs_blk_t raw_blkaddr;
+ erofs_blk_t startblk;
struct {
unsigned short chunkformat;
unsigned char chunkbits;
@@ -271,15 +290,13 @@ struct erofs_inode {
struct {
unsigned short z_advise;
unsigned char z_algorithmtype[2];
- unsigned char z_logical_clusterbits;
- unsigned long z_tailextent_headlcn;
+ unsigned char z_lclusterbits;
union {
- struct {
- erofs_off_t z_idataoff;
- unsigned short z_idata_size;
- };
- erofs_off_t z_fragmentoff;
+ u64 z_tailextent_headlcn;
+ u64 z_extents;
};
+ erofs_off_t z_fragmentoff;
+ unsigned short z_idata_size;
};
#endif /* CONFIG_EROFS_FS_ZIP */
};
@@ -289,12 +306,20 @@ struct erofs_inode {
#define EROFS_I(ptr) container_of(ptr, struct erofs_inode, vfs_inode)
+static inline bool erofs_inode_in_metabox(struct inode *inode)
+{
+ return EROFS_I(inode)->nid & BIT_ULL(EROFS_DIRENT_NID_METABOX_BIT);
+}
+
static inline erofs_off_t erofs_iloc(struct inode *inode)
{
struct erofs_sb_info *sbi = EROFS_I_SB(inode);
+ erofs_nid_t nid_lo = EROFS_I(inode)->nid & EROFS_DIRENT_NID_MASK;
+ if (erofs_inode_in_metabox(inode))
+ return nid_lo << sbi->islotbits;
return erofs_pos(inode->i_sb, sbi->meta_blkaddr) +
- (EROFS_I(inode)->nid << sbi->islotbits);
+ (nid_lo << sbi->islotbits);
}
static inline unsigned int erofs_inode_version(unsigned int ifmt)
@@ -325,10 +350,12 @@ static inline struct folio *erofs_grab_folio_nowait(struct address_space *as,
/* The length of extent is full */
#define EROFS_MAP_FULL_MAPPED 0x0008
/* Located in the special packed inode */
-#define EROFS_MAP_FRAGMENT 0x0010
+#define __EROFS_MAP_FRAGMENT 0x0010
/* The extent refers to partial decompressed data */
#define EROFS_MAP_PARTIAL_REF 0x0020
+#define EROFS_MAP_FRAGMENT (EROFS_MAP_MAPPED | __EROFS_MAP_FRAGMENT)
+
struct erofs_map_blocks {
struct erofs_buf buf;
@@ -357,11 +384,9 @@ enum {
};
struct erofs_map_dev {
- struct erofs_fscache *m_fscache;
+ struct super_block *m_sb;
+ struct erofs_device_info *m_dif;
struct block_device *m_bdev;
- struct dax_device *m_daxdev;
- struct file *m_fp;
- u64 m_dax_part_off;
erofs_off_t m_pa;
unsigned int m_deviceid;
@@ -392,18 +417,18 @@ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
erofs_off_t *offset, int *lengthp);
void erofs_unmap_metabuf(struct erofs_buf *buf);
void erofs_put_metabuf(struct erofs_buf *buf);
-void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
- enum erofs_kmap_type type);
-void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb);
+void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, bool need_kmap);
+int erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb,
+ bool in_metabox);
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
- erofs_off_t offset, enum erofs_kmap_type type);
+ erofs_off_t offset, bool in_metabox);
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev);
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map);
void erofs_onlinefolio_init(struct folio *folio);
void erofs_onlinefolio_split(struct folio *folio);
-void erofs_onlinefolio_end(struct folio *folio, int err);
+void erofs_onlinefolio_end(struct folio *folio, int err, bool dirty);
struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid);
int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask,
@@ -453,6 +478,7 @@ int __init erofs_init_shrinker(void);
void erofs_exit_shrinker(void);
int __init z_erofs_init_subsystem(void);
void z_erofs_exit_subsystem(void);
+int z_erofs_init_super(struct super_block *sb);
unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
unsigned long nr_shrink);
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
@@ -462,7 +488,6 @@ void z_erofs_put_gbuf(void *ptr);
int z_erofs_gbuf_growsize(unsigned int nrpages);
int __init z_erofs_gbuf_init(void);
void z_erofs_gbuf_exit(void);
-int erofs_init_managed_cache(struct super_block *sb);
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb);
#else
static inline void erofs_shrinker_register(struct super_block *sb) {}
@@ -471,7 +496,7 @@ static inline int erofs_init_shrinker(void) { return 0; }
static inline void erofs_exit_shrinker(void) {}
static inline int z_erofs_init_subsystem(void) { return 0; }
static inline void z_erofs_exit_subsystem(void) {}
-static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
+static inline int z_erofs_init_super(struct super_block *sb) { return 0; }
#endif /* !CONFIG_EROFS_FS_ZIP */
#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
@@ -512,6 +537,10 @@ static inline struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev) {
static inline void erofs_fscache_submit_bio(struct bio *bio) {}
#endif
+long erofs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+long erofs_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+
#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
#endif /* __EROFS_INTERNAL_H */
diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c
index c94d0c1608a8..f7cf4f41af28 100644
--- a/fs/erofs/namei.c
+++ b/fs/erofs/namei.c
@@ -100,7 +100,7 @@ static void *erofs_find_target_block(struct erofs_buf *target,
struct erofs_dirent *de;
buf.mapping = dir->i_mapping;
- de = erofs_bread(&buf, erofs_pos(dir->i_sb, mid), EROFS_KMAP);
+ de = erofs_bread(&buf, erofs_pos(dir->i_sb, mid), true);
if (!IS_ERR(de)) {
const int nameoff = nameoff_from_disk(de->nameoff, bsz);
const int ndirents = nameoff / sizeof(*de);
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index c235a8e4315e..937a215f626c 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -39,29 +39,21 @@ void _erofs_printk(struct super_block *sb, const char *fmt, ...)
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
- size_t len = 1 << EROFS_SB(sb)->blkszbits;
- struct erofs_super_block *dsb;
- u32 expected_crc, crc;
+ struct erofs_super_block *dsb = sbdata + EROFS_SUPER_OFFSET;
+ u32 len = 1 << EROFS_SB(sb)->blkszbits, crc;
if (len > EROFS_SUPER_OFFSET)
len -= EROFS_SUPER_OFFSET;
+ len -= offsetof(struct erofs_super_block, checksum) +
+ sizeof(dsb->checksum);
- dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, len, GFP_KERNEL);
- if (!dsb)
- return -ENOMEM;
-
- expected_crc = le32_to_cpu(dsb->checksum);
- dsb->checksum = 0;
- /* to allow for x86 boot sectors and other oddities. */
- crc = crc32c(~0, dsb, len);
- kfree(dsb);
-
- if (crc != expected_crc) {
- erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
- crc, expected_crc);
- return -EBADMSG;
- }
- return 0;
+ /* skip .magic(pre-verified) and .checksum(0) fields */
+ crc = crc32c(0x5045B54A, (&dsb->checksum) + 1, len);
+ if (crc == le32_to_cpu(dsb->checksum))
+ return 0;
+ erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
+ crc, le32_to_cpu(dsb->checksum));
+ return -EBADMSG;
}
static void erofs_inode_init_once(void *ptr)
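The checksum rework above avoids the kmemdup() by hashing in place: it starts the CRC right after the checksum field and seeds crc32c() with 0x5045B54A, which is presumably the precomputed state over the skipped magic-plus-zeroed-checksum prefix. The resumability this relies on can be checked with a standalone bitwise CRC32C (local reimplementation, not the kernel helper):

        #include <stdint.h>
        #include <string.h>
        #include <stdio.h>

        /* Bitwise CRC32C (Castagnoli, reflected polynomial 0x82F63B78). */
        static uint32_t crc32c(uint32_t crc, const void *p, size_t len)
        {
                const uint8_t *b = p;

                while (len--) {
                        crc ^= *b++;
                        for (int i = 0; i < 8; i++)
                                crc = (crc >> 1) ^ (0x82F63B78U & -(crc & 1));
                }
                return crc;
        }

        int main(void)
        {
                uint8_t sb[64];

                memset(sb, 0x5a, sizeof(sb));
                memset(sb + 4, 0, 4);   /* pretend bytes 4..7 hold a zeroed checksum */

                uint32_t full = crc32c(~0U, sb, sizeof(sb));
                uint32_t seed = crc32c(~0U, sb, 8);     /* state after the fixed prefix */
                uint32_t rest = crc32c(seed, sb + 8, sizeof(sb) - 8);

                printf("full=%#x resumed=%#x match=%d\n", full, rest, full == rest);
                return 0;
        }

Resuming from a saved intermediate state produces the same value as hashing the whole buffer, so the fixed prefix never needs to be re-read or copied.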
@@ -102,7 +94,7 @@ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
int len, i, cnt;
*offset = round_up(*offset, 4);
- ptr = erofs_bread(buf, *offset, EROFS_KMAP);
+ ptr = erofs_bread(buf, *offset, true);
if (IS_ERR(ptr))
return ptr;
@@ -118,7 +110,7 @@ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
for (i = 0; i < len; i += cnt) {
cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset),
len - i);
- ptr = erofs_bread(buf, *offset, EROFS_KMAP);
+ ptr = erofs_bread(buf, *offset, true);
if (IS_ERR(ptr)) {
kfree(buffer);
return ptr;
@@ -149,7 +141,7 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
struct erofs_deviceslot *dis;
struct file *file;
- dis = erofs_read_metabuf(buf, sb, *pos, EROFS_KMAP);
+ dis = erofs_read_metabuf(buf, sb, *pos, false);
if (IS_ERR(dis))
return PTR_ERR(dis);
@@ -173,8 +165,11 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
filp_open(dif->path, O_RDONLY | O_LARGEFILE, 0) :
bdev_file_open_by_path(dif->path,
BLK_OPEN_READ, sb->s_type, NULL);
- if (IS_ERR(file))
+ if (IS_ERR(file)) {
+ if (file == ERR_PTR(-ENOTBLK))
+ return -EINVAL;
return PTR_ERR(file);
+ }
if (!erofs_is_fileio_mode(sbi)) {
dif->dax_dev = fs_dax_get_by_bdev(file_bdev(file),
@@ -183,11 +178,16 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
fput(file);
return -EINVAL;
}
+ if (!dif->dax_dev && test_opt(&sbi->opt, DAX_ALWAYS)) {
+ erofs_info(sb, "DAX unsupported by %s. Turning off DAX.",
+ dif->path);
+ clear_opt(&sbi->opt, DAX_ALWAYS);
+ }
dif->file = file;
}
- dif->blocks = le32_to_cpu(dis->blocks);
- dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
+ dif->blocks = le32_to_cpu(dis->blocks_lo);
+ dif->uniaddr = le32_to_cpu(dis->uniaddr_lo);
sbi->total_blocks += dif->blocks;
*pos += EROFS_DEVT_SLOT_SIZE;
return 0;
@@ -203,7 +203,7 @@ static int erofs_scan_devices(struct super_block *sb,
struct erofs_device_info *dif;
int id, err = 0;
- sbi->total_blocks = sbi->primarydevice_blocks;
+ sbi->total_blocks = sbi->dif0.blocks;
if (!erofs_sb_has_device_table(sbi))
ondisk_extradevs = 0;
else
@@ -215,6 +215,11 @@ static int erofs_scan_devices(struct super_block *sb,
ondisk_extradevs, sbi->devs->extra_devices);
return -EINVAL;
}
+
+ if (test_opt(&sbi->opt, DAX_ALWAYS) && !sbi->dif0.dax_dev) {
+ erofs_info(sb, "DAX unsupported by block device. Turning off DAX.");
+ clear_opt(&sbi->opt, DAX_ALWAYS);
+ }
if (!ondisk_extradevs)
return 0;
@@ -263,7 +268,7 @@ static int erofs_read_superblock(struct super_block *sb)
void *data;
int ret;
- data = erofs_read_metabuf(&buf, sb, 0, EROFS_KMAP);
+ data = erofs_read_metabuf(&buf, sb, 0, false);
if (IS_ERR(data)) {
erofs_err(sb, "cannot read erofs superblock");
return PTR_ERR(data);
@@ -276,7 +281,7 @@ static int erofs_read_superblock(struct super_block *sb)
goto out;
}
- sbi->blkszbits = dsb->blkszbits;
+ sbi->blkszbits = dsb->blkszbits;
if (sbi->blkszbits < 9 || sbi->blkszbits > PAGE_SHIFT) {
erofs_err(sb, "blkszbits %u isn't supported", sbi->blkszbits);
goto out;
@@ -307,7 +312,7 @@ static int erofs_read_superblock(struct super_block *sb)
sbi->sb_size);
goto out;
}
- sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
+ sbi->dif0.blocks = le32_to_cpu(dsb->blocks_lo);
sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
@@ -316,21 +321,33 @@ static int erofs_read_superblock(struct super_block *sb)
sbi->xattr_filter_reserved = dsb->xattr_filter_reserved;
#endif
sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
- sbi->root_nid = le16_to_cpu(dsb->root_nid);
+ if (erofs_sb_has_48bit(sbi) && dsb->rootnid_8b) {
+ sbi->root_nid = le64_to_cpu(dsb->rootnid_8b);
+ sbi->dif0.blocks = sbi->dif0.blocks |
+ ((u64)le16_to_cpu(dsb->rb.blocks_hi) << 32);
+ } else {
+ sbi->root_nid = le16_to_cpu(dsb->rb.rootnid_2b);
+ }
sbi->packed_nid = le64_to_cpu(dsb->packed_nid);
+ if (erofs_sb_has_metabox(sbi)) {
+ if (sbi->sb_size <= offsetof(struct erofs_super_block,
+ metabox_nid))
+ return -EFSCORRUPTED;
+ sbi->metabox_nid = le64_to_cpu(dsb->metabox_nid);
+ if (sbi->metabox_nid & BIT_ULL(EROFS_DIRENT_NID_METABOX_BIT))
+ return -EFSCORRUPTED; /* self-loop detection */
+ }
sbi->inos = le64_to_cpu(dsb->inos);
- sbi->build_time = le64_to_cpu(dsb->build_time);
- sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);
-
+ sbi->epoch = (s64)le64_to_cpu(dsb->epoch);
+ sbi->fixed_nsec = le32_to_cpu(dsb->fixed_nsec);
super_set_uuid(sb, (void *)dsb->uuid, sizeof(dsb->uuid));
- ret = strscpy(sbi->volume_name, dsb->volume_name,
- sizeof(dsb->volume_name));
- if (ret < 0) { /* -E2BIG */
- erofs_err(sb, "bad volume name without NIL terminator");
- ret = -EFSCORRUPTED;
- goto out;
+ if (dsb->volume_name[0]) {
+ sbi->volume_name = kstrndup(dsb->volume_name,
+ sizeof(dsb->volume_name), GFP_KERNEL);
+ if (!sbi->volume_name)
+ return -ENOMEM;
}
/* parse on-disk compression configurations */
@@ -338,9 +355,12 @@ static int erofs_read_superblock(struct super_block *sb)
if (ret < 0)
goto out;
- /* handle multiple devices */
ret = erofs_scan_devices(sb, dsb);
+ if (erofs_sb_has_48bit(sbi))
+ erofs_info(sb, "EXPERIMENTAL 48-bit layout support in use. Use at your own risk!");
+ if (erofs_sb_has_metabox(sbi))
+ erofs_info(sb, "EXPERIMENTAL metadata compression support in use. Use at your own risk!");
if (erofs_is_fscache_mode(sb))
erofs_info(sb, "[deprecated] fscache-based on-demand read feature in use. Use at your own risk!");
out:
@@ -364,15 +384,8 @@ static void erofs_default_options(struct erofs_sb_info *sbi)
}
enum {
- Opt_user_xattr,
- Opt_acl,
- Opt_cache_strategy,
- Opt_dax,
- Opt_dax_enum,
- Opt_device,
- Opt_fsid,
- Opt_domain_id,
- Opt_err
+ Opt_user_xattr, Opt_acl, Opt_cache_strategy, Opt_dax, Opt_dax_enum,
+ Opt_device, Opt_fsid, Opt_domain_id, Opt_directio, Opt_fsoffset,
};
static const struct constant_table erofs_param_cache_strategy[] = {
@@ -398,6 +411,8 @@ static const struct fs_parameter_spec erofs_fs_parameters[] = {
fsparam_string("device", Opt_device),
fsparam_string("fsid", Opt_fsid),
fsparam_string("domain_id", Opt_domain_id),
+ fsparam_flag_no("directio", Opt_directio),
+ fsparam_u64("fsoffset", Opt_fsoffset),
{}
};
@@ -511,30 +526,69 @@ static int erofs_fc_parse_param(struct fs_context *fc,
errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
break;
#endif
- default:
- return -ENOPARAM;
+ case Opt_directio:
+#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
+ if (result.boolean)
+ set_opt(&sbi->opt, DIRECT_IO);
+ else
+ clear_opt(&sbi->opt, DIRECT_IO);
+#else
+ errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
+#endif
+ break;
+ case Opt_fsoffset:
+ sbi->dif0.fsoff = result.uint_64;
+ break;
}
return 0;
}
-static struct inode *erofs_nfs_get_inode(struct super_block *sb,
- u64 ino, u32 generation)
+static int erofs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+ struct inode *parent)
{
- return erofs_iget(sb, ino);
+ erofs_nid_t nid = EROFS_I(inode)->nid;
+ int len = parent ? 6 : 3;
+
+ if (*max_len < len) {
+ *max_len = len;
+ return FILEID_INVALID;
+ }
+
+ fh[0] = (u32)(nid >> 32);
+ fh[1] = (u32)(nid & 0xffffffff);
+ fh[2] = inode->i_generation;
+
+ if (parent) {
+ nid = EROFS_I(parent)->nid;
+
+ fh[3] = (u32)(nid >> 32);
+ fh[4] = (u32)(nid & 0xffffffff);
+ fh[5] = parent->i_generation;
+ }
+
+ *max_len = len;
+ return parent ? FILEID_INO64_GEN_PARENT : FILEID_INO64_GEN;
}
static struct dentry *erofs_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
- return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
- erofs_nfs_get_inode);
+ if ((fh_type != FILEID_INO64_GEN &&
+ fh_type != FILEID_INO64_GEN_PARENT) || fh_len < 3)
+ return NULL;
+
+ return d_obtain_alias(erofs_iget(sb,
+ ((u64)fid->raw[0] << 32) | fid->raw[1]));
}
static struct dentry *erofs_fh_to_parent(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
- return generic_fh_to_parent(sb, fid, fh_len, fh_type,
- erofs_nfs_get_inode);
+ if (fh_type != FILEID_INO64_GEN_PARENT || fh_len < 6)
+ return NULL;
+
+ return d_obtain_alias(erofs_iget(sb,
+ ((u64)fid->raw[3] << 32) | fid->raw[4]));
}
static struct dentry *erofs_get_parent(struct dentry *child)
@@ -550,7 +604,7 @@ static struct dentry *erofs_get_parent(struct dentry *child)
}
static const struct export_operations erofs_export_ops = {
- .encode_fh = generic_encode_ino32_fh,
+ .encode_fh = erofs_encode_fh,
.fh_to_dentry = erofs_fh_to_dentry,
.fh_to_parent = erofs_fh_to_parent,
.get_parent = erofs_get_parent,
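generic_encode_ino32_fh() only carries a 32-bit i_ino, so the export ops above now encode the full 64-bit NID split across two handle words plus i_generation, and decode it back in fh_to_dentry/fh_to_parent. The packing round-trips as in this standalone sketch (generation handling left out for brevity):

        #include <stdint.h>
        #include <stdio.h>

        static void encode_fh(uint32_t fh[2], uint64_t nid)
        {
                fh[0] = (uint32_t)(nid >> 32);          /* high word */
                fh[1] = (uint32_t)(nid & 0xffffffff);   /* low word  */
        }

        static uint64_t decode_fh(const uint32_t fh[2])
        {
                return ((uint64_t)fh[0] << 32) | fh[1];
        }

        int main(void)
        {
                uint32_t fh[2];
                uint64_t nid = 0x0123456789abcdefULL;

                encode_fh(fh, nid);
                printf("roundtrip ok: %d\n", decode_fh(fh) == nid);
                return 0;
        }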
@@ -585,6 +639,22 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
sbi->blkszbits = PAGE_SHIFT;
if (!sb->s_bdev) {
+ /*
+ * (File-backed mounts) EROFS claims it's safe to nest other
+ * fs contexts (including its own) due to self-controlled RO
+ * accesses/contexts and no side-effect changes that need to
+ * context save & restore so it can reuse the current thread
+ * context. However, it still needs to bump `s_stack_depth` to
+ * avoid kernel stack overflow from nested filesystems.
+ */
+ if (erofs_is_fileio_mode(sbi)) {
+ sb->s_stack_depth =
+ file_inode(sbi->dif0.file)->i_sb->s_stack_depth + 1;
+ if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
+ erofs_err(sb, "maximum fs stacking depth exceeded");
+ return -ENOTBLK;
+ }
+ }
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
@@ -602,9 +672,8 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
return -EINVAL;
}
- sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
- &sbi->dax_part_off,
- NULL, NULL);
+ sbi->dif0.dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
+ &sbi->dif0.dax_part_off, NULL, NULL);
}
err = erofs_read_superblock(sb);
@@ -626,14 +695,17 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
}
}
- if (test_opt(&sbi->opt, DAX_ALWAYS)) {
- if (!sbi->dax_dev) {
- errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
- clear_opt(&sbi->opt, DAX_ALWAYS);
- } else if (sbi->blkszbits != PAGE_SHIFT) {
- errorfc(fc, "unsupported blocksize for DAX");
- clear_opt(&sbi->opt, DAX_ALWAYS);
- }
+ if (sbi->dif0.fsoff) {
+ if (sbi->dif0.fsoff & (sb->s_blocksize - 1))
+ return invalfc(fc, "fsoffset %llu is not aligned to block size %lu",
+ sbi->dif0.fsoff, sb->s_blocksize);
+ if (erofs_is_fscache_mode(sb))
+ return invalfc(fc, "cannot use fsoffset in fscache mode");
+ }
+
+ if (test_opt(&sbi->opt, DAX_ALWAYS) && sbi->blkszbits != PAGE_SHIFT) {
+ erofs_info(sb, "unsupported blocksize for DAX");
+ clear_opt(&sbi->opt, DAX_ALWAYS);
}
sb->s_time_gran = 1;
@@ -645,9 +717,22 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
else
sb->s_flags &= ~SB_POSIXACL;
-#ifdef CONFIG_EROFS_FS_ZIP
- xa_init(&sbi->managed_pslots);
-#endif
+ err = z_erofs_init_super(sb);
+ if (err)
+ return err;
+
+ if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
+ inode = erofs_iget(sb, sbi->packed_nid);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+ sbi->packed_inode = inode;
+ }
+ if (erofs_sb_has_metabox(sbi)) {
+ inode = erofs_iget(sb, sbi->metabox_nid);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+ sbi->metabox_inode = inode;
+ }
inode = erofs_iget(sb, sbi->root_nid);
if (IS_ERR(inode))
@@ -659,24 +744,11 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
iput(inode);
return -EINVAL;
}
-
sb->s_root = d_make_root(inode);
if (!sb->s_root)
return -ENOMEM;
erofs_shrinker_register(sb);
- if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
- sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
- if (IS_ERR(sbi->packed_inode)) {
- err = PTR_ERR(sbi->packed_inode);
- sbi->packed_inode = NULL;
- return err;
- }
- }
- err = erofs_init_managed_cache(sb);
- if (err)
- return err;
-
err = erofs_xattr_prefixes_init(sb);
if (err)
return err;
@@ -686,6 +758,7 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
if (err)
return err;
+ sbi->dir_ra_bytes = EROFS_DIR_RA_BYTES;
erofs_info(sb, "mounted with root inode @ nid %llu.", sbi->root_nid);
return 0;
}
@@ -703,16 +776,18 @@ static int erofs_fc_get_tree(struct fs_context *fc)
GET_TREE_BDEV_QUIET_LOOKUP : 0);
#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
if (ret == -ENOTBLK) {
+ struct file *file;
+
if (!fc->source)
return invalf(fc, "No source specified");
- sbi->fdev = filp_open(fc->source, O_RDONLY | O_LARGEFILE, 0);
- if (IS_ERR(sbi->fdev))
- return PTR_ERR(sbi->fdev);
+ file = filp_open(fc->source, O_RDONLY | O_LARGEFILE, 0);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+ sbi->dif0.file = file;
- if (S_ISREG(file_inode(sbi->fdev)->i_mode) &&
- sbi->fdev->f_mapping->a_ops->read_folio)
+ if (S_ISREG(file_inode(sbi->dif0.file)->i_mode) &&
+ sbi->dif0.file->f_mapping->a_ops->read_folio)
return get_tree_nodev(fc, erofs_fc_fill_super);
- fput(sbi->fdev);
}
#endif
return ret;
@@ -763,19 +838,25 @@ static void erofs_free_dev_context(struct erofs_dev_context *devs)
kfree(devs);
}
-static void erofs_fc_free(struct fs_context *fc)
+static void erofs_sb_free(struct erofs_sb_info *sbi)
{
- struct erofs_sb_info *sbi = fc->s_fs_info;
-
- if (!sbi)
- return;
-
erofs_free_dev_context(sbi->devs);
kfree(sbi->fsid);
kfree(sbi->domain_id);
+ if (sbi->dif0.file)
+ fput(sbi->dif0.file);
+ kfree(sbi->volume_name);
kfree(sbi);
}
+static void erofs_fc_free(struct fs_context *fc)
+{
+ struct erofs_sb_info *sbi = fc->s_fs_info;
+
+ if (sbi) /* free here if an error occurs before transferring to sb */
+ erofs_sb_free(sbi);
+}
+
static const struct fs_context_operations erofs_context_ops = {
.parse_param = erofs_fc_parse_param,
.get_tree = erofs_fc_get_tree,
@@ -805,23 +886,31 @@ static int erofs_init_fs_context(struct fs_context *fc)
return 0;
}
+static void erofs_drop_internal_inodes(struct erofs_sb_info *sbi)
+{
+ iput(sbi->packed_inode);
+ sbi->packed_inode = NULL;
+ iput(sbi->metabox_inode);
+ sbi->metabox_inode = NULL;
+#ifdef CONFIG_EROFS_FS_ZIP
+ iput(sbi->managed_cache);
+ sbi->managed_cache = NULL;
+#endif
+}
+
static void erofs_kill_sb(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
- if ((IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) || sbi->fdev)
+ if ((IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) ||
+ sbi->dif0.file)
kill_anon_super(sb);
else
kill_block_super(sb);
-
- erofs_free_dev_context(sbi->devs);
- fs_put_dax(sbi->dax_dev, NULL);
+ erofs_drop_internal_inodes(sbi);
+ fs_put_dax(sbi->dif0.dax_dev, NULL);
erofs_fscache_unregister_fs(sb);
- kfree(sbi->fsid);
- kfree(sbi->domain_id);
- if (sbi->fdev)
- fput(sbi->fdev);
- kfree(sbi);
+ erofs_sb_free(sbi);
sb->s_fs_info = NULL;
}
@@ -829,17 +918,10 @@ static void erofs_put_super(struct super_block *sb)
{
struct erofs_sb_info *const sbi = EROFS_SB(sb);
- DBG_BUGON(!sbi);
-
erofs_unregister_sysfs(sb);
erofs_shrinker_unregister(sb);
erofs_xattr_prefixes_cleanup(sb);
-#ifdef CONFIG_EROFS_FS_ZIP
- iput(sbi->managed_cache);
- sbi->managed_cache = NULL;
-#endif
- iput(sbi->packed_inode);
- sbi->packed_inode = NULL;
+ erofs_drop_internal_inodes(sbi);
erofs_free_dev_context(sbi->devs);
sbi->devs = NULL;
erofs_fscache_unregister_fs(sb);
@@ -947,19 +1029,35 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",dax=always");
if (test_opt(opt, DAX_NEVER))
seq_puts(seq, ",dax=never");
+ if (erofs_is_fileio_mode(sbi) && test_opt(opt, DIRECT_IO))
+ seq_puts(seq, ",directio");
#ifdef CONFIG_EROFS_FS_ONDEMAND
if (sbi->fsid)
seq_printf(seq, ",fsid=%s", sbi->fsid);
if (sbi->domain_id)
seq_printf(seq, ",domain_id=%s", sbi->domain_id);
#endif
+ if (sbi->dif0.fsoff)
+ seq_printf(seq, ",fsoffset=%llu", sbi->dif0.fsoff);
return 0;
}
+static void erofs_evict_inode(struct inode *inode)
+{
+#ifdef CONFIG_FS_DAX
+ if (IS_DAX(inode))
+ dax_break_layout_final(inode);
+#endif
+
+ truncate_inode_pages_final(&inode->i_data);
+ clear_inode(inode);
+}
+
const struct super_operations erofs_sops = {
.put_super = erofs_put_super,
.alloc_inode = erofs_alloc_inode,
.free_inode = erofs_free_inode,
+ .evict_inode = erofs_evict_inode,
.statfs = erofs_statfs,
.show_options = erofs_show_options,
};
diff --git a/fs/erofs/sysfs.c b/fs/erofs/sysfs.c
index 19d586273b70..1e0658a1d95b 100644
--- a/fs/erofs/sysfs.c
+++ b/fs/erofs/sysfs.c
@@ -7,12 +7,14 @@
#include <linux/kobject.h>
#include "internal.h"
+#include "compress.h"
enum {
attr_feature,
attr_drop_caches,
attr_pointer_ui,
attr_pointer_bool,
+ attr_accel,
};
enum {
@@ -60,12 +62,25 @@ static struct erofs_attr erofs_attr_##_name = { \
EROFS_ATTR_RW_UI(sync_decompress, erofs_mount_opts);
EROFS_ATTR_FUNC(drop_caches, 0200);
#endif
+#ifdef CONFIG_EROFS_FS_ZIP_ACCEL
+EROFS_ATTR_FUNC(accel, 0644);
+#endif
+EROFS_ATTR_RW_UI(dir_ra_bytes, erofs_sb_info);
-static struct attribute *erofs_attrs[] = {
+static struct attribute *erofs_sb_attrs[] = {
#ifdef CONFIG_EROFS_FS_ZIP
ATTR_LIST(sync_decompress),
ATTR_LIST(drop_caches),
#endif
+ ATTR_LIST(dir_ra_bytes),
+ NULL,
+};
+ATTRIBUTE_GROUPS(erofs_sb);
+
+static struct attribute *erofs_attrs[] = {
+#ifdef CONFIG_EROFS_FS_ZIP_ACCEL
+ ATTR_LIST(accel),
+#endif
NULL,
};
ATTRIBUTE_GROUPS(erofs);
@@ -81,6 +96,8 @@ EROFS_ATTR_FEATURE(sb_chksum);
EROFS_ATTR_FEATURE(ztailpacking);
EROFS_ATTR_FEATURE(fragments);
EROFS_ATTR_FEATURE(dedupe);
+EROFS_ATTR_FEATURE(48bit);
+EROFS_ATTR_FEATURE(metabox);
static struct attribute *erofs_feat_attrs[] = {
ATTR_LIST(zero_padding),
@@ -93,6 +110,8 @@ static struct attribute *erofs_feat_attrs[] = {
ATTR_LIST(ztailpacking),
ATTR_LIST(fragments),
ATTR_LIST(dedupe),
+ ATTR_LIST(48bit),
+ ATTR_LIST(metabox),
NULL,
};
ATTRIBUTE_GROUPS(erofs_feat);
@@ -126,12 +145,14 @@ static ssize_t erofs_attr_show(struct kobject *kobj,
if (!ptr)
return 0;
return sysfs_emit(buf, "%d\n", *(bool *)ptr);
+ case attr_accel:
+ return z_erofs_crypto_show_engines(buf, PAGE_SIZE, '\n');
}
return 0;
}
static ssize_t erofs_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t len)
+ const char *buf, size_t len)
{
struct erofs_sb_info *sbi = container_of(kobj, struct erofs_sb_info,
s_kobj);
@@ -180,6 +201,19 @@ static ssize_t erofs_attr_store(struct kobject *kobj, struct attribute *attr,
invalidate_mapping_pages(MNGD_MAPPING(sbi), 0, -1);
return len;
#endif
+#ifdef CONFIG_EROFS_FS_ZIP_ACCEL
+ case attr_accel:
+ buf = skip_spaces(buf);
+ z_erofs_crypto_disable_all_engines();
+ while (*buf) {
+ t = strcspn(buf, "\n");
+ ret = z_erofs_crypto_enable_engine(buf, t);
+ if (ret < 0)
+ return ret;
+ buf += buf[t] != '\0' ? t + 1 : t;
+ }
+ return len;
+#endif
}
return 0;
}
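The new attr_accel store path walks a newline-separated list with strcspn(), enabling each named engine in turn and stopping at the terminator. The traversal itself behaves as below (placeholder engine names; the real names depend on the available crypto engines):

        #include <stdio.h>
        #include <string.h>

        /* Visit each newline-separated token, the way the store handler does. */
        static void for_each_line(const char *buf)
        {
                while (*buf) {
                        size_t t = strcspn(buf, "\n");

                        printf("engine: '%.*s'\n", (int)t, buf);
                        buf += buf[t] != '\0' ? t + 1 : t;      /* skip '\n' if present */
                }
        }

        int main(void)
        {
                for_each_line("engine-a\nengine-b");
                return 0;
        }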
@@ -197,12 +231,13 @@ static const struct sysfs_ops erofs_attr_ops = {
};
static const struct kobj_type erofs_sb_ktype = {
- .default_groups = erofs_groups,
+ .default_groups = erofs_sb_groups,
.sysfs_ops = &erofs_attr_ops,
.release = erofs_sb_release,
};
static const struct kobj_type erofs_ktype = {
+ .default_groups = erofs_groups,
.sysfs_ops = &erofs_attr_ops,
};
@@ -246,6 +281,12 @@ void erofs_unregister_sysfs(struct super_block *sb)
}
}
+void erofs_exit_sysfs(void)
+{
+ kobject_put(&erofs_feat);
+ kset_unregister(&erofs_root);
+}
+
int __init erofs_init_sysfs(void)
{
int ret;
@@ -253,24 +294,12 @@ int __init erofs_init_sysfs(void)
kobject_set_name(&erofs_root.kobj, "erofs");
erofs_root.kobj.parent = fs_kobj;
ret = kset_register(&erofs_root);
- if (ret)
- goto root_err;
-
- ret = kobject_init_and_add(&erofs_feat, &erofs_feat_ktype,
- NULL, "features");
- if (ret)
- goto feat_err;
- return ret;
-
-feat_err:
- kobject_put(&erofs_feat);
- kset_unregister(&erofs_root);
-root_err:
+ if (!ret) {
+ ret = kobject_init_and_add(&erofs_feat, &erofs_feat_ktype,
+ NULL, "features");
+ if (!ret)
+ return 0;
+ erofs_exit_sysfs();
+ }
return ret;
}
-
-void erofs_exit_sysfs(void)
-{
- kobject_put(&erofs_feat);
- kset_unregister(&erofs_root);
-}
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
index a90d7d649739..396536d9a862 100644
--- a/fs/erofs/xattr.c
+++ b/fs/erofs/xattr.c
@@ -72,16 +72,18 @@ static int erofs_init_inode_xattrs(struct inode *inode)
ret = -EFSCORRUPTED;
goto out_unlock; /* xattr ondisk layout error */
}
- ret = -ENOATTR;
+ ret = -ENODATA;
goto out_unlock;
}
it.buf = __EROFS_BUF_INITIALIZER;
- erofs_init_metabuf(&it.buf, sb);
+ ret = erofs_init_metabuf(&it.buf, sb, erofs_inode_in_metabox(inode));
+ if (ret)
+ goto out_unlock;
it.pos = erofs_iloc(inode) + vi->inode_isize;
/* read in shared xattr array (non-atomic, see kmalloc below) */
- it.kaddr = erofs_bread(&it.buf, it.pos, EROFS_KMAP);
+ it.kaddr = erofs_bread(&it.buf, it.pos, true);
if (IS_ERR(it.kaddr)) {
ret = PTR_ERR(it.kaddr);
goto out_unlock;
@@ -102,7 +104,7 @@ static int erofs_init_inode_xattrs(struct inode *inode)
it.pos += sizeof(struct erofs_xattr_ibody_header);
for (i = 0; i < vi->xattr_shared_count; ++i) {
- it.kaddr = erofs_bread(&it.buf, it.pos, EROFS_KMAP);
+ it.kaddr = erofs_bread(&it.buf, it.pos, true);
if (IS_ERR(it.kaddr)) {
kfree(vi->xattr_shared_xattrs);
vi->xattr_shared_xattrs = NULL;
@@ -183,7 +185,7 @@ static int erofs_xattr_copy_to_buffer(struct erofs_xattr_iter *it,
void *src;
for (processed = 0; processed < len; processed += slice) {
- it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP);
+ it->kaddr = erofs_bread(&it->buf, it->pos, true);
if (IS_ERR(it->kaddr))
return PTR_ERR(it->kaddr);
@@ -266,27 +268,27 @@ static int erofs_getxattr_foreach(struct erofs_xattr_iter *it)
(entry.e_name_index & EROFS_XATTR_LONG_PREFIX_MASK);
if (pf >= sbi->xattr_prefixes + sbi->xattr_prefix_count)
- return -ENOATTR;
+ return -ENODATA;
if (it->index != pf->prefix->base_index ||
it->name.len != entry.e_name_len + pf->infix_len)
- return -ENOATTR;
+ return -ENODATA;
if (memcmp(it->name.name, pf->prefix->infix, pf->infix_len))
- return -ENOATTR;
+ return -ENODATA;
it->infix_len = pf->infix_len;
} else {
if (it->index != entry.e_name_index ||
it->name.len != entry.e_name_len)
- return -ENOATTR;
+ return -ENODATA;
it->infix_len = 0;
}
/* 2. handle xattr name */
for (processed = 0; processed < entry.e_name_len; processed += slice) {
- it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP);
+ it->kaddr = erofs_bread(&it->buf, it->pos, true);
if (IS_ERR(it->kaddr))
return PTR_ERR(it->kaddr);
@@ -295,7 +297,7 @@ static int erofs_getxattr_foreach(struct erofs_xattr_iter *it)
entry.e_name_len - processed);
if (memcmp(it->name.name + it->infix_len + processed,
it->kaddr, slice))
- return -ENOATTR;
+ return -ENODATA;
it->pos += slice;
}
@@ -323,14 +325,17 @@ static int erofs_xattr_iter_inline(struct erofs_xattr_iter *it,
sizeof(u32) * vi->xattr_shared_count;
if (xattr_header_sz >= vi->xattr_isize) {
DBG_BUGON(xattr_header_sz > vi->xattr_isize);
- return -ENOATTR;
+ return -ENODATA;
}
+ ret = erofs_init_metabuf(&it->buf, it->sb, erofs_inode_in_metabox(inode));
+ if (ret)
+ return ret;
remaining = vi->xattr_isize - xattr_header_sz;
it->pos = erofs_iloc(inode) + vi->inode_isize + xattr_header_sz;
while (remaining) {
- it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP);
+ it->kaddr = erofs_bread(&it->buf, it->pos, true);
if (IS_ERR(it->kaddr))
return PTR_ERR(it->kaddr);
@@ -347,7 +352,7 @@ static int erofs_xattr_iter_inline(struct erofs_xattr_iter *it,
ret = erofs_getxattr_foreach(it);
else
ret = erofs_listxattr_foreach(it);
- if ((getxattr && ret != -ENOATTR) || (!getxattr && ret))
+ if ((getxattr && ret != -ENODATA) || (!getxattr && ret))
break;
it->pos = next_pos;
@@ -361,13 +366,18 @@ static int erofs_xattr_iter_shared(struct erofs_xattr_iter *it,
struct erofs_inode *const vi = EROFS_I(inode);
struct super_block *const sb = it->sb;
struct erofs_sb_info *sbi = EROFS_SB(sb);
- unsigned int i;
- int ret = -ENOATTR;
+ unsigned int i = 0;
+ int ret;
- for (i = 0; i < vi->xattr_shared_count; ++i) {
+ ret = erofs_init_metabuf(&it->buf, sb,
+ erofs_sb_has_shared_ea_in_metabox(sbi));
+ if (ret)
+ return ret;
+
+ while (i < vi->xattr_shared_count) {
it->pos = erofs_pos(sb, sbi->xattr_blkaddr) +
- vi->xattr_shared_xattrs[i] * sizeof(__le32);
- it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP);
+ vi->xattr_shared_xattrs[i++] * sizeof(__le32);
+ it->kaddr = erofs_bread(&it->buf, it->pos, true);
if (IS_ERR(it->kaddr))
return PTR_ERR(it->kaddr);
@@ -375,10 +385,10 @@ static int erofs_xattr_iter_shared(struct erofs_xattr_iter *it,
ret = erofs_getxattr_foreach(it);
else
ret = erofs_listxattr_foreach(it);
- if ((getxattr && ret != -ENOATTR) || (!getxattr && ret))
+ if ((getxattr && ret != -ENODATA) || (!getxattr && ret))
break;
}
- return ret;
+ return i ? ret : -ENODATA;
}
int erofs_getxattr(struct inode *inode, int index, const char *name,
@@ -403,23 +413,22 @@ int erofs_getxattr(struct inode *inode, int index, const char *name,
EROFS_XATTR_FILTER_SEED + index);
hashbit &= EROFS_XATTR_FILTER_BITS - 1;
if (vi->xattr_name_filter & (1U << hashbit))
- return -ENOATTR;
+ return -ENODATA;
}
it.index = index;
- it.name = (struct qstr)QSTR_INIT(name, strlen(name));
+ it.name = QSTR(name);
if (it.name.len > EROFS_NAME_LEN)
return -ERANGE;
it.sb = inode->i_sb;
it.buf = __EROFS_BUF_INITIALIZER;
- erofs_init_metabuf(&it.buf, it.sb);
it.buffer = buffer;
it.buffer_size = buffer_size;
it.buffer_ofs = 0;
ret = erofs_xattr_iter_inline(&it, inode, true);
- if (ret == -ENOATTR)
+ if (ret == -ENODATA)
ret = erofs_xattr_iter_shared(&it, inode, true);
erofs_put_metabuf(&it.buf);
return ret ? ret : it.buffer_ofs;
@@ -432,23 +441,22 @@ ssize_t erofs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
struct inode *inode = d_inode(dentry);
ret = erofs_init_inode_xattrs(inode);
- if (ret == -ENOATTR)
+ if (ret == -ENODATA)
return 0;
if (ret)
return ret;
it.sb = dentry->d_sb;
it.buf = __EROFS_BUF_INITIALIZER;
- erofs_init_metabuf(&it.buf, it.sb);
it.dentry = dentry;
it.buffer = buffer;
it.buffer_size = buffer_size;
it.buffer_ofs = 0;
ret = erofs_xattr_iter_inline(&it, inode, false);
- if (!ret || ret == -ENOATTR)
+ if (!ret || ret == -ENODATA)
ret = erofs_xattr_iter_shared(&it, inode, false);
- if (ret == -ENOATTR)
+ if (ret == -ENODATA)
ret = 0;
erofs_put_metabuf(&it.buf);
return ret ? ret : it.buffer_ofs;
@@ -474,18 +482,25 @@ int erofs_xattr_prefixes_init(struct super_block *sb)
erofs_off_t pos = (erofs_off_t)sbi->xattr_prefix_start << 2;
struct erofs_xattr_prefix_item *pfs;
int ret = 0, i, len;
+ bool plain = erofs_sb_has_plain_xattr_pfx(sbi);
if (!sbi->xattr_prefix_count)
return 0;
- pfs = kzalloc(sbi->xattr_prefix_count * sizeof(*pfs), GFP_KERNEL);
+ pfs = kcalloc(sbi->xattr_prefix_count, sizeof(*pfs), GFP_KERNEL);
if (!pfs)
return -ENOMEM;
- if (sbi->packed_inode)
- buf.mapping = sbi->packed_inode->i_mapping;
- else
- erofs_init_metabuf(&buf, sb);
+ if (!plain) {
+ if (erofs_sb_has_metabox(sbi))
+ (void)erofs_init_metabuf(&buf, sb, true);
+ else if (sbi->packed_inode)
+ buf.mapping = sbi->packed_inode->i_mapping;
+ else
+ plain = true;
+ }
+ if (plain)
+ (void)erofs_init_metabuf(&buf, sb, false);
for (i = 0; i < sbi->xattr_prefix_count; i++) {
void *ptr = erofs_read_metadata(sb, &buf, &pos, &len);
@@ -539,7 +554,7 @@ struct posix_acl *erofs_get_acl(struct inode *inode, int type, bool rcu)
rc = erofs_getxattr(inode, prefix, "", value, rc);
}
- if (rc == -ENOATTR)
+ if (rc == -ENODATA)
acl = NULL;
else if (rc < 0)
acl = ERR_PTR(rc);
diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h
index b246cd0e135e..6317caa8413e 100644
--- a/fs/erofs/xattr.h
+++ b/fs/erofs/xattr.h
@@ -10,9 +10,6 @@
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>
-/* Attribute not found */
-#define ENOATTR ENODATA
-
#ifdef CONFIG_EROFS_FS_XATTR
extern const struct xattr_handler erofs_xattr_user_handler;
extern const struct xattr_handler erofs_xattr_trusted_handler;
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 01f147505487..65da21504632 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -12,12 +12,6 @@
#define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_INLINE_BVECS 2
-/*
- * let's leave a type here in case of introducing
- * another tagged pointer later.
- */
-typedef void *z_erofs_next_pcluster_t;
-
struct z_erofs_bvec {
struct page *page;
int offset;
@@ -48,10 +42,10 @@ struct z_erofs_pcluster {
struct lockref lockref;
/* A: point to next chained pcluster or TAILs */
- z_erofs_next_pcluster_t next;
+ struct z_erofs_pcluster *next;
- /* I: start block address of this pcluster */
- erofs_off_t index;
+ /* I: start physical position of this pcluster */
+ erofs_off_t pos;
/* L: the maximum decompression size of this round */
unsigned int length;
@@ -79,12 +73,12 @@ struct z_erofs_pcluster {
/* I: compression algorithm format */
unsigned char algorithmformat;
+ /* I: whether compressed data is in-lined or not */
+ bool from_meta;
+
/* L: whether partial decompression or not */
bool partial;
- /* L: indicate several pageofs_outs or not */
- bool multibases;
-
/* L: whether extra buffer allocations are best-effort */
bool besteffort;
@@ -94,12 +88,11 @@ struct z_erofs_pcluster {
/* the end of a chain of pclusters */
#define Z_EROFS_PCLUSTER_TAIL ((void *) 0x700 + POISON_POINTER_DELTA)
-#define Z_EROFS_PCLUSTER_NIL (NULL)
struct z_erofs_decompressqueue {
struct super_block *sb;
+ struct z_erofs_pcluster *head;
atomic_t pending_bios;
- z_erofs_next_pcluster_t head;
union {
struct completion done;
@@ -109,14 +102,9 @@ struct z_erofs_decompressqueue {
bool eio, sync;
};
-static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
-{
- return !pcl->index;
-}
-
static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
{
- return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT;
+ return PAGE_ALIGN(pcl->pageofs_in + pcl->pclustersize) >> PAGE_SHIFT;
}
static bool erofs_folio_is_managed(struct erofs_sb_info *sbi, struct folio *fo)
@@ -140,7 +128,7 @@ struct z_erofs_pcluster_slab {
static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
_PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
- _PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
+ _PCLP(Z_EROFS_PCLUSTER_MAX_PAGES + 1)
};
struct z_erofs_bvec_iter {
@@ -274,7 +262,6 @@ static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL);
if (!pcl)
return ERR_PTR(-ENOMEM);
- pcl->pclustersize = size;
return pcl;
}
return ERR_PTR(-EINVAL);
@@ -301,6 +288,7 @@ static struct workqueue_struct *z_erofs_workqueue __read_mostly;
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
static struct kthread_worker __rcu **z_erofs_pcpu_workers;
+static atomic_t erofs_percpu_workers_initialized = ATOMIC_INIT(0);
static void erofs_destroy_percpu_workers(void)
{
@@ -320,7 +308,7 @@ static void erofs_destroy_percpu_workers(void)
static struct kthread_worker *erofs_init_percpu_worker(int cpu)
{
struct kthread_worker *worker =
- kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u", cpu);
+ kthread_run_worker_on_cpu(cpu, 0, "erofs_worker/%u");
if (IS_ERR(worker))
return worker;
@@ -346,12 +334,8 @@ static int erofs_init_percpu_workers(void)
}
return 0;
}
-#else
-static inline void erofs_destroy_percpu_workers(void) {}
-static inline int erofs_init_percpu_workers(void) { return 0; }
-#endif
-#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD)
+#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock);
static enum cpuhp_state erofs_cpuhp_state;
@@ -408,17 +392,56 @@ static void erofs_cpu_hotplug_destroy(void)
if (erofs_cpuhp_state)
cpuhp_remove_state_nocalls(erofs_cpuhp_state);
}
-#else /* !CONFIG_HOTPLUG_CPU || !CONFIG_EROFS_FS_PCPU_KTHREAD */
+#else /* !CONFIG_HOTPLUG_CPU */
static inline int erofs_cpu_hotplug_init(void) { return 0; }
static inline void erofs_cpu_hotplug_destroy(void) {}
-#endif
+#endif /* CONFIG_HOTPLUG_CPU */
+static int z_erofs_init_pcpu_workers(struct super_block *sb)
+{
+ int err;
-void z_erofs_exit_subsystem(void)
+ if (atomic_xchg(&erofs_percpu_workers_initialized, 1))
+ return 0;
+
+ err = erofs_init_percpu_workers();
+ if (err) {
+ erofs_err(sb, "per-cpu workers: failed to allocate.");
+ goto err_init_percpu_workers;
+ }
+
+ err = erofs_cpu_hotplug_init();
+ if (err < 0) {
+ erofs_err(sb, "per-cpu workers: failed CPU hotplug init.");
+ goto err_cpuhp_init;
+ }
+ erofs_info(sb, "initialized per-cpu workers successfully.");
+ return err;
+
+err_cpuhp_init:
+ erofs_destroy_percpu_workers();
+err_init_percpu_workers:
+ atomic_set(&erofs_percpu_workers_initialized, 0);
+ return err;
+}
+
+static void z_erofs_destroy_pcpu_workers(void)
{
+ if (!atomic_xchg(&erofs_percpu_workers_initialized, 0))
+ return;
erofs_cpu_hotplug_destroy();
erofs_destroy_percpu_workers();
+}
+#else /* !CONFIG_EROFS_FS_PCPU_KTHREAD */
+static inline int z_erofs_init_pcpu_workers(struct super_block *sb) { return 0; }
+static inline void z_erofs_destroy_pcpu_workers(void) {}
+#endif /* CONFIG_EROFS_FS_PCPU_KTHREAD */
+
+void z_erofs_exit_subsystem(void)
+{
+ z_erofs_destroy_pcpu_workers();
destroy_workqueue(z_erofs_workqueue);
z_erofs_destroy_pcluster_pool();
+ z_erofs_crypto_disable_all_engines();
z_erofs_exit_decompressor();
}
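z_erofs_init_pcpu_workers() above moves the per-CPU worker bring-up out of module init and guards it with an atomic_xchg(), so only the first mount that reaches it performs the setup, and the flag is cleared again on failure or teardown so a later mount can retry. The claim-the-flag shape, restated with plain C11 atomics (a sketch of the pattern, not the kernel code):

        #include <stdatomic.h>
        #include <stdio.h>

        static atomic_int initialized;

        static int lazy_init(void)
        {
                if (atomic_exchange(&initialized, 1))
                        return 0;               /* someone already claimed it */

                puts("performing one-time init");
                /* on failure, reset the flag so the next caller retries */
                return 0;
        }

        static void lazy_exit(void)
        {
                if (!atomic_exchange(&initialized, 0))
                        return;                 /* never initialized, or already torn down */
                puts("tearing down");
        }

        int main(void)
        {
                lazy_init();
                lazy_init();                    /* second call is a no-op */
                lazy_exit();
                return 0;
        }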
@@ -440,19 +463,8 @@ int __init z_erofs_init_subsystem(void)
goto err_workqueue_init;
}
- err = erofs_init_percpu_workers();
- if (err)
- goto err_pcpu_worker;
-
- err = erofs_cpu_hotplug_init();
- if (err < 0)
- goto err_cpuhp_init;
return err;
-err_cpuhp_init:
- erofs_destroy_percpu_workers();
-err_pcpu_worker:
- destroy_workqueue(z_erofs_workqueue);
err_workqueue_init:
z_erofs_destroy_pcluster_pool();
err_pcluster_pool:
@@ -462,39 +474,32 @@ err_decompressor:
}
enum z_erofs_pclustermode {
+ /* It has previously been linked into another processing chain */
Z_EROFS_PCLUSTER_INFLIGHT,
/*
- * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
- * could be dispatched into bypass queue later due to uptodated managed
- * pages. All related online pages cannot be reused for inplace I/O (or
- * bvpage) since it can be directly decoded without I/O submission.
+ * A weaker form of Z_EROFS_PCLUSTER_FOLLOWED; the difference is that it
+ * may be dispatched to the bypass queue later due to uptodated managed
+ * folios. All file-backed folios related to this pcluster cannot be
+ * reused for in-place I/O (or bvpage) since the pcluster may be decoded
+ * in a separate queue (and thus out of order).
*/
Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
/*
- * The pcluster was just linked to a decompression chain by us. It can
- * also be linked with the remaining pclusters, which means if the
- * processing page is the tail page of a pcluster, this pcluster can
- * safely use the whole page (since the previous pcluster is within the
- * same chain) for in-place I/O, as illustrated below:
- * ___________________________________________________
- * | tail (partial) page | head (partial) page |
- * | (of the current pcl) | (of the previous pcl) |
- * |___PCLUSTER_FOLLOWED___|_____PCLUSTER_FOLLOWED_____|
- *
- * [ (*) the page above can be used as inplace I/O. ]
+ * The pcluster has just been linked to our processing chain.
+ * File-backed folios (except for the head page) related to it can be
+ * used for in-place I/O (or bvpage).
*/
Z_EROFS_PCLUSTER_FOLLOWED,
};
-struct z_erofs_decompress_frontend {
+struct z_erofs_frontend {
struct inode *const inode;
struct erofs_map_blocks map;
struct z_erofs_bvec_iter biter;
struct page *pagepool;
struct page *candidate_bvpage;
- struct z_erofs_pcluster *pcl;
- z_erofs_next_pcluster_t owned_head;
+ struct z_erofs_pcluster *pcl, *head;
enum z_erofs_pclustermode mode;
erofs_off_t headoffset;
@@ -503,11 +508,11 @@ struct z_erofs_decompress_frontend {
unsigned int icur;
};
-#define DECOMPRESS_FRONTEND_INIT(__i) { \
- .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
- .mode = Z_EROFS_PCLUSTER_FOLLOWED }
+#define Z_EROFS_DEFINE_FRONTEND(fe, i, ho) struct z_erofs_frontend fe = { \
+ .inode = i, .head = Z_EROFS_PCLUSTER_TAIL, \
+ .mode = Z_EROFS_PCLUSTER_FOLLOWED, .headoffset = ho }
-static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
+static bool z_erofs_should_alloc_cache(struct z_erofs_frontend *fe)
{
unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;
@@ -524,19 +529,18 @@ static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
return false;
}
-static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
+static void z_erofs_bind_cache(struct z_erofs_frontend *fe)
{
struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
struct z_erofs_pcluster *pcl = fe->pcl;
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
bool shouldalloc = z_erofs_should_alloc_cache(fe);
- bool standalone = true;
- /*
- * optimistic allocation without direct reclaim since inplace I/O
- * can be used if low memory otherwise.
- */
+ pgoff_t poff = pcl->pos >> PAGE_SHIFT;
+ bool may_bypass = true;
+ /* Optimistic allocation, as in-place I/O can be used as a fallback */
gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
+ struct folio *folio, *newfolio;
unsigned int i;
if (i_blocksize(fe->inode) != PAGE_SIZE ||
@@ -544,47 +548,42 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
return;
for (i = 0; i < pclusterpages; ++i) {
- struct page *page, *newpage;
-
/* Inaccurate check w/o locking to avoid unneeded lookups */
if (READ_ONCE(pcl->compressed_bvecs[i].page))
continue;
- page = find_get_page(mc, pcl->index + i);
- if (!page) {
- /* I/O is needed, no possible to decompress directly */
- standalone = false;
+ folio = filemap_get_folio(mc, poff + i);
+ if (IS_ERR(folio)) {
+ may_bypass = false;
if (!shouldalloc)
continue;
/*
- * Try cached I/O if allocation succeeds or fallback to
- * in-place I/O instead to avoid any direct reclaim.
+ * Allocate a managed folio for cached I/O; otherwise, it may later
+ * be filled with a file-backed folio for in-place I/O.
*/
- newpage = erofs_allocpage(&fe->pagepool, gfp);
- if (!newpage)
+ newfolio = filemap_alloc_folio(gfp, 0, NULL);
+ if (!newfolio)
continue;
- set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
+ newfolio->private = Z_EROFS_PREALLOCATED_FOLIO;
+ folio = NULL;
}
spin_lock(&pcl->lockref.lock);
if (!pcl->compressed_bvecs[i].page) {
- pcl->compressed_bvecs[i].page = page ? page : newpage;
+ pcl->compressed_bvecs[i].page =
+ folio_page(folio ?: newfolio, 0);
spin_unlock(&pcl->lockref.lock);
continue;
}
spin_unlock(&pcl->lockref.lock);
-
- if (page)
- put_page(page);
- else if (newpage)
- erofs_pagepool_add(&fe->pagepool, newpage);
+ folio_put(folio ?: newfolio);
}
/*
- * don't do inplace I/O if all compressed pages are available in
- * managed cache since it can be moved to the bypass queue instead.
+ * Don't perform in-place I/O if all compressed pages are available in
+ * the managed cache, as the pcluster can be moved to the bypass queue.
*/
- if (standalone)
+ if (may_bypass)
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
}
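
A minimal userspace model of the cache-binding pass above, assuming a toy array in place of the managed address_space; it is only meant to show how may_bypass stays true when every compressed slot was already cached, and how the optimistic allocation never blocks (malloc stands in for the no-direct-reclaim GFP allocation). Names and the cache layout are invented for the sketch.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_SLOTS 4

/* NULL means "not cached"; a non-NULL pointer stands in for a cached folio. */
static void *toy_cache_lookup(void **cache, unsigned int idx)
{
	return cache[idx];
}

/*
 * Returns true when every compressed slot was already cached, i.e. the
 * pcluster could go to the bypass queue without submitting any I/O.
 */
static bool bind_cache(void **cache, void **slots, bool should_alloc)
{
	bool may_bypass = true;

	for (unsigned int i = 0; i < NR_SLOTS; i++) {
		void *folio = toy_cache_lookup(cache, i);

		if (!folio) {
			may_bypass = false;	/* this slot still needs I/O */
			if (!should_alloc)
				continue;
			folio = malloc(1);	/* optimistic preallocation */
			if (!folio)
				continue;	/* fall back to in-place I/O */
		}
		if (!slots[i])
			slots[i] = folio;	/* bind it to the pcluster */
		else
			free(folio);		/* lost the race, drop it */
	}
	return may_bypass;
}

int main(void)
{
	void *cache[NR_SLOTS] = { malloc(1), malloc(1), NULL, malloc(1) };
	void *slots[NR_SLOTS] = { NULL };

	printf("bypass: %d\n", bind_cache(cache, slots, true));
	return 0;
}

With one slot missing from the cache, the run prints "bypass: 0", mirroring why the pcluster above cannot be moved to the bypass queue.
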
@@ -596,7 +595,7 @@ static int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
struct folio *folio;
int i;
- DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
+ DBG_BUGON(pcl->from_meta);
/* Each cached folio contains one page unless bs > ps is supported */
for (i = 0; i < pclusterpages; ++i) {
if (pcl->compressed_bvecs[i].page) {
@@ -628,7 +627,7 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
ret = false;
spin_lock(&pcl->lockref.lock);
if (pcl->lockref.count <= 0) {
- DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
+ DBG_BUGON(pcl->from_meta);
for (; bvec < end; ++bvec) {
if (bvec->page && page_folio(bvec->page) == folio) {
bvec->page = NULL;
@@ -665,39 +664,49 @@ static const struct address_space_operations z_erofs_cache_aops = {
.invalidate_folio = z_erofs_cache_invalidate_folio,
};
-int erofs_init_managed_cache(struct super_block *sb)
+int z_erofs_init_super(struct super_block *sb)
{
- struct inode *const inode = new_inode(sb);
+ struct inode *inode;
+ int err;
+
+ err = z_erofs_init_pcpu_workers(sb);
+ if (err)
+ return err;
+ inode = new_inode(sb);
if (!inode)
return -ENOMEM;
-
set_nlink(inode, 1);
inode->i_size = OFFSET_MAX;
inode->i_mapping->a_ops = &z_erofs_cache_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
EROFS_SB(sb)->managed_cache = inode;
+ xa_init(&EROFS_SB(sb)->managed_pslots);
return 0;
}
/* callers must be with pcluster lock held */
-static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
+static int z_erofs_attach_page(struct z_erofs_frontend *fe,
struct z_erofs_bvec *bvec, bool exclusive)
{
struct z_erofs_pcluster *pcl = fe->pcl;
int ret;
if (exclusive) {
- /* give priority for inplaceio to use file pages first */
- spin_lock(&pcl->lockref.lock);
- while (fe->icur > 0) {
- if (pcl->compressed_bvecs[--fe->icur].page)
- continue;
- pcl->compressed_bvecs[fe->icur] = *bvec;
+ /* In-place I/O is limited to one page for uncompressed data */
+ if (pcl->algorithmformat < Z_EROFS_COMPRESSION_MAX ||
+ fe->icur <= 1) {
+ /* Try to prioritize inplace I/O here */
+ spin_lock(&pcl->lockref.lock);
+ while (fe->icur > 0) {
+ if (pcl->compressed_bvecs[--fe->icur].page)
+ continue;
+ pcl->compressed_bvecs[fe->icur] = *bvec;
+ spin_unlock(&pcl->lockref.lock);
+ return 0;
+ }
spin_unlock(&pcl->lockref.lock);
- return 0;
}
- spin_unlock(&pcl->lockref.lock);
/* otherwise, check if it can be used as a bvpage */
if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
@@ -727,35 +736,30 @@ static bool z_erofs_get_pcluster(struct z_erofs_pcluster *pcl)
return true;
}
-static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+static int z_erofs_register_pcluster(struct z_erofs_frontend *fe)
{
struct erofs_map_blocks *map = &fe->map;
struct super_block *sb = fe->inode->i_sb;
struct erofs_sb_info *sbi = EROFS_SB(sb);
- bool ztailpacking = map->m_flags & EROFS_MAP_META;
struct z_erofs_pcluster *pcl, *pre;
+ unsigned int pageofs_in;
int err;
- if (!(map->m_flags & EROFS_MAP_ENCODED) ||
- (!ztailpacking && !erofs_blknr(sb, map->m_pa))) {
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
-
- /* no available pcluster, let's allocate one */
- pcl = z_erofs_alloc_pcluster(map->m_plen);
+ pageofs_in = erofs_blkoff(sb, map->m_pa);
+ pcl = z_erofs_alloc_pcluster(pageofs_in + map->m_plen);
if (IS_ERR(pcl))
return PTR_ERR(pcl);
- spin_lock_init(&pcl->lockref.lock);
- pcl->lockref.count = 1; /* one ref for this request */
+ lockref_init(&pcl->lockref); /* one ref for this request */
pcl->algorithmformat = map->m_algorithmformat;
+ pcl->pclustersize = map->m_plen;
pcl->length = 0;
pcl->partial = true;
-
- /* new pclusters should be claimed as type 1, primary and followed */
- pcl->next = fe->owned_head;
+ pcl->next = fe->head;
+ pcl->pos = map->m_pa;
+ pcl->pageofs_in = pageofs_in;
pcl->pageofs_out = map->m_la & ~PAGE_MASK;
+ pcl->from_meta = map->m_flags & EROFS_MAP_META;
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
/*
@@ -765,13 +769,10 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
mutex_init(&pcl->lock);
DBG_BUGON(!mutex_trylock(&pcl->lock));
- if (ztailpacking) {
- pcl->index = 0; /* which indicates ztailpacking */
- } else {
- pcl->index = erofs_blknr(sb, map->m_pa);
+ if (!pcl->from_meta) {
while (1) {
xa_lock(&sbi->managed_pslots);
- pre = __xa_cmpxchg(&sbi->managed_pslots, pcl->index,
+ pre = __xa_cmpxchg(&sbi->managed_pslots, pcl->pos,
NULL, pcl, GFP_KERNEL);
if (!pre || xa_is_err(pre) || z_erofs_get_pcluster(pre)) {
xa_unlock(&sbi->managed_pslots);
@@ -790,8 +791,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
goto err_out;
}
}
- fe->owned_head = &pcl->next;
- fe->pcl = pcl;
+ fe->head = fe->pcl = pcl;
return 0;
err_out:
@@ -800,32 +800,29 @@ err_out:
return err;
}
-static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
+static int z_erofs_pcluster_begin(struct z_erofs_frontend *fe)
{
struct erofs_map_blocks *map = &fe->map;
struct super_block *sb = fe->inode->i_sb;
- erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
struct z_erofs_pcluster *pcl = NULL;
+ void *ptr;
int ret;
DBG_BUGON(fe->pcl);
/* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
- DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
+ DBG_BUGON(!fe->head);
if (!(map->m_flags & EROFS_MAP_META)) {
while (1) {
rcu_read_lock();
- pcl = xa_load(&EROFS_SB(sb)->managed_pslots, blknr);
+ pcl = xa_load(&EROFS_SB(sb)->managed_pslots, map->m_pa);
if (!pcl || z_erofs_get_pcluster(pcl)) {
- DBG_BUGON(pcl && blknr != pcl->index);
+ DBG_BUGON(pcl && map->m_pa != pcl->pos);
rcu_read_unlock();
break;
}
rcu_read_unlock();
}
- } else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
- DBG_BUGON(1);
- return -EFSCORRUPTED;
}
if (pcl) {
@@ -838,10 +835,9 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
if (ret == -EEXIST) {
mutex_lock(&fe->pcl->lock);
/* check if this pcluster hasn't been linked into any chain. */
- if (cmpxchg(&fe->pcl->next, Z_EROFS_PCLUSTER_NIL,
- fe->owned_head) == Z_EROFS_PCLUSTER_NIL) {
+ if (!cmpxchg(&fe->pcl->next, NULL, fe->head)) {
/* .. so it can be attached to our submission chain */
- fe->owned_head = &fe->pcl->next;
+ fe->head = fe->pcl;
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
} else { /* otherwise, it belongs to an inflight chain */
fe->mode = Z_EROFS_PCLUSTER_INFLIGHT;
@@ -852,19 +848,21 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
- if (!z_erofs_is_inline_pcluster(fe->pcl)) {
+ if (!fe->pcl->from_meta) {
/* bind cache first when cached decompression is preferred */
z_erofs_bind_cache(fe);
} else {
- void *mptr;
-
- mptr = erofs_read_metabuf(&map->buf, sb, map->m_pa, EROFS_NO_KMAP);
- if (IS_ERR(mptr)) {
- ret = PTR_ERR(mptr);
- erofs_err(sb, "failed to get inline data %d", ret);
+ ret = erofs_init_metabuf(&map->buf, sb,
+ erofs_inode_in_metabox(fe->inode));
+ if (ret)
+ return ret;
+ ptr = erofs_bread(&map->buf, map->m_pa, false);
+ if (IS_ERR(ptr)) {
+ ret = PTR_ERR(ptr);
+ erofs_err(sb, "failed to get inline folio %d", ret);
return ret;
}
- get_page(map->buf.page);
+ folio_get(page_folio(map->buf.page));
WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK;
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
@@ -874,14 +872,9 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
return 0;
}
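
The -EEXIST path above claims an already-registered pcluster by swinging its ->next pointer from NULL to the current chain head with a single compare-and-swap. A self-contained sketch of that claim using C11 atomics, with invented names rather than the kernel types:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct pcluster {
	_Atomic(struct pcluster *) next;
};

/* A non-NULL object standing in for the Z_EROFS_PCLUSTER_TAIL sentinel. */
static struct pcluster tail_sentinel;

static bool claim_for_chain(struct pcluster *pcl, struct pcluster **head)
{
	struct pcluster *expected = NULL;

	if (atomic_compare_exchange_strong(&pcl->next, &expected, *head)) {
		*head = pcl;		/* FOLLOWED: it is now part of our chain */
		return true;
	}
	return false;			/* INFLIGHT: another chain already owns it */
}

int main(void)
{
	struct pcluster a = { .next = NULL };
	struct pcluster *head = &tail_sentinel;

	printf("first claim: %d\n", claim_for_chain(&a, &head));
	printf("second claim: %d\n", claim_for_chain(&a, &head));
	return 0;
}

The second claim fails because ->next is no longer NULL, which is exactly the condition that routes the caller to Z_EROFS_PCLUSTER_INFLIGHT above.
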
-/*
- * keep in mind that no referenced pclusters will be freed
- * only after a RCU grace period.
- */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
- z_erofs_free_pcluster(container_of(head,
- struct z_erofs_pcluster, rcu));
+ z_erofs_free_pcluster(container_of(head, struct z_erofs_pcluster, rcu));
}
static bool __erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
@@ -902,7 +895,7 @@ static bool __erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
* It's impossible to fail after the pcluster is frozen, but in order
* to avoid some race conditions, add a DBG_BUGON to observe this.
*/
- DBG_BUGON(__xa_erase(&sbi->managed_pslots, pcl->index) != pcl);
+ DBG_BUGON(__xa_erase(&sbi->managed_pslots, pcl->pos) != pcl);
lockref_mark_dead(&pcl->lockref);
return true;
@@ -923,12 +916,10 @@ static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
return free;
}
-unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
- unsigned long nr_shrink)
+unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi, unsigned long nr)
{
struct z_erofs_pcluster *pcl;
- unsigned int freed = 0;
- unsigned long index;
+ unsigned long index, freed = 0;
xa_lock(&sbi->managed_pslots);
xa_for_each(&sbi->managed_pslots, index, pcl) {
@@ -938,7 +929,7 @@ unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
xa_unlock(&sbi->managed_pslots);
++freed;
- if (!--nr_shrink)
+ if (!--nr)
return freed;
xa_lock(&sbi->managed_pslots);
}
@@ -967,7 +958,7 @@ static void z_erofs_put_pcluster(struct erofs_sb_info *sbi,
call_rcu(&pcl->rcu, z_erofs_rcu_callback);
}
-static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
+static void z_erofs_pcluster_end(struct z_erofs_frontend *fe)
{
struct z_erofs_pcluster *pcl = fe->pcl;
@@ -980,13 +971,9 @@ static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
if (fe->candidate_bvpage)
fe->candidate_bvpage = NULL;
- /*
- * if all pending pages are added, don't hold its reference
- * any longer if the pcluster isn't hosted by ourselves.
- */
+ /* Drop refcount if it doesn't belong to our processing chain */
if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
z_erofs_put_pcluster(EROFS_I_SB(fe->inode), pcl, false);
-
fe->pcl = NULL;
}
@@ -1004,7 +991,7 @@ static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio,
buf.mapping = packed_inode->i_mapping;
for (; cur < end; cur += cnt, pos += cnt) {
cnt = min(end - cur, sb->s_blocksize - erofs_blkoff(sb, pos));
- src = erofs_bread(&buf, pos, EROFS_KMAP);
+ src = erofs_bread(&buf, pos, true);
if (IS_ERR(src)) {
erofs_put_metabuf(&buf);
return PTR_ERR(src);
@@ -1015,7 +1002,7 @@ static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio,
return 0;
}
-static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
+static int z_erofs_scan_folio(struct z_erofs_frontend *f,
struct folio *folio, bool ra)
{
struct inode *const inode = f->inode;
@@ -1047,7 +1034,7 @@ static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
if (!(map->m_flags & EROFS_MAP_MAPPED)) {
folio_zero_segment(folio, cur, end);
tight = false;
- } else if (map->m_flags & EROFS_MAP_FRAGMENT) {
+ } else if (map->m_flags & __EROFS_MAP_FRAGMENT) {
erofs_off_t fpos = offset + cur - map->m_la;
err = z_erofs_read_fragment(inode->i_sb, folio, cur,
@@ -1087,8 +1074,6 @@ static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
break;
erofs_onlinefolio_split(folio);
- if (f->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
- f->pcl->multibases = true;
if (f->pcl->length < offset + end - map->m_la) {
f->pcl->length = offset + end - map->m_la;
f->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
@@ -1106,7 +1091,7 @@ static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
tight = (bs == PAGE_SIZE);
}
} while ((end = cur) > 0);
- erofs_onlinefolio_end(folio, err);
+ erofs_onlinefolio_end(folio, err, false);
return err;
}
@@ -1130,11 +1115,10 @@ static bool z_erofs_page_is_invalidated(struct page *page)
return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page);
}
-struct z_erofs_decompress_backend {
+struct z_erofs_backend {
struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
struct super_block *sb;
struct z_erofs_pcluster *pcl;
-
/* pages with the longest decompressed length for deduplication */
struct page **decompressed_pages;
/* pages to keep the compressed data */
@@ -1143,6 +1127,8 @@ struct z_erofs_decompress_backend {
struct list_head decompressed_secondary_bvecs;
struct page **pagepool;
unsigned int onstack_used, nr_pages;
+ /* indicate if temporary copies should be preserved for later use */
+ bool keepxcpy;
};
struct z_erofs_bvec_item {
@@ -1150,21 +1136,23 @@ struct z_erofs_bvec_item {
struct list_head list;
};
-static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
+static void z_erofs_do_decompressed_bvec(struct z_erofs_backend *be,
struct z_erofs_bvec *bvec)
{
+ int poff = bvec->offset + be->pcl->pageofs_out;
struct z_erofs_bvec_item *item;
- unsigned int pgnr;
-
- if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) &&
- (bvec->end == PAGE_SIZE ||
- bvec->offset + bvec->end == be->pcl->length)) {
- pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
- DBG_BUGON(pgnr >= be->nr_pages);
- if (!be->decompressed_pages[pgnr]) {
- be->decompressed_pages[pgnr] = bvec->page;
+ struct page **page;
+
+ if (!(poff & ~PAGE_MASK) && (bvec->end == PAGE_SIZE ||
+ bvec->offset + bvec->end == be->pcl->length)) {
+ DBG_BUGON((poff >> PAGE_SHIFT) >= be->nr_pages);
+ page = be->decompressed_pages + (poff >> PAGE_SHIFT);
+ if (!*page) {
+ *page = bvec->page;
return;
}
+ } else {
+ be->keepxcpy = true;
}
/* (cold path) one pcluster is requested multiple times */
@@ -1173,8 +1161,7 @@ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
list_add(&item->list, &be->decompressed_secondary_bvecs);
}
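
A hedged userspace restatement of the placement rule above: a decoded span may be written straight into the per-page output array only if it starts on a page boundary and either fills a whole page or ends the pcluster; everything else sets keepxcpy so a temporary copy is preserved for the cold path. Names and the page size are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(PAGE_SIZE - 1))

/*
 * Decide whether a decoded span may be placed directly into the page array
 * (returning its page index) or needs a secondary copy instead.
 */
static bool place_directly(unsigned int offset, unsigned int end,
			   unsigned int pageofs_out, unsigned int length,
			   unsigned int *pgnr)
{
	unsigned int poff = offset + pageofs_out;

	if (!(poff & ~PAGE_MASK) &&
	    (end == PAGE_SIZE || offset + end == length)) {
		*pgnr = poff / PAGE_SIZE;
		return true;
	}
	return false;	/* keepxcpy: preserve a temporary copy for later */
}

int main(void)
{
	unsigned int pgnr;

	/* A full page starting at a page boundary: direct placement. */
	printf("%d\n", place_directly(0, PAGE_SIZE, 0, 3 * PAGE_SIZE, &pgnr));
	/* A partial span in the middle: needs a secondary copy. */
	printf("%d\n", place_directly(100, 200, 0, 3 * PAGE_SIZE, &pgnr));
	return 0;
}
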
-static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
- int err)
+static void z_erofs_fill_other_copies(struct z_erofs_backend *be, int err)
{
unsigned int off0 = be->pcl->pageofs_out;
struct list_head *p, *n;
@@ -1209,13 +1196,13 @@ static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
cur += len;
}
kunmap_local(dst);
- erofs_onlinefolio_end(page_folio(bvi->bvec.page), err);
+ erofs_onlinefolio_end(page_folio(bvi->bvec.page), err, true);
list_del(p);
kfree(bvi);
}
}
-static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
+static void z_erofs_parse_out_bvecs(struct z_erofs_backend *be)
{
struct z_erofs_pcluster *pcl = be->pcl;
struct z_erofs_bvec_iter biter;
@@ -1240,8 +1227,7 @@ static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
}
-static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
- bool *overlapped)
+static int z_erofs_parse_in_bvecs(struct z_erofs_backend *be, bool *overlapped)
{
struct z_erofs_pcluster *pcl = be->pcl;
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
@@ -1260,7 +1246,7 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
}
be->compressed_pages[i] = page;
- if (z_erofs_is_inline_pcluster(pcl) ||
+ if (pcl->from_meta ||
erofs_folio_is_managed(EROFS_SB(be->sb), page_folio(page))) {
if (!PageUptodate(page))
err = -EIO;
@@ -1276,18 +1262,18 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
return err;
}
-static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
- int err)
+static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err)
{
struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
struct z_erofs_pcluster *pcl = be->pcl;
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
- const struct z_erofs_decompressor *decomp =
+ const struct z_erofs_decompressor *alg =
z_erofs_decomp[pcl->algorithmformat];
+ bool try_free = true;
int i, j, jtop, err2;
struct page *page;
bool overlapped;
- bool try_free = true;
+ const char *reason;
mutex_lock(&pcl->lock);
be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;
@@ -1319,11 +1305,13 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
err2 = z_erofs_parse_in_bvecs(be, &overlapped);
if (err2)
err = err2;
- if (!err)
- err = decomp->decompress(&(struct z_erofs_decompress_req) {
+ if (!err) {
+ reason = alg->decompress(&(struct z_erofs_decompress_req) {
.sb = be->sb,
.in = be->compressed_pages,
.out = be->decompressed_pages,
+ .inpages = pclusterpages,
+ .outpages = be->nr_pages,
.pageofs_in = pcl->pageofs_in,
.pageofs_out = pcl->pageofs_out,
.inputsize = pcl->pclustersize,
@@ -1331,16 +1319,27 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
.alg = pcl->algorithmformat,
.inplace_io = overlapped,
.partial_decoding = pcl->partial,
- .fillgaps = pcl->multibases,
+ .fillgaps = be->keepxcpy,
.gfp = pcl->besteffort ? GFP_KERNEL :
GFP_NOWAIT | __GFP_NORETRY
}, be->pagepool);
+ if (IS_ERR(reason)) {
+ erofs_err(be->sb, "failed to decompress (%s) %ld @ pa %llu size %u => %u",
+ alg->name, PTR_ERR(reason), pcl->pos,
+ pcl->pclustersize, pcl->length);
+ err = PTR_ERR(reason);
+ } else if (unlikely(reason)) {
+ erofs_err(be->sb, "failed to decompress (%s) %s @ pa %llu size %u => %u",
+ alg->name, reason, pcl->pos,
+ pcl->pclustersize, pcl->length);
+ err = -EFSCORRUPTED;
+ }
+ }
/* must handle all compressed pages before actual file pages */
- if (z_erofs_is_inline_pcluster(pcl)) {
- page = pcl->compressed_bvecs[0].page;
+ if (pcl->from_meta) {
+ folio_put(page_folio(pcl->compressed_bvecs[0].page));
WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
- put_page(page);
} else {
/* managed folios are still left in compressed_bvecs[] */
for (i = 0; i < pclusterpages; ++i) {
@@ -1368,7 +1367,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
DBG_BUGON(z_erofs_page_is_invalidated(page));
if (!z_erofs_is_shortlived_page(page)) {
- erofs_onlinefolio_end(page_folio(page), err);
+ erofs_onlinefolio_end(page_folio(page), err, true);
continue;
}
if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) {
@@ -1388,16 +1387,15 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
pcl->length = 0;
pcl->partial = true;
- pcl->multibases = false;
pcl->besteffort = false;
pcl->bvset.nextpage = NULL;
pcl->vcnt = 0;
/* pcluster lock MUST be taken before the following line */
- WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
+ WRITE_ONCE(pcl->next, NULL);
mutex_unlock(&pcl->lock);
- if (z_erofs_is_inline_pcluster(pcl))
+ if (pcl->from_meta)
z_erofs_free_pcluster(pcl);
else
z_erofs_put_pcluster(sbi, pcl, try_free);
@@ -1407,21 +1405,19 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
struct page **pagepool)
{
- struct z_erofs_decompress_backend be = {
+ struct z_erofs_backend be = {
.sb = io->sb,
.pagepool = pagepool,
.decompressed_secondary_bvecs =
LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
+ .pcl = io->head,
};
- z_erofs_next_pcluster_t owned = io->head;
+ struct z_erofs_pcluster *next;
int err = io->eio ? -EIO : 0;
- while (owned != Z_EROFS_PCLUSTER_TAIL) {
- DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
-
- be.pcl = container_of(owned, struct z_erofs_pcluster, next);
- owned = READ_ONCE(be.pcl->next);
-
+ for (; be.pcl != Z_EROFS_PCLUSTER_TAIL; be.pcl = next) {
+ DBG_BUGON(!be.pcl);
+ next = READ_ONCE(be.pcl->next);
err = z_erofs_decompress_pcluster(&be, err) ?: err;
}
return err;
@@ -1446,6 +1442,16 @@ static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work)
}
#endif
+/* Use (kthread_)work in atomic contexts to minimize scheduling overhead */
+static inline bool z_erofs_in_atomic(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPTION) && rcu_preempt_depth())
+ return true;
+ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
+ return true;
+ return !preemptible();
+}
+
static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
int bios)
{
@@ -1460,8 +1466,7 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
if (atomic_add_return(bios, &io->pending_bios))
return;
- /* Use (kthread_)work and sync decompression for atomic contexts only */
- if (!in_task() || irqs_disabled() || rcu_read_lock_any_held()) {
+ if (z_erofs_in_atomic()) {
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
struct kthread_worker *worker;
@@ -1487,7 +1492,7 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
}
static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
- struct z_erofs_decompress_frontend *f,
+ struct z_erofs_frontend *f,
struct z_erofs_pcluster *pcl,
unsigned int nr,
struct address_space *mc)
@@ -1514,12 +1519,8 @@ repeat:
DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page));
folio = page_folio(zbv.page);
- /*
- * Handle preallocated cached folios. We tried to allocate such folios
- * without triggering direct reclaim. If allocation failed, inplace
- * file-backed folios will be used instead.
- */
- if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) {
+ /* For preallocated managed folios, add them to page cache here */
+ if (folio->private == Z_EROFS_PREALLOCATED_FOLIO) {
tocache = true;
goto out_tocache;
}
@@ -1584,7 +1585,7 @@ out_allocfolio:
folio = page_folio(page);
out_tocache:
if (!tocache || bs != PAGE_SIZE ||
- filemap_add_folio(mc, folio, pcl->index + nr, gfp)) {
+ filemap_add_folio(mc, folio, (pcl->pos >> PAGE_SHIFT) + nr, gfp)) {
/* turn into a temporary shortlived folio (1 ref) */
folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
return;
@@ -1631,18 +1632,13 @@ enum {
NR_JOBQUEUES,
};
-static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
- z_erofs_next_pcluster_t qtail[],
- z_erofs_next_pcluster_t owned_head)
+static void z_erofs_move_to_bypass_queue(struct z_erofs_pcluster *pcl,
+ struct z_erofs_pcluster *next,
+ struct z_erofs_pcluster **qtail[])
{
- z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
- z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
-
WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);
-
- WRITE_ONCE(*submit_qtail, owned_head);
- WRITE_ONCE(*bypass_qtail, &pcl->next);
-
+ WRITE_ONCE(*qtail[JQ_SUBMIT], next);
+ WRITE_ONCE(*qtail[JQ_BYPASS], pcl);
qtail[JQ_BYPASS] = &pcl->next;
}
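
Both job queues above are built through tail pointers that always address the slot where the next pcluster should be stored (the head pointer at first, then the previous node's ->next). A small standalone sketch of that pointer-to-pointer splice, with simplified types and NULL used in place of the Z_EROFS_PCLUSTER_TAIL sentinel:

#include <stdio.h>

struct node {
	struct node *next;
	int id;
};

/*
 * Moving a node to the bypass queue means: the submit tail receives the node
 * that used to follow it, the bypass tail receives the node itself, and the
 * bypass tail then advances to the node's ->next slot.
 */
static void move_to_bypass(struct node *n, struct node *next_in_submit,
			   struct node ***qtail /* [0]=submit, [1]=bypass */)
{
	n->next = NULL;			/* it now terminates the bypass queue */
	*qtail[0] = next_in_submit;	/* unlink it from the submit queue */
	*qtail[1] = n;			/* append it to the bypass queue */
	qtail[1] = &n->next;		/* bypass tail advances */
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };
	struct node *submit_head = &a, *bypass_head = NULL;
	struct node **qtail[2] = { &submit_head, &bypass_head };

	a.next = &b;
	move_to_bypass(&a, &b, qtail);
	printf("submit head: %d, bypass head: %d\n",
	       submit_head->id, bypass_head->id);
	return 0;
}
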
@@ -1671,15 +1667,15 @@ static void z_erofs_endio(struct bio *bio)
bio_put(bio);
}
-static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
+static void z_erofs_submit_queue(struct z_erofs_frontend *f,
struct z_erofs_decompressqueue *fgq,
bool *force_fg, bool readahead)
{
struct super_block *sb = f->inode->i_sb;
struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
- z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
+ struct z_erofs_pcluster **qtail[NR_JOBQUEUES];
struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
- z_erofs_next_pcluster_t owned_head = f->owned_head;
+ struct z_erofs_pcluster *pcl, *next;
/* bio is NULL initially, so no need to initialize last_{index,bdev} */
erofs_off_t last_pa;
unsigned int nr_bios = 0;
@@ -1695,33 +1691,31 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
/* by default, all need io submission */
- q[JQ_SUBMIT]->head = owned_head;
+ q[JQ_SUBMIT]->head = next = f->head;
do {
struct erofs_map_dev mdev;
- struct z_erofs_pcluster *pcl;
erofs_off_t cur, end;
struct bio_vec bvec;
unsigned int i = 0;
bool bypass = true;
- DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
- pcl = container_of(owned_head, struct z_erofs_pcluster, next);
- owned_head = READ_ONCE(pcl->next);
-
- if (z_erofs_is_inline_pcluster(pcl)) {
- move_to_bypass_jobqueue(pcl, qtail, owned_head);
+ pcl = next;
+ next = READ_ONCE(pcl->next);
+ if (pcl->from_meta) {
+ z_erofs_move_to_bypass_queue(pcl, next, qtail);
continue;
}
/* no device id here, thus it will always succeed */
mdev = (struct erofs_map_dev) {
- .m_pa = erofs_pos(sb, pcl->index),
+ .m_pa = round_down(pcl->pos, sb->s_blocksize),
};
(void)erofs_map_dev(sb, &mdev);
cur = mdev.m_pa;
- end = cur + pcl->pclustersize;
+ end = round_up(cur + pcl->pageofs_in + pcl->pclustersize,
+ sb->s_blocksize);
do {
bvec.bv_page = NULL;
if (bio && (cur != last_pa ||
@@ -1765,7 +1759,8 @@ drain_io:
bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
REQ_OP_READ, GFP_NOIO);
bio->bi_end_io = z_erofs_endio;
- bio->bi_iter.bi_sector = cur >> 9;
+ bio->bi_iter.bi_sector =
+ (mdev.m_dif->fsoff + cur) >> 9;
bio->bi_private = q[JQ_SUBMIT];
if (readahead)
bio->bi_opf |= REQ_RAHEAD;
@@ -1782,8 +1777,8 @@ drain_io:
if (!bypass)
qtail[JQ_SUBMIT] = &pcl->next;
else
- move_to_bypass_jobqueue(pcl, qtail, owned_head);
- } while (owned_head != Z_EROFS_PCLUSTER_TAIL);
+ z_erofs_move_to_bypass_queue(pcl, next, qtail);
+ } while (next != Z_EROFS_PCLUSTER_TAIL);
if (bio) {
if (erofs_is_fileio_mode(EROFS_SB(sb)))
@@ -1792,9 +1787,9 @@ drain_io:
erofs_fscache_submit_bio(bio);
else
submit_bio(bio);
- if (memstall)
- psi_memstall_leave(&pflags);
}
+ if (memstall)
+ psi_memstall_leave(&pflags);
/*
* although background is preferred, no one is pending for submission.
@@ -1807,17 +1802,16 @@ drain_io:
z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
}
-static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
- unsigned int ra_folios)
+static int z_erofs_runqueue(struct z_erofs_frontend *f, unsigned int rapages)
{
struct z_erofs_decompressqueue io[NR_JOBQUEUES];
struct erofs_sb_info *sbi = EROFS_I_SB(f->inode);
- bool force_fg = z_erofs_is_sync_decompress(sbi, ra_folios);
+ bool force_fg = z_erofs_is_sync_decompress(sbi, rapages);
int err;
- if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
+ if (f->head == Z_EROFS_PCLUSTER_TAIL)
return 0;
- z_erofs_submit_queue(f, io, &force_fg, !!ra_folios);
+ z_erofs_submit_queue(f, io, &force_fg, !!rapages);
/* handle bypass queue (no i/o pclusters) immediately */
err = z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
@@ -1835,7 +1829,7 @@ static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
* Since partial uptodate is still unimplemented for now, we have to use
* approximate readmore strategies as a start.
*/
-static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
+static void z_erofs_pcluster_readmore(struct z_erofs_frontend *f,
struct readahead_control *rac, bool backmost)
{
struct inode *inode = f->inode;
@@ -1851,7 +1845,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
map->m_la = end;
err = z_erofs_map_blocks_iter(inode, map,
EROFS_GET_BLOCKS_READMORE);
- if (err)
+ if (err || !(map->m_flags & EROFS_MAP_ENCODED))
return;
/* expand ra for the trailing edge if readahead */
@@ -1863,7 +1857,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
end = round_up(end, PAGE_SIZE);
} else {
end = round_up(map->m_la, PAGE_SIZE);
- if (!map->m_llen)
+ if (!(map->m_flags & EROFS_MAP_ENCODED) || !map->m_llen)
return;
}
@@ -1890,12 +1884,10 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
struct inode *const inode = folio->mapping->host;
- struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
+ Z_EROFS_DEFINE_FRONTEND(f, inode, folio_pos(folio));
int err;
trace_erofs_read_folio(folio, false);
- f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT;
-
z_erofs_pcluster_readmore(&f, NULL, true);
err = z_erofs_scan_folio(&f, folio, false);
z_erofs_pcluster_readmore(&f, NULL, false);
@@ -1915,17 +1907,13 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
static void z_erofs_readahead(struct readahead_control *rac)
{
struct inode *const inode = rac->mapping->host;
- struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
+ Z_EROFS_DEFINE_FRONTEND(f, inode, readahead_pos(rac));
+ unsigned int nrpages = readahead_count(rac);
struct folio *head = NULL, *folio;
- unsigned int nr_folios;
int err;
- f.headoffset = readahead_pos(rac);
-
+ trace_erofs_readahead(inode, readahead_index(rac), nrpages, false);
z_erofs_pcluster_readmore(&f, rac, true);
- nr_folios = readahead_count(rac);
- trace_erofs_readpages(inode, readahead_index(rac), nr_folios, false);
-
while ((folio = readahead_folio(rac))) {
folio->private = head;
head = folio;
@@ -1944,7 +1932,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
z_erofs_pcluster_readmore(&f, rac, false);
z_erofs_pcluster_end(&f);
- (void)z_erofs_runqueue(&f, nr_folios);
+ (void)z_erofs_runqueue(&f, nrpages);
erofs_put_metabuf(&f.map.buf);
erofs_release_pages(&f.pagepool);
}
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 4535f2f0a014..c8d8e129eb4b 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -17,7 +17,7 @@ struct z_erofs_maprecorder {
u16 delta[2];
erofs_blk_t pblk, compressedblks;
erofs_off_t nextpackoff;
- bool partialref;
+ bool partialref, in_mbox;
};
static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
@@ -25,13 +25,13 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
{
struct inode *const inode = m->inode;
struct erofs_inode *const vi = EROFS_I(inode);
- const erofs_off_t pos = Z_EROFS_FULL_INDEX_ALIGN(erofs_iloc(inode) +
+ const erofs_off_t pos = Z_EROFS_FULL_INDEX_START(erofs_iloc(inode) +
vi->inode_isize + vi->xattr_isize) +
lcn * sizeof(struct z_erofs_lcluster_index);
struct z_erofs_lcluster_index *di;
unsigned int advise;
- di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, EROFS_KMAP);
+ di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, m->in_mbox);
if (IS_ERR(di))
return PTR_ERR(di);
m->lcn = lcn;
@@ -40,7 +40,7 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
advise = le16_to_cpu(di->di_advise);
m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK;
if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
- m->clusterofs = 1 << vi->z_logical_clusterbits;
+ m->clusterofs = 1 << vi->z_lclusterbits;
m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
@@ -55,10 +55,6 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
} else {
m->partialref = !!(advise & Z_EROFS_LI_PARTIAL_REF);
m->clusterofs = le16_to_cpu(di->di_clusterofs);
- if (m->clusterofs >= 1 << vi->z_logical_clusterbits) {
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
m->pblk = le32_to_cpu(di->di_u.blkaddr);
}
return 0;
@@ -97,17 +93,48 @@ static int get_compacted_la_distance(unsigned int lobits,
return d1;
}
-static int unpack_compacted_index(struct z_erofs_maprecorder *m,
- unsigned int amortizedshift,
- erofs_off_t pos, bool lookahead)
+static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
+ unsigned long lcn, bool lookahead)
{
- struct erofs_inode *const vi = EROFS_I(m->inode);
- const unsigned int lclusterbits = vi->z_logical_clusterbits;
+ struct inode *const inode = m->inode;
+ struct erofs_inode *const vi = EROFS_I(inode);
+ const erofs_off_t ebase = Z_EROFS_MAP_HEADER_END(erofs_iloc(inode) +
+ vi->inode_isize + vi->xattr_isize);
+ const unsigned int lclusterbits = vi->z_lclusterbits;
+ const unsigned int totalidx = erofs_iblks(inode);
+ unsigned int compacted_4b_initial, compacted_2b, amortizedshift;
unsigned int vcnt, lo, lobits, encodebits, nblk, bytes;
- bool big_pcluster;
+ bool big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
+ erofs_off_t pos;
u8 *in, type;
int i;
+ if (lcn >= totalidx || lclusterbits > 14)
+ return -EINVAL;
+
+ m->lcn = lcn;
+ /* used to align to 32-byte (compacted_2b) alignment */
+ compacted_4b_initial = ((32 - ebase % 32) / 4) & 7;
+ compacted_2b = 0;
+ if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
+ compacted_4b_initial < totalidx)
+ compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
+
+ pos = ebase;
+ amortizedshift = 2; /* compact_4b */
+ if (lcn >= compacted_4b_initial) {
+ pos += compacted_4b_initial * 4;
+ lcn -= compacted_4b_initial;
+ if (lcn < compacted_2b) {
+ amortizedshift = 1;
+ } else {
+ pos += compacted_2b * 2;
+ lcn -= compacted_2b;
+ }
+ }
+ pos += lcn * (1 << amortizedshift);
+
+ /* figure out the lcluster count in this pack */
if (1 << amortizedshift == 4 && lclusterbits <= 14)
vcnt = 2;
else if (1 << amortizedshift == 2 && lclusterbits <= 12)
@@ -115,14 +142,13 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
else
return -EOPNOTSUPP;
- in = erofs_read_metabuf(&m->map->buf, m->inode->i_sb, pos, EROFS_KMAP);
+ in = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, m->in_mbox);
if (IS_ERR(in))
return PTR_ERR(in);
/* it doesn't equal to round_up(..) */
m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
(vcnt << amortizedshift);
- big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
bytes = pos & ((vcnt << amortizedshift) - 1);
@@ -207,64 +233,32 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
return 0;
}
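
The offset arithmetic above is easier to follow with concrete numbers. Below is a standalone sketch that mirrors it; the ebase, totalidx and lcn values in main() are made up for the demonstration and nothing here reads the on-disk records themselves.

#include <stdint.h>
#include <stdio.h>

/* Compute where the compacted pack holding @lcn starts. */
static uint64_t compact_index_pos(uint64_t ebase, unsigned int totalidx,
				  unsigned int lcn, int has_compacted_2b,
				  unsigned int *amortizedshift)
{
	/* leading 4-byte records that pad the stream up to a 32-byte boundary */
	unsigned int c4i = ((32 - ebase % 32) / 4) & 7;
	unsigned int c2b = 0;
	uint64_t pos = ebase;

	if (has_compacted_2b && c4i < totalidx)
		c2b = (totalidx - c4i) / 16 * 16;	/* rounddown(.., 16) */

	*amortizedshift = 2;				/* 4-byte records */
	if (lcn >= c4i) {
		pos += c4i * 4;
		lcn -= c4i;
		if (lcn < c2b) {
			*amortizedshift = 1;		/* 2-byte records */
		} else {
			pos += c2b * 2;
			lcn -= c2b;
		}
	}
	return pos + ((uint64_t)lcn << *amortizedshift);
}

int main(void)
{
	unsigned int shift;
	/* e.g. indexes start at byte 4108, 70 lclusters, 2-byte packing on */
	uint64_t pos = compact_index_pos(4108, 70, 40, 1, &shift);

	printf("record size %u bytes at offset %llu\n",
	       1u << shift, (unsigned long long)pos);
	return 0;
}

With ebase = 4108 the index stream starts 12 bytes into a 32-byte line, so five 4-byte records pad it out to offset 4128 before 2-byte packing begins, and lcluster 40 resolves to a 2-byte record at offset 4198.
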
-static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
- unsigned long lcn, bool lookahead)
+static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
+ unsigned int lcn, bool lookahead)
{
- struct inode *const inode = m->inode;
- struct erofs_inode *const vi = EROFS_I(inode);
- const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
- ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
- unsigned int totalidx = erofs_iblks(inode);
- unsigned int compacted_4b_initial, compacted_2b;
- unsigned int amortizedshift;
- erofs_off_t pos;
-
- if (lcn >= totalidx || vi->z_logical_clusterbits > 14)
- return -EINVAL;
-
- m->lcn = lcn;
- /* used to align to 32-byte (compacted_2b) alignment */
- compacted_4b_initial = (32 - ebase % 32) / 4;
- if (compacted_4b_initial == 32 / 4)
- compacted_4b_initial = 0;
-
- if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
- compacted_4b_initial < totalidx)
- compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
- else
- compacted_2b = 0;
-
- pos = ebase;
- if (lcn < compacted_4b_initial) {
- amortizedshift = 2;
- goto out;
- }
- pos += compacted_4b_initial * 4;
- lcn -= compacted_4b_initial;
+ struct erofs_inode *vi = EROFS_I(m->inode);
+ int err;
- if (lcn < compacted_2b) {
- amortizedshift = 1;
- goto out;
+ if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT) {
+ err = z_erofs_load_compact_lcluster(m, lcn, lookahead);
+ } else {
+ DBG_BUGON(vi->datalayout != EROFS_INODE_COMPRESSED_FULL);
+ err = z_erofs_load_full_lcluster(m, lcn);
}
- pos += compacted_2b * 2;
- lcn -= compacted_2b;
- amortizedshift = 2;
-out:
- pos += lcn * (1 << amortizedshift);
- return unpack_compacted_index(m, amortizedshift, pos, lookahead);
-}
+ if (err)
+ return err;
-static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
- unsigned int lcn, bool lookahead)
-{
- switch (EROFS_I(m->inode)->datalayout) {
- case EROFS_INODE_COMPRESSED_FULL:
- return z_erofs_load_full_lcluster(m, lcn);
- case EROFS_INODE_COMPRESSED_COMPACT:
- return z_erofs_load_compact_lcluster(m, lcn, lookahead);
- default:
- return -EINVAL;
+ if (m->type >= Z_EROFS_LCLUSTER_TYPE_MAX) {
+ erofs_err(m->inode->i_sb, "unknown type %u @ lcn %u of nid %llu",
+ m->type, lcn, EROFS_I(m->inode)->nid);
+ DBG_BUGON(1);
+ return -EOPNOTSUPP;
+ } else if (m->type != Z_EROFS_LCLUSTER_TYPE_NONHEAD &&
+ m->clusterofs >= (1 << vi->z_lclusterbits)) {
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
}
+ return 0;
}
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
@@ -272,36 +266,26 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
{
struct super_block *sb = m->inode->i_sb;
struct erofs_inode *const vi = EROFS_I(m->inode);
- const unsigned int lclusterbits = vi->z_logical_clusterbits;
+ const unsigned int lclusterbits = vi->z_lclusterbits;
while (m->lcn >= lookback_distance) {
unsigned long lcn = m->lcn - lookback_distance;
int err;
+ if (!lookback_distance)
+ break;
+
err = z_erofs_load_lcluster_from_disk(m, lcn, false);
if (err)
return err;
-
- switch (m->type) {
- case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
+ if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
lookback_distance = m->delta[0];
- if (!lookback_distance)
- goto err_bogus;
continue;
- case Z_EROFS_LCLUSTER_TYPE_PLAIN:
- case Z_EROFS_LCLUSTER_TYPE_HEAD1:
- case Z_EROFS_LCLUSTER_TYPE_HEAD2:
- m->headtype = m->type;
- m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
- return 0;
- default:
- erofs_err(sb, "unknown type %u @ lcn %lu of nid %llu",
- m->type, lcn, vi->nid);
- DBG_BUGON(1);
- return -EOPNOTSUPP;
}
+ m->headtype = m->type;
+ m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
+ return 0;
}
-err_bogus:
erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu",
lookback_distance, m->lcn, vi->nid);
DBG_BUGON(1);
@@ -311,27 +295,23 @@ err_bogus:
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
unsigned int initial_lcn)
{
- struct super_block *sb = m->inode->i_sb;
- struct erofs_inode *const vi = EROFS_I(m->inode);
- struct erofs_map_blocks *const map = m->map;
- const unsigned int lclusterbits = vi->z_logical_clusterbits;
- unsigned long lcn;
+ struct inode *inode = m->inode;
+ struct super_block *sb = inode->i_sb;
+ struct erofs_inode *vi = EROFS_I(inode);
+ bool bigpcl1 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
+ bool bigpcl2 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2;
+ unsigned long lcn = m->lcn + 1;
int err;
- DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN &&
- m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1 &&
- m->type != Z_EROFS_LCLUSTER_TYPE_HEAD2);
+ DBG_BUGON(m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);
DBG_BUGON(m->type != m->headtype);
- if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
- ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1) &&
- !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
- ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) &&
- !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
- map->m_plen = 1ULL << lclusterbits;
- return 0;
- }
- lcn = m->lcn + 1;
+ if ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1 && !bigpcl1) ||
+ ((m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
+ m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) && !bigpcl2) ||
+ (lcn << vi->z_lclusterbits) >= inode->i_size)
+ m->compressedblks = 1;
+
if (m->compressedblks)
goto out;
@@ -350,35 +330,21 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
DBG_BUGON(lcn == initial_lcn &&
m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);
- switch (m->type) {
- case Z_EROFS_LCLUSTER_TYPE_PLAIN:
- case Z_EROFS_LCLUSTER_TYPE_HEAD1:
- case Z_EROFS_LCLUSTER_TYPE_HEAD2:
- /*
- * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
- * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
- */
- m->compressedblks = 1 << (lclusterbits - sb->s_blocksize_bits);
- break;
- case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
- if (m->delta[0] != 1)
- goto err_bonus_cblkcnt;
- if (m->compressedblks)
- break;
- fallthrough;
- default:
- erofs_err(sb, "cannot found CBLKCNT @ lcn %lu of nid %llu", lcn,
- vi->nid);
+ if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD && m->delta[0] != 1) {
+ erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
DBG_BUGON(1);
return -EFSCORRUPTED;
}
+
+ /*
+ * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type rather
+ * than CBLKCNT, it's a 1 block-sized pcluster.
+ */
+ if (m->type != Z_EROFS_LCLUSTER_TYPE_NONHEAD || !m->compressedblks)
+ m->compressedblks = 1;
out:
- map->m_plen = erofs_pos(sb, m->compressedblks);
+ m->map->m_plen = erofs_pos(sb, m->compressedblks);
return 0;
-err_bonus_cblkcnt:
- erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
- DBG_BUGON(1);
- return -EFSCORRUPTED;
}
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
@@ -386,7 +352,7 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
struct inode *inode = m->inode;
struct erofs_inode *vi = EROFS_I(inode);
struct erofs_map_blocks *map = m->map;
- unsigned int lclusterbits = vi->z_logical_clusterbits;
+ unsigned int lclusterbits = vi->z_lclusterbits;
u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
int err;
@@ -407,17 +373,10 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
m->delta[1] = 1;
DBG_BUGON(1);
}
- } else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
- m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1 ||
- m->type == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
+ } else if (m->type < Z_EROFS_LCLUSTER_TYPE_MAX) {
if (lcn != headlcn)
break; /* ends at the next HEAD lcluster */
m->delta[1] = 1;
- } else {
- erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
- m->type, lcn, vi->nid);
- DBG_BUGON(1);
- return -EOPNOTSUPP;
}
lcn += m->delta[1];
}
@@ -425,23 +384,32 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
return 0;
}
-static int z_erofs_do_map_blocks(struct inode *inode,
+static int z_erofs_map_blocks_fo(struct inode *inode,
struct erofs_map_blocks *map, int flags)
{
- struct erofs_inode *const vi = EROFS_I(inode);
- bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
+ struct erofs_inode *vi = EROFS_I(inode);
+ struct super_block *sb = inode->i_sb;
bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
+ bool ztailpacking = vi->z_idata_size;
+ unsigned int lclusterbits = vi->z_lclusterbits;
struct z_erofs_maprecorder m = {
.inode = inode,
.map = map,
+ .in_mbox = erofs_inode_in_metabox(inode),
};
- int err = 0;
- unsigned int lclusterbits, endoff, afmt;
+ unsigned int endoff;
unsigned long initial_lcn;
unsigned long long ofs, end;
+ int err;
- lclusterbits = vi->z_logical_clusterbits;
ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
+ if (fragment && !(flags & EROFS_GET_BLOCKS_FINDTAIL) &&
+ !vi->z_tailextent_headlcn) {
+ map->m_la = 0;
+ map->m_llen = inode->i_size;
+ map->m_flags = EROFS_MAP_FRAGMENT;
+ return 0;
+ }
initial_lcn = ofs >> lclusterbits;
endoff = ofs & ((1 << lclusterbits) - 1);
@@ -449,52 +417,31 @@ static int z_erofs_do_map_blocks(struct inode *inode,
if (err)
goto unmap_out;
- if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
- vi->z_idataoff = m.nextpackoff;
-
+ if ((flags & EROFS_GET_BLOCKS_FINDTAIL) && ztailpacking)
+ vi->z_fragmentoff = m.nextpackoff;
map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
end = (m.lcn + 1ULL) << lclusterbits;
- switch (m.type) {
- case Z_EROFS_LCLUSTER_TYPE_PLAIN:
- case Z_EROFS_LCLUSTER_TYPE_HEAD1:
- case Z_EROFS_LCLUSTER_TYPE_HEAD2:
- if (endoff >= m.clusterofs) {
- m.headtype = m.type;
- map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
- /*
- * For ztailpacking files, in order to inline data more
- * effectively, special EOF lclusters are now supported
- * which can have three parts at most.
- */
- if (ztailpacking && end > inode->i_size)
- end = inode->i_size;
- break;
- }
- /* m.lcn should be >= 1 if endoff < m.clusterofs */
- if (!m.lcn) {
- erofs_err(inode->i_sb,
- "invalid logical cluster 0 at nid %llu",
- vi->nid);
- err = -EFSCORRUPTED;
- goto unmap_out;
+ if (m.type != Z_EROFS_LCLUSTER_TYPE_NONHEAD && endoff >= m.clusterofs) {
+ m.headtype = m.type;
+ map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
+ /*
+ * For ztailpacking files, in order to inline data more
+ * effectively, special EOF lclusters are now supported
+ * which can have three parts at most.
+ */
+ if (ztailpacking && end > inode->i_size)
+ end = inode->i_size;
+ } else {
+ if (m.type != Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
+ end = (m.lcn << lclusterbits) | m.clusterofs;
+ map->m_flags |= EROFS_MAP_FULL_MAPPED;
+ m.delta[0] = 1;
}
- end = (m.lcn << lclusterbits) | m.clusterofs;
- map->m_flags |= EROFS_MAP_FULL_MAPPED;
- m.delta[0] = 1;
- fallthrough;
- case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
/* get the corresponding first chunk */
err = z_erofs_extent_lookback(&m, m.delta[0]);
if (err)
goto unmap_out;
- break;
- default:
- erofs_err(inode->i_sb,
- "unknown type %u @ offset %llu of nid %llu",
- m.type, ofs, vi->nid);
- err = -EOPNOTSUPP;
- goto unmap_out;
}
if (m.partialref)
map->m_flags |= EROFS_MAP_PARTIAL_REF;
@@ -508,12 +455,18 @@ static int z_erofs_do_map_blocks(struct inode *inode,
}
if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
map->m_flags |= EROFS_MAP_META;
- map->m_pa = vi->z_idataoff;
+ map->m_pa = vi->z_fragmentoff;
map->m_plen = vi->z_idata_size;
+ if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
+ erofs_err(sb, "ztailpacking inline data across blocks @ nid %llu",
+ vi->nid);
+ err = -EFSCORRUPTED;
+ goto unmap_out;
+ }
} else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
- map->m_flags |= EROFS_MAP_FRAGMENT;
+ map->m_flags = EROFS_MAP_FRAGMENT;
} else {
- map->m_pa = erofs_pos(inode->i_sb, m.pblk);
+ map->m_pa = erofs_pos(sb, m.pblk);
err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
if (err)
goto unmap_out;
@@ -525,20 +478,15 @@ static int z_erofs_do_map_blocks(struct inode *inode,
err = -EFSCORRUPTED;
goto unmap_out;
}
- afmt = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER ?
- Z_EROFS_COMPRESSION_INTERLACED :
- Z_EROFS_COMPRESSION_SHIFTED;
+ if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
+ map->m_algorithmformat = Z_EROFS_COMPRESSION_INTERLACED;
+ else
+ map->m_algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
+ } else if (m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
+ map->m_algorithmformat = vi->z_algorithmtype[1];
} else {
- afmt = m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2 ?
- vi->z_algorithmtype[1] : vi->z_algorithmtype[0];
- if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) {
- erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
- afmt, vi->nid);
- err = -EFSCORRUPTED;
- goto unmap_out;
- }
+ map->m_algorithmformat = vi->z_algorithmtype[0];
}
- map->m_algorithmformat = afmt;
if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
((flags & EROFS_GET_BLOCKS_READMORE) &&
@@ -556,14 +504,122 @@ unmap_out:
return err;
}
-static int z_erofs_fill_inode_lazy(struct inode *inode)
+static int z_erofs_map_blocks_ext(struct inode *inode,
+ struct erofs_map_blocks *map, int flags)
+{
+ struct erofs_inode *vi = EROFS_I(inode);
+ struct super_block *sb = inode->i_sb;
+ bool interlaced = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER;
+ unsigned int recsz = z_erofs_extent_recsize(vi->z_advise);
+ erofs_off_t pos = round_up(Z_EROFS_MAP_HEADER_END(erofs_iloc(inode) +
+ vi->inode_isize + vi->xattr_isize), recsz);
+ bool in_mbox = erofs_inode_in_metabox(inode);
+ erofs_off_t lend = inode->i_size;
+ erofs_off_t l, r, mid, pa, la, lstart;
+ struct z_erofs_extent *ext;
+ unsigned int fmt;
+ bool last;
+
+ map->m_flags = 0;
+ if (recsz <= offsetof(struct z_erofs_extent, pstart_hi)) {
+ if (recsz <= offsetof(struct z_erofs_extent, pstart_lo)) {
+ ext = erofs_read_metabuf(&map->buf, sb, pos, in_mbox);
+ if (IS_ERR(ext))
+ return PTR_ERR(ext);
+ pa = le64_to_cpu(*(__le64 *)ext);
+ pos += sizeof(__le64);
+ lstart = 0;
+ } else {
+ lstart = round_down(map->m_la, 1 << vi->z_lclusterbits);
+ pos += (lstart >> vi->z_lclusterbits) * recsz;
+ pa = EROFS_NULL_ADDR;
+ }
+
+ for (; lstart <= map->m_la; lstart += 1 << vi->z_lclusterbits) {
+ ext = erofs_read_metabuf(&map->buf, sb, pos, in_mbox);
+ if (IS_ERR(ext))
+ return PTR_ERR(ext);
+ map->m_plen = le32_to_cpu(ext->plen);
+ if (pa != EROFS_NULL_ADDR) {
+ map->m_pa = pa;
+ pa += map->m_plen & Z_EROFS_EXTENT_PLEN_MASK;
+ } else {
+ map->m_pa = le32_to_cpu(ext->pstart_lo);
+ }
+ pos += recsz;
+ }
+ last = (lstart >= round_up(lend, 1 << vi->z_lclusterbits));
+ lend = min(lstart, lend);
+ lstart -= 1 << vi->z_lclusterbits;
+ } else {
+ lstart = lend;
+ for (l = 0, r = vi->z_extents; l < r; ) {
+ mid = l + (r - l) / 2;
+ ext = erofs_read_metabuf(&map->buf, sb,
+ pos + mid * recsz, in_mbox);
+ if (IS_ERR(ext))
+ return PTR_ERR(ext);
+
+ la = le32_to_cpu(ext->lstart_lo);
+ pa = le32_to_cpu(ext->pstart_lo) |
+ (u64)le32_to_cpu(ext->pstart_hi) << 32;
+ if (recsz > offsetof(struct z_erofs_extent, lstart_hi))
+ la |= (u64)le32_to_cpu(ext->lstart_hi) << 32;
+
+ if (la > map->m_la) {
+ r = mid;
+ if (la > lend) {
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+ lend = la;
+ } else {
+ l = mid + 1;
+ if (map->m_la == la)
+ r = min(l + 1, r);
+ lstart = la;
+ map->m_plen = le32_to_cpu(ext->plen);
+ map->m_pa = pa;
+ }
+ }
+ last = (l >= vi->z_extents);
+ }
+
+ if (lstart < lend) {
+ map->m_la = lstart;
+ if (last && (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) {
+ map->m_flags = EROFS_MAP_FRAGMENT;
+ vi->z_fragmentoff = map->m_plen;
+ if (recsz > offsetof(struct z_erofs_extent, pstart_lo))
+ vi->z_fragmentoff |= map->m_pa << 32;
+ } else if (map->m_plen & Z_EROFS_EXTENT_PLEN_MASK) {
+ map->m_flags |= EROFS_MAP_MAPPED |
+ EROFS_MAP_FULL_MAPPED | EROFS_MAP_ENCODED;
+ fmt = map->m_plen >> Z_EROFS_EXTENT_PLEN_FMT_BIT;
+ if (fmt)
+ map->m_algorithmformat = fmt - 1;
+ else if (interlaced && !erofs_blkoff(sb, map->m_pa))
+ map->m_algorithmformat =
+ Z_EROFS_COMPRESSION_INTERLACED;
+ else
+ map->m_algorithmformat =
+ Z_EROFS_COMPRESSION_SHIFTED;
+ if (map->m_plen & Z_EROFS_EXTENT_PLEN_PARTIAL)
+ map->m_flags |= EROFS_MAP_PARTIAL_REF;
+ map->m_plen &= Z_EROFS_EXTENT_PLEN_MASK;
+ }
+ }
+ map->m_llen = lend - map->m_la;
+ return 0;
+}
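+
A userspace model of the binary search above may help: it is a lower-bound lookup over fixed-size extent records that also reports where the next extent (or EOF) begins, which is how m_llen is derived. The on-disk record encoding, fragment handling and the 48-bit pstart split are omitted, and all values are invented.

#include <stdint.h>
#include <stdio.h>

struct toy_extent {
	uint64_t lstart;	/* logical (decompressed) start offset */
	uint64_t pstart;	/* physical start offset */
	uint32_t plen;		/* on-disk (compressed) length */
};

/*
 * Return the index of the extent covering @la and report where the next
 * extent (or the end of file) begins, mirroring the l/r/mid loop above.
 */
static unsigned int find_extent(const struct toy_extent *ext, unsigned int nr,
				uint64_t la, uint64_t isize, uint64_t *lend)
{
	unsigned int l = 0, r = nr, mid, found = 0;

	*lend = isize;
	while (l < r) {
		mid = l + (r - l) / 2;
		if (ext[mid].lstart > la) {
			r = mid;
			*lend = ext[mid].lstart;
		} else {
			found = mid;
			l = mid + 1;
		}
	}
	return found;
}

int main(void)
{
	const struct toy_extent ext[] = {
		{ .lstart = 0,     .pstart = 4096,  .plen = 4096 },
		{ .lstart = 65536, .pstart = 8192,  .plen = 8192 },
		{ .lstart = 98304, .pstart = 16384, .plen = 4096 },
	};
	uint64_t lend;
	unsigned int i = find_extent(ext, 3, 70000, 131072, &lend);

	printf("extent %u: la=%llu..%llu pa=%llu plen=%u\n", i,
	       (unsigned long long)ext[i].lstart, (unsigned long long)lend,
	       (unsigned long long)ext[i].pstart, ext[i].plen);
	return 0;
}
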
+
+static int z_erofs_fill_inode(struct inode *inode, struct erofs_map_blocks *map)
{
struct erofs_inode *const vi = EROFS_I(inode);
struct super_block *const sb = inode->i_sb;
- int err, headnr;
- erofs_off_t pos;
- struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
struct z_erofs_map_header *h;
+ erofs_off_t pos;
+ int err = 0;
if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
/*
@@ -577,12 +633,11 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
return -ERESTARTSYS;
- err = 0;
if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
goto out_unlock;
pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
- h = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
+ h = erofs_read_metabuf(&map->buf, sb, pos, erofs_inode_in_metabox(inode));
if (IS_ERR(h)) {
err = PTR_ERR(h);
goto out_unlock;
@@ -599,26 +654,28 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
goto done;
}
vi->z_advise = le16_to_cpu(h->h_advise);
+ vi->z_lclusterbits = sb->s_blocksize_bits + (h->h_clusterbits & 15);
+ if (vi->datalayout == EROFS_INODE_COMPRESSED_FULL &&
+ (vi->z_advise & Z_EROFS_ADVISE_EXTENTS)) {
+ vi->z_extents = le32_to_cpu(h->h_extents_lo) |
+ ((u64)le16_to_cpu(h->h_extents_hi) << 32);
+ goto done;
+ }
+
vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
+ if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)
+ vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
+ else if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER)
+ vi->z_idata_size = le16_to_cpu(h->h_idata_size);
- headnr = 0;
- if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
- vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
- erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
- headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
- err = -EOPNOTSUPP;
- goto out_put_metabuf;
- }
-
- vi->z_logical_clusterbits = sb->s_blocksize_bits + (h->h_clusterbits & 7);
if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
vi->nid);
err = -EFSCORRUPTED;
- goto out_put_metabuf;
+ goto out_unlock;
}
if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
@@ -626,53 +683,59 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
vi->nid);
err = -EFSCORRUPTED;
- goto out_put_metabuf;
- }
-
- if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
- struct erofs_map_blocks map = {
- .buf = __EROFS_BUF_INITIALIZER
- };
-
- vi->z_idata_size = le16_to_cpu(h->h_idata_size);
- err = z_erofs_do_map_blocks(inode, &map,
- EROFS_GET_BLOCKS_FINDTAIL);
- erofs_put_metabuf(&map.buf);
-
- if (!map.m_plen ||
- erofs_blkoff(sb, map.m_pa) + map.m_plen > sb->s_blocksize) {
- erofs_err(sb, "invalid tail-packing pclustersize %llu",
- map.m_plen);
- err = -EFSCORRUPTED;
- }
- if (err < 0)
- goto out_put_metabuf;
+ goto out_unlock;
}
- if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
- !(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
- struct erofs_map_blocks map = {
+ if (vi->z_idata_size ||
+ (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) {
+ struct erofs_map_blocks tm = {
.buf = __EROFS_BUF_INITIALIZER
};
- vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
- err = z_erofs_do_map_blocks(inode, &map,
+ err = z_erofs_map_blocks_fo(inode, &tm,
EROFS_GET_BLOCKS_FINDTAIL);
- erofs_put_metabuf(&map.buf);
+ erofs_put_metabuf(&tm.buf);
if (err < 0)
- goto out_put_metabuf;
+ goto out_unlock;
}
done:
/* paired with smp_mb() at the beginning of the function */
smp_mb();
set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
-out_put_metabuf:
- erofs_put_metabuf(&buf);
out_unlock:
clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
return err;
}
+static int z_erofs_map_sanity_check(struct inode *inode,
+ struct erofs_map_blocks *map)
+{
+ struct erofs_sb_info *sbi = EROFS_I_SB(inode);
+ u64 pend;
+
+ if (!(map->m_flags & EROFS_MAP_ENCODED))
+ return 0;
+ if (unlikely(map->m_algorithmformat >= Z_EROFS_COMPRESSION_RUNTIME_MAX)) {
+ erofs_err(inode->i_sb, "unknown algorithm %d @ pos %llu for nid %llu, please upgrade kernel",
+ map->m_algorithmformat, map->m_la, EROFS_I(inode)->nid);
+ return -EOPNOTSUPP;
+ }
+ if (unlikely(map->m_algorithmformat < Z_EROFS_COMPRESSION_MAX &&
+ !(sbi->available_compr_algs & (1 << map->m_algorithmformat)))) {
+ erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
+ map->m_algorithmformat, EROFS_I(inode)->nid);
+ return -EFSCORRUPTED;
+ }
+ if (unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE ||
+ map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE))
+ return -EOPNOTSUPP;
+ /* Filesystems beyond 48-bit physical block addresses are invalid */
+ if (unlikely(check_add_overflow(map->m_pa, map->m_plen, &pend) ||
+ (pend >> sbi->blkszbits) >= BIT_ULL(48)))
+ return -EFSCORRUPTED;
+ return 0;
+}
+
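For illustration, the overflow-plus-range guard above can be exercised in isolation with the compiler builtin that the kernel's check_add_overflow() wraps; the helper name and the blkszbits value below are made up for this sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Reject extents whose end overflows or crosses the 48-bit block limit. */
static bool pa_range_ok(uint64_t pa, uint64_t plen, unsigned int blkszbits)
{
	uint64_t pend;

	if (__builtin_add_overflow(pa, plen, &pend))
		return false;
	return (pend >> blkszbits) < (1ULL << 48);
}

int main(void)
{
	printf("%d\n", pa_range_ok(1ULL << 40, 4096, 12));	/* in range */
	printf("%d\n", pa_range_ok(UINT64_MAX - 10, 4096, 12));	/* overflow */
	return 0;
}
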
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
int flags)
{
@@ -685,22 +748,16 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
map->m_la = inode->i_size;
map->m_flags = 0;
} else {
- err = z_erofs_fill_inode_lazy(inode);
+ err = z_erofs_fill_inode(inode, map);
if (!err) {
- if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
- !vi->z_tailextent_headlcn) {
- map->m_la = 0;
- map->m_llen = inode->i_size;
- map->m_flags = EROFS_MAP_MAPPED |
- EROFS_MAP_FULL_MAPPED | EROFS_MAP_FRAGMENT;
- } else {
- err = z_erofs_do_map_blocks(inode, map, flags);
- }
+ if (vi->datalayout == EROFS_INODE_COMPRESSED_FULL &&
+ (vi->z_advise & Z_EROFS_ADVISE_EXTENTS))
+ err = z_erofs_map_blocks_ext(inode, map, flags);
+ else
+ err = z_erofs_map_blocks_fo(inode, map, flags);
}
- if (!err && (map->m_flags & EROFS_MAP_ENCODED) &&
- unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE ||
- map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE))
- err = -EOPNOTSUPP;
+ if (!err)
+ err = z_erofs_map_sanity_check(inode, map);
if (err)
map->m_llen = 0;
}
@@ -725,7 +782,7 @@ static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
iomap->length = map.m_llen;
if (map.m_flags & EROFS_MAP_MAPPED) {
iomap->type = IOMAP_MAPPED;
- iomap->addr = map.m_flags & EROFS_MAP_FRAGMENT ?
+ iomap->addr = map.m_flags & __EROFS_MAP_FRAGMENT ?
IOMAP_NULL_ADDR : map.m_pa;
} else {
iomap->type = IOMAP_HOLE;
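
As a rough, self-contained userspace model of the 48-bit extent bound that the new z_erofs_map_sanity_check() enforces with check_add_overflow(): compute the extent end first, reject it if the addition wraps, and only then shift by the block-size bits. The helper name and the main() harness below are illustrative, not EROFS code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Reject extents whose end overflows or whose end block needs more than 48 bits. */
static bool extent_within_48bit(uint64_t pa, uint64_t plen, unsigned int blkszbits)
{
	uint64_t pend;

	if (__builtin_add_overflow(pa, plen, &pend))
		return false;	/* pa + plen wrapped past 64 bits */
	return (pend >> blkszbits) < (1ULL << 48);
}

int main(void)
{
	/* 4KiB blocks: an extent ending near 2^60 bytes exceeds 48-bit block addressing. */
	printf("%d\n", extent_within_48bit(1ULL << 60, 4096, 12));	/* 0 */
	printf("%d\n", extent_within_48bit(4096, 4096, 12));		/* 1 */
	return 0;
}
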
diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c
index 75704f58ecfa..55ff2ab5128e 100644
--- a/fs/erofs/zutil.c
+++ b/fs/erofs/zutil.c
@@ -87,8 +87,8 @@ int z_erofs_gbuf_growsize(unsigned int nrpages)
tmp_pages[j] = gbuf->pages[j];
do {
last = j;
- j = alloc_pages_bulk_array(GFP_KERNEL, nrpages,
- tmp_pages);
+ j = alloc_pages_bulk(GFP_KERNEL, nrpages,
+ tmp_pages);
if (last == j)
goto out;
} while (j != nrpages);
@@ -230,9 +230,10 @@ void erofs_shrinker_unregister(struct super_block *sb)
struct erofs_sb_info *const sbi = EROFS_SB(sb);
mutex_lock(&sbi->umount_mutex);
- /* clean up all remaining pclusters in memory */
- z_erofs_shrink_scan(sbi, ~0UL);
-
+ while (!xa_empty(&sbi->managed_pslots)) {
+ z_erofs_shrink_scan(sbi, ~0UL);
+ cond_resched();
+ }
spin_lock(&erofs_sb_list_lock);
list_del(&sbi->list);
spin_unlock(&erofs_sb_list_lock);
@@ -242,7 +243,7 @@ void erofs_shrinker_unregister(struct super_block *sb)
static unsigned long erofs_shrink_count(struct shrinker *shrink,
struct shrink_control *sc)
{
- return atomic_long_read(&erofs_global_shrink_cnt);
+ return atomic_long_read(&erofs_global_shrink_cnt) ?: SHRINK_EMPTY;
}
static unsigned long erofs_shrink_scan(struct shrinker *shrink,
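
The z_erofs_gbuf_growsize() hunk above retries the bulk allocation only while it keeps making progress, bailing out as soon as a call adds nothing. A sketch of that pattern in plain C, with a toy allocator standing in for alloc_pages_bulk(); the helper and its fill policy are made up for illustration.

#include <stdio.h>

/* Simulates a bulk allocator that fills only part of the remaining slots. */
static unsigned int toy_alloc_bulk(unsigned int want, int *slots,
				   unsigned int filled)
{
	unsigned int step = (want - filled + 1) / 2;	/* fill half the rest */

	for (unsigned int i = 0; i < step; i++)
		slots[filled + i] = 1;
	return filled + step;
}

int main(void)
{
	int slots[8] = {0};
	unsigned int want = 8, j = 0, last;

	do {
		last = j;
		j = toy_alloc_bulk(want, slots, j);
		if (last == j)
			break;		/* no forward progress: give up */
	} while (j != want);

	printf("filled %u of %u\n", j, want);
	return 0;
}
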
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 76129bfcd663..3219e0d596fe 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -378,9 +378,7 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
static int do_eventfd(unsigned int count, int flags)
{
- struct eventfd_ctx *ctx;
- struct file *file;
- int fd;
+ struct eventfd_ctx *ctx __free(kfree) = NULL;
/* Check the EFD_* constants for consistency. */
BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
@@ -398,27 +396,19 @@ static int do_eventfd(unsigned int count, int flags)
init_waitqueue_head(&ctx->wqh);
ctx->count = count;
ctx->flags = flags;
- ctx->id = ida_alloc(&eventfd_ida, GFP_KERNEL);
flags &= EFD_SHARED_FCNTL_FLAGS;
flags |= O_RDWR;
- fd = get_unused_fd_flags(flags);
- if (fd < 0)
- goto err;
-
- file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
- if (IS_ERR(file)) {
- put_unused_fd(fd);
- fd = PTR_ERR(file);
- goto err;
- }
- file->f_mode |= FMODE_NOWAIT;
- fd_install(fd, file);
- return fd;
-err:
- eventfd_free_ctx(ctx);
- return fd;
+ FD_PREPARE(fdf, flags,
+ anon_inode_getfile_fmode("[eventfd]", &eventfd_fops, ctx,
+ flags, FMODE_NOWAIT));
+ if (fdf.err)
+ return fdf.err;
+
+ ctx->id = ida_alloc(&eventfd_ida, GFP_KERNEL);
+ retain_and_null_ptr(ctx);
+ return fd_publish(fdf);
}
SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
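
The eventfd conversion above relies on a scoped-cleanup pointer (ctx __free(kfree)) that is freed automatically on any early return unless ownership is handed off first, which is what retain_and_null_ptr() does in the patch. Below is a userspace analogue built on the compiler's cleanup attribute; it only models the idea and is not the kernel's cleanup.h API.

#include <stdio.h>
#include <stdlib.h>

static void free_ptr(void *p)
{
	free(*(void **)p);	/* called with a pointer to the variable */
}
#define AUTO_FREE __attribute__((cleanup(free_ptr)))

static int make_object(int fail)
{
	AUTO_FREE char *buf = malloc(32);

	if (!buf)
		return -1;
	if (fail)
		return -1;	/* cleanup handler frees buf on this early return */

	/* Success path: hand the buffer off and disarm the auto-free. */
	char *owned = buf;

	buf = NULL;		/* free(NULL) in the cleanup handler is a no-op */
	printf("kept %p\n", (void *)owned);
	free(owned);
	return 0;
}

int main(void)
{
	make_object(1);
	make_object(0);
	return 0;
}
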
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index f9898e60dd8b..6c36d9dc6926 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -46,10 +46,10 @@
*
* 1) epnested_mutex (mutex)
* 2) ep->mtx (mutex)
- * 3) ep->lock (rwlock)
+ * 3) ep->lock (spinlock)
*
* The acquire order is the one listed above, from 1 to 3.
- * We need a rwlock (ep->lock) because we manipulate objects
+ * We need a spinlock (ep->lock) because we manipulate objects
* from inside the poll callback, that might be triggered from
* a wake_up() that in turn might be called from IRQ context.
* So we can't sleep inside the poll callback and hence we need
@@ -195,7 +195,7 @@ struct eventpoll {
struct list_head rdllist;
/* Lock which protects rdllist and ovflist */
- rwlock_t lock;
+ spinlock_t lock;
/* RB tree root used to store monitored fd structs */
struct rb_root_cached rbr;
@@ -218,6 +218,7 @@ struct eventpoll {
/* used to optimize loop detection check */
u64 gen;
struct hlist_head refs;
+ u8 loop_check_depth;
/*
* usage count, used together with epitem->dying to
@@ -318,7 +319,7 @@ static void unlist_file(struct epitems_head *head)
static long long_zero;
static long long_max = LONG_MAX;
-static struct ctl_table epoll_table[] = {
+static const struct ctl_table epoll_table[] = {
{
.procname = "max_user_watches",
.data = &max_user_watches,
@@ -438,7 +439,7 @@ static bool ep_busy_loop_end(void *p, unsigned long start_time)
*
* we must do our busy polling with irqs enabled
*/
-static bool ep_busy_loop(struct eventpoll *ep, int nonblock)
+static bool ep_busy_loop(struct eventpoll *ep)
{
unsigned int napi_id = READ_ONCE(ep->napi_id);
u16 budget = READ_ONCE(ep->busy_poll_budget);
@@ -447,8 +448,8 @@ static bool ep_busy_loop(struct eventpoll *ep, int nonblock)
if (!budget)
budget = BUSY_POLL_BUDGET;
- if (napi_id >= MIN_NAPI_ID && ep_busy_loop_on(ep)) {
- napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end,
+ if (napi_id_valid(napi_id) && ep_busy_loop_on(ep)) {
+ napi_busy_loop(napi_id, ep_busy_loop_end,
ep, prefer_busy_poll, budget);
if (ep_events_available(ep))
return true;
@@ -492,7 +493,7 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
* or
* Nothing to do if we already have this ID
*/
- if (napi_id < MIN_NAPI_ID || napi_id == ep->napi_id)
+ if (!napi_id_valid(napi_id) || napi_id == ep->napi_id)
return;
/* record NAPI ID for use in next busy poll */
@@ -546,7 +547,7 @@ static void ep_suspend_napi_irqs(struct eventpoll *ep)
{
unsigned int napi_id = READ_ONCE(ep->napi_id);
- if (napi_id >= MIN_NAPI_ID && READ_ONCE(ep->prefer_busy_poll))
+ if (napi_id_valid(napi_id) && READ_ONCE(ep->prefer_busy_poll))
napi_suspend_irqs(napi_id);
}
@@ -554,13 +555,13 @@ static void ep_resume_napi_irqs(struct eventpoll *ep)
{
unsigned int napi_id = READ_ONCE(ep->napi_id);
- if (napi_id >= MIN_NAPI_ID && READ_ONCE(ep->prefer_busy_poll))
+ if (napi_id_valid(napi_id) && READ_ONCE(ep->prefer_busy_poll))
napi_resume_irqs(napi_id);
}
#else
-static inline bool ep_busy_loop(struct eventpoll *ep, int nonblock)
+static inline bool ep_busy_loop(struct eventpoll *ep)
{
return false;
}
@@ -740,10 +741,10 @@ static void ep_start_scan(struct eventpoll *ep, struct list_head *txlist)
* in a lockless way.
*/
lockdep_assert_irqs_enabled();
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
list_splice_init(&ep->rdllist, txlist);
WRITE_ONCE(ep->ovflist, NULL);
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
}
static void ep_done_scan(struct eventpoll *ep,
@@ -751,7 +752,7 @@ static void ep_done_scan(struct eventpoll *ep,
{
struct epitem *epi, *nepi;
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
/*
* During the time we spent inside the "sproc" callback, some
* other events might have been queued by the poll callback.
@@ -792,7 +793,7 @@ static void ep_done_scan(struct eventpoll *ep,
wake_up(&ep->wq);
}
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
}
static void ep_get(struct eventpoll *ep)
@@ -867,10 +868,10 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
rb_erase_cached(&epi->rbn, &ep->rbr);
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
if (ep_is_linked(epi))
list_del_init(&epi->rdllink);
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
wakeup_source_unregister(ep_wakeup_source(epi));
/*
@@ -883,7 +884,7 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
kfree_rcu(epi, rcu);
percpu_counter_dec(&ep->user->epoll_watches);
- return ep_refcount_dec_and_test(ep);
+ return true;
}
/*
@@ -891,14 +892,14 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
*/
static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi)
{
- WARN_ON_ONCE(__ep_remove(ep, epi, false));
+ if (__ep_remove(ep, epi, false))
+ WARN_ON_ONCE(ep_refcount_dec_and_test(ep));
}
static void ep_clear_and_put(struct eventpoll *ep)
{
struct rb_node *rbp, *next;
struct epitem *epi;
- bool dispose;
/* We need to release all tasks waiting for these file */
if (waitqueue_active(&ep->poll_wait))
@@ -931,10 +932,8 @@ static void ep_clear_and_put(struct eventpoll *ep)
cond_resched();
}
- dispose = ep_refcount_dec_and_test(ep);
mutex_unlock(&ep->mtx);
-
- if (dispose)
+ if (ep_refcount_dec_and_test(ep))
ep_free(ep);
}
@@ -1137,7 +1136,7 @@ again:
dispose = __ep_remove(ep, epi, true);
mutex_unlock(&ep->mtx);
- if (dispose)
+ if (dispose && ep_refcount_dec_and_test(ep))
ep_free(ep);
goto again;
}
@@ -1153,7 +1152,7 @@ static int ep_alloc(struct eventpoll **pep)
return -ENOMEM;
mutex_init(&ep->mtx);
- rwlock_init(&ep->lock);
+ spin_lock_init(&ep->lock);
init_waitqueue_head(&ep->wq);
init_waitqueue_head(&ep->poll_wait);
INIT_LIST_HEAD(&ep->rdllist);
@@ -1241,99 +1240,9 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
#endif /* CONFIG_KCMP */
/*
- * Adds a new entry to the tail of the list in a lockless way, i.e.
- * multiple CPUs are allowed to call this function concurrently.
- *
- * Beware: it is necessary to prevent any other modifications of the
- * existing list until all changes are completed, in other words
- * concurrent list_add_tail_lockless() calls should be protected
- * with a read lock, where write lock acts as a barrier which
- * makes sure all list_add_tail_lockless() calls are fully
- * completed.
- *
- * Also an element can be locklessly added to the list only in one
- * direction i.e. either to the tail or to the head, otherwise
- * concurrent access will corrupt the list.
- *
- * Return: %false if element has been already added to the list, %true
- * otherwise.
- */
-static inline bool list_add_tail_lockless(struct list_head *new,
- struct list_head *head)
-{
- struct list_head *prev;
-
- /*
- * This is simple 'new->next = head' operation, but cmpxchg()
- * is used in order to detect that same element has been just
- * added to the list from another CPU: the winner observes
- * new->next == new.
- */
- if (!try_cmpxchg(&new->next, &new, head))
- return false;
-
- /*
- * Initially ->next of a new element must be updated with the head
- * (we are inserting to the tail) and only then pointers are atomically
- * exchanged. XCHG guarantees memory ordering, thus ->next should be
- * updated before pointers are actually swapped and pointers are
- * swapped before prev->next is updated.
- */
-
- prev = xchg(&head->prev, new);
-
- /*
- * It is safe to modify prev->next and new->prev, because a new element
- * is added only to the tail and new->next is updated before XCHG.
- */
-
- prev->next = new;
- new->prev = prev;
-
- return true;
-}
-
-/*
- * Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
- * i.e. multiple CPUs are allowed to call this function concurrently.
- *
- * Return: %false if epi element has been already chained, %true otherwise.
- */
-static inline bool chain_epi_lockless(struct epitem *epi)
-{
- struct eventpoll *ep = epi->ep;
-
- /* Fast preliminary check */
- if (epi->next != EP_UNACTIVE_PTR)
- return false;
-
- /* Check that the same epi has not been just chained from another CPU */
- if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
- return false;
-
- /* Atomically exchange tail */
- epi->next = xchg(&ep->ovflist, epi);
-
- return true;
-}
-
-/*
* This is the callback that is passed to the wait queue wakeup
* mechanism. It is called by the stored file descriptors when they
* have events to report.
- *
- * This callback takes a read lock in order not to contend with concurrent
- * events from another file descriptor, thus all modifications to ->rdllist
- * or ->ovflist are lockless. Read lock is paired with the write lock from
- * ep_start/done_scan(), which stops all list modifications and guarantees
- * that lists state is seen correctly.
- *
- * Another thing worth to mention is that ep_poll_callback() can be called
- * concurrently for the same @epi from different CPUs if poll table was inited
- * with several wait queues entries. Plural wakeup from different CPUs of a
- * single wait queue is serialized by wq.lock, but the case when multiple wait
- * queues are used should be detected accordingly. This is detected using
- * cmpxchg() operation.
*/
static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
@@ -1344,7 +1253,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
unsigned long flags;
int ewake = 0;
- read_lock_irqsave(&ep->lock, flags);
+ spin_lock_irqsave(&ep->lock, flags);
ep_set_busy_poll_napi_id(epi);
@@ -1373,12 +1282,15 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
* chained in ep->ovflist and requeued later on.
*/
if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
- if (chain_epi_lockless(epi))
+ if (epi->next == EP_UNACTIVE_PTR) {
+ epi->next = READ_ONCE(ep->ovflist);
+ WRITE_ONCE(ep->ovflist, epi);
ep_pm_stay_awake_rcu(epi);
+ }
} else if (!ep_is_linked(epi)) {
/* In the usual case, add event to ready list. */
- if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
- ep_pm_stay_awake_rcu(epi);
+ list_add_tail(&epi->rdllink, &ep->rdllist);
+ ep_pm_stay_awake_rcu(epi);
}
/*
@@ -1411,7 +1323,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
pwake++;
out_unlock:
- read_unlock_irqrestore(&ep->lock, flags);
+ spin_unlock_irqrestore(&ep->lock, flags);
/* We have to call this outside the lock */
if (pwake)
@@ -1746,7 +1658,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
}
/* We have to drop the new item inside our item list to keep track of it */
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
/* record NAPI ID of new item if present */
ep_set_busy_poll_napi_id(epi);
@@ -1763,7 +1675,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
pwake++;
}
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
/* We have to call this outside the lock */
if (pwake)
@@ -1827,7 +1739,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
* list, push it inside.
*/
if (ep_item_poll(epi, &pt, 1)) {
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
if (!ep_is_linked(epi)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
@@ -1838,7 +1750,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
}
/* We have to call this outside the lock */
@@ -1980,6 +1892,30 @@ static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry,
return ret;
}
+static int ep_try_send_events(struct eventpoll *ep,
+ struct epoll_event __user *events, int maxevents)
+{
+ int res;
+
+ /*
+ * Try to transfer events to user space. In case we get 0 events and
+ * there's still timeout left over, we go trying again in search of
+ * more luck.
+ */
+ res = ep_send_events(ep, events, maxevents);
+ if (res > 0)
+ ep_suspend_napi_irqs(ep);
+ return res;
+}
+
+static int ep_schedule_timeout(ktime_t *to)
+{
+ if (to)
+ return ktime_after(*to, ktime_get());
+ else
+ return 1;
+}
+
/**
* ep_poll - Retrieves ready events, and delivers them to the caller-supplied
* event buffer.
@@ -2031,23 +1967,15 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
while (1) {
if (eavail) {
- /*
- * Try to transfer events to user space. In case we get
- * 0 events and there's still timeout left over, we go
- * trying again in search of more luck.
- */
- res = ep_send_events(ep, events, maxevents);
- if (res) {
- if (res > 0)
- ep_suspend_napi_irqs(ep);
+ res = ep_try_send_events(ep, events, maxevents);
+ if (res)
return res;
- }
}
if (timed_out)
return 0;
- eavail = ep_busy_loop(ep, timed_out);
+ eavail = ep_busy_loop(ep);
if (eavail)
continue;
@@ -2074,7 +2002,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
init_wait(&wait);
wait.func = ep_autoremove_wake_function;
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
/*
* Barrierless variant, waitqueue_active() is called under
* the same lock on wakeup ep_poll_callback() side, so it
@@ -2093,11 +2021,12 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
if (!eavail)
__add_wait_queue_exclusive(&ep->wq, &wait);
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
if (!eavail)
- timed_out = !schedule_hrtimeout_range(to, slack,
- HRTIMER_MODE_ABS);
+ timed_out = !ep_schedule_timeout(to) ||
+ !schedule_hrtimeout_range(to, slack,
+ HRTIMER_MODE_ABS);
__set_current_state(TASK_RUNNING);
/*
@@ -2108,7 +2037,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
eavail = 1;
if (!list_empty_careful(&wait.entry)) {
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
/*
* If the thread timed out and is not on the wait queue,
* it means that the thread was woken up after its
@@ -2119,29 +2048,30 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
if (timed_out)
eavail = list_empty(&wait.entry);
__remove_wait_queue(&ep->wq, &wait);
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
}
}
}
/**
- * ep_loop_check_proc - verify that adding an epoll file inside another
- * epoll structure does not violate the constraints, in
- * terms of closed loops, or too deep chains (which can
- * result in excessive stack usage).
+ * ep_loop_check_proc - verify that adding an epoll file @ep inside another
+ * epoll file does not create closed loops, and
+ * determine the depth of the subtree starting at @ep
*
* @ep: the &struct eventpoll to be currently checked.
* @depth: Current depth of the path being checked.
*
- * Return: %zero if adding the epoll @file inside current epoll
- * structure @ep does not violate the constraints, or %-1 otherwise.
+ * Return: depth of the subtree, or INT_MAX if we found a loop or went too deep.
*/
static int ep_loop_check_proc(struct eventpoll *ep, int depth)
{
- int error = 0;
+ int result = 0;
struct rb_node *rbp;
struct epitem *epi;
+ if (ep->gen == loop_check_gen)
+ return ep->loop_check_depth;
+
mutex_lock_nested(&ep->mtx, depth + 1);
ep->gen = loop_check_gen;
for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
@@ -2149,13 +2079,11 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth)
if (unlikely(is_file_epoll(epi->ffd.file))) {
struct eventpoll *ep_tovisit;
ep_tovisit = epi->ffd.file->private_data;
- if (ep_tovisit->gen == loop_check_gen)
- continue;
if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS)
- error = -1;
+ result = INT_MAX;
else
- error = ep_loop_check_proc(ep_tovisit, depth + 1);
- if (error != 0)
+ result = max(result, ep_loop_check_proc(ep_tovisit, depth + 1) + 1);
+ if (result > EP_MAX_NESTS)
break;
} else {
/*
@@ -2169,9 +2097,25 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth)
list_file(epi->ffd.file);
}
}
+ ep->loop_check_depth = result;
mutex_unlock(&ep->mtx);
- return error;
+ return result;
+}
+
+/* ep_get_upwards_depth_proc - determine depth of @ep when traversed upwards */
+static int ep_get_upwards_depth_proc(struct eventpoll *ep, int depth)
+{
+ int result = 0;
+ struct epitem *epi;
+
+ if (ep->gen == loop_check_gen)
+ return ep->loop_check_depth;
+ hlist_for_each_entry_rcu(epi, &ep->refs, fllink)
+ result = max(result, ep_get_upwards_depth_proc(epi->ep, depth + 1) + 1);
+ ep->gen = loop_check_gen;
+ ep->loop_check_depth = result;
+ return result;
}
/**
@@ -2187,8 +2131,22 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth)
*/
static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to)
{
+ int depth, upwards_depth;
+
inserting_into = ep;
- return ep_loop_check_proc(to, 0);
+ /*
+ * Check how deep down we can get from @to, and whether it is possible
+ * to loop up to @ep.
+ */
+ depth = ep_loop_check_proc(to, 0);
+ if (depth > EP_MAX_NESTS)
+ return -1;
+ /* Check how far up we can go from @ep. */
+ rcu_read_lock();
+ upwards_depth = ep_get_upwards_depth_proc(ep, 0);
+ rcu_read_unlock();
+
+ return (depth+1+upwards_depth > EP_MAX_NESTS) ? -1 : 0;
}
static void clear_tfile_check_list(void)
@@ -2207,9 +2165,8 @@ static void clear_tfile_check_list(void)
*/
static int do_epoll_create(int flags)
{
- int error, fd;
- struct eventpoll *ep = NULL;
- struct file *file;
+ int error;
+ struct eventpoll *ep;
/* Check the EPOLL_* constant for consistency. */
BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
@@ -2226,26 +2183,15 @@ static int do_epoll_create(int flags)
* Creates all the items needed to setup an eventpoll file. That is,
* a file structure and a free file descriptor.
*/
- fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
- if (fd < 0) {
- error = fd;
- goto out_free_ep;
- }
- file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
- O_RDWR | (flags & O_CLOEXEC));
- if (IS_ERR(file)) {
- error = PTR_ERR(file);
- goto out_free_fd;
+ FD_PREPARE(fdf, O_RDWR | (flags & O_CLOEXEC),
+ anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
+ O_RDWR | (flags & O_CLOEXEC)));
+ if (fdf.err) {
+ ep_clear_and_put(ep);
+ return fdf.err;
}
- ep->file = file;
- fd_install(fd, file);
- return fd;
-
-out_free_fd:
- put_unused_fd(fd);
-out_free_ep:
- ep_clear_and_put(ep);
- return error;
+ ep->file = fd_prepare_file(fdf);
+ return fd_publish(fdf);
}
SYSCALL_DEFINE1(epoll_create1, int, flags)
@@ -2445,6 +2391,47 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
return do_epoll_ctl(epfd, op, fd, &epds, false);
}
+static int ep_check_params(struct file *file, struct epoll_event __user *evs,
+ int maxevents)
+{
+ /* The maximum number of events must be greater than zero */
+ if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
+ return -EINVAL;
+
+ /* Verify that the area passed by the user is writeable */
+ if (!access_ok(evs, maxevents * sizeof(struct epoll_event)))
+ return -EFAULT;
+
+ /*
+ * We have to check that the file structure underneath the fd
+ * the user passed to us _is_ an eventpoll file.
+ */
+ if (!is_file_epoll(file))
+ return -EINVAL;
+
+ return 0;
+}
+
+int epoll_sendevents(struct file *file, struct epoll_event __user *events,
+ int maxevents)
+{
+ struct eventpoll *ep;
+ int ret;
+
+ ret = ep_check_params(file, events, maxevents);
+ if (unlikely(ret))
+ return ret;
+
+ ep = file->private_data;
+ /*
+ * Racy call, but that's ok - it should get retried based on
+ * poll readiness anyway.
+ */
+ if (ep_events_available(ep))
+ return ep_try_send_events(ep, events, maxevents);
+ return 0;
+}
+
/*
* Implement the event wait interface for the eventpoll file. It is the kernel
* part of the user space epoll_wait(2).
@@ -2453,26 +2440,16 @@ static int do_epoll_wait(int epfd, struct epoll_event __user *events,
int maxevents, struct timespec64 *to)
{
struct eventpoll *ep;
-
- /* The maximum number of event must be greater than zero */
- if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
- return -EINVAL;
-
- /* Verify that the area passed by the user is writeable */
- if (!access_ok(events, maxevents * sizeof(struct epoll_event)))
- return -EFAULT;
+ int ret;
/* Get the "struct file *" for the eventpoll file */
CLASS(fd, f)(epfd);
if (fd_empty(f))
return -EBADF;
- /*
- * We have to check that the file structure underneath the fd
- * the user passed to us _is_ an eventpoll file.
- */
- if (!is_file_epoll(fd_file(f)))
- return -EINVAL;
+ ret = ep_check_params(fd_file(f), events, maxevents);
+ if (unlikely(ret))
+ return ret;
/*
* At this point it is safe to assume that the "private_data" contains
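
The reworked epoll loop check above replaces the old yes/no walk with a memoized depth computation: each eventpoll caches the depth of its subtree for the current loop_check_gen, and an insertion is rejected when the downward depth of the target plus the upward depth of the inserted instance exceeds EP_MAX_NESTS. A simplified userspace model of just the memoized downward walk follows; the node layout, generation counter, and limit are made up for illustration.

#include <stdio.h>

struct node {
	struct node *children[4];
	int nr_children;
	int gen;
	int cached_depth;
};

static int cur_gen;

/* Depth of the subtree below n, reusing results cached for this generation. */
static int subtree_depth(struct node *n)
{
	int depth = 0;

	if (n->gen == cur_gen)
		return n->cached_depth;	/* already measured this generation */
	n->gen = cur_gen;
	for (int i = 0; i < n->nr_children; i++) {
		int d = subtree_depth(n->children[i]) + 1;

		if (d > depth)
			depth = d;
	}
	n->cached_depth = depth;
	return depth;
}

int main(void)
{
	struct node a = {0}, b = {0}, c = {0};

	a.children[a.nr_children++] = &b;
	b.children[b.nr_children++] = &c;
	cur_gen++;
	printf("depth(a) = %d\n", subtree_depth(&a));	/* 2 */
	return 0;
}
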
diff --git a/fs/exec.c b/fs/exec.c
index 98cb7ba9983c..9d5ebc9d15b0 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -78,6 +78,9 @@
#include <trace/events/sched.h>
+/* For vma exec functions. */
+#include "../mm/internal.h"
+
static int bprm_creds_from_file(struct linux_binprm *bprm);
int suid_dumpable = 0;
@@ -111,70 +114,13 @@ static inline void put_binfmt(struct linux_binfmt * fmt)
bool path_noexec(const struct path *path)
{
+ /* If it's an anonymous inode make sure that we catch any shenanigans. */
+ VFS_WARN_ON_ONCE(IS_ANON_FILE(d_inode(path->dentry)) &&
+ !(path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC));
return (path->mnt->mnt_flags & MNT_NOEXEC) ||
(path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
}
-#ifdef CONFIG_USELIB
-/*
- * Note that a shared library must be both readable and executable due to
- * security reasons.
- *
- * Also note that we take the address to load from the file itself.
- */
-SYSCALL_DEFINE1(uselib, const char __user *, library)
-{
- struct linux_binfmt *fmt;
- struct file *file;
- struct filename *tmp = getname(library);
- int error = PTR_ERR(tmp);
- static const struct open_flags uselib_flags = {
- .open_flag = O_LARGEFILE | O_RDONLY,
- .acc_mode = MAY_READ | MAY_EXEC,
- .intent = LOOKUP_OPEN,
- .lookup_flags = LOOKUP_FOLLOW,
- };
-
- if (IS_ERR(tmp))
- goto out;
-
- file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
- putname(tmp);
- error = PTR_ERR(file);
- if (IS_ERR(file))
- goto out;
-
- /*
- * Check do_open_execat() for an explanation.
- */
- error = -EACCES;
- if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)) ||
- path_noexec(&file->f_path))
- goto exit;
-
- error = -ENOEXEC;
-
- read_lock(&binfmt_lock);
- list_for_each_entry(fmt, &formats, lh) {
- if (!fmt->load_shlib)
- continue;
- if (!try_module_get(fmt->module))
- continue;
- read_unlock(&binfmt_lock);
- error = fmt->load_shlib(file);
- read_lock(&binfmt_lock);
- put_binfmt(fmt);
- if (error != -ENOEXEC)
- break;
- }
- read_unlock(&binfmt_lock);
-exit:
- fput(file);
-out:
- return error;
-}
-#endif /* #ifdef CONFIG_USELIB */
-
#ifdef CONFIG_MMU
/*
* The nascent bprm->mm is not visible until exec_mmap() but it can
@@ -205,18 +151,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
/*
* Avoid relying on expanding the stack down in GUP (which
* does not work for STACK_GROWSUP anyway), and just do it
- * by hand ahead of time.
+ * ahead of time.
*/
- if (write && pos < vma->vm_start) {
- mmap_write_lock(mm);
- ret = expand_downwards(vma, pos);
- if (unlikely(ret < 0)) {
- mmap_write_unlock(mm);
- return NULL;
- }
- mmap_write_downgrade(mm);
- } else
- mmap_read_lock(mm);
+ if (!mmap_read_lock_maybe_expand(mm, vma, pos, write))
+ return NULL;
/*
* We are doing an exec(). 'current' is the process
@@ -250,60 +188,6 @@ static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}
-static int __bprm_mm_init(struct linux_binprm *bprm)
-{
- int err;
- struct vm_area_struct *vma = NULL;
- struct mm_struct *mm = bprm->mm;
-
- bprm->vma = vma = vm_area_alloc(mm);
- if (!vma)
- return -ENOMEM;
- vma_set_anonymous(vma);
-
- if (mmap_write_lock_killable(mm)) {
- err = -EINTR;
- goto err_free;
- }
-
- /*
- * Need to be called with mmap write lock
- * held, to avoid race with ksmd.
- */
- err = ksm_execve(mm);
- if (err)
- goto err_ksm;
-
- /*
- * Place the stack at the largest stack address the architecture
- * supports. Later, we'll move this to an appropriate place. We don't
- * use STACK_TOP because that can depend on attributes which aren't
- * configured yet.
- */
- BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
- vma->vm_end = STACK_TOP_MAX;
- vma->vm_start = vma->vm_end - PAGE_SIZE;
- vm_flags_init(vma, VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP);
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-
- err = insert_vm_struct(mm, vma);
- if (err)
- goto err;
-
- mm->stack_vm = mm->total_vm = 1;
- mmap_write_unlock(mm);
- bprm->p = vma->vm_end - sizeof(void *);
- return 0;
-err:
- ksm_exit(mm);
-err_ksm:
- mmap_write_unlock(mm);
-err_free:
- bprm->vma = NULL;
- vm_area_free(vma);
- return err;
-}
-
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
return len <= MAX_ARG_STRLEN;
@@ -356,12 +240,6 @@ static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
{
}
-static int __bprm_mm_init(struct linux_binprm *bprm)
-{
- bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
- return 0;
-}
-
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
return len <= bprm->p;
@@ -390,9 +268,13 @@ static int bprm_mm_init(struct linux_binprm *bprm)
bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK];
task_unlock(current->group_leader);
- err = __bprm_mm_init(bprm);
+#ifndef CONFIG_MMU
+ bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
+#else
+ err = create_init_stack_vma(bprm->mm, &bprm->vma, &bprm->p);
if (err)
goto err;
+#endif
return 0;
@@ -717,12 +599,12 @@ int setup_arg_pages(struct linux_binprm *bprm,
unsigned long stack_top,
int executable_stack)
{
- unsigned long ret;
+ int ret;
unsigned long stack_shift;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = bprm->vma;
struct vm_area_struct *prev = NULL;
- unsigned long vm_flags;
+ vm_flags_t vm_flags;
unsigned long stack_base;
unsigned long stack_size;
unsigned long stack_expand;
@@ -763,8 +645,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
mm->arg_start = bprm->p;
#endif
- if (bprm->loader)
- bprm->loader -= stack_shift;
bprm->exec -= stack_shift;
if (mmap_write_lock_killable(mm))
@@ -892,7 +772,8 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
.lookup_flags = LOOKUP_FOLLOW,
};
- if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
+ if ((flags &
+ ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH | AT_EXECVE_CHECK)) != 0)
return ERR_PTR(-EINVAL);
if (flags & AT_SYMLINK_NOFOLLOW)
open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
@@ -903,16 +784,18 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
if (IS_ERR(file))
return file;
+ if (path_noexec(&file->f_path))
+ return ERR_PTR(-EACCES);
+
/*
* In the past the regular type check was here. It moved to may_open() in
* 633fb6ac3980 ("exec: move S_ISREG() check earlier"). Since then it is
* an invariant that all non-regular files error out before we get here.
*/
- if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)) ||
- path_noexec(&file->f_path))
+ if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)))
return ERR_PTR(-EACCES);
- err = deny_write_access(file);
+ err = exe_file_deny_write_access(file);
if (err)
return ERR_PTR(err);
@@ -927,7 +810,7 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
* Returns ERR_PTR on failure or allocated struct file on success.
*
* As this is a wrapper for the internal do_open_execat(), callers
- * must call allow_write_access() before fput() on release. Also see
+ * must call exe_file_allow_write_access() before fput() on release. Also see
* do_close_execat().
*/
struct file *open_exec(const char *name)
@@ -1194,16 +1077,16 @@ static int unshare_sighand(struct task_struct *me)
}
/*
- * These functions flushes out all traces of the currently running executable
- * so that a new one can be started
+ * This is unlocked -- the string will always be NUL-terminated, but
+ * may show overlapping contents if racing concurrent reads.
*/
-
void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
{
- task_lock(tsk);
+ size_t len = min(strlen(buf), sizeof(tsk->comm) - 1);
+
trace_task_rename(tsk, buf);
- strscpy_pad(tsk->comm, buf, sizeof(tsk->comm));
- task_unlock(tsk);
+ memcpy(tsk->comm, buf, len);
+ memset(&tsk->comm[len], 0, sizeof(tsk->comm) - len);
perf_event_comm(tsk, exec);
}
@@ -1236,13 +1119,12 @@ int begin_new_exec(struct linux_binprm * bprm)
*/
bprm->point_of_no_return = true;
- /*
- * Make this the only thread in the thread group.
- */
+ /* Make this the only thread in the thread group */
retval = de_thread(me);
if (retval)
goto out;
-
+ /* see the comment in check_unsafe_exec() */
+ current->fs->in_exec = 0;
/*
* Cancel any io_uring activity across execve
*/
@@ -1341,7 +1223,28 @@ int begin_new_exec(struct linux_binprm * bprm)
set_dumpable(current->mm, SUID_DUMP_USER);
perf_event_exec();
- __set_task_comm(me, kbasename(bprm->filename), true);
+
+ /*
+ * If the original filename was empty, alloc_bprm() made up a path
+ * that will probably not be useful to admins running ps or similar.
+ * Let's fix it up to be something reasonable.
+ */
+ if (bprm->comm_from_dentry) {
+ /*
+ * Hold RCU lock to keep the name from being freed behind our back.
+ * Use acquire semantics to make sure the terminating NUL from
+ * __d_alloc() is seen.
+ *
+ * Note, we're deliberately sloppy here. We don't need to care about
+ * detecting a concurrent rename and just want a terminated name.
+ */
+ rcu_read_lock();
+ __set_task_comm(me, smp_load_acquire(&bprm->file->f_path.dentry->d_name.name),
+ true);
+ rcu_read_unlock();
+ } else {
+ __set_task_comm(me, kbasename(bprm->filename), true);
+ }
/* An exec changes our domain. We are no longer part of the thread
group */
@@ -1377,10 +1280,9 @@ int begin_new_exec(struct linux_binprm * bprm)
/* Pass the opened binary to the interpreter. */
if (bprm->have_execfd) {
- retval = get_unused_fd_flags(0);
+ retval = FD_ADD(0, bprm->executable);
if (retval < 0)
goto out_unlock;
- fd_install(retval, bprm->executable);
bprm->executable = NULL;
bprm->execfd = retval;
}
@@ -1471,7 +1373,7 @@ static void do_close_execat(struct file *file)
{
if (!file)
return;
- allow_write_access(file);
+ exe_file_allow_write_access(file);
fput(file);
}
@@ -1483,6 +1385,8 @@ static void free_bprm(struct linux_binprm *bprm)
}
free_arg_pages(bprm);
if (bprm->cred) {
+ /* in case exec fails before de_thread() succeeds */
+ current->fs->in_exec = 0;
mutex_unlock(&current->signal->cred_guard_mutex);
abort_creds(bprm->cred);
}
@@ -1517,11 +1421,13 @@ static struct linux_binprm *alloc_bprm(int fd, struct filename *filename, int fl
if (fd == AT_FDCWD || filename->name[0] == '/') {
bprm->filename = filename->name;
} else {
- if (filename->name[0] == '\0')
+ if (filename->name[0] == '\0') {
bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
- else
+ bprm->comm_from_dentry = 1;
+ } else {
bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
fd, filename->name);
+ }
if (!bprm->fdpath)
goto out_free;
@@ -1541,6 +1447,21 @@ static struct linux_binprm *alloc_bprm(int fd, struct filename *filename, int fl
}
bprm->interp = bprm->filename;
+ /*
+ * At this point, security_file_open() has already been called (with
+ * __FMODE_EXEC) and access control checks for AT_EXECVE_CHECK will
+ * stop just after the security_bprm_creds_for_exec() call in
+ * bprm_execve(). Indeed, the kernel should not try to parse the
+ * content of the file with exec_binprm() nor change the calling
+ * thread, which means that the following security functions will not
+ * be called:
+ * - security_bprm_check()
+ * - security_bprm_creds_from_file()
+ * - security_bprm_committing_creds()
+ * - security_bprm_committed_creds()
+ */
+ bprm->is_check = !!(flags & AT_EXECVE_CHECK);
+
retval = bprm_mm_init(bprm);
if (!retval)
return bprm;
@@ -1587,9 +1508,13 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
* suid exec because the differently privileged task
* will be able to manipulate the current directory, etc.
* It would be nice to force an unshare instead...
+ *
+ * Otherwise we set fs->in_exec = 1 to deny clone(CLONE_FS)
+ * from another sub-thread until de_thread() succeeds; this
+ * state is protected by the cred_guard_mutex we hold.
*/
n_fs = 1;
- spin_lock(&p->fs->lock);
+ read_seqlock_excl(&p->fs->seq);
rcu_read_lock();
for_other_threads(p, t) {
if (t->fs == p->fs)
@@ -1602,7 +1527,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
bprm->unsafe |= LSM_UNSAFE_SHARE;
else
p->fs->in_exec = 1;
- spin_unlock(&p->fs->lock);
+ read_sequnlock_excl(&p->fs->seq);
}
static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
@@ -1719,13 +1644,11 @@ int remove_arg_zero(struct linux_binprm *bprm)
}
EXPORT_SYMBOL(remove_arg_zero);
-#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
/*
* cycle the list of binary formats handler, until one recognizes the image
*/
static int search_binary_handler(struct linux_binprm *bprm)
{
- bool need_retry = IS_ENABLED(CONFIG_MODULES);
struct linux_binfmt *fmt;
int retval;
@@ -1737,8 +1660,6 @@ static int search_binary_handler(struct linux_binprm *bprm)
if (retval)
return retval;
- retval = -ENOENT;
- retry:
read_lock(&binfmt_lock);
list_for_each_entry(fmt, &formats, lh) {
if (!try_module_get(fmt->module))
@@ -1756,17 +1677,7 @@ static int search_binary_handler(struct linux_binprm *bprm)
}
read_unlock(&binfmt_lock);
- if (need_retry) {
- if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
- printable(bprm->buf[2]) && printable(bprm->buf[3]))
- return retval;
- if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
- return retval;
- need_retry = false;
- goto retry;
- }
-
- return retval;
+ return -ENOEXEC;
}
/* binfmt handlers will call back into begin_new_exec() on success. */
@@ -1797,7 +1708,7 @@ static int exec_binprm(struct linux_binprm *bprm)
bprm->file = bprm->interpreter;
bprm->interpreter = NULL;
- allow_write_access(exec);
+ exe_file_allow_write_access(exec);
if (unlikely(bprm->have_execfd)) {
if (bprm->executable) {
fput(exec);
@@ -1836,7 +1747,7 @@ static int bprm_execve(struct linux_binprm *bprm)
/* Set the unchanging part of bprm->cred */
retval = security_bprm_creds_for_exec(bprm);
- if (retval)
+ if (retval || bprm->is_check)
goto out;
retval = exec_binprm(bprm);
@@ -1844,10 +1755,9 @@ static int bprm_execve(struct linux_binprm *bprm)
goto out;
sched_mm_cid_after_execve(current);
+ rseq_execve(current);
/* execve succeeded */
- current->fs->in_exec = 0;
current->in_execve = 0;
- rseq_execve(current);
user_events_execve(current);
acct_update_integrals(current);
task_numa_free(current, false);
@@ -1864,7 +1774,7 @@ out:
force_fatal_sig(SIGSEGV);
sched_mm_cid_after_execve(current);
- current->fs->in_exec = 0;
+ rseq_force_update();
current->in_execve = 0;
return retval;
@@ -1904,9 +1814,6 @@ static int do_execveat_common(int fd, struct filename *filename,
}
retval = count(argv, MAX_ARG_STRINGS);
- if (retval == 0)
- pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n",
- current->comm, bprm->filename);
if (retval < 0)
goto out_free;
bprm->argc = retval;
@@ -1944,6 +1851,9 @@ static int do_execveat_common(int fd, struct filename *filename,
if (retval < 0)
goto out_free;
bprm->argc = 1;
+
+ pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n",
+ current->comm, bprm->filename);
}
retval = bprm_execve(bprm);
@@ -2088,7 +1998,7 @@ void set_dumpable(struct mm_struct *mm, int value)
if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
return;
- set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value);
+ __mm_flags_set_mask_dumpable(mm, value);
}
SYSCALL_DEFINE3(execve,
@@ -2137,12 +2047,12 @@ static int proc_dointvec_minmax_coredump(const struct ctl_table *table, int writ
{
int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- if (!error)
+ if (!error && write)
validate_coredump_safety();
return error;
}
-static struct ctl_table fs_exec_sysctls[] = {
+static const struct ctl_table fs_exec_sysctls[] = {
{
.procname = "suid_dumpable",
.data = &suid_dumpable,
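
The new __set_task_comm() above drops the task lock and instead copies at most sizeof(comm) - 1 bytes followed by a zero-filled tail, so a racing reader always finds a terminating NUL even if it observes a mix of old and new bytes. A minimal userspace sketch of that memcpy-plus-memset pair, assuming a 16-byte comm buffer:

#include <stdio.h>
#include <string.h>

#define TASK_COMM_LEN	16

/* Truncate to 15 bytes and zero the remainder, so comm is always NUL-terminated. */
static void set_comm(char comm[TASK_COMM_LEN], const char *buf)
{
	size_t len = strlen(buf);

	if (len > TASK_COMM_LEN - 1)
		len = TASK_COMM_LEN - 1;
	memcpy(comm, buf, len);
	memset(comm + len, 0, TASK_COMM_LEN - len);
}

int main(void)
{
	char comm[TASK_COMM_LEN] = "old-name-xxxxxx";

	set_comm(comm, "a-much-longer-new-name");
	printf("%s\n", comm);	/* truncated to 15 chars plus NUL */
	return 0;
}
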
diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
index ce9be95c9172..5429041c7eaf 100644
--- a/fs/exfat/balloc.c
+++ b/fs/exfat/balloc.c
@@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/buffer_head.h>
+#include <linux/backing-dev.h>
#include "exfat_raw.h"
#include "exfat_fs.h"
@@ -26,13 +27,58 @@
/*
* Allocation Bitmap Management Functions
*/
+static bool exfat_test_bitmap_range(struct super_block *sb, unsigned int clu,
+ unsigned int count)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ unsigned int start = clu;
+ unsigned int end = clu + count;
+ unsigned int ent_idx, i, b;
+ unsigned int bit_offset, bits_to_check;
+ __le_long *bitmap_le;
+ unsigned long mask, word;
+
+ if (!is_valid_cluster(sbi, start) || !is_valid_cluster(sbi, end - 1))
+ return false;
+
+ while (start < end) {
+ ent_idx = CLUSTER_TO_BITMAP_ENT(start);
+ i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
+ b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
+
+ bitmap_le = (__le_long *)sbi->vol_amap[i]->b_data;
+
+ /* Calculate how many bits we can check in the current word */
+ bit_offset = b % BITS_PER_LONG;
+ bits_to_check = min(end - start,
+ (unsigned int)(BITS_PER_LONG - bit_offset));
+
+ /* Create a bitmask for the range of bits to check */
+ if (bits_to_check >= BITS_PER_LONG)
+ mask = ~0UL;
+ else
+ mask = ((1UL << bits_to_check) - 1) << bit_offset;
+ word = lel_to_cpu(bitmap_le[b / BITS_PER_LONG]);
+
+ /* Check if all bits in the mask are set */
+ if ((word & mask) != mask)
+ return false;
+
+ start += bits_to_check;
+ }
+
+ return true;
+}
+
static int exfat_allocate_bitmap(struct super_block *sb,
struct exfat_dentry *ep)
{
struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct blk_plug plug;
long long map_size;
- unsigned int i, need_map_size;
+ unsigned int i, j, need_map_size;
sector_t sector;
+ unsigned int max_ra_count;
sbi->map_clu = le32_to_cpu(ep->dentry.bitmap.start_clu);
map_size = le64_to_cpu(ep->dentry.bitmap.size);
@@ -56,22 +102,37 @@ static int exfat_allocate_bitmap(struct super_block *sb,
return -ENOMEM;
sector = exfat_cluster_to_sector(sbi, sbi->map_clu);
+ max_ra_count = min(sb->s_bdi->ra_pages, sb->s_bdi->io_pages) <<
+ (PAGE_SHIFT - sb->s_blocksize_bits);
for (i = 0; i < sbi->map_sectors; i++) {
- sbi->vol_amap[i] = sb_bread(sb, sector + i);
- if (!sbi->vol_amap[i]) {
- /* release all buffers and free vol_amap */
- int j = 0;
-
- while (j < i)
- brelse(sbi->vol_amap[j++]);
-
- kvfree(sbi->vol_amap);
- sbi->vol_amap = NULL;
- return -EIO;
+ /* Trigger the next readahead in advance. */
+ if (max_ra_count && (i % max_ra_count) == 0) {
+ blk_start_plug(&plug);
+ for (j = i; j < min(max_ra_count, sbi->map_sectors - i) + i; j++)
+ sb_breadahead(sb, sector + j);
+ blk_finish_plug(&plug);
}
+
+ sbi->vol_amap[i] = sb_bread(sb, sector + i);
+ if (!sbi->vol_amap[i])
+ goto err_out;
}
+ if (exfat_test_bitmap_range(sb, sbi->map_clu,
+ EXFAT_B_TO_CLU_ROUND_UP(map_size, sbi)) == false)
+ goto err_out;
+
return 0;
+
+err_out:
+ j = 0;
+ /* release all buffers and free vol_amap */
+ while (j < i)
+ brelse(sbi->vol_amap[j++]);
+
+ kvfree(sbi->vol_amap);
+ sbi->vol_amap = NULL;
+ return -EIO;
}
int exfat_load_bitmap(struct super_block *sb)
@@ -122,11 +183,10 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
kvfree(sbi->vol_amap);
}
-int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync)
+int exfat_set_bitmap(struct super_block *sb, unsigned int clu, bool sync)
{
int i, b;
unsigned int ent_idx;
- struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
if (!is_valid_cluster(sbi, clu))
@@ -141,36 +201,49 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync)
return 0;
}
-void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
+int exfat_clear_bitmap(struct super_block *sb, unsigned int clu, bool sync)
{
int i, b;
unsigned int ent_idx;
- struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct exfat_mount_options *opts = &sbi->options;
if (!is_valid_cluster(sbi, clu))
- return;
+ return -EIO;
ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
+ if (!test_bit_le(b, sbi->vol_amap[i]->b_data))
+ return -EIO;
+
clear_bit_le(b, sbi->vol_amap[i]->b_data);
+
exfat_update_bh(sbi->vol_amap[i], sync);
- if (opts->discard) {
- int ret_discard;
+ return 0;
+}
- ret_discard = sb_issue_discard(sb,
- exfat_cluster_to_sector(sbi, clu),
- (1 << sbi->sect_per_clus_bits), GFP_NOFS, 0);
+bool exfat_test_bitmap(struct super_block *sb, unsigned int clu)
+{
+ int i, b;
+ unsigned int ent_idx;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
- if (ret_discard == -EOPNOTSUPP) {
- exfat_err(sb, "discard not supported by device, disabling");
- opts->discard = 0;
- }
- }
+ if (!sbi->vol_amap)
+ return true;
+
+ if (!is_valid_cluster(sbi, clu))
+ return false;
+
+ ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
+ i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
+ b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
+
+ if (!test_bit_le(b, sbi->vol_amap[i]->b_data))
+ return false;
+
+ return true;
}
/*
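
exfat_test_bitmap_range() above checks a run of clusters one bitmap word at a time: it builds a mask for the bits of the range that land in the current word and fails as soon as any of them is clear. A simplified userspace sketch over a flat in-memory bitmap follows, assuming a 64-bit unsigned long; the on-disk little-endian sector handling is omitted.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)

/* Return true iff every bit in [start, end) is set. */
static bool all_bits_set(const unsigned long *bitmap,
			 unsigned int start, unsigned int end)
{
	while (start < end) {
		unsigned int off = start % BITS_PER_WORD;
		unsigned int n = end - start;
		unsigned long mask;

		if (n > BITS_PER_WORD - off)
			n = BITS_PER_WORD - off;	/* clamp to this word */
		mask = (n >= BITS_PER_WORD) ? ~0UL : (((1UL << n) - 1) << off);
		if ((bitmap[start / BITS_PER_WORD] & mask) != mask)
			return false;
		start += n;
	}
	return true;
}

int main(void)
{
	unsigned long bitmap[2] = { ~0UL, 0x3 };	/* bits 0..65 set (64-bit long) */

	printf("%d\n", all_bits_set(bitmap, 4, 66));	/* 1 */
	printf("%d\n", all_bits_set(bitmap, 4, 67));	/* 0 */
	return 0;
}
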
diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
index fe0a9b8a0cd0..3045a58e124a 100644
--- a/fs/exfat/dir.c
+++ b/fs/exfat/dir.c
@@ -122,7 +122,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
type = exfat_get_entry_type(ep);
if (type == TYPE_UNUSED) {
brelse(bh);
- break;
+ goto out;
}
if (type != TYPE_FILE && type != TYPE_DIR) {
@@ -170,6 +170,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
}
}
+out:
dir_entry->namebuf.lfn[0] = '\0';
*cpos = EXFAT_DEN_TO_B(dentry);
return 0;
@@ -603,6 +604,11 @@ static int exfat_find_location(struct super_block *sb, struct exfat_chain *p_dir
if (ret)
return ret;
+ if (!exfat_test_bitmap(sb, clu)) {
+ exfat_err(sb, "failed to test cluster bit(%u)", clu);
+ return -EIO;
+ }
+
/* byte offset in cluster */
off = EXFAT_CLU_OFFSET(off, sbi);
@@ -995,6 +1001,7 @@ int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei,
struct exfat_hint_femp candi_empty;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
int num_entries = exfat_calc_num_entries(p_uniname);
+ unsigned int clu_count = 0;
if (num_entries < 0)
return num_entries;
@@ -1132,6 +1139,10 @@ rewind:
} else {
if (exfat_get_next_cluster(sb, &clu.dir))
return -EIO;
+
+ /* break if the cluster chain includes a loop */
+ if (unlikely(++clu_count > EXFAT_DATA_CLUSTER_COUNT(sbi)))
+ goto not_found;
}
}
@@ -1194,6 +1205,7 @@ int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir)
int i, count = 0;
int dentries_per_clu;
unsigned int entry_type;
+ unsigned int clu_count = 0;
struct exfat_chain clu;
struct exfat_dentry *ep;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
@@ -1226,8 +1238,174 @@ int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir)
} else {
if (exfat_get_next_cluster(sb, &(clu.dir)))
return -EIO;
+
+ if (unlikely(++clu_count > sbi->used_clusters)) {
+ exfat_fs_error(sb, "FAT or bitmap is corrupted");
+ return -EIO;
+ }
+
}
}
return count;
}
+
+static int exfat_get_volume_label_dentry(struct super_block *sb,
+ struct exfat_entry_set_cache *es)
+{
+ int i;
+ int dentry = 0;
+ unsigned int type;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_hint_femp hint_femp;
+ struct exfat_inode_info *ei = EXFAT_I(sb->s_root->d_inode);
+ struct exfat_chain clu;
+ struct exfat_dentry *ep;
+ struct buffer_head *bh;
+
+ hint_femp.eidx = EXFAT_HINT_NONE;
+ exfat_chain_set(&clu, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
+
+ while (clu.dir != EXFAT_EOF_CLUSTER) {
+ for (i = 0; i < sbi->dentries_per_clu; i++, dentry++) {
+ ep = exfat_get_dentry(sb, &clu, i, &bh);
+ if (!ep)
+ return -EIO;
+
+ type = exfat_get_entry_type(ep);
+ if (hint_femp.eidx == EXFAT_HINT_NONE) {
+ if (type == TYPE_DELETED || type == TYPE_UNUSED) {
+ hint_femp.cur = clu;
+ hint_femp.eidx = dentry;
+ hint_femp.count = 1;
+ }
+ }
+
+ if (type == TYPE_UNUSED) {
+ brelse(bh);
+ goto not_found;
+ }
+
+ if (type != TYPE_VOLUME) {
+ brelse(bh);
+ continue;
+ }
+
+ memset(es, 0, sizeof(*es));
+ es->sb = sb;
+ es->bh = es->__bh;
+ es->bh[0] = bh;
+ es->num_bh = 1;
+ es->start_off = EXFAT_DEN_TO_B(i) % sb->s_blocksize;
+
+ return 0;
+ }
+
+ if (exfat_get_next_cluster(sb, &(clu.dir)))
+ return -EIO;
+ }
+
+not_found:
+ if (hint_femp.eidx == EXFAT_HINT_NONE) {
+ hint_femp.cur.dir = EXFAT_EOF_CLUSTER;
+ hint_femp.eidx = dentry;
+ hint_femp.count = 0;
+ }
+
+ ei->hint_femp = hint_femp;
+
+ return -ENOENT;
+}
+
+int exfat_read_volume_label(struct super_block *sb, struct exfat_uni_name *label_out)
+{
+ int ret, i;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_entry_set_cache es;
+ struct exfat_dentry *ep;
+
+ mutex_lock(&sbi->s_lock);
+
+ memset(label_out, 0, sizeof(*label_out));
+ ret = exfat_get_volume_label_dentry(sb, &es);
+ if (ret < 0) {
+ /*
+ * ENOENT signifies that a volume label dentry doesn't exist.
+ * We will treat this as an empty volume label and not fail.
+ */
+ if (ret == -ENOENT)
+ ret = 0;
+
+ goto unlock;
+ }
+
+ ep = exfat_get_dentry_cached(&es, 0);
+ label_out->name_len = ep->dentry.volume_label.char_count;
+ if (label_out->name_len > EXFAT_VOLUME_LABEL_LEN) {
+ ret = -EIO;
+ exfat_put_dentry_set(&es, false);
+ goto unlock;
+ }
+
+ for (i = 0; i < label_out->name_len; i++)
+ label_out->name[i] = le16_to_cpu(ep->dentry.volume_label.volume_label[i]);
+
+ exfat_put_dentry_set(&es, false);
+unlock:
+ mutex_unlock(&sbi->s_lock);
+ return ret;
+}
+
+int exfat_write_volume_label(struct super_block *sb,
+ struct exfat_uni_name *label)
+{
+ int ret, i;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct inode *root_inode = sb->s_root->d_inode;
+ struct exfat_entry_set_cache es;
+ struct exfat_chain clu;
+ struct exfat_dentry *ep;
+
+ if (label->name_len > EXFAT_VOLUME_LABEL_LEN)
+ return -EINVAL;
+
+ mutex_lock(&sbi->s_lock);
+
+ ret = exfat_get_volume_label_dentry(sb, &es);
+ if (ret == -ENOENT) {
+ if (label->name_len == 0) {
+ /* No volume label dentry, no need to clear */
+ ret = 0;
+ goto unlock;
+ }
+
+ ret = exfat_find_empty_entry(root_inode, &clu, 1, &es);
+ }
+
+ if (ret < 0)
+ goto unlock;
+
+ ep = exfat_get_dentry_cached(&es, 0);
+
+ if (label->name_len == 0 && ep->dentry.volume_label.char_count == 0) {
+ /* the volume label has already been cleared */
+ exfat_put_dentry_set(&es, 0);
+ goto unlock;
+ }
+
+ memset(ep, 0, sizeof(*ep));
+ ep->type = EXFAT_VOLUME;
+
+ for (i = 0; i < label->name_len; i++)
+ ep->dentry.volume_label.volume_label[i] =
+ cpu_to_le16(label->name[i]);
+
+ ep->dentry.volume_label.char_count = label->name_len;
+ es.modified = true;
+
+ ret = exfat_put_dentry_set(&es, IS_DIRSYNC(root_inode));
+
+unlock:
+ mutex_unlock(&sbi->s_lock);
+ return ret;
+}
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index 78be6964a8a0..176fef62574c 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -14,8 +14,6 @@
#define EXFAT_ROOT_INO 1
-#define EXFAT_CLUSTERS_UNTRACKED (~0u)
-
/*
* exfat error flags
*/
@@ -31,7 +29,6 @@ enum exfat_error_mode {
enum {
NLS_NAME_NO_LOSSY = 0, /* no lossy */
NLS_NAME_LOSSY = 1 << 0, /* just detected incorrect filename(s) */
- NLS_NAME_OVERLEN = 1 << 1, /* the length is over than its limit */
};
#define EXFAT_HASH_BITS 8
@@ -455,8 +452,9 @@ int exfat_count_num_clusters(struct super_block *sb,
/* balloc.c */
int exfat_load_bitmap(struct super_block *sb);
void exfat_free_bitmap(struct exfat_sb_info *sbi);
-int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync);
-void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync);
+int exfat_set_bitmap(struct super_block *sb, unsigned int clu, bool sync);
+int exfat_clear_bitmap(struct super_block *sb, unsigned int clu, bool sync);
+bool exfat_test_bitmap(struct super_block *sb, unsigned int clu);
unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu);
int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count);
int exfat_trim_fs(struct inode *inode, struct fstrim_range *range);
@@ -479,6 +477,9 @@ int exfat_force_shutdown(struct super_block *sb, u32 flags);
/* namei.c */
extern const struct dentry_operations exfat_dentry_ops;
extern const struct dentry_operations exfat_utf8_dentry_ops;
+int exfat_find_empty_entry(struct inode *inode,
+ struct exfat_chain *p_dir, int num_entries,
+ struct exfat_entry_set_cache *es);
/* cache.c */
int exfat_cache_init(void);
@@ -519,6 +520,10 @@ int exfat_get_empty_dentry_set(struct exfat_entry_set_cache *es,
unsigned int num_entries);
int exfat_put_dentry_set(struct exfat_entry_set_cache *es, int sync);
int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir);
+int exfat_read_volume_label(struct super_block *sb,
+ struct exfat_uni_name *label_out);
+int exfat_write_volume_label(struct super_block *sb,
+ struct exfat_uni_name *label);
/* inode.c */
extern const struct inode_operations exfat_file_inode_operations;
diff --git a/fs/exfat/exfat_raw.h b/fs/exfat/exfat_raw.h
index 971a1ccd0e89..4082fa7b8c14 100644
--- a/fs/exfat/exfat_raw.h
+++ b/fs/exfat/exfat_raw.h
@@ -80,6 +80,7 @@
#define BOOTSEC_OLDBPB_LEN 53
#define EXFAT_FILE_NAME_LEN 15
+#define EXFAT_VOLUME_LABEL_LEN 11
#define EXFAT_MIN_SECT_SIZE_BITS 9
#define EXFAT_MAX_SECT_SIZE_BITS 12
@@ -160,6 +161,11 @@ struct exfat_dentry {
__le64 size;
} __packed upcase; /* up-case table directory entry */
struct {
+ __u8 char_count;
+ __le16 volume_label[EXFAT_VOLUME_LABEL_LEN];
+ __u8 reserved[8];
+ } __packed volume_label; /* volume label directory entry */
+ struct {
__u8 flags;
__u8 vendor_guid[16];
__u8 vendor_defined[14];
diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
index 773c320d68f3..c9c5f2e3a05e 100644
--- a/fs/exfat/fatent.c
+++ b/fs/exfat/fatent.c
@@ -89,35 +89,36 @@ int exfat_ent_get(struct super_block *sb, unsigned int loc,
int err;
if (!is_valid_cluster(sbi, loc)) {
- exfat_fs_error(sb, "invalid access to FAT (entry 0x%08x)",
+ exfat_fs_error_ratelimit(sb,
+ "invalid access to FAT (entry 0x%08x)",
loc);
return -EIO;
}
err = __exfat_ent_get(sb, loc, content);
if (err) {
- exfat_fs_error(sb,
+ exfat_fs_error_ratelimit(sb,
"failed to access to FAT (entry 0x%08x, err:%d)",
loc, err);
return err;
}
if (*content == EXFAT_FREE_CLUSTER) {
- exfat_fs_error(sb,
+ exfat_fs_error_ratelimit(sb,
"invalid access to FAT free cluster (entry 0x%08x)",
loc);
return -EIO;
}
if (*content == EXFAT_BAD_CLUSTER) {
- exfat_fs_error(sb,
+ exfat_fs_error_ratelimit(sb,
"invalid access to FAT bad cluster (entry 0x%08x)",
loc);
return -EIO;
}
if (*content != EXFAT_EOF_CLUSTER && !is_valid_cluster(sbi, *content)) {
- exfat_fs_error(sb,
+ exfat_fs_error_ratelimit(sb,
"invalid access to FAT (entry 0x%08x) bogus content (0x%08x)",
loc, *content);
return -EIO;
@@ -144,6 +145,20 @@ int exfat_chain_cont_cluster(struct super_block *sb, unsigned int chain,
return 0;
}
+static inline void exfat_discard_cluster(struct super_block *sb,
+ unsigned int clu, unsigned int num_clusters)
+{
+ int ret;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ ret = sb_issue_discard(sb, exfat_cluster_to_sector(sbi, clu),
+ sbi->sect_per_clus * num_clusters, GFP_NOFS, 0);
+ if (ret == -EOPNOTSUPP) {
+ exfat_err(sb, "discard not supported by device, disabling");
+ sbi->options.discard = 0;
+ }
+}
+
/* This function must be called with bitmap_lock held */
static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain)
{
@@ -175,6 +190,7 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
BITMAP_OFFSET_SECTOR_INDEX(sb, CLUSTER_TO_BITMAP_ENT(clu));
if (p_chain->flags == ALLOC_NO_FAT_CHAIN) {
+ int err;
unsigned int last_cluster = p_chain->dir + p_chain->size - 1;
do {
bool sync = false;
@@ -189,11 +205,18 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
cur_cmap_i = next_cmap_i;
}
- exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode)));
+ err = exfat_clear_bitmap(sb, clu, (sync && IS_DIRSYNC(inode)));
+ if (err)
+ break;
clu++;
num_clusters++;
} while (num_clusters < p_chain->size);
+
+ if (sbi->options.discard)
+ exfat_discard_cluster(sb, p_chain->dir, p_chain->size);
} else {
+ unsigned int nr_clu = 1;
+
do {
bool sync = false;
unsigned int n_clu = clu;
@@ -210,16 +233,36 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
cur_cmap_i = next_cmap_i;
}
- exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode)));
+ if (exfat_clear_bitmap(sb, clu, (sync && IS_DIRSYNC(inode))))
+ break;
+
+ if (sbi->options.discard) {
+ if (n_clu == clu + 1)
+ nr_clu++;
+ else {
+ exfat_discard_cluster(sb, clu - nr_clu + 1, nr_clu);
+ nr_clu = 1;
+ }
+ }
+
clu = n_clu;
num_clusters++;
if (err)
- goto dec_used_clus;
+ break;
+
+ if (num_clusters >= sbi->num_clusters - EXFAT_FIRST_CLUSTER) {
+ /*
+ * The cluster chain includes a loop; scan the
+ * bitmap to get the number of used clusters.
+ */
+ exfat_count_used_clusters(sb, &sbi->used_clusters);
+
+ return 0;
+ }
} while (clu != EXFAT_EOF_CLUSTER);
}
-dec_used_clus:
sbi->used_clusters -= num_clusters;
return 0;
}
@@ -252,7 +295,7 @@ int exfat_find_last_cluster(struct super_block *sb, struct exfat_chain *p_chain,
clu = next;
if (exfat_ent_get(sb, clu, &next))
return -EIO;
- } while (next != EXFAT_EOF_CLUSTER);
+ } while (next != EXFAT_EOF_CLUSTER && count <= p_chain->size);
if (p_chain->size != count) {
exfat_fs_error(sb,
@@ -366,7 +409,7 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
}
/* update allocation bitmap */
- if (exfat_set_bitmap(inode, new_clu, sync_bmap)) {
+ if (exfat_set_bitmap(sb, new_clu, sync_bmap)) {
ret = -EIO;
goto free_cluster;
}
@@ -448,5 +491,15 @@ int exfat_count_num_clusters(struct super_block *sb,
}
*ret_count = count;
+
+ /*
+ * since exfat_count_used_clusters() is not called, sbi->used_clusters
+ * cannot be used here.
+ */
+ if (unlikely(i == sbi->num_clusters && clu != EXFAT_EOF_CLUSTER)) {
+ exfat_fs_error(sb, "The cluster chain has a loop");
+ return -EIO;
+ }
+
return 0;
}
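
The fatent.c hunks above all bound FAT chain walks by the number of data clusters, so a corrupted chain that loops back on itself is reported instead of spinning forever. A minimal user-space sketch of that bound (the toy next[] array and EOF_CLUSTER value are illustrative stand-ins, not names taken from the patch):

#include <stdio.h>

#define EOF_CLUSTER 0xFFFFFFFFu

/*
 * Walk a cluster chain, but never take more steps than there are data
 * clusters. If EOF has not been reached by then, the chain must contain
 * a loop, so report it instead of iterating forever.
 */
static int count_chain(const unsigned int *next, unsigned int total,
                       unsigned int start, unsigned int *out_len)
{
    unsigned int clu = start, len = 0;

    while (clu != EOF_CLUSTER) {
        if (++len > total)
            return -1;      /* loop detected */
        clu = next[clu];
    }
    *out_len = len;
    return 0;
}

int main(void)
{
    unsigned int fat[4] = { 1, 2, 0, EOF_CLUSTER };  /* 2 -> 0 forms a loop */
    unsigned int len;

    if (count_chain(fat, 4, 0, &len))
        printf("cluster chain has a loop\n");
    else
        printf("chain length %u\n", len);
    return 0;
}

The same bound shows up as the count <= p_chain->size condition in exfat_find_last_cluster() and the num_clusters >= sbi->num_clusters - EXFAT_FIRST_CLUSTER bailout in __exfat_free_cluster().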
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index fb38769c3e39..536c8078f0c1 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -25,6 +25,8 @@ static int exfat_cont_expand(struct inode *inode, loff_t size)
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_chain clu;
+ truncate_pagecache(inode, i_size_read(inode));
+
ret = inode_newsize_ok(inode, size);
if (ret)
return ret;
@@ -486,6 +488,55 @@ static int exfat_ioctl_shutdown(struct super_block *sb, unsigned long arg)
return exfat_force_shutdown(sb, flags);
}
+static int exfat_ioctl_get_volume_label(struct super_block *sb, unsigned long arg)
+{
+ int ret;
+ char label[FSLABEL_MAX] = {0};
+ struct exfat_uni_name uniname;
+
+ ret = exfat_read_volume_label(sb, &uniname);
+ if (ret < 0)
+ return ret;
+
+ ret = exfat_utf16_to_nls(sb, &uniname, label, uniname.name_len);
+ if (ret < 0)
+ return ret;
+
+ if (copy_to_user((char __user *)arg, label, ret + 1))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int exfat_ioctl_set_volume_label(struct super_block *sb,
+ unsigned long arg)
+{
+ int ret = 0, lossy, label_len;
+ char label[FSLABEL_MAX] = {0};
+ struct exfat_uni_name uniname;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(label, (char __user *)arg, FSLABEL_MAX))
+ return -EFAULT;
+
+ memset(&uniname, 0, sizeof(uniname));
+ label_len = strnlen(label, FSLABEL_MAX - 1);
+ if (label[0]) {
+ ret = exfat_nls_to_utf16(sb, label, label_len,
+ &uniname, &lossy);
+ if (ret < 0)
+ return ret;
+ else if (lossy & NLS_NAME_LOSSY)
+ return -EINVAL;
+ }
+
+ uniname.name_len = ret;
+
+ return exfat_write_volume_label(sb, &uniname);
+}
+
long exfat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -500,6 +551,10 @@ long exfat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return exfat_ioctl_shutdown(inode->i_sb, arg);
case FITRIM:
return exfat_ioctl_fitrim(inode, arg);
+ case FS_IOC_GETFSLABEL:
+ return exfat_ioctl_get_volume_label(inode->i_sb, arg);
+ case FS_IOC_SETFSLABEL:
+ return exfat_ioctl_set_volume_label(inode->i_sb, arg);
default:
return -ENOTTY;
}
@@ -532,11 +587,10 @@ int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
return blkdev_issue_flush(inode->i_sb->s_bdev);
}
-static int exfat_extend_valid_size(struct file *file, loff_t new_valid_size)
+static int exfat_extend_valid_size(struct inode *inode, loff_t new_valid_size)
{
int err;
loff_t pos;
- struct inode *inode = file_inode(file);
struct exfat_inode_info *ei = EXFAT_I(inode);
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *ops = mapping->a_ops;
@@ -545,16 +599,20 @@ static int exfat_extend_valid_size(struct file *file, loff_t new_valid_size)
while (pos < new_valid_size) {
u32 len;
struct folio *folio;
+ unsigned long off;
len = PAGE_SIZE - (pos & (PAGE_SIZE - 1));
if (pos + len > new_valid_size)
len = new_valid_size - pos;
- err = ops->write_begin(file, mapping, pos, len, &folio, NULL);
+ err = ops->write_begin(NULL, mapping, pos, len, &folio, NULL);
if (err)
goto out;
- err = ops->write_end(file, mapping, pos, len, len, folio, NULL);
+ off = offset_in_folio(folio, pos);
+ folio_zero_new_buffers(folio, off, off + len);
+
+ err = ops->write_end(NULL, mapping, pos, len, len, folio, NULL);
if (err < 0)
goto out;
pos += len;
@@ -563,6 +621,8 @@ static int exfat_extend_valid_size(struct file *file, loff_t new_valid_size)
cond_resched();
}
+ return 0;
+
out:
return err;
}
@@ -576,12 +636,18 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
loff_t pos = iocb->ki_pos;
loff_t valid_size;
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
inode_lock(inode);
+ if (pos > i_size_read(inode))
+ truncate_pagecache(inode, i_size_read(inode));
+
valid_size = ei->valid_size;
ret = generic_write_checks(iocb, iter);
- if (ret < 0)
+ if (ret <= 0)
goto unlock;
if (iocb->ki_flags & IOCB_DIRECT) {
@@ -595,7 +661,7 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
}
if (pos > valid_size) {
- ret = exfat_extend_valid_size(file, pos);
+ ret = exfat_extend_valid_size(inode, pos);
if (ret < 0 && ret != -ENOSPC) {
exfat_err(inode->i_sb,
"write: fail to zero from %llu to %llu(%zd)",
@@ -614,9 +680,8 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
if (pos > valid_size)
pos = valid_size;
- if (iocb_is_dsync(iocb) && iocb->ki_pos > pos) {
- ssize_t err = vfs_fsync_range(file, pos, iocb->ki_pos - 1,
- iocb->ki_flags & IOCB_SYNC);
+ if (iocb->ki_pos > pos) {
+ ssize_t err = generic_write_sync(iocb, iocb->ki_pos - pos);
if (err < 0)
return err;
}
@@ -629,6 +694,16 @@ unlock:
return ret;
}
+static ssize_t exfat_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
+ return generic_file_read_iter(iocb, iter);
+}
+
static vm_fault_t exfat_page_mkwrite(struct vm_fault *vmf)
{
int err;
@@ -646,7 +721,7 @@ static vm_fault_t exfat_page_mkwrite(struct vm_fault *vmf)
start + vma->vm_end - vma->vm_start);
if (ei->valid_size < end) {
- err = exfat_extend_valid_size(file, end);
+ err = exfat_extend_valid_size(inode, end);
if (err < 0) {
inode_unlock(inode);
return vmf_fs_error(err);
@@ -664,24 +739,38 @@ static const struct vm_operations_struct exfat_file_vm_ops = {
.page_mkwrite = exfat_page_mkwrite,
};
-static int exfat_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int exfat_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
+
+ if (unlikely(exfat_forced_shutdown(file_inode(desc->file)->i_sb)))
+ return -EIO;
+
file_accessed(file);
- vma->vm_ops = &exfat_file_vm_ops;
+ desc->vm_ops = &exfat_file_vm_ops;
return 0;
}
+static ssize_t exfat_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len, unsigned int flags)
+{
+ if (unlikely(exfat_forced_shutdown(file_inode(in)->i_sb)))
+ return -EIO;
+
+ return filemap_splice_read(in, ppos, pipe, len, flags);
+}
+
const struct file_operations exfat_file_operations = {
.llseek = generic_file_llseek,
- .read_iter = generic_file_read_iter,
+ .read_iter = exfat_file_read_iter,
.write_iter = exfat_file_write_iter,
.unlocked_ioctl = exfat_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = exfat_compat_ioctl,
#endif
- .mmap = exfat_file_mmap,
+ .mmap_prepare = exfat_file_mmap_prepare,
.fsync = exfat_file_fsync,
- .splice_read = filemap_splice_read,
+ .splice_read = exfat_splice_read,
.splice_write = iter_file_splice_write,
};
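
With the FS_IOC_GETFSLABEL/FS_IOC_SETFSLABEL cases wired into exfat_ioctl(), the generic label ioctls from <linux/fs.h> now work on exfat mounts. A small user-space sketch of how a caller might drive them (the mount-point argument is a placeholder and error handling is minimal):

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>   /* FS_IOC_GETFSLABEL, FS_IOC_SETFSLABEL, FSLABEL_MAX */

int main(int argc, char **argv)
{
    char label[FSLABEL_MAX] = { 0 };
    int fd;

    if (argc < 2) {
        fprintf(stderr, "usage: %s <mount-point> [new-label]\n", argv[0]);
        return 1;
    }

    fd = open(argv[1], O_RDONLY);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    if (argc > 2) {
        /* Setting the label requires CAP_SYS_ADMIN, as enforced above */
        strncpy(label, argv[2], FSLABEL_MAX - 1);
        if (ioctl(fd, FS_IOC_SETFSLABEL, label) < 0)
            perror("FS_IOC_SETFSLABEL");
    }

    if (ioctl(fd, FS_IOC_GETFSLABEL, label) == 0)
        printf("volume label: %s\n", label);
    else
        perror("FS_IOC_GETFSLABEL");

    close(fd);
    return 0;
}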
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index 96952d4acb50..f9501c3a3666 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -25,7 +25,7 @@ int __exfat_write_inode(struct inode *inode, int sync)
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
- bool is_dir = (ei->type == TYPE_DIR) ? true : false;
+ bool is_dir = (ei->type == TYPE_DIR);
struct timespec64 ts;
if (inode->i_ino == EXFAT_ROOT_INO)
@@ -274,9 +274,11 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
sector_t last_block;
sector_t phys = 0;
sector_t valid_blks;
+ loff_t i_size;
mutex_lock(&sbi->s_lock);
- last_block = EXFAT_B_TO_BLK_ROUND_UP(i_size_read(inode), sb);
+ i_size = i_size_read(inode);
+ last_block = EXFAT_B_TO_BLK_ROUND_UP(i_size, sb);
if (iblock >= last_block && !create)
goto done;
@@ -305,77 +307,99 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
if (buffer_delay(bh_result))
clear_buffer_delay(bh_result);
- if (create) {
+ /*
+ * In most cases, we just need to set bh_result to mapped, unmapped
+ * or new status as follows:
+ * 1. i_size == valid_size
+ * 2. write case (create == 1)
+ * 3. direct_read (!bh_result->b_folio)
+ * -> the unwritten part will be zeroed in exfat_direct_IO()
+ *
+ * Otherwise, in the case of buffered read, it is necessary to take
+ * care of the last partially written block if valid_size is not equal to i_size.
+ */
+ if (i_size == ei->valid_size || create || !bh_result->b_folio)
valid_blks = EXFAT_B_TO_BLK_ROUND_UP(ei->valid_size, sb);
+ else
+ valid_blks = EXFAT_B_TO_BLK(ei->valid_size, sb);
- if (iblock + max_blocks < valid_blks) {
- /* The range has been written, map it */
- goto done;
- } else if (iblock < valid_blks) {
- /*
- * The range has been partially written,
- * map the written part.
- */
- max_blocks = valid_blks - iblock;
- goto done;
- }
+ /* The range has been fully written, map it */
+ if (iblock + max_blocks < valid_blks)
+ goto done;
- /* The area has not been written, map and mark as new. */
- set_buffer_new(bh_result);
+ /* The range has been partially written, map the written part */
+ if (iblock < valid_blks) {
+ max_blocks = valid_blks - iblock;
+ goto done;
+ }
+ /* The area has not been written, map and mark as new for create case */
+ if (create) {
+ set_buffer_new(bh_result);
ei->valid_size = EXFAT_BLK_TO_B(iblock + max_blocks, sb);
mark_inode_dirty(inode);
- } else {
- valid_blks = EXFAT_B_TO_BLK(ei->valid_size, sb);
+ goto done;
+ }
- if (iblock + max_blocks < valid_blks) {
- /* The range has been written, map it */
+ /*
+ * The area has just one block partially written.
+ * In that case, we should read and fill the unwritten part of
+ * a block with zero.
+ */
+ if (bh_result->b_folio && iblock == valid_blks &&
+ (ei->valid_size & (sb->s_blocksize - 1))) {
+ loff_t size, pos;
+ void *addr;
+
+ max_blocks = 1;
+
+ /*
+ * No buffer_head is allocated.
+ * (1) bmap: It's enough to set blocknr without I/O.
+ * (2) read: The unwritten part should be filled with zero.
+ * If a folio does not have any buffers,
+ * let's return -EAGAIN to fall back to
+ * block_read_full_folio() for per-bh IO.
+ */
+ if (!folio_buffers(bh_result->b_folio)) {
+ err = -EAGAIN;
goto done;
- } else if (iblock < valid_blks) {
- /*
- * The area has been partially written,
- * map the written part.
- */
- max_blocks = valid_blks - iblock;
+ }
+
+ pos = EXFAT_BLK_TO_B(iblock, sb);
+ size = ei->valid_size - pos;
+ addr = folio_address(bh_result->b_folio) +
+ offset_in_folio(bh_result->b_folio, pos);
+
+ /* Check if bh->b_data points to proper addr in folio */
+ if (bh_result->b_data != addr) {
+ exfat_fs_error_ratelimit(sb,
+ "b_data(%p) != folio_addr(%p)",
+ bh_result->b_data, addr);
+ err = -EINVAL;
goto done;
- } else if (iblock == valid_blks &&
- (ei->valid_size & (sb->s_blocksize - 1))) {
- /*
- * The block has been partially written,
- * zero the unwritten part and map the block.
- */
- loff_t size, off, pos;
-
- max_blocks = 1;
-
- /*
- * For direct read, the unwritten part will be zeroed in
- * exfat_direct_IO()
- */
- if (!bh_result->b_folio)
- goto done;
-
- pos = EXFAT_BLK_TO_B(iblock, sb);
- size = ei->valid_size - pos;
- off = pos & (PAGE_SIZE - 1);
-
- folio_set_bh(bh_result, bh_result->b_folio, off);
- err = bh_read(bh_result, 0);
- if (err < 0)
- goto unlock_ret;
-
- folio_zero_segment(bh_result->b_folio, off + size,
- off + sb->s_blocksize);
- } else {
- /*
- * The range has not been written, clear the mapped flag
- * to only zero the cache and do not read from disk.
- */
- clear_buffer_mapped(bh_result);
}
+
+ /* Read a block */
+ err = bh_read(bh_result, 0);
+ if (err < 0)
+ goto done;
+
+ /* Zero unwritten part of a block */
+ memset(bh_result->b_data + size, 0, bh_result->b_size - size);
+ err = 0;
+ goto done;
}
+
+ /*
+ * The area has not been written, clear mapped for read/bmap cases.
+ * If so, it will be filled with zero without reading from disk.
+ */
+ clear_buffer_mapped(bh_result);
done:
bh_result->b_size = EXFAT_BLK_TO_B(max_blocks, sb);
+ if (err < 0)
+ clear_buffer_mapped(bh_result);
unlock_ret:
mutex_unlock(&sbi->s_lock);
return err;
@@ -422,9 +446,10 @@ static void exfat_write_failed(struct address_space *mapping, loff_t to)
}
}
-static int exfat_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned int len,
- struct folio **foliop, void **fsdata)
+static int exfat_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned int len,
+ struct folio **foliop, void **fsdata)
{
int ret;
@@ -439,15 +464,16 @@ static int exfat_write_begin(struct file *file, struct address_space *mapping,
return ret;
}
-static int exfat_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int copied,
- struct folio *folio, void *fsdata)
+static int exfat_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned int len, unsigned int copied,
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
struct exfat_inode_info *ei = EXFAT_I(inode);
int err;
- err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
+ err = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
if (err < len)
exfat_write_failed(mapping, pos+len);
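
The reworked exfat_get_block() rounds valid_size up when the whole range is valid (or for writes and direct reads) and down for buffered reads, so the last, partially written block is read and then zero-filled past valid_size. A stand-alone sketch of just that tail-zeroing arithmetic, assuming a 512-byte block and a plain caller-supplied buffer (nothing here is kernel API):

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 512u

/*
 * After reading block 'iblock' from disk into 'buf', zero whatever part of
 * it lies beyond valid_size so stale on-disk bytes never reach the page
 * cache. Mirrors the memset() on bh_result->b_data in the hunk above.
 */
static void zero_tail(unsigned char *buf, unsigned long long iblock,
                      unsigned long long valid_size)
{
    unsigned long long pos = iblock * BLOCK_SIZE;

    if (valid_size <= pos)
        memset(buf, 0, BLOCK_SIZE);               /* block entirely past valid data */
    else if (valid_size < pos + BLOCK_SIZE)
        memset(buf + (valid_size - pos), 0,
               BLOCK_SIZE - (valid_size - pos));  /* zero only the tail */
}

int main(void)
{
    unsigned char block[BLOCK_SIZE];

    memset(block, 0xAA, sizeof(block));  /* pretend this came from disk */
    zero_tail(block, 2, 1100);           /* valid_size ends 76 bytes into block 2 */
    printf("byte 75 = 0x%02x, byte 76 = 0x%02x\n", block[75], block[76]);
    return 0;
}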
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index 97d2774760fe..dfe957493d49 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -31,10 +31,9 @@ static inline void exfat_d_version_set(struct dentry *dentry,
* If it happened, the negative dentry isn't actually negative anymore. So,
* drop it.
*/
-static int exfat_d_revalidate(struct dentry *dentry, unsigned int flags)
+static int exfat_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
- int ret;
-
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -58,11 +57,7 @@ static int exfat_d_revalidate(struct dentry *dentry, unsigned int flags)
if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
return 0;
- spin_lock(&dentry->d_lock);
- ret = inode_eq_iversion(d_inode(dentry->d_parent),
- exfat_d_version(dentry));
- spin_unlock(&dentry->d_lock);
- return ret;
+ return inode_eq_iversion(dir, exfat_d_version(dentry));
}
/* returns the length of a struct qstr, ignoring trailing dots if necessary */
@@ -237,7 +232,7 @@ static int exfat_search_empty_slot(struct super_block *sb,
dentry = 0;
}
- while (dentry + num_entries < total_entries &&
+ while (dentry + num_entries <= total_entries &&
clu.dir != EXFAT_EOF_CLUSTER) {
i = dentry & (dentries_per_clu - 1);
@@ -305,7 +300,7 @@ static int exfat_check_max_dentries(struct inode *inode)
* the directory entry index in p_dir is returned on success
* -error code is returned on failure
*/
-static int exfat_find_empty_entry(struct inode *inode,
+int exfat_find_empty_entry(struct inode *inode,
struct exfat_chain *p_dir, int num_entries,
struct exfat_entry_set_cache *es)
{
@@ -330,8 +325,8 @@ static int exfat_find_empty_entry(struct inode *inode,
while ((dentry = exfat_search_empty_slot(sb, &hint_femp, p_dir,
num_entries, es)) < 0) {
- if (dentry == -EIO)
- break;
+ if (dentry != -ENOSPC)
+ return dentry;
if (exfat_check_max_dentries(inode))
return -ENOSPC;
@@ -447,7 +442,7 @@ static int __exfat_resolve_path(struct inode *inode, const unsigned char *path,
return namelen; /* return error value */
if ((lossy && !lookup) || !namelen)
- return (lossy & NLS_NAME_OVERLEN) ? -ENAMETOOLONG : -EINVAL;
+ return -EINVAL;
return 0;
}
@@ -592,7 +587,7 @@ unlock:
}
/* lookup a file */
-static int exfat_find(struct inode *dir, struct qstr *qname,
+static int exfat_find(struct inode *dir, const struct qstr *qname,
struct exfat_dir_entry *info)
{
int ret, dentry, count;
@@ -647,7 +642,6 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
info->type = exfat_get_entry_type(ep);
info->attr = le16_to_cpu(ep->dentry.file.attr);
- info->size = le64_to_cpu(ep2->dentry.stream.valid_size);
info->valid_size = le64_to_cpu(ep2->dentry.stream.valid_size);
info->size = le64_to_cpu(ep2->dentry.stream.size);
@@ -688,6 +682,16 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
0);
exfat_put_dentry_set(&es, false);
+ if (info->valid_size < 0) {
+ exfat_fs_error(sb, "data valid size is invalid(%lld)", info->valid_size);
+ return -EIO;
+ }
+
+ if (unlikely(EXFAT_B_TO_CLU_ROUND_UP(info->size, sbi) > sbi->used_clusters)) {
+ exfat_fs_error(sb, "data size is invalid(%lld)", info->size);
+ return -EIO;
+ }
+
if (ei->start_clu == EXFAT_FREE_CLUSTER) {
exfat_fs_error(sb,
"non-zero size file starts with zero cluster (size : %llu, p_dir : %u, entry : 0x%08x)",
@@ -840,8 +844,8 @@ unlock:
return err;
}
-static int exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
@@ -851,7 +855,7 @@ static int exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
loff_t size = i_size_read(dir);
if (unlikely(exfat_forced_shutdown(sb)))
- return -EIO;
+ return ERR_PTR(-EIO);
mutex_lock(&EXFAT_SB(sb)->s_lock);
exfat_set_volume_dirty(sb);
@@ -882,7 +886,7 @@ static int exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
unlock:
mutex_unlock(&EXFAT_SB(sb)->s_lock);
- return err;
+ return ERR_PTR(err);
}
static int exfat_check_dir_empty(struct super_block *sb,
@@ -890,6 +894,7 @@ static int exfat_check_dir_empty(struct super_block *sb,
{
int i, dentries_per_clu;
unsigned int type;
+ unsigned int clu_count = 0;
struct exfat_chain clu;
struct exfat_dentry *ep;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
@@ -926,6 +931,10 @@ static int exfat_check_dir_empty(struct super_block *sb,
} else {
if (exfat_get_next_cluster(sb, &(clu.dir)))
return -EIO;
+
+ /* break if the cluster chain includes a loop */
+ if (unlikely(++clu_count > EXFAT_DATA_CLUSTER_COUNT(sbi)))
+ break;
}
}
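
The d_revalidate() change compares the parent directory's i_version, recorded in the dentry when the negative lookup was cached, against the directory the VFS now passes in, and drops the d_lock round trip. A loose user-space analogy of that version-stamped negative cache (all names below are illustrative, not VFS API):

#include <stdio.h>
#include <stdbool.h>

/* A cached "name not found" result, stamped with the directory version. */
struct neg_entry {
    unsigned long long dir_version;
};

struct directory {
    unsigned long long version;  /* bumped on every create/rename/unlink */
};

static void dir_changed(struct directory *dir)
{
    dir->version++;
}

/*
 * The cached negative result is only trustworthy if the directory has not
 * changed since the result was recorded.
 */
static bool neg_entry_valid(const struct neg_entry *ne, const struct directory *dir)
{
    return ne->dir_version == dir->version;
}

int main(void)
{
    struct directory dir = { .version = 1 };
    struct neg_entry miss = { .dir_version = dir.version };

    printf("valid before change: %d\n", neg_entry_valid(&miss, &dir));
    dir_changed(&dir);  /* e.g. the file was created in the meantime */
    printf("valid after change:  %d\n", neg_entry_valid(&miss, &dir));
    return 0;
}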
diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c
index d47896a89596..57db08a5271c 100644
--- a/fs/exfat/nls.c
+++ b/fs/exfat/nls.c
@@ -616,9 +616,6 @@ static int exfat_nls_to_ucs2(struct super_block *sb,
unilen++;
}
- if (p_cstring[i] != '\0')
- lossy |= NLS_NAME_OVERLEN;
-
*uniname = '\0';
p_uniname->name_len = unilen;
p_uniname->name_hash = exfat_calc_chksum16(upname, unilen << 1, 0,
@@ -789,7 +786,7 @@ int exfat_create_upcase_table(struct super_block *sb)
return ret;
}
- if (exfat_get_next_cluster(sb, &(clu.dir)))
+ if (exfat_get_next_cluster(sb, &clu.dir))
return -EIO;
}
@@ -801,4 +798,5 @@ load_default:
void exfat_free_upcase_table(struct exfat_sb_info *sbi)
{
kvfree(sbi->vol_utbl);
+ sbi->vol_utbl = NULL;
}
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index bd57844414aa..10e872a99663 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -31,34 +31,25 @@ static void exfat_free_iocharset(struct exfat_sb_info *sbi)
kfree(sbi->options.iocharset);
}
-static void exfat_put_super(struct super_block *sb)
+static void exfat_set_iocharset(struct exfat_mount_options *opts,
+ char *iocharset)
{
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
-
- mutex_lock(&sbi->s_lock);
- exfat_free_bitmap(sbi);
- brelse(sbi->boot_bh);
- mutex_unlock(&sbi->s_lock);
+ opts->iocharset = iocharset;
+ if (!strcmp(opts->iocharset, "utf8"))
+ opts->utf8 = 1;
+ else
+ opts->utf8 = 0;
}
-static int exfat_sync_fs(struct super_block *sb, int wait)
+static void exfat_put_super(struct super_block *sb)
{
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- int err = 0;
-
- if (unlikely(exfat_forced_shutdown(sb)))
- return 0;
-
- if (!wait)
- return 0;
- /* If there are some dirty buffers in the bdev inode */
mutex_lock(&sbi->s_lock);
- sync_blockdev(sb->s_bdev);
- if (exfat_clear_volume_dirty(sb))
- err = -EIO;
+ exfat_clear_volume_dirty(sb);
+ exfat_free_bitmap(sbi);
+ brelse(sbi->boot_bh);
mutex_unlock(&sbi->s_lock);
- return err;
}
static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -67,15 +58,6 @@ static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf)
struct exfat_sb_info *sbi = EXFAT_SB(sb);
unsigned long long id = huge_encode_dev(sb->s_bdev->bd_dev);
- if (sbi->used_clusters == EXFAT_CLUSTERS_UNTRACKED) {
- mutex_lock(&sbi->s_lock);
- if (exfat_count_used_clusters(sb, &sbi->used_clusters)) {
- mutex_unlock(&sbi->s_lock);
- return -EIO;
- }
- mutex_unlock(&sbi->s_lock);
- }
-
buf->f_type = sb->s_magic;
buf->f_bsize = sbi->cluster_size;
buf->f_blocks = sbi->num_clusters - 2; /* clu 0 & 1 */
@@ -228,7 +210,6 @@ static const struct super_operations exfat_sops = {
.write_inode = exfat_write_inode,
.evict_inode = exfat_evict_inode,
.put_super = exfat_put_super,
- .sync_fs = exfat_sync_fs,
.statfs = exfat_statfs,
.show_options = exfat_show_options,
.shutdown = exfat_shutdown,
@@ -272,11 +253,11 @@ static const struct fs_parameter_spec exfat_parameters[] = {
fsparam_u32oct("allow_utime", Opt_allow_utime),
fsparam_string("iocharset", Opt_charset),
fsparam_enum("errors", Opt_errors, exfat_param_enums),
- fsparam_flag("discard", Opt_discard),
+ fsparam_flag_no("discard", Opt_discard),
fsparam_flag("keep_last_dots", Opt_keep_last_dots),
fsparam_flag("sys_tz", Opt_sys_tz),
fsparam_s32("time_offset", Opt_time_offset),
- fsparam_flag("zero_size_dir", Opt_zero_size_dir),
+ fsparam_flag_no("zero_size_dir", Opt_zero_size_dir),
__fsparam(NULL, "utf8", Opt_utf8, fs_param_deprecated,
NULL),
__fsparam(NULL, "debug", Opt_debug, fs_param_deprecated,
@@ -321,14 +302,14 @@ static int exfat_parse_param(struct fs_context *fc, struct fs_parameter *param)
break;
case Opt_charset:
exfat_free_iocharset(sbi);
- opts->iocharset = param->string;
+ exfat_set_iocharset(opts, param->string);
param->string = NULL;
break;
case Opt_errors:
opts->errors = result.uint_32;
break;
case Opt_discard:
- opts->discard = 1;
+ opts->discard = !result.negated;
break;
case Opt_keep_last_dots:
opts->keep_last_dots = 1;
@@ -346,7 +327,7 @@ static int exfat_parse_param(struct fs_context *fc, struct fs_parameter *param)
opts->time_offset = result.int_32;
break;
case Opt_zero_size_dir:
- opts->zero_size_dir = true;
+ opts->zero_size_dir = !result.negated;
break;
case Opt_utf8:
case Opt_debug:
@@ -370,13 +351,12 @@ static void exfat_hash_init(struct super_block *sb)
INIT_HLIST_HEAD(&sbi->inode_hashtable[i]);
}
-static int exfat_read_root(struct inode *inode)
+static int exfat_read_root(struct inode *inode, struct exfat_chain *root_clu)
{
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
- struct exfat_chain cdir;
- int num_subdirs, num_clu = 0;
+ int num_subdirs;
exfat_chain_set(&ei->dir, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
ei->entry = -1;
@@ -389,12 +369,9 @@ static int exfat_read_root(struct inode *inode)
ei->hint_stat.clu = sbi->root_dir;
ei->hint_femp.eidx = EXFAT_HINT_NONE;
- exfat_chain_set(&cdir, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
- if (exfat_count_num_clusters(sb, &cdir, &num_clu))
- return -EIO;
- i_size_write(inode, num_clu << sbi->cluster_size_bits);
+ i_size_write(inode, EXFAT_CLU_TO_B(root_clu->size, sbi));
- num_subdirs = exfat_count_dir_entries(sb, &cdir);
+ num_subdirs = exfat_count_dir_entries(sb, root_clu);
if (num_subdirs < 0)
return -EIO;
set_nlink(inode, num_subdirs + EXFAT_MIN_SUBDIR);
@@ -456,7 +433,10 @@ static int exfat_read_boot_sector(struct super_block *sb)
struct exfat_sb_info *sbi = EXFAT_SB(sb);
/* set block size to read super block */
- sb_min_blocksize(sb, 512);
+ if (!sb_min_blocksize(sb, 512)) {
+ exfat_err(sb, "unable to set blocksize");
+ return -EINVAL;
+ }
/* read boot sector */
sbi->boot_bh = sb_bread(sb, 0);
@@ -531,7 +511,6 @@ static int exfat_read_boot_sector(struct super_block *sb)
sbi->vol_flags = le16_to_cpu(p_boot->vol_flags);
sbi->vol_flags_persistent = sbi->vol_flags & (VOLUME_DIRTY | MEDIA_FAILURE);
sbi->clu_srch_ptr = EXFAT_FIRST_CLUSTER;
- sbi->used_clusters = EXFAT_CLUSTERS_UNTRACKED;
/* check consistencies */
if ((u64)sbi->num_FAT_sectors << p_boot->sect_size_bits <
@@ -608,7 +587,8 @@ static int exfat_verify_boot_region(struct super_block *sb)
}
/* mount the file system volume */
-static int __exfat_fill_super(struct super_block *sb)
+static int __exfat_fill_super(struct super_block *sb,
+ struct exfat_chain *root_clu)
{
int ret;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
@@ -625,6 +605,18 @@ static int __exfat_fill_super(struct super_block *sb)
goto free_bh;
}
+ /*
+ * Call exfat_count_num_clusters() before searching for up-case and
+ * bitmap directory entries to avoid an infinite loop if they are missing
+ * and the cluster chain includes a loop.
+ */
+ exfat_chain_set(root_clu, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
+ ret = exfat_count_num_clusters(sb, root_clu, &root_clu->size);
+ if (ret) {
+ exfat_err(sb, "failed to count the number of clusters in root");
+ goto free_bh;
+ }
+
ret = exfat_create_upcase_table(sb);
if (ret) {
exfat_err(sb, "failed to load upcase table");
@@ -637,6 +629,17 @@ static int __exfat_fill_super(struct super_block *sb)
goto free_bh;
}
+ if (!exfat_test_bitmap(sb, sbi->root_dir)) {
+ exfat_warn(sb, "failed to test first cluster bit of root dir(%u)",
+ sbi->root_dir);
+ /*
+ * The first cluster bit of the root directory should never
+ * be unset except when storage is corrupted. This bit is
+ * set to allow operations after mount.
+ */
+ exfat_set_bitmap(sb, sbi->root_dir, false);
+ }
+
ret = exfat_count_used_clusters(sb, &sbi->used_clusters);
if (ret) {
exfat_err(sb, "failed to scan clusters");
@@ -657,6 +660,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
struct exfat_sb_info *sbi = sb->s_fs_info;
struct exfat_mount_options *opts = &sbi->options;
struct inode *root_inode;
+ struct exfat_chain root_clu;
int err;
if (opts->allow_utime == (unsigned short)-1)
@@ -675,7 +679,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_time_min = EXFAT_MIN_TIMESTAMP_SECS;
sb->s_time_max = EXFAT_MAX_TIMESTAMP_SECS;
- err = __exfat_fill_super(sb);
+ err = __exfat_fill_super(sb, &root_clu);
if (err) {
exfat_err(sb, "failed to recognize exfat type");
goto check_nls_io;
@@ -684,8 +688,8 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
/* set up enough so that it can read an inode */
exfat_hash_init(sb);
- if (!strcmp(sbi->options.iocharset, "utf8"))
- opts->utf8 = 1;
+ if (sbi->options.utf8)
+ set_default_d_op(sb, &exfat_utf8_dentry_ops);
else {
sbi->nls_io = load_nls(sbi->options.iocharset);
if (!sbi->nls_io) {
@@ -694,13 +698,9 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
err = -EINVAL;
goto free_table;
}
+ set_default_d_op(sb, &exfat_dentry_ops);
}
- if (sbi->options.utf8)
- sb->s_d_op = &exfat_utf8_dentry_ops;
- else
- sb->s_d_op = &exfat_dentry_ops;
-
root_inode = new_inode(sb);
if (!root_inode) {
exfat_err(sb, "failed to allocate root inode");
@@ -710,7 +710,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
root_inode->i_ino = EXFAT_ROOT_INO;
inode_set_iversion(root_inode, 1);
- err = exfat_read_root(root_inode);
+ err = exfat_read_root(root_inode, &root_clu);
if (err) {
exfat_err(sb, "failed to initialize root inode");
goto put_inode;
@@ -761,10 +761,46 @@ static void exfat_free(struct fs_context *fc)
static int exfat_reconfigure(struct fs_context *fc)
{
+ struct super_block *sb = fc->root->d_sb;
+ struct exfat_sb_info *remount_sbi = fc->s_fs_info;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_mount_options *new_opts = &remount_sbi->options;
+ struct exfat_mount_options *cur_opts = &sbi->options;
+
fc->sb_flags |= SB_NODIRATIME;
- /* volume flag will be updated in exfat_sync_fs */
- sync_filesystem(fc->root->d_sb);
+ sync_filesystem(sb);
+ mutex_lock(&sbi->s_lock);
+ exfat_clear_volume_dirty(sb);
+ mutex_unlock(&sbi->s_lock);
+
+ if (new_opts->allow_utime == (unsigned short)-1)
+ new_opts->allow_utime = ~new_opts->fs_dmask & 0022;
+
+ /*
+ * Since the old settings of these mount options are cached in
+ * inodes or dentries, they cannot be modified dynamically.
+ */
+ if (strcmp(new_opts->iocharset, cur_opts->iocharset) ||
+ new_opts->keep_last_dots != cur_opts->keep_last_dots ||
+ new_opts->sys_tz != cur_opts->sys_tz ||
+ new_opts->time_offset != cur_opts->time_offset ||
+ !uid_eq(new_opts->fs_uid, cur_opts->fs_uid) ||
+ !gid_eq(new_opts->fs_gid, cur_opts->fs_gid) ||
+ new_opts->fs_fmask != cur_opts->fs_fmask ||
+ new_opts->fs_dmask != cur_opts->fs_dmask ||
+ new_opts->allow_utime != cur_opts->allow_utime)
+ return -EINVAL;
+
+ if (new_opts->discard != cur_opts->discard &&
+ new_opts->discard &&
+ !bdev_max_discard_sectors(sb->s_bdev)) {
+ exfat_warn(sb, "remounting with \"discard\" option, but the device does not support discard");
+ return -EINVAL;
+ }
+
+ swap(*cur_opts, *new_opts);
+
return 0;
}
@@ -788,13 +824,24 @@ static int exfat_init_fs_context(struct fs_context *fc)
ratelimit_state_init(&sbi->ratelimit, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
- sbi->options.fs_uid = current_uid();
- sbi->options.fs_gid = current_gid();
- sbi->options.fs_fmask = current->fs->umask;
- sbi->options.fs_dmask = current->fs->umask;
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE && fc->root) {
+ struct super_block *sb = fc->root->d_sb;
+ struct exfat_mount_options *cur_opts = &EXFAT_SB(sb)->options;
+
+ sbi->options.fs_uid = cur_opts->fs_uid;
+ sbi->options.fs_gid = cur_opts->fs_gid;
+ sbi->options.fs_fmask = cur_opts->fs_fmask;
+ sbi->options.fs_dmask = cur_opts->fs_dmask;
+ } else {
+ sbi->options.fs_uid = current_uid();
+ sbi->options.fs_gid = current_gid();
+ sbi->options.fs_fmask = current->fs->umask;
+ sbi->options.fs_dmask = current->fs->umask;
+ }
+
sbi->options.allow_utime = -1;
- sbi->options.iocharset = exfat_default_iocharset;
sbi->options.errors = EXFAT_ERRORS_RO;
+ exfat_set_iocharset(&sbi->options, exfat_default_iocharset);
fc->s_fs_info = sbi;
fc->ops = &exfat_context_ops;
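
Switching discard and zero_size_dir to fsparam_flag_no() means both the plain and the "no"-prefixed spellings parse, and opts->discard = !result.negated picks the value. Roughly how that negated-flag convention behaves, as a tiny stand-alone parser (an illustration only, not the kernel's fs_parser):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* Accept either "name" (enable) or "noname" (disable) for a flag option. */
static int parse_flag_no(const char *arg, const char *name, bool *value)
{
    if (strcmp(arg, name) == 0) {
        *value = true;     /* plain form: enable */
        return 0;
    }
    if (strncmp(arg, "no", 2) == 0 && strcmp(arg + 2, name) == 0) {
        *value = false;    /* "no" prefix: disable */
        return 0;
    }
    return -1;             /* not this option at all */
}

int main(void)
{
    bool discard;

    if (parse_flag_no("nodiscard", "discard", &discard) == 0)
        printf("discard = %d\n", discard);  /* prints 0 */
    if (parse_flag_no("discard", "discard", &discard) == 0)
        printf("discard = %d\n", discard);  /* prints 1 */
    return 0;
}

Together with the option swap in the new exfat_reconfigure(), this is what makes turning such an option back off on remount possible.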
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 0c899cfba578..d3e55de4a2a2 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -126,10 +126,8 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
int err;
parent = ERR_PTR(-EACCES);
- inode_lock(dentry->d_inode);
if (mnt->mnt_sb->s_export_op->get_parent)
parent = mnt->mnt_sb->s_export_op->get_parent(dentry);
- inode_unlock(dentry->d_inode);
if (IS_ERR(parent)) {
dprintk("get_parent of %lu failed, err %ld\n",
@@ -145,7 +143,7 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
if (err)
goto out_err;
dprintk("%s: found name: %s\n", __func__, nbuf);
- tmp = lookup_one_unlocked(mnt_idmap(mnt), nbuf, parent, strlen(nbuf));
+ tmp = lookup_one_unlocked(mnt_idmap(mnt), &QSTR(nbuf), parent);
if (IS_ERR(tmp)) {
dprintk("lookup failed: %ld\n", PTR_ERR(tmp));
err = PTR_ERR(tmp);
@@ -286,6 +284,7 @@ static int get_name(const struct path *path, char *name, struct dentry *child)
};
struct getdents_callback buffer = {
.ctx.actor = filldir_one,
+ .ctx.count = INT_MAX,
.name = name,
};
@@ -550,16 +549,13 @@ exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len,
goto err_result;
}
- inode_lock(target_dir->d_inode);
- nresult = lookup_one(mnt_idmap(mnt), nbuf,
- target_dir, strlen(nbuf));
+ nresult = lookup_one_unlocked(mnt_idmap(mnt), &QSTR(nbuf), target_dir);
if (!IS_ERR(nresult)) {
if (unlikely(nresult->d_inode != result->d_inode)) {
dput(nresult);
nresult = ERR_PTR(-ESTALE);
}
}
- inode_unlock(target_dir->d_inode);
/*
* At this point we are done with the parent, but it's pinned
* by the child dentry anyway.
@@ -610,4 +606,5 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
}
EXPORT_SYMBOL_GPL(exportfs_decode_fh);
+MODULE_DESCRIPTION("Code mapping from inodes to file handles");
MODULE_LICENSE("GPL");
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 402fecf90a44..b07b3b369710 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -87,7 +87,7 @@ static void ext2_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
struct inode *dir = mapping->host;
inode_inc_iversion(dir);
- block_write_end(NULL, mapping, pos, len, len, folio, NULL);
+ block_write_end(pos, len, len, folio);
if (pos+len > dir->i_size) {
i_size_write(dir, pos+len);
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index f38bdd46e4f7..cf97b76e9fd3 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -368,6 +368,7 @@ struct ext2_inode {
#define EXT2_MOUNT_ERRORS_CONT 0x000010 /* Continue on errors */
#define EXT2_MOUNT_ERRORS_RO 0x000020 /* Remount fs ro on errors */
#define EXT2_MOUNT_ERRORS_PANIC 0x000040 /* Panic on errors */
+#define EXT2_MOUNT_ERRORS_MASK 0x000070
#define EXT2_MOUNT_MINIX_DF 0x000080 /* Mimics the Minix statfs */
#define EXT2_MOUNT_NOBH 0x000100 /* No buffer_heads */
#define EXT2_MOUNT_NO_UID32 0x000200 /* Disable 32-bit UIDs */
@@ -749,9 +750,9 @@ extern int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
/* ioctl.c */
-extern int ext2_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+extern int ext2_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
extern int ext2_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa);
+ struct dentry *dentry, struct file_kattr *fa);
extern long ext2_ioctl(struct file *, unsigned int, unsigned long);
extern long ext2_compat_ioctl(struct file *, unsigned int, unsigned long);
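
EXT2_MOUNT_ERRORS_MASK (0x70) groups the three mutually exclusive errors= bits so the parser can clear the whole group before setting the newly chosen one, as the Opt_errors case in fs/ext2/super.c does further below. The pattern in isolation (constants copied from the header above):

#include <stdio.h>

#define EXT2_MOUNT_ERRORS_CONT  0x000010
#define EXT2_MOUNT_ERRORS_RO    0x000020
#define EXT2_MOUNT_ERRORS_PANIC 0x000040
#define EXT2_MOUNT_ERRORS_MASK  0x000070

/* Select exactly one errors= behaviour: clear the group, then set one bit. */
static unsigned int set_errors_mode(unsigned int mount_opt, unsigned int mode)
{
    mount_opt &= ~EXT2_MOUNT_ERRORS_MASK;
    return mount_opt | mode;
}

int main(void)
{
    unsigned int opt = EXT2_MOUNT_ERRORS_RO;

    opt = set_errors_mode(opt, EXT2_MOUNT_ERRORS_PANIC);
    printf("errors bits: 0x%06x\n", opt & EXT2_MOUNT_ERRORS_MASK);  /* 0x000040 */
    return 0;
}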
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 10b061ac5bc0..76bddce462fc 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -122,17 +122,19 @@ static const struct vm_operations_struct ext2_dax_vm_ops = {
.pfn_mkwrite = ext2_dax_fault,
};
-static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int ext2_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
+
if (!IS_DAX(file_inode(file)))
- return generic_file_mmap(file, vma);
+ return generic_file_mmap_prepare(desc);
file_accessed(file);
- vma->vm_ops = &ext2_dax_vm_ops;
+ desc->vm_ops = &ext2_dax_vm_ops;
return 0;
}
#else
-#define ext2_file_mmap generic_file_mmap
+#define ext2_file_mmap_prepare generic_file_mmap_prepare
#endif
/*
@@ -316,7 +318,7 @@ const struct file_operations ext2_file_operations = {
#ifdef CONFIG_COMPAT
.compat_ioctl = ext2_compat_ioctl,
#endif
- .mmap = ext2_file_mmap,
+ .mmap_prepare = ext2_file_mmap_prepare,
.open = ext2_file_open,
.release = ext2_release_file,
.fsync = ext2_fsync,
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 30f8201c155f..dbfe9098a124 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -895,9 +895,19 @@ int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
int ret;
+ loff_t i_size;
inode_lock(inode);
- len = min_t(u64, len, i_size_read(inode));
+ i_size = i_size_read(inode);
+ /*
+ * iomap_fiemap() returns EINVAL for 0 length. Make sure we don't trim
+ * length to 0 but still trim the range as much as possible since
+ * ext2_get_blocks() iterates unmapped space block by block which is
+ * slow.
+ */
+ if (i_size == 0)
+ i_size = 1;
+ len = min_t(u64, len, i_size);
ret = iomap_fiemap(inode, fieinfo, start, len, &ext2_iomap_ops);
inode_unlock(inode);
@@ -915,7 +925,7 @@ static void ext2_readahead(struct readahead_control *rac)
}
static int
-ext2_write_begin(struct file *file, struct address_space *mapping,
+ext2_write_begin(const struct kiocb *iocb, struct address_space *mapping,
loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
int ret;
@@ -926,13 +936,14 @@ ext2_write_begin(struct file *file, struct address_space *mapping,
return ret;
}
-static int ext2_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct folio *folio, void *fsdata)
+static int ext2_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
int ret;
- ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
+ ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
if (ret < len)
ext2_write_failed(mapping, pos + len);
return ret;
@@ -1387,7 +1398,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
ei = EXT2_I(inode);
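
The ext2_fiemap() hunk clamps the request length to the file size but never down to zero, because iomap_fiemap() rejects a zero length with EINVAL. The clamp on its own, as a quick sketch:

#include <stdio.h>

/*
 * Clamp a FIEMAP request length to the file size, but never to zero, since
 * a zero-length request would be rejected with EINVAL. Same clamping as the
 * hunk above adds to ext2_fiemap().
 */
static unsigned long long clamp_fiemap_len(unsigned long long len,
                                           unsigned long long i_size)
{
    if (i_size == 0)
        i_size = 1;
    return len < i_size ? len : i_size;
}

int main(void)
{
    printf("%llu\n", clamp_fiemap_len(~0ULL, 0));   /* 1, not 0 */
    printf("%llu\n", clamp_fiemap_len(4096, 100));  /* trimmed to 100 */
    return 0;
}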
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index 44e04484e570..c3fea55b8efa 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -18,7 +18,7 @@
#include <linux/uaccess.h>
#include <linux/fileattr.h>
-int ext2_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int ext2_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
struct ext2_inode_info *ei = EXT2_I(d_inode(dentry));
@@ -28,7 +28,7 @@ int ext2_fileattr_get(struct dentry *dentry, struct fileattr *fa)
}
int ext2_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa)
+ struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct ext2_inode_info *ei = EXT2_I(inode);
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 8346ab9534c1..bde617a66cec 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -225,15 +225,16 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir,
return err;
}
-static int ext2_mkdir(struct mnt_idmap * idmap,
- struct inode * dir, struct dentry * dentry, umode_t mode)
+static struct dentry *ext2_mkdir(struct mnt_idmap * idmap,
+ struct inode * dir, struct dentry * dentry,
+ umode_t mode)
{
struct inode * inode;
int err;
err = dquot_initialize(dir);
if (err)
- return err;
+ return ERR_PTR(err);
inode_inc_link_count(dir);
@@ -258,7 +259,7 @@ static int ext2_mkdir(struct mnt_idmap * idmap,
d_instantiate_new(dentry, inode);
out:
- return err;
+ return ERR_PTR(err);
out_fail:
inode_dec_link_count(inode);
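
ext2_mkdir() now returns a dentry pointer, with failures encoded via ERR_PTR(err). A stand-alone sketch of that pointer-encoded-errno idiom (simplified; in the kernel the real helpers live in <linux/err.h>, the names below are local imitations):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

/* Encode a small negative errno in an otherwise invalid pointer value. */
static inline void *err_ptr(long err)      { return (void *)err; }
static inline long  ptr_err(const void *p) { return (long)p; }
static inline int   is_err(const void *p)
{
    return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *make_dir(int should_fail)
{
    static int dummy_dentry;

    if (should_fail)
        return err_ptr(-EIO);  /* like returning ERR_PTR(-EIO) from mkdir */
    return &dummy_dentry;
}

int main(void)
{
    void *d = make_dir(1);

    if (is_err(d))
        printf("mkdir failed: %ld\n", ptr_err(d));
    else
        printf("mkdir ok\n");
    return 0;
}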
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 37f7ce56adce..121e634c792a 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -23,7 +23,8 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
-#include <linux/parser.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/random.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
@@ -40,7 +41,6 @@
#include "acl.h"
static void ext2_write_super(struct super_block *sb);
-static int ext2_remount (struct super_block * sb, int * flags, char * data);
static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
static int ext2_sync_fs(struct super_block *sb, int wait);
static int ext2_freeze(struct super_block *sb);
@@ -81,6 +81,33 @@ void ext2_error(struct super_block *sb, const char *function,
}
}
+static void ext2_msg_fc(struct fs_context *fc, const char *prefix,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ const char *s_id;
+
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ s_id = fc->root->d_sb->s_id;
+ } else {
+ /* get last path component of source */
+ s_id = strrchr(fc->source, '/');
+ if (s_id)
+ s_id++;
+ else
+ s_id = fc->source;
+ }
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk("%sEXT2-fs (%s): %pV\n", prefix, s_id, &vaf);
+
+ va_end(args);
+}
+
void ext2_msg(struct super_block *sb, const char *prefix,
const char *fmt, ...)
{
@@ -346,7 +373,6 @@ static const struct super_operations ext2_sops = {
.freeze_fs = ext2_freeze,
.unfreeze_fs = ext2_unfreeze,
.statfs = ext2_statfs,
- .remount_fs = ext2_remount,
.show_options = ext2_show_options,
#ifdef CONFIG_QUOTA
.quota_read = ext2_quota_read,
@@ -402,230 +428,218 @@ static const struct export_operations ext2_export_ops = {
.get_parent = ext2_get_parent,
};
-static unsigned long get_sb_block(void **data)
-{
- unsigned long sb_block;
- char *options = (char *) *data;
-
- if (!options || strncmp(options, "sb=", 3) != 0)
- return 1; /* Default location */
- options += 3;
- sb_block = simple_strtoul(options, &options, 0);
- if (*options && *options != ',') {
- printk("EXT2-fs: Invalid sb specification: %s\n",
- (char *) *data);
- return 1;
- }
- if (*options == ',')
- options++;
- *data = (void *) options;
- return sb_block;
-}
-
enum {
- Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
- Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic,
- Opt_err_ro, Opt_nouid32, Opt_debug,
- Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr,
- Opt_acl, Opt_noacl, Opt_xip, Opt_dax, Opt_ignore, Opt_err, Opt_quota,
- Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation
+ Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid, Opt_resgid, Opt_resuid,
+ Opt_sb, Opt_errors, Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov,
+ Opt_nobh, Opt_user_xattr, Opt_acl, Opt_xip, Opt_dax, Opt_ignore,
+ Opt_quota, Opt_usrquota, Opt_grpquota, Opt_reservation,
};
-static const match_table_t tokens = {
- {Opt_bsd_df, "bsddf"},
- {Opt_minix_df, "minixdf"},
- {Opt_grpid, "grpid"},
- {Opt_grpid, "bsdgroups"},
- {Opt_nogrpid, "nogrpid"},
- {Opt_nogrpid, "sysvgroups"},
- {Opt_resgid, "resgid=%u"},
- {Opt_resuid, "resuid=%u"},
- {Opt_sb, "sb=%u"},
- {Opt_err_cont, "errors=continue"},
- {Opt_err_panic, "errors=panic"},
- {Opt_err_ro, "errors=remount-ro"},
- {Opt_nouid32, "nouid32"},
- {Opt_debug, "debug"},
- {Opt_oldalloc, "oldalloc"},
- {Opt_orlov, "orlov"},
- {Opt_nobh, "nobh"},
- {Opt_user_xattr, "user_xattr"},
- {Opt_nouser_xattr, "nouser_xattr"},
- {Opt_acl, "acl"},
- {Opt_noacl, "noacl"},
- {Opt_xip, "xip"},
- {Opt_dax, "dax"},
- {Opt_grpquota, "grpquota"},
- {Opt_ignore, "noquota"},
- {Opt_quota, "quota"},
- {Opt_usrquota, "usrquota"},
- {Opt_reservation, "reservation"},
- {Opt_noreservation, "noreservation"},
- {Opt_err, NULL}
+static const struct constant_table ext2_param_errors[] = {
+ {"continue", EXT2_MOUNT_ERRORS_CONT},
+ {"panic", EXT2_MOUNT_ERRORS_PANIC},
+ {"remount-ro", EXT2_MOUNT_ERRORS_RO},
+ {}
+};
+
+static const struct fs_parameter_spec ext2_param_spec[] = {
+ fsparam_flag ("bsddf", Opt_bsd_df),
+ fsparam_flag ("minixdf", Opt_minix_df),
+ fsparam_flag ("grpid", Opt_grpid),
+ fsparam_flag ("bsdgroups", Opt_grpid),
+ fsparam_flag ("nogrpid", Opt_nogrpid),
+ fsparam_flag ("sysvgroups", Opt_nogrpid),
+ fsparam_gid ("resgid", Opt_resgid),
+ fsparam_uid ("resuid", Opt_resuid),
+ fsparam_u32 ("sb", Opt_sb),
+ fsparam_enum ("errors", Opt_errors, ext2_param_errors),
+ fsparam_flag ("nouid32", Opt_nouid32),
+ fsparam_flag ("debug", Opt_debug),
+ fsparam_flag ("oldalloc", Opt_oldalloc),
+ fsparam_flag ("orlov", Opt_orlov),
+ fsparam_flag ("nobh", Opt_nobh),
+ fsparam_flag_no ("user_xattr", Opt_user_xattr),
+ fsparam_flag_no ("acl", Opt_acl),
+ fsparam_flag ("xip", Opt_xip),
+ fsparam_flag ("dax", Opt_dax),
+ fsparam_flag ("grpquota", Opt_grpquota),
+ fsparam_flag ("noquota", Opt_ignore),
+ fsparam_flag ("quota", Opt_quota),
+ fsparam_flag ("usrquota", Opt_usrquota),
+ fsparam_flag_no ("reservation", Opt_reservation),
+ {}
+};
+
+#define EXT2_SPEC_s_resuid (1 << 0)
+#define EXT2_SPEC_s_resgid (1 << 1)
+
+struct ext2_fs_context {
+ unsigned long vals_s_flags; /* Bits to set in s_flags */
+ unsigned long mask_s_flags; /* Bits changed in s_flags */
+ unsigned int vals_s_mount_opt;
+ unsigned int mask_s_mount_opt;
+ kuid_t s_resuid;
+ kgid_t s_resgid;
+ unsigned long s_sb_block;
+ unsigned int spec;
+
};
-static int parse_options(char *options, struct super_block *sb,
- struct ext2_mount_options *opts)
+static inline void ctx_set_mount_opt(struct ext2_fs_context *ctx,
+ unsigned long flag)
{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int option;
- kuid_t uid;
- kgid_t gid;
-
- if (!options)
- return 1;
-
- while ((p = strsep (&options, ",")) != NULL) {
- int token;
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_bsd_df:
- clear_opt (opts->s_mount_opt, MINIX_DF);
- break;
- case Opt_minix_df:
- set_opt (opts->s_mount_opt, MINIX_DF);
- break;
- case Opt_grpid:
- set_opt (opts->s_mount_opt, GRPID);
- break;
- case Opt_nogrpid:
- clear_opt (opts->s_mount_opt, GRPID);
- break;
- case Opt_resuid:
- if (match_int(&args[0], &option))
- return 0;
- uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(uid)) {
- ext2_msg(sb, KERN_ERR, "Invalid uid value %d", option);
- return 0;
-
- }
- opts->s_resuid = uid;
- break;
- case Opt_resgid:
- if (match_int(&args[0], &option))
- return 0;
- gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(gid)) {
- ext2_msg(sb, KERN_ERR, "Invalid gid value %d", option);
- return 0;
- }
- opts->s_resgid = gid;
- break;
- case Opt_sb:
- /* handled by get_sb_block() instead of here */
- /* *sb_block = match_int(&args[0]); */
- break;
- case Opt_err_panic:
- clear_opt (opts->s_mount_opt, ERRORS_CONT);
- clear_opt (opts->s_mount_opt, ERRORS_RO);
- set_opt (opts->s_mount_opt, ERRORS_PANIC);
- break;
- case Opt_err_ro:
- clear_opt (opts->s_mount_opt, ERRORS_CONT);
- clear_opt (opts->s_mount_opt, ERRORS_PANIC);
- set_opt (opts->s_mount_opt, ERRORS_RO);
- break;
- case Opt_err_cont:
- clear_opt (opts->s_mount_opt, ERRORS_RO);
- clear_opt (opts->s_mount_opt, ERRORS_PANIC);
- set_opt (opts->s_mount_opt, ERRORS_CONT);
- break;
- case Opt_nouid32:
- set_opt (opts->s_mount_opt, NO_UID32);
- break;
- case Opt_debug:
- set_opt (opts->s_mount_opt, DEBUG);
- break;
- case Opt_oldalloc:
- set_opt (opts->s_mount_opt, OLDALLOC);
- break;
- case Opt_orlov:
- clear_opt (opts->s_mount_opt, OLDALLOC);
- break;
- case Opt_nobh:
- ext2_msg(sb, KERN_INFO,
- "nobh option not supported");
- break;
+ ctx->mask_s_mount_opt |= flag;
+ ctx->vals_s_mount_opt |= flag;
+}
+
+static inline void ctx_clear_mount_opt(struct ext2_fs_context *ctx,
+ unsigned long flag)
+{
+ ctx->mask_s_mount_opt |= flag;
+ ctx->vals_s_mount_opt &= ~flag;
+}
+
+static inline unsigned long
+ctx_test_mount_opt(struct ext2_fs_context *ctx, unsigned long flag)
+{
+ return (ctx->vals_s_mount_opt & flag);
+}
+
+static inline bool
+ctx_parsed_mount_opt(struct ext2_fs_context *ctx, unsigned long flag)
+{
+ return (ctx->mask_s_mount_opt & flag);
+}
+
+static void ext2_free_fc(struct fs_context *fc)
+{
+ kfree(fc->fs_private);
+}
+
+static int ext2_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct ext2_fs_context *ctx = fc->fs_private;
+ int opt;
+ struct fs_parse_result result;
+
+ opt = fs_parse(fc, ext2_param_spec, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_bsd_df:
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_MINIX_DF);
+ break;
+ case Opt_minix_df:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_MINIX_DF);
+ break;
+ case Opt_grpid:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_GRPID);
+ break;
+ case Opt_nogrpid:
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_GRPID);
+ break;
+ case Opt_resuid:
+ ctx->s_resuid = result.uid;
+ ctx->spec |= EXT2_SPEC_s_resuid;
+ break;
+ case Opt_resgid:
+ ctx->s_resgid = result.gid;
+ ctx->spec |= EXT2_SPEC_s_resgid;
+ break;
+ case Opt_sb:
+ /* Note that this is silently ignored on remount */
+ ctx->s_sb_block = result.uint_32;
+ break;
+ case Opt_errors:
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_ERRORS_MASK);
+ ctx_set_mount_opt(ctx, result.uint_32);
+ break;
+ case Opt_nouid32:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_NO_UID32);
+ break;
+ case Opt_debug:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_DEBUG);
+ break;
+ case Opt_oldalloc:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_OLDALLOC);
+ break;
+ case Opt_orlov:
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_OLDALLOC);
+ break;
+ case Opt_nobh:
+ ext2_msg_fc(fc, KERN_INFO, "nobh option not supported\n");
+ break;
#ifdef CONFIG_EXT2_FS_XATTR
- case Opt_user_xattr:
- set_opt (opts->s_mount_opt, XATTR_USER);
- break;
- case Opt_nouser_xattr:
- clear_opt (opts->s_mount_opt, XATTR_USER);
- break;
+ case Opt_user_xattr:
+ if (!result.negated)
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_XATTR_USER);
+ else
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_XATTR_USER);
+ break;
#else
- case Opt_user_xattr:
- case Opt_nouser_xattr:
- ext2_msg(sb, KERN_INFO, "(no)user_xattr options"
- "not supported");
- break;
+ case Opt_user_xattr:
+ ext2_msg_fc(fc, KERN_INFO, "(no)user_xattr options not supported");
+ break;
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
- case Opt_acl:
- set_opt(opts->s_mount_opt, POSIX_ACL);
- break;
- case Opt_noacl:
- clear_opt(opts->s_mount_opt, POSIX_ACL);
- break;
+ case Opt_acl:
+ if (!result.negated)
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_POSIX_ACL);
+ else
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_POSIX_ACL);
+ break;
#else
- case Opt_acl:
- case Opt_noacl:
- ext2_msg(sb, KERN_INFO,
- "(no)acl options not supported");
- break;
+ case Opt_acl:
+ ext2_msg_fc(fc, KERN_INFO, "(no)acl options not supported");
+ break;
#endif
- case Opt_xip:
- ext2_msg(sb, KERN_INFO, "use dax instead of xip");
- set_opt(opts->s_mount_opt, XIP);
- fallthrough;
- case Opt_dax:
+ case Opt_xip:
+ ext2_msg_fc(fc, KERN_INFO, "use dax instead of xip");
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_XIP);
+ fallthrough;
+ case Opt_dax:
#ifdef CONFIG_FS_DAX
- ext2_msg(sb, KERN_WARNING,
- "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
- set_opt(opts->s_mount_opt, DAX);
+ ext2_msg_fc(fc, KERN_WARNING,
+ "DAX enabled. Warning: DAX support in ext2 driver is deprecated"
+ " and will be removed at the end of 2025. Please use ext4 driver instead.");
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_DAX);
#else
- ext2_msg(sb, KERN_INFO, "dax option not supported");
+ ext2_msg_fc(fc, KERN_INFO, "dax option not supported");
#endif
- break;
+ break;
#if defined(CONFIG_QUOTA)
- case Opt_quota:
- case Opt_usrquota:
- set_opt(opts->s_mount_opt, USRQUOTA);
- break;
-
- case Opt_grpquota:
- set_opt(opts->s_mount_opt, GRPQUOTA);
- break;
+ case Opt_quota:
+ case Opt_usrquota:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_USRQUOTA);
+ break;
+
+ case Opt_grpquota:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_GRPQUOTA);
+ break;
#else
- case Opt_quota:
- case Opt_usrquota:
- case Opt_grpquota:
- ext2_msg(sb, KERN_INFO,
- "quota operations not supported");
- break;
+ case Opt_quota:
+ case Opt_usrquota:
+ case Opt_grpquota:
+ ext2_msg_fc(fc, KERN_INFO, "quota operations not supported");
+ break;
#endif
-
- case Opt_reservation:
- set_opt(opts->s_mount_opt, RESERVATION);
- ext2_msg(sb, KERN_INFO, "reservations ON");
- break;
- case Opt_noreservation:
- clear_opt(opts->s_mount_opt, RESERVATION);
- ext2_msg(sb, KERN_INFO, "reservations OFF");
- break;
- case Opt_ignore:
- break;
- default:
- return 0;
+ case Opt_reservation:
+ if (!result.negated) {
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_RESERVATION);
+ ext2_msg_fc(fc, KERN_INFO, "reservations ON");
+ } else {
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_RESERVATION);
+ ext2_msg_fc(fc, KERN_INFO, "reservations OFF");
}
+ break;
+ case Opt_ignore:
+ break;
+ default:
+ return -EINVAL;
}
- return 1;
+ return 0;
}
static int ext2_setup_super (struct super_block * sb,
@@ -801,24 +815,83 @@ static unsigned long descriptor_loc(struct super_block *sb,
return ext2_group_first_block_no(sb, bg) + ext2_bg_has_super(sb, bg);
}
-static int ext2_fill_super(struct super_block *sb, void *data, int silent)
+/*
+ * Set all mount options either from defaults on disk, or from parsed
+ * options. Parsed/specified options override on-disk defaults.
+ */
+static void ext2_set_options(struct fs_context *fc, struct ext2_sb_info *sbi)
+{
+ struct ext2_fs_context *ctx = fc->fs_private;
+ struct ext2_super_block *es = sbi->s_es;
+ unsigned long def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
+
+ /* Copy parsed mount options to sbi */
+ sbi->s_mount_opt = ctx->vals_s_mount_opt;
+
+ /* Use in-superblock defaults only if not specified during parsing */
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_DEBUG) &&
+ def_mount_opts & EXT2_DEFM_DEBUG)
+ set_opt(sbi->s_mount_opt, DEBUG);
+
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_GRPID) &&
+ def_mount_opts & EXT2_DEFM_BSDGROUPS)
+ set_opt(sbi->s_mount_opt, GRPID);
+
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_NO_UID32) &&
+ def_mount_opts & EXT2_DEFM_UID16)
+ set_opt(sbi->s_mount_opt, NO_UID32);
+
+#ifdef CONFIG_EXT2_FS_XATTR
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_XATTR_USER) &&
+ def_mount_opts & EXT2_DEFM_XATTR_USER)
+ set_opt(sbi->s_mount_opt, XATTR_USER);
+#endif
+#ifdef CONFIG_EXT2_FS_POSIX_ACL
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_POSIX_ACL) &&
+ def_mount_opts & EXT2_DEFM_ACL)
+ set_opt(sbi->s_mount_opt, POSIX_ACL);
+#endif
+
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_ERRORS_MASK)) {
+ if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
+ set_opt(sbi->s_mount_opt, ERRORS_PANIC);
+ else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
+ set_opt(sbi->s_mount_opt, ERRORS_CONT);
+ else
+ set_opt(sbi->s_mount_opt, ERRORS_RO);
+ }
+
+ if (ctx->spec & EXT2_SPEC_s_resuid)
+ sbi->s_resuid = ctx->s_resuid;
+ else
+ sbi->s_resuid = make_kuid(&init_user_ns,
+ le16_to_cpu(es->s_def_resuid));
+
+ if (ctx->spec & EXT2_SPEC_s_resgid)
+ sbi->s_resgid = ctx->s_resgid;
+ else
+ sbi->s_resgid = make_kgid(&init_user_ns,
+ le16_to_cpu(es->s_def_resgid));
+}
+
+static int ext2_fill_super(struct super_block *sb, struct fs_context *fc)
{
+ struct ext2_fs_context *ctx = fc->fs_private;
+ int silent = fc->sb_flags & SB_SILENT;
struct buffer_head * bh;
struct ext2_sb_info * sbi;
struct ext2_super_block * es;
struct inode *root;
unsigned long block;
- unsigned long sb_block = get_sb_block(&data);
+ unsigned long sb_block = ctx->s_sb_block;
unsigned long logic_sb_block;
unsigned long offset = 0;
- unsigned long def_mount_opts;
long ret = -ENOMEM;
int blocksize = BLOCK_SIZE;
int db_count;
int i, j;
__le32 features;
int err;
- struct ext2_mount_options opts;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
@@ -877,42 +950,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
if (sb->s_magic != EXT2_SUPER_MAGIC)
goto cantfind_ext2;
- opts.s_mount_opt = 0;
- /* Set defaults before we parse the mount options */
- def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
- if (def_mount_opts & EXT2_DEFM_DEBUG)
- set_opt(opts.s_mount_opt, DEBUG);
- if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
- set_opt(opts.s_mount_opt, GRPID);
- if (def_mount_opts & EXT2_DEFM_UID16)
- set_opt(opts.s_mount_opt, NO_UID32);
-#ifdef CONFIG_EXT2_FS_XATTR
- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
- set_opt(opts.s_mount_opt, XATTR_USER);
-#endif
-#ifdef CONFIG_EXT2_FS_POSIX_ACL
- if (def_mount_opts & EXT2_DEFM_ACL)
- set_opt(opts.s_mount_opt, POSIX_ACL);
-#endif
-
- if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
- set_opt(opts.s_mount_opt, ERRORS_PANIC);
- else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
- set_opt(opts.s_mount_opt, ERRORS_CONT);
- else
- set_opt(opts.s_mount_opt, ERRORS_RO);
-
- opts.s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
- opts.s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
-
- set_opt(opts.s_mount_opt, RESERVATION);
-
- if (!parse_options((char *) data, sb, &opts))
- goto failed_mount;
-
- sbi->s_mount_opt = opts.s_mount_opt;
- sbi->s_resuid = opts.s_resuid;
- sbi->s_resgid = opts.s_resgid;
+ ext2_set_options(fc, sbi);
sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
@@ -1324,23 +1362,21 @@ static void ext2_write_super(struct super_block *sb)
ext2_sync_fs(sb, 1);
}
-static int ext2_remount (struct super_block * sb, int * flags, char * data)
+static int ext2_reconfigure(struct fs_context *fc)
{
+ struct ext2_fs_context *ctx = fc->fs_private;
+ struct super_block *sb = fc->root->d_sb;
struct ext2_sb_info * sbi = EXT2_SB(sb);
struct ext2_super_block * es;
struct ext2_mount_options new_opts;
+ int flags = fc->sb_flags;
int err;
sync_filesystem(sb);
- spin_lock(&sbi->s_lock);
- new_opts.s_mount_opt = sbi->s_mount_opt;
- new_opts.s_resuid = sbi->s_resuid;
- new_opts.s_resgid = sbi->s_resgid;
- spin_unlock(&sbi->s_lock);
-
- if (!parse_options(data, sb, &new_opts))
- return -EINVAL;
+ new_opts.s_mount_opt = ctx->vals_s_mount_opt;
+ new_opts.s_resuid = ctx->s_resuid;
+ new_opts.s_resgid = ctx->s_resgid;
spin_lock(&sbi->s_lock);
es = sbi->s_es;
@@ -1349,9 +1385,9 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
"dax flag with busy inodes while remounting");
new_opts.s_mount_opt ^= EXT2_MOUNT_DAX;
}
- if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
+ if ((bool)(flags & SB_RDONLY) == sb_rdonly(sb))
goto out_set;
- if (*flags & SB_RDONLY) {
+ if (flags & SB_RDONLY) {
if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
!(sbi->s_mount_state & EXT2_VALID_FS))
goto out_set;
@@ -1470,10 +1506,9 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
return 0;
}
-static struct dentry *ext2_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int ext2_get_tree(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, ext2_fill_super);
+ return get_tree_bdev(fc, ext2_fill_super);
}
#ifdef CONFIG_QUOTA
@@ -1556,7 +1591,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
}
lock_buffer(bh);
memcpy(bh->b_data+offset, data, tocopy);
- flush_dcache_page(bh->b_page);
+ flush_dcache_folio(bh->b_folio);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
unlock_buffer(bh);
@@ -1624,12 +1659,49 @@ out:
#endif
+static const struct fs_context_operations ext2_context_ops = {
+ .parse_param = ext2_parse_param,
+ .get_tree = ext2_get_tree,
+ .reconfigure = ext2_reconfigure,
+ .free = ext2_free_fc,
+};
+
+static int ext2_init_fs_context(struct fs_context *fc)
+{
+ struct ext2_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ struct super_block *sb = fc->root->d_sb;
+ struct ext2_sb_info *sbi = EXT2_SB(sb);
+
+ spin_lock(&sbi->s_lock);
+ ctx->vals_s_mount_opt = sbi->s_mount_opt;
+ ctx->vals_s_flags = sb->s_flags;
+ ctx->s_resuid = sbi->s_resuid;
+ ctx->s_resgid = sbi->s_resgid;
+ spin_unlock(&sbi->s_lock);
+ } else {
+ ctx->s_sb_block = 1;
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_RESERVATION);
+ }
+
+ fc->fs_private = ctx;
+ fc->ops = &ext2_context_ops;
+
+ return 0;
+}
+
static struct file_system_type ext2_fs_type = {
.owner = THIS_MODULE,
.name = "ext2",
- .mount = ext2_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = ext2_init_fs_context,
+ .parameters = ext2_param_spec,
};
MODULE_ALIAS_FS("ext2");
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index e20d59221fc0..01873c2a34ad 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -1,38 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
-# Ext3 configs are here for backward compatibility with old configs which may
-# have EXT3_FS set but not EXT4_FS set and thus would result in non-bootable
-# kernels after the removal of ext3 driver.
-config EXT3_FS
- tristate "The Extended 3 (ext3) filesystem"
- select EXT4_FS
- help
- This config option is here only for backward compatibility. ext3
- filesystem is now handled by the ext4 driver.
-
-config EXT3_FS_POSIX_ACL
- bool "Ext3 POSIX Access Control Lists"
- depends on EXT3_FS
- select EXT4_FS_POSIX_ACL
- select FS_POSIX_ACL
- help
- This config option is here only for backward compatibility. ext3
- filesystem is now handled by the ext4 driver.
-
-config EXT3_FS_SECURITY
- bool "Ext3 Security Labels"
- depends on EXT3_FS
- select EXT4_FS_SECURITY
- help
- This config option is here only for backward compatibility. ext3
- filesystem is now handled by the ext4 driver.
-
config EXT4_FS
tristate "The Extended 4 (ext4) filesystem"
select BUFFER_HEAD
select JBD2
select CRC16
- select CRYPTO
- select CRYPTO_CRC32C
+ select CRC32
select FS_IOMAP
select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
help
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 8042ad873808..8040c731b3e4 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -649,8 +649,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
/* Hm, nope. Are (enough) root reserved clusters available? */
if (uid_eq(sbi->s_resuid, current_fsuid()) ||
(!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
- capable(CAP_SYS_RESOURCE) ||
- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
+ capable(CAP_SYS_RESOURCE)) {
if (free_clusters >= (nclusters + dirty_clusters +
resv_clusters))
@@ -703,7 +703,7 @@ int ext4_should_retry_alloc(struct super_block *sb, int *retries)
* possible we just missed a transaction commit that did so
*/
smp_mb();
- if (sbi->s_mb_free_pending == 0) {
+ if (atomic_read(&sbi->s_mb_free_pending) == 0) {
if (test_opt(sb, DISCARD)) {
atomic_inc(&sbi->s_retry_alloc_pending);
flush_work(&sbi->s_discard_work);
@@ -752,7 +752,7 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
*count = ar.len;
/*
* Account for the allocated meta blocks. We will never
- * fail EDQUOT for metdata, but we do account for it.
+ * fail EDQUOT for metadata, but we do account for it.
*/
if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
dquot_alloc_block_nofail(inode,
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index 2a135075468d..87760fabdd2e 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -25,12 +25,12 @@ int ext4_inode_bitmap_csum_verify(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
int sz;
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return 1;
sz = EXT4_INODES_PER_GROUP(sb) >> 3;
provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo);
- calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+ calculated = ext4_chksum(sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END) {
hi = le16_to_cpu(gdp->bg_inode_bitmap_csum_hi);
provided |= (hi << 16);
@@ -48,11 +48,11 @@ void ext4_inode_bitmap_csum_set(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
int sz;
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return;
sz = EXT4_INODES_PER_GROUP(sb) >> 3;
- csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+ csum = ext4_chksum(sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
gdp->bg_inode_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF);
if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END)
gdp->bg_inode_bitmap_csum_hi = cpu_to_le16(csum >> 16);
@@ -67,11 +67,11 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return 1;
provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo);
- calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+ calculated = ext4_chksum(sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END) {
hi = le16_to_cpu(gdp->bg_block_bitmap_csum_hi);
provided |= (hi << 16);
@@ -89,10 +89,10 @@ void ext4_block_bitmap_csum_set(struct super_block *sb,
__u32 csum;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return;
- csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+ csum = ext4_chksum(sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
gdp->bg_block_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF);
if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END)
gdp->bg_block_bitmap_csum_hi = cpu_to_le16(csum >> 16);
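The lo/hi handling repeated in these hunks is easy to misread; the helpers below are a sketch (not kernel code) of how the full 32-bit crc32c value maps onto the two 16-bit on-disk fields and back, assuming the group descriptor is large enough to hold the high half.

/* Sketch only: mirrors the lo/hi packing used for the bitmap checksums above. */
static inline void example_csum_split(u32 csum, __le16 *lo, __le16 *hi)
{
	*lo = cpu_to_le16(csum & 0xFFFF);	/* low 16 bits are always stored */
	*hi = cpu_to_le16(csum >> 16);		/* high 16 bits need the larger descriptor */
}

static inline u32 example_csum_join(__le16 lo, __le16 hi)
{
	return le16_to_cpu(lo) | ((u32)le16_to_cpu(hi) << 16);
}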
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 87ee3a17bd29..e8c5525afc67 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -351,10 +351,9 @@ int ext4_check_blockref(const char *function, unsigned int line,
{
__le32 *bref = p;
unsigned int blk;
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
- if (ext4_has_feature_journal(inode->i_sb) &&
- (inode->i_ino ==
- le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
+ if (journal && inode == journal->j_inode)
return 0;
while (bref < p+max) {
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index 0a056d97e640..cf0a0970c095 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -227,6 +227,8 @@ static bool ext4_has_stable_inodes(struct super_block *sb)
}
const struct fscrypt_operations ext4_cryptops = {
+ .inode_info_offs = (int)offsetof(struct ext4_inode_info, i_crypt_info) -
+ (int)offsetof(struct ext4_inode_info, vfs_inode),
.needs_bounce_pages = 1,
.has_32bit_inodes = 1,
.supports_subblock_data_units = 1,
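The new inode_info_offs value is a signed byte offset from the embedded VFS inode to ext4's i_crypt_info pointer, which lets fscrypt find its per-inode state without a filesystem callback. A hedged illustration of the pointer arithmetic this enables (the real consumer is inside fscrypt and is not shown here; the function name is made up):

/* Illustration only: walk from the VFS inode to the fscrypt info slot. */
static struct fscrypt_inode_info **example_crypt_info_slot(struct inode *inode,
							    int inode_info_offs)
{
	/* inode == &EXT4_I(inode)->vfs_inode, so adding the registered offset
	 * lands on &EXT4_I(inode)->i_crypt_info. */
	return (struct fscrypt_inode_info **)((char *)inode + inode_info_offs);
}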
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 02d47a64e8d1..256fe2c1d4c1 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -86,7 +86,7 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
dir->i_sb->s_blocksize);
const int next_offset = ((char *) de - buf) + rlen;
bool fake = is_fake_dir_entry(de);
- bool has_csum = ext4_has_metadata_csum(dir->i_sb);
+ bool has_csum = ext4_has_feature_metadata_csum(dir->i_sb);
if (unlikely(rlen < ext4_dir_rec_len(1, fake ? NULL : dir)))
error_msg = "rec_len is smaller than minimal";
@@ -104,6 +104,9 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
else if (unlikely(le32_to_cpu(de->inode) >
le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
error_msg = "inode out of bounds";
+ else if (unlikely(next_offset == size && de->name_len == 1 &&
+ de->name[0] == '.'))
+ error_msg = "'.' directory cannot be the last in data block";
else
return 0;
@@ -145,7 +148,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
return err;
/* Can we just clear INDEX flag to ignore htree information? */
- if (!ext4_has_metadata_csum(sb)) {
+ if (!ext4_has_feature_metadata_csum(sb)) {
/*
* We don't set the inode dirty flag since it's not
* critical that it gets flushed back to the disk.
@@ -189,13 +192,13 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
continue;
}
if (err > 0) {
- pgoff_t index = map.m_pblk >>
- (PAGE_SHIFT - inode->i_blkbits);
+ pgoff_t index = map.m_pblk << inode->i_blkbits >>
+ PAGE_SHIFT;
if (!ra_has_index(&file->f_ra, index))
page_cache_sync_readahead(
sb->s_bdev->bd_mapping,
- &file->f_ra, file,
- index, 1);
+ &file->f_ra, file, index,
+ 1 << EXT4_SB(sb)->s_min_folio_order);
file->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;
bh = ext4_bread(NULL, inode, map.m_lblk, 0);
if (IS_ERR(bh)) {
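The readahead index change matters once the filesystem block size can exceed PAGE_SIZE (large folios). A worked example under an assumed geometry of 64KiB blocks on 4KiB pages (i_blkbits = 16, PAGE_SHIFT = 12):

/*
 *   old: index = m_pblk >> (PAGE_SHIFT - i_blkbits)   shift by -4, broken
 *   new: index = m_pblk << 16 >> 12 = m_pblk * 16     first page backing the block
 *
 * With the historical 4KiB-block, 4KiB-page case both forms reduce to m_pblk,
 * so behaviour there is unchanged; the readahead length is likewise scaled by
 * 1 << s_min_folio_order instead of a single page.
 */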
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 74f2071189b2..56112f201cac 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -33,7 +33,7 @@
#include <linux/blockgroup_lock.h>
#include <linux/percpu_counter.h>
#include <linux/ratelimit.h>
-#include <crypto/hash.h>
+#include <linux/crc32c.h>
#include <linux/falloc.h>
#include <linux/percpu-rwsem.h>
#include <linux/fiemap.h>
@@ -157,7 +157,7 @@ enum criteria {
/*
* Reads each block group sequentially, performing disk IO if
- * necessary, to find find_suitable block group. Tries to
+ * necessary, to find a suitable block group. Tries to
* allocate goal length but might trim the request if nothing
* is found after enough tries.
*/
@@ -185,14 +185,8 @@ enum criteria {
/* prefer goal again. length */
#define EXT4_MB_HINT_MERGE 0x0001
-/* blocks already reserved */
-#define EXT4_MB_HINT_RESERVED 0x0002
-/* metadata is being allocated */
-#define EXT4_MB_HINT_METADATA 0x0004
/* first blocks in the file */
#define EXT4_MB_HINT_FIRST 0x0008
-/* search for the best chunk */
-#define EXT4_MB_HINT_BEST 0x0010
/* data is being allocated */
#define EXT4_MB_HINT_DATA 0x0020
/* don't preallocate (for tails) */
@@ -213,15 +207,6 @@ enum criteria {
#define EXT4_MB_USE_RESERVED 0x2000
/* Do strict check for free blocks while retrying block allocation */
#define EXT4_MB_STRICT_CHECK 0x4000
-/* Large fragment size list lookup succeeded at least once for
- * CR_POWER2_ALIGNED */
-#define EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED 0x8000
-/* Avg fragment size rb tree lookup succeeded at least once for
- * CR_GOAL_LEN_FAST */
-#define EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED 0x00010000
-/* Avg fragment size rb tree lookup succeeded at least once for
- * CR_BEST_AVAIL_LEN */
-#define EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED 0x00020000
struct ext4_allocation_request {
/* target inode for block we're allocating */
@@ -256,15 +241,26 @@ struct ext4_allocation_request {
#define EXT4_MAP_UNWRITTEN BIT(BH_Unwritten)
#define EXT4_MAP_BOUNDARY BIT(BH_Boundary)
#define EXT4_MAP_DELAYED BIT(BH_Delay)
+/*
+ * This is for use in ext4_map_query_blocks() for a special case where
+ * physically and logically contiguous blocks can be split across two leaf
+ * nodes instead of a single extent. This is required in case of atomic writes
+ * to know whether the returned extent is the last one in its leaf. If so, look
+ * up the next extent in the following leaf block in
+ * ext4_map_query_blocks_next_in_leaf().
+ * - This is never going to be added to any buffer head state.
+ * - We use the next available bit after BH_BITMAP_UPTODATE.
+ */
+#define EXT4_MAP_QUERY_LAST_IN_LEAF BIT(BH_BITMAP_UPTODATE + 1)
#define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\
- EXT4_MAP_DELAYED)
+ EXT4_MAP_DELAYED | EXT4_MAP_QUERY_LAST_IN_LEAF)
struct ext4_map_blocks {
ext4_fsblk_t m_pblk;
ext4_lblk_t m_lblk;
unsigned int m_len;
unsigned int m_flags;
+ u64 m_seq;
};
/*
@@ -278,7 +274,10 @@ struct ext4_system_blocks {
/*
* Flags for ext4_io_end->flags
*/
-#define EXT4_IO_END_UNWRITTEN 0x0001
+#define EXT4_IO_END_UNWRITTEN 0x0001
+#define EXT4_IO_END_FAILED 0x0002
+
+#define EXT4_IO_END_DEFER_COMPLETION (EXT4_IO_END_UNWRITTEN | EXT4_IO_END_FAILED)
struct ext4_io_end_vec {
struct list_head list; /* list of io_end_vec */
@@ -367,7 +366,16 @@ struct ext4_io_submit {
#define EXT4_MAX_BLOCKS(size, offset, blkbits) \
((EXT4_BLOCK_ALIGN(size + offset, blkbits) >> blkbits) - (offset >> \
blkbits))
-
+#define EXT4_B_TO_LBLK(inode, offset) \
+ (round_up((offset), i_blocksize(inode)) >> (inode)->i_blkbits)
+#define EXT4_LBLK_TO_B(inode, lblk) ((loff_t)(lblk) << (inode)->i_blkbits)
+
+/* Translate a block number to a page index */
+#define EXT4_LBLK_TO_PG(inode, lblk) (EXT4_LBLK_TO_B((inode), (lblk)) >> \
+ PAGE_SHIFT)
+/* Translate a page index to a block number */
+#define EXT4_PG_TO_LBLK(inode, pnum) (((loff_t)(pnum) << PAGE_SHIFT) >> \
+ (inode)->i_blkbits)
/* Translate a block number to a cluster number */
#define EXT4_B2C(sbi, blk) ((blk) >> (sbi)->s_cluster_bits)
/* Translate a cluster number to a block number */
@@ -694,16 +702,22 @@ enum {
/* Caller is from the delayed allocation writeout path
* finally doing the actual allocation of delayed blocks */
#define EXT4_GET_BLOCKS_DELALLOC_RESERVE 0x0004
- /* caller is from the direct IO path, request to creation of an
- unwritten extents if not allocated, split the unwritten
- extent if blocks has been preallocated already*/
-#define EXT4_GET_BLOCKS_PRE_IO 0x0008
-#define EXT4_GET_BLOCKS_CONVERT 0x0010
-#define EXT4_GET_BLOCKS_IO_CREATE_EXT (EXT4_GET_BLOCKS_PRE_IO|\
- EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT)
- /* Convert extent to initialized after IO complete */
-#define EXT4_GET_BLOCKS_IO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\
+ /*
+ * This means that we cannot merge newly allocated extents, and if we
+ * find an unwritten extent, we need to split it.
+ */
+#define EXT4_GET_BLOCKS_SPLIT_NOMERGE 0x0008
+ /*
+ * Caller is from the dio or dioread_nolock buffered IO path; request to
+ * create an unwritten extent if one does not exist, or split the
+ * unwritten extent that was found. Also do not merge the newly created
+ * unwritten extent; the io end will convert it from unwritten to written
+ * and try to merge the written extent.
+ */
+#define EXT4_GET_BLOCKS_IO_CREATE_EXT (EXT4_GET_BLOCKS_SPLIT_NOMERGE|\
EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT)
+ /* Convert unwritten extent to initialized. */
+#define EXT4_GET_BLOCKS_CONVERT 0x0010
/* Eventual metadata allocation (due to growing extent tree)
* should not fail, so try to use reserved blocks for that.*/
#define EXT4_GET_BLOCKS_METADATA_NOFAIL 0x0020
@@ -715,11 +729,23 @@ enum {
#define EXT4_GET_BLOCKS_ZERO 0x0200
#define EXT4_GET_BLOCKS_CREATE_ZERO (EXT4_GET_BLOCKS_CREATE |\
EXT4_GET_BLOCKS_ZERO)
- /* Caller will submit data before dropping transaction handle. This
- * allows jbd2 to avoid submitting data before commit. */
+ /* Caller is in the context of data submission, such as writeback,
+ * fsync, etc. In particular, in the generic writeback path, the caller
+ * will submit data before dropping the transaction handle. This allows
+ * jbd2 to avoid submitting data before commit. */
#define EXT4_GET_BLOCKS_IO_SUBMIT 0x0400
+ /* Convert extent to initialized after IO complete */
+#define EXT4_GET_BLOCKS_IO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT |\
+ EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT |\
+ EXT4_GET_BLOCKS_IO_SUBMIT)
 /* Caller is in the atomic context, find extent if it has been cached */
#define EXT4_GET_BLOCKS_CACHED_NOWAIT 0x0800
+/*
+ * Atomic write callers need this to query in the slow path of the mixed
+ * mapping case, when a contiguous extent can be split across two adjacent
+ * leaf nodes. See EXT4_MAP_QUERY_LAST_IN_LEAF.
+ */
+#define EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF 0x1000
/*
* The bit position of these flags must not overlap with any of the
@@ -733,6 +759,13 @@ enum {
#define EXT4_EX_NOCACHE 0x40000000
#define EXT4_EX_FORCE_CACHE 0x20000000
#define EXT4_EX_NOFAIL 0x10000000
+/*
+ * ext4_map_query_blocks() uses this filter mask to select the flags that
+ * need to be passed when looking up / querying the on-disk extent tree.
+ */
+#define EXT4_EX_QUERY_FILTER (EXT4_EX_NOCACHE | EXT4_EX_FORCE_CACHE |\
+ EXT4_EX_NOFAIL |\
+ EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF)
/*
* Flags used by ext4_free_blocks
@@ -1056,15 +1089,16 @@ struct ext4_inode_info {
/* End of lblk range that needs to be committed in this fast commit */
ext4_lblk_t i_fc_lblk_len;
- /* Number of ongoing updates on this inode */
- atomic_t i_fc_updates;
- atomic_t i_unwritten; /* Nr. of inflight conversions pending */
+ spinlock_t i_raw_lock; /* protects updates to the raw inode */
/* Fast commit wait queue for this inode */
wait_queue_head_t i_fc_wait;
- /* Protect concurrent accesses on i_fc_lblk_start, i_fc_lblk_len */
- struct mutex i_fc_lock;
+ /*
+ * Protect concurrent accesses on i_fc_lblk_start, i_fc_lblk_len
+ * and inode's EXT4_FC_STATE_COMMITTING state bit.
+ */
+ spinlock_t i_fc_lock;
/*
* i_disksize keeps track of what the inode size is ON DISK, not
@@ -1097,8 +1131,6 @@ struct ext4_inode_info {
struct inode vfs_inode;
struct jbd2_inode *jinode;
- spinlock_t i_raw_lock; /* protects updates to the raw inode */
-
/*
* File creation time. Its function is same as that of
* struct timespec64 i_{a,c,m}time in the generic inode.
@@ -1123,6 +1155,8 @@ struct ext4_inode_info {
ext4_lblk_t i_es_shrink_lblk; /* Offset where we start searching for
extents to shrink. Protected by
i_es_lock */
+ u64 i_es_seq; /* Change counter for extents.
+ Protected by i_es_lock */
/* ialloc */
ext4_group_t i_last_alloc_group;
@@ -1141,6 +1175,7 @@ struct ext4_inode_info {
/* quota space reservation, managed internally by quota code */
qsize_t i_reserved_quota;
#endif
+ spinlock_t i_block_reservation_lock;
/* Lock protecting lists below */
spinlock_t i_completed_io_lock;
@@ -1151,8 +1186,6 @@ struct ext4_inode_info {
struct list_head i_rsv_conversion_list;
struct work_struct i_rsv_conversion_work;
- spinlock_t i_block_reservation_lock;
-
/*
* Transactions that contain inode's metadata needed to complete
* fsync and fdatasync, respectively.
@@ -1168,6 +1201,14 @@ struct ext4_inode_info {
__u32 i_csum_seed;
kprojid_t i_projid;
+
+#ifdef CONFIG_FS_ENCRYPTION
+ struct fscrypt_inode_info *i_crypt_info;
+#endif
+
+#ifdef CONFIG_FS_VERITY
+ struct fsverity_info *i_verity_info;
+#endif
};
/*
@@ -1428,7 +1469,9 @@ struct ext4_super_block {
__le16 s_encoding; /* Filename charset encoding */
__le16 s_encoding_flags; /* Filename charset encoding flags */
__le32 s_orphan_file_inum; /* Inode for tracking orphan inodes */
- __le32 s_reserved[94]; /* Padding to the end of the block */
+ __le16 s_def_resuid_hi;
+ __le16 s_def_resgid_hi;
+ __le32 s_reserved[93]; /* Padding to the end of the block */
__le32 s_checksum; /* crc32c(superblock) */
};
@@ -1579,16 +1622,14 @@ struct ext4_sb_info {
unsigned short *s_mb_offsets;
unsigned int *s_mb_maxs;
unsigned int s_group_info_size;
- unsigned int s_mb_free_pending;
+ atomic_t s_mb_free_pending;
struct list_head s_freed_data_list[2]; /* List of blocks to be freed
after commit completed */
struct list_head s_discard_list;
struct work_struct s_discard_work;
atomic_t s_retry_alloc_pending;
- struct list_head *s_mb_avg_fragment_size;
- rwlock_t *s_mb_avg_fragment_size_locks;
- struct list_head *s_mb_largest_free_orders;
- rwlock_t *s_mb_largest_free_orders_locks;
+ struct xarray *s_mb_avg_fragment_size;
+ struct xarray *s_mb_largest_free_orders;
/* tunables */
unsigned long s_stripe;
@@ -1600,12 +1641,15 @@ struct ext4_sb_info {
unsigned int s_mb_order2_reqs;
unsigned int s_mb_group_prealloc;
unsigned int s_max_dir_size_kb;
- /* where last allocation was done - for stream allocation */
- unsigned long s_mb_last_group;
- unsigned long s_mb_last_start;
unsigned int s_mb_prefetch;
unsigned int s_mb_prefetch_limit;
unsigned int s_mb_best_avail_max_trim_order;
+ unsigned int s_sb_update_sec;
+ unsigned int s_sb_update_kb;
+
+ /* where last allocation was done - for stream allocation */
+ ext4_group_t *s_mb_last_groups;
+ unsigned int s_mb_nr_global_goals;
/* stats for buddy allocator */
atomic_t s_bal_reqs; /* number of reqs with len > 1 */
@@ -1615,12 +1659,10 @@ struct ext4_sb_info {
atomic_t s_bal_cX_ex_scanned[EXT4_MB_NUM_CRS]; /* total extents scanned */
atomic_t s_bal_groups_scanned; /* number of groups scanned */
atomic_t s_bal_goals; /* goal hits */
+ atomic_t s_bal_stream_goals; /* stream allocation global goal hits */
atomic_t s_bal_len_goals; /* len goal hits */
atomic_t s_bal_breaks; /* too long searches */
atomic_t s_bal_2orders; /* 2^order hits */
- atomic_t s_bal_p2_aligned_bad_suggestions;
- atomic_t s_bal_goal_fast_bad_suggestions;
- atomic_t s_bal_best_avail_bad_suggestions;
atomic64_t s_bal_cX_groups_considered[EXT4_MB_NUM_CRS];
atomic64_t s_bal_cX_hits[EXT4_MB_NUM_CRS];
atomic64_t s_bal_cX_failed[EXT4_MB_NUM_CRS]; /* cX loop didn't find blocks */
@@ -1662,8 +1704,10 @@ struct ext4_sb_info {
/* record the last minlen when FITRIM is called. */
unsigned long s_last_trim_minblks;
- /* Reference to checksum algorithm driver via cryptoapi */
- struct crypto_shash *s_chksum_driver;
+ /* minimum folio order of a page cache allocation */
+ u16 s_min_folio_order;
+ /* supported maximum folio order, 0 means not supported */
+ u16 s_max_folio_order;
/* Precomputed FS UUID checksum for seeding other checksums */
__u32 s_csum_seed;
@@ -1752,7 +1796,7 @@ struct ext4_sb_info {
* following fields:
* ei->i_fc_list, s_fc_dentry_q, s_fc_q, s_fc_bytes, s_fc_bh.
*/
- spinlock_t s_fc_lock;
+ struct mutex s_fc_lock;
struct buffer_head *s_fc_bh;
struct ext4_fc_stats s_fc_stats;
tid_t s_fc_ineligible_tid;
@@ -1802,6 +1846,18 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
}
+static inline int ext4_get_resuid(struct ext4_super_block *es)
+{
+ return le16_to_cpu(es->s_def_resuid) |
+ le16_to_cpu(es->s_def_resuid_hi) << 16;
+}
+
+static inline int ext4_get_resgid(struct ext4_super_block *es)
+{
+ return le16_to_cpu(es->s_def_resgid) |
+ le16_to_cpu(es->s_def_resgid_hi) << 16;
+}
+
/*
* Returns: sbi->field[index]
* Used to access an array element from the following sbi fields which require
@@ -1824,7 +1880,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
*/
enum {
EXT4_MF_MNTDIR_SAMPLED,
- EXT4_MF_FC_INELIGIBLE /* Fast commit ineligible */
+ EXT4_MF_FC_INELIGIBLE, /* Fast commit ineligible */
+ EXT4_MF_JOURNAL_DESTROY /* Journal is in process of destroying */
};
static inline void ext4_set_mount_flag(struct super_block *sb, int bit)
@@ -1910,6 +1967,7 @@ enum {
EXT4_STATE_LUSTRE_EA_INODE, /* Lustre-style ea_inode */
EXT4_STATE_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */
EXT4_STATE_FC_COMMITTING, /* Fast commit ongoing */
+ EXT4_STATE_FC_FLUSHING_DATA, /* Fast commit flushing data */
EXT4_STATE_ORPHAN_FILE, /* Inode orphaned in orphan file */
};
@@ -1970,6 +2028,16 @@ static inline bool ext4_verity_in_progress(struct inode *inode)
#define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
/*
+ * Check whether the inode is tracked as orphan (either in orphan file or
+ * orphan list).
+ */
+static inline bool ext4_inode_orphan_tracked(struct inode *inode)
+{
+ return ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE) ||
+ !list_empty(&EXT4_I(inode)->i_orphan);
+}
+
+/*
* Codes for operating systems
*/
#define EXT4_OS_LINUX 0
@@ -2235,15 +2303,32 @@ extern int ext4_feature_set_ok(struct super_block *sb, int readonly);
/*
* Superblock flags
*/
-#define EXT4_FLAGS_RESIZING 0
-#define EXT4_FLAGS_SHUTDOWN 1
-#define EXT4_FLAGS_BDEV_IS_DAX 2
+enum {
+ EXT4_FLAGS_RESIZING, /* Avoid superblock update and resize race */
+ EXT4_FLAGS_SHUTDOWN, /* Prevent access to the file system */
+ EXT4_FLAGS_BDEV_IS_DAX, /* Current block device supports DAX */
+ EXT4_FLAGS_EMERGENCY_RO,/* Emergency read-only due to fs errors */
+};
static inline int ext4_forced_shutdown(struct super_block *sb)
{
return test_bit(EXT4_FLAGS_SHUTDOWN, &EXT4_SB(sb)->s_ext4_flags);
}
+static inline int ext4_emergency_ro(struct super_block *sb)
+{
+ return test_bit(EXT4_FLAGS_EMERGENCY_RO, &EXT4_SB(sb)->s_ext4_flags);
+}
+
+static inline int ext4_emergency_state(struct super_block *sb)
+{
+ if (unlikely(ext4_forced_shutdown(sb)))
+ return -EIO;
+ if (unlikely(ext4_emergency_ro(sb)))
+ return -EROFS;
+ return 0;
+}
+
/*
* Default values for user and/or group using reserved blocks
*/
@@ -2275,10 +2360,19 @@ static inline int ext4_forced_shutdown(struct super_block *sb)
#define EXT4_DEFM_NODELALLOC 0x0800
/*
- * Default journal batch times
+ * Default journal batch times and ioprio.
*/
#define EXT4_DEF_MIN_BATCH_TIME 0
#define EXT4_DEF_MAX_BATCH_TIME 15000 /* 15ms */
+#define EXT4_DEF_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
+
+
+/*
+ * Default values for superblock update
+ */
+#define EXT4_DEF_SB_UPDATE_INTERVAL_SEC (3600) /* seconds (1 hour) */
+#define EXT4_DEF_SB_UPDATE_INTERVAL_KB (16384) /* kilobytes (16MB) */
+
/*
* Minimum number of groups in a flexgroup before we separate out
@@ -2402,28 +2496,19 @@ static inline unsigned int ext4_dir_rec_len(__u8 name_len,
return (rec_len & ~EXT4_DIR_ROUND);
}
-/*
- * If we ever get support for fs block sizes > page_size, we'll need
- * to remove the #if statements in the next two functions...
- */
static inline unsigned int
ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize)
{
unsigned len = le16_to_cpu(dlen);
-#if (PAGE_SIZE >= 65536)
if (len == EXT4_MAX_REC_LEN || len == 0)
return blocksize;
return (len & 65532) | ((len & 3) << 16);
-#else
- return len;
-#endif
}
static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
{
BUG_ON((len > blocksize) || (blocksize > (1 << 18)) || (len & 3));
-#if (PAGE_SIZE >= 65536)
if (len < 65536)
return cpu_to_le16(len);
if (len == blocksize) {
@@ -2433,9 +2518,6 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
return cpu_to_le16(0);
}
return cpu_to_le16((len & 65532) | ((len >> 16) & 3));
-#else
- return cpu_to_le16(len);
-#endif
}
/*
@@ -2460,22 +2542,9 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
#define DX_HASH_SIPHASH 6
#define DX_HASH_LAST DX_HASH_SIPHASH
-static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc,
- const void *address, unsigned int length)
+static inline u32 ext4_chksum(u32 crc, const void *address, unsigned int length)
{
- struct {
- struct shash_desc shash;
- char ctx[4];
- } desc;
-
- BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver)!=sizeof(desc.ctx));
-
- desc.shash.tfm = sbi->s_chksum_driver;
- *(u32 *)desc.ctx = crc;
-
- BUG_ON(crypto_shash_update(&desc.shash, address, length));
-
- return *(u32 *)desc.ctx;
+ return crc32c(crc, address, length);
}
#ifdef __KERNEL__
@@ -2825,8 +2894,7 @@ extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
struct ext4_dir_entry_2 *dirent,
struct fscrypt_str *ent_name);
extern void ext4_htree_free_dir_info(struct dir_private_info *p);
-extern int ext4_find_dest_de(struct inode *dir, struct inode *inode,
- struct buffer_head *bh,
+extern int ext4_find_dest_de(struct inode *dir, struct buffer_head *bh,
void *buf, int buf_size,
struct ext4_filename *fname,
struct ext4_dir_entry_2 **dest_de);
@@ -2908,8 +2976,6 @@ void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
void ext4_fc_track_create(handle_t *handle, struct dentry *dentry);
void ext4_fc_track_inode(handle_t *handle, struct inode *inode);
void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handle);
-void ext4_fc_start_update(struct inode *inode);
-void ext4_fc_stop_update(struct inode *inode);
void ext4_fc_del(struct inode *inode);
bool ext4_fc_replay_check_excluded(struct super_block *sb, ext4_fsblk_t block);
void ext4_fc_replay_cleanup(struct super_block *sb);
@@ -2959,6 +3025,7 @@ static inline bool ext4_mb_cr_expensive(enum criteria cr)
void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
struct ext4_inode_info *ei);
int ext4_inode_is_fast_symlink(struct inode *inode);
+void ext4_check_map_extents_env(struct inode *inode);
struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int);
struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
@@ -2979,6 +3046,7 @@ int ext4_walk_page_buffers(handle_t *handle,
struct buffer_head *bh));
int do_journal_get_write_access(handle_t *handle, struct inode *inode,
struct buffer_head *bh);
+void ext4_set_inode_mapping_order(struct inode *inode);
#define FALL_BACK_TO_NONDELALLOC 1
#define CONVERT_INLINE_DATA 2
@@ -3016,13 +3084,17 @@ extern int ext4_inode_attach_jinode(struct inode *inode);
extern int ext4_can_truncate(struct inode *inode);
extern int ext4_truncate(struct inode *);
extern int ext4_break_layouts(struct inode *);
+extern int ext4_truncate_page_cache_block_range(struct inode *inode,
+ loff_t start, loff_t end);
extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length);
extern void ext4_set_inode_flags(struct inode *, bool init);
extern int ext4_alloc_da_blocks(struct inode *inode);
extern void ext4_set_aops(struct inode *inode);
-extern int ext4_writepage_trans_blocks(struct inode *);
extern int ext4_normal_submit_inode_data_buffers(struct jbd2_inode *jinode);
extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
+extern int ext4_chunk_trans_extent(struct inode *inode, int nrblocks);
+extern int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
+ int pextents);
extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
loff_t lstart, loff_t lend);
extern vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf);
@@ -3034,6 +3106,17 @@ extern void ext4_da_update_reserve_space(struct inode *inode,
extern int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk,
ext4_fsblk_t pblk, ext4_lblk_t len);
+static inline bool is_special_ino(struct super_block *sb, unsigned long ino)
+{
+ struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
+ return (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) ||
+ ino == le32_to_cpu(es->s_usr_quota_inum) ||
+ ino == le32_to_cpu(es->s_grp_quota_inum) ||
+ ino == le32_to_cpu(es->s_prj_quota_inum) ||
+ ino == le32_to_cpu(es->s_orphan_file_inum);
+}
+
/* indirect.c */
extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
@@ -3046,8 +3129,8 @@ extern int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
int ext4_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa);
-int ext4_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+ struct dentry *dentry, struct file_kattr *fa);
+int ext4_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
extern void ext4_reset_inode_seed(struct inode *inode);
int ext4_update_overhead(struct super_block *sb, bool force);
int ext4_force_shutdown(struct super_block *sb, u32 flags);
@@ -3095,6 +3178,8 @@ extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
sector_t block, blk_opf_t op_flags);
extern struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
sector_t block);
+extern struct buffer_head *ext4_sb_bread_nofail(struct super_block *sb,
+ sector_t block);
extern void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags,
bh_end_io_t *end_io, bool simu_fail);
extern int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
@@ -3103,8 +3188,7 @@ extern int ext4_read_bh_lock(struct buffer_head *bh, blk_opf_t op_flags, bool wa
extern void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block);
extern int ext4_seq_options_show(struct seq_file *seq, void *offset);
extern int ext4_calculate_overhead(struct super_block *sb);
-extern __le32 ext4_superblock_csum(struct super_block *sb,
- struct ext4_super_block *es);
+extern __le32 ext4_superblock_csum(struct ext4_super_block *es);
extern void ext4_superblock_csum_set(struct super_block *sb);
extern int ext4_alloc_flex_bg_array(struct super_block *sb,
ext4_group_t ngroup);
@@ -3274,18 +3358,10 @@ extern void ext4_group_desc_csum_set(struct super_block *sb, __u32 group,
extern int ext4_register_li_request(struct super_block *sb,
ext4_group_t first_not_zeroed);
-static inline int ext4_has_metadata_csum(struct super_block *sb)
-{
- WARN_ON_ONCE(ext4_has_feature_metadata_csum(sb) &&
- !EXT4_SB(sb)->s_chksum_driver);
-
- return ext4_has_feature_metadata_csum(sb) &&
- (EXT4_SB(sb)->s_chksum_driver != NULL);
-}
-
static inline int ext4_has_group_desc_csum(struct super_block *sb)
{
- return ext4_has_feature_gdt_csum(sb) || ext4_has_metadata_csum(sb);
+ return ext4_has_feature_gdt_csum(sb) ||
+ ext4_has_feature_metadata_csum(sb);
}
#define ext4_read_incompat_64bit_val(es, name) \
@@ -3370,6 +3446,13 @@ static inline unsigned int ext4_flex_bg_size(struct ext4_sb_info *sbi)
return 1 << sbi->s_log_groups_per_flex;
}
+static inline loff_t ext4_get_maxbytes(struct inode *inode)
+{
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ return inode->i_sb->s_maxbytes;
+ return EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
+}
+
#define ext4_std_error(sb, errno) \
do { \
if ((errno)) \
@@ -3434,8 +3517,6 @@ struct ext4_group_info {
void *bb_bitmap;
#endif
struct rw_semaphore alloc_sem;
- struct list_head bb_avg_fragment_size_node;
- struct list_head bb_largest_free_order_node;
ext4_grpblk_t bb_counters[]; /* Nr of free power-of-two-block
* regions, index is order.
* bb_counters[3] = 5 means
@@ -3486,23 +3567,28 @@ static inline int ext4_fs_is_busy(struct ext4_sb_info *sbi)
return (atomic_read(&sbi->s_lock_busy) > EXT4_CONTENTION_THRESHOLD);
}
+static inline bool ext4_try_lock_group(struct super_block *sb, ext4_group_t group)
+{
+ if (!spin_trylock(ext4_group_lock_ptr(sb, group)))
+ return false;
+ /*
+ * We're able to grab the lock right away, so drop the lock
+ * contention counter.
+ */
+ atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, -1, 0);
+ return true;
+}
+
static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
{
- spinlock_t *lock = ext4_group_lock_ptr(sb, group);
- if (spin_trylock(lock))
- /*
- * We're able to grab the lock right away, so drop the
- * lock contention counter.
- */
- atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, -1, 0);
- else {
+ if (!ext4_try_lock_group(sb, group)) {
/*
* The lock is busy, so bump the contention counter,
* and then wait on the spin lock.
*/
atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, 1,
EXT4_MAX_CONTENTION);
- spin_lock(lock);
+ spin_lock(ext4_group_lock_ptr(sb, group));
}
}
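ext4_try_lock_group() lets scanning code skip a group whose lock is contended instead of sleeping in ext4_lock_group(). A hedged sketch of such a caller (the real users live in mballoc.c and are not part of this hunk; the function name is invented):

/* Sketch only: skip contended groups during an allocation scan. */
static void example_scan_groups(struct super_block *sb, ext4_group_t ngroups)
{
	ext4_group_t group;

	for (group = 0; group < ngroups; group++) {
		if (!ext4_try_lock_group(sb, group))
			continue;	/* lock is busy: move on to the next group */
		/* ... examine or allocate from this group ... */
		ext4_unlock_group(sb, group);
	}
}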
@@ -3557,6 +3643,7 @@ extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin);
extern int ext4_get_max_inline_size(struct inode *inode);
extern int ext4_find_inline_data_nolock(struct inode *inode);
extern int ext4_destroy_inline_data(handle_t *handle, struct inode *inode);
+extern void ext4_update_final_de(void *de_buf, int old_size, int new_size);
int ext4_readpage_inline(struct inode *inode, struct folio *folio);
extern int ext4_try_to_write_inline_data(struct address_space *mapping,
@@ -3565,11 +3652,11 @@ extern int ext4_try_to_write_inline_data(struct address_space *mapping,
struct folio **foliop);
int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
unsigned copied, struct folio *folio);
-extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
- struct inode *inode,
- loff_t pos, unsigned len,
- struct folio **foliop,
- void **fsdata);
+extern int ext4_generic_write_inline_data(struct address_space *mapping,
+ struct inode *inode,
+ loff_t pos, unsigned len,
+ struct folio **foliop,
+ void **fsdata, bool da);
extern int ext4_try_add_inline_entry(handle_t *handle,
struct ext4_filename *fname,
struct inode *dir, struct inode *inode);
@@ -3616,10 +3703,10 @@ static inline int ext4_has_inline_data(struct inode *inode)
extern const struct inode_operations ext4_dir_inode_operations;
extern const struct inode_operations ext4_special_inode_operations;
extern struct dentry *ext4_get_parent(struct dentry *child);
-extern struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
- struct ext4_dir_entry_2 *de,
- int blocksize, int csum_size,
- unsigned int parent_ino, int dotdot_real_len);
+extern int ext4_init_dirblock(handle_t *handle, struct inode *inode,
+ struct buffer_head *dir_block,
+ unsigned int parent_ino, void *inline_buf,
+ int inline_size);
extern void ext4_initialize_dirent_tail(struct buffer_head *bh,
unsigned int blocksize);
extern int ext4_handle_dirty_dirblock(handle_t *handle, struct inode *inode,
@@ -3702,6 +3789,8 @@ extern long ext4_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
extern int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
loff_t offset, ssize_t len);
+extern int ext4_convert_unwritten_extents_atomic(handle_t *handle,
+ struct inode *inode, loff_t offset, ssize_t len);
extern int ext4_convert_unwritten_io_end_vec(handle_t *handle,
ext4_io_end_t *io_end);
extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
@@ -3804,34 +3893,19 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh)
set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
}
-/* For ioend & aio unwritten conversion wait queues */
-#define EXT4_WQ_HASH_SZ 37
-#define ext4_ioend_wq(v) (&ext4__ioend_wq[((unsigned long)(v)) %\
- EXT4_WQ_HASH_SZ])
-extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
-
extern int ext4_resize_begin(struct super_block *sb);
extern int ext4_resize_end(struct super_block *sb, bool update_backups);
-static inline void ext4_set_io_unwritten_flag(struct inode *inode,
- struct ext4_io_end *io_end)
+static inline void ext4_set_io_unwritten_flag(struct ext4_io_end *io_end)
{
- if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+ if (!(io_end->flag & EXT4_IO_END_UNWRITTEN))
io_end->flag |= EXT4_IO_END_UNWRITTEN;
- atomic_inc(&EXT4_I(inode)->i_unwritten);
- }
}
static inline void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
{
- struct inode *inode = io_end->inode;
-
- if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
+ if (io_end->flag & EXT4_IO_END_UNWRITTEN)
io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
- /* Wake up anyone waiting on unwritten extent conversion */
- if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
- wake_up_all(ext4_ioend_wq(inode));
- }
}
extern const struct iomap_ops ext4_iomap_ops;
@@ -3854,7 +3928,9 @@ static inline int ext4_buffer_uptodate(struct buffer_head *bh)
static inline bool ext4_inode_can_atomic_write(struct inode *inode)
{
- return S_ISREG(inode->i_mode) && EXT4_SB(inode->i_sb)->s_awu_min > 0;
+ return S_ISREG(inode->i_mode) &&
+ ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
+ EXT4_SB(inode->i_sb)->s_awu_min > 0;
}
extern int ext4_block_write_begin(handle_t *handle, struct folio *folio,
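ext4_emergency_state() folds the forced-shutdown and emergency-read-only checks into a single errno-returning helper. A minimal sketch of how a modifying call site would typically gate on it (assumed; the actual call sites are in other files of this series):

/* Sketch only: -EIO on forced shutdown, -EROFS on emergency read-only. */
static int example_begin_modification(struct super_block *sb)
{
	int ret = ext4_emergency_state(sb);

	if (unlikely(ret))
		return ret;
	/* ... safe to start modifying the filesystem ... */
	return 0;
}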
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 26435f3a3094..c484125d963f 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -31,13 +31,6 @@
#define CHECK_BINSEARCH__
/*
- * If EXT_STATS is defined then stats numbers are collected.
- * These number will be displayed at umount time.
- */
-#define EXT_STATS_
-
-
-/*
* ext4_inode has i_block array (60 bytes total).
* The first 12 bytes store ext4_extent_header;
* the remainder stores an array of ext4_extent.
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index da4a82456383..05e5946ed9b3 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -63,12 +63,14 @@ static void ext4_put_nojournal(handle_t *handle)
*/
static int ext4_journal_check_start(struct super_block *sb)
{
+ int ret;
journal_t *journal;
might_sleep();
- if (unlikely(ext4_forced_shutdown(sb)))
- return -EIO;
+ ret = ext4_emergency_state(sb);
+ if (unlikely(ret))
+ return ret;
if (WARN_ON_ONCE(sb_rdonly(sb)))
return -EROFS;
@@ -244,7 +246,8 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line,
}
} else
ext4_check_bdev_write_error(sb);
- if (trigger_type == EXT4_JTR_NONE || !ext4_has_metadata_csum(sb))
+ if (trigger_type == EXT4_JTR_NONE ||
+ !ext4_has_feature_metadata_csum(sb))
return 0;
BUG_ON(trigger_type >= EXT4_JOURNAL_TRIGGER_COUNT);
jbd2_journal_set_triggers(bh,
@@ -276,9 +279,16 @@ int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
bh, is_metadata, inode->i_mode,
test_opt(inode->i_sb, DATA_FLAGS));
- /* In the no journal case, we can just do a bforget and return */
+ /*
+ * In the no-journal case, we should wait for any ongoing buffer I/O
+ * to complete and then do a forget.
+ */
if (!ext4_handle_valid(handle)) {
- bforget(bh);
+ if (bh) {
+ clear_buffer_dirty(bh);
+ wait_on_buffer(bh);
+ __bforget(bh);
+ }
return 0;
}
@@ -331,7 +341,8 @@ int __ext4_journal_get_create_access(const char *where, unsigned int line,
err);
return err;
}
- if (trigger_type == EXT4_JTR_NONE || !ext4_has_metadata_csum(sb))
+ if (trigger_type == EXT4_JTR_NONE ||
+ !ext4_has_feature_metadata_csum(sb))
return 0;
BUG_ON(trigger_type >= EXT4_JOURNAL_TRIGGER_COUNT);
jbd2_journal_set_triggers(bh,
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 0c77697d5e90..63d17c5201b5 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -122,90 +122,6 @@
#define EXT4_HT_EXT_CONVERT 11
#define EXT4_HT_MAX 12
-/**
- * struct ext4_journal_cb_entry - Base structure for callback information.
- *
- * This struct is a 'seed' structure for a using with your own callback
- * structs. If you are using callbacks you must allocate one of these
- * or another struct of your own definition which has this struct
- * as it's first element and pass it to ext4_journal_callback_add().
- */
-struct ext4_journal_cb_entry {
- /* list information for other callbacks attached to the same handle */
- struct list_head jce_list;
-
- /* Function to call with this callback structure */
- void (*jce_func)(struct super_block *sb,
- struct ext4_journal_cb_entry *jce, int error);
-
- /* user data goes here */
-};
-
-/**
- * ext4_journal_callback_add: add a function to call after transaction commit
- * @handle: active journal transaction handle to register callback on
- * @func: callback function to call after the transaction has committed:
- * @sb: superblock of current filesystem for transaction
- * @jce: returned journal callback data
- * @rc: journal state at commit (0 = transaction committed properly)
- * @jce: journal callback data (internal and function private data struct)
- *
- * The registered function will be called in the context of the journal thread
- * after the transaction for which the handle was created has completed.
- *
- * No locks are held when the callback function is called, so it is safe to
- * call blocking functions from within the callback, but the callback should
- * not block or run for too long, or the filesystem will be blocked waiting for
- * the next transaction to commit. No journaling functions can be used, or
- * there is a risk of deadlock.
- *
- * There is no guaranteed calling order of multiple registered callbacks on
- * the same transaction.
- */
-static inline void _ext4_journal_callback_add(handle_t *handle,
- struct ext4_journal_cb_entry *jce)
-{
- /* Add the jce to transaction's private list */
- list_add_tail(&jce->jce_list, &handle->h_transaction->t_private_list);
-}
-
-static inline void ext4_journal_callback_add(handle_t *handle,
- void (*func)(struct super_block *sb,
- struct ext4_journal_cb_entry *jce,
- int rc),
- struct ext4_journal_cb_entry *jce)
-{
- struct ext4_sb_info *sbi =
- EXT4_SB(handle->h_transaction->t_journal->j_private);
-
- /* Add the jce to transaction's private list */
- jce->jce_func = func;
- spin_lock(&sbi->s_md_lock);
- _ext4_journal_callback_add(handle, jce);
- spin_unlock(&sbi->s_md_lock);
-}
-
-
-/**
- * ext4_journal_callback_del: delete a registered callback
- * @handle: active journal transaction handle on which callback was registered
- * @jce: registered journal callback entry to unregister
- * Return true if object was successfully removed
- */
-static inline bool ext4_journal_callback_try_del(handle_t *handle,
- struct ext4_journal_cb_entry *jce)
-{
- bool deleted;
- struct ext4_sb_info *sbi =
- EXT4_SB(handle->h_transaction->t_journal->j_private);
-
- spin_lock(&sbi->s_md_lock);
- deleted = !list_empty(&jce->jce_list);
- list_del_init(&jce->jce_list);
- spin_unlock(&sbi->s_md_lock);
- return deleted;
-}
-
int
ext4_mark_iloc_dirty(handle_t *handle,
struct inode *inode,
@@ -403,10 +319,10 @@ static inline int ext4_journal_ensure_credits(handle_t *handle, int credits,
revoke_creds, 0);
}
-static inline int ext4_journal_blocks_per_page(struct inode *inode)
+static inline int ext4_journal_blocks_per_folio(struct inode *inode)
{
if (EXT4_JOURNAL(inode) != NULL)
- return jbd2_journal_blocks_per_page(inode);
+ return jbd2_journal_blocks_per_folio(inode);
return 0;
}
@@ -513,4 +429,33 @@ static inline int ext4_should_dioread_nolock(struct inode *inode)
return 1;
}
+/*
+ * Pass journal explicitly as it may not be cached in the sbi->s_journal in some
+ * cases
+ */
+static inline int ext4_journal_destroy(struct ext4_sb_info *sbi, journal_t *journal)
+{
+ int err = 0;
+
+ /*
+ * At this point only two things can be operating on the journal:
+ * the JBD2 thread performing a transaction commit, and s_sb_upd_work
+ * issuing a superblock update through the journal. Once we set
+ * EXT4_MF_JOURNAL_DESTROY, new ext4_handle_error() calls will not
+ * queue s_sb_upd_work, and ext4_force_commit() makes sure any
+ * ext4_handle_error() calls from the running transaction commit are
+ * finished. Hence no new s_sb_upd_work can be queued after we
+ * flush it here.
+ */
+ ext4_set_mount_flag(sbi->s_sb, EXT4_MF_JOURNAL_DESTROY);
+
+ ext4_force_commit(sbi->s_sb);
+ flush_work(&sbi->s_sb_upd_work);
+
+ err = jbd2_journal_destroy(journal);
+ sbi->s_journal = NULL;
+
+ return err;
+}
+
#endif /* _EXT4_JBD2_H */
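ext4_journal_destroy() now owns forcing the last commit, flushing s_sb_upd_work and clearing sbi->s_journal. A hedged sketch of a teardown call site (the real callers are in super.c and are not shown in this hunk):

/* Sketch only: typical unmount-time teardown using the new helper. */
static void example_release_journal(struct ext4_sb_info *sbi)
{
	journal_t *journal = sbi->s_journal;
	int err;

	if (!journal)
		return;

	err = ext4_journal_destroy(sbi, journal);	/* also sets sbi->s_journal = NULL */
	if (err)
		pr_warn("EXT4-fs: error %d destroying journal\n", err);
}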
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index a07a98a4b97a..2cf5759ba689 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -50,10 +50,9 @@ static __le32 ext4_extent_block_csum(struct inode *inode,
struct ext4_extent_header *eh)
{
struct ext4_inode_info *ei = EXT4_I(inode);
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
__u32 csum;
- csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
+ csum = ext4_chksum(ei->i_csum_seed, (__u8 *)eh,
EXT4_EXTENT_TAIL_OFFSET(eh));
return cpu_to_le32(csum);
}
@@ -63,7 +62,7 @@ static int ext4_extent_block_csum_verify(struct inode *inode,
{
struct ext4_extent_tail *et;
- if (!ext4_has_metadata_csum(inode->i_sb))
+ if (!ext4_has_feature_metadata_csum(inode->i_sb))
return 1;
et = find_ext4_extent_tail(eh);
@@ -77,7 +76,7 @@ static void ext4_extent_block_csum_set(struct inode *inode,
{
struct ext4_extent_tail *et;
- if (!ext4_has_metadata_csum(inode->i_sb))
+ if (!ext4_has_feature_metadata_csum(inode->i_sb))
return;
et = find_ext4_extent_tail(eh);
@@ -334,7 +333,7 @@ ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
int nofail)
{
int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
- int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;
+ int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_SPLIT_NOMERGE;
if (nofail)
flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;
@@ -611,6 +610,8 @@ int ext4_ext_precache(struct inode *inode)
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return 0; /* not an extent-mapped inode */
+ ext4_check_map_extents_env(inode);
+
down_read(&ei->i_data_sem);
depth = ext_depth(inode);
@@ -1530,7 +1531,7 @@ static int ext4_ext_search_left(struct inode *inode,
static int ext4_ext_search_right(struct inode *inode,
struct ext4_ext_path *path,
ext4_lblk_t *logical, ext4_fsblk_t *phys,
- struct ext4_extent *ret_ex)
+ struct ext4_extent *ret_ex, int flags)
{
struct buffer_head *bh = NULL;
struct ext4_extent_header *eh;
@@ -1604,7 +1605,8 @@ got_index:
ix++;
while (++depth < path->p_depth) {
/* subtract from p_depth to get proper eh_depth */
- bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
+ bh = read_extent_tree_block(inode, ix, path->p_depth - depth,
+ flags);
if (IS_ERR(bh))
return PTR_ERR(bh);
eh = ext_block_hdr(bh);
@@ -1612,7 +1614,7 @@ got_index:
put_bh(bh);
}
- bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
+ bh = read_extent_tree_block(inode, ix, path->p_depth - depth, flags);
if (IS_ERR(bh))
return PTR_ERR(bh);
eh = ext_block_hdr(bh);
@@ -2000,7 +2002,7 @@ ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
}
/* try to insert block into found extent and return */
- if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
+ if (ex && !(gb_flags & EXT4_GET_BLOCKS_SPLIT_NOMERGE)) {
/*
* Try to see whether we should rather test the extent on
@@ -2179,7 +2181,7 @@ has_space:
merge:
/* try to merge extents */
- if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
+ if (!(gb_flags & EXT4_GET_BLOCKS_SPLIT_NOMERGE))
ext4_ext_try_to_merge(handle, inode, path, nearex);
/* time to correct all indexes above */
@@ -2211,7 +2213,7 @@ static int ext4_fill_es_cache_info(struct inode *inode,
while (block <= end) {
next = 0;
flags = 0;
- if (!ext4_es_lookup_extent(inode, block, &next, &es))
+ if (!ext4_es_lookup_extent(inode, block, &next, &es, NULL))
break;
if (ext4_es_is_unwritten(&es))
flags |= FIEMAP_EXTENT_UNWRITTEN;
@@ -2396,18 +2398,20 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
{
int index;
- int depth;
/* If we are converting the inline data, only one is needed here. */
if (ext4_has_inline_data(inode))
return 1;
- depth = ext_depth(inode);
-
+ /*
+ * Extent tree can change between the time we estimate credits and
+ * the time we actually modify the tree. Assume the worst case.
+ */
if (extents <= 1)
- index = depth * 2;
+ index = (EXT4_MAX_EXTENT_DEPTH * 2) + extents;
else
- index = depth * 3;
+ index = (EXT4_MAX_EXTENT_DEPTH * 3) +
+ DIV_ROUND_UP(extents, ext4_ext_space_block(inode, 0));
return index;
}
@@ -2821,6 +2825,7 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
struct partial_cluster partial;
handle_t *handle;
int i = 0, err = 0;
+ int flags = EXT4_EX_NOCACHE | EXT4_EX_NOFAIL;
partial.pclu = 0;
partial.lblk = 0;
@@ -2851,8 +2856,7 @@ again:
ext4_fsblk_t pblk;
/* find extent for or closest extent to this block */
- path = ext4_find_extent(inode, end, NULL,
- EXT4_EX_NOCACHE | EXT4_EX_NOFAIL);
+ path = ext4_find_extent(inode, end, NULL, flags);
if (IS_ERR(path)) {
ext4_journal_stop(handle);
return PTR_ERR(path);
@@ -2918,7 +2922,7 @@ again:
*/
lblk = ex_end + 1;
err = ext4_ext_search_right(inode, path, &lblk, &pblk,
- NULL);
+ NULL, flags);
if (err < 0)
goto out;
if (pblk) {
@@ -2994,8 +2998,7 @@ again:
i + 1, ext4_idx_pblock(path[i].p_idx));
memset(path + i + 1, 0, sizeof(*path));
bh = read_extent_tree_block(inode, path[i].p_idx,
- depth - i - 1,
- EXT4_EX_NOCACHE);
+ depth - i - 1, flags);
if (IS_ERR(bh)) {
/* should we reset i_size? */
err = PTR_ERR(bh);
@@ -3221,7 +3224,7 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
else
ext4_ext_mark_initialized(ex);
- if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
+ if (!(flags & EXT4_GET_BLOCKS_SPLIT_NOMERGE))
ext4_ext_try_to_merge(handle, inode, path, ex);
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
@@ -3365,7 +3368,7 @@ static struct ext4_ext_path *ext4_split_extent(handle_t *handle,
if (map->m_lblk + map->m_len < ee_block + ee_len) {
split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
- flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
+ flags1 = flags | EXT4_GET_BLOCKS_SPLIT_NOMERGE;
if (unwritten)
split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
EXT4_EXT_MARK_UNWRIT2;
@@ -3718,10 +3721,6 @@ static struct ext4_ext_path *ext4_split_convert_extents(handle_t *handle,
>> inode->i_sb->s_blocksize_bits;
if (eof_block < map->m_lblk + map->m_len)
eof_block = map->m_lblk + map->m_len;
- /*
- * It is safe to convert extent to initialized via explicit
- * zeroout only if extent is fully inside i_size or new_size.
- */
depth = ext_depth(inode);
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
@@ -3732,11 +3731,15 @@ static struct ext4_ext_path *ext4_split_convert_extents(handle_t *handle,
split_flag |= EXT4_EXT_DATA_VALID1;
/* Convert to initialized */
} else if (flags & EXT4_GET_BLOCKS_CONVERT) {
+ /*
+ * It is safe to convert extent to initialized via explicit
+ * zeroout only if extent is fully inside i_size or new_size.
+ */
split_flag |= ee_block + ee_len <= eof_block ?
EXT4_EXT_MAY_ZEROOUT : 0;
split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
}
- flags |= EXT4_GET_BLOCKS_PRE_IO;
+ flags |= EXT4_GET_BLOCKS_SPLIT_NOMERGE;
return ext4_split_extent(handle, inode, path, map, split_flag, flags,
allocated);
}
@@ -3908,7 +3911,7 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
*allocated, newblock);
/* get_block() before submitting IO, split the extent */
- if (flags & EXT4_GET_BLOCKS_PRE_IO) {
+ if (flags & EXT4_GET_BLOCKS_SPLIT_NOMERGE) {
path = ext4_split_convert_extents(handle, inode, map, path,
flags | EXT4_GET_BLOCKS_CONVERT, allocated);
if (IS_ERR(path))
@@ -4202,7 +4205,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
/* find extent for this block */
- path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
+ path = ext4_find_extent(inode, map->m_lblk, NULL, flags);
if (IS_ERR(path)) {
err = PTR_ERR(path);
goto out;
@@ -4314,7 +4317,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
if (err)
goto out;
ar.lright = map->m_lblk;
- err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
+ err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright,
+ &ex2, flags);
if (err < 0)
goto out;
@@ -4433,6 +4437,20 @@ got_allocated_blocks:
allocated = map->m_len;
ext4_ext_show_leaf(inode, path);
out:
+ /*
+ * We never use EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF with the CREATE flag,
+ * so we know that the depth used here is correct, since no block
+ * allocation was done if EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF is set.
+ * If we ever start using this QUERY flag together with CREATE, we will
+ * need to re-calculate the depth as it might have changed due to block
+ * allocation.
+ */
+ if (flags & EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF) {
+ WARN_ON_ONCE(flags & EXT4_GET_BLOCKS_CREATE);
+ if (!err && ex && (ex == EXT_LAST_EXTENT(path[depth].p_hdr)))
+ map->m_flags |= EXT4_MAP_QUERY_LAST_IN_LEAF;
+ }
+
ext4_free_ext_path(path);
trace_ext4_ext_map_blocks_exit(inode, flags, map,
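EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF is only meaningful for lookups, never together with CREATE, as the comment above notes. A hedged sketch of how an atomic-write caller might use it (function and variable names are assumptions, not actual ext4 code):

/* Sketch only: report whether the mapped extent is the last one in its leaf,
 * without allocating anything. Returns 1 if it is, 0 if not, <0 on error. */
static int example_query_last_in_leaf(struct inode *inode, ext4_lblk_t lblk,
				      unsigned int len)
{
	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = len };
	int ret;

	ret = ext4_map_blocks(NULL, inode, &map,
			      EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF);
	if (ret < 0)
		return ret;
	return !!(map.m_flags & EXT4_MAP_QUERY_LAST_IN_LEAF);
}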
@@ -4483,6 +4501,8 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
struct ext4_map_blocks map;
unsigned int credits;
loff_t epos, old_size = i_size_read(inode);
+ unsigned int blkbits = inode->i_blkbits;
+ bool alloc_zero = false;
BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
map.m_lblk = offset;
@@ -4496,6 +4516,17 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
/*
+ * Doing the actual zero-out write during a running journal transaction
+ * costs a lot. First allocate an unwritten extent and then
+ * convert it to written after zeroing it out.
+ */
+ if (flags & EXT4_GET_BLOCKS_ZERO) {
+ flags &= ~EXT4_GET_BLOCKS_ZERO;
+ flags |= EXT4_GET_BLOCKS_UNWRIT_EXT;
+ alloc_zero = true;
+ }
+
+ /*
* credits to insert 1 extent into extent tree
*/
credits = ext4_chunk_trans_blocks(inode, len);
@@ -4531,9 +4562,7 @@ retry:
* allow a full retry cycle for any remaining allocations
*/
retries = 0;
- map.m_lblk += ret;
- map.m_len = len = len - ret;
- epos = (loff_t)map.m_lblk << inode->i_blkbits;
+ epos = EXT4_LBLK_TO_B(inode, map.m_lblk + ret);
inode_set_ctime_current(inode);
if (new_size) {
if (epos > new_size)
@@ -4553,6 +4582,21 @@ retry:
ret2 = ret3 ? ret3 : ret2;
if (unlikely(ret2))
break;
+
+ if (alloc_zero &&
+ (map.m_flags & (EXT4_MAP_MAPPED | EXT4_MAP_UNWRITTEN))) {
+ ret2 = ext4_issue_zeroout(inode, map.m_lblk, map.m_pblk,
+ map.m_len);
+ if (likely(!ret2))
+ ret2 = ext4_convert_unwritten_extents(NULL,
+ inode, (loff_t)map.m_lblk << blkbits,
+ (loff_t)map.m_len << blkbits);
+ if (ret2)
+ break;
+ }
+
+ map.m_lblk += ret;
+ map.m_len = len = len - ret;
}
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
@@ -4568,131 +4612,69 @@ static long ext4_zero_range(struct file *file, loff_t offset,
loff_t len, int mode)
{
struct inode *inode = file_inode(file);
- struct address_space *mapping = file->f_mapping;
handle_t *handle = NULL;
- unsigned int max_blocks;
loff_t new_size = 0;
- int ret = 0;
- int flags;
- int credits;
- int partial_begin, partial_end;
- loff_t start, end;
- ext4_lblk_t lblk;
+ loff_t end = offset + len;
+ ext4_lblk_t start_lblk, end_lblk;
+ unsigned int blocksize = i_blocksize(inode);
unsigned int blkbits = inode->i_blkbits;
+ int ret, flags, credits;
trace_ext4_zero_range(inode, offset, len, mode);
+ WARN_ON_ONCE(!inode_is_locked(inode));
- /*
- * Round up offset. This is not fallocate, we need to zero out
- * blocks, so convert interior block aligned part of the range to
- * unwritten and possibly manually zero out unaligned parts of the
- * range. Here, start and partial_begin are inclusive, end and
- * partial_end are exclusive.
- */
- start = round_up(offset, 1 << blkbits);
- end = round_down((offset + len), 1 << blkbits);
-
- if (start < offset || end > offset + len)
- return -EINVAL;
- partial_begin = offset & ((1 << blkbits) - 1);
- partial_end = (offset + len) & ((1 << blkbits) - 1);
-
- lblk = start >> blkbits;
- max_blocks = (end >> blkbits);
- if (max_blocks < lblk)
- max_blocks = 0;
- else
- max_blocks -= lblk;
-
- inode_lock(inode);
-
- /*
- * Indirect files do not support unwritten extents
- */
- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
- ret = -EOPNOTSUPP;
- goto out_mutex;
- }
+ /* Indirect files do not support unwritten extents */
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+ return -EOPNOTSUPP;
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- (offset + len > inode->i_size ||
- offset + len > EXT4_I(inode)->i_disksize)) {
- new_size = offset + len;
+ (end > inode->i_size || end > EXT4_I(inode)->i_disksize)) {
+ new_size = end;
ret = inode_newsize_ok(inode, new_size);
if (ret)
- goto out_mutex;
+ return ret;
}
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
-
- /* Wait all existing dio workers, newcomers will block on i_rwsem */
- inode_dio_wait(inode);
-
- ret = file_modified(file);
- if (ret)
- goto out_mutex;
-
/* Preallocate the range including the unaligned edges */
- if (partial_begin || partial_end) {
- ret = ext4_alloc_file_blocks(file,
- round_down(offset, 1 << blkbits) >> blkbits,
- (round_up((offset + len), 1 << blkbits) -
- round_down(offset, 1 << blkbits)) >> blkbits,
- new_size, flags);
- if (ret)
- goto out_mutex;
+ if (!IS_ALIGNED(offset | end, blocksize)) {
+ ext4_lblk_t alloc_lblk = offset >> blkbits;
+ ext4_lblk_t len_lblk = EXT4_MAX_BLOCKS(len, offset, blkbits);
+ ret = ext4_alloc_file_blocks(file, alloc_lblk, len_lblk,
+ new_size, flags);
+ if (ret)
+ return ret;
}
- /* Zero range excluding the unaligned edges */
- if (max_blocks > 0) {
- flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
- EXT4_EX_NOCACHE);
-
- /*
- * Prevent page faults from reinstantiating pages we have
- * released from page cache.
- */
- filemap_invalidate_lock(mapping);
-
- ret = ext4_break_layouts(inode);
- if (ret) {
- filemap_invalidate_unlock(mapping);
- goto out_mutex;
- }
-
- ret = ext4_update_disksize_before_punch(inode, offset, len);
- if (ret) {
- filemap_invalidate_unlock(mapping);
- goto out_mutex;
- }
+ ret = ext4_update_disksize_before_punch(inode, offset, len);
+ if (ret)
+ return ret;
- /*
- * For journalled data we need to write (and checkpoint) pages
- * before discarding page cache to avoid inconsitent data on
- * disk in case of crash before zeroing trans is committed.
- */
- if (ext4_should_journal_data(inode)) {
- ret = filemap_write_and_wait_range(mapping, start,
- end - 1);
- if (ret) {
- filemap_invalidate_unlock(mapping);
- goto out_mutex;
- }
- }
+ /* Now release the pages and zero block aligned part of pages */
+ ret = ext4_truncate_page_cache_block_range(inode, offset, end);
+ if (ret)
+ return ret;
- /* Now release the pages and zero block aligned part of pages */
- truncate_pagecache_range(inode, start, end - 1);
- inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ /* Zero range excluding the unaligned edges */
+ start_lblk = EXT4_B_TO_LBLK(inode, offset);
+ end_lblk = end >> blkbits;
+ if (end_lblk > start_lblk) {
+ ext4_lblk_t zero_blks = end_lblk - start_lblk;
- ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
- flags);
- filemap_invalidate_unlock(mapping);
+ if (mode & FALLOC_FL_WRITE_ZEROES)
+ flags = EXT4_GET_BLOCKS_CREATE_ZERO | EXT4_EX_NOCACHE;
+ else
+ flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
+ EXT4_EX_NOCACHE);
+ ret = ext4_alloc_file_blocks(file, start_lblk, zero_blks,
+ new_size, flags);
if (ret)
- goto out_mutex;
+ return ret;
}
- if (!partial_begin && !partial_end)
- goto out_mutex;
+	/* Finish zeroing out if the range doesn't contain a partial block */
+ if (IS_ALIGNED(offset | end, blocksize))
+ return ret;
/*
* In worst case we have to writeout two nonadjacent unwritten
@@ -4705,27 +4687,69 @@ static long ext4_zero_range(struct file *file, loff_t offset,
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_std_error(inode->i_sb, ret);
- goto out_mutex;
+ return ret;
}
- inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ /* Zero out partial block at the edges of the range */
+ ret = ext4_zero_partial_blocks(handle, inode, offset, len);
+ if (ret)
+ goto out_handle;
+
if (new_size)
ext4_update_inode_size(inode, new_size);
ret = ext4_mark_inode_dirty(handle, inode);
if (unlikely(ret))
goto out_handle;
- /* Zero out partial block at the edges of the range */
- ret = ext4_zero_partial_blocks(handle, inode, offset, len);
- if (ret >= 0)
- ext4_update_inode_fsync_trans(handle, inode, 1);
+ ext4_update_inode_fsync_trans(handle, inode, 1);
if (file->f_flags & O_SYNC)
ext4_handle_sync(handle);
out_handle:
ext4_journal_stop(handle);
-out_mutex:
- inode_unlock(inode);
+ return ret;
+}
+
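The block-index arithmetic above (round the start of the byte range up to a block boundary, round the end down) decides whether any whole blocks exist to be zeroed through unwritten extents at all. Below is a small stand-alone sketch of that arithmetic, assuming 4 KiB blocks and open-coding the rounding that the EXT4_B_TO_LBLK()/">> blkbits" helpers are taken to perform; it is an illustration, not ext4 code.

/*
 * Stand-alone sketch of the logical-block rounding used by the rewritten
 * ext4_zero_range() above. Block size (4 KiB) and the rounding rules are
 * assumptions mirrored in plain C, not the actual ext4 macros.
 */
#include <stdio.h>

int main(void)
{
	unsigned int blkbits = 12;            /* assume 4 KiB blocks */
	long long offset = 5000, len = 3000;  /* arbitrary example range */
	long long end = offset + len;

	/* first block fully inside the range: round the start up */
	long long start_lblk = (offset + (1 << blkbits) - 1) >> blkbits;
	/* first block past the fully covered part: round the end down */
	long long end_lblk = end >> blkbits;

	printf("byte range [%lld, %lld)\n", offset, end);
	printf("start_lblk=%lld end_lblk=%lld\n", start_lblk, end_lblk);
	if (end_lblk > start_lblk)
		printf("%lld full block(s) to zero via unwritten extents\n",
		       end_lblk - start_lblk);
	else
		printf("no full block; only partial-block zeroing is needed\n");
	return 0;
}

With offset 5000 and length 3000 the rounded-up start (block 2) lands past the rounded-down end (block 1), so the function would skip the unwritten-extent path and only perform the journalled partial-block zeroing at the edges.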
+static long ext4_do_fallocate(struct file *file, loff_t offset,
+ loff_t len, int mode)
+{
+ struct inode *inode = file_inode(file);
+ loff_t end = offset + len;
+ loff_t new_size = 0;
+ ext4_lblk_t start_lblk, len_lblk;
+ int ret;
+
+ trace_ext4_fallocate_enter(inode, offset, len, mode);
+ WARN_ON_ONCE(!inode_is_locked(inode));
+
+ start_lblk = offset >> inode->i_blkbits;
+ len_lblk = EXT4_MAX_BLOCKS(len, offset, inode->i_blkbits);
+
+	/* We only support preallocation for extent-based files. */
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+ (end > inode->i_size || end > EXT4_I(inode)->i_disksize)) {
+ new_size = end;
+ ret = inode_newsize_ok(inode, new_size);
+ if (ret)
+ goto out;
+ }
+
+ ret = ext4_alloc_file_blocks(file, start_lblk, len_lblk, new_size,
+ EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
+ if (ret)
+ goto out;
+
+ if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
+ ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
+ EXT4_I(inode)->i_sync_tid);
+ }
+out:
+ trace_ext4_fallocate_exit(inode, offset, len_lblk, ret);
return ret;
}
@@ -4739,12 +4763,8 @@ out_mutex:
long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
struct inode *inode = file_inode(file);
- loff_t new_size = 0;
- unsigned int max_blocks;
- int ret = 0;
- int flags;
- ext4_lblk_t lblk;
- unsigned int blkbits = inode->i_blkbits;
+ struct address_space *mapping = file->f_mapping;
+ int ret;
/*
* Encrypted inodes can't handle collapse range or insert
@@ -4755,83 +4775,158 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
if (IS_ENCRYPTED(inode) &&
(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
return -EOPNOTSUPP;
+ /*
+ * Don't allow writing zeroes if the underlying device does not
+ * enable the unmap write zeroes operation.
+ */
+ if ((mode & FALLOC_FL_WRITE_ZEROES) &&
+ !bdev_write_zeroes_unmap_sectors(inode->i_sb->s_bdev))
+ return -EOPNOTSUPP;
/* Return error if mode is not supported */
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
- FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
- FALLOC_FL_INSERT_RANGE))
+ FALLOC_FL_ZERO_RANGE | FALLOC_FL_COLLAPSE_RANGE |
+ FALLOC_FL_INSERT_RANGE | FALLOC_FL_WRITE_ZEROES))
return -EOPNOTSUPP;
inode_lock(inode);
ret = ext4_convert_inline_data(inode);
- inode_unlock(inode);
if (ret)
- goto exit;
+ goto out_inode_lock;
- if (mode & FALLOC_FL_PUNCH_HOLE) {
- ret = ext4_punch_hole(file, offset, len);
- goto exit;
- }
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
+ inode_dio_wait(inode);
- if (mode & FALLOC_FL_COLLAPSE_RANGE) {
- ret = ext4_collapse_range(file, offset, len);
- goto exit;
- }
+ ret = file_modified(file);
+ if (ret)
+ goto out_inode_lock;
- if (mode & FALLOC_FL_INSERT_RANGE) {
- ret = ext4_insert_range(file, offset, len);
- goto exit;
+ if ((mode & FALLOC_FL_MODE_MASK) == FALLOC_FL_ALLOCATE_RANGE) {
+ ret = ext4_do_fallocate(file, offset, len, mode);
+ goto out_inode_lock;
}
- if (mode & FALLOC_FL_ZERO_RANGE) {
+ /*
+ * Follow-up operations will drop page cache, hold invalidate lock
+ * to prevent page faults from reinstantiating pages we have
+ * released from page cache.
+ */
+ filemap_invalidate_lock(mapping);
+
+ ret = ext4_break_layouts(inode);
+ if (ret)
+ goto out_invalidate_lock;
+
+ switch (mode & FALLOC_FL_MODE_MASK) {
+ case FALLOC_FL_PUNCH_HOLE:
+ ret = ext4_punch_hole(file, offset, len);
+ break;
+ case FALLOC_FL_COLLAPSE_RANGE:
+ ret = ext4_collapse_range(file, offset, len);
+ break;
+ case FALLOC_FL_INSERT_RANGE:
+ ret = ext4_insert_range(file, offset, len);
+ break;
+ case FALLOC_FL_ZERO_RANGE:
+ case FALLOC_FL_WRITE_ZEROES:
ret = ext4_zero_range(file, offset, len, mode);
- goto exit;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
}
- trace_ext4_fallocate_enter(inode, offset, len, mode);
- lblk = offset >> blkbits;
+out_invalidate_lock:
+ filemap_invalidate_unlock(mapping);
+out_inode_lock:
+ inode_unlock(inode);
+ return ret;
+}
+
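The dispatch above funnels every fallocate mode through one locking sequence: i_rwsem, a DIO drain, and, for the modes that drop page cache, the invalidate lock. A minimal user-space caller exercising the FALLOC_FL_ZERO_RANGE branch is sketched below; FALLOC_FL_WRITE_ZEROES is newer and may be absent from installed headers, so it is deliberately not used here.

/* Minimal user-space sketch (not part of this patch) that exercises the
 * FALLOC_FL_ZERO_RANGE path dispatched above. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	int fd = open(argv[1], O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Zero 1 MiB starting at offset 4096 without changing i_size. */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
		      4096, 1 << 20) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}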
+/*
+ * This function converts a range of blocks to written extents. The caller of
+ * this function will pass the start offset and the size. All unwritten extents
+ * within this range will be converted to written extents.
+ *
+ * This function is called from the direct IO end io call back function for
+ * atomic writes, to convert the unwritten extents after IO is completed.
+ *
+ * Note that the requirement for atomic writes is that all conversion should
+ * happen atomically in a single fs journal transaction. We mainly allocate
+ * unwritten extents either on a hole or on a pre-existing unwritten extent range in
+ * ext4_map_blocks_atomic_write(). The only case where we can have multiple
+ * unwritten extents in a range [offset, offset+len) is when there is a split
+ * unwritten extent between two leaf nodes which was cached in extent status
+ * cache during ext4_iomap_alloc() time. That will allow
+ * ext4_map_blocks_atomic_write() to return the unwritten extent range w/o going
+ * into the slow path. That means we might need a loop to convert such an
+ * unwritten extent split across leaf blocks within a single journal transaction.
+ * Splitting extents across leaf nodes is a rare case, but let's still handle that
+ * to meet the requirements of multi-fsblock atomic writes.
+ *
+ * Returns 0 on success.
+ */
+int ext4_convert_unwritten_extents_atomic(handle_t *handle, struct inode *inode,
+ loff_t offset, ssize_t len)
+{
+ unsigned int max_blocks;
+ int ret = 0, ret2 = 0, ret3 = 0;
+ struct ext4_map_blocks map;
+ unsigned int blkbits = inode->i_blkbits;
+ unsigned int credits = 0;
+ int flags = EXT4_GET_BLOCKS_IO_CONVERT_EXT | EXT4_EX_NOCACHE;
+
+ map.m_lblk = offset >> blkbits;
max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
- flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
- inode_lock(inode);
+ if (!handle) {
+ /*
+ * TODO: An optimization can be added later by having an extent
+ * status flag e.g. EXTENT_STATUS_SPLIT_LEAF. If we query that
+ * it can tell if the extent in the cache is a split extent.
+ * But for now let's assume pextents as 2 always.
+ */
+ credits = ext4_meta_trans_blocks(inode, max_blocks, 2);
+ }
- /*
- * We only support preallocation for extent-based files only
- */
- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
- ret = -EOPNOTSUPP;
- goto out;
+ if (credits) {
+ handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ return ret;
+ }
}
- if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- (offset + len > inode->i_size ||
- offset + len > EXT4_I(inode)->i_disksize)) {
- new_size = offset + len;
- ret = inode_newsize_ok(inode, new_size);
- if (ret)
- goto out;
+ while (ret >= 0 && ret < max_blocks) {
+ map.m_lblk += ret;
+ map.m_len = (max_blocks -= ret);
+ ret = ext4_map_blocks(handle, inode, &map, flags);
+ if (ret != max_blocks)
+ ext4_msg(inode->i_sb, KERN_INFO,
+ "inode #%lu: block %u: len %u: "
+ "split block mapping found for atomic write, "
+ "ret = %d",
+ inode->i_ino, map.m_lblk,
+ map.m_len, ret);
+ if (ret <= 0)
+ break;
}
- /* Wait all existing dio workers, newcomers will block on i_rwsem */
- inode_dio_wait(inode);
+ ret2 = ext4_mark_inode_dirty(handle, inode);
- ret = file_modified(file);
- if (ret)
- goto out;
+ if (credits) {
+ ret3 = ext4_journal_stop(handle);
+ if (unlikely(ret3))
+ ret2 = ret3;
+ }
- ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
- if (ret)
- goto out;
+ if (ret <= 0 || ret2)
+ ext4_warning(inode->i_sb,
+ "inode #%lu: block %u: len %u: "
+ "returned %d or %d",
+ inode->i_ino, map.m_lblk,
+ map.m_len, ret, ret2);
- if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
- ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
- EXT4_I(inode)->i_sync_tid);
- }
-out:
- inode_unlock(inode);
- trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
-exit:
- return ret;
+ return ret > 0 ? ret2 : ret;
}
/*
@@ -4873,8 +4968,14 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
break;
}
}
+ /*
+		 * Do not cache any unrelated extents, as this path does not
+		 * hold the i_rwsem or invalidate_lock; caching here could
+		 * corrupt the extent status tree.
+ */
ret = ext4_map_blocks(handle, inode, &map,
- EXT4_GET_BLOCKS_IO_CONVERT_EXT);
+ EXT4_GET_BLOCKS_IO_CONVERT_EXT |
+ EXT4_EX_NOCACHE);
if (ret <= 0)
ext4_warning(inode->i_sb,
"inode #%lu: block %u: len %u: "
@@ -4985,12 +5086,7 @@ static const struct iomap_ops ext4_iomap_xattr_ops = {
static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len)
{
- u64 maxbytes;
-
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- maxbytes = inode->i_sb->s_maxbytes;
- else
- maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
+ u64 maxbytes = ext4_get_maxbytes(inode);
if (*len == 0)
return -EINVAL;
@@ -5010,10 +5106,11 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
{
int error = 0;
+ inode_lock_shared(inode);
if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
error = ext4_ext_precache(inode);
if (error)
- return error;
+ goto unlock;
fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
}
@@ -5024,15 +5121,19 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
*/
error = ext4_fiemap_check_ranges(inode, start, &len);
if (error)
- return error;
+ goto unlock;
if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
- return iomap_fiemap(inode, fieinfo, start, len,
- &ext4_iomap_xattr_ops);
+ error = iomap_fiemap(inode, fieinfo, start, len,
+ &ext4_iomap_xattr_ops);
+ } else {
+ error = iomap_fiemap(inode, fieinfo, start, len,
+ &ext4_iomap_report_ops);
}
-
- return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
+unlock:
+ inode_unlock_shared(inode);
+ return error;
}
int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
@@ -5053,7 +5154,9 @@ int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
+ inode_lock_shared(inode);
error = ext4_ext_precache(inode);
+ inode_unlock_shared(inode);
if (error)
return error;
fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
@@ -5112,7 +5215,7 @@ ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
credits = depth + 2;
}
- restart_credits = ext4_writepage_trans_blocks(inode);
+ restart_credits = ext4_chunk_trans_extent(inode, 0);
err = ext4_datasem_ensure_credits(handle, inode, credits,
restart_credits, 0);
if (err) {
@@ -5332,109 +5435,74 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct address_space *mapping = inode->i_mapping;
- ext4_lblk_t punch_start, punch_stop;
+ loff_t end = offset + len;
+ ext4_lblk_t start_lblk, end_lblk;
handle_t *handle;
unsigned int credits;
- loff_t new_size, ioffset;
+ loff_t start, new_size;
int ret;
- /*
- * We need to test this early because xfstests assumes that a
- * collapse range of (0, 1) will return EOPNOTSUPP if the file
- * system does not support collapse range.
- */
+ trace_ext4_collapse_range(inode, offset, len);
+ WARN_ON_ONCE(!inode_is_locked(inode));
+
+ /* Currently just for extent based files */
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return -EOPNOTSUPP;
-
/* Collapse range works only on fs cluster size aligned regions. */
if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
return -EINVAL;
-
- trace_ext4_collapse_range(inode, offset, len);
-
- punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
- punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
-
- inode_lock(inode);
/*
* There is no need to overlap collapse range with EOF, in which case
* it is effectively a truncate operation
*/
- if (offset + len >= inode->i_size) {
- ret = -EINVAL;
- goto out_mutex;
- }
-
- /* Currently just for extent based files */
- if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- ret = -EOPNOTSUPP;
- goto out_mutex;
- }
-
- /* Wait for existing dio to complete */
- inode_dio_wait(inode);
-
- ret = file_modified(file);
- if (ret)
- goto out_mutex;
-
- /*
- * Prevent page faults from reinstantiating pages we have released from
- * page cache.
- */
- filemap_invalidate_lock(mapping);
-
- ret = ext4_break_layouts(inode);
- if (ret)
- goto out_mmap;
+ if (end >= inode->i_size)
+ return -EINVAL;
/*
+	 * Write the tail of the last page before the removed range and the
+	 * data that will be shifted, since they will get removed from the page cache
+ * below. We are also protected from pages becoming dirty by
+ * i_rwsem and invalidate_lock.
* Need to round down offset to be aligned with page size boundary
* for page size > block size.
*/
- ioffset = round_down(offset, PAGE_SIZE);
- /*
- * Write tail of the last page before removed range since it will get
- * removed from the page cache below.
- */
- ret = filemap_write_and_wait_range(mapping, ioffset, offset);
+ start = round_down(offset, PAGE_SIZE);
+ ret = filemap_write_and_wait_range(mapping, start, offset);
+ if (!ret)
+ ret = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
if (ret)
- goto out_mmap;
- /*
- * Write data that will be shifted to preserve them when discarding
- * page cache below. We are also protected from pages becoming dirty
- * by i_rwsem and invalidate_lock.
- */
- ret = filemap_write_and_wait_range(mapping, offset + len,
- LLONG_MAX);
- if (ret)
- goto out_mmap;
- truncate_pagecache(inode, ioffset);
+ return ret;
+
+ truncate_pagecache(inode, start);
- credits = ext4_writepage_trans_blocks(inode);
+ credits = ext4_chunk_trans_extent(inode, 0);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out_mmap;
- }
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
+ start_lblk = offset >> inode->i_blkbits;
+ end_lblk = (offset + len) >> inode->i_blkbits;
+
+ ext4_check_map_extents_env(inode);
+
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
- ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start);
+ ext4_es_remove_extent(inode, start_lblk, EXT_MAX_BLOCKS - start_lblk);
- ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
+ ret = ext4_ext_remove_space(inode, start_lblk, end_lblk - 1);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
- goto out_stop;
+ goto out_handle;
}
ext4_discard_preallocations(inode);
- ret = ext4_ext_shift_extents(inode, handle, punch_stop,
- punch_stop - punch_start, SHIFT_LEFT);
+ ret = ext4_ext_shift_extents(inode, handle, end_lblk,
+ end_lblk - start_lblk, SHIFT_LEFT);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
- goto out_stop;
+ goto out_handle;
}
new_size = inode->i_size - len;
@@ -5442,18 +5510,16 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
EXT4_I(inode)->i_disksize = new_size;
up_write(&EXT4_I(inode)->i_data_sem);
- if (IS_SYNC(inode))
- ext4_handle_sync(handle);
- inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ret = ext4_mark_inode_dirty(handle, inode);
+ if (ret)
+ goto out_handle;
+
ext4_update_inode_fsync_trans(handle, inode, 1);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
-out_stop:
+out_handle:
ext4_journal_stop(handle);
-out_mmap:
- filemap_invalidate_unlock(mapping);
-out_mutex:
- inode_unlock(inode);
return ret;
}
@@ -5473,100 +5539,65 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
handle_t *handle;
struct ext4_ext_path *path;
struct ext4_extent *extent;
- ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
+ ext4_lblk_t start_lblk, len_lblk, ee_start_lblk = 0;
unsigned int credits, ee_len;
- int ret = 0, depth, split_flag = 0;
- loff_t ioffset;
+ int ret, depth, split_flag = 0;
+ loff_t start;
- /*
- * We need to test this early because xfstests assumes that an
- * insert range of (0, 1) will return EOPNOTSUPP if the file
- * system does not support insert range.
- */
+ trace_ext4_insert_range(inode, offset, len);
+ WARN_ON_ONCE(!inode_is_locked(inode));
+
+ /* Currently just for extent based files */
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return -EOPNOTSUPP;
-
/* Insert range works only on fs cluster size aligned regions. */
if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
return -EINVAL;
-
- trace_ext4_insert_range(inode, offset, len);
-
- offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
- len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);
-
- inode_lock(inode);
- /* Currently just for extent based files */
- if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- ret = -EOPNOTSUPP;
- goto out_mutex;
- }
-
- /* Check whether the maximum file size would be exceeded */
- if (len > inode->i_sb->s_maxbytes - inode->i_size) {
- ret = -EFBIG;
- goto out_mutex;
- }
-
/* Offset must be less than i_size */
- if (offset >= inode->i_size) {
- ret = -EINVAL;
- goto out_mutex;
- }
-
- /* Wait for existing dio to complete */
- inode_dio_wait(inode);
-
- ret = file_modified(file);
- if (ret)
- goto out_mutex;
+ if (offset >= inode->i_size)
+ return -EINVAL;
+ /* Check whether the maximum file size would be exceeded */
+ if (len > inode->i_sb->s_maxbytes - inode->i_size)
+ return -EFBIG;
/*
- * Prevent page faults from reinstantiating pages we have released from
- * page cache.
+ * Write out all dirty pages. Need to round down to align start offset
+ * to page size boundary for page size > block size.
*/
- filemap_invalidate_lock(mapping);
-
- ret = ext4_break_layouts(inode);
+ start = round_down(offset, PAGE_SIZE);
+ ret = filemap_write_and_wait_range(mapping, start, LLONG_MAX);
if (ret)
- goto out_mmap;
+ return ret;
- /*
- * Need to round down to align start offset to page size boundary
- * for page size > block size.
- */
- ioffset = round_down(offset, PAGE_SIZE);
- /* Write out all dirty pages */
- ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
- LLONG_MAX);
- if (ret)
- goto out_mmap;
- truncate_pagecache(inode, ioffset);
+ truncate_pagecache(inode, start);
- credits = ext4_writepage_trans_blocks(inode);
+ credits = ext4_chunk_trans_extent(inode, 0);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out_mmap;
- }
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
/* Expand file to avoid data loss if there is error while shifting */
inode->i_size += len;
EXT4_I(inode)->i_disksize += len;
- inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ret = ext4_mark_inode_dirty(handle, inode);
if (ret)
- goto out_stop;
+ goto out_handle;
+
+ start_lblk = offset >> inode->i_blkbits;
+ len_lblk = len >> inode->i_blkbits;
+
+ ext4_check_map_extents_env(inode);
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
- path = ext4_find_extent(inode, offset_lblk, NULL, 0);
+ path = ext4_find_extent(inode, start_lblk, NULL, 0);
if (IS_ERR(path)) {
up_write(&EXT4_I(inode)->i_data_sem);
ret = PTR_ERR(path);
- goto out_stop;
+ goto out_handle;
}
depth = ext_depth(inode);
@@ -5576,50 +5607,47 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
ee_len = ext4_ext_get_actual_len(extent);
/*
- * If offset_lblk is not the starting block of extent, split
- * the extent @offset_lblk
+ * If start_lblk is not the starting block of extent, split
+ * the extent @start_lblk
*/
- if ((offset_lblk > ee_start_lblk) &&
- (offset_lblk < (ee_start_lblk + ee_len))) {
+ if ((start_lblk > ee_start_lblk) &&
+ (start_lblk < (ee_start_lblk + ee_len))) {
if (ext4_ext_is_unwritten(extent))
split_flag = EXT4_EXT_MARK_UNWRIT1 |
EXT4_EXT_MARK_UNWRIT2;
path = ext4_split_extent_at(handle, inode, path,
- offset_lblk, split_flag,
+ start_lblk, split_flag,
EXT4_EX_NOCACHE |
- EXT4_GET_BLOCKS_PRE_IO |
+ EXT4_GET_BLOCKS_SPLIT_NOMERGE |
EXT4_GET_BLOCKS_METADATA_NOFAIL);
}
if (IS_ERR(path)) {
up_write(&EXT4_I(inode)->i_data_sem);
ret = PTR_ERR(path);
- goto out_stop;
+ goto out_handle;
}
}
ext4_free_ext_path(path);
- ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk);
+ ext4_es_remove_extent(inode, start_lblk, EXT_MAX_BLOCKS - start_lblk);
/*
- * if offset_lblk lies in a hole which is at start of file, use
+ * if start_lblk lies in a hole which is at start of file, use
* ee_start_lblk to shift extents
*/
ret = ext4_ext_shift_extents(inode, handle,
- max(ee_start_lblk, offset_lblk), len_lblk, SHIFT_RIGHT);
-
+ max(ee_start_lblk, start_lblk), len_lblk, SHIFT_RIGHT);
up_write(&EXT4_I(inode)->i_data_sem);
+ if (ret)
+ goto out_handle;
+
+ ext4_update_inode_fsync_trans(handle, inode, 1);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
- if (ret >= 0)
- ext4_update_inode_fsync_trans(handle, inode, 1);
-out_stop:
+out_handle:
ext4_journal_stop(handle);
-out_mmap:
- filemap_invalidate_unlock(mapping);
-out_mutex:
- inode_unlock(inode);
return ret;
}
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index ae29832aab1e..e04fbf10fe4f 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -120,9 +120,40 @@
* memory. Hence, we will reclaim written/unwritten/hole extents from
* the tree under a heavy memory pressure.
*
+ * ==========================================================================
+ * 3. Assurance of Ext4 extent status tree consistency
+ *
+ * When mapping blocks, Ext4 queries the extent status tree first and should
+ * always trust that the extent status tree is consistent and up to date.
+ * Therefore, it is important to adhere to the following rules when creating,
+ * modifying and removing extents.
+ *
+ * 1. Besides fastcommit replay, when Ext4 creates or queries block mappings,
+ * the extent information should always be processed through the extent
+ * status tree instead of being organized manually through the on-disk
+ * extent tree.
+ *
+ * 2. When updating the extent tree, Ext4 should acquire the i_data_sem
+ * exclusively and update the extent status tree atomically. If the extents
+ * to be modified are large enough to exceed the range that a single
+ * i_data_sem can process (as ext4_datasem_ensure_credits() may drop
+ * i_data_sem to restart a transaction), it must (e.g. as ext4_punch_hole()
+ * does):
+ *
+ * a) Hold the i_rwsem and invalidate_lock exclusively. This ensures
+ * exclusion against page faults, as well as reads and writes that may
+ * concurrently modify the extent status tree.
+ * b) Evict all page cache in the affected range and recommend rebuilding
+ * or dropping the extent status tree after modifying the on-disk
+ * extent tree. This ensures exclusion against concurrent writebacks
+ * that do not hold those locks but only holds a folio lock.
+ *
+ * 3. Based on the rules above, when querying block mappings, Ext4 should at
+ * least hold the i_rwsem or invalidate_lock or folio lock(s) for the
+ * specified querying range.
*
* ==========================================================================
- * 3. Performance analysis
+ * 4. Performance analysis
*
* -- overhead
* 1. There is a cache extent for write access, so if writes are
@@ -134,7 +165,7 @@
*
*
* ==========================================================================
- * 4. TODO list
+ * 5. TODO list
*
* -- Refactor delayed space reservation
*
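Rule 2 above prescribes a locking shape for extent-tree updates that are too large for a single i_data_sem hold. The sketch below only illustrates that shape, modelled on what the comment says ext4_punch_hole() does; the function name, the range handling and the omitted includes and error paths are placeholders rather than real ext4 code.

/* Illustrative shape of rule 2 above; placeholders, not actual ext4 code. */
static int example_remove_large_range(struct inode *inode,
				      ext4_lblk_t start, ext4_lblk_t end)
{
	struct address_space *mapping = inode->i_mapping;

	inode_lock(inode);			/* a) i_rwsem, exclusive */
	filemap_invalidate_lock(mapping);	/* a) block page faults  */

	/* b) evict the affected page cache before touching the trees */
	truncate_pagecache_range(inode, (loff_t)start << inode->i_blkbits,
				 ((loff_t)(end + 1) << inode->i_blkbits) - 1);

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_es_remove_extent(inode, start, end - start + 1);
	/*
	 * ... modify the on-disk extent tree here; the transaction may be
	 * restarted, temporarily dropping i_data_sem via the credit helpers,
	 * which is exactly why the page cache was evicted above ...
	 */
	up_write(&EXT4_I(inode)->i_data_sem);

	filemap_invalidate_unlock(mapping);
	inode_unlock(inode);
	return 0;
}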
@@ -204,6 +235,13 @@ static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
return es->es_lblk + es->es_len - 1;
}
+static inline void ext4_es_inc_seq(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ WRITE_ONCE(ei->i_es_seq, ei->i_es_seq + 1);
+}
+
/*
* search through the tree for an delayed extent with a given offset. If
* it can't be found, try to find next extent.
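The i_es_seq counter bumped by ext4_es_inc_seq() lets a lookup caller snapshot a sequence number (through the *pseq out-parameter added to ext4_es_lookup_extent() further down in this patch) and later detect whether the tree was modified in between. The user-space sketch below shows that snapshot-and-revalidate pattern with a plain rwlock; all names are illustrative, not ext4 code.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;
static uint64_t tree_seq;	/* bumped on every modification */
static int tree_value;		/* stands in for the cached extent data */

static void writer_update(int v)
{
	pthread_rwlock_wrlock(&tree_lock);
	tree_value = v;
	tree_seq++;			/* analogous to ext4_es_inc_seq() */
	pthread_rwlock_unlock(&tree_lock);
}

static int reader_lookup(uint64_t *seq)
{
	pthread_rwlock_rdlock(&tree_lock);
	int v = tree_value;
	*seq = tree_seq;		/* analogous to the *pseq snapshot */
	pthread_rwlock_unlock(&tree_lock);
	return v;
}

int main(void)
{
	uint64_t seq;
	int v = reader_lookup(&seq);

	writer_update(42);		/* concurrent modification */

	pthread_rwlock_rdlock(&tree_lock);
	int stale = (tree_seq != seq);	/* cheap revalidation */
	pthread_rwlock_unlock(&tree_lock);

	if (stale)
		v = reader_lookup(&seq);	/* redo the lookup */
	printf("value %d (%s)\n", v, stale ? "re-looked up" : "still valid");
	return 0;
}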
@@ -875,7 +913,6 @@ void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
newes.es_lblk = lblk;
newes.es_len = len;
ext4_es_store_pblock_status(&newes, pblk, status);
- trace_ext4_es_insert_extent(inode, &newes);
ext4_es_insert_extent_check(inode, &newes);
@@ -924,6 +961,11 @@ retry:
}
pending = err3;
}
+ /*
+	 * TODO: For caching on-disk extents, there is no need to increment
+	 * the sequence counter; this requires future optimization.
+ */
+ ext4_es_inc_seq(inode);
error:
write_unlock(&EXT4_I(inode)->i_es_lock);
/*
@@ -950,6 +992,7 @@ error:
if (err1 || err2 || err3 < 0)
goto retry;
+ trace_ext4_es_insert_extent(inode, &newes);
ext4_es_print_tree(inode);
return;
}
@@ -996,8 +1039,8 @@ void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
* Return: 1 on found, 0 on not
*/
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t *next_lblk,
- struct extent_status *es)
+ ext4_lblk_t *next_lblk, struct extent_status *es,
+ u64 *pseq)
{
struct ext4_es_tree *tree;
struct ext4_es_stats *stats;
@@ -1056,6 +1099,8 @@ out:
} else
*next_lblk = 0;
}
+ if (pseq)
+ *pseq = EXT4_I(inode)->i_es_seq;
} else {
percpu_counter_inc(&stats->es_stats_cache_misses);
}
@@ -1519,7 +1564,6 @@ void ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
return;
- trace_ext4_es_remove_extent(inode, lblk, len);
es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
lblk, len, inode->i_ino);
@@ -1539,19 +1583,23 @@ retry:
*/
write_lock(&EXT4_I(inode)->i_es_lock);
err = __es_remove_extent(inode, lblk, end, &reserved, es);
+ if (err)
+ goto error;
/* Free preallocated extent if it didn't get used. */
if (es) {
if (!es->es_len)
__es_free_extent(es);
es = NULL;
}
+ ext4_es_inc_seq(inode);
+error:
write_unlock(&EXT4_I(inode)->i_es_lock);
if (err)
goto retry;
+ trace_ext4_es_remove_extent(inode, lblk, len);
ext4_es_print_tree(inode);
ext4_da_release_space(inode, reserved);
- return;
}
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
@@ -2110,8 +2158,6 @@ void ext4_es_insert_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
newes.es_lblk = lblk;
newes.es_len = len;
ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED);
- trace_ext4_es_insert_delayed_extent(inode, &newes, lclu_allocated,
- end_allocated);
ext4_es_insert_extent_check(inode, &newes);
@@ -2166,11 +2212,14 @@ retry:
pr2 = NULL;
}
}
+ ext4_es_inc_seq(inode);
error:
write_unlock(&EXT4_I(inode)->i_es_lock);
if (err1 || err2 || err3 < 0)
goto retry;
+ trace_ext4_es_insert_delayed_extent(inode, &newes, lclu_allocated,
+ end_allocated);
ext4_es_print_tree(inode);
ext4_print_pending_tree(inode);
return;
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index 8f9c008d11e8..f3396cf32b44 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -148,7 +148,7 @@ extern void ext4_es_find_extent_range(struct inode *inode,
struct extent_status *es);
extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t *next_lblk,
- struct extent_status *es);
+ struct extent_status *es, u64 *pseq);
extern bool ext4_es_scan_range(struct inode *inode,
int (*matching_fn)(struct extent_status *es),
ext4_lblk_t lblk, ext4_lblk_t end);
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index 26c4fc37edcf..fa66b08de999 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -12,6 +12,7 @@
#include "ext4_extents.h"
#include "mballoc.h"
+#include <linux/lockdep.h>
/*
* Ext4 Fast Commits
* -----------------
@@ -49,19 +50,27 @@
* that need to be committed during a fast commit in another in memory queue of
* inodes. During the commit operation, we commit in the following order:
*
- * [1] Lock inodes for any further data updates by setting COMMITTING state
- * [2] Submit data buffers of all the inodes
- * [3] Wait for [2] to complete
- * [4] Commit all the directory entry updates in the fast commit space
- * [5] Commit all the changed inode structures
- * [6] Write tail tag (this tag ensures the atomicity, please read the following
+ * [1] Prepare all the inodes to write out their data by setting
+ * "EXT4_STATE_FC_FLUSHING_DATA". This ensures that inode cannot be
+ * deleted while it is being flushed.
+ * [2] Flush data buffers to disk and clear "EXT4_STATE_FC_FLUSHING_DATA"
+ * state.
+ * [3] Lock the journal by calling jbd2_journal_lock_updates. This ensures that
+ *     all the existing handles finish and no new handles can start.
+ * [4] Mark all the fast commit eligible inodes as undergoing fast commit
+ * by setting "EXT4_STATE_FC_COMMITTING" state.
+ * [5] Unlock the journal by calling jbd2_journal_unlock_updates. This allows
+ * starting of new handles. If new handles try to start an update on
+ * any of the inodes that are being committed, ext4_fc_track_inode()
+ * will block until those inodes have finished the fast commit.
+ * [6] Commit all the directory entry updates in the fast commit space.
+ * [7] Commit all the changed inodes in the fast commit space and clear
+ * "EXT4_STATE_FC_COMMITTING" for these inodes.
+ * [8] Write tail tag (this tag ensures the atomicity, please read the following
* section for more details).
- * [7] Wait for [4], [5] and [6] to complete.
*
- * All the inode updates must call ext4_fc_start_update() before starting an
- * update. If such an ongoing update is present, fast commit waits for it to
- * complete. The completion of such an update is marked by
- * ext4_fc_stop_update().
+ * All the inode updates must be enclosed within jbd2_journal_start()
+ * and jbd2_journal_stop() similar to JBD2 journaling.
*
* Fast Commit Ineligibility
* -------------------------
@@ -142,6 +151,13 @@
* similarly. Thus, by converting a non-idempotent procedure into a series of
* idempotent outcomes, fast commits ensured idempotence during the replay.
*
+ * Locking
+ * -------
+ * sbi->s_fc_lock protects the fast commit inodes queue and the fast commit
+ * dentry queue. ei->i_fc_lock protects the fast commit related info in a given
+ * inode. Most of the code avoids acquiring both the locks, but if one must do
+ * that then sbi->s_fc_lock must be acquired before ei->i_fc_lock.
+ *
* TODOs
* -----
*
@@ -156,13 +172,12 @@
* fast commit recovery even if that area is invalidated by later full
* commits.
*
- * 1) Fast commit's commit path locks the entire file system during fast
- * commit. This has significant performance penalty. Instead of that, we
- * should use ext4_fc_start/stop_update functions to start inode level
- * updates from ext4_journal_start/stop. Once we do that we can drop file
- * system locking during commit path.
+ * 1) Handle more ineligible cases.
*
- * 2) Handle more ineligible cases.
+ * 2) Change ext4_fc_commit() to lookup logical to physical mapping using extent
+ * status tree. This would get rid of the need to call ext4_fc_track_inode()
+ * before acquiring i_data_sem. To do that we would need to ensure that
+ * modified extents from the extent status tree are not evicted from memory.
*/
#include <trace/events/ext4.h>
@@ -201,32 +216,6 @@ void ext4_fc_init_inode(struct inode *inode)
INIT_LIST_HEAD(&ei->i_fc_list);
INIT_LIST_HEAD(&ei->i_fc_dilist);
init_waitqueue_head(&ei->i_fc_wait);
- atomic_set(&ei->i_fc_updates, 0);
-}
-
-/* This function must be called with sbi->s_fc_lock held. */
-static void ext4_fc_wait_committing_inode(struct inode *inode)
-__releases(&EXT4_SB(inode->i_sb)->s_fc_lock)
-{
- wait_queue_head_t *wq;
- struct ext4_inode_info *ei = EXT4_I(inode);
-
-#if (BITS_PER_LONG < 64)
- DEFINE_WAIT_BIT(wait, &ei->i_state_flags,
- EXT4_STATE_FC_COMMITTING);
- wq = bit_waitqueue(&ei->i_state_flags,
- EXT4_STATE_FC_COMMITTING);
-#else
- DEFINE_WAIT_BIT(wait, &ei->i_flags,
- EXT4_STATE_FC_COMMITTING);
- wq = bit_waitqueue(&ei->i_flags,
- EXT4_STATE_FC_COMMITTING);
-#endif
- lockdep_assert_held(&EXT4_SB(inode->i_sb)->s_fc_lock);
- prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
- spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
- schedule();
- finish_wait(wq, &wait.wq_entry);
}
static bool ext4_fc_disabled(struct super_block *sb)
@@ -236,48 +225,6 @@ static bool ext4_fc_disabled(struct super_block *sb)
}
/*
- * Inform Ext4's fast about start of an inode update
- *
- * This function is called by the high level call VFS callbacks before
- * performing any inode update. This function blocks if there's an ongoing
- * fast commit on the inode in question.
- */
-void ext4_fc_start_update(struct inode *inode)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
-
- if (ext4_fc_disabled(inode->i_sb))
- return;
-
-restart:
- spin_lock(&EXT4_SB(inode->i_sb)->s_fc_lock);
- if (list_empty(&ei->i_fc_list))
- goto out;
-
- if (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)) {
- ext4_fc_wait_committing_inode(inode);
- goto restart;
- }
-out:
- atomic_inc(&ei->i_fc_updates);
- spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
-}
-
-/*
- * Stop inode update and wake up waiting fast commits if any.
- */
-void ext4_fc_stop_update(struct inode *inode)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
-
- if (ext4_fc_disabled(inode->i_sb))
- return;
-
- if (atomic_dec_and_test(&ei->i_fc_updates))
- wake_up_all(&ei->i_fc_wait);
-}
-
-/*
* Remove inode from fast commit list. If the inode is being committed
* we wait until inode commit is done.
*/
@@ -286,31 +233,62 @@ void ext4_fc_del(struct inode *inode)
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_fc_dentry_update *fc_dentry;
+ wait_queue_head_t *wq;
if (ext4_fc_disabled(inode->i_sb))
return;
-restart:
- spin_lock(&sbi->s_fc_lock);
+ mutex_lock(&sbi->s_fc_lock);
if (list_empty(&ei->i_fc_list) && list_empty(&ei->i_fc_dilist)) {
- spin_unlock(&sbi->s_fc_lock);
+ mutex_unlock(&sbi->s_fc_lock);
return;
}
- if (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)) {
- ext4_fc_wait_committing_inode(inode);
- goto restart;
+ /*
+ * Since ext4_fc_del is called from ext4_evict_inode while having a
+ * handle open, there is no need for us to wait here even if a fast
+ * commit is going on. That is because, if this inode is being
+ * committed, ext4_mark_inode_dirty would have waited for inode commit
+ * operation to finish before we come here. So, by the time we come
+ * here, inode's EXT4_STATE_FC_COMMITTING would have been cleared. So,
+ * we shouldn't see EXT4_STATE_FC_COMMITTING to be set on this inode
+ * here.
+ *
+ * We may come here without any handles open in the "no_delete" case of
+ * ext4_evict_inode as well. However, if that happens, we first mark the
+ * file system as fast commit ineligible anyway. So, even in that case,
+ * it is okay to remove the inode from the fc list.
+ */
+ WARN_ON(ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)
+ && !ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_INELIGIBLE));
+ while (ext4_test_inode_state(inode, EXT4_STATE_FC_FLUSHING_DATA)) {
+#if (BITS_PER_LONG < 64)
+ DEFINE_WAIT_BIT(wait, &ei->i_state_flags,
+ EXT4_STATE_FC_FLUSHING_DATA);
+ wq = bit_waitqueue(&ei->i_state_flags,
+ EXT4_STATE_FC_FLUSHING_DATA);
+#else
+ DEFINE_WAIT_BIT(wait, &ei->i_flags,
+ EXT4_STATE_FC_FLUSHING_DATA);
+ wq = bit_waitqueue(&ei->i_flags,
+ EXT4_STATE_FC_FLUSHING_DATA);
+#endif
+ prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
+ if (ext4_test_inode_state(inode, EXT4_STATE_FC_FLUSHING_DATA)) {
+ mutex_unlock(&sbi->s_fc_lock);
+ schedule();
+ mutex_lock(&sbi->s_fc_lock);
+ }
+ finish_wait(wq, &wait.wq_entry);
}
-
- if (!list_empty(&ei->i_fc_list))
- list_del_init(&ei->i_fc_list);
+ list_del_init(&ei->i_fc_list);
/*
* Since this inode is getting removed, let's also remove all FC
* dentry create references, since it is not needed to log it anyways.
*/
if (list_empty(&ei->i_fc_dilist)) {
- spin_unlock(&sbi->s_fc_lock);
+ mutex_unlock(&sbi->s_fc_lock);
return;
}
@@ -320,14 +298,10 @@ restart:
list_del_init(&fc_dentry->fcd_dilist);
WARN_ON(!list_empty(&ei->i_fc_dilist));
- spin_unlock(&sbi->s_fc_lock);
+ mutex_unlock(&sbi->s_fc_lock);
- if (fc_dentry->fcd_name.name &&
- fc_dentry->fcd_name.len > DNAME_INLINE_LEN)
- kfree(fc_dentry->fcd_name.name);
+ release_dentry_name_snapshot(&fc_dentry->fcd_name);
kmem_cache_free(ext4_fc_dentry_cachep, fc_dentry);
-
- return;
}
/*
@@ -355,12 +329,12 @@ void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handl
has_transaction = false;
read_unlock(&sbi->s_journal->j_state_lock);
}
- spin_lock(&sbi->s_fc_lock);
+ mutex_lock(&sbi->s_fc_lock);
is_ineligible = ext4_test_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
if (has_transaction && (!is_ineligible || tid_gt(tid, sbi->s_fc_ineligible_tid)))
sbi->s_fc_ineligible_tid = tid;
ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
- spin_unlock(&sbi->s_fc_lock);
+ mutex_unlock(&sbi->s_fc_lock);
WARN_ON(reason >= EXT4_FC_REASON_MAX);
sbi->s_fc_stats.fc_ineligible_reason_count[reason]++;
}
@@ -387,7 +361,7 @@ static int ext4_fc_track_template(
int ret;
tid = handle->h_transaction->t_tid;
- mutex_lock(&ei->i_fc_lock);
+ spin_lock(&ei->i_fc_lock);
if (tid == ei->i_sync_tid) {
update = true;
} else {
@@ -395,19 +369,18 @@ static int ext4_fc_track_template(
ei->i_sync_tid = tid;
}
ret = __fc_track_fn(handle, inode, args, update);
- mutex_unlock(&ei->i_fc_lock);
-
+ spin_unlock(&ei->i_fc_lock);
if (!enqueue)
return ret;
- spin_lock(&sbi->s_fc_lock);
+ mutex_lock(&sbi->s_fc_lock);
if (list_empty(&EXT4_I(inode)->i_fc_list))
list_add_tail(&EXT4_I(inode)->i_fc_list,
(sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING) ?
&sbi->s_fc_q[FC_Q_STAGING] :
&sbi->s_fc_q[FC_Q_MAIN]);
- spin_unlock(&sbi->s_fc_lock);
+ mutex_unlock(&sbi->s_fc_lock);
return ret;
}
@@ -430,43 +403,29 @@ static int __track_dentry_update(handle_t *handle, struct inode *inode,
struct super_block *sb = inode->i_sb;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- mutex_unlock(&ei->i_fc_lock);
+ spin_unlock(&ei->i_fc_lock);
if (IS_ENCRYPTED(dir)) {
ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_ENCRYPTED_FILENAME,
handle);
- mutex_lock(&ei->i_fc_lock);
+ spin_lock(&ei->i_fc_lock);
return -EOPNOTSUPP;
}
node = kmem_cache_alloc(ext4_fc_dentry_cachep, GFP_NOFS);
if (!node) {
ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, handle);
- mutex_lock(&ei->i_fc_lock);
+ spin_lock(&ei->i_fc_lock);
return -ENOMEM;
}
node->fcd_op = dentry_update->op;
node->fcd_parent = dir->i_ino;
node->fcd_ino = inode->i_ino;
- if (dentry->d_name.len > DNAME_INLINE_LEN) {
- node->fcd_name.name = kmalloc(dentry->d_name.len, GFP_NOFS);
- if (!node->fcd_name.name) {
- kmem_cache_free(ext4_fc_dentry_cachep, node);
- ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, handle);
- mutex_lock(&ei->i_fc_lock);
- return -ENOMEM;
- }
- memcpy((u8 *)node->fcd_name.name, dentry->d_name.name,
- dentry->d_name.len);
- } else {
- memcpy(node->fcd_iname, dentry->d_name.name,
- dentry->d_name.len);
- node->fcd_name.name = node->fcd_iname;
- }
- node->fcd_name.len = dentry->d_name.len;
+ take_dentry_name_snapshot(&node->fcd_name, dentry);
INIT_LIST_HEAD(&node->fcd_dilist);
- spin_lock(&sbi->s_fc_lock);
+ INIT_LIST_HEAD(&node->fcd_list);
+ mutex_lock(&sbi->s_fc_lock);
if (sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING)
list_add_tail(&node->fcd_list,
@@ -487,8 +446,8 @@ static int __track_dentry_update(handle_t *handle, struct inode *inode,
WARN_ON(!list_empty(&ei->i_fc_dilist));
list_add_tail(&node->fcd_dilist, &ei->i_fc_dilist);
}
- spin_unlock(&sbi->s_fc_lock);
- mutex_lock(&ei->i_fc_lock);
+ mutex_unlock(&sbi->s_fc_lock);
+ spin_lock(&ei->i_fc_lock);
return 0;
}
@@ -588,6 +547,8 @@ static int __track_inode(handle_t *handle, struct inode *inode, void *arg,
void ext4_fc_track_inode(handle_t *handle, struct inode *inode)
{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ wait_queue_head_t *wq;
int ret;
if (S_ISDIR(inode->i_mode))
@@ -605,6 +566,35 @@ void ext4_fc_track_inode(handle_t *handle, struct inode *inode)
if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_INELIGIBLE))
return;
+ /*
+ * If we come here, we may sleep while waiting for the inode to
+ * commit. We shouldn't be holding i_data_sem when we go to sleep since
+ * the commit path needs to grab the lock while committing the inode.
+ */
+ lockdep_assert_not_held(&ei->i_data_sem);
+
+ while (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)) {
+#if (BITS_PER_LONG < 64)
+ DEFINE_WAIT_BIT(wait, &ei->i_state_flags,
+ EXT4_STATE_FC_COMMITTING);
+ wq = bit_waitqueue(&ei->i_state_flags,
+ EXT4_STATE_FC_COMMITTING);
+#else
+ DEFINE_WAIT_BIT(wait, &ei->i_flags,
+ EXT4_STATE_FC_COMMITTING);
+ wq = bit_waitqueue(&ei->i_flags,
+ EXT4_STATE_FC_COMMITTING);
+#endif
+ prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
+ if (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING))
+ schedule();
+ finish_wait(wq, &wait.wq_entry);
+ }
+
+ /*
+ * From this point on, this inode will not be committed either
+ * by fast or full commit as long as the handle is open.
+ */
ret = ext4_fc_track_template(handle, inode, __track_inode, NULL, 1);
trace_ext4_fc_track_inode(handle, inode, ret);
}
@@ -673,7 +663,7 @@ void ext4_fc_track_range(handle_t *handle, struct inode *inode, ext4_lblk_t star
static void ext4_fc_submit_bh(struct super_block *sb, bool is_tail)
{
- blk_opf_t write_flags = REQ_SYNC;
+ blk_opf_t write_flags = JBD2_JOURNAL_REQ_FLAGS;
struct buffer_head *bh = EXT4_SB(sb)->s_fc_bh;
/* Add REQ_FUA | REQ_PREFLUSH only its tail */
@@ -744,7 +734,7 @@ static u8 *ext4_fc_reserve_space(struct super_block *sb, int len, u32 *crc)
tl.fc_len = cpu_to_le16(remaining);
memcpy(dst, &tl, EXT4_FC_TAG_BASE_LEN);
memset(dst + EXT4_FC_TAG_BASE_LEN, 0, remaining);
- *crc = ext4_chksum(sbi, *crc, sbi->s_fc_bh->b_data, bsize);
+ *crc = ext4_chksum(*crc, sbi->s_fc_bh->b_data, bsize);
ext4_fc_submit_bh(sb, false);
@@ -791,7 +781,7 @@ static int ext4_fc_write_tail(struct super_block *sb, u32 crc)
tail.fc_tid = cpu_to_le32(sbi->s_journal->j_running_transaction->t_tid);
memcpy(dst, &tail.fc_tid, sizeof(tail.fc_tid));
dst += sizeof(tail.fc_tid);
- crc = ext4_chksum(sbi, crc, sbi->s_fc_bh->b_data,
+ crc = ext4_chksum(crc, sbi->s_fc_bh->b_data,
dst - (u8 *)sbi->s_fc_bh->b_data);
tail.fc_crc = cpu_to_le32(crc);
memcpy(dst, &tail.fc_crc, sizeof(tail.fc_crc));
@@ -832,7 +822,7 @@ static bool ext4_fc_add_dentry_tlv(struct super_block *sb, u32 *crc,
{
struct ext4_fc_dentry_info fcd;
struct ext4_fc_tl tl;
- int dlen = fc_dentry->fcd_name.len;
+ int dlen = fc_dentry->fcd_name.name.len;
u8 *dst = ext4_fc_reserve_space(sb,
EXT4_FC_TAG_BASE_LEN + sizeof(fcd) + dlen, crc);
@@ -847,7 +837,7 @@ static bool ext4_fc_add_dentry_tlv(struct super_block *sb, u32 *crc,
dst += EXT4_FC_TAG_BASE_LEN;
memcpy(dst, &fcd, sizeof(fcd));
dst += sizeof(fcd);
- memcpy(dst, fc_dentry->fcd_name.name, dlen);
+ memcpy(dst, fc_dentry->fcd_name.name.name, dlen);
return true;
}
@@ -910,15 +900,15 @@ static int ext4_fc_write_inode_data(struct inode *inode, u32 *crc)
struct ext4_extent *ex;
int ret;
- mutex_lock(&ei->i_fc_lock);
+ spin_lock(&ei->i_fc_lock);
if (ei->i_fc_lblk_len == 0) {
- mutex_unlock(&ei->i_fc_lock);
+ spin_unlock(&ei->i_fc_lock);
return 0;
}
old_blk_size = ei->i_fc_lblk_start;
new_blk_size = ei->i_fc_lblk_start + ei->i_fc_lblk_len - 1;
ei->i_fc_lblk_len = 0;
- mutex_unlock(&ei->i_fc_lock);
+ spin_unlock(&ei->i_fc_lock);
cur_lblk_off = old_blk_size;
ext4_debug("will try writing %d to %d for inode %ld\n",
@@ -927,7 +917,9 @@ static int ext4_fc_write_inode_data(struct inode *inode, u32 *crc)
while (cur_lblk_off <= new_blk_size) {
map.m_lblk = cur_lblk_off;
map.m_len = new_blk_size - cur_lblk_off + 1;
- ret = ext4_map_blocks(NULL, inode, &map, 0);
+ ret = ext4_map_blocks(NULL, inode, &map,
+ EXT4_GET_BLOCKS_IO_SUBMIT |
+ EXT4_EX_NOCACHE);
if (ret < 0)
return -ECANCELED;
@@ -971,69 +963,31 @@ static int ext4_fc_write_inode_data(struct inode *inode, u32 *crc)
}
-/* Submit data for all the fast commit inodes */
-static int ext4_fc_submit_inode_data_all(journal_t *journal)
+/* Flushes data of all the inodes in the commit queue. */
+static int ext4_fc_flush_data(journal_t *journal)
{
struct super_block *sb = journal->j_private;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_inode_info *ei;
int ret = 0;
- spin_lock(&sbi->s_fc_lock);
list_for_each_entry(ei, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
- ext4_set_inode_state(&ei->vfs_inode, EXT4_STATE_FC_COMMITTING);
- while (atomic_read(&ei->i_fc_updates)) {
- DEFINE_WAIT(wait);
-
- prepare_to_wait(&ei->i_fc_wait, &wait,
- TASK_UNINTERRUPTIBLE);
- if (atomic_read(&ei->i_fc_updates)) {
- spin_unlock(&sbi->s_fc_lock);
- schedule();
- spin_lock(&sbi->s_fc_lock);
- }
- finish_wait(&ei->i_fc_wait, &wait);
- }
- spin_unlock(&sbi->s_fc_lock);
ret = jbd2_submit_inode_data(journal, ei->jinode);
if (ret)
return ret;
- spin_lock(&sbi->s_fc_lock);
}
- spin_unlock(&sbi->s_fc_lock);
- return ret;
-}
-
-/* Wait for completion of data for all the fast commit inodes */
-static int ext4_fc_wait_inode_data_all(journal_t *journal)
-{
- struct super_block *sb = journal->j_private;
- struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct ext4_inode_info *pos, *n;
- int ret = 0;
-
- spin_lock(&sbi->s_fc_lock);
- list_for_each_entry_safe(pos, n, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
- if (!ext4_test_inode_state(&pos->vfs_inode,
- EXT4_STATE_FC_COMMITTING))
- continue;
- spin_unlock(&sbi->s_fc_lock);
-
- ret = jbd2_wait_inode_data(journal, pos->jinode);
+ list_for_each_entry(ei, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
+ ret = jbd2_wait_inode_data(journal, ei->jinode);
if (ret)
return ret;
- spin_lock(&sbi->s_fc_lock);
}
- spin_unlock(&sbi->s_fc_lock);
return 0;
}
/* Commit all the directory entry updates */
static int ext4_fc_commit_dentry_updates(journal_t *journal, u32 *crc)
-__acquires(&sbi->s_fc_lock)
-__releases(&sbi->s_fc_lock)
{
struct super_block *sb = journal->j_private;
struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -1047,26 +1001,22 @@ __releases(&sbi->s_fc_lock)
list_for_each_entry_safe(fc_dentry, fc_dentry_n,
&sbi->s_fc_dentry_q[FC_Q_MAIN], fcd_list) {
if (fc_dentry->fcd_op != EXT4_FC_TAG_CREAT) {
- spin_unlock(&sbi->s_fc_lock);
- if (!ext4_fc_add_dentry_tlv(sb, crc, fc_dentry)) {
- ret = -ENOSPC;
- goto lock_and_exit;
- }
- spin_lock(&sbi->s_fc_lock);
+ if (!ext4_fc_add_dentry_tlv(sb, crc, fc_dentry))
+ return -ENOSPC;
continue;
}
/*
* With fcd_dilist we need not loop in sbi->s_fc_q to get the
- * corresponding inode pointer
+ * corresponding inode. Also, the corresponding inode could have been
+ * deleted, in which case, we don't need to do anything.
*/
- WARN_ON(list_empty(&fc_dentry->fcd_dilist));
+ if (list_empty(&fc_dentry->fcd_dilist))
+ continue;
ei = list_first_entry(&fc_dentry->fcd_dilist,
struct ext4_inode_info, i_fc_dilist);
inode = &ei->vfs_inode;
WARN_ON(inode->i_ino != fc_dentry->fcd_ino);
- spin_unlock(&sbi->s_fc_lock);
-
/*
* We first write the inode and then the create dirent. This
* allows the recovery code to create an unnamed inode first
@@ -1076,23 +1026,14 @@ __releases(&sbi->s_fc_lock)
*/
ret = ext4_fc_write_inode(inode, crc);
if (ret)
- goto lock_and_exit;
-
+ return ret;
ret = ext4_fc_write_inode_data(inode, crc);
if (ret)
- goto lock_and_exit;
-
- if (!ext4_fc_add_dentry_tlv(sb, crc, fc_dentry)) {
- ret = -ENOSPC;
- goto lock_and_exit;
- }
-
- spin_lock(&sbi->s_fc_lock);
+ return ret;
+ if (!ext4_fc_add_dentry_tlv(sb, crc, fc_dentry))
+ return -ENOSPC;
}
return 0;
-lock_and_exit:
- spin_lock(&sbi->s_fc_lock);
- return ret;
}
static int ext4_fc_perform_commit(journal_t *journal)
@@ -1106,26 +1047,81 @@ static int ext4_fc_perform_commit(journal_t *journal)
int ret = 0;
u32 crc = 0;
- ret = ext4_fc_submit_inode_data_all(journal);
- if (ret)
- return ret;
+ /*
+ * Step 1: Mark all inodes on s_fc_q[MAIN] with
+ * EXT4_STATE_FC_FLUSHING_DATA. This prevents these inodes from being
+ * freed until the data flush is over.
+ */
+ mutex_lock(&sbi->s_fc_lock);
+ list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
+ ext4_set_inode_state(&iter->vfs_inode,
+ EXT4_STATE_FC_FLUSHING_DATA);
+ }
+ mutex_unlock(&sbi->s_fc_lock);
- ret = ext4_fc_wait_inode_data_all(journal);
+ /* Step 2: Flush data for all the eligible inodes. */
+ ret = ext4_fc_flush_data(journal);
+
+ /*
+ * Step 3: Clear EXT4_STATE_FC_FLUSHING_DATA flag, before returning
+ * any error from step 2. This ensures that waiters waiting on
+ * EXT4_STATE_FC_FLUSHING_DATA can resume.
+ */
+ mutex_lock(&sbi->s_fc_lock);
+ list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
+ ext4_clear_inode_state(&iter->vfs_inode,
+ EXT4_STATE_FC_FLUSHING_DATA);
+#if (BITS_PER_LONG < 64)
+ wake_up_bit(&iter->i_state_flags, EXT4_STATE_FC_FLUSHING_DATA);
+#else
+ wake_up_bit(&iter->i_flags, EXT4_STATE_FC_FLUSHING_DATA);
+#endif
+ }
+
+ /*
+ * Make sure clearing of EXT4_STATE_FC_FLUSHING_DATA is visible before
+ * the waiter checks the bit. Pairs with implicit barrier in
+ * prepare_to_wait() in ext4_fc_del().
+ */
+ smp_mb();
+ mutex_unlock(&sbi->s_fc_lock);
+
+ /*
+ * If we encountered error in Step 2, return it now after clearing
+ * EXT4_STATE_FC_FLUSHING_DATA bit.
+ */
if (ret)
return ret;
+
+ /* Step 4: Mark all inodes as being committed. */
+ jbd2_journal_lock_updates(journal);
/*
- * If file system device is different from journal device, issue a cache
- * flush before we start writing fast commit blocks.
+ * The journal is now locked. No more handles can start and all the
+ * previous handles are now drained. We now mark the inodes on the
+ * commit queue as being committed.
+ */
+ mutex_lock(&sbi->s_fc_lock);
+ list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
+ ext4_set_inode_state(&iter->vfs_inode,
+ EXT4_STATE_FC_COMMITTING);
+ }
+ mutex_unlock(&sbi->s_fc_lock);
+ jbd2_journal_unlock_updates(journal);
+
+ /*
+ * Step 5: If file system device is different from journal device,
+ * issue a cache flush before we start writing fast commit blocks.
*/
if (journal->j_fs_dev != journal->j_dev)
blkdev_issue_flush(journal->j_fs_dev);
blk_start_plug(&plug);
+ /* Step 6: Write fast commit blocks to disk. */
if (sbi->s_fc_bytes == 0) {
/*
- * Add a head tag only if this is the first fast commit
- * in this TID.
+ * Step 6.1: Add a head tag only if this is the first fast
+ * commit in this TID.
*/
head.fc_features = cpu_to_le32(EXT4_FC_SUPPORTED_FEATURES);
head.fc_tid = cpu_to_le32(
@@ -1137,32 +1133,30 @@ static int ext4_fc_perform_commit(journal_t *journal)
}
}
- spin_lock(&sbi->s_fc_lock);
+ /* Step 6.2: Now write all the dentry updates. */
+ mutex_lock(&sbi->s_fc_lock);
ret = ext4_fc_commit_dentry_updates(journal, &crc);
- if (ret) {
- spin_unlock(&sbi->s_fc_lock);
+ if (ret)
goto out;
- }
+ /* Step 6.3: Now write all the changed inodes to disk. */
list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
inode = &iter->vfs_inode;
if (!ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING))
continue;
- spin_unlock(&sbi->s_fc_lock);
ret = ext4_fc_write_inode_data(inode, &crc);
if (ret)
goto out;
ret = ext4_fc_write_inode(inode, &crc);
if (ret)
goto out;
- spin_lock(&sbi->s_fc_lock);
}
- spin_unlock(&sbi->s_fc_lock);
-
+ /* Step 6.4: Finally write tail tag to conclude this fast commit. */
ret = ext4_fc_write_tail(sb, crc);
out:
+ mutex_unlock(&sbi->s_fc_lock);
blk_finish_plug(&plug);
return ret;
}
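The step comments above depend on a clear-flag, barrier, wake sequence whose counterpart lives in the waiters (ext4_fc_del() and ext4_fc_track_inode(), not visible in this hunk). Below is a minimal sketch of that generic pairing only; it deliberately uses a plain waitqueue and an example_ bit rather than ext4's inode state bits and wake_up_bit(), so every name here is illustrative.

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait.h>

static unsigned long example_flags;
static DECLARE_WAIT_QUEUE_HEAD(example_wq);

/* Waker side: clear the flag, then a full barrier, then wake. */
static void example_waker(void)
{
	clear_bit(0, &example_flags);
	smp_mb();	/* pairs with the barrier in prepare_to_wait() */
	wake_up(&example_wq);
}

/*
 * Waiter side: re-check the flag after prepare_to_wait() so that a
 * cleared bit (or the wakeup) is never missed.
 */
static void example_waiter(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&example_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (!test_bit(0, &example_flags))
			break;
		schedule();
	}
	finish_wait(&example_wq, &wait);
}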
@@ -1208,6 +1202,7 @@ int ext4_fc_commit(journal_t *journal, tid_t commit_tid)
int subtid = atomic_read(&sbi->s_fc_subtid);
int status = EXT4_FC_STATUS_OK, fc_bufs_before = 0;
ktime_t start_time, commit_time;
+ int old_ioprio, journal_ioprio;
if (!test_opt2(sb, JOURNAL_FAST_COMMIT))
return jbd2_complete_transaction(journal, commit_tid);
@@ -1215,6 +1210,7 @@ int ext4_fc_commit(journal_t *journal, tid_t commit_tid)
trace_ext4_fc_commit_start(sb, commit_tid);
start_time = ktime_get();
+ old_ioprio = get_current_ioprio();
restart_fc:
ret = jbd2_fc_begin_commit(journal, commit_tid);
@@ -1245,6 +1241,15 @@ restart_fc:
goto fallback;
}
+ /*
+ * Now that we know that this thread is going to do a fast commit,
+ * elevate the priority to match that of the journal thread.
+ */
+ if (journal->j_task->io_context)
+ journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
+ else
+ journal_ioprio = EXT4_DEF_JOURNAL_IOPRIO;
+ set_task_ioprio(current, journal_ioprio);
fc_bufs_before = (sbi->s_fc_bytes + bsize - 1) / bsize;
ret = ext4_fc_perform_commit(journal);
if (ret < 0) {
@@ -1259,6 +1264,7 @@ restart_fc:
}
atomic_inc(&sbi->s_fc_subtid);
ret = jbd2_fc_end_commit(journal);
+ set_task_ioprio(current, old_ioprio);
/*
* weight the commit time higher than the average time so we
* don't react too strongly to vast changes in the commit time
@@ -1268,6 +1274,7 @@ restart_fc:
return ret;
fallback:
+ set_task_ioprio(current, old_ioprio);
ret = jbd2_fc_end_commit_fallback(journal);
ext4_fc_update_stats(sb, status, 0, 0, commit_tid);
return ret;
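The io-priority handling added in this hunk is a plain save/elevate/restore around the commit work. A minimal sketch of that shape in isolation, assuming only get_current_ioprio(), set_task_ioprio() and the jbd2 journal_t fields used above; the example_ names and the work callback are illustrative.

#include <linux/iocontext.h>
#include <linux/ioprio.h>
#include <linux/jbd2.h>
#include <linux/sched.h>

static int example_run_at_journal_ioprio(journal_t *journal, int def_ioprio,
					 int (*work)(journal_t *journal))
{
	int old_ioprio = get_current_ioprio();
	int journal_ioprio = def_ioprio;
	int ret;

	/* Match the jbd2 thread's priority if it has an io_context. */
	if (journal->j_task->io_context)
		journal_ioprio = journal->j_task->io_context->ioprio;

	set_task_ioprio(current, journal_ioprio);
	ret = work(journal);
	set_task_ioprio(current, old_ioprio);
	return ret;
}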
@@ -1281,7 +1288,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
{
struct super_block *sb = journal->j_private;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct ext4_inode_info *iter, *iter_n;
+ struct ext4_inode_info *ei;
struct ext4_fc_dentry_update *fc_dentry;
if (full && sbi->s_fc_bh)
@@ -1290,14 +1297,16 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
trace_ext4_fc_cleanup(journal, full, tid);
jbd2_fc_release_bufs(journal);
- spin_lock(&sbi->s_fc_lock);
- list_for_each_entry_safe(iter, iter_n, &sbi->s_fc_q[FC_Q_MAIN],
- i_fc_list) {
- list_del_init(&iter->i_fc_list);
- ext4_clear_inode_state(&iter->vfs_inode,
+ mutex_lock(&sbi->s_fc_lock);
+ while (!list_empty(&sbi->s_fc_q[FC_Q_MAIN])) {
+ ei = list_first_entry(&sbi->s_fc_q[FC_Q_MAIN],
+ struct ext4_inode_info,
+ i_fc_list);
+ list_del_init(&ei->i_fc_list);
+ ext4_clear_inode_state(&ei->vfs_inode,
EXT4_STATE_FC_COMMITTING);
- if (tid_geq(tid, iter->i_sync_tid)) {
- ext4_fc_reset_inode(&iter->vfs_inode);
+ if (tid_geq(tid, ei->i_sync_tid)) {
+ ext4_fc_reset_inode(&ei->vfs_inode);
} else if (full) {
/*
* We are called after a full commit, inode has been
@@ -1308,15 +1317,19 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
* time in that case (and tid doesn't increase so
* tid check above isn't reliable).
*/
- list_add_tail(&EXT4_I(&iter->vfs_inode)->i_fc_list,
+ list_add_tail(&ei->i_fc_list,
&sbi->s_fc_q[FC_Q_STAGING]);
}
- /* Make sure EXT4_STATE_FC_COMMITTING bit is clear */
+ /*
+ * Make sure clearing of EXT4_STATE_FC_COMMITTING is
+ * visible before we send the wakeup. Pairs with implicit
+ * barrier in prepare_to_wait() in ext4_fc_track_inode().
+ */
smp_mb();
#if (BITS_PER_LONG < 64)
- wake_up_bit(&iter->i_state_flags, EXT4_STATE_FC_COMMITTING);
+ wake_up_bit(&ei->i_state_flags, EXT4_STATE_FC_COMMITTING);
#else
- wake_up_bit(&iter->i_flags, EXT4_STATE_FC_COMMITTING);
+ wake_up_bit(&ei->i_flags, EXT4_STATE_FC_COMMITTING);
#endif
}
@@ -1326,13 +1339,9 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
fcd_list);
list_del_init(&fc_dentry->fcd_list);
list_del_init(&fc_dentry->fcd_dilist);
- spin_unlock(&sbi->s_fc_lock);
- if (fc_dentry->fcd_name.name &&
- fc_dentry->fcd_name.len > DNAME_INLINE_LEN)
- kfree(fc_dentry->fcd_name.name);
+ release_dentry_name_snapshot(&fc_dentry->fcd_name);
kmem_cache_free(ext4_fc_dentry_cachep, fc_dentry);
- spin_lock(&sbi->s_fc_lock);
}
list_splice_init(&sbi->s_fc_dentry_q[FC_Q_STAGING],
@@ -1347,7 +1356,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
if (full)
sbi->s_fc_bytes = 0;
- spin_unlock(&sbi->s_fc_lock);
+ mutex_unlock(&sbi->s_fc_lock);
trace_ext4_fc_stats(sb);
}
@@ -2124,13 +2133,13 @@ static int ext4_fc_replay_scan(journal_t *journal,
case EXT4_FC_TAG_INODE:
case EXT4_FC_TAG_PAD:
state->fc_cur_tag++;
- state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+ state->fc_crc = ext4_chksum(state->fc_crc, cur,
EXT4_FC_TAG_BASE_LEN + tl.fc_len);
break;
case EXT4_FC_TAG_TAIL:
state->fc_cur_tag++;
memcpy(&tail, val, sizeof(tail));
- state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+ state->fc_crc = ext4_chksum(state->fc_crc, cur,
EXT4_FC_TAG_BASE_LEN +
offsetof(struct ext4_fc_tail,
fc_crc));
@@ -2157,7 +2166,7 @@ static int ext4_fc_replay_scan(journal_t *journal,
break;
}
state->fc_cur_tag++;
- state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+ state->fc_crc = ext4_chksum(state->fc_crc, cur,
EXT4_FC_TAG_BASE_LEN + tl.fc_len);
break;
default:
diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h
index 2fadb2c4780c..3bd534e4dbbf 100644
--- a/fs/ext4/fast_commit.h
+++ b/fs/ext4/fast_commit.h
@@ -109,8 +109,7 @@ struct ext4_fc_dentry_update {
int fcd_op; /* Type of update create / unlink / link */
int fcd_parent; /* Parent inode number */
int fcd_ino; /* Inode number */
- struct qstr fcd_name; /* Dirent name */
- unsigned char fcd_iname[DNAME_INLINE_LEN]; /* Dirent name string */
+ struct name_snapshot fcd_name; /* Dirent name */
struct list_head fcd_list;
struct list_head fcd_dilist;
};
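With fcd_name switched to a struct name_snapshot, capturing and dropping the dentry name is delegated to the generic dcache helpers instead of the old DNAME_INLINE_LEN copy. A minimal sketch of the pairing, assuming fast_commit.h is in scope; the example_ wrappers are illustrative, while take_dentry_name_snapshot() and release_dentry_name_snapshot() are the real VFS API used by this patch.

#include <linux/dcache.h>

/* Capture the dentry name: the snapshot pins or copies it as needed. */
static void example_capture_name(struct ext4_fc_dentry_update *node,
				 struct dentry *dentry)
{
	take_dentry_name_snapshot(&node->fcd_name, dentry);
}

/* Drop the snapshot once the fast-commit record is done with it. */
static void example_release_name(struct ext4_fc_dentry_update *node)
{
	release_dentry_name_snapshot(&node->fcd_name);
}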
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 3bd96c3d4cd0..7a8b30932189 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -354,7 +354,7 @@ static void ext4_inode_extension_cleanup(struct inode *inode, bool need_trunc)
* to cleanup the orphan list in ext4_handle_inode_extension(). Do it
* now.
*/
- if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
+ if (ext4_inode_orphan_tracked(inode) && inode->i_nlink) {
handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
if (IS_ERR(handle)) {
@@ -377,7 +377,12 @@ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
loff_t pos = iocb->ki_pos;
struct inode *inode = file_inode(iocb->ki_filp);
- if (!error && size && flags & IOMAP_DIO_UNWRITTEN)
+
+ if (!error && size && (flags & IOMAP_DIO_UNWRITTEN) &&
+ (iocb->ki_flags & IOCB_ATOMIC))
+ error = ext4_convert_unwritten_extents_atomic(NULL, inode, pos,
+ size);
+ else if (!error && size && flags & IOMAP_DIO_UNWRITTEN)
error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
if (error)
return error;
@@ -688,10 +693,12 @@ out:
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
+ int ret;
struct inode *inode = file_inode(iocb->ki_filp);
- if (unlikely(ext4_forced_shutdown(inode->i_sb)))
- return -EIO;
+ ret = ext4_emergency_state(inode->i_sb);
+ if (unlikely(ret))
+ return ret;
#ifdef CONFIG_FS_DAX
if (IS_DAX(inode))
@@ -700,7 +707,6 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (iocb->ki_flags & IOCB_ATOMIC) {
size_t len = iov_iter_count(from);
- int ret;
if (len < EXT4_SB(inode->i_sb)->s_awu_min ||
len > EXT4_SB(inode->i_sb)->s_awu_max)
@@ -741,7 +747,7 @@ static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
(vmf->vma->vm_flags & VM_SHARED);
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
- pfn_t pfn;
+ unsigned long pfn;
if (write) {
sb_start_pagefault(sb);
@@ -798,27 +804,33 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
.page_mkwrite = ext4_page_mkwrite,
};
-static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int ext4_file_mmap_prepare(struct vm_area_desc *desc)
{
+ int ret;
+ struct file *file = desc->file;
struct inode *inode = file->f_mapping->host;
struct dax_device *dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
- if (unlikely(ext4_forced_shutdown(inode->i_sb)))
- return -EIO;
+ if (file->f_mode & FMODE_WRITE)
+ ret = ext4_emergency_state(inode->i_sb);
+ else
+ ret = ext4_forced_shutdown(inode->i_sb) ? -EIO : 0;
+ if (unlikely(ret))
+ return ret;
/*
* We don't support synchronous mappings for non-DAX files and
* for DAX files if underneath dax_device is not synchronous.
*/
- if (!daxdev_mapping_supported(vma, dax_dev))
+ if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file), dax_dev))
return -EOPNOTSUPP;
file_accessed(file);
if (IS_DAX(file_inode(file))) {
- vma->vm_ops = &ext4_dax_vm_ops;
- vm_flags_set(vma, VM_HUGEPAGE);
+ desc->vm_ops = &ext4_dax_vm_ops;
+ desc->vm_flags |= VM_HUGEPAGE;
} else {
- vma->vm_ops = &ext4_file_vm_ops;
+ desc->vm_ops = &ext4_file_vm_ops;
}
return 0;
}
@@ -835,7 +847,8 @@ static int ext4_sample_last_mounted(struct super_block *sb,
if (likely(ext4_test_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED)))
return 0;
- if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
+ if (ext4_emergency_state(sb) || sb_rdonly(sb) ||
+ !sb_start_intwrite_trylock(sb))
return 0;
ext4_set_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED);
@@ -878,8 +891,12 @@ static int ext4_file_open(struct inode *inode, struct file *filp)
{
int ret;
- if (unlikely(ext4_forced_shutdown(inode->i_sb)))
- return -EIO;
+ if (filp->f_mode & FMODE_WRITE)
+ ret = ext4_emergency_state(inode->i_sb);
+ else
+ ret = ext4_forced_shutdown(inode->i_sb) ? -EIO : 0;
+ if (unlikely(ret))
+ return ret;
ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
if (ret)
@@ -918,12 +935,7 @@ static int ext4_file_open(struct inode *inode, struct file *filp)
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
- loff_t maxbytes;
-
- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
- maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
- else
- maxbytes = inode->i_sb->s_maxbytes;
+ loff_t maxbytes = ext4_get_maxbytes(inode);
switch (whence) {
default:
@@ -957,7 +969,7 @@ const struct file_operations ext4_file_operations = {
#ifdef CONFIG_COMPAT
.compat_ioctl = ext4_compat_ioctl,
#endif
- .mmap = ext4_file_mmap,
+ .mmap_prepare = ext4_file_mmap_prepare,
.open = ext4_file_open,
.release = ext4_release_file,
.fsync = ext4_sync_file,
@@ -966,7 +978,8 @@ const struct file_operations ext4_file_operations = {
.splice_write = iter_file_splice_write,
.fallocate = ext4_fallocate,
.fop_flags = FOP_MMAP_SYNC | FOP_BUFFER_RASYNC |
- FOP_DIO_PARALLEL_WRITE,
+ FOP_DIO_PARALLEL_WRITE |
+ FOP_DONTCACHE,
};
const struct inode_operations ext4_file_inode_operations = {
diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
index 383c6edea6dd..22fc333244ef 100644
--- a/fs/ext4/fsmap.c
+++ b/fs/ext4/fsmap.c
@@ -74,7 +74,8 @@ static int ext4_getfsmap_dev_compare(const void *p1, const void *p2)
static bool ext4_getfsmap_rec_before_low_key(struct ext4_getfsmap_info *info,
struct ext4_fsmap *rec)
{
- return rec->fmr_physical < info->gfi_low.fmr_physical;
+ return rec->fmr_physical + rec->fmr_length <=
+ info->gfi_low.fmr_physical;
}
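This predicate change, together with the matching one in ext4_getfsmap_meta_helper() below, treats each record as the half-open interval [fmr_physical, fmr_physical + fmr_length): a record sorts entirely before the low key only if it ends at or before it, and it overlaps the query window iff it starts at or before the window end and ends strictly after the window start. The overlap test in isolation, as a minimal sketch:

#include <linux/types.h>

/*
 * An extent [start, start + len) overlaps a query window [lo, hi]
 * iff it starts at or before hi and ends strictly after lo.
 */
static bool example_fsmap_overlaps(u64 start, u64 len, u64 lo, u64 hi)
{
	return start <= hi && start + len > lo;
}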
/*
@@ -200,15 +201,18 @@ static int ext4_getfsmap_meta_helper(struct super_block *sb,
ext4_group_first_block_no(sb, agno));
fs_end = fs_start + EXT4_C2B(sbi, len);
- /* Return relevant extents from the meta_list */
+ /*
+ * Return relevant extents from the meta_list. We emit all extents that
+ * partially/fully overlap with the query range
+ */
list_for_each_entry_safe(p, tmp, &info->gfi_meta_list, fmr_list) {
- if (p->fmr_physical < info->gfi_next_fsblk) {
+ if (p->fmr_physical + p->fmr_length <= info->gfi_next_fsblk) {
list_del(&p->fmr_list);
kfree(p);
continue;
}
- if (p->fmr_physical <= fs_start ||
- p->fmr_physical + p->fmr_length <= fs_end) {
+ if (p->fmr_physical <= fs_end &&
+ p->fmr_physical + p->fmr_length > fs_start) {
/* Emit the retained free extent record if present */
if (info->gfi_lastfree.fmr_owner) {
error = ext4_getfsmap_helper(sb, info,
@@ -393,6 +397,14 @@ static unsigned int ext4_getfsmap_find_sb(struct super_block *sb,
/* Reserved GDT blocks */
if (!ext4_has_feature_meta_bg(sb) || metagroup < first_meta_bg) {
len = le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
+
+ /*
+		 * mkfs.ext4 can set s_reserved_gdt_blocks to 0 in some cases;
+		 * check for that.
+ */
+ if (!len)
+ return 0;
+
error = ext4_getfsmap_fill(meta_list, fsb, len,
EXT4_FMR_OWN_RESV_GDT);
if (error)
@@ -526,6 +538,7 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
ext4_group_t end_ag;
ext4_grpblk_t first_cluster;
ext4_grpblk_t last_cluster;
+ struct ext4_fsmap irec;
int error = 0;
bofs = le32_to_cpu(sbi->s_es->s_first_data_block);
@@ -609,10 +622,18 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
goto err;
}
- /* Report any gaps at the end of the bg */
+ /*
+ * The dummy record below will cause ext4_getfsmap_helper() to report
+ * any allocated blocks at the end of the range.
+ */
+ irec.fmr_device = 0;
+ irec.fmr_physical = end_fsb + 1;
+ irec.fmr_length = 0;
+ irec.fmr_owner = EXT4_FMR_OWN_FREE;
+ irec.fmr_flags = 0;
+
info->gfi_last = true;
- error = ext4_getfsmap_datadev_helper(sb, end_ag, last_cluster + 1,
- 0, info);
+ error = ext4_getfsmap_helper(sb, info, &irec);
if (error)
goto err;
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index b40d3b29f7e5..e476c6de3074 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -132,20 +132,16 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
bool needs_barrier = false;
struct inode *inode = file->f_mapping->host;
- if (unlikely(ext4_forced_shutdown(inode->i_sb)))
- return -EIO;
+ ret = ext4_emergency_state(inode->i_sb);
+ if (unlikely(ret))
+ return ret;
ASSERT(ext4_journal_current_handle() == NULL);
trace_ext4_sync_file_enter(file, datasync);
- if (sb_rdonly(inode->i_sb)) {
- /* Make sure that we read updated s_ext4_flags value */
- smp_rmb();
- if (ext4_forced_shutdown(inode->i_sb))
- ret = -EROFS;
+ if (sb_rdonly(inode->i_sb))
goto out;
- }
if (!EXT4_SB(inode->i_sb)->s_journal) {
ret = ext4_fsync_nojournal(file, start, end, datasync,
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
index deabe29da7fb..48483cd015d3 100644
--- a/fs/ext4/hash.c
+++ b/fs/ext4/hash.c
@@ -268,7 +268,7 @@ static int __ext4fs_dirhash(const struct inode *dir, const char *name, int len,
combined_hash = fscrypt_fname_siphash(dir, &qname);
} else {
ext4_warning_inode(dir, "Siphash requires key");
- return -1;
+ return -EINVAL;
}
hash = (__u32)(combined_hash >> 32);
@@ -302,7 +302,7 @@ int ext4fs_dirhash(const struct inode *dir, const char *name, int len,
if (len && IS_CASEFOLDED(dir) &&
(!IS_ENCRYPTED(dir) || fscrypt_has_encryption_key(dir))) {
- buff = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL);
+ buff = kzalloc(PATH_MAX, GFP_KERNEL);
if (!buff)
return -ENOMEM;
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 21d228073d79..b20a1bf866ab 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -252,10 +252,10 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
"nonexistent device\n", __func__, __LINE__);
return;
}
- if (atomic_read(&inode->i_count) > 1) {
+ if (icount_read(inode) > 1) {
ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
__func__, __LINE__, inode->i_ino,
- atomic_read(&inode->i_count));
+ icount_read(inode));
return;
}
if (inode->i_nlink) {
@@ -691,7 +691,8 @@ static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
if (!bh || !buffer_uptodate(bh))
/*
* If the block is not in the buffer cache, then it
- * must have been written out.
+ * must have been written out, or, most unlikely, is
+ * being migrated - false failure should be OK here.
*/
goto out;
@@ -951,8 +952,9 @@ struct inode *__ext4_new_inode(struct mnt_idmap *idmap,
sb = dir->i_sb;
sbi = EXT4_SB(sb);
- if (unlikely(ext4_forced_shutdown(sb)))
- return ERR_PTR(-EIO);
+ ret2 = ext4_emergency_state(sb);
+ if (unlikely(ret2))
+ return ERR_PTR(ret2);
ngroups = ext4_get_groups_count(sb);
trace_ext4_request_inode(dir, mode);
@@ -1282,23 +1284,21 @@ got:
inode->i_generation = get_random_u32();
/* Precompute checksum seed for inode metadata */
- if (ext4_has_metadata_csum(sb)) {
+ if (ext4_has_feature_metadata_csum(sb)) {
__u32 csum;
__le32 inum = cpu_to_le32(inode->i_ino);
__le32 gen = cpu_to_le32(inode->i_generation);
- csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
+ csum = ext4_chksum(sbi->s_csum_seed, (__u8 *)&inum,
sizeof(inum));
- ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
- sizeof(gen));
+ ei->i_csum_seed = ext4_chksum(csum, (__u8 *)&gen, sizeof(gen));
}
- ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
ext4_set_inode_state(inode, EXT4_STATE_NEW);
ei->i_extra_isize = sbi->s_want_extra_isize;
ei->i_inline_off = 0;
if (ext4_has_feature_inline_data(sb) &&
- (!(ei->i_flags & EXT4_DAX_FL) || S_ISDIR(mode)))
+ (!(ei->i_flags & (EXT4_DAX_FL|EXT4_EA_INODE_FL)) || S_ISDIR(mode)))
ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
ret = inode;
err = dquot_alloc_inode(inode);
@@ -1334,6 +1334,8 @@ got:
}
}
+ ext4_set_inode_mapping_order(inode);
+
ext4_update_inode_fsync_trans(handle, inode, 1);
err = ext4_mark_inode_dirty(handle, inode);
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 7de327fa7b1c..da76353b3a57 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -539,7 +539,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
int indirect_blks;
int blocks_to_boundary = 0;
int depth;
- int count = 0;
+ u64 count = 0;
ext4_fsblk_t first_block = 0;
trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
@@ -588,7 +588,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
count++;
/* Fill in size of a hole we found */
map->m_pblk = 0;
- map->m_len = min_t(unsigned int, map->m_len, count);
+ map->m_len = umin(map->m_len, count);
goto cleanup;
}
@@ -1025,7 +1025,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
}
/* Go read the buffer for the next level down */
- bh = ext4_sb_bread(inode->i_sb, nr, 0);
+ bh = ext4_sb_bread_nofail(inode->i_sb, nr);
/*
* A read failure? Report error and clear slot
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 3536ca7e4fcc..1f6bc05593df 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -20,6 +20,11 @@
#define EXT4_INLINE_DOTDOT_OFFSET 2
#define EXT4_INLINE_DOTDOT_SIZE 4
+
+static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
+ struct inode *inode,
+ void **fsdata);
+
static int ext4_get_inline_size(struct inode *inode)
{
if (EXT4_I(inode)->i_inline_off)
@@ -228,7 +233,7 @@ static void ext4_write_inline_data(struct inode *inode, struct ext4_iloc *iloc,
struct ext4_inode *raw_inode;
int cp_len = 0;
- if (unlikely(ext4_forced_shutdown(inode->i_sb)))
+ if (unlikely(ext4_emergency_state(inode->i_sb)))
return;
BUG_ON(!EXT4_I(inode)->i_inline_off);
@@ -298,7 +303,11 @@ static int ext4_create_inline_data(handle_t *handle,
if (error)
goto out;
- BUG_ON(!is.s.not_found);
+ if (!is.s.not_found) {
+ EXT4_ERROR_INODE(inode, "unexpected inline data xattr");
+ error = -EFSCORRUPTED;
+ goto out;
+ }
error = ext4_xattr_ibody_set(handle, inode, &i, &is);
if (error) {
@@ -349,7 +358,11 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
if (error)
goto out;
- BUG_ON(is.s.not_found);
+ if (is.s.not_found) {
+ EXT4_ERROR_INODE(inode, "missing inline data xattr");
+ error = -EFSCORRUPTED;
+ goto out;
+ }
len -= EXT4_MIN_INLINE_DATA_SIZE;
value = kzalloc(len, GFP_NOFS);
@@ -392,7 +405,7 @@ out:
}
static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
- unsigned int len)
+ loff_t len)
{
int ret, size, no_expand;
struct ext4_inode_info *ei = EXT4_I(inode);
@@ -405,7 +418,12 @@ static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
return -ENOSPC;
ext4_write_lock_xattr(inode, &no_expand);
-
+ /*
+ * ei->i_inline_size may have changed since the initial check
+ * if other xattrs were added. Recalculate to ensure
+ * ext4_update_inline_data() validates against current capacity.
+ */
+ (void) ext4_find_inline_data_nolock(inode);
if (ei->i_inline_off)
ret = ext4_update_inline_data(handle, inode, len);
else
@@ -433,9 +451,13 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle,
if (!ei->i_inline_off)
return 0;
+ down_write(&ei->i_data_sem);
+
error = ext4_get_inode_loc(inode, &is.iloc);
- if (error)
+ if (error) {
+ up_write(&ei->i_data_sem);
return error;
+ }
error = ext4_xattr_ibody_find(inode, &i, &is);
if (error)
@@ -474,6 +496,7 @@ out:
brelse(is.iloc.bh);
if (error == -ENODATA)
error = 0;
+ up_write(&ei->i_data_sem);
return error;
}
@@ -557,7 +580,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
return 0;
}
- needed_blocks = ext4_writepage_trans_blocks(inode);
+ needed_blocks = ext4_chunk_trans_extent(inode, 1);
ret = ext4_get_inode_loc(inode, &iloc);
if (ret)
@@ -596,6 +619,7 @@ retry:
goto out;
}
+ ext4_fc_track_inode(handle, inode);
ret = ext4_destroy_inline_data_nolock(handle, inode);
if (ret)
goto out;
@@ -606,6 +630,7 @@ retry:
} else
ret = ext4_block_write_begin(handle, folio, from, to,
ext4_get_block);
+ clear_buffer_new(folio_buffers(folio));
if (!ret && ext4_should_journal_data(inode)) {
ret = ext4_walk_page_buffers(handle, inode,
@@ -637,7 +662,7 @@ retry:
goto retry;
if (folio)
- block_commit_write(&folio->page, from, to);
+ block_commit_write(folio, from, to);
out:
if (folio) {
folio_unlock(folio);
@@ -653,91 +678,109 @@ out_nofolio:
}
/*
- * Try to write data in the inode.
- * If the inode has inline data, check whether the new write can be
- * in the inode also. If not, create the page the handle, move the data
- * to the page make it update and let the later codes create extent for it.
+ * Prepare the write for the inline data.
+ * If the data can be written into the inode, we just read
+ * the page and make it uptodate, and start the journal.
+ * Otherwise read the page, mark it dirty so that it can be
+ * handled in writepages() (the i_disksize update is left to the
+ * normal ext4_da_write_end).
*/
-int ext4_try_to_write_inline_data(struct address_space *mapping,
- struct inode *inode,
- loff_t pos, unsigned len,
- struct folio **foliop)
+int ext4_generic_write_inline_data(struct address_space *mapping,
+ struct inode *inode,
+ loff_t pos, unsigned len,
+ struct folio **foliop,
+ void **fsdata, bool da)
{
int ret;
handle_t *handle;
struct folio *folio;
struct ext4_iloc iloc;
-
- if (pos + len > ext4_get_max_inline_size(inode))
- goto convert;
+ int retries = 0;
ret = ext4_get_inode_loc(inode, &iloc);
if (ret)
return ret;
- /*
- * The possible write could happen in the inode,
- * so try to reserve the space in inode first.
- */
+retry_journal:
handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
- handle = NULL;
- goto out;
+ goto out_release_bh;
}
ret = ext4_prepare_inline_data(handle, inode, pos + len);
if (ret && ret != -ENOSPC)
- goto out;
+ goto out_stop_journal;
- /* We don't have space in inline inode, so convert it to extent. */
if (ret == -ENOSPC) {
ext4_journal_stop(handle);
- brelse(iloc.bh);
- goto convert;
- }
+ if (!da) {
+ brelse(iloc.bh);
+ /* Retry inside */
+ return ext4_convert_inline_data_to_extent(mapping, inode);
+ }
- ret = ext4_journal_get_write_access(handle, inode->i_sb, iloc.bh,
- EXT4_JTR_NONE);
- if (ret)
- goto out;
+ ret = ext4_da_convert_inline_data_to_extent(mapping, inode, fsdata);
+ if (ret == -ENOSPC &&
+ ext4_should_retry_alloc(inode->i_sb, &retries))
+ goto retry_journal;
+ goto out_release_bh;
+ }
folio = __filemap_get_folio(mapping, 0, FGP_WRITEBEGIN | FGP_NOFS,
mapping_gfp_mask(mapping));
if (IS_ERR(folio)) {
ret = PTR_ERR(folio);
- goto out;
+ goto out_stop_journal;
}
- *foliop = folio;
down_read(&EXT4_I(inode)->xattr_sem);
+	/* Someone else has already converted it to an extent */
if (!ext4_has_inline_data(inode)) {
ret = 0;
- folio_unlock(folio);
- folio_put(folio);
- goto out_up_read;
+ goto out_release_folio;
}
if (!folio_test_uptodate(folio)) {
ret = ext4_read_inline_folio(inode, folio);
- if (ret < 0) {
- folio_unlock(folio);
- folio_put(folio);
- goto out_up_read;
- }
+ if (ret < 0)
+ goto out_release_folio;
}
- ret = 1;
- handle = NULL;
-out_up_read:
+ ret = ext4_journal_get_write_access(handle, inode->i_sb, iloc.bh, EXT4_JTR_NONE);
+ if (ret)
+ goto out_release_folio;
+ *foliop = folio;
up_read(&EXT4_I(inode)->xattr_sem);
-out:
- if (handle && (ret != 1))
- ext4_journal_stop(handle);
+ brelse(iloc.bh);
+ return 1;
+
+out_release_folio:
+ up_read(&EXT4_I(inode)->xattr_sem);
+ folio_unlock(folio);
+ folio_put(folio);
+out_stop_journal:
+ ext4_journal_stop(handle);
+out_release_bh:
brelse(iloc.bh);
return ret;
-convert:
- return ext4_convert_inline_data_to_extent(mapping, inode);
+}
+
+/*
+ * Try to write data in the inode.
+ * If the inode has inline data, check whether the new write can be
+ * in the inode also. If not, create the page and the handle, move the
+ * data to the page, make it uptodate and let the later code create an
+ * extent for it.
+ */
+int ext4_try_to_write_inline_data(struct address_space *mapping,
+ struct inode *inode,
+ loff_t pos, unsigned len,
+ struct folio **foliop)
+{
+ if (pos + len > ext4_get_max_inline_size(inode))
+ return ext4_convert_inline_data_to_extent(mapping, inode);
+ return ext4_generic_write_inline_data(mapping, inode, pos, len,
+ foliop, NULL, false);
}
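After this refactor ext4_try_to_write_inline_data() is a thin wrapper passing da == false, and the delayed-allocation begin path (whose old body is deleted further down) presumably becomes an equally thin wrapper passing da == true together with the fsdata pointer. That wrapper is not shown in these hunks; the sketch below is only an assumption of its shape, mirroring the deleted function's signature.

/* Sketch only: assumes ext4's inline-data declarations are in scope. */
static int example_da_write_inline_data_begin(struct address_space *mapping,
					      struct inode *inode,
					      loff_t pos, unsigned len,
					      struct folio **foliop,
					      void **fsdata)
{
	return ext4_generic_write_inline_data(mapping, inode, pos, len,
					      foliop, fsdata, true);
}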
int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
@@ -867,6 +910,7 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
return ret;
}
+ clear_buffer_new(folio_buffers(folio));
folio_mark_dirty(folio);
folio_mark_uptodate(folio);
ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
@@ -881,94 +925,6 @@ out:
return ret;
}
-/*
- * Prepare the write for the inline data.
- * If the data can be written into the inode, we just read
- * the page and make it uptodate, and start the journal.
- * Otherwise read the page, makes it dirty so that it can be
- * handle in writepages(the i_disksize update is left to the
- * normal ext4_da_write_end).
- */
-int ext4_da_write_inline_data_begin(struct address_space *mapping,
- struct inode *inode,
- loff_t pos, unsigned len,
- struct folio **foliop,
- void **fsdata)
-{
- int ret;
- handle_t *handle;
- struct folio *folio;
- struct ext4_iloc iloc;
- int retries = 0;
-
- ret = ext4_get_inode_loc(inode, &iloc);
- if (ret)
- return ret;
-
-retry_journal:
- handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out;
- }
-
- ret = ext4_prepare_inline_data(handle, inode, pos + len);
- if (ret && ret != -ENOSPC)
- goto out_journal;
-
- if (ret == -ENOSPC) {
- ext4_journal_stop(handle);
- ret = ext4_da_convert_inline_data_to_extent(mapping,
- inode,
- fsdata);
- if (ret == -ENOSPC &&
- ext4_should_retry_alloc(inode->i_sb, &retries))
- goto retry_journal;
- goto out;
- }
-
- /*
- * We cannot recurse into the filesystem as the transaction
- * is already started.
- */
- folio = __filemap_get_folio(mapping, 0, FGP_WRITEBEGIN | FGP_NOFS,
- mapping_gfp_mask(mapping));
- if (IS_ERR(folio)) {
- ret = PTR_ERR(folio);
- goto out_journal;
- }
-
- down_read(&EXT4_I(inode)->xattr_sem);
- if (!ext4_has_inline_data(inode)) {
- ret = 0;
- goto out_release_page;
- }
-
- if (!folio_test_uptodate(folio)) {
- ret = ext4_read_inline_folio(inode, folio);
- if (ret < 0)
- goto out_release_page;
- }
- ret = ext4_journal_get_write_access(handle, inode->i_sb, iloc.bh,
- EXT4_JTR_NONE);
- if (ret)
- goto out_release_page;
-
- up_read(&EXT4_I(inode)->xattr_sem);
- *foliop = folio;
- brelse(iloc.bh);
- return 1;
-out_release_page:
- up_read(&EXT4_I(inode)->xattr_sem);
- folio_unlock(folio);
- folio_put(folio);
-out_journal:
- ext4_journal_stop(handle);
-out:
- brelse(iloc.bh);
- return ret;
-}
-
#ifdef INLINE_DIR_DEBUG
void ext4_show_inline_dir(struct inode *dir, struct buffer_head *bh,
void *inline_start, int inline_size)
@@ -1012,7 +968,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
int err;
struct ext4_dir_entry_2 *de;
- err = ext4_find_dest_de(dir, inode, iloc->bh, inline_start,
+ err = ext4_find_dest_de(dir, iloc->bh, inline_start,
inline_size, fname, &de);
if (err)
return err;
@@ -1059,7 +1015,7 @@ static void *ext4_get_inline_xattr_pos(struct inode *inode,
}
/* Set the final de to cover the whole block. */
-static void ext4_update_final_de(void *de_buf, int old_size, int new_size)
+void ext4_update_final_de(void *de_buf, int old_size, int new_size)
{
struct ext4_dir_entry_2 *de, *prev_de;
void *limit;
@@ -1123,51 +1079,6 @@ static void ext4_restore_inline_data(handle_t *handle, struct inode *inode,
ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
}
-static int ext4_finish_convert_inline_dir(handle_t *handle,
- struct inode *inode,
- struct buffer_head *dir_block,
- void *buf,
- int inline_size)
-{
- int err, csum_size = 0, header_size = 0;
- struct ext4_dir_entry_2 *de;
- void *target = dir_block->b_data;
-
- /*
- * First create "." and ".." and then copy the dir information
- * back to the block.
- */
- de = target;
- de = ext4_init_dot_dotdot(inode, de,
- inode->i_sb->s_blocksize, csum_size,
- le32_to_cpu(((struct ext4_dir_entry_2 *)buf)->inode), 1);
- header_size = (void *)de - target;
-
- memcpy((void *)de, buf + EXT4_INLINE_DOTDOT_SIZE,
- inline_size - EXT4_INLINE_DOTDOT_SIZE);
-
- if (ext4_has_metadata_csum(inode->i_sb))
- csum_size = sizeof(struct ext4_dir_entry_tail);
-
- inode->i_size = inode->i_sb->s_blocksize;
- i_size_write(inode, inode->i_sb->s_blocksize);
- EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
- ext4_update_final_de(dir_block->b_data,
- inline_size - EXT4_INLINE_DOTDOT_SIZE + header_size,
- inode->i_sb->s_blocksize - csum_size);
-
- if (csum_size)
- ext4_initialize_dirent_tail(dir_block,
- inode->i_sb->s_blocksize);
- set_buffer_uptodate(dir_block);
- unlock_buffer(dir_block);
- err = ext4_handle_dirty_dirblock(handle, inode, dir_block);
- if (err)
- return err;
- set_buffer_verified(dir_block);
- return ext4_mark_inode_dirty(handle, inode);
-}
-
static int ext4_convert_inline_data_nolock(handle_t *handle,
struct inode *inode,
struct ext4_iloc *iloc)
@@ -1239,8 +1150,17 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
error = ext4_handle_dirty_metadata(handle,
inode, data_bh);
} else {
- error = ext4_finish_convert_inline_dir(handle, inode, data_bh,
- buf, inline_size);
+ unlock_buffer(data_bh);
+ inode->i_size = inode->i_sb->s_blocksize;
+ i_size_write(inode, inode->i_sb->s_blocksize);
+ EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
+
+ error = ext4_init_dirblock(handle, inode, data_bh,
+ le32_to_cpu(((struct ext4_dir_entry_2 *)buf)->inode),
+ buf + EXT4_INLINE_DOTDOT_SIZE,
+ inline_size - EXT4_INLINE_DOTDOT_SIZE);
+ if (!error)
+ error = ext4_mark_inode_dirty(handle, inode);
}
out_restore:
@@ -1379,7 +1299,7 @@ int ext4_inlinedir_to_tree(struct file *dir_file,
if (pos == 0) {
fake.inode = cpu_to_le32(inode->i_ino);
fake.name_len = 1;
- strcpy(fake.name, ".");
+ memcpy(fake.name, ".", 2);
fake.rec_len = ext4_rec_len_to_disk(
ext4_dir_rec_len(fake.name_len, NULL),
inline_size);
@@ -1389,7 +1309,7 @@ int ext4_inlinedir_to_tree(struct file *dir_file,
} else if (pos == EXT4_INLINE_DOTDOT_OFFSET) {
fake.inode = cpu_to_le32(parent_ino);
fake.name_len = 2;
- strcpy(fake.name, "..");
+ memcpy(fake.name, "..", 3);
fake.rec_len = ext4_rec_len_to_disk(
ext4_dir_rec_len(fake.name_len, NULL),
inline_size);
@@ -1928,7 +1848,7 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
};
- needed_blocks = ext4_writepage_trans_blocks(inode);
+ needed_blocks = ext4_chunk_trans_extent(inode, 1);
handle = ext4_journal_start(inode, EXT4_HT_INODE, needed_blocks);
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -1967,7 +1887,12 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
if ((err = ext4_xattr_ibody_find(inode, &i, &is)) != 0)
goto out_error;
- BUG_ON(is.s.not_found);
+ if (is.s.not_found) {
+ EXT4_ERROR_INODE(inode,
+ "missing inline data xattr");
+ err = -EFSCORRUPTED;
+ goto out_error;
+ }
value_len = le32_to_cpu(is.s.here->e_value_size);
value = kmalloc(value_len, GFP_NOFS);
@@ -2043,7 +1968,7 @@ int ext4_convert_inline_data(struct inode *inode)
return 0;
}
- needed_blocks = ext4_writepage_trans_blocks(inode);
+ needed_blocks = ext4_chunk_trans_extent(inode, 1);
iloc.bh = NULL;
error = ext4_get_inode_loc(inode, &iloc);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 89aade6f45f6..0c466ccbed69 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -31,6 +31,7 @@
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
+#include <linux/rmap.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
@@ -57,29 +58,27 @@ static void ext4_journalled_zero_new_buffers(handle_t *handle,
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
struct ext4_inode_info *ei)
{
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
__u32 csum;
__u16 dummy_csum = 0;
int offset = offsetof(struct ext4_inode, i_checksum_lo);
unsigned int csum_size = sizeof(dummy_csum);
- csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
- csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
+ csum = ext4_chksum(ei->i_csum_seed, (__u8 *)raw, offset);
+ csum = ext4_chksum(csum, (__u8 *)&dummy_csum, csum_size);
offset += csum_size;
- csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+ csum = ext4_chksum(csum, (__u8 *)raw + offset,
EXT4_GOOD_OLD_INODE_SIZE - offset);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
offset = offsetof(struct ext4_inode, i_checksum_hi);
- csum = ext4_chksum(sbi, csum, (__u8 *)raw +
- EXT4_GOOD_OLD_INODE_SIZE,
+ csum = ext4_chksum(csum, (__u8 *)raw + EXT4_GOOD_OLD_INODE_SIZE,
offset - EXT4_GOOD_OLD_INODE_SIZE);
if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
- csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
+ csum = ext4_chksum(csum, (__u8 *)&dummy_csum,
csum_size);
offset += csum_size;
}
- csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+ csum = ext4_chksum(csum, (__u8 *)raw + offset,
EXT4_INODE_SIZE(inode->i_sb) - offset);
}
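Throughout these hunks ext4_chksum() loses its sbi argument, so the checksum helper no longer needs the per-superblock crypto context. The new helper's body is not part of this diff; the sketch below assumes it is a direct crc32c() fold over the buffer, which is the checksum ext4 metadata uses, and the example_ name is illustrative.

#include <linux/crc32c.h>
#include <linux/types.h>

/* Illustrative sbi-less form: fold a buffer into a running crc. */
static inline u32 example_chksum(u32 crc, const void *address,
				 unsigned int length)
{
	return crc32c(crc, address, length);
}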
@@ -93,7 +92,7 @@ static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
cpu_to_le32(EXT4_OS_LINUX) ||
- !ext4_has_metadata_csum(inode->i_sb))
+ !ext4_has_feature_metadata_csum(inode->i_sb))
return 1;
provided = le16_to_cpu(raw->i_checksum_lo);
@@ -114,7 +113,7 @@ void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
cpu_to_le32(EXT4_OS_LINUX) ||
- !ext4_has_metadata_csum(inode->i_sb))
+ !ext4_has_feature_metadata_csum(inode->i_sb))
return;
csum = ext4_inode_csum(inode, raw, ei);
@@ -141,16 +140,13 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode,
new_size);
}
-static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
- int pextents);
-
/*
* Test whether an inode is a fast symlink.
* A fast symlink has its symlink data stored in ext4_inode_info->i_data.
*/
int ext4_inode_is_fast_symlink(struct inode *inode)
{
- if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
+ if (!ext4_has_feature_ea_inode(inode->i_sb)) {
int ea_blocks = EXT4_I(inode)->i_file_acl ?
EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
@@ -181,6 +177,8 @@ void ext4_evict_inode(struct inode *inode)
trace_ext4_evict_inode(inode);
+ dax_break_layout_final(inode);
+
if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
ext4_evict_ea_inode(inode);
if (inode->i_nlink) {
@@ -204,8 +202,7 @@ void ext4_evict_inode(struct inode *inode)
* the inode. Flush worker is ignoring it because of I_FREEING flag but
* we still need to remove the inode from the writeback lists.
*/
- if (!list_empty_careful(&inode->i_io_list))
- inode_io_list_del(inode);
+ inode_io_list_del(inode);
/*
* Protect us against freezing - iput() caller didn't have to have any
@@ -383,10 +380,11 @@ static int __check_block_validity(struct inode *inode, const char *func,
unsigned int line,
struct ext4_map_blocks *map)
{
- if (ext4_has_feature_journal(inode->i_sb) &&
- (inode->i_ino ==
- le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+
+ if (journal && inode == journal->j_inode)
return 0;
+
if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
ext4_error_inode(inode, func, line, map->m_pblk,
"lblock %lu mapped to illegal pblock %llu "
@@ -412,6 +410,32 @@ int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
return ret;
}
+/*
+ * For generic regular files, when updating the extent tree, Ext4 should
+ * hold the i_rwsem and invalidate_lock exclusively. This ensures
+ * exclusion against concurrent page faults, as well as reads and writes.
+ */
+#ifdef CONFIG_EXT4_DEBUG
+void ext4_check_map_extents_env(struct inode *inode)
+{
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return;
+
+ if (!S_ISREG(inode->i_mode) ||
+ IS_NOQUOTA(inode) || IS_VERITY(inode) ||
+ is_special_ino(inode->i_sb, inode->i_ino) ||
+ (inode_state_read_once(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) ||
+ ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE) ||
+ ext4_verity_in_progress(inode))
+ return;
+
+ WARN_ON_ONCE(!inode_is_locked(inode) &&
+ !rwsem_is_locked(&inode->i_mapping->invalidate_lock));
+}
+#else
+void ext4_check_map_extents_env(struct inode *inode) {}
+#endif
+
#define check_block_validity(inode, map) \
__check_block_validity((inode), __func__, __LINE__, (map))
@@ -458,20 +482,80 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
}
#endif /* ES_AGGRESSIVE_TEST */
+static int ext4_map_query_blocks_next_in_leaf(handle_t *handle,
+ struct inode *inode, struct ext4_map_blocks *map,
+ unsigned int orig_mlen)
+{
+ struct ext4_map_blocks map2;
+ unsigned int status, status2;
+ int retval;
+
+ status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+
+ WARN_ON_ONCE(!(map->m_flags & EXT4_MAP_QUERY_LAST_IN_LEAF));
+ WARN_ON_ONCE(orig_mlen <= map->m_len);
+
+ /* Prepare map2 for lookup in next leaf block */
+ map2.m_lblk = map->m_lblk + map->m_len;
+ map2.m_len = orig_mlen - map->m_len;
+ map2.m_flags = 0;
+ retval = ext4_ext_map_blocks(handle, inode, &map2, 0);
+
+ if (retval <= 0) {
+ ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+ map->m_pblk, status, false);
+ return map->m_len;
+ }
+
+ if (unlikely(retval != map2.m_len)) {
+ ext4_warning(inode->i_sb,
+ "ES len assertion failed for inode "
+ "%lu: retval %d != map->m_len %d",
+ inode->i_ino, retval, map2.m_len);
+ WARN_ON(1);
+ }
+
+ status2 = map2.m_flags & EXT4_MAP_UNWRITTEN ?
+ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+
+ /*
+ * If map2 is contiguous with map, then let's insert it as a single
+	 * extent in the es cache and return the combined length of both maps.
+ */
+ if (map->m_pblk + map->m_len == map2.m_pblk &&
+ status == status2) {
+ ext4_es_insert_extent(inode, map->m_lblk,
+ map->m_len + map2.m_len, map->m_pblk,
+ status, false);
+ map->m_len += map2.m_len;
+ } else {
+ ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+ map->m_pblk, status, false);
+ }
+
+ return map->m_len;
+}
+
static int ext4_map_query_blocks(handle_t *handle, struct inode *inode,
- struct ext4_map_blocks *map)
+ struct ext4_map_blocks *map, int flags)
{
unsigned int status;
int retval;
+ unsigned int orig_mlen = map->m_len;
+ flags &= EXT4_EX_QUERY_FILTER;
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- retval = ext4_ext_map_blocks(handle, inode, map, 0);
+ retval = ext4_ext_map_blocks(handle, inode, map, flags);
else
- retval = ext4_ind_map_blocks(handle, inode, map, 0);
-
- if (retval <= 0)
+ retval = ext4_ind_map_blocks(handle, inode, map, flags);
+ if (retval < 0)
return retval;
+ /* A hole? */
+ if (retval == 0)
+ goto out;
+
if (unlikely(retval != map->m_len)) {
ext4_warning(inode->i_sb,
"ES len assertion failed for inode "
@@ -480,10 +564,23 @@ static int ext4_map_query_blocks(handle_t *handle, struct inode *inode,
WARN_ON(1);
}
- status = map->m_flags & EXT4_MAP_UNWRITTEN ?
- EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
- ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
- map->m_pblk, status, false);
+ /*
+ * No need to query next in leaf:
+	 * - if the returned extent is not the last one in the leaf, or
+	 * - if that last extent already covers the full requested range
+ */
+ if (!(map->m_flags & EXT4_MAP_QUERY_LAST_IN_LEAF) ||
+ map->m_len == orig_mlen) {
+ status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+ ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+ map->m_pblk, status, false);
+ } else {
+ retval = ext4_map_query_blocks_next_in_leaf(handle, inode, map,
+ orig_mlen);
+ }
+out:
+ map->m_seq = READ_ONCE(EXT4_I(inode)->i_es_seq);
return retval;
}
@@ -555,8 +652,8 @@ static int ext4_map_create_blocks(handle_t *handle, struct inode *inode,
* If the extent has been zeroed out, we don't need to update
* extent status tree.
*/
- if (flags & EXT4_GET_BLOCKS_PRE_IO &&
- ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
+ if (flags & EXT4_GET_BLOCKS_SPLIT_NOMERGE &&
+ ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es, &map->m_seq)) {
if (ext4_es_is_written(&es))
return retval;
}
@@ -565,6 +662,7 @@ static int ext4_map_create_blocks(handle_t *handle, struct inode *inode,
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
ext4_es_insert_extent(inode, map->m_lblk, map->m_len, map->m_pblk,
status, flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE);
+ map->m_seq = READ_ONCE(EXT4_I(inode)->i_es_seq);
return retval;
}
@@ -598,6 +696,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
struct extent_status es;
int retval;
int ret = 0;
+ unsigned int orig_mlen = map->m_len;
#ifdef ES_AGGRESSIVE_TEST
struct ext4_map_blocks orig_map;
@@ -618,9 +717,18 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
return -EFSCORRUPTED;
+ /*
+	 * For regular files, callers in the data submission context are the
+	 * only ones allowed to proceed without holding the i_rwsem or
+	 * invalidate_lock. However, caching unrelated ranges is not permitted.
+ */
+ if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
+ WARN_ON_ONCE(!(flags & EXT4_EX_NOCACHE));
+ else
+ ext4_check_map_extents_env(inode);
+
/* Lookup extent status tree firstly */
- if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
- ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
+ if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es, &map->m_seq)) {
if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
map->m_pblk = ext4_es_pblock(&es) +
map->m_lblk - es.es_lblk;
@@ -649,7 +757,11 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
ext4_map_blocks_es_recheck(handle, inode, map,
&orig_map, flags);
#endif
- goto found;
+ if (!(flags & EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF) ||
+ orig_mlen == map->m_len)
+ goto found;
+
+ map->m_len = orig_mlen;
}
/*
* In the query cache no-wait mode, nothing we can do more if we
@@ -663,7 +775,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
* file system block.
*/
down_read(&EXT4_I(inode)->i_data_sem);
- retval = ext4_map_query_blocks(handle, inode, map);
+ retval = ext4_map_query_blocks(handle, inode, map, flags);
up_read((&EXT4_I(inode)->i_data_sem));
found:
@@ -692,6 +804,8 @@ found:
if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
return retval;
+
+ ext4_fc_track_inode(handle, inode);
/*
* New blocks allocate and/or writing to unwritten extent
* will possibly result in updating i_data, so we take
@@ -701,7 +815,13 @@ found:
down_write(&EXT4_I(inode)->i_data_sem);
retval = ext4_map_create_blocks(handle, inode, map, flags);
up_write((&EXT4_I(inode)->i_data_sem));
- if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
+
+ if (retval < 0)
+ ext_debug(inode, "failed with err %d\n", retval);
+ if (retval <= 0)
+ return retval;
+
+ if (map->m_flags & EXT4_MAP_MAPPED) {
ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
@@ -716,9 +836,8 @@ found:
!(flags & EXT4_GET_BLOCKS_ZERO) &&
!ext4_is_quota_file(inode) &&
ext4_should_order_data(inode)) {
- loff_t start_byte =
- (loff_t)map->m_lblk << inode->i_blkbits;
- loff_t length = (loff_t)map->m_len << inode->i_blkbits;
+ loff_t start_byte = EXT4_LBLK_TO_B(inode, map->m_lblk);
+ loff_t length = EXT4_LBLK_TO_B(inode, map->m_len);
if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
ret = ext4_jbd2_inode_add_wait(handle, inode,
@@ -730,12 +849,8 @@ found:
return ret;
}
}
- if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN ||
- map->m_flags & EXT4_MAP_MAPPED))
- ext4_fc_track_range(handle, inode, map->m_lblk,
- map->m_lblk + map->m_len - 1);
- if (retval < 0)
- ext_debug(inode, "failed with err %d\n", retval);
+ ext4_fc_track_range(handle, inode, map->m_lblk, map->m_lblk +
+ map->m_len - 1);
return retval;
}
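Several hunks in this file replace open-coded shifts with EXT4_LBLK_TO_B()/EXT4_PG_TO_LBLK()-style helpers (and EXT4_B_TO_LBLK() further down). Their definitions are not part of this diff; the sketch below only restates the expressions they replace, so the rounding convention of EXT4_B_TO_LBLK() is deliberately not guessed at. ext4_lblk_t is spelled as u32 here to keep the sketch self-contained.

#include <linux/fs.h>
#include <linux/mm.h>

/* logical block -> byte offset, as in the old "(loff_t)lblk << i_blkbits" */
static inline loff_t example_lblk_to_bytes(struct inode *inode, u32 lblk)
{
	return (loff_t)lblk << inode->i_blkbits;
}

/* page index -> first logical block of that page */
static inline u32 example_page_to_lblk(struct inode *inode, pgoff_t index)
{
	return (u32)((u64)index << (PAGE_SHIFT - inode->i_blkbits));
}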
@@ -751,7 +866,7 @@ static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
flags &= EXT4_MAP_FLAGS;
/* Dummy buffer_head? Set non-atomically. */
- if (!bh->b_page) {
+ if (!bh->b_folio) {
bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
return;
}
@@ -766,6 +881,26 @@ static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
} while (unlikely(!try_cmpxchg(&bh->b_state, &old_state, new_state)));
}
+/*
+ * Make sure that the current journal transaction has enough credits to map
+ * one extent. Return -EAGAIN if it cannot extend the current running
+ * transaction.
+ */
+static inline int ext4_journal_ensure_extent_credits(handle_t *handle,
+ struct inode *inode)
+{
+ int credits;
+ int ret;
+
+ /* Called from ext4_da_write_begin() which has no handle started? */
+ if (!handle)
+ return 0;
+
+ credits = ext4_chunk_trans_blocks(inode, 1);
+ ret = __ext4_journal_ensure_credits(handle, credits, credits, 0);
+ return ret <= 0 ? ret : -EAGAIN;
+}
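ext4_journal_ensure_extent_credits() turns a credit-extension failure into -EAGAIN, and its callers (ext4_block_write_begin(), ext4_write_begin() and mpage_map_one_extent() further down) treat that as a transient condition and restart with a fresh handle. A minimal sketch of that retry shape, assuming ext4's internal headers (ext4_jbd2.h) are in scope; example_map_one_extent() is hypothetical.

static int example_map_one_extent(handle_t *handle, struct inode *inode);

static int example_map_with_fresh_handle(struct inode *inode)
{
	handle_t *handle;
	int err;

retry:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* May fail with -EAGAIN via ext4_journal_ensure_extent_credits(). */
	err = example_map_one_extent(handle, inode);

	ext4_journal_stop(handle);
	if (err == -EAGAIN)
		goto retry;
	return err;
}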
+
static int _ext4_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int flags)
{
@@ -1005,7 +1140,12 @@ int ext4_walk_page_buffers(handle_t *handle, struct inode *inode,
*/
static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
{
- folio_mark_dirty(bh->b_folio);
+ struct folio *folio = bh->b_folio;
+ struct inode *inode = folio->mapping->host;
+
+ /* only regular files have a_ops */
+ if (S_ISREG(inode->i_mode))
+ folio_mark_dirty(folio);
return ext4_handle_dirty_metadata(handle, NULL, bh);
}
@@ -1023,29 +1163,27 @@ int ext4_block_write_begin(handle_t *handle, struct folio *folio,
loff_t pos, unsigned len,
get_block_t *get_block)
{
- unsigned from = pos & (PAGE_SIZE - 1);
+ unsigned int from = offset_in_folio(folio, pos);
unsigned to = from + len;
struct inode *inode = folio->mapping->host;
unsigned block_start, block_end;
sector_t block;
int err = 0;
- unsigned blocksize = inode->i_sb->s_blocksize;
- unsigned bbits;
+ unsigned int blocksize = i_blocksize(inode);
struct buffer_head *bh, *head, *wait[2];
int nr_wait = 0;
int i;
bool should_journal_data = ext4_should_journal_data(inode);
BUG_ON(!folio_test_locked(folio));
- BUG_ON(from > PAGE_SIZE);
- BUG_ON(to > PAGE_SIZE);
+ BUG_ON(to > folio_size(folio));
BUG_ON(from > to);
+ WARN_ON_ONCE(blocksize > folio_size(folio));
head = folio_buffers(folio);
if (!head)
head = create_empty_buffers(folio, blocksize, 0);
- bbits = ilog2(blocksize);
- block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
+ block = EXT4_PG_TO_LBLK(inode, folio->index);
for (bh = head, block_start = 0; bh != head || !block_start;
block++, block_start = block_end, bh = bh->b_this_page) {
@@ -1056,11 +1194,13 @@ int ext4_block_write_begin(handle_t *handle, struct folio *folio,
}
continue;
}
- if (buffer_new(bh))
+ if (WARN_ON_ONCE(buffer_new(bh)))
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
WARN_ON(bh->b_size != blocksize);
- err = get_block(inode, block, bh, 1);
+ err = ext4_journal_ensure_extent_credits(handle, inode);
+ if (!err)
+ err = get_block(inode, block, bh, 1);
if (err)
break;
if (buffer_new(bh)) {
@@ -1137,7 +1277,8 @@ int ext4_block_write_begin(handle_t *handle, struct folio *folio,
* and the ext4_write_end(). So doing the jbd2_journal_start at the start of
* ext4_write_begin() is the right place.
*/
-static int ext4_write_begin(struct file *file, struct address_space *mapping,
+static int ext4_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
loff_t pos, unsigned len,
struct folio **foliop, void **fsdata)
{
@@ -1149,18 +1290,18 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
pgoff_t index;
unsigned from, to;
- if (unlikely(ext4_forced_shutdown(inode->i_sb)))
- return -EIO;
+ ret = ext4_emergency_state(inode->i_sb);
+ if (unlikely(ret))
+ return ret;
trace_ext4_write_begin(inode, pos, len);
/*
* Reserve one block more for addition to orphan list in case
* we allocate blocks but write fails for some reason
*/
- needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
+ needed_blocks = ext4_chunk_trans_extent(inode,
+ ext4_journal_blocks_per_folio(inode)) + 1;
index = pos >> PAGE_SHIFT;
- from = pos & (PAGE_SIZE - 1);
- to = from + len;
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
@@ -1172,17 +1313,23 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
}
/*
- * __filemap_get_folio() can take a long time if the
+ * write_begin_get_folio() can take a long time if the
* system is thrashing due to memory pressure, or if the folio
* is being written back. So grab it first before we start
* the transaction handle. This also allows us to allocate
* the folio (if needed) without using GFP_NOFS.
*/
retry_grab:
- folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
- mapping_gfp_mask(mapping));
+ folio = write_begin_get_folio(iocb, mapping, index, len);
if (IS_ERR(folio))
return PTR_ERR(folio);
+
+ if (len > folio_next_pos(folio) - pos)
+ len = folio_next_pos(folio) - pos;
+
+ from = offset_in_folio(folio, pos);
+ to = from + len;
+
/*
* The same as page allocation, we prealloc buffer heads before
* starting the handle.
@@ -1251,8 +1398,9 @@ retry_journal:
ext4_orphan_del(NULL, inode);
}
- if (ret == -ENOSPC &&
- ext4_should_retry_alloc(inode->i_sb, &retries))
+ if (ret == -EAGAIN ||
+ (ret == -ENOSPC &&
+ ext4_should_retry_alloc(inode->i_sb, &retries)))
goto retry_journal;
folio_put(folio);
return ret;
@@ -1272,17 +1420,18 @@ static int write_end_fn(handle_t *handle, struct inode *inode,
ret = ext4_dirty_journalled_data(handle, bh);
clear_buffer_meta(bh);
clear_buffer_prio(bh);
+ clear_buffer_new(bh);
return ret;
}
/*
* We need to pick up the new inode size which generic_commit_write gave us
- * `file' can be NULL - eg, when called from page_symlink().
+ * `iocb` can be NULL - eg, when called from page_symlink().
*
* ext4 never places buffers on inode->i_mapping->i_private_list. metadata
* buffers are managed internally.
*/
-static int ext4_write_end(struct file *file,
+static int ext4_write_end(const struct kiocb *iocb,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct folio *folio, void *fsdata)
@@ -1301,7 +1450,7 @@ static int ext4_write_end(struct file *file,
return ext4_write_inline_data_end(inode, pos, len, copied,
folio);
- copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
+ copied = block_write_end(pos, len, copied, folio);
/*
* it's important to update i_size while still holding folio lock:
* page writeout could otherwise come in and zero beyond i_size.
@@ -1387,7 +1536,7 @@ static void ext4_journalled_zero_new_buffers(handle_t *handle,
} while (bh != head);
}
-static int ext4_journalled_write_end(struct file *file,
+static int ext4_journalled_write_end(const struct kiocb *iocb,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct folio *folio, void *fsdata)
@@ -1544,11 +1693,12 @@ struct mpage_da_data {
unsigned int can_map:1; /* Can writepages call map blocks? */
/* These are internal state of ext4_do_writepages() */
- pgoff_t first_page; /* The first page to write */
- pgoff_t next_page; /* Current page to examine */
- pgoff_t last_page; /* Last page to examine */
+ loff_t start_pos; /* The start pos to write */
+ loff_t next_pos; /* Current pos to examine */
+ loff_t end_pos; /* Last pos to examine */
+
/*
- * Extent to map - this can be after first_page because that can be
+ * Extent to map - this can be after start_pos because that can be
* fully mapped. We somewhat abuse m_flags to store whether the extent
* is delalloc or unwritten.
*/
@@ -1568,38 +1718,38 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
struct inode *inode = mpd->inode;
struct address_space *mapping = inode->i_mapping;
- /* This is necessary when next_page == 0. */
- if (mpd->first_page >= mpd->next_page)
+ /* This is necessary when next_pos == 0. */
+ if (mpd->start_pos >= mpd->next_pos)
return;
mpd->scanned_until_end = 0;
- index = mpd->first_page;
- end = mpd->next_page - 1;
if (invalidate) {
ext4_lblk_t start, last;
- start = index << (PAGE_SHIFT - inode->i_blkbits);
- last = end << (PAGE_SHIFT - inode->i_blkbits);
+ start = EXT4_B_TO_LBLK(inode, mpd->start_pos);
+ last = mpd->next_pos >> inode->i_blkbits;
/*
* avoid racing with extent status tree scans made by
* ext4_insert_delayed_block()
*/
down_write(&EXT4_I(inode)->i_data_sem);
- ext4_es_remove_extent(inode, start, last - start + 1);
+ ext4_es_remove_extent(inode, start, last - start);
up_write(&EXT4_I(inode)->i_data_sem);
}
folio_batch_init(&fbatch);
- while (index <= end) {
- nr = filemap_get_folios(mapping, &index, end, &fbatch);
+ index = mpd->start_pos >> PAGE_SHIFT;
+ end = mpd->next_pos >> PAGE_SHIFT;
+ while (index < end) {
+ nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
if (nr == 0)
break;
for (i = 0; i < nr; i++) {
struct folio *folio = fbatch.folios[i];
- if (folio->index < mpd->first_page)
+ if (folio_pos(folio) < mpd->start_pos)
continue;
- if (folio_next_index(folio) - 1 > end)
+ if (folio_next_index(folio) > end)
continue;
BUG_ON(!folio_test_locked(folio));
BUG_ON(folio_test_writeback(folio));
@@ -1760,8 +1910,10 @@ static int ext4_da_map_blocks(struct inode *inode, struct ext4_map_blocks *map)
ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
(unsigned long) map->m_lblk);
+ ext4_check_map_extents_env(inode);
+
/* Lookup extent status tree firstly */
- if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
+ if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es, NULL)) {
map->m_len = min_t(unsigned int, map->m_len,
es.es_len - (map->m_lblk - es.es_lblk));
@@ -1800,7 +1952,7 @@ found:
if (ext4_has_inline_data(inode))
retval = 0;
else
- retval = ext4_map_query_blocks(NULL, inode, map);
+ retval = ext4_map_query_blocks(NULL, inode, map, 0);
up_read(&EXT4_I(inode)->i_data_sem);
if (retval)
return retval < 0 ? retval : 0;
@@ -1814,7 +1966,7 @@ add_delayed:
* is held in write mode, before inserting a new da entry in
* the extent status tree.
*/
- if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
+ if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es, NULL)) {
map->m_len = min_t(unsigned int, map->m_len,
es.es_len - (map->m_lblk - es.es_lblk));
@@ -1823,7 +1975,7 @@ add_delayed:
goto found;
}
} else if (!ext4_has_inline_data(inode)) {
- retval = ext4_map_query_blocks(NULL, inode, map);
+ retval = ext4_map_query_blocks(NULL, inode, map, 0);
if (retval) {
up_write(&EXT4_I(inode)->i_data_sem);
return retval < 0 ? retval : 0;
@@ -1832,6 +1984,8 @@ add_delayed:
map->m_flags |= EXT4_MAP_DELAYED;
retval = ext4_insert_delayed_blocks(inode, map->m_lblk, map->m_len);
+ if (!retval)
+ map->m_seq = READ_ONCE(EXT4_I(inode)->i_es_seq);
up_write(&EXT4_I(inode)->i_data_sem);
return retval;
@@ -1899,7 +2053,8 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
static void mpage_folio_done(struct mpage_da_data *mpd, struct folio *folio)
{
- mpd->first_page += folio_nr_pages(folio);
+ mpd->start_pos += folio_size(folio);
+ mpd->wbc->nr_to_write -= folio_nr_pages(folio);
folio_unlock(folio);
}
@@ -1909,7 +2064,7 @@ static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
loff_t size;
int err;
- BUG_ON(folio->index != mpd->first_page);
+ WARN_ON_ONCE(folio_pos(folio) != mpd->start_pos);
folio_clear_dirty_for_io(folio);
/*
* We have to be very careful here! Nothing protects writeback path
@@ -1930,8 +2085,6 @@ static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
!ext4_verity_in_progress(mpd->inode))
len = size & (len - 1);
err = ext4_bio_write_folio(&mpd->io_submit, folio, len);
- if (!err)
- mpd->wbc->nr_to_write--;
return err;
}
@@ -2079,7 +2232,6 @@ static int mpage_process_folio(struct mpage_da_data *mpd, struct folio *folio,
ext4_lblk_t lblk = *m_lblk;
ext4_fsblk_t pblock = *m_pblk;
int err = 0;
- int blkbits = mpd->inode->i_blkbits;
ssize_t io_end_size = 0;
struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);
@@ -2105,7 +2257,8 @@ static int mpage_process_folio(struct mpage_da_data *mpd, struct folio *folio,
err = PTR_ERR(io_end_vec);
goto out;
}
- io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;
+ io_end_vec->offset = EXT4_LBLK_TO_B(mpd->inode,
+ mpd->map.m_lblk);
}
*map_bh = true;
goto out;
@@ -2115,7 +2268,7 @@ static int mpage_process_folio(struct mpage_da_data *mpd, struct folio *folio,
bh->b_blocknr = pblock++;
}
clear_buffer_unwritten(bh);
- io_end_size += (1 << blkbits);
+ io_end_size += i_blocksize(mpd->inode);
} while (lblk++, (bh = bh->b_this_page) != head);
io_end_vec->size += io_end_size;
@@ -2145,16 +2298,14 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
struct folio_batch fbatch;
unsigned nr, i;
struct inode *inode = mpd->inode;
- int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
pgoff_t start, end;
ext4_lblk_t lblk;
ext4_fsblk_t pblock;
int err;
bool map_bh = false;
- start = mpd->map.m_lblk >> bpp_bits;
- end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
- lblk = start << bpp_bits;
+ start = EXT4_LBLK_TO_PG(inode, mpd->map.m_lblk);
+ end = EXT4_LBLK_TO_PG(inode, mpd->map.m_lblk + mpd->map.m_len - 1);
pblock = mpd->map.m_pblk;
folio_batch_init(&fbatch);
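The hunks above replace open-coded shift arithmetic with EXT4_LBLK_TO_B()/EXT4_LBLK_TO_PG()/EXT4_PG_TO_LBLK()-style conversions between logical blocks, pages, and byte offsets. Below is a minimal standalone sketch of that arithmetic; the helper names, the 4 KiB page size, and the 1 KiB block size are assumptions for illustration, not the kernel definitions.

/*
 * Standalone sketch of the logical-block <-> byte <-> page conversions used
 * in the hunks above. Only the shift arithmetic mirrors the diff; the names
 * and constants are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12              /* assumed 4 KiB pages */

typedef uint32_t lblk_t;

static inline uint64_t lblk_to_b(lblk_t lblk, unsigned blkbits)
{
        return (uint64_t)lblk << blkbits;
}

static inline uint64_t lblk_to_pg(lblk_t lblk, unsigned blkbits)
{
        /* valid for blkbits <= PAGE_SHIFT: several blocks per page */
        return lblk >> (PAGE_SHIFT - blkbits);
}

static inline lblk_t pg_to_lblk(uint64_t index, unsigned blkbits)
{
        return (lblk_t)(index << (PAGE_SHIFT - blkbits));
}

int main(void)
{
        unsigned blkbits = 10;     /* 1 KiB blocks on 4 KiB pages */
        lblk_t lblk = 13;

        printf("lblk %u -> byte offset %llu\n", lblk,
               (unsigned long long)lblk_to_b(lblk, blkbits));
        printf("lblk %u -> page index  %llu\n", lblk,
               (unsigned long long)lblk_to_pg(lblk, blkbits));
        printf("page 3 -> first lblk   %u\n",
               (unsigned)pg_to_lblk(3, blkbits));
        return 0;
}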
@@ -2165,6 +2316,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
for (i = 0; i < nr; i++) {
struct folio *folio = fbatch.folios[i];
+ lblk = EXT4_PG_TO_LBLK(inode, folio->index);
err = mpage_process_folio(mpd, folio, &lblk, &pblock,
&map_bh);
/*
@@ -2198,6 +2350,11 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
int get_blocks_flags;
int err, dioread_nolock;
+ /* Make sure transaction has enough credits for this extent */
+ err = ext4_journal_ensure_extent_credits(handle, inode);
+ if (err < 0)
+ return err;
+
trace_ext4_da_write_pages_extent(inode, map);
/*
* Call ext4_map_blocks() to allocate any delayed allocation blocks, or
@@ -2207,11 +2364,15 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
* previously reserved. However we must not fail because we're in
* writeback and there is nothing we can do about it so it might result
* in data loss. So use reserved blocks to allocate metadata if
- * possible.
+ * possible. In addition, do not cache any unrelated extents, as it
+ * only holds the folio lock but does not hold the i_rwsem or
+ * invalidate_lock, which could corrupt the extent status tree.
*/
get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
EXT4_GET_BLOCKS_METADATA_NOFAIL |
- EXT4_GET_BLOCKS_IO_SUBMIT;
+ EXT4_GET_BLOCKS_IO_SUBMIT |
+ EXT4_EX_NOCACHE;
+
dioread_nolock = ext4_should_dioread_nolock(inode);
if (dioread_nolock)
get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
@@ -2225,7 +2386,7 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
mpd->io_submit.io_end->handle = handle->h_rsv_handle;
handle->h_rsv_handle = NULL;
}
- ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
+ ext4_set_io_unwritten_flag(mpd->io_submit.io_end);
}
BUG_ON(map->m_len == 0);
@@ -2233,6 +2394,47 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
}
/*
+ * This is used to submit mapped buffers in a single folio that is not fully
+ * mapped for various reasons, such as insufficient space or journal credits.
+ */
+static int mpage_submit_partial_folio(struct mpage_da_data *mpd)
+{
+ struct inode *inode = mpd->inode;
+ struct folio *folio;
+ loff_t pos;
+ int ret;
+
+ folio = filemap_get_folio(inode->i_mapping,
+ mpd->start_pos >> PAGE_SHIFT);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ /*
+ * The mapped position should be within the current processing folio
+ * but must not be the folio start position.
+ */
+ pos = ((loff_t)mpd->map.m_lblk) << inode->i_blkbits;
+ if (WARN_ON_ONCE((folio_pos(folio) == pos) ||
+ !folio_contains(folio, pos >> PAGE_SHIFT)))
+ return -EINVAL;
+
+ ret = mpage_submit_folio(mpd, folio);
+ if (ret)
+ goto out;
+ /*
+ * Update start_pos to prevent this folio from being released in
+ * mpage_release_unused_pages(); it will be reset to the aligned folio
+ * pos when this folio is written again in the next round. Additionally,
+ * do not update wbc->nr_to_write here, as it will be updated once the
+ * entire folio has finished processing.
+ */
+ mpd->start_pos = pos;
+out:
+ folio_unlock(folio);
+ folio_put(folio);
+ return ret;
+}
+
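mpage_submit_partial_folio() above warns when the partially mapped position is either the folio's start or lies outside the folio entirely. A minimal model of that check, with plain integers standing in for struct folio and its helpers (an illustration, not the kernel API):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12

struct fake_folio {
        uint64_t index;            /* first page index covered by the folio */
        unsigned nr_pages;         /* folio size in pages */
};

static uint64_t folio_pos_(const struct fake_folio *f)
{
        return f->index << PAGE_SHIFT;
}

static bool folio_contains_(const struct fake_folio *f, uint64_t page_index)
{
        return page_index >= f->index && page_index < f->index + f->nr_pages;
}

/* True when the partially mapped position is acceptable for submission. */
static bool partial_pos_ok(const struct fake_folio *f, uint64_t pos)
{
        return folio_pos_(f) != pos && folio_contains_(f, pos >> PAGE_SHIFT);
}

int main(void)
{
        struct fake_folio folio = { .index = 4, .nr_pages = 4 }; /* 16 KiB folio */

        printf("pos 16384 ok? %d (folio start, rejected)\n",
               partial_pos_ok(&folio, 16384));
        printf("pos 20480 ok? %d (inside folio, accepted)\n",
               partial_pos_ok(&folio, 20480));
        printf("pos 40960 ok? %d (outside folio, rejected)\n",
               partial_pos_ok(&folio, 40960));
        return 0;
}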
+/*
* mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
* mpd->len and submit pages underlying it for IO
*
@@ -2267,23 +2469,31 @@ static int mpage_map_and_submit_extent(handle_t *handle,
io_end_vec = ext4_alloc_io_end_vec(io_end);
if (IS_ERR(io_end_vec))
return PTR_ERR(io_end_vec);
- io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
+ io_end_vec->offset = EXT4_LBLK_TO_B(inode, map->m_lblk);
do {
err = mpage_map_one_extent(handle, mpd);
if (err < 0) {
struct super_block *sb = inode->i_sb;
- if (ext4_forced_shutdown(sb))
+ if (ext4_emergency_state(sb))
goto invalidate_dirty_pages;
/*
* Let the upper layers retry transient errors.
* In the case of ENOSPC, if ext4_count_free_blocks()
* is non-zero, a commit should free up blocks.
*/
- if ((err == -ENOMEM) ||
+ if ((err == -ENOMEM) || (err == -EAGAIN) ||
(err == -ENOSPC && ext4_count_free_clusters(sb))) {
- if (progress)
+ /*
+ * We may have already allocated extents for
+ * some bhs inside the folio; issue the
+ * corresponding data to prevent stale data.
+ */
+ if (progress) {
+ if (mpage_submit_partial_folio(mpd))
+ goto invalidate_dirty_pages;
goto update_disksize;
+ }
return err;
}
ext4_msg(sb, KERN_CRIT,
@@ -2317,7 +2527,7 @@ update_disksize:
* Update on-disk size after IO is submitted. Races with
* truncate are avoided by checking i_size under i_data_sem.
*/
- disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
+ disksize = mpd->start_pos;
if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
int err2;
loff_t i_size;
@@ -2341,21 +2551,6 @@ update_disksize:
return err;
}
-/*
- * Calculate the total number of credits to reserve for one writepages
- * iteration. This is called from ext4_writepages(). We map an extent of
- * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
- * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
- * bpp - 1 blocks in bpp different extents.
- */
-static int ext4_da_writepages_trans_blocks(struct inode *inode)
-{
- int bpp = ext4_journal_blocks_per_page(inode);
-
- return ext4_meta_trans_blocks(inode,
- MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
-}
-
static int ext4_journal_folio_buffers(handle_t *handle, struct folio *folio,
size_t len)
{
@@ -2386,7 +2581,7 @@ static int mpage_journal_page_buffers(handle_t *handle,
size_t len = folio_size(folio);
folio_clear_checked(folio);
- mpd->wbc->nr_to_write--;
+ mpd->wbc->nr_to_write -= folio_nr_pages(folio);
if (folio_pos(folio) + len > size &&
!ext4_verity_in_progress(inode))
@@ -2420,23 +2615,19 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
struct address_space *mapping = mpd->inode->i_mapping;
struct folio_batch fbatch;
unsigned int nr_folios;
- pgoff_t index = mpd->first_page;
- pgoff_t end = mpd->last_page;
+ pgoff_t index = mpd->start_pos >> PAGE_SHIFT;
+ pgoff_t end = mpd->end_pos >> PAGE_SHIFT;
xa_mark_t tag;
int i, err = 0;
- int blkbits = mpd->inode->i_blkbits;
ext4_lblk_t lblk;
struct buffer_head *head;
handle_t *handle = NULL;
- int bpp = ext4_journal_blocks_per_page(mpd->inode);
+ int bpp = ext4_journal_blocks_per_folio(mpd->inode);
- if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
- tag = PAGECACHE_TAG_TOWRITE;
- else
- tag = PAGECACHE_TAG_DIRTY;
+ tag = wbc_to_tag(mpd->wbc);
mpd->map.m_len = 0;
- mpd->next_page = index;
+ mpd->next_pos = mpd->start_pos;
if (ext4_should_journal_data(mpd->inode)) {
handle = ext4_journal_start(mpd->inode, EXT4_HT_WRITE_PAGE,
bpp);
@@ -2463,11 +2654,12 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
*/
if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
mpd->wbc->nr_to_write <=
- mpd->map.m_len >> (PAGE_SHIFT - blkbits))
+ EXT4_LBLK_TO_PG(mpd->inode, mpd->map.m_len))
goto out;
/* If we can't merge this page, we are done. */
- if (mpd->map.m_len > 0 && mpd->next_page != folio->index)
+ if (mpd->map.m_len > 0 &&
+ mpd->next_pos != folio_pos(folio))
goto out;
if (handle) {
@@ -2513,8 +2705,8 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
}
if (mpd->map.m_len == 0)
- mpd->first_page = folio->index;
- mpd->next_page = folio_next_index(folio);
+ mpd->start_pos = folio_pos(folio);
+ mpd->next_pos = folio_next_pos(folio);
/*
* Writeout when we cannot modify metadata is simple.
* Just submit the page. For data=journal mode we
@@ -2540,8 +2732,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
mpage_folio_done(mpd, folio);
} else {
/* Add all dirty buffers to mpd */
- lblk = ((ext4_lblk_t)folio->index) <<
- (PAGE_SHIFT - blkbits);
+ lblk = EXT4_PG_TO_LBLK(mpd->inode, folio->index);
head = folio_buffers(folio);
err = mpage_process_page_bufs(mpd, head, head,
lblk);
@@ -2599,10 +2790,9 @@ static int ext4_do_writepages(struct mpage_da_data *mpd)
* *never* be called, so if that ever happens, we would want
* the stack trace.
*/
- if (unlikely(ext4_forced_shutdown(mapping->host->i_sb))) {
- ret = -EROFS;
+ ret = ext4_emergency_state(mapping->host->i_sb);
+ if (unlikely(ret))
goto out_writepages;
- }
/*
* If we have inline data and arrive here, it means that
@@ -2643,12 +2833,12 @@ static int ext4_do_writepages(struct mpage_da_data *mpd)
mpd->journalled_more_data = 0;
if (ext4_should_dioread_nolock(inode)) {
+ int bpf = ext4_journal_blocks_per_folio(inode);
/*
* We may need to convert up to one extent per block in
- * the page and we may dirty the inode.
+ * the folio and we may dirty the inode.
*/
- rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
- PAGE_SIZE >> inode->i_blkbits);
+ rsv_blocks = 1 + ext4_ext_index_trans_blocks(inode, bpf);
}
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
@@ -2658,18 +2848,18 @@ static int ext4_do_writepages(struct mpage_da_data *mpd)
writeback_index = mapping->writeback_index;
if (writeback_index)
cycled = 0;
- mpd->first_page = writeback_index;
- mpd->last_page = -1;
+ mpd->start_pos = writeback_index << PAGE_SHIFT;
+ mpd->end_pos = LLONG_MAX;
} else {
- mpd->first_page = wbc->range_start >> PAGE_SHIFT;
- mpd->last_page = wbc->range_end >> PAGE_SHIFT;
+ mpd->start_pos = wbc->range_start;
+ mpd->end_pos = wbc->range_end;
}
ext4_io_submit_init(&mpd->io_submit, wbc);
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
- tag_pages_for_writeback(mapping, mpd->first_page,
- mpd->last_page);
+ tag_pages_for_writeback(mapping, mpd->start_pos >> PAGE_SHIFT,
+ mpd->end_pos >> PAGE_SHIFT);
blk_start_plug(&plug);
/*
@@ -2712,8 +2902,14 @@ retry:
* not supported by delalloc.
*/
BUG_ON(ext4_should_journal_data(inode));
- needed_blocks = ext4_da_writepages_trans_blocks(inode);
-
+ /*
+ * Calculate the number of credits needed to reserve for one
+ * extent of up to MAX_WRITEPAGES_EXTENT_LEN blocks. It will
+ * attempt to extend the transaction or start a new iteration
+ * if the reserved credits are insufficient.
+ */
+ needed_blocks = ext4_chunk_trans_blocks(inode,
+ MAX_WRITEPAGES_EXTENT_LEN);
/* start a new transaction */
handle = ext4_journal_start_with_reserve(inode,
EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
@@ -2729,7 +2925,8 @@ retry:
}
mpd->do_map = 1;
- trace_ext4_da_write_pages(inode, mpd->first_page, wbc);
+ trace_ext4_da_write_folios_start(inode, mpd->start_pos,
+ mpd->next_pos, wbc);
ret = mpage_prepare_extent_to_map(mpd);
if (!ret && mpd->map.m_len)
ret = mpage_map_and_submit_extent(handle, mpd,
@@ -2767,6 +2964,8 @@ retry:
} else
ext4_put_io_end(mpd->io_submit.io_end);
mpd->io_submit.io_end = NULL;
+ trace_ext4_da_write_folios_end(inode, mpd->start_pos,
+ mpd->next_pos, wbc, ret);
if (ret == -ENOSPC && sbi->s_journal) {
/*
@@ -2778,6 +2977,8 @@ retry:
ret = 0;
continue;
}
+ if (ret == -EAGAIN)
+ ret = 0;
/* Fatal error - ENOMEM, EIO... */
if (ret)
break;
@@ -2786,8 +2987,8 @@ unplug:
blk_finish_plug(&plug);
if (!ret && !cycled && wbc->nr_to_write > 0) {
cycled = 1;
- mpd->last_page = writeback_index - 1;
- mpd->first_page = 0;
+ mpd->end_pos = (writeback_index << PAGE_SHIFT) - 1;
+ mpd->start_pos = 0;
goto retry;
}
@@ -2797,7 +2998,7 @@ unplug:
* Set the writeback_index so that range_cyclic
* mode will write it back later
*/
- mapping->writeback_index = mpd->first_page;
+ mapping->writeback_index = mpd->start_pos >> PAGE_SHIFT;
out_writepages:
trace_ext4_writepages_result(inode, wbc, ret,
@@ -2817,8 +3018,9 @@ static int ext4_writepages(struct address_space *mapping,
int ret;
int alloc_ctx;
- if (unlikely(ext4_forced_shutdown(sb)))
- return -EIO;
+ ret = ext4_emergency_state(sb);
+ if (unlikely(ret))
+ return ret;
alloc_ctx = ext4_writepages_down_read(sb);
ret = ext4_do_writepages(&mpd);
@@ -2858,8 +3060,9 @@ static int ext4_dax_writepages(struct address_space *mapping,
struct inode *inode = mapping->host;
int alloc_ctx;
- if (unlikely(ext4_forced_shutdown(inode->i_sb)))
- return -EIO;
+ ret = ext4_emergency_state(inode->i_sb);
+ if (unlikely(ret))
+ return ret;
alloc_ctx = ext4_writepages_down_read(inode->i_sb);
trace_ext4_writepages(inode, wbc);
@@ -2906,7 +3109,8 @@ static int ext4_nonda_switch(struct super_block *sb)
return 0;
}
-static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
+static int ext4_da_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
loff_t pos, unsigned len,
struct folio **foliop, void **fsdata)
{
@@ -2915,22 +3119,23 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
pgoff_t index;
struct inode *inode = mapping->host;
- if (unlikely(ext4_forced_shutdown(inode->i_sb)))
- return -EIO;
+ ret = ext4_emergency_state(inode->i_sb);
+ if (unlikely(ret))
+ return ret;
index = pos >> PAGE_SHIFT;
if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) {
*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
- return ext4_write_begin(file, mapping, pos,
+ return ext4_write_begin(iocb, mapping, pos,
len, foliop, fsdata);
}
*fsdata = (void *)0;
trace_ext4_da_write_begin(inode, pos, len);
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
- ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len,
- foliop, fsdata);
+ ret = ext4_generic_write_inline_data(mapping, inode, pos, len,
+ foliop, fsdata, true);
if (ret < 0)
return ret;
if (ret == 1)
@@ -2938,18 +3143,20 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
}
retry:
- folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
- mapping_gfp_mask(mapping));
+ folio = write_begin_get_folio(iocb, mapping, index, len);
if (IS_ERR(folio))
return PTR_ERR(folio);
+ if (len > folio_next_pos(folio) - pos)
+ len = folio_next_pos(folio) - pos;
+
ret = ext4_block_write_begin(NULL, folio, pos, len,
ext4_da_get_block_prep);
if (ret < 0) {
folio_unlock(folio);
folio_put(folio);
/*
- * block_write_begin may have instantiated a few blocks
+ * ext4_block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
* i_size_read because we hold inode lock.
*/
@@ -3008,8 +3215,7 @@ static int ext4_da_do_write_end(struct address_space *mapping,
* block_write_end() will mark the inode as dirty with I_DIRTY_PAGES
* flag, which is all that's needed to trigger page writeback.
*/
- copied = block_write_end(NULL, mapping, pos, len, copied,
- folio, NULL);
+ copied = block_write_end(pos, len, copied, folio);
new_i_size = pos + copied;
/*
@@ -3031,7 +3237,7 @@ static int ext4_da_do_write_end(struct address_space *mapping,
unsigned long end;
i_size_write(inode, new_i_size);
- end = (new_i_size - 1) & (PAGE_SIZE - 1);
+ end = offset_in_folio(folio, new_i_size - 1);
if (copied && ext4_da_should_update_i_disksize(folio, end)) {
ext4_update_i_disksize(inode, new_i_size);
disksize_changed = true;
@@ -3060,7 +3266,7 @@ static int ext4_da_do_write_end(struct address_space *mapping,
return copied;
}
-static int ext4_da_write_end(struct file *file,
+static int ext4_da_write_end(const struct kiocb *iocb,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct folio *folio, void *fsdata)
@@ -3069,7 +3275,7 @@ static int ext4_da_write_end(struct file *file,
int write_mode = (int)(unsigned long)fsdata;
if (write_mode == FALL_BACK_TO_NONDELALLOC)
- return ext4_write_end(file, mapping, pos,
+ return ext4_write_end(iocb, mapping, pos,
len, copied, folio, fsdata);
trace_ext4_da_write_end(inode, pos, len, copied);
@@ -3268,7 +3474,7 @@ static bool ext4_inode_datasync_dirty(struct inode *inode)
/* Any metadata buffers to write? */
if (!list_empty(&inode->i_mapping->i_private_list))
return true;
- return inode->i_state & I_DIRTY_DATASYNC;
+ return inode_state_read_once(inode) & I_DIRTY_DATASYNC;
}
static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
@@ -3290,12 +3496,16 @@ static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
if (map->m_flags & EXT4_MAP_NEW)
iomap->flags |= IOMAP_F_NEW;
+ /* HW-offload atomics are always used */
+ if (flags & IOMAP_ATOMIC)
+ iomap->flags |= IOMAP_F_ATOMIC_BIO;
+
if (flags & IOMAP_DAX)
iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
else
iomap->bdev = inode->i_sb->s_bdev;
- iomap->offset = (u64) map->m_lblk << blkbits;
- iomap->length = (u64) map->m_len << blkbits;
+ iomap->offset = EXT4_LBLK_TO_B(inode, map->m_lblk);
+ iomap->length = EXT4_LBLK_TO_B(inode, map->m_len);
if ((map->m_flags & EXT4_MAP_MAPPED) &&
!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
@@ -3329,12 +3539,148 @@ static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
}
}
+static int ext4_map_blocks_atomic_write_slow(handle_t *handle,
+ struct inode *inode, struct ext4_map_blocks *map)
+{
+ ext4_lblk_t m_lblk = map->m_lblk;
+ unsigned int m_len = map->m_len;
+ unsigned int mapped_len = 0, m_flags = 0;
+ ext4_fsblk_t next_pblk = 0;
+ bool check_next_pblk = false;
+ int ret = 0;
+
+ WARN_ON_ONCE(!ext4_has_feature_bigalloc(inode->i_sb));
+
+ /*
+ * This is a slow path in case of mixed mapping. We use
+ * EXT4_GET_BLOCKS_CREATE_ZERO flag here to make sure we get a single
+ * contiguous mapped range. This ensures that any unwritten or hole
+ * regions within the requested range are zeroed out and we return
+ * a single contiguous mapped extent.
+ */
+ m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
+
+ do {
+ ret = ext4_map_blocks(handle, inode, map, m_flags);
+ if (ret < 0 && ret != -ENOSPC)
+ goto out_err;
+ /*
+ * This should never happen, but let's return an error code to
+ * avoid an infinite loop in here.
+ */
+ if (ret == 0) {
+ ret = -EFSCORRUPTED;
+ ext4_warning_inode(inode,
+ "ext4_map_blocks() couldn't allocate blocks m_flags: 0x%x, ret:%d",
+ m_flags, ret);
+ goto out_err;
+ }
+ /*
+ * With bigalloc we should never get ENOSPC nor discontiguous
+ * physical extents.
+ */
+ if ((check_next_pblk && next_pblk != map->m_pblk) ||
+ ret == -ENOSPC) {
+ ext4_warning_inode(inode,
+ "Non-contiguous allocation detected: expected %llu, got %llu, "
+ "or ext4_map_blocks() returned out of space ret: %d",
+ next_pblk, map->m_pblk, ret);
+ ret = -EFSCORRUPTED;
+ goto out_err;
+ }
+ next_pblk = map->m_pblk + map->m_len;
+ check_next_pblk = true;
+
+ mapped_len += map->m_len;
+ map->m_lblk += map->m_len;
+ map->m_len = m_len - mapped_len;
+ } while (mapped_len < m_len);
+
+ /*
+ * We might have done some work in above loop, so we need to query the
+ * start of the physical extent, based on the origin m_lblk and m_len.
+ * Let's also ensure we were able to allocate the required range for
+ * mixed mapping case.
+ */
+ map->m_lblk = m_lblk;
+ map->m_len = m_len;
+ map->m_flags = 0;
+
+ ret = ext4_map_blocks(handle, inode, map,
+ EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF);
+ if (ret != m_len) {
+ ext4_warning_inode(inode,
+ "allocation failed for atomic write request m_lblk:%u, m_len:%u, ret:%d\n",
+ m_lblk, m_len, ret);
+ ret = -EINVAL;
+ }
+ return ret;
+
+out_err:
+ /* reset map before returning an error */
+ map->m_lblk = m_lblk;
+ map->m_len = m_len;
+ map->m_flags = 0;
+ return ret;
+}
+
+/*
+ * ext4_map_blocks_atomic_write: Helper routine to ensure the entire requested
+ * range in @map [lblk, lblk + len) is one single contiguous extent with no
+ * mixed mappings.
+ *
+ * We first use the m_flags passed to us by our caller (ext4_iomap_alloc()).
+ * We only call EXT4_GET_BLOCKS_CREATE_ZERO in the slow path, when the
+ * underlying physical extent for the requested range does not have a single
+ * contiguous mapping type i.e. (Hole, Mapped, or Unwritten) throughout.
+ * In that case we will loop over the requested range to allocate and zero out
+ * the unwritten / holes in between, to get a single mapped extent from
+ * [m_lblk, m_lblk + m_len). Note that this is only possible because we know
+ * this can be called only on a bigalloc-enabled filesystem where the
+ * underlying cluster is already allocated. This avoids allocating
+ * discontiguous extents in the slow path due to multiple calls to
+ * ext4_map_blocks().
+ * The slow path is mostly a non-performance-critical path, so it should be ok
+ * to loop using ext4_map_blocks() with appropriate flags to allocate & zero
+ * the underlying short holes/unwritten extents within the requested range.
+ */
+static int ext4_map_blocks_atomic_write(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map, int m_flags,
+ bool *force_commit)
+{
+ ext4_lblk_t m_lblk = map->m_lblk;
+ unsigned int m_len = map->m_len;
+ int ret = 0;
+
+ WARN_ON_ONCE(m_len > 1 && !ext4_has_feature_bigalloc(inode->i_sb));
+
+ ret = ext4_map_blocks(handle, inode, map, m_flags);
+ if (ret < 0 || ret == m_len)
+ goto out;
+ /*
+ * This is a mixed mapping case where we were not able to allocate
+ * a single contiguous extent. In that case let's reset requested
+ * mapping and call the slow path.
+ */
+ map->m_lblk = m_lblk;
+ map->m_len = m_len;
+ map->m_flags = 0;
+
+ /*
+ * slow path means we have mixed mapping, that means we will need
+ * to force txn commit.
+ */
+ *force_commit = true;
+ return ext4_map_blocks_atomic_write_slow(handle, inode, map);
+out:
+ return ret;
+}
+
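ext4_map_blocks_atomic_write_slow() above maps the range chunk by chunk and treats any gap between the previous chunk's physical end and the next chunk's physical start as corruption. A toy model of that contiguity loop, assuming a pretend allocator in place of ext4_map_blocks():

#include <stdio.h>
#include <stdint.h>

struct chunk { uint64_t pblk; unsigned len; };

/* Pretend allocator: returns pre-cooked chunks for the requested range. */
static struct chunk map_chunk(const struct chunk *layout, unsigned i)
{
        return layout[i];
}

static int map_contiguous(const struct chunk *layout, unsigned nchunks,
                          unsigned want_len)
{
        uint64_t next_pblk = 0;
        unsigned mapped = 0;
        int check_next = 0;

        for (unsigned i = 0; i < nchunks && mapped < want_len; i++) {
                struct chunk c = map_chunk(layout, i);

                if (check_next && c.pblk != next_pblk)
                        return -1;      /* discontiguous: would be a torn mapping */
                next_pblk = c.pblk + c.len;
                check_next = 1;
                mapped += c.len;
        }
        return mapped == want_len ? 0 : -1;
}

int main(void)
{
        struct chunk good[] = { { 1000, 4 }, { 1004, 4 } };
        struct chunk bad[]  = { { 1000, 4 }, { 2000, 4 } };

        printf("contiguous layout: %d\n", map_contiguous(good, 2, 8)); /* 0 */
        printf("split layout:      %d\n", map_contiguous(bad, 2, 8));  /* -1 */
        return 0;
}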
static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
unsigned int flags)
{
handle_t *handle;
- u8 blkbits = inode->i_blkbits;
int ret, dio_credits, m_flags = 0, retries = 0;
+ bool force_commit = false;
/*
* Trim the mapping request to the maximum value that we can map at
@@ -3342,7 +3688,30 @@ static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
*/
if (map->m_len > DIO_MAX_BLOCKS)
map->m_len = DIO_MAX_BLOCKS;
- dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
+
+ /*
+ * Journal credits estimation for atomic writes: call ext4_map_blocks()
+ * to check whether there could be a mixed mapping. If so, assume the
+ * number of pextents required can be as large as m_len, i.e. every
+ * other block could be unwritten or a hole.
+ */
+ if (flags & IOMAP_ATOMIC) {
+ unsigned int orig_mlen = map->m_len;
+
+ ret = ext4_map_blocks(NULL, inode, map, 0);
+ if (ret < 0)
+ return ret;
+ if (map->m_len < orig_mlen) {
+ map->m_len = orig_mlen;
+ dio_credits = ext4_meta_trans_blocks(inode, orig_mlen,
+ map->m_len);
+ } else {
+ dio_credits = ext4_chunk_trans_blocks(inode,
+ map->m_len);
+ }
+ } else {
+ dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
+ }
retry:
/*
@@ -3368,12 +3737,16 @@ retry:
* i_disksize out to i_size. This could be beyond where direct I/O is
* happening and thus expose allocated blocks to direct I/O reads.
*/
- else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
+ else if (EXT4_LBLK_TO_B(inode, map->m_lblk) >= i_size_read(inode))
m_flags = EXT4_GET_BLOCKS_CREATE;
else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
- ret = ext4_map_blocks(handle, inode, map, m_flags);
+ if (flags & IOMAP_ATOMIC)
+ ret = ext4_map_blocks_atomic_write(handle, inode, map, m_flags,
+ &force_commit);
+ else
+ ret = ext4_map_blocks(handle, inode, map, m_flags);
/*
* We cannot fill holes in indirect tree based inodes as that could
@@ -3387,6 +3760,22 @@ retry:
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
+ /*
+ * Force commit the current transaction if the allocation spans a mixed
+ * mapping range. This ensures any pending metadata updates (like
+ * unwritten to written extents conversion) in this range are in
+ * consistent state with the file data blocks, before performing the
+ * actual write I/O. If the commit fails, the whole I/O must be aborted
+ * to prevent any possible torn writes.
+ */
+ if (ret > 0 && force_commit) {
+ int ret2;
+
+ ret2 = ext4_force_commit(inode->i_sb);
+ if (ret2)
+ return ret2;
+ }
+
return ret;
}
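The atomic-write branch of ext4_iomap_alloc() above picks its journal-credit estimate by probing the range first: a shorter-than-requested mapping means a possible mixed mapping, so it budgets one physical extent per block. A rough sketch of that decision; the cost model in meta_trans_blocks() is invented for the example and is not the real ext4 estimator:

#include <stdio.h>

static int meta_trans_blocks(int lblocks, int pextents)
{
        /* placeholder cost model: overhead plus a per-extent charge */
        return 4 + lblocks / 8 + 2 * pextents;
}

static int chunk_trans_blocks(int len)
{
        return meta_trans_blocks(len, 1);
}

static int atomic_write_credits(int requested_len, int probed_len)
{
        if (probed_len < requested_len)
                /* mixed mapping possible: worst case, one pextent per block */
                return meta_trans_blocks(requested_len, requested_len);
        return chunk_trans_blocks(requested_len);
}

int main(void)
{
        printf("fully mapped 16-block write:     %d credits\n",
               atomic_write_credits(16, 16));
        printf("partially mapped 16-block write: %d credits\n",
               atomic_write_credits(16, 8));
        return 0;
}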
@@ -3397,6 +3786,7 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
int ret;
struct ext4_map_blocks map;
u8 blkbits = inode->i_blkbits;
+ unsigned int orig_mlen;
if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
return -EINVAL;
@@ -3410,6 +3800,7 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
map.m_lblk = offset >> blkbits;
map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
+ orig_mlen = map.m_len;
if (flags & IOMAP_WRITE) {
/*
@@ -3420,11 +3811,23 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
*/
if (offset + length <= i_size_read(inode)) {
ret = ext4_map_blocks(NULL, inode, &map, 0);
- if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
- goto out;
+ /*
+ * For atomic writes the entire requested length should
+ * be mapped.
+ */
+ if (map.m_flags & EXT4_MAP_MAPPED) {
+ if ((!(flags & IOMAP_ATOMIC) && ret > 0) ||
+ (flags & IOMAP_ATOMIC && ret >= orig_mlen))
+ goto out;
+ }
+ map.m_len = orig_mlen;
}
ret = ext4_iomap_alloc(inode, &map, flags);
} else {
+ /*
+ * This can be called for the overwrite path from
+ * ext4_iomap_overwrite_begin().
+ */
ret = ext4_map_blocks(NULL, inode, &map, 0);
}
@@ -3438,6 +3841,16 @@ out:
*/
map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
+ /*
+ * Before returning to iomap, let's ensure the allocated mapping
+ * covers the entire requested length for atomic writes.
+ */
+ if (flags & IOMAP_ATOMIC) {
+ if (map.m_len < (length >> blkbits)) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+ }
ext4_set_iomap(inode, iomap, &map, offset, length, flags);
return 0;
@@ -3459,47 +3872,12 @@ static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
return ret;
}
-static inline bool ext4_want_directio_fallback(unsigned flags, ssize_t written)
-{
- /* must be a directio to fall back to buffered */
- if ((flags & (IOMAP_WRITE | IOMAP_DIRECT)) !=
- (IOMAP_WRITE | IOMAP_DIRECT))
- return false;
-
- /* atomic writes are all-or-nothing */
- if (flags & IOMAP_ATOMIC)
- return false;
-
- /* can only try again if we wrote nothing */
- return written == 0;
-}
-
-static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
- ssize_t written, unsigned flags, struct iomap *iomap)
-{
- /*
- * Check to see whether an error occurred while writing out the data to
- * the allocated blocks. If so, return the magic error code for
- * non-atomic write so that we fallback to buffered I/O and attempt to
- * complete the remainder of the I/O.
- * For non-atomic writes, any blocks that may have been
- * allocated in preparation for the direct I/O will be reused during
- * buffered I/O. For atomic write, we never fallback to buffered-io.
- */
- if (ext4_want_directio_fallback(flags, written))
- return -ENOTBLK;
-
- return 0;
-}
-
const struct iomap_ops ext4_iomap_ops = {
.iomap_begin = ext4_iomap_begin,
- .iomap_end = ext4_iomap_end,
};
const struct iomap_ops ext4_iomap_overwrite_ops = {
.iomap_begin = ext4_iomap_overwrite_begin,
- .iomap_end = ext4_iomap_end,
};
static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
@@ -3679,9 +4057,7 @@ void ext4_set_aops(struct inode *inode)
static int __ext4_block_zero_page_range(handle_t *handle,
struct address_space *mapping, loff_t from, loff_t length)
{
- ext4_fsblk_t index = from >> PAGE_SHIFT;
- unsigned offset = from & (PAGE_SIZE-1);
- unsigned blocksize, pos;
+ unsigned int offset, blocksize, pos;
ext4_lblk_t iblock;
struct inode *inode = mapping->host;
struct buffer_head *bh;
@@ -3696,13 +4072,14 @@ static int __ext4_block_zero_page_range(handle_t *handle,
blocksize = inode->i_sb->s_blocksize;
- iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
+ iblock = EXT4_PG_TO_LBLK(inode, folio->index);
bh = folio_buffers(folio);
if (!bh)
bh = create_empty_buffers(folio, blocksize, 0);
/* Find the buffer that contains "offset" */
+ offset = offset_in_folio(folio, from);
pos = blocksize;
while (offset >= pos) {
bh = bh->b_this_page;
@@ -3780,9 +4157,8 @@ static int ext4_block_zero_page_range(handle_t *handle,
struct address_space *mapping, loff_t from, loff_t length)
{
struct inode *inode = mapping->host;
- unsigned offset = from & (PAGE_SIZE-1);
unsigned blocksize = inode->i_sb->s_blocksize;
- unsigned max = blocksize - (offset & (blocksize - 1));
+ unsigned int max = blocksize - (from & (blocksize - 1));
/*
* correct length if it does not fall between
@@ -3807,7 +4183,6 @@ static int ext4_block_zero_page_range(handle_t *handle,
static int ext4_block_truncate_page(handle_t *handle,
struct address_space *mapping, loff_t from)
{
- unsigned offset = from & (PAGE_SIZE-1);
unsigned length;
unsigned blocksize;
struct inode *inode = mapping->host;
@@ -3816,8 +4191,8 @@ static int ext4_block_truncate_page(handle_t *handle,
if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
return 0;
- blocksize = inode->i_sb->s_blocksize;
- length = blocksize - (offset & (blocksize - 1));
+ blocksize = i_blocksize(inode);
+ length = blocksize - (from & (blocksize - 1));
return ext4_block_zero_page_range(handle, mapping, from, length);
}
@@ -3875,7 +4250,11 @@ int ext4_can_truncate(struct inode *inode)
* We have to make sure i_disksize gets properly updated before we truncate
* page cache due to hole punching or zero range. Otherwise i_disksize update
* can get lost as it may have been postponed to submission of writeback but
- * that will never happen after we truncate page cache.
+ * that will never happen if we remove the folio containing i_size from the
+ * page cache. Also, if we punch a hole within i_size but above i_disksize,
+ * a following ext4_page_mkwrite() may mistakenly allocate written blocks over
+ * the hole and thus introduce allocated blocks beyond i_disksize, which is
+ * not allowed (e2fsck would complain in case of a crash).
*/
int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
loff_t len)
@@ -3886,9 +4265,11 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
loff_t size = i_size_read(inode);
WARN_ON(!inode_is_locked(inode));
- if (offset > size || offset + len < size)
+ if (offset > size)
return 0;
+ if (offset + len < size)
+ size = offset + len;
if (EXT4_I(inode)->i_disksize >= size)
return 0;
@@ -3902,6 +4283,68 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
return ret;
}
+static inline void ext4_truncate_folio(struct inode *inode,
+ loff_t start, loff_t end)
+{
+ unsigned long blocksize = i_blocksize(inode);
+ struct folio *folio;
+
+ /* Nothing to be done if no complete block needs to be truncated. */
+ if (round_up(start, blocksize) >= round_down(end, blocksize))
+ return;
+
+ folio = filemap_lock_folio(inode->i_mapping, start >> PAGE_SHIFT);
+ if (IS_ERR(folio))
+ return;
+
+ if (folio_mkclean(folio))
+ folio_mark_dirty(folio);
+ folio_unlock(folio);
+ folio_put(folio);
+}
+
+int ext4_truncate_page_cache_block_range(struct inode *inode,
+ loff_t start, loff_t end)
+{
+ unsigned long blocksize = i_blocksize(inode);
+ int ret;
+
+ /*
+ * For journalled data we need to write (and checkpoint) pages
+ * before discarding the page cache to avoid inconsistent data on disk
+ * in case of a crash before the freeing or unwritten-conversion
+ * transaction is committed.
+ */
+ if (ext4_should_journal_data(inode)) {
+ ret = filemap_write_and_wait_range(inode->i_mapping, start,
+ end - 1);
+ if (ret)
+ return ret;
+ goto truncate_pagecache;
+ }
+
+ /*
+ * If the block size is less than the page size, the file's mapped
+ * blocks within one page could be freed or converted to unwritten.
+ * So it's necessary to remove writable userspace mappings, and then
+ * ext4_page_mkwrite() can be called during subsequent write access
+ * to these partial folios.
+ */
+ if (!IS_ALIGNED(start | end, PAGE_SIZE) &&
+ blocksize < PAGE_SIZE && start < inode->i_size) {
+ loff_t page_boundary = round_up(start, PAGE_SIZE);
+
+ ext4_truncate_folio(inode, start, min(page_boundary, end));
+ if (end > page_boundary)
+ ext4_truncate_folio(inode,
+ round_down(end, PAGE_SIZE), end);
+ }
+
+truncate_pagecache:
+ truncate_pagecache_range(inode, start, end - 1);
+ return 0;
+}
+
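ext4_truncate_page_cache_block_range() above only does per-folio work when the range covers at least one complete filesystem block, and for sub-page block sizes it splits the work at the first page boundary. The integer arithmetic behind that, as a standalone sketch assuming 1 KiB blocks on 4 KiB pages:

#include <stdio.h>
#include <stdint.h>

static uint64_t round_up_(uint64_t x, uint64_t a)   { return (x + a - 1) / a * a; }
static uint64_t round_down_(uint64_t x, uint64_t a) { return x / a * a; }

int main(void)
{
        const uint64_t blocksize = 1024, pagesize = 4096;
        uint64_t start = 1500, end = 9000;   /* byte range [start, end) */

        if (round_up_(start, blocksize) >= round_down_(end, blocksize)) {
                printf("no complete block inside the range, nothing to do\n");
                return 0;
        }

        uint64_t page_boundary = round_up_(start, pagesize);
        printf("first partial page handles [%llu, %llu)\n",
               (unsigned long long)start,
               (unsigned long long)(page_boundary < end ? page_boundary : end));
        if (end > page_boundary)
                printf("last partial page handles  [%llu, %llu)\n",
                       (unsigned long long)round_down_(end, pagesize),
                       (unsigned long long)end);
        return 0;
}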
static void ext4_wait_dax_page(struct inode *inode)
{
filemap_invalidate_unlock(inode->i_mapping);
@@ -3911,24 +4354,10 @@ static void ext4_wait_dax_page(struct inode *inode)
int ext4_break_layouts(struct inode *inode)
{
- struct page *page;
- int error;
-
if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
return -EINVAL;
- do {
- page = dax_layout_busy_page(inode->i_mapping);
- if (!page)
- return 0;
-
- error = ___wait_var_event(&page->_refcount,
- atomic_read(&page->_refcount) == 1,
- TASK_INTERRUPTIBLE, 0, 0,
- ext4_wait_dax_page(inode));
- } while (error == 0);
-
- return error;
+ return dax_break_layout_inode(inode, ext4_wait_dax_page);
}
/*
@@ -3946,148 +4375,112 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
{
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
- ext4_lblk_t first_block, stop_block;
- struct address_space *mapping = inode->i_mapping;
- loff_t first_block_offset, last_block_offset, max_length;
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ ext4_lblk_t start_lblk, end_lblk;
+ loff_t max_end = sb->s_maxbytes;
+ loff_t end = offset + length;
handle_t *handle;
unsigned int credits;
- int ret = 0, ret2 = 0;
+ int ret;
trace_ext4_punch_hole(inode, offset, length, 0);
+ WARN_ON_ONCE(!inode_is_locked(inode));
/*
- * Write out all dirty pages to avoid race conditions
- * Then release them.
+ * For indirect-block based inodes, make sure that the hole ends at
+ * least one block before the maximum addressable offset.
*/
- if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
- ret = filemap_write_and_wait_range(mapping, offset,
- offset + length - 1);
- if (ret)
- return ret;
- }
-
- inode_lock(inode);
+ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ max_end = EXT4_SB(sb)->s_bitmap_maxbytes - sb->s_blocksize;
/* No need to punch hole beyond i_size */
- if (offset >= inode->i_size)
- goto out_mutex;
+ if (offset >= inode->i_size || offset >= max_end)
+ return 0;
/*
- * If the hole extends beyond i_size, set the hole
- * to end after the page that contains i_size
+ * If the hole extends beyond i_size, set the hole to end after
+ * the block that contains i_size to save pointless tail block zeroing.
*/
- if (offset + length > inode->i_size) {
- length = inode->i_size +
- PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
- offset;
- }
+ if (end >= inode->i_size)
+ end = round_up(inode->i_size, sb->s_blocksize);
+ if (end > max_end)
+ end = max_end;
+ length = end - offset;
/*
- * For punch hole the length + offset needs to be within one block
- * before last range. Adjust the length if it goes beyond that limit.
+ * Attach jinode to the inode for jbd2 if we do any zeroing of a
+ * partial block.
*/
- max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
- if (offset + length > max_length)
- length = max_length - offset;
-
- if (offset & (sb->s_blocksize - 1) ||
- (offset + length) & (sb->s_blocksize - 1)) {
- /*
- * Attach jinode to inode for jbd2 if we do any zeroing of
- * partial block
- */
+ if (!IS_ALIGNED(offset | end, sb->s_blocksize)) {
ret = ext4_inode_attach_jinode(inode);
if (ret < 0)
- goto out_mutex;
-
+ return ret;
}
- /* Wait all existing dio workers, newcomers will block on i_rwsem */
- inode_dio_wait(inode);
- ret = file_modified(file);
+ ret = ext4_update_disksize_before_punch(inode, offset, length);
if (ret)
- goto out_mutex;
-
- /*
- * Prevent page faults from reinstantiating pages we have released from
- * page cache.
- */
- filemap_invalidate_lock(mapping);
-
- ret = ext4_break_layouts(inode);
- if (ret)
- goto out_dio;
-
- first_block_offset = round_up(offset, sb->s_blocksize);
- last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
+ return ret;
/* Now release the pages and zero the block-aligned parts of pages */
- if (last_block_offset > first_block_offset) {
- ret = ext4_update_disksize_before_punch(inode, offset, length);
- if (ret)
- goto out_dio;
- truncate_pagecache_range(inode, first_block_offset,
- last_block_offset);
- }
+ ret = ext4_truncate_page_cache_block_range(inode, offset, end);
+ if (ret)
+ return ret;
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- credits = ext4_writepage_trans_blocks(inode);
+ credits = ext4_chunk_trans_extent(inode, 2);
else
credits = ext4_blocks_for_truncate(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_std_error(sb, ret);
- goto out_dio;
+ return ret;
}
- ret = ext4_zero_partial_blocks(handle, inode, offset,
- length);
+ ret = ext4_zero_partial_blocks(handle, inode, offset, length);
if (ret)
- goto out_stop;
-
- first_block = (offset + sb->s_blocksize - 1) >>
- EXT4_BLOCK_SIZE_BITS(sb);
- stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
+ goto out_handle;
/* If there are blocks to remove, do it */
- if (stop_block > first_block) {
- ext4_lblk_t hole_len = stop_block - first_block;
+ start_lblk = EXT4_B_TO_LBLK(inode, offset);
+ end_lblk = end >> inode->i_blkbits;
+ if (end_lblk > start_lblk) {
+ ext4_lblk_t hole_len = end_lblk - start_lblk;
+
+ ext4_fc_track_inode(handle, inode);
+ ext4_check_map_extents_env(inode);
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
- ext4_es_remove_extent(inode, first_block, hole_len);
+ ext4_es_remove_extent(inode, start_lblk, hole_len);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- ret = ext4_ext_remove_space(inode, first_block,
- stop_block - 1);
+ ret = ext4_ext_remove_space(inode, start_lblk,
+ end_lblk - 1);
else
- ret = ext4_ind_remove_space(handle, inode, first_block,
- stop_block);
+ ret = ext4_ind_remove_space(handle, inode, start_lblk,
+ end_lblk);
+ if (ret) {
+ up_write(&EXT4_I(inode)->i_data_sem);
+ goto out_handle;
+ }
- ext4_es_insert_extent(inode, first_block, hole_len, ~0,
+ ext4_es_insert_extent(inode, start_lblk, hole_len, ~0,
EXTENT_STATUS_HOLE, 0);
up_write(&EXT4_I(inode)->i_data_sem);
}
- ext4_fc_track_range(handle, inode, first_block, stop_block);
+ ext4_fc_track_range(handle, inode, start_lblk, end_lblk);
+
+ ret = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret))
+ goto out_handle;
+
+ ext4_update_inode_fsync_trans(handle, inode, 1);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
-
- inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
- ret2 = ext4_mark_inode_dirty(handle, inode);
- if (unlikely(ret2))
- ret = ret2;
- if (ret >= 0)
- ext4_update_inode_fsync_trans(handle, inode, 1);
-out_stop:
+out_handle:
ext4_journal_stop(handle);
-out_dio:
- filemap_invalidate_unlock(mapping);
-out_mutex:
- inode_unlock(inode);
return ret;
}
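The reworked ext4_punch_hole() above clamps the hole end to the block-aligned i_size and to the maximum end the inode's mapping can address before computing the length. A worked example of that clamping with made-up sizes:

#include <stdio.h>
#include <stdint.h>

static uint64_t round_up_(uint64_t x, uint64_t a) { return (x + a - 1) / a * a; }

static uint64_t clamp_hole_end(uint64_t offset, uint64_t end,
                               uint64_t i_size, uint64_t max_end,
                               uint64_t blocksize)
{
        if (offset >= i_size || offset >= max_end)
                return offset;          /* nothing to punch */
        if (end >= i_size)
                end = round_up_(i_size, blocksize);
        if (end > max_end)
                end = max_end;
        return end;
}

int main(void)
{
        uint64_t blocksize = 4096, i_size = 10000, max_end = 1ULL << 32;
        uint64_t offset = 4096, end = 1000000;   /* hole extends far past EOF */

        end = clamp_hole_end(offset, end, i_size, max_end, blocksize);
        printf("clamped hole: [%llu, %llu), length %llu\n",
               (unsigned long long)offset, (unsigned long long)end,
               (unsigned long long)(end - offset));
        return 0;
}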
@@ -4157,7 +4550,7 @@ int ext4_truncate(struct inode *inode)
* or it's a completely new inode. In those cases we might not
* have i_rwsem locked because it's not necessary.
*/
- if (!(inode->i_state & (I_NEW|I_FREEING)))
+ if (!(inode_state_read_once(inode) & (I_NEW | I_FREEING)))
WARN_ON(!inode_is_locked(inode));
trace_ext4_truncate_enter(inode);
@@ -4183,7 +4576,7 @@ int ext4_truncate(struct inode *inode)
}
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- credits = ext4_writepage_trans_blocks(inode);
+ credits = ext4_chunk_trans_extent(inode, 1);
else
credits = ext4_blocks_for_truncate(inode);
@@ -4209,8 +4602,10 @@ int ext4_truncate(struct inode *inode)
if (err)
goto out_stop;
- down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_fc_track_inode(handle, inode);
+ ext4_check_map_extents_env(inode);
+ down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
@@ -4322,7 +4717,7 @@ static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode
* old inodes get re-used with the upper 16 bits of the
* uid/gid intact.
*/
- if (ei->i_dtime && list_empty(&ei->i_orphan)) {
+ if (ei->i_dtime && !ext4_inode_orphan_tracked(inode)) {
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
} else {
@@ -4674,6 +5069,11 @@ static inline int ext4_iget_extra_inode(struct inode *inode,
*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
int err;
+ err = xattr_check_inode(inode, IHDR(inode, raw_inode),
+ ITAIL(inode, raw_inode));
+ if (err)
+ return err;
+
ext4_set_inode_state(inode, EXT4_STATE_XATTR);
err = ext4_find_inline_data_nolock(inode);
if (!err && ext4_has_inline_data(inode))
@@ -4705,22 +5105,62 @@ static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
inode_set_iversion_queried(inode, val);
}
-static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags)
-
+static int check_igot_inode(struct inode *inode, ext4_iget_flags flags,
+ const char *function, unsigned int line)
{
+ const char *err_str;
+
if (flags & EXT4_IGET_EA_INODE) {
- if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
- return "missing EA_INODE flag";
+ if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
+ err_str = "missing EA_INODE flag";
+ goto error;
+ }
if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
- EXT4_I(inode)->i_file_acl)
- return "ea_inode with extended attributes";
+ EXT4_I(inode)->i_file_acl) {
+ err_str = "ea_inode with extended attributes";
+ goto error;
+ }
} else {
- if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
- return "unexpected EA_INODE flag";
+ if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
+ /*
+ * open_by_handle_at() could provide an old inode number
+ * that has since been reused for an ea_inode; this does
+ * not indicate filesystem corruption
+ */
+ if (flags & EXT4_IGET_HANDLE)
+ return -ESTALE;
+ err_str = "unexpected EA_INODE flag";
+ goto error;
+ }
}
- if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD))
- return "unexpected bad inode w/o EXT4_IGET_BAD";
- return NULL;
+ if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) {
+ err_str = "unexpected bad inode w/o EXT4_IGET_BAD";
+ goto error;
+ }
+ return 0;
+
+error:
+ ext4_error_inode(inode, function, line, 0, "%s", err_str);
+ return -EFSCORRUPTED;
+}
+
+void ext4_set_inode_mapping_order(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ u16 min_order, max_order;
+
+ max_order = EXT4_SB(sb)->s_max_folio_order;
+ if (!max_order)
+ return;
+
+ min_order = EXT4_SB(sb)->s_min_folio_order;
+ if (!min_order && !S_ISREG(inode->i_mode))
+ return;
+
+ if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
+ max_order = min_order;
+
+ mapping_set_folio_order_range(inode->i_mapping, min_order, max_order);
}
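ext4_set_inode_mapping_order() above enables large folios only when the superblock reports a non-zero maximum order, restricts the default case to regular files, and pins data=journal inodes to the minimum order. A sketch of that decision tree with the mapping call replaced by a printf; the order values are illustrative:

#include <stdio.h>
#include <stdbool.h>

static void set_mapping_order(unsigned min_order, unsigned max_order,
                              bool is_regular, bool journal_data)
{
        if (!max_order)
                return;                 /* large folios not enabled */
        if (!min_order && !is_regular)
                return;                 /* only regular files by default */
        if (journal_data)
                max_order = min_order;  /* data=journal: no large folios */

        printf("folio order range: [%u, %u]\n", min_order, max_order);
}

int main(void)
{
        set_mapping_order(0, 5, true, false);   /* regular file: [0, 5] */
        set_mapping_order(0, 5, true, true);    /* data=journal: [0, 0] */
        set_mapping_order(0, 0, true, false);   /* feature off: no output */
        return 0;
}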
struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
@@ -4732,7 +5172,6 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
struct ext4_inode_info *ei;
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
struct inode *inode;
- const char *err_str;
journal_t *journal = EXT4_SB(sb)->s_journal;
long ret;
loff_t size;
@@ -4741,12 +5180,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
gid_t i_gid;
projid_t i_projid;
- if ((!(flags & EXT4_IGET_SPECIAL) &&
- ((ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) ||
- ino == le32_to_cpu(es->s_usr_quota_inum) ||
- ino == le32_to_cpu(es->s_grp_quota_inum) ||
- ino == le32_to_cpu(es->s_prj_quota_inum) ||
- ino == le32_to_cpu(es->s_orphan_file_inum))) ||
+ if ((!(flags & EXT4_IGET_SPECIAL) && is_special_ino(sb, ino)) ||
(ino < EXT4_ROOT_INO) ||
(ino > le32_to_cpu(es->s_inodes_count))) {
if (flags & EXT4_IGET_HANDLE)
@@ -4760,11 +5194,11 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW)) {
- if ((err_str = check_igot_inode(inode, flags)) != NULL) {
- ext4_error_inode(inode, function, line, 0, err_str);
+ if (!(inode_state_read_once(inode) & I_NEW)) {
+ ret = check_igot_inode(inode, flags, function, line);
+ if (ret) {
iput(inode);
- return ERR_PTR(-EFSCORRUPTED);
+ return ERR_PTR(ret);
}
return inode;
}
@@ -4800,15 +5234,14 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
ei->i_extra_isize = 0;
/* Precompute checksum seed for inode metadata */
- if (ext4_has_metadata_csum(sb)) {
+ if (ext4_has_feature_metadata_csum(sb)) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
__u32 csum;
__le32 inum = cpu_to_le32(inode->i_ino);
__le32 gen = raw_inode->i_generation;
- csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
+ csum = ext4_chksum(sbi->s_csum_seed, (__u8 *)&inum,
sizeof(inum));
- ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
- sizeof(gen));
+ ei->i_csum_seed = ext4_chksum(csum, (__u8 *)&gen, sizeof(gen));
}
if ((!ext4_inode_csum_verify(inode, raw_inode, ei) ||
@@ -4839,7 +5272,6 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
ei->i_projid = make_kprojid(&init_user_ns, i_projid);
set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
- ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
ei->i_inline_off = 0;
ei->i_dir_start_lookup = 0;
ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
@@ -4870,13 +5302,22 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
}
ei->i_flags = le32_to_cpu(raw_inode->i_flags);
ext4_set_inode_flags(inode, true);
+ /* Detect invalid flag combination - can't have both inline data and extents */
+ if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
+ ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+ ext4_error_inode(inode, function, line, 0,
+ "inode has both inline data and extents flags");
+ ret = -EFSCORRUPTED;
+ goto bad_inode;
+ }
inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
if (ext4_has_feature_64bit(sb))
ei->i_file_acl |=
((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
inode->i_size = ext4_isize(sb, raw_inode);
- if ((size = i_size_read(inode)) < 0) {
+ size = i_size_read(inode);
+ if (size < 0 || size > ext4_get_maxbytes(inode)) {
ext4_error_inode(inode, function, line, 0,
"iget: bad i_size value: %lld", size);
ret = -EFSCORRUPTED;
@@ -4887,7 +5328,8 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
* we'd normally treat htree data as empty space. But with metadata
* checksumming that corrupts checksums so forbid that.
*/
- if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
+ if (!ext4_has_feature_dir_index(sb) &&
+ ext4_has_feature_metadata_csum(sb) &&
ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
ext4_error_inode(inode, function, line, 0,
"iget: Dir with htree data on filesystem without dir_index feature.");
@@ -5006,10 +5448,19 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
if (IS_ENCRYPTED(inode)) {
inode->i_op = &ext4_encrypted_symlink_inode_operations;
} else if (ext4_inode_is_fast_symlink(inode)) {
- inode->i_link = (char *)ei->i_data;
inode->i_op = &ext4_fast_symlink_inode_operations;
- nd_terminate_link(ei->i_data, inode->i_size,
- sizeof(ei->i_data) - 1);
+ if (inode->i_size == 0 ||
+ inode->i_size >= sizeof(ei->i_data) ||
+ strnlen((char *)ei->i_data, inode->i_size + 1) !=
+ inode->i_size) {
+ ext4_error_inode(inode, function, line, 0,
+ "invalid fast symlink length %llu",
+ (unsigned long long)inode->i_size);
+ ret = -EFSCORRUPTED;
+ goto bad_inode;
+ }
+ inode_set_cached_link(inode, (char *)ei->i_data,
+ inode->i_size);
} else {
inode->i_op = &ext4_symlink_inode_operations;
}
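The new fast-symlink validation above rejects inline targets that are empty, that overflow the inline area, or whose NUL-terminated length disagrees with i_size. A small model of that check using plain C strings in place of the on-disk i_data array; the 60-byte inline size is an assumption for the example:

#include <stdio.h>
#include <string.h>

#define INLINE_AREA 60      /* assumed size of the inline data area */

static int fast_symlink_ok(const char *i_data, unsigned long long i_size)
{
        if (i_size == 0 || i_size >= INLINE_AREA)
                return 0;
        /* strnlen scans at most i_size + 1 bytes, catching an early or missing NUL */
        return strnlen(i_data, i_size + 1) == i_size;
}

int main(void)
{
        printf("\"/tmp/target\" with i_size 11: %d\n",
               fast_symlink_ok("/tmp/target", 11));   /* valid */
        printf("\"/tmp/target\" with i_size 5:  %d\n",
               fast_symlink_ok("/tmp/target", 5));    /* length mismatch */
        printf("empty target with i_size 0:   %d\n",
               fast_symlink_ok("", 0));               /* rejected */
        return 0;
}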
@@ -5036,13 +5487,26 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
ret = -EFSCORRUPTED;
goto bad_inode;
}
- if ((err_str = check_igot_inode(inode, flags)) != NULL) {
- ext4_error_inode(inode, function, line, 0, err_str);
- ret = -EFSCORRUPTED;
- goto bad_inode;
- }
+ ext4_set_inode_mapping_order(inode);
+
+ ret = check_igot_inode(inode, flags, function, line);
+ /*
+ * -ESTALE here means there is nothing inherently wrong with the inode,
+ * it's just not an inode we can return for an fhandle lookup.
+ */
+ if (ret == -ESTALE) {
+ brelse(iloc.bh);
+ unlock_new_inode(inode);
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
+ if (ret)
+ goto bad_inode;
brelse(iloc.bh);
+ /* Initialize the "no ACLs" state for the simple cases */
+ if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) && !ei->i_file_acl)
+ cache_no_acl(inode);
unlock_new_inode(inode);
return inode;
@@ -5070,7 +5534,7 @@ static void __ext4_update_other_inode_time(struct super_block *sb,
if (inode_is_dirtytime_only(inode)) {
struct ext4_inode_info *ei = EXT4_I(inode);
- inode->i_state &= ~I_DIRTY_TIME;
+ inode_state_clear(inode, I_DIRTY_TIME);
spin_unlock(&inode->i_lock);
spin_lock(&ei->i_raw_lock);
@@ -5227,8 +5691,9 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
return 0;
- if (unlikely(ext4_forced_shutdown(inode->i_sb)))
- return -EIO;
+ err = ext4_emergency_state(inode->i_sb);
+ if (unlikely(err))
+ return err;
if (EXT4_SB(inode->i_sb)->s_journal) {
if (ext4_journal_current_handle()) {
@@ -5350,8 +5815,9 @@ int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
const unsigned int ia_valid = attr->ia_valid;
bool inc_ivers = true;
- if (unlikely(ext4_forced_shutdown(inode->i_sb)))
- return -EIO;
+ error = ext4_emergency_state(inode->i_sb);
+ if (unlikely(error))
+ return error;
if (unlikely(IS_IMMUTABLE(inode)))
return -EPERM;
@@ -5463,7 +5929,7 @@ int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
oldsize & (inode->i_sb->s_blocksize - 1)) {
error = ext4_inode_attach_jinode(inode);
if (error)
- goto err_out;
+ goto out_mmap_sem;
}
handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
@@ -5504,9 +5970,7 @@ int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
down_write(&EXT4_I(inode)->i_data_sem);
old_disksize = EXT4_I(inode)->i_disksize;
EXT4_I(inode)->i_disksize = attr->ia_size;
- rc = ext4_mark_inode_dirty(handle, inode);
- if (!error)
- error = rc;
+
/*
* We have to update i_size under i_data_sem together
* with i_disksize to avoid races with writeback code
@@ -5517,6 +5981,9 @@ int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
else
EXT4_I(inode)->i_disksize = old_disksize;
up_write(&EXT4_I(inode)->i_data_sem);
+ rc = ext4_mark_inode_dirty(handle, inode);
+ if (!error)
+ error = rc;
ext4_journal_stop(handle);
if (error)
goto out_mmap_sem;
@@ -5632,7 +6099,7 @@ int ext4_getattr(struct mnt_idmap *idmap, const struct path *path,
awu_max = sbi->s_awu_max;
}
- generic_fill_statx_atomic_writes(stat, awu_min, awu_max);
+ generic_fill_statx_atomic_writes(stat, awu_min, awu_max, 0);
}
flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
@@ -5713,8 +6180,7 @@ static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
*
* Also account for superblock, inode, quota and xattr blocks
*/
-static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
- int pextents)
+int ext4_meta_trans_blocks(struct inode *inode, int lblocks, int pextents)
{
ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
int gdpblocks;
@@ -5722,13 +6188,11 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
int ret;
/*
- * How many index blocks need to touch to map @lblocks logical blocks
- * to @pextents physical extents?
+ * How many index and leaf blocks do we need to touch to map @lblocks
+ * logical blocks to @pextents physical extents?
*/
idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
- ret = idxblocks;
-
/*
* Now let's see how many group bitmaps and group descriptors need
* to account
@@ -5741,7 +6205,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
/* bitmaps and block group descriptor blocks */
- ret += groups + gdpblocks;
+ ret = idxblocks + groups + gdpblocks;
/* Blocks for super block, inode, quota and xattr blocks */
ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
@@ -5750,25 +6214,19 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
}
/*
- * Calculate the total number of credits to reserve to fit
- * the modification of a single pages into a single transaction,
- * which may include multiple chunks of block allocations.
- *
- * This could be called via ext4_write_begin()
- *
- * We need to consider the worse case, when
- * one new block per extent.
+ * Calculate the journal credits for modifying the number of blocks
+ * in a single extent within one transaction. 'nrblocks' is used only
+ * for non-extent inodes. For extent type inodes, 'nrblocks' can be
+ * zero if the exact number of blocks is unknown.
*/
-int ext4_writepage_trans_blocks(struct inode *inode)
+int ext4_chunk_trans_extent(struct inode *inode, int nrblocks)
{
- int bpp = ext4_journal_blocks_per_page(inode);
int ret;
- ret = ext4_meta_trans_blocks(inode, bpp, bpp);
-
+ ret = ext4_meta_trans_blocks(inode, nrblocks, 1);
/* Account for data blocks for journalled mode */
if (ext4_should_journal_data(inode))
- ret += bpp;
+ ret += nrblocks;
return ret;
}
@@ -5795,9 +6253,10 @@ int ext4_mark_iloc_dirty(handle_t *handle,
{
int err = 0;
- if (unlikely(ext4_forced_shutdown(inode->i_sb))) {
+ err = ext4_emergency_state(inode->i_sb);
+ if (unlikely(err)) {
put_bh(iloc->bh);
- return -EIO;
+ return err;
}
ext4_fc_track_inode(handle, inode);
@@ -5821,8 +6280,9 @@ ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
{
int err;
- if (unlikely(ext4_forced_shutdown(inode->i_sb)))
- return -EIO;
+ err = ext4_emergency_state(inode->i_sb);
+ if (unlikely(err))
+ return err;
err = ext4_get_inode_loc(inode, iloc);
if (!err) {
@@ -5833,6 +6293,7 @@ ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
brelse(iloc->bh);
iloc->bh = NULL;
}
+ ext4_fc_track_inode(handle, inode);
}
ext4_std_error(inode->i_sb, err);
return err;
@@ -6076,14 +6537,14 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
* dirty data which can be converted only after flushing the dirty
* data (and journalled aops don't know how to handle these cases).
*/
- if (val) {
- filemap_invalidate_lock(inode->i_mapping);
- err = filemap_write_and_wait(inode->i_mapping);
- if (err < 0) {
- filemap_invalidate_unlock(inode->i_mapping);
- return err;
- }
+ filemap_invalidate_lock(inode->i_mapping);
+ err = filemap_write_and_wait(inode->i_mapping);
+ if (err < 0) {
+ filemap_invalidate_unlock(inode->i_mapping);
+ return err;
}
+ /* Before switching the inode's journalling mode, evict the whole page cache. */
+ truncate_pagecache(inode, 0);
alloc_ctx = ext4_writepages_down_write(inode->i_sb);
jbd2_journal_lock_updates(journal);
@@ -6103,17 +6564,17 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
if (err < 0) {
jbd2_journal_unlock_updates(journal);
ext4_writepages_up_write(inode->i_sb, alloc_ctx);
+ filemap_invalidate_unlock(inode->i_mapping);
return err;
}
ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
}
ext4_set_aops(inode);
+ ext4_set_inode_mapping_order(inode);
jbd2_journal_unlock_updates(journal);
ext4_writepages_up_write(inode->i_sb, alloc_ctx);
-
- if (val)
- filemap_invalidate_unlock(inode->i_mapping);
+ filemap_invalidate_unlock(inode->i_mapping);
/* Finally we can mark the inode as dirty. */
@@ -6137,6 +6598,55 @@ static int ext4_bh_unmapped(handle_t *handle, struct inode *inode,
return !buffer_mapped(bh);
}
+static int ext4_block_page_mkwrite(struct inode *inode, struct folio *folio,
+ get_block_t get_block)
+{
+ handle_t *handle;
+ loff_t size;
+ unsigned long len;
+ int credits;
+ int ret;
+
+ credits = ext4_chunk_trans_extent(inode,
+ ext4_journal_blocks_per_folio(inode));
+ handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, credits);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ folio_lock(folio);
+ size = i_size_read(inode);
+ /* Page got truncated from under us? */
+ if (folio->mapping != inode->i_mapping || folio_pos(folio) > size) {
+ ret = -EFAULT;
+ goto out_error;
+ }
+
+ len = folio_size(folio);
+ if (folio_pos(folio) + len > size)
+ len = size - folio_pos(folio);
+
+ ret = ext4_block_write_begin(handle, folio, 0, len, get_block);
+ if (ret)
+ goto out_error;
+
+ if (!ext4_should_journal_data(inode)) {
+ block_commit_write(folio, 0, len);
+ folio_mark_dirty(folio);
+ } else {
+ ret = ext4_journal_folio_buffers(handle, folio, len);
+ if (ret)
+ goto out_error;
+ }
+ ext4_journal_stop(handle);
+ folio_wait_stable(folio);
+ return ret;
+
+out_error:
+ folio_unlock(folio);
+ ext4_journal_stop(handle);
+ return ret;
+}
+
vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
@@ -6148,8 +6658,7 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
struct file *file = vma->vm_file;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
- handle_t *handle;
- get_block_t *get_block;
+ get_block_t *get_block = ext4_get_block;
int retries = 0;
if (unlikely(IS_IMMUTABLE(inode)))
@@ -6217,47 +6726,11 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
/* OK, we need to fill the hole... */
if (ext4_should_dioread_nolock(inode))
get_block = ext4_get_block_unwritten;
- else
- get_block = ext4_get_block;
retry_alloc:
- handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
- ext4_writepage_trans_blocks(inode));
- if (IS_ERR(handle)) {
- ret = VM_FAULT_SIGBUS;
- goto out;
- }
- /*
- * Data journalling can't use block_page_mkwrite() because it
- * will set_buffer_dirty() before do_journal_get_write_access()
- * thus might hit warning messages for dirty metadata buffers.
- */
- if (!ext4_should_journal_data(inode)) {
- err = block_page_mkwrite(vma, vmf, get_block);
- } else {
- folio_lock(folio);
- size = i_size_read(inode);
- /* Page got truncated from under us? */
- if (folio->mapping != mapping || folio_pos(folio) > size) {
- ret = VM_FAULT_NOPAGE;
- goto out_error;
- }
-
- len = folio_size(folio);
- if (folio_pos(folio) + len > size)
- len = size - folio_pos(folio);
-
- err = ext4_block_write_begin(handle, folio, 0, len,
- ext4_get_block);
- if (!err) {
- ret = VM_FAULT_SIGBUS;
- if (ext4_journal_folio_buffers(handle, folio, len))
- goto out_error;
- } else {
- folio_unlock(folio);
- }
- }
- ext4_journal_stop(handle);
- if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+ /* Start journal and allocate blocks */
+ err = ext4_block_page_mkwrite(inode, folio, get_block);
+ if (err == -EAGAIN ||
+ (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)))
goto retry_alloc;
out_ret:
ret = vmf_fs_error(err);
@@ -6265,8 +6738,4 @@ out:
filemap_invalidate_unlock_shared(mapping);
sb_end_pagefault(inode->i_sb);
return ret;
-out_error:
- folio_unlock(folio);
- ext4_journal_stop(handle);
- goto out;
}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 7b9ce71c1c81..7ce0fc40aec2 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -27,14 +27,16 @@
#include "fsmap.h"
#include <trace/events/ext4.h>
-typedef void ext4_update_sb_callback(struct ext4_super_block *es,
- const void *arg);
+typedef void ext4_update_sb_callback(struct ext4_sb_info *sbi,
+ struct ext4_super_block *es,
+ const void *arg);
/*
* Superblock modification callback function for changing file system
* label
*/
-static void ext4_sb_setlabel(struct ext4_super_block *es, const void *arg)
+static void ext4_sb_setlabel(struct ext4_sb_info *sbi,
+ struct ext4_super_block *es, const void *arg)
{
/* Sanity check, this should never happen */
BUILD_BUG_ON(sizeof(es->s_volume_name) < EXT4_LABEL_MAX);
@@ -46,7 +48,8 @@ static void ext4_sb_setlabel(struct ext4_super_block *es, const void *arg)
* Superblock modification callback function for changing file system
* UUID.
*/
-static void ext4_sb_setuuid(struct ext4_super_block *es, const void *arg)
+static void ext4_sb_setuuid(struct ext4_sb_info *sbi,
+ struct ext4_super_block *es, const void *arg)
{
memcpy(es->s_uuid, (__u8 *)arg, UUID_SIZE);
}
@@ -71,7 +74,7 @@ int ext4_update_primary_sb(struct super_block *sb, handle_t *handle,
goto out_err;
lock_buffer(bh);
- func(es, arg);
+ func(sbi, es, arg);
ext4_superblock_csum_set(sb);
unlock_buffer(bh);
@@ -142,16 +145,16 @@ static int ext4_update_backup_sb(struct super_block *sb,
es = (struct ext4_super_block *) (bh->b_data + offset);
lock_buffer(bh);
- if (ext4_has_metadata_csum(sb) &&
- es->s_checksum != ext4_superblock_csum(sb, es)) {
+ if (ext4_has_feature_metadata_csum(sb) &&
+ es->s_checksum != ext4_superblock_csum(es)) {
ext4_msg(sb, KERN_ERR, "Invalid checksum for backup "
"superblock %llu", sb_block);
unlock_buffer(bh);
goto out_bh;
}
- func(es, arg);
- if (ext4_has_metadata_csum(sb))
- es->s_checksum = ext4_superblock_csum(sb, es);
+ func(EXT4_SB(sb), es, arg);
+ if (ext4_has_feature_metadata_csum(sb))
+ es->s_checksum = ext4_superblock_csum(es);
set_buffer_uptodate(bh);
unlock_buffer(bh);
@@ -351,11 +354,11 @@ void ext4_reset_inode_seed(struct inode *inode)
__le32 gen = cpu_to_le32(inode->i_generation);
__u32 csum;
- if (!ext4_has_metadata_csum(inode->i_sb))
+ if (!ext4_has_feature_metadata_csum(inode->i_sb))
return;
- csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, sizeof(inum));
- ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, sizeof(gen));
+ csum = ext4_chksum(sbi->s_csum_seed, (__u8 *)&inum, sizeof(inum));
+ ei->i_csum_seed = ext4_chksum(csum, (__u8 *)&gen, sizeof(gen));
}
/*
@@ -980,7 +983,7 @@ group_add_out:
return err;
}
-int ext4_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int ext4_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct ext4_inode_info *ei = EXT4_I(inode);
@@ -997,7 +1000,7 @@ int ext4_fileattr_get(struct dentry *dentry, struct fileattr *fa)
}
int ext4_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa)
+ struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
u32 flags = fa->flags;
@@ -1205,7 +1208,8 @@ static int ext4_ioctl_setuuid(struct file *filp,
* If any checksums (group descriptors or metadata) are being used
* then the checksum seed feature is required to change the UUID.
*/
- if (((ext4_has_feature_gdt_csum(sb) || ext4_has_metadata_csum(sb))
+ if (((ext4_has_feature_gdt_csum(sb) ||
+ ext4_has_feature_metadata_csum(sb))
&& !ext4_has_feature_csum_seed(sb))
|| ext4_has_feature_stable_inodes(sb))
return -EOPNOTSUPP;
@@ -1229,6 +1233,299 @@ static int ext4_ioctl_setuuid(struct file *filp,
return ret;
}
+
+#define TUNE_OPS_SUPPORTED (EXT4_TUNE_FL_ERRORS_BEHAVIOR | \
+ EXT4_TUNE_FL_MNT_COUNT | EXT4_TUNE_FL_MAX_MNT_COUNT | \
+ EXT4_TUNE_FL_CHECKINTRVAL | EXT4_TUNE_FL_LAST_CHECK_TIME | \
+ EXT4_TUNE_FL_RESERVED_BLOCKS | EXT4_TUNE_FL_RESERVED_UID | \
+ EXT4_TUNE_FL_RESERVED_GID | EXT4_TUNE_FL_DEFAULT_MNT_OPTS | \
+ EXT4_TUNE_FL_DEF_HASH_ALG | EXT4_TUNE_FL_RAID_STRIDE | \
+ EXT4_TUNE_FL_RAID_STRIPE_WIDTH | EXT4_TUNE_FL_MOUNT_OPTS | \
+ EXT4_TUNE_FL_FEATURES | EXT4_TUNE_FL_EDIT_FEATURES | \
+ EXT4_TUNE_FL_FORCE_FSCK | EXT4_TUNE_FL_ENCODING | \
+ EXT4_TUNE_FL_ENCODING_FLAGS)
+
+#define EXT4_TUNE_SET_COMPAT_SUPP \
+ (EXT4_FEATURE_COMPAT_DIR_INDEX | \
+ EXT4_FEATURE_COMPAT_STABLE_INODES)
+#define EXT4_TUNE_SET_INCOMPAT_SUPP \
+ (EXT4_FEATURE_INCOMPAT_EXTENTS | \
+ EXT4_FEATURE_INCOMPAT_EA_INODE | \
+ EXT4_FEATURE_INCOMPAT_ENCRYPT | \
+ EXT4_FEATURE_INCOMPAT_CSUM_SEED | \
+ EXT4_FEATURE_INCOMPAT_LARGEDIR | \
+ EXT4_FEATURE_INCOMPAT_CASEFOLD)
+#define EXT4_TUNE_SET_RO_COMPAT_SUPP \
+ (EXT4_FEATURE_RO_COMPAT_LARGE_FILE | \
+ EXT4_FEATURE_RO_COMPAT_DIR_NLINK | \
+ EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \
+ EXT4_FEATURE_RO_COMPAT_PROJECT | \
+ EXT4_FEATURE_RO_COMPAT_VERITY)
+
+#define EXT4_TUNE_CLEAR_COMPAT_SUPP (0)
+#define EXT4_TUNE_CLEAR_INCOMPAT_SUPP (0)
+#define EXT4_TUNE_CLEAR_RO_COMPAT_SUPP (0)
+
+#define SB_ENC_SUPP_MASK (SB_ENC_STRICT_MODE_FL | \
+ SB_ENC_NO_COMPAT_FALLBACK_FL)
+
+static int ext4_ioctl_get_tune_sb(struct ext4_sb_info *sbi,
+ struct ext4_tune_sb_params __user *params)
+{
+ struct ext4_tune_sb_params ret;
+ struct ext4_super_block *es = sbi->s_es;
+
+ memset(&ret, 0, sizeof(ret));
+ ret.set_flags = TUNE_OPS_SUPPORTED;
+ ret.errors_behavior = le16_to_cpu(es->s_errors);
+ ret.mnt_count = le16_to_cpu(es->s_mnt_count);
+ ret.max_mnt_count = le16_to_cpu(es->s_max_mnt_count);
+ ret.checkinterval = le32_to_cpu(es->s_checkinterval);
+ ret.last_check_time = le32_to_cpu(es->s_lastcheck);
+ ret.reserved_blocks = ext4_r_blocks_count(es);
+ ret.blocks_count = ext4_blocks_count(es);
+ ret.reserved_uid = ext4_get_resuid(es);
+ ret.reserved_gid = ext4_get_resgid(es);
+ ret.default_mnt_opts = le32_to_cpu(es->s_default_mount_opts);
+ ret.def_hash_alg = es->s_def_hash_version;
+ ret.raid_stride = le16_to_cpu(es->s_raid_stride);
+ ret.raid_stripe_width = le32_to_cpu(es->s_raid_stripe_width);
+ ret.encoding = le16_to_cpu(es->s_encoding);
+ ret.encoding_flags = le16_to_cpu(es->s_encoding_flags);
+ strscpy_pad(ret.mount_opts, es->s_mount_opts);
+ ret.feature_compat = le32_to_cpu(es->s_feature_compat);
+ ret.feature_incompat = le32_to_cpu(es->s_feature_incompat);
+ ret.feature_ro_compat = le32_to_cpu(es->s_feature_ro_compat);
+ ret.set_feature_compat_mask = EXT4_TUNE_SET_COMPAT_SUPP;
+ ret.set_feature_incompat_mask = EXT4_TUNE_SET_INCOMPAT_SUPP;
+ ret.set_feature_ro_compat_mask = EXT4_TUNE_SET_RO_COMPAT_SUPP;
+ ret.clear_feature_compat_mask = EXT4_TUNE_CLEAR_COMPAT_SUPP;
+ ret.clear_feature_incompat_mask = EXT4_TUNE_CLEAR_INCOMPAT_SUPP;
+ ret.clear_feature_ro_compat_mask = EXT4_TUNE_CLEAR_RO_COMPAT_SUPP;
+ if (copy_to_user(params, &ret, sizeof(ret)))
+ return -EFAULT;
+ return 0;
+}
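
A hedged userspace sketch of querying the new tuning interface; the struct and ioctl number come from this patch, but the uapi header name below is an assumption:

/* Illustration only: read the tunable superblock parameters. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ext4.h>		/* assumed location of the new uapi bits */

int main(int argc, char **argv)
{
	struct ext4_tune_sb_params params;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || ioctl(fd, EXT4_IOC_GET_TUNE_SB_PARAM, &params) < 0) {
		perror("EXT4_IOC_GET_TUNE_SB_PARAM");
		return 1;
	}
	printf("mount count %u of max %u\n",
	       (unsigned int)params.mnt_count,
	       (unsigned int)params.max_mnt_count);
	close(fd);
	return 0;
}
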
+
+static void ext4_sb_setparams(struct ext4_sb_info *sbi,
+ struct ext4_super_block *es, const void *arg)
+{
+ const struct ext4_tune_sb_params *params = arg;
+
+ if (params->set_flags & EXT4_TUNE_FL_ERRORS_BEHAVIOR)
+ es->s_errors = cpu_to_le16(params->errors_behavior);
+ if (params->set_flags & EXT4_TUNE_FL_MNT_COUNT)
+ es->s_mnt_count = cpu_to_le16(params->mnt_count);
+ if (params->set_flags & EXT4_TUNE_FL_MAX_MNT_COUNT)
+ es->s_max_mnt_count = cpu_to_le16(params->max_mnt_count);
+ if (params->set_flags & EXT4_TUNE_FL_CHECKINTRVAL)
+ es->s_checkinterval = cpu_to_le32(params->checkinterval);
+ if (params->set_flags & EXT4_TUNE_FL_LAST_CHECK_TIME)
+ es->s_lastcheck = cpu_to_le32(params->last_check_time);
+ if (params->set_flags & EXT4_TUNE_FL_RESERVED_BLOCKS) {
+ ext4_fsblk_t blk = params->reserved_blocks;
+
+ es->s_r_blocks_count_lo = cpu_to_le32((u32)blk);
+ es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
+ }
+ if (params->set_flags & EXT4_TUNE_FL_RESERVED_UID) {
+ int uid = params->reserved_uid;
+
+ es->s_def_resuid = cpu_to_le16(uid & 0xFFFF);
+ es->s_def_resuid_hi = cpu_to_le16(uid >> 16);
+ }
+ if (params->set_flags & EXT4_TUNE_FL_RESERVED_GID) {
+ int gid = params->reserved_gid;
+
+ es->s_def_resgid = cpu_to_le16(gid & 0xFFFF);
+ es->s_def_resgid_hi = cpu_to_le16(gid >> 16);
+ }
+ if (params->set_flags & EXT4_TUNE_FL_DEFAULT_MNT_OPTS)
+ es->s_default_mount_opts = cpu_to_le32(params->default_mnt_opts);
+ if (params->set_flags & EXT4_TUNE_FL_DEF_HASH_ALG)
+ es->s_def_hash_version = params->def_hash_alg;
+ if (params->set_flags & EXT4_TUNE_FL_RAID_STRIDE)
+ es->s_raid_stride = cpu_to_le16(params->raid_stride);
+ if (params->set_flags & EXT4_TUNE_FL_RAID_STRIPE_WIDTH)
+ es->s_raid_stripe_width =
+ cpu_to_le32(params->raid_stripe_width);
+ if (params->set_flags & EXT4_TUNE_FL_ENCODING)
+ es->s_encoding = cpu_to_le16(params->encoding);
+ if (params->set_flags & EXT4_TUNE_FL_ENCODING_FLAGS)
+ es->s_encoding_flags = cpu_to_le16(params->encoding_flags);
+ strscpy_pad(es->s_mount_opts, params->mount_opts);
+ if (params->set_flags & EXT4_TUNE_FL_EDIT_FEATURES) {
+ es->s_feature_compat |=
+ cpu_to_le32(params->set_feature_compat_mask);
+ es->s_feature_incompat |=
+ cpu_to_le32(params->set_feature_incompat_mask);
+ es->s_feature_ro_compat |=
+ cpu_to_le32(params->set_feature_ro_compat_mask);
+ es->s_feature_compat &=
+ ~cpu_to_le32(params->clear_feature_compat_mask);
+ es->s_feature_incompat &=
+ ~cpu_to_le32(params->clear_feature_incompat_mask);
+ es->s_feature_ro_compat &=
+ ~cpu_to_le32(params->clear_feature_ro_compat_mask);
+ if (params->set_feature_compat_mask &
+ EXT4_FEATURE_COMPAT_DIR_INDEX)
+ es->s_def_hash_version = sbi->s_def_hash_version;
+ if (params->set_feature_incompat_mask &
+ EXT4_FEATURE_INCOMPAT_CSUM_SEED)
+ es->s_checksum_seed = cpu_to_le32(sbi->s_csum_seed);
+ }
+ if (params->set_flags & EXT4_TUNE_FL_FORCE_FSCK)
+ es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
+}
+
+static int ext4_ioctl_set_tune_sb(struct file *filp,
+ struct ext4_tune_sb_params __user *in)
+{
+ struct ext4_tune_sb_params params;
+ struct super_block *sb = file_inode(filp)->i_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+ int enabling_casefold = 0;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&params, in, sizeof(params)))
+ return -EFAULT;
+
+ if (strnlen(params.mount_opts, sizeof(params.mount_opts)) ==
+ sizeof(params.mount_opts))
+ return -E2BIG;
+
+ if ((params.set_flags & ~TUNE_OPS_SUPPORTED) != 0)
+ return -EOPNOTSUPP;
+
+ if ((params.set_flags & EXT4_TUNE_FL_ERRORS_BEHAVIOR) &&
+ (params.errors_behavior > EXT4_ERRORS_PANIC))
+ return -EINVAL;
+
+ if ((params.set_flags & EXT4_TUNE_FL_RESERVED_BLOCKS) &&
+ (params.reserved_blocks > ext4_blocks_count(sbi->s_es) / 2))
+ return -EINVAL;
+ if ((params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG) &&
+ ((params.def_hash_alg > DX_HASH_LAST) ||
+ (params.def_hash_alg == DX_HASH_SIPHASH)))
+ return -EINVAL;
+ if ((params.set_flags & EXT4_TUNE_FL_FEATURES) &&
+ (params.set_flags & EXT4_TUNE_FL_EDIT_FEATURES))
+ return -EINVAL;
+
+ if (params.set_flags & EXT4_TUNE_FL_FEATURES) {
+ params.set_feature_compat_mask =
+ params.feature_compat &
+ ~le32_to_cpu(es->s_feature_compat);
+ params.set_feature_incompat_mask =
+ params.feature_incompat &
+ ~le32_to_cpu(es->s_feature_incompat);
+ params.set_feature_ro_compat_mask =
+ params.feature_ro_compat &
+ ~le32_to_cpu(es->s_feature_ro_compat);
+ params.clear_feature_compat_mask =
+ ~params.feature_compat &
+ le32_to_cpu(es->s_feature_compat);
+ params.clear_feature_incompat_mask =
+ ~params.feature_incompat &
+ le32_to_cpu(es->s_feature_incompat);
+ params.clear_feature_ro_compat_mask =
+ ~params.feature_ro_compat &
+ le32_to_cpu(es->s_feature_ro_compat);
+ params.set_flags |= EXT4_TUNE_FL_EDIT_FEATURES;
+ }
+ if (params.set_flags & EXT4_TUNE_FL_EDIT_FEATURES) {
+ if ((params.set_feature_compat_mask &
+ ~EXT4_TUNE_SET_COMPAT_SUPP) ||
+ (params.set_feature_incompat_mask &
+ ~EXT4_TUNE_SET_INCOMPAT_SUPP) ||
+ (params.set_feature_ro_compat_mask &
+ ~EXT4_TUNE_SET_RO_COMPAT_SUPP) ||
+ (params.clear_feature_compat_mask &
+ ~EXT4_TUNE_CLEAR_COMPAT_SUPP) ||
+ (params.clear_feature_incompat_mask &
+ ~EXT4_TUNE_CLEAR_INCOMPAT_SUPP) ||
+ (params.clear_feature_ro_compat_mask &
+ ~EXT4_TUNE_CLEAR_RO_COMPAT_SUPP))
+ return -EOPNOTSUPP;
+
+ /*
+ * Filter out the features that are already set from
+ * the set_mask.
+ */
+ params.set_feature_compat_mask &=
+ ~le32_to_cpu(es->s_feature_compat);
+ params.set_feature_incompat_mask &=
+ ~le32_to_cpu(es->s_feature_incompat);
+ params.set_feature_ro_compat_mask &=
+ ~le32_to_cpu(es->s_feature_ro_compat);
+ if ((params.set_feature_incompat_mask &
+ EXT4_FEATURE_INCOMPAT_CASEFOLD)) {
+ enabling_casefold = 1;
+ if (!(params.set_flags & EXT4_TUNE_FL_ENCODING)) {
+ params.encoding = EXT4_ENC_UTF8_12_1;
+ params.set_flags |= EXT4_TUNE_FL_ENCODING;
+ }
+ if (!(params.set_flags & EXT4_TUNE_FL_ENCODING_FLAGS)) {
+ params.encoding_flags = 0;
+ params.set_flags |= EXT4_TUNE_FL_ENCODING_FLAGS;
+ }
+ }
+ if ((params.set_feature_compat_mask &
+ EXT4_FEATURE_COMPAT_DIR_INDEX)) {
+ uuid_t uu;
+
+ memcpy(&uu, sbi->s_hash_seed, UUID_SIZE);
+ if (uuid_is_null(&uu))
+ generate_random_uuid((char *)
+ &sbi->s_hash_seed);
+ if (params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG)
+ sbi->s_def_hash_version = params.def_hash_alg;
+ else if (sbi->s_def_hash_version == 0)
+ sbi->s_def_hash_version = DX_HASH_HALF_MD4;
+ if (!(es->s_flags &
+ cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH)) &&
+ !(es->s_flags &
+ cpu_to_le32(EXT2_FLAGS_SIGNED_HASH))) {
+#ifdef __CHAR_UNSIGNED__
+ sbi->s_hash_unsigned = 3;
+#else
+ sbi->s_hash_unsigned = 0;
+#endif
+ }
+ }
+ }
+ if (params.set_flags & EXT4_TUNE_FL_ENCODING) {
+ if (!enabling_casefold)
+ return -EINVAL;
+ if (params.encoding == 0)
+ params.encoding = EXT4_ENC_UTF8_12_1;
+ else if (params.encoding != EXT4_ENC_UTF8_12_1)
+ return -EINVAL;
+ }
+ if (params.set_flags & EXT4_TUNE_FL_ENCODING_FLAGS) {
+ if (!enabling_casefold)
+ return -EINVAL;
+ if (params.encoding_flags & ~SB_ENC_SUPP_MASK)
+ return -EINVAL;
+ }
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ ret = ext4_update_superblocks_fn(sb, ext4_sb_setparams, &params);
+ mnt_drop_write_file(filp);
+
+ if (params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG)
+ sbi->s_def_hash_version = params.def_hash_alg;
+
+ return ret;
+}
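
The EXT4_TUNE_FL_FEATURES branch above reduces an absolute feature word to set/clear masks; isolated as a sketch (illustration only):

/* Illustration: derive edit masks from a desired absolute feature word. */
static void example_feature_masks(__u32 want, __u32 have,
				  __u32 *set_mask, __u32 *clear_mask)
{
	*set_mask = want & ~have;	/* features to turn on */
	*clear_mask = ~want & have;	/* features to turn off */
}
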
+
static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -1253,7 +1550,7 @@ static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (!inode_owner_or_capable(idmap, inode))
return -EPERM;
- if (ext4_has_metadata_csum(inode->i_sb)) {
+ if (ext4_has_feature_metadata_csum(inode->i_sb)) {
ext4_warning(sb, "Setting inode version is not "
"supported with metadata_csum enabled.");
return -ENOTTY;
@@ -1348,16 +1645,6 @@ group_extend_out:
if (!(fd_file(donor)->f_mode & FMODE_WRITE))
return -EBADF;
- if (ext4_has_feature_bigalloc(sb)) {
- ext4_msg(sb, KERN_ERR,
- "Online defrag not supported with bigalloc");
- return -EOPNOTSUPP;
- } else if (IS_DAX(inode)) {
- ext4_msg(sb, KERN_ERR,
- "Online defrag not supported with DAX");
- return -EOPNOTSUPP;
- }
-
err = mnt_want_write_file(filp);
if (err)
return err;
@@ -1504,8 +1791,14 @@ resizefs_out:
return 0;
}
case EXT4_IOC_PRECACHE_EXTENTS:
- return ext4_ext_precache(inode);
+ {
+ int ret;
+ inode_lock_shared(inode);
+ ret = ext4_ext_precache(inode);
+ inode_unlock_shared(inode);
+ return ret;
+ }
case FS_IOC_SET_ENCRYPTION_POLICY:
if (!ext4_has_feature_encrypt(sb))
return -EOPNOTSUPP;
@@ -1609,6 +1902,11 @@ resizefs_out:
return ext4_ioctl_getuuid(EXT4_SB(sb), (void __user *)arg);
case EXT4_IOC_SETFSUUID:
return ext4_ioctl_setuuid(filp, (const void __user *)arg);
+ case EXT4_IOC_GET_TUNE_SB_PARAM:
+ return ext4_ioctl_get_tune_sb(EXT4_SB(sb),
+ (void __user *)arg);
+ case EXT4_IOC_SET_TUNE_SB_PARAM:
+ return ext4_ioctl_set_tune_sb(filp, (void __user *)arg);
default:
return -ENOTTY;
}
@@ -1696,7 +1994,8 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
#endif
-static void set_overhead(struct ext4_super_block *es, const void *arg)
+static void set_overhead(struct ext4_sb_info *sbi,
+ struct ext4_super_block *es, const void *arg)
{
es->s_overhead_clusters = cpu_to_le32(*((unsigned long *) arg));
}
@@ -1705,7 +2004,7 @@ int ext4_update_overhead(struct super_block *sb, bool force)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- if (sb_rdonly(sb))
+ if (ext4_emergency_state(sb) || sb_rdonly(sb))
return 0;
if (!force &&
(sbi->s_overhead == 0 ||
diff --git a/fs/ext4/mballoc-test.c b/fs/ext4/mballoc-test.c
index bb2a223b207c..a9416b20ff64 100644
--- a/fs/ext4/mballoc-test.c
+++ b/fs/ext4/mballoc-test.c
@@ -155,6 +155,7 @@ static struct super_block *mbt_ext4_alloc_super_block(void)
bgl_lock_init(sbi->s_blockgroup_lock);
sbi->s_es = &fsb->es;
+ sbi->s_sb = sb;
sb->s_fs_info = sbi;
up_write(&sb->s_umount);
@@ -796,11 +797,14 @@ static void test_mb_mark_used(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);
ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
KUNIT_ASSERT_EQ(test, ret, 0);
grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
+ grp->bb_largest_free_order = -1;
+ grp->bb_avg_fragment_size_order = -1;
mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
for (i = 0; i < TEST_RANGE_COUNT; i++)
test_mb_mark_used_range(test, &e4b, ranges[i].start,
@@ -860,6 +864,7 @@ static void test_mb_free_blocks(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);
ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -873,6 +878,8 @@ static void test_mb_free_blocks(struct kunit *test)
ext4_unlock_group(sb, TEST_GOAL_GROUP);
grp->bb_free = 0;
+ grp->bb_largest_free_order = -1;
+ grp->bb_avg_fragment_size_order = -1;
memset(bitmap, 0xff, sb->s_blocksize);
mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index b25a27c86696..56d50fd3310b 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -98,14 +98,14 @@
* block bitmap and buddy information. The information are stored in the
* inode as:
*
- * { page }
+ * { folio }
* [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
*
*
* one block each for bitmap and buddy information. So for each group we
- * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
- * blocksize) blocks. So it can have information regarding groups_per_page
- * which is blocks_per_page/2
+ * take up 2 blocks. A folio can contain blocks_per_folio (folio_size /
+ * blocksize) blocks. So it can have information regarding groups_per_folio
+ * which is blocks_per_folio/2
*
* The buddy cache inode is not stored on disk. The inode is thrown
* away when the filesystem is unmounted.
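
A worked example of the layout described above (illustration only, assuming a 16 KiB folio over 4 KiB blocks):

/*
 * Illustration: with folio_size = 16384 and blocksize = 4096,
 * blocks_per_folio = 4 and groups_per_folio = 2, i.e. one folio caches
 * the bitmap+buddy pair of two block groups.
 */
static unsigned int example_groups_per_folio(unsigned int folio_size,
					     unsigned int blocksize)
{
	unsigned int blocks_per_folio = folio_size / blocksize;

	return DIV_ROUND_UP(blocks_per_folio, 2);
}
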
@@ -132,25 +132,30 @@
* If "mb_optimize_scan" mount option is set, we maintain in memory group info
* structures in two data structures:
*
- * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
+ * 1) Array of largest free order xarrays (sbi->s_mb_largest_free_orders)
*
- * Locking: sbi->s_mb_largest_free_orders_locks(array of rw locks)
+ * Locking: Writers use xa_lock, readers use rcu_read_lock.
*
- * This is an array of lists where the index in the array represents the
+ * This is an array of xarrays where the index in the array represents the
* largest free order in the buddy bitmap of the participating group infos of
- * that list. So, there are exactly MB_NUM_ORDERS(sb) (which means total
- * number of buddy bitmap orders possible) number of lists. Group-infos are
- * placed in appropriate lists.
+ * that xarray. So, there are exactly MB_NUM_ORDERS(sb) (which means total
+ * number of buddy bitmap orders possible) number of xarrays. Group-infos are
+ * placed in appropriate xarrays.
*
- * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
+ * 2) Average fragment size xarrays (sbi->s_mb_avg_fragment_size)
*
- * Locking: sbi->s_mb_avg_fragment_size_locks(array of rw locks)
+ * Locking: Writers use xa_lock, readers use rcu_read_lock.
*
- * This is an array of lists where in the i-th list there are groups with
+ * This is an array of xarrays where the i-th xarray contains groups with
* average fragment size >= 2^i and < 2^(i+1). The average fragment size
* is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
- * Note that we don't bother with a special list for completely empty groups
- * so we only have MB_NUM_ORDERS(sb) lists.
+ * Note that we don't bother with a special xarray for completely empty
+ * groups so we only have MB_NUM_ORDERS(sb) xarrays. Group-infos are placed
+ * in appropriate xarrays.
+ *
+ * In each xarray, the index is the block group number and the value is the
+ * block group information; a non-empty entry indicates that the block group
+ * is present in that xarray.
*
* When "mb_optimize_scan" mount option is set, mballoc consults the above data
* structures to decide the order in which groups are to be traversed for
@@ -187,7 +192,7 @@
* /sys/fs/ext4/<partition>/mb_min_to_scan
* /sys/fs/ext4/<partition>/mb_max_to_scan
* /sys/fs/ext4/<partition>/mb_order2_req
- * /sys/fs/ext4/<partition>/mb_linear_limit
+ * /sys/fs/ext4/<partition>/mb_max_linear_groups
*
* The regular allocator uses buddy scan only if the request len is power of
* 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
@@ -209,7 +214,7 @@
* get traversed linearly. That may result in subsequent allocations being not
* close to each other. And so, the underlying device may get filled up in a
* non-linear fashion. While that may not matter on non-rotational devices, for
- * rotational devices that may result in higher seek times. "mb_linear_limit"
+ * rotational devices that may result in higher seek times. "mb_max_linear_groups"
* tells mballoc how many groups mballoc should search linearly before
* performing consulting above data structures for more efficient lookups. For
* non rotational devices, this value defaults to 0 and for rotational devices
@@ -420,8 +425,8 @@ static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
-static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
- ext4_group_t group, enum criteria cr);
+static int ext4_mb_scan_group(struct ext4_allocation_context *ac,
+ ext4_group_t group);
static int ext4_try_to_trim_range(struct super_block *sb,
struct ext4_buddy *e4b, ext4_grpblk_t start,
@@ -677,6 +682,24 @@ do { \
} \
} while (0)
+/*
+ * Perform a buddy integrity check with the following steps:
+ *
+ * 1. Top-down validation (from highest order down to order 1, excluding order-0 bitmap):
+ * For each pair of adjacent orders, if a higher-order bit is set (indicating a free block),
+ * at most one of the two corresponding lower-order bits may be clear (free).
+ *
+ * 2. Order-0 (bitmap) validation, performed on bit pairs:
+ * - If either bit in a pair is set (1, allocated), then all corresponding higher-order bits
+ * must not be free (0).
+ * - If both bits in a pair are clear (0, free), then exactly one of the corresponding
+ * higher-order bits must be free (0).
+ *
+ * 3. Preallocation (pa) list validation:
+ * For each preallocated block (pa) in the group:
+ * - Verify that pa_pstart falls within the bounds of this block group.
+ * - Ensure the corresponding bit(s) in the order-0 bitmap are marked as allocated (1).
+ */
static void __mb_check_buddy(struct ext4_buddy *e4b, char *file,
const char *function, int line)
{
@@ -718,15 +741,6 @@ static void __mb_check_buddy(struct ext4_buddy *e4b, char *file,
continue;
}
- /* both bits in buddy2 must be 1 */
- MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
- MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
-
- for (j = 0; j < (1 << order); j++) {
- k = (i * (1 << order)) + j;
- MB_CHECK_ASSERT(
- !mb_test_bit(k, e4b->bd_bitmap));
- }
count++;
}
MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
@@ -742,15 +756,21 @@ static void __mb_check_buddy(struct ext4_buddy *e4b, char *file,
fragments++;
fstart = i;
}
- continue;
+ } else {
+ fstart = -1;
}
- fstart = -1;
- /* check used bits only */
- for (j = 0; j < e4b->bd_blkbits + 1; j++) {
- buddy2 = mb_find_buddy(e4b, j, &max2);
- k = i >> j;
- MB_CHECK_ASSERT(k < max2);
- MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
+ if (!(i & 1)) {
+ int in_use, zero_bit_count = 0;
+
+ in_use = mb_test_bit(i, buddy) || mb_test_bit(i + 1, buddy);
+ for (j = 1; j < e4b->bd_blkbits + 2; j++) {
+ buddy2 = mb_find_buddy(e4b, j, &max2);
+ k = i >> j;
+ MB_CHECK_ASSERT(k < max2);
+ if (!mb_test_bit(k, buddy2))
+ zero_bit_count++;
+ }
+ MB_CHECK_ASSERT(zero_bit_count == !in_use);
}
}
MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
@@ -763,6 +783,8 @@ static void __mb_check_buddy(struct ext4_buddy *e4b, char *file,
ext4_group_t groupnr;
struct ext4_prealloc_space *pa;
pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
+ if (!pa->pa_len)
+ continue;
ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
MB_CHECK_ASSERT(groupnr == e4b->bd_group);
for (i = 0; i < pa->pa_len; i++)
@@ -841,132 +863,161 @@ static void
mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- int new_order;
+ int new, old;
- if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
+ if (!test_opt2(sb, MB_OPTIMIZE_SCAN))
return;
- new_order = mb_avg_fragment_size_order(sb,
- grp->bb_free / grp->bb_fragments);
- if (new_order == grp->bb_avg_fragment_size_order)
+ old = grp->bb_avg_fragment_size_order;
+ new = grp->bb_fragments == 0 ? -1 :
+ mb_avg_fragment_size_order(sb, grp->bb_free / grp->bb_fragments);
+ if (new == old)
return;
- if (grp->bb_avg_fragment_size_order != -1) {
- write_lock(&sbi->s_mb_avg_fragment_size_locks[
- grp->bb_avg_fragment_size_order]);
- list_del(&grp->bb_avg_fragment_size_node);
- write_unlock(&sbi->s_mb_avg_fragment_size_locks[
- grp->bb_avg_fragment_size_order]);
+ if (old >= 0)
+ xa_erase(&sbi->s_mb_avg_fragment_size[old], grp->bb_group);
+
+ grp->bb_avg_fragment_size_order = new;
+ if (new >= 0) {
+ /*
+ * Cannot use __GFP_NOFAIL because we hold the group lock.
+ * Although allocation for insertion may fail, it's not fatal
+ * as we have linear traversal to fall back on.
+ */
+ int err = xa_insert(&sbi->s_mb_avg_fragment_size[new],
+ grp->bb_group, grp, GFP_ATOMIC);
+ if (err)
+ mb_debug(sb, "insert group: %u to s_mb_avg_fragment_size[%d] failed, err %d",
+ grp->bb_group, new, err);
}
- grp->bb_avg_fragment_size_order = new_order;
- write_lock(&sbi->s_mb_avg_fragment_size_locks[
- grp->bb_avg_fragment_size_order]);
- list_add_tail(&grp->bb_avg_fragment_size_node,
- &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
- write_unlock(&sbi->s_mb_avg_fragment_size_locks[
- grp->bb_avg_fragment_size_order]);
+}
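
The list-plus-rwlock pair is gone; what remains is a plain move-between-xarray-buckets pattern, shown here as a standalone sketch (illustration only, shared in spirit with mb_set_largest_free_order() further down):

/* Illustration: move an entry from bucket `old` to bucket `new`. */
static void example_move_bucket(struct xarray *buckets, int old, int new,
				unsigned long index, void *entry)
{
	if (old >= 0)
		xa_erase(&buckets[old], index);
	if (new >= 0 &&
	    xa_insert(&buckets[new], index, entry, GFP_ATOMIC))
		pr_debug("xarray insert failed, linear scan will cover it\n");
}
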
+
+static int ext4_mb_scan_groups_xa_range(struct ext4_allocation_context *ac,
+ struct xarray *xa,
+ ext4_group_t start, ext4_group_t end)
+{
+ struct super_block *sb = ac->ac_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ enum criteria cr = ac->ac_criteria;
+ ext4_group_t ngroups = ext4_get_groups_count(sb);
+ unsigned long group = start;
+ struct ext4_group_info *grp;
+
+ if (WARN_ON_ONCE(end > ngroups || start >= end))
+ return 0;
+
+ xa_for_each_range(xa, group, grp, start, end - 1) {
+ int err;
+
+ if (sbi->s_mb_stats)
+ atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
+
+ err = ext4_mb_scan_group(ac, grp->bb_group);
+ if (err || ac->ac_status != AC_STATUS_CONTINUE)
+ return err;
+
+ cond_resched();
+ }
+
+ return 0;
+}
+
+/*
+ * Find a suitable group of given order from the largest free orders xarray.
+ */
+static inline int
+ext4_mb_scan_groups_largest_free_order_range(struct ext4_allocation_context *ac,
+ int order, ext4_group_t start,
+ ext4_group_t end)
+{
+ struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_largest_free_orders[order];
+
+ if (xa_empty(xa))
+ return 0;
+
+ return ext4_mb_scan_groups_xa_range(ac, xa, start, end);
}
/*
* Choose next group by traversing largest_free_order lists. Updates *new_cr if
* cr level needs an update.
*/
-static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context *ac,
- enum criteria *new_cr, ext4_group_t *group)
+static int ext4_mb_scan_groups_p2_aligned(struct ext4_allocation_context *ac,
+ ext4_group_t group)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
- struct ext4_group_info *iter;
int i;
+ int ret = 0;
+ ext4_group_t start, end;
- if (ac->ac_status == AC_STATUS_FOUND)
- return;
-
- if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED))
- atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions);
-
+ start = group;
+ end = ext4_get_groups_count(ac->ac_sb);
+wrap_around:
for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
- if (list_empty(&sbi->s_mb_largest_free_orders[i]))
- continue;
- read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
- if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
- read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
- continue;
- }
- list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
- bb_largest_free_order_node) {
- if (sbi->s_mb_stats)
- atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]);
- if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
- *group = iter->bb_group;
- ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
- read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
- return;
- }
- }
- read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
+ ret = ext4_mb_scan_groups_largest_free_order_range(ac, i,
+ start, end);
+ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
+ return ret;
+ }
+ if (start) {
+ end = start;
+ start = 0;
+ goto wrap_around;
}
+ if (sbi->s_mb_stats)
+ atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]);
+
/* Increment cr and search again if no group is found */
- *new_cr = CR_GOAL_LEN_FAST;
+ ac->ac_criteria = CR_GOAL_LEN_FAST;
+ return ret;
}
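
The wrap-around traversal above recurs in the goal_fast and best_avail scanners below; reduced to a standalone sketch with a hypothetical callback:

/* Illustration only: scan [start, ngroups) and then wrap to [0, start). */
static int example_scan_wrapping(unsigned int start, unsigned int ngroups,
				 int (*scan)(unsigned int group, void *arg),
				 void *arg)
{
	unsigned int group, end = ngroups;

wrap_around:
	for (group = start; group < end; group++) {
		int ret = scan(group, arg);

		if (ret)
			return ret;
	}
	if (start) {
		end = start;
		start = 0;
		goto wrap_around;
	}
	return 0;
}
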
/*
- * Find a suitable group of given order from the average fragments list.
+ * Find a suitable group of given order from the average fragments xarray.
*/
-static struct ext4_group_info *
-ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order)
+static int
+ext4_mb_scan_groups_avg_frag_order_range(struct ext4_allocation_context *ac,
+ int order, ext4_group_t start,
+ ext4_group_t end)
{
- struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
- struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order];
- rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order];
- struct ext4_group_info *grp = NULL, *iter;
- enum criteria cr = ac->ac_criteria;
+ struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_avg_fragment_size[order];
- if (list_empty(frag_list))
- return NULL;
- read_lock(frag_list_lock);
- if (list_empty(frag_list)) {
- read_unlock(frag_list_lock);
- return NULL;
- }
- list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) {
- if (sbi->s_mb_stats)
- atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
- if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
- grp = iter;
- break;
- }
- }
- read_unlock(frag_list_lock);
- return grp;
+ if (xa_empty(xa))
+ return 0;
+
+ return ext4_mb_scan_groups_xa_range(ac, xa, start, end);
}
/*
* Choose next group by traversing average fragment size list of suitable
* order. Updates *new_cr if cr level needs an update.
*/
-static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *ac,
- enum criteria *new_cr, ext4_group_t *group)
+static int ext4_mb_scan_groups_goal_fast(struct ext4_allocation_context *ac,
+ ext4_group_t group)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
- struct ext4_group_info *grp = NULL;
- int i;
-
- if (unlikely(ac->ac_flags & EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED)) {
- if (sbi->s_mb_stats)
- atomic_inc(&sbi->s_bal_goal_fast_bad_suggestions);
+ int i, ret = 0;
+ ext4_group_t start, end;
+
+ start = group;
+ end = ext4_get_groups_count(ac->ac_sb);
+wrap_around:
+ i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
+ for (; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
+ ret = ext4_mb_scan_groups_avg_frag_order_range(ac, i,
+ start, end);
+ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
+ return ret;
}
-
- for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
- i < MB_NUM_ORDERS(ac->ac_sb); i++) {
- grp = ext4_mb_find_good_group_avg_frag_lists(ac, i);
- if (grp) {
- *group = grp->bb_group;
- ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED;
- return;
- }
+ if (start) {
+ end = start;
+ start = 0;
+ goto wrap_around;
}
+ if (sbi->s_mb_stats)
+ atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]);
/*
* CR_BEST_AVAIL_LEN works based on the concept that we have
* a larger normalized goal len request which can be trimmed to
@@ -976,9 +1027,11 @@ static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *
* See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA).
*/
if (ac->ac_flags & EXT4_MB_HINT_DATA)
- *new_cr = CR_BEST_AVAIL_LEN;
+ ac->ac_criteria = CR_BEST_AVAIL_LEN;
else
- *new_cr = CR_GOAL_LEN_SLOW;
+ ac->ac_criteria = CR_GOAL_LEN_SLOW;
+
+ return ret;
}
/*
@@ -990,18 +1043,14 @@ static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *
* preallocations. However, we make sure that we don't trim the request too
* much and fall to CR_GOAL_LEN_SLOW in that case.
*/
-static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context *ac,
- enum criteria *new_cr, ext4_group_t *group)
+static int ext4_mb_scan_groups_best_avail(struct ext4_allocation_context *ac,
+ ext4_group_t group)
{
+ int ret = 0;
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
- struct ext4_group_info *grp = NULL;
int i, order, min_order;
unsigned long num_stripe_clusters = 0;
-
- if (unlikely(ac->ac_flags & EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED)) {
- if (sbi->s_mb_stats)
- atomic_inc(&sbi->s_bal_best_avail_bad_suggestions);
- }
+ ext4_group_t start, end;
/*
* mb_avg_fragment_size_order() returns order in a way that makes
@@ -1033,6 +1082,9 @@ static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context
if (1 << min_order < ac->ac_o_ex.fe_len)
min_order = fls(ac->ac_o_ex.fe_len);
+ start = group;
+ end = ext4_get_groups_count(ac->ac_sb);
+wrap_around:
for (i = order; i >= min_order; i--) {
int frag_order;
/*
@@ -1055,17 +1107,24 @@ static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context
frag_order = mb_avg_fragment_size_order(ac->ac_sb,
ac->ac_g_ex.fe_len);
- grp = ext4_mb_find_good_group_avg_frag_lists(ac, frag_order);
- if (grp) {
- *group = grp->bb_group;
- ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED;
- return;
- }
+ ret = ext4_mb_scan_groups_avg_frag_order_range(ac, frag_order,
+ start, end);
+ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
+ return ret;
+ }
+ if (start) {
+ end = start;
+ start = 0;
+ goto wrap_around;
}
/* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */
ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
- *new_cr = CR_GOAL_LEN_SLOW;
+ if (sbi->s_mb_stats)
+ atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]);
+ ac->ac_criteria = CR_GOAL_LEN_SLOW;
+
+ return ret;
}
static inline int should_optimize_scan(struct ext4_allocation_context *ac)
@@ -1080,59 +1139,82 @@ static inline int should_optimize_scan(struct ext4_allocation_context *ac)
}
/*
- * Return next linear group for allocation.
+ * Advance *group to the next linear group for allocation.
*/
-static ext4_group_t
-next_linear_group(ext4_group_t group, ext4_group_t ngroups)
+static void next_linear_group(ext4_group_t *group, ext4_group_t ngroups)
{
/*
* Artificially restricted ngroups for non-extent
* files makes group > ngroups possible on first loop.
*/
- return group + 1 >= ngroups ? 0 : group + 1;
+ *group = *group + 1 >= ngroups ? 0 : *group + 1;
}
-/*
- * ext4_mb_choose_next_group: choose next group for allocation.
- *
- * @ac Allocation Context
- * @new_cr This is an output parameter. If the there is no good group
- * available at current CR level, this field is updated to indicate
- * the new cr level that should be used.
- * @group This is an input / output parameter. As an input it indicates the
- * next group that the allocator intends to use for allocation. As
- * output, this field indicates the next group that should be used as
- * determined by the optimization functions.
- * @ngroups Total number of groups
- */
-static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
- enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
+static int ext4_mb_scan_groups_linear(struct ext4_allocation_context *ac,
+ ext4_group_t ngroups, ext4_group_t *start, ext4_group_t count)
{
- *new_cr = ac->ac_criteria;
+ int ret, i;
+ enum criteria cr = ac->ac_criteria;
+ struct super_block *sb = ac->ac_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ ext4_group_t group = *start;
- if (!should_optimize_scan(ac)) {
- *group = next_linear_group(*group, ngroups);
- return;
+ for (i = 0; i < count; i++, next_linear_group(&group, ngroups)) {
+ ret = ext4_mb_scan_group(ac, group);
+ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
+ return ret;
+ cond_resched();
}
+ *start = group;
+ if (count == ngroups)
+ ac->ac_criteria++;
+
+ /* Processed all groups and haven't found blocks */
+ if (sbi->s_mb_stats && i == ngroups)
+ atomic64_inc(&sbi->s_bal_cX_failed[cr]);
+
+ return 0;
+}
+
+static int ext4_mb_scan_groups(struct ext4_allocation_context *ac)
+{
+ int ret = 0;
+ ext4_group_t start;
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ ext4_group_t ngroups = ext4_get_groups_count(ac->ac_sb);
+
+ /* non-extent files are limited to low blocks/groups */
+ if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
+ ngroups = sbi->s_blockfile_groups;
+
+ /* start searching for the right group from the specified goal value */
+ start = ac->ac_g_ex.fe_group;
+ ac->ac_prefetch_grp = start;
+ ac->ac_prefetch_nr = 0;
+
+ if (!should_optimize_scan(ac))
+ return ext4_mb_scan_groups_linear(ac, ngroups, &start, ngroups);
+
/*
* Optimized scanning can return non adjacent groups which can cause
* seek overhead for rotational disks. So try few linear groups before
* trying optimized scan.
*/
- if (ac->ac_groups_linear_remaining) {
- *group = next_linear_group(*group, ngroups);
- ac->ac_groups_linear_remaining--;
- return;
- }
+ if (sbi->s_mb_max_linear_groups)
+ ret = ext4_mb_scan_groups_linear(ac, ngroups, &start,
+ sbi->s_mb_max_linear_groups);
+ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
+ return ret;
- if (*new_cr == CR_POWER2_ALIGNED) {
- ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group);
- } else if (*new_cr == CR_GOAL_LEN_FAST) {
- ext4_mb_choose_next_group_goal_fast(ac, new_cr, group);
- } else if (*new_cr == CR_BEST_AVAIL_LEN) {
- ext4_mb_choose_next_group_best_avail(ac, new_cr, group);
- } else {
+ switch (ac->ac_criteria) {
+ case CR_POWER2_ALIGNED:
+ return ext4_mb_scan_groups_p2_aligned(ac, start);
+ case CR_GOAL_LEN_FAST:
+ return ext4_mb_scan_groups_goal_fast(ac, start);
+ case CR_BEST_AVAIL_LEN:
+ return ext4_mb_scan_groups_best_avail(ac, start);
+ default:
/*
* TODO: For CR_GOAL_LEN_SLOW, we can arrange groups in an
* rb tree sorted by bb_free. But until that happens, we should
@@ -1140,6 +1222,8 @@ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
*/
WARN_ON(1);
}
+
+ return 0;
}
/*
@@ -1150,33 +1234,35 @@ static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- int i;
+ int new, old = grp->bb_largest_free_order;
- for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
- if (grp->bb_counters[i] > 0)
+ for (new = MB_NUM_ORDERS(sb) - 1; new >= 0; new--)
+ if (grp->bb_counters[new] > 0)
break;
+
/* No need to move between order lists? */
- if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
- i == grp->bb_largest_free_order) {
- grp->bb_largest_free_order = i;
+ if (new == old)
return;
- }
- if (grp->bb_largest_free_order >= 0) {
- write_lock(&sbi->s_mb_largest_free_orders_locks[
- grp->bb_largest_free_order]);
- list_del_init(&grp->bb_largest_free_order_node);
- write_unlock(&sbi->s_mb_largest_free_orders_locks[
- grp->bb_largest_free_order]);
+ if (old >= 0) {
+ struct xarray *xa = &sbi->s_mb_largest_free_orders[old];
+
+ if (!xa_empty(xa) && xa_load(xa, grp->bb_group))
+ xa_erase(xa, grp->bb_group);
}
- grp->bb_largest_free_order = i;
- if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
- write_lock(&sbi->s_mb_largest_free_orders_locks[
- grp->bb_largest_free_order]);
- list_add_tail(&grp->bb_largest_free_order_node,
- &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
- write_unlock(&sbi->s_mb_largest_free_orders_locks[
- grp->bb_largest_free_order]);
+
+ grp->bb_largest_free_order = new;
+ if (test_opt2(sb, MB_OPTIMIZE_SCAN) && new >= 0 && grp->bb_free) {
+ /*
+ * Cannot use __GFP_NOFAIL because we hold the group lock.
+ * Although allocation for insertion may fail, it's not fatal
+ * as we have linear traversal to fall back on.
+ */
+ int err = xa_insert(&sbi->s_mb_largest_free_orders[new],
+ grp->bb_group, grp, GFP_ATOMIC);
+ if (err)
+ mb_debug(sb, "insert group: %u to s_mb_largest_free_orders[%d] failed, err %d",
+ grp->bb_group, new, err);
}
}
@@ -1260,26 +1346,25 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b)
* block bitmap and buddy information. The information are
* stored in the inode as
*
- * { page }
+ * { folio }
* [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
*
*
* one block each for bitmap and buddy information.
- * So for each group we take up 2 blocks. A page can
- * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
- * So it can have information regarding groups_per_page which
- * is blocks_per_page/2
+ * So for each group we take up 2 blocks. A folio can
+ * contain blocks_per_folio (folio_size / blocksize) blocks.
+ * So it can have information regarding groups_per_folio which
+ * is blocks_per_folio/2
*
* Locking note: This routine takes the block group lock of all groups
- * for this page; do not hold this lock when calling this routine!
+ * for this folio; do not hold this lock when calling this routine!
*/
-
static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
{
ext4_group_t ngroups;
unsigned int blocksize;
- int blocks_per_page;
- int groups_per_page;
+ int blocks_per_folio;
+ int groups_per_folio;
int err = 0;
int i;
ext4_group_t first_group, group;
@@ -1296,27 +1381,24 @@ static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
sb = inode->i_sb;
ngroups = ext4_get_groups_count(sb);
blocksize = i_blocksize(inode);
- blocks_per_page = PAGE_SIZE / blocksize;
+ blocks_per_folio = folio_size(folio) / blocksize;
+ WARN_ON_ONCE(!blocks_per_folio);
+ groups_per_folio = DIV_ROUND_UP(blocks_per_folio, 2);
mb_debug(sb, "init folio %lu\n", folio->index);
- groups_per_page = blocks_per_page >> 1;
- if (groups_per_page == 0)
- groups_per_page = 1;
-
/* allocate buffer_heads to read bitmaps */
- if (groups_per_page > 1) {
- i = sizeof(struct buffer_head *) * groups_per_page;
+ if (groups_per_folio > 1) {
+ i = sizeof(struct buffer_head *) * groups_per_folio;
bh = kzalloc(i, gfp);
if (bh == NULL)
return -ENOMEM;
} else
bh = &bhs;
- first_group = folio->index * blocks_per_page / 2;
-
/* read all groups the folio covers into the cache */
- for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
+ first_group = EXT4_PG_TO_LBLK(inode, folio->index) / 2;
+ for (i = 0, group = first_group; i < groups_per_folio; i++, group++) {
if (group >= ngroups)
break;
@@ -1324,7 +1406,7 @@ static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
if (!grinfo)
continue;
/*
- * If page is uptodate then we came here after online resize
+ * If folio is uptodate then we came here after online resize
* which added some new uninitialized group info structs, so
* we must skip all initialized uptodate buddies on the folio,
* which may be currently in use by an allocating task.
@@ -1344,7 +1426,7 @@ static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
}
/* wait for I/O completion */
- for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
+ for (i = 0, group = first_group; i < groups_per_folio; i++, group++) {
int err2;
if (!bh[i])
@@ -1354,8 +1436,8 @@ static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
err = err2;
}
- first_block = folio->index * blocks_per_page;
- for (i = 0; i < blocks_per_page; i++) {
+ first_block = EXT4_PG_TO_LBLK(inode, folio->index);
+ for (i = 0; i < blocks_per_folio; i++) {
group = (first_block + i) >> 1;
if (group >= ngroups)
break;
@@ -1432,7 +1514,7 @@ static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
out:
if (bh) {
- for (i = 0; i < groups_per_page; i++)
+ for (i = 0; i < groups_per_folio; i++)
brelse(bh[i]);
if (bh != &bhs)
kfree(bh);
@@ -1441,55 +1523,57 @@ out:
}
/*
- * Lock the buddy and bitmap pages. This make sure other parallel init_group
- * on the same buddy page doesn't happen whild holding the buddy page lock.
- * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
- * are on the same page e4b->bd_buddy_folio is NULL and return value is 0.
+ * Lock the buddy and bitmap folios. This makes sure other parallel init_group
+ * on the same buddy folio doesn't happen while holding the buddy folio lock.
+ * Return locked buddy and bitmap folios on e4b struct. If buddy and bitmap
+ * are on the same folio e4b->bd_buddy_folio is NULL and return value is 0.
*/
-static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+static int ext4_mb_get_buddy_folio_lock(struct super_block *sb,
ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
{
struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
- int block, pnum, poff;
- int blocks_per_page;
+ int block, pnum;
struct folio *folio;
e4b->bd_buddy_folio = NULL;
e4b->bd_bitmap_folio = NULL;
- blocks_per_page = PAGE_SIZE / sb->s_blocksize;
/*
* the buddy cache inode stores the block bitmap
* and buddy information in consecutive blocks.
* So for each group we need two blocks.
*/
block = group * 2;
- pnum = block / blocks_per_page;
- poff = block % blocks_per_page;
+ pnum = EXT4_LBLK_TO_PG(inode, block);
folio = __filemap_get_folio(inode->i_mapping, pnum,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
if (IS_ERR(folio))
return PTR_ERR(folio);
BUG_ON(folio->mapping != inode->i_mapping);
+ WARN_ON_ONCE(folio_size(folio) < sb->s_blocksize);
e4b->bd_bitmap_folio = folio;
- e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);
+ e4b->bd_bitmap = folio_address(folio) +
+ offset_in_folio(folio, EXT4_LBLK_TO_B(inode, block));
- if (blocks_per_page >= 2) {
- /* buddy and bitmap are on the same page */
+ block++;
+ pnum = EXT4_LBLK_TO_PG(inode, block);
+ if (folio_contains(folio, pnum)) {
+ /* buddy and bitmap are on the same folio */
return 0;
}
- /* blocks_per_page == 1, hence we need another page for the buddy */
- folio = __filemap_get_folio(inode->i_mapping, block + 1,
+ /* we need another folio for the buddy */
+ folio = __filemap_get_folio(inode->i_mapping, pnum,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
if (IS_ERR(folio))
return PTR_ERR(folio);
BUG_ON(folio->mapping != inode->i_mapping);
+ WARN_ON_ONCE(folio_size(folio) < sb->s_blocksize);
e4b->bd_buddy_folio = folio;
return 0;
}
-static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
+static void ext4_mb_put_buddy_folio_lock(struct ext4_buddy *e4b)
{
if (e4b->bd_bitmap_folio) {
folio_unlock(e4b->bd_bitmap_folio);
@@ -1503,7 +1587,7 @@ static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
/*
* Locking note: This routine calls ext4_mb_init_cache(), which takes the
- * block group lock of all groups for this page; do not hold the BG lock when
+ * block group lock of all groups for this folio; do not hold the BG lock when
* calling this routine!
*/
static noinline_for_stack
@@ -1523,14 +1607,14 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
/*
* This ensures that we don't reinit the buddy cache
- * page which map to the group from which we are already
+ * folio which maps to the group from which we are already
* allocating. If we are looking at the buddy cache we would
* have taken a reference using ext4_mb_load_buddy and that
- * would have pinned buddy page to page cache.
- * The call to ext4_mb_get_buddy_page_lock will mark the
- * page accessed.
+ * would have pinned buddy folio to page cache.
+ * The call to ext4_mb_get_buddy_folio_lock will mark the
+ * folio accessed.
*/
- ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
+ ret = ext4_mb_get_buddy_folio_lock(sb, group, &e4b, gfp);
if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
/*
* somebody initialized the group
@@ -1551,7 +1635,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
if (e4b.bd_buddy_folio == NULL) {
/*
* If both the bitmap and buddy are in
- * the same page we don't need to force
+ * the same folio we don't need to force
* init the buddy
*/
ret = 0;
@@ -1567,23 +1651,21 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
goto err;
}
err:
- ext4_mb_put_buddy_page_lock(&e4b);
+ ext4_mb_put_buddy_folio_lock(&e4b);
return ret;
}
/*
* Locking note: This routine calls ext4_mb_init_cache(), which takes the
- * block group lock of all groups for this page; do not hold the BG lock when
+ * block group lock of all groups for this folio; do not hold the BG lock when
* calling this routine!
*/
static noinline_for_stack int
ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
struct ext4_buddy *e4b, gfp_t gfp)
{
- int blocks_per_page;
int block;
int pnum;
- int poff;
struct folio *folio;
int ret;
struct ext4_group_info *grp;
@@ -1593,7 +1675,6 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
might_sleep();
mb_debug(sb, "load group %u\n", group);
- blocks_per_page = PAGE_SIZE / sb->s_blocksize;
grp = ext4_get_group_info(sb, group);
if (!grp)
return -EFSCORRUPTED;
@@ -1621,8 +1702,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
* So for each group we need two blocks.
*/
block = group * 2;
- pnum = block / blocks_per_page;
- poff = block % blocks_per_page;
+ pnum = EXT4_LBLK_TO_PG(inode, block);
/* Avoid locking the folio in the fast path ... */
folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
@@ -1654,7 +1734,8 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
goto err;
}
mb_cmp_bitmaps(e4b, folio_address(folio) +
- (poff * sb->s_blocksize));
+ offset_in_folio(folio,
+ EXT4_LBLK_TO_B(inode, block)));
}
folio_unlock(folio);
}
@@ -1670,12 +1751,18 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
/* Folios marked accessed already */
e4b->bd_bitmap_folio = folio;
- e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);
+ e4b->bd_bitmap = folio_address(folio) +
+ offset_in_folio(folio, EXT4_LBLK_TO_B(inode, block));
block++;
- pnum = block / blocks_per_page;
- poff = block % blocks_per_page;
+ pnum = EXT4_LBLK_TO_PG(inode, block);
+ /* buddy and bitmap are on the same folio? */
+ if (folio_contains(folio, pnum)) {
+ folio_get(folio);
+ goto update_buddy;
+ }
+ /* we need another folio for the buddy */
folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
if (!IS_ERR(folio))
@@ -1710,9 +1797,11 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
goto err;
}
+update_buddy:
/* Folios marked accessed already */
e4b->bd_buddy_folio = folio;
- e4b->bd_buddy = folio_address(folio) + (poff * sb->s_blocksize);
+ e4b->bd_buddy = folio_address(folio) +
+ offset_in_folio(folio, EXT4_LBLK_TO_B(inode, block));
return 0;
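
A standalone sketch of how the buddy cache now locates a logical block inside a possibly large folio; EXT4_LBLK_TO_PG() and EXT4_LBLK_TO_B() are the conversion macros this series relies on (block to page index and block to byte offset, as used above):

/*
 * Illustration only: resolve the in-folio address of a buddy-cache
 * logical block.
 */
static void *example_block_address(struct inode *inode, struct folio *folio,
				   ext4_lblk_t block)
{
	WARN_ON_ONCE(!folio_contains(folio, EXT4_LBLK_TO_PG(inode, block)));
	return folio_address(folio) +
	       offset_in_folio(folio, EXT4_LBLK_TO_B(inode, block));
}
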
@@ -2155,7 +2244,7 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
ac->ac_buddy = ret >> 16;
/*
- * take the page reference. We want the page to be pinned
+ * take the folio reference. We want the folio to be pinned
* so that we don't get a ext4_mb_init_cache_call for this
* group until we update the bitmap. That would mean we
* double allocate blocks. The reference is dropped
@@ -2167,11 +2256,11 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
folio_get(ac->ac_buddy_folio);
/* store last allocated for subsequent stream allocation */
if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
- spin_lock(&sbi->s_md_lock);
- sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
- sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
- spin_unlock(&sbi->s_md_lock);
+ int hash = ac->ac_inode->i_ino % sbi->s_mb_nr_global_goals;
+
+ WRITE_ONCE(sbi->s_mb_last_groups[hash], ac->ac_f_ex.fe_group);
}
+
/*
* As we've just preallocated more space than
* user requested originally, we store allocated
@@ -2571,6 +2660,30 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
}
}
+static void __ext4_mb_scan_group(struct ext4_allocation_context *ac)
+{
+ bool is_stripe_aligned;
+ struct ext4_sb_info *sbi;
+ enum criteria cr = ac->ac_criteria;
+
+ ac->ac_groups_scanned++;
+ if (cr == CR_POWER2_ALIGNED)
+ return ext4_mb_simple_scan_group(ac, ac->ac_e4b);
+
+ sbi = EXT4_SB(ac->ac_sb);
+ is_stripe_aligned = false;
+ if ((sbi->s_stripe >= sbi->s_cluster_ratio) &&
+ !(ac->ac_g_ex.fe_len % EXT4_NUM_B2C(sbi, sbi->s_stripe)))
+ is_stripe_aligned = true;
+
+ if ((cr == CR_GOAL_LEN_FAST || cr == CR_BEST_AVAIL_LEN) &&
+ is_stripe_aligned)
+ ext4_mb_scan_aligned(ac, ac->ac_e4b);
+
+ if (ac->ac_status == AC_STATUS_CONTINUE)
+ ext4_mb_complex_scan_group(ac, ac->ac_e4b);
+}
+
/*
* This is also called BEFORE we load the buddy bitmap.
* Returns either 1 or 0 indicating that the group is either suitable
@@ -2761,6 +2874,37 @@ ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
}
/*
+ * Batch reads of the block allocation bitmaps to get
+ * multiple READs in flight; limit prefetching at inexpensive
+ * CR, otherwise mballoc can spend a lot of time loading
+ * imperfect groups
+ */
+static void ext4_mb_might_prefetch(struct ext4_allocation_context *ac,
+ ext4_group_t group)
+{
+ struct ext4_sb_info *sbi;
+
+ if (ac->ac_prefetch_grp != group)
+ return;
+
+ sbi = EXT4_SB(ac->ac_sb);
+ if (ext4_mb_cr_expensive(ac->ac_criteria) ||
+ ac->ac_prefetch_ios < sbi->s_mb_prefetch_limit) {
+ unsigned int nr = sbi->s_mb_prefetch;
+
+ if (ext4_has_feature_flex_bg(ac->ac_sb)) {
+ nr = 1 << sbi->s_log_groups_per_flex;
+ nr -= group & (nr - 1);
+ nr = umin(nr, sbi->s_mb_prefetch);
+ }
+
+ ac->ac_prefetch_nr = nr;
+ ac->ac_prefetch_grp = ext4_mb_prefetch(ac->ac_sb, group, nr,
+ &ac->ac_prefetch_ios);
+ }
+}
+
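The flex_bg-bounded prefetch window above can be modeled in plain C as follows (function names and numbers are illustrative only):

#include <stdio.h>

static unsigned int umin_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/*
 * Keep the prefetch window inside the current flex group: the flex group
 * size is a power of two, so (group & (flex - 1)) is the offset inside it.
 */
static unsigned int prefetch_window(unsigned int group,
				    unsigned int log_groups_per_flex,
				    unsigned int prefetch_limit)
{
	unsigned int nr = 1u << log_groups_per_flex;

	nr -= group & (nr - 1);		/* groups left until the flex boundary */
	return umin_u(nr, prefetch_limit);
}

int main(void)
{
	/* flex of 16 groups, prefetch cap of 32, starting at group 13 */
	printf("%u\n", prefetch_window(13, 4, 32));	/* 3 groups to boundary */
	printf("%u\n", prefetch_window(16, 4, 32));	/* 16, a full flex group */
	return 0;
}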
+/*
* Prefetching reads the block bitmap into the buffer cache; but we
* need to make sure that the buddy bitmap in the page cache has been
* initialized. Note that ext4_mb_init_group() will block if the I/O
@@ -2793,24 +2937,58 @@ void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
}
}
+static int ext4_mb_scan_group(struct ext4_allocation_context *ac,
+ ext4_group_t group)
+{
+ int ret;
+ struct super_block *sb = ac->ac_sb;
+ enum criteria cr = ac->ac_criteria;
+
+ ext4_mb_might_prefetch(ac, group);
+
+ /* prevent unnecessary buddy loading. */
+ if (cr < CR_ANY_FREE && spin_is_locked(ext4_group_lock_ptr(sb, group)))
+ return 0;
+
+ /* This now checks without needing the buddy folio */
+ ret = ext4_mb_good_group_nolock(ac, group, cr);
+ if (ret <= 0) {
+ if (!ac->ac_first_err)
+ ac->ac_first_err = ret;
+ return 0;
+ }
+
+ ret = ext4_mb_load_buddy(sb, group, ac->ac_e4b);
+ if (ret)
+ return ret;
+
+ /* skip busy group */
+ if (cr >= CR_ANY_FREE)
+ ext4_lock_group(sb, group);
+ else if (!ext4_try_lock_group(sb, group))
+ goto out_unload;
+
+ /* We need to check again after locking the block group. */
+ if (unlikely(!ext4_mb_good_group(ac, group, cr)))
+ goto out_unlock;
+
+ __ext4_mb_scan_group(ac);
+
+out_unlock:
+ ext4_unlock_group(sb, group);
+out_unload:
+ ext4_mb_unload_buddy(ac->ac_e4b);
+ return ret;
+}
+
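The busy-group skip above can be pictured with a user-space trylock analogy (pthread mutexes standing in for the group locks; this is not the kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_GROUPS 4

static pthread_mutex_t group_lock[NR_GROUPS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Scan a group, but skip it when it is busy and we are in a "cheap" pass. */
static bool scan_group(int group, bool cheap_pass)
{
	if (cheap_pass) {
		if (pthread_mutex_trylock(&group_lock[group]) != 0)
			return false;		/* busy: move on to the next group */
	} else {
		pthread_mutex_lock(&group_lock[group]);	/* last resort: wait */
	}

	/* ... scan the group's buddy data here ... */

	pthread_mutex_unlock(&group_lock[group]);
	return true;
}

int main(void)
{
	for (int g = 0; g < NR_GROUPS; g++)
		printf("group %d scanned: %d\n", g, scan_group(g, true));
	return 0;
}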
static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
- ext4_group_t prefetch_grp = 0, ngroups, group, i;
- enum criteria new_cr, cr = CR_GOAL_LEN_FAST;
- int err = 0, first_err = 0;
- unsigned int nr = 0, prefetch_ios = 0;
- struct ext4_sb_info *sbi;
- struct super_block *sb;
+ ext4_group_t i;
+ int err = 0;
+ struct super_block *sb = ac->ac_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_buddy e4b;
- int lost;
-
- sb = ac->ac_sb;
- sbi = EXT4_SB(sb);
- ngroups = ext4_get_groups_count(sb);
- /* non-extent files are limited to low blocks/groups */
- if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
- ngroups = sbi->s_blockfile_groups;
BUG_ON(ac->ac_status == AC_STATUS_FOUND);
@@ -2844,11 +3022,11 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
/* if stream allocation is enabled, use global goal */
if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
- /* TBD: may be hot point */
- spin_lock(&sbi->s_md_lock);
- ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
- ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
- spin_unlock(&sbi->s_md_lock);
+ int hash = ac->ac_inode->i_ino % sbi->s_mb_nr_global_goals;
+
+ ac->ac_g_ex.fe_group = READ_ONCE(sbi->s_mb_last_groups[hash]);
+ ac->ac_g_ex.fe_start = -1;
+ ac->ac_flags &= ~EXT4_MB_HINT_TRY_GOAL;
}
/*
@@ -2856,107 +3034,21 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
* start with CR_GOAL_LEN_FAST, unless it is power of 2
* aligned, in which case let's do that faster approach first.
*/
+ ac->ac_criteria = CR_GOAL_LEN_FAST;
if (ac->ac_2order)
- cr = CR_POWER2_ALIGNED;
-repeat:
- for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
- ac->ac_criteria = cr;
- /*
- * searching for the right group start
- * from the goal value specified
- */
- group = ac->ac_g_ex.fe_group;
- ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
- prefetch_grp = group;
- nr = 0;
-
- for (i = 0, new_cr = cr; i < ngroups; i++,
- ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
- int ret = 0;
-
- cond_resched();
- if (new_cr != cr) {
- cr = new_cr;
- goto repeat;
- }
-
- /*
- * Batch reads of the block allocation bitmaps
- * to get multiple READs in flight; limit
- * prefetching at inexpensive CR, otherwise mballoc
- * can spend a lot of time loading imperfect groups
- */
- if ((prefetch_grp == group) &&
- (ext4_mb_cr_expensive(cr) ||
- prefetch_ios < sbi->s_mb_prefetch_limit)) {
- nr = sbi->s_mb_prefetch;
- if (ext4_has_feature_flex_bg(sb)) {
- nr = 1 << sbi->s_log_groups_per_flex;
- nr -= group & (nr - 1);
- nr = min(nr, sbi->s_mb_prefetch);
- }
- prefetch_grp = ext4_mb_prefetch(sb, group,
- nr, &prefetch_ios);
- }
-
- /* This now checks without needing the buddy page */
- ret = ext4_mb_good_group_nolock(ac, group, cr);
- if (ret <= 0) {
- if (!first_err)
- first_err = ret;
- continue;
- }
-
- err = ext4_mb_load_buddy(sb, group, &e4b);
- if (err)
- goto out;
-
- ext4_lock_group(sb, group);
-
- /*
- * We need to check again after locking the
- * block group
- */
- ret = ext4_mb_good_group(ac, group, cr);
- if (ret == 0) {
- ext4_unlock_group(sb, group);
- ext4_mb_unload_buddy(&e4b);
- continue;
- }
-
- ac->ac_groups_scanned++;
- if (cr == CR_POWER2_ALIGNED)
- ext4_mb_simple_scan_group(ac, &e4b);
- else {
- bool is_stripe_aligned =
- (sbi->s_stripe >=
- sbi->s_cluster_ratio) &&
- !(ac->ac_g_ex.fe_len %
- EXT4_NUM_B2C(sbi, sbi->s_stripe));
-
- if ((cr == CR_GOAL_LEN_FAST ||
- cr == CR_BEST_AVAIL_LEN) &&
- is_stripe_aligned)
- ext4_mb_scan_aligned(ac, &e4b);
-
- if (ac->ac_status == AC_STATUS_CONTINUE)
- ext4_mb_complex_scan_group(ac, &e4b);
- }
+ ac->ac_criteria = CR_POWER2_ALIGNED;
- ext4_unlock_group(sb, group);
- ext4_mb_unload_buddy(&e4b);
-
- if (ac->ac_status != AC_STATUS_CONTINUE)
- break;
- }
- /* Processed all groups and haven't found blocks */
- if (sbi->s_mb_stats && i == ngroups)
- atomic64_inc(&sbi->s_bal_cX_failed[cr]);
+ ac->ac_e4b = &e4b;
+ ac->ac_prefetch_ios = 0;
+ ac->ac_first_err = 0;
+repeat:
+ while (ac->ac_criteria < EXT4_MB_NUM_CRS) {
+ err = ext4_mb_scan_groups(ac);
+ if (err)
+ goto out;
- if (i == ngroups && ac->ac_criteria == CR_BEST_AVAIL_LEN)
- /* Reset goal length to original goal length before
- * falling into CR_GOAL_LEN_SLOW */
- ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
+ if (ac->ac_status != AC_STATUS_CONTINUE)
+ break;
}
if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
@@ -2967,6 +3059,8 @@ repeat:
*/
ext4_mb_try_best_found(ac, &e4b);
if (ac->ac_status != AC_STATUS_FOUND) {
+ int lost;
+
/*
* Someone more lucky has already allocated it.
* The only thing we can do is just take first
@@ -2982,23 +3076,27 @@ repeat:
ac->ac_b_ex.fe_len = 0;
ac->ac_status = AC_STATUS_CONTINUE;
ac->ac_flags |= EXT4_MB_HINT_FIRST;
- cr = CR_ANY_FREE;
+ ac->ac_criteria = CR_ANY_FREE;
goto repeat;
}
}
- if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
+ if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) {
atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
+ if (ac->ac_flags & EXT4_MB_STREAM_ALLOC &&
+ ac->ac_b_ex.fe_group == ac->ac_g_ex.fe_group)
+ atomic_inc(&sbi->s_bal_stream_goals);
+ }
out:
- if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
- err = first_err;
+ if (!err && ac->ac_status != AC_STATUS_FOUND && ac->ac_first_err)
+ err = ac->ac_first_err;
mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
- ac->ac_flags, cr, err);
+ ac->ac_flags, ac->ac_criteria, err);
- if (nr)
- ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
+ if (ac->ac_prefetch_nr)
+ ext4_mb_prefetch_fini(sb, ac->ac_prefetch_grp, ac->ac_prefetch_nr);
return err;
}
@@ -3037,10 +3135,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
unsigned char blocksize_bits = min_t(unsigned char,
sb->s_blocksize_bits,
EXT4_MAX_BLOCK_LOG_SIZE);
- struct sg {
- struct ext4_group_info info;
- ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
- } sg;
+ DEFINE_RAW_FLEX(struct ext4_group_info, sg, bb_counters,
+ EXT4_MAX_BLOCK_LOG_SIZE + 2);
group--;
if (group == 0)
@@ -3048,7 +3144,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
" 2^0 2^1 2^2 2^3 2^4 2^5 2^6 "
" 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n");
- i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
+ i = (blocksize_bits + 2) * sizeof(sg->bb_counters[0]) +
sizeof(struct ext4_group_info);
grinfo = ext4_get_group_info(sb, group);
@@ -3068,14 +3164,14 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
* We care only about free space counters in the group info and
* these are safe to access even after the buddy has been unloaded
*/
- memcpy(&sg, grinfo, i);
- seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
- sg.info.bb_fragments, sg.info.bb_first_free);
+ memcpy(sg, grinfo, i);
+ seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg->bb_free,
+ sg->bb_fragments, sg->bb_first_free);
for (i = 0; i <= 13; i++)
seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
- sg.info.bb_counters[i] : 0);
+ sg->bb_counters[i] : 0);
seq_puts(seq, " ]");
- if (EXT4_MB_GRP_BBITMAP_CORRUPT(&sg.info))
+ if (EXT4_MB_GRP_BBITMAP_CORRUPT(sg))
seq_puts(seq, " Block bitmap corrupted!");
seq_putc(seq, '\n');
return 0;
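DEFINE_RAW_FLEX() above sizes a local buffer for a structure ending in a flexible array member; a heap-based plain-C equivalent of the same pattern, with hypothetical names, is roughly:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct group_counters {
	unsigned int free_blocks;
	unsigned int fragments;
	unsigned int counters[];	/* flexible array member */
};

int main(void)
{
	const unsigned int nr = 14;	/* e.g. buddy orders 2^0 .. 2^13 */
	struct group_counters *gc;

	gc = malloc(sizeof(*gc) + nr * sizeof(gc->counters[0]));
	if (!gc)
		return 1;
	memset(gc, 0, sizeof(*gc) + nr * sizeof(gc->counters[0]));

	gc->free_blocks = 100;
	gc->counters[3] = 7;
	printf("free=%u order3=%u\n", gc->free_blocks, gc->counters[3]);
	free(gc);
	return 0;
}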
@@ -3123,8 +3219,6 @@ int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED]));
seq_printf(seq, "\t\tuseless_loops: %llu\n",
atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED]));
- seq_printf(seq, "\t\tbad_suggestions: %u\n",
- atomic_read(&sbi->s_bal_p2_aligned_bad_suggestions));
/* CR_GOAL_LEN_FAST stats */
seq_puts(seq, "\tcr_goal_fast_stats:\n");
@@ -3137,8 +3231,6 @@ int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST]));
seq_printf(seq, "\t\tuseless_loops: %llu\n",
atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST]));
- seq_printf(seq, "\t\tbad_suggestions: %u\n",
- atomic_read(&sbi->s_bal_goal_fast_bad_suggestions));
/* CR_BEST_AVAIL_LEN stats */
seq_puts(seq, "\tcr_best_avail_stats:\n");
@@ -3152,8 +3244,6 @@ int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN]));
seq_printf(seq, "\t\tuseless_loops: %llu\n",
atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN]));
- seq_printf(seq, "\t\tbad_suggestions: %u\n",
- atomic_read(&sbi->s_bal_best_avail_bad_suggestions));
/* CR_GOAL_LEN_SLOW stats */
seq_puts(seq, "\tcr_goal_slow_stats:\n");
@@ -3183,6 +3273,8 @@ int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
seq_printf(seq, "\textents_scanned: %u\n",
atomic_read(&sbi->s_bal_ex_scanned));
seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
+ seq_printf(seq, "\t\tstream_goal_hits: %u\n",
+ atomic_read(&sbi->s_bal_stream_goals));
seq_printf(seq, "\t\tlen_goal_hits: %u\n",
atomic_read(&sbi->s_bal_len_goals));
seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
@@ -3229,6 +3321,7 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
unsigned long position = ((unsigned long) v);
struct ext4_group_info *grp;
unsigned int count;
+ unsigned long idx;
position--;
if (position >= MB_NUM_ORDERS(sb)) {
@@ -3237,11 +3330,8 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
seq_puts(seq, "avg_fragment_size_lists:\n");
count = 0;
- read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
- list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
- bb_avg_fragment_size_node)
+ xa_for_each(&sbi->s_mb_avg_fragment_size[position], idx, grp)
count++;
- read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
seq_printf(seq, "\tlist_order_%u_groups: %u\n",
(unsigned int)position, count);
return 0;
@@ -3253,11 +3343,8 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
seq_puts(seq, "max_free_order_lists:\n");
}
count = 0;
- read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
- list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
- bb_largest_free_order_node)
+ xa_for_each(&sbi->s_mb_largest_free_orders[position], idx, grp)
count++;
- read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
seq_printf(seq, "\tlist_order_%u_groups: %u\n",
(unsigned int)position, count);
@@ -3377,8 +3464,6 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
init_rwsem(&meta_group_info[i]->alloc_sem);
meta_group_info[i]->bb_free_root = RB_ROOT;
- INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
- INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */
meta_group_info[i]->bb_group = group;
@@ -3425,6 +3510,8 @@ static int ext4_mb_init_backend(struct super_block *sb)
* this will avoid confusion if it ever shows up during debugging. */
sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
+ ext4_set_inode_mapping_order(sbi->s_buddy_cache);
+
for (i = 0; i < ngroups; i++) {
cond_resched();
desc = ext4_get_group_desc(sb, i, NULL);
@@ -3588,6 +3675,30 @@ static void ext4_discard_work(struct work_struct *work)
ext4_mb_unload_buddy(&e4b);
}
+static inline void ext4_mb_avg_fragment_size_destroy(struct ext4_sb_info *sbi)
+{
+ if (!sbi->s_mb_avg_fragment_size)
+ return;
+
+ for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++)
+ xa_destroy(&sbi->s_mb_avg_fragment_size[i]);
+
+ kfree(sbi->s_mb_avg_fragment_size);
+ sbi->s_mb_avg_fragment_size = NULL;
+}
+
+static inline void ext4_mb_largest_free_orders_destroy(struct ext4_sb_info *sbi)
+{
+ if (!sbi->s_mb_largest_free_orders)
+ return;
+
+ for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++)
+ xa_destroy(&sbi->s_mb_largest_free_orders[i]);
+
+ kfree(sbi->s_mb_largest_free_orders);
+ sbi->s_mb_largest_free_orders = NULL;
+}
+
int ext4_mb_init(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -3633,44 +3744,27 @@ int ext4_mb_init(struct super_block *sb)
} while (i < MB_NUM_ORDERS(sb));
sbi->s_mb_avg_fragment_size =
- kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
+ kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct xarray),
GFP_KERNEL);
if (!sbi->s_mb_avg_fragment_size) {
ret = -ENOMEM;
goto out;
}
- sbi->s_mb_avg_fragment_size_locks =
- kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
- GFP_KERNEL);
- if (!sbi->s_mb_avg_fragment_size_locks) {
- ret = -ENOMEM;
- goto out;
- }
- for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
- INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
- rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
- }
+ for (i = 0; i < MB_NUM_ORDERS(sb); i++)
+ xa_init(&sbi->s_mb_avg_fragment_size[i]);
+
sbi->s_mb_largest_free_orders =
- kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
+ kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct xarray),
GFP_KERNEL);
if (!sbi->s_mb_largest_free_orders) {
ret = -ENOMEM;
goto out;
}
- sbi->s_mb_largest_free_orders_locks =
- kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
- GFP_KERNEL);
- if (!sbi->s_mb_largest_free_orders_locks) {
- ret = -ENOMEM;
- goto out;
- }
- for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
- INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
- rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
- }
+ for (i = 0; i < MB_NUM_ORDERS(sb); i++)
+ xa_init(&sbi->s_mb_largest_free_orders[i]);
spin_lock_init(&sbi->s_md_lock);
- sbi->s_mb_free_pending = 0;
+ atomic_set(&sbi->s_mb_free_pending, 0);
INIT_LIST_HEAD(&sbi->s_freed_data_list[0]);
INIT_LIST_HEAD(&sbi->s_freed_data_list[1]);
INIT_LIST_HEAD(&sbi->s_discard_list);
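The s_mb_free_pending conversion from a spinlock-protected counter to an atomic one follows the usual pattern; a minimal stand-alone sketch using C11 atomics (names invented) is:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long mb_free_pending;		/* was: plain counter + spinlock */

static void record_freed(long clusters)
{
	atomic_fetch_add_explicit(&mb_free_pending, clusters,
				  memory_order_relaxed);
}

static void buddy_released(long clusters)
{
	atomic_fetch_sub_explicit(&mb_free_pending, clusters,
				  memory_order_relaxed);
}

int main(void)
{
	record_freed(128);
	record_freed(64);
	buddy_released(128);
	printf("pending: %ld\n",
	       atomic_load_explicit(&mb_free_pending, memory_order_relaxed));
	return 0;
}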
@@ -3711,10 +3805,19 @@ int ext4_mb_init(struct super_block *sb)
sbi->s_mb_group_prealloc, EXT4_NUM_B2C(sbi, sbi->s_stripe));
}
+ sbi->s_mb_nr_global_goals = umin(num_possible_cpus(),
+ DIV_ROUND_UP(sbi->s_groups_count, 4));
+ sbi->s_mb_last_groups = kcalloc(sbi->s_mb_nr_global_goals,
+ sizeof(ext4_group_t), GFP_KERNEL);
+ if (sbi->s_mb_last_groups == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
if (sbi->s_locality_groups == NULL) {
ret = -ENOMEM;
- goto out;
+ goto out_free_last_groups;
}
for_each_possible_cpu(i) {
struct ext4_locality_group *lg;
@@ -3739,11 +3842,12 @@ int ext4_mb_init(struct super_block *sb)
out_free_locality_groups:
free_percpu(sbi->s_locality_groups);
sbi->s_locality_groups = NULL;
+out_free_last_groups:
+ kfree(sbi->s_mb_last_groups);
+ sbi->s_mb_last_groups = NULL;
out:
- kfree(sbi->s_mb_avg_fragment_size);
- kfree(sbi->s_mb_avg_fragment_size_locks);
- kfree(sbi->s_mb_largest_free_orders);
- kfree(sbi->s_mb_largest_free_orders_locks);
+ ext4_mb_avg_fragment_size_destroy(sbi);
+ ext4_mb_largest_free_orders_destroy(sbi);
kfree(sbi->s_mb_offsets);
sbi->s_mb_offsets = NULL;
kfree(sbi->s_mb_maxs);
@@ -3810,10 +3914,8 @@ void ext4_mb_release(struct super_block *sb)
kvfree(group_info);
rcu_read_unlock();
}
- kfree(sbi->s_mb_avg_fragment_size);
- kfree(sbi->s_mb_avg_fragment_size_locks);
- kfree(sbi->s_mb_largest_free_orders);
- kfree(sbi->s_mb_largest_free_orders_locks);
+ ext4_mb_avg_fragment_size_destroy(sbi);
+ ext4_mb_largest_free_orders_destroy(sbi);
kfree(sbi->s_mb_offsets);
kfree(sbi->s_mb_maxs);
iput(sbi->s_buddy_cache);
@@ -3843,6 +3945,7 @@ void ext4_mb_release(struct super_block *sb)
}
free_percpu(sbi->s_locality_groups);
+ kfree(sbi->s_mb_last_groups);
}
static inline int ext4_issue_discard(struct super_block *sb,
@@ -3873,10 +3976,7 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
/* we expect to find existing buddy because it's pinned */
BUG_ON(err != 0);
- spin_lock(&EXT4_SB(sb)->s_md_lock);
- EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
- spin_unlock(&EXT4_SB(sb)->s_md_lock);
-
+ atomic_sub(entry->efd_count, &EXT4_SB(sb)->s_mb_free_pending);
db = e4b.bd_info;
/* there are blocks to put in buddy to make them really free */
count += entry->efd_count;
@@ -3927,7 +4027,7 @@ void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
list_splice_tail(&freed_data_list, &sbi->s_discard_list);
spin_unlock(&sbi->s_md_lock);
if (wake)
- queue_work(system_unbound_wq, &sbi->s_discard_work);
+ queue_work(system_dfl_wq, &sbi->s_discard_work);
} else {
list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
kmem_cache_free(ext4_free_data_cachep, entry);
@@ -4642,7 +4742,7 @@ static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
"ext4: mb_load_buddy failed (%d)", err))
/*
* This should never happen since we pin the
- * pages in the ext4_allocation_context so
+ * folios in the ext4_allocation_context so
* ext4_mb_load_buddy() should never fail.
*/
return;
@@ -5653,7 +5753,7 @@ static inline void ext4_mb_show_pa(struct super_block *sb)
{
ext4_group_t i, ngroups;
- if (ext4_forced_shutdown(sb))
+ if (ext4_emergency_state(sb))
return;
ngroups = ext4_get_groups_count(sb);
@@ -5687,7 +5787,7 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
struct super_block *sb = ac->ac_sb;
- if (ext4_forced_shutdown(sb))
+ if (ext4_emergency_state(sb))
return;
mb_debug(sb, "Can't allocate:"
@@ -6280,28 +6380,63 @@ out:
* are contiguous, AND the extents were freed by the same transaction,
* AND the blocks are associated with the same group.
*/
-static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
- struct ext4_free_data *entry,
- struct ext4_free_data *new_entry,
- struct rb_root *entry_rb_root)
+static inline bool
+ext4_freed_extents_can_be_merged(struct ext4_free_data *entry1,
+ struct ext4_free_data *entry2)
{
- if ((entry->efd_tid != new_entry->efd_tid) ||
- (entry->efd_group != new_entry->efd_group))
- return;
- if (entry->efd_start_cluster + entry->efd_count ==
- new_entry->efd_start_cluster) {
- new_entry->efd_start_cluster = entry->efd_start_cluster;
- new_entry->efd_count += entry->efd_count;
- } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
- entry->efd_start_cluster) {
- new_entry->efd_count += entry->efd_count;
- } else
- return;
+ if (entry1->efd_tid != entry2->efd_tid)
+ return false;
+ if (entry1->efd_start_cluster + entry1->efd_count !=
+ entry2->efd_start_cluster)
+ return false;
+ if (WARN_ON_ONCE(entry1->efd_group != entry2->efd_group))
+ return false;
+ return true;
+}
+
+static inline void
+ext4_merge_freed_extents(struct ext4_sb_info *sbi, struct rb_root *root,
+ struct ext4_free_data *entry1,
+ struct ext4_free_data *entry2)
+{
+ entry1->efd_count += entry2->efd_count;
spin_lock(&sbi->s_md_lock);
- list_del(&entry->efd_list);
+ list_del(&entry2->efd_list);
spin_unlock(&sbi->s_md_lock);
- rb_erase(&entry->efd_node, entry_rb_root);
- kmem_cache_free(ext4_free_data_cachep, entry);
+ rb_erase(&entry2->efd_node, root);
+ kmem_cache_free(ext4_free_data_cachep, entry2);
+}
+
+static inline void
+ext4_try_merge_freed_extent_prev(struct ext4_sb_info *sbi, struct rb_root *root,
+ struct ext4_free_data *entry)
+{
+ struct ext4_free_data *prev;
+ struct rb_node *node;
+
+ node = rb_prev(&entry->efd_node);
+ if (!node)
+ return;
+
+ prev = rb_entry(node, struct ext4_free_data, efd_node);
+ if (ext4_freed_extents_can_be_merged(prev, entry))
+ ext4_merge_freed_extents(sbi, root, prev, entry);
+}
+
+static inline void
+ext4_try_merge_freed_extent_next(struct ext4_sb_info *sbi, struct rb_root *root,
+ struct ext4_free_data *entry)
+{
+ struct ext4_free_data *next;
+ struct rb_node *node;
+
+ node = rb_next(&entry->efd_node);
+ if (!node)
+ return;
+
+ next = rb_entry(node, struct ext4_free_data, efd_node);
+ if (ext4_freed_extents_can_be_merged(entry, next))
+ ext4_merge_freed_extents(sbi, root, entry, next);
}
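The merge predicate introduced here boils down to "same transaction, same group, first extent ends where the second begins"; a self-contained model of that check might be:

#include <stdbool.h>
#include <stdio.h>

struct freed_extent {
	unsigned long tid;		/* committing transaction */
	unsigned int group;		/* block group */
	unsigned int start_cluster;
	unsigned int count;
};

/* e1 must end exactly where e2 begins, in the same transaction and group. */
static bool can_merge(const struct freed_extent *e1,
		      const struct freed_extent *e2)
{
	if (e1->tid != e2->tid)
		return false;
	if (e1->start_cluster + e1->count != e2->start_cluster)
		return false;
	if (e1->group != e2->group)
		return false;
	return true;
}

static void merge(struct freed_extent *e1, const struct freed_extent *e2)
{
	e1->count += e2->count;		/* e2 would then be removed and freed */
}

int main(void)
{
	struct freed_extent a = { .tid = 7, .group = 3, .start_cluster = 100, .count = 20 };
	struct freed_extent b = { .tid = 7, .group = 3, .start_cluster = 120, .count = 8 };

	if (can_merge(&a, &b))
		merge(&a, &b);
	printf("merged: start=%u count=%u\n", a.start_cluster, a.count);
	return 0;
}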
static noinline_for_stack void
@@ -6311,11 +6446,12 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
ext4_group_t group = e4b->bd_group;
ext4_grpblk_t cluster;
ext4_grpblk_t clusters = new_entry->efd_count;
- struct ext4_free_data *entry;
+ struct ext4_free_data *entry = NULL;
struct ext4_group_info *db = e4b->bd_info;
struct super_block *sb = e4b->bd_sb;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct rb_node **n = &db->bb_free_root.rb_node, *node;
+ struct rb_root *root = &db->bb_free_root;
+ struct rb_node **n = &root->rb_node;
struct rb_node *parent = NULL, *new_node;
BUG_ON(!ext4_handle_valid(handle));
@@ -6351,27 +6487,30 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
}
}
- rb_link_node(new_node, parent, n);
- rb_insert_color(new_node, &db->bb_free_root);
+ atomic_add(clusters, &sbi->s_mb_free_pending);
+ if (!entry)
+ goto insert;
- /* Now try to see the extent can be merged to left and right */
- node = rb_prev(new_node);
- if (node) {
- entry = rb_entry(node, struct ext4_free_data, efd_node);
- ext4_try_merge_freed_extent(sbi, entry, new_entry,
- &(db->bb_free_root));
+ /* Now try to see if the extent can be merged with prev and next */
+ if (ext4_freed_extents_can_be_merged(new_entry, entry)) {
+ entry->efd_start_cluster = cluster;
+ entry->efd_count += new_entry->efd_count;
+ kmem_cache_free(ext4_free_data_cachep, new_entry);
+ ext4_try_merge_freed_extent_prev(sbi, root, entry);
+ return;
}
-
- node = rb_next(new_node);
- if (node) {
- entry = rb_entry(node, struct ext4_free_data, efd_node);
- ext4_try_merge_freed_extent(sbi, entry, new_entry,
- &(db->bb_free_root));
+ if (ext4_freed_extents_can_be_merged(entry, new_entry)) {
+ entry->efd_count += new_entry->efd_count;
+ kmem_cache_free(ext4_free_data_cachep, new_entry);
+ ext4_try_merge_freed_extent_next(sbi, root, entry);
+ return;
}
+insert:
+ rb_link_node(new_node, parent, n);
+ rb_insert_color(new_node, root);
spin_lock(&sbi->s_md_lock);
list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list[new_entry->efd_tid & 1]);
- sbi->s_mb_free_pending += clusters;
spin_unlock(&sbi->s_md_lock);
}
@@ -6644,7 +6783,8 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
for (i = 0; i < count; i++) {
cond_resched();
if (is_metadata)
- bh = sb_find_get_block(inode->i_sb, block + i);
+ bh = sb_find_get_block_nonatomic(inode->i_sb,
+ block + i);
ext4_forget(handle, is_metadata, inode, bh, block + i);
}
}
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index f8280de3e882..15a049f05d04 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -192,8 +192,13 @@ struct ext4_allocation_context {
*/
ext4_grpblk_t ac_orig_goal_len;
+ ext4_group_t ac_prefetch_grp;
+ unsigned int ac_prefetch_ios;
+ unsigned int ac_prefetch_nr;
+
+ int ac_first_err;
+
__u32 ac_flags; /* allocation hints */
- __u32 ac_groups_linear_remaining;
__u16 ac_groups_scanned;
__u16 ac_found;
__u16 ac_cX_found[EXT4_MB_NUM_CRS];
@@ -204,6 +209,8 @@ struct ext4_allocation_context {
__u8 ac_2order; /* if request is to allocate 2^N blocks and
* N > 0, the field stores N, otherwise 0 */
__u8 ac_op; /* operation, for history only */
+
+ struct ext4_buddy *ac_e4b;
struct folio *ac_bitmap_folio;
struct folio *ac_buddy_folio;
struct ext4_prealloc_space *ac_pa;
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index d64c04ed061a..6f57c181ff77 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -14,14 +14,14 @@ static __le32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
int offset = offsetof(struct mmp_struct, mmp_checksum);
__u32 csum;
- csum = ext4_chksum(sbi, sbi->s_csum_seed, (char *)mmp, offset);
+ csum = ext4_chksum(sbi->s_csum_seed, (char *)mmp, offset);
return cpu_to_le32(csum);
}
static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
{
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return 1;
return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp);
@@ -29,7 +29,7 @@ static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
{
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return;
mmp->mmp_checksum = ext4_mmp_csum(sb, mmp);
@@ -57,16 +57,12 @@ static int write_mmp_block_thawed(struct super_block *sb,
static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
{
- int err;
-
/*
* We protect against freezing so that we don't create dirty buffers
* on frozen filesystem.
*/
- sb_start_write(sb);
- err = write_mmp_block_thawed(sb, bh);
- sb_end_write(sb);
- return err;
+ scoped_guard(super_write, sb)
+ return write_mmp_block_thawed(sb, bh);
}
/*
@@ -162,7 +158,7 @@ static int kmmpd(void *data)
memcpy(mmp->mmp_nodename, init_utsname()->nodename,
sizeof(mmp->mmp_nodename));
- while (!kthread_should_stop() && !ext4_forced_shutdown(sb)) {
+ while (!kthread_should_stop() && !ext4_emergency_state(sb)) {
if (!ext4_has_feature_mmp(sb)) {
ext4_warning(sb, "kmmpd being stopped since MMP feature"
" has been disabled.");
@@ -231,9 +227,9 @@ static int kmmpd(void *data)
* Adjust the mmp_check_interval depending on how much time
* it took for the MMP block to be written.
*/
- mmp_check_interval = max(min(EXT4_MMP_CHECK_MULT * diff / HZ,
- EXT4_MMP_MAX_CHECK_INTERVAL),
- EXT4_MMP_MIN_CHECK_INTERVAL);
+ mmp_check_interval = clamp(EXT4_MMP_CHECK_MULT * diff / HZ,
+ EXT4_MMP_MIN_CHECK_INTERVAL,
+ EXT4_MMP_MAX_CHECK_INTERVAL);
mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
}
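The clamp() rewrite above is equivalent to the old max(min(...)) expression; a quick stand-alone check (interval bounds are made up):

#include <stdio.h>

static unsigned long clamp_ul(unsigned long v, unsigned long lo,
			      unsigned long hi)
{
	if (v < lo)
		return lo;
	if (v > hi)
		return hi;
	return v;
}

int main(void)
{
	unsigned long lo = 5, hi = 20;	/* example min/max check intervals */

	printf("%lu\n", clamp_ul(2, lo, hi));	/* 5  */
	printf("%lu\n", clamp_ul(12, lo, hi));	/* 12 */
	printf("%lu\n", clamp_ul(99, lo, hi));	/* 20 */
	return 0;
}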
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 898443e98efc..0550fd30fd10 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -13,28 +13,14 @@
#include "ext4.h"
#include "ext4_extents.h"
-/**
- * get_ext_path() - Find an extent path for designated logical block number.
- * @inode: inode to be searched
- * @lblock: logical block number to find an extent path
- * @path: pointer to an extent path
- *
- * ext4_find_extent wrapper. Return an extent path pointer on success,
- * or an error pointer on failure.
- */
-static inline struct ext4_ext_path *
-get_ext_path(struct inode *inode, ext4_lblk_t lblock,
- struct ext4_ext_path *path)
-{
- path = ext4_find_extent(inode, lblock, path, EXT4_EX_NOCACHE);
- if (IS_ERR(path))
- return path;
- if (path[ext_depth(inode)].p_ext == NULL) {
- ext4_free_ext_path(path);
- return ERR_PTR(-ENODATA);
- }
- return path;
-}
+#include <trace/events/ext4.h>
+
+struct mext_data {
+ struct inode *orig_inode; /* Origin file inode */
+ struct inode *donor_inode; /* Donor file inode */
+ struct ext4_map_blocks orig_map;/* Origin file's move mapping */
+ ext4_lblk_t donor_lblk; /* Start block of the donor file */
+};
/**
* ext4_double_down_write_data_sem() - write lock two inodes's i_data_sem
@@ -52,7 +38,6 @@ ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
} else {
down_write(&EXT4_I(second)->i_data_sem);
down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
-
}
}
@@ -71,59 +56,14 @@ ext4_double_up_write_data_sem(struct inode *orig_inode,
up_write(&EXT4_I(donor_inode)->i_data_sem);
}
-/**
- * mext_check_coverage - Check that all extents in range has the same type
- *
- * @inode: inode in question
- * @from: block offset of inode
- * @count: block count to be checked
- * @unwritten: extents expected to be unwritten
- * @err: pointer to save error value
- *
- * Return 1 if all extents in range has expected type, and zero otherwise.
- */
-static int
-mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
- int unwritten, int *err)
-{
- struct ext4_ext_path *path = NULL;
- struct ext4_extent *ext;
- int ret = 0;
- ext4_lblk_t last = from + count;
- while (from < last) {
- path = get_ext_path(inode, from, path);
- if (IS_ERR(path)) {
- *err = PTR_ERR(path);
- return ret;
- }
- ext = path[ext_depth(inode)].p_ext;
- if (unwritten != ext4_ext_is_unwritten(ext))
- goto out;
- from += ext4_ext_get_actual_len(ext);
- }
- ret = 1;
-out:
- ext4_free_ext_path(path);
- return ret;
-}
-
-/**
- * mext_folio_double_lock - Grab and lock folio on both @inode1 and @inode2
- *
- * @inode1: the inode structure
- * @inode2: the inode structure
- * @index1: folio index
- * @index2: folio index
- * @folio: result folio vector
- *
- * Grab two locked folio for inode's by inode order
- */
-static int
-mext_folio_double_lock(struct inode *inode1, struct inode *inode2,
- pgoff_t index1, pgoff_t index2, struct folio *folio[2])
+/* Grab and lock one folio on each of @inode1 and @inode2, in inode order. */
+static int mext_folio_double_lock(struct inode *inode1, struct inode *inode2,
+ pgoff_t index1, pgoff_t index2, size_t len,
+ struct folio *folio[2])
{
struct address_space *mapping[2];
unsigned int flags;
+ fgf_t fgp_flags = FGP_WRITEBEGIN;
BUG_ON(!inode1 || !inode2);
if (inode1 < inode2) {
@@ -136,14 +76,15 @@ mext_folio_double_lock(struct inode *inode1, struct inode *inode2,
}
flags = memalloc_nofs_save();
- folio[0] = __filemap_get_folio(mapping[0], index1, FGP_WRITEBEGIN,
+ fgp_flags |= fgf_set_order(len);
+ folio[0] = __filemap_get_folio(mapping[0], index1, fgp_flags,
mapping_gfp_mask(mapping[0]));
if (IS_ERR(folio[0])) {
memalloc_nofs_restore(flags);
return PTR_ERR(folio[0]);
}
- folio[1] = __filemap_get_folio(mapping[1], index2, FGP_WRITEBEGIN,
+ folio[1] = __filemap_get_folio(mapping[1], index2, fgp_flags,
mapping_gfp_mask(mapping[1]));
memalloc_nofs_restore(flags);
if (IS_ERR(folio[1])) {
@@ -164,8 +105,16 @@ mext_folio_double_lock(struct inode *inode1, struct inode *inode2,
return 0;
}
+static void mext_folio_double_unlock(struct folio *folio[2])
+{
+ folio_unlock(folio[0]);
+ folio_put(folio[0]);
+ folio_unlock(folio[1]);
+ folio_put(folio[1]);
+}
+
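The folio double-lock/unlock helpers rely on taking the two locks in a fixed order; the same idea in a user-space sketch with pthread mutexes ordered by address (illustrative only):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static void lock_pair_ordered(pthread_mutex_t *a, pthread_mutex_t *b)
{
	/* Always take the lower-addressed lock first to avoid ABBA deadlock. */
	if ((uintptr_t)a > (uintptr_t)b) {
		pthread_mutex_t *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}

static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	pthread_mutex_unlock(b);
}

int main(void)
{
	pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

	lock_pair_ordered(&m1, &m2);
	puts("both locked");
	unlock_pair(&m1, &m2);
	return 0;
}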
/* Force folio buffers uptodate w/o dropping folio's lock */
-static int mext_page_mkuptodate(struct folio *folio, size_t from, size_t to)
+static int mext_folio_mkuptodate(struct folio *folio, size_t from, size_t to)
{
struct inode *inode = folio->mapping->host;
sector_t block;
@@ -225,7 +174,7 @@ static int mext_page_mkuptodate(struct folio *folio, size_t from, size_t to)
do {
if (bh_offset(bh) + blocksize <= from)
continue;
- if (bh_offset(bh) > to)
+ if (bh_offset(bh) >= to)
break;
wait_on_buffer(bh);
if (buffer_uptodate(bh))
@@ -238,269 +187,313 @@ out:
return 0;
}
-/**
- * move_extent_per_page - Move extent data per page
- *
- * @o_filp: file structure of original file
- * @donor_inode: donor inode
- * @orig_page_offset: page index on original file
- * @donor_page_offset: page index on donor file
- * @data_offset_in_page: block index where data swapping starts
- * @block_len_in_page: the number of blocks to be swapped
- * @unwritten: orig extent is unwritten or not
- * @err: pointer to save return value
- *
- * Save the data in original inode blocks and replace original inode extents
- * with donor inode extents by calling ext4_swap_extents().
- * Finally, write out the saved data in new original inode blocks. Return
- * replaced block count.
+enum mext_move_type {MEXT_SKIP_EXTENT, MEXT_MOVE_EXTENT, MEXT_COPY_DATA};
+
+/*
+ * Start moving an extent between the origin inode and the donor inode:
+ * hold one folio for each inode and re-check the mapping status of the
+ * candidate moving extent.
*/
-static int
-move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
- pgoff_t orig_page_offset, pgoff_t donor_page_offset,
- int data_offset_in_page,
- int block_len_in_page, int unwritten, int *err)
+static int mext_move_begin(struct mext_data *mext, struct folio *folio[2],
+ enum mext_move_type *move_type)
{
- struct inode *orig_inode = file_inode(o_filp);
- struct folio *folio[2] = {NULL, NULL};
- handle_t *handle;
- ext4_lblk_t orig_blk_offset, donor_blk_offset;
- unsigned long blocksize = orig_inode->i_sb->s_blocksize;
- unsigned int tmp_data_size, data_size, replaced_size;
- int i, err2, jblocks, retries = 0;
- int replaced_count = 0;
- int from = data_offset_in_page << orig_inode->i_blkbits;
- int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
- struct super_block *sb = orig_inode->i_sb;
- struct buffer_head *bh = NULL;
+ struct inode *orig_inode = mext->orig_inode;
+ struct inode *donor_inode = mext->donor_inode;
+ unsigned int blkbits = orig_inode->i_blkbits;
+ struct ext4_map_blocks donor_map = {0};
+ loff_t orig_pos, donor_pos;
+ size_t move_len;
+ int ret;
+
+ orig_pos = ((loff_t)mext->orig_map.m_lblk) << blkbits;
+ donor_pos = ((loff_t)mext->donor_lblk) << blkbits;
+ ret = mext_folio_double_lock(orig_inode, donor_inode,
+ orig_pos >> PAGE_SHIFT, donor_pos >> PAGE_SHIFT,
+ ((size_t)mext->orig_map.m_len) << blkbits, folio);
+ if (ret)
+ return ret;
/*
- * It needs twice the amount of ordinary journal buffers because
- * inode and donor_inode may change each different metadata blocks.
+ * Check the origin inode's mapping information again under the
+ * folio lock, as we do not hold the i_data_sem at all times, and
+ * it may change during the concurrent write-back operation.
*/
-again:
- *err = 0;
- jblocks = ext4_writepage_trans_blocks(orig_inode) * 2;
- handle = ext4_journal_start(orig_inode, EXT4_HT_MOVE_EXTENTS, jblocks);
+ if (mext->orig_map.m_seq != READ_ONCE(EXT4_I(orig_inode)->i_es_seq)) {
+ ret = -ESTALE;
+ goto error;
+ }
+
+ /* Adjust the moving length according to the length of the shorter folio. */
+ move_len = umin(folio_pos(folio[0]) + folio_size(folio[0]) - orig_pos,
+ folio_pos(folio[1]) + folio_size(folio[1]) - donor_pos);
+ move_len >>= blkbits;
+ if (move_len < mext->orig_map.m_len)
+ mext->orig_map.m_len = move_len;
+
+ donor_map.m_lblk = mext->donor_lblk;
+ donor_map.m_len = mext->orig_map.m_len;
+ donor_map.m_flags = 0;
+ ret = ext4_map_blocks(NULL, donor_inode, &donor_map, 0);
+ if (ret < 0)
+ goto error;
+
+ /* Adjust the moving length according to the donor mapping length. */
+ mext->orig_map.m_len = donor_map.m_len;
+
+ /* Skip moving if the donor range is a hole or a delalloc extent. */
+ if (!(donor_map.m_flags & (EXT4_MAP_MAPPED | EXT4_MAP_UNWRITTEN)))
+ *move_type = MEXT_SKIP_EXTENT;
+ /* If both mapping ranges are unwritten, no need to copy data. */
+ else if ((mext->orig_map.m_flags & EXT4_MAP_UNWRITTEN) &&
+ (donor_map.m_flags & EXT4_MAP_UNWRITTEN))
+ *move_type = MEXT_MOVE_EXTENT;
+ else
+ *move_type = MEXT_COPY_DATA;
+
+ return 0;
+error:
+ mext_folio_double_unlock(folio);
+ return ret;
+}
+
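The length clamping in mext_move_begin() picks the shorter remaining span of the two folios; a simplified stand-alone model (arbitrary example sizes, hypothetical names):

#include <stdint.h>
#include <stdio.h>

static uint64_t umin64(uint64_t a, uint64_t b)
{
	return a < b ? a : b;
}

/* How many blocks fit between each position and the end of its folio? */
static uint64_t clamp_move_len(uint64_t orig_pos, uint64_t orig_folio_pos,
			       uint64_t orig_folio_size,
			       uint64_t donor_pos, uint64_t donor_folio_pos,
			       uint64_t donor_folio_size,
			       unsigned int blkbits, uint64_t want_blocks)
{
	uint64_t bytes = umin64(orig_folio_pos + orig_folio_size - orig_pos,
				donor_folio_pos + donor_folio_size - donor_pos);

	return umin64(bytes >> blkbits, want_blocks);
}

int main(void)
{
	/* 64K origin folio, 16K donor folio, 4K blocks, asking for 32 blocks */
	uint64_t len = clamp_move_len(65536, 65536, 65536,
				      16384, 16384, 16384, 12, 32);
	printf("move %llu blocks\n", (unsigned long long)len);	/* 4 */
	return 0;
}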
+/*
+ * Re-create the new moved mapping buffers of the original inode and commit
+ * the entire written range.
+ */
+static int mext_folio_mkwrite(struct inode *inode, struct folio *folio,
+ size_t from, size_t to)
+{
+ unsigned int blocksize = i_blocksize(inode);
+ struct buffer_head *bh, *head;
+ size_t block_start, block_end;
+ sector_t block;
+ int ret;
+
+ head = folio_buffers(folio);
+ if (!head)
+ head = create_empty_buffers(folio, blocksize, 0);
+
+ block = folio_pos(folio) >> inode->i_blkbits;
+ block_end = 0;
+ bh = head;
+ do {
+ block_start = block_end;
+ block_end = block_start + blocksize;
+ if (block_end <= from || block_start >= to)
+ continue;
+
+ ret = ext4_get_block(inode, block, bh, 0);
+ if (ret)
+ return ret;
+ } while (block++, (bh = bh->b_this_page) != head);
+
+ block_commit_write(folio, from, to);
+ return 0;
+}
+
+/*
+ * Save the data in the original inode's extent blocks and replace one
+ * folio-size-aligned original inode extent with one whole or partial donor
+ * inode extent,
+ * and then write out the saved data in new original inode blocks. Pass out
+ * the replaced block count through m_len. Return 0 on success, and an error
+ * code otherwise.
+ */
+static int mext_move_extent(struct mext_data *mext, u64 *m_len)
+{
+ struct inode *orig_inode = mext->orig_inode;
+ struct inode *donor_inode = mext->donor_inode;
+ struct ext4_map_blocks *orig_map = &mext->orig_map;
+ unsigned int blkbits = orig_inode->i_blkbits;
+ struct folio *folio[2] = {NULL, NULL};
+ loff_t from, length;
+ enum mext_move_type move_type = 0;
+ handle_t *handle;
+ u64 r_len = 0;
+ unsigned int credits;
+ int ret, ret2;
+
+ *m_len = 0;
+ trace_ext4_move_extent_enter(orig_inode, orig_map, donor_inode,
+ mext->donor_lblk);
+ credits = ext4_chunk_trans_extent(orig_inode, 0) * 2;
+ handle = ext4_journal_start(orig_inode, EXT4_HT_MOVE_EXTENTS, credits);
if (IS_ERR(handle)) {
- *err = PTR_ERR(handle);
- return 0;
+ ret = PTR_ERR(handle);
+ goto out;
}
- orig_blk_offset = orig_page_offset * blocks_per_page +
- data_offset_in_page;
-
- donor_blk_offset = donor_page_offset * blocks_per_page +
- data_offset_in_page;
-
- /* Calculate data_size */
- if ((orig_blk_offset + block_len_in_page - 1) ==
- ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) {
- /* Replace the last block */
- tmp_data_size = orig_inode->i_size & (blocksize - 1);
- /*
- * If data_size equal zero, it shows data_size is multiples of
- * blocksize. So we set appropriate value.
- */
- if (tmp_data_size == 0)
- tmp_data_size = blocksize;
-
- data_size = tmp_data_size +
- ((block_len_in_page - 1) << orig_inode->i_blkbits);
- } else
- data_size = block_len_in_page << orig_inode->i_blkbits;
-
- replaced_size = data_size;
-
- *err = mext_folio_double_lock(orig_inode, donor_inode, orig_page_offset,
- donor_page_offset, folio);
- if (unlikely(*err < 0))
- goto stop_journal;
+ ret = mext_move_begin(mext, folio, &move_type);
+ if (ret)
+ goto stop_handle;
+
+ if (move_type == MEXT_SKIP_EXTENT)
+ goto unlock;
+
/*
- * If orig extent was unwritten it can become initialized
- * at any time after i_data_sem was dropped, in order to
- * serialize with delalloc we have recheck extent while we
- * hold page's lock, if it is still the case data copy is not
- * necessary, just swap data blocks between orig and donor.
+ * Copy the data. First, read the original inode data into the page
+ * cache. Then, release the existing mapping relationships and swap
+ * the extent. Finally, re-establish the new mapping relationships
+ * and dirty the page cache.
*/
-
- VM_BUG_ON_FOLIO(folio_test_large(folio[0]), folio[0]);
- VM_BUG_ON_FOLIO(folio_test_large(folio[1]), folio[1]);
- VM_BUG_ON_FOLIO(folio_nr_pages(folio[0]) != folio_nr_pages(folio[1]), folio[1]);
-
- if (unwritten) {
- ext4_double_down_write_data_sem(orig_inode, donor_inode);
- /* If any of extents in range became initialized we have to
- * fallback to data copying */
- unwritten = mext_check_coverage(orig_inode, orig_blk_offset,
- block_len_in_page, 1, err);
- if (*err)
- goto drop_data_sem;
-
- unwritten &= mext_check_coverage(donor_inode, donor_blk_offset,
- block_len_in_page, 1, err);
- if (*err)
- goto drop_data_sem;
-
- if (!unwritten) {
- ext4_double_up_write_data_sem(orig_inode, donor_inode);
- goto data_copy;
- }
- if (!filemap_release_folio(folio[0], 0) ||
- !filemap_release_folio(folio[1], 0)) {
- *err = -EBUSY;
- goto drop_data_sem;
- }
- replaced_count = ext4_swap_extents(handle, orig_inode,
- donor_inode, orig_blk_offset,
- donor_blk_offset,
- block_len_in_page, 1, err);
- drop_data_sem:
- ext4_double_up_write_data_sem(orig_inode, donor_inode);
- goto unlock_folios;
+ if (move_type == MEXT_COPY_DATA) {
+ from = offset_in_folio(folio[0],
+ ((loff_t)orig_map->m_lblk) << blkbits);
+ length = ((loff_t)orig_map->m_len) << blkbits;
+
+ ret = mext_folio_mkuptodate(folio[0], from, from + length);
+ if (ret)
+ goto unlock;
}
-data_copy:
- *err = mext_page_mkuptodate(folio[0], from, from + replaced_size);
- if (*err)
- goto unlock_folios;
- /* At this point all buffers in range are uptodate, old mapping layout
- * is no longer required, try to drop it now. */
if (!filemap_release_folio(folio[0], 0) ||
!filemap_release_folio(folio[1], 0)) {
- *err = -EBUSY;
- goto unlock_folios;
+ ret = -EBUSY;
+ goto unlock;
}
+
+ /* Move extent */
ext4_double_down_write_data_sem(orig_inode, donor_inode);
- replaced_count = ext4_swap_extents(handle, orig_inode, donor_inode,
- orig_blk_offset, donor_blk_offset,
- block_len_in_page, 1, err);
+ *m_len = ext4_swap_extents(handle, orig_inode, donor_inode,
+ orig_map->m_lblk, mext->donor_lblk,
+ orig_map->m_len, 1, &ret);
ext4_double_up_write_data_sem(orig_inode, donor_inode);
- if (*err) {
- if (replaced_count) {
- block_len_in_page = replaced_count;
- replaced_size =
- block_len_in_page << orig_inode->i_blkbits;
- } else
- goto unlock_folios;
- }
- /* Perform all necessary steps similar write_begin()/write_end()
- * but keeping in mind that i_size will not change */
- bh = folio_buffers(folio[0]);
- if (!bh)
- bh = create_empty_buffers(folio[0],
- 1 << orig_inode->i_blkbits, 0);
- for (i = 0; i < data_offset_in_page; i++)
- bh = bh->b_this_page;
- for (i = 0; i < block_len_in_page; i++) {
- *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
- if (*err < 0)
- goto repair_branches;
- bh = bh->b_this_page;
- }
- block_commit_write(&folio[0]->page, from, from + replaced_size);
+ /* A short-length swap cannot occur after a successful extent swap. */
+ if (WARN_ON_ONCE(!ret && (*m_len != orig_map->m_len)))
+ ret = -EIO;
- /* Even in case of data=writeback it is reasonable to pin
- * inode to transaction, to prevent unexpected data loss */
- *err = ext4_jbd2_inode_add_write(handle, orig_inode,
- (loff_t)orig_page_offset << PAGE_SHIFT, replaced_size);
+ if (!(*m_len) || (move_type == MEXT_MOVE_EXTENT))
+ goto unlock;
-unlock_folios:
- folio_unlock(folio[0]);
- folio_put(folio[0]);
- folio_unlock(folio[1]);
- folio_put(folio[1]);
-stop_journal:
+ /* Copy data */
+ length = (*m_len) << blkbits;
+ ret2 = mext_folio_mkwrite(orig_inode, folio[0], from, from + length);
+ if (ret2) {
+ if (!ret)
+ ret = ret2;
+ goto repair_branches;
+ }
+ /*
+ * Even in case of data=writeback it is reasonable to pin
+ * inode to transaction, to prevent unexpected data loss.
+ */
+ ret2 = ext4_jbd2_inode_add_write(handle, orig_inode,
+ ((loff_t)orig_map->m_lblk) << blkbits, length);
+ if (!ret)
+ ret = ret2;
+unlock:
+ mext_folio_double_unlock(folio);
+stop_handle:
ext4_journal_stop(handle);
- if (*err == -ENOSPC &&
- ext4_should_retry_alloc(sb, &retries))
- goto again;
- /* Buffer was busy because probably is pinned to journal transaction,
- * force transaction commit may help to free it. */
- if (*err == -EBUSY && retries++ < 4 && EXT4_SB(sb)->s_journal &&
- jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal))
- goto again;
- return replaced_count;
+out:
+ trace_ext4_move_extent_exit(orig_inode, orig_map->m_lblk, donor_inode,
+ mext->donor_lblk, orig_map->m_len, *m_len,
+ move_type, ret);
+ return ret;
repair_branches:
- /*
- * This should never ever happen!
- * Extents are swapped already, but we are not able to copy data.
- * Try to swap extents to it's original places
- */
- ext4_double_down_write_data_sem(orig_inode, donor_inode);
- replaced_count = ext4_swap_extents(handle, donor_inode, orig_inode,
- orig_blk_offset, donor_blk_offset,
- block_len_in_page, 0, &err2);
- ext4_double_up_write_data_sem(orig_inode, donor_inode);
- if (replaced_count != block_len_in_page) {
- ext4_error_inode_block(orig_inode, (sector_t)(orig_blk_offset),
- EIO, "Unable to copy data block,"
- " data will be lost.");
- *err = -EIO;
+ ret2 = 0;
+ r_len = ext4_swap_extents(handle, donor_inode, orig_inode,
+ mext->donor_lblk, orig_map->m_lblk,
+ *m_len, 0, &ret2);
+ if (ret2 || r_len != *m_len) {
+ ext4_error_inode_block(orig_inode, (sector_t)(orig_map->m_lblk),
+ EIO, "Unable to copy data block, data will be lost!");
+ ret = -EIO;
}
- replaced_count = 0;
- goto unlock_folios;
+ *m_len = 0;
+ goto unlock;
}
-/**
- * mext_check_arguments - Check whether move extent can be done
- *
- * @orig_inode: original inode
- * @donor_inode: donor inode
- * @orig_start: logical start offset in block for orig
- * @donor_start: logical start offset in block for donor
- * @len: the number of blocks to be moved
- *
- * Check the arguments of ext4_move_extents() whether the files can be
- * exchanged with each other.
- * Return 0 on success, or a negative error value on failure.
+/*
+ * Check the validity of the basic filesystem environment and the
+ * inodes' support status.
*/
-static int
-mext_check_arguments(struct inode *orig_inode,
- struct inode *donor_inode, __u64 orig_start,
- __u64 donor_start, __u64 *len)
+static int mext_check_validity(struct inode *orig_inode,
+ struct inode *donor_inode)
{
- __u64 orig_eof, donor_eof;
- unsigned int blkbits = orig_inode->i_blkbits;
- unsigned int blocksize = 1 << blkbits;
+ struct super_block *sb = orig_inode->i_sb;
+
+ /* origin and donor should be different inodes */
+ if (orig_inode == donor_inode) {
+ ext4_debug("ext4 move extent: The argument files should not be same inode [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EINVAL;
+ }
+
+ /* origin and donor should belong to the same filesystem */
+ if (orig_inode->i_sb != donor_inode->i_sb) {
+ ext4_debug("ext4 move extent: The argument files should be in same FS [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EINVAL;
+ }
+
+ /* Regular file check */
+ if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
+ ext4_debug("ext4 move extent: The argument files should be regular file [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EINVAL;
+ }
- orig_eof = (i_size_read(orig_inode) + blocksize - 1) >> blkbits;
- donor_eof = (i_size_read(donor_inode) + blocksize - 1) >> blkbits;
+ if (ext4_has_feature_bigalloc(sb)) {
+ ext4_msg(sb, KERN_ERR,
+ "Online defrag not supported with bigalloc");
+ return -EOPNOTSUPP;
+ }
+ if (IS_DAX(orig_inode)) {
+ ext4_msg(sb, KERN_ERR,
+ "Online defrag not supported with DAX");
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * TODO: it's not obvious how to swap blocks for inodes with full
+ * journaling enabled.
+ */
+ if (ext4_should_journal_data(orig_inode) ||
+ ext4_should_journal_data(donor_inode)) {
+ ext4_msg(sb, KERN_ERR,
+ "Online defrag not supported with data journaling");
+ return -EOPNOTSUPP;
+ }
+
+ if (IS_ENCRYPTED(orig_inode) || IS_ENCRYPTED(donor_inode)) {
+ ext4_msg(sb, KERN_ERR,
+ "Online defrag not supported for encrypted files");
+ return -EOPNOTSUPP;
+ }
+
+ /* Ext4 move extent supports only extent based file */
+ if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS)) ||
+ !(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) {
+ ext4_msg(sb, KERN_ERR,
+ "Online defrag not supported for non-extent files");
+ return -EOPNOTSUPP;
+ }
if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
- ext4_debug("ext4 move extent: suid or sgid is set"
- " to donor file [ino:orig %lu, donor %lu]\n",
+ ext4_debug("ext4 move extent: suid or sgid is set to donor file [ino:orig %lu, donor %lu]\n",
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
- if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
+ if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode)) {
+ ext4_debug("ext4 move extent: donor should not be immutable or append file [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
return -EPERM;
+ }
/* Ext4 move extent does not support swap files */
if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
ext4_debug("ext4 move extent: The argument files should not be swap files [ino:orig %lu, donor %lu]\n",
- orig_inode->i_ino, donor_inode->i_ino);
+ orig_inode->i_ino, donor_inode->i_ino);
return -ETXTBSY;
}
- if (ext4_is_quota_file(orig_inode) && ext4_is_quota_file(donor_inode)) {
+ if (ext4_is_quota_file(orig_inode) || ext4_is_quota_file(donor_inode)) {
ext4_debug("ext4 move extent: The argument files should not be quota files [ino:orig %lu, donor %lu]\n",
- orig_inode->i_ino, donor_inode->i_ino);
- return -EOPNOTSUPP;
- }
-
- /* Ext4 move extent supports only extent based file */
- if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
- ext4_debug("ext4 move extent: orig file is not extents "
- "based file [ino:orig %lu]\n", orig_inode->i_ino);
- return -EOPNOTSUPP;
- } else if (!(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) {
- ext4_debug("ext4 move extent: donor file is not extents "
- "based file [ino:donor %lu]\n", donor_inode->i_ino);
+ orig_inode->i_ino, donor_inode->i_ino);
return -EOPNOTSUPP;
}
@@ -509,12 +502,25 @@ mext_check_arguments(struct inode *orig_inode,
return -EINVAL;
}
+ return 0;
+}
+
+/*
+ * Check whether the moving range of ext4_move_extents() allows the files to
+ * be exchanged with each other, and adjust the length to fit within the file
+ * sizes. Return 0 on success, or a negative error value on failure.
+ */
+static int mext_check_adjust_range(struct inode *orig_inode,
+ struct inode *donor_inode, __u64 orig_start,
+ __u64 donor_start, __u64 *len)
+{
+ __u64 orig_eof, donor_eof;
+
/* Start offset should be same */
if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) !=
(donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) {
- ext4_debug("ext4 move extent: orig and donor's start "
- "offsets are not aligned [ino:orig %lu, donor %lu]\n",
- orig_inode->i_ino, donor_inode->i_ino);
+ ext4_debug("ext4 move extent: orig and donor's start offsets are not aligned [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
@@ -523,11 +529,14 @@ mext_check_arguments(struct inode *orig_inode,
(*len > EXT_MAX_BLOCKS) ||
(donor_start + *len >= EXT_MAX_BLOCKS) ||
(orig_start + *len >= EXT_MAX_BLOCKS)) {
- ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
- "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
- orig_inode->i_ino, donor_inode->i_ino);
+ ext4_debug("ext4 move extent: Can't handle over [%u] blocks [ino:orig %lu, donor %lu]\n",
+ EXT_MAX_BLOCKS,
+ orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
+
+ orig_eof = EXT4_B_TO_LBLK(orig_inode, i_size_read(orig_inode));
+ donor_eof = EXT4_B_TO_LBLK(donor_inode, i_size_read(donor_inode));
if (orig_eof <= orig_start)
*len = 0;
else if (orig_eof < orig_start + *len - 1)
@@ -537,9 +546,8 @@ mext_check_arguments(struct inode *orig_inode,
else if (donor_eof < donor_start + *len - 1)
*len = donor_eof - donor_start;
if (!*len) {
- ext4_debug("ext4 move extent: len should not be 0 "
- "[ino:orig %lu, donor %lu]\n", orig_inode->i_ino,
- donor_inode->i_ino);
+ ext4_debug("ext4 move extent: len should not be 0 [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
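The EOF trimming above rounds each file size up to whole blocks and then clips the requested range; a simplified model (it omits the -1 boundary handling of the real code) could be:

#include <stdint.h>
#include <stdio.h>

static uint64_t size_to_blocks(uint64_t isize, unsigned int blkbits)
{
	uint64_t bs = 1ull << blkbits;

	return (isize + bs - 1) >> blkbits;	/* round up to a full block */
}

static uint64_t trim_len_to_eof(uint64_t start, uint64_t len, uint64_t eof)
{
	if (eof <= start)
		return 0;
	if (eof < start + len)
		return eof - start;
	return len;
}

int main(void)
{
	uint64_t orig_eof = size_to_blocks(10000, 12);	/* 3 blocks of 4K */
	uint64_t len = trim_len_to_eof(1, 10, orig_eof);

	printf("eof=%llu len=%llu\n", (unsigned long long)orig_eof,
	       (unsigned long long)len);		/* eof=3 len=2 */
	return 0;
}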
@@ -558,140 +566,81 @@ mext_check_arguments(struct inode *orig_inode,
*
* This function returns 0 and moved block length is set in moved_len
* if succeed, otherwise returns error value.
- *
*/
-int
-ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
- __u64 donor_blk, __u64 len, __u64 *moved_len)
+int ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
+ __u64 donor_blk, __u64 len, __u64 *moved_len)
{
struct inode *orig_inode = file_inode(o_filp);
struct inode *donor_inode = file_inode(d_filp);
- struct ext4_ext_path *path = NULL;
- int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
- ext4_lblk_t o_end, o_start = orig_blk;
- ext4_lblk_t d_start = donor_blk;
+ struct mext_data mext;
+ struct super_block *sb = orig_inode->i_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int retries = 0;
+ u64 m_len;
int ret;
- if (orig_inode->i_sb != donor_inode->i_sb) {
- ext4_debug("ext4 move extent: The argument files "
- "should be in same FS [ino:orig %lu, donor %lu]\n",
- orig_inode->i_ino, donor_inode->i_ino);
- return -EINVAL;
- }
-
- /* orig and donor should be different inodes */
- if (orig_inode == donor_inode) {
- ext4_debug("ext4 move extent: The argument files should not "
- "be same inode [ino:orig %lu, donor %lu]\n",
- orig_inode->i_ino, donor_inode->i_ino);
- return -EINVAL;
- }
-
- /* Regular file check */
- if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
- ext4_debug("ext4 move extent: The argument files should be "
- "regular file [ino:orig %lu, donor %lu]\n",
- orig_inode->i_ino, donor_inode->i_ino);
- return -EINVAL;
- }
-
- /* TODO: it's not obvious how to swap blocks for inodes with full
- journaling enabled */
- if (ext4_should_journal_data(orig_inode) ||
- ext4_should_journal_data(donor_inode)) {
- ext4_msg(orig_inode->i_sb, KERN_ERR,
- "Online defrag not supported with data journaling");
- return -EOPNOTSUPP;
- }
-
- if (IS_ENCRYPTED(orig_inode) || IS_ENCRYPTED(donor_inode)) {
- ext4_msg(orig_inode->i_sb, KERN_ERR,
- "Online defrag not supported for encrypted files");
- return -EOPNOTSUPP;
- }
+ *moved_len = 0;
/* Protect orig and donor inodes against a truncate */
lock_two_nondirectories(orig_inode, donor_inode);
+ ret = mext_check_validity(orig_inode, donor_inode);
+ if (ret)
+ goto out;
+
/* Wait for all existing dio workers */
inode_dio_wait(orig_inode);
inode_dio_wait(donor_inode);
- /* Protect extent tree against block allocations via delalloc */
- ext4_double_down_write_data_sem(orig_inode, donor_inode);
- /* Check the filesystem environment whether move_extent can be done */
- ret = mext_check_arguments(orig_inode, donor_inode, orig_blk,
- donor_blk, &len);
+ /* Check and adjust the specified move_extent range. */
+ ret = mext_check_adjust_range(orig_inode, donor_inode, orig_blk,
+ donor_blk, &len);
if (ret)
goto out;
- o_end = o_start + len;
- *moved_len = 0;
- while (o_start < o_end) {
- struct ext4_extent *ex;
- ext4_lblk_t cur_blk, next_blk;
- pgoff_t orig_page_index, donor_page_index;
- int offset_in_page;
- int unwritten, cur_len;
-
- path = get_ext_path(orig_inode, o_start, path);
- if (IS_ERR(path)) {
- ret = PTR_ERR(path);
+ mext.orig_inode = orig_inode;
+ mext.donor_inode = donor_inode;
+ while (len) {
+ mext.orig_map.m_lblk = orig_blk;
+ mext.orig_map.m_len = len;
+ mext.orig_map.m_flags = 0;
+ mext.donor_lblk = donor_blk;
+
+ ret = ext4_map_blocks(NULL, orig_inode, &mext.orig_map, 0);
+ if (ret < 0)
goto out;
- }
- ex = path[path->p_depth].p_ext;
- cur_blk = le32_to_cpu(ex->ee_block);
- cur_len = ext4_ext_get_actual_len(ex);
- /* Check hole before the start pos */
- if (cur_blk + cur_len - 1 < o_start) {
- next_blk = ext4_ext_next_allocated_block(path);
- if (next_blk == EXT_MAX_BLOCKS) {
- ret = -ENODATA;
- goto out;
+
+ /* Skip moving if it is a hole or a delalloc extent. */
+ if (mext.orig_map.m_flags &
+ (EXT4_MAP_MAPPED | EXT4_MAP_UNWRITTEN)) {
+ ret = mext_move_extent(&mext, &m_len);
+ *moved_len += m_len;
+ if (!ret)
+ goto next;
+
+ /* Move failed or partially failed. */
+ if (m_len) {
+ orig_blk += m_len;
+ donor_blk += m_len;
+ len -= m_len;
}
- d_start += next_blk - o_start;
- o_start = next_blk;
- continue;
- /* Check hole after the start pos */
- } else if (cur_blk > o_start) {
- /* Skip hole */
- d_start += cur_blk - o_start;
- o_start = cur_blk;
- /* Extent inside requested range ?*/
- if (cur_blk >= o_end)
- goto out;
- } else { /* in_range(o_start, o_blk, o_len) */
- cur_len += cur_blk - o_start;
+ if (ret == -ESTALE)
+ continue;
+ if (ret == -ENOSPC &&
+ ext4_should_retry_alloc(sb, &retries))
+ continue;
+ if (ret == -EBUSY &&
+ sbi->s_journal && retries++ < 4 &&
+ jbd2_journal_force_commit_nested(sbi->s_journal))
+ continue;
+
+ goto out;
}
- unwritten = ext4_ext_is_unwritten(ex);
- if (o_end - o_start < cur_len)
- cur_len = o_end - o_start;
-
- orig_page_index = o_start >> (PAGE_SHIFT -
- orig_inode->i_blkbits);
- donor_page_index = d_start >> (PAGE_SHIFT -
- donor_inode->i_blkbits);
- offset_in_page = o_start % blocks_per_page;
- if (cur_len > blocks_per_page - offset_in_page)
- cur_len = blocks_per_page - offset_in_page;
- /*
- * Up semaphore to avoid following problems:
- * a. transaction deadlock among ext4_journal_start,
- * ->write_begin via pagefault, and jbd2_journal_commit
- * b. racing with ->read_folio, ->write_begin, and
- * ext4_get_block in move_extent_per_page
- */
- ext4_double_up_write_data_sem(orig_inode, donor_inode);
- /* Swap original branches with new branches */
- *moved_len += move_extent_per_page(o_filp, donor_inode,
- orig_page_index, donor_page_index,
- offset_in_page, cur_len,
- unwritten, &ret);
- ext4_double_down_write_data_sem(orig_inode, donor_inode);
- if (ret < 0)
- break;
- o_start += cur_len;
- d_start += cur_len;
+next:
+ orig_blk += mext.orig_map.m_len;
+ donor_blk += mext.orig_map.m_len;
+ len -= mext.orig_map.m_len;
+ retries = 0;
}
out:
@@ -700,9 +649,6 @@ out:
ext4_discard_preallocations(donor_inode);
}
- ext4_free_ext_path(path);
- ext4_double_up_write_data_sem(orig_inode, donor_inode);
unlock_two_nondirectories(orig_inode, donor_inode);
-
return ret;
}
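
The rewritten ext4_move_extents() loop above is driven by ext4_map_blocks(): it maps the next chunk of the original range, skips holes and delalloc extents, and retries on transient failures (-ESTALE after a raced extent change, -ENOSPC while the allocator may still succeed, -EBUSY while a nested journal commit can free space). The standalone C program below mimics only that control flow; map_range(), move_mapped() and the toy mapping are made-up stand-ins for the ext4 helpers, and only the -ENOSPC retry is modelled.

/* Minimal userspace sketch of the mapping-driven move loop above.
 * map_range() and move_mapped() are hypothetical stand-ins for
 * ext4_map_blocks() and mext_move_extent(); they are not the ext4 API. */
#include <errno.h>
#include <stdio.h>

struct map { unsigned long lblk, len; int mapped; };

/* Pretend mapping: blocks 0..9 are a hole, everything after is mapped. */
static void map_range(struct map *m)
{
	if (m->lblk < 10) {
		m->len = 10 - m->lblk;	/* hole: report its length */
		m->mapped = 0;
	} else {
		m->mapped = 1;		/* mapped: keep the requested length */
	}
}

/* Pretend move: always succeeds, reporting how many blocks it handled. */
static int move_mapped(const struct map *m, unsigned long *moved)
{
	*moved = m->len;
	return 0;
}

int main(void)
{
	unsigned long orig = 0, donor = 1000, len = 50, total = 0;
	int retries = 0;

	while (len) {
		struct map m = { .lblk = orig, .len = len };
		unsigned long moved = 0;
		int ret;

		map_range(&m);
		if (m.mapped) {
			ret = move_mapped(&m, &moved);
			total += moved;
			if (ret == -ENOSPC && retries++ < 3)
				continue;	/* transient: retry the same range */
			if (ret)
				break;		/* hard failure: give up */
		}
		orig += m.len;			/* hole or finished chunk: advance */
		donor += m.len;
		len -= m.len;
		retries = 0;
	}
	printf("moved %lu blocks (donor cursor at %lu)\n", total, donor);
	return 0;
}

Compiled as plain userspace C it skips the leading 10-block hole and reports 40 blocks moved, which is the shape of the loop the patch introduces.
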
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index bcf2737078b8..c4b5e252af0e 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -176,7 +176,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
brelse(bh);
return ERR_PTR(-EFSCORRUPTED);
}
- if (!ext4_has_metadata_csum(inode->i_sb) ||
+ if (!ext4_has_feature_metadata_csum(inode->i_sb) ||
buffer_verified(bh))
return bh;
@@ -291,36 +291,6 @@ struct dx_tail {
__le32 dt_checksum; /* crc32c(uuid+inum+dirblock) */
};
-static inline ext4_lblk_t dx_get_block(struct dx_entry *entry);
-static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value);
-static inline unsigned dx_get_hash(struct dx_entry *entry);
-static void dx_set_hash(struct dx_entry *entry, unsigned value);
-static unsigned dx_get_count(struct dx_entry *entries);
-static unsigned dx_get_limit(struct dx_entry *entries);
-static void dx_set_count(struct dx_entry *entries, unsigned value);
-static void dx_set_limit(struct dx_entry *entries, unsigned value);
-static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
-static unsigned dx_node_limit(struct inode *dir);
-static struct dx_frame *dx_probe(struct ext4_filename *fname,
- struct inode *dir,
- struct dx_hash_info *hinfo,
- struct dx_frame *frame);
-static void dx_release(struct dx_frame *frames);
-static int dx_make_map(struct inode *dir, struct buffer_head *bh,
- struct dx_hash_info *hinfo,
- struct dx_map_entry *map_tail);
-static void dx_sort_map(struct dx_map_entry *map, unsigned count);
-static struct ext4_dir_entry_2 *dx_move_dirents(struct inode *dir, char *from,
- char *to, struct dx_map_entry *offsets,
- int count, unsigned int blocksize);
-static struct ext4_dir_entry_2 *dx_pack_dirents(struct inode *dir, char *base,
- unsigned int blocksize);
-static void dx_insert_block(struct dx_frame *frame,
- u32 hash, ext4_lblk_t block);
-static int ext4_htree_next_block(struct inode *dir, __u32 hash,
- struct dx_frame *frame,
- struct dx_frame *frames,
- __u32 *start_hash);
static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
struct ext4_filename *fname,
struct ext4_dir_entry_2 **res_dir);
@@ -376,11 +346,10 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
static __le32 ext4_dirblock_csum(struct inode *inode, void *dirent, int size)
{
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
__u32 csum;
- csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
+ csum = ext4_chksum(ei->i_csum_seed, (__u8 *)dirent, size);
return cpu_to_le32(csum);
}
@@ -398,7 +367,7 @@ int ext4_dirblock_csum_verify(struct inode *inode, struct buffer_head *bh)
{
struct ext4_dir_entry_tail *t;
- if (!ext4_has_metadata_csum(inode->i_sb))
+ if (!ext4_has_feature_metadata_csum(inode->i_sb))
return 1;
t = get_dirent_tail(inode, bh);
@@ -419,7 +388,7 @@ static void ext4_dirblock_csum_set(struct inode *inode,
{
struct ext4_dir_entry_tail *t;
- if (!ext4_has_metadata_csum(inode->i_sb))
+ if (!ext4_has_feature_metadata_csum(inode->i_sb))
return;
t = get_dirent_tail(inode, bh);
@@ -472,7 +441,6 @@ static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
int count_offset, int count, struct dx_tail *t)
{
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
__u32 csum;
int size;
@@ -480,9 +448,9 @@ static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
int offset = offsetof(struct dx_tail, dt_checksum);
size = count_offset + (count * sizeof(struct dx_entry));
- csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
- csum = ext4_chksum(sbi, csum, (__u8 *)t, offset);
- csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
+ csum = ext4_chksum(ei->i_csum_seed, (__u8 *)dirent, size);
+ csum = ext4_chksum(csum, (__u8 *)t, offset);
+ csum = ext4_chksum(csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
return cpu_to_le32(csum);
}
@@ -494,7 +462,7 @@ static int ext4_dx_csum_verify(struct inode *inode,
struct dx_tail *t;
int count_offset, limit, count;
- if (!ext4_has_metadata_csum(inode->i_sb))
+ if (!ext4_has_feature_metadata_csum(inode->i_sb))
return 1;
c = get_dx_countlimit(inode, dirent, &count_offset);
@@ -523,7 +491,7 @@ static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
struct dx_tail *t;
int count_offset, limit, count;
- if (!ext4_has_metadata_csum(inode->i_sb))
+ if (!ext4_has_feature_metadata_csum(inode->i_sb))
return;
c = get_dx_countlimit(inode, dirent, &count_offset);
@@ -612,7 +580,7 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
ext4_dir_rec_len(1, NULL) -
ext4_dir_rec_len(2, NULL) - infosize;
- if (ext4_has_metadata_csum(dir->i_sb))
+ if (ext4_has_feature_metadata_csum(dir->i_sb))
entry_space -= sizeof(struct dx_tail);
return entry_space / sizeof(struct dx_entry);
}
@@ -622,7 +590,7 @@ static inline unsigned dx_node_limit(struct inode *dir)
unsigned int entry_space = dir->i_sb->s_blocksize -
ext4_dir_rec_len(0, dir);
- if (ext4_has_metadata_csum(dir->i_sb))
+ if (ext4_has_feature_metadata_csum(dir->i_sb))
entry_space -= sizeof(struct dx_tail);
return entry_space / sizeof(struct dx_entry);
}
@@ -1076,7 +1044,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
struct ext4_dir_entry_2 *de, *top;
int err = 0, count = 0;
struct fscrypt_str fname_crypto_str = FSTR_INIT(NULL, 0), tmp_str;
- int csum = ext4_has_metadata_csum(dir->i_sb);
+ int csum = ext4_has_feature_metadata_csum(dir->i_sb);
dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
(unsigned long)block));
@@ -1108,7 +1076,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
if (ext4_check_dir_entry(dir, NULL, de, bh,
bh->b_data, bh->b_size,
- (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+ EXT4_LBLK_TO_B(dir, block)
+ ((char *)de - bh->b_data))) {
/* silently ignore the rest of the block */
break;
@@ -1320,7 +1288,7 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh,
struct dx_hash_info h = *hinfo;
int blocksize = EXT4_BLOCK_SIZE(dir->i_sb);
- if (ext4_has_metadata_csum(dir->i_sb))
+ if (ext4_has_feature_metadata_csum(dir->i_sb))
buflen -= sizeof(struct ext4_dir_entry_tail);
while ((char *) de < base + buflen) {
@@ -1462,7 +1430,8 @@ static bool ext4_match(struct inode *parent,
* sure cf_name was properly initialized before
* considering the calculated hash.
*/
- if (IS_ENCRYPTED(parent) && fname->cf_name.name &&
+ if (sb_no_casefold_compat_fallback(parent->i_sb) &&
+ IS_ENCRYPTED(parent) && fname->cf_name.name &&
(fname->hinfo.hash != EXT4_DIRENT_HASH(de) ||
fname->hinfo.minor_hash != EXT4_DIRENT_MINOR_HASH(de)))
return false;
@@ -1595,10 +1564,15 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
* return. Otherwise, fall back to doing a search the
* old fashioned way.
*/
- if (!IS_ERR(ret) || PTR_ERR(ret) != ERR_BAD_DX_DIR)
+ if (IS_ERR(ret) && PTR_ERR(ret) == ERR_BAD_DX_DIR)
+ dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
+ "falling back\n"));
+ else if (!sb_no_casefold_compat_fallback(dir->i_sb) &&
+ *res_dir == NULL && IS_CASEFOLDED(dir))
+ dxtrace(printk(KERN_DEBUG "ext4_find_entry: casefold "
+ "failed, falling back\n"));
+ else
goto cleanup_and_exit;
- dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
- "falling back\n"));
ret = NULL;
}
nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
@@ -1656,7 +1630,7 @@ restart:
}
set_buffer_verified(bh);
i = search_dirblock(bh, dir, fname,
- block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
+ EXT4_LBLK_TO_B(dir, block), res_dir);
if (i == 1) {
EXT4_I(dir)->i_dir_start_lookup = block;
ret = bh;
@@ -1736,7 +1710,6 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
struct ext4_filename *fname,
struct ext4_dir_entry_2 **res_dir)
{
- struct super_block * sb = dir->i_sb;
struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
struct buffer_head *bh;
ext4_lblk_t block;
@@ -1755,8 +1728,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
goto errout;
retval = search_dirblock(bh, dir, fname,
- block << EXT4_BLOCK_SIZE_BITS(sb),
- res_dir);
+ EXT4_LBLK_TO_B(dir, block), res_dir);
if (retval == 1)
goto success;
brelse(bh);
@@ -1788,7 +1760,7 @@ success:
static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
- struct ext4_dir_entry_2 *de;
+ struct ext4_dir_entry_2 *de = NULL;
struct buffer_head *bh;
if (dentry->d_name.len > EXT4_NAME_LEN)
@@ -1844,7 +1816,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
struct dentry *ext4_get_parent(struct dentry *child)
{
__u32 ino;
- struct ext4_dir_entry_2 * de;
+ struct ext4_dir_entry_2 * de = NULL;
struct buffer_head *bh;
bh = ext4_find_entry(d_inode(child), &dotdot_name, &de, NULL);
@@ -1945,7 +1917,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
int csum_size = 0;
int err = 0, i;
- if (ext4_has_metadata_csum(dir->i_sb))
+ if (ext4_has_feature_metadata_csum(dir->i_sb))
csum_size = sizeof(struct ext4_dir_entry_tail);
bh2 = ext4_append(handle, dir, &newblock);
@@ -1995,7 +1967,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
* split it in half by count; each resulting block will have at least
* half the space free.
*/
- if (i > 0)
+ if (i >= 0)
split = count - move;
else
split = count/2;
@@ -2060,8 +2032,7 @@ out:
return ERR_PTR(err);
}
-int ext4_find_dest_de(struct inode *dir, struct inode *inode,
- struct buffer_head *bh,
+int ext4_find_dest_de(struct inode *dir, struct buffer_head *bh,
void *buf, int buf_size,
struct ext4_filename *fname,
struct ext4_dir_entry_2 **dest_de)
@@ -2143,11 +2114,11 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
int csum_size = 0;
int err, err2;
- if (ext4_has_metadata_csum(inode->i_sb))
+ if (ext4_has_feature_metadata_csum(inode->i_sb))
csum_size = sizeof(struct ext4_dir_entry_tail);
if (!de) {
- err = ext4_find_dest_de(dir, inode, bh, bh->b_data,
+ err = ext4_find_dest_de(dir, bh, bh->b_data,
blocksize - csum_size, fname, &de);
if (err)
return err;
@@ -2252,7 +2223,7 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
struct fake_dirent *fde;
int csum_size = 0;
- if (ext4_has_metadata_csum(inode->i_sb))
+ if (ext4_has_feature_metadata_csum(inode->i_sb))
csum_size = sizeof(struct ext4_dir_entry_tail);
blocksize = dir->i_sb->s_blocksize;
@@ -2396,7 +2367,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
ext4_lblk_t block, blocks;
int csum_size = 0;
- if (ext4_has_metadata_csum(inode->i_sb))
+ if (ext4_has_feature_metadata_csum(inode->i_sb))
csum_size = sizeof(struct ext4_dir_entry_tail);
sb = dir->i_sb;
@@ -2427,7 +2398,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
if (!retval || (retval != ERR_BAD_DX_DIR))
goto out;
/* Can we just ignore htree data? */
- if (ext4_has_metadata_csum(sb)) {
+ if (ext4_has_feature_metadata_csum(sb)) {
EXT4_ERROR_INODE(dir,
"Directory has corrupted htree index.");
retval = -EFSCORRUPTED;
@@ -2577,8 +2548,10 @@ again:
BUFFER_TRACE(frame->bh, "get_write_access");
err = ext4_journal_get_write_access(handle, sb, frame->bh,
EXT4_JTR_NONE);
- if (err)
+ if (err) {
+ brelse(bh2);
goto journal_error;
+ }
if (!add_level) {
unsigned icount1 = icount/2, icount2 = icount - icount1;
unsigned hash2 = dx_get_hash(entries + icount1);
@@ -2589,8 +2562,10 @@ again:
err = ext4_journal_get_write_access(handle, sb,
(frame - 1)->bh,
EXT4_JTR_NONE);
- if (err)
+ if (err) {
+ brelse(bh2);
goto journal_error;
+ }
memcpy((char *) entries2, (char *) (entries + icount1),
icount2 * sizeof(struct dx_entry));
@@ -2609,8 +2584,10 @@ again:
dxtrace(dx_show_index("node",
((struct dx_node *) bh2->b_data)->entries));
err = ext4_handle_dirty_dx_node(handle, dir, bh2);
- if (err)
+ if (err) {
+ brelse(bh2);
goto journal_error;
+ }
brelse (bh2);
err = ext4_handle_dirty_dx_node(handle, dir,
(frame - 1)->bh);
@@ -2635,8 +2612,10 @@ again:
"Creating %d level index...\n",
dxroot->info.indirect_levels));
err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
- if (err)
+ if (err) {
+ brelse(bh2);
goto journal_error;
+ }
err = ext4_handle_dirty_dx_node(handle, dir, bh2);
brelse(bh2);
restart = 1;
@@ -2733,7 +2712,7 @@ static int ext4_delete_entry(handle_t *handle,
return err;
}
- if (ext4_has_metadata_csum(dir->i_sb))
+ if (ext4_has_feature_metadata_csum(dir->i_sb))
csum_size = sizeof(struct ext4_dir_entry_tail);
BUFFER_TRACE(bh, "get_write_access");
@@ -2934,48 +2913,59 @@ err_unlock_inode:
return err;
}
-struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
- struct ext4_dir_entry_2 *de,
- int blocksize, int csum_size,
- unsigned int parent_ino, int dotdot_real_len)
+int ext4_init_dirblock(handle_t *handle, struct inode *inode,
+ struct buffer_head *bh, unsigned int parent_ino,
+ void *inline_buf, int inline_size)
{
+ struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *) bh->b_data;
+ size_t blocksize = bh->b_size;
+ int csum_size = 0, header_size;
+
+ if (ext4_has_feature_metadata_csum(inode->i_sb))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
de->inode = cpu_to_le32(inode->i_ino);
de->name_len = 1;
de->rec_len = ext4_rec_len_to_disk(ext4_dir_rec_len(de->name_len, NULL),
blocksize);
- strcpy(de->name, ".");
+ memcpy(de->name, ".", 2);
ext4_set_de_type(inode->i_sb, de, S_IFDIR);
de = ext4_next_entry(de, blocksize);
de->inode = cpu_to_le32(parent_ino);
de->name_len = 2;
- if (!dotdot_real_len)
- de->rec_len = ext4_rec_len_to_disk(blocksize -
- (csum_size + ext4_dir_rec_len(1, NULL)),
- blocksize);
- else
+ memcpy(de->name, "..", 3);
+ ext4_set_de_type(inode->i_sb, de, S_IFDIR);
+ if (inline_buf) {
de->rec_len = ext4_rec_len_to_disk(
ext4_dir_rec_len(de->name_len, NULL),
blocksize);
- strcpy(de->name, "..");
- ext4_set_de_type(inode->i_sb, de, S_IFDIR);
+ de = ext4_next_entry(de, blocksize);
+ header_size = (char *)de - bh->b_data;
+ memcpy((void *)de, inline_buf, inline_size);
+ ext4_update_final_de(bh->b_data, inline_size + header_size,
+ blocksize - csum_size);
+ } else {
+ de->rec_len = ext4_rec_len_to_disk(blocksize -
+ (csum_size + ext4_dir_rec_len(1, NULL)),
+ blocksize);
+ }
- return ext4_next_entry(de, blocksize);
+ if (csum_size)
+ ext4_initialize_dirent_tail(bh, blocksize);
+ BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+ set_buffer_uptodate(bh);
+ set_buffer_verified(bh);
+ return ext4_handle_dirty_dirblock(handle, inode, bh);
}
int ext4_init_new_dir(handle_t *handle, struct inode *dir,
struct inode *inode)
{
struct buffer_head *dir_block = NULL;
- struct ext4_dir_entry_2 *de;
ext4_lblk_t block = 0;
- unsigned int blocksize = dir->i_sb->s_blocksize;
- int csum_size = 0;
int err;
- if (ext4_has_metadata_csum(dir->i_sb))
- csum_size = sizeof(struct ext4_dir_entry_tail);
-
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
err = ext4_try_create_inline_dir(handle, dir, inode);
if (err < 0 && err != -ENOSPC)
@@ -2984,39 +2974,30 @@ int ext4_init_new_dir(handle_t *handle, struct inode *dir,
goto out;
}
+ set_nlink(inode, 2);
inode->i_size = 0;
dir_block = ext4_append(handle, inode, &block);
if (IS_ERR(dir_block))
return PTR_ERR(dir_block);
- de = (struct ext4_dir_entry_2 *)dir_block->b_data;
- ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0);
- set_nlink(inode, 2);
- if (csum_size)
- ext4_initialize_dirent_tail(dir_block, blocksize);
-
- BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_dirblock(handle, inode, dir_block);
- if (err)
- goto out;
- set_buffer_verified(dir_block);
+ err = ext4_init_dirblock(handle, inode, dir_block, dir->i_ino, NULL, 0);
out:
brelse(dir_block);
return err;
}
-static int ext4_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *ext4_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
handle_t *handle;
struct inode *inode;
int err, err2 = 0, credits, retries = 0;
if (EXT4_DIR_LINK_MAX(dir))
- return -EMLINK;
+ return ERR_PTR(-EMLINK);
err = dquot_initialize(dir);
if (err)
- return err;
+ return ERR_PTR(err);
credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
@@ -3066,7 +3047,7 @@ out_stop:
out_retry:
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
- return err;
+ return ERR_PTR(err);
}
/*
@@ -3101,7 +3082,8 @@ bool ext4_empty_dir(struct inode *inode)
de = (struct ext4_dir_entry_2 *) bh->b_data;
if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
0) ||
- le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) {
+ le32_to_cpu(de->inode) != inode->i_ino || de->name_len != 1 ||
+ de->name[0] != '.') {
ext4_warning_inode(inode, "directory missing '.'");
brelse(bh);
return false;
@@ -3110,7 +3092,8 @@ bool ext4_empty_dir(struct inode *inode)
de = ext4_next_entry(de, sb->s_blocksize);
if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
offset) ||
- le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) {
+ le32_to_cpu(de->inode) == 0 || de->name_len != 2 ||
+ de->name[0] != '.' || de->name[1] != '.') {
ext4_warning_inode(inode, "directory missing '..'");
brelse(bh);
return false;
@@ -3148,11 +3131,12 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
int retval;
struct inode *inode;
struct buffer_head *bh;
- struct ext4_dir_entry_2 *de;
+ struct ext4_dir_entry_2 *de = NULL;
handle_t *handle = NULL;
- if (unlikely(ext4_forced_shutdown(dir->i_sb)))
- return -EIO;
+ retval = ext4_emergency_state(dir->i_sb);
+ if (unlikely(retval))
+ return retval;
/* Initialize quotas before so that eventual writes go in
* separate transaction */
@@ -3238,7 +3222,7 @@ int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
{
int retval = -ENOENT;
struct buffer_head *bh;
- struct ext4_dir_entry_2 *de;
+ struct ext4_dir_entry_2 *de = NULL;
handle_t *handle;
int skip_remove_dentry = 0;
@@ -3309,8 +3293,9 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
{
int retval;
- if (unlikely(ext4_forced_shutdown(dir->i_sb)))
- return -EIO;
+ retval = ext4_emergency_state(dir->i_sb);
+ if (unlikely(retval))
+ return retval;
trace_ext4_unlink_enter(dir, dentry);
/*
@@ -3376,8 +3361,9 @@ static int ext4_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct fscrypt_str disk_link;
int retries = 0;
- if (unlikely(ext4_forced_shutdown(dir->i_sb)))
- return -EIO;
+ err = ext4_emergency_state(dir->i_sb);
+ if (unlikely(err))
+ return err;
err = fscrypt_prepare_symlink(dir, symname, len, dir->i_sb->s_blocksize,
&disk_link);
@@ -3418,7 +3404,6 @@ retry:
inode->i_op = &ext4_symlink_inode_operations;
} else {
inode->i_op = &ext4_fast_symlink_inode_operations;
- inode->i_link = (char *)&EXT4_I(inode)->i_data;
}
}
@@ -3434,6 +3419,9 @@ retry:
disk_link.len);
inode->i_size = disk_link.len - 1;
EXT4_I(inode)->i_disksize = inode->i_size;
+ if (!IS_ENCRYPTED(inode))
+ inode_set_cached_link(inode, (char *)&EXT4_I(inode)->i_data,
+ inode->i_size);
}
err = ext4_add_nondir(handle, dentry, &inode);
if (handle)
@@ -3546,7 +3534,7 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data,
bh->b_size, 0) ||
le32_to_cpu(de->inode) != inode->i_ino ||
- strcmp(".", de->name)) {
+ de->name_len != 1 || de->name[0] != '.') {
EXT4_ERROR_INODE(inode, "directory missing '.'");
brelse(bh);
*retval = -EFSCORRUPTED;
@@ -3557,7 +3545,8 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
de = ext4_next_entry(de, inode->i_sb->s_blocksize);
if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data,
bh->b_size, offset) ||
- le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) {
+ le32_to_cpu(de->inode) == 0 || de->name_len != 2 ||
+ de->name[0] != '.' || de->name[1] != '.') {
EXT4_ERROR_INODE(inode, "directory missing '..'");
brelse(bh);
*retval = -EFSCORRUPTED;
@@ -3697,7 +3686,7 @@ static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
{
int retval = -ENOENT;
struct buffer_head *bh;
- struct ext4_dir_entry_2 *de;
+ struct ext4_dir_entry_2 *de = NULL;
bh = ext4_find_entry(dir, d_name, &de, NULL);
if (IS_ERR(bh))
@@ -4197,8 +4186,9 @@ static int ext4_rename2(struct mnt_idmap *idmap,
{
int err;
- if (unlikely(ext4_forced_shutdown(old_dir->i_sb)))
- return -EIO;
+ err = ext4_emergency_state(old_dir->i_sb);
+ if (unlikely(err))
+ return err;
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
return -EINVAL;
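
ext4_init_dirblock() above writes the two mandatory entries of a fresh directory block: "." pointing at the new directory and ".." pointing at the parent, with ".." (or the appended inline data) absorbing the rest of the block ahead of the optional checksum tail. The userspace sketch below lays out a simplified version of such a block; struct dirent2 and REC_LEN() are cut-down stand-ins for struct ext4_dir_entry_2 and ext4_dir_rec_len(), not the on-disk format.

/* Userspace sketch: "." first, then "..", with ".."'s rec_len spanning
 * the remainder of the block. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dirent2 {		/* simplified; real entries are variable-length */
	uint32_t inode;
	uint16_t rec_len;
	uint8_t  name_len;
	uint8_t  file_type;
	char     name[4];
};

#define BLOCKSIZE 1024
#define REC_LEN(nlen) ((8 + (nlen) + 3) & ~3u)	/* header + name, 4-byte aligned */

int main(void)
{
	static unsigned char block[BLOCKSIZE];	/* zero-filled block buffer */
	struct dirent2 *de = (struct dirent2 *)block;

	de->inode = 12;				/* the new directory itself */
	de->name_len = 1;
	de->rec_len = REC_LEN(1);
	memcpy(de->name, ".", 2);

	de = (struct dirent2 *)(block + de->rec_len);
	de->inode = 2;				/* parent (root, in this example) */
	de->name_len = 2;
	de->rec_len = BLOCKSIZE - REC_LEN(1);	/* ".." covers the rest */
	memcpy(de->name, "..", 3);

	printf("'.' rec_len=%u, '..' rec_len=%u\n",
	       REC_LEN(1), (unsigned)de->rec_len);
	return 0;
}
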
diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c
index e5b47dda3317..c9b93b670b0f 100644
--- a/fs/ext4/orphan.c
+++ b/fs/ext4/orphan.c
@@ -8,6 +8,8 @@
#include "ext4.h"
#include "ext4_jbd2.h"
+#define EXT4_MAX_ORPHAN_FILE_BLOCKS 512
+
static int ext4_orphan_file_add(handle_t *handle, struct inode *inode)
{
int i, j, start;
@@ -107,13 +109,9 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
if (!sbi->s_journal || is_bad_inode(inode))
return 0;
- WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
+ WARN_ON_ONCE(!(inode_state_read_once(inode) & (I_NEW | I_FREEING)) &&
!inode_is_locked(inode));
- /*
- * Inode orphaned in orphan file or in orphan list?
- */
- if (ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE) ||
- !list_empty(&EXT4_I(inode)->i_orphan))
+ if (ext4_inode_orphan_tracked(inode))
return 0;
/*
@@ -236,7 +234,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
if (!sbi->s_journal && !(sbi->s_mount_state & EXT4_ORPHAN_FS))
return 0;
- WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
+ WARN_ON_ONCE(!(inode_state_read_once(inode) & (I_NEW | I_FREEING)) &&
!inode_is_locked(inode));
if (ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE))
return ext4_orphan_file_del(handle, inode);
@@ -517,7 +515,7 @@ void ext4_release_orphan_info(struct super_block *sb)
return;
for (i = 0; i < oi->of_blocks; i++)
brelse(oi->of_binfo[i].ob_bh);
- kfree(oi->of_binfo);
+ kvfree(oi->of_binfo);
}
static struct ext4_orphan_block_tail *ext4_orphan_block_tail(
@@ -537,13 +535,13 @@ static int ext4_orphan_file_block_csum_verify(struct super_block *sb,
struct ext4_orphan_block_tail *ot;
__le64 dsk_block_nr = cpu_to_le64(bh->b_blocknr);
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return 1;
ot = ext4_orphan_block_tail(sb, bh);
- calculated = ext4_chksum(EXT4_SB(sb), oi->of_csum_seed,
- (__u8 *)&dsk_block_nr, sizeof(dsk_block_nr));
- calculated = ext4_chksum(EXT4_SB(sb), calculated, (__u8 *)bh->b_data,
+ calculated = ext4_chksum(oi->of_csum_seed, (__u8 *)&dsk_block_nr,
+ sizeof(dsk_block_nr));
+ calculated = ext4_chksum(calculated, (__u8 *)bh->b_data,
inodes_per_ob * sizeof(__u32));
return le32_to_cpu(ot->ob_checksum) == calculated;
}
@@ -560,10 +558,9 @@ void ext4_orphan_file_block_trigger(struct jbd2_buffer_trigger_type *triggers,
struct ext4_orphan_block_tail *ot;
__le64 dsk_block_nr = cpu_to_le64(bh->b_blocknr);
- csum = ext4_chksum(EXT4_SB(sb), oi->of_csum_seed,
- (__u8 *)&dsk_block_nr, sizeof(dsk_block_nr));
- csum = ext4_chksum(EXT4_SB(sb), csum, (__u8 *)data,
- inodes_per_ob * sizeof(__u32));
+ csum = ext4_chksum(oi->of_csum_seed, (__u8 *)&dsk_block_nr,
+ sizeof(dsk_block_nr));
+ csum = ext4_chksum(csum, (__u8 *)data, inodes_per_ob * sizeof(__u32));
ot = ext4_orphan_block_tail(sb, bh);
ot->ob_checksum = cpu_to_le32(csum);
}
@@ -588,10 +585,22 @@ int ext4_init_orphan_info(struct super_block *sb)
ext4_msg(sb, KERN_ERR, "get orphan inode failed");
return PTR_ERR(inode);
}
+ /*
+ * This is just an artificial limit to prevent a corrupted fs from
+ * consuming absurd amounts of memory when pinning the blocks of the
+ * orphan file in memory.
+ */
+ if (inode->i_size > (EXT4_MAX_ORPHAN_FILE_BLOCKS << inode->i_blkbits)) {
+ ext4_msg(sb, KERN_ERR, "orphan file too big: %llu",
+ (unsigned long long)inode->i_size);
+ ret = -EFSCORRUPTED;
+ goto out_put;
+ }
oi->of_blocks = inode->i_size >> sb->s_blocksize_bits;
oi->of_csum_seed = EXT4_I(inode)->i_csum_seed;
- oi->of_binfo = kmalloc(oi->of_blocks*sizeof(struct ext4_orphan_block),
- GFP_KERNEL);
+ oi->of_binfo = kvmalloc_array(oi->of_blocks,
+ sizeof(struct ext4_orphan_block),
+ GFP_KERNEL);
if (!oi->of_binfo) {
ret = -ENOMEM;
goto out_put;
@@ -630,7 +639,7 @@ int ext4_init_orphan_info(struct super_block *sb)
out_free:
for (i--; i >= 0; i--)
brelse(oi->of_binfo[i].ob_bh);
- kfree(oi->of_binfo);
+ kvfree(oi->of_binfo);
out_put:
iput(inode);
return ret;
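
The orphan.c hunks above cap the orphan file at EXT4_MAX_ORPHAN_FILE_BLOCKS blocks before its blocks are pinned, and switch the per-block tracking array to kvmalloc_array()/kvfree() so a legitimately large file no longer depends on a contiguous kmalloc. A minimal sketch of that validate-then-allocate pattern, with calloc() standing in for kvmalloc_array() and made-up sizes:

/* Sketch only: reject an absurd on-disk size before sizing the
 * in-memory tracking array. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ORPHAN_BLOCKS 512

static int init_orphan_info(uint64_t i_size, unsigned int blkbits)
{
	uint64_t nblocks = i_size >> blkbits;
	void *binfo;

	if (i_size > ((uint64_t)MAX_ORPHAN_BLOCKS << blkbits)) {
		fprintf(stderr, "orphan file too big: %llu\n",
			(unsigned long long)i_size);
		return -1;			/* would be -EFSCORRUPTED */
	}

	binfo = calloc(nblocks, 64);		/* 64 bytes per tracked block, say */
	if (!binfo)
		return -1;			/* would be -ENOMEM */

	/* ... pin and checksum-verify each orphan block here ... */
	free(binfo);
	return 0;
}

int main(void)
{
	printf("sane size:    %d\n", init_orphan_info(32 << 12, 12));
	printf("corrupt size: %d\n", init_orphan_info(1ULL << 40, 12));
	return 0;
}
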
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 69b8a7221a2b..39abfeec5f36 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -164,7 +164,8 @@ static void ext4_release_io_end(ext4_io_end_t *io_end)
}
/*
- * Check a range of space and convert unwritten extents to written. Note that
+ * On successful IO, check a range of space and convert unwritten extents to
+ * written. On IO failure, check if journal abort is needed. Note that
* we are protected from truncate touching same part of extent tree by the
* fact that truncate code waits for all DIO to finish (thus exclusion from
* direct IO is achieved) and also waits for PageWriteback bits. Thus we
@@ -175,20 +176,36 @@ static int ext4_end_io_end(ext4_io_end_t *io_end)
{
struct inode *inode = io_end->inode;
handle_t *handle = io_end->handle;
+ struct super_block *sb = inode->i_sb;
int ret = 0;
ext4_debug("ext4_end_io_nolock: io_end 0x%p from inode %lu,list->next 0x%p,"
"list->prev 0x%p\n",
io_end, inode->i_ino, io_end->list.next, io_end->list.prev);
- io_end->handle = NULL; /* Following call will use up the handle */
- ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
- if (ret < 0 && !ext4_forced_shutdown(inode->i_sb)) {
- ext4_msg(inode->i_sb, KERN_EMERG,
+ /*
+ * Do not convert the unwritten extents if data writeback fails,
+ * or stale data may be exposed.
+ */
+ io_end->handle = NULL; /* Following call will use up the handle */
+ if (unlikely(io_end->flag & EXT4_IO_END_FAILED)) {
+ ret = -EIO;
+ if (handle)
+ jbd2_journal_free_reserved(handle);
+
+ if (test_opt(sb, DATA_ERR_ABORT))
+ jbd2_journal_abort(EXT4_SB(sb)->s_journal, ret);
+ } else {
+ ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
+ }
+ if (ret < 0 && !ext4_emergency_state(sb) &&
+ io_end->flag & EXT4_IO_END_UNWRITTEN) {
+ ext4_msg(sb, KERN_EMERG,
"failed to convert unwritten extents to written "
"extents -- potential data loss! "
"(inode %lu, error %d)", inode->i_ino, ret);
}
+
ext4_clear_io_unwritten_flag(io_end);
ext4_release_io_end(io_end);
return ret;
@@ -217,6 +234,18 @@ static void dump_completed_IO(struct inode *inode, struct list_head *head)
#endif
}
+static bool ext4_io_end_defer_completion(ext4_io_end_t *io_end)
+{
+ if (io_end->flag & EXT4_IO_END_UNWRITTEN &&
+ !list_empty(&io_end->list_vec))
+ return true;
+ if (test_opt(io_end->inode->i_sb, DATA_ERR_ABORT) &&
+ io_end->flag & EXT4_IO_END_FAILED &&
+ !ext4_emergency_state(io_end->inode->i_sb))
+ return true;
+ return false;
+}
+
/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
@@ -225,9 +254,12 @@ static void ext4_add_complete_io(ext4_io_end_t *io_end)
struct workqueue_struct *wq;
unsigned long flags;
- /* Only reserved conversions from writeback should enter here */
- WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
- WARN_ON(!io_end->handle && sbi->s_journal);
+ /* Only reserved conversions or pending IO errors will enter here. */
+ WARN_ON(!(io_end->flag & EXT4_IO_END_DEFER_COMPLETION));
+ WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN &&
+ !io_end->handle && sbi->s_journal);
+ WARN_ON(!io_end->bio);
+
spin_lock_irqsave(&ei->i_completed_io_lock, flags);
wq = sbi->rsv_conversion_wq;
if (list_empty(&ei->i_rsv_conversion_list))
@@ -252,7 +284,7 @@ static int ext4_do_flush_completed_IO(struct inode *inode,
while (!list_empty(&unwritten)) {
io_end = list_entry(unwritten.next, ext4_io_end_t, list);
- BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
+ BUG_ON(!(io_end->flag & EXT4_IO_END_DEFER_COMPLETION));
list_del_init(&io_end->list);
err = ext4_end_io_end(io_end);
@@ -263,7 +295,8 @@ static int ext4_do_flush_completed_IO(struct inode *inode,
}
/*
- * work on completed IO, to convert unwritten extents to extents
+ * Used to convert unwritten extents to written extents upon IO completion,
+ * or used to abort the journal upon IO errors.
*/
void ext4_end_io_rsv_work(struct work_struct *work)
{
@@ -288,29 +321,22 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
if (refcount_dec_and_test(&io_end->count)) {
- if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
- list_empty(&io_end->list_vec)) {
- ext4_release_io_end(io_end);
- return;
- }
- ext4_add_complete_io(io_end);
+ if (ext4_io_end_defer_completion(io_end))
+ return ext4_add_complete_io(io_end);
+
+ ext4_release_io_end(io_end);
}
}
int ext4_put_io_end(ext4_io_end_t *io_end)
{
- int err = 0;
-
if (refcount_dec_and_test(&io_end->count)) {
- if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
- err = ext4_convert_unwritten_io_end_vec(io_end->handle,
- io_end);
- io_end->handle = NULL;
- ext4_clear_io_unwritten_flag(io_end);
- }
+ if (ext4_io_end_defer_completion(io_end))
+ return ext4_end_io_end(io_end);
+
ext4_release_io_end(io_end);
}
- return err;
+ return 0;
}
ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
@@ -344,11 +370,12 @@ static void ext4_end_bio(struct bio *bio)
bio->bi_status, inode->i_ino,
(unsigned long long)
bi_sector >> (inode->i_blkbits - 9));
+ io_end->flag |= EXT4_IO_END_FAILED;
mapping_set_error(inode->i_mapping,
blk_status_to_errno(bio->bi_status));
}
- if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
+ if (ext4_io_end_defer_completion(io_end)) {
/*
* Link bio into list hanging from io_end. We have to do it
* atomically as bio completions can be racing against each
@@ -520,9 +547,9 @@ int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
* first page of the bio. Otherwise it can deadlock.
*/
if (io->io_bio)
- gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
+ gfp_flags = GFP_NOWAIT;
retry_encrypt:
- bounce_page = fscrypt_encrypt_pagecache_blocks(&folio->page,
+ bounce_page = fscrypt_encrypt_pagecache_blocks(folio,
enc_bytes, 0, gfp_flags);
if (IS_ERR(bounce_page)) {
ret = PTR_ERR(bounce_page);
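
The page-io.c changes route an io_end through the completion workqueue in two cases: reserved unwritten-extent conversions (as before) and failed writeback when data_err=abort requires aborting the journal from process context. The sketch below models only that decision; the flag values, struct io_end and defer_completion() are illustrative stand-ins for EXT4_IO_END_* and ext4_io_end_defer_completion(), not the kernel definitions.

/* Sketch of the "defer this io_end to the workqueue?" predicate. */
#include <stdbool.h>
#include <stdio.h>

#define IO_END_UNWRITTEN 0x1
#define IO_END_FAILED    0x2

struct io_end {
	unsigned int flags;
	bool has_extents_to_convert;	/* models !list_empty(&io_end->list_vec) */
};

static bool defer_completion(const struct io_end *io, bool data_err_abort,
			     bool emergency)
{
	if ((io->flags & IO_END_UNWRITTEN) && io->has_extents_to_convert)
		return true;		/* unwritten extents still to convert */
	if (data_err_abort && (io->flags & IO_END_FAILED) && !emergency)
		return true;		/* failed IO must abort the journal */
	return false;
}

int main(void)
{
	struct io_end unwritten = { IO_END_UNWRITTEN, true };
	struct io_end failed = { IO_END_FAILED, false };

	printf("unwritten:              %d\n", defer_completion(&unwritten, false, false));
	printf("failed, data_err=abort: %d\n", defer_completion(&failed, true, false));
	printf("failed, default mount:  %d\n", defer_completion(&failed, false, false));
	return 0;
}
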
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 5d3a9dc9a32d..e7f2350c725b 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -213,9 +213,7 @@ int ext4_mpage_readpages(struct inode *inode,
{
struct bio *bio = NULL;
sector_t last_block_in_bio = 0;
-
const unsigned blkbits = inode->i_blkbits;
- const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
const unsigned blocksize = 1 << blkbits;
sector_t next_block;
sector_t block_in_file;
@@ -227,27 +225,32 @@ int ext4_mpage_readpages(struct inode *inode,
int length;
unsigned relative_block = 0;
struct ext4_map_blocks map;
- unsigned int nr_pages = rac ? readahead_count(rac) : 1;
+ unsigned int nr_pages, folio_pages;
map.m_pblk = 0;
map.m_lblk = 0;
map.m_len = 0;
map.m_flags = 0;
- for (; nr_pages; nr_pages--) {
+ nr_pages = rac ? readahead_count(rac) : folio_nr_pages(folio);
+ for (; nr_pages; nr_pages -= folio_pages) {
int fully_mapped = 1;
- unsigned first_hole = blocks_per_page;
+ unsigned int first_hole;
+ unsigned int blocks_per_folio;
if (rac)
folio = readahead_folio(rac);
+
+ folio_pages = folio_nr_pages(folio);
prefetchw(&folio->flags);
if (folio_buffers(folio))
goto confused;
- block_in_file = next_block =
- (sector_t)folio->index << (PAGE_SHIFT - blkbits);
- last_block = block_in_file + nr_pages * blocks_per_page;
+ blocks_per_folio = folio_size(folio) >> blkbits;
+ first_hole = blocks_per_folio;
+ block_in_file = next_block = EXT4_PG_TO_LBLK(inode, folio->index);
+ last_block = EXT4_PG_TO_LBLK(inode, folio->index + nr_pages);
last_block_in_file = (ext4_readpage_limit(inode) +
blocksize - 1) >> blkbits;
if (last_block > last_block_in_file)
@@ -270,7 +273,7 @@ int ext4_mpage_readpages(struct inode *inode,
map.m_flags &= ~EXT4_MAP_MAPPED;
break;
}
- if (page_block == blocks_per_page)
+ if (page_block == blocks_per_folio)
break;
page_block++;
block_in_file++;
@@ -281,7 +284,7 @@ int ext4_mpage_readpages(struct inode *inode,
* Then do more ext4_map_blocks() calls until we are
* done with this folio.
*/
- while (page_block < blocks_per_page) {
+ while (page_block < blocks_per_folio) {
if (block_in_file < last_block) {
map.m_lblk = block_in_file;
map.m_len = last_block - block_in_file;
@@ -296,13 +299,13 @@ int ext4_mpage_readpages(struct inode *inode,
}
if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
fully_mapped = 0;
- if (first_hole == blocks_per_page)
+ if (first_hole == blocks_per_folio)
first_hole = page_block;
page_block++;
block_in_file++;
continue;
}
- if (first_hole != blocks_per_page)
+ if (first_hole != blocks_per_folio)
goto confused; /* hole -> non-hole */
/* Contiguous blocks? */
@@ -315,13 +318,13 @@ int ext4_mpage_readpages(struct inode *inode,
/* needed? */
map.m_flags &= ~EXT4_MAP_MAPPED;
break;
- } else if (page_block == blocks_per_page)
+ } else if (page_block == blocks_per_folio)
break;
page_block++;
block_in_file++;
}
}
- if (first_hole != blocks_per_page) {
+ if (first_hole != blocks_per_folio) {
folio_zero_segment(folio, first_hole << blkbits,
folio_size(folio));
if (first_hole == 0) {
@@ -367,11 +370,11 @@ int ext4_mpage_readpages(struct inode *inode,
if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
(relative_block == map.m_len)) ||
- (first_hole != blocks_per_page)) {
+ (first_hole != blocks_per_folio)) {
submit_bio(bio);
bio = NULL;
} else
- last_block_in_bio = first_block + blocks_per_page - 1;
+ last_block_in_bio = first_block + blocks_per_folio - 1;
continue;
confused:
if (bio) {
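
The readpage.c hunks replace the fixed blocks_per_page with a per-folio count so that large folios are mapped correctly; the count is simply the folio's byte size shifted down by the inode's block-size bits. A trivial demonstration of that arithmetic (the folio sizes are just examples):

/* blocks_per_folio = folio_size >> blkbits */
#include <stdio.h>

int main(void)
{
	unsigned int blkbits = 12;			/* 4 KiB filesystem blocks */
	unsigned long folio_sizes[] = { 4096, 16384, 2 * 1024 * 1024 };

	for (int i = 0; i < 3; i++)
		printf("folio %7lu bytes -> %lu blocks\n",
		       folio_sizes[i], folio_sizes[i] >> blkbits);
	return 0;
}
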
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 72f77f78ae8d..050f26168d97 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1118,8 +1118,8 @@ static inline void ext4_set_block_group_nr(struct super_block *sb, char *data,
struct ext4_super_block *es = (struct ext4_super_block *) data;
es->s_block_group_nr = cpu_to_le16(group);
- if (ext4_has_metadata_csum(sb))
- es->s_checksum = ext4_superblock_csum(sb, es);
+ if (ext4_has_feature_metadata_csum(sb))
+ es->s_checksum = ext4_superblock_csum(es);
}
/*
@@ -1315,7 +1315,7 @@ static int ext4_set_bitmap_checksums(struct super_block *sb,
{
struct buffer_head *bh;
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return 0;
bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 785809f33ff4..87205660c5d0 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -79,7 +79,6 @@ static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
-static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
@@ -266,10 +265,19 @@ struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
return __ext4_sb_bread_gfp(sb, block, 0, gfp);
}
+struct buffer_head *ext4_sb_bread_nofail(struct super_block *sb,
+ sector_t block)
+{
+ gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
+ ~__GFP_FS) | __GFP_MOVABLE | __GFP_NOFAIL;
+
+ return __ext4_sb_bread_gfp(sb, block, 0, gfp);
+}
+
void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
struct buffer_head *bh = bdev_getblk(sb->s_bdev, block,
- sb->s_blocksize, GFP_NOWAIT | __GFP_NOWARN);
+ sb->s_blocksize, GFP_NOWAIT);
if (likely(bh)) {
if (trylock_buffer(bh))
@@ -287,14 +295,12 @@ static int ext4_verify_csum_type(struct super_block *sb,
return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}
-__le32 ext4_superblock_csum(struct super_block *sb,
- struct ext4_super_block *es)
+__le32 ext4_superblock_csum(struct ext4_super_block *es)
{
- struct ext4_sb_info *sbi = EXT4_SB(sb);
int offset = offsetof(struct ext4_super_block, s_checksum);
__u32 csum;
- csum = ext4_chksum(sbi, ~0, (char *)es, offset);
+ csum = ext4_chksum(~0, (char *)es, offset);
return cpu_to_le32(csum);
}
@@ -302,20 +308,20 @@ __le32 ext4_superblock_csum(struct super_block *sb,
static int ext4_superblock_csum_verify(struct super_block *sb,
struct ext4_super_block *es)
{
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return 1;
- return es->s_checksum == ext4_superblock_csum(sb, es);
+ return es->s_checksum == ext4_superblock_csum(es);
}
void ext4_superblock_csum_set(struct super_block *sb)
{
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return;
- es->s_checksum = ext4_superblock_csum(sb, es);
+ es->s_checksum = ext4_superblock_csum(es);
}
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
@@ -448,9 +454,6 @@ static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
#define ext4_get_tstamp(es, tstamp) \
__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
-#define EXT4_SB_REFRESH_INTERVAL_SEC (3600) /* seconds (1 hour) */
-#define EXT4_SB_REFRESH_INTERVAL_KB (16384) /* kilobytes (16MB) */
-
/*
* The ext4_maybe_update_superblock() function checks and updates the
* superblock if needed.
@@ -458,8 +461,10 @@ static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
* This function is designed to update the on-disk superblock only under
* certain conditions to prevent excessive disk writes and unnecessary
* waking of the disk from sleep. The superblock will be updated if:
- * 1. More than an hour has passed since the last superblock update, and
- * 2. More than 16MB have been written since the last superblock update.
+ * 1. More than sbi->s_sb_update_sec (default: 1 hour) has passed since the
+ * last superblock update, and
+ * 2. More than sbi->s_sb_update_kb (default: 16 MB) kilobytes have been
+ * written since the last superblock update.
*
* @sb: The superblock
*/
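
As the comment above describes, the on-disk superblock is only rewritten when both thresholds are crossed, and the former hard-coded constants become the per-filesystem tunables s_sb_update_sec and s_sb_update_kb. A small sketch of that two-threshold check, using the default values quoted in the comment (the function and parameter names are illustrative only):

/* Rewrite the superblock only when enough time has passed AND enough
 * data has been written since the last update. */
#include <stdbool.h>
#include <stdio.h>

static bool should_update_sb(long long now, long long last_update,
			     unsigned long long kb_written_since,
			     unsigned int update_sec, unsigned int update_kb)
{
	if (now - last_update < update_sec)
		return false;			/* too soon */
	return kb_written_since > update_kb;	/* enough data written? */
}

int main(void)
{
	/* 2 hours since the last update, 20 MB written since then: update. */
	printf("update? %d\n", should_update_sb(7200, 0, 20 * 1024, 3600, 16384));
	/* 2 hours, but only 1 MB written: skip the write. */
	printf("update? %d\n", should_update_sb(7200, 0, 1024, 3600, 16384));
	return 0;
}
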
@@ -473,14 +478,15 @@ static void ext4_maybe_update_superblock(struct super_block *sb)
__u64 lifetime_write_kbytes;
__u64 diff_size;
- if (sb_rdonly(sb) || !(sb->s_flags & SB_ACTIVE) ||
- !journal || (journal->j_flags & JBD2_UNMOUNT))
+ if (ext4_emergency_state(sb) || sb_rdonly(sb) ||
+ !(sb->s_flags & SB_ACTIVE) || !journal ||
+ journal->j_flags & JBD2_UNMOUNT)
return;
now = ktime_get_real_seconds();
last_update = ext4_get_tstamp(es, s_wtime);
- if (likely(now - last_update < EXT4_SB_REFRESH_INTERVAL_SEC))
+ if (likely(now - last_update < sbi->s_sb_update_sec))
return;
lifetime_write_kbytes = sbi->s_kbytes_written +
@@ -495,49 +501,23 @@ static void ext4_maybe_update_superblock(struct super_block *sb)
*/
diff_size = lifetime_write_kbytes - le64_to_cpu(es->s_kbytes_written);
- if (diff_size > EXT4_SB_REFRESH_INTERVAL_KB)
+ if (diff_size > sbi->s_sb_update_kb)
schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
}
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
struct super_block *sb = journal->j_private;
- struct ext4_sb_info *sbi = EXT4_SB(sb);
- int error = is_journal_aborted(journal);
- struct ext4_journal_cb_entry *jce;
BUG_ON(txn->t_state == T_FINISHED);
ext4_process_freed_data(sb, txn->t_tid);
ext4_maybe_update_superblock(sb);
-
- spin_lock(&sbi->s_md_lock);
- while (!list_empty(&txn->t_private_list)) {
- jce = list_entry(txn->t_private_list.next,
- struct ext4_journal_cb_entry, jce_list);
- list_del_init(&jce->jce_list);
- spin_unlock(&sbi->s_md_lock);
- jce->jce_func(sb, jce, error);
- spin_lock(&sbi->s_md_lock);
- }
- spin_unlock(&sbi->s_md_lock);
}
-/*
- * This writepage callback for write_cache_pages()
- * takes care of a few cases after page cleaning.
- *
- * write_cache_pages() already checks for dirty pages
- * and calls clear_page_dirty_for_io(), which we want,
- * to write protect the pages.
- *
- * However, we may have to redirty a page (see below.)
- */
-static int ext4_journalled_writepage_callback(struct folio *folio,
- struct writeback_control *wbc,
- void *data)
+static bool ext4_journalled_writepage_needs_redirty(struct jbd2_inode *jinode,
+ struct folio *folio)
{
- transaction_t *transaction = (transaction_t *) data;
struct buffer_head *bh, *head;
struct journal_head *jh;
@@ -558,15 +538,12 @@ static int ext4_journalled_writepage_callback(struct folio *folio,
*/
jh = bh2jh(bh);
if (buffer_dirty(bh) ||
- (jh && (jh->b_transaction != transaction ||
- jh->b_next_transaction))) {
- folio_redirty_for_writepage(wbc, folio);
- goto out;
- }
+ (jh && (jh->b_transaction != jinode->i_transaction ||
+ jh->b_next_transaction)))
+ return true;
} while ((bh = bh->b_this_page) != head);
-out:
- return AOP_WRITEPAGE_ACTIVATE;
+ return false;
}
static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
@@ -578,10 +555,23 @@ static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
.range_start = jinode->i_dirty_start,
.range_end = jinode->i_dirty_end,
};
+ struct folio *folio = NULL;
+ int error;
+
+ /*
+ * writeback_iter() already checks for dirty pages and calls
+ * folio_clear_dirty_for_io(), which we want in order to write-protect
+ * the folios.
+ *
+ * However, we may have to redirty a folio sometimes.
+ */
+ while ((folio = writeback_iter(mapping, &wbc, folio, &error))) {
+ if (ext4_journalled_writepage_needs_redirty(jinode, folio))
+ folio_redirty_for_writepage(&wbc, folio);
+ folio_unlock(folio);
+ }
- return write_cache_pages(mapping, &wbc,
- ext4_journalled_writepage_callback,
- jinode->i_transaction);
+ return error;
}
static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
@@ -707,11 +697,8 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
if (test_opt(sb, WARN_ON_ERROR))
WARN_ON_ONCE(1);
- if (!continue_fs && !sb_rdonly(sb)) {
- set_bit(EXT4_FLAGS_SHUTDOWN, &EXT4_SB(sb)->s_ext4_flags);
- if (journal)
- jbd2_journal_abort(journal, -EIO);
- }
+ if (!continue_fs && !ext4_emergency_ro(sb) && journal)
+ jbd2_journal_abort(journal, -error);
if (!bdev_read_only(sb->s_bdev)) {
save_error_info(sb, error, ino, block, func, line);
@@ -719,9 +706,13 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
* In case the fs should keep running, we need to writeout
* superblock through the journal. Due to lock ordering
* constraints, it may not be safe to do it right here so we
- * defer superblock flushing to a workqueue.
+ * defer superblock flushing to a workqueue. We just need to be
+ * careful when the journal is already shutting down. If we get
+ * here in that case, just update the sb directly as the last
+ * transaction won't commit anyway.
*/
- if (continue_fs && journal)
+ if (continue_fs && journal &&
+ !ext4_test_mount_flag(sb, EXT4_MF_JOURNAL_DESTROY))
schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
else
ext4_commit_super(sb);
@@ -737,17 +728,17 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
sb->s_id);
}
- if (sb_rdonly(sb) || continue_fs)
+ if (ext4_emergency_ro(sb) || continue_fs)
return;
ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
/*
- * EXT4_FLAGS_SHUTDOWN was set which stops all filesystem
- * modifications. We don't set SB_RDONLY because that requires
- * sb->s_umount semaphore and setting it without proper remount
- * procedure is confusing code such as freeze_super() leading to
- * deadlocks and other problems.
+ * We don't set SB_RDONLY because that requires sb->s_umount
+ * semaphore and setting it without proper remount procedure is
+ * confusing code such as freeze_super() leading to deadlocks
+ * and other problems.
*/
+ set_bit(EXT4_FLAGS_EMERGENCY_RO, &EXT4_SB(sb)->s_ext4_flags);
}
static void update_super_work(struct work_struct *work)
@@ -765,7 +756,8 @@ static void update_super_work(struct work_struct *work)
* We use directly jbd2 functions here to avoid recursing back into
* ext4 error handling code during handling of previous errors.
*/
- if (!sb_rdonly(sbi->s_sb) && journal) {
+ if (!ext4_emergency_state(sbi->s_sb) &&
+ !sb_rdonly(sbi->s_sb) && journal) {
struct buffer_head *sbh = sbi->s_sbh;
bool call_notify_err = false;
@@ -819,7 +811,7 @@ void __ext4_error(struct super_block *sb, const char *function,
struct va_format vaf;
va_list args;
- if (unlikely(ext4_forced_shutdown(sb)))
+ if (unlikely(ext4_emergency_state(sb)))
return;
trace_ext4_error(sb, function, line);
@@ -844,7 +836,7 @@ void __ext4_error_inode(struct inode *inode, const char *function,
va_list args;
struct va_format vaf;
- if (unlikely(ext4_forced_shutdown(inode->i_sb)))
+ if (unlikely(ext4_emergency_state(inode->i_sb)))
return;
trace_ext4_error(inode->i_sb, function, line);
@@ -879,7 +871,7 @@ void __ext4_error_file(struct file *file, const char *function,
struct inode *inode = file_inode(file);
char pathname[80], *path;
- if (unlikely(ext4_forced_shutdown(inode->i_sb)))
+ if (unlikely(ext4_emergency_state(inode->i_sb)))
return;
trace_ext4_error(inode->i_sb, function, line);
@@ -959,7 +951,7 @@ void __ext4_std_error(struct super_block *sb, const char *function,
char nbuf[16];
const char *errstr;
- if (unlikely(ext4_forced_shutdown(sb)))
+ if (unlikely(ext4_emergency_state(sb)))
return;
/* Special case: if the error is EROFS, and we're not already
@@ -1053,7 +1045,7 @@ __acquires(bitlock)
struct va_format vaf;
va_list args;
- if (unlikely(ext4_forced_shutdown(sb)))
+ if (unlikely(ext4_emergency_state(sb)))
return;
trace_ext4_error(sb, function, line);
@@ -1306,18 +1298,17 @@ static void ext4_put_super(struct super_block *sb)
ext4_unregister_li_request(sb);
ext4_quotas_off(sb, EXT4_MAXQUOTAS);
- flush_work(&sbi->s_sb_upd_work);
destroy_workqueue(sbi->rsv_conversion_wq);
ext4_release_orphan_info(sb);
if (sbi->s_journal) {
aborted = is_journal_aborted(sbi->s_journal);
- err = jbd2_journal_destroy(sbi->s_journal);
- sbi->s_journal = NULL;
+ err = ext4_journal_destroy(sbi, sbi->s_journal);
if ((err < 0) && !aborted) {
ext4_abort(sb, -err, "Couldn't clean up the journal");
}
- }
+ } else
+ flush_work(&sbi->s_sb_upd_work);
ext4_es_unregister_shrinker(sbi);
timer_shutdown_sync(&sbi->s_err_report);
@@ -1325,13 +1316,14 @@ static void ext4_put_super(struct super_block *sb)
ext4_mb_release(sb);
ext4_ext_release(sb);
- if (!sb_rdonly(sb) && !aborted) {
- ext4_clear_feature_journal_needs_recovery(sb);
- ext4_clear_feature_orphan_present(sb);
- es->s_state = cpu_to_le16(sbi->s_mount_state);
- }
- if (!sb_rdonly(sb))
+ if (!ext4_emergency_state(sb) && !sb_rdonly(sb)) {
+ if (!aborted) {
+ ext4_clear_feature_journal_needs_recovery(sb);
+ ext4_clear_feature_orphan_present(sb);
+ es->s_state = cpu_to_le16(sbi->s_mount_state);
+ }
ext4_commit_super(sb);
+ }
ext4_group_desc_free(sbi);
ext4_flex_groups_free(sbi);
@@ -1380,8 +1372,6 @@ static void ext4_put_super(struct super_block *sb)
*/
kobject_put(&sbi->s_kobj);
wait_for_completion(&sbi->s_kobj_unregister);
- if (sbi->s_chksum_driver)
- crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi->s_blockgroup_lock);
fs_put_dax(sbi->s_daxdev, NULL);
fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
@@ -1406,6 +1396,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
inode_set_iversion(&ei->vfs_inode, 1);
ei->i_flags = 0;
+ ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
spin_lock_init(&ei->i_raw_lock);
ei->i_prealloc_node = RB_ROOT;
atomic_set(&ei->i_prealloc_active, 0);
@@ -1416,6 +1407,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
ei->i_es_all_nr = 0;
ei->i_es_shk_nr = 0;
ei->i_es_shrink_lblk = 0;
+ ei->i_es_seq = 0;
ei->i_reserved_data_blocks = 0;
spin_lock_init(&(ei->i_block_reservation_lock));
ext4_init_pending_tree(&ei->i_pending_tree);
@@ -1428,16 +1420,15 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
spin_lock_init(&ei->i_completed_io_lock);
ei->i_sync_tid = 0;
ei->i_datasync_tid = 0;
- atomic_set(&ei->i_unwritten, 0);
INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
ext4_fc_init_inode(&ei->vfs_inode);
- mutex_init(&ei->i_fc_lock);
+ spin_lock_init(&ei->i_fc_lock);
return &ei->vfs_inode;
}
static int ext4_drop_inode(struct inode *inode)
{
- int drop = generic_drop_inode(inode);
+ int drop = inode_generic_drop(inode);
if (!drop)
drop = fscrypt_drop_inode(inode);
@@ -1458,9 +1449,9 @@ static void ext4_free_in_core_inode(struct inode *inode)
static void ext4_destroy_inode(struct inode *inode)
{
- if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
+ if (ext4_inode_orphan_tracked(inode)) {
ext4_msg(inode->i_sb, KERN_ERR,
- "Inode %lu (%p): orphan list check failed!",
+ "Inode %lu (%p): inode tracked as orphan!",
inode->i_ino, EXT4_I(inode));
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
EXT4_I(inode), sizeof(struct ext4_inode_info),
@@ -1490,6 +1481,12 @@ static void init_once(void *foo)
init_rwsem(&ei->i_data_sem);
inode_init_once(&ei->vfs_inode);
ext4_fc_init_inode(&ei->vfs_inode);
+#ifdef CONFIG_FS_ENCRYPTION
+ ei->i_crypt_info = NULL;
+#endif
+#ifdef CONFIG_FS_VERITY
+ ei->i_verity_info = NULL;
+#endif
}
static int __init init_inodecache(void)
@@ -1825,7 +1822,6 @@ static const struct fs_parameter_spec ext4_param_specs[] = {
{}
};
-#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
#define MOPT_SET 0x0001
#define MOPT_CLEAR 0x0002
@@ -2019,6 +2015,9 @@ int ext4_init_fs_context(struct fs_context *fc)
fc->fs_private = ctx;
fc->ops = &ext4_context_ops;
+ /* i_version is always enabled now */
+ fc->sb_flags |= SB_I_VERSION;
+
return 0;
}
@@ -2478,7 +2477,7 @@ static int parse_apply_sb_mount_options(struct super_block *sb,
struct ext4_fs_context *m_ctx)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- char *s_mount_opts = NULL;
+ char s_mount_opts[64];
struct ext4_fs_context *s_ctx = NULL;
struct fs_context *fc = NULL;
int ret = -ENOMEM;
@@ -2486,15 +2485,12 @@ static int parse_apply_sb_mount_options(struct super_block *sb,
if (!sbi->s_es->s_mount_opts[0])
return 0;
- s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
- sizeof(sbi->s_es->s_mount_opts),
- GFP_KERNEL);
- if (!s_mount_opts)
- return ret;
+ if (strscpy_pad(s_mount_opts, sbi->s_es->s_mount_opts) < 0)
+ return -E2BIG;
fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL);
if (!fc)
- goto out_free;
+ return -ENOMEM;
s_ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL);
if (!s_ctx)
@@ -2526,11 +2522,8 @@ parse_failed:
ret = 0;
out_free:
- if (fc) {
- ext4_fc_free(fc);
- kfree(fc);
- }
- kfree(s_mount_opts);
+ ext4_fc_free(fc);
+ kfree(fc);
return ret;
}
@@ -2787,6 +2780,13 @@ static int ext4_check_opt_consistency(struct fs_context *fc,
}
if (is_remount) {
+ if (!sbi->s_journal &&
+ ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT)) {
+ ext4_msg(NULL, KERN_WARNING,
+ "Remounting fs w/o journal so ignoring data_err option");
+ ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT);
+ }
+
if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) &&
(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
ext4_msg(NULL, KERN_ERR, "can't mount with "
@@ -2969,11 +2969,11 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
}
if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
- le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
+ ext4_get_resuid(es) != EXT4_DEF_RESUID)
SEQ_OPTS_PRINT("resuid=%u",
from_kuid_munged(&init_user_ns, sbi->s_resuid));
if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
- le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
+ ext4_get_resgid(es) != EXT4_DEF_RESGID)
SEQ_OPTS_PRINT("resgid=%u",
from_kgid_munged(&init_user_ns, sbi->s_resgid));
def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
@@ -2989,6 +2989,8 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
+ if (nodefs && sb->s_flags & SB_I_VERSION)
+ SEQ_OPTS_PUTS("i_version");
if (nodefs || sbi->s_stripe)
SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
if (nodefs || EXT4_MOUNT_DATA_FLAGS &
@@ -3040,6 +3042,12 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
if (nodefs && !test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS))
SEQ_OPTS_PUTS("prefetch_block_bitmaps");
+ if (ext4_emergency_ro(sb))
+ SEQ_OPTS_PUTS("emergency_ro");
+
+ if (ext4_forced_shutdown(sb))
+ SEQ_OPTS_PUTS("shutdown");
+
ext4_show_quota_options(seq, sb);
return 0;
}
@@ -3207,19 +3215,19 @@ static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
__le32 le_group = cpu_to_le32(block_group);
struct ext4_sb_info *sbi = EXT4_SB(sb);
- if (ext4_has_metadata_csum(sbi->s_sb)) {
+ if (ext4_has_feature_metadata_csum(sbi->s_sb)) {
/* Use new metadata_csum algorithm */
__u32 csum32;
__u16 dummy_csum = 0;
- csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
+ csum32 = ext4_chksum(sbi->s_csum_seed, (__u8 *)&le_group,
sizeof(le_group));
- csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
- csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
+ csum32 = ext4_chksum(csum32, (__u8 *)gdp, offset);
+ csum32 = ext4_chksum(csum32, (__u8 *)&dummy_csum,
sizeof(dummy_csum));
offset += sizeof(dummy_csum);
if (offset < sbi->s_desc_size)
- csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
+ csum32 = ext4_chksum(csum32, (__u8 *)gdp + offset,
sbi->s_desc_size - offset);
crc = csum32 & 0xFFFF;
@@ -3635,7 +3643,7 @@ int ext4_feature_set_ok(struct super_block *sb, int readonly)
*/
static void print_daily_error_info(struct timer_list *t)
{
- struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
+ struct ext4_sb_info *sbi = timer_container_of(sbi, t, s_err_report);
struct super_block *sb = sbi->s_sb;
struct ext4_super_block *es = sbi->s_es;
@@ -3695,7 +3703,8 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
if (group >= elr->lr_next_group) {
ret = 1;
if (elr->lr_first_not_zeroed != ngroups &&
- !sb_rdonly(sb) && test_opt(sb, INIT_INODE_TABLE)) {
+ !ext4_emergency_state(sb) && !sb_rdonly(sb) &&
+ test_opt(sb, INIT_INODE_TABLE)) {
elr->lr_next_group = elr->lr_first_not_zeroed;
elr->lr_mode = EXT4_LI_MODE_ITABLE;
ret = 0;
@@ -4000,7 +4009,7 @@ int ext4_register_li_request(struct super_block *sb,
goto out;
}
- if (sb_rdonly(sb) ||
+ if (ext4_emergency_state(sb) || sb_rdonly(sb) ||
(test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS) &&
(first_not_zeroed == ngroups || !test_opt(sb, INIT_INODE_TABLE))))
goto out;
@@ -4063,7 +4072,7 @@ static int set_journal_csum_feature_set(struct super_block *sb)
int compat, incompat;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- if (ext4_has_metadata_csum(sb)) {
+ if (ext4_has_feature_metadata_csum(sb)) {
/* journal checksum v3 */
compat = 0;
incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
@@ -4182,7 +4191,7 @@ int ext4_calculate_overhead(struct super_block *sb)
unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
ext4_group_t i, ngroups = ext4_get_groups_count(sb);
ext4_fsblk_t overhead = 0;
- char *buf = (char *) get_zeroed_page(GFP_NOFS);
+ char *buf = kvmalloc(sb->s_blocksize, GFP_NOFS | __GFP_ZERO);
if (!buf)
return -ENOMEM;
@@ -4207,7 +4216,7 @@ int ext4_calculate_overhead(struct super_block *sb)
blks = count_overhead(sb, i, buf);
overhead += blks;
if (blks)
- memset(buf, 0, PAGE_SIZE);
+ memset(buf, 0, sb->s_blocksize);
cond_resched();
}
@@ -4230,7 +4239,7 @@ int ext4_calculate_overhead(struct super_block *sb)
}
sbi->s_overhead = overhead;
smp_wmb();
- free_page((unsigned long) buf);
+ kvfree(buf);
return 0;
}
@@ -4351,7 +4360,7 @@ static void ext4_set_def_opts(struct super_block *sb,
if (ext4_has_feature_fast_commit(sb))
set_opt2(sb, JOURNAL_FAST_COMMIT);
/* don't forget to enable journal_csum when metadata_csum is enabled. */
- if (ext4_has_metadata_csum(sb))
+ if (ext4_has_feature_metadata_csum(sb))
set_opt(sb, JOURNAL_CHECKSUM);
if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
@@ -4383,8 +4392,7 @@ static void ext4_set_def_opts(struct super_block *sb,
((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
set_opt(sb, DELALLOC);
- if (sb->s_blocksize <= PAGE_SIZE)
- set_opt(sb, DIOREAD_NOLOCK);
+ set_opt(sb, DIOREAD_NOLOCK);
}
static int ext4_handle_clustersize(struct super_block *sb)
@@ -4443,13 +4451,16 @@ static int ext4_handle_clustersize(struct super_block *sb)
/*
* ext4_atomic_write_init: Initializes filesystem min & max atomic write units.
+ * On a non-bigalloc filesystem, awu is based upon the filesystem blocksize
+ * and bdev awu units.
+ * With bigalloc, it is based upon the bigalloc cluster size and bdev awu units.
* @sb: super block
- * TODO: Later add support for bigalloc
*/
static void ext4_atomic_write_init(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct block_device *bdev = sb->s_bdev;
+ unsigned int clustersize = EXT4_CLUSTER_SIZE(sb);
if (!bdev_can_atomic_write(bdev))
return;
@@ -4459,7 +4470,7 @@ static void ext4_atomic_write_init(struct super_block *sb)
sbi->s_awu_min = max(sb->s_blocksize,
bdev_atomic_write_unit_min_bytes(bdev));
- sbi->s_awu_max = min(sb->s_blocksize,
+ sbi->s_awu_max = min(clustersize,
bdev_atomic_write_unit_max_bytes(bdev));
if (sbi->s_awu_min && sbi->s_awu_max &&
sbi->s_awu_min <= sbi->s_awu_max) {
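To make the clamping above concrete, here is a standalone sketch of the same arithmetic; every value below is hypothetical and this is not ext4 code. The filesystem only advertises atomic writes when the clamped window is non-empty.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* All values are hypothetical stand-ins for sb->s_blocksize,
	 * EXT4_CLUSTER_SIZE(sb) and the bdev atomic-write limits. */
	uint32_t blocksize = 4096;
	uint32_t clustersize = 16384;
	uint32_t bdev_awu_min = 4096;
	uint32_t bdev_awu_max = 65536;

	uint32_t awu_min = blocksize > bdev_awu_min ? blocksize : bdev_awu_min;
	uint32_t awu_max = clustersize < bdev_awu_max ? clustersize : bdev_awu_max;

	if (awu_min && awu_max && awu_min <= awu_max)
		printf("atomic writes advertised: %u..%u bytes\n", awu_min, awu_max);
	else
		printf("atomic writes not advertised\n");
	return 0;
}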
@@ -4484,7 +4495,7 @@ static void ext4_fast_commit_init(struct super_block *sb)
sbi->s_fc_bytes = 0;
ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
sbi->s_fc_ineligible_tid = 0;
- spin_lock_init(&sbi->s_fc_lock);
+ mutex_init(&sbi->s_fc_lock);
memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
sbi->s_fc_replay_state.fc_regions = NULL;
sbi->s_fc_replay_state.fc_regions_size = 0;
@@ -4634,15 +4645,6 @@ static int ext4_init_metadata_csum(struct super_block *sb, struct ext4_super_blo
ext4_setup_csum_trigger(sb, EXT4_JTR_ORPHAN_FILE,
ext4_orphan_file_block_trigger);
- /* Load the checksum driver */
- sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
- if (IS_ERR(sbi->s_chksum_driver)) {
- int ret = PTR_ERR(sbi->s_chksum_driver);
- ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
- sbi->s_chksum_driver = NULL;
- return ret;
- }
-
/* Check superblock checksum */
if (!ext4_superblock_csum_verify(sb, es)) {
ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
@@ -4653,8 +4655,9 @@ static int ext4_init_metadata_csum(struct super_block *sb, struct ext4_super_blo
/* Precompute checksum seed for all metadata */
if (ext4_has_feature_csum_seed(sb))
sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
- else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
- sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
+ else if (ext4_has_feature_metadata_csum(sb) ||
+ ext4_has_feature_ea_inode(sb))
+ sbi->s_csum_seed = ext4_chksum(~0, es->s_uuid,
sizeof(es->s_uuid));
return 0;
}
@@ -4984,10 +4987,7 @@ static int ext4_load_and_init_journal(struct super_block *sb,
return 0;
out:
- /* flush s_sb_upd_work before destroying the journal. */
- flush_work(&sbi->s_sb_upd_work);
- jbd2_journal_destroy(sbi->s_journal);
- sbi->s_journal = NULL;
+ ext4_journal_destroy(sbi, sbi->s_journal);
return -EINVAL;
}
@@ -5024,6 +5024,59 @@ static int ext4_check_journal_data_mode(struct super_block *sb)
return 0;
}
+static const char *ext4_has_journal_option(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
+ return "journal_async_commit";
+ if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM))
+ return "journal_checksum";
+ if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
+ return "commit=";
+ if (EXT4_MOUNT_DATA_FLAGS &
+ (sbi->s_mount_opt ^ sbi->s_def_mount_opt))
+ return "data=";
+ if (test_opt(sb, DATA_ERR_ABORT))
+ return "data_err=abort";
+ return NULL;
+}
+
+/*
+ * Limit the maximum folio order to 2048 blocks to prevent overestimation
+ * of reserve handle credits during folio writeback in environments where
+ * PAGE_SIZE exceeds 4KB.
+ */
+#define EXT4_MAX_PAGECACHE_ORDER(sb) \
+ umin(MAX_PAGECACHE_ORDER, (11 + (sb)->s_blocksize_bits - PAGE_SHIFT))
+static void ext4_set_max_mapping_order(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
+ sbi->s_max_folio_order = sbi->s_min_folio_order;
+ else
+ sbi->s_max_folio_order = EXT4_MAX_PAGECACHE_ORDER(sb);
+}
+
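The 2048-block cap described in the comment above can be checked with a small standalone program; MAX_PAGECACHE_ORDER_DEMO and the shift values are stand-ins, since the real constants depend on the kernel configuration and architecture.

#include <stdio.h>

/* Hypothetical stand-in for the kernel's MAX_PAGECACHE_ORDER. */
#define MAX_PAGECACHE_ORDER_DEMO 25

static unsigned int max_order(unsigned int blocksize_bits, unsigned int page_shift)
{
	unsigned int order = 11 + blocksize_bits - page_shift;

	return order < MAX_PAGECACHE_ORDER_DEMO ? order : MAX_PAGECACHE_ORDER_DEMO;
}

int main(void)
{
	/* 4K blocks on 4K pages: order 11 = 2048 pages = 2048 blocks. */
	printf("4K blocks, 4K pages  -> order %u\n", max_order(12, 12));
	/* 4K blocks on 64K pages: order 7 = 128 pages, still 2048 4K blocks. */
	printf("4K blocks, 64K pages -> order %u\n", max_order(12, 16));
	return 0;
}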
+static int ext4_check_large_folio(struct super_block *sb)
+{
+ const char *err_str = NULL;
+
+ if (ext4_has_feature_encrypt(sb))
+ err_str = "encrypt";
+
+ if (!err_str) {
+ ext4_set_max_mapping_order(sb);
+ } else if (sb->s_blocksize > PAGE_SIZE) {
+ ext4_msg(sb, KERN_ERR, "bs(%lu) > ps(%lu) unsupported for %s",
+ sb->s_blocksize, PAGE_SIZE, err_str);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int ext4_load_super(struct super_block *sb, ext4_fsblk_t *lsb,
int silent)
{
@@ -5091,11 +5144,8 @@ static int ext4_load_super(struct super_block *sb, ext4_fsblk_t *lsb,
* If the default block size is not the same as the real block size,
* we need to reload it.
*/
- if (sb->s_blocksize == blocksize) {
- *lsb = logical_sb_block;
- sbi->s_sbh = bh;
- return 0;
- }
+ if (sb->s_blocksize == blocksize)
+ goto success;
/*
* bh must be released before kill_bdev(), otherwise
@@ -5126,6 +5176,9 @@ static int ext4_load_super(struct super_block *sb, ext4_fsblk_t *lsb,
ext4_msg(sb, KERN_ERR, "Magic mismatch, very weird!");
goto out;
}
+
+success:
+ sbi->s_min_folio_order = get_order(blocksize);
*lsb = logical_sb_block;
sbi->s_sbh = bh;
return 0;
@@ -5250,7 +5303,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
/* Set defaults for the variables that will be set during parsing */
if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO))
- ctx->journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
+ ctx->journal_ioprio = EXT4_DEF_JOURNAL_IOPRIO;
sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
sbi->s_sectors_written_start =
@@ -5269,11 +5322,13 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
ext4_set_def_opts(sb, es);
- sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
- sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
+ sbi->s_resuid = make_kuid(&init_user_ns, ext4_get_resuid(es));
+ sbi->s_resgid = make_kgid(&init_user_ns, ext4_get_resgid(es));
sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
+ sbi->s_sb_update_kb = EXT4_DEF_SB_UPDATE_INTERVAL_KB;
+ sbi->s_sb_update_sec = EXT4_DEF_SB_UPDATE_INTERVAL_SEC;
/*
* set default s_li_wait_mult for lazyinit, for the case there is
@@ -5298,6 +5353,10 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
ext4_apply_options(fc, sb);
+ err = ext4_check_large_folio(sb);
+ if (err < 0)
+ goto failed_mount;
+
err = ext4_encoding_init(sb, es);
if (err)
goto failed_mount;
@@ -5309,8 +5368,8 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
- /* i_version is always enabled now */
- sb->s_flags |= SB_I_VERSION;
+ /* HSM events are allowed by default. */
+ sb->s_iflags |= SB_I_ALLOW_HSM;
err = ext4_check_feature_compatibility(sb, es, silent);
if (err)
@@ -5406,36 +5465,25 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
err = ext4_load_and_init_journal(sb, es, ctx);
if (err)
goto failed_mount3a;
+ if (bdev_read_only(sb->s_bdev))
+ needs_recovery = 0;
} else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
ext4_has_feature_journal_needs_recovery(sb)) {
ext4_msg(sb, KERN_ERR, "required journal recovery "
"suppressed and not mounted read-only");
goto failed_mount3a;
} else {
+ const char *journal_option;
+
/* Nojournal mode, all journal mount options are illegal */
- if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "journal_async_commit, fs mounted w/o journal");
+ journal_option = ext4_has_journal_option(sb);
+ if (journal_option != NULL) {
+ ext4_msg(sb, KERN_ERR,
+ "can't mount with %s, fs mounted w/o journal",
+ journal_option);
goto failed_mount3a;
}
- if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "journal_checksum, fs mounted w/o journal");
- goto failed_mount3a;
- }
- if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "commit=%lu, fs mounted w/o journal",
- sbi->s_commit_interval / HZ);
- goto failed_mount3a;
- }
- if (EXT4_MOUNT_DATA_FLAGS &
- (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "data=, fs mounted w/o journal");
- goto failed_mount3a;
- }
sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
clear_opt(sb, JOURNAL_CHECKSUM);
clear_opt(sb, DATA_FLAGS);
@@ -5624,9 +5672,11 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
goto failed_mount9;
}
- if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev))
+ if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev)) {
ext4_msg(sb, KERN_WARNING,
"mounting with \"discard\" option, but the device does not support discard");
+ clear_opt(sb, DISCARD);
+ }
if (es->s_error_count)
mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
@@ -5673,10 +5723,7 @@ failed_mount_wq:
sbi->s_ea_block_cache = NULL;
if (sbi->s_journal) {
- /* flush s_sb_upd_work before journal destroy. */
- flush_work(&sbi->s_sb_upd_work);
- jbd2_journal_destroy(sbi->s_journal);
- sbi->s_journal = NULL;
+ ext4_journal_destroy(sbi, sbi->s_journal);
}
failed_mount3a:
ext4_es_unregister_shrinker(sbi);
@@ -5684,12 +5731,9 @@ failed_mount3:
/* flush s_sb_upd_work before sbi destroy */
flush_work(&sbi->s_sb_upd_work);
ext4_stop_mmpd(sbi);
- del_timer_sync(&sbi->s_err_report);
+ timer_delete_sync(&sbi->s_err_report);
ext4_group_desc_free(sbi);
failed_mount:
- if (sbi->s_chksum_driver)
- crypto_free_shash(sbi->s_chksum_driver);
-
#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
#endif
@@ -5784,10 +5828,6 @@ static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
journal->j_flags |= JBD2_BARRIER;
else
journal->j_flags &= ~JBD2_BARRIER;
- if (test_opt(sb, DATA_ERR_ABORT))
- journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
- else
- journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
/*
* Always enable journal cycle record option, letting the journal
* records log transactions continuously between each mount.
@@ -5843,7 +5883,7 @@ static int ext4_journal_bmap(journal_t *journal, sector_t *block)
ext4_msg(journal->j_inode->i_sb, KERN_CRIT,
"journal bmap failed: block %llu ret %d\n",
*block, ret);
- jbd2_journal_abort(journal, ret ? ret : -EIO);
+ jbd2_journal_abort(journal, ret ? ret : -EFSCORRUPTED);
return ret;
}
*block = map.m_pblk;
@@ -5927,7 +5967,7 @@ static struct file *ext4_get_journal_blkdev(struct super_block *sb,
if ((le32_to_cpu(es->s_feature_ro_compat) &
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
- es->s_checksum != ext4_superblock_csum(sb, es)) {
+ es->s_checksum != ext4_superblock_csum(es)) {
ext4_msg(sb, KERN_ERR, "external journal has corrupt superblock");
errno = -EFSCORRUPTED;
goto out_bh;
@@ -5984,7 +6024,7 @@ static journal_t *ext4_open_dev_journal(struct super_block *sb,
return journal;
out_journal:
- jbd2_journal_destroy(journal);
+ ext4_journal_destroy(EXT4_SB(sb), journal);
out_bdev:
bdev_fput(bdev_file);
return ERR_PTR(errno);
@@ -6101,8 +6141,7 @@ static int ext4_load_journal(struct super_block *sb,
EXT4_SB(sb)->s_journal = journal;
err = ext4_clear_journal_err(sb, es);
if (err) {
- EXT4_SB(sb)->s_journal = NULL;
- jbd2_journal_destroy(journal);
+ ext4_journal_destroy(EXT4_SB(sb), journal);
return err;
}
@@ -6120,7 +6159,7 @@ static int ext4_load_journal(struct super_block *sb,
return 0;
err_out:
- jbd2_journal_destroy(journal);
+ ext4_journal_destroy(EXT4_SB(sb), journal);
return err;
}
@@ -6347,8 +6386,9 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
bool needs_barrier = false;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- if (unlikely(ext4_forced_shutdown(sb)))
- return -EIO;
+ ret = ext4_emergency_state(sb);
+ if (unlikely(ret))
+ return ret;
trace_ext4_sync_fs(sb, wait);
flush_workqueue(sbi->rsv_conversion_wq);
@@ -6430,7 +6470,7 @@ out:
*/
static int ext4_unfreeze(struct super_block *sb)
{
- if (ext4_forced_shutdown(sb))
+ if (ext4_emergency_state(sb))
return 0;
if (EXT4_SB(sb)->s_journal) {
@@ -6506,7 +6546,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
ctx->journal_ioprio =
sbi->s_journal->j_task->io_context->ioprio;
else
- ctx->journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
+ ctx->journal_ioprio = EXT4_DEF_JOURNAL_IOPRIO;
}
@@ -6586,7 +6626,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
flush_work(&sbi->s_sb_upd_work);
if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) {
- if (ext4_forced_shutdown(sb)) {
+ if (ext4_emergency_state(sb)) {
err = -EROFS;
goto restore_opts;
}
@@ -6791,6 +6831,7 @@ static int ext4_reconfigure(struct fs_context *fc)
{
struct super_block *sb = fc->root->d_sb;
int ret;
+ bool old_ro = sb_rdonly(sb);
fc->s_fs_info = EXT4_SB(sb);
@@ -6802,9 +6843,9 @@ static int ext4_reconfigure(struct fs_context *fc)
if (ret < 0)
return ret;
- ext4_msg(sb, KERN_INFO, "re-mounted %pU %s. Quota mode: %s.",
- &sb->s_uuid, sb_rdonly(sb) ? "ro" : "r/w",
- ext4_quota_mode(sb));
+ ext4_msg(sb, KERN_INFO, "re-mounted %pU%s.",
+ &sb->s_uuid,
+ (old_ro != sb_rdonly(sb)) ? (sb_rdonly(sb) ? " ro" : " r/w") : "");
return 0;
}
@@ -6828,22 +6869,29 @@ static int ext4_statfs_project(struct super_block *sb,
dquot->dq_dqb.dqb_bhardlimit);
limit >>= sb->s_blocksize_bits;
- if (limit && buf->f_blocks > limit) {
+ if (limit) {
+ uint64_t remaining = 0;
+
curblock = (dquot->dq_dqb.dqb_curspace +
dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
- buf->f_blocks = limit;
- buf->f_bfree = buf->f_bavail =
- (buf->f_blocks > curblock) ?
- (buf->f_blocks - curblock) : 0;
+ if (limit > curblock)
+ remaining = limit - curblock;
+
+ buf->f_blocks = min(buf->f_blocks, limit);
+ buf->f_bfree = min(buf->f_bfree, remaining);
+ buf->f_bavail = min(buf->f_bavail, remaining);
}
limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
dquot->dq_dqb.dqb_ihardlimit);
- if (limit && buf->f_files > limit) {
- buf->f_files = limit;
- buf->f_ffree =
- (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
- (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
+ if (limit) {
+ uint64_t remaining = 0;
+
+ if (limit > dquot->dq_dqb.dqb_curinodes)
+ remaining = limit - dquot->dq_dqb.dqb_curinodes;
+
+ buf->f_files = min(buf->f_files, limit);
+ buf->f_ffree = min(buf->f_ffree, remaining);
}
spin_unlock(&dquot->dq_dqb_lock);
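A minimal standalone sketch of the clamping performed by the new code above, using made-up quota figures: once a limit exists, the reported totals and free counts are clamped to the limit and to whatever remains under it, instead of only being rewritten when the totals exceed the limit.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t min_u64(uint64_t a, uint64_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Made-up figures: a 1000-block project quota with 400 blocks used
	 * on a filesystem that would otherwise report much larger totals. */
	uint64_t f_blocks = 1 << 20, f_bfree = 900000, f_bavail = 850000;
	uint64_t limit = 1000, curblock = 400;
	uint64_t remaining = limit > curblock ? limit - curblock : 0;

	f_blocks = min_u64(f_blocks, limit);
	f_bfree = min_u64(f_bfree, remaining);
	f_bavail = min_u64(f_bavail, remaining);

	printf("blocks=%" PRIu64 " bfree=%" PRIu64 " bavail=%" PRIu64 "\n",
	       f_blocks, f_bfree, f_bavail);
	return 0;
}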
@@ -6946,12 +6994,25 @@ static int ext4_release_dquot(struct dquot *dquot)
{
int ret, err;
handle_t *handle;
+ bool freeze_protected = false;
+
+ /*
+ * Trying to sb_start_intwrite() in a running transaction
+ * can result in a deadlock. Further, running transactions
+ * are already protected from freezing.
+ */
+ if (!ext4_journal_current_handle()) {
+ sb_start_intwrite(dquot->dq_sb);
+ freeze_protected = true;
+ }
handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
if (IS_ERR(handle)) {
/* Release dquot anyway to avoid endless cycle in dqput() */
dquot_release(dquot);
+ if (freeze_protected)
+ sb_end_intwrite(dquot->dq_sb);
return PTR_ERR(handle);
}
ret = dquot_release(dquot);
@@ -6962,6 +7023,10 @@ static int ext4_release_dquot(struct dquot *dquot)
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
+
+ if (freeze_protected)
+ sb_end_intwrite(dquot->dq_sb);
+
return ret;
}
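The conditional freeze-protection pattern used above can be sketched in isolation as follows; in_transaction(), take_protection() and drop_protection() are hypothetical stand-ins for ext4_journal_current_handle(), sb_start_intwrite() and sb_end_intwrite(), not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

static bool in_transaction(void)  { return false; }
static void take_protection(void) { puts("freeze protection taken"); }
static void drop_protection(void) { puts("freeze protection dropped"); }

static int release_quota_like(void)
{
	bool freeze_protected = false;

	/* Only grab the protection when no running transaction already
	 * shields us from a concurrent freeze. */
	if (!in_transaction()) {
		take_protection();
		freeze_protected = true;
	}

	/* ... the real function would start a handle and release the dquot ... */

	/* Every exit path drops exactly what it took. */
	if (freeze_protected)
		drop_protection();
	return 0;
}

int main(void)
{
	return release_quota_like();
}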
@@ -7299,7 +7364,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
}
lock_buffer(bh);
memcpy(bh->b_data+offset, data, len);
- flush_dcache_page(bh->b_page);
+ flush_dcache_folio(bh->b_folio);
unlock_buffer(bh);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
brelse(bh);
@@ -7388,16 +7453,14 @@ static struct file_system_type ext4_fs_type = {
.init_fs_context = ext4_init_fs_context,
.parameters = ext4_param_specs,
.kill_sb = ext4_kill_sb,
- .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME |
+ FS_LBS,
};
MODULE_ALIAS_FS("ext4");
-/* Shared across all ext4 file systems */
-wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
-
static int __init ext4_init_fs(void)
{
- int i, err;
+ int err;
ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
ext4_li_info = NULL;
@@ -7405,9 +7468,6 @@ static int __init ext4_init_fs(void)
/* Build-time check for flags consistency */
ext4_check_flag_values();
- for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
- init_waitqueue_head(&ext4__ioend_wq[i]);
-
err = ext4_init_es();
if (err)
return err;
@@ -7494,6 +7554,5 @@ static void __exit ext4_exit_fs(void)
MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
-MODULE_SOFTDEP("pre: crc32c");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index ddb54608ca2e..0018e09b867e 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -254,6 +254,8 @@ EXT4_ATTR(journal_task, 0444, journal_task);
EXT4_RW_ATTR_SBI_UI(mb_prefetch, s_mb_prefetch);
EXT4_RW_ATTR_SBI_UI(mb_prefetch_limit, s_mb_prefetch_limit);
EXT4_RW_ATTR_SBI_UL(last_trim_minblks, s_last_trim_minblks);
+EXT4_RW_ATTR_SBI_UI(sb_update_sec, s_sb_update_sec);
+EXT4_RW_ATTR_SBI_UI(sb_update_kb, s_sb_update_kb);
static unsigned int old_bump_val = 128;
EXT4_ATTR_PTR(max_writeback_mb_bump, 0444, pointer_ui, &old_bump_val);
@@ -305,6 +307,8 @@ static struct attribute *ext4_attrs[] = {
ATTR_LIST(mb_prefetch),
ATTR_LIST(mb_prefetch_limit),
ATTR_LIST(last_trim_minblks),
+ ATTR_LIST(sb_update_sec),
+ ATTR_LIST(sb_update_kb),
NULL,
};
ATTRIBUTE_GROUPS(ext4);
@@ -328,6 +332,9 @@ EXT4_ATTR_FEATURE(fast_commit);
#if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
EXT4_ATTR_FEATURE(encrypted_casefold);
#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+EXT4_ATTR_FEATURE(blocksize_gt_pagesize);
+#endif
static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(lazy_itable_init),
@@ -348,6 +355,9 @@ static struct attribute *ext4_feat_attrs[] = {
#if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
ATTR_LIST(encrypted_casefold),
#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ ATTR_LIST(blocksize_gt_pagesize),
+#endif
NULL,
};
ATTRIBUTE_GROUPS(ext4_feat);
diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c
index d9203228ce97..415d9c4d8a32 100644
--- a/fs/ext4/verity.c
+++ b/fs/ext4/verity.c
@@ -302,7 +302,7 @@ static int ext4_get_verity_descriptor_location(struct inode *inode,
end_lblk = le32_to_cpu(last_extent->ee_block) +
ext4_ext_get_actual_len(last_extent);
- desc_size_pos = (u64)end_lblk << inode->i_blkbits;
+ desc_size_pos = EXT4_LBLK_TO_B(inode, end_lblk);
ext4_free_ext_path(path);
if (desc_size_pos < sizeof(desc_size_disk))
@@ -389,6 +389,8 @@ static int ext4_write_merkle_tree_block(struct inode *inode, const void *buf,
}
const struct fsverity_operations ext4_verityops = {
+ .inode_info_offs = (int)offsetof(struct ext4_inode_info, i_verity_info) -
+ (int)offsetof(struct ext4_inode_info, vfs_inode),
.begin_enable_verity = ext4_begin_enable_verity,
.end_enable_verity = ext4_end_enable_verity,
.get_verity_descriptor = ext4_get_verity_descriptor,
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 7647e9f6e190..2e02efbddaac 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -139,12 +139,12 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
__u32 dummy_csum = 0;
int offset = offsetof(struct ext4_xattr_header, h_checksum);
- csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
+ csum = ext4_chksum(sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
sizeof(dsk_block_nr));
- csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset);
- csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
+ csum = ext4_chksum(csum, (__u8 *)hdr, offset);
+ csum = ext4_chksum(csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
offset += sizeof(dummy_csum);
- csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset,
+ csum = ext4_chksum(csum, (__u8 *)hdr + offset,
EXT4_BLOCK_SIZE(inode->i_sb) - offset);
return cpu_to_le32(csum);
@@ -156,7 +156,7 @@ static int ext4_xattr_block_csum_verify(struct inode *inode,
struct ext4_xattr_header *hdr = BHDR(bh);
int ret = 1;
- if (ext4_has_metadata_csum(inode->i_sb)) {
+ if (ext4_has_feature_metadata_csum(inode->i_sb)) {
lock_buffer(bh);
ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
bh->b_blocknr, hdr));
@@ -168,7 +168,7 @@ static int ext4_xattr_block_csum_verify(struct inode *inode,
static void ext4_xattr_block_csum_set(struct inode *inode,
struct buffer_head *bh)
{
- if (ext4_has_metadata_csum(inode->i_sb))
+ if (ext4_has_feature_metadata_csum(inode->i_sb))
BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
bh->b_blocknr, BHDR(bh));
}
@@ -251,6 +251,10 @@ check_xattrs(struct inode *inode, struct buffer_head *bh,
err_str = "invalid ea_ino";
goto errout;
}
+ if (ea_ino && !size) {
+ err_str = "invalid size in ea xattr";
+ goto errout;
+ }
if (size > EXT4_XATTR_SIZE_MAX) {
err_str = "e_value size too large";
goto errout;
@@ -308,7 +312,7 @@ __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
__ext4_xattr_check_block((inode), (bh), __func__, __LINE__)
-static inline int
+int
__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
void *end, const char *function, unsigned int line)
{
@@ -316,9 +320,6 @@ __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
function, line);
}
-#define xattr_check_inode(inode, header, end) \
- __xattr_check_inode((inode), (header), (end), __func__, __LINE__)
-
static int
xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry,
void *end, int name_index, const char *name, int sorted)
@@ -341,7 +342,7 @@ xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry,
cmp = name_len - entry->e_name_len;
if (!cmp)
cmp = memcmp(name, entry->e_name, name_len);
- if (cmp <= 0 && (sorted || cmp == 0))
+ if (!cmp || (cmp < 0 && sorted))
break;
}
*pentry = entry;
@@ -351,7 +352,7 @@ xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry,
static u32
ext4_xattr_inode_hash(struct ext4_sb_info *sbi, const void *buffer, size_t size)
{
- return ext4_chksum(sbi, sbi->s_csum_seed, buffer, size);
+ return ext4_chksum(sbi->s_csum_seed, buffer, size);
}
static u64 ext4_xattr_inode_get_ref(struct inode *ea_inode)
@@ -649,10 +650,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
return error;
raw_inode = ext4_raw_inode(&iloc);
header = IHDR(inode, raw_inode);
- end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
- error = xattr_check_inode(inode, header, end);
- if (error)
- goto cleanup;
+ end = ITAIL(inode, raw_inode);
entry = IFIRST(header);
error = xattr_find_entry(inode, &entry, end, name_index, name, 0);
if (error)
@@ -783,7 +781,6 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
struct ext4_xattr_ibody_header *header;
struct ext4_inode *raw_inode;
struct ext4_iloc iloc;
- void *end;
int error;
if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
@@ -793,14 +790,9 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
return error;
raw_inode = ext4_raw_inode(&iloc);
header = IHDR(inode, raw_inode);
- end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
- error = xattr_check_inode(inode, header, end);
- if (error)
- goto cleanup;
error = ext4_xattr_list_entries(dentry, IFIRST(header),
buffer, buffer_size);
-cleanup:
brelse(iloc.bh);
return error;
}
@@ -868,7 +860,6 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
struct ext4_xattr_ibody_header *header;
struct ext4_xattr_entry *entry;
qsize_t ea_inode_refs = 0;
- void *end;
int ret;
lockdep_assert_held_read(&EXT4_I(inode)->xattr_sem);
@@ -879,10 +870,6 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
goto out;
raw_inode = ext4_raw_inode(&iloc);
header = IHDR(inode, raw_inode);
- end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
- ret = xattr_check_inode(inode, header, end);
- if (ret)
- goto out;
for (entry = IFIRST(header); !IS_LAST_ENTRY(entry);
entry = EXT4_XATTR_NEXT(entry))
@@ -979,7 +966,7 @@ int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode,
* so we need to reserve credits for this eventuality
*/
if (inode && ext4_has_inline_data(inode))
- credits += ext4_writepage_trans_blocks(inode) + 1;
+ credits += ext4_chunk_trans_extent(inode, 1) + 1;
/* We are done if ea_inode feature is not enabled. */
if (!ext4_has_feature_ea_inode(sb))
@@ -1036,7 +1023,7 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
int ref_change)
{
struct ext4_iloc iloc;
- s64 ref_count;
+ u64 ref_count;
int ret;
inode_lock_nested(ea_inode, I_MUTEX_XATTR);
@@ -1046,13 +1033,17 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
goto out;
ref_count = ext4_xattr_inode_get_ref(ea_inode);
+ if ((ref_count == 0 && ref_change < 0) || (ref_count == U64_MAX && ref_change > 0)) {
+ ext4_error_inode(ea_inode, __func__, __LINE__, 0,
+ "EA inode %lu ref wraparound: ref_count=%lld ref_change=%d",
+ ea_inode->i_ino, ref_count, ref_change);
+ ret = -EFSCORRUPTED;
+ goto out;
+ }
ref_count += ref_change;
ext4_xattr_inode_set_ref(ea_inode, ref_count);
if (ref_change > 0) {
- WARN_ONCE(ref_count <= 0, "EA inode %lu ref_count=%lld",
- ea_inode->i_ino, ref_count);
-
if (ref_count == 1) {
WARN_ONCE(ea_inode->i_nlink, "EA inode %lu i_nlink=%u",
ea_inode->i_ino, ea_inode->i_nlink);
@@ -1061,9 +1052,6 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
ext4_orphan_del(handle, ea_inode);
}
} else {
- WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld",
- ea_inode->i_ino, ref_count);
-
if (ref_count == 0) {
WARN_ONCE(ea_inode->i_nlink != 1,
"EA inode %lu i_nlink=%u",
@@ -1176,15 +1164,28 @@ ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent,
{
struct inode *ea_inode;
struct ext4_xattr_entry *entry;
+ struct ext4_iloc iloc;
bool dirty = false;
unsigned int ea_ino;
int err;
int credits;
+ void *end;
+
+ if (block_csum)
+ end = (void *)bh->b_data + bh->b_size;
+ else {
+ err = ext4_get_inode_loc(parent, &iloc);
+ if (err) {
+ EXT4_ERROR_INODE(parent, "parent inode loc (error %d)", err);
+ return;
+ }
+ end = (void *)ext4_raw_inode(&iloc) + EXT4_SB(parent->i_sb)->s_inode_size;
+ }
/* One credit for dec ref on ea_inode, one for orphan list addition, */
credits = 2 + extra_credits;
- for (entry = first; !IS_LAST_ENTRY(entry);
+ for (entry = first; (void *)entry < end && !IS_LAST_ENTRY(entry);
entry = EXT4_XATTR_NEXT(entry)) {
if (!entry->e_value_inum)
continue;
@@ -1538,7 +1539,7 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
WARN_ON_ONCE(ext4_handle_valid(journal_current_handle()) &&
!(current->flags & PF_MEMALLOC_NOFS));
- ea_data = kvmalloc(value_len, GFP_KERNEL);
+ ea_data = kvmalloc(value_len, GFP_NOFS);
if (!ea_data) {
mb_cache_entry_put(ea_inode_cache, ce);
return NULL;
@@ -2235,11 +2236,8 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
header = IHDR(inode, raw_inode);
is->s.base = is->s.first = IFIRST(header);
is->s.here = is->s.first;
- is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+ is->s.end = ITAIL(inode, raw_inode);
if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
- error = xattr_check_inode(inode, header, is->s.end);
- if (error)
- return error;
/* Find the named attribute. */
error = xattr_find_entry(inode, &is->s.here, is->s.end,
i->name_index, i->name, 0);
@@ -2786,14 +2784,10 @@ retry:
*/
base = IFIRST(header);
- end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+ end = ITAIL(inode, raw_inode);
min_offs = end - base;
total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
- error = xattr_check_inode(inode, header, end);
- if (error)
- goto cleanup;
-
ifree = ext4_xattr_free_space(base, &min_offs, base, &total_ino);
if (ifree >= isize_diff)
goto shift;
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index b25c2d7b5f99..1fedf44d4fb6 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -67,6 +67,9 @@ struct ext4_xattr_entry {
((void *)raw_inode + \
EXT4_GOOD_OLD_INODE_SIZE + \
EXT4_I(inode)->i_extra_isize))
+#define ITAIL(inode, raw_inode) \
+ ((void *)(raw_inode) + \
+ EXT4_SB((inode)->i_sb)->s_inode_size)
#define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
/*
@@ -206,6 +209,13 @@ extern int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
extern struct mb_cache *ext4_xattr_create_cache(void);
extern void ext4_xattr_destroy_cache(struct mb_cache *);
+extern int
+__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
+ void *end, const char *function, unsigned int line);
+
+#define xattr_check_inode(inode, header, end) \
+ __xattr_check_inode((inode), (header), (end), __func__, __LINE__)
+
#ifdef CONFIG_EXT4_FS_SECURITY
extern int ext4_init_security(handle_t *handle, struct inode *inode,
struct inode *dir, const struct qstr *qstr);
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index 68a1e23e1557..5916a02fb46d 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -4,8 +4,7 @@ config F2FS_FS
depends on BLOCK
select BUFFER_HEAD
select NLS
- select CRYPTO
- select CRYPTO_CRC32
+ select CRC32
select F2FS_FS_XATTR if FS_ENCRYPTION
select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
select FS_IOMAP
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 1fbc0607363b..fa8d81a30fb9 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -9,6 +9,7 @@
*
* Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
*/
+#include <linux/fs_struct.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "xattr.h"
@@ -166,7 +167,7 @@ fail:
}
static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type,
- struct page *dpage)
+ struct folio *dfolio)
{
int name_index = F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT;
void *value = NULL;
@@ -176,13 +177,13 @@ static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type,
if (type == ACL_TYPE_ACCESS)
name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
- retval = f2fs_getxattr(inode, name_index, "", NULL, 0, dpage);
+ retval = f2fs_getxattr(inode, name_index, "", NULL, 0, dfolio);
if (retval > 0) {
value = f2fs_kmalloc(F2FS_I_SB(inode), retval, GFP_F2FS_ZERO);
if (!value)
return ERR_PTR(-ENOMEM);
retval = f2fs_getxattr(inode, name_index, "", value,
- retval, dpage);
+ retval, dfolio);
}
if (retval > 0)
@@ -227,7 +228,7 @@ static int f2fs_acl_update_mode(struct mnt_idmap *idmap,
static int __f2fs_set_acl(struct mnt_idmap *idmap,
struct inode *inode, int type,
- struct posix_acl *acl, struct page *ipage)
+ struct posix_acl *acl, struct folio *ifolio)
{
int name_index;
void *value = NULL;
@@ -238,9 +239,8 @@ static int __f2fs_set_acl(struct mnt_idmap *idmap,
switch (type) {
case ACL_TYPE_ACCESS:
name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
- if (acl && !ipage) {
- error = f2fs_acl_update_mode(idmap, inode,
- &mode, &acl);
+ if (acl && !ifolio) {
+ error = f2fs_acl_update_mode(idmap, inode, &mode, &acl);
if (error)
return error;
set_acl_inode(inode, mode);
@@ -265,7 +265,7 @@ static int __f2fs_set_acl(struct mnt_idmap *idmap,
}
}
- error = f2fs_setxattr(inode, name_index, "", value, size, ipage, 0);
+ error = f2fs_setxattr(inode, name_index, "", value, size, ifolio, 0);
kfree(value);
if (!error)
@@ -360,7 +360,7 @@ static int f2fs_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
static int f2fs_acl_create(struct inode *dir, umode_t *mode,
struct posix_acl **default_acl, struct posix_acl **acl,
- struct page *dpage)
+ struct folio *dfolio)
{
struct posix_acl *p;
struct posix_acl *clone;
@@ -372,7 +372,7 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,
if (S_ISLNK(*mode) || !IS_POSIXACL(dir))
return 0;
- p = __f2fs_get_acl(dir, ACL_TYPE_DEFAULT, dpage);
+ p = __f2fs_get_acl(dir, ACL_TYPE_DEFAULT, dfolio);
if (!p || p == ERR_PTR(-EOPNOTSUPP)) {
*mode &= ~current_umask();
return 0;
@@ -409,29 +409,29 @@ release_acl:
return ret;
}
-int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
- struct page *dpage)
+int f2fs_init_acl(struct inode *inode, struct inode *dir, struct folio *ifolio,
+ struct folio *dfolio)
{
struct posix_acl *default_acl = NULL, *acl = NULL;
int error;
- error = f2fs_acl_create(dir, &inode->i_mode, &default_acl, &acl, dpage);
+ error = f2fs_acl_create(dir, &inode->i_mode, &default_acl, &acl, dfolio);
if (error)
return error;
f2fs_mark_inode_dirty_sync(inode, true);
if (default_acl) {
- error = __f2fs_set_acl(NULL, inode, ACL_TYPE_DEFAULT, default_acl,
- ipage);
+ error = __f2fs_set_acl(NULL, inode, ACL_TYPE_DEFAULT,
+ default_acl, ifolio);
posix_acl_release(default_acl);
} else {
inode->i_default_acl = NULL;
}
if (acl) {
if (!error)
- error = __f2fs_set_acl(NULL, inode, ACL_TYPE_ACCESS, acl,
- ipage);
+ error = __f2fs_set_acl(NULL, inode, ACL_TYPE_ACCESS,
+ acl, ifolio);
posix_acl_release(acl);
} else {
inode->i_acl = NULL;
diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h
index 94ebfbfbdc6f..20e87e63c089 100644
--- a/fs/f2fs/acl.h
+++ b/fs/f2fs/acl.h
@@ -33,17 +33,17 @@ struct f2fs_acl_header {
#ifdef CONFIG_F2FS_FS_POSIX_ACL
-extern struct posix_acl *f2fs_get_acl(struct inode *, int, bool);
-extern int f2fs_set_acl(struct mnt_idmap *, struct dentry *,
+struct posix_acl *f2fs_get_acl(struct inode *, int, bool);
+int f2fs_set_acl(struct mnt_idmap *, struct dentry *,
struct posix_acl *, int);
-extern int f2fs_init_acl(struct inode *, struct inode *, struct page *,
- struct page *);
+int f2fs_init_acl(struct inode *, struct inode *, struct folio *ifolio,
+ struct folio *dfolio);
#else
#define f2fs_get_acl NULL
#define f2fs_set_acl NULL
static inline int f2fs_init_acl(struct inode *inode, struct inode *dir,
- struct page *ipage, struct page *dpage)
+ struct folio *ifolio, struct folio *dfolio)
{
return 0;
}
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index efda9a022981..300664269eb6 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -21,7 +21,7 @@
#include "iostat.h"
#include <trace/events/f2fs.h>
-#define DEFAULT_CHECKPOINT_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
+#define DEFAULT_CHECKPOINT_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 3))
static struct kmem_cache *ino_entry_slab;
struct kmem_cache *f2fs_inode_entry_slab;
@@ -29,7 +29,7 @@ struct kmem_cache *f2fs_inode_entry_slab;
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
unsigned char reason)
{
- f2fs_build_fault_attr(sbi, 0, 0);
+ f2fs_build_fault_attr(sbi, 0, 0, FAULT_ALL);
if (!end_io)
f2fs_flush_merged_writes(sbi);
f2fs_handle_critical_error(sbi, reason);
@@ -38,27 +38,27 @@ void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
/*
* We guarantee no failure on the returned page.
*/
-struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
+struct folio *f2fs_grab_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index)
{
struct address_space *mapping = META_MAPPING(sbi);
- struct page *page;
+ struct folio *folio;
repeat:
- page = f2fs_grab_cache_page(mapping, index, false);
- if (!page) {
+ folio = f2fs_grab_cache_folio(mapping, index, false);
+ if (IS_ERR(folio)) {
cond_resched();
goto repeat;
}
- f2fs_wait_on_page_writeback(page, META, true, true);
- if (!PageUptodate(page))
- SetPageUptodate(page);
- return page;
+ f2fs_folio_wait_writeback(folio, META, true, true);
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ return folio;
}
-static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
+static struct folio *__get_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index,
bool is_meta)
{
struct address_space *mapping = META_MAPPING(sbi);
- struct page *page;
+ struct folio *folio;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = META,
@@ -74,64 +74,64 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
if (unlikely(!is_meta))
fio.op_flags &= ~REQ_META;
repeat:
- page = f2fs_grab_cache_page(mapping, index, false);
- if (!page) {
+ folio = f2fs_grab_cache_folio(mapping, index, false);
+ if (IS_ERR(folio)) {
cond_resched();
goto repeat;
}
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
goto out;
- fio.page = page;
+ fio.folio = folio;
err = f2fs_submit_page_bio(&fio);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return ERR_PTR(err);
}
f2fs_update_iostat(sbi, NULL, FS_META_READ_IO, F2FS_BLKSIZE);
- lock_page(page);
- if (unlikely(page->mapping != mapping)) {
- f2fs_put_page(page, 1);
+ folio_lock(folio);
+ if (unlikely(!is_meta_folio(folio))) {
+ f2fs_folio_put(folio, true);
goto repeat;
}
- if (unlikely(!PageUptodate(page))) {
- f2fs_handle_page_eio(sbi, page_folio(page), META);
- f2fs_put_page(page, 1);
+ if (unlikely(!folio_test_uptodate(folio))) {
+ f2fs_handle_page_eio(sbi, folio, META);
+ f2fs_folio_put(folio, true);
return ERR_PTR(-EIO);
}
out:
- return page;
+ return folio;
}
-struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
+struct folio *f2fs_get_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index)
{
- return __get_meta_page(sbi, index, true);
+ return __get_meta_folio(sbi, index, true);
}
-struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index)
+struct folio *f2fs_get_meta_folio_retry(struct f2fs_sb_info *sbi, pgoff_t index)
{
- struct page *page;
+ struct folio *folio;
int count = 0;
retry:
- page = __get_meta_page(sbi, index, true);
- if (IS_ERR(page)) {
- if (PTR_ERR(page) == -EIO &&
+ folio = __get_meta_folio(sbi, index, true);
+ if (IS_ERR(folio)) {
+ if (PTR_ERR(folio) == -EIO &&
++count <= DEFAULT_RETRY_IO_COUNT)
goto retry;
f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_META_PAGE);
}
- return page;
+ return folio;
}
/* for POR only */
-struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
+struct folio *f2fs_get_tmp_folio(struct f2fs_sb_info *sbi, pgoff_t index)
{
- return __get_meta_page(sbi, index, false);
+ return __get_meta_folio(sbi, index, false);
}
static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
@@ -252,7 +252,6 @@ bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi,
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
int type, bool sync)
{
- struct page *page;
block_t blkno = start;
struct f2fs_io_info fio = {
.sbi = sbi,
@@ -271,6 +270,7 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
blk_start_plug(&plug);
for (; nrpages-- > 0; blkno++) {
+ struct folio *folio;
if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
goto out;
@@ -300,18 +300,18 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
BUG();
}
- page = f2fs_grab_cache_page(META_MAPPING(sbi),
+ folio = f2fs_grab_cache_folio(META_MAPPING(sbi),
fio.new_blkaddr, false);
- if (!page)
+ if (IS_ERR(folio))
continue;
- if (PageUptodate(page)) {
- f2fs_put_page(page, 1);
+ if (folio_test_uptodate(folio)) {
+ f2fs_folio_put(folio, true);
continue;
}
- fio.page = page;
+ fio.folio = folio;
err = f2fs_submit_page_bio(&fio);
- f2fs_put_page(page, err ? 1 : 0);
+ f2fs_folio_put(folio, err ? true : false);
if (!err)
f2fs_update_iostat(sbi, NULL, FS_META_READ_IO,
@@ -325,27 +325,26 @@ out:
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,
unsigned int ra_blocks)
{
- struct page *page;
+ struct folio *folio;
bool readahead = false;
if (ra_blocks == RECOVERY_MIN_RA_BLOCKS)
return;
- page = find_get_page(META_MAPPING(sbi), index);
- if (!page || !PageUptodate(page))
+ folio = filemap_get_folio(META_MAPPING(sbi), index);
+ if (IS_ERR(folio) || !folio_test_uptodate(folio))
readahead = true;
- f2fs_put_page(page, 0);
+ f2fs_folio_put(folio, false);
if (readahead)
f2fs_ra_meta_pages(sbi, index, ra_blocks, META_POR, true);
}
-static int __f2fs_write_meta_page(struct page *page,
+static bool __f2fs_write_meta_folio(struct folio *folio,
struct writeback_control *wbc,
enum iostat_type io_type)
{
- struct f2fs_sb_info *sbi = F2FS_P_SB(page);
- struct folio *folio = page_folio(page);
+ struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
trace_f2fs_writepage(folio, META);
@@ -354,37 +353,26 @@ static int __f2fs_write_meta_page(struct page *page,
folio_clear_uptodate(folio);
dec_page_count(sbi, F2FS_DIRTY_META);
folio_unlock(folio);
- return 0;
+ return true;
}
goto redirty_out;
}
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
- if (wbc->for_reclaim && folio->index < GET_SUM_BLOCK(sbi, 0))
- goto redirty_out;
f2fs_do_write_meta_page(sbi, folio, io_type);
dec_page_count(sbi, F2FS_DIRTY_META);
- if (wbc->for_reclaim)
- f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META);
-
folio_unlock(folio);
if (unlikely(f2fs_cp_error(sbi)))
f2fs_submit_merged_write(sbi, META);
- return 0;
+ return true;
redirty_out:
- redirty_page_for_writepage(wbc, page);
- return AOP_WRITEPAGE_ACTIVATE;
-}
-
-static int f2fs_write_meta_page(struct page *page,
- struct writeback_control *wbc)
-{
- return __f2fs_write_meta_page(page, wbc, FS_META_IO);
+ folio_redirty_for_writepage(wbc, folio);
+ return false;
}
static int f2fs_write_meta_pages(struct address_space *mapping,
@@ -427,9 +415,7 @@ long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
struct folio_batch fbatch;
long nwritten = 0;
int nr_folios;
- struct writeback_control wbc = {
- .for_reclaim = 0,
- };
+ struct writeback_control wbc = {};
struct blk_plug plug;
folio_batch_init(&fbatch);
@@ -453,7 +439,7 @@ long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
folio_lock(folio);
- if (unlikely(folio->mapping != mapping)) {
+ if (unlikely(!is_meta_folio(folio))) {
continue_unlock:
folio_unlock(folio);
continue;
@@ -463,13 +449,12 @@ continue_unlock:
goto continue_unlock;
}
- f2fs_wait_on_page_writeback(&folio->page, META,
- true, true);
+ f2fs_folio_wait_writeback(folio, META, true, true);
if (!folio_clear_dirty_for_io(folio))
goto continue_unlock;
- if (__f2fs_write_meta_page(&folio->page, &wbc,
+ if (!__f2fs_write_meta_folio(folio, &wbc,
io_type)) {
folio_unlock(folio);
break;
@@ -500,14 +485,13 @@ static bool f2fs_dirty_meta_folio(struct address_space *mapping,
folio_mark_uptodate(folio);
if (filemap_dirty_folio(mapping, folio)) {
inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_META);
- set_page_private_reference(&folio->page);
+ folio_set_f2fs_reference(folio);
return true;
}
return false;
}
const struct address_space_operations f2fs_meta_aops = {
- .writepage = f2fs_write_meta_page,
.writepages = f2fs_write_meta_pages,
.dirty_folio = f2fs_dirty_meta_folio,
.invalidate_folio = f2fs_invalidate_folio,
@@ -520,6 +504,7 @@ static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
{
struct inode_management *im = &sbi->im[type];
struct ino_entry *e = NULL, *new = NULL;
+ int ret;
if (type == FLUSH_INO) {
rcu_read_lock();
@@ -532,7 +517,8 @@ retry:
new = f2fs_kmem_cache_alloc(ino_entry_slab,
GFP_NOFS, true, NULL);
- radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
+ ret = radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
+ f2fs_bug_on(sbi, ret);
spin_lock(&im->ino_lock);
e = radix_tree_lookup(&im->ino_root, ino);
@@ -757,26 +743,26 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);
for (i = 0; i < orphan_blocks; i++) {
- struct page *page;
+ struct folio *folio;
struct f2fs_orphan_block *orphan_blk;
- page = f2fs_get_meta_page(sbi, start_blk + i);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ folio = f2fs_get_meta_folio(sbi, start_blk + i);
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
goto out;
}
- orphan_blk = (struct f2fs_orphan_block *)page_address(page);
+ orphan_blk = folio_address(folio);
for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
err = recover_orphan_inode(sbi, ino);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
goto out;
}
}
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
}
/* clear Orphan Flag */
clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
@@ -793,7 +779,7 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
unsigned int nentries = 0;
unsigned short index = 1;
unsigned short orphan_blocks;
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct ino_entry *orphan = NULL;
struct inode_management *im = &sbi->im[ORPHAN_INO];
@@ -808,10 +794,9 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
/* loop for each orphan inode entry and write them in journal block */
list_for_each_entry(orphan, head, list) {
- if (!page) {
- page = f2fs_grab_meta_page(sbi, start_blk++);
- orphan_blk =
- (struct f2fs_orphan_block *)page_address(page);
+ if (!folio) {
+ folio = f2fs_grab_meta_folio(sbi, start_blk++);
+ orphan_blk = folio_address(folio);
memset(orphan_blk, 0, sizeof(*orphan_blk));
}
@@ -826,62 +811,61 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
orphan_blk->blk_addr = cpu_to_le16(index);
orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
orphan_blk->entry_count = cpu_to_le32(nentries);
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
index++;
nentries = 0;
- page = NULL;
+ folio = NULL;
}
}
- if (page) {
+ if (folio) {
orphan_blk->blk_addr = cpu_to_le16(index);
orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
orphan_blk->entry_count = cpu_to_le32(nentries);
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
}
}
-static __u32 f2fs_checkpoint_chksum(struct f2fs_sb_info *sbi,
- struct f2fs_checkpoint *ckpt)
+static __u32 f2fs_checkpoint_chksum(struct f2fs_checkpoint *ckpt)
{
unsigned int chksum_ofs = le32_to_cpu(ckpt->checksum_offset);
__u32 chksum;
- chksum = f2fs_crc32(sbi, ckpt, chksum_ofs);
+ chksum = f2fs_crc32(ckpt, chksum_ofs);
if (chksum_ofs < CP_CHKSUM_OFFSET) {
chksum_ofs += sizeof(chksum);
- chksum = f2fs_chksum(sbi, chksum, (__u8 *)ckpt + chksum_ofs,
- F2FS_BLKSIZE - chksum_ofs);
+ chksum = f2fs_chksum(chksum, (__u8 *)ckpt + chksum_ofs,
+ F2FS_BLKSIZE - chksum_ofs);
}
return chksum;
}
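The coverage pattern of f2fs_checkpoint_chksum() — checksum everything up to the stored checksum field and, when that field is not at the end of the block, everything after it — can be sketched standalone; the toy checksum below merely stands in for the real CRC32 helpers, and the block contents and offsets are made up.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy rolling checksum standing in for f2fs_crc32()/f2fs_chksum(); the
 * point is the coverage pattern, not the checksum algorithm itself. */
static uint32_t toy_chksum(uint32_t seed, const uint8_t *p, size_t len)
{
	while (len--)
		seed = seed * 31 + *p++;
	return seed;
}

int main(void)
{
	uint8_t block[64] = { 1, 2, 3 };  /* pretend checkpoint block */
	size_t chksum_ofs = 40;           /* hypothetical checksum_offset */
	size_t blk_size = sizeof(block);
	uint32_t c;

	/* Checksum everything before the stored checksum field ... */
	c = toy_chksum(0, block, chksum_ofs);
	/* ... and, if that field is not the last word of the block, everything
	 * after it too, so the checksum never covers itself. */
	if (chksum_ofs + sizeof(c) < blk_size)
		c = toy_chksum(c, block + chksum_ofs + sizeof(c),
			       blk_size - chksum_ofs - sizeof(c));

	printf("checksum=%u\n", c);
	return 0;
}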
static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
- struct f2fs_checkpoint **cp_block, struct page **cp_page,
+ struct f2fs_checkpoint **cp_block, struct folio **cp_folio,
unsigned long long *version)
{
size_t crc_offset = 0;
__u32 crc;
- *cp_page = f2fs_get_meta_page(sbi, cp_addr);
- if (IS_ERR(*cp_page))
- return PTR_ERR(*cp_page);
+ *cp_folio = f2fs_get_meta_folio(sbi, cp_addr);
+ if (IS_ERR(*cp_folio))
+ return PTR_ERR(*cp_folio);
- *cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
+ *cp_block = folio_address(*cp_folio);
crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
if (crc_offset < CP_MIN_CHKSUM_OFFSET ||
crc_offset > CP_CHKSUM_OFFSET) {
- f2fs_put_page(*cp_page, 1);
+ f2fs_folio_put(*cp_folio, true);
f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset);
return -EINVAL;
}
- crc = f2fs_checkpoint_chksum(sbi, *cp_block);
+ crc = f2fs_checkpoint_chksum(*cp_block);
if (crc != cur_cp_crc(*cp_block)) {
- f2fs_put_page(*cp_page, 1);
+ f2fs_folio_put(*cp_folio, true);
f2fs_warn(sbi, "invalid crc value");
return -EINVAL;
}
@@ -890,17 +874,17 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
return 0;
}
-static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
+static struct folio *validate_checkpoint(struct f2fs_sb_info *sbi,
block_t cp_addr, unsigned long long *version)
{
- struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
+ struct folio *cp_folio_1 = NULL, *cp_folio_2 = NULL;
struct f2fs_checkpoint *cp_block = NULL;
unsigned long long cur_version = 0, pre_version = 0;
unsigned int cp_blocks;
int err;
err = get_checkpoint_version(sbi, cp_addr, &cp_block,
- &cp_page_1, version);
+ &cp_folio_1, version);
if (err)
return NULL;
@@ -915,19 +899,19 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
cp_addr += cp_blocks - 1;
err = get_checkpoint_version(sbi, cp_addr, &cp_block,
- &cp_page_2, version);
+ &cp_folio_2, version);
if (err)
goto invalid_cp;
cur_version = *version;
if (cur_version == pre_version) {
*version = cur_version;
- f2fs_put_page(cp_page_2, 1);
- return cp_page_1;
+ f2fs_folio_put(cp_folio_2, true);
+ return cp_folio_1;
}
- f2fs_put_page(cp_page_2, 1);
+ f2fs_folio_put(cp_folio_2, true);
invalid_cp:
- f2fs_put_page(cp_page_1, 1);
+ f2fs_folio_put(cp_folio_1, true);
return NULL;
}
@@ -935,7 +919,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
struct f2fs_checkpoint *cp_block;
struct f2fs_super_block *fsb = sbi->raw_super;
- struct page *cp1, *cp2, *cur_page;
+ struct folio *cp1, *cp2, *cur_folio;
unsigned long blk_size = sbi->blocksize;
unsigned long long cp1_version = 0, cp2_version = 0;
unsigned long long cp_start_blk_no;
@@ -962,22 +946,22 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
if (cp1 && cp2) {
if (ver_after(cp2_version, cp1_version))
- cur_page = cp2;
+ cur_folio = cp2;
else
- cur_page = cp1;
+ cur_folio = cp1;
} else if (cp1) {
- cur_page = cp1;
+ cur_folio = cp1;
} else if (cp2) {
- cur_page = cp2;
+ cur_folio = cp2;
} else {
err = -EFSCORRUPTED;
goto fail_no_cp;
}
- cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
+ cp_block = folio_address(cur_folio);
memcpy(sbi->ckpt, cp_block, blk_size);
- if (cur_page == cp1)
+ if (cur_folio == cp1)
sbi->cur_cp_pack = 1;
else
sbi->cur_cp_pack = 2;
@@ -992,30 +976,30 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
goto done;
cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
- if (cur_page == cp2)
+ if (cur_folio == cp2)
cp_blk_no += BIT(le32_to_cpu(fsb->log_blocks_per_seg));
for (i = 1; i < cp_blks; i++) {
void *sit_bitmap_ptr;
unsigned char *ckpt = (unsigned char *)sbi->ckpt;
- cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
- if (IS_ERR(cur_page)) {
- err = PTR_ERR(cur_page);
+ cur_folio = f2fs_get_meta_folio(sbi, cp_blk_no + i);
+ if (IS_ERR(cur_folio)) {
+ err = PTR_ERR(cur_folio);
goto free_fail_no_cp;
}
- sit_bitmap_ptr = page_address(cur_page);
+ sit_bitmap_ptr = folio_address(cur_folio);
memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
- f2fs_put_page(cur_page, 1);
+ f2fs_folio_put(cur_folio, true);
}
done:
- f2fs_put_page(cp1, 1);
- f2fs_put_page(cp2, 1);
+ f2fs_folio_put(cp1, true);
+ f2fs_folio_put(cp2, true);
return 0;
free_fail_no_cp:
- f2fs_put_page(cp1, 1);
- f2fs_put_page(cp2, 1);
+ f2fs_folio_put(cp1, true);
+ f2fs_folio_put(cp2, true);
fail_no_cp:
kvfree(sbi->ckpt);
return err;
@@ -1061,7 +1045,7 @@ void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio)
inode_inc_dirty_pages(inode);
spin_unlock(&sbi->inode_lock[type]);
- set_page_private_reference(&folio->page);
+ folio_set_f2fs_reference(folio);
}
void f2fs_remove_dirty_inode(struct inode *inode)
@@ -1225,7 +1209,6 @@ static int block_operations(struct f2fs_sb_info *sbi)
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = LONG_MAX,
- .for_reclaim = 0,
};
int err = 0, cnt = 0;
@@ -1237,7 +1220,7 @@ static int block_operations(struct f2fs_sb_info *sbi)
retry_flush_quotas:
f2fs_lock_all(sbi);
if (__need_flush_quota(sbi)) {
- int locked;
+ bool need_lock = sbi->umount_lock_holder != current;
if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) {
set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
@@ -1246,11 +1229,13 @@ retry_flush_quotas:
}
f2fs_unlock_all(sbi);
- /* only failed during mount/umount/freeze/quotactl */
- locked = down_read_trylock(&sbi->sb->s_umount);
- f2fs_quota_sync(sbi->sb, -1);
- if (locked)
+ /* don't grab s_umount lock during mount/umount/remount/freeze/quotactl */
+ if (!need_lock) {
+ f2fs_do_quota_sync(sbi->sb, -1);
+ } else if (down_read_trylock(&sbi->sb->s_umount)) {
+ f2fs_do_quota_sync(sbi->sb, -1);
up_read(&sbi->sb->s_umount);
+ }
cond_resched();
goto retry_flush_quotas;
}
@@ -1333,7 +1318,7 @@ void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
f2fs_submit_merged_write(sbi, DATA);
prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
- io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+ io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
}
finish_wait(&sbi->cp_wait, &wait);
}
@@ -1344,21 +1329,13 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
unsigned long flags;
- if (cpc->reason & CP_UMOUNT) {
- if (le32_to_cpu(ckpt->cp_pack_total_block_count) +
- NM_I(sbi)->nat_bits_blocks > BLKS_PER_SEG(sbi)) {
- clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
- f2fs_notice(sbi, "Disable nat_bits due to no space");
- } else if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG) &&
- f2fs_nat_bitmap_enabled(sbi)) {
- f2fs_enable_nat_bits(sbi);
- set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
- f2fs_notice(sbi, "Rebuild and enable nat_bits");
- }
- }
-
spin_lock_irqsave(&sbi->cp_lock, flags);
+ if ((cpc->reason & CP_UMOUNT) &&
+ le32_to_cpu(ckpt->cp_pack_total_block_count) >
+ sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks)
+ disable_nat_bits(sbi, false);
+
if (cpc->reason & CP_TRIMMED)
__set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
else
@@ -1415,35 +1392,31 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
static void commit_checkpoint(struct f2fs_sb_info *sbi,
void *src, block_t blk_addr)
{
- struct writeback_control wbc = {
- .for_reclaim = 0,
- };
+ struct writeback_control wbc = {};
/*
- * filemap_get_folios_tag and lock_page again will take
+ * filemap_get_folios_tag and folio_lock again will take
* some extra time. Therefore, f2fs_update_meta_pages and
* f2fs_sync_meta_pages are combined in this function.
*/
- struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
- int err;
-
- f2fs_wait_on_page_writeback(page, META, true, true);
+ struct folio *folio = f2fs_grab_meta_folio(sbi, blk_addr);
- memcpy(page_address(page), src, PAGE_SIZE);
+ memcpy(folio_address(folio), src, PAGE_SIZE);
- set_page_dirty(page);
- if (unlikely(!clear_page_dirty_for_io(page)))
+ folio_mark_dirty(folio);
+ if (unlikely(!folio_clear_dirty_for_io(folio)))
f2fs_bug_on(sbi, 1);
/* writeout cp pack 2 page */
- err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
- if (unlikely(err && f2fs_cp_error(sbi))) {
- f2fs_put_page(page, 1);
- return;
+ if (unlikely(!__f2fs_write_meta_folio(folio, &wbc, FS_CP_META_IO))) {
+ if (f2fs_cp_error(sbi)) {
+ f2fs_folio_put(folio, true);
+ return;
+ }
+ f2fs_bug_on(sbi, true);
}
- f2fs_bug_on(sbi, err);
- f2fs_put_page(page, 0);
+ f2fs_folio_put(folio, false);
/* submit checkpoint (with barrier if NOBARRIER is not set) */
f2fs_submit_merged_write(sbi, META_FLUSH);
@@ -1469,6 +1442,34 @@ u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi)
return get_sectors_written(sbi->sb->s_bdev);
}
+static inline void stat_cp_time(struct cp_control *cpc, enum cp_time type)
+{
+ cpc->stats.times[type] = ktime_get();
+}
+
+static inline void check_cp_time(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+{
+ unsigned long long sb_diff, cur_diff;
+ enum cp_time ct;
+
+ sb_diff = (u64)ktime_ms_delta(sbi->cp_stats.times[CP_TIME_END],
+ sbi->cp_stats.times[CP_TIME_START]);
+ cur_diff = (u64)ktime_ms_delta(cpc->stats.times[CP_TIME_END],
+ cpc->stats.times[CP_TIME_START]);
+
+ if (cur_diff > sb_diff) {
+ sbi->cp_stats = cpc->stats;
+ if (cur_diff < CP_LONG_LATENCY_THRESHOLD)
+ return;
+
+ f2fs_warn(sbi, "checkpoint was blocked for %llu ms", cur_diff);
+ for (ct = CP_TIME_START; ct < CP_TIME_MAX - 1; ct++)
+ f2fs_warn(sbi, "Step#%d: %llu ms", ct,
+ (u64)ktime_ms_delta(cpc->stats.times[ct + 1],
+ cpc->stats.times[ct]));
+ }
+}
+
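A minimal standalone sketch of the phase-timing idea used by stat_cp_time()/check_cp_time() above: timestamp each phase, and when the whole run is slow enough to care about, print the per-step deltas. The phase names and the threshold below are hypothetical.

#include <stdio.h>
#include <time.h>

/* Hypothetical checkpoint phases; the real enum has more steps. */
enum { T_START, T_LOCK, T_SYNC_META, T_END, T_MAX };

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

int main(void)
{
	long long t[T_MAX];
	const long long threshold_ms = 0;  /* warn about everything, for the demo */
	int i;

	t[T_START] = now_ms();
	t[T_LOCK] = now_ms();       /* pretend: grabbed cp_global_sem */
	t[T_SYNC_META] = now_ms();  /* pretend: flushed dirty meta pages */
	t[T_END] = now_ms();

	if (t[T_END] - t[T_START] >= threshold_ms)
		for (i = T_START; i < T_MAX - 1; i++)
			printf("Step#%d: %lld ms\n", i, t[i + 1] - t[i]);
	return 0;
}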
static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
@@ -1486,6 +1487,8 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* Flush all the NAT/SIT pages */
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+ stat_cp_time(cpc, CP_TIME_SYNC_META);
+
/* start to update checkpoint, cp ver is already updated previously */
ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true));
ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
@@ -1533,7 +1536,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
- crc32 = f2fs_checkpoint_chksum(sbi, ckpt);
+ crc32 = f2fs_checkpoint_chksum(ckpt);
*((__le32 *)((unsigned char *)ckpt +
le32_to_cpu(ckpt->checksum_offset)))
= cpu_to_le32(crc32);
@@ -1541,8 +1544,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
start_blk = __start_cp_next_addr(sbi);
/* write nat bits */
- if ((cpc->reason & CP_UMOUNT) &&
- is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) {
+ if (enabled_nat_bits(sbi, cpc)) {
__u64 cp_ver = cur_cp_version(ckpt);
block_t blk;
@@ -1583,20 +1585,26 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* Here, we have one bio having CP pack except cp pack 2 page */
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+ stat_cp_time(cpc, CP_TIME_SYNC_CP_META);
+
/* Wait for all dirty meta pages to be submitted for IO */
f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META);
+ stat_cp_time(cpc, CP_TIME_WAIT_DIRTY_META);
/* wait for previous submitted meta pages writeback */
f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
+ stat_cp_time(cpc, CP_TIME_WAIT_CP_DATA);
/* flush all device cache */
err = f2fs_flush_device_cache(sbi);
if (err)
return err;
+ stat_cp_time(cpc, CP_TIME_FLUSH_DEVICE);
/* barrier and flush checkpoint cp pack 2 page if it can */
commit_checkpoint(sbi, ckpt, start_blk);
f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
+ stat_cp_time(cpc, CP_TIME_WAIT_LAST_CP);
/*
* invalidate intermediate page cache borrowed from meta inode which are
@@ -1641,6 +1649,8 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
unsigned long long ckpt_ver;
int err = 0;
+ stat_cp_time(cpc, CP_TIME_START);
+
if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi))
return -EROFS;
@@ -1652,6 +1662,8 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
if (cpc->reason != CP_RESIZE)
f2fs_down_write(&sbi->cp_global_sem);
+ stat_cp_time(cpc, CP_TIME_LOCK);
+
if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
((cpc->reason & CP_DISCARD) && !sbi->discard_blks)))
@@ -1661,13 +1673,15 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
goto out;
}
- trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");
+ trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, CP_PHASE_START_BLOCK_OPS);
err = block_operations(sbi);
if (err)
goto out;
- trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");
+ stat_cp_time(cpc, CP_TIME_OP_LOCK);
+
+ trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, CP_PHASE_FINISH_BLOCK_OPS);
f2fs_flush_merged_writes(sbi);
@@ -1706,6 +1720,8 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_flush_sit_entries(sbi, cpc);
+ stat_cp_time(cpc, CP_TIME_FLUSH_META);
+
/* save inmem log status */
f2fs_save_inmem_curseg(sbi);
@@ -1723,13 +1739,15 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
stat_inc_cp_count(sbi);
stop:
unblock_operations(sbi);
+ stat_cp_time(cpc, CP_TIME_END);
+ check_cp_time(sbi, cpc);
if (cpc->reason & CP_RECOVERY)
f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);
/* update CP_TIME to trigger checkpoint periodically */
f2fs_update_time(sbi, CP_TIME);
- trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
+ trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, CP_PHASE_FINISH_CHECKPOINT);
out:
if (cpc->reason != CP_RESIZE)
f2fs_up_write(&sbi->cp_global_sem);
@@ -1806,6 +1824,7 @@ static void __checkpoint_and_complete_reqs(struct f2fs_sb_info *sbi)
llist_for_each_entry_safe(req, next, dispatch_list, llnode) {
diff = (u64)ktime_ms_delta(ktime_get(), req->queue_time);
req->ret = ret;
+ req->delta_time = diff;
complete(&req->wait);
sum_diff += diff;
@@ -1867,7 +1886,8 @@ int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi)
struct cp_control cpc;
cpc.reason = __get_cp_reason(sbi);
- if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) {
+ if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC ||
+ sbi->umount_lock_holder == current) {
int ret;
f2fs_down_write(&sbi->gc_lock);
@@ -1900,6 +1920,12 @@ int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi)
else
flush_remained_ckpt_reqs(sbi, &req);
+ if (unlikely(req.delta_time >= CP_LONG_LATENCY_THRESHOLD)) {
+ f2fs_warn_ratelimited(sbi,
+ "blocked on checkpoint for %u ms", cprc->peak_time);
+ dump_stack();
+ }
+
return req.ret;
}
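
The delta_time bookkeeping above implies a small extension of the per-caller checkpoint request. A hypothetical reconstruction of the fields this hunk relies on (the real struct ckpt_req is defined in f2fs.h; only wait, queue_time, delta_time and ret are referenced here):

	struct ckpt_req {
		struct completion wait;		/* completed by the issuer thread */
		struct llist_node llnode;	/* linked into the issue list */
		ktime_t queue_time;		/* when the caller queued the request */
		u64 delta_time;			/* ms from queueing to completion (new) */
		int ret;			/* checkpoint result for this caller */
	};
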
@@ -1948,7 +1974,7 @@ void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi)
/* Let's wait for the previous dispatched checkpoint. */
while (atomic_read(&cprc->queued_ckpt))
- io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+ io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
}
void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 7f26440e8595..7b68bf22989d 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -23,20 +23,18 @@
static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;
-static void *page_array_alloc(struct inode *inode, int nr)
+static void *page_array_alloc(struct f2fs_sb_info *sbi, int nr)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int size = sizeof(struct page *) * nr;
if (likely(size <= sbi->page_array_slab_size))
return f2fs_kmem_cache_alloc(sbi->page_array_slab,
- GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
+ GFP_F2FS_ZERO, false, sbi);
return f2fs_kzalloc(sbi, size, GFP_NOFS);
}
-static void page_array_free(struct inode *inode, void *pages, int nr)
+static void page_array_free(struct f2fs_sb_info *sbi, void *pages, int nr)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int size = sizeof(struct page *) * nr;
if (!pages)
@@ -73,17 +71,15 @@ static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
return cc->cluster_idx << cc->log_cluster_size;
}
-bool f2fs_is_compressed_page(struct page *page)
+bool f2fs_is_compressed_page(struct folio *folio)
{
- if (!PagePrivate(page))
+ if (!folio->private)
return false;
- if (!page_private(page))
- return false;
- if (page_private_nonpointer(page))
+ if (folio_test_f2fs_nonpointer(folio))
return false;
- f2fs_bug_on(F2FS_M_SB(page->mapping),
- *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
+ f2fs_bug_on(F2FS_F_SB(folio),
+ *((u32 *)folio->private) != F2FS_COMPRESSED_PAGE_MAGIC);
return true;
}
@@ -124,7 +120,7 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
}
static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
- struct writeback_control *wbc, bool redirty, int unlock)
+ struct writeback_control *wbc, bool redirty, bool unlock)
{
unsigned int i;
@@ -137,9 +133,11 @@ static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
}
}
-struct page *f2fs_compress_control_page(struct page *page)
+struct folio *f2fs_compress_control_folio(struct folio *folio)
{
- return ((struct compress_io_ctx *)page_private(page))->rpages[0];
+ struct compress_io_ctx *ctx = folio->private;
+
+ return page_folio(ctx->rpages[0]);
}
int f2fs_init_compress_ctx(struct compress_ctx *cc)
@@ -147,13 +145,13 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
if (cc->rpages)
return 0;
- cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
+ cc->rpages = page_array_alloc(F2FS_I_SB(cc->inode), cc->cluster_size);
return cc->rpages ? 0 : -ENOMEM;
}
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
- page_array_free(cc->inode, cc->rpages, cc->cluster_size);
+ page_array_free(F2FS_I_SB(cc->inode), cc->rpages, cc->cluster_size);
cc->rpages = NULL;
cc->nr_rpages = 0;
cc->nr_cpages = 0;
@@ -178,8 +176,8 @@ void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio)
#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
- cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
- LZO1X_MEM_COMPRESS, GFP_NOFS);
+ cc->private = f2fs_vmalloc(F2FS_I_SB(cc->inode),
+ LZO1X_MEM_COMPRESS);
if (!cc->private)
return -ENOMEM;
@@ -189,7 +187,7 @@ static int lzo_init_compress_ctx(struct compress_ctx *cc)
static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
- kvfree(cc->private);
+ vfree(cc->private);
cc->private = NULL;
}
@@ -214,13 +212,13 @@ static int lzo_decompress_pages(struct decompress_io_ctx *dic)
ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
dic->rbuf, &dic->rlen);
if (ret != LZO_E_OK) {
- f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+ f2fs_err_ratelimited(dic->sbi,
"lzo decompress failed, ret:%d", ret);
return -EIO;
}
if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
- f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+ f2fs_err_ratelimited(dic->sbi,
"lzo invalid rlen:%zu, expected:%lu",
dic->rlen, PAGE_SIZE << dic->log_cluster_size);
return -EIO;
@@ -246,7 +244,7 @@ static int lz4_init_compress_ctx(struct compress_ctx *cc)
size = LZ4HC_MEM_COMPRESS;
#endif
- cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
+ cc->private = f2fs_vmalloc(F2FS_I_SB(cc->inode), size);
if (!cc->private)
return -ENOMEM;
@@ -261,7 +259,7 @@ static int lz4_init_compress_ctx(struct compress_ctx *cc)
static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
- kvfree(cc->private);
+ vfree(cc->private);
cc->private = NULL;
}
@@ -294,13 +292,13 @@ static int lz4_decompress_pages(struct decompress_io_ctx *dic)
ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
dic->clen, dic->rlen);
if (ret < 0) {
- f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+ f2fs_err_ratelimited(dic->sbi,
"lz4 decompress failed, ret:%d", ret);
return -EIO;
}
if (ret != PAGE_SIZE << dic->log_cluster_size) {
- f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+ f2fs_err_ratelimited(dic->sbi,
"lz4 invalid ret:%d, expected:%lu",
ret, PAGE_SIZE << dic->log_cluster_size);
return -EIO;
@@ -342,8 +340,7 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
params = zstd_get_params(level, cc->rlen);
workspace_size = zstd_cstream_workspace_bound(&params.cParams);
- workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
- workspace_size, GFP_NOFS);
+ workspace = f2fs_vmalloc(F2FS_I_SB(cc->inode), workspace_size);
if (!workspace)
return -ENOMEM;
@@ -351,7 +348,7 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
if (!stream) {
f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
"%s zstd_init_cstream failed", __func__);
- kvfree(workspace);
+ vfree(workspace);
return -EIO;
}
@@ -364,7 +361,7 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
- kvfree(cc->private);
+ vfree(cc->private);
cc->private = NULL;
cc->private2 = NULL;
}
@@ -423,16 +420,15 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
workspace_size = zstd_dstream_workspace_bound(max_window_size);
- workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
- workspace_size, GFP_NOFS);
+ workspace = f2fs_vmalloc(dic->sbi, workspace_size);
if (!workspace)
return -ENOMEM;
stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
if (!stream) {
- f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+ f2fs_err_ratelimited(dic->sbi,
"%s zstd_init_dstream failed", __func__);
- kvfree(workspace);
+ vfree(workspace);
return -EIO;
}
@@ -444,7 +440,7 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
- kvfree(dic->private);
+ vfree(dic->private);
dic->private = NULL;
dic->private2 = NULL;
}
@@ -466,14 +462,14 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
if (zstd_is_error(ret)) {
- f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+ f2fs_err_ratelimited(dic->sbi,
"%s zstd_decompress_stream failed, ret: %d",
__func__, zstd_get_error_code(ret));
return -EIO;
}
if (dic->rlen != outbuf.pos) {
- f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+ f2fs_err_ratelimited(dic->sbi,
"%s ZSTD invalid rlen:%zu, expected:%lu",
__func__, dic->rlen,
PAGE_SIZE << dic->log_cluster_size);
@@ -593,11 +589,14 @@ static struct page *f2fs_compress_alloc_page(void)
static void f2fs_compress_free_page(struct page *page)
{
+ struct folio *folio;
+
if (!page)
return;
- detach_page_private(page);
- page->mapping = NULL;
- unlock_page(page);
+ folio = page_folio(page);
+ folio_detach_private(folio);
+ folio->mapping = NULL;
+ folio_unlock(folio);
mempool_free(page, compress_page_pool);
}
@@ -619,6 +618,7 @@ static void *f2fs_vmap(struct page **pages, unsigned int count)
static int f2fs_compress_pages(struct compress_ctx *cc)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
struct f2fs_inode_info *fi = F2FS_I(cc->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
@@ -639,7 +639,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
cc->valid_nr_cpages = cc->nr_cpages;
- cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
+ cc->cpages = page_array_alloc(sbi, cc->nr_cpages);
if (!cc->cpages) {
ret = -ENOMEM;
goto destroy_compress_ctx;
@@ -674,8 +674,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
cc->cbuf->clen = cpu_to_le32(cc->clen);
if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))
- chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
- cc->cbuf->cdata, cc->clen);
+ chksum = f2fs_crc32(cc->cbuf->cdata, cc->clen);
cc->cbuf->chksum = cpu_to_le32(chksum);
for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
@@ -714,7 +713,7 @@ out_free_cpages:
if (cc->cpages[i])
f2fs_compress_free_page(cc->cpages[i]);
}
- page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ page_array_free(sbi, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
destroy_compress_ctx:
if (cops->destroy_compress_ctx)
@@ -732,7 +731,7 @@ static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
+ struct f2fs_sb_info *sbi = dic->sbi;
struct f2fs_inode_info *fi = F2FS_I(dic->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
@@ -760,10 +759,7 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
ret = -EFSCORRUPTED;
/* Avoid f2fs_commit_super in irq context */
- if (!in_task)
- f2fs_handle_error_async(sbi, ERROR_FAIL_DECOMPRESSION);
- else
- f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
+ f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
goto out_release;
}
@@ -771,7 +767,7 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) {
u32 provided = le32_to_cpu(dic->cbuf->chksum);
- u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
+ u32 calculated = f2fs_crc32(dic->cbuf->cdata, dic->clen);
if (provided != calculated) {
if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
@@ -794,25 +790,27 @@ out_end_io:
f2fs_decompress_end_io(dic, ret, in_task);
}
+static void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
+ struct folio *folio, nid_t ino, block_t blkaddr);
+
/*
* This is called when a page of a compressed cluster has been read from disk
* (or failed to be read from disk). It checks whether this page was the last
* page being waited on in the cluster, and if so, it decompresses the cluster
* (or in the case of a failure, cleans up without actually decompressing).
*/
-void f2fs_end_read_compressed_page(struct page *page, bool failed,
+void f2fs_end_read_compressed_page(struct folio *folio, bool failed,
block_t blkaddr, bool in_task)
{
- struct decompress_io_ctx *dic =
- (struct decompress_io_ctx *)page_private(page);
- struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
+ struct decompress_io_ctx *dic = folio->private;
+ struct f2fs_sb_info *sbi = dic->sbi;
dec_page_count(sbi, F2FS_RD_DATA);
if (failed)
WRITE_ONCE(dic->failed, true);
else if (blkaddr && in_task)
- f2fs_cache_compressed_page(sbi, page,
+ f2fs_cache_compressed_page(sbi, folio,
dic->inode->i_ino, blkaddr);
if (atomic_dec_and_test(&dic->remaining_pages))
@@ -846,7 +844,7 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
int index, int nr_pages, bool uptodate)
{
- unsigned long pgidx = pages[index]->index;
+ unsigned long pgidx = page_folio(pages[index])->index;
int i = uptodate ? 0 : 1;
/*
@@ -860,9 +858,11 @@ bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
return false;
for (; i < cc->cluster_size; i++) {
- if (pages[index + i]->index != pgidx + i)
+ struct folio *folio = page_folio(pages[index + i]);
+
+ if (folio->index != pgidx + i)
return false;
- if (uptodate && !PageUptodate(pages[index + i]))
+ if (uptodate && !folio_test_uptodate(folio))
return false;
}
@@ -907,7 +907,7 @@ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
}
for (i = 1, count = 1; i < cluster_size; i++, count++) {
- block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
dn->ofs_in_node + i);
/* [COMPR_ADDR, ..., COMPR_ADDR] */
@@ -948,7 +948,7 @@ static int __f2fs_get_cluster_blocks(struct inode *inode,
int count, i;
for (i = 0, count = 0; i < cluster_size; i++) {
- block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
dn->ofs_in_node + i);
if (__is_valid_data_blkaddr(blkaddr))
@@ -1057,7 +1057,7 @@ static void cancel_cluster_writeback(struct compress_ctx *cc,
f2fs_submit_merged_write(F2FS_I_SB(cc->inode), DATA);
while (atomic_read(&cic->pending_pages) !=
(cc->valid_nr_cpages - submitted + 1))
- f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+ f2fs_io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
}
/* Cancel writeback and stay locked. */
@@ -1088,7 +1088,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
{
struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
struct address_space *mapping = cc->inode->i_mapping;
- struct page *page;
+ struct folio *folio;
sector_t last_block_in_bio;
fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
pgoff_t start_idx = start_idx_of_cluster(cc);
@@ -1103,19 +1103,19 @@ retry:
if (ret)
return ret;
- /* keep page reference to avoid page reclaim */
+ /* keep folio reference to avoid page reclaim */
for (i = 0; i < cc->cluster_size; i++) {
- page = f2fs_pagecache_get_page(mapping, start_idx + i,
- fgp_flag, GFP_NOFS);
- if (!page) {
- ret = -ENOMEM;
+ folio = f2fs_filemap_get_folio(mapping, start_idx + i,
+ fgp_flag, GFP_NOFS);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
goto unlock_pages;
}
- if (PageUptodate(page))
- f2fs_put_page(page, 1);
+ if (folio_test_uptodate(folio))
+ f2fs_folio_put(folio, true);
else
- f2fs_compress_ctx_add_page(cc, page_folio(page));
+ f2fs_compress_ctx_add_page(cc, folio);
}
if (!f2fs_cluster_is_empty(cc)) {
@@ -1138,16 +1138,17 @@ retry:
for (i = 0; i < cc->cluster_size; i++) {
f2fs_bug_on(sbi, cc->rpages[i]);
- page = find_lock_page(mapping, start_idx + i);
- if (!page) {
- /* page can be truncated */
+ folio = filemap_lock_folio(mapping, start_idx + i);
+ if (IS_ERR(folio)) {
+ /* folio could be truncated */
goto release_and_retry;
}
- f2fs_wait_on_page_writeback(page, DATA, true, true);
- f2fs_compress_ctx_add_page(cc, page_folio(page));
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
+ f2fs_compress_ctx_add_page(cc, folio);
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
+ f2fs_handle_page_eio(sbi, folio, DATA);
release_and_retry:
f2fs_put_rpages(cc);
f2fs_unlock_rpages(cc, i + 1);
@@ -1195,12 +1196,13 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
.cluster_size = F2FS_I(inode)->i_cluster_size,
.rpages = fsdata,
};
- bool first_index = (index == cc.rpages[0]->index);
+ struct folio *folio = page_folio(cc.rpages[0]);
+ bool first_index = (index == folio->index);
if (copied)
set_cluster_dirty(&cc);
- f2fs_put_rpages_wbc(&cc, NULL, false, 1);
+ f2fs_put_rpages_wbc(&cc, NULL, false, true);
f2fs_destroy_compress_ctx(&cc, false);
return first_index;
@@ -1210,9 +1212,11 @@ int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
void *fsdata = NULL;
struct page *pagep;
+ struct page **rpages;
int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
log_cluster_size;
+ int i;
int err;
err = f2fs_is_compressed_cluster(inode, start_idx);
@@ -1233,26 +1237,30 @@ int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
if (err <= 0)
return err;
- if (err > 0) {
- struct page **rpages = fsdata;
- int cluster_size = F2FS_I(inode)->i_cluster_size;
- int i;
+ rpages = fsdata;
- for (i = cluster_size - 1; i >= 0; i--) {
- loff_t start = rpages[i]->index << PAGE_SHIFT;
+ for (i = (1 << log_cluster_size) - 1; i >= 0; i--) {
+ struct folio *folio = page_folio(rpages[i]);
+ loff_t start = (loff_t)folio->index << PAGE_SHIFT;
+ loff_t offset = from > start ? from - start : 0;
- if (from <= start) {
- zero_user_segment(rpages[i], 0, PAGE_SIZE);
- } else {
- zero_user_segment(rpages[i], from - start,
- PAGE_SIZE);
- break;
- }
- }
+ folio_zero_segment(folio, offset, folio_size(folio));
- f2fs_compress_write_end(inode, fsdata, start_idx, true);
+ if (from >= start)
+ break;
}
- return 0;
+
+ f2fs_compress_write_end(inode, fsdata, start_idx, true);
+
+ err = filemap_write_and_wait_range(inode->i_mapping,
+ round_down(from, 1 << log_cluster_size << PAGE_SHIFT),
+ LLONG_MAX);
+ if (err)
+ return err;
+
+ truncate_pagecache(inode, from);
+
+ return f2fs_do_truncate_blocks(inode, round_up(from, PAGE_SIZE), lock);
}
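
To make the new rounding in f2fs_truncate_partial_cluster() concrete, here is a worked example under assumed values (4 KiB pages, log_cluster_size == 2, i.e. a 4-page cluster); the numbers are illustrative only:

	loff_t from = 10 * 1024;	/* truncate point: 10 KiB, inside page 2 of cluster 0 */
	int log_cluster_size = 2;	/* 1 << 2 pages == 16 KiB per cluster */

	/* zeroing loop runs from the last page down:
	 *   page 3 (start 12 KiB): from < start, whole page zeroed, keep going
	 *   page 2 (start  8 KiB): offset = 10 KiB - 8 KiB = 2 KiB, tail zeroed,
	 *                          from >= start so the loop breaks
	 *   pages 0-1: left untouched
	 */

	/* writeback covers the whole cluster containing 'from' */
	round_down(from, 1 << log_cluster_size << PAGE_SHIFT);	/* == 0 */

	/* block truncation stays page-granular */
	round_up(from, PAGE_SIZE);				/* == 12 KiB */
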
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
@@ -1278,6 +1286,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
1 : 0,
};
+ struct folio *folio;
struct dnode_of_data dn;
struct node_info ni;
struct compress_io_ctx *cic;
@@ -1289,7 +1298,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
/* we should bypass data pages to proceed the kworker jobs */
if (unlikely(f2fs_cp_error(sbi))) {
- mapping_set_error(cc->rpages[0]->mapping, -EIO);
+ mapping_set_error(inode->i_mapping, -EIO);
goto out_free;
}
@@ -1311,12 +1320,13 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
goto out_unlock_op;
for (i = 0; i < cc->cluster_size; i++) {
- if (data_blkaddr(dn.inode, dn.node_page,
+ if (data_blkaddr(dn.inode, dn.node_folio,
dn.ofs_in_node + i) == NULL_ADDR)
goto out_put_dnode;
}
- psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
+ folio = page_folio(cc->rpages[last_index]);
+ psize = folio_next_pos(folio);
err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
if (err)
@@ -1331,7 +1341,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
cic->inode = inode;
atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
- cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
+ cic->rpages = page_array_alloc(sbi, cc->cluster_size);
if (!cic->rpages)
goto out_put_cic;
@@ -1339,10 +1349,10 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
for (i = 0; i < cc->valid_nr_cpages; i++) {
f2fs_set_compressed_page(cc->cpages[i], inode,
- cc->rpages[i + 1]->index, cic);
+ page_folio(cc->rpages[i + 1])->index, cic);
fio.compressed_page = cc->cpages[i];
- fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
+ fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_folio,
dn.ofs_in_node + i + 1);
/* wait for GCed page writeback via META_MAPPING */
@@ -1374,7 +1384,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
if (blkaddr == COMPRESS_ADDR)
fio.compr_blocks++;
if (__is_valid_data_blkaddr(blkaddr))
- f2fs_invalidate_blocks(sbi, blkaddr);
+ f2fs_invalidate_blocks(sbi, blkaddr, 1);
f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
goto unlock_continue;
}
@@ -1384,7 +1394,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
if (i > cc->valid_nr_cpages) {
if (__is_valid_data_blkaddr(blkaddr)) {
- f2fs_invalidate_blocks(sbi, blkaddr);
+ f2fs_invalidate_blocks(sbi, blkaddr, 1);
f2fs_update_data_blkaddr(&dn, NEW_ADDR);
}
goto unlock_continue;
@@ -1411,7 +1421,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
(*submitted)++;
unlock_continue:
inode_dec_dirty_pages(cc->inode);
- unlock_page(fio.page);
+ folio_unlock(fio.folio);
}
if (fio.compr_blocks)
@@ -1433,13 +1443,13 @@ unlock_continue:
spin_unlock(&fi->i_size_lock);
f2fs_put_rpages(cc);
- page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ page_array_free(sbi, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
f2fs_destroy_compress_ctx(cc, false);
return 0;
out_destroy_crypt:
- page_array_free(cc->inode, cic->rpages, cc->cluster_size);
+ page_array_free(sbi, cic->rpages, cc->cluster_size);
for (--i; i >= 0; i--) {
if (!cc->cpages[i])
@@ -1460,21 +1470,21 @@ out_free:
f2fs_compress_free_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}
- page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ page_array_free(sbi, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
return -EAGAIN;
}
-void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
+void f2fs_compress_write_end_io(struct bio *bio, struct folio *folio)
{
+ struct page *page = &folio->page;
struct f2fs_sb_info *sbi = bio->bi_private;
- struct compress_io_ctx *cic =
- (struct compress_io_ctx *)page_private(page);
- enum count_type type = WB_DATA_TYPE(page,
- f2fs_is_compressed_page(page));
+ struct compress_io_ctx *cic = folio->private;
+ enum count_type type = WB_DATA_TYPE(folio,
+ f2fs_is_compressed_page(folio));
int i;
- if (unlikely(bio->bi_status))
+ if (unlikely(bio->bi_status != BLK_STS_OK))
mapping_set_error(cic->inode->i_mapping, -EIO);
f2fs_compress_free_page(page);
@@ -1490,7 +1500,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
end_page_writeback(cic->rpages[i]);
}
- page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
+ page_array_free(sbi, cic->rpages, cic->nr_rpages);
kmem_cache_free(cic_entry_slab, cic);
}
@@ -1522,36 +1532,38 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
f2fs_lock_op(sbi);
for (i = 0; i < cc->cluster_size; i++) {
+ struct folio *folio;
+
if (!cc->rpages[i])
continue;
+ folio = page_folio(cc->rpages[i]);
retry_write:
- lock_page(cc->rpages[i]);
+ folio_lock(folio);
- if (cc->rpages[i]->mapping != mapping) {
+ if (folio->mapping != mapping) {
continue_unlock:
- unlock_page(cc->rpages[i]);
+ folio_unlock(folio);
continue;
}
- if (!PageDirty(cc->rpages[i]))
+ if (!folio_test_dirty(folio))
goto continue_unlock;
- if (folio_test_writeback(page_folio(cc->rpages[i]))) {
+ if (folio_test_writeback(folio)) {
if (wbc->sync_mode == WB_SYNC_NONE)
goto continue_unlock;
- f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
}
- if (!clear_page_dirty_for_io(cc->rpages[i]))
+ if (!folio_clear_dirty_for_io(folio))
goto continue_unlock;
- ret = f2fs_write_single_data_page(page_folio(cc->rpages[i]),
- &submitted,
+ submitted = 0;
+ ret = f2fs_write_single_data_page(folio, &submitted,
NULL, NULL, wbc, io_type,
compr_blocks, false);
if (ret) {
- if (ret == AOP_WRITEPAGE_ACTIVATE) {
- unlock_page(cc->rpages[i]);
+ if (ret == 1) {
ret = 0;
} else if (ret == -EAGAIN) {
ret = 0;
@@ -1562,7 +1574,7 @@ continue_unlock:
*/
if (IS_NOQUOTA(cc->inode))
goto out;
- f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+ f2fs_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
goto retry_write;
}
goto out;
@@ -1593,7 +1605,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
add_compr_block_stat(cc->inode, cc->cluster_size);
goto write;
} else if (err) {
- f2fs_put_rpages_wbc(cc, wbc, true, 1);
+ f2fs_put_rpages_wbc(cc, wbc, true, true);
goto destroy_out;
}
@@ -1607,7 +1619,7 @@ write:
f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
- f2fs_put_rpages_wbc(cc, wbc, false, 0);
+ f2fs_put_rpages_wbc(cc, wbc, false, false);
destroy_out:
f2fs_destroy_compress_ctx(cc, false);
return err;
@@ -1622,14 +1634,13 @@ static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
bool pre_alloc)
{
- const struct f2fs_compress_ops *cops =
- f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
+ const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm];
int i;
- if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
+ if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc))
return 0;
- dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
+ dic->tpages = page_array_alloc(dic->sbi, dic->cluster_size);
if (!dic->tpages)
return -ENOMEM;
@@ -1659,10 +1670,9 @@ static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
bool bypass_destroy_callback, bool pre_alloc)
{
- const struct f2fs_compress_ops *cops =
- f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
+ const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm];
- if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
+ if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc))
return;
if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
@@ -1689,7 +1699,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
if (!dic)
return ERR_PTR(-ENOMEM);
- dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
+ dic->rpages = page_array_alloc(sbi, cc->cluster_size);
if (!dic->rpages) {
kmem_cache_free(dic_entry_slab, dic);
return ERR_PTR(-ENOMEM);
@@ -1697,6 +1707,8 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
dic->inode = cc->inode;
+ dic->sbi = sbi;
+ dic->compress_algorithm = F2FS_I(cc->inode)->i_compress_algorithm;
atomic_set(&dic->remaining_pages, cc->nr_cpages);
dic->cluster_idx = cc->cluster_idx;
dic->cluster_size = cc->cluster_size;
@@ -1710,7 +1722,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->rpages[i] = cc->rpages[i];
dic->nr_rpages = cc->cluster_size;
- dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
+ dic->cpages = page_array_alloc(sbi, dic->nr_cpages);
if (!dic->cpages) {
ret = -ENOMEM;
goto out_free;
@@ -1740,6 +1752,8 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic,
bool bypass_destroy_callback)
{
int i;
+	/* use sbi in dic to avoid UAF of dic->inode */
+ struct f2fs_sb_info *sbi = dic->sbi;
f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
@@ -1751,7 +1765,7 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic,
continue;
f2fs_compress_free_page(dic->tpages[i]);
}
- page_array_free(dic->inode, dic->tpages, dic->cluster_size);
+ page_array_free(sbi, dic->tpages, dic->cluster_size);
}
if (dic->cpages) {
@@ -1760,10 +1774,10 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic,
continue;
f2fs_compress_free_page(dic->cpages[i]);
}
- page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
+ page_array_free(sbi, dic->cpages, dic->nr_cpages);
}
- page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
+ page_array_free(sbi, dic->rpages, dic->nr_rpages);
kmem_cache_free(dic_entry_slab, dic);
}
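
The switch from an inode parameter to an sbi parameter in page_array_alloc()/page_array_free(), together with the new dic->sbi and dic->compress_algorithm fields, exists so that the decompress context can be torn down after the inode reference may already be gone. A minimal sketch of the before/after access pattern, simplified for illustration:

	/* before: teardown re-derived everything through dic->inode, which can
	 * already be stale once f2fs_free_dic() runs from the post-read workqueue */
	page_array_free(dic->inode, dic->tpages, dic->cluster_size);
	cops = f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];

	/* after: captured once in f2fs_alloc_dic() while the inode is valid ... */
	dic->sbi = F2FS_I_SB(cc->inode);
	dic->compress_algorithm = F2FS_I(cc->inode)->i_compress_algorithm;

	/* ... and only the cached copies are used in the teardown paths */
	page_array_free(dic->sbi, dic->tpages, dic->cluster_size);
	cops = f2fs_cops[dic->compress_algorithm];
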
@@ -1782,8 +1796,7 @@ static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
f2fs_free_dic(dic, false);
} else {
INIT_WORK(&dic->free_work, f2fs_late_free_dic);
- queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
- &dic->free_work);
+ queue_work(dic->sbi->post_read_wq, &dic->free_work);
}
}
}
@@ -1854,14 +1867,13 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
}
/*
- * Put a reference to a compressed page's decompress_io_ctx.
+ * Put a reference to a compressed folio's decompress_io_ctx.
*
- * This is called when the page is no longer needed and can be freed.
+ * This is called when the folio is no longer needed and can be freed.
*/
-void f2fs_put_page_dic(struct page *page, bool in_task)
+void f2fs_put_folio_dic(struct folio *folio, bool in_task)
{
- struct decompress_io_ctx *dic =
- (struct decompress_io_ctx *)page_private(page);
+ struct decompress_io_ctx *dic = folio->private;
f2fs_put_dic(dic, in_task);
}
@@ -1873,14 +1885,14 @@ void f2fs_put_page_dic(struct page *page, bool in_task)
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
unsigned int ofs_in_node)
{
- bool compressed = data_blkaddr(dn->inode, dn->node_page,
+ bool compressed = data_blkaddr(dn->inode, dn->node_folio,
ofs_in_node) == COMPRESS_ADDR;
int i = compressed ? 1 : 0;
- block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_folio,
ofs_in_node + i);
for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
- block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
ofs_in_node + i);
if (!__is_valid_data_blkaddr(blkaddr))
@@ -1903,17 +1915,18 @@ struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
return sbi->compress_inode->i_mapping;
}
-void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
+void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
+ block_t blkaddr, unsigned int len)
{
if (!sbi->compress_inode)
return;
- invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
+ invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr + len - 1);
}
-void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
- nid_t ino, block_t blkaddr)
+static void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
+ struct folio *folio, nid_t ino, block_t blkaddr)
{
- struct page *cpage;
+ struct folio *cfolio;
int ret;
if (!test_opt(sbi, COMPRESS_CACHE))
@@ -1925,49 +1938,49 @@ void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
return;
- cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
- if (cpage) {
- f2fs_put_page(cpage, 0);
+ cfolio = filemap_get_folio(COMPRESS_MAPPING(sbi), blkaddr);
+ if (!IS_ERR(cfolio)) {
+ f2fs_folio_put(cfolio, false);
return;
}
- cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
- if (!cpage)
+	cfolio = filemap_alloc_folio(__GFP_NOWARN | __GFP_IO, 0);
+ if (!cfolio)
return;
- ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
+ ret = filemap_add_folio(COMPRESS_MAPPING(sbi), cfolio,
blkaddr, GFP_NOFS);
if (ret) {
- f2fs_put_page(cpage, 0);
+ f2fs_folio_put(cfolio, false);
return;
}
- set_page_private_data(cpage, ino);
+ folio_set_f2fs_data(cfolio, ino);
- memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
- SetPageUptodate(cpage);
- f2fs_put_page(cpage, 1);
+ memcpy(folio_address(cfolio), folio_address(folio), PAGE_SIZE);
+ folio_mark_uptodate(cfolio);
+ f2fs_folio_put(cfolio, true);
}
-bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
+bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi, struct folio *folio,
block_t blkaddr)
{
- struct page *cpage;
+ struct folio *cfolio;
bool hitted = false;
if (!test_opt(sbi, COMPRESS_CACHE))
return false;
- cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
+ cfolio = f2fs_filemap_get_folio(COMPRESS_MAPPING(sbi),
blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
- if (cpage) {
- if (PageUptodate(cpage)) {
+ if (!IS_ERR(cfolio)) {
+ if (folio_test_uptodate(cfolio)) {
atomic_inc(&sbi->compress_page_hit);
- memcpy(page_address(page),
- page_address(cpage), PAGE_SIZE);
+ memcpy(folio_address(folio),
+ folio_address(cfolio), folio_size(folio));
hitted = true;
}
- f2fs_put_page(cpage, 1);
+ f2fs_folio_put(cfolio, true);
}
return hitted;
@@ -2001,7 +2014,7 @@ void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
continue;
}
- if (ino != get_page_private_data(&folio->page)) {
+ if (ino != folio_get_f2fs_data(folio)) {
folio_unlock(folio);
continue;
}
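
One detail worth noting in the new range helper above: invalidate_mapping_pages() takes an inclusive end index, so invalidating len consecutive compressed blocks starting at blkaddr passes blkaddr + len - 1. A minimal illustration (values assumed):

	/* old single-block form: exactly one page-cache index */
	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);

	/* new range form: len == 1 degenerates to the call above,
	 * len == 4 covers indices blkaddr .. blkaddr + 3 */
	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr + len - 1);
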
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index a2478c2afb3a..c30e69392a62 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -47,14 +47,14 @@ void f2fs_destroy_bioset(void)
bioset_exit(&f2fs_bioset);
}
-bool f2fs_is_cp_guaranteed(struct page *page)
+bool f2fs_is_cp_guaranteed(const struct folio *folio)
{
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = folio->mapping;
struct inode *inode;
struct f2fs_sb_info *sbi;
- if (!mapping)
- return false;
+ if (fscrypt_is_bounce_folio(folio))
+ return folio_test_f2fs_gcing(fscrypt_pagecache_folio(folio));
inode = mapping->host;
sbi = F2FS_I_SB(inode);
@@ -65,14 +65,14 @@ bool f2fs_is_cp_guaranteed(struct page *page)
return true;
if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
- page_private_gcing(page))
+ folio_test_f2fs_gcing(folio))
return true;
return false;
}
-static enum count_type __read_io_type(struct page *page)
+static enum count_type __read_io_type(struct folio *folio)
{
- struct address_space *mapping = page_file_mapping(page);
+ struct address_space *mapping = folio->mapping;
if (mapping) {
struct inode *inode = mapping->host;
@@ -136,27 +136,22 @@ struct bio_post_read_ctx {
*/
static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
{
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
struct bio_post_read_ctx *ctx = bio->bi_private;
- bio_for_each_segment_all(bv, bio, iter_all) {
- struct page *page = bv->bv_page;
+ bio_for_each_folio_all(fi, bio) {
+ struct folio *folio = fi.folio;
- if (f2fs_is_compressed_page(page)) {
+ if (f2fs_is_compressed_page(folio)) {
if (ctx && !ctx->decompression_attempted)
- f2fs_end_read_compressed_page(page, true, 0,
+ f2fs_end_read_compressed_page(folio, true, 0,
in_task);
- f2fs_put_page_dic(page, in_task);
+ f2fs_put_folio_dic(folio, in_task);
continue;
}
- if (bio->bi_status)
- ClearPageUptodate(page);
- else
- SetPageUptodate(page);
- dec_page_count(F2FS_P_SB(page), __read_io_type(page));
- unlock_page(page);
+ dec_page_count(F2FS_F_SB(folio), __read_io_type(folio));
+ folio_end_read(folio, bio->bi_status == BLK_STS_OK);
}
if (ctx)
@@ -186,14 +181,13 @@ static void f2fs_verify_bio(struct work_struct *work)
* as those were handled separately by f2fs_end_read_compressed_page().
*/
if (may_have_compressed_pages) {
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
- bio_for_each_segment_all(bv, bio, iter_all) {
- struct page *page = bv->bv_page;
+ bio_for_each_folio_all(fi, bio) {
+ struct folio *folio = fi.folio;
- if (!f2fs_is_compressed_page(page) &&
- !fsverity_verify_page(page)) {
+ if (!f2fs_is_compressed_page(folio) &&
+ !fsverity_verify_page(&folio->page)) {
bio->bi_status = BLK_STS_IOERR;
break;
}
@@ -238,16 +232,15 @@ static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
bool in_task)
{
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
bool all_compressed = true;
block_t blkaddr = ctx->fs_blkaddr;
- bio_for_each_segment_all(bv, ctx->bio, iter_all) {
- struct page *page = bv->bv_page;
+ bio_for_each_folio_all(fi, ctx->bio) {
+ struct folio *folio = fi.folio;
- if (f2fs_is_compressed_page(page))
- f2fs_end_read_compressed_page(page, false, blkaddr,
+ if (f2fs_is_compressed_page(folio))
+ f2fs_end_read_compressed_page(folio, false, blkaddr,
in_task);
else
all_compressed = false;
@@ -285,9 +278,9 @@ static void f2fs_post_read_work(struct work_struct *work)
static void f2fs_read_end_io(struct bio *bio)
{
- struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
+ struct f2fs_sb_info *sbi = F2FS_F_SB(bio_first_folio_all(bio));
struct bio_post_read_ctx *ctx;
- bool intask = in_task();
+ bool intask = in_task() && !irqs_disabled();
iostat_update_and_unbind_ctx(bio);
ctx = bio->bi_private;
@@ -295,7 +288,7 @@ static void f2fs_read_end_io(struct bio *bio)
if (time_to_inject(sbi, FAULT_READ_IO))
bio->bi_status = BLK_STS_IOERR;
- if (bio->bi_status) {
+ if (bio->bi_status != BLK_STS_OK) {
f2fs_finish_read_bio(bio, intask);
return;
}
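
The tightened intask check above accounts for completions that run in task context but with interrupts disabled (for example under a spin_lock_irqsave()'d section); treating those as in-task would let decompression or verity work, which may sleep, run with IRQs off. A rough sketch of the dispatch decision this enables, with a hypothetical name for the inline path:

	bool intask = in_task() && !irqs_disabled();

	if (intask)
		handle_post_read_inline(ctx);			/* may sleep: safe here */
	else
		queue_work(sbi->post_read_wq, &ctx->work);	/* defer to process context */
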
@@ -324,8 +317,7 @@ static void f2fs_read_end_io(struct bio *bio)
static void f2fs_write_end_io(struct bio *bio)
{
struct f2fs_sb_info *sbi;
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
iostat_update_and_unbind_ctx(bio);
sbi = bio->bi_private;
@@ -333,34 +325,41 @@ static void f2fs_write_end_io(struct bio *bio)
if (time_to_inject(sbi, FAULT_WRITE_IO))
bio->bi_status = BLK_STS_IOERR;
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
- enum count_type type = WB_DATA_TYPE(page, false);
+ bio_for_each_folio_all(fi, bio) {
+ struct folio *folio = fi.folio;
+ enum count_type type;
- fscrypt_finalize_bounce_page(&page);
+ if (fscrypt_is_bounce_folio(folio)) {
+ struct folio *io_folio = folio;
+
+ folio = fscrypt_pagecache_folio(io_folio);
+ fscrypt_free_bounce_page(&io_folio->page);
+ }
#ifdef CONFIG_F2FS_FS_COMPRESSION
- if (f2fs_is_compressed_page(page)) {
- f2fs_compress_write_end_io(bio, page);
+ if (f2fs_is_compressed_page(folio)) {
+ f2fs_compress_write_end_io(bio, folio);
continue;
}
#endif
- if (unlikely(bio->bi_status)) {
- mapping_set_error(page->mapping, -EIO);
+ type = WB_DATA_TYPE(folio, false);
+
+ if (unlikely(bio->bi_status != BLK_STS_OK)) {
+ mapping_set_error(folio->mapping, -EIO);
if (type == F2FS_WB_CP_DATA)
f2fs_stop_checkpoint(sbi, true,
STOP_CP_REASON_WRITE_FAIL);
}
- f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
- page_folio(page)->index != nid_of_node(page));
+ f2fs_bug_on(sbi, is_node_folio(folio) &&
+ folio->index != nid_of_node(folio));
dec_page_count(sbi, type);
- if (f2fs_in_warm_node_list(sbi, page))
- f2fs_del_fsync_node_entry(sbi, page);
- clear_page_private_gcing(page);
- end_page_writeback(page);
+ if (f2fs_in_warm_node_list(sbi, folio))
+ f2fs_del_fsync_node_entry(sbi, folio);
+ folio_clear_f2fs_gcing(folio);
+ folio_end_writeback(folio);
}
if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
wq_has_sleeper(&sbi->cp_wait))
@@ -443,6 +442,11 @@ static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
op_flags |= REQ_META;
if (BIT(fio->temp) & fua_flag)
op_flags |= REQ_FUA;
+
+ if (fio->type == DATA &&
+ F2FS_I(fio->folio->mapping->host)->ioprio_hint == F2FS_IOPRIO_WRITE)
+ op_flags |= REQ_PRIO;
+
return op_flags;
}
@@ -516,10 +520,6 @@ static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
enum page_type type)
{
WARN_ON_ONCE(is_read_io(bio_op(bio)));
-
- if (f2fs_lfs_mode(sbi) && current->plug && PAGE_TYPE_ON_MAIN(type))
- blk_finish_plug(current->plug);
-
trace_f2fs_submit_write_bio(sbi->sb, type, bio);
iostat_update_submit_ctx(bio, type);
submit_bio(bio);
@@ -543,34 +543,33 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
}
static bool __has_merged_page(struct bio *bio, struct inode *inode,
- struct page *page, nid_t ino)
+ struct folio *folio, nid_t ino)
{
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
if (!bio)
return false;
- if (!inode && !page && !ino)
+ if (!inode && !folio && !ino)
return true;
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *target = bvec->bv_page;
+ bio_for_each_folio_all(fi, bio) {
+ struct folio *target = fi.folio;
- if (fscrypt_is_bounce_page(target)) {
- target = fscrypt_pagecache_page(target);
+ if (fscrypt_is_bounce_folio(target)) {
+ target = fscrypt_pagecache_folio(target);
if (IS_ERR(target))
continue;
}
if (f2fs_is_compressed_page(target)) {
- target = f2fs_compress_control_page(target);
+ target = f2fs_compress_control_folio(target);
if (IS_ERR(target))
continue;
}
if (inode && inode == target->mapping->host)
return true;
- if (page && page == target)
+ if (folio && folio == target)
return true;
if (ino && ino == ino_of_node(target))
return true;
@@ -639,7 +638,7 @@ unlock_out:
}
static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
- struct inode *inode, struct page *page,
+ struct inode *inode, struct folio *folio,
nid_t ino, enum page_type type, bool force)
{
enum temp_type temp;
@@ -651,7 +650,7 @@ static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
f2fs_down_read(&io->io_rwsem);
- ret = __has_merged_page(io->bio, inode, page, ino);
+ ret = __has_merged_page(io->bio, inode, folio, ino);
f2fs_up_read(&io->io_rwsem);
}
if (ret)
@@ -669,10 +668,10 @@ void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
}
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
- struct inode *inode, struct page *page,
+ struct inode *inode, struct folio *folio,
nid_t ino, enum page_type type)
{
- __submit_merged_write_cond(sbi, inode, page, ino, type, false);
+ __submit_merged_write_cond(sbi, inode, folio, ino, type, false);
}
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
@@ -689,33 +688,29 @@ void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
struct bio *bio;
- struct page *page = fio->encrypted_page ?
- fio->encrypted_page : fio->page;
+ struct folio *fio_folio = fio->folio;
+ struct folio *data_folio = fio->encrypted_page ?
+ page_folio(fio->encrypted_page) : fio_folio;
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
fio->is_por ? META_POR : (__is_meta_io(fio) ?
META_GENERIC : DATA_GENERIC_ENHANCE)))
return -EFSCORRUPTED;
- trace_f2fs_submit_page_bio(page, fio);
+ trace_f2fs_submit_folio_bio(data_folio, fio);
/* Allocate a new bio */
bio = __bio_alloc(fio, 1);
- f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
- page_folio(fio->page)->index, fio, GFP_NOIO);
-
- if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
- bio_put(bio);
- return -EFAULT;
- }
+ f2fs_set_bio_crypt_ctx(bio, fio_folio->mapping->host,
+ fio_folio->index, fio, GFP_NOIO);
+ bio_add_folio_nofail(bio, data_folio, folio_size(data_folio), 0);
if (fio->io_wbc && !is_read_io(fio->op))
- wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
- PAGE_SIZE);
+ wbc_account_cgroup_owner(fio->io_wbc, fio_folio, PAGE_SIZE);
inc_page_count(fio->sbi, is_read_io(fio->op) ?
- __read_io_type(page) : WB_DATA_TYPE(fio->page, false));
+ __read_io_type(data_folio) : WB_DATA_TYPE(fio->folio, false));
if (is_read_io(bio_op(bio)))
f2fs_submit_read_bio(fio->sbi, bio, fio->type);
@@ -738,9 +733,11 @@ static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
static bool io_type_is_mergeable(struct f2fs_bio_info *io,
struct f2fs_io_info *fio)
{
+ blk_opf_t mask = ~(REQ_PREFLUSH | REQ_FUA);
+
if (io->fio.op != fio->op)
return false;
- return io->fio.op_flags == fio->op_flags;
+ return (io->fio.op_flags & mask) == (fio->op_flags & mask);
}
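
With the mask above, two writes that differ only in REQ_PREFLUSH/REQ_FUA are now treated as type-mergeable; the block-address and crypto checks in io_is_mergeable() still apply. A small worked example with assumed flag values:

	blk_opf_t mask = ~(REQ_PREFLUSH | REQ_FUA);
	blk_opf_t a = REQ_SYNC | REQ_FUA;	/* e.g. an fsync-driven write */
	blk_opf_t b = REQ_SYNC;			/* an ordinary sync write */

	/* old check:  a == b                   -> false, new bio forced  */
	/* new check: (a & mask) == (b & mask)  -> true,  merge permitted */
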
static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
@@ -755,7 +752,7 @@ static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
}
static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
- struct page *page, enum temp_type temp)
+ struct folio *folio, enum temp_type temp)
{
struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
struct bio_entry *be;
@@ -764,8 +761,7 @@ static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
be->bio = bio;
bio_get(bio);
- if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
- f2fs_bug_on(sbi, 1);
+ bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
f2fs_down_write(&io->bio_list_lock);
list_add_tail(&be->list, &io->bio_list);
@@ -779,8 +775,9 @@ static void del_bio_entry(struct bio_entry *be)
}
static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
- struct page *page)
+ struct folio *folio)
{
+ struct folio *fio_folio = fio->folio;
struct f2fs_sb_info *sbi = fio->sbi;
enum temp_type temp;
bool found = false;
@@ -802,10 +799,9 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
*fio->last_block,
fio->new_blkaddr));
if (f2fs_crypt_mergeable_bio(*bio,
- fio->page->mapping->host,
- page_folio(fio->page)->index, fio) &&
- bio_add_page(*bio, page, PAGE_SIZE, 0) ==
- PAGE_SIZE) {
+ fio_folio->mapping->host,
+ fio_folio->index, fio) &&
+ bio_add_folio(*bio, folio, folio_size(folio), 0)) {
ret = 0;
break;
}
@@ -827,13 +823,13 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
}
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
- struct bio **bio, struct page *page)
+ struct bio **bio, struct folio *folio)
{
enum temp_type temp;
bool found = false;
struct bio *target = bio ? *bio : NULL;
- f2fs_bug_on(sbi, !target && !page);
+ f2fs_bug_on(sbi, !target && !folio);
for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
@@ -849,7 +845,7 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
found = (target == be->bio);
else
found = __has_merged_page(be->bio, NULL,
- page, 0);
+ folio, 0);
if (found)
break;
}
@@ -866,7 +862,7 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
found = (target == be->bio);
else
found = __has_merged_page(be->bio, NULL,
- page, 0);
+ folio, 0);
if (found) {
target = be->bio;
del_bio_entry(be);
@@ -887,14 +883,15 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
struct bio *bio = *fio->bio;
- struct page *page = fio->encrypted_page ?
- fio->encrypted_page : fio->page;
+ struct folio *data_folio = fio->encrypted_page ?
+ page_folio(fio->encrypted_page) : fio->folio;
+ struct folio *folio = fio->folio;
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
return -EFSCORRUPTED;
- trace_f2fs_submit_page_bio(page, fio);
+ trace_f2fs_submit_folio_bio(data_folio, fio);
if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
fio->new_blkaddr))
@@ -902,20 +899,19 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
alloc_new:
if (!bio) {
bio = __bio_alloc(fio, BIO_MAX_VECS);
- f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
- page_folio(fio->page)->index, fio, GFP_NOIO);
+ f2fs_set_bio_crypt_ctx(bio, folio->mapping->host,
+ folio->index, fio, GFP_NOIO);
- add_bio_entry(fio->sbi, bio, page, fio->temp);
+ add_bio_entry(fio->sbi, bio, data_folio, fio->temp);
} else {
- if (add_ipu_page(fio, &bio, page))
+ if (add_ipu_page(fio, &bio, data_folio))
goto alloc_new;
}
if (fio->io_wbc)
- wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
- PAGE_SIZE);
+ wbc_account_cgroup_owner(fio->io_wbc, folio, folio_size(folio));
- inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
+ inc_page_count(fio->sbi, WB_DATA_TYPE(folio, false));
*fio->last_block = fio->new_blkaddr;
*fio->bio = bio;
@@ -950,7 +946,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
struct f2fs_sb_info *sbi = fio->sbi;
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
- struct page *bio_page;
+ struct folio *bio_folio;
enum count_type type;
f2fs_bug_on(sbi, is_read_io(fio->op));
@@ -981,44 +977,44 @@ next:
verify_fio_blkaddr(fio);
if (fio->encrypted_page)
- bio_page = fio->encrypted_page;
+ bio_folio = page_folio(fio->encrypted_page);
else if (fio->compressed_page)
- bio_page = fio->compressed_page;
+ bio_folio = page_folio(fio->compressed_page);
else
- bio_page = fio->page;
+ bio_folio = fio->folio;
/* set submitted = true as a return value */
fio->submitted = 1;
- type = WB_DATA_TYPE(bio_page, fio->compressed_page);
+ type = WB_DATA_TYPE(bio_folio, fio->compressed_page);
inc_page_count(sbi, type);
if (io->bio &&
(!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
fio->new_blkaddr) ||
- !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
- page_folio(bio_page)->index, fio)))
+ !f2fs_crypt_mergeable_bio(io->bio, fio_inode(fio),
+ bio_folio->index, fio)))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
io->bio = __bio_alloc(fio, BIO_MAX_VECS);
- f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
- page_folio(bio_page)->index, fio, GFP_NOIO);
+ f2fs_set_bio_crypt_ctx(io->bio, fio_inode(fio),
+ bio_folio->index, fio, GFP_NOIO);
io->fio = *fio;
}
- if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
+ if (!bio_add_folio(io->bio, bio_folio, folio_size(bio_folio), 0)) {
__submit_merged_bio(io);
goto alloc_new;
}
if (fio->io_wbc)
- wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
- PAGE_SIZE);
+ wbc_account_cgroup_owner(fio->io_wbc, fio->folio,
+ folio_size(fio->folio));
io->last_block_in_bio = fio->new_blkaddr;
- trace_f2fs_submit_page_write(fio->page, fio);
+ trace_f2fs_submit_folio_write(fio->folio, fio);
#ifdef CONFIG_BLK_DEV_ZONED
if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
@@ -1054,8 +1050,6 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
REQ_OP_READ | op_flag,
for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset);
- if (!bio)
- return ERR_PTR(-ENOMEM);
bio->bi_iter.bi_sector = sector;
f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
bio->bi_end_io = f2fs_read_end_io;
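
The dropped NULL check above relies on a documented guarantee: when a bioset is supplied and the gfp mask allows direct reclaim (both GFP_NOIO and GFP_KERNEL do), bio_alloc_bioset() does not fail, so the error path was dead code. A condensed view of the allocation as it now stands:

	/* never NULL here: &f2fs_bioset is mempool-backed and the gfp mask
	 * (GFP_NOIO or GFP_KERNEL) permits direct reclaim */
	bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages), REQ_OP_READ | op_flag,
			       for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset);
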
@@ -1089,7 +1083,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
}
/* This can handle encryption stuffs */
-static int f2fs_submit_page_read(struct inode *inode, struct folio *folio,
+static void f2fs_submit_page_read(struct inode *inode, struct folio *folio,
block_t blkaddr, blk_opf_t op_flags,
bool for_write)
{
@@ -1098,28 +1092,21 @@ static int f2fs_submit_page_read(struct inode *inode, struct folio *folio,
bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
folio->index, for_write);
- if (IS_ERR(bio))
- return PTR_ERR(bio);
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, blkaddr);
- if (!bio_add_folio(bio, folio, PAGE_SIZE, 0)) {
- iostat_update_and_unbind_ctx(bio);
- if (bio->bi_private)
- mempool_free(bio->bi_private, bio_post_read_ctx_pool);
- bio_put(bio);
- return -EFAULT;
- }
+ if (!bio_add_folio(bio, folio, PAGE_SIZE, 0))
+ f2fs_bug_on(sbi, 1);
+
inc_page_count(sbi, F2FS_RD_DATA);
f2fs_update_iostat(sbi, NULL, FS_DATA_READ_IO, F2FS_BLKSIZE);
f2fs_submit_read_bio(sbi, bio, DATA);
- return 0;
}
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
- __le32 *addr = get_dnode_addr(dn->inode, dn->node_page);
+ __le32 *addr = get_dnode_addr(dn->inode, dn->node_folio);
dn->data_blkaddr = blkaddr;
addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
@@ -1128,14 +1115,14 @@ static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
/*
* Lock ordering for the change of data block address:
* ->data_page
- * ->node_page
+ * ->node_folio
* update block addresses in the node page
*/
void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
- f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
+ f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true);
__set_data_blkaddr(dn, blkaddr);
- if (set_page_dirty(dn->node_page))
+ if (folio_mark_dirty(dn->node_folio))
dn->node_changed = true;
}
@@ -1163,7 +1150,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
dn->ofs_in_node, count);
- f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
+ f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true);
for (; count > 0; dn->ofs_in_node++) {
block_t blkaddr = f2fs_data_blkaddr(dn);
@@ -1174,7 +1161,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
}
}
- if (set_page_dirty(dn->node_page))
+ if (folio_mark_dirty(dn->node_folio))
dn->node_changed = true;
return 0;
}
@@ -1192,7 +1179,7 @@ int f2fs_reserve_new_block(struct dnode_of_data *dn)
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
- bool need_put = dn->inode_page ? false : true;
+ bool need_put = dn->inode_folio ? false : true;
int err;
err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
@@ -1206,18 +1193,17 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
return err;
}
-struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
- blk_opf_t op_flags, bool for_write,
- pgoff_t *next_pgofs)
+struct folio *f2fs_get_read_data_folio(struct inode *inode, pgoff_t index,
+ blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs)
{
struct address_space *mapping = inode->i_mapping;
struct dnode_of_data dn;
- struct page *page;
+ struct folio *folio;
int err;
- page = f2fs_grab_cache_page(mapping, index, for_write);
- if (!page)
- return ERR_PTR(-ENOMEM);
+ folio = f2fs_grab_cache_folio(mapping, index, for_write);
+ if (IS_ERR(folio))
+ return folio;
if (f2fs_lookup_read_extent_cache_block(inode, index,
&dn.data_blkaddr)) {
@@ -1252,61 +1238,62 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
goto put_err;
}
got_it:
- if (PageUptodate(page)) {
- unlock_page(page);
- return page;
+ if (folio_test_uptodate(folio)) {
+ folio_unlock(folio);
+ return folio;
}
/*
* A new dentry page is allocated but not able to be written, since its
* new inode page couldn't be allocated due to -ENOSPC.
* In such the case, its blkaddr can be remained as NEW_ADDR.
- * see, f2fs_add_link -> f2fs_get_new_data_page ->
+ * see, f2fs_add_link -> f2fs_get_new_data_folio ->
* f2fs_init_inode_metadata.
*/
if (dn.data_blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_SIZE);
- if (!PageUptodate(page))
- SetPageUptodate(page);
- unlock_page(page);
- return page;
+ folio_zero_segment(folio, 0, folio_size(folio));
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
+ return folio;
}
- err = f2fs_submit_page_read(inode, page_folio(page), dn.data_blkaddr,
+ f2fs_submit_page_read(inode, folio, dn.data_blkaddr,
op_flags, for_write);
- if (err)
- goto put_err;
- return page;
+ return folio;
put_err:
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return ERR_PTR(err);
}
-struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
+struct folio *f2fs_find_data_folio(struct inode *inode, pgoff_t index,
pgoff_t *next_pgofs)
{
struct address_space *mapping = inode->i_mapping;
- struct page *page;
+ struct folio *folio;
- page = find_get_page(mapping, index);
- if (page && PageUptodate(page))
- return page;
- f2fs_put_page(page, 0);
+ folio = f2fs_filemap_get_folio(mapping, index, FGP_ACCESSED, 0);
+ if (IS_ERR(folio))
+ goto read;
+ if (folio_test_uptodate(folio))
+ return folio;
+ f2fs_folio_put(folio, false);
- page = f2fs_get_read_data_page(inode, index, 0, false, next_pgofs);
- if (IS_ERR(page))
- return page;
+read:
+ folio = f2fs_get_read_data_folio(inode, index, 0, false, next_pgofs);
+ if (IS_ERR(folio))
+ return folio;
- if (PageUptodate(page))
- return page;
+ if (folio_test_uptodate(folio))
+ return folio;
- wait_on_page_locked(page);
- if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 0);
+ folio_wait_locked(folio);
+ if (unlikely(!folio_test_uptodate(folio))) {
+ f2fs_folio_put(folio, false);
return ERR_PTR(-EIO);
}
- return page;
+ return folio;
}
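For readers tracking the page-to-folio conversion in these hunks, the substitutions are largely mechanical but the error convention changes: f2fs_grab_cache_folio() returns an ERR_PTR where f2fs_grab_cache_page() returned NULL, and the boolean of f2fs_folio_put() mirrors the old f2fs_put_page() unlock argument. A minimal sketch of the two idioms (not a complete function; the return type changes from struct page * to struct folio *):

	/* pre-conversion idiom (the '-' lines above) */
	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}
	f2fs_put_page(page, 1);			/* 1: unlock, then put */

	/* post-conversion idiom (the '+' lines above) */
	folio = f2fs_grab_cache_folio(mapping, index, for_write);
	if (IS_ERR(folio))
		return folio;			/* the error travels in the pointer */
	if (folio_test_uptodate(folio)) {
		folio_unlock(folio);
		return folio;
	}
	f2fs_folio_put(folio, true);		/* true mirrors f2fs_put_page(page, 1) */
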
/*
@@ -1314,23 +1301,23 @@ struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
* Because, the callers, functions in dir.c and GC, should be able to know
* whether this page exists or not.
*/
-struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
+struct folio *f2fs_get_lock_data_folio(struct inode *inode, pgoff_t index,
bool for_write)
{
struct address_space *mapping = inode->i_mapping;
- struct page *page;
+ struct folio *folio;
- page = f2fs_get_read_data_page(inode, index, 0, for_write, NULL);
- if (IS_ERR(page))
- return page;
+ folio = f2fs_get_read_data_folio(inode, index, 0, for_write, NULL);
+ if (IS_ERR(folio))
+ return folio;
/* wait for read completion */
- lock_page(page);
- if (unlikely(page->mapping != mapping || !PageUptodate(page))) {
- f2fs_put_page(page, 1);
+ folio_lock(folio);
+ if (unlikely(folio->mapping != mapping || !folio_test_uptodate(folio))) {
+ f2fs_folio_put(folio, true);
return ERR_PTR(-EIO);
}
- return page;
+ return folio;
}
/*
@@ -1339,57 +1326,57 @@ struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
*
* Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
* f2fs_unlock_op().
- * Note that, ipage is set only by make_empty_dir, and if any error occur,
- * ipage should be released by this function.
+ * Note that, ifolio is set only by make_empty_dir, and if any error occurs,
+ * ifolio should be released by this function.
*/
-struct page *f2fs_get_new_data_page(struct inode *inode,
- struct page *ipage, pgoff_t index, bool new_i_size)
+struct folio *f2fs_get_new_data_folio(struct inode *inode,
+ struct folio *ifolio, pgoff_t index, bool new_i_size)
{
struct address_space *mapping = inode->i_mapping;
- struct page *page;
+ struct folio *folio;
struct dnode_of_data dn;
int err;
- page = f2fs_grab_cache_page(mapping, index, true);
- if (!page) {
+ folio = f2fs_grab_cache_folio(mapping, index, true);
+ if (IS_ERR(folio)) {
/*
- * before exiting, we should make sure ipage will be released
+ * before exiting, we should make sure ifolio will be released
* if any error occur.
*/
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
return ERR_PTR(-ENOMEM);
}
- set_new_dnode(&dn, inode, ipage, NULL, 0);
+ set_new_dnode(&dn, inode, ifolio, NULL, 0);
err = f2fs_reserve_block(&dn, index);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return ERR_PTR(err);
}
- if (!ipage)
+ if (!ifolio)
f2fs_put_dnode(&dn);
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
goto got_it;
if (dn.data_blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_SIZE);
- if (!PageUptodate(page))
- SetPageUptodate(page);
+ folio_zero_segment(folio, 0, folio_size(folio));
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
} else {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
- /* if ipage exists, blkaddr should be NEW_ADDR */
- f2fs_bug_on(F2FS_I_SB(inode), ipage);
- page = f2fs_get_lock_data_page(inode, index, true);
- if (IS_ERR(page))
- return page;
+ /* if ifolio exists, blkaddr should be NEW_ADDR */
+ f2fs_bug_on(F2FS_I_SB(inode), ifolio);
+ folio = f2fs_get_lock_data_folio(inode, index, true);
+ if (IS_ERR(folio))
+ return folio;
}
got_it:
if (new_i_size && i_size_read(inode) <
((loff_t)(index + 1) << PAGE_SHIFT))
f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
- return page;
+ return folio;
}
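The comment above spells out an ownership quirk that matters for the dir.c hunks later in this patch: on failure, f2fs_get_new_data_folio() has already dropped the ifolio the caller passed in. A caller-side sketch (mirroring make_empty_dir() as converted below; on the error path the passed-in folio must not be put again):

	/* ifolio ownership passes to the callee; on error it is already released */
	dentry_folio = f2fs_get_new_data_folio(inode, folio, 0, true);
	if (IS_ERR(dentry_folio))
		return PTR_ERR(dentry_folio);
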
static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
@@ -1423,7 +1410,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
return err;
if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
- f2fs_invalidate_internal_cache(sbi, old_blkaddr);
+ f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1);
f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
return 0;
@@ -1431,6 +1418,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
static void f2fs_map_lock(struct f2fs_sb_info *sbi, int flag)
{
+ f2fs_down_read(&sbi->cp_enable_rwsem);
if (flag == F2FS_GET_BLOCK_PRE_AIO)
f2fs_down_read(&sbi->node_change);
else
@@ -1443,6 +1431,7 @@ static void f2fs_map_unlock(struct f2fs_sb_info *sbi, int flag)
f2fs_up_read(&sbi->node_change);
else
f2fs_unlock_op(sbi);
+ f2fs_up_read(&sbi->cp_enable_rwsem);
}
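The two hunks above add an outer shared lock around the existing mapping locks; cp_enable_rwsem is assumed to be introduced elsewhere in this series, presumably taken for write while checkpointing is disabled or re-enabled. The nesting the helpers now enforce, in sketch form:

	/* acquire: outer cp_enable_rwsem (shared), then the per-flag lock */
	f2fs_map_lock(sbi, flag);	/* cp_enable_rwsem -> node_change or cp_rwsem */
	/* ... look up / allocate block addresses ... */
	f2fs_map_unlock(sbi, flag);	/* released in reverse order */
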
int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index)
@@ -1508,8 +1497,8 @@ static bool f2fs_map_blocks_cached(struct inode *inode,
struct f2fs_dev_info *dev = &sbi->devs[bidx];
map->m_bdev = dev->bdev;
- map->m_pblk -= dev->start_blk;
map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
+ map->m_pblk -= dev->start_blk;
} else {
map->m_bdev = inode->i_sb->s_bdev;
}
@@ -1554,10 +1543,14 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
unsigned int start_pgofs;
int bidx = 0;
bool is_hole;
+ bool lfs_dio_write;
if (!maxblocks)
return 0;
+ lfs_dio_write = (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
+ map->m_may_create);
+
if (!map->m_may_create && f2fs_map_blocks_cached(inode, map, flag))
goto out;
@@ -1572,9 +1565,15 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
pgofs = (pgoff_t)map->m_lblk;
end = pgofs + maxblocks;
+ if (flag == F2FS_GET_BLOCK_PRECACHE)
+ mode = LOOKUP_NODE_RA;
+
next_dnode:
- if (map->m_may_create)
+ if (map->m_may_create) {
+ if (f2fs_lfs_mode(sbi))
+ f2fs_balance_fs(sbi, true);
f2fs_map_lock(sbi, flag);
+ }
/* When reading holes, we need its node page */
set_new_dnode(&dn, inode, NULL, NULL, 0);
@@ -1590,7 +1589,7 @@ next_dnode:
start_pgofs = pgofs;
prealloc = 0;
last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(dn.node_folio, inode);
next_block:
blkaddr = f2fs_data_blkaddr(&dn);
@@ -1604,7 +1603,7 @@ next_block:
/* use out-place-update for direct IO under LFS mode */
if (map->m_may_create && (is_hole ||
(flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
- !f2fs_is_pinned_file(inode)))) {
+ !f2fs_is_pinned_file(inode) && map->m_last_pblk != blkaddr))) {
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
goto sync_out;
@@ -1688,10 +1687,15 @@ next_block:
if (map->m_multidev_dio)
map->m_bdev = FDEV(bidx).bdev;
+
+ if (lfs_dio_write)
+ map->m_last_pblk = NULL_ADDR;
} else if (map_is_mergeable(sbi, map, blkaddr, flag, bidx, ofs)) {
ofs++;
map->m_len++;
} else {
+ if (lfs_dio_write && !f2fs_is_pinned_file(inode))
+ map->m_last_pblk = blkaddr;
goto sync_out;
}
@@ -1716,14 +1720,6 @@ skip:
dn.ofs_in_node = end_offset;
}
- if (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
- map->m_may_create) {
- /* the next block to be allocated may not be contiguous. */
- if (GET_SEGOFF_FROM_SEG0(sbi, blkaddr) % BLKS_PER_SEC(sbi) ==
- CAP_BLKS_PER_SEC(sbi) - 1)
- goto sync_out;
- }
-
if (pgofs >= end)
goto sync_out;
else if (dn.ofs_in_node < end_offset)
@@ -1778,12 +1774,13 @@ sync_out:
if (map->m_flags & F2FS_MAP_MAPPED) {
unsigned int ofs = start_pgofs - map->m_lblk;
- f2fs_update_read_extent_cache_range(&dn,
- start_pgofs, map->m_pblk + ofs,
- map->m_len - ofs);
+ if (map->m_len > ofs)
+ f2fs_update_read_extent_cache_range(&dn,
+ start_pgofs, map->m_pblk + ofs,
+ map->m_len - ofs);
}
if (map->m_next_extent)
- *map->m_next_extent = pgofs + 1;
+ *map->m_next_extent = is_hole ? pgofs + 1 : pgofs;
}
f2fs_put_dnode(&dn);
unlock_out:
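Taken together, the DIO/LFS hunks above change how out-of-place updates are decided for direct writes in LFS mode: rather than force-ending the mapping at a section boundary (the removed check), f2fs_map_blocks() records the last physical block of the run in map->m_last_pblk when it stops merging, and the next call skips the out-of-place allocation if the write resumes exactly at that block. A consolidated sketch of the added logic, with the surrounding allocation code elided:

	bool lfs_dio_write = (flag == F2FS_GET_BLOCK_DIO &&
			      f2fs_lfs_mode(sbi) && map->m_may_create);

	/* stay in place when this DIO write continues the previous extent */
	if (map->m_may_create && (is_hole ||
	    (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
	     !f2fs_is_pinned_file(inode) && map->m_last_pblk != blkaddr))) {
		/* ... allocate a new block (out-of-place update) ... */
	}

	/* when the run stops being mergeable, remember where it ended */
	if (lfs_dio_write && !f2fs_is_pinned_file(inode))
		map->m_last_pblk = blkaddr;
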
@@ -1826,7 +1823,6 @@ static int f2fs_xattr_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct page *page;
struct node_info ni;
__u64 phys = 0, len;
__u32 flags;
@@ -1835,15 +1831,15 @@ static int f2fs_xattr_fiemap(struct inode *inode,
if (f2fs_has_inline_xattr(inode)) {
int offset;
+ struct folio *folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi),
+ inode->i_ino, false);
- page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
- inode->i_ino, false);
- if (!page)
- return -ENOMEM;
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return err;
}
@@ -1855,7 +1851,7 @@ static int f2fs_xattr_fiemap(struct inode *inode,
phys += offset;
len = inline_xattr_size(inode);
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
@@ -1869,20 +1865,22 @@ static int f2fs_xattr_fiemap(struct inode *inode,
}
if (xnid) {
- page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
- if (!page)
- return -ENOMEM;
+ struct folio *folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi),
+ xnid, false);
+
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
err = f2fs_get_node_info(sbi, xnid, &ni, false);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return err;
}
phys = F2FS_BLK_TO_BYTES(ni.blk_addr);
len = inode->i_sb->s_blocksize;
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
flags = FIEMAP_EXTENT_LAST;
}
@@ -2078,7 +2076,7 @@ static int f2fs_read_single_page(struct inode *inode, struct folio *folio,
sector_t last_block;
sector_t last_block_in_file;
sector_t block_nr;
- pgoff_t index = folio_index(folio);
+ pgoff_t index = folio->index;
int ret = 0;
block_in_file = (sector_t)index;
@@ -2144,16 +2142,10 @@ submit_and_realloc:
f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
- if (bio == NULL) {
+ if (bio == NULL)
bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
f2fs_ra_op_flags(rac), index,
false);
- if (IS_ERR(bio)) {
- ret = PTR_ERR(bio);
- bio = NULL;
- goto out;
- }
- }
/*
* If the page is under writeback, we need to wait for
@@ -2191,6 +2183,12 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
int i;
int ret = 0;
+ if (unlikely(f2fs_cp_error(sbi))) {
+ ret = -EIO;
+ from_dnode = false;
+ goto out_put_dnode;
+ }
+
f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) +
@@ -2234,17 +2232,13 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
if (ret)
goto out;
- if (unlikely(f2fs_cp_error(sbi))) {
- ret = -EIO;
- goto out_put_dnode;
- }
f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
skip_reading_dnode:
for (i = 1; i < cc->cluster_size; i++) {
block_t blkaddr;
- blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
+ blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_folio,
dn.ofs_in_node + i) :
ei.blk + i - 1;
@@ -2278,14 +2272,13 @@ skip_reading_dnode:
block_t blkaddr;
struct bio_post_read_ctx *ctx;
- blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
+ blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_folio,
dn.ofs_in_node + i + 1) :
ei.blk + i;
f2fs_wait_on_block_writeback(inode, blkaddr);
- if (f2fs_load_compressed_page(sbi, folio_page(folio, 0),
- blkaddr)) {
+ if (f2fs_load_compressed_folio(sbi, folio, blkaddr)) {
if (atomic_dec_and_test(&dic->remaining_pages)) {
f2fs_decompress_cluster(dic, true);
break;
@@ -2301,18 +2294,10 @@ submit_and_realloc:
bio = NULL;
}
- if (!bio) {
- bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
+ if (!bio)
+ bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages - i,
f2fs_ra_op_flags(rac),
folio->index, for_write);
- if (IS_ERR(bio)) {
- ret = PTR_ERR(bio);
- f2fs_decompress_end_io(dic, ret, true);
- f2fs_put_dnode(&dn);
- *bio_ret = NULL;
- return ret;
- }
- }
if (!bio_add_folio(bio, folio, blocksize, 0))
goto submit_and_realloc;
@@ -2375,6 +2360,14 @@ static int f2fs_mpage_readpages(struct inode *inode,
unsigned max_nr_pages = nr_pages;
int ret = 0;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode)) {
+ index = rac ? readahead_index(rac) : folio->index;
+ max_nr_pages = round_up(index + nr_pages, cc.cluster_size) -
+ round_down(index, cc.cluster_size);
+ }
+#endif
+
map.m_pblk = 0;
map.m_lblk = 0;
map.m_len = 0;
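For compressed inodes, the block added above rounds the readahead window out to whole clusters, so the read bio allocated later covers the clusters actually touched rather than the raw page count. A worked example, assuming cluster_size == 4:

	/* readahead of 3 pages starting at index 5 touches cluster [4..7] */
	max_nr_pages = round_up(5 + 3, 4) - round_down(5, 4);	/* = 8 - 4 = 4 blocks */
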
@@ -2391,7 +2384,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
- index = folio_index(folio);
+ index = folio->index;
if (!f2fs_compressed_file(inode))
goto read_single_page;
@@ -2464,7 +2457,7 @@ next_page:
static int f2fs_read_data_folio(struct file *file, struct folio *folio)
{
- struct inode *inode = folio_file_mapping(folio)->host;
+ struct inode *inode = folio->mapping->host;
int ret = -EAGAIN;
trace_f2fs_readpage(folio, DATA);
@@ -2500,8 +2493,9 @@ static void f2fs_readahead(struct readahead_control *rac)
int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
{
- struct inode *inode = fio->page->mapping->host;
- struct page *mpage, *page;
+ struct inode *inode = fio_inode(fio);
+ struct folio *mfolio;
+ struct page *page;
gfp_t gfp_flags = GFP_NOFS;
if (!f2fs_encrypted_file(inode))
@@ -2513,7 +2507,7 @@ int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
return 0;
retry_encrypt:
- fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
+ fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page_folio(page),
PAGE_SIZE, 0, gfp_flags);
if (IS_ERR(fio->encrypted_page)) {
/* flush pending IOs and wait for a while in the ENOMEM case */
@@ -2526,12 +2520,12 @@ retry_encrypt:
return PTR_ERR(fio->encrypted_page);
}
- mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
- if (mpage) {
- if (PageUptodate(mpage))
- memcpy(page_address(mpage),
+ mfolio = filemap_lock_folio(META_MAPPING(fio->sbi), fio->old_blkaddr);
+ if (!IS_ERR(mfolio)) {
+ if (folio_test_uptodate(mfolio))
+ memcpy(folio_address(mfolio),
page_address(fio->encrypted_page), PAGE_SIZE);
- f2fs_put_page(mpage, 1);
+ f2fs_folio_put(mfolio, true);
}
return 0;
}
@@ -2630,7 +2624,7 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
- struct inode *inode = fio->page->mapping->host;
+ struct inode *inode = fio_inode(fio);
if (f2fs_should_update_outplace(inode, fio))
return false;
@@ -2640,7 +2634,7 @@ static inline bool need_inplace_update(struct f2fs_io_info *fio)
int f2fs_do_write_data_page(struct f2fs_io_info *fio)
{
- struct folio *folio = page_folio(fio->page);
+ struct folio *folio = fio->folio;
struct inode *inode = folio->mapping->host;
struct dnode_of_data dn;
struct node_info ni;
@@ -2650,7 +2644,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
/* Use COW inode to make dnode_of_data for atomic write */
atomic_commit = f2fs_is_atomic_file(inode) &&
- page_private_atomic(folio_page(folio, 0));
+ folio_test_f2fs_atomic(folio);
if (atomic_commit)
set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
else
@@ -2681,7 +2675,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
/* This page is already truncated */
if (fio->old_blkaddr == NULL_ADDR) {
folio_clear_uptodate(folio);
- clear_page_private_gcing(folio_page(folio, 0));
+ folio_clear_f2fs_gcing(folio);
goto out_writepage;
}
got_it:
@@ -2751,7 +2745,7 @@ got_it:
trace_f2fs_do_write_data_page(folio, OPU);
set_inode_flag(inode, FI_APPEND_WRITE);
if (atomic_commit)
- clear_page_private_atomic(folio_page(folio, 0));
+ folio_clear_f2fs_atomic(folio);
out_writepage:
f2fs_put_dnode(&dn);
out:
@@ -2769,7 +2763,6 @@ int f2fs_write_single_data_page(struct folio *folio, int *submitted,
bool allow_balance)
{
struct inode *inode = folio->mapping->host;
- struct page *page = folio_page(folio, 0);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = ((unsigned long long)i_size)
@@ -2786,7 +2779,7 @@ int f2fs_write_single_data_page(struct folio *folio, int *submitted,
.op = REQ_OP_WRITE,
.op_flags = wbc_to_write_flags(wbc),
.old_blkaddr = NULL_ADDR,
- .page = page,
+ .folio = folio,
.encrypted_page = NULL,
.submitted = 0,
.compr_blocks = compr_blocks,
@@ -2854,13 +2847,7 @@ write:
goto done;
}
- if (!wbc->for_reclaim)
- need_balance_fs = true;
- else if (has_not_enough_free_secs(sbi, 0, 0))
- goto redirty_out;
- else
- set_inode_flag(inode, FI_HOT_DATA);
-
+ need_balance_fs = true;
err = -EAGAIN;
if (f2fs_has_inline_data(inode)) {
err = f2fs_write_inline_data(inode, folio);
@@ -2894,14 +2881,7 @@ out:
inode_dec_dirty_pages(inode);
if (err) {
folio_clear_uptodate(folio);
- clear_page_private_gcing(page);
- }
-
- if (wbc->for_reclaim) {
- f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
- clear_inode_flag(inode, FI_HOT_DATA);
- f2fs_remove_dirty_inode(inode);
- submitted = NULL;
+ folio_clear_f2fs_gcing(folio);
}
folio_unlock(folio);
if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
@@ -2928,35 +2908,12 @@ redirty_out:
* file_write_and_wait_range() will see EIO error, which is critical
* to return value of fsync() followed by atomic_write failure to user.
*/
- if (!err || wbc->for_reclaim)
- return AOP_WRITEPAGE_ACTIVATE;
folio_unlock(folio);
+ if (!err)
+ return 1;
return err;
}
-static int f2fs_write_data_page(struct page *page,
- struct writeback_control *wbc)
-{
- struct folio *folio = page_folio(page);
-#ifdef CONFIG_F2FS_FS_COMPRESSION
- struct inode *inode = folio->mapping->host;
-
- if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
- goto out;
-
- if (f2fs_compressed_file(inode)) {
- if (f2fs_is_compressed_cluster(inode, folio->index)) {
- folio_redirty_for_writepage(wbc, folio);
- return AOP_WRITEPAGE_ACTIVATE;
- }
- }
-out:
-#endif
-
- return f2fs_write_single_data_page(folio, NULL, NULL, NULL,
- wbc, FS_DATA_IO, 0, true);
-}
-
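With the ->writepage implementation removed above, f2fs_write_single_data_page() is now reached only from ->writepages and f2fs's own writers rather than from the VM's single-page writepage path, so the AOP_WRITEPAGE_ACTIVATE contract no longer applies. The function unlocks the folio itself and returns 1 for the skipped/redirtied case, which the writepages loop translates back to success; a sketch of the caller-side handling, matching the f2fs_write_cache_pages hunk further down:

	ret = f2fs_write_single_data_page(folio, &submitted, &bio,
					  &last_block, wbc, io_type, 0, true);
	if (ret == 1) {		/* redirtied and already unlocked: not an error */
		ret = 0;
		goto next;
	}
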
/*
* This function was copied from write_cache_pages from mm/page-writeback.c.
* The major change is making write step of cold data page separately from
@@ -3029,10 +2986,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
}
- if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
- tag = PAGECACHE_TAG_TOWRITE;
- else
- tag = PAGECACHE_TAG_DIRTY;
+ tag = wbc_to_tag(wbc);
retry:
retry = 0;
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
@@ -3150,7 +3104,7 @@ continue_unlock:
if (folio_test_writeback(folio)) {
if (wbc->sync_mode == WB_SYNC_NONE)
goto continue_unlock;
- f2fs_wait_on_page_writeback(&folio->page, DATA, true, true);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
}
if (!folio_clear_dirty_for_io(folio))
@@ -3163,11 +3117,10 @@ continue_unlock:
continue;
}
#endif
+ submitted = 0;
ret = f2fs_write_single_data_page(folio,
&submitted, &bio, &last_block,
wbc, io_type, 0, true);
- if (ret == AOP_WRITEPAGE_ACTIVATE)
- folio_unlock(folio);
#ifdef CONFIG_F2FS_FS_COMPRESSION
result:
#endif
@@ -3179,14 +3132,14 @@ result:
* keep nr_to_write, since vfs uses this to
* get # of written pages.
*/
- if (ret == AOP_WRITEPAGE_ACTIVATE) {
+ if (ret == 1) {
ret = 0;
goto next;
} else if (ret == -EAGAIN) {
ret = 0;
if (wbc->sync_mode == WB_SYNC_ALL) {
- f2fs_io_schedule_timeout(
- DEFAULT_IO_TIMEOUT);
+ f2fs_schedule_timeout(
+ DEFAULT_SCHEDULE_TIMEOUT);
goto retry_write;
}
goto next;
@@ -3268,6 +3221,19 @@ static inline bool __should_serialize_io(struct inode *inode,
return false;
}
+static inline void account_writeback(struct inode *inode, bool inc)
+{
+ if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
+ return;
+
+ f2fs_down_read(&F2FS_I(inode)->i_sem);
+ if (inc)
+ atomic_inc(&F2FS_I(inode)->writeback);
+ else
+ atomic_dec(&F2FS_I(inode)->writeback);
+ f2fs_up_read(&F2FS_I(inode)->i_sem);
+}
+
static int __f2fs_write_data_pages(struct address_space *mapping,
struct writeback_control *wbc,
enum iostat_type io_type)
@@ -3278,10 +3244,6 @@ static int __f2fs_write_data_pages(struct address_space *mapping,
int ret;
bool locked = false;
- /* deal with chardevs and other special file */
- if (!mapping->a_ops->writepage)
- return 0;
-
/* skip writing if there is no dirty page in this inode */
if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
return 0;
@@ -3317,10 +3279,14 @@ static int __f2fs_write_data_pages(struct address_space *mapping,
locked = true;
}
+ account_writeback(inode, true);
+
blk_start_plug(&plug);
ret = f2fs_write_cache_pages(mapping, wbc, io_type);
blk_finish_plug(&plug);
+ account_writeback(inode, false);
+
if (locked)
mutex_unlock(&sbi->writepages);
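The new account_writeback() helper is a no-op unless the filesystem supports compression; taking i_sem shared around the atomic update presumably lets a path holding i_sem exclusively observe a settled "writeback in flight" count (the consumer is outside these hunks). Its use brackets the actual writeback in __f2fs_write_data_pages(), as the hunk above shows; a condensed sketch:

	account_writeback(inode, true);		/* atomic_inc under i_sem (read) */
	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);
	account_writeback(inode, false);	/* matching atomic_dec */
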
@@ -3377,7 +3343,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
struct inode *inode = folio->mapping->host;
pgoff_t index = folio->index;
struct dnode_of_data dn;
- struct page *ipage;
+ struct folio *ifolio;
bool locked = false;
int flag = F2FS_GET_BLOCK_PRE_AIO;
int err = 0;
@@ -3402,23 +3368,23 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
restart:
/* check inline_data */
- ipage = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(ifolio)) {
+ err = PTR_ERR(ifolio);
goto unlock_out;
}
- set_new_dnode(&dn, inode, ipage, ipage, 0);
+ set_new_dnode(&dn, inode, ifolio, ifolio, 0);
if (f2fs_has_inline_data(inode)) {
if (pos + len <= MAX_INLINE_DATA(inode)) {
- f2fs_do_read_inline_data(folio, ipage);
+ f2fs_do_read_inline_data(folio, ifolio);
set_inode_flag(inode, FI_DATA_EXIST);
if (inode->i_nlink)
- set_page_private_inline(ipage);
+ folio_set_f2fs_inline(ifolio);
goto out;
}
- err = f2fs_convert_inline_page(&dn, folio_page(folio, 0));
+ err = f2fs_convert_inline_folio(&dn, folio);
if (err || dn.data_blkaddr != NULL_ADDR)
goto out;
}
@@ -3462,14 +3428,14 @@ static int __find_data_block(struct inode *inode, pgoff_t index,
block_t *blk_addr)
{
struct dnode_of_data dn;
- struct page *ipage;
+ struct folio *ifolio;
int err = 0;
- ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
- if (IS_ERR(ipage))
- return PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
- set_new_dnode(&dn, inode, ipage, ipage, 0);
+ set_new_dnode(&dn, inode, ifolio, ifolio, 0);
if (!f2fs_lookup_read_extent_cache_block(inode, index,
&dn.data_blkaddr)) {
@@ -3490,17 +3456,17 @@ static int __reserve_data_block(struct inode *inode, pgoff_t index,
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
- struct page *ipage;
+ struct folio *ifolio;
int err = 0;
f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
- ipage = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(ifolio)) {
+ err = PTR_ERR(ifolio);
goto unlock_out;
}
- set_new_dnode(&dn, inode, ipage, ipage, 0);
+ set_new_dnode(&dn, inode, ifolio, ifolio, 0);
if (!f2fs_lookup_read_extent_cache_block(dn.inode, index,
&dn.data_blkaddr))
@@ -3558,8 +3524,10 @@ reserve_block:
return 0;
}
-static int f2fs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
+static int f2fs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, struct folio **foliop,
+ void **fsdata)
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -3615,8 +3583,9 @@ repeat:
* Do not use FGP_STABLE to avoid deadlock.
* Will wait that below with our IO control.
*/
- folio = __filemap_get_folio(mapping, index,
- FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
+ folio = f2fs_filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_NOFS,
+ mapping_gfp_mask(mapping));
if (IS_ERR(folio)) {
err = PTR_ERR(folio);
goto fail;
@@ -3648,7 +3617,7 @@ repeat:
}
}
- f2fs_wait_on_page_writeback(&folio->page, DATA, false, true);
+ f2fs_folio_wait_writeback(folio, DATA, false, true);
if (len == folio_size(folio) || folio_test_uptodate(folio))
return 0;
@@ -3668,11 +3637,9 @@ repeat:
err = -EFSCORRUPTED;
goto put_folio;
}
- err = f2fs_submit_page_read(use_cow ?
+ f2fs_submit_page_read(use_cow ?
F2FS_I(inode)->cow_inode : inode,
folio, blkaddr, 0, true);
- if (err)
- goto put_folio;
folio_lock(folio);
if (unlikely(folio->mapping != mapping)) {
@@ -3688,14 +3655,13 @@ repeat:
return 0;
put_folio:
- folio_unlock(folio);
- folio_put(folio);
+ f2fs_folio_put(folio, true);
fail:
f2fs_write_failed(inode, pos + len);
return err;
}
-static int f2fs_write_end(struct file *file,
+static int f2fs_write_end(const struct kiocb *iocb,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct folio *folio, void *fsdata)
@@ -3735,7 +3701,7 @@ static int f2fs_write_end(struct file *file,
folio_mark_dirty(folio);
if (f2fs_is_atomic_file(inode))
- set_page_private_atomic(folio_page(folio, 0));
+ folio_set_f2fs_atomic(folio);
if (pos + copied > i_size_read(inode) &&
!f2fs_verity_in_progress(inode)) {
@@ -3745,8 +3711,7 @@ static int f2fs_write_end(struct file *file,
pos + copied);
}
unlock_out:
- folio_unlock(folio);
- folio_put(folio);
+ f2fs_folio_put(folio, true);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return copied;
}
@@ -3770,7 +3735,7 @@ void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
f2fs_remove_dirty_inode(inode);
}
}
- clear_page_private_all(&folio->page);
+ folio_detach_private(folio);
}
bool f2fs_release_folio(struct folio *folio, gfp_t wait)
@@ -3779,7 +3744,7 @@ bool f2fs_release_folio(struct folio *folio, gfp_t wait)
if (folio_test_dirty(folio))
return false;
- clear_page_private_all(&folio->page);
+ folio_detach_private(folio);
return true;
}
@@ -3903,18 +3868,18 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
set_inode_flag(inode, FI_SKIP_WRITES);
for (blkofs = 0; blkofs <= blkofs_end; blkofs++) {
- struct page *page;
+ struct folio *folio;
unsigned int blkidx = secidx * blk_per_sec + blkofs;
- page = f2fs_get_lock_data_page(inode, blkidx, true);
- if (IS_ERR(page)) {
+ folio = f2fs_get_lock_data_folio(inode, blkidx, true);
+ if (IS_ERR(folio)) {
f2fs_up_write(&sbi->pin_sem);
- ret = PTR_ERR(page);
+ ret = PTR_ERR(folio);
goto done;
}
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
}
clear_inode_flag(inode, FI_SKIP_WRITES);
@@ -3991,7 +3956,7 @@ retry:
if ((pblock - SM_I(sbi)->main_blkaddr) % blks_per_sec ||
nr_pblocks % blks_per_sec ||
- !f2fs_valid_pinned_area(sbi, pblock)) {
+ f2fs_is_sequential_zone_area(sbi, pblock)) {
bool last_extent = false;
not_aligned++;
@@ -4043,7 +4008,6 @@ retry:
cur_lblock = 1; /* force Empty message */
sis->max = cur_lblock;
sis->pages = cur_lblock - 1;
- sis->highest_bit = cur_lblock - 1;
out:
if (not_aligned)
f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%lu * N)",
@@ -4114,7 +4078,6 @@ static void f2fs_swap_deactivate(struct file *file)
const struct address_space_operations f2fs_dblock_aops = {
.read_folio = f2fs_read_data_folio,
.readahead = f2fs_readahead,
- .writepage = f2fs_write_data_page,
.writepages = f2fs_write_data_pages,
.write_begin = f2fs_write_begin,
.write_end = f2fs_write_end,
@@ -4199,7 +4162,7 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
unsigned int flags, struct iomap *iomap,
struct iomap *srcmap)
{
- struct f2fs_map_blocks map = {};
+ struct f2fs_map_blocks map = { NULL, };
pgoff_t next_pgofs = 0;
int err;
@@ -4208,7 +4171,17 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
map.m_next_pgofs = &next_pgofs;
map.m_seg_type = f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode),
inode->i_write_hint);
- if (flags & IOMAP_WRITE)
+ if (flags & IOMAP_WRITE && iomap->private) {
+ map.m_last_pblk = (unsigned long)iomap->private;
+ iomap->private = NULL;
+ }
+
+ /*
+ * If the blocks being overwritten are already allocated,
+ * f2fs_map_lock and f2fs_balance_fs are not necessary.
+ */
+ if ((flags & IOMAP_WRITE) &&
+ !f2fs_overwrite_io(inode, offset, length))
map.m_may_create = true;
err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DIO);
@@ -4240,6 +4213,9 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
iomap->flags |= IOMAP_F_MERGED;
iomap->bdev = map.m_bdev;
iomap->addr = F2FS_BLK_TO_BYTES(map.m_pblk);
+
+ if (flags & IOMAP_WRITE && map.m_last_pblk)
+ iomap->private = (void *)map.m_last_pblk;
} else {
if (flags & IOMAP_WRITE)
return -ENOTBLK;
@@ -4259,7 +4235,7 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
if (map.m_flags & F2FS_MAP_NEW)
iomap->flags |= IOMAP_F_NEW;
- if ((inode->i_state & I_DIRTY_DATASYNC) ||
+ if ((inode_state_read_once(inode) & I_DIRTY_DATASYNC) ||
offset + length > i_size_read(inode))
iomap->flags |= IOMAP_F_DIRTY;
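Two related changes land in f2fs_iomap_begin() above. First, m_may_create is requested only when the range is not already fully allocated (f2fs_overwrite_io()), so pure overwrites skip f2fs_map_lock() and f2fs_balance_fs(). Second, the last physical block of an LFS DIO extent is carried between iomap_begin calls through iomap->private, feeding the m_last_pblk logic in f2fs_map_blocks(). A sketch of that hand-off, with unrelated fields and error handling elided:

	/* restore the tail of the previous extent, if the prior call left one */
	if (flags & IOMAP_WRITE && iomap->private) {
		map.m_last_pblk = (unsigned long)iomap->private;
		iomap->private = NULL;
	}

	err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DIO);

	/* stash the tail of this extent for the next iomap_begin call */
	if (flags & IOMAP_WRITE && map.m_last_pblk)
		iomap->private = (void *)map.m_last_pblk;
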
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 468828288a4a..032683835569 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -21,7 +21,7 @@
#include "gc.h"
static LIST_HEAD(f2fs_stat_list);
-static DEFINE_RAW_SPINLOCK(f2fs_stat_lock);
+static DEFINE_SPINLOCK(f2fs_stat_lock);
#ifdef CONFIG_DEBUG_FS
static struct dentry *f2fs_debugfs_root;
#endif
@@ -91,7 +91,7 @@ static void update_multidevice_stats(struct f2fs_sb_info *sbi)
seg_blks = get_seg_entry(sbi, j)->valid_blocks;
/* update segment stats */
- if (IS_CURSEG(sbi, j))
+ if (is_curseg(sbi, j))
dev_stats[i].devstats[0][DEVSTAT_INUSE]++;
else if (seg_blks == BLKS_PER_SEG(sbi))
dev_stats[i].devstats[0][DEVSTAT_FULL]++;
@@ -109,7 +109,7 @@ static void update_multidevice_stats(struct f2fs_sb_info *sbi)
sec_blks = get_sec_entry(sbi, j)->valid_blocks;
/* update section stats */
- if (IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, j)))
+ if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, j)))
dev_stats[i].devstats[1][DEVSTAT_INUSE]++;
else if (sec_blks == BLKS_PER_SEC(sbi))
dev_stats[i].devstats[1][DEVSTAT_FULL]++;
@@ -164,6 +164,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
+ si->ndonate_files = sbi->donate_files;
si->nquota_files = sbi->nquota_files;
si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
si->aw_cnt = atomic_read(&sbi->atomic_files);
@@ -250,6 +251,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
struct curseg_info *curseg = CURSEG_I(sbi, i);
+ si->blkoff[i] = curseg->next_blkoff;
si->curseg[i] = curseg->segno;
si->cursec[i] = GET_SEC_FROM_SEG(sbi, curseg->segno);
si->curzone[i] = GET_ZONE_FROM_SEC(sbi, si->cursec[i]);
@@ -438,9 +440,8 @@ static int stat_show(struct seq_file *s, void *v)
{
struct f2fs_stat_info *si;
int i = 0, j = 0;
- unsigned long flags;
- raw_spin_lock_irqsave(&f2fs_stat_lock, flags);
+ spin_lock(&f2fs_stat_lock);
list_for_each_entry(si, &f2fs_stat_list, stat_list) {
struct f2fs_sb_info *sbi = si->sbi;
@@ -501,60 +502,70 @@ static int stat_show(struct seq_file *s, void *v)
si->compr_inode, si->compr_blocks);
seq_printf(s, " - Swapfile Inode: %u\n",
si->swapfile_inode);
+ seq_printf(s, " - Donate Inode: %u\n",
+ si->ndonate_files);
seq_printf(s, " - Orphan/Append/Update Inode: %u, %u, %u\n",
si->orphans, si->append, si->update);
seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n",
si->main_area_segs, si->main_area_sections,
si->main_area_zones);
- seq_printf(s, " TYPE %8s %8s %8s %10s %10s %10s\n",
- "segno", "secno", "zoneno", "dirty_seg", "full_seg", "valid_blk");
- seq_printf(s, " - COLD data: %8d %8d %8d %10u %10u %10u\n",
+ seq_printf(s, " TYPE %8s %8s %8s %8s %10s %10s %10s\n",
+ "blkoff", "segno", "secno", "zoneno", "dirty_seg", "full_seg", "valid_blk");
+ seq_printf(s, " - COLD data: %8d %8d %8d %8d %10u %10u %10u\n",
+ si->blkoff[CURSEG_COLD_DATA],
si->curseg[CURSEG_COLD_DATA],
si->cursec[CURSEG_COLD_DATA],
si->curzone[CURSEG_COLD_DATA],
si->dirty_seg[CURSEG_COLD_DATA],
si->full_seg[CURSEG_COLD_DATA],
si->valid_blks[CURSEG_COLD_DATA]);
- seq_printf(s, " - WARM data: %8d %8d %8d %10u %10u %10u\n",
+ seq_printf(s, " - WARM data: %8d %8d %8d %8d %10u %10u %10u\n",
+ si->blkoff[CURSEG_WARM_DATA],
si->curseg[CURSEG_WARM_DATA],
si->cursec[CURSEG_WARM_DATA],
si->curzone[CURSEG_WARM_DATA],
si->dirty_seg[CURSEG_WARM_DATA],
si->full_seg[CURSEG_WARM_DATA],
si->valid_blks[CURSEG_WARM_DATA]);
- seq_printf(s, " - HOT data: %8d %8d %8d %10u %10u %10u\n",
+ seq_printf(s, " - HOT data: %8d %8d %8d %8d %10u %10u %10u\n",
+ si->blkoff[CURSEG_HOT_DATA],
si->curseg[CURSEG_HOT_DATA],
si->cursec[CURSEG_HOT_DATA],
si->curzone[CURSEG_HOT_DATA],
si->dirty_seg[CURSEG_HOT_DATA],
si->full_seg[CURSEG_HOT_DATA],
si->valid_blks[CURSEG_HOT_DATA]);
- seq_printf(s, " - Dir dnode: %8d %8d %8d %10u %10u %10u\n",
+ seq_printf(s, " - Dir dnode: %8d %8d %8d %8d %10u %10u %10u\n",
+ si->blkoff[CURSEG_HOT_NODE],
si->curseg[CURSEG_HOT_NODE],
si->cursec[CURSEG_HOT_NODE],
si->curzone[CURSEG_HOT_NODE],
si->dirty_seg[CURSEG_HOT_NODE],
si->full_seg[CURSEG_HOT_NODE],
si->valid_blks[CURSEG_HOT_NODE]);
- seq_printf(s, " - File dnode: %8d %8d %8d %10u %10u %10u\n",
+ seq_printf(s, " - File dnode: %8d %8d %8d %8d %10u %10u %10u\n",
+ si->blkoff[CURSEG_WARM_NODE],
si->curseg[CURSEG_WARM_NODE],
si->cursec[CURSEG_WARM_NODE],
si->curzone[CURSEG_WARM_NODE],
si->dirty_seg[CURSEG_WARM_NODE],
si->full_seg[CURSEG_WARM_NODE],
si->valid_blks[CURSEG_WARM_NODE]);
- seq_printf(s, " - Indir nodes: %8d %8d %8d %10u %10u %10u\n",
+ seq_printf(s, " - Indir nodes: %8d %8d %8d %8d %10u %10u %10u\n",
+ si->blkoff[CURSEG_COLD_NODE],
si->curseg[CURSEG_COLD_NODE],
si->cursec[CURSEG_COLD_NODE],
si->curzone[CURSEG_COLD_NODE],
si->dirty_seg[CURSEG_COLD_NODE],
si->full_seg[CURSEG_COLD_NODE],
si->valid_blks[CURSEG_COLD_NODE]);
- seq_printf(s, " - Pinned file: %8d %8d %8d\n",
+ seq_printf(s, " - Pinned file: %8d %8d %8d %8d\n",
+ si->blkoff[CURSEG_COLD_DATA_PINNED],
si->curseg[CURSEG_COLD_DATA_PINNED],
si->cursec[CURSEG_COLD_DATA_PINNED],
si->curzone[CURSEG_COLD_DATA_PINNED]);
- seq_printf(s, " - ATGC data: %8d %8d %8d\n",
+ seq_printf(s, " - ATGC data: %8d %8d %8d %8d\n",
+ si->blkoff[CURSEG_ALL_DATA_ATGC],
si->curseg[CURSEG_ALL_DATA_ATGC],
si->cursec[CURSEG_ALL_DATA_ATGC],
si->curzone[CURSEG_ALL_DATA_ATGC]);
@@ -750,7 +761,7 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, " - paged : %llu KB\n",
si->page_mem >> 10);
}
- raw_spin_unlock_irqrestore(&f2fs_stat_lock, flags);
+ spin_unlock(&f2fs_stat_lock);
return 0;
}
@@ -762,7 +773,6 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_stat_info *si;
struct f2fs_dev_stats *dev_stats;
- unsigned long flags;
int i;
si = f2fs_kzalloc(sbi, sizeof(struct f2fs_stat_info), GFP_KERNEL);
@@ -814,9 +824,9 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
atomic_set(&sbi->max_aw_cnt, 0);
- raw_spin_lock_irqsave(&f2fs_stat_lock, flags);
+ spin_lock(&f2fs_stat_lock);
list_add_tail(&si->stat_list, &f2fs_stat_list);
- raw_spin_unlock_irqrestore(&f2fs_stat_lock, flags);
+ spin_unlock(&f2fs_stat_lock);
return 0;
}
@@ -824,11 +834,10 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
{
struct f2fs_stat_info *si = F2FS_STAT(sbi);
- unsigned long flags;
- raw_spin_lock_irqsave(&f2fs_stat_lock, flags);
+ spin_lock(&f2fs_stat_lock);
list_del(&si->stat_list);
- raw_spin_unlock_irqrestore(&f2fs_stat_lock, flags);
+ spin_unlock(&f2fs_stat_lock);
kfree(si->dev_stats);
kfree(si);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 47a5c806cf16..48f4f98afb01 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -16,6 +16,21 @@
#include "xattr.h"
#include <trace/events/f2fs.h>
+static inline bool f2fs_should_fallback_to_linear(struct inode *dir)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+
+ switch (F2FS_OPTION(sbi).lookup_mode) {
+ case LOOKUP_PERF:
+ return false;
+ case LOOKUP_COMPAT:
+ return true;
+ case LOOKUP_AUTO:
+ return !sb_no_casefold_compat_fallback(sbi->sb);
+ }
+ return false;
+}
+
#if IS_ENABLED(CONFIG_UNICODE)
extern struct kmem_cache *f2fs_cf_name_slab;
#endif
@@ -173,17 +188,18 @@ static unsigned long dir_block_index(unsigned int level,
}
static struct f2fs_dir_entry *find_in_block(struct inode *dir,
- struct page *dentry_page,
+ struct folio *dentry_folio,
const struct f2fs_filename *fname,
- int *max_slots)
+ int *max_slots,
+ bool use_hash)
{
struct f2fs_dentry_block *dentry_blk;
struct f2fs_dentry_ptr d;
- dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page);
+ dentry_blk = folio_address(dentry_folio);
make_dentry_ptr_block(dir, &d, dentry_blk);
- return f2fs_find_target_dentry(&d, fname, max_slots);
+ return f2fs_find_target_dentry(&d, fname, max_slots, use_hash);
}
static inline int f2fs_match_name(const struct inode *dir,
@@ -208,7 +224,8 @@ static inline int f2fs_match_name(const struct inode *dir,
}
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
- const struct f2fs_filename *fname, int *max_slots)
+ const struct f2fs_filename *fname, int *max_slots,
+ bool use_hash)
{
struct f2fs_dir_entry *de;
unsigned long bit_pos = 0;
@@ -231,7 +248,7 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
continue;
}
- if (de->hash_code == fname->hash) {
+ if (!use_hash || de->hash_code == fname->hash) {
res = f2fs_match_name(d->inode, fname,
d->filename[bit_pos],
le16_to_cpu(de->name_len));
@@ -258,12 +275,12 @@ found:
static struct f2fs_dir_entry *find_in_level(struct inode *dir,
unsigned int level,
const struct f2fs_filename *fname,
- struct page **res_page)
+ struct folio **res_folio,
+ bool use_hash)
{
int s = GET_DENTRY_SLOTS(fname->disk_name.len);
unsigned int nbucket, nblock;
- unsigned int bidx, end_block;
- struct page *dentry_page;
+ unsigned int bidx, end_block, bucket_no;
struct f2fs_dir_entry *de = NULL;
pgoff_t next_pgofs;
bool room = false;
@@ -272,62 +289,76 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
nblock = bucket_blocks(level);
+ bucket_no = use_hash ? le32_to_cpu(fname->hash) % nbucket : 0;
+
+start_find_bucket:
bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
- le32_to_cpu(fname->hash) % nbucket);
+ bucket_no);
end_block = bidx + nblock;
while (bidx < end_block) {
/* no need to allocate new dentry pages to all the indices */
- dentry_page = f2fs_find_data_page(dir, bidx, &next_pgofs);
- if (IS_ERR(dentry_page)) {
- if (PTR_ERR(dentry_page) == -ENOENT) {
+ struct folio *dentry_folio;
+ dentry_folio = f2fs_find_data_folio(dir, bidx, &next_pgofs);
+ if (IS_ERR(dentry_folio)) {
+ if (PTR_ERR(dentry_folio) == -ENOENT) {
room = true;
bidx = next_pgofs;
continue;
} else {
- *res_page = dentry_page;
+ *res_folio = dentry_folio;
break;
}
}
- de = find_in_block(dir, dentry_page, fname, &max_slots);
+ de = find_in_block(dir, dentry_folio, fname, &max_slots, use_hash);
if (IS_ERR(de)) {
- *res_page = ERR_CAST(de);
+ *res_folio = ERR_CAST(de);
de = NULL;
break;
} else if (de) {
- *res_page = dentry_page;
+ *res_folio = dentry_folio;
break;
}
if (max_slots >= s)
room = true;
- f2fs_put_page(dentry_page, 0);
+ f2fs_folio_put(dentry_folio, false);
bidx++;
}
- if (!de && room && F2FS_I(dir)->chash != fname->hash) {
- F2FS_I(dir)->chash = fname->hash;
- F2FS_I(dir)->clevel = level;
- }
+ if (de)
+ return de;
- return de;
+ if (likely(use_hash)) {
+ if (room && F2FS_I(dir)->chash != fname->hash) {
+ F2FS_I(dir)->chash = fname->hash;
+ F2FS_I(dir)->clevel = level;
+ }
+ } else if (++bucket_no < nbucket) {
+ goto start_find_bucket;
+ }
+ return NULL;
}
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
const struct f2fs_filename *fname,
- struct page **res_page)
+ struct folio **res_folio)
{
unsigned long npages = dir_blocks(dir);
struct f2fs_dir_entry *de = NULL;
unsigned int max_depth;
unsigned int level;
+ bool use_hash = true;
- *res_page = NULL;
+ *res_folio = NULL;
+#if IS_ENABLED(CONFIG_UNICODE)
+start_find_entry:
+#endif
if (f2fs_has_inline_dentry(dir)) {
- de = f2fs_find_in_inline_dir(dir, fname, res_page);
+ de = f2fs_find_in_inline_dir(dir, fname, res_folio, use_hash);
goto out;
}
@@ -343,11 +374,19 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
}
for (level = 0; level < max_depth; level++) {
- de = find_in_level(dir, level, fname, res_page);
- if (de || IS_ERR(*res_page))
+ de = find_in_level(dir, level, fname, res_folio, use_hash);
+ if (de || IS_ERR(*res_folio))
break;
}
+
out:
+#if IS_ENABLED(CONFIG_UNICODE)
+ if (f2fs_should_fallback_to_linear(dir) &&
+ IS_CASEFOLDED(dir) && !de && use_hash) {
+ use_hash = false;
+ goto start_find_entry;
+ }
+#endif
/* This is to increase the speed of f2fs_create */
if (!de)
F2FS_I(dir)->task = current;
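The dir.c changes above handle casefolded directories whose on-disk hashes may not match the hash the running kernel computes (for instance if names were hashed differently in the past). Depending on the new lookup_mode option, a missed hash lookup is retried as a linear scan: use_hash is cleared, f2fs_find_target_dentry() then compares names regardless of de->hash_code, and find_in_level() walks every bucket via bucket_no. A condensed sketch of the retry, where __lookup_all_levels() is a hypothetical stand-in for the inline-dentry and per-level loop above:

	bool use_hash = true;

start_find_entry:
	de = __lookup_all_levels(dir, fname, res_folio, use_hash);

	if (f2fs_should_fallback_to_linear(dir) && IS_CASEFOLDED(dir) &&
	    !de && use_hash) {
		use_hash = false;		/* second pass: full linear scan */
		goto start_find_entry;
	}
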
@@ -361,7 +400,7 @@ out:
* Entry is guaranteed to be valid.
*/
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
- const struct qstr *child, struct page **res_page)
+ const struct qstr *child, struct folio **res_folio)
{
struct f2fs_dir_entry *de = NULL;
struct f2fs_filename fname;
@@ -370,67 +409,67 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
err = f2fs_setup_filename(dir, child, 1, &fname);
if (err) {
if (err == -ENOENT)
- *res_page = NULL;
+ *res_folio = NULL;
else
- *res_page = ERR_PTR(err);
+ *res_folio = ERR_PTR(err);
return NULL;
}
- de = __f2fs_find_entry(dir, &fname, res_page);
+ de = __f2fs_find_entry(dir, &fname, res_folio);
f2fs_free_filename(&fname);
return de;
}
-struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
+struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct folio **f)
{
- return f2fs_find_entry(dir, &dotdot_name, p);
+ return f2fs_find_entry(dir, &dotdot_name, f);
}
ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
- struct page **page)
+ struct folio **folio)
{
ino_t res = 0;
struct f2fs_dir_entry *de;
- de = f2fs_find_entry(dir, qstr, page);
+ de = f2fs_find_entry(dir, qstr, folio);
if (de) {
res = le32_to_cpu(de->ino);
- f2fs_put_page(*page, 0);
+ f2fs_folio_put(*folio, false);
}
return res;
}
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
- struct page *page, struct inode *inode)
+ struct folio *folio, struct inode *inode)
{
enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA;
- lock_page(page);
- f2fs_wait_on_page_writeback(page, type, true, true);
+ folio_lock(folio);
+ f2fs_folio_wait_writeback(folio, type, true, true);
de->ino = cpu_to_le32(inode->i_ino);
de->file_type = fs_umode_to_ftype(inode->i_mode);
- set_page_dirty(page);
+ folio_mark_dirty(folio);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
f2fs_mark_inode_dirty_sync(dir, false);
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
}
static void init_dent_inode(struct inode *dir, struct inode *inode,
const struct f2fs_filename *fname,
- struct page *ipage)
+ struct folio *ifolio)
{
struct f2fs_inode *ri;
if (!fname) /* tmpfile case? */
return;
- f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
- /* copy name info. to this inode page */
- ri = F2FS_INODE(ipage);
+ /* copy name info. to this inode folio */
+ ri = F2FS_INODE(ifolio);
ri->i_namelen = cpu_to_le32(fname->disk_name.len);
memcpy(ri->i_name, fname->disk_name.name, fname->disk_name.len);
if (IS_ENCRYPTED(dir)) {
@@ -451,7 +490,7 @@ static void init_dent_inode(struct inode *dir, struct inode *inode,
file_lost_pino(inode);
}
}
- set_page_dirty(ipage);
+ folio_mark_dirty(ifolio);
}
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
@@ -468,72 +507,73 @@ void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
}
static int make_empty_dir(struct inode *inode,
- struct inode *parent, struct page *page)
+ struct inode *parent, struct folio *folio)
{
- struct page *dentry_page;
+ struct folio *dentry_folio;
struct f2fs_dentry_block *dentry_blk;
struct f2fs_dentry_ptr d;
if (f2fs_has_inline_dentry(inode))
- return f2fs_make_empty_inline_dir(inode, parent, page);
+ return f2fs_make_empty_inline_dir(inode, parent, folio);
- dentry_page = f2fs_get_new_data_page(inode, page, 0, true);
- if (IS_ERR(dentry_page))
- return PTR_ERR(dentry_page);
+ dentry_folio = f2fs_get_new_data_folio(inode, folio, 0, true);
+ if (IS_ERR(dentry_folio))
+ return PTR_ERR(dentry_folio);
- dentry_blk = page_address(dentry_page);
+ dentry_blk = folio_address(dentry_folio);
make_dentry_ptr_block(NULL, &d, dentry_blk);
f2fs_do_make_empty_dir(inode, parent, &d);
- set_page_dirty(dentry_page);
- f2fs_put_page(dentry_page, 1);
+ folio_mark_dirty(dentry_folio);
+ f2fs_folio_put(dentry_folio, true);
return 0;
}
-struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
- const struct f2fs_filename *fname, struct page *dpage)
+struct folio *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
+ const struct f2fs_filename *fname, struct folio *dfolio)
{
- struct page *page;
+ struct folio *folio;
int err;
if (is_inode_flag_set(inode, FI_NEW_INODE)) {
- page = f2fs_new_inode_page(inode);
- if (IS_ERR(page))
- return page;
+ folio = f2fs_new_inode_folio(inode);
+ if (IS_ERR(folio))
+ return folio;
if (S_ISDIR(inode->i_mode)) {
/* in order to handle error case */
- get_page(page);
- err = make_empty_dir(inode, dir, page);
+ folio_get(folio);
+ err = make_empty_dir(inode, dir, folio);
if (err) {
- lock_page(page);
+ folio_lock(folio);
goto put_error;
}
- put_page(page);
+ folio_put(folio);
}
- err = f2fs_init_acl(inode, dir, page, dpage);
+ err = f2fs_init_acl(inode, dir, folio, dfolio);
if (err)
goto put_error;
err = f2fs_init_security(inode, dir,
- fname ? fname->usr_fname : NULL, page);
+ fname ? fname->usr_fname : NULL,
+ folio);
if (err)
goto put_error;
if (IS_ENCRYPTED(inode)) {
- err = fscrypt_set_context(inode, page);
+ err = fscrypt_set_context(inode, folio);
if (err)
goto put_error;
}
} else {
- page = f2fs_get_node_page(F2FS_I_SB(dir), inode->i_ino);
- if (IS_ERR(page))
- return page;
+ folio = f2fs_get_inode_folio(F2FS_I_SB(dir), inode->i_ino);
+ if (IS_ERR(folio))
+ return folio;
}
- init_dent_inode(dir, inode, fname, page);
+ init_dent_inode(dir, inode, fname, folio);
/*
* This file should be checkpointed during fsync.
@@ -550,12 +590,12 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
f2fs_remove_orphan_inode(F2FS_I_SB(dir), inode->i_ino);
f2fs_i_links_write(inode, true);
}
- return page;
+ return folio;
put_error:
clear_nlink(inode);
- f2fs_update_inode(inode, page);
- f2fs_put_page(page, 1);
+ f2fs_update_inode(inode, folio);
+ f2fs_folio_put(folio, true);
return ERR_PTR(err);
}
@@ -597,14 +637,14 @@ next:
goto next;
}
-bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
+bool f2fs_has_enough_room(struct inode *dir, struct folio *ifolio,
const struct f2fs_filename *fname)
{
struct f2fs_dentry_ptr d;
unsigned int bit_pos;
int slots = GET_DENTRY_SLOTS(fname->disk_name.len);
- make_dentry_ptr_inline(dir, &d, inline_data_addr(dir, ipage));
+ make_dentry_ptr_inline(dir, &d, inline_data_addr(dir, ifolio));
bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max);
@@ -641,10 +681,10 @@ int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
unsigned int current_depth;
unsigned long bidx, block;
unsigned int nbucket, nblock;
- struct page *dentry_page = NULL;
+ struct folio *dentry_folio = NULL;
struct f2fs_dentry_block *dentry_blk = NULL;
struct f2fs_dentry_ptr d;
- struct page *page = NULL;
+ struct folio *folio = NULL;
int slots, err = 0;
level = 0;
@@ -674,30 +714,30 @@ start:
(le32_to_cpu(fname->hash) % nbucket));
for (block = bidx; block <= (bidx + nblock - 1); block++) {
- dentry_page = f2fs_get_new_data_page(dir, NULL, block, true);
- if (IS_ERR(dentry_page))
- return PTR_ERR(dentry_page);
+ dentry_folio = f2fs_get_new_data_folio(dir, NULL, block, true);
+ if (IS_ERR(dentry_folio))
+ return PTR_ERR(dentry_folio);
- dentry_blk = page_address(dentry_page);
+ dentry_blk = folio_address(dentry_folio);
bit_pos = f2fs_room_for_filename(&dentry_blk->dentry_bitmap,
slots, NR_DENTRY_IN_BLOCK);
if (bit_pos < NR_DENTRY_IN_BLOCK)
goto add_dentry;
- f2fs_put_page(dentry_page, 1);
+ f2fs_folio_put(dentry_folio, true);
}
/* Move to next level to find the empty slot for new dentry */
++level;
goto start;
add_dentry:
- f2fs_wait_on_page_writeback(dentry_page, DATA, true, true);
+ f2fs_folio_wait_writeback(dentry_folio, DATA, true, true);
if (inode) {
f2fs_down_write(&F2FS_I(inode)->i_sem);
- page = f2fs_init_inode_metadata(inode, dir, fname, NULL);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ folio = f2fs_init_inode_metadata(inode, dir, fname, NULL);
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
goto fail;
}
}
@@ -706,16 +746,16 @@ add_dentry:
f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash,
bit_pos);
- set_page_dirty(dentry_page);
+ folio_mark_dirty(dentry_folio);
if (inode) {
f2fs_i_pino_write(inode, dir->i_ino);
/* synchronize inode page's data from inode cache */
if (is_inode_flag_set(inode, FI_NEW_INODE))
- f2fs_update_inode(inode, page);
+ f2fs_update_inode(inode, folio);
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
}
f2fs_update_parent_metadata(dir, inode, current_depth);
@@ -723,7 +763,7 @@ fail:
if (inode)
f2fs_up_write(&F2FS_I(inode)->i_sem);
- f2fs_put_page(dentry_page, 1);
+ f2fs_folio_put(dentry_folio, true);
return err;
}
@@ -757,7 +797,7 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
struct inode *inode, nid_t ino, umode_t mode)
{
struct f2fs_filename fname;
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct f2fs_dir_entry *de = NULL;
int err;
@@ -773,14 +813,14 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
* consistency more.
*/
if (current != F2FS_I(dir)->task) {
- de = __f2fs_find_entry(dir, &fname, &page);
+ de = __f2fs_find_entry(dir, &fname, &folio);
F2FS_I(dir)->task = NULL;
}
if (de) {
- f2fs_put_page(page, 0);
+ f2fs_folio_put(folio, false);
err = -EEXIST;
- } else if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ } else if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
} else {
err = f2fs_add_dentry(dir, &fname, inode, ino, mode);
}
@@ -791,16 +831,16 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
int f2fs_do_tmpfile(struct inode *inode, struct inode *dir,
struct f2fs_filename *fname)
{
- struct page *page;
+ struct folio *folio;
int err = 0;
f2fs_down_write(&F2FS_I(inode)->i_sem);
- page = f2fs_init_inode_metadata(inode, dir, fname, NULL);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ folio = f2fs_init_inode_metadata(inode, dir, fname, NULL);
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
goto fail;
}
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
clear_inode_flag(inode, FI_NEW_INODE);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
@@ -836,13 +876,13 @@ void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
* It only removes the dentry from the dentry page, corresponding name
* entry in name page does not need to be touched during deletion.
*/
-void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
+void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct folio *folio,
struct inode *dir, struct inode *inode)
{
- struct f2fs_dentry_block *dentry_blk;
+ struct f2fs_dentry_block *dentry_blk;
unsigned int bit_pos;
int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
- pgoff_t index = page_folio(page)->index;
+ pgoff_t index = folio->index;
int i;
f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
@@ -851,12 +891,12 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
f2fs_add_ino_entry(F2FS_I_SB(dir), dir->i_ino, TRANS_DIR_INO);
if (f2fs_has_inline_dentry(dir))
- return f2fs_delete_inline_entry(dentry, page, dir, inode);
+ return f2fs_delete_inline_entry(dentry, folio, dir, inode);
- lock_page(page);
- f2fs_wait_on_page_writeback(page, DATA, true, true);
+ folio_lock(folio);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
- dentry_blk = page_address(page);
+ dentry_blk = folio_address(folio);
bit_pos = dentry - dentry_blk->dentry;
for (i = 0; i < slots; i++)
__clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
@@ -865,19 +905,19 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
NR_DENTRY_IN_BLOCK,
0);
- set_page_dirty(page);
+ folio_mark_dirty(folio);
if (bit_pos == NR_DENTRY_IN_BLOCK &&
!f2fs_truncate_hole(dir, index, index + 1)) {
- f2fs_clear_page_cache_dirty_tag(page_folio(page));
- clear_page_dirty_for_io(page);
- ClearPageUptodate(page);
- clear_page_private_all(page);
+ f2fs_clear_page_cache_dirty_tag(folio);
+ folio_clear_dirty_for_io(folio);
+ folio_clear_uptodate(folio);
+ folio_detach_private(folio);
inode_dec_dirty_pages(dir);
f2fs_remove_dirty_inode(dir);
}
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
f2fs_mark_inode_dirty_sync(dir, false);
@@ -889,7 +929,6 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
bool f2fs_empty_dir(struct inode *dir)
{
unsigned long bidx = 0;
- struct page *dentry_page;
unsigned int bit_pos;
struct f2fs_dentry_block *dentry_blk;
unsigned long nblock = dir_blocks(dir);
@@ -899,10 +938,11 @@ bool f2fs_empty_dir(struct inode *dir)
while (bidx < nblock) {
pgoff_t next_pgofs;
+ struct folio *dentry_folio;
- dentry_page = f2fs_find_data_page(dir, bidx, &next_pgofs);
- if (IS_ERR(dentry_page)) {
- if (PTR_ERR(dentry_page) == -ENOENT) {
+ dentry_folio = f2fs_find_data_folio(dir, bidx, &next_pgofs);
+ if (IS_ERR(dentry_folio)) {
+ if (PTR_ERR(dentry_folio) == -ENOENT) {
bidx = next_pgofs;
continue;
} else {
@@ -910,7 +950,7 @@ bool f2fs_empty_dir(struct inode *dir)
}
}
- dentry_blk = page_address(dentry_page);
+ dentry_blk = folio_address(dentry_folio);
if (bidx == 0)
bit_pos = 2;
else
@@ -919,7 +959,7 @@ bool f2fs_empty_dir(struct inode *dir)
NR_DENTRY_IN_BLOCK,
bit_pos);
- f2fs_put_page(dentry_page, 0);
+ f2fs_folio_put(dentry_folio, false);
if (bit_pos < NR_DENTRY_IN_BLOCK)
return false;
@@ -1018,7 +1058,6 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
struct inode *inode = file_inode(file);
unsigned long npages = dir_blocks(inode);
struct f2fs_dentry_block *dentry_blk = NULL;
- struct page *dentry_page = NULL;
struct file_ra_state *ra = &file->f_ra;
loff_t start_pos = ctx->pos;
unsigned int n = ((unsigned long)ctx->pos / NR_DENTRY_IN_BLOCK);
@@ -1042,6 +1081,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
}
for (; n < npages; ctx->pos = n * NR_DENTRY_IN_BLOCK) {
+ struct folio *dentry_folio;
pgoff_t next_pgofs;
/* allow readdir() to be interrupted */
@@ -1056,9 +1096,9 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
page_cache_sync_readahead(inode->i_mapping, ra, file, n,
min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES));
- dentry_page = f2fs_find_data_page(inode, n, &next_pgofs);
- if (IS_ERR(dentry_page)) {
- err = PTR_ERR(dentry_page);
+ dentry_folio = f2fs_find_data_folio(inode, n, &next_pgofs);
+ if (IS_ERR(dentry_folio)) {
+ err = PTR_ERR(dentry_folio);
if (err == -ENOENT) {
err = 0;
n = next_pgofs;
@@ -1068,18 +1108,15 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
}
}
- dentry_blk = page_address(dentry_page);
+ dentry_blk = folio_address(dentry_folio);
make_dentry_ptr_block(inode, &d, dentry_blk);
err = f2fs_fill_dentries(ctx, &d,
n * NR_DENTRY_IN_BLOCK, &fstr);
- if (err) {
- f2fs_put_page(dentry_page, 0);
+ f2fs_folio_put(dentry_folio, false);
+ if (err)
break;
- }
-
- f2fs_put_page(dentry_page, 0);
n++;
}
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 347b3b647834..0ed84cc065a7 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -19,10 +19,10 @@
#include "node.h"
#include <trace/events/f2fs.h>
-bool sanity_check_extent_cache(struct inode *inode, struct page *ipage)
+bool sanity_check_extent_cache(struct inode *inode, struct folio *ifolio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
+ struct f2fs_extent *i_ext = &F2FS_INODE(ifolio)->i_ext;
struct extent_info ei;
int devi;
@@ -407,21 +407,21 @@ static void __drop_largest_extent(struct extent_tree *et,
}
}
-void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage)
+void f2fs_init_read_extent_tree(struct inode *inode, struct folio *ifolio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_tree_info *eti = &sbi->extent_tree[EX_READ];
- struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
+ struct f2fs_extent *i_ext = &F2FS_INODE(ifolio)->i_ext;
struct extent_tree *et;
struct extent_node *en;
- struct extent_info ei;
+ struct extent_info ei = {0};
if (!__may_extent_tree(inode, EX_READ)) {
/* drop largest read extent */
if (i_ext->len) {
- f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
i_ext->len = 0;
- set_page_dirty(ipage);
+ folio_mark_dirty(ifolio);
}
set_inode_flag(inode, FI_NO_EXTENT);
return;
@@ -604,7 +604,13 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
p = &(*p)->rb_right;
leftmost = false;
} else {
+ f2fs_err_ratelimited(sbi, "%s: corrupted extent, type: %d, "
+ "extent node in rb tree [%u, %u, %u], age [%llu, %llu], "
+ "extent node to insert [%u, %u, %u], age [%llu, %llu]",
+ __func__, et->type, en->ei.fofs, en->ei.blk, en->ei.len, en->ei.age,
+ en->ei.last_blocks, ei->fofs, ei->blk, ei->len, ei->age, ei->last_blocks);
f2fs_bug_on(sbi, 1);
+ return NULL;
}
}
@@ -664,6 +670,15 @@ static void __update_extent_tree_range(struct inode *inode,
if (!et)
return;
+ if (unlikely(len == 0)) {
+ f2fs_err_ratelimited(sbi, "%s: extent len is zero, type: %d, "
+ "extent [%u, %u, %u], age [%llu, %llu]",
+ __func__, type, tei->fofs, tei->blk, tei->len,
+ tei->age, tei->last_blocks);
+ f2fs_bug_on(sbi, 1);
+ return;
+ }
+
if (type == EX_READ)
trace_f2fs_update_read_extent_tree_range(inode, fofs, len,
tei->blk, 0);
@@ -793,7 +808,7 @@ static void __update_extent_tree_range(struct inode *inode,
}
goto out_read_extent_cache;
update_age_extent_cache:
- if (!tei->last_blocks)
+ if (tei->last_blocks == F2FS_EXTENT_AGE_INVALID)
goto out_read_extent_cache;
__set_extent_info(&ei, fofs, len, 0, false,
@@ -897,7 +912,7 @@ static int __get_new_block_age(struct inode *inode, struct extent_info *ei,
cur_age = cur_blocks - tei.last_blocks;
else
/* allocated_data_blocks overflow */
- cur_age = ULLONG_MAX - tei.last_blocks + cur_blocks;
+ cur_age = (ULLONG_MAX - 1) - tei.last_blocks + cur_blocks;
if (tei.age)
ei->age = __calculate_block_age(sbi, cur_age, tei.age);
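As a concrete illustration of the overflow branch above, here is a small stand-alone C sketch (the numbers are made up for the demo, not taken from the patch) that applies the same expression once the global allocated-block counter has wrapped:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        /* made-up demo values: snapshot taken just before the counter wrapped */
        unsigned long long last_blocks = ULLONG_MAX - 10;
        unsigned long long cur_blocks = 5;      /* counter value after the wrap */
        unsigned long long cur_age;

        if (cur_blocks >= last_blocks)
                cur_age = cur_blocks - last_blocks;
        else    /* same expression as the overflow branch above */
                cur_age = (ULLONG_MAX - 1) - last_blocks + cur_blocks;

        printf("cur_age = %llu\n", cur_age);    /* prints 14 for these inputs */
        return 0;
}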
@@ -934,7 +949,7 @@ static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type typ
if (!__may_extent_tree(dn->inode, type))
return;
- ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
+ ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_folio), dn->inode) +
dn->ofs_in_node;
ei.len = 1;
@@ -1099,6 +1114,7 @@ void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
struct extent_info ei = {
.fofs = fofs,
.len = len,
+ .last_blocks = F2FS_EXTENT_AGE_INVALID,
};
if (!__may_extent_tree(dn->inode, EX_BLOCK_AGE))
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 6f2cbf4c5740..20edbb99b814 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -24,7 +24,6 @@
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <linux/rw_hint.h>
-#include <crypto/hash.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
@@ -63,16 +62,26 @@ enum {
FAULT_BLKADDR_VALIDITY,
FAULT_BLKADDR_CONSISTENCE,
FAULT_NO_SEGMENT,
+ FAULT_INCONSISTENT_FOOTER,
+ FAULT_TIMEOUT,
+ FAULT_VMALLOC,
FAULT_MAX,
};
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-#define F2FS_ALL_FAULT_TYPE (GENMASK(FAULT_MAX - 1, 0))
+/* indicate which option to update */
+enum fault_option {
+ FAULT_RATE = 1, /* only update fault rate */
+ FAULT_TYPE = 2, /* only update fault type */
+ FAULT_ALL = 4, /* reset all fault injection options/stats */
+};
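The power-of-two values above suggest these options are meant to be OR-ed together as a bitmask when fault-injection settings are updated; treating them that way is an assumption of this small user-space sketch, and all names in it are demo stand-ins:

#include <stdio.h>

enum demo_fault_option {
        DEMO_FAULT_RATE = 1,    /* only update fault rate */
        DEMO_FAULT_TYPE = 2,    /* only update fault type */
        DEMO_FAULT_ALL  = 4,    /* reset all fault injection options/stats */
};

static void demo_update_fault_attr(unsigned int which, int rate, unsigned int type)
{
        if (which & DEMO_FAULT_RATE)
                printf("set inject_rate = %d\n", rate);
        if (which & DEMO_FAULT_TYPE)
                printf("set inject_type = 0x%x\n", type);
        if (which & DEMO_FAULT_ALL)
                printf("reset every fault option and statistic\n");
}

int main(void)
{
        demo_update_fault_attr(DEMO_FAULT_RATE | DEMO_FAULT_TYPE, 1000, 0x3);
        return 0;
}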
+#ifdef CONFIG_F2FS_FAULT_INJECTION
struct f2fs_fault_info {
atomic_t inject_ops;
int inject_rate;
unsigned int inject_type;
+ /* Used to account total count of injection for each type */
+ unsigned int inject_count[FAULT_MAX];
};
extern const char *f2fs_fault_name[FAULT_MAX];
@@ -87,39 +96,52 @@ extern const char *f2fs_fault_name[FAULT_MAX];
/*
* For mount options
*/
-#define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000001
-#define F2FS_MOUNT_DISCARD 0x00000002
-#define F2FS_MOUNT_NOHEAP 0x00000004
-#define F2FS_MOUNT_XATTR_USER 0x00000008
-#define F2FS_MOUNT_POSIX_ACL 0x00000010
-#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000020
-#define F2FS_MOUNT_INLINE_XATTR 0x00000040
-#define F2FS_MOUNT_INLINE_DATA 0x00000080
-#define F2FS_MOUNT_INLINE_DENTRY 0x00000100
-#define F2FS_MOUNT_FLUSH_MERGE 0x00000200
-#define F2FS_MOUNT_NOBARRIER 0x00000400
-#define F2FS_MOUNT_FASTBOOT 0x00000800
-#define F2FS_MOUNT_READ_EXTENT_CACHE 0x00001000
-#define F2FS_MOUNT_DATA_FLUSH 0x00002000
-#define F2FS_MOUNT_FAULT_INJECTION 0x00004000
-#define F2FS_MOUNT_USRQUOTA 0x00008000
-#define F2FS_MOUNT_GRPQUOTA 0x00010000
-#define F2FS_MOUNT_PRJQUOTA 0x00020000
-#define F2FS_MOUNT_QUOTA 0x00040000
-#define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00080000
-#define F2FS_MOUNT_RESERVE_ROOT 0x00100000
-#define F2FS_MOUNT_DISABLE_CHECKPOINT 0x00200000
-#define F2FS_MOUNT_NORECOVERY 0x00400000
-#define F2FS_MOUNT_ATGC 0x00800000
-#define F2FS_MOUNT_MERGE_CHECKPOINT 0x01000000
-#define F2FS_MOUNT_GC_MERGE 0x02000000
-#define F2FS_MOUNT_COMPRESS_CACHE 0x04000000
-#define F2FS_MOUNT_AGE_EXTENT_CACHE 0x08000000
+enum f2fs_mount_opt {
+ F2FS_MOUNT_DISABLE_ROLL_FORWARD,
+ F2FS_MOUNT_DISCARD,
+ F2FS_MOUNT_NOHEAP,
+ F2FS_MOUNT_XATTR_USER,
+ F2FS_MOUNT_POSIX_ACL,
+ F2FS_MOUNT_DISABLE_EXT_IDENTIFY,
+ F2FS_MOUNT_INLINE_XATTR,
+ F2FS_MOUNT_INLINE_DATA,
+ F2FS_MOUNT_INLINE_DENTRY,
+ F2FS_MOUNT_FLUSH_MERGE,
+ F2FS_MOUNT_NOBARRIER,
+ F2FS_MOUNT_FASTBOOT,
+ F2FS_MOUNT_READ_EXTENT_CACHE,
+ F2FS_MOUNT_DATA_FLUSH,
+ F2FS_MOUNT_FAULT_INJECTION,
+ F2FS_MOUNT_USRQUOTA,
+ F2FS_MOUNT_GRPQUOTA,
+ F2FS_MOUNT_PRJQUOTA,
+ F2FS_MOUNT_QUOTA,
+ F2FS_MOUNT_INLINE_XATTR_SIZE,
+ F2FS_MOUNT_RESERVE_ROOT,
+ F2FS_MOUNT_DISABLE_CHECKPOINT,
+ F2FS_MOUNT_NORECOVERY,
+ F2FS_MOUNT_ATGC,
+ F2FS_MOUNT_MERGE_CHECKPOINT,
+ F2FS_MOUNT_GC_MERGE,
+ F2FS_MOUNT_COMPRESS_CACHE,
+ F2FS_MOUNT_AGE_EXTENT_CACHE,
+ F2FS_MOUNT_NAT_BITS,
+ F2FS_MOUNT_INLINECRYPT,
+ /*
+ * Some f2fs environments expect to be able to pass the "lazytime" option
+ * string rather than using the MS_LAZYTIME flag, so this must remain.
+ */
+ F2FS_MOUNT_LAZYTIME,
+ F2FS_MOUNT_RESERVE_NODE,
+};
#define F2FS_OPTION(sbi) ((sbi)->mount_opt)
-#define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
-#define set_opt(sbi, option) (F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
-#define test_opt(sbi, option) (F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
+#define clear_opt(sbi, option) \
+ (F2FS_OPTION(sbi).opt &= ~BIT(F2FS_MOUNT_##option))
+#define set_opt(sbi, option) \
+ (F2FS_OPTION(sbi).opt |= BIT(F2FS_MOUNT_##option))
+#define test_opt(sbi, option) \
+ (F2FS_OPTION(sbi).opt & BIT(F2FS_MOUNT_##option))
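As a rough user-space illustration (simplified names, not the real option list), the switch from absolute mask defines to enum ordinals keeps set_opt()/clear_opt()/test_opt() callers unchanged while the bit position now comes from BIT() applied to the enum value, stored in a widened integer:

#include <stdio.h>

enum demo_mount_opt { DEMO_MOUNT_DISCARD, DEMO_MOUNT_LAZYTIME, DEMO_MOUNT_RESERVE_NODE };

#define DEMO_BIT(nr)            (1ULL << (nr))
#define demo_set_opt(opt, o)    ((opt) |= DEMO_BIT(DEMO_MOUNT_##o))
#define demo_clear_opt(opt, o)  ((opt) &= ~DEMO_BIT(DEMO_MOUNT_##o))
#define demo_test_opt(opt, o)   (((opt) & DEMO_BIT(DEMO_MOUNT_##o)) != 0)

int main(void)
{
        unsigned long long opt = 0;     /* mirrors the widened mount_opt.opt field */

        demo_set_opt(opt, LAZYTIME);
        printf("lazytime: %d\n", (int)demo_test_opt(opt, LAZYTIME));
        demo_clear_opt(opt, LAZYTIME);
        printf("lazytime: %d\n", (int)demo_test_opt(opt, LAZYTIME));
        return 0;
}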
#define ver_after(a, b) (typecheck(unsigned long long, a) && \
typecheck(unsigned long long, b) && \
@@ -139,6 +161,18 @@ enum blkzone_allocation_policy {
BLKZONE_ALLOC_PRIOR_CONV, /* Prioritize writing to conventional zones */
};
+enum bggc_io_aware_policy {
+ AWARE_ALL_IO, /* skip background GC if there is any kind of pending IO */
+ AWARE_READ_IO, /* skip background GC if there is pending read IO */
+ AWARE_NONE, /* never skip background GC due to pending IO */
+};
+
+enum device_allocation_policy {
+ ALLOCATE_FORWARD_NOHINT,
+ ALLOCATE_FORWARD_WITHIN_HINT,
+ ALLOCATE_FORWARD_FROM_HINT,
+};
+
/*
* An implementation of an rwsem that is explicitly unfair to readers. This
* prevents priority inversion when a low-priority reader acquires the read lock
@@ -154,8 +188,9 @@ struct f2fs_rwsem {
};
struct f2fs_mount_info {
- unsigned int opt;
+ unsigned long long opt;
block_t root_reserved_blocks; /* root reserved blocks */
+ block_t root_reserved_nodes; /* root reserved nodes */
kuid_t s_resuid; /* reserved blocks for uid */
kgid_t s_resgid; /* reserved blocks for gid */
int active_logs; /* # of active logs */
@@ -196,6 +231,7 @@ struct f2fs_mount_info {
int compress_mode; /* compression mode */
unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
+ unsigned int lookup_mode;
};
#define F2FS_FEATURE_ENCRYPT 0x00000001
@@ -214,6 +250,7 @@ struct f2fs_mount_info {
#define F2FS_FEATURE_COMPRESSION 0x00002000
#define F2FS_FEATURE_RO 0x00004000
#define F2FS_FEATURE_DEVICE_ALIAS 0x00008000
+#define F2FS_FEATURE_PACKED_SSA 0x00010000
#define __F2FS_HAS_FEATURE(raw_super, mask) \
((raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -250,14 +287,42 @@ enum {
#define DEF_CP_INTERVAL 60 /* 60 secs */
#define DEF_IDLE_INTERVAL 5 /* 5 secs */
#define DEF_DISABLE_INTERVAL 5 /* 5 secs */
+#define DEF_ENABLE_INTERVAL 5 /* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL 1 /* 1 secs */
#define DEF_UMOUNT_DISCARD_TIMEOUT 5 /* 5 secs */
+enum cp_time {
+ CP_TIME_START, /* begin */
+ CP_TIME_LOCK, /* after cp_global_sem */
+ CP_TIME_OP_LOCK, /* after block_operation */
+ CP_TIME_FLUSH_META, /* after flush sit/nat */
+ CP_TIME_SYNC_META, /* after sync_meta_pages */
+ CP_TIME_SYNC_CP_META, /* after sync cp meta pages */
+ CP_TIME_WAIT_DIRTY_META, /* after wait on dirty meta */
+ CP_TIME_WAIT_CP_DATA, /* after wait on cp data */
+ CP_TIME_FLUSH_DEVICE, /* after flush device cache */
+ CP_TIME_WAIT_LAST_CP, /* after wait on last cp pack */
+ CP_TIME_END, /* after unblock_operation */
+ CP_TIME_MAX,
+};
+
+/* time cost stats of checkpoint */
+struct cp_stats {
+ ktime_t times[CP_TIME_MAX];
+};
+
struct cp_control {
int reason;
__u64 trim_start;
__u64 trim_end;
__u64 trim_minlen;
+ struct cp_stats stats;
+};
+
+enum f2fs_cp_phase {
+ CP_PHASE_START_BLOCK_OPS,
+ CP_PHASE_FINISH_BLOCK_OPS,
+ CP_PHASE_FINISH_CHECKPOINT,
};
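A minimal user-space sketch of the same idea as cp_stats above: take one timestamp per checkpoint phase and report per-phase deltas afterwards. CLOCK_MONOTONIC stands in for the kernel's ktime source, and the phase list is shortened for the demo:

#include <stdio.h>
#include <time.h>

enum { DEMO_CP_START, DEMO_CP_LOCK, DEMO_CP_FLUSH_META, DEMO_CP_END, DEMO_CP_MAX };

static long long demo_now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
        long long times[DEMO_CP_MAX];
        int p;

        for (p = DEMO_CP_START; p < DEMO_CP_MAX; p++)
                times[p] = demo_now_ns();       /* real code does checkpoint work between phases */

        for (p = DEMO_CP_START + 1; p < DEMO_CP_MAX; p++)
                printf("phase %d -> %d: %lld ns\n", p - 1, p, times[p] - times[p - 1]);
        return 0;
}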
/*
@@ -310,7 +375,7 @@ struct inode_entry {
struct fsync_node_entry {
struct list_head list; /* list head */
- struct page *page; /* warm node page pointer */
+ struct folio *folio; /* warm node folio pointer */
unsigned int seq_id; /* sequence id */
};
@@ -318,7 +383,10 @@ struct ckpt_req {
struct completion wait; /* completion for checkpoint done */
struct llist_node llnode; /* llist_node to be linked in wait queue */
int ret; /* return code of checkpoint */
- ktime_t queue_time; /* request queued time */
+ union {
+ ktime_t queue_time; /* request queued time */
+ ktime_t delta_time; /* time in queue */
+ };
};
struct ckpt_req_control {
@@ -334,6 +402,9 @@ struct ckpt_req_control {
unsigned int peak_time; /* peak wait time in msec until now */
};
+/* time threshold for which a checkpoint was blocked, unit: ms */
+#define CP_LONG_LATENCY_THRESHOLD 5000
+
/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
struct list_head list; /* list head */
@@ -347,6 +418,8 @@ struct discard_entry {
#define DEFAULT_DISCARD_GRANULARITY 16
/* default maximum discard granularity of ordered discard, unit: block count */
#define DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY 16
+/* default interval of periodical discard submission */
+#define DEFAULT_DISCARD_INTERVAL (msecs_to_jiffies(20))
/* max discard pend list number */
#define MAX_PLIST_NUM 512
@@ -370,7 +443,7 @@ struct discard_cmd {
struct rb_node rb_node; /* rb node located in rb-tree */
struct discard_info di; /* discard info */
struct list_head list; /* command list */
- struct completion wait; /* compleation */
+ struct completion wait; /* completion */
struct block_device *bdev; /* bdev */
unsigned short ref; /* reference count */
unsigned char state; /* state */
@@ -596,8 +669,11 @@ enum {
#define DEFAULT_RETRY_IO_COUNT 8 /* maximum retry read IO or flush count */
-/* congestion wait timeout value, default: 20ms */
-#define DEFAULT_IO_TIMEOUT (msecs_to_jiffies(20))
+/* IO/non-IO congestion wait timeout value, default: 1ms */
+#define DEFAULT_SCHEDULE_TIMEOUT (msecs_to_jiffies(1))
+
+/* timeout value injected, default: 1000ms */
+#define DEFAULT_FAULT_TIMEOUT (msecs_to_jiffies(1000))
/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT 8
@@ -645,6 +721,12 @@ enum extent_type {
NR_EXTENT_CACHES,
};
+/*
+ * Reserved value that marks an invalid age extent, so the valid block
+ * range runs from 0 to ULLONG_MAX-1
+ */
+#define F2FS_EXTENT_AGE_INVALID ULLONG_MAX
+
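A tiny stand-alone sketch of the sentinel pattern behind the define above (simplified stand-ins, not f2fs structures): 0 stays a legal last_blocks value, and only ULLONG_MAX means "no valid age extent":

#include <limits.h>
#include <stdio.h>

#define DEMO_AGE_INVALID ULLONG_MAX

struct demo_extent_info {
        unsigned long long last_blocks;
};

static int demo_age_valid(const struct demo_extent_info *ei)
{
        return ei->last_blocks != DEMO_AGE_INVALID;
}

int main(void)
{
        struct demo_extent_info fresh = { .last_blocks = 0 };   /* valid */
        struct demo_extent_info none  = { .last_blocks = DEMO_AGE_INVALID };

        printf("fresh valid: %d, none valid: %d\n",
               demo_age_valid(&fresh), demo_age_valid(&none));
        return 0;
}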
struct extent_info {
unsigned int fofs; /* start offset in a file */
unsigned int len; /* length of the extent */
@@ -713,6 +795,7 @@ struct f2fs_map_blocks {
block_t m_lblk;
unsigned int m_len;
unsigned int m_flags;
+ unsigned long m_last_pblk; /* last allocated block, only used for DIO in LFS mode */
pgoff_t *m_next_pgofs; /* point next possible non-hole pgofs */
pgoff_t *m_next_extent; /* point to next possible extent */
int m_seg_type;
@@ -814,6 +897,7 @@ enum {
FI_ATOMIC_DIRTIED, /* indicate atomic file is dirtied */
FI_ATOMIC_REPLACE, /* indicate atomic replace */
FI_OPENED_FILE, /* indicate file has been opened */
+ FI_DONATE_FINISHED, /* indicate page donation of file has been finished */
FI_MAX, /* max flag, never be used */
};
@@ -831,6 +915,7 @@ struct f2fs_inode_info {
/* Use below internally in f2fs*/
unsigned long flags[BITS_TO_LONGS(FI_MAX)]; /* use to pass per-file flags */
+ unsigned int ioprio_hint; /* hint for IO priority */
struct f2fs_rwsem i_sem; /* protect fi info */
atomic_t dirty_pages; /* # of dirty pages */
f2fs_hash_t chash; /* hash value of given file name */
@@ -850,6 +935,12 @@ struct f2fs_inode_info {
#endif
struct list_head dirty_list; /* dirty list for dirs and files */
struct list_head gdirty_list; /* linked in global dirty list */
+
+ /* linked in global inode list for cache donation */
+ struct list_head gdonate_list;
+ pgoff_t donate_start, donate_end; /* inclusive */
+ atomic_t open_count; /* # of open files */
+
struct task_struct *atomic_write_task; /* store atomic write task */
struct extent_tree *extent_tree[NR_EXTENT_CACHES];
/* cached extent_tree entry */
@@ -876,9 +967,16 @@ struct f2fs_inode_info {
unsigned char i_compress_level; /* compress level (lz4hc,zstd) */
unsigned char i_compress_flag; /* compress flag */
unsigned int i_cluster_size; /* cluster size */
+ atomic_t writeback; /* count # of writeback threads */
unsigned int atomic_write_cnt;
loff_t original_i_size; /* original i_size before atomic write */
+#ifdef CONFIG_FS_ENCRYPTION
+ struct fscrypt_inode_info *i_crypt_info; /* filesystem encryption info */
+#endif
+#ifdef CONFIG_FS_VERITY
+ struct fsverity_info *i_verity_info; /* filesystem verity info */
+#endif
};
static inline void get_read_extent_info(struct extent_info *ext,
@@ -981,11 +1079,11 @@ struct f2fs_nm_info {
*/
struct dnode_of_data {
struct inode *inode; /* vfs inode pointer */
- struct page *inode_page; /* its inode page, NULL is possible */
- struct page *node_page; /* cached direct node page */
+ struct folio *inode_folio; /* its inode folio, NULL is possible */
+ struct folio *node_folio; /* cached direct node folio */
nid_t nid; /* node id of the direct node block */
unsigned int ofs_in_node; /* data offset in the node page */
- bool inode_page_locked; /* inode page is locked or not */
+ bool inode_folio_locked; /* inode folio is locked or not */
bool node_changed; /* is node block changed */
char cur_level; /* level of hole node page */
char max_level; /* level of current page located */
@@ -993,12 +1091,12 @@ struct dnode_of_data {
};
static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
- struct page *ipage, struct page *npage, nid_t nid)
+ struct folio *ifolio, struct folio *nfolio, nid_t nid)
{
memset(dn, 0, sizeof(*dn));
dn->inode = inode;
- dn->inode_page = ipage;
- dn->node_page = npage;
+ dn->inode_folio = ifolio;
+ dn->node_folio = nfolio;
dn->nid = nid;
}
@@ -1097,8 +1195,8 @@ struct f2fs_sm_info {
* f2fs monitors the number of several block types such as on-writeback,
* dirty dentry blocks, dirty node blocks, and dirty meta blocks.
*/
-#define WB_DATA_TYPE(p, f) \
- (f || f2fs_is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
+#define WB_DATA_TYPE(folio, f) \
+ (f || f2fs_is_cp_guaranteed(folio) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
F2FS_DIRTY_DENTS,
F2FS_DIRTY_DATA,
@@ -1214,7 +1312,10 @@ struct f2fs_io_info {
blk_opf_t op_flags; /* req_flag_bits */
block_t new_blkaddr; /* new block address to be written */
block_t old_blkaddr; /* old block address before Cow */
- struct page *page; /* page to be written */
+ union {
+ struct page *page; /* page to be written */
+ struct folio *folio;
+ };
struct page *encrypted_page; /* encrypted page */
struct page *compressed_page; /* compressed page */
struct list_head list; /* serialize IOs */
@@ -1260,7 +1361,7 @@ struct f2fs_bio_info {
struct f2fs_dev_info {
struct file *bdev_file;
struct block_device *bdev;
- char path[MAX_PATH_LEN];
+ char path[MAX_PATH_LEN + 1];
unsigned int total_segments;
block_t start_blk;
block_t end_blk;
@@ -1274,6 +1375,7 @@ enum inode_type {
DIR_INODE, /* for dirty dir inode */
FILE_INODE, /* for dirty regular/symlink inode */
DIRTY_META, /* for all dirtied inode metadata */
+ DONATE_INODE, /* for all inode to donate pages */
NR_INODE_TYPE,
};
@@ -1337,6 +1439,7 @@ enum {
DISCARD_TIME,
GC_TIME,
DISABLE_TIME,
+ ENABLE_TIME,
UMOUNT_DISCARD_TIMEOUT,
MAX_TIME,
};
@@ -1400,7 +1503,7 @@ enum {
enum {
MEMORY_MODE_NORMAL, /* memory mode for normal devices */
- MEMORY_MODE_LOW, /* memory mode for low memry devices */
+ MEMORY_MODE_LOW, /* memory mode for low memory devices */
};
enum errors_option {
@@ -1416,6 +1519,12 @@ enum {
TOTAL_CALL = FOREGROUND,
};
+enum f2fs_lookup_mode {
+ LOOKUP_PERF,
+ LOOKUP_COMPAT,
+ LOOKUP_AUTO,
+};
+
static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);
@@ -1464,7 +1573,7 @@ enum compress_flag {
#define COMPRESS_DATA_RESERVED_SIZE 4
struct compress_data {
__le32 clen; /* compressed data size */
- __le32 chksum; /* compressed data chksum */
+ __le32 chksum; /* compressed data checksum */
__le32 reserved[COMPRESS_DATA_RESERVED_SIZE]; /* reserved */
u8 cdata[]; /* compressed data */
};
@@ -1509,6 +1618,7 @@ struct compress_io_ctx {
struct decompress_io_ctx {
u32 magic; /* magic number to indicate page is compressed */
struct inode *inode; /* inode the context belong to */
+ struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */
pgoff_t cluster_idx; /* cluster index number */
unsigned int cluster_size; /* page count in cluster */
unsigned int log_cluster_size; /* log of cluster size */
@@ -1549,6 +1659,7 @@ struct decompress_io_ctx {
bool failed; /* IO error occurred before decompression? */
bool need_verity; /* need fs-verity verification after decompression? */
+ unsigned char compress_algorithm; /* backup algorithm type */
void *private; /* payload buffer for specified decompression algorithm */
void *private2; /* extra payload buffer */
struct work_struct verity_work; /* work to verify the decompressed pages */
@@ -1571,6 +1682,7 @@ struct f2fs_sb_info {
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int blocks_per_blkz; /* F2FS blocks per zone */
+ unsigned int unusable_blocks_per_sec; /* unusable blocks per section */
unsigned int max_open_zones; /* max open zone resources of the zoned device */
/* For adjust the priority writing position of data in zone UFS */
unsigned int blkzone_alloc_policy;
@@ -1603,6 +1715,8 @@ struct f2fs_sb_info {
unsigned long last_time[MAX_TIME]; /* to store time in jiffies */
long interval_time[MAX_TIME]; /* to store thresholds */
struct ckpt_req_control cprc_info; /* for checkpoint request control */
+ struct cp_stats cp_stats; /* for time stat of checkpoint */
+ struct f2fs_rwsem cp_enable_rwsem; /* block cache/dio write */
struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
@@ -1629,6 +1743,9 @@ struct f2fs_sb_info {
unsigned int warm_data_age_threshold;
unsigned int last_age_weight;
+ /* control donate caches */
+ unsigned int donate_files;
+
/* basic filesystem units */
unsigned int log_sectors_per_block; /* log2 sectors per block */
unsigned int log_blocksize; /* log2 block size */
@@ -1638,7 +1755,6 @@ struct f2fs_sb_info {
unsigned int meta_ino_num; /* meta inode number*/
unsigned int log_blocks_per_seg; /* log2 blocks per segment */
unsigned int blocks_per_seg; /* blocks per segment */
- unsigned int unusable_blocks_per_sec; /* unusable blocks per section */
unsigned int segs_per_sec; /* segments per section */
unsigned int secs_per_zone; /* sections per zone */
unsigned int total_sections; /* total section count */
@@ -1660,6 +1776,7 @@ struct f2fs_sb_info {
unsigned int nquota_files; /* # of quota sysfile */
struct f2fs_rwsem quota_sem; /* blocking cp for flags */
+ struct task_struct *umount_lock_holder; /* s_umount lock holder */
/* # of pages, see count_type */
atomic_t nr_pages[NR_COUNT_TYPE];
@@ -1693,6 +1810,9 @@ struct f2fs_sb_info {
/* for skip statistic */
unsigned long long skipped_gc_rwsem; /* FG_GC only */
+ /* free sections reserved for pinned file */
+ unsigned int reserved_pin_section;
+
/* threshold for gc trials on pinned files */
unsigned short gc_pin_file_threshold;
struct f2fs_rwsem pin_sem;
@@ -1762,15 +1882,15 @@ struct f2fs_sb_info {
unsigned int dirty_device; /* for checkpoint data flush */
spinlock_t dev_lock; /* protect dirty_device */
bool aligned_blksize; /* all devices has the same logical blksize */
- unsigned int first_zoned_segno; /* first zoned segno */
+ unsigned int first_seq_zone_segno; /* first segno in sequential zone */
+ unsigned int bggc_io_aware; /* how background GC reacts to pending IO */
+ unsigned int allocate_section_hint; /* the boundary position between devices */
+ unsigned int allocate_section_policy; /* determine the section writing priority */
/* For write statistics */
u64 sectors_written_start;
u64 kbytes_written;
- /* Reference to checksum algorithm driver via cryptoapi */
- struct crypto_shash *s_chksum_driver;
-
/* Precomputed FS UUID checksum for seeding other checksums */
__u32 s_chksum_seed;
@@ -1786,9 +1906,6 @@ struct f2fs_sb_info {
spinlock_t error_lock; /* protect errors/stop_reason array */
bool error_dirty; /* errors of sb is dirty */
- struct kmem_cache *inline_xattr_slab; /* inline xattr entry */
- unsigned int inline_xattr_slab_size; /* default inline xattr slab size */
-
/* For reclaimed segs statistics per each GC mode */
unsigned int gc_segment_mode; /* GC state for reclaimed segments */
unsigned int gc_reclaimed_segs[MAX_GC_MODE]; /* Reclaimed segs for each mode */
@@ -1804,6 +1921,9 @@ struct f2fs_sb_info {
u64 committed_atomic_block;
u64 revoked_atomic_block;
+ /* carve out reserved_blocks from total blocks */
+ bool carve_out;
+
#ifdef CONFIG_F2FS_FS_COMPRESSION
struct kmem_cache *page_array_slab; /* page array entry */
unsigned int page_array_slab_size; /* default page array slab size */
@@ -1884,6 +2004,7 @@ static inline bool __time_to_inject(struct f2fs_sb_info *sbi, int type,
atomic_inc(&ffi->inject_ops);
if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
atomic_set(&ffi->inject_ops, 0);
+ ffi->inject_count[type]++;
f2fs_info_ratelimited(sbi, "inject %s in %s of %pS",
f2fs_fault_name[type], func, parent_func);
return true;
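A simplified user-space model of the counting logic above (no atomics, demo names): every inject_rate-th call fires, and inject_count[] accumulates how many injections each fault type has seen:

#include <stdio.h>

enum { DEMO_FAULT_KMALLOC, DEMO_FAULT_TIMEOUT, DEMO_FAULT_MAX };

struct demo_fault_info {
        int inject_ops;
        int inject_rate;
        unsigned int inject_count[DEMO_FAULT_MAX];
};

static int demo_time_to_inject(struct demo_fault_info *ffi, int type)
{
        if (++ffi->inject_ops >= ffi->inject_rate) {
                ffi->inject_ops = 0;
                ffi->inject_count[type]++;
                return 1;
        }
        return 0;
}

int main(void)
{
        struct demo_fault_info ffi = { .inject_rate = 3 };
        int i;

        for (i = 0; i < 10; i++)
                demo_time_to_inject(&ffi, DEMO_FAULT_TIMEOUT);
        printf("timeout faults injected: %u\n", ffi.inject_count[DEMO_FAULT_TIMEOUT]);
        return 0;
}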
@@ -1945,42 +2066,20 @@ static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
/*
* Inline functions
*/
-static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
- const void *address, unsigned int length)
-{
- struct {
- struct shash_desc shash;
- char ctx[4];
- } desc;
- int err;
-
- BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
-
- desc.shash.tfm = sbi->s_chksum_driver;
- *(u32 *)desc.ctx = crc;
-
- err = crypto_shash_update(&desc.shash, address, length);
- BUG_ON(err);
-
- return *(u32 *)desc.ctx;
-}
-
-static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
- unsigned int length)
+static inline u32 __f2fs_crc32(u32 crc, const void *address,
+ unsigned int length)
{
- return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
+ return crc32(crc, address, length);
}
-static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
- void *buf, size_t buf_size)
+static inline u32 f2fs_crc32(const void *address, unsigned int length)
{
- return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
+ return __f2fs_crc32(F2FS_SUPER_MAGIC, address, length);
}
-static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
- const void *address, unsigned int length)
+static inline u32 f2fs_chksum(u32 crc, const void *address, unsigned int length)
{
- return __f2fs_crc32(sbi, crc, address, length);
+ return __f2fs_crc32(crc, address, length);
}
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
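For illustration only, a self-contained user-space version of the seed-and-continue usage exposed by f2fs_crc32()/f2fs_chksum() above; the bit-serial loop below is the standard LSB-first CRC-32 (polynomial 0xEDB88320), which is my understanding of what lib/crc32's crc32() computes, so bit-for-bit parity with the kernel should be treated as an assumption:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t demo_crc32(uint32_t crc, const void *buf, size_t len)
{
        const uint8_t *p = buf;

        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
        }
        return crc;
}

int main(void)
{
        const char part1[] = "f2fs";
        const char part2[] = "checkpoint";
        uint32_t crc;

        /* like f2fs_crc32(): seed with the f2fs superblock magic (0xF2F52010) */
        crc = demo_crc32(0xF2F52010u, part1, strlen(part1));
        /* like f2fs_chksum(): continue from the previous crc */
        crc = demo_crc32(crc, part2, strlen(part2));
        printf("crc = 0x%08x\n", crc);
        return 0;
}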
@@ -2003,9 +2102,9 @@ static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
return F2FS_I_SB(mapping->host);
}
-static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
+static inline struct f2fs_sb_info *F2FS_F_SB(const struct folio *folio)
{
- return F2FS_M_SB(page_file_mapping(page));
+ return F2FS_M_SB(folio->mapping);
}
static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
@@ -2016,7 +2115,7 @@ static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
static inline struct f2fs_super_block *F2FS_SUPER_BLOCK(struct folio *folio,
pgoff_t index)
{
- pgoff_t idx_in_folio = index % (1 << folio_order(folio));
+ pgoff_t idx_in_folio = index % folio_nr_pages(folio);
return (struct f2fs_super_block *)
(page_address(folio_page(folio, idx_in_folio)) +
@@ -2028,14 +2127,14 @@ static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
return (struct f2fs_checkpoint *)(sbi->ckpt);
}
-static inline struct f2fs_node *F2FS_NODE(struct page *page)
+static inline struct f2fs_node *F2FS_NODE(const struct folio *folio)
{
- return (struct f2fs_node *)page_address(page);
+ return (struct f2fs_node *)folio_address(folio);
}
-static inline struct f2fs_inode *F2FS_INODE(struct page *page)
+static inline struct f2fs_inode *F2FS_INODE(const struct folio *folio)
{
- return &((struct f2fs_node *)page_address(page))->i;
+ return &((struct f2fs_node *)folio_address(folio))->i;
}
static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
@@ -2073,6 +2172,16 @@ static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
return sbi->node_inode->i_mapping;
}
+static inline bool is_meta_folio(struct folio *folio)
+{
+ return folio->mapping == META_MAPPING(F2FS_F_SB(folio));
+}
+
+static inline bool is_node_folio(struct folio *folio)
+{
+ return folio->mapping == NODE_MAPPING(F2FS_F_SB(folio));
+}
+
static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
return test_bit(type, &sbi->s_flag);
@@ -2232,6 +2341,36 @@ static inline void f2fs_up_write(struct f2fs_rwsem *sem)
#endif
}
+static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
+{
+ unsigned long flags;
+ unsigned char *nat_bits;
+
+ /*
+ * In order to re-enable nat_bits we need to call fsck.f2fs by
+ * set_sbi_flag(sbi, SBI_NEED_FSCK). But it may give huge cost,
+ * so let's rely on regular fsck or unclean shutdown.
+ */
+
+ if (lock)
+ spin_lock_irqsave(&sbi->cp_lock, flags);
+ __clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
+ nat_bits = NM_I(sbi)->nat_bits;
+ NM_I(sbi)->nat_bits = NULL;
+ if (lock)
+ spin_unlock_irqrestore(&sbi->cp_lock, flags);
+
+ kvfree(nat_bits);
+}
+
+static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
+ struct cp_control *cpc)
+{
+ bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
+
+ return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
+}
+
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
f2fs_down_read(&sbi->cp_rwsem);
@@ -2296,13 +2435,11 @@ static inline bool f2fs_has_xattr_block(unsigned int ofs)
return ofs == XATTR_NODE_OFFSET;
}
-static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
+static inline bool __allow_reserved_root(struct f2fs_sb_info *sbi,
struct inode *inode, bool cap)
{
if (!inode)
return true;
- if (!test_opt(sbi, RESERVE_ROOT))
- return false;
if (IS_NOQUOTA(inode))
return true;
if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
@@ -2323,7 +2460,7 @@ static inline unsigned int get_available_block_count(struct f2fs_sb_info *sbi,
avail_user_block_count = sbi->user_block_count -
sbi->current_reserved_blocks;
- if (!__allow_reserved_blocks(sbi, inode, cap))
+ if (test_opt(sbi, RESERVE_ROOT) && !__allow_reserved_root(sbi, inode, cap))
avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
@@ -2398,6 +2535,13 @@ release_quota:
}
#define PAGE_PRIVATE_GET_FUNC(name, flagname) \
+static inline bool folio_test_f2fs_##name(const struct folio *folio) \
+{ \
+ unsigned long priv = (unsigned long)folio->private; \
+ unsigned long v = (1UL << PAGE_PRIVATE_NOT_POINTER) | \
+ (1UL << PAGE_PRIVATE_##flagname); \
+ return (priv & v) == v; \
+} \
static inline bool page_private_##name(struct page *page) \
{ \
return PagePrivate(page) && \
@@ -2406,6 +2550,17 @@ static inline bool page_private_##name(struct page *page) \
}
#define PAGE_PRIVATE_SET_FUNC(name, flagname) \
+static inline void folio_set_f2fs_##name(struct folio *folio) \
+{ \
+ unsigned long v = (1UL << PAGE_PRIVATE_NOT_POINTER) | \
+ (1UL << PAGE_PRIVATE_##flagname); \
+ if (!folio->private) \
+ folio_attach_private(folio, (void *)v); \
+ else { \
+ v |= (unsigned long)folio->private; \
+ folio->private = (void *)v; \
+ } \
+} \
static inline void set_page_private_##name(struct page *page) \
{ \
if (!PagePrivate(page)) \
@@ -2415,6 +2570,16 @@ static inline void set_page_private_##name(struct page *page) \
}
#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
+static inline void folio_clear_f2fs_##name(struct folio *folio) \
+{ \
+ unsigned long v = (unsigned long)folio->private; \
+ \
+ v &= ~(1UL << PAGE_PRIVATE_##flagname); \
+ if (v == (1UL << PAGE_PRIVATE_NOT_POINTER)) \
+ folio_detach_private(folio); \
+ else \
+ folio->private = (void *)v; \
+} \
static inline void clear_page_private_##name(struct page *page) \
{ \
clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
@@ -2437,39 +2602,23 @@ PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
-static inline unsigned long get_page_private_data(struct page *page)
+static inline unsigned long folio_get_f2fs_data(struct folio *folio)
{
- unsigned long data = page_private(page);
+ unsigned long data = (unsigned long)folio->private;
if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
return 0;
return data >> PAGE_PRIVATE_MAX;
}
-static inline void set_page_private_data(struct page *page, unsigned long data)
-{
- if (!PagePrivate(page))
- attach_page_private(page, (void *)0);
- set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
- page_private(page) |= data << PAGE_PRIVATE_MAX;
-}
-
-static inline void clear_page_private_data(struct page *page)
-{
- page_private(page) &= GENMASK(PAGE_PRIVATE_MAX - 1, 0);
- if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER))
- detach_page_private(page);
-}
-
-static inline void clear_page_private_all(struct page *page)
+static inline void folio_set_f2fs_data(struct folio *folio, unsigned long data)
{
- clear_page_private_data(page);
- clear_page_private_reference(page);
- clear_page_private_gcing(page);
- clear_page_private_inline(page);
- clear_page_private_atomic(page);
+ data = (1UL << PAGE_PRIVATE_NOT_POINTER) | (data << PAGE_PRIVATE_MAX);
- f2fs_bug_on(F2FS_P_SB(page), page_private(page));
+ if (!folio_test_private(folio))
+ folio_attach_private(folio, (void *)data);
+ else
+ folio->private = (void *)((unsigned long)folio->private | data);
}
static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
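A user-space sketch of the encoding these folio-private helpers implement, with made-up bit positions (the real PAGE_PRIVATE_* ordering is not reproduced here): one word carries a not-a-pointer marker, per-folio flag bits, and an integer payload shifted above the flag area:

#include <stdio.h>

enum { DEMO_NOT_POINTER, DEMO_INLINE, DEMO_GCING, DEMO_PRIVATE_MAX };

static unsigned long demo_pack(unsigned long flag_bit, unsigned long data)
{
        return (1UL << DEMO_NOT_POINTER) | (1UL << flag_bit) |
               (data << DEMO_PRIVATE_MAX);
}

static unsigned long demo_get_data(unsigned long priv)
{
        if (!(priv & (1UL << DEMO_NOT_POINTER)))
                return 0;       /* the word is a real pointer, no packed data */
        return priv >> DEMO_PRIVATE_MAX;
}

int main(void)
{
        unsigned long priv = demo_pack(DEMO_GCING, 42);

        printf("gcing: %d, data: %lu\n",
               !!(priv & (1UL << DEMO_GCING)), demo_get_data(priv));
        return 0;
}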
@@ -2479,8 +2628,14 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
spin_lock(&sbi->stat_lock);
- f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
- sbi->total_valid_block_count -= (block_t)count;
+ if (unlikely(sbi->total_valid_block_count < count)) {
+ f2fs_warn(sbi, "Inconsistent total_valid_block_count:%u, ino:%lu, count:%u",
+ sbi->total_valid_block_count, inode->i_ino, count);
+ sbi->total_valid_block_count = 0;
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ } else {
+ sbi->total_valid_block_count -= count;
+ }
if (sbi->reserved_blocks &&
sbi->current_reserved_blocks < sbi->reserved_blocks)
sbi->current_reserved_blocks = min(sbi->reserved_blocks,
@@ -2663,7 +2818,7 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
struct inode *inode, bool is_inode)
{
block_t valid_block_count;
- unsigned int valid_node_count;
+ unsigned int valid_node_count, avail_user_node_count;
unsigned int avail_user_block_count;
int err;
@@ -2685,15 +2840,20 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
spin_lock(&sbi->stat_lock);
valid_block_count = sbi->total_valid_block_count + 1;
- avail_user_block_count = get_available_block_count(sbi, inode, false);
+ avail_user_block_count = get_available_block_count(sbi, inode,
+ test_opt(sbi, RESERVE_NODE));
if (unlikely(valid_block_count > avail_user_block_count)) {
spin_unlock(&sbi->stat_lock);
goto enospc;
}
+ avail_user_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
+ if (test_opt(sbi, RESERVE_NODE) &&
+ !__allow_reserved_root(sbi, inode, true))
+ avail_user_node_count -= F2FS_OPTION(sbi).root_reserved_nodes;
valid_node_count = sbi->total_valid_node_count + 1;
- if (unlikely(valid_node_count > sbi->total_node_count)) {
+ if (unlikely(valid_node_count > avail_user_node_count)) {
spin_unlock(&sbi->stat_lock);
goto enospc;
}
@@ -2778,65 +2938,75 @@ static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
}
-static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
- pgoff_t index, bool for_write)
+static inline struct folio *f2fs_grab_cache_folio(struct address_space *mapping,
+ pgoff_t index, bool for_write)
{
- struct page *page;
+ struct folio *folio;
unsigned int flags;
if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
+ fgf_t fgf_flags;
+
if (!for_write)
- page = find_get_page_flags(mapping, index,
- FGP_LOCK | FGP_ACCESSED);
+ fgf_flags = FGP_LOCK | FGP_ACCESSED;
else
- page = find_lock_page(mapping, index);
- if (page)
- return page;
+ fgf_flags = FGP_LOCK;
+ folio = __filemap_get_folio(mapping, index, fgf_flags, 0);
+ if (!IS_ERR(folio))
+ return folio;
if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
if (!for_write)
- return grab_cache_page(mapping, index);
+ return filemap_grab_folio(mapping, index);
flags = memalloc_nofs_save();
- page = grab_cache_page_write_begin(mapping, index);
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
memalloc_nofs_restore(flags);
- return page;
+ return folio;
}
-static inline struct page *f2fs_pagecache_get_page(
+static inline struct folio *f2fs_filemap_get_folio(
struct address_space *mapping, pgoff_t index,
fgf_t fgp_flags, gfp_t gfp_mask)
{
if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET))
- return NULL;
+ return ERR_PTR(-ENOMEM);
- return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
+ return __filemap_get_folio(mapping, index, fgp_flags, gfp_mask);
}
-static inline void f2fs_put_page(struct page *page, int unlock)
+static inline void f2fs_folio_put(struct folio *folio, bool unlock)
{
- if (!page)
+ if (IS_ERR_OR_NULL(folio))
return;
if (unlock) {
- f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
- unlock_page(page);
+ f2fs_bug_on(F2FS_F_SB(folio), !folio_test_locked(folio));
+ folio_unlock(folio);
}
- put_page(page);
+ folio_put(folio);
+}
+
+static inline void f2fs_put_page(struct page *page, bool unlock)
+{
+ if (!page)
+ return;
+ f2fs_folio_put(page_folio(page), unlock);
}
static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
- if (dn->node_page)
- f2fs_put_page(dn->node_page, 1);
- if (dn->inode_page && dn->node_page != dn->inode_page)
- f2fs_put_page(dn->inode_page, 0);
- dn->node_page = NULL;
- dn->inode_page = NULL;
+ if (dn->node_folio)
+ f2fs_folio_put(dn->node_folio, true);
+ if (dn->inode_folio && dn->node_folio != dn->inode_folio)
+ f2fs_folio_put(dn->inode_folio, false);
+ dn->node_folio = NULL;
+ dn->inode_folio = NULL;
}
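Because the folio lookup helpers now return ERR_PTR() values rather than NULL, and f2fs_folio_put() above tolerates both, here is a small user-space re-implementation of that convention, purely for illustration; the kernel's own macros live in linux/err.h:

#include <errno.h>
#include <stdio.h>

#define DEMO_MAX_ERRNO 4095

static inline void *demo_err_ptr(long err)      { return (void *)err; }
static inline long demo_ptr_err(const void *p)  { return (long)p; }

static inline int demo_is_err(const void *p)
{
        return (unsigned long)p >= (unsigned long)-DEMO_MAX_ERRNO;
}

static inline int demo_is_err_or_null(const void *p)
{
        return !p || demo_is_err(p);
}

int main(void)
{
        void *folio = demo_err_ptr(-ENOMEM);    /* what a failed lookup returns */

        if (demo_is_err_or_null(folio)) {
                printf("lookup failed: %ld\n", demo_ptr_err(folio));
                return 0;       /* nothing to put, mirroring f2fs_folio_put()'s early return */
        }
        return 0;
}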
static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
@@ -2900,13 +3070,10 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
if (sbi->gc_mode == GC_URGENT_HIGH)
return true;
- if (zoned_gc) {
- if (is_inflight_read_io(sbi))
- return false;
- } else {
- if (is_inflight_io(sbi, type))
- return false;
- }
+ if (sbi->bggc_io_aware == AWARE_READ_IO && is_inflight_read_io(sbi))
+ return false;
+ if (sbi->bggc_io_aware == AWARE_ALL_IO && is_inflight_io(sbi, type))
+ return false;
if (sbi->gc_mode == GC_URGENT_MID)
return true;
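A small stand-alone model of the policy check above (counters and names are demo stand-ins): AWARE_NONE never blocks background GC on IO, AWARE_READ_IO only looks at in-flight reads, and AWARE_ALL_IO looks at any in-flight IO:

#include <stdio.h>

enum { DEMO_AWARE_ALL_IO, DEMO_AWARE_READ_IO, DEMO_AWARE_NONE };

static int demo_is_idle(int policy, int inflight_reads, int inflight_all)
{
        if (policy == DEMO_AWARE_READ_IO && inflight_reads)
                return 0;
        if (policy == DEMO_AWARE_ALL_IO && inflight_all)
                return 0;
        return 1;
}

int main(void)
{
        printf("none: %d, read-aware: %d, all-aware: %d\n",
               demo_is_idle(DEMO_AWARE_NONE, 2, 5),
               demo_is_idle(DEMO_AWARE_READ_IO, 2, 5),
               demo_is_idle(DEMO_AWARE_ALL_IO, 0, 3));
        return 0;
}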
@@ -2930,9 +3097,9 @@ static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
#define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)
-static inline bool IS_INODE(struct page *page)
+static inline bool IS_INODE(const struct folio *folio)
{
- struct f2fs_node *p = F2FS_NODE(page);
+ struct f2fs_node *p = F2FS_NODE(folio);
return RAW_IS_INODE(p);
}
@@ -2950,31 +3117,31 @@ static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
static inline int f2fs_has_extra_attr(struct inode *inode);
static inline unsigned int get_dnode_base(struct inode *inode,
- struct page *node_page)
+ struct folio *node_folio)
{
- if (!IS_INODE(node_page))
+ if (!IS_INODE(node_folio))
return 0;
return inode ? get_extra_isize(inode) :
- offset_in_addr(&F2FS_NODE(node_page)->i);
+ offset_in_addr(&F2FS_NODE(node_folio)->i);
}
static inline __le32 *get_dnode_addr(struct inode *inode,
- struct page *node_page)
+ struct folio *node_folio)
{
- return blkaddr_in_node(F2FS_NODE(node_page)) +
- get_dnode_base(inode, node_page);
+ return blkaddr_in_node(F2FS_NODE(node_folio)) +
+ get_dnode_base(inode, node_folio);
}
static inline block_t data_blkaddr(struct inode *inode,
- struct page *node_page, unsigned int offset)
+ struct folio *node_folio, unsigned int offset)
{
- return le32_to_cpu(*(get_dnode_addr(inode, node_page) + offset));
+ return le32_to_cpu(*(get_dnode_addr(inode, node_folio) + offset));
}
static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
{
- return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
+ return data_blkaddr(dn->inode, dn->node_folio, dn->ofs_in_node);
}
static inline int f2fs_test_bit(unsigned int nr, char *addr)
@@ -3285,9 +3452,10 @@ static inline unsigned int addrs_per_page(struct inode *inode,
return addrs;
}
-static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
+static inline
+void *inline_xattr_addr(struct inode *inode, const struct folio *folio)
{
- struct f2fs_inode *ri = F2FS_INODE(page);
+ struct f2fs_inode *ri = F2FS_INODE(folio);
return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
get_inline_xattr_addrs(inode)]);
@@ -3302,7 +3470,7 @@ static inline int inline_xattr_size(struct inode *inode)
/*
* Notice: check inline_data flag without inode page lock is unsafe.
- * It could change at any time by f2fs_convert_inline_page().
+ * It could change at any time by f2fs_convert_inline_folio().
*/
static inline int f2fs_has_inline_data(struct inode *inode)
{
@@ -3334,9 +3502,9 @@ static inline bool f2fs_is_cow_file(struct inode *inode)
return is_inode_flag_set(inode, FI_COW_FILE);
}
-static inline void *inline_data_addr(struct inode *inode, struct page *page)
+static inline void *inline_data_addr(struct inode *inode, struct folio *folio)
{
- __le32 *addr = get_dnode_addr(inode, page);
+ __le32 *addr = get_dnode_addr(inode, folio);
return (void *)(addr + DEF_INLINE_RESERVED_SIZE);
}
@@ -3462,6 +3630,14 @@ static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
}
+static inline void *f2fs_vmalloc(struct f2fs_sb_info *sbi, size_t size)
+{
+ if (time_to_inject(sbi, FAULT_VMALLOC))
+ return NULL;
+
+ return vmalloc(size);
+}
+
static inline int get_extra_isize(struct inode *inode)
{
return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
@@ -3526,9 +3702,9 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
bool readonly, bool need_lock);
int f2fs_precache_extents(struct inode *inode);
-int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+int f2fs_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
int f2fs_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa);
+ struct dentry *dentry, struct file_kattr *fa);
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
@@ -3538,14 +3714,15 @@ int f2fs_pin_file_control(struct inode *inode, bool inc);
* inode.c
*/
void f2fs_set_inode_flags(struct inode *inode);
-bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
-void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
+bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct folio *folio);
+void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct folio *folio);
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
-void f2fs_update_inode(struct inode *inode, struct page *node_page);
+void f2fs_update_inode(struct inode *inode, struct folio *node_folio);
void f2fs_update_inode_page(struct inode *inode);
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
+void f2fs_remove_donate_inode(struct inode *inode);
void f2fs_evict_inode(struct inode *inode);
void f2fs_handle_failed_inode(struct inode *inode);
@@ -3583,28 +3760,28 @@ int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
struct f2fs_filename *fname);
void f2fs_free_filename(struct f2fs_filename *fname);
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
- const struct f2fs_filename *fname, int *max_slots);
+ const struct f2fs_filename *fname, int *max_slots,
+ bool use_hash);
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
unsigned int start_pos, struct fscrypt_str *fstr);
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
struct f2fs_dentry_ptr *d);
-struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
- const struct f2fs_filename *fname, struct page *dpage);
+struct folio *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
+ const struct f2fs_filename *fname, struct folio *dfolio);
void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
unsigned int current_depth);
int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
- const struct f2fs_filename *fname,
- struct page **res_page);
+ const struct f2fs_filename *fname, struct folio **res_folio);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
- const struct qstr *child, struct page **res_page);
-struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
+ const struct qstr *child, struct folio **res_folio);
+struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct folio **f);
ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
- struct page **page);
+ struct folio **folio);
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
- struct page *page, struct inode *inode);
-bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
+ struct folio *folio, struct inode *inode);
+bool f2fs_has_enough_room(struct inode *dir, struct folio *ifolio,
const struct f2fs_filename *fname);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
const struct fscrypt_str *name, f2fs_hash_t name_hash,
@@ -3615,7 +3792,7 @@ int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
struct inode *inode, nid_t ino, umode_t mode);
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
struct inode *inode, nid_t ino, umode_t mode);
-void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
+void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct folio *folio,
struct inode *dir, struct inode *inode);
int f2fs_do_tmpfile(struct inode *inode, struct inode *dir,
struct f2fs_filename *fname);
@@ -3636,13 +3813,12 @@ int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
int f2fs_dquot_initialize(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
-int f2fs_quota_sync(struct super_block *sb, int type);
+int f2fs_do_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
void f2fs_quota_off_umount(struct super_block *sb);
void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag);
void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason);
void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error);
-void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
@@ -3656,12 +3832,13 @@ void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);
* node.c
*/
struct node_info;
+enum node_type;
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
-bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct folio *folio);
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
-void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio);
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
@@ -3674,14 +3851,15 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
int f2fs_truncate_xattr_node(struct inode *inode);
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
unsigned int seq_id);
-bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi);
int f2fs_remove_inode_page(struct inode *inode);
-struct page *f2fs_new_inode_page(struct inode *inode);
-struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
+struct folio *f2fs_new_inode_folio(struct inode *inode);
+struct folio *f2fs_new_node_folio(struct dnode_of_data *dn, unsigned int ofs);
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
-struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
-struct page *f2fs_get_node_page_ra(struct page *parent, int start);
-int f2fs_move_node_page(struct page *node_page, int gc_type);
+struct folio *f2fs_get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid,
+ enum node_type node_type);
+struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino);
+struct folio *f2fs_get_xnode_folio(struct f2fs_sb_info *sbi, pgoff_t xnid);
+int f2fs_move_node_folio(struct folio *node_folio, int gc_type);
void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
struct writeback_control *wbc, bool atomic,
@@ -3694,12 +3872,11 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
-int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
-int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
-int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
+int f2fs_recover_inline_xattr(struct inode *inode, struct folio *folio);
+int f2fs_recover_xattr_data(struct inode *inode, struct folio *folio);
+int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct folio *folio);
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum);
-void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi);
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
@@ -3718,7 +3895,8 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
-void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
+void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr,
+ unsigned int len);
bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
@@ -3744,7 +3922,7 @@ int f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
struct cp_control *cpc);
-struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
+struct folio *f2fs_get_sum_folio(struct f2fs_sb_info *sbi, unsigned int segno);
void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
block_t blk_addr);
void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio,
@@ -3763,14 +3941,16 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
bool recover_newaddr);
enum temp_type f2fs_get_segment_temp(struct f2fs_sb_info *sbi,
enum log_type seg_type);
-int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct folio *folio,
block_t old_blkaddr, block_t *new_blkaddr,
struct f2fs_summary *sum, int type,
struct f2fs_io_info *fio);
void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
block_t blkaddr, unsigned int blkcnt);
-void f2fs_wait_on_page_writeback(struct page *page,
- enum page_type type, bool ordered, bool locked);
+void f2fs_folio_wait_writeback(struct folio *folio, enum page_type type,
+ bool ordered, bool locked);
+#define f2fs_wait_on_page_writeback(page, type, ordered, locked) \
+ f2fs_folio_wait_writeback(page_folio(page), type, ordered, locked)
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
block_t len);
@@ -3793,6 +3973,11 @@ unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi,
unsigned int segno);
+static inline struct inode *fio_inode(struct f2fs_io_info *fio)
+{
+ return fio->folio->mapping->host;
+}
+
#define DEF_FRAGMENT_SIZE 4
#define MIN_FRAGMENT_SIZE 1
#define MAX_FRAGMENT_SIZE 512
@@ -3809,10 +3994,10 @@ static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi)
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
unsigned char reason);
void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi);
-struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
-struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
-struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
-struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
+struct folio *f2fs_grab_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index);
+struct folio *f2fs_get_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index);
+struct folio *f2fs_get_meta_folio_retry(struct f2fs_sb_info *sbi, pgoff_t index);
+struct folio *f2fs_get_tmp_folio(struct f2fs_sb_info *sbi, pgoff_t index);
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
block_t blkaddr, int type);
bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi,
@@ -3857,7 +4042,7 @@ void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
*/
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
-bool f2fs_is_cp_guaranteed(struct page *page);
+bool f2fs_is_cp_guaranteed(const struct folio *folio);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
@@ -3865,10 +4050,10 @@ void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
- struct inode *inode, struct page *page,
+ struct inode *inode, struct folio *folio,
nid_t ino, enum page_type type);
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
- struct bio **bio, struct page *page);
+ struct bio **bio, struct folio *folio);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
@@ -3882,14 +4067,14 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
-struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
- blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs);
-struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
- pgoff_t *next_pgofs);
-struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
+struct folio *f2fs_get_read_data_folio(struct inode *inode, pgoff_t index,
+ blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs);
+struct folio *f2fs_find_data_folio(struct inode *inode, pgoff_t index,
+ pgoff_t *next_pgofs);
+struct folio *f2fs_get_lock_data_folio(struct inode *inode, pgoff_t index,
bool for_write);
-struct page *f2fs_get_new_data_page(struct inode *inode,
- struct page *ipage, pgoff_t index, bool new_i_size);
+struct folio *f2fs_get_new_data_folio(struct inode *inode,
+ struct folio *ifolio, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
@@ -3977,7 +4162,8 @@ struct f2fs_stat_info {
unsigned long long allocated_data_blocks;
int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
int ndirty_data, ndirty_qdata;
- unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
+ unsigned int ndirty_dirs, ndirty_files, ndirty_all;
+ unsigned int nquota_files, ndonate_files;
int nats, dirty_nats, sits, dirty_sits;
int free_nids, avail_nids, alloc_nids;
int total_count, utilization;
@@ -4008,6 +4194,7 @@ struct f2fs_stat_info {
int gc_secs[2][2];
int tot_blks, data_blks, node_blks;
int bg_data_blks, bg_node_blks;
+ int blkoff[NR_CURSEG_TYPE];
int curseg[NR_CURSEG_TYPE];
int cursec[NR_CURSEG_TYPE];
int curzone[NR_CURSEG_TYPE];
@@ -4206,27 +4393,26 @@ extern struct kmem_cache *f2fs_inode_entry_slab;
* inline.c
*/
bool f2fs_may_inline_data(struct inode *inode);
-bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage);
+bool f2fs_sanity_check_inline_data(struct inode *inode, struct folio *ifolio);
bool f2fs_may_inline_dentry(struct inode *inode);
-void f2fs_do_read_inline_data(struct folio *folio, struct page *ipage);
-void f2fs_truncate_inline_inode(struct inode *inode,
- struct page *ipage, u64 from);
+void f2fs_do_read_inline_data(struct folio *folio, struct folio *ifolio);
+void f2fs_truncate_inline_inode(struct inode *inode, struct folio *ifolio,
+ u64 from);
int f2fs_read_inline_data(struct inode *inode, struct folio *folio);
-int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
+int f2fs_convert_inline_folio(struct dnode_of_data *dn, struct folio *folio);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct folio *folio);
-int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
+int f2fs_recover_inline_data(struct inode *inode, struct folio *nfolio);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
- const struct f2fs_filename *fname,
- struct page **res_page);
+ const struct f2fs_filename *fname, struct folio **res_folio,
+ bool use_hash);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
- struct page *ipage);
+ struct folio *ifolio);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
- struct page *page, struct inode *dir,
- struct inode *inode);
+ struct folio *folio, struct inode *dir, struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
struct fscrypt_str *fstr);
@@ -4241,13 +4427,15 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
struct shrink_control *sc);
+unsigned int f2fs_donate_files(void);
+void f2fs_reclaim_caches(unsigned int reclaim_caches_kb);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
/*
* extent_cache.c
*/
-bool sanity_check_extent_cache(struct inode *inode, struct page *ipage);
+bool sanity_check_extent_cache(struct inode *inode, struct folio *ifolio);
void f2fs_init_extent_tree(struct inode *inode);
void f2fs_drop_extent_tree(struct inode *inode);
void f2fs_destroy_extent_node(struct inode *inode);
@@ -4257,7 +4445,7 @@ int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);
/* read extent cache ops */
-void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage);
+void f2fs_init_read_extent_tree(struct inode *inode, struct folio *ifolio);
bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
struct extent_info *ei);
bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index,
@@ -4337,20 +4525,20 @@ enum cluster_check_type {
CLUSTER_COMPR_BLKS, /* return # of compressed blocks in a cluster */
CLUSTER_RAW_BLKS /* return # of raw blocks in a cluster */
};
-bool f2fs_is_compressed_page(struct page *page);
-struct page *f2fs_compress_control_page(struct page *page);
+bool f2fs_is_compressed_page(struct folio *folio);
+struct folio *f2fs_compress_control_folio(struct folio *folio);
int f2fs_prepare_compress_overwrite(struct inode *inode,
struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
pgoff_t index, unsigned copied);
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
-void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
+void f2fs_compress_write_end_io(struct bio *bio, struct folio *folio);
bool f2fs_is_compress_backend_ready(struct inode *inode);
bool f2fs_is_compress_level_valid(int alg, int lvl);
int __init f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
-void f2fs_end_read_compressed_page(struct page *page, bool failed,
+void f2fs_end_read_compressed_page(struct folio *folio, bool failed,
block_t blkaddr, bool in_task);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
@@ -4373,7 +4561,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
bool in_task);
-void f2fs_put_page_dic(struct page *page, bool in_task);
+void f2fs_put_folio_dic(struct folio *folio, bool in_task);
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
unsigned int ofs_in_node);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
@@ -4386,10 +4574,9 @@ void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
-void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
-void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
- nid_t ino, block_t blkaddr);
-bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
+void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
+ block_t blkaddr, unsigned int len);
+bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi, struct folio *folio,
block_t blkaddr);
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
#define inc_compr_inode_stat(inode) \
@@ -4405,7 +4592,7 @@ void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
sbi->compr_saved_block += diff; \
} while (0)
#else
-static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
+static inline bool f2fs_is_compressed_page(struct folio *folio) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
{
if (!f2fs_compressed_file(inode))
@@ -4414,7 +4601,7 @@ static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
return false;
}
static inline bool f2fs_is_compress_level_valid(int alg, int lvl) { return false; }
-static inline struct page *f2fs_compress_control_page(struct page *page)
+static inline struct folio *f2fs_compress_control_folio(struct folio *folio)
{
WARN_ON_ONCE(1);
return ERR_PTR(-EINVAL);
@@ -4423,12 +4610,12 @@ static inline int __init f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic,
bool in_task) { }
-static inline void f2fs_end_read_compressed_page(struct page *page,
+static inline void f2fs_end_read_compressed_page(struct folio *folio,
bool failed, block_t blkaddr, bool in_task)
{
WARN_ON_ONCE(1);
}
-static inline void f2fs_put_page_dic(struct page *page, bool in_task)
+static inline void f2fs_put_folio_dic(struct folio *folio, bool in_task)
{
WARN_ON_ONCE(1);
}
@@ -4441,12 +4628,10 @@ static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
-static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
- block_t blkaddr) { }
-static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
- struct page *page, nid_t ino, block_t blkaddr) { }
-static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
- struct page *page, block_t blkaddr) { return false; }
+static inline void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
+ block_t blkaddr, unsigned int len) { }
+static inline bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi,
+ struct folio *folio, block_t blkaddr) { return false; }
static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
nid_t ino) { }
#define inc_compr_inode_stat(inode) do { } while (0)
@@ -4498,7 +4683,7 @@ static inline bool f2fs_disable_compressed_file(struct inode *inode)
f2fs_up_write(&fi->i_sem);
return true;
}
- if (f2fs_is_mmap_file(inode) ||
+ if (f2fs_is_mmap_file(inode) || atomic_read(&fi->writeback) ||
(S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) {
f2fs_up_write(&fi->i_sem);
return false;
@@ -4534,14 +4719,19 @@ F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
F2FS_FEATURE_FUNCS(readonly, RO);
F2FS_FEATURE_FUNCS(device_alias, DEVICE_ALIAS);
+F2FS_FEATURE_FUNCS(packed_ssa, PACKED_SSA);
#ifdef CONFIG_BLK_DEV_ZONED
-static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
- block_t blkaddr)
+static inline bool f2fs_zone_is_seq(struct f2fs_sb_info *sbi, int devi,
+ unsigned int zone)
{
- unsigned int zno = blkaddr / sbi->blocks_per_blkz;
+ return test_bit(zone, FDEV(devi).blkz_seq);
+}

-	return test_bit(zno, FDEV(devi).blkz_seq);
+static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
+ block_t blkaddr)
+{
+ return f2fs_zone_is_seq(sbi, devi, blkaddr / sbi->blocks_per_blkz);
}
#endif
@@ -4584,6 +4774,18 @@ static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
return false;
}
+static inline unsigned int f2fs_hw_discard_granularity(struct f2fs_sb_info *sbi)
+{
+ int i = 1;
+ unsigned int discard_granularity = bdev_discard_granularity(sbi->sb->s_bdev);
+
+ if (f2fs_is_multi_device(sbi))
+ for (; i < sbi->s_ndevs && !bdev_is_zoned(FDEV(i).bdev); i++)
+ discard_granularity = max_t(unsigned int, discard_granularity,
+ bdev_discard_granularity(FDEV(i).bdev));
+ return discard_granularity;
+}
+
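The added f2fs_hw_discard_granularity() helper is what the reworked f2fs_ioc_fitrim() further below feeds into range.minlen: on a multi-device filesystem it walks the extra devices (stopping at the first zoned one) and keeps the largest per-device discard granularity. A minimal userspace sketch of that max-reduction, with dev_granularity() as a hypothetical stand-in for bdev_discard_granularity() and made-up values:

#include <stdio.h>

/* hypothetical stand-in for bdev_discard_granularity(); values are made up */
static unsigned int dev_granularity(int devi)
{
	static const unsigned int gran[] = { 4096, 65536, 16384 };

	return gran[devi];
}

/* keep the largest granularity across devices, as the new helper does */
static unsigned int hw_discard_granularity(int ndevs)
{
	unsigned int granularity = dev_granularity(0);

	for (int i = 1; i < ndevs; i++)
		if (dev_granularity(i) > granularity)
			granularity = dev_granularity(i);

	return granularity;
}

int main(void)
{
	/* FITRIM then clamps the user-supplied minlen up to this value */
	printf("minlen floor: %u\n", hw_discard_granularity(3));
	return 0;
}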
static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
@@ -4613,15 +4815,31 @@ static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}
-static inline bool f2fs_valid_pinned_area(struct f2fs_sb_info *sbi,
+static inline bool f2fs_is_sequential_zone_area(struct f2fs_sb_info *sbi,
block_t blkaddr)
{
if (f2fs_sb_has_blkzoned(sbi)) {
+#ifdef CONFIG_BLK_DEV_ZONED
int devi = f2fs_target_device_index(sbi, blkaddr);
- return !bdev_is_zoned(FDEV(devi).bdev);
+ if (!bdev_is_zoned(FDEV(devi).bdev))
+ return false;
+
+ if (f2fs_is_multi_device(sbi)) {
+ if (blkaddr < FDEV(devi).start_blk ||
+ blkaddr > FDEV(devi).end_blk) {
+ f2fs_err(sbi, "Invalid block %x", blkaddr);
+ return false;
+ }
+ blkaddr -= FDEV(devi).start_blk;
+ }
+
+ return f2fs_blkz_is_seq(sbi, devi, blkaddr);
+#else
+ return false;
+#endif
}
- return true;
+ return false;
}
static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
@@ -4676,10 +4894,11 @@ static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
#ifdef CONFIG_F2FS_FAULT_INJECTION
extern int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
- unsigned long type);
+ unsigned long type, enum fault_option fo);
#else
static inline int f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
- unsigned long rate, unsigned long type)
+ unsigned long rate, unsigned long type,
+ enum fault_option fo)
{
return 0;
}
@@ -4703,10 +4922,31 @@ static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi)
return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK;
}
-static inline void f2fs_io_schedule_timeout(long timeout)
+static inline void __f2fs_schedule_timeout(long timeout, bool io)
{
set_current_state(TASK_UNINTERRUPTIBLE);
- io_schedule_timeout(timeout);
+ if (io)
+ io_schedule_timeout(timeout);
+ else
+ schedule_timeout(timeout);
+}
+
+#define f2fs_io_schedule_timeout(timeout) \
+ __f2fs_schedule_timeout(timeout, true)
+#define f2fs_schedule_timeout(timeout) \
+ __f2fs_schedule_timeout(timeout, false)
+
+static inline void f2fs_io_schedule_timeout_killable(long timeout)
+{
+ while (timeout) {
+ if (fatal_signal_pending(current))
+ return;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
+ if (timeout <= DEFAULT_SCHEDULE_TIMEOUT)
+ return;
+ timeout -= DEFAULT_SCHEDULE_TIMEOUT;
+ }
}
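f2fs_io_schedule_timeout_killable() above sleeps in DEFAULT_SCHEDULE_TIMEOUT-sized slices so a fatal signal is noticed between slices instead of only after the full timeout has elapsed. A standalone sketch of that chunked-wait pattern; fatal_signal_seen() and sleep_slice() are simplified stand-ins, not the kernel's fatal_signal_pending()/io_schedule_timeout():

#include <stdbool.h>
#include <stdio.h>

#define SLICE 100	/* stand-in for DEFAULT_SCHEDULE_TIMEOUT (in jiffies) */

/* simplified stand-ins for the kernel's signal check and IO sleep */
static bool fatal_signal_seen(void) { return false; }
static void sleep_slice(long t) { printf("sleep %ld\n", t); }

/* sleep `timeout` in SLICE-sized chunks, bailing out on a fatal signal */
static void io_schedule_timeout_killable_sketch(long timeout)
{
	while (timeout) {
		if (fatal_signal_seen())
			return;
		sleep_slice(SLICE);
		if (timeout <= SLICE)
			return;
		timeout -= SLICE;
	}
}

int main(void)
{
	io_schedule_timeout_killable_sketch(350);	/* sleeps four slices */
	return 0;
}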
static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi,
@@ -4738,13 +4978,13 @@ static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi,
int i = 0;
do {
- struct page *page;
+		struct folio *folio;

-		page = find_get_page(META_MAPPING(sbi), blkaddr + i);
- if (page) {
- if (folio_test_writeback(page_folio(page)))
+ folio = filemap_get_folio(META_MAPPING(sbi), blkaddr + i);
+ if (!IS_ERR(folio)) {
+ if (folio_test_writeback(folio))
need_submit = true;
- f2fs_put_page(page, 0);
+ f2fs_folio_put(folio, false);
}
} while (++i < cnt && !need_submit);
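The hunk above is one of many in this patch where find_get_page(), which returns NULL on a cache miss, is replaced by filemap_get_folio(), which returns ERR_PTR(-ENOENT); that is why the `if (page)` tests become `if (!IS_ERR(folio))`. A self-contained sketch of the error-pointer convention, using toy ERR_PTR/IS_ERR helpers and a hypothetical lookup_folio():

#include <errno.h>
#include <stdio.h>

/* toy versions of the kernel's error-pointer helpers, for illustration */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct folio { unsigned long index; };

/* hypothetical lookup: a miss yields ERR_PTR(-ENOENT), never NULL */
static struct folio *lookup_folio(unsigned long index)
{
	static struct folio cached = { .index = 3 };

	return index == cached.index ? &cached : ERR_PTR(-ENOENT);
}

int main(void)
{
	struct folio *folio = lookup_folio(5);

	if (!IS_ERR(folio))		/* replaces the old `if (page)` check */
		printf("hit at %lu\n", folio->index);
	else
		printf("miss: %ld\n", PTR_ERR(folio));
	return 0;
}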
@@ -4758,10 +4998,10 @@ static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi,
}
static inline void f2fs_invalidate_internal_cache(struct f2fs_sb_info *sbi,
- block_t blkaddr)
+ block_t blkaddr, unsigned int len)
{
- f2fs_truncate_meta_inode_pages(sbi, blkaddr, 1);
- f2fs_invalidate_compress_page(sbi, blkaddr);
+ f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
+ f2fs_invalidate_compress_pages_range(sbi, blkaddr, len);
}
#define EFSBADCRC EBADMSG /* Bad CRC detected */
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index aa9679b3d8e4..d7047ca6b98d 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -35,6 +35,25 @@
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>
+static void f2fs_zero_post_eof_page(struct inode *inode,
+ loff_t new_size, bool lock)
+{
+ loff_t old_size = i_size_read(inode);
+
+ if (old_size >= new_size)
+ return;
+
+ if (mapping_empty(inode->i_mapping))
+ return;
+
+ if (lock)
+ filemap_invalidate_lock(inode->i_mapping);
+ /* zero or drop pages only in range of [old_size, new_size] */
+ truncate_inode_pages_range(inode->i_mapping, old_size, new_size);
+ if (lock)
+ filemap_invalidate_unlock(inode->i_mapping);
+}
+
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
struct inode *inode = file_inode(vmf->vma->vm_file);
@@ -103,8 +122,11 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
+ f2fs_zero_post_eof_page(inode, (folio->index + 1) << PAGE_SHIFT, true);
+
file_update_time(vmf->vma->vm_file);
filemap_invalidate_lock_shared(inode->i_mapping);
+
folio_lock(folio);
if (unlikely(folio->mapping != inode->i_mapping ||
folio_pos(folio) > i_size_read(inode) ||
@@ -131,7 +153,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
goto out_sem;
}
- f2fs_wait_on_page_writeback(folio_page(folio, 0), DATA, false, true);
+ f2fs_folio_wait_writeback(folio, DATA, false, true);
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
@@ -226,12 +248,13 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
- struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
+ struct folio *i = filemap_get_folio(NODE_MAPPING(sbi), ino);
bool ret = false;
/* But we need to avoid that there are some inode updates */
- if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
+ if ((!IS_ERR(i) && folio_test_dirty(i)) ||
+ f2fs_need_inode_block_update(sbi, ino))
ret = true;
- f2fs_put_page(i, 0);
+ f2fs_folio_put(i, false);
return ret;
}
@@ -260,7 +283,6 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = LONG_MAX,
- .for_reclaim = 0,
};
unsigned int seq_id = 0;
@@ -403,7 +425,7 @@ static bool __found_offset(struct address_space *mapping,
bool compressed_cluster = false;
if (f2fs_compressed_file(inode)) {
- block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_folio,
ALIGN_DOWN(dn->ofs_in_node, F2FS_I(inode)->i_cluster_size));
compressed_cluster = first_blkaddr == COMPRESS_ADDR;
@@ -473,7 +495,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
}
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(dn.node_folio, inode);
/* find data/hole in dnode block */
for (; dn.ofs_in_node < end_offset;
@@ -532,8 +554,9 @@ static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
return -EINVAL;
}
-static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int f2fs_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
struct inode *inode = file_inode(file);
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
@@ -543,7 +566,7 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
return -EOPNOTSUPP;
file_accessed(file);
- vma->vm_ops = &f2fs_file_vm_ops;
+ desc->vm_ops = &f2fs_file_vm_ops;
f2fs_down_read(&F2FS_I(inode)->i_sem);
set_inode_flag(inode, FI_MMAP_FILE);
@@ -554,19 +577,21 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
static int finish_preallocate_blocks(struct inode *inode)
{
- int ret;
+ int ret = 0;
+	bool opened;

-	inode_lock(inode);
- if (is_inode_flag_set(inode, FI_OPENED_FILE)) {
- inode_unlock(inode);
+ f2fs_down_read(&F2FS_I(inode)->i_sem);
+ opened = is_inode_flag_set(inode, FI_OPENED_FILE);
+ f2fs_up_read(&F2FS_I(inode)->i_sem);
+ if (opened)
return 0;
- }
- if (!file_should_truncate(inode)) {
- set_inode_flag(inode, FI_OPENED_FILE);
- inode_unlock(inode);
- return 0;
- }
+ inode_lock(inode);
+ if (is_inode_flag_set(inode, FI_OPENED_FILE))
+ goto out_unlock;
+
+ if (!file_should_truncate(inode))
+ goto out_update;
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
@@ -576,16 +601,17 @@ static int finish_preallocate_blocks(struct inode *inode)
filemap_invalidate_unlock(inode->i_mapping);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
- if (!ret)
- set_inode_flag(inode, FI_OPENED_FILE);
-
- inode_unlock(inode);
if (ret)
- return ret;
+ goto out_unlock;
file_dont_truncate(inode);
- return 0;
+out_update:
+ f2fs_down_write(&F2FS_I(inode)->i_sem);
+ set_inode_flag(inode, FI_OPENED_FILE);
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
+out_unlock:
+ inode_unlock(inode);
+ return ret;
}
static int f2fs_file_open(struct inode *inode, struct file *filp)
@@ -609,7 +635,10 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
if (err)
return err;
- return finish_preallocate_blocks(inode);
+ err = finish_preallocate_blocks(inode);
+ if (!err)
+ atomic_inc(&F2FS_I(inode)->open_count);
+ return err;
}
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
@@ -621,8 +650,11 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
int cluster_index = 0, valid_blocks = 0;
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
+ block_t blkstart;
+	int blklen = 0;

-	addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;
+ addr = get_dnode_addr(dn->inode, dn->node_folio) + ofs;
+ blkstart = le32_to_cpu(*addr);
/* Assumption: truncation starts with cluster */
for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
@@ -638,26 +670,44 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
}
if (blkaddr == NULL_ADDR)
- continue;
+ goto next;
f2fs_set_data_blkaddr(dn, NULL_ADDR);
if (__is_valid_data_blkaddr(blkaddr)) {
if (time_to_inject(sbi, FAULT_BLKADDR_CONSISTENCE))
- continue;
+ goto next;
if (!f2fs_is_valid_blkaddr_raw(sbi, blkaddr,
DATA_GENERIC_ENHANCE))
- continue;
+ goto next;
if (compressed_cluster)
valid_blocks++;
}
- f2fs_invalidate_blocks(sbi, blkaddr);
+ if (blkstart + blklen == blkaddr) {
+ blklen++;
+ } else {
+ f2fs_invalidate_blocks(sbi, blkstart, blklen);
+ blkstart = blkaddr;
+ blklen = 1;
+ }
if (!released || blkaddr != COMPRESS_ADDR)
nr_free++;
+
+ continue;
+
+next:
+ if (blklen)
+ f2fs_invalidate_blocks(sbi, blkstart, blklen);
+
+ blkstart = le32_to_cpu(*(addr + 1));
+ blklen = 0;
}
+ if (blklen)
+ f2fs_invalidate_blocks(sbi, blkstart, blklen);
+
if (compressed_cluster)
f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
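The reworked loop above no longer invalidates blocks one at a time: it accumulates contiguous block addresses into a (blkstart, blklen) run and only calls f2fs_invalidate_blocks() when the run breaks (a hole, a discontiguous address, or the end of the range). A minimal userspace sketch of that run-length batching; invalidate_run() is a hypothetical stand-in for the batched f2fs_invalidate_blocks():

#include <stdio.h>

typedef unsigned int block_t;
#define NULL_ADDR 0	/* unallocated slot, as in f2fs */

/* hypothetical stand-in for the batched f2fs_invalidate_blocks() call */
static void invalidate_run(block_t start, int len)
{
	if (len)
		printf("invalidate [%u, +%d)\n", start, len);
}

/* coalesce contiguous block addresses into runs before invalidating them */
static void invalidate_blocks_batched(const block_t *addr, int count)
{
	block_t blkstart = addr[0];
	int blklen = 0;

	for (int i = 0; i < count; i++) {
		block_t blkaddr = addr[i];

		if (blkaddr == NULL_ADDR) {		/* hole: flush the run */
			invalidate_run(blkstart, blklen);
			blkstart = (i + 1 < count) ? addr[i + 1] : NULL_ADDR;
			blklen = 0;
			continue;
		}
		if (blkstart + blklen == blkaddr) {	/* extends the run */
			blklen++;
		} else {				/* discontiguous: flush, restart */
			invalidate_run(blkstart, blklen);
			blkstart = blkaddr;
			blklen = 1;
		}
	}
	invalidate_run(blkstart, blklen);		/* flush the tail */
}

int main(void)
{
	block_t blocks[] = { 100, 101, 102, NULL_ADDR, 200, 201, 300 };

	invalidate_blocks_batched(blocks, 7);	/* three runs: 3, 2 and 1 blocks */
	return 0;
}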
@@ -667,7 +717,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
* once we invalidate valid blkaddr in range [ofs, ofs + count],
* we will invalidate all blkaddr in the whole range.
*/
- fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
+ fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_folio),
dn->inode) + ofs;
f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
f2fs_update_age_extent_cache_range(dn, fofs, len);
@@ -686,31 +736,33 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
loff_t offset = from & (PAGE_SIZE - 1);
pgoff_t index = from >> PAGE_SHIFT;
struct address_space *mapping = inode->i_mapping;
- struct page *page;
+ struct folio *folio;
if (!offset && !cache_only)
return 0;
if (cache_only) {
- page = find_lock_page(mapping, index);
- if (page && PageUptodate(page))
+ folio = filemap_lock_folio(mapping, index);
+ if (IS_ERR(folio))
+ return 0;
+ if (folio_test_uptodate(folio))
goto truncate_out;
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return 0;
}
- page = f2fs_get_lock_data_page(inode, index, true);
- if (IS_ERR(page))
- return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
+ folio = f2fs_get_lock_data_folio(inode, index, true);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio) == -ENOENT ? 0 : PTR_ERR(folio);
truncate_out:
- f2fs_wait_on_page_writeback(page, DATA, true, true);
- zero_user(page, offset, PAGE_SIZE - offset);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
+ folio_zero_segment(folio, offset, folio_size(folio));
/* An encrypted inode should have a key and truncate the last page. */
f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
if (!cache_only)
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
return 0;
}
@@ -720,7 +772,7 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
struct dnode_of_data dn;
pgoff_t free_from;
int count = 0, err = 0;
- struct page *ipage;
+ struct folio *ifolio;
bool truncate_page = false;
trace_f2fs_truncate_blocks_enter(inode, from);
@@ -738,35 +790,33 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
if (lock)
f2fs_lock_op(sbi);
- ipage = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(ifolio)) {
+ err = PTR_ERR(ifolio);
goto out;
}
if (IS_DEVICE_ALIASING(inode)) {
struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
struct extent_info ei = et->largest;
- unsigned int i;
- for (i = 0; i < ei.len; i++)
- f2fs_invalidate_blocks(sbi, ei.blk + i);
+ f2fs_invalidate_blocks(sbi, ei.blk, ei.len);
dec_valid_block_count(sbi, inode, ei.len);
f2fs_update_time(sbi, REQ_TIME);
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
goto out;
}
if (f2fs_has_inline_data(inode)) {
- f2fs_truncate_inline_inode(inode, ipage, from);
- f2fs_put_page(ipage, 1);
+ f2fs_truncate_inline_inode(inode, ifolio, from);
+ f2fs_folio_put(ifolio, true);
truncate_page = true;
goto out;
}
- set_new_dnode(&dn, inode, ipage, NULL, 0);
+ set_new_dnode(&dn, inode, ifolio, NULL, 0);
err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
if (err) {
if (err == -ENOENT)
@@ -774,12 +824,12 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
goto out;
}
- count = ADDRS_PER_PAGE(dn.node_page, inode);
+ count = ADDRS_PER_PAGE(dn.node_folio, inode);
count -= dn.ofs_in_node;
f2fs_bug_on(sbi, count < 0);
- if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
+ if (dn.ofs_in_node || IS_INODE(dn.node_folio)) {
f2fs_truncate_data_blocks_range(&dn, count);
free_from += count;
}
@@ -860,8 +910,16 @@ int f2fs_truncate(struct inode *inode)
/* we should check inline_data size */
if (!f2fs_may_inline_data(inode)) {
err = f2fs_convert_inline_inode(inode);
- if (err)
+ if (err) {
+ /*
+ * Always truncate page #0 to avoid page cache
+ * leak in evict() path.
+ */
+ truncate_inode_pages_range(inode->i_mapping,
+ F2FS_BLK_TO_BYTES(0),
+ F2FS_BLK_END_BYTES(0));
return err;
+ }
}
err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
@@ -1002,11 +1060,24 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
{
struct inode *inode = d_inode(dentry);
struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int err;
- if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
+ if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
+ err = setattr_prepare(idmap, dentry, attr);
+ if (err)
+ return err;
+
+ err = fscrypt_prepare_setattr(dentry, attr);
+ if (err)
+ return err;
+
+ err = fsverity_prepare_setattr(dentry, attr);
+ if (err)
+ return err;
+
if (unlikely(IS_IMMUTABLE(inode)))
return -EPERM;
@@ -1023,20 +1094,19 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
!IS_ALIGNED(attr->ia_size,
F2FS_BLK_TO_BYTES(fi->i_cluster_size)))
return -EINVAL;
+ /*
+ * To prevent scattered pin block generation, we don't allow
+ * smaller/equal size unaligned truncation for pinned file.
+ * We only support overwrite IO to pinned file, so don't
+ * care about larger size truncation.
+ */
+ if (f2fs_is_pinned_file(inode) &&
+ attr->ia_size <= i_size_read(inode) &&
+ !IS_ALIGNED(attr->ia_size,
+ F2FS_BLK_TO_BYTES(CAP_BLKS_PER_SEC(sbi))))
+ return -EINVAL;
}
- err = setattr_prepare(idmap, dentry, attr);
- if (err)
- return err;
-
- err = fscrypt_prepare_setattr(dentry, attr);
- if (err)
- return err;
-
- err = fsverity_prepare_setattr(dentry, attr);
- if (err)
- return err;
-
if (is_quota_modification(idmap, inode, attr)) {
err = f2fs_dquot_initialize(inode);
if (err)
@@ -1044,12 +1114,11 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
}
if (i_uid_needs_update(idmap, attr, inode) ||
i_gid_needs_update(idmap, attr, inode)) {
- f2fs_lock_op(F2FS_I_SB(inode));
+ f2fs_lock_op(sbi);
err = dquot_transfer(idmap, inode, attr);
if (err) {
- set_sbi_flag(F2FS_I_SB(inode),
- SBI_QUOTA_NEED_REPAIR);
- f2fs_unlock_op(F2FS_I_SB(inode));
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+ f2fs_unlock_op(sbi);
return err;
}
/*
@@ -1059,7 +1128,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
i_uid_update(idmap, attr, inode);
i_gid_update(idmap, attr, inode);
f2fs_mark_inode_dirty_sync(inode, true);
- f2fs_unlock_op(F2FS_I_SB(inode));
+ f2fs_unlock_op(sbi);
}
if (attr->ia_valid & ATTR_SIZE) {
@@ -1085,6 +1154,8 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
+ if (attr->ia_size > old_size)
+ f2fs_zero_post_eof_page(inode, attr->ia_size, false);
truncate_setsize(inode, attr->ia_size);
if (attr->ia_size <= old_size)
@@ -1120,7 +1191,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
f2fs_mark_inode_dirty_sync(inode, true);
/* inode change will produce dirty node pages flushed by checkpoint */
- f2fs_balance_fs(F2FS_I_SB(inode), true);
+ f2fs_balance_fs(sbi, true);
return err;
}
@@ -1140,7 +1211,7 @@ static int fill_zero(struct inode *inode, pgoff_t index,
loff_t start, loff_t len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct page *page;
+ struct folio *folio;
if (!len)
return 0;
@@ -1148,16 +1219,16 @@ static int fill_zero(struct inode *inode, pgoff_t index,
f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
- page = f2fs_get_new_data_page(inode, NULL, index, false);
+ folio = f2fs_get_new_data_folio(inode, NULL, index, false);
f2fs_unlock_op(sbi);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ if (IS_ERR(folio))
+		return PTR_ERR(folio);

-	f2fs_wait_on_page_writeback(page, DATA, true, true);
- zero_user(page, start, len);
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
+ folio_zero_range(folio, start, len);
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
return 0;
}
@@ -1180,7 +1251,7 @@ int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
return err;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(dn.node_folio, inode);
count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
@@ -1203,6 +1274,8 @@ static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
if (ret)
return ret;
+ f2fs_zero_post_eof_page(inode, offset + len, true);
+
pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
@@ -1275,7 +1348,7 @@ next_dnode:
goto next;
}
- done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
+ done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_folio, inode) -
dn.ofs_in_node, len);
for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
*blkaddr = f2fs_data_blkaddr(&dn);
@@ -1323,7 +1396,7 @@ static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
if (ret) {
dec_valid_block_count(sbi, inode, 1);
- f2fs_invalidate_blocks(sbi, *blkaddr);
+ f2fs_invalidate_blocks(sbi, *blkaddr, 1);
} else {
f2fs_update_data_blkaddr(&dn, *blkaddr);
}
@@ -1364,7 +1437,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
}
ilen = min((pgoff_t)
- ADDRS_PER_PAGE(dn.node_page, dst_inode) -
+ ADDRS_PER_PAGE(dn.node_folio, dst_inode) -
dn.ofs_in_node, len - i);
do {
dn.data_blkaddr = f2fs_data_blkaddr(&dn);
@@ -1389,26 +1462,26 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
f2fs_put_dnode(&dn);
} else {
- struct page *psrc, *pdst;
+		struct folio *fsrc, *fdst;

-		psrc = f2fs_get_lock_data_page(src_inode,
+ fsrc = f2fs_get_lock_data_folio(src_inode,
src + i, true);
- if (IS_ERR(psrc))
- return PTR_ERR(psrc);
- pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
+ if (IS_ERR(fsrc))
+ return PTR_ERR(fsrc);
+ fdst = f2fs_get_new_data_folio(dst_inode, NULL, dst + i,
true);
- if (IS_ERR(pdst)) {
- f2fs_put_page(psrc, 1);
- return PTR_ERR(pdst);
+ if (IS_ERR(fdst)) {
+ f2fs_folio_put(fsrc, true);
+ return PTR_ERR(fdst);
}
- f2fs_wait_on_page_writeback(pdst, DATA, true, true);
+		f2fs_folio_wait_writeback(fdst, DATA, true, true);

-		memcpy_page(pdst, 0, psrc, 0, PAGE_SIZE);
- set_page_dirty(pdst);
- set_page_private_gcing(pdst);
- f2fs_put_page(pdst, 1);
- f2fs_put_page(psrc, 1);
+ memcpy_folio(fdst, 0, fsrc, 0, PAGE_SIZE);
+ folio_mark_dirty(fdst);
+ folio_set_f2fs_gcing(fdst);
+ f2fs_folio_put(fdst, true);
+ f2fs_folio_put(fsrc, true);
ret = f2fs_truncate_hole(src_inode,
src + i, src + i + 1);
@@ -1486,6 +1559,8 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
+ f2fs_zero_post_eof_page(inode, offset + len, false);
+
f2fs_lock_op(sbi);
f2fs_drop_extent_tree(inode);
truncate_pagecache(inode, offset);
@@ -1575,12 +1650,15 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
break;
}
- f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
+ f2fs_invalidate_blocks(sbi, dn->data_blkaddr, 1);
f2fs_set_data_blkaddr(dn, NEW_ADDR);
}
- f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
- f2fs_update_age_extent_cache_range(dn, start, index - start);
+ if (index > start) {
+ f2fs_update_read_extent_cache_range(dn, start, 0,
+ index - start);
+ f2fs_update_age_extent_cache_range(dn, start, index - start);
+ }
return ret;
}
@@ -1607,6 +1685,8 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
if (ret)
return ret;
+ f2fs_zero_post_eof_page(inode, offset + len, true);
+
pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
@@ -1654,7 +1734,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
goto out;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(dn.node_folio, inode);
end = min(pg_end, end_offset - dn.ofs_in_node + index);
ret = f2fs_do_zero_range(&dn, index, end);
@@ -1738,6 +1818,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
/* avoid gc operation during block exchange */
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(mapping);
+
+ f2fs_zero_post_eof_page(inode, offset + len, false);
truncate_pagecache(inode, offset);
while (!ret && idx > pg_start) {
@@ -1795,6 +1877,8 @@ static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
if (err)
return err;
+ f2fs_zero_post_eof_page(inode, offset + len, true);
+
f2fs_balance_fs(sbi, true);
pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
@@ -1815,18 +1899,31 @@ static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
map.m_len = sec_blks;
next_alloc:
- if (has_not_enough_free_secs(sbi, 0, f2fs_sb_has_blkzoned(sbi) ?
- ZONED_PIN_SEC_REQUIRED_COUNT :
- GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
+ f2fs_down_write(&sbi->pin_sem);
+
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+ if (has_not_enough_free_secs(sbi, 0, 0)) {
+ f2fs_up_write(&sbi->pin_sem);
+ err = -ENOSPC;
+ f2fs_warn_ratelimited(sbi,
+ "ino:%lu, start:%lu, end:%lu, need to trigger GC to "
+ "reclaim enough free segment when checkpoint is enabled",
+ inode->i_ino, pg_start, pg_end);
+ goto out_err;
+ }
+ }
+
+ if (has_not_enough_free_secs(sbi, 0,
+ sbi->reserved_pin_section)) {
f2fs_down_write(&sbi->gc_lock);
stat_inc_gc_call_count(sbi, FOREGROUND);
err = f2fs_gc(sbi, &gc_control);
- if (err && err != -ENODATA)
+ if (err && err != -ENODATA) {
+ f2fs_up_write(&sbi->pin_sem);
goto out_err;
+ }
}
- f2fs_down_write(&sbi->pin_sem);
-
err = f2fs_allocate_pinning_section(sbi);
if (err) {
f2fs_up_write(&sbi->pin_sem);
@@ -1955,6 +2052,9 @@ out:
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
+ if (atomic_dec_and_test(&F2FS_I(inode)->open_count))
+ f2fs_remove_donate_inode(inode);
+
/*
* f2fs_release_file is called at every close calls. So we should
* not drop any inmemory pages by close called by other process.
@@ -2028,8 +2128,9 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
f2fs_down_write(&fi->i_sem);
if (!f2fs_may_compress(inode) ||
- (S_ISREG(inode->i_mode) &&
- F2FS_HAS_BLOCKS(inode))) {
+ atomic_read(&fi->writeback) ||
+ (S_ISREG(inode->i_mode) &&
+ F2FS_HAS_BLOCKS(inode))) {
f2fs_up_write(&fi->i_sem);
return -EINVAL;
}
@@ -2429,17 +2530,72 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
return ret;
}
+static int f2fs_keep_noreuse_range(struct inode *inode,
+ loff_t offset, loff_t len)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ u64 max_bytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
+ u64 start, end;
+ int ret = 0;
+
+ if (!S_ISREG(inode->i_mode))
+ return 0;
+
+ if (offset >= max_bytes || len > max_bytes ||
+ (offset + len) > max_bytes)
+ return 0;
+
+ start = offset >> PAGE_SHIFT;
+ end = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+
+ inode_lock(inode);
+ if (f2fs_is_atomic_file(inode)) {
+ inode_unlock(inode);
+ return 0;
+ }
+
+ spin_lock(&sbi->inode_lock[DONATE_INODE]);
+ /* let's remove the range, if len = 0 */
+ if (!len) {
+ if (!list_empty(&F2FS_I(inode)->gdonate_list)) {
+ list_del_init(&F2FS_I(inode)->gdonate_list);
+ sbi->donate_files--;
+ if (is_inode_flag_set(inode, FI_DONATE_FINISHED))
+ ret = -EALREADY;
+ else
+ set_inode_flag(inode, FI_DONATE_FINISHED);
+ } else
+ ret = -ENOENT;
+ } else {
+ if (list_empty(&F2FS_I(inode)->gdonate_list)) {
+ list_add_tail(&F2FS_I(inode)->gdonate_list,
+ &sbi->inode_list[DONATE_INODE]);
+ sbi->donate_files++;
+ } else {
+ list_move_tail(&F2FS_I(inode)->gdonate_list,
+ &sbi->inode_list[DONATE_INODE]);
+ }
+ F2FS_I(inode)->donate_start = start;
+ F2FS_I(inode)->donate_end = end - 1;
+ clear_inode_flag(inode, FI_DONATE_FINISHED);
+ }
+ spin_unlock(&sbi->inode_lock[DONATE_INODE]);
+ inode_unlock(inode);
+
+ return ret;
+}
+
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
- struct super_block *sb = inode->i_sb;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct fstrim_range range;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!f2fs_hw_support_discard(F2FS_SB(sb)))
+ if (!f2fs_hw_support_discard(sbi))
return -EOPNOTSUPP;
if (copy_from_user(&range, (struct fstrim_range __user *)arg,
@@ -2450,9 +2606,9 @@ static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
if (ret)
return ret;
- range.minlen = max((unsigned int)range.minlen,
- bdev_discard_granularity(sb->s_bdev));
- ret = f2fs_trim_fs(F2FS_SB(sb), &range);
+ range.minlen = max_t(unsigned int, range.minlen,
+ f2fs_hw_discard_granularity(sbi));
+ ret = f2fs_trim_fs(sbi, &range);
mnt_drop_write_file(filp);
if (ret < 0)
return ret;
@@ -2460,7 +2616,7 @@ static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
if (copy_to_user((struct fstrim_range __user *)arg, &range,
sizeof(range)))
return -EFAULT;
- f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+ f2fs_update_time(sbi, REQ_TIME);
return 0;
}
@@ -2839,19 +2995,19 @@ do_map:
idx = map.m_lblk;
while (idx < map.m_lblk + map.m_len &&
cnt < BLKS_PER_SEG(sbi)) {
- struct page *page;
+			struct folio *folio;

-			page = f2fs_get_lock_data_page(inode, idx, true);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ folio = f2fs_get_lock_data_folio(inode, idx, true);
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
goto clear_out;
}
- f2fs_wait_on_page_writeback(page, DATA, true, true);
+			f2fs_folio_wait_writeback(folio, DATA, true, true);

-			set_page_dirty(page);
- set_page_private_gcing(page);
- f2fs_put_page(page, 1);
+ folio_mark_dirty(folio);
+ folio_set_f2fs_gcing(folio);
+ f2fs_folio_put(folio, true);
idx++;
cnt++;
@@ -3263,7 +3419,7 @@ static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
}
#endif
-int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int f2fs_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -3287,7 +3443,7 @@ int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
}
int f2fs_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa)
+ struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
@@ -3427,6 +3583,23 @@ static int f2fs_ioc_get_dev_alias_file(struct file *filp, unsigned long arg)
(u32 __user *)arg);
}
+static int f2fs_ioc_io_prio(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ __u32 level;
+
+ if (get_user(level, (__u32 __user *)arg))
+ return -EFAULT;
+
+ if (!S_ISREG(inode->i_mode) || level >= F2FS_IOPRIO_MAX)
+ return -EINVAL;
+
+ inode_lock(inode);
+ F2FS_I(inode)->ioprio_hint = level;
+ inode_unlock(inode);
+ return 0;
+}
+
int f2fs_precache_extents(struct inode *inode)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -3613,7 +3786,7 @@ static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
int i;
for (i = 0; i < count; i++) {
- blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ blkaddr = data_blkaddr(dn->inode, dn->node_folio,
dn->ofs_in_node + i);
if (!__is_valid_data_blkaddr(blkaddr))
@@ -3731,7 +3904,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
break;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(dn.node_folio, inode);
count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
count = round_up(count, fi->i_cluster_size);
@@ -3782,7 +3955,7 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
int i;
for (i = 0; i < count; i++) {
- blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ blkaddr = data_blkaddr(dn->inode, dn->node_folio,
dn->ofs_in_node + i);
if (!__is_valid_data_blkaddr(blkaddr))
@@ -3799,7 +3972,7 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
int ret;
for (i = 0; i < cluster_size; i++) {
- blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ blkaddr = data_blkaddr(dn->inode, dn->node_folio,
dn->ofs_in_node + i);
if (i == 0) {
@@ -3909,7 +4082,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
break;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(dn.node_folio, inode);
count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
count = round_up(count, fi->i_cluster_size);
@@ -4073,7 +4246,7 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
goto out;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(dn.node_folio, inode);
count = min(end_offset - dn.ofs_in_node, pg_end - index);
for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
struct block_device *cur_bdev;
@@ -4245,34 +4418,36 @@ static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
{
DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
struct address_space *mapping = inode->i_mapping;
- struct page *page;
+ struct folio *folio;
pgoff_t redirty_idx = page_idx;
- int i, page_len = 0, ret = 0;
+ int page_len = 0, ret = 0;
page_cache_ra_unbounded(&ractl, len, 0);
- for (i = 0; i < len; i++, page_idx++) {
- page = read_cache_page(mapping, page_idx, NULL, NULL);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
+ do {
+ folio = read_cache_folio(mapping, page_idx, NULL, NULL);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
break;
}
- page_len++;
- }
+ page_len += folio_nr_pages(folio) - (page_idx - folio->index);
+ page_idx = folio_next_index(folio);
+	} while (page_len < len);

-	for (i = 0; i < page_len; i++, redirty_idx++) {
- page = find_lock_page(mapping, redirty_idx);
+ do {
+		folio = filemap_lock_folio(mapping, redirty_idx);

-		/* It will never fail, when page has pinned above */
-		f2fs_bug_on(F2FS_I_SB(inode), !page);
+		/* It will never fail, when folio has pinned above */
+		f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(folio));

-		f2fs_wait_on_page_writeback(page, DATA, true, true);
+		f2fs_folio_wait_writeback(folio, DATA, true, true);

-		set_page_dirty(page);
- set_page_private_gcing(page);
- f2fs_put_page(page, 1);
- f2fs_put_page(page, 0);
- }
+ folio_mark_dirty(folio);
+ folio_set_f2fs_gcing(folio);
+ redirty_idx = folio_next_index(folio);
+ folio_unlock(folio);
+ folio_put_refs(folio, 2);
+ } while (redirty_idx < page_idx);
return ret;
}
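redirty_blocks() above now advances folio by folio rather than page by page: the covered length grows by the portion of each folio at or after the current index, and the index jumps with folio_next_index(), so one large folio can account for several pages at once. A standalone sketch of that arithmetic; struct folio here is a toy descriptor, not the kernel type:

#include <stdio.h>

/* toy folio descriptor: first page index plus number of pages it spans */
struct folio {
	unsigned long index;
	unsigned int nr_pages;
};

static unsigned long folio_next_index(const struct folio *folio)
{
	return folio->index + folio->nr_pages;
}

int main(void)
{
	/* pretend the cache holds a 4-page folio at index 8, then single pages */
	struct folio folios[] = { { 8, 4 }, { 12, 1 }, { 13, 1 } };
	unsigned long page_idx = 9;	/* start inside the large folio */
	unsigned int len = 4, page_len = 0, i = 0;

	/* advance folio by folio, as the rewritten read loop does */
	do {
		const struct folio *folio = &folios[i++];

		page_len += folio->nr_pages - (page_idx - folio->index);
		page_idx = folio_next_index(folio);
	} while (page_len < len);

	printf("covered %u pages, next index %lu\n", page_len, page_idx);
	return 0;
}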
@@ -4528,6 +4703,8 @@ static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_ioc_compress_file(filp);
case F2FS_IOC_GET_DEV_ALIAS_FILE:
return f2fs_ioc_get_dev_alias_file(filp, arg);
+ case F2FS_IOC_IO_PRIO:
+ return f2fs_ioc_io_prio(filp, arg);
default:
return -ENOTTY;
}
@@ -4676,6 +4853,7 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct inode *inode = file_inode(iocb->ki_filp);
const loff_t pos = iocb->ki_pos;
ssize_t ret;
+ bool dio;
if (!f2fs_is_compress_backend_ready(inode))
return -EOPNOTSUPP;
@@ -4684,12 +4862,15 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
iov_iter_count(to), READ);
+ dio = f2fs_should_use_dio(inode, iocb, to);
+
/* In LFS mode, if there is inflight dio, wait for its completion */
if (f2fs_lfs_mode(F2FS_I_SB(inode)) &&
- get_pages(F2FS_I_SB(inode), F2FS_DIO_WRITE))
+ get_pages(F2FS_I_SB(inode), F2FS_DIO_WRITE) &&
+ (!f2fs_is_pinned_file(inode) || !dio))
inode_dio_wait(inode);
- if (f2fs_should_use_dio(inode, iocb, to)) {
+ if (dio) {
ret = f2fs_dio_read_iter(iocb, to);
} else {
ret = filemap_read(iocb, to, 0);
@@ -4697,8 +4878,7 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
f2fs_update_iostat(F2FS_I_SB(inode), inode,
APP_BUFFERED_READ_IO, ret);
}
- if (trace_f2fs_dataread_end_enabled())
- trace_f2fs_dataread_end(inode, pos, ret);
+ trace_f2fs_dataread_end(inode, pos, ret);
return ret;
}
@@ -4721,8 +4901,7 @@ static ssize_t f2fs_file_splice_read(struct file *in, loff_t *ppos,
f2fs_update_iostat(F2FS_I_SB(inode), inode,
APP_BUFFERED_READ_IO, ret);
- if (trace_f2fs_dataread_end_enabled())
- trace_f2fs_dataread_end(inode, pos, ret);
+ trace_f2fs_dataread_end(inode, pos, ret);
return ret;
}
@@ -4746,6 +4925,9 @@ static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
err = file_modified(file);
if (err)
return err;
+
+ f2fs_zero_post_eof_page(inode,
+ iocb->ki_pos + iov_iter_count(from), true);
return count;
}
@@ -5063,8 +5245,7 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
f2fs_dio_write_iter(iocb, from, &may_need_sync) :
f2fs_buffered_write_iter(iocb, from);
- if (trace_f2fs_datawrite_end_enabled())
- trace_f2fs_datawrite_end(inode, orig_pos, ret);
+ trace_f2fs_datawrite_end(inode, orig_pos, ret);
}
/* Don't leave any preallocated blocks around past i_size. */
@@ -5107,6 +5288,8 @@ static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
struct inode *inode = file_inode(filp);
int err;
+ trace_f2fs_fadvise(inode, offset, len, advice);
+
if (advice == POSIX_FADV_SEQUENTIAL) {
if (S_ISFIFO(inode->i_mode))
return -ESPIPE;
@@ -5128,11 +5311,15 @@ static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
}
err = generic_fadvise(filp, offset, len, advice);
- if (!err && advice == POSIX_FADV_DONTNEED &&
- test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
- f2fs_compressed_file(inode))
- f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);
+ if (err)
+ return err;
+ if (advice == POSIX_FADV_DONTNEED &&
+ (test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
+ f2fs_compressed_file(inode)))
+ f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);
+ else if (advice == POSIX_FADV_NOREUSE)
+ err = f2fs_keep_noreuse_range(inode, offset, len);
return err;
}
@@ -5242,6 +5429,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case F2FS_IOC_DECOMPRESS_FILE:
case F2FS_IOC_COMPRESS_FILE:
case F2FS_IOC_GET_DEV_ALIAS_FILE:
+ case F2FS_IOC_IO_PRIO:
break;
default:
return -ENOIOCTLCMD;
@@ -5257,7 +5445,7 @@ const struct file_operations f2fs_file_operations = {
.iopoll = iocb_bio_iopoll,
.open = f2fs_file_open,
.release = f2fs_release_file,
- .mmap = f2fs_file_mmap,
+ .mmap_prepare = f2fs_file_mmap_prepare,
.flush = f2fs_file_flush,
.fsync = f2fs_sync_file,
.fallocate = f2fs_fallocate,
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 3e1b6d2ff3a7..384fa7e2085b 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -38,13 +38,14 @@ static int gc_thread_func(void *data)
struct f2fs_gc_control gc_control = {
.victim_segno = NULL_SEGNO,
.should_migrate_blocks = false,
- .err_gc_skipped = false };
+ .err_gc_skipped = false,
+ .one_time = false };
wait_ms = gc_th->min_sleep_time;
set_freezable();
do {
- bool sync_mode, foreground = false;
+ bool sync_mode, foreground = false, gc_boost = false;
wait_event_freezable_timeout(*wq,
kthread_should_stop() ||
@@ -52,8 +53,12 @@ static int gc_thread_func(void *data)
gc_th->gc_wake,
msecs_to_jiffies(wait_ms));
- if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
+ if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq)) {
foreground = true;
+ gc_control.one_time = false;
+ } else if (f2fs_sb_has_blkzoned(sbi)) {
+ gc_control.one_time = true;
+ }
/* give it a try one time */
if (gc_th->gc_wake)
@@ -81,8 +86,6 @@ static int gc_thread_func(void *data)
continue;
}
- gc_control.one_time = false;
-
/*
* [GC triggering condition]
* 0. GC is not conducted currently.
@@ -132,7 +135,7 @@ static int gc_thread_func(void *data)
if (need_to_boost_gc(sbi)) {
decrease_sleep_time(gc_th, &wait_ms);
if (f2fs_sb_has_blkzoned(sbi))
- gc_control.one_time = true;
+ gc_boost = true;
} else {
increase_sleep_time(gc_th, &wait_ms);
}
@@ -141,10 +144,10 @@ do_gc:
FOREGROUND : BACKGROUND);
sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
- gc_control.one_time;
+ (gc_boost && gc_th->boost_gc_greedy);
/* foreground GC was been triggered via f2fs_balance_fs() */
- if (foreground)
+ if (foreground && !f2fs_sb_has_blkzoned(sbi))
sync_mode = false;
gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
@@ -197,6 +200,8 @@ int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
gc_th->valid_thresh_ratio = DEF_GC_THREAD_VALID_THRESH_RATIO;
+ gc_th->boost_gc_multiple = BOOST_GC_MULTIPLE;
+ gc_th->boost_gc_greedy = GC_GREEDY;
if (f2fs_sb_has_blkzoned(sbi)) {
gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED;
@@ -278,12 +283,7 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
- if (p->alloc_mode == SSR) {
- p->gc_mode = GC_GREEDY;
- p->dirty_bitmap = dirty_i->dirty_segmap[type];
- p->max_search = dirty_i->nr_dirty[type];
- p->ofs_unit = 1;
- } else if (p->alloc_mode == AT_SSR) {
+ if (p->alloc_mode == SSR || p->alloc_mode == AT_SSR) {
p->gc_mode = GC_GREEDY;
p->dirty_bitmap = dirty_i->dirty_segmap[type];
p->max_search = dirty_i->nr_dirty[type];
@@ -389,14 +389,15 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
}
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
- unsigned int segno, struct victim_sel_policy *p)
+ unsigned int segno, struct victim_sel_policy *p,
+ unsigned int valid_thresh_ratio)
{
if (p->alloc_mode == SSR)
return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
- if (p->one_time_gc && (get_valid_blocks(sbi, segno, true) >=
- CAP_BLKS_PER_SEC(sbi) * sbi->gc_thread->valid_thresh_ratio /
- 100))
+ if (p->one_time_gc && (valid_thresh_ratio < 100) &&
+ (get_valid_blocks(sbi, segno, true) >=
+ CAP_BLKS_PER_SEC(sbi) * valid_thresh_ratio / 100))
return UINT_MAX;
/* alloc_mode == LFS */
@@ -773,10 +774,11 @@ int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct sit_info *sm = SIT_I(sbi);
- struct victim_sel_policy p;
+ struct victim_sel_policy p = {0};
unsigned int secno, last_victim;
unsigned int last_segment;
unsigned int nsearched;
+ unsigned int valid_thresh_ratio = 100;
bool is_atgc;
int ret = 0;
@@ -786,7 +788,11 @@ int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
p.alloc_mode = alloc_mode;
p.age = age;
p.age_threshold = sbi->am.age_threshold;
- p.one_time_gc = one_time;
+ if (one_time) {
+ p.one_time_gc = one_time;
+ if (has_enough_free_secs(sbi, 0, NR_PERSISTENT_LOG))
+ valid_thresh_ratio = sbi->gc_thread->valid_thresh_ratio;
+ }
retry:
select_policy(sbi, gc_type, type, &p);
@@ -806,11 +812,14 @@ retry:
goto out;
}
- if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
+ if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) {
ret = -EBUSY;
- else
- p.min_segno = *result;
- goto out;
+ goto out;
+ }
+ if (gc_type == FG_GC)
+ clear_bit(GET_SEC_FROM_SEG(sbi, *result), dirty_i->victim_secmap);
+ p.min_segno = *result;
+ goto got_result;
}
ret = -ENODATA;
@@ -909,7 +918,7 @@ retry:
goto next;
}
- cost = get_gc_cost(sbi, segno, &p);
+ cost = get_gc_cost(sbi, segno, &p, valid_thresh_ratio);
if (p.min_cost > cost) {
p.min_segno = segno;
@@ -1042,7 +1051,7 @@ next_step:
for (off = 0; off < usable_blks_in_seg; off++, entry++) {
nid_t nid = le32_to_cpu(entry->nid);
- struct page *node_page;
+ struct folio *node_folio;
struct node_info ni;
int err;
@@ -1065,27 +1074,27 @@ next_step:
}
/* phase == 2 */
- node_page = f2fs_get_node_page(sbi, nid);
- if (IS_ERR(node_page))
+ node_folio = f2fs_get_node_folio(sbi, nid, NODE_TYPE_REGULAR);
+ if (IS_ERR(node_folio))
continue;
- /* block may become invalid during f2fs_get_node_page */
+ /* block may become invalid during f2fs_get_node_folio */
if (check_valid_map(sbi, segno, off) == 0) {
- f2fs_put_page(node_page, 1);
+ f2fs_folio_put(node_folio, true);
continue;
}
if (f2fs_get_node_info(sbi, nid, &ni, false)) {
- f2fs_put_page(node_page, 1);
+ f2fs_folio_put(node_folio, true);
continue;
}
if (ni.blk_addr != start_addr + off) {
- f2fs_put_page(node_page, 1);
+ f2fs_folio_put(node_folio, true);
continue;
}
- err = f2fs_move_node_page(node_page, gc_type);
+ err = f2fs_move_node_folio(node_folio, gc_type);
if (!err && gc_type == FG_GC)
submitted++;
stat_inc_node_blk_count(sbi, 1, gc_type);
@@ -1131,7 +1140,7 @@ block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
- struct page *node_page;
+ struct folio *node_folio;
nid_t nid;
unsigned int ofs_in_node, max_addrs, base;
block_t source_blkaddr;
@@ -1139,12 +1148,12 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
nid = le32_to_cpu(sum->nid);
ofs_in_node = le16_to_cpu(sum->ofs_in_node);
- node_page = f2fs_get_node_page(sbi, nid);
- if (IS_ERR(node_page))
+ node_folio = f2fs_get_node_folio(sbi, nid, NODE_TYPE_REGULAR);
+ if (IS_ERR(node_folio))
return false;
if (f2fs_get_node_info(sbi, nid, dni, false)) {
- f2fs_put_page(node_page, 1);
+ f2fs_folio_put(node_folio, true);
return false;
}
@@ -1155,12 +1164,12 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
}
if (f2fs_check_nid_range(sbi, dni->ino)) {
- f2fs_put_page(node_page, 1);
+ f2fs_folio_put(node_folio, true);
return false;
}
- if (IS_INODE(node_page)) {
- base = offset_in_addr(F2FS_INODE(node_page));
+ if (IS_INODE(node_folio)) {
+ base = offset_in_addr(F2FS_INODE(node_folio));
max_addrs = DEF_ADDRS_PER_INODE;
} else {
base = 0;
@@ -1170,13 +1179,13 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
if (base + ofs_in_node >= max_addrs) {
f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
base, ofs_in_node, max_addrs, dni->ino, dni->nid);
- f2fs_put_page(node_page, 1);
+ f2fs_folio_put(node_folio, true);
return false;
}
- *nofs = ofs_of_node(node_page);
- source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
- f2fs_put_page(node_page, 1);
+ *nofs = ofs_of_node(node_folio);
+ source_blkaddr = data_blkaddr(NULL, node_folio, ofs_in_node);
+ f2fs_folio_put(node_folio, true);
if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
@@ -1202,7 +1211,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
struct address_space *mapping = f2fs_is_cow_file(inode) ?
F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping;
struct dnode_of_data dn;
- struct page *page;
+ struct folio *folio, *efolio;
struct f2fs_io_info fio = {
.sbi = sbi,
.ino = inode->i_ino,
@@ -1215,16 +1224,16 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
};
int err;
- page = f2fs_grab_cache_page(mapping, index, true);
- if (!page)
- return -ENOMEM;
+ folio = f2fs_grab_cache_folio(mapping, index, true);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
if (f2fs_lookup_read_extent_cache_block(inode, index,
&dn.data_blkaddr)) {
if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
DATA_GENERIC_ENHANCE_READ))) {
err = -EFSCORRUPTED;
- goto put_page;
+ goto put_folio;
}
goto got_it;
}
@@ -1232,53 +1241,54 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
if (err)
- goto put_page;
+ goto put_folio;
f2fs_put_dnode(&dn);
if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
err = -ENOENT;
- goto put_page;
+ goto put_folio;
}
if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
DATA_GENERIC_ENHANCE))) {
err = -EFSCORRUPTED;
- goto put_page;
+ goto put_folio;
}
got_it:
- /* read page */
- fio.page = page;
+ /* read folio */
+ fio.folio = folio;
fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
/*
* don't cache encrypted data into meta inode until previous dirty
* data were writebacked to avoid racing between GC and flush.
*/
- f2fs_wait_on_page_writeback(page, DATA, true, true);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
- fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
- dn.data_blkaddr,
+ efolio = f2fs_filemap_get_folio(META_MAPPING(sbi), dn.data_blkaddr,
FGP_LOCK | FGP_CREAT, GFP_NOFS);
- if (!fio.encrypted_page) {
- err = -ENOMEM;
- goto put_page;
+ if (IS_ERR(efolio)) {
+ err = PTR_ERR(efolio);
+ goto put_folio;
}
+ fio.encrypted_page = &efolio->page;
+
err = f2fs_submit_page_bio(&fio);
if (err)
goto put_encrypted_page;
- f2fs_put_page(fio.encrypted_page, 0);
- f2fs_put_page(page, 1);
+ f2fs_put_page(fio.encrypted_page, false);
+ f2fs_folio_put(folio, true);
f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);
return 0;
put_encrypted_page:
- f2fs_put_page(fio.encrypted_page, 1);
-put_page:
- f2fs_put_page(page, 1);
+ f2fs_put_page(fio.encrypted_page, true);
+put_folio:
+ f2fs_folio_put(folio, true);
return err;
}
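
[Editor's note] The hunk above converts ra_data_block() from struct page to struct folio. The key behavioural difference is in error reporting: f2fs_grab_cache_page() returned NULL on failure, while f2fs_grab_cache_folio() and f2fs_filemap_get_folio() return an ERR_PTR, so callers switch from a NULL check plus a hard-coded -ENOMEM to IS_ERR()/PTR_ERR(). A minimal sketch of that pattern, assuming the folio helpers report failures via ERR_PTR() exactly as the hunk shows (the function below is illustrative, not part of the patch):

static int example_grab_and_release(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio;

	/* old style: page = f2fs_grab_cache_page(...); if (!page) return -ENOMEM; */
	folio = f2fs_grab_cache_folio(mapping, index, true);
	if (IS_ERR(folio))
		return PTR_ERR(folio);	/* propagates -ENOMEM or any other errno */

	/* ... fill fio.folio, wait for writeback, submit the read ... */

	f2fs_folio_put(folio, true);	/* true: folio is locked, unlock before put */
	return 0;
}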
@@ -1304,7 +1314,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
struct dnode_of_data dn;
struct f2fs_summary sum;
struct node_info ni;
- struct page *page, *mpage;
+ struct folio *folio, *mfolio, *efolio;
block_t newaddr;
int err = 0;
bool lfs_mode = f2fs_lfs_mode(fio.sbi);
@@ -1313,9 +1323,9 @@ static int move_data_block(struct inode *inode, block_t bidx,
CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;
/* do not read out */
- page = f2fs_grab_cache_page(mapping, bidx, false);
- if (!page)
- return -ENOMEM;
+ folio = f2fs_grab_cache_folio(mapping, bidx, false);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
err = -ENOENT;
@@ -1332,7 +1342,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
goto out;
if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
err = -ENOENT;
goto put_out;
}
@@ -1341,7 +1351,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
* don't cache encrypted data into meta inode until previous dirty
* data were writebacked to avoid racing between GC and flush.
*/
- f2fs_wait_on_page_writeback(page, DATA, true, true);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
@@ -1350,26 +1360,26 @@ static int move_data_block(struct inode *inode, block_t bidx,
goto put_out;
/* read page */
- fio.page = page;
+ fio.folio = folio;
fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
if (lfs_mode)
f2fs_down_write(&fio.sbi->io_order_lock);
- mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
+ mfolio = f2fs_grab_cache_folio(META_MAPPING(fio.sbi),
fio.old_blkaddr, false);
- if (!mpage) {
- err = -ENOMEM;
+ if (IS_ERR(mfolio)) {
+ err = PTR_ERR(mfolio);
goto up_out;
}
- fio.encrypted_page = mpage;
+ fio.encrypted_page = folio_file_page(mfolio, fio.old_blkaddr);
- /* read source block in mpage */
- if (!PageUptodate(mpage)) {
+ /* read source block in mfolio */
+ if (!folio_test_uptodate(mfolio)) {
err = f2fs_submit_page_bio(&fio);
if (err) {
- f2fs_put_page(mpage, 1);
+ f2fs_folio_put(mfolio, true);
goto up_out;
}
@@ -1378,11 +1388,11 @@ static int move_data_block(struct inode *inode, block_t bidx,
f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
F2FS_BLKSIZE);
- lock_page(mpage);
- if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
- !PageUptodate(mpage))) {
+ folio_lock(mfolio);
+ if (unlikely(!is_meta_folio(mfolio) ||
+ !folio_test_uptodate(mfolio))) {
err = -EIO;
- f2fs_put_page(mpage, 1);
+ f2fs_folio_put(mfolio, true);
goto up_out;
}
}
@@ -1393,26 +1403,28 @@ static int move_data_block(struct inode *inode, block_t bidx,
err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
&sum, type, NULL);
if (err) {
- f2fs_put_page(mpage, 1);
+ f2fs_folio_put(mfolio, true);
/* filesystem should shutdown, no need to recovery block */
goto up_out;
}
- fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
- newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
- if (!fio.encrypted_page) {
- err = -ENOMEM;
- f2fs_put_page(mpage, 1);
+ efolio = f2fs_filemap_get_folio(META_MAPPING(fio.sbi), newaddr,
+ FGP_LOCK | FGP_CREAT, GFP_NOFS);
+ if (IS_ERR(efolio)) {
+ err = PTR_ERR(efolio);
+ f2fs_folio_put(mfolio, true);
goto recover_block;
}
+ fio.encrypted_page = &efolio->page;
+
/* write target block */
f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
memcpy(page_address(fio.encrypted_page),
- page_address(mpage), PAGE_SIZE);
- f2fs_put_page(mpage, 1);
+ folio_address(mfolio), PAGE_SIZE);
+ f2fs_folio_put(mfolio, true);
- f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr);
+ f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr, 1);
set_page_dirty(fio.encrypted_page);
if (clear_page_dirty_for_io(fio.encrypted_page))
@@ -1430,7 +1442,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
f2fs_update_data_blkaddr(&dn, newaddr);
set_inode_flag(inode, FI_APPEND_WRITE);
- f2fs_put_page(fio.encrypted_page, 1);
+ f2fs_put_page(fio.encrypted_page, true);
recover_block:
if (err)
f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
@@ -1441,19 +1453,19 @@ up_out:
put_out:
f2fs_put_dnode(&dn);
out:
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return err;
}
static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
- unsigned int segno, int off)
+ unsigned int segno, int off)
{
- struct page *page;
+ struct folio *folio;
int err = 0;
- page = f2fs_get_lock_data_page(inode, bidx, true);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ folio = f2fs_get_lock_data_folio(inode, bidx, true);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
err = -ENOENT;
@@ -1465,12 +1477,12 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
goto out;
if (gc_type == BG_GC) {
- if (folio_test_writeback(page_folio(page))) {
+ if (folio_test_writeback(folio)) {
err = -EAGAIN;
goto out;
}
- set_page_dirty(page);
- set_page_private_gcing(page);
+ folio_mark_dirty(folio);
+ folio_set_f2fs_gcing(folio);
} else {
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
@@ -1480,37 +1492,37 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
.op = REQ_OP_WRITE,
.op_flags = REQ_SYNC,
.old_blkaddr = NULL_ADDR,
- .page = page,
+ .folio = folio,
.encrypted_page = NULL,
.need_lock = LOCK_REQ,
.io_type = FS_GC_DATA_IO,
};
- bool is_dirty = PageDirty(page);
+ bool is_dirty = folio_test_dirty(folio);
retry:
- f2fs_wait_on_page_writeback(page, DATA, true, true);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
- set_page_dirty(page);
- if (clear_page_dirty_for_io(page)) {
+ folio_mark_dirty(folio);
+ if (folio_clear_dirty_for_io(folio)) {
inode_dec_dirty_pages(inode);
f2fs_remove_dirty_inode(inode);
}
- set_page_private_gcing(page);
+ folio_set_f2fs_gcing(folio);
err = f2fs_do_write_data_page(&fio);
if (err) {
- clear_page_private_gcing(page);
+ folio_clear_f2fs_gcing(folio);
if (err == -ENOMEM) {
memalloc_retry_wait(GFP_NOFS);
goto retry;
}
if (is_dirty)
- set_page_dirty(page);
+ folio_mark_dirty(folio);
}
}
out:
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return err;
}
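
[Editor's note] The move_data_page() hunk is a mechanical helper substitution; collected from the hunks above, shown here only as an editorial summary of the mapping this patch applies:

/* page helper                        folio helper (as used in this patch)
 * set_page_dirty(page)            -> folio_mark_dirty(folio)
 * PageDirty(page)                 -> folio_test_dirty(folio)
 * clear_page_dirty_for_io(page)   -> folio_clear_dirty_for_io(folio)
 * set_page_private_gcing(page)    -> folio_set_f2fs_gcing(folio)
 * clear_page_private_gcing(page)  -> folio_clear_f2fs_gcing(folio)
 * f2fs_wait_on_page_writeback()   -> f2fs_folio_wait_writeback()
 * f2fs_put_page(page, 1)          -> f2fs_folio_put(folio, true)
 * f2fs_put_page(page, 0)          -> f2fs_folio_put(folio, false)
 */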
@@ -1539,7 +1551,6 @@ next_step:
entry = sum;
for (off = 0; off < usable_blks_in_seg; off++, entry++) {
- struct page *data_page;
struct inode *inode;
struct node_info dni; /* dnode info for the data */
unsigned int ofs_in_node, nofs;
@@ -1582,6 +1593,7 @@ next_step:
ofs_in_node = le16_to_cpu(entry->ofs_in_node);
if (phase == 3) {
+ struct folio *data_folio;
int err;
inode = f2fs_iget(sb, dni.ino);
@@ -1632,15 +1644,15 @@ next_step:
continue;
}
- data_page = f2fs_get_read_data_page(inode, start_bidx,
+ data_folio = f2fs_get_read_data_folio(inode, start_bidx,
REQ_RAHEAD, true, NULL);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- if (IS_ERR(data_page)) {
+ if (IS_ERR(data_folio)) {
iput(inode);
continue;
}
- f2fs_put_page(data_page, 0);
+ f2fs_folio_put(data_folio, false);
add_gc_inode(gc_list, inode);
continue;
}
@@ -1715,8 +1727,6 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
struct gc_inode_list *gc_list, int gc_type,
bool force_migrate, bool one_time)
{
- struct page *sum_page;
- struct f2fs_summary_block *sum;
struct blk_plug plug;
unsigned int segno = start_segno;
unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
@@ -1725,7 +1735,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
SUM_TYPE_DATA : SUM_TYPE_NODE;
unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
- int submitted = 0;
+ int submitted = 0, sum_blk_cnt;
if (__is_large_section(sbi)) {
sec_end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
@@ -1748,7 +1758,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
!has_enough_free_blocks(sbi,
sbi->gc_thread->boost_zoned_gc_percent))
window_granularity *=
- BOOST_GC_MULTIPLE;
+ sbi->gc_thread->boost_gc_multiple;
end_segno = start_segno + window_granularity;
}
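
[Editor's note] The hunk above swaps the compile-time BOOST_GC_MULTIPLE for the runtime tunable gc_thread->boost_gc_multiple when the one-time GC window is widened under free-space pressure. A sketch of the resulting logic, assuming the zoned-device guard that the truncated hunk context does not show (illustrative only, not part of the patch):

if (f2fs_sb_has_blkzoned(sbi) &&
    !has_enough_free_blocks(sbi, sbi->gc_thread->boost_zoned_gc_percent))
	window_granularity *= sbi->gc_thread->boost_gc_multiple;	/* was: BOOST_GC_MULTIPLE */

end_segno = start_segno + window_granularity;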
@@ -1759,85 +1769,113 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
+ segno = rounddown(segno, SUMS_PER_BLOCK);
+ sum_blk_cnt = DIV_ROUND_UP(end_segno - segno, SUMS_PER_BLOCK);
/* readahead multi ssa blocks those have contiguous address */
if (__is_large_section(sbi))
f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
- end_segno - segno, META_SSA, true);
+ sum_blk_cnt, META_SSA, true);
/* reference all summary page */
while (segno < end_segno) {
- sum_page = f2fs_get_sum_page(sbi, segno++);
- if (IS_ERR(sum_page)) {
- int err = PTR_ERR(sum_page);
+ struct folio *sum_folio = f2fs_get_sum_folio(sbi, segno);
+
+ segno += SUMS_PER_BLOCK;
+ if (IS_ERR(sum_folio)) {
+ int err = PTR_ERR(sum_folio);
- end_segno = segno - 1;
- for (segno = start_segno; segno < end_segno; segno++) {
- sum_page = find_get_page(META_MAPPING(sbi),
+ end_segno = segno - SUMS_PER_BLOCK;
+ segno = rounddown(start_segno, SUMS_PER_BLOCK);
+ while (segno < end_segno) {
+ sum_folio = filemap_get_folio(META_MAPPING(sbi),
GET_SUM_BLOCK(sbi, segno));
- f2fs_put_page(sum_page, 0);
- f2fs_put_page(sum_page, 0);
+ folio_put_refs(sum_folio, 2);
+ segno += SUMS_PER_BLOCK;
}
return err;
}
- unlock_page(sum_page);
+ folio_unlock(sum_folio);
}
blk_start_plug(&plug);
- for (segno = start_segno; segno < end_segno; segno++) {
+ segno = start_segno;
+ while (segno < end_segno) {
+ unsigned int cur_segno;
/* find segment summary of victim */
- sum_page = find_get_page(META_MAPPING(sbi),
+ struct folio *sum_folio = filemap_get_folio(META_MAPPING(sbi),
GET_SUM_BLOCK(sbi, segno));
- f2fs_put_page(sum_page, 0);
-
- if (get_valid_blocks(sbi, segno, false) == 0)
- goto freed;
- if (gc_type == BG_GC && __is_large_section(sbi) &&
- migrated >= sbi->migration_granularity)
- goto skip;
- if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
- goto skip;
-
- sum = page_address(sum_page);
- if (type != GET_SUM_TYPE((&sum->footer))) {
- f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
- segno, type, GET_SUM_TYPE((&sum->footer)));
- f2fs_stop_checkpoint(sbi, false,
- STOP_CP_REASON_CORRUPTED_SUMMARY);
- goto skip;
+ unsigned int block_end_segno = rounddown(segno, SUMS_PER_BLOCK)
+ + SUMS_PER_BLOCK;
+
+ if (block_end_segno > end_segno)
+ block_end_segno = end_segno;
+
+ if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, segno))) {
+ f2fs_err(sbi, "%s: segment %u is used by log",
+ __func__, segno);
+ f2fs_bug_on(sbi, 1);
+ goto next_block;
}
- /*
- * this is to avoid deadlock:
- * - lock_page(sum_page) - f2fs_replace_block
- * - check_valid_map() - down_write(sentry_lock)
- * - down_read(sentry_lock) - change_curseg()
- * - lock_page(sum_page)
- */
- if (type == SUM_TYPE_NODE)
- submitted += gc_node_segment(sbi, sum->entries, segno,
- gc_type);
- else
- submitted += gc_data_segment(sbi, sum->entries, gc_list,
- segno, gc_type,
- force_migrate);
+ if (!folio_test_uptodate(sum_folio) ||
+ unlikely(f2fs_cp_error(sbi)))
+ goto next_block;
- stat_inc_gc_seg_count(sbi, data_type, gc_type);
- sbi->gc_reclaimed_segs[sbi->gc_mode]++;
- migrated++;
+ for (cur_segno = segno; cur_segno < block_end_segno;
+ cur_segno++) {
+ struct f2fs_summary_block *sum;
-freed:
- if (gc_type == FG_GC &&
- get_valid_blocks(sbi, segno, false) == 0)
- seg_freed++;
+ if (get_valid_blocks(sbi, cur_segno, false) == 0)
+ goto freed;
+ if (gc_type == BG_GC && __is_large_section(sbi) &&
+ migrated >= sbi->migration_granularity)
+ continue;
- if (__is_large_section(sbi))
- sbi->next_victim_seg[gc_type] =
- (segno + 1 < sec_end_segno) ?
- segno + 1 : NULL_SEGNO;
-skip:
- f2fs_put_page(sum_page, 0);
+ sum = SUM_BLK_PAGE_ADDR(sum_folio, cur_segno);
+ if (type != GET_SUM_TYPE((&sum->footer))) {
+ f2fs_err(sbi, "Inconsistent segment (%u) type "
+ "[%d, %d] in SSA and SIT",
+ cur_segno, type,
+ GET_SUM_TYPE((&sum->footer)));
+ f2fs_stop_checkpoint(sbi, false,
+ STOP_CP_REASON_CORRUPTED_SUMMARY);
+ continue;
+ }
+
+ /*
+ * this is to avoid deadlock:
+ * - lock_page(sum_page) - f2fs_replace_block
+ * - check_valid_map() - down_write(sentry_lock)
+ * - down_read(sentry_lock) - change_curseg()
+ * - lock_page(sum_page)
+ */
+ if (type == SUM_TYPE_NODE)
+ submitted += gc_node_segment(sbi, sum->entries,
+ cur_segno, gc_type);
+ else
+ submitted += gc_data_segment(sbi, sum->entries,
+ gc_list, cur_segno,
+ gc_type, force_migrate);
+
+ stat_inc_gc_seg_count(sbi, data_type, gc_type);
+ sbi->gc_reclaimed_segs[sbi->gc_mode]++;
+ migrated++;
+
+freed:
+ if (gc_type == FG_GC &&
+ get_valid_blocks(sbi, cur_segno, false) == 0)
+ seg_freed++;
+
+ if (__is_large_section(sbi))
+ sbi->next_victim_seg[gc_type] =
+ (cur_segno + 1 < sec_end_segno) ?
+ cur_segno + 1 : NULL_SEGNO;
+ }
+next_block:
+ folio_put_refs(sum_folio, 2);
+ segno = block_end_segno;
}
if (submitted)
@@ -1890,6 +1928,7 @@ gc_more:
/* Let's run FG_GC, if we don't have enough space. */
if (has_not_enough_free_secs(sbi, 0, 0)) {
gc_type = FG_GC;
+ gc_control->one_time = false;
/*
* For example, if there are many prefree_segments below given
@@ -2063,6 +2102,16 @@ int f2fs_gc_range(struct f2fs_sb_info *sbi,
.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
};
+ /*
+ * avoid migrating an empty section, as it can be allocated by
+ * a log in parallel.
+ */
+ if (!get_valid_blocks(sbi, segno, true))
+ continue;
+
+ if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, segno)))
+ continue;
+
do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false);
put_gc_inode(&gc_list);
@@ -2174,6 +2223,8 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
MAIN_SECS(sbi) += secs;
+ if (sbi->allocate_section_hint > MAIN_SECS(sbi))
+ sbi->allocate_section_hint = MAIN_SECS(sbi);
FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
@@ -2181,6 +2232,9 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
if (f2fs_is_multi_device(sbi)) {
int last_dev = sbi->s_ndevs - 1;
+ sbi->allocate_section_hint = FDEV(0).total_segments /
+ SEGS_PER_SEC(sbi);
+
FDEV(last_dev).total_segments =
(int)FDEV(last_dev).total_segments + segs;
FDEV(last_dev).end_blk =
@@ -2268,12 +2322,12 @@ out_drop_write:
if (err)
return err;
- err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
+ err = freeze_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
if (err)
return err;
if (f2fs_readonly(sbi->sb)) {
- err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
+ err = thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
if (err)
return err;
return -EROFS;
@@ -2330,6 +2384,6 @@ recover_out:
out_err:
f2fs_up_write(&sbi->cp_global_sem);
f2fs_up_write(&sbi->gc_lock);
- thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
+ thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
return err;
}
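
[Editor's note] The largest change in gc.c is the rework of do_garbage_collect(): summary blocks are now fetched once per SUMS_PER_BLOCK segments rather than once per segment, with the start segno rounded down to a block boundary and an inner loop walking the individual segments of each block via SUM_BLK_PAGE_ADDR(). A compact sketch of the batching arithmetic, assuming SUMS_PER_BLOCK summaries share one SSA block as the new loop implies (illustrative only):

static void walk_summary_blocks(unsigned int start_segno, unsigned int end_segno)
{
	/* align the first read to a summary-block boundary */
	unsigned int segno = rounddown(start_segno, SUMS_PER_BLOCK);

	while (segno < end_segno) {
		unsigned int block_end = segno + SUMS_PER_BLOCK;
		unsigned int cur;

		if (block_end > end_segno)
			block_end = end_segno;

		/* one f2fs_get_sum_folio() call covers [segno, block_end) */
		for (cur = max(segno, start_segno); cur < block_end; cur++) {
			/* gc_node_segment() / gc_data_segment() runs here */
		}

		segno = block_end;
	}
}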
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index 5c1eaf55e127..6c4d4567571e 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -25,7 +25,7 @@
#define DEF_GC_THREAD_CANDIDATE_RATIO 20 /* select 20% oldest sections as candidates */
#define DEF_GC_THREAD_MAX_CANDIDATE_COUNT 10 /* select at most 10 sections as candidates */
#define DEF_GC_THREAD_AGE_WEIGHT 60 /* age weight */
-#define DEF_GC_THREAD_VALID_THRESH_RATIO 95 /* do not GC over 95% valid block ratio for one time GC */
+#define DEF_GC_THREAD_VALID_THRESH_RATIO 80 /* do not GC over 80% valid block ratio for one time GC */
#define DEFAULT_ACCURACY_CLASS 10000 /* accuracy class */
#define LIMIT_INVALID_BLOCK 40 /* percentage over total user space */
@@ -68,6 +68,8 @@ struct f2fs_gc_kthread {
unsigned int no_zoned_gc_percent;
unsigned int boost_zoned_gc_percent;
unsigned int valid_thresh_ratio;
+ unsigned int boost_gc_multiple;
+ unsigned int boost_gc_greedy;
};
struct gc_inode_list {
@@ -194,6 +196,7 @@ static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
static inline bool need_to_boost_gc(struct f2fs_sb_info *sbi)
{
if (f2fs_sb_has_blkzoned(sbi))
- return !has_enough_free_blocks(sbi, LIMIT_BOOST_ZONED_GC);
+ return !has_enough_free_blocks(sbi,
+ sbi->gc_thread->boost_zoned_gc_percent);
return has_enough_invalid_blocks(sbi);
}
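
[Editor's note] Two tuning changes land in gc.h: DEF_GC_THREAD_VALID_THRESH_RATIO drops from 95 to 80, and need_to_boost_gc() consults the per-thread boost_zoned_gc_percent instead of the LIMIT_BOOST_ZONED_GC constant. A rough worked example of the ratio change follows; the 512-block segment is an assumption, since the hunk does not show where the ratio is applied:

/*
 * Illustrative arithmetic only:
 *   old cap: 512 * 95 / 100 = 486 valid blocks
 *   new cap: 512 * 80 / 100 = 409 valid blocks
 * i.e. one-time GC now skips segments that are more than 80% full,
 * where it previously tolerated up to 95%.
 */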
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 005babf1bed1..e5c6a08b7e4f 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -33,9 +33,9 @@ bool f2fs_may_inline_data(struct inode *inode)
return !f2fs_post_read_required(inode);
}
-static bool inode_has_blocks(struct inode *inode, struct page *ipage)
+static bool inode_has_blocks(struct inode *inode, struct folio *ifolio)
{
- struct f2fs_inode *ri = F2FS_INODE(ipage);
+ struct f2fs_inode *ri = F2FS_INODE(ifolio);
int i;
if (F2FS_HAS_BLOCKS(inode))
@@ -48,12 +48,12 @@ static bool inode_has_blocks(struct inode *inode, struct page *ipage)
return false;
}
-bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage)
+bool f2fs_sanity_check_inline_data(struct inode *inode, struct folio *ifolio)
{
if (!f2fs_has_inline_data(inode))
return false;
- if (inode_has_blocks(inode, ipage))
+ if (inode_has_blocks(inode, ifolio))
return false;
if (!support_inline_data(inode))
@@ -79,37 +79,37 @@ bool f2fs_may_inline_dentry(struct inode *inode)
return true;
}
-void f2fs_do_read_inline_data(struct folio *folio, struct page *ipage)
+void f2fs_do_read_inline_data(struct folio *folio, struct folio *ifolio)
{
- struct inode *inode = folio_file_mapping(folio)->host;
+ struct inode *inode = folio->mapping->host;
if (folio_test_uptodate(folio))
return;
- f2fs_bug_on(F2FS_I_SB(inode), folio_index(folio));
+ f2fs_bug_on(F2FS_I_SB(inode), folio->index);
folio_zero_segment(folio, MAX_INLINE_DATA(inode), folio_size(folio));
/* Copy the whole inline data block */
- memcpy_to_folio(folio, 0, inline_data_addr(inode, ipage),
+ memcpy_to_folio(folio, 0, inline_data_addr(inode, ifolio),
MAX_INLINE_DATA(inode));
if (!folio_test_uptodate(folio))
folio_mark_uptodate(folio);
}
-void f2fs_truncate_inline_inode(struct inode *inode,
- struct page *ipage, u64 from)
+void f2fs_truncate_inline_inode(struct inode *inode, struct folio *ifolio,
+ u64 from)
{
void *addr;
if (from >= MAX_INLINE_DATA(inode))
return;
- addr = inline_data_addr(inode, ipage);
+ addr = inline_data_addr(inode, ifolio);
- f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
memset(addr + from, 0, MAX_INLINE_DATA(inode) - from);
- set_page_dirty(ipage);
+ folio_mark_dirty(ifolio);
if (from == 0)
clear_inode_flag(inode, FI_DATA_EXIST);
@@ -117,32 +117,32 @@ void f2fs_truncate_inline_inode(struct inode *inode,
int f2fs_read_inline_data(struct inode *inode, struct folio *folio)
{
- struct page *ipage;
+ struct folio *ifolio;
- ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
- if (IS_ERR(ipage)) {
+ ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
+ if (IS_ERR(ifolio)) {
folio_unlock(folio);
- return PTR_ERR(ipage);
+ return PTR_ERR(ifolio);
}
if (!f2fs_has_inline_data(inode)) {
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
return -EAGAIN;
}
- if (folio_index(folio))
+ if (folio->index)
folio_zero_segment(folio, 0, folio_size(folio));
else
- f2fs_do_read_inline_data(folio, ipage);
+ f2fs_do_read_inline_data(folio, ifolio);
if (!folio_test_uptodate(folio))
folio_mark_uptodate(folio);
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
folio_unlock(folio);
return 0;
}
-int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
+int f2fs_convert_inline_folio(struct dnode_of_data *dn, struct folio *folio)
{
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(dn->inode),
@@ -150,7 +150,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
.type = DATA,
.op = REQ_OP_WRITE,
.op_flags = REQ_SYNC | REQ_PRIO,
- .page = page,
+ .folio = folio,
.encrypted_page = NULL,
.io_type = FS_DATA_IO,
};
@@ -182,20 +182,20 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
return -EFSCORRUPTED;
}
- f2fs_bug_on(F2FS_P_SB(page), folio_test_writeback(page_folio(page)));
+ f2fs_bug_on(F2FS_F_SB(folio), folio_test_writeback(folio));
- f2fs_do_read_inline_data(page_folio(page), dn->inode_page);
- set_page_dirty(page);
+ f2fs_do_read_inline_data(folio, dn->inode_folio);
+ folio_mark_dirty(folio);
/* clear dirty state */
- dirty = clear_page_dirty_for_io(page);
+ dirty = folio_clear_dirty_for_io(folio);
/* write data page to try to make data consistent */
- set_page_writeback(page);
+ folio_start_writeback(folio);
fio.old_blkaddr = dn->data_blkaddr;
set_inode_flag(dn->inode, FI_HOT_DATA);
f2fs_outplace_write_data(dn, &fio);
- f2fs_wait_on_page_writeback(page, DATA, true, true);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
if (dirty) {
inode_dec_dirty_pages(dn->inode);
f2fs_remove_dirty_inode(dn->inode);
@@ -205,8 +205,8 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
set_inode_flag(dn->inode, FI_APPEND_WRITE);
/* clear inline data and flag after data writeback */
- f2fs_truncate_inline_inode(dn->inode, dn->inode_page, 0);
- clear_page_private_inline(dn->inode_page);
+ f2fs_truncate_inline_inode(dn->inode, dn->inode_folio, 0);
+ folio_clear_f2fs_inline(dn->inode_folio);
clear_out:
stat_dec_inline_inode(dn->inode);
clear_inode_flag(dn->inode, FI_INLINE_DATA);
@@ -218,7 +218,7 @@ int f2fs_convert_inline_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
- struct page *ipage, *page;
+ struct folio *ifolio, *folio;
int err = 0;
if (f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
@@ -231,28 +231,28 @@ int f2fs_convert_inline_inode(struct inode *inode)
if (err)
return err;
- page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
- if (!page)
- return -ENOMEM;
+ folio = f2fs_grab_cache_folio(inode->i_mapping, 0, false);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
f2fs_lock_op(sbi);
- ipage = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(ifolio)) {
+ err = PTR_ERR(ifolio);
goto out;
}
- set_new_dnode(&dn, inode, ipage, ipage, 0);
+ set_new_dnode(&dn, inode, ifolio, ifolio, 0);
if (f2fs_has_inline_data(inode))
- err = f2fs_convert_inline_page(&dn, page);
+ err = f2fs_convert_inline_folio(&dn, folio);
f2fs_put_dnode(&dn);
out:
f2fs_unlock_op(sbi);
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
if (!err)
f2fs_balance_fs(sbi, dn.node_changed);
@@ -263,40 +263,39 @@ out:
int f2fs_write_inline_data(struct inode *inode, struct folio *folio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct page *ipage;
+ struct folio *ifolio;
- ipage = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage))
- return PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
if (!f2fs_has_inline_data(inode)) {
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
return -EAGAIN;
}
f2fs_bug_on(F2FS_I_SB(inode), folio->index);
- f2fs_wait_on_page_writeback(ipage, NODE, true, true);
- memcpy_from_folio(inline_data_addr(inode, ipage),
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
+ memcpy_from_folio(inline_data_addr(inode, ifolio),
folio, 0, MAX_INLINE_DATA(inode));
- set_page_dirty(ipage);
+ folio_mark_dirty(ifolio);
f2fs_clear_page_cache_dirty_tag(folio);
set_inode_flag(inode, FI_APPEND_WRITE);
set_inode_flag(inode, FI_DATA_EXIST);
- clear_page_private_inline(ipage);
- f2fs_put_page(ipage, 1);
+ folio_clear_f2fs_inline(ifolio);
+ f2fs_folio_put(ifolio, true);
return 0;
}
-int f2fs_recover_inline_data(struct inode *inode, struct page *npage)
+int f2fs_recover_inline_data(struct inode *inode, struct folio *nfolio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode *ri = NULL;
void *src_addr, *dst_addr;
- struct page *ipage;
/*
* The inline_data recovery policy is as follows.
@@ -306,38 +305,39 @@ int f2fs_recover_inline_data(struct inode *inode, struct page *npage)
* x o -> remove data blocks, and then recover inline_data
* x x -> recover data blocks
*/
- if (IS_INODE(npage))
- ri = F2FS_INODE(npage);
+ if (IS_INODE(nfolio))
+ ri = F2FS_INODE(nfolio);
if (f2fs_has_inline_data(inode) &&
ri && (ri->i_inline & F2FS_INLINE_DATA)) {
+ struct folio *ifolio;
process_inline:
- ipage = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage))
- return PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
- f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
- src_addr = inline_data_addr(inode, npage);
- dst_addr = inline_data_addr(inode, ipage);
+ src_addr = inline_data_addr(inode, nfolio);
+ dst_addr = inline_data_addr(inode, ifolio);
memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
set_inode_flag(inode, FI_INLINE_DATA);
set_inode_flag(inode, FI_DATA_EXIST);
- set_page_dirty(ipage);
- f2fs_put_page(ipage, 1);
+ folio_mark_dirty(ifolio);
+ f2fs_folio_put(ifolio, true);
return 1;
}
if (f2fs_has_inline_data(inode)) {
- ipage = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage))
- return PTR_ERR(ipage);
- f2fs_truncate_inline_inode(inode, ipage, 0);
+ struct folio *ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
+ f2fs_truncate_inline_inode(inode, ifolio, 0);
stat_dec_inline_inode(inode);
clear_inode_flag(inode, FI_INLINE_DATA);
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
int ret;
@@ -352,49 +352,50 @@ process_inline:
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
const struct f2fs_filename *fname,
- struct page **res_page)
+ struct folio **res_folio,
+ bool use_hash)
{
struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
struct f2fs_dir_entry *de;
struct f2fs_dentry_ptr d;
- struct page *ipage;
+ struct folio *ifolio;
void *inline_dentry;
- ipage = f2fs_get_node_page(sbi, dir->i_ino);
- if (IS_ERR(ipage)) {
- *res_page = ipage;
+ ifolio = f2fs_get_inode_folio(sbi, dir->i_ino);
+ if (IS_ERR(ifolio)) {
+ *res_folio = ifolio;
return NULL;
}
- inline_dentry = inline_data_addr(dir, ipage);
+ inline_dentry = inline_data_addr(dir, ifolio);
make_dentry_ptr_inline(dir, &d, inline_dentry);
- de = f2fs_find_target_dentry(&d, fname, NULL);
- unlock_page(ipage);
+ de = f2fs_find_target_dentry(&d, fname, NULL, use_hash);
+ folio_unlock(ifolio);
if (IS_ERR(de)) {
- *res_page = ERR_CAST(de);
+ *res_folio = ERR_CAST(de);
de = NULL;
}
if (de)
- *res_page = ipage;
+ *res_folio = ifolio;
else
- f2fs_put_page(ipage, 0);
+ f2fs_folio_put(ifolio, false);
return de;
}
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
- struct page *ipage)
+ struct folio *ifolio)
{
struct f2fs_dentry_ptr d;
void *inline_dentry;
- inline_dentry = inline_data_addr(inode, ipage);
+ inline_dentry = inline_data_addr(inode, ifolio);
make_dentry_ptr_inline(inode, &d, inline_dentry);
f2fs_do_make_empty_dir(inode, parent, &d);
- set_page_dirty(ipage);
+ folio_mark_dirty(ifolio);
/* update i_size to MAX_INLINE_DATA */
if (i_size_read(inode) < MAX_INLINE_DATA(inode))
@@ -406,39 +407,39 @@ int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
* NOTE: ipage is grabbed by caller, but if any error occurs, we should
* release ipage in this function.
*/
-static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
+static int f2fs_move_inline_dirents(struct inode *dir, struct folio *ifolio,
void *inline_dentry)
{
- struct page *page;
+ struct folio *folio;
struct dnode_of_data dn;
struct f2fs_dentry_block *dentry_blk;
struct f2fs_dentry_ptr src, dst;
int err;
- page = f2fs_grab_cache_page(dir->i_mapping, 0, true);
- if (!page) {
- f2fs_put_page(ipage, 1);
- return -ENOMEM;
+ folio = f2fs_grab_cache_folio(dir->i_mapping, 0, true);
+ if (IS_ERR(folio)) {
+ f2fs_folio_put(ifolio, true);
+ return PTR_ERR(folio);
}
- set_new_dnode(&dn, dir, ipage, NULL, 0);
+ set_new_dnode(&dn, dir, ifolio, NULL, 0);
err = f2fs_reserve_block(&dn, 0);
if (err)
goto out;
if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
f2fs_put_dnode(&dn);
- set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
- f2fs_warn(F2FS_P_SB(page), "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
+ set_sbi_flag(F2FS_F_SB(folio), SBI_NEED_FSCK);
+ f2fs_warn(F2FS_F_SB(folio), "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
__func__, dir->i_ino, dn.data_blkaddr);
- f2fs_handle_error(F2FS_P_SB(page), ERROR_INVALID_BLKADDR);
+ f2fs_handle_error(F2FS_F_SB(folio), ERROR_INVALID_BLKADDR);
err = -EFSCORRUPTED;
goto out;
}
- f2fs_wait_on_page_writeback(page, DATA, true, true);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
- dentry_blk = page_address(page);
+ dentry_blk = folio_address(folio);
/*
* Start by zeroing the full block, to ensure that all unused space is
@@ -454,12 +455,12 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
memcpy(dst.dentry, src.dentry, SIZE_OF_DIR_ENTRY * src.max);
memcpy(dst.filename, src.filename, src.max * F2FS_SLOT_LEN);
- if (!PageUptodate(page))
- SetPageUptodate(page);
- set_page_dirty(page);
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ folio_mark_dirty(folio);
/* clear inline dir and flag after data writeback */
- f2fs_truncate_inline_inode(dir, ipage, 0);
+ f2fs_truncate_inline_inode(dir, ifolio, 0);
stat_dec_inline_dir(dir);
clear_inode_flag(dir, FI_INLINE_DENTRY);
@@ -476,7 +477,7 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
if (i_size_read(dir) < PAGE_SIZE)
f2fs_i_size_write(dir, PAGE_SIZE);
out:
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return err;
}
@@ -532,7 +533,7 @@ punch_dentry_pages:
return err;
}
-static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
+static int f2fs_move_rehashed_dirents(struct inode *dir, struct folio *ifolio,
void *inline_dentry)
{
void *backup_dentry;
@@ -541,20 +542,20 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
backup_dentry = f2fs_kmalloc(F2FS_I_SB(dir),
MAX_INLINE_DATA(dir), GFP_F2FS_ZERO);
if (!backup_dentry) {
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
return -ENOMEM;
}
memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA(dir));
- f2fs_truncate_inline_inode(dir, ipage, 0);
+ f2fs_truncate_inline_inode(dir, ifolio, 0);
- unlock_page(ipage);
+ folio_unlock(ifolio);
err = f2fs_add_inline_entries(dir, backup_dentry);
if (err)
goto recover;
- lock_page(ipage);
+ folio_lock(ifolio);
stat_dec_inline_dir(dir);
clear_inode_flag(dir, FI_INLINE_DENTRY);
@@ -570,31 +571,31 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
kfree(backup_dentry);
return 0;
recover:
- lock_page(ipage);
- f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ folio_lock(ifolio);
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
f2fs_i_depth_write(dir, 0);
f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
- set_page_dirty(ipage);
- f2fs_put_page(ipage, 1);
+ folio_mark_dirty(ifolio);
+ f2fs_folio_put(ifolio, true);
kfree(backup_dentry);
return err;
}
-static int do_convert_inline_dir(struct inode *dir, struct page *ipage,
+static int do_convert_inline_dir(struct inode *dir, struct folio *ifolio,
void *inline_dentry)
{
if (!F2FS_I(dir)->i_dir_level)
- return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
+ return f2fs_move_inline_dirents(dir, ifolio, inline_dentry);
else
- return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
+ return f2fs_move_rehashed_dirents(dir, ifolio, inline_dentry);
}
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
- struct page *ipage;
+ struct folio *ifolio;
struct f2fs_filename fname;
void *inline_dentry = NULL;
int err = 0;
@@ -608,22 +609,22 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry)
if (err)
goto out;
- ipage = f2fs_get_node_page(sbi, dir->i_ino);
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(sbi, dir->i_ino);
+ if (IS_ERR(ifolio)) {
+ err = PTR_ERR(ifolio);
goto out_fname;
}
- if (f2fs_has_enough_room(dir, ipage, &fname)) {
- f2fs_put_page(ipage, 1);
+ if (f2fs_has_enough_room(dir, ifolio, &fname)) {
+ f2fs_folio_put(ifolio, true);
goto out_fname;
}
- inline_dentry = inline_data_addr(dir, ipage);
+ inline_dentry = inline_data_addr(dir, ifolio);
- err = do_convert_inline_dir(dir, ipage, inline_dentry);
+ err = do_convert_inline_dir(dir, ifolio, inline_dentry);
if (!err)
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
out_fname:
f2fs_free_filename(&fname);
out:
@@ -635,24 +636,24 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
struct inode *inode, nid_t ino, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
- struct page *ipage;
+ struct folio *ifolio;
unsigned int bit_pos;
void *inline_dentry = NULL;
struct f2fs_dentry_ptr d;
int slots = GET_DENTRY_SLOTS(fname->disk_name.len);
- struct page *page = NULL;
+ struct folio *folio = NULL;
int err = 0;
- ipage = f2fs_get_node_page(sbi, dir->i_ino);
- if (IS_ERR(ipage))
- return PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(sbi, dir->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
- inline_dentry = inline_data_addr(dir, ipage);
+ inline_dentry = inline_data_addr(dir, ifolio);
make_dentry_ptr_inline(dir, &d, inline_dentry);
bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max);
if (bit_pos >= d.max) {
- err = do_convert_inline_dir(dir, ipage, inline_dentry);
+ err = do_convert_inline_dir(dir, ifolio, inline_dentry);
if (err)
return err;
err = -EAGAIN;
@@ -662,19 +663,19 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
if (inode) {
f2fs_down_write_nested(&F2FS_I(inode)->i_sem,
SINGLE_DEPTH_NESTING);
- page = f2fs_init_inode_metadata(inode, dir, fname, ipage);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ folio = f2fs_init_inode_metadata(inode, dir, fname, ifolio);
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
goto fail;
}
}
- f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash,
bit_pos);
- set_page_dirty(ipage);
+ folio_mark_dirty(ifolio);
/* we don't need to mark_inode_dirty now */
if (inode) {
@@ -682,9 +683,9 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
/* synchronize inode page's data from inode cache */
if (is_inode_flag_set(inode, FI_NEW_INODE))
- f2fs_update_inode(inode, page);
+ f2fs_update_inode(inode, folio);
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
}
f2fs_update_parent_metadata(dir, inode, 0);
@@ -692,12 +693,12 @@ fail:
if (inode)
f2fs_up_write(&F2FS_I(inode)->i_sem);
out:
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
return err;
}
-void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
- struct inode *dir, struct inode *inode)
+void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
+ struct folio *folio, struct inode *dir, struct inode *inode)
{
struct f2fs_dentry_ptr d;
void *inline_dentry;
@@ -705,18 +706,18 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
unsigned int bit_pos;
int i;
- lock_page(page);
- f2fs_wait_on_page_writeback(page, NODE, true, true);
+ folio_lock(folio);
+ f2fs_folio_wait_writeback(folio, NODE, true, true);
- inline_dentry = inline_data_addr(dir, page);
+ inline_dentry = inline_data_addr(dir, folio);
make_dentry_ptr_inline(dir, &d, inline_dentry);
bit_pos = dentry - d.dentry;
for (i = 0; i < slots; i++)
__clear_bit_le(bit_pos + i, d.bitmap);
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
f2fs_mark_inode_dirty_sync(dir, false);
@@ -728,21 +729,21 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
bool f2fs_empty_inline_dir(struct inode *dir)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
- struct page *ipage;
+ struct folio *ifolio;
unsigned int bit_pos = 2;
void *inline_dentry;
struct f2fs_dentry_ptr d;
- ipage = f2fs_get_node_page(sbi, dir->i_ino);
- if (IS_ERR(ipage))
+ ifolio = f2fs_get_inode_folio(sbi, dir->i_ino);
+ if (IS_ERR(ifolio))
return false;
- inline_dentry = inline_data_addr(dir, ipage);
+ inline_dentry = inline_data_addr(dir, ifolio);
make_dentry_ptr_inline(dir, &d, inline_dentry);
bit_pos = find_next_bit_le(d.bitmap, d.max, bit_pos);
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
if (bit_pos < d.max)
return false;
@@ -754,7 +755,7 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
struct fscrypt_str *fstr)
{
struct inode *inode = file_inode(file);
- struct page *ipage = NULL;
+ struct folio *ifolio = NULL;
struct f2fs_dentry_ptr d;
void *inline_dentry = NULL;
int err;
@@ -764,17 +765,17 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
if (ctx->pos == d.max)
return 0;
- ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
- if (IS_ERR(ipage))
- return PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
/*
* f2fs_readdir was protected by inode.i_rwsem, it is safe to access
* ipage without page's lock held.
*/
- unlock_page(ipage);
+ folio_unlock(ifolio);
- inline_dentry = inline_data_addr(inode, ipage);
+ inline_dentry = inline_data_addr(inode, ifolio);
make_dentry_ptr_inline(inode, &d, inline_dentry);
@@ -782,7 +783,7 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
if (!err)
ctx->pos = d.max;
- f2fs_put_page(ipage, 0);
+ f2fs_folio_put(ifolio, false);
return err < 0 ? err : 0;
}
@@ -793,12 +794,12 @@ int f2fs_inline_data_fiemap(struct inode *inode,
__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
FIEMAP_EXTENT_LAST;
struct node_info ni;
- struct page *ipage;
+ struct folio *ifolio;
int err = 0;
- ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
- if (IS_ERR(ipage))
- return PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
if ((S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
!f2fs_has_inline_data(inode)) {
@@ -823,11 +824,11 @@ int f2fs_inline_data_fiemap(struct inode *inode,
goto out;
byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
- byteaddr += (char *)inline_data_addr(inode, ipage) -
- (char *)F2FS_INODE(ipage);
+ byteaddr += (char *)inline_data_addr(inode, ifolio) -
+ (char *)F2FS_INODE(ifolio);
err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
trace_f2fs_fiemap(inode, start, byteaddr, ilen, flags, err);
out:
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
return err;
}
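
[Editor's note] Throughout inline.c the ipage/page parameters become ifolio/folio, and every f2fs_put_page(p, n) becomes f2fs_folio_put(f, bool): the boolean keeps the meaning of the old integer, i.e. whether the folio is still locked and must be unlocked before its reference is dropped. A small sketch of the two call patterns, matching how the hunks above use them (illustrative only, not part of the patch):

/* Pattern A: folio held locked for the whole operation */
ifolio = f2fs_get_inode_folio(sbi, dir->i_ino);
if (IS_ERR(ifolio))
	return PTR_ERR(ifolio);
/* ... read or modify inline data under the folio lock ... */
f2fs_folio_put(ifolio, true);		/* unlock, then drop the reference */

/* Pattern B: folio unlocked early, as in f2fs_read_inline_dir() */
ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
if (IS_ERR(ifolio))
	return PTR_ERR(ifolio);
folio_unlock(ifolio);			/* safe: i_rwsem already serialises readers */
/* ... read inline dentries ... */
f2fs_folio_put(ifolio, false);		/* reference only; already unlocked */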
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 282fd320bdb3..38b8994bc1b2 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -34,10 +34,10 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
if (f2fs_inode_dirtied(inode, sync))
return;
- if (f2fs_is_atomic_file(inode)) {
- set_inode_flag(inode, FI_ATOMIC_DIRTIED);
+ /* only an atomic file w/ FI_ATOMIC_COMMITTED can be marked vfs dirty */
+ if (f2fs_is_atomic_file(inode) &&
+ !is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
return;
- }
mark_inode_dirty_sync(inode);
}
@@ -68,9 +68,9 @@ void f2fs_set_inode_flags(struct inode *inode)
S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}
-static void __get_inode_rdev(struct inode *inode, struct page *node_page)
+static void __get_inode_rdev(struct inode *inode, struct folio *node_folio)
{
- __le32 *addr = get_dnode_addr(inode, node_page);
+ __le32 *addr = get_dnode_addr(inode, node_folio);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
@@ -81,9 +81,9 @@ static void __get_inode_rdev(struct inode *inode, struct page *node_page)
}
}
-static void __set_inode_rdev(struct inode *inode, struct page *node_page)
+static void __set_inode_rdev(struct inode *inode, struct folio *node_folio)
{
- __le32 *addr = get_dnode_addr(inode, node_page);
+ __le32 *addr = get_dnode_addr(inode, node_folio);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
if (old_valid_dev(inode->i_rdev)) {
@@ -97,33 +97,34 @@ static void __set_inode_rdev(struct inode *inode, struct page *node_page)
}
}
-static void __recover_inline_status(struct inode *inode, struct page *ipage)
+static void __recover_inline_status(struct inode *inode, struct folio *ifolio)
{
- void *inline_data = inline_data_addr(inode, ipage);
+ void *inline_data = inline_data_addr(inode, ifolio);
__le32 *start = inline_data;
__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);
while (start < end) {
if (*start++) {
- f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
set_inode_flag(inode, FI_DATA_EXIST);
- set_raw_inline(inode, F2FS_INODE(ipage));
- set_page_dirty(ipage);
+ set_raw_inline(inode, F2FS_INODE(ifolio));
+ folio_mark_dirty(ifolio);
return;
}
}
return;
}
-static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
+static
+bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct folio *folio)
{
- struct f2fs_inode *ri = &F2FS_NODE(page)->i;
+ struct f2fs_inode *ri = &F2FS_NODE(folio)->i;
if (!f2fs_sb_has_inode_chksum(sbi))
return false;
- if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
+ if (!IS_INODE(folio) || !(ri->i_inline & F2FS_EXTRA_ATTR))
return false;
if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
@@ -133,9 +134,9 @@ static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page
return true;
}
-static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
+static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct folio *folio)
{
- struct f2fs_node *node = F2FS_NODE(page);
+ struct f2fs_node *node = F2FS_NODE(folio);
struct f2fs_inode *ri = &node->i;
__le32 ino = node->footer.ino;
__le32 gen = ri->i_generation;
@@ -144,19 +145,18 @@ static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
unsigned int cs_size = sizeof(dummy_cs);
- chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
- sizeof(ino));
- chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));
+ chksum = f2fs_chksum(sbi->s_chksum_seed, (__u8 *)&ino, sizeof(ino));
+ chksum_seed = f2fs_chksum(chksum, (__u8 *)&gen, sizeof(gen));
- chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
- chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
+ chksum = f2fs_chksum(chksum_seed, (__u8 *)ri, offset);
+ chksum = f2fs_chksum(chksum, (__u8 *)&dummy_cs, cs_size);
offset += cs_size;
- chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
- F2FS_BLKSIZE - offset);
+ chksum = f2fs_chksum(chksum, (__u8 *)ri + offset,
+ F2FS_BLKSIZE - offset);
return chksum;
}
-bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
+bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct folio *folio)
{
struct f2fs_inode *ri;
__u32 provided, calculated;
@@ -165,34 +165,34 @@ bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
return true;
#ifdef CONFIG_F2FS_CHECK_FS
- if (!f2fs_enable_inode_chksum(sbi, page))
+ if (!f2fs_enable_inode_chksum(sbi, folio))
#else
- if (!f2fs_enable_inode_chksum(sbi, page) ||
- PageDirty(page) ||
- folio_test_writeback(page_folio(page)))
+ if (!f2fs_enable_inode_chksum(sbi, folio) ||
+ folio_test_dirty(folio) ||
+ folio_test_writeback(folio))
#endif
return true;
- ri = &F2FS_NODE(page)->i;
+ ri = &F2FS_NODE(folio)->i;
provided = le32_to_cpu(ri->i_inode_checksum);
- calculated = f2fs_inode_chksum(sbi, page);
+ calculated = f2fs_inode_chksum(sbi, folio);
if (provided != calculated)
f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
- page_folio(page)->index, ino_of_node(page),
+ folio->index, ino_of_node(folio),
provided, calculated);
return provided == calculated;
}
-void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
+void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct folio *folio)
{
- struct f2fs_inode *ri = &F2FS_NODE(page)->i;
+ struct f2fs_inode *ri = &F2FS_NODE(folio)->i;
- if (!f2fs_enable_inode_chksum(sbi, page))
+ if (!f2fs_enable_inode_chksum(sbi, folio))
return;
- ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
+ ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, folio));
}
static bool sanity_check_compress_inode(struct inode *inode,
@@ -267,24 +267,36 @@ err_level:
return false;
}
-static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+static bool sanity_check_inode(struct inode *inode, struct folio *node_folio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
- struct f2fs_inode *ri = F2FS_INODE(node_page);
+ struct f2fs_inode *ri = F2FS_INODE(node_folio);
unsigned long long iblocks;
- iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
+ iblocks = le64_to_cpu(F2FS_INODE(node_folio)->i_blocks);
if (!iblocks) {
f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
__func__, inode->i_ino, iblocks);
return false;
}
- if (ino_of_node(node_page) != nid_of_node(node_page)) {
+ if (ino_of_node(node_folio) != nid_of_node(node_folio)) {
f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
__func__, inode->i_ino,
- ino_of_node(node_page), nid_of_node(node_page));
+ ino_of_node(node_folio), nid_of_node(node_folio));
+ return false;
+ }
+
+ if (ino_of_node(node_folio) == fi->i_xattr_nid) {
+ f2fs_warn(sbi, "%s: corrupted inode i_ino=%lx, xnid=%x, run fsck to fix.",
+ __func__, inode->i_ino, fi->i_xattr_nid);
+ return false;
+ }
+
+ if (S_ISDIR(inode->i_mode) && unlikely(inode->i_nlink == 1)) {
+ f2fs_warn(sbi, "%s: directory inode (ino=%lx) has a single i_nlink",
+ __func__, inode->i_ino);
return false;
}
@@ -302,15 +314,6 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
F2FS_TOTAL_EXTRA_ATTR_SIZE);
return false;
}
- if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
- f2fs_has_inline_xattr(inode) &&
- (!fi->i_inline_xattr_size ||
- fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
- f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %lu",
- __func__, inode->i_ino, fi->i_inline_xattr_size,
- MAX_INLINE_XATTR_SIZE);
- return false;
- }
if (f2fs_sb_has_compression(sbi) &&
fi->i_flags & F2FS_COMPR_FL &&
F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
@@ -320,6 +323,16 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
}
}
+ if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
+ f2fs_has_inline_xattr(inode) &&
+ (fi->i_inline_xattr_size < MIN_INLINE_XATTR_SIZE ||
+ fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, min: %zu, max: %lu",
+ __func__, inode->i_ino, fi->i_inline_xattr_size,
+ MIN_INLINE_XATTR_SIZE, MAX_INLINE_XATTR_SIZE);
+ return false;
+ }
+
if (!f2fs_sb_has_extra_attr(sbi)) {
if (f2fs_sb_has_project_quota(sbi)) {
f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
@@ -348,7 +361,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
}
}
- if (f2fs_sanity_check_inline_data(inode, node_page)) {
+ if (f2fs_sanity_check_inline_data(inode, node_folio)) {
f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
__func__, inode->i_ino, inode->i_mode);
return false;
@@ -401,7 +414,7 @@ static int do_read_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
- struct page *node_page;
+ struct folio *node_folio;
struct f2fs_inode *ri;
projid_t i_projid;
@@ -409,11 +422,11 @@ static int do_read_inode(struct inode *inode)
if (f2fs_check_nid_range(sbi, inode->i_ino))
return -EINVAL;
- node_page = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(node_page))
- return PTR_ERR(node_page);
+ node_folio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(node_folio))
+ return PTR_ERR(node_folio);
- ri = F2FS_INODE(node_page);
+ ri = F2FS_INODE(node_folio);
inode->i_mode = le16_to_cpu(ri->i_mode);
i_uid_write(inode, le32_to_cpu(ri->i_uid));
@@ -463,8 +476,8 @@ static int do_read_inode(struct inode *inode)
fi->i_inline_xattr_size = 0;
}
- if (!sanity_check_inode(inode, node_page)) {
- f2fs_put_page(node_page, 1);
+ if (!sanity_check_inode(inode, node_folio)) {
+ f2fs_folio_put(node_folio, true);
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
return -EFSCORRUPTED;
@@ -472,17 +485,17 @@ static int do_read_inode(struct inode *inode)
/* check data exist */
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
- __recover_inline_status(inode, node_page);
+ __recover_inline_status(inode, node_folio);
/* try to recover cold bit for non-dir inode */
- if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
- f2fs_wait_on_page_writeback(node_page, NODE, true, true);
- set_cold_node(node_page, false);
- set_page_dirty(node_page);
+ if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_folio)) {
+ f2fs_folio_wait_writeback(node_folio, NODE, true, true);
+ set_cold_node(node_folio, false);
+ folio_mark_dirty(node_folio);
}
/* get rdev by using inline_info */
- __get_inode_rdev(inode, node_page);
+ __get_inode_rdev(inode, node_folio);
if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
fi->last_disk_size = inode->i_size;
@@ -525,17 +538,17 @@ static int do_read_inode(struct inode *inode)
init_idisk_time(inode);
- if (!sanity_check_extent_cache(inode, node_page)) {
- f2fs_put_page(node_page, 1);
+ if (!sanity_check_extent_cache(inode, node_folio)) {
+ f2fs_folio_put(node_folio, true);
f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
return -EFSCORRUPTED;
}
/* Need all the flag bits */
- f2fs_init_read_extent_tree(inode, node_page);
+ f2fs_init_read_extent_tree(inode, node_folio);
f2fs_init_age_extent_tree(inode);
- f2fs_put_page(node_page, 1);
+ f2fs_folio_put(node_folio, true);
stat_inc_inline_xattr(inode);
stat_inc_inline_inode(inode);
@@ -562,7 +575,7 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW)) {
+ if (!(inode_state_read_once(inode) & I_NEW)) {
if (is_meta_ino(sbi, ino)) {
f2fs_err(sbi, "inaccessible inode: %lu, run fsck to repair", ino);
set_sbi_flag(sbi, SBI_NEED_FSCK);
@@ -652,18 +665,18 @@ retry:
return inode;
}
-void f2fs_update_inode(struct inode *inode, struct page *node_page)
+void f2fs_update_inode(struct inode *inode, struct folio *node_folio)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_inode *ri;
struct extent_tree *et = fi->extent_tree[EX_READ];
- f2fs_wait_on_page_writeback(node_page, NODE, true, true);
- set_page_dirty(node_page);
+ f2fs_folio_wait_writeback(node_folio, NODE, true, true);
+ folio_mark_dirty(node_folio);
f2fs_inode_synced(inode);
- ri = F2FS_INODE(node_page);
+ ri = F2FS_INODE(node_folio);
ri->i_mode = cpu_to_le16(inode->i_mode);
ri->i_advise = fi->i_advise;
@@ -738,39 +751,43 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
}
}
- __set_inode_rdev(inode, node_page);
+ __set_inode_rdev(inode, node_folio);
/* deleted inode */
if (inode->i_nlink == 0)
- clear_page_private_inline(node_page);
+ folio_clear_f2fs_inline(node_folio);
init_idisk_time(inode);
#ifdef CONFIG_F2FS_CHECK_FS
- f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
+ f2fs_inode_chksum_set(F2FS_I_SB(inode), node_folio);
#endif
}
void f2fs_update_inode_page(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct page *node_page;
+ struct folio *node_folio;
int count = 0;
retry:
- node_page = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(node_page)) {
- int err = PTR_ERR(node_page);
+ node_folio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(node_folio)) {
+ int err = PTR_ERR(node_folio);
/* The node block was truncated. */
if (err == -ENOENT)
return;
+ if (err == -EFSCORRUPTED)
+ goto stop_checkpoint;
+
if (err == -ENOMEM || ++count <= DEFAULT_RETRY_IO_COUNT)
goto retry;
+stop_checkpoint:
f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_UPDATE_INODE);
return;
}
- f2fs_update_inode(inode, node_page);
- f2fs_put_page(node_page, 1);
+ f2fs_update_inode(inode, node_folio);
+ f2fs_folio_put(node_folio, true);
}
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -788,6 +805,13 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
!is_inode_flag_set(inode, FI_DIRTY_INODE))
return 0;
+ /*
+ * no need to update inode page, ultimately f2fs_evict_inode() will
+ * clear dirty status of inode.
+ */
+ if (f2fs_cp_error(sbi))
+ return -EIO;
+
if (!f2fs_is_checkpoint_ready(sbi)) {
f2fs_mark_inode_dirty_sync(inode, true);
return -ENOSPC;
@@ -803,6 +827,19 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
return 0;
}
+void f2fs_remove_donate_inode(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ if (list_empty(&F2FS_I(inode)->gdonate_list))
+ return;
+
+ spin_lock(&sbi->inode_lock[DONATE_INODE]);
+ list_del_init(&F2FS_I(inode)->gdonate_list);
+ sbi->donate_files--;
+ spin_unlock(&sbi->inode_lock[DONATE_INODE]);
+}
+
/*
* Called at the last iput() if i_nlink is zero
*/
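
[Editor's note] The new f2fs_remove_donate_inode() helper above detaches an inode from the donate list under inode_lock[DONATE_INODE] and decrements sbi->donate_files; f2fs_evict_inode() now calls it so a dying inode can never linger on that list. The matching add-side bookkeeping is not part of this hunk; a hedged sketch of what it presumably looks like, where the inode_list[DONATE_INODE] head is an assumption (only the lock, the counter and the gdonate_list field appear above):

/* illustrative sketch of the add side; not part of the patch */
spin_lock(&sbi->inode_lock[DONATE_INODE]);
if (list_empty(&F2FS_I(inode)->gdonate_list)) {
	list_add_tail(&F2FS_I(inode)->gdonate_list,
		      &sbi->inode_list[DONATE_INODE]);	/* assumed list head name */
	sbi->donate_files++;
}
spin_unlock(&sbi->inode_lock[DONATE_INODE]);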
@@ -837,6 +874,7 @@ void f2fs_evict_inode(struct inode *inode)
f2fs_bug_on(sbi, get_dirty_pages(inode));
f2fs_remove_dirty_inode(inode);
+ f2fs_remove_donate_inode(inode);
if (!IS_DEVICE_ALIASING(inode))
f2fs_destroy_extent_tree(inode);
@@ -902,6 +940,19 @@ retry:
f2fs_update_inode_page(inode);
if (dquot_initialize_needed(inode))
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+
+ /*
+ * If both f2fs_truncate() and f2fs_update_inode_page() failed
+ * due to fuzzed corrupted inode, call f2fs_inode_synced() to
+ * avoid triggering later f2fs_bug_on().
+ */
+ if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
+ f2fs_warn(sbi,
+ "f2fs_evict_inode: inode is dirty, ino:%lu",
+ inode->i_ino);
+ f2fs_inode_synced(inode);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ }
}
if (freeze_protected)
sb_end_intwrite(inode->i_sb);
@@ -918,8 +969,12 @@ no_delete:
if (likely(!f2fs_cp_error(sbi) &&
!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
- else
- f2fs_inode_synced(inode);
+
+ /*
+ * anyway, it needs to remove the inode from sbi->inode_list[DIRTY_META]
+ * list to avoid UAF in f2fs_sync_inode_meta() during checkpoint.
+ */
+ f2fs_inode_synced(inode);
/* for the case f2fs_new_inode() was failed, .i_ino is zero, skip it */
if (inode->i_ino)
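
[Editor's note] One cleanup in inode.c that is easy to miss among the folio renames: f2fs_chksum() loses its sbi argument, but the checksum chain itself is unchanged. Collected from the split -/+ lines of the f2fs_inode_chksum() hunk above, the sequence now reads (editorial summary only; variable names are those used in that hunk):

chksum = f2fs_chksum(sbi->s_chksum_seed, (__u8 *)&ino, sizeof(ino));
chksum = f2fs_chksum(chksum, (__u8 *)&gen, sizeof(gen));
chksum = f2fs_chksum(chksum, (__u8 *)ri, offset);		/* inode up to i_inode_checksum */
chksum = f2fs_chksum(chksum, (__u8 *)&dummy_cs, cs_size);	/* zeroed placeholder */
chksum = f2fs_chksum(chksum, (__u8 *)ri + offset + cs_size,
		     F2FS_BLKSIZE - offset - cs_size);		/* rest of the node block */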
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 57d46e1439de..043d20516a21 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -341,6 +341,7 @@ fail_drop:
trace_f2fs_new_inode(inode, err);
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
+ make_bad_inode(inode);
if (nid_free)
set_inode_flag(inode, FI_FREE_NID);
clear_nlink(inode);
@@ -413,7 +414,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
if (is_inode_flag_set(dir, FI_PROJ_INHERIT) &&
(!projid_eq(F2FS_I(dir)->i_projid,
- F2FS_I(old_dentry->d_inode)->i_projid)))
+ F2FS_I(inode)->i_projid)))
return -EXDEV;
err = f2fs_dquot_initialize(dir);
@@ -446,12 +447,12 @@ out:
struct dentry *f2fs_get_parent(struct dentry *child)
{
- struct page *page;
- unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot_name, &page);
+ struct folio *folio;
+ unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot_name, &folio);
if (!ino) {
- if (IS_ERR(page))
- return ERR_CAST(page);
+ if (IS_ERR(folio))
+ return ERR_CAST(folio);
return ERR_PTR(-ENOENT);
}
return d_obtain_alias(f2fs_iget(child->d_sb, ino));
@@ -462,7 +463,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
{
struct inode *inode = NULL;
struct f2fs_dir_entry *de;
- struct page *page;
+ struct folio *folio;
struct dentry *new;
nid_t ino = -1;
int err = 0;
@@ -480,12 +481,12 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
goto out_splice;
if (err)
goto out;
- de = __f2fs_find_entry(dir, &fname, &page);
+ de = __f2fs_find_entry(dir, &fname, &folio);
f2fs_free_filename(&fname);
if (!de) {
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
goto out;
}
err = -ENOENT;
@@ -493,7 +494,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
}
ino = le32_to_cpu(de->ino);
- f2fs_put_page(page, 0);
+ f2fs_folio_put(folio, false);
inode = f2fs_iget(dir->i_sb, ino);
if (IS_ERR(inode)) {
@@ -501,6 +502,14 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
+ if (inode->i_nlink == 0) {
+ f2fs_warn(F2FS_I_SB(inode), "%s: inode (ino=%lx) has zero i_nlink",
+ __func__, inode->i_ino);
+ err = -EFSCORRUPTED;
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+ goto out_iput;
+ }
+
if (IS_ENCRYPTED(dir) &&
(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
!fscrypt_has_permitted_context(dir, inode)) {
@@ -536,28 +545,38 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode = d_inode(dentry);
struct f2fs_dir_entry *de;
- struct page *page;
+ struct folio *folio;
int err;
trace_f2fs_unlink_enter(dir, dentry);
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
- goto fail;
+ goto out;
}
err = f2fs_dquot_initialize(dir);
if (err)
- goto fail;
+ goto out;
err = f2fs_dquot_initialize(inode);
if (err)
- goto fail;
+ goto out;
- de = f2fs_find_entry(dir, &dentry->d_name, &page);
+ de = f2fs_find_entry(dir, &dentry->d_name, &folio);
if (!de) {
- if (IS_ERR(page))
- err = PTR_ERR(page);
- goto fail;
+ if (IS_ERR(folio))
+ err = PTR_ERR(folio);
+ goto out;
+ }
+
+ if (unlikely(inode->i_nlink == 0)) {
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has zero i_nlink",
+ __func__, inode->i_ino);
+ goto corrupted;
+ } else if (S_ISDIR(inode->i_mode) && unlikely(inode->i_nlink == 1)) {
+ f2fs_warn(sbi, "%s: directory inode (ino=%lx) has a single i_nlink",
+ __func__, inode->i_ino);
+ goto corrupted;
}
f2fs_balance_fs(sbi, true);
@@ -566,10 +585,10 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
err = f2fs_acquire_orphan_inode(sbi);
if (err) {
f2fs_unlock_op(sbi);
- f2fs_put_page(page, 0);
- goto fail;
+ f2fs_folio_put(folio, false);
+ goto out;
}
- f2fs_delete_entry(de, page, dir, inode);
+ f2fs_delete_entry(de, folio, dir, inode);
f2fs_unlock_op(sbi);
/* VFS negative dentries are incompatible with Encoding and
@@ -583,7 +602,13 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
-fail:
+
+ goto out;
+corrupted:
+ err = -EFSCORRUPTED;
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_folio_put(folio, false);
+out:
trace_f2fs_unlink_exit(inode, err);
return err;
}
@@ -683,23 +708,23 @@ out_free_encrypted_link:
return err;
}
-static int f2fs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *f2fs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode;
int err;
if (unlikely(f2fs_cp_error(sbi)))
- return -EIO;
+ return ERR_PTR(-EIO);
err = f2fs_dquot_initialize(dir);
if (err)
- return err;
+ return ERR_PTR(err);
inode = f2fs_new_inode(idmap, dir, S_IFDIR | mode, NULL);
if (IS_ERR(inode))
- return PTR_ERR(inode);
+ return ERR_CAST(inode);
inode->i_op = &f2fs_dir_inode_operations;
inode->i_fop = &f2fs_dir_operations;
@@ -721,12 +746,12 @@ static int f2fs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
f2fs_sync_fs(sbi->sb, 1);
f2fs_balance_fs(sbi, true);
- return 0;
+ return NULL;
out_fail:
clear_inode_flag(inode, FI_INC_LINK);
f2fs_handle_failed_inode(inode);
- return err;
+ return ERR_PTR(err);
}
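
The f2fs_mkdir() hunk above adopts the newer ->mkdir() prototype, which returns a struct dentry * instead of an int. A minimal sketch of the convention as used here, assuming NULL means "success, keep the dentry passed in" and errors travel as ERR_PTR() (the helper name is hypothetical):

static struct dentry *example_mkdir(struct mnt_idmap *idmap, struct inode *dir,
				    struct dentry *dentry, umode_t mode)
{
	int err = create_dir_inode(dir, dentry, mode);	/* hypothetical helper */

	if (err)
		return ERR_PTR(err);	/* failure: propagate as ERR_PTR() */
	return NULL;			/* success: use the passed-in dentry */
}
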
static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
@@ -826,7 +851,7 @@ static int __f2fs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
f2fs_i_links_write(inode, false);
spin_lock(&inode->i_lock);
- inode->i_state |= I_LINKABLE;
+ inode_state_set(inode, I_LINKABLE);
spin_unlock(&inode->i_lock);
} else {
if (file)
@@ -890,8 +915,8 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct inode *old_inode = d_inode(old_dentry);
struct inode *new_inode = d_inode(new_dentry);
struct inode *whiteout = NULL;
- struct page *old_dir_page = NULL;
- struct page *old_page, *new_page = NULL;
+ struct folio *old_dir_folio = NULL;
+ struct folio *old_folio, *new_folio = NULL;
struct f2fs_dir_entry *old_dir_entry = NULL;
struct f2fs_dir_entry *old_entry;
struct f2fs_dir_entry *new_entry;
@@ -905,7 +930,7 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
(!projid_eq(F2FS_I(new_dir)->i_projid,
- F2FS_I(old_dentry->d_inode)->i_projid)))
+ F2FS_I(old_inode)->i_projid)))
return -EXDEV;
/*
@@ -950,18 +975,18 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
}
err = -ENOENT;
- old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
+ old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_folio);
if (!old_entry) {
- if (IS_ERR(old_page))
- err = PTR_ERR(old_page);
+ if (IS_ERR(old_folio))
+ err = PTR_ERR(old_folio);
goto out;
}
if (old_is_dir && old_dir != new_dir) {
- old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
+ old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_folio);
if (!old_dir_entry) {
- if (IS_ERR(old_dir_page))
- err = PTR_ERR(old_dir_page);
+ if (IS_ERR(old_dir_folio))
+ err = PTR_ERR(old_dir_folio);
goto out_old;
}
}
@@ -974,10 +999,10 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
err = -ENOENT;
new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
- &new_page);
+ &new_folio);
if (!new_entry) {
- if (IS_ERR(new_page))
- err = PTR_ERR(new_page);
+ if (IS_ERR(new_folio))
+ err = PTR_ERR(new_folio);
goto out_dir;
}
@@ -989,8 +1014,8 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (err)
goto put_out_dir;
- f2fs_set_link(new_dir, new_entry, new_page, old_inode);
- new_page = NULL;
+ f2fs_set_link(new_dir, new_entry, new_folio, old_inode);
+ new_folio = NULL;
inode_set_ctime_current(new_inode);
f2fs_down_write(&F2FS_I(new_inode)->i_sem);
@@ -1029,24 +1054,26 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
inode_set_ctime_current(old_inode);
f2fs_mark_inode_dirty_sync(old_inode, false);
- f2fs_delete_entry(old_entry, old_page, old_dir, NULL);
- old_page = NULL;
+ f2fs_delete_entry(old_entry, old_folio, old_dir, NULL);
+ old_folio = NULL;
if (whiteout) {
set_inode_flag(whiteout, FI_INC_LINK);
err = f2fs_add_link(old_dentry, whiteout);
- if (err)
+ if (err) {
+ d_invalidate(old_dentry);
+ d_invalidate(new_dentry);
goto put_out_dir;
-
+ }
spin_lock(&whiteout->i_lock);
- whiteout->i_state &= ~I_LINKABLE;
+ inode_state_clear(whiteout, I_LINKABLE);
spin_unlock(&whiteout->i_lock);
iput(whiteout);
}
if (old_dir_entry)
- f2fs_set_link(old_inode, old_dir_entry, old_dir_page, new_dir);
+ f2fs_set_link(old_inode, old_dir_entry, old_dir_folio, new_dir);
if (old_is_dir)
f2fs_i_links_write(old_dir, false);
@@ -1067,12 +1094,12 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
put_out_dir:
f2fs_unlock_op(sbi);
- f2fs_put_page(new_page, 0);
+ f2fs_folio_put(new_folio, false);
out_dir:
if (old_dir_entry)
- f2fs_put_page(old_dir_page, 0);
+ f2fs_folio_put(old_dir_folio, false);
out_old:
- f2fs_put_page(old_page, 0);
+ f2fs_folio_put(old_folio, false);
out:
iput(whiteout);
return err;
@@ -1084,8 +1111,8 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir);
struct inode *old_inode = d_inode(old_dentry);
struct inode *new_inode = d_inode(new_dentry);
- struct page *old_dir_page, *new_dir_page;
- struct page *old_page, *new_page;
+ struct folio *old_dir_folio, *new_dir_folio;
+ struct folio *old_folio, *new_folio;
struct f2fs_dir_entry *old_dir_entry = NULL, *new_dir_entry = NULL;
struct f2fs_dir_entry *old_entry, *new_entry;
int old_nlink = 0, new_nlink = 0;
@@ -1098,10 +1125,10 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
if ((is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
!projid_eq(F2FS_I(new_dir)->i_projid,
- F2FS_I(old_dentry->d_inode)->i_projid)) ||
- (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
+ F2FS_I(old_inode)->i_projid)) ||
+ (is_inode_flag_set(old_dir, FI_PROJ_INHERIT) &&
!projid_eq(F2FS_I(old_dir)->i_projid,
- F2FS_I(new_dentry->d_inode)->i_projid)))
+ F2FS_I(new_inode)->i_projid)))
return -EXDEV;
err = f2fs_dquot_initialize(old_dir);
@@ -1113,17 +1140,17 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out;
err = -ENOENT;
- old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
+ old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_folio);
if (!old_entry) {
- if (IS_ERR(old_page))
- err = PTR_ERR(old_page);
+ if (IS_ERR(old_folio))
+ err = PTR_ERR(old_folio);
goto out;
}
- new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page);
+ new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_folio);
if (!new_entry) {
- if (IS_ERR(new_page))
- err = PTR_ERR(new_page);
+ if (IS_ERR(new_folio))
+ err = PTR_ERR(new_folio);
goto out_old;
}
@@ -1131,20 +1158,20 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
if (old_dir != new_dir) {
if (S_ISDIR(old_inode->i_mode)) {
old_dir_entry = f2fs_parent_dir(old_inode,
- &old_dir_page);
+ &old_dir_folio);
if (!old_dir_entry) {
- if (IS_ERR(old_dir_page))
- err = PTR_ERR(old_dir_page);
+ if (IS_ERR(old_dir_folio))
+ err = PTR_ERR(old_dir_folio);
goto out_new;
}
}
if (S_ISDIR(new_inode->i_mode)) {
new_dir_entry = f2fs_parent_dir(new_inode,
- &new_dir_page);
+ &new_dir_folio);
if (!new_dir_entry) {
- if (IS_ERR(new_dir_page))
- err = PTR_ERR(new_dir_page);
+ if (IS_ERR(new_dir_folio))
+ err = PTR_ERR(new_dir_folio);
goto out_old_dir;
}
}
@@ -1171,14 +1198,14 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
/* update ".." directory entry info of old dentry */
if (old_dir_entry)
- f2fs_set_link(old_inode, old_dir_entry, old_dir_page, new_dir);
+ f2fs_set_link(old_inode, old_dir_entry, old_dir_folio, new_dir);
/* update ".." directory entry info of new dentry */
if (new_dir_entry)
- f2fs_set_link(new_inode, new_dir_entry, new_dir_page, old_dir);
+ f2fs_set_link(new_inode, new_dir_entry, new_dir_folio, old_dir);
/* update directory entry info of old dir inode */
- f2fs_set_link(old_dir, old_entry, old_page, new_inode);
+ f2fs_set_link(old_dir, old_entry, old_folio, new_inode);
f2fs_down_write(&F2FS_I(old_inode)->i_sem);
if (!old_dir_entry)
@@ -1197,7 +1224,7 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
f2fs_mark_inode_dirty_sync(old_dir, false);
/* update directory entry info of new dir inode */
- f2fs_set_link(new_dir, new_entry, new_page, old_inode);
+ f2fs_set_link(new_dir, new_entry, new_folio, old_inode);
f2fs_down_write(&F2FS_I(new_inode)->i_sem);
if (!new_dir_entry)
@@ -1229,16 +1256,16 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
return 0;
out_new_dir:
if (new_dir_entry) {
- f2fs_put_page(new_dir_page, 0);
+ f2fs_folio_put(new_dir_folio, false);
}
out_old_dir:
if (old_dir_entry) {
- f2fs_put_page(old_dir_page, 0);
+ f2fs_folio_put(old_dir_folio, false);
}
out_new:
- f2fs_put_page(new_page, 0);
+ f2fs_folio_put(new_folio, false);
out_old:
- f2fs_put_page(old_page, 0);
+ f2fs_folio_put(old_folio, false);
out:
return err;
}
@@ -1280,19 +1307,19 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
- struct page *page;
+ struct folio *folio;
const char *target;
if (!dentry)
return ERR_PTR(-ECHILD);
- page = read_mapping_page(inode->i_mapping, 0, NULL);
- if (IS_ERR(page))
- return ERR_CAST(page);
+ folio = read_mapping_folio(inode->i_mapping, 0, NULL);
+ if (IS_ERR(folio))
+ return ERR_CAST(folio);
- target = fscrypt_get_symlink(inode, page_address(page),
+ target = fscrypt_get_symlink(inode, folio_address(folio),
inode->i_sb->s_blocksize, done);
- put_page(page);
+ folio_put(folio);
return target;
}
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 0b900a7a48e5..482a362f2625 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -27,12 +27,17 @@ static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;
+static inline bool is_invalid_nid(struct f2fs_sb_info *sbi, nid_t nid)
+{
+ return nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid;
+}
+
/*
* Check whether the given nid is within node id range.
*/
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
- if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
+ if (unlikely(is_invalid_nid(sbi, nid))) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
__func__, nid);
@@ -120,25 +125,25 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
return res;
}
-static void clear_node_page_dirty(struct page *page)
+static void clear_node_folio_dirty(struct folio *folio)
{
- if (PageDirty(page)) {
- f2fs_clear_page_cache_dirty_tag(page_folio(page));
- clear_page_dirty_for_io(page);
- dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
+ if (folio_test_dirty(folio)) {
+ f2fs_clear_page_cache_dirty_tag(folio);
+ folio_clear_dirty_for_io(folio);
+ dec_page_count(F2FS_F_SB(folio), F2FS_DIRTY_NODES);
}
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
}
-static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
+static struct folio *get_current_nat_folio(struct f2fs_sb_info *sbi, nid_t nid)
{
- return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
+ return f2fs_get_meta_folio_retry(sbi, current_nat_addr(sbi, nid));
}
-static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
+static struct folio *get_next_nat_folio(struct f2fs_sb_info *sbi, nid_t nid)
{
- struct page *src_page;
- struct page *dst_page;
+ struct folio *src_folio;
+ struct folio *dst_folio;
pgoff_t dst_off;
void *src_addr;
void *dst_addr;
@@ -147,21 +152,21 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
/* get current nat block page with lock */
- src_page = get_current_nat_page(sbi, nid);
- if (IS_ERR(src_page))
- return src_page;
- dst_page = f2fs_grab_meta_page(sbi, dst_off);
- f2fs_bug_on(sbi, PageDirty(src_page));
-
- src_addr = page_address(src_page);
- dst_addr = page_address(dst_page);
+ src_folio = get_current_nat_folio(sbi, nid);
+ if (IS_ERR(src_folio))
+ return src_folio;
+ dst_folio = f2fs_grab_meta_folio(sbi, dst_off);
+ f2fs_bug_on(sbi, folio_test_dirty(src_folio));
+
+ src_addr = folio_address(src_folio);
+ dst_addr = folio_address(dst_folio);
memcpy(dst_addr, src_addr, PAGE_SIZE);
- set_page_dirty(dst_page);
- f2fs_put_page(src_page, 1);
+ folio_mark_dirty(dst_folio);
+ f2fs_folio_put(src_folio, true);
set_to_next_nat(nm_i, nid);
- return dst_page;
+ return dst_folio;
}
static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
@@ -185,7 +190,7 @@ static void __free_nat_entry(struct nat_entry *e)
/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
- struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
+ struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail, bool init_dirty)
{
if (no_fail)
f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
@@ -195,6 +200,12 @@ static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
if (raw_ne)
node_info_from_raw_nat(&ne->ni, raw_ne);
+ if (init_dirty) {
+ INIT_LIST_HEAD(&ne->list);
+ nm_i->nat_cnt[TOTAL_NAT]++;
+ return ne;
+ }
+
spin_lock(&nm_i->nat_list_lock);
list_add_tail(&ne->list, &nm_i->nat_entries);
spin_unlock(&nm_i->nat_list_lock);
@@ -204,14 +215,17 @@ static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
return ne;
}
-static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
+static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n, bool for_dirty)
{
struct nat_entry *ne;
ne = radix_tree_lookup(&nm_i->nat_root, n);
- /* for recent accessed nat entry, move it to tail of lru list */
- if (ne && !get_nat_flag(ne, IS_DIRTY)) {
+ /*
+ * For a recently accessed nat entry that will not be dirtied soon,
+ * move it to the tail of the lru list.
+ */
+ if (ne && !get_nat_flag(ne, IS_DIRTY) && !for_dirty) {
spin_lock(&nm_i->nat_list_lock);
if (!list_empty(&ne->list))
list_move_tail(&ne->list, &nm_i->nat_entries);
@@ -256,7 +270,7 @@ static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
}
static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
- struct nat_entry *ne)
+ struct nat_entry *ne, bool init_dirty)
{
struct nat_entry_set *head;
bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;
@@ -279,7 +293,8 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
goto refresh_list;
nm_i->nat_cnt[DIRTY_NAT]++;
- nm_i->nat_cnt[RECLAIMABLE_NAT]--;
+ if (!init_dirty)
+ nm_i->nat_cnt[RECLAIMABLE_NAT]--;
set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
spin_lock(&nm_i->nat_list_lock);
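
Taken together, the for_dirty/init_dirty plumbing above lets a NAT entry that is about to be dirtied skip the clean LRU entirely: the lookup avoids the list_move_tail(), the init returns before the clean-list insertion, and __set_nat_cache_dirty() then skips the matching RECLAIMABLE_NAT decrement. A condensed sketch of the pattern, mirroring the set_node_addr() hunk further below (the wrapper function here is illustrative only):

static void example_mark_nat_dirty(struct f2fs_nm_info *nm_i, struct node_info *ni,
				   struct nat_entry *new)
{
	bool init_dirty = false;
	struct nat_entry *e;

	/* caller holds nm_i->nat_tree_lock for write, as set_node_addr() does */
	e = __lookup_nat_cache(nm_i, ni->nid, true);	/* true: about to be dirtied */
	if (!e) {
		init_dirty = true;
		/* entry is created without touching the clean LRU list */
		e = __init_nat_entry(nm_i, new, NULL, true, true);
	}
	/* ... update blkaddr/flags on e ... */
	__set_nat_cache_dirty(nm_i, e, init_dirty);	/* skips RECLAIMABLE_NAT-- */
}
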
@@ -310,10 +325,9 @@ static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
start, nr);
}
-bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct folio *folio)
{
- return NODE_MAPPING(sbi) == page->mapping &&
- IS_DNODE(page) && is_cold_node(page);
+ return is_node_folio(folio) && IS_DNODE(folio) && is_cold_node(folio);
}
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
@@ -325,7 +339,7 @@ void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
}
static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
- struct page *page)
+ struct folio *folio)
{
struct fsync_node_entry *fn;
unsigned long flags;
@@ -334,8 +348,8 @@ static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
GFP_NOFS, true, NULL);
- get_page(page);
- fn->page = page;
+ folio_get(folio);
+ fn->folio = folio;
INIT_LIST_HEAD(&fn->list);
spin_lock_irqsave(&sbi->fsync_node_lock, flags);
@@ -348,19 +362,19 @@ static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
return seq_id;
}
-void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio)
{
struct fsync_node_entry *fn;
unsigned long flags;
spin_lock_irqsave(&sbi->fsync_node_lock, flags);
list_for_each_entry(fn, &sbi->fsync_node_list, list) {
- if (fn->page == page) {
+ if (fn->folio == folio) {
list_del(&fn->list);
sbi->fsync_node_num--;
spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
kmem_cache_free(fsync_node_entry_slab, fn);
- put_page(page);
+ folio_put(folio);
return;
}
}
@@ -384,7 +398,7 @@ int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
bool need = false;
f2fs_down_read(&nm_i->nat_tree_lock);
- e = __lookup_nat_cache(nm_i, nid);
+ e = __lookup_nat_cache(nm_i, nid, false);
if (e) {
if (!get_nat_flag(e, IS_CHECKPOINTED) &&
!get_nat_flag(e, HAS_FSYNCED_INODE))
@@ -401,7 +415,7 @@ bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
bool is_cp = true;
f2fs_down_read(&nm_i->nat_tree_lock);
- e = __lookup_nat_cache(nm_i, nid);
+ e = __lookup_nat_cache(nm_i, nid, false);
if (e && !get_nat_flag(e, IS_CHECKPOINTED))
is_cp = false;
f2fs_up_read(&nm_i->nat_tree_lock);
@@ -415,7 +429,7 @@ bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
bool need_update = true;
f2fs_down_read(&nm_i->nat_tree_lock);
- e = __lookup_nat_cache(nm_i, ino);
+ e = __lookup_nat_cache(nm_i, ino, false);
if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
(get_nat_flag(e, IS_CHECKPOINTED) ||
get_nat_flag(e, HAS_FSYNCED_INODE)))
@@ -440,9 +454,9 @@ static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
return;
f2fs_down_write(&nm_i->nat_tree_lock);
- e = __lookup_nat_cache(nm_i, nid);
+ e = __lookup_nat_cache(nm_i, nid, false);
if (!e)
- e = __init_nat_entry(nm_i, new, ne, false);
+ e = __init_nat_entry(nm_i, new, ne, false, false);
else
f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
nat_get_blkaddr(e) !=
@@ -459,11 +473,13 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *e;
struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);
+ bool init_dirty = false;
f2fs_down_write(&nm_i->nat_tree_lock);
- e = __lookup_nat_cache(nm_i, ni->nid);
+ e = __lookup_nat_cache(nm_i, ni->nid, true);
if (!e) {
- e = __init_nat_entry(nm_i, new, NULL, true);
+ init_dirty = true;
+ e = __init_nat_entry(nm_i, new, NULL, true, true);
copy_node_info(&e->ni, ni);
f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
} else if (new_blkaddr == NEW_ADDR) {
@@ -499,11 +515,11 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
nat_set_blkaddr(e, new_blkaddr);
if (!__is_valid_data_blkaddr(new_blkaddr))
set_nat_flag(e, IS_CHECKPOINTED, false);
- __set_nat_cache_dirty(nm_i, e);
+ __set_nat_cache_dirty(nm_i, e, init_dirty);
/* update fsync_mark if its inode nat entry is still alive */
if (ni->nid != ni->ino)
- e = __lookup_nat_cache(nm_i, ni->ino);
+ e = __lookup_nat_cache(nm_i, ni->ino, false);
if (e) {
if (fsync_done && ni->nid == ni->ino)
set_nat_flag(e, HAS_FSYNCED_INODE, true);
@@ -551,23 +567,28 @@ int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
struct f2fs_journal *journal = curseg->journal;
nid_t start_nid = START_NID(nid);
struct f2fs_nat_block *nat_blk;
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct f2fs_nat_entry ne;
struct nat_entry *e;
pgoff_t index;
- block_t blkaddr;
int i;
+ bool need_cache = true;
+ ni->flag = 0;
ni->nid = nid;
retry:
/* Check nat cache */
f2fs_down_read(&nm_i->nat_tree_lock);
- e = __lookup_nat_cache(nm_i, nid);
+ e = __lookup_nat_cache(nm_i, nid, false);
if (e) {
ni->ino = nat_get_ino(e);
ni->blk_addr = nat_get_blkaddr(e);
ni->version = nat_get_version(e);
f2fs_up_read(&nm_i->nat_tree_lock);
+ if (IS_ENABLED(CONFIG_F2FS_CHECK_FS)) {
+ need_cache = false;
+ goto sanity_check;
+ }
return 0;
}
@@ -593,38 +614,47 @@ retry:
up_read(&curseg->journal_rwsem);
if (i >= 0) {
f2fs_up_read(&nm_i->nat_tree_lock);
- goto cache;
+ goto sanity_check;
}
/* Fill node_info from nat page */
index = current_nat_addr(sbi, nid);
f2fs_up_read(&nm_i->nat_tree_lock);
- page = f2fs_get_meta_page(sbi, index);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ folio = f2fs_get_meta_folio(sbi, index);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- nat_blk = (struct f2fs_nat_block *)page_address(page);
+ nat_blk = folio_address(folio);
ne = nat_blk->entries[nid - start_nid];
node_info_from_raw_nat(ni, &ne);
- f2fs_put_page(page, 1);
-cache:
- blkaddr = le32_to_cpu(ne.block_addr);
- if (__is_valid_data_blkaddr(blkaddr) &&
- !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
- return -EFAULT;
+ f2fs_folio_put(folio, true);
+sanity_check:
+ if (__is_valid_data_blkaddr(ni->blk_addr) &&
+ !f2fs_is_valid_blkaddr(sbi, ni->blk_addr,
+ DATA_GENERIC_ENHANCE)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_err_ratelimited(sbi,
+ "f2fs_get_node_info of %pS: inconsistent nat entry, "
+ "ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
+ __builtin_return_address(0),
+ ni->ino, ni->nid, ni->blk_addr, ni->version, ni->flag);
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
+ return -EFSCORRUPTED;
+ }
/* cache nat entry */
- cache_nat_entry(sbi, nid, &ne);
+ if (need_cache)
+ cache_nat_entry(sbi, nid, &ne);
return 0;
}
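
With the sanity_check path above, an inconsistent NAT entry now surfaces as -EFSCORRUPTED (previously -EFAULT), sets SBI_NEED_FSCK, and records ERROR_INCONSISTENT_NAT; with CONFIG_F2FS_CHECK_FS even cache hits are validated and are not re-cached. A hedged caller sketch (the wrapper name is illustrative; the trailing bool argument follows the !do_balance-style usage visible in __write_node_folio() below):

static int example_node_info(struct f2fs_sb_info *sbi, nid_t nid,
			     struct node_info *ni)
{
	int err = f2fs_get_node_info(sbi, nid, ni, false);

	if (err)	/* -EFSCORRUPTED when the NAT entry carries an invalid blkaddr */
		return err;

	/* ni->ino, ni->blk_addr and ni->version are consistent at this point */
	return 0;
}
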
/*
* readahead MAX_RA_NODE number of node pages.
*/
-static void f2fs_ra_node_pages(struct page *parent, int start, int n)
+static void f2fs_ra_node_pages(struct folio *parent, int start, int n)
{
- struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
+ struct f2fs_sb_info *sbi = F2FS_F_SB(parent);
struct blk_plug plug;
int i, end;
nid_t nid;
@@ -753,6 +783,8 @@ got:
return level;
}
+static struct folio *f2fs_get_node_folio_ra(struct folio *parent, int start);
+
/*
* Caller should call f2fs_put_dnode(dn).
* Also, it should grab and release a rwsem by calling f2fs_lock_op() and
@@ -761,8 +793,8 @@ got:
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
- struct page *npage[4];
- struct page *parent = NULL;
+ struct folio *nfolio[4];
+ struct folio *parent = NULL;
int offset[4];
unsigned int noffset[4];
nid_t nids[4];
@@ -774,31 +806,42 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
return level;
nids[0] = dn->inode->i_ino;
- npage[0] = dn->inode_page;
- if (!npage[0]) {
- npage[0] = f2fs_get_node_page(sbi, nids[0]);
- if (IS_ERR(npage[0]))
- return PTR_ERR(npage[0]);
+ if (!dn->inode_folio) {
+ nfolio[0] = f2fs_get_inode_folio(sbi, nids[0]);
+ if (IS_ERR(nfolio[0]))
+ return PTR_ERR(nfolio[0]);
+ } else {
+ nfolio[0] = dn->inode_folio;
}
/* if inline_data is set, should not report any block indices */
if (f2fs_has_inline_data(dn->inode) && index) {
err = -ENOENT;
- f2fs_put_page(npage[0], 1);
+ f2fs_folio_put(nfolio[0], true);
goto release_out;
}
- parent = npage[0];
+ parent = nfolio[0];
if (level != 0)
nids[1] = get_nid(parent, offset[0], true);
- dn->inode_page = npage[0];
- dn->inode_page_locked = true;
+ dn->inode_folio = nfolio[0];
+ dn->inode_folio_locked = true;
/* get indirect or direct nodes */
for (i = 1; i <= level; i++) {
bool done = false;
+ if (nids[i] && nids[i] == dn->inode->i_ino) {
+ err = -EFSCORRUPTED;
+ f2fs_err_ratelimited(sbi,
+ "inode mapping table is corrupted, run fsck to fix it, "
+ "ino:%lu, nid:%u, level:%d, offset:%d",
+ dn->inode->i_ino, nids[i], level, offset[level]);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ goto release_pages;
+ }
+
if (!nids[i] && mode == ALLOC_NODE) {
/* alloc new node */
if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
@@ -807,10 +850,10 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
}
dn->nid = nids[i];
- npage[i] = f2fs_new_node_page(dn, noffset[i]);
- if (IS_ERR(npage[i])) {
+ nfolio[i] = f2fs_new_node_folio(dn, noffset[i]);
+ if (IS_ERR(nfolio[i])) {
f2fs_alloc_nid_failed(sbi, nids[i]);
- err = PTR_ERR(npage[i]);
+ err = PTR_ERR(nfolio[i]);
goto release_pages;
}
@@ -818,36 +861,37 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
f2fs_alloc_nid_done(sbi, nids[i]);
done = true;
} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
- npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
- if (IS_ERR(npage[i])) {
- err = PTR_ERR(npage[i]);
+ nfolio[i] = f2fs_get_node_folio_ra(parent, offset[i - 1]);
+ if (IS_ERR(nfolio[i])) {
+ err = PTR_ERR(nfolio[i]);
goto release_pages;
}
done = true;
}
if (i == 1) {
- dn->inode_page_locked = false;
- unlock_page(parent);
+ dn->inode_folio_locked = false;
+ folio_unlock(parent);
} else {
- f2fs_put_page(parent, 1);
+ f2fs_folio_put(parent, true);
}
if (!done) {
- npage[i] = f2fs_get_node_page(sbi, nids[i]);
- if (IS_ERR(npage[i])) {
- err = PTR_ERR(npage[i]);
- f2fs_put_page(npage[0], 0);
+ nfolio[i] = f2fs_get_node_folio(sbi, nids[i],
+ NODE_TYPE_NON_INODE);
+ if (IS_ERR(nfolio[i])) {
+ err = PTR_ERR(nfolio[i]);
+ f2fs_folio_put(nfolio[0], false);
goto release_out;
}
}
if (i < level) {
- parent = npage[i];
+ parent = nfolio[i];
nids[i + 1] = get_nid(parent, offset[i], false);
}
}
dn->nid = nids[level];
dn->ofs_in_node = offset[level];
- dn->node_page = npage[level];
+ dn->node_folio = nfolio[level];
dn->data_blkaddr = f2fs_data_blkaddr(dn);
if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
@@ -868,9 +912,9 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
if (!c_len)
goto out;
- blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node);
+ blkaddr = data_blkaddr(dn->inode, dn->node_folio, ofs_in_node);
if (blkaddr == COMPRESS_ADDR)
- blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ blkaddr = data_blkaddr(dn->inode, dn->node_folio,
ofs_in_node + 1);
f2fs_update_read_extent_tree_range_compressed(dn->inode,
@@ -880,12 +924,12 @@ out:
return 0;
release_pages:
- f2fs_put_page(parent, 1);
+ f2fs_folio_put(parent, true);
if (i > 1)
- f2fs_put_page(npage[0], 0);
+ f2fs_folio_put(nfolio[0], false);
release_out:
- dn->inode_page = NULL;
- dn->node_page = NULL;
+ dn->inode_folio = NULL;
+ dn->node_folio = NULL;
if (err == -ENOENT) {
dn->cur_level = i;
dn->max_level = level;
@@ -916,7 +960,7 @@ static int truncate_node(struct dnode_of_data *dn)
}
/* Deallocate node address */
- f2fs_invalidate_blocks(sbi, ni.blk_addr);
+ f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
set_node_addr(sbi, &ni, NULL_ADDR, false);
@@ -926,16 +970,16 @@ static int truncate_node(struct dnode_of_data *dn)
f2fs_inode_synced(dn->inode);
}
- clear_node_page_dirty(dn->node_page);
+ clear_node_folio_dirty(dn->node_folio);
set_sbi_flag(sbi, SBI_IS_DIRTY);
- index = page_folio(dn->node_page)->index;
- f2fs_put_page(dn->node_page, 1);
+ index = dn->node_folio->index;
+ f2fs_folio_put(dn->node_folio, true);
invalidate_mapping_pages(NODE_MAPPING(sbi),
index, index);
- dn->node_page = NULL;
+ dn->node_folio = NULL;
trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
return 0;
@@ -944,35 +988,35 @@ static int truncate_node(struct dnode_of_data *dn)
static int truncate_dnode(struct dnode_of_data *dn)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
- struct page *page;
+ struct folio *folio;
int err;
if (dn->nid == 0)
return 1;
/* get direct node */
- page = f2fs_get_node_page(sbi, dn->nid);
- if (PTR_ERR(page) == -ENOENT)
+ folio = f2fs_get_node_folio(sbi, dn->nid, NODE_TYPE_NON_INODE);
+ if (PTR_ERR(folio) == -ENOENT)
return 1;
- else if (IS_ERR(page))
- return PTR_ERR(page);
+ else if (IS_ERR(folio))
+ return PTR_ERR(folio);
- if (IS_INODE(page) || ino_of_node(page) != dn->inode->i_ino) {
+ if (IS_INODE(folio) || ino_of_node(folio) != dn->inode->i_ino) {
f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
- dn->inode->i_ino, dn->nid, ino_of_node(page));
+ dn->inode->i_ino, dn->nid, ino_of_node(folio));
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return -EFSCORRUPTED;
}
/* Make dnode_of_data for parameter */
- dn->node_page = page;
+ dn->node_folio = folio;
dn->ofs_in_node = 0;
f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
err = truncate_node(dn);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return err;
}
@@ -983,7 +1027,7 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
int ofs, int depth)
{
struct dnode_of_data rdn = *dn;
- struct page *page;
+ struct folio *folio;
struct f2fs_node *rn;
nid_t child_nid;
unsigned int child_nofs;
@@ -995,15 +1039,16 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
- page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
- if (IS_ERR(page)) {
- trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
- return PTR_ERR(page);
+ folio = f2fs_get_node_folio(F2FS_I_SB(dn->inode), dn->nid,
+ NODE_TYPE_NON_INODE);
+ if (IS_ERR(folio)) {
+ trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(folio));
+ return PTR_ERR(folio);
}
- f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);
+ f2fs_ra_node_pages(folio, ofs, NIDS_PER_BLOCK);
- rn = F2FS_NODE(page);
+ rn = F2FS_NODE(folio);
if (depth < 3) {
for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
child_nid = le32_to_cpu(rn->in.nid[i]);
@@ -1013,7 +1058,7 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
ret = truncate_dnode(&rdn);
if (ret < 0)
goto out_err;
- if (set_nid(page, i, 0, false))
+ if (set_nid(folio, i, 0, false))
dn->node_changed = true;
}
} else {
@@ -1027,7 +1072,7 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
rdn.nid = child_nid;
ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
if (ret == (NIDS_PER_BLOCK + 1)) {
- if (set_nid(page, i, 0, false))
+ if (set_nid(folio, i, 0, false))
dn->node_changed = true;
child_nofs += ret;
} else if (ret < 0 && ret != -ENOENT) {
@@ -1039,19 +1084,19 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
if (!ofs) {
/* remove current indirect node */
- dn->node_page = page;
+ dn->node_folio = folio;
ret = truncate_node(dn);
if (ret)
goto out_err;
freed++;
} else {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
}
trace_f2fs_truncate_nodes_exit(dn->inode, freed);
return freed;
out_err:
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
trace_f2fs_truncate_nodes_exit(dn->inode, ret);
return ret;
}
@@ -1059,59 +1104,60 @@ out_err:
static int truncate_partial_nodes(struct dnode_of_data *dn,
struct f2fs_inode *ri, int *offset, int depth)
{
- struct page *pages[2];
+ struct folio *folios[2];
nid_t nid[3];
nid_t child_nid;
int err = 0;
int i;
int idx = depth - 2;
- nid[0] = get_nid(dn->inode_page, offset[0], true);
+ nid[0] = get_nid(dn->inode_folio, offset[0], true);
if (!nid[0])
return 0;
/* get indirect nodes in the path */
for (i = 0; i < idx + 1; i++) {
/* reference count'll be increased */
- pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
- if (IS_ERR(pages[i])) {
- err = PTR_ERR(pages[i]);
+ folios[i] = f2fs_get_node_folio(F2FS_I_SB(dn->inode), nid[i],
+ NODE_TYPE_NON_INODE);
+ if (IS_ERR(folios[i])) {
+ err = PTR_ERR(folios[i]);
idx = i - 1;
goto fail;
}
- nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
+ nid[i + 1] = get_nid(folios[i], offset[i + 1], false);
}
- f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
+ f2fs_ra_node_pages(folios[idx], offset[idx + 1], NIDS_PER_BLOCK);
/* free direct nodes linked to a partial indirect node */
for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
- child_nid = get_nid(pages[idx], i, false);
+ child_nid = get_nid(folios[idx], i, false);
if (!child_nid)
continue;
dn->nid = child_nid;
err = truncate_dnode(dn);
if (err < 0)
goto fail;
- if (set_nid(pages[idx], i, 0, false))
+ if (set_nid(folios[idx], i, 0, false))
dn->node_changed = true;
}
if (offset[idx + 1] == 0) {
- dn->node_page = pages[idx];
+ dn->node_folio = folios[idx];
dn->nid = nid[idx];
err = truncate_node(dn);
if (err)
goto fail;
} else {
- f2fs_put_page(pages[idx], 1);
+ f2fs_folio_put(folios[idx], true);
}
offset[idx]++;
offset[idx + 1] = 0;
idx--;
fail:
for (i = idx; i >= 0; i--)
- f2fs_put_page(pages[i], 1);
+ f2fs_folio_put(folios[i], true);
trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
@@ -1129,26 +1175,33 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
unsigned int nofs = 0;
struct f2fs_inode *ri;
struct dnode_of_data dn;
- struct page *page;
+ struct folio *folio;
trace_f2fs_truncate_inode_blocks_enter(inode, from);
level = get_node_path(inode, from, offset, noffset);
- if (level < 0) {
+ if (level <= 0) {
+ if (!level) {
+ level = -EFSCORRUPTED;
+ f2fs_err(sbi, "%s: inode ino=%lx has corrupted node block, from:%lu addrs:%u",
+ __func__, inode->i_ino,
+ from, ADDRS_PER_INODE(inode));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ }
trace_f2fs_truncate_inode_blocks_exit(inode, level);
return level;
}
- page = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(page)) {
- trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
- return PTR_ERR(page);
+ folio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(folio)) {
+ trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(folio));
+ return PTR_ERR(folio);
}
- set_new_dnode(&dn, inode, page, NULL, 0);
- unlock_page(page);
+ set_new_dnode(&dn, inode, folio, NULL, 0);
+ folio_unlock(folio);
- ri = F2FS_INODE(page);
+ ri = F2FS_INODE(folio);
switch (level) {
case 0:
case 1:
@@ -1177,7 +1230,7 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
skip_partial:
while (cont) {
- dn.nid = get_nid(page, offset[0], true);
+ dn.nid = get_nid(folio, offset[0], true);
switch (offset[0]) {
case NODE_DIR1_BLOCK:
case NODE_DIR2_BLOCK:
@@ -1198,7 +1251,7 @@ skip_partial:
BUG();
}
if (err == -ENOENT) {
- set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
+ set_sbi_flag(F2FS_F_SB(folio), SBI_NEED_FSCK);
f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
f2fs_err_ratelimited(sbi,
"truncate node fail, ino:%lu, nid:%u, "
@@ -1209,18 +1262,18 @@ skip_partial:
}
if (err < 0)
goto fail;
- if (offset[1] == 0 && get_nid(page, offset[0], true)) {
- lock_page(page);
- BUG_ON(page->mapping != NODE_MAPPING(sbi));
- set_nid(page, offset[0], 0, true);
- unlock_page(page);
+ if (offset[1] == 0 && get_nid(folio, offset[0], true)) {
+ folio_lock(folio);
+ BUG_ON(!is_node_folio(folio));
+ set_nid(folio, offset[0], 0, true);
+ folio_unlock(folio);
}
offset[1] = 0;
offset[0]++;
nofs += err;
}
fail:
- f2fs_put_page(page, 0);
+ f2fs_folio_put(folio, false);
trace_f2fs_truncate_inode_blocks_exit(inode, err);
return err > 0 ? 0 : err;
}
@@ -1231,20 +1284,20 @@ int f2fs_truncate_xattr_node(struct inode *inode)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
nid_t nid = F2FS_I(inode)->i_xattr_nid;
struct dnode_of_data dn;
- struct page *npage;
+ struct folio *nfolio;
int err;
if (!nid)
return 0;
- npage = f2fs_get_node_page(sbi, nid);
- if (IS_ERR(npage))
- return PTR_ERR(npage);
+ nfolio = f2fs_get_xnode_folio(sbi, nid);
+ if (IS_ERR(nfolio))
+ return PTR_ERR(nfolio);
- set_new_dnode(&dn, inode, NULL, npage, nid);
+ set_new_dnode(&dn, inode, NULL, nfolio, nid);
err = truncate_node(&dn);
if (err) {
- f2fs_put_page(npage, 1);
+ f2fs_folio_put(nfolio, true);
return err;
}
@@ -1274,8 +1327,9 @@ int f2fs_remove_inode_page(struct inode *inode)
}
/* remove potential inline_data blocks */
- if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode))
+ if (!IS_DEVICE_ALIASING(inode) &&
+ (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)))
f2fs_truncate_data_blocks_range(&dn, 1);
/* 0 is possible, after f2fs_new_inode() has failed */
@@ -1300,30 +1354,30 @@ int f2fs_remove_inode_page(struct inode *inode)
return 0;
}
-struct page *f2fs_new_inode_page(struct inode *inode)
+struct folio *f2fs_new_inode_folio(struct inode *inode)
{
struct dnode_of_data dn;
/* allocate inode page for new inode */
set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
- /* caller should f2fs_put_page(page, 1); */
- return f2fs_new_node_page(&dn, 0);
+ /* caller should f2fs_folio_put(folio, true); */
+ return f2fs_new_node_folio(&dn, 0);
}
-struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
+struct folio *f2fs_new_node_folio(struct dnode_of_data *dn, unsigned int ofs)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct node_info new_ni;
- struct page *page;
+ struct folio *folio;
int err;
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return ERR_PTR(-EPERM);
- page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
- if (!page)
- return ERR_PTR(-ENOMEM);
+ folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), dn->nid, false);
+ if (IS_ERR(folio))
+ return folio;
if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
goto fail;
@@ -1339,7 +1393,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
dec_valid_node_count(sbi, dn->inode, !ofs);
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn_ratelimited(sbi,
- "f2fs_new_node_page: inconsistent nat entry, "
+ "f2fs_new_node_folio: inconsistent nat entry, "
"ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
new_ni.ino, new_ni.nid, new_ni.blk_addr,
new_ni.version, new_ni.flag);
@@ -1354,12 +1408,12 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
new_ni.version = 0;
set_node_addr(sbi, &new_ni, NEW_ADDR, false);
- f2fs_wait_on_page_writeback(page, NODE, true, true);
- fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
- set_cold_node(page, S_ISDIR(dn->inode->i_mode));
- if (!PageUptodate(page))
- SetPageUptodate(page);
- if (set_page_dirty(page))
+ f2fs_folio_wait_writeback(folio, NODE, true, true);
+ fill_node_footer(folio, dn->nid, dn->inode->i_ino, ofs, true);
+ set_cold_node(folio, S_ISDIR(dn->inode->i_mode));
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ if (folio_mark_dirty(folio))
dn->node_changed = true;
if (f2fs_has_xattr_block(ofs))
@@ -1367,35 +1421,34 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
if (ofs == 0)
inc_valid_inode_count(sbi);
- return page;
+ return folio;
fail:
- clear_node_page_dirty(page);
- f2fs_put_page(page, 1);
+ clear_node_folio_dirty(folio);
+ f2fs_folio_put(folio, true);
return ERR_PTR(err);
}
/*
* Caller should do after getting the following values.
- * 0: f2fs_put_page(page, 0)
- * LOCKED_PAGE or error: f2fs_put_page(page, 1)
+ * 0: f2fs_folio_put(folio, false)
+ * LOCKED_PAGE or error: f2fs_folio_put(folio, true)
*/
-static int read_node_page(struct page *page, blk_opf_t op_flags)
+static int read_node_folio(struct folio *folio, blk_opf_t op_flags)
{
- struct folio *folio = page_folio(page);
- struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
struct node_info ni;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = NODE,
.op = REQ_OP_READ,
.op_flags = op_flags,
- .page = page,
+ .folio = folio,
.encrypted_page = NULL,
};
int err;
if (folio_test_uptodate(folio)) {
- if (!f2fs_inode_chksum_verify(sbi, page)) {
+ if (!f2fs_inode_chksum_verify(sbi, folio)) {
folio_clear_uptodate(folio);
return -EFSBADCRC;
}
@@ -1427,7 +1480,7 @@ static int read_node_page(struct page *page, blk_opf_t op_flags)
*/
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
- struct page *apage;
+ struct folio *afolio;
int err;
if (!nid)
@@ -1435,22 +1488,59 @@ void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
if (f2fs_check_nid_range(sbi, nid))
return;
- apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
- if (apage)
+ afolio = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
+ if (afolio)
return;
- apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
- if (!apage)
+ afolio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false);
+ if (IS_ERR(afolio))
return;
- err = read_node_page(apage, REQ_RAHEAD);
- f2fs_put_page(apage, err ? 1 : 0);
+ err = read_node_folio(afolio, REQ_RAHEAD);
+ f2fs_folio_put(afolio, err ? true : false);
+}
+
+static int sanity_check_node_footer(struct f2fs_sb_info *sbi,
+ struct folio *folio, pgoff_t nid,
+ enum node_type ntype)
+{
+ if (unlikely(nid != nid_of_node(folio)))
+ goto out_err;
+
+ switch (ntype) {
+ case NODE_TYPE_INODE:
+ if (!IS_INODE(folio))
+ goto out_err;
+ break;
+ case NODE_TYPE_XATTR:
+ if (!f2fs_has_xattr_block(ofs_of_node(folio)))
+ goto out_err;
+ break;
+ case NODE_TYPE_NON_INODE:
+ if (IS_INODE(folio))
+ goto out_err;
+ break;
+ default:
+ break;
+ }
+ if (time_to_inject(sbi, FAULT_INCONSISTENT_FOOTER))
+ goto out_err;
+ return 0;
+out_err:
+ f2fs_warn(sbi, "inconsistent node block, node_type:%d, nid:%lu, "
+ "node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
+ ntype, nid, nid_of_node(folio), ino_of_node(folio),
+ ofs_of_node(folio), cpver_of_node(folio),
+ next_blkaddr_of_node(folio));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
+ return -EFSCORRUPTED;
}
-static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
- struct page *parent, int start)
+static struct folio *__get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid,
+ struct folio *parent, int start, enum node_type ntype)
{
- struct page *page;
+ struct folio *folio;
int err;
if (!nid)
@@ -1458,75 +1548,77 @@ static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
if (f2fs_check_nid_range(sbi, nid))
return ERR_PTR(-EINVAL);
repeat:
- page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
- if (!page)
- return ERR_PTR(-ENOMEM);
+ folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false);
+ if (IS_ERR(folio))
+ return folio;
- err = read_node_page(page, 0);
- if (err < 0) {
+ err = read_node_folio(folio, 0);
+ if (err < 0)
goto out_put_err;
- } else if (err == LOCKED_PAGE) {
- err = 0;
+ if (err == LOCKED_PAGE)
goto page_hit;
- }
if (parent)
f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
- lock_page(page);
+ folio_lock(folio);
- if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
- f2fs_put_page(page, 1);
+ if (unlikely(!is_node_folio(folio))) {
+ f2fs_folio_put(folio, true);
goto repeat;
}
- if (unlikely(!PageUptodate(page))) {
+ if (unlikely(!folio_test_uptodate(folio))) {
err = -EIO;
- goto out_err;
+ goto out_put_err;
}
- if (!f2fs_inode_chksum_verify(sbi, page)) {
+ if (!f2fs_inode_chksum_verify(sbi, folio)) {
err = -EFSBADCRC;
goto out_err;
}
page_hit:
- if (likely(nid == nid_of_node(page)))
- return page;
-
- f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
- nid, nid_of_node(page), ino_of_node(page),
- ofs_of_node(page), cpver_of_node(page),
- next_blkaddr_of_node(page));
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
- err = -EFSCORRUPTED;
+ err = sanity_check_node_footer(sbi, folio, nid, ntype);
+ if (!err)
+ return folio;
out_err:
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
out_put_err:
- /* ENOENT comes from read_node_page which is not an error. */
+ /* -ENOENT from read_node_folio() is not treated as an error. */
if (err != -ENOENT)
- f2fs_handle_page_eio(sbi, page_folio(page), NODE);
- f2fs_put_page(page, 1);
+ f2fs_handle_page_eio(sbi, folio, NODE);
+ f2fs_folio_put(folio, true);
return ERR_PTR(err);
}
-struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
+struct folio *f2fs_get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid,
+ enum node_type node_type)
{
- return __get_node_page(sbi, nid, NULL, 0);
+ return __get_node_folio(sbi, nid, NULL, 0, node_type);
}
-struct page *f2fs_get_node_page_ra(struct page *parent, int start)
+struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino)
{
- struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
+ return __get_node_folio(sbi, ino, NULL, 0, NODE_TYPE_INODE);
+}
+
+struct folio *f2fs_get_xnode_folio(struct f2fs_sb_info *sbi, pgoff_t xnid)
+{
+ return __get_node_folio(sbi, xnid, NULL, 0, NODE_TYPE_XATTR);
+}
+
+static struct folio *f2fs_get_node_folio_ra(struct folio *parent, int start)
+{
+ struct f2fs_sb_info *sbi = F2FS_F_SB(parent);
nid_t nid = get_nid(parent, start, false);
- return __get_node_page(sbi, nid, parent, start);
+ return __get_node_folio(sbi, nid, parent, start, NODE_TYPE_REGULAR);
}
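
The typed getters above let __get_node_folio() check the node footer against the caller's expectation via sanity_check_node_footer(), instead of only comparing nids. A short usage sketch, assuming the folio is still returned locked as before (only the wrapper function below is invented):

static int example_read_xattr_block(struct f2fs_sb_info *sbi, struct inode *inode)
{
	struct folio *folio = f2fs_get_xnode_folio(sbi, F2FS_I(inode)->i_xattr_nid);

	if (IS_ERR(folio))
		return PTR_ERR(folio);	/* e.g. -EFSCORRUPTED if the footer is not an xattr block */

	/* ... parse xattrs from folio_address(folio) ... */

	f2fs_folio_put(folio, true);	/* unlock and release */
	return 0;
}
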
static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
struct inode *inode;
- struct page *page;
+ struct folio *folio;
int ret;
/* should flush inline_data before evict_inode */
@@ -1534,36 +1626,36 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
if (!inode)
return;
- page = f2fs_pagecache_get_page(inode->i_mapping, 0,
+ folio = f2fs_filemap_get_folio(inode->i_mapping, 0,
FGP_LOCK|FGP_NOWAIT, 0);
- if (!page)
+ if (IS_ERR(folio))
goto iput_out;
- if (!PageUptodate(page))
- goto page_out;
+ if (!folio_test_uptodate(folio))
+ goto folio_out;
- if (!PageDirty(page))
- goto page_out;
+ if (!folio_test_dirty(folio))
+ goto folio_out;
- if (!clear_page_dirty_for_io(page))
- goto page_out;
+ if (!folio_clear_dirty_for_io(folio))
+ goto folio_out;
- ret = f2fs_write_inline_data(inode, page_folio(page));
+ ret = f2fs_write_inline_data(inode, folio);
inode_dec_dirty_pages(inode);
f2fs_remove_dirty_inode(inode);
if (ret)
- set_page_dirty(page);
-page_out:
- f2fs_put_page(page, 1);
+ folio_mark_dirty(folio);
+folio_out:
+ f2fs_folio_put(folio, true);
iput_out:
iput(inode);
}
-static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
+static struct folio *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
pgoff_t index;
struct folio_batch fbatch;
- struct page *last_page = NULL;
+ struct folio *last_folio = NULL;
int nr_folios;
folio_batch_init(&fbatch);
@@ -1575,62 +1667,61 @@ static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
int i;
for (i = 0; i < nr_folios; i++) {
- struct page *page = &fbatch.folios[i]->page;
+ struct folio *folio = fbatch.folios[i];
if (unlikely(f2fs_cp_error(sbi))) {
- f2fs_put_page(last_page, 0);
+ f2fs_folio_put(last_folio, false);
folio_batch_release(&fbatch);
return ERR_PTR(-EIO);
}
- if (!IS_DNODE(page) || !is_cold_node(page))
+ if (!IS_DNODE(folio) || !is_cold_node(folio))
continue;
- if (ino_of_node(page) != ino)
+ if (ino_of_node(folio) != ino)
continue;
- lock_page(page);
+ folio_lock(folio);
- if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+ if (unlikely(!is_node_folio(folio))) {
continue_unlock:
- unlock_page(page);
+ folio_unlock(folio);
continue;
}
- if (ino_of_node(page) != ino)
+ if (ino_of_node(folio) != ino)
goto continue_unlock;
- if (!PageDirty(page)) {
+ if (!folio_test_dirty(folio)) {
/* someone wrote it for us */
goto continue_unlock;
}
- if (last_page)
- f2fs_put_page(last_page, 0);
+ if (last_folio)
+ f2fs_folio_put(last_folio, false);
- get_page(page);
- last_page = page;
- unlock_page(page);
+ folio_get(folio);
+ last_folio = folio;
+ folio_unlock(folio);
}
folio_batch_release(&fbatch);
cond_resched();
}
- return last_page;
+ return last_folio;
}
-static int __write_node_page(struct page *page, bool atomic, bool *submitted,
+static bool __write_node_folio(struct folio *folio, bool atomic, bool *submitted,
struct writeback_control *wbc, bool do_balance,
enum iostat_type io_type, unsigned int *seq_id)
{
- struct f2fs_sb_info *sbi = F2FS_P_SB(page);
- struct folio *folio = page_folio(page);
+ struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
nid_t nid;
struct node_info ni;
struct f2fs_io_info fio = {
.sbi = sbi,
- .ino = ino_of_node(page),
+ .ino = ino_of_node(folio),
.type = NODE,
.op = REQ_OP_WRITE,
.op_flags = wbc_to_write_flags(wbc),
- .page = page,
+ .folio = folio,
.encrypted_page = NULL,
.submitted = 0,
.io_type = io_type,
@@ -1647,7 +1738,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
folio_clear_uptodate(folio);
dec_page_count(sbi, F2FS_DIRTY_NODES);
folio_unlock(folio);
- return 0;
+ return true;
}
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
@@ -1655,22 +1746,17 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
wbc->sync_mode == WB_SYNC_NONE &&
- IS_DNODE(page) && is_cold_node(page))
+ IS_DNODE(folio) && is_cold_node(folio))
goto redirty_out;
/* get old block addr of this node page */
- nid = nid_of_node(page);
+ nid = nid_of_node(folio);
f2fs_bug_on(sbi, folio->index != nid);
if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
goto redirty_out;
- if (wbc->for_reclaim) {
- if (!f2fs_down_read_trylock(&sbi->node_write))
- goto redirty_out;
- } else {
- f2fs_down_read(&sbi->node_write);
- }
+ f2fs_down_read(&sbi->node_write);
/* This page is already truncated */
if (unlikely(ni.blk_addr == NULL_ADDR)) {
@@ -1678,7 +1764,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
dec_page_count(sbi, F2FS_DIRTY_NODES);
f2fs_up_read(&sbi->node_write);
folio_unlock(folio);
- return 0;
+ return true;
}
if (__is_valid_data_blkaddr(ni.blk_addr) &&
@@ -1692,8 +1778,8 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
/* should add to global list before clearing PAGECACHE status */
- if (f2fs_in_warm_node_list(sbi, page)) {
- seq = f2fs_add_fsync_node_entry(sbi, page);
+ if (f2fs_in_warm_node_list(sbi, folio)) {
+ seq = f2fs_add_fsync_node_entry(sbi, folio);
if (seq_id)
*seq_id = seq;
}
@@ -1702,15 +1788,10 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
fio.old_blkaddr = ni.blk_addr;
f2fs_do_write_node_page(nid, &fio);
- set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
+ set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(folio));
dec_page_count(sbi, F2FS_DIRTY_NODES);
f2fs_up_read(&sbi->node_write);
- if (wbc->for_reclaim) {
- f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
- submitted = NULL;
- }
-
folio_unlock(folio);
if (unlikely(f2fs_cp_error(sbi))) {
@@ -1722,14 +1803,15 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
if (do_balance)
f2fs_balance_fs(sbi, false);
- return 0;
+ return true;
redirty_out:
folio_redirty_for_writepage(wbc, folio);
- return AOP_WRITEPAGE_ACTIVATE;
+ folio_unlock(folio);
+ return false;
}
-int f2fs_move_node_page(struct page *node_page, int gc_type)
+int f2fs_move_node_folio(struct folio *node_folio, int gc_type)
{
int err = 0;
@@ -1737,43 +1819,33 @@ int f2fs_move_node_page(struct page *node_page, int gc_type)
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = 1,
- .for_reclaim = 0,
};
- f2fs_wait_on_page_writeback(node_page, NODE, true, true);
+ f2fs_folio_wait_writeback(node_folio, NODE, true, true);
- set_page_dirty(node_page);
+ folio_mark_dirty(node_folio);
- if (!clear_page_dirty_for_io(node_page)) {
+ if (!folio_clear_dirty_for_io(node_folio)) {
err = -EAGAIN;
goto out_page;
}
- if (__write_node_page(node_page, false, NULL,
- &wbc, false, FS_GC_NODE_IO, NULL)) {
+ if (!__write_node_folio(node_folio, false, NULL,
+ &wbc, false, FS_GC_NODE_IO, NULL))
err = -EAGAIN;
- unlock_page(node_page);
- }
goto release_page;
} else {
/* set page dirty and write it */
- if (!folio_test_writeback(page_folio(node_page)))
- set_page_dirty(node_page);
+ if (!folio_test_writeback(node_folio))
+ folio_mark_dirty(node_folio);
}
out_page:
- unlock_page(node_page);
+ folio_unlock(node_folio);
release_page:
- f2fs_put_page(node_page, 0);
+ f2fs_folio_put(node_folio, false);
return err;
}
-static int f2fs_write_node_page(struct page *page,
- struct writeback_control *wbc)
-{
- return __write_node_page(page, false, NULL, wbc, false,
- FS_NODE_IO, NULL);
-}
-
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
struct writeback_control *wbc, bool atomic,
unsigned int *seq_id)
@@ -1781,16 +1853,16 @@ int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
pgoff_t index;
struct folio_batch fbatch;
int ret = 0;
- struct page *last_page = NULL;
+ struct folio *last_folio = NULL;
bool marked = false;
nid_t ino = inode->i_ino;
int nr_folios;
int nwritten = 0;
if (atomic) {
- last_page = last_fsync_dnode(sbi, ino);
- if (IS_ERR_OR_NULL(last_page))
- return PTR_ERR_OR_ZERO(last_page);
+ last_folio = last_fsync_dnode(sbi, ino);
+ if (IS_ERR_OR_NULL(last_folio))
+ return PTR_ERR_OR_ZERO(last_folio);
}
retry:
folio_batch_init(&fbatch);
@@ -1802,96 +1874,94 @@ retry:
int i;
for (i = 0; i < nr_folios; i++) {
- struct page *page = &fbatch.folios[i]->page;
+ struct folio *folio = fbatch.folios[i];
bool submitted = false;
if (unlikely(f2fs_cp_error(sbi))) {
- f2fs_put_page(last_page, 0);
+ f2fs_folio_put(last_folio, false);
folio_batch_release(&fbatch);
ret = -EIO;
goto out;
}
- if (!IS_DNODE(page) || !is_cold_node(page))
+ if (!IS_DNODE(folio) || !is_cold_node(folio))
continue;
- if (ino_of_node(page) != ino)
+ if (ino_of_node(folio) != ino)
continue;
- lock_page(page);
+ folio_lock(folio);
- if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+ if (unlikely(!is_node_folio(folio))) {
continue_unlock:
- unlock_page(page);
+ folio_unlock(folio);
continue;
}
- if (ino_of_node(page) != ino)
+ if (ino_of_node(folio) != ino)
goto continue_unlock;
- if (!PageDirty(page) && page != last_page) {
+ if (!folio_test_dirty(folio) && folio != last_folio) {
/* someone wrote it for us */
goto continue_unlock;
}
- f2fs_wait_on_page_writeback(page, NODE, true, true);
+ f2fs_folio_wait_writeback(folio, NODE, true, true);
- set_fsync_mark(page, 0);
- set_dentry_mark(page, 0);
+ set_fsync_mark(folio, 0);
+ set_dentry_mark(folio, 0);
- if (!atomic || page == last_page) {
- set_fsync_mark(page, 1);
+ if (!atomic || folio == last_folio) {
+ set_fsync_mark(folio, 1);
percpu_counter_inc(&sbi->rf_node_block_count);
- if (IS_INODE(page)) {
+ if (IS_INODE(folio)) {
if (is_inode_flag_set(inode,
FI_DIRTY_INODE))
- f2fs_update_inode(inode, page);
- set_dentry_mark(page,
+ f2fs_update_inode(inode, folio);
+ set_dentry_mark(folio,
f2fs_need_dentry_mark(sbi, ino));
}
/* may be written by other thread */
- if (!PageDirty(page))
- set_page_dirty(page);
+ if (!folio_test_dirty(folio))
+ folio_mark_dirty(folio);
}
- if (!clear_page_dirty_for_io(page))
+ if (!folio_clear_dirty_for_io(folio))
goto continue_unlock;
- ret = __write_node_page(page, atomic &&
- page == last_page,
+ if (!__write_node_folio(folio, atomic &&
+ folio == last_folio,
&submitted, wbc, true,
- FS_NODE_IO, seq_id);
- if (ret) {
- unlock_page(page);
- f2fs_put_page(last_page, 0);
- break;
- } else if (submitted) {
- nwritten++;
+ FS_NODE_IO, seq_id)) {
+ f2fs_folio_put(last_folio, false);
+ folio_batch_release(&fbatch);
+ ret = -EIO;
+ goto out;
}
+ if (submitted)
+ nwritten++;
- if (page == last_page) {
- f2fs_put_page(page, 0);
+ if (folio == last_folio) {
+ f2fs_folio_put(folio, false);
+ folio_batch_release(&fbatch);
marked = true;
- break;
+ goto out;
}
}
folio_batch_release(&fbatch);
cond_resched();
-
- if (ret || marked)
- break;
}
- if (!ret && atomic && !marked) {
+ if (atomic && !marked) {
f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
- ino, page_folio(last_page)->index);
- lock_page(last_page);
- f2fs_wait_on_page_writeback(last_page, NODE, true, true);
- set_page_dirty(last_page);
- unlock_page(last_page);
+ ino, last_folio->index);
+ folio_lock(last_folio);
+ f2fs_folio_wait_writeback(last_folio, NODE, true, true);
+ folio_mark_dirty(last_folio);
+ folio_unlock(last_folio);
goto retry;
}
out:
if (nwritten)
f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
- return ret ? -EIO : 0;
+ return ret;
}
static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
@@ -1918,18 +1988,18 @@ static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
return 1;
}
-static bool flush_dirty_inode(struct page *page)
+static bool flush_dirty_inode(struct folio *folio)
{
- struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
struct inode *inode;
- nid_t ino = ino_of_node(page);
+ nid_t ino = ino_of_node(folio);
inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
if (!inode)
return false;
- f2fs_update_inode(inode, page);
- unlock_page(page);
+ f2fs_update_inode(inode, folio);
+ folio_unlock(folio);
iput(inode);
return true;
@@ -1949,32 +2019,27 @@ void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
int i;
for (i = 0; i < nr_folios; i++) {
- struct page *page = &fbatch.folios[i]->page;
+ struct folio *folio = fbatch.folios[i];
- if (!IS_INODE(page))
+ if (!IS_INODE(folio))
continue;
- lock_page(page);
+ folio_lock(folio);
- if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
-continue_unlock:
- unlock_page(page);
- continue;
- }
-
- if (!PageDirty(page)) {
- /* someone wrote it for us */
- goto continue_unlock;
- }
+ if (unlikely(!is_node_folio(folio)))
+ goto unlock;
+ if (!folio_test_dirty(folio))
+ goto unlock;
/* flush inline_data, if it's async context. */
- if (page_private_inline(page)) {
- clear_page_private_inline(page);
- unlock_page(page);
- flush_inline_data(sbi, ino_of_node(page));
+ if (folio_test_f2fs_inline(folio)) {
+ folio_clear_f2fs_inline(folio);
+ folio_unlock(folio);
+ flush_inline_data(sbi, ino_of_node(folio));
continue;
}
- unlock_page(page);
+unlock:
+ folio_unlock(folio);
}
folio_batch_release(&fbatch);
cond_resched();
@@ -2003,7 +2068,7 @@ next_step:
int i;
for (i = 0; i < nr_folios; i++) {
- struct page *page = &fbatch.folios[i]->page;
+ struct folio *folio = fbatch.folios[i];
bool submitted = false;
/* give a priority to WB_SYNC threads */
@@ -2019,27 +2084,27 @@ next_step:
* 1. dentry dnodes
* 2. file dnodes
*/
- if (step == 0 && IS_DNODE(page))
+ if (step == 0 && IS_DNODE(folio))
continue;
- if (step == 1 && (!IS_DNODE(page) ||
- is_cold_node(page)))
+ if (step == 1 && (!IS_DNODE(folio) ||
+ is_cold_node(folio)))
continue;
- if (step == 2 && (!IS_DNODE(page) ||
- !is_cold_node(page)))
+ if (step == 2 && (!IS_DNODE(folio) ||
+ !is_cold_node(folio)))
continue;
lock_node:
if (wbc->sync_mode == WB_SYNC_ALL)
- lock_page(page);
- else if (!trylock_page(page))
+ folio_lock(folio);
+ else if (!folio_trylock(folio))
continue;
- if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+ if (unlikely(!is_node_folio(folio))) {
continue_unlock:
- unlock_page(page);
+ folio_unlock(folio);
continue;
}
- if (!PageDirty(page)) {
+ if (!folio_test_dirty(folio)) {
/* someone wrote it for us */
goto continue_unlock;
}
@@ -2049,30 +2114,32 @@ continue_unlock:
goto write_node;
/* flush inline_data */
- if (page_private_inline(page)) {
- clear_page_private_inline(page);
- unlock_page(page);
- flush_inline_data(sbi, ino_of_node(page));
+ if (folio_test_f2fs_inline(folio)) {
+ folio_clear_f2fs_inline(folio);
+ folio_unlock(folio);
+ flush_inline_data(sbi, ino_of_node(folio));
goto lock_node;
}
/* flush dirty inode */
- if (IS_INODE(page) && flush_dirty_inode(page))
+ if (IS_INODE(folio) && flush_dirty_inode(folio))
goto lock_node;
write_node:
- f2fs_wait_on_page_writeback(page, NODE, true, true);
+ f2fs_folio_wait_writeback(folio, NODE, true, true);
- if (!clear_page_dirty_for_io(page))
+ if (!folio_clear_dirty_for_io(folio))
goto continue_unlock;
- set_fsync_mark(page, 0);
- set_dentry_mark(page, 0);
+ set_fsync_mark(folio, 0);
+ set_dentry_mark(folio, 0);
- ret = __write_node_page(page, false, &submitted,
- wbc, do_balance, io_type, NULL);
- if (ret)
- unlock_page(page);
- else if (submitted)
+ if (!__write_node_folio(folio, false, &submitted,
+ wbc, do_balance, io_type, NULL)) {
+ folio_batch_release(&fbatch);
+ ret = -EIO;
+ goto out;
+ }
+ if (submitted)
nwritten++;
if (--wbc->nr_to_write == 0)
@@ -2107,12 +2174,13 @@ int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
unsigned int seq_id)
{
struct fsync_node_entry *fn;
- struct page *page;
struct list_head *head = &sbi->fsync_node_list;
unsigned long flags;
unsigned int cur_seq_id = 0;
while (seq_id && cur_seq_id < seq_id) {
+ struct folio *folio;
+
spin_lock_irqsave(&sbi->fsync_node_lock, flags);
if (list_empty(head)) {
spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
@@ -2124,13 +2192,13 @@ int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
break;
}
cur_seq_id = fn->seq_id;
- page = fn->page;
- get_page(page);
+ folio = fn->folio;
+ folio_get(folio);
spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
- f2fs_wait_on_page_writeback(page, NODE, true, false);
+ f2fs_folio_wait_writeback(folio, NODE, true, false);
- put_page(page);
+ folio_put(folio);
}
return filemap_check_errors(NODE_MAPPING(sbi));
@@ -2190,12 +2258,12 @@ static bool f2fs_dirty_node_folio(struct address_space *mapping,
if (!folio_test_uptodate(folio))
folio_mark_uptodate(folio);
#ifdef CONFIG_F2FS_CHECK_FS
- if (IS_INODE(&folio->page))
- f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
+ if (IS_INODE(folio))
+ f2fs_inode_chksum_set(F2FS_M_SB(mapping), folio);
#endif
if (filemap_dirty_folio(mapping, folio)) {
inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
- set_page_private_reference(&folio->page);
+ folio_set_f2fs_reference(folio);
return true;
}
return false;
@@ -2205,7 +2273,6 @@ static bool f2fs_dirty_node_folio(struct address_space *mapping,
* Structure of the f2fs node operations
*/
const struct address_space_operations f2fs_node_aops = {
- .writepage = f2fs_write_node_page,
.writepages = f2fs_write_node_pages,
.dirty_folio = f2fs_dirty_node_folio,
.invalidate_folio = f2fs_invalidate_folio,
@@ -2267,24 +2334,6 @@ static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
}
}
-bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi)
-{
- struct f2fs_nm_info *nm_i = NM_I(sbi);
- unsigned int i;
- bool ret = true;
-
- f2fs_down_read(&nm_i->nat_tree_lock);
- for (i = 0; i < nm_i->nat_blocks; i++) {
- if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
- ret = false;
- break;
- }
- }
- f2fs_up_read(&nm_i->nat_tree_lock);
-
- return ret;
-}
-
static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
bool set, bool build)
{
@@ -2316,7 +2365,7 @@ static bool add_free_nid(struct f2fs_sb_info *sbi,
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i, *e;
struct nat_entry *ne;
- int err = -EINVAL;
+ int err;
bool ret = false;
/* 0 nid should not be used */
@@ -2330,7 +2379,10 @@ static bool add_free_nid(struct f2fs_sb_info *sbi,
i->nid = nid;
i->state = FREE_NID;
- radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
+ err = radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
+ f2fs_bug_on(sbi, err);
+
+ err = -EINVAL;
spin_lock(&nm_i->nid_list_lock);
@@ -2349,14 +2401,14 @@ static bool add_free_nid(struct f2fs_sb_info *sbi,
* - __lookup_nat_cache
* - f2fs_add_link
* - f2fs_init_inode_metadata
- * - f2fs_new_inode_page
- * - f2fs_new_node_page
+ * - f2fs_new_inode_folio
+ * - f2fs_new_node_folio
* - set_node_addr
* - f2fs_alloc_nid_done
* - __remove_nid_from_list(PREALLOC_NID)
* - __insert_nid_to_list(FREE_NID)
*/
- ne = __lookup_nat_cache(nm_i, nid);
+ ne = __lookup_nat_cache(nm_i, nid, false);
if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
nat_get_blkaddr(ne) != NULL_ADDR))
goto err_out;
@@ -2403,10 +2455,9 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
}
static int scan_nat_page(struct f2fs_sb_info *sbi,
- struct page *nat_page, nid_t start_nid)
+ struct f2fs_nat_block *nat_blk, nid_t start_nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- struct f2fs_nat_block *nat_blk = page_address(nat_page);
block_t blk_addr;
unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
int i;
@@ -2526,13 +2577,14 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
while (1) {
if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
nm_i->nat_block_bitmap)) {
- struct page *page = get_current_nat_page(sbi, nid);
+ struct folio *folio = get_current_nat_folio(sbi, nid);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
} else {
- ret = scan_nat_page(sbi, page, nid);
- f2fs_put_page(page, 1);
+ ret = scan_nat_page(sbi, folio_address(folio),
+ nid);
+ f2fs_folio_put(folio, true);
}
if (ret) {
@@ -2607,6 +2659,16 @@ retry:
f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
i = list_first_entry(&nm_i->free_nid_list,
struct free_nid, list);
+
+ if (unlikely(is_invalid_nid(sbi, i->nid))) {
+ spin_unlock(&nm_i->nid_list_lock);
+ f2fs_err(sbi, "Corrupted nid %u in free_nid_list",
+ i->nid);
+ f2fs_stop_checkpoint(sbi, false,
+ STOP_CP_REASON_CORRUPTED_NID);
+ return false;
+ }
+
*nid = i->nid;
__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
@@ -2708,18 +2770,18 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
return nr - nr_shrink;
}
-int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
+int f2fs_recover_inline_xattr(struct inode *inode, struct folio *folio)
{
void *src_addr, *dst_addr;
size_t inline_size;
- struct page *ipage;
+ struct folio *ifolio;
struct f2fs_inode *ri;
- ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
- if (IS_ERR(ipage))
- return PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
- ri = F2FS_INODE(page);
+ ri = F2FS_INODE(folio);
if (ri->i_inline & F2FS_INLINE_XATTR) {
if (!f2fs_has_inline_xattr(inode)) {
set_inode_flag(inode, FI_INLINE_XATTR);
@@ -2733,26 +2795,26 @@ int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
goto update_inode;
}
- dst_addr = inline_xattr_addr(inode, ipage);
- src_addr = inline_xattr_addr(inode, page);
+ dst_addr = inline_xattr_addr(inode, ifolio);
+ src_addr = inline_xattr_addr(inode, folio);
inline_size = inline_xattr_size(inode);
- f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
memcpy(dst_addr, src_addr, inline_size);
update_inode:
- f2fs_update_inode(inode, ipage);
- f2fs_put_page(ipage, 1);
+ f2fs_update_inode(inode, ifolio);
+ f2fs_folio_put(ifolio, true);
return 0;
}
-int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
+int f2fs_recover_xattr_data(struct inode *inode, struct folio *folio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
nid_t new_xnid;
struct dnode_of_data dn;
struct node_info ni;
- struct page *xpage;
+ struct folio *xfolio;
int err;
if (!prev_xnid)
@@ -2763,7 +2825,7 @@ int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
if (err)
return err;
- f2fs_invalidate_blocks(sbi, ni.blk_addr);
+ f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
dec_valid_node_count(sbi, inode, false);
set_node_addr(sbi, &ni, NULL_ADDR, false);
@@ -2773,32 +2835,32 @@ recover_xnid:
return -ENOSPC;
set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
- xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
- if (IS_ERR(xpage)) {
+ xfolio = f2fs_new_node_folio(&dn, XATTR_NODE_OFFSET);
+ if (IS_ERR(xfolio)) {
f2fs_alloc_nid_failed(sbi, new_xnid);
- return PTR_ERR(xpage);
+ return PTR_ERR(xfolio);
}
f2fs_alloc_nid_done(sbi, new_xnid);
f2fs_update_inode_page(inode);
/* 3: update and set xattr node page dirty */
- if (page) {
- memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
+ if (folio) {
+ memcpy(F2FS_NODE(xfolio), F2FS_NODE(folio),
VALID_XATTR_BLOCK_SIZE);
- set_page_dirty(xpage);
+ folio_mark_dirty(xfolio);
}
- f2fs_put_page(xpage, 1);
+ f2fs_folio_put(xfolio, true);
return 0;
}
-int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
+int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct folio *folio)
{
struct f2fs_inode *src, *dst;
- nid_t ino = ino_of_node(page);
+ nid_t ino = ino_of_node(folio);
struct node_info old_ni, new_ni;
- struct page *ipage;
+ struct folio *ifolio;
int err;
err = f2fs_get_node_info(sbi, ino, &old_ni, false);
@@ -2808,8 +2870,8 @@ int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
if (unlikely(old_ni.blk_addr != NULL_ADDR))
return -EINVAL;
retry:
- ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
- if (!ipage) {
+ ifolio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), ino, false);
+ if (IS_ERR(ifolio)) {
memalloc_retry_wait(GFP_NOFS);
goto retry;
}
@@ -2817,13 +2879,13 @@ retry:
/* Should not use this inode from free nid list */
remove_free_nid(sbi, ino);
- if (!PageUptodate(ipage))
- SetPageUptodate(ipage);
- fill_node_footer(ipage, ino, ino, 0, true);
- set_cold_node(ipage, false);
+ if (!folio_test_uptodate(ifolio))
+ folio_mark_uptodate(ifolio);
+ fill_node_footer(ifolio, ino, ino, 0, true);
+ set_cold_node(ifolio, false);
- src = F2FS_INODE(page);
- dst = F2FS_INODE(ipage);
+ src = F2FS_INODE(folio);
+ dst = F2FS_INODE(ifolio);
memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
dst->i_size = 0;
@@ -2859,8 +2921,8 @@ retry:
WARN_ON(1);
set_node_addr(sbi, &new_ni, NEW_ADDR, false);
inc_valid_inode_count(sbi);
- set_page_dirty(ipage);
- f2fs_put_page(ipage, 1);
+ folio_mark_dirty(ifolio);
+ f2fs_folio_put(ifolio, true);
return 0;
}
@@ -2884,17 +2946,17 @@ int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
for (idx = addr; idx < addr + nrpages; idx++) {
- struct page *page = f2fs_get_tmp_page(sbi, idx);
+ struct folio *folio = f2fs_get_tmp_folio(sbi, idx);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- rn = F2FS_NODE(page);
+ rn = F2FS_NODE(folio);
sum_entry->nid = rn->footer.nid;
sum_entry->version = 0;
sum_entry->ofs_in_node = 0;
sum_entry++;
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
}
invalidate_mapping_pages(META_MAPPING(sbi), addr,
@@ -2909,6 +2971,7 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
struct f2fs_journal *journal = curseg->journal;
int i;
+ bool init_dirty;
down_write(&curseg->journal_rwsem);
for (i = 0; i < nats_in_cursum(journal); i++) {
@@ -2919,12 +2982,15 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
if (f2fs_check_nid_range(sbi, nid))
continue;
+ init_dirty = false;
+
raw_ne = nat_in_journal(journal, i);
- ne = __lookup_nat_cache(nm_i, nid);
+ ne = __lookup_nat_cache(nm_i, nid, true);
if (!ne) {
+ init_dirty = true;
ne = __alloc_nat_entry(sbi, nid, true);
- __init_nat_entry(nm_i, ne, &raw_ne, true);
+ __init_nat_entry(nm_i, ne, &raw_ne, true, true);
}
/*
@@ -2939,7 +3005,7 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
spin_unlock(&nm_i->nid_list_lock);
}
- __set_nat_cache_dirty(nm_i, ne);
+ __set_nat_cache_dirty(nm_i, ne, init_dirty);
}
update_nats_in_cursum(journal, -i);
up_write(&curseg->journal_rwsem);
@@ -2963,32 +3029,15 @@ add_out:
list_add_tail(&nes->set_list, head);
}
-static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs,
- unsigned int valid)
-{
- if (valid == 0) {
- __set_bit_le(nat_ofs, nm_i->empty_nat_bits);
- __clear_bit_le(nat_ofs, nm_i->full_nat_bits);
- return;
- }
-
- __clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
- if (valid == NAT_ENTRY_PER_BLOCK)
- __set_bit_le(nat_ofs, nm_i->full_nat_bits);
- else
- __clear_bit_le(nat_ofs, nm_i->full_nat_bits);
-}
-
-static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
- struct page *page)
+static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
+ const struct f2fs_nat_block *nat_blk)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
- struct f2fs_nat_block *nat_blk = page_address(page);
int valid = 0;
int i = 0;
- if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
+ if (!enabled_nat_bits(sbi, NULL))
return;
if (nat_index == 0) {
@@ -2999,36 +3048,17 @@ static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
valid++;
}
-
- __update_nat_bits(nm_i, nat_index, valid);
-}
-
-void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
-{
- struct f2fs_nm_info *nm_i = NM_I(sbi);
- unsigned int nat_ofs;
-
- f2fs_down_read(&nm_i->nat_tree_lock);
-
- for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
- unsigned int valid = 0, nid_ofs = 0;
-
- /* handle nid zero due to it should never be used */
- if (unlikely(nat_ofs == 0)) {
- valid = 1;
- nid_ofs = 1;
- }
-
- for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) {
- if (!test_bit_le(nid_ofs,
- nm_i->free_nid_bitmap[nat_ofs]))
- valid++;
- }
-
- __update_nat_bits(nm_i, nat_ofs, valid);
+ if (valid == 0) {
+ __set_bit_le(nat_index, nm_i->empty_nat_bits);
+ __clear_bit_le(nat_index, nm_i->full_nat_bits);
+ return;
}
- f2fs_up_read(&nm_i->nat_tree_lock);
+ __clear_bit_le(nat_index, nm_i->empty_nat_bits);
+ if (valid == NAT_ENTRY_PER_BLOCK)
+ __set_bit_le(nat_index, nm_i->full_nat_bits);
+ else
+ __clear_bit_le(nat_index, nm_i->full_nat_bits);
}
static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
@@ -3040,25 +3070,25 @@ static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
bool to_journal = true;
struct f2fs_nat_block *nat_blk;
struct nat_entry *ne, *cur;
- struct page *page = NULL;
+ struct folio *folio = NULL;
/*
* there are two steps to flush nat entries:
* #1, flush nat entries to journal in current hot data summary block.
* #2, flush nat entries to nat page.
*/
- if ((cpc->reason & CP_UMOUNT) ||
+ if (enabled_nat_bits(sbi, cpc) ||
!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
to_journal = false;
if (to_journal) {
down_write(&curseg->journal_rwsem);
} else {
- page = get_next_nat_page(sbi, start_nid);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ folio = get_next_nat_folio(sbi, start_nid);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- nat_blk = page_address(page);
+ nat_blk = folio_address(folio);
f2fs_bug_on(sbi, !nat_blk);
}
@@ -3094,8 +3124,8 @@ static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
if (to_journal) {
up_write(&curseg->journal_rwsem);
} else {
- update_nat_bits(sbi, start_nid, page);
- f2fs_put_page(page, 1);
+ __update_nat_bits(sbi, start_nid, nat_blk);
+ f2fs_folio_put(folio, true);
}
/* Allow dirty nats by node block allocation in write_begin */
@@ -3125,7 +3155,7 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
* during unmount, let's flush nat_bits before checking
* nat_cnt[DIRTY_NAT].
*/
- if (cpc->reason & CP_UMOUNT) {
+ if (enabled_nat_bits(sbi, cpc)) {
f2fs_down_write(&nm_i->nat_tree_lock);
remove_nats_in_journal(sbi);
f2fs_up_write(&nm_i->nat_tree_lock);
@@ -3141,7 +3171,7 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
* entries, remove all entries from journal and merge them
* into nat entry set.
*/
- if (cpc->reason & CP_UMOUNT ||
+ if (enabled_nat_bits(sbi, cpc) ||
!__has_cursum_space(journal,
nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
remove_nats_in_journal(sbi);
@@ -3178,40 +3208,38 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
__u64 cp_ver = cur_cp_version(ckpt);
block_t nat_bits_addr;
+ if (!enabled_nat_bits(sbi, NULL))
+ return 0;
+
nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
nm_i->nat_bits = f2fs_kvzalloc(sbi,
F2FS_BLK_TO_BYTES(nm_i->nat_bits_blocks), GFP_KERNEL);
if (!nm_i->nat_bits)
return -ENOMEM;
- nm_i->full_nat_bits = nm_i->nat_bits + 8;
- nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
-
- if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
- return 0;
-
nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) -
nm_i->nat_bits_blocks;
for (i = 0; i < nm_i->nat_bits_blocks; i++) {
- struct page *page;
+ struct folio *folio;
- page = f2fs_get_meta_page(sbi, nat_bits_addr++);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ folio = f2fs_get_meta_folio(sbi, nat_bits_addr++);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
memcpy(nm_i->nat_bits + F2FS_BLK_TO_BYTES(i),
- page_address(page), F2FS_BLKSIZE);
- f2fs_put_page(page, 1);
+ folio_address(folio), F2FS_BLKSIZE);
+ f2fs_folio_put(folio, true);
}
cp_ver |= (cur_cp_crc(ckpt) << 32);
if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
- clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
- f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)",
- cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
+ disable_nat_bits(sbi, true);
return 0;
}
+ nm_i->full_nat_bits = nm_i->nat_bits + 8;
+ nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
+
f2fs_notice(sbi, "Found nat_bits in checkpoint");
return 0;
}
@@ -3222,7 +3250,7 @@ static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
unsigned int i = 0;
nid_t nid, last_nid;
- if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
+ if (!enabled_nat_bits(sbi, NULL))
return;
for (i = 0; i < nm_i->nat_blocks; i++) {
@@ -3294,6 +3322,9 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
if (!nm_i->nat_bitmap)
return -ENOMEM;
+ if (!test_opt(sbi, NAT_BITS))
+ disable_nat_bits(sbi, true);
+
err = __get_nat_bitmaps(sbi);
if (err)
return err;
@@ -3434,10 +3465,10 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
}
kvfree(nm_i->free_nid_count);
- kvfree(nm_i->nat_bitmap);
+ kfree(nm_i->nat_bitmap);
kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
- kvfree(nm_i->nat_bitmap_mir);
+ kfree(nm_i->nat_bitmap_mir);
#endif
sbi->nm_info = NULL;
kfree(nm_i);
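The node.c hunks above apply one conversion pattern throughout: each lock/wait-on-writeback/mark-dirty/write/put sequence on a struct page becomes its struct folio equivalent, and __write_node_folio() now signals failure through its return value (unlocking the folio itself) instead of leaving the caller to unlock. A minimal sketch of that converted shape, reusing only helpers named in the hunks (f2fs_folio_wait_writeback(), __write_node_folio(), f2fs_folio_put()) and assuming the in-tree fs/f2fs headers, so it is illustrative rather than a drop-in function:

#include "f2fs.h"
#include "node.h"

/* Sketch only: mirrors the folio-based node write path used above. */
static int write_one_node_folio(struct folio *folio,
				struct writeback_control *wbc)
{
	bool submitted = false;

	folio_lock(folio);
	f2fs_folio_wait_writeback(folio, NODE, true, true);

	if (!folio_clear_dirty_for_io(folio)) {
		/* someone else already wrote it back */
		folio_unlock(folio);
		f2fs_folio_put(folio, false);
		return 0;
	}

	/* __write_node_folio() unlocks the folio and returns false on failure */
	if (!__write_node_folio(folio, false, &submitted, wbc, false,
				FS_NODE_IO, NULL)) {
		f2fs_folio_put(folio, false);
		return -EIO;
	}

	f2fs_folio_put(folio, false);
	return submitted ? 1 : 0;
}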
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 6aea13024ac1..9cb8dcf8d417 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -31,7 +31,7 @@
/* control total # of nats */
#define DEF_NAT_CACHE_THRESHOLD 100000
-/* control total # of node writes used for roll-fowrad recovery */
+/* control total # of node writes used for roll-forward recovery */
#define DEF_RF_NODE_BLOCKS 0
/* vector size for gang look-up from nat cache that consists of radix tree */
@@ -52,6 +52,14 @@ enum {
IS_PREALLOC, /* nat entry is preallocated */
};
+/* For node type in __get_node_folio() */
+enum node_type {
+ NODE_TYPE_REGULAR,
+ NODE_TYPE_INODE,
+ NODE_TYPE_XATTR,
+ NODE_TYPE_NON_INODE,
+};
+
/*
* For node information
*/
@@ -236,41 +244,41 @@ static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
#endif
}
-static inline nid_t ino_of_node(struct page *node_page)
+static inline nid_t ino_of_node(const struct folio *node_folio)
{
- struct f2fs_node *rn = F2FS_NODE(node_page);
+ struct f2fs_node *rn = F2FS_NODE(node_folio);
return le32_to_cpu(rn->footer.ino);
}
-static inline nid_t nid_of_node(struct page *node_page)
+static inline nid_t nid_of_node(const struct folio *node_folio)
{
- struct f2fs_node *rn = F2FS_NODE(node_page);
+ struct f2fs_node *rn = F2FS_NODE(node_folio);
return le32_to_cpu(rn->footer.nid);
}
-static inline unsigned int ofs_of_node(struct page *node_page)
+static inline unsigned int ofs_of_node(const struct folio *node_folio)
{
- struct f2fs_node *rn = F2FS_NODE(node_page);
+ struct f2fs_node *rn = F2FS_NODE(node_folio);
unsigned flag = le32_to_cpu(rn->footer.flag);
return flag >> OFFSET_BIT_SHIFT;
}
-static inline __u64 cpver_of_node(struct page *node_page)
+static inline __u64 cpver_of_node(const struct folio *node_folio)
{
- struct f2fs_node *rn = F2FS_NODE(node_page);
+ struct f2fs_node *rn = F2FS_NODE(node_folio);
return le64_to_cpu(rn->footer.cp_ver);
}
-static inline block_t next_blkaddr_of_node(struct page *node_page)
+static inline block_t next_blkaddr_of_node(const struct folio *node_folio)
{
- struct f2fs_node *rn = F2FS_NODE(node_page);
+ struct f2fs_node *rn = F2FS_NODE(node_folio);
return le32_to_cpu(rn->footer.next_blkaddr);
}
-static inline void fill_node_footer(struct page *page, nid_t nid,
+static inline void fill_node_footer(const struct folio *folio, nid_t nid,
nid_t ino, unsigned int ofs, bool reset)
{
- struct f2fs_node *rn = F2FS_NODE(page);
+ struct f2fs_node *rn = F2FS_NODE(folio);
unsigned int old_flag = 0;
if (reset)
@@ -286,17 +294,18 @@ static inline void fill_node_footer(struct page *page, nid_t nid,
(old_flag & OFFSET_BIT_MASK));
}
-static inline void copy_node_footer(struct page *dst, struct page *src)
+static inline void copy_node_footer(const struct folio *dst,
+ const struct folio *src)
{
struct f2fs_node *src_rn = F2FS_NODE(src);
struct f2fs_node *dst_rn = F2FS_NODE(dst);
memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
}
-static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
+static inline void fill_node_footer_blkaddr(struct folio *folio, block_t blkaddr)
{
- struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
- struct f2fs_node *rn = F2FS_NODE(page);
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_F_SB(folio));
+ struct f2fs_node *rn = F2FS_NODE(folio);
__u64 cp_ver = cur_cp_version(ckpt);
if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
@@ -306,19 +315,19 @@ static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}
-static inline bool is_recoverable_dnode(struct page *page)
+static inline bool is_recoverable_dnode(const struct folio *folio)
{
- struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_F_SB(folio));
__u64 cp_ver = cur_cp_version(ckpt);
/* Don't care crc part, if fsck.f2fs sets it. */
if (__is_set_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG))
- return (cp_ver << 32) == (cpver_of_node(page) << 32);
+ return (cp_ver << 32) == (cpver_of_node(folio) << 32);
if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
cp_ver |= (cur_cp_crc(ckpt) << 32);
- return cp_ver == cpver_of_node(page);
+ return cp_ver == cpver_of_node(folio);
}
/*
@@ -342,9 +351,9 @@ static inline bool is_recoverable_dnode(struct page *page)
* `- indirect node ((6 + 2N) + (N - 1)(N + 1))
* `- direct node
*/
-static inline bool IS_DNODE(struct page *node_page)
+static inline bool IS_DNODE(const struct folio *node_folio)
{
- unsigned int ofs = ofs_of_node(node_page);
+ unsigned int ofs = ofs_of_node(node_folio);
if (f2fs_has_xattr_block(ofs))
return true;
@@ -360,22 +369,22 @@ static inline bool IS_DNODE(struct page *node_page)
return true;
}
-static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
+static inline int set_nid(struct folio *folio, int off, nid_t nid, bool i)
{
- struct f2fs_node *rn = F2FS_NODE(p);
+ struct f2fs_node *rn = F2FS_NODE(folio);
- f2fs_wait_on_page_writeback(p, NODE, true, true);
+ f2fs_folio_wait_writeback(folio, NODE, true, true);
if (i)
rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
else
rn->in.nid[off] = cpu_to_le32(nid);
- return set_page_dirty(p);
+ return folio_mark_dirty(folio);
}
-static inline nid_t get_nid(struct page *p, int off, bool i)
+static inline nid_t get_nid(const struct folio *folio, int off, bool i)
{
- struct f2fs_node *rn = F2FS_NODE(p);
+ struct f2fs_node *rn = F2FS_NODE(folio);
if (i)
return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
@@ -389,19 +398,19 @@ static inline nid_t get_nid(struct page *p, int off, bool i)
* - Mark cold data pages in page cache
*/
-static inline int is_node(struct page *page, int type)
+static inline int is_node(const struct folio *folio, int type)
{
- struct f2fs_node *rn = F2FS_NODE(page);
+ struct f2fs_node *rn = F2FS_NODE(folio);
return le32_to_cpu(rn->footer.flag) & BIT(type);
}
-#define is_cold_node(page) is_node(page, COLD_BIT_SHIFT)
-#define is_fsync_dnode(page) is_node(page, FSYNC_BIT_SHIFT)
-#define is_dent_dnode(page) is_node(page, DENT_BIT_SHIFT)
+#define is_cold_node(folio) is_node(folio, COLD_BIT_SHIFT)
+#define is_fsync_dnode(folio) is_node(folio, FSYNC_BIT_SHIFT)
+#define is_dent_dnode(folio) is_node(folio, DENT_BIT_SHIFT)
-static inline void set_cold_node(struct page *page, bool is_dir)
+static inline void set_cold_node(const struct folio *folio, bool is_dir)
{
- struct f2fs_node *rn = F2FS_NODE(page);
+ struct f2fs_node *rn = F2FS_NODE(folio);
unsigned int flag = le32_to_cpu(rn->footer.flag);
if (is_dir)
@@ -411,9 +420,9 @@ static inline void set_cold_node(struct page *page, bool is_dir)
rn->footer.flag = cpu_to_le32(flag);
}
-static inline void set_mark(struct page *page, int mark, int type)
+static inline void set_mark(struct folio *folio, int mark, int type)
{
- struct f2fs_node *rn = F2FS_NODE(page);
+ struct f2fs_node *rn = F2FS_NODE(folio);
unsigned int flag = le32_to_cpu(rn->footer.flag);
if (mark)
flag |= BIT(type);
@@ -422,8 +431,8 @@ static inline void set_mark(struct page *page, int mark, int type)
rn->footer.flag = cpu_to_le32(flag);
#ifdef CONFIG_F2FS_CHECK_FS
- f2fs_inode_chksum_set(F2FS_P_SB(page), page);
+ f2fs_inode_chksum_set(F2FS_F_SB(folio), folio);
#endif
}
-#define set_dentry_mark(page, mark) set_mark(page, mark, DENT_BIT_SHIFT)
-#define set_fsync_mark(page, mark) set_mark(page, mark, FSYNC_BIT_SHIFT)
+#define set_dentry_mark(folio, mark) set_mark(folio, mark, DENT_BIT_SHIFT)
+#define set_fsync_mark(folio, mark) set_mark(folio, mark, FSYNC_BIT_SHIFT)
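The footer helpers above all decode one packed 32-bit flag word: the low bits carry the cold/fsync/dentry marks tested via BIT(type) in is_node(), and ofs_of_node() reads the node offset from the bits above OFFSET_BIT_SHIFT. A small standalone illustration of that packing, with the shift values written out here as assumptions rather than taken from node.h:

#include <stdio.h>

/* Shift values are illustrative assumptions; the real ones come from node.h. */
#define COLD_BIT_SHIFT   0
#define FSYNC_BIT_SHIFT  1
#define DENT_BIT_SHIFT   2
#define OFFSET_BIT_SHIFT 3
#define BIT(nr)          (1u << (nr))

static unsigned int pack_node_flag(unsigned int ofs, int cold, int fsync, int dent)
{
	unsigned int flag = ofs << OFFSET_BIT_SHIFT;

	if (cold)
		flag |= BIT(COLD_BIT_SHIFT);
	if (fsync)
		flag |= BIT(FSYNC_BIT_SHIFT);
	if (dent)
		flag |= BIT(DENT_BIT_SHIFT);
	return flag;
}

int main(void)
{
	unsigned int flag = pack_node_flag(5, 1, 1, 0);

	/* Mirrors ofs_of_node() and the is_node() tests from the hunks above. */
	printf("ofs=%u cold=%d fsync=%d dent=%d\n",
	       flag >> OFFSET_BIT_SHIFT,
	       !!(flag & BIT(COLD_BIT_SHIFT)),
	       !!(flag & BIT(FSYNC_BIT_SHIFT)),
	       !!(flag & BIT(DENT_BIT_SHIFT)));
	return 0;
}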
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index f35be2c48e3c..c3415ebb9f50 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -157,15 +157,15 @@ static int init_recovered_filename(const struct inode *dir,
return 0;
}
-static int recover_dentry(struct inode *inode, struct page *ipage,
+static int recover_dentry(struct inode *inode, struct folio *ifolio,
struct list_head *dir_list)
{
- struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
+ struct f2fs_inode *raw_inode = F2FS_INODE(ifolio);
nid_t pino = le32_to_cpu(raw_inode->i_pino);
struct f2fs_dir_entry *de;
struct f2fs_filename fname;
struct qstr usr_fname;
- struct page *page;
+ struct folio *folio;
struct inode *dir, *einode;
struct fsync_inode_entry *entry;
int err = 0;
@@ -187,7 +187,7 @@ static int recover_dentry(struct inode *inode, struct page *ipage,
if (err)
goto out;
retry:
- de = __f2fs_find_entry(dir, &fname, &page);
+ de = __f2fs_find_entry(dir, &fname, &folio);
if (de && inode->i_ino == le32_to_cpu(de->ino))
goto out_put;
@@ -212,11 +212,11 @@ retry:
iput(einode);
goto out_put;
}
- f2fs_delete_entry(de, page, dir, einode);
+ f2fs_delete_entry(de, folio, dir, einode);
iput(einode);
goto retry;
- } else if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ } else if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
} else {
err = f2fs_add_dentry(dir, &fname, inode,
inode->i_ino, inode->i_mode);
@@ -226,21 +226,21 @@ retry:
goto out;
out_put:
- f2fs_put_page(page, 0);
+ f2fs_folio_put(folio, false);
out:
if (file_enc_name(inode))
name = "<encrypted>";
else
name = raw_inode->i_name;
f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
- __func__, ino_of_node(ipage), name,
+ __func__, ino_of_node(ifolio), name,
IS_ERR(dir) ? 0 : dir->i_ino, err);
return err;
}
-static int recover_quota_data(struct inode *inode, struct page *page)
+static int recover_quota_data(struct inode *inode, struct folio *folio)
{
- struct f2fs_inode *raw = F2FS_INODE(page);
+ struct f2fs_inode *raw = F2FS_INODE(folio);
struct iattr attr;
uid_t i_uid = le32_to_cpu(raw->i_uid);
gid_t i_gid = le32_to_cpu(raw->i_gid);
@@ -277,16 +277,16 @@ static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
clear_inode_flag(inode, FI_DATA_EXIST);
}
-static int recover_inode(struct inode *inode, struct page *page)
+static int recover_inode(struct inode *inode, struct folio *folio)
{
- struct f2fs_inode *raw = F2FS_INODE(page);
+ struct f2fs_inode *raw = F2FS_INODE(folio);
struct f2fs_inode_info *fi = F2FS_I(inode);
char *name;
int err;
inode->i_mode = le16_to_cpu(raw->i_mode);
- err = recover_quota_data(inode, page);
+ err = recover_quota_data(inode, folio);
if (err)
return err;
@@ -333,10 +333,10 @@ static int recover_inode(struct inode *inode, struct page *page)
if (file_enc_name(inode))
name = "<encrypted>";
else
- name = F2FS_INODE(page)->i_name;
+ name = F2FS_INODE(folio)->i_name;
f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
- ino_of_node(page), name, raw->i_inline);
+ ino_of_node(folio), name, raw->i_inline);
return 0;
}
@@ -358,33 +358,34 @@ static int sanity_check_node_chain(struct f2fs_sb_info *sbi, block_t blkaddr,
block_t *blkaddr_fast, bool *is_detecting)
{
unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS;
- struct page *page = NULL;
int i;
if (!*is_detecting)
return 0;
for (i = 0; i < 2; i++) {
+ struct folio *folio;
+
if (!f2fs_is_valid_blkaddr(sbi, *blkaddr_fast, META_POR)) {
*is_detecting = false;
return 0;
}
- page = f2fs_get_tmp_page(sbi, *blkaddr_fast);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ folio = f2fs_get_tmp_folio(sbi, *blkaddr_fast);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- if (!is_recoverable_dnode(page)) {
- f2fs_put_page(page, 1);
+ if (!is_recoverable_dnode(folio)) {
+ f2fs_folio_put(folio, true);
*is_detecting = false;
return 0;
}
ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, *blkaddr_fast,
- next_blkaddr_of_node(page));
+ next_blkaddr_of_node(folio));
- *blkaddr_fast = next_blkaddr_of_node(page);
- f2fs_put_page(page, 1);
+ *blkaddr_fast = next_blkaddr_of_node(folio);
+ f2fs_folio_put(folio, true);
f2fs_ra_meta_pages_cond(sbi, *blkaddr_fast, ra_blocks);
}
@@ -398,10 +399,9 @@ static int sanity_check_node_chain(struct f2fs_sb_info *sbi, block_t blkaddr,
}
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
- bool check_only)
+ bool check_only, bool *new_inode)
{
struct curseg_info *curseg;
- struct page *page = NULL;
block_t blkaddr, blkaddr_fast;
bool is_detecting = true;
int err = 0;
@@ -413,60 +413,65 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
while (1) {
struct fsync_inode_entry *entry;
+ struct folio *folio;
if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
return 0;
- page = f2fs_get_tmp_page(sbi, blkaddr);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ folio = f2fs_get_tmp_folio(sbi, blkaddr);
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
break;
}
- if (!is_recoverable_dnode(page)) {
- f2fs_put_page(page, 1);
+ if (!is_recoverable_dnode(folio)) {
+ f2fs_folio_put(folio, true);
break;
}
- if (!is_fsync_dnode(page))
+ if (!is_fsync_dnode(folio))
goto next;
- entry = get_fsync_inode(head, ino_of_node(page));
+ entry = get_fsync_inode(head, ino_of_node(folio));
if (!entry) {
bool quota_inode = false;
if (!check_only &&
- IS_INODE(page) && is_dent_dnode(page)) {
- err = f2fs_recover_inode_page(sbi, page);
+ IS_INODE(folio) &&
+ is_dent_dnode(folio)) {
+ err = f2fs_recover_inode_page(sbi, folio);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
break;
}
quota_inode = true;
}
- /*
- * CP | dnode(F) | inode(DF)
- * For this case, we should not give up now.
- */
- entry = add_fsync_inode(sbi, head, ino_of_node(page),
+ entry = add_fsync_inode(sbi, head, ino_of_node(folio),
quota_inode);
if (IS_ERR(entry)) {
err = PTR_ERR(entry);
- if (err == -ENOENT)
+ /*
+ * CP | dnode(F) | inode(DF)
+ * For this case, we should not give up now.
+ */
+ if (err == -ENOENT) {
+ if (check_only)
+ *new_inode = true;
goto next;
- f2fs_put_page(page, 1);
+ }
+ f2fs_folio_put(folio, true);
break;
}
}
entry->blkaddr = blkaddr;
- if (IS_INODE(page) && is_dent_dnode(page))
+ if (IS_INODE(folio) && is_dent_dnode(folio))
entry->last_dentry = blkaddr;
next:
/* check next segment */
- blkaddr = next_blkaddr_of_node(page);
- f2fs_put_page(page, 1);
+ blkaddr = next_blkaddr_of_node(folio);
+ f2fs_folio_put(folio, true);
err = sanity_check_node_chain(sbi, blkaddr, &blkaddr_fast,
&is_detecting);
@@ -492,7 +497,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
struct f2fs_summary_block *sum_node;
struct f2fs_summary sum;
- struct page *sum_page, *node_page;
+ struct folio *sum_folio, *node_folio;
struct dnode_of_data tdn = *dn;
nid_t ino, nid;
struct inode *inode;
@@ -514,18 +519,18 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
}
}
- sum_page = f2fs_get_sum_page(sbi, segno);
- if (IS_ERR(sum_page))
- return PTR_ERR(sum_page);
- sum_node = (struct f2fs_summary_block *)page_address(sum_page);
+ sum_folio = f2fs_get_sum_folio(sbi, segno);
+ if (IS_ERR(sum_folio))
+ return PTR_ERR(sum_folio);
+ sum_node = SUM_BLK_PAGE_ADDR(sum_folio, segno);
sum = sum_node->entries[blkoff];
- f2fs_put_page(sum_page, 1);
+ f2fs_folio_put(sum_folio, true);
got_it:
/* Use the locked dnode page and inode */
nid = le32_to_cpu(sum.nid);
ofs_in_node = le16_to_cpu(sum.ofs_in_node);
- max_addrs = ADDRS_PER_PAGE(dn->node_page, dn->inode);
+ max_addrs = ADDRS_PER_PAGE(dn->node_folio, dn->inode);
if (ofs_in_node >= max_addrs) {
f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%lu, nid:%u, max:%u",
ofs_in_node, dn->inode->i_ino, nid, max_addrs);
@@ -535,9 +540,9 @@ got_it:
if (dn->inode->i_ino == nid) {
tdn.nid = nid;
- if (!dn->inode_page_locked)
- lock_page(dn->inode_page);
- tdn.node_page = dn->inode_page;
+ if (!dn->inode_folio_locked)
+ folio_lock(dn->inode_folio);
+ tdn.node_folio = dn->inode_folio;
tdn.ofs_in_node = ofs_in_node;
goto truncate_out;
} else if (dn->nid == nid) {
@@ -546,13 +551,13 @@ got_it:
}
/* Get the node page */
- node_page = f2fs_get_node_page(sbi, nid);
- if (IS_ERR(node_page))
- return PTR_ERR(node_page);
+ node_folio = f2fs_get_node_folio(sbi, nid, NODE_TYPE_REGULAR);
+ if (IS_ERR(node_folio))
+ return PTR_ERR(node_folio);
- offset = ofs_of_node(node_page);
- ino = ino_of_node(node_page);
- f2fs_put_page(node_page, 1);
+ offset = ofs_of_node(node_folio);
+ ino = ino_of_node(node_folio);
+ f2fs_folio_put(node_folio, true);
if (ino != dn->inode->i_ino) {
int ret;
@@ -578,8 +583,8 @@ got_it:
* if inode page is locked, unlock temporarily, but its reference
* count keeps alive.
*/
- if (ino == dn->inode->i_ino && dn->inode_page_locked)
- unlock_page(dn->inode_page);
+ if (ino == dn->inode->i_ino && dn->inode_folio_locked)
+ folio_unlock(dn->inode_folio);
set_new_dnode(&tdn, inode, NULL, NULL, 0);
if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
@@ -592,15 +597,15 @@ got_it:
out:
if (ino != dn->inode->i_ino)
iput(inode);
- else if (dn->inode_page_locked)
- lock_page(dn->inode_page);
+ else if (dn->inode_folio_locked)
+ folio_lock(dn->inode_folio);
return 0;
truncate_out:
if (f2fs_data_blkaddr(&tdn) == blkaddr)
f2fs_truncate_data_blocks_range(&tdn, 1);
- if (dn->inode->i_ino == nid && !dn->inode_page_locked)
- unlock_page(dn->inode_page);
+ if (dn->inode->i_ino == nid && !dn->inode_folio_locked)
+ folio_unlock(dn->inode_folio);
return 0;
}
@@ -618,27 +623,27 @@ static int f2fs_reserve_new_block_retry(struct dnode_of_data *dn)
}
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
- struct page *page)
+ struct folio *folio)
{
struct dnode_of_data dn;
struct node_info ni;
- unsigned int start, end;
+ unsigned int start = 0, end = 0, index;
int err = 0, recovered = 0;
/* step 1: recover xattr */
- if (IS_INODE(page)) {
- err = f2fs_recover_inline_xattr(inode, page);
+ if (IS_INODE(folio)) {
+ err = f2fs_recover_inline_xattr(inode, folio);
if (err)
goto out;
- } else if (f2fs_has_xattr_block(ofs_of_node(page))) {
- err = f2fs_recover_xattr_data(inode, page);
+ } else if (f2fs_has_xattr_block(ofs_of_node(folio))) {
+ err = f2fs_recover_xattr_data(inode, folio);
if (!err)
recovered++;
goto out;
}
/* step 2: recover inline data */
- err = f2fs_recover_inline_data(inode, page);
+ err = f2fs_recover_inline_data(inode, folio);
if (err) {
if (err == 1)
err = 0;
@@ -646,8 +651,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
}
/* step 3: recover data indices */
- start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
- end = start + ADDRS_PER_PAGE(page, inode);
+ start = f2fs_start_bidx_of_node(ofs_of_node(folio), inode);
+ end = start + ADDRS_PER_PAGE(folio, inode);
set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
@@ -660,28 +665,28 @@ retry_dn:
goto out;
}
- f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
+ f2fs_folio_wait_writeback(dn.node_folio, NODE, true, true);
err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
if (err)
goto err;
- f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
+ f2fs_bug_on(sbi, ni.ino != ino_of_node(folio));
- if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
+ if (ofs_of_node(dn.node_folio) != ofs_of_node(folio)) {
f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
- inode->i_ino, ofs_of_node(dn.node_page),
- ofs_of_node(page));
+ inode->i_ino, ofs_of_node(dn.node_folio),
+ ofs_of_node(folio));
err = -EFSCORRUPTED;
f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
goto err;
}
- for (; start < end; start++, dn.ofs_in_node++) {
+ for (index = start; index < end; index++, dn.ofs_in_node++) {
block_t src, dest;
src = f2fs_data_blkaddr(&dn);
- dest = data_blkaddr(dn.inode, page, dn.ofs_in_node);
+ dest = data_blkaddr(dn.inode, folio, dn.ofs_in_node);
if (__is_valid_data_blkaddr(src) &&
!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
@@ -706,9 +711,9 @@ retry_dn:
}
if (!file_keep_isize(inode) &&
- (i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
+ (i_size_read(inode) <= ((loff_t)index << PAGE_SHIFT)))
f2fs_i_size_write(inode,
- (loff_t)(start + 1) << PAGE_SHIFT);
+ (loff_t)(index + 1) << PAGE_SHIFT);
/*
* dest is reserved block, invalidate src block
@@ -756,16 +761,18 @@ retry_prev:
}
}
- copy_node_footer(dn.node_page, page);
- fill_node_footer(dn.node_page, dn.nid, ni.ino,
- ofs_of_node(page), false);
- set_page_dirty(dn.node_page);
+ copy_node_footer(dn.node_folio, folio);
+ fill_node_footer(dn.node_folio, dn.nid, ni.ino,
+ ofs_of_node(folio), false);
+ folio_mark_dirty(dn.node_folio);
err:
f2fs_put_dnode(&dn);
out:
- f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
- inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
- recovered, err);
+ f2fs_notice(sbi, "recover_data: ino = %lx, nid = %x (i_size: %s), "
+ "range (%u, %u), recovered = %d, err = %d",
+ inode->i_ino, nid_of_node(folio),
+ file_keep_isize(inode) ? "keep" : "recover",
+ start, end, recovered, err);
return err;
}
@@ -773,10 +780,17 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
struct list_head *tmp_inode_list, struct list_head *dir_list)
{
struct curseg_info *curseg;
- struct page *page = NULL;
int err = 0;
block_t blkaddr;
unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS;
+ unsigned int recoverable_dnode = 0;
+ unsigned int fsynced_dnode = 0;
+ unsigned int total_dnode = 0;
+ unsigned int recovered_inode = 0;
+ unsigned int recovered_dentry = 0;
+ unsigned int recovered_dnode = 0;
+
+ f2fs_notice(sbi, "do_recover_data: start to recover dnode");
/* get node pages in the current segment */
curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
@@ -784,88 +798,101 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
while (1) {
struct fsync_inode_entry *entry;
+ struct folio *folio;
if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
break;
- page = f2fs_get_tmp_page(sbi, blkaddr);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ folio = f2fs_get_tmp_folio(sbi, blkaddr);
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
break;
}
- if (!is_recoverable_dnode(page)) {
- f2fs_put_page(page, 1);
+ if (!is_recoverable_dnode(folio)) {
+ f2fs_folio_put(folio, true);
break;
}
+ recoverable_dnode++;
- entry = get_fsync_inode(inode_list, ino_of_node(page));
+ entry = get_fsync_inode(inode_list, ino_of_node(folio));
if (!entry)
goto next;
+ fsynced_dnode++;
/*
* inode(x) | CP | inode(x) | dnode(F)
* In this case, we can lose the latest inode(x).
* So, call recover_inode for the inode update.
*/
- if (IS_INODE(page)) {
- err = recover_inode(entry->inode, page);
+ if (IS_INODE(folio)) {
+ err = recover_inode(entry->inode, folio);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
break;
}
+ recovered_inode++;
}
if (entry->last_dentry == blkaddr) {
- err = recover_dentry(entry->inode, page, dir_list);
+ err = recover_dentry(entry->inode, folio, dir_list);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
break;
}
+ recovered_dentry++;
}
- err = do_recover_data(sbi, entry->inode, page);
+ err = do_recover_data(sbi, entry->inode, folio);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
break;
}
+ recovered_dnode++;
if (entry->blkaddr == blkaddr)
list_move_tail(&entry->list, tmp_inode_list);
next:
ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, blkaddr,
- next_blkaddr_of_node(page));
+ next_blkaddr_of_node(folio));
/* check next segment */
- blkaddr = next_blkaddr_of_node(page);
- f2fs_put_page(page, 1);
+ blkaddr = next_blkaddr_of_node(folio);
+ f2fs_folio_put(folio, true);
f2fs_ra_meta_pages_cond(sbi, blkaddr, ra_blocks);
+ total_dnode++;
}
if (!err)
err = f2fs_allocate_new_segments(sbi);
+
+ f2fs_notice(sbi, "do_recover_data: dnode: (recoverable: %u, fsynced: %u, "
+ "total: %u), recovered: (inode: %u, dentry: %u, dnode: %u), err: %d",
+ recoverable_dnode, fsynced_dnode, total_dnode, recovered_inode,
+ recovered_dentry, recovered_dnode, err);
return err;
}
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
- struct list_head inode_list, tmp_inode_list;
- struct list_head dir_list;
+ LIST_HEAD(inode_list);
+ LIST_HEAD(tmp_inode_list);
+ LIST_HEAD(dir_list);
int err;
int ret = 0;
unsigned long s_flags = sbi->sb->s_flags;
bool need_writecp = false;
+ bool new_inode = false;
+
+ f2fs_notice(sbi, "f2fs_recover_fsync_data: recovery fsync data, "
+ "check_only: %d", check_only);
if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE))
f2fs_info(sbi, "recover fsync data on readonly fs");
- INIT_LIST_HEAD(&inode_list);
- INIT_LIST_HEAD(&tmp_inode_list);
- INIT_LIST_HEAD(&dir_list);
-
/* prevent checkpoint */
f2fs_down_write(&sbi->cp_global_sem);
/* step #1: find fsynced inode numbers */
- err = find_fsync_dnodes(sbi, &inode_list, check_only);
- if (err || list_empty(&inode_list))
+ err = find_fsync_dnodes(sbi, &inode_list, check_only, &new_inode);
+ if (err < 0 || (list_empty(&inode_list) && (!check_only || !new_inode)))
goto skip;
if (check_only) {
@@ -899,10 +926,8 @@ skip:
* and the f2fs is not read only, check and fix zoned block devices'
* write pointer consistency.
*/
- if (!err) {
+ if (!err)
err = f2fs_check_and_fix_write_pointer(sbi);
- ret = err;
- }
if (!err)
clear_sbi_flag(sbi, SBI_POR_DOING);
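find_fsync_dnodes() and recover_data() above walk the same warm-node chain: read the block, stop as soon as is_recoverable_dnode() reports a checkpoint-version mismatch, otherwise follow footer.next_blkaddr. A condensed sketch of that walk, using only the folio helpers that appear in the hunks and leaving out the per-node recovery work, so it is a shape illustration rather than kernel-ready code:

/* Sketch only: the node-chain walk shared by the recovery passes above. */
static int walk_fsync_node_chain(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	while (f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR)) {
		struct folio *folio = f2fs_get_tmp_folio(sbi, blkaddr);

		if (IS_ERR(folio))
			return PTR_ERR(folio);

		/* a different checkpoint version marks the end of the chain */
		if (!is_recoverable_dnode(folio)) {
			f2fs_folio_put(folio, true);
			break;
		}

		/* ... per-node work: recover_inode(), recover_dentry(), ... */

		blkaddr = next_blkaddr_of_node(folio);
		f2fs_folio_put(folio, true);
	}
	return 0;
}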
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index eade36c5ef13..c26424f47686 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -201,6 +201,12 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
clear_inode_flag(inode, FI_ATOMIC_FILE);
if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
+ /*
+ * The vfs inode stays clean during commit, but the f2fs inode
+ * doesn't. So clear the dirty state after commit and let
+ * f2fs_mark_inode_dirty_sync ensure a consistent dirty state.
+ */
+ f2fs_inode_synced(inode);
f2fs_mark_inode_dirty_sync(inode, true);
}
stat_dec_atomic_inode(inode);
@@ -228,7 +234,7 @@ retry:
err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
if (err) {
if (err == -ENOMEM) {
- f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+ memalloc_retry_wait(GFP_NOFS);
goto retry;
}
return err;
@@ -245,7 +251,7 @@ retry:
if (!__is_valid_data_blkaddr(new_addr)) {
if (new_addr == NULL_ADDR)
dec_valid_block_count(sbi, inode, 1);
- f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
+ f2fs_invalidate_blocks(sbi, dn.data_blkaddr, 1);
f2fs_update_data_blkaddr(&dn, new_addr);
} else {
f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
@@ -328,7 +334,7 @@ static int __f2fs_commit_atomic_write(struct inode *inode)
goto next;
}
- blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, cow_inode),
+ blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_folio, cow_inode),
len);
index = off;
for (i = 0; i < blen; i++, dn.ofs_in_node++, index++) {
@@ -365,12 +371,21 @@ next:
}
out:
+ if (time_to_inject(sbi, FAULT_TIMEOUT))
+ f2fs_io_schedule_timeout_killable(DEFAULT_FAULT_TIMEOUT);
+
if (ret) {
sbi->revoked_atomic_block += fi->atomic_write_cnt;
} else {
sbi->committed_atomic_block += fi->atomic_write_cnt;
set_inode_flag(inode, FI_ATOMIC_COMMITTED);
+
+ /*
+ * The inode may have no FI_ATOMIC_DIRTIED flag if there was no
+ * write before commit.
+ */
if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
+ /* clear atomic dirty status and set vfs dirty status */
clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
f2fs_mark_inode_dirty_sync(inode, true);
}
@@ -418,7 +433,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
if (need && excess_cached_nats(sbi))
f2fs_balance_fs_bg(sbi, false);
- if (!f2fs_is_checkpoint_ready(sbi))
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
return;
/*
@@ -440,7 +455,8 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
} else {
struct f2fs_gc_control gc_control = {
.victim_segno = NULL_SEGNO,
- .init_gc_type = BG_GC,
+ .init_gc_type = f2fs_sb_has_blkzoned(sbi) ?
+ FG_GC : BG_GC,
.no_bg_gc = true,
.should_migrate_blocks = false,
.err_gc_skipped = false,
@@ -734,7 +750,7 @@ int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
do {
ret = __submit_flush_wait(sbi, FDEV(i).bdev);
if (ret)
- f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+ f2fs_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
} while (ret && --count);
if (ret) {
@@ -757,7 +773,7 @@ static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
/* need not be added */
- if (IS_CURSEG(sbi, segno))
+ if (is_curseg(sbi, segno))
return;
if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
@@ -784,7 +800,7 @@ static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
!valid_blocks) ||
valid_blocks == CAP_BLKS_PER_SEC(sbi));
- if (!IS_CURSEC(sbi, secno))
+ if (!is_cursec(sbi, secno))
set_bit(secno, dirty_i->dirty_secmap);
}
}
@@ -823,7 +839,7 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
return;
}
- if (!IS_CURSEC(sbi, secno))
+ if (!is_cursec(sbi, secno))
set_bit(secno, dirty_i->dirty_secmap);
}
}
@@ -840,7 +856,7 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
unsigned short valid_blocks, ckpt_valid_blocks;
unsigned int usable_blocks;
- if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
+ if (segno == NULL_SEGNO || is_curseg(sbi, segno))
return;
usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
@@ -873,7 +889,7 @@ void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
if (get_valid_blocks(sbi, segno, false))
continue;
- if (IS_CURSEG(sbi, segno))
+ if (is_curseg(sbi, segno))
continue;
__locate_dirty_segment(sbi, segno, PRE);
__remove_dirty_segment(sbi, segno, DIRTY);
@@ -1327,15 +1343,9 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
dc->di.len += len;
+ err = 0;
if (time_to_inject(sbi, FAULT_DISCARD)) {
err = -EIO;
- } else {
- err = __blkdev_issue_discard(bdev,
- SECTOR_FROM_BLOCK(start),
- SECTOR_FROM_BLOCK(len),
- GFP_NOFS, &bio);
- }
- if (err) {
spin_lock_irqsave(&dc->lock, flags);
if (dc->state == D_PARTIAL)
dc->state = D_SUBMIT;
@@ -1344,6 +1354,8 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
break;
}
+ __blkdev_issue_discard(bdev, SECTOR_FROM_BLOCK(start),
+ SECTOR_FROM_BLOCK(len), GFP_NOFS, &bio);
f2fs_bug_on(sbi, !bio);
/*
@@ -2090,7 +2102,9 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
return false;
if (!force) {
- if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
+ if (!f2fs_realtime_discard_enable(sbi) ||
+ (!se->valid_blocks &&
+ !is_curseg(sbi, cpc->trim_start)) ||
SM_I(sbi)->dcc_info->nr_discards >=
SM_I(sbi)->dcc_info->max_discards)
return false;
@@ -2218,7 +2232,7 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
next:
secno = GET_SEC_FROM_SEG(sbi, start);
start_segno = GET_SEG_FROM_SEC(sbi, secno);
- if (!IS_CURSEC(sbi, secno) &&
+ if (!is_cursec(sbi, secno) &&
!get_valid_blocks(sbi, start, true))
f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
BLKS_PER_SEC(sbi));
@@ -2314,10 +2328,9 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY;
dcc->discard_io_aware = DPOLICY_IO_AWARE_ENABLE;
- if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
+ if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT ||
+ F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
dcc->discard_granularity = BLKS_PER_SEG(sbi);
- else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
- dcc->discard_granularity = BLKS_PER_SEC(sbi);
INIT_LIST_HEAD(&dcc->entry_list);
for (i = 0; i < MAX_PLIST_NUM; i++)
@@ -2426,78 +2439,38 @@ static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
SIT_I(sbi)->max_mtime = ctime;
}
-static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
+/*
+ * NOTE: when updating multiple blocks at the same time, please ensure
+ * that the consecutive input blocks belong to the same segment.
+ */
+static int update_sit_entry_for_release(struct f2fs_sb_info *sbi, struct seg_entry *se,
+ unsigned int segno, block_t blkaddr, unsigned int offset, int del)
{
- struct seg_entry *se;
- unsigned int segno, offset;
- long int new_vblocks;
bool exist;
#ifdef CONFIG_F2FS_CHECK_FS
bool mir_exist;
#endif
+ int i;
+ int del_count = -del;
- segno = GET_SEGNO(sbi, blkaddr);
- if (segno == NULL_SEGNO)
- return;
-
- se = get_seg_entry(sbi, segno);
- new_vblocks = se->valid_blocks + del;
- offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
-
- f2fs_bug_on(sbi, (new_vblocks < 0 ||
- (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
-
- se->valid_blocks = new_vblocks;
-
- /* Update valid block bitmap */
- if (del > 0) {
- exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
-#ifdef CONFIG_F2FS_CHECK_FS
- mir_exist = f2fs_test_and_set_bit(offset,
- se->cur_valid_map_mir);
- if (unlikely(exist != mir_exist)) {
- f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
- blkaddr, exist);
- f2fs_bug_on(sbi, 1);
- }
-#endif
- if (unlikely(exist)) {
- f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
- blkaddr);
- f2fs_bug_on(sbi, 1);
- se->valid_blocks--;
- del = 0;
- }
-
- if (f2fs_block_unit_discard(sbi) &&
- !f2fs_test_and_set_bit(offset, se->discard_map))
- sbi->discard_blks--;
+ f2fs_bug_on(sbi, GET_SEGNO(sbi, blkaddr) != GET_SEGNO(sbi, blkaddr + del_count - 1));
- /*
- * SSR should never reuse block which is checkpointed
- * or newly invalidated.
- */
- if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
- if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
- se->ckpt_valid_blocks++;
- }
- } else {
- exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
+ for (i = 0; i < del_count; i++) {
+ exist = f2fs_test_and_clear_bit(offset + i, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
- mir_exist = f2fs_test_and_clear_bit(offset,
+ mir_exist = f2fs_test_and_clear_bit(offset + i,
se->cur_valid_map_mir);
if (unlikely(exist != mir_exist)) {
f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
- blkaddr, exist);
+ blkaddr + i, exist);
f2fs_bug_on(sbi, 1);
}
#endif
if (unlikely(!exist)) {
- f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
- blkaddr);
+ f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", blkaddr + i);
f2fs_bug_on(sbi, 1);
se->valid_blocks++;
- del = 0;
+ del += 1;
} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
/*
* If checkpoints are off, we must not reuse data that
@@ -2505,7 +2478,7 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
* before, we must track that to know how much space we
* really have.
*/
- if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
+ if (f2fs_test_bit(offset + i, se->ckpt_valid_map)) {
spin_lock(&sbi->stat_lock);
sbi->unusable_block_count++;
spin_unlock(&sbi->stat_lock);
@@ -2513,11 +2486,105 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
}
if (f2fs_block_unit_discard(sbi) &&
- f2fs_test_and_clear_bit(offset, se->discard_map))
+ f2fs_test_and_clear_bit(offset + i, se->discard_map))
sbi->discard_blks++;
+
+ if (!f2fs_test_bit(offset + i, se->ckpt_valid_map)) {
+ se->ckpt_valid_blocks -= 1;
+ if (__is_large_section(sbi))
+ get_sec_entry(sbi, segno)->ckpt_valid_blocks -= 1;
+ }
+ }
+
+ if (__is_large_section(sbi))
+ sanity_check_valid_blocks(sbi, segno);
+
+ return del;
+}
+
+static int update_sit_entry_for_alloc(struct f2fs_sb_info *sbi, struct seg_entry *se,
+ unsigned int segno, block_t blkaddr, unsigned int offset, int del)
+{
+ bool exist;
+#ifdef CONFIG_F2FS_CHECK_FS
+ bool mir_exist;
+#endif
+
+ exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
+#ifdef CONFIG_F2FS_CHECK_FS
+ mir_exist = f2fs_test_and_set_bit(offset,
+ se->cur_valid_map_mir);
+ if (unlikely(exist != mir_exist)) {
+ f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
+ blkaddr, exist);
+ f2fs_bug_on(sbi, 1);
+ }
+#endif
+ if (unlikely(exist)) {
+ f2fs_err(sbi, "Bitmap was wrongly set, blk:%u", blkaddr);
+ f2fs_bug_on(sbi, 1);
+ se->valid_blocks--;
+ del = 0;
+ }
+
+ if (f2fs_block_unit_discard(sbi) &&
+ !f2fs_test_and_set_bit(offset, se->discard_map))
+ sbi->discard_blks--;
+
+ /*
+ * SSR should never reuse block which is checkpointed
+ * or newly invalidated.
+ */
+ if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
+ if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) {
+ se->ckpt_valid_blocks++;
+ if (__is_large_section(sbi))
+ get_sec_entry(sbi, segno)->ckpt_valid_blocks++;
+ }
}
- if (!f2fs_test_bit(offset, se->ckpt_valid_map))
+
+ if (!f2fs_test_bit(offset, se->ckpt_valid_map)) {
se->ckpt_valid_blocks += del;
+ if (__is_large_section(sbi))
+ get_sec_entry(sbi, segno)->ckpt_valid_blocks += del;
+ }
+
+ if (__is_large_section(sbi))
+ sanity_check_valid_blocks(sbi, segno);
+
+ return del;
+}
+
+/*
+ * When releasing blocks, this function can update multiple consecutive blocks
+ * at once, provided that all of them belong to the same segment.
+ */
+static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
+{
+ struct seg_entry *se;
+ unsigned int segno, offset;
+ long int new_vblocks;
+
+ segno = GET_SEGNO(sbi, blkaddr);
+ if (segno == NULL_SEGNO)
+ return;
+
+ se = get_seg_entry(sbi, segno);
+ new_vblocks = se->valid_blocks + del;
+ offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
+
+ f2fs_bug_on(sbi, (new_vblocks < 0 ||
+ (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
+
+ se->valid_blocks = new_vblocks;
+
+ /* Update valid block bitmap */
+ if (del > 0) {
+ del = update_sit_entry_for_alloc(sbi, se, segno, blkaddr, offset, del);
+ } else {
+ del = update_sit_entry_for_release(sbi, se, segno, blkaddr, offset, del);
+ }
__mark_sit_entry_dirty(sbi, segno);
@@ -2528,25 +2595,43 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
get_sec_entry(sbi, segno)->valid_blocks += del;
}
-void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
+void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr,
+ unsigned int len)
{
unsigned int segno = GET_SEGNO(sbi, addr);
struct sit_info *sit_i = SIT_I(sbi);
+ block_t addr_start = addr, addr_end = addr + len - 1;
+ unsigned int seg_num = GET_SEGNO(sbi, addr_end) - segno + 1;
+ unsigned int i = 1, max_blocks = sbi->blocks_per_seg, cnt;
f2fs_bug_on(sbi, addr == NULL_ADDR);
if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
return;
- f2fs_invalidate_internal_cache(sbi, addr);
+ f2fs_invalidate_internal_cache(sbi, addr, len);
/* add it into sit main buffer */
down_write(&sit_i->sentry_lock);
- update_segment_mtime(sbi, addr, 0);
- update_sit_entry(sbi, addr, -1);
+ if (seg_num == 1)
+ cnt = len;
+ else
+ cnt = max_blocks - GET_BLKOFF_FROM_SEG0(sbi, addr);
- /* add it into dirty seglist */
- locate_dirty_segment(sbi, segno);
+ do {
+ update_segment_mtime(sbi, addr_start, 0);
+ update_sit_entry(sbi, addr_start, -cnt);
+
+ /* add it into dirty seglist */
+ locate_dirty_segment(sbi, segno);
+
+ /* update @addr_start and @cnt and @segno */
+ addr_start = START_BLOCK(sbi, ++segno);
+ if (++i == seg_num)
+ cnt = GET_BLKOFF_FROM_SEG0(sbi, addr_end) + 1;
+ else
+ cnt = max_blocks;
+ } while (i <= seg_num);
up_write(&sit_i->sentry_lock);
}
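A minimal userspace sketch of the range splitting done by the new f2fs_invalidate_blocks() above: the first chunk is clipped to the end of the starting segment, whole segments follow, and a final partial chunk covers the remainder. The 512-block segment size, the helper name and the printf reporting are illustrative assumptions, not f2fs code.

/* sketch of splitting [addr, addr + len) at segment boundaries */
#include <stdio.h>

#define BLKS_PER_SEG 512u	/* assumed segment size in blocks */

static void invalidate_range(unsigned int addr, unsigned int len)
{
	unsigned int addr_end = addr + len - 1;
	unsigned int segno = addr / BLKS_PER_SEG;
	unsigned int seg_num = addr_end / BLKS_PER_SEG - segno + 1;
	unsigned int i = 1, cnt;

	/* first chunk: clip to the end of the starting segment */
	cnt = (seg_num == 1) ? len : BLKS_PER_SEG - (addr % BLKS_PER_SEG);

	do {
		printf("segment %u: drop %u block(s) starting at block %u\n",
		       segno, cnt, addr);

		/* advance to the start of the next segment */
		addr = ++segno * BLKS_PER_SEG;
		if (++i == seg_num)
			cnt = addr_end % BLKS_PER_SEG + 1;	/* last, partial chunk */
		else
			cnt = BLKS_PER_SEG;			/* full middle segment */
	} while (i <= seg_num);
}

int main(void)
{
	invalidate_range(500, 600);	/* spans three 512-block segments */
	return 0;
}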
@@ -2611,40 +2696,60 @@ int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
}
/*
- * Caller should put this summary page
+ * Caller should put this summary folio
*/
-struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
+struct folio *f2fs_get_sum_folio(struct f2fs_sb_info *sbi, unsigned int segno)
{
if (unlikely(f2fs_cp_error(sbi)))
return ERR_PTR(-EIO);
- return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
+ return f2fs_get_meta_folio_retry(sbi, GET_SUM_BLOCK(sbi, segno));
}
void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
void *src, block_t blk_addr)
{
- struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
+ struct folio *folio;
+
+ if (SUMS_PER_BLOCK == 1)
+ folio = f2fs_grab_meta_folio(sbi, blk_addr);
+ else
+ folio = f2fs_get_meta_folio_retry(sbi, blk_addr);
- memcpy(page_address(page), src, PAGE_SIZE);
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ if (IS_ERR(folio))
+ return;
+
+ memcpy(folio_address(folio), src, PAGE_SIZE);
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
}
static void write_sum_page(struct f2fs_sb_info *sbi,
- struct f2fs_summary_block *sum_blk, block_t blk_addr)
+ struct f2fs_summary_block *sum_blk, unsigned int segno)
{
- f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
+ struct folio *folio;
+
+ if (SUMS_PER_BLOCK == 1)
+ return f2fs_update_meta_page(sbi, (void *)sum_blk,
+ GET_SUM_BLOCK(sbi, segno));
+
+ folio = f2fs_get_sum_folio(sbi, segno);
+ if (IS_ERR(folio))
+ return;
+
+ memcpy(SUM_BLK_PAGE_ADDR(folio, segno), sum_blk, sizeof(*sum_blk));
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
}
static void write_current_sum_page(struct f2fs_sb_info *sbi,
int type, block_t blk_addr)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
- struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
+ struct folio *folio = f2fs_grab_meta_folio(sbi, blk_addr);
struct f2fs_summary_block *src = curseg->sum_blk;
struct f2fs_summary_block *dst;
- dst = (struct f2fs_summary_block *)page_address(page);
+ dst = folio_address(folio);
memset(dst, 0, PAGE_SIZE);
mutex_lock(&curseg->curseg_mutex);
@@ -2658,8 +2763,8 @@ static void write_current_sum_page(struct f2fs_sb_info *sbi,
mutex_unlock(&curseg->curseg_mutex);
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
}
static int is_next_segment_free(struct f2fs_sb_info *sbi,
@@ -2685,6 +2790,8 @@ static int get_new_segment(struct f2fs_sb_info *sbi,
unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
+ unsigned int alloc_policy = sbi->allocate_section_policy;
+ unsigned int alloc_hint = sbi->allocate_section_hint;
bool init = true;
int i;
int ret = 0;
@@ -2713,11 +2820,26 @@ static int get_new_segment(struct f2fs_sb_info *sbi,
if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_PRIOR_CONV || pinning)
segno = 0;
else
- segno = max(sbi->first_zoned_segno, *newseg);
+ segno = max(sbi->first_seq_zone_segno, *newseg);
hint = GET_SEC_FROM_SEG(sbi, segno);
}
#endif
+ /*
+ * Prevent allocate_section_hint from exceeding MAIN_SECS()
+ * due to desynchronization.
+ */
+ if (alloc_policy != ALLOCATE_FORWARD_NOHINT &&
+ alloc_hint > MAIN_SECS(sbi))
+ alloc_hint = MAIN_SECS(sbi);
+
+ if (alloc_policy == ALLOCATE_FORWARD_FROM_HINT &&
+ hint < alloc_hint)
+ hint = alloc_hint;
+ else if (alloc_policy == ALLOCATE_FORWARD_WITHIN_HINT &&
+ hint >= alloc_hint)
+ hint = 0;
+
find_other_zone:
secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
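The allocate_section_policy handling added above only adjusts the search hint: ALLOCATE_FORWARD_FROM_HINT never starts the free-section search below allocate_section_hint, ALLOCATE_FORWARD_WITHIN_HINT restarts from section 0 once the hint has been passed, and a stale hint is clamped to MAIN_SECS(). Below is a standalone sketch of that clamping; the enum values, section count and sample inputs are assumptions for illustration only.

/* sketch of the hint adjustment; not kernel code */
#include <stdio.h>

enum {
	ALLOCATE_FORWARD_NOHINT,
	ALLOCATE_FORWARD_FROM_HINT,
	ALLOCATE_FORWARD_WITHIN_HINT,
};

static unsigned int adjust_hint(unsigned int hint, unsigned int alloc_hint,
				unsigned int policy, unsigned int main_secs)
{
	/* keep a stale hint from pointing past the last section */
	if (policy != ALLOCATE_FORWARD_NOHINT && alloc_hint > main_secs)
		alloc_hint = main_secs;

	if (policy == ALLOCATE_FORWARD_FROM_HINT && hint < alloc_hint)
		hint = alloc_hint;	/* never search below the hint */
	else if (policy == ALLOCATE_FORWARD_WITHIN_HINT && hint >= alloc_hint)
		hint = 0;		/* wrap back below the hint */

	return hint;
}

int main(void)
{
	printf("%u\n", adjust_hint(10, 100, ALLOCATE_FORWARD_FROM_HINT, 512));	/* 100 */
	printf("%u\n", adjust_hint(300, 100, ALLOCATE_FORWARD_WITHIN_HINT, 512));	/* 0 */
	printf("%u\n", adjust_hint(300, 900, ALLOCATE_FORWARD_FROM_HINT, 512));	/* 512 */
	return 0;
}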
@@ -2725,7 +2847,7 @@ find_other_zone:
if (secno >= MAIN_SECS(sbi) && f2fs_sb_has_blkzoned(sbi)) {
/* Write only to sequential zones */
if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_ONLY_SEQ) {
- hint = GET_SEC_FROM_SEG(sbi, sbi->first_zoned_segno);
+ hint = GET_SEC_FROM_SEG(sbi, sbi->first_seq_zone_segno);
secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
} else
secno = find_first_zero_bit(free_i->free_secmap,
@@ -2743,7 +2865,7 @@ find_other_zone:
MAIN_SECS(sbi));
if (secno >= MAIN_SECS(sbi)) {
ret = -ENOSPC;
- f2fs_bug_on(sbi, 1);
+ f2fs_bug_on(sbi, !pinning);
goto out_unlock;
}
}
@@ -2772,11 +2894,15 @@ find_other_zone:
}
got_it:
/* set it as dirty segment in free segmap */
- f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
+ if (test_bit(segno, free_i->free_segmap)) {
+ ret = -EFSCORRUPTED;
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_CORRUPTED_FREE_BITMAP);
+ goto out_unlock;
+ }
- /* no free section in conventional zone */
+ /* no free section in conventional device or conventional zone */
if (new_sec && pinning &&
- !f2fs_valid_pinned_area(sbi, START_BLOCK(sbi, segno))) {
+ f2fs_is_sequential_zone_area(sbi, START_BLOCK(sbi, segno))) {
ret = -EAGAIN;
goto out_unlock;
}
@@ -2785,7 +2911,7 @@ got_it:
out_unlock:
spin_unlock(&free_i->segmap_lock);
- if (ret == -ENOSPC)
+ if (ret == -ENOSPC && !pinning)
f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT);
return ret;
}
@@ -2858,6 +2984,13 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
return curseg->segno;
}
+static void reset_curseg_fields(struct curseg_info *curseg)
+{
+ curseg->inited = false;
+ curseg->segno = NULL_SEGNO;
+ curseg->next_segno = 0;
+}
+
/*
* Allocate a current working segment.
* This function always allocates a free segment in LFS manner.
@@ -2870,13 +3003,13 @@ static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
int ret;
if (curseg->inited)
- write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, segno));
+ write_sum_page(sbi, curseg->sum_blk, segno);
segno = __get_next_segno(sbi, type);
ret = get_new_segment(sbi, &segno, new_sec, pinning);
if (ret) {
if (ret == -ENOSPC)
- curseg->segno = NULL_SEGNO;
+ reset_curseg_fields(curseg);
return ret;
}
@@ -2926,10 +3059,10 @@ static int change_curseg(struct f2fs_sb_info *sbi, int type)
struct curseg_info *curseg = CURSEG_I(sbi, type);
unsigned int new_segno = curseg->next_segno;
struct f2fs_summary_block *sum_node;
- struct page *sum_page;
+ struct folio *sum_folio;
if (curseg->inited)
- write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno));
+ write_sum_page(sbi, curseg->sum_blk, curseg->segno);
__set_test_and_inuse(sbi, new_segno);
@@ -2942,15 +3075,15 @@ static int change_curseg(struct f2fs_sb_info *sbi, int type)
curseg->alloc_type = SSR;
curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0);
- sum_page = f2fs_get_sum_page(sbi, new_segno);
- if (IS_ERR(sum_page)) {
+ sum_folio = f2fs_get_sum_folio(sbi, new_segno);
+ if (IS_ERR(sum_folio)) {
/* GC won't be able to use stale summary pages by cp_error */
memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
- return PTR_ERR(sum_page);
+ return PTR_ERR(sum_folio);
}
- sum_node = (struct f2fs_summary_block *)page_address(sum_page);
+ sum_node = SUM_BLK_PAGE_ADDR(sum_folio, new_segno);
memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
- f2fs_put_page(sum_page, 1);
+ f2fs_folio_put(sum_folio, true);
return 0;
}
@@ -3037,8 +3170,7 @@ static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
goto out;
if (get_valid_blocks(sbi, curseg->segno, false)) {
- write_sum_page(sbi, curseg->sum_blk,
- GET_SUM_BLOCK(sbi, curseg->segno));
+ write_sum_page(sbi, curseg->sum_blk, curseg->segno);
} else {
mutex_lock(&DIRTY_I(sbi)->seglist_lock);
__set_test_and_free(sbi, curseg->segno, true);
@@ -3240,7 +3372,7 @@ retry:
if (f2fs_sb_has_blkzoned(sbi) && err == -EAGAIN && gc_required) {
f2fs_down_write(&sbi->gc_lock);
- err = f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk),
+ err = f2fs_gc_range(sbi, 0, sbi->first_seq_zone_segno - 1,
true, ZONED_PIN_SEC_REQUIRED_COUNT);
f2fs_up_write(&sbi->gc_lock);
@@ -3335,7 +3467,7 @@ next:
blk_finish_plug(&plug);
mutex_unlock(&dcc->cmd_lock);
trimmed += __wait_all_discard_cmd(sbi, NULL);
- f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+ f2fs_schedule_timeout(DEFAULT_DISCARD_INTERVAL);
goto next;
}
skip:
@@ -3513,14 +3645,14 @@ static int __get_segment_type_2(struct f2fs_io_info *fio)
static int __get_segment_type_4(struct f2fs_io_info *fio)
{
if (fio->type == DATA) {
- struct inode *inode = fio->page->mapping->host;
+ struct inode *inode = fio_inode(fio);
if (S_ISDIR(inode->i_mode))
return CURSEG_HOT_DATA;
else
return CURSEG_COLD_DATA;
} else {
- if (IS_DNODE(fio->page) && is_cold_node(fio->page))
+ if (IS_DNODE(fio->folio) && is_cold_node(fio->folio))
return CURSEG_WARM_NODE;
else
return CURSEG_COLD_NODE;
@@ -3547,7 +3679,7 @@ static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs)
static int __get_segment_type_6(struct f2fs_io_info *fio)
{
if (fio->type == DATA) {
- struct inode *inode = fio->page->mapping->host;
+ struct inode *inode = fio_inode(fio);
int type;
if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
@@ -3566,20 +3698,20 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
if (file_is_cold(inode) || f2fs_need_compress_data(inode))
return CURSEG_COLD_DATA;
- type = __get_age_segment_type(inode,
- page_folio(fio->page)->index);
+ type = __get_age_segment_type(inode, fio->folio->index);
if (type != NO_CHECK_TYPE)
return type;
if (file_is_hot(inode) ||
is_inode_flag_set(inode, FI_HOT_DATA) ||
- f2fs_is_cow_file(inode))
+ f2fs_is_cow_file(inode) ||
+ is_inode_flag_set(inode, FI_NEED_IPU))
return CURSEG_HOT_DATA;
return f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode),
inode->i_write_hint);
} else {
- if (IS_DNODE(fio->page))
- return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
+ if (IS_DNODE(fio->folio))
+ return is_cold_node(fio->folio) ? CURSEG_WARM_NODE :
CURSEG_HOT_NODE;
return CURSEG_COLD_NODE;
}
@@ -3647,14 +3779,7 @@ static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi,
get_random_u32_inclusive(1, sbi->max_fragment_hole);
}
-static void reset_curseg_fields(struct curseg_info *curseg)
-{
- curseg->inited = false;
- curseg->segno = NULL_SEGNO;
- curseg->next_segno = 0;
-}
-
-int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct folio *folio,
block_t old_blkaddr, block_t *new_blkaddr,
struct f2fs_summary *sum, int type,
struct f2fs_io_info *fio)
@@ -3723,8 +3848,7 @@ int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
if (segment_full) {
if (type == CURSEG_COLD_DATA_PINNED &&
!((curseg->segno + 1) % sbi->segs_per_sec)) {
- write_sum_page(sbi, curseg->sum_blk,
- GET_SUM_BLOCK(sbi, curseg->segno));
+ write_sum_page(sbi, curseg->sum_blk, curseg->segno);
reset_curseg_fields(curseg);
goto skip_new_segment;
}
@@ -3753,15 +3877,20 @@ skip_new_segment:
locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
- if (IS_DATASEG(curseg->seg_type))
- atomic64_inc(&sbi->allocated_data_blocks);
+ if (IS_DATASEG(curseg->seg_type)) {
+ unsigned long long new_val;
+
+ new_val = atomic64_inc_return(&sbi->allocated_data_blocks);
+ if (unlikely(new_val == ULLONG_MAX))
+ atomic64_set(&sbi->allocated_data_blocks, 0);
+ }
up_write(&sit_i->sentry_lock);
- if (page && IS_NODESEG(curseg->seg_type)) {
- fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
+ if (folio && IS_NODESEG(curseg->seg_type)) {
+ fill_node_footer_blkaddr(folio, NEXT_FREE_BLKADDR(sbi, curseg));
- f2fs_inode_chksum_set(sbi, page);
+ f2fs_inode_chksum_set(sbi, folio);
}
if (fio) {
@@ -3839,25 +3968,38 @@ static int log_type_to_seg_type(enum log_type type)
static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
+ struct folio *folio = fio->folio;
enum log_type type = __get_segment_type(fio);
int seg_type = log_type_to_seg_type(type);
bool keep_order = (f2fs_lfs_mode(fio->sbi) &&
seg_type == CURSEG_COLD_DATA);
+ int err;
if (keep_order)
f2fs_down_read(&fio->sbi->io_order_lock);
- if (f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
- &fio->new_blkaddr, sum, type, fio)) {
- if (fscrypt_inode_uses_fs_layer_crypto(fio->page->mapping->host))
+ err = f2fs_allocate_data_block(fio->sbi, folio, fio->old_blkaddr,
+ &fio->new_blkaddr, sum, type, fio);
+ if (unlikely(err)) {
+ f2fs_err_ratelimited(fio->sbi,
+ "%s Failed to allocate data block, ino:%u, index:%lu, type:%d, old_blkaddr:0x%x, new_blkaddr:0x%x, err:%d",
+ __func__, fio->ino, folio->index, type,
+ fio->old_blkaddr, fio->new_blkaddr, err);
+ if (fscrypt_inode_uses_fs_layer_crypto(folio->mapping->host))
fscrypt_finalize_bounce_page(&fio->encrypted_page);
- end_page_writeback(fio->page);
- if (f2fs_in_warm_node_list(fio->sbi, fio->page))
- f2fs_del_fsync_node_entry(fio->sbi, fio->page);
+ folio_end_writeback(folio);
+ if (f2fs_in_warm_node_list(fio->sbi, folio))
+ f2fs_del_fsync_node_entry(fio->sbi, folio);
+ f2fs_bug_on(fio->sbi, !is_set_ckpt_flags(fio->sbi,
+ CP_ERROR_FLAG));
goto out;
}
+
+ f2fs_bug_on(fio->sbi, !f2fs_is_valid_blkaddr_raw(fio->sbi,
+ fio->new_blkaddr, DATA_GENERIC_ENHANCE));
+
if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
- f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr);
+ f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr, 1);
/* writeout dirty page into bdev */
f2fs_submit_page_write(fio);
@@ -3879,7 +4021,7 @@ void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio,
.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
.old_blkaddr = folio->index,
.new_blkaddr = folio->index,
- .page = folio_page(folio, 0),
+ .folio = folio,
.encrypted_page = NULL,
.in_list = 0,
};
@@ -3958,7 +4100,7 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
if (!err) {
f2fs_update_device_state(fio->sbi, fio->ino,
fio->new_blkaddr, 1);
- f2fs_update_iostat(fio->sbi, fio->page->mapping->host,
+ f2fs_update_iostat(fio->sbi, fio_inode(fio),
fio->io_type, F2FS_BLKSIZE);
}
@@ -4007,14 +4149,14 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
if (!recover_curseg) {
/* for recovery flow */
- if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
+ if (se->valid_blocks == 0 && !is_curseg(sbi, segno)) {
if (old_blkaddr == NULL_ADDR)
type = CURSEG_COLD_DATA;
else
type = CURSEG_WARM_DATA;
}
} else {
- if (IS_CURSEG(sbi, segno)) {
+ if (is_curseg(sbi, segno)) {
/* se->type is volatile as SSR allocation */
type = __f2fs_get_curseg(sbi, segno);
f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
@@ -4049,7 +4191,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
update_sit_entry(sbi, new_blkaddr, 1);
}
if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
- f2fs_invalidate_internal_cache(sbi, old_blkaddr);
+ f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1);
if (!from_gc)
update_segment_mtime(sbi, old_blkaddr, 0);
update_sit_entry(sbi, old_blkaddr, -1);
@@ -4091,22 +4233,21 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
f2fs_update_data_blkaddr(dn, new_addr);
}
-void f2fs_wait_on_page_writeback(struct page *page,
- enum page_type type, bool ordered, bool locked)
+void f2fs_folio_wait_writeback(struct folio *folio, enum page_type type,
+ bool ordered, bool locked)
{
- if (folio_test_writeback(page_folio(page))) {
- struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ if (folio_test_writeback(folio)) {
+ struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
/* submit cached LFS IO */
- f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
+ f2fs_submit_merged_write_cond(sbi, NULL, folio, 0, type);
/* submit cached IPU IO */
- f2fs_submit_merged_ipu_write(sbi, NULL, page);
+ f2fs_submit_merged_ipu_write(sbi, NULL, folio);
if (ordered) {
- wait_on_page_writeback(page);
- f2fs_bug_on(sbi, locked &&
- folio_test_writeback(page_folio(page)));
+ folio_wait_writeback(folio);
+ f2fs_bug_on(sbi, locked && folio_test_writeback(folio));
} else {
- wait_for_stable_page(page);
+ folio_wait_stable(folio);
}
}
}
@@ -4114,7 +4255,7 @@ void f2fs_wait_on_page_writeback(struct page *page,
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct page *cpage;
+ struct folio *cfolio;
if (!f2fs_meta_inode_gc_required(inode))
return;
@@ -4122,10 +4263,10 @@ void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
if (!__is_valid_data_blkaddr(blkaddr))
return;
- cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
- if (cpage) {
- f2fs_wait_on_page_writeback(cpage, DATA, true, true);
- f2fs_put_page(cpage, 1);
+ cfolio = filemap_lock_folio(META_MAPPING(sbi), blkaddr);
+ if (!IS_ERR(cfolio)) {
+ f2fs_folio_wait_writeback(cfolio, DATA, true, true);
+ f2fs_folio_put(cfolio, true);
}
}
@@ -4149,16 +4290,16 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct curseg_info *seg_i;
unsigned char *kaddr;
- struct page *page;
+ struct folio *folio;
block_t start;
int i, j, offset;
start = start_sum_block(sbi);
- page = f2fs_get_meta_page(sbi, start++);
- if (IS_ERR(page))
- return PTR_ERR(page);
- kaddr = (unsigned char *)page_address(page);
+ folio = f2fs_get_meta_folio(sbi, start++);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ kaddr = folio_address(folio);
/* Step 1: restore nat cache */
seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -4195,17 +4336,16 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
SUM_FOOTER_SIZE)
continue;
- f2fs_put_page(page, 1);
- page = NULL;
+ f2fs_folio_put(folio, true);
- page = f2fs_get_meta_page(sbi, start++);
- if (IS_ERR(page))
- return PTR_ERR(page);
- kaddr = (unsigned char *)page_address(page);
+ folio = f2fs_get_meta_folio(sbi, start++);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ kaddr = folio_address(folio);
offset = 0;
}
}
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return 0;
}
@@ -4214,7 +4354,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct f2fs_summary_block *sum;
struct curseg_info *curseg;
- struct page *new;
+ struct folio *new;
unsigned short blk_off;
unsigned int segno = 0;
block_t blk_addr = 0;
@@ -4241,10 +4381,10 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
blk_addr = GET_SUM_BLOCK(sbi, segno);
}
- new = f2fs_get_meta_page(sbi, blk_addr);
+ new = f2fs_get_meta_folio(sbi, blk_addr);
if (IS_ERR(new))
return PTR_ERR(new);
- sum = (struct f2fs_summary_block *)page_address(new);
+ sum = folio_address(new);
if (IS_NODESEG(type)) {
if (__exist_node_summaries(sbi)) {
@@ -4279,7 +4419,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
curseg->next_blkoff = blk_off;
mutex_unlock(&curseg->curseg_mutex);
out:
- f2fs_put_page(new, 1);
+ f2fs_folio_put(new, true);
return err;
}
@@ -4328,15 +4468,15 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
- struct page *page;
+ struct folio *folio;
unsigned char *kaddr;
struct f2fs_summary *summary;
struct curseg_info *seg_i;
int written_size = 0;
int i, j;
- page = f2fs_grab_meta_page(sbi, blkaddr++);
- kaddr = (unsigned char *)page_address(page);
+ folio = f2fs_grab_meta_folio(sbi, blkaddr++);
+ kaddr = folio_address(folio);
memset(kaddr, 0, PAGE_SIZE);
/* Step 1: write nat cache */
@@ -4353,9 +4493,9 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
seg_i = CURSEG_I(sbi, i);
for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) {
- if (!page) {
- page = f2fs_grab_meta_page(sbi, blkaddr++);
- kaddr = (unsigned char *)page_address(page);
+ if (!folio) {
+ folio = f2fs_grab_meta_folio(sbi, blkaddr++);
+ kaddr = folio_address(folio);
memset(kaddr, 0, PAGE_SIZE);
written_size = 0;
}
@@ -4367,14 +4507,14 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
SUM_FOOTER_SIZE)
continue;
- set_page_dirty(page);
- f2fs_put_page(page, 1);
- page = NULL;
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
+ folio = NULL;
}
}
- if (page) {
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ if (folio) {
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
}
}
@@ -4427,29 +4567,29 @@ int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
return -1;
}
-static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
+static struct folio *get_current_sit_folio(struct f2fs_sb_info *sbi,
unsigned int segno)
{
- return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
+ return f2fs_get_meta_folio(sbi, current_sit_addr(sbi, segno));
}
-static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
+static struct folio *get_next_sit_folio(struct f2fs_sb_info *sbi,
unsigned int start)
{
struct sit_info *sit_i = SIT_I(sbi);
- struct page *page;
+ struct folio *folio;
pgoff_t src_off, dst_off;
src_off = current_sit_addr(sbi, start);
dst_off = next_sit_addr(sbi, src_off);
- page = f2fs_grab_meta_page(sbi, dst_off);
- seg_info_to_sit_page(sbi, page, start);
+ folio = f2fs_grab_meta_folio(sbi, dst_off);
+ seg_info_to_sit_folio(sbi, folio, start);
- set_page_dirty(page);
+ folio_mark_dirty(folio);
set_to_next_sit(sit_i, start);
- return page;
+ return folio;
}
static struct sit_entry_set *grab_sit_entry_set(void)
@@ -4579,7 +4719,7 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
* #2, flush sit entries to sit page.
*/
list_for_each_entry_safe(ses, tmp, head, set_list) {
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct f2fs_sit_block *raw_sit = NULL;
unsigned int start_segno = ses->start_segno;
unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
@@ -4593,8 +4733,8 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
if (to_journal) {
down_write(&curseg->journal_rwsem);
} else {
- page = get_next_sit_page(sbi, start_segno);
- raw_sit = page_address(page);
+ folio = get_next_sit_folio(sbi, start_segno);
+ raw_sit = folio_address(folio);
}
/* flush dirty sit entries in region of current sit set */
@@ -4632,6 +4772,12 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
&raw_sit->entries[sit_offset]);
}
+ /* update ckpt_valid_blocks */
+ if (__is_large_section(sbi)) {
+ set_ckpt_valid_blocks(sbi, segno);
+ sanity_check_valid_blocks(sbi, segno);
+ }
+
__clear_bit(segno, bitmap);
sit_i->dirty_sentries--;
ses->entry_cnt--;
@@ -4640,7 +4786,7 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
if (to_journal)
up_write(&curseg->journal_rwsem);
else
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
f2fs_bug_on(sbi, ses->entry_cnt);
release_sit_entry_set(ses);
@@ -4852,15 +4998,15 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
for (; start < end && start < MAIN_SEGS(sbi); start++) {
struct f2fs_sit_block *sit_blk;
- struct page *page;
+ struct folio *folio;
se = &sit_i->sentries[start];
- page = get_current_sit_page(sbi, start);
- if (IS_ERR(page))
- return PTR_ERR(page);
- sit_blk = (struct f2fs_sit_block *)page_address(page);
+ folio = get_current_sit_folio(sbi, start);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ sit_blk = folio_address(folio);
sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
err = check_block_count(sbi, start, &sit);
if (err)
@@ -4953,6 +5099,16 @@ init_discard_map_done:
}
up_read(&curseg->journal_rwsem);
+ /* update ckpt_valid_blocks */
+ if (__is_large_section(sbi)) {
+ unsigned int segno;
+
+ for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
+ set_ckpt_valid_blocks(sbi, segno);
+ sanity_check_valid_blocks(sbi, segno);
+ }
+ }
+
if (err)
return err;
@@ -5036,7 +5192,7 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi))
continue;
- if (IS_CURSEC(sbi, secno))
+ if (is_cursec(sbi, secno))
continue;
set_bit(secno, dirty_i->dirty_secmap);
}
@@ -5172,7 +5328,7 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
* Get # of valid block of the zone.
*/
valid_block_cnt = get_valid_blocks(sbi, zone_segno, true);
- if (IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, zone_segno))) {
+ if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, zone_segno))) {
f2fs_notice(sbi, "Open zones: valid block[0x%x,0x%x] cond[%s]",
zone_segno, valid_block_cnt,
blk_zone_cond_str(zone->cond));
@@ -5405,7 +5561,8 @@ int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi)
{
int ret;
- if (!f2fs_sb_has_blkzoned(sbi) || f2fs_readonly(sbi->sb))
+ if (!f2fs_sb_has_blkzoned(sbi) || f2fs_readonly(sbi->sb) ||
+ f2fs_hw_is_readonly(sbi))
return 0;
f2fs_notice(sbi, "Checking entire write pointers");
@@ -5492,8 +5649,10 @@ unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi,
secno = GET_SEC_FROM_SEG(sbi, segno);
start = GET_SEG_FROM_SEC(sbi, secno);
- if (!__is_large_section(sbi))
- return get_seg_entry(sbi, start + i)->mtime;
+ if (!__is_large_section(sbi)) {
+ mtime = get_seg_entry(sbi, start + i)->mtime;
+ goto out;
+ }
for (i = 0; i < usable_segs_per_sec; i++) {
/* for large section, only check the mtime of valid segments */
@@ -5506,7 +5665,11 @@ unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi,
if (total_valid_blocks == 0)
return INVALID_MTIME;
- return div_u64(mtime, total_valid_blocks);
+ mtime = div_u64(mtime, total_valid_blocks);
+out:
+ if (unlikely(mtime == INVALID_MTIME))
+ mtime -= 1;
+ return mtime;
}
/*
@@ -5692,9 +5855,9 @@ static void destroy_sit_info(struct f2fs_sb_info *sbi)
kvfree(sit_i->dirty_sentries_bitmap);
SM_I(sbi)->sit_info = NULL;
- kvfree(sit_i->sit_bitmap);
+ kfree(sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
- kvfree(sit_i->sit_bitmap_mir);
+ kfree(sit_i->sit_bitmap_mir);
kvfree(sit_i->invalid_segmap);
#endif
kfree(sit_i);
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 943be4f1d6d2..07dcbcbeb7c6 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -34,34 +34,6 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
f2fs_bug_on(sbi, seg_type >= NR_PERSISTENT_LOG);
}
-#define IS_CURSEG(sbi, seg) \
- (((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
- ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
- ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
- ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
- ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
- ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno) || \
- ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno) || \
- ((seg) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno))
-
-#define IS_CURSEC(sbi, secno) \
- (((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
- SEGS_PER_SEC(sbi)) || \
- ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \
- SEGS_PER_SEC(sbi)) || \
- ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \
- SEGS_PER_SEC(sbi)) || \
- ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \
- SEGS_PER_SEC(sbi)) || \
- ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \
- SEGS_PER_SEC(sbi)) || \
- ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
- SEGS_PER_SEC(sbi)) || \
- ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno / \
- SEGS_PER_SEC(sbi)) || \
- ((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno / \
- SEGS_PER_SEC(sbi)))
-
#define MAIN_BLKADDR(sbi) \
(SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \
le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
@@ -97,11 +69,18 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
((!__is_valid_data_blkaddr(blk_addr)) ? \
NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
+#ifdef CONFIG_BLK_DEV_ZONED
#define CAP_BLKS_PER_SEC(sbi) \
(BLKS_PER_SEC(sbi) - (sbi)->unusable_blocks_per_sec)
#define CAP_SEGS_PER_SEC(sbi) \
(SEGS_PER_SEC(sbi) - \
BLKS_TO_SEGS(sbi, (sbi)->unusable_blocks_per_sec))
+#else
+#define CAP_BLKS_PER_SEC(sbi) BLKS_PER_SEC(sbi)
+#define CAP_SEGS_PER_SEC(sbi) SEGS_PER_SEC(sbi)
+#endif
+#define GET_START_SEG_FROM_SEC(sbi, segno) \
+ (rounddown(segno, SEGS_PER_SEC(sbi)))
#define GET_SEC_FROM_SEG(sbi, segno) \
(((segno) == -1) ? -1 : (segno) / SEGS_PER_SEC(sbi))
#define GET_SEG_FROM_SEC(sbi, secno) \
@@ -111,8 +90,12 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
#define GET_ZONE_FROM_SEG(sbi, segno) \
GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
-#define GET_SUM_BLOCK(sbi, segno) \
- ((sbi)->sm_info->ssa_blkaddr + (segno))
+#define SUMS_PER_BLOCK (F2FS_BLKSIZE / F2FS_SUM_BLKSIZE)
+#define GET_SUM_BLOCK(sbi, segno) \
+ (SM_I(sbi)->ssa_blkaddr + (segno / SUMS_PER_BLOCK))
+#define GET_SUM_BLKOFF(segno) (segno % SUMS_PER_BLOCK)
+#define SUM_BLK_PAGE_ADDR(folio, segno) \
+ (folio_address(folio) + GET_SUM_BLKOFF(segno) * F2FS_SUM_BLKSIZE)
#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))
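With SUMS_PER_BLOCK summary blocks packed into one metadata block, GET_SUM_BLOCK() above selects the SSA block and GET_SUM_BLKOFF()/SUM_BLK_PAGE_ADDR() the slot within it. A standalone sketch of that arithmetic follows; the 16 KiB filesystem block, 4 KiB summary block and SSA start address are assumptions chosen purely so that four segment summaries share one block.

/* sketch of the segno -> (meta block, byte offset) mapping; not kernel code */
#include <stdio.h>

#define BLKSIZE		16384u
#define SUM_BLKSIZE	4096u
#define SUMS_PER_BLOCK	(BLKSIZE / SUM_BLKSIZE)

int main(void)
{
	unsigned int ssa_blkaddr = 1000;	/* assumed start of the SSA area */
	unsigned int segno;

	for (segno = 0; segno < 6; segno++)
		printf("segno %u -> meta block %u, byte offset %u\n",
		       segno,
		       ssa_blkaddr + segno / SUMS_PER_BLOCK,
		       (segno % SUMS_PER_BLOCK) * SUM_BLKSIZE);
	return 0;
}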
@@ -209,6 +192,7 @@ struct seg_entry {
struct sec_entry {
unsigned int valid_blocks; /* # of valid blocks in a section */
+ unsigned int ckpt_valid_blocks; /* # of valid blocks last cp in a section */
};
#define MAX_SKIP_GC_COUNT 16
@@ -315,6 +299,28 @@ static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}
+static inline bool is_curseg(struct f2fs_sb_info *sbi, unsigned int segno)
+{
+ int i;
+
+ for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
+ if (segno == CURSEG_I(sbi, i)->segno)
+ return true;
+ }
+ return false;
+}
+
+static inline bool is_cursec(struct f2fs_sb_info *sbi, unsigned int secno)
+{
+ int i;
+
+ for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
+ if (secno == GET_SEC_FROM_SEG(sbi, CURSEG_I(sbi, i)->segno))
+ return true;
+ }
+ return false;
+}
+
static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
unsigned int segno)
{
@@ -345,22 +351,57 @@ static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
unsigned int segno, bool use_section)
{
- if (use_section && __is_large_section(sbi)) {
- unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
- unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
- unsigned int blocks = 0;
- int i;
+ if (use_section && __is_large_section(sbi))
+ return get_sec_entry(sbi, segno)->ckpt_valid_blocks;
+ else
+ return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+}
- for (i = 0; i < SEGS_PER_SEC(sbi); i++, start_segno++) {
- struct seg_entry *se = get_seg_entry(sbi, start_segno);
+static inline void set_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+{
+ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+ unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
+ unsigned int blocks = 0;
+ int i;
- blocks += se->ckpt_valid_blocks;
- }
- return blocks;
+ for (i = 0; i < SEGS_PER_SEC(sbi); i++, start_segno++) {
+ struct seg_entry *se = get_seg_entry(sbi, start_segno);
+
+ blocks += se->ckpt_valid_blocks;
}
- return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+ get_sec_entry(sbi, segno)->ckpt_valid_blocks = blocks;
}
+#ifdef CONFIG_F2FS_CHECK_FS
+static inline void sanity_check_valid_blocks(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+{
+ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+ unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
+ unsigned int blocks = 0;
+ int i;
+
+ for (i = 0; i < SEGS_PER_SEC(sbi); i++, start_segno++) {
+ struct seg_entry *se = get_seg_entry(sbi, start_segno);
+
+ blocks += se->ckpt_valid_blocks;
+ }
+
+ if (blocks != get_sec_entry(sbi, segno)->ckpt_valid_blocks) {
+ f2fs_err(sbi,
+ "Inconsistent ckpt valid blocks: "
+ "seg entry(%d) vs sec entry(%d) at secno %d",
+ blocks, get_sec_entry(sbi, segno)->ckpt_valid_blocks, secno);
+ f2fs_bug_on(sbi, 1);
+ }
+}
+#else
+static inline void sanity_check_valid_blocks(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+{
+}
+#endif
static inline void seg_info_from_raw_sit(struct seg_entry *se,
struct f2fs_sit_entry *rs)
{
@@ -385,8 +426,8 @@ static inline void __seg_info_to_raw_sit(struct seg_entry *se,
rs->mtime = cpu_to_le64(se->mtime);
}
-static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
- struct page *page, unsigned int start)
+static inline void seg_info_to_sit_folio(struct f2fs_sb_info *sbi,
+ struct folio *folio, unsigned int start)
{
struct f2fs_sit_block *raw_sit;
struct seg_entry *se;
@@ -395,7 +436,7 @@ static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
(unsigned long)MAIN_SEGS(sbi));
int i;
- raw_sit = (struct f2fs_sit_block *)page_address(page);
+ raw_sit = folio_address(folio);
memset(raw_sit, 0, PAGE_SIZE);
for (i = 0; i < end - start; i++) {
rs = &raw_sit->entries[i];
@@ -429,7 +470,6 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
unsigned int next;
- unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi);
spin_lock(&free_i->segmap_lock);
clear_bit(segno, free_i->free_segmap);
@@ -437,7 +477,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
next = find_next_bit(free_i->free_segmap,
start_segno + SEGS_PER_SEC(sbi), start_segno);
- if (next >= start_segno + usable_segs) {
+ if (next >= start_segno + f2fs_usable_segs_in_sec(sbi)) {
clear_bit(secno, free_i->free_secmap);
free_i->free_sections++;
}
@@ -463,22 +503,36 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
unsigned int next;
- unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi);
+ bool ret;
spin_lock(&free_i->segmap_lock);
- if (test_and_clear_bit(segno, free_i->free_segmap)) {
- free_i->free_segments++;
-
- if (!inmem && IS_CURSEC(sbi, secno))
- goto skip_free;
- next = find_next_bit(free_i->free_segmap,
- start_segno + SEGS_PER_SEC(sbi), start_segno);
- if (next >= start_segno + usable_segs) {
- if (test_and_clear_bit(secno, free_i->free_secmap))
- free_i->free_sections++;
- }
- }
-skip_free:
+ ret = test_and_clear_bit(segno, free_i->free_segmap);
+ if (!ret)
+ goto unlock_out;
+
+ free_i->free_segments++;
+
+ if (!inmem && is_cursec(sbi, secno))
+ goto unlock_out;
+
+ /* check large section */
+ next = find_next_bit(free_i->free_segmap,
+ start_segno + SEGS_PER_SEC(sbi), start_segno);
+ if (next < start_segno + f2fs_usable_segs_in_sec(sbi))
+ goto unlock_out;
+
+ ret = test_and_clear_bit(secno, free_i->free_secmap);
+ if (!ret)
+ goto unlock_out;
+
+ free_i->free_sections++;
+
+ if (GET_SEC_FROM_SEG(sbi, sbi->next_victim_seg[BG_GC]) == secno)
+ sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
+ if (GET_SEC_FROM_SEG(sbi, sbi->next_victim_seg[FG_GC]) == secno)
+ sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
+
+unlock_out:
spin_unlock(&free_i->segmap_lock);
}
@@ -555,19 +609,33 @@ static inline int reserved_sections(struct f2fs_sb_info *sbi)
return GET_SEC_FROM_SEG(sbi, reserved_segments(sbi));
}
+static inline unsigned int get_left_section_blocks(struct f2fs_sb_info *sbi,
+ enum log_type type, unsigned int segno)
+{
+ if (f2fs_lfs_mode(sbi)) {
+ unsigned int used_blocks = __is_large_section(sbi) ? SEGS_TO_BLKS(sbi,
+ (segno - GET_START_SEG_FROM_SEC(sbi, segno))) : 0;
+ return CAP_BLKS_PER_SEC(sbi) - used_blocks -
+ CURSEG_I(sbi, type)->next_blkoff;
+ }
+ return CAP_BLKS_PER_SEC(sbi) - get_ckpt_valid_blocks(sbi, segno, true);
+}
+
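In LFS mode the earlier segments of a large section are written front to back, so get_left_section_blocks() above can compute the used space as the whole segments before curseg plus curseg's next_blkoff; otherwise it falls back to the checkpointed valid-block count. A standalone sketch of the LFS-mode arithmetic; the segment/section geometry and the sample values are assumptions.

/* sketch of the LFS-mode space estimate; not kernel code */
#include <stdio.h>

#define BLKS_PER_SEG	512u
#define SEGS_PER_SEC	4u
#define CAP_BLKS_PER_SEC (BLKS_PER_SEG * SEGS_PER_SEC)

static unsigned int left_section_blocks_lfs(unsigned int segno,
					    unsigned int next_blkoff)
{
	unsigned int start_seg = segno - (segno % SEGS_PER_SEC);	/* rounddown */
	unsigned int used = (segno - start_seg) * BLKS_PER_SEG + next_blkoff;

	return CAP_BLKS_PER_SEC - used;
}

int main(void)
{
	/* curseg is the third segment of its section, 100 blocks already used in it */
	printf("%u blocks left\n", left_section_blocks_lfs(10, 100));	/* 924 */
	return 0;
}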
static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
unsigned int node_blocks, unsigned int data_blocks,
unsigned int dent_blocks)
{
-
unsigned int segno, left_blocks, blocks;
int i;
/* check current data/node sections in the worst case. */
for (i = CURSEG_HOT_DATA; i < NR_PERSISTENT_LOG; i++) {
segno = CURSEG_I(sbi, i)->segno;
- left_blocks = CAP_BLKS_PER_SEC(sbi) -
- get_ckpt_valid_blocks(sbi, segno, true);
+
+ if (unlikely(segno == NULL_SEGNO))
+ return false;
+
+ left_blocks = get_left_section_blocks(sbi, i, segno);
blocks = i <= CURSEG_COLD_DATA ? data_blocks : node_blocks;
if (blocks > left_blocks)
@@ -576,8 +644,12 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
/* check current data section for dentry blocks. */
segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
- left_blocks = CAP_BLKS_PER_SEC(sbi) -
- get_ckpt_valid_blocks(sbi, segno, true);
+
+ if (unlikely(segno == NULL_SEGNO))
+ return false;
+
+ left_blocks = get_left_section_blocks(sbi, CURSEG_HOT_DATA, segno);
+
if (dent_blocks > left_blocks)
return false;
return true;
@@ -603,8 +675,7 @@ static inline void __get_secs_required(struct f2fs_sb_info *sbi,
unsigned int dent_blocks = total_dent_blocks % CAP_BLKS_PER_SEC(sbi);
unsigned int data_blocks = 0;
- if (f2fs_lfs_mode(sbi) &&
- unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+ if (f2fs_lfs_mode(sbi)) {
total_data_blocks = get_pages(sbi, F2FS_DIRTY_DATA);
data_secs = total_data_blocks / CAP_BLKS_PER_SEC(sbi);
data_blocks = total_data_blocks % CAP_BLKS_PER_SEC(sbi);
@@ -613,7 +684,7 @@ static inline void __get_secs_required(struct f2fs_sb_info *sbi,
if (lower_p)
*lower_p = node_secs + dent_secs + data_secs;
if (upper_p)
- *upper_p = node_secs + dent_secs +
+ *upper_p = node_secs + dent_secs + data_secs +
(node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0) +
(data_blocks ? 1 : 0);
if (curseg_p)
@@ -915,7 +986,7 @@ static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
- if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
+ if (is_cursec(sbi, secno) || (sbi->cur_victim_sec == secno))
return true;
return false;
}
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index 83d6fb97dcae..b88babcf6ab4 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -73,7 +73,7 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
mutex_unlock(&sbi->umount_mutex);
}
spin_unlock(&f2fs_list_lock);
- return count;
+ return count ?: SHRINK_EMPTY;
}
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
@@ -130,6 +130,103 @@ unsigned long f2fs_shrink_scan(struct shrinker *shrink,
return freed;
}
+unsigned int f2fs_donate_files(void)
+{
+ struct f2fs_sb_info *sbi;
+ struct list_head *p;
+ unsigned int donate_files = 0;
+
+ spin_lock(&f2fs_list_lock);
+ p = f2fs_list.next;
+ while (p != &f2fs_list) {
+ sbi = list_entry(p, struct f2fs_sb_info, s_list);
+
+ /* stop f2fs_put_super */
+ if (!mutex_trylock(&sbi->umount_mutex)) {
+ p = p->next;
+ continue;
+ }
+ spin_unlock(&f2fs_list_lock);
+
+ donate_files += sbi->donate_files;
+
+ spin_lock(&f2fs_list_lock);
+ p = p->next;
+ mutex_unlock(&sbi->umount_mutex);
+ }
+ spin_unlock(&f2fs_list_lock);
+
+ return donate_files;
+}
+
+static unsigned int do_reclaim_caches(struct f2fs_sb_info *sbi,
+ unsigned int reclaim_caches_kb)
+{
+ struct inode *inode;
+ struct f2fs_inode_info *fi;
+ unsigned int nfiles = sbi->donate_files;
+ pgoff_t npages = reclaim_caches_kb >> (PAGE_SHIFT - 10);
+
+ while (npages && nfiles--) {
+ pgoff_t len;
+
+ spin_lock(&sbi->inode_lock[DONATE_INODE]);
+ if (list_empty(&sbi->inode_list[DONATE_INODE])) {
+ spin_unlock(&sbi->inode_lock[DONATE_INODE]);
+ break;
+ }
+ fi = list_first_entry(&sbi->inode_list[DONATE_INODE],
+ struct f2fs_inode_info, gdonate_list);
+ list_move_tail(&fi->gdonate_list, &sbi->inode_list[DONATE_INODE]);
+ inode = igrab(&fi->vfs_inode);
+ spin_unlock(&sbi->inode_lock[DONATE_INODE]);
+
+ if (!inode)
+ continue;
+
+ inode_lock(inode);
+ if (!is_inode_flag_set(inode, FI_DONATE_FINISHED)) {
+ len = fi->donate_end - fi->donate_start + 1;
+ npages = npages < len ? 0 : npages - len;
+
+ invalidate_inode_pages2_range(inode->i_mapping,
+ fi->donate_start, fi->donate_end);
+ set_inode_flag(inode, FI_DONATE_FINISHED);
+ }
+ inode_unlock(inode);
+
+ iput(inode);
+ cond_resched();
+ }
+ return npages << (PAGE_SHIFT - 10);
+}
+
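do_reclaim_caches() above takes its budget in KiB and works internally in pages: reclaim_caches_kb >> (PAGE_SHIFT - 10) converts KiB to pages, and the unspent remainder is converted back with the opposite shift. A tiny standalone check of that conversion, assuming 4 KiB pages (PAGE_SHIFT == 12); the numbers are made up.

/* sketch of the KiB <-> page accounting; not kernel code */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: 4 KiB pages */

int main(void)
{
	unsigned long budget_kb = 1024;				/* 1 MiB budget */
	unsigned long npages = budget_kb >> (PAGE_SHIFT - 10);	/* 256 pages */

	npages -= 100;	/* pretend 100 pages were invalidated */

	printf("%lu pages left, i.e. %lu KiB unspent\n",
	       npages, npages << (PAGE_SHIFT - 10));	/* 156 pages, 624 KiB */
	return 0;
}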
+void f2fs_reclaim_caches(unsigned int reclaim_caches_kb)
+{
+ struct f2fs_sb_info *sbi;
+ struct list_head *p;
+
+ spin_lock(&f2fs_list_lock);
+ p = f2fs_list.next;
+ while (p != &f2fs_list && reclaim_caches_kb) {
+ sbi = list_entry(p, struct f2fs_sb_info, s_list);
+
+ /* stop f2fs_put_super */
+ if (!mutex_trylock(&sbi->umount_mutex)) {
+ p = p->next;
+ continue;
+ }
+ spin_unlock(&f2fs_list_lock);
+
+ reclaim_caches_kb = do_reclaim_caches(sbi, reclaim_caches_kb);
+
+ spin_lock(&f2fs_list_lock);
+ p = p->next;
+ mutex_unlock(&sbi->umount_mutex);
+ }
+ spin_unlock(&f2fs_list_lock);
+}
+
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
spin_lock(&f2fs_list_lock);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index fc7d463dee15..c4c225e09dc4 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -27,6 +27,8 @@
#include <linux/part_stat.h>
#include <linux/zstd.h>
#include <linux/lz4.h>
+#include <linux/ctype.h>
+#include <linux/fs_parser.h>
#include "f2fs.h"
#include "node.h"
@@ -47,6 +49,7 @@ const char *f2fs_fault_name[FAULT_MAX] = {
[FAULT_KVMALLOC] = "kvmalloc",
[FAULT_PAGE_ALLOC] = "page alloc",
[FAULT_PAGE_GET] = "page get",
+ [FAULT_ALLOC_BIO] = "alloc bio(obsolete)",
[FAULT_ALLOC_NID] = "alloc nid",
[FAULT_ORPHAN] = "orphan",
[FAULT_BLOCK] = "no more block",
@@ -63,32 +66,36 @@ const char *f2fs_fault_name[FAULT_MAX] = {
[FAULT_BLKADDR_VALIDITY] = "invalid blkaddr",
[FAULT_BLKADDR_CONSISTENCE] = "inconsistent blkaddr",
[FAULT_NO_SEGMENT] = "no free segment",
+ [FAULT_INCONSISTENT_FOOTER] = "inconsistent footer",
+ [FAULT_TIMEOUT] = "timeout",
+ [FAULT_VMALLOC] = "vmalloc",
};
int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
- unsigned long type)
+ unsigned long type, enum fault_option fo)
{
struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
- if (rate) {
+ if (fo & FAULT_ALL) {
+ memset(ffi, 0, sizeof(struct f2fs_fault_info));
+ return 0;
+ }
+
+ if (fo & FAULT_RATE) {
if (rate > INT_MAX)
return -EINVAL;
atomic_set(&ffi->inject_ops, 0);
ffi->inject_rate = (int)rate;
+ f2fs_info(sbi, "build fault injection rate: %lu", rate);
}
- if (type) {
+ if (fo & FAULT_TYPE) {
if (type >= BIT(FAULT_MAX))
return -EINVAL;
ffi->inject_type = (unsigned int)type;
+ f2fs_info(sbi, "build fault injection type: 0x%lx", type);
}
- if (!rate && !type)
- memset(ffi, 0, sizeof(struct f2fs_fault_info));
- else
- f2fs_info(sbi,
- "build fault injection attr: rate: %lu, type: 0x%lx",
- rate, type);
return 0;
}
#endif
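The reworked f2fs_build_fault_attr() above takes an explicit fault_option mask, so callers state whether they are updating the rate, the type, or clearing everything, instead of the old convention of inferring a reset from both values being zero. A standalone sketch of that flag-driven update; the flag values and the struct layout are assumptions.

/* sketch of mask-selected attribute updates; not kernel code */
#include <stdio.h>
#include <string.h>

enum fault_option { FAULT_RATE = 1, FAULT_TYPE = 2, FAULT_ALL = 4 };	/* assumed values */

struct fault_info { unsigned int rate; unsigned int type; };

static int build_fault_attr(struct fault_info *fi, unsigned long rate,
			    unsigned long type, enum fault_option fo)
{
	if (fo & FAULT_ALL) {			/* clear everything */
		memset(fi, 0, sizeof(*fi));
		return 0;
	}
	if (fo & FAULT_RATE)
		fi->rate = (unsigned int)rate;	/* only touch the rate */
	if (fo & FAULT_TYPE)
		fi->type = (unsigned int)type;	/* only touch the type */
	return 0;
}

int main(void)
{
	struct fault_info fi = { 0, 0 };

	build_fault_attr(&fi, 10, 0, FAULT_RATE);
	build_fault_attr(&fi, 0, 0x8, FAULT_TYPE);
	printf("rate=%u type=0x%x\n", fi.rate, fi.type);	/* rate=10 type=0x8 */
	return 0;
}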
@@ -120,52 +127,36 @@ enum {
Opt_disable_roll_forward,
Opt_norecovery,
Opt_discard,
- Opt_nodiscard,
Opt_noheap,
Opt_heap,
Opt_user_xattr,
- Opt_nouser_xattr,
Opt_acl,
- Opt_noacl,
Opt_active_logs,
Opt_disable_ext_identify,
Opt_inline_xattr,
- Opt_noinline_xattr,
Opt_inline_xattr_size,
Opt_inline_data,
Opt_inline_dentry,
- Opt_noinline_dentry,
Opt_flush_merge,
- Opt_noflush_merge,
Opt_barrier,
- Opt_nobarrier,
Opt_fastboot,
Opt_extent_cache,
- Opt_noextent_cache,
- Opt_noinline_data,
Opt_data_flush,
Opt_reserve_root,
+ Opt_reserve_node,
Opt_resgid,
Opt_resuid,
Opt_mode,
Opt_fault_injection,
Opt_fault_type,
Opt_lazytime,
- Opt_nolazytime,
Opt_quota,
- Opt_noquota,
Opt_usrquota,
Opt_grpquota,
Opt_prjquota,
Opt_usrjquota,
Opt_grpjquota,
Opt_prjjquota,
- Opt_offusrjquota,
- Opt_offgrpjquota,
- Opt_offprjjquota,
- Opt_jqfmt_vfsold,
- Opt_jqfmt_vfsv0,
- Opt_jqfmt_vfsv1,
Opt_alloc,
Opt_fsync,
Opt_test_dummy_encryption,
@@ -175,105 +166,221 @@ enum {
Opt_checkpoint_disable_cap_perc,
Opt_checkpoint_enable,
Opt_checkpoint_merge,
- Opt_nocheckpoint_merge,
Opt_compress_algorithm,
Opt_compress_log_size,
- Opt_compress_extension,
Opt_nocompress_extension,
+ Opt_compress_extension,
Opt_compress_chksum,
Opt_compress_mode,
Opt_compress_cache,
Opt_atgc,
Opt_gc_merge,
- Opt_nogc_merge,
Opt_discard_unit,
Opt_memory_mode,
Opt_age_extent_cache,
Opt_errors,
+ Opt_nat_bits,
+ Opt_jqfmt,
+ Opt_checkpoint,
+ Opt_lookup_mode,
Opt_err,
};
-static match_table_t f2fs_tokens = {
- {Opt_gc_background, "background_gc=%s"},
- {Opt_disable_roll_forward, "disable_roll_forward"},
- {Opt_norecovery, "norecovery"},
- {Opt_discard, "discard"},
- {Opt_nodiscard, "nodiscard"},
- {Opt_noheap, "no_heap"},
- {Opt_heap, "heap"},
- {Opt_user_xattr, "user_xattr"},
- {Opt_nouser_xattr, "nouser_xattr"},
- {Opt_acl, "acl"},
- {Opt_noacl, "noacl"},
- {Opt_active_logs, "active_logs=%u"},
- {Opt_disable_ext_identify, "disable_ext_identify"},
- {Opt_inline_xattr, "inline_xattr"},
- {Opt_noinline_xattr, "noinline_xattr"},
- {Opt_inline_xattr_size, "inline_xattr_size=%u"},
- {Opt_inline_data, "inline_data"},
- {Opt_inline_dentry, "inline_dentry"},
- {Opt_noinline_dentry, "noinline_dentry"},
- {Opt_flush_merge, "flush_merge"},
- {Opt_noflush_merge, "noflush_merge"},
- {Opt_barrier, "barrier"},
- {Opt_nobarrier, "nobarrier"},
- {Opt_fastboot, "fastboot"},
- {Opt_extent_cache, "extent_cache"},
- {Opt_noextent_cache, "noextent_cache"},
- {Opt_noinline_data, "noinline_data"},
- {Opt_data_flush, "data_flush"},
- {Opt_reserve_root, "reserve_root=%u"},
- {Opt_resgid, "resgid=%u"},
- {Opt_resuid, "resuid=%u"},
- {Opt_mode, "mode=%s"},
- {Opt_fault_injection, "fault_injection=%u"},
- {Opt_fault_type, "fault_type=%u"},
- {Opt_lazytime, "lazytime"},
- {Opt_nolazytime, "nolazytime"},
- {Opt_quota, "quota"},
- {Opt_noquota, "noquota"},
- {Opt_usrquota, "usrquota"},
- {Opt_grpquota, "grpquota"},
- {Opt_prjquota, "prjquota"},
- {Opt_usrjquota, "usrjquota=%s"},
- {Opt_grpjquota, "grpjquota=%s"},
- {Opt_prjjquota, "prjjquota=%s"},
- {Opt_offusrjquota, "usrjquota="},
- {Opt_offgrpjquota, "grpjquota="},
- {Opt_offprjjquota, "prjjquota="},
- {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
- {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
- {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
- {Opt_alloc, "alloc_mode=%s"},
- {Opt_fsync, "fsync_mode=%s"},
- {Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
- {Opt_test_dummy_encryption, "test_dummy_encryption"},
- {Opt_inlinecrypt, "inlinecrypt"},
- {Opt_checkpoint_disable, "checkpoint=disable"},
- {Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
- {Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
- {Opt_checkpoint_enable, "checkpoint=enable"},
- {Opt_checkpoint_merge, "checkpoint_merge"},
- {Opt_nocheckpoint_merge, "nocheckpoint_merge"},
- {Opt_compress_algorithm, "compress_algorithm=%s"},
- {Opt_compress_log_size, "compress_log_size=%u"},
- {Opt_compress_extension, "compress_extension=%s"},
- {Opt_nocompress_extension, "nocompress_extension=%s"},
- {Opt_compress_chksum, "compress_chksum"},
- {Opt_compress_mode, "compress_mode=%s"},
- {Opt_compress_cache, "compress_cache"},
- {Opt_atgc, "atgc"},
- {Opt_gc_merge, "gc_merge"},
- {Opt_nogc_merge, "nogc_merge"},
- {Opt_discard_unit, "discard_unit=%s"},
- {Opt_memory_mode, "memory=%s"},
- {Opt_age_extent_cache, "age_extent_cache"},
- {Opt_errors, "errors=%s"},
+static const struct constant_table f2fs_param_background_gc[] = {
+ {"on", BGGC_MODE_ON},
+ {"off", BGGC_MODE_OFF},
+ {"sync", BGGC_MODE_SYNC},
+ {}
+};
+
+static const struct constant_table f2fs_param_mode[] = {
+ {"adaptive", FS_MODE_ADAPTIVE},
+ {"lfs", FS_MODE_LFS},
+ {"fragment:segment", FS_MODE_FRAGMENT_SEG},
+ {"fragment:block", FS_MODE_FRAGMENT_BLK},
+ {}
+};
+
+static const struct constant_table f2fs_param_jqfmt[] = {
+ {"vfsold", QFMT_VFS_OLD},
+ {"vfsv0", QFMT_VFS_V0},
+ {"vfsv1", QFMT_VFS_V1},
+ {}
+};
+
+static const struct constant_table f2fs_param_alloc_mode[] = {
+ {"default", ALLOC_MODE_DEFAULT},
+ {"reuse", ALLOC_MODE_REUSE},
+ {}
+};
+static const struct constant_table f2fs_param_fsync_mode[] = {
+ {"posix", FSYNC_MODE_POSIX},
+ {"strict", FSYNC_MODE_STRICT},
+ {"nobarrier", FSYNC_MODE_NOBARRIER},
+ {}
+};
+
+static const struct constant_table f2fs_param_compress_mode[] = {
+ {"fs", COMPR_MODE_FS},
+ {"user", COMPR_MODE_USER},
+ {}
+};
+
+static const struct constant_table f2fs_param_discard_unit[] = {
+ {"block", DISCARD_UNIT_BLOCK},
+ {"segment", DISCARD_UNIT_SEGMENT},
+ {"section", DISCARD_UNIT_SECTION},
+ {}
+};
+
+static const struct constant_table f2fs_param_memory_mode[] = {
+ {"normal", MEMORY_MODE_NORMAL},
+ {"low", MEMORY_MODE_LOW},
+ {}
+};
+
+static const struct constant_table f2fs_param_errors[] = {
+ {"remount-ro", MOUNT_ERRORS_READONLY},
+ {"continue", MOUNT_ERRORS_CONTINUE},
+ {"panic", MOUNT_ERRORS_PANIC},
+ {}
+};
+
+static const struct constant_table f2fs_param_lookup_mode[] = {
+ {"perf", LOOKUP_PERF},
+ {"compat", LOOKUP_COMPAT},
+ {"auto", LOOKUP_AUTO},
+ {}
+};
+
+static const struct fs_parameter_spec f2fs_param_specs[] = {
+ fsparam_enum("background_gc", Opt_gc_background, f2fs_param_background_gc),
+ fsparam_flag("disable_roll_forward", Opt_disable_roll_forward),
+ fsparam_flag("norecovery", Opt_norecovery),
+ fsparam_flag_no("discard", Opt_discard),
+ fsparam_flag("no_heap", Opt_noheap),
+ fsparam_flag("heap", Opt_heap),
+ fsparam_flag_no("user_xattr", Opt_user_xattr),
+ fsparam_flag_no("acl", Opt_acl),
+ fsparam_s32("active_logs", Opt_active_logs),
+ fsparam_flag("disable_ext_identify", Opt_disable_ext_identify),
+ fsparam_flag_no("inline_xattr", Opt_inline_xattr),
+ fsparam_s32("inline_xattr_size", Opt_inline_xattr_size),
+ fsparam_flag_no("inline_data", Opt_inline_data),
+ fsparam_flag_no("inline_dentry", Opt_inline_dentry),
+ fsparam_flag_no("flush_merge", Opt_flush_merge),
+ fsparam_flag_no("barrier", Opt_barrier),
+ fsparam_flag("fastboot", Opt_fastboot),
+ fsparam_flag_no("extent_cache", Opt_extent_cache),
+ fsparam_flag("data_flush", Opt_data_flush),
+ fsparam_u32("reserve_root", Opt_reserve_root),
+ fsparam_u32("reserve_node", Opt_reserve_node),
+ fsparam_gid("resgid", Opt_resgid),
+ fsparam_uid("resuid", Opt_resuid),
+ fsparam_enum("mode", Opt_mode, f2fs_param_mode),
+ fsparam_s32("fault_injection", Opt_fault_injection),
+ fsparam_u32("fault_type", Opt_fault_type),
+ fsparam_flag_no("lazytime", Opt_lazytime),
+ fsparam_flag_no("quota", Opt_quota),
+ fsparam_flag("usrquota", Opt_usrquota),
+ fsparam_flag("grpquota", Opt_grpquota),
+ fsparam_flag("prjquota", Opt_prjquota),
+ fsparam_string_empty("usrjquota", Opt_usrjquota),
+ fsparam_string_empty("grpjquota", Opt_grpjquota),
+ fsparam_string_empty("prjjquota", Opt_prjjquota),
+ fsparam_flag("nat_bits", Opt_nat_bits),
+ fsparam_enum("jqfmt", Opt_jqfmt, f2fs_param_jqfmt),
+ fsparam_enum("alloc_mode", Opt_alloc, f2fs_param_alloc_mode),
+ fsparam_enum("fsync_mode", Opt_fsync, f2fs_param_fsync_mode),
+ fsparam_string("test_dummy_encryption", Opt_test_dummy_encryption),
+ fsparam_flag("test_dummy_encryption", Opt_test_dummy_encryption),
+ fsparam_flag("inlinecrypt", Opt_inlinecrypt),
+ fsparam_string("checkpoint", Opt_checkpoint),
+ fsparam_flag_no("checkpoint_merge", Opt_checkpoint_merge),
+ fsparam_string("compress_algorithm", Opt_compress_algorithm),
+ fsparam_u32("compress_log_size", Opt_compress_log_size),
+ fsparam_string("compress_extension", Opt_compress_extension),
+ fsparam_string("nocompress_extension", Opt_nocompress_extension),
+ fsparam_flag("compress_chksum", Opt_compress_chksum),
+ fsparam_enum("compress_mode", Opt_compress_mode, f2fs_param_compress_mode),
+ fsparam_flag("compress_cache", Opt_compress_cache),
+ fsparam_flag("atgc", Opt_atgc),
+ fsparam_flag_no("gc_merge", Opt_gc_merge),
+ fsparam_enum("discard_unit", Opt_discard_unit, f2fs_param_discard_unit),
+ fsparam_enum("memory", Opt_memory_mode, f2fs_param_memory_mode),
+ fsparam_flag("age_extent_cache", Opt_age_extent_cache),
+ fsparam_enum("errors", Opt_errors, f2fs_param_errors),
+ fsparam_enum("lookup_mode", Opt_lookup_mode, f2fs_param_lookup_mode),
+ {}
+};
+
+/* Resort to a match_table for this interestingly formatted option */
+static match_table_t f2fs_checkpoint_tokens = {
+ {Opt_checkpoint_disable, "disable"},
+ {Opt_checkpoint_disable_cap, "disable:%u"},
+ {Opt_checkpoint_disable_cap_perc, "disable:%u%%"},
+ {Opt_checkpoint_enable, "enable"},
{Opt_err, NULL},
};
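Each fsparam_enum() entry above resolves its string value through the paired constant_table, e.g. mode=lfs becomes FS_MODE_LFS; in the kernel that resolution happens inside fs_parse(). A minimal userspace analogue of the table walk; the lookup() helper and the -1 fallback are assumptions, not the fs_parser API.

/* standalone analogue of a constant_table lookup; not kernel code */
#include <stdio.h>
#include <string.h>

struct constant_table { const char *name; int value; };

enum { FS_MODE_ADAPTIVE, FS_MODE_LFS, FS_MODE_FRAGMENT_SEG, FS_MODE_FRAGMENT_BLK };

static const struct constant_table param_mode[] = {
	{ "adaptive",		FS_MODE_ADAPTIVE },
	{ "lfs",		FS_MODE_LFS },
	{ "fragment:segment",	FS_MODE_FRAGMENT_SEG },
	{ "fragment:block",	FS_MODE_FRAGMENT_BLK },
	{ }	/* sentinel, as in the tables above */
};

static int lookup(const struct constant_table *tbl, const char *name, int not_found)
{
	for (; tbl->name; tbl++)
		if (!strcmp(tbl->name, name))
			return tbl->value;
	return not_found;
}

int main(void)
{
	printf("mode=lfs   -> %d\n", lookup(param_mode, "lfs", -1));	/* 1 */
	printf("mode=bogus -> %d\n", lookup(param_mode, "bogus", -1));	/* -1 */
	return 0;
}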
+#define F2FS_SPEC_background_gc (1 << 0)
+#define F2FS_SPEC_inline_xattr_size (1 << 1)
+#define F2FS_SPEC_active_logs (1 << 2)
+#define F2FS_SPEC_reserve_root (1 << 3)
+#define F2FS_SPEC_resgid (1 << 4)
+#define F2FS_SPEC_resuid (1 << 5)
+#define F2FS_SPEC_mode (1 << 6)
+#define F2FS_SPEC_fault_injection (1 << 7)
+#define F2FS_SPEC_fault_type (1 << 8)
+#define F2FS_SPEC_jqfmt (1 << 9)
+#define F2FS_SPEC_alloc_mode (1 << 10)
+#define F2FS_SPEC_fsync_mode (1 << 11)
+#define F2FS_SPEC_checkpoint_disable_cap (1 << 12)
+#define F2FS_SPEC_checkpoint_disable_cap_perc (1 << 13)
+#define F2FS_SPEC_compress_level (1 << 14)
+#define F2FS_SPEC_compress_algorithm (1 << 15)
+#define F2FS_SPEC_compress_log_size (1 << 16)
+#define F2FS_SPEC_compress_extension (1 << 17)
+#define F2FS_SPEC_nocompress_extension (1 << 18)
+#define F2FS_SPEC_compress_chksum (1 << 19)
+#define F2FS_SPEC_compress_mode (1 << 20)
+#define F2FS_SPEC_discard_unit (1 << 21)
+#define F2FS_SPEC_memory_mode (1 << 22)
+#define F2FS_SPEC_errors (1 << 23)
+#define F2FS_SPEC_lookup_mode (1 << 24)
+#define F2FS_SPEC_reserve_node (1 << 25)
+
+struct f2fs_fs_context {
+ struct f2fs_mount_info info;
+ unsigned long long opt_mask; /* Bits changed */
+ unsigned int spec_mask;
+ unsigned short qname_mask;
+};
+
+#define F2FS_CTX_INFO(ctx) ((ctx)->info)
+
+static inline void ctx_set_opt(struct f2fs_fs_context *ctx,
+ enum f2fs_mount_opt flag)
+{
+ ctx->info.opt |= BIT(flag);
+ ctx->opt_mask |= BIT(flag);
+}
+
+static inline void ctx_clear_opt(struct f2fs_fs_context *ctx,
+ enum f2fs_mount_opt flag)
+{
+ ctx->info.opt &= ~BIT(flag);
+ ctx->opt_mask |= BIT(flag);
+}
+
+static inline bool ctx_test_opt(struct f2fs_fs_context *ctx,
+ enum f2fs_mount_opt flag)
+{
+ return ctx->info.opt & BIT(flag);
+}
+
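/*
 * The helpers above record both the requested value (info.opt) and which
 * bits were touched (opt_mask), so the apply phase can later fold in only
 * the options the user actually mentioned:
 *     F2FS_OPTION(sbi).opt &= ~ctx->opt_mask;
 *     F2FS_OPTION(sbi).opt |= F2FS_CTX_INFO(ctx).opt;
 * A self-contained sketch of that pattern, with toy names standing in for
 * the real option bits:
 */
#include <stdio.h>

#define BIT(nr) (1u << (nr))

/* Toy stand-ins for the real option bits -- names are illustrative only. */
enum { OPT_DISCARD, OPT_LAZYTIME, OPT_FASTBOOT };

struct toy_ctx {
	unsigned int opt;	/* new values of the touched bits */
	unsigned int opt_mask;	/* which bits the user touched    */
};

static void ctx_set(struct toy_ctx *c, int flag)
{
	c->opt |= BIT(flag);
	c->opt_mask |= BIT(flag);
}

static void ctx_clear(struct toy_ctx *c, int flag)
{
	c->opt &= ~BIT(flag);
	c->opt_mask |= BIT(flag);
}

int main(void)
{
	unsigned int sb_opt = BIT(OPT_DISCARD) | BIT(OPT_FASTBOOT);
	struct toy_ctx c = { 0, 0 };

	ctx_clear(&c, OPT_DISCARD);	/* user passed "nodiscard" */
	ctx_set(&c, OPT_LAZYTIME);	/* user passed "lazytime"  */

	/* Apply phase: only the touched bits change, fastboot survives. */
	sb_opt &= ~c.opt_mask;
	sb_opt |= c.opt;

	printf("discard=%u lazytime=%u fastboot=%u\n",
	       !!(sb_opt & BIT(OPT_DISCARD)),
	       !!(sb_opt & BIT(OPT_LAZYTIME)),
	       !!(sb_opt & BIT(OPT_FASTBOOT)));
	return 0;
}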
void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate,
- const char *fmt, ...)
+ const char *fmt, ...)
{
struct va_format vaf;
va_list args;
@@ -285,11 +392,19 @@ void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate,
vaf.fmt = printk_skip_level(fmt);
vaf.va = &args;
if (limit_rate)
- printk_ratelimited("%c%cF2FS-fs (%s): %pV\n",
- KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
+ if (sbi)
+ printk_ratelimited("%c%cF2FS-fs (%s): %pV\n",
+ KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
+ else
+ printk_ratelimited("%c%cF2FS-fs: %pV\n",
+ KERN_SOH_ASCII, level, &vaf);
else
- printk("%c%cF2FS-fs (%s): %pV\n",
- KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
+ if (sbi)
+ printk("%c%cF2FS-fs (%s): %pV\n",
+ KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
+ else
+ printk("%c%cF2FS-fs: %pV\n",
+ KERN_SOH_ASCII, level, &vaf);
va_end(args);
}
@@ -335,22 +450,30 @@ static void f2fs_destroy_casefold_cache(void) { }
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
- block_t limit = min((sbi->user_block_count >> 3),
+ block_t block_limit = min((sbi->user_block_count >> 3),
sbi->user_block_count - sbi->reserved_blocks);
+ block_t node_limit = sbi->total_node_count >> 3;
/* limit is 12.5% */
if (test_opt(sbi, RESERVE_ROOT) &&
- F2FS_OPTION(sbi).root_reserved_blocks > limit) {
- F2FS_OPTION(sbi).root_reserved_blocks = limit;
+ F2FS_OPTION(sbi).root_reserved_blocks > block_limit) {
+ F2FS_OPTION(sbi).root_reserved_blocks = block_limit;
f2fs_info(sbi, "Reduce reserved blocks for root = %u",
F2FS_OPTION(sbi).root_reserved_blocks);
}
- if (!test_opt(sbi, RESERVE_ROOT) &&
+ if (test_opt(sbi, RESERVE_NODE) &&
+ F2FS_OPTION(sbi).root_reserved_nodes > node_limit) {
+ F2FS_OPTION(sbi).root_reserved_nodes = node_limit;
+ f2fs_info(sbi, "Reduce reserved nodes for root = %u",
+ F2FS_OPTION(sbi).root_reserved_nodes);
+ }
+ if (!test_opt(sbi, RESERVE_ROOT) && !test_opt(sbi, RESERVE_NODE) &&
(!uid_eq(F2FS_OPTION(sbi).s_resuid,
make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
!gid_eq(F2FS_OPTION(sbi).s_resgid,
make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
- f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
+ f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root"
+ " and reserve_node",
from_kuid_munged(&init_user_ns,
F2FS_OPTION(sbi).s_resuid),
from_kgid_munged(&init_user_ns,
@@ -378,165 +501,101 @@ static void init_once(void *foo)
struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
inode_init_once(&fi->vfs_inode);
+#ifdef CONFIG_FS_ENCRYPTION
+ fi->i_crypt_info = NULL;
+#endif
+#ifdef CONFIG_FS_VERITY
+ fi->i_verity_info = NULL;
+#endif
}
#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
-static int f2fs_set_qf_name(struct super_block *sb, int qtype,
- substring_t *args)
+/*
+ * Note the name of the specified quota file.
+ */
+static int f2fs_note_qf_name(struct fs_context *fc, int qtype,
+ struct fs_parameter *param)
{
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct f2fs_fs_context *ctx = fc->fs_private;
char *qname;
- int ret = -EINVAL;
- if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
- f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
+ if (param->size < 1) {
+ f2fs_err(NULL, "Missing quota name");
return -EINVAL;
}
- if (f2fs_sb_has_quota_ino(sbi)) {
- f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
+ if (strchr(param->string, '/')) {
+ f2fs_err(NULL, "quotafile must be on filesystem root");
+ return -EINVAL;
+ }
+ if (ctx->info.s_qf_names[qtype]) {
+ if (strcmp(ctx->info.s_qf_names[qtype], param->string) != 0) {
+ f2fs_err(NULL, "Quota file already specified");
+ return -EINVAL;
+ }
return 0;
}
- qname = match_strdup(args);
+ qname = kmemdup_nul(param->string, param->size, GFP_KERNEL);
if (!qname) {
- f2fs_err(sbi, "Not enough memory for storing quotafile name");
+ f2fs_err(NULL, "Not enough memory for storing quotafile name");
return -ENOMEM;
}
- if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
- if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
- ret = 0;
- else
- f2fs_err(sbi, "%s quota file already specified",
- QTYPE2NAME(qtype));
- goto errout;
- }
- if (strchr(qname, '/')) {
- f2fs_err(sbi, "quotafile must be on filesystem root");
- goto errout;
- }
- F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
- set_opt(sbi, QUOTA);
+ F2FS_CTX_INFO(ctx).s_qf_names[qtype] = qname;
+ ctx->qname_mask |= 1 << qtype;
return 0;
-errout:
- kfree(qname);
- return ret;
}
-static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
+/*
+ * Clear the name of the specified quota file.
+ */
+static int f2fs_unnote_qf_name(struct fs_context *fc, int qtype)
{
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct f2fs_fs_context *ctx = fc->fs_private;
- if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
- f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
- return -EINVAL;
- }
- kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
- F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
+ kfree(ctx->info.s_qf_names[qtype]);
+ ctx->info.s_qf_names[qtype] = NULL;
+ ctx->qname_mask |= 1 << qtype;
return 0;
}
-static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
+static void f2fs_unnote_qf_name_all(struct fs_context *fc)
{
- /*
- * We do the test below only for project quotas. 'usrquota' and
- * 'grpquota' mount options are allowed even without quota feature
- * to support legacy quotas in quota files.
- */
- if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
- f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
- return -1;
- }
- if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
- F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
- F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
- if (test_opt(sbi, USRQUOTA) &&
- F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
- clear_opt(sbi, USRQUOTA);
-
- if (test_opt(sbi, GRPQUOTA) &&
- F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
- clear_opt(sbi, GRPQUOTA);
-
- if (test_opt(sbi, PRJQUOTA) &&
- F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
- clear_opt(sbi, PRJQUOTA);
-
- if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
- test_opt(sbi, PRJQUOTA)) {
- f2fs_err(sbi, "old and new quota format mixing");
- return -1;
- }
-
- if (!F2FS_OPTION(sbi).s_jquota_fmt) {
- f2fs_err(sbi, "journaled quota format not specified");
- return -1;
- }
- }
+ int i;
- if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
- f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
- F2FS_OPTION(sbi).s_jquota_fmt = 0;
- }
- return 0;
+ for (i = 0; i < MAXQUOTAS; i++)
+ f2fs_unnote_qf_name(fc, i);
}
#endif
-static int f2fs_set_test_dummy_encryption(struct super_block *sb,
- const char *opt,
- const substring_t *arg,
- bool is_remount)
+static int f2fs_parse_test_dummy_encryption(const struct fs_parameter *param,
+ struct f2fs_fs_context *ctx)
{
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
- struct fs_parameter param = {
- .type = fs_value_is_string,
- .string = arg->from ? arg->from : "",
- };
- struct fscrypt_dummy_policy *policy =
- &F2FS_OPTION(sbi).dummy_enc_policy;
int err;
if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
- f2fs_warn(sbi, "test_dummy_encryption option not supported");
- return -EINVAL;
- }
-
- if (!f2fs_sb_has_encrypt(sbi)) {
- f2fs_err(sbi, "Encrypt feature is off");
+ f2fs_warn(NULL, "test_dummy_encryption option not supported");
return -EINVAL;
}
-
- /*
- * This mount option is just for testing, and it's not worthwhile to
- * implement the extra complexity (e.g. RCU protection) that would be
- * needed to allow it to be set or changed during remount. We do allow
- * it to be specified during remount, but only if there is no change.
- */
- if (is_remount && !fscrypt_is_dummy_policy_set(policy)) {
- f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");
- return -EINVAL;
- }
-
- err = fscrypt_parse_test_dummy_encryption(&param, policy);
+ err = fscrypt_parse_test_dummy_encryption(param,
+ &ctx->info.dummy_enc_policy);
if (err) {
- if (err == -EEXIST)
- f2fs_warn(sbi,
- "Can't change test_dummy_encryption on remount");
- else if (err == -EINVAL)
- f2fs_warn(sbi, "Value of option \"%s\" is unrecognized",
- opt);
+ if (err == -EINVAL)
+ f2fs_warn(NULL, "Value of option \"%s\" is unrecognized",
+ param->key);
+ else if (err == -EEXIST)
+ f2fs_warn(NULL, "Conflicting test_dummy_encryption options");
else
- f2fs_warn(sbi, "Error processing option \"%s\" [%d]",
- opt, err);
+ f2fs_warn(NULL, "Error processing option \"%s\" [%d]",
+ param->key, err);
return -EINVAL;
}
- f2fs_warn(sbi, "Test dummy encryption mode enabled");
return 0;
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
-static bool is_compress_extension_exist(struct f2fs_sb_info *sbi,
+static bool is_compress_extension_exist(struct f2fs_mount_info *info,
const char *new_ext, bool is_ext)
{
unsigned char (*ext)[F2FS_EXTENSION_LEN];
@@ -544,11 +603,11 @@ static bool is_compress_extension_exist(struct f2fs_sb_info *sbi,
int i;
if (is_ext) {
- ext = F2FS_OPTION(sbi).extensions;
- ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
+ ext = info->extensions;
+ ext_cnt = info->compress_ext_cnt;
} else {
- ext = F2FS_OPTION(sbi).noextensions;
- ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
+ ext = info->noextensions;
+ ext_cnt = info->nocompress_ext_cnt;
}
for (i = 0; i < ext_cnt; i++) {
@@ -566,28 +625,28 @@ static bool is_compress_extension_exist(struct f2fs_sb_info *sbi,
* extension will be treated as special cases and will not be compressed.
* 3. Don't allow the non-compress extension specifies all files.
*/
-static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi)
+static int f2fs_test_compress_extension(unsigned char (*noext)[F2FS_EXTENSION_LEN],
+ int noext_cnt,
+ unsigned char (*ext)[F2FS_EXTENSION_LEN],
+ int ext_cnt)
{
- unsigned char (*ext)[F2FS_EXTENSION_LEN];
- unsigned char (*noext)[F2FS_EXTENSION_LEN];
- int ext_cnt, noext_cnt, index = 0, no_index = 0;
-
- ext = F2FS_OPTION(sbi).extensions;
- ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
- noext = F2FS_OPTION(sbi).noextensions;
- noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
+ int index = 0, no_index = 0;
if (!noext_cnt)
return 0;
for (no_index = 0; no_index < noext_cnt; no_index++) {
+ if (strlen(noext[no_index]) == 0)
+ continue;
if (!strcasecmp("*", noext[no_index])) {
- f2fs_info(sbi, "Don't allow the nocompress extension specifies all files");
+ f2fs_info(NULL, "Don't allow the nocompress extension specifies all files");
return -EINVAL;
}
for (index = 0; index < ext_cnt; index++) {
+ if (strlen(ext[index]) == 0)
+ continue;
if (!strcasecmp(ext[index], noext[no_index])) {
- f2fs_info(sbi, "Don't allow the same extension %s appear in both compress and nocompress extension",
+ f2fs_info(NULL, "Don't allow the same extension %s appear in both compress and nocompress extension",
ext[index]);
return -EINVAL;
}
@@ -597,58 +656,62 @@ static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi)
}
#ifdef CONFIG_F2FS_FS_LZ4
-static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
+static int f2fs_set_lz4hc_level(struct f2fs_fs_context *ctx, const char *str)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
unsigned int level;
if (strlen(str) == 3) {
- F2FS_OPTION(sbi).compress_level = 0;
+ F2FS_CTX_INFO(ctx).compress_level = 0;
+ ctx->spec_mask |= F2FS_SPEC_compress_level;
return 0;
}
str += 3;
if (str[0] != ':') {
- f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
+ f2fs_info(NULL, "wrong format, e.g. <alg_name>:<compr_level>");
return -EINVAL;
}
if (kstrtouint(str + 1, 10, &level))
return -EINVAL;
if (!f2fs_is_compress_level_valid(COMPRESS_LZ4, level)) {
- f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
+ f2fs_info(NULL, "invalid lz4hc compress level: %d", level);
return -EINVAL;
}
- F2FS_OPTION(sbi).compress_level = level;
+ F2FS_CTX_INFO(ctx).compress_level = level;
+ ctx->spec_mask |= F2FS_SPEC_compress_level;
return 0;
#else
if (strlen(str) == 3) {
- F2FS_OPTION(sbi).compress_level = 0;
+ F2FS_CTX_INFO(ctx).compress_level = 0;
+ ctx->spec_mask |= F2FS_SPEC_compress_level;
return 0;
}
- f2fs_info(sbi, "kernel doesn't support lz4hc compression");
+ f2fs_info(NULL, "kernel doesn't support lz4hc compression");
return -EINVAL;
#endif
}
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
-static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
+static int f2fs_set_zstd_level(struct f2fs_fs_context *ctx, const char *str)
{
int level;
int len = 4;
if (strlen(str) == len) {
- F2FS_OPTION(sbi).compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
+ F2FS_CTX_INFO(ctx).compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
+ ctx->spec_mask |= F2FS_SPEC_compress_level;
return 0;
}
str += len;
if (str[0] != ':') {
- f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
+ f2fs_info(NULL, "wrong format, e.g. <alg_name>:<compr_level>");
return -EINVAL;
}
if (kstrtoint(str + 1, 10, &level))
@@ -656,692 +719,775 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
/* f2fs does not support negative compress level now */
if (level < 0) {
- f2fs_info(sbi, "do not support negative compress level: %d", level);
+ f2fs_info(NULL, "do not support negative compress level: %d", level);
return -ERANGE;
}
if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) {
- f2fs_info(sbi, "invalid zstd compress level: %d", level);
+ f2fs_info(NULL, "invalid zstd compress level: %d", level);
return -EINVAL;
}
- F2FS_OPTION(sbi).compress_level = level;
+ F2FS_CTX_INFO(ctx).compress_level = level;
+ ctx->spec_mask |= F2FS_SPEC_compress_level;
return 0;
}
#endif
#endif
-static int parse_options(struct super_block *sb, char *options, bool is_remount)
+static int f2fs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
- substring_t args[MAX_OPT_ARGS];
+ struct f2fs_fs_context *ctx = fc->fs_private;
#ifdef CONFIG_F2FS_FS_COMPRESSION
unsigned char (*ext)[F2FS_EXTENSION_LEN];
unsigned char (*noext)[F2FS_EXTENSION_LEN];
int ext_cnt, noext_cnt;
+ char *name;
#endif
- char *p, *name;
- int arg = 0;
- kuid_t uid;
- kgid_t gid;
- int ret;
-
- if (!options)
- goto default_check;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
-
- if (!*p)
- continue;
- /*
- * Initialize args struct so we know whether arg was
- * found; some options take optional arguments.
- */
- args[0].to = args[0].from = NULL;
- token = match_token(p, f2fs_tokens, args);
+ substring_t args[MAX_OPT_ARGS];
+ struct fs_parse_result result;
+ int token, ret, arg;
- switch (token) {
- case Opt_gc_background:
- name = match_strdup(&args[0]);
+ token = fs_parse(fc, f2fs_param_specs, param, &result);
+ if (token < 0)
+ return token;
- if (!name)
- return -ENOMEM;
- if (!strcmp(name, "on")) {
- F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
- } else if (!strcmp(name, "off")) {
- if (f2fs_sb_has_blkzoned(sbi)) {
- f2fs_warn(sbi, "zoned devices need bggc");
- kfree(name);
- return -EINVAL;
- }
- F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
- } else if (!strcmp(name, "sync")) {
- F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
- } else {
- kfree(name);
- return -EINVAL;
- }
- kfree(name);
- break;
- case Opt_disable_roll_forward:
- set_opt(sbi, DISABLE_ROLL_FORWARD);
- break;
- case Opt_norecovery:
- /* this option mounts f2fs with ro */
- set_opt(sbi, NORECOVERY);
- if (!f2fs_readonly(sb))
- return -EINVAL;
- break;
- case Opt_discard:
- if (!f2fs_hw_support_discard(sbi)) {
- f2fs_warn(sbi, "device does not support discard");
- break;
- }
- set_opt(sbi, DISCARD);
- break;
- case Opt_nodiscard:
- if (f2fs_hw_should_discard(sbi)) {
- f2fs_warn(sbi, "discard is required for zoned block devices");
- return -EINVAL;
- }
- clear_opt(sbi, DISCARD);
- break;
- case Opt_noheap:
- case Opt_heap:
- f2fs_warn(sbi, "heap/no_heap options were deprecated");
- break;
+ switch (token) {
+ case Opt_gc_background:
+ F2FS_CTX_INFO(ctx).bggc_mode = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_background_gc;
+ break;
+ case Opt_disable_roll_forward:
+ ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_ROLL_FORWARD);
+ break;
+ case Opt_norecovery:
+ /* requires ro mount, checked in f2fs_validate_options */
+ ctx_set_opt(ctx, F2FS_MOUNT_NORECOVERY);
+ break;
+ case Opt_discard:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_DISCARD);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_DISCARD);
+ break;
+ case Opt_noheap:
+ case Opt_heap:
+ f2fs_warn(NULL, "heap/no_heap options were deprecated");
+ break;
#ifdef CONFIG_F2FS_FS_XATTR
- case Opt_user_xattr:
- set_opt(sbi, XATTR_USER);
- break;
- case Opt_nouser_xattr:
- clear_opt(sbi, XATTR_USER);
- break;
- case Opt_inline_xattr:
- set_opt(sbi, INLINE_XATTR);
- break;
- case Opt_noinline_xattr:
- clear_opt(sbi, INLINE_XATTR);
- break;
- case Opt_inline_xattr_size:
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- set_opt(sbi, INLINE_XATTR_SIZE);
- F2FS_OPTION(sbi).inline_xattr_size = arg;
- break;
+ case Opt_user_xattr:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_XATTR_USER);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_XATTR_USER);
+ break;
+ case Opt_inline_xattr:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_XATTR);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_INLINE_XATTR);
+ break;
+ case Opt_inline_xattr_size:
+ if (result.int_32 < MIN_INLINE_XATTR_SIZE ||
+ result.int_32 > MAX_INLINE_XATTR_SIZE) {
+ f2fs_err(NULL, "inline xattr size is out of range: %u ~ %u",
+ (u32)MIN_INLINE_XATTR_SIZE, (u32)MAX_INLINE_XATTR_SIZE);
+ return -EINVAL;
+ }
+ ctx_set_opt(ctx, F2FS_MOUNT_INLINE_XATTR_SIZE);
+ F2FS_CTX_INFO(ctx).inline_xattr_size = result.int_32;
+ ctx->spec_mask |= F2FS_SPEC_inline_xattr_size;
+ break;
#else
- case Opt_user_xattr:
- f2fs_info(sbi, "user_xattr options not supported");
- break;
- case Opt_nouser_xattr:
- f2fs_info(sbi, "nouser_xattr options not supported");
- break;
- case Opt_inline_xattr:
- f2fs_info(sbi, "inline_xattr options not supported");
- break;
- case Opt_noinline_xattr:
- f2fs_info(sbi, "noinline_xattr options not supported");
- break;
+ case Opt_user_xattr:
+ case Opt_inline_xattr:
+ case Opt_inline_xattr_size:
+ f2fs_info(NULL, "%s options not supported", param->key);
+ break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
- case Opt_acl:
- set_opt(sbi, POSIX_ACL);
- break;
- case Opt_noacl:
- clear_opt(sbi, POSIX_ACL);
- break;
+ case Opt_acl:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_POSIX_ACL);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_POSIX_ACL);
+ break;
#else
- case Opt_acl:
- f2fs_info(sbi, "acl options not supported");
- break;
- case Opt_noacl:
- f2fs_info(sbi, "noacl options not supported");
- break;
+ case Opt_acl:
+ f2fs_info(NULL, "%s options not supported", param->key);
+ break;
#endif
- case Opt_active_logs:
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- if (arg != 2 && arg != 4 &&
- arg != NR_CURSEG_PERSIST_TYPE)
- return -EINVAL;
- F2FS_OPTION(sbi).active_logs = arg;
- break;
- case Opt_disable_ext_identify:
- set_opt(sbi, DISABLE_EXT_IDENTIFY);
- break;
- case Opt_inline_data:
- set_opt(sbi, INLINE_DATA);
- break;
- case Opt_inline_dentry:
- set_opt(sbi, INLINE_DENTRY);
- break;
- case Opt_noinline_dentry:
- clear_opt(sbi, INLINE_DENTRY);
- break;
- case Opt_flush_merge:
- set_opt(sbi, FLUSH_MERGE);
- break;
- case Opt_noflush_merge:
- clear_opt(sbi, FLUSH_MERGE);
- break;
- case Opt_nobarrier:
- set_opt(sbi, NOBARRIER);
- break;
- case Opt_barrier:
- clear_opt(sbi, NOBARRIER);
- break;
- case Opt_fastboot:
- set_opt(sbi, FASTBOOT);
- break;
- case Opt_extent_cache:
- set_opt(sbi, READ_EXTENT_CACHE);
- break;
- case Opt_noextent_cache:
- if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_DEVICE_ALIAS)) {
- f2fs_err(sbi, "device aliasing requires extent cache");
- return -EINVAL;
- }
- clear_opt(sbi, READ_EXTENT_CACHE);
- break;
- case Opt_noinline_data:
- clear_opt(sbi, INLINE_DATA);
- break;
- case Opt_data_flush:
- set_opt(sbi, DATA_FLUSH);
- break;
- case Opt_reserve_root:
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- if (test_opt(sbi, RESERVE_ROOT)) {
- f2fs_info(sbi, "Preserve previous reserve_root=%u",
- F2FS_OPTION(sbi).root_reserved_blocks);
- } else {
- F2FS_OPTION(sbi).root_reserved_blocks = arg;
- set_opt(sbi, RESERVE_ROOT);
- }
- break;
- case Opt_resuid:
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- uid = make_kuid(current_user_ns(), arg);
- if (!uid_valid(uid)) {
- f2fs_err(sbi, "Invalid uid value %d", arg);
- return -EINVAL;
- }
- F2FS_OPTION(sbi).s_resuid = uid;
- break;
- case Opt_resgid:
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- gid = make_kgid(current_user_ns(), arg);
- if (!gid_valid(gid)) {
- f2fs_err(sbi, "Invalid gid value %d", arg);
- return -EINVAL;
- }
- F2FS_OPTION(sbi).s_resgid = gid;
- break;
- case Opt_mode:
- name = match_strdup(&args[0]);
-
- if (!name)
- return -ENOMEM;
- if (!strcmp(name, "adaptive")) {
- F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
- } else if (!strcmp(name, "lfs")) {
- F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
- } else if (!strcmp(name, "fragment:segment")) {
- F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_SEG;
- } else if (!strcmp(name, "fragment:block")) {
- F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_BLK;
- } else {
- kfree(name);
- return -EINVAL;
- }
- kfree(name);
- break;
+ case Opt_active_logs:
+ if (result.int_32 != 2 && result.int_32 != 4 &&
+ result.int_32 != NR_CURSEG_PERSIST_TYPE)
+ return -EINVAL;
+ ctx->spec_mask |= F2FS_SPEC_active_logs;
+ F2FS_CTX_INFO(ctx).active_logs = result.int_32;
+ break;
+ case Opt_disable_ext_identify:
+ ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_EXT_IDENTIFY);
+ break;
+ case Opt_inline_data:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_DATA);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_INLINE_DATA);
+ break;
+ case Opt_inline_dentry:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_DENTRY);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_INLINE_DENTRY);
+ break;
+ case Opt_flush_merge:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_FLUSH_MERGE);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_FLUSH_MERGE);
+ break;
+ case Opt_barrier:
+ if (result.negated)
+ ctx_set_opt(ctx, F2FS_MOUNT_NOBARRIER);
+ else
+ ctx_clear_opt(ctx, F2FS_MOUNT_NOBARRIER);
+ break;
+ case Opt_fastboot:
+ ctx_set_opt(ctx, F2FS_MOUNT_FASTBOOT);
+ break;
+ case Opt_extent_cache:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE);
+ break;
+ case Opt_data_flush:
+ ctx_set_opt(ctx, F2FS_MOUNT_DATA_FLUSH);
+ break;
+ case Opt_reserve_root:
+ ctx_set_opt(ctx, F2FS_MOUNT_RESERVE_ROOT);
+ F2FS_CTX_INFO(ctx).root_reserved_blocks = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_reserve_root;
+ break;
+ case Opt_reserve_node:
+ ctx_set_opt(ctx, F2FS_MOUNT_RESERVE_NODE);
+ F2FS_CTX_INFO(ctx).root_reserved_nodes = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_reserve_node;
+ break;
+ case Opt_resuid:
+ F2FS_CTX_INFO(ctx).s_resuid = result.uid;
+ ctx->spec_mask |= F2FS_SPEC_resuid;
+ break;
+ case Opt_resgid:
+ F2FS_CTX_INFO(ctx).s_resgid = result.gid;
+ ctx->spec_mask |= F2FS_SPEC_resgid;
+ break;
+ case Opt_mode:
+ F2FS_CTX_INFO(ctx).fs_mode = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_mode;
+ break;
#ifdef CONFIG_F2FS_FAULT_INJECTION
- case Opt_fault_injection:
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- if (f2fs_build_fault_attr(sbi, arg,
- F2FS_ALL_FAULT_TYPE))
- return -EINVAL;
- set_opt(sbi, FAULT_INJECTION);
- break;
+ case Opt_fault_injection:
+ F2FS_CTX_INFO(ctx).fault_info.inject_rate = result.int_32;
+ ctx->spec_mask |= F2FS_SPEC_fault_injection;
+ ctx_set_opt(ctx, F2FS_MOUNT_FAULT_INJECTION);
+ break;
- case Opt_fault_type:
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- if (f2fs_build_fault_attr(sbi, 0, arg))
- return -EINVAL;
- set_opt(sbi, FAULT_INJECTION);
- break;
+ case Opt_fault_type:
+ if (result.uint_32 > BIT(FAULT_MAX))
+ return -EINVAL;
+ F2FS_CTX_INFO(ctx).fault_info.inject_type = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_fault_type;
+ ctx_set_opt(ctx, F2FS_MOUNT_FAULT_INJECTION);
+ break;
#else
- case Opt_fault_injection:
- f2fs_info(sbi, "fault_injection options not supported");
- break;
-
- case Opt_fault_type:
- f2fs_info(sbi, "fault_type options not supported");
- break;
+ case Opt_fault_injection:
+ case Opt_fault_type:
+ f2fs_info(NULL, "%s options not supported", param->key);
+ break;
#endif
- case Opt_lazytime:
- sb->s_flags |= SB_LAZYTIME;
- break;
- case Opt_nolazytime:
- sb->s_flags &= ~SB_LAZYTIME;
- break;
+ case Opt_lazytime:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_LAZYTIME);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_LAZYTIME);
+ break;
#ifdef CONFIG_QUOTA
- case Opt_quota:
- case Opt_usrquota:
- set_opt(sbi, USRQUOTA);
- break;
- case Opt_grpquota:
- set_opt(sbi, GRPQUOTA);
- break;
- case Opt_prjquota:
- set_opt(sbi, PRJQUOTA);
- break;
- case Opt_usrjquota:
- ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
- if (ret)
- return ret;
- break;
- case Opt_grpjquota:
- ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
- if (ret)
- return ret;
- break;
- case Opt_prjjquota:
- ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
- if (ret)
- return ret;
- break;
- case Opt_offusrjquota:
- ret = f2fs_clear_qf_name(sb, USRQUOTA);
- if (ret)
- return ret;
- break;
- case Opt_offgrpjquota:
- ret = f2fs_clear_qf_name(sb, GRPQUOTA);
- if (ret)
- return ret;
- break;
- case Opt_offprjjquota:
- ret = f2fs_clear_qf_name(sb, PRJQUOTA);
- if (ret)
- return ret;
- break;
- case Opt_jqfmt_vfsold:
- F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
- break;
- case Opt_jqfmt_vfsv0:
- F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
- break;
- case Opt_jqfmt_vfsv1:
- F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
- break;
- case Opt_noquota:
- clear_opt(sbi, QUOTA);
- clear_opt(sbi, USRQUOTA);
- clear_opt(sbi, GRPQUOTA);
- clear_opt(sbi, PRJQUOTA);
- break;
+ case Opt_quota:
+ if (result.negated) {
+ ctx_clear_opt(ctx, F2FS_MOUNT_QUOTA);
+ ctx_clear_opt(ctx, F2FS_MOUNT_USRQUOTA);
+ ctx_clear_opt(ctx, F2FS_MOUNT_GRPQUOTA);
+ ctx_clear_opt(ctx, F2FS_MOUNT_PRJQUOTA);
+ } else
+ ctx_set_opt(ctx, F2FS_MOUNT_USRQUOTA);
+ break;
+ case Opt_usrquota:
+ ctx_set_opt(ctx, F2FS_MOUNT_USRQUOTA);
+ break;
+ case Opt_grpquota:
+ ctx_set_opt(ctx, F2FS_MOUNT_GRPQUOTA);
+ break;
+ case Opt_prjquota:
+ ctx_set_opt(ctx, F2FS_MOUNT_PRJQUOTA);
+ break;
+ case Opt_usrjquota:
+ if (!*param->string)
+ ret = f2fs_unnote_qf_name(fc, USRQUOTA);
+ else
+ ret = f2fs_note_qf_name(fc, USRQUOTA, param);
+ if (ret)
+ return ret;
+ break;
+ case Opt_grpjquota:
+ if (!*param->string)
+ ret = f2fs_unnote_qf_name(fc, GRPQUOTA);
+ else
+ ret = f2fs_note_qf_name(fc, GRPQUOTA, param);
+ if (ret)
+ return ret;
+ break;
+ case Opt_prjjquota:
+ if (!*param->string)
+ ret = f2fs_unnote_qf_name(fc, PRJQUOTA);
+ else
+ ret = f2fs_note_qf_name(fc, PRJQUOTA, param);
+ if (ret)
+ return ret;
+ break;
+ case Opt_jqfmt:
+ F2FS_CTX_INFO(ctx).s_jquota_fmt = result.int_32;
+ ctx->spec_mask |= F2FS_SPEC_jqfmt;
+ break;
#else
- case Opt_quota:
- case Opt_usrquota:
- case Opt_grpquota:
- case Opt_prjquota:
- case Opt_usrjquota:
- case Opt_grpjquota:
- case Opt_prjjquota:
- case Opt_offusrjquota:
- case Opt_offgrpjquota:
- case Opt_offprjjquota:
- case Opt_jqfmt_vfsold:
- case Opt_jqfmt_vfsv0:
- case Opt_jqfmt_vfsv1:
- case Opt_noquota:
- f2fs_info(sbi, "quota operations not supported");
- break;
+ case Opt_quota:
+ case Opt_usrquota:
+ case Opt_grpquota:
+ case Opt_prjquota:
+ case Opt_usrjquota:
+ case Opt_grpjquota:
+ case Opt_prjjquota:
+ f2fs_info(NULL, "quota operations not supported");
+ break;
#endif
- case Opt_alloc:
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
-
- if (!strcmp(name, "default")) {
- F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
- } else if (!strcmp(name, "reuse")) {
- F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
- } else {
- kfree(name);
- return -EINVAL;
- }
- kfree(name);
- break;
- case Opt_fsync:
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
- if (!strcmp(name, "posix")) {
- F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
- } else if (!strcmp(name, "strict")) {
- F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
- } else if (!strcmp(name, "nobarrier")) {
- F2FS_OPTION(sbi).fsync_mode =
- FSYNC_MODE_NOBARRIER;
- } else {
- kfree(name);
- return -EINVAL;
- }
- kfree(name);
- break;
- case Opt_test_dummy_encryption:
- ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
- is_remount);
- if (ret)
- return ret;
- break;
- case Opt_inlinecrypt:
+ case Opt_alloc:
+ F2FS_CTX_INFO(ctx).alloc_mode = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_alloc_mode;
+ break;
+ case Opt_fsync:
+ F2FS_CTX_INFO(ctx).fsync_mode = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_fsync_mode;
+ break;
+ case Opt_test_dummy_encryption:
+ ret = f2fs_parse_test_dummy_encryption(param, ctx);
+ if (ret)
+ return ret;
+ break;
+ case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
- sb->s_flags |= SB_INLINECRYPT;
+ ctx_set_opt(ctx, F2FS_MOUNT_INLINECRYPT);
#else
- f2fs_info(sbi, "inline encryption not supported");
+ f2fs_info(NULL, "inline encryption not supported");
#endif
- break;
+ break;
+ case Opt_checkpoint:
+ /*
+ * Initialize args struct so we know whether arg was
+ * found; some options take optional arguments.
+ */
+ args[0].from = args[0].to = NULL;
+ arg = 0;
+
+ /* revert to match_table for checkpoint= options */
+ token = match_token(param->string, f2fs_checkpoint_tokens, args);
+ switch (token) {
case Opt_checkpoint_disable_cap_perc:
if (args->from && match_int(args, &arg))
return -EINVAL;
if (arg < 0 || arg > 100)
return -EINVAL;
- F2FS_OPTION(sbi).unusable_cap_perc = arg;
- set_opt(sbi, DISABLE_CHECKPOINT);
+ F2FS_CTX_INFO(ctx).unusable_cap_perc = arg;
+ ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap_perc;
+ ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
break;
case Opt_checkpoint_disable_cap:
if (args->from && match_int(args, &arg))
return -EINVAL;
- F2FS_OPTION(sbi).unusable_cap = arg;
- set_opt(sbi, DISABLE_CHECKPOINT);
+ F2FS_CTX_INFO(ctx).unusable_cap = arg;
+ ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap;
+ ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
break;
case Opt_checkpoint_disable:
- set_opt(sbi, DISABLE_CHECKPOINT);
+ ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
break;
case Opt_checkpoint_enable:
- clear_opt(sbi, DISABLE_CHECKPOINT);
- break;
- case Opt_checkpoint_merge:
- set_opt(sbi, MERGE_CHECKPOINT);
- break;
- case Opt_nocheckpoint_merge:
- clear_opt(sbi, MERGE_CHECKPOINT);
+ F2FS_CTX_INFO(ctx).unusable_cap_perc = 0;
+ ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap_perc;
+ F2FS_CTX_INFO(ctx).unusable_cap = 0;
+ ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap;
+ ctx_clear_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case Opt_checkpoint_merge:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_MERGE_CHECKPOINT);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_MERGE_CHECKPOINT);
+ break;
#ifdef CONFIG_F2FS_FS_COMPRESSION
- case Opt_compress_algorithm:
- if (!f2fs_sb_has_compression(sbi)) {
- f2fs_info(sbi, "Image doesn't support compression");
- break;
- }
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
- if (!strcmp(name, "lzo")) {
+ case Opt_compress_algorithm:
+ name = param->string;
+ if (!strcmp(name, "lzo")) {
#ifdef CONFIG_F2FS_FS_LZO
- F2FS_OPTION(sbi).compress_level = 0;
- F2FS_OPTION(sbi).compress_algorithm =
- COMPRESS_LZO;
+ F2FS_CTX_INFO(ctx).compress_level = 0;
+ F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZO;
+ ctx->spec_mask |= F2FS_SPEC_compress_level;
+ ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
#else
- f2fs_info(sbi, "kernel doesn't support lzo compression");
+ f2fs_info(NULL, "kernel doesn't support lzo compression");
#endif
- } else if (!strncmp(name, "lz4", 3)) {
+ } else if (!strncmp(name, "lz4", 3)) {
#ifdef CONFIG_F2FS_FS_LZ4
- ret = f2fs_set_lz4hc_level(sbi, name);
- if (ret) {
- kfree(name);
- return -EINVAL;
- }
- F2FS_OPTION(sbi).compress_algorithm =
- COMPRESS_LZ4;
+ ret = f2fs_set_lz4hc_level(ctx, name);
+ if (ret)
+ return -EINVAL;
+ F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZ4;
+ ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
#else
- f2fs_info(sbi, "kernel doesn't support lz4 compression");
+ f2fs_info(NULL, "kernel doesn't support lz4 compression");
#endif
- } else if (!strncmp(name, "zstd", 4)) {
+ } else if (!strncmp(name, "zstd", 4)) {
#ifdef CONFIG_F2FS_FS_ZSTD
- ret = f2fs_set_zstd_level(sbi, name);
- if (ret) {
- kfree(name);
- return -EINVAL;
- }
- F2FS_OPTION(sbi).compress_algorithm =
- COMPRESS_ZSTD;
+ ret = f2fs_set_zstd_level(ctx, name);
+ if (ret)
+ return -EINVAL;
+ F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_ZSTD;
+ ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
#else
- f2fs_info(sbi, "kernel doesn't support zstd compression");
+ f2fs_info(NULL, "kernel doesn't support zstd compression");
#endif
- } else if (!strcmp(name, "lzo-rle")) {
+ } else if (!strcmp(name, "lzo-rle")) {
#ifdef CONFIG_F2FS_FS_LZORLE
- F2FS_OPTION(sbi).compress_level = 0;
- F2FS_OPTION(sbi).compress_algorithm =
- COMPRESS_LZORLE;
+ F2FS_CTX_INFO(ctx).compress_level = 0;
+ F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZORLE;
+ ctx->spec_mask |= F2FS_SPEC_compress_level;
+ ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
#else
- f2fs_info(sbi, "kernel doesn't support lzorle compression");
+ f2fs_info(NULL, "kernel doesn't support lzorle compression");
#endif
- } else {
- kfree(name);
- return -EINVAL;
- }
- kfree(name);
- break;
- case Opt_compress_log_size:
- if (!f2fs_sb_has_compression(sbi)) {
- f2fs_info(sbi, "Image doesn't support compression");
- break;
- }
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- if (arg < MIN_COMPRESS_LOG_SIZE ||
- arg > MAX_COMPRESS_LOG_SIZE) {
- f2fs_err(sbi,
- "Compress cluster log size is out of range");
- return -EINVAL;
- }
- F2FS_OPTION(sbi).compress_log_size = arg;
+ } else
+ return -EINVAL;
+ break;
+ case Opt_compress_log_size:
+ if (result.uint_32 < MIN_COMPRESS_LOG_SIZE ||
+ result.uint_32 > MAX_COMPRESS_LOG_SIZE) {
+ f2fs_err(NULL,
+ "Compress cluster log size is out of range");
+ return -EINVAL;
+ }
+ F2FS_CTX_INFO(ctx).compress_log_size = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_compress_log_size;
+ break;
+ case Opt_compress_extension:
+ name = param->string;
+ ext = F2FS_CTX_INFO(ctx).extensions;
+ ext_cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt;
+
+ if (strlen(name) >= F2FS_EXTENSION_LEN ||
+ ext_cnt >= COMPRESS_EXT_NUM) {
+ f2fs_err(NULL, "invalid extension length/number");
+ return -EINVAL;
+ }
+
+ if (is_compress_extension_exist(&ctx->info, name, true))
break;
- case Opt_compress_extension:
- if (!f2fs_sb_has_compression(sbi)) {
- f2fs_info(sbi, "Image doesn't support compression");
- break;
- }
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
- ext = F2FS_OPTION(sbi).extensions;
- ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
+ ret = strscpy(ext[ext_cnt], name, F2FS_EXTENSION_LEN);
+ if (ret < 0)
+ return ret;
+ F2FS_CTX_INFO(ctx).compress_ext_cnt++;
+ ctx->spec_mask |= F2FS_SPEC_compress_extension;
+ break;
+ case Opt_nocompress_extension:
+ name = param->string;
+ noext = F2FS_CTX_INFO(ctx).noextensions;
+ noext_cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt;
+
+ if (strlen(name) >= F2FS_EXTENSION_LEN ||
+ noext_cnt >= COMPRESS_EXT_NUM) {
+ f2fs_err(NULL, "invalid extension length/number");
+ return -EINVAL;
+ }
- if (strlen(name) >= F2FS_EXTENSION_LEN ||
- ext_cnt >= COMPRESS_EXT_NUM) {
- f2fs_err(sbi,
- "invalid extension length/number");
- kfree(name);
- return -EINVAL;
- }
+ if (is_compress_extension_exist(&ctx->info, name, false))
+ break;
- if (is_compress_extension_exist(sbi, name, true)) {
- kfree(name);
- break;
- }
+ ret = strscpy(noext[noext_cnt], name, F2FS_EXTENSION_LEN);
+ if (ret < 0)
+ return ret;
+ F2FS_CTX_INFO(ctx).nocompress_ext_cnt++;
+ ctx->spec_mask |= F2FS_SPEC_nocompress_extension;
+ break;
+ case Opt_compress_chksum:
+ F2FS_CTX_INFO(ctx).compress_chksum = true;
+ ctx->spec_mask |= F2FS_SPEC_compress_chksum;
+ break;
+ case Opt_compress_mode:
+ F2FS_CTX_INFO(ctx).compress_mode = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_compress_mode;
+ break;
+ case Opt_compress_cache:
+ ctx_set_opt(ctx, F2FS_MOUNT_COMPRESS_CACHE);
+ break;
+#else
+ case Opt_compress_algorithm:
+ case Opt_compress_log_size:
+ case Opt_compress_extension:
+ case Opt_nocompress_extension:
+ case Opt_compress_chksum:
+ case Opt_compress_mode:
+ case Opt_compress_cache:
+ f2fs_info(NULL, "compression options not supported");
+ break;
+#endif
+ case Opt_atgc:
+ ctx_set_opt(ctx, F2FS_MOUNT_ATGC);
+ break;
+ case Opt_gc_merge:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_GC_MERGE);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_GC_MERGE);
+ break;
+ case Opt_discard_unit:
+ F2FS_CTX_INFO(ctx).discard_unit = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_discard_unit;
+ break;
+ case Opt_memory_mode:
+ F2FS_CTX_INFO(ctx).memory_mode = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_memory_mode;
+ break;
+ case Opt_age_extent_cache:
+ ctx_set_opt(ctx, F2FS_MOUNT_AGE_EXTENT_CACHE);
+ break;
+ case Opt_errors:
+ F2FS_CTX_INFO(ctx).errors = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_errors;
+ break;
+ case Opt_nat_bits:
+ ctx_set_opt(ctx, F2FS_MOUNT_NAT_BITS);
+ break;
+ case Opt_lookup_mode:
+ F2FS_CTX_INFO(ctx).lookup_mode = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_lookup_mode;
+ break;
+ }
+ return 0;
+}
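/*
 * Unlike the old parse_options(), f2fs_parse_param() above never touches the
 * superblock: checks that need sbi (hardware discard support, the blkzoned
 * and compression features, ...) are deferred to f2fs_check_opt_consistency()
 * and friends below, which see both the fs_context and the live sb.  A
 * stand-alone sketch of that parse/check split, with hypothetical toy_*
 * names:
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct toy_ctx { bool want_discard; bool touched_discard; };
struct toy_sb  { bool hw_supports_discard; };

/* Parse phase: record the request only -- no superblock available yet. */
static void toy_parse(struct toy_ctx *ctx, const char *param)
{
	if (!strcmp(param, "discard")) {
		ctx->want_discard = true;
		ctx->touched_discard = true;
	} else if (!strcmp(param, "nodiscard")) {
		ctx->want_discard = false;
		ctx->touched_discard = true;
	}
}

/* Check phase: the superblock is known, so drop impossible requests. */
static int toy_check(struct toy_ctx *ctx, const struct toy_sb *sb)
{
	if (ctx->touched_discard && ctx->want_discard &&
	    !sb->hw_supports_discard) {
		fprintf(stderr, "device does not support discard\n");
		ctx->want_discard = false;
		ctx->touched_discard = false;
	}
	return 0;
}

int main(void)
{
	struct toy_sb sb = { .hw_supports_discard = false };
	struct toy_ctx ctx = { 0 };

	toy_parse(&ctx, "discard");
	toy_check(&ctx, &sb);
	printf("discard honoured: %d\n", ctx.want_discard);
	return 0;
}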
- ret = strscpy(ext[ext_cnt], name);
- if (ret < 0) {
- kfree(name);
- return ret;
- }
- F2FS_OPTION(sbi).compress_ext_cnt++;
- kfree(name);
- break;
- case Opt_nocompress_extension:
- if (!f2fs_sb_has_compression(sbi)) {
- f2fs_info(sbi, "Image doesn't support compression");
- break;
- }
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
+/*
+ * Check quota settings consistency.
+ */
+static int f2fs_check_quota_consistency(struct fs_context *fc,
+ struct super_block *sb)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ #ifdef CONFIG_QUOTA
+ struct f2fs_fs_context *ctx = fc->fs_private;
+ bool quota_feature = f2fs_sb_has_quota_ino(sbi);
+ bool quota_turnon = sb_any_quota_loaded(sb);
+ char *old_qname, *new_qname;
+ bool usr_qf_name, grp_qf_name, prj_qf_name, usrquota, grpquota, prjquota;
+ int i;
+
+ /*
+ * We do the test below only for project quotas. 'usrquota' and
+ * 'grpquota' mount options are allowed even without quota feature
+ * to support legacy quotas in quota files.
+ */
+ if (ctx_test_opt(ctx, F2FS_MOUNT_PRJQUOTA) &&
+ !f2fs_sb_has_project_quota(sbi)) {
+ f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
+ return -EINVAL;
+ }
- noext = F2FS_OPTION(sbi).noextensions;
- noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
+ if (ctx->qname_mask) {
+ for (i = 0; i < MAXQUOTAS; i++) {
+ if (!(ctx->qname_mask & (1 << i)))
+ continue;
- if (strlen(name) >= F2FS_EXTENSION_LEN ||
- noext_cnt >= COMPRESS_EXT_NUM) {
- f2fs_err(sbi,
- "invalid extension length/number");
- kfree(name);
- return -EINVAL;
+ old_qname = F2FS_OPTION(sbi).s_qf_names[i];
+ new_qname = F2FS_CTX_INFO(ctx).s_qf_names[i];
+ if (quota_turnon &&
+ !!old_qname != !!new_qname)
+ goto err_jquota_change;
+
+ if (old_qname) {
+ if (!new_qname) {
+ f2fs_info(sbi, "remove qf_name %s",
+ old_qname);
+ continue;
+ } else if (strcmp(old_qname, new_qname) == 0) {
+ ctx->qname_mask &= ~(1 << i);
+ continue;
+ }
+ goto err_jquota_specified;
}
- if (is_compress_extension_exist(sbi, name, false)) {
- kfree(name);
- break;
+ if (quota_feature) {
+ f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
+ ctx->qname_mask &= ~(1 << i);
+ kfree(F2FS_CTX_INFO(ctx).s_qf_names[i]);
+ F2FS_CTX_INFO(ctx).s_qf_names[i] = NULL;
}
+ }
+ }
+
+ /* Make sure we don't mix old and new quota format */
+ usr_qf_name = F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
+ F2FS_CTX_INFO(ctx).s_qf_names[USRQUOTA];
+ grp_qf_name = F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
+ F2FS_CTX_INFO(ctx).s_qf_names[GRPQUOTA];
+ prj_qf_name = F2FS_OPTION(sbi).s_qf_names[PRJQUOTA] ||
+ F2FS_CTX_INFO(ctx).s_qf_names[PRJQUOTA];
+ usrquota = test_opt(sbi, USRQUOTA) ||
+ ctx_test_opt(ctx, F2FS_MOUNT_USRQUOTA);
+ grpquota = test_opt(sbi, GRPQUOTA) ||
+ ctx_test_opt(ctx, F2FS_MOUNT_GRPQUOTA);
+ prjquota = test_opt(sbi, PRJQUOTA) ||
+ ctx_test_opt(ctx, F2FS_MOUNT_PRJQUOTA);
+
+ if (usr_qf_name) {
+ ctx_clear_opt(ctx, F2FS_MOUNT_USRQUOTA);
+ usrquota = false;
+ }
+ if (grp_qf_name) {
+ ctx_clear_opt(ctx, F2FS_MOUNT_GRPQUOTA);
+ grpquota = false;
+ }
+ if (prj_qf_name) {
+ ctx_clear_opt(ctx, F2FS_MOUNT_PRJQUOTA);
+ prjquota = false;
+ }
+ if (usr_qf_name || grp_qf_name || prj_qf_name) {
+ if (grpquota || usrquota || prjquota) {
+ f2fs_err(sbi, "old and new quota format mixing");
+ return -EINVAL;
+ }
+ if (!(ctx->spec_mask & F2FS_SPEC_jqfmt ||
+ F2FS_OPTION(sbi).s_jquota_fmt)) {
+ f2fs_err(sbi, "journaled quota format not specified");
+ return -EINVAL;
+ }
+ }
+ return 0;
+
+err_jquota_change:
+ f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
+ return -EINVAL;
+err_jquota_specified:
+ f2fs_err(sbi, "%s quota file already specified",
+ QTYPE2NAME(i));
+ return -EINVAL;
- ret = strscpy(noext[noext_cnt], name);
- if (ret < 0) {
- kfree(name);
- return ret;
- }
- F2FS_OPTION(sbi).nocompress_ext_cnt++;
- kfree(name);
- break;
- case Opt_compress_chksum:
- if (!f2fs_sb_has_compression(sbi)) {
- f2fs_info(sbi, "Image doesn't support compression");
- break;
- }
- F2FS_OPTION(sbi).compress_chksum = true;
- break;
- case Opt_compress_mode:
- if (!f2fs_sb_has_compression(sbi)) {
- f2fs_info(sbi, "Image doesn't support compression");
- break;
- }
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
- if (!strcmp(name, "fs")) {
- F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
- } else if (!strcmp(name, "user")) {
- F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER;
- } else {
- kfree(name);
- return -EINVAL;
- }
- kfree(name);
- break;
- case Opt_compress_cache:
- if (!f2fs_sb_has_compression(sbi)) {
- f2fs_info(sbi, "Image doesn't support compression");
- break;
- }
- set_opt(sbi, COMPRESS_CACHE);
- break;
#else
- case Opt_compress_algorithm:
- case Opt_compress_log_size:
- case Opt_compress_extension:
- case Opt_nocompress_extension:
- case Opt_compress_chksum:
- case Opt_compress_mode:
- case Opt_compress_cache:
- f2fs_info(sbi, "compression options not supported");
- break;
+ if (f2fs_readonly(sbi->sb))
+ return 0;
+ if (f2fs_sb_has_quota_ino(sbi)) {
+ f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
+ return -EINVAL;
+ }
+ if (f2fs_sb_has_project_quota(sbi)) {
+ f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
+ return -EINVAL;
+ }
+
+ return 0;
#endif
- case Opt_atgc:
- set_opt(sbi, ATGC);
- break;
- case Opt_gc_merge:
- set_opt(sbi, GC_MERGE);
- break;
- case Opt_nogc_merge:
- clear_opt(sbi, GC_MERGE);
- break;
- case Opt_discard_unit:
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
- if (!strcmp(name, "block")) {
- F2FS_OPTION(sbi).discard_unit =
- DISCARD_UNIT_BLOCK;
- } else if (!strcmp(name, "segment")) {
- F2FS_OPTION(sbi).discard_unit =
- DISCARD_UNIT_SEGMENT;
- } else if (!strcmp(name, "section")) {
- F2FS_OPTION(sbi).discard_unit =
- DISCARD_UNIT_SECTION;
- } else {
- kfree(name);
- return -EINVAL;
- }
- kfree(name);
- break;
- case Opt_memory_mode:
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
- if (!strcmp(name, "normal")) {
- F2FS_OPTION(sbi).memory_mode =
- MEMORY_MODE_NORMAL;
- } else if (!strcmp(name, "low")) {
- F2FS_OPTION(sbi).memory_mode =
- MEMORY_MODE_LOW;
- } else {
- kfree(name);
- return -EINVAL;
+}
+
+static int f2fs_check_test_dummy_encryption(struct fs_context *fc,
+ struct super_block *sb)
+{
+ struct f2fs_fs_context *ctx = fc->fs_private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+ if (!fscrypt_is_dummy_policy_set(&F2FS_CTX_INFO(ctx).dummy_enc_policy))
+ return 0;
+
+ if (!f2fs_sb_has_encrypt(sbi)) {
+ f2fs_err(sbi, "Encrypt feature is off");
+ return -EINVAL;
+ }
+
+ /*
+ * This mount option is just for testing, and it's not worthwhile to
+ * implement the extra complexity (e.g. RCU protection) that would be
+ * needed to allow it to be set or changed during remount. We do allow
+ * it to be specified during remount, but only if there is no change.
+ */
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ if (fscrypt_dummy_policies_equal(&F2FS_OPTION(sbi).dummy_enc_policy,
+ &F2FS_CTX_INFO(ctx).dummy_enc_policy))
+ return 0;
+ f2fs_warn(sbi, "Can't set or change test_dummy_encryption on remount");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline bool test_compression_spec(unsigned int mask)
+{
+ return mask & (F2FS_SPEC_compress_algorithm
+ | F2FS_SPEC_compress_log_size
+ | F2FS_SPEC_compress_extension
+ | F2FS_SPEC_nocompress_extension
+ | F2FS_SPEC_compress_chksum
+ | F2FS_SPEC_compress_mode);
+}
+
+static inline void clear_compression_spec(struct f2fs_fs_context *ctx)
+{
+ ctx->spec_mask &= ~(F2FS_SPEC_compress_algorithm
+ | F2FS_SPEC_compress_log_size
+ | F2FS_SPEC_compress_extension
+ | F2FS_SPEC_nocompress_extension
+ | F2FS_SPEC_compress_chksum
+ | F2FS_SPEC_compress_mode);
+}
+
+static int f2fs_check_compression(struct fs_context *fc,
+ struct super_block *sb)
+{
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ struct f2fs_fs_context *ctx = fc->fs_private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ int i, cnt;
+
+ if (!f2fs_sb_has_compression(sbi)) {
+ if (test_compression_spec(ctx->spec_mask) ||
+ ctx_test_opt(ctx, F2FS_MOUNT_COMPRESS_CACHE))
+ f2fs_info(sbi, "Image doesn't support compression");
+ clear_compression_spec(ctx);
+ ctx->opt_mask &= ~BIT(F2FS_MOUNT_COMPRESS_CACHE);
+ return 0;
+ }
+ if (ctx->spec_mask & F2FS_SPEC_compress_extension) {
+ cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt;
+ for (i = 0; i < F2FS_CTX_INFO(ctx).compress_ext_cnt; i++) {
+ if (is_compress_extension_exist(&F2FS_OPTION(sbi),
+ F2FS_CTX_INFO(ctx).extensions[i], true)) {
+ F2FS_CTX_INFO(ctx).extensions[i][0] = '\0';
+ cnt--;
}
- kfree(name);
- break;
- case Opt_age_extent_cache:
- set_opt(sbi, AGE_EXTENT_CACHE);
- break;
- case Opt_errors:
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
- if (!strcmp(name, "remount-ro")) {
- F2FS_OPTION(sbi).errors =
- MOUNT_ERRORS_READONLY;
- } else if (!strcmp(name, "continue")) {
- F2FS_OPTION(sbi).errors =
- MOUNT_ERRORS_CONTINUE;
- } else if (!strcmp(name, "panic")) {
- F2FS_OPTION(sbi).errors =
- MOUNT_ERRORS_PANIC;
- } else {
- kfree(name);
- return -EINVAL;
+ }
+ if (F2FS_OPTION(sbi).compress_ext_cnt + cnt > COMPRESS_EXT_NUM) {
+ f2fs_err(sbi, "invalid extension length/number");
+ return -EINVAL;
+ }
+ }
+ if (ctx->spec_mask & F2FS_SPEC_nocompress_extension) {
+ cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt;
+ for (i = 0; i < F2FS_CTX_INFO(ctx).nocompress_ext_cnt; i++) {
+ if (is_compress_extension_exist(&F2FS_OPTION(sbi),
+ F2FS_CTX_INFO(ctx).noextensions[i], false)) {
+ F2FS_CTX_INFO(ctx).noextensions[i][0] = '\0';
+ cnt--;
}
- kfree(name);
- break;
- default:
- f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
- p);
+ }
+ if (F2FS_OPTION(sbi).nocompress_ext_cnt + cnt > COMPRESS_EXT_NUM) {
+ f2fs_err(sbi, "invalid noextension length/number");
return -EINVAL;
}
}
-default_check:
-#ifdef CONFIG_QUOTA
- if (f2fs_check_quota_options(sbi))
+
+ if (f2fs_test_compress_extension(F2FS_CTX_INFO(ctx).noextensions,
+ F2FS_CTX_INFO(ctx).nocompress_ext_cnt,
+ F2FS_CTX_INFO(ctx).extensions,
+ F2FS_CTX_INFO(ctx).compress_ext_cnt)) {
+ f2fs_err(sbi, "new noextensions conflicts with new extensions");
return -EINVAL;
-#else
- if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
- f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
+ }
+ if (f2fs_test_compress_extension(F2FS_CTX_INFO(ctx).noextensions,
+ F2FS_CTX_INFO(ctx).nocompress_ext_cnt,
+ F2FS_OPTION(sbi).extensions,
+ F2FS_OPTION(sbi).compress_ext_cnt)) {
+ f2fs_err(sbi, "new noextensions conflicts with old extensions");
return -EINVAL;
}
- if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
- f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
+ if (f2fs_test_compress_extension(F2FS_OPTION(sbi).noextensions,
+ F2FS_OPTION(sbi).nocompress_ext_cnt,
+ F2FS_CTX_INFO(ctx).extensions,
+ F2FS_CTX_INFO(ctx).compress_ext_cnt)) {
+ f2fs_err(sbi, "new extensions conflicts with old noextensions");
return -EINVAL;
}
#endif
+ return 0;
+}
+
+static int f2fs_check_opt_consistency(struct fs_context *fc,
+ struct super_block *sb)
+{
+ struct f2fs_fs_context *ctx = fc->fs_private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ int err;
+
+ if (ctx_test_opt(ctx, F2FS_MOUNT_NORECOVERY) && !f2fs_readonly(sb))
+ return -EINVAL;
+
+ if (f2fs_hw_should_discard(sbi) &&
+ (ctx->opt_mask & BIT(F2FS_MOUNT_DISCARD)) &&
+ !ctx_test_opt(ctx, F2FS_MOUNT_DISCARD)) {
+ f2fs_warn(sbi, "discard is required for zoned block devices");
+ return -EINVAL;
+ }
+
+ if (!f2fs_hw_support_discard(sbi) &&
+ (ctx->opt_mask & BIT(F2FS_MOUNT_DISCARD)) &&
+ ctx_test_opt(ctx, F2FS_MOUNT_DISCARD)) {
+ f2fs_warn(sbi, "device does not support discard");
+ ctx_clear_opt(ctx, F2FS_MOUNT_DISCARD);
+ ctx->opt_mask &= ~BIT(F2FS_MOUNT_DISCARD);
+ }
+
+ if (f2fs_sb_has_device_alias(sbi) &&
+ (ctx->opt_mask & BIT(F2FS_MOUNT_READ_EXTENT_CACHE)) &&
+ !ctx_test_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE)) {
+ f2fs_err(sbi, "device aliasing requires extent cache");
+ return -EINVAL;
+ }
+
+ if (test_opt(sbi, RESERVE_ROOT) &&
+ (ctx->opt_mask & BIT(F2FS_MOUNT_RESERVE_ROOT)) &&
+ ctx_test_opt(ctx, F2FS_MOUNT_RESERVE_ROOT)) {
+ f2fs_info(sbi, "Preserve previous reserve_root=%u",
+ F2FS_OPTION(sbi).root_reserved_blocks);
+ ctx_clear_opt(ctx, F2FS_MOUNT_RESERVE_ROOT);
+ ctx->opt_mask &= ~BIT(F2FS_MOUNT_RESERVE_ROOT);
+ }
+ if (test_opt(sbi, RESERVE_NODE) &&
+ (ctx->opt_mask & BIT(F2FS_MOUNT_RESERVE_NODE)) &&
+ ctx_test_opt(ctx, F2FS_MOUNT_RESERVE_NODE)) {
+ f2fs_info(sbi, "Preserve previous reserve_node=%u",
+ F2FS_OPTION(sbi).root_reserved_nodes);
+ ctx_clear_opt(ctx, F2FS_MOUNT_RESERVE_NODE);
+ ctx->opt_mask &= ~BIT(F2FS_MOUNT_RESERVE_NODE);
+ }
+
+ err = f2fs_check_test_dummy_encryption(fc, sb);
+ if (err)
+ return err;
+
+ err = f2fs_check_compression(fc, sb);
+ if (err)
+ return err;
+
+ err = f2fs_check_quota_consistency(fc, sb);
+ if (err)
+ return err;
if (!IS_ENABLED(CONFIG_UNICODE) && f2fs_sb_has_casefold(sbi)) {
f2fs_err(sbi,
@@ -1355,15 +1501,19 @@ default_check:
* devices, but mandatory for host-managed zoned block devices.
*/
if (f2fs_sb_has_blkzoned(sbi)) {
+ if (F2FS_CTX_INFO(ctx).bggc_mode == BGGC_MODE_OFF) {
+ f2fs_warn(sbi, "zoned devices need bggc");
+ return -EINVAL;
+ }
#ifdef CONFIG_BLK_DEV_ZONED
- if (F2FS_OPTION(sbi).discard_unit !=
- DISCARD_UNIT_SECTION) {
+ if ((ctx->spec_mask & F2FS_SPEC_discard_unit) &&
+ F2FS_CTX_INFO(ctx).discard_unit != DISCARD_UNIT_SECTION) {
f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default");
- F2FS_OPTION(sbi).discard_unit =
- DISCARD_UNIT_SECTION;
+ F2FS_CTX_INFO(ctx).discard_unit = DISCARD_UNIT_SECTION;
}
- if (F2FS_OPTION(sbi).fs_mode != FS_MODE_LFS) {
+ if ((ctx->spec_mask & F2FS_SPEC_mode) &&
+ F2FS_CTX_INFO(ctx).fs_mode != FS_MODE_LFS) {
f2fs_info(sbi, "Only lfs mode is allowed with zoned block device feature");
return -EINVAL;
}
@@ -1373,43 +1523,25 @@ default_check:
#endif
}
-#ifdef CONFIG_F2FS_FS_COMPRESSION
- if (f2fs_test_compress_extension(sbi)) {
- f2fs_err(sbi, "invalid compress or nocompress extension");
- return -EINVAL;
- }
-#endif
-
- if (test_opt(sbi, INLINE_XATTR_SIZE)) {
- int min_size, max_size;
-
+ if (ctx_test_opt(ctx, F2FS_MOUNT_INLINE_XATTR_SIZE)) {
if (!f2fs_sb_has_extra_attr(sbi) ||
!f2fs_sb_has_flexible_inline_xattr(sbi)) {
f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
return -EINVAL;
}
- if (!test_opt(sbi, INLINE_XATTR)) {
+ if (!ctx_test_opt(ctx, F2FS_MOUNT_INLINE_XATTR) && !test_opt(sbi, INLINE_XATTR)) {
f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
return -EINVAL;
}
-
- min_size = MIN_INLINE_XATTR_SIZE;
- max_size = MAX_INLINE_XATTR_SIZE;
-
- if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
- F2FS_OPTION(sbi).inline_xattr_size > max_size) {
- f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
- min_size, max_size);
- return -EINVAL;
- }
}
- if (test_opt(sbi, ATGC) && f2fs_lfs_mode(sbi)) {
+ if (ctx_test_opt(ctx, F2FS_MOUNT_ATGC) &&
+ F2FS_CTX_INFO(ctx).fs_mode == FS_MODE_LFS) {
f2fs_err(sbi, "LFS is not compatible with ATGC");
return -EINVAL;
}
- if (f2fs_is_readonly(sbi) && test_opt(sbi, FLUSH_MERGE)) {
+ if (f2fs_is_readonly(sbi) && ctx_test_opt(ctx, F2FS_MOUNT_FLUSH_MERGE)) {
f2fs_err(sbi, "FLUSH_MERGE not compatible with readonly mode");
return -EINVAL;
}
@@ -1421,6 +1553,195 @@ default_check:
return 0;
}
+static void f2fs_apply_quota_options(struct fs_context *fc,
+ struct super_block *sb)
+{
+#ifdef CONFIG_QUOTA
+ struct f2fs_fs_context *ctx = fc->fs_private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ bool quota_feature = f2fs_sb_has_quota_ino(sbi);
+ char *qname;
+ int i;
+
+ if (quota_feature)
+ return;
+
+ for (i = 0; i < MAXQUOTAS; i++) {
+ if (!(ctx->qname_mask & (1 << i)))
+ continue;
+
+ qname = F2FS_CTX_INFO(ctx).s_qf_names[i];
+ if (qname) {
+ qname = kstrdup(F2FS_CTX_INFO(ctx).s_qf_names[i],
+ GFP_KERNEL | __GFP_NOFAIL);
+ set_opt(sbi, QUOTA);
+ }
+ F2FS_OPTION(sbi).s_qf_names[i] = qname;
+ }
+
+ if (ctx->spec_mask & F2FS_SPEC_jqfmt)
+ F2FS_OPTION(sbi).s_jquota_fmt = F2FS_CTX_INFO(ctx).s_jquota_fmt;
+
+ if (quota_feature && F2FS_OPTION(sbi).s_jquota_fmt) {
+ f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
+ F2FS_OPTION(sbi).s_jquota_fmt = 0;
+ }
+#endif
+}
+
+static void f2fs_apply_test_dummy_encryption(struct fs_context *fc,
+ struct super_block *sb)
+{
+ struct f2fs_fs_context *ctx = fc->fs_private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+ if (!fscrypt_is_dummy_policy_set(&F2FS_CTX_INFO(ctx).dummy_enc_policy) ||
+ /* if already set, it was already verified to be the same */
+ fscrypt_is_dummy_policy_set(&F2FS_OPTION(sbi).dummy_enc_policy))
+ return;
+ swap(F2FS_OPTION(sbi).dummy_enc_policy, F2FS_CTX_INFO(ctx).dummy_enc_policy);
+ f2fs_warn(sbi, "Test dummy encryption mode enabled");
+}
+
+static void f2fs_apply_compression(struct fs_context *fc,
+ struct super_block *sb)
+{
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ struct f2fs_fs_context *ctx = fc->fs_private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ unsigned char (*ctx_ext)[F2FS_EXTENSION_LEN];
+ unsigned char (*sbi_ext)[F2FS_EXTENSION_LEN];
+ int ctx_cnt, sbi_cnt, i;
+
+ if (ctx->spec_mask & F2FS_SPEC_compress_level)
+ F2FS_OPTION(sbi).compress_level =
+ F2FS_CTX_INFO(ctx).compress_level;
+ if (ctx->spec_mask & F2FS_SPEC_compress_algorithm)
+ F2FS_OPTION(sbi).compress_algorithm =
+ F2FS_CTX_INFO(ctx).compress_algorithm;
+ if (ctx->spec_mask & F2FS_SPEC_compress_log_size)
+ F2FS_OPTION(sbi).compress_log_size =
+ F2FS_CTX_INFO(ctx).compress_log_size;
+ if (ctx->spec_mask & F2FS_SPEC_compress_chksum)
+ F2FS_OPTION(sbi).compress_chksum =
+ F2FS_CTX_INFO(ctx).compress_chksum;
+ if (ctx->spec_mask & F2FS_SPEC_compress_mode)
+ F2FS_OPTION(sbi).compress_mode =
+ F2FS_CTX_INFO(ctx).compress_mode;
+ if (ctx->spec_mask & F2FS_SPEC_compress_extension) {
+ ctx_ext = F2FS_CTX_INFO(ctx).extensions;
+ ctx_cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt;
+ sbi_ext = F2FS_OPTION(sbi).extensions;
+ sbi_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
+ for (i = 0; i < ctx_cnt; i++) {
+ if (strlen(ctx_ext[i]) == 0)
+ continue;
+ strscpy(sbi_ext[sbi_cnt], ctx_ext[i]);
+ sbi_cnt++;
+ }
+ F2FS_OPTION(sbi).compress_ext_cnt = sbi_cnt;
+ }
+ if (ctx->spec_mask & F2FS_SPEC_nocompress_extension) {
+ ctx_ext = F2FS_CTX_INFO(ctx).noextensions;
+ ctx_cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt;
+ sbi_ext = F2FS_OPTION(sbi).noextensions;
+ sbi_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
+ for (i = 0; i < ctx_cnt; i++) {
+ if (strlen(ctx_ext[i]) == 0)
+ continue;
+ strscpy(sbi_ext[sbi_cnt], ctx_ext[i]);
+ sbi_cnt++;
+ }
+ F2FS_OPTION(sbi).nocompress_ext_cnt = sbi_cnt;
+ }
+#endif
+}
+
+static void f2fs_apply_options(struct fs_context *fc, struct super_block *sb)
+{
+ struct f2fs_fs_context *ctx = fc->fs_private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+ F2FS_OPTION(sbi).opt &= ~ctx->opt_mask;
+ F2FS_OPTION(sbi).opt |= F2FS_CTX_INFO(ctx).opt;
+
+ if (ctx->spec_mask & F2FS_SPEC_background_gc)
+ F2FS_OPTION(sbi).bggc_mode = F2FS_CTX_INFO(ctx).bggc_mode;
+ if (ctx->spec_mask & F2FS_SPEC_inline_xattr_size)
+ F2FS_OPTION(sbi).inline_xattr_size =
+ F2FS_CTX_INFO(ctx).inline_xattr_size;
+ if (ctx->spec_mask & F2FS_SPEC_active_logs)
+ F2FS_OPTION(sbi).active_logs = F2FS_CTX_INFO(ctx).active_logs;
+ if (ctx->spec_mask & F2FS_SPEC_reserve_root)
+ F2FS_OPTION(sbi).root_reserved_blocks =
+ F2FS_CTX_INFO(ctx).root_reserved_blocks;
+ if (ctx->spec_mask & F2FS_SPEC_reserve_node)
+ F2FS_OPTION(sbi).root_reserved_nodes =
+ F2FS_CTX_INFO(ctx).root_reserved_nodes;
+ if (ctx->spec_mask & F2FS_SPEC_resgid)
+ F2FS_OPTION(sbi).s_resgid = F2FS_CTX_INFO(ctx).s_resgid;
+ if (ctx->spec_mask & F2FS_SPEC_resuid)
+ F2FS_OPTION(sbi).s_resuid = F2FS_CTX_INFO(ctx).s_resuid;
+ if (ctx->spec_mask & F2FS_SPEC_mode)
+ F2FS_OPTION(sbi).fs_mode = F2FS_CTX_INFO(ctx).fs_mode;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (ctx->spec_mask & F2FS_SPEC_fault_injection)
+ (void)f2fs_build_fault_attr(sbi,
+ F2FS_CTX_INFO(ctx).fault_info.inject_rate, 0, FAULT_RATE);
+ if (ctx->spec_mask & F2FS_SPEC_fault_type)
+ (void)f2fs_build_fault_attr(sbi, 0,
+ F2FS_CTX_INFO(ctx).fault_info.inject_type, FAULT_TYPE);
+#endif
+ if (ctx->spec_mask & F2FS_SPEC_alloc_mode)
+ F2FS_OPTION(sbi).alloc_mode = F2FS_CTX_INFO(ctx).alloc_mode;
+ if (ctx->spec_mask & F2FS_SPEC_fsync_mode)
+ F2FS_OPTION(sbi).fsync_mode = F2FS_CTX_INFO(ctx).fsync_mode;
+ if (ctx->spec_mask & F2FS_SPEC_checkpoint_disable_cap)
+ F2FS_OPTION(sbi).unusable_cap = F2FS_CTX_INFO(ctx).unusable_cap;
+ if (ctx->spec_mask & F2FS_SPEC_checkpoint_disable_cap_perc)
+ F2FS_OPTION(sbi).unusable_cap_perc =
+ F2FS_CTX_INFO(ctx).unusable_cap_perc;
+ if (ctx->spec_mask & F2FS_SPEC_discard_unit)
+ F2FS_OPTION(sbi).discard_unit = F2FS_CTX_INFO(ctx).discard_unit;
+ if (ctx->spec_mask & F2FS_SPEC_memory_mode)
+ F2FS_OPTION(sbi).memory_mode = F2FS_CTX_INFO(ctx).memory_mode;
+ if (ctx->spec_mask & F2FS_SPEC_errors)
+ F2FS_OPTION(sbi).errors = F2FS_CTX_INFO(ctx).errors;
+ if (ctx->spec_mask & F2FS_SPEC_lookup_mode)
+ F2FS_OPTION(sbi).lookup_mode = F2FS_CTX_INFO(ctx).lookup_mode;
+
+ f2fs_apply_compression(fc, sb);
+ f2fs_apply_test_dummy_encryption(fc, sb);
+ f2fs_apply_quota_options(fc, sb);
+}
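+
The f2fs_apply_options() hunk above works in two phases: option parsing fills a per-mount context, with ctx->opt_mask and ctx->spec_mask recording which options the user explicitly specified, and only those fields are copied onto the live superblock, so anything left unspecified keeps its current value across a remount. A minimal standalone sketch of that mask-based apply, with illustrative names (mount_ctx, SPEC_*) rather than the kernel's:

	#include <stdio.h>
	#include <stdint.h>

	#define SPEC_ACTIVE_LOGS	(1u << 0)
	#define SPEC_FS_MODE		(1u << 1)

	struct mount_opts { unsigned int active_logs; int fs_mode; };

	struct mount_ctx {
		uint32_t spec_mask;	/* which fields were explicitly specified */
		struct mount_opts info;	/* parsed values, valid only if the bit is set */
	};

	static void apply_options(const struct mount_ctx *ctx, struct mount_opts *live)
	{
		/* copy only what was explicitly specified; keep existing values otherwise */
		if (ctx->spec_mask & SPEC_ACTIVE_LOGS)
			live->active_logs = ctx->info.active_logs;
		if (ctx->spec_mask & SPEC_FS_MODE)
			live->fs_mode = ctx->info.fs_mode;
	}

	int main(void)
	{
		struct mount_opts live = { .active_logs = 6, .fs_mode = 0 };
		struct mount_ctx ctx = {
			.spec_mask = SPEC_ACTIVE_LOGS,
			.info = { .active_logs = 4, .fs_mode = 1 },
		};

		apply_options(&ctx, &live);
		/* prints "active_logs=4 fs_mode=0": only the specified field changed */
		printf("active_logs=%u fs_mode=%d\n", live.active_logs, live.fs_mode);
		return 0;
	}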
+
+static int f2fs_sanity_check_options(struct f2fs_sb_info *sbi, bool remount)
+{
+ if (f2fs_sb_has_device_alias(sbi) &&
+ !test_opt(sbi, READ_EXTENT_CACHE)) {
+ f2fs_err(sbi, "device aliasing requires extent cache");
+ return -EINVAL;
+ }
+
+ if (!remount)
+ return 0;
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_has_blkzoned(sbi) &&
+ sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) {
+ f2fs_err(sbi,
+ "zoned: max open zones %u is too small, need at least %u open zones",
+ sbi->max_open_zones, F2FS_OPTION(sbi).active_logs);
+ return -EINVAL;
+ }
+#endif
+ if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) {
+ f2fs_warn(sbi, "LFS is not compatible with IPU");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
struct f2fs_inode_info *fi;
@@ -1437,10 +1758,13 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
/* Initialize f2fs-specific inode info */
atomic_set(&fi->dirty_pages, 0);
atomic_set(&fi->i_compr_blocks, 0);
+ atomic_set(&fi->open_count, 0);
+ atomic_set(&fi->writeback, 0);
init_f2fs_rwsem(&fi->i_sem);
spin_lock_init(&fi->i_size_lock);
INIT_LIST_HEAD(&fi->dirty_list);
INIT_LIST_HEAD(&fi->gdirty_list);
+ INIT_LIST_HEAD(&fi->gdonate_list);
init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
init_f2fs_rwsem(&fi->i_xattr_sem);
@@ -1475,10 +1799,10 @@ static int f2fs_drop_inode(struct inode *inode)
* - f2fs_gc -> iput -> evict
* - inode_wait_for_writeback(inode)
*/
- if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
+ if ((!inode_unhashed(inode) && inode_state_read(inode) & I_SYNC)) {
if (!inode->i_nlink && !is_bad_inode(inode)) {
/* to avoid evict_inode call simultaneously */
- atomic_inc(&inode->i_count);
+ __iget(inode);
spin_unlock(&inode->i_lock);
/* should remain fi->extent_tree for writepage */
@@ -1502,7 +1826,7 @@ static int f2fs_drop_inode(struct inode *inode)
trace_f2fs_drop_inode(inode, 0);
return 0;
}
- ret = generic_drop_inode(inode);
+ ret = inode_generic_drop(inode);
if (!ret)
ret = fscrypt_drop_inode(inode);
trace_f2fs_drop_inode(inode, ret);
@@ -1527,6 +1851,12 @@ int f2fs_inode_dirtied(struct inode *inode, bool sync)
inc_page_count(sbi, F2FS_DIRTY_IMETA);
}
spin_unlock(&sbi->inode_lock[DIRTY_META]);
+
+ /* if atomic write is not committed, set inode w/ atomic dirty */
+ if (!ret && f2fs_is_atomic_file(inode) &&
+ !is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
+ set_inode_flag(inode, FI_ATOMIC_DIRTIED);
+
return ret;
}
@@ -1659,14 +1989,6 @@ static void f2fs_put_super(struct super_block *sb)
truncate_inode_pages_final(META_MAPPING(sbi));
}
- for (i = 0; i < NR_COUNT_TYPE; i++) {
- if (!get_pages(sbi, i))
- continue;
- f2fs_err(sbi, "detect filesystem reference count leak during "
- "umount, type: %d, count: %lld", i, get_pages(sbi, i));
- f2fs_bug_on(sbi, 1);
- }
-
f2fs_bug_on(sbi, sbi->fsync_node_num);
f2fs_destroy_compress_inode(sbi);
@@ -1677,6 +1999,15 @@ static void f2fs_put_super(struct super_block *sb)
iput(sbi->meta_inode);
sbi->meta_inode = NULL;
+ /* Should check the page counts after dropping all node/meta pages */
+ for (i = 0; i < NR_COUNT_TYPE; i++) {
+ if (!get_pages(sbi, i))
+ continue;
+ f2fs_err(sbi, "detect filesystem reference count leak during "
+ "umount, type: %d, count: %lld", i, get_pages(sbi, i));
+ f2fs_bug_on(sbi, 1);
+ }
+
/*
* iput() can update stat information, if f2fs_write_checkpoint()
* above failed with error.
@@ -1694,12 +2025,9 @@ static void f2fs_put_super(struct super_block *sb)
kvfree(sbi->ckpt);
- if (sbi->s_chksum_driver)
- crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi->raw_super);
f2fs_destroy_page_array_cache(sbi);
- f2fs_destroy_xattr_caches(sbi);
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
kfree(F2FS_OPTION(sbi).s_qf_names[i]);
@@ -1708,7 +2036,7 @@ static void f2fs_put_super(struct super_block *sb)
destroy_percpu_info(sbi);
f2fs_destroy_iostat(sbi);
for (i = 0; i < NR_PAGE_TYPE; i++)
- kvfree(sbi->write_io[i]);
+ kfree(sbi->write_io[i]);
#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
#endif
@@ -1739,22 +2067,28 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
static int f2fs_freeze(struct super_block *sb)
{
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
if (f2fs_readonly(sb))
return 0;
/* IO error happened before */
- if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
+ if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
/* must be clean, since sync_filesystem() was already called */
- if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
+ if (is_sbi_flag_set(sbi, SBI_IS_DIRTY))
return -EINVAL;
+ sbi->umount_lock_holder = current;
+
/* Let's flush checkpoints and stop the thread. */
- f2fs_flush_ckpt_thread(F2FS_SB(sb));
+ f2fs_flush_ckpt_thread(sbi);
+
+ sbi->umount_lock_holder = NULL;
/* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
- set_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
+ set_sbi_flag(sbi, SBI_IS_FREEZING);
return 0;
}
@@ -1793,26 +2127,32 @@ static int f2fs_statfs_project(struct super_block *sb,
limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
dquot->dq_dqb.dqb_bhardlimit);
- if (limit)
- limit >>= sb->s_blocksize_bits;
+ limit >>= sb->s_blocksize_bits;
+
+ if (limit) {
+ uint64_t remaining = 0;
- if (limit && buf->f_blocks > limit) {
curblock = (dquot->dq_dqb.dqb_curspace +
dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
- buf->f_blocks = limit;
- buf->f_bfree = buf->f_bavail =
- (buf->f_blocks > curblock) ?
- (buf->f_blocks - curblock) : 0;
+ if (limit > curblock)
+ remaining = limit - curblock;
+
+ buf->f_blocks = min(buf->f_blocks, limit);
+ buf->f_bfree = min(buf->f_bfree, remaining);
+ buf->f_bavail = min(buf->f_bavail, remaining);
}
limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
dquot->dq_dqb.dqb_ihardlimit);
- if (limit && buf->f_files > limit) {
- buf->f_files = limit;
- buf->f_ffree =
- (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
- (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
+ if (limit) {
+ uint64_t remaining = 0;
+
+ if (limit > dquot->dq_dqb.dqb_curinodes)
+ remaining = limit - dquot->dq_dqb.dqb_curinodes;
+
+ buf->f_files = min(buf->f_files, limit);
+ buf->f_ffree = min(buf->f_ffree, remaining);
}
spin_unlock(&dquot->dq_dqb_lock);
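+
The reworked f2fs_statfs_project() above replaces the old "only adjust when over the limit" branch with an unconditional clamp: reported totals are capped at the quota limit and the free counts at limit minus usage, saturating at zero so the subtraction can never underflow. A self-contained sketch of that clamping arithmetic (variable names are illustrative):

	#include <stdio.h>
	#include <stdint.h>

	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	static void clamp_to_quota(uint64_t limit, uint64_t used,
				   uint64_t *total, uint64_t *avail)
	{
		uint64_t remaining = 0;

		if (!limit)		/* no limit configured: leave the numbers alone */
			return;
		if (limit > used)	/* saturate at zero instead of underflowing */
			remaining = limit - used;

		*total = MIN(*total, limit);
		*avail = MIN(*avail, remaining);
	}

	int main(void)
	{
		uint64_t total = 1000, avail = 800;

		clamp_to_quota(500 /* limit */, 520 /* used */, &total, &avail);
		/* usage already exceeds the limit, so this prints "total=500 avail=0" */
		printf("total=%llu avail=%llu\n",
		       (unsigned long long)total, (unsigned long long)avail);
		return 0;
	}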
@@ -1838,7 +2178,8 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_blocks = total_count - start_count;
spin_lock(&sbi->stat_lock);
-
+ if (sbi->carve_out)
+ buf->f_blocks -= sbi->current_reserved_blocks;
user_block_count = sbi->user_block_count;
total_valid_node_count = valid_node_count(sbi);
avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
@@ -1870,9 +2211,9 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_fsid = u64_to_fsid(id);
#ifdef CONFIG_QUOTA
- if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
+ if (is_inode_flag_set(d_inode(dentry), FI_PROJ_INHERIT) &&
sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
- f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
+ f2fs_statfs_project(sb, F2FS_I(d_inode(dentry))->i_projid, buf);
}
#endif
return 0;
@@ -2060,9 +2401,11 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
seq_puts(seq, "fragment:block");
seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
- if (test_opt(sbi, RESERVE_ROOT))
- seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
+ if (test_opt(sbi, RESERVE_ROOT) || test_opt(sbi, RESERVE_NODE))
+ seq_printf(seq, ",reserve_root=%u,reserve_node=%u,resuid=%u,"
+ "resgid=%u",
F2FS_OPTION(sbi).root_reserved_blocks,
+ F2FS_OPTION(sbi).root_reserved_nodes,
from_kuid_munged(&init_user_ns,
F2FS_OPTION(sbi).s_resuid),
from_kgid_munged(&init_user_ns,
@@ -2130,6 +2473,16 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC)
seq_printf(seq, ",errors=%s", "panic");
+ if (test_opt(sbi, NAT_BITS))
+ seq_puts(seq, ",nat_bits");
+
+ if (F2FS_OPTION(sbi).lookup_mode == LOOKUP_PERF)
+ seq_show_option(seq, "lookup_mode", "perf");
+ else if (F2FS_OPTION(sbi).lookup_mode == LOOKUP_COMPAT)
+ seq_show_option(seq, "lookup_mode", "compat");
+ else if (F2FS_OPTION(sbi).lookup_mode == LOOKUP_AUTO)
+ seq_show_option(seq, "lookup_mode", "auto");
+
return 0;
}
@@ -2177,8 +2530,8 @@ static void default_options(struct f2fs_sb_info *sbi, bool remount)
set_opt(sbi, INLINE_DATA);
set_opt(sbi, INLINE_DENTRY);
set_opt(sbi, MERGE_CHECKPOINT);
+ set_opt(sbi, LAZYTIME);
F2FS_OPTION(sbi).unusable_cap = 0;
- sbi->sb->s_flags |= SB_LAZYTIME;
if (!f2fs_is_readonly(sbi))
set_opt(sbi, FLUSH_MERGE);
if (f2fs_sb_has_blkzoned(sbi))
@@ -2193,7 +2546,9 @@ static void default_options(struct f2fs_sb_info *sbi, bool remount)
set_opt(sbi, POSIX_ACL);
#endif
- f2fs_build_fault_attr(sbi, 0, 0);
+ f2fs_build_fault_attr(sbi, 0, 0, FAULT_ALL);
+
+ F2FS_OPTION(sbi).lookup_mode = LOOKUP_PERF;
}
#ifdef CONFIG_QUOTA
@@ -2274,21 +2629,48 @@ out_unlock:
restore_flag:
sbi->gc_mode = gc_mode;
sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
+ f2fs_info(sbi, "f2fs_disable_checkpoint() finish, err:%d", err);
return err;
}
-static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
+static int f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
{
- int retry = DEFAULT_RETRY_IO_COUNT;
+ unsigned int nr_pages = get_pages(sbi, F2FS_DIRTY_DATA) / 16;
+ long long start, writeback, lock, sync_inode, end;
+ int ret;
+
+ f2fs_info(sbi, "%s start, meta: %lld, node: %lld, data: %lld",
+ __func__,
+ get_pages(sbi, F2FS_DIRTY_META),
+ get_pages(sbi, F2FS_DIRTY_NODES),
+ get_pages(sbi, F2FS_DIRTY_DATA));
+
+ f2fs_update_time(sbi, ENABLE_TIME);
+
+ start = ktime_get();
/* we should flush all the data to keep data consistency */
- do {
+ while (get_pages(sbi, F2FS_DIRTY_DATA)) {
+ writeback_inodes_sb_nr(sbi->sb, nr_pages, WB_REASON_SYNC);
+ f2fs_io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
+
+ if (f2fs_time_over(sbi, ENABLE_TIME))
+ break;
+ }
+ writeback = ktime_get();
+
+ f2fs_down_write(&sbi->cp_enable_rwsem);
+
+ lock = ktime_get();
+
+ if (get_pages(sbi, F2FS_DIRTY_DATA))
sync_inodes_sb(sbi->sb);
- f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
- } while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);
- if (unlikely(retry < 0))
- f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");
+ if (unlikely(get_pages(sbi, F2FS_DIRTY_DATA)))
+ f2fs_warn(sbi, "%s: has some unwritten data: %lld",
+ __func__, get_pages(sbi, F2FS_DIRTY_DATA));
+
+ sync_inode = ktime_get();
f2fs_down_write(&sbi->gc_lock);
f2fs_dirty_to_prefree(sbi);
@@ -2297,17 +2679,40 @@ static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
set_sbi_flag(sbi, SBI_IS_DIRTY);
f2fs_up_write(&sbi->gc_lock);
- f2fs_sync_fs(sbi->sb, 1);
+ f2fs_info(sbi, "%s sync_fs, meta: %lld, imeta: %lld, node: %lld, dents: %lld, qdata: %lld",
+ __func__,
+ get_pages(sbi, F2FS_DIRTY_META),
+ get_pages(sbi, F2FS_DIRTY_IMETA),
+ get_pages(sbi, F2FS_DIRTY_NODES),
+ get_pages(sbi, F2FS_DIRTY_DENTS),
+ get_pages(sbi, F2FS_DIRTY_QDATA));
+ ret = f2fs_sync_fs(sbi->sb, 1);
+ if (ret)
+ f2fs_err(sbi, "%s sync_fs failed, ret: %d", __func__, ret);
/* Let's ensure there's no pending checkpoint anymore */
f2fs_flush_ckpt_thread(sbi);
+
+ f2fs_up_write(&sbi->cp_enable_rwsem);
+
+ end = ktime_get();
+
+ f2fs_info(sbi, "%s end, writeback:%llu, "
+ "lock:%llu, sync_inode:%llu, sync_fs:%llu",
+ __func__,
+ ktime_ms_delta(writeback, start),
+ ktime_ms_delta(lock, writeback),
+ ktime_ms_delta(sync_inode, lock),
+ ktime_ms_delta(end, sync_inode));
+ return ret;
}
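+
Besides bounding the flush loop with ENABLE_TIME, the rewritten f2fs_enable_checkpoint() above instruments each phase with ktime_get() and reports millisecond deltas via ktime_ms_delta(). A userspace analogue of that phase timing, purely for illustration (the sleeps stand in for the writeback and sync work):

	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	static long long now_ms(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
	}

	int main(void)
	{
		long long start, writeback, end;

		start = now_ms();
		usleep(20000);		/* stand-in for the writeback phase */
		writeback = now_ms();
		usleep(5000);		/* stand-in for the sync phase */
		end = now_ms();

		/* same shape as the f2fs_info() at the end of the hunk */
		printf("writeback:%lld ms, sync:%lld ms\n",
		       writeback - start, end - writeback);
		return 0;
	}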
-static int f2fs_remount(struct super_block *sb, int *flags, char *data)
+static int __f2fs_remount(struct fs_context *fc, struct super_block *sb)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
struct f2fs_mount_info org_mount_opt;
unsigned long old_sb_flags;
+ unsigned int flags = fc->sb_flags;
int err;
bool need_restart_gc = false, need_stop_gc = false;
bool need_restart_flush = false, need_stop_flush = false;
@@ -2320,6 +2725,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
bool no_discard = !test_opt(sbi, DISCARD);
bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
bool block_unit_discard = f2fs_block_unit_discard(sbi);
+ bool no_nat_bits = !test_opt(sbi, NAT_BITS);
#ifdef CONFIG_QUOTA
int i, j;
#endif
@@ -2331,6 +2737,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
org_mount_opt = sbi->mount_opt;
old_sb_flags = sb->s_flags;
+ sbi->umount_lock_holder = current;
+
#ifdef CONFIG_QUOTA
org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
for (i = 0; i < MAXQUOTAS; i++) {
@@ -2350,7 +2758,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
#endif
/* recover superblocks we couldn't write due to previous RO mount */
- if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
+ if (!(flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
err = f2fs_commit_super(sbi, false);
f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
err);
@@ -2360,21 +2768,15 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
default_options(sbi, true);
- /* parse mount options */
- err = parse_options(sb, data, true);
+ err = f2fs_check_opt_consistency(fc, sb);
if (err)
goto restore_opts;
-#ifdef CONFIG_BLK_DEV_ZONED
- if (f2fs_sb_has_blkzoned(sbi) &&
- sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) {
- f2fs_err(sbi,
- "zoned: max open zones %u is too small, need at least %u open zones",
- sbi->max_open_zones, F2FS_OPTION(sbi).active_logs);
- err = -EINVAL;
+ f2fs_apply_options(fc, sb);
+
+ err = f2fs_sanity_check_options(sbi, true);
+ if (err)
goto restore_opts;
- }
-#endif
/* flush outstanding errors before changing fs state */
flush_work(&sbi->s_error_work);
@@ -2383,20 +2785,20 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
* Previous and new state of filesystem is RO,
* so skip checking GC and FLUSH_MERGE conditions.
*/
- if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
+ if (f2fs_readonly(sb) && (flags & SB_RDONLY))
goto skip;
- if (f2fs_dev_is_readonly(sbi) && !(*flags & SB_RDONLY)) {
+ if (f2fs_dev_is_readonly(sbi) && !(flags & SB_RDONLY)) {
err = -EROFS;
goto restore_opts;
}
#ifdef CONFIG_QUOTA
- if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
+ if (!f2fs_readonly(sb) && (flags & SB_RDONLY)) {
err = dquot_suspend(sb, -1);
if (err < 0)
goto restore_opts;
- } else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
+ } else if (f2fs_readonly(sb) && !(flags & SB_RDONLY)) {
/* dquot_resume needs RW */
sb->s_flags &= ~SB_RDONLY;
if (sb_any_quota_suspended(sb)) {
@@ -2408,12 +2810,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
}
}
#endif
- if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) {
- err = -EINVAL;
- f2fs_warn(sbi, "LFS is not compatible with IPU");
- goto restore_opts;
- }
-
/* disallow enable atgc dynamically */
if (no_atgc == !!test_opt(sbi, ATGC)) {
err = -EINVAL;
@@ -2446,7 +2842,13 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
- if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
+ if (no_nat_bits == !!test_opt(sbi, NAT_BITS)) {
+ err = -EINVAL;
+ f2fs_warn(sbi, "switch nat_bits option is not allowed");
+ goto restore_opts;
+ }
+
+ if ((flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
err = -EINVAL;
f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
goto restore_opts;
@@ -2457,7 +2859,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
* or if background_gc = off is passed in mount
* option. Also sync the filesystem.
*/
- if ((*flags & SB_RDONLY) ||
+ if ((flags & SB_RDONLY) ||
(F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF &&
!test_opt(sbi, GC_MERGE))) {
if (sbi->gc_thread) {
@@ -2471,7 +2873,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
need_stop_gc = true;
}
- if (*flags & SB_RDONLY) {
+ if (flags & SB_RDONLY) {
sync_inodes_sb(sb);
set_sbi_flag(sbi, SBI_IS_DIRTY);
@@ -2484,7 +2886,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
* We stop issue flush thread if FS is mounted as RO
* or if flush_merge is not passed in mount option.
*/
- if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
+ if ((flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
clear_opt(sbi, FLUSH_MERGE);
f2fs_destroy_flush_cmd_control(sbi, false);
need_restart_flush = true;
@@ -2516,7 +2918,9 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
goto restore_discard;
need_enable_checkpoint = true;
} else {
- f2fs_enable_checkpoint(sbi);
+ err = f2fs_enable_checkpoint(sbi);
+ if (err)
+ goto restore_discard;
need_disable_checkpoint = true;
}
}
@@ -2526,11 +2930,11 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
* triggered while remount and we need to take care of it before
* returning from remount.
*/
- if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
+ if ((flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
!test_opt(sbi, MERGE_CHECKPOINT)) {
f2fs_stop_ckpt_thread(sbi);
} else {
- /* Flush if the prevous checkpoint, if exists. */
+ /* Flush the previous checkpoint, if it exists. */
f2fs_flush_ckpt_thread(sbi);
err = f2fs_start_ckpt_thread(sbi);
@@ -2553,11 +2957,14 @@ skip:
(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
limit_reserve_root(sbi);
- *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
+ fc->sb_flags = (flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
+
+ sbi->umount_lock_holder = NULL;
return 0;
restore_checkpoint:
if (need_enable_checkpoint) {
- f2fs_enable_checkpoint(sbi);
+ if (f2fs_enable_checkpoint(sbi))
+ f2fs_warn(sbi, "checkpoint has not been enabled");
} else if (need_disable_checkpoint) {
if (f2fs_disable_checkpoint(sbi))
f2fs_warn(sbi, "checkpoint has not been disabled");
@@ -2594,6 +3001,8 @@ restore_opts:
#endif
sbi->mount_opt = org_mount_opt;
sb->s_flags = old_sb_flags;
+
+ sbi->umount_lock_holder = NULL;
return err;
}
@@ -2657,12 +3066,9 @@ static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
{
struct inode *inode = sb_dqopt(sb)->files[type];
struct address_space *mapping = inode->i_mapping;
- block_t blkidx = F2FS_BYTES_TO_BLK(off);
- int offset = off & (sb->s_blocksize - 1);
int tocopy;
size_t toread;
loff_t i_size = i_size_read(inode);
- struct page *page;
if (off > i_size)
return 0;
@@ -2671,37 +3077,42 @@ static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
len = i_size - off;
toread = len;
while (toread > 0) {
- tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
+ struct folio *folio;
+ size_t offset;
+
repeat:
- page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
- if (IS_ERR(page)) {
- if (PTR_ERR(page) == -ENOMEM) {
+ folio = mapping_read_folio_gfp(mapping, off >> PAGE_SHIFT,
+ GFP_NOFS);
+ if (IS_ERR(folio)) {
+ if (PTR_ERR(folio) == -ENOMEM) {
memalloc_retry_wait(GFP_NOFS);
goto repeat;
}
set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
- return PTR_ERR(page);
+ return PTR_ERR(folio);
}
+ offset = offset_in_folio(folio, off);
+ tocopy = min(folio_size(folio) - offset, toread);
- lock_page(page);
+ folio_lock(folio);
- if (unlikely(page->mapping != mapping)) {
- f2fs_put_page(page, 1);
+ if (unlikely(folio->mapping != mapping)) {
+ f2fs_folio_put(folio, true);
goto repeat;
}
- if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 1);
- set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
- return -EIO;
- }
- memcpy_from_page(data, page, offset, tocopy);
- f2fs_put_page(page, 1);
+ /*
+ * should never happen, just leave f2fs_bug_on() here to catch
+ * any potential bug.
+ */
+ f2fs_bug_on(F2FS_SB(sb), !folio_test_uptodate(folio));
+
+ memcpy_from_folio(data, folio, offset, tocopy);
+ f2fs_folio_put(folio, true);
- offset = 0;
toread -= tocopy;
data += tocopy;
- blkidx++;
+ off += tocopy;
}
return len;
}
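+
The folio conversion of f2fs_quota_read() above keeps the same chunked-copy shape: each pass computes the offset inside the current folio, copies min(folio_size() - offset, remaining) bytes, then advances off, data and toread. A userspace analogue of that loop arithmetic (the fixed CHUNK_SIZE stands in for folio_size(); in plain userspace a single memcpy would of course do):

	#include <stdio.h>
	#include <string.h>
	#include <sys/types.h>

	#define CHUNK_SIZE 4096u

	static size_t chunked_read(const char *src, size_t src_len,
				   char *dst, off_t off, size_t len)
	{
		size_t done = 0;

		if ((size_t)off >= src_len)
			return 0;
		if (len > src_len - off)
			len = src_len - off;	/* clamp to i_size, as the kernel code does */

		while (done < len) {
			size_t offset = off % CHUNK_SIZE;	/* offset inside the chunk */
			size_t tocopy = CHUNK_SIZE - offset;

			if (tocopy > len - done)
				tocopy = len - done;

			memcpy(dst + done, src + off, tocopy);
			off += tocopy;
			done += tocopy;
		}
		return done;
	}

	int main(void)
	{
		static char src[10000], dst[6000];

		memset(src, 'q', sizeof(src));
		/* prints "copied 6000 bytes" */
		printf("copied %zu bytes\n",
		       chunked_read(src, sizeof(src), dst, 3000, 6000));
		return 0;
	}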
@@ -2728,7 +3139,7 @@ retry:
&folio, &fsdata);
if (unlikely(err)) {
if (err == -ENOMEM) {
- f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+ memalloc_retry_wait(GFP_NOFS);
goto retry;
}
set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
@@ -2910,7 +3321,7 @@ out:
return ret;
}
-int f2fs_quota_sync(struct super_block *sb, int type)
+int f2fs_do_quota_sync(struct super_block *sb, int type)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
struct quota_info *dqopt = sb_dqopt(sb);
@@ -2958,11 +3369,21 @@ int f2fs_quota_sync(struct super_block *sb, int type)
return ret;
}
+static int f2fs_quota_sync(struct super_block *sb, int type)
+{
+ int ret;
+
+ F2FS_SB(sb)->umount_lock_holder = current;
+ ret = f2fs_do_quota_sync(sb, type);
+ F2FS_SB(sb)->umount_lock_holder = NULL;
+ return ret;
+}
+
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
const struct path *path)
{
struct inode *inode;
- int err;
+ int err = 0;
/* if quota sysfile exists, deny enabling quota with specific file */
if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
@@ -2973,31 +3394,34 @@ static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
if (path->dentry->d_sb != sb)
return -EXDEV;
- err = f2fs_quota_sync(sb, type);
+ F2FS_SB(sb)->umount_lock_holder = current;
+
+ err = f2fs_do_quota_sync(sb, type);
if (err)
- return err;
+ goto out;
inode = d_inode(path->dentry);
err = filemap_fdatawrite(inode->i_mapping);
if (err)
- return err;
+ goto out;
err = filemap_fdatawait(inode->i_mapping);
if (err)
- return err;
+ goto out;
err = dquot_quota_on(sb, type, format_id, path);
if (err)
- return err;
+ goto out;
inode_lock(inode);
F2FS_I(inode)->i_flags |= F2FS_QUOTA_DEFAULT_FL;
f2fs_set_inode_flags(inode);
inode_unlock(inode);
f2fs_mark_inode_dirty_sync(inode, false);
-
- return 0;
+out:
+ F2FS_SB(sb)->umount_lock_holder = NULL;
+ return err;
}
static int __f2fs_quota_off(struct super_block *sb, int type)
@@ -3008,7 +3432,7 @@ static int __f2fs_quota_off(struct super_block *sb, int type)
if (!inode || !igrab(inode))
return dquot_quota_off(sb, type);
- err = f2fs_quota_sync(sb, type);
+ err = f2fs_do_quota_sync(sb, type);
if (err)
goto out_put;
@@ -3031,6 +3455,8 @@ static int f2fs_quota_off(struct super_block *sb, int type)
struct f2fs_sb_info *sbi = F2FS_SB(sb);
int err;
+ F2FS_SB(sb)->umount_lock_holder = current;
+
err = __f2fs_quota_off(sb, type);
/*
@@ -3040,6 +3466,9 @@ static int f2fs_quota_off(struct super_block *sb, int type)
*/
if (is_journalled_quota(sbi))
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+
+ F2FS_SB(sb)->umount_lock_holder = NULL;
+
return err;
}
@@ -3172,7 +3601,7 @@ int f2fs_dquot_initialize(struct inode *inode)
return 0;
}
-int f2fs_quota_sync(struct super_block *sb, int type)
+int f2fs_do_quota_sync(struct super_block *sb, int type)
{
return 0;
}
@@ -3200,7 +3629,6 @@ static const struct super_operations f2fs_sops = {
.freeze_fs = f2fs_freeze,
.unfreeze_fs = f2fs_unfreeze,
.statfs = f2fs_statfs,
- .remount_fs = f2fs_remount,
.shutdown = f2fs_shutdown,
};
@@ -3263,6 +3691,8 @@ static struct block_device **f2fs_get_devices(struct super_block *sb,
}
static const struct fscrypt_operations f2fs_cryptops = {
+ .inode_info_offs = (int)offsetof(struct f2fs_inode_info, i_crypt_info) -
+ (int)offsetof(struct f2fs_inode_info, vfs_inode),
.needs_bounce_pages = 1,
.has_32bit_inodes = 1,
.supports_subblock_data_units = 1,
@@ -3274,7 +3704,7 @@ static const struct fscrypt_operations f2fs_cryptops = {
.has_stable_inodes = f2fs_has_stable_inodes,
.get_devices = f2fs_get_devices,
};
-#endif
+#endif /* CONFIG_FS_ENCRYPTION */
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
u64 ino, u32 generation)
@@ -3382,12 +3812,13 @@ static int __f2fs_commit_super(struct f2fs_sb_info *sbi, struct folio *folio,
bio = bio_alloc(sbi->sb->s_bdev, 1, opf, GFP_NOFS);
/* it doesn't need to set crypto context for superblock update */
- bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(folio_index(folio));
+ bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(folio->index);
if (!bio_add_folio(bio, folio, folio_size(folio), 0))
f2fs_bug_on(sbi, 1);
ret = submit_bio_wait(bio);
+ bio_put(bio);
folio_end_writeback(folio);
return ret;
@@ -3508,7 +3939,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
return -EFSCORRUPTED;
}
crc = le32_to_cpu(raw_super->crc);
- if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
+ if (crc != f2fs_crc32(raw_super, crc_offset)) {
f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
return -EFSCORRUPTED;
}
@@ -3649,6 +4080,20 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
if (sanity_check_area_boundary(sbi, folio, index))
return -EFSCORRUPTED;
+ /*
+ * Check for legacy summary layout on 16KB+ block devices.
+ * Modern f2fs-tools packs multiple 4KB summary areas into one block,
+ * whereas legacy versions used one block per summary, leading
+ * to a much larger SSA.
+ */
+ if (SUMS_PER_BLOCK > 1 &&
+ !(__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_PACKED_SSA))) {
+ f2fs_info(sbi, "Error: Device formatted with a legacy version. "
+ "Please reformat with a tool supporting the packed ssa "
+ "feature for block sizes larger than 4kb.");
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
@@ -3667,6 +4112,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
block_t user_block_count, valid_user_blocks;
block_t avail_node_count, valid_node_count;
unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
+ unsigned int sit_blk_cnt;
int i, j;
total = le32_to_cpu(raw_super->segment_count);
@@ -3778,6 +4224,13 @@ skip_cross:
return 1;
}
+ sit_blk_cnt = DIV_ROUND_UP(main_segs, SIT_ENTRY_PER_BLOCK);
+ if (sit_bitmap_size * 8 < sit_blk_cnt) {
+ f2fs_err(sbi, "Wrong bitmap size: sit: %u, sit_blk_cnt:%u",
+ sit_bitmap_size, sit_blk_cnt);
+ return 1;
+ }
+
cp_pack_start_sum = __start_sum_addr(sbi);
cp_payload = __cp_payload(sbi);
if (cp_pack_start_sum < cp_payload + 1 ||
@@ -3832,6 +4285,8 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->total_node_count = SEGS_TO_BLKS(sbi,
((le32_to_cpu(raw_super->segment_count_nat) / 2) *
NAT_ENTRY_PER_BLOCK));
+ sbi->allocate_section_hint = le32_to_cpu(raw_super->section_count);
+ sbi->allocate_section_policy = ALLOCATE_FORWARD_NOHINT;
F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
@@ -3855,6 +4310,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
+ sbi->interval_time[ENABLE_TIME] = DEF_ENABLE_INTERVAL;
sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
DEF_UMOUNT_DISCARD_TIMEOUT;
clear_sbi_flag(sbi, SBI_NEED_FSCK);
@@ -4056,7 +4512,7 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
/* we should update superblock crc here */
if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
- crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
+ crc = f2fs_crc32(F2FS_RAW_SUPER(sbi),
offsetof(struct f2fs_super_block, crc));
F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
}
@@ -4131,50 +4587,9 @@ void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
spin_unlock_irqrestore(&sbi->error_lock, flags);
}
-static bool f2fs_update_errors(struct f2fs_sb_info *sbi)
-{
- unsigned long flags;
- bool need_update = false;
-
- spin_lock_irqsave(&sbi->error_lock, flags);
- if (sbi->error_dirty) {
- memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
- MAX_F2FS_ERRORS);
- sbi->error_dirty = false;
- need_update = true;
- }
- spin_unlock_irqrestore(&sbi->error_lock, flags);
-
- return need_update;
-}
-
-static void f2fs_record_errors(struct f2fs_sb_info *sbi, unsigned char error)
-{
- int err;
-
- f2fs_down_write(&sbi->sb_lock);
-
- if (!f2fs_update_errors(sbi))
- goto out_unlock;
-
- err = f2fs_commit_super(sbi, false);
- if (err)
- f2fs_err_ratelimited(sbi,
- "f2fs_commit_super fails to record errors:%u, err:%d",
- error, err);
-out_unlock:
- f2fs_up_write(&sbi->sb_lock);
-}
-
void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error)
{
f2fs_save_errors(sbi, error);
- f2fs_record_errors(sbi, error);
-}
-
-void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error)
-{
- f2fs_save_errors(sbi, error);
if (!sbi->error_dirty)
return;
@@ -4222,6 +4637,8 @@ void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason)
if (shutdown)
set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
+ else
+ dump_stack();
/*
* Continue filesystem operators if errors=continue. Should not set
@@ -4252,14 +4669,35 @@ static void f2fs_record_error_work(struct work_struct *work)
f2fs_record_stop_reason(sbi);
}
-static inline unsigned int get_first_zoned_segno(struct f2fs_sb_info *sbi)
+static inline unsigned int get_first_seq_zone_segno(struct f2fs_sb_info *sbi)
{
+#ifdef CONFIG_BLK_DEV_ZONED
+ unsigned int zoneno, total_zones;
int devi;
- for (devi = 0; devi < sbi->s_ndevs; devi++)
- if (bdev_is_zoned(FDEV(devi).bdev))
- return GET_SEGNO(sbi, FDEV(devi).start_blk);
- return 0;
+ if (!f2fs_sb_has_blkzoned(sbi))
+ return NULL_SEGNO;
+
+ for (devi = 0; devi < sbi->s_ndevs; devi++) {
+ if (!bdev_is_zoned(FDEV(devi).bdev))
+ continue;
+
+ total_zones = GET_ZONE_FROM_SEG(sbi, FDEV(devi).total_segments);
+
+ for (zoneno = 0; zoneno < total_zones; zoneno++) {
+ unsigned int segs, blks;
+
+ if (!f2fs_zone_is_seq(sbi, devi, zoneno))
+ continue;
+
+ segs = GET_SEG_FROM_SEC(sbi,
+ zoneno * sbi->secs_per_zone);
+ blks = SEGS_TO_BLKS(sbi, segs);
+ return GET_SEGNO(sbi, FDEV(devi).start_blk + blks);
+ }
+ }
+#endif
+ return NULL_SEGNO;
}
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
@@ -4290,12 +4728,22 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev);
sbi->aligned_blksize = true;
+ sbi->bggc_io_aware = AWARE_ALL_IO;
#ifdef CONFIG_BLK_DEV_ZONED
sbi->max_open_zones = UINT_MAX;
sbi->blkzone_alloc_policy = BLKZONE_ALLOC_PRIOR_SEQ;
+ sbi->bggc_io_aware = AWARE_READ_IO;
#endif
for (i = 0; i < max_devices; i++) {
+ if (max_devices == 1) {
+ FDEV(i).total_segments =
+ le32_to_cpu(raw_super->segment_count_main);
+ FDEV(i).start_blk = 0;
+ FDEV(i).end_blk = FDEV(i).total_segments *
+ BLKS_PER_SEG(sbi);
+ }
+
if (i == 0)
FDEV(0).bdev_file = sbi->sb->s_bdev_file;
else if (!RDEV(i).path[0])
@@ -4312,6 +4760,8 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
SEGS_TO_BLKS(sbi,
FDEV(i).total_segments) - 1 +
le32_to_cpu(raw_super->segment0_blkaddr);
+ sbi->allocate_section_hint = FDEV(i).total_segments /
+ SEGS_PER_SEC(sbi);
} else {
FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
FDEV(i).end_blk = FDEV(i).start_blk +
@@ -4420,14 +4870,14 @@ static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
sbi->readdir_ra = true;
}
-static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
+static int f2fs_fill_super(struct super_block *sb, struct fs_context *fc)
{
+ struct f2fs_fs_context *ctx = fc->fs_private;
struct f2fs_sb_info *sbi;
struct f2fs_super_block *raw_super;
struct inode *root;
int err;
bool skip_recovery = false, need_fsck = false;
- char *options = NULL;
int recovery, i, valid_super_block;
struct curseg_info *seg_i;
int retry_cnt = 1;
@@ -4456,6 +4906,7 @@ try_onemore:
init_f2fs_rwsem(&sbi->node_change);
spin_lock_init(&sbi->stat_lock);
init_f2fs_rwsem(&sbi->cp_rwsem);
+ init_f2fs_rwsem(&sbi->cp_enable_rwsem);
init_f2fs_rwsem(&sbi->quota_sem);
init_waitqueue_head(&sbi->cp_wait);
spin_lock_init(&sbi->error_lock);
@@ -4466,15 +4917,6 @@ try_onemore:
}
mutex_init(&sbi->flush_lock);
- /* Load the checksum driver */
- sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
- if (IS_ERR(sbi->s_chksum_driver)) {
- f2fs_err(sbi, "Cannot load crc32 driver.");
- err = PTR_ERR(sbi->s_chksum_driver);
- sbi->s_chksum_driver = NULL;
- goto free_sbi;
- }
-
/* set a block size */
if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
f2fs_err(sbi, "unable to set blocksize");
@@ -4495,18 +4937,18 @@ try_onemore:
/* precompute checksum seed for metadata */
if (f2fs_sb_has_inode_chksum(sbi))
- sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
- sizeof(raw_super->uuid));
+ sbi->s_chksum_seed = f2fs_chksum(~0, raw_super->uuid,
+ sizeof(raw_super->uuid));
default_options(sbi, false);
- /* parse mount options */
- options = kstrdup((const char *)data, GFP_KERNEL);
- if (data && !options) {
- err = -ENOMEM;
+
+ err = f2fs_check_opt_consistency(fc, sb);
+ if (err)
goto free_sb_buf;
- }
- err = parse_options(sb, options, false);
+ f2fs_apply_options(fc, sb);
+
+ err = f2fs_sanity_check_options(sbi, false);
if (err)
goto free_options;
@@ -4544,6 +4986,14 @@ try_onemore:
sb->s_time_gran = 1;
sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
+ if (test_opt(sbi, INLINECRYPT))
+ sb->s_flags |= SB_INLINECRYPT;
+
+ if (test_opt(sbi, LAZYTIME))
+ sb->s_flags |= SB_LAZYTIME;
+ else
+ sb->s_flags &= ~SB_LAZYTIME;
+
super_set_uuid(sb, (void *) raw_super->uuid, sizeof(raw_super->uuid));
super_set_sysfs_name_bdev(sb);
sb->s_iflags |= SB_I_CGROUPWB;
@@ -4568,13 +5018,9 @@ try_onemore:
if (err)
goto free_iostat;
- /* init per sbi slab cache */
- err = f2fs_init_xattr_caches(sbi);
- if (err)
- goto free_percpu;
err = f2fs_init_page_array_cache(sbi);
if (err)
- goto free_xattr_cache;
+ goto free_percpu;
/* get an inode for meta space */
sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
@@ -4663,7 +5109,11 @@ try_onemore:
sbi->sectors_written_start = f2fs_get_sectors_written(sbi);
/* get segno of first zoned block device */
- sbi->first_zoned_segno = get_first_zoned_segno(sbi);
+ sbi->first_seq_zone_segno = get_first_seq_zone_segno(sbi);
+
+ sbi->reserved_pin_section = f2fs_sb_has_blkzoned(sbi) ?
+ ZONED_PIN_SEC_REQUIRED_COUNT :
+ GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi));
/* Read accumulated write IO statistics if exists */
seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
@@ -4714,6 +5164,7 @@ try_onemore:
if (err)
goto free_compress_inode;
+ sbi->umount_lock_holder = current;
#ifdef CONFIG_QUOTA
/* Enable quota usage during mount */
if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
@@ -4729,8 +5180,10 @@ try_onemore:
if (err)
goto free_meta;
- if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
+ if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG))) {
+ skip_recovery = true;
goto reset_checkpoint;
+ }
/* recover fsynced data */
if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
@@ -4772,18 +5225,22 @@ try_onemore:
}
} else {
err = f2fs_recover_fsync_data(sbi, true);
-
- if (!f2fs_readonly(sb) && err > 0) {
- err = -EINVAL;
- f2fs_err(sbi, "Need to recover fsync data");
- goto free_meta;
+ if (err > 0) {
+ if (!f2fs_readonly(sb)) {
+ f2fs_err(sbi, "Need to recover fsync data");
+ err = -EINVAL;
+ goto free_meta;
+ } else {
+ f2fs_info(sbi, "drop all fsynced data");
+ err = 0;
+ }
}
}
+reset_checkpoint:
#ifdef CONFIG_QUOTA
f2fs_recover_quota_end(sbi, quota_enabled);
#endif
-reset_checkpoint:
/*
* If the f2fs is not readonly and fsync data recovery succeeds,
* write pointer consistency of cursegs and other zones are already
@@ -4803,13 +5260,12 @@ reset_checkpoint:
if (err)
goto sync_free_meta;
- if (test_opt(sbi, DISABLE_CHECKPOINT)) {
+ if (test_opt(sbi, DISABLE_CHECKPOINT))
err = f2fs_disable_checkpoint(sbi);
- if (err)
- goto sync_free_meta;
- } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
- f2fs_enable_checkpoint(sbi);
- }
+ else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG))
+ err = f2fs_enable_checkpoint(sbi);
+ if (err)
+ goto sync_free_meta;
/*
* If filesystem is not mounted as read-only then
@@ -4822,7 +5278,6 @@ reset_checkpoint:
if (err)
goto sync_free_meta;
}
- kvfree(options);
/* recover broken superblock */
if (recovery) {
@@ -4840,6 +5295,8 @@ reset_checkpoint:
f2fs_update_time(sbi, CP_TIME);
f2fs_update_time(sbi, REQ_TIME);
clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
+
+ sbi->umount_lock_holder = NULL;
return 0;
sync_free_meta:
@@ -4895,15 +5352,13 @@ free_meta_inode:
sbi->meta_inode = NULL;
free_page_array_cache:
f2fs_destroy_page_array_cache(sbi);
-free_xattr_cache:
- f2fs_destroy_xattr_caches(sbi);
free_percpu:
destroy_percpu_info(sbi);
free_iostat:
f2fs_destroy_iostat(sbi);
free_bio_info:
for (i = 0; i < NR_PAGE_TYPE; i++)
- kvfree(sbi->write_io[i]);
+ kfree(sbi->write_io[i]);
#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
@@ -4914,13 +5369,11 @@ free_options:
for (i = 0; i < MAXQUOTAS; i++)
kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
- fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
- kvfree(options);
+ /* no need to free dummy_enc_policy; keep it in ctx on failure */
+ swap(F2FS_CTX_INFO(ctx).dummy_enc_policy, F2FS_OPTION(sbi).dummy_enc_policy);
free_sb_buf:
kfree(raw_super);
free_sbi:
- if (sbi->s_chksum_driver)
- crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi);
sb->s_fs_info = NULL;
@@ -4933,17 +5386,46 @@ free_sbi:
return err;
}
-static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static int f2fs_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, f2fs_fill_super);
+}
+
+static int f2fs_reconfigure(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
+ struct super_block *sb = fc->root->d_sb;
+
+ return __f2fs_remount(fc, sb);
}
+static void f2fs_fc_free(struct fs_context *fc)
+{
+ struct f2fs_fs_context *ctx = fc->fs_private;
+
+ if (!ctx)
+ return;
+
+#ifdef CONFIG_QUOTA
+ f2fs_unnote_qf_name_all(fc);
+#endif
+ fscrypt_free_dummy_policy(&F2FS_CTX_INFO(ctx).dummy_enc_policy);
+ kfree(ctx);
+}
+
+static const struct fs_context_operations f2fs_context_ops = {
+ .parse_param = f2fs_parse_param,
+ .get_tree = f2fs_get_tree,
+ .reconfigure = f2fs_reconfigure,
+ .free = f2fs_fc_free,
+};
+
static void kill_f2fs_super(struct super_block *sb)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
if (sb->s_root) {
+ sbi->umount_lock_holder = current;
+
set_sbi_flag(sbi, SBI_IS_CLOSE);
f2fs_stop_gc_thread(sbi);
f2fs_stop_discard_thread(sbi);
@@ -4978,10 +5460,24 @@ static void kill_f2fs_super(struct super_block *sb)
}
}
+static int f2fs_init_fs_context(struct fs_context *fc)
+{
+ struct f2fs_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(struct f2fs_fs_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ fc->fs_private = ctx;
+ fc->ops = &f2fs_context_ops;
+
+ return 0;
+}
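+
The wiring above hooks f2fs into the new mount API: f2fs_init_fs_context() allocates the private context, f2fs_parse_param() (defined outside this excerpt) fills it, f2fs_get_tree() hands f2fs_fill_super() to get_tree_bdev(), and .reconfigure replaces the removed .remount_fs path. A hedged sketch of what a parse_param callback for this API typically looks like; the Opt_* values, spec table and context fields below are illustrative, not f2fs's:

	#include <linux/fs_context.h>
	#include <linux/fs_parser.h>

	enum { Opt_active_logs, Opt_lazytime };

	static const struct fs_parameter_spec example_param_specs[] = {
		fsparam_u32("active_logs",	Opt_active_logs),
		fsparam_flag("lazytime",	Opt_lazytime),
		{}
	};

	struct example_fs_context {
		unsigned int	active_logs;
		unsigned int	spec_mask;	/* records which params were seen */
		bool		lazytime;
	};

	#define EXAMPLE_SPEC_active_logs	(1 << 0)

	static int example_parse_param(struct fs_context *fc, struct fs_parameter *param)
	{
		struct example_fs_context *ctx = fc->fs_private;
		struct fs_parse_result result;
		int opt;

		opt = fs_parse(fc, example_param_specs, param, &result);
		if (opt < 0)
			return opt;

		switch (opt) {
		case Opt_active_logs:
			ctx->active_logs = result.uint_32;
			ctx->spec_mask |= EXAMPLE_SPEC_active_logs;
			break;
		case Opt_lazytime:
			ctx->lazytime = true;
			break;
		}
		return 0;
	}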
+
static struct file_system_type f2fs_fs_type = {
.owner = THIS_MODULE,
.name = "f2fs",
- .mount = f2fs_mount,
+ .init_fs_context = f2fs_init_fs_context,
.kill_sb = kill_f2fs_super,
.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
@@ -5058,10 +5554,15 @@ static int __init init_f2fs_fs(void)
err = f2fs_create_casefold_cache();
if (err)
goto free_compress_cache;
- err = register_filesystem(&f2fs_fs_type);
+ err = f2fs_init_xattr_cache();
if (err)
goto free_casefold_cache;
+ err = register_filesystem(&f2fs_fs_type);
+ if (err)
+ goto free_xattr_cache;
return 0;
+free_xattr_cache:
+ f2fs_destroy_xattr_cache();
free_casefold_cache:
f2fs_destroy_casefold_cache();
free_compress_cache:
@@ -5102,6 +5603,7 @@ fail:
static void __exit exit_f2fs_fs(void)
{
unregister_filesystem(&f2fs_fs_type);
+ f2fs_destroy_xattr_cache();
f2fs_destroy_casefold_cache();
f2fs_destroy_compress_cache();
f2fs_destroy_compress_mempool();
@@ -5127,5 +5629,3 @@ module_exit(exit_f2fs_fs)
MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
-MODULE_SOFTDEP("pre: crc32");
-
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 6b99dc49f776..c42f4f979d13 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -61,6 +61,12 @@ struct f2fs_attr {
int id;
};
+struct f2fs_base_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct f2fs_base_attr *a, char *buf);
+ ssize_t (*store)(struct f2fs_base_attr *a, const char *buf, size_t len);
+};
+
static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf);
@@ -229,6 +235,9 @@ static ssize_t features_show(struct f2fs_attr *a,
if (f2fs_sb_has_compression(sbi))
len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "compression");
+ if (f2fs_sb_has_packed_ssa(sbi))
+ len += sysfs_emit_at(buf, len, "%s%s",
+ len ? ", " : "", "packed_ssa");
len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "pin_file");
len += sysfs_emit_at(buf, len, "\n");
@@ -268,6 +277,29 @@ static ssize_t encoding_show(struct f2fs_attr *a,
return sysfs_emit(buf, "(none)\n");
}
+static ssize_t encoding_flags_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ return sysfs_emit(buf, "%x\n",
+ le16_to_cpu(F2FS_RAW_SUPER(sbi)->s_encoding_flags));
+}
+
+static ssize_t effective_lookup_mode_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ switch (F2FS_OPTION(sbi).lookup_mode) {
+ case LOOKUP_PERF:
+ return sysfs_emit(buf, "perf\n");
+ case LOOKUP_COMPAT:
+ return sysfs_emit(buf, "compat\n");
+ case LOOKUP_AUTO:
+ if (sb_no_casefold_compat_fallback(sbi->sb))
+ return sysfs_emit(buf, "auto:perf\n");
+ return sysfs_emit(buf, "auto:compat\n");
+ }
+ return 0;
+}
+
static ssize_t mounted_time_sec_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
@@ -488,12 +520,12 @@ out:
return ret;
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (a->struct_type == FAULT_INFO_TYPE) {
- if (f2fs_build_fault_attr(sbi, 0, t))
+ if (f2fs_build_fault_attr(sbi, 0, t, FAULT_TYPE))
return -EINVAL;
return count;
}
if (a->struct_type == FAULT_INFO_RATE) {
- if (f2fs_build_fault_attr(sbi, t, 0))
+ if (f2fs_build_fault_attr(sbi, t, 0, FAULT_RATE))
return -EINVAL;
return count;
}
@@ -615,6 +647,27 @@ out:
return count;
}
+ if (!strcmp(a->attr.name, "gc_no_zoned_gc_percent")) {
+ if (t > 100)
+ return -EINVAL;
+ *ui = (unsigned int)t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "gc_boost_zoned_gc_percent")) {
+ if (t > 100)
+ return -EINVAL;
+ *ui = (unsigned int)t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "gc_valid_thresh_ratio")) {
+ if (t > 100)
+ return -EINVAL;
+ *ui = (unsigned int)t;
+ return count;
+ }
+
#ifdef CONFIG_F2FS_IOSTAT
if (!strcmp(a->attr.name, "iostat_enable")) {
sbi->iostat_enable = !!t;
@@ -811,6 +864,48 @@ out:
return count;
}
+ if (!strcmp(a->attr.name, "reserved_pin_section")) {
+ if (t > GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))
+ return -EINVAL;
+ *ui = (unsigned int)t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "gc_boost_gc_multiple")) {
+ if (t < 1 || t > SEGS_PER_SEC(sbi))
+ return -EINVAL;
+ sbi->gc_thread->boost_gc_multiple = (unsigned int)t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "gc_boost_gc_greedy")) {
+ if (t > GC_GREEDY)
+ return -EINVAL;
+ sbi->gc_thread->boost_gc_greedy = (unsigned int)t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "bggc_io_aware")) {
+ if (t < AWARE_ALL_IO || t > AWARE_NONE)
+ return -EINVAL;
+ sbi->bggc_io_aware = t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "allocate_section_hint")) {
+ if (t < 0 || t > MAIN_SECS(sbi))
+ return -EINVAL;
+ sbi->allocate_section_hint = t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "allocate_section_policy")) {
+ if (t < ALLOCATE_FORWARD_NOHINT || t > ALLOCATE_FORWARD_FROM_HINT)
+ return -EINVAL;
+ sbi->allocate_section_policy = t;
+ return count;
+ }
+
*ui = (unsigned int)t;
return count;
@@ -862,6 +957,25 @@ static void f2fs_sb_release(struct kobject *kobj)
complete(&sbi->s_kobj_unregister);
}
+static ssize_t f2fs_base_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct f2fs_base_attr *a = container_of(attr,
+ struct f2fs_base_attr, attr);
+
+ return a->show ? a->show(a, buf) : 0;
+}
+
+static ssize_t f2fs_base_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t len)
+{
+ struct f2fs_base_attr *a = container_of(attr,
+ struct f2fs_base_attr, attr);
+
+ return a->store ? a->store(a, buf, len) : 0;
+}
+
/*
* Note that there are three feature list entries:
* 1) /sys/fs/f2fs/features
@@ -880,18 +994,50 @@ static void f2fs_sb_release(struct kobject *kobj)
* please add new on-disk feature in this list only.
* - ref. F2FS_SB_FEATURE_RO_ATTR()
*/
-static ssize_t f2fs_feature_show(struct f2fs_attr *a,
- struct f2fs_sb_info *sbi, char *buf)
+static ssize_t f2fs_feature_show(struct f2fs_base_attr *a, char *buf)
{
return sysfs_emit(buf, "supported\n");
}
#define F2FS_FEATURE_RO_ATTR(_name) \
-static struct f2fs_attr f2fs_attr_##_name = { \
+static struct f2fs_base_attr f2fs_base_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = 0444 }, \
.show = f2fs_feature_show, \
}
+static ssize_t f2fs_tune_show(struct f2fs_base_attr *a, char *buf)
+{
+ unsigned int res = 0;
+
+ if (!strcmp(a->attr.name, "reclaim_caches_kb"))
+ res = f2fs_donate_files();
+
+ return sysfs_emit(buf, "%u\n", res);
+}
+
+static ssize_t f2fs_tune_store(struct f2fs_base_attr *a,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+ int ret;
+
+ ret = kstrtoul(skip_spaces(buf), 0, &t);
+ if (ret)
+ return ret;
+
+ if (!strcmp(a->attr.name, "reclaim_caches_kb"))
+ f2fs_reclaim_caches(t);
+
+ return count;
+}
+
+#define F2FS_TUNE_RW_ATTR(_name) \
+static struct f2fs_base_attr f2fs_base_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = 0644 }, \
+ .show = f2fs_tune_show, \
+ .store = f2fs_tune_store, \
+}
+
static ssize_t f2fs_sb_feature_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
@@ -986,6 +1132,8 @@ GC_THREAD_RW_ATTR(gc_no_gc_sleep_time, no_gc_sleep_time);
GC_THREAD_RW_ATTR(gc_no_zoned_gc_percent, no_zoned_gc_percent);
GC_THREAD_RW_ATTR(gc_boost_zoned_gc_percent, boost_zoned_gc_percent);
GC_THREAD_RW_ATTR(gc_valid_thresh_ratio, valid_thresh_ratio);
+GC_THREAD_RW_ATTR(gc_boost_gc_multiple, boost_gc_multiple);
+GC_THREAD_RW_ATTR(gc_boost_gc_greedy, boost_gc_greedy);
/* SM_INFO ATTR */
SM_INFO_RW_ATTR(reclaim_segments, rec_prefree_segments);
@@ -1030,6 +1178,8 @@ F2FS_SBI_GENERAL_RW_ATTR(max_victim_search);
F2FS_SBI_GENERAL_RW_ATTR(migration_granularity);
F2FS_SBI_GENERAL_RW_ATTR(migration_window_granularity);
F2FS_SBI_GENERAL_RW_ATTR(dir_level);
+F2FS_SBI_GENERAL_RW_ATTR(allocate_section_hint);
+F2FS_SBI_GENERAL_RW_ATTR(allocate_section_policy);
#ifdef CONFIG_F2FS_IOSTAT
F2FS_SBI_GENERAL_RW_ATTR(iostat_enable);
F2FS_SBI_GENERAL_RW_ATTR(iostat_period_ms);
@@ -1063,8 +1213,12 @@ F2FS_SBI_GENERAL_RW_ATTR(last_age_weight);
F2FS_SBI_GENERAL_RW_ATTR(max_read_extent_count);
#ifdef CONFIG_BLK_DEV_ZONED
F2FS_SBI_GENERAL_RO_ATTR(unusable_blocks_per_sec);
+F2FS_SBI_GENERAL_RO_ATTR(max_open_zones);
F2FS_SBI_GENERAL_RW_ATTR(blkzone_alloc_policy);
#endif
+F2FS_SBI_GENERAL_RW_ATTR(carve_out);
+F2FS_SBI_GENERAL_RW_ATTR(reserved_pin_section);
+F2FS_SBI_GENERAL_RW_ATTR(bggc_io_aware);
/* STAT_INFO ATTR */
#ifdef CONFIG_F2FS_STAT_FS
@@ -1100,6 +1254,8 @@ F2FS_GENERAL_RO_ATTR(features);
F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
F2FS_GENERAL_RO_ATTR(unusable);
F2FS_GENERAL_RO_ATTR(encoding);
+F2FS_GENERAL_RO_ATTR(encoding_flags);
+F2FS_GENERAL_RO_ATTR(effective_lookup_mode);
F2FS_GENERAL_RO_ATTR(mounted_time_sec);
F2FS_GENERAL_RO_ATTR(main_blkaddr);
F2FS_GENERAL_RO_ATTR(pending_discard);
@@ -1141,6 +1297,10 @@ F2FS_FEATURE_RO_ATTR(readonly);
F2FS_FEATURE_RO_ATTR(compression);
#endif
F2FS_FEATURE_RO_ATTR(pin_file);
+#ifdef CONFIG_UNICODE
+F2FS_FEATURE_RO_ATTR(linear_lookup);
+#endif
+F2FS_FEATURE_RO_ATTR(packed_ssa);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -1151,6 +1311,8 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(gc_no_zoned_gc_percent),
ATTR_LIST(gc_boost_zoned_gc_percent),
ATTR_LIST(gc_valid_thresh_ratio),
+ ATTR_LIST(gc_boost_gc_multiple),
+ ATTR_LIST(gc_boost_gc_greedy),
ATTR_LIST(gc_idle),
ATTR_LIST(gc_urgent),
ATTR_LIST(reclaim_segments),
@@ -1187,6 +1349,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(discard_idle_interval),
ATTR_LIST(gc_idle_interval),
ATTR_LIST(umount_discard_timeout),
+ ATTR_LIST(bggc_io_aware),
#ifdef CONFIG_F2FS_IOSTAT
ATTR_LIST(iostat_enable),
ATTR_LIST(iostat_period_ms),
@@ -1212,6 +1375,8 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(reserved_blocks),
ATTR_LIST(current_reserved_blocks),
ATTR_LIST(encoding),
+ ATTR_LIST(encoding_flags),
+ ATTR_LIST(effective_lookup_mode),
ATTR_LIST(mounted_time_sec),
#ifdef CONFIG_F2FS_STAT_FS
ATTR_LIST(cp_foreground_calls),
@@ -1224,6 +1389,7 @@ static struct attribute *f2fs_attrs[] = {
#endif
#ifdef CONFIG_BLK_DEV_ZONED
ATTR_LIST(unusable_blocks_per_sec),
+ ATTR_LIST(max_open_zones),
ATTR_LIST(blkzone_alloc_policy),
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -1252,41 +1418,50 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(warm_data_age_threshold),
ATTR_LIST(last_age_weight),
ATTR_LIST(max_read_extent_count),
+ ATTR_LIST(carve_out),
+ ATTR_LIST(reserved_pin_section),
+ ATTR_LIST(allocate_section_hint),
+ ATTR_LIST(allocate_section_policy),
NULL,
};
ATTRIBUTE_GROUPS(f2fs);
+#define BASE_ATTR_LIST(name) (&f2fs_base_attr_##name.attr)
static struct attribute *f2fs_feat_attrs[] = {
#ifdef CONFIG_FS_ENCRYPTION
- ATTR_LIST(encryption),
- ATTR_LIST(test_dummy_encryption_v2),
+ BASE_ATTR_LIST(encryption),
+ BASE_ATTR_LIST(test_dummy_encryption_v2),
#if IS_ENABLED(CONFIG_UNICODE)
- ATTR_LIST(encrypted_casefold),
+ BASE_ATTR_LIST(encrypted_casefold),
#endif
#endif /* CONFIG_FS_ENCRYPTION */
#ifdef CONFIG_BLK_DEV_ZONED
- ATTR_LIST(block_zoned),
+ BASE_ATTR_LIST(block_zoned),
#endif
- ATTR_LIST(atomic_write),
- ATTR_LIST(extra_attr),
- ATTR_LIST(project_quota),
- ATTR_LIST(inode_checksum),
- ATTR_LIST(flexible_inline_xattr),
- ATTR_LIST(quota_ino),
- ATTR_LIST(inode_crtime),
- ATTR_LIST(lost_found),
+ BASE_ATTR_LIST(atomic_write),
+ BASE_ATTR_LIST(extra_attr),
+ BASE_ATTR_LIST(project_quota),
+ BASE_ATTR_LIST(inode_checksum),
+ BASE_ATTR_LIST(flexible_inline_xattr),
+ BASE_ATTR_LIST(quota_ino),
+ BASE_ATTR_LIST(inode_crtime),
+ BASE_ATTR_LIST(lost_found),
#ifdef CONFIG_FS_VERITY
- ATTR_LIST(verity),
+ BASE_ATTR_LIST(verity),
#endif
- ATTR_LIST(sb_checksum),
+ BASE_ATTR_LIST(sb_checksum),
#if IS_ENABLED(CONFIG_UNICODE)
- ATTR_LIST(casefold),
+ BASE_ATTR_LIST(casefold),
#endif
- ATTR_LIST(readonly),
+ BASE_ATTR_LIST(readonly),
#ifdef CONFIG_F2FS_FS_COMPRESSION
- ATTR_LIST(compression),
+ BASE_ATTR_LIST(compression),
+#endif
+ BASE_ATTR_LIST(pin_file),
+#ifdef CONFIG_UNICODE
+ BASE_ATTR_LIST(linear_lookup),
#endif
- ATTR_LIST(pin_file),
+ BASE_ATTR_LIST(packed_ssa),
NULL,
};
ATTRIBUTE_GROUPS(f2fs_feat);
@@ -1322,6 +1497,7 @@ F2FS_SB_FEATURE_RO_ATTR(casefold, CASEFOLD);
F2FS_SB_FEATURE_RO_ATTR(compression, COMPRESSION);
F2FS_SB_FEATURE_RO_ATTR(readonly, RO);
F2FS_SB_FEATURE_RO_ATTR(device_alias, DEVICE_ALIAS);
+F2FS_SB_FEATURE_RO_ATTR(packed_ssa, PACKED_SSA);
static struct attribute *f2fs_sb_feat_attrs[] = {
ATTR_LIST(sb_encryption),
@@ -1339,10 +1515,19 @@ static struct attribute *f2fs_sb_feat_attrs[] = {
ATTR_LIST(sb_compression),
ATTR_LIST(sb_readonly),
ATTR_LIST(sb_device_alias),
+ ATTR_LIST(sb_packed_ssa),
NULL,
};
ATTRIBUTE_GROUPS(f2fs_sb_feat);
+F2FS_TUNE_RW_ATTR(reclaim_caches_kb);
+
+static struct attribute *f2fs_tune_attrs[] = {
+ BASE_ATTR_LIST(reclaim_caches_kb),
+ NULL,
+};
+ATTRIBUTE_GROUPS(f2fs_tune);
+
static const struct sysfs_ops f2fs_attr_ops = {
.show = f2fs_attr_show,
.store = f2fs_attr_store,
@@ -1362,15 +1547,34 @@ static struct kset f2fs_kset = {
.kobj = {.ktype = &f2fs_ktype},
};
+static const struct sysfs_ops f2fs_feat_attr_ops = {
+ .show = f2fs_base_attr_show,
+ .store = f2fs_base_attr_store,
+};
+
static const struct kobj_type f2fs_feat_ktype = {
.default_groups = f2fs_feat_groups,
- .sysfs_ops = &f2fs_attr_ops,
+ .sysfs_ops = &f2fs_feat_attr_ops,
};
static struct kobject f2fs_feat = {
.kset = &f2fs_kset,
};
+static const struct sysfs_ops f2fs_tune_attr_ops = {
+ .show = f2fs_base_attr_show,
+ .store = f2fs_base_attr_store,
+};
+
+static const struct kobj_type f2fs_tune_ktype = {
+ .default_groups = f2fs_tune_groups,
+ .sysfs_ops = &f2fs_tune_attr_ops,
+};
+
+static struct kobject f2fs_tune = {
+ .kset = &f2fs_kset,
+};
+
static ssize_t f2fs_stat_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
@@ -1472,7 +1676,7 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
le32_to_cpu(sbi->raw_super->segment_count_main);
int i, j;
- seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n"
+ seq_puts(seq, "format: segment_type|valid_blocks|bitmaps|mtime\n"
"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");
for (i = 0; i < total_segs; i++) {
@@ -1482,6 +1686,7 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
seq_printf(seq, "%d|%-3u|", se->type, se->valid_blocks);
for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
seq_printf(seq, " %.2x", se->cur_valid_map[j]);
+ seq_printf(seq, "| %llx", se->mtime);
seq_putc(seq, '\n');
}
return 0;
@@ -1572,12 +1777,15 @@ static int __maybe_unused disk_map_seq_show(struct seq_file *seq,
seq_printf(seq, " Main : 0x%010x (%10d)\n",
SM_I(sbi)->main_blkaddr,
le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_main));
- seq_printf(seq, " # of Sections : %12d\n",
- le32_to_cpu(F2FS_RAW_SUPER(sbi)->section_count));
+ seq_printf(seq, " Block size : %12lu KB\n", F2FS_BLKSIZE >> 10);
+ seq_printf(seq, " Segment size : %12d MB\n",
+ (BLKS_PER_SEG(sbi) << (F2FS_BLKSIZE_BITS - 10)) >> 10);
seq_printf(seq, " Segs/Sections : %12d\n",
SEGS_PER_SEC(sbi));
seq_printf(seq, " Section size : %12d MB\n",
- SEGS_PER_SEC(sbi) << 1);
+ (BLKS_PER_SEC(sbi) << (F2FS_BLKSIZE_BITS - 10)) >> 10);
+ seq_printf(seq, " # of Sections : %12d\n",
+ le32_to_cpu(F2FS_RAW_SUPER(sbi)->section_count));
if (!f2fs_is_multi_device(sbi))
return 0;
@@ -1591,6 +1799,87 @@ static int __maybe_unused disk_map_seq_show(struct seq_file *seq,
return 0;
}
+static int __maybe_unused donation_list_seq_show(struct seq_file *seq,
+ void *offset)
+{
+ struct super_block *sb = seq->private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct inode *inode;
+ struct f2fs_inode_info *fi;
+ struct dentry *dentry;
+ char *buf, *path;
+ int i;
+
+ buf = f2fs_getname(sbi);
+ if (!buf)
+ return 0;
+
+ seq_printf(seq, "Donation List\n");
+ seq_printf(seq, " # of files : %u\n", sbi->donate_files);
+ seq_printf(seq, " %-50s %10s %20s %20s %22s\n",
+ "File path", "Status", "Donation offset (kb)",
+ "Donation size (kb)", "File cached size (kb)");
+ seq_printf(seq, "---\n");
+
+ for (i = 0; i < sbi->donate_files; i++) {
+ spin_lock(&sbi->inode_lock[DONATE_INODE]);
+ if (list_empty(&sbi->inode_list[DONATE_INODE])) {
+ spin_unlock(&sbi->inode_lock[DONATE_INODE]);
+ break;
+ }
+ fi = list_first_entry(&sbi->inode_list[DONATE_INODE],
+ struct f2fs_inode_info, gdonate_list);
+ list_move_tail(&fi->gdonate_list, &sbi->inode_list[DONATE_INODE]);
+ inode = igrab(&fi->vfs_inode);
+ spin_unlock(&sbi->inode_lock[DONATE_INODE]);
+
+ if (!inode)
+ continue;
+
+ inode_lock_shared(inode);
+
+ dentry = d_find_alias(inode);
+ if (!dentry) {
+ path = NULL;
+ } else {
+ path = dentry_path_raw(dentry, buf, PATH_MAX);
+ if (IS_ERR(path))
+ goto next;
+ }
+ seq_printf(seq, " %-50s %10s %20llu %20llu %22llu\n",
+ path ? path : "<unlinked>",
+ is_inode_flag_set(inode, FI_DONATE_FINISHED) ?
+ "Evicted" : "Donated",
+ (loff_t)fi->donate_start << (PAGE_SHIFT - 10),
+ (loff_t)(fi->donate_end + 1) << (PAGE_SHIFT - 10),
+ (loff_t)inode->i_mapping->nrpages << (PAGE_SHIFT - 10));
+next:
+ dput(dentry);
+ inode_unlock_shared(inode);
+ iput(inode);
+ }
+ f2fs_putname(buf);
+ return 0;
+}
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+static int __maybe_unused inject_stats_seq_show(struct seq_file *seq,
+ void *offset)
+{
+ struct super_block *sb = seq->private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
+ int i;
+
+ seq_puts(seq, "fault_type injected_count\n");
+
+ for (i = 0; i < FAULT_MAX; i++)
+ seq_printf(seq, "%-24s%-10u\n", f2fs_fault_name[i],
+ ffi->inject_count[i]);
+ return 0;
+}
+#endif
+
int __init f2fs_init_sysfs(void)
{
int ret;
@@ -1606,6 +1895,11 @@ int __init f2fs_init_sysfs(void)
if (ret)
goto put_kobject;
+ ret = kobject_init_and_add(&f2fs_tune, &f2fs_tune_ktype,
+ NULL, "tuning");
+ if (ret)
+ goto put_kobject;
+
f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
if (!f2fs_proc_root) {
ret = -ENOMEM;
@@ -1613,7 +1907,9 @@ int __init f2fs_init_sysfs(void)
}
return 0;
+
put_kobject:
+ kobject_put(&f2fs_tune);
kobject_put(&f2fs_feat);
kset_unregister(&f2fs_kset);
return ret;
@@ -1621,6 +1917,7 @@ put_kobject:
void f2fs_exit_sysfs(void)
{
+ kobject_put(&f2fs_tune);
kobject_put(&f2fs_feat);
kset_unregister(&f2fs_kset);
remove_proc_entry("fs/f2fs", NULL);
@@ -1674,6 +1971,12 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
discard_plist_seq_show, sb);
proc_create_single_data("disk_map", 0444, sbi->s_proc,
disk_map_seq_show, sb);
+ proc_create_single_data("donation_list", 0444, sbi->s_proc,
+ donation_list_seq_show, sb);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ proc_create_single_data("inject_stats", 0444, sbi->s_proc,
+ inject_stats_seq_show, sb);
+#endif
return 0;
put_feature_list_kobj:
kobject_put(&sbi->s_feature_list_kobj);
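The sysfs hunks above register a new "tuning" kobject in the f2fs kset with a single reclaim_caches_kb attribute served by the base-attr show/store ops. A minimal userspace sketch of reading that knob, assuming it surfaces as /sys/fs/f2fs/tuning/reclaim_caches_kb (the path is inferred from the kset and kobject names, not spelled out in the patch):

/*
 * Hedged sketch: read the tuning knob registered above. The sysfs path is
 * an assumption derived from the "f2fs" kset and the "tuning" kobject name.
 */
#include <stdio.h>

int main(void)
{
        char buf[64];
        FILE *f = fopen("/sys/fs/f2fs/tuning/reclaim_caches_kb", "r");

        if (!f)
                return 1;
        if (fgets(buf, sizeof(buf), f))
                printf("reclaim_caches_kb: %s", buf);
        fclose(f);
        return 0;
}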
diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
index 2287f238ae09..05b935b55216 100644
--- a/fs/f2fs/verity.c
+++ b/fs/f2fs/verity.c
@@ -263,7 +263,7 @@ static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
index += f2fs_verity_metadata_pos(inode) >> PAGE_SHIFT;
- folio = __filemap_get_folio(inode->i_mapping, index, FGP_ACCESSED, 0);
+ folio = f2fs_filemap_get_folio(inode->i_mapping, index, FGP_ACCESSED, 0);
if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
@@ -287,6 +287,8 @@ static int f2fs_write_merkle_tree_block(struct inode *inode, const void *buf,
}
const struct fsverity_operations f2fs_verityops = {
+ .inode_info_offs = (int)offsetof(struct f2fs_inode_info, i_verity_info) -
+ (int)offsetof(struct f2fs_inode_info, vfs_inode),
.begin_enable_verity = f2fs_begin_enable_verity,
.end_enable_verity = f2fs_end_enable_verity,
.get_verity_descriptor = f2fs_get_verity_descriptor,
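The new .inode_info_offs entry records the signed byte offset from the embedded VFS inode to f2fs's i_verity_info pointer, letting the fsverity core locate the per-inode info without a dedicated field in struct inode. An illustrative helper showing how such an offset can be applied (the function name is an assumption, not the fsverity core's actual accessor):

/*
 * Illustrative only: walk from the VFS inode (embedded in the fs-private
 * inode) to the filesystem's i_verity_info member using the registered
 * offset. The helper name is hypothetical.
 */
static inline struct fsverity_info **verity_info_slot(const struct fsverity_operations *ops,
                                                      struct inode *inode)
{
        return (struct fsverity_info **)((char *)inode + ops->inode_info_offs);
}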
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 3f3874943679..b4e5c406632f 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -23,11 +23,12 @@
#include "xattr.h"
#include "segment.h"
+static struct kmem_cache *inline_xattr_slab;
static void *xattr_alloc(struct f2fs_sb_info *sbi, int size, bool *is_inline)
{
- if (likely(size == sbi->inline_xattr_slab_size)) {
+ if (likely(size == DEFAULT_XATTR_SLAB_SIZE)) {
*is_inline = true;
- return f2fs_kmem_cache_alloc(sbi->inline_xattr_slab,
+ return f2fs_kmem_cache_alloc(inline_xattr_slab,
GFP_F2FS_ZERO, false, sbi);
}
*is_inline = false;
@@ -38,7 +39,7 @@ static void xattr_free(struct f2fs_sb_info *sbi, void *xattr_addr,
bool is_inline)
{
if (is_inline)
- kmem_cache_free(sbi->inline_xattr_slab, xattr_addr);
+ kmem_cache_free(inline_xattr_slab, xattr_addr);
else
kfree(xattr_addr);
}
@@ -136,7 +137,7 @@ static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
#ifdef CONFIG_F2FS_FS_SECURITY
static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
- void *page)
+ void *folio)
{
const struct xattr *xattr;
int err = 0;
@@ -144,7 +145,7 @@ static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY,
xattr->name, xattr->value,
- xattr->value_len, (struct page *)page, 0);
+ xattr->value_len, folio, 0);
if (err < 0)
break;
}
@@ -152,10 +153,10 @@ static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
}
int f2fs_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, struct page *ipage)
+ const struct qstr *qstr, struct folio *ifolio)
{
return security_inode_init_security(inode, dir, qstr,
- &f2fs_initxattrs, ipage);
+ f2fs_initxattrs, ifolio);
}
#endif
@@ -271,25 +272,25 @@ static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode,
return entry;
}
-static int read_inline_xattr(struct inode *inode, struct page *ipage,
+static int read_inline_xattr(struct inode *inode, struct folio *ifolio,
void *txattr_addr)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int inline_size = inline_xattr_size(inode);
- struct page *page = NULL;
+ struct folio *folio = NULL;
void *inline_addr;
- if (ipage) {
- inline_addr = inline_xattr_addr(inode, ipage);
+ if (ifolio) {
+ inline_addr = inline_xattr_addr(inode, ifolio);
} else {
- page = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ folio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- inline_addr = inline_xattr_addr(inode, page);
+ inline_addr = inline_xattr_addr(inode, folio);
}
memcpy(txattr_addr, inline_addr, inline_size);
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return 0;
}
@@ -299,22 +300,22 @@ static int read_xattr_block(struct inode *inode, void *txattr_addr)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
unsigned int inline_size = inline_xattr_size(inode);
- struct page *xpage;
+ struct folio *xfolio;
void *xattr_addr;
/* The inode already has an extended attribute block. */
- xpage = f2fs_get_node_page(sbi, xnid);
- if (IS_ERR(xpage))
- return PTR_ERR(xpage);
+ xfolio = f2fs_get_xnode_folio(sbi, xnid);
+ if (IS_ERR(xfolio))
+ return PTR_ERR(xfolio);
- xattr_addr = page_address(xpage);
+ xattr_addr = folio_address(xfolio);
memcpy(txattr_addr + inline_size, xattr_addr, VALID_XATTR_BLOCK_SIZE);
- f2fs_put_page(xpage, 1);
+ f2fs_folio_put(xfolio, true);
return 0;
}
-static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
+static int lookup_all_xattrs(struct inode *inode, struct folio *ifolio,
unsigned int index, unsigned int len,
const char *name, struct f2fs_xattr_entry **xe,
void **base_addr, int *base_size,
@@ -338,7 +339,7 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
/* read from inline xattr */
if (inline_size) {
- err = read_inline_xattr(inode, ipage, txattr_addr);
+ err = read_inline_xattr(inode, ifolio, txattr_addr);
if (err)
goto out;
@@ -385,7 +386,7 @@ out:
return err;
}
-static int read_all_xattrs(struct inode *inode, struct page *ipage,
+static int read_all_xattrs(struct inode *inode, struct folio *ifolio,
void **base_addr)
{
struct f2fs_xattr_header *header;
@@ -402,7 +403,7 @@ static int read_all_xattrs(struct inode *inode, struct page *ipage,
/* read from inline xattr */
if (inline_size) {
- err = read_inline_xattr(inode, ipage, txattr_addr);
+ err = read_inline_xattr(inode, ifolio, txattr_addr);
if (err)
goto fail;
}
@@ -429,14 +430,14 @@ fail:
}
static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
- void *txattr_addr, struct page *ipage)
+ void *txattr_addr, struct folio *ifolio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
size_t inline_size = inline_xattr_size(inode);
- struct page *in_page = NULL;
+ struct folio *in_folio = NULL;
void *xattr_addr;
void *inline_addr = NULL;
- struct page *xpage;
+ struct folio *xfolio;
nid_t new_nid = 0;
int err = 0;
@@ -446,73 +447,73 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
/* write to inline xattr */
if (inline_size) {
- if (ipage) {
- inline_addr = inline_xattr_addr(inode, ipage);
+ if (ifolio) {
+ inline_addr = inline_xattr_addr(inode, ifolio);
} else {
- in_page = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(in_page)) {
+ in_folio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(in_folio)) {
f2fs_alloc_nid_failed(sbi, new_nid);
- return PTR_ERR(in_page);
+ return PTR_ERR(in_folio);
}
- inline_addr = inline_xattr_addr(inode, in_page);
+ inline_addr = inline_xattr_addr(inode, in_folio);
}
- f2fs_wait_on_page_writeback(ipage ? ipage : in_page,
+ f2fs_folio_wait_writeback(ifolio ? ifolio : in_folio,
NODE, true, true);
/* no need to use xattr node block */
if (hsize <= inline_size) {
err = f2fs_truncate_xattr_node(inode);
f2fs_alloc_nid_failed(sbi, new_nid);
if (err) {
- f2fs_put_page(in_page, 1);
+ f2fs_folio_put(in_folio, true);
return err;
}
memcpy(inline_addr, txattr_addr, inline_size);
- set_page_dirty(ipage ? ipage : in_page);
+ folio_mark_dirty(ifolio ? ifolio : in_folio);
goto in_page_out;
}
}
/* write to xattr node block */
if (F2FS_I(inode)->i_xattr_nid) {
- xpage = f2fs_get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
- if (IS_ERR(xpage)) {
- err = PTR_ERR(xpage);
+ xfolio = f2fs_get_xnode_folio(sbi, F2FS_I(inode)->i_xattr_nid);
+ if (IS_ERR(xfolio)) {
+ err = PTR_ERR(xfolio);
f2fs_alloc_nid_failed(sbi, new_nid);
goto in_page_out;
}
f2fs_bug_on(sbi, new_nid);
- f2fs_wait_on_page_writeback(xpage, NODE, true, true);
+ f2fs_folio_wait_writeback(xfolio, NODE, true, true);
} else {
struct dnode_of_data dn;
set_new_dnode(&dn, inode, NULL, NULL, new_nid);
- xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
- if (IS_ERR(xpage)) {
- err = PTR_ERR(xpage);
+ xfolio = f2fs_new_node_folio(&dn, XATTR_NODE_OFFSET);
+ if (IS_ERR(xfolio)) {
+ err = PTR_ERR(xfolio);
f2fs_alloc_nid_failed(sbi, new_nid);
goto in_page_out;
}
f2fs_alloc_nid_done(sbi, new_nid);
}
- xattr_addr = page_address(xpage);
+ xattr_addr = folio_address(xfolio);
if (inline_size)
memcpy(inline_addr, txattr_addr, inline_size);
memcpy(xattr_addr, txattr_addr + inline_size, VALID_XATTR_BLOCK_SIZE);
if (inline_size)
- set_page_dirty(ipage ? ipage : in_page);
- set_page_dirty(xpage);
+ folio_mark_dirty(ifolio ? ifolio : in_folio);
+ folio_mark_dirty(xfolio);
- f2fs_put_page(xpage, 1);
+ f2fs_folio_put(xfolio, true);
in_page_out:
- f2fs_put_page(in_page, 1);
+ f2fs_folio_put(in_folio, true);
return err;
}
int f2fs_getxattr(struct inode *inode, int index, const char *name,
- void *buffer, size_t buffer_size, struct page *ipage)
+ void *buffer, size_t buffer_size, struct folio *ifolio)
{
struct f2fs_xattr_entry *entry = NULL;
int error;
@@ -528,11 +529,11 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
if (len > F2FS_NAME_LEN)
return -ERANGE;
- if (!ipage)
+ if (!ifolio)
f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
- error = lookup_all_xattrs(inode, ipage, index, len, name,
+ error = lookup_all_xattrs(inode, ifolio, index, len, name,
&entry, &base_addr, &base_size, &is_inline);
- if (!ipage)
+ if (!ifolio)
f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
if (error)
return error;
@@ -627,7 +628,7 @@ static bool f2fs_xattr_value_same(struct f2fs_xattr_entry *entry,
static int __f2fs_setxattr(struct inode *inode, int index,
const char *name, const void *value, size_t size,
- struct page *ipage, int flags)
+ struct folio *ifolio, int flags)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_xattr_entry *here, *last;
@@ -651,7 +652,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (size > MAX_VALUE_LEN(inode))
return -E2BIG;
retry:
- error = read_all_xattrs(inode, ipage, &base_addr);
+ error = read_all_xattrs(inode, ifolio, &base_addr);
if (error)
return error;
@@ -766,7 +767,7 @@ retry:
*(u32 *)((u8 *)last + newsize) = 0;
}
- error = write_all_xattrs(inode, new_hsize, base_addr, ipage);
+ error = write_all_xattrs(inode, new_hsize, base_addr, ifolio);
if (error)
goto exit;
@@ -800,7 +801,7 @@ exit:
int f2fs_setxattr(struct inode *inode, int index, const char *name,
const void *value, size_t size,
- struct page *ipage, int flags)
+ struct folio *ifolio, int flags)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int err;
@@ -815,14 +816,14 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name,
return err;
/* this case is only from f2fs_init_inode_metadata */
- if (ipage)
+ if (ifolio)
return __f2fs_setxattr(inode, index, name, value,
- size, ipage, flags);
+ size, ifolio, flags);
f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
f2fs_down_write(&F2FS_I(inode)->i_xattr_sem);
- err = __f2fs_setxattr(inode, index, name, value, size, ipage, flags);
+ err = __f2fs_setxattr(inode, index, name, value, size, NULL, flags);
f2fs_up_write(&F2FS_I(inode)->i_xattr_sem);
f2fs_unlock_op(sbi);
@@ -830,25 +831,14 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name,
return err;
}
-int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi)
+int __init f2fs_init_xattr_cache(void)
{
- dev_t dev = sbi->sb->s_bdev->bd_dev;
- char slab_name[32];
-
- sprintf(slab_name, "f2fs_xattr_entry-%u:%u", MAJOR(dev), MINOR(dev));
-
- sbi->inline_xattr_slab_size = F2FS_OPTION(sbi).inline_xattr_size *
- sizeof(__le32) + XATTR_PADDING_SIZE;
-
- sbi->inline_xattr_slab = f2fs_kmem_cache_create(slab_name,
- sbi->inline_xattr_slab_size);
- if (!sbi->inline_xattr_slab)
- return -ENOMEM;
-
- return 0;
+ inline_xattr_slab = f2fs_kmem_cache_create("f2fs_xattr_entry",
+ DEFAULT_XATTR_SLAB_SIZE);
+ return inline_xattr_slab ? 0 : -ENOMEM;
}
-void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi)
+void f2fs_destroy_xattr_cache(void)
{
- kmem_cache_destroy(sbi->inline_xattr_slab);
-}
+ kmem_cache_destroy(inline_xattr_slab);
+}
\ No newline at end of file
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index a005ffdcf717..bce3d93e4755 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -89,6 +89,8 @@ struct f2fs_xattr_entry {
F2FS_TOTAL_EXTRA_ATTR_SIZE / sizeof(__le32) - \
DEF_INLINE_RESERVED_SIZE - \
MIN_INLINE_DENTRY_SIZE / sizeof(__le32))
+#define DEFAULT_XATTR_SLAB_SIZE (DEFAULT_INLINE_XATTR_ADDRS * \
+ sizeof(__le32) + XATTR_PADDING_SIZE)
/*
* On-disk structure of f2fs_xattr
@@ -127,39 +129,39 @@ extern const struct xattr_handler f2fs_xattr_security_handler;
extern const struct xattr_handler * const f2fs_xattr_handlers[];
-extern int f2fs_setxattr(struct inode *, int, const char *,
- const void *, size_t, struct page *, int);
-extern int f2fs_getxattr(struct inode *, int, const char *, void *,
- size_t, struct page *);
-extern ssize_t f2fs_listxattr(struct dentry *, char *, size_t);
-extern int f2fs_init_xattr_caches(struct f2fs_sb_info *);
-extern void f2fs_destroy_xattr_caches(struct f2fs_sb_info *);
+int f2fs_setxattr(struct inode *, int, const char *, const void *,
+ size_t, struct folio *, int);
+int f2fs_getxattr(struct inode *, int, const char *, void *,
+ size_t, struct folio *);
+ssize_t f2fs_listxattr(struct dentry *, char *, size_t);
+int __init f2fs_init_xattr_cache(void);
+void f2fs_destroy_xattr_cache(void);
#else
#define f2fs_xattr_handlers NULL
#define f2fs_listxattr NULL
static inline int f2fs_setxattr(struct inode *inode, int index,
const char *name, const void *value, size_t size,
- struct page *page, int flags)
+ struct folio *folio, int flags)
{
return -EOPNOTSUPP;
}
static inline int f2fs_getxattr(struct inode *inode, int index,
const char *name, void *buffer,
- size_t buffer_size, struct page *dpage)
+ size_t buffer_size, struct folio *dfolio)
{
return -EOPNOTSUPP;
}
-static inline int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi) { return 0; }
-static inline void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi) { }
+static inline int __init f2fs_init_xattr_cache(void) { return 0; }
+static inline void f2fs_destroy_xattr_cache(void) { }
#endif
#ifdef CONFIG_F2FS_FS_SECURITY
-extern int f2fs_init_security(struct inode *, struct inode *,
- const struct qstr *, struct page *);
+int f2fs_init_security(struct inode *, struct inode *,
+ const struct qstr *, struct folio *);
#else
static inline int f2fs_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, struct page *ipage)
+ const struct qstr *qstr, struct folio *ifolio)
{
return 0;
}
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index acbec5bdd521..92b091783966 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -1209,7 +1209,7 @@ EXPORT_SYMBOL_GPL(fat_alloc_new_dir);
static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
int *nr_cluster, struct msdos_dir_entry **de,
- struct buffer_head **bh, loff_t *i_pos)
+ struct buffer_head **bh)
{
struct super_block *sb = dir->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
@@ -1269,7 +1269,6 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
get_bh(bhs[n]);
*bh = bhs[n];
*de = (struct msdos_dir_entry *)((*bh)->b_data + offset);
- *i_pos = fat_make_i_pos(sb, *bh, *de);
/* Second stage: clear the rest of cluster, and write outs */
err = fat_zeroed_cluster(dir, start_blknr, ++n, bhs, MAX_BUF_PER_PAGE);
@@ -1298,7 +1297,7 @@ int fat_add_entries(struct inode *dir, void *slots, int nr_slots,
struct buffer_head *bh, *prev, *bhs[3]; /* 32*slots (672bytes) */
struct msdos_dir_entry *de;
int err, free_slots, i, nr_bhs;
- loff_t pos, i_pos;
+ loff_t pos;
sinfo->nr_slots = nr_slots;
@@ -1386,7 +1385,7 @@ found:
* add the cluster to dir.
*/
cluster = fat_add_new_entries(dir, slots, nr_slots, &nr_cluster,
- &de, &bh, &i_pos);
+ &de, &bh);
if (cluster < 0) {
err = cluster;
goto error_remove;
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 1db348f8f887..a7061c2ad8e4 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -356,7 +356,7 @@ int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
if (!fat_valid_entry(sbi, entry)) {
fatent_brelse(fatent);
- fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
+ fat_fs_error_ratelimit(sb, "invalid access to FAT (entry 0x%08x)", entry);
return -EIO;
}
diff --git a/fs/fat/file.c b/fs/fat/file.c
index e887e9ab7472..4fc49a614fb8 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -204,7 +204,7 @@ const struct file_operations fat_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
+ .mmap_prepare = generic_file_mmap_prepare,
.release = fat_file_release,
.unlocked_ioctl = fat_generic_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 3852bb66358c..0b6009cd1844 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -22,6 +22,7 @@
#include <linux/unaligned.h>
#include <linux/random.h>
#include <linux/iversion.h>
+#include <linux/fs_struct.h>
#include "fat.h"
#ifndef CONFIG_FAT_DEFAULT_IOCHARSET
@@ -219,13 +220,14 @@ static void fat_write_failed(struct address_space *mapping, loff_t to)
}
}
-static int fat_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct folio **foliop, void **fsdata)
+static int fat_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
int err;
- err = cont_write_begin(file, mapping, pos, len,
+ err = cont_write_begin(iocb, mapping, pos, len,
foliop, fsdata, fat_get_block,
&MSDOS_I(mapping->host)->mmu_private);
if (err < 0)
@@ -233,13 +235,14 @@ static int fat_write_begin(struct file *file, struct address_space *mapping,
return err;
}
-static int fat_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct folio *folio, void *fsdata)
+static int fat_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
int err;
- err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
+ err = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
if (err < len)
fat_write_failed(mapping, pos + len);
if (!(err < 0) && !(MSDOS_I(inode)->i_attrs & ATTR_ARCH)) {
@@ -1593,8 +1596,12 @@ int fat_fill_super(struct super_block *sb, struct fs_context *fc,
setup(sb); /* flavour-specific stuff that needs options */
+ error = -EINVAL;
+ if (!sb_min_blocksize(sb, 512)) {
+ fat_msg(sb, KERN_ERR, "unable to set blocksize");
+ goto out_fail;
+ }
error = -EIO;
- sb_min_blocksize(sb, 512);
bh = sb_bread(sb, 0);
if (bh == NULL) {
fat_msg(sb, KERN_ERR, "unable to read boot sector");
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index c7a2d27120ba..950da09f0961 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -158,9 +158,9 @@ int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster)
mark_inode_dirty(inode);
}
if (new_fclus != (inode->i_blocks >> (sbi->cluster_bits - 9))) {
- fat_fs_error(sb, "clusters badly computed (%d != %llu)",
- new_fclus,
- (llu)(inode->i_blocks >> (sbi->cluster_bits - 9)));
+ fat_fs_error_ratelimit(
+ sb, "clusters badly computed (%d != %llu)", new_fclus,
+ (llu)(inode->i_blocks >> (sbi->cluster_bits - 9)));
fat_cache_inval_inode(inode);
}
inode->i_blocks += nr_cluster << (sbi->cluster_bits - 9);
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index f06f6ba643cc..0b920ee40a7f 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -339,8 +339,8 @@ out:
}
/***** Make a directory */
-static int msdos_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *msdos_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
@@ -389,13 +389,13 @@ static int msdos_mkdir(struct mnt_idmap *idmap, struct inode *dir,
mutex_unlock(&MSDOS_SB(sb)->s_lock);
fat_flush_inodes(sb, dir, inode);
- return 0;
+ return NULL;
out_free:
fat_free_clusters(dir, cluster);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
- return err;
+ return ERR_PTR(err);
}
/***** Unlink a file */
@@ -646,7 +646,7 @@ static const struct inode_operations msdos_dir_inode_operations = {
static void setup(struct super_block *sb)
{
MSDOS_SB(sb)->dir_ops = &msdos_dir_inode_operations;
- sb->s_d_op = &msdos_dentry_operations;
+ set_default_d_op(sb, &msdos_dentry_operations);
sb->s_flags |= SB_NOATIME;
}
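The mkdir conversion above follows the newer VFS contract in which ->mkdir returns a struct dentry * rather than an int: NULL signals that the passed-in dentry was instantiated, and failures travel as ERR_PTR(). A minimal skeleton of that contract, with a hypothetical helper standing in for the real work:

/*
 * Sketch of the return convention used by msdos_mkdir()/vfat_mkdir() above:
 * NULL on success with the given dentry, ERR_PTR(err) on failure.
 * do_the_actual_mkdir() is a placeholder, not a real helper.
 */
static struct dentry *example_mkdir(struct mnt_idmap *idmap, struct inode *dir,
                                    struct dentry *dentry, umode_t mode)
{
        int err = do_the_actual_mkdir(dir, dentry, mode);

        return err ? ERR_PTR(err) : NULL;
}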
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 15bf32c21ac0..5dbc4cbb8fce 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -43,17 +43,13 @@ static inline void vfat_d_version_set(struct dentry *dentry,
* If it happened, the negative dentry isn't actually negative
* anymore. So, drop it.
*/
-static int vfat_revalidate_shortname(struct dentry *dentry)
+static bool vfat_revalidate_shortname(struct dentry *dentry, struct inode *dir)
{
- int ret = 1;
- spin_lock(&dentry->d_lock);
- if (!inode_eq_iversion(d_inode(dentry->d_parent), vfat_d_version(dentry)))
- ret = 0;
- spin_unlock(&dentry->d_lock);
- return ret;
+ return inode_eq_iversion(dir, vfat_d_version(dentry));
}
-static int vfat_revalidate(struct dentry *dentry, unsigned int flags)
+static int vfat_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -61,10 +57,11 @@ static int vfat_revalidate(struct dentry *dentry, unsigned int flags)
/* This is not negative dentry. Always valid. */
if (d_really_is_positive(dentry))
return 1;
- return vfat_revalidate_shortname(dentry);
+ return vfat_revalidate_shortname(dentry, dir);
}
-static int vfat_revalidate_ci(struct dentry *dentry, unsigned int flags)
+static int vfat_revalidate_ci(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -97,7 +94,7 @@ static int vfat_revalidate_ci(struct dentry *dentry, unsigned int flags)
if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
return 0;
- return vfat_revalidate_shortname(dentry);
+ return vfat_revalidate_shortname(dentry, dir);
}
/* returns the length of a struct qstr, ignoring trailing dots */
@@ -844,8 +841,8 @@ out:
return err;
}
-static int vfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *vfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
@@ -880,13 +877,13 @@ static int vfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
d_instantiate(dentry, inode);
mutex_unlock(&MSDOS_SB(sb)->s_lock);
- return 0;
+ return NULL;
out_free:
fat_free_clusters(dir, cluster);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
- return err;
+ return ERR_PTR(err);
}
static int vfat_get_dotdot_de(struct inode *inode, struct buffer_head **bh,
@@ -1190,9 +1187,9 @@ static void setup(struct super_block *sb)
{
MSDOS_SB(sb)->dir_ops = &vfat_dir_inode_operations;
if (MSDOS_SB(sb)->options.name_check != 's')
- sb->s_d_op = &vfat_ci_dentry_ops;
+ set_default_d_op(sb, &vfat_ci_dentry_ops);
else
- sb->s_d_op = &vfat_dentry_ops;
+ set_default_d_op(sb, &vfat_dentry_ops);
}
static int vfat_fill_super(struct super_block *sb, struct fs_context *fc)
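The revalidate hunks lean on the directory iversion that vfat stamps into negative dentries: with the parent inode now passed straight into ->d_revalidate, the comparison no longer needs d_lock to reach dentry->d_parent. A condensed sketch of the stamp-and-compare scheme (vfat_d_version()/vfat_d_version_set() are the existing vfat helpers visible in the surrounding context):

/*
 * A negative dentry remembers the directory's change version at lookup
 * time; revalidation succeeds only if the directory has not changed since.
 */
static void stamp_negative(struct dentry *dentry, struct inode *dir)
{
        vfat_d_version_set(dentry, inode_query_iversion(dir));
}

static bool negative_still_valid(struct dentry *dentry, struct inode *dir)
{
        return inode_eq_iversion(dir, vfat_d_version(dentry));
}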
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 49884fa3c81d..f93dbca08435 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -355,8 +355,7 @@ static bool rw_hint_valid(u64 hint)
}
}
-static long fcntl_get_rw_hint(struct file *file, unsigned int cmd,
- unsigned long arg)
+static long fcntl_get_rw_hint(struct file *file, unsigned long arg)
{
struct inode *inode = file_inode(file);
u64 __user *argp = (u64 __user *)arg;
@@ -367,8 +366,7 @@ static long fcntl_get_rw_hint(struct file *file, unsigned int cmd,
return 0;
}
-static long fcntl_set_rw_hint(struct file *file, unsigned int cmd,
- unsigned long arg)
+static long fcntl_set_rw_hint(struct file *file, unsigned long arg)
{
struct inode *inode = file_inode(file);
u64 __user *argp = (u64 __user *)arg;
@@ -447,6 +445,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
struct file *filp)
{
void __user *argp = (void __user *)arg;
+ struct delegation deleg;
int argi = (int)arg;
struct flock flock;
long err = -EINVAL;
@@ -547,10 +546,22 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
err = memfd_fcntl(filp, cmd, argi);
break;
case F_GET_RW_HINT:
- err = fcntl_get_rw_hint(filp, cmd, arg);
+ err = fcntl_get_rw_hint(filp, arg);
break;
case F_SET_RW_HINT:
- err = fcntl_set_rw_hint(filp, cmd, arg);
+ err = fcntl_set_rw_hint(filp, arg);
+ break;
+ case F_GETDELEG:
+ if (copy_from_user(&deleg, argp, sizeof(deleg)))
+ return -EFAULT;
+ err = fcntl_getdeleg(filp, &deleg);
+ if (!err && copy_to_user(argp, &deleg, sizeof(deleg)))
+ return -EFAULT;
+ break;
+ case F_SETDELEG:
+ if (copy_from_user(&deleg, argp, sizeof(deleg)))
+ return -EFAULT;
+ err = fcntl_setdeleg(fd, filp, &deleg);
break;
default:
break;
@@ -1158,10 +1169,10 @@ static int __init fcntl_init(void)
* Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
* is defined as O_NONBLOCK on some platforms and not on others.
*/
- BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ !=
HWEIGHT32(
(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
- __FMODE_EXEC | __FMODE_NONOTIFY));
+ __FMODE_EXEC));
fasync_cache = kmem_cache_create("fasync_cache",
sizeof(struct fasync_struct), 0,
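The fcntl hunks drop the unused cmd argument from the rw-hint helpers and add F_GETDELEG/F_SETDELEG, which shuttle a struct delegation between kernel and userspace. A userspace sketch of the pre-existing rw-hint interface those helpers serve (the F_*DELEG commands and struct delegation layout are introduced by this series and are not assumed below; the fallback constants mirror the uapi values):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>

#ifndef F_GET_RW_HINT                   /* from <linux/fcntl.h> on older libcs */
#define F_GET_RW_HINT           1035    /* F_LINUX_SPECIFIC_BASE + 11 */
#define F_SET_RW_HINT           1036    /* F_LINUX_SPECIFIC_BASE + 12 */
#endif
#ifndef RWH_WRITE_LIFE_SHORT
#define RWH_WRITE_LIFE_SHORT    2
#endif

int main(void)
{
        uint64_t hint = RWH_WRITE_LIFE_SHORT;
        int fd = open("testfile", O_RDWR | O_CREAT, 0644);

        if (fd < 0)
                return 1;
        if (fcntl(fd, F_SET_RW_HINT, &hint) == 0 &&
            fcntl(fd, F_GET_RW_HINT, &hint) == 0)
                printf("write hint: %llu\n", (unsigned long long)hint);
        return 0;
}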
diff --git a/fs/fhandle.c b/fs/fhandle.c
index ec9145047dfc..3de1547ec9d4 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -11,6 +11,7 @@
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
+#include <linux/nsfs.h>
#include "internal.h"
#include "mount.h"
@@ -88,7 +89,7 @@ static long do_sys_name_to_handle(const struct path *path,
if (fh_flags & EXPORT_FH_CONNECTABLE) {
handle->handle_type |= FILEID_IS_CONNECTABLE;
if (d_is_dir(path->dentry))
- fh_flags |= FILEID_IS_DIR;
+ handle->handle_type |= FILEID_IS_DIR;
}
retval = 0;
}
@@ -168,35 +169,34 @@ SYSCALL_DEFINE5(name_to_handle_at, int, dfd, const char __user *, name,
return err;
}
-static int get_path_from_fd(int fd, struct path *root)
+static int get_path_anchor(int fd, struct path *root)
{
- if (fd == AT_FDCWD) {
- struct fs_struct *fs = current->fs;
- spin_lock(&fs->lock);
- *root = fs->pwd;
- path_get(root);
- spin_unlock(&fs->lock);
- } else {
+ if (fd >= 0) {
CLASS(fd, f)(fd);
if (fd_empty(f))
return -EBADF;
*root = fd_file(f)->f_path;
path_get(root);
+ return 0;
}
- return 0;
-}
+ if (fd == AT_FDCWD) {
+ get_fs_pwd(current->fs, root);
+ return 0;
+ }
-enum handle_to_path_flags {
- HANDLE_CHECK_PERMS = (1 << 0),
- HANDLE_CHECK_SUBTREE = (1 << 1),
-};
+ if (fd == FD_PIDFS_ROOT) {
+ pidfs_get_root(root);
+ return 0;
+ }
-struct handle_to_path_ctx {
- struct path root;
- enum handle_to_path_flags flags;
- unsigned int fh_flags;
-};
+ if (fd == FD_NSFS_ROOT) {
+ nsfs_get_root(root);
+ return 0;
+ }
+
+ return -EBADF;
+}
static int vfs_dentry_acceptable(void *context, struct dentry *dentry)
{
@@ -214,6 +214,14 @@ static int vfs_dentry_acceptable(void *context, struct dentry *dentry)
return 1;
/*
+ * Verify that the decoded dentry itself has a valid id mapping.
+ * In case the decoded dentry is the mountfd root itself, this
+ * verifies that the mountfd inode itself has a valid id mapping.
+ */
+ if (!privileged_wrt_inode_uidgid(user_ns, idmap, d_inode(dentry)))
+ return 0;
+
+ /*
* It's racy as we're not taking rename_lock but we're able to ignore
* permissions and we just need an approximation whether we were able
* to follow a path to the file.
@@ -261,50 +269,55 @@ static int do_handle_to_path(struct file_handle *handle, struct path *path,
{
int handle_dwords;
struct vfsmount *mnt = ctx->root.mnt;
+ struct dentry *dentry;
/* change the handle size to multiple of sizeof(u32) */
handle_dwords = handle->handle_bytes >> 2;
- path->dentry = exportfs_decode_fh_raw(mnt,
- (struct fid *)handle->f_handle,
- handle_dwords, handle->handle_type,
- ctx->fh_flags,
- vfs_dentry_acceptable, ctx);
- if (IS_ERR_OR_NULL(path->dentry)) {
- if (path->dentry == ERR_PTR(-ENOMEM))
+ dentry = exportfs_decode_fh_raw(mnt, (struct fid *)handle->f_handle,
+ handle_dwords, handle->handle_type,
+ ctx->fh_flags, vfs_dentry_acceptable,
+ ctx);
+ if (IS_ERR_OR_NULL(dentry)) {
+ if (dentry == ERR_PTR(-ENOMEM))
return -ENOMEM;
return -ESTALE;
}
+ path->dentry = dentry;
path->mnt = mntget(mnt);
return 0;
}
-/*
- * Allow relaxed permissions of file handles if the caller has the
- * ability to mount the filesystem or create a bind-mount of the
- * provided @mountdirfd.
- *
- * In both cases the caller may be able to get an unobstructed way to
- * the encoded file handle. If the caller is only able to create a
- * bind-mount we need to verify that there are no locked mounts on top
- * of it that could prevent us from getting to the encoded file.
- *
- * In principle, locked mounts can prevent the caller from mounting the
- * filesystem but that only applies to procfs and sysfs neither of which
- * support decoding file handles.
- */
-static inline bool may_decode_fh(struct handle_to_path_ctx *ctx,
- unsigned int o_flags)
+static inline int may_decode_fh(struct handle_to_path_ctx *ctx,
+ unsigned int o_flags)
{
struct path *root = &ctx->root;
+ if (capable(CAP_DAC_READ_SEARCH))
+ return 0;
+
/*
- * Restrict to O_DIRECTORY to provide a deterministic API that avoids a
- * confusing api in the face of disconnected non-dir dentries.
+ * Allow relaxed permissions of file handles if the caller has
+ * the ability to mount the filesystem or create a bind-mount of
+ * the provided @mountdirfd.
+ *
+ * In both cases the caller may be able to get an unobstructed
+ * way to the encoded file handle. If the caller is only able to
+ * create a bind-mount we need to verify that there are no
+ * locked mounts on top of it that could prevent us from getting
+ * to the encoded file.
+ *
+ * In principle, locked mounts can prevent the caller from
+ * mounting the filesystem but that only applies to procfs and
+ * sysfs neither of which support decoding file handles.
+ *
+ * Restrict to O_DIRECTORY to provide a deterministic API that
+ * avoids a confusing api in the face of disconnected non-dir
+ * dentries.
*
* There's only one dentry for each directory inode (VFS rule)...
*/
if (!(o_flags & O_DIRECTORY))
- return false;
+ return -EPERM;
if (ns_capable(root->mnt->mnt_sb->s_user_ns, CAP_SYS_ADMIN))
ctx->flags = HANDLE_CHECK_PERMS;
@@ -314,14 +327,14 @@ static inline bool may_decode_fh(struct handle_to_path_ctx *ctx,
!has_locked_children(real_mount(root->mnt), root->dentry))
ctx->flags = HANDLE_CHECK_PERMS | HANDLE_CHECK_SUBTREE;
else
- return false;
+ return -EPERM;
/* Are we able to override DAC permissions? */
if (!ns_capable(current_user_ns(), CAP_DAC_READ_SEARCH))
- return false;
+ return -EPERM;
ctx->fh_flags = EXPORT_FH_DIR_ONLY;
- return true;
+ return 0;
}
static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
@@ -329,32 +342,32 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
{
int retval = 0;
struct file_handle f_handle;
- struct file_handle *handle = NULL;
+ struct file_handle *handle __free(kfree) = NULL;
struct handle_to_path_ctx ctx = {};
+ const struct export_operations *eops;
- retval = get_path_from_fd(mountdirfd, &ctx.root);
- if (retval)
- goto out_err;
-
- if (!capable(CAP_DAC_READ_SEARCH) && !may_decode_fh(&ctx, o_flags)) {
- retval = -EPERM;
- goto out_path;
- }
+ if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle)))
+ return -EFAULT;
- if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle))) {
- retval = -EFAULT;
- goto out_path;
- }
if ((f_handle.handle_bytes > MAX_HANDLE_SZ) ||
- (f_handle.handle_bytes == 0)) {
- retval = -EINVAL;
- goto out_path;
- }
+ (f_handle.handle_bytes == 0))
+ return -EINVAL;
+
if (f_handle.handle_type < 0 ||
- FILEID_USER_FLAGS(f_handle.handle_type) & ~FILEID_VALID_USER_FLAGS) {
- retval = -EINVAL;
+ FILEID_USER_FLAGS(f_handle.handle_type) & ~FILEID_VALID_USER_FLAGS)
+ return -EINVAL;
+
+ retval = get_path_anchor(mountdirfd, &ctx.root);
+ if (retval)
+ return retval;
+
+ eops = ctx.root.mnt->mnt_sb->s_export_op;
+ if (eops && eops->permission)
+ retval = eops->permission(&ctx, o_flags);
+ else
+ retval = may_decode_fh(&ctx, o_flags);
+ if (retval)
goto out_path;
- }
handle = kmalloc(struct_size(handle, f_handle, f_handle.handle_bytes),
GFP_KERNEL);
@@ -368,7 +381,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
&ufh->f_handle,
f_handle.handle_bytes)) {
retval = -EFAULT;
- goto out_handle;
+ goto out_path;
}
/*
@@ -386,41 +399,33 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
handle->handle_type &= ~FILEID_USER_FLAGS_MASK;
retval = do_handle_to_path(handle, path, &ctx);
-out_handle:
- kfree(handle);
out_path:
path_put(&ctx.root);
-out_err:
return retval;
}
+static struct file *file_open_handle(struct path *path, int open_flag)
+{
+ const struct export_operations *eops;
+
+ eops = path->mnt->mnt_sb->s_export_op;
+ if (eops->open)
+ return eops->open(path, open_flag);
+
+ return file_open_root(path, "", open_flag, 0);
+}
+
static long do_handle_open(int mountdirfd, struct file_handle __user *ufh,
int open_flag)
{
- long retval = 0;
- struct path path;
- struct file *file;
- int fd;
+ long retval;
+ struct path path __free(path_put) = {};
retval = handle_to_path(mountdirfd, ufh, &path, open_flag);
if (retval)
return retval;
- fd = get_unused_fd_flags(open_flag);
- if (fd < 0) {
- path_put(&path);
- return fd;
- }
- file = file_open_root(&path, "", open_flag, 0);
- if (IS_ERR(file)) {
- put_unused_fd(fd);
- retval = PTR_ERR(file);
- } else {
- retval = fd;
- fd_install(fd, file);
- }
- path_put(&path);
- return retval;
+ return FD_ADD(open_flag, file_open_handle(&path, open_flag));
}
/**
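The open_by_handle_at() plumbing above gains pidfs/nsfs anchors, lets a filesystem's export_operations override the permission and open steps, and keeps the unprivileged path restricted to O_DIRECTORY handles unless CAP_DAC_READ_SEARCH is held. A userspace sketch of the unchanged handle round trip these paths serve (paths are placeholders; HANDLE_BUF_SZ mirrors MAX_HANDLE_SZ from the uapi):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

#define HANDLE_BUF_SZ   128     /* MAX_HANDLE_SZ in <linux/fcntl.h> */

int main(void)
{
        int mount_id, mnt_fd, fd;
        struct file_handle *fh = malloc(sizeof(*fh) + HANDLE_BUF_SZ);

        if (!fh)
                return 1;
        fh->handle_bytes = HANDLE_BUF_SZ;
        if (name_to_handle_at(AT_FDCWD, "/mnt/test/dir", fh, &mount_id, 0))
                return 1;
        mnt_fd = open("/mnt/test", O_RDONLY | O_DIRECTORY);
        fd = open_by_handle_at(mnt_fd, fh, O_RDONLY | O_DIRECTORY);
        printf("reopened fd: %d\n", fd);
        return fd < 0;
}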
diff --git a/fs/file.c b/fs/file.c
index fb1011cf6b4a..0a4f3bdb2dec 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -22,9 +22,32 @@
#include <linux/close_range.h>
#include <linux/file_ref.h>
#include <net/sock.h>
+#include <linux/init_task.h>
#include "internal.h"
+static noinline bool __file_ref_put_badval(file_ref_t *ref, unsigned long cnt)
+{
+ /*
+ * If the reference count was already in the dead zone, then this
+ * put() operation is imbalanced. Warn, put the reference count back to
+ * DEAD and tell the caller to not deconstruct the object.
+ */
+ if (WARN_ONCE(cnt >= FILE_REF_RELEASED, "imbalanced put on file reference count")) {
+ atomic_long_set(&ref->refcnt, FILE_REF_DEAD);
+ return false;
+ }
+
+ /*
+ * This is a put() operation on a saturated refcount. Restore the
+ * mean saturation value and tell the caller to not deconstruct the
+ * object.
+ */
+ if (cnt > FILE_REF_MAXREF)
+ atomic_long_set(&ref->refcnt, FILE_REF_SATURATED);
+ return false;
+}
+
/**
* __file_ref_put - Slowpath of file_ref_put()
* @ref: Pointer to the reference count
@@ -66,24 +89,7 @@ bool __file_ref_put(file_ref_t *ref, unsigned long cnt)
return true;
}
- /*
- * If the reference count was already in the dead zone, then this
- * put() operation is imbalanced. Warn, put the reference count back to
- * DEAD and tell the caller to not deconstruct the object.
- */
- if (WARN_ONCE(cnt >= FILE_REF_RELEASED, "imbalanced put on file reference count")) {
- atomic_long_set(&ref->refcnt, FILE_REF_DEAD);
- return false;
- }
-
- /*
- * This is a put() operation on a saturated refcount. Restore the
- * mean saturation value and tell the caller to not deconstruct the
- * object.
- */
- if (cnt > FILE_REF_MAXREF)
- atomic_long_set(&ref->refcnt, FILE_REF_SATURATED);
- return false;
+ return __file_ref_put_badval(ref, cnt);
}
EXPORT_SYMBOL_GPL(__file_ref_put);
@@ -191,6 +197,21 @@ static struct fdtable *alloc_fdtable(unsigned int slots_wanted)
return ERR_PTR(-EMFILE);
}
+ /*
+ * Check if the allocation size would exceed INT_MAX. kvmalloc_array()
+ * and kvmalloc() will warn if the allocation size is greater than
+ * INT_MAX, as filp_cachep objects are not __GFP_NOWARN.
+ *
+ * This can happen when sysctl_nr_open is set to a very high value and
+ * a process tries to use a file descriptor near that limit. For example,
+ * if sysctl_nr_open is set to 1073741816 (0x3ffffff8) - which is what
+ * systemd typically sets it to - then trying to use a file descriptor
+ * close to that value will require allocating a file descriptor table
+ * that exceeds 8GB in size.
+ */
+ if (unlikely(nr > INT_MAX / sizeof(struct file *)))
+ return ERR_PTR(-EMFILE);
+
fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
if (!fdt)
goto out;
@@ -278,10 +299,6 @@ repeat:
if (nr < fdt->max_fds)
return 0;
- /* Can we expand? */
- if (nr >= sysctl_nr_open)
- return -EMFILE;
-
if (unlikely(files->resize_in_progress)) {
spin_unlock(&files->file_lock);
wait_event(files->resize_wait, !files->resize_in_progress);
@@ -289,6 +306,10 @@ repeat:
goto repeat;
}
+ /* Can we expand? */
+ if (unlikely(nr >= sysctl_nr_open))
+ return -EMFILE;
+
/* All good, so we try */
files->resize_in_progress = true;
error = expand_fdtable(files, nr);
@@ -417,17 +438,25 @@ struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_ho
old_fds = old_fdt->fd;
new_fds = new_fdt->fd;
+ /*
+ * We may be racing against fd allocation from other threads using this
+ * files_struct, despite holding ->file_lock.
+ *
+ * alloc_fd() might have already claimed a slot, while fd_install()
+ * did not populate it yet. Note the latter operates locklessly, so
+ * the file can show up as we are walking the array below.
+ *
+ * At the same time we know no files will disappear as all other
+ * operations take the lock.
+ *
+ * Instead of trying to placate userspace racing with itself, we
+ * ref the file if we see it and mark the fd slot as unused otherwise.
+ */
for (i = open_files; i != 0; i--) {
- struct file *f = *old_fds++;
+ struct file *f = rcu_dereference_raw(*old_fds++);
if (f) {
get_file(f);
} else {
- /*
- * The fd may be claimed in the fd bitmap but not yet
- * instantiated in the files array if a sibling thread
- * is partway through open(). So make sure that this
- * fd is available to the new process.
- */
__clear_open_fd(open_files - i, new_fdt);
}
rcu_assign_pointer(*new_fds++, f);
@@ -576,6 +605,7 @@ repeat:
__set_open_fd(fd, fdt, flags & O_CLOEXEC);
error = fd;
+ VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL);
out:
spin_unlock(&files->file_lock);
@@ -612,21 +642,41 @@ void put_unused_fd(unsigned int fd)
EXPORT_SYMBOL(put_unused_fd);
/*
- * Install a file pointer in the fd array.
+ * Install a file pointer in the fd array while it is being resized.
*
- * The VFS is full of places where we drop the files lock between
- * setting the open_fds bitmap and installing the file in the file
- * array. At any such point, we are vulnerable to a dup2() race
- * installing a file in the array before us. We need to detect this and
- * fput() the struct file we are about to overwrite in this case.
+ * We need to make sure our update to the array does not get lost as the resizing
+ * thread can be copying the content as we modify it.
*
- * It should never happen - if we allow dup2() do it, _really_ bad things
- * will follow.
+ * We have two ways to do it:
+ * - go off CPU waiting for resize_in_progress to clear
+ * - take the spin lock
+ *
+ * The latter is trivial to implement and saves us from having to might_sleep()
+ * for debugging purposes.
+ *
+ * This is moved out of line from fd_install() to convince gcc to optimize that
+ * routine better.
+ */
+static void noinline fd_install_slowpath(unsigned int fd, struct file *file)
+{
+ struct files_struct *files = current->files;
+ struct fdtable *fdt;
+
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+ VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL);
+ rcu_assign_pointer(fdt->fd[fd], file);
+ spin_unlock(&files->file_lock);
+}
+
+/**
+ * fd_install - install a file pointer in the fd array
+ * @fd: file descriptor to install the file in
+ * @file: the file to install
*
* This consumes the "file" refcount, so callers should treat it
* as if they had called fput(file).
*/
-
void fd_install(unsigned int fd, struct file *file)
{
struct files_struct *files = current->files;
@@ -636,20 +686,15 @@ void fd_install(unsigned int fd, struct file *file)
return;
rcu_read_lock_sched();
-
if (unlikely(files->resize_in_progress)) {
rcu_read_unlock_sched();
- spin_lock(&files->file_lock);
- fdt = files_fdtable(files);
- WARN_ON(fdt->fd[fd] != NULL);
- rcu_assign_pointer(fdt->fd[fd], file);
- spin_unlock(&files->file_lock);
+ fd_install_slowpath(fd, file);
return;
}
/* coupled with smp_wmb() in expand_fdtable() */
smp_rmb();
fdt = rcu_dereference_sched(files->fdt);
- BUG_ON(fdt->fd[fd] != NULL);
+ VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL);
rcu_assign_pointer(fdt->fd[fd], file);
rcu_read_unlock_sched();
}
@@ -678,7 +723,7 @@ struct file *file_close_fd_locked(struct files_struct *files, unsigned fd)
return NULL;
fd = array_index_nospec(fd, fdt->max_fds);
- file = fdt->fd[fd];
+ file = rcu_dereference_raw(fdt->fd[fd]);
if (file) {
rcu_assign_pointer(fdt->fd[fd], NULL);
__put_unused_fd(files, fd);
@@ -1177,8 +1222,27 @@ struct fd fdget_raw(unsigned int fd)
*/
static inline bool file_needs_f_pos_lock(struct file *file)
{
- return (file->f_mode & FMODE_ATOMIC_POS) &&
- (file_count(file) > 1 || file->f_op->iterate_shared);
+ if (!(file->f_mode & FMODE_ATOMIC_POS))
+ return false;
+ if (__file_ref_read_raw(&file->f_ref) != FILE_REF_ONEREF)
+ return true;
+ if (file->f_op->iterate_shared)
+ return true;
+ return false;
+}
+
+bool file_seek_cur_needs_f_lock(struct file *file)
+{
+ if (!(file->f_mode & FMODE_ATOMIC_POS) && !file->f_op->iterate_shared)
+ return false;
+
+ /*
+ * Note that we are not guaranteed to be called after fdget_pos() on
+ * this file obj, in which case the caller is expected to provide the
+ * appropriate locking.
+ */
+
+ return true;
}
struct fd fdget_pos(unsigned int fd)
@@ -1186,7 +1250,7 @@ struct fd fdget_pos(unsigned int fd)
struct fd f = fdget(fd);
struct file *file = fd_file(f);
- if (file && file_needs_f_pos_lock(file)) {
+ if (likely(file) && file_needs_f_pos_lock(file)) {
f.word |= FDPUT_POS_UNLOCK;
mutex_lock(&file->f_pos_lock);
}
@@ -1229,22 +1293,34 @@ __releases(&files->file_lock)
struct fdtable *fdt;
/*
- * We need to detect attempts to do dup2() over allocated but still
- * not finished descriptor. NB: OpenBSD avoids that at the price of
- * extra work in their equivalent of fget() - they insert struct
- * file immediately after grabbing descriptor, mark it larval if
- * more work (e.g. actual opening) is needed and make sure that
- * fget() treats larval files as absent. Potentially interesting,
- * but while extra work in fget() is trivial, locking implications
- * and amount of surgery on open()-related paths in VFS are not.
- * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
- * deadlocks in rather amusing ways, AFAICS. All of that is out of
- * scope of POSIX or SUS, since neither considers shared descriptor
- * tables and this condition does not arise without those.
+ * dup2() is expected to close the file installed in the target fd slot
+ * (if any). However, userspace hand-picking a fd may be racing against
+ * its own threads which happened to allocate it in open() et al but did
+ * not populate it yet.
+ *
+ * Broadly speaking we may be racing against the following:
+ * fd = get_unused_fd_flags(); // fd slot reserved, ->fd[fd] == NULL
+ * file = hard_work_goes_here();
+ * fd_install(fd, file); // only now ->fd[fd] == file
+ *
+ * It is an invariant that a successfully allocated fd has a NULL entry
+ * in the array until the matching fd_install().
+ *
+ * If we fit the window, we have the fd to populate, yet no target file
+ * to close. Trying to ignore it and install our new file would violate
+ * the invariant and make fd_install() overwrite our file.
+ *
+ * Things can be done(tm) to handle this. However, the issue does not
+ * concern legitimate programs and we only need to make sure the kernel
+ * does not trip over it.
+ *
+ * The simplest way out is to return an error if we find ourselves here.
+ *
+ * POSIX is silent on the issue, we return -EBUSY.
*/
fdt = files_fdtable(files);
fd = array_index_nospec(fd, fdt->max_fds);
- tofree = fdt->fd[fd];
+ tofree = rcu_dereference_raw(fdt->fd[fd]);
if (!tofree && fd_is_open(fd, fdt))
goto Ebusy;
get_file(file);
@@ -1277,7 +1353,10 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
err = expand_files(files, fd);
if (unlikely(err < 0))
goto out_unlock;
- return do_dup2(files, file, fd, flags);
+ err = do_dup2(files, file, fd, flags);
+ if (err < 0)
+ return err;
+ return 0;
out_unlock:
spin_unlock(&files->file_lock);
@@ -1301,28 +1380,25 @@ out_unlock:
*/
int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
- int new_fd;
int error;
error = security_file_receive(file);
if (error)
return error;
- new_fd = get_unused_fd_flags(o_flags);
- if (new_fd < 0)
- return new_fd;
+ FD_PREPARE(fdf, o_flags, file);
+ if (fdf.err)
+ return fdf.err;
+ get_file(file);
if (ufd) {
- error = put_user(new_fd, ufd);
- if (error) {
- put_unused_fd(new_fd);
+ error = put_user(fd_prepare_fd(fdf), ufd);
+ if (error)
return error;
- }
}
- fd_install(new_fd, get_file(file));
- __receive_sock(file);
- return new_fd;
+ __receive_sock(fd_prepare_file(fdf));
+ return fd_publish(fdf);
}
EXPORT_SYMBOL_GPL(receive_fd);
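The alloc_fdtable() hunk rejects descriptor counts whose pointer array would exceed INT_MAX bytes before kvmalloc_array() gets a chance to warn. A worked-out version of the arithmetic from the comment (64-bit pointer size assumed):

/*
 * With 8-byte file pointers, the cap is INT_MAX / 8 (about 268M slots);
 * the systemd-style nr_open of 0x3ffffff8 from the comment would need just
 * under 8 GiB for the array alone, hence the early -EMFILE.
 */
#include <limits.h>
#include <stdio.h>

int main(void)
{
        unsigned long nr = 0x3ffffff8UL;        /* example from the comment */
        unsigned long ptr_sz = 8;               /* sizeof(struct file *) on 64-bit */

        printf("cap  : %lu slots\n", (unsigned long)INT_MAX / ptr_sz);
        printf("need : %lu bytes\n", nr * ptr_sz);
        return 0;
}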
diff --git a/fs/file_attr.c b/fs/file_attr.c
new file mode 100644
index 000000000000..4c4916632f11
--- /dev/null
+++ b/fs/file_attr.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/fscrypt.h>
+#include <linux/fileattr.h>
+#include <linux/export.h>
+#include <linux/syscalls.h>
+#include <linux/namei.h>
+
+#include "internal.h"
+
+/**
+ * fileattr_fill_xflags - initialize fileattr with xflags
+ * @fa: fileattr pointer
+ * @xflags: FS_XFLAG_* flags
+ *
+ * Set ->fsx_xflags, ->fsx_valid and ->flags (translated xflags). All
+ * other fields are zeroed.
+ */
+void fileattr_fill_xflags(struct file_kattr *fa, u32 xflags)
+{
+ memset(fa, 0, sizeof(*fa));
+ fa->fsx_valid = true;
+ fa->fsx_xflags = xflags;
+ if (fa->fsx_xflags & FS_XFLAG_IMMUTABLE)
+ fa->flags |= FS_IMMUTABLE_FL;
+ if (fa->fsx_xflags & FS_XFLAG_APPEND)
+ fa->flags |= FS_APPEND_FL;
+ if (fa->fsx_xflags & FS_XFLAG_SYNC)
+ fa->flags |= FS_SYNC_FL;
+ if (fa->fsx_xflags & FS_XFLAG_NOATIME)
+ fa->flags |= FS_NOATIME_FL;
+ if (fa->fsx_xflags & FS_XFLAG_NODUMP)
+ fa->flags |= FS_NODUMP_FL;
+ if (fa->fsx_xflags & FS_XFLAG_DAX)
+ fa->flags |= FS_DAX_FL;
+ if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
+ fa->flags |= FS_PROJINHERIT_FL;
+}
+EXPORT_SYMBOL(fileattr_fill_xflags);
+
+/**
+ * fileattr_fill_flags - initialize fileattr with flags
+ * @fa: fileattr pointer
+ * @flags: FS_*_FL flags
+ *
+ * Set ->flags, ->flags_valid and ->fsx_xflags (translated flags).
+ * All other fields are zeroed.
+ */
+void fileattr_fill_flags(struct file_kattr *fa, u32 flags)
+{
+ memset(fa, 0, sizeof(*fa));
+ fa->flags_valid = true;
+ fa->flags = flags;
+ if (fa->flags & FS_SYNC_FL)
+ fa->fsx_xflags |= FS_XFLAG_SYNC;
+ if (fa->flags & FS_IMMUTABLE_FL)
+ fa->fsx_xflags |= FS_XFLAG_IMMUTABLE;
+ if (fa->flags & FS_APPEND_FL)
+ fa->fsx_xflags |= FS_XFLAG_APPEND;
+ if (fa->flags & FS_NODUMP_FL)
+ fa->fsx_xflags |= FS_XFLAG_NODUMP;
+ if (fa->flags & FS_NOATIME_FL)
+ fa->fsx_xflags |= FS_XFLAG_NOATIME;
+ if (fa->flags & FS_DAX_FL)
+ fa->fsx_xflags |= FS_XFLAG_DAX;
+ if (fa->flags & FS_PROJINHERIT_FL)
+ fa->fsx_xflags |= FS_XFLAG_PROJINHERIT;
+}
+EXPORT_SYMBOL(fileattr_fill_flags);
+
+/**
+ * vfs_fileattr_get - retrieve miscellaneous file attributes
+ * @dentry: the object to retrieve from
+ * @fa: fileattr pointer
+ *
+ * Call i_op->fileattr_get() callback, if exists.
+ *
+ * Return: 0 on success, or a negative error on failure.
+ */
+int vfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
+{
+ struct inode *inode = d_inode(dentry);
+ int error;
+
+ if (!inode->i_op->fileattr_get)
+ return -ENOIOCTLCMD;
+
+ error = security_inode_file_getattr(dentry, fa);
+ if (error)
+ return error;
+
+ return inode->i_op->fileattr_get(dentry, fa);
+}
+EXPORT_SYMBOL(vfs_fileattr_get);
+
+static void fileattr_to_file_attr(const struct file_kattr *fa,
+ struct file_attr *fattr)
+{
+ __u32 mask = FS_XFLAGS_MASK;
+
+ memset(fattr, 0, sizeof(struct file_attr));
+ fattr->fa_xflags = fa->fsx_xflags & mask;
+ fattr->fa_extsize = fa->fsx_extsize;
+ fattr->fa_nextents = fa->fsx_nextents;
+ fattr->fa_projid = fa->fsx_projid;
+ fattr->fa_cowextsize = fa->fsx_cowextsize;
+}
+
+/**
+ * copy_fsxattr_to_user - copy fsxattr to userspace.
+ * @fa: fileattr pointer
+ * @ufa: fsxattr user pointer
+ *
+ * Return: 0 on success, or -EFAULT on failure.
+ */
+int copy_fsxattr_to_user(const struct file_kattr *fa, struct fsxattr __user *ufa)
+{
+ struct fsxattr xfa;
+ __u32 mask = FS_XFLAGS_MASK;
+
+ memset(&xfa, 0, sizeof(xfa));
+ xfa.fsx_xflags = fa->fsx_xflags & mask;
+ xfa.fsx_extsize = fa->fsx_extsize;
+ xfa.fsx_nextents = fa->fsx_nextents;
+ xfa.fsx_projid = fa->fsx_projid;
+ xfa.fsx_cowextsize = fa->fsx_cowextsize;
+
+ if (copy_to_user(ufa, &xfa, sizeof(xfa)))
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL(copy_fsxattr_to_user);
+
+static int file_attr_to_fileattr(const struct file_attr *fattr,
+ struct file_kattr *fa)
+{
+ __u64 mask = FS_XFLAGS_MASK;
+
+ if (fattr->fa_xflags & ~mask)
+ return -EINVAL;
+
+ fileattr_fill_xflags(fa, fattr->fa_xflags);
+ fa->fsx_xflags &= ~FS_XFLAG_RDONLY_MASK;
+ fa->fsx_extsize = fattr->fa_extsize;
+ fa->fsx_projid = fattr->fa_projid;
+ fa->fsx_cowextsize = fattr->fa_cowextsize;
+
+ return 0;
+}
+
+static int copy_fsxattr_from_user(struct file_kattr *fa,
+ struct fsxattr __user *ufa)
+{
+ struct fsxattr xfa;
+ __u32 mask = FS_XFLAGS_MASK;
+
+ if (copy_from_user(&xfa, ufa, sizeof(xfa)))
+ return -EFAULT;
+
+ if (xfa.fsx_xflags & ~mask)
+ return -EOPNOTSUPP;
+
+ fileattr_fill_xflags(fa, xfa.fsx_xflags);
+ fa->fsx_xflags &= ~FS_XFLAG_RDONLY_MASK;
+ fa->fsx_extsize = xfa.fsx_extsize;
+ fa->fsx_nextents = xfa.fsx_nextents;
+ fa->fsx_projid = xfa.fsx_projid;
+ fa->fsx_cowextsize = xfa.fsx_cowextsize;
+
+ return 0;
+}
+
+/*
+ * Generic function to check FS_IOC_FSSETXATTR/FS_IOC_SETFLAGS values and reject
+ * any invalid configurations.
+ *
+ * Note: must be called with inode lock held.
+ */
+static int fileattr_set_prepare(struct inode *inode,
+ const struct file_kattr *old_ma,
+ struct file_kattr *fa)
+{
+ int err;
+
+ /*
+ * The IMMUTABLE and APPEND_ONLY flags can only be changed by
+ * the relevant capability.
+ */
+ if ((fa->flags ^ old_ma->flags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) &&
+ !capable(CAP_LINUX_IMMUTABLE))
+ return -EPERM;
+
+ err = fscrypt_prepare_setflags(inode, old_ma->flags, fa->flags);
+ if (err)
+ return err;
+
+ /*
+ * Project Quota ID state is only allowed to change from within the init
+ * namespace. Enforce that restriction only if we are trying to change
+ * the quota ID state. Everything else is allowed in user namespaces.
+ */
+ if (current_user_ns() != &init_user_ns) {
+ if (old_ma->fsx_projid != fa->fsx_projid)
+ return -EINVAL;
+ if ((old_ma->fsx_xflags ^ fa->fsx_xflags) &
+ FS_XFLAG_PROJINHERIT)
+ return -EINVAL;
+ } else {
+ /*
+ * Caller is allowed to change the project ID. If it is being
+ * changed, make sure that the new value is valid.
+ */
+ if (old_ma->fsx_projid != fa->fsx_projid &&
+ !projid_valid(make_kprojid(&init_user_ns, fa->fsx_projid)))
+ return -EINVAL;
+ }
+
+ /* Check extent size hints. */
+ if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+ if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) &&
+ !S_ISDIR(inode->i_mode))
+ return -EINVAL;
+
+ if ((fa->fsx_xflags & FS_XFLAG_COWEXTSIZE) &&
+ !S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
+ return -EINVAL;
+
+ /*
+ * It is only valid to set the DAX flag on regular files and
+ * directories on filesystems.
+ */
+ if ((fa->fsx_xflags & FS_XFLAG_DAX) &&
+ !(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
+ return -EINVAL;
+
+ /* Extent size hints of zero turn off the flags. */
+ if (fa->fsx_extsize == 0)
+ fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT);
+ if (fa->fsx_cowextsize == 0)
+ fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE;
+
+ return 0;
+}
+
+/**
+ * vfs_fileattr_set - change miscellaneous file attributes
+ * @idmap: idmap of the mount
+ * @dentry: the object to change
+ * @fa: fileattr pointer
+ *
+ * After verifying permissions, call i_op->fileattr_set() callback, if
+ * exists.
+ *
+ * Verifying attributes involves retrieving current attributes with
+ * i_op->fileattr_get(); this also allows initializing attributes that have
+ * not been set by the caller to current values. The inode lock is held
+ * throughout to prevent racing with another instance.
+ *
+ * Return: 0 on success, or a negative error on failure.
+ */
+int vfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct file_kattr *fa)
+{
+ struct inode *inode = d_inode(dentry);
+ struct file_kattr old_ma = {};
+ int err;
+
+ if (!inode->i_op->fileattr_set)
+ return -ENOIOCTLCMD;
+
+ if (!inode_owner_or_capable(idmap, inode))
+ return -EPERM;
+
+ inode_lock(inode);
+ err = vfs_fileattr_get(dentry, &old_ma);
+ if (!err) {
+ /* initialize missing bits from old_ma */
+ if (fa->flags_valid) {
+ fa->fsx_xflags |= old_ma.fsx_xflags & ~FS_XFLAG_COMMON;
+ fa->fsx_extsize = old_ma.fsx_extsize;
+ fa->fsx_nextents = old_ma.fsx_nextents;
+ fa->fsx_projid = old_ma.fsx_projid;
+ fa->fsx_cowextsize = old_ma.fsx_cowextsize;
+ } else {
+ fa->flags |= old_ma.flags & ~FS_COMMON_FL;
+ }
+
+ err = fileattr_set_prepare(inode, &old_ma, fa);
+ if (err)
+ goto out;
+ err = security_inode_file_setattr(dentry, fa);
+ if (err)
+ goto out;
+ err = inode->i_op->fileattr_set(idmap, dentry, fa);
+ if (err)
+ goto out;
+ }
+
+out:
+ inode_unlock(inode);
+ return err;
+}
+EXPORT_SYMBOL(vfs_fileattr_set);
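For reference, the callbacks that vfs_fileattr_get()/vfs_fileattr_set() dispatch to are ordinary inode operations. The sketch below is illustrative only and is not part of this patch: "demofs", demofs_i(), struct demofs_inode and the DEMOFS_FL_* masks are hypothetical placeholders; only helpers already used in this file (fileattr_fill_flags(), the flags/fsx validity hints) are assumed to exist.

static int demofs_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
	struct demofs_inode *di = demofs_i(d_inode(dentry));	/* hypothetical */

	fileattr_fill_flags(fa, di->flags & DEMOFS_FL_USER_VISIBLE);
	return 0;
}

static int demofs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
			       struct file_kattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct demofs_inode *di = demofs_i(inode);		/* hypothetical */

	/* demofs only stores plain FS_*_FL flags, so reject fsxattr requests */
	if (fa->fsx_valid)
		return -EOPNOTSUPP;

	di->flags = fa->flags & DEMOFS_FL_USER_MODIFIABLE;
	inode_set_ctime_current(inode);
	mark_inode_dirty(inode);
	return 0;
}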
+
+int ioctl_getflags(struct file *file, unsigned int __user *argp)
+{
+ struct file_kattr fa = { .flags_valid = true }; /* hint only */
+ int err;
+
+ err = vfs_fileattr_get(file->f_path.dentry, &fa);
+ if (!err)
+ err = put_user(fa.flags, argp);
+ return err;
+}
+
+int ioctl_setflags(struct file *file, unsigned int __user *argp)
+{
+ struct mnt_idmap *idmap = file_mnt_idmap(file);
+ struct dentry *dentry = file->f_path.dentry;
+ struct file_kattr fa;
+ unsigned int flags;
+ int err;
+
+ err = get_user(flags, argp);
+ if (!err) {
+ err = mnt_want_write_file(file);
+ if (!err) {
+ fileattr_fill_flags(&fa, flags);
+ err = vfs_fileattr_set(idmap, dentry, &fa);
+ mnt_drop_write_file(file);
+ }
+ }
+ return err;
+}
+
+int ioctl_fsgetxattr(struct file *file, void __user *argp)
+{
+ struct file_kattr fa = { .fsx_valid = true }; /* hint only */
+ int err;
+
+ err = vfs_fileattr_get(file->f_path.dentry, &fa);
+ if (!err)
+ err = copy_fsxattr_to_user(&fa, argp);
+
+ return err;
+}
+
+int ioctl_fssetxattr(struct file *file, void __user *argp)
+{
+ struct mnt_idmap *idmap = file_mnt_idmap(file);
+ struct dentry *dentry = file->f_path.dentry;
+ struct file_kattr fa;
+ int err;
+
+ err = copy_fsxattr_from_user(&fa, argp);
+ if (!err) {
+ err = mnt_want_write_file(file);
+ if (!err) {
+ err = vfs_fileattr_set(idmap, dentry, &fa);
+ mnt_drop_write_file(file);
+ }
+ }
+ return err;
+}
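The two helpers above back the long-standing FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR interface. A minimal userspace sketch of the read-modify-write pattern they expect (illustrative only; error handling trimmed, using only the struct fsxattr fields from <linux/fs.h>):

#include <sys/ioctl.h>
#include <linux/fs.h>

/* Set the project quota ID on an open fd; directories also inherit it. */
static int set_project_id(int fd, unsigned int projid)
{
	struct fsxattr xfa;

	if (ioctl(fd, FS_IOC_FSGETXATTR, &xfa) != 0)
		return -1;
	xfa.fsx_projid = projid;
	xfa.fsx_xflags |= FS_XFLAG_PROJINHERIT;
	return ioctl(fd, FS_IOC_FSSETXATTR, &xfa);
}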
+
+SYSCALL_DEFINE5(file_getattr, int, dfd, const char __user *, filename,
+ struct file_attr __user *, ufattr, size_t, usize,
+ unsigned int, at_flags)
+{
+ struct path filepath __free(path_put) = {};
+ struct filename *name __free(putname) = NULL;
+ unsigned int lookup_flags = 0;
+ struct file_attr fattr;
+ struct file_kattr fa;
+ int error;
+
+ BUILD_BUG_ON(sizeof(struct file_attr) < FILE_ATTR_SIZE_VER0);
+ BUILD_BUG_ON(sizeof(struct file_attr) != FILE_ATTR_SIZE_LATEST);
+
+ if ((at_flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
+ return -EINVAL;
+
+ if (!(at_flags & AT_SYMLINK_NOFOLLOW))
+ lookup_flags |= LOOKUP_FOLLOW;
+
+ if (usize > PAGE_SIZE)
+ return -E2BIG;
+
+ if (usize < FILE_ATTR_SIZE_VER0)
+ return -EINVAL;
+
+ name = getname_maybe_null(filename, at_flags);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+
+ if (!name && dfd >= 0) {
+ CLASS(fd, f)(dfd);
+ if (fd_empty(f))
+ return -EBADF;
+
+ filepath = fd_file(f)->f_path;
+ path_get(&filepath);
+ } else {
+ error = filename_lookup(dfd, name, lookup_flags, &filepath,
+ NULL);
+ if (error)
+ return error;
+ }
+
+ error = vfs_fileattr_get(filepath.dentry, &fa);
+ if (error == -ENOIOCTLCMD || error == -ENOTTY)
+ error = -EOPNOTSUPP;
+ if (error)
+ return error;
+
+ fileattr_to_file_attr(&fa, &fattr);
+ error = copy_struct_to_user(ufattr, usize, &fattr,
+ sizeof(struct file_attr), NULL);
+
+ return error;
+}
+
+SYSCALL_DEFINE5(file_setattr, int, dfd, const char __user *, filename,
+ struct file_attr __user *, ufattr, size_t, usize,
+ unsigned int, at_flags)
+{
+ struct path filepath __free(path_put) = {};
+ struct filename *name __free(putname) = NULL;
+ unsigned int lookup_flags = 0;
+ struct file_attr fattr;
+ struct file_kattr fa;
+ int error;
+
+ BUILD_BUG_ON(sizeof(struct file_attr) < FILE_ATTR_SIZE_VER0);
+ BUILD_BUG_ON(sizeof(struct file_attr) != FILE_ATTR_SIZE_LATEST);
+
+ if ((at_flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
+ return -EINVAL;
+
+ if (!(at_flags & AT_SYMLINK_NOFOLLOW))
+ lookup_flags |= LOOKUP_FOLLOW;
+
+ if (usize > PAGE_SIZE)
+ return -E2BIG;
+
+ if (usize < FILE_ATTR_SIZE_VER0)
+ return -EINVAL;
+
+ error = copy_struct_from_user(&fattr, sizeof(struct file_attr), ufattr,
+ usize);
+ if (error)
+ return error;
+
+ error = file_attr_to_fileattr(&fattr, &fa);
+ if (error)
+ return error;
+
+ name = getname_maybe_null(filename, at_flags);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+
+ if (!name && dfd >= 0) {
+ CLASS(fd, f)(dfd);
+ if (fd_empty(f))
+ return -EBADF;
+
+ filepath = fd_file(f)->f_path;
+ path_get(&filepath);
+ } else {
+ error = filename_lookup(dfd, name, lookup_flags, &filepath,
+ NULL);
+ if (error)
+ return error;
+ }
+
+ error = mnt_want_write(filepath.mnt);
+ if (!error) {
+ error = vfs_fileattr_set(mnt_idmap(filepath.mnt),
+ filepath.dentry, &fa);
+ if (error == -ENOIOCTLCMD || error == -ENOTTY)
+ error = -EOPNOTSUPP;
+ mnt_drop_write(filepath.mnt);
+ }
+
+ return error;
+}
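From userspace the new syscalls take a struct file_attr plus its size, so older binaries keep working as the structure grows. The sketch below is illustrative only: the __NR_* numbers are placeholders (take the real values from the uapi unistd.h that accompanies this series), and struct file_attr comes from the matching uapi header additions.

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_file_getattr
#define __NR_file_getattr	-1	/* placeholder, see uapi unistd.h */
#define __NR_file_setattr	-1	/* placeholder */
#endif

static long demo_file_getattr(const char *path, struct file_attr *fattr)
{
	return syscall(__NR_file_getattr, AT_FDCWD, path, fattr,
		       sizeof(*fattr), AT_SYMLINK_NOFOLLOW);
}

static long demo_file_setattr_fd(int fd, const struct file_attr *fattr)
{
	/* NULL path + AT_EMPTY_PATH operates on the fd itself, as in the code above */
	return syscall(__NR_file_setattr, fd, NULL, fattr,
		       sizeof(*fattr), AT_EMPTY_PATH);
}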
diff --git a/fs/file_table.c b/fs/file_table.c
index 976736be47cb..cd4a3db4659a 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -52,17 +52,20 @@ struct backing_file {
};
};
-static inline struct backing_file *backing_file(struct file *f)
-{
- return container_of(f, struct backing_file, file);
-}
+#define backing_file(f) container_of(f, struct backing_file, file)
-struct path *backing_file_user_path(struct file *f)
+const struct path *backing_file_user_path(const struct file *f)
{
return &backing_file(f)->user_path;
}
EXPORT_SYMBOL_GPL(backing_file_user_path);
+void backing_file_set_user_path(struct file *f, const struct path *path)
+{
+ backing_file(f)->user_path = *path;
+}
+EXPORT_SYMBOL_GPL(backing_file_set_user_path);
+
static inline void file_free(struct file *f)
{
security_file_free(f);
@@ -102,11 +105,11 @@ EXPORT_SYMBOL_GPL(get_max_files);
static int proc_nr_files(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
- files_stat.nr_files = get_nr_files();
+ files_stat.nr_files = percpu_counter_sum_positive(&nr_files);
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
-static struct ctl_table fs_stat_sysctls[] = {
+static const struct ctl_table fs_stat_sysctls[] = {
{
.procname = "file-nr",
.data = &files_stat,
@@ -128,7 +131,7 @@ static struct ctl_table fs_stat_sysctls[] = {
.data = &sysctl_nr_open,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
+ .proc_handler = proc_douintvec_minmax,
.extra1 = &sysctl_nr_open_min,
.extra2 = &sysctl_nr_open_max,
},
@@ -168,7 +171,7 @@ static int init_file(struct file *f, int flags, const struct cred *cred)
* the respective member when opening the file.
*/
mutex_init(&f->f_pos_lock);
- memset(&f->f_path, 0, sizeof(f->f_path));
+ memset(&f->__f_path, 0, sizeof(f->f_path));
memset(&f->f_ra, 0, sizeof(f->f_ra));
f->f_flags = flags;
@@ -189,11 +192,16 @@ static int init_file(struct file *f, int flags, const struct cred *cred)
f->f_sb_err = 0;
/*
- * We're SLAB_TYPESAFE_BY_RCU so initialize f_count last. While
+ * We're SLAB_TYPESAFE_BY_RCU so initialize f_ref last. While
* fget-rcu pattern users need to be able to handle spurious
* refcount bumps we should reinitialize the reused file first.
*/
file_ref_init(&f->f_ref, 1);
+ /*
+ * Disable permission and pre-content events for all files by default.
+ * They may be enabled later by fsnotify_open_perm_and_set_mode().
+ */
+ file_set_fsnotify_mode(f, FMODE_NONOTIFY_PERM);
return 0;
}
@@ -216,7 +224,8 @@ struct file *alloc_empty_file(int flags, const struct cred *cred)
/*
* Privileged users can go above max_files
*/
- if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
+ if (unlikely(get_nr_files() >= files_stat.max_files) &&
+ !capable(CAP_SYS_ADMIN)) {
/*
* percpu_counters are inaccurate. Do an expensive check before
* we go and fail.
@@ -310,7 +319,7 @@ struct file *alloc_empty_backing_file(int flags, const struct cred *cred)
static void file_init_path(struct file *file, const struct path *path,
const struct file_operations *fop)
{
- file->f_path = *path;
+ file->__f_path = *path;
file->f_inode = path->dentry->d_inode;
file->f_mapping = path->dentry->d_inode->i_mapping;
file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
@@ -351,9 +360,7 @@ static struct file *alloc_file(const struct path *path, int flags,
static inline int alloc_path_pseudo(const char *name, struct inode *inode,
struct vfsmount *mnt, struct path *path)
{
- struct qstr this = QSTR_INIT(name, strlen(name));
-
- path->dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
+ path->dentry = d_alloc_pseudo(mnt->mnt_sb, &QSTR(name));
if (!path->dentry)
return -ENOMEM;
path->mnt = mntget(mnt);
@@ -377,7 +384,13 @@ struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
if (IS_ERR(file)) {
ihold(inode);
path_put(&path);
+ return file;
}
+ /*
+ * Disable all fsnotify events for pseudo files by default.
+ * They may be enabled by caller with file_set_fsnotify_mode().
+ */
+ file_set_fsnotify_mode(file, FMODE_NONOTIFY);
return file;
}
EXPORT_SYMBOL(alloc_file_pseudo);
@@ -402,6 +415,11 @@ struct file *alloc_file_pseudo_noaccount(struct inode *inode,
return file;
}
file_init_path(file, &path, fops);
+ /*
+ * Disable all fsnotify events for pseudo files by default.
+ * They may be enabled by caller with file_set_fsnotify_mode().
+ */
+ file_set_fsnotify_mode(file, FMODE_NONOTIFY);
return file;
}
EXPORT_SYMBOL_GPL(alloc_file_pseudo_noaccount);
@@ -478,6 +496,8 @@ static void ____fput(struct callback_head *work)
__fput(container_of(work, struct file, f_task_work));
}
+static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
+
/*
* If kernel thread really needs to have the final fput() it has done
* to complete, call this. The only user right now is the boot - we
@@ -491,35 +511,40 @@ static void ____fput(struct callback_head *work)
void flush_delayed_fput(void)
{
delayed_fput(NULL);
+ flush_delayed_work(&delayed_fput_work);
}
EXPORT_SYMBOL_GPL(flush_delayed_fput);
-static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
-
-void fput(struct file *file)
+static void __fput_deferred(struct file *file)
{
- if (file_ref_put(&file->f_ref)) {
- struct task_struct *task = current;
+ struct task_struct *task = current;
+
+ if (unlikely(!(file->f_mode & (FMODE_BACKING | FMODE_OPENED)))) {
+ file_free(file);
+ return;
+ }
- if (unlikely(!(file->f_mode & (FMODE_BACKING | FMODE_OPENED)))) {
- file_free(file);
+ if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
+ init_task_work(&file->f_task_work, ____fput);
+ if (!task_work_add(task, &file->f_task_work, TWA_RESUME))
return;
- }
- if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
- init_task_work(&file->f_task_work, ____fput);
- if (!task_work_add(task, &file->f_task_work, TWA_RESUME))
- return;
- /*
- * After this task has run exit_task_work(),
- * task_work_add() will fail. Fall through to delayed
- * fput to avoid leaking *file.
- */
- }
-
- if (llist_add(&file->f_llist, &delayed_fput_list))
- schedule_delayed_work(&delayed_fput_work, 1);
+ /*
+ * After this task has run exit_task_work(),
+ * task_work_add() will fail. Fall through to delayed
+ * fput to avoid leaking *file.
+ */
}
+
+ if (llist_add(&file->f_llist, &delayed_fput_list))
+ schedule_delayed_work(&delayed_fput_work, 1);
+}
+
+void fput(struct file *file)
+{
+ if (unlikely(file_ref_put(&file->f_ref)))
+ __fput_deferred(file);
}
+EXPORT_SYMBOL(fput);
/*
* synchronous analog of fput(); for kernel threads that might be needed
@@ -534,10 +559,32 @@ void __fput_sync(struct file *file)
if (file_ref_put(&file->f_ref))
__fput(file);
}
-
-EXPORT_SYMBOL(fput);
EXPORT_SYMBOL(__fput_sync);
+/*
+ * Equivalent to __fput_sync(), but optimized for being called with the last
+ * reference.
+ *
+ * See file_ref_put_close() for details.
+ */
+void fput_close_sync(struct file *file)
+{
+ if (likely(file_ref_put_close(&file->f_ref)))
+ __fput(file);
+}
+
+/*
+ * Equivalent to fput(), but optimized for being called with the last
+ * reference.
+ *
+ * See file_ref_put_close() for details.
+ */
+void fput_close(struct file *file)
+{
+ if (file_ref_put_close(&file->f_ref))
+ __fput_deferred(file);
+}
+
void __init files_init(void)
{
struct kmem_cache_args args = {
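The new fput_close()/fput_close_sync() helpers let callers that are about to drop what is normally the last reference (close-style paths) take the file_ref_put_close() fast path. A hedged sketch of the intended calling convention; the real call sites are the descriptor close paths, which sit outside this hunk, and the ->flush owner id is omitted for brevity:

static int demo_filp_close(struct file *file)
{
	int retval = 0;

	if (file->f_op->flush)
		retval = file->f_op->flush(file, NULL);	/* owner id omitted */

	/* caller almost certainly holds the last reference: defers like fput() */
	fput_close(file);
	return retval;
}

static void demo_kthread_close(struct file *file)
{
	/* kernel threads that must not defer the final __fput() */
	fput_close_sync(file);
}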
diff --git a/fs/filesystems.c b/fs/filesystems.c
index 58b9067b2391..95e5256821a5 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -156,15 +156,19 @@ static int fs_index(const char __user * __name)
static int fs_name(unsigned int index, char __user * buf)
{
struct file_system_type * tmp;
- int len, res;
+ int len, res = -EINVAL;
read_lock(&file_systems_lock);
- for (tmp = file_systems; tmp; tmp = tmp->next, index--)
- if (index <= 0 && try_module_get(tmp->owner))
+ for (tmp = file_systems; tmp; tmp = tmp->next, index--) {
+ if (index == 0) {
+ if (try_module_get(tmp->owner))
+ res = 0;
break;
+ }
+ }
read_unlock(&file_systems_lock);
- if (!tmp)
- return -EINVAL;
+ if (res)
+ return res;
/* OK, we got the reference, so we can safely block */
len = strlen(tmp->name) + 1;
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index 20600e9ea202..21fc94b98209 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -258,7 +258,7 @@ vxfs_iget(struct super_block *sbp, ino_t ino)
ip = iget_locked(sbp, ino);
if (!ip)
return ERR_PTR(-ENOMEM);
- if (!(ip->i_state & I_NEW))
+ if (!(inode_state_read_once(ip) & I_NEW))
return ip;
vip = VXFS_INO(ip);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 3cd99e2dc6ac..6800886c4d10 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -14,6 +14,7 @@
* Additions for address_space-based writeback
*/
+#include <linux/sched/sysctl.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
@@ -32,11 +33,6 @@
#include "internal.h"
/*
- * 4MB minimal write chunk size
- */
-#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_SHIFT - 10))
-
-/*
* Passed into wb_writeback(), essentially a subset of writeback_control
*/
struct wb_writeback_work {
@@ -65,7 +61,7 @@ struct wb_writeback_work {
* timestamps written to disk after 12 hours, but in the worst case a
* few inodes might not have their timestamps updated for 24 hours.
*/
-unsigned int dirtytime_expire_interval = 12 * 60 * 60;
+static unsigned int dirtytime_expire_interval = 12 * 60 * 60;
static inline struct inode *wb_inode(struct list_head *head)
{
@@ -121,7 +117,7 @@ static bool inode_io_list_move_locked(struct inode *inode,
{
assert_spin_locked(&wb->list_lock);
assert_spin_locked(&inode->i_lock);
- WARN_ON_ONCE(inode->i_state & I_FREEING);
+ WARN_ON_ONCE(inode_state_read(inode) & I_FREEING);
list_move(&inode->i_io_list, head);
@@ -200,6 +196,19 @@ static void wb_queue_work(struct bdi_writeback *wb,
spin_unlock_irq(&wb->work_lock);
}
+static bool wb_wait_for_completion_cb(struct wb_completion *done)
+{
+ unsigned long waited_secs = (jiffies - done->wait_start) / HZ;
+
+ done->progress_stamp = jiffies;
+ if (waited_secs > sysctl_hung_task_timeout_secs)
+ pr_info("INFO: The task %s:%d has been waiting for writeback "
+ "completion for more than %lu seconds.",
+ current->comm, current->pid, waited_secs);
+
+ return !atomic_read(&done->cnt);
+}
+
/**
* wb_wait_for_completion - wait for completion of bdi_writeback_works
* @done: target wb_completion
@@ -212,8 +221,9 @@ static void wb_queue_work(struct bdi_writeback *wb,
*/
void wb_wait_for_completion(struct wb_completion *done)
{
+ done->wait_start = jiffies;
atomic_dec(&done->cnt); /* put down the initial count */
- wait_event(*done->waitq, !atomic_read(&done->cnt));
+ wait_event(*done->waitq, wb_wait_for_completion_cb(done));
}
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -304,9 +314,9 @@ static void inode_cgwb_move_to_attached(struct inode *inode,
{
assert_spin_locked(&wb->list_lock);
assert_spin_locked(&inode->i_lock);
- WARN_ON_ONCE(inode->i_state & I_FREEING);
+ WARN_ON_ONCE(inode_state_read(inode) & I_FREEING);
- inode->i_state &= ~I_SYNC_QUEUED;
+ inode_state_clear(inode, I_SYNC_QUEUED);
if (wb != &wb->bdi->wb)
list_move(&inode->i_io_list, &wb->b_attached);
else
@@ -368,7 +378,8 @@ static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
}
struct inode_switch_wbs_context {
- struct rcu_work work;
+ /* List of queued switching contexts for the wb */
+ struct llist_node list;
/*
* Multiple inodes can be switched at once. The switching procedure
@@ -378,7 +389,6 @@ struct inode_switch_wbs_context {
* array embedded into struct inode_switch_wbs_context. Otherwise
* an inode could be left in a non-consistent state.
*/
- struct bdi_writeback *new_wb;
struct inode *inodes[];
};
@@ -408,7 +418,7 @@ static bool inode_do_switch_wbs(struct inode *inode,
* Once I_FREEING or I_WILL_FREE are visible under i_lock, the eviction
* path owns the inode and we shouldn't modify ->i_io_list.
*/
- if (unlikely(inode->i_state & (I_FREEING | I_WILL_FREE)))
+ if (unlikely(inode_state_read(inode) & (I_FREEING | I_WILL_FREE)))
goto skip_switch;
trace_inode_switch_wbs(inode, old_wb, new_wb);
@@ -445,22 +455,23 @@ static bool inode_do_switch_wbs(struct inode *inode,
* Transfer to @new_wb's IO list if necessary. If the @inode is dirty,
* the specific list @inode was on is ignored and the @inode is put on
* ->b_dirty which is always correct including from ->b_dirty_time.
- * The transfer preserves @inode->dirtied_when ordering. If the @inode
- * was clean, it means it was on the b_attached list, so move it onto
- * the b_attached list of @new_wb.
+ * If the @inode was clean, it means it was on the b_attached list, so
+ * move it onto the b_attached list of @new_wb.
*/
if (!list_empty(&inode->i_io_list)) {
inode->i_wb = new_wb;
- if (inode->i_state & I_DIRTY_ALL) {
- struct inode *pos;
-
- list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
- if (time_after_eq(inode->dirtied_when,
- pos->dirtied_when))
- break;
+ if (inode_state_read(inode) & I_DIRTY_ALL) {
+ /*
+ * We need to keep b_dirty list sorted by
+ * dirtied_time_when. However properly sorting the
+ * inode in the list gets too expensive when switching
+ * many inodes. So just attach inode at the end of the
+ * dirty list and clobber the dirtied_time_when.
+ */
+ inode->dirtied_time_when = jiffies;
inode_io_list_move_locked(inode, new_wb,
- pos->i_io_list.prev);
+ &new_wb->b_dirty);
} else {
inode_cgwb_move_to_attached(inode, new_wb);
}
@@ -475,10 +486,11 @@ static bool inode_do_switch_wbs(struct inode *inode,
switched = true;
skip_switch:
/*
- * Paired with load_acquire in unlocked_inode_to_wb_begin() and
+ * Paired with an acquire fence in unlocked_inode_to_wb_begin() and
* ensures that the new wb is visible if they see !I_WB_SWITCH.
*/
- smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
+ smp_wmb();
+ inode_state_clear(inode, I_WB_SWITCH);
xa_unlock_irq(&mapping->i_pages);
spin_unlock(&inode->i_lock);
@@ -486,13 +498,11 @@ skip_switch:
return switched;
}
-static void inode_switch_wbs_work_fn(struct work_struct *work)
+static void process_inode_switch_wbs(struct bdi_writeback *new_wb,
+ struct inode_switch_wbs_context *isw)
{
- struct inode_switch_wbs_context *isw =
- container_of(to_rcu_work(work), struct inode_switch_wbs_context, work);
struct backing_dev_info *bdi = inode_to_bdi(isw->inodes[0]);
struct bdi_writeback *old_wb = isw->inodes[0]->i_wb;
- struct bdi_writeback *new_wb = isw->new_wb;
unsigned long nr_switched = 0;
struct inode **inodep;
@@ -502,6 +512,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
*/
down_read(&bdi->wb_switch_rwsem);
+ inodep = isw->inodes;
/*
* By the time control reaches here, RCU grace period has passed
* since I_WB_SWITCH assertion and all wb stat update transactions
@@ -512,6 +523,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
* gives us exclusion against all wb related operations on @inode
* including IO list manipulations and stat updates.
*/
+relock:
if (old_wb < new_wb) {
spin_lock(&old_wb->list_lock);
spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
@@ -520,10 +532,17 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
}
- for (inodep = isw->inodes; *inodep; inodep++) {
+ while (*inodep) {
WARN_ON_ONCE((*inodep)->i_wb != old_wb);
if (inode_do_switch_wbs(*inodep, old_wb, new_wb))
nr_switched++;
+ inodep++;
+ if (*inodep && need_resched()) {
+ spin_unlock(&new_wb->list_lock);
+ spin_unlock(&old_wb->list_lock);
+ cond_resched();
+ goto relock;
+ }
}
spin_unlock(&new_wb->list_lock);
@@ -543,6 +562,38 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
atomic_dec(&isw_nr_in_flight);
}
+void inode_switch_wbs_work_fn(struct work_struct *work)
+{
+ struct bdi_writeback *new_wb = container_of(work, struct bdi_writeback,
+ switch_work);
+ struct inode_switch_wbs_context *isw, *next_isw;
+ struct llist_node *list;
+
+ /*
+ * Grab our reference to wb so that it cannot get freed under us
+ * after we process all the isw items.
+ */
+ wb_get(new_wb);
+ while (1) {
+ list = llist_del_all(&new_wb->switch_wbs_ctxs);
+ /* Nothing to do? */
+ if (!list)
+ break;
+ /*
+ * In addition to synchronizing among switchers, I_WB_SWITCH
+ * tells the RCU protected stat update paths to grab the i_page
+ * lock so that stat transfer can synchronize against them.
+ * Let's continue after I_WB_SWITCH is guaranteed to be
+ * visible.
+ */
+ synchronize_rcu();
+
+ llist_for_each_entry_safe(isw, next_isw, list, list)
+ process_inode_switch_wbs(new_wb, isw);
+ }
+ wb_put(new_wb);
+}
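The function above relies on the usual lockless llist hand-off: producers append with llist_add() and only the one that installs the first node queues the work item, while the worker drains the whole list with llist_del_all() and loops until it stays empty. A self-contained sketch of that generic pattern (illustrative only, not part of the patch; system_percpu_wq is this tree's name for the default per-CPU workqueue):

#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_item {
	struct llist_node node;
};

static LLIST_HEAD(demo_list);

static void demo_worker(struct work_struct *work)
{
	struct llist_node *list;
	struct demo_item *item, *next;

	/* Drain everything queued so far; loop in case producers keep adding. */
	while ((list = llist_del_all(&demo_list)) != NULL) {
		llist_for_each_entry_safe(item, next, list, node)
			kfree(item);		/* "process" the item */
	}
}

static DECLARE_WORK(demo_work, demo_worker);

static void demo_queue(struct demo_item *item)
{
	/* only the producer that adds the first node kicks the worker */
	if (llist_add(&item->node, &demo_list))
		queue_work(system_percpu_wq, &demo_work);
}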
+
static bool inode_prepare_wbs_switch(struct inode *inode,
struct bdi_writeback *new_wb)
{
@@ -560,18 +611,25 @@ static bool inode_prepare_wbs_switch(struct inode *inode,
/* while holding I_WB_SWITCH, no one else can update the association */
spin_lock(&inode->i_lock);
if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
- inode->i_state & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) ||
+ inode_state_read(inode) & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) ||
inode_to_wb(inode) == new_wb) {
spin_unlock(&inode->i_lock);
return false;
}
- inode->i_state |= I_WB_SWITCH;
+ inode_state_set(inode, I_WB_SWITCH);
__iget(inode);
spin_unlock(&inode->i_lock);
return true;
}
+static void wb_queue_isw(struct bdi_writeback *wb,
+ struct inode_switch_wbs_context *isw)
+{
+ if (llist_add(&isw->list, &wb->switch_wbs_ctxs))
+ queue_work(isw_wq, &wb->switch_work);
+}
+
/**
* inode_switch_wbs - change the wb association of an inode
* @inode: target inode
@@ -585,9 +643,10 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
struct backing_dev_info *bdi = inode_to_bdi(inode);
struct cgroup_subsys_state *memcg_css;
struct inode_switch_wbs_context *isw;
+ struct bdi_writeback *new_wb = NULL;
/* noop if seems to be already in progress */
- if (inode->i_state & I_WB_SWITCH)
+ if (inode_state_read_once(inode) & I_WB_SWITCH)
return;
/* avoid queueing a new switch if too many are already in flight */
@@ -609,40 +668,35 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
if (!memcg_css)
goto out_free;
- isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
+ new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
css_put(memcg_css);
- if (!isw->new_wb)
+ if (!new_wb)
goto out_free;
- if (!inode_prepare_wbs_switch(inode, isw->new_wb))
+ if (!inode_prepare_wbs_switch(inode, new_wb))
goto out_free;
isw->inodes[0] = inode;
- /*
- * In addition to synchronizing among switchers, I_WB_SWITCH tells
- * the RCU protected stat update paths to grab the i_page
- * lock so that stat transfer can synchronize against them.
- * Let's continue after I_WB_SWITCH is guaranteed to be visible.
- */
- INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
- queue_rcu_work(isw_wq, &isw->work);
+ trace_inode_switch_wbs_queue(inode->i_wb, new_wb, 1);
+ wb_queue_isw(new_wb, isw);
return;
out_free:
atomic_dec(&isw_nr_in_flight);
- if (isw->new_wb)
- wb_put(isw->new_wb);
+ if (new_wb)
+ wb_put(new_wb);
kfree(isw);
}
-static bool isw_prepare_wbs_switch(struct inode_switch_wbs_context *isw,
+static bool isw_prepare_wbs_switch(struct bdi_writeback *new_wb,
+ struct inode_switch_wbs_context *isw,
struct list_head *list, int *nr)
{
struct inode *inode;
list_for_each_entry(inode, list, i_io_list) {
- if (!inode_prepare_wbs_switch(inode, isw->new_wb))
+ if (!inode_prepare_wbs_switch(inode, new_wb))
continue;
isw->inodes[*nr] = inode;
@@ -666,6 +720,7 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
{
struct cgroup_subsys_state *memcg_css;
struct inode_switch_wbs_context *isw;
+ struct bdi_writeback *new_wb;
int nr;
bool restart = false;
@@ -678,12 +733,12 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
for (memcg_css = wb->memcg_css->parent; memcg_css;
memcg_css = memcg_css->parent) {
- isw->new_wb = wb_get_create(wb->bdi, memcg_css, GFP_KERNEL);
- if (isw->new_wb)
+ new_wb = wb_get_create(wb->bdi, memcg_css, GFP_KERNEL);
+ if (new_wb)
break;
}
- if (unlikely(!isw->new_wb))
- isw->new_wb = &wb->bdi->wb; /* wb_get() is noop for bdi's wb */
+ if (unlikely(!new_wb))
+ new_wb = &wb->bdi->wb; /* wb_get() is noop for bdi's wb */
nr = 0;
spin_lock(&wb->list_lock);
@@ -695,27 +750,22 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
* bandwidth restrictions, as writeback of inode metadata is not
* accounted for.
*/
- restart = isw_prepare_wbs_switch(isw, &wb->b_attached, &nr);
+ restart = isw_prepare_wbs_switch(new_wb, isw, &wb->b_attached, &nr);
if (!restart)
- restart = isw_prepare_wbs_switch(isw, &wb->b_dirty_time, &nr);
+ restart = isw_prepare_wbs_switch(new_wb, isw, &wb->b_dirty_time,
+ &nr);
spin_unlock(&wb->list_lock);
/* no attached inodes? bail out */
if (nr == 0) {
atomic_dec(&isw_nr_in_flight);
- wb_put(isw->new_wb);
+ wb_put(new_wb);
kfree(isw);
return restart;
}
- /*
- * In addition to synchronizing among switchers, I_WB_SWITCH tells
- * the RCU protected stat update paths to grab the i_page
- * lock so that stat transfer can synchronize against them.
- * Let's continue after I_WB_SWITCH is guaranteed to be visible.
- */
- INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
- queue_rcu_work(isw_wq, &isw->work);
+ trace_inode_switch_wbs_queue(wb, new_wb, nr);
+ wb_queue_isw(new_wb, isw);
return restart;
}
@@ -768,9 +818,9 @@ static void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
* @wbc: writeback_control of interest
* @inode: target inode
*
- * This function is to be used by __filemap_fdatawrite_range(), which is an
- * alternative entry point into writeback code, and first ensures @inode is
- * associated with a bdi_writeback and attaches it to @wbc.
+ * This function is to be used by filemap_writeback(), which is an alternative
+ * entry point into writeback code, and first ensures @inode is associated with
+ * a bdi_writeback and attaches it to @wbc.
*/
void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
struct inode *inode)
@@ -1123,7 +1173,7 @@ int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
dirty = dirty * 10 / 8;
/* issue the writeback work */
- work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN);
+ work = kzalloc(sizeof(*work), GFP_NOWAIT);
if (work) {
work->nr_pages = dirty;
work->sync_mode = WB_SYNC_NONE;
@@ -1180,7 +1230,7 @@ void cgroup_writeback_umount(struct super_block *sb)
static int __init cgroup_writeback_init(void)
{
- isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
+ isw_wq = alloc_workqueue("inode_switch_wbs", WQ_PERCPU, 0);
if (!isw_wq)
return -ENOMEM;
return 0;
@@ -1197,9 +1247,9 @@ static void inode_cgwb_move_to_attached(struct inode *inode,
{
assert_spin_locked(&wb->list_lock);
assert_spin_locked(&inode->i_lock);
- WARN_ON_ONCE(inode->i_state & I_FREEING);
+ WARN_ON_ONCE(inode_state_read(inode) & I_FREEING);
- inode->i_state &= ~I_SYNC_QUEUED;
+ inode_state_clear(inode, I_SYNC_QUEUED);
list_del_init(&inode->i_io_list);
wb_io_lists_depopulated(wb);
}
@@ -1309,10 +1359,17 @@ void inode_io_list_del(struct inode *inode)
{
struct bdi_writeback *wb;
+ /*
+ * FIXME: ext4 can call here from ext4_evict_inode() after evict() already
+ * unlinked the inode.
+ */
+ if (list_empty_careful(&inode->i_io_list))
+ return;
+
wb = inode_to_wb_and_lock_list(inode);
spin_lock(&inode->i_lock);
- inode->i_state &= ~I_SYNC_QUEUED;
+ inode_state_clear(inode, I_SYNC_QUEUED);
list_del_init(&inode->i_io_list);
wb_io_lists_depopulated(wb);
@@ -1370,13 +1427,13 @@ static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
{
assert_spin_locked(&inode->i_lock);
- inode->i_state &= ~I_SYNC_QUEUED;
+ inode_state_clear(inode, I_SYNC_QUEUED);
/*
* When the inode is being freed just don't bother with dirty list
* tracking. Flush worker will ignore this inode anyway and it will
* trigger assertions in inode_io_list_move_locked().
*/
- if (inode->i_state & I_FREEING) {
+ if (inode_state_read(inode) & I_FREEING) {
list_del_init(&inode->i_io_list);
wb_io_lists_depopulated(wb);
return;
@@ -1410,9 +1467,9 @@ static void inode_sync_complete(struct inode *inode)
{
assert_spin_locked(&inode->i_lock);
- inode->i_state &= ~I_SYNC;
+ inode_state_clear(inode, I_SYNC);
/* If inode is clean and unused, put it into LRU now... */
- inode_add_lru(inode);
+ inode_lru_list_add(inode);
/* Called with inode->i_lock which ensures memory ordering. */
inode_wake_up_bit(inode, __I_SYNC);
}
@@ -1454,7 +1511,7 @@ static int move_expired_inodes(struct list_head *delaying_queue,
spin_lock(&inode->i_lock);
list_move(&inode->i_io_list, &tmp);
moved++;
- inode->i_state |= I_SYNC_QUEUED;
+ inode_state_set(inode, I_SYNC_QUEUED);
spin_unlock(&inode->i_lock);
if (sb_is_blkdev_sb(inode->i_sb))
continue;
@@ -1540,14 +1597,14 @@ void inode_wait_for_writeback(struct inode *inode)
assert_spin_locked(&inode->i_lock);
- if (!(inode->i_state & I_SYNC))
+ if (!(inode_state_read(inode) & I_SYNC))
return;
wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC);
for (;;) {
prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
/* Checking I_SYNC with inode->i_lock guarantees memory ordering. */
- if (!(inode->i_state & I_SYNC))
+ if (!(inode_state_read(inode) & I_SYNC))
break;
spin_unlock(&inode->i_lock);
schedule();
@@ -1573,7 +1630,7 @@ static void inode_sleep_on_writeback(struct inode *inode)
wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC);
prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
/* Checking I_SYNC with inode->i_lock guarantees memory ordering. */
- sleep = !!(inode->i_state & I_SYNC);
+ sleep = !!(inode_state_read(inode) & I_SYNC);
spin_unlock(&inode->i_lock);
if (sleep)
schedule();
@@ -1592,7 +1649,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
struct writeback_control *wbc,
unsigned long dirtied_before)
{
- if (inode->i_state & I_FREEING)
+ if (inode_state_read(inode) & I_FREEING)
return;
/*
@@ -1600,7 +1657,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
* shot. If still dirty, it will be redirty_tail()'ed below. Update
* the dirty time to prevent enqueue and sync it again.
*/
- if ((inode->i_state & I_DIRTY) &&
+ if ((inode_state_read(inode) & I_DIRTY) &&
(wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
inode->dirtied_when = jiffies;
@@ -1611,7 +1668,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
* is odd for clean inodes, it can happen for some
* filesystems so handle that gracefully.
*/
- if (inode->i_state & I_DIRTY_ALL)
+ if (inode_state_read(inode) & I_DIRTY_ALL)
redirty_tail_locked(inode, wb);
else
inode_cgwb_move_to_attached(inode, wb);
@@ -1637,17 +1694,17 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
*/
redirty_tail_locked(inode, wb);
}
- } else if (inode->i_state & I_DIRTY) {
+ } else if (inode_state_read(inode) & I_DIRTY) {
/*
* Filesystems can dirty the inode during writeback operations,
* such as delayed allocation during submission or metadata
* updates after data IO completion.
*/
redirty_tail_locked(inode, wb);
- } else if (inode->i_state & I_DIRTY_TIME) {
+ } else if (inode_state_read(inode) & I_DIRTY_TIME) {
inode->dirtied_when = jiffies;
inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
- inode->i_state &= ~I_SYNC_QUEUED;
+ inode_state_clear(inode, I_SYNC_QUEUED);
} else {
/* The inode is clean. Remove from writeback lists. */
inode_cgwb_move_to_attached(inode, wb);
@@ -1673,7 +1730,7 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
unsigned dirty;
int ret;
- WARN_ON(!(inode->i_state & I_SYNC));
+ WARN_ON(!(inode_state_read_once(inode) & I_SYNC));
trace_writeback_single_inode_start(inode, wbc, nr_to_write);
@@ -1697,7 +1754,7 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* mark_inode_dirty_sync() to notify the filesystem about it and to
* change I_DIRTY_TIME into I_DIRTY_SYNC.
*/
- if ((inode->i_state & I_DIRTY_TIME) &&
+ if ((inode_state_read_once(inode) & I_DIRTY_TIME) &&
(wbc->sync_mode == WB_SYNC_ALL ||
time_after(jiffies, inode->dirtied_time_when +
dirtytime_expire_interval * HZ))) {
@@ -1712,8 +1769,8 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* after handling timestamp expiration, as that may dirty the inode too.
*/
spin_lock(&inode->i_lock);
- dirty = inode->i_state & I_DIRTY;
- inode->i_state &= ~dirty;
+ dirty = inode_state_read(inode) & I_DIRTY;
+ inode_state_clear(inode, dirty);
/*
* Paired with smp_mb() in __mark_inode_dirty(). This allows
@@ -1729,10 +1786,10 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
smp_mb();
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
- inode->i_state |= I_DIRTY_PAGES;
- else if (unlikely(inode->i_state & I_PINNING_NETFS_WB)) {
- if (!(inode->i_state & I_DIRTY_PAGES)) {
- inode->i_state &= ~I_PINNING_NETFS_WB;
+ inode_state_set(inode, I_DIRTY_PAGES);
+ else if (unlikely(inode_state_read(inode) & I_PINNING_NETFS_WB)) {
+ if (!(inode_state_read(inode) & I_DIRTY_PAGES)) {
+ inode_state_clear(inode, I_PINNING_NETFS_WB);
wbc->unpinned_netfs_wb = true;
dirty |= I_PINNING_NETFS_WB; /* Cause write_inode */
}
@@ -1767,12 +1824,12 @@ static int writeback_single_inode(struct inode *inode,
int ret = 0;
spin_lock(&inode->i_lock);
- if (!atomic_read(&inode->i_count))
- WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
+ if (!icount_read(inode))
+ WARN_ON(!(inode_state_read(inode) & (I_WILL_FREE | I_FREEING)));
else
- WARN_ON(inode->i_state & I_WILL_FREE);
+ WARN_ON(inode_state_read(inode) & I_WILL_FREE);
- if (inode->i_state & I_SYNC) {
+ if (inode_state_read(inode) & I_SYNC) {
/*
* Writeback is already running on the inode. For WB_SYNC_NONE,
* that's enough and we can just return. For WB_SYNC_ALL, we
@@ -1783,7 +1840,7 @@ static int writeback_single_inode(struct inode *inode,
goto out;
inode_wait_for_writeback(inode);
}
- WARN_ON(inode->i_state & I_SYNC);
+ WARN_ON(inode_state_read(inode) & I_SYNC);
/*
* If the inode is already fully clean, then there's nothing to do.
*
@@ -1791,11 +1848,11 @@ static int writeback_single_inode(struct inode *inode,
* still under writeback, e.g. due to prior WB_SYNC_NONE writeback. If
* there are any such pages, we'll need to wait for them.
*/
- if (!(inode->i_state & I_DIRTY_ALL) &&
+ if (!(inode_state_read(inode) & I_DIRTY_ALL) &&
(wbc->sync_mode != WB_SYNC_ALL ||
!mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
goto out;
- inode->i_state |= I_SYNC;
+ inode_state_set(inode, I_SYNC);
wbc_attach_and_unlock_inode(wbc, inode);
ret = __writeback_single_inode(inode, wbc);
@@ -1808,18 +1865,18 @@ static int writeback_single_inode(struct inode *inode,
* If the inode is freeing, its i_io_list shouldn't be updated
* as it can be finally deleted at this moment.
*/
- if (!(inode->i_state & I_FREEING)) {
+ if (!(inode_state_read(inode) & I_FREEING)) {
/*
* If the inode is now fully clean, then it can be safely
* removed from its writeback list (if any). Otherwise the
* flusher threads are responsible for the writeback lists.
*/
- if (!(inode->i_state & I_DIRTY_ALL))
+ if (!(inode_state_read(inode) & I_DIRTY_ALL))
inode_cgwb_move_to_attached(inode, wb);
- else if (!(inode->i_state & I_SYNC_QUEUED)) {
- if ((inode->i_state & I_DIRTY))
+ else if (!(inode_state_read(inode) & I_SYNC_QUEUED)) {
+ if ((inode_state_read(inode) & I_DIRTY))
redirty_tail_locked(inode, wb);
- else if (inode->i_state & I_DIRTY_TIME) {
+ else if (inode_state_read(inode) & I_DIRTY_TIME) {
inode->dirtied_when = jiffies;
inode_io_list_move_locked(inode,
wb,
@@ -1835,8 +1892,8 @@ out:
return ret;
}
-static long writeback_chunk_size(struct bdi_writeback *wb,
- struct wb_writeback_work *work)
+static long writeback_chunk_size(struct super_block *sb,
+ struct bdi_writeback *wb, struct wb_writeback_work *work)
{
long pages;
@@ -1854,16 +1911,13 @@ static long writeback_chunk_size(struct bdi_writeback *wb,
* (maybe slowly) sync all tagged pages
*/
if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
- pages = LONG_MAX;
- else {
- pages = min(wb->avg_write_bandwidth / 2,
- global_wb_domain.dirty_limit / DIRTY_SCOPE);
- pages = min(pages, work->nr_pages);
- pages = round_down(pages + MIN_WRITEBACK_PAGES,
- MIN_WRITEBACK_PAGES);
- }
+ return LONG_MAX;
- return pages;
+ pages = min(wb->avg_write_bandwidth / 2,
+ global_wb_domain.dirty_limit / DIRTY_SCOPE);
+ pages = min(pages, work->nr_pages);
+ return round_down(pages + sb->s_min_writeback_pages,
+ sb->s_min_writeback_pages);
}
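As a worked example (illustrative numbers only): with wb->avg_write_bandwidth / 2 = 3000 pages, global_wb_domain.dirty_limit / DIRTY_SCOPE = 100000 pages, work->nr_pages = 2500 and sb->s_min_writeback_pages = 1024, the computation yields min(3000, 100000) = 3000, then min(3000, 2500) = 2500, and finally round_down(2500 + 1024, 1024) = 3072 pages, so the chunk is always a non-zero multiple of the per-superblock minimum even when the bandwidth-derived estimate is small.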
/*
@@ -1928,12 +1982,12 @@ static long writeback_sb_inodes(struct super_block *sb,
* kind writeout is handled by the freer.
*/
spin_lock(&inode->i_lock);
- if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+ if (inode_state_read(inode) & (I_NEW | I_FREEING | I_WILL_FREE)) {
redirty_tail_locked(inode, wb);
spin_unlock(&inode->i_lock);
continue;
}
- if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
+ if ((inode_state_read(inode) & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
/*
* If this inode is locked for writeback and we are not
* doing writeback-for-data-integrity, move it to
@@ -1955,17 +2009,17 @@ static long writeback_sb_inodes(struct super_block *sb,
* are doing WB_SYNC_NONE writeback. So this catches only the
* WB_SYNC_ALL case.
*/
- if (inode->i_state & I_SYNC) {
+ if (inode_state_read(inode) & I_SYNC) {
/* Wait for I_SYNC. This function drops i_lock... */
inode_sleep_on_writeback(inode);
/* Inode may be gone, start again */
spin_lock(&wb->list_lock);
continue;
}
- inode->i_state |= I_SYNC;
+ inode_state_set(inode, I_SYNC);
wbc_attach_and_unlock_inode(&wbc, inode);
- write_chunk = writeback_chunk_size(wb, work);
+ write_chunk = writeback_chunk_size(inode->i_sb, wb, work);
wbc.nr_to_write = write_chunk;
wbc.pages_skipped = 0;
@@ -1975,6 +2029,12 @@ static long writeback_sb_inodes(struct super_block *sb,
*/
__writeback_single_inode(inode, &wbc);
+ /* Report progress to inform the hung task detector of the progress. */
+ if (work->done && work->done->progress_stamp &&
+ (jiffies - work->done->progress_stamp) > HZ *
+ sysctl_hung_task_timeout_secs / 2)
+ wake_up_all(work->done->waitq);
+
wbc_detach_inode(&wbc);
work->nr_pages -= write_chunk - wbc.nr_to_write;
wrote = write_chunk - wbc.nr_to_write - wbc.pages_skipped;
@@ -2000,7 +2060,7 @@ static long writeback_sb_inodes(struct super_block *sb,
*/
tmp_wb = inode_to_wb_and_lock_list(inode);
spin_lock(&inode->i_lock);
- if (!(inode->i_state & I_DIRTY_ALL))
+ if (!(inode_state_read(inode) & I_DIRTY_ALL))
total_wrote++;
requeue_inode(inode, tmp_wb, &wbc, dirtied_before);
inode_sync_complete(inode);
@@ -2435,24 +2495,36 @@ static void wakeup_dirtytime_writeback(struct work_struct *w)
schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
}
-static int __init start_dirtytime_writeback(void)
-{
- schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
- return 0;
-}
-__initcall(start_dirtytime_writeback);
-
-int dirtytime_interval_handler(const struct ctl_table *table, int write,
+static int dirtytime_interval_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret == 0 && write)
- mod_delayed_work(system_wq, &dirtytime_work, 0);
+ mod_delayed_work(system_percpu_wq, &dirtytime_work, 0);
return ret;
}
+static const struct ctl_table vm_fs_writeback_table[] = {
+ {
+ .procname = "dirtytime_expire_seconds",
+ .data = &dirtytime_expire_interval,
+ .maxlen = sizeof(dirtytime_expire_interval),
+ .mode = 0644,
+ .proc_handler = dirtytime_interval_handler,
+ .extra1 = SYSCTL_ZERO,
+ },
+};
+
+static int __init start_dirtytime_writeback(void)
+{
+ schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
+ register_sysctl_init("vm", vm_fs_writeback_table);
+ return 0;
+}
+__initcall(start_dirtytime_writeback);
+
/**
* __mark_inode_dirty - internal function to mark an inode dirty
*
@@ -2494,10 +2566,10 @@ void __mark_inode_dirty(struct inode *inode, int flags)
* We tell ->dirty_inode callback that timestamps need to
* be updated by setting I_DIRTY_TIME in flags.
*/
- if (inode->i_state & I_DIRTY_TIME) {
+ if (inode_state_read_once(inode) & I_DIRTY_TIME) {
spin_lock(&inode->i_lock);
- if (inode->i_state & I_DIRTY_TIME) {
- inode->i_state &= ~I_DIRTY_TIME;
+ if (inode_state_read(inode) & I_DIRTY_TIME) {
+ inode_state_clear(inode, I_DIRTY_TIME);
flags |= I_DIRTY_TIME;
}
spin_unlock(&inode->i_lock);
@@ -2534,16 +2606,16 @@ void __mark_inode_dirty(struct inode *inode, int flags)
*/
smp_mb();
- if ((inode->i_state & flags) == flags)
+ if ((inode_state_read_once(inode) & flags) == flags)
return;
spin_lock(&inode->i_lock);
- if ((inode->i_state & flags) != flags) {
- const int was_dirty = inode->i_state & I_DIRTY;
+ if ((inode_state_read(inode) & flags) != flags) {
+ const int was_dirty = inode_state_read(inode) & I_DIRTY;
inode_attach_wb(inode, NULL);
- inode->i_state |= flags;
+ inode_state_set(inode, flags);
/*
* Grab inode's wb early because it requires dropping i_lock and we
@@ -2562,7 +2634,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
* the inode it will place it on the appropriate superblock
* list, based upon its state.
*/
- if (inode->i_state & I_SYNC_QUEUED)
+ if (inode_state_read(inode) & I_SYNC_QUEUED)
goto out_unlock;
/*
@@ -2573,7 +2645,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
if (inode_unhashed(inode))
goto out_unlock;
}
- if (inode->i_state & I_FREEING)
+ if (inode_state_read(inode) & I_FREEING)
goto out_unlock;
/*
@@ -2588,7 +2660,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
if (dirtytime)
inode->dirtied_time_when = jiffies;
- if (inode->i_state & I_DIRTY)
+ if (inode_state_read(inode) & I_DIRTY)
dirty_list = &wb->b_dirty;
else
dirty_list = &wb->b_dirty_time;
@@ -2596,10 +2668,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
wakeup_bdi = inode_io_list_move_locked(inode, wb,
dirty_list);
- spin_unlock(&wb->list_lock);
- spin_unlock(&inode->i_lock);
- trace_writeback_dirty_inode_enqueue(inode);
-
/*
* If this is the first dirty inode for this bdi,
* we have to wake-up the corresponding bdi thread
@@ -2609,6 +2677,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
if (wakeup_bdi &&
(wb->bdi->capabilities & BDI_CAP_WRITEBACK))
wb_wakeup_delayed(wb);
+
+ spin_unlock(&wb->list_lock);
+ spin_unlock(&inode->i_lock);
+ trace_writeback_dirty_inode_enqueue(inode);
+
return;
}
}
@@ -2684,7 +2757,7 @@ static void wait_sb_inodes(struct super_block *sb)
spin_unlock_irq(&sb->s_inode_wblist_lock);
spin_lock(&inode->i_lock);
- if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
+ if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) {
spin_unlock(&inode->i_lock);
spin_lock_irq(&sb->s_inode_wblist_lock);
diff --git a/fs/fs_context.c b/fs/fs_context.c
index 98589aae5208..93b7ebf8d927 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -161,25 +161,24 @@ int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param)
EXPORT_SYMBOL(vfs_parse_fs_param);
/**
- * vfs_parse_fs_string - Convenience function to just parse a string.
+ * vfs_parse_fs_qstr - Convenience function to just parse a string.
* @fc: Filesystem context.
* @key: Parameter name.
* @value: Default value.
- * @v_size: Maximum number of bytes in the value.
*/
-int vfs_parse_fs_string(struct fs_context *fc, const char *key,
- const char *value, size_t v_size)
+int vfs_parse_fs_qstr(struct fs_context *fc, const char *key,
+ const struct qstr *value)
{
int ret;
struct fs_parameter param = {
.key = key,
.type = fs_value_is_flag,
- .size = v_size,
+ .size = value ? value->len : 0,
};
if (value) {
- param.string = kmemdup_nul(value, v_size, GFP_KERNEL);
+ param.string = kmemdup_nul(value->name, value->len, GFP_KERNEL);
if (!param.string)
return -ENOMEM;
param.type = fs_value_is_string;
@@ -189,7 +188,7 @@ int vfs_parse_fs_string(struct fs_context *fc, const char *key,
kfree(param.string);
return ret;
}
-EXPORT_SYMBOL(vfs_parse_fs_string);
+EXPORT_SYMBOL(vfs_parse_fs_qstr);
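For callers the conversion from vfs_parse_fs_string() is mechanical; a hedged sketch (illustrative only), using the QSTR() helper already relied on elsewhere in this series:

static int demo_set_source(struct fs_context *fc, const char *dev_name)
{
	/* previously: vfs_parse_fs_string(fc, "source", dev_name, strlen(dev_name)) */
	return vfs_parse_fs_qstr(fc, "source", &QSTR(dev_name));
}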
/**
* vfs_parse_monolithic_sep - Parse key[=val][,key[=val]]* mount data
@@ -218,16 +217,14 @@ int vfs_parse_monolithic_sep(struct fs_context *fc, void *data,
while ((key = sep(&options)) != NULL) {
if (*key) {
- size_t v_len = 0;
char *value = strchr(key, '=');
if (value) {
- if (value == key)
+ if (unlikely(value == key))
continue;
*value++ = 0;
- v_len = strlen(value);
}
- ret = vfs_parse_fs_string(fc, key, value, v_len);
+ ret = vfs_parse_fs_string(fc, key, value);
if (ret < 0)
break;
}
@@ -449,6 +446,10 @@ void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt,
printk(KERN_ERR "%s%s%pV\n", prefix ? prefix : "",
prefix ? ": " : "", &vaf);
break;
+ case 'i':
+ printk(KERN_INFO "%s%s%pV\n", prefix ? prefix : "",
+ prefix ? ": " : "", &vaf);
+ break;
default:
printk(KERN_NOTICE "%s%s%pV\n", prefix ? prefix : "",
prefix ? ": " : "", &vaf);
@@ -493,7 +494,7 @@ static void put_fc_log(struct fs_context *fc)
if (log) {
if (refcount_dec_and_test(&log->usage)) {
fc->log.log = NULL;
- for (i = 0; i <= 7; i++)
+ for (i = 0; i < ARRAY_SIZE(log->buffer); i++)
if (log->need_free & (1 << i))
kfree(log->buffer[i]);
kfree(log);
diff --git a/fs/fs_types.c b/fs/fs_dirent.c
index 78365e5dc08c..e5e08f213816 100644
--- a/fs/fs_types.c
+++ b/fs/fs_dirent.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/fs.h>
+#include <linux/fs_dirent.h>
#include <linux/export.h>
/*
diff --git a/fs/fs_parser.c b/fs/fs_parser.c
index 16fa61ef56bf..c092a9f79e32 100644
--- a/fs/fs_parser.c
+++ b/fs/fs_parser.c
@@ -13,7 +13,7 @@
#include <linux/namei.h>
#include "internal.h"
-static const struct constant_table bool_names[] = {
+const struct constant_table bool_names[] = {
{ "0", false },
{ "1", true },
{ "false", false },
@@ -22,6 +22,7 @@ static const struct constant_table bool_names[] = {
{ "yes", true },
{ },
};
+EXPORT_SYMBOL(bool_names);
static const struct constant_table *
__lookup_constant(const struct constant_table *tbl, const char *name)
@@ -379,58 +380,9 @@ EXPORT_SYMBOL(fs_param_is_path);
#ifdef CONFIG_VALIDATE_FS_PARSER
/**
- * validate_constant_table - Validate a constant table
- * @tbl: The constant table to validate.
- * @tbl_size: The size of the table.
- * @low: The lowest permissible value.
- * @high: The highest permissible value.
- * @special: One special permissible value outside of the range.
- */
-bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
- int low, int high, int special)
-{
- size_t i;
- bool good = true;
-
- if (tbl_size == 0) {
- pr_warn("VALIDATE C-TBL: Empty\n");
- return true;
- }
-
- for (i = 0; i < tbl_size; i++) {
- if (!tbl[i].name) {
- pr_err("VALIDATE C-TBL[%zu]: Null\n", i);
- good = false;
- } else if (i > 0 && tbl[i - 1].name) {
- int c = strcmp(tbl[i-1].name, tbl[i].name);
-
- if (c == 0) {
- pr_err("VALIDATE C-TBL[%zu]: Duplicate %s\n",
- i, tbl[i].name);
- good = false;
- }
- if (c > 0) {
- pr_err("VALIDATE C-TBL[%zu]: Missorted %s>=%s\n",
- i, tbl[i-1].name, tbl[i].name);
- good = false;
- }
- }
-
- if (tbl[i].value != special &&
- (tbl[i].value < low || tbl[i].value > high)) {
- pr_err("VALIDATE C-TBL[%zu]: %s->%d const out of range (%d-%d)\n",
- i, tbl[i].name, tbl[i].value, low, high);
- good = false;
- }
- }
-
- return good;
-}
-
-/**
- * fs_validate_description - Validate a parameter description
- * @name: The parameter name to search for.
- * @desc: The parameter description to validate.
+ * fs_validate_description - Validate a parameter specification array
+ * @name: Owner name of the parameter specification array
+ * @desc: The parameter specification array to validate.
*/
bool fs_validate_description(const char *name,
const struct fs_parameter_spec *desc)
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index 64c2d0814ed6..b8c46c5a38a0 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -17,12 +17,10 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
struct path old_root;
path_get(path);
- spin_lock(&fs->lock);
- write_seqcount_begin(&fs->seq);
+ write_seqlock(&fs->seq);
old_root = fs->root;
fs->root = *path;
- write_seqcount_end(&fs->seq);
- spin_unlock(&fs->lock);
+ write_sequnlock(&fs->seq);
if (old_root.dentry)
path_put(&old_root);
}
@@ -36,12 +34,10 @@ void set_fs_pwd(struct fs_struct *fs, const struct path *path)
struct path old_pwd;
path_get(path);
- spin_lock(&fs->lock);
- write_seqcount_begin(&fs->seq);
+ write_seqlock(&fs->seq);
old_pwd = fs->pwd;
fs->pwd = *path;
- write_seqcount_end(&fs->seq);
- spin_unlock(&fs->lock);
+ write_sequnlock(&fs->seq);
if (old_pwd.dentry)
path_put(&old_pwd);
@@ -67,16 +63,14 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
fs = p->fs;
if (fs) {
int hits = 0;
- spin_lock(&fs->lock);
- write_seqcount_begin(&fs->seq);
+ write_seqlock(&fs->seq);
hits += replace_path(&fs->root, old_root, new_root);
hits += replace_path(&fs->pwd, old_root, new_root);
- write_seqcount_end(&fs->seq);
while (hits--) {
count++;
path_get(new_root);
}
- spin_unlock(&fs->lock);
+ write_sequnlock(&fs->seq);
}
task_unlock(p);
}
@@ -99,10 +93,10 @@ void exit_fs(struct task_struct *tsk)
if (fs) {
int kill;
task_lock(tsk);
- spin_lock(&fs->lock);
+ read_seqlock_excl(&fs->seq);
tsk->fs = NULL;
kill = !--fs->users;
- spin_unlock(&fs->lock);
+ read_sequnlock_excl(&fs->seq);
task_unlock(tsk);
if (kill)
free_fs_struct(fs);
@@ -116,16 +110,15 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
if (fs) {
fs->users = 1;
fs->in_exec = 0;
- spin_lock_init(&fs->lock);
- seqcount_spinlock_init(&fs->seq, &fs->lock);
+ seqlock_init(&fs->seq);
fs->umask = old->umask;
- spin_lock(&old->lock);
+ read_seqlock_excl(&old->seq);
fs->root = old->root;
path_get(&fs->root);
fs->pwd = old->pwd;
path_get(&fs->pwd);
- spin_unlock(&old->lock);
+ read_sequnlock_excl(&old->seq);
}
return fs;
}
@@ -140,10 +133,10 @@ int unshare_fs_struct(void)
return -ENOMEM;
task_lock(current);
- spin_lock(&fs->lock);
+ read_seqlock_excl(&fs->seq);
kill = !--fs->users;
current->fs = new_fs;
- spin_unlock(&fs->lock);
+ read_sequnlock_excl(&fs->seq);
task_unlock(current);
if (kill)
@@ -153,16 +146,9 @@ int unshare_fs_struct(void)
}
EXPORT_SYMBOL_GPL(unshare_fs_struct);
-int current_umask(void)
-{
- return current->fs->umask;
-}
-EXPORT_SYMBOL(current_umask);
-
/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
.users = 1,
- .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
- .seq = SEQCNT_SPINLOCK_ZERO(init_fs.seq, &init_fs.lock),
+ .seq = __SEQLOCK_UNLOCKED(init_fs.seq),
.umask = 0022,
};
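With fs->seq promoted to a full seqlock, the write_seqlock() sections above pair with lockless readers that retry on concurrent updates. A hedged sketch of the read-side pattern (illustrative only; a real reader must additionally pin the snapshotted path, e.g. with path_get() or by staying inside an RCU walk, before dereferencing it):

static void demo_read_root(struct fs_struct *fs, struct path *root)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&fs->seq);
		*root = fs->root;	/* unreferenced snapshot */
	} while (read_seqretry(&fs->seq, seq));
}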
diff --git a/fs/fsopen.c b/fs/fsopen.c
index 094a7f510edf..f645c99204eb 100644
--- a/fs/fsopen.c
+++ b/fs/fsopen.c
@@ -18,50 +18,56 @@
#include "internal.h"
#include "mount.h"
+static inline const char *fetch_message_locked(struct fc_log *log, size_t len,
+ bool *need_free)
+{
+ const char *p;
+ int index;
+
+ if (unlikely(log->head == log->tail))
+ return ERR_PTR(-ENODATA);
+
+ index = log->tail & (ARRAY_SIZE(log->buffer) - 1);
+ p = log->buffer[index];
+ if (unlikely(strlen(p) > len))
+ return ERR_PTR(-EMSGSIZE);
+
+ log->buffer[index] = NULL;
+ *need_free = log->need_free & (1 << index);
+ log->need_free &= ~(1 << index);
+ log->tail++;
+
+ return p;
+}
+
/*
* Allow the user to read back any error, warning or informational messages.
+ * Only one message is returned for each read(2) call.
*/
static ssize_t fscontext_read(struct file *file,
char __user *_buf, size_t len, loff_t *pos)
{
struct fs_context *fc = file->private_data;
- struct fc_log *log = fc->log.log;
- unsigned int logsize = ARRAY_SIZE(log->buffer);
- ssize_t ret;
- char *p;
+ ssize_t err;
+ const char *p __free(kfree) = NULL, *message;
bool need_free;
- int index, n;
+ int n;
- ret = mutex_lock_interruptible(&fc->uapi_mutex);
- if (ret < 0)
- return ret;
-
- if (log->head == log->tail) {
- mutex_unlock(&fc->uapi_mutex);
- return -ENODATA;
- }
-
- index = log->tail & (logsize - 1);
- p = log->buffer[index];
- need_free = log->need_free & (1 << index);
- log->buffer[index] = NULL;
- log->need_free &= ~(1 << index);
- log->tail++;
+ err = mutex_lock_interruptible(&fc->uapi_mutex);
+ if (err < 0)
+ return err;
+ message = fetch_message_locked(fc->log.log, len, &need_free);
mutex_unlock(&fc->uapi_mutex);
+ if (IS_ERR(message))
+ return PTR_ERR(message);
- ret = -EMSGSIZE;
- n = strlen(p);
- if (n > len)
- goto err_free;
- ret = -EFAULT;
- if (copy_to_user(_buf, p, n) != 0)
- goto err_free;
- ret = n;
-
-err_free:
if (need_free)
- kfree(p);
- return ret;
+ p = message;
+
+ n = strlen(message);
+ if (copy_to_user(_buf, message, n))
+ return -EFAULT;
+ return n;
}
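Seen from userspace, every read(2) on an fsopen(2)/fspick(2) context fd now returns exactly one queued message (conventionally prefixed "e ", "w " or "i ") and fails with ENODATA once the log is empty. An illustrative drain loop:

#include <stdio.h>
#include <unistd.h>

static void drain_fscontext_log(int fs_fd)
{
	char buf[1024];
	ssize_t n;

	/* one message per read(); stops with ENODATA when the ring is empty */
	while ((n = read(fs_fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stderr);
	}
}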
static int fscontext_release(struct inode *inode, struct file *file)
@@ -453,7 +459,7 @@ SYSCALL_DEFINE5(fsconfig,
case FSCONFIG_SET_FD:
param.type = fs_value_is_file;
ret = -EBADF;
- param.file = fget(aux);
+ param.file = fget_raw(aux);
if (!param.file)
goto out_key;
param.dirfd = aux;
diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig
index 8674dbfbe59d..3a4ae632c94a 100644
--- a/fs/fuse/Kconfig
+++ b/fs/fuse/Kconfig
@@ -2,6 +2,7 @@
config FUSE_FS
tristate "FUSE (Filesystem in Userspace) support"
select FS_POSIX_ACL
+ select FS_IOMAP
help
With FUSE it is possible to implement a fully functional filesystem
in a userspace program.
@@ -12,7 +13,7 @@ config FUSE_FS
although chances are your distribution already has that library
installed if you've installed the "fuse" package itself.
- See <file:Documentation/filesystems/fuse.rst> for more information.
+ See <file:Documentation/filesystems/fuse/fuse.rst> for more information.
See <file:Documentation/Changes> for needed library/utility version.
If you want to develop a userspace FS, or if you want to use
@@ -63,3 +64,15 @@ config FUSE_PASSTHROUGH
to be performed directly on a backing file.
If you want to allow passthrough operations, answer Y.
+
+config FUSE_IO_URING
+ bool "FUSE communication over io-uring"
+ default y
+ depends on FUSE_FS
+ depends on IO_URING
+ help
+ This allows sending FUSE requests over the io-uring interface and
+ also adds request core affinity.
+
+ If you want to allow fuse server/client communication through io-uring,
+ answer Y.
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index 2c372180d631..22ad9538dfc4 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -10,10 +10,12 @@ obj-$(CONFIG_FUSE_FS) += fuse.o
obj-$(CONFIG_CUSE) += cuse.o
obj-$(CONFIG_VIRTIO_FS) += virtiofs.o
-fuse-y := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o ioctl.o
+fuse-y := trace.o # put trace.o first so we see ftrace errors sooner
+fuse-y += dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o ioctl.o
fuse-y += iomode.o
fuse-$(CONFIG_FUSE_DAX) += dax.o
-fuse-$(CONFIG_FUSE_PASSTHROUGH) += passthrough.o
+fuse-$(CONFIG_FUSE_PASSTHROUGH) += passthrough.o backing.o
fuse-$(CONFIG_SYSCTL) += sysctl.o
+fuse-$(CONFIG_FUSE_IO_URING) += dev_uring.o
virtiofs-y := virtio_fs.o
diff --git a/fs/fuse/backing.c b/fs/fuse/backing.c
new file mode 100644
index 000000000000..4afda419dd14
--- /dev/null
+++ b/fs/fuse/backing.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FUSE passthrough to backing file.
+ *
+ * Copyright (c) 2023 CTERA Networks.
+ */
+
+#include "fuse_i.h"
+
+#include <linux/file.h>
+
+struct fuse_backing *fuse_backing_get(struct fuse_backing *fb)
+{
+ if (fb && refcount_inc_not_zero(&fb->count))
+ return fb;
+ return NULL;
+}
+
+static void fuse_backing_free(struct fuse_backing *fb)
+{
+ pr_debug("%s: fb=0x%p\n", __func__, fb);
+
+ if (fb->file)
+ fput(fb->file);
+ put_cred(fb->cred);
+ kfree_rcu(fb, rcu);
+}
+
+void fuse_backing_put(struct fuse_backing *fb)
+{
+ if (fb && refcount_dec_and_test(&fb->count))
+ fuse_backing_free(fb);
+}
+
+void fuse_backing_files_init(struct fuse_conn *fc)
+{
+ idr_init(&fc->backing_files_map);
+}
+
+static int fuse_backing_id_alloc(struct fuse_conn *fc, struct fuse_backing *fb)
+{
+ int id;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&fc->lock);
+ /* FIXME: xarray might be space inefficient */
+ id = idr_alloc_cyclic(&fc->backing_files_map, fb, 1, 0, GFP_ATOMIC);
+ spin_unlock(&fc->lock);
+ idr_preload_end();
+
+ WARN_ON_ONCE(id == 0);
+ return id;
+}
+
+static struct fuse_backing *fuse_backing_id_remove(struct fuse_conn *fc,
+ int id)
+{
+ struct fuse_backing *fb;
+
+ spin_lock(&fc->lock);
+ fb = idr_remove(&fc->backing_files_map, id);
+ spin_unlock(&fc->lock);
+
+ return fb;
+}
+
+static int fuse_backing_id_free(int id, void *p, void *data)
+{
+ struct fuse_backing *fb = p;
+
+ WARN_ON_ONCE(refcount_read(&fb->count) != 1);
+ fuse_backing_free(fb);
+ return 0;
+}
+
+void fuse_backing_files_free(struct fuse_conn *fc)
+{
+ idr_for_each(&fc->backing_files_map, fuse_backing_id_free, NULL);
+ idr_destroy(&fc->backing_files_map);
+}
+
+int fuse_backing_open(struct fuse_conn *fc, struct fuse_backing_map *map)
+{
+ struct file *file;
+ struct super_block *backing_sb;
+ struct fuse_backing *fb = NULL;
+ int res;
+
+ pr_debug("%s: fd=%d flags=0x%x\n", __func__, map->fd, map->flags);
+
+ /* TODO: relax CAP_SYS_ADMIN once backing files are visible to lsof */
+ res = -EPERM;
+ if (!fc->passthrough || !capable(CAP_SYS_ADMIN))
+ goto out;
+
+ res = -EINVAL;
+ if (map->flags || map->padding)
+ goto out;
+
+ file = fget_raw(map->fd);
+ res = -EBADF;
+ if (!file)
+ goto out;
+
+ /* read/write/splice/mmap passthrough only relevant for regular files */
+ res = d_is_dir(file->f_path.dentry) ? -EISDIR : -EINVAL;
+ if (!d_is_reg(file->f_path.dentry))
+ goto out_fput;
+
+ backing_sb = file_inode(file)->i_sb;
+ res = -ELOOP;
+ if (backing_sb->s_stack_depth >= fc->max_stack_depth)
+ goto out_fput;
+
+ fb = kmalloc(sizeof(struct fuse_backing), GFP_KERNEL);
+ res = -ENOMEM;
+ if (!fb)
+ goto out_fput;
+
+ fb->file = file;
+ fb->cred = prepare_creds();
+ refcount_set(&fb->count, 1);
+
+ res = fuse_backing_id_alloc(fc, fb);
+ if (res < 0) {
+ fuse_backing_free(fb);
+ fb = NULL;
+ }
+
+out:
+ pr_debug("%s: fb=0x%p, ret=%i\n", __func__, fb, res);
+
+ return res;
+
+out_fput:
+ fput(file);
+ goto out;
+}
+
+int fuse_backing_close(struct fuse_conn *fc, int backing_id)
+{
+ struct fuse_backing *fb = NULL;
+ int err;
+
+ pr_debug("%s: backing_id=%d\n", __func__, backing_id);
+
+ /* TODO: relax CAP_SYS_ADMIN once backing files are visible to lsof */
+ err = -EPERM;
+ if (!fc->passthrough || !capable(CAP_SYS_ADMIN))
+ goto out;
+
+ err = -EINVAL;
+ if (backing_id <= 0)
+ goto out;
+
+ err = -ENOENT;
+ fb = fuse_backing_id_remove(fc, backing_id);
+ if (!fb)
+ goto out;
+
+ fuse_backing_put(fb);
+ err = 0;
+out:
+ pr_debug("%s: fb=0x%p, err=%i\n", __func__, fb, err);
+
+ return err;
+}
+
+struct fuse_backing *fuse_backing_lookup(struct fuse_conn *fc, int backing_id)
+{
+ struct fuse_backing *fb;
+
+ rcu_read_lock();
+ fb = idr_find(&fc->backing_files_map, backing_id);
+ fb = fuse_backing_get(fb);
+ rcu_read_unlock();
+
+ return fb;
+}
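
fuse_backing_open()/fuse_backing_close() above are reached through the FUSE_DEV_IOC_BACKING_OPEN/FUSE_DEV_IOC_BACKING_CLOSE ioctls handled later in dev.c. A hedged userspace sketch of driving them - assuming the struct fuse_backing_map and ioctl definitions of the passthrough UAPI in <linux/fuse.h>, a server holding CAP_SYS_ADMIN as required above, and purely illustrative helper names - could look like:

    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fuse.h>    /* struct fuse_backing_map, FUSE_DEV_IOC_* */

    /*
     * Sketch: register a backing file on an open /dev/fuse fd.  On success
     * the returned id is positive (id 0 is never handed out, see the WARN
     * above) and can later be referenced from passthrough-enabled replies.
     */
    static int register_backing(int dev_fd, const char *path)
    {
        struct fuse_backing_map map = { .flags = 0 };
        int backing_id;

        map.fd = open(path, O_RDWR);
        if (map.fd < 0)
            return -1;

        backing_id = ioctl(dev_fd, FUSE_DEV_IOC_BACKING_OPEN, &map);
        close(map.fd);    /* the kernel took its own file reference */
        return backing_id;
    }

    static int unregister_backing(int dev_fd, uint32_t backing_id)
    {
        return ioctl(dev_fd, FUSE_DEV_IOC_BACKING_CLOSE, &backing_id);
    }

How the returned id is then carried in open/create replies depends on the passthrough protocol negotiated at FUSE_INIT time, which is outside the scope of this file.
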
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 2a730d88cc3b..140bd5730d99 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs_context.h>
+#include <linux/namei.h>
#define FUSE_CTL_SUPER_MAGIC 0x65735543
@@ -204,15 +205,13 @@ static const struct file_operations fuse_conn_congestion_threshold_ops = {
static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
struct fuse_conn *fc,
- const char *name,
- int mode, int nlink,
+ const char *name, int mode,
const struct inode_operations *iop,
const struct file_operations *fop)
{
struct dentry *dentry;
struct inode *inode;
- BUG_ON(fc->ctl_ndents >= FUSE_CTL_NUM_DENTRIES);
dentry = d_alloc_name(parent, name);
if (!dentry)
return NULL;
@@ -232,12 +231,19 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
if (iop)
inode->i_op = iop;
inode->i_fop = fop;
- set_nlink(inode, nlink);
+ if (S_ISDIR(mode)) {
+ inc_nlink(d_inode(parent));
+ inc_nlink(inode);
+ }
inode->i_private = fc;
- d_add(dentry, inode);
-
- fc->ctl_dentry[fc->ctl_ndents++] = dentry;
-
+ d_make_persistent(dentry, inode);
+ dput(dentry);
+
+ /*
+	 * We return a borrowed reference here - it is only valid while
+	 * fuse_mutex is held; strictly speaking it is the reference that
+	 * d_make_persistent() returned.
+ */
return dentry;
}
@@ -254,22 +260,21 @@ int fuse_ctl_add_conn(struct fuse_conn *fc)
return 0;
parent = fuse_control_sb->s_root;
- inc_nlink(d_inode(parent));
sprintf(name, "%u", fc->dev);
- parent = fuse_ctl_add_dentry(parent, fc, name, S_IFDIR | 0500, 2,
+ parent = fuse_ctl_add_dentry(parent, fc, name, S_IFDIR | 0500,
&simple_dir_inode_operations,
&simple_dir_operations);
if (!parent)
goto err;
- if (!fuse_ctl_add_dentry(parent, fc, "waiting", S_IFREG | 0400, 1,
+ if (!fuse_ctl_add_dentry(parent, fc, "waiting", S_IFREG | 0400,
NULL, &fuse_ctl_waiting_ops) ||
- !fuse_ctl_add_dentry(parent, fc, "abort", S_IFREG | 0200, 1,
+ !fuse_ctl_add_dentry(parent, fc, "abort", S_IFREG | 0200,
NULL, &fuse_ctl_abort_ops) ||
!fuse_ctl_add_dentry(parent, fc, "max_background", S_IFREG | 0600,
- 1, NULL, &fuse_conn_max_background_ops) ||
+ NULL, &fuse_conn_max_background_ops) ||
!fuse_ctl_add_dentry(parent, fc, "congestion_threshold",
- S_IFREG | 0600, 1, NULL,
+ S_IFREG | 0600, NULL,
&fuse_conn_congestion_threshold_ops))
goto err;
@@ -280,27 +285,24 @@ int fuse_ctl_add_conn(struct fuse_conn *fc)
return -ENOMEM;
}
+static void remove_one(struct dentry *dentry)
+{
+ d_inode(dentry)->i_private = NULL;
+}
+
/*
* Remove a connection from the control filesystem (if it exists).
* Caller must hold fuse_mutex
*/
void fuse_ctl_remove_conn(struct fuse_conn *fc)
{
- int i;
+ char name[32];
if (!fuse_control_sb || fc->no_control)
return;
- for (i = fc->ctl_ndents - 1; i >= 0; i--) {
- struct dentry *dentry = fc->ctl_dentry[i];
- d_inode(dentry)->i_private = NULL;
- if (!i) {
- /* Get rid of submounts: */
- d_invalidate(dentry);
- }
- dput(dentry);
- }
- drop_nlink(d_inode(fuse_control_sb->s_root));
+ sprintf(name, "%u", fc->dev);
+ simple_remove_by_name(fuse_control_sb->s_root, name, remove_one);
}
static int fuse_ctl_fill_super(struct super_block *sb, struct fs_context *fsc)
@@ -346,15 +348,11 @@ static int fuse_ctl_init_fs_context(struct fs_context *fsc)
static void fuse_ctl_kill_sb(struct super_block *sb)
{
- struct fuse_conn *fc;
-
mutex_lock(&fuse_mutex);
fuse_control_sb = NULL;
- list_for_each_entry(fc, &fuse_conn_list, entry)
- fc->ctl_ndents = 0;
mutex_unlock(&fuse_mutex);
- kill_litter_super(sb);
+ kill_anon_super(sb);
}
static struct file_system_type fuse_ctl_fs_type = {
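
The rework above only changes how the control dentries are tracked internally; the directory layout exposed to userspace (one numbered directory per connection containing waiting, abort, max_background and congestion_threshold) stays the same. As a quick way to exercise the new removal path, here is a hedged sketch that aborts a connection by device number - /sys/fs/fuse/connections is the conventional mount point of this filesystem rather than something this patch establishes, and the helper name is illustrative:

    #include <stdio.h>

    /* Sketch: write to the per-connection "abort" file; any write aborts. */
    static int fuse_ctl_abort(unsigned int dev)
    {
        char path[64];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/fs/fuse/connections/%u/abort", dev);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fputc('1', f);
        return fclose(f);
    }
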
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index b39844d75a80..28c96961e85d 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -52,6 +52,7 @@
#include <linux/user_namespace.h>
#include "fuse_i.h"
+#include "fuse_dev_i.h"
#define CUSE_CONNTBL_LEN 64
@@ -547,7 +548,7 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
*/
static int cuse_channel_release(struct inode *inode, struct file *file)
{
- struct fuse_dev *fud = file->private_data;
+ struct fuse_dev *fud = __fuse_get_dev(file);
struct cuse_conn *cc = fc_to_cc(fud->fc);
/* remove from the conntbl, no more access from this point on */
diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c
index 9abbc2f2894f..ac6d4c1064cc 100644
--- a/fs/fuse/dax.c
+++ b/fs/fuse/dax.c
@@ -10,7 +10,6 @@
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
-#include <linux/pfn_t.h>
#include <linux/iomap.h>
#include <linux/interval_tree.h>
@@ -240,11 +239,12 @@ static int fuse_send_removemapping(struct inode *inode,
args.opcode = FUSE_REMOVEMAPPING;
args.nodeid = fi->nodeid;
- args.in_numargs = 2;
- args.in_args[0].size = sizeof(*inargp);
- args.in_args[0].value = inargp;
- args.in_args[1].size = inargp->count * sizeof(*remove_one);
- args.in_args[1].value = remove_one;
+ args.in_numargs = 3;
+ fuse_set_zero_arg0(&args);
+ args.in_args[1].size = sizeof(*inargp);
+ args.in_args[1].value = inargp;
+ args.in_args[2].size = inargp->count * sizeof(*remove_one);
+ args.in_args[2].value = remove_one;
return fuse_simple_request(fm, &args);
}
@@ -665,36 +665,12 @@ static void fuse_wait_dax_page(struct inode *inode)
filemap_invalidate_lock(inode->i_mapping);
}
-/* Should be called with mapping->invalidate_lock held exclusively */
-static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
- loff_t start, loff_t end)
-{
- struct page *page;
-
- page = dax_layout_busy_page_range(inode->i_mapping, start, end);
- if (!page)
- return 0;
-
- *retry = true;
- return ___wait_var_event(&page->_refcount,
- atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
- 0, 0, fuse_wait_dax_page(inode));
-}
-
-/* dmap_end == 0 leads to unmapping of whole file */
+/* Should be called with mapping->invalidate_lock held exclusively. */
int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start,
u64 dmap_end)
{
- bool retry;
- int ret;
-
- do {
- retry = false;
- ret = __fuse_dax_break_layouts(inode, &retry, dmap_start,
- dmap_end);
- } while (ret == 0 && retry);
-
- return ret;
+ return dax_break_layout(inode, dmap_start, dmap_end,
+ fuse_wait_dax_page);
}
ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -780,7 +756,7 @@ static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf, unsigned int order,
vm_fault_t ret;
struct inode *inode = file_inode(vmf->vma->vm_file);
struct super_block *sb = inode->i_sb;
- pfn_t pfn;
+ unsigned long pfn;
int error = 0;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_conn_dax *fcd = fc->dax;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 27ccae63495d..6d59cbc877c6 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -6,7 +6,9 @@
See the file COPYING.
*/
+#include "dev_uring_i.h"
#include "fuse_i.h"
+#include "fuse_dev_i.h"
#include <linux/init.h>
#include <linux/module.h>
@@ -21,28 +23,107 @@
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>
+#include <linux/seq_file.h>
-#define CREATE_TRACE_POINTS
#include "fuse_trace.h"
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
-/* Ordinary requests have even IDs, while interrupts IDs are odd */
-#define FUSE_INT_REQ_BIT (1ULL << 0)
-#define FUSE_REQ_ID_STEP (1ULL << 1)
-
static struct kmem_cache *fuse_req_cachep;
-static void end_requests(struct list_head *head);
+const unsigned long fuse_timeout_timer_freq =
+ secs_to_jiffies(FUSE_TIMEOUT_TIMER_FREQ);
-static struct fuse_dev *fuse_get_dev(struct file *file)
+bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list)
{
- /*
- * Lockless access is OK, because file->private data is set
- * once during mount and is valid until the file is released.
- */
- return READ_ONCE(file->private_data);
+ struct fuse_req *req;
+
+ req = list_first_entry_or_null(list, struct fuse_req, list);
+ if (!req)
+ return false;
+ return time_is_before_jiffies(req->create_time + fc->timeout.req_timeout);
+}
+
+static bool fuse_fpq_processing_expired(struct fuse_conn *fc, struct list_head *processing)
+{
+ int i;
+
+ for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+ if (fuse_request_expired(fc, &processing[i]))
+ return true;
+
+ return false;
+}
+
+/*
+ * Check if any requests aren't being completed by the time the request timeout
+ * elapses. To do so, we:
+ * - check the fiq pending list
+ * - check the bg queue
+ * - check the fpq io and processing lists
+ *
+ * To make this fast, we only check against the head request on each list since
+ * these are generally queued in order of creation time (e.g. newer requests get
+ * queued to the tail). We might miss a few edge cases (e.g. requests transitioning
+ * between lists, re-sent requests at the head of the pending list having a
+ * later creation time than other requests on that list, etc.) but that is fine
+ * since if the request never gets fulfilled, it will eventually be caught.
+ */
+void fuse_check_timeout(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct fuse_conn *fc = container_of(dwork, struct fuse_conn,
+ timeout.work);
+ struct fuse_iqueue *fiq = &fc->iq;
+ struct fuse_dev *fud;
+ struct fuse_pqueue *fpq;
+ bool expired = false;
+
+ if (!atomic_read(&fc->num_waiting))
+ goto out;
+
+ spin_lock(&fiq->lock);
+ expired = fuse_request_expired(fc, &fiq->pending);
+ spin_unlock(&fiq->lock);
+ if (expired)
+ goto abort_conn;
+
+ spin_lock(&fc->bg_lock);
+ expired = fuse_request_expired(fc, &fc->bg_queue);
+ spin_unlock(&fc->bg_lock);
+ if (expired)
+ goto abort_conn;
+
+ spin_lock(&fc->lock);
+ if (!fc->connected) {
+ spin_unlock(&fc->lock);
+ return;
+ }
+ list_for_each_entry(fud, &fc->devices, entry) {
+ fpq = &fud->pq;
+ spin_lock(&fpq->lock);
+ if (fuse_request_expired(fc, &fpq->io) ||
+ fuse_fpq_processing_expired(fc, fpq->processing)) {
+ spin_unlock(&fpq->lock);
+ spin_unlock(&fc->lock);
+ goto abort_conn;
+ }
+
+ spin_unlock(&fpq->lock);
+ }
+ spin_unlock(&fc->lock);
+
+ if (fuse_uring_request_expired(fc))
+ goto abort_conn;
+
+out:
+ queue_delayed_work(system_percpu_wq, &fc->timeout.work,
+ fuse_timeout_timer_freq);
+ return;
+
+abort_conn:
+ fuse_abort_conn(fc);
}
static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
@@ -53,6 +134,7 @@ static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
refcount_set(&req->count, 1);
__set_bit(FR_PENDING, &req->flags);
req->fm = fm;
+ req->create_time = jiffies;
}
static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
@@ -89,7 +171,8 @@ void fuse_set_initialized(struct fuse_conn *fc)
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
- return !fc->initialized || (for_background && fc->blocked);
+ return !fc->initialized || (for_background && fc->blocked) ||
+ (fc->io_uring && fc->connected && !fuse_uring_ready(fc));
}
static void fuse_drop_waiting(struct fuse_conn *fc)
@@ -123,8 +206,9 @@ static struct fuse_req *fuse_get_req(struct mnt_idmap *idmap,
if (fuse_block_alloc(fc, for_background)) {
err = -EINTR;
- if (wait_event_killable_exclusive(fc->blocked_waitq,
- !fuse_block_alloc(fc, for_background)))
+ if (wait_event_state_exclusive(fc->blocked_waitq,
+ !fuse_block_alloc(fc, for_background),
+ (TASK_KILLABLE | TASK_FREEZABLE)))
goto out;
}
/* Matches smp_wmb() in fuse_set_initialized() */
@@ -234,10 +318,11 @@ u64 fuse_get_unique(struct fuse_iqueue *fiq)
}
EXPORT_SYMBOL_GPL(fuse_get_unique);
-static unsigned int fuse_req_hash(u64 unique)
+unsigned int fuse_req_hash(u64 unique)
{
return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
}
+EXPORT_SYMBOL_GPL(fuse_req_hash);
/*
* A new request is available, wake fiq->waitq
@@ -250,7 +335,8 @@ __releases(fiq->lock)
spin_unlock(&fiq->lock);
}
-static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *forget)
+void fuse_dev_queue_forget(struct fuse_iqueue *fiq,
+ struct fuse_forget_link *forget)
{
spin_lock(&fiq->lock);
if (fiq->connected) {
@@ -263,7 +349,7 @@ static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_li
}
}
-static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
+void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
spin_lock(&fiq->lock);
if (list_empty(&req->intr_entry)) {
@@ -284,12 +370,32 @@ static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *r
}
}
+static inline void fuse_request_assign_unique_locked(struct fuse_iqueue *fiq,
+ struct fuse_req *req)
+{
+ if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
+ req->in.h.unique = fuse_get_unique_locked(fiq);
+
+ /* tracepoint captures in.h.unique and in.h.len */
+ trace_fuse_request_send(req);
+}
+
+inline void fuse_request_assign_unique(struct fuse_iqueue *fiq,
+ struct fuse_req *req)
+{
+ if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
+ req->in.h.unique = fuse_get_unique(fiq);
+
+ /* tracepoint captures in.h.unique and in.h.len */
+ trace_fuse_request_send(req);
+}
+EXPORT_SYMBOL_GPL(fuse_request_assign_unique);
+
static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req)
{
spin_lock(&fiq->lock);
if (fiq->connected) {
- if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
- req->in.h.unique = fuse_get_unique_locked(fiq);
+ fuse_request_assign_unique_locked(fiq, req);
list_add_tail(&req->list, &fiq->pending);
fuse_dev_wake_and_unlock(fiq);
} else {
@@ -312,7 +418,6 @@ static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req)
req->in.h.len = sizeof(struct fuse_in_header) +
fuse_len_args(req->args->in_numargs,
(struct fuse_arg *) req->args->in_args);
- trace_fuse_request_send(req);
fiq->ops->send_req(fiq, req);
}
@@ -418,6 +523,24 @@ static int queue_interrupt(struct fuse_req *req)
return 0;
}
+bool fuse_remove_pending_req(struct fuse_req *req, spinlock_t *lock)
+{
+ spin_lock(lock);
+ if (test_bit(FR_PENDING, &req->flags)) {
+ /*
+ * FR_PENDING does not get cleared as the request will end
+ * up in destruction anyway.
+ */
+ list_del(&req->list);
+ spin_unlock(lock);
+ __fuse_put_request(req);
+ req->out.h.error = -EINTR;
+ return true;
+ }
+ spin_unlock(lock);
+ return false;
+}
+
static void request_wait_answer(struct fuse_req *req)
{
struct fuse_conn *fc = req->fm->fc;
@@ -439,22 +562,20 @@ static void request_wait_answer(struct fuse_req *req)
}
if (!test_bit(FR_FORCE, &req->flags)) {
+ bool removed;
+
/* Only fatal signals may interrupt this */
err = wait_event_killable(req->waitq,
test_bit(FR_FINISHED, &req->flags));
if (!err)
return;
- spin_lock(&fiq->lock);
- /* Request is not yet in userspace, bail out */
- if (test_bit(FR_PENDING, &req->flags)) {
- list_del(&req->list);
- spin_unlock(&fiq->lock);
- __fuse_put_request(req);
- req->out.h.error = -EINTR;
+ if (test_bit(FR_URING, &req->flags))
+ removed = fuse_uring_remove_pending_req(req);
+ else
+ removed = fuse_remove_pending_req(req, &fiq->lock);
+ if (removed)
return;
- }
- spin_unlock(&fiq->lock);
}
/*
@@ -580,7 +701,25 @@ ssize_t __fuse_simple_request(struct mnt_idmap *idmap,
return ret;
}
-static bool fuse_request_queue_background(struct fuse_req *req)
+#ifdef CONFIG_FUSE_IO_URING
+static bool fuse_request_queue_background_uring(struct fuse_conn *fc,
+ struct fuse_req *req)
+{
+ struct fuse_iqueue *fiq = &fc->iq;
+
+ req->in.h.len = sizeof(struct fuse_in_header) +
+ fuse_len_args(req->args->in_numargs,
+ (struct fuse_arg *) req->args->in_args);
+ fuse_request_assign_unique(fiq, req);
+
+ return fuse_uring_queue_bq_req(req);
+}
+#endif
+
+/*
+ * @return true if queued
+ */
+static int fuse_request_queue_background(struct fuse_req *req)
{
struct fuse_mount *fm = req->fm;
struct fuse_conn *fc = fm->fc;
@@ -592,6 +731,12 @@ static bool fuse_request_queue_background(struct fuse_req *req)
atomic_inc(&fc->num_waiting);
}
__set_bit(FR_ISREPLY, &req->flags);
+
+#ifdef CONFIG_FUSE_IO_URING
+ if (fuse_uring_ready(fc))
+ return fuse_request_queue_background_uring(fc, req);
+#endif
+
spin_lock(&fc->bg_lock);
if (likely(fc->connected)) {
fc->num_background++;
@@ -692,22 +837,8 @@ static int unlock_request(struct fuse_req *req)
return err;
}
-struct fuse_copy_state {
- int write;
- struct fuse_req *req;
- struct iov_iter *iter;
- struct pipe_buffer *pipebufs;
- struct pipe_buffer *currbuf;
- struct pipe_inode_info *pipe;
- unsigned long nr_segs;
- struct page *pg;
- unsigned len;
- unsigned offset;
- unsigned move_pages:1;
-};
-
-static void fuse_copy_init(struct fuse_copy_state *cs, int write,
- struct iov_iter *iter)
+void fuse_copy_init(struct fuse_copy_state *cs, bool write,
+ struct iov_iter *iter)
{
memset(cs, 0, sizeof(*cs));
cs->write = write;
@@ -715,7 +846,7 @@ static void fuse_copy_init(struct fuse_copy_state *cs, int write,
}
/* Unmap and put previous page of userspace buffer */
-static void fuse_copy_finish(struct fuse_copy_state *cs)
+void fuse_copy_finish(struct fuse_copy_state *cs)
{
if (cs->currbuf) {
struct pipe_buffer *buf = cs->currbuf;
@@ -814,6 +945,9 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
*size -= ncpy;
cs->len -= ncpy;
cs->offset += ncpy;
+ if (cs->is_uring)
+ cs->ring.copied_sz += ncpy;
+
return ncpy;
}
@@ -821,7 +955,7 @@ static int fuse_check_folio(struct folio *folio)
{
if (folio_mapped(folio) ||
folio->mapping != NULL ||
- (folio->flags & PAGE_FLAGS_CHECK_AT_PREP &
+ (folio->flags.f & PAGE_FLAGS_CHECK_AT_PREP &
~(1 << PG_locked |
1 << PG_referenced |
1 << PG_lru |
@@ -836,10 +970,16 @@ static int fuse_check_folio(struct folio *folio)
return 0;
}
-static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
+/*
+ * Attempt to steal a folio from the splice() pipe and move it into the
+ * pagecache. If successful, the pointer in @foliop will be updated. The
+ * folio that was originally in @foliop will lose a reference and the new
+ * folio returned in @foliop will carry a reference.
+ */
+static int fuse_try_move_folio(struct fuse_copy_state *cs, struct folio **foliop)
{
int err;
- struct folio *oldfolio = page_folio(*pagep);
+ struct folio *oldfolio = *foliop;
struct folio *newfolio;
struct pipe_buffer *buf = cs->pipebufs;
@@ -860,7 +1000,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
cs->pipebufs++;
cs->nr_segs--;
- if (cs->len != PAGE_SIZE)
+ if (cs->len != folio_size(oldfolio))
goto out_fallback;
if (!pipe_buf_try_steal(cs->pipe, buf))
@@ -906,7 +1046,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
if (test_bit(FR_ABORTED, &cs->req->flags))
err = -ENOENT;
else
- *pagep = &newfolio->page;
+ *foliop = newfolio;
spin_unlock(&cs->req->waitq.lock);
if (err) {
@@ -939,8 +1079,8 @@ out_fallback:
goto out_put_old;
}
-static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
- unsigned offset, unsigned count)
+static int fuse_ref_folio(struct fuse_copy_state *cs, struct folio *folio,
+ unsigned offset, unsigned count)
{
struct pipe_buffer *buf;
int err;
@@ -948,17 +1088,17 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
if (cs->nr_segs >= cs->pipe->max_usage)
return -EIO;
- get_page(page);
+ folio_get(folio);
err = unlock_request(cs->req);
if (err) {
- put_page(page);
+ folio_put(folio);
return err;
}
fuse_copy_finish(cs);
buf = cs->pipebufs;
- buf->page = page;
+ buf->page = &folio->page;
buf->offset = offset;
buf->len = count;
@@ -970,20 +1110,24 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
}
/*
- * Copy a page in the request to/from the userspace buffer. Must be
+ * Copy a folio in the request to/from the userspace buffer. Must be
* done atomically
*/
-static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
- unsigned offset, unsigned count, int zeroing)
+static int fuse_copy_folio(struct fuse_copy_state *cs, struct folio **foliop,
+ unsigned offset, unsigned count, int zeroing)
{
int err;
- struct page *page = *pagep;
+ struct folio *folio = *foliop;
+ size_t size;
- if (page && zeroing && count < PAGE_SIZE)
- clear_highpage(page);
+ if (folio) {
+ size = folio_size(folio);
+ if (zeroing && count < size)
+ folio_zero_range(folio, 0, size);
+ }
while (count) {
- if (cs->write && cs->pipebufs && page) {
+ if (cs->write && cs->pipebufs && folio) {
/*
* Can't control lifetime of pipe buffers, so always
* copy user pages.
@@ -993,12 +1137,12 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
if (err)
return err;
} else {
- return fuse_ref_page(cs, page, offset, count);
+ return fuse_ref_folio(cs, folio, offset, count);
}
} else if (!cs->len) {
- if (cs->move_pages && page &&
- offset == 0 && count == PAGE_SIZE) {
- err = fuse_try_move_page(cs, pagep);
+ if (cs->move_folios && folio &&
+ offset == 0 && count == size) {
+ err = fuse_try_move_folio(cs, foliop);
if (err <= 0)
return err;
} else {
@@ -1007,22 +1151,30 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
return err;
}
}
- if (page) {
- void *mapaddr = kmap_local_page(page);
- void *buf = mapaddr + offset;
- offset += fuse_copy_do(cs, &buf, &count);
+ if (folio) {
+ void *mapaddr = kmap_local_folio(folio, offset);
+ void *buf = mapaddr;
+ unsigned int copy = count;
+ unsigned int bytes_copied;
+
+ if (folio_test_highmem(folio) && count > PAGE_SIZE - offset_in_page(offset))
+ copy = PAGE_SIZE - offset_in_page(offset);
+
+ bytes_copied = fuse_copy_do(cs, &buf, &copy);
kunmap_local(mapaddr);
+ offset += bytes_copied;
+ count -= bytes_copied;
} else
offset += fuse_copy_do(cs, NULL, &count);
}
- if (page && !cs->write)
- flush_dcache_page(page);
+ if (folio && !cs->write)
+ flush_dcache_folio(folio);
return 0;
}
-/* Copy pages in the request to/from userspace buffer */
-static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
- int zeroing)
+/* Copy folios in the request to/from userspace buffer */
+static int fuse_copy_folios(struct fuse_copy_state *cs, unsigned nbytes,
+ int zeroing)
{
unsigned i;
struct fuse_req *req = cs->req;
@@ -1032,23 +1184,12 @@ static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
int err;
unsigned int offset = ap->descs[i].offset;
unsigned int count = min(nbytes, ap->descs[i].length);
- struct page *orig, *pagep;
- orig = pagep = &ap->folios[i]->page;
-
- err = fuse_copy_page(cs, &pagep, offset, count, zeroing);
+ err = fuse_copy_folio(cs, &ap->folios[i], offset, count, zeroing);
if (err)
return err;
nbytes -= count;
-
- /*
- * fuse_copy_page may have moved a page from a pipe instead of
- * copying into our given page, so update the folios if it was
- * replaced.
- */
- if (pagep != orig)
- ap->folios[i] = page_folio(pagep);
}
return 0;
}
@@ -1068,9 +1209,9 @@ static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
}
/* Copy request arguments to/from userspace buffer */
-static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
- unsigned argpages, struct fuse_arg *args,
- int zeroing)
+int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
+ unsigned argpages, struct fuse_arg *args,
+ int zeroing)
{
int err = 0;
unsigned i;
@@ -1078,7 +1219,7 @@ static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
for (i = 0; !err && i < numargs; i++) {
struct fuse_arg *arg = &args[i];
if (i == numargs - 1 && argpages)
- err = fuse_copy_pages(cs, arg->size, zeroing);
+ err = fuse_copy_folios(cs, arg->size, zeroing);
else
err = fuse_copy_one(cs, arg->value, arg->size);
}
@@ -1407,19 +1548,39 @@ static int fuse_dev_open(struct inode *inode, struct file *file)
return 0;
}
+struct fuse_dev *fuse_get_dev(struct file *file)
+{
+ struct fuse_dev *fud = __fuse_get_dev(file);
+ int err;
+
+ if (likely(fud))
+ return fud;
+
+ err = wait_event_interruptible(fuse_dev_waitq,
+ READ_ONCE(file->private_data) != FUSE_DEV_SYNC_INIT);
+ if (err)
+ return ERR_PTR(err);
+
+ fud = __fuse_get_dev(file);
+ if (!fud)
+ return ERR_PTR(-EPERM);
+
+ return fud;
+}
+
static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
struct fuse_copy_state cs;
struct file *file = iocb->ki_filp;
struct fuse_dev *fud = fuse_get_dev(file);
- if (!fud)
- return -EPERM;
+ if (IS_ERR(fud))
+ return PTR_ERR(fud);
if (!user_backed_iter(to))
return -EINVAL;
- fuse_copy_init(&cs, 1, to);
+ fuse_copy_init(&cs, true, to);
return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}
@@ -1434,22 +1595,22 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
struct fuse_copy_state cs;
struct fuse_dev *fud = fuse_get_dev(in);
- if (!fud)
- return -EPERM;
+ if (IS_ERR(fud))
+ return PTR_ERR(fud);
bufs = kvmalloc_array(pipe->max_usage, sizeof(struct pipe_buffer),
GFP_KERNEL);
if (!bufs)
return -ENOMEM;
- fuse_copy_init(&cs, 1, NULL);
+ fuse_copy_init(&cs, true, NULL);
cs.pipebufs = bufs;
cs.pipe = pipe;
ret = fuse_dev_do_read(fud, in, &cs, len);
if (ret < 0)
goto out;
- if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {
+ if (pipe_buf_usage(pipe) + cs.nr_segs > pipe->max_usage) {
ret = -EIO;
goto out;
}
@@ -1479,35 +1640,31 @@ static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_poll_wakeup_out outarg;
- int err = -EINVAL;
+ int err;
if (size != sizeof(outarg))
- goto err;
+ return -EINVAL;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
- goto err;
+ return err;
fuse_copy_finish(cs);
return fuse_notify_poll_wakeup(fc, &outarg);
-
-err:
- fuse_copy_finish(cs);
- return err;
}
static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_inval_inode_out outarg;
- int err = -EINVAL;
+ int err;
if (size != sizeof(outarg))
- goto err;
+ return -EINVAL;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
- goto err;
+ return err;
fuse_copy_finish(cs);
down_read(&fc->killsb);
@@ -1515,39 +1672,33 @@ static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
outarg.off, outarg.len);
up_read(&fc->killsb);
return err;
-
-err:
- fuse_copy_finish(cs);
- return err;
}
static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_inval_entry_out outarg;
- int err = -ENOMEM;
+ int err;
char *buf;
struct qstr name;
- buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
- if (!buf)
- goto err;
-
- err = -EINVAL;
if (size < sizeof(outarg))
- goto err;
+ return -EINVAL;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
- goto err;
+ return err;
- err = -ENAMETOOLONG;
- if (outarg.namelen > FUSE_NAME_MAX)
- goto err;
+ if (outarg.namelen > fc->name_max)
+ return -ENAMETOOLONG;
err = -EINVAL;
if (size != sizeof(outarg) + outarg.namelen + 1)
- goto err;
+ return -EINVAL;
+
+ buf = kzalloc(outarg.namelen + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
name.name = buf;
name.len = outarg.namelen;
@@ -1560,12 +1711,8 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
down_read(&fc->killsb);
err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name, outarg.flags);
up_read(&fc->killsb);
- kfree(buf);
- return err;
-
err:
kfree(buf);
- fuse_copy_finish(cs);
return err;
}
@@ -1573,29 +1720,26 @@ static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_delete_out outarg;
- int err = -ENOMEM;
+ int err;
char *buf;
struct qstr name;
- buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
- if (!buf)
- goto err;
-
- err = -EINVAL;
if (size < sizeof(outarg))
- goto err;
+ return -EINVAL;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
- goto err;
+ return err;
- err = -ENAMETOOLONG;
- if (outarg.namelen > FUSE_NAME_MAX)
- goto err;
+ if (outarg.namelen > fc->name_max)
+ return -ENAMETOOLONG;
- err = -EINVAL;
if (size != sizeof(outarg) + outarg.namelen + 1)
- goto err;
+ return -EINVAL;
+
+ buf = kzalloc(outarg.namelen + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
name.name = buf;
name.len = outarg.namelen;
@@ -1608,12 +1752,8 @@ static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
down_read(&fc->killsb);
err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name, 0);
up_read(&fc->killsb);
- kfree(buf);
- return err;
-
err:
kfree(buf);
- fuse_copy_finish(cs);
return err;
}
@@ -1631,17 +1771,15 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
loff_t file_size;
loff_t end;
- err = -EINVAL;
if (size < sizeof(outarg))
- goto out_finish;
+ return -EINVAL;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
- goto out_finish;
+ return err;
- err = -EINVAL;
if (size - sizeof(outarg) != outarg.size)
- goto out_finish;
+ return -EINVAL;
nodeid = outarg.nodeid;
@@ -1665,20 +1803,23 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
num = outarg.size;
while (num) {
struct folio *folio;
- struct page *page;
- unsigned int this_num;
+ unsigned int folio_offset;
+ unsigned int nr_bytes;
+ unsigned int nr_pages;
folio = filemap_grab_folio(mapping, index);
err = PTR_ERR(folio);
if (IS_ERR(folio))
goto out_iput;
- page = &folio->page;
- this_num = min_t(unsigned, num, folio_size(folio) - offset);
- err = fuse_copy_page(cs, &page, offset, this_num, 0);
+ folio_offset = ((index - folio->index) << PAGE_SHIFT) + offset;
+ nr_bytes = min_t(unsigned, num, folio_size(folio) - folio_offset);
+ nr_pages = (offset + nr_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ err = fuse_copy_folio(cs, &folio, folio_offset, nr_bytes, 0);
if (!folio_test_uptodate(folio) && !err && offset == 0 &&
- (this_num == folio_size(folio) || file_size == end)) {
- folio_zero_segment(folio, this_num, folio_size(folio));
+ (nr_bytes == folio_size(folio) || file_size == end)) {
+ folio_zero_segment(folio, nr_bytes, folio_size(folio));
folio_mark_uptodate(folio);
}
folio_unlock(folio);
@@ -1687,9 +1828,9 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
if (err)
goto out_iput;
- num -= this_num;
+ num -= nr_bytes;
offset = 0;
- index++;
+ index += nr_pages;
}
err = 0;
@@ -1698,8 +1839,6 @@ out_iput:
iput(inode);
out_up_killsb:
up_read(&fc->killsb);
-out_finish:
- fuse_copy_finish(cs);
return err;
}
@@ -1728,7 +1867,7 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
unsigned int num;
unsigned int offset;
size_t total_len = 0;
- unsigned int num_pages, cur_pages = 0;
+ unsigned int num_pages;
struct fuse_conn *fc = fm->fc;
struct fuse_retrieve_args *ra;
size_t args_size = sizeof(*ra);
@@ -1746,6 +1885,7 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
num_pages = min(num_pages, fc->max_pages);
+ num = min(num, num_pages << PAGE_SHIFT);
args_size += num_pages * (sizeof(ap->folios[0]) + sizeof(ap->descs[0]));
@@ -1760,37 +1900,42 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
args = &ap->args;
args->nodeid = outarg->nodeid;
args->opcode = FUSE_NOTIFY_REPLY;
- args->in_numargs = 2;
+ args->in_numargs = 3;
args->in_pages = true;
args->end = fuse_retrieve_end;
index = outarg->offset >> PAGE_SHIFT;
- while (num && cur_pages < num_pages) {
+ while (num && ap->num_folios < num_pages) {
struct folio *folio;
- unsigned int this_num;
+ unsigned int folio_offset;
+ unsigned int nr_bytes;
+ unsigned int nr_pages;
folio = filemap_get_folio(mapping, index);
if (IS_ERR(folio))
break;
- this_num = min_t(unsigned, num, PAGE_SIZE - offset);
+ folio_offset = ((index - folio->index) << PAGE_SHIFT) + offset;
+ nr_bytes = min(folio_size(folio) - folio_offset, num);
+ nr_pages = (offset + nr_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
ap->folios[ap->num_folios] = folio;
- ap->descs[ap->num_folios].offset = offset;
- ap->descs[ap->num_folios].length = this_num;
+ ap->descs[ap->num_folios].offset = folio_offset;
+ ap->descs[ap->num_folios].length = nr_bytes;
ap->num_folios++;
- cur_pages++;
offset = 0;
- num -= this_num;
- total_len += this_num;
- index++;
+ num -= nr_bytes;
+ total_len += nr_bytes;
+ index += nr_pages;
}
ra->inarg.offset = outarg->offset;
ra->inarg.size = total_len;
- args->in_args[0].size = sizeof(ra->inarg);
- args->in_args[0].value = &ra->inarg;
- args->in_args[1].size = total_len;
+ fuse_set_zero_arg0(args);
+ args->in_args[1].size = sizeof(ra->inarg);
+ args->in_args[1].value = &ra->inarg;
+ args->in_args[2].size = total_len;
err = fuse_simple_notify_reply(fm, args, outarg->notify_unique);
if (err)
@@ -1808,13 +1953,12 @@ static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
u64 nodeid;
int err;
- err = -EINVAL;
if (size != sizeof(outarg))
- goto copy_finish;
+ return -EINVAL;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
- goto copy_finish;
+ return err;
fuse_copy_finish(cs);
@@ -1830,10 +1974,6 @@ static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
up_read(&fc->killsb);
return err;
-
-copy_finish:
- fuse_copy_finish(cs);
- return err;
}
/*
@@ -1885,7 +2025,7 @@ static void fuse_resend(struct fuse_conn *fc)
spin_unlock(&fiq->lock);
list_for_each_entry(req, &to_queue, list)
clear_bit(FR_PENDING, &req->flags);
- end_requests(&to_queue);
+ fuse_dev_end_requests(&to_queue);
return;
}
/* iq and pq requests are both oldest to newest */
@@ -1899,11 +2039,61 @@ static int fuse_notify_resend(struct fuse_conn *fc)
return 0;
}
+/*
+ * Increments the fuse connection epoch. This causes dentries from previous
+ * epochs to be invalidated. Additionally, if inval_wq is set, a work item is
+ * scheduled to trigger the invalidation.
+ */
+static int fuse_notify_inc_epoch(struct fuse_conn *fc)
+{
+ atomic_inc(&fc->epoch);
+ if (inval_wq)
+ schedule_work(&fc->epoch_work);
+
+ return 0;
+}
+
+static int fuse_notify_prune(struct fuse_conn *fc, unsigned int size,
+ struct fuse_copy_state *cs)
+{
+ struct fuse_notify_prune_out outarg;
+ const unsigned int batch = 512;
+ u64 *nodeids __free(kfree) = kmalloc(sizeof(u64) * batch, GFP_KERNEL);
+ unsigned int num, i;
+ int err;
+
+ if (!nodeids)
+ return -ENOMEM;
+
+ if (size < sizeof(outarg))
+ return -EINVAL;
+
+ err = fuse_copy_one(cs, &outarg, sizeof(outarg));
+ if (err)
+ return err;
+
+ if (size - sizeof(outarg) != outarg.count * sizeof(u64))
+ return -EINVAL;
+
+ for (; outarg.count; outarg.count -= num) {
+ num = min(batch, outarg.count);
+ err = fuse_copy_one(cs, nodeids, num * sizeof(u64));
+ if (err)
+ return err;
+
+ scoped_guard(rwsem_read, &fc->killsb) {
+ for (i = 0; i < num; i++)
+ fuse_try_prune_one_inode(fc, nodeids[i]);
+ }
+ }
+ return 0;
+}
+
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
unsigned int size, struct fuse_copy_state *cs)
{
- /* Don't try to move pages (yet) */
- cs->move_pages = 0;
+ /* Don't try to move folios (yet) */
+ cs->move_folios = false;
switch (code) {
case FUSE_NOTIFY_POLL:
@@ -1927,14 +2117,19 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
case FUSE_NOTIFY_RESEND:
return fuse_notify_resend(fc);
+ case FUSE_NOTIFY_INC_EPOCH:
+ return fuse_notify_inc_epoch(fc);
+
+ case FUSE_NOTIFY_PRUNE:
+ return fuse_notify_prune(fc, size, cs);
+
default:
- fuse_copy_finish(cs);
return -EINVAL;
}
}
/* Look up request on processing list by unique ID */
-static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
+struct fuse_req *fuse_request_find(struct fuse_pqueue *fpq, u64 unique)
{
unsigned int hash = fuse_req_hash(unique);
struct fuse_req *req;
@@ -1946,10 +2141,17 @@ static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
return NULL;
}
-static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
- unsigned nbytes)
+int fuse_copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
+ unsigned nbytes)
{
- unsigned reqsize = sizeof(struct fuse_out_header);
+
+ unsigned int reqsize = 0;
+
+ /*
+ * Uring has all headers separated from args - args is payload only
+ */
+ if (!cs->is_uring)
+ reqsize = sizeof(struct fuse_out_header);
reqsize += fuse_len_args(args->out_numargs, args->out_args);
@@ -2001,7 +2203,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
*/
if (!oh.unique) {
err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
- goto out;
+ goto copy_finish;
}
err = -EINVAL;
@@ -2011,7 +2213,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
spin_lock(&fpq->lock);
req = NULL;
if (fpq->connected)
- req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
+ req = fuse_request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
err = -ENOENT;
if (!req) {
@@ -2044,12 +2246,12 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
spin_unlock(&fpq->lock);
cs->req = req;
if (!req->args->page_replace)
- cs->move_pages = 0;
+ cs->move_folios = false;
if (oh.error)
err = nbytes != sizeof(oh) ? -EINVAL : 0;
else
- err = copy_out_args(cs, req->args, nbytes);
+ err = fuse_copy_out_args(cs, req->args, nbytes);
fuse_copy_finish(cs);
spin_lock(&fpq->lock);
@@ -2074,7 +2276,7 @@ copy_finish:
static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
struct fuse_copy_state cs;
- struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);
+ struct fuse_dev *fud = __fuse_get_dev(iocb->ki_filp);
if (!fud)
return -EPERM;
@@ -2082,7 +2284,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
if (!user_backed_iter(from))
return -EINVAL;
- fuse_copy_init(&cs, 0, from);
+ fuse_copy_init(&cs, false, from);
return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}
@@ -2091,16 +2293,15 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
struct file *out, loff_t *ppos,
size_t len, unsigned int flags)
{
- unsigned int head, tail, mask, count;
+ unsigned int head, tail, count;
unsigned nbuf;
unsigned idx;
struct pipe_buffer *bufs;
struct fuse_copy_state cs;
- struct fuse_dev *fud;
+ struct fuse_dev *fud = __fuse_get_dev(out);
size_t rem;
ssize_t ret;
- fud = fuse_get_dev(out);
if (!fud)
return -EPERM;
@@ -2108,8 +2309,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
head = pipe->head;
tail = pipe->tail;
- mask = pipe->ring_size - 1;
- count = head - tail;
+ count = pipe_occupancy(head, tail);
bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL);
if (!bufs) {
@@ -2119,8 +2319,8 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
nbuf = 0;
rem = 0;
- for (idx = tail; idx != head && rem < len; idx++)
- rem += pipe->bufs[idx & mask].len;
+ for (idx = tail; !pipe_empty(head, idx) && rem < len; idx++)
+ rem += pipe_buf(pipe, idx)->len;
ret = -EINVAL;
if (rem < len)
@@ -2131,10 +2331,10 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
struct pipe_buffer *ibuf;
struct pipe_buffer *obuf;
- if (WARN_ON(nbuf >= count || tail == head))
+ if (WARN_ON(nbuf >= count || pipe_empty(head, tail)))
goto out_free;
- ibuf = &pipe->bufs[tail & mask];
+ ibuf = pipe_buf(pipe, tail);
obuf = &bufs[nbuf];
if (rem >= ibuf->len) {
@@ -2157,13 +2357,13 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
}
pipe_unlock(pipe);
- fuse_copy_init(&cs, 0, NULL);
+ fuse_copy_init(&cs, false, NULL);
cs.pipebufs = bufs;
cs.nr_segs = nbuf;
cs.pipe = pipe;
if (flags & SPLICE_F_MOVE)
- cs.move_pages = 1;
+ cs.move_folios = true;
ret = fuse_dev_do_write(fud, &cs, len);
@@ -2187,7 +2387,7 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
struct fuse_iqueue *fiq;
struct fuse_dev *fud = fuse_get_dev(file);
- if (!fud)
+ if (IS_ERR(fud))
return EPOLLERR;
fiq = &fud->fc->iq;
@@ -2204,7 +2404,7 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
}
/* Abort all requests on the given list (pending or processing) */
-static void end_requests(struct list_head *head)
+void fuse_dev_end_requests(struct list_head *head)
{
while (!list_empty(head)) {
struct fuse_req *req;
@@ -2240,7 +2440,7 @@ static void end_polls(struct fuse_conn *fc)
* The same effect is usually achievable through killing the filesystem daemon
* and all users of the filesystem. The exception is the combination of an
* asynchronous request and the tricky deadlock (see
- * Documentation/filesystems/fuse.rst).
+ * Documentation/filesystems/fuse/fuse.rst).
*
* Aborting requests under I/O goes as follows: 1: Separate out unlocked
* requests, they should be finished off immediately. Locked requests will be
@@ -2260,6 +2460,9 @@ void fuse_abort_conn(struct fuse_conn *fc)
LIST_HEAD(to_end);
unsigned int i;
+ if (fc->timeout.req_timeout)
+ cancel_delayed_work(&fc->timeout.work);
+
/* Background queuing checks fc->connected under bg_lock */
spin_lock(&fc->bg_lock);
fc->connected = 0;
@@ -2307,7 +2510,13 @@ void fuse_abort_conn(struct fuse_conn *fc)
wake_up_all(&fc->blocked_waitq);
spin_unlock(&fc->lock);
- end_requests(&to_end);
+ fuse_dev_end_requests(&to_end);
+
+ /*
+ * fc->lock must not be taken to avoid conflicts with io-uring
+ * locks
+ */
+ fuse_uring_abort(fc);
} else {
spin_unlock(&fc->lock);
}
@@ -2319,11 +2528,13 @@ void fuse_wait_aborted(struct fuse_conn *fc)
/* matches implicit memory barrier in fuse_drop_waiting() */
smp_mb();
wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
+
+ fuse_uring_wait_stopped_queues(fc);
}
int fuse_dev_release(struct inode *inode, struct file *file)
{
- struct fuse_dev *fud = fuse_get_dev(file);
+ struct fuse_dev *fud = __fuse_get_dev(file);
if (fud) {
struct fuse_conn *fc = fud->fc;
@@ -2337,7 +2548,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
list_splice_init(&fpq->processing[i], &to_end);
spin_unlock(&fpq->lock);
- end_requests(&to_end);
+ fuse_dev_end_requests(&to_end);
/* Are we the last open device? */
if (atomic_dec_and_test(&fc->dev_count)) {
@@ -2354,8 +2565,8 @@ static int fuse_dev_fasync(int fd, struct file *file, int on)
{
struct fuse_dev *fud = fuse_get_dev(file);
- if (!fud)
- return -EPERM;
+ if (IS_ERR(fud))
+ return PTR_ERR(fud);
/* No locking - fasync_helper does its own locking */
return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
@@ -2365,7 +2576,7 @@ static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
struct fuse_dev *fud;
- if (new->private_data)
+ if (__fuse_get_dev(new))
return -EINVAL;
fud = fuse_dev_alloc_install(fc);
@@ -2396,7 +2607,7 @@ static long fuse_dev_ioctl_clone(struct file *file, __u32 __user *argp)
* uses the same ioctl handler.
*/
if (fd_file(f)->f_op == file->f_op)
- fud = fuse_get_dev(fd_file(f));
+ fud = __fuse_get_dev(fd_file(f));
res = -EINVAL;
if (fud) {
@@ -2414,8 +2625,8 @@ static long fuse_dev_ioctl_backing_open(struct file *file,
struct fuse_dev *fud = fuse_get_dev(file);
struct fuse_backing_map map;
- if (!fud)
- return -EPERM;
+ if (IS_ERR(fud))
+ return PTR_ERR(fud);
if (!IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
return -EOPNOTSUPP;
@@ -2431,8 +2642,8 @@ static long fuse_dev_ioctl_backing_close(struct file *file, __u32 __user *argp)
struct fuse_dev *fud = fuse_get_dev(file);
int backing_id;
- if (!fud)
- return -EPERM;
+ if (IS_ERR(fud))
+ return PTR_ERR(fud);
if (!IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
return -EOPNOTSUPP;
@@ -2443,6 +2654,19 @@ static long fuse_dev_ioctl_backing_close(struct file *file, __u32 __user *argp)
return fuse_backing_close(fud->fc, backing_id);
}
+static long fuse_dev_ioctl_sync_init(struct file *file)
+{
+ int err = -EINVAL;
+
+ mutex_lock(&fuse_mutex);
+ if (!__fuse_get_dev(file)) {
+ WRITE_ONCE(file->private_data, FUSE_DEV_SYNC_INIT);
+ err = 0;
+ }
+ mutex_unlock(&fuse_mutex);
+ return err;
+}
+
static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -2458,11 +2682,25 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
case FUSE_DEV_IOC_BACKING_CLOSE:
return fuse_dev_ioctl_backing_close(file, argp);
+ case FUSE_DEV_IOC_SYNC_INIT:
+ return fuse_dev_ioctl_sync_init(file);
+
default:
return -ENOTTY;
}
}
+#ifdef CONFIG_PROC_FS
+static void fuse_dev_show_fdinfo(struct seq_file *seq, struct file *file)
+{
+ struct fuse_dev *fud = __fuse_get_dev(file);
+ if (!fud)
+ return;
+
+ seq_printf(seq, "fuse_connection:\t%u\n", fud->fc->dev);
+}
+#endif
+
const struct file_operations fuse_dev_operations = {
.owner = THIS_MODULE,
.open = fuse_dev_open,
@@ -2475,6 +2713,12 @@ const struct file_operations fuse_dev_operations = {
.fasync = fuse_dev_fasync,
.unlocked_ioctl = fuse_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
+#ifdef CONFIG_FUSE_IO_URING
+ .uring_cmd = fuse_uring_cmd,
+#endif
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = fuse_dev_show_fdinfo,
+#endif
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);
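
The new ->show_fdinfo hook above makes every open /dev/fuse descriptor report its connection number. A small sketch of consuming that from userspace - the "fuse_connection:" key comes directly from the seq_printf() in this patch, while the /proc/self/fdinfo path is generic procfs convention and the helper name is illustrative:

    #include <stdio.h>

    /*
     * Sketch: return the fuse connection (device) number of an open fd by
     * parsing the line emitted by the new show_fdinfo hook, or -1 if the fd
     * is not a fuse device.
     */
    static int fuse_connection_of_fd(int fd)
    {
        char path[64], line[128];
        unsigned int conn;
        FILE *f;

        snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
        f = fopen(path, "r");
        if (!f)
            return -1;
        while (fgets(line, sizeof(line), f)) {
            if (sscanf(line, "fuse_connection:\t%u", &conn) == 1) {
                fclose(f);
                return (int)conn;
            }
        }
        fclose(f);
        return -1;
    }
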
diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
new file mode 100644
index 000000000000..5ceb217ced1b
--- /dev/null
+++ b/fs/fuse/dev_uring.c
@@ -0,0 +1,1373 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FUSE: Filesystem in Userspace
+ * Copyright (c) 2023-2024 DataDirect Networks.
+ */
+
+#include "fuse_i.h"
+#include "dev_uring_i.h"
+#include "fuse_dev_i.h"
+#include "fuse_trace.h"
+
+#include <linux/fs.h>
+#include <linux/io_uring/cmd.h>
+
+static bool __read_mostly enable_uring;
+module_param(enable_uring, bool, 0644);
+MODULE_PARM_DESC(enable_uring,
+ "Enable userspace communication through io-uring");
+
+#define FUSE_URING_IOV_SEGS 2 /* header and payload */
+
+
+bool fuse_uring_enabled(void)
+{
+ return enable_uring;
+}
+
+struct fuse_uring_pdu {
+ struct fuse_ring_ent *ent;
+};
+
+static const struct fuse_iqueue_ops fuse_io_uring_ops;
+
+static void uring_cmd_set_ring_ent(struct io_uring_cmd *cmd,
+ struct fuse_ring_ent *ring_ent)
+{
+ struct fuse_uring_pdu *pdu =
+ io_uring_cmd_to_pdu(cmd, struct fuse_uring_pdu);
+
+ pdu->ent = ring_ent;
+}
+
+static struct fuse_ring_ent *uring_cmd_to_ring_ent(struct io_uring_cmd *cmd)
+{
+ struct fuse_uring_pdu *pdu =
+ io_uring_cmd_to_pdu(cmd, struct fuse_uring_pdu);
+
+ return pdu->ent;
+}
+
+static void fuse_uring_flush_bg(struct fuse_ring_queue *queue)
+{
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_conn *fc = ring->fc;
+
+ lockdep_assert_held(&queue->lock);
+ lockdep_assert_held(&fc->bg_lock);
+
+ /*
+ * Allow one bg request per queue, ignoring global fc limits.
+ * This prevents a single queue from consuming all resources and
+ * eliminates the need for remote queue wake-ups when global
+ * limits are met but this queue has no more waiting requests.
+ */
+ while ((fc->active_background < fc->max_background ||
+ !queue->active_background) &&
+ (!list_empty(&queue->fuse_req_bg_queue))) {
+ struct fuse_req *req;
+
+ req = list_first_entry(&queue->fuse_req_bg_queue,
+ struct fuse_req, list);
+ fc->active_background++;
+ queue->active_background++;
+
+ list_move_tail(&req->list, &queue->fuse_req_queue);
+ }
+}
+
+static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req,
+ int error)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_conn *fc = ring->fc;
+
+ lockdep_assert_not_held(&queue->lock);
+ spin_lock(&queue->lock);
+ ent->fuse_req = NULL;
+ list_del_init(&req->list);
+ if (test_bit(FR_BACKGROUND, &req->flags)) {
+ queue->active_background--;
+ spin_lock(&fc->bg_lock);
+ fuse_uring_flush_bg(queue);
+ spin_unlock(&fc->bg_lock);
+ }
+
+ spin_unlock(&queue->lock);
+
+ if (error)
+ req->out.h.error = error;
+
+ clear_bit(FR_SENT, &req->flags);
+ fuse_request_end(req);
+}
+
+/* Abort all list-queued requests on the given ring queue */
+static void fuse_uring_abort_end_queue_requests(struct fuse_ring_queue *queue)
+{
+ struct fuse_req *req;
+ LIST_HEAD(req_list);
+
+ spin_lock(&queue->lock);
+ list_for_each_entry(req, &queue->fuse_req_queue, list)
+ clear_bit(FR_PENDING, &req->flags);
+ list_splice_init(&queue->fuse_req_queue, &req_list);
+ spin_unlock(&queue->lock);
+
+ /* must not hold queue lock to avoid order issues with fi->lock */
+ fuse_dev_end_requests(&req_list);
+}
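
As the code above shows, the requests are first spliced onto a private list under the queue lock and only ended after the lock has been dropped, so that fuse_dev_end_requests() can take fi->lock without ordering problems. An illustrative userspace sketch of that detach-then-process pattern, with a pthread mutex in place of the spinlock and made-up names:

    #include <pthread.h>
    #include <stddef.h>

    struct node { struct node *next; };

    struct work_queue {
        pthread_mutex_t lock;
        struct node *head;    /* singly linked list of pending items */
    };

    /*
     * Detach the whole list under the lock, then process the detached items
     * with the lock dropped so the completion callback can take other locks
     * without ordering problems.
     */
    static void drain_queue(struct work_queue *q, void (*complete)(struct node *))
    {
        struct node *list;

        pthread_mutex_lock(&q->lock);
        list = q->head;
        q->head = NULL;
        pthread_mutex_unlock(&q->lock);

        while (list) {
            struct node *n = list;

            list = n->next;
            complete(n);
        }
    }
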
+
+void fuse_uring_abort_end_requests(struct fuse_ring *ring)
+{
+ int qid;
+ struct fuse_ring_queue *queue;
+ struct fuse_conn *fc = ring->fc;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ queue = READ_ONCE(ring->queues[qid]);
+ if (!queue)
+ continue;
+
+ queue->stopped = true;
+
+ WARN_ON_ONCE(ring->fc->max_background != UINT_MAX);
+ spin_lock(&queue->lock);
+ spin_lock(&fc->bg_lock);
+ fuse_uring_flush_bg(queue);
+ spin_unlock(&fc->bg_lock);
+ spin_unlock(&queue->lock);
+ fuse_uring_abort_end_queue_requests(queue);
+ }
+}
+
+static bool ent_list_request_expired(struct fuse_conn *fc, struct list_head *list)
+{
+ struct fuse_ring_ent *ent;
+ struct fuse_req *req;
+
+ ent = list_first_entry_or_null(list, struct fuse_ring_ent, list);
+ if (!ent)
+ return false;
+
+ req = ent->fuse_req;
+
+ return time_is_before_jiffies(req->create_time +
+ fc->timeout.req_timeout);
+}
+
+bool fuse_uring_request_expired(struct fuse_conn *fc)
+{
+ struct fuse_ring *ring = fc->ring;
+ struct fuse_ring_queue *queue;
+ int qid;
+
+ if (!ring)
+ return false;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ queue = READ_ONCE(ring->queues[qid]);
+ if (!queue)
+ continue;
+
+ spin_lock(&queue->lock);
+ if (fuse_request_expired(fc, &queue->fuse_req_queue) ||
+ fuse_request_expired(fc, &queue->fuse_req_bg_queue) ||
+ ent_list_request_expired(fc, &queue->ent_w_req_queue) ||
+ ent_list_request_expired(fc, &queue->ent_in_userspace)) {
+ spin_unlock(&queue->lock);
+ return true;
+ }
+ spin_unlock(&queue->lock);
+ }
+
+ return false;
+}
+
+void fuse_uring_destruct(struct fuse_conn *fc)
+{
+ struct fuse_ring *ring = fc->ring;
+ int qid;
+
+ if (!ring)
+ return;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ struct fuse_ring_queue *queue = ring->queues[qid];
+ struct fuse_ring_ent *ent, *next;
+
+ if (!queue)
+ continue;
+
+ WARN_ON(!list_empty(&queue->ent_avail_queue));
+ WARN_ON(!list_empty(&queue->ent_w_req_queue));
+ WARN_ON(!list_empty(&queue->ent_commit_queue));
+ WARN_ON(!list_empty(&queue->ent_in_userspace));
+
+ list_for_each_entry_safe(ent, next, &queue->ent_released,
+ list) {
+ list_del_init(&ent->list);
+ kfree(ent);
+ }
+
+ kfree(queue->fpq.processing);
+ kfree(queue);
+ ring->queues[qid] = NULL;
+ }
+
+ kfree(ring->queues);
+ kfree(ring);
+ fc->ring = NULL;
+}
+
+/*
+ * Basic ring setup for this connection based on the provided configuration
+ */
+static struct fuse_ring *fuse_uring_create(struct fuse_conn *fc)
+{
+ struct fuse_ring *ring;
+ size_t nr_queues = num_possible_cpus();
+ struct fuse_ring *res = NULL;
+ size_t max_payload_size;
+
+ ring = kzalloc(sizeof(*fc->ring), GFP_KERNEL_ACCOUNT);
+ if (!ring)
+ return NULL;
+
+ ring->queues = kcalloc(nr_queues, sizeof(struct fuse_ring_queue *),
+ GFP_KERNEL_ACCOUNT);
+ if (!ring->queues)
+ goto out_err;
+
+ max_payload_size = max(FUSE_MIN_READ_BUFFER, fc->max_write);
+ max_payload_size = max(max_payload_size, fc->max_pages * PAGE_SIZE);
+
+ spin_lock(&fc->lock);
+ if (fc->ring) {
+ /* race, another thread created the ring in the meantime */
+ spin_unlock(&fc->lock);
+ res = fc->ring;
+ goto out_err;
+ }
+
+ init_waitqueue_head(&ring->stop_waitq);
+
+ ring->nr_queues = nr_queues;
+ ring->fc = fc;
+ ring->max_payload_sz = max_payload_size;
+ smp_store_release(&fc->ring, ring);
+
+ spin_unlock(&fc->lock);
+ return ring;
+
+out_err:
+ kfree(ring->queues);
+ kfree(ring);
+ return res;
+}
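
fuse_uring_create() uses the common allocate-outside-the-lock, recheck-under-the-lock pattern: both racing threads allocate, but the loser frees its copy and adopts the winner's ring. A generic, illustrative sketch of the same pattern, with a pthread mutex standing in for fc->lock and made-up type names:

    #include <pthread.h>
    #include <stdlib.h>

    struct ring { int nr_queues; };

    static struct ring *global_ring;
    static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Allocate outside the lock, recheck under it; the loser frees its copy. */
    static struct ring *ring_get_or_create(void)
    {
        struct ring *ring = calloc(1, sizeof(*ring));
        struct ring *existing;

        if (!ring)
            return NULL;

        pthread_mutex_lock(&ring_lock);
        existing = global_ring;
        if (existing) {
            pthread_mutex_unlock(&ring_lock);
            free(ring);        /* lost the race; use the existing ring */
            return existing;
        }
        ring->nr_queues = 4;        /* placeholder setup */
        global_ring = ring;
        pthread_mutex_unlock(&ring_lock);
        return ring;
    }
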
+
+static struct fuse_ring_queue *fuse_uring_create_queue(struct fuse_ring *ring,
+ int qid)
+{
+ struct fuse_conn *fc = ring->fc;
+ struct fuse_ring_queue *queue;
+ struct list_head *pq;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL_ACCOUNT);
+ if (!queue)
+ return NULL;
+ pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL);
+ if (!pq) {
+ kfree(queue);
+ return NULL;
+ }
+
+ queue->qid = qid;
+ queue->ring = ring;
+ spin_lock_init(&queue->lock);
+
+ INIT_LIST_HEAD(&queue->ent_avail_queue);
+ INIT_LIST_HEAD(&queue->ent_commit_queue);
+ INIT_LIST_HEAD(&queue->ent_w_req_queue);
+ INIT_LIST_HEAD(&queue->ent_in_userspace);
+ INIT_LIST_HEAD(&queue->fuse_req_queue);
+ INIT_LIST_HEAD(&queue->fuse_req_bg_queue);
+ INIT_LIST_HEAD(&queue->ent_released);
+
+ queue->fpq.processing = pq;
+ fuse_pqueue_init(&queue->fpq);
+
+ spin_lock(&fc->lock);
+ if (ring->queues[qid]) {
+ spin_unlock(&fc->lock);
+ kfree(queue->fpq.processing);
+ kfree(queue);
+ return ring->queues[qid];
+ }
+
+ /*
+ * WRITE_ONCE() under the lock, as readers mostly access ring->queues[qid] without taking the lock at all
+ */
+ WRITE_ONCE(ring->queues[qid], queue);
+ spin_unlock(&fc->lock);
+
+ return queue;
+}
+
+static void fuse_uring_stop_fuse_req_end(struct fuse_req *req)
+{
+ clear_bit(FR_SENT, &req->flags);
+ req->out.h.error = -ECONNABORTED;
+ fuse_request_end(req);
+}
+
+/*
+ * Release a request/entry on connection tear down
+ */
+static void fuse_uring_entry_teardown(struct fuse_ring_ent *ent)
+{
+ struct fuse_req *req;
+ struct io_uring_cmd *cmd;
+
+ struct fuse_ring_queue *queue = ent->queue;
+
+ spin_lock(&queue->lock);
+ cmd = ent->cmd;
+ ent->cmd = NULL;
+ req = ent->fuse_req;
+ ent->fuse_req = NULL;
+ if (req) {
+ /* remove entry from queue->fpq->processing */
+ list_del_init(&req->list);
+ }
+
+ /*
+ * The entry must not be freed immediately, because IO_URING_F_CANCEL
+ * accesses entries through direct pointers - there is a risk of a race
+ * with daemon termination (which triggers IO_URING_F_CANCEL) accessing
+ * entries without checking the list state first.
+ */
+ list_move(&ent->list, &queue->ent_released);
+ ent->state = FRRS_RELEASED;
+ spin_unlock(&queue->lock);
+
+ if (cmd)
+ io_uring_cmd_done(cmd, -ENOTCONN, IO_URING_F_UNLOCKED);
+
+ if (req)
+ fuse_uring_stop_fuse_req_end(req);
+}
+
+static void fuse_uring_stop_list_entries(struct list_head *head,
+ struct fuse_ring_queue *queue,
+ enum fuse_ring_req_state exp_state)
+{
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_ring_ent *ent, *next;
+ ssize_t queue_refs = SSIZE_MAX;
+ LIST_HEAD(to_teardown);
+
+ spin_lock(&queue->lock);
+ list_for_each_entry_safe(ent, next, head, list) {
+ if (ent->state != exp_state) {
+ pr_warn("entry teardown qid=%d state=%d expected=%d",
+ queue->qid, ent->state, exp_state);
+ continue;
+ }
+
+ ent->state = FRRS_TEARDOWN;
+ list_move(&ent->list, &to_teardown);
+ }
+ spin_unlock(&queue->lock);
+
+ /* no queue lock to avoid lock order issues */
+ list_for_each_entry_safe(ent, next, &to_teardown, list) {
+ fuse_uring_entry_teardown(ent);
+ queue_refs = atomic_dec_return(&ring->queue_refs);
+ WARN_ON_ONCE(queue_refs < 0);
+ }
+}
+
+static void fuse_uring_teardown_entries(struct fuse_ring_queue *queue)
+{
+ fuse_uring_stop_list_entries(&queue->ent_in_userspace, queue,
+ FRRS_USERSPACE);
+ fuse_uring_stop_list_entries(&queue->ent_avail_queue, queue,
+ FRRS_AVAILABLE);
+}
+
+/*
+ * Log state debug info
+ */
+static void fuse_uring_log_ent_state(struct fuse_ring *ring)
+{
+ int qid;
+ struct fuse_ring_ent *ent;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ struct fuse_ring_queue *queue = ring->queues[qid];
+
+ if (!queue)
+ continue;
+
+ spin_lock(&queue->lock);
+ /*
+ * Log entries from the intermediate queues; the other queues
+ * should be empty
+ */
+ list_for_each_entry(ent, &queue->ent_w_req_queue, list) {
+ pr_info(" ent-req-queue ring=%p qid=%d ent=%p state=%d\n",
+ ring, qid, ent, ent->state);
+ }
+ list_for_each_entry(ent, &queue->ent_commit_queue, list) {
+ pr_info(" ent-commit-queue ring=%p qid=%d ent=%p state=%d\n",
+ ring, qid, ent, ent->state);
+ }
+ spin_unlock(&queue->lock);
+ }
+ ring->stop_debug_log = 1;
+}
+
+static void fuse_uring_async_stop_queues(struct work_struct *work)
+{
+ int qid;
+ struct fuse_ring *ring =
+ container_of(work, struct fuse_ring, async_teardown_work.work);
+
+ /* XXX code dup */
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]);
+
+ if (!queue)
+ continue;
+
+ fuse_uring_teardown_entries(queue);
+ }
+
+ /*
+ * Some ring entries might still be in the middle of IO operations,
+ * i.e. in the process of getting handled by file_operations::uring_cmd
+ * or on the way to userspace - we could handle that with conditions in
+ * run time code, but it is easier/cleaner to have an async teardown
+ * handler that reschedules itself while there are still queue
+ * references left.
+ */
+ if (atomic_read(&ring->queue_refs) > 0) {
+ if (time_after(jiffies,
+ ring->teardown_time + FUSE_URING_TEARDOWN_TIMEOUT))
+ fuse_uring_log_ent_state(ring);
+
+ schedule_delayed_work(&ring->async_teardown_work,
+ FUSE_URING_TEARDOWN_INTERVAL);
+ } else {
+ wake_up_all(&ring->stop_waitq);
+ }
+}
+
+/*
+ * Stop the ring queues
+ */
+void fuse_uring_stop_queues(struct fuse_ring *ring)
+{
+ int qid;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]);
+
+ if (!queue)
+ continue;
+
+ fuse_uring_teardown_entries(queue);
+ }
+
+ if (atomic_read(&ring->queue_refs) > 0) {
+ ring->teardown_time = jiffies;
+ INIT_DELAYED_WORK(&ring->async_teardown_work,
+ fuse_uring_async_stop_queues);
+ schedule_delayed_work(&ring->async_teardown_work,
+ FUSE_URING_TEARDOWN_INTERVAL);
+ } else {
+ wake_up_all(&ring->stop_waitq);
+ }
+}
+
+/*
+ * Handle IO_URING_F_CANCEL, which typically comes on daemon termination.
+ *
+ * Releasing the last entry should trigger fuse_dev_release() if
+ * the daemon was terminated.
+ */
+static void fuse_uring_cancel(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct fuse_ring_ent *ent = uring_cmd_to_ring_ent(cmd);
+ struct fuse_ring_queue *queue;
+ bool need_cmd_done = false;
+
+ /*
+ * Direct access on ent - it must not be freed as long as
+ * IO_URING_F_CANCEL might still come up
+ */
+ queue = ent->queue;
+ spin_lock(&queue->lock);
+ if (ent->state == FRRS_AVAILABLE) {
+ ent->state = FRRS_USERSPACE;
+ list_move_tail(&ent->list, &queue->ent_in_userspace);
+ need_cmd_done = true;
+ ent->cmd = NULL;
+ }
+ spin_unlock(&queue->lock);
+
+ if (need_cmd_done) {
+ /* no queue lock to avoid lock order issues */
+ io_uring_cmd_done(cmd, -ENOTCONN, issue_flags);
+ }
+}
+
+static void fuse_uring_prepare_cancel(struct io_uring_cmd *cmd, int issue_flags,
+ struct fuse_ring_ent *ring_ent)
+{
+ uring_cmd_set_ring_ent(cmd, ring_ent);
+ io_uring_cmd_mark_cancelable(cmd, issue_flags);
+}
+
+/*
+ * Check the out header for errors and store the result in the request
+ */
+static int fuse_uring_out_header_has_err(struct fuse_out_header *oh,
+ struct fuse_req *req,
+ struct fuse_conn *fc)
+{
+ int err;
+
+ err = -EINVAL;
+ if (oh->unique == 0) {
+ /* Not supported through io-uring yet */
+ pr_warn_once("notify through fuse-io-uring not supported\n");
+ goto err;
+ }
+
+ if (oh->error <= -ERESTARTSYS || oh->error > 0)
+ goto err;
+
+ if (oh->error) {
+ err = oh->error;
+ goto err;
+ }
+
+ err = -ENOENT;
+ if ((oh->unique & ~FUSE_INT_REQ_BIT) != req->in.h.unique) {
+ pr_warn_ratelimited("unique mismatch, expected: %llu got %llu\n",
+ req->in.h.unique,
+ oh->unique & ~FUSE_INT_REQ_BIT);
+ goto err;
+ }
+
+ /*
+ * Is it an interrupt reply ID?
+ * XXX: Not supported through fuse-io-uring yet, it should not even
+ * find the request - should not happen.
+ */
+ WARN_ON_ONCE(oh->unique & FUSE_INT_REQ_BIT);
+
+ err = 0;
+err:
+ return err;
+}
+
+static int fuse_uring_copy_from_ring(struct fuse_ring *ring,
+ struct fuse_req *req,
+ struct fuse_ring_ent *ent)
+{
+ struct fuse_copy_state cs;
+ struct fuse_args *args = req->args;
+ struct iov_iter iter;
+ int err;
+ struct fuse_uring_ent_in_out ring_in_out;
+
+ err = copy_from_user(&ring_in_out, &ent->headers->ring_ent_in_out,
+ sizeof(ring_in_out));
+ if (err)
+ return -EFAULT;
+
+ err = import_ubuf(ITER_SOURCE, ent->payload, ring->max_payload_sz,
+ &iter);
+ if (err)
+ return err;
+
+ fuse_copy_init(&cs, false, &iter);
+ cs.is_uring = true;
+ cs.req = req;
+
+ err = fuse_copy_out_args(&cs, args, ring_in_out.payload_sz);
+ fuse_copy_finish(&cs);
+ return err;
+}
+
+/*
+ * Copy data from the req to the ring buffer
+ */
+static int fuse_uring_args_to_ring(struct fuse_ring *ring, struct fuse_req *req,
+ struct fuse_ring_ent *ent)
+{
+ struct fuse_copy_state cs;
+ struct fuse_args *args = req->args;
+ struct fuse_in_arg *in_args = args->in_args;
+ int num_args = args->in_numargs;
+ int err;
+ struct iov_iter iter;
+ struct fuse_uring_ent_in_out ent_in_out = {
+ .flags = 0,
+ .commit_id = req->in.h.unique,
+ };
+
+ err = import_ubuf(ITER_DEST, ent->payload, ring->max_payload_sz, &iter);
+ if (err) {
+ pr_info_ratelimited("fuse: Import of user buffer failed\n");
+ return err;
+ }
+
+ fuse_copy_init(&cs, true, &iter);
+ cs.is_uring = true;
+ cs.req = req;
+
+ if (num_args > 0) {
+ /*
+ * The expectation is that the first argument is the per-op header.
+ * Some opcodes have that as zero size.
+ */
+ if (args->in_args[0].size > 0) {
+ err = copy_to_user(&ent->headers->op_in, in_args->value,
+ in_args->size);
+ if (err) {
+ pr_info_ratelimited(
+ "Copying the header failed.\n");
+ return -EFAULT;
+ }
+ }
+ in_args++;
+ num_args--;
+ }
+
+ /* copy the payload */
+ err = fuse_copy_args(&cs, num_args, args->in_pages,
+ (struct fuse_arg *)in_args, 0);
+ fuse_copy_finish(&cs);
+ if (err) {
+ pr_info_ratelimited("%s fuse_copy_args failed\n", __func__);
+ return err;
+ }
+
+ ent_in_out.payload_sz = cs.ring.copied_sz;
+ err = copy_to_user(&ent->headers->ring_ent_in_out, &ent_in_out,
+ sizeof(ent_in_out));
+ return err ? -EFAULT : 0;
+}
+
+static int fuse_uring_copy_to_ring(struct fuse_ring_ent *ent,
+ struct fuse_req *req)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+ struct fuse_ring *ring = queue->ring;
+ int err;
+
+ err = -EIO;
+ if (WARN_ON(ent->state != FRRS_FUSE_REQ)) {
+ pr_err("qid=%d ring-req=%p invalid state %d on send\n",
+ queue->qid, ent, ent->state);
+ return err;
+ }
+
+ err = -EINVAL;
+ if (WARN_ON(req->in.h.unique == 0))
+ return err;
+
+ /* copy the request */
+ err = fuse_uring_args_to_ring(ring, req, ent);
+ if (unlikely(err)) {
+ pr_info_ratelimited("Copy to ring failed: %d\n", err);
+ return err;
+ }
+
+ /* copy fuse_in_header */
+ err = copy_to_user(&ent->headers->in_out, &req->in.h,
+ sizeof(req->in.h));
+ if (err) {
+ err = -EFAULT;
+ return err;
+ }
+
+ return 0;
+}
+
+static int fuse_uring_prepare_send(struct fuse_ring_ent *ent,
+ struct fuse_req *req)
+{
+ int err;
+
+ err = fuse_uring_copy_to_ring(ent, req);
+ if (!err)
+ set_bit(FR_SENT, &req->flags);
+ else
+ fuse_uring_req_end(ent, req, err);
+
+ return err;
+}
+
+/*
+ * Write data to the ring buffer and send the request to userspace,
+ * which will read it.
+ * This is comparable to a classical read(/dev/fuse).
+ */
+static int fuse_uring_send_next_to_ring(struct fuse_ring_ent *ent,
+ struct fuse_req *req,
+ unsigned int issue_flags)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+ int err;
+ struct io_uring_cmd *cmd;
+
+ err = fuse_uring_prepare_send(ent, req);
+ if (err)
+ return err;
+
+ spin_lock(&queue->lock);
+ cmd = ent->cmd;
+ ent->cmd = NULL;
+ ent->state = FRRS_USERSPACE;
+ list_move_tail(&ent->list, &queue->ent_in_userspace);
+ spin_unlock(&queue->lock);
+
+ io_uring_cmd_done(cmd, 0, issue_flags);
+ return 0;
+}
+
+/*
+ * Make a ring entry available for fuse_req assignment
+ */
+static void fuse_uring_ent_avail(struct fuse_ring_ent *ent,
+ struct fuse_ring_queue *queue)
+{
+ WARN_ON_ONCE(!ent->cmd);
+ list_move(&ent->list, &queue->ent_avail_queue);
+ ent->state = FRRS_AVAILABLE;
+}
+
+/* Used to find the request on SQE commit */
+static void fuse_uring_add_to_pq(struct fuse_ring_ent *ent,
+ struct fuse_req *req)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+ struct fuse_pqueue *fpq = &queue->fpq;
+ unsigned int hash;
+
+ req->ring_entry = ent;
+ hash = fuse_req_hash(req->in.h.unique);
+ list_move_tail(&req->list, &fpq->processing[hash]);
+}
+
+/*
+ * Assign a fuse request to the given ring entry
+ */
+static void fuse_uring_add_req_to_ring_ent(struct fuse_ring_ent *ent,
+ struct fuse_req *req)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+
+ lockdep_assert_held(&queue->lock);
+
+ if (WARN_ON_ONCE(ent->state != FRRS_AVAILABLE &&
+ ent->state != FRRS_COMMIT)) {
+ pr_warn("%s qid=%d state=%d\n", __func__, ent->queue->qid,
+ ent->state);
+ }
+
+ clear_bit(FR_PENDING, &req->flags);
+ ent->fuse_req = req;
+ ent->state = FRRS_FUSE_REQ;
+ list_move_tail(&ent->list, &queue->ent_w_req_queue);
+ fuse_uring_add_to_pq(ent, req);
+}
+
+/* Fetch the next fuse request if available */
+static struct fuse_req *fuse_uring_ent_assign_req(struct fuse_ring_ent *ent)
+ __must_hold(&queue->lock)
+{
+ struct fuse_req *req;
+ struct fuse_ring_queue *queue = ent->queue;
+ struct list_head *req_queue = &queue->fuse_req_queue;
+
+ lockdep_assert_held(&queue->lock);
+
+ /* get and assign the next request while still holding the lock */
+ req = list_first_entry_or_null(req_queue, struct fuse_req, list);
+ if (req)
+ fuse_uring_add_req_to_ring_ent(ent, req);
+
+ return req;
+}
+
+/*
+ * Read data from the ring buffer, which user space has written to.
+ * This is comparable to the handling of a classical write(/dev/fuse).
+ * Also make the ring entry available again for new fuse requests.
+ */
+static void fuse_uring_commit(struct fuse_ring_ent *ent, struct fuse_req *req,
+ unsigned int issue_flags)
+{
+ struct fuse_ring *ring = ent->queue->ring;
+ struct fuse_conn *fc = ring->fc;
+ ssize_t err = 0;
+
+ err = copy_from_user(&req->out.h, &ent->headers->in_out,
+ sizeof(req->out.h));
+ if (err) {
+ req->out.h.error = -EFAULT;
+ goto out;
+ }
+
+ err = fuse_uring_out_header_has_err(&req->out.h, req, fc);
+ if (err) {
+ /* req->out.h.error already set */
+ goto out;
+ }
+
+ err = fuse_uring_copy_from_ring(ring, req, ent);
+out:
+ fuse_uring_req_end(ent, req, err);
+}
+
+/*
+ * Get the next fuse req and send it
+ */
+static void fuse_uring_next_fuse_req(struct fuse_ring_ent *ent,
+ struct fuse_ring_queue *queue,
+ unsigned int issue_flags)
+{
+ int err;
+ struct fuse_req *req;
+
+retry:
+ spin_lock(&queue->lock);
+ fuse_uring_ent_avail(ent, queue);
+ req = fuse_uring_ent_assign_req(ent);
+ spin_unlock(&queue->lock);
+
+ if (req) {
+ err = fuse_uring_send_next_to_ring(ent, req, issue_flags);
+ if (err)
+ goto retry;
+ }
+}
+
+static int fuse_ring_ent_set_commit(struct fuse_ring_ent *ent)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+
+ lockdep_assert_held(&queue->lock);
+
+ if (WARN_ON_ONCE(ent->state != FRRS_USERSPACE))
+ return -EIO;
+
+ ent->state = FRRS_COMMIT;
+ list_move(&ent->list, &queue->ent_commit_queue);
+
+ return 0;
+}
+
+/* FUSE_IO_URING_CMD_COMMIT_AND_FETCH handler */
+static int fuse_uring_commit_fetch(struct io_uring_cmd *cmd, int issue_flags,
+ struct fuse_conn *fc)
+{
+ const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe);
+ struct fuse_ring_ent *ent;
+ int err;
+ struct fuse_ring *ring = fc->ring;
+ struct fuse_ring_queue *queue;
+ uint64_t commit_id = READ_ONCE(cmd_req->commit_id);
+ unsigned int qid = READ_ONCE(cmd_req->qid);
+ struct fuse_pqueue *fpq;
+ struct fuse_req *req;
+
+ err = -ENOTCONN;
+ if (!ring)
+ return err;
+
+ if (qid >= ring->nr_queues)
+ return -EINVAL;
+
+ queue = ring->queues[qid];
+ if (!queue)
+ return err;
+ fpq = &queue->fpq;
+
+ if (!READ_ONCE(fc->connected) || READ_ONCE(queue->stopped))
+ return err;
+
+ spin_lock(&queue->lock);
+ /*
+ * Find a request based on the unique ID of the fuse request.
+ * This should get revised, as it needs a hash calculation and list
+ * search. And full struct fuse_pqueue is needed (memory overhead).
+ * As well as the link from req to ring_ent.
+ */
+ req = fuse_request_find(fpq, commit_id);
+ err = -ENOENT;
+ if (!req) {
+ pr_info("qid=%d commit_id %llu not found\n", queue->qid,
+ commit_id);
+ spin_unlock(&queue->lock);
+ return err;
+ }
+ list_del_init(&req->list);
+ ent = req->ring_entry;
+ req->ring_entry = NULL;
+
+ err = fuse_ring_ent_set_commit(ent);
+ if (err != 0) {
+ pr_info_ratelimited("qid=%d commit_id %llu state %d\n",
+ queue->qid, commit_id, ent->state);
+ spin_unlock(&queue->lock);
+ req->out.h.error = err;
+ clear_bit(FR_SENT, &req->flags);
+ fuse_request_end(req);
+ return err;
+ }
+
+ ent->cmd = cmd;
+ spin_unlock(&queue->lock);
+
+ /* without the queue lock, as other locks are taken */
+ fuse_uring_prepare_cancel(cmd, issue_flags, ent);
+ fuse_uring_commit(ent, req, issue_flags);
+
+ /*
+ * Fetching the next request is absolutely required as queued
+ * fuse requests would otherwise not get processed - committing
+ * and fetching is done in one step vs legacy fuse, which has separated
+ * read (fetch request) and write (commit result).
+ */
+ fuse_uring_next_fuse_req(ent, queue, issue_flags);
+ return 0;
+}
+
+static bool is_ring_ready(struct fuse_ring *ring, int current_qid)
+{
+ int qid;
+ struct fuse_ring_queue *queue;
+ bool ready = true;
+
+ for (qid = 0; qid < ring->nr_queues && ready; qid++) {
+ if (current_qid == qid)
+ continue;
+
+ queue = ring->queues[qid];
+ if (!queue) {
+ ready = false;
+ break;
+ }
+
+ spin_lock(&queue->lock);
+ if (list_empty(&queue->ent_avail_queue))
+ ready = false;
+ spin_unlock(&queue->lock);
+ }
+
+ return ready;
+}
+
+/*
+ * FUSE_IO_URING_CMD_REGISTER command handling
+ */
+static void fuse_uring_do_register(struct fuse_ring_ent *ent,
+ struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_conn *fc = ring->fc;
+ struct fuse_iqueue *fiq = &fc->iq;
+
+ fuse_uring_prepare_cancel(cmd, issue_flags, ent);
+
+ spin_lock(&queue->lock);
+ ent->cmd = cmd;
+ fuse_uring_ent_avail(ent, queue);
+ spin_unlock(&queue->lock);
+
+ if (!ring->ready) {
+ bool ready = is_ring_ready(ring, queue->qid);
+
+ if (ready) {
+ WRITE_ONCE(fiq->ops, &fuse_io_uring_ops);
+ WRITE_ONCE(ring->ready, true);
+ wake_up_all(&fc->blocked_waitq);
+ }
+ }
+}
+
+/*
+ * sqe->addr is a ptr to an iovec array, iov[0] has the headers, iov[1]
+ * the payload
+ */
+static int fuse_uring_get_iovec_from_sqe(const struct io_uring_sqe *sqe,
+ struct iovec iov[FUSE_URING_IOV_SEGS])
+{
+ struct iovec __user *uiov = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ struct iov_iter iter;
+ ssize_t ret;
+
+ if (sqe->len != FUSE_URING_IOV_SEGS)
+ return -EINVAL;
+
+ /*
+ * The direction for buffer access will actually be both READ and WRITE;
+ * using WRITE for the import should include READ access as well.
+ */
+ ret = import_iovec(WRITE, uiov, FUSE_URING_IOV_SEGS,
+ FUSE_URING_IOV_SEGS, &iov, &iter);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
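
[Editor's note] For illustration only, here is a minimal userspace sketch of how a FUSE server might lay out the two iovecs that the register command expects in sqe->addr. It is not part of this patch: the liburing usage, buffer size, fd handling and error handling are assumptions, and the uapi names (struct fuse_uring_req_header, struct fuse_uring_cmd_req, FUSE_IO_URING_CMD_REGISTER, FUSE_URING_IOV_SEGS == 2) are taken from this series and assumed to be available via <linux/fuse.h>.

/* Hypothetical userspace sketch (not part of this patch): prepare one
 * REGISTER command for queue qid on an io_uring created with
 * IORING_SETUP_SQE128.
 */
#include <liburing.h>
#include <linux/fuse.h>
#include <stdint.h>
#include <string.h>
#include <sys/uio.h>

#define PAYLOAD_SZ	(1024 * 1024)	/* must be >= the ring's max payload size */

static struct fuse_uring_req_header hdr;	/* iov[0]: headers */
static char payload[PAYLOAD_SZ];		/* iov[1]: payload */
static struct iovec iov[2];

static void prep_register(struct io_uring *ring_io, int fuse_dev_fd,
			  unsigned int qid)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring_io);
	struct fuse_uring_cmd_req *req;

	iov[0].iov_base = &hdr;
	iov[0].iov_len  = sizeof(hdr);
	iov[1].iov_base = payload;
	iov[1].iov_len  = sizeof(payload);

	memset(sqe, 0, sizeof(*sqe) + 64);	/* SQE128: clear the cmd area too */
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd     = fuse_dev_fd;		/* the (possibly cloned) /dev/fuse fd */
	sqe->cmd_op = FUSE_IO_URING_CMD_REGISTER;
	sqe->addr   = (uint64_t)(uintptr_t)iov;	/* iovec array, see comment above */
	sqe->len    = 2;			/* FUSE_URING_IOV_SEGS */

	req = (struct fuse_uring_cmd_req *)sqe->cmd;
	req->qid = qid;				/* one queue per CPU core */
	req->commit_id = 0;			/* unused for REGISTER */
}

The SQE would then be submitted with io_uring_submit(); the command only completes once the kernel has placed a fuse request into the registered buffers, or with -ENOTCONN on teardown.
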
+
+static struct fuse_ring_ent *
+fuse_uring_create_ring_ent(struct io_uring_cmd *cmd,
+ struct fuse_ring_queue *queue)
+{
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_ring_ent *ent;
+ size_t payload_size;
+ struct iovec iov[FUSE_URING_IOV_SEGS];
+ int err;
+
+ err = fuse_uring_get_iovec_from_sqe(cmd->sqe, iov);
+ if (err) {
+ pr_info_ratelimited("Failed to get iovec from sqe, err=%d\n",
+ err);
+ return ERR_PTR(err);
+ }
+
+ err = -EINVAL;
+ if (iov[0].iov_len < sizeof(struct fuse_uring_req_header)) {
+ pr_info_ratelimited("Invalid header len %zu\n", iov[0].iov_len);
+ return ERR_PTR(err);
+ }
+
+ payload_size = iov[1].iov_len;
+ if (payload_size < ring->max_payload_sz) {
+ pr_info_ratelimited("Invalid req payload len %zu\n",
+ payload_size);
+ return ERR_PTR(err);
+ }
+
+ err = -ENOMEM;
+ ent = kzalloc(sizeof(*ent), GFP_KERNEL_ACCOUNT);
+ if (!ent)
+ return ERR_PTR(err);
+
+ INIT_LIST_HEAD(&ent->list);
+
+ ent->queue = queue;
+ ent->headers = iov[0].iov_base;
+ ent->payload = iov[1].iov_base;
+
+ atomic_inc(&ring->queue_refs);
+ return ent;
+}
+
+/*
+ * Register the header and payload buffers with the kernel and put the
+ * entry on the queue as "ready to get fuse requests"
+ */
+ */
+static int fuse_uring_register(struct io_uring_cmd *cmd,
+ unsigned int issue_flags, struct fuse_conn *fc)
+{
+ const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe);
+ struct fuse_ring *ring = smp_load_acquire(&fc->ring);
+ struct fuse_ring_queue *queue;
+ struct fuse_ring_ent *ent;
+ int err;
+ unsigned int qid = READ_ONCE(cmd_req->qid);
+
+ err = -ENOMEM;
+ if (!ring) {
+ ring = fuse_uring_create(fc);
+ if (!ring)
+ return err;
+ }
+
+ if (qid >= ring->nr_queues) {
+ pr_info_ratelimited("fuse: Invalid ring qid %u\n", qid);
+ return -EINVAL;
+ }
+
+ queue = ring->queues[qid];
+ if (!queue) {
+ queue = fuse_uring_create_queue(ring, qid);
+ if (!queue)
+ return err;
+ }
+
+ /*
+ * The queue created above does not need to be destructed in
+ * case of entry errors below; that will be done at ring destruction time.
+ */
+
+ ent = fuse_uring_create_ring_ent(cmd, queue);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ fuse_uring_do_register(ent, cmd, issue_flags);
+
+ return 0;
+}
+
+/*
+ * Entry function from io_uring to handle the given passthrough command
+ * (op code IORING_OP_URING_CMD)
+ */
+int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ struct fuse_dev *fud;
+ struct fuse_conn *fc;
+ u32 cmd_op = cmd->cmd_op;
+ int err;
+
+ if ((unlikely(issue_flags & IO_URING_F_CANCEL))) {
+ fuse_uring_cancel(cmd, issue_flags);
+ return 0;
+ }
+
+ /* This extra SQE size holds struct fuse_uring_cmd_req */
+ if (!(issue_flags & IO_URING_F_SQE128))
+ return -EINVAL;
+
+ fud = fuse_get_dev(cmd->file);
+ if (IS_ERR(fud)) {
+ pr_info_ratelimited("No fuse device found\n");
+ return PTR_ERR(fud);
+ }
+ fc = fud->fc;
+
+ /* Once a connection has io-uring enabled on it, it can't be disabled */
+ if (!enable_uring && !fc->io_uring) {
+ pr_info_ratelimited("fuse-io-uring is disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (fc->aborted)
+ return -ECONNABORTED;
+ if (!fc->connected)
+ return -ENOTCONN;
+
+ /*
+ * fuse_uring_register() needs the ring to be initialized, as
+ * we need to know the max payload size
+ */
+ if (!fc->initialized)
+ return -EAGAIN;
+
+ switch (cmd_op) {
+ case FUSE_IO_URING_CMD_REGISTER:
+ err = fuse_uring_register(cmd, issue_flags, fc);
+ if (err) {
+ pr_info_once("FUSE_IO_URING_CMD_REGISTER failed err=%d\n",
+ err);
+ fc->io_uring = 0;
+ wake_up_all(&fc->blocked_waitq);
+ return err;
+ }
+ break;
+ case FUSE_IO_URING_CMD_COMMIT_AND_FETCH:
+ err = fuse_uring_commit_fetch(cmd, issue_flags, fc);
+ if (err) {
+ pr_info_once("FUSE_IO_URING_COMMIT_AND_FETCH failed err=%d\n",
+ err);
+ return err;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return -EIOCBQUEUED;
+}
+
+static void fuse_uring_send(struct fuse_ring_ent *ent, struct io_uring_cmd *cmd,
+ ssize_t ret, unsigned int issue_flags)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+
+ spin_lock(&queue->lock);
+ ent->state = FRRS_USERSPACE;
+ list_move_tail(&ent->list, &queue->ent_in_userspace);
+ ent->cmd = NULL;
+ spin_unlock(&queue->lock);
+
+ io_uring_cmd_done(cmd, ret, issue_flags);
+}
+
+/*
+ * This prepares and sends the ring request in fuse-uring task context.
+ * User buffers are not mapped yet - the application does not have permission
+ * to write to them - so this has to be executed in ring task context.
+ */
+static void fuse_uring_send_in_task(struct io_tw_req tw_req, io_tw_token_t tw)
+{
+ unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS;
+ struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
+ struct fuse_ring_ent *ent = uring_cmd_to_ring_ent(cmd);
+ struct fuse_ring_queue *queue = ent->queue;
+ int err;
+
+ if (!tw.cancel) {
+ err = fuse_uring_prepare_send(ent, ent->fuse_req);
+ if (err) {
+ fuse_uring_next_fuse_req(ent, queue, issue_flags);
+ return;
+ }
+ } else {
+ err = -ECANCELED;
+ }
+
+ fuse_uring_send(ent, cmd, err, issue_flags);
+}
+
+static struct fuse_ring_queue *fuse_uring_task_to_queue(struct fuse_ring *ring)
+{
+ unsigned int qid;
+ struct fuse_ring_queue *queue;
+
+ qid = task_cpu(current);
+
+ if (WARN_ONCE(qid >= ring->nr_queues,
+ "Core number (%u) exceeds nr queues (%zu)\n", qid,
+ ring->nr_queues))
+ qid = 0;
+
+ queue = ring->queues[qid];
+ WARN_ONCE(!queue, "Missing queue for qid %d\n", qid);
+
+ return queue;
+}
+
+static void fuse_uring_dispatch_ent(struct fuse_ring_ent *ent)
+{
+ struct io_uring_cmd *cmd = ent->cmd;
+
+ uring_cmd_set_ring_ent(cmd, ent);
+ io_uring_cmd_complete_in_task(cmd, fuse_uring_send_in_task);
+}
+
+/* queue a fuse request and send it if a ring entry is available */
+void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req)
+{
+ struct fuse_conn *fc = req->fm->fc;
+ struct fuse_ring *ring = fc->ring;
+ struct fuse_ring_queue *queue;
+ struct fuse_ring_ent *ent = NULL;
+ int err;
+
+ err = -EINVAL;
+ queue = fuse_uring_task_to_queue(ring);
+ if (!queue)
+ goto err;
+
+ fuse_request_assign_unique(fiq, req);
+
+ spin_lock(&queue->lock);
+ err = -ENOTCONN;
+ if (unlikely(queue->stopped))
+ goto err_unlock;
+
+ set_bit(FR_URING, &req->flags);
+ req->ring_queue = queue;
+ ent = list_first_entry_or_null(&queue->ent_avail_queue,
+ struct fuse_ring_ent, list);
+ if (ent)
+ fuse_uring_add_req_to_ring_ent(ent, req);
+ else
+ list_add_tail(&req->list, &queue->fuse_req_queue);
+ spin_unlock(&queue->lock);
+
+ if (ent)
+ fuse_uring_dispatch_ent(ent);
+
+ return;
+
+err_unlock:
+ spin_unlock(&queue->lock);
+err:
+ req->out.h.error = err;
+ clear_bit(FR_PENDING, &req->flags);
+ fuse_request_end(req);
+}
+
+bool fuse_uring_queue_bq_req(struct fuse_req *req)
+{
+ struct fuse_conn *fc = req->fm->fc;
+ struct fuse_ring *ring = fc->ring;
+ struct fuse_ring_queue *queue;
+ struct fuse_ring_ent *ent = NULL;
+
+ queue = fuse_uring_task_to_queue(ring);
+ if (!queue)
+ return false;
+
+ spin_lock(&queue->lock);
+ if (unlikely(queue->stopped)) {
+ spin_unlock(&queue->lock);
+ return false;
+ }
+
+ set_bit(FR_URING, &req->flags);
+ req->ring_queue = queue;
+ list_add_tail(&req->list, &queue->fuse_req_bg_queue);
+
+ ent = list_first_entry_or_null(&queue->ent_avail_queue,
+ struct fuse_ring_ent, list);
+ spin_lock(&fc->bg_lock);
+ fc->num_background++;
+ if (fc->num_background == fc->max_background)
+ fc->blocked = 1;
+ fuse_uring_flush_bg(queue);
+ spin_unlock(&fc->bg_lock);
+
+ /*
+ * Due to bg_queue flush limits there might be other bg requests
+ * in the queue that need to be handled first. Or no further req
+ * might be available.
+ */
+ req = list_first_entry_or_null(&queue->fuse_req_queue, struct fuse_req,
+ list);
+ if (ent && req) {
+ fuse_uring_add_req_to_ring_ent(ent, req);
+ spin_unlock(&queue->lock);
+
+ fuse_uring_dispatch_ent(ent);
+ } else {
+ spin_unlock(&queue->lock);
+ }
+
+ return true;
+}
+
+bool fuse_uring_remove_pending_req(struct fuse_req *req)
+{
+ struct fuse_ring_queue *queue = req->ring_queue;
+
+ return fuse_remove_pending_req(req, &queue->lock);
+}
+
+static const struct fuse_iqueue_ops fuse_io_uring_ops = {
+ /* should be sent over io-uring as an enhancement */
+ .send_forget = fuse_dev_queue_forget,
+
+ /*
+ * could be sent over io-uring, but interrupts should be rare,
+ * so there is no need to make the code complex
+ */
+ .send_interrupt = fuse_dev_queue_interrupt,
+ .send_req = fuse_uring_queue_fuse_req,
+};
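
[Editor's note] Taken together, the code above implies a simple server loop: register one entry per queue (the ring only becomes ready once every per-CPU queue has at least one available entry, see is_ring_ready()), wait for a CQE, handle the request delivered into the registered header/payload buffers, then answer with COMMIT_AND_FETCH using the answered request's unique id as commit_id. Below is a hedged sketch of that loop with the same caveats as the register sketch above; handle_fuse_request() and request_unique() are hypothetical server-specific helpers, and hdr, payload and iov are the buffers from the register sketch.

/* Hypothetical userspace sketch (not part of this patch): the
 * commit-and-fetch loop for a single registered ring entry.
 */

/* server-specific, hypothetical helpers */
void handle_fuse_request(struct fuse_uring_req_header *h, void *pl);
uint64_t request_unique(const struct fuse_uring_req_header *h);

static void serve_one(struct io_uring *ring_io, int fuse_dev_fd,
		      unsigned int qid)
{
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	struct fuse_uring_cmd_req *req;
	int res;

	for (;;) {
		/* Completion of REGISTER / COMMIT_AND_FETCH means a new fuse
		 * request is in hdr/payload (see fuse_uring_copy_to_ring()).
		 */
		io_uring_wait_cqe(ring_io, &cqe);
		res = cqe->res;
		io_uring_cqe_seen(ring_io, cqe);
		if (res < 0)
			break;			/* e.g. -ENOTCONN on teardown */

		handle_fuse_request(&hdr, payload);	/* fill in the reply */

		/* Commit the reply and fetch the next request in one step. */
		sqe = io_uring_get_sqe(ring_io);
		memset(sqe, 0, 128);
		sqe->opcode = IORING_OP_URING_CMD;
		sqe->fd     = fuse_dev_fd;
		sqe->cmd_op = FUSE_IO_URING_CMD_COMMIT_AND_FETCH;
		sqe->addr   = (uint64_t)(uintptr_t)iov;
		sqe->len    = 2;

		req = (struct fuse_uring_cmd_req *)sqe->cmd;
		req->qid = qid;
		/* commit_id must be the unique id of the request being
		 * answered (see fuse_uring_commit_fetch()).
		 */
		req->commit_id = request_unique(&hdr);
		io_uring_submit(ring_io);
	}
}
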
diff --git a/fs/fuse/dev_uring_i.h b/fs/fuse/dev_uring_i.h
new file mode 100644
index 000000000000..51a563922ce1
--- /dev/null
+++ b/fs/fuse/dev_uring_i.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * FUSE: Filesystem in Userspace
+ * Copyright (c) 2023-2024 DataDirect Networks.
+ */
+
+#ifndef _FS_FUSE_DEV_URING_I_H
+#define _FS_FUSE_DEV_URING_I_H
+
+#include "fuse_i.h"
+
+#ifdef CONFIG_FUSE_IO_URING
+
+#define FUSE_URING_TEARDOWN_TIMEOUT (5 * HZ)
+#define FUSE_URING_TEARDOWN_INTERVAL (HZ/20)
+
+enum fuse_ring_req_state {
+ FRRS_INVALID = 0,
+
+ /* The ring entry was received from userspace and is being processed */
+ FRRS_COMMIT,
+
+ /* The ring entry is waiting for new fuse requests */
+ FRRS_AVAILABLE,
+
+ /* The ring entry got assigned a fuse req */
+ FRRS_FUSE_REQ,
+
+ /* The ring entry is in or on the way to user space */
+ FRRS_USERSPACE,
+
+ /* The ring entry is in teardown */
+ FRRS_TEARDOWN,
+
+ /* The ring entry is released, but not freed yet */
+ FRRS_RELEASED,
+};
+
+/** A fuse ring entry, part of the ring queue */
+struct fuse_ring_ent {
+ /* userspace buffer */
+ struct fuse_uring_req_header __user *headers;
+ void __user *payload;
+
+ /* the ring queue that owns this entry */
+ struct fuse_ring_queue *queue;
+
+ /* fields below are protected by queue->lock */
+
+ struct io_uring_cmd *cmd;
+
+ struct list_head list;
+
+ enum fuse_ring_req_state state;
+
+ struct fuse_req *fuse_req;
+};
+
+struct fuse_ring_queue {
+ /*
+ * back pointer to the main fuse uring structure that holds this
+ * queue
+ */
+ struct fuse_ring *ring;
+
+ /* queue id, corresponds to the cpu core */
+ unsigned int qid;
+
+ /*
+ * queue lock, taken when any value in the queue changes _and_ also
+ * when a ring entry state changes.
+ */
+ spinlock_t lock;
+
+ /* available ring entries (struct fuse_ring_ent) */
+ struct list_head ent_avail_queue;
+
+ /*
+ * entries in the process of being committed or in the process of
+ * being sent to userspace
+ */
+ struct list_head ent_w_req_queue;
+ struct list_head ent_commit_queue;
+
+ /* entries in userspace */
+ struct list_head ent_in_userspace;
+
+ /* entries that are released */
+ struct list_head ent_released;
+
+ /* fuse requests waiting for an entry slot */
+ struct list_head fuse_req_queue;
+
+ /* background fuse requests */
+ struct list_head fuse_req_bg_queue;
+
+ struct fuse_pqueue fpq;
+
+ unsigned int active_background;
+
+ bool stopped;
+};
+
+/**
+ * Describes if uring is used for communication and holds all the data
+ * needed for uring communication
+ */
+struct fuse_ring {
+ /* back pointer */
+ struct fuse_conn *fc;
+
+ /* number of ring queues */
+ size_t nr_queues;
+
+ /* maximum payload/arg size */
+ size_t max_payload_sz;
+
+ struct fuse_ring_queue **queues;
+
+ /*
+ * Log ring entry states on stop when entries cannot be released
+ */
+ unsigned int stop_debug_log : 1;
+
+ wait_queue_head_t stop_waitq;
+
+ /* async tear down */
+ struct delayed_work async_teardown_work;
+
+ /* log */
+ unsigned long teardown_time;
+
+ atomic_t queue_refs;
+
+ bool ready;
+};
+
+bool fuse_uring_enabled(void);
+void fuse_uring_destruct(struct fuse_conn *fc);
+void fuse_uring_stop_queues(struct fuse_ring *ring);
+void fuse_uring_abort_end_requests(struct fuse_ring *ring);
+int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
+void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req);
+bool fuse_uring_queue_bq_req(struct fuse_req *req);
+bool fuse_uring_remove_pending_req(struct fuse_req *req);
+bool fuse_uring_request_expired(struct fuse_conn *fc);
+
+static inline void fuse_uring_abort(struct fuse_conn *fc)
+{
+ struct fuse_ring *ring = fc->ring;
+
+ if (ring == NULL)
+ return;
+
+ if (atomic_read(&ring->queue_refs) > 0) {
+ fuse_uring_abort_end_requests(ring);
+ fuse_uring_stop_queues(ring);
+ }
+}
+
+static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc)
+{
+ struct fuse_ring *ring = fc->ring;
+
+ if (ring)
+ wait_event(ring->stop_waitq,
+ atomic_read(&ring->queue_refs) == 0);
+}
+
+static inline bool fuse_uring_ready(struct fuse_conn *fc)
+{
+ return fc->ring && fc->ring->ready;
+}
+
+#else /* CONFIG_FUSE_IO_URING */
+
+static inline void fuse_uring_destruct(struct fuse_conn *fc)
+{
+}
+
+static inline bool fuse_uring_enabled(void)
+{
+ return false;
+}
+
+static inline void fuse_uring_abort(struct fuse_conn *fc)
+{
+}
+
+static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc)
+{
+}
+
+static inline bool fuse_uring_ready(struct fuse_conn *fc)
+{
+ return false;
+}
+
+static inline bool fuse_uring_remove_pending_req(struct fuse_req *req)
+{
+ return false;
+}
+
+static inline bool fuse_uring_request_expired(struct fuse_conn *fc)
+{
+ return false;
+}
+
+#endif /* CONFIG_FUSE_IO_URING */
+
+#endif /* _FS_FUSE_DEV_URING_I_H */
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 494ac372ace0..4b6b3d2758ff 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -27,6 +27,67 @@ module_param(allow_sys_admin_access, bool, 0644);
MODULE_PARM_DESC(allow_sys_admin_access,
"Allow users with CAP_SYS_ADMIN in initial userns to bypass allow_other access check");
+struct dentry_bucket {
+ struct rb_root tree;
+ spinlock_t lock;
+};
+
+#define HASH_BITS 5
+#define HASH_SIZE (1 << HASH_BITS)
+static struct dentry_bucket dentry_hash[HASH_SIZE];
+struct delayed_work dentry_tree_work;
+
+/* Minimum invalidation work queue frequency */
+#define FUSE_DENTRY_INVAL_FREQ_MIN 5
+
+unsigned __read_mostly inval_wq;
+static int inval_wq_set(const char *val, const struct kernel_param *kp)
+{
+ unsigned int num;
+ unsigned int old = inval_wq;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtouint(val, 0, &num);
+ if (ret)
+ return ret;
+
+ if ((num < FUSE_DENTRY_INVAL_FREQ_MIN) && (num != 0))
+ return -EINVAL;
+
+ /* This should prevent overflow in secs_to_jiffies() */
+ if (num > USHRT_MAX)
+ return -EINVAL;
+
+ *((unsigned int *)kp->arg) = num;
+
+ if (num && !old)
+ schedule_delayed_work(&dentry_tree_work,
+ secs_to_jiffies(num));
+ else if (!num && old)
+ cancel_delayed_work_sync(&dentry_tree_work);
+
+ return 0;
+}
+static const struct kernel_param_ops inval_wq_ops = {
+ .set = inval_wq_set,
+ .get = param_get_uint,
+};
+module_param_cb(inval_wq, &inval_wq_ops, &inval_wq, 0644);
+__MODULE_PARM_TYPE(inval_wq, "uint");
+MODULE_PARM_DESC(inval_wq,
+ "Dentries invalidation work queue period in secs (>= "
+ __stringify(FUSE_DENTRY_INVAL_FREQ_MIN) ").");
+
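
[Editor's note] A minimal userspace sketch, not part of this patch, of how the new inval_wq parameter could be enabled at run time. The sysfs path assumes fuse is built as a module named "fuse"; the accepted values follow inval_wq_set() above (0 disables the work queue, otherwise values below FUSE_DENTRY_INVAL_FREQ_MIN are rejected).

/* Hypothetical sketch: set the dentry invalidation period to 30 seconds
 * by writing the module parameter.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *val = "30";	/* >= 5 (FUSE_DENTRY_INVAL_FREQ_MIN) or 0 to disable */
	int fd = open("/sys/module/fuse/parameters/inval_wq", O_WRONLY);

	if (fd < 0) {
		perror("open inval_wq");
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write inval_wq");
	close(fd);
	return 0;
}
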
+static inline struct dentry_bucket *get_dentry_bucket(struct dentry *dentry)
+{
+ int i = hash_ptr(dentry, HASH_BITS);
+
+ return &dentry_hash[i];
+}
+
static void fuse_advise_use_readdirplus(struct inode *dir)
{
struct fuse_inode *fi = get_fuse_inode(dir);
@@ -34,33 +95,151 @@ static void fuse_advise_use_readdirplus(struct inode *dir)
set_bit(FUSE_I_ADVISE_RDPLUS, &fi->state);
}
-#if BITS_PER_LONG >= 64
-static inline void __fuse_dentry_settime(struct dentry *entry, u64 time)
+struct fuse_dentry {
+ u64 time;
+ union {
+ struct rcu_head rcu;
+ struct rb_node node;
+ };
+ struct dentry *dentry;
+};
+
+static void __fuse_dentry_tree_del_node(struct fuse_dentry *fd,
+ struct dentry_bucket *bucket)
{
- entry->d_fsdata = (void *) time;
+ if (!RB_EMPTY_NODE(&fd->node)) {
+ rb_erase(&fd->node, &bucket->tree);
+ RB_CLEAR_NODE(&fd->node);
+ }
}
-static inline u64 fuse_dentry_time(const struct dentry *entry)
+static void fuse_dentry_tree_del_node(struct dentry *dentry)
{
- return (u64)entry->d_fsdata;
+ struct fuse_dentry *fd = dentry->d_fsdata;
+ struct dentry_bucket *bucket = get_dentry_bucket(dentry);
+
+ spin_lock(&bucket->lock);
+ __fuse_dentry_tree_del_node(fd, bucket);
+ spin_unlock(&bucket->lock);
}
-#else
-union fuse_dentry {
- u64 time;
- struct rcu_head rcu;
-};
+static void fuse_dentry_tree_add_node(struct dentry *dentry)
+{
+ struct fuse_dentry *fd = dentry->d_fsdata;
+ struct dentry_bucket *bucket;
+ struct fuse_dentry *cur;
+ struct rb_node **p, *parent = NULL;
+
+ if (!inval_wq)
+ return;
+
+ bucket = get_dentry_bucket(dentry);
+
+ spin_lock(&bucket->lock);
+
+ __fuse_dentry_tree_del_node(fd, bucket);
+
+ p = &bucket->tree.rb_node;
+ while (*p) {
+ parent = *p;
+ cur = rb_entry(*p, struct fuse_dentry, node);
+ if (fd->time < cur->time)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&fd->node, parent, p);
+ rb_insert_color(&fd->node, &bucket->tree);
+ spin_unlock(&bucket->lock);
+}
+
+/*
+ * Work queue which, when enabled, periodically checks for expired dentries
+ * in the per-bucket dentry trees.
+ */
+static void fuse_dentry_tree_work(struct work_struct *work)
+{
+ LIST_HEAD(dispose);
+ struct fuse_dentry *fd;
+ struct rb_node *node;
+ int i;
+
+ for (i = 0; i < HASH_SIZE; i++) {
+ spin_lock(&dentry_hash[i].lock);
+ node = rb_first(&dentry_hash[i].tree);
+ while (node) {
+ fd = rb_entry(node, struct fuse_dentry, node);
+ if (time_after64(get_jiffies_64(), fd->time)) {
+ rb_erase(&fd->node, &dentry_hash[i].tree);
+ RB_CLEAR_NODE(&fd->node);
+ spin_unlock(&dentry_hash[i].lock);
+ d_dispose_if_unused(fd->dentry, &dispose);
+ cond_resched();
+ spin_lock(&dentry_hash[i].lock);
+ } else
+ break;
+ node = rb_first(&dentry_hash[i].tree);
+ }
+ spin_unlock(&dentry_hash[i].lock);
+ shrink_dentry_list(&dispose);
+ }
+
+ if (inval_wq)
+ schedule_delayed_work(&dentry_tree_work,
+ secs_to_jiffies(inval_wq));
+}
+
+void fuse_epoch_work(struct work_struct *work)
+{
+ struct fuse_conn *fc = container_of(work, struct fuse_conn,
+ epoch_work);
+ struct fuse_mount *fm;
+ struct inode *inode;
+
+ down_read(&fc->killsb);
+
+ inode = fuse_ilookup(fc, FUSE_ROOT_ID, &fm);
+ if (inode) {
+ iput(inode);
+ /* Remove all possible active references to cached inodes */
+ shrink_dcache_sb(fm->sb);
+ } else
+ pr_warn("Failed to get root inode\n");
+
+ up_read(&fc->killsb);
+}
+
+void fuse_dentry_tree_init(void)
+{
+ int i;
+
+ for (i = 0; i < HASH_SIZE; i++) {
+ spin_lock_init(&dentry_hash[i].lock);
+ dentry_hash[i].tree = RB_ROOT;
+ }
+ INIT_DELAYED_WORK(&dentry_tree_work, fuse_dentry_tree_work);
+}
+
+void fuse_dentry_tree_cleanup(void)
+{
+ int i;
+
+ inval_wq = 0;
+ cancel_delayed_work_sync(&dentry_tree_work);
+
+ for (i = 0; i < HASH_SIZE; i++)
+ WARN_ON_ONCE(!RB_EMPTY_ROOT(&dentry_hash[i].tree));
+}
static inline void __fuse_dentry_settime(struct dentry *dentry, u64 time)
{
- ((union fuse_dentry *) dentry->d_fsdata)->time = time;
+ ((struct fuse_dentry *) dentry->d_fsdata)->time = time;
}
static inline u64 fuse_dentry_time(const struct dentry *entry)
{
- return ((union fuse_dentry *) entry->d_fsdata)->time;
+ return ((struct fuse_dentry *) entry->d_fsdata)->time;
}
-#endif
static void fuse_dentry_settime(struct dentry *dentry, u64 time)
{
@@ -81,6 +260,7 @@ static void fuse_dentry_settime(struct dentry *dentry, u64 time)
}
__fuse_dentry_settime(dentry, time);
+ fuse_dentry_tree_add_node(dentry);
}
/*
@@ -175,9 +355,12 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
memset(outarg, 0, sizeof(struct fuse_entry_out));
args->opcode = FUSE_LOOKUP;
args->nodeid = nodeid;
- args->in_numargs = 1;
- args->in_args[0].size = name->len + 1;
- args->in_args[0].value = name->name;
+ args->in_numargs = 3;
+ fuse_set_zero_arg0(args);
+ args->in_args[1].size = name->len;
+ args->in_args[1].value = name->name;
+ args->in_args[2].size = 1;
+ args->in_args[2].value = "";
args->out_numargs = 1;
args->out_args[0].size = sizeof(struct fuse_entry_out);
args->out_args[0].value = outarg;
@@ -192,14 +375,19 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
* the lookup once more. If the lookup results in the same inode,
* then refresh the attributes, timeouts and mark the dentry valid.
*/
-static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
+static int fuse_dentry_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *entry, unsigned int flags)
{
struct inode *inode;
- struct dentry *parent;
struct fuse_mount *fm;
+ struct fuse_conn *fc;
struct fuse_inode *fi;
int ret;
+ fc = get_fuse_conn_super(dir->i_sb);
+ if (entry->d_time < atomic_read(&fc->epoch))
+ goto invalid;
+
inode = d_inode_rcu(entry);
if (inode && fuse_is_bad(inode))
goto invalid;
@@ -227,11 +415,9 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
attr_version = fuse_get_attr_version(fm->fc);
- parent = dget_parent(entry);
- fuse_lookup_init(fm->fc, &args, get_node_id(d_inode(parent)),
- &entry->d_name, &outarg);
+ fuse_lookup_init(fm->fc, &args, get_node_id(dir),
+ name, &outarg);
ret = fuse_simple_request(fm, &args);
- dput(parent);
/* Zero nodeid is same as -ENOENT */
if (!ret && !outarg.nodeid)
ret = -ENOENT;
@@ -265,9 +451,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
if (test_bit(FUSE_I_INIT_RDPLUS, &fi->state))
return -ECHILD;
} else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) {
- parent = dget_parent(entry);
- fuse_advise_use_readdirplus(d_inode(parent));
- dput(parent);
+ fuse_advise_use_readdirplus(dir);
}
}
ret = 1;
@@ -279,21 +463,36 @@ invalid:
goto out;
}
-#if BITS_PER_LONG < 64
static int fuse_dentry_init(struct dentry *dentry)
{
- dentry->d_fsdata = kzalloc(sizeof(union fuse_dentry),
- GFP_KERNEL_ACCOUNT | __GFP_RECLAIMABLE);
+ struct fuse_dentry *fd;
- return dentry->d_fsdata ? 0 : -ENOMEM;
+ fd = kzalloc(sizeof(struct fuse_dentry),
+ GFP_KERNEL_ACCOUNT | __GFP_RECLAIMABLE);
+ if (!fd)
+ return -ENOMEM;
+
+ fd->dentry = dentry;
+ RB_CLEAR_NODE(&fd->node);
+ dentry->d_fsdata = fd;
+
+ return 0;
}
+
+static void fuse_dentry_prune(struct dentry *dentry)
+{
+ struct fuse_dentry *fd = dentry->d_fsdata;
+
+ if (!RB_EMPTY_NODE(&fd->node))
+ fuse_dentry_tree_del_node(dentry);
+}
+
static void fuse_dentry_release(struct dentry *dentry)
{
- union fuse_dentry *fd = dentry->d_fsdata;
+ struct fuse_dentry *fd = dentry->d_fsdata;
kfree_rcu(fd, rcu);
}
-#endif
static int fuse_dentry_delete(const struct dentry *dentry)
{
@@ -320,9 +519,6 @@ static struct vfsmount *fuse_dentry_automount(struct path *path)
/* Create the submount */
mnt = fc_mount(fsc);
- if (!IS_ERR(mnt))
- mntget(mnt);
-
put_fs_context(fsc);
return mnt;
}
@@ -330,20 +526,12 @@ static struct vfsmount *fuse_dentry_automount(struct path *path)
const struct dentry_operations fuse_dentry_operations = {
.d_revalidate = fuse_dentry_revalidate,
.d_delete = fuse_dentry_delete,
-#if BITS_PER_LONG < 64
.d_init = fuse_dentry_init,
+ .d_prune = fuse_dentry_prune,
.d_release = fuse_dentry_release,
-#endif
.d_automount = fuse_dentry_automount,
};
-const struct dentry_operations fuse_root_dentry_operations = {
-#if BITS_PER_LONG < 64
- .d_init = fuse_dentry_init,
- .d_release = fuse_dentry_release,
-#endif
-};
-
int fuse_valid_type(int m)
{
return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
@@ -371,7 +559,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
*inode = NULL;
err = -ENAMETOOLONG;
- if (name->len > FUSE_NAME_MAX)
+ if (name->len > fm->fc->name_max)
goto out;
@@ -416,16 +604,20 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
unsigned int flags)
{
- int err;
struct fuse_entry_out outarg;
+ struct fuse_conn *fc;
struct inode *inode;
struct dentry *newent;
+ int err, epoch;
bool outarg_valid = true;
bool locked;
if (fuse_is_bad(dir))
return ERR_PTR(-EIO);
+ fc = get_fuse_conn_super(dir->i_sb);
+ epoch = atomic_read(&fc->epoch);
+
locked = fuse_lock_inode(dir);
err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
&outarg, &inode);
@@ -447,6 +639,7 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
goto out_err;
entry = newent ? newent : entry;
+ entry->d_time = epoch;
if (outarg_valid)
fuse_change_entry_timeout(entry, &outarg);
else
@@ -467,29 +660,29 @@ static int get_security_context(struct dentry *entry, umode_t mode,
{
struct fuse_secctx *fctx;
struct fuse_secctx_header *header;
- void *ctx = NULL, *ptr;
- u32 ctxlen, total_len = sizeof(*header);
+ struct lsm_context lsmctx = { };
+ void *ptr;
+ u32 total_len = sizeof(*header);
int err, nr_ctx = 0;
- const char *name;
- size_t namelen;
+ const char *name = NULL;
+ size_t namesize;
err = security_dentry_init_security(entry, mode, &entry->d_name,
- &name, &ctx, &ctxlen);
- if (err) {
- if (err != -EOPNOTSUPP)
- goto out_err;
- /* No LSM is supporting this security hook. Ignore error */
- ctxlen = 0;
- ctx = NULL;
- }
+ &name, &lsmctx);
+
+ /* If no LSM is supporting this security hook ignore error */
+ if (err && err != -EOPNOTSUPP)
+ goto out_err;
- if (ctxlen) {
+ if (lsmctx.len) {
nr_ctx = 1;
- namelen = strlen(name) + 1;
+ namesize = strlen(name) + 1;
err = -EIO;
- if (WARN_ON(namelen > XATTR_NAME_MAX + 1 || ctxlen > S32_MAX))
+ if (WARN_ON(namesize > XATTR_NAME_MAX + 1 ||
+ lsmctx.len > S32_MAX))
goto out_err;
- total_len += FUSE_REC_ALIGN(sizeof(*fctx) + namelen + ctxlen);
+ total_len += FUSE_REC_ALIGN(sizeof(*fctx) + namesize +
+ lsmctx.len);
}
err = -ENOMEM;
@@ -502,19 +695,20 @@ static int get_security_context(struct dentry *entry, umode_t mode,
ptr += sizeof(*header);
if (nr_ctx) {
fctx = ptr;
- fctx->size = ctxlen;
+ fctx->size = lsmctx.len;
ptr += sizeof(*fctx);
- strcpy(ptr, name);
- ptr += namelen;
+ strscpy(ptr, name, namesize);
+ ptr += namesize;
- memcpy(ptr, ctx, ctxlen);
+ memcpy(ptr, lsmctx.context, lsmctx.len);
}
ext->size = total_len;
ext->value = header;
err = 0;
out_err:
- kfree(ctx);
+ if (nr_ctx)
+ security_release_secctx(&lsmctx);
return err;
}
@@ -619,7 +813,6 @@ static int fuse_create_open(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *entry, struct file *file,
unsigned int flags, umode_t mode, u32 opcode)
{
- int err;
struct inode *inode;
struct fuse_mount *fm = get_fuse_mount(dir);
FUSE_ARGS(args);
@@ -629,11 +822,13 @@ static int fuse_create_open(struct mnt_idmap *idmap, struct inode *dir,
struct fuse_entry_out outentry;
struct fuse_inode *fi;
struct fuse_file *ff;
+ int epoch, err;
bool trunc = flags & O_TRUNC;
/* Userspace expects S_IFREG in create mode */
BUG_ON((mode & S_IFMT) != S_IFREG);
+ epoch = atomic_read(&fm->fc->epoch);
forget = fuse_alloc_forget();
err = -ENOMEM;
if (!forget)
@@ -702,6 +897,7 @@ static int fuse_create_open(struct mnt_idmap *idmap, struct inode *dir,
}
kfree(forget);
d_instantiate(entry, inode);
+ entry->d_time = epoch;
fuse_change_entry_timeout(entry, &outentry);
fuse_dir_changed(dir);
err = generic_file_open(inode, file);
@@ -737,22 +933,18 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
int err;
struct mnt_idmap *idmap = file_mnt_idmap(file);
struct fuse_conn *fc = get_fuse_conn(dir);
- struct dentry *res = NULL;
if (fuse_is_bad(dir))
return -EIO;
if (d_in_lookup(entry)) {
- res = fuse_lookup(dir, entry, 0);
- if (IS_ERR(res))
- return PTR_ERR(res);
-
- if (res)
- entry = res;
+ struct dentry *res = fuse_lookup(dir, entry, 0);
+ if (res || d_really_is_positive(entry))
+ return finish_no_open(file, res);
}
- if (!(flags & O_CREAT) || d_really_is_positive(entry))
- goto no_open;
+ if (!(flags & O_CREAT))
+ return finish_no_open(file, NULL);
/* Only creates */
file->f_mode |= FMODE_CREATED;
@@ -766,37 +958,36 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
goto mknod;
} else if (err == -EEXIST)
fuse_invalidate_entry(entry);
-out_dput:
- dput(res);
return err;
mknod:
err = fuse_mknod(idmap, dir, entry, mode, 0);
if (err)
- goto out_dput;
-no_open:
- return finish_no_open(file, res);
+ return err;
+ return finish_no_open(file, NULL);
}
/*
* Code shared between mknod, mkdir, symlink and link
*/
-static int create_new_entry(struct mnt_idmap *idmap, struct fuse_mount *fm,
- struct fuse_args *args, struct inode *dir,
- struct dentry *entry, umode_t mode)
+static struct dentry *create_new_entry(struct mnt_idmap *idmap, struct fuse_mount *fm,
+ struct fuse_args *args, struct inode *dir,
+ struct dentry *entry, umode_t mode)
{
struct fuse_entry_out outarg;
struct inode *inode;
struct dentry *d;
- int err;
struct fuse_forget_link *forget;
+ int epoch, err;
if (fuse_is_bad(dir))
- return -EIO;
+ return ERR_PTR(-EIO);
+
+ epoch = atomic_read(&fm->fc->epoch);
forget = fuse_alloc_forget();
if (!forget)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
memset(&outarg, 0, sizeof(outarg));
args->nodeid = get_node_id(dir);
@@ -826,29 +1017,46 @@ static int create_new_entry(struct mnt_idmap *idmap, struct fuse_mount *fm,
&outarg.attr, ATTR_TIMEOUT(&outarg), 0, 0);
if (!inode) {
fuse_queue_forget(fm->fc, forget, outarg.nodeid, 1);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
kfree(forget);
d_drop(entry);
d = d_splice_alias(inode, entry);
if (IS_ERR(d))
- return PTR_ERR(d);
+ return d;
if (d) {
+ d->d_time = epoch;
fuse_change_entry_timeout(d, &outarg);
- dput(d);
} else {
+ entry->d_time = epoch;
fuse_change_entry_timeout(entry, &outarg);
}
fuse_dir_changed(dir);
- return 0;
+ return d;
out_put_forget_req:
if (err == -EEXIST)
fuse_invalidate_entry(entry);
kfree(forget);
- return err;
+ return ERR_PTR(err);
+}
+
+static int create_new_nondir(struct mnt_idmap *idmap, struct fuse_mount *fm,
+ struct fuse_args *args, struct inode *dir,
+ struct dentry *entry, umode_t mode)
+{
+ /*
+ * Note that when creating anything other than a directory we
+ * can be sure create_new_entry() will NOT return an alternate
+ * dentry as d_splice_alias() only returns an alternate dentry
+ * for directories. So we don't need to check for that case
+ * when passing back the result.
+ */
+ WARN_ON_ONCE(S_ISDIR(mode));
+
+ return PTR_ERR(create_new_entry(idmap, fm, args, dir, entry, mode));
}
static int fuse_mknod(struct mnt_idmap *idmap, struct inode *dir,
@@ -871,7 +1079,7 @@ static int fuse_mknod(struct mnt_idmap *idmap, struct inode *dir,
args.in_args[0].value = &inarg;
args.in_args[1].size = entry->d_name.len + 1;
args.in_args[1].value = entry->d_name.name;
- return create_new_entry(idmap, fm, &args, dir, entry, mode);
+ return create_new_nondir(idmap, fm, &args, dir, entry, mode);
}
static int fuse_create(struct mnt_idmap *idmap, struct inode *dir,
@@ -898,8 +1106,8 @@ static int fuse_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
return err;
}
-static int fuse_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *entry, umode_t mode)
+static struct dentry *fuse_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *entry, umode_t mode)
{
struct fuse_mkdir_in inarg;
struct fuse_mount *fm = get_fuse_mount(dir);
@@ -928,12 +1136,13 @@ static int fuse_symlink(struct mnt_idmap *idmap, struct inode *dir,
FUSE_ARGS(args);
args.opcode = FUSE_SYMLINK;
- args.in_numargs = 2;
- args.in_args[0].size = entry->d_name.len + 1;
- args.in_args[0].value = entry->d_name.name;
- args.in_args[1].size = len;
- args.in_args[1].value = link;
- return create_new_entry(idmap, fm, &args, dir, entry, S_IFLNK);
+ args.in_numargs = 3;
+ fuse_set_zero_arg0(&args);
+ args.in_args[1].size = entry->d_name.len + 1;
+ args.in_args[1].value = entry->d_name.name;
+ args.in_args[2].size = len;
+ args.in_args[2].value = link;
+ return create_new_nondir(idmap, fm, &args, dir, entry, S_IFLNK);
}
void fuse_flush_time_update(struct inode *inode)
@@ -992,9 +1201,10 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
args.opcode = FUSE_UNLINK;
args.nodeid = get_node_id(dir);
- args.in_numargs = 1;
- args.in_args[0].size = entry->d_name.len + 1;
- args.in_args[0].value = entry->d_name.name;
+ args.in_numargs = 2;
+ fuse_set_zero_arg0(&args);
+ args.in_args[1].size = entry->d_name.len + 1;
+ args.in_args[1].value = entry->d_name.name;
err = fuse_simple_request(fm, &args);
if (!err) {
fuse_dir_changed(dir);
@@ -1015,9 +1225,10 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
args.opcode = FUSE_RMDIR;
args.nodeid = get_node_id(dir);
- args.in_numargs = 1;
- args.in_args[0].size = entry->d_name.len + 1;
- args.in_args[0].value = entry->d_name.name;
+ args.in_numargs = 2;
+ fuse_set_zero_arg0(&args);
+ args.in_args[1].size = entry->d_name.len + 1;
+ args.in_args[1].value = entry->d_name.name;
err = fuse_simple_request(fm, &args);
if (!err) {
fuse_dir_changed(dir);
@@ -1120,6 +1331,9 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
struct fuse_mount *fm = get_fuse_mount(inode);
FUSE_ARGS(args);
+ if (fm->fc->no_link)
+ goto out;
+
memset(&inarg, 0, sizeof(inarg));
inarg.oldnodeid = get_node_id(inode);
args.opcode = FUSE_LINK;
@@ -1128,12 +1342,18 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
args.in_args[0].value = &inarg;
args.in_args[1].size = newent->d_name.len + 1;
args.in_args[1].value = newent->d_name.name;
- err = create_new_entry(&invalid_mnt_idmap, fm, &args, newdir, newent, inode->i_mode);
+ err = create_new_nondir(&invalid_mnt_idmap, fm, &args, newdir, newent, inode->i_mode);
if (!err)
fuse_update_ctime_in_cache(inode);
else if (err == -EINTR)
fuse_invalidate_attr(inode);
+ if (err == -ENOSYS)
+ fm->fc->no_link = 1;
+out:
+ if (fm->fc->no_link)
+ return -EPERM;
+
return err;
}
@@ -1344,6 +1564,7 @@ retry:
generic_fillattr(idmap, request_mask, inode, stat);
stat->mode = fi->orig_i_mode;
stat->ino = fi->orig_ino;
+ stat->blksize = 1 << fi->cached_i_blkbits;
if (test_bit(FUSE_I_BTIME, &fi->state)) {
stat->btime = fi->i_btime;
stat->result_mask |= STATX_BTIME;
@@ -1370,27 +1591,25 @@ int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
if (!parent)
return -ENOENT;
- inode_lock_nested(parent, I_MUTEX_PARENT);
if (!S_ISDIR(parent->i_mode))
- goto unlock;
+ goto put_parent;
err = -ENOENT;
dir = d_find_alias(parent);
if (!dir)
- goto unlock;
+ goto put_parent;
- name->hash = full_name_hash(dir, name->name, name->len);
- entry = d_lookup(dir, name);
+ entry = start_removing_noperm(dir, name);
dput(dir);
- if (!entry)
- goto unlock;
+ if (IS_ERR(entry))
+ goto put_parent;
fuse_dir_changed(parent);
if (!(flags & FUSE_EXPIRE_ONLY))
d_invalidate(entry);
fuse_invalidate_entry_cache(entry);
- if (child_nodeid != 0 && d_really_is_positive(entry)) {
+ if (child_nodeid != 0) {
inode_lock(d_inode(entry));
if (get_node_id(d_inode(entry)) != child_nodeid) {
err = -ENOENT;
@@ -1418,10 +1637,9 @@ int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
} else {
err = 0;
}
- dput(entry);
- unlock:
- inode_unlock(parent);
+ end_removing(entry);
+ put_parent:
iput(parent);
return err;
}
@@ -1586,10 +1804,10 @@ static int fuse_permission(struct mnt_idmap *idmap,
return err;
}
-static int fuse_readlink_page(struct inode *inode, struct folio *folio)
+static int fuse_readlink_folio(struct inode *inode, struct folio *folio)
{
struct fuse_mount *fm = get_fuse_mount(inode);
- struct fuse_folio_desc desc = { .length = PAGE_SIZE - 1 };
+ struct fuse_folio_desc desc = { .length = folio_size(folio) - 1 };
struct fuse_args_pages ap = {
.num_folios = 1,
.folios = &folio,
@@ -1633,7 +1851,7 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
goto out_err;
if (fc->cache_symlinks)
- return page_get_link(dentry, inode, callback);
+ return page_get_link_raw(dentry, inode, callback);
err = -ECHILD;
if (!dentry)
@@ -1644,13 +1862,13 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
if (!folio)
goto out_err;
- err = fuse_readlink_page(inode, folio);
+ err = fuse_readlink_folio(inode, folio);
if (err) {
folio_put(folio);
goto out_err;
}
- set_delayed_call(callback, page_put_link, &folio->page);
+ set_delayed_call(callback, page_put_link, folio);
return folio_address(folio);
@@ -1681,6 +1899,8 @@ static int fuse_dir_open(struct inode *inode, struct file *file)
*/
if (ff->open_flags & (FOPEN_STREAM | FOPEN_NONSEEKABLE))
nonseekable_open(inode, file);
+ if (!(ff->open_flags & FOPEN_KEEP_CACHE))
+ invalidate_inode_pages2(inode->i_mapping);
}
return err;
@@ -1918,6 +2138,7 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
int err;
bool trust_local_cmtime = is_wb;
bool fault_blocked = false;
+ u64 attr_version;
if (!fc->default_permissions)
attr->ia_valid |= ATTR_FORCE;
@@ -1935,7 +2156,7 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (FUSE_IS_DAX(inode) && is_truncate) {
filemap_invalidate_lock(mapping);
fault_blocked = true;
- err = fuse_dax_break_layouts(inode, 0, 0);
+ err = fuse_dax_break_layouts(inode, 0, -1);
if (err) {
filemap_invalidate_unlock(mapping);
return err;
@@ -2002,6 +2223,8 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (fc->handle_killpriv_v2 && !capable(CAP_FSETID))
inarg.valid |= FATTR_KILL_SUIDGID;
}
+
+ attr_version = fuse_get_attr_version(fm->fc);
fuse_setattr_fill(fc, &args, inode, &inarg, &outarg);
err = fuse_simple_request(fm, &args);
if (err) {
@@ -2027,6 +2250,14 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
/* FIXME: clear I_DIRTY_SYNC? */
}
+ if (fi->attr_version > attr_version) {
+ /*
+ * Apply attributes, for example for fsnotify_change(), but set
+ * attribute timeout to zero.
+ */
+ outarg.attr_valid = outarg.attr_valid_nsec = 0;
+ }
+
fuse_change_attributes_common(inode, &outarg.attr, NULL,
ATTR_TIMEOUT(&outarg),
fuse_get_cache_mask(inode), 0);
@@ -2190,6 +2421,7 @@ static const struct file_operations fuse_dir_operations = {
.fsync = fuse_dir_fsync,
.unlocked_ioctl = fuse_dir_ioctl,
.compat_ioctl = fuse_dir_compat_ioctl,
+ .setlease = simple_nosetlease,
};
static const struct inode_operations fuse_common_inode_operations = {
@@ -2232,7 +2464,7 @@ void fuse_init_dir(struct inode *inode)
static int fuse_symlink_read_folio(struct file *null, struct folio *folio)
{
- int err = fuse_readlink_page(folio->mapping->host, folio);
+ int err = fuse_readlink_folio(folio->mapping->host, folio);
if (!err)
folio_mark_uptodate(folio);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 88d0946b5bc9..01bc894e9c2b 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -21,6 +21,7 @@
#include <linux/filelock.h>
#include <linux/splice.h>
#include <linux/task_io_accounting_ops.h>
+#include <linux/iomap.h>
static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
unsigned int open_flags, int opcode,
@@ -109,7 +110,9 @@ static void fuse_file_put(struct fuse_file *ff, bool sync)
fuse_file_io_release(ff, ra->inode);
if (!args) {
- /* Do nothing when server does not implement 'open' */
+ /* Do nothing when server does not implement 'opendir' */
+ } else if (args->opcode == FUSE_RELEASE && ff->fm->fc->no_open) {
+ fuse_release_end(ff->fm, args, 0);
} else if (sync) {
fuse_simple_request(ff->fm, args);
fuse_release_end(ff->fm, args, 0);
@@ -130,8 +133,17 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
struct fuse_file *ff;
int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
bool open = isdir ? !fc->no_opendir : !fc->no_open;
+ bool release = !isdir || open;
- ff = fuse_file_alloc(fm, open);
+ /*
+ * ff->args->release_args still needs to be allocated (so we can hold an
+ * inode reference while there are pending inflight file operations when
+ * ->release() is called, see fuse_prepare_release()) even if
+ * fc->no_open is set. Otherwise it becomes possible for reclaim to
+ * deadlock if, while servicing the readahead request, the server
+ * triggers reclaim and reclaim evicts the inode of the file being
+ * read ahead.
+ */
+ ff = fuse_file_alloc(fm, release);
if (!ff)
return ERR_PTR(-ENOMEM);
@@ -151,13 +163,14 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
fuse_file_free(ff);
return ERR_PTR(err);
} else {
- /* No release needed */
- kfree(ff->args);
- ff->args = NULL;
- if (isdir)
+ if (isdir) {
+ /* No release needed */
+ kfree(ff->args);
+ ff->args = NULL;
fc->no_opendir = 1;
- else
+ } else {
fc->no_open = 1;
+ }
}
}
@@ -253,7 +266,7 @@ static int fuse_open(struct inode *inode, struct file *file)
if (dax_truncate) {
filemap_invalidate_lock(inode->i_mapping);
- err = fuse_dax_break_layouts(inode, 0, 0);
+ err = fuse_dax_break_layouts(inode, 0, -1);
if (err)
goto out_inode_unlock;
}
@@ -355,8 +368,14 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff,
* Make the release synchronous if this is a fuseblk mount,
* synchronous RELEASE is allowed (and desirable) in this case
* because the server can be trusted not to screw up.
+ *
+ * Always use the asynchronous file put because the current thread
+ * might be the fuse server. This can happen if a process starts some
+ * aio and closes the fd before the aio completes. Since aio takes its
+ * own ref to the file, the IO completion has to drop the ref, which is
+ * how the fuse server can end up closing its clients' files.
*/
- fuse_file_put(ff, ff->fm->fc->destroy);
+ fuse_file_put(ff, false);
}
void fuse_release_common(struct file *file, bool isdir)
@@ -415,89 +434,11 @@ u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
struct fuse_writepage_args {
struct fuse_io_args ia;
- struct rb_node writepages_entry;
struct list_head queue_entry;
- struct fuse_writepage_args *next;
struct inode *inode;
struct fuse_sync_bucket *bucket;
};
-static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
- pgoff_t idx_from, pgoff_t idx_to)
-{
- struct rb_node *n;
-
- n = fi->writepages.rb_node;
-
- while (n) {
- struct fuse_writepage_args *wpa;
- pgoff_t curr_index;
-
- wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry);
- WARN_ON(get_fuse_inode(wpa->inode) != fi);
- curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
- if (idx_from >= curr_index + wpa->ia.ap.num_folios)
- n = n->rb_right;
- else if (idx_to < curr_index)
- n = n->rb_left;
- else
- return wpa;
- }
- return NULL;
-}
-
-/*
- * Check if any page in a range is under writeback
- */
-static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
- pgoff_t idx_to)
-{
- struct fuse_inode *fi = get_fuse_inode(inode);
- bool found;
-
- if (RB_EMPTY_ROOT(&fi->writepages))
- return false;
-
- spin_lock(&fi->lock);
- found = fuse_find_writeback(fi, idx_from, idx_to);
- spin_unlock(&fi->lock);
-
- return found;
-}
-
-static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
-{
- return fuse_range_is_writeback(inode, index, index);
-}
-
-/*
- * Wait for page writeback to be completed.
- *
- * Since fuse doesn't rely on the VM writeback tracking, this has to
- * use some other means.
- */
-static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
-{
- struct fuse_inode *fi = get_fuse_inode(inode);
-
- wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
-}
-
-static inline bool fuse_folio_is_writeback(struct inode *inode,
- struct folio *folio)
-{
- pgoff_t last = folio_next_index(folio) - 1;
- return fuse_range_is_writeback(inode, folio_index(folio), last);
-}
-
-static void fuse_wait_on_folio_writeback(struct inode *inode,
- struct folio *folio)
-{
- struct fuse_inode *fi = get_fuse_inode(inode);
-
- wait_event(fi->page_waitq, !fuse_folio_is_writeback(inode, folio));
-}
-
/*
* Wait for all pending writepages on the inode to finish.
*
@@ -532,10 +473,6 @@ static int fuse_flush(struct file *file, fl_owner_t id)
if (err)
return err;
- inode_lock(inode);
- fuse_sync_writes(inode);
- inode_unlock(inode);
-
err = filemap_check_errors(file->f_mapping);
if (err)
return err;
@@ -870,12 +807,16 @@ static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
}
}
-static int fuse_do_readfolio(struct file *file, struct folio *folio)
+static int fuse_do_readfolio(struct file *file, struct folio *folio,
+ size_t off, size_t len)
{
struct inode *inode = folio->mapping->host;
struct fuse_mount *fm = get_fuse_mount(inode);
- loff_t pos = folio_pos(folio);
- struct fuse_folio_desc desc = { .length = PAGE_SIZE };
+ loff_t pos = folio_pos(folio) + off;
+ struct fuse_folio_desc desc = {
+ .offset = off,
+ .length = len,
+ };
struct fuse_io_args ia = {
.ap.args.page_zeroing = true,
.ap.args.out_pages = true,
@@ -886,13 +827,6 @@ static int fuse_do_readfolio(struct file *file, struct folio *folio)
ssize_t res;
u64 attr_ver;
- /*
- * With the temporary pages that are used to complete writeback, we can
- * have writeback that extends beyond the lifetime of the folio. So
- * make sure we read a properly synced folio.
- */
- fuse_wait_on_folio_writeback(inode, folio);
-
attr_ver = fuse_get_attr_version(fm->fc);
/* Don't overflow end offset */
@@ -909,25 +843,155 @@ static int fuse_do_readfolio(struct file *file, struct folio *folio)
if (res < desc.length)
fuse_short_read(inode, attr_ver, res, &ia.ap);
- folio_mark_uptodate(folio);
+ return 0;
+}
+
+static int fuse_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ unsigned int flags, struct iomap *iomap,
+ struct iomap *srcmap)
+{
+ iomap->type = IOMAP_MAPPED;
+ iomap->length = length;
+ iomap->offset = offset;
+ return 0;
+}
+
+static const struct iomap_ops fuse_iomap_ops = {
+ .iomap_begin = fuse_iomap_begin,
+};
+
+struct fuse_fill_read_data {
+ struct file *file;
+
+ /* Fields below are used if sending the read request asynchronously */
+ struct fuse_conn *fc;
+ struct fuse_io_args *ia;
+ unsigned int nr_bytes;
+};
+
+/* forward declarations */
+static bool fuse_folios_need_send(struct fuse_conn *fc, loff_t pos,
+ unsigned len, struct fuse_args_pages *ap,
+ unsigned cur_bytes, bool write);
+static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file,
+ unsigned int count, bool async);
+
+static int fuse_handle_readahead(struct folio *folio,
+ struct readahead_control *rac,
+ struct fuse_fill_read_data *data, loff_t pos,
+ size_t len)
+{
+ struct fuse_io_args *ia = data->ia;
+ size_t off = offset_in_folio(folio, pos);
+ struct fuse_conn *fc = data->fc;
+ struct fuse_args_pages *ap;
+ unsigned int nr_pages;
+
+ if (ia && fuse_folios_need_send(fc, pos, len, &ia->ap, data->nr_bytes,
+ false)) {
+ fuse_send_readpages(ia, data->file, data->nr_bytes,
+ fc->async_read);
+ data->nr_bytes = 0;
+ data->ia = NULL;
+ ia = NULL;
+ }
+ if (!ia) {
+ if (fc->num_background >= fc->congestion_threshold &&
+ rac->ra->async_size >= readahead_count(rac))
+ /*
+ * Congested and only async pages left, so skip the
+ * rest.
+ */
+ return -EAGAIN;
+
+ nr_pages = min(fc->max_pages, readahead_count(rac));
+ data->ia = fuse_io_alloc(NULL, nr_pages);
+ if (!data->ia)
+ return -ENOMEM;
+ ia = data->ia;
+ }
+ folio_get(folio);
+ ap = &ia->ap;
+ ap->folios[ap->num_folios] = folio;
+ ap->descs[ap->num_folios].offset = off;
+ ap->descs[ap->num_folios].length = len;
+ data->nr_bytes += len;
+ ap->num_folios++;
return 0;
}
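To get a feel for how the batching above splits a readahead, here is a rough, self-contained estimate of how many FUSE_READ requests a fully contiguous readahead produces. It only models the max_pages and max_read caps enforced by fuse_folios_need_send(); discontiguities and the congestion bail-out are ignored, and all names are local to this sketch:

    /* Upper bound on FUSE_READ requests for nr_bytes of contiguous readahead. */
    static unsigned int readahead_nr_requests(size_t nr_bytes,
                                              unsigned int max_pages,
                                              size_t max_read,
                                              size_t page_size)
    {
            size_t per_req = (size_t)max_pages * page_size;

            if (per_req > max_read)
                    per_req = max_read;
            return (nr_bytes + per_req - 1) / per_req;
    }

With the default of max_pages = 32 and a 4KiB page size, a 1MiB readahead therefore needs eight requests unless max_read is smaller.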
+static int fuse_iomap_read_folio_range_async(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx,
+ size_t len)
+{
+ struct fuse_fill_read_data *data = ctx->read_ctx;
+ struct folio *folio = ctx->cur_folio;
+ loff_t pos = iter->pos;
+ size_t off = offset_in_folio(folio, pos);
+ struct file *file = data->file;
+ int ret;
+
+ if (ctx->rac) {
+ ret = fuse_handle_readahead(folio, ctx->rac, data, pos, len);
+ } else {
+ /*
+ * for non-readahead read requests, do reads synchronously
+ * since it's not guaranteed that the server can handle
+ * out-of-order reads
+ */
+ ret = fuse_do_readfolio(file, folio, off, len);
+ if (!ret)
+ iomap_finish_folio_read(folio, off, len, ret);
+ }
+ return ret;
+}
+
+static void fuse_iomap_read_submit(struct iomap_read_folio_ctx *ctx)
+{
+ struct fuse_fill_read_data *data = ctx->read_ctx;
+
+ if (data->ia)
+ fuse_send_readpages(data->ia, data->file, data->nr_bytes,
+ data->fc->async_read);
+}
+
+static const struct iomap_read_ops fuse_iomap_read_ops = {
+ .read_folio_range = fuse_iomap_read_folio_range_async,
+ .submit_read = fuse_iomap_read_submit,
+};
+
static int fuse_read_folio(struct file *file, struct folio *folio)
{
struct inode *inode = folio->mapping->host;
- int err;
+ struct fuse_fill_read_data data = {
+ .file = file,
+ };
+ struct iomap_read_folio_ctx ctx = {
+ .cur_folio = folio,
+ .ops = &fuse_iomap_read_ops,
+ .read_ctx = &data,
- err = -EIO;
- if (fuse_is_bad(inode))
- goto out;
+ };
+
+ if (fuse_is_bad(inode)) {
+ folio_unlock(folio);
+ return -EIO;
+ }
- err = fuse_do_readfolio(file, folio);
+ iomap_read_folio(&fuse_iomap_ops, &ctx);
fuse_invalidate_atime(inode);
- out:
- folio_unlock(folio);
- return err;
+ return 0;
+}
+
+static int fuse_iomap_read_folio_range(const struct iomap_iter *iter,
+ struct folio *folio, loff_t pos,
+ size_t len)
+{
+ struct file *file = iter->private;
+ size_t off = offset_in_folio(folio, pos);
+
+ return fuse_do_readfolio(file, folio, off, len);
}
static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
@@ -938,39 +1002,39 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
struct fuse_args_pages *ap = &ia->ap;
size_t count = ia->read.in.size;
size_t num_read = args->out_args[0].size;
- struct address_space *mapping = NULL;
+ struct address_space *mapping;
+ struct inode *inode;
- for (i = 0; mapping == NULL && i < ap->num_folios; i++)
- mapping = ap->folios[i]->mapping;
+ WARN_ON_ONCE(!ap->num_folios);
+ mapping = ap->folios[0]->mapping;
+ inode = mapping->host;
- if (mapping) {
- struct inode *inode = mapping->host;
+ /*
+ * Short read means EOF. If file size is larger, truncate it
+ */
+ if (!err && num_read < count)
+ fuse_short_read(inode, ia->read.attr_ver, num_read, ap);
- /*
- * Short read means EOF. If file size is larger, truncate it
- */
- if (!err && num_read < count)
- fuse_short_read(inode, ia->read.attr_ver, num_read, ap);
+ fuse_invalidate_atime(inode);
- fuse_invalidate_atime(inode);
+ for (i = 0; i < ap->num_folios; i++) {
+ iomap_finish_folio_read(ap->folios[i], ap->descs[i].offset,
+ ap->descs[i].length, err);
+ folio_put(ap->folios[i]);
}
-
- for (i = 0; i < ap->num_folios; i++)
- folio_end_read(ap->folios[i], !err);
if (ia->ff)
fuse_file_put(ia->ff, false);
fuse_io_free(ia);
}
-static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
+static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file,
+ unsigned int count, bool async)
{
struct fuse_file *ff = file->private_data;
struct fuse_mount *fm = ff->fm;
struct fuse_args_pages *ap = &ia->ap;
loff_t pos = folio_pos(ap->folios[0]);
- /* Currently, all folios in FUSE are one page */
- size_t count = ap->num_folios << PAGE_SHIFT;
ssize_t res;
int err;
@@ -987,7 +1051,7 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
ia->read.attr_ver = fuse_get_attr_version(fm->fc);
- if (fm->fc->async_read) {
+ if (async) {
ia->ff = fuse_file_get(ff);
ap->args.end = fuse_readpages_end;
err = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
@@ -1003,59 +1067,21 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
static void fuse_readahead(struct readahead_control *rac)
{
struct inode *inode = rac->mapping->host;
- struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
- unsigned int max_pages, nr_pages;
- pgoff_t first = readahead_index(rac);
- pgoff_t last = first + readahead_count(rac) - 1;
+ struct fuse_fill_read_data data = {
+ .file = rac->file,
+ .fc = fc,
+ };
+ struct iomap_read_folio_ctx ctx = {
+ .ops = &fuse_iomap_read_ops,
+ .rac = rac,
+ .read_ctx = &data
+ };
if (fuse_is_bad(inode))
return;
- wait_event(fi->page_waitq, !fuse_range_is_writeback(inode, first, last));
-
- max_pages = min_t(unsigned int, fc->max_pages,
- fc->max_read / PAGE_SIZE);
-
- /*
- * This is only accurate the first time through, since readahead_folio()
- * doesn't update readahead_count() from the previous folio until the
- * next call. Grab nr_pages here so we know how many pages we're going
- * to have to process. This means that we will exit here with
- * readahead_count() == folio_nr_pages(last_folio), but we will have
- * consumed all of the folios, and read_pages() will call
- * readahead_folio() again which will clean up the rac.
- */
- nr_pages = readahead_count(rac);
-
- while (nr_pages) {
- struct fuse_io_args *ia;
- struct fuse_args_pages *ap;
- struct folio *folio;
- unsigned cur_pages = min(max_pages, nr_pages);
-
- if (fc->num_background >= fc->congestion_threshold &&
- rac->ra->async_size >= readahead_count(rac))
- /*
- * Congested and only async pages left, so skip the
- * rest.
- */
- break;
-
- ia = fuse_io_alloc(NULL, cur_pages);
- if (!ia)
- return;
- ap = &ia->ap;
-
- while (ap->num_folios < cur_pages) {
- folio = readahead_folio(rac);
- ap->folios[ap->num_folios] = folio;
- ap->descs[ap->num_folios].length = folio_size(folio);
- ap->num_folios++;
- }
- fuse_send_readpages(ia, rac->file);
- nr_pages -= cur_pages;
- }
+ iomap_readahead(&fuse_iomap_ops, &ctx);
}
static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -1172,7 +1198,7 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
int err;
for (i = 0; i < ap->num_folios; i++)
- fuse_wait_on_folio_writeback(inode, ap->folios[i]);
+ folio_wait_writeback(ap->folios[i]);
fuse_write_args_fill(ia, ff, pos, count);
ia->write.in.flags = fuse_write_flags(iocb);
@@ -1212,32 +1238,27 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
struct address_space *mapping,
struct iov_iter *ii, loff_t pos,
- unsigned int max_pages)
+ unsigned int max_folios)
{
struct fuse_args_pages *ap = &ia->ap;
struct fuse_conn *fc = get_fuse_conn(mapping->host);
unsigned offset = pos & (PAGE_SIZE - 1);
- unsigned int nr_pages = 0;
size_t count = 0;
- int err;
+ unsigned int num;
+ int err = 0;
+
+ num = min(iov_iter_count(ii), fc->max_write);
ap->args.in_pages = true;
- ap->descs[0].offset = offset;
- do {
+ while (num && ap->num_folios < max_folios) {
size_t tmp;
struct folio *folio;
pgoff_t index = pos >> PAGE_SHIFT;
- size_t bytes = min_t(size_t, PAGE_SIZE - offset,
- iov_iter_count(ii));
-
- bytes = min_t(size_t, bytes, fc->max_write - count);
+ unsigned int bytes;
+ unsigned int folio_offset;
again:
- err = -EFAULT;
- if (fault_in_iov_iter_readable(ii, bytes))
- break;
-
folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
mapping_gfp_mask(mapping));
if (IS_ERR(folio)) {
@@ -1248,29 +1269,42 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
if (mapping_writably_mapped(mapping))
flush_dcache_folio(folio);
- tmp = copy_folio_from_iter_atomic(folio, offset, bytes, ii);
+ folio_offset = ((index - folio->index) << PAGE_SHIFT) + offset;
+ bytes = min(folio_size(folio) - folio_offset, num);
+
+ tmp = copy_folio_from_iter_atomic(folio, folio_offset, bytes, ii);
flush_dcache_folio(folio);
if (!tmp) {
folio_unlock(folio);
folio_put(folio);
+
+ /*
+ * Ensure forward progress by faulting in
+ * while not holding the folio lock:
+ */
+ if (fault_in_iov_iter_readable(ii, bytes)) {
+ err = -EFAULT;
+ break;
+ }
+
goto again;
}
- err = 0;
ap->folios[ap->num_folios] = folio;
+ ap->descs[ap->num_folios].offset = folio_offset;
ap->descs[ap->num_folios].length = tmp;
ap->num_folios++;
- nr_pages++;
count += tmp;
pos += tmp;
+ num -= tmp;
offset += tmp;
- if (offset == PAGE_SIZE)
+ if (offset == folio_size(folio))
offset = 0;
- /* If we copied full page, mark it uptodate */
- if (tmp == PAGE_SIZE)
+ /* If we copied full folio, mark it uptodate */
+ if (tmp == folio_size(folio))
folio_mark_uptodate(folio);
if (folio_test_uptodate(folio)) {
@@ -1279,10 +1313,9 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
ia->write.folio_locked = true;
break;
}
- if (!fc->big_writes)
+ if (!fc->big_writes || offset != 0)
break;
- } while (iov_iter_count(ii) && count < fc->max_write &&
- nr_pages < max_pages && offset == 0);
+ }
return count > 0 ? count : err;
}
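The fault-in-and-retry above is the standard way to guarantee forward progress without holding the folio lock across a page fault. One user-space pattern that exercises it is writing from a buffer that is an mmap of the same file, so the copy source can fault while the destination folio is locked (path and sizes below are made up for illustration):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/mnt/fuse/testfile", O_RDWR);
            char *src = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);

            /* The atomic copy in the kernel may find 'src' not faulted in;
             * it then unlocks the folio, faults the range in and retries. */
            pwrite(fd, src, 4096, 8192);

            munmap(src, 4096);
            close(fd);
            return 0;
    }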
@@ -1431,6 +1464,10 @@ static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
}
}
+static const struct iomap_write_ops fuse_iomap_write_ops = {
+ .read_folio_range = fuse_iomap_read_folio_range,
+};
+
static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
@@ -1440,6 +1477,7 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct inode *inode = mapping->host;
ssize_t err, count;
struct fuse_conn *fc = get_fuse_conn(inode);
+ bool writeback = false;
if (fc->writeback_cache) {
/* Update size (EOF optimization) and mode (SUID clearing) */
@@ -1448,16 +1486,11 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (err)
return err;
- if (fc->handle_killpriv_v2 &&
- setattr_should_drop_suidgid(idmap,
- file_inode(file))) {
- goto writethrough;
- }
-
- return generic_file_write_iter(iocb, from);
+ if (!fc->handle_killpriv_v2 ||
+ !setattr_should_drop_suidgid(idmap, file_inode(file)))
+ writeback = true;
}
-writethrough:
inode_lock(inode);
err = count = generic_write_checks(iocb, from);
@@ -1476,6 +1509,15 @@ writethrough:
goto out;
written = direct_write_fallback(iocb, from, written,
fuse_perform_write(iocb, from));
+ } else if (writeback) {
+ /*
+ * Use iomap so that we can do granular uptodate reads
+ * and granular dirty tracking for large folios.
+ */
+ written = iomap_file_buffered_write(iocb, from,
+ &fuse_iomap_ops,
+ &fuse_iomap_write_ops,
+ file);
} else {
written = fuse_perform_write(iocb, from);
}
@@ -1541,8 +1583,10 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
*/
struct page **pages = kzalloc(max_pages * sizeof(struct page *),
GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
+ if (!pages) {
+ ret = -ENOMEM;
+ goto out;
+ }
while (nbytes < *nbytesp && nr_pages < max_pages) {
unsigned nfolios, i;
@@ -1557,18 +1601,22 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
nbytes += ret;
- ret += start;
- /* Currently, all folios in FUSE are one page */
- nfolios = DIV_ROUND_UP(ret, PAGE_SIZE);
+ nfolios = DIV_ROUND_UP(ret + start, PAGE_SIZE);
+
+ for (i = 0; i < nfolios; i++) {
+ struct folio *folio = page_folio(pages[i]);
+ unsigned int offset = start +
+ (folio_page_idx(folio, pages[i]) << PAGE_SHIFT);
+ unsigned int len = min_t(unsigned int, ret, PAGE_SIZE - start);
- ap->descs[ap->num_folios].offset = start;
- fuse_folio_descs_length_init(ap->descs, ap->num_folios, nfolios);
- for (i = 0; i < nfolios; i++)
- ap->folios[i + ap->num_folios] = page_folio(pages[i]);
+ ap->descs[ap->num_folios].offset = offset;
+ ap->descs[ap->num_folios].length = len;
+ ap->folios[ap->num_folios] = folio;
+ start = 0;
+ ret -= len;
+ ap->num_folios++;
+ }
- ap->num_folios += nfolios;
- ap->descs[ap->num_folios - 1].length -=
- (PAGE_SIZE - ret) & (PAGE_SIZE - 1);
nr_pages += nfolios;
}
kfree(pages);
@@ -1584,6 +1632,7 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
else
ap->args.out_pages = true;
+out:
*nbytesp = nbytes;
return ret < 0 ? ret : 0;
@@ -1615,14 +1664,14 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
if (!ia)
return -ENOMEM;
- if (fopen_direct_io && fc->direct_io_allow_mmap) {
+ if (fopen_direct_io) {
res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
if (res) {
fuse_io_free(ia);
return res;
}
}
- if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
+ if (!cuse && filemap_range_has_writeback(mapping, pos, (pos + count - 1))) {
if (!write)
inode_lock(inode);
fuse_sync_writes(inode);
@@ -1689,6 +1738,15 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
if (res > 0)
*ppos = pos;
+ if (res > 0 && write && fopen_direct_io) {
+ /*
+ * As in generic_file_direct_write(), invalidate after the
+ * write, to invalidate read-ahead cache that may have competed
+ * with the write.
+ */
+ invalidate_inode_pages2_range(mapping, idx_from, idx_to);
+ }
+
return res > 0 ? res : err;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);
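idx_from and idx_to are set earlier in fuse_direct_io(), outside this hunk; assuming the usual page-index derivation from pos and count, the post-write invalidation added above covers the written byte range rounded out to page boundaries:

    /* Assumed derivation; the actual definitions are not visible in the hunk. */
    pgoff_t idx_from = pos >> PAGE_SHIFT;
    pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;

    invalidate_inode_pages2_range(mapping, idx_from, idx_to);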
@@ -1819,29 +1877,16 @@ static ssize_t fuse_splice_write(struct pipe_inode_info *pipe, struct file *out,
static void fuse_writepage_free(struct fuse_writepage_args *wpa)
{
struct fuse_args_pages *ap = &wpa->ia.ap;
- int i;
if (wpa->bucket)
fuse_sync_bucket_dec(wpa->bucket);
- for (i = 0; i < ap->num_folios; i++)
- folio_put(ap->folios[i]);
-
fuse_file_put(wpa->ia.ff, false);
kfree(ap->folios);
kfree(wpa);
}
-static void fuse_writepage_finish_stat(struct inode *inode, struct folio *folio)
-{
- struct backing_dev_info *bdi = inode_to_bdi(inode);
-
- dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- node_stat_sub_folio(folio, NR_WRITEBACK_TEMP);
- wb_writeout_inc(&bdi->wb);
-}
-
static void fuse_writepage_finish(struct fuse_writepage_args *wpa)
{
struct fuse_args_pages *ap = &wpa->ia.ap;
@@ -1850,7 +1895,13 @@ static void fuse_writepage_finish(struct fuse_writepage_args *wpa)
int i;
for (i = 0; i < ap->num_folios; i++)
- fuse_writepage_finish_stat(inode, ap->folios[i]);
+ /*
+ * Benchmarks showed that ending writeback within the
+ * scope of the fi->lock alleviates xarray lock
+ * contention and noticeably improves performance.
+ */
+ iomap_finish_folio_write(inode, ap->folios[i],
+ ap->descs[i].length);
wake_up(&fi->page_waitq);
}
@@ -1861,13 +1912,15 @@ static void fuse_send_writepage(struct fuse_mount *fm,
__releases(fi->lock)
__acquires(fi->lock)
{
- struct fuse_writepage_args *aux, *next;
struct fuse_inode *fi = get_fuse_inode(wpa->inode);
+ struct fuse_args_pages *ap = &wpa->ia.ap;
struct fuse_write_in *inarg = &wpa->ia.write.in;
- struct fuse_args *args = &wpa->ia.ap.args;
- /* Currently, all folios in FUSE are one page */
- __u64 data_size = wpa->ia.ap.num_folios * PAGE_SIZE;
- int err;
+ struct fuse_args *args = &ap->args;
+ __u64 data_size = 0;
+ int err, i;
+
+ for (i = 0; i < ap->num_folios; i++)
+ data_size += ap->descs[i].length;
fi->writectr++;
if (inarg->offset + data_size <= size) {
@@ -1898,19 +1951,8 @@ __acquires(fi->lock)
out_free:
fi->writectr--;
- rb_erase(&wpa->writepages_entry, &fi->writepages);
fuse_writepage_finish(wpa);
spin_unlock(&fi->lock);
-
- /* After rb_erase() aux request list is private */
- for (aux = wpa->next; aux; aux = next) {
- next = aux->next;
- aux->next = NULL;
- fuse_writepage_finish_stat(aux->inode,
- aux->ia.ap.folios[0]);
- fuse_writepage_free(aux);
- }
-
fuse_writepage_free(wpa);
spin_lock(&fi->lock);
}
@@ -1938,43 +1980,6 @@ __acquires(fi->lock)
}
}
-static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root,
- struct fuse_writepage_args *wpa)
-{
- pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT;
- pgoff_t idx_to = idx_from + wpa->ia.ap.num_folios - 1;
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
-
- WARN_ON(!wpa->ia.ap.num_folios);
- while (*p) {
- struct fuse_writepage_args *curr;
- pgoff_t curr_index;
-
- parent = *p;
- curr = rb_entry(parent, struct fuse_writepage_args,
- writepages_entry);
- WARN_ON(curr->inode != wpa->inode);
- curr_index = curr->ia.write.in.offset >> PAGE_SHIFT;
-
- if (idx_from >= curr_index + curr->ia.ap.num_folios)
- p = &(*p)->rb_right;
- else if (idx_to < curr_index)
- p = &(*p)->rb_left;
- else
- return curr;
- }
-
- rb_link_node(&wpa->writepages_entry, parent, p);
- rb_insert_color(&wpa->writepages_entry, root);
- return NULL;
-}
-
-static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
-{
- WARN_ON(fuse_insert_writeback(root, wpa));
-}
-
static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
int error)
{
@@ -1994,41 +1999,6 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
if (!fc->writeback_cache)
fuse_invalidate_attr_mask(inode, FUSE_STATX_MODIFY);
spin_lock(&fi->lock);
- rb_erase(&wpa->writepages_entry, &fi->writepages);
- while (wpa->next) {
- struct fuse_mount *fm = get_fuse_mount(inode);
- struct fuse_write_in *inarg = &wpa->ia.write.in;
- struct fuse_writepage_args *next = wpa->next;
-
- wpa->next = next->next;
- next->next = NULL;
- tree_insert(&fi->writepages, next);
-
- /*
- * Skip fuse_flush_writepages() to make it easy to crop requests
- * based on primary request size.
- *
- * 1st case (trivial): there are no concurrent activities using
- * fuse_set/release_nowrite. Then we're on safe side because
- * fuse_flush_writepages() would call fuse_send_writepage()
- * anyway.
- *
- * 2nd case: someone called fuse_set_nowrite and it is waiting
- * now for completion of all in-flight requests. This happens
- * rarely and no more than once per page, so this should be
- * okay.
- *
- * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
- * of fuse_set_nowrite..fuse_release_nowrite section. The fact
- * that fuse_set_nowrite returned implies that all in-flight
- * requests were completed along with all of their secondary
- * requests. Further primary requests are blocked by negative
- * writectr. Hence there cannot be any in-flight requests and
- * no invocations of fuse_writepage_end() while we're in
- * fuse_set_nowrite..fuse_release_nowrite section.
- */
- fuse_send_writepage(fm, next, inarg->offset + inarg->size);
- }
fi->writectr--;
fuse_writepage_finish(wpa);
spin_unlock(&fi->lock);
@@ -2062,17 +2032,6 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
struct fuse_file *ff;
int err;
- /*
- * Inode is always written before the last reference is dropped and
- * hence this should not be reached from reclaim.
- *
- * Writing back the inode from reclaim can deadlock if the request
- * processing itself needs an allocation. Allocations triggering
- * reclaim while serving a request can't be prevented, because it can
- * involve any number of unrelated userspace processes.
- */
- WARN_ON(wbc->for_reclaim);
-
ff = __fuse_write_file_get(fi);
err = fuse_flush_times(inode, ff);
if (ff)
@@ -2115,22 +2074,17 @@ static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
}
static void fuse_writepage_args_page_fill(struct fuse_writepage_args *wpa, struct folio *folio,
- struct folio *tmp_folio, uint32_t folio_index)
+ uint32_t folio_index, loff_t offset, unsigned len)
{
- struct inode *inode = folio->mapping->host;
struct fuse_args_pages *ap = &wpa->ia.ap;
- folio_copy(tmp_folio, folio);
-
- ap->folios[folio_index] = tmp_folio;
- ap->descs[folio_index].offset = 0;
- ap->descs[folio_index].length = PAGE_SIZE;
-
- inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
- node_stat_add_folio(tmp_folio, NR_WRITEBACK_TEMP);
+ ap->folios[folio_index] = folio;
+ ap->descs[folio_index].offset = offset;
+ ap->descs[folio_index].length = len;
}
static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio,
+ size_t offset,
struct fuse_file *ff)
{
struct inode *inode = folio->mapping->host;
@@ -2143,7 +2097,7 @@ static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio
return NULL;
fuse_writepage_add_to_bucket(fc, wpa);
- fuse_write_args_fill(&wpa->ia, ff, folio_pos(folio), 0);
+ fuse_write_args_fill(&wpa->ia, ff, folio_pos(folio) + offset, 0);
wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
wpa->inode = inode;
wpa->ia.ff = ff;
@@ -2155,74 +2109,28 @@ static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio
return wpa;
}
-static int fuse_writepage_locked(struct folio *folio)
-{
- struct address_space *mapping = folio->mapping;
- struct inode *inode = mapping->host;
- struct fuse_inode *fi = get_fuse_inode(inode);
- struct fuse_writepage_args *wpa;
- struct fuse_args_pages *ap;
- struct folio *tmp_folio;
- struct fuse_file *ff;
- int error = -ENOMEM;
-
- tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0);
- if (!tmp_folio)
- goto err;
-
- error = -EIO;
- ff = fuse_write_file_get(fi);
- if (!ff)
- goto err_nofile;
-
- wpa = fuse_writepage_args_setup(folio, ff);
- error = -ENOMEM;
- if (!wpa)
- goto err_writepage_args;
-
- ap = &wpa->ia.ap;
- ap->num_folios = 1;
-
- folio_start_writeback(folio);
- fuse_writepage_args_page_fill(wpa, folio, tmp_folio, 0);
-
- spin_lock(&fi->lock);
- tree_insert(&fi->writepages, wpa);
- list_add_tail(&wpa->queue_entry, &fi->queued_writes);
- fuse_flush_writepages(inode);
- spin_unlock(&fi->lock);
-
- folio_end_writeback(folio);
-
- return 0;
-
-err_writepage_args:
- fuse_file_put(ff, false);
-err_nofile:
- folio_put(tmp_folio);
-err:
- mapping_set_error(folio->mapping, error);
- return error;
-}
-
struct fuse_fill_wb_data {
struct fuse_writepage_args *wpa;
struct fuse_file *ff;
- struct inode *inode;
- struct folio **orig_folios;
unsigned int max_folios;
+ /*
+ * nr_bytes won't overflow since fuse_folios_need_send() caps
+ * wb requests to never exceed fc->max_pages (which has an upper bound
+ * of U16_MAX).
+ */
+ unsigned int nr_bytes;
};
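A quick check of the overflow claim in the comment above, taking 64KiB as the upper end of page sizes in common use (sketch only; the assertion is not part of the patch):

    /* U16_MAX pages of 64KiB each is 0xFFFF0000 bytes, which fits in a u32. */
    static_assert((u64)U16_MAX * SZ_64K <= U32_MAX,
                  "nr_bytes stays within unsigned int");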
-static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
+static bool fuse_pages_realloc(struct fuse_fill_wb_data *data,
+ unsigned int max_pages)
{
struct fuse_args_pages *ap = &data->wpa->ia.ap;
- struct fuse_conn *fc = get_fuse_conn(data->inode);
struct folio **folios;
struct fuse_folio_desc *descs;
unsigned int nfolios = min_t(unsigned int,
max_t(unsigned int, data->max_folios * 2,
FUSE_DEFAULT_MAX_PAGES_PER_REQ),
- fc->max_pages);
+ max_pages);
WARN_ON(nfolios <= data->max_folios);
folios = fuse_folios_alloc(nfolios, GFP_NOFS, &descs);
@@ -2239,319 +2147,170 @@ static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
return true;
}
-static void fuse_writepages_send(struct fuse_fill_wb_data *data)
+static void fuse_writepages_send(struct inode *inode,
+ struct fuse_fill_wb_data *data)
{
struct fuse_writepage_args *wpa = data->wpa;
- struct inode *inode = data->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
- int num_folios = wpa->ia.ap.num_folios;
- int i;
spin_lock(&fi->lock);
list_add_tail(&wpa->queue_entry, &fi->queued_writes);
fuse_flush_writepages(inode);
spin_unlock(&fi->lock);
-
- for (i = 0; i < num_folios; i++)
- folio_end_writeback(data->orig_folios[i]);
}
-/*
- * Check under fi->lock if the page is under writeback, and insert it onto the
- * rb_tree if not. Otherwise iterate auxiliary write requests, to see if there's
- * one already added for a page at this offset. If there's none, then insert
- * this new request onto the auxiliary list, otherwise reuse the existing one by
- * swapping the new temp page with the old one.
- */
-static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
- struct folio *folio)
+static bool fuse_folios_need_send(struct fuse_conn *fc, loff_t pos,
+ unsigned len, struct fuse_args_pages *ap,
+ unsigned cur_bytes, bool write)
{
- struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
- struct fuse_writepage_args *tmp;
- struct fuse_writepage_args *old_wpa;
- struct fuse_args_pages *new_ap = &new_wpa->ia.ap;
-
- WARN_ON(new_ap->num_folios != 0);
- new_ap->num_folios = 1;
-
- spin_lock(&fi->lock);
- old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa);
- if (!old_wpa) {
- spin_unlock(&fi->lock);
- return true;
- }
-
- for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
- pgoff_t curr_index;
-
- WARN_ON(tmp->inode != new_wpa->inode);
- curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT;
- if (curr_index == folio->index) {
- WARN_ON(tmp->ia.ap.num_folios != 1);
- swap(tmp->ia.ap.folios[0], new_ap->folios[0]);
- break;
- }
- }
-
- if (!tmp) {
- new_wpa->next = old_wpa->next;
- old_wpa->next = new_wpa;
- }
+ struct folio *prev_folio;
+ struct fuse_folio_desc prev_desc;
+ unsigned bytes = cur_bytes + len;
+ loff_t prev_pos;
+ size_t max_bytes = write ? fc->max_write : fc->max_read;
- spin_unlock(&fi->lock);
-
- if (tmp) {
- fuse_writepage_finish_stat(new_wpa->inode,
- folio);
- fuse_writepage_free(new_wpa);
- }
-
- return false;
-}
-
-static bool fuse_writepage_need_send(struct fuse_conn *fc, struct folio *folio,
- struct fuse_args_pages *ap,
- struct fuse_fill_wb_data *data)
-{
WARN_ON(!ap->num_folios);
- /*
- * Being under writeback is unlikely but possible. For example direct
- * read to an mmaped fuse file will set the page dirty twice; once when
- * the pages are faulted with get_user_pages(), and then after the read
- * completed.
- */
- if (fuse_folio_is_writeback(data->inode, folio))
- return true;
-
/* Reached max pages */
- if (ap->num_folios == fc->max_pages)
+ if ((bytes + PAGE_SIZE - 1) >> PAGE_SHIFT > fc->max_pages)
return true;
- /* Reached max write bytes */
- if ((ap->num_folios + 1) * PAGE_SIZE > fc->max_write)
+ if (bytes > max_bytes)
return true;
/* Discontinuity */
- if (data->orig_folios[ap->num_folios - 1]->index + 1 != folio_index(folio))
- return true;
-
- /* Need to grow the pages array? If so, did the expansion fail? */
- if (ap->num_folios == data->max_folios && !fuse_pages_realloc(data))
+ prev_folio = ap->folios[ap->num_folios - 1];
+ prev_desc = ap->descs[ap->num_folios - 1];
+ prev_pos = folio_pos(prev_folio) + prev_desc.offset + prev_desc.length;
+ if (prev_pos != pos)
return true;
return false;
}
-static int fuse_writepages_fill(struct folio *folio,
- struct writeback_control *wbc, void *_data)
+static ssize_t fuse_iomap_writeback_range(struct iomap_writepage_ctx *wpc,
+ struct folio *folio, u64 pos,
+ unsigned len, u64 end_pos)
{
- struct fuse_fill_wb_data *data = _data;
+ struct fuse_fill_wb_data *data = wpc->wb_ctx;
struct fuse_writepage_args *wpa = data->wpa;
struct fuse_args_pages *ap = &wpa->ia.ap;
- struct inode *inode = data->inode;
+ struct inode *inode = wpc->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
- struct folio *tmp_folio;
- int err;
+ loff_t offset = offset_in_folio(folio, pos);
+
+ WARN_ON_ONCE(!data);
if (!data->ff) {
- err = -EIO;
data->ff = fuse_write_file_get(fi);
if (!data->ff)
- goto out_unlock;
+ return -EIO;
}
- if (wpa && fuse_writepage_need_send(fc, folio, ap, data)) {
- fuse_writepages_send(data);
- data->wpa = NULL;
- }
+ if (wpa) {
+ bool send = fuse_folios_need_send(fc, pos, len, ap,
+ data->nr_bytes, true);
- err = -ENOMEM;
- tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0);
- if (!tmp_folio)
- goto out_unlock;
+ if (!send) {
+ /*
+ * Need to grow the pages array? If so, did the
+ * expansion fail?
+ */
+ send = (ap->num_folios == data->max_folios) &&
+ !fuse_pages_realloc(data, fc->max_pages);
+ }
- /*
- * The page must not be redirtied until the writeout is completed
- * (i.e. userspace has sent a reply to the write request). Otherwise
- * there could be more than one temporary page instance for each real
- * page.
- *
- * This is ensured by holding the page lock in page_mkwrite() while
- * checking fuse_page_is_writeback(). We already hold the page lock
- * since clear_page_dirty_for_io() and keep it held until we add the
- * request to the fi->writepages list and increment ap->num_folios.
- * After this fuse_page_is_writeback() will indicate that the page is
- * under writeback, so we can release the page lock.
- */
- if (data->wpa == NULL) {
- err = -ENOMEM;
- wpa = fuse_writepage_args_setup(folio, data->ff);
- if (!wpa) {
- folio_put(tmp_folio);
- goto out_unlock;
+ if (send) {
+ fuse_writepages_send(inode, data);
+ data->wpa = NULL;
+ data->nr_bytes = 0;
}
+ }
+
+ if (data->wpa == NULL) {
+ wpa = fuse_writepage_args_setup(folio, offset, data->ff);
+ if (!wpa)
+ return -ENOMEM;
fuse_file_get(wpa->ia.ff);
data->max_folios = 1;
ap = &wpa->ia.ap;
}
- folio_start_writeback(folio);
- fuse_writepage_args_page_fill(wpa, folio, tmp_folio, ap->num_folios);
- data->orig_folios[ap->num_folios] = folio;
+ fuse_writepage_args_page_fill(wpa, folio, ap->num_folios,
+ offset, len);
+ data->nr_bytes += len;
- err = 0;
- if (data->wpa) {
- /*
- * Protected by fi->lock against concurrent access by
- * fuse_page_is_writeback().
- */
- spin_lock(&fi->lock);
- ap->num_folios++;
- spin_unlock(&fi->lock);
- } else if (fuse_writepage_add(wpa, folio)) {
+ ap->num_folios++;
+ if (!data->wpa)
data->wpa = wpa;
- } else {
- folio_end_writeback(folio);
+
+ return len;
+}
+
+static int fuse_iomap_writeback_submit(struct iomap_writepage_ctx *wpc,
+ int error)
+{
+ struct fuse_fill_wb_data *data = wpc->wb_ctx;
+
+ WARN_ON_ONCE(!data);
+
+ if (data->wpa) {
+ WARN_ON(!data->wpa->ia.ap.num_folios);
+ fuse_writepages_send(wpc->inode, data);
}
-out_unlock:
- folio_unlock(folio);
- return err;
+ if (data->ff)
+ fuse_file_put(data->ff, false);
+
+ return error;
}
+static const struct iomap_writeback_ops fuse_writeback_ops = {
+ .writeback_range = fuse_iomap_writeback_range,
+ .writeback_submit = fuse_iomap_writeback_submit,
+};
+
static int fuse_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_fill_wb_data data;
- int err;
+ struct fuse_fill_wb_data data = {};
+ struct iomap_writepage_ctx wpc = {
+ .inode = inode,
+ .iomap.type = IOMAP_MAPPED,
+ .wbc = wbc,
+ .ops = &fuse_writeback_ops,
+ .wb_ctx = &data,
+ };
- err = -EIO;
if (fuse_is_bad(inode))
- goto out;
+ return -EIO;
if (wbc->sync_mode == WB_SYNC_NONE &&
fc->num_background >= fc->congestion_threshold)
return 0;
- data.inode = inode;
- data.wpa = NULL;
- data.ff = NULL;
-
- err = -ENOMEM;
- data.orig_folios = kcalloc(fc->max_pages,
- sizeof(struct folio *),
- GFP_NOFS);
- if (!data.orig_folios)
- goto out;
-
- err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
- if (data.wpa) {
- WARN_ON(!data.wpa->ia.ap.num_folios);
- fuse_writepages_send(&data);
- }
- if (data.ff)
- fuse_file_put(data.ff, false);
-
- kfree(data.orig_folios);
-out:
- return err;
-}
-
-/*
- * It's worthy to make sure that space is reserved on disk for the write,
- * but how to implement it without killing performance need more thinking.
- */
-static int fuse_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
-{
- pgoff_t index = pos >> PAGE_SHIFT;
- struct fuse_conn *fc = get_fuse_conn(file_inode(file));
- struct folio *folio;
- loff_t fsize;
- int err = -ENOMEM;
-
- WARN_ON(!fc->writeback_cache);
-
- folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
- mapping_gfp_mask(mapping));
- if (IS_ERR(folio))
- goto error;
-
- fuse_wait_on_page_writeback(mapping->host, folio->index);
-
- if (folio_test_uptodate(folio) || len >= folio_size(folio))
- goto success;
- /*
- * Check if the start of this folio comes after the end of file,
- * in which case the readpage can be optimized away.
- */
- fsize = i_size_read(mapping->host);
- if (fsize <= folio_pos(folio)) {
- size_t off = offset_in_folio(folio, pos);
- if (off)
- folio_zero_segment(folio, 0, off);
- goto success;
- }
- err = fuse_do_readfolio(file, folio);
- if (err)
- goto cleanup;
-success:
- *foliop = folio;
- return 0;
-
-cleanup:
- folio_unlock(folio);
- folio_put(folio);
-error:
- return err;
-}
-
-static int fuse_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct folio *folio, void *fsdata)
-{
- struct inode *inode = folio->mapping->host;
-
- /* Haven't copied anything? Skip zeroing, size extending, dirtying. */
- if (!copied)
- goto unlock;
-
- pos += copied;
- if (!folio_test_uptodate(folio)) {
- /* Zero any unwritten bytes at the end of the page */
- size_t endoff = pos & ~PAGE_MASK;
- if (endoff)
- folio_zero_segment(folio, endoff, PAGE_SIZE);
- folio_mark_uptodate(folio);
- }
-
- if (pos > inode->i_size)
- i_size_write(inode, pos);
-
- folio_mark_dirty(folio);
-
-unlock:
- folio_unlock(folio);
- folio_put(folio);
-
- return copied;
+ return iomap_writepages(&wpc);
}
static int fuse_launder_folio(struct folio *folio)
{
int err = 0;
- if (folio_clear_dirty_for_io(folio)) {
- struct inode *inode = folio->mapping->host;
+ struct fuse_fill_wb_data data = {};
+ struct iomap_writepage_ctx wpc = {
+ .inode = folio->mapping->host,
+ .iomap.type = IOMAP_MAPPED,
+ .ops = &fuse_writeback_ops,
+ .wb_ctx = &data,
+ };
- /* Serialize with pending writeback for the same page */
- fuse_wait_on_page_writeback(inode, folio->index);
- err = fuse_writepage_locked(folio);
+ if (folio_clear_dirty_for_io(folio)) {
+ err = iomap_writeback_folio(&wpc, folio);
+ err = fuse_iomap_writeback_submit(&wpc, err);
if (!err)
- fuse_wait_on_page_writeback(inode, folio->index);
+ folio_wait_writeback(folio);
}
return err;
}
@@ -2595,7 +2354,7 @@ static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
}
- fuse_wait_on_folio_writeback(inode, folio);
+ folio_wait_writeback(folio);
return VM_FAULT_LOCKED;
}
@@ -3189,7 +2948,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
inode_lock(inode);
if (block_faults) {
filemap_invalidate_lock(inode->i_mapping);
- err = fuse_dax_break_layouts(inode, 0, 0);
+ err = fuse_dax_break_layouts(inode, 0, -1);
if (err)
goto out;
}
@@ -3276,6 +3035,8 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
.flags = flags
};
struct fuse_write_out outarg;
+ struct fuse_copy_file_range_out outarg_64;
+ u64 bytes_copied;
ssize_t err;
/* mark unstable when write-back is not used, and file_out gets
* extended */
@@ -3325,30 +3086,51 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
if (is_unstable)
set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
- args.opcode = FUSE_COPY_FILE_RANGE;
+ args.opcode = FUSE_COPY_FILE_RANGE_64;
args.nodeid = ff_in->nodeid;
args.in_numargs = 1;
args.in_args[0].size = sizeof(inarg);
args.in_args[0].value = &inarg;
args.out_numargs = 1;
- args.out_args[0].size = sizeof(outarg);
- args.out_args[0].value = &outarg;
+ args.out_args[0].size = sizeof(outarg_64);
+ args.out_args[0].value = &outarg_64;
+ if (fc->no_copy_file_range_64) {
+fallback:
+ /* Fall back to old op that can't handle large copy length */
+ args.opcode = FUSE_COPY_FILE_RANGE;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
+ inarg.len = len = min_t(size_t, len, UINT_MAX & PAGE_MASK);
+ }
err = fuse_simple_request(fm, &args);
if (err == -ENOSYS) {
- fc->no_copy_file_range = 1;
- err = -EOPNOTSUPP;
+ if (fc->no_copy_file_range_64) {
+ fc->no_copy_file_range = 1;
+ err = -EOPNOTSUPP;
+ } else {
+ fc->no_copy_file_range_64 = 1;
+ goto fallback;
+ }
}
if (err)
goto out;
+ bytes_copied = fc->no_copy_file_range_64 ?
+ outarg.size : outarg_64.bytes_copied;
+
+ if (bytes_copied > len) {
+ err = -EIO;
+ goto out;
+ }
+
truncate_inode_pages_range(inode_out->i_mapping,
ALIGN_DOWN(pos_out, PAGE_SIZE),
- ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1);
+ ALIGN(pos_out + bytes_copied, PAGE_SIZE) - 1);
file_update_time(file_out);
- fuse_write_update_attr(inode_out, pos_out + outarg.size, outarg.size);
+ fuse_write_update_attr(inode_out, pos_out + bytes_copied, bytes_copied);
- err = outarg.size;
+ err = bytes_copied;
out:
if (is_unstable)
clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
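The clamp in the fallback path above follows from the reply format: the legacy FUSE_COPY_FILE_RANGE reply (struct fuse_write_out) carries a 32-bit size, so the requested length is limited to the largest page-aligned value that still fits in 32 bits. Spelled out as a sketch:

    /* With 4KiB pages this is 0xFFFFF000, i.e. 4GiB minus one page. */
    size_t legacy_cap = UINT_MAX & PAGE_MASK;
    size_t clamped_len = min_t(size_t, len, legacy_cap);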
@@ -3402,20 +3184,24 @@ static const struct address_space_operations fuse_file_aops = {
.readahead = fuse_readahead,
.writepages = fuse_writepages,
.launder_folio = fuse_launder_folio,
- .dirty_folio = filemap_dirty_folio,
+ .dirty_folio = iomap_dirty_folio,
+ .release_folio = iomap_release_folio,
+ .invalidate_folio = iomap_invalidate_folio,
+ .is_partially_uptodate = iomap_is_partially_uptodate,
.migrate_folio = filemap_migrate_folio,
.bmap = fuse_bmap,
.direct_IO = fuse_direct_IO,
- .write_begin = fuse_write_begin,
- .write_end = fuse_write_end,
};
void fuse_init_file_inode(struct inode *inode, unsigned int flags)
{
struct fuse_inode *fi = get_fuse_inode(inode);
+ struct fuse_conn *fc = get_fuse_conn(inode);
inode->i_fop = &fuse_file_operations;
inode->i_data.a_ops = &fuse_file_aops;
+ if (fc->writeback_cache)
+ mapping_set_writeback_may_deadlock_on_reclaim(&inode->i_data);
INIT_LIST_HEAD(&fi->write_files);
INIT_LIST_HEAD(&fi->queued_writes);
@@ -3423,7 +3209,6 @@ void fuse_init_file_inode(struct inode *inode, unsigned int flags)
fi->iocachectr = 0;
init_waitqueue_head(&fi->page_waitq);
init_waitqueue_head(&fi->direct_io_waitq);
- fi->writepages = RB_ROOT;
if (IS_ENABLED(CONFIG_FUSE_DAX))
fuse_dax_inode_init(inode, flags);
diff --git a/fs/fuse/fuse_dev_i.h b/fs/fuse/fuse_dev_i.h
new file mode 100644
index 000000000000..134bf44aff0d
--- /dev/null
+++ b/fs/fuse/fuse_dev_i.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * FUSE: Filesystem in Userspace
+ * Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
+ */
+#ifndef _FS_FUSE_DEV_I_H
+#define _FS_FUSE_DEV_I_H
+
+#include <linux/types.h>
+
+/* Ordinary requests have even IDs, while interrupt IDs are odd */
+#define FUSE_INT_REQ_BIT (1ULL << 0)
+#define FUSE_REQ_ID_STEP (1ULL << 1)
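These two defines encode request identity in the low bit of the unique ID: ordinary requests get even IDs and the matching interrupt uses the odd ID right above. Two illustrative helpers that follow from that encoding (they are not part of this header):

    static inline bool fuse_unique_is_interrupt(u64 unique)
    {
            return unique & FUSE_INT_REQ_BIT;
    }

    static inline u64 fuse_interrupt_unique(u64 request_unique)
    {
            return request_unique | FUSE_INT_REQ_BIT;
    }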
+
+extern struct wait_queue_head fuse_dev_waitq;
+
+struct fuse_arg;
+struct fuse_args;
+struct fuse_pqueue;
+struct fuse_req;
+struct fuse_iqueue;
+struct fuse_forget_link;
+
+struct fuse_copy_state {
+ struct fuse_req *req;
+ struct iov_iter *iter;
+ struct pipe_buffer *pipebufs;
+ struct pipe_buffer *currbuf;
+ struct pipe_inode_info *pipe;
+ unsigned long nr_segs;
+ struct page *pg;
+ unsigned int len;
+ unsigned int offset;
+ bool write:1;
+ bool move_folios:1;
+ bool is_uring:1;
+ struct {
+ unsigned int copied_sz; /* copied size into the user buffer */
+ } ring;
+};
+
+#define FUSE_DEV_SYNC_INIT ((struct fuse_dev *) 1)
+#define FUSE_DEV_PTR_MASK (~1UL)
+
+static inline struct fuse_dev *__fuse_get_dev(struct file *file)
+{
+ /*
+ * Lockless access is OK, because file->private_data is set
+ * once during mount and is valid until the file is released.
+ */
+ struct fuse_dev *fud = READ_ONCE(file->private_data);
+
+ return (typeof(fud)) ((unsigned long) fud & FUSE_DEV_PTR_MASK);
+}
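FUSE_DEV_SYNC_INIT and FUSE_DEV_PTR_MASK above suggest a tagged-pointer scheme: the low bit of file->private_data doubles as a marker while the real fuse_dev pointer lives in the remaining bits, which is why __fuse_get_dev() masks the bit off. A hypothetical helper to test that tag bit, assuming this is indeed its role:

    static inline bool fuse_dev_tag_set(struct file *file)
    {
            unsigned long v = (unsigned long)READ_ONCE(file->private_data);

            return v & ~FUSE_DEV_PTR_MASK;
    }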
+
+struct fuse_dev *fuse_get_dev(struct file *file);
+
+unsigned int fuse_req_hash(u64 unique);
+struct fuse_req *fuse_request_find(struct fuse_pqueue *fpq, u64 unique);
+
+void fuse_dev_end_requests(struct list_head *head);
+
+void fuse_copy_init(struct fuse_copy_state *cs, bool write,
+ struct iov_iter *iter);
+void fuse_copy_finish(struct fuse_copy_state *cs);
+int fuse_copy_args(struct fuse_copy_state *cs, unsigned int numargs,
+ unsigned int argpages, struct fuse_arg *args,
+ int zeroing);
+int fuse_copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
+ unsigned int nbytes);
+void fuse_dev_queue_forget(struct fuse_iqueue *fiq,
+ struct fuse_forget_link *forget);
+void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req);
+bool fuse_remove_pending_req(struct fuse_req *req, spinlock_t *lock);
+
+bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list);
+
+#endif
+
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 74744c6f2860..7f16049387d1 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -38,14 +38,41 @@
/** Bias for fi->writectr, meaning new writepages must not be sent */
#define FUSE_NOWRITE INT_MIN
-/** It could be as large as PATH_MAX, but would that have any uses? */
-#define FUSE_NAME_MAX 1024
+/** Maximum length of a filename, not including terminating null */
+
+/* maximum, small enough for FUSE_MIN_READ_BUFFER */
+#define FUSE_NAME_LOW_MAX 1024
+/* maximum, but needs a request buffer > FUSE_MIN_READ_BUFFER */
+#define FUSE_NAME_MAX (PATH_MAX - 1)
/** Number of dentries for each connection in the control filesystem */
#define FUSE_CTL_NUM_DENTRIES 5
+/* Frequency (in seconds) of request timeout checks, if opted into */
+#define FUSE_TIMEOUT_TIMER_FREQ 15
+
+/** Frequency (in jiffies) of request timeout checks, if opted into */
+extern const unsigned long fuse_timeout_timer_freq;
+
+/*
+ * Dentries invalidation workqueue period, in seconds. The value of this
+ * parameter shall be >= FUSE_DENTRY_INVAL_FREQ_MIN seconds, or 0 (zero), in
+ * which case no workqueue will be created.
+ */
+extern unsigned inval_wq __read_mostly;
+
/** Maximum of max_pages received in init_out */
extern unsigned int fuse_max_pages_limit;
+/*
+ * Default timeout (in seconds) for the server to reply to a request
+ * before the connection is aborted, if no timeout was specified on mount.
+ */
+extern unsigned int fuse_default_req_timeout;
+/*
+ * Max timeout (in seconds) for the server to reply to a request before
+ * the connection is aborted.
+ */
+extern unsigned int fuse_max_req_timeout;
/** List of active connections */
extern struct list_head fuse_conn_list;
@@ -54,8 +81,8 @@ extern struct list_head fuse_conn_list;
extern struct mutex fuse_mutex;
/** Module parameters */
-extern unsigned max_user_bgreq;
-extern unsigned max_user_congthresh;
+extern unsigned int max_user_bgreq;
+extern unsigned int max_user_congthresh;
/* One forget request */
struct fuse_forget_link {
@@ -141,9 +168,6 @@ struct fuse_inode {
/* waitq for direct-io completion */
wait_queue_head_t direct_io_waitq;
-
- /* List of writepage requestst (pending or sent) */
- struct rb_root writepages;
};
/* readdir cache (directory only) */
@@ -193,6 +217,12 @@ struct fuse_inode {
/** Reference to backing file in passthrough mode */
struct fuse_backing *fb;
#endif
+
+ /*
+ * The underlying inode->i_blkbits value will not be modified,
+ * so preserve the blocksize specified by the server.
+ */
+ u8 cached_i_blkbits;
};
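Given the comment above, consumers of the server-provided blocksize presumably read the cached value instead of inode->i_blkbits; a hypothetical helper sketching what that looks like when filling stat data (the real call site is not in this hunk):

    static void fuse_fill_blksize(const struct inode *inode, struct kstat *stat)
    {
            /* Report the server's blocksize without touching i_blkbits. */
            stat->blksize = 1U << get_fuse_inode(inode)->cached_i_blkbits;
    }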
/** FUSE inode state bits */
@@ -209,6 +239,11 @@ enum {
FUSE_I_BTIME,
/* Wants or already has page cache IO */
FUSE_I_CACHE_IO_MODE,
+ /*
+ * Client has exclusive access to the inode, either because fs is local
+ * or the fuse server has an exclusive "lease" on distributed fs
+ */
+ FUSE_I_EXCLUSIVE,
};
struct fuse_conn;
@@ -310,7 +345,7 @@ struct fuse_args {
bool is_ext:1;
bool is_pinned:1;
bool invalidate_vmap:1;
- struct fuse_in_arg in_args[3];
+ struct fuse_in_arg in_args[4];
struct fuse_arg out_args[2];
void (*end)(struct fuse_mount *fm, struct fuse_args *args, int error);
/* Used for kvec iter backed by vmalloc address */
@@ -378,6 +413,7 @@ struct fuse_io_priv {
* FR_FINISHED: request is finished
* FR_PRIVATE: request is on private list
* FR_ASYNC: request is asynchronous
+ * FR_URING: request is handled through fuse-io-uring
*/
enum fuse_req_flag {
FR_ISREPLY,
@@ -392,6 +428,7 @@ enum fuse_req_flag {
FR_FINISHED,
FR_PRIVATE,
FR_ASYNC,
+ FR_URING,
};
/**
@@ -438,6 +475,13 @@ struct fuse_req {
/** fuse_mount this request belongs to */
struct fuse_mount *fm;
+
+#ifdef CONFIG_FUSE_IO_URING
+ void *ring_entry;
+ void *ring_queue;
+#endif
+ /** When (in jiffies) the request was created */
+ unsigned long create_time;
};
struct fuse_iqueue;
@@ -607,6 +651,11 @@ struct fuse_conn {
/** Number of fuse_dev's */
atomic_t dev_count;
+ /** Current epoch for up-to-date dentries */
+ atomic_t epoch;
+
+ struct work_struct epoch_work;
+
struct rcu_head rcu;
/** The user id for this mount */
@@ -821,6 +870,9 @@ struct fuse_conn {
/** Does the filesystem support copy_file_range? */
unsigned no_copy_file_range:1;
+ /** Does the filesystem support copy_file_range_64? */
+ unsigned no_copy_file_range_64:1;
+
/* Send DESTROY request */
unsigned int destroy:1;
@@ -863,6 +915,15 @@ struct fuse_conn {
/* Use pages instead of pointer for kernel I/O */
unsigned int use_pages_for_kvec_io:1;
+ /* Is link not implemented by fs? */
+ unsigned int no_link:1;
+
+ /* Is synchronous FUSE_INIT allowed? */
+ unsigned int sync_init:1;
+
+ /* Use io_uring for communication */
+ unsigned int io_uring;
+
/** Maximum stack depth for passthrough backing files */
int max_stack_depth;
@@ -878,12 +939,6 @@ struct fuse_conn {
/** Device ID from the root super block */
dev_t dev;
- /** Dentries in the control filesystem */
- struct dentry *ctl_dentry[FUSE_CTL_NUM_DENTRIES];
-
- /** number of dentries used in the above array */
- int ctl_ndents;
-
/** Key for lock owner ID scrambling */
u32 scramble_key[4];
@@ -893,6 +948,9 @@ struct fuse_conn {
/** Version counter for evict inode */
atomic64_t evict_ctr;
+ /* maximum file name length */
+ u32 name_max;
+
/** Called on final put */
void (*release)(struct fuse_conn *);
@@ -923,6 +981,20 @@ struct fuse_conn {
/** IDR for backing files ids */
struct idr backing_files_map;
#endif
+
+#ifdef CONFIG_FUSE_IO_URING
+ /** uring connection information */
+ struct fuse_ring *ring;
+#endif
+
+ /** Only used if the connection opts into request timeouts */
+ struct {
+ /* Worker for checking if any requests have timed out */
+ struct delayed_work work;
+
+ /* Request timeout (in jiffies). 0 = no timeout */
+ unsigned int req_timeout;
+ } timeout;
};
/*
@@ -947,6 +1019,19 @@ struct fuse_mount {
struct rcu_head rcu;
};
+/*
+ * Empty header for FUSE opcodes without specific header needs.
+ * Used as a placeholder in args->in_args[0] for consistency
+ * across all FUSE operations, simplifying request handling.
+ */
+struct fuse_zero_header {};
+
+static inline void fuse_set_zero_arg0(struct fuse_args *args)
+{
+ args->in_args[0].size = sizeof(struct fuse_zero_header);
+ args->in_args[0].value = NULL;
+}
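A sketch of how an opcode whose only input is a name string could be filled using the zero-sized first argument introduced above; the opcode choice and helper name are illustrative, not taken from this series:

    static int fuse_example_removexattr(struct fuse_mount *fm, u64 nodeid,
                                        const char *name)
    {
            FUSE_ARGS(args);

            fuse_set_zero_arg0(&args);
            args.opcode = FUSE_REMOVEXATTR;
            args.nodeid = nodeid;
            args.in_numargs = 2;
            args.in_args[1].size = strlen(name) + 1;
            args.in_args[1].value = name;
            return fuse_simple_request(fm, &args);
    }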
+
static inline struct fuse_mount *get_fuse_mount_super(struct super_block *sb)
{
return sb->s_fs_info;
@@ -967,7 +1052,7 @@ static inline struct fuse_conn *get_fuse_conn(struct inode *inode)
return get_fuse_mount_super(inode->i_sb)->fc;
}
-static inline struct fuse_inode *get_fuse_inode(struct inode *inode)
+static inline struct fuse_inode *get_fuse_inode(const struct inode *inode)
{
return container_of(inode, struct fuse_inode, inode);
}
@@ -1009,6 +1094,13 @@ static inline bool fuse_is_bad(struct inode *inode)
return unlikely(test_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state));
}
+static inline bool fuse_inode_is_exclusive(const struct inode *inode)
+{
+ const struct fuse_inode *fi = get_fuse_inode(inode);
+
+ return test_bit(FUSE_I_EXCLUSIVE, &fi->state);
+}
+
static inline struct folio **fuse_folios_alloc(unsigned int nfolios, gfp_t flags,
struct fuse_folio_desc **desc)
{
@@ -1044,7 +1136,6 @@ static inline void fuse_sync_bucket_dec(struct fuse_sync_bucket *bucket)
extern const struct file_operations fuse_dev_operations;
extern const struct dentry_operations fuse_dentry_operations;
-extern const struct dentry_operations fuse_root_dentry_operations;
/**
* Get a filled in inode
@@ -1183,6 +1274,11 @@ int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
gfp_t gfp_flags);
/**
+ * Assign a unique id to a fuse request
+ */
+void fuse_request_assign_unique(struct fuse_iqueue *fiq, struct fuse_req *req);
+
+/**
* End a finished request
*/
void fuse_request_end(struct fuse_req *req);
@@ -1191,6 +1287,14 @@ void fuse_request_end(struct fuse_req *req);
void fuse_abort_conn(struct fuse_conn *fc);
void fuse_wait_aborted(struct fuse_conn *fc);
+/* Check if any requests timed out */
+void fuse_check_timeout(struct work_struct *work);
+
+void fuse_dentry_tree_init(void);
+void fuse_dentry_tree_cleanup(void);
+
+void fuse_epoch_work(struct work_struct *work);
+
/**
* Invalidate inode attributes
*/
@@ -1220,6 +1324,11 @@ void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o);
struct fuse_conn *fuse_conn_get(struct fuse_conn *fc);
/**
+ * Initialize the fuse processing queue
+ */
+void fuse_pqueue_init(struct fuse_pqueue *fpq);
+
+/**
* Initialize fuse_conn
*/
void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
@@ -1235,7 +1344,7 @@ struct fuse_dev *fuse_dev_alloc_install(struct fuse_conn *fc);
struct fuse_dev *fuse_dev_alloc(void);
void fuse_dev_install(struct fuse_dev *fud, struct fuse_conn *fc);
void fuse_dev_free(struct fuse_dev *fud);
-void fuse_send_init(struct fuse_mount *fm);
+int fuse_send_init(struct fuse_mount *fm);
/**
* Fill in superblock and initialize fuse connection
@@ -1327,6 +1436,12 @@ int fuse_reverse_inval_inode(struct fuse_conn *fc, u64 nodeid,
int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
u64 child_nodeid, struct qstr *name, u32 flags);
+/*
+ * Try to prune this inode. If neither the inode itself nor dentries associated
+ * with this inode have any external reference, then the inode can be freed.
+ */
+void fuse_try_prune_one_inode(struct fuse_conn *fc, u64 nodeid);
+
int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
bool isdir);
@@ -1413,9 +1528,9 @@ void fuse_dax_cancel_work(struct fuse_conn *fc);
long fuse_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
-int fuse_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+int fuse_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
int fuse_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa);
+ struct dentry *dentry, struct file_kattr *fa);
/* iomode.c */
int fuse_file_cached_io_open(struct inode *inode, struct fuse_file *ff);
@@ -1432,29 +1547,11 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
void fuse_file_release(struct inode *inode, struct fuse_file *ff,
unsigned int open_flags, fl_owner_t id, bool isdir);
-/* passthrough.c */
-static inline struct fuse_backing *fuse_inode_backing(struct fuse_inode *fi)
-{
-#ifdef CONFIG_FUSE_PASSTHROUGH
- return READ_ONCE(fi->fb);
-#else
- return NULL;
-#endif
-}
-
-static inline struct fuse_backing *fuse_inode_backing_set(struct fuse_inode *fi,
- struct fuse_backing *fb)
-{
-#ifdef CONFIG_FUSE_PASSTHROUGH
- return xchg(&fi->fb, fb);
-#else
- return NULL;
-#endif
-}
-
+/* backing.c */
#ifdef CONFIG_FUSE_PASSTHROUGH
struct fuse_backing *fuse_backing_get(struct fuse_backing *fb);
void fuse_backing_put(struct fuse_backing *fb);
+struct fuse_backing *fuse_backing_lookup(struct fuse_conn *fc, int backing_id);
#else
static inline struct fuse_backing *fuse_backing_get(struct fuse_backing *fb)
@@ -1465,6 +1562,11 @@ static inline struct fuse_backing *fuse_backing_get(struct fuse_backing *fb)
static inline void fuse_backing_put(struct fuse_backing *fb)
{
}
+static inline struct fuse_backing *fuse_backing_lookup(struct fuse_conn *fc,
+ int backing_id)
+{
+ return NULL;
+}
#endif
void fuse_backing_files_init(struct fuse_conn *fc);
@@ -1472,9 +1574,27 @@ void fuse_backing_files_free(struct fuse_conn *fc);
int fuse_backing_open(struct fuse_conn *fc, struct fuse_backing_map *map);
int fuse_backing_close(struct fuse_conn *fc, int backing_id);
-struct fuse_backing *fuse_passthrough_open(struct file *file,
- struct inode *inode,
- int backing_id);
+/* passthrough.c */
+static inline struct fuse_backing *fuse_inode_backing(struct fuse_inode *fi)
+{
+#ifdef CONFIG_FUSE_PASSTHROUGH
+ return READ_ONCE(fi->fb);
+#else
+ return NULL;
+#endif
+}
+
+static inline struct fuse_backing *fuse_inode_backing_set(struct fuse_inode *fi,
+ struct fuse_backing *fb)
+{
+#ifdef CONFIG_FUSE_PASSTHROUGH
+ return xchg(&fi->fb, fb);
+#else
+ return NULL;
+#endif
+}
+
+struct fuse_backing *fuse_passthrough_open(struct file *file, int backing_id);
void fuse_passthrough_release(struct fuse_file *ff, struct fuse_backing *fb);
static inline struct file *fuse_file_passthrough(struct fuse_file *ff)
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 3ce4f4e81d09..819e50d66622 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -7,7 +7,10 @@
*/
#include "fuse_i.h"
+#include "fuse_dev_i.h"
+#include "dev_uring_i.h"
+#include <linux/dax.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/file.h>
@@ -32,12 +35,16 @@ MODULE_LICENSE("GPL");
static struct kmem_cache *fuse_inode_cachep;
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);
+DECLARE_WAIT_QUEUE_HEAD(fuse_dev_waitq);
static int set_global_limit(const char *val, const struct kernel_param *kp);
unsigned int fuse_max_pages_limit = 256;
+/* default is no timeout */
+unsigned int fuse_default_req_timeout;
+unsigned int fuse_max_req_timeout;
-unsigned max_user_bgreq;
+unsigned int max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
&max_user_bgreq, 0644);
__MODULE_PARM_TYPE(max_user_bgreq, "uint");
@@ -45,7 +52,7 @@ MODULE_PARM_DESC(max_user_bgreq,
"Global limit for the maximum number of backgrounded requests an "
"unprivileged user can set");
-unsigned max_user_congthresh;
+unsigned int max_user_congthresh;
module_param_call(max_user_congthresh, set_global_limit, param_get_uint,
&max_user_congthresh, 0644);
__MODULE_PARM_TYPE(max_user_congthresh, "uint");
@@ -96,14 +103,11 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
if (!fi)
return NULL;
- fi->i_time = 0;
+ /* Initialize private data (i.e. everything except fi->inode) */
+ BUILD_BUG_ON(offsetof(struct fuse_inode, inode) != 0);
+ memset((void *) fi + sizeof(fi->inode), 0, sizeof(*fi) - sizeof(fi->inode));
+
fi->inval_mask = ~0;
- fi->nodeid = 0;
- fi->nlookup = 0;
- fi->attr_version = 0;
- fi->orig_ino = 0;
- fi->state = 0;
- fi->submount_lookup = NULL;
mutex_init(&fi->mutex);
spin_lock_init(&fi->lock);
fi->forget = fuse_alloc_forget();
@@ -156,7 +160,10 @@ static void fuse_evict_inode(struct inode *inode)
struct fuse_inode *fi = get_fuse_inode(inode);
/* Will write inode on close/munmap and in all other dirtiers */
- WARN_ON(inode->i_state & I_DIRTY_INODE);
+ WARN_ON(inode_state_read_once(inode) & I_DIRTY_INODE);
+
+ if (FUSE_IS_DAX(inode))
+ dax_break_layout_final(inode);
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
@@ -281,10 +288,10 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
}
}
- if (attr->blksize != 0)
- inode->i_blkbits = ilog2(attr->blksize);
+ if (attr->blksize)
+ fi->cached_i_blkbits = ilog2(attr->blksize);
else
- inode->i_blkbits = inode->i_sb->s_blocksize_bits;
+ fi->cached_i_blkbits = inode->i_sb->s_blocksize_bits;
/*
* Don't set the sticky bit in i_mode, unless we want the VFS
@@ -498,7 +505,7 @@ retry:
if (!inode)
return NULL;
- if ((inode->i_state & I_NEW)) {
+ if ((inode_state_read_once(inode) & I_NEW)) {
inode->i_flags |= S_NOATIME;
if (!fc->writeback_cache || !S_ISREG(attr->mode))
inode->i_flags |= S_NOCMTIME;
@@ -578,6 +585,17 @@ int fuse_reverse_inval_inode(struct fuse_conn *fc, u64 nodeid,
return 0;
}
+void fuse_try_prune_one_inode(struct fuse_conn *fc, u64 nodeid)
+{
+ struct inode *inode;
+
+ inode = fuse_ilookup(fc, nodeid, NULL);
+ if (!inode)
+ return;
+ d_prune_aliases(inode);
+ iput(inode);
+}
+
bool fuse_lock_inode(struct inode *inode)
{
bool locked = false;
@@ -937,7 +955,7 @@ static void fuse_iqueue_init(struct fuse_iqueue *fiq,
fiq->priv = priv;
}
-static void fuse_pqueue_init(struct fuse_pqueue *fpq)
+void fuse_pqueue_init(struct fuse_pqueue *fpq)
{
unsigned int i;
@@ -958,6 +976,8 @@ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
init_rwsem(&fc->killsb);
refcount_set(&fc->count, 1);
atomic_set(&fc->dev_count, 1);
+ atomic_set(&fc->epoch, 1);
+ INIT_WORK(&fc->epoch_work, fuse_epoch_work);
init_waitqueue_head(&fc->blocked_waitq);
fuse_iqueue_init(&fc->iq, fiq_ops, fiq_priv);
INIT_LIST_HEAD(&fc->bg_queue);
@@ -978,6 +998,8 @@ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
fc->user_ns = get_user_ns(user_ns);
fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
fc->max_pages_limit = fuse_max_pages_limit;
+ fc->name_max = FUSE_NAME_LOW_MAX;
+ fc->timeout.req_timeout = 0;
if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
fuse_backing_files_init(fc);
@@ -992,30 +1014,36 @@ static void delayed_release(struct rcu_head *p)
{
struct fuse_conn *fc = container_of(p, struct fuse_conn, rcu);
+ fuse_uring_destruct(fc);
+
put_user_ns(fc->user_ns);
fc->release(fc);
}
void fuse_conn_put(struct fuse_conn *fc)
{
- if (refcount_dec_and_test(&fc->count)) {
- struct fuse_iqueue *fiq = &fc->iq;
- struct fuse_sync_bucket *bucket;
-
- if (IS_ENABLED(CONFIG_FUSE_DAX))
- fuse_dax_conn_free(fc);
- if (fiq->ops->release)
- fiq->ops->release(fiq);
- put_pid_ns(fc->pid_ns);
- bucket = rcu_dereference_protected(fc->curr_bucket, 1);
- if (bucket) {
- WARN_ON(atomic_read(&bucket->count) != 1);
- kfree(bucket);
- }
- if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
- fuse_backing_files_free(fc);
- call_rcu(&fc->rcu, delayed_release);
+ struct fuse_iqueue *fiq = &fc->iq;
+ struct fuse_sync_bucket *bucket;
+
+ if (!refcount_dec_and_test(&fc->count))
+ return;
+
+ if (IS_ENABLED(CONFIG_FUSE_DAX))
+ fuse_dax_conn_free(fc);
+ if (fc->timeout.req_timeout)
+ cancel_delayed_work_sync(&fc->timeout.work);
+ cancel_work_sync(&fc->epoch_work);
+ if (fiq->ops->release)
+ fiq->ops->release(fiq);
+ put_pid_ns(fc->pid_ns);
+ bucket = rcu_dereference_protected(fc->curr_bucket, 1);
+ if (bucket) {
+ WARN_ON(atomic_read(&bucket->count) != 1);
+ kfree(bucket);
}
+ if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
+ fuse_backing_files_free(fc);
+ call_rcu(&fc->rcu, delayed_release);
}
EXPORT_SYMBOL_GPL(fuse_conn_put);
@@ -1026,7 +1054,7 @@ struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
}
EXPORT_SYMBOL_GPL(fuse_conn_get);
-static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode)
+static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned int mode)
{
struct fuse_attr attr;
memset(&attr, 0, sizeof(attr));
@@ -1194,14 +1222,14 @@ static const struct super_operations fuse_super_operations = {
.free_inode = fuse_free_inode,
.evict_inode = fuse_evict_inode,
.write_inode = fuse_write_inode,
- .drop_inode = generic_delete_inode,
+ .drop_inode = inode_just_drop,
.umount_begin = fuse_umount_begin,
.statfs = fuse_statfs,
.sync_fs = fuse_sync_fs,
.show_options = fuse_show_options,
};
-static void sanitize_global_limit(unsigned *limit)
+static void sanitize_global_limit(unsigned int *limit)
{
/*
* The default maximum number of async requests is calculated to consume
@@ -1222,7 +1250,7 @@ static int set_global_limit(const char *val, const struct kernel_param *kp)
if (rv)
return rv;
- sanitize_global_limit((unsigned *)kp->arg);
+ sanitize_global_limit((unsigned int *)kp->arg);
return 0;
}
@@ -1254,6 +1282,34 @@ static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
spin_unlock(&fc->bg_lock);
}
+static void set_request_timeout(struct fuse_conn *fc, unsigned int timeout)
+{
+ fc->timeout.req_timeout = secs_to_jiffies(timeout);
+ INIT_DELAYED_WORK(&fc->timeout.work, fuse_check_timeout);
+ queue_delayed_work(system_percpu_wq, &fc->timeout.work,
+ fuse_timeout_timer_freq);
+}
+
+static void init_server_timeout(struct fuse_conn *fc, unsigned int timeout)
+{
+ if (!timeout && !fuse_max_req_timeout && !fuse_default_req_timeout)
+ return;
+
+ if (!timeout)
+ timeout = fuse_default_req_timeout;
+
+ if (fuse_max_req_timeout) {
+ if (timeout)
+ timeout = min(fuse_max_req_timeout, timeout);
+ else
+ timeout = fuse_max_req_timeout;
+ }
+
+ timeout = max(FUSE_TIMEOUT_TIMER_FREQ, timeout);
+
+ set_request_timeout(fc, timeout);
+}
+
struct fuse_init_args {
struct fuse_args args;
struct fuse_init_in in;
@@ -1272,6 +1328,7 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
ok = false;
else {
unsigned long ra_pages;
+ unsigned int timeout = 0;
process_init_limits(fc, arg);
@@ -1335,6 +1392,13 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
fc->max_pages =
min_t(unsigned int, fc->max_pages_limit,
max_t(unsigned int, arg->max_pages, 1));
+
+ /*
+ * PATH_MAX file names might need two pages for
+ * ops like rename
+ */
+ if (fc->max_pages > 1)
+ fc->name_max = FUSE_NAME_MAX;
}
if (IS_ENABLED(CONFIG_FUSE_DAX)) {
if (flags & FUSE_MAP_ALIGNMENT &&
@@ -1387,12 +1451,19 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
else
ok = false;
}
+ if (flags & FUSE_OVER_IO_URING && fuse_uring_enabled())
+ fc->io_uring = 1;
+
+ if (flags & FUSE_REQUEST_TIMEOUT)
+ timeout = arg->request_timeout;
} else {
ra_pages = fc->max_read / PAGE_SIZE;
fc->no_lock = 1;
fc->no_flock = 1;
}
+ init_server_timeout(fc, timeout);
+
fm->sb->s_bdi->ra_pages =
min(fm->sb->s_bdi->ra_pages, ra_pages);
fc->minor = arg->minor;
@@ -1411,7 +1482,7 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
wake_up_all(&fc->blocked_waitq);
}
-void fuse_send_init(struct fuse_mount *fm)
+static struct fuse_init_args *fuse_new_init(struct fuse_mount *fm)
{
struct fuse_init_args *ia;
u64 flags;
@@ -1434,7 +1505,8 @@ void fuse_send_init(struct fuse_mount *fm)
FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP |
- FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP;
+ FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP |
+ FUSE_REQUEST_TIMEOUT;
#ifdef CONFIG_FUSE_DAX
if (fm->fc->dax)
flags |= FUSE_MAP_ALIGNMENT;
@@ -1446,6 +1518,13 @@ void fuse_send_init(struct fuse_mount *fm)
if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
flags |= FUSE_PASSTHROUGH;
+ /*
+ * This is just an informational flag for the fuse server. No need to check
+ * the reply - the server is either sending IORING_OP_URING_CMD or not.
+ */
+ if (fuse_uring_enabled())
+ flags |= FUSE_OVER_IO_URING;
+
ia->in.flags = flags;
ia->in.flags2 = flags >> 32;
@@ -1462,10 +1541,30 @@ void fuse_send_init(struct fuse_mount *fm)
ia->args.out_args[0].value = &ia->out;
ia->args.force = true;
ia->args.nocreds = true;
- ia->args.end = process_init_reply;
- if (fuse_simple_background(fm, &ia->args, GFP_KERNEL) != 0)
- process_init_reply(fm, &ia->args, -ENOTCONN);
+ return ia;
+}
+
+int fuse_send_init(struct fuse_mount *fm)
+{
+ struct fuse_init_args *ia = fuse_new_init(fm);
+ int err;
+
+ if (fm->fc->sync_init) {
+ err = fuse_simple_request(fm, &ia->args);
+ /* Ignore size of init reply */
+ if (err > 0)
+ err = 0;
+ } else {
+ ia->args.end = process_init_reply;
+ err = fuse_simple_background(fm, &ia->args, GFP_KERNEL);
+ if (!err)
+ return 0;
+ }
+ process_init_reply(fm, &ia->args, err);
+ if (fm->fc->conn_error)
+ return -ENOTCONN;
+ return 0;
}
EXPORT_SYMBOL_GPL(fuse_send_init);
@@ -1495,8 +1594,6 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
if (err)
return err;
- /* fuse does it's own writeback accounting */
- sb->s_bdi->capabilities &= ~BDI_CAP_WRITEBACK_ACCT;
sb->s_bdi->capabilities |= BDI_CAP_STRICTLIMIT;
/*
@@ -1653,7 +1750,7 @@ static int fuse_fill_super_submount(struct super_block *sb,
fi = get_fuse_inode(root);
fi->nlookup--;
- sb->s_d_op = &fuse_dentry_operations;
+ set_default_d_op(sb, &fuse_dentry_operations);
sb->s_root = d_make_root(root);
if (!sb->s_root)
return -ENOMEM;
@@ -1745,6 +1842,7 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
if (!sb_set_blocksize(sb, ctx->blksize))
goto err;
#endif
+ fc->sync_fs = 1;
} else {
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
@@ -1788,17 +1886,19 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
err = -ENOMEM;
root = fuse_get_root_inode(sb, ctx->rootmode);
- sb->s_d_op = &fuse_root_dentry_operations;
+ set_default_d_op(sb, &fuse_dentry_operations);
root_dentry = d_make_root(root);
if (!root_dentry)
goto err_dev_free;
- /* Root dentry doesn't have .d_revalidate */
- sb->s_d_op = &fuse_dentry_operations;
mutex_lock(&fuse_mutex);
err = -EINVAL;
- if (ctx->fudptr && *ctx->fudptr)
- goto err_unlock;
+ if (ctx->fudptr && *ctx->fudptr) {
+ if (*ctx->fudptr == FUSE_DEV_SYNC_INIT)
+ fc->sync_init = 1;
+ else
+ goto err_unlock;
+ }
err = fuse_ctl_add_conn(fc);
if (err)
@@ -1806,8 +1906,10 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
list_add_tail(&fc->entry, &fuse_conn_list);
sb->s_root = root_dentry;
- if (ctx->fudptr)
+ if (ctx->fudptr) {
*ctx->fudptr = fud;
+ wake_up_all(&fuse_dev_waitq);
+ }
mutex_unlock(&fuse_mutex);
return 0;
@@ -1828,6 +1930,7 @@ EXPORT_SYMBOL_GPL(fuse_fill_super_common);
static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
{
struct fuse_fs_context *ctx = fsc->fs_private;
+ struct fuse_mount *fm;
int err;
if (!ctx->file || !ctx->rootmode_present ||
@@ -1848,8 +1951,10 @@ static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
return err;
/* file->private_data shall be visible on all CPUs after this */
smp_mb();
- fuse_send_init(get_fuse_mount_super(sb));
- return 0;
+
+ fm = get_fuse_mount_super(sb);
+
+ return fuse_send_init(fm);
}
/*
@@ -1910,7 +2015,7 @@ static int fuse_get_tree(struct fs_context *fsc)
* Allow creating a fuse mount with an already initialized fuse
* connection
*/
- fud = READ_ONCE(ctx->file->private_data);
+ fud = __fuse_get_dev(ctx->file);
if (ctx->file->f_op == &fuse_dev_operations && fud) {
fsc->sget_key = fud->fc;
sb = sget_fc(fsc, fuse_test_super, fuse_set_no_super);
@@ -2181,6 +2286,8 @@ static int __init fuse_init(void)
if (res)
goto err_sysfs_cleanup;
+ fuse_dentry_tree_init();
+
sanitize_global_limit(&max_user_bgreq);
sanitize_global_limit(&max_user_congthresh);
@@ -2200,6 +2307,7 @@ static void __exit fuse_exit(void)
{
pr_debug("exit\n");
+ fuse_dentry_tree_cleanup();
fuse_ctl_cleanup();
fuse_sysfs_cleanup();
fuse_fs_cleanup();
diff --git a/fs/fuse/ioctl.c b/fs/fuse/ioctl.c
index 2d9abf48828f..fdc175e93f74 100644
--- a/fs/fuse/ioctl.c
+++ b/fs/fuse/ioctl.c
@@ -502,7 +502,7 @@ static void fuse_priv_ioctl_cleanup(struct inode *inode, struct fuse_file *ff)
fuse_file_release(inode, ff, O_RDONLY, NULL, S_ISDIR(inode->i_mode));
}
-int fuse_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int fuse_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct fuse_file *ff;
@@ -540,7 +540,7 @@ cleanup:
}
int fuse_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa)
+ struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct fuse_file *ff;
diff --git a/fs/fuse/iomode.c b/fs/fuse/iomode.c
index c99e285f3183..3728933188f3 100644
--- a/fs/fuse/iomode.c
+++ b/fs/fuse/iomode.c
@@ -177,8 +177,7 @@ static int fuse_file_passthrough_open(struct inode *inode, struct file *file)
(ff->open_flags & ~FOPEN_PASSTHROUGH_MASK))
return -EINVAL;
- fb = fuse_passthrough_open(file, inode,
- ff->args->open_outarg.backing_id);
+ fb = fuse_passthrough_open(file, ff->args->open_outarg.backing_id);
if (IS_ERR(fb))
return PTR_ERR(fb);
diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c
index 607ef735ad4a..72de97c03d0e 100644
--- a/fs/fuse/passthrough.c
+++ b/fs/fuse/passthrough.c
@@ -144,166 +144,12 @@ ssize_t fuse_passthrough_mmap(struct file *file, struct vm_area_struct *vma)
return backing_file_mmap(backing_file, vma, &ctx);
}
-struct fuse_backing *fuse_backing_get(struct fuse_backing *fb)
-{
- if (fb && refcount_inc_not_zero(&fb->count))
- return fb;
- return NULL;
-}
-
-static void fuse_backing_free(struct fuse_backing *fb)
-{
- pr_debug("%s: fb=0x%p\n", __func__, fb);
-
- if (fb->file)
- fput(fb->file);
- put_cred(fb->cred);
- kfree_rcu(fb, rcu);
-}
-
-void fuse_backing_put(struct fuse_backing *fb)
-{
- if (fb && refcount_dec_and_test(&fb->count))
- fuse_backing_free(fb);
-}
-
-void fuse_backing_files_init(struct fuse_conn *fc)
-{
- idr_init(&fc->backing_files_map);
-}
-
-static int fuse_backing_id_alloc(struct fuse_conn *fc, struct fuse_backing *fb)
-{
- int id;
-
- idr_preload(GFP_KERNEL);
- spin_lock(&fc->lock);
- /* FIXME: xarray might be space inefficient */
- id = idr_alloc_cyclic(&fc->backing_files_map, fb, 1, 0, GFP_ATOMIC);
- spin_unlock(&fc->lock);
- idr_preload_end();
-
- WARN_ON_ONCE(id == 0);
- return id;
-}
-
-static struct fuse_backing *fuse_backing_id_remove(struct fuse_conn *fc,
- int id)
-{
- struct fuse_backing *fb;
-
- spin_lock(&fc->lock);
- fb = idr_remove(&fc->backing_files_map, id);
- spin_unlock(&fc->lock);
-
- return fb;
-}
-
-static int fuse_backing_id_free(int id, void *p, void *data)
-{
- struct fuse_backing *fb = p;
-
- WARN_ON_ONCE(refcount_read(&fb->count) != 1);
- fuse_backing_free(fb);
- return 0;
-}
-
-void fuse_backing_files_free(struct fuse_conn *fc)
-{
- idr_for_each(&fc->backing_files_map, fuse_backing_id_free, NULL);
- idr_destroy(&fc->backing_files_map);
-}
-
-int fuse_backing_open(struct fuse_conn *fc, struct fuse_backing_map *map)
-{
- struct file *file;
- struct super_block *backing_sb;
- struct fuse_backing *fb = NULL;
- int res;
-
- pr_debug("%s: fd=%d flags=0x%x\n", __func__, map->fd, map->flags);
-
- /* TODO: relax CAP_SYS_ADMIN once backing files are visible to lsof */
- res = -EPERM;
- if (!fc->passthrough || !capable(CAP_SYS_ADMIN))
- goto out;
-
- res = -EINVAL;
- if (map->flags || map->padding)
- goto out;
-
- file = fget_raw(map->fd);
- res = -EBADF;
- if (!file)
- goto out;
-
- backing_sb = file_inode(file)->i_sb;
- res = -ELOOP;
- if (backing_sb->s_stack_depth >= fc->max_stack_depth)
- goto out_fput;
-
- fb = kmalloc(sizeof(struct fuse_backing), GFP_KERNEL);
- res = -ENOMEM;
- if (!fb)
- goto out_fput;
-
- fb->file = file;
- fb->cred = prepare_creds();
- refcount_set(&fb->count, 1);
-
- res = fuse_backing_id_alloc(fc, fb);
- if (res < 0) {
- fuse_backing_free(fb);
- fb = NULL;
- }
-
-out:
- pr_debug("%s: fb=0x%p, ret=%i\n", __func__, fb, res);
-
- return res;
-
-out_fput:
- fput(file);
- goto out;
-}
-
-int fuse_backing_close(struct fuse_conn *fc, int backing_id)
-{
- struct fuse_backing *fb = NULL;
- int err;
-
- pr_debug("%s: backing_id=%d\n", __func__, backing_id);
-
- /* TODO: relax CAP_SYS_ADMIN once backing files are visible to lsof */
- err = -EPERM;
- if (!fc->passthrough || !capable(CAP_SYS_ADMIN))
- goto out;
-
- err = -EINVAL;
- if (backing_id <= 0)
- goto out;
-
- err = -ENOENT;
- fb = fuse_backing_id_remove(fc, backing_id);
- if (!fb)
- goto out;
-
- fuse_backing_put(fb);
- err = 0;
-out:
- pr_debug("%s: fb=0x%p, err=%i\n", __func__, fb, err);
-
- return err;
-}
-
/*
* Setup passthrough to a backing file.
*
* Returns an fb object with elevated refcount to be stored in fuse inode.
*/
-struct fuse_backing *fuse_passthrough_open(struct file *file,
- struct inode *inode,
- int backing_id)
+struct fuse_backing *fuse_passthrough_open(struct file *file, int backing_id)
{
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fm->fc;
@@ -315,12 +161,8 @@ struct fuse_backing *fuse_passthrough_open(struct file *file,
if (backing_id <= 0)
goto out;
- rcu_read_lock();
- fb = idr_find(&fc->backing_files_map, backing_id);
- fb = fuse_backing_get(fb);
- rcu_read_unlock();
-
err = -ENOENT;
+ fb = fuse_backing_lookup(fc, backing_id);
if (!fb)
goto out;
diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
index 17ce9636a2b1..c2aae2eef086 100644
--- a/fs/fuse/readdir.c
+++ b/fs/fuse/readdir.c
@@ -120,7 +120,7 @@ static bool fuse_emit(struct file *file, struct dir_context *ctx,
fuse_add_dirent_to_cache(file, dirent, ctx->pos);
return dir_emit(ctx, dirent->name, dirent->namelen, dirent->ino,
- dirent->type);
+ dirent->type | FILLDIR_FLAG_NOINTR);
}
static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
@@ -161,6 +161,7 @@ static int fuse_direntplus_link(struct file *file,
struct fuse_conn *fc;
struct inode *inode;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ int epoch;
if (!o->nodeid) {
/*
@@ -190,6 +191,7 @@ static int fuse_direntplus_link(struct file *file,
return -EIO;
fc = get_fuse_conn(dir);
+ epoch = atomic_read(&fc->epoch);
name.hash = full_name_hash(parent, name.name, name.len);
dentry = d_lookup(parent, &name);
@@ -256,6 +258,7 @@ retry:
}
if (fc->readdirplus_auto)
set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
+ dentry->d_time = epoch;
fuse_change_entry_timeout(dentry, o);
dput(dentry);
@@ -332,35 +335,32 @@ static int fuse_readdir_uncached(struct file *file, struct dir_context *ctx)
{
int plus;
ssize_t res;
- struct folio *folio;
struct inode *inode = file_inode(file);
struct fuse_mount *fm = get_fuse_mount(inode);
+ struct fuse_conn *fc = fm->fc;
struct fuse_io_args ia = {};
- struct fuse_args_pages *ap = &ia.ap;
- struct fuse_folio_desc desc = { .length = PAGE_SIZE };
+ struct fuse_args *args = &ia.ap.args;
+ void *buf;
+ size_t bufsize = clamp((unsigned int) ctx->count, PAGE_SIZE, fc->max_pages << PAGE_SHIFT);
u64 attr_version = 0, evict_ctr = 0;
bool locked;
- folio = folio_alloc(GFP_KERNEL, 0);
- if (!folio)
+ buf = kvmalloc(bufsize, GFP_KERNEL);
+ if (!buf)
return -ENOMEM;
+ args->out_args[0].value = buf;
+
plus = fuse_use_readdirplus(inode, ctx);
- ap->args.out_pages = true;
- ap->num_folios = 1;
- ap->folios = &folio;
- ap->descs = &desc;
if (plus) {
attr_version = fuse_get_attr_version(fm->fc);
evict_ctr = fuse_get_evict_ctr(fm->fc);
- fuse_read_args_fill(&ia, file, ctx->pos, PAGE_SIZE,
- FUSE_READDIRPLUS);
+ fuse_read_args_fill(&ia, file, ctx->pos, bufsize, FUSE_READDIRPLUS);
} else {
- fuse_read_args_fill(&ia, file, ctx->pos, PAGE_SIZE,
- FUSE_READDIR);
+ fuse_read_args_fill(&ia, file, ctx->pos, bufsize, FUSE_READDIR);
}
locked = fuse_lock_inode(inode);
- res = fuse_simple_request(fm, &ap->args);
+ res = fuse_simple_request(fm, args);
fuse_unlock_inode(inode, locked);
if (res >= 0) {
if (!res) {
@@ -369,16 +369,14 @@ static int fuse_readdir_uncached(struct file *file, struct dir_context *ctx)
if (ff->open_flags & FOPEN_CACHE_DIR)
fuse_readdir_cache_end(file, ctx->pos);
} else if (plus) {
- res = parse_dirplusfile(folio_address(folio), res,
- file, ctx, attr_version,
+ res = parse_dirplusfile(buf, res, file, ctx, attr_version,
evict_ctr);
} else {
- res = parse_dirfile(folio_address(folio), res, file,
- ctx);
+ res = parse_dirfile(buf, res, file, ctx);
}
}
- folio_put(folio);
+ kvfree(buf);
fuse_invalidate_atime(inode);
return res;
}
@@ -419,7 +417,7 @@ static enum fuse_parse_result fuse_parse_cache(struct fuse_file *ff,
if (ff->readdir.pos == ctx->pos) {
res = FOUND_SOME;
if (!dir_emit(ctx, dirent->name, dirent->namelen,
- dirent->ino, dirent->type))
+ dirent->ino, dirent->type | FILLDIR_FLAG_NOINTR))
return FOUND_ALL;
ctx->pos = dirent->off;
}
diff --git a/fs/fuse/sysctl.c b/fs/fuse/sysctl.c
index b272bb333005..e2d921abcb88 100644
--- a/fs/fuse/sysctl.c
+++ b/fs/fuse/sysctl.c
@@ -13,7 +13,13 @@ static struct ctl_table_header *fuse_table_header;
/* Bound by fuse_init_out max_pages, which is a u16 */
static unsigned int sysctl_fuse_max_pages_limit = 65535;
-static struct ctl_table fuse_sysctl_table[] = {
+/*
+ * fuse_init_out request timeouts are u16.
+ * This goes up to ~18 hours, which is plenty for a timeout.
+ */
+static unsigned int sysctl_fuse_req_timeout_limit = 65535;
+
+static const struct ctl_table fuse_sysctl_table[] = {
{
.procname = "max_pages_limit",
.data = &fuse_max_pages_limit,
@@ -23,6 +29,24 @@ static struct ctl_table fuse_sysctl_table[] = {
.extra1 = SYSCTL_ONE,
.extra2 = &sysctl_fuse_max_pages_limit,
},
+ {
+ .procname = "default_request_timeout",
+ .data = &fuse_default_req_timeout,
+ .maxlen = sizeof(fuse_default_req_timeout),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &sysctl_fuse_req_timeout_limit,
+ },
+ {
+ .procname = "max_request_timeout",
+ .data = &fuse_max_req_timeout,
+ .maxlen = sizeof(fuse_max_req_timeout),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &sysctl_fuse_req_timeout_limit,
+ },
};
int fuse_sysctl_register(void)
diff --git a/fs/fuse/trace.c b/fs/fuse/trace.c
new file mode 100644
index 000000000000..93bd72efc98c
--- /dev/null
+++ b/fs/fuse/trace.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#include "dev_uring_i.h"
+#include "fuse_i.h"
+#include "fuse_dev_i.h"
+
+#include <linux/pagemap.h>
+
+#define CREATE_TRACE_POINTS
+#include "fuse_trace.h"
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index 82afe78ec542..b2f6486fe1d5 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -9,7 +9,6 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/group_cpus.h>
-#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/module.h>
#include <linux/virtio.h>
@@ -21,6 +20,7 @@
#include <linux/cleanup.h>
#include <linux/uio.h>
#include "fuse_i.h"
+#include "fuse_dev_i.h"
/* Used to help calculate the FUSE connection's max_pages limit for a request's
* size. Parts of the struct fuse_req are sliced into scattergather lists in
@@ -373,7 +373,7 @@ static int virtio_fs_add_queues_sysfs(struct virtio_fs *fs)
sprintf(buff, "%d", i);
fsvq->kobj = kobject_create_and_add(buff, fs->mqs_kobj);
- if (!fs->mqs_kobj) {
+ if (!fsvq->kobj) {
ret = -ENOMEM;
goto out_del;
}
@@ -762,7 +762,6 @@ static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
static void virtio_fs_request_complete(struct fuse_req *req,
struct virtio_fs_vq *fsvq)
{
- struct fuse_pqueue *fpq = &fsvq->fud->pq;
struct fuse_args *args;
struct fuse_args_pages *ap;
unsigned int len, i, thislen;
@@ -791,9 +790,7 @@ static void virtio_fs_request_complete(struct fuse_req *req,
}
}
- spin_lock(&fpq->lock);
clear_bit(FR_SENT, &req->flags);
- spin_unlock(&fpq->lock);
fuse_request_end(req);
spin_lock(&fsvq->lock);
@@ -862,7 +859,7 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs)
{
const struct cpumask *mask, *masks;
- unsigned int q, cpu;
+ unsigned int q, cpu, nr_masks;
/* First attempt to map using existing transport layer affinities
* e.g. PCIe MSI-X
@@ -882,7 +879,7 @@ static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *f
return;
fallback:
/* Attempt to map evenly in groups over the CPUs */
- masks = group_cpus_evenly(fs->num_request_queues);
+ masks = group_cpus_evenly(fs->num_request_queues, &nr_masks);
/* If even this fails we default to all CPUs use first request queue */
if (!masks) {
for_each_possible_cpu(cpu)
@@ -891,7 +888,7 @@ fallback:
}
for (q = 0; q < fs->num_request_queues; q++) {
- for_each_cpu(cpu, &masks[q])
+ for_each_cpu(cpu, &masks[q % nr_masks])
fs->mq_map[cpu] = q + VQ_REQUEST;
}
kfree(masks);
@@ -1008,7 +1005,7 @@ static void virtio_fs_cleanup_vqs(struct virtio_device *vdev)
*/
static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
long nr_pages, enum dax_access_mode mode,
- void **kaddr, pfn_t *pfn)
+ void **kaddr, unsigned long *pfn)
{
struct virtio_fs *fs = dax_get_private(dax_dev);
phys_addr_t offset = PFN_PHYS(pgoff);
@@ -1017,8 +1014,7 @@ static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
if (kaddr)
*kaddr = fs->window_kaddr + offset;
if (pfn)
- *pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
- PFN_DEV | PFN_MAP);
+ *pfn = PHYS_PFN(fs->window_phys_addr + offset);
return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}
@@ -1386,7 +1382,7 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
unsigned int out_sgs = 0;
unsigned int in_sgs = 0;
unsigned int total_sgs;
- unsigned int i;
+ unsigned int i, hash;
int ret;
bool notify;
struct fuse_pqueue *fpq;
@@ -1446,8 +1442,9 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
/* Request successfully sent. */
fpq = &fsvq->fud->pq;
+ hash = fuse_req_hash(req->in.h.unique);
spin_lock(&fpq->lock);
- list_add_tail(&req->list, fpq->processing);
+ list_add_tail(&req->list, &fpq->processing[hash]);
spin_unlock(&fpq->lock);
set_bit(FR_SENT, &req->flags);
/* matches barrier in request_wait_answer() */
@@ -1482,8 +1479,7 @@ static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req)
struct virtio_fs_vq *fsvq;
int ret;
- if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
- req->in.h.unique = fuse_get_unique(fiq);
+ fuse_request_assign_unique(fiq, req);
clear_bit(FR_PENDING, &req->flags);
@@ -1670,6 +1666,9 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
unsigned int virtqueue_size;
int err = -EIO;
+ if (!fsc->source)
+ return invalf(fsc, "No source specified");
+
/* This gets a reference on virtio_fs object. This ptr gets installed
* in fc->iq->priv. Once fuse_conn is going away, it calls ->put()
* to drop the reference to this object.
diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c
index 9f568d345c51..93dfb06b6cea 100644
--- a/fs/fuse/xattr.c
+++ b/fs/fuse/xattr.c
@@ -164,9 +164,10 @@ int fuse_removexattr(struct inode *inode, const char *name)
args.opcode = FUSE_REMOVEXATTR;
args.nodeid = get_node_id(inode);
- args.in_numargs = 1;
- args.in_args[0].size = strlen(name) + 1;
- args.in_args[0].value = name;
+ args.in_numargs = 2;
+ fuse_set_zero_arg0(&args);
+ args.in_args[1].size = strlen(name) + 1;
+ args.in_args[1].value = name;
err = fuse_simple_request(fm, &args);
if (err == -ENOSYS) {
fm->fc->no_removexattr = 1;
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index be7f87a8e11a..7bd231d16d4a 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -4,7 +4,6 @@ config GFS2_FS
select BUFFER_HEAD
select FS_POSIX_ACL
select CRC32
- select LIBCRC32C
select QUOTACTL
select FS_IOMAP
help
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 68fc8af14700..e79ad087512a 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -37,27 +37,6 @@
#include "aops.h"
-void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
- size_t from, size_t len)
-{
- struct buffer_head *head = folio_buffers(folio);
- unsigned int bsize = head->b_size;
- struct buffer_head *bh;
- size_t to = from + len;
- size_t start, end;
-
- for (bh = head, start = 0; bh != head || !start;
- bh = bh->b_this_page, start = end) {
- end = start + bsize;
- if (end <= from)
- continue;
- if (start >= to)
- break;
- set_buffer_uptodate(bh);
- gfs2_trans_add_data(ip->i_gl, bh);
- }
-}
-
/**
* gfs2_get_block_noalloc - Fills in a buffer head with details about a block
* @inode: The inode
@@ -102,8 +81,7 @@ static int gfs2_write_jdata_folio(struct folio *folio,
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- if (folio_pos(folio) < i_size &&
- i_size < folio_pos(folio) + folio_size(folio))
+ if (folio_pos(folio) < i_size && i_size < folio_next_pos(folio))
folio_zero_segment(folio, offset_in_folio(folio, i_size),
folio_size(folio));
@@ -133,12 +111,43 @@ static int __gfs2_jdata_write_folio(struct folio *folio,
inode->i_sb->s_blocksize,
BIT(BH_Dirty)|BIT(BH_Uptodate));
}
- gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
+ gfs2_trans_add_databufs(ip->i_gl, folio, 0, folio_size(folio));
}
return gfs2_write_jdata_folio(folio, wbc);
}
/**
+ * gfs2_jdata_writeback - Write jdata folios to the log
+ * @mapping: The mapping to write
+ * @wbc: The writeback control
+ *
+ * Returns: errno
+ */
+int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc)
+{
+ struct inode *inode = mapping->host;
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
+ struct folio *folio = NULL;
+ int error;
+
+ BUG_ON(current->journal_info);
+ if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE))
+ return 0;
+
+ while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
+ if (folio_test_checked(folio)) {
+ folio_redirty_for_writepage(wbc, folio);
+ folio_unlock(folio);
+ continue;
+ }
+ error = __gfs2_jdata_write_folio(folio, wbc);
+ }
+
+ return error;
+}
+
+/**
* gfs2_writepages - Write a bunch of dirty pages back to disk
* @mapping: The mapping to write
* @wbc: Write-back control
@@ -149,7 +158,11 @@ static int gfs2_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
- struct iomap_writepage_ctx wpc = { };
+ struct iomap_writepage_ctx wpc = {
+ .inode = mapping->host,
+ .wbc = wbc,
+ .ops = &gfs2_writeback_ops,
+ };
int ret;
/*
@@ -158,7 +171,7 @@ static int gfs2_writepages(struct address_space *mapping,
* want balance_dirty_pages() to loop indefinitely trying to write out
* pages held in the ail that it can't find.
*/
- ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
+ ret = iomap_writepages(&wpc);
if (ret == 0 && wbc->nr_to_write > 0)
set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
return ret;
@@ -228,24 +241,16 @@ continue_unlock:
ret = __gfs2_jdata_write_folio(folio, wbc);
if (unlikely(ret)) {
- if (ret == AOP_WRITEPAGE_ACTIVATE) {
- folio_unlock(folio);
- ret = 0;
- } else {
-
- /*
- * done_index is set past this page,
- * so media errors will not choke
- * background writeout for the entire
- * file. This has consequences for
- * range_cyclic semantics (ie. it may
- * not be suitable for data integrity
- * writeout).
- */
- *done_index = folio_next_index(folio);
- ret = 1;
- break;
- }
+ /*
+ * done_index is set past this page, so media errors
+ * will not choke background writeout for the entire
+ * file. This has consequences for range_cyclic
+ * semantics (ie. it may not be suitable for data
+ * integrity writeout).
+ */
+ *done_index = folio_next_index(folio);
+ ret = 1;
+ break;
}
/*
@@ -305,10 +310,7 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
range_whole = 1;
cycled = 1; /* ignore range_cyclic tests */
}
- if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
- tag = PAGECACHE_TAG_TOWRITE;
- else
- tag = PAGECACHE_TAG_DIRTY;
+ tag = wbc_to_tag(wbc);
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
@@ -418,18 +420,18 @@ static int gfs2_read_folio(struct file *file, struct folio *folio)
struct inode *inode = folio->mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- int error;
+ int error = 0;
if (!gfs2_is_jdata(ip) ||
(i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
- error = iomap_read_folio(folio, &gfs2_iomap_ops);
+ iomap_bio_read_folio(folio, &gfs2_iomap_ops);
} else if (gfs2_is_stuffed(ip)) {
error = stuffed_read_folio(ip, folio);
} else {
error = mpage_read_folio(folio, gfs2_block_map);
}
- if (gfs2_withdrawing_or_withdrawn(sdp))
+ if (gfs2_withdrawn(sdp))
return -EIO;
return error;
@@ -497,7 +499,7 @@ static void gfs2_readahead(struct readahead_control *rac)
else if (gfs2_is_jdata(ip))
mpage_readahead(rac, gfs2_block_map);
else
- iomap_readahead(rac, &gfs2_iomap_ops);
+ iomap_bio_readahead(rac, &gfs2_iomap_ops);
}
/**
@@ -540,7 +542,7 @@ out:
gfs2_trans_end(sdp);
}
-static bool jdata_dirty_folio(struct address_space *mapping,
+static bool gfs2_jdata_dirty_folio(struct address_space *mapping,
struct folio *folio)
{
if (current->journal_info)
@@ -722,7 +724,7 @@ static const struct address_space_operations gfs2_jdata_aops = {
.writepages = gfs2_jdata_writepages,
.read_folio = gfs2_read_folio,
.readahead = gfs2_readahead,
- .dirty_folio = jdata_dirty_folio,
+ .dirty_folio = gfs2_jdata_dirty_folio,
.bmap = gfs2_bmap,
.migrate_folio = buffer_migrate_folio,
.invalidate_folio = gfs2_invalidate_folio,
diff --git a/fs/gfs2/aops.h b/fs/gfs2/aops.h
index a10c4334d248..bf002522a782 100644
--- a/fs/gfs2/aops.h
+++ b/fs/gfs2/aops.h
@@ -9,7 +9,6 @@
#include "incore.h"
void adjust_fs_space(struct inode *inode);
-void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
- size_t from, size_t len);
+int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc);
#endif /* __AOPS_DOT_H__ */
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 1795c4e8dbf6..131091520de6 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -963,12 +963,16 @@ static struct folio *
gfs2_iomap_get_folio(struct iomap_iter *iter, loff_t pos, unsigned len)
{
struct inode *inode = iter->inode;
+ struct gfs2_inode *ip = GFS2_I(inode);
unsigned int blockmask = i_blocksize(inode) - 1;
struct gfs2_sbd *sdp = GFS2_SB(inode);
unsigned int blocks;
struct folio *folio;
int status;
+ if (!gfs2_is_jdata(ip) && !gfs2_is_stuffed(ip))
+ return iomap_get_folio(iter, pos, len);
+
blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
status = gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
if (status)
@@ -987,20 +991,22 @@ static void gfs2_iomap_put_folio(struct inode *inode, loff_t pos,
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- if (!gfs2_is_stuffed(ip))
- gfs2_trans_add_databufs(ip, folio, offset_in_folio(folio, pos),
+ if (gfs2_is_jdata(ip) && !gfs2_is_stuffed(ip))
+ gfs2_trans_add_databufs(ip->i_gl, folio,
+ offset_in_folio(folio, pos),
copied);
folio_unlock(folio);
folio_put(folio);
- if (tr->tr_num_buf_new)
- __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
-
- gfs2_trans_end(sdp);
+ if (gfs2_is_jdata(ip) || gfs2_is_stuffed(ip)) {
+ if (tr->tr_num_buf_new)
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ gfs2_trans_end(sdp);
+ }
}
-static const struct iomap_folio_ops gfs2_iomap_folio_ops = {
+const struct iomap_write_ops gfs2_iomap_write_ops = {
.get_folio = gfs2_iomap_get_folio,
.put_folio = gfs2_iomap_put_folio,
};
@@ -1077,8 +1083,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
gfs2_trans_end(sdp);
}
- if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
- iomap->folio_ops = &gfs2_iomap_folio_ops;
return 0;
out_trans_end:
@@ -1296,11 +1300,14 @@ int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
* uses iomap write to perform its actions, which begin their own transactions
* (iomap_begin, get_folio, etc.)
*/
-static int gfs2_block_zero_range(struct inode *inode, loff_t from,
- unsigned int length)
+static int gfs2_block_zero_range(struct inode *inode, loff_t from, loff_t length)
{
BUG_ON(current->journal_info);
- return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops);
+ if (from >= inode->i_size)
+ return 0;
+ length = min(length, inode->i_size - from);
+ return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops,
+ &gfs2_iomap_write_ops, NULL);
}
#define GFS2_JTRUNC_REVOKES 8192
@@ -2465,23 +2472,26 @@ out:
return error;
}
-static int gfs2_map_blocks(struct iomap_writepage_ctx *wpc, struct inode *inode,
- loff_t offset, unsigned int len)
+static ssize_t gfs2_writeback_range(struct iomap_writepage_ctx *wpc,
+ struct folio *folio, u64 offset, unsigned int len, u64 end_pos)
{
- int ret;
-
- if (WARN_ON_ONCE(gfs2_is_stuffed(GFS2_I(inode))))
+ if (WARN_ON_ONCE(gfs2_is_stuffed(GFS2_I(wpc->inode))))
return -EIO;
- if (offset >= wpc->iomap.offset &&
- offset < wpc->iomap.offset + wpc->iomap.length)
- return 0;
+ if (offset < wpc->iomap.offset ||
+ offset >= wpc->iomap.offset + wpc->iomap.length) {
+ int ret;
- memset(&wpc->iomap, 0, sizeof(wpc->iomap));
- ret = gfs2_iomap_get(inode, offset, INT_MAX, &wpc->iomap);
- return ret;
+ memset(&wpc->iomap, 0, sizeof(wpc->iomap));
+ ret = gfs2_iomap_get(wpc->inode, offset, INT_MAX, &wpc->iomap);
+ if (ret)
+ return ret;
+ }
+
+ return iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
}
const struct iomap_writeback_ops gfs2_writeback_ops = {
- .map_blocks = gfs2_map_blocks,
+ .writeback_range = gfs2_writeback_range,
+ .writeback_submit = iomap_ioend_writeback_submit,
};
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h
index 4e8b1e8ebdf3..6cdc72dd55a3 100644
--- a/fs/gfs2/bmap.h
+++ b/fs/gfs2/bmap.h
@@ -44,6 +44,7 @@ static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
}
extern const struct iomap_ops gfs2_iomap_ops;
+extern const struct iomap_write_ops gfs2_iomap_write_ops;
extern const struct iomap_writeback_ops gfs2_writeback_ops;
int gfs2_unstuff_dinode(struct gfs2_inode *ip);
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c
index 2e215e8c3c88..95050e719233 100644
--- a/fs/gfs2/dentry.c
+++ b/fs/gfs2/dentry.c
@@ -21,7 +21,9 @@
/**
* gfs2_drevalidate - Check directory lookup consistency
- * @dentry: the mapping to check
+ * @dir: expected parent directory inode
+ * @name: expected name
+ * @dentry: dentry to check
* @flags: lookup flags
*
* Check to make sure the lookup necessary to arrive at this inode from its
@@ -30,50 +32,43 @@
* Returns: 1 if the dentry is ok, 0 if it isn't
*/
-static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags)
+static int gfs2_drevalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
- struct dentry *parent;
- struct gfs2_sbd *sdp;
- struct gfs2_inode *dip;
+ struct gfs2_sbd *sdp = GFS2_SB(dir);
+ struct gfs2_inode *dip = GFS2_I(dir);
struct inode *inode;
struct gfs2_holder d_gh;
struct gfs2_inode *ip = NULL;
- int error, valid = 0;
+ int error, valid;
int had_lock = 0;
if (flags & LOOKUP_RCU)
return -ECHILD;
- parent = dget_parent(dentry);
- sdp = GFS2_SB(d_inode(parent));
- dip = GFS2_I(d_inode(parent));
inode = d_inode(dentry);
if (inode) {
if (is_bad_inode(inode))
- goto out;
+ return 0;
ip = GFS2_I(inode);
}
- if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) {
- valid = 1;
- goto out;
- }
+ if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
+ return 1;
had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL);
if (!had_lock) {
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
if (error)
- goto out;
+ return 0;
}
- error = gfs2_dir_check(d_inode(parent), &dentry->d_name, ip);
+ error = gfs2_dir_check(dir, name, ip);
valid = inode ? !error : (error == -ENOENT);
if (!had_lock)
gfs2_glock_dq_uninit(&d_gh);
-out:
- dput(parent);
return valid;
}
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index dbf1aede744c..509e2f0d97e7 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -60,6 +60,7 @@
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
+#include <linux/log2.h>
#include "gfs2.h"
#include "incore.h"
@@ -912,7 +913,6 @@ static int dir_make_exhash(struct inode *inode)
struct qstr args;
struct buffer_head *bh, *dibh;
struct gfs2_leaf *leaf;
- int y;
u32 x;
__be64 *lp;
u64 bn;
@@ -979,9 +979,7 @@ static int dir_make_exhash(struct inode *inode)
i_size_write(inode, sdp->sd_sb.sb_bsize / 2);
gfs2_add_inode_blocks(&dip->i_inode, 1);
dip->i_diskflags |= GFS2_DIF_EXHASH;
-
- for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
- dip->i_depth = y;
+ dip->i_depth = ilog2(sdp->sd_hash_ptrs);
gfs2_dinode_out(dip, dibh->b_data);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 1e73cf87ff88..b2d23c98c996 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -155,7 +155,7 @@ static inline u32 gfs2_gfsflags_to_fsflags(struct inode *inode, u32 gfsflags)
return fsflags;
}
-int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int gfs2_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct gfs2_inode *ip = GFS2_I(inode);
@@ -251,6 +251,7 @@ static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask)
error = filemap_fdatawait(inode->i_mapping);
if (error)
goto out;
+ truncate_inode_pages(inode->i_mapping, 0);
if (new_flags & GFS2_DIF_JDATA)
gfs2_ordered_del_inode(ip);
}
@@ -275,7 +276,7 @@ out:
}
int gfs2_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa)
+ struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
u32 fsflags = fa->flags, gfsflags = 0;
@@ -743,7 +744,7 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
{
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
- int sync_state = inode->i_state & I_DIRTY;
+ int sync_state = inode_state_read_once(inode) & I_DIRTY;
struct gfs2_inode *ip = GFS2_I(inode);
int ret = 0, ret1 = 0;
@@ -819,7 +820,7 @@ static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
/*
* In this function, we disable page faults when we're holding the
* inode glock while doing I/O. If a page fault occurs, we indicate
- * that the inode glock may be dropped, fault in the pages manually,
+ * that the inode glock should be dropped, fault in the pages manually,
* and retry.
*
* Unlike generic_file_read_iter, for reads, iomap_dio_rw can trigger
@@ -884,7 +885,7 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
/*
* In this function, we disable page faults when we're holding the
* inode glock while doing I/O. If a page fault occurs, we indicate
- * that the inode glock may be dropped, fault in the pages manually,
+ * that the inode glock should be dropped, fault in the pages manually,
* and retry.
*
* For writes, iomap_dio_rw only triggers manual page faults, so we
@@ -956,7 +957,7 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
/*
* In this function, we disable page faults when we're holding the
* inode glock while doing I/O. If a page fault occurs, we indicate
- * that the inode glock may be dropped, fault in the pages manually,
+ * that the inode glock should be dropped, fault in the pages manually,
* and retry.
*/
@@ -1023,7 +1024,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
/*
* In this function, we disable page faults when we're holding the
* inode glock while doing I/O. If a page fault occurs, we indicate
- * that the inode glock may be dropped, fault in the pages manually,
+ * that the inode glock should be dropped, fault in the pages manually,
* and retry.
*/
@@ -1057,7 +1058,8 @@ retry:
}
pagefault_disable();
- ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops, NULL);
+ ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops,
+ &gfs2_iomap_write_ops, NULL);
pagefault_enable();
if (ret > 0)
written += ret;
@@ -1440,22 +1442,29 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ int ret;
if (!(fl->c.flc_flags & FL_POSIX))
return -ENOLCK;
- if (gfs2_withdrawing_or_withdrawn(sdp)) {
+ if (gfs2_withdrawn(sdp)) {
if (lock_is_unlock(fl))
locks_lock_file_wait(file, fl);
return -EIO;
}
- if (cmd == F_CANCELLK)
- return dlm_posix_cancel(ls->ls_dlm, ip->i_no_addr, file, fl);
- else if (IS_GETLK(cmd))
- return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
- else if (lock_is_unlock(fl))
- return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
- else
- return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
+ down_read(&ls->ls_sem);
+ ret = -ENODEV;
+ if (likely(ls->ls_dlm != NULL)) {
+ if (cmd == F_CANCELLK)
+ ret = dlm_posix_cancel(ls->ls_dlm, ip->i_no_addr, file, fl);
+ else if (IS_GETLK(cmd))
+ ret = dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
+ else if (lock_is_unlock(fl))
+ ret = dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
+ else
+ ret = dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
+ }
+ up_read(&ls->ls_sem);
+ return ret;
}
static void __flock_holder_uninit(struct file *file, struct gfs2_holder *fl_gh)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 8c4c1f871a88..92e029104d8a 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -137,33 +137,6 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
kmem_cache_free(gfs2_glock_cachep, gl);
}
-/**
- * glock_blocked_by_withdraw - determine if we can still use a glock
- * @gl: the glock
- *
- * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
- * when we're withdrawn. For example, to maintain metadata integrity, we should
- * disallow the use of inode and rgrp glocks when withdrawn. Other glocks like
- * the iopen or freeze glock may be safely used because none of their
- * metadata goes through the journal. So in general, we should disallow all
- * glocks that are journaled, and allow all the others. One exception is:
- * we need to allow our active journal to be promoted and demoted so others
- * may recover it and we can reacquire it when they're done.
- */
-static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
-{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-
- if (!gfs2_withdrawing_or_withdrawn(sdp))
- return false;
- if (gl->gl_ops->go_flags & GLOF_NONDISK)
- return false;
- if (!sdp->sd_jdesc ||
- gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr)
- return false;
- return true;
-}
-
static void __gfs2_glock_free(struct gfs2_glock *gl)
{
rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
@@ -270,7 +243,7 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
if (mapping) {
truncate_inode_pages_final(mapping);
- if (!gfs2_withdrawing_or_withdrawn(sdp))
+ if (!gfs2_withdrawn(sdp))
GLOCK_BUG_ON(gl, !mapping_empty(mapping));
}
trace_gfs2_glock_put(gl);
@@ -481,14 +454,18 @@ done:
/**
* do_promote - promote as many requests as possible on the current queue
* @gl: The glock
- *
- * Returns true on success (i.e., progress was made or there are no waiters).
*/
-static bool do_promote(struct gfs2_glock *gl)
+static void do_promote(struct gfs2_glock *gl)
{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct gfs2_holder *gh, *current_gh;
+ if (gfs2_withdrawn(sdp)) {
+ do_error(gl, LM_OUT_ERROR);
+ return;
+ }
+
current_gh = find_first_holder(gl);
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
if (test_bit(HIF_HOLDER, &gh->gh_iflags))
@@ -496,13 +473,10 @@ static bool do_promote(struct gfs2_glock *gl)
if (!may_grant(gl, current_gh, gh)) {
/*
* If we get here, it means we may not grant this
- * holder for some reason. If this holder is at the
- * head of the list, it means we have a blocked holder
- * at the head, so return false.
+ * holder for some reason.
*/
- if (list_is_first(&gh->gh_list, &gl->gl_holders))
- return false;
- do_error(gl, 0);
+ if (current_gh)
+ do_error(gl, 0); /* Fail queued try locks */
break;
}
set_bit(HIF_HOLDER, &gh->gh_iflags);
@@ -511,7 +485,6 @@ static bool do_promote(struct gfs2_glock *gl)
if (!current_gh)
current_gh = gh;
}
- return true;
}
/**
@@ -590,31 +563,31 @@ static void gfs2_demote_wake(struct gfs2_glock *gl)
static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
- struct gfs2_holder *gh;
- unsigned state = ret & LM_OUT_ST_MASK;
- trace_gfs2_glock_state_change(gl, state);
- state_change(gl, state);
- gh = find_first_waiter(gl);
+ if (!(ret & ~LM_OUT_ST_MASK)) {
+ unsigned state = ret & LM_OUT_ST_MASK;
+
+ trace_gfs2_glock_state_change(gl, state);
+ state_change(gl, state);
+ }
/* Demote to UN request arrived during demote to SH or DF */
if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
- state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
+ gl->gl_state != LM_ST_UNLOCKED &&
+ gl->gl_demote_state == LM_ST_UNLOCKED)
gl->gl_target = LM_ST_UNLOCKED;
/* Check for state != intended state */
- if (unlikely(state != gl->gl_target)) {
- if (gh && (ret & LM_OUT_CANCELED))
- gfs2_holder_wake(gh);
+ if (unlikely(gl->gl_state != gl->gl_target)) {
+ struct gfs2_holder *gh = find_first_waiter(gl);
+
if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
- /* move to back of queue and try next entry */
if (ret & LM_OUT_CANCELED) {
- list_move_tail(&gh->gh_list, &gl->gl_holders);
- gh = find_first_waiter(gl);
- gl->gl_target = gh->gh_state;
- if (do_promote(gl))
- goto out;
- goto retry;
+ list_del_init(&gh->gh_list);
+ trace_gfs2_glock_queue(gh, 0);
+ gfs2_holder_wake(gh);
+ gl->gl_target = gl->gl_state;
+ goto out;
}
/* Some error or failed "try lock" - report it */
if ((ret & LM_OUT_ERROR) ||
@@ -624,10 +597,9 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
goto out;
}
}
- switch(state) {
+ switch(gl->gl_state) {
/* Unlocked due to conversion deadlock, try again */
case LM_ST_UNLOCKED:
-retry:
do_xmote(gl, gh, gl->gl_target);
break;
/* Conversion fails, unlock and try again */
@@ -636,17 +608,21 @@ retry:
do_xmote(gl, gh, LM_ST_UNLOCKED);
break;
default: /* Everything else */
- fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
- gl->gl_target, state);
+ fs_err(gl->gl_name.ln_sbd,
+ "glock %u:%llu requested=%u ret=%u\n",
+ gl->gl_name.ln_type, gl->gl_name.ln_number,
+ gl->gl_req, ret);
GLOCK_BUG_ON(gl, 1);
}
return;
}
/* Fast path - we got what we asked for */
- if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
+ if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
+ clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
gfs2_demote_wake(gl);
- if (state != LM_ST_UNLOCKED) {
+ }
+ if (gl->gl_state != LM_ST_UNLOCKED) {
if (glops->go_xmote_bh) {
int rv;
@@ -661,17 +637,8 @@ retry:
do_promote(gl);
}
out:
- clear_bit(GLF_LOCK, &gl->gl_flags);
-}
-
-static bool is_system_glock(struct gfs2_glock *gl)
-{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
-
- if (gl == m_ip->i_gl)
- return true;
- return false;
+ if (!test_bit(GLF_CANCELING, &gl->gl_flags))
+ clear_bit(GLF_LOCK, &gl->gl_flags);
}
/**
@@ -690,145 +657,84 @@ __acquires(&gl->gl_lockref.lock)
const struct gfs2_glock_operations *glops = gl->gl_ops;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
- unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
int ret;
- if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) &&
- gh && !(gh->gh_flags & LM_FLAG_NOEXP))
- goto skip_inval;
+ /*
+ * When a filesystem is withdrawing, the remaining cluster nodes will
+ * take care of recovering the withdrawing node's journal. We only
+ * need to make sure that once we trigger remote recovery, we won't
+ * write to the shared block device anymore. This means that here,
+ *
+ * - no new writes to the filesystem must be triggered (->go_sync()).
+ *
+ * - any cached data should be discarded by calling ->go_inval(), dirty
+ * or not and journaled or unjournaled.
+ *
+ * - no more dlm locking operations should be issued (->lm_lock()).
+ */
- lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP);
GLOCK_BUG_ON(gl, gl->gl_state == target);
GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
- if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
- glops->go_inval) {
- /*
- * If another process is already doing the invalidate, let that
- * finish first. The glock state machine will get back to this
- * holder again later.
- */
- if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS,
- &gl->gl_flags))
- return;
- do_error(gl, 0); /* Fail queued try locks */
- }
- gl->gl_req = target;
- set_bit(GLF_BLOCKING, &gl->gl_flags);
- if ((gl->gl_req == LM_ST_UNLOCKED) ||
- (gl->gl_state == LM_ST_EXCLUSIVE) ||
- (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
- clear_bit(GLF_BLOCKING, &gl->gl_flags);
- if (!glops->go_inval && !glops->go_sync)
+
+ if (!glops->go_inval || !glops->go_sync)
goto skip_inval;
spin_unlock(&gl->gl_lockref.lock);
- if (glops->go_sync) {
+ if (!gfs2_withdrawn(sdp)) {
ret = glops->go_sync(gl);
- /* If we had a problem syncing (due to io errors or whatever,
- * we should not invalidate the metadata or tell dlm to
- * release the glock to other nodes.
- */
if (ret) {
if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
- fs_err(sdp, "Error %d syncing glock \n", ret);
+ fs_err(sdp, "Error %d syncing glock\n", ret);
gfs2_dump_glock(NULL, gl, true);
+ gfs2_withdraw(sdp);
}
- spin_lock(&gl->gl_lockref.lock);
- goto skip_inval;
}
}
- if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) {
- /*
- * The call to go_sync should have cleared out the ail list.
- * If there are still items, we have a problem. We ought to
- * withdraw, but we can't because the withdraw code also uses
- * glocks. Warn about the error, dump the glock, then fall
- * through and wait for logd to do the withdraw for us.
- */
- if ((atomic_read(&gl->gl_ail_count) != 0) &&
- (!cmpxchg(&sdp->sd_log_error, 0, -EIO))) {
- gfs2_glock_assert_warn(gl,
- !atomic_read(&gl->gl_ail_count));
- gfs2_dump_glock(NULL, gl, true);
- }
+
+ if (target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED)
glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
- clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
- }
spin_lock(&gl->gl_lockref.lock);
skip_inval:
- gl->gl_lockref.count++;
- /*
- * Check for an error encountered since we called go_sync and go_inval.
- * If so, we can't withdraw from the glock code because the withdraw
- * code itself uses glocks (see function signal_our_withdraw) to
- * change the mount to read-only. Most importantly, we must not call
- * dlm to unlock the glock until the journal is in a known good state
- * (after journal replay) otherwise other nodes may use the object
- * (rgrp or dinode) and then later, journal replay will corrupt the
- * file system. The best we can do here is wait for the logd daemon
- * to see sd_log_error and withdraw, and in the meantime, requeue the
- * work for later.
- *
- * We make a special exception for some system glocks, such as the
- * system statfs inode glock, which needs to be granted before the
- * gfs2_quotad daemon can exit, and that exit needs to finish before
- * we can unmount the withdrawn file system.
- *
- * However, if we're just unlocking the lock (say, for unmount, when
- * gfs2_gl_hash_clear calls clear_glock) and recovery is complete
- * then it's okay to tell dlm to unlock it.
- */
- if (unlikely(sdp->sd_log_error) && !gfs2_withdrawing_or_withdrawn(sdp))
- gfs2_withdraw_delayed(sdp);
- if (glock_blocked_by_withdraw(gl) &&
- (target != LM_ST_UNLOCKED ||
- test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
- if (!is_system_glock(gl)) {
- request_demote(gl, LM_ST_UNLOCKED, 0, false);
- /*
- * Ordinarily, we would call dlm and its callback would call
- * finish_xmote, which would call state_change() to the new state.
- * Since we withdrew, we won't call dlm, so call state_change
- * manually, but to the UNLOCKED state we desire.
- */
- state_change(gl, LM_ST_UNLOCKED);
- /*
- * We skip telling dlm to do the locking, so we won't get a
- * reply that would otherwise clear GLF_LOCK. So we clear it here.
- */
- clear_bit(GLF_LOCK, &gl->gl_flags);
- clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
- gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
- return;
- } else {
- clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
- }
+ if (gfs2_withdrawn(sdp)) {
+ if (target != LM_ST_UNLOCKED)
+ target = LM_OUT_ERROR;
+ goto out;
}
if (ls->ls_ops->lm_lock) {
+ set_bit(GLF_PENDING_REPLY, &gl->gl_flags);
spin_unlock(&gl->gl_lockref.lock);
- ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
+ ret = ls->ls_ops->lm_lock(gl, target, gh ? gh->gh_flags : 0);
spin_lock(&gl->gl_lockref.lock);
- if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
- target == LM_ST_UNLOCKED &&
- test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+ if (!ret) {
+ /* The operation will be completed asynchronously. */
+ gl->gl_lockref.count++;
+ return;
+ }
+ clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
+
+ if (ret == -ENODEV) {
/*
* The lockspace has been released and the lock has
* been unlocked implicitly.
*/
- } else if (ret) {
- fs_err(sdp, "lm_lock ret %d\n", ret);
- target = gl->gl_state | LM_OUT_ERROR;
+ if (target != LM_ST_UNLOCKED) {
+ target = LM_OUT_ERROR;
+ goto out;
+ }
} else {
- /* The operation will be completed asynchronously. */
+ fs_err(sdp, "lm_lock ret %d\n", ret);
+ GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp));
return;
}
}
+out:
/* Complete the operation now. */
finish_xmote(gl, target);
+ gl->gl_lockref.count++;
gfs2_glock_queue_work(gl, 0);
}
@@ -843,16 +749,26 @@ static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
- struct gfs2_holder *gh = NULL;
+ struct gfs2_holder *gh;
if (test_bit(GLF_LOCK, &gl->gl_flags))
return;
set_bit(GLF_LOCK, &gl->gl_flags);
+ /*
+ * The GLF_DEMOTE_IN_PROGRESS flag is only set intermittently during
+ * locking operations. We have just started a locking operation by
+	 * setting the GLF_LOCK flag, so the GLF_DEMOTE_IN_PROGRESS flag cannot
+	 * be set here.
+ */
GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
- if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
- gl->gl_demote_state != gl->gl_state) {
+ if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
+ if (gl->gl_demote_state == gl->gl_state) {
+ gfs2_demote_wake(gl);
+ goto promote;
+ }
+
if (find_first_holder(gl))
goto out_unlock;
if (nonblock)
@@ -860,29 +776,33 @@ __acquires(&gl->gl_lockref.lock)
set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
gl->gl_target = gl->gl_demote_state;
- } else {
- if (test_bit(GLF_DEMOTE, &gl->gl_flags))
- gfs2_demote_wake(gl);
- if (do_promote(gl))
- goto out_unlock;
- gh = find_first_waiter(gl);
- gl->gl_target = gh->gh_state;
- if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
- do_error(gl, 0); /* Fail queued try locks */
+ do_xmote(gl, NULL, gl->gl_target);
+ return;
}
+
+promote:
+ do_promote(gl);
+ if (find_first_holder(gl))
+ goto out_unlock;
+ gh = find_first_waiter(gl);
+ if (!gh)
+ goto out_unlock;
+ if (nonblock)
+ goto out_sched;
+ gl->gl_target = gh->gh_state;
+ if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
+ do_error(gl, 0); /* Fail queued try locks */
do_xmote(gl, gh, gl->gl_target);
return;
out_sched:
clear_bit(GLF_LOCK, &gl->gl_flags);
- smp_mb__after_atomic();
gl->gl_lockref.count++;
gfs2_glock_queue_work(gl, 0);
return;
out_unlock:
clear_bit(GLF_LOCK, &gl->gl_flags);
- smp_mb__after_atomic();
}
/**
@@ -898,12 +818,8 @@ void glock_set_object(struct gfs2_glock *gl, void *object)
prev_object = gl->gl_object;
gl->gl_object = object;
spin_unlock(&gl->gl_lockref.lock);
- if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL)) {
- pr_warn("glock=%u/%llx\n",
- gl->gl_name.ln_type,
- (unsigned long long)gl->gl_name.ln_number);
+ if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL))
gfs2_dump_glock(NULL, gl, true);
- }
}
/**
@@ -919,12 +835,8 @@ void glock_clear_object(struct gfs2_glock *gl, void *object)
prev_object = gl->gl_object;
gl->gl_object = NULL;
spin_unlock(&gl->gl_lockref.lock);
- if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object)) {
- pr_warn("glock=%u/%llx\n",
- gl->gl_name.ln_type,
- (unsigned long long)gl->gl_name.ln_number);
+ if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object))
gfs2_dump_glock(NULL, gl, true);
- }
}
void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
@@ -959,14 +871,33 @@ static void gfs2_glock_poke(struct gfs2_glock *gl)
gfs2_holder_uninit(&gh);
}
-static void gfs2_try_evict(struct gfs2_glock *gl)
+static struct gfs2_inode *gfs2_grab_existing_inode(struct gfs2_glock *gl)
+{
+ struct gfs2_inode *ip;
+
+ spin_lock(&gl->gl_lockref.lock);
+ ip = gl->gl_object;
+ if (ip && !igrab(&ip->i_inode))
+ ip = NULL;
+ spin_unlock(&gl->gl_lockref.lock);
+ if (ip) {
+ wait_on_new_inode(&ip->i_inode);
+ if (is_bad_inode(&ip->i_inode)) {
+ iput(&ip->i_inode);
+ ip = NULL;
+ }
+ }
+ return ip;
+}
+
+static void gfs2_try_to_evict(struct gfs2_glock *gl)
{
struct gfs2_inode *ip;
/*
* If there is contention on the iopen glock and we have an inode, try
* to grab and release the inode so that it can be evicted. The
- * GIF_DEFER_DELETE flag indicates to gfs2_evict_inode() that the inode
+ * GLF_DEFER_DELETE flag indicates to gfs2_evict_inode() that the inode
* should not be deleted locally. This will allow the remote node to
* go ahead and delete the inode without us having to do it, which will
* avoid rgrp glock thrashing.
@@ -976,32 +907,15 @@ static void gfs2_try_evict(struct gfs2_glock *gl)
* happened below. (Verification is triggered by the call to
* gfs2_queue_verify_delete() in gfs2_evict_inode().)
*/
- spin_lock(&gl->gl_lockref.lock);
- ip = gl->gl_object;
- if (ip && !igrab(&ip->i_inode))
- ip = NULL;
- spin_unlock(&gl->gl_lockref.lock);
- if (ip) {
- wait_on_inode(&ip->i_inode);
- if (is_bad_inode(&ip->i_inode)) {
- iput(&ip->i_inode);
- ip = NULL;
- }
- }
+ ip = gfs2_grab_existing_inode(gl);
if (ip) {
- set_bit(GIF_DEFER_DELETE, &ip->i_flags);
+ set_bit(GLF_DEFER_DELETE, &gl->gl_flags);
d_prune_aliases(&ip->i_inode);
iput(&ip->i_inode);
+ clear_bit(GLF_DEFER_DELETE, &gl->gl_flags);
/* If the inode was evicted, gl->gl_object will now be NULL. */
- spin_lock(&gl->gl_lockref.lock);
- ip = gl->gl_object;
- if (ip) {
- clear_bit(GIF_DEFER_DELETE, &ip->i_flags);
- if (!igrab(&ip->i_inode))
- ip = NULL;
- }
- spin_unlock(&gl->gl_lockref.lock);
+ ip = gfs2_grab_existing_inode(gl);
if (ip) {
gfs2_glock_poke(ip->i_gl);
iput(&ip->i_inode);
@@ -1036,8 +950,14 @@ static void delete_work_func(struct work_struct *work)
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
bool verify_delete = test_and_clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);
+ /*
+	 * Checking and clearing GLF_VERIFY_DELETE above ensures that we won't
+	 * immediately process any GLF_VERIFY_DELETE work that the call to
+	 * gfs2_try_to_evict() below queues.
+ */
+
if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
- gfs2_try_evict(gl);
+ gfs2_try_to_evict(gl);
if (verify_delete) {
u64 no_addr = gl->gl_name.ln_number;
@@ -1160,7 +1080,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
const struct gfs2_glock_operations *glops, int create,
struct gfs2_glock **glp)
{
- struct super_block *s = sdp->sd_vfs;
struct lm_lockname name = { .ln_number = number,
.ln_type = glops->go_type,
.ln_sbd = sdp };
@@ -1201,8 +1120,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
if (glops->go_instantiate)
gl->gl_flags |= BIT(GLF_INSTANTIATE_NEEDED);
gl->gl_name = name;
+ lockref_init(&gl->gl_lockref);
lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass);
- gl->gl_lockref.count = 1;
gl->gl_state = LM_ST_UNLOCKED;
gl->gl_target = LM_ST_UNLOCKED;
gl->gl_demote_state = LM_ST_EXCLUSIVE;
@@ -1222,10 +1141,13 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
mapping = gfs2_glock2aspace(gl);
if (mapping) {
+ gfp_t gfp_mask;
+
mapping->a_ops = &gfs2_meta_aops;
- mapping->host = s->s_bdev->bd_mapping->host;
+ mapping->host = sdp->sd_inode;
mapping->flags = 0;
- mapping_set_gfp_mask(mapping, GFP_NOFS);
+ gfp_mask = mapping_gfp_mask(sdp->sd_inode->i_mapping);
+ mapping_set_gfp_mask(mapping, gfp_mask);
mapping->i_private_data = NULL;
mapping->writeback_index = 0;
}
@@ -1252,7 +1174,7 @@ found:
* @state: the state we're requesting
* @flags: the modifier flags
* @gh: the holder structure
- *
+ * @ip: caller's return address for debugging
*/
void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
@@ -1458,13 +1380,29 @@ void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
va_end(args);
}
+static bool gfs2_should_queue_trylock(struct gfs2_glock *gl,
+ struct gfs2_holder *gh)
+{
+ struct gfs2_holder *current_gh, *gh2;
+
+ current_gh = find_first_holder(gl);
+ if (current_gh && !may_grant(gl, current_gh, gh))
+ return false;
+
+ list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
+ if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
+ continue;
+ if (!(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
+ return false;
+ }
+ return true;
+}
+
static inline bool pid_is_meaningful(const struct gfs2_holder *gh)
{
if (!(gh->gh_flags & GL_NOPID))
return true;
- if (gh->gh_state == LM_ST_UNLOCKED)
- return true;
- return false;
+ return !test_bit(HIF_HOLDER, &gh->gh_iflags);
}
/**
@@ -1478,28 +1416,20 @@ static inline bool pid_is_meaningful(const struct gfs2_holder *gh)
*/
static inline void add_to_queue(struct gfs2_holder *gh)
-__releases(&gl->gl_lockref.lock)
-__acquires(&gl->gl_lockref.lock)
{
struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- struct list_head *insert_pt = NULL;
struct gfs2_holder *gh2;
- int try_futile = 0;
GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
GLOCK_BUG_ON(gl, true);
- if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
- if (test_bit(GLF_LOCK, &gl->gl_flags)) {
- struct gfs2_holder *current_gh;
-
- current_gh = find_first_holder(gl);
- try_futile = !may_grant(gl, current_gh, gh);
- }
- if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
- goto fail;
+ if ((gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
+ !gfs2_should_queue_trylock(gl, gh)) {
+ gh->gh_error = GLR_TRYFAILED;
+ gfs2_holder_wake(gh);
+ return;
}
list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
@@ -1511,29 +1441,10 @@ __acquires(&gl->gl_lockref.lock)
continue;
goto trap_recursive;
}
- list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
- if (try_futile &&
- !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
-fail:
- gh->gh_error = GLR_TRYFAILED;
- gfs2_holder_wake(gh);
- return;
- }
- if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
- continue;
- }
trace_gfs2_glock_queue(gh, 1);
gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
- if (likely(insert_pt == NULL)) {
- list_add_tail(&gh->gh_list, &gl->gl_holders);
- return;
- }
- list_add_tail(&gh->gh_list, insert_pt);
- spin_unlock(&gl->gl_lockref.lock);
- if (sdp->sd_lockstruct.ls_ops->lm_cancel)
- sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
- spin_lock(&gl->gl_lockref.lock);
+ list_add_tail(&gh->gh_list, &gl->gl_holders);
return;
trap_recursive:
@@ -1561,9 +1472,10 @@ trap_recursive:
int gfs2_glock_nq(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
int error;
- if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP))
+ if (gfs2_withdrawn(sdp))
return -EIO;
if (gh->gh_flags & GL_NOBLOCK) {
@@ -1588,7 +1500,7 @@ unlock:
gh->gh_error = 0;
spin_lock(&gl->gl_lockref.lock);
add_to_queue(gh);
- if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
+ if (unlikely((LM_FLAG_RECOVER & gh->gh_flags) &&
test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags))) {
set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
gl->gl_lockref.count++;
@@ -1661,7 +1573,6 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
void gfs2_glock_dq(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
spin_lock(&gl->gl_lockref.lock);
if (!gfs2_holder_queued(gh)) {
@@ -1673,29 +1584,19 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
}
if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
- !test_bit(HIF_HOLDER, &gh->gh_iflags)) {
+ !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
+ test_bit(GLF_LOCK, &gl->gl_flags) &&
+ !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
+ !test_bit(GLF_CANCELING, &gl->gl_flags)) {
+ set_bit(GLF_CANCELING, &gl->gl_flags);
spin_unlock(&gl->gl_lockref.lock);
gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl);
wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
spin_lock(&gl->gl_lockref.lock);
- }
-
- /*
- * If we're in the process of file system withdraw, we cannot just
- * dequeue any glocks until our journal is recovered, lest we introduce
- * file system corruption. We need two exceptions to this rule: We need
- * to allow unlocking of nondisk glocks and the glock for our own
- * journal that needs recovery.
- */
- if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
- glock_blocked_by_withdraw(gl) &&
- gh->gh_gl != sdp->sd_jinode_gl) {
- sdp->sd_glock_dqs_held++;
- spin_unlock(&gl->gl_lockref.lock);
- might_sleep();
- wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
- TASK_UNINTERRUPTIBLE);
- spin_lock(&gl->gl_lockref.lock);
+ clear_bit(GLF_CANCELING, &gl->gl_flags);
+ clear_bit(GLF_LOCK, &gl->gl_flags);
+ if (!gfs2_holder_queued(gh))
+ goto out;
}
__gfs2_glock_dq(gh);
@@ -1885,7 +1786,7 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
*
* Glocks are not frozen if (a) the result of the dlm operation is
* an error, (b) the locking operation was an unlock operation or
- * (c) if there is a "noexp" flagged request anywhere in the queue
+ * (c) if there is a "recover" flagged request anywhere in the queue
*
* Returns: 1 if freezing should occur, 0 otherwise
*/
@@ -1902,7 +1803,7 @@ static int gfs2_should_freeze(const struct gfs2_glock *gl)
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
if (test_bit(HIF_HOLDER, &gh->gh_iflags))
continue;
- if (LM_FLAG_NOEXP & gh->gh_flags)
+ if (LM_FLAG_RECOVER & gh->gh_flags)
return 0;
}
@@ -1923,6 +1824,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
spin_lock(&gl->gl_lockref.lock);
+ clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
gl->gl_reply = ret;
if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
@@ -2178,18 +2080,26 @@ static void dump_glock_func(struct gfs2_glock *gl)
dump_glock(NULL, gl, true);
}
-static void withdraw_dq(struct gfs2_glock *gl)
+static void withdraw_glock(struct gfs2_glock *gl)
{
spin_lock(&gl->gl_lockref.lock);
- if (!__lockref_is_dead(&gl->gl_lockref) &&
- glock_blocked_by_withdraw(gl))
+ if (!__lockref_is_dead(&gl->gl_lockref)) {
+ /*
+ * We don't want to write back any more dirty data. Unlock the
+ * remaining inode and resource group glocks; this will cause
+ * their ->go_inval() hooks to toss out all the remaining
+ * cached data, dirty or not.
+ */
+ if (gl->gl_ops->go_inval && gl->gl_state != LM_ST_UNLOCKED)
+ request_demote(gl, LM_ST_UNLOCKED, 0, false);
do_error(gl, LM_OUT_ERROR); /* remove pending waiters */
+ }
spin_unlock(&gl->gl_lockref.lock);
}
-void gfs2_gl_dq_holders(struct gfs2_sbd *sdp)
+void gfs2_withdraw_glocks(struct gfs2_sbd *sdp)
{
- glock_hash_walk(withdraw_dq, sdp);
+ glock_hash_walk(withdraw_glock, sdp);
}
/**
@@ -2250,7 +2160,7 @@ static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
*p++ = 't';
if (flags & LM_FLAG_TRY_1CB)
*p++ = 'T';
- if (flags & LM_FLAG_NOEXP)
+ if (flags & LM_FLAG_RECOVER)
*p++ = 'e';
if (flags & LM_FLAG_ANY)
*p++ = 'A';
@@ -2321,8 +2231,8 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
*p++ = 'y';
if (test_bit(GLF_LFLUSH, gflags))
*p++ = 'f';
- if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
- *p++ = 'i';
+ if (test_bit(GLF_PENDING_REPLY, gflags))
+ *p++ = 'R';
if (test_bit(GLF_HAVE_REPLY, gflags))
*p++ = 'r';
if (test_bit(GLF_INITIAL, gflags))
@@ -2337,8 +2247,6 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
*p++ = 'o';
if (test_bit(GLF_BLOCKING, gflags))
*p++ = 'b';
- if (test_bit(GLF_UNLOCKED, gflags))
- *p++ = 'x';
if (test_bit(GLF_INSTANTIATE_NEEDED, gflags))
*p++ = 'n';
if (test_bit(GLF_INSTANTIATE_IN_PROG, gflags))
@@ -2347,6 +2255,10 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
*p++ = 'e';
if (test_bit(GLF_VERIFY_DELETE, gflags))
*p++ = 'E';
+ if (test_bit(GLF_DEFER_DELETE, gflags))
+ *p++ = 's';
+ if (test_bit(GLF_CANCELING, gflags))
+ *p++ = 'C';
*p = 0;
return buf;
}
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index c171f745650f..55d5985f32a0 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -58,16 +58,20 @@ enum {
* LM_FLAG_TRY_1CB
* Send one blocking callback if TRY is set and the lock is not granted.
*
- * LM_FLAG_NOEXP
+ * LM_FLAG_RECOVER
* GFS sets this flag on lock requests it makes while doing journal recovery.
- * These special requests should not be blocked due to the recovery like
- * ordinary locks would be.
+ * While ordinary requests are blocked until the end of recovery, requests
+ * with this flag set do proceed.
*
* LM_FLAG_ANY
* A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
* also be granted in SHARED. The preferred state is whichever is compatible
* with other granted locks, or the specified state if no other locks exist.
*
+ * In addition, when a lock is already held in EX mode locally, a SHARED or
+ * DEFERRED mode request with the LM_FLAG_ANY flag set will be granted.
+ * (The LM_FLAG_ANY flag is currently only used for SHARED mode requests.)
+ *
* LM_FLAG_NODE_SCOPE
* This holder agrees to share the lock within this node. In other words,
* the glock is held in EX mode according to DLM, but local holders on the
@@ -76,7 +80,7 @@ enum {
#define LM_FLAG_TRY 0x0001
#define LM_FLAG_TRY_1CB 0x0002
-#define LM_FLAG_NOEXP 0x0004
+#define LM_FLAG_RECOVER 0x0004
#define LM_FLAG_ANY 0x0008
#define LM_FLAG_NODE_SCOPE 0x0020
#define GL_ASYNC 0x0040
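For illustration only (not part of the patch): a minimal sketch of how these
holder flags are typically combined, using gfs2_glock_nq_init(),
gfs2_glock_dq_uninit() and GLR_TRYFAILED as they appear elsewhere in this
diff. The glock pointer gl and the surrounding int-returning function are
assumed.

	struct gfs2_holder gh;
	int error;

	/* Ask for SHARED, accept DEFERRED as well (LM_FLAG_ANY), and fail
	 * instead of blocking (LM_FLAG_TRY). */
	error = gfs2_glock_nq_init(gl, LM_ST_SHARED,
				   LM_FLAG_TRY | LM_FLAG_ANY, &gh);
	if (error == GLR_TRYFAILED)
		return 0;		/* lock busy; caller retries later */
	if (error)
		return error;
	/* ... access the object protected by the glock ... */
	gfs2_glock_dq_uninit(&gh);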
@@ -92,12 +96,22 @@ enum {
* LM_OUT_ST_MASK
* Masks the lower two bits of lock state in the returned value.
*
+ * LM_OUT_TRY_AGAIN
+ * The trylock request failed.
+ *
+ * LM_OUT_DEADLOCK
+ * The lock request failed because it would deadlock.
+ *
* LM_OUT_CANCELED
* The lock request was canceled.
*
+ * LM_OUT_ERROR
+ * The lock request timed out or failed.
*/
#define LM_OUT_ST_MASK 0x00000003
+#define LM_OUT_TRY_AGAIN 0x00000020
+#define LM_OUT_DEADLOCK 0x00000010
#define LM_OUT_CANCELED 0x00000008
#define LM_OUT_ERROR 0x00000004
@@ -122,7 +136,7 @@ struct lm_lockops {
void (*lm_first_done) (struct gfs2_sbd *sdp);
void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
unsigned int result);
- void (*lm_unmount) (struct gfs2_sbd *sdp);
+ void (*lm_unmount) (struct gfs2_sbd *sdp, bool clean);
void (*lm_withdraw) (struct gfs2_sbd *sdp);
void (*lm_put_lock) (struct gfs2_glock *gl);
int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
@@ -249,7 +263,7 @@ bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later);
void gfs2_cancel_delete_work(struct gfs2_glock *gl);
void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
-void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
+void gfs2_withdraw_glocks(struct gfs2_sbd *sdp);
void gfs2_glock_thaw(struct gfs2_sbd *sdp);
void gfs2_glock_free(struct gfs2_glock *gl);
void gfs2_glock_free_later(struct gfs2_glock *gl);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index eb4714f299ef..2173ccf5034b 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -11,6 +11,7 @@
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>
+#include <linux/log2.h>
#include "gfs2.h"
#include "incore.h"
@@ -29,8 +30,6 @@
struct workqueue_struct *gfs2_freeze_wq;
-extern struct workqueue_struct *gfs2_control_wq;
-
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
@@ -39,12 +38,12 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
"AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
"state 0x%lx\n",
bh, (unsigned long long)bh->b_blocknr, bh->b_state,
- bh->b_folio->mapping, bh->b_folio->flags);
+ bh->b_folio->mapping, bh->b_folio->flags.f);
fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
gl->gl_name.ln_type, gl->gl_name.ln_number,
gfs2_glock2aspace(gl));
gfs2_lm(sdp, "AIL error\n");
- gfs2_withdraw_delayed(sdp);
+ gfs2_withdraw(sdp);
}
/**
@@ -82,9 +81,6 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
spin_unlock(&sdp->sd_ail_lock);
gfs2_log_unlock(sdp);
-
- if (gfs2_withdrawing(sdp))
- gfs2_withdraw(sdp);
}
@@ -168,7 +164,7 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- struct address_space *metamapping = &sdp->sd_aspace;
+ struct address_space *metamapping = gfs2_aspace(sdp);
struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
const unsigned bsize = sdp->sd_sb.sb_bsize;
loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
@@ -177,7 +173,7 @@ static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
filemap_fdatawrite_range(metamapping, start, end);
error = filemap_fdatawait_range(metamapping, start, end);
- WARN_ON_ONCE(error && !gfs2_withdrawing_or_withdrawn(sdp));
+ WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
mapping_set_error(metamapping, error);
if (error)
gfs2_io_error(sdp);
@@ -225,7 +221,7 @@ static int rgrp_go_sync(struct gfs2_glock *gl)
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- struct address_space *mapping = &sdp->sd_aspace;
+ struct address_space *mapping = gfs2_aspace(sdp);
struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
const unsigned bsize = sdp->sd_sb.sb_bsize;
loff_t start, end;
@@ -236,6 +232,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
gfs2_rgrp_brelse(rgd);
WARN_ON_ONCE(!(flags & DIO_METADATA));
+ gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
truncate_inode_pages_range(mapping, start, end);
}
@@ -362,6 +359,8 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
struct gfs2_inode *ip = gfs2_glock2inode(gl);
+ gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
+
if (flags & DIO_METADATA) {
struct address_space *mapping = gfs2_glock2aspace(gl);
truncate_inode_pages(mapping, 0);
@@ -393,7 +392,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
u16 height, depth;
umode_t mode = be32_to_cpu(str->di_mode);
struct inode *inode = &ip->i_inode;
- bool is_new = inode->i_state & I_NEW;
+ bool is_new = inode_state_read_once(inode) & I_NEW;
if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) {
gfs2_consist_inode(ip);
@@ -450,6 +449,11 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
gfs2_consist_inode(ip);
return -EIO;
}
+ if ((ip->i_diskflags & GFS2_DIF_EXHASH) &&
+ depth < ilog2(sdp->sd_hash_ptrs)) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
ip->i_depth = (u8)depth;
ip->i_entries = be32_to_cpu(str->di_entries);
@@ -601,14 +605,13 @@ static int freeze_go_xmote_bh(struct gfs2_glock *gl)
if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
- error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
- if (gfs2_assert_withdraw_delayed(sdp, !error))
+ error = gfs2_find_jhead(sdp->sd_jdesc, &head);
+ if (gfs2_assert_withdraw(sdp, !error))
return error;
- if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
- GFS2_LOG_HEAD_UNMOUNT))
+ if (gfs2_assert_withdraw(sdp, head.lh_flags &
+ GFS2_LOG_HEAD_UNMOUNT))
return -EIO;
- sdp->sd_log_sequence = head.lh_sequence + 1;
- gfs2_log_pointers_init(sdp, head.lh_blkno);
+ gfs2_log_pointers_init(sdp, &head);
}
return 0;
}
@@ -625,8 +628,7 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
struct gfs2_inode *ip = gl->gl_object;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- if (!remote || sb_rdonly(sdp->sd_vfs) ||
- test_bit(SDF_KILL, &sdp->sd_flags))
+ if (!remote || test_bit(SDF_KILL, &sdp->sd_flags))
return;
if (gl->gl_demote_state == LM_ST_UNLOCKED &&
@@ -637,76 +639,8 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
}
}
-/**
- * inode_go_unlocked - wake up anyone waiting for dlm's unlock ast
- * @gl: glock being unlocked
- *
- * For now, this is only used for the journal inode glock. In withdraw
- * situations, we need to wait for the glock to be unlocked so that we know
- * other nodes may proceed with recovery / journal replay.
- */
-static void inode_go_unlocked(struct gfs2_glock *gl)
-{
- /* Note that we cannot reference gl_object because it's already set
- * to NULL by this point in its lifecycle. */
- if (!test_bit(GLF_UNLOCKED, &gl->gl_flags))
- return;
- clear_bit_unlock(GLF_UNLOCKED, &gl->gl_flags);
- wake_up_bit(&gl->gl_flags, GLF_UNLOCKED);
-}
-
-/**
- * nondisk_go_callback - used to signal when a node did a withdraw
- * @gl: the nondisk glock
- * @remote: true if this came from a different cluster node
- *
- */
-static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
-{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-
- /* Ignore the callback unless it's from another node, and it's the
- live lock. */
- if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
- return;
-
- /* First order of business is to cancel the demote request. We don't
- * really want to demote a nondisk glock. At best it's just to inform
- * us of another node's withdraw. We'll keep it in SH mode. */
- clear_bit(GLF_DEMOTE, &gl->gl_flags);
- clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
-
- /* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
- if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
- test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
- test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
- return;
-
- /* We only care when a node wants us to unlock, because that means
- * they want a journal recovered. */
- if (gl->gl_demote_state != LM_ST_UNLOCKED)
- return;
-
- if (sdp->sd_args.ar_spectator) {
- fs_warn(sdp, "Spectator node cannot recover journals.\n");
- return;
- }
-
- fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
- set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
- /*
- * We can't call remote_withdraw directly here or gfs2_recover_journal
- * because this is called from the glock unlock function and the
- * remote_withdraw needs to enqueue and dequeue the same "live" glock
- * we were called from. So we queue it to the control work queue in
- * lock_dlm.
- */
- queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
-}
-
const struct gfs2_glock_operations gfs2_meta_glops = {
.go_type = LM_TYPE_META,
- .go_flags = GLOF_NONDISK,
};
const struct gfs2_glock_operations gfs2_inode_glops = {
@@ -717,7 +651,6 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
.go_dump = inode_go_dump,
.go_type = LM_TYPE_INODE,
.go_flags = GLOF_ASPACE | GLOF_LVB,
- .go_unlocked = inode_go_unlocked,
};
const struct gfs2_glock_operations gfs2_rgrp_glops = {
@@ -733,36 +666,30 @@ const struct gfs2_glock_operations gfs2_freeze_glops = {
.go_xmote_bh = freeze_go_xmote_bh,
.go_callback = freeze_go_callback,
.go_type = LM_TYPE_NONDISK,
- .go_flags = GLOF_NONDISK,
};
const struct gfs2_glock_operations gfs2_iopen_glops = {
.go_type = LM_TYPE_IOPEN,
.go_callback = iopen_go_callback,
.go_dump = inode_go_dump,
- .go_flags = GLOF_NONDISK,
.go_subclass = 1,
};
const struct gfs2_glock_operations gfs2_flock_glops = {
.go_type = LM_TYPE_FLOCK,
- .go_flags = GLOF_NONDISK,
};
const struct gfs2_glock_operations gfs2_nondisk_glops = {
.go_type = LM_TYPE_NONDISK,
- .go_flags = GLOF_NONDISK,
- .go_callback = nondisk_go_callback,
};
const struct gfs2_glock_operations gfs2_quota_glops = {
.go_type = LM_TYPE_QUOTA,
- .go_flags = GLOF_LVB | GLOF_NONDISK,
+ .go_flags = GLOF_LVB,
};
const struct gfs2_glock_operations gfs2_journal_glops = {
.go_type = LM_TYPE_JOURNAL,
- .go_flags = GLOF_NONDISK,
};
const struct gfs2_glock_operations *gfs2_glops_list[] = {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 4e19cce3d906..d05d8fe4e456 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -223,13 +223,11 @@ struct gfs2_glock_operations {
void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl,
const char *fs_id_buf);
void (*go_callback)(struct gfs2_glock *gl, bool remote);
- void (*go_unlocked)(struct gfs2_glock *gl);
const int go_subclass;
const int go_type;
const unsigned long go_flags;
#define GLOF_ASPACE 1 /* address space attached */
#define GLOF_LVB 2 /* Lock Value Block attached */
-#define GLOF_NONDISK 8 /* not I/O related */
};
enum {
@@ -319,7 +317,6 @@ enum {
GLF_DEMOTE_IN_PROGRESS = 5,
GLF_DIRTY = 6,
GLF_LFLUSH = 7,
- GLF_INVALIDATE_IN_PROGRESS = 8,
GLF_HAVE_REPLY = 9,
GLF_INITIAL = 10,
GLF_HAVE_FROZEN_REPLY = 11,
@@ -327,9 +324,11 @@ enum {
GLF_LRU = 13,
GLF_OBJECT = 14, /* Used only for tracing */
GLF_BLOCKING = 15,
- GLF_UNLOCKED = 16, /* Wait for glock to be unlocked */
GLF_TRY_TO_EVICT = 17, /* iopen glocks only */
GLF_VERIFY_DELETE = 18, /* iopen glocks only */
+ GLF_PENDING_REPLY = 19,
+ GLF_DEFER_DELETE = 20, /* iopen glocks only */
+ GLF_CANCELING = 21,
};
struct gfs2_glock {
@@ -372,11 +371,8 @@ struct gfs2_glock {
enum {
GIF_QD_LOCKED = 1,
- GIF_ALLOC_FAILED = 2,
GIF_SW_PAGED = 3,
- GIF_FREE_VFS_INODE = 5,
GIF_GLOP_PENDING = 6,
- GIF_DEFER_DELETE = 7,
};
struct gfs2_inode {
@@ -521,8 +517,6 @@ struct gfs2_jdesc {
struct list_head jd_revoke_list;
unsigned int jd_replay_tail;
-
- u64 jd_no_addr;
};
struct gfs2_statfs_change_host {
@@ -543,8 +537,7 @@ struct gfs2_statfs_change_host {
#define GFS2_ERRORS_DEFAULT GFS2_ERRORS_WITHDRAW
#define GFS2_ERRORS_WITHDRAW 0
-#define GFS2_ERRORS_CONTINUE 1 /* place holder for future feature */
-#define GFS2_ERRORS_RO 2 /* place holder for future feature */
+#define GFS2_ERRORS_DEACTIVATE 1
#define GFS2_ERRORS_PANIC 3
struct gfs2_args {
@@ -560,7 +553,7 @@ struct gfs2_args {
unsigned int ar_data:2; /* ordered/writeback */
unsigned int ar_meta:1; /* mount metafs */
unsigned int ar_discard:1; /* discard requests */
- unsigned int ar_errors:2; /* errors=withdraw | panic */
+ unsigned int ar_errors:2; /* errors=withdraw | deactivate | panic */
unsigned int ar_nobarrier:1; /* do not send barriers */
unsigned int ar_rgrplvb:1; /* use lvbs for rgrp info */
unsigned int ar_got_rgrplvb:1; /* Was the rgrplvb opt given? */
@@ -586,6 +579,7 @@ struct gfs2_tune {
unsigned int gt_complain_secs;
unsigned int gt_statfs_quantum;
unsigned int gt_statfs_slow;
+ unsigned int gt_withdraw_helper_timeout;
};
enum {
@@ -600,11 +594,6 @@ enum {
SDF_SKIP_DLM_UNLOCK = 8,
SDF_FORCE_AIL_FLUSH = 9,
SDF_FREEZE_INITIATOR = 10,
- SDF_WITHDRAWING = 11, /* Will withdraw eventually */
- SDF_WITHDRAW_IN_PROG = 12, /* Withdraw is in progress */
- SDF_REMOTE_WITHDRAW = 13, /* Performing remote recovery */
- SDF_WITHDRAW_RECOVERY = 14, /* Wait for journal recovery when we are
- withdrawing */
SDF_KILL = 15,
SDF_EVICTING = 16,
SDF_FROZEN = 17,
@@ -657,6 +646,8 @@ struct lm_lockstruct {
struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */
char *ls_lvb_bits;
+ struct rw_semaphore ls_sem;
+
spinlock_t ls_recover_spin; /* protects following fields */
unsigned long ls_recover_flags; /* DFL_ */
uint32_t ls_recover_mount; /* gen in first recover_done cb */
@@ -715,11 +706,13 @@ struct gfs2_sbd {
struct gfs2_glock *sd_rename_gl;
struct gfs2_glock *sd_freeze_gl;
struct work_struct sd_freeze_work;
+ struct work_struct sd_withdraw_work;
wait_queue_head_t sd_kill_wait;
wait_queue_head_t sd_async_glock_wait;
atomic_t sd_glock_disposal;
struct completion sd_locking_init;
- struct completion sd_wdack;
+ struct completion sd_withdraw_helper;
+ int sd_withdraw_helper_status;
struct delayed_work sd_control_work;
/* Inode Stuff */
@@ -760,7 +753,6 @@ struct gfs2_sbd {
struct gfs2_jdesc *sd_jdesc;
struct gfs2_holder sd_journal_gh;
struct gfs2_holder sd_jinode_gh;
- struct gfs2_glock *sd_jinode_gl;
struct gfs2_holder sd_sc_gh;
struct buffer_head *sd_sc_bh;
@@ -793,7 +785,7 @@ struct gfs2_sbd {
/* Log stuff */
- struct address_space sd_aspace;
+ struct inode *sd_inode;
spinlock_t sd_log_lock;
@@ -822,7 +814,6 @@ struct gfs2_sbd {
atomic_t sd_log_in_flight;
wait_queue_head_t sd_log_flush_wait;
int sd_log_error; /* First log error */
- wait_queue_head_t sd_withdraw_wait;
unsigned int sd_log_tail;
unsigned int sd_log_flush_tail;
@@ -846,9 +837,15 @@ struct gfs2_sbd {
unsigned long sd_last_warning;
struct dentry *debugfs_dir; /* debugfs directory */
- unsigned long sd_glock_dqs_held;
};
+#define GFS2_BAD_INO 1
+
+static inline struct address_space *gfs2_aspace(struct gfs2_sbd *sdp)
+{
+ return sdp->sd_inode->i_mapping;
+}
+
static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
{
gl->gl_stats.stats[which]++;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 6fbbaaad1cd0..36618e353199 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -89,6 +89,19 @@ static int iget_set(struct inode *inode, void *opaque)
return 0;
}
+void gfs2_setup_inode(struct inode *inode)
+{
+ gfp_t gfp_mask;
+
+ /*
+ * Ensure all page cache allocations are done from GFP_NOFS context to
+ * prevent direct reclaim recursion back into the filesystem and blowing
+ * stacks or deadlocking.
+ */
+ gfp_mask = mapping_gfp_mask(inode->i_mapping);
+ mapping_set_gfp_mask(inode->i_mapping, gfp_mask & ~__GFP_FS);
+}
+
/**
* gfs2_inode_lookup - Lookup an inode
* @sb: The super block
@@ -127,11 +140,12 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
ip = GFS2_I(inode);
- if (inode->i_state & I_NEW) {
+ if (inode_state_read_once(inode) & I_NEW) {
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_glock *io_gl;
int extra_flags = 0;
+ gfs2_setup_inode(inode);
error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE,
&ip->i_gl);
if (unlikely(error))
@@ -439,6 +453,72 @@ out:
return error;
}
+static void gfs2_final_release_pages(struct gfs2_inode *ip)
+{
+ struct inode *inode = &ip->i_inode;
+ struct gfs2_glock *gl = ip->i_gl;
+
+ /* This can only happen during incomplete inode creation. */
+ if (unlikely(!gl))
+ return;
+
+ truncate_inode_pages(gfs2_glock2aspace(gl), 0);
+ truncate_inode_pages(&inode->i_data, 0);
+
+ if (atomic_read(&gl->gl_revokes) == 0) {
+ clear_bit(GLF_LFLUSH, &gl->gl_flags);
+ clear_bit(GLF_DIRTY, &gl->gl_flags);
+ }
+}
+
+int gfs2_dinode_dealloc(struct gfs2_inode *ip)
+{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ struct gfs2_rgrpd *rgd;
+ struct gfs2_holder gh;
+ int error;
+
+ if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
+
+ gfs2_rindex_update(sdp);
+
+ error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
+ if (error)
+ return error;
+
+ rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
+ if (!rgd) {
+ gfs2_consist_inode(ip);
+ error = -EIO;
+ goto out_qs;
+ }
+
+ error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+ LM_FLAG_NODE_SCOPE, &gh);
+ if (error)
+ goto out_qs;
+
+ error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
+ sdp->sd_jdesc->jd_blocks);
+ if (error)
+ goto out_rg_gunlock;
+
+ gfs2_free_di(rgd, ip);
+
+ gfs2_final_release_pages(ip);
+
+ gfs2_trans_end(sdp);
+
+out_rg_gunlock:
+ gfs2_glock_dq_uninit(&gh);
+out_qs:
+ gfs2_quota_unhold(ip);
+ return error;
+}
+
static void gfs2_init_dir(struct buffer_head *dibh,
const struct gfs2_inode *parent)
{
@@ -629,10 +709,11 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
struct gfs2_inode *dip = GFS2_I(dir), *ip;
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct gfs2_glock *io_gl;
- int error;
+ int error, dealloc_error;
u32 aflags = 0;
unsigned blocks = 1;
struct gfs2_diradd da = { .bh = NULL, .save_loc = 1, };
+ bool xattr_initialized = false;
if (!name->len || name->len > GFS2_FNAMESIZE)
return -ENAMETOOLONG;
@@ -659,7 +740,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (!IS_ERR(inode)) {
if (S_ISDIR(inode->i_mode)) {
iput(inode);
- inode = ERR_PTR(-EISDIR);
+ inode = NULL;
+ error = -EISDIR;
goto fail_gunlock;
}
d_instantiate(dentry, inode);
@@ -684,6 +766,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = -ENOMEM;
if (!inode)
goto fail_gunlock;
+ gfs2_setup_inode(inode);
ip = GFS2_I(inode);
error = posix_acl_create(dir, &mode, &default_acl, &acl);
@@ -744,11 +827,11 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
if (error)
- goto fail_free_inode;
+ goto fail_dealloc_inode;
error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
if (error)
- goto fail_free_inode;
+ goto fail_dealloc_inode;
gfs2_cancel_delete_work(io_gl);
io_gl->gl_no_formal_ino = ip->i_no_formal_ino;
@@ -767,13 +850,16 @@ retry:
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
if (error)
goto fail_gunlock3;
+ clear_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);
error = gfs2_trans_begin(sdp, blocks, 0);
if (error)
goto fail_gunlock3;
- if (blocks > 1)
+ if (blocks > 1) {
gfs2_init_xattr(ip);
+ xattr_initialized = true;
+ }
init_dinode(dip, ip, symname);
gfs2_trans_end(sdp);
@@ -828,6 +914,17 @@ fail_gunlock3:
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
fail_gunlock2:
gfs2_glock_put(io_gl);
+fail_dealloc_inode:
+ dealloc_error = 0;
+ if (ip->i_eattr)
+ dealloc_error = gfs2_ea_dealloc(ip, xattr_initialized);
+ clear_nlink(inode);
+ mark_inode_dirty(inode);
+ if (!dealloc_error)
+ dealloc_error = gfs2_dinode_dealloc(ip);
+ if (dealloc_error)
+ fs_warn(sdp, "%s: %d\n", __func__, dealloc_error);
+ ip->i_no_addr = 0;
fail_free_inode:
if (ip->i_gl) {
gfs2_glock_put(ip->i_gl);
@@ -842,11 +939,7 @@ fail_gunlock:
gfs2_dir_no_add(&da);
gfs2_glock_dq_uninit(&d_gh);
if (!IS_ERR_OR_NULL(inode)) {
- set_bit(GIF_ALLOC_FAILED, &ip->i_flags);
- clear_nlink(inode);
- if (ip->i_no_addr)
- mark_inode_dirty(inode);
- if (inode->i_state & I_NEW)
+ if (inode_state_read_once(inode) & I_NEW)
iget_failed(inode);
else
iput(inode);
@@ -1248,14 +1341,15 @@ static int gfs2_symlink(struct mnt_idmap *idmap, struct inode *dir,
* @dentry: The dentry of the new directory
* @mode: The mode of the new directory
*
- * Returns: errno
+ * Returns: the dentry, or ERR_PTR(errno)
*/
-static int gfs2_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *gfs2_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
unsigned dsize = gfs2_max_stuffed_size(GFS2_I(dir));
- return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0);
+
+ return ERR_PTR(gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0));
}
/**
@@ -1289,27 +1383,19 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned flags,
umode_t mode)
{
- struct dentry *d;
bool excl = !!(flags & O_EXCL);
- if (!d_in_lookup(dentry))
- goto skip_lookup;
-
- d = __gfs2_lookup(dir, dentry, file);
- if (IS_ERR(d))
- return PTR_ERR(d);
- if (d != NULL)
- dentry = d;
- if (d_really_is_positive(dentry)) {
- if (!(file->f_mode & FMODE_OPENED))
+ if (d_in_lookup(dentry)) {
+ struct dentry *d = __gfs2_lookup(dir, dentry, file);
+ if (file->f_mode & FMODE_OPENED) {
+ if (IS_ERR(d))
+ return PTR_ERR(d);
+ dput(d);
+ return excl && (flags & O_CREAT) ? -EEXIST : 0;
+ }
+ if (d || d_really_is_positive(dentry))
return finish_no_open(file, d);
- dput(d);
- return excl && (flags & O_CREAT) ? -EEXIST : 0;
}
-
- BUG_ON(d != NULL);
-
-skip_lookup:
if (!(flags & O_CREAT))
return -ENOENT;
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 9e5e1622d50a..2fcd96dd1361 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -44,17 +44,17 @@ static inline int gfs2_is_dir(const struct gfs2_inode *ip)
static inline void gfs2_set_inode_blocks(struct inode *inode, u64 blocks)
{
- inode->i_blocks = blocks << (inode->i_blkbits - 9);
+ inode->i_blocks = blocks << (inode->i_blkbits - SECTOR_SHIFT);
}
static inline u64 gfs2_get_inode_blocks(const struct inode *inode)
{
- return inode->i_blocks >> (inode->i_blkbits - 9);
+ return inode->i_blocks >> (inode->i_blkbits - SECTOR_SHIFT);
}
static inline void gfs2_add_inode_blocks(struct inode *inode, s64 change)
{
- change <<= inode->i_blkbits - 9;
+ change <<= inode->i_blkbits - SECTOR_SHIFT;
gfs2_assert(GFS2_SB(inode), (change >= 0 || inode->i_blocks >= -change));
inode->i_blocks += change;
}
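As a worked example only (not part of the patch): with a 4 KiB filesystem
block size, i_blkbits is 12 and SECTOR_SHIFT is 9, so each filesystem block
accounts for 1 << (12 - 9) = 8 of the 512-byte units that inode->i_blocks
counts. A quick sketch of the round trip under that assumed block size:

	u64 fs_blocks = 3;
	u64 i_blocks = fs_blocks << (12 - SECTOR_SHIFT);	/* 3 << 3 == 24 */
	u64 back     = i_blocks  >> (12 - SECTOR_SHIFT);	/* 24 >> 3 == 3 */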
@@ -86,12 +86,14 @@ err:
return -EIO;
}
+void gfs2_setup_inode(struct inode *inode);
struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
u64 no_addr, u64 no_formal_ino,
unsigned int blktype);
struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
u64 no_formal_ino,
unsigned int blktype);
+int gfs2_dinode_dealloc(struct gfs2_inode *ip);
struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
int is_root);
@@ -106,9 +108,9 @@ loff_t gfs2_seek_hole(struct file *file, loff_t offset);
extern const struct file_operations gfs2_file_fops_nolock;
extern const struct file_operations gfs2_dir_fops_nolock;
-int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+int gfs2_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
int gfs2_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa);
+ struct dentry *dentry, struct file_kattr *fa);
void gfs2_set_inode_flags(struct inode *inode);
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 58aeeae7ed8c..b8d249925395 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -15,9 +15,6 @@
#include <linux/sched/signal.h>
#include "incore.h"
-#include "glock.h"
-#include "glops.h"
-#include "recovery.h"
#include "util.h"
#include "sys.h"
#include "trace_gfs2.h"
@@ -58,6 +55,7 @@ static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
/**
* gfs2_update_reply_times - Update locking statistics
* @gl: The glock to update
+ * @blocking: The operation may have been blocking
*
* This assumes that gl->gl_dstamp has been set earlier.
*
@@ -72,12 +70,12 @@ static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
* TRY_1CB flags are set are classified as non-blocking. All
* other DLM requests are counted as (potentially) blocking.
*/
-static inline void gfs2_update_reply_times(struct gfs2_glock *gl)
+static inline void gfs2_update_reply_times(struct gfs2_glock *gl,
+ bool blocking)
{
struct gfs2_pcpu_lkstats *lks;
const unsigned gltype = gl->gl_name.ln_type;
- unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?
- GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
+ unsigned index = blocking ? GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
s64 rtt;
preempt_disable();
@@ -119,14 +117,18 @@ static inline void gfs2_update_request_times(struct gfs2_glock *gl)
static void gdlm_ast(void *arg)
{
struct gfs2_glock *gl = arg;
- unsigned ret = gl->gl_state;
+ bool blocking;
+ unsigned ret;
+
+ blocking = test_bit(GLF_BLOCKING, &gl->gl_flags);
+ gfs2_update_reply_times(gl, blocking);
+ clear_bit(GLF_BLOCKING, &gl->gl_flags);
/* If the glock is dead, we only react to a dlm_unlock() reply. */
if (__lockref_is_dead(&gl->gl_lockref) &&
gl->gl_lksb.sb_status != -DLM_EUNLOCK)
return;
- gfs2_update_reply_times(gl);
BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
@@ -134,18 +136,19 @@ static void gdlm_ast(void *arg)
switch (gl->gl_lksb.sb_status) {
case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
- if (gl->gl_ops->go_unlocked)
- gl->gl_ops->go_unlocked(gl);
gfs2_glock_free(gl);
return;
case -DLM_ECANCEL: /* Cancel while getting lock */
- ret |= LM_OUT_CANCELED;
+ ret = LM_OUT_CANCELED;
goto out;
case -EAGAIN: /* Try lock fails */
+ ret = LM_OUT_TRY_AGAIN;
+ goto out;
case -EDEADLK: /* Deadlock detected */
+ ret = LM_OUT_DEADLOCK;
goto out;
case -ETIMEDOUT: /* Canceled due to timeout */
- ret |= LM_OUT_ERROR;
+ ret = LM_OUT_ERROR;
goto out;
case 0: /* Success */
break;
@@ -154,14 +157,6 @@ static void gdlm_ast(void *arg)
}
ret = gl->gl_req;
- if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
- if (gl->gl_req == LM_ST_SHARED)
- ret = LM_ST_DEFERRED;
- else if (gl->gl_req == LM_ST_DEFERRED)
- ret = LM_ST_SHARED;
- else
- BUG();
- }
/*
* The GLF_INITIAL flag is initially set for new glocks. Upon the
@@ -238,7 +233,7 @@ static bool down_conversion(int cur, int req)
}
static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
- const int cur, const int req)
+ const int req, bool blocking)
{
u32 lkf = 0;
@@ -253,15 +248,6 @@ static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
lkf |= DLM_LKF_NOQUEUEBAST;
}
- if (gfs_flags & LM_FLAG_ANY) {
- if (req == DLM_LOCK_PR)
- lkf |= DLM_LKF_ALTCW;
- else if (req == DLM_LOCK_CW)
- lkf |= DLM_LKF_ALTPR;
- else
- BUG();
- }
-
if (!test_bit(GLF_INITIAL, &gl->gl_flags)) {
lkf |= DLM_LKF_CONVERT;
@@ -271,7 +257,7 @@ static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
* "upward" lock conversions or else DLM will reject the
* request as invalid.
*/
- if (!down_conversion(cur, req))
+ if (blocking)
lkf |= DLM_LKF_QUECVT;
}
@@ -291,14 +277,20 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
unsigned int flags)
{
struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
+ bool blocking;
int cur, req;
u32 lkf;
char strname[GDLM_STRNAME_BYTES] = "";
int error;
+ gl->gl_req = req_state;
cur = make_mode(gl->gl_name.ln_sbd, gl->gl_state);
req = make_mode(gl->gl_name.ln_sbd, req_state);
- lkf = make_flags(gl, flags, cur, req);
+ blocking = !down_conversion(cur, req) &&
+ !(flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB));
+ lkf = make_flags(gl, flags, req, blocking);
+ if (blocking)
+ set_bit(GLF_BLOCKING, &gl->gl_flags);
gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
if (test_bit(GLF_INITIAL, &gl->gl_flags)) {
@@ -315,8 +307,13 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
*/
again:
- error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
- GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
+ down_read(&ls->ls_sem);
+ error = -ENODEV;
+ if (likely(ls->ls_dlm != NULL)) {
+ error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
+ GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
+ }
+ up_read(&ls->ls_sem);
if (error == -EBUSY) {
msleep(20);
goto again;
@@ -328,6 +325,7 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ uint32_t flags = 0;
int error;
BUG_ON(!__lockref_is_dead(&gl->gl_lockref));
@@ -337,22 +335,15 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
return;
}
- clear_bit(GLF_BLOCKING, &gl->gl_flags);
gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
gfs2_update_request_times(gl);
- /* don't want to call dlm if we've unmounted the lock protocol */
- if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
- gfs2_glock_free(gl);
- return;
- }
-
/*
* When the lockspace is released, all remaining glocks will be
* unlocked automatically. This is more efficient than unlocking them
* individually, but when the lock is held in DLM_LOCK_EX or
- * DLM_LOCK_PW mode, the lock value block (LVB) will be lost.
+ * DLM_LOCK_PW mode, the lock value block (LVB) would be lost.
*/
if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
@@ -361,14 +352,27 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
return;
}
+ if (gl->gl_lksb.sb_lvbptr)
+ flags |= DLM_LKF_VALBLK;
+
again:
- error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
- NULL, gl);
+ down_read(&ls->ls_sem);
+ error = -ENODEV;
+ if (likely(ls->ls_dlm != NULL)) {
+ error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, flags,
+ NULL, gl);
+ }
+ up_read(&ls->ls_sem);
if (error == -EBUSY) {
msleep(20);
goto again;
}
+ if (error == -ENODEV) {
+ gfs2_glock_free(gl);
+ return;
+ }
+
if (error) {
fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
gl->gl_name.ln_type,
@@ -379,13 +383,17 @@ again:
static void gdlm_cancel(struct gfs2_glock *gl)
{
struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
- dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
+
+ down_read(&ls->ls_sem);
+ if (likely(ls->ls_dlm != NULL)) {
+ dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
+ }
+ up_read(&ls->ls_sem);
}
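A hedged distillation (not part of the patch) of the pattern the lock_dlm.c
hunks above keep repeating: every dlm_lock()/dlm_unlock() call is made with
ls->ls_sem held for read, and a NULL ls->ls_dlm (meaning the lockspace has
already been released) is mapped to -ENODEV. A hypothetical helper showing
the shape; the patch itself open-codes this at each call site:

	static int gdlm_guarded(struct lm_lockstruct *ls,
				int (*op)(struct lm_lockstruct *ls, void *arg),
				void *arg)
	{
		int error = -ENODEV;

		down_read(&ls->ls_sem);
		if (likely(ls->ls_dlm != NULL))
			error = op(ls, arg);	/* the actual dlm call */
		up_read(&ls->ls_sem);
		return error;
	}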
/*
* dlm/gfs2 recovery coordination using dlm_recover callbacks
*
- * 0. gfs2 checks for another cluster node withdraw, needing journal replay
* 1. dlm_controld sees lockspace members change
* 2. dlm_controld blocks dlm-kernel locking activity
* 3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep)
@@ -560,7 +568,11 @@ static int sync_unlock(struct gfs2_sbd *sdp, struct dlm_lksb *lksb, char *name)
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
int error;
- error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
+ down_read(&ls->ls_sem);
+ error = -ENODEV;
+ if (likely(ls->ls_dlm != NULL))
+ error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
+ up_read(&ls->ls_sem);
if (error) {
fs_err(sdp, "%s lkid %x error %d\n",
name, lksb->sb_lkid, error);
@@ -587,9 +599,14 @@ static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags,
memset(strname, 0, GDLM_STRNAME_BYTES);
snprintf(strname, GDLM_STRNAME_BYTES, "%8x%16x", LM_TYPE_NONDISK, num);
- error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
- strname, GDLM_STRNAME_BYTES - 1,
- 0, sync_wait_cb, ls, NULL);
+ down_read(&ls->ls_sem);
+ error = -ENODEV;
+ if (likely(ls->ls_dlm != NULL)) {
+ error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
+ strname, GDLM_STRNAME_BYTES - 1,
+ 0, sync_wait_cb, ls, NULL);
+ }
+ up_read(&ls->ls_sem);
if (error) {
fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n",
name, lksb->sb_lkid, flags, mode, error);
@@ -634,28 +651,6 @@ static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
&ls->ls_control_lksb, "control_lock");
}
-/**
- * remote_withdraw - react to a node withdrawing from the file system
- * @sdp: The superblock
- */
-static void remote_withdraw(struct gfs2_sbd *sdp)
-{
- struct gfs2_jdesc *jd;
- int ret = 0, count = 0;
-
- list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
- if (jd->jd_jid == sdp->sd_lockstruct.ls_jid)
- continue;
- ret = gfs2_recover_journal(jd, true);
- if (ret)
- break;
- count++;
- }
-
- /* Now drop the additional reference we acquired */
- fs_err(sdp, "Journals checked: %d, ret = %d.\n", count, ret);
-}
-
static void gfs2_control_func(struct work_struct *work)
{
struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
@@ -666,13 +661,6 @@ static void gfs2_control_func(struct work_struct *work)
int recover_size;
int i, error;
- /* First check for other nodes that may have done a withdraw. */
- if (test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags)) {
- remote_withdraw(sdp);
- clear_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
- return;
- }
-
spin_lock(&ls->ls_recover_spin);
/*
* No MOUNT_DONE means we're still mounting; control_mount()
@@ -996,14 +984,15 @@ locks_done:
if (sdp->sd_args.ar_spectator) {
fs_info(sdp, "Recovery is required. Waiting for a "
"non-spectator to mount.\n");
+ spin_unlock(&ls->ls_recover_spin);
msleep_interruptible(1000);
} else {
fs_info(sdp, "control_mount wait1 block %u start %u "
"mount %u lvb %u flags %lx\n", block_gen,
start_gen, mount_gen, lvb_gen,
ls->ls_recover_flags);
+ spin_unlock(&ls->ls_recover_spin);
}
- spin_unlock(&ls->ls_recover_spin);
goto restart;
}
@@ -1171,7 +1160,7 @@ static void gdlm_recover_prep(void *arg)
struct gfs2_sbd *sdp = arg;
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
- if (gfs2_withdrawing_or_withdrawn(sdp)) {
+ if (gfs2_withdrawn(sdp)) {
fs_err(sdp, "recover_prep ignored due to withdraw.\n");
return;
}
@@ -1197,7 +1186,7 @@ static void gdlm_recover_slot(void *arg, struct dlm_slot *slot)
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
int jid = slot->slot - 1;
- if (gfs2_withdrawing_or_withdrawn(sdp)) {
+ if (gfs2_withdrawn(sdp)) {
fs_err(sdp, "recover_slot jid %d ignored due to withdraw.\n",
jid);
return;
@@ -1226,7 +1215,7 @@ static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots,
struct gfs2_sbd *sdp = arg;
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
- if (gfs2_withdrawing_or_withdrawn(sdp)) {
+ if (gfs2_withdrawn(sdp)) {
fs_err(sdp, "recover_done ignored due to withdraw.\n");
return;
}
@@ -1257,7 +1246,7 @@ static void gdlm_recovery_result(struct gfs2_sbd *sdp, unsigned int jid,
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
- if (gfs2_withdrawing_or_withdrawn(sdp)) {
+ if (gfs2_withdrawn(sdp)) {
fs_err(sdp, "recovery_result jid %d ignored due to withdraw.\n",
jid);
return;
@@ -1315,6 +1304,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
*/
INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);
+ ls->ls_dlm = NULL;
spin_lock_init(&ls->ls_recover_spin);
ls->ls_recover_flags = 0;
ls->ls_recover_mount = 0;
@@ -1349,6 +1339,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
* create/join lockspace
*/
+ init_rwsem(&ls->ls_sem);
error = dlm_new_lockspace(fsname, cluster, flags, GDLM_LVB_SIZE,
&gdlm_lockspace_ops, sdp, &ops_result,
&ls->ls_dlm);
@@ -1392,7 +1383,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
return 0;
fail_release:
- dlm_release_lockspace(ls->ls_dlm, 2);
+ dlm_release_lockspace(ls->ls_dlm, DLM_RELEASE_NORMAL);
fail_free:
free_recover_size(ls);
fail:
@@ -1412,7 +1403,15 @@ static void gdlm_first_done(struct gfs2_sbd *sdp)
fs_err(sdp, "mount first_done error %d\n", error);
}
-static void gdlm_unmount(struct gfs2_sbd *sdp)
+/*
+ * gdlm_unmount - release our lockspace
+ * @sdp: the superblock
+ * @clean: When true, the remaining cluster nodes do not need to recover our
+ *         journal. Recovery is necessary when a node withdraws and its
+ *         journal remains dirty; it isn't necessary when a node cleanly
+ *         unmounts a filesystem.
+ */
+static void gdlm_unmount(struct gfs2_sbd *sdp, bool clean)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
@@ -1428,10 +1427,14 @@ static void gdlm_unmount(struct gfs2_sbd *sdp)
/* mounted_lock and control_lock will be purged in dlm recovery */
release:
+ down_write(&ls->ls_sem);
if (ls->ls_dlm) {
- dlm_release_lockspace(ls->ls_dlm, 2);
+ dlm_release_lockspace(ls->ls_dlm,
+ clean ? DLM_RELEASE_NORMAL :
+ DLM_RELEASE_RECOVER);
ls->ls_dlm = NULL;
}
+ up_write(&ls->ls_sem);
free_recover_size(ls);
}
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index f9c5089783d2..8312cd2cdae4 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -31,6 +31,7 @@
#include "dir.h"
#include "trace_gfs2.h"
#include "trans.h"
+#include "aops.h"
static void gfs2_log_shutdown(struct gfs2_sbd *sdp);
@@ -111,13 +112,11 @@ __acquires(&sdp->sd_ail_lock)
&tr->tr_ail2_list);
continue;
}
- if (!cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
+ if (!cmpxchg(&sdp->sd_log_error, 0, -EIO))
gfs2_io_error_bh(sdp, bh);
- gfs2_withdraw_delayed(sdp);
- }
}
- if (gfs2_withdrawing_or_withdrawn(sdp)) {
+ if (gfs2_withdrawn(sdp)) {
gfs2_remove_from_ail(bd);
continue;
}
@@ -131,7 +130,11 @@ __acquires(&sdp->sd_ail_lock)
if (!mapping)
continue;
spin_unlock(&sdp->sd_ail_lock);
- ret = mapping->a_ops->writepages(mapping, wbc);
+ BUG_ON(GFS2_SB(mapping->host) != sdp);
+ if (gfs2_is_jdata(GFS2_I(mapping->host)))
+ ret = gfs2_jdata_writeback(mapping, wbc);
+ else
+ ret = mapping->a_ops->writepages(mapping, wbc);
if (need_resched()) {
blk_finish_plug(plug);
cond_resched();
@@ -319,10 +322,8 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
continue;
}
if (!buffer_uptodate(bh) &&
- !cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
+ !cmpxchg(&sdp->sd_log_error, 0, -EIO))
gfs2_io_error_bh(sdp, bh);
- gfs2_withdraw_delayed(sdp);
- }
/*
* If we have space for revokes and the bd is no longer on any
* buf list, we can just add a revoke for it immediately and
@@ -802,9 +803,6 @@ void gfs2_flush_revokes(struct gfs2_sbd *sdp)
gfs2_log_lock(sdp);
gfs2_ail1_empty(sdp, max_revokes);
gfs2_log_unlock(sdp);
-
- if (gfs2_withdrawing(sdp))
- gfs2_withdraw(sdp);
}
/**
@@ -832,7 +830,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
struct super_block *sb = sdp->sd_vfs;
u64 dblock;
- if (gfs2_withdrawing_or_withdrawn(sdp))
+ if (gfs2_withdrawn(sdp))
return;
page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
@@ -979,12 +977,9 @@ static void empty_ail1_list(struct gfs2_sbd *sdp)
gfs2_ail1_wait(sdp);
empty = gfs2_ail1_empty(sdp, 0);
- if (gfs2_withdrawing_or_withdrawn(sdp))
+ if (gfs2_withdrawn(sdp))
break;
}
-
- if (gfs2_withdrawing(sdp))
- gfs2_withdraw(sdp);
}
/**
@@ -1045,7 +1040,7 @@ repeat:
* Do this check while holding the log_flush_lock to prevent new
* buffers from being added to the ail via gfs2_pin()
*/
- if (gfs2_withdrawing_or_withdrawn(sdp) ||
+ if (gfs2_withdrawn(sdp) ||
!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
goto out;
@@ -1066,7 +1061,7 @@ repeat:
sdp->sd_log_tr = NULL;
tr->tr_first = first_log_head;
if (unlikely(frozen)) {
- if (gfs2_assert_withdraw_delayed(sdp,
+ if (gfs2_assert_withdraw(sdp,
!tr->tr_num_buf_new && !tr->tr_num_databuf_new))
goto out_withdraw;
}
@@ -1091,18 +1086,18 @@ repeat:
clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
if (unlikely(frozen))
- if (gfs2_assert_withdraw_delayed(sdp, !reserved_revokes))
+ if (gfs2_assert_withdraw(sdp, !reserved_revokes))
goto out_withdraw;
gfs2_ordered_write(sdp);
- if (gfs2_withdrawing_or_withdrawn(sdp))
+ if (gfs2_withdrawn(sdp))
goto out_withdraw;
lops_before_commit(sdp, tr);
- if (gfs2_withdrawing_or_withdrawn(sdp))
+ if (gfs2_withdrawn(sdp))
goto out_withdraw;
if (sdp->sd_jdesc)
gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
- if (gfs2_withdrawing_or_withdrawn(sdp))
+ if (gfs2_withdrawn(sdp))
goto out_withdraw;
if (sdp->sd_log_head != sdp->sd_log_flush_head) {
@@ -1110,7 +1105,7 @@ repeat:
} else if (sdp->sd_log_tail != sdp->sd_log_flush_tail && !sdp->sd_log_idle) {
log_write_header(sdp, flags);
}
- if (gfs2_withdrawing_or_withdrawn(sdp))
+ if (gfs2_withdrawn(sdp))
goto out_withdraw;
lops_after_commit(sdp, tr);
@@ -1128,7 +1123,7 @@ repeat:
if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
if (!sdp->sd_log_idle) {
empty_ail1_list(sdp);
- if (gfs2_withdrawing_or_withdrawn(sdp))
+ if (gfs2_withdrawn(sdp))
goto out_withdraw;
log_write_header(sdp, flags);
}
@@ -1146,13 +1141,11 @@ out_end:
reserved_blocks += (reserved_revokes - sdp->sd_ldptrs) / sdp->sd_inptrs;
out:
if (used_blocks != reserved_blocks) {
- gfs2_assert_withdraw_delayed(sdp, used_blocks < reserved_blocks);
+ gfs2_assert_withdraw(sdp, used_blocks < reserved_blocks);
gfs2_log_release(sdp, reserved_blocks - used_blocks);
}
up_write(&sdp->sd_log_flush_lock);
gfs2_trans_free(sdp, tr);
- if (gfs2_withdrawing(sdp))
- gfs2_withdraw(sdp);
trace_gfs2_log_flush(sdp, 0, flags);
return;
@@ -1299,19 +1292,8 @@ int gfs2_logd(void *data)
set_freezable();
while (!kthread_should_stop()) {
- if (gfs2_withdrawing_or_withdrawn(sdp))
- break;
-
- /* Check for errors writing to the journal */
- if (sdp->sd_log_error) {
- gfs2_lm(sdp,
- "GFS2: fsid=%s: error %d: "
- "withdrawing the file system to "
- "prevent further damage.\n",
- sdp->sd_fsname, sdp->sd_log_error);
- gfs2_withdraw(sdp);
+ if (gfs2_withdrawn(sdp))
break;
- }
if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
gfs2_ail1_empty(sdp, 0);
@@ -1335,15 +1317,11 @@ int gfs2_logd(void *data)
test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
gfs2_ail_flush_reqd(sdp) ||
gfs2_jrnl_flush_reqd(sdp) ||
- sdp->sd_log_error ||
- gfs2_withdrawing_or_withdrawn(sdp) ||
+ gfs2_withdrawn(sdp) ||
kthread_should_stop(),
t);
}
- if (gfs2_withdrawing(sdp))
- gfs2_withdraw(sdp);
-
return 0;
}
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index c27b05099c1e..fc30ebdad83a 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -44,17 +44,6 @@ __releases(&sdp->sd_log_lock)
spin_unlock(&sdp->sd_log_lock);
}
-static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
- unsigned int value)
-{
- if (++value == sdp->sd_jdesc->jd_blocks) {
- value = 0;
- }
- sdp->sd_log_tail = value;
- sdp->sd_log_flush_tail = value;
- sdp->sd_log_head = value;
-}
-
static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 314ec2a70167..97ebe457c00a 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -49,7 +49,7 @@ void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
if (test_set_buffer_pinned(bh))
gfs2_assert_withdraw(sdp, 0);
if (!buffer_uptodate(bh))
- gfs2_io_error_bh_wd(sdp, bh);
+ gfs2_io_error_bh(sdp, bh);
bd = bh->b_private;
/* If this buffer is in the AIL and it has already been written
* to in-place disk block, remove it from the AIL.
@@ -157,7 +157,9 @@ u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
/**
* gfs2_end_log_write_bh - end log write of pagecache data with buffers
* @sdp: The superblock
- * @bvec: The bio_vec
+ * @folio: The folio
+ * @offset: The first byte within the folio that completed
+ * @size: The number of bytes that completed
* @error: The i/o status
*
* This finds the relevant buffers and unlocks them and sets the
@@ -166,17 +168,13 @@ u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
* that is pinned in the pagecache.
*/
-static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
- struct bio_vec *bvec,
- blk_status_t error)
+static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct folio *folio,
+ size_t offset, size_t size, blk_status_t error)
{
struct buffer_head *bh, *next;
- struct page *page = bvec->bv_page;
- unsigned size;
- bh = page_buffers(page);
- size = bvec->bv_len;
- while (bh_offset(bh) < bvec->bv_offset)
+ bh = folio_buffers(folio);
+ while (bh_offset(bh) < offset)
bh = bh->b_this_page;
do {
if (error)
@@ -186,7 +184,7 @@ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
size -= bh->b_size;
brelse(bh);
bh = next;
- } while(bh && size);
+ } while (bh && size);
}
/**
@@ -203,23 +201,24 @@ static void gfs2_end_log_write(struct bio *bio)
{
struct gfs2_sbd *sdp = bio->bi_private;
struct bio_vec *bvec;
- struct page *page;
struct bvec_iter_all iter_all;
if (bio->bi_status) {
- if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status))
+ int err = blk_status_to_errno(bio->bi_status);
+
+ if (!cmpxchg(&sdp->sd_log_error, 0, err))
fs_err(sdp, "Error %d writing to journal, jid=%u\n",
- bio->bi_status, sdp->sd_jdesc->jd_jid);
- gfs2_withdraw_delayed(sdp);
- /* prevent more writes to the journal */
- clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
- wake_up(&sdp->sd_logd_waitq);
+ err, sdp->sd_jdesc->jd_jid);
+ gfs2_withdraw(sdp);
}
bio_for_each_segment_all(bvec, bio, iter_all) {
- page = bvec->bv_page;
- if (page_has_buffers(page))
- gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
+ struct page *page = bvec->bv_page;
+ struct folio *folio = page_folio(page);
+
+ if (folio && folio_buffers(folio))
+ gfs2_end_log_write_bh(sdp, folio, bvec->bv_offset,
+ bvec->bv_len, bio->bi_status);
else
mempool_free(page, gfs2_page_pool);
}
@@ -359,8 +358,8 @@ static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
gfs2_log_incr_head(sdp);
- gfs2_log_write(sdp, sdp->sd_jdesc, bh->b_page, bh->b_size,
- bh_offset(bh), dblock);
+ gfs2_log_write(sdp, sdp->sd_jdesc, folio_page(bh->b_folio, 0),
+ bh->b_size, bh_offset(bh), dblock);
}
/**
@@ -406,17 +405,16 @@ static void gfs2_end_log_read(struct bio *bio)
}
/**
- * gfs2_jhead_pg_srch - Look for the journal head in a given page.
+ * gfs2_jhead_folio_search - Look for the journal head in a given folio.
* @jd: The journal descriptor
* @head: The journal head to start from
- * @page: The page to look in
+ * @folio: The folio to look in
*
* Returns: 1 if found, 0 otherwise.
*/
-
-static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
- struct gfs2_log_header_host *head,
- struct page *page)
+static bool gfs2_jhead_folio_search(struct gfs2_jdesc *jd,
+ struct gfs2_log_header_host *head,
+ struct folio *folio)
{
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
struct gfs2_log_header_host lh;
@@ -424,7 +422,8 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
unsigned int offset;
bool ret = false;
- kaddr = kmap_local_page(page);
+ VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
+ kaddr = kmap_local_folio(folio, 0);
for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
if (lh.lh_sequence >= head->lh_sequence)
@@ -449,7 +448,7 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
* Find the folio with 'index' in the journal's mapping. Search the folio for
* the journal head if requested (cleanup == false). Release refs on the
* folio so the page cache can reclaim it. We grabbed a
- * reference on this folio twice, first when we did a grab_cache_page()
+ * reference on this folio twice, first when we did a filemap_grab_folio()
* to obtain the folio to add it to the bio and second when we do a
* filemap_get_folio() here to get the folio to wait on while I/O on it is being
* completed.
@@ -472,9 +471,9 @@ static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
*done = true;
if (!*done)
- *done = gfs2_jhead_pg_srch(jd, head, &folio->page);
+ *done = gfs2_jhead_folio_search(jd, head, folio);
- /* filemap_get_folio() and the earlier grab_cache_page() */
+ /* filemap_get_folio() and the earlier filemap_grab_folio() */
folio_put_refs(folio, 2);
}
@@ -485,7 +484,7 @@ static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO);
bio_clone_blkg_association(new, prev);
new->bi_iter.bi_sector = bio_end_sector(prev);
- bio_chain(new, prev);
+ bio_chain(prev, new);
submit_bio(prev);
return new;
}
@@ -494,15 +493,13 @@ static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
* gfs2_find_jhead - find the head of a log
* @jd: The journal descriptor
* @head: The log descriptor for the head of the log is returned here
- * @keep_cache: If set inode pages will not be truncated
*
* Do a search of a journal by reading it in large chunks using bios and find
* the valid log entry with the highest sequence number. (i.e. the log head)
*
* Returns: 0 on success, errno otherwise
*/
-int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
- bool keep_cache)
+int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
{
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
struct address_space *mapping = jd->jd_inode->i_mapping;
@@ -512,9 +509,9 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
unsigned int shift = PAGE_SHIFT - bsize_shift;
unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
struct gfs2_journal_extent *je;
- int sz, ret = 0;
+ int ret = 0;
struct bio *bio = NULL;
- struct page *page = NULL;
+ struct folio *folio = NULL;
bool done = false;
errseq_t since;
@@ -527,10 +524,11 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
u64 dblock = je->dblock;
for (; block < je->lblock + je->blocks; block++, dblock++) {
- if (!page) {
- page = grab_cache_page(mapping, block >> shift);
- if (!page) {
- ret = -ENOMEM;
+ if (!folio) {
+ folio = filemap_grab_folio(mapping,
+ block >> shift);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
done = true;
goto out;
}
@@ -541,8 +539,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
sector_t sector = dblock << sdp->sd_fsb2bb_shift;
if (bio_end_sector(bio) == sector) {
- sz = bio_add_page(bio, page, bsize, off);
- if (sz == bsize)
+ if (bio_add_folio(bio, folio, bsize, off))
goto block_added;
}
if (off) {
@@ -562,12 +559,11 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
bio->bi_opf = REQ_OP_READ;
add_block_to_new_bio:
- sz = bio_add_page(bio, page, bsize, off);
- BUG_ON(sz != bsize);
+ bio_add_folio_nofail(bio, folio, bsize, off);
block_added:
off += bsize;
- if (off == PAGE_SIZE)
- page = NULL;
+ if (off == folio_size(folio))
+ folio = NULL;
if (blocks_submitted <= blocks_read + max_blocks) {
/* Keep at least one bio in flight */
continue;
@@ -591,8 +587,7 @@ out:
if (!ret)
ret = filemap_check_wb_err(mapping, since);
- if (!keep_cache)
- truncate_inode_pages(mapping, 0);
+ truncate_inode_pages(mapping, 0);
return ret;
}
@@ -615,15 +610,13 @@ static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
static void gfs2_check_magic(struct buffer_head *bh)
{
- void *kaddr;
__be32 *ptr;
clear_buffer_escaped(bh);
- kaddr = kmap_local_page(bh->b_page);
- ptr = kaddr + bh_offset(bh);
+ ptr = kmap_local_folio(bh->b_folio, bh_offset(bh));
if (*ptr == cpu_to_be32(GFS2_MAGIC))
set_buffer_escaped(bh);
- kunmap_local(kaddr);
+ kunmap_local(ptr);
}
static int blocknr_cmp(void *priv, const struct list_head *a,
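Editorial sketch (not part of the patch): gfs2_jhead_folio_search() above steps through a folio in block-size increments and keeps the log header with the highest sequence number, stopping as soon as the sequence drops (the wrap point). The fragment below shows only that sequence comparison in standalone C; header validation and the block-size stepping are omitted, and the names are illustrative:

#include <stdio.h>

/*
 * Headers appear in increasing sequence order until the log wraps, so the
 * head is the last header seen before the sequence number drops.
 * Returns 1 once the head has been found, 0 if the caller should keep reading.
 */
static int search_chunk(const unsigned long long *seq, unsigned int nr,
                        unsigned long long *head_seq)
{
        for (unsigned int i = 0; i < nr; i++) {
                if (seq[i] >= *head_seq)
                        *head_seq = seq[i];     /* still climbing: newer header */
                else
                        return 1;               /* dropped: previous header was the head */
        }
        return 0;                               /* not found yet, read the next chunk */
}

int main(void)
{
        unsigned long long chunk[] = { 101, 102, 103, 7, 8 };
        unsigned long long head = 0;

        if (search_chunk(chunk, 5, &head))
                printf("journal head sequence: %llu\n", head);   /* prints 103 */
        return 0;
}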
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index 07890c7b145d..be740bf33666 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -20,7 +20,7 @@ void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf);
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
int gfs2_find_jhead(struct gfs2_jdesc *jd,
- struct gfs2_log_header_host *head, bool keep_cache);
+ struct gfs2_log_header_host *head);
void gfs2_drain_revokes(struct gfs2_sbd *sdp);
static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 04cadc02e5a6..9d65719353fa 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -51,7 +51,6 @@ static void gfs2_init_glock_once(void *foo)
{
struct gfs2_glock *gl = foo;
- spin_lock_init(&gl->gl_lockref.lock);
INIT_LIST_HEAD(&gl->gl_holders);
INIT_LIST_HEAD(&gl->gl_lru);
INIT_LIST_HEAD(&gl->gl_ail_list);
@@ -152,7 +151,8 @@ static int __init init_gfs2_fs(void)
error = -ENOMEM;
gfs2_recovery_wq = alloc_workqueue("gfs2_recovery",
- WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
+ WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_PERCPU,
+ 0);
if (!gfs2_recovery_wq)
goto fail_wq1;
@@ -161,7 +161,7 @@ static int __init init_gfs2_fs(void)
if (!gfs2_control_wq)
goto fail_wq2;
- gfs2_freeze_wq = alloc_workqueue("gfs2_freeze", 0, 0);
+ gfs2_freeze_wq = alloc_workqueue("gfs2_freeze", WQ_PERCPU, 0);
if (!gfs2_freeze_wq)
goto fail_wq3;
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index fea3efcc2f93..e4356198d8d8 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -103,6 +103,7 @@ const struct address_space_operations gfs2_meta_aops = {
.invalidate_folio = block_invalidate_folio,
.writepages = gfs2_aspace_writepages,
.release_folio = gfs2_release_folio,
+ .migrate_folio = buffer_migrate_folio_norefs,
};
const struct address_space_operations gfs2_rgrp_aops = {
@@ -110,6 +111,7 @@ const struct address_space_operations gfs2_rgrp_aops = {
.invalidate_folio = block_invalidate_folio,
.writepages = gfs2_aspace_writepages,
.release_folio = gfs2_release_folio,
+ .migrate_folio = buffer_migrate_folio_norefs,
};
/**
@@ -132,7 +134,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
unsigned int bufnum;
if (mapping == NULL)
- mapping = &sdp->sd_aspace;
+ mapping = gfs2_aspace(sdp);
shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
index = blkno >> shift; /* convert block to page */
@@ -198,15 +200,14 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
static void gfs2_meta_read_endio(struct bio *bio)
{
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
- struct buffer_head *bh = page_buffers(page);
- unsigned int len = bvec->bv_len;
+ bio_for_each_folio_all(fi, bio) {
+ struct folio *folio = fi.folio;
+ struct buffer_head *bh = folio_buffers(folio);
+ size_t len = fi.length;
- while (bh_offset(bh) < bvec->bv_offset)
+ while (bh_offset(bh) < fi.offset)
bh = bh->b_this_page;
do {
struct buffer_head *next = bh->b_this_page;
@@ -229,10 +230,10 @@ static void gfs2_submit_bhs(blk_opf_t opf, struct buffer_head *bhs[], int num)
struct bio *bio;
bio = bio_alloc(bh->b_bdev, num, opf, GFP_NOIO);
- bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+ bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> SECTOR_SHIFT);
while (num > 0) {
bh = *bhs;
- if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
+ if (!bio_add_folio(bio, bh->b_folio, bh->b_size, bh_offset(bh))) {
BUG_ON(bio->bi_iter.bi_size == 0);
break;
}
@@ -262,8 +263,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
struct buffer_head *bh, *bhs[2];
int num = 0;
- if (gfs2_withdrawing_or_withdrawn(sdp) &&
- !gfs2_withdraw_in_prog(sdp)) {
+ if (gfs2_withdrawn(sdp)) {
*bhp = NULL;
return -EIO;
}
@@ -302,7 +302,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
if (unlikely(!buffer_uptodate(bh))) {
struct gfs2_trans *tr = current->journal_info;
if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
- gfs2_io_error_bh_wd(sdp, bh);
+ gfs2_io_error_bh(sdp, bh);
brelse(bh);
*bhp = NULL;
return -EIO;
@@ -321,8 +321,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
- if (gfs2_withdrawing_or_withdrawn(sdp) &&
- !gfs2_withdraw_in_prog(sdp))
+ if (gfs2_withdrawn(sdp))
return -EIO;
wait_on_buffer(bh);
@@ -330,11 +329,10 @@ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
if (!buffer_uptodate(bh)) {
struct gfs2_trans *tr = current->journal_info;
if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
- gfs2_io_error_bh_wd(sdp, bh);
+ gfs2_io_error_bh(sdp, bh);
return -EIO;
}
- if (gfs2_withdrawing_or_withdrawn(sdp) &&
- !gfs2_withdraw_in_prog(sdp))
+ if (gfs2_withdrawn(sdp))
return -EIO;
return 0;
@@ -444,11 +442,9 @@ void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
struct buffer_head *bh;
int ty;
- if (!ip->i_gl) {
- /* This can only happen during incomplete inode creation. */
- BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
+ /* This can only happen during incomplete inode creation. */
+ if (!ip->i_gl)
return;
- }
gfs2_ail1_wipe(sdp, bstart, blen);
while (blen) {
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index 831d988c2ceb..b7c8a6684d02 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -44,9 +44,7 @@ static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
struct gfs2_glock_aspace *gla =
container_of(mapping, struct gfs2_glock_aspace, mapping);
return gla->glock.gl_name.ln_sbd;
- } else if (mapping->a_ops == &gfs2_rgrp_aops)
- return container_of(mapping, struct gfs2_sbd, sd_aspace);
- else
+ } else
return inode->i_sb->s_fs_info;
}
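Editorial sketch (not part of the patch): the gfs2_submit_bhs() hunk above only respells the 512-byte sector conversion with SECTOR_SHIFT instead of a bare 9; the arithmetic is unchanged. For reference, a self-contained version of that block-to-sector calculation (values in main() are arbitrary examples):

#include <stdio.h>

#define SECTOR_SHIFT 9          /* 512-byte sectors, as in the block layer */

/* Convert a filesystem block number to a 512-byte sector number. */
static unsigned long long block_to_sector(unsigned long long blkno,
                                          unsigned int block_size)
{
        return blkno * (block_size >> SECTOR_SHIFT);
}

int main(void)
{
        printf("block 10 @4096 -> sector %llu\n", block_to_sector(10, 4096));
        printf("block 10 @1024 -> sector %llu\n", block_to_sector(10, 1024));
        return 0;
}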
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index e83d293c3614..e7a88b717991 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -60,19 +60,21 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
gt->gt_new_files_jdata = 0;
gt->gt_max_readahead = BIT(18);
gt->gt_complain_secs = 10;
+ gt->gt_withdraw_helper_timeout = 5;
}
void free_sbd(struct gfs2_sbd *sdp)
{
- if (sdp->sd_lkstats)
- free_percpu(sdp->sd_lkstats);
+ struct super_block *sb = sdp->sd_vfs;
+
+ free_percpu(sdp->sd_lkstats);
+ sb->s_fs_info = NULL;
kfree(sdp);
}
static struct gfs2_sbd *init_sbd(struct super_block *sb)
{
struct gfs2_sbd *sdp;
- struct address_space *mapping;
sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
if (!sdp)
@@ -91,7 +93,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
init_waitqueue_head(&sdp->sd_async_glock_wait);
atomic_set(&sdp->sd_glock_disposal, 0);
init_completion(&sdp->sd_locking_init);
- init_completion(&sdp->sd_wdack);
+ init_completion(&sdp->sd_withdraw_helper);
spin_lock_init(&sdp->sd_statfs_spin);
spin_lock_init(&sdp->sd_rindex_spin);
@@ -109,16 +111,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
INIT_LIST_HEAD(&sdp->sd_sc_inodes_list);
- mapping = &sdp->sd_aspace;
-
- address_space_init_once(mapping);
- mapping->a_ops = &gfs2_rgrp_aops;
- mapping->host = sb->s_bdev->bd_mapping->host;
- mapping->flags = 0;
- mapping_set_gfp_mask(mapping, GFP_NOFS);
- mapping->i_private_data = NULL;
- mapping->writeback_index = 0;
-
spin_lock_init(&sdp->sd_log_lock);
atomic_set(&sdp->sd_log_pinned, 0);
INIT_LIST_HEAD(&sdp->sd_log_revokes);
@@ -172,7 +164,7 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
return -EINVAL;
}
- if (sb->sb_bsize < 512 || sb->sb_bsize > PAGE_SIZE ||
+ if (sb->sb_bsize < SECTOR_SIZE || sb->sb_bsize > PAGE_SIZE ||
(sb->sb_bsize & (sb->sb_bsize - 1))) {
pr_warn("Invalid block size\n");
return -EINVAL;
@@ -226,28 +218,22 @@ static void gfs2_sb_in(struct gfs2_sbd *sdp, const struct gfs2_sb *str)
static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
{
- struct super_block *sb = sdp->sd_vfs;
- struct page *page;
- struct bio_vec bvec;
- struct bio bio;
+ struct gfs2_sb *sb;
int err;
- page = alloc_page(GFP_KERNEL);
- if (unlikely(!page))
+ sb = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (unlikely(!sb))
return -ENOMEM;
-
- bio_init(&bio, sb->s_bdev, &bvec, 1, REQ_OP_READ | REQ_META);
- bio.bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
- __bio_add_page(&bio, page, PAGE_SIZE, 0);
-
- err = submit_bio_wait(&bio);
+ err = bdev_rw_virt(sdp->sd_vfs->s_bdev,
+ sector << (sdp->sd_vfs->s_blocksize_bits - SECTOR_SHIFT),
+ sb, PAGE_SIZE, REQ_OP_READ | REQ_META);
if (err) {
pr_warn("error %d reading superblock\n", err);
- __free_page(page);
+ kfree(sb);
return err;
}
- gfs2_sb_in(sdp, page_address(page));
- __free_page(page);
+ gfs2_sb_in(sdp, sb);
+ kfree(sb);
return gfs2_check_sb(sdp, silent);
}
@@ -272,7 +258,7 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
return error;
}
- sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - 9;
+ sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - SECTOR_SHIFT;
sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
sizeof(struct gfs2_dinode)) / sizeof(u64);
@@ -385,7 +371,7 @@ static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
error = gfs2_glock_nq_num(sdp,
GFS2_MOUNT_LOCK, &gfs2_nondisk_glops,
LM_ST_EXCLUSIVE,
- LM_FLAG_NOEXP | GL_NOCACHE | GL_NOPID,
+ LM_FLAG_RECOVER | GL_NOCACHE | GL_NOPID,
mount_gh);
if (error) {
fs_err(sdp, "can't acquire mount glock: %d\n", error);
@@ -395,7 +381,7 @@ static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
error = gfs2_glock_nq_num(sdp,
GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
LM_ST_SHARED,
- LM_FLAG_NOEXP | GL_EXACT | GL_NOPID,
+ LM_FLAG_RECOVER | GL_EXACT | GL_NOPID,
&sdp->sd_live_gh);
if (error) {
fs_err(sdp, "can't acquire live glock: %d\n", error);
@@ -500,7 +486,9 @@ static int init_sb(struct gfs2_sbd *sdp, int silent)
sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);
goto out;
}
- sb_set_blocksize(sb, sdp->sd_sb.sb_bsize);
+ ret = -EINVAL;
+ if (!sb_set_blocksize(sb, sdp->sd_sb.sb_bsize))
+ goto out;
/* Get the root inode */
no_addr = sdp->sd_sb.sb_root_dir.no_addr;
@@ -555,8 +543,6 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
mutex_lock(&sdp->sd_jindex_mutex);
for (;;) {
- struct gfs2_inode *jip;
-
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
if (error)
break;
@@ -597,8 +583,6 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
d_mark_dontcache(jd->jd_inode);
spin_lock(&sdp->sd_jindex_spin);
jd->jd_jid = sdp->sd_journals++;
- jip = GFS2_I(jd->jd_inode);
- jd->jd_no_addr = jip->i_no_addr;
list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
spin_unlock(&sdp->sd_jindex_spin);
}
@@ -758,7 +742,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
&gfs2_journal_glops,
LM_ST_EXCLUSIVE,
- LM_FLAG_NOEXP | GL_NOCACHE | GL_NOPID,
+ LM_FLAG_RECOVER | GL_NOPID,
&sdp->sd_journal_gh);
if (error) {
fs_err(sdp, "can't acquire journal glock: %d\n", error);
@@ -766,9 +750,8 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
}
ip = GFS2_I(sdp->sd_jdesc->jd_inode);
- sdp->sd_jinode_gl = ip->i_gl;
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
- LM_FLAG_NOEXP | GL_EXACT |
+ LM_FLAG_RECOVER | GL_EXACT |
GL_NOCACHE | GL_NOPID,
&sdp->sd_jinode_gh);
if (error) {
@@ -834,13 +817,10 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
fail_statfs:
uninit_statfs(sdp);
fail_jinode_gh:
- /* A withdraw may have done dq/uninit so now we need to check it */
- if (!sdp->sd_args.ar_spectator &&
- gfs2_holder_initialized(&sdp->sd_jinode_gh))
+ if (!sdp->sd_args.ar_spectator)
gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
fail_journal_gh:
- if (!sdp->sd_args.ar_spectator &&
- gfs2_holder_initialized(&sdp->sd_journal_gh))
+ if (!sdp->sd_args.ar_spectator)
gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
fail_jindex:
gfs2_jindex_free(sdp);
@@ -1053,8 +1033,8 @@ hostdata_error:
void gfs2_lm_unmount(struct gfs2_sbd *sdp)
{
const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops;
- if (!gfs2_withdrawing_or_withdrawn(sdp) && lm->lm_unmount)
- lm->lm_unmount(sdp);
+ if (!gfs2_withdrawn(sdp) && lm->lm_unmount)
+ lm->lm_unmount(sdp, true);
}
static int wait_on_journal(struct gfs2_sbd *sdp)
@@ -1135,6 +1115,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
int silent = fc->sb_flags & SB_SILENT;
struct gfs2_sbd *sdp;
struct gfs2_holder mount_gh;
+ struct address_space *mapping;
int error;
sdp = init_sbd(sb);
@@ -1156,7 +1137,8 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_flags |= SB_NOSEC;
sb->s_magic = GFS2_MAGIC;
sb->s_op = &gfs2_super_ops;
- sb->s_d_op = &gfs2_dops;
+
+ set_default_d_op(sb, &gfs2_dops);
sb->s_export_op = &gfs2_export_ops;
sb->s_qcop = &gfs2_quotactl_ops;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
@@ -1166,9 +1148,12 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
/* Set up the buffer cache and fill in some fake block size values
to allow us to read-in the on-disk superblock. */
- sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, 512);
+ sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, SECTOR_SIZE);
+ error = -EINVAL;
+ if (!sdp->sd_sb.sb_bsize)
+ goto fail_free;
sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
- sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - 9;
+ sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - SECTOR_SHIFT;
sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
@@ -1181,21 +1166,35 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
sdp->sd_tune.gt_statfs_quantum = 30;
}
+ /* Set up an address space for metadata writes */
+ sdp->sd_inode = new_inode(sb);
+ error = -ENOMEM;
+ if (!sdp->sd_inode)
+ goto fail_free;
+ sdp->sd_inode->i_ino = GFS2_BAD_INO;
+ sdp->sd_inode->i_size = OFFSET_MAX;
+
+ mapping = gfs2_aspace(sdp);
+ mapping->a_ops = &gfs2_rgrp_aops;
+ gfs2_setup_inode(sdp->sd_inode);
+
error = init_names(sdp, silent);
if (error)
- goto fail_free;
+ goto fail_iput;
snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name);
error = -ENOMEM;
sdp->sd_glock_wq = alloc_workqueue("gfs2-glock/%s",
- WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 0,
+ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE | WQ_PERCPU,
+ 0,
sdp->sd_fsname);
if (!sdp->sd_glock_wq)
- goto fail_free;
+ goto fail_iput;
sdp->sd_delete_wq = alloc_workqueue("gfs2-delete/%s",
- WQ_MEM_RECLAIM | WQ_FREEZABLE, 0, sdp->sd_fsname);
+ WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_PERCPU, 0,
+ sdp->sd_fsname);
if (!sdp->sd_delete_wq)
goto fail_glock_wq;
@@ -1209,6 +1208,8 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
if (error)
goto fail_debug;
+ INIT_WORK(&sdp->sd_withdraw_work, gfs2_withdraw_func);
+
error = init_locking(sdp, &mount_gh, DO);
if (error)
goto fail_lm;
@@ -1309,9 +1310,10 @@ fail_delete_wq:
fail_glock_wq:
if (sdp->sd_glock_wq)
destroy_workqueue(sdp->sd_glock_wq);
+fail_iput:
+ iput(sdp->sd_inode);
fail_free:
free_sbd(sdp);
- sb->s_fs_info = NULL;
return error;
}
@@ -1394,12 +1396,14 @@ static const struct constant_table gfs2_param_data[] = {
};
enum opt_errors {
- Opt_errors_withdraw = GFS2_ERRORS_WITHDRAW,
- Opt_errors_panic = GFS2_ERRORS_PANIC,
+ Opt_errors_withdraw = GFS2_ERRORS_WITHDRAW,
+ Opt_errors_deactivate = GFS2_ERRORS_DEACTIVATE,
+ Opt_errors_panic = GFS2_ERRORS_PANIC,
};
static const struct constant_table gfs2_param_errors[] = {
{"withdraw", Opt_errors_withdraw },
+ {"deactivate", Opt_errors_deactivate },
{"panic", Opt_errors_panic },
{}
};
@@ -1744,12 +1748,12 @@ static void gfs2_evict_inodes(struct super_block *sb)
spin_lock(&sb->s_inode_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
spin_lock(&inode->i_lock);
- if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) &&
+ if ((inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) &&
!need_resched()) {
spin_unlock(&inode->i_lock);
continue;
}
- atomic_inc(&inode->i_count);
+ __iget(inode);
spin_unlock(&inode->i_lock);
spin_unlock(&sb->s_inode_list_lock);
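Editorial sketch (not part of the patch): gfs2_check_sb() above keeps the classic block-size sanity check, now phrased with SECTOR_SIZE: the size must lie between the sector size and the page size and be a power of two, which the `bsize & (bsize - 1)` test captures. A standalone version of that check, where 512 and 4096 stand in for SECTOR_SIZE and a 4 KiB PAGE_SIZE:

#include <stdbool.h>
#include <stdio.h>

#define MIN_BSIZE 512u          /* SECTOR_SIZE on the kernel side */
#define MAX_BSIZE 4096u         /* PAGE_SIZE on a 4 KiB-page machine */

static bool blocksize_ok(unsigned int bsize)
{
        if (bsize < MIN_BSIZE || bsize > MAX_BSIZE)
                return false;
        /* A power of two has exactly one bit set, so x & (x - 1) is zero. */
        return (bsize & (bsize - 1)) == 0;
}

int main(void)
{
        unsigned int sizes[] = { 256, 512, 1000, 1024, 4096, 8192 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("%u -> %s\n", sizes[i],
                       blocksize_ok(sizes[i]) ? "ok" : "invalid");
        return 0;
}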
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 72b48f6f5561..b1692f12a602 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -125,7 +125,7 @@ static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
hlist_bl_del_rcu(&qd->qd_hlist);
spin_unlock_bucket(qd->qd_hash);
- if (!gfs2_withdrawing_or_withdrawn(sdp)) {
+ if (!gfs2_withdrawn(sdp)) {
gfs2_assert_warn(sdp, !qd->qd_change);
gfs2_assert_warn(sdp, !qd->qd_slot_ref);
gfs2_assert_warn(sdp, !qd->qd_bh_count);
@@ -236,8 +236,7 @@ static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, str
return NULL;
qd->qd_sbd = sdp;
- qd->qd_lockref.count = 0;
- spin_lock_init(&qd->qd_lockref.lock);
+ lockref_init(&qd->qd_lockref);
qd->qd_id = qid;
qd->qd_slot = -1;
INIT_LIST_HEAD(&qd->qd_lru);
@@ -298,7 +297,6 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
spin_lock_bucket(hash);
*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
if (qd == NULL) {
- new_qd->qd_lockref.count++;
*qdp = new_qd;
list_add(&new_qd->qd_list, &sdp->sd_quota_list);
hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
@@ -1451,6 +1449,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
if (qd == NULL)
goto fail_brelse;
+ qd->qd_lockref.count = 0;
set_bit(QDF_CHANGE, &qd->qd_flags);
qd->qd_change = qc_change;
qd->qd_slot = slot;
@@ -1552,27 +1551,13 @@ static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
if (error == 0 || error == -EROFS)
return;
- if (!gfs2_withdrawing_or_withdrawn(sdp)) {
+ if (!gfs2_withdrawn(sdp)) {
if (!cmpxchg(&sdp->sd_log_error, 0, error))
fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
wake_up(&sdp->sd_logd_waitq);
}
}
-static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
- int (*fxn)(struct super_block *sb, int type),
- unsigned long t, unsigned long *timeo,
- unsigned int *new_timeo)
-{
- if (t >= *timeo) {
- int error = fxn(sdp->sd_vfs, 0);
- quotad_error(sdp, msg, error);
- *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
- } else {
- *timeo -= t;
- }
-}
-
void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
if (!sdp->sd_statfs_force_sync) {
sdp->sd_statfs_force_sync = 1;
@@ -1590,36 +1575,46 @@ void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
int gfs2_quotad(void *data)
{
struct gfs2_sbd *sdp = data;
- struct gfs2_tune *tune = &sdp->sd_tune;
- unsigned long statfs_timeo = 0;
- unsigned long quotad_timeo = 0;
- unsigned long t = 0;
+ unsigned long now = jiffies;
+ unsigned long statfs_deadline = now;
+ unsigned long quotad_deadline = now;
set_freezable();
while (!kthread_should_stop()) {
- if (gfs2_withdrawing_or_withdrawn(sdp))
+ unsigned long t;
+
+ if (gfs2_withdrawn(sdp))
break;
- /* Update the master statfs file */
- if (sdp->sd_statfs_force_sync) {
- int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
+ now = jiffies;
+ if (sdp->sd_statfs_force_sync ||
+ time_after(now, statfs_deadline)) {
+ unsigned int quantum;
+ int error;
+
+ /* Update the master statfs file */
+ error = gfs2_statfs_sync(sdp->sd_vfs, 0);
quotad_error(sdp, "statfs", error);
- statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
+
+ quantum = gfs2_tune_get(sdp, gt_statfs_quantum);
+ statfs_deadline = now + quantum * HZ;
}
- else
- quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
- &statfs_timeo,
- &tune->gt_statfs_quantum);
+ if (time_after(now, quotad_deadline)) {
+ unsigned int quantum;
+ int error;
- /* Update quota file */
- quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
- &quotad_timeo, &tune->gt_quota_quantum);
+ /* Update the quota file */
+ error = gfs2_quota_sync(sdp->sd_vfs, 0);
+ quotad_error(sdp, "sync", error);
- t = min(quotad_timeo, statfs_timeo);
+ quantum = gfs2_tune_get(sdp, gt_quota_quantum);
+ quotad_deadline = now + quantum * HZ;
+ }
- t = wait_event_freezable_timeout(sdp->sd_quota_wait,
+ t = min(statfs_deadline - now, quotad_deadline - now);
+ wait_event_freezable_timeout(sdp->sd_quota_wait,
sdp->sd_statfs_force_sync ||
- gfs2_withdrawing_or_withdrawn(sdp) ||
+ gfs2_withdrawn(sdp) ||
kthread_should_stop(),
t);
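Editorial sketch (not part of the patch): the reworked gfs2_quotad() loop above drops the per-call countdown bookkeeping and instead keeps absolute jiffies deadlines, firing a sync when a deadline has passed and sleeping for the smaller remaining interval. A minimal userspace rendering of the same deadline pattern, with a monotonic millisecond clock in place of jiffies; the quanta, names, and five-iteration loop are illustrative only:

#include <stdio.h>
#include <time.h>

static long long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

int main(void)
{
        const long long statfs_quantum = 30, quota_quantum = 60;   /* ms here, seconds in gfs2 */
        long long now = now_ms();
        long long statfs_deadline = now, quota_deadline = now;

        for (int i = 0; i < 5; i++) {
                now = now_ms();
                if (now >= statfs_deadline) {           /* deadline reached: time_after() in the kernel */
                        printf("sync statfs\n");
                        statfs_deadline = now + statfs_quantum;
                }
                if (now >= quota_deadline) {
                        printf("sync quota\n");
                        quota_deadline = now + quota_quantum;
                }
                /* Sleep until the nearer of the two deadlines. */
                long long t = statfs_deadline - now < quota_deadline - now ?
                              statfs_deadline - now : quota_deadline - now;
                struct timespec rq = { .tv_sec = t / 1000,
                                       .tv_nsec = (t % 1000) * 1000000 };
                nanosleep(&rq, NULL);
        }
        return 0;
}

The design point mirrors the patch: each service tracks when it is next due rather than how long is left, so a forced statfs sync (sd_statfs_force_sync in gfs2) never disturbs the other service's schedule.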
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index f462d9cb3087..988f38dc5b2c 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -44,8 +44,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
int ret;
ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
- if (capable(CAP_SYS_RESOURCE) ||
- sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
+ if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ||
+ capable(CAP_SYS_RESOURCE))
return 0;
ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
if (ret)
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index f4fe7039f725..8c8202c68b64 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -118,6 +118,7 @@ void gfs2_revoke_clean(struct gfs2_jdesc *jd)
int __get_log_header(struct gfs2_sbd *sdp, const struct gfs2_log_header *lh,
unsigned int blkno, struct gfs2_log_header_host *head)
{
+ const u32 zero = 0;
u32 hash, crc;
if (lh->lh_header.mh_magic != cpu_to_be32(GFS2_MAGIC) ||
@@ -126,7 +127,7 @@ int __get_log_header(struct gfs2_sbd *sdp, const struct gfs2_log_header *lh,
return 1;
hash = crc32(~0, lh, LH_V1_SIZE - 4);
- hash = ~crc32_le_shift(hash, 4); /* assume lh_hash is zero */
+ hash = ~crc32(hash, &zero, 4); /* assume lh_hash is zero */
if (be32_to_cpu(lh->lh_hash) != hash)
return 1;
@@ -263,16 +264,12 @@ static void clean_journal(struct gfs2_jdesc *jd,
struct gfs2_log_header_host *head)
{
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
- u32 lblock = head->lh_blkno;
- gfs2_replay_incr_blk(jd, &lblock);
- gfs2_write_log_header(sdp, jd, head->lh_sequence + 1, 0, lblock,
+ gfs2_replay_incr_blk(jd, &head->lh_blkno);
+ head->lh_sequence++;
+ gfs2_write_log_header(sdp, jd, head->lh_sequence, 0, head->lh_blkno,
GFS2_LOG_HEAD_UNMOUNT | GFS2_LOG_HEAD_RECOVERY,
REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC);
- if (jd->jd_jid == sdp->sd_lockstruct.ls_jid) {
- sdp->sd_log_flush_head = lblock;
- gfs2_log_incr_head(sdp);
- }
}
@@ -411,7 +408,7 @@ void gfs2_recover_func(struct work_struct *work)
int error = 0;
int jlocked = 0;
- if (gfs2_withdrawing_or_withdrawn(sdp)) {
+ if (gfs2_withdrawn(sdp)) {
fs_err(sdp, "jid=%u: Recovery not attempted due to withdraw.\n",
jd->jd_jid);
goto fail;
@@ -427,7 +424,8 @@ void gfs2_recover_func(struct work_struct *work)
error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops,
LM_ST_EXCLUSIVE,
- LM_FLAG_NOEXP | LM_FLAG_TRY | GL_NOCACHE,
+ LM_FLAG_RECOVER | LM_FLAG_TRY |
+ GL_NOCACHE,
&j_gh);
switch (error) {
case 0:
@@ -443,7 +441,8 @@ void gfs2_recover_func(struct work_struct *work)
}
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
- LM_FLAG_NOEXP | GL_NOCACHE, &ji_gh);
+ LM_FLAG_RECOVER | GL_NOCACHE,
+ &ji_gh);
if (error)
goto fail_gunlock_j;
} else {
@@ -457,7 +456,7 @@ void gfs2_recover_func(struct work_struct *work)
if (error)
goto fail_gunlock_ji;
- error = gfs2_find_jhead(jd, &head, true);
+ error = gfs2_find_jhead(jd, &head);
if (error)
goto fail_gunlock_ji;
t_jhd = ktime_get();
@@ -533,6 +532,9 @@ void gfs2_recover_func(struct work_struct *work)
ktime_ms_delta(t_rep, t_tlck));
}
+ if (jd->jd_jid == sdp->sd_lockstruct.ls_jid)
+ gfs2_log_pointers_init(sdp, &head);
+
gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS);
if (jlocked) {
@@ -580,3 +582,13 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd, bool wait)
return wait ? jd->jd_recover_error : 0;
}
+void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
+ struct gfs2_log_header_host *head)
+{
+ sdp->sd_log_sequence = head->lh_sequence + 1;
+ gfs2_replay_incr_blk(sdp->sd_jdesc, &head->lh_blkno);
+ sdp->sd_log_tail = head->lh_blkno;
+ sdp->sd_log_flush_head = head->lh_blkno;
+ sdp->sd_log_flush_tail = head->lh_blkno;
+ sdp->sd_log_head = head->lh_blkno;
+}
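Editorial sketch (not part of the patch): the __get_log_header() change above folds four zero bytes into the running CRC, i.e. the header checksum is computed as if the embedded lh_hash field were zero. The standalone sketch below shows that general "checksum with the hash field zeroed" pattern using zlib's crc32() (link with -lz); zlib's seed and inversion conventions differ from the kernel's crc32_le recipe, and the struct layout and magic value here are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>       /* link with -lz */

struct header {
        uint32_t magic;
        uint32_t hash;          /* checksum of the header with this field zeroed */
        uint32_t payload[4];
};

static uint32_t header_crc(const struct header *h)
{
        struct header tmp = *h;

        tmp.hash = 0;           /* "assume lh_hash is zero", as in __get_log_header() */
        return (uint32_t)crc32(0L, (const unsigned char *)&tmp, sizeof(tmp));
}

int main(void)
{
        struct header h = { .magic = 0x01161970, .payload = { 1, 2, 3, 4 } };

        h.hash = header_crc(&h);
        printf("stored hash 0x%08x, recomputed 0x%08x -> %s\n",
               (unsigned)h.hash, (unsigned)header_crc(&h),
               h.hash == header_crc(&h) ? "valid" : "corrupt");
        return 0;
}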
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h
index 6a0fd42e1120..5a5ba72ecd75 100644
--- a/fs/gfs2/recovery.h
+++ b/fs/gfs2/recovery.h
@@ -29,6 +29,8 @@ void gfs2_recover_func(struct work_struct *work);
int __get_log_header(struct gfs2_sbd *sdp,
const struct gfs2_log_header *lh, unsigned int blkno,
struct gfs2_log_header_host *head);
+void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
+ struct gfs2_log_header_host *head);
#endif /* __RECOVERY_DOT_H__ */
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 92a3b6ddafdc..f6cd907b3ec6 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -134,30 +134,20 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
struct gfs2_glock *j_gl = ip->i_gl;
- struct gfs2_log_header_host head;
int error;
j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
- if (gfs2_withdrawing_or_withdrawn(sdp))
+ if (gfs2_withdrawn(sdp))
return -EIO;
- error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
- if (error) {
- gfs2_consist(sdp);
- return error;
- }
-
- if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
- gfs2_consist(sdp);
+ if (sdp->sd_log_sequence == 0) {
+ fs_err(sdp, "unknown status of our own journal jid %d",
+ sdp->sd_lockstruct.ls_jid);
return -EIO;
}
- /* Initialize some head of the log stuff */
- sdp->sd_log_sequence = head.lh_sequence + 1;
- gfs2_log_pointers_init(sdp, head.lh_blkno);
-
error = gfs2_quota_init(sdp);
- if (!error && gfs2_withdrawing_or_withdrawn(sdp))
+ if (!error && gfs2_withdrawn(sdp))
error = -EIO;
if (!error)
set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
@@ -361,7 +351,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
gfs2_freeze_unlock(sdp);
error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
- LM_FLAG_NOEXP | GL_NOPID,
+ LM_FLAG_RECOVER | GL_NOPID,
&sdp->sd_freeze_gh);
if (error)
goto relock_shared;
@@ -370,7 +360,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
error = gfs2_jdesc_check(jd);
if (error)
break;
- error = gfs2_find_jhead(jd, &lh, false);
+ error = gfs2_find_jhead(jd, &lh);
if (error)
break;
if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
@@ -497,13 +487,11 @@ static void gfs2_dirty_inode(struct inode *inode, int flags)
int need_endtrans = 0;
int ret;
- if (unlikely(!ip->i_gl)) {
- /* This can only happen during incomplete inode creation. */
- BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
+ /* This can only happen during incomplete inode creation. */
+ if (unlikely(!ip->i_gl))
return;
- }
- if (gfs2_withdrawing_or_withdrawn(sdp))
+ if (gfs2_withdrawn(sdp))
return;
if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
@@ -609,13 +597,13 @@ restart:
if (!sb_rdonly(sb))
gfs2_make_fs_ro(sdp);
else {
- if (gfs2_withdrawing_or_withdrawn(sdp))
+ if (gfs2_withdrawn(sdp))
gfs2_destroy_threads(sdp);
gfs2_quota_cleanup(sdp);
}
- WARN_ON(gfs2_withdrawing(sdp));
+ flush_work(&sdp->sd_withdraw_work);
/* At this point, we're through modifying the disk */
@@ -648,7 +636,7 @@ restart:
gfs2_jindex_free(sdp);
/* Take apart glock structures and buffer lists */
gfs2_gl_hash_clear(sdp);
- truncate_inode_pages_final(&sdp->sd_aspace);
+ iput(sdp->sd_inode);
gfs2_delete_debugfs_file(sdp);
gfs2_sys_fs_del(sdp);
@@ -674,7 +662,7 @@ static int gfs2_sync_fs(struct super_block *sb, int wait)
return sdp->sd_log_error;
}
-static int gfs2_do_thaw(struct gfs2_sbd *sdp)
+static int gfs2_do_thaw(struct gfs2_sbd *sdp, enum freeze_holder who, const void *freeze_owner)
{
struct super_block *sb = sdp->sd_vfs;
int error;
@@ -682,7 +670,7 @@ static int gfs2_do_thaw(struct gfs2_sbd *sdp)
error = gfs2_freeze_lock_shared(sdp);
if (error)
goto fail;
- error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
+ error = thaw_super(sb, who, freeze_owner);
if (!error)
return 0;
@@ -703,14 +691,14 @@ void gfs2_freeze_func(struct work_struct *work)
if (test_bit(SDF_FROZEN, &sdp->sd_flags))
goto freeze_failed;
- error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
+ error = freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
if (error)
goto freeze_failed;
gfs2_freeze_unlock(sdp);
set_bit(SDF_FROZEN, &sdp->sd_flags);
- error = gfs2_do_thaw(sdp);
+ error = gfs2_do_thaw(sdp, FREEZE_HOLDER_USERSPACE, NULL);
if (error)
goto out;
@@ -728,10 +716,13 @@ out:
/**
* gfs2_freeze_super - prevent further writes to the filesystem
* @sb: the VFS structure for the filesystem
+ * @who: freeze flags
+ * @freeze_owner: owner of the freeze
*
*/
-static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who)
+static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who,
+ const void *freeze_owner)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
int error;
@@ -744,7 +735,7 @@ static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who)
}
for (;;) {
- error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
+ error = freeze_super(sb, who, freeze_owner);
if (error) {
fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
error);
@@ -758,9 +749,7 @@ static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who)
break;
}
- error = gfs2_do_thaw(sdp);
- if (error)
- goto out;
+ (void)gfs2_do_thaw(sdp, who, freeze_owner);
if (error == -EBUSY)
fs_err(sdp, "waiting for recovery before freeze\n");
@@ -787,7 +776,7 @@ static int gfs2_freeze_fs(struct super_block *sb)
if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
GFS2_LFC_FREEZE_GO_SYNC);
- if (gfs2_withdrawing_or_withdrawn(sdp))
+ if (gfs2_withdrawn(sdp))
return -EIO;
}
return 0;
@@ -796,10 +785,13 @@ static int gfs2_freeze_fs(struct super_block *sb)
/**
* gfs2_thaw_super - reallow writes to the filesystem
* @sb: the VFS structure for the filesystem
+ * @who: freeze flags
+ * @freeze_owner: owner of the freeze
*
*/
-static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who)
+static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who,
+ const void *freeze_owner)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
int error;
@@ -814,7 +806,7 @@ static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who)
atomic_inc(&sb->s_active);
gfs2_freeze_unlock(sdp);
- error = gfs2_do_thaw(sdp);
+ error = gfs2_do_thaw(sdp, who, freeze_owner);
if (!error) {
clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
@@ -825,20 +817,6 @@ static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who)
return error;
}
-void gfs2_thaw_freeze_initiator(struct super_block *sb)
-{
- struct gfs2_sbd *sdp = sb->s_fs_info;
-
- mutex_lock(&sdp->sd_freeze_mutex);
- if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
- goto out;
-
- gfs2_freeze_unlock(sdp);
-
-out:
- mutex_unlock(&sdp->sd_freeze_mutex);
-}
-
/**
* statfs_slow_fill - fill in the sg for a given RG
* @rgd: the RG
@@ -1056,7 +1034,7 @@ static int gfs2_drop_inode(struct inode *inode)
if (test_bit(SDF_EVICTING, &sdp->sd_flags))
return 1;
- return generic_drop_inode(inode);
+ return inode_generic_drop(inode);
}
/**
@@ -1153,6 +1131,9 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
case GFS2_ERRORS_WITHDRAW:
state = "withdraw";
break;
+ case GFS2_ERRORS_DEACTIVATE:
+ state = "deactivate";
+ break;
case GFS2_ERRORS_PANIC:
state = "panic";
break;
@@ -1173,74 +1154,6 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
return 0;
}
-static void gfs2_final_release_pages(struct gfs2_inode *ip)
-{
- struct inode *inode = &ip->i_inode;
- struct gfs2_glock *gl = ip->i_gl;
-
- if (unlikely(!gl)) {
- /* This can only happen during incomplete inode creation. */
- BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
- return;
- }
-
- truncate_inode_pages(gfs2_glock2aspace(gl), 0);
- truncate_inode_pages(&inode->i_data, 0);
-
- if (atomic_read(&gl->gl_revokes) == 0) {
- clear_bit(GLF_LFLUSH, &gl->gl_flags);
- clear_bit(GLF_DIRTY, &gl->gl_flags);
- }
-}
-
-static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
-{
- struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_rgrpd *rgd;
- struct gfs2_holder gh;
- int error;
-
- if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
- gfs2_consist_inode(ip);
- return -EIO;
- }
-
- gfs2_rindex_update(sdp);
-
- error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
- if (error)
- return error;
-
- rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
- if (!rgd) {
- gfs2_consist_inode(ip);
- error = -EIO;
- goto out_qs;
- }
-
- error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
- LM_FLAG_NODE_SCOPE, &gh);
- if (error)
- goto out_qs;
-
- error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
- sdp->sd_jdesc->jd_blocks);
- if (error)
- goto out_rg_gunlock;
-
- gfs2_free_di(rgd, ip);
-
- gfs2_final_release_pages(ip);
-
- gfs2_trans_end(sdp);
-
-out_rg_gunlock:
- gfs2_glock_dq_uninit(&gh);
-out_qs:
- gfs2_quota_unhold(ip);
- return error;
-}
-
/**
* gfs2_glock_put_eventually
* @gl: The glock to put
@@ -1326,10 +1239,8 @@ static enum evict_behavior evict_should_delete(struct inode *inode,
struct gfs2_sbd *sdp = sb->s_fs_info;
int ret;
- if (unlikely(test_bit(GIF_ALLOC_FAILED, &ip->i_flags)))
- goto should_delete;
-
- if (test_bit(GIF_DEFER_DELETE, &ip->i_flags))
+ if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
+ test_bit(GLF_DEFER_DELETE, &ip->i_iopen_gh.gh_gl->gl_flags))
return EVICT_SHOULD_DEFER_DELETE;
/* Deletes should never happen under memory pressure anymore. */
@@ -1338,12 +1249,8 @@ static enum evict_behavior evict_should_delete(struct inode *inode,
/* Must not read inode block until block type has been verified */
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
- if (unlikely(ret)) {
- glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
- ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
- gfs2_glock_dq_uninit(&ip->i_iopen_gh);
- return EVICT_SHOULD_DEFER_DELETE;
- }
+ if (unlikely(ret))
+ return EVICT_SHOULD_SKIP_DELETE;
if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
return EVICT_SHOULD_SKIP_DELETE;
@@ -1361,17 +1268,9 @@ static enum evict_behavior evict_should_delete(struct inode *inode,
if (inode->i_nlink)
return EVICT_SHOULD_SKIP_DELETE;
-should_delete:
if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
- test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
- enum evict_behavior behavior =
- gfs2_upgrade_iopen_glock(inode);
-
- if (behavior != EVICT_SHOULD_DELETE) {
- gfs2_holder_uninit(&ip->i_iopen_gh);
- return behavior;
- }
- }
+ test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
+ return gfs2_upgrade_iopen_glock(inode);
return EVICT_SHOULD_DELETE;
}
@@ -1392,7 +1291,7 @@ static int evict_unlinked_inode(struct inode *inode)
}
if (ip->i_eattr) {
- ret = gfs2_ea_dealloc(ip);
+ ret = gfs2_ea_dealloc(ip, true);
if (ret)
goto out;
}
@@ -1509,7 +1408,7 @@ static void gfs2_evict_inode(struct inode *inode)
gfs2_glock_put(io_gl);
goto out;
}
- behavior = EVICT_SHOULD_DELETE;
+ behavior = EVICT_SHOULD_SKIP_DELETE;
}
if (behavior == EVICT_SHOULD_DELETE)
ret = evict_unlinked_inode(inode);
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index b27a774d9580..173f1e74c2a9 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -47,7 +47,6 @@ void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc,
void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh);
int gfs2_statfs_sync(struct super_block *sb, int type);
void gfs2_freeze_func(struct work_struct *work);
-void gfs2_thaw_freeze_initiator(struct super_block *sb);
void free_local_statfs_inodes(struct gfs2_sbd *sdp);
struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index ecc699f8d9fc..7051db9dbea0 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -59,7 +59,7 @@ static struct kset *gfs2_kset;
static ssize_t id_show(struct gfs2_sbd *sdp, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%u:%u\n",
+ return sysfs_emit(buf, "%u:%u\n",
MAJOR(sdp->sd_vfs->s_dev), MINOR(sdp->sd_vfs->s_dev));
}
@@ -68,7 +68,7 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
unsigned long f = sdp->sd_flags;
ssize_t s;
- s = snprintf(buf, PAGE_SIZE,
+ s = sysfs_emit(buf,
"Journal Checked: %d\n"
"Journal Live: %d\n"
"Journal ID: %d\n"
@@ -84,10 +84,6 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
"Force AIL Flush: %d\n"
"FS Freeze Initiator: %d\n"
"FS Frozen: %d\n"
- "Withdrawing: %d\n"
- "Withdraw In Prog: %d\n"
- "Remote Withdraw: %d\n"
- "Withdraw Recovery: %d\n"
"Killing: %d\n"
"sd_log_error: %d\n"
"sd_log_flush_lock: %d\n"
@@ -117,10 +113,6 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
test_bit(SDF_FORCE_AIL_FLUSH, &f),
test_bit(SDF_FREEZE_INITIATOR, &f),
test_bit(SDF_FROZEN, &f),
- test_bit(SDF_WITHDRAWING, &f),
- test_bit(SDF_WITHDRAW_IN_PROG, &f),
- test_bit(SDF_REMOTE_WITHDRAW, &f),
- test_bit(SDF_WITHDRAW_RECOVERY, &f),
test_bit(SDF_KILL, &f),
sdp->sd_log_error,
rwsem_is_locked(&sdp->sd_log_flush_lock),
@@ -140,7 +132,7 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
+ return sysfs_emit(buf, "%s\n", sdp->sd_fsname);
}
static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
@@ -150,7 +142,7 @@ static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
buf[0] = '\0';
if (uuid_is_null(&s->s_uuid))
return 0;
- return snprintf(buf, PAGE_SIZE, "%pUB\n", &s->s_uuid);
+ return sysfs_emit(buf, "%pUB\n", &s->s_uuid);
}
static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
@@ -158,7 +150,7 @@ static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
struct super_block *sb = sdp->sd_vfs;
int frozen = (sb->s_writers.frozen == SB_UNFROZEN) ? 0 : 1;
- return snprintf(buf, PAGE_SIZE, "%d\n", frozen);
+ return sysfs_emit(buf, "%d\n", frozen);
}
static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
@@ -174,10 +166,10 @@ static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
switch (n) {
case 0:
- error = thaw_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE);
+ error = thaw_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE, NULL);
break;
case 1:
- error = freeze_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE);
+ error = freeze_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE, NULL);
break;
default:
return -EINVAL;
@@ -193,8 +185,8 @@ static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
{
- unsigned int b = gfs2_withdrawing_or_withdrawn(sdp);
- return snprintf(buf, PAGE_SIZE, "%u\n", b);
+ unsigned int b = gfs2_withdrawn(sdp);
+ return sysfs_emit(buf, "%u\n", b);
}
static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
@@ -397,7 +389,7 @@ static struct kobj_type gfs2_ktype = {
static ssize_t proto_name_show(struct gfs2_sbd *sdp, char *buf)
{
const struct lm_lockops *ops = sdp->sd_lockstruct.ls_ops;
- return sprintf(buf, "%s\n", ops->lm_proto_name);
+ return sysfs_emit(buf, "%s\n", ops->lm_proto_name);
}
static ssize_t block_show(struct gfs2_sbd *sdp, char *buf)
@@ -408,7 +400,7 @@ static ssize_t block_show(struct gfs2_sbd *sdp, char *buf)
if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))
val = 1;
- ret = sprintf(buf, "%d\n", val);
+ ret = sysfs_emit(buf, "%d\n", val);
return ret;
}
@@ -433,33 +425,27 @@ static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
return len;
}
-static ssize_t wdack_show(struct gfs2_sbd *sdp, char *buf)
-{
- int val = completion_done(&sdp->sd_wdack) ? 1 : 0;
-
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t wdack_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
+static ssize_t withdraw_helper_status_store(struct gfs2_sbd *sdp,
+ const char *buf,
+ size_t len)
{
int ret, val;
ret = kstrtoint(buf, 0, &val);
if (ret)
return ret;
-
- if ((val == 1) &&
- !strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm"))
- complete(&sdp->sd_wdack);
- else
+ if (val < 0 || val > 1)
return -EINVAL;
+
+ sdp->sd_withdraw_helper_status = val;
+ complete(&sdp->sd_withdraw_helper);
return len;
}
static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
- return sprintf(buf, "%d\n", ls->ls_first);
+ return sysfs_emit(buf, "%d\n", ls->ls_first);
}
static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
@@ -492,7 +478,7 @@ out:
static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
- return sprintf(buf, "%d\n", !!test_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags));
+ return sysfs_emit(buf, "%d\n", !!test_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags));
}
int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid)
@@ -550,18 +536,18 @@ out:
static ssize_t recover_done_show(struct gfs2_sbd *sdp, char *buf)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
- return sprintf(buf, "%d\n", ls->ls_recover_jid_done);
+ return sysfs_emit(buf, "%d\n", ls->ls_recover_jid_done);
}
static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
- return sprintf(buf, "%d\n", ls->ls_recover_jid_status);
+ return sysfs_emit(buf, "%d\n", ls->ls_recover_jid_status);
}
static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
{
- return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
+ return sysfs_emit(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
}
static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
@@ -599,7 +585,7 @@ static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)
GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
GDLM_ATTR(block, 0644, block_show, block_store);
-GDLM_ATTR(withdraw, 0644, wdack_show, wdack_store);
+GDLM_ATTR(withdraw, 0200, NULL, withdraw_helper_status_store);
GDLM_ATTR(jid, 0644, jid_show, jid_store);
GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store);
GDLM_ATTR(first_done, 0444, first_done_show, NULL);
@@ -626,7 +612,7 @@ static struct attribute *lock_module_attrs[] = {
static ssize_t quota_scale_show(struct gfs2_sbd *sdp, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%u %u\n",
+ return sysfs_emit(buf, "%u %u\n",
sdp->sd_tune.gt_quota_scale_num,
sdp->sd_tune.gt_quota_scale_den);
}
@@ -679,7 +665,7 @@ static struct gfs2_attr tune_attr_##name = __ATTR(name, 0644, show, store)
#define TUNE_ATTR_2(name, store) \
static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
{ \
- return snprintf(buf, PAGE_SIZE, "%u\n", sdp->sd_tune.gt_##name); \
+ return sysfs_emit(buf, "%u\n", sdp->sd_tune.gt_##name); \
} \
TUNE_ATTR_3(name, name##_show, store)
@@ -698,6 +684,7 @@ TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
+TUNE_ATTR(withdraw_helper_timeout, 1);
static struct attribute *tune_attrs[] = {
&tune_attr_quota_warn_period.attr,
@@ -708,6 +695,7 @@ static struct attribute *tune_attrs[] = {
&tune_attr_statfs_quantum.attr,
&tune_attr_quota_scale.attr,
&tune_attr_new_files_jdata.attr,
+ &tune_attr_withdraw_helper_timeout.attr,
NULL,
};
@@ -764,7 +752,6 @@ fail_reg:
fs_err(sdp, "error %d adding sysfs files\n", error);
kobject_put(&sdp->sd_kobj);
wait_for_completion(&sdp->sd_kobj_unregister);
- sb->s_fs_info = NULL;
return error;
}
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index 8eae8d62a413..fcfbf68ec725 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -52,13 +52,19 @@
{(1UL << GLF_DEMOTE_IN_PROGRESS), "p" }, \
{(1UL << GLF_DIRTY), "y" }, \
{(1UL << GLF_LFLUSH), "f" }, \
- {(1UL << GLF_INVALIDATE_IN_PROGRESS), "i" }, \
+ {(1UL << GLF_PENDING_REPLY), "R" }, \
{(1UL << GLF_HAVE_REPLY), "r" }, \
{(1UL << GLF_INITIAL), "a" }, \
{(1UL << GLF_HAVE_FROZEN_REPLY), "F" }, \
{(1UL << GLF_LRU), "L" }, \
{(1UL << GLF_OBJECT), "o" }, \
- {(1UL << GLF_BLOCKING), "b" })
+ {(1UL << GLF_BLOCKING), "b" }, \
+ {(1UL << GLF_INSTANTIATE_NEEDED), "n" }, \
+ {(1UL << GLF_INSTANTIATE_IN_PROG), "N" }, \
+ {(1UL << GLF_TRY_TO_EVICT), "e" }, \
+ {(1UL << GLF_VERIFY_DELETE), "E" }, \
+ {(1UL << GLF_DEFER_DELETE), "s" }, \
+ {(1UL << GLF_CANCELING), "C" })
#ifndef NUMPTY
#define NUMPTY
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 192213c7359a..6df65540e13d 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -49,7 +49,7 @@ int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
}
BUG_ON(blocks == 0 && revokes == 0);
- if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
+ if (gfs2_withdrawn(sdp))
return -EROFS;
tr->tr_ip = ip;
@@ -85,25 +85,30 @@ int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
*/
down_read(&sdp->sd_log_flush_lock);
+ if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)))
+ goto out_not_live;
if (gfs2_log_try_reserve(sdp, tr, &extra_revokes))
goto reserved;
+
up_read(&sdp->sd_log_flush_lock);
gfs2_log_reserve(sdp, tr, &extra_revokes);
down_read(&sdp->sd_log_flush_lock);
-
-reserved:
- gfs2_log_release_revokes(sdp, extra_revokes);
if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
- gfs2_log_release_revokes(sdp, tr->tr_revokes);
- up_read(&sdp->sd_log_flush_lock);
+ revokes = tr->tr_revokes + extra_revokes;
+ gfs2_log_release_revokes(sdp, revokes);
gfs2_log_release(sdp, tr->tr_reserved);
- sb_end_intwrite(sdp->sd_vfs);
- return -EROFS;
+ goto out_not_live;
}
+reserved:
+ gfs2_log_release_revokes(sdp, extra_revokes);
current->journal_info = tr;
-
return 0;
+
+out_not_live:
+ up_read(&sdp->sd_log_flush_lock);
+ sb_end_intwrite(sdp->sd_vfs);
+ return -EROFS;
}
int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
@@ -226,6 +231,27 @@ out:
unlock_buffer(bh);
}
+void gfs2_trans_add_databufs(struct gfs2_glock *gl, struct folio *folio,
+ size_t from, size_t len)
+{
+ struct buffer_head *head = folio_buffers(folio);
+ unsigned int bsize = head->b_size;
+ struct buffer_head *bh;
+ size_t to = from + len;
+ size_t start, end;
+
+ for (bh = head, start = 0; bh != head || !start;
+ bh = bh->b_this_page, start = end) {
+ end = start + bsize;
+ if (end <= from)
+ continue;
+ if (start >= to)
+ break;
+ set_buffer_uptodate(bh);
+ gfs2_trans_add_data(gl, bh);
+ }
+}
+
void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
{
@@ -234,7 +260,6 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
struct gfs2_bufdata *bd;
struct gfs2_meta_header *mh;
struct gfs2_trans *tr = current->journal_info;
- bool withdraw = false;
lock_buffer(bh);
if (buffer_pinned(bh)) {
@@ -246,12 +271,12 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
if (bd == NULL) {
gfs2_log_unlock(sdp);
unlock_buffer(bh);
- lock_page(bh->b_page);
+ folio_lock(bh->b_folio);
if (bh->b_private == NULL)
bd = gfs2_alloc_bufdata(gl, bh);
else
bd = bh->b_private;
- unlock_page(bh->b_page);
+ folio_unlock(bh->b_folio);
lock_buffer(bh);
gfs2_log_lock(sdp);
}
@@ -268,14 +293,14 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
(unsigned long long)bd->bd_bh->b_blocknr);
BUG();
}
- if (gfs2_withdrawing_or_withdrawn(sdp)) {
+ if (gfs2_withdrawn(sdp)) {
fs_info(sdp, "GFS2:adding buf while withdrawn! 0x%llx\n",
(unsigned long long)bd->bd_bh->b_blocknr);
goto out_unlock;
}
if (unlikely(sb->s_writers.frozen == SB_FREEZE_COMPLETE)) {
fs_info(sdp, "GFS2:adding buf while frozen\n");
- withdraw = true;
+ gfs2_withdraw(sdp);
goto out_unlock;
}
gfs2_pin(sdp, bd->bd_bh);
@@ -285,8 +310,6 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
tr->tr_num_buf_new++;
out_unlock:
gfs2_log_unlock(sdp);
- if (withdraw)
- gfs2_assert_withdraw(sdp, 0);
out:
unlock_buffer(bh);
}
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index f8ce5302280d..790c55f59e61 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -42,6 +42,8 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
void gfs2_trans_end(struct gfs2_sbd *sdp);
void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
+void gfs2_trans_add_databufs(struct gfs2_glock *gl, struct folio *folio,
+ size_t from, size_t len);
void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 13be8d1d228b..02603200846d 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -58,7 +58,7 @@ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
struct gfs2_inode *ip;
ip = GFS2_I(jd->jd_inode);
- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP |
+ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_RECOVER |
GL_EXACT | GL_NOCACHE, &j_gh);
if (error) {
if (verbose)
@@ -73,7 +73,7 @@ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
"mount.\n");
goto out_unlock;
}
- error = gfs2_find_jhead(jd, &head, false);
+ error = gfs2_find_jhead(jd, &head);
if (error) {
if (verbose)
fs_err(sdp, "Error parsing journal for spectator "
@@ -99,7 +99,7 @@ out_unlock:
*/
int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp)
{
- int flags = LM_FLAG_NOEXP | GL_EXACT;
+ int flags = LM_FLAG_RECOVER | GL_EXACT;
int error;
error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags,
@@ -115,191 +115,32 @@ void gfs2_freeze_unlock(struct gfs2_sbd *sdp)
gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
}
-static void signal_our_withdraw(struct gfs2_sbd *sdp)
+static void do_withdraw(struct gfs2_sbd *sdp)
{
- struct gfs2_glock *live_gl = sdp->sd_live_gh.gh_gl;
- struct inode *inode;
- struct gfs2_inode *ip;
- struct gfs2_glock *i_gl;
- u64 no_formal_ino;
- int ret = 0;
- int tries;
-
- if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc)
+ down_write(&sdp->sd_log_flush_lock);
+ if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+ up_write(&sdp->sd_log_flush_lock);
return;
-
- gfs2_ail_drain(sdp); /* frees all transactions */
- inode = sdp->sd_jdesc->jd_inode;
- ip = GFS2_I(inode);
- i_gl = ip->i_gl;
- no_formal_ino = ip->i_no_formal_ino;
-
- /* Prevent any glock dq until withdraw recovery is complete */
- set_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
- /*
- * Don't tell dlm we're bailing until we have no more buffers in the
- * wind. If journal had an IO error, the log code should just purge
- * the outstanding buffers rather than submitting new IO. Making the
- * file system read-only will flush the journal, etc.
- *
- * During a normal unmount, gfs2_make_fs_ro calls gfs2_log_shutdown
- * which clears SDF_JOURNAL_LIVE. In a withdraw, we must not write
- * any UNMOUNT log header, so we can't call gfs2_log_shutdown, and
- * therefore we need to clear SDF_JOURNAL_LIVE manually.
- */
- clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
- if (!sb_rdonly(sdp->sd_vfs)) {
- bool locked = mutex_trylock(&sdp->sd_freeze_mutex);
-
- wake_up(&sdp->sd_logd_waitq);
- wake_up(&sdp->sd_quota_wait);
-
- wait_event_timeout(sdp->sd_log_waitq,
- gfs2_log_is_empty(sdp),
- HZ * 5);
-
- sdp->sd_vfs->s_flags |= SB_RDONLY;
-
- if (locked)
- mutex_unlock(&sdp->sd_freeze_mutex);
-
- /*
- * Dequeue any pending non-system glock holders that can no
- * longer be granted because the file system is withdrawn.
- */
- gfs2_gl_dq_holders(sdp);
- }
-
- if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */
- if (!ret)
- ret = -EIO;
- clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
- goto skip_recovery;
- }
- /*
- * Drop the glock for our journal so another node can recover it.
- */
- if (gfs2_holder_initialized(&sdp->sd_journal_gh)) {
- gfs2_glock_dq_wait(&sdp->sd_journal_gh);
- gfs2_holder_uninit(&sdp->sd_journal_gh);
- }
- sdp->sd_jinode_gh.gh_flags |= GL_NOCACHE;
- gfs2_glock_dq(&sdp->sd_jinode_gh);
- gfs2_thaw_freeze_initiator(sdp->sd_vfs);
- wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
-
- /*
- * holder_uninit to force glock_put, to force dlm to let go
- */
- gfs2_holder_uninit(&sdp->sd_jinode_gh);
-
- /*
- * Note: We need to be careful here:
- * Our iput of jd_inode will evict it. The evict will dequeue its
- * glock, but the glock dq will wait for the withdraw unless we have
- * exception code in glock_dq.
- */
- iput(inode);
- sdp->sd_jdesc->jd_inode = NULL;
- /*
- * Wait until the journal inode's glock is freed. This allows try locks
- * on other nodes to be successful, otherwise we remain the owner of
- * the glock as far as dlm is concerned.
- */
- if (i_gl->gl_ops->go_unlocked) {
- set_bit(GLF_UNLOCKED, &i_gl->gl_flags);
- wait_on_bit(&i_gl->gl_flags, GLF_UNLOCKED, TASK_UNINTERRUPTIBLE);
}
+ clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+ up_write(&sdp->sd_log_flush_lock);
- /*
- * Dequeue the "live" glock, but keep a reference so it's never freed.
- */
- gfs2_glock_hold(live_gl);
- gfs2_glock_dq_wait(&sdp->sd_live_gh);
- /*
- * We enqueue the "live" glock in EX so that all other nodes
- * get a demote request and act on it. We don't really want the
- * lock in EX, so we send a "try" lock with 1CB to produce a callback.
- */
- fs_warn(sdp, "Requesting recovery of jid %d.\n",
- sdp->sd_lockstruct.ls_jid);
- gfs2_holder_reinit(LM_ST_EXCLUSIVE,
- LM_FLAG_TRY_1CB | LM_FLAG_NOEXP | GL_NOPID,
- &sdp->sd_live_gh);
- msleep(GL_GLOCK_MAX_HOLD);
- /*
- * This will likely fail in a cluster, but succeed standalone:
- */
- ret = gfs2_glock_nq(&sdp->sd_live_gh);
+ gfs2_ail_drain(sdp); /* frees all transactions */
- /*
- * If we actually got the "live" lock in EX mode, there are no other
- * nodes available to replay our journal. So we try to replay it
- * ourselves. We hold the "live" glock to prevent other mounters
- * during recovery, then just dequeue it and reacquire it in our
- * normal SH mode. Just in case the problem that caused us to
- * withdraw prevents us from recovering our journal (e.g. io errors
- * and such) we still check if the journal is clean before proceeding
- * but we may wait forever until another mounter does the recovery.
- */
- if (ret == 0) {
- fs_warn(sdp, "No other mounters found. Trying to recover our "
- "own journal jid %d.\n", sdp->sd_lockstruct.ls_jid);
- if (gfs2_recover_journal(sdp->sd_jdesc, 1))
- fs_warn(sdp, "Unable to recover our journal jid %d.\n",
- sdp->sd_lockstruct.ls_jid);
- gfs2_glock_dq_wait(&sdp->sd_live_gh);
- gfs2_holder_reinit(LM_ST_SHARED,
- LM_FLAG_NOEXP | GL_EXACT | GL_NOPID,
- &sdp->sd_live_gh);
- gfs2_glock_nq(&sdp->sd_live_gh);
- }
+ wake_up(&sdp->sd_logd_waitq);
+ wake_up(&sdp->sd_quota_wait);
- gfs2_glock_put(live_gl); /* drop extra reference we acquired */
- clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
+ wait_event_timeout(sdp->sd_log_waitq,
+ gfs2_log_is_empty(sdp),
+ HZ * 5);
- /*
- * At this point our journal is evicted, so we need to get a new inode
- * for it. Once done, we need to call gfs2_find_jhead which
- * calls gfs2_map_journal_extents to map it for us again.
- *
- * Note that we don't really want it to look up a FREE block. The
- * GFS2_BLKST_FREE simply overrides a block check in gfs2_inode_lookup
- * which would otherwise fail because it requires grabbing an rgrp
- * glock, which would fail with -EIO because we're withdrawing.
- */
- inode = gfs2_inode_lookup(sdp->sd_vfs, DT_UNKNOWN,
- sdp->sd_jdesc->jd_no_addr, no_formal_ino,
- GFS2_BLKST_FREE);
- if (IS_ERR(inode)) {
- fs_warn(sdp, "Reprocessing of jid %d failed with %ld.\n",
- sdp->sd_lockstruct.ls_jid, PTR_ERR(inode));
- goto skip_recovery;
- }
- sdp->sd_jdesc->jd_inode = inode;
- d_mark_dontcache(inode);
+ sdp->sd_vfs->s_flags |= SB_RDONLY;
/*
- * Now wait until recovery is complete.
+ * Dequeue any pending non-system glock holders that can no
+ * longer be granted because the file system is withdrawn.
*/
- for (tries = 0; tries < 10; tries++) {
- ret = check_journal_clean(sdp, sdp->sd_jdesc, false);
- if (!ret)
- break;
- msleep(HZ);
- fs_warn(sdp, "Waiting for journal recovery jid %d.\n",
- sdp->sd_lockstruct.ls_jid);
- }
-skip_recovery:
- if (!ret)
- fs_warn(sdp, "Journal recovery complete for jid %d.\n",
- sdp->sd_lockstruct.ls_jid);
- else
- fs_warn(sdp, "Journal recovery skipped for jid %d until next "
- "mount.\n", sdp->sd_lockstruct.ls_jid);
- fs_warn(sdp, "Glock dequeues delayed: %lu\n", sdp->sd_glock_dqs_held);
- sdp->sd_glock_dqs_held = 0;
- wake_up_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY);
+ gfs2_withdraw_glocks(sdp);
}
void gfs2_lm(struct gfs2_sbd *sdp, const char *fmt, ...)
@@ -318,49 +159,108 @@ void gfs2_lm(struct gfs2_sbd *sdp, const char *fmt, ...)
va_end(args);
}
-int gfs2_withdraw(struct gfs2_sbd *sdp)
+/**
+ * gfs2_offline_uevent - run gfs2_withdraw_helper
+ * @sdp: The GFS2 superblock
+ */
+static bool gfs2_offline_uevent(struct gfs2_sbd *sdp)
+{
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ long timeout;
+
+ /* Skip protocol "lock_nolock" which doesn't require shared storage. */
+ if (!ls->ls_ops->lm_lock)
+ return false;
+
+ /*
+ * The gfs2_withdraw_helper replies by writing one of the following
+ * status codes to "/sys$DEVPATH/lock_module/withdraw":
+ *
+ * 0 - The shared block device has been marked inactive. Future write
+ * operations will fail.
+ *
+ * 1 - The shared block device may still be active and carry out
+ * write operations.
+ *
+	 * If the "offline" uevent isn't acted upon in time, the event
+ * handler is assumed to have failed.
+ */
+
+ sdp->sd_withdraw_helper_status = -1;
+ kobject_uevent(&sdp->sd_kobj, KOBJ_OFFLINE);
+ timeout = gfs2_tune_get(sdp, gt_withdraw_helper_timeout) * HZ;
+ wait_for_completion_timeout(&sdp->sd_withdraw_helper, timeout);
+ if (sdp->sd_withdraw_helper_status == -1) {
+ fs_err(sdp, "%s timed out\n", "gfs2_withdraw_helper");
+ } else {
+ fs_err(sdp, "%s %s with status %d\n",
+ "gfs2_withdraw_helper",
+ sdp->sd_withdraw_helper_status == 0 ?
+ "succeeded" : "failed",
+ sdp->sd_withdraw_helper_status);
+ }
+ return sdp->sd_withdraw_helper_status == 0;
+}
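
As a rough illustration of the sysfs handshake described in the comment above, a userspace withdraw helper might look something like the sketch below. This is only a hedged sketch: the actual gfs2_withdraw_helper is not part of this patch, deactivate_shared_device() is a hypothetical placeholder for whatever mechanism (device-mapper, fencing, etc.) actually stops I/O, and the helper is assumed to be invoked from the KOBJ_OFFLINE uevent with DEVPATH provided by the kernel.

#include <stdio.h>
#include <stdlib.h>

static int deactivate_shared_device(void)
{
	/* Hypothetical placeholder: stop all I/O to the shared device. */
	return 0;
}

int main(void)
{
	const char *devpath = getenv("DEVPATH");
	char path[4096];
	FILE *f;
	int status;

	if (!devpath)
		return 1;
	snprintf(path, sizeof(path), "/sys%s/lock_module/withdraw", devpath);

	/* 0: device marked inactive; 1: device may still carry out writes. */
	status = (deactivate_shared_device() == 0) ? 0 : 1;

	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "%d\n", status);
	fclose(f);
	return 0;
}
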
+
+void gfs2_withdraw_func(struct work_struct *work)
{
+ struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_withdraw_work);
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
const struct lm_lockops *lm = ls->ls_ops;
+ bool device_inactive;
- if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) {
- unsigned long old = READ_ONCE(sdp->sd_flags), new;
-
- do {
- if (old & BIT(SDF_WITHDRAWN)) {
- wait_on_bit(&sdp->sd_flags,
- SDF_WITHDRAW_IN_PROG,
- TASK_UNINTERRUPTIBLE);
- return -1;
- }
- new = old | BIT(SDF_WITHDRAWN) | BIT(SDF_WITHDRAW_IN_PROG);
- } while (unlikely(!try_cmpxchg(&sdp->sd_flags, &old, new)));
+ if (test_bit(SDF_KILL, &sdp->sd_flags))
+ return;
- fs_err(sdp, "about to withdraw this file system\n");
- BUG_ON(sdp->sd_args.ar_debug);
+ BUG_ON(sdp->sd_args.ar_debug);
- signal_our_withdraw(sdp);
+ /*
+ * Try to deactivate the shared block device so that no more I/O will
+ * go through. If successful, we can immediately trigger remote
+ * recovery. Otherwise, we must first empty out all our local caches.
+ */
- kobject_uevent(&sdp->sd_kobj, KOBJ_OFFLINE);
+ device_inactive = gfs2_offline_uevent(sdp);
- if (!strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm"))
- wait_for_completion(&sdp->sd_wdack);
+ if (sdp->sd_args.ar_errors == GFS2_ERRORS_DEACTIVATE && !device_inactive)
+ panic("GFS2: fsid=%s: panic requested\n", sdp->sd_fsname);
- if (lm->lm_unmount) {
- fs_err(sdp, "telling LM to unmount\n");
- lm->lm_unmount(sdp);
+ if (lm->lm_unmount) {
+ if (device_inactive) {
+ lm->lm_unmount(sdp, false);
+ do_withdraw(sdp);
+ } else {
+ do_withdraw(sdp);
+ lm->lm_unmount(sdp, false);
}
- fs_err(sdp, "File system withdrawn\n");
+ } else {
+ do_withdraw(sdp);
+ }
+
+ fs_err(sdp, "file system withdrawn\n");
+}
+
+void gfs2_withdraw(struct gfs2_sbd *sdp)
+{
+ if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW ||
+ sdp->sd_args.ar_errors == GFS2_ERRORS_DEACTIVATE) {
+ if (test_and_set_bit(SDF_WITHDRAWN, &sdp->sd_flags))
+ return;
+
dump_stack();
- clear_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags);
- smp_mb__after_atomic();
- wake_up_bit(&sdp->sd_flags, SDF_WITHDRAW_IN_PROG);
+ /*
+ * There is no need to withdraw when the superblock hasn't been
+	 * fully initialized yet.
+ */
+ if (!(sdp->sd_vfs->s_flags & SB_BORN))
+ return;
+ fs_err(sdp, "about to withdraw this file system\n");
+ schedule_work(&sdp->sd_withdraw_work);
+ return;
}
if (sdp->sd_args.ar_errors == GFS2_ERRORS_PANIC)
panic("GFS2: fsid=%s: panic requested\n", sdp->sd_fsname);
-
- return -1;
}
/*
@@ -368,10 +268,9 @@ int gfs2_withdraw(struct gfs2_sbd *sdp)
*/
void gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion,
- const char *function, char *file, unsigned int line,
- bool delayed)
+ const char *function, char *file, unsigned int line)
{
- if (gfs2_withdrawing_or_withdrawn(sdp))
+ if (gfs2_withdrawn(sdp))
return;
fs_err(sdp,
@@ -379,17 +278,7 @@ void gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion,
"function = %s, file = %s, line = %u\n",
assertion, function, file, line);
- /*
- * If errors=panic was specified on mount, it won't help to delay the
- * withdraw.
- */
- if (sdp->sd_args.ar_errors == GFS2_ERRORS_PANIC)
- delayed = false;
-
- if (delayed)
- gfs2_withdraw_delayed(sdp);
- else
- gfs2_withdraw(sdp);
+ gfs2_withdraw(sdp);
dump_stack();
}
@@ -482,46 +371,36 @@ void gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd,
/*
* gfs2_meta_check_ii - Flag a magic number consistency error and withdraw
- * Returns: -1 if this call withdrew the machine,
- * -2 if it was already withdrawn
*/
-int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
- const char *function, char *file,
- unsigned int line)
+void gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ const char *function, char *file,
+ unsigned int line)
{
- int me;
-
gfs2_lm(sdp,
"fatal: invalid metadata block - "
"bh = %llu (bad magic number), "
"function = %s, file = %s, line = %u\n",
(unsigned long long)bh->b_blocknr,
function, file, line);
- me = gfs2_withdraw(sdp);
- return (me) ? -1 : -2;
+ gfs2_withdraw(sdp);
}
/*
* gfs2_metatype_check_ii - Flag a metadata type consistency error and withdraw
- * Returns: -1 if this call withdrew the machine,
- * -2 if it was already withdrawn
*/
-int gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
- u16 type, u16 t, const char *function,
- char *file, unsigned int line)
+void gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ u16 type, u16 t, const char *function,
+ char *file, unsigned int line)
{
- int me;
-
gfs2_lm(sdp,
"fatal: invalid metadata block - "
"bh = %llu (type: exp=%u, found=%u), "
"function = %s, file = %s, line = %u\n",
(unsigned long long)bh->b_blocknr, type, t,
function, file, line);
- me = gfs2_withdraw(sdp);
- return (me) ? -1 : -2;
+ gfs2_withdraw(sdp);
}
/*
@@ -530,33 +409,29 @@ int gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
* 0 if it was already withdrawn
*/
-int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, char *file,
- unsigned int line)
+void gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, char *file,
+ unsigned int line)
{
gfs2_lm(sdp,
"fatal: I/O error - "
"function = %s, file = %s, line = %u\n",
function, file, line);
- return gfs2_withdraw(sdp);
+ gfs2_withdraw(sdp);
}
/*
- * gfs2_io_error_bh_i - Flag a buffer I/O error
- * @withdraw: withdraw the filesystem
+ * gfs2_io_error_bh_i - Flag a buffer I/O error and withdraw
*/
void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
- const char *function, char *file, unsigned int line,
- bool withdraw)
+ const char *function, char *file, unsigned int line)
{
- if (gfs2_withdrawing_or_withdrawn(sdp))
+ if (gfs2_withdrawn(sdp))
return;
fs_err(sdp, "fatal: I/O error - "
"block = %llu, "
"function = %s, file = %s, line = %u\n",
(unsigned long long)bh->b_blocknr, function, file, line);
- if (withdraw)
- gfs2_withdraw(sdp);
+ gfs2_withdraw(sdp);
}
-
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 27d03b641024..ffcc47d6b0b4 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -37,24 +37,14 @@ do { \
void gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion,
- const char *function, char *file, unsigned int line,
- bool delayed);
+ const char *function, char *file, unsigned int line);
#define gfs2_assert_withdraw(sdp, assertion) \
({ \
bool _bool = (assertion); \
if (unlikely(!_bool)) \
gfs2_assert_withdraw_i((sdp), #assertion, \
- __func__, __FILE__, __LINE__, false); \
- !_bool; \
- })
-
-#define gfs2_assert_withdraw_delayed(sdp, assertion) \
- ({ \
- bool _bool = (assertion); \
- if (unlikely(!_bool)) \
- gfs2_assert_withdraw_i((sdp), #assertion, \
- __func__, __FILE__, __LINE__, true); \
+ __func__, __FILE__, __LINE__); \
!_bool; \
})
@@ -91,9 +81,9 @@ void gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd,
gfs2_consist_rgrpd_i((rgd), __func__, __FILE__, __LINE__)
-int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
- const char *function,
- char *file, unsigned int line);
+void gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ const char *function,
+ char *file, unsigned int line);
static inline int gfs2_meta_check(struct gfs2_sbd *sdp,
struct buffer_head *bh)
@@ -108,10 +98,10 @@ static inline int gfs2_meta_check(struct gfs2_sbd *sdp,
return 0;
}
-int gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
- u16 type, u16 t,
- const char *function,
- char *file, unsigned int line);
+void gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ u16 type, u16 t,
+ const char *function,
+ char *file, unsigned int line);
static inline int gfs2_metatype_check_i(struct gfs2_sbd *sdp,
struct buffer_head *bh,
@@ -122,12 +112,16 @@ static inline int gfs2_metatype_check_i(struct gfs2_sbd *sdp,
struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
u32 magic = be32_to_cpu(mh->mh_magic);
u16 t = be32_to_cpu(mh->mh_type);
- if (unlikely(magic != GFS2_MAGIC))
- return gfs2_meta_check_ii(sdp, bh, function,
- file, line);
- if (unlikely(t != type))
- return gfs2_metatype_check_ii(sdp, bh, type, t, function,
- file, line);
+ if (unlikely(magic != GFS2_MAGIC)) {
+ gfs2_meta_check_ii(sdp, bh, function,
+ file, line);
+ return -EIO;
+ }
+ if (unlikely(t != type)) {
+ gfs2_metatype_check_ii(sdp, bh, type, t, function,
+ file, line);
+ return -EIO;
+ }
return 0;
}
@@ -144,8 +138,8 @@ static inline void gfs2_metatype_set(struct buffer_head *bh, u16 type,
}
-int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
- char *file, unsigned int line);
+void gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
+ char *file, unsigned int line);
int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
bool verbose);
@@ -157,14 +151,10 @@ gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__)
void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
- const char *function, char *file, unsigned int line,
- bool withdraw);
-
-#define gfs2_io_error_bh_wd(sdp, bh) \
-gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__, true)
+ const char *function, char *file, unsigned int line);
#define gfs2_io_error_bh(sdp, bh) \
-gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__, false)
+gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__)
extern struct kmem_cache *gfs2_glock_cachep;
@@ -189,38 +179,12 @@ static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
}
/**
- * gfs2_withdraw_delayed - withdraw as soon as possible without deadlocks
+ * gfs2_withdrawn - test whether the file system is withdrawn
* @sdp: the superblock
*/
-static inline void gfs2_withdraw_delayed(struct gfs2_sbd *sdp)
+static inline bool gfs2_withdrawn(struct gfs2_sbd *sdp)
{
- set_bit(SDF_WITHDRAWING, &sdp->sd_flags);
-}
-
-/**
- * gfs2_withdrawing_or_withdrawn - test whether the file system is withdrawing
- * or withdrawn
- * @sdp: the superblock
- */
-static inline bool gfs2_withdrawing_or_withdrawn(struct gfs2_sbd *sdp)
-{
- return unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
- test_bit(SDF_WITHDRAWING, &sdp->sd_flags));
-}
-
-/**
- * gfs2_withdrawing - check if a withdraw is pending
- * @sdp: the superblock
- */
-static inline bool gfs2_withdrawing(struct gfs2_sbd *sdp)
-{
- return unlikely(test_bit(SDF_WITHDRAWING, &sdp->sd_flags) &&
- !test_bit(SDF_WITHDRAWN, &sdp->sd_flags));
-}
-
-static inline bool gfs2_withdraw_in_prog(struct gfs2_sbd *sdp)
-{
- return unlikely(test_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags));
+ return unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags));
}
#define gfs2_tune_get(sdp, field) \
@@ -228,6 +192,8 @@ gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field)
__printf(2, 3)
void gfs2_lm(struct gfs2_sbd *sdp, const char *fmt, ...);
-int gfs2_withdraw(struct gfs2_sbd *sdp);
+
+void gfs2_withdraw_func(struct work_struct *work);
+void gfs2_withdraw(struct gfs2_sbd *sdp);
#endif /* __UTIL_DOT_H__ */
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 17ae5070a90e..df9c93de94c7 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -1383,7 +1383,7 @@ out:
return error;
}
-static int ea_dealloc_block(struct gfs2_inode *ip)
+static int ea_dealloc_block(struct gfs2_inode *ip, bool initialized)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *rgd;
@@ -1416,7 +1416,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
ip->i_eattr = 0;
gfs2_add_inode_blocks(&ip->i_inode, -1);
- if (likely(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) {
+ if (initialized) {
error = gfs2_meta_inode_buffer(ip, &dibh);
if (!error) {
gfs2_trans_add_meta(ip->i_gl, dibh);
@@ -1435,11 +1435,12 @@ out_gunlock:
/**
* gfs2_ea_dealloc - deallocate the extended attribute fork
* @ip: the inode
+ * @initialized: xattrs have been initialized
*
* Returns: errno
*/
-int gfs2_ea_dealloc(struct gfs2_inode *ip)
+int gfs2_ea_dealloc(struct gfs2_inode *ip, bool initialized)
{
int error;
@@ -1451,7 +1452,7 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip)
if (error)
return error;
- if (likely(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) {
+ if (initialized) {
error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
if (error)
goto out_quota;
@@ -1463,7 +1464,7 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip)
}
}
- error = ea_dealloc_block(ip);
+ error = ea_dealloc_block(ip, initialized);
out_quota:
gfs2_quota_unhold(ip);
diff --git a/fs/gfs2/xattr.h b/fs/gfs2/xattr.h
index eb12eb7e37c1..3c9788e0e137 100644
--- a/fs/gfs2/xattr.h
+++ b/fs/gfs2/xattr.h
@@ -54,7 +54,7 @@ int __gfs2_xattr_set(struct inode *inode, const char *name,
const void *value, size_t size,
int flags, int type);
ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size);
-int gfs2_ea_dealloc(struct gfs2_inode *ip);
+int gfs2_ea_dealloc(struct gfs2_inode *ip, bool initialized);
/* Exported to acl.c */
diff --git a/fs/hfs/.kunitconfig b/fs/hfs/.kunitconfig
new file mode 100644
index 000000000000..5caa9af1e3bb
--- /dev/null
+++ b/fs/hfs/.kunitconfig
@@ -0,0 +1,7 @@
+CONFIG_KUNIT=y
+CONFIG_HFS_FS=y
+CONFIG_HFS_KUNIT_TEST=y
+CONFIG_BLOCK=y
+CONFIG_BUFFER_HEAD=y
+CONFIG_NLS=y
+CONFIG_LEGACY_DIRECT_IO=y
diff --git a/fs/hfs/Kconfig b/fs/hfs/Kconfig
index 5ea5cd8ecea9..7f3cbe43b4b7 100644
--- a/fs/hfs/Kconfig
+++ b/fs/hfs/Kconfig
@@ -13,3 +13,18 @@ config HFS_FS
To compile this file system support as a module, choose M here: the
module will be called hfs.
+
+config HFS_KUNIT_TEST
+ tristate "KUnit tests for HFS filesystem" if !KUNIT_ALL_TESTS
+ depends on HFS_FS && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds KUnit tests for the HFS filesystem.
+
+ KUnit tests run during boot and output the results to the debug
+	  log in TAP format (https://testanything.org/). Only useful for
+	  kernel devs running the KUnit test harness; these tests are not
+	  intended for inclusion in a production build.
+
+ For more information on KUnit and unit tests in general please
+ refer to the KUnit documentation in Documentation/dev-tools/kunit/.
diff --git a/fs/hfs/Makefile b/fs/hfs/Makefile
index b65459bf3dc4..a7c9ce6b4609 100644
--- a/fs/hfs/Makefile
+++ b/fs/hfs/Makefile
@@ -9,3 +9,5 @@ hfs-objs := bitmap.o bfind.o bnode.o brec.o btree.o \
catalog.o dir.o extent.o inode.o attr.o mdb.o \
part_tbl.o string.o super.o sysdep.o trans.o
+# KUnit tests
+obj-$(CONFIG_HFS_KUNIT_TEST) += string_test.o
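
The string_test.c source backing the string_test.o object added above is not part of this excerpt. As an illustration only, a minimal KUnit suite for the HFS string helpers could be sketched as follows; the hfs_strcmp() signature is assumed from fs/hfs/hfs_fs.h, and the test and suite names are made up.

// SPDX-License-Identifier: GPL-2.0
#include <kunit/test.h>
#include <linux/module.h>

#include "hfs_fs.h"	/* assumed to declare hfs_strcmp() */

static void hfs_strcmp_equal_test(struct kunit *test)
{
	/* Identical Mac-Roman names are expected to compare equal (0). */
	KUNIT_EXPECT_EQ(test, 0,
			hfs_strcmp((const unsigned char *)"file", 4,
				   (const unsigned char *)"file", 4));
}

static struct kunit_case hfs_string_test_cases[] = {
	KUNIT_CASE(hfs_strcmp_equal_test),
	{}
};

static struct kunit_suite hfs_string_test_suite = {
	.name = "hfs_string",
	.test_cases = hfs_string_test_cases,
};
kunit_test_suite(hfs_string_test_suite);

MODULE_DESCRIPTION("Illustrative KUnit sketch for HFS string helpers");
MODULE_LICENSE("GPL");
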
diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
index ef9498a6e88a..d56e47bdc517 100644
--- a/fs/hfs/bfind.c
+++ b/fs/hfs/bfind.c
@@ -16,14 +16,17 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
{
void *ptr;
+ if (!tree || !fd)
+ return -EINVAL;
+
fd->tree = tree;
fd->bnode = NULL;
- ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
+ ptr = kzalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
fd->search_key = ptr;
fd->key = ptr + tree->max_key_len + 2;
- hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
+ hfs_dbg("cnid %d, caller %ps\n",
tree->cnid, __builtin_return_address(0));
switch (tree->cnid) {
case HFS_CAT_CNID:
@@ -45,7 +48,7 @@ void hfs_find_exit(struct hfs_find_data *fd)
{
hfs_bnode_put(fd->bnode);
kfree(fd->search_key);
- hfs_dbg(BNODE_REFS, "find_exit: %d (%p)\n",
+ hfs_dbg("cnid %d, caller %ps\n",
fd->tree->cnid, __builtin_return_address(0));
mutex_unlock(&fd->tree->tree_lock);
fd->tree = NULL;
@@ -112,6 +115,12 @@ int hfs_brec_find(struct hfs_find_data *fd)
__be32 data;
int height, res;
+ fd->record = -1;
+ fd->keyoffset = -1;
+ fd->keylength = -1;
+ fd->entryoffset = -1;
+ fd->entrylength = -1;
+
tree = fd->tree;
if (fd->bnode)
hfs_bnode_put(fd->bnode);
@@ -158,7 +167,7 @@ release:
return res;
}
-int hfs_brec_read(struct hfs_find_data *fd, void *rec, int rec_len)
+int hfs_brec_read(struct hfs_find_data *fd, void *rec, u32 rec_len)
{
int res;
diff --git a/fs/hfs/bitmap.c b/fs/hfs/bitmap.c
index 28307bc9ec1e..5e84833a4743 100644
--- a/fs/hfs/bitmap.c
+++ b/fs/hfs/bitmap.c
@@ -158,7 +158,7 @@ u32 hfs_vbm_search_free(struct super_block *sb, u32 goal, u32 *num_bits)
}
}
- hfs_dbg(BITMAP, "alloc_bits: %u,%u\n", pos, *num_bits);
+ hfs_dbg("pos %u, num_bits %u\n", pos, *num_bits);
HFS_SB(sb)->free_ablocks -= *num_bits;
hfs_bitmap_dirty(sb);
out:
@@ -200,7 +200,7 @@ int hfs_clear_vbm_bits(struct super_block *sb, u16 start, u16 count)
if (!count)
return 0;
- hfs_dbg(BITMAP, "clear_bits: %u,%u\n", start, count);
+ hfs_dbg("start %u, count %u\n", start, count);
/* are all of the bits in range? */
if ((start + count) > HFS_SB(sb)->fs_ablocks)
return -2;
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index 6add6ebfef89..13d58c51fc46 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -15,12 +15,68 @@
#include "btree.h"
-void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
+static inline
+bool is_bnode_offset_valid(struct hfs_bnode *node, u32 off)
+{
+ bool is_valid = off < node->tree->node_size;
+
+ if (!is_valid) {
+ pr_err("requested invalid offset: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off);
+ }
+
+ return is_valid;
+}
+
+static inline
+u32 check_and_correct_requested_length(struct hfs_bnode *node, u32 off, u32 len)
+{
+ unsigned int node_size;
+
+ if (!is_bnode_offset_valid(node, off))
+ return 0;
+
+ node_size = node->tree->node_size;
+
+ if ((off + len) > node_size) {
+ u32 new_len = node_size - off;
+
+ pr_err("requested length has been corrected: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u, "
+ "requested_len %u, corrected_len %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off, len, new_len);
+
+ return new_len;
+ }
+
+ return len;
+}
+
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, u32 off, u32 len)
{
struct page *page;
- int pagenum;
- int bytes_read;
- int bytes_to_read;
+ u32 pagenum;
+ u32 bytes_read;
+ u32 bytes_to_read;
+
+ if (!is_bnode_offset_valid(node, off))
+ return;
+
+ if (len == 0) {
+ pr_err("requested zero length: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u, len %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off, len);
+ return;
+ }
+
+ len = check_and_correct_requested_length(node, off, len);
off += node->page_offset;
pagenum = off >> PAGE_SHIFT;
@@ -30,7 +86,7 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
if (pagenum >= node->tree->pages_per_bnode)
break;
page = node->page[pagenum];
- bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
+ bytes_to_read = min_t(u32, len - bytes_read, PAGE_SIZE - off);
memcpy_from_page(buf + bytes_read, page, off, bytes_to_read);
@@ -39,7 +95,7 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
}
}
-u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
+u16 hfs_bnode_read_u16(struct hfs_bnode *node, u32 off)
{
__be16 data;
// optimize later...
@@ -47,7 +103,7 @@ u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
return be16_to_cpu(data);
}
-u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
+u8 hfs_bnode_read_u8(struct hfs_bnode *node, u32 off)
{
u8 data;
// optimize later...
@@ -55,10 +111,10 @@ u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
return data;
}
-void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
+void hfs_bnode_read_key(struct hfs_bnode *node, void *key, u32 off)
{
struct hfs_btree *tree;
- int key_len;
+ u32 key_len;
tree = node->tree;
if (node->type == HFS_NODE_LEAF ||
@@ -67,13 +123,33 @@ void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
else
key_len = tree->max_key_len + 1;
+ if (key_len > sizeof(hfs_btree_key) || key_len < 1) {
+ memset(key, 0, sizeof(hfs_btree_key));
+ pr_err("hfs: Invalid key length: %u\n", key_len);
+ return;
+ }
+
hfs_bnode_read(node, key, off, key_len);
}
-void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
+void hfs_bnode_write(struct hfs_bnode *node, void *buf, u32 off, u32 len)
{
struct page *page;
+ if (!is_bnode_offset_valid(node, off))
+ return;
+
+ if (len == 0) {
+ pr_err("requested zero length: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u, len %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off, len);
+ return;
+ }
+
+ len = check_and_correct_requested_length(node, off, len);
+
off += node->page_offset;
page = node->page[0];
@@ -81,23 +157,37 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
set_page_dirty(page);
}
-void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
+void hfs_bnode_write_u16(struct hfs_bnode *node, u32 off, u16 data)
{
__be16 v = cpu_to_be16(data);
// optimize later...
hfs_bnode_write(node, &v, off, 2);
}
-void hfs_bnode_write_u8(struct hfs_bnode *node, int off, u8 data)
+void hfs_bnode_write_u8(struct hfs_bnode *node, u32 off, u8 data)
{
// optimize later...
hfs_bnode_write(node, &data, off, 1);
}
-void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
+void hfs_bnode_clear(struct hfs_bnode *node, u32 off, u32 len)
{
struct page *page;
+ if (!is_bnode_offset_valid(node, off))
+ return;
+
+ if (len == 0) {
+ pr_err("requested zero length: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u, len %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off, len);
+ return;
+ }
+
+ len = check_and_correct_requested_length(node, off, len);
+
off += node->page_offset;
page = node->page[0];
@@ -105,14 +195,18 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
set_page_dirty(page);
}
-void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
- struct hfs_bnode *src_node, int src, int len)
+void hfs_bnode_copy(struct hfs_bnode *dst_node, u32 dst,
+ struct hfs_bnode *src_node, u32 src, u32 len)
{
struct page *src_page, *dst_page;
- hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
+ hfs_dbg("dst %u, src %u, len %u\n", dst, src, len);
if (!len)
return;
+
+ len = check_and_correct_requested_length(src_node, src, len);
+ len = check_and_correct_requested_length(dst_node, dst, len);
+
src += src_node->page_offset;
dst += dst_node->page_offset;
src_page = src_node->page[0];
@@ -122,14 +216,18 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
set_page_dirty(dst_page);
}
-void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
+void hfs_bnode_move(struct hfs_bnode *node, u32 dst, u32 src, u32 len)
{
struct page *page;
void *ptr;
- hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
+ hfs_dbg("dst %u, src %u, len %u\n", dst, src, len);
if (!len)
return;
+
+ len = check_and_correct_requested_length(node, src, len);
+ len = check_and_correct_requested_length(node, dst, len);
+
src += node->page_offset;
dst += node->page_offset;
page = node->page[0];
@@ -145,16 +243,16 @@ void hfs_bnode_dump(struct hfs_bnode *node)
__be32 cnid;
int i, off, key_off;
- hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
+ hfs_dbg("node %d\n", node->this);
hfs_bnode_read(node, &desc, 0, sizeof(desc));
- hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
+ hfs_dbg("next %d, prev %d, type %d, height %d, num_recs %d\n",
be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
desc.type, desc.height, be16_to_cpu(desc.num_recs));
off = node->tree->node_size - 2;
for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
key_off = hfs_bnode_read_u16(node, off);
- hfs_dbg_cont(BNODE_MOD, " %d", key_off);
+ hfs_dbg(" key_off %d", key_off);
if (i && node->type == HFS_NODE_INDEX) {
int tmp;
@@ -162,18 +260,18 @@ void hfs_bnode_dump(struct hfs_bnode *node)
tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1;
else
tmp = node->tree->max_key_len + 1;
- hfs_dbg_cont(BNODE_MOD, " (%d,%d",
- tmp, hfs_bnode_read_u8(node, key_off));
+ hfs_dbg(" (%d,%d",
+ tmp, hfs_bnode_read_u8(node, key_off));
hfs_bnode_read(node, &cnid, key_off + tmp, 4);
- hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
+ hfs_dbg(", cnid %d)", be32_to_cpu(cnid));
} else if (i && node->type == HFS_NODE_LEAF) {
int tmp;
tmp = hfs_bnode_read_u8(node, key_off);
- hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
+ hfs_dbg(" (%d)", tmp);
}
}
- hfs_dbg_cont(BNODE_MOD, "\n");
+ hfs_dbg("\n");
}
void hfs_bnode_unlink(struct hfs_bnode *node)
@@ -263,7 +361,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
node->this = cnid;
set_bit(HFS_BNODE_NEW, &node->flags);
atomic_set(&node->refcnt, 1);
- hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
+ hfs_dbg("cnid %d, node %d, refcnt 1\n",
node->tree->cnid, node->this);
init_waitqueue_head(&node->lock_wq);
spin_lock(&tree->hash_lock);
@@ -303,7 +401,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node)
{
struct hfs_bnode **p;
- hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
+ hfs_dbg("cnid %d, node %d, refcnt %d\n",
node->tree->cnid, node->this, atomic_read(&node->refcnt));
for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
*p && *p != node; p = &(*p)->next_hash)
@@ -448,7 +546,7 @@ void hfs_bnode_get(struct hfs_bnode *node)
{
if (node) {
atomic_inc(&node->refcnt);
- hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
+ hfs_dbg("cnid %d, node %d, refcnt %d\n",
node->tree->cnid, node->this,
atomic_read(&node->refcnt));
}
@@ -461,7 +559,7 @@ void hfs_bnode_put(struct hfs_bnode *node)
struct hfs_btree *tree = node->tree;
int i;
- hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
+ hfs_dbg("cnid %d, node %d, refcnt %d\n",
node->tree->cnid, node->this,
atomic_read(&node->refcnt));
BUG_ON(!atomic_read(&node->refcnt));
@@ -476,6 +574,7 @@ void hfs_bnode_put(struct hfs_bnode *node)
if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
hfs_bnode_unhash(node);
spin_unlock(&tree->hash_lock);
+ hfs_bnode_clear(node, 0, tree->node_size);
hfs_bmap_free(node);
hfs_bnode_free(node);
return;
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
index 896396554bcc..5a2f740ddefd 100644
--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -62,7 +62,7 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
return retval;
}
-int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
+int hfs_brec_insert(struct hfs_find_data *fd, void *entry, u32 entry_len)
{
struct hfs_btree *tree;
struct hfs_bnode *node, *new_node;
@@ -94,7 +94,7 @@ again:
end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
end_off = hfs_bnode_read_u16(node, end_rec_off);
end_rec_off -= 2;
- hfs_dbg(BNODE_MOD, "insert_rec: %d, %d, %d, %d\n",
+ hfs_dbg("rec %d, size %d, end_off %d, end_rec_off %d\n",
rec, size, end_off, end_rec_off);
if (size > end_rec_off - end_off) {
if (new_node)
@@ -179,6 +179,7 @@ int hfs_brec_remove(struct hfs_find_data *fd)
struct hfs_btree *tree;
struct hfs_bnode *node, *parent;
int end_off, rec_off, data_off, size;
+ int src, dst, len;
tree = fd->tree;
node = fd->bnode;
@@ -191,7 +192,7 @@ again:
mark_inode_dirty(tree->inode);
}
hfs_bnode_dump(node);
- hfs_dbg(BNODE_MOD, "remove_rec: %d, %d\n",
+ hfs_dbg("rec %d, len %d\n",
fd->record, fd->keylength + fd->entrylength);
if (!--node->num_recs) {
hfs_bnode_unlink(node);
@@ -208,10 +209,14 @@ again:
}
hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs);
- if (rec_off == end_off)
- goto skip;
size = fd->keylength + fd->entrylength;
+ if (rec_off == end_off) {
+ src = fd->keyoffset;
+ hfs_bnode_clear(node, src, size);
+ goto skip;
+ }
+
do {
data_off = hfs_bnode_read_u16(node, rec_off);
hfs_bnode_write_u16(node, rec_off + 2, data_off - size);
@@ -219,9 +224,23 @@ again:
} while (rec_off >= end_off);
/* fill hole */
- hfs_bnode_move(node, fd->keyoffset, fd->keyoffset + size,
- data_off - fd->keyoffset - size);
+ dst = fd->keyoffset;
+ src = fd->keyoffset + size;
+ len = data_off - src;
+
+ hfs_bnode_move(node, dst, src, len);
+
+ src = dst + len;
+ len = data_off - src;
+
+ hfs_bnode_clear(node, src, len);
+
skip:
+ /*
+ * Remove the obsolete offset to free space.
+ */
+ hfs_bnode_write_u16(node, end_off, 0);
+
hfs_bnode_dump(node);
if (!fd->record)
hfs_brec_update_parent(fd);
@@ -242,7 +261,7 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
if (IS_ERR(new_node))
return new_node;
hfs_bnode_get(node);
- hfs_dbg(BNODE_MOD, "split_nodes: %d - %d - %d\n",
+ hfs_dbg("this %d, new %d, next %d\n",
node->this, new_node->this, node->next);
new_node->next = node->next;
new_node->prev = node->this;
@@ -378,7 +397,7 @@ again:
newkeylen = (hfs_bnode_read_u8(node, 14) | 1) + 1;
else
fd->keylength = newkeylen = tree->max_key_len + 1;
- hfs_dbg(BNODE_MOD, "update_rec: %d, %d, %d\n",
+ hfs_dbg("rec %d, keylength %d, newkeylen %d\n",
rec, fd->keylength, newkeylen);
rec_off = tree->node_size - (rec + 2) * 2;
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 2fa4b1f8cc7f..7bc425283d49 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -21,8 +21,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
struct hfs_btree *tree;
struct hfs_btree_header_rec *head;
struct address_space *mapping;
- struct page *page;
+ struct folio *folio;
+ struct buffer_head *bh;
unsigned int size;
+ u16 dblock;
+ sector_t start_block;
+ loff_t offset;
tree = kzalloc(sizeof(*tree), GFP_KERNEL);
if (!tree)
@@ -38,7 +42,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
tree->inode = iget_locked(sb, id);
if (!tree->inode)
goto free_tree;
- BUG_ON(!(tree->inode->i_state & I_NEW));
+ BUG_ON(!(inode_state_read_once(tree->inode) & I_NEW));
{
struct hfs_mdb *mdb = HFS_SB(sb)->mdb;
HFS_I(tree->inode)->flags = 0;
@@ -75,12 +79,40 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
unlock_new_inode(tree->inode);
mapping = tree->inode->i_mapping;
- page = read_mapping_page(mapping, 0, NULL);
- if (IS_ERR(page))
+ folio = filemap_grab_folio(mapping, 0);
+ if (IS_ERR(folio))
goto free_inode;
+ folio_zero_range(folio, 0, folio_size(folio));
+
+ dblock = hfs_ext_find_block(HFS_I(tree->inode)->first_extents, 0);
+ start_block = HFS_SB(sb)->fs_start + (dblock * HFS_SB(sb)->fs_div);
+
+ size = folio_size(folio);
+ offset = 0;
+ while (size > 0) {
+ size_t len;
+
+ bh = sb_bread(sb, start_block);
+ if (!bh) {
+ pr_err("unable to read tree header\n");
+ goto put_folio;
+ }
+
+ len = min_t(size_t, folio_size(folio), sb->s_blocksize);
+ memcpy_to_folio(folio, offset, bh->b_data, sb->s_blocksize);
+
+ brelse(bh);
+
+ start_block++;
+ offset += len;
+ size -= len;
+ }
+
+ folio_mark_uptodate(folio);
+
/* Load the header */
- head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
+ head = (struct hfs_btree_header_rec *)(kmap_local_folio(folio, 0) +
sizeof(struct hfs_bnode_desc));
tree->root = be32_to_cpu(head->root);
tree->leaf_count = be32_to_cpu(head->leaf_count);
@@ -95,22 +127,22 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
size = tree->node_size;
if (!is_power_of_2(size))
- goto fail_page;
+ goto fail_folio;
if (!tree->node_count)
- goto fail_page;
+ goto fail_folio;
switch (id) {
case HFS_EXT_CNID:
if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) {
pr_err("invalid extent max_key_len %d\n",
tree->max_key_len);
- goto fail_page;
+ goto fail_folio;
}
break;
case HFS_CAT_CNID:
if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) {
pr_err("invalid catalog max_key_len %d\n",
tree->max_key_len);
- goto fail_page;
+ goto fail_folio;
}
break;
default:
@@ -121,12 +153,15 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
kunmap_local(head);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return tree;
-fail_page:
+fail_folio:
kunmap_local(head);
- put_page(page);
+put_folio:
+ folio_unlock(folio);
+ folio_put(folio);
free_inode:
tree->inode->i_mapping->a_ops = &hfs_aops;
iput(tree->inode);
@@ -224,7 +259,7 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
}
/* Make sure @tree has enough space for the @rsvd_nodes */
-int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
+int hfs_bmap_reserve(struct hfs_btree *tree, u32 rsvd_nodes)
{
struct inode *inode = tree->inode;
u32 count;
@@ -329,7 +364,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
u32 nidx;
u8 *data, byte, m;
- hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
+ hfs_dbg("node %u\n", node->this);
tree = node->tree;
nidx = node->this;
node = hfs_bnode_find(tree, 0);
diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
index 0e6baee93245..99be858b2446 100644
--- a/fs/hfs/btree.h
+++ b/fs/hfs/btree.h
@@ -86,87 +86,46 @@ struct hfs_find_data {
/* btree.c */
-extern struct hfs_btree *hfs_btree_open(struct super_block *, u32, btree_keycmp);
-extern void hfs_btree_close(struct hfs_btree *);
-extern void hfs_btree_write(struct hfs_btree *);
-extern int hfs_bmap_reserve(struct hfs_btree *, int);
-extern struct hfs_bnode * hfs_bmap_alloc(struct hfs_btree *);
+extern struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id,
+ btree_keycmp keycmp);
+extern void hfs_btree_close(struct hfs_btree *tree);
+extern void hfs_btree_write(struct hfs_btree *tree);
+extern int hfs_bmap_reserve(struct hfs_btree *tree, u32 rsvd_nodes);
+extern struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree);
extern void hfs_bmap_free(struct hfs_bnode *node);
/* bnode.c */
-extern void hfs_bnode_read(struct hfs_bnode *, void *, int, int);
-extern u16 hfs_bnode_read_u16(struct hfs_bnode *, int);
-extern u8 hfs_bnode_read_u8(struct hfs_bnode *, int);
-extern void hfs_bnode_read_key(struct hfs_bnode *, void *, int);
-extern void hfs_bnode_write(struct hfs_bnode *, void *, int, int);
-extern void hfs_bnode_write_u16(struct hfs_bnode *, int, u16);
-extern void hfs_bnode_write_u8(struct hfs_bnode *, int, u8);
-extern void hfs_bnode_clear(struct hfs_bnode *, int, int);
-extern void hfs_bnode_copy(struct hfs_bnode *, int,
- struct hfs_bnode *, int, int);
-extern void hfs_bnode_move(struct hfs_bnode *, int, int, int);
-extern void hfs_bnode_dump(struct hfs_bnode *);
-extern void hfs_bnode_unlink(struct hfs_bnode *);
-extern struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *, u32);
-extern struct hfs_bnode *hfs_bnode_find(struct hfs_btree *, u32);
-extern void hfs_bnode_unhash(struct hfs_bnode *);
-extern void hfs_bnode_free(struct hfs_bnode *);
-extern struct hfs_bnode *hfs_bnode_create(struct hfs_btree *, u32);
-extern void hfs_bnode_get(struct hfs_bnode *);
-extern void hfs_bnode_put(struct hfs_bnode *);
+extern void hfs_bnode_read(struct hfs_bnode *node, void *buf, u32 off, u32 len);
+extern u16 hfs_bnode_read_u16(struct hfs_bnode *node, u32 off);
+extern u8 hfs_bnode_read_u8(struct hfs_bnode *node, u32 off);
+extern void hfs_bnode_read_key(struct hfs_bnode *node, void *key, u32 off);
+extern void hfs_bnode_write(struct hfs_bnode *node, void *buf, u32 off, u32 len);
+extern void hfs_bnode_write_u16(struct hfs_bnode *node, u32 off, u16 data);
+extern void hfs_bnode_write_u8(struct hfs_bnode *node, u32 off, u8 data);
+extern void hfs_bnode_clear(struct hfs_bnode *node, u32 off, u32 len);
+extern void hfs_bnode_copy(struct hfs_bnode *dst_node, u32 dst,
+ struct hfs_bnode *src_node, u32 src, u32 len);
+extern void hfs_bnode_move(struct hfs_bnode *node, u32 dst, u32 src, u32 len);
+extern void hfs_bnode_dump(struct hfs_bnode *node);
+extern void hfs_bnode_unlink(struct hfs_bnode *node);
+extern struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid);
+extern struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num);
+extern void hfs_bnode_unhash(struct hfs_bnode *node);
+extern void hfs_bnode_free(struct hfs_bnode *node);
+extern struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num);
+extern void hfs_bnode_get(struct hfs_bnode *node);
+extern void hfs_bnode_put(struct hfs_bnode *node);
/* brec.c */
-extern u16 hfs_brec_lenoff(struct hfs_bnode *, u16, u16 *);
-extern u16 hfs_brec_keylen(struct hfs_bnode *, u16);
-extern int hfs_brec_insert(struct hfs_find_data *, void *, int);
-extern int hfs_brec_remove(struct hfs_find_data *);
+extern u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off);
+extern u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec);
+extern int hfs_brec_insert(struct hfs_find_data *fd, void *entry, u32 entry_len);
+extern int hfs_brec_remove(struct hfs_find_data *fd);
/* bfind.c */
-extern int hfs_find_init(struct hfs_btree *, struct hfs_find_data *);
-extern void hfs_find_exit(struct hfs_find_data *);
-extern int __hfs_brec_find(struct hfs_bnode *, struct hfs_find_data *);
-extern int hfs_brec_find(struct hfs_find_data *);
-extern int hfs_brec_read(struct hfs_find_data *, void *, int);
-extern int hfs_brec_goto(struct hfs_find_data *, int);
-
-
-struct hfs_bnode_desc {
- __be32 next; /* (V) Number of the next node at this level */
- __be32 prev; /* (V) Number of the prev node at this level */
- u8 type; /* (F) The type of node */
- u8 height; /* (F) The level of this node (leaves=1) */
- __be16 num_recs; /* (V) The number of records in this node */
- u16 reserved;
-} __packed;
-
-#define HFS_NODE_INDEX 0x00 /* An internal (index) node */
-#define HFS_NODE_HEADER 0x01 /* The tree header node (node 0) */
-#define HFS_NODE_MAP 0x02 /* Holds part of the bitmap of used nodes */
-#define HFS_NODE_LEAF 0xFF /* A leaf (ndNHeight==1) node */
-
-struct hfs_btree_header_rec {
- __be16 depth; /* (V) The number of levels in this B-tree */
- __be32 root; /* (V) The node number of the root node */
- __be32 leaf_count; /* (V) The number of leaf records */
- __be32 leaf_head; /* (V) The number of the first leaf node */
- __be32 leaf_tail; /* (V) The number of the last leaf node */
- __be16 node_size; /* (F) The number of bytes in a node (=512) */
- __be16 max_key_len; /* (F) The length of a key in an index node */
- __be32 node_count; /* (V) The total number of nodes */
- __be32 free_nodes; /* (V) The number of unused nodes */
- u16 reserved1;
- __be32 clump_size; /* (F) clump size. not usually used. */
- u8 btree_type; /* (F) BTree type */
- u8 reserved2;
- __be32 attributes; /* (F) attributes */
- u32 reserved3[16];
-} __packed;
-
-#define BTREE_ATTR_BADCLOSE 0x00000001 /* b-tree not closed properly. not
- used by hfsplus. */
-#define HFS_TREE_BIGKEYS 0x00000002 /* key length is u16 instead of u8.
- used by hfsplus. */
-#define HFS_TREE_VARIDXKEYS 0x00000004 /* variable key length instead of
- max key length. use din catalog
- b-tree but not in extents
- b-tree (hfsplus). */
+extern int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd);
+extern void hfs_find_exit(struct hfs_find_data *fd);
+extern int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd);
+extern int hfs_brec_find(struct hfs_find_data *fd);
+extern int hfs_brec_read(struct hfs_find_data *fd, void *rec, u32 rec_len);
+extern int hfs_brec_goto(struct hfs_find_data *fd, int cnt);
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index d63880e7d9d6..b80ba40e3877 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -87,7 +87,7 @@ int hfs_cat_create(u32 cnid, struct inode *dir, const struct qstr *str, struct i
int entry_size;
int err;
- hfs_dbg(CAT_MOD, "create_cat: %s,%u(%d)\n",
+ hfs_dbg("name %s, cnid %u, i_nlink %d\n",
str->name, cnid, inode->i_nlink);
if (dir->i_size >= HFS_MAX_VALENCE)
return -ENOSPC;
@@ -211,6 +211,124 @@ int hfs_cat_find_brec(struct super_block *sb, u32 cnid,
return hfs_brec_find(fd);
}
+static inline
+void hfs_set_next_unused_CNID(struct super_block *sb,
+ u32 deleted_cnid, u32 found_cnid)
+{
+ if (found_cnid < HFS_FIRSTUSER_CNID) {
+ atomic64_cmpxchg(&HFS_SB(sb)->next_id,
+ deleted_cnid + 1, HFS_FIRSTUSER_CNID);
+ } else {
+ atomic64_cmpxchg(&HFS_SB(sb)->next_id,
+ deleted_cnid + 1, found_cnid + 1);
+ }
+}
+
+/*
+ * hfs_correct_next_unused_CNID()
+ *
+ * Correct the next unused CNID of the Catalog Tree.
+ */
+static
+int hfs_correct_next_unused_CNID(struct super_block *sb, u32 cnid)
+{
+ struct hfs_btree *cat_tree;
+ struct hfs_bnode *node;
+ s64 leaf_head;
+ s64 leaf_tail;
+ s64 node_id;
+
+ hfs_dbg("cnid %u, next_id %lld\n",
+ cnid, atomic64_read(&HFS_SB(sb)->next_id));
+
+ if ((cnid + 1) < atomic64_read(&HFS_SB(sb)->next_id)) {
+ /* next ID should be unchanged */
+ return 0;
+ }
+
+ cat_tree = HFS_SB(sb)->cat_tree;
+ leaf_head = cat_tree->leaf_head;
+ leaf_tail = cat_tree->leaf_tail;
+
+ if (leaf_head > leaf_tail) {
+ pr_err("node is corrupted: leaf_head %lld, leaf_tail %lld\n",
+ leaf_head, leaf_tail);
+ return -ERANGE;
+ }
+
+ node = hfs_bnode_find(cat_tree, leaf_tail);
+ if (IS_ERR(node)) {
+		pr_err("failed to find leaf node: node ID %lld\n",
+ leaf_tail);
+ return -ENOENT;
+ }
+
+ node_id = leaf_tail;
+
+ do {
+ int i;
+
+ if (node_id != leaf_tail) {
+ node = hfs_bnode_find(cat_tree, node_id);
+ if (IS_ERR(node))
+ return -ENOENT;
+ }
+
+ hfs_dbg("node %lld, leaf_tail %lld, leaf_head %lld\n",
+ node_id, leaf_tail, leaf_head);
+
+ hfs_bnode_dump(node);
+
+ for (i = node->num_recs - 1; i >= 0; i--) {
+ hfs_cat_rec rec;
+ u16 off, len, keylen;
+ int entryoffset;
+ int entrylength;
+ u32 found_cnid;
+
+ len = hfs_brec_lenoff(node, i, &off);
+ keylen = hfs_brec_keylen(node, i);
+ if (keylen == 0) {
+				pr_err("failed to get the keylen: "
+ "node_id %lld, record index %d\n",
+ node_id, i);
+ return -EINVAL;
+ }
+
+ entryoffset = off + keylen;
+ entrylength = len - keylen;
+
+ if (entrylength > sizeof(rec)) {
+ pr_err("unexpected record length: "
+ "entrylength %d\n",
+ entrylength);
+ return -EINVAL;
+ }
+
+ hfs_bnode_read(node, &rec, entryoffset, entrylength);
+
+ if (rec.type == HFS_CDR_DIR) {
+ found_cnid = be32_to_cpu(rec.dir.DirID);
+ hfs_dbg("found_cnid %u\n", found_cnid);
+ hfs_set_next_unused_CNID(sb, cnid, found_cnid);
+ hfs_bnode_put(node);
+ return 0;
+ } else if (rec.type == HFS_CDR_FIL) {
+ found_cnid = be32_to_cpu(rec.file.FlNum);
+ hfs_dbg("found_cnid %u\n", found_cnid);
+ hfs_set_next_unused_CNID(sb, cnid, found_cnid);
+ hfs_bnode_put(node);
+ return 0;
+ }
+ }
+
+ node_id = node->prev;
+ hfs_bnode_put(node);
+
+ } while (node_id >= leaf_head);
+
+ return -ENOENT;
+}
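
The atomic64_cmpxchg() in hfs_set_next_unused_CNID() above only rolls next_id back while it still equals deleted_cnid + 1, i.e. while no later allocation has advanced the counter. A minimal userspace analogue of that compare-and-exchange pattern, with illustrative values and C11 atomics rather than kernel primitives:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* next_id as it would look right after allocating CNID 100. */
	_Atomic int64_t next_id = 101;
	int64_t deleted_cnid = 100, found_cnid = 42;
	int64_t expected = deleted_cnid + 1;

	/* Succeeds only if next_id still equals deleted_cnid + 1. */
	if (atomic_compare_exchange_strong(&next_id, &expected, found_cnid + 1))
		printf("rolled back, next_id = %lld\n", (long long)next_id);
	else
		printf("unchanged, next_id = %lld\n", (long long)next_id);
	return 0;
}
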
/*
* hfs_cat_delete()
@@ -225,7 +343,7 @@ int hfs_cat_delete(u32 cnid, struct inode *dir, const struct qstr *str)
struct hfs_readdir_data *rd;
int res, type;
- hfs_dbg(CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid);
+ hfs_dbg("name %s, cnid %u\n", str ? str->name : NULL, cnid);
sb = dir->i_sb;
res = hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
if (res)
@@ -271,6 +389,11 @@ int hfs_cat_delete(u32 cnid, struct inode *dir, const struct qstr *str)
dir->i_size--;
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
+
+ res = hfs_correct_next_unused_CNID(sb, cnid);
+ if (res)
+ goto out;
+
res = 0;
out:
hfs_find_exit(&fd);
@@ -294,7 +417,7 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
int entry_size, type;
int err;
- hfs_dbg(CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
+ hfs_dbg("cnid %u - (ino %lu, name %s) - (ino %lu, name %s)\n",
cnid, src_dir->i_ino, src_name->name,
dst_dir->i_ino, dst_name->name);
sb = src_dir->i_sb;
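The hfs_set_next_unused_CNID() helper added above only updates next_id when it still holds the value the deleted object left behind, so a concurrent hfs_new_inode() that has already advanced the counter is never clobbered. A minimal sketch of that compare-and-swap idea (not the patch's exact code; hfs_shrink_next_id() is a made-up name, and the atomic64_t next_id field is the one this series adds to struct hfs_sb_info):

/*
 * Illustrative sketch only: shrink next_id after deleting @deleted_cnid,
 * but never overwrite a value a concurrent allocation already advanced.
 */
static void hfs_shrink_next_id(struct hfs_sb_info *sbi,
                               u32 deleted_cnid, u32 last_used_cnid)
{
        s64 expected = (s64)deleted_cnid + 1;   /* what the last create left behind */
        s64 new_id = max_t(s64, (s64)last_used_cnid + 1, HFS_FIRSTUSER_CNID);

        /* no-op unless next_id still equals the deleted CNID + 1 */
        atomic64_cmpxchg(&sbi->next_id, expected, new_id);
}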
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index b75c26045df4..86a6b317b474 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -219,26 +219,26 @@ static int hfs_create(struct mnt_idmap *idmap, struct inode *dir,
* in a directory, given the inode for the parent directory and the
* name (and its length) of the new directory.
*/
-static int hfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *hfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode *inode;
int res;
inode = hfs_new_inode(dir, &dentry->d_name, S_IFDIR | mode);
if (!inode)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
res = hfs_cat_create(inode->i_ino, dir, &dentry->d_name, inode);
if (res) {
clear_nlink(inode);
hfs_delete_inode(inode);
iput(inode);
- return res;
+ return ERR_PTR(res);
}
d_instantiate(dentry, inode);
mark_inode_dirty(inode);
- return 0;
+ return NULL;
}
/*
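The hfs_mkdir() conversion above follows the ->mkdir signature that returns a struct dentry * rather than an int: as the hunk shows, NULL signals success with the passed-in dentry already instantiated, and failures travel as ERR_PTR(). A minimal sketch of that convention for a hypothetical filesystem (the foofs_* names are placeholders, not real kernel code):

static struct dentry *foofs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
                                  struct dentry *dentry, umode_t mode)
{
        struct inode *inode;

        inode = foofs_new_inode(dir, &dentry->d_name, S_IFDIR | mode); /* placeholder */
        if (IS_ERR(inode))
                return ERR_CAST(inode);         /* error reported as ERR_PTR() */

        d_instantiate(dentry, inode);
        return NULL;                            /* success: caller keeps using @dentry */
}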
diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
index 4a0ce131e233..a097908b269d 100644
--- a/fs/hfs/extent.c
+++ b/fs/hfs/extent.c
@@ -71,7 +71,7 @@ int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2)
*
* Find a block within an extent record
*/
-static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off)
+u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off)
{
int i;
u16 count;
@@ -209,12 +209,12 @@ static void hfs_dump_extent(struct hfs_extent *extent)
{
int i;
- hfs_dbg(EXTENT, " ");
+ hfs_dbg("extent: ");
for (i = 0; i < 3; i++)
- hfs_dbg_cont(EXTENT, " %u:%u",
- be16_to_cpu(extent[i].block),
- be16_to_cpu(extent[i].count));
- hfs_dbg_cont(EXTENT, "\n");
+ hfs_dbg(" block %u, count %u",
+ be16_to_cpu(extent[i].block),
+ be16_to_cpu(extent[i].count));
+ hfs_dbg("\n");
}
static int hfs_add_extent(struct hfs_extent *extent, u16 offset,
@@ -411,10 +411,11 @@ int hfs_extend_file(struct inode *inode)
goto out;
}
- hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
+ hfs_dbg("ino %lu, start %u, len %u\n", inode->i_ino, start, len);
if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) {
if (!HFS_I(inode)->first_blocks) {
- hfs_dbg(EXTENT, "first extents\n");
+ hfs_dbg("first_extent: start %u, len %u\n",
+ start, len);
/* no extents yet */
HFS_I(inode)->first_extents[0].block = cpu_to_be16(start);
HFS_I(inode)->first_extents[0].count = cpu_to_be16(len);
@@ -456,7 +457,7 @@ out:
return res;
insert_extent:
- hfs_dbg(EXTENT, "insert new extent\n");
+ hfs_dbg("insert new extent\n");
res = hfs_ext_write_extent(inode);
if (res)
goto out;
@@ -481,7 +482,7 @@ void hfs_file_truncate(struct inode *inode)
u32 size;
int res;
- hfs_dbg(INODE, "truncate: %lu, %Lu -> %Lu\n",
+ hfs_dbg("ino %lu, phys_size %llu -> i_size %llu\n",
inode->i_ino, (long long)HFS_I(inode)->phys_size,
inode->i_size);
if (inode->i_size > HFS_I(inode)->phys_size) {
diff --git a/fs/hfs/hfs.h b/fs/hfs/hfs.h
index 6f194d0768b6..3f2293ff6fdd 100644
--- a/fs/hfs/hfs.h
+++ b/fs/hfs/hfs.h
@@ -9,274 +9,7 @@
#ifndef _HFS_H
#define _HFS_H
-/* offsets to various blocks */
-#define HFS_DD_BLK 0 /* Driver Descriptor block */
-#define HFS_PMAP_BLK 1 /* First block of partition map */
-#define HFS_MDB_BLK 2 /* Block (w/i partition) of MDB */
-
-/* magic numbers for various disk blocks */
-#define HFS_DRVR_DESC_MAGIC 0x4552 /* "ER": driver descriptor map */
-#define HFS_OLD_PMAP_MAGIC 0x5453 /* "TS": old-type partition map */
-#define HFS_NEW_PMAP_MAGIC 0x504D /* "PM": new-type partition map */
-#define HFS_SUPER_MAGIC 0x4244 /* "BD": HFS MDB (super block) */
-#define HFS_MFS_SUPER_MAGIC 0xD2D7 /* MFS MDB (super block) */
-
-/* various FIXED size parameters */
-#define HFS_SECTOR_SIZE 512 /* size of an HFS sector */
-#define HFS_SECTOR_SIZE_BITS 9 /* log_2(HFS_SECTOR_SIZE) */
-#define HFS_NAMELEN 31 /* maximum length of an HFS filename */
-#define HFS_MAX_NAMELEN 128
-#define HFS_MAX_VALENCE 32767U
-
-/* Meanings of the drAtrb field of the MDB,
- * Reference: _Inside Macintosh: Files_ p. 2-61
- */
-#define HFS_SB_ATTRIB_HLOCK (1 << 7)
-#define HFS_SB_ATTRIB_UNMNT (1 << 8)
-#define HFS_SB_ATTRIB_SPARED (1 << 9)
-#define HFS_SB_ATTRIB_INCNSTNT (1 << 11)
-#define HFS_SB_ATTRIB_SLOCK (1 << 15)
-
-/* Some special File ID numbers */
-#define HFS_POR_CNID 1 /* Parent Of the Root */
-#define HFS_ROOT_CNID 2 /* ROOT directory */
-#define HFS_EXT_CNID 3 /* EXTents B-tree */
-#define HFS_CAT_CNID 4 /* CATalog B-tree */
-#define HFS_BAD_CNID 5 /* BAD blocks file */
-#define HFS_ALLOC_CNID 6 /* ALLOCation file (HFS+) */
-#define HFS_START_CNID 7 /* STARTup file (HFS+) */
-#define HFS_ATTR_CNID 8 /* ATTRibutes file (HFS+) */
-#define HFS_EXCH_CNID 15 /* ExchangeFiles temp id */
-#define HFS_FIRSTUSER_CNID 16
-
-/* values for hfs_cat_rec.cdrType */
-#define HFS_CDR_DIR 0x01 /* folder (directory) */
-#define HFS_CDR_FIL 0x02 /* file */
-#define HFS_CDR_THD 0x03 /* folder (directory) thread */
-#define HFS_CDR_FTH 0x04 /* file thread */
-
-/* legal values for hfs_ext_key.FkType and hfs_file.fork */
-#define HFS_FK_DATA 0x00
-#define HFS_FK_RSRC 0xFF
-
-/* bits in hfs_fil_entry.Flags */
-#define HFS_FIL_LOCK 0x01 /* locked */
-#define HFS_FIL_THD 0x02 /* file thread */
-#define HFS_FIL_DOPEN 0x04 /* data fork open */
-#define HFS_FIL_ROPEN 0x08 /* resource fork open */
-#define HFS_FIL_DIR 0x10 /* directory (always clear) */
-#define HFS_FIL_NOCOPY 0x40 /* copy-protected file */
-#define HFS_FIL_USED 0x80 /* open */
-
-/* bits in hfs_dir_entry.Flags. dirflags is 16 bits. */
-#define HFS_DIR_LOCK 0x01 /* locked */
-#define HFS_DIR_THD 0x02 /* directory thread */
-#define HFS_DIR_INEXPFOLDER 0x04 /* in a shared area */
-#define HFS_DIR_MOUNTED 0x08 /* mounted */
-#define HFS_DIR_DIR 0x10 /* directory (always set) */
-#define HFS_DIR_EXPFOLDER 0x20 /* share point */
-
-/* bits hfs_finfo.fdFlags */
-#define HFS_FLG_INITED 0x0100
-#define HFS_FLG_LOCKED 0x1000
-#define HFS_FLG_INVISIBLE 0x4000
-
-/*======== HFS structures as they appear on the disk ========*/
-
-/* Pascal-style string of up to 31 characters */
-struct hfs_name {
- u8 len;
- u8 name[HFS_NAMELEN];
-} __packed;
-
-struct hfs_point {
- __be16 v;
- __be16 h;
-} __packed;
-
-struct hfs_rect {
- __be16 top;
- __be16 left;
- __be16 bottom;
- __be16 right;
-} __packed;
-
-struct hfs_finfo {
- __be32 fdType;
- __be32 fdCreator;
- __be16 fdFlags;
- struct hfs_point fdLocation;
- __be16 fdFldr;
-} __packed;
-
-struct hfs_fxinfo {
- __be16 fdIconID;
- u8 fdUnused[8];
- __be16 fdComment;
- __be32 fdPutAway;
-} __packed;
-
-struct hfs_dinfo {
- struct hfs_rect frRect;
- __be16 frFlags;
- struct hfs_point frLocation;
- __be16 frView;
-} __packed;
-
-struct hfs_dxinfo {
- struct hfs_point frScroll;
- __be32 frOpenChain;
- __be16 frUnused;
- __be16 frComment;
- __be32 frPutAway;
-} __packed;
-
-union hfs_finder_info {
- struct {
- struct hfs_finfo finfo;
- struct hfs_fxinfo fxinfo;
- } file;
- struct {
- struct hfs_dinfo dinfo;
- struct hfs_dxinfo dxinfo;
- } dir;
-} __packed;
-
-/* Cast to a pointer to a generic bkey */
-#define HFS_BKEY(X) (((void)((X)->KeyLen)), ((struct hfs_bkey *)(X)))
-
-/* The key used in the catalog b-tree: */
-struct hfs_cat_key {
- u8 key_len; /* number of bytes in the key */
- u8 reserved; /* padding */
- __be32 ParID; /* CNID of the parent dir */
- struct hfs_name CName; /* The filename of the entry */
-} __packed;
-
-/* The key used in the extents b-tree: */
-struct hfs_ext_key {
- u8 key_len; /* number of bytes in the key */
- u8 FkType; /* HFS_FK_{DATA,RSRC} */
- __be32 FNum; /* The File ID of the file */
- __be16 FABN; /* allocation blocks number*/
-} __packed;
-
-typedef union hfs_btree_key {
- u8 key_len; /* number of bytes in the key */
- struct hfs_cat_key cat;
- struct hfs_ext_key ext;
-} hfs_btree_key;
-
-#define HFS_MAX_CAT_KEYLEN (sizeof(struct hfs_cat_key) - sizeof(u8))
-#define HFS_MAX_EXT_KEYLEN (sizeof(struct hfs_ext_key) - sizeof(u8))
-
-typedef union hfs_btree_key btree_key;
-
-struct hfs_extent {
- __be16 block;
- __be16 count;
-};
-typedef struct hfs_extent hfs_extent_rec[3];
-
-/* The catalog record for a file */
-struct hfs_cat_file {
- s8 type; /* The type of entry */
- u8 reserved;
- u8 Flags; /* Flags such as read-only */
- s8 Typ; /* file version number = 0 */
- struct hfs_finfo UsrWds; /* data used by the Finder */
- __be32 FlNum; /* The CNID */
- __be16 StBlk; /* obsolete */
- __be32 LgLen; /* The logical EOF of the data fork*/
- __be32 PyLen; /* The physical EOF of the data fork */
- __be16 RStBlk; /* obsolete */
- __be32 RLgLen; /* The logical EOF of the rsrc fork */
- __be32 RPyLen; /* The physical EOF of the rsrc fork */
- __be32 CrDat; /* The creation date */
- __be32 MdDat; /* The modified date */
- __be32 BkDat; /* The last backup date */
- struct hfs_fxinfo FndrInfo; /* more data for the Finder */
- __be16 ClpSize; /* number of bytes to allocate
- when extending files */
- hfs_extent_rec ExtRec; /* first extent record
- for the data fork */
- hfs_extent_rec RExtRec; /* first extent record
- for the resource fork */
- u32 Resrv; /* reserved by Apple */
-} __packed;
-
-/* the catalog record for a directory */
-struct hfs_cat_dir {
- s8 type; /* The type of entry */
- u8 reserved;
- __be16 Flags; /* flags */
- __be16 Val; /* Valence: number of files and
- dirs in the directory */
- __be32 DirID; /* The CNID */
- __be32 CrDat; /* The creation date */
- __be32 MdDat; /* The modification date */
- __be32 BkDat; /* The last backup date */
- struct hfs_dinfo UsrInfo; /* data used by the Finder */
- struct hfs_dxinfo FndrInfo; /* more data used by Finder */
- u8 Resrv[16]; /* reserved by Apple */
-} __packed;
-
-/* the catalog record for a thread */
-struct hfs_cat_thread {
- s8 type; /* The type of entry */
- u8 reserved[9]; /* reserved by Apple */
- __be32 ParID; /* CNID of parent directory */
- struct hfs_name CName; /* The name of this entry */
-} __packed;
-
-/* A catalog tree record */
-typedef union hfs_cat_rec {
- s8 type; /* The type of entry */
- struct hfs_cat_file file;
- struct hfs_cat_dir dir;
- struct hfs_cat_thread thread;
-} hfs_cat_rec;
-
-struct hfs_mdb {
- __be16 drSigWord; /* Signature word indicating fs type */
- __be32 drCrDate; /* fs creation date/time */
- __be32 drLsMod; /* fs modification date/time */
- __be16 drAtrb; /* fs attributes */
- __be16 drNmFls; /* number of files in root directory */
- __be16 drVBMSt; /* location (in 512-byte blocks)
- of the volume bitmap */
- __be16 drAllocPtr; /* location (in allocation blocks)
- to begin next allocation search */
- __be16 drNmAlBlks; /* number of allocation blocks */
- __be32 drAlBlkSiz; /* bytes in an allocation block */
- __be32 drClpSiz; /* clumpsize, the number of bytes to
- allocate when extending a file */
- __be16 drAlBlSt; /* location (in 512-byte blocks)
- of the first allocation block */
- __be32 drNxtCNID; /* CNID to assign to the next
- file or directory created */
- __be16 drFreeBks; /* number of free allocation blocks */
- u8 drVN[28]; /* the volume label */
- __be32 drVolBkUp; /* fs backup date/time */
- __be16 drVSeqNum; /* backup sequence number */
- __be32 drWrCnt; /* fs write count */
- __be32 drXTClpSiz; /* clumpsize for the extents B-tree */
- __be32 drCTClpSiz; /* clumpsize for the catalog B-tree */
- __be16 drNmRtDirs; /* number of directories in
- the root directory */
- __be32 drFilCnt; /* number of files in the fs */
- __be32 drDirCnt; /* number of directories in the fs */
- u8 drFndrInfo[32]; /* data used by the Finder */
- __be16 drEmbedSigWord; /* embedded volume signature */
- __be32 drEmbedExtent; /* starting block number (xdrStABN)
- and number of allocation blocks
- (xdrNumABlks) occupied by embedded
- volume */
- __be32 drXTFlSize; /* bytes in the extents B-tree */
- hfs_extent_rec drXTExtRec; /* extents B-tree's first 3 extents */
- __be32 drCTFlSize; /* bytes in the catalog B-tree */
- hfs_extent_rec drCTExtRec; /* catalog B-tree's first 3 extents */
-} __packed;
+#include <linux/hfs_common.h>
/*======== Data structures kept in memory ========*/
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index a0c7cb0f79fc..e94dbc04a1e4 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -9,12 +9,6 @@
#ifndef _LINUX_HFS_FS_H
#define _LINUX_HFS_FS_H
-#ifdef pr_fmt
-#undef pr_fmt
-#endif
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mutex.h>
@@ -27,32 +21,6 @@
#include "hfs.h"
-#define DBG_BNODE_REFS 0x00000001
-#define DBG_BNODE_MOD 0x00000002
-#define DBG_CAT_MOD 0x00000004
-#define DBG_INODE 0x00000008
-#define DBG_SUPER 0x00000010
-#define DBG_EXTENT 0x00000020
-#define DBG_BITMAP 0x00000040
-
-//#define DBG_MASK (DBG_EXTENT|DBG_INODE|DBG_BNODE_MOD|DBG_CAT_MOD|DBG_BITMAP)
-//#define DBG_MASK (DBG_BNODE_MOD|DBG_CAT_MOD|DBG_INODE)
-//#define DBG_MASK (DBG_CAT_MOD|DBG_BNODE_REFS|DBG_INODE|DBG_EXTENT)
-#define DBG_MASK (0)
-
-#define hfs_dbg(flg, fmt, ...) \
-do { \
- if (DBG_##flg & DBG_MASK) \
- printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
-} while (0)
-
-#define hfs_dbg_cont(flg, fmt, ...) \
-do { \
- if (DBG_##flg & DBG_MASK) \
- pr_cont(fmt, ##__VA_ARGS__); \
-} while (0)
-
-
/*
* struct hfs_inode_info
*
@@ -112,13 +80,13 @@ struct hfs_sb_info {
the extents b-tree */
struct hfs_btree *cat_tree; /* Information about
the catalog b-tree */
- u32 file_count; /* The number of
+ atomic64_t file_count; /* The number of
regular files in
the filesystem */
- u32 folder_count; /* The number of
+ atomic64_t folder_count; /* The number of
directories in the
filesystem */
- u32 next_id; /* The next available
+ atomic64_t next_id; /* The next available
file id number */
u32 clumpablks; /* The number of allocation
blocks to try to add when
@@ -171,73 +139,90 @@ struct hfs_sb_info {
#define HFS_FLG_ALT_MDB_DIRTY 2
/* bitmap.c */
-extern u32 hfs_vbm_search_free(struct super_block *, u32, u32 *);
-extern int hfs_clear_vbm_bits(struct super_block *, u16, u16);
+extern u32 hfs_vbm_search_free(struct super_block *sb, u32 goal, u32 *num_bits);
+extern int hfs_clear_vbm_bits(struct super_block *sb, u16 start, u16 count);
/* catalog.c */
-extern int hfs_cat_keycmp(const btree_key *, const btree_key *);
+extern int hfs_cat_keycmp(const btree_key *key1, const btree_key *key2);
struct hfs_find_data;
-extern int hfs_cat_find_brec(struct super_block *, u32, struct hfs_find_data *);
-extern int hfs_cat_create(u32, struct inode *, const struct qstr *, struct inode *);
-extern int hfs_cat_delete(u32, struct inode *, const struct qstr *);
-extern int hfs_cat_move(u32, struct inode *, const struct qstr *,
- struct inode *, const struct qstr *);
-extern void hfs_cat_build_key(struct super_block *, btree_key *, u32, const struct qstr *);
+extern int hfs_cat_find_brec(struct super_block *sb, u32 cnid,
+ struct hfs_find_data *fd);
+extern int hfs_cat_create(u32 cnid, struct inode *dir,
+ const struct qstr *str, struct inode *inode);
+extern int hfs_cat_delete(u32 cnid, struct inode *dir, const struct qstr *str);
+extern int hfs_cat_move(u32 cnid, struct inode *src_dir,
+ const struct qstr *src_name,
+ struct inode *dst_dir,
+ const struct qstr *dst_name);
+extern void hfs_cat_build_key(struct super_block *sb, btree_key *key,
+ u32 parent, const struct qstr *name);
/* dir.c */
extern const struct file_operations hfs_dir_operations;
extern const struct inode_operations hfs_dir_inode_operations;
/* extent.c */
-extern int hfs_ext_keycmp(const btree_key *, const btree_key *);
-extern int hfs_free_fork(struct super_block *, struct hfs_cat_file *, int);
-extern int hfs_ext_write_extent(struct inode *);
-extern int hfs_extend_file(struct inode *);
-extern void hfs_file_truncate(struct inode *);
+extern int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2);
+extern u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off);
+extern int hfs_free_fork(struct super_block *sb,
+ struct hfs_cat_file *file, int type);
+extern int hfs_ext_write_extent(struct inode *inode);
+extern int hfs_extend_file(struct inode *inode);
+extern void hfs_file_truncate(struct inode *inode);
-extern int hfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
+extern int hfs_get_block(struct inode *inode, sector_t block,
+ struct buffer_head *bh_result, int create);
/* inode.c */
extern const struct address_space_operations hfs_aops;
extern const struct address_space_operations hfs_btree_aops;
-int hfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, struct folio **foliop, void **fsdata);
-extern struct inode *hfs_new_inode(struct inode *, const struct qstr *, umode_t);
-extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *);
-extern int hfs_write_inode(struct inode *, struct writeback_control *);
-extern int hfs_inode_setattr(struct mnt_idmap *, struct dentry *,
- struct iattr *);
+int hfs_write_begin(const struct kiocb *iocb, struct address_space *mapping,
+ loff_t pos, unsigned int len, struct folio **foliop,
+ void **fsdata);
+extern struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name,
+ umode_t mode);
+extern void hfs_inode_write_fork(struct inode *inode, struct hfs_extent *ext,
+ __be32 *log_size, __be32 *phys_size);
+extern int hfs_write_inode(struct inode *inode, struct writeback_control *wbc);
+extern int hfs_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr);
extern void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
- __be32 log_size, __be32 phys_size, u32 clump_size);
-extern struct inode *hfs_iget(struct super_block *, struct hfs_cat_key *, hfs_cat_rec *);
-extern void hfs_evict_inode(struct inode *);
-extern void hfs_delete_inode(struct inode *);
+ __be32 __log_size, __be32 phys_size,
+ u32 clump_size);
+extern struct inode *hfs_iget(struct super_block *sb, struct hfs_cat_key *key,
+ hfs_cat_rec *rec);
+extern void hfs_evict_inode(struct inode *inode);
+extern void hfs_delete_inode(struct inode *inode);
/* attr.c */
extern const struct xattr_handler * const hfs_xattr_handlers[];
/* mdb.c */
-extern int hfs_mdb_get(struct super_block *);
-extern void hfs_mdb_commit(struct super_block *);
-extern void hfs_mdb_close(struct super_block *);
-extern void hfs_mdb_put(struct super_block *);
+extern int hfs_mdb_get(struct super_block *sb);
+extern void hfs_mdb_commit(struct super_block *sb);
+extern void hfs_mdb_close(struct super_block *sb);
+extern void hfs_mdb_put(struct super_block *sb);
/* part_tbl.c */
-extern int hfs_part_find(struct super_block *, sector_t *, sector_t *);
+extern int hfs_part_find(struct super_block *sb,
+ sector_t *part_start, sector_t *part_size);
/* string.c */
extern const struct dentry_operations hfs_dentry_operations;
-extern int hfs_hash_dentry(const struct dentry *, struct qstr *);
-extern int hfs_strcmp(const unsigned char *, unsigned int,
- const unsigned char *, unsigned int);
+extern int hfs_hash_dentry(const struct dentry *dentry, struct qstr *this);
+extern int hfs_strcmp(const unsigned char *s1, unsigned int len1,
+ const unsigned char *s2, unsigned int len2);
extern int hfs_compare_dentry(const struct dentry *dentry,
- unsigned int len, const char *str, const struct qstr *name);
+ unsigned int len, const char *str,
+ const struct qstr *name);
/* trans.c */
-extern void hfs_asc2mac(struct super_block *, struct hfs_name *, const struct qstr *);
-extern int hfs_mac2asc(struct super_block *, char *, const struct hfs_name *);
+extern void hfs_asc2mac(struct super_block *sb,
+ struct hfs_name *out, const struct qstr *in);
+extern int hfs_mac2asc(struct super_block *sb,
+ char *out, const struct hfs_name *in);
/* super.c */
extern void hfs_mark_mdb_dirty(struct super_block *sb);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index a81ce7a740b9..524db1389737 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -44,12 +44,13 @@ static void hfs_write_failed(struct address_space *mapping, loff_t to)
}
}
-int hfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
+int hfs_write_begin(const struct kiocb *iocb, struct address_space *mapping,
+ loff_t pos, unsigned int len, struct folio **foliop,
+ void **fsdata)
{
int ret;
- ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
+ ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
hfs_get_block,
&HFS_I(mapping->host)->phys_size);
if (unlikely(ret))
@@ -183,6 +184,10 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
{
struct super_block *sb = dir->i_sb;
struct inode *inode = new_inode(sb);
+ s64 next_id;
+ s64 file_count;
+ s64 folder_count;
+
if (!inode)
return NULL;
@@ -190,7 +195,9 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
spin_lock_init(&HFS_I(inode)->open_dir_lock);
hfs_cat_build_key(sb, (btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name);
- inode->i_ino = HFS_SB(sb)->next_id++;
+ next_id = atomic64_inc_return(&HFS_SB(sb)->next_id);
+ BUG_ON(next_id > U32_MAX);
+ inode->i_ino = (u32)next_id;
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
@@ -202,7 +209,8 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
HFS_I(inode)->tz_secondswest = sys_tz.tz_minuteswest * 60;
if (S_ISDIR(mode)) {
inode->i_size = 2;
- HFS_SB(sb)->folder_count++;
+ folder_count = atomic64_inc_return(&HFS_SB(sb)->folder_count);
+ BUG_ON(folder_count > U32_MAX);
if (dir->i_ino == HFS_ROOT_CNID)
HFS_SB(sb)->root_dirs++;
inode->i_op = &hfs_dir_inode_operations;
@@ -211,7 +219,8 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
inode->i_mode &= ~HFS_SB(inode->i_sb)->s_dir_umask;
} else if (S_ISREG(mode)) {
HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks;
- HFS_SB(sb)->file_count++;
+ file_count = atomic64_inc_return(&HFS_SB(sb)->file_count);
+ BUG_ON(file_count > U32_MAX);
if (dir->i_ino == HFS_ROOT_CNID)
HFS_SB(sb)->root_files++;
inode->i_op = &hfs_file_inode_operations;
@@ -241,16 +250,19 @@ void hfs_delete_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
- hfs_dbg(INODE, "delete_inode: %lu\n", inode->i_ino);
+ hfs_dbg("ino %lu\n", inode->i_ino);
if (S_ISDIR(inode->i_mode)) {
- HFS_SB(sb)->folder_count--;
+ BUG_ON(atomic64_read(&HFS_SB(sb)->folder_count) > U32_MAX);
+ atomic64_dec(&HFS_SB(sb)->folder_count);
if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
HFS_SB(sb)->root_dirs--;
set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
hfs_mark_mdb_dirty(sb);
return;
}
- HFS_SB(sb)->file_count--;
+
+ BUG_ON(atomic64_read(&HFS_SB(sb)->file_count) > U32_MAX);
+ atomic64_dec(&HFS_SB(sb)->file_count);
if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
HFS_SB(sb)->root_files--;
if (S_ISREG(inode->i_mode)) {
@@ -401,7 +413,7 @@ struct inode *hfs_iget(struct super_block *sb, struct hfs_cat_key *key, hfs_cat_
return NULL;
}
inode = iget5_locked(sb, cnid, hfs_test_inode, hfs_read_inode, &data);
- if (inode && (inode->i_state & I_NEW))
+ if (inode && (inode_state_read_once(inode) & I_NEW))
unlock_new_inode(inode);
return inode;
}
@@ -425,7 +437,7 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
hfs_cat_rec rec;
int res;
- hfs_dbg(INODE, "hfs_write_inode: %lu\n", inode->i_ino);
+ hfs_dbg("ino %lu\n", inode->i_ino);
res = hfs_ext_write_extent(inode);
if (res)
return res;
@@ -690,8 +702,9 @@ static const struct file_operations hfs_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
+ .mmap_prepare = generic_file_mmap_prepare,
.splice_read = filemap_splice_read,
+ .splice_write = iter_file_splice_write,
.fsync = hfs_file_fsync,
.open = hfs_file_open,
.release = hfs_file_release,
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
index 8082eb01127c..53f3fae60217 100644
--- a/fs/hfs/mdb.c
+++ b/fs/hfs/mdb.c
@@ -150,11 +150,11 @@ int hfs_mdb_get(struct super_block *sb)
/* These parameters are read from and written to the MDB */
HFS_SB(sb)->free_ablocks = be16_to_cpu(mdb->drFreeBks);
- HFS_SB(sb)->next_id = be32_to_cpu(mdb->drNxtCNID);
+ atomic64_set(&HFS_SB(sb)->next_id, be32_to_cpu(mdb->drNxtCNID));
HFS_SB(sb)->root_files = be16_to_cpu(mdb->drNmFls);
HFS_SB(sb)->root_dirs = be16_to_cpu(mdb->drNmRtDirs);
- HFS_SB(sb)->file_count = be32_to_cpu(mdb->drFilCnt);
- HFS_SB(sb)->folder_count = be32_to_cpu(mdb->drDirCnt);
+ atomic64_set(&HFS_SB(sb)->file_count, be32_to_cpu(mdb->drFilCnt));
+ atomic64_set(&HFS_SB(sb)->folder_count, be32_to_cpu(mdb->drDirCnt));
/* TRY to get the alternate (backup) MDB. */
sect = part_start + part_size - 2;
@@ -172,7 +172,7 @@ int hfs_mdb_get(struct super_block *sb)
pr_warn("continuing without an alternate MDB\n");
}
- HFS_SB(sb)->bitmap = kmalloc(8192, GFP_KERNEL);
+ HFS_SB(sb)->bitmap = kzalloc(8192, GFP_KERNEL);
if (!HFS_SB(sb)->bitmap)
goto out;
@@ -273,11 +273,17 @@ void hfs_mdb_commit(struct super_block *sb)
/* These parameters may have been modified, so write them back */
mdb->drLsMod = hfs_mtime();
mdb->drFreeBks = cpu_to_be16(HFS_SB(sb)->free_ablocks);
- mdb->drNxtCNID = cpu_to_be32(HFS_SB(sb)->next_id);
+ BUG_ON(atomic64_read(&HFS_SB(sb)->next_id) > U32_MAX);
+ mdb->drNxtCNID =
+ cpu_to_be32((u32)atomic64_read(&HFS_SB(sb)->next_id));
mdb->drNmFls = cpu_to_be16(HFS_SB(sb)->root_files);
mdb->drNmRtDirs = cpu_to_be16(HFS_SB(sb)->root_dirs);
- mdb->drFilCnt = cpu_to_be32(HFS_SB(sb)->file_count);
- mdb->drDirCnt = cpu_to_be32(HFS_SB(sb)->folder_count);
+ BUG_ON(atomic64_read(&HFS_SB(sb)->file_count) > U32_MAX);
+ mdb->drFilCnt =
+ cpu_to_be32((u32)atomic64_read(&HFS_SB(sb)->file_count));
+ BUG_ON(atomic64_read(&HFS_SB(sb)->folder_count) > U32_MAX);
+ mdb->drDirCnt =
+ cpu_to_be32((u32)atomic64_read(&HFS_SB(sb)->folder_count));
/* write MDB to disk */
mark_buffer_dirty(HFS_SB(sb)->mdb_bh);
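Taken together, the hfs_fs.h, inode.c and mdb.c hunks keep the superblock counters as 64-bit atomics in memory while the MDB retains 32-bit big-endian fields on disk, so every write-back narrows behind an explicit range check. A compressed sketch of that round trip, assuming the atomic64_t fields from the reworked struct hfs_sb_info (the patch itself uses BUG_ON() at the truncation points):

/* load: the 32-bit on-disk value becomes a 64-bit atomic at mount time */
static void hfs_load_file_count(struct hfs_sb_info *sbi, const struct hfs_mdb *mdb)
{
        atomic64_set(&sbi->file_count, be32_to_cpu(mdb->drFilCnt));
}

/* store: range-check before narrowing back to the on-disk __be32 */
static __be32 hfs_store_file_count(struct hfs_sb_info *sbi)
{
        s64 val = atomic64_read(&sbi->file_count);

        WARN_ON_ONCE(val > U32_MAX);    /* the series uses BUG_ON() here */
        return cpu_to_be32((u32)val);
}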
diff --git a/fs/hfs/string.c b/fs/hfs/string.c
index 3912209153a8..0cfa35e82abc 100644
--- a/fs/hfs/string.c
+++ b/fs/hfs/string.c
@@ -16,6 +16,8 @@
#include "hfs_fs.h"
#include <linux/dcache.h>
+#include <kunit/visibility.h>
+
/*================ File-local variables ================*/
/*
@@ -65,6 +67,7 @@ int hfs_hash_dentry(const struct dentry *dentry, struct qstr *this)
this->hash = end_name_hash(hash);
return 0;
}
+EXPORT_SYMBOL_IF_KUNIT(hfs_hash_dentry);
/*
* Compare two strings in the HFS filename character ordering
@@ -87,6 +90,7 @@ int hfs_strcmp(const unsigned char *s1, unsigned int len1,
}
return len1 - len2;
}
+EXPORT_SYMBOL_IF_KUNIT(hfs_strcmp);
/*
* Test for equality of two strings in the HFS filename character ordering.
@@ -112,3 +116,4 @@ int hfs_compare_dentry(const struct dentry *dentry,
}
return 0;
}
+EXPORT_SYMBOL_IF_KUNIT(hfs_compare_dentry);
diff --git a/fs/hfs/string_test.c b/fs/hfs/string_test.c
new file mode 100644
index 000000000000..e1bf6f954312
--- /dev/null
+++ b/fs/hfs/string_test.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit tests for HFS string operations
+ *
+ * Copyright (C) 2025 Viacheslav Dubeyko <slava@dubeyko.com>
+ */
+
+#include <kunit/test.h>
+#include <linux/dcache.h>
+#include "hfs_fs.h"
+
+/* Test hfs_strcmp function */
+static void hfs_strcmp_test(struct kunit *test)
+{
+ /* Test equal strings */
+ KUNIT_EXPECT_EQ(test, 0, hfs_strcmp("hello", 5, "hello", 5));
+ KUNIT_EXPECT_EQ(test, 0, hfs_strcmp("test", 4, "test", 4));
+ KUNIT_EXPECT_EQ(test, 0, hfs_strcmp("", 0, "", 0));
+
+ /* Test unequal strings */
+ KUNIT_EXPECT_NE(test, 0, hfs_strcmp("hello", 5, "world", 5));
+ KUNIT_EXPECT_NE(test, 0, hfs_strcmp("test", 4, "testing", 7));
+
+ /* Test different lengths */
+ KUNIT_EXPECT_LT(test, hfs_strcmp("test", 4, "testing", 7), 0);
+ KUNIT_EXPECT_GT(test, hfs_strcmp("testing", 7, "test", 4), 0);
+
+ /* Test case insensitive comparison (HFS should handle case) */
+ KUNIT_EXPECT_EQ(test, 0, hfs_strcmp("Test", 4, "TEST", 4));
+ KUNIT_EXPECT_EQ(test, 0, hfs_strcmp("hello", 5, "HELLO", 5));
+
+ /* Test with special characters */
+ KUNIT_EXPECT_EQ(test, 0, hfs_strcmp("file.txt", 8, "file.txt", 8));
+ KUNIT_EXPECT_NE(test, 0, hfs_strcmp("file.txt", 8, "file.dat", 8));
+
+ /* Test boundary cases */
+ KUNIT_EXPECT_EQ(test, 0, hfs_strcmp("a", 1, "a", 1));
+ KUNIT_EXPECT_NE(test, 0, hfs_strcmp("a", 1, "b", 1));
+}
+
+/* Test hfs_hash_dentry function */
+static void hfs_hash_dentry_test(struct kunit *test)
+{
+ struct qstr test_name1, test_name2, test_name3;
+ struct dentry dentry = {};
+ char name1[] = "testfile";
+ char name2[] = "TestFile";
+ char name3[] = "different";
+
+ /* Initialize test strings */
+ test_name1.name = name1;
+ test_name1.len = strlen(name1);
+ test_name1.hash = 0;
+
+ test_name2.name = name2;
+ test_name2.len = strlen(name2);
+ test_name2.hash = 0;
+
+ test_name3.name = name3;
+ test_name3.len = strlen(name3);
+ test_name3.hash = 0;
+
+ /* Test hashing */
+ KUNIT_EXPECT_EQ(test, 0, hfs_hash_dentry(&dentry, &test_name1));
+ KUNIT_EXPECT_EQ(test, 0, hfs_hash_dentry(&dentry, &test_name2));
+ KUNIT_EXPECT_EQ(test, 0, hfs_hash_dentry(&dentry, &test_name3));
+
+ /* Case insensitive names should hash the same */
+ KUNIT_EXPECT_EQ(test, test_name1.hash, test_name2.hash);
+
+ /* Different names should have different hashes */
+ KUNIT_EXPECT_NE(test, test_name1.hash, test_name3.hash);
+}
+
+/* Test hfs_compare_dentry function */
+static void hfs_compare_dentry_test(struct kunit *test)
+{
+ struct qstr test_name;
+ struct dentry dentry = {};
+ char name[] = "TestFile";
+
+ test_name.name = name;
+ test_name.len = strlen(name);
+
+ /* Test exact match */
+ KUNIT_EXPECT_EQ(test, 0, hfs_compare_dentry(&dentry, 8,
+ "TestFile", &test_name));
+
+ /* Test case insensitive match */
+ KUNIT_EXPECT_EQ(test, 0, hfs_compare_dentry(&dentry, 8,
+ "testfile", &test_name));
+ KUNIT_EXPECT_EQ(test, 0, hfs_compare_dentry(&dentry, 8,
+ "TESTFILE", &test_name));
+
+ /* Test different names */
+ KUNIT_EXPECT_EQ(test, 1, hfs_compare_dentry(&dentry, 8,
+ "DiffFile", &test_name));
+
+ /* Test different lengths */
+ KUNIT_EXPECT_EQ(test, 1, hfs_compare_dentry(&dentry, 7,
+ "TestFil", &test_name));
+ KUNIT_EXPECT_EQ(test, 1, hfs_compare_dentry(&dentry, 9,
+ "TestFiles", &test_name));
+
+ /* Test empty string */
+ test_name.name = "";
+ test_name.len = 0;
+ KUNIT_EXPECT_EQ(test, 0, hfs_compare_dentry(&dentry, 0, "", &test_name));
+
+ /* Test HFS_NAMELEN boundary */
+ test_name.name = "This_is_a_very_long_filename_that_exceeds_normal_limits";
+ test_name.len = strlen(test_name.name);
+ KUNIT_EXPECT_EQ(test, 0, hfs_compare_dentry(&dentry, HFS_NAMELEN,
+ "This_is_a_very_long_filename_th", &test_name));
+}
+
+static struct kunit_case hfs_string_test_cases[] = {
+ KUNIT_CASE(hfs_strcmp_test),
+ KUNIT_CASE(hfs_hash_dentry_test),
+ KUNIT_CASE(hfs_compare_dentry_test),
+ {}
+};
+
+static struct kunit_suite hfs_string_test_suite = {
+ .name = "hfs_string",
+ .test_cases = hfs_string_test_cases,
+};
+
+kunit_test_suite(hfs_string_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for HFS string operations");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 3bee9b5dba5e..47f50fa555a4 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -319,6 +319,10 @@ static int hfs_fill_super(struct super_block *sb, struct fs_context *fc)
int silent = fc->sb_flags & SB_SILENT;
int res;
+ atomic64_set(&sbi->file_count, 0);
+ atomic64_set(&sbi->folder_count, 0);
+ atomic64_set(&sbi->next_id, 0);
+
/* load_nls_default does not fail */
if (sbi->nls_disk && !sbi->nls_io)
sbi->nls_io = load_nls_default();
@@ -349,11 +353,13 @@ static int hfs_fill_super(struct super_block *sb, struct fs_context *fc)
goto bail_no_root;
res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd);
if (!res) {
- if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
+ if (fd.entrylength != sizeof(rec.dir)) {
res = -EIO;
goto bail_hfs_find;
}
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
+ if (rec.type != HFS_CDR_DIR)
+ res = -EIO;
}
if (res)
goto bail_hfs_find;
@@ -363,7 +369,7 @@ static int hfs_fill_super(struct super_block *sb, struct fs_context *fc)
if (!root_inode)
goto bail_no_root;
- sb->s_d_op = &hfs_dentry_operations;
+ set_default_d_op(sb, &hfs_dentry_operations);
res = -ENOMEM;
sb->s_root = d_make_root(root_inode);
if (!sb->s_root)
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
index 76fa02e3835b..ef54fc8093cf 100644
--- a/fs/hfs/sysdep.c
+++ b/fs/hfs/sysdep.c
@@ -13,7 +13,8 @@
/* dentry case-handling: just lowercase everything */
-static int hfs_revalidate_dentry(struct dentry *dentry, unsigned int flags)
+static int hfs_revalidate_dentry(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
int diff;
diff --git a/fs/hfsplus/.kunitconfig b/fs/hfsplus/.kunitconfig
new file mode 100644
index 000000000000..6c96dc7e872c
--- /dev/null
+++ b/fs/hfsplus/.kunitconfig
@@ -0,0 +1,8 @@
+CONFIG_KUNIT=y
+CONFIG_HFSPLUS_FS=y
+CONFIG_HFSPLUS_KUNIT_TEST=y
+CONFIG_BLOCK=y
+CONFIG_BUFFER_HEAD=y
+CONFIG_NLS=y
+CONFIG_NLS_UTF8=y
+CONFIG_LEGACY_DIRECT_IO=y
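With this .kunitconfig in place, the HFS+ suite can presumably be run on its own through the KUnit wrapper, e.g. tools/testing/kunit/kunit.py run --kunitconfig=fs/hfsplus/.kunitconfig; otherwise the tests are built per the Kconfig and Makefile hunks below and, when built in, report their results in TAP format at boot, as the Kconfig help text notes.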
diff --git a/fs/hfsplus/Kconfig b/fs/hfsplus/Kconfig
index 8ce4a33a9ac7..ca8401cb6954 100644
--- a/fs/hfsplus/Kconfig
+++ b/fs/hfsplus/Kconfig
@@ -14,3 +14,18 @@ config HFSPLUS_FS
MacOS 8. It includes all Mac specific filesystem data such as
data forks and creator codes, but it also has several UNIX
style features such as file ownership and permissions.
+
+config HFSPLUS_KUNIT_TEST
+ tristate "KUnit tests for HFS+ filesystem" if !KUNIT_ALL_TESTS
+ depends on HFSPLUS_FS && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds KUnit tests for the HFS+ filesystem.
+
+ KUnit tests run during boot and output the results to the debug
+ log in TAP format (https://testanything.org/). Only useful for
+ kernel devs running KUnit test harness and are not for inclusion
+ into a production build.
+
+ For more information on KUnit and unit tests in general please
+ refer to the KUnit documentation in Documentation/dev-tools/kunit/.
diff --git a/fs/hfsplus/Makefile b/fs/hfsplus/Makefile
index 9ed20e64b983..f2a9ae697e81 100644
--- a/fs/hfsplus/Makefile
+++ b/fs/hfsplus/Makefile
@@ -8,3 +8,6 @@ obj-$(CONFIG_HFSPLUS_FS) += hfsplus.o
hfsplus-objs := super.o options.o inode.o ioctl.o extents.o catalog.o dir.o btree.o \
bnode.o brec.o bfind.o tables.o unicode.o wrapper.o bitmap.o part_tbl.o \
attributes.o xattr.o xattr_user.o xattr_security.o xattr_trusted.o
+
+# KUnit tests
+obj-$(CONFIG_HFSPLUS_KUNIT_TEST) += unicode_test.o
diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c
index eeebe80c6be4..ba26980cc503 100644
--- a/fs/hfsplus/attributes.c
+++ b/fs/hfsplus/attributes.c
@@ -139,7 +139,7 @@ int hfsplus_find_attr(struct super_block *sb, u32 cnid,
{
int err = 0;
- hfs_dbg(ATTR_MOD, "find_attr: %s,%d\n", name ? name : NULL, cnid);
+ hfs_dbg("name %s, cnid %d\n", name ? name : NULL, cnid);
if (!HFSPLUS_SB(sb)->attr_tree) {
pr_err("attributes file doesn't exist\n");
@@ -201,7 +201,7 @@ int hfsplus_create_attr(struct inode *inode,
int entry_size;
int err;
- hfs_dbg(ATTR_MOD, "create_attr: %s,%ld\n",
+ hfs_dbg("name %s, ino %ld\n",
name ? name : NULL, inode->i_ino);
if (!HFSPLUS_SB(sb)->attr_tree) {
@@ -310,7 +310,7 @@ int hfsplus_delete_attr(struct inode *inode, const char *name)
struct super_block *sb = inode->i_sb;
struct hfs_find_data fd;
- hfs_dbg(ATTR_MOD, "delete_attr: %s,%ld\n",
+ hfs_dbg("name %s, ino %ld\n",
name ? name : NULL, inode->i_ino);
if (!HFSPLUS_SB(sb)->attr_tree) {
@@ -356,7 +356,7 @@ int hfsplus_delete_all_attrs(struct inode *dir, u32 cnid)
int err = 0;
struct hfs_find_data fd;
- hfs_dbg(ATTR_MOD, "delete_all_attrs: %d\n", cnid);
+ hfs_dbg("cnid %d\n", cnid);
if (!HFSPLUS_SB(dir->i_sb)->attr_tree) {
pr_err("attributes file doesn't exist\n");
diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c
index 901e83d65d20..336d654861c5 100644
--- a/fs/hfsplus/bfind.c
+++ b/fs/hfsplus/bfind.c
@@ -18,12 +18,12 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
fd->tree = tree;
fd->bnode = NULL;
- ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
+ ptr = kzalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
fd->search_key = ptr;
fd->key = ptr + tree->max_key_len + 2;
- hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
+ hfs_dbg("cnid %d, caller %ps\n",
tree->cnid, __builtin_return_address(0));
mutex_lock_nested(&tree->tree_lock,
hfsplus_btree_lock_class(tree));
@@ -34,7 +34,7 @@ void hfs_find_exit(struct hfs_find_data *fd)
{
hfs_bnode_put(fd->bnode);
kfree(fd->search_key);
- hfs_dbg(BNODE_REFS, "find_exit: %d (%p)\n",
+ hfs_dbg("cnid %d, caller %ps\n",
fd->tree->cnid, __builtin_return_address(0));
mutex_unlock(&fd->tree->tree_lock);
fd->tree = NULL;
@@ -158,6 +158,12 @@ int hfs_brec_find(struct hfs_find_data *fd, search_strategy_t do_key_compare)
__be32 data;
int height, res;
+ fd->record = -1;
+ fd->keyoffset = -1;
+ fd->keylength = -1;
+ fd->entryoffset = -1;
+ fd->entrylength = -1;
+
tree = fd->tree;
if (fd->bnode)
hfs_bnode_put(fd->bnode);
@@ -204,7 +210,7 @@ release:
return res;
}
-int hfs_brec_read(struct hfs_find_data *fd, void *rec, int rec_len)
+int hfs_brec_read(struct hfs_find_data *fd, void *rec, u32 rec_len)
{
int res;
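Pre-initialising fd->record and the related offsets to -1 in hfs_brec_find() means a caller that consumes those fields after a failed search now reads obviously bogus values instead of leftovers from the previous lookup. A small sketch of the intended caller pattern (foo_lookup_entry() is hypothetical; hfs_find_rec_by_key is assumed to be the usual exact-match strategy):

static int foo_lookup_entry(struct hfs_find_data *fd, void *entry, int max_len)
{
        int err;

        err = hfs_brec_find(fd, hfs_find_rec_by_key);
        if (err)
                return err;     /* fd->entryoffset/entrylength are -1 here, never use them */
        if (fd->entrylength > max_len)
                return -EIO;    /* record larger than the caller's buffer */
        hfs_bnode_read(fd->bnode, entry, fd->entryoffset, fd->entrylength);
        return 0;
}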
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index bd8dcea85588..1b3af8c87cad 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -31,7 +31,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
if (!len)
return size;
- hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
+ hfs_dbg("size %u, offset %u, len %u\n", size, offset, len);
mutex_lock(&sbi->alloc_mutex);
mapping = sbi->alloc_file->i_mapping;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
@@ -90,14 +90,14 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
else
end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
}
- hfs_dbg(BITMAP, "bitmap full\n");
+ hfs_dbg("bitmap full\n");
start = size;
goto out;
found:
start = offset + (curr - pptr) * 32 + i;
if (start >= size) {
- hfs_dbg(BITMAP, "bitmap full\n");
+ hfs_dbg("bitmap full\n");
goto out;
}
/* do any partial u32 at the start */
@@ -155,7 +155,7 @@ done:
*max = offset + (curr - pptr) * 32 + i - start;
sbi->free_blocks -= *max;
hfsplus_mark_mdb_dirty(sb);
- hfs_dbg(BITMAP, "-> %u,%u\n", start, *max);
+ hfs_dbg("start %u, max %u\n", start, *max);
out:
mutex_unlock(&sbi->alloc_mutex);
return start;
@@ -174,7 +174,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
if (!count)
return 0;
- hfs_dbg(BITMAP, "block_free: %u,%u\n", offset, count);
+ hfs_dbg("offset %u, count %u\n", offset, count);
/* are all of the bits in range? */
if ((offset + count) > sbi->total_blocks)
return -ENOENT;
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 87974d5e6791..191661af9677 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -18,27 +18,42 @@
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
+
/* Copy a specified range of bytes from the raw data of a node */
-void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, u32 off, u32 len)
{
struct page **pagep;
- int l;
+ u32 l;
+
+ if (!is_bnode_offset_valid(node, off))
+ return;
+
+ if (len == 0) {
+ pr_err("requested zero length: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u, len %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off, len);
+ return;
+ }
+
+ len = check_and_correct_requested_length(node, off, len);
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
off &= ~PAGE_MASK;
- l = min_t(int, len, PAGE_SIZE - off);
+ l = min_t(u32, len, PAGE_SIZE - off);
memcpy_from_page(buf, *pagep, off, l);
while ((len -= l) != 0) {
buf += l;
- l = min_t(int, len, PAGE_SIZE);
+ l = min_t(u32, len, PAGE_SIZE);
memcpy_from_page(buf, *++pagep, 0, l);
}
}
-u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
+u16 hfs_bnode_read_u16(struct hfs_bnode *node, u32 off)
{
__be16 data;
/* TODO: optimize later... */
@@ -46,7 +61,7 @@ u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
return be16_to_cpu(data);
}
-u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
+u8 hfs_bnode_read_u8(struct hfs_bnode *node, u32 off)
{
u8 data;
/* TODO: optimize later... */
@@ -54,10 +69,10 @@ u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
return data;
}
-void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
+void hfs_bnode_read_key(struct hfs_bnode *node, void *key, u32 off)
{
struct hfs_btree *tree;
- int key_len;
+ u32 key_len;
tree = node->tree;
if (node->type == HFS_NODE_LEAF ||
@@ -67,66 +82,104 @@ void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
else
key_len = tree->max_key_len + 2;
+ if (key_len > sizeof(hfsplus_btree_key) || key_len < 1) {
+ memset(key, 0, sizeof(hfsplus_btree_key));
+ pr_err("hfsplus: Invalid key length: %u\n", key_len);
+ return;
+ }
+
hfs_bnode_read(node, key, off, key_len);
}
-void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
+void hfs_bnode_write(struct hfs_bnode *node, void *buf, u32 off, u32 len)
{
struct page **pagep;
- int l;
+ u32 l;
+
+ if (!is_bnode_offset_valid(node, off))
+ return;
+
+ if (len == 0) {
+ pr_err("requested zero length: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u, len %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off, len);
+ return;
+ }
+
+ len = check_and_correct_requested_length(node, off, len);
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
off &= ~PAGE_MASK;
- l = min_t(int, len, PAGE_SIZE - off);
+ l = min_t(u32, len, PAGE_SIZE - off);
memcpy_to_page(*pagep, off, buf, l);
set_page_dirty(*pagep);
while ((len -= l) != 0) {
buf += l;
- l = min_t(int, len, PAGE_SIZE);
+ l = min_t(u32, len, PAGE_SIZE);
memcpy_to_page(*++pagep, 0, buf, l);
set_page_dirty(*pagep);
}
}
-void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
+void hfs_bnode_write_u16(struct hfs_bnode *node, u32 off, u16 data)
{
__be16 v = cpu_to_be16(data);
/* TODO: optimize later... */
hfs_bnode_write(node, &v, off, 2);
}
-void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
+void hfs_bnode_clear(struct hfs_bnode *node, u32 off, u32 len)
{
struct page **pagep;
- int l;
+ u32 l;
+
+ if (!is_bnode_offset_valid(node, off))
+ return;
+
+ if (len == 0) {
+ pr_err("requested zero length: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u, len %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off, len);
+ return;
+ }
+
+ len = check_and_correct_requested_length(node, off, len);
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
off &= ~PAGE_MASK;
- l = min_t(int, len, PAGE_SIZE - off);
+ l = min_t(u32, len, PAGE_SIZE - off);
memzero_page(*pagep, off, l);
set_page_dirty(*pagep);
while ((len -= l) != 0) {
- l = min_t(int, len, PAGE_SIZE);
+ l = min_t(u32, len, PAGE_SIZE);
memzero_page(*++pagep, 0, l);
set_page_dirty(*pagep);
}
}
-void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
- struct hfs_bnode *src_node, int src, int len)
+void hfs_bnode_copy(struct hfs_bnode *dst_node, u32 dst,
+ struct hfs_bnode *src_node, u32 src, u32 len)
{
struct page **src_page, **dst_page;
- int l;
+ u32 l;
- hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
+ hfs_dbg("dst %u, src %u, len %u\n", dst, src, len);
if (!len)
return;
+
+ len = check_and_correct_requested_length(src_node, src, len);
+ len = check_and_correct_requested_length(dst_node, dst, len);
+
src += src_node->page_offset;
dst += dst_node->page_offset;
src_page = src_node->page + (src >> PAGE_SHIFT);
@@ -135,12 +188,12 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
dst &= ~PAGE_MASK;
if (src == dst) {
- l = min_t(int, len, PAGE_SIZE - src);
+ l = min_t(u32, len, PAGE_SIZE - src);
memcpy_page(*dst_page, src, *src_page, src, l);
set_page_dirty(*dst_page);
while ((len -= l) != 0) {
- l = min_t(int, len, PAGE_SIZE);
+ l = min_t(u32, len, PAGE_SIZE);
memcpy_page(*++dst_page, 0, *++src_page, 0, l);
set_page_dirty(*dst_page);
}
@@ -172,15 +225,19 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
}
}
-void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
+void hfs_bnode_move(struct hfs_bnode *node, u32 dst, u32 src, u32 len)
{
struct page **src_page, **dst_page;
void *src_ptr, *dst_ptr;
- int l;
+ u32 l;
- hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
+ hfs_dbg("dst %u, src %u, len %u\n", dst, src, len);
if (!len)
return;
+
+ len = check_and_correct_requested_length(node, src, len);
+ len = check_and_correct_requested_length(node, dst, len);
+
src += node->page_offset;
dst += node->page_offset;
if (dst > src) {
@@ -242,7 +299,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
dst &= ~PAGE_MASK;
if (src == dst) {
- l = min_t(int, len, PAGE_SIZE - src);
+ l = min_t(u32, len, PAGE_SIZE - src);
dst_ptr = kmap_local_page(*dst_page) + src;
src_ptr = kmap_local_page(*src_page) + src;
@@ -252,7 +309,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
kunmap_local(dst_ptr);
while ((len -= l) != 0) {
- l = min_t(int, len, PAGE_SIZE);
+ l = min_t(u32, len, PAGE_SIZE);
dst_ptr = kmap_local_page(*++dst_page);
src_ptr = kmap_local_page(*++src_page);
memmove(dst_ptr, src_ptr, l);
@@ -294,16 +351,16 @@ void hfs_bnode_dump(struct hfs_bnode *node)
__be32 cnid;
int i, off, key_off;
- hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
+ hfs_dbg("node %d\n", node->this);
hfs_bnode_read(node, &desc, 0, sizeof(desc));
- hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
+ hfs_dbg("next %d, prev %d, type %d, height %d, num_recs %d\n",
be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
desc.type, desc.height, be16_to_cpu(desc.num_recs));
off = node->tree->node_size - 2;
for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
key_off = hfs_bnode_read_u16(node, off);
- hfs_dbg(BNODE_MOD, " %d", key_off);
+ hfs_dbg(" key_off %d", key_off);
if (i && node->type == HFS_NODE_INDEX) {
int tmp;
@@ -312,17 +369,17 @@ void hfs_bnode_dump(struct hfs_bnode *node)
tmp = hfs_bnode_read_u16(node, key_off) + 2;
else
tmp = node->tree->max_key_len + 2;
- hfs_dbg_cont(BNODE_MOD, " (%d", tmp);
+ hfs_dbg(" (%d", tmp);
hfs_bnode_read(node, &cnid, key_off + tmp, 4);
- hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
+ hfs_dbg(", cnid %d)", be32_to_cpu(cnid));
} else if (i && node->type == HFS_NODE_LEAF) {
int tmp;
tmp = hfs_bnode_read_u16(node, key_off);
- hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
+ hfs_dbg(" (%d)", tmp);
}
}
- hfs_dbg_cont(BNODE_MOD, "\n");
+ hfs_dbg("\n");
}
void hfs_bnode_unlink(struct hfs_bnode *node)
@@ -358,7 +415,7 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
/* move down? */
if (!node->prev && !node->next)
- hfs_dbg(BNODE_MOD, "hfs_btree_del_level\n");
+ hfs_dbg("btree delete level\n");
if (!node->parent) {
tree->root = 0;
tree->depth = 0;
@@ -413,7 +470,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
node->this = cnid;
set_bit(HFS_BNODE_NEW, &node->flags);
atomic_set(&node->refcnt, 1);
- hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
+ hfs_dbg("cnid %d, node %d, refcnt 1\n",
node->tree->cnid, node->this);
init_waitqueue_head(&node->lock_wq);
spin_lock(&tree->hash_lock);
@@ -424,6 +481,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
tree->node_hash[hash] = node;
tree->node_hash_cnt++;
} else {
+ hfs_bnode_get(node2);
spin_unlock(&tree->hash_lock);
kfree(node);
wait_event(node2->lock_wq,
@@ -453,7 +511,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node)
{
struct hfs_bnode **p;
- hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
+ hfs_dbg("cnid %d, node %d, refcnt %d\n",
node->tree->cnid, node->this, atomic_read(&node->refcnt));
for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
*p && *p != node; p = &(*p)->next_hash)
@@ -599,7 +657,7 @@ void hfs_bnode_get(struct hfs_bnode *node)
{
if (node) {
atomic_inc(&node->refcnt);
- hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
+ hfs_dbg("cnid %d, node %d, refcnt %d\n",
node->tree->cnid, node->this,
atomic_read(&node->refcnt));
}
@@ -612,7 +670,7 @@ void hfs_bnode_put(struct hfs_bnode *node)
struct hfs_btree *tree = node->tree;
int i;
- hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
+ hfs_dbg("cnid %d, node %d, refcnt %d\n",
node->tree->cnid, node->this,
atomic_read(&node->refcnt));
BUG_ON(!atomic_read(&node->refcnt));
@@ -647,6 +705,5 @@ bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);
- return tree->cnid == HFSPLUS_CAT_CNID &&
- volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
+ return volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
}
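The bnode.c hunk leans on two helpers, is_bnode_offset_valid() and check_and_correct_requested_length(), whose definitions are outside this excerpt (they are added elsewhere in the series, presumably in a shared header). A hypothetical sketch of the semantics the call sites imply (the offset must stay inside the node; the length is clamped so off + len never runs past node_size):

/* Hypothetical sketch; the real helpers are defined elsewhere in the series. */
static inline bool is_bnode_offset_valid(struct hfs_bnode *node, u32 off)
{
        return off < node->tree->node_size;
}

static inline u32 check_and_correct_requested_length(struct hfs_bnode *node,
                                                     u32 off, u32 len)
{
        u32 node_size = node->tree->node_size;

        /* clamp instead of letting the copy run off the end of the node */
        if (len > node_size - off)
                return node_size - off;
        return len;
}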
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index 1918544a7871..6796c1a80e99 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -60,7 +60,7 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
return retval;
}
-int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
+int hfs_brec_insert(struct hfs_find_data *fd, void *entry, u32 entry_len)
{
struct hfs_btree *tree;
struct hfs_bnode *node, *new_node;
@@ -92,7 +92,7 @@ again:
end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
end_off = hfs_bnode_read_u16(node, end_rec_off);
end_rec_off -= 2;
- hfs_dbg(BNODE_MOD, "insert_rec: %d, %d, %d, %d\n",
+ hfs_dbg("rec %d, size %d, end_off %d, end_rec_off %d\n",
rec, size, end_off, end_rec_off);
if (size > end_rec_off - end_off) {
if (new_node)
@@ -193,7 +193,7 @@ again:
mark_inode_dirty(tree->inode);
}
hfs_bnode_dump(node);
- hfs_dbg(BNODE_MOD, "remove_rec: %d, %d\n",
+ hfs_dbg("rec %d, len %d\n",
fd->record, fd->keylength + fd->entrylength);
if (!--node->num_recs) {
hfs_bnode_unlink(node);
@@ -246,7 +246,7 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
if (IS_ERR(new_node))
return new_node;
hfs_bnode_get(node);
- hfs_dbg(BNODE_MOD, "split_nodes: %d - %d - %d\n",
+ hfs_dbg("this %d - new %d - next %d\n",
node->this, new_node->this, node->next);
new_node->next = node->next;
new_node->prev = node->this;
@@ -383,7 +383,7 @@ again:
newkeylen = hfs_bnode_read_u16(node, 14) + 2;
else
fd->keylength = newkeylen = tree->max_key_len + 2;
- hfs_dbg(BNODE_MOD, "update_rec: %d, %d, %d\n",
+ hfs_dbg("rec %d, keylength %d, newkeylen %d\n",
rec, fd->keylength, newkeylen);
rec_off = tree->node_size - (rec + 2) * 2;
@@ -395,7 +395,7 @@ again:
end_off = hfs_bnode_read_u16(parent, end_rec_off);
if (end_rec_off - end_off < diff) {
- hfs_dbg(BNODE_MOD, "splitting index node\n");
+ hfs_dbg("splitting index node\n");
fd->bnode = parent;
new_node = hfs_bnode_split(fd);
if (IS_ERR(new_node))
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 9e1732a2b92a..229f25dc7c49 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -344,7 +344,7 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
}
/* Make sure @tree has enough space for the @rsvd_nodes */
-int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
+int hfs_bmap_reserve(struct hfs_btree *tree, u32 rsvd_nodes)
{
struct inode *inode = tree->inode;
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
@@ -393,6 +393,12 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
len = hfs_brec_lenoff(node, 2, &off16);
off = off16;
+ if (!is_bnode_offset_valid(node, off)) {
+ hfs_bnode_put(node);
+ return ERR_PTR(-EIO);
+ }
+ len = check_and_correct_requested_length(node, off, len);
+
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
data = kmap_local_page(*pagep);
@@ -428,7 +434,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
kunmap_local(data);
nidx = node->next;
if (!nidx) {
- hfs_dbg(BNODE_MOD, "create new bmap node\n");
+ hfs_dbg("create new bmap node\n");
next_node = hfs_bmap_new_bmap(node, idx);
} else
next_node = hfs_bnode_find(tree, nidx);
@@ -454,7 +460,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
u32 nidx;
u8 *data, byte, m;
- hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
+ hfs_dbg("node %u\n", node->this);
BUG_ON(!node->this);
tree = node->tree;
nidx = node->this;
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 1995bafee839..02c1eee4a4b8 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -259,7 +259,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
int entry_size;
int err;
- hfs_dbg(CAT_MOD, "create_cat: %s,%u(%d)\n",
+ hfs_dbg("name %s, cnid %u, i_nlink %d\n",
str->name, cnid, inode->i_nlink);
err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
if (err)
@@ -336,7 +336,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str)
int err, off;
u16 type;
- hfs_dbg(CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid);
+ hfs_dbg("name %s, cnid %u\n", str ? str->name : NULL, cnid);
err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
if (err)
return err;
@@ -441,7 +441,7 @@ int hfsplus_rename_cat(u32 cnid,
int entry_size, type;
int err;
- hfs_dbg(CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
+ hfs_dbg("cnid %u - ino %lu, name %s - ino %lu, name %s\n",
cnid, src_dir->i_ino, src_name->name,
dst_dir->i_ino, dst_name->name);
err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd);
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index f5c4b3e31a1c..cadf0b5f9342 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -204,7 +204,7 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx)
fd.entrylength);
type = be16_to_cpu(entry.type);
len = NLS_MAX_CHARSET_SIZE * HFSPLUS_MAX_STRLEN;
- err = hfsplus_uni2asc(sb, &fd.key->cat.name, strbuf, &len);
+ err = hfsplus_uni2asc_str(sb, &fd.key->cat.name, strbuf, &len);
if (err)
goto out;
if (type == HFSPLUS_FOLDER) {
@@ -523,10 +523,10 @@ static int hfsplus_create(struct mnt_idmap *idmap, struct inode *dir,
return hfsplus_mknod(&nop_mnt_idmap, dir, dentry, mode, 0);
}
-static int hfsplus_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *hfsplus_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- return hfsplus_mknod(&nop_mnt_idmap, dir, dentry, mode | S_IFDIR, 0);
+ return ERR_PTR(hfsplus_mknod(&nop_mnt_idmap, dir, dentry, mode | S_IFDIR, 0));
}
static int hfsplus_rename(struct mnt_idmap *idmap,
@@ -552,8 +552,13 @@ static int hfsplus_rename(struct mnt_idmap *idmap,
res = hfsplus_rename_cat((u32)(unsigned long)old_dentry->d_fsdata,
old_dir, &old_dentry->d_name,
new_dir, &new_dentry->d_name);
- if (!res)
+ if (!res) {
new_dentry->d_fsdata = old_dentry->d_fsdata;
+
+ res = hfsplus_cat_write_inode(old_dir);
+ if (!res)
+ res = hfsplus_cat_write_inode(new_dir);
+ }
return res;
}
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index a6d61685ae79..8e886514d27f 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -275,7 +275,7 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock,
mutex_unlock(&hip->extents_lock);
done:
- hfs_dbg(EXTENT, "get_block(%lu): %llu - %u\n",
+ hfs_dbg("ino %lu, iblock %llu - dblock %u\n",
inode->i_ino, (long long)iblock, dblock);
mask = (1 << sbi->fs_shift) - 1;
@@ -298,12 +298,12 @@ static void hfsplus_dump_extent(struct hfsplus_extent *extent)
{
int i;
- hfs_dbg(EXTENT, " ");
+ hfs_dbg("extent ");
for (i = 0; i < 8; i++)
- hfs_dbg_cont(EXTENT, " %u:%u",
- be32_to_cpu(extent[i].start_block),
- be32_to_cpu(extent[i].block_count));
- hfs_dbg_cont(EXTENT, "\n");
+ hfs_dbg(" start_block %u, block_count %u",
+ be32_to_cpu(extent[i].start_block),
+ be32_to_cpu(extent[i].block_count));
+ hfs_dbg("\n");
}
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
@@ -342,9 +342,6 @@ static int hfsplus_free_extents(struct super_block *sb,
int i;
int err = 0;
- /* Mapping the allocation file may lock the extent tree */
- WARN_ON(mutex_is_locked(&HFSPLUS_SB(sb)->ext_tree->tree_lock));
-
hfsplus_dump_extent(extent);
for (i = 0; i < 8; extent++, i++) {
count = be32_to_cpu(extent->block_count);
@@ -362,8 +359,7 @@ found:
if (count <= block_nr) {
err = hfsplus_block_free(sb, start, count);
if (err) {
- pr_err("can't free extent\n");
- hfs_dbg(EXTENT, " start: %u count: %u\n",
+ pr_err("can't free extent: start %u, count %u\n",
start, count);
}
extent->block_count = 0;
@@ -373,8 +369,7 @@ found:
count -= block_nr;
err = hfsplus_block_free(sb, start + count, block_nr);
if (err) {
- pr_err("can't free extent\n");
- hfs_dbg(EXTENT, " start: %u count: %u\n",
+ pr_err("can't free extent: start %u, count %u\n",
start, count);
}
extent->block_count = cpu_to_be32(count);
@@ -481,11 +476,12 @@ int hfsplus_file_extend(struct inode *inode, bool zeroout)
goto out;
}
- hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
+ hfs_dbg("ino %lu, start %u, len %u\n", inode->i_ino, start, len);
if (hip->alloc_blocks <= hip->first_blocks) {
if (!hip->first_blocks) {
- hfs_dbg(EXTENT, "first extents\n");
+ hfs_dbg("first_extent: start %u, len %u\n",
+ start, len);
/* no extents yet */
hip->first_extents[0].start_block = cpu_to_be32(start);
hip->first_extents[0].block_count = cpu_to_be32(len);
@@ -524,7 +520,7 @@ out:
return res;
insert_extent:
- hfs_dbg(EXTENT, "insert new extent\n");
+ hfs_dbg("insert new extent\n");
res = hfsplus_ext_write_extent_locked(inode);
if (res)
goto out;
@@ -549,7 +545,7 @@ void hfsplus_file_truncate(struct inode *inode)
u32 alloc_cnt, blk_cnt, start;
int res;
- hfs_dbg(INODE, "truncate: %lu, %llu -> %llu\n",
+ hfs_dbg("ino %lu, phys_size %llu -> i_size %llu\n",
inode->i_ino, (long long)hip->phys_size, inode->i_size);
if (inode->i_size > hip->phys_size) {
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 2f089bff0095..45fe3a12ecba 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -11,12 +11,6 @@
#ifndef _LINUX_HFSPLUS_FS_H
#define _LINUX_HFSPLUS_FS_H
-#ifdef pr_fmt
-#undef pr_fmt
-#endif
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/buffer_head.h>
@@ -24,34 +18,6 @@
#include <linux/fs_context.h>
#include "hfsplus_raw.h"
-#define DBG_BNODE_REFS 0x00000001
-#define DBG_BNODE_MOD 0x00000002
-#define DBG_CAT_MOD 0x00000004
-#define DBG_INODE 0x00000008
-#define DBG_SUPER 0x00000010
-#define DBG_EXTENT 0x00000020
-#define DBG_BITMAP 0x00000040
-#define DBG_ATTR_MOD 0x00000080
-
-#if 0
-#define DBG_MASK (DBG_EXTENT|DBG_INODE|DBG_BNODE_MOD)
-#define DBG_MASK (DBG_BNODE_MOD|DBG_CAT_MOD|DBG_INODE)
-#define DBG_MASK (DBG_CAT_MOD|DBG_BNODE_REFS|DBG_INODE|DBG_EXTENT)
-#endif
-#define DBG_MASK (0)
-
-#define hfs_dbg(flg, fmt, ...) \
-do { \
- if (DBG_##flg & DBG_MASK) \
- printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
-} while (0)
-
-#define hfs_dbg_cont(flg, fmt, ...) \
-do { \
- if (DBG_##flg & DBG_MASK) \
- pr_cont(fmt, ##__VA_ARGS__); \
-} while (0)
-
/* Runtime config options */
#define HFSPLUS_DEF_CR_TYPE 0x3F3F3F3F /* '????' */
@@ -390,21 +356,21 @@ u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size, u64 sectors,
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id);
void hfs_btree_close(struct hfs_btree *tree);
int hfs_btree_write(struct hfs_btree *tree);
-int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes);
+int hfs_bmap_reserve(struct hfs_btree *tree, u32 rsvd_nodes);
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree);
void hfs_bmap_free(struct hfs_bnode *node);
/* bnode.c */
-void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len);
-u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off);
-u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off);
-void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off);
-void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len);
-void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data);
-void hfs_bnode_clear(struct hfs_bnode *node, int off, int len);
-void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
- struct hfs_bnode *src_node, int src, int len);
-void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len);
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, u32 off, u32 len);
+u16 hfs_bnode_read_u16(struct hfs_bnode *node, u32 off);
+u8 hfs_bnode_read_u8(struct hfs_bnode *node, u32 off);
+void hfs_bnode_read_key(struct hfs_bnode *node, void *key, u32 off);
+void hfs_bnode_write(struct hfs_bnode *node, void *buf, u32 off, u32 len);
+void hfs_bnode_write_u16(struct hfs_bnode *node, u32 off, u16 data);
+void hfs_bnode_clear(struct hfs_bnode *node, u32 off, u32 len);
+void hfs_bnode_copy(struct hfs_bnode *dst_node, u32 dst,
+ struct hfs_bnode *src_node, u32 src, u32 len);
+void hfs_bnode_move(struct hfs_bnode *node, u32 dst, u32 src, u32 len);
void hfs_bnode_dump(struct hfs_bnode *node);
void hfs_bnode_unlink(struct hfs_bnode *node);
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid);
@@ -419,7 +385,7 @@ bool hfs_bnode_need_zeroout(struct hfs_btree *tree);
/* brec.c */
u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off);
u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec);
-int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len);
+int hfs_brec_insert(struct hfs_find_data *fd, void *entry, u32 entry_len);
int hfs_brec_remove(struct hfs_find_data *fd);
/* bfind.c */
@@ -432,7 +398,7 @@ int hfs_find_rec_by_key(struct hfs_bnode *bnode, struct hfs_find_data *fd,
int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd,
search_strategy_t rec_found);
int hfs_brec_find(struct hfs_find_data *fd, search_strategy_t do_key_compare);
-int hfs_brec_read(struct hfs_find_data *fd, void *rec, int rec_len);
+int hfs_brec_read(struct hfs_find_data *fd, void *rec, u32 rec_len);
int hfs_brec_goto(struct hfs_find_data *fd, int cnt);
/* catalog.c */
@@ -473,8 +439,10 @@ extern const struct address_space_operations hfsplus_aops;
extern const struct address_space_operations hfsplus_btree_aops;
extern const struct dentry_operations hfsplus_dentry_operations;
-int hfsplus_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, struct folio **foliop, void **fsdata);
+int hfsplus_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, struct folio **foliop,
+ void **fsdata);
struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
umode_t mode);
void hfsplus_delete_inode(struct inode *inode);
@@ -489,9 +457,9 @@ int hfsplus_getattr(struct mnt_idmap *idmap, const struct path *path,
unsigned int query_flags);
int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
int datasync);
-int hfsplus_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+int hfsplus_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
int hfsplus_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa);
+ struct dentry *dentry, struct file_kattr *fa);
/* ioctl.c */
long hfsplus_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
@@ -508,6 +476,8 @@ int hfs_part_find(struct super_block *sb, sector_t *part_start,
/* super.c */
struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino);
void hfsplus_mark_mdb_dirty(struct super_block *sb);
+void hfsplus_prepare_volume_header_for_commit(struct hfsplus_vh *vhdr);
+int hfsplus_commit_superblock(struct super_block *sb);
/* tables.c */
extern u16 hfsplus_case_fold_table[];
@@ -519,8 +489,12 @@ int hfsplus_strcasecmp(const struct hfsplus_unistr *s1,
const struct hfsplus_unistr *s2);
int hfsplus_strcmp(const struct hfsplus_unistr *s1,
const struct hfsplus_unistr *s2);
-int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr,
- char *astr, int *len_p);
+int hfsplus_uni2asc_str(struct super_block *sb,
+ const struct hfsplus_unistr *ustr, char *astr,
+ int *len_p);
+int hfsplus_uni2asc_xattr_str(struct super_block *sb,
+ const struct hfsplus_attr_unistr *ustr,
+ char *astr, int *len_p);
int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
int max_unistr_len, const char *astr, int len);
int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str);
@@ -575,6 +549,48 @@ hfsplus_btree_lock_class(struct hfs_btree *tree)
return class;
}
+static inline
+bool is_bnode_offset_valid(struct hfs_bnode *node, u32 off)
+{
+ bool is_valid = off < node->tree->node_size;
+
+ if (!is_valid) {
+ pr_err("requested invalid offset: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off);
+ }
+
+ return is_valid;
+}
+
+static inline
+u32 check_and_correct_requested_length(struct hfs_bnode *node, u32 off, u32 len)
+{
+ unsigned int node_size;
+
+ if (!is_bnode_offset_valid(node, off))
+ return 0;
+
+ node_size = node->tree->node_size;
+
+ if ((off + len) > node_size) {
+ u32 new_len = node_size - off;
+
+ pr_err("requested length has been corrected: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u, "
+ "requested_len %u, corrected_len %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off, len, new_len);
+
+ return new_len;
+ }
+
+ return len;
+}
+
/* compatibility */
#define hfsp_mt2ut(t) (struct timespec64){ .tv_sec = __hfsp_mt2ut(t) }
#define hfsp_ut2mt(t) __hfsp_ut2mt((t).tv_sec)
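
The two inline helpers added to hfsplus_fs.h clamp any b-tree node access to the node size before a copy is attempted. Below is a standalone userspace model of the same clamp, with the node reduced to the two fields the check actually needs; bnode_read() here is hypothetical and only illustrates how a caller could use the corrected length.

/* Userspace model of the bnode offset/length clamp (illustrative only). */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct fake_bnode {
	uint32_t node_size;	/* size of the on-disk node buffer */
	unsigned char *data;	/* node contents */
};

/* Mirrors check_and_correct_requested_length(): 0 if off is bad, else a clamped len. */
static uint32_t clamp_request(const struct fake_bnode *node, uint32_t off, uint32_t len)
{
	if (off >= node->node_size)
		return 0;
	if (off + len > node->node_size)
		return node->node_size - off;
	return len;
}

/* A hypothetical read wrapper that never copies past the node buffer. */
static void bnode_read(const struct fake_bnode *node, void *buf, uint32_t off, uint32_t len)
{
	uint32_t safe = clamp_request(node, off, len);

	memset(buf, 0, len);		/* caller always gets fully initialized output */
	if (safe)
		memcpy(buf, node->data + off, safe);
}

int main(void)
{
	unsigned char raw[512] = { [500] = 0xAB };
	struct fake_bnode node = { .node_size = 512, .data = raw };
	unsigned char out[64];

	bnode_read(&node, out, 500, sizeof(out));	/* request overruns the node by 52 bytes */
	printf("clamped copy, out[0]=0x%02X out[12]=0x%02X\n", out[0], out[12]);
	return 0;
}
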
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h
index 68b4240c6191..83b5dbde924b 100644
--- a/fs/hfsplus/hfsplus_raw.h
+++ b/fs/hfsplus/hfsplus_raw.h
@@ -15,398 +15,6 @@
#define _LINUX_HFSPLUS_RAW_H
#include <linux/types.h>
-
-/* Some constants */
-#define HFSPLUS_SECTOR_SIZE 512
-#define HFSPLUS_SECTOR_SHIFT 9
-#define HFSPLUS_VOLHEAD_SECTOR 2
-#define HFSPLUS_VOLHEAD_SIG 0x482b
-#define HFSPLUS_VOLHEAD_SIGX 0x4858
-#define HFSPLUS_SUPER_MAGIC 0x482b
-#define HFSPLUS_MIN_VERSION 4
-#define HFSPLUS_CURRENT_VERSION 5
-
-#define HFSP_WRAP_MAGIC 0x4244
-#define HFSP_WRAP_ATTRIB_SLOCK 0x8000
-#define HFSP_WRAP_ATTRIB_SPARED 0x0200
-
-#define HFSP_WRAPOFF_SIG 0x00
-#define HFSP_WRAPOFF_ATTRIB 0x0A
-#define HFSP_WRAPOFF_ABLKSIZE 0x14
-#define HFSP_WRAPOFF_ABLKSTART 0x1C
-#define HFSP_WRAPOFF_EMBEDSIG 0x7C
-#define HFSP_WRAPOFF_EMBEDEXT 0x7E
-
-#define HFSP_HIDDENDIR_NAME \
- "\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80HFS+ Private Data"
-
-#define HFSP_HARDLINK_TYPE 0x686c6e6b /* 'hlnk' */
-#define HFSP_HFSPLUS_CREATOR 0x6866732b /* 'hfs+' */
-
-#define HFSP_SYMLINK_TYPE 0x736c6e6b /* 'slnk' */
-#define HFSP_SYMLINK_CREATOR 0x72686170 /* 'rhap' */
-
-#define HFSP_MOUNT_VERSION 0x482b4c78 /* 'H+Lx' */
-
-/* Structures used on disk */
-
-typedef __be32 hfsplus_cnid;
-typedef __be16 hfsplus_unichr;
-
-#define HFSPLUS_MAX_STRLEN 255
-#define HFSPLUS_ATTR_MAX_STRLEN 127
-
-/* A "string" as used in filenames, etc. */
-struct hfsplus_unistr {
- __be16 length;
- hfsplus_unichr unicode[HFSPLUS_MAX_STRLEN];
-} __packed;
-
-/*
- * A "string" is used in attributes file
- * for name of extended attribute
- */
-struct hfsplus_attr_unistr {
- __be16 length;
- hfsplus_unichr unicode[HFSPLUS_ATTR_MAX_STRLEN];
-} __packed;
-
-/* POSIX permissions */
-struct hfsplus_perm {
- __be32 owner;
- __be32 group;
- u8 rootflags;
- u8 userflags;
- __be16 mode;
- __be32 dev;
-} __packed;
-
-#define HFSPLUS_FLG_NODUMP 0x01
-#define HFSPLUS_FLG_IMMUTABLE 0x02
-#define HFSPLUS_FLG_APPEND 0x04
-
-/* A single contiguous area of a file */
-struct hfsplus_extent {
- __be32 start_block;
- __be32 block_count;
-} __packed;
-typedef struct hfsplus_extent hfsplus_extent_rec[8];
-
-/* Information for a "Fork" in a file */
-struct hfsplus_fork_raw {
- __be64 total_size;
- __be32 clump_size;
- __be32 total_blocks;
- hfsplus_extent_rec extents;
-} __packed;
-
-/* HFS+ Volume Header */
-struct hfsplus_vh {
- __be16 signature;
- __be16 version;
- __be32 attributes;
- __be32 last_mount_vers;
- u32 reserved;
-
- __be32 create_date;
- __be32 modify_date;
- __be32 backup_date;
- __be32 checked_date;
-
- __be32 file_count;
- __be32 folder_count;
-
- __be32 blocksize;
- __be32 total_blocks;
- __be32 free_blocks;
-
- __be32 next_alloc;
- __be32 rsrc_clump_sz;
- __be32 data_clump_sz;
- hfsplus_cnid next_cnid;
-
- __be32 write_count;
- __be64 encodings_bmp;
-
- u32 finder_info[8];
-
- struct hfsplus_fork_raw alloc_file;
- struct hfsplus_fork_raw ext_file;
- struct hfsplus_fork_raw cat_file;
- struct hfsplus_fork_raw attr_file;
- struct hfsplus_fork_raw start_file;
-} __packed;
-
-/* HFS+ volume attributes */
-#define HFSPLUS_VOL_UNMNT (1 << 8)
-#define HFSPLUS_VOL_SPARE_BLK (1 << 9)
-#define HFSPLUS_VOL_NOCACHE (1 << 10)
-#define HFSPLUS_VOL_INCNSTNT (1 << 11)
-#define HFSPLUS_VOL_NODEID_REUSED (1 << 12)
-#define HFSPLUS_VOL_JOURNALED (1 << 13)
-#define HFSPLUS_VOL_SOFTLOCK (1 << 15)
-#define HFSPLUS_VOL_UNUSED_NODE_FIX (1 << 31)
-
-/* HFS+ BTree node descriptor */
-struct hfs_bnode_desc {
- __be32 next;
- __be32 prev;
- s8 type;
- u8 height;
- __be16 num_recs;
- u16 reserved;
-} __packed;
-
-/* HFS+ BTree node types */
-#define HFS_NODE_INDEX 0x00 /* An internal (index) node */
-#define HFS_NODE_HEADER 0x01 /* The tree header node (node 0) */
-#define HFS_NODE_MAP 0x02 /* Holds part of the bitmap of used nodes */
-#define HFS_NODE_LEAF 0xFF /* A leaf (ndNHeight==1) node */
-
-/* HFS+ BTree header */
-struct hfs_btree_header_rec {
- __be16 depth;
- __be32 root;
- __be32 leaf_count;
- __be32 leaf_head;
- __be32 leaf_tail;
- __be16 node_size;
- __be16 max_key_len;
- __be32 node_count;
- __be32 free_nodes;
- u16 reserved1;
- __be32 clump_size;
- u8 btree_type;
- u8 key_type;
- __be32 attributes;
- u32 reserved3[16];
-} __packed;
-
-/* BTree attributes */
-#define HFS_TREE_BIGKEYS 2
-#define HFS_TREE_VARIDXKEYS 4
-
-/* HFS+ BTree misc info */
-#define HFSPLUS_TREE_HEAD 0
-#define HFSPLUS_NODE_MXSZ 32768
-#define HFSPLUS_ATTR_TREE_NODE_SIZE 8192
-#define HFSPLUS_BTREE_HDR_NODE_RECS_COUNT 3
-#define HFSPLUS_BTREE_HDR_USER_BYTES 128
-
-/* Some special File ID numbers (stolen from hfs.h) */
-#define HFSPLUS_POR_CNID 1 /* Parent Of the Root */
-#define HFSPLUS_ROOT_CNID 2 /* ROOT directory */
-#define HFSPLUS_EXT_CNID 3 /* EXTents B-tree */
-#define HFSPLUS_CAT_CNID 4 /* CATalog B-tree */
-#define HFSPLUS_BAD_CNID 5 /* BAD blocks file */
-#define HFSPLUS_ALLOC_CNID 6 /* ALLOCation file */
-#define HFSPLUS_START_CNID 7 /* STARTup file */
-#define HFSPLUS_ATTR_CNID 8 /* ATTRibutes file */
-#define HFSPLUS_EXCH_CNID 15 /* ExchangeFiles temp id */
-#define HFSPLUS_FIRSTUSER_CNID 16 /* first available user id */
-
-/* btree key type */
-#define HFSPLUS_KEY_CASEFOLDING 0xCF /* case-insensitive */
-#define HFSPLUS_KEY_BINARY 0xBC /* case-sensitive */
-
-/* HFS+ catalog entry key */
-struct hfsplus_cat_key {
- __be16 key_len;
- hfsplus_cnid parent;
- struct hfsplus_unistr name;
-} __packed;
-
-#define HFSPLUS_CAT_KEYLEN (sizeof(struct hfsplus_cat_key))
-
-/* Structs from hfs.h */
-struct hfsp_point {
- __be16 v;
- __be16 h;
-} __packed;
-
-struct hfsp_rect {
- __be16 top;
- __be16 left;
- __be16 bottom;
- __be16 right;
-} __packed;
-
-
-/* HFS directory info (stolen from hfs.h */
-struct DInfo {
- struct hfsp_rect frRect;
- __be16 frFlags;
- struct hfsp_point frLocation;
- __be16 frView;
-} __packed;
-
-struct DXInfo {
- struct hfsp_point frScroll;
- __be32 frOpenChain;
- __be16 frUnused;
- __be16 frComment;
- __be32 frPutAway;
-} __packed;
-
-/* HFS+ folder data (part of an hfsplus_cat_entry) */
-struct hfsplus_cat_folder {
- __be16 type;
- __be16 flags;
- __be32 valence;
- hfsplus_cnid id;
- __be32 create_date;
- __be32 content_mod_date;
- __be32 attribute_mod_date;
- __be32 access_date;
- __be32 backup_date;
- struct hfsplus_perm permissions;
- struct_group_attr(info, __packed,
- struct DInfo user_info;
- struct DXInfo finder_info;
- );
- __be32 text_encoding;
- __be32 subfolders; /* Subfolder count in HFSX. Reserved in HFS+. */
-} __packed;
-
-/* HFS file info (stolen from hfs.h) */
-struct FInfo {
- __be32 fdType;
- __be32 fdCreator;
- __be16 fdFlags;
- struct hfsp_point fdLocation;
- __be16 fdFldr;
-} __packed;
-
-struct FXInfo {
- __be16 fdIconID;
- u8 fdUnused[8];
- __be16 fdComment;
- __be32 fdPutAway;
-} __packed;
-
-/* HFS+ file data (part of a cat_entry) */
-struct hfsplus_cat_file {
- __be16 type;
- __be16 flags;
- u32 reserved1;
- hfsplus_cnid id;
- __be32 create_date;
- __be32 content_mod_date;
- __be32 attribute_mod_date;
- __be32 access_date;
- __be32 backup_date;
- struct hfsplus_perm permissions;
- struct_group_attr(info, __packed,
- struct FInfo user_info;
- struct FXInfo finder_info;
- );
- __be32 text_encoding;
- u32 reserved2;
-
- struct hfsplus_fork_raw data_fork;
- struct hfsplus_fork_raw rsrc_fork;
-} __packed;
-
-/* File and folder flag bits */
-#define HFSPLUS_FILE_LOCKED 0x0001
-#define HFSPLUS_FILE_THREAD_EXISTS 0x0002
-#define HFSPLUS_XATTR_EXISTS 0x0004
-#define HFSPLUS_ACL_EXISTS 0x0008
-#define HFSPLUS_HAS_FOLDER_COUNT 0x0010 /* Folder has subfolder count
- * (HFSX only) */
-
-/* HFS+ catalog thread (part of a cat_entry) */
-struct hfsplus_cat_thread {
- __be16 type;
- s16 reserved;
- hfsplus_cnid parentID;
- struct hfsplus_unistr nodeName;
-} __packed;
-
-#define HFSPLUS_MIN_THREAD_SZ 10
-
-/* A data record in the catalog tree */
-typedef union {
- __be16 type;
- struct hfsplus_cat_folder folder;
- struct hfsplus_cat_file file;
- struct hfsplus_cat_thread thread;
-} __packed hfsplus_cat_entry;
-
-/* HFS+ catalog entry type */
-#define HFSPLUS_FOLDER 0x0001
-#define HFSPLUS_FILE 0x0002
-#define HFSPLUS_FOLDER_THREAD 0x0003
-#define HFSPLUS_FILE_THREAD 0x0004
-
-/* HFS+ extents tree key */
-struct hfsplus_ext_key {
- __be16 key_len;
- u8 fork_type;
- u8 pad;
- hfsplus_cnid cnid;
- __be32 start_block;
-} __packed;
-
-#define HFSPLUS_EXT_KEYLEN sizeof(struct hfsplus_ext_key)
-
-#define HFSPLUS_XATTR_FINDER_INFO_NAME "com.apple.FinderInfo"
-#define HFSPLUS_XATTR_ACL_NAME "com.apple.system.Security"
-
-#define HFSPLUS_ATTR_INLINE_DATA 0x10
-#define HFSPLUS_ATTR_FORK_DATA 0x20
-#define HFSPLUS_ATTR_EXTENTS 0x30
-
-/* HFS+ attributes tree key */
-struct hfsplus_attr_key {
- __be16 key_len;
- __be16 pad;
- hfsplus_cnid cnid;
- __be32 start_block;
- struct hfsplus_attr_unistr key_name;
-} __packed;
-
-#define HFSPLUS_ATTR_KEYLEN sizeof(struct hfsplus_attr_key)
-
-/* HFS+ fork data attribute */
-struct hfsplus_attr_fork_data {
- __be32 record_type;
- __be32 reserved;
- struct hfsplus_fork_raw the_fork;
-} __packed;
-
-/* HFS+ extension attribute */
-struct hfsplus_attr_extents {
- __be32 record_type;
- __be32 reserved;
- struct hfsplus_extent extents;
-} __packed;
-
-#define HFSPLUS_MAX_INLINE_DATA_SIZE 3802
-
-/* HFS+ attribute inline data */
-struct hfsplus_attr_inline_data {
- __be32 record_type;
- __be32 reserved1;
- u8 reserved2[6];
- __be16 length;
- u8 raw_bytes[HFSPLUS_MAX_INLINE_DATA_SIZE];
-} __packed;
-
-/* A data record in the attributes tree */
-typedef union {
- __be32 record_type;
- struct hfsplus_attr_fork_data fork_data;
- struct hfsplus_attr_extents extents;
- struct hfsplus_attr_inline_data inline_data;
-} __packed hfsplus_attr_entry;
-
-/* HFS+ generic BTree key */
-typedef union {
- __be16 key_len;
- struct hfsplus_cat_key cat;
- struct hfsplus_ext_key ext;
- struct hfsplus_attr_key attr;
-} __packed hfsplus_btree_key;
+#include <linux/hfs_common.h>
#endif
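
With the hunk above, the entire on-disk layout (fixed-endian, packed structures and their constants) moves out of hfsplus_raw.h into the shared <linux/hfs_common.h> header. The short standalone example below only illustrates why those fields carry explicit big-endian types: every access has to decode from disk byte order, as a be16_to_cpu() call does in the kernel.

/* Standalone illustration of decoding a big-endian on-disk field (not kernel code). */
#include <stdio.h>
#include <stdint.h>

/* In the kernel the field is a __be16 read with be16_to_cpu(); decoding from
 * raw bytes gives the same host-endian value on any platform. */
static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	/* The length field of an on-disk unicode string, value 5, stored big-endian. */
	const uint8_t disk[2] = { 0x00, 0x05 };

	printf("decoded length: %u\n", get_be16(disk));
	return 0;
}
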
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index f331e9574217..7ae6745ca7ae 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -38,12 +38,14 @@ static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
}
}
-int hfsplus_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
+int hfsplus_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping, loff_t pos,
+ unsigned len, struct folio **foliop,
+ void **fsdata)
{
int ret;
- ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
+ ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
hfsplus_get_block,
&HFSPLUS_I(mapping->host)->phys_size);
if (unlikely(ret))
@@ -178,13 +180,29 @@ const struct dentry_operations hfsplus_dentry_operations = {
.d_compare = hfsplus_compare_dentry,
};
-static void hfsplus_get_perms(struct inode *inode,
- struct hfsplus_perm *perms, int dir)
+static int hfsplus_get_perms(struct inode *inode,
+ struct hfsplus_perm *perms, int dir)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
u16 mode;
mode = be16_to_cpu(perms->mode);
+ if (dir) {
+ if (mode && !S_ISDIR(mode))
+ goto bad_type;
+ } else if (mode) {
+ switch (mode & S_IFMT) {
+ case S_IFREG:
+ case S_IFLNK:
+ case S_IFCHR:
+ case S_IFBLK:
+ case S_IFIFO:
+ case S_IFSOCK:
+ break;
+ default:
+ goto bad_type;
+ }
+ }
i_uid_write(inode, be32_to_cpu(perms->owner));
if ((test_bit(HFSPLUS_SB_UID, &sbi->flags)) || (!i_uid_read(inode) && !mode))
@@ -210,6 +228,10 @@ static void hfsplus_get_perms(struct inode *inode,
inode->i_flags |= S_APPEND;
else
inode->i_flags &= ~S_APPEND;
+ return 0;
+bad_type:
+ pr_err("invalid file type 0%04o for inode %lu\n", mode, inode->i_ino);
+ return -EIO;
}
static int hfsplus_file_open(struct inode *inode, struct file *file)
@@ -303,6 +325,7 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
struct inode *inode = file->f_mapping->host;
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
+ struct hfsplus_vh *vhdr = sbi->s_vhdr;
int error = 0, error2;
error = file_write_and_wait_range(file, start, end);
@@ -346,6 +369,14 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
error = error2;
}
+ mutex_lock(&sbi->vh_mutex);
+ hfsplus_prepare_volume_header_for_commit(vhdr);
+ mutex_unlock(&sbi->vh_mutex);
+
+ error2 = hfsplus_commit_superblock(inode->i_sb);
+ if (!error)
+ error = error2;
+
if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
blkdev_issue_flush(inode->i_sb->s_bdev);
@@ -366,8 +397,9 @@ static const struct file_operations hfsplus_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
+ .mmap_prepare = generic_file_mmap_prepare,
.splice_read = filemap_splice_read,
+ .splice_write = iter_file_splice_write,
.fsync = hfsplus_file_fsync,
.open = hfsplus_file_open,
.release = hfsplus_file_release,
@@ -513,7 +545,9 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
}
hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
sizeof(struct hfsplus_cat_folder));
- hfsplus_get_perms(inode, &folder->permissions, 1);
+ res = hfsplus_get_perms(inode, &folder->permissions, 1);
+ if (res)
+ goto out;
set_nlink(inode, 1);
inode->i_size = 2 + be32_to_cpu(folder->valence);
inode_set_atime_to_ts(inode, hfsp_mt2ut(folder->access_date));
@@ -542,7 +576,9 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
hfsplus_inode_read_fork(inode, HFSPLUS_IS_RSRC(inode) ?
&file->rsrc_fork : &file->data_fork);
- hfsplus_get_perms(inode, &file->permissions, 0);
+ res = hfsplus_get_perms(inode, &file->permissions, 0);
+ if (res)
+ goto out;
set_nlink(inode, 1);
if (S_ISREG(inode->i_mode)) {
if (file->permissions.dev)
@@ -654,7 +690,7 @@ out:
return res;
}
-int hfsplus_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int hfsplus_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
@@ -673,7 +709,7 @@ int hfsplus_fileattr_get(struct dentry *dentry, struct fileattr *fa)
}
int hfsplus_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa)
+ struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index a66a09a56bf7..9b377481f397 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/fs_struct.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/nls.h>
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 948b8aaee33e..aaffa9e060a0 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -65,16 +65,29 @@ struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
- INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
- spin_lock_init(&HFSPLUS_I(inode)->open_dir_lock);
- mutex_init(&HFSPLUS_I(inode)->extents_lock);
- HFSPLUS_I(inode)->flags = 0;
+ atomic_set(&HFSPLUS_I(inode)->opencnt, 0);
+ HFSPLUS_I(inode)->first_blocks = 0;
+ HFSPLUS_I(inode)->clump_blocks = 0;
+ HFSPLUS_I(inode)->alloc_blocks = 0;
+ HFSPLUS_I(inode)->cached_start = U32_MAX;
+ HFSPLUS_I(inode)->cached_blocks = 0;
+ memset(HFSPLUS_I(inode)->first_extents, 0, sizeof(hfsplus_extent_rec));
+ memset(HFSPLUS_I(inode)->cached_extents, 0, sizeof(hfsplus_extent_rec));
HFSPLUS_I(inode)->extent_state = 0;
+ mutex_init(&HFSPLUS_I(inode)->extents_lock);
HFSPLUS_I(inode)->rsrc_inode = NULL;
- atomic_set(&HFSPLUS_I(inode)->opencnt, 0);
+ HFSPLUS_I(inode)->create_date = 0;
+ HFSPLUS_I(inode)->linkid = 0;
+ HFSPLUS_I(inode)->flags = 0;
+ HFSPLUS_I(inode)->fs_blocks = 0;
+ HFSPLUS_I(inode)->userflags = 0;
+ HFSPLUS_I(inode)->subfolders = 0;
+ INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
+ spin_lock_init(&HFSPLUS_I(inode)->open_dir_lock);
+ HFSPLUS_I(inode)->phys_size = 0;
if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
inode->i_ino == HFSPLUS_ROOT_CNID) {
@@ -150,7 +163,7 @@ static int hfsplus_write_inode(struct inode *inode,
{
int err;
- hfs_dbg(INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);
+ hfs_dbg("ino %lu\n", inode->i_ino);
err = hfsplus_ext_write_extent(inode);
if (err)
@@ -165,7 +178,7 @@ static int hfsplus_write_inode(struct inode *inode,
static void hfsplus_evict_inode(struct inode *inode)
{
- hfs_dbg(INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino);
+ hfs_dbg("ino %lu\n", inode->i_ino);
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
if (HFSPLUS_IS_RSRC(inode)) {
@@ -174,17 +187,62 @@ static void hfsplus_evict_inode(struct inode *inode)
}
}
-static int hfsplus_sync_fs(struct super_block *sb, int wait)
+int hfsplus_commit_superblock(struct super_block *sb)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct hfsplus_vh *vhdr = sbi->s_vhdr;
int write_backup = 0;
+ int error = 0, error2;
+
+ hfs_dbg("starting...\n");
+
+ mutex_lock(&sbi->vh_mutex);
+ mutex_lock(&sbi->alloc_mutex);
+ vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
+ vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
+ vhdr->folder_count = cpu_to_be32(sbi->folder_count);
+ vhdr->file_count = cpu_to_be32(sbi->file_count);
+
+ hfs_dbg("free_blocks %u, next_cnid %u, folder_count %u, file_count %u\n",
+ sbi->free_blocks, sbi->next_cnid,
+ sbi->folder_count, sbi->file_count);
+
+ if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
+ memcpy(sbi->s_backup_vhdr, sbi->s_vhdr, sizeof(*sbi->s_vhdr));
+ write_backup = 1;
+ }
+
+ error2 = hfsplus_submit_bio(sb,
+ sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
+ sbi->s_vhdr_buf, NULL, REQ_OP_WRITE);
+ if (!error)
+ error = error2;
+ if (!write_backup)
+ goto out;
+
+ error2 = hfsplus_submit_bio(sb,
+ sbi->part_start + sbi->sect_count - 2,
+ sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE);
+ if (!error)
+ error = error2;
+out:
+ mutex_unlock(&sbi->alloc_mutex);
+ mutex_unlock(&sbi->vh_mutex);
+
+ hfs_dbg("finished: err %d\n", error);
+
+ return error;
+}
+
+static int hfsplus_sync_fs(struct super_block *sb, int wait)
+{
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
int error, error2;
if (!wait)
return 0;
- hfs_dbg(SUPER, "hfsplus_sync_fs\n");
+ hfs_dbg("starting...\n");
/*
* Explicitly write out the special metadata inodes.
@@ -208,40 +266,15 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
if (!error)
error = error2;
- mutex_lock(&sbi->vh_mutex);
- mutex_lock(&sbi->alloc_mutex);
- vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
- vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
- vhdr->folder_count = cpu_to_be32(sbi->folder_count);
- vhdr->file_count = cpu_to_be32(sbi->file_count);
-
- if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
- memcpy(sbi->s_backup_vhdr, sbi->s_vhdr, sizeof(*sbi->s_vhdr));
- write_backup = 1;
- }
-
- error2 = hfsplus_submit_bio(sb,
- sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
- sbi->s_vhdr_buf, NULL, REQ_OP_WRITE |
- REQ_SYNC);
+ error2 = hfsplus_commit_superblock(sb);
if (!error)
error = error2;
- if (!write_backup)
- goto out;
-
- error2 = hfsplus_submit_bio(sb,
- sbi->part_start + sbi->sect_count - 2,
- sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE |
- REQ_SYNC);
- if (!error)
- error2 = error;
-out:
- mutex_unlock(&sbi->alloc_mutex);
- mutex_unlock(&sbi->vh_mutex);
if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
blkdev_issue_flush(sb->s_bdev);
+ hfs_dbg("finished: err %d\n", error);
+
return error;
}
@@ -290,7 +323,7 @@ static void hfsplus_put_super(struct super_block *sb)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
- hfs_dbg(SUPER, "hfsplus_put_super\n");
+ hfs_dbg("starting...\n");
cancel_delayed_work_sync(&sbi->sync_work);
@@ -312,6 +345,8 @@ static void hfsplus_put_super(struct super_block *sb)
kfree(sbi->s_vhdr_buf);
kfree(sbi->s_backup_vhdr_buf);
call_rcu(&sbi->rcu, delayed_free);
+
+ hfs_dbg("finished\n");
}
static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -376,6 +411,15 @@ static const struct super_operations hfsplus_sops = {
.show_options = hfsplus_show_options,
};
+void hfsplus_prepare_volume_header_for_commit(struct hfsplus_vh *vhdr)
+{
+ vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
+ vhdr->modify_date = hfsp_now2mt();
+ be32_add_cpu(&vhdr->write_count, 1);
+ vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
+ vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
+}
+
static int hfsplus_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct hfsplus_vh *vhdr;
@@ -508,7 +552,7 @@ static int hfsplus_fill_super(struct super_block *sb, struct fs_context *fc)
goto out_put_alloc_file;
}
- sb->s_d_op = &hfsplus_dentry_operations;
+ set_default_d_op(sb, &hfsplus_dentry_operations);
sb->s_root = d_make_root(root);
if (!sb->s_root) {
err = -ENOMEM;
@@ -526,7 +570,7 @@ static int hfsplus_fill_super(struct super_block *sb, struct fs_context *fc)
if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
hfs_find_exit(&fd);
if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
- err = -EINVAL;
+ err = -EIO;
goto out_put_root;
}
inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
@@ -543,11 +587,7 @@ static int hfsplus_fill_super(struct super_block *sb, struct fs_context *fc)
* H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
* all three are registered with Apple for our use
*/
- vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
- vhdr->modify_date = hfsp_now2mt();
- be32_add_cpu(&vhdr->write_count, 1);
- vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
- vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
+ hfsplus_prepare_volume_header_for_commit(vhdr);
hfsplus_sync_fs(sb, 1);
if (!sbi->hidden_dir) {
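
The super.c refactor above moves the volume-header write-out into hfsplus_commit_superblock(), which both fsync and sync_fs now call, and in passing corrects the removed "error2 = error;" after the backup-header write, an assignment that silently dropped the backup write's status. Both paths keep the first failure while still attempting the remaining steps; the standalone sketch below models that aggregation pattern with hypothetical step functions standing in for the bio submissions and the cache flush.

/* Standalone model of "record first error, keep going" aggregation (illustrative only). */
#include <stdio.h>
#include <errno.h>

static int write_primary_header(void) { return 0; }
static int write_backup_header(void)  { return -EIO; }	/* pretend the backup write fails */
static int flush_disk_cache(void)     { return 0; }

static int commit(void)
{
	int error = 0, error2;

	error2 = write_primary_header();
	if (!error)
		error = error2;

	error2 = write_backup_header();
	if (!error)
		error = error2;	/* first failure wins; later steps still run */

	error2 = flush_disk_cache();
	if (!error)
		error = error2;

	return error;
}

int main(void)
{
	printf("commit() = %d (expected %d)\n", commit(), -EIO);
	return 0;
}
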
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index 73342c925a4b..d3a142f4518b 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -11,6 +11,9 @@
#include <linux/types.h>
#include <linux/nls.h>
+
+#include <kunit/visibility.h>
+
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
@@ -40,6 +43,18 @@ int hfsplus_strcasecmp(const struct hfsplus_unistr *s1,
p1 = s1->unicode;
p2 = s2->unicode;
+ if (len1 > HFSPLUS_MAX_STRLEN) {
+ len1 = HFSPLUS_MAX_STRLEN;
+ pr_err("invalid length %u has been corrected to %d\n",
+ be16_to_cpu(s1->length), len1);
+ }
+
+ if (len2 > HFSPLUS_MAX_STRLEN) {
+ len2 = HFSPLUS_MAX_STRLEN;
+ pr_err("invalid length %u has been corrected to %d\n",
+ be16_to_cpu(s2->length), len2);
+ }
+
while (1) {
c1 = c2 = 0;
@@ -60,6 +75,7 @@ int hfsplus_strcasecmp(const struct hfsplus_unistr *s1,
return 0;
}
}
+EXPORT_SYMBOL_IF_KUNIT(hfsplus_strcasecmp);
/* Compare names as a sequence of 16-bit unsigned integers */
int hfsplus_strcmp(const struct hfsplus_unistr *s1,
@@ -74,6 +90,18 @@ int hfsplus_strcmp(const struct hfsplus_unistr *s1,
p1 = s1->unicode;
p2 = s2->unicode;
+ if (len1 > HFSPLUS_MAX_STRLEN) {
+ len1 = HFSPLUS_MAX_STRLEN;
+ pr_err("invalid length %u has been corrected to %d\n",
+ be16_to_cpu(s1->length), len1);
+ }
+
+ if (len2 > HFSPLUS_MAX_STRLEN) {
+ len2 = HFSPLUS_MAX_STRLEN;
+ pr_err("invalid length %u has been corrected to %d\n",
+ be16_to_cpu(s2->length), len2);
+ }
+
for (len = min(len1, len2); len > 0; len--) {
c1 = be16_to_cpu(*p1);
c2 = be16_to_cpu(*p2);
@@ -86,7 +114,7 @@ int hfsplus_strcmp(const struct hfsplus_unistr *s1,
return len1 < len2 ? -1 :
len1 > len2 ? 1 : 0;
}
-
+EXPORT_SYMBOL_IF_KUNIT(hfsplus_strcmp);
#define Hangul_SBase 0xac00
#define Hangul_LBase 0x1100
@@ -119,9 +147,9 @@ static u16 *hfsplus_compose_lookup(u16 *p, u16 cc)
return NULL;
}
-int hfsplus_uni2asc(struct super_block *sb,
- const struct hfsplus_unistr *ustr,
- char *astr, int *len_p)
+static int hfsplus_uni2asc(struct super_block *sb,
+ const struct hfsplus_unistr *ustr,
+ int max_len, char *astr, int *len_p)
{
const hfsplus_unichr *ip;
struct nls_table *nls = HFSPLUS_SB(sb)->nls;
@@ -132,7 +160,14 @@ int hfsplus_uni2asc(struct super_block *sb,
op = astr;
ip = ustr->unicode;
+
ustrlen = be16_to_cpu(ustr->length);
+ if (ustrlen > max_len) {
+ ustrlen = max_len;
+ pr_err("invalid length %u has been corrected to %d\n",
+ be16_to_cpu(ustr->length), ustrlen);
+ }
+
len = *len_p;
ce1 = NULL;
compose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
@@ -249,6 +284,23 @@ out:
return res;
}
+inline int hfsplus_uni2asc_str(struct super_block *sb,
+ const struct hfsplus_unistr *ustr, char *astr,
+ int *len_p)
+{
+ return hfsplus_uni2asc(sb, ustr, HFSPLUS_MAX_STRLEN, astr, len_p);
+}
+EXPORT_SYMBOL_IF_KUNIT(hfsplus_uni2asc_str);
+
+inline int hfsplus_uni2asc_xattr_str(struct super_block *sb,
+ const struct hfsplus_attr_unistr *ustr,
+ char *astr, int *len_p)
+{
+ return hfsplus_uni2asc(sb, (const struct hfsplus_unistr *)ustr,
+ HFSPLUS_ATTR_MAX_STRLEN, astr, len_p);
+}
+EXPORT_SYMBOL_IF_KUNIT(hfsplus_uni2asc_xattr_str);
+
/*
* Convert one or more ASCII characters into a single unicode character.
* Returns the number of ASCII characters corresponding to the unicode char.
@@ -375,6 +427,7 @@ int hfsplus_asc2uni(struct super_block *sb,
return -ENAMETOOLONG;
return 0;
}
+EXPORT_SYMBOL_IF_KUNIT(hfsplus_asc2uni);
/*
* Hash a string to an integer as appropriate for the HFS+ filesystem.
@@ -427,6 +480,7 @@ int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str)
return 0;
}
+EXPORT_SYMBOL_IF_KUNIT(hfsplus_hash_dentry);
/*
* Compare strings with HFS+ filename ordering.
@@ -518,3 +572,4 @@ int hfsplus_compare_dentry(const struct dentry *dentry,
return 1;
return 0;
}
+EXPORT_SYMBOL_IF_KUNIT(hfsplus_compare_dentry);
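
In unicode.c the converter becomes static and takes an explicit ceiling, with hfsplus_uni2asc_str() and hfsplus_uni2asc_xattr_str() pinning that ceiling to the filename and xattr-name limits, and the EXPORT_SYMBOL_IF_KUNIT() markers expose these helpers to the KUnit test file added next. The standalone sketch below models the same one-implementation, two-bounded-entry-points shape with simplified types; the limits and conversion logic here are placeholders, not the kernel's NLS path.

/* Standalone model of a shared converter with per-caller length ceilings (illustrative only). */
#include <stdio.h>
#include <stdint.h>

#define NAME_MAX_U16  255	/* stands in for HFSPLUS_MAX_STRLEN */
#define XATTR_MAX_U16 127	/* stands in for HFSPLUS_ATTR_MAX_STRLEN */

struct be_string {
	uint16_t length;	/* untrusted, read from disk */
	const uint16_t *units;
};

/* The single implementation clamps the on-disk length to the caller's ceiling. */
static int convert(const struct be_string *s, unsigned int max_len, char *out, int *out_len)
{
	unsigned int len = s->length;
	unsigned int i;

	if (len > max_len) {
		fprintf(stderr, "invalid length %u corrected to %u\n", len, max_len);
		len = max_len;
	}
	for (i = 0; i < len && (int)i < *out_len; i++)
		out[i] = s->units[i] < 0x80 ? (char)s->units[i] : '?';
	*out_len = i;
	return 0;
}

/* Thin, purpose-named wrappers pick the right ceiling so callers cannot mix them up. */
static int convert_filename(const struct be_string *s, char *out, int *out_len)
{
	return convert(s, NAME_MAX_U16, out, out_len);
}

static int convert_xattr_name(const struct be_string *s, char *out, int *out_len)
{
	return convert(s, XATTR_MAX_U16, out, out_len);
}

int main(void)
{
	uint16_t units[NAME_MAX_U16] = { 'n', 'a', 'm', 'e' };	/* rest zero-filled */
	struct be_string s = { .length = 60000, .units = units };	/* corrupted length */
	char buf[300];
	int len;

	len = sizeof(buf);
	convert_filename(&s, buf, &len);
	printf("filename path converted %d unit(s)\n", len);	/* 255 */

	len = sizeof(buf);
	convert_xattr_name(&s, buf, &len);
	printf("xattr path converted %d unit(s)\n", len);	/* 127 */
	return 0;
}
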
diff --git a/fs/hfsplus/unicode_test.c b/fs/hfsplus/unicode_test.c
new file mode 100644
index 000000000000..5a7a6859efe3
--- /dev/null
+++ b/fs/hfsplus/unicode_test.c
@@ -0,0 +1,1579 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit tests for HFS+ Unicode string operations
+ *
+ * Copyright (C) 2025 Viacheslav Dubeyko <slava@dubeyko.com>
+ */
+
+#include <kunit/test.h>
+#include <linux/nls.h>
+#include <linux/dcache.h>
+#include <linux/stringhash.h>
+#include "hfsplus_fs.h"
+
+struct test_mock_string_env {
+ struct hfsplus_unistr str1;
+ struct hfsplus_unistr str2;
+ char *buf;
+ u32 buf_size;
+};
+
+static struct test_mock_string_env *setup_mock_str_env(u32 buf_size)
+{
+ struct test_mock_string_env *env;
+
+ env = kzalloc(sizeof(struct test_mock_string_env), GFP_KERNEL);
+ if (!env)
+ return NULL;
+
+ env->buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!env->buf) {
+ kfree(env);
+ return NULL;
+ }
+
+ env->buf_size = buf_size;
+
+ return env;
+}
+
+static void free_mock_str_env(struct test_mock_string_env *env)
+{
+ if (env->buf)
+ kfree(env->buf);
+ kfree(env);
+}
+
+/* Helper function to create hfsplus_unistr */
+static void create_unistr(struct hfsplus_unistr *ustr, const char *ascii_str)
+{
+ int len = strlen(ascii_str);
+ int i;
+
+ memset(ustr->unicode, 0, sizeof(ustr->unicode));
+
+ ustr->length = cpu_to_be16(len);
+ for (i = 0; i < len && i < HFSPLUS_MAX_STRLEN; i++)
+ ustr->unicode[i] = cpu_to_be16((u16)ascii_str[i]);
+}
+
+static void corrupt_unistr(struct hfsplus_unistr *ustr)
+{
+ ustr->length = cpu_to_be16(U16_MAX);
+}
+
+/* Test hfsplus_strcasecmp function */
+static void hfsplus_strcasecmp_test(struct kunit *test)
+{
+ struct test_mock_string_env *mock_env;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ /* Test identical strings */
+ create_unistr(&mock_env->str1, "hello");
+ create_unistr(&mock_env->str2, "hello");
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Test case insensitive comparison */
+ create_unistr(&mock_env->str1, "Hello");
+ create_unistr(&mock_env->str2, "hello");
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ create_unistr(&mock_env->str1, "HELLO");
+ create_unistr(&mock_env->str2, "hello");
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Test different strings */
+ create_unistr(&mock_env->str1, "apple");
+ create_unistr(&mock_env->str2, "banana");
+ KUNIT_EXPECT_LT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "zebra");
+ create_unistr(&mock_env->str2, "apple");
+ KUNIT_EXPECT_GT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test different lengths */
+ create_unistr(&mock_env->str1, "test");
+ create_unistr(&mock_env->str2, "testing");
+ KUNIT_EXPECT_LT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "testing");
+ create_unistr(&mock_env->str2, "test");
+ KUNIT_EXPECT_GT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test empty strings */
+ create_unistr(&mock_env->str1, "");
+ create_unistr(&mock_env->str2, "");
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ create_unistr(&mock_env->str1, "");
+ create_unistr(&mock_env->str2, "test");
+ KUNIT_EXPECT_LT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test single characters */
+ create_unistr(&mock_env->str1, "A");
+ create_unistr(&mock_env->str2, "a");
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ create_unistr(&mock_env->str1, "A");
+ create_unistr(&mock_env->str2, "B");
+ KUNIT_EXPECT_LT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test maximum length strings */
+ memset(mock_env->buf, 'a', HFSPLUS_MAX_STRLEN);
+ mock_env->buf[HFSPLUS_MAX_STRLEN] = '\0';
+ create_unistr(&mock_env->str1, mock_env->buf);
+ create_unistr(&mock_env->str2, mock_env->buf);
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Change one character in the middle */
+ mock_env->buf[HFSPLUS_MAX_STRLEN / 2] = 'b';
+ create_unistr(&mock_env->str2, mock_env->buf);
+ KUNIT_EXPECT_LT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test corrupted strings */
+ create_unistr(&mock_env->str1, "");
+ corrupt_unistr(&mock_env->str1);
+ create_unistr(&mock_env->str2, "");
+ KUNIT_EXPECT_NE(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ create_unistr(&mock_env->str1, "");
+ create_unistr(&mock_env->str2, "");
+ corrupt_unistr(&mock_env->str2);
+ KUNIT_EXPECT_NE(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ create_unistr(&mock_env->str1, "test");
+ corrupt_unistr(&mock_env->str1);
+ create_unistr(&mock_env->str2, "testing");
+ KUNIT_EXPECT_GT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "test");
+ create_unistr(&mock_env->str2, "testing");
+ corrupt_unistr(&mock_env->str2);
+ KUNIT_EXPECT_LT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "testing");
+ corrupt_unistr(&mock_env->str1);
+ create_unistr(&mock_env->str2, "test");
+ KUNIT_EXPECT_GT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "testing");
+ create_unistr(&mock_env->str2, "test");
+ corrupt_unistr(&mock_env->str2);
+ KUNIT_EXPECT_LT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ free_mock_str_env(mock_env);
+}
+
+/* Test hfsplus_strcmp function (case-sensitive) */
+static void hfsplus_strcmp_test(struct kunit *test)
+{
+ struct test_mock_string_env *mock_env;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ /* Test identical strings */
+ create_unistr(&mock_env->str1, "hello");
+ create_unistr(&mock_env->str2, "hello");
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Test case sensitive comparison - should NOT be equal */
+ create_unistr(&mock_env->str1, "Hello");
+ create_unistr(&mock_env->str2, "hello");
+ KUNIT_EXPECT_NE(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+ /* 'H' < 'h' in Unicode */
+ KUNIT_EXPECT_LT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test lexicographic ordering */
+ create_unistr(&mock_env->str1, "apple");
+ create_unistr(&mock_env->str2, "banana");
+ KUNIT_EXPECT_LT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "zebra");
+ create_unistr(&mock_env->str2, "apple");
+ KUNIT_EXPECT_GT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test different lengths with common prefix */
+ create_unistr(&mock_env->str1, "test");
+ create_unistr(&mock_env->str2, "testing");
+ KUNIT_EXPECT_LT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "testing");
+ create_unistr(&mock_env->str2, "test");
+ KUNIT_EXPECT_GT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test empty strings */
+ create_unistr(&mock_env->str1, "");
+ create_unistr(&mock_env->str2, "");
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Test maximum length strings */
+ memset(mock_env->buf, 'a', HFSPLUS_MAX_STRLEN);
+ mock_env->buf[HFSPLUS_MAX_STRLEN] = '\0';
+ create_unistr(&mock_env->str1, mock_env->buf);
+ create_unistr(&mock_env->str2, mock_env->buf);
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Change one character in the middle */
+ mock_env->buf[HFSPLUS_MAX_STRLEN / 2] = 'b';
+ create_unistr(&mock_env->str2, mock_env->buf);
+ KUNIT_EXPECT_LT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test corrupted strings */
+ create_unistr(&mock_env->str1, "");
+ corrupt_unistr(&mock_env->str1);
+ create_unistr(&mock_env->str2, "");
+ KUNIT_EXPECT_NE(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+
+ create_unistr(&mock_env->str1, "");
+ create_unistr(&mock_env->str2, "");
+ corrupt_unistr(&mock_env->str2);
+ KUNIT_EXPECT_NE(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+
+ create_unistr(&mock_env->str1, "test");
+ corrupt_unistr(&mock_env->str1);
+ create_unistr(&mock_env->str2, "testing");
+ KUNIT_EXPECT_LT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "test");
+ create_unistr(&mock_env->str2, "testing");
+ corrupt_unistr(&mock_env->str2);
+ KUNIT_EXPECT_LT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "testing");
+ corrupt_unistr(&mock_env->str1);
+ create_unistr(&mock_env->str2, "test");
+ KUNIT_EXPECT_GT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "testing");
+ create_unistr(&mock_env->str2, "test");
+ corrupt_unistr(&mock_env->str2);
+ KUNIT_EXPECT_GT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ free_mock_str_env(mock_env);
+}
+
+/* Test Unicode edge cases */
+static void hfsplus_unicode_edge_cases_test(struct kunit *test)
+{
+ struct test_mock_string_env *mock_env;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ /* Test with special characters */
+ mock_env->str1.length = cpu_to_be16(3);
+ mock_env->str1.unicode[0] = cpu_to_be16(0x00E9); /* é */
+ mock_env->str1.unicode[1] = cpu_to_be16(0x00F1); /* ñ */
+ mock_env->str1.unicode[2] = cpu_to_be16(0x00FC); /* ü */
+
+ mock_env->str2.length = cpu_to_be16(3);
+ mock_env->str2.unicode[0] = cpu_to_be16(0x00E9); /* é */
+ mock_env->str2.unicode[1] = cpu_to_be16(0x00F1); /* ñ */
+ mock_env->str2.unicode[2] = cpu_to_be16(0x00FC); /* ü */
+
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Test with different special characters */
+ mock_env->str2.unicode[1] = cpu_to_be16(0x00F2); /* ò */
+ KUNIT_EXPECT_NE(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Test null characters within string (should be handled correctly) */
+ mock_env->str1.length = cpu_to_be16(3);
+ mock_env->str1.unicode[0] = cpu_to_be16('a');
+ mock_env->str1.unicode[1] = cpu_to_be16(0x0000); /* null */
+ mock_env->str1.unicode[2] = cpu_to_be16('b');
+
+ mock_env->str2.length = cpu_to_be16(3);
+ mock_env->str2.unicode[0] = cpu_to_be16('a');
+ mock_env->str2.unicode[1] = cpu_to_be16(0x0000); /* null */
+ mock_env->str2.unicode[2] = cpu_to_be16('b');
+
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+
+ free_mock_str_env(mock_env);
+}
+
+/* Test boundary conditions */
+static void hfsplus_unicode_boundary_test(struct kunit *test)
+{
+ struct test_mock_string_env *mock_env;
+ int i;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ /* Test maximum length boundary */
+ mock_env->str1.length = cpu_to_be16(HFSPLUS_MAX_STRLEN);
+ mock_env->str2.length = cpu_to_be16(HFSPLUS_MAX_STRLEN);
+
+ for (i = 0; i < HFSPLUS_MAX_STRLEN; i++) {
+ mock_env->str1.unicode[i] = cpu_to_be16('A');
+ mock_env->str2.unicode[i] = cpu_to_be16('A');
+ }
+
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Change last character */
+ mock_env->str2.unicode[HFSPLUS_MAX_STRLEN - 1] = cpu_to_be16('B');
+ KUNIT_EXPECT_LT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test zero length strings */
+ mock_env->str1.length = cpu_to_be16(0);
+ mock_env->str2.length = cpu_to_be16(0);
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Test one character vs empty */
+ mock_env->str1.length = cpu_to_be16(1);
+ mock_env->str1.unicode[0] = cpu_to_be16('A');
+ mock_env->str2.length = cpu_to_be16(0);
+ KUNIT_EXPECT_GT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+ KUNIT_EXPECT_GT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ free_mock_str_env(mock_env);
+}
+
+/* Mock superblock and NLS table for testing hfsplus_uni2asc */
+struct test_mock_sb {
+ struct nls_table nls;
+ struct hfsplus_sb_info sb_info;
+ struct super_block sb;
+};
+
+static struct test_mock_sb *setup_mock_sb(void)
+{
+ struct test_mock_sb *ptr;
+
+ ptr = kzalloc(sizeof(struct test_mock_sb), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ ptr->nls.charset = "utf8";
+ ptr->nls.uni2char = NULL; /* Will use default behavior */
+ ptr->sb_info.nls = &ptr->nls;
+ ptr->sb.s_fs_info = &ptr->sb_info;
+
+ /* Set default flags - no decomposition, no case folding */
+ clear_bit(HFSPLUS_SB_NODECOMPOSE, &ptr->sb_info.flags);
+ clear_bit(HFSPLUS_SB_CASEFOLD, &ptr->sb_info.flags);
+
+ return ptr;
+}
+
+static void free_mock_sb(struct test_mock_sb *ptr)
+{
+ kfree(ptr);
+}
+
+/* Simple uni2char implementation for testing */
+static int test_uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+ if (boundlen <= 0)
+ return -ENAMETOOLONG;
+
+ if (uni < 0x80) {
+ *out = (unsigned char)uni;
+ return 1;
+ }
+
+ /* For non-ASCII, just use '?' as fallback */
+ *out = '?';
+ return 1;
+}
+
+/* Test hfsplus_uni2asc basic functionality */
+static void hfsplus_uni2asc_basic_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int len, result;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.uni2char = test_uni2char;
+
+ /* Test simple ASCII string conversion */
+ create_unistr(&mock_env->str1, "hello");
+ len = mock_env->buf_size;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 5, len);
+ KUNIT_EXPECT_STREQ(test, "hello", mock_env->buf);
+
+ /* Test empty string */
+ create_unistr(&mock_env->str1, "");
+ len = mock_env->buf_size;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 0, len);
+
+ /* Test single character */
+ create_unistr(&mock_env->str1, "A");
+ len = mock_env->buf_size;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 1, len);
+ KUNIT_EXPECT_EQ(test, 'A', mock_env->buf[0]);
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Test special character handling */
+static void hfsplus_uni2asc_special_chars_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int len, result;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.uni2char = test_uni2char;
+
+ /* Test null character conversion (should become 0x2400) */
+ mock_env->str1.length = cpu_to_be16(1);
+ mock_env->str1.unicode[0] = cpu_to_be16(0x0000);
+ len = mock_env->buf_size;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 1, len);
+ /* Our test implementation returns '?' for non-ASCII */
+ KUNIT_EXPECT_EQ(test, '?', mock_env->buf[0]);
+
+ /* Test forward slash conversion (should become colon) */
+ mock_env->str1.length = cpu_to_be16(1);
+ mock_env->str1.unicode[0] = cpu_to_be16('/');
+ len = mock_env->buf_size;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 1, len);
+ KUNIT_EXPECT_EQ(test, ':', mock_env->buf[0]);
+
+ /* Test string with mixed special characters */
+ mock_env->str1.length = cpu_to_be16(3);
+ mock_env->str1.unicode[0] = cpu_to_be16('a');
+ mock_env->str1.unicode[1] = cpu_to_be16('/');
+ mock_env->str1.unicode[2] = cpu_to_be16('b');
+ len = mock_env->buf_size;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 3, len);
+ KUNIT_EXPECT_EQ(test, 'a', mock_env->buf[0]);
+ KUNIT_EXPECT_EQ(test, ':', mock_env->buf[1]);
+ KUNIT_EXPECT_EQ(test, 'b', mock_env->buf[2]);
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Test buffer length handling */
+static void hfsplus_uni2asc_buffer_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int len, result;
+
+ mock_env = setup_mock_str_env(10);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.uni2char = test_uni2char;
+
+ /* Test insufficient buffer space */
+ create_unistr(&mock_env->str1, "toolongstring");
+ len = 5; /* Buffer too small */
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, -ENAMETOOLONG, result);
+ KUNIT_EXPECT_EQ(test, 5, len); /* Should be set to consumed length */
+
+ /* Test exact buffer size */
+ create_unistr(&mock_env->str1, "exact");
+ len = 5;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 5, len);
+
+ /* Test zero length buffer */
+ create_unistr(&mock_env->str1, "test");
+ len = 0;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, -ENAMETOOLONG, result);
+ KUNIT_EXPECT_EQ(test, 0, len);
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Test corrupted unicode string handling */
+static void hfsplus_uni2asc_corrupted_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int len, result;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.uni2char = test_uni2char;
+
+ /* Test corrupted length (too large) */
+ create_unistr(&mock_env->str1, "test");
+ corrupt_unistr(&mock_env->str1); /* Sets length to U16_MAX */
+ len = mock_env->buf_size;
+
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ /* Should still work but with corrected length */
+ KUNIT_EXPECT_EQ(test, 0, result);
+ /*
+ * Length should be corrected to HFSPLUS_MAX_STRLEN
+ * and processed accordingly
+ */
+ KUNIT_EXPECT_GT(test, len, 0);
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Test edge cases and boundary conditions */
+static void hfsplus_uni2asc_edge_cases_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int len, result;
+ int i;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN * 2);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.uni2char = test_uni2char;
+
+ /* Test maximum length string */
+ mock_env->str1.length = cpu_to_be16(HFSPLUS_MAX_STRLEN);
+ for (i = 0; i < HFSPLUS_MAX_STRLEN; i++)
+ mock_env->str1.unicode[i] = cpu_to_be16('a');
+
+ len = mock_env->buf_size;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, HFSPLUS_MAX_STRLEN, len);
+
+ /* Verify all characters are 'a' */
+ for (i = 0; i < HFSPLUS_MAX_STRLEN; i++)
+ KUNIT_EXPECT_EQ(test, 'a', mock_env->buf[i]);
+
+ /* Test string with high Unicode values (non-ASCII) */
+ mock_env->str1.length = cpu_to_be16(3);
+ mock_env->str1.unicode[0] = cpu_to_be16(0x00E9); /* é */
+ mock_env->str1.unicode[1] = cpu_to_be16(0x00F1); /* ñ */
+ mock_env->str1.unicode[2] = cpu_to_be16(0x00FC); /* ü */
+ len = mock_env->buf_size;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 3, len);
+ /* Our test implementation converts non-ASCII to '?' */
+ KUNIT_EXPECT_EQ(test, '?', mock_env->buf[0]);
+ KUNIT_EXPECT_EQ(test, '?', mock_env->buf[1]);
+ KUNIT_EXPECT_EQ(test, '?', mock_env->buf[2]);
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Simple char2uni implementation for testing */
+static int test_char2uni(const unsigned char *rawstring,
+ int boundlen, wchar_t *uni)
+{
+ if (boundlen <= 0)
+ return -EINVAL;
+
+ *uni = (wchar_t)*rawstring;
+ return 1;
+}
+
+/* Helper function to check unicode string contents */
+static void check_unistr_content(struct kunit *test,
+ struct hfsplus_unistr *ustr,
+ const char *expected_ascii)
+{
+ int expected_len = strlen(expected_ascii);
+ int actual_len = be16_to_cpu(ustr->length);
+ int i;
+
+ KUNIT_EXPECT_EQ(test, expected_len, actual_len);
+
+ for (i = 0; i < expected_len && i < actual_len; i++) {
+ u16 expected_char = (u16)expected_ascii[i];
+ u16 actual_char = be16_to_cpu(ustr->unicode[i]);
+
+ KUNIT_EXPECT_EQ(test, expected_char, actual_char);
+ }
+}
+
+/* Test hfsplus_asc2uni basic functionality */
+static void hfsplus_asc2uni_basic_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int result;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test simple ASCII string conversion */
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1,
+ HFSPLUS_MAX_STRLEN, "hello", 5);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ check_unistr_content(test, &mock_env->str1, "hello");
+
+ /* Test empty string */
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1,
+ HFSPLUS_MAX_STRLEN, "", 0);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 0, be16_to_cpu(mock_env->str1.length));
+
+ /* Test single character */
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1,
+ HFSPLUS_MAX_STRLEN, "A", 1);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ check_unistr_content(test, &mock_env->str1, "A");
+
+ /* Test null-terminated string with explicit length */
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1,
+ HFSPLUS_MAX_STRLEN, "test\0extra", 4);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ check_unistr_content(test, &mock_env->str1, "test");
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Test special character handling in asc2uni */
+static void hfsplus_asc2uni_special_chars_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int result;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test colon conversion (should become forward slash) */
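+ /* hfsplus swaps ':' and '/' so on-disk names never contain the Linux path separator. */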
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1,
+ HFSPLUS_MAX_STRLEN, ":", 1);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 1, be16_to_cpu(mock_env->str1.length));
+ KUNIT_EXPECT_EQ(test, '/', be16_to_cpu(mock_env->str1.unicode[0]));
+
+ /* Test string with mixed special characters */
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1,
+ HFSPLUS_MAX_STRLEN, "a:b", 3);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 3, be16_to_cpu(mock_env->str1.length));
+ KUNIT_EXPECT_EQ(test, 'a', be16_to_cpu(mock_env->str1.unicode[0]));
+ KUNIT_EXPECT_EQ(test, '/', be16_to_cpu(mock_env->str1.unicode[1]));
+ KUNIT_EXPECT_EQ(test, 'b', be16_to_cpu(mock_env->str1.unicode[2]));
+
+ /* Test multiple special characters */
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1,
+ HFSPLUS_MAX_STRLEN, ":::", 3);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 3, be16_to_cpu(mock_env->str1.length));
+ KUNIT_EXPECT_EQ(test, '/', be16_to_cpu(mock_env->str1.unicode[0]));
+ KUNIT_EXPECT_EQ(test, '/', be16_to_cpu(mock_env->str1.unicode[1]));
+ KUNIT_EXPECT_EQ(test, '/', be16_to_cpu(mock_env->str1.unicode[2]));
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Test buffer length limits */
+static void hfsplus_asc2uni_buffer_limits_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int result;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 10);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test exact maximum length */
+ memset(mock_env->buf, 'a', HFSPLUS_MAX_STRLEN);
+ result = hfsplus_asc2uni(&mock_sb->sb,
+ &mock_env->str1, HFSPLUS_MAX_STRLEN,
+ mock_env->buf, HFSPLUS_MAX_STRLEN);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, HFSPLUS_MAX_STRLEN,
+ be16_to_cpu(mock_env->str1.length));
+
+ /* Test exceeding maximum length */
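+ /* Conversion is expected to stop at the limit and report -ENAMETOOLONG. */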
+ memset(mock_env->buf, 'a', HFSPLUS_MAX_STRLEN + 5);
+ result = hfsplus_asc2uni(&mock_sb->sb,
+ &mock_env->str1, HFSPLUS_MAX_STRLEN,
+ mock_env->buf, HFSPLUS_MAX_STRLEN + 5);
+
+ KUNIT_EXPECT_EQ(test, -ENAMETOOLONG, result);
+ KUNIT_EXPECT_EQ(test, HFSPLUS_MAX_STRLEN,
+ be16_to_cpu(mock_env->str1.length));
+
+ /* Test with smaller max_unistr_len */
+ result = hfsplus_asc2uni(&mock_sb->sb,
+ &mock_env->str1, 5, "toolongstring", 13);
+
+ KUNIT_EXPECT_EQ(test, -ENAMETOOLONG, result);
+ KUNIT_EXPECT_EQ(test, 5, be16_to_cpu(mock_env->str1.length));
+
+ /* Test zero max length */
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1, 0, "test", 4);
+
+ KUNIT_EXPECT_EQ(test, -ENAMETOOLONG, result);
+ KUNIT_EXPECT_EQ(test, 0, be16_to_cpu(mock_env->str1.length));
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Test error handling and edge cases */
+static void hfsplus_asc2uni_edge_cases_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct hfsplus_unistr ustr;
+ char test_str[] = {'a', '\0', 'b'};
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test zero length input */
+ result = hfsplus_asc2uni(&mock_sb->sb,
+ &ustr, HFSPLUS_MAX_STRLEN, "test", 0);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 0, be16_to_cpu(ustr.length));
+
+ /* Test input with length mismatch */
+ result = hfsplus_asc2uni(&mock_sb->sb,
+ &ustr, HFSPLUS_MAX_STRLEN, "hello", 3);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ check_unistr_content(test, &ustr, "hel");
+
+ /* Test with various printable ASCII characters */
+ result = hfsplus_asc2uni(&mock_sb->sb,
+ &ustr, HFSPLUS_MAX_STRLEN, "ABC123!@#", 9);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ check_unistr_content(test, &ustr, "ABC123!@#");
+
+ /* Test null character in the middle */
+ result = hfsplus_asc2uni(&mock_sb->sb,
+ &ustr, HFSPLUS_MAX_STRLEN, test_str, 3);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 3, be16_to_cpu(ustr.length));
+ KUNIT_EXPECT_EQ(test, 'a', be16_to_cpu(ustr.unicode[0]));
+ KUNIT_EXPECT_EQ(test, 0, be16_to_cpu(ustr.unicode[1]));
+ KUNIT_EXPECT_EQ(test, 'b', be16_to_cpu(ustr.unicode[2]));
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test decomposition flag behavior */
+static void hfsplus_asc2uni_decompose_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int result;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test with decomposition enabled (NODECOMPOSE clear, the default) */
+ clear_bit(HFSPLUS_SB_NODECOMPOSE, &mock_sb->sb_info.flags);
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1,
+ HFSPLUS_MAX_STRLEN, "test", 4);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ check_unistr_content(test, &mock_env->str1, "test");
+
+ /* Test with decomposition disabled (NODECOMPOSE set) */
+ set_bit(HFSPLUS_SB_NODECOMPOSE, &mock_sb->sb_info.flags);
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str2,
+ HFSPLUS_MAX_STRLEN, "test", 4);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ check_unistr_content(test, &mock_env->str2, "test");
+
+ /* For simple ASCII, both should produce the same result */
+ KUNIT_EXPECT_EQ(test,
+ be16_to_cpu(mock_env->str1.length),
+ be16_to_cpu(mock_env->str2.length));
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Mock dentry for testing hfsplus_hash_dentry */
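+/* Shared across tests; setup_mock_dentry() re-initialises it before each use. */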
+static struct dentry test_dentry;
+
+static void setup_mock_dentry(struct super_block *sb)
+{
+ memset(&test_dentry, 0, sizeof(test_dentry));
+ test_dentry.d_sb = sb;
+}
+
+/* Helper function to create qstr */
+static void create_qstr(struct qstr *str, const char *name)
+{
+ str->name = name;
+ str->len = strlen(name);
+ str->hash = 0; /* Will be set by hash function */
+}
+
+/* Test hfsplus_hash_dentry basic functionality */
+static void hfsplus_hash_dentry_basic_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr str1, str2;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test basic string hashing */
+ create_qstr(&str1, "hello");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_NE(test, 0, str1.hash);
+
+ /* Test that identical strings produce identical hashes */
+ create_qstr(&str2, "hello");
+ result = hfsplus_hash_dentry(&test_dentry, &str2);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, str1.hash, str2.hash);
+
+ /* Test empty string */
+ create_qstr(&str1, "");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+
+ /* Hashing an empty string should still succeed */
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test single character */
+ create_qstr(&str1, "A");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_NE(test, 0, str1.hash);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test case folding behavior in hash */
+static void hfsplus_hash_dentry_casefold_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr str1, str2;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test with case folding disabled (default) */
+ clear_bit(HFSPLUS_SB_CASEFOLD, &mock_sb->sb_info.flags);
+
+ create_qstr(&str1, "Hello");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&str2, "hello");
+ result = hfsplus_hash_dentry(&test_dentry, &str2);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /*
+ * Without case folding, different cases
+ * should produce different hashes
+ */
+ KUNIT_EXPECT_NE(test, str1.hash, str2.hash);
+
+ /* Test with case folding enabled */
+ set_bit(HFSPLUS_SB_CASEFOLD, &mock_sb->sb_info.flags);
+
+ create_qstr(&str1, "Hello");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&str2, "hello");
+ result = hfsplus_hash_dentry(&test_dentry, &str2);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* With case folding, different cases should produce same hash */
+ KUNIT_EXPECT_EQ(test, str1.hash, str2.hash);
+
+ /* Test mixed case */
+ create_qstr(&str1, "HeLLo");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, str1.hash, str2.hash);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test special character handling in hash */
+static void hfsplus_hash_dentry_special_chars_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr str1, str2;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test colon conversion (: becomes /) */
+ create_qstr(&str1, "file:name");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&str2, "file/name");
+ result = hfsplus_hash_dentry(&test_dentry, &str2);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* After conversion, these should produce the same hash */
+ KUNIT_EXPECT_EQ(test, str1.hash, str2.hash);
+
+ /* Test multiple special characters */
+ create_qstr(&str1, ":::");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&str2, "///");
+ result = hfsplus_hash_dentry(&test_dentry, &str2);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ KUNIT_EXPECT_EQ(test, str1.hash, str2.hash);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test decomposition flag behavior in hash */
+static void hfsplus_hash_dentry_decompose_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr str1, str2;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test with decomposition enabled (NODECOMPOSE clear, the default) */
+ clear_bit(HFSPLUS_SB_NODECOMPOSE, &mock_sb->sb_info.flags);
+
+ create_qstr(&str1, "test");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test with decomposition disabled (NODECOMPOSE set) */
+ set_bit(HFSPLUS_SB_NODECOMPOSE, &mock_sb->sb_info.flags);
+
+ create_qstr(&str2, "test");
+ result = hfsplus_hash_dentry(&test_dentry, &str2);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /*
+ * For simple ASCII the nodecompose flag has no effect on the
+ * result, but the function should still produce a valid hash
+ */
+ KUNIT_EXPECT_NE(test, 0, str2.hash);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test hash consistency and distribution */
+static void hfsplus_hash_dentry_consistency_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr str1, str2, str3;
+ unsigned long hash1;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test that same string always produces same hash */
+ create_qstr(&str1, "consistent");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+ KUNIT_EXPECT_EQ(test, 0, result);
+ hash1 = str1.hash;
+
+ create_qstr(&str2, "consistent");
+ result = hfsplus_hash_dentry(&test_dentry, &str2);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ KUNIT_EXPECT_EQ(test, hash1, str2.hash);
+
+ /* Test that different strings produce different hashes */
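+ /* (Assumes these particular inputs do not collide under the hash.) */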
+ create_qstr(&str3, "different");
+ result = hfsplus_hash_dentry(&test_dentry, &str3);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ KUNIT_EXPECT_NE(test, str1.hash, str3.hash);
+
+ /* Test similar strings should have different hashes */
+ create_qstr(&str1, "file1");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&str2, "file2");
+ result = hfsplus_hash_dentry(&test_dentry, &str2);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ KUNIT_EXPECT_NE(test, str1.hash, str2.hash);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test edge cases and boundary conditions */
+static void hfsplus_hash_dentry_edge_cases_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ struct qstr str;
+ int result;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test very long filename */
+ memset(mock_env->buf, 'a', mock_env->buf_size - 1);
+ mock_env->buf[mock_env->buf_size - 1] = '\0';
+
+ create_qstr(&str, mock_env->buf);
+ result = hfsplus_hash_dentry(&test_dentry, &str);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_NE(test, 0, str.hash);
+
+ /* Test filename with all printable ASCII characters */
+ create_qstr(&str, "!@#$%^&*()_+-=[]{}|;':\",./<>?");
+ result = hfsplus_hash_dentry(&test_dentry, &str);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_NE(test, 0, str.hash);
+
+ /* Test with embedded null (though not typical for filenames) */
+ str.name = "file\0hidden";
+ str.len = 11; /* Include the null and text after it */
+ str.hash = 0;
+ result = hfsplus_hash_dentry(&test_dentry, &str);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_NE(test, 0, str.hash);
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Test hfsplus_compare_dentry basic functionality */
+static void hfsplus_compare_dentry_basic_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr name;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test identical strings */
+ create_qstr(&name, "hello");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "hello", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test different strings - lexicographic order */
+ create_qstr(&name, "world");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "hello", &name);
+ KUNIT_EXPECT_LT(test, result, 0); /* "hello" < "world" */
+
+ result = hfsplus_compare_dentry(&test_dentry, 5, "world", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&name, "hello");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "world", &name);
+ KUNIT_EXPECT_GT(test, result, 0); /* "world" > "hello" */
+
+ /* Test empty strings */
+ create_qstr(&name, "");
+ result = hfsplus_compare_dentry(&test_dentry, 0, "", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test one empty, one non-empty */
+ create_qstr(&name, "test");
+ result = hfsplus_compare_dentry(&test_dentry, 0, "", &name);
+ KUNIT_EXPECT_LT(test, result, 0); /* "" < "test" */
+
+ create_qstr(&name, "");
+ result = hfsplus_compare_dentry(&test_dentry, 4, "test", &name);
+ KUNIT_EXPECT_GT(test, result, 0); /* "test" > "" */
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test case folding behavior in comparison */
+static void hfsplus_compare_dentry_casefold_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr name;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test with case folding disabled (default) */
+ clear_bit(HFSPLUS_SB_CASEFOLD, &mock_sb->sb_info.flags);
+
+ create_qstr(&name, "hello");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "Hello", &name);
+ /* Case sensitive: "Hello" != "hello" */
+ KUNIT_EXPECT_NE(test, 0, result);
+
+ create_qstr(&name, "Hello");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "hello", &name);
+ /* Case sensitive: "hello" != "Hello" */
+ KUNIT_EXPECT_NE(test, 0, result);
+
+ /* Test with case folding enabled */
+ set_bit(HFSPLUS_SB_CASEFOLD, &mock_sb->sb_info.flags);
+
+ create_qstr(&name, "hello");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "Hello", &name);
+ /* Case insensitive: "Hello" == "hello" */
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&name, "Hello");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "hello", &name);
+ /* Case insensitive: "hello" == "Hello" */
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test mixed case */
+ create_qstr(&name, "TeSt");
+ result = hfsplus_compare_dentry(&test_dentry, 4, "test", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&name, "test");
+ result = hfsplus_compare_dentry(&test_dentry, 4, "TEST", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test special character handling in comparison */
+static void hfsplus_compare_dentry_special_chars_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr name;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test colon conversion (: becomes /) */
+ create_qstr(&name, "file/name");
+ result = hfsplus_compare_dentry(&test_dentry, 9, "file:name", &name);
+ /* "file:name" == "file/name" after conversion */
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&name, "file:name");
+ result = hfsplus_compare_dentry(&test_dentry, 9, "file/name", &name);
+ /* "file/name" == "file:name" after conversion */
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test multiple special characters */
+ create_qstr(&name, "///");
+ result = hfsplus_compare_dentry(&test_dentry, 3, ":::", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test mixed special and regular characters */
+ create_qstr(&name, "a/b:c");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "a:b/c", &name);
+ /* Both become "a/b/c" after conversion */
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test length differences */
+static void hfsplus_compare_dentry_length_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr name;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test different lengths with common prefix */
+ create_qstr(&name, "testing");
+ result = hfsplus_compare_dentry(&test_dentry, 4, "test", &name);
+ KUNIT_EXPECT_LT(test, result, 0); /* "test" < "testing" */
+
+ create_qstr(&name, "test");
+ result = hfsplus_compare_dentry(&test_dentry, 7, "testing", &name);
+ KUNIT_EXPECT_GT(test, result, 0); /* "testing" > "test" */
+
+ /* Test exact length match */
+ create_qstr(&name, "exact");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "exact", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test length parameter vs actual string content */
+ create_qstr(&name, "hello");
+ result = hfsplus_compare_dentry(&test_dentry, 3, "hel", &name);
+ KUNIT_EXPECT_LT(test, result, 0); /* "hel" < "hello" */
+
+ /* Test longer first string but shorter length parameter */
+ create_qstr(&name, "hi");
+ result = hfsplus_compare_dentry(&test_dentry, 2, "hello", &name);
+ /* "he" < "hi" (only first 2 chars compared) */
+ KUNIT_EXPECT_LT(test, result, 0);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test decomposition flag behavior */
+static void hfsplus_compare_dentry_decompose_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr name;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test with decomposition enabled (NODECOMPOSE clear, the default) */
+ clear_bit(HFSPLUS_SB_NODECOMPOSE, &mock_sb->sb_info.flags);
+
+ create_qstr(&name, "test");
+ result = hfsplus_compare_dentry(&test_dentry, 4, "test", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test with decomposition disabled (NODECOMPOSE set) */
+ set_bit(HFSPLUS_SB_NODECOMPOSE, &mock_sb->sb_info.flags);
+
+ create_qstr(&name, "test");
+ result = hfsplus_compare_dentry(&test_dentry, 4, "test", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* For simple ASCII, decomposition shouldn't affect the result */
+ create_qstr(&name, "different");
+ result = hfsplus_compare_dentry(&test_dentry, 4, "test", &name);
+ KUNIT_EXPECT_NE(test, 0, result);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test edge cases and boundary conditions */
+static void hfsplus_compare_dentry_edge_cases_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr name;
+ char *long_str;
+ char *long_str2;
+ u32 str_size = HFSPLUS_MAX_STRLEN + 1;
+ struct qstr null_name = {
+ .name = "a\0b",
+ .len = 3,
+ .hash = 0
+ };
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ long_str = kzalloc(str_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, long_str);
+
+ long_str2 = kzalloc(str_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, long_str2);
+
+ /* Test very long strings */
+ memset(long_str, 'a', str_size - 1);
+ long_str[str_size - 1] = '\0';
+
+ create_qstr(&name, long_str);
+ result = hfsplus_compare_dentry(&test_dentry, str_size - 1,
+ long_str, &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test with difference at the end of long strings */
+ memset(long_str2, 'a', str_size - 1);
+ long_str2[str_size - 1] = '\0';
+ long_str2[str_size - 2] = 'b';
+ create_qstr(&name, long_str2);
+ result = hfsplus_compare_dentry(&test_dentry, str_size - 1,
+ long_str, &name);
+ KUNIT_EXPECT_LT(test, result, 0); /* 'a' < 'b' */
+
+ /* Test single character differences */
+ create_qstr(&name, "b");
+ result = hfsplus_compare_dentry(&test_dentry, 1, "a", &name);
+ KUNIT_EXPECT_LT(test, result, 0); /* 'a' < 'b' */
+
+ create_qstr(&name, "a");
+ result = hfsplus_compare_dentry(&test_dentry, 1, "b", &name);
+ KUNIT_EXPECT_GT(test, result, 0); /* 'b' > 'a' */
+
+ /* Test with null characters in the middle */
+ result = hfsplus_compare_dentry(&test_dentry, 3, "a\0b", &null_name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test all printable ASCII characters */
+ create_qstr(&name, "!@#$%^&*()");
+ result = hfsplus_compare_dentry(&test_dentry, 10, "!@#$%^&*()", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ kfree(long_str);
+ kfree(long_str2);
+ free_mock_sb(mock_sb);
+}
+
+/* Test combined flag behaviors */
+static void hfsplus_compare_dentry_combined_flags_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr name;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test with the casefold and nodecompose flags both set */
+ set_bit(HFSPLUS_SB_CASEFOLD, &mock_sb->sb_info.flags);
+ set_bit(HFSPLUS_SB_NODECOMPOSE, &mock_sb->sb_info.flags);
+
+ create_qstr(&name, "hello");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "HELLO", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test special chars with case folding */
+ create_qstr(&name, "File/Name");
+ result = hfsplus_compare_dentry(&test_dentry, 9, "file:name", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test with both flags disabled */
+ clear_bit(HFSPLUS_SB_CASEFOLD, &mock_sb->sb_info.flags);
+ clear_bit(HFSPLUS_SB_NODECOMPOSE, &mock_sb->sb_info.flags);
+
+ create_qstr(&name, "hello");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "HELLO", &name);
+ KUNIT_EXPECT_NE(test, 0, result); /* Case sensitive */
+
+ /* But special chars should still be converted */
+ create_qstr(&name, "file/name");
+ result = hfsplus_compare_dentry(&test_dentry, 9, "file:name", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ free_mock_sb(mock_sb);
+}
+
+static struct kunit_case hfsplus_unicode_test_cases[] = {
+ KUNIT_CASE(hfsplus_strcasecmp_test),
+ KUNIT_CASE(hfsplus_strcmp_test),
+ KUNIT_CASE(hfsplus_unicode_edge_cases_test),
+ KUNIT_CASE(hfsplus_unicode_boundary_test),
+ KUNIT_CASE(hfsplus_uni2asc_basic_test),
+ KUNIT_CASE(hfsplus_uni2asc_special_chars_test),
+ KUNIT_CASE(hfsplus_uni2asc_buffer_test),
+ KUNIT_CASE(hfsplus_uni2asc_corrupted_test),
+ KUNIT_CASE(hfsplus_uni2asc_edge_cases_test),
+ KUNIT_CASE(hfsplus_asc2uni_basic_test),
+ KUNIT_CASE(hfsplus_asc2uni_special_chars_test),
+ KUNIT_CASE(hfsplus_asc2uni_buffer_limits_test),
+ KUNIT_CASE(hfsplus_asc2uni_edge_cases_test),
+ KUNIT_CASE(hfsplus_asc2uni_decompose_test),
+ KUNIT_CASE(hfsplus_hash_dentry_basic_test),
+ KUNIT_CASE(hfsplus_hash_dentry_casefold_test),
+ KUNIT_CASE(hfsplus_hash_dentry_special_chars_test),
+ KUNIT_CASE(hfsplus_hash_dentry_decompose_test),
+ KUNIT_CASE(hfsplus_hash_dentry_consistency_test),
+ KUNIT_CASE(hfsplus_hash_dentry_edge_cases_test),
+ KUNIT_CASE(hfsplus_compare_dentry_basic_test),
+ KUNIT_CASE(hfsplus_compare_dentry_casefold_test),
+ KUNIT_CASE(hfsplus_compare_dentry_special_chars_test),
+ KUNIT_CASE(hfsplus_compare_dentry_length_test),
+ KUNIT_CASE(hfsplus_compare_dentry_decompose_test),
+ KUNIT_CASE(hfsplus_compare_dentry_edge_cases_test),
+ KUNIT_CASE(hfsplus_compare_dentry_combined_flags_test),
+ {}
+};
+
+static struct kunit_suite hfsplus_unicode_test_suite = {
+ .name = "hfsplus_unicode",
+ .test_cases = hfsplus_unicode_test_cases,
+};
+
+kunit_test_suite(hfsplus_unicode_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for HFS+ Unicode string operations");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 74801911bc1c..30cf4fe78b3d 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -48,47 +48,19 @@ struct hfsplus_wd {
int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
void *buf, void **data, blk_opf_t opf)
{
- const enum req_op op = opf & REQ_OP_MASK;
- struct bio *bio;
- int ret = 0;
- u64 io_size;
- loff_t start;
- int offset;
+ u64 io_size = hfsplus_min_io_size(sb);
+ loff_t start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
+ int offset = start & (io_size - 1);
+
+ if ((opf & REQ_OP_MASK) != REQ_OP_WRITE && data)
+ *data = (u8 *)buf + offset;
/*
- * Align sector to hardware sector size and find offset. We
- * assume that io_size is a power of two, which _should_
- * be true.
+ * Align sector to hardware sector size and find offset. We assume that
+ * io_size is a power of two, which _should_ be true.
*/
- io_size = hfsplus_min_io_size(sb);
- start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
- offset = start & (io_size - 1);
sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
-
- bio = bio_alloc(sb->s_bdev, 1, opf, GFP_NOIO);
- bio->bi_iter.bi_sector = sector;
-
- if (op != REQ_OP_WRITE && data)
- *data = (u8 *)buf + offset;
-
- while (io_size > 0) {
- unsigned int page_offset = offset_in_page(buf);
- unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset,
- io_size);
-
- ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);
- if (ret != len) {
- ret = -EIO;
- goto out;
- }
- io_size -= len;
- buf = (u8 *)buf + len;
- }
-
- ret = submit_bio_wait(bio);
-out:
- bio_put(bio);
- return ret < 0 ? ret : 0;
+ return bdev_rw_virt(sb->s_bdev, sector, buf, io_size, opf);
}
static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd)
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index 9a1a93e3888b..da95a9de9a65 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -64,7 +64,7 @@ static void hfsplus_init_header_node(struct inode *attr_file,
u32 used_bmp_bytes;
u64 tmp;
- hfs_dbg(ATTR_MOD, "init_hdr_attr_file: clump %u, node_size %u\n",
+ hfs_dbg("clump %u, node_size %u\n",
clump_size, node_size);
/* The end of the node contains list of record offsets */
@@ -132,7 +132,7 @@ static int hfsplus_create_attributes_file(struct super_block *sb)
struct page *page;
int old_state = HFSPLUS_EMPTY_ATTR_TREE;
- hfs_dbg(ATTR_MOD, "create_attr_file: ino %d\n", HFSPLUS_ATTR_CNID);
+ hfs_dbg("ino %d\n", HFSPLUS_ATTR_CNID);
check_attr_tree_state_again:
switch (atomic_read(&sbi->attr_tree_state)) {
@@ -172,7 +172,11 @@ check_attr_tree_state_again:
return PTR_ERR(attr_file);
}
- BUG_ON(i_size_read(attr_file) != 0);
+ if (i_size_read(attr_file) != 0) {
+ err = -EIO;
+ pr_err("detected inconsistent attributes file, running fsck.hfsplus is recommended.\n");
+ goto end_attr_file_creation;
+ }
hip = HFSPLUS_I(attr_file);
@@ -261,10 +265,8 @@ int __hfsplus_setxattr(struct inode *inode, const char *name,
struct hfs_find_data cat_fd;
hfsplus_cat_entry entry;
u16 cat_entry_flags, cat_entry_type;
- u16 folder_finderinfo_len = sizeof(struct DInfo) +
- sizeof(struct DXInfo);
- u16 file_finderinfo_len = sizeof(struct FInfo) +
- sizeof(struct FXInfo);
+ u16 folder_finderinfo_len = sizeof(DInfo) + sizeof(DXInfo);
+ u16 file_finderinfo_len = sizeof(FInfo) + sizeof(FXInfo);
if ((!S_ISREG(inode->i_mode) &&
!S_ISDIR(inode->i_mode)) ||
@@ -440,11 +442,11 @@ static ssize_t hfsplus_getxattr_finder_info(struct inode *inode,
ssize_t res = 0;
struct hfs_find_data fd;
u16 entry_type;
- u16 folder_rec_len = sizeof(struct DInfo) + sizeof(struct DXInfo);
- u16 file_rec_len = sizeof(struct FInfo) + sizeof(struct FXInfo);
+ u16 folder_rec_len = sizeof(DInfo) + sizeof(DXInfo);
+ u16 file_rec_len = sizeof(FInfo) + sizeof(FXInfo);
u16 record_len = max(folder_rec_len, file_rec_len);
- u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)];
- u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)];
+ u8 folder_finder_info[sizeof(DInfo) + sizeof(DXInfo)];
+ u8 file_finder_info[sizeof(FInfo) + sizeof(FXInfo)];
if (size >= record_len) {
res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
@@ -608,8 +610,8 @@ static ssize_t hfsplus_listxattr_finder_info(struct dentry *dentry,
struct inode *inode = d_inode(dentry);
struct hfs_find_data fd;
u16 entry_type;
- u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)];
- u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)];
+ u8 folder_finder_info[sizeof(DInfo) + sizeof(DXInfo)];
+ u8 file_finder_info[sizeof(FInfo) + sizeof(FXInfo)];
unsigned long len, found_bit;
int xattr_name_len, symbols_count;
@@ -625,14 +627,14 @@ static ssize_t hfsplus_listxattr_finder_info(struct dentry *dentry,
entry_type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset);
if (entry_type == HFSPLUS_FOLDER) {
- len = sizeof(struct DInfo) + sizeof(struct DXInfo);
+ len = sizeof(DInfo) + sizeof(DXInfo);
hfs_bnode_read(fd.bnode, folder_finder_info,
fd.entryoffset +
offsetof(struct hfsplus_cat_folder, user_info),
len);
found_bit = find_first_bit((void *)folder_finder_info, len*8);
} else if (entry_type == HFSPLUS_FILE) {
- len = sizeof(struct FInfo) + sizeof(struct FXInfo);
+ len = sizeof(FInfo) + sizeof(FXInfo);
hfs_bnode_read(fd.bnode, file_finder_info,
fd.entryoffset +
offsetof(struct hfsplus_cat_file, user_info),
@@ -731,9 +733,9 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
goto end_listxattr;
xattr_name_len = NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN;
- if (hfsplus_uni2asc(inode->i_sb,
- (const struct hfsplus_unistr *)&fd.key->attr.key_name,
- strbuf, &xattr_name_len)) {
+ if (hfsplus_uni2asc_xattr_str(inode->i_sb,
+ &fd.key->attr.key_name, strbuf,
+ &xattr_name_len)) {
pr_err("unicode conversion failed\n");
res = -EIO;
goto end_listxattr;
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index 8b39c15c408c..aa02599b770f 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -3,40 +3,8 @@
#define __UM_FS_HOSTFS
#include <os.h>
+#include <generated/asm-offsets.h>
-/*
- * These are exactly the same definitions as in fs.h, but the names are
- * changed so that this file can be included in both kernel and user files.
- */
-
-#define HOSTFS_ATTR_MODE 1
-#define HOSTFS_ATTR_UID 2
-#define HOSTFS_ATTR_GID 4
-#define HOSTFS_ATTR_SIZE 8
-#define HOSTFS_ATTR_ATIME 16
-#define HOSTFS_ATTR_MTIME 32
-#define HOSTFS_ATTR_CTIME 64
-#define HOSTFS_ATTR_ATIME_SET 128
-#define HOSTFS_ATTR_MTIME_SET 256
-
-/* This one is unused by hostfs. */
-#define HOSTFS_ATTR_FORCE 512 /* Not a change, but a change it */
-#define HOSTFS_ATTR_ATTR_FLAG 1024
-
-/*
- * If you are very careful, you'll notice that these two are missing:
- *
- * #define ATTR_KILL_SUID 2048
- * #define ATTR_KILL_SGID 4096
- *
- * and this is because they were added in 2.5 development.
- * Actually, they are not needed by most ->setattr() methods - they are set by
- * callers of notify_change() to notify that the setuid/setgid bits must be
- * dropped.
- * notify_change() will delete those flags, make sure attr->ia_valid & ATTR_MODE
- * is on, and remove the appropriate bits from attr->ia_mode (attr is a
- * "struct iattr *"). -BlaisorBlade
- */
struct hostfs_timespec {
long long tv_sec;
long long tv_nsec;
@@ -60,7 +28,7 @@ struct hostfs_stat {
unsigned int uid;
unsigned int gid;
unsigned long long size;
- struct hostfs_timespec atime, mtime, ctime;
+ struct hostfs_timespec atime, mtime, ctime, btime;
unsigned int blksize;
unsigned long long blocks;
struct {
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 7e51d2cec64b..51d26aa2b93e 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -33,6 +33,7 @@ struct hostfs_inode_info {
struct inode vfs_inode;
struct mutex open_mutex;
dev_t dev;
+ struct hostfs_timespec btime;
};
static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
@@ -95,32 +96,17 @@ __uml_setup("hostfs=", hostfs_args,
static char *__dentry_name(struct dentry *dentry, char *name)
{
char *p = dentry_path_raw(dentry, name, PATH_MAX);
- char *root;
- size_t len;
- struct hostfs_fs_info *fsi;
-
- fsi = dentry->d_sb->s_fs_info;
- root = fsi->host_root_path;
- len = strlen(root);
- if (IS_ERR(p)) {
- __putname(name);
- return NULL;
- }
-
- /*
- * This function relies on the fact that dentry_path_raw() will place
- * the path name at the end of the provided buffer.
- */
- BUG_ON(p + strlen(p) + 1 != name + PATH_MAX);
+ struct hostfs_fs_info *fsi = dentry->d_sb->s_fs_info;
+ char *root = fsi->host_root_path;
+ size_t len = strlen(root);
- strscpy(name, root, PATH_MAX);
- if (len > p - name) {
+ if (IS_ERR(p) || len > p - name) {
__putname(name);
return NULL;
}
- if (p > name + len)
- strcpy(name + len, p);
+ memcpy(name, root, len);
+ memmove(name + len, p, name + PATH_MAX - p);
return name;
}
@@ -275,7 +261,7 @@ static int hostfs_show_options(struct seq_file *seq, struct dentry *root)
static const struct super_operations hostfs_sbops = {
.alloc_inode = hostfs_alloc_inode,
.free_inode = hostfs_free_inode,
- .drop_inode = generic_delete_inode,
+ .drop_inode = inode_just_drop,
.evict_inode = hostfs_evict_inode,
.statfs = hostfs_statfs,
.show_options = hostfs_show_options,
@@ -396,7 +382,7 @@ static const struct file_operations hostfs_file_fops = {
.splice_write = iter_file_splice_write,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
+ .mmap_prepare = generic_file_mmap_prepare,
.open = hostfs_open,
.release = hostfs_file_release,
.fsync = hostfs_fsync,
@@ -410,38 +396,33 @@ static const struct file_operations hostfs_dir_fops = {
.fsync = hostfs_fsync,
};
-static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
+static int hostfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
- char *buffer;
- loff_t base = page_offset(page);
- int count = PAGE_SIZE;
- int end_index = inode->i_size >> PAGE_SHIFT;
- int err;
-
- if (page->index >= end_index)
- count = inode->i_size & (PAGE_SIZE-1);
-
- buffer = kmap_local_page(page);
-
- err = write_file(HOSTFS_I(inode)->fd, &base, buffer, count);
- if (err != count) {
- if (err >= 0)
- err = -EIO;
- mapping_set_error(mapping, err);
- goto out;
+ struct folio *folio = NULL;
+ loff_t i_size = i_size_read(inode);
+ int err = 0;
+
+ while ((folio = writeback_iter(mapping, wbc, folio, &err))) {
+ loff_t pos = folio_pos(folio);
+ size_t count = folio_size(folio);
+ char *buffer;
+ int ret;
+
+ if (count > i_size - pos)
+ count = i_size - pos;
+
+ buffer = kmap_local_folio(folio, 0);
+ ret = write_file(HOSTFS_I(inode)->fd, &pos, buffer, count);
+ kunmap_local(buffer);
+ folio_unlock(folio);
+ if (ret != count) {
+ err = ret < 0 ? ret : -EIO;
+ mapping_set_error(mapping, err);
+ }
}
- if (base > inode->i_size)
- inode->i_size = base;
-
- err = 0;
-
- out:
- kunmap_local(buffer);
- unlock_page(page);
-
return err;
}
@@ -464,7 +445,8 @@ static int hostfs_read_folio(struct file *file, struct folio *folio)
return ret;
}
-static int hostfs_write_begin(struct file *file, struct address_space *mapping,
+static int hostfs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
loff_t pos, unsigned len,
struct folio **foliop, void **fsdata)
{
@@ -477,7 +459,8 @@ static int hostfs_write_begin(struct file *file, struct address_space *mapping,
return 0;
}
-static int hostfs_write_end(struct file *file, struct address_space *mapping,
+static int hostfs_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct folio *folio, void *fsdata)
{
@@ -487,7 +470,7 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
int err;
buffer = kmap_local_folio(folio, from);
- err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer, copied);
+ err = write_file(FILE_HOSTFS_I(iocb->ki_filp)->fd, &pos, buffer, copied);
kunmap_local(buffer);
if (!folio_test_uptodate(folio) && err == folio_size(folio))
@@ -506,11 +489,12 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
}
static const struct address_space_operations hostfs_aops = {
- .writepage = hostfs_writepage,
+ .writepages = hostfs_writepages,
.read_folio = hostfs_read_folio,
.dirty_folio = filemap_dirty_folio,
.write_begin = hostfs_write_begin,
.write_end = hostfs_write_end,
+ .migrate_folio = filemap_migrate_folio,
};
static int hostfs_inode_update(struct inode *ino, const struct hostfs_stat *st)
@@ -566,6 +550,7 @@ static int hostfs_inode_set(struct inode *ino, void *data)
}
HOSTFS_I(ino)->dev = dev;
+ HOSTFS_I(ino)->btime = st->btime;
ino->i_ino = st->ino;
ino->i_mode = st->mode;
return hostfs_inode_update(ino, st);
@@ -576,7 +561,10 @@ static int hostfs_inode_test(struct inode *inode, void *data)
const struct hostfs_stat *st = data;
dev_t dev = MKDEV(st->dev.maj, st->dev.min);
- return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev;
+ return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev &&
+ (inode->i_mode & S_IFMT) == (st->mode & S_IFMT) &&
+ HOSTFS_I(inode)->btime.tv_sec == st->btime.tv_sec &&
+ HOSTFS_I(inode)->btime.tv_nsec == st->btime.tv_nsec;
}
static struct inode *hostfs_iget(struct super_block *sb, char *name)
@@ -593,7 +581,7 @@ static struct inode *hostfs_iget(struct super_block *sb, char *name)
if (!inode)
return ERR_PTR(-ENOMEM);
- if (inode->i_state & I_NEW) {
+ if (inode_state_read_once(inode) & I_NEW) {
unlock_new_inode(inode);
} else {
spin_lock(&inode->i_lock);
@@ -698,17 +686,25 @@ static int hostfs_symlink(struct mnt_idmap *idmap, struct inode *ino,
return err;
}
-static int hostfs_mkdir(struct mnt_idmap *idmap, struct inode *ino,
- struct dentry *dentry, umode_t mode)
+static struct dentry *hostfs_mkdir(struct mnt_idmap *idmap, struct inode *ino,
+ struct dentry *dentry, umode_t mode)
{
+ struct inode *inode;
char *file;
int err;
if ((file = dentry_name(dentry)) == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
err = do_mkdir(file, mode);
+ if (err) {
+ dentry = ERR_PTR(err);
+ } else {
+ inode = hostfs_iget(dentry->d_sb, file);
+ d_drop(dentry);
+ dentry = d_splice_alias(inode, dentry);
+ }
__putname(file);
- return err;
+ return dentry;
}
static int hostfs_rmdir(struct inode *ino, struct dentry *dentry)
@@ -939,7 +935,7 @@ static int hostfs_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_blocksize_bits = 10;
sb->s_magic = HOSTFS_SUPER_MAGIC;
sb->s_op = &hostfs_sbops;
- sb->s_d_op = &simple_dentry_operations;
+ sb->s_d_flags = DCACHE_DONTCACHE;
sb->s_maxbytes = MAX_LFS_FILESIZE;
err = super_setup_bdi(sb);
if (err)
@@ -983,7 +979,7 @@ static int hostfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct hostfs_fs_info *fsi = fc->s_fs_info;
struct fs_parse_result result;
- char *host_root;
+ char *host_root, *tmp_root;
int opt;
opt = fs_parse(fc, hostfs_param_specs, param, &result);
@@ -994,11 +990,13 @@ static int hostfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
case Opt_hostfs:
host_root = param->string;
if (!*host_root)
- host_root = "";
- fsi->host_root_path =
- kasprintf(GFP_KERNEL, "%s/%s", root_ino, host_root);
- if (fsi->host_root_path == NULL)
+ break;
+ tmp_root = kasprintf(GFP_KERNEL, "%s%s",
+ fsi->host_root_path, host_root);
+ if (!tmp_root)
return -ENOMEM;
+ kfree(fsi->host_root_path);
+ fsi->host_root_path = tmp_root;
break;
}
@@ -1008,17 +1006,17 @@ static int hostfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
static int hostfs_parse_monolithic(struct fs_context *fc, void *data)
{
struct hostfs_fs_info *fsi = fc->s_fs_info;
- char *host_root = (char *)data;
+ char *tmp_root, *host_root = (char *)data;
/* NULL is printed as '(null)' by printf(): avoid that. */
if (host_root == NULL)
- host_root = "";
+ return 0;
- fsi->host_root_path =
- kasprintf(GFP_KERNEL, "%s/%s", root_ino, host_root);
- if (fsi->host_root_path == NULL)
+ tmp_root = kasprintf(GFP_KERNEL, "%s%s", fsi->host_root_path, host_root);
+ if (!tmp_root)
return -ENOMEM;
-
+ kfree(fsi->host_root_path);
+ fsi->host_root_path = tmp_root;
return 0;
}
@@ -1053,6 +1051,11 @@ static int hostfs_init_fs_context(struct fs_context *fc)
if (!fsi)
return -ENOMEM;
+ fsi->host_root_path = kasprintf(GFP_KERNEL, "%s/", root_ino);
+ if (!fsi->host_root_path) {
+ kfree(fsi);
+ return -ENOMEM;
+ }
fc->s_fs_info = fsi;
fc->ops = &hostfs_context_ops;
return 0;
diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
index 97e9c40a9448..3bcd9f35e70b 100644
--- a/fs/hostfs/hostfs_user.c
+++ b/fs/hostfs/hostfs_user.c
@@ -18,39 +18,48 @@
#include "hostfs.h"
#include <utime.h>
-static void stat64_to_hostfs(const struct stat64 *buf, struct hostfs_stat *p)
+static void statx_to_hostfs(const struct statx *buf, struct hostfs_stat *p)
{
- p->ino = buf->st_ino;
- p->mode = buf->st_mode;
- p->nlink = buf->st_nlink;
- p->uid = buf->st_uid;
- p->gid = buf->st_gid;
- p->size = buf->st_size;
- p->atime.tv_sec = buf->st_atime;
- p->atime.tv_nsec = 0;
- p->ctime.tv_sec = buf->st_ctime;
- p->ctime.tv_nsec = 0;
- p->mtime.tv_sec = buf->st_mtime;
- p->mtime.tv_nsec = 0;
- p->blksize = buf->st_blksize;
- p->blocks = buf->st_blocks;
- p->rdev.maj = os_major(buf->st_rdev);
- p->rdev.min = os_minor(buf->st_rdev);
- p->dev.maj = os_major(buf->st_dev);
- p->dev.min = os_minor(buf->st_dev);
+ p->ino = buf->stx_ino;
+ p->mode = buf->stx_mode;
+ p->nlink = buf->stx_nlink;
+ p->uid = buf->stx_uid;
+ p->gid = buf->stx_gid;
+ p->size = buf->stx_size;
+ p->atime.tv_sec = buf->stx_atime.tv_sec;
+ p->atime.tv_nsec = buf->stx_atime.tv_nsec;
+ p->ctime.tv_sec = buf->stx_ctime.tv_sec;
+ p->ctime.tv_nsec = buf->stx_ctime.tv_nsec;
+ p->mtime.tv_sec = buf->stx_mtime.tv_sec;
+ p->mtime.tv_nsec = buf->stx_mtime.tv_nsec;
+ if (buf->stx_mask & STATX_BTIME) {
+ p->btime.tv_sec = buf->stx_btime.tv_sec;
+ p->btime.tv_nsec = buf->stx_btime.tv_nsec;
+ } else {
+ memset(&p->btime, 0, sizeof(p->btime));
+ }
+ p->blksize = buf->stx_blksize;
+ p->blocks = buf->stx_blocks;
+ p->rdev.maj = buf->stx_rdev_major;
+ p->rdev.min = buf->stx_rdev_minor;
+ p->dev.maj = buf->stx_dev_major;
+ p->dev.min = buf->stx_dev_minor;
}
int stat_file(const char *path, struct hostfs_stat *p, int fd)
{
- struct stat64 buf;
+ struct statx buf;
+ int flags = AT_SYMLINK_NOFOLLOW;
if (fd >= 0) {
- if (fstat64(fd, &buf) < 0)
- return -errno;
- } else if (lstat64(path, &buf) < 0) {
- return -errno;
+ flags |= AT_EMPTY_PATH;
+ path = "";
}
- stat64_to_hostfs(&buf, p);
+
+ if ((statx(fd, path, flags, STATX_BASIC_STATS | STATX_BTIME, &buf)) < 0)
+ return -errno;
+
+ statx_to_hostfs(&buf, p);
return 0;
}
diff --git a/fs/hpfs/anode.c b/fs/hpfs/anode.c
index c14c9a035ee0..a4f5321eafae 100644
--- a/fs/hpfs/anode.c
+++ b/fs/hpfs/anode.c
@@ -27,7 +27,7 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
a = le32_to_cpu(btree->u.internal[i].down);
brelse(bh);
if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
goto go_down;
}
hpfs_error(s, "sector %08x not found in internal anode %08x", sec, a);
@@ -69,12 +69,13 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
int n;
unsigned fs;
int c1, c2 = 0;
+
if (fnod) {
if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1;
- btree = &fnode->btree;
+ btree = GET_BTREE_PTR(&fnode->btree);
} else {
if (!(anode = hpfs_map_anode(s, node, &bh))) return -1;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
}
a = node;
go_down:
@@ -91,7 +92,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
if (hpfs_sb(s)->sb_chk)
if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_add_sector_to_btree #1")) return -1;
if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
goto go_down;
}
if (n >= 0) {
@@ -151,7 +152,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
}
brelse(bh);
bh = bh1;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
}
btree->n_free_nodes--; n = btree->n_used_nodes++;
le16_add_cpu(&btree->first_free, 12);
@@ -168,10 +169,10 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1;
if (up != node || !fnod) {
if (!(anode = hpfs_map_anode(s, up, &bh))) return -1;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
} else {
if (!(fnode = hpfs_map_fnode(s, up, &bh))) return -1;
- btree = &fnode->btree;
+ btree = GET_BTREE_PTR(&fnode->btree);
}
if (btree->n_free_nodes) {
btree->n_free_nodes--; n = btree->n_used_nodes++;
@@ -206,8 +207,8 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
anode->btree.n_used_nodes = 1;
anode->btree.n_free_nodes = 59;
anode->btree.first_free = cpu_to_le16(16);
- anode->btree.u.internal[0].down = cpu_to_le32(a);
- anode->btree.u.internal[0].file_secno = cpu_to_le32(-1);
+ GET_BTREE_PTR(&anode->btree)->u.internal[0].down = cpu_to_le32(a);
+ GET_BTREE_PTR(&anode->btree)->u.internal[0].file_secno = cpu_to_le32(-1);
mark_buffer_dirty(bh);
brelse(bh);
if ((anode = hpfs_map_anode(s, a, &bh))) {
@@ -229,20 +230,20 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
brelse(bh2);
return -1;
}
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
} else {
if (!(fnode = hpfs_map_fnode(s, node, &bh))) {
brelse(bh2);
return -1;
}
- btree = &fnode->btree;
+ btree = GET_BTREE_PTR(&fnode->btree);
}
ranode->up = cpu_to_le32(node);
memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
if (fnod)
ranode->btree.flags |= BP_fnode_parent;
- ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes;
- if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
+ GET_BTREE_PTR(&ranode->btree)->n_free_nodes = (bp_internal(GET_BTREE_PTR(&ranode->btree)) ? 60 : 40) - GET_BTREE_PTR(&ranode->btree)->n_used_nodes;
+ if (bp_internal(GET_BTREE_PTR(&ranode->btree))) for (n = 0; n < GET_BTREE_PTR(&ranode->btree)->n_used_nodes; n++) {
struct anode *unode;
if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
unode->up = cpu_to_le32(ra);
@@ -291,7 +292,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
return;
if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
- btree1 = &anode->btree;
+ btree1 = GET_BTREE_PTR(&anode->btree);
level++;
pos = 0;
}
@@ -307,7 +308,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
ano = le32_to_cpu(anode->up);
if (--level) {
if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
- btree1 = &anode->btree;
+ btree1 = GET_BTREE_PTR(&anode->btree);
} else btree1 = btree;
for (i = 0; i < btree1->n_used_nodes; i++) {
if (le32_to_cpu(btree1->u.internal[i].down) == oano) {
@@ -332,7 +333,7 @@ static secno anode_lookup(struct super_block *s, anode_secno a, unsigned sec)
struct anode *anode;
struct buffer_head *bh;
if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
- return hpfs_bplus_lookup(s, NULL, &anode->btree, sec, bh);
+ return hpfs_bplus_lookup(s, NULL, GET_BTREE_PTR(&anode->btree), sec, bh);
}
int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
@@ -388,7 +389,7 @@ void hpfs_ea_remove(struct super_block *s, secno a, int ano, unsigned len)
struct buffer_head *bh;
if (ano) {
if (!(anode = hpfs_map_anode(s, a, &bh))) return;
- hpfs_remove_btree(s, &anode->btree);
+ hpfs_remove_btree(s, GET_BTREE_PTR(&anode->btree));
brelse(bh);
hpfs_free_sectors(s, a, 1);
} else hpfs_free_sectors(s, a, (len + 511) >> 9);
@@ -407,10 +408,10 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
int c1, c2 = 0;
if (fno) {
if (!(fnode = hpfs_map_fnode(s, f, &bh))) return;
- btree = &fnode->btree;
+ btree = GET_BTREE_PTR(&fnode->btree);
} else {
if (!(anode = hpfs_map_anode(s, f, &bh))) return;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
}
if (!secs) {
hpfs_remove_btree(s, btree);
@@ -448,7 +449,7 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
return;
if (!(anode = hpfs_map_anode(s, node, &bh))) return;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
}
nodes = btree->n_used_nodes + btree->n_free_nodes;
for (i = 0; i < btree->n_used_nodes; i++)
@@ -485,7 +486,7 @@ void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
struct extended_attribute *ea;
struct extended_attribute *ea_end;
if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
- if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree);
+ if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, GET_BTREE_PTR(&fnode->btree));
else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
ea_end = fnode_end_ea(fnode);
for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 49dd585c2b17..ceb50b2dc91a 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -247,7 +247,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned in
result = ERR_PTR(-ENOMEM);
goto bail1;
}
- if (result->i_state & I_NEW) {
+ if (inode_state_read_once(result) & I_NEW) {
hpfs_init_inode(result);
if (de->directory)
hpfs_read_inode(result);
diff --git a/fs/hpfs/ea.c b/fs/hpfs/ea.c
index 102ba18e561f..2149d3ca530b 100644
--- a/fs/hpfs/ea.c
+++ b/fs/hpfs/ea.c
@@ -41,7 +41,7 @@ void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len)
struct buffer_head *bh;
struct anode *anode;
if ((anode = hpfs_map_anode(s, a, &bh))) {
- hpfs_remove_btree(s, &anode->btree);
+ hpfs_remove_btree(s, GET_BTREE_PTR(&anode->btree));
brelse(bh);
hpfs_free_sectors(s, a, 1);
}
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 449a3fc1b8d9..29e876705369 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -51,7 +51,9 @@ static secno hpfs_bmap(struct inode *inode, unsigned file_secno, unsigned *n_sec
return hpfs_inode->i_disk_sec + n;
}
if (!(fnode = hpfs_map_fnode(inode->i_sb, inode->i_ino, &bh))) return 0;
- disk_secno = hpfs_bplus_lookup(inode->i_sb, inode, &fnode->btree, file_secno, bh);
+ disk_secno = hpfs_bplus_lookup(inode->i_sb, inode,
+ GET_BTREE_PTR(&fnode->btree),
+ file_secno, bh);
if (disk_secno == -1) return 0;
if (hpfs_chk_sectors(inode->i_sb, disk_secno, 1, "bmap")) return 0;
n = file_secno - hpfs_inode->i_file_sec;
@@ -188,13 +190,14 @@ static void hpfs_write_failed(struct address_space *mapping, loff_t to)
hpfs_unlock(inode->i_sb);
}
-static int hpfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct folio **foliop, void **fsdata)
+static int hpfs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
int ret;
- ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
+ ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
hpfs_get_block,
&hpfs_i(mapping->host)->mmu_private);
if (unlikely(ret))
@@ -203,13 +206,14 @@ static int hpfs_write_begin(struct file *file, struct address_space *mapping,
return ret;
}
-static int hpfs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct folio *folio, void *fsdata)
+static int hpfs_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
int err;
- err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
+ err = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
if (err < len)
hpfs_write_failed(mapping, pos + len);
if (!(err < 0)) {
@@ -255,7 +259,7 @@ const struct file_operations hpfs_file_ops =
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
+ .mmap_prepare = generic_file_mmap_prepare,
.release = hpfs_file_release,
.fsync = hpfs_file_fsync,
.splice_read = filemap_splice_read,
diff --git a/fs/hpfs/hpfs.h b/fs/hpfs/hpfs.h
index 281dec8f636b..353f73c914d9 100644
--- a/fs/hpfs/hpfs.h
+++ b/fs/hpfs/hpfs.h
@@ -394,27 +394,45 @@ enum {
BP_binary_search = 0x40,
BP_internal = 0x80
};
+
+/**
+ * GET_BTREE_PTR() - Get a pointer to struct bplus_header
+ *
+ * Wrapper around container_of() to retrieve a pointer to struct
+ * bplus_header from a pointer to struct bplus_header_fixed.
+ *
+ * @ptr: Pointer to struct bplus_header_fixed.
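+ *
+ * Example (illustrative, mirroring the call sites in anode.c):
+ *	btree = GET_BTREE_PTR(&fnode->btree);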
+ *
+ */
+#define GET_BTREE_PTR(ptr) \
+ container_of(ptr, struct bplus_header, __hdr)
+
struct bplus_header
{
- u8 flags; /* bit 0 - high bit of first free entry offset
+ /* New members MUST be added within the struct_group() macro below. */
+ struct_group_tagged(bplus_header_fixed, __hdr,
+ u8 flags; /* bit 0 - high bit of first free entry offset
bit 5 - we're pointed to by an fnode,
the data btree or some ea or the
main ea bootage pointer ea_secno
bit 6 - suggest binary search (unused)
bit 7 - 1 -> (internal) tree of anodes
0 -> (leaf) list of extents */
- u8 fill[3];
- u8 n_free_nodes; /* free nodes in following array */
- u8 n_used_nodes; /* used nodes in following array */
- __le16 first_free; /* offset from start of header to
+ u8 fill[3];
+ u8 n_free_nodes; /* free nodes in following array */
+ u8 n_used_nodes; /* used nodes in following array */
+ __le16 first_free; /* offset from start of header to
first free node in array */
- union {
- /* (internal) 2-word entries giving subtree pointers */
- DECLARE_FLEX_ARRAY(struct bplus_internal_node, internal);
- /* (external) 3-word entries giving sector runs */
- DECLARE_FLEX_ARRAY(struct bplus_leaf_node, external);
- } u;
+ );
+ union {
+ /* (internal) 2-word entries giving subtree pointers */
+ DECLARE_FLEX_ARRAY(struct bplus_internal_node, internal);
+ /* (external) 3-word entries giving sector runs */
+ DECLARE_FLEX_ARRAY(struct bplus_leaf_node, external);
+ } u;
};
+static_assert(offsetof(struct bplus_header, u.internal) == sizeof(struct bplus_header_fixed),
+ "struct member likely outside of struct_group_tagged()");
static inline bool bp_internal(struct bplus_header *bp)
{
@@ -453,7 +471,7 @@ struct fnode
__le16 flags; /* bit 1 set -> ea_secno is an anode */
/* bit 8 set -> directory. first & only extent
points to dnode. */
- struct bplus_header btree; /* b+ tree, 8 extents or 12 subtrees */
+ struct bplus_header_fixed btree; /* b+ tree, 8 extents or 12 subtrees */
union {
struct bplus_leaf_node external[8];
struct bplus_internal_node internal[12];
@@ -495,7 +513,7 @@ struct anode
__le32 self; /* pointer to this anode */
__le32 up; /* parent anode or fnode */
- struct bplus_header btree; /* b+tree, 40 extents or 60 subtrees */
+ struct bplus_header_fixed btree; /* b+tree, 40 extents or 60 subtrees */
union {
struct bplus_leaf_node external[40];
struct bplus_internal_node internal[60];
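The hpfs.h change above splits struct bplus_header into a struct_group_tagged() fixed part plus the trailing flexible union, so fnode/anode embed only the fixed part while GET_BTREE_PTR() recovers the full view via container_of(). A minimal sketch of the same pattern with hypothetical demo_* types (not hpfs code):

#include <linux/container_of.h>
#include <linux/stddef.h>	/* struct_group_tagged(), offsetof() */
#include <linux/build_bug.h>	/* static_assert() */
#include <linux/types.h>

struct demo_hdr {
	/* Fixed-size head, also addressable as struct demo_hdr_fixed. */
	struct_group_tagged(demo_hdr_fixed, __hdr,
		u8 flags;
		u8 count;
	);
	u8 data[];		/* flexible payload follows the fixed head */
};
static_assert(offsetof(struct demo_hdr, data) == sizeof(struct demo_hdr_fixed));

/* An embedding structure stores only the fixed head inline... */
struct demo_node {
	struct demo_hdr_fixed hdr;
	u8 payload[8];
};

/* ...and the full header view is recovered exactly like GET_BTREE_PTR(). */
static inline struct demo_hdr *demo_hdr_of(struct demo_hdr_fixed *fixed)
{
	return container_of(fixed, struct demo_hdr, __hdr);
}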
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index a59e8fa630db..93d528f4f4f2 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -184,7 +184,7 @@ void hpfs_write_inode(struct inode *i)
struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
struct inode *parent;
if (i->i_ino == hpfs_sb(i->i_sb)->sb_root) return;
- if (hpfs_inode->i_rddir_off && !atomic_read(&i->i_count)) {
+ if (hpfs_inode->i_rddir_off && !icount_read(i)) {
if (*hpfs_inode->i_rddir_off)
pr_err("write_inode: some position still there\n");
kfree(hpfs_inode->i_rddir_off);
@@ -196,7 +196,7 @@ void hpfs_write_inode(struct inode *i)
parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir);
if (parent) {
hpfs_inode->i_dirty = 0;
- if (parent->i_state & I_NEW) {
+ if (inode_state_read_once(parent) & I_NEW) {
hpfs_init_inode(parent);
hpfs_read_inode(parent);
unlock_new_inode(parent);
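The two hunks above replace direct i_count/i_state reads with the icount_read() and inode_state_read_once() helpers used throughout this series (their definitions and other callers appear in the fs/inode.c diff further down). Illustrative sketch only; foo_is_droppable() is a hypothetical restatement of the accessor pattern:

static bool foo_is_droppable(struct inode *inode)
{
	/* icount_read() stands in for atomic_read(&inode->i_count);
	 * inode_state_read_once() stands in for a plain i_state read. */
	return !icount_read(inode) &&
	       !(inode_state_read_once(inode) & (I_FREEING | I_WILL_FREE));
}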
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index ecd9fccd1663..be73233502f8 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -178,14 +178,14 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea
}
if (!fnode_is_dir(fnode)) {
if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=
- (bp_internal(&fnode->btree) ? 12 : 8)) {
+ (bp_internal(GET_BTREE_PTR(&fnode->btree)) ? 12 : 8)) {
hpfs_error(s,
"bad number of nodes in fnode %08lx",
(unsigned long)ino);
goto bail;
}
if (le16_to_cpu(fnode->btree.first_free) !=
- 8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) {
+ 8 + fnode->btree.n_used_nodes * (bp_internal(GET_BTREE_PTR(&fnode->btree)) ? 8 : 12)) {
hpfs_error(s,
"bad first_free pointer in fnode %08lx",
(unsigned long)ino);
@@ -233,12 +233,12 @@ struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buff
goto bail;
}
if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes !=
- (bp_internal(&anode->btree) ? 60 : 40)) {
+ (bp_internal(GET_BTREE_PTR(&anode->btree)) ? 60 : 40)) {
hpfs_error(s, "bad number of nodes in anode %08x", ano);
goto bail;
}
if (le16_to_cpu(anode->btree.first_free) !=
- 8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 8 : 12)) {
+ 8 + anode->btree.n_used_nodes * (bp_internal(GET_BTREE_PTR(&anode->btree)) ? 8 : 12)) {
hpfs_error(s, "bad first_free pointer in anode %08x", ano);
goto bail;
}
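As a quick sanity check on the first_free validation above, taking the sizes from the hpfs.h comments (8-byte fixed header, 8-byte 2-word internal entries, 12-byte 3-word leaf entries):

	first_free = 8 + n_used_nodes * entry_size
	fnode, fully used:  8 + 12 * 8  =  8 + 8 * 12  = 104
	anode, fully used:  8 + 60 * 8  =  8 + 40 * 12 = 488

The internal and leaf layouts occupy the same b+tree area, which is why the checks only switch the per-entry multiplier (and the node count) on bp_internal().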
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index d0edf9ed33b6..353e13a615f5 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -19,8 +19,8 @@ static void hpfs_update_directory_times(struct inode *dir)
hpfs_write_inode_nolock(dir);
}
-static int hpfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *hpfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
@@ -35,7 +35,7 @@ static int hpfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
int r;
struct hpfs_dirent dee;
int err;
- if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
+ if ((err = hpfs_chk_name(name, &len))) return ERR_PTR(err==-ENOENT ? -EINVAL : err);
hpfs_lock(dir->i_sb);
err = -ENOSPC;
fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
@@ -52,8 +52,10 @@ static int hpfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
dee.fnode = cpu_to_le32(fno);
dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb));
result = new_inode(dir->i_sb);
- if (!result)
+ if (!result) {
+ err = -ENOMEM;
goto bail2;
+ }
hpfs_init_inode(result);
result->i_ino = fno;
hpfs_i(result)->i_parent_dir = dir->i_ino;
@@ -112,7 +114,7 @@ static int hpfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
hpfs_update_directory_times(dir);
d_instantiate(dentry, result);
hpfs_unlock(dir->i_sb);
- return 0;
+ return NULL;
bail3:
iput(result);
bail2:
@@ -123,7 +125,7 @@ bail1:
hpfs_free_sectors(dir->i_sb, fno, 1);
bail:
hpfs_unlock(dir->i_sb);
- return err;
+ return ERR_PTR(err);
}
static int hpfs_create(struct mnt_idmap *idmap, struct inode *dir,
@@ -153,9 +155,10 @@ static int hpfs_create(struct mnt_idmap *idmap, struct inode *dir,
dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb));
result = new_inode(dir->i_sb);
- if (!result)
+ if (!result) {
+ err = -ENOMEM;
goto bail1;
-
+ }
hpfs_init_inode(result);
result->i_ino = fno;
result->i_mode |= S_IFREG;
@@ -239,9 +242,10 @@ static int hpfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb));
result = new_inode(dir->i_sb);
- if (!result)
+ if (!result) {
+ err = -ENOMEM;
goto bail1;
-
+ }
hpfs_init_inode(result);
result->i_ino = fno;
hpfs_i(result)->i_parent_dir = dir->i_ino;
@@ -314,8 +318,10 @@ static int hpfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb));
result = new_inode(dir->i_sb);
- if (!result)
+ if (!result) {
+ err = -ENOMEM;
goto bail1;
+ }
result->i_ino = fno;
hpfs_init_inode(result);
hpfs_i(result)->i_parent_dir = dir->i_ino;
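The hpfs_mkdir() conversion above follows the new ->mkdir calling convention this series moves filesystems to: return NULL on success with @dentry instantiated, or an ERR_PTR()-encoded error. A minimal sketch of just that shape; foo_mkdir() and foo_do_mkdir() are hypothetical:

static struct dentry *foo_mkdir(struct mnt_idmap *idmap, struct inode *dir,
				struct dentry *dentry, umode_t mode)
{
	int err = foo_do_mkdir(dir, dentry, mode);	/* hypothetical helper */

	if (err)
		return ERR_PTR(err);	/* failure is ERR_PTR()-encoded */
	return NULL;			/* success: @dentry was instantiated */
}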
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 27567920abe4..371aa6de8075 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -9,6 +9,7 @@
#include "hpfs_fn.h"
#include <linux/module.h>
+#include <linux/fs_struct.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/init.h>
@@ -404,15 +405,11 @@ static int hpfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
break;
case Opt_timeshift:
{
- int m = 1;
char *rhs = param->string;
int timeshift;
- if (*rhs == '-') m = -1;
- if (*rhs == '+' || *rhs == '-') rhs++;
- timeshift = simple_strtoul(rhs, &rhs, 0) * m;
- if (*rhs)
- return -EINVAL;
+ if (kstrtoint(rhs, 0, &timeshift))
+ return -EINVAL;
ctx->timeshift = timeshift;
break;
}
@@ -554,7 +551,7 @@ static int hpfs_fill_super(struct super_block *s, struct fs_context *fc)
/* Fill superblock stuff */
s->s_magic = HPFS_SUPER_MAGIC;
s->s_op = &hpfs_sops;
- s->s_d_op = &hpfs_dentry_operations;
+ set_default_d_op(s, &hpfs_dentry_operations);
s->s_time_min = local_to_gmt(s, 0);
s->s_time_max = local_to_gmt(s, U32_MAX);
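On the Opt_timeshift change above: kstrtoint() already accepts an optional leading '+' or '-' and rejects trailing junk (other than a single trailing newline), which is why the hand-rolled sign handling around simple_strtoul() can go. Sketch only; parse_timeshift() is a hypothetical wrapper:

#include <linux/kstrtox.h>

static int parse_timeshift(const char *rhs, int *out)
{
	/* 0 on success; -EINVAL or -ERANGE on malformed or oversized input. */
	return kstrtoint(rhs, 0, out);
}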
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a4441fb77f7c..3b4c152c5c73 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -96,10 +96,16 @@ static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
#define PGOFF_LOFFT_MAX \
(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
-static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int hugetlb_file_mmap_prepare_success(const struct vm_area_struct *vma)
{
+ /* Unfortunately, we have to reassign vma->vm_private_data. */
+ return hugetlb_vma_lock_alloc((struct vm_area_struct *)vma);
+}
+
+static int hugetlbfs_file_mmap_prepare(struct vm_area_desc *desc)
+{
+ struct file *file = desc->file;
struct inode *inode = file_inode(file);
- struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
loff_t len, vma_len;
int ret;
struct hstate *h = hstate_file(file);
@@ -113,12 +119,8 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
* way when do_mmap unwinds (may be important on powerpc
* and ia64).
*/
- vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND | VM_MTE_ALLOWED);
- vma->vm_ops = &hugetlb_vm_ops;
-
- ret = seal_check_write(info->seals, vma);
- if (ret)
- return ret;
+ desc->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
+ desc->vm_ops = &hugetlb_vm_ops;
/*
* page based offset in vm_pgoff could be sufficiently large to
@@ -127,16 +129,16 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
* sizeof(unsigned long). So, only check in those instances.
*/
if (sizeof(unsigned long) == sizeof(loff_t)) {
- if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
+ if (desc->pgoff & PGOFF_LOFFT_MAX)
return -EINVAL;
}
/* must be huge page aligned */
- if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
+ if (desc->pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
return -EINVAL;
- vma_len = (loff_t)(vma->vm_end - vma->vm_start);
- len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+ vma_len = (loff_t)vma_desc_size(desc);
+ len = vma_len + ((loff_t)desc->pgoff << PAGE_SHIFT);
/* check for overflow */
if (len < vma_len)
return -EINVAL;
@@ -146,7 +148,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
ret = -ENOMEM;
- vm_flags = vma->vm_flags;
+ vm_flags = desc->vm_flags;
/*
* for SHM_HUGETLB, the pages are reserved in the shmget() call so skip
* reserving here. Note: only for SHM hugetlbfs file, the inode
@@ -155,18 +157,31 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
if (inode->i_flags & S_PRIVATE)
vm_flags |= VM_NORESERVE;
- if (!hugetlb_reserve_pages(inode,
- vma->vm_pgoff >> huge_page_order(h),
- len >> huge_page_shift(h), vma,
- vm_flags))
+ if (hugetlb_reserve_pages(inode,
+ desc->pgoff >> huge_page_order(h),
+ len >> huge_page_shift(h), desc,
+ vm_flags) < 0)
goto out;
ret = 0;
- if (vma->vm_flags & VM_WRITE && inode->i_size < len)
+ if ((desc->vm_flags & VM_WRITE) && inode->i_size < len)
i_size_write(inode, len);
out:
inode_unlock(inode);
+ if (!ret) {
+ /* Allocate the VMA lock after we set it up. */
+ desc->action.success_hook = hugetlb_file_mmap_prepare_success;
+ /*
+ * We cannot permit rmap to find this VMA in the window
+ * between the VMA being inserted into the VMA tree and the
+ * completion/success hook being invoked.
+ *
+ * This is because the success hook establishes a per-VMA
+ * hugetlb lock, which rmap could otherwise race against.
+ */
+ desc->action.hide_from_rmap_until_complete = true;
+ }
return ret;
}
@@ -184,52 +199,37 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
if (len & ~huge_page_mask(h))
return -EINVAL;
- if (flags & MAP_FIXED) {
- if (addr & ~huge_page_mask(h))
- return -EINVAL;
- if (prepare_hugepage_range(file, addr, len))
- return -EINVAL;
- }
+ if ((flags & MAP_FIXED) && (addr & ~huge_page_mask(h)))
+ return -EINVAL;
if (addr)
addr0 = ALIGN(addr, huge_page_size(h));
- return mm_get_unmapped_area_vmflags(current->mm, file, addr0, len, pgoff,
- flags, 0);
+ return mm_get_unmapped_area_vmflags(file, addr0, len, pgoff, flags, 0);
}
/*
- * Someone wants to read @bytes from a HWPOISON hugetlb @page from @offset.
+ * Someone wants to read @bytes from a HWPOISON hugetlb @folio from @offset.
* Returns the maximum number of bytes one can read without touching the 1st raw
- * HWPOISON subpage.
- *
- * The implementation borrows the iteration logic from copy_page_to_iter*.
+ * HWPOISON page.
*/
-static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t bytes)
+static size_t adjust_range_hwpoison(struct folio *folio, size_t offset,
+ size_t bytes)
{
- size_t n = 0;
- size_t res = 0;
+ struct page *page = folio_page(folio, offset / PAGE_SIZE);
+ size_t safe_bytes;
- /* First subpage to start the loop. */
- page = nth_page(page, offset / PAGE_SIZE);
- offset %= PAGE_SIZE;
- while (1) {
- if (is_raw_hwpoison_page_in_hugepage(page))
- break;
+ if (is_raw_hwpoison_page_in_hugepage(page))
+ return 0;
+ /* Safe to read the remaining bytes in this page. */
+ safe_bytes = PAGE_SIZE - (offset % PAGE_SIZE);
+ page++;
- /* Safe to read n bytes without touching HWPOISON subpage. */
- n = min(bytes, (size_t)PAGE_SIZE - offset);
- res += n;
- bytes -= n;
- if (!bytes || !n)
+ /* Check each remaining page as long as we are not done yet. */
+ for (; safe_bytes < bytes; safe_bytes += PAGE_SIZE, page++)
+ if (is_raw_hwpoison_page_in_hugepage(page))
break;
- offset += n;
- if (offset == PAGE_SIZE) {
- page = nth_page(page, 1);
- offset = 0;
- }
- }
- return res;
+ return min(safe_bytes, bytes);
}
/*
@@ -283,10 +283,10 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
else {
/*
* Adjust how many bytes safe to read without
- * touching the 1st raw HWPOISON subpage after
+ * touching the 1st raw HWPOISON page after
* offset.
*/
- want = adjust_range_hwpoison(&folio->page, offset, nr);
+ want = adjust_range_hwpoison(folio, offset, nr);
if (want == 0) {
folio_put(folio);
retval = -EIO;
@@ -314,7 +314,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
return retval;
}
-static int hugetlbfs_write_begin(struct file *file,
+static int hugetlbfs_write_begin(const struct kiocb *iocb,
struct address_space *mapping,
loff_t pos, unsigned len,
struct folio **foliop, void **fsdata)
@@ -322,9 +322,10 @@ static int hugetlbfs_write_begin(struct file *file,
return -EINVAL;
}
-static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct folio *folio, void *fsdata)
+static int hugetlbfs_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
BUG();
return -EINVAL;
@@ -343,8 +344,8 @@ static void hugetlb_delete_from_page_cache(struct folio *folio)
* mutex for the page in the mapping. So, we can not race with page being
* faulted into the vma.
*/
-static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
- unsigned long addr, struct page *page)
+static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
{
pte_t *ptep, pte;
@@ -356,7 +357,7 @@ static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
if (huge_pte_none(pte) || !pte_present(pte))
return false;
- if (pte_page(pte) == page)
+ if (pte_pfn(pte) == pfn)
return true;
return false;
@@ -401,7 +402,7 @@ static void hugetlb_unmap_file_folio(struct hstate *h,
{
struct rb_root_cached *root = &mapping->i_mmap;
struct hugetlb_vma_lock *vma_lock;
- struct page *page = &folio->page;
+ unsigned long pfn = folio_pfn(folio);
struct vm_area_struct *vma;
unsigned long v_start;
unsigned long v_end;
@@ -417,7 +418,7 @@ retry:
v_start = vma_offset_start(vma, start);
v_end = vma_offset_end(vma, end);
- if (!hugetlb_vma_maps_page(vma, v_start, page))
+ if (!hugetlb_vma_maps_pfn(vma, v_start, pfn))
continue;
if (!hugetlb_vma_trylock_write(vma)) {
@@ -467,7 +468,7 @@ retry:
*/
v_start = vma_offset_start(vma, start);
v_end = vma_offset_end(vma, end);
- if (hugetlb_vma_maps_page(vma, v_start, page))
+ if (hugetlb_vma_maps_pfn(vma, v_start, pfn))
unmap_hugepage_range(vma, v_start, v_end, NULL,
ZAP_FLAG_DROP_MARKER);
@@ -523,14 +524,16 @@ static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
/*
* If folio is mapped, it was faulted in after being
- * unmapped in caller. Unmap (again) while holding
- * the fault mutex. The mutex will prevent faults
- * until we finish removing the folio.
+ * unmapped in the caller, or hugetlb_vmdelete_list() skipped
+ * unmapping it because it failed to grab the lock. Unmap (again)
+ * while holding the fault mutex. The mutex will prevent
+ * faults until we finish removing the folio. Hold folio
+ * lock to guarantee no concurrent migration.
*/
+ folio_lock(folio);
if (unlikely(folio_mapped(folio)))
hugetlb_unmap_file_folio(h, mapping, folio, index);
- folio_lock(folio);
/*
* We must remove the folio from page cache before removing
* the region/ reserve map (hugetlb_unreserve_pages). In
@@ -819,13 +822,13 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
* folios in these areas, we need to consume the reserves
* to keep reservation accounting consistent.
*/
- folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0);
+ folio = alloc_hugetlb_folio(&pseudo_vma, addr, false);
if (IS_ERR(folio)) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
error = PTR_ERR(folio);
goto out;
}
- folio_zero_user(folio, ALIGN_DOWN(addr, hpage_size));
+ folio_zero_user(folio, addr);
__folio_mark_uptodate(folio);
error = hugetlb_add_to_page_cache(folio, mapping, index);
if (unlikely(error)) {
@@ -991,19 +994,18 @@ static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
if (!inode)
return -ENOSPC;
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
- d_instantiate(dentry, inode);
- dget(dentry);/* Extra count - pin the dentry in core */
+ d_make_persistent(dentry, inode);
return 0;
}
-static int hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int retval = hugetlbfs_mknod(idmap, dir, dentry,
mode | S_IFDIR, 0);
if (!retval)
inc_nlink(dir);
- return retval;
+ return ERR_PTR(retval);
}
static int hugetlbfs_create(struct mnt_idmap *idmap,
@@ -1039,10 +1041,9 @@ static int hugetlbfs_symlink(struct mnt_idmap *idmap,
if (inode) {
int l = strlen(symname)+1;
error = page_symlink(inode, symname, l);
- if (!error) {
- d_instantiate(dentry, inode);
- dget(dentry);
- } else
+ if (!error)
+ d_make_persistent(dentry, inode);
+ else
iput(inode);
}
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
@@ -1058,7 +1059,7 @@ static int hugetlbfs_migrate_folio(struct address_space *mapping,
int rc;
rc = migrate_huge_page_move_mapping(mapping, dst, src);
- if (rc != MIGRATEPAGE_SUCCESS)
+ if (rc)
return rc;
if (hugetlb_folio_subpool(src)) {
@@ -1069,7 +1070,7 @@ static int hugetlbfs_migrate_folio(struct address_space *mapping,
folio_migrate_flags(dst, src);
- return MIGRATEPAGE_SUCCESS;
+ return 0;
}
#else
#define hugetlbfs_migrate_folio NULL
@@ -1237,7 +1238,7 @@ static void init_once(void *foo)
static const struct file_operations hugetlbfs_file_operations = {
.read_iter = hugetlbfs_read_iter,
- .mmap = hugetlbfs_file_mmap,
+ .mmap_prepare = hugetlbfs_file_mmap_prepare,
.fsync = noop_fsync,
.get_unmapped_area = hugetlb_get_unmapped_area,
.llseek = default_llseek,
@@ -1436,6 +1437,7 @@ hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
sb->s_magic = HUGETLBFS_MAGIC;
sb->s_op = &hugetlbfs_ops;
+ sb->s_d_flags = DCACHE_DONTCACHE;
sb->s_time_gran = 1;
/*
@@ -1498,7 +1500,7 @@ static struct file_system_type hugetlbfs_fs_type = {
.name = "hugetlbfs",
.init_fs_context = hugetlbfs_init_fs_context,
.parameters = hugetlb_fs_parameters,
- .kill_sb = kill_litter_super,
+ .kill_sb = kill_anon_super,
.fs_flags = FS_ALLOW_IDMAP,
};
@@ -1564,9 +1566,9 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
inode->i_size = size;
clear_nlink(inode);
- if (!hugetlb_reserve_pages(inode, 0,
+ if (hugetlb_reserve_pages(inode, 0,
size >> huge_page_shift(hstate_inode(inode)), NULL,
- acctflag))
+ acctflag) < 0)
file = ERR_PTR(-ENOMEM);
else
file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
@@ -1590,7 +1592,7 @@ static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
} else {
struct hugetlbfs_fs_context *ctx = fc->fs_private;
ctx->hstate = h;
- mnt = fc_mount(fc);
+ mnt = fc_mount_longterm(fc);
put_fs_context(fc);
}
if (IS_ERR(mnt))
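The hugetlbfs conversion above shows the general ->mmap_prepare shape: checks and setup are done against the struct vm_area_desc before any VMA exists, with optional completion handling via desc->action. A deliberately minimal sketch using only fields exercised in this diff; foo_file_mmap_prepare(), foo_vm_ops and FOO_ALIGN_MASK are hypothetical:

static int foo_file_mmap_prepare(struct vm_area_desc *desc)
{
	/* Policy checks run against the descriptor, before any VMA exists. */
	if (desc->pgoff & FOO_ALIGN_MASK)
		return -EINVAL;

	desc->vm_flags |= VM_DONTEXPAND;	/* flags are requested on the desc... */
	desc->vm_ops = &foo_vm_ops;		/* ...as is the vm_ops table */
	return 0;
}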
diff --git a/fs/init.c b/fs/init.c
index e9387b6c4f30..e0f5429c0a49 100644
--- a/fs/init.c
+++ b/fs/init.c
@@ -149,7 +149,7 @@ int __init init_mknod(const char *filename, umode_t mode, unsigned int dev)
else if (!(S_ISBLK(mode) || S_ISCHR(mode)))
return -EINVAL;
- dentry = kern_path_create(AT_FDCWD, filename, &path, 0);
+ dentry = start_creating_path(AT_FDCWD, filename, &path, 0);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@@ -157,8 +157,8 @@ int __init init_mknod(const char *filename, umode_t mode, unsigned int dev)
error = security_path_mknod(&path, dentry, mode, dev);
if (!error)
error = vfs_mknod(mnt_idmap(path.mnt), path.dentry->d_inode,
- dentry, mode, new_decode_dev(dev));
- done_path_create(&path, dentry);
+ dentry, mode, new_decode_dev(dev), NULL);
+ end_creating_path(&path, dentry);
return error;
}
@@ -173,7 +173,7 @@ int __init init_link(const char *oldname, const char *newname)
if (error)
return error;
- new_dentry = kern_path_create(AT_FDCWD, newname, &new_path, 0);
+ new_dentry = start_creating_path(AT_FDCWD, newname, &new_path, 0);
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto out;
@@ -191,7 +191,7 @@ int __init init_link(const char *oldname, const char *newname)
error = vfs_link(old_path.dentry, idmap, new_path.dentry->d_inode,
new_dentry, NULL);
out_dput:
- done_path_create(&new_path, new_dentry);
+ end_creating_path(&new_path, new_dentry);
out:
path_put(&old_path);
return error;
@@ -203,14 +203,14 @@ int __init init_symlink(const char *oldname, const char *newname)
struct path path;
int error;
- dentry = kern_path_create(AT_FDCWD, newname, &path, 0);
+ dentry = start_creating_path(AT_FDCWD, newname, &path, 0);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
error = security_path_symlink(&path, dentry, oldname);
if (!error)
error = vfs_symlink(mnt_idmap(path.mnt), path.dentry->d_inode,
- dentry, oldname);
- done_path_create(&path, dentry);
+ dentry, oldname, NULL);
+ end_creating_path(&path, dentry);
return error;
}
@@ -225,15 +225,19 @@ int __init init_mkdir(const char *pathname, umode_t mode)
struct path path;
int error;
- dentry = kern_path_create(AT_FDCWD, pathname, &path, LOOKUP_DIRECTORY);
+ dentry = start_creating_path(AT_FDCWD, pathname, &path,
+ LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
mode = mode_strip_umask(d_inode(path.dentry), mode);
error = security_path_mkdir(&path, dentry, mode);
- if (!error)
- error = vfs_mkdir(mnt_idmap(path.mnt), path.dentry->d_inode,
- dentry, mode);
- done_path_create(&path, dentry);
+ if (!error) {
+ dentry = vfs_mkdir(mnt_idmap(path.mnt), path.dentry->d_inode,
+ dentry, mode, NULL);
+ if (IS_ERR(dentry))
+ error = PTR_ERR(dentry);
+ }
+ end_creating_path(&path, dentry);
return error;
}
diff --git a/fs/inode.c b/fs/inode.c
index 6b4c77268fc0..521383223d8a 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -184,7 +184,7 @@ static int proc_nr_inodes(const struct ctl_table *table, int write, void *buffer
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
-static struct ctl_table inodes_sysctls[] = {
+static const struct ctl_table inodes_sysctls[] = {
{
.procname = "inode-nr",
.data = &inodes_stat,
@@ -233,7 +233,7 @@ int inode_init_always_gfp(struct super_block *sb, struct inode *inode, gfp_t gfp
inode->i_sb = sb;
inode->i_blkbits = sb->s_blocksize_bits;
inode->i_flags = 0;
- inode->i_state = 0;
+ inode_state_assign_raw(inode, 0);
atomic64_set(&inode->i_sequence, 0);
atomic_set(&inode->i_count, 1);
inode->i_op = &empty_iops;
@@ -327,7 +327,17 @@ static void i_callback(struct rcu_head *head)
free_inode_nonrcu(inode);
}
-static struct inode *alloc_inode(struct super_block *sb)
+/**
+ * alloc_inode - obtain an inode
+ * @sb: superblock
+ *
+ * Allocates a new inode for the given superblock.
+ * The inode won't be chained into the superblock's s_inodes list.
+ * This means:
+ * - the fs can't be unmounted
+ * - quotas, fsnotify and writeback can't work
+ */
+struct inode *alloc_inode(struct super_block *sb)
{
const struct super_operations *ops = sb->s_op;
struct inode *inode;
@@ -461,7 +471,7 @@ EXPORT_SYMBOL(set_nlink);
void inc_nlink(struct inode *inode)
{
if (unlikely(inode->i_nlink == 0)) {
- WARN_ON(!(inode->i_state & I_LINKABLE));
+ WARN_ON(!(inode_state_read_once(inode) & I_LINKABLE));
atomic_long_dec(&inode->i_sb->s_remove_count);
}
@@ -520,11 +530,50 @@ void ihold(struct inode *inode)
}
EXPORT_SYMBOL(ihold);
-static void __inode_add_lru(struct inode *inode, bool rotate)
+struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
+ struct inode *inode, u32 bit)
+{
+ void *bit_address;
+
+ bit_address = inode_state_wait_address(inode, bit);
+ init_wait_var_entry(wqe, bit_address, 0);
+ return __var_waitqueue(bit_address);
+}
+EXPORT_SYMBOL(inode_bit_waitqueue);
+
+void wait_on_new_inode(struct inode *inode)
+{
+ struct wait_bit_queue_entry wqe;
+ struct wait_queue_head *wq_head;
+
+ spin_lock(&inode->i_lock);
+ if (!(inode_state_read(inode) & I_NEW)) {
+ spin_unlock(&inode->i_lock);
+ return;
+ }
+
+ wq_head = inode_bit_waitqueue(&wqe, inode, __I_NEW);
+ for (;;) {
+ prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
+ if (!(inode_state_read(inode) & I_NEW))
+ break;
+ spin_unlock(&inode->i_lock);
+ schedule();
+ spin_lock(&inode->i_lock);
+ }
+ finish_wait(wq_head, &wqe.wq_entry);
+ WARN_ON(inode_state_read(inode) & I_NEW);
+ spin_unlock(&inode->i_lock);
+}
+EXPORT_SYMBOL(wait_on_new_inode);
+
+static void __inode_lru_list_add(struct inode *inode, bool rotate)
{
- if (inode->i_state & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
+ lockdep_assert_held(&inode->i_lock);
+
+ if (inode_state_read(inode) & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
return;
- if (atomic_read(&inode->i_count))
+ if (icount_read(inode))
return;
if (!(inode->i_sb->s_flags & SB_ACTIVE))
return;
@@ -534,32 +583,22 @@ static void __inode_add_lru(struct inode *inode, bool rotate)
if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
this_cpu_inc(nr_unused);
else if (rotate)
- inode->i_state |= I_REFERENCED;
+ inode_state_set(inode, I_REFERENCED);
}
-struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
- struct inode *inode, u32 bit)
-{
- void *bit_address;
-
- bit_address = inode_state_wait_address(inode, bit);
- init_wait_var_entry(wqe, bit_address, 0);
- return __var_waitqueue(bit_address);
-}
-EXPORT_SYMBOL(inode_bit_waitqueue);
-
/*
* Add inode to LRU if needed (inode is unused and clean).
- *
- * Needs inode->i_lock held.
*/
-void inode_add_lru(struct inode *inode)
+void inode_lru_list_add(struct inode *inode)
{
- __inode_add_lru(inode, false);
+ __inode_lru_list_add(inode, false);
}
static void inode_lru_list_del(struct inode *inode)
{
+ if (list_empty(&inode->i_lru))
+ return;
+
if (list_lru_del_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
this_cpu_dec(nr_unused);
}
@@ -567,15 +606,15 @@ static void inode_lru_list_del(struct inode *inode)
static void inode_pin_lru_isolating(struct inode *inode)
{
lockdep_assert_held(&inode->i_lock);
- WARN_ON(inode->i_state & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE));
- inode->i_state |= I_LRU_ISOLATING;
+ WARN_ON(inode_state_read(inode) & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE));
+ inode_state_set(inode, I_LRU_ISOLATING);
}
static void inode_unpin_lru_isolating(struct inode *inode)
{
spin_lock(&inode->i_lock);
- WARN_ON(!(inode->i_state & I_LRU_ISOLATING));
- inode->i_state &= ~I_LRU_ISOLATING;
+ WARN_ON(!(inode_state_read(inode) & I_LRU_ISOLATING));
+ inode_state_clear(inode, I_LRU_ISOLATING);
/* Called with inode->i_lock which ensures memory ordering. */
inode_wake_up_bit(inode, __I_LRU_ISOLATING);
spin_unlock(&inode->i_lock);
@@ -587,7 +626,7 @@ static void inode_wait_for_lru_isolating(struct inode *inode)
struct wait_queue_head *wq_head;
lockdep_assert_held(&inode->i_lock);
- if (!(inode->i_state & I_LRU_ISOLATING))
+ if (!(inode_state_read(inode) & I_LRU_ISOLATING))
return;
wq_head = inode_bit_waitqueue(&wqe, inode, __I_LRU_ISOLATING);
@@ -597,14 +636,14 @@ static void inode_wait_for_lru_isolating(struct inode *inode)
* Checking I_LRU_ISOLATING with inode->i_lock guarantees
* memory ordering.
*/
- if (!(inode->i_state & I_LRU_ISOLATING))
+ if (!(inode_state_read(inode) & I_LRU_ISOLATING))
break;
spin_unlock(&inode->i_lock);
schedule();
spin_lock(&inode->i_lock);
}
finish_wait(wq_head, &wqe.wq_entry);
- WARN_ON(inode->i_state & I_LRU_ISOLATING);
+ WARN_ON(inode_state_read(inode) & I_LRU_ISOLATING);
}
/**
@@ -613,18 +652,22 @@ static void inode_wait_for_lru_isolating(struct inode *inode)
*/
void inode_sb_list_add(struct inode *inode)
{
- spin_lock(&inode->i_sb->s_inode_list_lock);
- list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
- spin_unlock(&inode->i_sb->s_inode_list_lock);
+ struct super_block *sb = inode->i_sb;
+
+ spin_lock(&sb->s_inode_list_lock);
+ list_add(&inode->i_sb_list, &sb->s_inodes);
+ spin_unlock(&sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);
static inline void inode_sb_list_del(struct inode *inode)
{
+ struct super_block *sb = inode->i_sb;
+
if (!list_empty(&inode->i_sb_list)) {
- spin_lock(&inode->i_sb->s_inode_list_lock);
+ spin_lock(&sb->s_inode_list_lock);
list_del_init(&inode->i_sb_list);
- spin_unlock(&inode->i_sb->s_inode_list_lock);
+ spin_unlock(&sb->s_inode_list_lock);
}
}
@@ -747,11 +790,11 @@ void clear_inode(struct inode *inode)
*/
xa_unlock_irq(&inode->i_data.i_pages);
BUG_ON(!list_empty(&inode->i_data.i_private_list));
- BUG_ON(!(inode->i_state & I_FREEING));
- BUG_ON(inode->i_state & I_CLEAR);
+ BUG_ON(!(inode_state_read_once(inode) & I_FREEING));
+ BUG_ON(inode_state_read_once(inode) & I_CLEAR);
BUG_ON(!list_empty(&inode->i_wb_list));
/* don't need i_lock here, no concurrent mods to i_state */
- inode->i_state = I_FREEING | I_CLEAR;
+ inode_state_assign_raw(inode, I_FREEING | I_CLEAR);
}
EXPORT_SYMBOL(clear_inode);
@@ -772,12 +815,10 @@ static void evict(struct inode *inode)
{
const struct super_operations *op = inode->i_sb->s_op;
- BUG_ON(!(inode->i_state & I_FREEING));
+ BUG_ON(!(inode_state_read_once(inode) & I_FREEING));
BUG_ON(!list_empty(&inode->i_lru));
- if (!list_empty(&inode->i_io_list))
- inode_io_list_del(inode);
-
+ inode_io_list_del(inode);
inode_sb_list_del(inode);
spin_lock(&inode->i_lock);
@@ -806,23 +847,16 @@ static void evict(struct inode *inode)
/*
* Wake up waiters in __wait_on_freeing_inode().
*
- * Lockless hash lookup may end up finding the inode before we removed
- * it above, but only lock it *after* we are done with the wakeup below.
- * In this case the potential waiter cannot safely block.
+ * It is an invariant that any thread we need to wake up is already
+ * accounted for before remove_inode_hash() acquires ->i_lock -- both
+ * sides take the lock and sleep is aborted if the inode is found
+ * unhashed. Thus either the sleeper wins and goes off CPU, or removal
+ * wins and the sleeper aborts after testing with the lock.
*
- * The inode being unhashed after the call to remove_inode_hash() is
- * used as an indicator whether blocking on it is safe.
- */
- spin_lock(&inode->i_lock);
- /*
- * Pairs with the barrier in prepare_to_wait_event() to make sure
- * ___wait_var_event() either sees the bit cleared or
- * waitqueue_active() check in wake_up_var() sees the waiter.
+ * This also means we don't need any fences for the call below.
*/
- smp_mb__after_spinlock();
inode_wake_up_bit(inode, __I_NEW);
- BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
- spin_unlock(&inode->i_lock);
+ BUG_ON(inode_state_read_once(inode) != (I_FREEING | I_CLEAR));
destroy_inode(inode);
}
@@ -858,26 +892,26 @@ static void dispose_list(struct list_head *head)
*/
void evict_inodes(struct super_block *sb)
{
- struct inode *inode, *next;
+ struct inode *inode;
LIST_HEAD(dispose);
again:
spin_lock(&sb->s_inode_list_lock);
- list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
- if (atomic_read(&inode->i_count))
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ if (icount_read(inode))
continue;
spin_lock(&inode->i_lock);
- if (atomic_read(&inode->i_count)) {
+ if (icount_read(inode)) {
spin_unlock(&inode->i_lock);
continue;
}
- if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+ if (inode_state_read(inode) & (I_NEW | I_FREEING | I_WILL_FREE)) {
spin_unlock(&inode->i_lock);
continue;
}
- inode->i_state |= I_FREEING;
+ inode_state_set(inode, I_FREEING);
inode_lru_list_del(inode);
spin_unlock(&inode->i_lock);
list_add(&inode->i_lru, &dispose);
@@ -900,46 +934,6 @@ again:
}
EXPORT_SYMBOL_GPL(evict_inodes);
-/**
- * invalidate_inodes - attempt to free all inodes on a superblock
- * @sb: superblock to operate on
- *
- * Attempts to free all inodes (including dirty inodes) for a given superblock.
- */
-void invalidate_inodes(struct super_block *sb)
-{
- struct inode *inode, *next;
- LIST_HEAD(dispose);
-
-again:
- spin_lock(&sb->s_inode_list_lock);
- list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
- spin_lock(&inode->i_lock);
- if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
- spin_unlock(&inode->i_lock);
- continue;
- }
- if (atomic_read(&inode->i_count)) {
- spin_unlock(&inode->i_lock);
- continue;
- }
-
- inode->i_state |= I_FREEING;
- inode_lru_list_del(inode);
- spin_unlock(&inode->i_lock);
- list_add(&inode->i_lru, &dispose);
- if (need_resched()) {
- spin_unlock(&sb->s_inode_list_lock);
- cond_resched();
- dispose_list(&dispose);
- goto again;
- }
- }
- spin_unlock(&sb->s_inode_list_lock);
-
- dispose_list(&dispose);
-}
-
/*
* Isolate the inode from the LRU in preparation for freeing it.
*
@@ -970,8 +964,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
* unreclaimable for a while. Remove them lazily here; iput,
* sync, or the last page cache deletion will requeue them.
*/
- if (atomic_read(&inode->i_count) ||
- (inode->i_state & ~I_REFERENCED) ||
+ if (icount_read(inode) ||
+ (inode_state_read(inode) & ~I_REFERENCED) ||
!mapping_shrinkable(&inode->i_data)) {
list_lru_isolate(lru, &inode->i_lru);
spin_unlock(&inode->i_lock);
@@ -980,8 +974,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
}
/* Recently referenced inodes get one more pass */
- if (inode->i_state & I_REFERENCED) {
- inode->i_state &= ~I_REFERENCED;
+ if (inode_state_read(inode) & I_REFERENCED) {
+ inode_state_clear(inode, I_REFERENCED);
spin_unlock(&inode->i_lock);
return LRU_ROTATE;
}
@@ -1008,8 +1002,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
return LRU_RETRY;
}
- WARN_ON(inode->i_state & I_NEW);
- inode->i_state |= I_FREEING;
+ WARN_ON(inode_state_read(inode) & I_NEW);
+ inode_state_set(inode, I_FREEING);
list_lru_isolate_move(lru, &inode->i_lru, freeable);
spin_unlock(&inode->i_lock);
@@ -1041,7 +1035,8 @@ static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_lock
static struct inode *find_inode(struct super_block *sb,
struct hlist_head *head,
int (*test)(struct inode *, void *),
- void *data, bool is_inode_hash_locked)
+ void *data, bool is_inode_hash_locked,
+ bool *isnew)
{
struct inode *inode = NULL;
@@ -1058,16 +1053,17 @@ repeat:
if (!test(inode, data))
continue;
spin_lock(&inode->i_lock);
- if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
+ if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE)) {
__wait_on_freeing_inode(inode, is_inode_hash_locked);
goto repeat;
}
- if (unlikely(inode->i_state & I_CREATING)) {
+ if (unlikely(inode_state_read(inode) & I_CREATING)) {
spin_unlock(&inode->i_lock);
rcu_read_unlock();
return ERR_PTR(-ESTALE);
}
__iget(inode);
+ *isnew = !!(inode_state_read(inode) & I_NEW);
spin_unlock(&inode->i_lock);
rcu_read_unlock();
return inode;
@@ -1082,7 +1078,7 @@ repeat:
*/
static struct inode *find_inode_fast(struct super_block *sb,
struct hlist_head *head, unsigned long ino,
- bool is_inode_hash_locked)
+ bool is_inode_hash_locked, bool *isnew)
{
struct inode *inode = NULL;
@@ -1099,16 +1095,17 @@ repeat:
if (inode->i_sb != sb)
continue;
spin_lock(&inode->i_lock);
- if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
+ if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE)) {
__wait_on_freeing_inode(inode, is_inode_hash_locked);
goto repeat;
}
- if (unlikely(inode->i_state & I_CREATING)) {
+ if (unlikely(inode_state_read(inode) & I_CREATING)) {
spin_unlock(&inode->i_lock);
rcu_read_unlock();
return ERR_PTR(-ESTALE);
}
__iget(inode);
+ *isnew = !!(inode_state_read(inode) & I_NEW);
spin_unlock(&inode->i_lock);
rcu_read_unlock();
return inode;
@@ -1160,21 +1157,6 @@ unsigned int get_next_ino(void)
EXPORT_SYMBOL(get_next_ino);
/**
- * new_inode_pseudo - obtain an inode
- * @sb: superblock
- *
- * Allocates a new inode for given superblock.
- * Inode wont be chained in superblock s_inodes list
- * This means :
- * - fs can't be unmount
- * - quotas, fsnotify, writeback can't work
- */
-struct inode *new_inode_pseudo(struct super_block *sb)
-{
- return alloc_inode(sb);
-}
-
-/**
* new_inode - obtain an inode
* @sb: superblock
*
@@ -1190,7 +1172,7 @@ struct inode *new_inode(struct super_block *sb)
{
struct inode *inode;
- inode = new_inode_pseudo(sb);
+ inode = alloc_inode(sb);
if (inode)
inode_sb_list_add(inode);
return inode;
@@ -1206,9 +1188,8 @@ void lockdep_annotate_inode_mutex_key(struct inode *inode)
/* Set new key only if filesystem hasn't already changed it */
if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
/*
- * ensure nobody is actually holding i_mutex
+ * ensure nobody is actually holding i_rwsem
*/
- // mutex_destroy(&inode->i_mutex);
init_rwsem(&inode->i_rwsem);
lockdep_set_class(&inode->i_rwsem,
&type->i_mutex_dir_key);
@@ -1229,14 +1210,8 @@ void unlock_new_inode(struct inode *inode)
{
lockdep_annotate_inode_mutex_key(inode);
spin_lock(&inode->i_lock);
- WARN_ON(!(inode->i_state & I_NEW));
- inode->i_state &= ~I_NEW & ~I_CREATING;
- /*
- * Pairs with the barrier in prepare_to_wait_event() to make sure
- * ___wait_var_event() either sees the bit cleared or
- * waitqueue_active() check in wake_up_var() sees the waiter.
- */
- smp_mb();
+ WARN_ON(!(inode_state_read(inode) & I_NEW));
+ inode_state_clear(inode, I_NEW | I_CREATING);
inode_wake_up_bit(inode, __I_NEW);
spin_unlock(&inode->i_lock);
}
@@ -1246,14 +1221,8 @@ void discard_new_inode(struct inode *inode)
{
lockdep_annotate_inode_mutex_key(inode);
spin_lock(&inode->i_lock);
- WARN_ON(!(inode->i_state & I_NEW));
- inode->i_state &= ~I_NEW;
- /*
- * Pairs with the barrier in prepare_to_wait_event() to make sure
- * ___wait_var_event() either sees the bit cleared or
- * waitqueue_active() check in wake_up_var() sees the waiter.
- */
- smp_mb();
+ WARN_ON(!(inode_state_read(inode) & I_NEW));
+ inode_state_clear(inode, I_NEW);
inode_wake_up_bit(inode, __I_NEW);
spin_unlock(&inode->i_lock);
iput(inode);
@@ -1309,6 +1278,7 @@ EXPORT_SYMBOL(unlock_two_nondirectories);
* @test: callback used for comparisons between inodes
* @set: callback used to initialize a new struct inode
* @data: opaque data pointer to pass to @test and @set
+ * @isnew: pointer to a bool which will indicate whether I_NEW is set
*
* Search for the inode specified by @hashval and @data in the inode cache,
* and if present return it with an increased reference count. This is a
@@ -1327,10 +1297,13 @@ struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
{
struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
struct inode *old;
+ bool isnew;
+
+ might_sleep();
again:
spin_lock(&inode_hash_lock);
- old = find_inode(inode->i_sb, head, test, data, true);
+ old = find_inode(inode->i_sb, head, test, data, true, &isnew);
if (unlikely(old)) {
/*
* Uhhuh, somebody else created the same inode under us.
@@ -1339,7 +1312,8 @@ again:
spin_unlock(&inode_hash_lock);
if (IS_ERR(old))
return NULL;
- wait_on_inode(old);
+ if (unlikely(isnew))
+ wait_on_new_inode(old);
if (unlikely(inode_unhashed(old))) {
iput(old);
goto again;
@@ -1348,8 +1322,8 @@ again:
}
if (set && unlikely(set(inode, data))) {
- inode = NULL;
- goto unlock;
+ spin_unlock(&inode_hash_lock);
+ return NULL;
}
/*
@@ -1357,18 +1331,18 @@ again:
* caller is responsible for filling in the contents
*/
spin_lock(&inode->i_lock);
- inode->i_state |= I_NEW;
+ inode_state_set(inode, I_NEW);
hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_hash_lock);
+
/*
* Add inode to the sb list if it's not already. It has I_NEW at this
* point, so it should be safe to test i_sb_list locklessly.
*/
if (list_empty(&inode->i_sb_list))
inode_sb_list_add(inode);
-unlock:
- spin_unlock(&inode_hash_lock);
return inode;
}
@@ -1430,13 +1404,17 @@ struct inode *iget5_locked_rcu(struct super_block *sb, unsigned long hashval,
{
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
struct inode *inode, *new;
+ bool isnew;
+
+ might_sleep();
again:
- inode = find_inode(sb, head, test, data, false);
+ inode = find_inode(sb, head, test, data, false, &isnew);
if (inode) {
if (IS_ERR(inode))
return NULL;
- wait_on_inode(inode);
+ if (unlikely(isnew))
+ wait_on_new_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
@@ -1471,12 +1449,17 @@ struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
struct hlist_head *head = inode_hashtable + hash(sb, ino);
struct inode *inode;
+ bool isnew;
+
+ might_sleep();
+
again:
- inode = find_inode_fast(sb, head, ino, false);
+ inode = find_inode_fast(sb, head, ino, false, &isnew);
if (inode) {
if (IS_ERR(inode))
return NULL;
- wait_on_inode(inode);
+ if (unlikely(isnew))
+ wait_on_new_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
@@ -1490,15 +1473,15 @@ again:
spin_lock(&inode_hash_lock);
/* We released the lock, so.. */
- old = find_inode_fast(sb, head, ino, true);
+ old = find_inode_fast(sb, head, ino, true, &isnew);
if (!old) {
inode->i_ino = ino;
spin_lock(&inode->i_lock);
- inode->i_state = I_NEW;
+ inode_state_assign(inode, I_NEW);
hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
- inode_sb_list_add(inode);
spin_unlock(&inode_hash_lock);
+ inode_sb_list_add(inode);
/* Return the locked inode with I_NEW set, the
* caller is responsible for filling in the contents
@@ -1516,7 +1499,8 @@ again:
if (IS_ERR(old))
return NULL;
inode = old;
- wait_on_inode(inode);
+ if (unlikely(isnew))
+ wait_on_new_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
@@ -1587,7 +1571,7 @@ EXPORT_SYMBOL(iunique);
struct inode *igrab(struct inode *inode)
{
spin_lock(&inode->i_lock);
- if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
+ if (!(inode_state_read(inode) & (I_FREEING | I_WILL_FREE))) {
__iget(inode);
spin_unlock(&inode->i_lock);
} else {
@@ -1620,13 +1604,13 @@ EXPORT_SYMBOL(igrab);
* Note2: @test is called with the inode_hash_lock held, so can't sleep.
*/
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
- int (*test)(struct inode *, void *), void *data)
+ int (*test)(struct inode *, void *), void *data, bool *isnew)
{
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
struct inode *inode;
spin_lock(&inode_hash_lock);
- inode = find_inode(sb, head, test, data, true);
+ inode = find_inode(sb, head, test, data, true, isnew);
spin_unlock(&inode_hash_lock);
return IS_ERR(inode) ? NULL : inode;
@@ -1654,10 +1638,15 @@ struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
{
struct inode *inode;
+ bool isnew;
+
+ might_sleep();
+
again:
- inode = ilookup5_nowait(sb, hashval, test, data);
+ inode = ilookup5_nowait(sb, hashval, test, data, &isnew);
if (inode) {
- wait_on_inode(inode);
+ if (unlikely(isnew))
+ wait_on_new_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
@@ -1679,13 +1668,18 @@ struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
struct hlist_head *head = inode_hashtable + hash(sb, ino);
struct inode *inode;
+ bool isnew;
+
+ might_sleep();
+
again:
- inode = find_inode_fast(sb, head, ino, false);
+ inode = find_inode_fast(sb, head, ino, false, &isnew);
if (inode) {
if (IS_ERR(inode))
return NULL;
- wait_on_inode(inode);
+ if (unlikely(isnew))
+ wait_on_new_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
@@ -1777,7 +1771,7 @@ struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
hlist_for_each_entry_rcu(inode, head, i_hash) {
if (inode->i_sb == sb &&
- !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) &&
+ !(inode_state_read_once(inode) & (I_FREEING | I_WILL_FREE)) &&
test(inode, data))
return inode;
}
@@ -1816,7 +1810,7 @@ struct inode *find_inode_by_ino_rcu(struct super_block *sb,
hlist_for_each_entry_rcu(inode, head, i_hash) {
if (inode->i_ino == ino &&
inode->i_sb == sb &&
- !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)))
+ !(inode_state_read_once(inode) & (I_FREEING | I_WILL_FREE)))
return inode;
}
return NULL;
@@ -1828,6 +1822,9 @@ int insert_inode_locked(struct inode *inode)
struct super_block *sb = inode->i_sb;
ino_t ino = inode->i_ino;
struct hlist_head *head = inode_hashtable + hash(sb, ino);
+ bool isnew;
+
+ might_sleep();
while (1) {
struct inode *old = NULL;
@@ -1838,7 +1835,7 @@ int insert_inode_locked(struct inode *inode)
if (old->i_sb != sb)
continue;
spin_lock(&old->i_lock);
- if (old->i_state & (I_FREEING|I_WILL_FREE)) {
+ if (inode_state_read(old) & (I_FREEING | I_WILL_FREE)) {
spin_unlock(&old->i_lock);
continue;
}
@@ -1846,21 +1843,23 @@ int insert_inode_locked(struct inode *inode)
}
if (likely(!old)) {
spin_lock(&inode->i_lock);
- inode->i_state |= I_NEW | I_CREATING;
+ inode_state_set(inode, I_NEW | I_CREATING);
hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
return 0;
}
- if (unlikely(old->i_state & I_CREATING)) {
+ if (unlikely(inode_state_read(old) & I_CREATING)) {
spin_unlock(&old->i_lock);
spin_unlock(&inode_hash_lock);
return -EBUSY;
}
__iget(old);
+ isnew = !!(inode_state_read(old) & I_NEW);
spin_unlock(&old->i_lock);
spin_unlock(&inode_hash_lock);
- wait_on_inode(old);
+ if (isnew)
+ wait_on_new_inode(old);
if (unlikely(!inode_unhashed(old))) {
iput(old);
return -EBUSY;
@@ -1875,7 +1874,9 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
{
struct inode *old;
- inode->i_state |= I_CREATING;
+ might_sleep();
+
+ inode_state_set_raw(inode, I_CREATING);
old = inode_insert5(inode, hashval, test, NULL, data);
if (old != inode) {
@@ -1887,11 +1888,11 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
EXPORT_SYMBOL(insert_inode_locked4);
-int generic_delete_inode(struct inode *inode)
+int inode_just_drop(struct inode *inode)
{
return 1;
}
-EXPORT_SYMBOL(generic_delete_inode);
+EXPORT_SYMBOL(inode_just_drop);
/*
* Called when we're dropping the last reference
@@ -1907,40 +1908,44 @@ static void iput_final(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
const struct super_operations *op = inode->i_sb->s_op;
- unsigned long state;
int drop;
- WARN_ON(inode->i_state & I_NEW);
+ WARN_ON(inode_state_read(inode) & I_NEW);
+ VFS_BUG_ON_INODE(atomic_read(&inode->i_count) != 0, inode);
if (op->drop_inode)
drop = op->drop_inode(inode);
else
- drop = generic_drop_inode(inode);
+ drop = inode_generic_drop(inode);
if (!drop &&
- !(inode->i_state & I_DONTCACHE) &&
+ !(inode_state_read(inode) & I_DONTCACHE) &&
(sb->s_flags & SB_ACTIVE)) {
- __inode_add_lru(inode, true);
+ __inode_lru_list_add(inode, true);
spin_unlock(&inode->i_lock);
return;
}
- state = inode->i_state;
- if (!drop) {
- WRITE_ONCE(inode->i_state, state | I_WILL_FREE);
+ /*
+ * Re-check ->i_count in case the ->drop_inode() hooks played games.
+ * Note we only execute this if the verdict was to drop the inode.
+ */
+ VFS_BUG_ON_INODE(atomic_read(&inode->i_count) != 0, inode);
+
+ if (drop) {
+ inode_state_set(inode, I_FREEING);
+ } else {
+ inode_state_set(inode, I_WILL_FREE);
spin_unlock(&inode->i_lock);
write_inode_now(inode, 1);
spin_lock(&inode->i_lock);
- state = inode->i_state;
- WARN_ON(state & I_NEW);
- state &= ~I_WILL_FREE;
+ WARN_ON(inode_state_read(inode) & I_NEW);
+ inode_state_replace(inode, I_WILL_FREE, I_FREEING);
}
- WRITE_ONCE(inode->i_state, state | I_FREEING);
- if (!list_empty(&inode->i_lru))
- inode_lru_list_del(inode);
+ inode_lru_list_del(inode);
spin_unlock(&inode->i_lock);
evict(inode);
@@ -1957,23 +1962,61 @@ static void iput_final(struct inode *inode)
*/
void iput(struct inode *inode)
{
- if (!inode)
+ might_sleep();
+ if (unlikely(!inode))
return;
- BUG_ON(inode->i_state & I_CLEAR);
+
retry:
- if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
- if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
- atomic_inc(&inode->i_count);
- spin_unlock(&inode->i_lock);
- trace_writeback_lazytime_iput(inode);
- mark_inode_dirty_sync(inode);
- goto retry;
- }
- iput_final(inode);
+ lockdep_assert_not_held(&inode->i_lock);
+ VFS_BUG_ON_INODE(inode_state_read_once(inode) & (I_FREEING | I_CLEAR), inode);
+ /*
+ * Note this assert is technically racy: if the count is bogusly
+ * equal to one, two CPUs racing to drop it further can both
+ * conclude it is fine.
+ */
+ VFS_BUG_ON_INODE(atomic_read(&inode->i_count) < 1, inode);
+
+ if (atomic_add_unless(&inode->i_count, -1, 1))
+ return;
+
+ if ((inode_state_read_once(inode) & I_DIRTY_TIME) && inode->i_nlink) {
+ trace_writeback_lazytime_iput(inode);
+ mark_inode_dirty_sync(inode);
+ goto retry;
+ }
+
+ spin_lock(&inode->i_lock);
+ if (unlikely((inode_state_read(inode) & I_DIRTY_TIME) && inode->i_nlink)) {
+ spin_unlock(&inode->i_lock);
+ goto retry;
}
+
+ if (!atomic_dec_and_test(&inode->i_count)) {
+ spin_unlock(&inode->i_lock);
+ return;
+ }
+
+ /*
+ * iput_final() drops ->i_lock, we can't assert on it as the inode may
+ * be deallocated by the time the call returns.
+ */
+ iput_final(inode);
}
EXPORT_SYMBOL(iput);
+/**
+ * iput_not_last - put an inode assuming this is not the last reference
+ * @inode: inode to put
+ */
+void iput_not_last(struct inode *inode)
+{
+ VFS_BUG_ON_INODE(inode_state_read_once(inode) & (I_FREEING | I_CLEAR), inode);
+ VFS_BUG_ON_INODE(atomic_read(&inode->i_count) < 2, inode);
+
+ WARN_ON(atomic_sub_return(1, &inode->i_count) == 0);
+}
+EXPORT_SYMBOL(iput_not_last);
+
#ifdef CONFIG_BLOCK
/**
* bmap - find a block number in a file
@@ -2238,7 +2281,7 @@ static int __remove_privs(struct mnt_idmap *idmap,
return notify_change(idmap, dentry, &newattrs, NULL);
}
-int file_remove_privs_flags(struct file *file, unsigned int flags)
+static int file_remove_privs_flags(struct file *file, unsigned int flags)
{
struct dentry *dentry = file_dentry(file);
struct inode *inode = file_inode(file);
@@ -2263,7 +2306,6 @@ int file_remove_privs_flags(struct file *file, unsigned int flags)
inode_has_no_xattr(inode);
return error;
}
-EXPORT_SYMBOL_GPL(file_remove_privs_flags);
/**
* file_remove_privs - remove special file privileges (suid, capabilities)
@@ -2318,42 +2360,40 @@ out:
}
EXPORT_SYMBOL(current_time);
-static int inode_needs_update_time(struct inode *inode)
+static int file_update_time_flags(struct file *file, unsigned int flags)
{
+ struct inode *inode = file_inode(file);
struct timespec64 now, ts;
- int sync_it = 0;
+ int sync_mode = 0;
+ int ret = 0;
/* First try to exhaust all avenues to not sync */
if (IS_NOCMTIME(inode))
return 0;
+ if (unlikely(file->f_mode & FMODE_NOCMTIME))
+ return 0;
now = current_time(inode);
ts = inode_get_mtime(inode);
if (!timespec64_equal(&ts, &now))
- sync_it |= S_MTIME;
-
+ sync_mode |= S_MTIME;
ts = inode_get_ctime(inode);
if (!timespec64_equal(&ts, &now))
- sync_it |= S_CTIME;
-
+ sync_mode |= S_CTIME;
if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
- sync_it |= S_VERSION;
-
- return sync_it;
-}
+ sync_mode |= S_VERSION;
-static int __file_update_time(struct file *file, int sync_mode)
-{
- int ret = 0;
- struct inode *inode = file_inode(file);
+ if (!sync_mode)
+ return 0;
- /* try to update time settings */
- if (!mnt_get_write_access_file(file)) {
- ret = inode_update_time(inode, sync_mode);
- mnt_put_write_access_file(file);
- }
+ if (flags & IOCB_NOWAIT)
+ return -EAGAIN;
+ if (mnt_get_write_access_file(file))
+ return 0;
+ ret = inode_update_time(inode, sync_mode);
+ mnt_put_write_access_file(file);
return ret;
}
@@ -2373,14 +2413,7 @@ static int __file_update_time(struct file *file, int sync_mode)
*/
int file_update_time(struct file *file)
{
- int ret;
- struct inode *inode = file_inode(file);
-
- ret = inode_needs_update_time(inode);
- if (ret <= 0)
- return ret;
-
- return __file_update_time(file, ret);
+ return file_update_time_flags(file, 0);
}
EXPORT_SYMBOL(file_update_time);
@@ -2402,7 +2435,6 @@ EXPORT_SYMBOL(file_update_time);
static int file_modified_flags(struct file *file, int flags)
{
int ret;
- struct inode *inode = file_inode(file);
/*
* Clear the security bits if the process is not being run by root.
@@ -2411,17 +2443,7 @@ static int file_modified_flags(struct file *file, int flags)
ret = file_remove_privs_flags(file, flags);
if (ret)
return ret;
-
- if (unlikely(file->f_mode & FMODE_NOCMTIME))
- return 0;
-
- ret = inode_needs_update_time(inode);
- if (ret <= 0)
- return ret;
- if (flags & IOCB_NOWAIT)
- return -EAGAIN;
-
- return __file_update_time(file, ret);
+ return file_update_time_flags(file, flags);
}
/**
@@ -2568,21 +2590,28 @@ void __init inode_init(void)
void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
inode->i_mode = mode;
- if (S_ISCHR(mode)) {
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFCHR:
inode->i_fop = &def_chr_fops;
inode->i_rdev = rdev;
- } else if (S_ISBLK(mode)) {
+ break;
+ case S_IFBLK:
if (IS_ENABLED(CONFIG_BLOCK))
inode->i_fop = &def_blk_fops;
inode->i_rdev = rdev;
- } else if (S_ISFIFO(mode))
+ break;
+ case S_IFIFO:
inode->i_fop = &pipefifo_fops;
- else if (S_ISSOCK(mode))
- ; /* leave it no_open_fops */
- else
+ break;
+ case S_IFSOCK:
+ /* leave it no_open_fops */
+ break;
+ default:
printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
" inode %s:%lu\n", mode, inode->i_sb->s_id,
inode->i_ino);
+ break;
+ }
}
EXPORT_SYMBOL(init_special_inode);
@@ -2663,7 +2692,7 @@ EXPORT_SYMBOL(inode_dio_finished);
* proceed with a truncate or equivalent operation.
*
* Must be called under a lock that serializes taking new references
- * to i_dio_count, usually by inode->i_mutex.
+ * to i_dio_count, usually by inode->i_rwsem.
*/
void inode_dio_wait(struct inode *inode)
{
@@ -2681,7 +2710,7 @@ EXPORT_SYMBOL(inode_dio_wait_interruptible);
/*
* inode_set_flags - atomically set some inode flags
*
- * Note: the caller should be holding i_mutex, or else be sure that
+ * Note: the caller should be holding i_rwsem exclusively, or else be sure that
* they have exclusive access to the inode structure (i.e., while the
* inode is being instantiated). The reason for the cmpxchg() loop
* --- which wouldn't be necessary if all code paths which modify
@@ -2689,7 +2718,7 @@ EXPORT_SYMBOL(inode_dio_wait_interruptible);
* code path which doesn't today so we use cmpxchg() out of an abundance
* of caution.
*
- * In the long run, i_mutex is overkill, and we should probably look
+ * In the long run, i_rwsem is overkill, and we should probably look
* at using the i_lock spinlock to protect i_flags, and then make sure
* it is so documented in include/linux/fs.h and that all code follows
* the locking convention!!
@@ -2953,3 +2982,26 @@ umode_t mode_strip_sgid(struct mnt_idmap *idmap,
return mode & ~S_ISGID;
}
EXPORT_SYMBOL(mode_strip_sgid);
+
+#ifdef CONFIG_DEBUG_VFS
+/*
+ * Dump an inode.
+ *
+ * TODO: add a proper inode dumping routine; this is a stub to get debugging
+ * off the ground.
+ *
+ * TODO: handle getting to fs type with get_kernel_nofault()?
+ * See dump_mapping() above.
+ */
+void dump_inode(struct inode *inode, const char *reason)
+{
+ struct super_block *sb = inode->i_sb;
+
+ pr_warn("%s encountered for inode %px\n"
+ "fs %s mode %ho opflags 0x%hx flags 0x%x state 0x%x count %d\n",
+ reason, inode, sb->s_type->name, inode->i_mode, inode->i_opflags,
+ inode->i_flags, inode_state_read_once(inode), atomic_read(&inode->i_count));
+}
+EXPORT_SYMBOL(dump_inode);
+#endif
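The reworked iput() above relies on the "drop unless last" idiom: atomic_add_unless(&inode->i_count, -1, 1) decrements the count unless it is currently 1, in which case it fails and the caller falls through to the locked slow path. A stand-alone sketch of just that idiom (not the VFS code); put_ref_fast() is hypothetical:

#include <linux/atomic.h>

/* Returns true if a reference was dropped on the fast path; false means
 * this would have been the last reference and the caller must take the
 * locked slow path (re-check state, do final teardown). */
static bool put_ref_fast(atomic_t *count)
{
	return atomic_add_unless(count, -1, 1);
}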
diff --git a/fs/internal.h b/fs/internal.h
index e7f02ae1e098..ab638d41ab81 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -53,7 +53,7 @@ extern int finish_clean_context(struct fs_context *fc);
* namei.c
*/
extern int filename_lookup(int dfd, struct filename *name, unsigned flags,
- struct path *path, struct path *root);
+ struct path *path, const struct path *root);
int do_rmdir(int dfd, struct filename *name);
int do_unlinkat(int dfd, struct filename *name);
int may_linkat(struct mnt_idmap *idmap, const struct path *link);
@@ -66,6 +66,10 @@ int do_linkat(int olddfd, struct filename *old, int newdfd,
int vfs_tmpfile(struct mnt_idmap *idmap,
const struct path *parentpath,
struct file *file, umode_t mode);
+struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
+struct dentry *start_dirop(struct dentry *parent, struct qstr *name,
+ unsigned int lookup_flags);
+int lookup_noperm_common(struct qstr *qname, struct dentry *base);
/*
* namespace.c
@@ -83,9 +87,9 @@ void mnt_put_write_access_file(struct file *file);
extern void dissolve_on_fput(struct vfsmount *);
extern bool may_mount(void);
-int path_mount(const char *dev_name, struct path *path,
+int path_mount(const char *dev_name, const struct path *path,
const char *type_page, unsigned long flags, void *data_page);
-int path_umount(struct path *path, int flags);
+int path_umount(const struct path *path, int flags);
int show_path(struct seq_file *m, struct dentry *root);
@@ -100,6 +104,7 @@ extern void chroot_fs_refs(const struct path *, const struct path *);
struct file *alloc_empty_file(int flags, const struct cred *cred);
struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred);
struct file *alloc_empty_backing_file(int flags, const struct cred *cred);
+void backing_file_set_user_path(struct file *f, const struct path *path);
static inline void file_put_write_access(struct file *file)
{
@@ -118,6 +123,9 @@ static inline void put_file_access(struct file *file)
}
}
+void fput_close_sync(struct file *);
+void fput_close(struct file *);
+
/*
* super.c
*/
@@ -187,8 +195,8 @@ extern struct open_how build_open_how(int flags, umode_t mode);
extern int build_open_flags(const struct open_how *how, struct open_flags *op);
struct file *file_close_fd_locked(struct files_struct *files, unsigned fd);
-long do_ftruncate(struct file *file, loff_t length, int small);
-long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
+int do_ftruncate(struct file *file, loff_t length, int small);
+int do_sys_ftruncate(unsigned int fd, loff_t length, int small);
int chmod_common(const struct path *path, umode_t mode);
int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group,
int flag);
@@ -207,7 +215,6 @@ bool in_group_or_capable(struct mnt_idmap *idmap,
* fs-writeback.c
*/
extern long get_nr_dirty_inodes(void);
-void invalidate_inodes(struct super_block *sb);
/*
* dcache.c
@@ -223,7 +230,6 @@ extern void shrink_dcache_for_umount(struct super_block *);
extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *);
extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
const struct qstr *name, unsigned *seq);
-extern void d_genocide(struct dentry *);
/*
* pipe.c
@@ -319,12 +325,16 @@ struct mnt_idmap *alloc_mnt_idmap(struct user_namespace *mnt_userns);
struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap);
void mnt_idmap_put(struct mnt_idmap *idmap);
struct stashed_operations {
+ struct dentry *(*stash_dentry)(struct dentry **stashed,
+ struct dentry *dentry);
void (*put_data)(void *data);
int (*init_inode)(struct inode *inode, void *data);
};
int path_from_stashed(struct dentry **stashed, struct vfsmount *mnt, void *data,
struct path *path);
void stashed_dentry_prune(struct dentry *dentry);
+struct dentry *stash_dentry(struct dentry **stashed, struct dentry *dentry);
+struct dentry *stashed_dentry_get(struct dentry **stashed);
/**
* path_mounted - check whether path is mounted
* @path: path to check
@@ -338,3 +348,13 @@ static inline bool path_mounted(const struct path *path)
return path->mnt->mnt_root == path->dentry;
}
void file_f_owner_release(struct file *file);
+bool file_seek_cur_needs_f_lock(struct file *file);
+int statmount_mnt_idmap(struct mnt_idmap *idmap, struct seq_file *seq, bool uid_map);
+struct dentry *find_next_child(struct dentry *parent, struct dentry *prev);
+int anon_inode_getattr(struct mnt_idmap *idmap, const struct path *path,
+ struct kstat *stat, u32 request_mask,
+ unsigned int query_flags);
+int anon_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr);
+void pidfs_get_root(struct path *path);
+void nsfs_get_root(struct path *path);
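
Several of the declarations added above (stash_dentry(), stashed_dentry_get(), and the new stash_dentry op in struct stashed_operations) sit behind path_from_stashed(), whose prototype appears in context. A heavily hedged usage sketch, with hypothetical names, of how a pseudo-filesystem might obtain a path for a singleton object through that interface:

/*
 * Hypothetical wrapper: build *path for @obj on @mnt, letting
 * path_from_stashed() reuse the dentry cached in *stashed when possible.
 * Sketch only; the exact reuse/return convention is not shown in this hunk.
 */
static int demo_obj_path(struct dentry **stashed, struct vfsmount *mnt,
			 void *obj, struct path *path)
{
	return path_from_stashed(stashed, mnt, obj, path);
}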
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 638a36be31c1..1c152c2b1b67 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -41,7 +41,7 @@
*
* Returns 0 on success, -errno on error.
*/
-long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+static int vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int error = -ENOTTY;
@@ -54,7 +54,6 @@ long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
out:
return error;
}
-EXPORT_SYMBOL(vfs_ioctl);
static int ioctl_fibmap(struct file *filp, int __user *p)
{
@@ -228,8 +227,8 @@ static int ioctl_fiemap(struct file *filp, struct fiemap __user *ufiemap)
return error;
}
-static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
- u64 off, u64 olen, u64 destoff)
+static int ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
+ u64 off, u64 olen, u64 destoff)
{
CLASS(fd, src_file)(srcfd);
loff_t cloned;
@@ -248,8 +247,8 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
return ret;
}
-static long ioctl_file_clone_range(struct file *file,
- struct file_clone_range __user *argp)
+static int ioctl_file_clone_range(struct file *file,
+ struct file_clone_range __user *argp)
{
struct file_clone_range args;
@@ -396,8 +395,8 @@ static int ioctl_fsfreeze(struct file *filp)
/* Freeze */
if (sb->s_op->freeze_super)
- return sb->s_op->freeze_super(sb, FREEZE_HOLDER_USERSPACE);
- return freeze_super(sb, FREEZE_HOLDER_USERSPACE);
+ return sb->s_op->freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
+ return freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
}
static int ioctl_fsthaw(struct file *filp)
@@ -409,8 +408,8 @@ static int ioctl_fsthaw(struct file *filp)
/* Thaw */
if (sb->s_op->thaw_super)
- return sb->s_op->thaw_super(sb, FREEZE_HOLDER_USERSPACE);
- return thaw_super(sb, FREEZE_HOLDER_USERSPACE);
+ return sb->s_op->thaw_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
+ return thaw_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
}
static int ioctl_file_dedupe_range(struct file *file,
@@ -426,7 +425,7 @@ static int ioctl_file_dedupe_range(struct file *file,
goto out;
}
- size = offsetof(struct file_dedupe_range, info[count]);
+ size = struct_size(same, info, count);
if (size > PAGE_SIZE) {
ret = -ENOMEM;
goto out;
@@ -453,315 +452,6 @@ out:
return ret;
}
-/**
- * fileattr_fill_xflags - initialize fileattr with xflags
- * @fa: fileattr pointer
- * @xflags: FS_XFLAG_* flags
- *
- * Set ->fsx_xflags, ->fsx_valid and ->flags (translated xflags). All
- * other fields are zeroed.
- */
-void fileattr_fill_xflags(struct fileattr *fa, u32 xflags)
-{
- memset(fa, 0, sizeof(*fa));
- fa->fsx_valid = true;
- fa->fsx_xflags = xflags;
- if (fa->fsx_xflags & FS_XFLAG_IMMUTABLE)
- fa->flags |= FS_IMMUTABLE_FL;
- if (fa->fsx_xflags & FS_XFLAG_APPEND)
- fa->flags |= FS_APPEND_FL;
- if (fa->fsx_xflags & FS_XFLAG_SYNC)
- fa->flags |= FS_SYNC_FL;
- if (fa->fsx_xflags & FS_XFLAG_NOATIME)
- fa->flags |= FS_NOATIME_FL;
- if (fa->fsx_xflags & FS_XFLAG_NODUMP)
- fa->flags |= FS_NODUMP_FL;
- if (fa->fsx_xflags & FS_XFLAG_DAX)
- fa->flags |= FS_DAX_FL;
- if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
- fa->flags |= FS_PROJINHERIT_FL;
-}
-EXPORT_SYMBOL(fileattr_fill_xflags);
-
-/**
- * fileattr_fill_flags - initialize fileattr with flags
- * @fa: fileattr pointer
- * @flags: FS_*_FL flags
- *
- * Set ->flags, ->flags_valid and ->fsx_xflags (translated flags).
- * All other fields are zeroed.
- */
-void fileattr_fill_flags(struct fileattr *fa, u32 flags)
-{
- memset(fa, 0, sizeof(*fa));
- fa->flags_valid = true;
- fa->flags = flags;
- if (fa->flags & FS_SYNC_FL)
- fa->fsx_xflags |= FS_XFLAG_SYNC;
- if (fa->flags & FS_IMMUTABLE_FL)
- fa->fsx_xflags |= FS_XFLAG_IMMUTABLE;
- if (fa->flags & FS_APPEND_FL)
- fa->fsx_xflags |= FS_XFLAG_APPEND;
- if (fa->flags & FS_NODUMP_FL)
- fa->fsx_xflags |= FS_XFLAG_NODUMP;
- if (fa->flags & FS_NOATIME_FL)
- fa->fsx_xflags |= FS_XFLAG_NOATIME;
- if (fa->flags & FS_DAX_FL)
- fa->fsx_xflags |= FS_XFLAG_DAX;
- if (fa->flags & FS_PROJINHERIT_FL)
- fa->fsx_xflags |= FS_XFLAG_PROJINHERIT;
-}
-EXPORT_SYMBOL(fileattr_fill_flags);
-
-/**
- * vfs_fileattr_get - retrieve miscellaneous file attributes
- * @dentry: the object to retrieve from
- * @fa: fileattr pointer
- *
- * Call i_op->fileattr_get() callback, if exists.
- *
- * Return: 0 on success, or a negative error on failure.
- */
-int vfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
-{
- struct inode *inode = d_inode(dentry);
-
- if (!inode->i_op->fileattr_get)
- return -ENOIOCTLCMD;
-
- return inode->i_op->fileattr_get(dentry, fa);
-}
-EXPORT_SYMBOL(vfs_fileattr_get);
-
-/**
- * copy_fsxattr_to_user - copy fsxattr to userspace.
- * @fa: fileattr pointer
- * @ufa: fsxattr user pointer
- *
- * Return: 0 on success, or -EFAULT on failure.
- */
-int copy_fsxattr_to_user(const struct fileattr *fa, struct fsxattr __user *ufa)
-{
- struct fsxattr xfa;
-
- memset(&xfa, 0, sizeof(xfa));
- xfa.fsx_xflags = fa->fsx_xflags;
- xfa.fsx_extsize = fa->fsx_extsize;
- xfa.fsx_nextents = fa->fsx_nextents;
- xfa.fsx_projid = fa->fsx_projid;
- xfa.fsx_cowextsize = fa->fsx_cowextsize;
-
- if (copy_to_user(ufa, &xfa, sizeof(xfa)))
- return -EFAULT;
-
- return 0;
-}
-EXPORT_SYMBOL(copy_fsxattr_to_user);
-
-static int copy_fsxattr_from_user(struct fileattr *fa,
- struct fsxattr __user *ufa)
-{
- struct fsxattr xfa;
-
- if (copy_from_user(&xfa, ufa, sizeof(xfa)))
- return -EFAULT;
-
- fileattr_fill_xflags(fa, xfa.fsx_xflags);
- fa->fsx_extsize = xfa.fsx_extsize;
- fa->fsx_nextents = xfa.fsx_nextents;
- fa->fsx_projid = xfa.fsx_projid;
- fa->fsx_cowextsize = xfa.fsx_cowextsize;
-
- return 0;
-}
-
-/*
- * Generic function to check FS_IOC_FSSETXATTR/FS_IOC_SETFLAGS values and reject
- * any invalid configurations.
- *
- * Note: must be called with inode lock held.
- */
-static int fileattr_set_prepare(struct inode *inode,
- const struct fileattr *old_ma,
- struct fileattr *fa)
-{
- int err;
-
- /*
- * The IMMUTABLE and APPEND_ONLY flags can only be changed by
- * the relevant capability.
- */
- if ((fa->flags ^ old_ma->flags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) &&
- !capable(CAP_LINUX_IMMUTABLE))
- return -EPERM;
-
- err = fscrypt_prepare_setflags(inode, old_ma->flags, fa->flags);
- if (err)
- return err;
-
- /*
- * Project Quota ID state is only allowed to change from within the init
- * namespace. Enforce that restriction only if we are trying to change
- * the quota ID state. Everything else is allowed in user namespaces.
- */
- if (current_user_ns() != &init_user_ns) {
- if (old_ma->fsx_projid != fa->fsx_projid)
- return -EINVAL;
- if ((old_ma->fsx_xflags ^ fa->fsx_xflags) &
- FS_XFLAG_PROJINHERIT)
- return -EINVAL;
- } else {
- /*
- * Caller is allowed to change the project ID. If it is being
- * changed, make sure that the new value is valid.
- */
- if (old_ma->fsx_projid != fa->fsx_projid &&
- !projid_valid(make_kprojid(&init_user_ns, fa->fsx_projid)))
- return -EINVAL;
- }
-
- /* Check extent size hints. */
- if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(inode->i_mode))
- return -EINVAL;
-
- if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) &&
- !S_ISDIR(inode->i_mode))
- return -EINVAL;
-
- if ((fa->fsx_xflags & FS_XFLAG_COWEXTSIZE) &&
- !S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
- return -EINVAL;
-
- /*
- * It is only valid to set the DAX flag on regular files and
- * directories on filesystems.
- */
- if ((fa->fsx_xflags & FS_XFLAG_DAX) &&
- !(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
- return -EINVAL;
-
- /* Extent size hints of zero turn off the flags. */
- if (fa->fsx_extsize == 0)
- fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT);
- if (fa->fsx_cowextsize == 0)
- fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE;
-
- return 0;
-}
-
-/**
- * vfs_fileattr_set - change miscellaneous file attributes
- * @idmap: idmap of the mount
- * @dentry: the object to change
- * @fa: fileattr pointer
- *
- * After verifying permissions, call i_op->fileattr_set() callback, if
- * exists.
- *
- * Verifying attributes involves retrieving current attributes with
- * i_op->fileattr_get(), this also allows initializing attributes that have
- * not been set by the caller to current values. Inode lock is held
- * thoughout to prevent racing with another instance.
- *
- * Return: 0 on success, or a negative error on failure.
- */
-int vfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
- struct fileattr *fa)
-{
- struct inode *inode = d_inode(dentry);
- struct fileattr old_ma = {};
- int err;
-
- if (!inode->i_op->fileattr_set)
- return -ENOIOCTLCMD;
-
- if (!inode_owner_or_capable(idmap, inode))
- return -EPERM;
-
- inode_lock(inode);
- err = vfs_fileattr_get(dentry, &old_ma);
- if (!err) {
- /* initialize missing bits from old_ma */
- if (fa->flags_valid) {
- fa->fsx_xflags |= old_ma.fsx_xflags & ~FS_XFLAG_COMMON;
- fa->fsx_extsize = old_ma.fsx_extsize;
- fa->fsx_nextents = old_ma.fsx_nextents;
- fa->fsx_projid = old_ma.fsx_projid;
- fa->fsx_cowextsize = old_ma.fsx_cowextsize;
- } else {
- fa->flags |= old_ma.flags & ~FS_COMMON_FL;
- }
- err = fileattr_set_prepare(inode, &old_ma, fa);
- if (!err)
- err = inode->i_op->fileattr_set(idmap, dentry, fa);
- }
- inode_unlock(inode);
-
- return err;
-}
-EXPORT_SYMBOL(vfs_fileattr_set);
-
-static int ioctl_getflags(struct file *file, unsigned int __user *argp)
-{
- struct fileattr fa = { .flags_valid = true }; /* hint only */
- int err;
-
- err = vfs_fileattr_get(file->f_path.dentry, &fa);
- if (!err)
- err = put_user(fa.flags, argp);
- return err;
-}
-
-static int ioctl_setflags(struct file *file, unsigned int __user *argp)
-{
- struct mnt_idmap *idmap = file_mnt_idmap(file);
- struct dentry *dentry = file->f_path.dentry;
- struct fileattr fa;
- unsigned int flags;
- int err;
-
- err = get_user(flags, argp);
- if (!err) {
- err = mnt_want_write_file(file);
- if (!err) {
- fileattr_fill_flags(&fa, flags);
- err = vfs_fileattr_set(idmap, dentry, &fa);
- mnt_drop_write_file(file);
- }
- }
- return err;
-}
-
-static int ioctl_fsgetxattr(struct file *file, void __user *argp)
-{
- struct fileattr fa = { .fsx_valid = true }; /* hint only */
- int err;
-
- err = vfs_fileattr_get(file->f_path.dentry, &fa);
- if (!err)
- err = copy_fsxattr_to_user(&fa, argp);
-
- return err;
-}
-
-static int ioctl_fssetxattr(struct file *file, void __user *argp)
-{
- struct mnt_idmap *idmap = file_mnt_idmap(file);
- struct dentry *dentry = file->f_path.dentry;
- struct fileattr fa;
- int err;
-
- err = copy_fsxattr_from_user(&fa, argp);
- if (!err) {
- err = mnt_want_write_file(file);
- if (!err) {
- err = vfs_fileattr_set(idmap, dentry, &fa);
- mnt_drop_write_file(file);
- }
- }
- return err;
-}
-
static int ioctl_getfsuuid(struct file *file, void __user *argp)
{
struct super_block *sb = file_inode(file)->i_sb;
@@ -821,7 +511,8 @@ static int do_vfs_ioctl(struct file *filp, unsigned int fd,
return ioctl_fioasync(fd, filp, argp);
case FIOQSIZE:
- if (S_ISDIR(inode->i_mode) || S_ISREG(inode->i_mode) ||
+ if (S_ISDIR(inode->i_mode) ||
+ (S_ISREG(inode->i_mode) && !IS_ANON_FILE(inode)) ||
S_ISLNK(inode->i_mode)) {
loff_t res = inode_get_bytes(inode);
return copy_to_user(argp, &res, sizeof(res)) ?
@@ -856,7 +547,7 @@ static int do_vfs_ioctl(struct file *filp, unsigned int fd,
return ioctl_file_dedupe_range(filp, argp);
case FIONREAD:
- if (!S_ISREG(inode->i_mode))
+ if (!S_ISREG(inode->i_mode) || IS_ANON_FILE(inode))
return vfs_ioctl(filp, cmd, arg);
return put_user(i_size_read(inode) - filp->f_pos,
@@ -881,7 +572,7 @@ static int do_vfs_ioctl(struct file *filp, unsigned int fd,
return ioctl_get_fs_sysfs_path(filp, argp);
default:
- if (S_ISREG(inode->i_mode))
+ if (S_ISREG(inode->i_mode) && !IS_ANON_FILE(inode))
return file_ioctl(filp, cmd, argp);
break;
}
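
One small change above replaces an open-coded offsetof(struct file_dedupe_range, info[count]) with struct_size(), which computes the size of a structure ending in a flexible array and saturates to SIZE_MAX on overflow. A minimal illustration using a hypothetical structure (not the kernel's file_dedupe_range):

#include <linux/overflow.h>
#include <linux/types.h>

struct demo_range {
	u32 count;
	u64 info[];	/* flexible array member */
};

static size_t demo_range_bytes(const struct demo_range *r, size_t count)
{
	/* sizeof(*r) + count * sizeof(r->info[0]), saturating on overflow */
	return struct_size(r, info, count);
}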
diff --git a/fs/iomap/Makefile b/fs/iomap/Makefile
index 381d76c5c232..a572b8808524 100644
--- a/fs/iomap/Makefile
+++ b/fs/iomap/Makefile
@@ -9,9 +9,11 @@ ccflags-y += -I $(src) # needed for trace events
obj-$(CONFIG_FS_IOMAP) += iomap.o
iomap-y += trace.o \
- iter.o
-iomap-$(CONFIG_BLOCK) += buffered-io.o \
- direct-io.o \
+ iter.o \
+ buffered-io.o
+iomap-$(CONFIG_BLOCK) += direct-io.o \
+ ioend.o \
fiemap.o \
- seek.o
+ seek.o \
+ bio.o
iomap-$(CONFIG_SWAP) += swapfile.o
diff --git a/fs/iomap/bio.c b/fs/iomap/bio.c
new file mode 100644
index 000000000000..fc045f2e4c45
--- /dev/null
+++ b/fs/iomap/bio.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 Red Hat, Inc.
+ * Copyright (C) 2016-2023 Christoph Hellwig.
+ */
+#include <linux/iomap.h>
+#include <linux/pagemap.h>
+#include "internal.h"
+#include "trace.h"
+
+static void iomap_read_end_io(struct bio *bio)
+{
+ int error = blk_status_to_errno(bio->bi_status);
+ struct folio_iter fi;
+
+ bio_for_each_folio_all(fi, bio)
+ iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
+ bio_put(bio);
+}
+
+static void iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
+{
+ struct bio *bio = ctx->read_ctx;
+
+ if (bio)
+ submit_bio(bio);
+}
+
+static int iomap_bio_read_folio_range(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx, size_t plen)
+{
+ struct folio *folio = ctx->cur_folio;
+ const struct iomap *iomap = &iter->iomap;
+ loff_t pos = iter->pos;
+ size_t poff = offset_in_folio(folio, pos);
+ loff_t length = iomap_length(iter);
+ sector_t sector;
+ struct bio *bio = ctx->read_ctx;
+
+ sector = iomap_sector(iomap, pos);
+ if (!bio || bio_end_sector(bio) != sector ||
+ !bio_add_folio(bio, folio, plen, poff)) {
+ gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
+ gfp_t orig_gfp = gfp;
+ unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
+
+ if (bio)
+ submit_bio(bio);
+
+ if (ctx->rac) /* same as readahead_gfp_mask */
+ gfp |= __GFP_NORETRY | __GFP_NOWARN;
+ bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
+ gfp);
+ /*
+ * If the bio_alloc fails, try it again for a single page to
+ * avoid having to deal with partial page reads. This emulates
+ * what do_mpage_read_folio does.
+ */
+ if (!bio)
+ bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
+ if (ctx->rac)
+ bio->bi_opf |= REQ_RAHEAD;
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_end_io = iomap_read_end_io;
+ bio_add_folio_nofail(bio, folio, plen, poff);
+ ctx->read_ctx = bio;
+ }
+ return 0;
+}
+
+const struct iomap_read_ops iomap_bio_read_ops = {
+ .read_folio_range = iomap_bio_read_folio_range,
+ .submit_read = iomap_bio_submit_read,
+};
+EXPORT_SYMBOL_GPL(iomap_bio_read_ops);
+
+int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
+ struct folio *folio, loff_t pos, size_t len)
+{
+ const struct iomap *srcmap = iomap_iter_srcmap(iter);
+ struct bio_vec bvec;
+ struct bio bio;
+
+ bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
+ bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
+ bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
+ return submit_bio_wait(&bio);
+}
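
The new fs/iomap/bio.c above supplies the bio-backed read ops (read_folio_range plus submit_read) that buffered-io.c now drives through a caller-provided iomap_read_folio_ctx. A sketch, assuming the context fields are exactly those used in this patch, of what a filesystem ->read_folio built on it could look like; demo_iomap_ops and demo_read_folio are hypothetical names:

/* Provided by the filesystem (hypothetical name). */
extern const struct iomap_ops demo_iomap_ops;

static int demo_read_folio(struct file *file, struct folio *folio)
{
	struct iomap_read_folio_ctx ctx = {
		.ops		= &iomap_bio_read_ops,
		.cur_folio	= folio,
	};

	/* iomap_read_folio() ends/unlocks the folio itself, see iomap_read_end(). */
	iomap_read_folio(&demo_iomap_ops, &ctx);
	return 0;
}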
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 955f19e27e47..e5c1ca440d93 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -3,26 +3,16 @@
* Copyright (C) 2010 Red Hat, Inc.
* Copyright (C) 2016-2023 Christoph Hellwig.
*/
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/fs.h>
#include <linux/iomap.h>
-#include <linux/pagemap.h>
-#include <linux/uio.h>
#include <linux/buffer_head.h>
-#include <linux/dax.h>
#include <linux/writeback.h>
-#include <linux/list_sort.h>
#include <linux/swap.h>
-#include <linux/bio.h>
-#include <linux/sched/signal.h>
#include <linux/migrate.h>
+#include "internal.h"
#include "trace.h"
#include "../internal.h"
-#define IOEND_BATCH_SIZE 4096
-
/*
* Structure allocated for each folio to track per-block uptodate, dirty state
* and I/O completions.
@@ -40,8 +30,6 @@ struct iomap_folio_state {
unsigned long state[];
};
-static struct bio_set iomap_ioend_bioset;
-
static inline bool ifs_is_fully_uptodate(struct folio *folio,
struct iomap_folio_state *ifs)
{
@@ -50,10 +38,28 @@ static inline bool ifs_is_fully_uptodate(struct folio *folio,
return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
}
-static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs,
- unsigned int block)
+/*
+ * Find the next uptodate block in the folio. end_blk is inclusive.
+ * If no uptodate block is found, this will return end_blk + 1.
+ */
+static unsigned ifs_next_uptodate_block(struct folio *folio,
+ unsigned start_blk, unsigned end_blk)
+{
+ struct iomap_folio_state *ifs = folio->private;
+
+ return find_next_bit(ifs->state, end_blk + 1, start_blk);
+}
+
+/*
+ * Find the next non-uptodate block in the folio. end_blk is inclusive.
+ * If no non-uptodate block is found, this will return end_blk + 1.
+ */
+static unsigned ifs_next_nonuptodate_block(struct folio *folio,
+ unsigned start_blk, unsigned end_blk)
{
- return test_bit(block, ifs->state);
+ struct iomap_folio_state *ifs = folio->private;
+
+ return find_next_zero_bit(ifs->state, end_blk + 1, start_blk);
}
static bool ifs_set_range_uptodate(struct folio *folio,
@@ -75,6 +81,9 @@ static void iomap_set_range_uptodate(struct folio *folio, size_t off,
unsigned long flags;
bool uptodate = true;
+ if (folio_test_uptodate(folio))
+ return;
+
if (ifs) {
spin_lock_irqsave(&ifs->state_lock, flags);
uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
@@ -85,13 +94,34 @@ static void iomap_set_range_uptodate(struct folio *folio, size_t off,
folio_mark_uptodate(folio);
}
-static inline bool ifs_block_is_dirty(struct folio *folio,
- struct iomap_folio_state *ifs, int block)
+/*
+ * Find the next dirty block in the folio. end_blk is inclusive.
+ * If no dirty block is found, this will return end_blk + 1.
+ */
+static unsigned ifs_next_dirty_block(struct folio *folio,
+ unsigned start_blk, unsigned end_blk)
{
+ struct iomap_folio_state *ifs = folio->private;
struct inode *inode = folio->mapping->host;
- unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
+ unsigned int blks = i_blocks_per_folio(inode, folio);
- return test_bit(block + blks_per_folio, ifs->state);
+ return find_next_bit(ifs->state, blks + end_blk + 1,
+ blks + start_blk) - blks;
+}
+
+/*
+ * Find the next clean block in the folio. end_blk is inclusive.
+ * If no clean block is found, this will return end_blk + 1.
+ */
+static unsigned ifs_next_clean_block(struct folio *folio,
+ unsigned start_blk, unsigned end_blk)
+{
+ struct iomap_folio_state *ifs = folio->private;
+ struct inode *inode = folio->mapping->host;
+ unsigned int blks = i_blocks_per_folio(inode, folio);
+
+ return find_next_zero_bit(ifs->state, blks + end_blk + 1,
+ blks + start_blk) - blks;
}
static unsigned ifs_find_dirty_range(struct folio *folio,
@@ -102,18 +132,17 @@ static unsigned ifs_find_dirty_range(struct folio *folio,
offset_in_folio(folio, *range_start) >> inode->i_blkbits;
unsigned end_blk = min_not_zero(
offset_in_folio(folio, range_end) >> inode->i_blkbits,
- i_blocks_per_folio(inode, folio));
- unsigned nblks = 1;
-
- while (!ifs_block_is_dirty(folio, ifs, start_blk))
- if (++start_blk == end_blk)
- return 0;
+ i_blocks_per_folio(inode, folio)) - 1;
+ unsigned nblks;
- while (start_blk + nblks < end_blk) {
- if (!ifs_block_is_dirty(folio, ifs, start_blk + nblks))
- break;
- nblks++;
- }
+ start_blk = ifs_next_dirty_block(folio, start_blk, end_blk);
+ if (start_blk > end_blk)
+ return 0;
+ if (start_blk == end_blk)
+ nblks = 1;
+ else
+ nblks = ifs_next_clean_block(folio, start_blk + 1, end_blk) -
+ start_blk;
*range_start = folio_pos(folio) + (start_blk << inode->i_blkbits);
return nblks << inode->i_blkbits;
@@ -228,6 +257,22 @@ static void ifs_free(struct folio *folio)
}
/*
+ * Calculate how many bytes to truncate based on the number of blocks to
+ * truncate and the end position to start truncating from.
+ */
+static size_t iomap_bytes_to_truncate(loff_t end_pos, unsigned block_bits,
+ unsigned blocks_truncated)
+{
+ unsigned block_size = 1 << block_bits;
+ unsigned block_offset = end_pos & (block_size - 1);
+
+ if (!block_offset)
+ return blocks_truncated << block_bits;
+
+ return ((blocks_truncated - 1) << block_bits) + block_offset;
+}
+
+/*
* Calculate the range inside the folio that we actually need to read.
*/
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
@@ -250,24 +295,29 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
* to avoid reading in already uptodate ranges.
*/
if (ifs) {
- unsigned int i;
-
- /* move forward for each leading block marked uptodate */
- for (i = first; i <= last; i++) {
- if (!ifs_block_is_uptodate(ifs, i))
- break;
- *pos += block_size;
- poff += block_size;
- plen -= block_size;
- first++;
+ unsigned int next, blocks_skipped;
+
+ next = ifs_next_nonuptodate_block(folio, first, last);
+ blocks_skipped = next - first;
+
+ if (blocks_skipped) {
+ unsigned long block_offset = *pos & (block_size - 1);
+ unsigned bytes_skipped =
+ (blocks_skipped << block_bits) - block_offset;
+
+ *pos += bytes_skipped;
+ poff += bytes_skipped;
+ plen -= bytes_skipped;
}
+ first = next;
/* truncate len if we find any trailing uptodate block(s) */
- for ( ; i <= last; i++) {
- if (ifs_block_is_uptodate(ifs, i)) {
- plen -= (last - i + 1) * block_size;
- last = i - 1;
- break;
+ if (++next <= last) {
+ next = ifs_next_uptodate_block(folio, next, last);
+ if (next <= last) {
+ plen -= iomap_bytes_to_truncate(*pos + plen,
+ block_bits, last - next + 1);
+ last = next - 1;
}
}
}
@@ -281,52 +331,24 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
if (first <= end && last > end)
- plen -= (last - end) * block_size;
+ plen -= iomap_bytes_to_truncate(*pos + plen, block_bits,
+ last - end);
}
*offp = poff;
*lenp = plen;
}
-static void iomap_finish_folio_read(struct folio *folio, size_t off,
- size_t len, int error)
-{
- struct iomap_folio_state *ifs = folio->private;
- bool uptodate = !error;
- bool finished = true;
-
- if (ifs) {
- unsigned long flags;
-
- spin_lock_irqsave(&ifs->state_lock, flags);
- if (!error)
- uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
- ifs->read_bytes_pending -= len;
- finished = !ifs->read_bytes_pending;
- spin_unlock_irqrestore(&ifs->state_lock, flags);
- }
-
- if (finished)
- folio_end_read(folio, uptodate);
-}
-
-static void iomap_read_end_io(struct bio *bio)
+static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
+ loff_t pos)
{
- int error = blk_status_to_errno(bio->bi_status);
- struct folio_iter fi;
+ const struct iomap *srcmap = iomap_iter_srcmap(iter);
- bio_for_each_folio_all(fi, bio)
- iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
- bio_put(bio);
+ return srcmap->type != IOMAP_MAPPED ||
+ (srcmap->flags & IOMAP_F_NEW) ||
+ pos >= i_size_read(iter->inode);
}
-struct iomap_readpage_ctx {
- struct folio *cur_folio;
- bool cur_folio_in_bio;
- struct bio *bio;
- struct readahead_control *rac;
-};
-
/**
* iomap_read_inline_data - copy inline data into the page cache
* @iter: iteration structure
@@ -343,6 +365,9 @@ static int iomap_read_inline_data(const struct iomap_iter *iter,
size_t size = i_size_read(iter->inode) - iomap->offset;
size_t offset = offset_in_folio(folio, iomap->offset);
+ if (WARN_ON_ONCE(!iomap->inline_data))
+ return -EIO;
+
if (folio_test_uptodate(folio))
return 0;
@@ -356,172 +381,233 @@ static int iomap_read_inline_data(const struct iomap_iter *iter,
return 0;
}
-static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
- loff_t pos)
+void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
+ int error)
{
- const struct iomap *srcmap = iomap_iter_srcmap(iter);
+ struct iomap_folio_state *ifs = folio->private;
+ bool uptodate = !error;
+ bool finished = true;
- return srcmap->type != IOMAP_MAPPED ||
- (srcmap->flags & IOMAP_F_NEW) ||
- pos >= i_size_read(iter->inode);
+ if (ifs) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ifs->state_lock, flags);
+ if (!error)
+ uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
+ ifs->read_bytes_pending -= len;
+ finished = !ifs->read_bytes_pending;
+ spin_unlock_irqrestore(&ifs->state_lock, flags);
+ }
+
+ if (finished)
+ folio_end_read(folio, uptodate);
}
+EXPORT_SYMBOL_GPL(iomap_finish_folio_read);
-static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
- struct iomap_readpage_ctx *ctx, loff_t offset)
+static void iomap_read_init(struct folio *folio)
{
- const struct iomap *iomap = &iter->iomap;
- loff_t pos = iter->pos + offset;
- loff_t length = iomap_length(iter) - offset;
- struct folio *folio = ctx->cur_folio;
- struct iomap_folio_state *ifs;
- loff_t orig_pos = pos;
- size_t poff, plen;
- sector_t sector;
-
- if (iomap->type == IOMAP_INLINE)
- return iomap_read_inline_data(iter, folio);
+ struct iomap_folio_state *ifs = folio->private;
- /* zero post-eof blocks as the page may be mapped */
- ifs = ifs_alloc(iter->inode, folio, iter->flags);
- iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
- if (plen == 0)
- goto done;
+ if (ifs) {
+ size_t len = folio_size(folio);
- if (iomap_block_needs_zeroing(iter, pos)) {
- folio_zero_range(folio, poff, plen);
- iomap_set_range_uptodate(folio, poff, plen);
- goto done;
+ /*
+ * ifs->read_bytes_pending is used to track how many bytes are
+ * read in asynchronously by the IO helper. We need to track
+ * this so that we can know when the IO helper has finished
+ * reading in all the necessary ranges of the folio and can end
+ * the read.
+ *
+ * Increase ->read_bytes_pending by the folio size to start, and
+ * add a +1 bias. We'll subtract the bias and any uptodate /
+ * zeroed ranges that did not require IO in iomap_read_end()
+ * after we're done processing the folio.
+ *
+ * We do this because otherwise, we would have to increment
+ * ifs->read_bytes_pending every time a range in the folio needs
+ * to be read in, which can get expensive since the spinlock
+ * needs to be held whenever modifying ifs->read_bytes_pending.
+ *
+ * We add the bias to ensure the read has not been ended on the
+ * folio when iomap_read_end() is called, even if the IO helper
+ * has already finished reading in the entire folio.
+ */
+ spin_lock_irq(&ifs->state_lock);
+ WARN_ON_ONCE(ifs->read_bytes_pending != 0);
+ ifs->read_bytes_pending = len + 1;
+ spin_unlock_irq(&ifs->state_lock);
}
+}
+
+/*
+ * This ends IO if no bytes were submitted to an IO helper.
+ *
+ * Otherwise, this calibrates ifs->read_bytes_pending to represent only the
+ * submitted bytes (see comment in iomap_read_init()). If all bytes submitted
+ * have already been completed by the IO helper, then this will end the read.
+ * Else the IO helper will end the read after all submitted ranges have been
+ * read.
+ */
+static void iomap_read_end(struct folio *folio, size_t bytes_submitted)
+{
+ struct iomap_folio_state *ifs = folio->private;
- ctx->cur_folio_in_bio = true;
if (ifs) {
+ bool end_read, uptodate;
+
spin_lock_irq(&ifs->state_lock);
- ifs->read_bytes_pending += plen;
- spin_unlock_irq(&ifs->state_lock);
- }
+ if (!ifs->read_bytes_pending) {
+ WARN_ON_ONCE(bytes_submitted);
+ spin_unlock_irq(&ifs->state_lock);
+ folio_unlock(folio);
+ return;
+ }
- sector = iomap_sector(iomap, pos);
- if (!ctx->bio ||
- bio_end_sector(ctx->bio) != sector ||
- !bio_add_folio(ctx->bio, folio, plen, poff)) {
- gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
- gfp_t orig_gfp = gfp;
- unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
-
- if (ctx->bio)
- submit_bio(ctx->bio);
-
- if (ctx->rac) /* same as readahead_gfp_mask */
- gfp |= __GFP_NORETRY | __GFP_NOWARN;
- ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
- REQ_OP_READ, gfp);
/*
- * If the bio_alloc fails, try it again for a single page to
- * avoid having to deal with partial page reads. This emulates
- * what do_mpage_read_folio does.
+ * Subtract any bytes that were initially accounted to
+ * read_bytes_pending but skipped for IO. The +1 accounts for
+ * the bias we added in iomap_read_init().
*/
- if (!ctx->bio) {
- ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
- orig_gfp);
- }
- if (ctx->rac)
- ctx->bio->bi_opf |= REQ_RAHEAD;
- ctx->bio->bi_iter.bi_sector = sector;
- ctx->bio->bi_end_io = iomap_read_end_io;
- bio_add_folio_nofail(ctx->bio, folio, plen, poff);
- }
+ ifs->read_bytes_pending -=
+ (folio_size(folio) + 1 - bytes_submitted);
-done:
- /*
- * Move the caller beyond our range so that it keeps making progress.
- * For that, we have to include any leading non-uptodate ranges, but
- * we can skip trailing ones as they will be handled in the next
- * iteration.
- */
- return pos - orig_pos + plen;
+ /*
+ * If !ifs->read_bytes_pending, this means all pending reads by
+ * the IO helper have already completed, which means we need to
+ * end the folio read here. If ifs->read_bytes_pending != 0,
+ * the IO helper will end the folio read.
+ */
+ end_read = !ifs->read_bytes_pending;
+ if (end_read)
+ uptodate = ifs_is_fully_uptodate(folio, ifs);
+ spin_unlock_irq(&ifs->state_lock);
+ if (end_read)
+ folio_end_read(folio, uptodate);
+ } else if (!bytes_submitted) {
+ /*
+ * If there were no bytes submitted, this means we are
+ * responsible for unlocking the folio here, since no IO helper
+ * has taken ownership of it. If there were bytes submitted,
+ * then the IO helper will end the read via
+ * iomap_finish_folio_read().
+ */
+ folio_unlock(folio);
+ }
}
-static loff_t iomap_read_folio_iter(const struct iomap_iter *iter,
- struct iomap_readpage_ctx *ctx)
+static int iomap_read_folio_iter(struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx, size_t *bytes_submitted)
{
+ const struct iomap *iomap = &iter->iomap;
+ loff_t pos = iter->pos;
+ loff_t length = iomap_length(iter);
struct folio *folio = ctx->cur_folio;
- size_t offset = offset_in_folio(folio, iter->pos);
- loff_t length = min_t(loff_t, folio_size(folio) - offset,
- iomap_length(iter));
- loff_t done, ret;
-
- for (done = 0; done < length; done += ret) {
- ret = iomap_readpage_iter(iter, ctx, done);
- if (ret <= 0)
+ size_t poff, plen;
+ loff_t pos_diff;
+ int ret;
+
+ if (iomap->type == IOMAP_INLINE) {
+ ret = iomap_read_inline_data(iter, folio);
+ if (ret)
return ret;
+ return iomap_iter_advance(iter, length);
}
- return done;
+ ifs_alloc(iter->inode, folio, iter->flags);
+
+ length = min_t(loff_t, length,
+ folio_size(folio) - offset_in_folio(folio, pos));
+ while (length) {
+ iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff,
+ &plen);
+
+ pos_diff = pos - iter->pos;
+ if (WARN_ON_ONCE(pos_diff + plen > length))
+ return -EIO;
+
+ ret = iomap_iter_advance(iter, pos_diff);
+ if (ret)
+ return ret;
+
+ if (plen == 0)
+ return 0;
+
+ /* zero post-eof blocks as the page may be mapped */
+ if (iomap_block_needs_zeroing(iter, pos)) {
+ folio_zero_range(folio, poff, plen);
+ iomap_set_range_uptodate(folio, poff, plen);
+ } else {
+ if (!*bytes_submitted)
+ iomap_read_init(folio);
+ ret = ctx->ops->read_folio_range(iter, ctx, plen);
+ if (ret)
+ return ret;
+ *bytes_submitted += plen;
+ }
+
+ ret = iomap_iter_advance(iter, plen);
+ if (ret)
+ return ret;
+ length -= pos_diff + plen;
+ pos = iter->pos;
+ }
+ return 0;
}
-int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
+void iomap_read_folio(const struct iomap_ops *ops,
+ struct iomap_read_folio_ctx *ctx)
{
+ struct folio *folio = ctx->cur_folio;
struct iomap_iter iter = {
.inode = folio->mapping->host,
.pos = folio_pos(folio),
.len = folio_size(folio),
};
- struct iomap_readpage_ctx ctx = {
- .cur_folio = folio,
- };
+ size_t bytes_submitted = 0;
int ret;
trace_iomap_readpage(iter.inode, 1);
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_read_folio_iter(&iter, &ctx);
+ iter.status = iomap_read_folio_iter(&iter, ctx,
+ &bytes_submitted);
- if (ctx.bio) {
- submit_bio(ctx.bio);
- WARN_ON_ONCE(!ctx.cur_folio_in_bio);
- } else {
- WARN_ON_ONCE(ctx.cur_folio_in_bio);
- folio_unlock(folio);
- }
+ if (ctx->ops->submit_read)
+ ctx->ops->submit_read(ctx);
- /*
- * Just like mpage_readahead and block_read_full_folio, we always
- * return 0 and just set the folio error flag on errors. This
- * should be cleaned up throughout the stack eventually.
- */
- return 0;
+ iomap_read_end(folio, bytes_submitted);
}
EXPORT_SYMBOL_GPL(iomap_read_folio);
-static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
- struct iomap_readpage_ctx *ctx)
+static int iomap_readahead_iter(struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx, size_t *cur_bytes_submitted)
{
- loff_t length = iomap_length(iter);
- loff_t done, ret;
+ int ret;
- for (done = 0; done < length; done += ret) {
+ while (iomap_length(iter)) {
if (ctx->cur_folio &&
- offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
- if (!ctx->cur_folio_in_bio)
- folio_unlock(ctx->cur_folio);
+ offset_in_folio(ctx->cur_folio, iter->pos) == 0) {
+ iomap_read_end(ctx->cur_folio, *cur_bytes_submitted);
ctx->cur_folio = NULL;
}
if (!ctx->cur_folio) {
ctx->cur_folio = readahead_folio(ctx->rac);
- ctx->cur_folio_in_bio = false;
+ if (WARN_ON_ONCE(!ctx->cur_folio))
+ return -EINVAL;
+ *cur_bytes_submitted = 0;
}
- ret = iomap_readpage_iter(iter, ctx, done);
- if (ret <= 0)
+ ret = iomap_read_folio_iter(iter, ctx, cur_bytes_submitted);
+ if (ret)
return ret;
}
- return done;
+ return 0;
}
/**
* iomap_readahead - Attempt to read pages from a file.
- * @rac: Describes the pages to be read.
* @ops: The operations vector for the filesystem.
+ * @ctx: The ctx used for issuing readahead.
*
* This function is for filesystems to call to implement their readahead
* address_space operation.
@@ -533,28 +619,28 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
* function is called with memalloc_nofs set, so allocations will not cause
* the filesystem to be reentered.
*/
-void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
+void iomap_readahead(const struct iomap_ops *ops,
+ struct iomap_read_folio_ctx *ctx)
{
+ struct readahead_control *rac = ctx->rac;
struct iomap_iter iter = {
.inode = rac->mapping->host,
.pos = readahead_pos(rac),
.len = readahead_length(rac),
};
- struct iomap_readpage_ctx ctx = {
- .rac = rac,
- };
+ size_t cur_bytes_submitted;
trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
while (iomap_iter(&iter, ops) > 0)
- iter.processed = iomap_readahead_iter(&iter, &ctx);
+ iter.status = iomap_readahead_iter(&iter, ctx,
+ &cur_bytes_submitted);
- if (ctx.bio)
- submit_bio(ctx.bio);
- if (ctx.cur_folio) {
- if (!ctx.cur_folio_in_bio)
- folio_unlock(ctx.cur_folio);
- }
+ if (ctx->ops->submit_read)
+ ctx->ops->submit_read(ctx);
+
+ if (ctx->cur_folio)
+ iomap_read_end(ctx->cur_folio, cur_bytes_submitted);
}
EXPORT_SYMBOL_GPL(iomap_readahead);
@@ -569,7 +655,7 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
struct iomap_folio_state *ifs = folio->private;
struct inode *inode = folio->mapping->host;
- unsigned first, last, i;
+ unsigned first, last;
if (!ifs)
return false;
@@ -581,10 +667,7 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
first = from >> inode->i_blkbits;
last = (from + count - 1) >> inode->i_blkbits;
- for (i = first; i <= last; i++)
- if (!ifs_block_is_uptodate(ifs, i))
- return false;
- return true;
+ return ifs_next_nonuptodate_block(folio, first, last) > last;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
@@ -603,6 +686,8 @@ struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
if (iter->flags & IOMAP_NOWAIT)
fgp |= FGP_NOWAIT;
+ if (iter->flags & IOMAP_DONTCACHE)
+ fgp |= FGP_DONTCACHE;
fgp |= fgf_set_order(len);
return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
@@ -669,23 +754,12 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
pos + len - 1);
}
-static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
- size_t poff, size_t plen, const struct iomap *iomap)
-{
- struct bio_vec bvec;
- struct bio bio;
-
- bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
- bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
- bio_add_folio_nofail(&bio, folio, plen, poff);
- return submit_bio_wait(&bio);
-}
-
-static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
- size_t len, struct folio *folio)
+static int __iomap_write_begin(const struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops, size_t len,
+ struct folio *folio)
{
- const struct iomap *srcmap = iomap_iter_srcmap(iter);
struct iomap_folio_state *ifs;
+ loff_t pos = iter->pos;
loff_t block_size = i_blocksize(iter->inode);
loff_t block_start = round_down(pos, block_size);
loff_t block_end = round_up(pos + len, block_size);
@@ -701,7 +775,7 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
* are not changing pagecache contents.
*/
if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
- pos + len >= folio_pos(folio) + folio_size(folio))
+ pos + len >= folio_next_pos(folio))
return 0;
ifs = ifs_alloc(iter->inode, folio, iter->flags);
@@ -717,9 +791,12 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
if (plen == 0)
break;
- if (!(iter->flags & IOMAP_UNSHARE) &&
- (from <= poff || from >= poff + plen) &&
- (to <= poff || to >= poff + plen))
+ /*
+ * If the read range will be entirely overwritten by the write,
+ * we can skip having to zero/read it in.
+ */
+ if (!(iter->flags & IOMAP_UNSHARE) && from <= poff &&
+ to >= poff + plen)
continue;
if (iomap_block_needs_zeroing(iter, block_start)) {
@@ -732,8 +809,12 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
if (iter->flags & IOMAP_NOWAIT)
return -EAGAIN;
- status = iomap_read_folio_sync(block_start, folio,
- poff, plen, srcmap);
+ if (write_ops && write_ops->read_folio_range)
+ status = write_ops->read_folio_range(iter,
+ folio, block_start, plen);
+ else
+ status = iomap_bio_read_folio_range_sync(iter,
+ folio, block_start, plen);
if (status)
return status;
}
@@ -743,30 +824,71 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
return 0;
}
-static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
- size_t len)
+static struct folio *__iomap_get_folio(struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops, size_t len)
{
- const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
+ loff_t pos = iter->pos;
- if (folio_ops && folio_ops->get_folio)
- return folio_ops->get_folio(iter, pos, len);
- else
- return iomap_get_folio(iter, pos, len);
+ if (!mapping_large_folio_support(iter->inode->i_mapping))
+ len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
+
+ if (iter->fbatch) {
+ struct folio *folio = folio_batch_next(iter->fbatch);
+
+ if (!folio)
+ return NULL;
+
+ /*
+ * The folio mapping generally shouldn't have changed based on
+ * fs locks, but be consistent with filemap lookup and retry
+ * the iter if it does.
+ */
+ folio_lock(folio);
+ if (unlikely(folio->mapping != iter->inode->i_mapping)) {
+ iter->iomap.flags |= IOMAP_F_STALE;
+ folio_unlock(folio);
+ return NULL;
+ }
+
+ folio_get(folio);
+ return folio;
+ }
+
+ if (write_ops && write_ops->get_folio)
+ return write_ops->get_folio(iter, pos, len);
+ return iomap_get_folio(iter, pos, len);
}
-static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
+static void __iomap_put_folio(struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops, size_t ret,
struct folio *folio)
{
- const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
+ loff_t pos = iter->pos;
- if (folio_ops && folio_ops->put_folio) {
- folio_ops->put_folio(iter->inode, pos, ret, folio);
+ if (write_ops && write_ops->put_folio) {
+ write_ops->put_folio(iter->inode, pos, ret, folio);
} else {
folio_unlock(folio);
folio_put(folio);
}
}
+/* trim pos and bytes to within a given folio */
+static loff_t iomap_trim_folio_range(struct iomap_iter *iter,
+ struct folio *folio, size_t *offset, u64 *bytes)
+{
+ loff_t pos = iter->pos;
+ size_t fsize = folio_size(folio);
+
+ WARN_ON_ONCE(pos < folio_pos(folio));
+ WARN_ON_ONCE(pos >= folio_pos(folio) + fsize);
+
+ *offset = offset_in_folio(folio, pos);
+ *bytes = min(*bytes, fsize - *offset);
+
+ return pos;
+}
+
static int iomap_write_begin_inline(const struct iomap_iter *iter,
struct folio *folio)
{
@@ -776,29 +898,42 @@ static int iomap_write_begin_inline(const struct iomap_iter *iter,
return iomap_read_inline_data(iter, folio);
}
-static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
- size_t len, struct folio **foliop)
+/*
+ * Grab and prepare a folio for write based on iter state. Returns the folio,
+ * offset, and length. Callers can optionally pass a max length *plen,
+ * otherwise init to zero.
+ */
+static int iomap_write_begin(struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops, struct folio **foliop,
+ size_t *poffset, u64 *plen)
{
- const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
const struct iomap *srcmap = iomap_iter_srcmap(iter);
+ loff_t pos;
+ u64 len = min_t(u64, SIZE_MAX, iomap_length(iter));
struct folio *folio;
int status = 0;
- BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
- if (srcmap != &iter->iomap)
- BUG_ON(pos + len > srcmap->offset + srcmap->length);
+ len = min_not_zero(len, *plen);
+ *foliop = NULL;
+ *plen = 0;
if (fatal_signal_pending(current))
return -EINTR;
- if (!mapping_large_folio_support(iter->inode->i_mapping))
- len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
-
- folio = __iomap_get_folio(iter, pos, len);
+ folio = __iomap_get_folio(iter, write_ops, len);
if (IS_ERR(folio))
return PTR_ERR(folio);
/*
+ * No folio means we're done with a batch. We still have range to
+ * process so return and let the caller iterate and refill the batch.
+ */
+ if (!folio) {
+ WARN_ON_ONCE(!iter->fbatch);
+ return 0;
+ }
+
+ /*
* Now we have a locked folio, before we do anything with it we need to
* check that the iomap we have cached is not stale. The inode extent
* mapping can change due to concurrent IO in flight (e.g.
@@ -808,8 +943,8 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
* could do the wrong thing here (zero a page range incorrectly or fail
* to zero) and corrupt data.
*/
- if (folio_ops && folio_ops->iomap_valid) {
- bool iomap_valid = folio_ops->iomap_valid(iter->inode,
+ if (write_ops && write_ops->iomap_valid) {
+ bool iomap_valid = write_ops->iomap_valid(iter->inode,
&iter->iomap);
if (!iomap_valid) {
iter->iomap.flags |= IOMAP_F_STALE;
@@ -818,25 +953,40 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
}
}
- if (pos + len > folio_pos(folio) + folio_size(folio))
- len = folio_pos(folio) + folio_size(folio) - pos;
+ /*
+ * The folios in a batch may not be contiguous. If we've skipped
+ * forward, advance the iter to the pos of the current folio. If the
+ * folio starts beyond the end of the mapping, it may have been trimmed
+ * since the lookup for whatever reason. Return a NULL folio to
+ * terminate the op.
+ */
+ if (folio_pos(folio) > iter->pos) {
+ len = min_t(u64, folio_pos(folio) - iter->pos,
+ iomap_length(iter));
+ status = iomap_iter_advance(iter, len);
+ len = iomap_length(iter);
+ if (status || !len)
+ goto out_unlock;
+ }
+
+ pos = iomap_trim_folio_range(iter, folio, poffset, &len);
if (srcmap->type == IOMAP_INLINE)
status = iomap_write_begin_inline(iter, folio);
else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
else
- status = __iomap_write_begin(iter, pos, len, folio);
+ status = __iomap_write_begin(iter, write_ops, len, folio);
if (unlikely(status))
goto out_unlock;
*foliop = folio;
+ *plen = len;
return 0;
out_unlock:
- __iomap_put_folio(iter, pos, 0, folio);
-
+ __iomap_put_folio(iter, write_ops, 0, folio);
return status;
}
@@ -864,7 +1014,7 @@ static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
return true;
}
-static void iomap_write_end_inline(const struct iomap_iter *iter,
+static bool iomap_write_end_inline(const struct iomap_iter *iter,
struct folio *folio, loff_t pos, size_t copied)
{
const struct iomap *iomap = &iter->iomap;
@@ -873,33 +1023,35 @@ static void iomap_write_end_inline(const struct iomap_iter *iter,
WARN_ON_ONCE(!folio_test_uptodate(folio));
BUG_ON(!iomap_inline_data_valid(iomap));
+ if (WARN_ON_ONCE(!iomap->inline_data))
+ return false;
+
flush_dcache_folio(folio);
addr = kmap_local_folio(folio, pos);
memcpy(iomap_inline_data(iomap, pos), addr, copied);
kunmap_local(addr);
mark_inode_dirty(iter->inode);
+ return true;
}
/*
* Returns true if all copied bytes have been written to the pagecache,
* otherwise return false.
*/
-static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
- size_t copied, struct folio *folio)
+static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
+ struct folio *folio)
{
const struct iomap *srcmap = iomap_iter_srcmap(iter);
+ loff_t pos = iter->pos;
- if (srcmap->type == IOMAP_INLINE) {
- iomap_write_end_inline(iter, folio, pos, copied);
- return true;
- }
+ if (srcmap->type == IOMAP_INLINE)
+ return iomap_write_end_inline(iter, folio, pos, copied);
if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
size_t bh_written;
- bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
- len, copied, folio, NULL);
+ bh_written = block_write_end(pos, len, copied, folio);
WARN_ON_ONCE(bh_written != copied && bh_written != 0);
return bh_written == copied;
}
@@ -907,12 +1059,11 @@ static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
return __iomap_write_end(iter->inode, pos, len, copied, folio);
}
-static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
+static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i,
+ const struct iomap_write_ops *write_ops)
{
- loff_t length = iomap_length(iter);
- loff_t pos = iter->pos;
ssize_t total_written = 0;
- long status = 0;
+ int status = 0;
struct address_space *mapping = iter->inode->i_mapping;
size_t chunk = mapping_max_folio_size(mapping);
unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
@@ -921,21 +1072,22 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
struct folio *folio;
loff_t old_size;
size_t offset; /* Offset into folio */
- size_t bytes; /* Bytes to write to folio */
+ u64 bytes; /* Bytes to write to folio */
size_t copied; /* Bytes copied from user */
- size_t written; /* Bytes have been written */
+ u64 written; /* Bytes have been written */
+ loff_t pos;
bytes = iov_iter_count(i);
retry:
- offset = pos & (chunk - 1);
+ offset = iter->pos & (chunk - 1);
bytes = min(chunk - offset, bytes);
status = balance_dirty_pages_ratelimited_flags(mapping,
bdp_flags);
if (unlikely(status))
break;
- if (bytes > length)
- bytes = length;
+ if (bytes > iomap_length(iter))
+ bytes = iomap_length(iter);
/*
* Bring in the user page that we'll copy from _first_.
@@ -952,23 +1104,22 @@ retry:
break;
}
- status = iomap_write_begin(iter, pos, bytes, &folio);
+ status = iomap_write_begin(iter, write_ops, &folio, &offset,
+ &bytes);
if (unlikely(status)) {
- iomap_write_failed(iter->inode, pos, bytes);
+ iomap_write_failed(iter->inode, iter->pos, bytes);
break;
}
if (iter->iomap.flags & IOMAP_F_STALE)
break;
- offset = offset_in_folio(folio, pos);
- if (bytes > folio_size(folio) - offset)
- bytes = folio_size(folio) - offset;
+ pos = iter->pos;
if (mapping_writably_mapped(mapping))
flush_dcache_folio(folio);
copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
- written = iomap_write_end(iter, pos, bytes, copied, folio) ?
+ written = iomap_write_end(iter, bytes, copied, folio) ?
copied : 0;
/*
@@ -983,7 +1134,7 @@ retry:
i_size_write(iter->inode, pos + written);
iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
}
- __iomap_put_folio(iter, pos, written, folio);
+ __iomap_put_folio(iter, write_ops, written, folio);
if (old_size < pos)
pagecache_isize_extended(iter->inode, old_size, pos);
@@ -1006,22 +1157,18 @@ retry:
goto retry;
}
} else {
- pos += written;
total_written += written;
- length -= written;
+ iomap_iter_advance(iter, written);
}
- } while (iov_iter_count(i) && length);
+ } while (iov_iter_count(i) && iomap_length(iter));
- if (status == -EAGAIN) {
- iov_iter_revert(i, total_written);
- return -EAGAIN;
- }
- return total_written ? total_written : status;
+ return total_written ? 0 : status;
}
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
- const struct iomap_ops *ops, void *private)
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private)
{
struct iomap_iter iter = {
.inode = iocb->ki_filp->f_mapping->host,
@@ -1034,9 +1181,11 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
if (iocb->ki_flags & IOCB_NOWAIT)
iter.flags |= IOMAP_NOWAIT;
+ if (iocb->ki_flags & IOCB_DONTCACHE)
+ iter.flags |= IOMAP_DONTCACHE;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_write_iter(&iter, i);
+ iter.status = iomap_write_iter(&iter, i, write_ops);
if (unlikely(iter.pos == iocb->ki_pos))
return ret;
@@ -1050,7 +1199,7 @@ static void iomap_write_delalloc_ifs_punch(struct inode *inode,
struct folio *folio, loff_t start_byte, loff_t end_byte,
struct iomap *iomap, iomap_punch_t punch)
{
- unsigned int first_blk, last_blk, i;
+ unsigned int first_blk, last_blk;
loff_t last_byte;
u8 blkbits = inode->i_blkbits;
struct iomap_folio_state *ifs;
@@ -1065,14 +1214,14 @@ static void iomap_write_delalloc_ifs_punch(struct inode *inode,
if (!ifs)
return;
- last_byte = min_t(loff_t, end_byte - 1,
- folio_pos(folio) + folio_size(folio) - 1);
+ last_byte = min_t(loff_t, end_byte - 1, folio_next_pos(folio) - 1);
first_blk = offset_in_folio(folio, start_byte) >> blkbits;
last_blk = offset_in_folio(folio, last_byte) >> blkbits;
- for (i = first_blk; i <= last_blk; i++) {
- if (!ifs_block_is_dirty(folio, ifs, i))
- punch(inode, folio_pos(folio) + (i << blkbits),
- 1 << blkbits, iomap);
+ while ((first_blk = ifs_next_clean_block(folio, first_blk, last_blk))
+ <= last_blk) {
+ punch(inode, folio_pos(folio) + (first_blk << blkbits),
+ 1 << blkbits, iomap);
+ first_blk++;
}
}
@@ -1097,8 +1246,7 @@ static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
* Make sure the next punch start is correctly bound to
* the end of this data range, not the end of the folio.
*/
- *punch_start_byte = min_t(loff_t, end_byte,
- folio_pos(folio) + folio_size(folio));
+ *punch_start_byte = min_t(loff_t, end_byte, folio_next_pos(folio));
}
/*
@@ -1138,7 +1286,7 @@ static void iomap_write_delalloc_scan(struct inode *inode,
start_byte, end_byte, iomap, punch);
/* move offset to start of next folio in range */
- start_byte = folio_next_index(folio) << PAGE_SHIFT;
+ start_byte = folio_next_pos(folio);
folio_unlock(folio);
folio_put(folio);
}
@@ -1270,53 +1418,50 @@ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
}
EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);
-static loff_t iomap_unshare_iter(struct iomap_iter *iter)
+static int iomap_unshare_iter(struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops)
{
struct iomap *iomap = &iter->iomap;
- loff_t pos = iter->pos;
- loff_t length = iomap_length(iter);
- loff_t written = 0;
+ u64 bytes = iomap_length(iter);
+ int status;
if (!iomap_want_unshare_iter(iter))
- return length;
+ return iomap_iter_advance(iter, bytes);
do {
struct folio *folio;
- int status;
size_t offset;
- size_t bytes = min_t(u64, SIZE_MAX, length);
bool ret;
- status = iomap_write_begin(iter, pos, bytes, &folio);
+ bytes = min_t(u64, SIZE_MAX, bytes);
+ status = iomap_write_begin(iter, write_ops, &folio, &offset,
+ &bytes);
if (unlikely(status))
return status;
if (iomap->flags & IOMAP_F_STALE)
break;
- offset = offset_in_folio(folio, pos);
- if (bytes > folio_size(folio) - offset)
- bytes = folio_size(folio) - offset;
-
- ret = iomap_write_end(iter, pos, bytes, bytes, folio);
- __iomap_put_folio(iter, pos, bytes, folio);
+ ret = iomap_write_end(iter, bytes, bytes, folio);
+ __iomap_put_folio(iter, write_ops, bytes, folio);
if (WARN_ON_ONCE(!ret))
return -EIO;
cond_resched();
- pos += bytes;
- written += bytes;
- length -= bytes;
-
balance_dirty_pages_ratelimited(iter->inode->i_mapping);
- } while (length > 0);
- return written;
+ status = iomap_iter_advance(iter, bytes);
+ if (status)
+ break;
+ } while ((bytes = iomap_length(iter)) > 0);
+
+ return status;
}
int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
- const struct iomap_ops *ops)
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops)
{
struct iomap_iter iter = {
.inode = inode,
@@ -1331,7 +1476,7 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
iter.len = min(len, size - pos);
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_unshare_iter(&iter);
+ iter.status = iomap_unshare_iter(&iter, write_ops);
return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
@@ -1350,110 +1495,121 @@ static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i)
return filemap_write_and_wait_range(mapping, i->pos, end);
}
-static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
+static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
+ const struct iomap_write_ops *write_ops)
{
- loff_t pos = iter->pos;
- loff_t length = iomap_length(iter);
- loff_t written = 0;
+ u64 bytes = iomap_length(iter);
+ int status;
do {
struct folio *folio;
- int status;
size_t offset;
- size_t bytes = min_t(u64, SIZE_MAX, length);
bool ret;
- status = iomap_write_begin(iter, pos, bytes, &folio);
+ bytes = min_t(u64, SIZE_MAX, bytes);
+ status = iomap_write_begin(iter, write_ops, &folio, &offset,
+ &bytes);
if (status)
return status;
if (iter->iomap.flags & IOMAP_F_STALE)
break;
+ /* a NULL folio means we're done with a folio batch */
+ if (!folio) {
+ status = iomap_iter_advance_full(iter);
+ break;
+ }
+
/* warn about zeroing folios beyond eof that won't write back */
WARN_ON_ONCE(folio_pos(folio) > iter->inode->i_size);
- offset = offset_in_folio(folio, pos);
- if (bytes > folio_size(folio) - offset)
- bytes = folio_size(folio) - offset;
+
+ trace_iomap_zero_iter(iter->inode, folio_pos(folio) + offset,
+ bytes);
folio_zero_range(folio, offset, bytes);
folio_mark_accessed(folio);
- ret = iomap_write_end(iter, pos, bytes, bytes, folio);
- __iomap_put_folio(iter, pos, bytes, folio);
+ ret = iomap_write_end(iter, bytes, bytes, folio);
+ __iomap_put_folio(iter, write_ops, bytes, folio);
if (WARN_ON_ONCE(!ret))
return -EIO;
- pos += bytes;
- length -= bytes;
- written += bytes;
- } while (length > 0);
+ status = iomap_iter_advance(iter, bytes);
+ if (status)
+ break;
+ } while ((bytes = iomap_length(iter)) > 0);
if (did_zero)
*did_zero = true;
- return written;
+ return status;
}
+loff_t
+iomap_fill_dirty_folios(
+ struct iomap_iter *iter,
+ loff_t offset,
+ loff_t length)
+{
+ struct address_space *mapping = iter->inode->i_mapping;
+ pgoff_t start = offset >> PAGE_SHIFT;
+ pgoff_t end = (offset + length - 1) >> PAGE_SHIFT;
+
+ iter->fbatch = kmalloc(sizeof(struct folio_batch), GFP_KERNEL);
+ if (!iter->fbatch)
+ return offset + length;
+ folio_batch_init(iter->fbatch);
+
+ filemap_get_folios_dirty(mapping, &start, end, iter->fbatch);
+ return (start << PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(iomap_fill_dirty_folios);
+
int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
- const struct iomap_ops *ops)
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private)
{
struct iomap_iter iter = {
.inode = inode,
.pos = pos,
.len = len,
.flags = IOMAP_ZERO,
+ .private = private,
};
struct address_space *mapping = inode->i_mapping;
- unsigned int blocksize = i_blocksize(inode);
- unsigned int off = pos & (blocksize - 1);
- loff_t plen = min_t(loff_t, len, blocksize - off);
int ret;
bool range_dirty;
/*
- * Zero range can skip mappings that are zero on disk so long as
- * pagecache is clean. If pagecache was dirty prior to zero range, the
- * mapping converts on writeback completion and so must be zeroed.
- *
- * The simplest way to deal with this across a range is to flush
- * pagecache and process the updated mappings. To avoid excessive
- * flushing on partial eof zeroing, special case it to zero the
- * unaligned start portion if already dirty in pagecache.
- */
- if (off &&
- filemap_range_needs_writeback(mapping, pos, pos + plen - 1)) {
- iter.len = plen;
- while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_zero_iter(&iter, did_zero);
-
- iter.len = len - (iter.pos - pos);
- if (ret || !iter.len)
- return ret;
- }
-
- /*
* To avoid an unconditional flush, check pagecache state and only flush
* if dirty and the fs returns a mapping that might convert on
* writeback.
*/
- range_dirty = filemap_range_needs_writeback(inode->i_mapping,
- iter.pos, iter.pos + iter.len - 1);
+ range_dirty = filemap_range_needs_writeback(mapping, iter.pos,
+ iter.pos + iter.len - 1);
while ((ret = iomap_iter(&iter, ops)) > 0) {
const struct iomap *srcmap = iomap_iter_srcmap(&iter);
- if (srcmap->type == IOMAP_HOLE ||
- srcmap->type == IOMAP_UNWRITTEN) {
- loff_t proc = iomap_length(&iter);
+ if (WARN_ON_ONCE(iter.fbatch &&
+ srcmap->type != IOMAP_UNWRITTEN))
+ return -EIO;
+
+ if (!iter.fbatch &&
+ (srcmap->type == IOMAP_HOLE ||
+ srcmap->type == IOMAP_UNWRITTEN)) {
+ s64 status;
if (range_dirty) {
range_dirty = false;
- proc = iomap_zero_iter_flush_and_stale(&iter);
+ status = iomap_zero_iter_flush_and_stale(&iter);
+ } else {
+ status = iomap_iter_advance_full(&iter);
}
- iter.processed = proc;
+ iter.status = status;
continue;
}
- iter.processed = iomap_zero_iter(&iter, did_zero);
+ iter.status = iomap_zero_iter(&iter, did_zero, write_ops);
}
return ret;
}
@@ -1461,7 +1617,8 @@ EXPORT_SYMBOL_GPL(iomap_zero_range);
int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
- const struct iomap_ops *ops)
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private)
{
unsigned int blocksize = i_blocksize(inode);
unsigned int off = pos & (blocksize - 1);
@@ -1469,11 +1626,12 @@ iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
/* Block boundary? Nothing to do */
if (!off)
return 0;
- return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
+ return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops,
+ write_ops, private);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
-static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
+static int iomap_folio_mkwrite_iter(struct iomap_iter *iter,
struct folio *folio)
{
loff_t length = iomap_length(iter);
@@ -1484,20 +1642,22 @@ static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
&iter->iomap);
if (ret)
return ret;
- block_commit_write(&folio->page, 0, length);
+ block_commit_write(folio, 0, length);
} else {
WARN_ON_ONCE(!folio_test_uptodate(folio));
folio_mark_dirty(folio);
}
- return length;
+ return iomap_iter_advance(iter, length);
}
-vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
+vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
+ void *private)
{
struct iomap_iter iter = {
.inode = file_inode(vmf->vma->vm_file),
.flags = IOMAP_WRITE | IOMAP_FAULT,
+ .private = private,
};
struct folio *folio = page_folio(vmf->page);
ssize_t ret;
@@ -1509,7 +1669,7 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
iter.pos = folio_pos(folio);
iter.len = ret;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_folio_mkwrite_iter(&iter, folio);
+ iter.status = iomap_folio_mkwrite_iter(&iter, folio);
if (ret < 0)
goto out_unlock;
@@ -1521,332 +1681,63 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
-static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
- size_t len)
+static void iomap_writeback_init(struct inode *inode, struct folio *folio)
{
struct iomap_folio_state *ifs = folio->private;
WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
- WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
-
- if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
- folio_end_writeback(folio);
-}
-
-/*
- * We're now finished for good with this ioend structure. Update the page
- * state, release holds on bios, and finally free up memory. Do not use the
- * ioend after this.
- */
-static u32
-iomap_finish_ioend(struct iomap_ioend *ioend, int error)
-{
- struct inode *inode = ioend->io_inode;
- struct bio *bio = &ioend->io_bio;
- struct folio_iter fi;
- u32 folio_count = 0;
-
- if (error) {
- mapping_set_error(inode->i_mapping, error);
- if (!bio_flagged(bio, BIO_QUIET)) {
- pr_err_ratelimited(
-"%s: writeback error on inode %lu, offset %lld, sector %llu",
- inode->i_sb->s_id, inode->i_ino,
- ioend->io_offset, ioend->io_sector);
- }
- }
-
- /* walk all folios in bio, ending page IO on them */
- bio_for_each_folio_all(fi, bio) {
- iomap_finish_folio_write(inode, fi.folio, fi.length);
- folio_count++;
- }
-
- bio_put(bio); /* frees the ioend */
- return folio_count;
-}
-
-/*
- * Ioend completion routine for merged bios. This can only be called from task
- * contexts as merged ioends can be of unbound length. Hence we have to break up
- * the writeback completions into manageable chunks to avoid long scheduler
- * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
- * good batch processing throughput without creating adverse scheduler latency
- * conditions.
- */
-void
-iomap_finish_ioends(struct iomap_ioend *ioend, int error)
-{
- struct list_head tmp;
- u32 completions;
-
- might_sleep();
-
- list_replace_init(&ioend->io_list, &tmp);
- completions = iomap_finish_ioend(ioend, error);
-
- while (!list_empty(&tmp)) {
- if (completions > IOEND_BATCH_SIZE * 8) {
- cond_resched();
- completions = 0;
- }
- ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
- list_del_init(&ioend->io_list);
- completions += iomap_finish_ioend(ioend, error);
- }
-}
-EXPORT_SYMBOL_GPL(iomap_finish_ioends);
-
-/*
- * We can merge two adjacent ioends if they have the same set of work to do.
- */
-static bool
-iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
-{
- if (ioend->io_bio.bi_status != next->io_bio.bi_status)
- return false;
- if (next->io_flags & IOMAP_F_BOUNDARY)
- return false;
- if ((ioend->io_flags & IOMAP_F_SHARED) ^
- (next->io_flags & IOMAP_F_SHARED))
- return false;
- if ((ioend->io_type == IOMAP_UNWRITTEN) ^
- (next->io_type == IOMAP_UNWRITTEN))
- return false;
- if (ioend->io_offset + ioend->io_size != next->io_offset)
- return false;
- /*
- * Do not merge physically discontiguous ioends. The filesystem
- * completion functions will have to iterate the physical
- * discontiguities even if we merge the ioends at a logical level, so
- * we don't gain anything by merging physical discontiguities here.
- *
- * We cannot use bio->bi_iter.bi_sector here as it is modified during
- * submission so does not point to the start sector of the bio at
- * completion.
- */
- if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
- return false;
- return true;
-}
-
-void
-iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
-{
- struct iomap_ioend *next;
-
- INIT_LIST_HEAD(&ioend->io_list);
-
- while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
- io_list))) {
- if (!iomap_ioend_can_merge(ioend, next))
- break;
- list_move_tail(&next->io_list, &ioend->io_list);
- ioend->io_size += next->io_size;
- }
-}
-EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
-
-static int
-iomap_ioend_compare(void *priv, const struct list_head *a,
- const struct list_head *b)
-{
- struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
- struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
-
- if (ia->io_offset < ib->io_offset)
- return -1;
- if (ia->io_offset > ib->io_offset)
- return 1;
- return 0;
-}
-
-void
-iomap_sort_ioends(struct list_head *ioend_list)
-{
- list_sort(NULL, ioend_list, iomap_ioend_compare);
-}
-EXPORT_SYMBOL_GPL(iomap_sort_ioends);
-
-static void iomap_writepage_end_bio(struct bio *bio)
-{
- iomap_finish_ioend(iomap_ioend_from_bio(bio),
- blk_status_to_errno(bio->bi_status));
-}
-
-/*
- * Submit the final bio for an ioend.
- *
- * If @error is non-zero, it means that we have a situation where some part of
- * the submission process has failed after we've marked pages for writeback.
- * We cannot cancel ioend directly in that case, so call the bio end I/O handler
- * with the error status here to run the normal I/O completion handler to clear
- * the writeback bit and let the file system proess the errors.
- */
-static int iomap_submit_ioend(struct iomap_writepage_ctx *wpc, int error)
-{
- if (!wpc->ioend)
- return error;
-
- /*
- * Let the file systems prepare the I/O submission and hook in an I/O
- * comletion handler. This also needs to happen in case after a
- * failure happened so that the file system end I/O handler gets called
- * to clean up.
- */
- if (wpc->ops->prepare_ioend)
- error = wpc->ops->prepare_ioend(wpc->ioend, error);
-
- if (error) {
- wpc->ioend->io_bio.bi_status = errno_to_blk_status(error);
- bio_endio(&wpc->ioend->io_bio);
- } else {
- submit_bio(&wpc->ioend->io_bio);
+ if (ifs) {
+ WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0);
+ /*
+ * Set this to the folio size. After processing the folio for
+ * writeback in iomap_writeback_folio(), we'll subtract any
+ * ranges not written back.
+ *
+ * We do this because otherwise, we would have to atomically
+ * increment ifs->write_bytes_pending every time a range in the
+ * folio needs to be written back.
+ */
+ atomic_set(&ifs->write_bytes_pending, folio_size(folio));
}
-
- wpc->ioend = NULL;
- return error;
-}
-
-static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
- struct writeback_control *wbc, struct inode *inode, loff_t pos)
-{
- struct iomap_ioend *ioend;
- struct bio *bio;
-
- bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
- REQ_OP_WRITE | wbc_to_write_flags(wbc),
- GFP_NOFS, &iomap_ioend_bioset);
- bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
- bio->bi_end_io = iomap_writepage_end_bio;
- wbc_init_bio(wbc, bio);
- bio->bi_write_hint = inode->i_write_hint;
-
- ioend = iomap_ioend_from_bio(bio);
- INIT_LIST_HEAD(&ioend->io_list);
- ioend->io_type = wpc->iomap.type;
- ioend->io_flags = wpc->iomap.flags;
- if (pos > wpc->iomap.offset)
- wpc->iomap.flags &= ~IOMAP_F_BOUNDARY;
- ioend->io_inode = inode;
- ioend->io_size = 0;
- ioend->io_offset = pos;
- ioend->io_sector = bio->bi_iter.bi_sector;
-
- wpc->nr_folios = 0;
- return ioend;
-}
-
-static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos)
-{
- if (wpc->iomap.offset == pos && (wpc->iomap.flags & IOMAP_F_BOUNDARY))
- return false;
- if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
- (wpc->ioend->io_flags & IOMAP_F_SHARED))
- return false;
- if (wpc->iomap.type != wpc->ioend->io_type)
- return false;
- if (pos != wpc->ioend->io_offset + wpc->ioend->io_size)
- return false;
- if (iomap_sector(&wpc->iomap, pos) !=
- bio_end_sector(&wpc->ioend->io_bio))
- return false;
- /*
- * Limit ioend bio chain lengths to minimise IO completion latency. This
- * also prevents long tight loops ending page writeback on all the
- * folios in the ioend.
- */
- if (wpc->nr_folios >= IOEND_BATCH_SIZE)
- return false;
- return true;
}
-/*
- * Test to see if we have an existing ioend structure that we could append to
- * first; otherwise finish off the current ioend and start another.
- *
- * If a new ioend is created and cached, the old ioend is submitted to the block
- * layer instantly. Batching optimisations are provided by higher level block
- * plugging.
- *
- * At the end of a writeback pass, there will be a cached ioend remaining on the
- * writepage context that the caller will need to submit.
- */
-static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
- struct writeback_control *wbc, struct folio *folio,
- struct inode *inode, loff_t pos, unsigned len)
+void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
+ size_t len)
{
struct iomap_folio_state *ifs = folio->private;
- size_t poff = offset_in_folio(folio, pos);
- int error;
- if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos)) {
-new_ioend:
- error = iomap_submit_ioend(wpc, 0);
- if (error)
- return error;
- wpc->ioend = iomap_alloc_ioend(wpc, wbc, inode, pos);
- }
-
- if (!bio_add_folio(&wpc->ioend->io_bio, folio, len, poff))
- goto new_ioend;
+ WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
+ WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
- if (ifs)
- atomic_add(len, &ifs->write_bytes_pending);
- wpc->ioend->io_size += len;
- wbc_account_cgroup_owner(wbc, folio, len);
- return 0;
+ if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
+ folio_end_writeback(folio);
}
+EXPORT_SYMBOL_GPL(iomap_finish_folio_write);
-static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
- struct writeback_control *wbc, struct folio *folio,
- struct inode *inode, u64 pos, unsigned dirty_len,
- unsigned *count)
+static int iomap_writeback_range(struct iomap_writepage_ctx *wpc,
+ struct folio *folio, u64 pos, u32 rlen, u64 end_pos,
+ size_t *bytes_submitted)
{
- int error;
-
do {
- unsigned map_len;
+ ssize_t ret;
- error = wpc->ops->map_blocks(wpc, inode, pos, dirty_len);
- if (error)
- break;
- trace_iomap_writepage_map(inode, pos, dirty_len, &wpc->iomap);
-
- map_len = min_t(u64, dirty_len,
- wpc->iomap.offset + wpc->iomap.length - pos);
- WARN_ON_ONCE(!folio->private && map_len < dirty_len);
+ ret = wpc->ops->writeback_range(wpc, folio, pos, rlen, end_pos);
+ if (WARN_ON_ONCE(ret == 0 || ret > rlen))
+ return -EIO;
+ if (ret < 0)
+ return ret;
+ rlen -= ret;
+ pos += ret;
- switch (wpc->iomap.type) {
- case IOMAP_INLINE:
- WARN_ON_ONCE(1);
- error = -EIO;
- break;
- case IOMAP_HOLE:
- break;
- default:
- error = iomap_add_to_ioend(wpc, wbc, folio, inode, pos,
- map_len);
- if (!error)
- (*count)++;
- break;
- }
- dirty_len -= map_len;
- pos += map_len;
- } while (dirty_len && !error);
+ /*
+ * Holes are not written back by ->writeback_range, so track
+ * if we did handle anything that is not a hole here.
+ */
+ if (wpc->iomap.type != IOMAP_HOLE)
+ *bytes_submitted += ret;
+ } while (rlen);
- /*
- * We cannot cancel the ioend directly here on error. We may have
- * already set other pages under writeback and hence we have to run I/O
- * completion to mark the error state of the pages under writeback
- * appropriately.
- *
- * Just let the file system know what portion of the folio failed to
- * map.
- */
- if (error && wpc->ops->discard_folio)
- wpc->ops->discard_folio(folio, pos);
- return error;
+ return 0;
}
/*
@@ -1855,7 +1746,7 @@ static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
* If the folio is entirely beyond i_size, return false. If it straddles
* i_size, adjust end_pos and zero all data beyond i_size.
*/
-static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
+static bool iomap_writeback_handle_eof(struct folio *folio, struct inode *inode,
u64 *end_pos)
{
u64 isize = i_size_read(inode);
@@ -1897,24 +1788,24 @@ static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
* remaining memory is zeroed when mapped, and writes to that
* region are not written out to the file.
*
- * Also adjust the writeback range to skip all blocks entirely
- * beyond i_size.
+ * Also adjust the end_pos to the end of file and skip writeback
+ * for all blocks entirely beyond i_size.
*/
folio_zero_segment(folio, poff, folio_size(folio));
- *end_pos = round_up(isize, i_blocksize(inode));
+ *end_pos = isize;
}
return true;
}
-static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
- struct writeback_control *wbc, struct folio *folio)
+int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio)
{
struct iomap_folio_state *ifs = folio->private;
- struct inode *inode = folio->mapping->host;
+ struct inode *inode = wpc->inode;
u64 pos = folio_pos(folio);
u64 end_pos = pos + folio_size(folio);
- unsigned count = 0;
+ u64 end_aligned = 0;
+ size_t bytes_submitted = 0;
int error = 0;
u32 rlen;
@@ -1922,12 +1813,10 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
WARN_ON_ONCE(folio_test_dirty(folio));
WARN_ON_ONCE(folio_test_writeback(folio));
- trace_iomap_writepage(inode, pos, folio_size(folio));
+ trace_iomap_writeback_folio(inode, pos, folio_size(folio));
- if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
- folio_unlock(folio);
+ if (!iomap_writeback_handle_eof(folio, inode, &end_pos))
return 0;
- }
WARN_ON_ONCE(end_pos <= pos);
if (i_blocks_per_folio(inode, folio) > 1) {
@@ -1936,14 +1825,7 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
iomap_set_range_dirty(folio, 0, end_pos - pos);
}
- /*
- * Keep the I/O completion handler from clearing the writeback
- * bit until we have submitted all blocks by adding a bias to
- * ifs->write_bytes_pending, which is dropped after submitting
- * all blocks.
- */
- WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0);
- atomic_inc(&ifs->write_bytes_pending);
+ iomap_writeback_init(inode, folio);
}
/*
@@ -1955,15 +1837,16 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
/*
* Walk through the folio to find dirty areas to write back.
*/
- while ((rlen = iomap_find_dirty_range(folio, &pos, end_pos))) {
- error = iomap_writepage_map_blocks(wpc, wbc, folio, inode,
- pos, rlen, &count);
+ end_aligned = round_up(end_pos, i_blocksize(inode));
+ while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) {
+ error = iomap_writeback_range(wpc, folio, pos, rlen, end_pos,
+ &bytes_submitted);
if (error)
break;
pos += rlen;
}
- if (count)
+ if (bytes_submitted)
wpc->nr_folios++;
/*
@@ -1980,23 +1863,30 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
* already at this point. In that case we need to clear the writeback
* bit ourselves right after unlocking the page.
*/
- folio_unlock(folio);
if (ifs) {
- if (atomic_dec_and_test(&ifs->write_bytes_pending))
- folio_end_writeback(folio);
- } else {
- if (!count)
- folio_end_writeback(folio);
+ /*
+ * Subtract any bytes that were initially accounted to
+ * write_bytes_pending but skipped for writeback.
+ */
+ size_t bytes_not_submitted = folio_size(folio) -
+ bytes_submitted;
+
+ if (bytes_not_submitted)
+ iomap_finish_folio_write(inode, folio,
+ bytes_not_submitted);
+ } else if (!bytes_submitted) {
+ folio_end_writeback(folio);
}
+
mapping_set_error(inode->i_mapping, error);
return error;
}
+EXPORT_SYMBOL_GPL(iomap_writeback_folio);
int
-iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
- struct iomap_writepage_ctx *wpc,
- const struct iomap_writeback_ops *ops)
+iomap_writepages(struct iomap_writepage_ctx *wpc)
{
+ struct address_space *mapping = wpc->inode->i_mapping;
struct folio *folio = NULL;
int error;
@@ -2008,17 +1898,22 @@ iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
PF_MEMALLOC))
return -EIO;
- wpc->ops = ops;
- while ((folio = writeback_iter(mapping, wbc, folio, &error)))
- error = iomap_writepage_map(wpc, wbc, folio);
- return iomap_submit_ioend(wpc, error);
-}
-EXPORT_SYMBOL_GPL(iomap_writepages);
+ while ((folio = writeback_iter(mapping, wpc->wbc, folio, &error))) {
+ error = iomap_writeback_folio(wpc, folio);
+ folio_unlock(folio);
+ }
-static int __init iomap_buffered_init(void)
-{
- return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
- offsetof(struct iomap_ioend, io_bio),
- BIOSET_NEED_BVECS);
+ /*
+ * If @error is non-zero, it means that we have a situation where some
+ * part of the submission process has failed after we've marked pages
+ * for writeback.
+ *
+ * We cannot cancel the writeback directly in that case, so always call
+ * ->writeback_submit to run the I/O completion handler to clear the
+ * writeback bit and let the file system process the errors.
+ */
+ if (wpc->wb_ctx)
+ return wpc->ops->writeback_submit(wpc, error);
+ return error;
}
-fs_initcall(iomap_buffered_init);
+EXPORT_SYMBOL_GPL(iomap_writepages);
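For illustration, a filesystem now drives buffered writeback through this reworked entry point by filling in a struct iomap_writepage_ctx and the two hooks used above (->writeback_range and ->writeback_submit). The sketch below is hedged: the example_* names are placeholders, the hook prototype is inferred from the calls visible in this patch, and it assumes a trivial 1:1 block mapping, whereas a real filesystem performs extent lookup/allocation in its ->writeback_range. The fields and helpers it relies on (wpc->inode, wpc->wbc, wpc->iomap, iomap_add_to_ioend(), iomap_ioend_writeback_submit(), iomap_writepages()) are the ones appearing in this patch.

#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

static ssize_t example_writeback_range(struct iomap_writepage_ctx *wpc,
		struct folio *folio, u64 pos, unsigned int len, u64 end_pos)
{
	/*
	 * A real filesystem looks up or allocates the extent covering
	 * [pos, pos + len) and describes it in wpc->iomap.  This sketch
	 * assumes a 1:1 mapping onto the backing device.
	 */
	wpc->iomap.type = IOMAP_MAPPED;
	wpc->iomap.flags = 0;
	wpc->iomap.bdev = wpc->inode->i_sb->s_bdev;
	wpc->iomap.offset = pos;
	wpc->iomap.addr = pos;
	wpc->iomap.length = len;

	/* Add the range to the current ioend, starting a new one as needed. */
	return iomap_add_to_ioend(wpc, folio, pos, end_pos, len);
}

static const struct iomap_writeback_ops example_writeback_ops = {
	.writeback_range	= example_writeback_range,
	.writeback_submit	= iomap_ioend_writeback_submit,
};

static int example_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = {
		.inode	= mapping->host,
		.wbc	= wbc,
		.ops	= &example_writeback_ops,
	};

	return iomap_writepages(&wpc);
}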
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index b521eb15759e..8e273408453a 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -1,17 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Red Hat, Inc.
- * Copyright (c) 2016-2021 Christoph Hellwig.
+ * Copyright (c) 2016-2025 Christoph Hellwig.
*/
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/pagemap.h>
#include <linux/iomap.h>
-#include <linux/backing-dev.h>
-#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
+#include "internal.h"
#include "trace.h"
#include "../internal.h"
@@ -20,20 +16,13 @@
* Private flags for iomap_dio, must not overlap with the public ones in
* iomap.h:
*/
-#define IOMAP_DIO_CALLER_COMP (1U << 26)
-#define IOMAP_DIO_INLINE_COMP (1U << 27)
+#define IOMAP_DIO_NO_INVALIDATE (1U << 26)
+#define IOMAP_DIO_COMP_WORK (1U << 27)
#define IOMAP_DIO_WRITE_THROUGH (1U << 28)
#define IOMAP_DIO_NEED_SYNC (1U << 29)
#define IOMAP_DIO_WRITE (1U << 30)
#define IOMAP_DIO_DIRTY (1U << 31)
-/*
- * Used for sub block zeroing in iomap_dio_zero()
- */
-#define IOMAP_ZERO_PAGE_SIZE (SZ_64K)
-#define IOMAP_ZERO_PAGE_ORDER (get_order(IOMAP_ZERO_PAGE_SIZE))
-static struct page *zero_page;
-
struct iomap_dio {
struct kiocb *iocb;
const struct iomap_dio_ops *dops;
@@ -81,10 +70,12 @@ static void iomap_dio_submit_bio(const struct iomap_iter *iter,
WRITE_ONCE(iocb->private, bio);
}
- if (dio->dops && dio->dops->submit_io)
+ if (dio->dops && dio->dops->submit_io) {
dio->dops->submit_io(iter, bio, pos);
- else
+ } else {
+ WARN_ON_ONCE(iter->iomap.flags & IOMAP_F_ANON_WRITE);
submit_bio(bio);
+ }
}
ssize_t iomap_dio_complete(struct iomap_dio *dio)
@@ -117,7 +108,8 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
* ->end_io() when necessary, otherwise a racing buffer read would cache
* zeros from unwritten extents.
*/
- if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE))
+ if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE) &&
+ !(dio->flags & IOMAP_DIO_NO_INVALIDATE))
kiocb_invalidate_post_direct_write(iocb, dio->size);
inode_dio_end(file_inode(iocb->ki_filp));
@@ -140,11 +132,6 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);
-static ssize_t iomap_dio_deferred_complete(void *data)
-{
- return iomap_dio_complete(data);
-}
-
static void iomap_dio_complete_work(struct work_struct *work)
{
struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
@@ -163,73 +150,77 @@ static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
cmpxchg(&dio->error, 0, ret);
}
-void iomap_dio_bio_end_io(struct bio *bio)
+/*
+ * Called when dio->ref reaches zero from an I/O completion.
+ */
+static void iomap_dio_done(struct iomap_dio *dio)
{
- struct iomap_dio *dio = bio->bi_private;
- bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
struct kiocb *iocb = dio->iocb;
- if (bio->bi_status)
- iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
- if (!atomic_dec_and_test(&dio->ref))
- goto release_bio;
-
- /*
- * Synchronous dio, task itself will handle any completion work
- * that needs after IO. All we need to do is wake the task.
- */
if (dio->wait_for_completion) {
+ /*
+ * Synchronous I/O, task itself will handle any completion work
+ * that is needed after I/O. All we need to do is wake the task.
+ */
struct task_struct *waiter = dio->submit.waiter;
WRITE_ONCE(dio->submit.waiter, NULL);
blk_wake_io_task(waiter);
- goto release_bio;
+ return;
}
/*
- * Flagged with IOMAP_DIO_INLINE_COMP, we can complete it inline
+ * Always run error completions in user context. These are not
+ * performance critical and some code relies on taking sleeping locks
+ * for error handling.
*/
- if (dio->flags & IOMAP_DIO_INLINE_COMP) {
- WRITE_ONCE(iocb->private, NULL);
- iomap_dio_complete_work(&dio->aio.work);
- goto release_bio;
- }
+ if (dio->error)
+ dio->flags |= IOMAP_DIO_COMP_WORK;
/*
- * If this dio is flagged with IOMAP_DIO_CALLER_COMP, then schedule
- * our completion that way to avoid an async punt to a workqueue.
+ * Never invalidate pages from this context to avoid deadlocks with
+ * buffered I/O completions when called from the ioend workqueue,
+ * or to avoid sleeping when called directly from ->bi_end_io.
+ * Tough luck if you hit the tiny race with someone dirtying the range
+ * right between this check and the actual completion.
*/
- if (dio->flags & IOMAP_DIO_CALLER_COMP) {
- /* only polled IO cares about private cleared */
- iocb->private = dio;
- iocb->dio_complete = iomap_dio_deferred_complete;
+ if ((dio->flags & IOMAP_DIO_WRITE) &&
+ !(dio->flags & IOMAP_DIO_COMP_WORK)) {
+ if (dio->iocb->ki_filp->f_mapping->nrpages)
+ dio->flags |= IOMAP_DIO_COMP_WORK;
+ else
+ dio->flags |= IOMAP_DIO_NO_INVALIDATE;
+ }
+
+ if (dio->flags & IOMAP_DIO_COMP_WORK) {
+ struct inode *inode = file_inode(iocb->ki_filp);
/*
- * Invoke ->ki_complete() directly. We've assigned our
- * dio_complete callback handler, and since the issuer set
- * IOCB_DIO_CALLER_COMP, we know their ki_complete handler will
- * notice ->dio_complete being set and will defer calling that
- * handler until it can be done from a safe task context.
- *
- * Note that the 'res' being passed in here is not important
- * for this case. The actual completion value of the request
- * will be gotten from dio_complete when that is run by the
- * issuer.
+ * Async DIO completion that requires filesystem level
+ * completion work gets punted to a work queue to complete as
+ * the operation may require more IO to be issued to finalise
+ * filesystem metadata changes or guarantee data integrity.
*/
- iocb->ki_complete(iocb, 0);
- goto release_bio;
+ INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
+ queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
+ return;
}
- /*
- * Async DIO completion that requires filesystem level completion work
- * gets punted to a work queue to complete as the operation may require
- * more IO to be issued to finalise filesystem metadata changes or
- * guarantee data integrity.
- */
- INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
- queue_work(file_inode(iocb->ki_filp)->i_sb->s_dio_done_wq,
- &dio->aio.work);
-release_bio:
+ WRITE_ONCE(iocb->private, NULL);
+ iomap_dio_complete_work(&dio->aio.work);
+}
+
+void iomap_dio_bio_end_io(struct bio *bio)
+{
+ struct iomap_dio *dio = bio->bi_private;
+ bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
+
+ if (bio->bi_status)
+ iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
+
+ if (atomic_dec_and_test(&dio->ref))
+ iomap_dio_done(dio);
+
if (should_dirty) {
bio_check_pages_dirty(bio);
} else {
@@ -239,108 +230,185 @@ release_bio:
}
EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io);
+u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend)
+{
+ struct iomap_dio *dio = ioend->io_bio.bi_private;
+ bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
+ u32 vec_count = ioend->io_bio.bi_vcnt;
+
+ if (ioend->io_error)
+ iomap_dio_set_error(dio, ioend->io_error);
+
+ if (atomic_dec_and_test(&dio->ref)) {
+ /*
+ * Try to avoid another context switch for the completion given
+ * that we are already called from the ioend completion
+ * workqueue.
+ */
+ dio->flags &= ~IOMAP_DIO_COMP_WORK;
+ iomap_dio_done(dio);
+ }
+
+ if (should_dirty) {
+ bio_check_pages_dirty(&ioend->io_bio);
+ } else {
+ bio_release_pages(&ioend->io_bio, false);
+ bio_put(&ioend->io_bio);
+ }
+
+ /*
+ * Return the number of bvecs completed as even direct I/O completions
+ * do significant per-folio work and we'll still want to give up the
+ * CPU after a lot of completions.
+ */
+ return vec_count;
+}
+
static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
loff_t pos, unsigned len)
{
struct inode *inode = file_inode(dio->iocb->ki_filp);
struct bio *bio;
+ struct folio *zero_folio = largest_zero_folio();
+ int nr_vecs = max(1, i_blocksize(inode) / folio_size(zero_folio));
if (!len)
return 0;
+
/*
- * Max block size supported is 64k
+ * This limit shall never be reached as most filesystems have a
+ * maximum blocksize of 64k.
*/
- if (WARN_ON_ONCE(len > IOMAP_ZERO_PAGE_SIZE))
+ if (WARN_ON_ONCE(nr_vecs > BIO_MAX_VECS))
return -EINVAL;
- bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
+ bio = iomap_dio_alloc_bio(iter, dio, nr_vecs,
+ REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
GFP_KERNEL);
bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
- __bio_add_page(bio, zero_page, len, 0);
- iomap_dio_submit_bio(iter, dio, bio, pos);
- return 0;
-}
-
-/*
- * Figure out the bio's operation flags from the dio request, the
- * mapping, and whether or not we want FUA. Note that we can end up
- * clearing the WRITE_THROUGH flag in the dio request.
- */
-static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
- const struct iomap *iomap, bool use_fua, bool atomic)
-{
- blk_opf_t opflags = REQ_SYNC | REQ_IDLE;
-
- if (!(dio->flags & IOMAP_DIO_WRITE))
- return REQ_OP_READ;
+ while (len > 0) {
+ unsigned int io_len = min(len, folio_size(zero_folio));
- opflags |= REQ_OP_WRITE;
- if (use_fua)
- opflags |= REQ_FUA;
- else
- dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
- if (atomic)
- opflags |= REQ_ATOMIC;
+ bio_add_folio_nofail(bio, zero_folio, io_len, 0);
+ len -= io_len;
+ }
+ iomap_dio_submit_bio(iter, dio, bio, pos);
- return opflags;
+ return 0;
}
-static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
- struct iomap_dio *dio)
+static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
{
const struct iomap *iomap = &iter->iomap;
struct inode *inode = iter->inode;
unsigned int fs_block_size = i_blocksize(inode), pad;
const loff_t length = iomap_length(iter);
- bool atomic = iter->flags & IOMAP_ATOMIC;
loff_t pos = iter->pos;
- blk_opf_t bio_opf;
+ blk_opf_t bio_opf = REQ_SYNC | REQ_IDLE;
struct bio *bio;
bool need_zeroout = false;
- bool use_fua = false;
int nr_pages, ret = 0;
- size_t copied = 0;
+ u64 copied = 0;
size_t orig_count;
+ unsigned int alignment;
- if (atomic && length != fs_block_size)
- return -EINVAL;
+ /*
+ * File systems that write out of place and always allocate new blocks
+ * need each bio to be block aligned as that's the unit of allocation.
+ */
+ if (dio->flags & IOMAP_DIO_FSBLOCK_ALIGNED)
+ alignment = fs_block_size;
+ else
+ alignment = bdev_logical_block_size(iomap->bdev);
- if ((pos | length) & (bdev_logical_block_size(iomap->bdev) - 1) ||
- !bdev_iter_is_aligned(iomap->bdev, dio->submit.iter))
+ if ((pos | length) & (alignment - 1))
return -EINVAL;
- if (iomap->type == IOMAP_UNWRITTEN) {
- dio->flags |= IOMAP_DIO_UNWRITTEN;
- need_zeroout = true;
- }
+ if (dio->flags & IOMAP_DIO_WRITE) {
+ bool need_completion_work = true;
+
+ switch (iomap->type) {
+ case IOMAP_MAPPED:
+ /*
+ * Directly mapped I/O does not inherently need to do
+ * work at I/O completion time. But there are various
+ * cases below where this will get set again.
+ */
+ need_completion_work = false;
+ break;
+ case IOMAP_UNWRITTEN:
+ dio->flags |= IOMAP_DIO_UNWRITTEN;
+ need_zeroout = true;
+ break;
+ default:
+ break;
+ }
- if (iomap->flags & IOMAP_F_SHARED)
- dio->flags |= IOMAP_DIO_COW;
+ if (iomap->flags & IOMAP_F_ATOMIC_BIO) {
+ /*
+ * Ensure that the mapping covers the full write
+ * length, otherwise it won't be submitted as a single
+ * bio, which is required to use hardware atomics.
+ */
+ if (length != iter->len)
+ return -EINVAL;
+ bio_opf |= REQ_ATOMIC;
+ }
+
+ if (iomap->flags & IOMAP_F_SHARED) {
+ /*
+ * Unsharing needs to update metadata at I/O
+ * completion time.
+ */
+ need_completion_work = true;
+ dio->flags |= IOMAP_DIO_COW;
+ }
+
+ if (iomap->flags & IOMAP_F_NEW) {
+ /*
+ * Newly allocated blocks might need recording in
+ * metadata at I/O completion time.
+ */
+ need_completion_work = true;
+ need_zeroout = true;
+ }
+
+ /*
+ * Use a FUA write if we need datasync semantics and this is a
+ * pure overwrite that doesn't require any metadata updates.
+ *
+ * This allows us to avoid cache flushes on I/O completion.
+ */
+ if (dio->flags & IOMAP_DIO_WRITE_THROUGH) {
+ if (!need_completion_work &&
+ !(iomap->flags & IOMAP_F_DIRTY) &&
+ (!bdev_write_cache(iomap->bdev) ||
+ bdev_fua(iomap->bdev)))
+ bio_opf |= REQ_FUA;
+ else
+ dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
+ }
- if (iomap->flags & IOMAP_F_NEW) {
- need_zeroout = true;
- } else if (iomap->type == IOMAP_MAPPED) {
/*
- * Use a FUA write if we need datasync semantics, this is a pure
- * data IO that doesn't require any metadata updates (including
- * after IO completion such as unwritten extent conversion) and
- * the underlying device either supports FUA or doesn't have
- * a volatile write cache. This allows us to avoid cache flushes
- * on IO completion. If we can't use writethrough and need to
- * sync, disable in-task completions as dio completion will
- * need to call generic_write_sync() which will do a blocking
- * fsync / cache flush call.
+ * We can only do inline completion for pure overwrites that
+ * don't require additional I/O at completion time.
+ *
+ * This rules out writes that need zeroing or metadata updates to
+ * convert unwritten or shared extents.
+ *
+ * Writes that extend i_size are also not supported, but this is
+ * handled in __iomap_dio_rw().
*/
- if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
- (dio->flags & IOMAP_DIO_WRITE_THROUGH) &&
- (bdev_fua(iomap->bdev) || !bdev_write_cache(iomap->bdev)))
- use_fua = true;
- else if (dio->flags & IOMAP_DIO_NEED_SYNC)
- dio->flags &= ~IOMAP_DIO_CALLER_COMP;
+ if (need_completion_work)
+ dio->flags |= IOMAP_DIO_COMP_WORK;
+
+ bio_opf |= REQ_OP_WRITE;
+ } else {
+ bio_opf |= REQ_OP_READ;
}
/*
@@ -355,23 +423,11 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
goto out;
/*
- * We can only do deferred completion for pure overwrites that
- * don't require additional IO at completion. This rules out
- * writes that need zeroing or extent conversion, extend
- * the file size, or issue journal IO or cache flushes
- * during completion processing.
- */
- if (need_zeroout ||
- ((dio->flags & IOMAP_DIO_NEED_SYNC) && !use_fua) ||
- ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode)))
- dio->flags &= ~IOMAP_DIO_CALLER_COMP;
-
- /*
* The rules for polled IO completions follow the guidelines as the
* ones we set for inline and deferred completions. If none of those
* are available for this IO, clear the polled flag.
*/
- if (!(dio->flags & (IOMAP_DIO_INLINE_COMP|IOMAP_DIO_CALLER_COMP)))
+ if (dio->flags & IOMAP_DIO_COMP_WORK)
dio->iocb->ki_flags &= ~IOCB_HIPRI;
if (need_zeroout) {
@@ -383,8 +439,6 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
goto out;
}
- bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua, atomic);
-
nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
do {
size_t n;
@@ -403,7 +457,8 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
- ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
+ ret = bio_iov_iter_get_pages(bio, dio->submit.iter,
+ alignment - 1);
if (unlikely(ret)) {
/*
* We have to stop part way through an IO. We must fall
@@ -416,9 +471,9 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
}
n = bio->bi_iter.bi_size;
- if (WARN_ON_ONCE(atomic && n != length)) {
+ if (WARN_ON_ONCE((bio_opf & REQ_ATOMIC) && n != length)) {
/*
- * This bio should have covered the complete length,
+ * An atomic write bio must cover the complete length,
* which it doesn't, so error. We may need to zero out
* the tail (complete FS block), similar to when
* bio_iov_iter_get_pages() returns an error, above.
@@ -427,12 +482,10 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
bio_put(bio);
goto zero_tail;
}
- if (dio->flags & IOMAP_DIO_WRITE) {
+ if (dio->flags & IOMAP_DIO_WRITE)
task_io_account_write(n);
- } else {
- if (dio->flags & IOMAP_DIO_DIRTY)
- bio_set_pages_dirty(bio);
- }
+ else if (dio->flags & IOMAP_DIO_DIRTY)
+ bio_set_pages_dirty(bio);
dio->size += n;
copied += n;
@@ -467,30 +520,31 @@ out:
/* Undo iter limitation to current extent */
iov_iter_reexpand(dio->submit.iter, orig_count - copied);
if (copied)
- return copied;
+ return iomap_iter_advance(iter, copied);
return ret;
}
-static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter,
- struct iomap_dio *dio)
+static int iomap_dio_hole_iter(struct iomap_iter *iter, struct iomap_dio *dio)
{
loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);
dio->size += length;
if (!length)
return -EFAULT;
- return length;
+ return iomap_iter_advance(iter, length);
}
-static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
- struct iomap_dio *dio)
+static int iomap_dio_inline_iter(struct iomap_iter *iomi, struct iomap_dio *dio)
{
const struct iomap *iomap = &iomi->iomap;
struct iov_iter *iter = dio->submit.iter;
void *inline_data = iomap_inline_data(iomap, iomi->pos);
loff_t length = iomap_length(iomi);
loff_t pos = iomi->pos;
- size_t copied;
+ u64 copied;
+
+ if (WARN_ON_ONCE(!inline_data))
+ return -EIO;
if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
return -EIO;
@@ -512,11 +566,10 @@ static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
dio->size += copied;
if (!copied)
return -EFAULT;
- return copied;
+ return iomap_iter_advance(iomi, copied);
}
-static loff_t iomap_dio_iter(const struct iomap_iter *iter,
- struct iomap_dio *dio)
+static int iomap_dio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
{
switch (iter->iomap.type) {
case IOMAP_HOLE:
@@ -610,13 +663,10 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (iocb->ki_flags & IOCB_NOWAIT)
iomi.flags |= IOMAP_NOWAIT;
- if (iocb->ki_flags & IOCB_ATOMIC)
- iomi.flags |= IOMAP_ATOMIC;
+ if (dio_flags & IOMAP_DIO_FSBLOCK_ALIGNED)
+ dio->flags |= IOMAP_DIO_FSBLOCK_ALIGNED;
if (iov_iter_rw(iter) == READ) {
- /* reads can always complete inline */
- dio->flags |= IOMAP_DIO_INLINE_COMP;
-
if (iomi.pos >= dio->i_size)
goto out_free_dio;
@@ -630,15 +680,6 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
iomi.flags |= IOMAP_WRITE;
dio->flags |= IOMAP_DIO_WRITE;
- /*
- * Flag as supporting deferred completions, if the issuer
- * groks it. This can avoid a workqueue punt for writes.
- * We may later clear this flag if we need to do other IO
- * as part of this IO completion.
- */
- if (iocb->ki_flags & IOCB_DIO_CALLER_COMP)
- dio->flags |= IOMAP_DIO_CALLER_COMP;
-
if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
ret = -EAGAIN;
if (iomi.pos >= dio->i_size ||
@@ -647,6 +688,9 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
iomi.flags |= IOMAP_OVERWRITE_ONLY;
}
+ if (iocb->ki_flags & IOCB_ATOMIC)
+ iomi.flags |= IOMAP_ATOMIC;
+
/* for data sync or sync, we need sync completion processing */
if (iocb_is_dsync(iocb)) {
dio->flags |= IOMAP_DIO_NEED_SYNC;
@@ -665,6 +709,12 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
}
/*
+ * i_size updates must happen from process context.
+ */
+ if (iomi.pos + iomi.len > dio->i_size)
+ dio->flags |= IOMAP_DIO_COMP_WORK;
+
+ /*
* Try to invalidate cache pages for the range we are writing.
* If this invalidation fails, let the caller fall back to
* buffered I/O.
@@ -688,19 +738,19 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
}
goto out_free_dio;
}
+ }
- if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
- ret = sb_init_dio_done_wq(inode->i_sb);
- if (ret < 0)
- goto out_free_dio;
- }
+ if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
+ ret = sb_init_dio_done_wq(inode->i_sb);
+ if (ret < 0)
+ goto out_free_dio;
}
inode_dio_begin(inode);
blk_start_plug(&plug);
while ((ret = iomap_iter(&iomi, ops)) > 0) {
- iomi.processed = iomap_dio_iter(&iomi, dio);
+ iomi.status = iomap_dio_iter(&iomi, dio);
/*
* We can only poll for single bio I/Os.
@@ -736,9 +786,14 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
* If all the writes we issued were already written through to the
* media, we don't need to flush the cache on IO completion. Clear the
* sync flag for this case.
+ *
+ * Otherwise force the completion to run via the completion workqueue if
+ * any sync work is needed, as that must be performed from process context.
*/
if (dio->flags & IOMAP_DIO_WRITE_THROUGH)
dio->flags &= ~IOMAP_DIO_NEED_SYNC;
+ else if (dio->flags & IOMAP_DIO_NEED_SYNC)
+ dio->flags |= IOMAP_DIO_COMP_WORK;
/*
* We are about to drop our additional submission reference, which
@@ -796,15 +851,3 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
-
-static int __init iomap_dio_init(void)
-{
- zero_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
- IOMAP_ZERO_PAGE_ORDER);
-
- if (!zero_page)
- return -ENOMEM;
-
- return 0;
-}
-fs_initcall(iomap_dio_init);
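Two of the calculations above, restated as a hedged standalone sketch (the helper names are invented here and are not part of the patch): the per-mapping alignment rule, which requires filesystem-block alignment only when IOMAP_DIO_FSBLOCK_ALIGNED is set and otherwise falls back to the device logical block size, and the sub-block zeroing vector count, e.g. a 64k block zeroed through a 16k shared zero folio needs max(1, 65536 / 16384) = 4 bio vectors pointing at the same folio.

#include <linux/minmax.h>
#include <linux/types.h>

static bool example_dio_range_aligned(loff_t pos, loff_t length,
		unsigned int alignment)
{
	/* alignment is a power of two: fs block size or logical block size */
	return ((pos | length) & (alignment - 1)) == 0;
}

static unsigned int example_zero_bio_nr_vecs(unsigned int block_size,
		size_t zero_folio_size)
{
	/* mirrors: nr_vecs = max(1, i_blocksize(inode) / folio_size(zero_folio)) */
	return max_t(unsigned int, 1, block_size / zero_folio_size);
}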
diff --git a/fs/iomap/fiemap.c b/fs/iomap/fiemap.c
index 610ca6f1ec9b..d11dadff8286 100644
--- a/fs/iomap/fiemap.c
+++ b/fs/iomap/fiemap.c
@@ -2,9 +2,6 @@
/*
* Copyright (c) 2016-2021 Christoph Hellwig.
*/
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/fiemap.h>
#include <linux/pagemap.h>
@@ -39,24 +36,23 @@ static int iomap_to_fiemap(struct fiemap_extent_info *fi,
iomap->length, flags);
}
-static loff_t iomap_fiemap_iter(const struct iomap_iter *iter,
+static int iomap_fiemap_iter(struct iomap_iter *iter,
struct fiemap_extent_info *fi, struct iomap *prev)
{
int ret;
if (iter->iomap.type == IOMAP_HOLE)
- return iomap_length(iter);
+ goto advance;
ret = iomap_to_fiemap(fi, prev, 0);
*prev = iter->iomap;
- switch (ret) {
- case 0: /* success */
- return iomap_length(iter);
- case 1: /* extent array full */
- return 0;
- default: /* error */
+ if (ret < 0)
return ret;
- }
+ if (ret == 1) /* extent array full */
+ return 0;
+
+advance:
+ return iomap_iter_advance_full(iter);
}
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
@@ -78,7 +74,7 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
return ret;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_fiemap_iter(&iter, fi, &prev);
+ iter.status = iomap_fiemap_iter(&iter, fi, &prev);
if (prev.type != IOMAP_HOLE) {
ret = iomap_to_fiemap(fi, &prev, FIEMAP_EXTENT_LAST);
@@ -114,7 +110,7 @@ iomap_bmap(struct address_space *mapping, sector_t bno,
while ((ret = iomap_iter(&iter, ops)) > 0) {
if (iter.iomap.type == IOMAP_MAPPED)
bno = iomap_sector(&iter.iomap, iter.pos) >> blkshift;
- /* leave iter.processed unset to abort loop */
+ /* leave iter.status unset to abort loop */
}
if (ret)
return 0;
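For completeness, a hedged sketch of how a filesystem wires this helper into its inode operations; the iomap_fiemap() argument list used here reflects the mainline prototype at the time of writing, and example_read_iomap_ops is a placeholder for the filesystem's read-only iomap_ops, not something defined by this patch.

#include <linux/fiemap.h>
#include <linux/iomap.h>

extern const struct iomap_ops example_read_iomap_ops;	/* placeholder */

static int example_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		u64 start, u64 len)
{
	return iomap_fiemap(inode, fi, start, len, &example_read_iomap_ops);
}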
diff --git a/fs/iomap/internal.h b/fs/iomap/internal.h
new file mode 100644
index 000000000000..3a4e4aad2bd1
--- /dev/null
+++ b/fs/iomap/internal.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _IOMAP_INTERNAL_H
+#define _IOMAP_INTERNAL_H 1
+
+#define IOEND_BATCH_SIZE 4096
+
+u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend);
+
+#ifdef CONFIG_BLOCK
+int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
+ struct folio *folio, loff_t pos, size_t len);
+#else
+static inline int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
+ struct folio *folio, loff_t pos, size_t len)
+{
+ WARN_ON_ONCE(1);
+ return -EIO;
+}
+#endif /* CONFIG_BLOCK */
+
+#endif /* _IOMAP_INTERNAL_H */
diff --git a/fs/iomap/ioend.c b/fs/iomap/ioend.c
new file mode 100644
index 000000000000..86f44922ed3b
--- /dev/null
+++ b/fs/iomap/ioend.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2025 Christoph Hellwig.
+ */
+#include <linux/iomap.h>
+#include <linux/list_sort.h>
+#include <linux/pagemap.h>
+#include <linux/writeback.h>
+#include "internal.h"
+#include "trace.h"
+
+struct bio_set iomap_ioend_bioset;
+EXPORT_SYMBOL_GPL(iomap_ioend_bioset);
+
+struct iomap_ioend *iomap_init_ioend(struct inode *inode,
+ struct bio *bio, loff_t file_offset, u16 ioend_flags)
+{
+ struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);
+
+ atomic_set(&ioend->io_remaining, 1);
+ ioend->io_error = 0;
+ ioend->io_parent = NULL;
+ INIT_LIST_HEAD(&ioend->io_list);
+ ioend->io_flags = ioend_flags;
+ ioend->io_inode = inode;
+ ioend->io_offset = file_offset;
+ ioend->io_size = bio->bi_iter.bi_size;
+ ioend->io_sector = bio->bi_iter.bi_sector;
+ ioend->io_private = NULL;
+ return ioend;
+}
+EXPORT_SYMBOL_GPL(iomap_init_ioend);
+
+/*
+ * We're now finished for good with this ioend structure. Update the folio
+ * state, release holds on bios, and finally free up memory. Do not use the
+ * ioend after this.
+ */
+static u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend)
+{
+ struct inode *inode = ioend->io_inode;
+ struct bio *bio = &ioend->io_bio;
+ struct folio_iter fi;
+ u32 folio_count = 0;
+
+ if (ioend->io_error) {
+ mapping_set_error(inode->i_mapping, ioend->io_error);
+ if (!bio_flagged(bio, BIO_QUIET)) {
+ pr_err_ratelimited(
+"%s: writeback error on inode %lu, offset %lld, sector %llu",
+ inode->i_sb->s_id, inode->i_ino,
+ ioend->io_offset, ioend->io_sector);
+ }
+ }
+
+ /* walk all folios in bio, ending page IO on them */
+ bio_for_each_folio_all(fi, bio) {
+ iomap_finish_folio_write(inode, fi.folio, fi.length);
+ folio_count++;
+ }
+
+ bio_put(bio); /* frees the ioend */
+ return folio_count;
+}
+
+static void ioend_writeback_end_bio(struct bio *bio)
+{
+ struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);
+
+ ioend->io_error = blk_status_to_errno(bio->bi_status);
+ iomap_finish_ioend_buffered(ioend);
+}
+
+/*
+ * We cannot cancel the ioend directly in case of an error, so call the bio end
+ * I/O handler with the error status here to run the normal I/O completion
+ * handler.
+ */
+int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error)
+{
+ struct iomap_ioend *ioend = wpc->wb_ctx;
+
+ if (!ioend->io_bio.bi_end_io)
+ ioend->io_bio.bi_end_io = ioend_writeback_end_bio;
+
+ if (WARN_ON_ONCE(wpc->iomap.flags & IOMAP_F_ANON_WRITE))
+ error = -EIO;
+
+ if (error) {
+ ioend->io_bio.bi_status = errno_to_blk_status(error);
+ bio_endio(&ioend->io_bio);
+ return error;
+ }
+
+ submit_bio(&ioend->io_bio);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_ioend_writeback_submit);
+
+static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
+ loff_t pos, u16 ioend_flags)
+{
+ struct bio *bio;
+
+ bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
+ REQ_OP_WRITE | wbc_to_write_flags(wpc->wbc),
+ GFP_NOFS, &iomap_ioend_bioset);
+ bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
+ bio->bi_write_hint = wpc->inode->i_write_hint;
+ wbc_init_bio(wpc->wbc, bio);
+ wpc->nr_folios = 0;
+ return iomap_init_ioend(wpc->inode, bio, pos, ioend_flags);
+}
+
+static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos,
+ u16 ioend_flags)
+{
+ struct iomap_ioend *ioend = wpc->wb_ctx;
+
+ if (ioend_flags & IOMAP_IOEND_BOUNDARY)
+ return false;
+ if ((ioend_flags & IOMAP_IOEND_NOMERGE_FLAGS) !=
+ (ioend->io_flags & IOMAP_IOEND_NOMERGE_FLAGS))
+ return false;
+ if (pos != ioend->io_offset + ioend->io_size)
+ return false;
+ if (!(wpc->iomap.flags & IOMAP_F_ANON_WRITE) &&
+ iomap_sector(&wpc->iomap, pos) != bio_end_sector(&ioend->io_bio))
+ return false;
+ /*
+ * Limit ioend bio chain lengths to minimise IO completion latency. This
+ * also prevents long tight loops ending page writeback on all the
+ * folios in the ioend.
+ */
+ if (wpc->nr_folios >= IOEND_BATCH_SIZE)
+ return false;
+ return true;
+}
+
+/*
+ * Test to see if we have an existing ioend structure that we could append to
+ * first; otherwise finish off the current ioend and start another.
+ *
+ * If a new ioend is created and cached, the old ioend is submitted to the block
+ * layer instantly. Batching optimisations are provided by higher level block
+ * plugging.
+ *
+ * At the end of a writeback pass, there will be a cached ioend remaining on the
+ * writepage context that the caller will need to submit.
+ */
+ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
+ loff_t pos, loff_t end_pos, unsigned int dirty_len)
+{
+ struct iomap_ioend *ioend = wpc->wb_ctx;
+ size_t poff = offset_in_folio(folio, pos);
+ unsigned int ioend_flags = 0;
+ unsigned int map_len = min_t(u64, dirty_len,
+ wpc->iomap.offset + wpc->iomap.length - pos);
+ int error;
+
+ trace_iomap_add_to_ioend(wpc->inode, pos, dirty_len, &wpc->iomap);
+
+ WARN_ON_ONCE(!folio->private && map_len < dirty_len);
+
+ switch (wpc->iomap.type) {
+ case IOMAP_INLINE:
+ WARN_ON_ONCE(1);
+ return -EIO;
+ case IOMAP_HOLE:
+ return map_len;
+ default:
+ break;
+ }
+
+ if (wpc->iomap.type == IOMAP_UNWRITTEN)
+ ioend_flags |= IOMAP_IOEND_UNWRITTEN;
+ if (wpc->iomap.flags & IOMAP_F_SHARED)
+ ioend_flags |= IOMAP_IOEND_SHARED;
+ if (folio_test_dropbehind(folio))
+ ioend_flags |= IOMAP_IOEND_DONTCACHE;
+ if (pos == wpc->iomap.offset && (wpc->iomap.flags & IOMAP_F_BOUNDARY))
+ ioend_flags |= IOMAP_IOEND_BOUNDARY;
+
+ if (!ioend || !iomap_can_add_to_ioend(wpc, pos, ioend_flags)) {
+new_ioend:
+ if (ioend) {
+ error = wpc->ops->writeback_submit(wpc, 0);
+ if (error)
+ return error;
+ }
+ wpc->wb_ctx = ioend = iomap_alloc_ioend(wpc, pos, ioend_flags);
+ }
+
+ if (!bio_add_folio(&ioend->io_bio, folio, map_len, poff))
+ goto new_ioend;
+
+ /*
+ * Clamp io_offset and io_size to the incore EOF so that ondisk
+ * file size updates in the ioend completion are byte-accurate.
+ * This avoids recovering files with zeroed tail regions when
+ * writeback races with appending writes:
+ *
+ * Thread 1: Thread 2:
+ * ------------ -----------
+ * write [A, A+B]
+ * update inode size to A+B
+ * submit I/O [A, A+BS]
+ * write [A+B, A+B+C]
+ * update inode size to A+B+C
+ * <I/O completes, updates disk size to min(A+B+C, A+BS)>
+ * <power failure>
+ *
+ * After reboot:
+ * 1) with A+B+C < A+BS, the file has zero padding in range
+ * [A+B, A+B+C]
+ *
+ * |< Block Size (BS) >|
+ * |DDDDDDDDDDDD0000000000000|
+ * ^ ^ ^
+ * A A+B A+B+C
+ * (EOF)
+ *
+ * 2) with A+B+C > A+BS, the file has zero padding in range
+ * [A+B, A+BS]
+ *
+ * |< Block Size (BS) >|< Block Size (BS) >|
+ * |DDDDDDDDDDDD0000000000000|00000000000000000000000000|
+ * ^ ^ ^ ^
+ * A A+B A+BS A+B+C
+ * (EOF)
+ *
+ * D = Valid Data
+ * 0 = Zero Padding
+ *
+ * Note that this defeats the ability to chain the ioends of
+ * appending writes.
+ */
+ ioend->io_size += map_len;
+ if (ioend->io_offset + ioend->io_size > end_pos)
+ ioend->io_size = end_pos - ioend->io_offset;
+
+ wbc_account_cgroup_owner(wpc->wbc, folio, map_len);
+ return map_len;
+}
+EXPORT_SYMBOL_GPL(iomap_add_to_ioend);
+
+static u32 iomap_finish_ioend(struct iomap_ioend *ioend, int error)
+{
+ if (ioend->io_parent) {
+ struct bio *bio = &ioend->io_bio;
+
+ ioend = ioend->io_parent;
+ bio_put(bio);
+ }
+
+ if (error)
+ cmpxchg(&ioend->io_error, 0, error);
+
+ if (!atomic_dec_and_test(&ioend->io_remaining))
+ return 0;
+ if (ioend->io_flags & IOMAP_IOEND_DIRECT)
+ return iomap_finish_ioend_direct(ioend);
+ return iomap_finish_ioend_buffered(ioend);
+}
+
+/*
+ * Ioend completion routine for merged bios. This can only be called from task
+ * contexts as merged ioends can be of unbound length. Hence we have to break up
+ * the writeback completions into manageable chunks to avoid long scheduler
+ * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
+ * good batch processing throughput without creating adverse scheduler latency
+ * conditions.
+ */
+void iomap_finish_ioends(struct iomap_ioend *ioend, int error)
+{
+ struct list_head tmp;
+ u32 completions;
+
+ might_sleep();
+
+ list_replace_init(&ioend->io_list, &tmp);
+ completions = iomap_finish_ioend(ioend, error);
+
+ while (!list_empty(&tmp)) {
+ if (completions > IOEND_BATCH_SIZE * 8) {
+ cond_resched();
+ completions = 0;
+ }
+ ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
+ list_del_init(&ioend->io_list);
+ completions += iomap_finish_ioend(ioend, error);
+ }
+}
+EXPORT_SYMBOL_GPL(iomap_finish_ioends);
+
+/*
+ * We can merge two adjacent ioends if they have the same set of work to do.
+ */
+static bool iomap_ioend_can_merge(struct iomap_ioend *ioend,
+ struct iomap_ioend *next)
+{
+ if (ioend->io_bio.bi_status != next->io_bio.bi_status)
+ return false;
+ if (next->io_flags & IOMAP_IOEND_BOUNDARY)
+ return false;
+ if ((ioend->io_flags & IOMAP_IOEND_NOMERGE_FLAGS) !=
+ (next->io_flags & IOMAP_IOEND_NOMERGE_FLAGS))
+ return false;
+ if (ioend->io_offset + ioend->io_size != next->io_offset)
+ return false;
+ /*
+ * Do not merge physically discontiguous ioends. The filesystem
+ * completion functions will have to iterate the physical
+ * discontiguities even if we merge the ioends at a logical level, so
+ * we don't gain anything by merging physical discontiguities here.
+ *
+ * We cannot use bio->bi_iter.bi_sector here as it is modified during
+ * submission so does not point to the start sector of the bio at
+ * completion.
+ */
+ if (ioend->io_sector + (ioend->io_size >> SECTOR_SHIFT) !=
+ next->io_sector)
+ return false;
+ return true;
+}
+
+void iomap_ioend_try_merge(struct iomap_ioend *ioend,
+ struct list_head *more_ioends)
+{
+ struct iomap_ioend *next;
+
+ INIT_LIST_HEAD(&ioend->io_list);
+
+ while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
+ io_list))) {
+ if (!iomap_ioend_can_merge(ioend, next))
+ break;
+ list_move_tail(&next->io_list, &ioend->io_list);
+ ioend->io_size += next->io_size;
+ }
+}
+EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
+
+static int iomap_ioend_compare(void *priv, const struct list_head *a,
+ const struct list_head *b)
+{
+ struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
+ struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
+
+ if (ia->io_offset < ib->io_offset)
+ return -1;
+ if (ia->io_offset > ib->io_offset)
+ return 1;
+ return 0;
+}
+
+void iomap_sort_ioends(struct list_head *ioend_list)
+{
+ list_sort(NULL, ioend_list, iomap_ioend_compare);
+}
+EXPORT_SYMBOL_GPL(iomap_sort_ioends);
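
A hedged sketch of how a deferred completion path might string the exported helpers above together; the worker function and its caller are hypothetical, only the iomap_* calls and their signatures come from this file:

/* assumes <linux/iomap.h> for the iomap_ioend declarations */
static void example_finish_ioend_list(struct list_head *ioend_list, int error)
{
	struct iomap_ioend *ioend;

	/* order by file offset so contiguous ioends become list neighbours */
	iomap_sort_ioends(ioend_list);

	while ((ioend = list_first_entry_or_null(ioend_list,
			struct iomap_ioend, io_list))) {
		list_del_init(&ioend->io_list);
		/* fold mergeable neighbours onto ioend->io_list ... */
		iomap_ioend_try_merge(ioend, ioend_list);
		/* ... then complete the whole chain with batching */
		iomap_finish_ioends(ioend, error);
	}
}

Sorting first matters because iomap_ioend_try_merge() only folds in list neighbours that are logically and physically contiguous.
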
+
+/*
+ * Split up to the first @max_len bytes from @ioend if the ioend covers more
+ * than @max_len bytes.
+ *
+ * If @is_append is set, the split will be based on the hardware limits for
+ * REQ_OP_ZONE_APPEND commands and can be less than @max_len if the hardware
+ * limits don't allow the entire @max_len length.
+ *
+ * The bio embedded into @ioend must be a REQ_OP_WRITE because the block layer
+ * does not allow splitting REQ_OP_ZONE_APPEND bios. The file system has to
+ * switch the operation after this call, but before submitting the bio.
+ */
+struct iomap_ioend *iomap_split_ioend(struct iomap_ioend *ioend,
+ unsigned int max_len, bool is_append)
+{
+ struct bio *bio = &ioend->io_bio;
+ struct iomap_ioend *split_ioend;
+ unsigned int nr_segs;
+ int sector_offset;
+ struct bio *split;
+
+ if (is_append) {
+ struct queue_limits *lim = bdev_limits(bio->bi_bdev);
+
+ max_len = min(max_len,
+ lim->max_zone_append_sectors << SECTOR_SHIFT);
+
+ sector_offset = bio_split_rw_at(bio, lim, &nr_segs, max_len);
+ if (unlikely(sector_offset < 0))
+ return ERR_PTR(sector_offset);
+ if (!sector_offset)
+ return NULL;
+ } else {
+ if (bio->bi_iter.bi_size <= max_len)
+ return NULL;
+ sector_offset = max_len >> SECTOR_SHIFT;
+ }
+
+ /* ensure the split ioend is still block size aligned */
+ sector_offset = ALIGN_DOWN(sector_offset << SECTOR_SHIFT,
+ i_blocksize(ioend->io_inode)) >> SECTOR_SHIFT;
+
+ split = bio_split(bio, sector_offset, GFP_NOFS, &iomap_ioend_bioset);
+ if (IS_ERR(split))
+ return ERR_CAST(split);
+ split->bi_private = bio->bi_private;
+ split->bi_end_io = bio->bi_end_io;
+
+ split_ioend = iomap_init_ioend(ioend->io_inode, split, ioend->io_offset,
+ ioend->io_flags);
+ split_ioend->io_parent = ioend;
+
+ atomic_inc(&ioend->io_remaining);
+ ioend->io_offset += split_ioend->io_size;
+ ioend->io_size -= split_ioend->io_size;
+
+ split_ioend->io_sector = ioend->io_sector;
+ if (!is_append)
+ ioend->io_sector += (split_ioend->io_size >> SECTOR_SHIFT);
+ return split_ioend;
+}
+EXPORT_SYMBOL_GPL(iomap_split_ioend);
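
A minimal sketch of the split-then-retag sequence the comment above requires for zone append; the surrounding function, the choice to submit the split head, and the bi_opf rewrite are illustrative assumptions, not taken from an in-tree caller:

/* assumes <linux/iomap.h> and <linux/bio.h> */
static int example_submit_zone_append(struct iomap_ioend *ioend,
		unsigned int max_len)
{
	struct iomap_ioend *split;

	split = iomap_split_ioend(ioend, max_len, true);
	if (IS_ERR(split))
		return PTR_ERR(split);
	if (split)
		ioend = split;	/* the head fits the zone append limits */

	/* only now switch the op, after the split but before submission */
	ioend->io_bio.bi_opf &= ~REQ_OP_MASK;
	ioend->io_bio.bi_opf |= REQ_OP_ZONE_APPEND;
	submit_bio(&ioend->io_bio);
	return 0;
}
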
+
+static int __init iomap_ioend_init(void)
+{
+ return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
+ offsetof(struct iomap_ioend, io_bio),
+ BIOSET_NEED_BVECS);
+}
+fs_initcall(iomap_ioend_init);
diff --git a/fs/iomap/iter.c b/fs/iomap/iter.c
index 3790918646af..8692e5e41c6d 100644
--- a/fs/iomap/iter.c
+++ b/fs/iomap/iter.c
@@ -3,44 +3,30 @@
* Copyright (C) 2010 Red Hat, Inc.
* Copyright (c) 2016-2021 Christoph Hellwig.
*/
-#include <linux/fs.h>
#include <linux/iomap.h>
#include "trace.h"
-/*
- * Advance to the next range we need to map.
- *
- * If the iomap is marked IOMAP_F_STALE, it means the existing map was not fully
- * processed - it was aborted because the extent the iomap spanned may have been
- * changed during the operation. In this case, the iteration behaviour is to
- * remap the unprocessed range of the iter, and that means we may need to remap
- * even when we've made no progress (i.e. iter->processed = 0). Hence the
- * "finished iterating" case needs to distinguish between
- * (processed = 0) meaning we are done and (processed = 0 && stale) meaning we
- * need to remap the entire remaining range.
- */
-static inline int iomap_iter_advance(struct iomap_iter *iter)
+static inline void iomap_iter_reset_iomap(struct iomap_iter *iter)
{
- bool stale = iter->iomap.flags & IOMAP_F_STALE;
- int ret = 1;
-
- /* handle the previous iteration (if any) */
- if (iter->iomap.length) {
- if (iter->processed < 0)
- return iter->processed;
- if (WARN_ON_ONCE(iter->processed > iomap_length(iter)))
- return -EIO;
- iter->pos += iter->processed;
- iter->len -= iter->processed;
- if (!iter->len || (!iter->processed && !stale))
- ret = 0;
+ if (iter->fbatch) {
+ folio_batch_release(iter->fbatch);
+ kfree(iter->fbatch);
+ iter->fbatch = NULL;
}
- /* clear the per iteration state */
- iter->processed = 0;
+ iter->status = 0;
memset(&iter->iomap, 0, sizeof(iter->iomap));
memset(&iter->srcmap, 0, sizeof(iter->srcmap));
- return ret;
+}
+
+/* Advance the current iterator position and decrement the remaining length */
+int iomap_iter_advance(struct iomap_iter *iter, u64 count)
+{
+ if (WARN_ON_ONCE(count > iomap_length(iter)))
+ return -EIO;
+ iter->pos += count;
+ iter->len -= count;
+ return 0;
}
static inline void iomap_iter_done(struct iomap_iter *iter)
@@ -50,6 +36,8 @@ static inline void iomap_iter_done(struct iomap_iter *iter)
WARN_ON_ONCE(iter->iomap.offset + iter->iomap.length <= iter->pos);
WARN_ON_ONCE(iter->iomap.flags & IOMAP_F_STALE);
+ iter->iter_start_pos = iter->pos;
+
trace_iomap_iter_dstmap(iter->inode, &iter->iomap);
if (iter->srcmap.type != IOMAP_HOLE)
trace_iomap_iter_srcmap(iter->inode, &iter->srcmap);
@@ -67,26 +55,58 @@ static inline void iomap_iter_done(struct iomap_iter *iter)
 * function must be called in a loop that continues as long as it returns a
* positive value. If 0 or a negative value is returned, the caller must not
* return to the loop body. Within a loop body, there are two ways to break out
- * of the loop body: leave @iter.processed unchanged, or set it to a negative
+ * of the loop body: leave @iter.status unchanged, or set it to a negative
* errno.
*/
int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops)
{
+ bool stale = iter->iomap.flags & IOMAP_F_STALE;
+ ssize_t advanced;
+ u64 olen;
int ret;
- if (iter->iomap.length && ops->iomap_end) {
- ret = ops->iomap_end(iter->inode, iter->pos, iomap_length(iter),
- iter->processed > 0 ? iter->processed : 0,
- iter->flags, &iter->iomap);
- if (ret < 0 && !iter->processed)
+ trace_iomap_iter(iter, ops, _RET_IP_);
+
+ if (!iter->iomap.length)
+ goto begin;
+
+ /*
+ * Calculate how far the iter was advanced and the original length bytes
+ * for ->iomap_end().
+ */
+ advanced = iter->pos - iter->iter_start_pos;
+ olen = iter->len + advanced;
+
+ if (ops->iomap_end) {
+ ret = ops->iomap_end(iter->inode, iter->iter_start_pos,
+ iomap_length_trim(iter, iter->iter_start_pos,
+ olen),
+ advanced, iter->flags, &iter->iomap);
+ if (ret < 0 && !advanced)
return ret;
}
- trace_iomap_iter(iter, ops, _RET_IP_);
- ret = iomap_iter_advance(iter);
+ /* detect old return semantics where this would advance */
+ if (WARN_ON_ONCE(iter->status > 0))
+ iter->status = -EIO;
+
+ /*
+ * Use iter->len to determine whether to continue onto the next mapping.
+ * Explicitly terminate on error status or if the current iter has not
+ * advanced at all (i.e. no work was done for some reason) unless the
+ * mapping has been marked stale and needs to be reprocessed.
+ */
+ if (iter->status < 0)
+ ret = iter->status;
+ else if (iter->len == 0 || (!advanced && !stale))
+ ret = 0;
+ else
+ ret = 1;
+ iomap_iter_reset_iomap(iter);
if (ret <= 0)
return ret;
+begin:
ret = ops->iomap_begin(iter->inode, iter->pos, iter->len, iter->flags,
&iter->iomap, &iter->srcmap);
if (ret < 0)
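
Since the conversion from iter.processed to iter.status recurs in every hunk below (seek, swapfile, and so on), here is a hedged sketch of the new shape of an iteration loop; example_iter() and example_apply() are invented names, the iomap_* signatures are taken from this hunk:

static int example_iter(struct iomap_iter *iter)
{
	u64 length = iomap_length(iter);

	/* ... do the actual work on [iter->pos, iter->pos + length) ... */

	/* previously: return length via the loff_t iter->processed */
	/* now: advance the iter explicitly and return an int status */
	return iomap_iter_advance(iter, length);
}

static int example_apply(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= inode,
		.pos	= pos,
		.len	= len,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = example_iter(&iter);
	return ret;
}
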
diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c
index a845c012b50c..6cbc587c93da 100644
--- a/fs/iomap/seek.c
+++ b/fs/iomap/seek.c
@@ -3,14 +3,10 @@
* Copyright (C) 2017 Red Hat, Inc.
* Copyright (c) 2018-2021 Christoph Hellwig.
*/
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
-#include <linux/pagevec.h>
-static loff_t iomap_seek_hole_iter(const struct iomap_iter *iter,
+static int iomap_seek_hole_iter(struct iomap_iter *iter,
loff_t *hole_pos)
{
loff_t length = iomap_length(iter);
@@ -20,13 +16,13 @@ static loff_t iomap_seek_hole_iter(const struct iomap_iter *iter,
*hole_pos = mapping_seek_hole_data(iter->inode->i_mapping,
iter->pos, iter->pos + length, SEEK_HOLE);
if (*hole_pos == iter->pos + length)
- return length;
+ return iomap_iter_advance(iter, length);
return 0;
case IOMAP_HOLE:
*hole_pos = iter->pos;
return 0;
default:
- return length;
+ return iomap_iter_advance(iter, length);
}
}
@@ -47,7 +43,7 @@ iomap_seek_hole(struct inode *inode, loff_t pos, const struct iomap_ops *ops)
iter.len = size - pos;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_seek_hole_iter(&iter, &pos);
+ iter.status = iomap_seek_hole_iter(&iter, &pos);
if (ret < 0)
return ret;
if (iter.len) /* found hole before EOF */
@@ -56,19 +52,19 @@ iomap_seek_hole(struct inode *inode, loff_t pos, const struct iomap_ops *ops)
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);
-static loff_t iomap_seek_data_iter(const struct iomap_iter *iter,
+static int iomap_seek_data_iter(struct iomap_iter *iter,
loff_t *hole_pos)
{
loff_t length = iomap_length(iter);
switch (iter->iomap.type) {
case IOMAP_HOLE:
- return length;
+ return iomap_iter_advance(iter, length);
case IOMAP_UNWRITTEN:
*hole_pos = mapping_seek_hole_data(iter->inode->i_mapping,
iter->pos, iter->pos + length, SEEK_DATA);
if (*hole_pos < 0)
- return length;
+ return iomap_iter_advance(iter, length);
return 0;
default:
*hole_pos = iter->pos;
@@ -93,7 +89,7 @@ iomap_seek_data(struct inode *inode, loff_t pos, const struct iomap_ops *ops)
iter.len = size - pos;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_seek_data_iter(&iter, &pos);
+ iter.status = iomap_seek_data_iter(&iter, &pos);
if (ret < 0)
return ret;
if (iter.len) /* found data before EOF */
diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c
index 5fc0ac36dee3..0db77c449467 100644
--- a/fs/iomap/swapfile.c
+++ b/fs/iomap/swapfile.c
@@ -3,9 +3,6 @@
* Copyright (C) 2018 Oracle. All Rights Reserved.
* Author: Darrick J. Wong <darrick.wong@oracle.com>
*/
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/swap.h>
@@ -94,7 +91,7 @@ static int iomap_swapfile_fail(struct iomap_swapfile_info *isi, const char *str)
* swap only cares about contiguous page-aligned physical extents and makes no
* distinction between written and unwritten extents.
*/
-static loff_t iomap_swapfile_iter(const struct iomap_iter *iter,
+static int iomap_swapfile_iter(struct iomap_iter *iter,
struct iomap *iomap, struct iomap_swapfile_info *isi)
{
switch (iomap->type) {
@@ -132,7 +129,8 @@ static loff_t iomap_swapfile_iter(const struct iomap_iter *iter,
return error;
memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
}
- return iomap_length(iter);
+
+ return iomap_iter_advance_full(iter);
}
/*
@@ -166,7 +164,7 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
return ret;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_swapfile_iter(&iter, &iter.iomap, &isi);
+ iter.status = iomap_swapfile_iter(&iter, &iter.iomap, &isi);
if (ret < 0)
return ret;
@@ -189,7 +187,6 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
*pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
sis->max = isi.nr_pages;
sis->pages = isi.nr_pages - 1;
- sis->highest_bit = isi.nr_pages - 1;
return isi.nr_extents;
}
EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
diff --git a/fs/iomap/trace.c b/fs/iomap/trace.c
index 728d5443daf5..da217246b1a9 100644
--- a/fs/iomap/trace.c
+++ b/fs/iomap/trace.c
@@ -3,7 +3,6 @@
* Copyright (c) 2019 Christoph Hellwig
*/
#include <linux/iomap.h>
-#include <linux/uio.h>
/*
* We include this last to have the helpers above available for the trace
diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h
index 4118a42cdab0..532787277b16 100644
--- a/fs/iomap/trace.h
+++ b/fs/iomap/trace.h
@@ -79,11 +79,12 @@ DECLARE_EVENT_CLASS(iomap_range_class,
DEFINE_EVENT(iomap_range_class, name, \
TP_PROTO(struct inode *inode, loff_t off, u64 len),\
TP_ARGS(inode, off, len))
-DEFINE_RANGE_EVENT(iomap_writepage);
+DEFINE_RANGE_EVENT(iomap_writeback_folio);
DEFINE_RANGE_EVENT(iomap_release_folio);
DEFINE_RANGE_EVENT(iomap_invalidate_folio);
DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail);
DEFINE_RANGE_EVENT(iomap_dio_rw_queued);
+DEFINE_RANGE_EVENT(iomap_zero_iter);
#define IOMAP_TYPE_STRINGS \
{ IOMAP_HOLE, "HOLE" }, \
@@ -99,7 +100,11 @@ DEFINE_RANGE_EVENT(iomap_dio_rw_queued);
{ IOMAP_FAULT, "FAULT" }, \
{ IOMAP_DIRECT, "DIRECT" }, \
{ IOMAP_NOWAIT, "NOWAIT" }, \
- { IOMAP_ATOMIC, "ATOMIC" }
+ { IOMAP_OVERWRITE_ONLY, "OVERWRITE_ONLY" }, \
+ { IOMAP_UNSHARE, "UNSHARE" }, \
+ { IOMAP_DAX, "DAX" }, \
+ { IOMAP_ATOMIC, "ATOMIC" }, \
+ { IOMAP_DONTCACHE, "DONTCACHE" }
#define IOMAP_F_FLAGS_STRINGS \
{ IOMAP_F_NEW, "NEW" }, \
@@ -107,12 +112,20 @@ DEFINE_RANGE_EVENT(iomap_dio_rw_queued);
{ IOMAP_F_SHARED, "SHARED" }, \
{ IOMAP_F_MERGED, "MERGED" }, \
{ IOMAP_F_BUFFER_HEAD, "BH" }, \
- { IOMAP_F_SIZE_CHANGED, "SIZE_CHANGED" }
+ { IOMAP_F_XATTR, "XATTR" }, \
+ { IOMAP_F_BOUNDARY, "BOUNDARY" }, \
+ { IOMAP_F_ANON_WRITE, "ANON_WRITE" }, \
+ { IOMAP_F_ATOMIC_BIO, "ATOMIC_BIO" }, \
+ { IOMAP_F_PRIVATE, "PRIVATE" }, \
+ { IOMAP_F_SIZE_CHANGED, "SIZE_CHANGED" }, \
+ { IOMAP_F_STALE, "STALE" }
+
#define IOMAP_DIO_STRINGS \
- {IOMAP_DIO_FORCE_WAIT, "DIO_FORCE_WAIT" }, \
- {IOMAP_DIO_OVERWRITE_ONLY, "DIO_OVERWRITE_ONLY" }, \
- {IOMAP_DIO_PARTIAL, "DIO_PARTIAL" }
+ {IOMAP_DIO_FORCE_WAIT, "DIO_FORCE_WAIT" }, \
+ {IOMAP_DIO_OVERWRITE_ONLY, "DIO_OVERWRITE_ONLY" }, \
+ {IOMAP_DIO_PARTIAL, "DIO_PARTIAL" }, \
+ {IOMAP_DIO_FSBLOCK_ALIGNED, "DIO_FSBLOCK_ALIGNED" }
DECLARE_EVENT_CLASS(iomap_class,
TP_PROTO(struct inode *inode, struct iomap *iomap),
@@ -138,7 +151,7 @@ DECLARE_EVENT_CLASS(iomap_class,
__entry->bdev = iomap->bdev ? iomap->bdev->bd_dev : 0;
),
TP_printk("dev %d:%d ino 0x%llx bdev %d:%d addr 0x%llx offset 0x%llx "
- "length 0x%llx type %s flags %s",
+ "length 0x%llx type %s (0x%x) flags %s (0x%x)",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
MAJOR(__entry->bdev), MINOR(__entry->bdev),
@@ -146,7 +159,9 @@ DECLARE_EVENT_CLASS(iomap_class,
__entry->offset,
__entry->length,
__print_symbolic(__entry->type, IOMAP_TYPE_STRINGS),
- __print_flags(__entry->flags, "|", IOMAP_F_FLAGS_STRINGS))
+ __entry->type,
+ __print_flags(__entry->flags, "|", IOMAP_F_FLAGS_STRINGS),
+ __entry->flags)
)
#define DEFINE_IOMAP_EVENT(name) \
@@ -156,7 +171,7 @@ DEFINE_EVENT(iomap_class, name, \
DEFINE_IOMAP_EVENT(iomap_iter_dstmap);
DEFINE_IOMAP_EVENT(iomap_iter_srcmap);
-TRACE_EVENT(iomap_writepage_map,
+TRACE_EVENT(iomap_add_to_ioend,
TP_PROTO(struct inode *inode, u64 pos, unsigned int dirty_len,
struct iomap *iomap),
TP_ARGS(inode, pos, dirty_len, iomap),
@@ -185,7 +200,7 @@ TRACE_EVENT(iomap_writepage_map,
__entry->bdev = iomap->bdev ? iomap->bdev->bd_dev : 0;
),
TP_printk("dev %d:%d ino 0x%llx bdev %d:%d pos 0x%llx dirty len 0x%llx "
- "addr 0x%llx offset 0x%llx length 0x%llx type %s flags %s",
+ "addr 0x%llx offset 0x%llx length 0x%llx type %s (0x%x) flags %s (0x%x)",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
MAJOR(__entry->bdev), MINOR(__entry->bdev),
@@ -195,7 +210,9 @@ TRACE_EVENT(iomap_writepage_map,
__entry->offset,
__entry->length,
__print_symbolic(__entry->type, IOMAP_TYPE_STRINGS),
- __print_flags(__entry->flags, "|", IOMAP_F_FLAGS_STRINGS))
+ __entry->type,
+ __print_flags(__entry->flags, "|", IOMAP_F_FLAGS_STRINGS),
+ __entry->flags)
);
TRACE_EVENT(iomap_iter,
@@ -207,7 +224,7 @@ TRACE_EVENT(iomap_iter,
__field(u64, ino)
__field(loff_t, pos)
__field(u64, length)
- __field(s64, processed)
+ __field(int, status)
__field(unsigned int, flags)
__field(const void *, ops)
__field(unsigned long, caller)
@@ -217,17 +234,17 @@ TRACE_EVENT(iomap_iter,
__entry->ino = iter->inode->i_ino;
__entry->pos = iter->pos;
__entry->length = iomap_length(iter);
- __entry->processed = iter->processed;
+ __entry->status = iter->status;
__entry->flags = iter->flags;
__entry->ops = ops;
__entry->caller = caller;
),
- TP_printk("dev %d:%d ino 0x%llx pos 0x%llx length 0x%llx processed %lld flags %s (0x%x) ops %ps caller %pS",
+ TP_printk("dev %d:%d ino 0x%llx pos 0x%llx length 0x%llx status %d flags %s (0x%x) ops %ps caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->pos,
__entry->length,
- __entry->processed,
+ __entry->status,
__print_flags(__entry->flags, "|", IOMAP_FLAGS_STRINGS),
__entry->flags,
__entry->ops,
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index 34d5baa5d88a..5f3b6da0e022 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -301,7 +301,6 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
*/
static int zisofs_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
int err;
@@ -311,16 +310,15 @@ static int zisofs_read_folio(struct file *file, struct folio *folio)
PAGE_SHIFT <= zisofs_block_shift ?
(1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
struct page **pages;
- pgoff_t index = page->index, end_index;
+ pgoff_t index = folio->index, end_index;
end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
/*
- * If this page is wholly outside i_size we just return zero;
+ * If this folio is wholly outside i_size we just return zero;
* do_generic_file_read() will handle this for us
*/
if (index >= end_index) {
- SetPageUptodate(page);
- unlock_page(page);
+ folio_end_read(folio, true);
return 0;
}
@@ -338,10 +336,10 @@ static int zisofs_read_folio(struct file *file, struct folio *folio)
pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1),
sizeof(*pages), GFP_KERNEL);
if (!pages) {
- unlock_page(page);
+ folio_unlock(folio);
return -ENOMEM;
}
- pages[full_page] = page;
+ pages[full_page] = &folio->page;
for (i = 0; i < pcount; i++, index++) {
if (i != full_page)
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
index eb2f8273e6f1..09df40b612fb 100644
--- a/fs/isofs/dir.c
+++ b/fs/isofs/dir.c
@@ -147,7 +147,8 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
de = tmpde;
}
/* Basic sanity check, whether name doesn't exceed dir entry */
- if (de_len < de->name_len[0] +
+ if (de_len < sizeof(struct iso_directory_record) ||
+ de_len < de->name_len[0] +
sizeof(struct iso_directory_record)) {
printk(KERN_NOTICE "iso9660: Corrupted directory entry"
" in block %lu of inode %lu\n", block,
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
index 35768a63fb1d..421d247fae52 100644
--- a/fs/isofs/export.c
+++ b/fs/isofs/export.c
@@ -180,7 +180,7 @@ static struct dentry *isofs_fh_to_parent(struct super_block *sb,
return NULL;
return isofs_export_iget(sb,
- fh_len > 2 ? ifid->parent_block : 0,
+ fh_len > 3 ? ifid->parent_block : 0,
ifid->parent_offset,
fh_len > 4 ? ifid->parent_generation : 0);
}
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 47038e660812..b7cbe126faf3 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -610,6 +610,11 @@ static int isofs_fill_super(struct super_block *s, struct fs_context *fc)
goto out_freesbi;
}
opt->blocksize = sb_min_blocksize(s, opt->blocksize);
+ if (!opt->blocksize) {
+ printk(KERN_ERR
+ "ISOFS: unable to set blocksize\n");
+ goto out_freesbi;
+ }
sbi->s_high_sierra = 0; /* default is iso9660 */
sbi->s_session = opt->session;
@@ -939,7 +944,7 @@ root_found:
sbi->s_check = opt->check;
if (table)
- s->s_d_op = &isofs_dentry_ops[table - 1];
+ set_default_d_op(s, &isofs_dentry_ops[table - 1]);
/* get the root dentry */
s->s_root = d_make_root(inode);
@@ -1275,6 +1280,7 @@ static int isofs_read_inode(struct inode *inode, int relocated)
unsigned long offset;
struct iso_inode_info *ei = ISOFS_I(inode);
int ret = -EIO;
+ struct timespec64 ts;
block = ei->i_iget5_block;
bh = sb_bread(inode->i_sb, block);
@@ -1387,8 +1393,10 @@ static int isofs_read_inode(struct inode *inode, int relocated)
inode->i_ino, de->flags[-high_sierra]);
}
#endif
- inode_set_mtime_to_ts(inode,
- inode_set_atime_to_ts(inode, inode_set_ctime(inode, iso_date(de->date, high_sierra), 0)));
+ ts = iso_date(de->date, high_sierra ? ISO_DATE_HIGH_SIERRA : 0);
+ inode_set_ctime_to_ts(inode, ts);
+ inode_set_atime_to_ts(inode, ts);
+ inode_set_mtime_to_ts(inode, ts);
ei->i_first_extent = (isonum_733(de->extent) +
isonum_711(de->ext_attr_length));
@@ -1437,9 +1445,16 @@ static int isofs_read_inode(struct inode *inode, int relocated)
inode->i_op = &page_symlink_inode_operations;
inode_nohighmem(inode);
inode->i_data.a_ops = &isofs_symlink_aops;
- } else
+ } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+ S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
/* XXX - parse_rock_ridge_inode() had already set i_rdev. */
init_special_inode(inode, inode->i_mode, inode->i_rdev);
+ } else {
+ printk(KERN_DEBUG "ISOFS: Invalid file type 0%04o for inode %lu.\n",
+ inode->i_mode, inode->i_ino);
+ ret = -EIO;
+ goto fail;
+ }
ret = 0;
out:
@@ -1505,7 +1520,7 @@ struct inode *__isofs_iget(struct super_block *sb,
if (!inode)
return ERR_PTR(-ENOMEM);
- if (inode->i_state & I_NEW) {
+ if (inode_state_read_once(inode) & I_NEW) {
ret = isofs_read_inode(inode, relocated);
if (ret < 0) {
iget_failed(inode);
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index 2d55207c9a99..506555837533 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -106,7 +106,9 @@ static inline unsigned int isonum_733(u8 *p)
/* Ignore bigendian datum due to broken mastering programs */
return get_unaligned_le32(p);
}
-extern int iso_date(u8 *, int);
+#define ISO_DATE_HIGH_SIERRA (1 << 0)
+#define ISO_DATE_LONG_FORM (1 << 1)
+struct timespec64 iso_date(u8 *p, int flags);
struct inode; /* To make gcc happy */
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index dbf911126e61..576498245b9d 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -412,7 +412,12 @@ repeat:
}
}
break;
- case SIG('T', 'F'):
+ case SIG('T', 'F'): {
+ int flags, size, slen;
+
+ flags = rr->u.TF.flags & TF_LONG_FORM ? ISO_DATE_LONG_FORM : 0;
+ size = rr->u.TF.flags & TF_LONG_FORM ? 17 : 7;
+ slen = rr->len - 5;
/*
* Some RRIP writers incorrectly place ctime in the
* TF_CREATE field. Try to handle this correctly for
@@ -420,27 +425,28 @@ repeat:
*/
/* Rock ridge never appears on a High Sierra disk */
cnt = 0;
- if (rr->u.TF.flags & TF_CREATE) {
- inode_set_ctime(inode,
- iso_date(rr->u.TF.times[cnt++].time, 0),
- 0);
+ if ((rr->u.TF.flags & TF_CREATE) && size <= slen) {
+ inode_set_ctime_to_ts(inode,
+ iso_date(rr->u.TF.data + size * cnt++, flags));
+ slen -= size;
}
- if (rr->u.TF.flags & TF_MODIFY) {
- inode_set_mtime(inode,
- iso_date(rr->u.TF.times[cnt++].time, 0),
- 0);
+ if ((rr->u.TF.flags & TF_MODIFY) && size <= slen) {
+ inode_set_mtime_to_ts(inode,
+ iso_date(rr->u.TF.data + size * cnt++, flags));
+ slen -= size;
}
- if (rr->u.TF.flags & TF_ACCESS) {
- inode_set_atime(inode,
- iso_date(rr->u.TF.times[cnt++].time, 0),
- 0);
+ if ((rr->u.TF.flags & TF_ACCESS) && size <= slen) {
+ inode_set_atime_to_ts(inode,
+ iso_date(rr->u.TF.data + size * cnt++, flags));
+ slen -= size;
}
- if (rr->u.TF.flags & TF_ATTRIBUTES) {
- inode_set_ctime(inode,
- iso_date(rr->u.TF.times[cnt++].time, 0),
- 0);
+ if ((rr->u.TF.flags & TF_ATTRIBUTES) && size <= slen) {
+ inode_set_ctime_to_ts(inode,
+ iso_date(rr->u.TF.data + size * cnt++, flags));
+ slen -= size;
}
break;
+ }
case SIG('S', 'L'):
{
int slen;
diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h
index 7755e587f778..c0856fa9bb6a 100644
--- a/fs/isofs/rock.h
+++ b/fs/isofs/rock.h
@@ -65,13 +65,9 @@ struct RR_PL_s {
__u8 location[8];
};
-struct stamp {
- __u8 time[7]; /* actually 6 unsigned, 1 signed */
-} __attribute__ ((packed));
-
struct RR_TF_s {
__u8 flags;
- struct stamp times[]; /* Variable number of these beasts */
+ __u8 data[];
} __attribute__ ((packed));
/* Linux-specific extension for transparent decompression */
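
A standalone illustration of the bounds walk that the rock.c hunk above performs over the new RR_TF_s data[] payload; the record contents are invented, and the 5-byte fixed header comes from the slen = rr->len - 5 computation in that hunk:

#include <stdio.h>

int main(void)
{
	int rr_len = 5 + 2 * 7;		/* invented record: two short-form stamps */
	int size = 7;			/* 17 when TF_LONG_FORM is set */
	int slen = rr_len - 5;		/* payload bytes after the fixed header */
	int cnt = 0;
	const char *want[] = { "create", "modify", "access" };

	for (int i = 0; i < 3; i++) {
		if (size <= slen) {
			printf("stamp %d -> %s time\n", cnt++, want[i]);
			slen -= size;
		} else {
			printf("%s time requested but record truncated, skipped\n",
			       want[i]);
		}
	}
	return 0;
}

The same walk with size = 17 handles TF_LONG_FORM records, which is what the flags/size selection at the top of the case does.
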
diff --git a/fs/isofs/util.c b/fs/isofs/util.c
index e88dba721661..42f479da0b28 100644
--- a/fs/isofs/util.c
+++ b/fs/isofs/util.c
@@ -16,29 +16,44 @@
* to GMT. Thus we should always be correct.
*/
-int iso_date(u8 *p, int flag)
+struct timespec64 iso_date(u8 *p, int flags)
{
int year, month, day, hour, minute, second, tz;
- int crtime;
+ struct timespec64 ts;
+
+ if (flags & ISO_DATE_LONG_FORM) {
+ year = (p[0] - '0') * 1000 +
+ (p[1] - '0') * 100 +
+ (p[2] - '0') * 10 +
+ (p[3] - '0') - 1900;
+ month = ((p[4] - '0') * 10 + (p[5] - '0'));
+ day = ((p[6] - '0') * 10 + (p[7] - '0'));
+ hour = ((p[8] - '0') * 10 + (p[9] - '0'));
+ minute = ((p[10] - '0') * 10 + (p[11] - '0'));
+ second = ((p[12] - '0') * 10 + (p[13] - '0'));
+ ts.tv_nsec = ((p[14] - '0') * 10 + (p[15] - '0')) * 10000000;
+ tz = p[16];
+ } else {
+ year = p[0];
+ month = p[1];
+ day = p[2];
+ hour = p[3];
+ minute = p[4];
+ second = p[5];
+ ts.tv_nsec = 0;
+ /* High sierra has no time zone */
+ tz = flags & ISO_DATE_HIGH_SIERRA ? 0 : p[6];
+ }
- year = p[0];
- month = p[1];
- day = p[2];
- hour = p[3];
- minute = p[4];
- second = p[5];
- if (flag == 0) tz = p[6]; /* High sierra has no time zone */
- else tz = 0;
-
if (year < 0) {
- crtime = 0;
+ ts.tv_sec = 0;
} else {
- crtime = mktime64(year+1900, month, day, hour, minute, second);
+ ts.tv_sec = mktime64(year+1900, month, day, hour, minute, second);
/* sign extend */
if (tz & 0x80)
tz |= (-1 << 8);
-
+
/*
* The timezone offset is unreliable on some disks,
* so we make a sanity check. In no case is it ever
@@ -65,7 +80,7 @@ int iso_date(u8 *p, int flag)
* for pointing out the sign error.
*/
if (-52 <= tz && tz <= 52)
- crtime -= tz * 15 * 60;
+ ts.tv_sec -= tz * 15 * 60;
}
- return crtime;
-}
+ return ts;
+}
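
A worked decode of a long-form stamp using the same arithmetic as the new iso_date() above; the sample bytes and timezone are invented, and plain printf stands in for mktime64(), which only exists in the kernel (the kernel stores year - 1900 and feeds year + 1900 back to mktime64(), so the net year is the same):

#include <stdio.h>

int main(void)
{
	/* 16 ASCII digits (YYYYMMDDHHMMSS + hundredths) plus a tz byte */
	const unsigned char p[] = "2025010203040556\x04";
	int year   = (p[0]-'0')*1000 + (p[1]-'0')*100 + (p[2]-'0')*10 + (p[3]-'0');
	int month  = (p[4]-'0')*10 + (p[5]-'0');
	int day    = (p[6]-'0')*10 + (p[7]-'0');
	int hour   = (p[8]-'0')*10 + (p[9]-'0');
	int minute = (p[10]-'0')*10 + (p[11]-'0');
	int second = (p[12]-'0')*10 + (p[13]-'0');
	long nsec  = ((p[14]-'0')*10 + (p[15]-'0')) * 10000000L;
	int tz     = (signed char)p[16];	/* 15-minute units, sign-extended */

	/* prints 2025-01-02 03:04:05.56, zone offset 60 min */
	printf("%04d-%02d-%02d %02d:%02d:%02d.%02ld, zone offset %d min\n",
	       year, month, day, hour, minute, second, nsec / 10000000L,
	       tz * 15);
	return 0;
}
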
diff --git a/fs/jbd2/Kconfig b/fs/jbd2/Kconfig
index 4ad2c67f93f1..9c19e1512101 100644
--- a/fs/jbd2/Kconfig
+++ b/fs/jbd2/Kconfig
@@ -2,8 +2,6 @@
config JBD2
tristate
select CRC32
- select CRYPTO
- select CRYPTO_CRC32C
help
This is a generic journaling layer for block devices that support
both 32-bit and 64-bit block numbers. It is currently used by
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index b3971e91e8eb..de89c5bef607 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -113,7 +113,7 @@ __releases(&journal->j_state_lock)
"journal space in %s\n", __func__,
journal->j_devname);
WARN_ON(1);
- jbd2_journal_abort(journal, -EIO);
+ jbd2_journal_abort(journal, -ENOSPC);
}
write_lock(&journal->j_state_lock);
} else {
@@ -131,7 +131,7 @@ __flush_batch(journal_t *journal, int *batch_count)
blk_start_plug(&plug);
for (i = 0; i < *batch_count; i++)
- write_dirty_buffer(journal->j_chkpt_bhs[i], REQ_SYNC);
+ write_dirty_buffer(journal->j_chkpt_bhs[i], JBD2_JOURNAL_REQ_FLAGS);
blk_finish_plug(&plug);
for (i = 0; i < *batch_count; i++) {
@@ -285,6 +285,7 @@ restart:
retry:
if (batch_count)
__flush_batch(journal, &batch_count);
+ cond_resched();
spin_lock(&journal->j_list_lock);
goto restart;
}
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 9153ff3a08e7..7203d2d2624d 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -57,8 +57,8 @@ static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
* So here, we have a buffer which has just come off the forget list. Look to
* see if we can strip all buffers from the backing page.
*
- * Called under lock_journal(), and possibly under journal_datalist_lock. The
- * caller provided us with a ref against the buffer, and we drop that here.
+ * Called under j_list_lock. The caller provided us with a ref against the
+ * buffer, and we drop that here.
*/
static void release_buffer_page(struct buffer_head *bh)
{
@@ -99,7 +99,7 @@ static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
h->h_chksum_type = 0;
h->h_chksum_size = 0;
h->h_chksum[0] = 0;
- csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
+ csum = jbd2_chksum(j->j_csum_seed, bh->b_data, j->j_blocksize);
h->h_chksum[0] = cpu_to_be32(csum);
}
@@ -330,8 +330,8 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
seq = cpu_to_be32(sequence);
addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
- csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
- csum32 = jbd2_chksum(j, csum32, addr, bh->b_size);
+ csum32 = jbd2_chksum(j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
+ csum32 = jbd2_chksum(csum32, addr, bh->b_size);
kunmap_local(addr);
if (jbd2_has_feature_csum3(j))
@@ -738,10 +738,8 @@ start_journal_io:
err = journal_finish_inode_data_buffers(journal, commit_transaction);
if (err) {
printk(KERN_WARNING
- "JBD2: Detected IO errors while flushing file data "
- "on %s\n", journal->j_devname);
- if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
- jbd2_journal_abort(journal, err);
+ "JBD2: Detected IO errors %d while flushing file data on %s\n",
+ err, journal->j_devname);
err = 0;
}
@@ -772,9 +770,9 @@ start_journal_io:
/*
* If the journal is not located on the file system device,
* then we must flush the file system device before we issue
- * the commit record
+ * the commit record and update the journal tail sequence.
*/
- if (commit_transaction->t_need_data_flush &&
+ if ((commit_transaction->t_need_data_flush || update_tail) &&
(journal->j_fs_dev != journal->j_dev) &&
(journal->j_flags & JBD2_BARRIER))
blkdev_issue_flush(journal->j_fs_dev);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 7e49d912b091..c973162d5b31 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -83,7 +83,7 @@ EXPORT_SYMBOL(jbd2_log_wait_commit);
EXPORT_SYMBOL(jbd2_journal_start_commit);
EXPORT_SYMBOL(jbd2_journal_force_commit_nested);
EXPORT_SYMBOL(jbd2_journal_wipe);
-EXPORT_SYMBOL(jbd2_journal_blocks_per_page);
+EXPORT_SYMBOL(jbd2_journal_blocks_per_folio);
EXPORT_SYMBOL(jbd2_journal_invalidate_folio);
EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
EXPORT_SYMBOL(jbd2_journal_force_commit);
@@ -115,14 +115,14 @@ void __jbd2_debug(int level, const char *file, const char *func,
#endif
/* Checksumming functions */
-static __be32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
+static __be32 jbd2_superblock_csum(journal_superblock_t *sb)
{
__u32 csum;
__be32 old_csum;
old_csum = sb->s_checksum;
sb->s_checksum = 0;
- csum = jbd2_chksum(j, ~0, (char *)sb, sizeof(journal_superblock_t));
+ csum = jbd2_chksum(~0, (char *)sb, sizeof(journal_superblock_t));
sb->s_checksum = old_csum;
return cpu_to_be32(csum);
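
The journal argument disappears from every jbd2_chksum() call in this series, and the Kconfig hunk earlier stops selecting CRYPTO/CRYPTO_CRC32C. The header change is not part of this section, so the following is only a guess at the resulting helper, assuming it now wraps the library crc32c directly instead of going through a crypto shash:

#include <linux/crc32c.h>

static inline __u32 jbd2_chksum(__u32 crc, const void *address,
				unsigned int length)
{
	return crc32c(crc, address, length);
}
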
@@ -134,7 +134,7 @@ static __be32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
static void commit_timeout(struct timer_list *t)
{
- journal_t *journal = from_timer(journal, t, j_commit_timer);
+ journal_t *journal = timer_container_of(journal, t, j_commit_timer);
wake_up_process(journal->j_task);
}
@@ -197,7 +197,7 @@ loop:
if (journal->j_commit_sequence != journal->j_commit_request) {
jbd2_debug(1, "OK, requests differ\n");
write_unlock(&journal->j_state_lock);
- del_timer_sync(&journal->j_commit_timer);
+ timer_delete_sync(&journal->j_commit_timer);
jbd2_journal_commit_transaction(journal);
write_lock(&journal->j_state_lock);
goto loop;
@@ -246,7 +246,7 @@ loop:
goto loop;
end_loop:
- del_timer_sync(&journal->j_commit_timer);
+ timer_delete_sync(&journal->j_commit_timer);
journal->j_task = NULL;
wake_up(&journal->j_wait_done_commit);
jbd2_debug(1, "Journal thread exiting.\n");
@@ -603,7 +603,7 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
{
int ret = 0;
- transaction_t *commit_trans;
+ transaction_t *commit_trans, *running_trans;
if (!(journal->j_flags & JBD2_BARRIER))
return 0;
@@ -613,6 +613,16 @@ int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
goto out;
commit_trans = journal->j_committing_transaction;
if (!commit_trans || commit_trans->t_tid != tid) {
+ running_trans = journal->j_running_transaction;
+ /*
+ * The query transaction hasn't started committing,
+ * it must still be running.
+ */
+ if (WARN_ON_ONCE(!running_trans ||
+ running_trans->t_tid != tid))
+ goto out;
+
+ running_trans->t_need_data_flush = 1;
ret = 1;
goto out;
}
@@ -718,7 +728,6 @@ int jbd2_fc_begin_commit(journal_t *journal, tid_t tid)
}
journal->j_flags |= JBD2_FAST_COMMIT_ONGOING;
write_unlock(&journal->j_state_lock);
- jbd2_journal_lock_updates(journal);
return 0;
}
@@ -732,7 +741,6 @@ static int __jbd2_fc_end_commit(journal_t *journal, tid_t tid, bool fallback)
{
if (journal->j_fc_cleanup_callback)
journal->j_fc_cleanup_callback(journal, 0, tid);
- jbd2_journal_unlock_updates(journal);
write_lock(&journal->j_state_lock);
journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
if (fallback)
@@ -929,8 +937,8 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
printk(KERN_ALERT "%s: journal block not found "
"at offset %lu on %s\n",
__func__, blocknr, journal->j_devname);
+ jbd2_journal_abort(journal, ret ? ret : -EFSCORRUPTED);
err = -EIO;
- jbd2_journal_abort(journal, err);
} else {
*retp = block;
}
@@ -947,7 +955,7 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
* descriptor blocks we do need to generate bona fide buffers.
*
* After the caller of jbd2_journal_get_descriptor_buffer() has finished modifying
- * the buffer's contents they really should run flush_dcache_page(bh->b_page).
+ * the buffer's contents they really should run flush_dcache_folio(bh->b_folio).
* But we don't bother doing that, so there will be coherency problems with
* mmaps of blockdevs which hold live JBD-controlled filesystems.
*/
@@ -992,7 +1000,7 @@ void jbd2_descriptor_block_csum_set(journal_t *j, struct buffer_head *bh)
tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize -
sizeof(struct jbd2_journal_block_tail));
tail->t_checksum = 0;
- csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
+ csum = jbd2_chksum(j->j_csum_seed, bh->b_data, j->j_blocksize);
tail->t_checksum = cpu_to_be32(csum);
}
@@ -1361,7 +1369,7 @@ static int journal_check_superblock(journal_t *journal)
return err;
}
- if (jbd2_journal_has_csum_v2or3_feature(journal) &&
+ if (jbd2_journal_has_csum_v2or3(journal) &&
jbd2_has_feature_checksum(journal)) {
/* Can't have checksum v1 and v2 on at the same time! */
printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2/3 "
@@ -1369,22 +1377,14 @@ static int journal_check_superblock(journal_t *journal)
return err;
}
- /* Load the checksum driver */
- if (jbd2_journal_has_csum_v2or3_feature(journal)) {
+ if (jbd2_journal_has_csum_v2or3(journal)) {
if (sb->s_checksum_type != JBD2_CRC32C_CHKSUM) {
printk(KERN_ERR "JBD2: Unknown checksum type\n");
return err;
}
- journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
- if (IS_ERR(journal->j_chksum_driver)) {
- printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
- err = PTR_ERR(journal->j_chksum_driver);
- journal->j_chksum_driver = NULL;
- return err;
- }
/* Check superblock checksum */
- if (sb->s_checksum != jbd2_superblock_csum(journal, sb)) {
+ if (sb->s_checksum != jbd2_superblock_csum(sb)) {
printk(KERN_ERR "JBD2: journal checksum error\n");
err = -EFSBADCRC;
return err;
@@ -1490,7 +1490,7 @@ static int journal_load_superblock(journal_t *journal)
journal->j_total_len = be32_to_cpu(sb->s_maxlen);
/* Precompute checksum seed for all metadata */
if (jbd2_journal_has_csum_v2or3(journal))
- journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
+ journal->j_csum_seed = jbd2_chksum(~0, sb->s_uuid,
sizeof(sb->s_uuid));
/* After journal features are set, we can compute transaction limits */
jbd2_journal_init_transaction_limits(journal);
@@ -1521,7 +1521,6 @@ static journal_t *journal_init_common(struct block_device *bdev,
struct block_device *fs_dev,
unsigned long long start, int len, int blocksize)
{
- static struct lock_class_key jbd2_trans_commit_key;
journal_t *journal;
int err;
int n;
@@ -1530,6 +1529,7 @@ static journal_t *journal_init_common(struct block_device *bdev,
if (!journal)
return ERR_PTR(-ENOMEM);
+ lockdep_register_key(&journal->jbd2_trans_commit_key);
journal->j_blocksize = blocksize;
journal->j_dev = bdev;
journal->j_fs_dev = fs_dev;
@@ -1560,7 +1560,7 @@ static journal_t *journal_init_common(struct block_device *bdev,
journal->j_max_batch_time = 15000; /* 15ms */
atomic_set(&journal->j_reserved_credits, 0);
lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle",
- &jbd2_trans_commit_key, 0);
+ &journal->jbd2_trans_commit_key, 0);
/* The journal is marked for error until we succeed with recovery! */
journal->j_flags = JBD2_ABORT;
@@ -1608,11 +1608,10 @@ static journal_t *journal_init_common(struct block_device *bdev,
err_cleanup:
percpu_counter_destroy(&journal->j_checkpoint_jh_count);
- if (journal->j_chksum_driver)
- crypto_free_shash(journal->j_chksum_driver);
kfree(journal->j_wbuf);
jbd2_journal_destroy_revoke(journal);
journal_fail_superblock(journal);
+ lockdep_unregister_key(&journal->jbd2_trans_commit_key);
kfree(journal);
return ERR_PTR(err);
}
@@ -1821,7 +1820,7 @@ static int jbd2_write_superblock(journal_t *journal, blk_opf_t write_flags)
set_buffer_uptodate(bh);
}
if (jbd2_journal_has_csum_v2or3(journal))
- sb->s_checksum = jbd2_superblock_csum(journal, sb);
+ sb->s_checksum = jbd2_superblock_csum(sb);
get_bh(bh);
bh->b_end_io = end_buffer_write_sync;
submit_bh(REQ_OP_WRITE | write_flags, bh);
@@ -1860,8 +1859,9 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
if (is_journal_aborted(journal))
return -EIO;
- if (jbd2_check_fs_dev_write_error(journal)) {
- jbd2_journal_abort(journal, -EIO);
+ ret = jbd2_check_fs_dev_write_error(journal);
+ if (ret) {
+ jbd2_journal_abort(journal, ret);
return -EIO;
}
@@ -1879,7 +1879,6 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
/* Log is no longer empty */
write_lock(&journal->j_state_lock);
- WARN_ON(!sb->s_sequence);
journal->j_flags &= ~JBD2_FLUSHED;
write_unlock(&journal->j_state_lock);
@@ -1975,17 +1974,15 @@ static int __jbd2_journal_erase(journal_t *journal, unsigned int flags)
return err;
}
- if (block_start == ~0ULL) {
- block_start = phys_block;
- block_stop = block_start - 1;
- }
+ if (block_start == ~0ULL)
+ block_stop = block_start = phys_block;
/*
* last block not contiguous with current block,
* process last contiguous region and return to this block on
* next loop
*/
- if (phys_block != block_stop + 1) {
+ if (phys_block != block_stop) {
block--;
} else {
block_stop++;
@@ -2004,11 +2001,10 @@ static int __jbd2_journal_erase(journal_t *journal, unsigned int flags)
*/
byte_start = block_start * journal->j_blocksize;
byte_stop = block_stop * journal->j_blocksize;
- byte_count = (block_stop - block_start + 1) *
- journal->j_blocksize;
+ byte_count = (block_stop - block_start) * journal->j_blocksize;
truncate_inode_pages_range(journal->j_dev->bd_mapping,
- byte_start, byte_stop);
+ byte_start, byte_stop - 1);
if (flags & JBD2_JOURNAL_FLUSH_DISCARD) {
err = blkdev_issue_discard(journal->j_dev,
@@ -2023,7 +2019,7 @@ static int __jbd2_journal_erase(journal_t *journal, unsigned int flags)
}
if (unlikely(err != 0)) {
- pr_err("JBD2: (error %d) unable to wipe journal at physical blocks %llu - %llu",
+ pr_err("JBD2: (error %d) unable to wipe journal at physical blocks [%llu, %llu)",
err, block_start, block_stop);
return err;
}
@@ -2162,9 +2158,11 @@ int jbd2_journal_destroy(journal_t *journal)
* failed to write back to the original location, otherwise the
* filesystem may become inconsistent.
*/
- if (!is_journal_aborted(journal) &&
- jbd2_check_fs_dev_write_error(journal))
- jbd2_journal_abort(journal, -EIO);
+ if (!is_journal_aborted(journal)) {
+ int ret = jbd2_check_fs_dev_write_error(journal);
+ if (ret)
+ jbd2_journal_abort(journal, ret);
+ }
if (journal->j_sb_buffer) {
if (!is_journal_aborted(journal)) {
@@ -2191,10 +2189,9 @@ int jbd2_journal_destroy(journal_t *journal)
iput(journal->j_inode);
if (journal->j_revoke)
jbd2_journal_destroy_revoke(journal);
- if (journal->j_chksum_driver)
- crypto_free_shash(journal->j_chksum_driver);
kfree(journal->j_fc_wbuf);
kfree(journal->j_wbuf);
+ lockdep_unregister_key(&journal->jbd2_trans_commit_key);
kfree(journal);
return err;
@@ -2337,27 +2334,15 @@ int jbd2_journal_set_features(journal_t *journal, unsigned long compat,
}
}
- /* Load the checksum driver if necessary */
- if ((journal->j_chksum_driver == NULL) &&
- INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
- journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
- if (IS_ERR(journal->j_chksum_driver)) {
- printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
- journal->j_chksum_driver = NULL;
- return 0;
- }
- /* Precompute checksum seed for all metadata */
- journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
- sizeof(sb->s_uuid));
- }
-
lock_buffer(journal->j_sb_buffer);
- /* If enabling v3 checksums, update superblock */
+ /* If enabling v3 checksums, update superblock and precompute seed */
if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
sb->s_feature_compat &=
~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
+ journal->j_csum_seed = jbd2_chksum(~0, sb->s_uuid,
+ sizeof(sb->s_uuid));
}
/* If enabling v1 checksums, downgrade superblock */
@@ -2369,6 +2354,12 @@ int jbd2_journal_set_features(journal_t *journal, unsigned long compat,
sb->s_feature_compat |= cpu_to_be32(compat);
sb->s_feature_ro_compat |= cpu_to_be32(ro);
sb->s_feature_incompat |= cpu_to_be32(incompat);
+ /*
+ * Update the checksum now so that it is valid even for read-only
+ * filesystems where jbd2_write_superblock() doesn't get called.
+ */
+ if (jbd2_journal_has_csum_v2or3(journal))
+ sb->s_checksum = jbd2_superblock_csum(sb);
unlock_buffer(journal->j_sb_buffer);
jbd2_journal_init_transaction_limits(journal);
@@ -2398,9 +2389,17 @@ void jbd2_journal_clear_features(journal_t *journal, unsigned long compat,
sb = journal->j_superblock;
+ lock_buffer(journal->j_sb_buffer);
sb->s_feature_compat &= ~cpu_to_be32(compat);
sb->s_feature_ro_compat &= ~cpu_to_be32(ro);
sb->s_feature_incompat &= ~cpu_to_be32(incompat);
+ /*
+ * Update the checksum now so that it is valid even for read-only
+ * filesystems where jbd2_write_superblock() doesn't get called.
+ */
+ if (jbd2_journal_has_csum_v2or3(journal))
+ sb->s_checksum = jbd2_superblock_csum(sb);
+ unlock_buffer(journal->j_sb_buffer);
jbd2_journal_init_transaction_limits(journal);
}
EXPORT_SYMBOL(jbd2_journal_clear_features);
@@ -2675,9 +2674,10 @@ void jbd2_journal_ack_err(journal_t *journal)
write_unlock(&journal->j_state_lock);
}
-int jbd2_journal_blocks_per_page(struct inode *inode)
+int jbd2_journal_blocks_per_folio(struct inode *inode)
{
- return 1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
+ return 1 << (PAGE_SHIFT + mapping_max_folio_order(inode->i_mapping) -
+ inode->i_sb->s_blocksize_bits);
}
/*
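
A standalone arithmetic check of the jbd2_journal_blocks_per_folio() formula above; the folio order is an assumption, since mapping_max_folio_order() depends on the filesystem and block size:

#include <stdio.h>

int main(void)
{
	int page_shift = 12;		/* 4 KiB pages */
	int max_folio_order = 0;	/* assumed mapping_max_folio_order() */
	int blocksize_bits = 10;	/* 1 KiB journal blocks */

	/* old formula: 1 << (PAGE_SHIFT - blocksize_bits) == 4 */
	printf("blocks per folio = %d\n",
	       1 << (page_shift + max_folio_order - blocksize_bits));
	return 0;
}

With a max folio order of, say, 3 the same configuration reports 32, which is presumably why callers keyed to this helper need the folio-aware count rather than the old per-page one.
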
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 9192be7c19d8..cac8c2cd4a92 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -39,7 +39,7 @@ struct recovery_info
static int do_one_pass(journal_t *journal,
struct recovery_info *info, enum passtype pass);
-static int scan_revoke_records(journal_t *, struct buffer_head *,
+static int scan_revoke_records(journal_t *, enum passtype, struct buffer_head *,
tid_t, struct recovery_info *);
#ifdef __KERNEL__
@@ -65,9 +65,8 @@ static void journal_brelse_array(struct buffer_head *b[], int n)
*/
#define MAXBUF 8
-static int do_readahead(journal_t *journal, unsigned int start)
+static void do_readahead(journal_t *journal, unsigned int start)
{
- int err;
unsigned int max, nbufs, next;
unsigned long long blocknr;
struct buffer_head *bh;
@@ -85,7 +84,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
nbufs = 0;
for (next = start; next < max; next++) {
- err = jbd2_journal_bmap(journal, next, &blocknr);
+ int err = jbd2_journal_bmap(journal, next, &blocknr);
if (err) {
printk(KERN_ERR "JBD2: bad block at offset %u\n",
@@ -94,10 +93,8 @@ static int do_readahead(journal_t *journal, unsigned int start)
}
bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
- if (!bh) {
- err = -ENOMEM;
+ if (!bh)
goto failed;
- }
if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
bufs[nbufs++] = bh;
@@ -112,12 +109,10 @@ static int do_readahead(journal_t *journal, unsigned int start)
if (nbufs)
bh_readahead_batch(nbufs, bufs, 0);
- err = 0;
failed:
if (nbufs)
journal_brelse_array(bufs, nbufs);
- return err;
}
#endif /* __KERNEL__ */
@@ -190,7 +185,7 @@ static int jbd2_descriptor_block_csum_verify(journal_t *j, void *buf)
j->j_blocksize - sizeof(struct jbd2_journal_block_tail));
provided = tail->t_checksum;
tail->t_checksum = 0;
- calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
+ calculated = jbd2_chksum(j->j_csum_seed, buf, j->j_blocksize);
tail->t_checksum = provided;
return provided == cpu_to_be32(calculated);
@@ -287,19 +282,20 @@ static int fc_do_one_pass(journal_t *journal,
int jbd2_journal_recover(journal_t *journal)
{
int err, err2;
- journal_superblock_t * sb;
-
struct recovery_info info;
memset(&info, 0, sizeof(info));
- sb = journal->j_superblock;
/*
* The journal superblock's s_start field (the current log head)
* is always zero if, and only if, the journal was cleanly
- * unmounted.
+ * unmounted. We use its in-memory version j_tail here because
+ * jbd2_journal_wipe() could have updated it without updating journal
+ * superblock.
*/
- if (!sb->s_start) {
+ if (!journal->j_tail) {
+ journal_superblock_t *sb = journal->j_superblock;
+
jbd2_debug(1, "No recovery required, last transaction %d, head block %u\n",
be32_to_cpu(sb->s_sequence), be32_to_cpu(sb->s_head));
journal->j_transaction_sequence = be32_to_cpu(sb->s_sequence) + 1;
@@ -327,6 +323,12 @@ int jbd2_journal_recover(journal_t *journal)
journal->j_transaction_sequence, journal->j_head);
jbd2_journal_clear_revoke(journal);
+ /* Free revoke table allocated for replay */
+ if (journal->j_revoke != journal->j_revoke_table[0] &&
+ journal->j_revoke != journal->j_revoke_table[1]) {
+ jbd2_journal_destroy_revoke_table(journal->j_revoke);
+ journal->j_revoke = journal->j_revoke_table[1];
+ }
err2 = sync_blockdev(journal->j_fs_dev);
if (!err)
err = err2;
@@ -438,7 +440,7 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
h = buf;
provided = h->h_chksum[0];
h->h_chksum[0] = 0;
- calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
+ calculated = jbd2_chksum(j->j_csum_seed, buf, j->j_blocksize);
h->h_chksum[0] = provided;
return provided == cpu_to_be32(calculated);
@@ -459,7 +461,7 @@ static bool jbd2_commit_block_csum_verify_partial(journal_t *j, void *buf)
h = tmpbuf;
provided = h->h_chksum[0];
h->h_chksum[0] = 0;
- calculated = jbd2_chksum(j, j->j_csum_seed, tmpbuf, j->j_blocksize);
+ calculated = jbd2_chksum(j->j_csum_seed, tmpbuf, j->j_blocksize);
kfree(tmpbuf);
return provided == cpu_to_be32(calculated);
@@ -476,8 +478,8 @@ static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
return 1;
seq = cpu_to_be32(sequence);
- csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
- csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize);
+ csum32 = jbd2_chksum(j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
+ csum32 = jbd2_chksum(csum32, buf, j->j_blocksize);
if (jbd2_has_feature_csum3(j))
return tag3->t_checksum == cpu_to_be32(csum32);
@@ -612,6 +614,31 @@ static int do_one_pass(journal_t *journal,
first_commit_ID = next_commit_ID;
if (pass == PASS_SCAN)
info->start_transaction = first_commit_ID;
+ else if (pass == PASS_REVOKE) {
+ /*
+ * Would the default revoke table have too long hash chains
+ * during replay?
+ */
+ if (info->nr_revokes > JOURNAL_REVOKE_DEFAULT_HASH * 16) {
+ unsigned int hash_size;
+
+ /*
+ * Aim for average chain length of 8, limit at 1M
+ * entries to avoid problems with malicious
+ * filesystems.
+ */
+ hash_size = min(roundup_pow_of_two(info->nr_revokes / 8),
+ 1U << 20);
+ journal->j_revoke =
+ jbd2_journal_init_revoke_table(hash_size);
+ if (!journal->j_revoke) {
+ printk(KERN_ERR
+ "JBD2: failed to allocate revoke table for replay with %u entries. "
+ "Journal replay may be slow.\n", hash_size);
+ journal->j_revoke = journal->j_revoke_table[1];
+ }
+ }
+ }
jbd2_debug(1, "Starting recovery pass %d\n", pass);
@@ -852,6 +879,13 @@ chksum_ok:
case JBD2_REVOKE_BLOCK:
/*
+ * If we aren't in the SCAN or REVOKE pass, then we can
+ * just skip over this block.
+ */
+ if (pass != PASS_REVOKE && pass != PASS_SCAN)
+ continue;
+
+ /*
* Check revoke block crc in pass_scan, if csum verify
* failed, check commit block time later.
*/
@@ -863,12 +897,7 @@ chksum_ok:
need_check_commit_time = true;
}
- /* If we aren't in the REVOKE pass, then we can
- * just skip over this block. */
- if (pass != PASS_REVOKE)
- continue;
-
- err = scan_revoke_records(journal, bh,
+ err = scan_revoke_records(journal, pass, bh,
next_commit_ID, info);
if (err)
goto failed;
@@ -922,8 +951,9 @@ chksum_ok:
/* Scan a revoke record, marking all blocks mentioned as revoked. */
-static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
- tid_t sequence, struct recovery_info *info)
+static int scan_revoke_records(journal_t *journal, enum passtype pass,
+ struct buffer_head *bh, tid_t sequence,
+ struct recovery_info *info)
{
jbd2_journal_revoke_header_t *header;
int offset, max;
@@ -944,6 +974,11 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
if (jbd2_has_feature_64bit(journal))
record_len = 8;
+ if (pass == PASS_SCAN) {
+ info->nr_revokes += (max - offset) / record_len;
+ return 0;
+ }
+
while (offset + record_len <= max) {
unsigned long long blocknr;
int err;
@@ -956,7 +991,6 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
err = jbd2_journal_set_revoke(journal, blocknr, sequence);
if (err)
return err;
- ++info->nr_revokes;
}
return 0;
}
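
A standalone illustration of the replay-time revoke hash sizing added above, with an invented revoke count; JOURNAL_REVOKE_DEFAULT_HASH = 256 is an assumption, as its definition is outside this section:

#include <stdio.h>

/* local stand-in for the kernel's roundup_pow_of_two() */
static unsigned int roundup_pow2(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int default_hash = 256;	/* assumed JOURNAL_REVOKE_DEFAULT_HASH */
	unsigned int nr_revokes = 100000;	/* invented: a revoke-heavy journal */
	unsigned int hash_size;

	if (nr_revokes > default_hash * 16) {
		hash_size = roundup_pow2(nr_revokes / 8);
		if (hash_size > (1U << 20))
			hash_size = 1U << 20;
		/* 100000 / 8 = 12500, rounded up to 16384 buckets */
		printf("replay hash_size = %u\n", hash_size);
	}
	return 0;
}
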
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 4556e4689024..1467f6790747 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -215,7 +215,7 @@ int __init jbd2_journal_init_revoke_table_cache(void)
return 0;
}
-static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)
+struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)
{
int shift = 0;
int tmp = hash_size;
@@ -231,7 +231,7 @@ static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)
table->hash_size = hash_size;
table->hash_shift = shift;
table->hash_table =
- kmalloc_array(hash_size, sizeof(struct list_head), GFP_KERNEL);
+ kvmalloc_array(hash_size, sizeof(struct list_head), GFP_KERNEL);
if (!table->hash_table) {
kmem_cache_free(jbd2_revoke_table_cache, table);
table = NULL;
@@ -245,7 +245,7 @@ out:
return table;
}
-static void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table)
+void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table)
{
int i;
struct list_head *hash_list;
@@ -255,7 +255,7 @@ static void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table)
J_ASSERT(list_empty(hash_list));
}
- kfree(table->hash_table);
+ kvfree(table->hash_table);
kmem_cache_free(jbd2_revoke_table_cache, table);
}
@@ -345,7 +345,8 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
bh = bh_in;
if (!bh) {
- bh = __find_get_block(bdev, blocknr, journal->j_blocksize);
+ bh = __find_get_block_nonatomic(bdev, blocknr,
+ journal->j_blocksize);
if (bh)
BUFFER_TRACE(bh, "found on hash");
}
@@ -355,7 +356,8 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
/* If there is a different buffer_head lying around in
* memory anywhere... */
- bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize);
+ bh2 = __find_get_block_nonatomic(bdev, blocknr,
+ journal->j_blocksize);
if (bh2) {
/* ... and it has RevokeValid status... */
if (bh2 != bh && buffer_revokevalid(bh2))
@@ -420,12 +422,11 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
* do not trust the Revoked bit on buffers unless RevokeValid is also
* set.
*/
-int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
+void jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
{
struct jbd2_revoke_record_s *record;
journal_t *journal = handle->h_transaction->t_journal;
int need_cancel;
- int did_revoke = 0; /* akpm: debug */
struct buffer_head *bh = jh2bh(jh);
jbd2_debug(4, "journal_head %p, cancelling revoke\n", jh);
@@ -450,7 +451,6 @@ int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
list_del(&record->hash);
spin_unlock(&journal->j_revoke_lock);
kmem_cache_free(jbd2_revoke_record_cache, record);
- did_revoke = 1;
}
}
@@ -466,18 +466,18 @@ int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
* state machine will get very upset later on. */
if (need_cancel) {
struct buffer_head *bh2;
- bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
+ bh2 = __find_get_block_nonatomic(bh->b_bdev, bh->b_blocknr,
+ bh->b_size);
if (bh2) {
if (bh2 != bh)
clear_buffer_revoked(bh2);
__brelse(bh2);
}
}
- return did_revoke;
}
/*
- * journal_clear_revoked_flag clears revoked flag of buffers in
+ * jbd2_clear_buffer_revoked_flags clears the revoked flag of buffers in
 * the revoke table to reflect there are no revoked buffers in the next
* transaction which is going to be started.
*/
@@ -495,9 +495,9 @@ void jbd2_clear_buffer_revoked_flags(journal_t *journal)
struct jbd2_revoke_record_s *record;
struct buffer_head *bh;
record = (struct jbd2_revoke_record_s *)list_entry;
- bh = __find_get_block(journal->j_fs_dev,
- record->blocknr,
- journal->j_blocksize);
+ bh = __find_get_block_nonatomic(journal->j_fs_dev,
+ record->blocknr,
+ journal->j_blocksize);
if (bh) {
clear_buffer_revoked(bh);
__brelse(bh);
@@ -506,9 +506,9 @@ void jbd2_clear_buffer_revoked_flags(journal_t *journal)
}
}
-/* journal_switch_revoke table select j_revoke for next transaction
- * we do not want to suspend any processing until all revokes are
- * written -bzzz
+/* jbd2_journal_switch_revoke_table() selects j_revoke for the next
+ * transaction; we do not want to suspend any processing until all
+ * revokes are written -bzzz
*/
void jbd2_journal_switch_revoke_table(journal_t *journal)
{
@@ -654,7 +654,7 @@ static void flush_descriptor(journal_t *journal,
set_buffer_jwrite(descriptor);
BUFFER_TRACE(descriptor, "write");
set_buffer_dirty(descriptor);
- write_dirty_buffer(descriptor, REQ_SYNC);
+ write_dirty_buffer(descriptor, JBD2_JOURNAL_REQ_FLAGS);
}
#endif
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 66513c18ca29..dca4b5d8aaaa 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -92,7 +92,6 @@ static void jbd2_get_transaction(journal_t *journal,
atomic_set(&transaction->t_outstanding_revokes, 0);
atomic_set(&transaction->t_handle_count, 0);
INIT_LIST_HEAD(&transaction->t_inode_list);
- INIT_LIST_HEAD(&transaction->t_private_list);
/* Set up the commit timer for the new transaction. */
journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
@@ -114,12 +113,9 @@ static void jbd2_get_transaction(journal_t *journal,
*/
/*
- * Update transaction's maximum wait time, if debugging is enabled.
- *
* t_max_wait is carefully updated here with use of atomic compare exchange.
 * Note that there could be multiple threads trying to do this simultaneously
* hence using cmpxchg to avoid any use of locks in this case.
- * With this t_max_wait can be updated w/o enabling jbd2_journal_enable_debug.
*/
static inline void update_t_max_wait(transaction_t *transaction,
unsigned long ts)
@@ -445,7 +441,7 @@ repeat:
read_unlock(&journal->j_state_lock);
current->journal_info = handle;
- rwsem_acquire_read(&journal->j_trans_commit_map, 0, 0, _THIS_IP_);
+ rwsem_acquire_read(&journal->j_trans_commit_map, 0, 1, _THIS_IP_);
jbd2_journal_free_transaction(new_transaction);
/*
* Ensure that no allocations done while the transaction is open are
@@ -1223,7 +1219,8 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
return -EROFS;
journal = handle->h_transaction->t_journal;
- if (jbd2_check_fs_dev_write_error(journal)) {
+ rc = jbd2_check_fs_dev_write_error(journal);
+ if (rc) {
/*
* If the fs dev has writeback errors, it may have failed
* to async write out metadata buffers in the background.
@@ -1231,7 +1228,7 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
* it out again, which may lead to on-disk filesystem
* inconsistency. Aborting journal can avoid it happen.
*/
- jbd2_journal_abort(journal, -EIO);
+ jbd2_journal_abort(journal, rc);
return -EIO;
}
@@ -1288,14 +1285,23 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
* committing transaction's lists, but it HAS to be in Forget state in
* that case: the transaction must have deleted the buffer for it to be
* reused here.
+ * In the case of filesystem data inconsistency, for example when the
+ * block bitmap bit of a referenced block is not set, a block that is
+ * still being committed can be allocated and used again. The condition
+ * below would then not hold, so we trigger a JBD2 abort here instead of
+ * immediately hitting the BUG_ON().
*/
spin_lock(&jh->b_state_lock);
- J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
- jh->b_transaction == NULL ||
- (jh->b_transaction == journal->j_committing_transaction &&
- jh->b_jlist == BJ_Forget)));
+ if (!(jh->b_transaction == transaction || jh->b_transaction == NULL ||
+ (jh->b_transaction == journal->j_committing_transaction &&
+ jh->b_jlist == BJ_Forget)) || jh->b_next_transaction != NULL) {
+ err = -EROFS;
+ spin_unlock(&jh->b_state_lock);
+ jbd2_journal_abort(journal, err);
+ goto out;
+ }
- J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
if (jh->b_transaction == NULL) {
@@ -1513,7 +1519,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
jh->b_next_transaction == transaction);
spin_unlock(&jh->b_state_lock);
}
- if (jh->b_modified == 1) {
+ if (data_race(jh->b_modified == 1)) {
/* If it's in our transaction it must be in BJ_Metadata list. */
if (data_race(jh->b_transaction == transaction &&
jh->b_jlist != BJ_Metadata)) {
@@ -1532,7 +1538,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
goto out;
}
- journal = transaction->t_journal;
spin_lock(&jh->b_state_lock);
if (is_handle_aborted(handle)) {
@@ -1547,6 +1552,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
goto out_unlock_bh;
}
+ journal = transaction->t_journal;
+
if (jh->b_modified == 0) {
/*
* This buffer's got modified and becoming part
@@ -1662,6 +1669,7 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
int drop_reserve = 0;
int err = 0;
int was_modified = 0;
+ int wait_for_writeback = 0;
if (is_handle_aborted(handle))
return -EROFS;
@@ -1785,18 +1793,22 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
}
/*
- * The buffer is still not written to disk, we should
- * attach this buffer to current transaction so that the
- * buffer can be checkpointed only after the current
- * transaction commits.
+ * The buffer has not yet been written to disk. We should
+ * either clear the buffer or wait for the ongoing I/O to
+ * complete, and attach this buffer to the current
+ * transaction so that it can be checkpointed only after
+ * the current transaction commits.
*/
clear_buffer_dirty(bh);
+ wait_for_writeback = 1;
__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
spin_unlock(&journal->j_list_lock);
}
drop:
__brelse(bh);
spin_unlock(&jh->b_state_lock);
+ if (wait_for_writeback)
+ wait_on_buffer(bh);
jbd2_journal_put_journal_head(jh);
if (drop_reserve) {
/* no need to reserve log space for this block -bzzz */
@@ -2079,21 +2091,6 @@ static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
jh->b_transaction = NULL;
}
-void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
-{
- struct buffer_head *bh = jh2bh(jh);
-
- /* Get reference so that buffer cannot be freed before we unlock it */
- get_bh(bh);
- spin_lock(&jh->b_state_lock);
- spin_lock(&journal->j_list_lock);
- __jbd2_journal_unfile_buffer(jh);
- spin_unlock(&journal->j_list_lock);
- spin_unlock(&jh->b_state_lock);
- jbd2_journal_put_journal_head(jh);
- __brelse(bh);
-}
-
/**
* jbd2_journal_try_to_free_buffers() - try to free page buffers.
* @journal: journal for operation
@@ -2192,7 +2189,7 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
/*
* We don't want to write the buffer anymore, clear the
* bit so that we don't confuse checks in
- * __journal_file_buffer
+ * __jbd2_journal_file_buffer
*/
clear_buffer_dirty(bh);
__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
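The update_t_max_wait() comment in the fs/jbd2/transaction.c hunks above describes keeping a running maximum up to date with cmpxchg instead of a lock. As a rough, self-contained illustration of that pattern only (userspace C11 atomics rather than the kernel's cmpxchg(); max_wait/update_max are names made up for this sketch):

	#include <stdatomic.h>

	static _Atomic unsigned long max_wait;	/* stands in for t_max_wait */

	static void update_max(unsigned long sample)
	{
		unsigned long old = atomic_load(&max_wait);

		/* Retry until we install the larger sample or observe that
		 * another thread already recorded a value >= sample; no lock
		 * is ever taken. */
		while (sample > old &&
		       !atomic_compare_exchange_weak(&max_wait, &old, sample))
			;	/* a failed CAS reloads 'old' with the current value */
	}

The hunk above follows the same shape, only using cmpxchg() on transaction->t_max_wait.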
diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
index 2b9ef713b844..3bd9d2f3bece 100644
--- a/fs/jffs2/compr_rtime.c
+++ b/fs/jffs2/compr_rtime.c
@@ -95,7 +95,7 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
positions[value]=outpos;
if (repeat) {
- if ((outpos + repeat) >= destlen) {
+ if ((outpos + repeat) > destlen) {
return 1;
}
if (backoffs + repeat >= outpos) {
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 2b2938970da3..dd91f725ded6 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -32,8 +32,8 @@ static int jffs2_link (struct dentry *,struct inode *,struct dentry *);
static int jffs2_unlink (struct inode *,struct dentry *);
static int jffs2_symlink (struct mnt_idmap *, struct inode *,
struct dentry *, const char *);
-static int jffs2_mkdir (struct mnt_idmap *, struct inode *,struct dentry *,
- umode_t);
+static struct dentry *jffs2_mkdir (struct mnt_idmap *, struct inode *,struct dentry *,
+ umode_t);
static int jffs2_rmdir (struct inode *,struct dentry *);
static int jffs2_mknod (struct mnt_idmap *, struct inode *,struct dentry *,
umode_t,dev_t);
@@ -446,8 +446,8 @@ static int jffs2_symlink (struct mnt_idmap *idmap, struct inode *dir_i,
}
-static int jffs2_mkdir (struct mnt_idmap *idmap, struct inode *dir_i,
- struct dentry *dentry, umode_t mode)
+static struct dentry *jffs2_mkdir (struct mnt_idmap *idmap, struct inode *dir_i,
+ struct dentry *dentry, umode_t mode)
{
struct jffs2_inode_info *f, *dir_f;
struct jffs2_sb_info *c;
@@ -464,7 +464,7 @@ static int jffs2_mkdir (struct mnt_idmap *idmap, struct inode *dir_i,
ri = jffs2_alloc_raw_inode();
if (!ri)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
c = JFFS2_SB_INFO(dir_i->i_sb);
@@ -477,7 +477,7 @@ static int jffs2_mkdir (struct mnt_idmap *idmap, struct inode *dir_i,
if (ret) {
jffs2_free_raw_inode(ri);
- return ret;
+ return ERR_PTR(ret);
}
inode = jffs2_new_inode(dir_i, mode, ri);
@@ -485,7 +485,7 @@ static int jffs2_mkdir (struct mnt_idmap *idmap, struct inode *dir_i,
if (IS_ERR(inode)) {
jffs2_free_raw_inode(ri);
jffs2_complete_reservation(c);
- return PTR_ERR(inode);
+ return ERR_CAST(inode);
}
inode->i_op = &jffs2_dir_inode_operations;
@@ -584,11 +584,11 @@ static int jffs2_mkdir (struct mnt_idmap *idmap, struct inode *dir_i,
jffs2_complete_reservation(c);
d_instantiate_new(dentry, inode);
- return 0;
+ return NULL;
fail:
iget_failed(inode);
- return ret;
+ return ERR_PTR(ret);
}
static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
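The jffs2_mkdir() hunks above switch ->mkdir() from returning an int to returning a struct dentry *, with failures encoded via ERR_PTR()/ERR_CAST() and NULL meaning success with the dentry already instantiated. A minimal, hypothetical callee following that convention (the myfs_* names are invented; only the error-encoding shape is the point):

	#include <linux/fs.h>
	#include <linux/dcache.h>
	#include <linux/err.h>

	static struct dentry *myfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
					 struct dentry *dentry, umode_t mode)
	{
		struct inode *inode = myfs_new_dir_inode(dir, mode);	/* hypothetical helper */

		if (IS_ERR(inode))
			return ERR_CAST(inode);	/* re-encode the errno as a dentry pointer */

		d_instantiate_new(dentry, inode);
		return NULL;			/* success: dentry instantiated in place */
	}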
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index ef3a1e1b6cb0..fda9f4d6093f 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -425,7 +425,9 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
.totlen = cpu_to_je32(c->cleanmarker_size)
};
- jffs2_prealloc_raw_node_refs(c, jeb, 1);
+ ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
+ if (ret)
+ goto filebad;
marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 13c18ccc13b0..b697f3c259ef 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -21,12 +21,14 @@
#include <linux/jffs2.h>
#include "nodelist.h"
-static int jffs2_write_end(struct file *filp, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct folio *folio, void *fsdata);
-static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct folio **foliop, void **fsdata);
+static int jffs2_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata);
+static int jffs2_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata);
static int jffs2_read_folio(struct file *filp, struct folio *folio);
int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
@@ -54,7 +56,7 @@ const struct file_operations jffs2_file_operations =
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.unlocked_ioctl=jffs2_ioctl,
- .mmap = generic_file_readonly_mmap,
+ .mmap_prepare = generic_file_readonly_mmap_prepare,
.fsync = jffs2_fsync,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
@@ -121,9 +123,10 @@ static int jffs2_read_folio(struct file *file, struct folio *folio)
return ret;
}
-static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct folio **foliop, void **fsdata)
+static int jffs2_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
struct folio *folio;
struct inode *inode = mapping->host;
@@ -227,7 +230,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
goto release_sem;
}
}
- jffs2_dbg(1, "end write_begin(). folio->flags %lx\n", folio->flags);
+ jffs2_dbg(1, "end write_begin(). folio->flags %lx\n", folio->flags.f);
release_sem:
mutex_unlock(&c->alloc_sem);
@@ -235,9 +238,10 @@ out_err:
return ret;
}
-static int jffs2_write_end(struct file *filp, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct folio *folio, void *fsdata)
+static int jffs2_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
/* Actually commit the write from the page cache page we're looking at.
* For now, we write the full page out each time. It sucks, but it's simple
@@ -255,7 +259,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
jffs2_dbg(1, "%s(): ino #%lu, page at 0x%llx, range %d-%d, flags %lx\n",
__func__, inode->i_ino, folio_pos(folio),
- start, end, folio->flags);
+ start, end, folio->flags.f);
/* We need to avoid deadlock with page_cache_read() in
jffs2_garbage_collect_pass(). So the folio must be
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index d175cccb7c55..764bba8ba999 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -265,7 +265,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
f = JFFS2_INODE_INFO(inode);
@@ -373,7 +373,7 @@ void jffs2_dirty_inode(struct inode *inode, int flags)
{
struct iattr iattr;
- if (!(inode->i_state & I_DIRTY_DATASYNC)) {
+ if (!(inode_state_read_once(inode) & I_DIRTY_DATASYNC)) {
jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
__func__, inode->i_ino);
return;
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 29671e33a171..62879c218d4b 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -256,7 +256,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
jffs2_dbg(1, "%s(): Skipping %d bytes in nextblock to ensure page alignment\n",
__func__, skip);
- jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
+ ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
+ if (ret)
+ goto out;
jffs2_scan_dirty_space(c, c->nextblock, skip);
}
#endif
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index 4fe64519870f..d83372d3e1a0 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -858,7 +858,10 @@ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
spin_unlock(&c->erase_completion_lock);
jeb = c->nextblock;
- jffs2_prealloc_raw_node_refs(c, jeb, 1);
+ ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
+
+ if (ret)
+ goto out;
if (!c->summary->sum_num || !c->summary->sum_list_head) {
JFFS2_WARNING("Empty summary info!!!\n");
@@ -872,6 +875,8 @@ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
datasize += padsize;
ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize);
+
+out:
spin_lock(&c->erase_completion_lock);
return ret;
}
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 4061e0ba7010..bb815a002984 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -584,7 +584,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
size_t retlen;
/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
- del_timer() the timer we never initialised. */
+ call timer_delete() on the timer we never initialised. */
if (!jffs2_is_writebuffered(c))
return 0;
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index 01b6912e60f8..87ad042221e7 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -26,8 +26,8 @@ int jfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
return rc;
inode_lock(inode);
- if (!(inode->i_state & I_DIRTY_ALL) ||
- (datasync && !(inode->i_state & I_DIRTY_DATASYNC))) {
+ if (!(inode_state_read_once(inode) & I_DIRTY_ALL) ||
+ (datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC))) {
/* Make sure committed changes hit the disk */
jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1);
inode_unlock(inode);
@@ -44,6 +44,9 @@ static int jfs_open(struct inode *inode, struct file *file)
{
int rc;
+ if (S_ISREG(inode->i_mode) && inode->i_size < 0)
+ return -EIO;
+
if ((rc = dquot_file_open(inode, file)))
return rc;
@@ -143,7 +146,7 @@ const struct file_operations jfs_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
+ .mmap_prepare = generic_file_mmap_prepare,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.fsync = jfs_fsync,
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 07cfdc440596..4709762713ef 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -29,7 +29,7 @@ struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
ret = diRead(inode);
@@ -59,9 +59,15 @@ struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
*/
inode->i_link[inode->i_size] = '\0';
}
- } else {
+ } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+ S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
inode->i_op = &jfs_file_inode_operations;
init_special_inode(inode, inode->i_mode, inode->i_rdev);
+ } else {
+ printk(KERN_DEBUG "JFS: Invalid file type 0%04o for inode %lu.\n",
+ inode->i_mode, inode->i_ino);
+ iget_failed(inode);
+ return ERR_PTR(-EIO);
}
unlock_new_inode(inode);
return inode;
@@ -145,9 +151,9 @@ void jfs_evict_inode(struct inode *inode)
if (!inode->i_nlink && !is_bad_inode(inode)) {
dquot_initialize(inode);
+ truncate_inode_pages_final(&inode->i_data);
if (JFS_IP(inode)->fileset == FILESYSTEM_I) {
struct inode *ipimap = JFS_SBI(inode->i_sb)->ipimap;
- truncate_inode_pages_final(&inode->i_data);
if (test_cflag(COMMIT_Freewmap, inode))
jfs_free_zero_link(inode);
@@ -290,9 +296,10 @@ static void jfs_write_failed(struct address_space *mapping, loff_t to)
}
}
-static int jfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct folio **foliop, void **fsdata)
+static int jfs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
int ret;
@@ -303,13 +310,14 @@ static int jfs_write_begin(struct file *file, struct address_space *mapping,
return ret;
}
-static int jfs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied, struct folio *folio,
- void *fsdata)
+static int jfs_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
int ret;
- ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
+ ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
if (ret < len)
jfs_write_failed(mapping, pos + len);
return ret;
@@ -369,7 +377,7 @@ void jfs_truncate_nolock(struct inode *ip, loff_t length)
ASSERT(length >= 0);
- if (test_cflag(COMMIT_Nolink, ip)) {
+ if (test_cflag(COMMIT_Nolink, ip) || isReadOnly(ip)) {
xtTruncate(0, ip, length, COMMIT_WMAP);
return;
}
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index f7bd7e8f5be4..563f148be8af 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -57,7 +57,7 @@ static long jfs_map_ext2(unsigned long flags, int from)
return mapped;
}
-int jfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int jfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
struct jfs_inode_info *jfs_inode = JFS_IP(d_inode(dentry));
unsigned int flags = jfs_inode->mode2 & JFS_FL_USER_VISIBLE;
@@ -71,7 +71,7 @@ int jfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
}
int jfs_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa)
+ struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct jfs_inode_info *jfs_inode = JFS_IP(inode);
diff --git a/fs/jfs/jfs_discard.c b/fs/jfs/jfs_discard.c
index 5f4b305030ad..4b660296caf3 100644
--- a/fs/jfs/jfs_discard.c
+++ b/fs/jfs/jfs_discard.c
@@ -86,7 +86,8 @@ int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)
down_read(&sb->s_umount);
bmp = JFS_SBI(ip->i_sb)->bmap;
- if (minlen > bmp->db_agsize ||
+ if (bmp == NULL ||
+ minlen > bmp->db_agsize ||
start >= bmp->db_mapsize ||
range->len < sb->s_blocksize) {
up_read(&sb->s_umount);
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index f9009e4f9ffd..cdfa699cd7c8 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -178,41 +178,30 @@ int dbMount(struct inode *ipbmap)
dbmp_le = (struct dbmap_disk *) mp->data;
bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize);
bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
-
bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
- if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE ||
- bmp->db_l2nbperpage < 0) {
- err = -EINVAL;
- goto err_release_metapage;
- }
-
bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
- if (!bmp->db_numag || bmp->db_numag > MAXAG) {
- err = -EINVAL;
- goto err_release_metapage;
- }
-
bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
- if (bmp->db_maxag >= MAXAG || bmp->db_maxag < 0 ||
- bmp->db_agpref >= MAXAG || bmp->db_agpref < 0) {
- err = -EINVAL;
- goto err_release_metapage;
- }
-
bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
- if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG ||
- bmp->db_agl2size < 0) {
- err = -EINVAL;
- goto err_release_metapage;
- }
- if (((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) {
+ if ((bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE) ||
+ (bmp->db_l2nbperpage < 0) ||
+ !bmp->db_numag || (bmp->db_numag > MAXAG) ||
+ (bmp->db_maxag >= MAXAG) || (bmp->db_maxag < 0) ||
+ (bmp->db_agpref >= MAXAG) || (bmp->db_agpref < 0) ||
+ (bmp->db_agheight < 0) || (bmp->db_agheight > (L2LPERCTL >> 1)) ||
+ (bmp->db_agwidth < 1) || (bmp->db_agwidth > (LPERCTL / MAXAG)) ||
+ (bmp->db_agwidth > (1 << (L2LPERCTL - (bmp->db_agheight << 1)))) ||
+ (bmp->db_agstart < 0) ||
+ (bmp->db_agstart > (CTLTREESIZE - 1 - bmp->db_agwidth * (MAXAG - 1))) ||
+ (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) ||
+ (bmp->db_agl2size < 0) ||
+ ((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) {
err = -EINVAL;
goto err_release_metapage;
}
@@ -1400,6 +1389,12 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
(1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth;
ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));
+ if (ti < 0 || ti >= le32_to_cpu(dcp->nleafs)) {
+ jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmapctl page\n");
+ release_metapage(mp);
+ return -EIO;
+ }
+
/* dmap control page trees fan-out by 4 and a single allocation
* group may be described by 1 or 2 subtrees within the ag level
* dmap control page, depending upon the ag size. examine the ag's
@@ -1820,8 +1815,10 @@ dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
return -EIO;
dp = (struct dmap *) mp->data;
- if (dp->tree.budmin < 0)
+ if (dp->tree.budmin < 0) {
+ release_metapage(mp);
return -EIO;
+ }
/* try to allocate the blocks.
*/
@@ -3403,7 +3400,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
oldl2agsize = bmp->db_agl2size;
bmp->db_agl2size = l2agsize;
- bmp->db_agsize = 1 << l2agsize;
+ bmp->db_agsize = (s64)1 << l2agsize;
/* compute new number of AG */
agno = bmp->db_numag;
@@ -3666,8 +3663,8 @@ void dbFinalizeBmap(struct inode *ipbmap)
* system size is not a multiple of the group size).
*/
inactfree = (inactags && ag_rem) ?
- ((inactags - 1) << bmp->db_agl2size) + ag_rem
- : inactags << bmp->db_agl2size;
+ (((s64)inactags - 1) << bmp->db_agl2size) + ag_rem
+ : ((s64)inactags << bmp->db_agl2size);
/* determine how many free blocks are in the active
* allocation groups plus the average number of free blocks
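The dbExtendFS()/dbFinalizeBmap() hunks above widen the shifted operand to s64 before shifting (`(s64)1 << l2agsize`). A small userspace illustration of why: shifting a plain int by 31 or more bits is undefined, and even smaller counts cannot produce a 64-bit result (the shift count below is arbitrary, not taken from JFS limits):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int l2agsize = 35;			/* arbitrary, larger than the width of int */
		/* int64_t bad = 1 << l2agsize;	   undefined: int shifted past its width */
		int64_t good = (int64_t)1 << l2agsize;	/* widen first, then shift */

		printf("agsize = %lld blocks\n", (long long)good);
		return 0;
	}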
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 8f85177f284b..0ab83bb7bbdf 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -117,7 +117,8 @@ do { \
if (!(RC)) { \
if (((P)->header.nextindex > \
(((BN) == 0) ? DTROOTMAXSLOT : (P)->header.maxslot)) || \
- ((BN) && ((P)->header.maxslot > DTPAGEMAXSLOT))) { \
+ ((BN) && (((P)->header.maxslot > DTPAGEMAXSLOT) || \
+ ((P)->header.stblindex >= DTPAGEMAXSLOT)))) { \
BT_PUTPAGE(MP); \
jfs_error((IP)->i_sb, \
"DT_GETPAGE: dtree page corrupt\n"); \
@@ -2612,7 +2613,7 @@ void dtInitRoot(tid_t tid, struct inode *ip, u32 idotdot)
* fsck.jfs should really fix this, but it currently does not.
* Called from jfs_readdir when bad index is detected.
*/
-static void add_missing_indices(struct inode *inode, s64 bn)
+static int add_missing_indices(struct inode *inode, s64 bn)
{
struct ldtentry *d;
struct dt_lock *dtlck;
@@ -2621,7 +2622,7 @@ static void add_missing_indices(struct inode *inode, s64 bn)
struct lv *lv;
struct metapage *mp;
dtpage_t *p;
- int rc;
+ int rc = 0;
s8 *stbl;
tid_t tid;
struct tlock *tlck;
@@ -2646,6 +2647,16 @@ static void add_missing_indices(struct inode *inode, s64 bn)
stbl = DT_GETSTBL(p);
for (i = 0; i < p->header.nextindex; i++) {
+ if (stbl[i] < 0) {
+ jfs_err("jfs: add_missing_indices: Invalid stbl[%d] = %d for inode %ld, block = %lld",
+ i, stbl[i], (long)inode->i_ino, (long long)bn);
+ rc = -EIO;
+
+ DT_PUTPAGE(mp);
+ txAbort(tid, 0);
+ goto end;
+ }
+
d = (struct ldtentry *) &p->slot[stbl[i]];
index = le32_to_cpu(d->index);
if ((index < 2) || (index >= JFS_IP(inode)->next_index)) {
@@ -2663,6 +2674,7 @@ static void add_missing_indices(struct inode *inode, s64 bn)
(void) txCommit(tid, 1, &inode, 0);
end:
txEnd(tid);
+ return rc;
}
/*
@@ -2891,7 +2903,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
stbl = DT_GETSTBL(p);
for (i = index; i < p->header.nextindex; i++) {
- if (stbl[i] < 0 || stbl[i] > 127) {
+ if (stbl[i] < 0 || stbl[i] >= DTPAGEMAXSLOT) {
jfs_err("JFS: Invalid stbl[%d] = %d for inode %ld, block = %lld",
i, stbl[i], (long)ip->i_ino, (long long)bn);
free_page(dirent_buf);
@@ -3016,7 +3028,8 @@ skip_one:
}
if (fix_page) {
- add_missing_indices(ip, bn);
+ if ((rc = add_missing_indices(ip, bn)))
+ goto out;
page_fixed = 1;
}
@@ -3095,7 +3108,7 @@ static int dtReadFirst(struct inode *ip, struct btstack * btstack)
/* get the leftmost entry */
stbl = DT_GETSTBL(p);
- if (stbl[0] < 0 || stbl[0] > 127) {
+ if (stbl[0] < 0 || stbl[0] >= DTPAGEMAXSLOT) {
DT_PUTPAGE(mp);
jfs_error(ip->i_sb, "stbl[0] out of bound\n");
return -EIO;
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index 63d21822d309..46529bcc8297 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -74,6 +74,11 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
int rc;
int xflag;
+ if (isReadOnly(ip)) {
+ jfs_error(ip->i_sb, "read-only filesystem\n");
+ return -EIO;
+ }
+
/* This blocks if we are low on resources */
txBeginAnon(ip->i_sb);
@@ -253,6 +258,11 @@ int extRecord(struct inode *ip, xad_t * xp)
{
int rc;
+ if (isReadOnly(ip)) {
+ jfs_error(ip->i_sb, "read-only filesystem\n");
+ return -EIO;
+ }
+
txBeginAnon(ip->i_sb);
mutex_lock(&JFS_IP(ip)->commit_mutex);
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index a360b24ed320..ecb8e05b8b84 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -102,7 +102,7 @@ int diMount(struct inode *ipimap)
* allocate/initialize the in-memory inode map control structure
*/
/* allocate the in-memory inode map control structure. */
- imap = kmalloc(sizeof(struct inomap), GFP_KERNEL);
+ imap = kzalloc(sizeof(struct inomap), GFP_KERNEL);
if (imap == NULL)
return -ENOMEM;
@@ -456,7 +456,7 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
dp += inum % 8; /* 8 inodes per 4K page */
/* copy on-disk inode to in-memory inode */
- if ((copy_from_dinode(dp, ip)) != 0) {
+ if ((copy_from_dinode(dp, ip) != 0) || (ip->i_nlink == 0)) {
/* handle bad return by returning NULL for ip */
set_nlink(ip, 1); /* Don't want iput() deleting it */
iput(ip);
@@ -3029,14 +3029,23 @@ static void duplicateIXtree(struct super_block *sb, s64 blkno,
*
* RETURN VALUES:
* 0 - success
- * -ENOMEM - insufficient memory
+ * -EINVAL - unexpected inode type
*/
static int copy_from_dinode(struct dinode * dip, struct inode *ip)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+ int fileset = le32_to_cpu(dip->di_fileset);
+
+ switch (fileset) {
+ case AGGR_RESERVED_I: case AGGREGATE_I: case BMAP_I:
+ case LOG_I: case BADBLOCK_I: case FILESYSTEM_I:
+ break;
+ default:
+ return -EINVAL;
+ }
- jfs_ip->fileset = le32_to_cpu(dip->di_fileset);
+ jfs_ip->fileset = fileset;
jfs_ip->mode2 = le32_to_cpu(dip->di_mode);
jfs_set_inode_flags(ip);
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index 10934f9a11be..5aaafedb8fbc 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -76,14 +76,14 @@ struct jfs_inode_info {
struct {
unchar _unused[16]; /* 16: */
dxd_t _dxd; /* 16: */
- /* _inline may overflow into _inline_ea when needed */
+ /* _inline_sym may overflow into _inline_ea when needed */
/* _inline_ea may overlay the last part of
* file._xtroot if maxentry = XTROOTINITSLOT
*/
union {
struct {
/* 128: inline symlink */
- unchar _inline[128];
+ unchar _inline_sym[128];
/* 128: inline extended attr */
unchar _inline_ea[128];
};
@@ -101,7 +101,7 @@ struct jfs_inode_info {
#define i_imap u.file._imap
#define i_dirtable u.dir._table
#define i_dtroot u.dir._dtroot
-#define i_inline u.link._inline
+#define i_inline u.link._inline_sym
#define i_inline_ea u.link._inline_ea
#define i_inline_all u.link._inline_all
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index ea80661597ac..2c6c81c8cb9f 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -9,9 +9,9 @@ struct fid;
extern struct inode *ialloc(struct inode *, umode_t);
extern int jfs_fsync(struct file *, loff_t, loff_t, int);
-extern int jfs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+extern int jfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
extern int jfs_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa);
+ struct dentry *dentry, struct file_kattr *fa);
extern long jfs_ioctl(struct file *, unsigned int, unsigned long);
extern struct inode *jfs_iget(struct super_block *, unsigned long);
extern int jfs_commit_inode(struct inode *, int);
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 270808b6219b..b343c5ea1159 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1199,7 +1199,6 @@ static int open_dummy_log(struct super_block *sb)
init_waitqueue_head(&dummy_log->syncwait);
dummy_log->no_integrity = 1;
/* Make up some stuff */
- dummy_log->base = 0;
dummy_log->size = 1024;
rc = lmLogInit(dummy_log);
if (rc) {
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index df575a873ec6..871cf4fb3636 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -15,6 +15,7 @@
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include <linux/writeback.h>
+#include <linux/migrate.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
@@ -151,7 +152,59 @@ static inline void dec_io(struct folio *folio, blk_status_t status,
handler(folio, anchor->status);
}
+#ifdef CONFIG_MIGRATION
+static int __metapage_migrate_folio(struct address_space *mapping,
+ struct folio *dst, struct folio *src,
+ enum migrate_mode mode)
+{
+ struct meta_anchor *src_anchor = src->private;
+ struct metapage *mps[MPS_PER_PAGE] = {0};
+ struct metapage *mp;
+ int i, rc;
+
+ for (i = 0; i < MPS_PER_PAGE; i++) {
+ mp = src_anchor->mp[i];
+ if (mp && metapage_locked(mp))
+ return -EAGAIN;
+ }
+
+ rc = filemap_migrate_folio(mapping, dst, src, mode);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < MPS_PER_PAGE; i++) {
+ mp = src_anchor->mp[i];
+ if (!mp)
+ continue;
+ if (unlikely(insert_metapage(dst, mp))) {
+ /* On error, roll back the previously inserted metapages */
+ for (int j = 0 ; j < i; j++) {
+ if (mps[j])
+ remove_metapage(dst, mps[j]);
+ }
+ return -EAGAIN;
+ }
+ mps[i] = mp;
+ }
+
+ /* Update the metapage and remove it from src */
+ for (i = 0; i < MPS_PER_PAGE; i++) {
+ mp = mps[i];
+ if (mp) {
+ int page_offset = mp->data - folio_address(src);
+
+ mp->data = folio_address(dst) + page_offset;
+ mp->folio = dst;
+ remove_metapage(src, mp);
+ }
+ }
+
+ return 0;
+}
+#endif /* CONFIG_MIGRATION */
+
#else
+
static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
{
return folio->private;
@@ -175,6 +228,35 @@ static inline void remove_metapage(struct folio *folio, struct metapage *mp)
#define inc_io(folio) do {} while(0)
#define dec_io(folio, status, handler) handler(folio, status)
+#ifdef CONFIG_MIGRATION
+static int __metapage_migrate_folio(struct address_space *mapping,
+ struct folio *dst, struct folio *src,
+ enum migrate_mode mode)
+{
+ struct metapage *mp;
+ int page_offset;
+ int rc;
+
+ mp = folio_to_mp(src, 0);
+ if (metapage_locked(mp))
+ return -EAGAIN;
+
+ rc = filemap_migrate_folio(mapping, dst, src, mode);
+ if (rc)
+ return rc;
+
+ if (unlikely(insert_metapage(dst, mp)))
+ return -EAGAIN;
+
+ page_offset = mp->data - folio_address(src);
+ mp->data = folio_address(dst) + page_offset;
+ mp->folio = dst;
+ remove_metapage(src, mp);
+
+ return 0;
+}
+#endif /* CONFIG_MIGRATION */
+
#endif
static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
@@ -339,7 +421,7 @@ static void metapage_write_end_io(struct bio *bio)
}
static int metapage_write_folio(struct folio *folio,
- struct writeback_control *wbc, void *unused)
+ struct writeback_control *wbc)
{
struct bio *bio = NULL;
int block_offset; /* block offset of mp within page */
@@ -468,10 +550,12 @@ static int metapage_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct blk_plug plug;
+ struct folio *folio = NULL;
int err;
blk_start_plug(&plug);
- err = write_cache_pages(mapping, wbc, metapage_write_folio, NULL);
+ while ((folio = writeback_iter(mapping, wbc, folio, &err)))
+ err = metapage_write_folio(folio, wbc);
blk_finish_plug(&plug);
return err;
@@ -554,6 +638,29 @@ static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask)
return ret;
}
+#ifdef CONFIG_MIGRATION
+/*
+ * metapage_migrate_folio - Migration function for JFS metapages
+ */
+static int metapage_migrate_folio(struct address_space *mapping,
+ struct folio *dst, struct folio *src,
+ enum migrate_mode mode)
+{
+ int expected_count;
+
+ if (!src->private)
+ return filemap_migrate_folio(mapping, dst, src, mode);
+
+ /* Check that the folio has no extra references before we do more work */
+ expected_count = folio_expected_ref_count(src) + 1;
+ if (folio_ref_count(src) != expected_count)
+ return -EAGAIN;
+ return __metapage_migrate_folio(mapping, dst, src, mode);
+}
+#else
+#define metapage_migrate_folio NULL
+#endif /* CONFIG_MIGRATION */
+
static void metapage_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
@@ -570,6 +677,7 @@ const struct address_space_operations jfs_metapage_aops = {
.release_folio = metapage_release_folio,
.invalidate_folio = metapage_invalidate_folio,
.dirty_folio = filemap_dirty_folio,
+ .migrate_folio = metapage_migrate_folio,
};
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
@@ -707,7 +815,7 @@ static int metapage_write_one(struct folio *folio)
if (folio_clear_dirty_for_io(folio)) {
folio_get(folio);
- ret = metapage_write_folio(folio, &wbc, NULL);
+ ret = metapage_write_folio(folio, &wbc);
if (ret == 0)
folio_wait_writeback(folio);
folio_put(folio);
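The metapage_writepages() hunk above replaces write_cache_pages() with an explicit writeback_iter() loop. A stripped-down sketch of that iteration pattern, with my_write_folio() standing in for the per-folio writer (hypothetical name):

	#include <linux/pagemap.h>
	#include <linux/writeback.h>

	static int my_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
	{
		struct folio *folio = NULL;
		int err = 0;

		/* writeback_iter() hands back the dirty folios for this
		 * writeback pass one at a time, returning NULL when the walk
		 * finishes or has to stop. */
		while ((folio = writeback_iter(mapping, wbc, folio, &err)))
			err = my_write_folio(folio, wbc);	/* hypothetical helper */

		return err;
	}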
diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
index 98f9a432c336..52e6b58c5dbd 100644
--- a/fs/jfs/jfs_mount.c
+++ b/fs/jfs/jfs_mount.c
@@ -325,13 +325,13 @@ static int chkSuper(struct super_block *sb)
if ((j_sb->s_flag & cpu_to_le32(JFS_BAD_SAIT)) !=
cpu_to_le32(JFS_BAD_SAIT)) {
expected_AIM_bytesize = 2 * PSIZE;
- AIM_bytesize = lengthPXD(&(j_sb->s_aim2)) * bsize;
+ AIM_bytesize = lengthPXD(&j_sb->s_aim2) * bsize;
expected_AIT_bytesize = 4 * PSIZE;
- AIT_bytesize = lengthPXD(&(j_sb->s_ait2)) * bsize;
- AIM_byte_addr = addressPXD(&(j_sb->s_aim2)) * bsize;
- AIT_byte_addr = addressPXD(&(j_sb->s_ait2)) * bsize;
+ AIT_bytesize = lengthPXD(&j_sb->s_ait2) * bsize;
+ AIM_byte_addr = addressPXD(&j_sb->s_aim2) * bsize;
+ AIT_byte_addr = addressPXD(&j_sb->s_ait2) * bsize;
byte_addr_diff0 = AIT_byte_addr - AIM_byte_addr;
- fsckwsp_addr = addressPXD(&(j_sb->s_fsckpxd)) * bsize;
+ fsckwsp_addr = addressPXD(&j_sb->s_fsckpxd) * bsize;
byte_addr_diff1 = fsckwsp_addr - AIT_byte_addr;
if ((AIM_bytesize != expected_AIM_bytesize) ||
(AIT_bytesize != expected_AIT_bytesize) ||
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index be17e3c43582..c16578af3a77 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -272,14 +272,15 @@ int txInit(void)
if (TxBlock == NULL)
return -ENOMEM;
- for (k = 1; k < nTxBlock - 1; k++) {
- TxBlock[k].next = k + 1;
+ for (k = 0; k < nTxBlock; k++) {
init_waitqueue_head(&TxBlock[k].gcwait);
init_waitqueue_head(&TxBlock[k].waitor);
}
+
+ for (k = 1; k < nTxBlock - 1; k++) {
+ TxBlock[k].next = k + 1;
+ }
TxBlock[k].next = 0;
- init_waitqueue_head(&TxBlock[k].gcwait);
- init_waitqueue_head(&TxBlock[k].waitor);
TxAnchor.freetid = 1;
init_waitqueue_head(&TxAnchor.freewait);
@@ -1286,7 +1287,7 @@ int txCommit(tid_t tid, /* transaction identifier */
* to verify this, only a trivial s/I_LOCK/I_SYNC/ was done.
* Joern
*/
- if (tblk->u.ip->i_state & I_SYNC)
+ if (inode_state_read_once(tblk->u.ip) & I_SYNC)
tblk->xflag &= ~COMMIT_LAZY;
}
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
index 5ee618d17e77..28c3cf960c6f 100644
--- a/fs/jfs/jfs_xtree.c
+++ b/fs/jfs/jfs_xtree.c
@@ -49,26 +49,6 @@
#define XT_PAGE(IP, MP) BT_PAGE(IP, MP, xtpage_t, i_xtroot)
-/* get page buffer for specified block address */
-/* ToDo: Replace this ugly macro with a function */
-#define XT_GETPAGE(IP, BN, MP, SIZE, P, RC) \
-do { \
- BT_GETPAGE(IP, BN, MP, xtpage_t, SIZE, P, RC, i_xtroot); \
- if (!(RC)) { \
- if ((le16_to_cpu((P)->header.nextindex) < XTENTRYSTART) || \
- (le16_to_cpu((P)->header.nextindex) > \
- le16_to_cpu((P)->header.maxentry)) || \
- (le16_to_cpu((P)->header.maxentry) > \
- (((BN) == 0) ? XTROOTMAXSLOT : PSIZE >> L2XTSLOTSIZE))) { \
- jfs_error((IP)->i_sb, \
- "XT_GETPAGE: xtree page corrupt\n"); \
- BT_PUTPAGE(MP); \
- MP = NULL; \
- RC = -EIO; \
- } \
- } \
-} while (0)
-
/* for consistency */
#define XT_PUTPAGE(MP) BT_PUTPAGE(MP)
@@ -115,6 +95,42 @@ static int xtSplitRoot(tid_t tid, struct inode *ip,
struct xtsplit * split, struct metapage ** rmpp);
/*
+ * xt_getpage()
+ *
+ * function: get the page buffer for a specified block address.
+ *
+ * parameters:
+ * ip - pointer to the inode
+ * bn - block number (s64) of the xtree page to be retrieved;
+ * mp - pointer to a metapage pointer where the page buffer is returned;
+ *
+ * returns:
+ * A pointer to the xtree page (xtpage_t) on success, or an
+ * ERR_PTR()-encoded errno on failure.
+ */
+
+static inline xtpage_t *xt_getpage(struct inode *ip, s64 bn, struct metapage **mp)
+{
+ xtpage_t *p;
+ int rc;
+
+ BT_GETPAGE(ip, bn, *mp, xtpage_t, PSIZE, p, rc, i_xtroot);
+
+ if (rc)
+ return ERR_PTR(rc);
+ if ((le16_to_cpu(p->header.nextindex) < XTENTRYSTART) ||
+ (le16_to_cpu(p->header.nextindex) >
+ le16_to_cpu(p->header.maxentry)) ||
+ (le16_to_cpu(p->header.maxentry) >
+ ((bn == 0) ? XTROOTMAXSLOT : PSIZE >> L2XTSLOTSIZE))) {
+ jfs_error(ip->i_sb, "xt_getpage: xtree page corrupt\n");
+ BT_PUTPAGE(*mp);
+ *mp = NULL;
+ return ERR_PTR(-EIO);
+ }
+ return p;
+}
+
+/*
* xtLookup()
*
* function: map a single page into a physical extent;
@@ -216,7 +232,6 @@ static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp,
int *cmpp, struct btstack * btstack, int flag)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
- int rc = 0;
int cmp = 1; /* init for empty page */
s64 bn; /* block number */
struct metapage *mp; /* page buffer */
@@ -252,9 +267,9 @@ static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp,
*/
for (bn = 0;;) {
/* get/pin the page to search */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
/* try sequential access heuristics with the previous
* access entry in target leaf page:
@@ -807,10 +822,10 @@ xtSplitUp(tid_t tid,
* insert router entry in parent for new right child page <rp>
*/
/* get/pin the parent page <sp> */
- XT_GETPAGE(ip, parent->bn, smp, PSIZE, sp, rc);
- if (rc) {
+ sp = xt_getpage(ip, parent->bn, &smp);
+ if (IS_ERR(sp)) {
XT_PUTPAGE(rcmp);
- return rc;
+ return PTR_ERR(sp);
}
/*
@@ -1062,10 +1077,10 @@ xtSplitPage(tid_t tid, struct inode *ip,
* update previous pointer of old next/right page of <sp>
*/
if (nextbn != 0) {
- XT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
- if (rc) {
+ p = xt_getpage(ip, nextbn, &mp);
+ if (IS_ERR(p)) {
XT_PUTPAGE(rmp);
- goto clean_up;
+ return PTR_ERR(p);
}
BT_MARK_DIRTY(mp, ip);
@@ -1417,9 +1432,9 @@ int xtExtend(tid_t tid, /* transaction id */
return rc;
/* get back old page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
/*
* if leaf root has been split, original root has been
* copied to new child page, i.e., original entry now
@@ -1433,9 +1448,9 @@ int xtExtend(tid_t tid, /* transaction id */
XT_PUTPAGE(mp);
/* get new child page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
@@ -1711,9 +1726,9 @@ int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
return rc;
/* get back old page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
/*
* if leaf root has been split, original root has been
* copied to new child page, i.e., original entry now
@@ -1727,9 +1742,9 @@ int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
XT_PUTPAGE(mp);
/* get new child page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
@@ -1788,9 +1803,9 @@ int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
XT_PUTPAGE(mp);
/* get new right page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
@@ -1864,9 +1879,9 @@ printf("xtUpdate.updateLeft.split p:0x%p\n", p);
return rc;
/* get back old page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
/*
* if leaf root has been split, original root has been
@@ -1881,9 +1896,9 @@ printf("xtUpdate.updateLeft.split p:0x%p\n", p);
XT_PUTPAGE(mp);
/* get new child page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
@@ -2187,7 +2202,6 @@ void xtInitRoot(tid_t tid, struct inode *ip)
*/
s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
{
- int rc = 0;
s64 teof;
struct metapage *mp;
xtpage_t *p;
@@ -2268,9 +2282,9 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
* first access of each page:
*/
getPage:
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
/* process entries backward from last index */
index = le16_to_cpu(p->header.nextindex) - 1;
@@ -2506,9 +2520,9 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
/* get back the parent page */
bn = parent->bn;
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
index = parent->index;
@@ -2791,9 +2805,9 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
* first access of each page:
*/
getPage:
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
/* process entries backward from last index */
index = le16_to_cpu(p->header.nextindex) - 1;
@@ -2836,9 +2850,9 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
/* get back the parent page */
bn = parent->bn;
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
index = parent->index;
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index d68a4e6ac345..65a218eba8fa 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -187,13 +187,13 @@ static int jfs_create(struct mnt_idmap *idmap, struct inode *dip,
* dentry - dentry of child directory
* mode - create mode (rwxrwxrwx).
*
- * RETURN: Errors from subroutines
+ * RETURN: ERR_PTR() of errors from subroutines.
*
* note:
* EACCES: user needs search+write permission on the parent directory
*/
-static int jfs_mkdir(struct mnt_idmap *idmap, struct inode *dip,
- struct dentry *dentry, umode_t mode)
+static struct dentry *jfs_mkdir(struct mnt_idmap *idmap, struct inode *dip,
+ struct dentry *dentry, umode_t mode)
{
int rc = 0;
tid_t tid; /* transaction id */
@@ -308,7 +308,7 @@ static int jfs_mkdir(struct mnt_idmap *idmap, struct inode *dip,
out1:
jfs_info("jfs_mkdir: rc:%d", rc);
- return rc;
+ return ERR_PTR(rc);
}
/*
@@ -1576,7 +1576,8 @@ out:
return result;
}
-static int jfs_ci_revalidate(struct dentry *dentry, unsigned int flags)
+static int jfs_ci_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
/*
* This is not negative dentry. Always valid.
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 223d9ac59839..3cfb86c5a36e 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -389,8 +389,8 @@ static int jfs_reconfigure(struct fs_context *fc)
if (!ctx->newLVSize) {
ctx->newLVSize = sb_bdev_nr_blocks(sb);
- if (ctx->newLVSize == 0)
- pr_err("JFS: Cannot determine volume size\n");
+ if (ctx->newLVSize == 0)
+ pr_err("JFS: Cannot determine volume size\n");
}
rc = jfs_extendfs(sb, ctx->newLVSize, 0);
@@ -542,7 +542,7 @@ static int jfs_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_magic = JFS_SUPER_MAGIC;
if (sbi->mntflag & JFS_OS2)
- sb->s_d_op = &jfs_ci_dentry_operations;
+ set_default_d_op(sb, &jfs_ci_dentry_operations);
inode = jfs_iget(sb, ROOT_I);
if (IS_ERR(inode)) {
@@ -766,7 +766,7 @@ static ssize_t jfs_quota_write(struct super_block *sb, int type,
}
lock_buffer(bh);
memcpy(bh->b_data+offset, data, tocopy);
- flush_dcache_page(bh->b_page);
+ flush_dcache_folio(bh->b_folio);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
unlock_buffer(bh);
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 24afbae87225..11d7f74d207b 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -559,11 +559,16 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
size_check:
if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
- int size = clamp_t(int, ea_size, 0, EALIST_SIZE(ea_buf->xattr));
-
- printk(KERN_ERR "ea_get: invalid extended attribute\n");
- print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
- ea_buf->xattr, size, 1);
+ if (unlikely(EALIST_SIZE(ea_buf->xattr) > INT_MAX)) {
+ printk(KERN_ERR "ea_get: extended attribute size too large: %u > INT_MAX\n",
+ EALIST_SIZE(ea_buf->xattr));
+ } else {
+ int size = clamp_t(int, ea_size, 0, EALIST_SIZE(ea_buf->xattr));
+
+ printk(KERN_ERR "ea_get: invalid extended attribute\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
+ ea_buf->xattr, size, 1);
+ }
ea_release(inode, ea_buf);
rc = -EIO;
goto clean_up;
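The ea_get() hunk above skips the hex dump when EALIST_SIZE() exceeds INT_MAX, presumably because clamp_t(int, ...) with an upper bound above INT_MAX wraps to a negative value once cast to int (my reading of the hunk, not a stated rationale). A tiny userspace illustration of that truncation, with an arbitrary value:

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int ea_list_size = 0x90000000u;	/* > INT_MAX, hypothetical */
		int hi = (int)ea_list_size;	/* implementation-defined, typically negative */

		printf("clamp upper bound becomes %d (INT_MAX is %d)\n", hi, INT_MAX);
		return 0;
	}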
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 458519e416fe..5c0efd6b239f 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -17,7 +17,6 @@
#include "kernfs-internal.h"
-static DEFINE_RWLOCK(kernfs_rename_lock); /* kn->parent and ->name */
/*
* Don't use rename_lock to piggy back on pr_cont_buf. We don't want to
* call pr_cont() while holding rename_lock. Because sometimes pr_cont()
@@ -27,7 +26,6 @@ static DEFINE_RWLOCK(kernfs_rename_lock); /* kn->parent and ->name */
*/
static DEFINE_SPINLOCK(kernfs_pr_cont_lock);
static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by pr_cont_lock */
-static DEFINE_SPINLOCK(kernfs_idr_lock); /* root->ino_idr */
#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
@@ -51,22 +49,14 @@ static bool kernfs_lockdep(struct kernfs_node *kn)
#endif
}
-static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen)
-{
- if (!kn)
- return strscpy(buf, "(null)", buflen);
-
- return strscpy(buf, kn->parent ? kn->name : "/", buflen);
-}
-
/* kernfs_node_depth - compute depth from @from to @to */
static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to)
{
size_t depth = 0;
- while (to->parent && to != from) {
+ while (rcu_dereference(to->__parent) && to != from) {
depth++;
- to = to->parent;
+ to = rcu_dereference(to->__parent);
}
return depth;
}
@@ -84,18 +74,18 @@ static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
db = kernfs_depth(rb->kn, b);
while (da > db) {
- a = a->parent;
+ a = rcu_dereference(a->__parent);
da--;
}
while (db > da) {
- b = b->parent;
+ b = rcu_dereference(b->__parent);
db--;
}
/* worst case b and a will be the same at root */
while (b != a) {
- b = b->parent;
- a = a->parent;
+ b = rcu_dereference(b->__parent);
+ a = rcu_dereference(a->__parent);
}
return a;
@@ -168,10 +158,13 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
/* Calculate how many bytes we need for the rest */
for (i = depth_to - 1; i >= 0; i--) {
+ const char *name;
+
for (kn = kn_to, j = 0; j < i; j++)
- kn = kn->parent;
+ kn = rcu_dereference(kn->__parent);
- len += scnprintf(buf + len, buflen - len, "/%s", kn->name);
+ name = rcu_dereference(kn->name);
+ len += scnprintf(buf + len, buflen - len, "/%s", name);
}
return len;
@@ -195,13 +188,18 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
*/
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{
- unsigned long flags;
- int ret;
+ struct kernfs_node *kn_parent;
- read_lock_irqsave(&kernfs_rename_lock, flags);
- ret = kernfs_name_locked(kn, buf, buflen);
- read_unlock_irqrestore(&kernfs_rename_lock, flags);
- return ret;
+ if (!kn)
+ return strscpy(buf, "(null)", buflen);
+
+ guard(rcu)();
+ /*
+ * KERNFS_ROOT_INVARIANT_PARENT is ignored here. The name is RCU-freed and
+ * the parent either still exists or it does not.
+ */
+ kn_parent = rcu_dereference(kn->__parent);
+ return strscpy(buf, kn_parent ? rcu_dereference(kn->name) : "/", buflen);
}
/**
@@ -223,13 +221,17 @@ int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
char *buf, size_t buflen)
{
- unsigned long flags;
- int ret;
+ struct kernfs_root *root;
- read_lock_irqsave(&kernfs_rename_lock, flags);
- ret = kernfs_path_from_node_locked(to, from, buf, buflen);
- read_unlock_irqrestore(&kernfs_rename_lock, flags);
- return ret;
+ guard(rcu)();
+ if (to) {
+ root = kernfs_root(to);
+ if (!(root->flags & KERNFS_ROOT_INVARIANT_PARENT)) {
+ guard(read_lock_irqsave)(&root->kernfs_rename_lock);
+ return kernfs_path_from_node_locked(to, from, buf, buflen);
+ }
+ }
+ return kernfs_path_from_node_locked(to, from, buf, buflen);
}
EXPORT_SYMBOL_GPL(kernfs_path_from_node);
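The kernfs hunks in this file move kn->parent and kn->name behind RCU (rcu_assign_pointer()/rcu_dereference(), with freeing deferred via call_rcu()). A bare-bones sketch of that publish/read pattern, using invented node/my_parent/my_reparent names rather than the kernfs helpers:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct node {
		struct node __rcu *parent;
		struct rcu_head rcu;		/* freed via call_rcu() elsewhere */
	};

	static struct node *my_parent(struct node *n)
	{
		/* Caller must be inside rcu_read_lock() or otherwise pin n. */
		return rcu_dereference(n->parent);
	}

	static void my_reparent(struct node *n, struct node *new_parent)
	{
		/* Publish the new parent only after it is fully initialised. */
		rcu_assign_pointer(n->parent, new_parent);
	}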
@@ -292,12 +294,14 @@ out:
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
{
struct kernfs_node *parent;
+ struct kernfs_root *root;
unsigned long flags;
- read_lock_irqsave(&kernfs_rename_lock, flags);
- parent = kn->parent;
+ root = kernfs_root(kn);
+ read_lock_irqsave(&root->kernfs_rename_lock, flags);
+ parent = kernfs_parent(kn);
kernfs_get(parent);
- read_unlock_irqrestore(&kernfs_rename_lock, flags);
+ read_unlock_irqrestore(&root->kernfs_rename_lock, flags);
return parent;
}
@@ -336,13 +340,13 @@ static int kernfs_name_compare(unsigned int hash, const char *name,
return -1;
if (ns > kn->ns)
return 1;
- return strcmp(name, kn->name);
+ return strcmp(name, kernfs_rcu_name(kn));
}
static int kernfs_sd_compare(const struct kernfs_node *left,
const struct kernfs_node *right)
{
- return kernfs_name_compare(left->hash, left->name, left->ns, right);
+ return kernfs_name_compare(left->hash, kernfs_rcu_name(left), left->ns, right);
}
/**
@@ -360,8 +364,12 @@ static int kernfs_sd_compare(const struct kernfs_node *left,
*/
static int kernfs_link_sibling(struct kernfs_node *kn)
{
- struct rb_node **node = &kn->parent->dir.children.rb_node;
struct rb_node *parent = NULL;
+ struct kernfs_node *kn_parent;
+ struct rb_node **node;
+
+ kn_parent = kernfs_parent(kn);
+ node = &kn_parent->dir.children.rb_node;
while (*node) {
struct kernfs_node *pos;
@@ -380,13 +388,13 @@ static int kernfs_link_sibling(struct kernfs_node *kn)
/* add new node and rebalance the tree */
rb_link_node(&kn->rb, parent, node);
- rb_insert_color(&kn->rb, &kn->parent->dir.children);
+ rb_insert_color(&kn->rb, &kn_parent->dir.children);
/* successfully added, account subdir number */
down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
if (kernfs_type(kn) == KERNFS_DIR)
- kn->parent->dir.subdirs++;
- kernfs_inc_rev(kn->parent);
+ kn_parent->dir.subdirs++;
+ kernfs_inc_rev(kn_parent);
up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
return 0;
@@ -407,16 +415,19 @@ static int kernfs_link_sibling(struct kernfs_node *kn)
*/
static bool kernfs_unlink_sibling(struct kernfs_node *kn)
{
+ struct kernfs_node *kn_parent;
+
if (RB_EMPTY_NODE(&kn->rb))
return false;
+ kn_parent = kernfs_parent(kn);
down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
if (kernfs_type(kn) == KERNFS_DIR)
- kn->parent->dir.subdirs--;
- kernfs_inc_rev(kn->parent);
+ kn_parent->dir.subdirs--;
+ kernfs_inc_rev(kn_parent);
up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
- rb_erase(&kn->rb, &kn->parent->dir.children);
+ rb_erase(&kn->rb, &kn_parent->dir.children);
RB_CLEAR_NODE(&kn->rb);
return true;
}
@@ -533,7 +544,8 @@ static void kernfs_free_rcu(struct rcu_head *rcu)
{
struct kernfs_node *kn = container_of(rcu, struct kernfs_node, rcu);
- kfree_const(kn->name);
+ /* If the whole node goes away, then name can't be used outside */
+ kfree_const(rcu_access_pointer(kn->name));
if (kn->iattr) {
simple_xattrs_free(&kn->iattr->xattrs, NULL);
@@ -562,18 +574,19 @@ void kernfs_put(struct kernfs_node *kn)
* Moving/renaming is always done while holding reference.
* kn->parent won't change beneath us.
*/
- parent = kn->parent;
+ parent = kernfs_parent(kn);
WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
"kernfs_put: %s/%s: released with incorrect active_ref %d\n",
- parent ? parent->name : "", kn->name, atomic_read(&kn->active));
+ parent ? rcu_dereference(parent->name) : "",
+ rcu_dereference(kn->name), atomic_read(&kn->active));
if (kernfs_type(kn) == KERNFS_LINK)
kernfs_put(kn->symlink.target_kn);
- spin_lock(&kernfs_idr_lock);
+ spin_lock(&root->kernfs_idr_lock);
idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
- spin_unlock(&kernfs_idr_lock);
+ spin_unlock(&root->kernfs_idr_lock);
call_rcu(&kn->rcu, kernfs_free_rcu);
@@ -626,13 +639,13 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
goto err_out1;
idr_preload(GFP_KERNEL);
- spin_lock(&kernfs_idr_lock);
+ spin_lock(&root->kernfs_idr_lock);
ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC);
if (ret >= 0 && ret < root->last_id_lowbits)
root->id_highbits++;
id_highbits = root->id_highbits;
root->last_id_lowbits = ret;
- spin_unlock(&kernfs_idr_lock);
+ spin_unlock(&root->kernfs_idr_lock);
idr_preload_end();
if (ret < 0)
goto err_out2;
@@ -643,7 +656,7 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
RB_CLEAR_NODE(&kn->rb);
- kn->name = name;
+ rcu_assign_pointer(kn->name, name);
kn->mode = mode;
kn->flags = flags;
@@ -662,15 +675,18 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
if (parent) {
ret = security_kernfs_init_security(parent, kn);
if (ret)
- goto err_out3;
+ goto err_out4;
}
return kn;
+ err_out4:
+ simple_xattrs_free(&kn->iattr->xattrs, NULL);
+ kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
err_out3:
- spin_lock(&kernfs_idr_lock);
+ spin_lock(&root->kernfs_idr_lock);
idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
- spin_unlock(&kernfs_idr_lock);
+ spin_unlock(&root->kernfs_idr_lock);
err_out2:
kmem_cache_free(kernfs_node_cache, kn);
err_out1:
@@ -701,7 +717,7 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
name, mode, uid, gid, flags);
if (kn) {
kernfs_get(parent);
- kn->parent = parent;
+ rcu_assign_pointer(kn->__parent, parent);
}
return kn;
}
@@ -769,18 +785,20 @@ err_unlock:
*/
int kernfs_add_one(struct kernfs_node *kn)
{
- struct kernfs_node *parent = kn->parent;
- struct kernfs_root *root = kernfs_root(parent);
+ struct kernfs_root *root = kernfs_root(kn);
struct kernfs_iattrs *ps_iattr;
+ struct kernfs_node *parent;
bool has_ns;
int ret;
down_write(&root->kernfs_rwsem);
+ parent = kernfs_parent(kn);
ret = -EINVAL;
has_ns = kernfs_ns_enabled(parent);
if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
- has_ns ? "required" : "invalid", parent->name, kn->name))
+ has_ns ? "required" : "invalid",
+ kernfs_rcu_name(parent), kernfs_rcu_name(kn)))
goto out_unlock;
if (kernfs_type(parent) != KERNFS_DIR)
@@ -790,7 +808,7 @@ int kernfs_add_one(struct kernfs_node *kn)
if (parent->flags & (KERNFS_REMOVING | KERNFS_EMPTY_DIR))
goto out_unlock;
- kn->hash = kernfs_name_hash(kn->name, kn->ns);
+ kn->hash = kernfs_name_hash(kernfs_rcu_name(kn), kn->ns);
ret = kernfs_link_sibling(kn);
if (ret)
@@ -846,7 +864,7 @@ static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
if (has_ns != (bool)ns) {
WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
- has_ns ? "required" : "invalid", parent->name, name);
+ has_ns ? "required" : "invalid", kernfs_rcu_name(parent), name);
return NULL;
}
@@ -949,6 +967,11 @@ struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
return kn;
}
+unsigned int kernfs_root_flags(struct kernfs_node *kn)
+{
+ return kernfs_root(kn)->flags;
+}
+
/**
* kernfs_create_root - create a new kernfs hierarchy
* @scops: optional syscall operations for the hierarchy
@@ -969,10 +992,12 @@ struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
return ERR_PTR(-ENOMEM);
idr_init(&root->ino_idr);
+ spin_lock_init(&root->kernfs_idr_lock);
init_rwsem(&root->kernfs_rwsem);
init_rwsem(&root->kernfs_iattr_rwsem);
init_rwsem(&root->kernfs_supers_rwsem);
INIT_LIST_HEAD(&root->supers);
+ rwlock_init(&root->kernfs_rename_lock);
/*
* On 64bit ino setups, id is ino. On 32bit, low 32bits are ino.
@@ -1109,9 +1134,10 @@ struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
return ERR_PTR(rc);
}
-static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
+static int kernfs_dop_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
- struct kernfs_node *kn;
+ struct kernfs_node *kn, *parent;
struct kernfs_root *root;
if (flags & LOOKUP_RCU)
@@ -1119,8 +1145,6 @@ static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
/* Negative hashed dentry? */
if (d_really_is_negative(dentry)) {
- struct kernfs_node *parent;
-
/* If the kernfs parent node has changed discard and
* proceed to ->lookup.
*
@@ -1162,16 +1186,17 @@ static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
if (!kernfs_active(kn))
goto out_bad;
+ parent = kernfs_parent(kn);
/* The kernfs node has been moved? */
- if (kernfs_dentry_node(dentry->d_parent) != kn->parent)
+ if (kernfs_dentry_node(dentry->d_parent) != parent)
goto out_bad;
/* The kernfs node has been renamed */
- if (strcmp(dentry->d_name.name, kn->name) != 0)
+ if (strcmp(dentry->d_name.name, kernfs_rcu_name(kn)) != 0)
goto out_bad;
/* The kernfs node has been moved to a different namespace */
- if (kn->parent && kernfs_ns_enabled(kn->parent) &&
+ if (parent && kernfs_ns_enabled(parent) &&
kernfs_info(dentry->d_sb)->ns != kn->ns)
goto out_bad;
@@ -1229,24 +1254,24 @@ static struct dentry *kernfs_iop_lookup(struct inode *dir,
return d_splice_alias(inode, dentry);
}
-static int kernfs_iop_mkdir(struct mnt_idmap *idmap,
- struct inode *dir, struct dentry *dentry,
- umode_t mode)
+static struct dentry *kernfs_iop_mkdir(struct mnt_idmap *idmap,
+ struct inode *dir, struct dentry *dentry,
+ umode_t mode)
{
struct kernfs_node *parent = dir->i_private;
struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops;
int ret;
if (!scops || !scops->mkdir)
- return -EPERM;
+ return ERR_PTR(-EPERM);
if (!kernfs_get_active(parent))
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
ret = scops->mkdir(parent, dentry->d_name.name, mode);
kernfs_put_active(parent);
- return ret;
+ return ERR_PTR(ret);
}
static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
@@ -1364,7 +1389,7 @@ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
return kernfs_leftmost_descendant(rb_to_kn(rbn));
/* no sibling left, visit parent */
- return pos->parent;
+ return kernfs_parent(pos);
}
static void kernfs_activate_one(struct kernfs_node *kn)
@@ -1376,7 +1401,7 @@ static void kernfs_activate_one(struct kernfs_node *kn)
if (kernfs_active(kn) || (kn->flags & (KERNFS_HIDDEN | KERNFS_REMOVING)))
return;
- WARN_ON_ONCE(kn->parent && RB_EMPTY_NODE(&kn->rb));
+ WARN_ON_ONCE(rcu_access_pointer(kn->__parent) && RB_EMPTY_NODE(&kn->rb));
WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
@@ -1446,7 +1471,7 @@ void kernfs_show(struct kernfs_node *kn, bool show)
static void __kernfs_remove(struct kernfs_node *kn)
{
- struct kernfs_node *pos;
+ struct kernfs_node *pos, *parent;
/* Short-circuit if non-root @kn has already finished removal. */
if (!kn)
@@ -1458,10 +1483,10 @@ static void __kernfs_remove(struct kernfs_node *kn)
* This is for kernfs_remove_self() which plays with active ref
* after removal.
*/
- if (kn->parent && RB_EMPTY_NODE(&kn->rb))
+ if (kernfs_parent(kn) && RB_EMPTY_NODE(&kn->rb))
return;
- pr_debug("kernfs %s: removing\n", kn->name);
+ pr_debug("kernfs %s: removing\n", kernfs_rcu_name(kn));
/* prevent new usage by marking all nodes removing and deactivating */
pos = NULL;
@@ -1484,14 +1509,14 @@ static void __kernfs_remove(struct kernfs_node *kn)
kernfs_get(pos);
kernfs_drain(pos);
-
+ parent = kernfs_parent(pos);
/*
* kernfs_unlink_sibling() succeeds once per node. Use it
* to decide who's responsible for cleanups.
*/
- if (!pos->parent || kernfs_unlink_sibling(pos)) {
+ if (!parent || kernfs_unlink_sibling(pos)) {
struct kernfs_iattrs *ps_iattr =
- pos->parent ? pos->parent->iattr : NULL;
+ parent ? parent->iattr : NULL;
/* update timestamps on the parent */
down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
@@ -1560,8 +1585,9 @@ void kernfs_break_active_protection(struct kernfs_node *kn)
* invoked before finishing the kernfs operation. Note that while this
* function restores the active reference, it doesn't and can't actually
* restore the active protection - @kn may already or be in the process of
- * being removed. Once kernfs_break_active_protection() is invoked, that
- * protection is irreversibly gone for the kernfs operation instance.
+ * being drained and removed. Once kernfs_break_active_protection() is
+ * invoked, that protection is irreversibly gone for the kernfs operation
+ * instance.
*
* While this function may be called at any point after
* kernfs_break_active_protection() is invoked, its most useful location
@@ -1717,11 +1743,11 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
{
struct kernfs_node *old_parent;
struct kernfs_root *root;
- const char *old_name = NULL;
+ const char *old_name;
int error;
/* can't move or rename root */
- if (!kn->parent)
+ if (!rcu_access_pointer(kn->__parent))
return -EINVAL;
root = kernfs_root(kn);
@@ -1732,9 +1758,19 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
(new_parent->flags & KERNFS_EMPTY_DIR))
goto out;
+ old_parent = kernfs_parent(kn);
+ if (root->flags & KERNFS_ROOT_INVARIANT_PARENT) {
+ error = -EINVAL;
+ if (WARN_ON_ONCE(old_parent != new_parent))
+ goto out;
+ }
+
error = 0;
- if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
- (strcmp(kn->name, new_name) == 0))
+ old_name = kernfs_rcu_name(kn);
+ if (!new_name)
+ new_name = old_name;
+ if ((old_parent == new_parent) && (kn->ns == new_ns) &&
+ (strcmp(old_name, new_name) == 0))
goto out; /* nothing to rename */
error = -EEXIST;
@@ -1742,7 +1778,7 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
goto out;
/* rename kernfs_node */
- if (strcmp(kn->name, new_name) != 0) {
+ if (strcmp(old_name, new_name) != 0) {
error = -ENOMEM;
new_name = kstrdup_const(new_name, GFP_KERNEL);
if (!new_name)
@@ -1755,27 +1791,32 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
* Move to the appropriate place in the appropriate directories rbtree.
*/
kernfs_unlink_sibling(kn);
- kernfs_get(new_parent);
- /* rename_lock protects ->parent and ->name accessors */
- write_lock_irq(&kernfs_rename_lock);
+ /* rename_lock protects ->parent accessors */
+ if (old_parent != new_parent) {
+ kernfs_get(new_parent);
+ write_lock_irq(&root->kernfs_rename_lock);
- old_parent = kn->parent;
- kn->parent = new_parent;
+ rcu_assign_pointer(kn->__parent, new_parent);
- kn->ns = new_ns;
- if (new_name) {
- old_name = kn->name;
- kn->name = new_name;
- }
+ kn->ns = new_ns;
+ if (new_name)
+ rcu_assign_pointer(kn->name, new_name);
- write_unlock_irq(&kernfs_rename_lock);
+ write_unlock_irq(&root->kernfs_rename_lock);
+ kernfs_put(old_parent);
+ } else {
+ /* name assignment is RCU protected, parent is the same */
+ kn->ns = new_ns;
+ if (new_name)
+ rcu_assign_pointer(kn->name, new_name);
+ }
- kn->hash = kernfs_name_hash(kn->name, kn->ns);
+ kn->hash = kernfs_name_hash(new_name ?: old_name, kn->ns);
kernfs_link_sibling(kn);
- kernfs_put(old_parent);
- kfree_const(old_name);
+ if (new_name && !is_kernel_rodata((unsigned long)old_name))
+ kfree_rcu_mightsleep(old_name);
error = 0;
out:
@@ -1794,7 +1835,8 @@ static struct kernfs_node *kernfs_dir_pos(const void *ns,
{
if (pos) {
int valid = kernfs_active(pos) &&
- pos->parent == parent && hash == pos->hash;
+ rcu_access_pointer(pos->__parent) == parent &&
+ hash == pos->hash;
kernfs_put(pos);
if (!valid)
pos = NULL;
@@ -1859,7 +1901,7 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
pos;
pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
- const char *name = pos->name;
+ const char *name = kernfs_rcu_name(pos);
unsigned int type = fs_umode_to_dtype(pos->mode);
int len = strlen(name);
ino_t ino = kernfs_ino(pos);
@@ -1868,10 +1910,10 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
file->private_data = pos;
kernfs_get(pos);
- up_read(&root->kernfs_rwsem);
- if (!dir_emit(ctx, name, len, ino, type))
+ if (!dir_emit(ctx, name, len, ino, type)) {
+ up_read(&root->kernfs_rwsem);
return 0;
- down_read(&root->kernfs_rwsem);
+ }
}
up_read(&root->kernfs_rwsem);
file->private_data = NULL;
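
The dir.c changes above switch kn->name and kn->__parent to RCU publication under a per-root kernfs_rename_lock: the rename path swaps the pointers with rcu_assign_pointer() and frees the old name only after a grace period, so lockless readers never see a torn or freed string. A minimal sketch of that publish/read pattern, with obj, obj_rename_lock and obj_set_name() as made-up names standing in for the kernfs ones:

static DEFINE_RWLOCK(obj_rename_lock);

struct obj {
	const char __rcu *name;
};

/* Writer: swap under the lock, free the old copy after a grace period
 * (outside the lock, since kfree_rcu_mightsleep() may sleep). */
static void obj_set_name(struct obj *o, const char *new_name)
{
	const char *old;

	write_lock_irq(&obj_rename_lock);
	old = rcu_dereference_protected(o->name,
					lockdep_is_held(&obj_rename_lock));
	rcu_assign_pointer(o->name, new_name);
	write_unlock_irq(&obj_rename_lock);

	kfree_rcu_mightsleep(old);
}

/* Reader: any RCU read-side section gives a stable string. */
static size_t obj_name_len(struct obj *o)
{
	guard(rcu)();
	return strlen(rcu_dereference(o->name));
}
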
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 8502ef68459b..9adf36e6364b 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -70,6 +70,24 @@ static struct kernfs_open_node *of_on(struct kernfs_open_file *of)
!list_empty(&of->list));
}
+/* Get active reference to kernfs node for an open file */
+static struct kernfs_open_file *kernfs_get_active_of(struct kernfs_open_file *of)
+{
+ /* Skip if file was already released */
+ if (unlikely(of->released))
+ return NULL;
+
+ if (!kernfs_get_active(of->kn))
+ return NULL;
+
+ return of;
+}
+
+static void kernfs_put_active_of(struct kernfs_open_file *of)
+{
+ return kernfs_put_active(of->kn);
+}
+
/**
* kernfs_deref_open_node_locked - Get kernfs_open_node corresponding to @kn
*
@@ -139,7 +157,7 @@ static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
if (ops->seq_stop)
ops->seq_stop(sf, v);
- kernfs_put_active(of->kn);
+ kernfs_put_active_of(of);
}
static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
@@ -152,7 +170,7 @@ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
* the ops aren't called concurrently for the same open file.
*/
mutex_lock(&of->mutex);
- if (!kernfs_get_active(of->kn))
+ if (!kernfs_get_active_of(of))
return ERR_PTR(-ENODEV);
ops = kernfs_ops(of->kn);
@@ -238,7 +256,7 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
* the ops aren't called concurrently for the same open file.
*/
mutex_lock(&of->mutex);
- if (!kernfs_get_active(of->kn)) {
+ if (!kernfs_get_active_of(of)) {
len = -ENODEV;
mutex_unlock(&of->mutex);
goto out_free;
@@ -252,7 +270,7 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
else
len = -EINVAL;
- kernfs_put_active(of->kn);
+ kernfs_put_active_of(of);
mutex_unlock(&of->mutex);
if (len < 0)
@@ -323,7 +341,7 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
* the ops aren't called concurrently for the same open file.
*/
mutex_lock(&of->mutex);
- if (!kernfs_get_active(of->kn)) {
+ if (!kernfs_get_active_of(of)) {
mutex_unlock(&of->mutex);
len = -ENODEV;
goto out_free;
@@ -335,7 +353,7 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
else
len = -EINVAL;
- kernfs_put_active(of->kn);
+ kernfs_put_active_of(of);
mutex_unlock(&of->mutex);
if (len > 0)
@@ -357,13 +375,13 @@ static void kernfs_vma_open(struct vm_area_struct *vma)
if (!of->vm_ops)
return;
- if (!kernfs_get_active(of->kn))
+ if (!kernfs_get_active_of(of))
return;
if (of->vm_ops->open)
of->vm_ops->open(vma);
- kernfs_put_active(of->kn);
+ kernfs_put_active_of(of);
}
static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
@@ -375,14 +393,14 @@ static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
if (!of->vm_ops)
return VM_FAULT_SIGBUS;
- if (!kernfs_get_active(of->kn))
+ if (!kernfs_get_active_of(of))
return VM_FAULT_SIGBUS;
ret = VM_FAULT_SIGBUS;
if (of->vm_ops->fault)
ret = of->vm_ops->fault(vmf);
- kernfs_put_active(of->kn);
+ kernfs_put_active_of(of);
return ret;
}
@@ -395,7 +413,7 @@ static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
if (!of->vm_ops)
return VM_FAULT_SIGBUS;
- if (!kernfs_get_active(of->kn))
+ if (!kernfs_get_active_of(of))
return VM_FAULT_SIGBUS;
ret = 0;
@@ -404,7 +422,7 @@ static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
else
file_update_time(file);
- kernfs_put_active(of->kn);
+ kernfs_put_active_of(of);
return ret;
}
@@ -418,14 +436,14 @@ static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
if (!of->vm_ops)
return -EINVAL;
- if (!kernfs_get_active(of->kn))
+ if (!kernfs_get_active_of(of))
return -EINVAL;
ret = -EINVAL;
if (of->vm_ops->access)
ret = of->vm_ops->access(vma, addr, buf, len, write);
- kernfs_put_active(of->kn);
+ kernfs_put_active_of(of);
return ret;
}
@@ -455,7 +473,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
mutex_lock(&of->mutex);
rc = -ENODEV;
- if (!kernfs_get_active(of->kn))
+ if (!kernfs_get_active_of(of))
goto out_unlock;
ops = kernfs_ops(of->kn);
@@ -490,7 +508,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
}
vma->vm_ops = &kernfs_vm_ops;
out_put:
- kernfs_put_active(of->kn);
+ kernfs_put_active_of(of);
out_unlock:
mutex_unlock(&of->mutex);
@@ -778,8 +796,9 @@ bool kernfs_should_drain_open_files(struct kernfs_node *kn)
/*
* @kn being deactivated guarantees that @kn->attr.open can't change
* beneath us making the lockless test below safe.
+ * Callers post kernfs_unbreak_active_protection may be counted in
+ * kn->active by now, do not WARN_ON because of them.
*/
- WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
rcu_read_lock();
on = rcu_dereference(kn->attr.open);
@@ -851,7 +870,7 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
__poll_t ret;
- if (!kernfs_get_active(kn))
+ if (!kernfs_get_active_of(of))
return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
if (kn->attr.ops->poll)
@@ -859,7 +878,7 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
else
ret = kernfs_generic_poll(of, wait);
- kernfs_put_active(kn);
+ kernfs_put_active_of(of);
return ret;
}
@@ -874,7 +893,7 @@ static loff_t kernfs_fop_llseek(struct file *file, loff_t offset, int whence)
* the ops aren't called concurrently for the same open file.
*/
mutex_lock(&of->mutex);
- if (!kernfs_get_active(of->kn)) {
+ if (!kernfs_get_active_of(of)) {
mutex_unlock(&of->mutex);
return -ENODEV;
}
@@ -885,7 +904,7 @@ static loff_t kernfs_fop_llseek(struct file *file, loff_t offset, int whence)
else
ret = generic_file_llseek(file, offset, whence);
- kernfs_put_active(of->kn);
+ kernfs_put_active_of(of);
mutex_unlock(&of->mutex);
return ret;
}
@@ -911,9 +930,11 @@ repeat:
/* kick fsnotify */
down_read(&root->kernfs_supers_rwsem);
+ down_read(&root->kernfs_rwsem);
list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
struct kernfs_node *parent;
struct inode *p_inode = NULL;
+ const char *kn_name;
struct inode *inode;
struct qstr name;
@@ -927,7 +948,8 @@ repeat:
if (!inode)
continue;
- name = (struct qstr)QSTR_INIT(kn->name, strlen(kn->name));
+ kn_name = kernfs_rcu_name(kn);
+ name = QSTR(kn_name);
parent = kernfs_get_parent(kn);
if (parent) {
p_inode = ilookup(info->sb, kernfs_ino(parent));
@@ -947,6 +969,7 @@ repeat:
iput(inode);
}
+ up_read(&root->kernfs_rwsem);
up_read(&root->kernfs_supers_rwsem);
kernfs_put(kn);
goto repeat;
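
Every file operation in the hunks above now goes through the two wrappers added at the top of file.c, so an already-released open file fails fast with -ENODEV instead of taking an active reference on a node it no longer pins. A sketch of the calling convention; example_show() is hypothetical:

static ssize_t example_show(struct kernfs_open_file *of, char *buf, size_t len)
{
	ssize_t ret;

	if (!kernfs_get_active_of(of))		/* NULL once of->released is set */
		return -ENODEV;

	ret = scnprintf(buf, len, "%d\n", 42);	/* stand-in for the real op */

	kernfs_put_active_of(of);
	return ret;
}
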
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index b83054da68b3..a36aaee98dce 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -24,45 +24,46 @@ static const struct inode_operations kernfs_iops = {
.listxattr = kernfs_iop_listxattr,
};
-static struct kernfs_iattrs *__kernfs_iattrs(struct kernfs_node *kn, int alloc)
+static struct kernfs_iattrs *__kernfs_iattrs(struct kernfs_node *kn, bool alloc)
{
- static DEFINE_MUTEX(iattr_mutex);
- struct kernfs_iattrs *ret;
+ struct kernfs_iattrs *ret __free(kfree) = NULL;
+ struct kernfs_iattrs *attr;
- mutex_lock(&iattr_mutex);
+ attr = READ_ONCE(kn->iattr);
+ if (attr || !alloc)
+ return attr;
- if (kn->iattr || !alloc)
- goto out_unlock;
-
- kn->iattr = kmem_cache_zalloc(kernfs_iattrs_cache, GFP_KERNEL);
- if (!kn->iattr)
- goto out_unlock;
+ ret = kmem_cache_zalloc(kernfs_iattrs_cache, GFP_KERNEL);
+ if (!ret)
+ return NULL;
/* assign default attributes */
- kn->iattr->ia_uid = GLOBAL_ROOT_UID;
- kn->iattr->ia_gid = GLOBAL_ROOT_GID;
-
- ktime_get_real_ts64(&kn->iattr->ia_atime);
- kn->iattr->ia_mtime = kn->iattr->ia_atime;
- kn->iattr->ia_ctime = kn->iattr->ia_atime;
-
- simple_xattrs_init(&kn->iattr->xattrs);
- atomic_set(&kn->iattr->nr_user_xattrs, 0);
- atomic_set(&kn->iattr->user_xattr_size, 0);
-out_unlock:
- ret = kn->iattr;
- mutex_unlock(&iattr_mutex);
- return ret;
+ ret->ia_uid = GLOBAL_ROOT_UID;
+ ret->ia_gid = GLOBAL_ROOT_GID;
+
+ ktime_get_real_ts64(&ret->ia_atime);
+ ret->ia_mtime = ret->ia_atime;
+ ret->ia_ctime = ret->ia_atime;
+
+ simple_xattrs_init(&ret->xattrs);
+ atomic_set(&ret->nr_user_xattrs, 0);
+ atomic_set(&ret->user_xattr_size, 0);
+
+ /* If someone raced us, recognize it. */
+ if (!try_cmpxchg(&kn->iattr, &attr, ret))
+ return READ_ONCE(kn->iattr);
+
+ return no_free_ptr(ret);
}
static struct kernfs_iattrs *kernfs_iattrs(struct kernfs_node *kn)
{
- return __kernfs_iattrs(kn, 1);
+ return __kernfs_iattrs(kn, true);
}
static struct kernfs_iattrs *kernfs_iattrs_noalloc(struct kernfs_node *kn)
{
- return __kernfs_iattrs(kn, 0);
+ return __kernfs_iattrs(kn, false);
}
int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
@@ -166,9 +167,10 @@ static inline void set_inode_attr(struct inode *inode,
static void kernfs_refresh_inode(struct kernfs_node *kn, struct inode *inode)
{
- struct kernfs_iattrs *attrs = kn->iattr;
+ struct kernfs_iattrs *attrs;
inode->i_mode = kn->mode;
+ attrs = kernfs_iattrs_noalloc(kn);
if (attrs)
/*
* kernfs_node has non-default attributes get them from
@@ -249,7 +251,7 @@ struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn)
struct inode *inode;
inode = iget_locked(sb, kernfs_ino(kn));
- if (inode && (inode->i_state & I_NEW))
+ if (inode && (inode_state_read_once(inode) & I_NEW))
kernfs_init_inode(kn, inode);
return inode;
@@ -306,7 +308,9 @@ int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
const void *value, size_t size, int flags)
{
struct simple_xattr *old_xattr;
- struct kernfs_iattrs *attrs = kernfs_iattrs(kn);
+ struct kernfs_iattrs *attrs;
+
+ attrs = kernfs_iattrs(kn);
if (!attrs)
return -ENOMEM;
@@ -345,8 +349,9 @@ static int kernfs_vfs_user_xattr_add(struct kernfs_node *kn,
struct simple_xattrs *xattrs,
const void *value, size_t size, int flags)
{
- atomic_t *sz = &kn->iattr->user_xattr_size;
- atomic_t *nr = &kn->iattr->nr_user_xattrs;
+ struct kernfs_iattrs *attr = kernfs_iattrs_noalloc(kn);
+ atomic_t *sz = &attr->user_xattr_size;
+ atomic_t *nr = &attr->nr_user_xattrs;
struct simple_xattr *old_xattr;
int ret;
@@ -384,8 +389,9 @@ static int kernfs_vfs_user_xattr_rm(struct kernfs_node *kn,
struct simple_xattrs *xattrs,
const void *value, size_t size, int flags)
{
- atomic_t *sz = &kn->iattr->user_xattr_size;
- atomic_t *nr = &kn->iattr->nr_user_xattrs;
+ struct kernfs_iattrs *attr = kernfs_iattrs_noalloc(kn);
+ atomic_t *sz = &attr->user_xattr_size;
+ atomic_t *nr = &attr->nr_user_xattrs;
struct simple_xattr *old_xattr;
old_xattr = simple_xattr_set(xattrs, full_name, value, size, flags);
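
__kernfs_iattrs() above drops the global iattr_mutex in favour of an allocate-then-publish race resolved by try_cmpxchg(): the loser frees its copy and adopts the winner's. The same pattern in isolation, with obj, obj_cache and obj_init() as placeholders:

static struct obj *obj_get_lazy(struct obj **slot)
{
	struct obj *old = READ_ONCE(*slot);
	struct obj *new;

	if (old)
		return old;

	new = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
	if (!new)
		return NULL;
	obj_init(new);

	/* Publish; on failure try_cmpxchg() updates 'old' to the winner. */
	if (!try_cmpxchg(slot, &old, new)) {
		kmem_cache_free(obj_cache, new);
		return old;
	}
	return new;
}
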
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index b42ee6547cdc..6061b6f70d2a 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -38,6 +38,7 @@ struct kernfs_root {
/* private fields, do not use outside kernfs proper */
struct idr ino_idr;
+ spinlock_t kernfs_idr_lock; /* root->ino_idr */
u32 last_id_lowbits;
u32 id_highbits;
struct kernfs_syscall_ops *syscall_ops;
@@ -50,6 +51,9 @@ struct kernfs_root {
struct rw_semaphore kernfs_iattr_rwsem;
struct rw_semaphore kernfs_supers_rwsem;
+ /* kn->parent and kn->name */
+ rwlock_t kernfs_rename_lock;
+
struct rcu_head rcu;
};
@@ -64,11 +68,14 @@ struct kernfs_root {
*
* Return: the kernfs_root @kn belongs to.
*/
-static inline struct kernfs_root *kernfs_root(struct kernfs_node *kn)
+static inline struct kernfs_root *kernfs_root(const struct kernfs_node *kn)
{
+ const struct kernfs_node *knp;
/* if parent exists, it's always a dir; otherwise, @sd is a dir */
- if (kn->parent)
- kn = kn->parent;
+ guard(rcu)();
+ knp = rcu_dereference(kn->__parent);
+ if (knp)
+ kn = knp;
return kn->dir.root;
}
@@ -97,6 +104,38 @@ struct kernfs_super_info {
};
#define kernfs_info(SB) ((struct kernfs_super_info *)(SB->s_fs_info))
+static inline bool kernfs_root_is_locked(const struct kernfs_node *kn)
+{
+ return lockdep_is_held(&kernfs_root(kn)->kernfs_rwsem);
+}
+
+static inline bool kernfs_rename_is_locked(const struct kernfs_node *kn)
+{
+ return lockdep_is_held(&kernfs_root(kn)->kernfs_rename_lock);
+}
+
+static inline const char *kernfs_rcu_name(const struct kernfs_node *kn)
+{
+ return rcu_dereference_check(kn->name, kernfs_root_is_locked(kn));
+}
+
+static inline struct kernfs_node *kernfs_parent(const struct kernfs_node *kn)
+{
+ /*
+ * The kernfs_node::__parent remains valid within a RCU section. The kn
+ * can be reparented (and renamed) which changes the entry. This can be
+ * avoided by locking kernfs_root::kernfs_rwsem or
+ * kernfs_root::kernfs_rename_lock.
+ * Both locks can be used to obtain a reference on __parent. Once the
+ * reference count reaches 0 then the node is about to be freed
+ * and can not be renamed (or become a different parent) anymore.
+ */
+ return rcu_dereference_check(kn->__parent,
+ kernfs_root_is_locked(kn) ||
+ kernfs_rename_is_locked(kn) ||
+ !atomic_read(&kn->count));
+}
+
static inline struct kernfs_node *kernfs_dentry_node(struct dentry *dentry)
{
if (d_really_is_negative(dentry))
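
With ->__parent and ->name behind RCU, callers are expected to go through the accessors above and satisfy one of the lockdep conditions they encode: hold the root's kernfs_rwsem, hold kernfs_rename_lock, or simply stay inside an RCU read-side section. A hypothetical caller, assuming only the helpers defined in this header:

static void print_node(struct kernfs_node *kn)
{
	struct kernfs_node *parent;

	guard(rcu)();
	parent = kernfs_parent(kn);
	pr_info("%s (parent: %s)\n", kernfs_rcu_name(kn),
		parent ? kernfs_rcu_name(parent) : "<none>");
}
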
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index 1358c21837f1..3ac52e141766 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -57,11 +57,26 @@ static int kernfs_statfs(struct dentry *dentry, struct kstatfs *buf)
const struct super_operations kernfs_sops = {
.statfs = kernfs_statfs,
- .drop_inode = generic_delete_inode,
+ .drop_inode = inode_just_drop,
.evict_inode = kernfs_evict_inode,
.show_options = kernfs_sop_show_options,
.show_path = kernfs_sop_show_path,
+
+ /*
+ * sysfs is built on top of kernfs and sysfs provides the power
+ * management infrastructure to support suspend/hibernate by
+ * writing to various files in /sys/power/. As filesystems may
+ * be automatically frozen during suspend/hibernate implementing
+ * freeze/thaw support for kernfs generically will cause
+ * deadlocks as the suspending/hibernation initiating task will
+ * hold a VFS lock that it will then wait upon to be released.
+ * If freeze/thaw for kernfs is needed talk to the VFS.
+ */
+ .freeze_fs = NULL,
+ .unfreeze_fs = NULL,
+ .freeze_super = NULL,
+ .thaw_super = NULL,
};
static int kernfs_encode_fh(struct inode *inode, __u32 *fh, int *max_len,
@@ -145,8 +160,10 @@ static struct dentry *kernfs_fh_to_parent(struct super_block *sb,
static struct dentry *kernfs_get_parent_dentry(struct dentry *child)
{
struct kernfs_node *kn = kernfs_dentry_node(child);
+ struct kernfs_root *root = kernfs_root(kn);
- return d_obtain_alias(kernfs_get_inode(child->d_sb, kn->parent));
+ guard(rwsem_read)(&root->kernfs_rwsem);
+ return d_obtain_alias(kernfs_get_inode(child->d_sb, kernfs_parent(kn)));
}
static const struct export_operations kernfs_export_ops = {
@@ -186,10 +203,10 @@ static struct kernfs_node *find_next_ancestor(struct kernfs_node *child,
return NULL;
}
- while (child->parent != parent) {
- if (!child->parent)
+ while (kernfs_parent(child) != parent) {
+ child = kernfs_parent(child);
+ if (!child)
return NULL;
- child = child->parent;
}
return child;
@@ -207,16 +224,27 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
{
struct dentry *dentry;
struct kernfs_node *knparent;
+ struct kernfs_root *root;
BUG_ON(sb->s_op != &kernfs_sops);
dentry = dget(sb->s_root);
/* Check if this is the root kernfs_node */
- if (!kn->parent)
+ if (!rcu_access_pointer(kn->__parent))
return dentry;
- knparent = find_next_ancestor(kn, NULL);
+ root = kernfs_root(kn);
+ /*
+ * As long as kn is valid, its parent can not vanish. This is cgroup's
+ * kn so it can't have its parent replaced. Therefore it is safe to use
+ * the ancestor node outside of the RCU or locked section.
+ */
+ if (WARN_ON_ONCE(!(root->flags & KERNFS_ROOT_INVARIANT_PARENT)))
+ return ERR_PTR(-EINVAL);
+ scoped_guard(rcu) {
+ knparent = find_next_ancestor(kn, NULL);
+ }
if (WARN_ON(!knparent)) {
dput(dentry);
return ERR_PTR(-EINVAL);
@@ -225,17 +253,26 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
do {
struct dentry *dtmp;
struct kernfs_node *kntmp;
+ const char *name;
if (kn == knparent)
return dentry;
- kntmp = find_next_ancestor(kn, knparent);
- if (WARN_ON(!kntmp)) {
+
+ scoped_guard(rwsem_read, &root->kernfs_rwsem) {
+ kntmp = find_next_ancestor(kn, knparent);
+ if (WARN_ON(!kntmp)) {
+ dput(dentry);
+ return ERR_PTR(-EINVAL);
+ }
+ name = kstrdup(kernfs_rcu_name(kntmp), GFP_KERNEL);
+ }
+ if (!name) {
dput(dentry);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOMEM);
}
- dtmp = lookup_positive_unlocked(kntmp->name, dentry,
- strlen(kntmp->name));
+ dtmp = lookup_noperm_positive_unlocked(&QSTR(name), dentry);
dput(dentry);
+ kfree(name);
if (IS_ERR(dtmp))
return dtmp;
knparent = kntmp;
@@ -261,6 +298,7 @@ static int kernfs_fill_super(struct super_block *sb, struct kernfs_fs_context *k
if (info->root->flags & KERNFS_ROOT_SUPPORT_EXPORTOP)
sb->s_export_op = &kernfs_export_ops;
sb->s_time_gran = 1;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
/* sysfs dentries and inodes don't require IO to create */
sb->s_shrink->seeks = 0;
@@ -281,7 +319,7 @@ static int kernfs_fill_super(struct super_block *sb, struct kernfs_fs_context *k
return -ENOMEM;
}
sb->s_root = root;
- sb->s_d_op = &kernfs_dops;
+ set_default_d_op(sb, &kernfs_dops);
return 0;
}
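
kernfs_node_dentry() above copies each ancestor's name with kstrdup() while holding kernfs_rwsem and only then calls into the VFS, so the lookup never dereferences a name a concurrent rename could free. The scoped_guard() idiom it relies on, reduced to a sketch (kn_name_copy() is made up):

static char *kn_name_copy(struct kernfs_node *kn)
{
	struct kernfs_root *root = kernfs_root(kn);
	char *name;

	scoped_guard(rwsem_read, &root->kernfs_rwsem)
		name = kstrdup(kernfs_rcu_name(kn), GFP_KERNEL);

	return name;	/* caller kfree()s; NULL on allocation failure */
}
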
diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
index 45371a70caa7..0bd8a2143723 100644
--- a/fs/kernfs/symlink.c
+++ b/fs/kernfs/symlink.c
@@ -62,10 +62,10 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
/* go up to the root, stop at the base */
base = parent;
- while (base->parent) {
- kn = target->parent;
- while (kn->parent && base != kn)
- kn = kn->parent;
+ while (kernfs_parent(base)) {
+ kn = kernfs_parent(target);
+ while (kernfs_parent(kn) && base != kn)
+ kn = kernfs_parent(kn);
if (base == kn)
break;
@@ -75,14 +75,14 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
strcpy(s, "../");
s += 3;
- base = base->parent;
+ base = kernfs_parent(base);
}
/* determine end of target string for reverse fillup */
kn = target;
- while (kn->parent && kn != base) {
- len += strlen(kn->name) + 1;
- kn = kn->parent;
+ while (kernfs_parent(kn) && kn != base) {
+ len += strlen(kernfs_rcu_name(kn)) + 1;
+ kn = kernfs_parent(kn);
}
/* check limits */
@@ -94,15 +94,16 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
/* reverse fillup of target string from target to base */
kn = target;
- while (kn->parent && kn != base) {
- int slen = strlen(kn->name);
+ while (kernfs_parent(kn) && kn != base) {
+ const char *name = kernfs_rcu_name(kn);
+ int slen = strlen(name);
len -= slen;
- memcpy(s + len, kn->name, slen);
+ memcpy(s + len, name, slen);
if (len)
s[--len] = '/';
- kn = kn->parent;
+ kn = kernfs_parent(kn);
}
return 0;
@@ -111,12 +112,13 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
static int kernfs_getlink(struct inode *inode, char *path)
{
struct kernfs_node *kn = inode->i_private;
- struct kernfs_node *parent = kn->parent;
+ struct kernfs_node *parent;
struct kernfs_node *target = kn->symlink.target_kn;
- struct kernfs_root *root = kernfs_root(parent);
+ struct kernfs_root *root = kernfs_root(kn);
int error;
down_read(&root->kernfs_rwsem);
+ parent = kernfs_parent(kn);
error = kernfs_get_target_path(parent, target, path);
up_read(&root->kernfs_rwsem);
diff --git a/fs/libfs.c b/fs/libfs.c
index 748ac5923154..9264523be85c 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -62,11 +62,6 @@ int always_delete_dentry(const struct dentry *dentry)
}
EXPORT_SYMBOL(always_delete_dentry);
-const struct dentry_operations simple_dentry_operations = {
- .d_delete = always_delete_dentry,
-};
-EXPORT_SYMBOL(simple_dentry_operations);
-
/*
* Lookup the data. This is trivial - if the dentry didn't already
* exist, we know it is negative. Set d_op to delete negative dentries.
@@ -75,9 +70,11 @@ struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned
{
if (dentry->d_name.len > NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
- if (!dentry->d_sb->s_d_op)
- d_set_d_op(dentry, &simple_dentry_operations);
-
+ if (!dentry->d_op && !(dentry->d_flags & DCACHE_DONTCACHE)) {
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_DONTCACHE;
+ spin_unlock(&dentry->d_lock);
+ }
if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
return NULL;
@@ -245,9 +242,16 @@ const struct inode_operations simple_dir_inode_operations = {
};
EXPORT_SYMBOL(simple_dir_inode_operations);
-/* 0 is '.', 1 is '..', so always start with offset 2 or more */
+/* simple_offset_add() never assigns these to a dentry */
+enum {
+ DIR_OFFSET_FIRST = 2, /* Find first real entry */
+ DIR_OFFSET_EOD = S32_MAX,
+};
+
+/* simple_offset_add() allocation range */
enum {
- DIR_OFFSET_MIN = 2,
+ DIR_OFFSET_MIN = DIR_OFFSET_FIRST + 1,
+ DIR_OFFSET_MAX = DIR_OFFSET_EOD - 1,
};
static void offset_set(struct dentry *dentry, long offset)
@@ -291,9 +295,10 @@ int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry)
return -EBUSY;
ret = mtree_alloc_cyclic(&octx->mt, &offset, dentry, DIR_OFFSET_MIN,
- LONG_MAX, &octx->next_offset, GFP_KERNEL);
- if (ret < 0)
- return ret;
+ DIR_OFFSET_MAX, &octx->next_offset,
+ GFP_KERNEL);
+ if (unlikely(ret < 0))
+ return ret == -EBUSY ? -ENOSPC : ret;
offset_set(dentry, offset);
return 0;
@@ -330,38 +335,6 @@ void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry)
}
/**
- * simple_offset_empty - Check if a dentry can be unlinked
- * @dentry: dentry to be tested
- *
- * Returns 0 if @dentry is a non-empty directory; otherwise returns 1.
- */
-int simple_offset_empty(struct dentry *dentry)
-{
- struct inode *inode = d_inode(dentry);
- struct offset_ctx *octx;
- struct dentry *child;
- unsigned long index;
- int ret = 1;
-
- if (!inode || !S_ISDIR(inode->i_mode))
- return ret;
-
- index = DIR_OFFSET_MIN;
- octx = inode->i_op->get_offset_ctx(inode);
- mt_for_each(&octx->mt, child, index, LONG_MAX) {
- spin_lock(&child->d_lock);
- if (simple_positive(child)) {
- spin_unlock(&child->d_lock);
- ret = 0;
- break;
- }
- spin_unlock(&child->d_lock);
- }
-
- return ret;
-}
-
-/**
* simple_offset_rename - handle directory offsets for rename
* @old_dir: parent directory of source entry
* @old_dentry: dentry of source entry
@@ -454,14 +427,6 @@ void simple_offset_destroy(struct offset_ctx *octx)
mtree_destroy(&octx->mt);
}
-static int offset_dir_open(struct inode *inode, struct file *file)
-{
- struct offset_ctx *ctx = inode->i_op->get_offset_ctx(inode);
-
- file->private_data = (void *)ctx->next_offset;
- return 0;
-}
-
/**
* offset_dir_llseek - Advance the read position of a directory descriptor
* @file: an open directory whose position is to be updated
@@ -475,9 +440,6 @@ static int offset_dir_open(struct inode *inode, struct file *file)
*/
static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
{
- struct inode *inode = file->f_inode;
- struct offset_ctx *ctx = inode->i_op->get_offset_ctx(inode);
-
switch (whence) {
case SEEK_CUR:
offset += file->f_pos;
@@ -490,62 +452,89 @@ static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
return -EINVAL;
}
- /* In this case, ->private_data is protected by f_pos_lock */
- if (!offset)
- file->private_data = (void *)ctx->next_offset;
return vfs_setpos(file, offset, LONG_MAX);
}
-static struct dentry *offset_find_next(struct offset_ctx *octx, loff_t offset)
+static struct dentry *find_positive_dentry(struct dentry *parent,
+ struct dentry *dentry,
+ bool next)
{
- MA_STATE(mas, &octx->mt, offset, offset);
+ struct dentry *found = NULL;
+
+ spin_lock(&parent->d_lock);
+ if (next)
+ dentry = d_next_sibling(dentry);
+ else if (!dentry)
+ dentry = d_first_child(parent);
+ hlist_for_each_entry_from(dentry, d_sib) {
+ if (!simple_positive(dentry))
+ continue;
+ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ if (simple_positive(dentry))
+ found = dget_dlock(dentry);
+ spin_unlock(&dentry->d_lock);
+ if (likely(found))
+ break;
+ }
+ spin_unlock(&parent->d_lock);
+ return found;
+}
+
+static noinline_for_stack struct dentry *
+offset_dir_lookup(struct dentry *parent, loff_t offset)
+{
+ struct inode *inode = d_inode(parent);
+ struct offset_ctx *octx = inode->i_op->get_offset_ctx(inode);
struct dentry *child, *found = NULL;
- rcu_read_lock();
- child = mas_find(&mas, LONG_MAX);
- if (!child)
- goto out;
- spin_lock(&child->d_lock);
- if (simple_positive(child))
- found = dget_dlock(child);
- spin_unlock(&child->d_lock);
-out:
- rcu_read_unlock();
+ MA_STATE(mas, &octx->mt, offset, offset);
+
+ if (offset == DIR_OFFSET_FIRST)
+ found = find_positive_dentry(parent, NULL, false);
+ else {
+ rcu_read_lock();
+ child = mas_find_rev(&mas, DIR_OFFSET_MIN);
+ found = find_positive_dentry(parent, child, false);
+ rcu_read_unlock();
+ }
return found;
}
static bool offset_dir_emit(struct dir_context *ctx, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
- long offset = dentry2offset(dentry);
- return ctx->actor(ctx, dentry->d_name.name, dentry->d_name.len, offset,
- inode->i_ino, fs_umode_to_dtype(inode->i_mode));
+ return dir_emit(ctx, dentry->d_name.name, dentry->d_name.len,
+ inode->i_ino, fs_umode_to_dtype(inode->i_mode));
}
-static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, long last_index)
+static void offset_iterate_dir(struct file *file, struct dir_context *ctx)
{
- struct offset_ctx *octx = inode->i_op->get_offset_ctx(inode);
+ struct dentry *dir = file->f_path.dentry;
struct dentry *dentry;
+ dentry = offset_dir_lookup(dir, ctx->pos);
+ if (!dentry)
+ goto out_eod;
while (true) {
- dentry = offset_find_next(octx, ctx->pos);
- if (!dentry)
- return;
-
- if (dentry2offset(dentry) >= last_index) {
- dput(dentry);
- return;
- }
+ struct dentry *next;
- if (!offset_dir_emit(ctx, dentry)) {
- dput(dentry);
- return;
- }
+ ctx->pos = dentry2offset(dentry);
+ if (!offset_dir_emit(ctx, dentry))
+ break;
- ctx->pos = dentry2offset(dentry) + 1;
+ next = find_positive_dentry(dir, dentry, true);
dput(dentry);
+
+ if (!next)
+ goto out_eod;
+ dentry = next;
}
+ dput(dentry);
+ return;
+
+out_eod:
+ ctx->pos = DIR_OFFSET_EOD;
}
/**
@@ -565,6 +554,8 @@ static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, lon
*
* On return, @ctx->pos contains an offset that will read the next entry
* in this directory when offset_readdir() is called again with @ctx.
+ * Caller places this value in the d_off field of the last entry in the
+ * user's buffer.
*
* Return values:
* %0 - Complete
@@ -572,26 +563,24 @@ static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, lon
static int offset_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dir = file->f_path.dentry;
- long last_index = (long)file->private_data;
lockdep_assert_held(&d_inode(dir)->i_rwsem);
if (!dir_emit_dots(file, ctx))
return 0;
-
- offset_iterate_dir(d_inode(dir), ctx, last_index);
+ if (ctx->pos != DIR_OFFSET_EOD)
+ offset_iterate_dir(file, ctx);
return 0;
}
const struct file_operations simple_offset_dir_operations = {
- .open = offset_dir_open,
.llseek = offset_dir_llseek,
.iterate_shared = offset_readdir,
.read = generic_read_dir,
.fsync = noop_fsync,
};
-static struct dentry *find_next_child(struct dentry *parent, struct dentry *prev)
+struct dentry *find_next_child(struct dentry *parent, struct dentry *prev)
{
struct dentry *child = NULL, *d;
@@ -611,16 +600,18 @@ static struct dentry *find_next_child(struct dentry *parent, struct dentry *prev
dput(prev);
return child;
}
+EXPORT_SYMBOL(find_next_child);
-void simple_recursive_removal(struct dentry *dentry,
- void (*callback)(struct dentry *))
+static void __simple_recursive_removal(struct dentry *dentry,
+ void (*callback)(struct dentry *),
+ bool locked)
{
struct dentry *this = dget(dentry);
while (true) {
struct dentry *victim = NULL, *child;
struct inode *inode = this->d_inode;
- inode_lock(inode);
+ inode_lock_nested(inode, I_MUTEX_CHILD);
if (d_is_dir(this))
inode->i_flags |= S_DEAD;
while ((child = find_next_child(this, victim)) == NULL) {
@@ -632,23 +623,22 @@ void simple_recursive_removal(struct dentry *dentry,
victim = this;
this = this->d_parent;
inode = this->d_inode;
- inode_lock(inode);
+ if (!locked || victim != dentry)
+ inode_lock_nested(inode, I_MUTEX_CHILD);
if (simple_positive(victim)) {
d_invalidate(victim); // avoid lost mounts
- if (d_is_dir(victim))
- fsnotify_rmdir(inode, victim);
- else
- fsnotify_unlink(inode, victim);
if (callback)
callback(victim);
- dput(victim); // unpin it
+ fsnotify_delete(inode, d_inode(victim), victim);
+ d_make_discardable(victim);
}
if (victim == dentry) {
inode_set_mtime_to_ts(inode,
inode_set_ctime_current(inode));
if (d_is_dir(dentry))
drop_nlink(inode);
- inode_unlock(inode);
+ if (!locked)
+ inode_unlock(inode);
dput(dentry);
return;
}
@@ -657,8 +647,35 @@ void simple_recursive_removal(struct dentry *dentry,
this = child;
}
}
+
+void simple_recursive_removal(struct dentry *dentry,
+ void (*callback)(struct dentry *))
+{
+ return __simple_recursive_removal(dentry, callback, false);
+}
EXPORT_SYMBOL(simple_recursive_removal);
+void simple_remove_by_name(struct dentry *parent, const char *name,
+ void (*callback)(struct dentry *))
+{
+ struct dentry *dentry;
+
+ dentry = lookup_noperm_positive_unlocked(&QSTR(name), parent);
+ if (!IS_ERR(dentry)) {
+ simple_recursive_removal(dentry, callback);
+ dput(dentry); // paired with lookup_noperm_positive_unlocked()
+ }
+}
+EXPORT_SYMBOL(simple_remove_by_name);
+
+/* caller holds parent directory with I_MUTEX_PARENT */
+void locked_recursive_removal(struct dentry *dentry,
+ void (*callback)(struct dentry *))
+{
+ return __simple_recursive_removal(dentry, callback, true);
+}
+EXPORT_SYMBOL(locked_recursive_removal);
+
static const struct super_operations simple_super_operations = {
.statfs = simple_statfs,
};
@@ -673,8 +690,10 @@ static int pseudo_fs_fill_super(struct super_block *s, struct fs_context *fc)
s->s_blocksize_bits = PAGE_SHIFT;
s->s_magic = ctx->magic;
s->s_op = ctx->ops ?: &simple_super_operations;
+ s->s_export_op = ctx->eops;
s->s_xattr = ctx->xattr;
s->s_time_gran = 1;
+ s->s_d_flags |= ctx->s_d_flags;
root = new_inode(s);
if (!root)
return -ENOMEM;
@@ -690,7 +709,7 @@ static int pseudo_fs_fill_super(struct super_block *s, struct fs_context *fc)
s->s_root = d_make_root(root);
if (!s->s_root)
return -ENOMEM;
- s->s_d_op = ctx->dops;
+ set_default_d_op(s, ctx->dops);
return 0;
}
@@ -746,8 +765,7 @@ int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *den
inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
inc_nlink(inode);
ihold(inode);
- dget(dentry);
- d_instantiate(dentry, inode);
+ d_make_persistent(dentry, inode);
return 0;
}
EXPORT_SYMBOL(simple_link);
@@ -773,14 +791,28 @@ out:
}
EXPORT_SYMBOL(simple_empty);
-int simple_unlink(struct inode *dir, struct dentry *dentry)
+void __simple_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
inode_set_mtime_to_ts(dir,
inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
drop_nlink(inode);
- dput(dentry);
+}
+EXPORT_SYMBOL(__simple_unlink);
+
+void __simple_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ drop_nlink(d_inode(dentry));
+ __simple_unlink(dir, dentry);
+ drop_nlink(dir);
+}
+EXPORT_SYMBOL(__simple_rmdir);
+
+int simple_unlink(struct inode *dir, struct dentry *dentry)
+{
+ __simple_unlink(dir, dentry);
+ d_make_discardable(dentry);
return 0;
}
EXPORT_SYMBOL(simple_unlink);
@@ -790,9 +822,8 @@ int simple_rmdir(struct inode *dir, struct dentry *dentry)
if (!simple_empty(dentry))
return -ENOTEMPTY;
- drop_nlink(d_inode(dentry));
- simple_unlink(dir, dentry);
- drop_nlink(dir);
+ __simple_rmdir(dir, dentry);
+ d_make_discardable(dentry);
return 0;
}
EXPORT_SYMBOL(simple_rmdir);
@@ -916,7 +947,7 @@ static int simple_read_folio(struct file *file, struct folio *folio)
return 0;
}
-int simple_write_begin(struct file *file, struct address_space *mapping,
+int simple_write_begin(const struct kiocb *iocb, struct address_space *mapping,
loff_t pos, unsigned len,
struct folio **foliop, void **fsdata)
{
@@ -941,7 +972,7 @@ EXPORT_SYMBOL(simple_write_begin);
/**
* simple_write_end - .write_end helper for non-block-device FSes
- * @file: See .write_end of address_space_operations
+ * @iocb: kernel I/O control block
* @mapping: "
* @pos: "
* @len: "
@@ -952,7 +983,8 @@ EXPORT_SYMBOL(simple_write_begin);
* simple_write_end does the minimum needed for updating a folio after
* writing is done. It has the same API signature as the .write_end of
* address_space_operations vector. So it can just be set onto .write_end for
- * FSes that don't need any other processing. i_mutex is assumed to be held.
+ * FSes that don't need any other processing. i_rwsem is assumed to be held
+ * exclusively.
* Block based filesystems should use generic_write_end().
* NOTE: Even though i_size might get updated by this function, mark_inode_dirty
* is not called, so a filesystem that actually does store data in .write_inode
@@ -961,9 +993,10 @@ EXPORT_SYMBOL(simple_write_begin);
*
* Use *ONLY* with simple_read_folio()
*/
-static int simple_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct folio *folio, void *fsdata)
+static int simple_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
struct inode *inode = folio->mapping->host;
loff_t last_pos = pos + copied;
@@ -979,7 +1012,7 @@ static int simple_write_end(struct file *file, struct address_space *mapping,
}
/*
* No need to use i_size_read() here, the i_size
- * cannot change under us because we hold the i_mutex.
+ * cannot change under us because we hold the i_rwsem.
*/
if (last_pos > inode->i_size)
i_size_write(inode, last_pos);
@@ -1058,7 +1091,8 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
simple_inode_init_ts(inode);
inode->i_fop = files->ops;
inode->i_ino = i;
- d_add(dentry, inode);
+ d_make_persistent(dentry, inode);
+ dput(dentry);
}
return 0;
}
@@ -1535,9 +1569,9 @@ int __generic_file_fsync(struct file *file, loff_t start, loff_t end,
inode_lock(inode);
ret = sync_mapping_buffers(inode->i_mapping);
- if (!(inode->i_state & I_DIRTY_ALL))
+ if (!(inode_state_read_once(inode) & I_DIRTY_ALL))
goto out;
- if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+ if (datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC))
goto out;
err = sync_inode_metadata(inode, 1);
@@ -1589,13 +1623,17 @@ EXPORT_SYMBOL(generic_file_fsync);
int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks)
{
u64 last_fs_block = num_blocks - 1;
- u64 last_fs_page =
- last_fs_block >> (PAGE_SHIFT - blocksize_bits);
+ u64 last_fs_page, max_bytes;
+
+ if (check_shl_overflow(num_blocks, blocksize_bits, &max_bytes))
+ return -EFBIG;
+
+ last_fs_page = (max_bytes >> PAGE_SHIFT) - 1;
if (unlikely(num_blocks == 0))
return 0;
- if ((blocksize_bits < 9) || (blocksize_bits > PAGE_SHIFT))
+ if (blocksize_bits < 9)
return -EINVAL;
if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) ||
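
The hunk above replaces an unchecked shift with check_shl_overflow() from <linux/overflow.h>, which returns true when the shifted value does not fit the destination type. Minimal usage sketch; fs_bytes() is a made-up name:

static int fs_bytes(u64 num_blocks, unsigned int blocksize_bits, u64 *bytes)
{
	if (check_shl_overflow(num_blocks, blocksize_bits, bytes))
		return -EFBIG;	/* num_blocks << blocksize_bits overflowed u64 */
	return 0;
}
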
@@ -1653,11 +1691,15 @@ struct inode *alloc_anon_inode(struct super_block *s)
* list because mark_inode_dirty() will think
* that it already _is_ on the dirty list.
*/
- inode->i_state = I_DIRTY;
+ inode_state_assign_raw(inode, I_DIRTY);
+ /*
+ * Historically anonymous inodes don't have a type at all and
+ * userspace has come to rely on this.
+ */
inode->i_mode = S_IRUSR | S_IWUSR;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
- inode->i_flags |= S_PRIVATE;
+ inode->i_flags |= S_PRIVATE | S_ANON_INODE;
simple_inode_init_ts(inode);
return inode;
}
@@ -1789,7 +1831,7 @@ int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
{
const struct dentry *parent;
const struct inode *dir;
- char strbuf[DNAME_INLINE_LEN];
+ union shortname_store strbuf;
struct qstr qstr;
/*
@@ -1809,22 +1851,23 @@ int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
if (!dir || !IS_CASEFOLDED(dir))
return 1;
+ qstr.len = len;
+ qstr.name = str;
/*
* If the dentry name is stored in-line, then it may be concurrently
* modified by a rename. If this happens, the VFS will eventually retry
* the lookup, so it doesn't matter what ->d_compare() returns.
* However, it's unsafe to call utf8_strncasecmp() with an unstable
* string. Therefore, we have to copy the name into a temporary buffer.
+ * As above, len is guaranteed to match str, so the shortname case
+ * is exactly when str points to ->d_shortname.
*/
- if (len <= DNAME_INLINE_LEN - 1) {
- memcpy(strbuf, str, len);
- strbuf[len] = 0;
- str = strbuf;
+ if (qstr.name == dentry->d_shortname.string) {
+ strbuf = dentry->d_shortname; // NUL is guaranteed to be in there
+ qstr.name = strbuf.string;
/* prevent compiler from optimizing out the temporary buffer */
barrier();
}
- qstr.len = len;
- qstr.name = str;
return utf8_strncasecmp(dentry->d_sb->s_encoding, name, &qstr);
}
@@ -1949,22 +1992,22 @@ static const struct dentry_operations generic_encrypted_dentry_ops = {
* @sb: superblock to be configured
*
* Filesystems supporting casefolding and/or fscrypt can call this
- * helper at mount-time to configure sb->s_d_op to best set of dentry
- * operations required for the enabled features. The helper must be
- * called after these have been configured, but before the root dentry
- * is created.
+ * helper at mount-time to configure default dentry_operations to the
+ * best set of dentry operations required for the enabled features.
+ * The helper must be called after these have been configured, but
+ * before the root dentry is created.
*/
void generic_set_sb_d_ops(struct super_block *sb)
{
#if IS_ENABLED(CONFIG_UNICODE)
if (sb->s_encoding) {
- sb->s_d_op = &generic_ci_dentry_ops;
+ set_default_d_op(sb, &generic_ci_dentry_ops);
return;
}
#endif
#ifdef CONFIG_FS_ENCRYPTION
if (sb->s_cop) {
- sb->s_d_op = &generic_encrypted_dentry_ops;
+ set_default_d_op(sb, &generic_encrypted_dentry_ops);
return;
}
#endif
@@ -2119,7 +2162,7 @@ struct timespec64 simple_inode_init_ts(struct inode *inode)
}
EXPORT_SYMBOL(simple_inode_init_ts);
-static inline struct dentry *get_stashed_dentry(struct dentry **stashed)
+struct dentry *stashed_dentry_get(struct dentry **stashed)
{
struct dentry *dentry;
@@ -2127,6 +2170,8 @@ static inline struct dentry *get_stashed_dentry(struct dentry **stashed)
dentry = rcu_dereference(*stashed);
if (!dentry)
return NULL;
+ if (IS_ERR(dentry))
+ return dentry;
if (!lockref_get_not_dead(&dentry->d_lockref))
return NULL;
return dentry;
@@ -2159,7 +2204,6 @@ static struct dentry *prepare_anon_dentry(struct dentry **stashed,
/* Notice when this is changed. */
WARN_ON_ONCE(!S_ISREG(inode->i_mode));
- WARN_ON_ONCE(!IS_IMMUTABLE(inode));
dentry = d_alloc_anon(sb);
if (!dentry) {
@@ -2175,8 +2219,7 @@ static struct dentry *prepare_anon_dentry(struct dentry **stashed,
return dentry;
}
-static struct dentry *stash_dentry(struct dentry **stashed,
- struct dentry *dentry)
+struct dentry *stash_dentry(struct dentry **stashed, struct dentry *dentry)
{
guard(rcu)();
for (;;) {
@@ -2217,14 +2260,16 @@ static struct dentry *stash_dentry(struct dentry **stashed,
int path_from_stashed(struct dentry **stashed, struct vfsmount *mnt, void *data,
struct path *path)
{
- struct dentry *dentry;
+ struct dentry *dentry, *res;
const struct stashed_operations *sops = mnt->mnt_sb->s_fs_info;
/* See if dentry can be reused. */
- path->dentry = get_stashed_dentry(stashed);
- if (path->dentry) {
+ res = stashed_dentry_get(stashed);
+ if (IS_ERR(res))
+ return PTR_ERR(res);
+ if (res) {
sops->put_data(data);
- goto out_path;
+ goto make_path;
}
/* Allocate a new dentry. */
@@ -2233,14 +2278,22 @@ int path_from_stashed(struct dentry **stashed, struct vfsmount *mnt, void *data,
return PTR_ERR(dentry);
/* Added a new dentry. @data is now owned by the filesystem. */
- path->dentry = stash_dentry(stashed, dentry);
- if (path->dentry != dentry)
+ if (sops->stash_dentry)
+ res = sops->stash_dentry(stashed, dentry);
+ else
+ res = stash_dentry(stashed, dentry);
+ if (IS_ERR(res)) {
+ dput(dentry);
+ return PTR_ERR(res);
+ }
+ if (res != dentry)
dput(dentry);
-out_path:
- WARN_ON_ONCE(path->dentry->d_fsdata != stashed);
- WARN_ON_ONCE(d_inode(path->dentry)->i_private != data);
+make_path:
+ path->dentry = res;
path->mnt = mntget(mnt);
+ VFS_WARN_ON_ONCE(path->dentry->d_fsdata != stashed);
+ VFS_WARN_ON_ONCE(d_inode(path->dentry)->i_private != data);
return 0;
}
@@ -2262,3 +2315,34 @@ void stashed_dentry_prune(struct dentry *dentry)
*/
cmpxchg(stashed, dentry, NULL);
}
+
+/**
+ * simple_start_creating - prepare to create a given name
+ * @parent: directory in which to prepare to create the name
+ * @name: the name to be created
+ *
+ * Required lock is taken and a lookup is performed prior to creating an
+ * object in a directory. No permission checking is performed.
+ *
+ * Returns: a negative dentry on which vfs_create() or similar may
+ * be attempted, or an error.
+ */
+struct dentry *simple_start_creating(struct dentry *parent, const char *name)
+{
+ struct qstr qname = QSTR(name);
+ int err;
+
+ err = lookup_noperm_common(&qname, parent);
+ if (err)
+ return ERR_PTR(err);
+ return start_dirop(parent, &qname, LOOKUP_CREATE | LOOKUP_EXCL);
+}
+EXPORT_SYMBOL(simple_start_creating);
+
+/* parent must have been held exclusive since simple_start_creating() */
+void simple_done_creating(struct dentry *child)
+{
+ inode_unlock(child->d_parent->d_inode);
+ dput(child);
+}
+EXPORT_SYMBOL(simple_done_creating);
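
A sketch of how the new pair above is meant to be used by a pseudo filesystem, per the kernel-doc; my_instantiate() stands in for whatever the caller does with the negative dentry it gets back:

static int pseudo_add_file(struct dentry *parent, const char *name)
{
	struct dentry *dentry;
	int err;

	dentry = simple_start_creating(parent, name);	/* locks parent */
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	err = my_instantiate(d_inode(parent), dentry);

	simple_done_creating(dentry);	/* unlocks parent, drops dentry */
	return err;
}
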
diff --git a/fs/lockd/Makefile b/fs/lockd/Makefile
index fe3e23dd29c3..51bbe22d21e3 100644
--- a/fs/lockd/Makefile
+++ b/fs/lockd/Makefile
@@ -8,6 +8,6 @@ ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_LOCKD) += lockd.o
lockd-y := clntlock.o clntproc.o clntxdr.o host.o svc.o svclock.o \
- svcshare.o svcproc.o svcsubs.o mon.o trace.o xdr.o
+ svcshare.o svcproc.o svcsubs.o mon.o trace.o xdr.o netlink.o
lockd-$(CONFIG_LOCKD_V4) += clnt4xdr.o xdr4.o svc4proc.o
lockd-$(CONFIG_PROC_FS) += procfs.o
diff --git a/fs/lockd/netlink.c b/fs/lockd/netlink.c
new file mode 100644
index 000000000000..880c42b4f8c3
--- /dev/null
+++ b/fs/lockd/netlink.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/lockd.yaml */
+/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "netlink.h"
+
+#include <uapi/linux/lockd_netlink.h>
+
+/* LOCKD_CMD_SERVER_SET - do */
+static const struct nla_policy lockd_server_set_nl_policy[LOCKD_A_SERVER_UDP_PORT + 1] = {
+ [LOCKD_A_SERVER_GRACETIME] = { .type = NLA_U32, },
+ [LOCKD_A_SERVER_TCP_PORT] = { .type = NLA_U16, },
+ [LOCKD_A_SERVER_UDP_PORT] = { .type = NLA_U16, },
+};
+
+/* Ops table for lockd */
+static const struct genl_split_ops lockd_nl_ops[] = {
+ {
+ .cmd = LOCKD_CMD_SERVER_SET,
+ .doit = lockd_nl_server_set_doit,
+ .policy = lockd_server_set_nl_policy,
+ .maxattr = LOCKD_A_SERVER_UDP_PORT,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = LOCKD_CMD_SERVER_GET,
+ .doit = lockd_nl_server_get_doit,
+ .flags = GENL_CMD_CAP_DO,
+ },
+};
+
+struct genl_family lockd_nl_family __ro_after_init = {
+ .name = LOCKD_FAMILY_NAME,
+ .version = LOCKD_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = lockd_nl_ops,
+ .n_split_ops = ARRAY_SIZE(lockd_nl_ops),
+};
diff --git a/fs/lockd/netlink.h b/fs/lockd/netlink.h
new file mode 100644
index 000000000000..d8408f077dd8
--- /dev/null
+++ b/fs/lockd/netlink.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/lockd.yaml */
+/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#ifndef _LINUX_LOCKD_GEN_H
+#define _LINUX_LOCKD_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/lockd_netlink.h>
+
+int lockd_nl_server_set_doit(struct sk_buff *skb, struct genl_info *info);
+int lockd_nl_server_get_doit(struct sk_buff *skb, struct genl_info *info);
+
+extern struct genl_family lockd_nl_family;
+
+#endif /* _LINUX_LOCKD_GEN_H */
diff --git a/fs/lockd/netns.h b/fs/lockd/netns.h
index 17432c445fe6..88e8e2a97397 100644
--- a/fs/lockd/netns.h
+++ b/fs/lockd/netns.h
@@ -10,6 +10,9 @@ struct lockd_net {
unsigned int nlmsvc_users;
unsigned long next_gc;
unsigned long nrhosts;
+ u32 gracetime;
+ u16 tcp_port;
+ u16 udp_port;
struct delayed_work grace_period_end;
struct lock_manager lockd_manager;
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 4ec22c2f2ea3..d68afa196535 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -41,6 +41,7 @@
#include "netns.h"
#include "procfs.h"
+#include "netlink.h"
#define NLMDBG_FACILITY NLMDBG_SVC
#define LOCKD_BUFSIZE (1024 + NLMSVC_XDRSIZE)
@@ -70,9 +71,6 @@ static unsigned long nlm_grace_period;
unsigned long nlm_timeout = LOCKD_DFLT_TIMEO;
static int nlm_udpport, nlm_tcpport;
-/* RLIM_NOFILE defaults to 1024. That seems like a reasonable default here. */
-static unsigned int nlm_max_connections = 1024;
-
/*
* Constants needed for the sysctl interface.
*/
@@ -86,8 +84,14 @@ static const int nlm_port_min = 0, nlm_port_max = 65535;
static struct ctl_table_header * nlm_sysctl_table;
#endif
-static unsigned long get_lockd_grace_period(void)
+static unsigned long get_lockd_grace_period(struct net *net)
{
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+
+ /* Return the net-ns specific grace period, if there is one */
+ if (ln->gracetime)
+ return ln->gracetime * HZ;
+
/* Note: nlm_timeout should always be nonzero */
if (nlm_grace_period)
return roundup(nlm_grace_period, nlm_timeout) * HZ;
@@ -106,7 +110,7 @@ static void grace_ender(struct work_struct *grace)
static void set_grace_period(struct net *net)
{
- unsigned long grace_period = get_lockd_grace_period();
+ unsigned long grace_period = get_lockd_grace_period(net);
struct lockd_net *ln = net_generic(net, lockd_net_id);
locks_start_grace(net, &ln->lockd_manager);
@@ -136,9 +140,6 @@ lockd(void *vrqstp)
* NFS mount or NFS daemon has gone away.
*/
while (!svc_thread_should_stop(rqstp)) {
- /* update sv_maxconn if it has changed */
- rqstp->rq_server->sv_maxconn = nlm_max_connections;
-
nlmsvc_retry_blocked(rqstp);
svc_recv(rqstp);
}
@@ -172,15 +173,16 @@ static int create_lockd_listener(struct svc_serv *serv, const char *name,
static int create_lockd_family(struct svc_serv *serv, struct net *net,
const int family, const struct cred *cred)
{
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
int err;
- err = create_lockd_listener(serv, "udp", net, family, nlm_udpport,
- cred);
+ err = create_lockd_listener(serv, "udp", net, family,
+ ln->udp_port ? ln->udp_port : nlm_udpport, cred);
if (err < 0)
return err;
- return create_lockd_listener(serv, "tcp", net, family, nlm_tcpport,
- cred);
+ return create_lockd_listener(serv, "tcp", net, family,
+ ln->tcp_port ? ln->tcp_port : nlm_tcpport, cred);
}
/*
@@ -214,8 +216,7 @@ out_err:
if (warned++ == 0)
printk(KERN_WARNING
"lockd_up: makesock failed, error=%d\n", err);
- svc_xprt_destroy_all(serv, net);
- svc_rpcb_cleanup(serv, net);
+ svc_xprt_destroy_all(serv, net, true);
return err;
}
@@ -253,8 +254,7 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net)
nlm_shutdown_hosts_net(net);
cancel_delayed_work_sync(&ln->grace_period_end);
locks_end_grace(&ln->lockd_manager);
- svc_xprt_destroy_all(serv, net);
- svc_rpcb_cleanup(serv, net);
+ svc_xprt_destroy_all(serv, net, true);
}
} else {
pr_err("%s: no users! net=%x\n",
@@ -340,7 +340,6 @@ static int lockd_get(void)
return -ENOMEM;
}
- serv->sv_maxconn = nlm_max_connections;
error = svc_set_num_threads(serv, NULL, 1);
if (error < 0) {
svc_destroy(&serv);
@@ -419,7 +418,7 @@ EXPORT_SYMBOL_GPL(lockd_down);
* Sysctl parameters (same as module parameters, different interface).
*/
-static struct ctl_table nlm_sysctls[] = {
+static const struct ctl_table nlm_sysctls[] = {
{
.procname = "nlm_grace_period",
.data = &nlm_grace_period,
@@ -466,9 +465,10 @@ static struct ctl_table nlm_sysctls[] = {
{
.procname = "nsm_local_state",
.data = &nsm_local_state,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(nsm_local_state),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_douintvec,
+ .extra1 = SYSCTL_ZERO,
},
};
@@ -542,7 +542,6 @@ module_param_call(nlm_udpport, param_set_port, param_get_int,
module_param_call(nlm_tcpport, param_set_port, param_get_int,
&nlm_tcpport, 0644);
module_param(nsm_use_hostnames, bool, 0644);
-module_param(nlm_max_connections, uint, 0644);
static int lockd_init_net(struct net *net)
{
@@ -596,6 +595,10 @@ static int __init init_nlm(void)
if (err)
goto err_pernet;
+ err = genl_register_family(&lockd_nl_family);
+ if (err)
+ goto err_netlink;
+
err = lockd_create_procfs();
if (err)
goto err_procfs;
@@ -603,6 +606,8 @@ static int __init init_nlm(void)
return 0;
err_procfs:
+ genl_unregister_family(&lockd_nl_family);
+err_netlink:
unregister_pernet_subsys(&lockd_net_ops);
err_pernet:
#ifdef CONFIG_SYSCTL
@@ -616,6 +621,7 @@ static void __exit exit_nlm(void)
{
/* FIXME: delete all NLM clients */
nlm_shutdown_hosts();
+ genl_unregister_family(&lockd_nl_family);
lockd_remove_procfs();
unregister_pernet_subsys(&lockd_net_ops);
#ifdef CONFIG_SYSCTL
@@ -718,3 +724,94 @@ static struct svc_program nlmsvc_program = {
.pg_init_request = svc_generic_init_request,
.pg_rpcbind_set = svc_generic_rpcbind_set,
};
+
+/**
+ * lockd_nl_server_set_doit - set the lockd server parameters via netlink
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * This updates the per-net values. When updating the values in the init_net
+ * namespace, also update the "legacy" global values.
+ *
+ * Return 0 on success or a negative errno.
+ */
+int lockd_nl_server_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+ const struct nlattr *attr;
+
+ if (GENL_REQ_ATTR_CHECK(info, LOCKD_A_SERVER_GRACETIME))
+ return -EINVAL;
+
+ if (info->attrs[LOCKD_A_SERVER_GRACETIME] ||
+ info->attrs[LOCKD_A_SERVER_TCP_PORT] ||
+ info->attrs[LOCKD_A_SERVER_UDP_PORT]) {
+ attr = info->attrs[LOCKD_A_SERVER_GRACETIME];
+ if (attr) {
+ u32 gracetime = nla_get_u32(attr);
+
+ if (gracetime > nlm_grace_period_max)
+ return -EINVAL;
+
+ ln->gracetime = gracetime;
+
+ if (net == &init_net)
+ nlm_grace_period = gracetime;
+ }
+
+ attr = info->attrs[LOCKD_A_SERVER_TCP_PORT];
+ if (attr) {
+ ln->tcp_port = nla_get_u16(attr);
+ if (net == &init_net)
+ nlm_tcpport = ln->tcp_port;
+ }
+
+ attr = info->attrs[LOCKD_A_SERVER_UDP_PORT];
+ if (attr) {
+ ln->udp_port = nla_get_u16(attr);
+ if (net == &init_net)
+ nlm_udpport = ln->udp_port;
+ }
+ }
+ return 0;
+}
+
+/**
+ * lockd_nl_server_get_doit - get lockd server parameters via netlink
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int lockd_nl_server_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+ void *hdr;
+ int err;
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = genlmsg_iput(skb, info);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto err_free_msg;
+ }
+
+ err = nla_put_u32(skb, LOCKD_A_SERVER_GRACETIME, ln->gracetime) ||
+ nla_put_u16(skb, LOCKD_A_SERVER_TCP_PORT, ln->tcp_port) ||
+ nla_put_u16(skb, LOCKD_A_SERVER_UDP_PORT, ln->udp_port);
+ if (err)
+ goto err_free_msg;
+
+ genlmsg_end(skb, hdr);
+
+ return genlmsg_reply(skb, info);
+err_free_msg:
+ nlmsg_free(skb);
+
+ return err;
+}
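The per-net gracetime set over netlink takes precedence over the legacy module parameters. A minimal sketch of the resulting precedence, assuming the usual fallback of five nlm_timeout periods (the final fallback is not visible in this hunk):

	static unsigned long example_grace_period(struct net *net)
	{
		struct lockd_net *ln = net_generic(net, lockd_net_id);

		if (ln->gracetime)		/* value set via the netlink interface */
			return ln->gracetime * HZ;
		if (nlm_grace_period)		/* legacy module/sysctl parameter */
			return roundup(nlm_grace_period, nlm_timeout) * HZ;
		return nlm_timeout * 5 * HZ;	/* assumed built-in default */
	}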
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index c1315df4b350..3a3d05cfe09a 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -495,6 +495,9 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
(long long)lock->fl.fl_end,
wait);
+ if (nlmsvc_file_cannot_lock(file))
+ return nlm_lck_denied_nolocks;
+
if (!locks_can_async_lock(nlmsvc_file_file(file)->f_op)) {
async_block = wait;
wait = 0;
@@ -621,6 +624,9 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
+ if (nlmsvc_file_cannot_lock(file))
+ return nlm_lck_denied_nolocks;
+
if (locks_in_grace(SVC_NET(rqstp))) {
ret = nlm_lck_denied_grace_period;
goto out;
@@ -678,6 +684,9 @@ nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
+ if (nlmsvc_file_cannot_lock(file))
+ return nlm_lck_denied_nolocks;
+
/* First, cancel any lock that might be there */
nlmsvc_cancel_blocked(net, file, lock);
@@ -715,6 +724,9 @@ nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *l
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
+ if (nlmsvc_file_cannot_lock(file))
+ return nlm_lck_denied_nolocks;
+
if (locks_in_grace(net))
return nlm_lck_denied_grace_period;
@@ -980,7 +992,7 @@ nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
struct file_lock *fl;
int error;
- dprintk("grant_reply: looking for cookie %x, s=%d \n",
+ dprintk("grant_reply: looking for cookie %x, s=%d\n",
*(unsigned int *)(cookie->data), status);
if (!(block = nlmsvc_find_block(cookie)))
return;
diff --git a/fs/lockd/svcshare.c b/fs/lockd/svcshare.c
index ade4931b2da2..88c81ce1148d 100644
--- a/fs/lockd/svcshare.c
+++ b/fs/lockd/svcshare.c
@@ -32,6 +32,9 @@ nlmsvc_share_file(struct nlm_host *host, struct nlm_file *file,
struct xdr_netobj *oh = &argp->lock.oh;
u8 *ohdata;
+ if (nlmsvc_file_cannot_lock(file))
+ return nlm_lck_denied_nolocks;
+
for (share = file->f_shares; share; share = share->s_next) {
if (share->s_host == host && nlm_cmp_owner(share, oh))
goto update;
@@ -72,6 +75,9 @@ nlmsvc_unshare_file(struct nlm_host *host, struct nlm_file *file,
struct nlm_share *share, **shpp;
struct xdr_netobj *oh = &argp->lock.oh;
+ if (nlmsvc_file_cannot_lock(file))
+ return nlm_lck_denied_nolocks;
+
for (shpp = &file->f_shares; (share = *shpp) != NULL;
shpp = &share->s_next) {
if (share->s_host == host && nlm_cmp_owner(share, oh)) {
diff --git a/fs/locks.c b/fs/locks.c
index 25afc8d9c9d1..9f565802a88c 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -97,7 +97,7 @@ static int leases_enable = 1;
static int lease_break_time = 45;
#ifdef CONFIG_SYSCTL
-static struct ctl_table locks_sysctls[] = {
+static const struct ctl_table locks_sysctls[] = {
{
.procname = "leases-enable",
.data = &leases_enable,
@@ -585,7 +585,7 @@ static const struct lease_manager_operations lease_manager_ops = {
/*
* Initialize a lease, use the default lock manager operations
*/
-static int lease_init(struct file *filp, int type, struct file_lease *fl)
+static int lease_init(struct file *filp, unsigned int flags, int type, struct file_lease *fl)
{
if (assign_type(&fl->c, type) != 0)
return -EINVAL;
@@ -594,13 +594,13 @@ static int lease_init(struct file *filp, int type, struct file_lease *fl)
fl->c.flc_pid = current->tgid;
fl->c.flc_file = filp;
- fl->c.flc_flags = FL_LEASE;
+ fl->c.flc_flags = flags;
fl->fl_lmops = &lease_manager_ops;
return 0;
}
/* Allocate a file_lock initialised to this type of lease */
-static struct file_lease *lease_alloc(struct file *filp, int type)
+static struct file_lease *lease_alloc(struct file *filp, unsigned int flags, int type)
{
struct file_lease *fl = locks_alloc_lease();
int error = -ENOMEM;
@@ -608,7 +608,7 @@ static struct file_lease *lease_alloc(struct file *filp, int type)
if (fl == NULL)
return ERR_PTR(error);
- error = lease_init(filp, type, fl);
+ error = lease_init(filp, flags, type, fl);
if (error) {
locks_free_lease(fl);
return ERR_PTR(error);
@@ -712,7 +712,7 @@ static void __locks_wake_up_blocks(struct file_lock_core *blocker)
fl->fl_lmops && fl->fl_lmops->lm_notify)
fl->fl_lmops->lm_notify(fl);
else
- locks_wake_up(fl);
+ locks_wake_up_waiter(waiter);
/*
* The setting of flc_blocker to NULL marks the "done"
@@ -1529,29 +1529,35 @@ any_leases_conflict(struct inode *inode, struct file_lease *breaker)
/**
* __break_lease - revoke all outstanding leases on file
* @inode: the inode of the file to return
- * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
- * break all leases
- * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
- * only delegations
+ * @flags: LEASE_BREAK_* flags
*
* break_lease (inlined for speed) has checked there already is at least
* some kind of lock (maybe a lease) on this file. Leases are broken on
- * a call to open() or truncate(). This function can sleep unless you
- * specified %O_NONBLOCK to your open().
+ * a call to open() or truncate(). This function can block waiting for the
+ * lease break unless you specify LEASE_BREAK_NONBLOCK.
*/
-int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
+int __break_lease(struct inode *inode, unsigned int flags)
{
- int error = 0;
- struct file_lock_context *ctx;
struct file_lease *new_fl, *fl, *tmp;
+ struct file_lock_context *ctx;
unsigned long break_time;
- int want_write = (mode & O_ACCMODE) != O_RDONLY;
+ unsigned int type;
LIST_HEAD(dispose);
+ bool want_write = !(flags & LEASE_BREAK_OPEN_RDONLY);
+ int error = 0;
- new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
+ if (flags & LEASE_BREAK_LEASE)
+ type = FL_LEASE;
+ else if (flags & LEASE_BREAK_DELEG)
+ type = FL_DELEG;
+ else if (flags & LEASE_BREAK_LAYOUT)
+ type = FL_LAYOUT;
+ else
+ return -EINVAL;
+
+ new_fl = lease_alloc(NULL, type, want_write ? F_WRLCK : F_RDLCK);
if (IS_ERR(new_fl))
return PTR_ERR(new_fl);
- new_fl->c.flc_flags = type;
/* typically we will check that ctx is non-NULL before calling */
ctx = locks_inode_context(inode);
@@ -1596,7 +1602,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
if (list_empty(&ctx->flc_lease))
goto out;
- if (mode & O_NONBLOCK) {
+ if (flags & LEASE_BREAK_NONBLOCK) {
trace_break_lease_noblock(inode, new_fl);
error = -EWOULDBLOCK;
goto out;
@@ -1675,8 +1681,9 @@ void lease_get_mtime(struct inode *inode, struct timespec64 *time)
EXPORT_SYMBOL(lease_get_mtime);
/**
- * fcntl_getlease - Enquire what lease is currently active
+ * __fcntl_getlease - Enquire what lease is currently active
* @filp: the file
+ * @flavor: type of lease flags to check
*
* The value returned by this function will be one of
* (if no lease break is pending):
@@ -1697,7 +1704,7 @@ EXPORT_SYMBOL(lease_get_mtime);
* XXX: sfr & willy disagree over whether F_INPROGRESS
* should be returned to userspace.
*/
-int fcntl_getlease(struct file *filp)
+static int __fcntl_getlease(struct file *filp, unsigned int flavor)
{
struct file_lease *fl;
struct inode *inode = file_inode(filp);
@@ -1713,7 +1720,8 @@ int fcntl_getlease(struct file *filp)
list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) {
if (fl->c.flc_file != filp)
continue;
- type = target_leasetype(fl);
+ if (fl->c.flc_flags & flavor)
+ type = target_leasetype(fl);
break;
}
spin_unlock(&ctx->flc_lock);
@@ -1724,6 +1732,19 @@ int fcntl_getlease(struct file *filp)
return type;
}
+int fcntl_getlease(struct file *filp)
+{
+ return __fcntl_getlease(filp, FL_LEASE);
+}
+
+int fcntl_getdeleg(struct file *filp, struct delegation *deleg)
+{
+ if (deleg->d_flags != 0 || deleg->__pad != 0)
+ return -EINVAL;
+ deleg->d_type = __fcntl_getlease(filp, FL_DELEG);
+ return 0;
+}
+
/**
* check_conflicting_open - see if the given file points to an inode that has
* an existing open that would conflict with the
@@ -1794,7 +1815,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **pr
/*
* In the delegation case we need mutual exclusion with
- * a number of operations that take the i_mutex. We trylock
+ * a number of operations that take the i_rwsem. We trylock
* because delegations are an optional optimization, and if
* there's some chance of a conflict--we'd rather not
* bother, maybe that's a sign this just isn't a good file to
@@ -1929,11 +1950,19 @@ static int generic_delete_lease(struct file *filp, void *owner)
int generic_setlease(struct file *filp, int arg, struct file_lease **flp,
void **priv)
{
+ struct inode *inode = file_inode(filp);
+
+ if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
+ return -EINVAL;
+
switch (arg) {
case F_UNLCK:
return generic_delete_lease(filp, *priv);
- case F_RDLCK:
case F_WRLCK:
+ if (S_ISDIR(inode->i_mode))
+ return -EINVAL;
+ fallthrough;
+ case F_RDLCK:
if (!(*flp)->fl_lmops->lm_break) {
WARN_ON_ONCE(1);
return -ENOLCK;
@@ -2018,8 +2047,6 @@ vfs_setlease(struct file *filp, int arg, struct file_lease **lease, void **priv)
if ((!vfsuid_eq_kuid(vfsuid, current_fsuid())) && !capable(CAP_LEASE))
return -EACCES;
- if (!S_ISREG(inode->i_mode))
- return -EINVAL;
error = security_file_lock(filp, arg);
if (error)
return error;
@@ -2027,13 +2054,13 @@ vfs_setlease(struct file *filp, int arg, struct file_lease **lease, void **priv)
}
EXPORT_SYMBOL_GPL(vfs_setlease);
-static int do_fcntl_add_lease(unsigned int fd, struct file *filp, int arg)
+static int do_fcntl_add_lease(unsigned int fd, struct file *filp, unsigned int flavor, int arg)
{
struct file_lease *fl;
struct fasync_struct *new;
int error;
- fl = lease_alloc(filp, arg);
+ fl = lease_alloc(filp, flavor, arg);
if (IS_ERR(fl))
return PTR_ERR(fl);
@@ -2064,9 +2091,33 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, int arg)
*/
int fcntl_setlease(unsigned int fd, struct file *filp, int arg)
{
+ if (S_ISDIR(file_inode(filp)->i_mode))
+ return -EINVAL;
+
if (arg == F_UNLCK)
return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
- return do_fcntl_add_lease(fd, filp, arg);
+ return do_fcntl_add_lease(fd, filp, FL_LEASE, arg);
+}
+
+/**
+ * fcntl_setdeleg - sets a delegation on an open file
+ * @fd: open file descriptor
+ * @filp: file pointer
+ * @deleg: delegation request from userland
+ *
+ * Call this fcntl to establish a delegation on the file.
+ * Note that you also need to call %F_SETSIG to
+ * receive a signal when the lease is broken.
+ */
+int fcntl_setdeleg(unsigned int fd, struct file *filp, struct delegation *deleg)
+{
+ /* For now, no flags are supported */
+ if (deleg->d_flags != 0 || deleg->__pad != 0)
+ return -EINVAL;
+
+ if (deleg->d_type == F_UNLCK)
+ return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
+ return do_fcntl_add_lease(fd, filp, FL_DELEG, deleg->d_type);
}
/**
@@ -2328,8 +2379,8 @@ out:
* To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
* locks, the ->lock() interface may return asynchronously, before the lock has
* been granted or denied by the underlying filesystem, if (and only if)
- * lm_grant is set. Additionally EXPORT_OP_ASYNC_LOCK in export_operations
- * flags need to be set.
+ * lm_grant is set. Additionally FOP_ASYNC_LOCK in file_operations fop_flags
+ * needs to be set.
*
* Callers expecting ->lock() to return asynchronously will only use F_SETLK,
* not F_SETLKW; they will set FL_SLEEP if (and only if) the request is for a
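A hedged userspace sketch of how the new delegation fcntls above might be driven; the F_SETDELEG command name and the exact layout of struct delegation are assumptions inferred from fcntl_setdeleg()/fcntl_getdeleg(), not confirmed UAPI:

	#include <fcntl.h>

	/* assumed to mirror the kernel-side struct consumed by fcntl_setdeleg() */
	struct delegation {
		unsigned int d_flags;	/* no flags defined yet; must be 0 */
		unsigned int __pad;	/* must be 0 */
		int d_type;		/* F_RDLCK, F_WRLCK or F_UNLCK */
	};

	/* request a read delegation on an already-open descriptor */
	static int request_read_deleg(int fd)
	{
		struct delegation d = { .d_type = F_RDLCK };

		return fcntl(fd, F_SETDELEG, &d);	/* command name assumed */
	}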
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index dd2a425b41f0..19052fc47e9e 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -45,7 +45,7 @@ static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
struct address_space *mapping = folio->mapping;
struct inode *dir = mapping->host;
- block_write_end(NULL, mapping, pos, len, len, folio, NULL);
+ block_write_end(pos, len, len, folio);
if (pos+len > dir->i_size) {
i_size_write(dir, pos+len);
diff --git a/fs/minix/file.c b/fs/minix/file.c
index 906d192ab7f3..dca7ac71f049 100644
--- a/fs/minix/file.c
+++ b/fs/minix/file.c
@@ -17,7 +17,7 @@ const struct file_operations minix_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
+ .mmap_prepare = generic_file_mmap_prepare,
.fsync = generic_file_fsync,
.splice_read = filemap_splice_read,
};
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index f007e389d5d2..51ea9bdc813f 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -26,6 +26,22 @@ static int minix_write_inode(struct inode *inode,
struct writeback_control *wbc);
static int minix_statfs(struct dentry *dentry, struct kstatfs *buf);
+void __minix_error_inode(struct inode *inode, const char *function,
+ unsigned int line, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk(KERN_CRIT "minix-fs error (device %s): %s:%d: "
+ "inode #%lu: comm %s: %pV\n",
+ inode->i_sb->s_id, function, line, inode->i_ino,
+ current->comm, &vaf);
+ va_end(args);
+}
+
static void minix_evict_inode(struct inode *inode)
{
truncate_inode_pages_final(&inode->i_data);
@@ -442,9 +458,10 @@ static void minix_write_failed(struct address_space *mapping, loff_t to)
}
}
-static int minix_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct folio **foliop, void **fsdata)
+static int minix_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
int ret;
@@ -491,8 +508,14 @@ void minix_set_inode(struct inode *inode, dev_t rdev)
inode->i_op = &minix_symlink_inode_operations;
inode_nohighmem(inode);
inode->i_mapping->a_ops = &minix_aops;
- } else
+ } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+ S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
init_special_inode(inode, inode->i_mode, rdev);
+ } else {
+ printk(KERN_DEBUG "MINIX-fs: Invalid file type 0%04o for inode %lu.\n",
+ inode->i_mode, inode->i_ino);
+ make_bad_inode(inode);
+ }
}
/*
@@ -582,7 +605,7 @@ struct inode *minix_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
if (INODE_VERSION(inode) == MINIX_V1)
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index d54273c3c9ff..2bfaf377f208 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -42,6 +42,9 @@ struct minix_sb_info {
unsigned short s_version;
};
+void __minix_error_inode(struct inode *inode, const char *function,
+ unsigned int line, const char *fmt, ...);
+
struct inode *minix_iget(struct super_block *, unsigned long);
struct minix_inode *minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **);
struct minix2_inode *minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
@@ -168,4 +171,10 @@ static inline int minix_test_bit(int nr, const void *vaddr)
#endif
+#define minix_error_inode(inode, fmt, ...) \
+ __minix_error_inode((inode), __func__, __LINE__, \
+ (fmt), ##__VA_ARGS__)
+
+#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
+
#endif /* FS_MINIX_H */
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index 5d9c1406fe27..263e4ba8b1c8 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -104,15 +104,15 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir,
return add_nondir(dentry, inode);
}
-static int minix_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *minix_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode * inode;
int err;
inode = minix_new_inode(dir, S_IFDIR | mode);
if (IS_ERR(inode))
- return PTR_ERR(inode);
+ return ERR_CAST(inode);
inode_inc_link_count(dir);
minix_set_inode(inode, 0);
@@ -128,7 +128,7 @@ static int minix_mkdir(struct mnt_idmap *idmap, struct inode *dir,
d_instantiate(dentry, inode);
out:
- return err;
+ return ERR_PTR(err);
out_fail:
inode_dec_link_count(inode);
@@ -145,6 +145,11 @@ static int minix_unlink(struct inode * dir, struct dentry *dentry)
struct minix_dir_entry * de;
int err;
+ if (inode->i_nlink == 0) {
+ minix_error_inode(inode, "inode has corrupted nlink");
+ return -EFSCORRUPTED;
+ }
+
de = minix_find_entry(dentry, &folio);
if (!de)
return -ENOENT;
@@ -161,15 +166,24 @@ static int minix_unlink(struct inode * dir, struct dentry *dentry)
static int minix_rmdir(struct inode * dir, struct dentry *dentry)
{
struct inode * inode = d_inode(dentry);
- int err = -ENOTEMPTY;
+ int err = -EFSCORRUPTED;
- if (minix_empty_dir(inode)) {
- err = minix_unlink(dir, dentry);
- if (!err) {
- inode_dec_link_count(dir);
- inode_dec_link_count(inode);
- }
+ if (dir->i_nlink <= 2) {
+ minix_error_inode(dir, "inode has corrupted nlink");
+ goto out;
+ }
+
+ err = -ENOTEMPTY;
+ if (!minix_empty_dir(inode))
+ goto out;
+
+ err = minix_unlink(dir, dentry);
+ if (!err) {
+ inode_dec_link_count(dir);
+ inode_dec_link_count(inode);
}
+
+out:
return err;
}
@@ -208,6 +222,17 @@ static int minix_rename(struct mnt_idmap *idmap,
if (dir_de && !minix_empty_dir(new_inode))
goto out_dir;
+ err = -EFSCORRUPTED;
+ if (new_inode->i_nlink == 0 || (dir_de && new_inode->i_nlink != 2)) {
+ minix_error_inode(new_inode, "inode has corrupted nlink");
+ goto out_dir;
+ }
+
+ if (dir_de && old_dir->i_nlink <= 2) {
+ minix_error_inode(old_dir, "inode has corrupted nlink");
+ goto out_dir;
+ }
+
err = -ENOENT;
new_de = minix_find_entry(new_dentry, &new_folio);
if (!new_de)
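The nlink sanity checks added above rely on standard directory link-count accounting; a small illustration (not from the patch):

	/* a healthy directory is linked from its parent's entry, from its own
	 * "." entry, and from the ".." entry of each subdirectory */
	static unsigned int expected_dir_nlink(unsigned int nr_subdirs)
	{
		return 2 + nr_subdirs;
	}

So a parent that still contains a subdirectory must have i_nlink >= 3, which is why minix_rmdir() reports dir->i_nlink <= 2 as corruption instead of returning -ENOTEMPTY.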
diff --git a/fs/mnt_idmapping.c b/fs/mnt_idmapping.c
index 7b1df8cc2821..a37991fdb194 100644
--- a/fs/mnt_idmapping.c
+++ b/fs/mnt_idmapping.c
@@ -6,6 +6,7 @@
#include <linux/mnt_idmapping.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
+#include <linux/seq_file.h>
#include "internal.h"
@@ -334,3 +335,53 @@ void mnt_idmap_put(struct mnt_idmap *idmap)
free_mnt_idmap(idmap);
}
EXPORT_SYMBOL_GPL(mnt_idmap_put);
+
+int statmount_mnt_idmap(struct mnt_idmap *idmap, struct seq_file *seq, bool uid_map)
+{
+ struct uid_gid_map *map, *map_up;
+ u32 idx, nr_mappings;
+
+ if (!is_valid_mnt_idmap(idmap))
+ return 0;
+
+ /*
+ * Idmappings are shown relative to the caller's idmapping.
+ * This is both the most intuitive and most useful solution.
+ */
+ if (uid_map) {
+ map = &idmap->uid_map;
+ map_up = &current_user_ns()->uid_map;
+ } else {
+ map = &idmap->gid_map;
+ map_up = &current_user_ns()->gid_map;
+ }
+
+ for (idx = 0, nr_mappings = 0; idx < map->nr_extents; idx++) {
+ uid_t lower;
+ struct uid_gid_extent *extent;
+
+ if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
+ extent = &map->extent[idx];
+ else
+ extent = &map->forward[idx];
+
+ /*
+ * Verify that the whole range of the mapping can be
+ * resolved in the caller's idmapping. If it cannot be
+ * resolved, skip the mapping.
+ */
+ lower = map_id_range_up(map_up, extent->lower_first, extent->count);
+ if (lower == (uid_t) -1)
+ continue;
+
+ seq_printf(seq, "%u %u %u", extent->first, lower, extent->count);
+
+ seq->count++; /* mappings are separated by \0 */
+ if (seq_has_overflowed(seq))
+ return -EAGAIN;
+
+ nr_mappings++;
+ }
+
+ return nr_mappings;
+}
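statmount_mnt_idmap() emits one "first lower count" triple per resolvable extent and separates entries with a NUL byte. A minimal sketch of parsing such a buffer on the consumer side; how the buffer is obtained (e.g. via statmount(2)) is assumed and outside this hunk:

	#include <stdio.h>
	#include <string.h>

	static void print_idmap(const char *buf, size_t len)
	{
		size_t off = 0;

		while (off < len) {
			unsigned int first, lower, count;

			if (sscanf(buf + off, "%u %u %u", &first, &lower, &count) == 3)
				printf("%u -> %u (count %u)\n", first, lower, count);
			off += strlen(buf + off) + 1;	/* entries are NUL-separated */
		}
	}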
diff --git a/fs/mount.h b/fs/mount.h
index 185fc56afc13..2d28ef2a3aed 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -5,19 +5,29 @@
#include <linux/ns_common.h>
#include <linux/fs_pin.h>
+extern struct list_head notify_list;
+
struct mnt_namespace {
struct ns_common ns;
struct mount * root;
- struct rb_root mounts; /* Protected by namespace_sem */
+ struct {
+ struct rb_root mounts; /* Protected by namespace_sem */
+ struct rb_node *mnt_last_node; /* last (rightmost) mount in the rbtree */
+ struct rb_node *mnt_first_node; /* first (leftmost) mount in the rbtree */
+ };
struct user_namespace *user_ns;
struct ucounts *ucounts;
- u64 seq; /* Sequence number to prevent loops */
- wait_queue_head_t poll;
+ wait_queue_head_t poll;
+ u64 seq_origin; /* Sequence number of origin mount namespace */
u64 event;
+#ifdef CONFIG_FSNOTIFY
+ __u32 n_fsnotify_mask;
+ struct fsnotify_mark_connector __rcu *n_fsnotify_marks;
+#endif
unsigned int nr_mounts; /* # of mounts in the namespace */
unsigned int pending_mounts;
- struct rb_node mnt_ns_tree_node; /* node in the mnt_ns_tree */
refcount_t passive; /* number references not pinning @mounts */
+ bool is_anon;
} __randomize_layout;
struct mnt_pcp {
@@ -29,7 +39,6 @@ struct mountpoint {
struct hlist_node m_hash;
struct dentry *m_dentry;
struct hlist_head m_list;
- int m_count;
};
struct mount {
@@ -38,6 +47,7 @@ struct mount {
struct dentry *mnt_mountpoint;
struct vfsmount mnt;
union {
+ struct rb_node mnt_node; /* node in the ns->mounts rbtree */
struct rcu_head mnt_rcu;
struct llist_node mnt_llist;
};
@@ -49,16 +59,16 @@ struct mount {
#endif
struct list_head mnt_mounts; /* list of children, anchored here */
struct list_head mnt_child; /* and going through their mnt_child */
- struct list_head mnt_instance; /* mount instance on sb->s_mounts */
+ struct mount *mnt_next_for_sb; /* the next two fields are hlist_node, */
+ struct mount * __aligned(1) *mnt_pprev_for_sb;
+ /* except that LSB of pprev is stolen */
+#define WRITE_HOLD 1 /* ... for use by mnt_hold_writers() */
const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
- union {
- struct rb_node mnt_node; /* Under ns->mounts */
- struct list_head mnt_list;
- };
+ struct list_head mnt_list;
struct list_head mnt_expire; /* link in fs-specific expiry list */
struct list_head mnt_share; /* circular list of shared mounts */
- struct list_head mnt_slave_list;/* list of slave mounts */
- struct list_head mnt_slave; /* slave list entry */
+ struct hlist_head mnt_slave_list;/* list of slave mounts */
+ struct hlist_node mnt_slave; /* slave list entry */
struct mount *mnt_master; /* slave is on master->mnt_slave_list */
struct mnt_namespace *mnt_ns; /* containing namespace */
struct mountpoint *mnt_mp; /* where is it mounted */
@@ -66,19 +76,38 @@ struct mount {
struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */
struct hlist_node mnt_umount;
};
- struct list_head mnt_umounting; /* list entry for umount propagation */
#ifdef CONFIG_FSNOTIFY
struct fsnotify_mark_connector __rcu *mnt_fsnotify_marks;
__u32 mnt_fsnotify_mask;
+ struct list_head to_notify; /* need to queue notification */
+ struct mnt_namespace *prev_ns; /* previous namespace (NULL if none) */
#endif
+ int mnt_t_flags; /* namespace_sem-protected flags */
int mnt_id; /* mount identifier, reused */
u64 mnt_id_unique; /* mount ID unique until reboot */
int mnt_group_id; /* peer group identifier */
int mnt_expiry_mark; /* true if marked for expiry */
struct hlist_head mnt_pins;
struct hlist_head mnt_stuck_children;
+ struct mount *overmount; /* mounted on ->mnt_root */
} __randomize_layout;
+enum {
+ T_SHARED = 1, /* mount is shared */
+ T_UNBINDABLE = 2, /* mount is unbindable */
+ T_MARKED = 4, /* internal mark for propagate_... */
+ T_UMOUNT_CANDIDATE = 8, /* for propagate_umount */
+
+ /*
+ * T_SHARED_MASK is the set of flags that should be cleared when a
+ * mount becomes shared. Currently, this is only the flag that says a
+ * mount cannot be bind mounted, since this is how we create a mount
+ * that shares events with another mount. If you add a new T_*
+ * flag, consider how it interacts with shared mounts.
+ */
+ T_SHARED_MASK = T_UNBINDABLE,
+};
+
#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
static inline struct mount *real_mount(struct vfsmount *mnt)
@@ -86,7 +115,7 @@ static inline struct mount *real_mount(struct vfsmount *mnt)
return container_of(mnt, struct mount, mnt);
}
-static inline int mnt_has_parent(struct mount *mnt)
+static inline int mnt_has_parent(const struct mount *mnt)
{
return mnt != mnt->mnt_parent;
}
@@ -118,11 +147,16 @@ static inline void detach_mounts(struct dentry *dentry)
static inline void get_mnt_ns(struct mnt_namespace *ns)
{
- refcount_inc(&ns->ns.count);
+ ns_ref_inc(ns);
}
extern seqlock_t mount_lock;
+DEFINE_LOCK_GUARD_0(mount_writer, write_seqlock(&mount_lock),
+ write_sequnlock(&mount_lock))
+DEFINE_LOCK_GUARD_0(mount_locked_reader, read_seqlock_excl(&mount_lock),
+ read_sequnlock_excl(&mount_lock))
+
struct proc_mounts {
struct mnt_namespace *ns;
struct path root;
@@ -131,8 +165,8 @@ struct proc_mounts {
extern const struct seq_operations mounts_op;
-extern bool __is_local_mountpoint(struct dentry *dentry);
-static inline bool is_local_mountpoint(struct dentry *dentry)
+extern bool __is_local_mountpoint(const struct dentry *dentry);
+static inline bool is_local_mountpoint(const struct dentry *dentry)
{
if (!d_mountpoint(dentry))
return false;
@@ -142,28 +176,90 @@ static inline bool is_local_mountpoint(struct dentry *dentry)
static inline bool is_anon_ns(struct mnt_namespace *ns)
{
- return ns->seq == 0;
+ return ns->is_anon;
+}
+
+static inline bool anon_ns_root(const struct mount *m)
+{
+ struct mnt_namespace *ns = READ_ONCE(m->mnt_ns);
+
+ return !IS_ERR_OR_NULL(ns) && is_anon_ns(ns) && m == ns->root;
}
-static inline void move_from_ns(struct mount *mnt, struct list_head *dt_list)
+static inline bool mnt_ns_attached(const struct mount *mnt)
{
- WARN_ON(!(mnt->mnt.mnt_flags & MNT_ONRB));
- mnt->mnt.mnt_flags &= ~MNT_ONRB;
- rb_erase(&mnt->mnt_node, &mnt->mnt_ns->mounts);
- list_add_tail(&mnt->mnt_list, dt_list);
+ return !RB_EMPTY_NODE(&mnt->mnt_node);
}
-bool has_locked_children(struct mount *mnt, struct dentry *dentry);
-struct mnt_namespace *__lookup_next_mnt_ns(struct mnt_namespace *mnt_ns, bool previous);
-static inline struct mnt_namespace *lookup_next_mnt_ns(struct mnt_namespace *mntns)
+static inline bool mnt_ns_empty(const struct mnt_namespace *ns)
{
- return __lookup_next_mnt_ns(mntns, false);
+ return RB_EMPTY_ROOT(&ns->mounts);
}
-static inline struct mnt_namespace *lookup_prev_mnt_ns(struct mnt_namespace *mntns)
+
+static inline void move_from_ns(struct mount *mnt)
{
- return __lookup_next_mnt_ns(mntns, true);
+ struct mnt_namespace *ns = mnt->mnt_ns;
+ WARN_ON(!mnt_ns_attached(mnt));
+ if (ns->mnt_last_node == &mnt->mnt_node)
+ ns->mnt_last_node = rb_prev(&mnt->mnt_node);
+ if (ns->mnt_first_node == &mnt->mnt_node)
+ ns->mnt_first_node = rb_next(&mnt->mnt_node);
+ rb_erase(&mnt->mnt_node, &ns->mounts);
+ RB_CLEAR_NODE(&mnt->mnt_node);
}
+
+bool has_locked_children(struct mount *mnt, struct dentry *dentry);
+struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mnt_ns,
+ bool previous);
+
static inline struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
return container_of(ns, struct mnt_namespace, ns);
}
+
+#ifdef CONFIG_FSNOTIFY
+static inline void mnt_notify_add(struct mount *m)
+{
+ /* Optimize the case where there are no watches */
+ if ((m->mnt_ns && m->mnt_ns->n_fsnotify_marks) ||
+ (m->prev_ns && m->prev_ns->n_fsnotify_marks))
+ list_add_tail(&m->to_notify, &notify_list);
+ else
+ m->prev_ns = m->mnt_ns;
+}
+#else
+static inline void mnt_notify_add(struct mount *m)
+{
+}
+#endif
+
+static inline struct mount *topmost_overmount(struct mount *m)
+{
+ while (m->overmount)
+ m = m->overmount;
+ return m;
+}
+
+static inline bool __test_write_hold(struct mount * __aligned(1) *val)
+{
+ return (unsigned long)val & WRITE_HOLD;
+}
+
+static inline bool test_write_hold(const struct mount *m)
+{
+ return __test_write_hold(m->mnt_pprev_for_sb);
+}
+
+static inline void set_write_hold(struct mount *m)
+{
+ m->mnt_pprev_for_sb = (void *)((unsigned long)m->mnt_pprev_for_sb
+ | WRITE_HOLD);
+}
+
+static inline void clear_write_hold(struct mount *m)
+{
+ m->mnt_pprev_for_sb = (void *)((unsigned long)m->mnt_pprev_for_sb
+ & ~WRITE_HOLD);
+}
+
+struct mnt_namespace *mnt_ns_from_dentry(struct dentry *dentry);
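The DEFINE_LOCK_GUARD_0() wrappers above pair with guard() from <linux/cleanup.h>, so mount_lock sections become scope-based. A usage sketch with an assumed caller (not taken from the patch):

	static void example_mount_lock_section(struct mount *m)
	{
		guard(mount_locked_reader)();	/* read_seqlock_excl(&mount_lock) */

		if (test_write_hold(m))
			pr_debug("mount %d is held for write\n", m->mnt_id);
	}					/* read_sequnlock_excl() on scope exit */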
diff --git a/fs/mpage.c b/fs/mpage.c
index 82aecf372743..7dae5afc2b9e 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -107,7 +107,7 @@ static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh,
* don't make any buffers if there is only one buffer on
* the folio and the folio just needs to be set up to date
*/
- if (inode->i_blkbits == PAGE_SHIFT &&
+ if (inode->i_blkbits == folio_shift(folio) &&
buffer_uptodate(bh)) {
folio_mark_uptodate(folio);
return;
@@ -148,12 +148,12 @@ struct mpage_readpage_args {
* represent the validity of its disk mapping and to decide when to do the next
* get_block() call.
*/
-static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
+static void do_mpage_readpage(struct mpage_readpage_args *args)
{
struct folio *folio = args->folio;
struct inode *inode = folio->mapping->host;
const unsigned blkbits = inode->i_blkbits;
- const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
+ const unsigned blocks_per_folio = folio_size(folio) >> blkbits;
const unsigned blocksize = 1 << blkbits;
struct buffer_head *map_bh = &args->map_bh;
sector_t block_in_file;
@@ -161,7 +161,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
sector_t last_block_in_file;
sector_t first_block;
unsigned page_block;
- unsigned first_hole = blocks_per_page;
+ unsigned first_hole = blocks_per_folio;
struct block_device *bdev = NULL;
int length;
int fully_mapped = 1;
@@ -170,9 +170,6 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
unsigned relative_block;
gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
- /* MAX_BUF_PER_PAGE, for example */
- VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
-
if (args->is_readahead) {
opf |= REQ_RAHEAD;
gfp |= __GFP_NORETRY | __GFP_NOWARN;
@@ -181,8 +178,8 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
if (folio_buffers(folio))
goto confused;
- block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
- last_block = block_in_file + args->nr_pages * blocks_per_page;
+ block_in_file = folio_pos(folio) >> blkbits;
+ last_block = block_in_file + ((args->nr_pages * PAGE_SIZE) >> blkbits);
last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
if (last_block > last_block_in_file)
last_block = last_block_in_file;
@@ -204,7 +201,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
clear_buffer_mapped(map_bh);
break;
}
- if (page_block == blocks_per_page)
+ if (page_block == blocks_per_folio)
break;
page_block++;
block_in_file++;
@@ -216,7 +213,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
* Then do more get_blocks calls until we are done with this folio.
*/
map_bh->b_folio = folio;
- while (page_block < blocks_per_page) {
+ while (page_block < blocks_per_folio) {
map_bh->b_state = 0;
map_bh->b_size = 0;
@@ -229,7 +226,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
if (!buffer_mapped(map_bh)) {
fully_mapped = 0;
- if (first_hole == blocks_per_page)
+ if (first_hole == blocks_per_folio)
first_hole = page_block;
page_block++;
block_in_file++;
@@ -247,7 +244,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
goto confused;
}
- if (first_hole != blocks_per_page)
+ if (first_hole != blocks_per_folio)
goto confused; /* hole -> non-hole */
/* Contiguous blocks? */
@@ -260,7 +257,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
if (relative_block == nblocks) {
clear_buffer_mapped(map_bh);
break;
- } else if (page_block == blocks_per_page)
+ } else if (page_block == blocks_per_folio)
break;
page_block++;
block_in_file++;
@@ -268,8 +265,8 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
bdev = map_bh->b_bdev;
}
- if (first_hole != blocks_per_page) {
- folio_zero_segment(folio, first_hole << blkbits, PAGE_SIZE);
+ if (first_hole != blocks_per_folio) {
+ folio_zero_segment(folio, first_hole << blkbits, folio_size(folio));
if (first_hole == 0) {
folio_mark_uptodate(folio);
folio_unlock(folio);
@@ -303,12 +300,12 @@ alloc_new:
relative_block = block_in_file - args->first_logical_block;
nblocks = map_bh->b_size >> blkbits;
if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
- (first_hole != blocks_per_page))
+ (first_hole != blocks_per_folio))
args->bio = mpage_bio_submit_read(args->bio);
else
- args->last_block_in_bio = first_block + blocks_per_page - 1;
+ args->last_block_in_bio = first_block + blocks_per_folio - 1;
out:
- return args->bio;
+ return;
confused:
if (args->bio)
@@ -371,7 +368,13 @@ void mpage_readahead(struct readahead_control *rac, get_block_t get_block)
prefetchw(&folio->flags);
args.folio = folio;
args.nr_pages = readahead_count(rac);
- args.bio = do_mpage_readpage(&args);
+ do_mpage_readpage(&args);
+ /*
+ * If readahead failed synchronously, it may be caused by a removed
+ * device or some filesystem metadata error.
+ */
+ if (!folio_test_locked(folio) && !folio_test_uptodate(folio))
+ break;
}
if (args.bio)
mpage_bio_submit_read(args.bio);
@@ -385,11 +388,11 @@ int mpage_read_folio(struct folio *folio, get_block_t get_block)
{
struct mpage_readpage_args args = {
.folio = folio,
- .nr_pages = 1,
+ .nr_pages = folio_nr_pages(folio),
.get_block = get_block,
};
- args.bio = do_mpage_readpage(&args);
+ do_mpage_readpage(&args);
if (args.bio)
mpage_bio_submit_read(args.bio);
return 0;
@@ -448,20 +451,19 @@ static void clean_buffers(struct folio *folio, unsigned first_unmapped)
try_to_free_buffers(folio);
}
-static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
- void *data)
+static int mpage_write_folio(struct writeback_control *wbc, struct folio *folio,
+ struct mpage_data *mpd)
{
- struct mpage_data *mpd = data;
struct bio *bio = mpd->bio;
struct address_space *mapping = folio->mapping;
struct inode *inode = mapping->host;
const unsigned blkbits = inode->i_blkbits;
- const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
+ const unsigned blocks_per_folio = folio_size(folio) >> blkbits;
sector_t last_block;
sector_t block_in_file;
sector_t first_block;
unsigned page_block;
- unsigned first_unmapped = blocks_per_page;
+ unsigned first_unmapped = blocks_per_folio;
struct block_device *bdev = NULL;
int boundary = 0;
sector_t boundary_block = 0;
@@ -486,12 +488,12 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
*/
if (buffer_dirty(bh))
goto confused;
- if (first_unmapped == blocks_per_page)
+ if (first_unmapped == blocks_per_folio)
first_unmapped = page_block;
continue;
}
- if (first_unmapped != blocks_per_page)
+ if (first_unmapped != blocks_per_folio)
goto confused; /* hole -> non-hole */
if (!buffer_dirty(bh) || !buffer_uptodate(bh))
@@ -527,7 +529,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
* The page has no buffers: map it to disk
*/
BUG_ON(!folio_test_uptodate(folio));
- block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
+ block_in_file = folio_pos(folio) >> blkbits;
/*
* Whole page beyond EOF? Skip allocating blocks to avoid leaking
* space.
@@ -536,7 +538,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
goto page_is_mapped;
last_block = (i_size - 1) >> blkbits;
map_bh.b_folio = folio;
- for (page_block = 0; page_block < blocks_per_page; ) {
+ for (page_block = 0; page_block < blocks_per_folio; ) {
map_bh.b_state = 0;
map_bh.b_size = 1 << blkbits;
@@ -618,14 +620,14 @@ alloc_new:
BUG_ON(folio_test_writeback(folio));
folio_start_writeback(folio);
folio_unlock(folio);
- if (boundary || (first_unmapped != blocks_per_page)) {
+ if (boundary || (first_unmapped != blocks_per_folio)) {
bio = mpage_bio_submit_write(bio);
if (boundary_block) {
write_boundary_block(boundary_bdev,
boundary_block, 1 << blkbits);
}
} else {
- mpd->last_block_in_bio = first_block + blocks_per_page - 1;
+ mpd->last_block_in_bio = first_block + blocks_per_folio - 1;
}
goto out;
@@ -659,14 +661,16 @@ mpage_writepages(struct address_space *mapping,
struct mpage_data mpd = {
.get_block = get_block,
};
+ struct folio *folio = NULL;
struct blk_plug plug;
- int ret;
+ int error;
blk_start_plug(&plug);
- ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
+ while ((folio = writeback_iter(mapping, wbc, folio, &error)))
+ error = mpage_write_folio(wbc, folio, &mpd);
if (mpd.bio)
mpage_bio_submit_write(mpd.bio);
blk_finish_plug(&plug);
- return ret;
+ return error;
}
EXPORT_SYMBOL(mpage_writepages);
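The blocks_per_page to blocks_per_folio conversion above matters once large folios reach these paths; a small illustration (not from the patch):

	static unsigned int blocks_in_folio(struct folio *folio, unsigned int blkbits)
	{
		/* a 4 KiB folio with 1 KiB blocks (blkbits == 10) holds 4 blocks;
		 * a 64 KiB large folio holds 64, where the old
		 * PAGE_SIZE >> blkbits would still have said 4 */
		return folio_size(folio) >> blkbits;
	}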
diff --git a/fs/namei.c b/fs/namei.c
index 9d30c7aa9aa6..bf0f66f0e9b9 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -125,6 +125,13 @@
#define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname))
+static inline void initname(struct filename *name, const char __user *uptr)
+{
+ name->uptr = uptr;
+ name->aname = NULL;
+ atomic_set(&name->refcnt, 1);
+}
+
struct filename *
getname_flags(const char __user *filename, int flags)
{
@@ -203,10 +210,7 @@ getname_flags(const char __user *filename, int flags)
return ERR_PTR(-ENAMETOOLONG);
}
}
-
- atomic_set(&result->refcnt, 1);
- result->uptr = filename;
- result->aname = NULL;
+ initname(result, filename);
audit_getname(result);
return result;
}
@@ -218,11 +222,6 @@ struct filename *getname_uflags(const char __user *filename, int uflags)
return getname_flags(filename, flags);
}
-struct filename *getname(const char __user * filename)
-{
- return getname_flags(filename, 0);
-}
-
struct filename *__getname_maybe_null(const char __user *pathname)
{
struct filename *name;
@@ -269,27 +268,29 @@ struct filename *getname_kernel(const char * filename)
return ERR_PTR(-ENAMETOOLONG);
}
memcpy((char *)result->name, filename, len);
- result->uptr = NULL;
- result->aname = NULL;
- atomic_set(&result->refcnt, 1);
+ initname(result, NULL);
audit_getname(result);
-
return result;
}
EXPORT_SYMBOL(getname_kernel);
void putname(struct filename *name)
{
+ int refcnt;
+
if (IS_ERR_OR_NULL(name))
return;
- if (WARN_ON_ONCE(!atomic_read(&name->refcnt)))
- return;
+ refcnt = atomic_read(&name->refcnt);
+ if (unlikely(refcnt != 1)) {
+ if (WARN_ON_ONCE(!refcnt))
+ return;
- if (!atomic_dec_and_test(&name->refcnt))
- return;
+ if (!atomic_dec_and_test(&name->refcnt))
+ return;
+ }
- if (name->name != name->iname) {
+ if (unlikely(name->name != name->iname)) {
__putname(name->name);
kfree(name);
} else
@@ -539,10 +540,13 @@ static inline int do_inode_permission(struct mnt_idmap *idmap,
* @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
*
* Separate out file-system wide checks from inode-specific permission checks.
+ *
+ * Note: lookup_inode_permission_may_exec() does not call into this function.
+ * If you add MAY_EXEC checks here, adjust it as well.
*/
static int sb_permission(struct super_block *sb, struct inode *inode, int mask)
{
- if (unlikely(mask & MAY_WRITE)) {
+ if (mask & MAY_WRITE) {
umode_t mode = inode->i_mode;
/* Nobody gets write access to a read-only fs. */
@@ -570,14 +574,14 @@ int inode_permission(struct mnt_idmap *idmap,
int retval;
retval = sb_permission(inode->i_sb, inode, mask);
- if (retval)
+ if (unlikely(retval))
return retval;
- if (unlikely(mask & MAY_WRITE)) {
+ if (mask & MAY_WRITE) {
/*
* Nobody gets write access to an immutable file.
*/
- if (IS_IMMUTABLE(inode))
+ if (unlikely(IS_IMMUTABLE(inode)))
return -EPERM;
/*
@@ -585,22 +589,58 @@ int inode_permission(struct mnt_idmap *idmap,
* written back improperly if their true value is unknown
* to the vfs.
*/
- if (HAS_UNMAPPED_ID(idmap, inode))
+ if (unlikely(HAS_UNMAPPED_ID(idmap, inode)))
return -EACCES;
}
retval = do_inode_permission(idmap, inode, mask);
- if (retval)
+ if (unlikely(retval))
return retval;
retval = devcgroup_inode_permission(inode, mask);
- if (retval)
+ if (unlikely(retval))
return retval;
return security_inode_permission(inode, mask);
}
EXPORT_SYMBOL(inode_permission);
+/*
+ * lookup_inode_permission_may_exec - Check traversal right for given inode
+ *
+ * This is a special case routine for may_lookup() making assumptions specific
+ * to path traversal. Use inode_permission() if you are doing something else.
+ *
+ * Work is shaved off compared to inode_permission() as follows:
+ * - we know for a fact there is no MAY_WRITE to worry about
+ * - it is an invariant the inode is a directory
+ *
+ * Since the majority of real-world traversal happens on inodes which grant it
+ * to everyone, we check that upfront and only resort to more expensive work
+ * if the check fails.
+ *
+ * Filesystems which have their own ->permission hook and consequently miss out
+ * on IOP_FASTPERM can still get the optimization if they set IOP_FASTPERM_MAY_EXEC
+ * on their directory inodes.
+ */
+static __always_inline int lookup_inode_permission_may_exec(struct mnt_idmap *idmap,
+ struct inode *inode, int mask)
+{
+ /* Lookup already checked this to return -ENOTDIR */
+ VFS_BUG_ON_INODE(!S_ISDIR(inode->i_mode), inode);
+ VFS_BUG_ON((mask & ~MAY_NOT_BLOCK) != 0);
+
+ mask |= MAY_EXEC;
+
+ if (unlikely(!(inode->i_opflags & (IOP_FASTPERM | IOP_FASTPERM_MAY_EXEC))))
+ return inode_permission(idmap, inode, mask);
+
+ if (unlikely(((inode->i_mode & 0111) != 0111) || !no_acl_inode(inode)))
+ return inode_permission(idmap, inode, mask);
+
+ return security_inode_permission(inode, mask);
+}
+
/**
* path_get - get a reference to a path
* @path: path to get the reference to
@@ -745,7 +785,8 @@ static void leave_rcu(struct nameidata *nd)
static void terminate_walk(struct nameidata *nd)
{
- drop_links(nd);
+ if (unlikely(nd->depth))
+ drop_links(nd);
if (!(nd->flags & LOOKUP_RCU)) {
int i;
path_put(&nd->path);
@@ -842,7 +883,7 @@ static bool try_to_unlazy(struct nameidata *nd)
BUG_ON(!(nd->flags & LOOKUP_RCU));
- if (unlikely(!legitimize_links(nd)))
+ if (unlikely(nd->depth && !legitimize_links(nd)))
goto out1;
if (unlikely(!legitimize_path(nd, &nd->path, nd->seq)))
goto out;
@@ -877,7 +918,7 @@ static bool try_to_unlazy_next(struct nameidata *nd, struct dentry *dentry)
int res;
BUG_ON(!(nd->flags & LOOKUP_RCU));
- if (unlikely(!legitimize_links(nd)))
+ if (unlikely(nd->depth && !legitimize_links(nd)))
goto out2;
res = __legitimize_mnt(nd->path.mnt, nd->m_seq);
if (unlikely(res)) {
@@ -921,10 +962,11 @@ out_dput:
return false;
}
-static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
+static inline int d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE))
- return dentry->d_op->d_revalidate(dentry, flags);
+ return dentry->d_op->d_revalidate(dir, name, dentry, flags);
else
return 1;
}
@@ -949,8 +991,8 @@ static int complete_walk(struct nameidata *nd)
* We don't want to zero nd->root for scoped-lookups or
* externally-managed nd->root.
*/
- if (!(nd->state & ND_ROOT_PRESET))
- if (!(nd->flags & LOOKUP_IS_SCOPED))
+ if (likely(!(nd->state & ND_ROOT_PRESET)))
+ if (likely(!(nd->flags & LOOKUP_IS_SCOPED)))
nd->root.mnt = NULL;
nd->flags &= ~LOOKUP_CACHED;
if (!try_to_unlazy(nd))
@@ -1010,10 +1052,10 @@ static int set_root(struct nameidata *nd)
unsigned seq;
do {
- seq = read_seqcount_begin(&fs->seq);
+ seq = read_seqbegin(&fs->seq);
nd->root = fs->root;
nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
- } while (read_seqcount_retry(&fs->seq, seq));
+ } while (read_seqretry(&fs->seq, seq));
} else {
get_fs_root(fs, &nd->root);
nd->state |= ND_ROOT_GRABBED;
@@ -1032,7 +1074,7 @@ static int nd_jump_root(struct nameidata *nd)
}
if (!nd->root.mnt) {
int error = set_root(nd);
- if (error)
+ if (unlikely(error))
return error;
}
if (nd->flags & LOOKUP_RCU) {
@@ -1099,7 +1141,7 @@ static int sysctl_protected_fifos __read_mostly;
static int sysctl_protected_regular __read_mostly;
#ifdef CONFIG_SYSCTL
-static struct ctl_table namei_sysctls[] = {
+static const struct ctl_table namei_sysctls[] = {
{
.procname = "protected_symlinks",
.data = &sysctl_protected_symlinks,
@@ -1447,6 +1489,10 @@ static int follow_automount(struct path *path, int *count, unsigned lookup_flags
dentry->d_inode)
return -EISDIR;
+ /* No need to trigger automounts if mountpoint crossing is disabled. */
+ if (lookup_flags & LOOKUP_NO_XDEV)
+ return -EXDEV;
+
if (count && (*count)++ >= MAXSYMLINKS)
return -ELOOP;
@@ -1467,9 +1513,13 @@ static int __traverse_mounts(struct path *path, unsigned flags, bool *jumped,
int ret = 0;
while (flags & DCACHE_MANAGED_DENTRY) {
- /* Allow the filesystem to manage the transit without i_mutex
+ /* Allow the filesystem to manage the transit without i_rwsem
* being held. */
if (flags & DCACHE_MANAGE_TRANSIT) {
+ if (lookup_flags & LOOKUP_NO_XDEV) {
+ ret = -EXDEV;
+ break;
+ }
ret = path->dentry->d_op->d_manage(path, false);
flags = smp_load_acquire(&path->dentry->d_flags);
if (ret < 0)
@@ -1487,6 +1537,10 @@ static int __traverse_mounts(struct path *path, unsigned flags, bool *jumped,
// here we know it's positive
flags = path->dentry->d_flags;
need_mntput = true;
+ if (unlikely(lookup_flags & LOOKUP_NO_XDEV)) {
+ ret = -EXDEV;
+ break;
+ }
continue;
}
}
@@ -1618,22 +1672,20 @@ static inline int handle_mounts(struct nameidata *nd, struct dentry *dentry,
path->dentry = dentry;
if (nd->flags & LOOKUP_RCU) {
unsigned int seq = nd->next_seq;
+ if (likely(!d_managed(dentry)))
+ return 0;
if (likely(__follow_mount_rcu(nd, path)))
return 0;
// *path and nd->next_seq might've been clobbered
path->mnt = nd->path.mnt;
path->dentry = dentry;
nd->next_seq = seq;
- if (!try_to_unlazy_next(nd, dentry))
+ if (unlikely(!try_to_unlazy_next(nd, dentry)))
return -ECHILD;
}
ret = traverse_mounts(path, &jumped, &nd->total_link_count, nd->flags);
- if (jumped) {
- if (unlikely(nd->flags & LOOKUP_NO_XDEV))
- ret = -EXDEV;
- else
- nd->state |= ND_JUMPED;
- }
+ if (jumped)
+ nd->state |= ND_JUMPED;
if (unlikely(ret)) {
dput(path->dentry);
if (path->mnt != nd->path.mnt)
@@ -1652,7 +1704,7 @@ static struct dentry *lookup_dcache(const struct qstr *name,
{
struct dentry *dentry = d_lookup(dir, name);
if (dentry) {
- int error = d_revalidate(dentry, flags);
+ int error = d_revalidate(dir->d_inode, name, dentry, flags);
if (unlikely(error <= 0)) {
if (!error)
d_invalidate(dentry);
@@ -1669,19 +1721,22 @@ static struct dentry *lookup_dcache(const struct qstr *name,
* dentries - as the matter of fact, this only gets called
* when directory is guaranteed to have no in-lookup children
* at all.
+ * Will return -ENOENT if name isn't found and LOOKUP_CREATE wasn't passed.
+ * Will return -EEXIST if name is found and LOOKUP_EXCL was passed.
*/
struct dentry *lookup_one_qstr_excl(const struct qstr *name,
- struct dentry *base,
- unsigned int flags)
+ struct dentry *base, unsigned int flags)
{
- struct dentry *dentry = lookup_dcache(name, base, flags);
+ struct dentry *dentry;
struct dentry *old;
- struct inode *dir = base->d_inode;
+ struct inode *dir;
+ dentry = lookup_dcache(name, base, flags);
if (dentry)
- return dentry;
+ goto found;
/* Don't create child dentry for a dead directory. */
+ dir = base->d_inode;
if (unlikely(IS_DEADDIR(dir)))
return ERR_PTR(-ENOENT);
@@ -1694,6 +1749,17 @@ struct dentry *lookup_one_qstr_excl(const struct qstr *name,
dput(dentry);
dentry = old;
}
+found:
+ if (IS_ERR(dentry))
+ return dentry;
+ if (d_is_negative(dentry) && !(flags & LOOKUP_CREATE)) {
+ dput(dentry);
+ return ERR_PTR(-ENOENT);
+ }
+ if (d_is_positive(dentry) && (flags & LOOKUP_EXCL)) {
+ dput(dentry);
+ return ERR_PTR(-EEXIST);
+ }
return dentry;
}
EXPORT_SYMBOL(lookup_one_qstr_excl);
@@ -1737,19 +1803,20 @@ static struct dentry *lookup_fast(struct nameidata *nd)
if (read_seqcount_retry(&parent->d_seq, nd->seq))
return ERR_PTR(-ECHILD);
- status = d_revalidate(dentry, nd->flags);
+ status = d_revalidate(nd->inode, &nd->last, dentry, nd->flags);
if (likely(status > 0))
return dentry;
if (!try_to_unlazy_next(nd, dentry))
return ERR_PTR(-ECHILD);
if (status == -ECHILD)
/* we'd been told to redo it in non-rcu mode */
- status = d_revalidate(dentry, nd->flags);
+ status = d_revalidate(nd->inode, &nd->last,
+ dentry, nd->flags);
} else {
dentry = __d_lookup(parent, &nd->last);
if (unlikely(!dentry))
return NULL;
- status = d_revalidate(dentry, nd->flags);
+ status = d_revalidate(nd->inode, &nd->last, dentry, nd->flags);
}
if (unlikely(status <= 0)) {
if (!status)
@@ -1777,7 +1844,7 @@ again:
if (IS_ERR(dentry))
return dentry;
if (unlikely(!d_in_lookup(dentry))) {
- int error = d_revalidate(dentry, flags);
+ int error = d_revalidate(inode, name, dentry, flags);
if (unlikely(error <= 0)) {
if (!error) {
d_invalidate(dentry);
@@ -1798,7 +1865,7 @@ again:
return dentry;
}
-static struct dentry *lookup_slow(const struct qstr *name,
+static noinline struct dentry *lookup_slow(const struct qstr *name,
struct dentry *dir,
unsigned int flags)
{
@@ -1810,13 +1877,27 @@ static struct dentry *lookup_slow(const struct qstr *name,
return res;
}
+static struct dentry *lookup_slow_killable(const struct qstr *name,
+ struct dentry *dir,
+ unsigned int flags)
+{
+ struct inode *inode = dir->d_inode;
+ struct dentry *res;
+
+ if (inode_lock_shared_killable(inode))
+ return ERR_PTR(-EINTR);
+ res = __lookup_slow(name, dir, flags);
+ inode_unlock_shared(inode);
+ return res;
+}
+
static inline int may_lookup(struct mnt_idmap *idmap,
struct nameidata *restrict nd)
{
int err, mask;
mask = nd->flags & LOOKUP_RCU ? MAY_NOT_BLOCK : 0;
- err = inode_permission(idmap, nd->inode, mask | MAY_EXEC);
+ err = lookup_inode_permission_may_exec(idmap, nd->inode, mask);
if (likely(!err))
return 0;
@@ -1831,7 +1912,7 @@ static inline int may_lookup(struct mnt_idmap *idmap,
if (err != -ECHILD) // hard error
return err;
- return inode_permission(idmap, nd->inode, MAY_EXEC);
+ return lookup_inode_permission_may_exec(idmap, nd->inode, 0);
}
static int reserve_stack(struct nameidata *nd, struct path *link)
@@ -1862,13 +1943,23 @@ static int reserve_stack(struct nameidata *nd, struct path *link)
enum {WALK_TRAILING = 1, WALK_MORE = 2, WALK_NOFOLLOW = 4};
-static const char *pick_link(struct nameidata *nd, struct path *link,
+static noinline const char *pick_link(struct nameidata *nd, struct path *link,
struct inode *inode, int flags)
{
struct saved *last;
const char *res;
- int error = reserve_stack(nd, link);
+ int error;
+ if (nd->flags & LOOKUP_RCU) {
+ /* make sure that d_is_symlink from step_into_slowpath() matches the inode */
+ if (read_seqcount_retry(&link->dentry->d_seq, nd->next_seq))
+ return ERR_PTR(-ECHILD);
+ } else {
+ if (link->mnt == nd->path.mnt)
+ mntget(link->mnt);
+ }
+
+ error = reserve_stack(nd, link);
if (unlikely(error)) {
if (!(nd->flags & LOOKUP_RCU))
path_put(link);
@@ -1889,13 +1980,13 @@ static const char *pick_link(struct nameidata *nd, struct path *link,
unlikely(link->mnt->mnt_flags & MNT_NOSYMFOLLOW))
return ERR_PTR(-ELOOP);
- if (!(nd->flags & LOOKUP_RCU)) {
+ if (unlikely(atime_needs_update(&last->link, inode))) {
+ if (nd->flags & LOOKUP_RCU) {
+ if (!try_to_unlazy(nd))
+ return ERR_PTR(-ECHILD);
+ }
touch_atime(&last->link);
cond_resched();
- } else if (atime_needs_update(&last->link, inode)) {
- if (!try_to_unlazy(nd))
- return ERR_PTR(-ECHILD);
- touch_atime(&last->link);
}
error = security_inode_follow_link(link->dentry, inode,
@@ -1942,14 +2033,15 @@ all_done: // pure jump
*
* NOTE: dentry must be what nd->next_seq had been sampled from.
*/
-static const char *step_into(struct nameidata *nd, int flags,
+static noinline const char *step_into_slowpath(struct nameidata *nd, int flags,
struct dentry *dentry)
{
struct path path;
struct inode *inode;
- int err = handle_mounts(nd, dentry, &path);
+ int err;
- if (err < 0)
+ err = handle_mounts(nd, dentry, &path);
+ if (unlikely(err < 0))
return ERR_PTR(err);
inode = path.dentry->d_inode;
if (likely(!d_is_symlink(path.dentry)) ||
@@ -1971,15 +2063,32 @@ static const char *step_into(struct nameidata *nd, int flags,
nd->seq = nd->next_seq;
return NULL;
}
- if (nd->flags & LOOKUP_RCU) {
- /* make sure that d_is_symlink above matches inode */
- if (read_seqcount_retry(&path.dentry->d_seq, nd->next_seq))
+ return pick_link(nd, &path, inode, flags);
+}
+
+static __always_inline const char *step_into(struct nameidata *nd, int flags,
+ struct dentry *dentry)
+{
+ /*
+ * In the common case we are in rcu-walk and stepping into a directory that is
+ * not mounted on (as opposed to, e.g., a symlink).
+ *
+ * We can handle that and negative entries with the checks below.
+ */
+ if (likely((nd->flags & LOOKUP_RCU) &&
+ !d_managed(dentry) && !d_is_symlink(dentry))) {
+ struct inode *inode = dentry->d_inode;
+ if (read_seqcount_retry(&dentry->d_seq, nd->next_seq))
return ERR_PTR(-ECHILD);
- } else {
- if (path.mnt == nd->path.mnt)
- mntget(path.mnt);
+ if (unlikely(!inode))
+ return ERR_PTR(-ENOENT);
+ nd->path.dentry = dentry;
+ /* nd->path.mnt is retained on purpose */
+ nd->inode = inode;
+ nd->seq = nd->next_seq;
+ return NULL;
}
- return pick_link(nd, &path, inode, flags);
+ return step_into_slowpath(nd, flags, dentry);
}
static struct dentry *follow_dotdot_rcu(struct nameidata *nd)
@@ -2062,7 +2171,7 @@ static const char *handle_dots(struct nameidata *nd, int type)
if (!nd->root.mnt) {
error = ERR_PTR(set_root(nd));
- if (error)
+ if (unlikely(error))
return error;
}
if (nd->flags & LOOKUP_RCU)
@@ -2092,7 +2201,7 @@ static const char *handle_dots(struct nameidata *nd, int type)
return NULL;
}
-static const char *walk_component(struct nameidata *nd, int flags)
+static __always_inline const char *walk_component(struct nameidata *nd, int flags)
{
struct dentry *dentry;
/*
@@ -2101,7 +2210,7 @@ static const char *walk_component(struct nameidata *nd, int flags)
* parent relationships.
*/
if (unlikely(nd->last_type != LAST_NORM)) {
- if (!(flags & WALK_MORE) && nd->depth)
+ if (unlikely(nd->depth) && !(flags & WALK_MORE))
put_link(nd);
return handle_dots(nd, nd->last_type);
}
@@ -2113,7 +2222,7 @@ static const char *walk_component(struct nameidata *nd, int flags)
if (IS_ERR(dentry))
return ERR_CAST(dentry);
}
- if (!(flags & WALK_MORE) && nd->depth)
+ if (unlikely(nd->depth) && !(flags & WALK_MORE))
put_link(nd);
return step_into(nd, flags, dentry);
}
@@ -2408,9 +2517,12 @@ static int link_path_walk(const char *name, struct nameidata *nd)
nd->flags |= LOOKUP_PARENT;
if (IS_ERR(name))
return PTR_ERR(name);
- while (*name=='/')
- name++;
- if (!*name) {
+ if (*name == '/') {
+ do {
+ name++;
+ } while (unlikely(*name == '/'));
+ }
+ if (unlikely(!*name)) {
nd->dir_mode = 0; // short-circuit the 'hardening' idiocy
return 0;
}
@@ -2423,7 +2535,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
idmap = mnt_idmap(nd->path.mnt);
err = may_lookup(idmap, nd);
- if (err)
+ if (unlikely(err))
return err;
nd->last.name = name;
@@ -2463,7 +2575,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
if (unlikely(!*name)) {
OK:
/* pathname or trailing symlink, done */
- if (!depth) {
+ if (likely(!depth)) {
nd->dir_vfsuid = i_uid_into_vfsuid(idmap, nd->inode);
nd->dir_mode = nd->inode->i_mode;
nd->flags &= ~LOOKUP_PARENT;
@@ -2501,10 +2613,10 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
const char *s = nd->pathname;
/* LOOKUP_CACHED requires RCU, ask caller to retry */
- if ((flags & (LOOKUP_RCU | LOOKUP_CACHED)) == LOOKUP_CACHED)
+ if (unlikely((flags & (LOOKUP_RCU | LOOKUP_CACHED)) == LOOKUP_CACHED))
return ERR_PTR(-EAGAIN);
- if (!*s)
+ if (unlikely(!*s))
flags &= ~LOOKUP_RCU;
if (flags & LOOKUP_RCU)
rcu_read_lock();
@@ -2518,7 +2630,7 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
nd->r_seq = __read_seqcount_begin(&rename_lock.seqcount);
smp_rmb();
- if (nd->state & ND_ROOT_PRESET) {
+ if (unlikely(nd->state & ND_ROOT_PRESET)) {
struct dentry *root = nd->root.dentry;
struct inode *inode = root->d_inode;
if (*s && unlikely(!d_can_lookup(root)))
@@ -2537,7 +2649,7 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
nd->root.mnt = NULL;
/* Absolute pathname -- fetch the root (LOOKUP_IN_ROOT uses nd->dfd). */
- if (*s == '/' && !(flags & LOOKUP_IN_ROOT)) {
+ if (*s == '/' && likely(!(flags & LOOKUP_IN_ROOT))) {
error = nd_jump_root(nd);
if (unlikely(error))
return ERR_PTR(error);
@@ -2551,11 +2663,11 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
unsigned seq;
do {
- seq = read_seqcount_begin(&fs->seq);
+ seq = read_seqbegin(&fs->seq);
nd->path = fs->pwd;
nd->inode = nd->path.dentry->d_inode;
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
- } while (read_seqcount_retry(&fs->seq, seq));
+ } while (read_seqretry(&fs->seq, seq));
} else {
get_fs_pwd(current->fs, &nd->path);
nd->inode = nd->path.dentry->d_inode;
@@ -2590,7 +2702,7 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
}
/* For scoped-lookups we need to set the root to the dirfd as well. */
- if (flags & LOOKUP_IS_SCOPED) {
+ if (unlikely(flags & LOOKUP_IS_SCOPED)) {
nd->root = nd->path;
if (flags & LOOKUP_RCU) {
nd->root_seq = nd->seq;
@@ -2653,7 +2765,7 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
}
int filename_lookup(int dfd, struct filename *name, unsigned flags,
- struct path *path, struct path *root)
+ struct path *path, const struct path *root)
{
int retval;
struct nameidata nd;
@@ -2723,47 +2835,151 @@ static int filename_parentat(int dfd, struct filename *name,
return __filename_parentat(dfd, name, flags, parent, last, type, NULL);
}
+/**
+ * start_dirop - begin a create or remove dirop, performing locking and lookup
+ * @parent: the dentry of the parent in which the operation will occur
+ * @name: a qstr holding the name within that parent
+ * @lookup_flags: intent and other lookup flags.
+ *
+ * The lookup is performed and necessary locks are taken so that, on success,
+ * the returned dentry can be operated on safely.
+ * The qstr must already have the hash value calculated.
+ *
+ * Returns: a dentry (with its parent locked), or an error.
+ */
+static struct dentry *__start_dirop(struct dentry *parent, struct qstr *name,
+ unsigned int lookup_flags,
+ unsigned int state)
+{
+ struct dentry *dentry;
+ struct inode *dir = d_inode(parent);
+
+ if (state == TASK_KILLABLE) {
+ int ret = down_write_killable_nested(&dir->i_rwsem,
+ I_MUTEX_PARENT);
+ if (ret)
+ return ERR_PTR(ret);
+ } else {
+ inode_lock_nested(dir, I_MUTEX_PARENT);
+ }
+ dentry = lookup_one_qstr_excl(name, parent, lookup_flags);
+ if (IS_ERR(dentry))
+ inode_unlock(dir);
+ return dentry;
+}
+
+struct dentry *start_dirop(struct dentry *parent, struct qstr *name,
+ unsigned int lookup_flags)
+{
+ return __start_dirop(parent, name, lookup_flags, TASK_NORMAL);
+}
+
+/**
+ * end_dirop - signal completion of a dirop
+ * @de: the dentry which was returned by start_dirop or similar.
+ *
+ * If @de is an error, nothing happens.  Otherwise any lock taken to
+ * protect the dentry is dropped and the dentry itself is released (dput()).
+ */
+void end_dirop(struct dentry *de)
+{
+ if (!IS_ERR(de)) {
+ inode_unlock(de->d_parent->d_inode);
+ dput(de);
+ }
+}
+EXPORT_SYMBOL(end_dirop);
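
[Editor's note: a minimal sketch, not part of the patch, of how a caller pairs start_dirop() with end_dirop(). The helper name example_remove and the literal name "victim" are hypothetical; the qstr hash is pre-computed with full_name_hash() as lookup_noperm_common() does, write access to the mount is assumed to be held, and NULL is passed for the delegation argument, which the vfs_unlink() kernel-doc further down still permits.]

static int example_remove(struct mnt_idmap *idmap, struct dentry *parent)
{
	struct qstr name = QSTR_INIT("victim", 6);
	struct dentry *child;
	int err;

	/* start_dirop() expects the hash to already be computed */
	name.hash = full_name_hash(parent, name.name, name.len);

	/* locks parent->d_inode and performs the exclusive lookup */
	child = start_dirop(parent, &name, 0);
	if (IS_ERR(child))
		return PTR_ERR(child);

	err = vfs_unlink(idmap, d_inode(parent), child, NULL);

	/* unlocks the parent and drops the dentry reference */
	end_dirop(child);
	return err;
}
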
+
/* does lookup, returns the object with parent locked */
-static struct dentry *__kern_path_locked(int dfd, struct filename *name, struct path *path)
+static struct dentry *__start_removing_path(int dfd, struct filename *name,
+ struct path *path)
{
+ struct path parent_path __free(path_put) = {};
struct dentry *d;
struct qstr last;
int type, error;
- error = filename_parentat(dfd, name, 0, path, &last, &type);
+ error = filename_parentat(dfd, name, 0, &parent_path, &last, &type);
if (error)
return ERR_PTR(error);
- if (unlikely(type != LAST_NORM)) {
- path_put(path);
+ if (unlikely(type != LAST_NORM))
return ERR_PTR(-EINVAL);
- }
- inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
- d = lookup_one_qstr_excl(&last, path->dentry, 0);
- if (IS_ERR(d)) {
- inode_unlock(path->dentry->d_inode);
- path_put(path);
- }
+ /* don't fail immediately if it's r/o, at least try to report other errors */
+ error = mnt_want_write(parent_path.mnt);
+ d = start_dirop(parent_path.dentry, &last, 0);
+ if (IS_ERR(d))
+ goto drop;
+ if (error)
+ goto fail;
+ path->dentry = no_free_ptr(parent_path.dentry);
+ path->mnt = no_free_ptr(parent_path.mnt);
+ return d;
+
+fail:
+ end_dirop(d);
+ d = ERR_PTR(error);
+drop:
+ if (!error)
+ mnt_drop_write(parent_path.mnt);
+ return d;
+}
+
+/**
+ * kern_path_parent: lookup path returning parent and target
+ * @name: path name
+ * @path: path to store parent in
+ *
+ * The path @name should end with a normal component, not "." or ".." or "/".
+ * A lookup is performed and, if successful, the parent information
+ * is stored in @path and the dentry is returned.
+ *
+ * The dentry may be negative; the parent will be positive.
+ *
+ * Returns: dentry or error.
+ */
+struct dentry *kern_path_parent(const char *name, struct path *path)
+{
+ struct path parent_path __free(path_put) = {};
+ struct filename *filename __free(putname) = getname_kernel(name);
+ struct dentry *d;
+ struct qstr last;
+ int type, error;
+
+ error = filename_parentat(AT_FDCWD, filename, 0, &parent_path, &last, &type);
+ if (error)
+ return ERR_PTR(error);
+ if (unlikely(type != LAST_NORM))
+ return ERR_PTR(-EINVAL);
+
+ d = lookup_noperm_unlocked(&last, parent_path.dentry);
+ if (IS_ERR(d))
+ return d;
+ path->dentry = no_free_ptr(parent_path.dentry);
+ path->mnt = no_free_ptr(parent_path.mnt);
return d;
}
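
[Editor's note: a short sketch, not part of the patch, of how a kernel caller might use the new kern_path_parent(). The helper name example_parent_lookup is hypothetical; no locks are held on return, so only the references need dropping.]

static int example_parent_lookup(const char *pathname)
{
	struct path parent;
	struct dentry *child;

	child = kern_path_parent(pathname, &parent);
	if (IS_ERR(child))
		return PTR_ERR(child);

	/* child may be negative; parent.dentry is positive */
	if (d_is_negative(child))
		pr_debug("%s does not exist yet\n", pathname);

	dput(child);
	path_put(&parent);
	return 0;
}
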
-struct dentry *kern_path_locked(const char *name, struct path *path)
+struct dentry *start_removing_path(const char *name, struct path *path)
{
struct filename *filename = getname_kernel(name);
- struct dentry *res = __kern_path_locked(AT_FDCWD, filename, path);
+ struct dentry *res = __start_removing_path(AT_FDCWD, filename, path);
putname(filename);
return res;
}
-struct dentry *user_path_locked_at(int dfd, const char __user *name, struct path *path)
+struct dentry *start_removing_user_path_at(int dfd,
+ const char __user *name,
+ struct path *path)
{
struct filename *filename = getname(name);
- struct dentry *res = __kern_path_locked(dfd, filename, path);
+ struct dentry *res = __start_removing_path(dfd, filename, path);
putname(filename);
return res;
}
-EXPORT_SYMBOL(user_path_locked_at);
+EXPORT_SYMBOL(start_removing_user_path_at);
int kern_path(const char *name, unsigned int flags, struct path *path)
{
@@ -2818,13 +3034,12 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
}
EXPORT_SYMBOL(vfs_path_lookup);
-static int lookup_one_common(struct mnt_idmap *idmap,
- const char *name, struct dentry *base, int len,
- struct qstr *this)
+int lookup_noperm_common(struct qstr *qname, struct dentry *base)
{
- this->name = name;
- this->len = len;
- this->hash = full_name_hash(base, name, len);
+ const char *name = qname->name;
+ u32 len = qname->len;
+
+ qname->hash = full_name_hash(base, name, len);
if (!len)
return -EACCES;
@@ -2841,140 +3056,136 @@ static int lookup_one_common(struct mnt_idmap *idmap,
* to use its own hash..
*/
if (base->d_flags & DCACHE_OP_HASH) {
- int err = base->d_op->d_hash(base, this);
+ int err = base->d_op->d_hash(base, qname);
if (err < 0)
return err;
}
+ return 0;
+}
+static int lookup_one_common(struct mnt_idmap *idmap,
+ struct qstr *qname, struct dentry *base)
+{
+ int err;
+ err = lookup_noperm_common(qname, base);
+ if (err < 0)
+ return err;
return inode_permission(idmap, base->d_inode, MAY_EXEC);
}
/**
- * try_lookup_one_len - filesystem helper to lookup single pathname component
- * @name: pathname component to lookup
+ * try_lookup_noperm - filesystem helper to lookup single pathname component
+ * @name: qstr storing pathname component to lookup
* @base: base directory to lookup from
- * @len: maximum length @len should be interpreted to
*
* Look up a dentry by name in the dcache, returning NULL if it does not
- * currently exist. The function does not try to create a dentry.
+ * currently exist. The function does not try to create a dentry and if one
+ * is found it doesn't try to revalidate it.
*
* Note that this routine is purely a helper for filesystem usage and should
- * not be called by generic code.
+ * not be called by generic code. It does no permission checking.
+ *
+ * No locks need be held - only a counted reference to @base is needed.
*
- * The caller must hold base->i_mutex.
*/
-struct dentry *try_lookup_one_len(const char *name, struct dentry *base, int len)
+struct dentry *try_lookup_noperm(struct qstr *name, struct dentry *base)
{
- struct qstr this;
int err;
- WARN_ON_ONCE(!inode_is_locked(base->d_inode));
-
- err = lookup_one_common(&nop_mnt_idmap, name, base, len, &this);
+ err = lookup_noperm_common(name, base);
if (err)
return ERR_PTR(err);
- return lookup_dcache(&this, base, 0);
+ return d_lookup(base, name);
}
-EXPORT_SYMBOL(try_lookup_one_len);
+EXPORT_SYMBOL(try_lookup_noperm);
/**
- * lookup_one_len - filesystem helper to lookup single pathname component
- * @name: pathname component to lookup
+ * lookup_noperm - filesystem helper to lookup single pathname component
+ * @name: qstr storing pathname component to lookup
* @base: base directory to lookup from
- * @len: maximum length @len should be interpreted to
*
* Note that this routine is purely a helper for filesystem usage and should
- * not be called by generic code.
+ * not be called by generic code. It does no permission checking.
*
- * The caller must hold base->i_mutex.
+ * The caller must hold base->i_rwsem.
*/
-struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
+struct dentry *lookup_noperm(struct qstr *name, struct dentry *base)
{
struct dentry *dentry;
- struct qstr this;
int err;
WARN_ON_ONCE(!inode_is_locked(base->d_inode));
- err = lookup_one_common(&nop_mnt_idmap, name, base, len, &this);
+ err = lookup_noperm_common(name, base);
if (err)
return ERR_PTR(err);
- dentry = lookup_dcache(&this, base, 0);
- return dentry ? dentry : __lookup_slow(&this, base, 0);
+ dentry = lookup_dcache(name, base, 0);
+ return dentry ? dentry : __lookup_slow(name, base, 0);
}
-EXPORT_SYMBOL(lookup_one_len);
+EXPORT_SYMBOL(lookup_noperm);
/**
- * lookup_one - filesystem helper to lookup single pathname component
+ * lookup_one - lookup single pathname component
* @idmap: idmap of the mount the lookup is performed from
- * @name: pathname component to lookup
+ * @name: qstr holding pathname component to lookup
* @base: base directory to lookup from
- * @len: maximum length @len should be interpreted to
*
- * Note that this routine is purely a helper for filesystem usage and should
- * not be called by generic code.
+ * This can be used for in-kernel filesystem clients such as file servers.
*
- * The caller must hold base->i_mutex.
+ * The caller must hold base->i_rwsem.
*/
-struct dentry *lookup_one(struct mnt_idmap *idmap, const char *name,
- struct dentry *base, int len)
+struct dentry *lookup_one(struct mnt_idmap *idmap, struct qstr *name,
+ struct dentry *base)
{
struct dentry *dentry;
- struct qstr this;
int err;
WARN_ON_ONCE(!inode_is_locked(base->d_inode));
- err = lookup_one_common(idmap, name, base, len, &this);
+ err = lookup_one_common(idmap, name, base);
if (err)
return ERR_PTR(err);
- dentry = lookup_dcache(&this, base, 0);
- return dentry ? dentry : __lookup_slow(&this, base, 0);
+ dentry = lookup_dcache(name, base, 0);
+ return dentry ? dentry : __lookup_slow(name, base, 0);
}
EXPORT_SYMBOL(lookup_one);
/**
- * lookup_one_unlocked - filesystem helper to lookup single pathname component
+ * lookup_one_unlocked - lookup single pathname component
* @idmap: idmap of the mount the lookup is performed from
- * @name: pathname component to lookup
+ * @name: qstr holding pathname component to lookup
* @base: base directory to lookup from
- * @len: maximum length @len should be interpreted to
*
- * Note that this routine is purely a helper for filesystem usage and should
- * not be called by generic code.
+ * This can be used for in-kernel filesystem clients such as file servers.
*
- * Unlike lookup_one_len, it should be called without the parent
- * i_mutex held, and will take the i_mutex itself if necessary.
+ * Unlike lookup_one, it should be called without the parent
+ * i_rwsem held, and will take the i_rwsem itself if necessary.
*/
-struct dentry *lookup_one_unlocked(struct mnt_idmap *idmap,
- const char *name, struct dentry *base,
- int len)
+struct dentry *lookup_one_unlocked(struct mnt_idmap *idmap, struct qstr *name,
+ struct dentry *base)
{
- struct qstr this;
int err;
struct dentry *ret;
- err = lookup_one_common(idmap, name, base, len, &this);
+ err = lookup_one_common(idmap, name, base);
if (err)
return ERR_PTR(err);
- ret = lookup_dcache(&this, base, 0);
+ ret = lookup_dcache(name, base, 0);
if (!ret)
- ret = lookup_slow(&this, base, 0);
+ ret = lookup_slow(name, base, 0);
return ret;
}
EXPORT_SYMBOL(lookup_one_unlocked);
/**
- * lookup_one_positive_unlocked - filesystem helper to lookup single
- * pathname component
+ * lookup_one_positive_killable - lookup single pathname component
* @idmap: idmap of the mount the lookup is performed from
- * @name: pathname component to lookup
+ * @name: qstr holding pathname component to lookup
* @base: base directory to lookup from
- * @len: maximum length @len should be interpreted to
*
* This helper will yield ERR_PTR(-ENOENT) on negatives. The helper returns
* known positive or ERR_PTR(). This is what most of the users want.
@@ -2983,16 +3194,56 @@ EXPORT_SYMBOL(lookup_one_unlocked);
* time, so callers of lookup_one_unlocked() need to be very careful; pinned
 * positives have ->d_inode stable, so this one avoids such problems.
*
- * Note that this routine is purely a helper for filesystem usage and should
- * not be called by generic code.
+ * This can be used for in-kernel filesystem clients such as file servers.
*
- * The helper should be called without i_mutex held.
+ * It should be called without the parent i_rwsem held, and will take
+ * the i_rwsem itself if necessary. If a fatal signal is pending or
+ * delivered, it will return %-EINTR if the lock is needed.
+ */
+struct dentry *lookup_one_positive_killable(struct mnt_idmap *idmap,
+ struct qstr *name,
+ struct dentry *base)
+{
+ int err;
+ struct dentry *ret;
+
+ err = lookup_one_common(idmap, name, base);
+ if (err)
+ return ERR_PTR(err);
+
+ ret = lookup_dcache(name, base, 0);
+ if (!ret)
+ ret = lookup_slow_killable(name, base, 0);
+ if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
+ dput(ret);
+ ret = ERR_PTR(-ENOENT);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(lookup_one_positive_killable);
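
[Editor's note: for contrast with the _unlocked variant below, a sketch of the killable lookup. The wrapper name example_lookup_killable and its use of strlen() on a NUL-terminated component are assumptions.]

static struct dentry *example_lookup_killable(struct mnt_idmap *idmap,
					      struct dentry *base,
					      const char *component)
{
	struct qstr q = QSTR_INIT(component, strlen(component));

	/*
	 * Returns a positive dentry, ERR_PTR(-ENOENT) for a negative one,
	 * or ERR_PTR(-EINTR) if a fatal signal arrives while the parent
	 * i_rwsem is needed for a slow lookup.
	 */
	return lookup_one_positive_killable(idmap, &q, base);
}
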
+
+/**
+ * lookup_one_positive_unlocked - lookup single pathname component
+ * @idmap: idmap of the mount the lookup is performed from
+ * @name: qstr holding pathname component to lookup
+ * @base: base directory to lookup from
+ *
+ * This helper will yield ERR_PTR(-ENOENT) on negatives. The helper returns
+ * known positive or ERR_PTR(). This is what most of the users want.
+ *
+ * Note that pinned negative with unlocked parent _can_ become positive at any
+ * time, so callers of lookup_one_unlocked() need to be very careful; pinned
+ * positives have ->d_inode stable, so this one avoids such problems.
+ *
+ * This can be used for in-kernel filesystem clients such as file servers.
+ *
+ * The helper should be called without i_rwsem held.
*/
struct dentry *lookup_one_positive_unlocked(struct mnt_idmap *idmap,
- const char *name,
- struct dentry *base, int len)
+ struct qstr *name,
+ struct dentry *base)
{
- struct dentry *ret = lookup_one_unlocked(idmap, name, base, len);
+ struct dentry *ret = lookup_one_unlocked(idmap, name, base);
if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
dput(ret);
@@ -3003,38 +3254,284 @@ struct dentry *lookup_one_positive_unlocked(struct mnt_idmap *idmap,
EXPORT_SYMBOL(lookup_one_positive_unlocked);
/**
- * lookup_one_len_unlocked - filesystem helper to lookup single pathname component
+ * lookup_noperm_unlocked - filesystem helper to lookup single pathname component
* @name: pathname component to lookup
* @base: base directory to lookup from
- * @len: maximum length @len should be interpreted to
*
* Note that this routine is purely a helper for filesystem usage and should
- * not be called by generic code.
+ * not be called by generic code. It does no permission checking.
*
- * Unlike lookup_one_len, it should be called without the parent
- * i_mutex held, and will take the i_mutex itself if necessary.
+ * Unlike lookup_noperm(), it should be called without the parent
+ * i_rwsem held, and will take the i_rwsem itself if necessary.
+ *
+ * Unlike try_lookup_noperm() it *does* revalidate the dentry if it already
+ * existed.
*/
-struct dentry *lookup_one_len_unlocked(const char *name,
- struct dentry *base, int len)
+struct dentry *lookup_noperm_unlocked(struct qstr *name, struct dentry *base)
{
- return lookup_one_unlocked(&nop_mnt_idmap, name, base, len);
+ struct dentry *ret;
+ int err;
+
+ err = lookup_noperm_common(name, base);
+ if (err)
+ return ERR_PTR(err);
+
+ ret = lookup_dcache(name, base, 0);
+ if (!ret)
+ ret = lookup_slow(name, base, 0);
+ return ret;
}
-EXPORT_SYMBOL(lookup_one_len_unlocked);
+EXPORT_SYMBOL(lookup_noperm_unlocked);
/*
- * Like lookup_one_len_unlocked(), except that it yields ERR_PTR(-ENOENT)
+ * Like lookup_noperm_unlocked(), except that it yields ERR_PTR(-ENOENT)
* on negatives. Returns known positive or ERR_PTR(); that's what
* most of the users want. Note that pinned negative with unlocked parent
- * _can_ become positive at any time, so callers of lookup_one_len_unlocked()
+ * _can_ become positive at any time, so callers of lookup_noperm_unlocked()
* need to be very careful; pinned positives have ->d_inode stable, so
* this one avoids such problems.
*/
-struct dentry *lookup_positive_unlocked(const char *name,
- struct dentry *base, int len)
+struct dentry *lookup_noperm_positive_unlocked(struct qstr *name,
+ struct dentry *base)
+{
+ struct dentry *ret;
+
+ ret = lookup_noperm_unlocked(name, base);
+ if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
+ dput(ret);
+ ret = ERR_PTR(-ENOENT);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(lookup_noperm_positive_unlocked);
+
+/**
+ * start_creating - prepare to create a given name with permission checking
+ * @idmap: idmap of the mount
+ * @parent: directory in which to prepare to create the name
+ * @name: the name to be created
+ *
+ * Locks are taken and a lookup is performed prior to creating
+ * an object in a directory. Permission checking (MAY_EXEC) is performed
+ * against @idmap.
+ *
+ * If the name already exists, a positive dentry is returned, so
+ * behaviour is similar to O_CREAT without O_EXCL, which doesn't fail
+ * with -EEXIST.
+ *
+ * Returns: a negative or positive dentry, or an error.
+ */
+struct dentry *start_creating(struct mnt_idmap *idmap, struct dentry *parent,
+ struct qstr *name)
+{
+ int err = lookup_one_common(idmap, name, parent);
+
+ if (err)
+ return ERR_PTR(err);
+ return start_dirop(parent, name, LOOKUP_CREATE);
+}
+EXPORT_SYMBOL(start_creating);
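
[Editor's note: a sketch, not part of the patch, tying start_creating() to the reworked vfs_create() and to the delegation-retry pattern used by do_mknodat() later in this diff. The helper name example_create, the name "newfile" and mode 0644 are hypothetical, and write access to the mount is assumed to be held.]

static int example_create(struct mnt_idmap *idmap, struct dentry *parent)
{
	struct qstr name = QSTR_INIT("newfile", 7);
	struct delegated_inode di = { };
	struct dentry *child;
	int err;

retry:
	/* hashes the name, checks MAY_EXEC on @parent, locks and looks up */
	child = start_creating(idmap, parent, &name);
	if (IS_ERR(child))
		return PTR_ERR(child);

	if (d_is_positive(child))
		err = -EEXIST;		/* O_EXCL-like behaviour */
	else
		err = vfs_create(idmap, child, 0644, &di);

	end_dirop(child);
	if (is_delegated(&di)) {
		err = break_deleg_wait(&di);
		if (!err)
			goto retry;
	}
	return err;
}
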
+
+/**
+ * start_removing - prepare to remove a given name with permission checking
+ * @idmap: idmap of the mount
+ * @parent: directory in which to find the name
+ * @name: the name to be removed
+ *
+ * Locks are taken and a lookup is performed prior to removing
+ * an object from a directory. Permission checking (MAY_EXEC) is performed
+ * against @idmap.
+ *
+ * If the name doesn't exist, an error is returned.
+ *
+ * end_removing() should be called when removal is complete, or aborted.
+ *
+ * Returns: a positive dentry, or an error.
+ */
+struct dentry *start_removing(struct mnt_idmap *idmap, struct dentry *parent,
+ struct qstr *name)
+{
+ int err = lookup_one_common(idmap, name, parent);
+
+ if (err)
+ return ERR_PTR(err);
+ return start_dirop(parent, name, 0);
+}
+EXPORT_SYMBOL(start_removing);
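
[Editor's note: similarly, a sketch of removing a directory through start_removing() and the extended vfs_rmdir(). The helper name example_rmdir and the name "subdir" are assumptions; end_removing() is the helper referenced in the kernel-doc above, while end_dirop() in this file performs the matching unlock and dput.]

static int example_rmdir(struct mnt_idmap *idmap, struct dentry *parent)
{
	struct qstr name = QSTR_INIT("subdir", 6);
	struct delegated_inode di = { };
	struct dentry *child;
	int err;

retry:
	/* returns a positive, locked-in-parent dentry or an error */
	child = start_removing(idmap, parent, &name);
	if (IS_ERR(child))
		return PTR_ERR(child);

	err = vfs_rmdir(idmap, d_inode(parent), child, &di);

	end_dirop(child);
	if (is_delegated(&di)) {
		err = break_deleg_wait(&di);
		if (!err)
			goto retry;
	}
	return err;
}
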
+
+/**
+ * start_creating_killable - prepare to create a given name with permission checking
+ * @idmap: idmap of the mount
+ * @parent: directory in which to prepare to create the name
+ * @name: the name to be created
+ *
+ * Locks are taken and a lookup is performed prior to creating
+ * an object in a directory. Permission checking (MAY_EXEC) is performed
+ * against @idmap.
+ *
+ * If the name already exists, a positive dentry is returned.
+ *
+ * If a signal is received or was already pending, the function aborts
+ * with -EINTR.
+ *
+ * Returns: a negative or positive dentry, or an error.
+ */
+struct dentry *start_creating_killable(struct mnt_idmap *idmap,
+ struct dentry *parent,
+ struct qstr *name)
+{
+ int err = lookup_one_common(idmap, name, parent);
+
+ if (err)
+ return ERR_PTR(err);
+ return __start_dirop(parent, name, LOOKUP_CREATE, TASK_KILLABLE);
+}
+EXPORT_SYMBOL(start_creating_killable);
+
+/**
+ * start_removing_killable - prepare to remove a given name with permission checking
+ * @idmap: idmap of the mount
+ * @parent: directory in which to find the name
+ * @name: the name to be removed
+ *
+ * Locks are taken and a lookup is performed prior to removing
+ * an object from a directory. Permission checking (MAY_EXEC) is performed
+ * against @idmap.
+ *
+ * If the name doesn't exist, an error is returned.
+ *
+ * end_removing() should be called when removal is complete, or aborted.
+ *
+ * If a signal is received or was already pending, the function aborts
+ * with -EINTR.
+ *
+ * Returns: a positive dentry, or an error.
+ */
+struct dentry *start_removing_killable(struct mnt_idmap *idmap,
+ struct dentry *parent,
+ struct qstr *name)
+{
+ int err = lookup_one_common(idmap, name, parent);
+
+ if (err)
+ return ERR_PTR(err);
+ return __start_dirop(parent, name, 0, TASK_KILLABLE);
+}
+EXPORT_SYMBOL(start_removing_killable);
+
+/**
+ * start_creating_noperm - prepare to create a given name without permission checking
+ * @parent: directory in which to prepare to create the name
+ * @name: the name to be created
+ *
+ * Locks are taken and a lookup is performed prior to creating
+ * an object in a directory.
+ *
+ * If the name already exists, a positive dentry is returned.
+ *
+ * Returns: a negative or positive dentry, or an error.
+ */
+struct dentry *start_creating_noperm(struct dentry *parent,
+ struct qstr *name)
+{
+ int err = lookup_noperm_common(name, parent);
+
+ if (err)
+ return ERR_PTR(err);
+ return start_dirop(parent, name, LOOKUP_CREATE);
+}
+EXPORT_SYMBOL(start_creating_noperm);
+
+/**
+ * start_removing_noperm - prepare to remove a given name without permission checking
+ * @parent: directory in which to find the name
+ * @name: the name to be removed
+ *
+ * Locks are taken and a lookup is performed prior to removing
+ * an object from a directory.
+ *
+ * If the name doesn't exist, an error is returned.
+ *
+ * end_removing() should be called when removal is complete, or aborted.
+ *
+ * Returns: a positive dentry, or an error.
+ */
+struct dentry *start_removing_noperm(struct dentry *parent,
+ struct qstr *name)
{
- return lookup_one_positive_unlocked(&nop_mnt_idmap, name, base, len);
+ int err = lookup_noperm_common(name, parent);
+
+ if (err)
+ return ERR_PTR(err);
+ return start_dirop(parent, name, 0);
+}
+EXPORT_SYMBOL(start_removing_noperm);
+
+/**
+ * start_creating_dentry - prepare to create a given dentry
+ * @parent: directory in which the dentry should be created
+ * @child: the dentry to be created
+ *
+ * A lock is taken to protect the dentry against other dirops and
+ * the validity of the dentry is checked: correct parent and still hashed.
+ *
+ * If the dentry is valid and negative, a reference is taken and
+ * returned. If not, an error is returned.
+ *
+ * end_creating() should be called when creation is complete, or aborted.
+ *
+ * Returns: the valid dentry, or an error.
+ */
+struct dentry *start_creating_dentry(struct dentry *parent,
+ struct dentry *child)
+{
+ inode_lock_nested(parent->d_inode, I_MUTEX_PARENT);
+ if (unlikely(IS_DEADDIR(parent->d_inode) ||
+ child->d_parent != parent ||
+ d_unhashed(child))) {
+ inode_unlock(parent->d_inode);
+ return ERR_PTR(-EINVAL);
+ }
+ if (d_is_positive(child)) {
+ inode_unlock(parent->d_inode);
+ return ERR_PTR(-EEXIST);
+ }
+ return dget(child);
}
-EXPORT_SYMBOL(lookup_positive_unlocked);
+EXPORT_SYMBOL(start_creating_dentry);
+
+/**
+ * start_removing_dentry - prepare to remove a given dentry
+ * @parent: directory from which dentry should be removed
+ * @child: the dentry to be removed
+ *
+ * A lock is taken to protect the dentry against other dirops and
+ * the validity of the dentry is checked: correct parent and still hashed.
+ *
+ * If the dentry is valid and positive, a reference is taken and
+ * returned. If not, an error is returned.
+ *
+ * end_removing() should be called when removal is complete, or aborted.
+ *
+ * Returns: the valid dentry, or an error.
+ */
+struct dentry *start_removing_dentry(struct dentry *parent,
+ struct dentry *child)
+{
+ inode_lock_nested(parent->d_inode, I_MUTEX_PARENT);
+ if (unlikely(IS_DEADDIR(parent->d_inode) ||
+ child->d_parent != parent ||
+ d_unhashed(child))) {
+ inode_unlock(parent->d_inode);
+ return ERR_PTR(-EINVAL);
+ }
+ if (d_is_negative(child)) {
+ inode_unlock(parent->d_inode);
+ return ERR_PTR(-ENOENT);
+ }
+ return dget(child);
+}
+EXPORT_SYMBOL(start_removing_dentry);
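
[Editor's note: where the caller already holds a dentry, the *_dentry variants skip the lookup. A sketch with a hypothetical helper name example_unlink_dentry; the delegation argument is passed as NULL, which the vfs_unlink() kernel-doc still documents as acceptable.]

static int example_unlink_dentry(struct mnt_idmap *idmap,
				 struct dentry *parent, struct dentry *child)
{
	struct dentry *victim;
	int err;

	/* locks the parent and re-checks parentage, hashing and positivity */
	victim = start_removing_dentry(parent, child);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	err = vfs_unlink(idmap, d_inode(parent), victim, NULL);

	/* end_removing() per the docs above; end_dirop() unlocks and dputs here */
	end_dirop(victim);
	return err;
}
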
#ifdef CONFIG_UNIX98_PTYS
int path_pts(struct path *path)
@@ -3274,6 +3771,290 @@ void unlock_rename(struct dentry *p1, struct dentry *p2)
EXPORT_SYMBOL(unlock_rename);
/**
+ * __start_renaming - lookup and lock names for rename
+ * @rd: rename data containing parents and flags, and
+ * for receiving found dentries
+ * @lookup_flags: extra flags to pass to ->lookup (e.g. LOOKUP_REVAL,
+ * LOOKUP_NO_SYMLINKS etc).
+ * @old_last: name of object in @rd.old_parent
+ * @new_last: name of object in @rd.new_parent
+ *
+ * Look up two names and ensure locks are in place for
+ * rename.
+ *
+ * On success the found dentries are stored in @rd.old_dentry,
+ * @rd.new_dentry and an extra ref is taken on @rd.old_parent.
+ * These references and the lock are dropped by end_renaming().
+ *
+ * The passed in qstrs must have the hash calculated, and no permission
+ * checking is performed.
+ *
+ * Returns: zero or an error.
+ */
+static int
+__start_renaming(struct renamedata *rd, int lookup_flags,
+ struct qstr *old_last, struct qstr *new_last)
+{
+ struct dentry *trap;
+ struct dentry *d1, *d2;
+ int target_flags = LOOKUP_RENAME_TARGET | LOOKUP_CREATE;
+ int err;
+
+ if (rd->flags & RENAME_EXCHANGE)
+ target_flags = 0;
+ if (rd->flags & RENAME_NOREPLACE)
+ target_flags |= LOOKUP_EXCL;
+
+ trap = lock_rename(rd->old_parent, rd->new_parent);
+ if (IS_ERR(trap))
+ return PTR_ERR(trap);
+
+ d1 = lookup_one_qstr_excl(old_last, rd->old_parent,
+ lookup_flags);
+ err = PTR_ERR(d1);
+ if (IS_ERR(d1))
+ goto out_unlock;
+
+ d2 = lookup_one_qstr_excl(new_last, rd->new_parent,
+ lookup_flags | target_flags);
+ err = PTR_ERR(d2);
+ if (IS_ERR(d2))
+ goto out_dput_d1;
+
+ if (d1 == trap) {
+ /* source is an ancestor of target */
+ err = -EINVAL;
+ goto out_dput_d2;
+ }
+
+ if (d2 == trap) {
+ /* target is an ancestor of source */
+ if (rd->flags & RENAME_EXCHANGE)
+ err = -EINVAL;
+ else
+ err = -ENOTEMPTY;
+ goto out_dput_d2;
+ }
+
+ rd->old_dentry = d1;
+ rd->new_dentry = d2;
+ dget(rd->old_parent);
+ return 0;
+
+out_dput_d2:
+ dput(d2);
+out_dput_d1:
+ dput(d1);
+out_unlock:
+ unlock_rename(rd->old_parent, rd->new_parent);
+ return err;
+}
+
+/**
+ * start_renaming - lookup and lock names for rename with permission checking
+ * @rd: rename data containing parents and flags, and
+ * for receiving found dentries
+ * @lookup_flags: extra flags to pass to ->lookup (e.g. LOOKUP_REVAL,
+ * LOOKUP_NO_SYMLINKS etc).
+ * @old_last: name of object in @rd.old_parent
+ * @new_last: name of object in @rd.new_parent
+ *
+ * Look up two names and ensure locks are in place for
+ * rename.
+ *
+ * On success the found dentries are stored in @rd.old_dentry,
+ * @rd.new_dentry. Also the refcount on @rd.old_parent is increased.
+ * These references and the lock are dropped by end_renaming().
+ *
+ * The passed in qstrs need not have the hash calculated, and basic
+ * execute (MAY_EXEC) permission checking is performed against @rd.mnt_idmap.
+ *
+ * Returns: zero or an error.
+ */
+int start_renaming(struct renamedata *rd, int lookup_flags,
+ struct qstr *old_last, struct qstr *new_last)
+{
+ int err;
+
+ err = lookup_one_common(rd->mnt_idmap, old_last, rd->old_parent);
+ if (err)
+ return err;
+ err = lookup_one_common(rd->mnt_idmap, new_last, rd->new_parent);
+ if (err)
+ return err;
+ return __start_renaming(rd, lookup_flags, old_last, new_last);
+}
+EXPORT_SYMBOL(start_renaming);
+
+static int
+__start_renaming_dentry(struct renamedata *rd, int lookup_flags,
+ struct dentry *old_dentry, struct qstr *new_last)
+{
+ struct dentry *trap;
+ struct dentry *d2;
+ int target_flags = LOOKUP_RENAME_TARGET | LOOKUP_CREATE;
+ int err;
+
+ if (rd->flags & RENAME_EXCHANGE)
+ target_flags = 0;
+ if (rd->flags & RENAME_NOREPLACE)
+ target_flags |= LOOKUP_EXCL;
+
+ /* Already have the dentry - need to be sure to lock the correct parent */
+ trap = lock_rename_child(old_dentry, rd->new_parent);
+ if (IS_ERR(trap))
+ return PTR_ERR(trap);
+ if (d_unhashed(old_dentry) ||
+ (rd->old_parent && rd->old_parent != old_dentry->d_parent)) {
+ /* dentry was removed, or moved and explicit parent requested */
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ d2 = lookup_one_qstr_excl(new_last, rd->new_parent,
+ lookup_flags | target_flags);
+ err = PTR_ERR(d2);
+ if (IS_ERR(d2))
+ goto out_unlock;
+
+ if (old_dentry == trap) {
+ /* source is an ancestor of target */
+ err = -EINVAL;
+ goto out_dput_d2;
+ }
+
+ if (d2 == trap) {
+ /* target is an ancestor of source */
+ if (rd->flags & RENAME_EXCHANGE)
+ err = -EINVAL;
+ else
+ err = -ENOTEMPTY;
+ goto out_dput_d2;
+ }
+
+ rd->old_dentry = dget(old_dentry);
+ rd->new_dentry = d2;
+ rd->old_parent = dget(old_dentry->d_parent);
+ return 0;
+
+out_dput_d2:
+ dput(d2);
+out_unlock:
+ unlock_rename(old_dentry->d_parent, rd->new_parent);
+ return err;
+}
+
+/**
+ * start_renaming_dentry - lookup and lock name for rename with permission checking
+ * @rd: rename data containing parents and flags, and
+ * for receiving found dentries
+ * @lookup_flags: extra flags to pass to ->lookup (e.g. LOOKUP_REVAL,
+ * LOOKUP_NO_SYMLINKS etc).
+ * @old_dentry: dentry of name to move
+ * @new_last: name of target in @rd.new_parent
+ *
+ * Look up target name and ensure locks are in place for
+ * rename.
+ *
+ * On success the found dentry is stored in @rd.new_dentry and
+ * @rd.old_parent is confirmed to be the parent of @old_dentry. If it
+ * was originally %NULL, it is set. In either case a reference is taken
+ * so that end_renaming() can have a stable reference to unlock.
+ *
+ * References and the lock can be dropped with end_renaming()
+ *
+ * The passed in qstr need not have the hash calculated, and basic
+ * execute (MAY_EXEC) permission checking is performed against @rd.mnt_idmap.
+ *
+ * Returns: zero or an error.
+ */
+int start_renaming_dentry(struct renamedata *rd, int lookup_flags,
+ struct dentry *old_dentry, struct qstr *new_last)
+{
+ int err;
+
+ err = lookup_one_common(rd->mnt_idmap, new_last, rd->new_parent);
+ if (err)
+ return err;
+ return __start_renaming_dentry(rd, lookup_flags, old_dentry, new_last);
+}
+EXPORT_SYMBOL(start_renaming_dentry);
+
+/**
+ * start_renaming_two_dentries - lock two dentries in given parents for rename
+ * @rd: rename data containing parents and flags
+ * @old_dentry: dentry of name to move
+ * @new_dentry: dentry to move to
+ *
+ * Ensure locks are in place for rename and check parentage is still correct.
+ *
+ * On success the two dentries are stored in @rd.old_dentry and
+ * @rd.new_dentry and @rd.old_parent and @rd.new_parent are confirmed to
+ * be the parents of the dentries.
+ *
+ * References and the lock can be dropped with end_renaming()
+ *
+ * Returns: zero or an error.
+ */
+int
+start_renaming_two_dentries(struct renamedata *rd,
+ struct dentry *old_dentry, struct dentry *new_dentry)
+{
+ struct dentry *trap;
+ int err;
+
+ /* Already have the dentry - need to be sure to lock the correct parent */
+ trap = lock_rename_child(old_dentry, rd->new_parent);
+ if (IS_ERR(trap))
+ return PTR_ERR(trap);
+ err = -EINVAL;
+ if (d_unhashed(old_dentry) ||
+ (rd->old_parent && rd->old_parent != old_dentry->d_parent))
+ /* old_dentry was removed, or moved and explicit parent requested */
+ goto out_unlock;
+ if (d_unhashed(new_dentry) ||
+ rd->new_parent != new_dentry->d_parent)
+ /* new_dentry was removed or moved */
+ goto out_unlock;
+
+ if (old_dentry == trap)
+ /* source is an ancestor of target */
+ goto out_unlock;
+
+ if (new_dentry == trap) {
+ /* target is an ancestor of source */
+ if (rd->flags & RENAME_EXCHANGE)
+ err = -EINVAL;
+ else
+ err = -ENOTEMPTY;
+ goto out_unlock;
+ }
+
+ err = -EEXIST;
+ if (d_is_positive(new_dentry) && (rd->flags & RENAME_NOREPLACE))
+ goto out_unlock;
+
+ rd->old_dentry = dget(old_dentry);
+ rd->new_dentry = dget(new_dentry);
+ rd->old_parent = dget(old_dentry->d_parent);
+ return 0;
+
+out_unlock:
+ unlock_rename(old_dentry->d_parent, rd->new_parent);
+ return err;
+}
+EXPORT_SYMBOL(start_renaming_two_dentries);
+
+void end_renaming(struct renamedata *rd)
+{
+ unlock_rename(rd->old_parent, rd->new_parent);
+ dput(rd->old_dentry);
+ dput(rd->new_dentry);
+ dput(rd->old_parent);
+}
+EXPORT_SYMBOL(end_renaming);
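
[Editor's note: a sketch of the new rename bracket, not part of the patch. It assumes struct renamedata carries the mnt_idmap, old_parent, new_parent and flags fields referenced above, that vfs_rename() continues to take the renamedata (its signature is not shown in this hunk), and uses the hypothetical names example_rename, "a" and "b".]

static int example_rename(struct mnt_idmap *idmap, struct dentry *dir)
{
	struct qstr old_name = QSTR_INIT("a", 1);
	struct qstr new_name = QSTR_INIT("b", 1);
	struct renamedata rd = {
		.mnt_idmap	= idmap,
		.old_parent	= dir,
		.new_parent	= dir,
		.flags		= RENAME_NOREPLACE,
	};
	int err;

	/* hashes both names, takes the rename locks, looks both names up */
	err = start_renaming(&rd, 0, &old_name, &new_name);
	if (err)
		return err;

	err = vfs_rename(&rd);		/* entry point assumed unchanged */

	end_renaming(&rd);		/* unlock_rename() plus dropped references */
	return err;
}
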
+
+/**
* vfs_prepare_mode - prepare the mode to be used for a new inode
* @idmap: idmap of the mount the inode was found from
* @dir: parent directory of the new inode
@@ -3316,10 +4097,9 @@ static inline umode_t vfs_prepare_mode(struct mnt_idmap *idmap,
/**
* vfs_create - create new file
* @idmap: idmap of the mount the inode was found from
- * @dir: inode of the parent directory
* @dentry: dentry of the child file
* @mode: mode of the child file
- * @want_excl: whether the file must not yet exist
+ * @di: returns parent inode, if the inode is delegated.
*
* Create a new file.
*
@@ -3329,9 +4109,10 @@ static inline umode_t vfs_prepare_mode(struct mnt_idmap *idmap,
* On non-idmapped mounts or if permission checking is to be performed on the
* raw inode simply pass @nop_mnt_idmap.
*/
-int vfs_create(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode, bool want_excl)
+int vfs_create(struct mnt_idmap *idmap, struct dentry *dentry, umode_t mode,
+ struct delegated_inode *di)
{
+ struct inode *dir = d_inode(dentry->d_parent);
int error;
error = may_create(idmap, dir, dentry);
@@ -3345,7 +4126,10 @@ int vfs_create(struct mnt_idmap *idmap, struct inode *dir,
error = security_inode_create(dir, dentry, mode);
if (error)
return error;
- error = dir->i_op->create(idmap, dir, dentry, mode, want_excl);
+ error = try_break_deleg(dir, di);
+ if (error)
+ return error;
+ error = dir->i_op->create(idmap, dir, dentry, mode, true);
if (!error)
fsnotify_create(dir, dentry);
return error;
@@ -3413,6 +4197,8 @@ static int may_open(struct mnt_idmap *idmap, const struct path *path,
if ((acc_mode & MAY_EXEC) && path_noexec(path))
return -EACCES;
break;
+ default:
+ VFS_BUG_ON_INODE(!IS_ANON_FILE(inode), inode);
}
error = inode_permission(idmap, inode, MAY_OPEN | acc_mode);
@@ -3504,8 +4290,8 @@ static struct dentry *atomic_open(struct nameidata *nd, struct dentry *dentry,
if (nd->flags & LOOKUP_DIRECTORY)
open_flag |= O_DIRECTORY;
- file->f_path.dentry = DENTRY_NOT_SET;
- file->f_path.mnt = nd->path.mnt;
+ file->__f_path.dentry = DENTRY_NOT_SET;
+ file->__f_path.mnt = nd->path.mnt;
error = dir->i_op->atomic_open(dir, dentry, file,
open_to_namei_flags(open_flag), mode);
d_lookup_done(dentry);
@@ -3550,7 +4336,7 @@ static struct dentry *atomic_open(struct nameidata *nd, struct dentry *dentry,
*/
static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
const struct open_flags *op,
- bool got_write)
+ bool got_write, struct delegated_inode *delegated_inode)
{
struct mnt_idmap *idmap;
struct dentry *dir = nd->path.dentry;
@@ -3575,7 +4361,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
if (d_in_lookup(dentry))
break;
- error = d_revalidate(dentry, nd->flags);
+ error = d_revalidate(dir_inode, &nd->last, dentry, nd->flags);
if (likely(error > 0))
break;
if (error)
@@ -3639,6 +4425,11 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
/* Negative dentry, just create the file */
if (!dentry->d_inode && (open_flag & O_CREAT)) {
+ /* but break the directory lease first! */
+ error = try_break_deleg(dir_inode, delegated_inode);
+ if (error)
+ goto out_dput;
+
file->f_mode |= FMODE_CREATED;
audit_inode_child(dir_inode, dentry, AUDIT_TYPE_CHILD_CREATE);
if (!dir_inode->i_op->create) {
@@ -3701,6 +4492,7 @@ static struct dentry *lookup_fast_for_open(struct nameidata *nd, int open_flag)
static const char *open_last_lookups(struct nameidata *nd,
struct file *file, const struct open_flags *op)
{
+ struct delegated_inode delegated_inode = { };
struct dentry *dir = nd->path.dentry;
int open_flag = op->open_flag;
bool got_write = false;
@@ -3732,7 +4524,7 @@ static const char *open_last_lookups(struct nameidata *nd,
return ERR_PTR(-ECHILD);
}
}
-
+retry:
if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
got_write = !mnt_want_write(nd->path.mnt);
/*
@@ -3745,7 +4537,7 @@ static const char *open_last_lookups(struct nameidata *nd,
inode_lock(dir->d_inode);
else
inode_lock_shared(dir->d_inode);
- dentry = lookup_open(nd, file, op, got_write);
+ dentry = lookup_open(nd, file, op, got_write, &delegated_inode);
if (!IS_ERR(dentry)) {
if (file->f_mode & FMODE_CREATED)
fsnotify_create(dir->d_inode, dentry);
@@ -3760,8 +4552,16 @@ static const char *open_last_lookups(struct nameidata *nd,
if (got_write)
mnt_drop_write(nd->path.mnt);
- if (IS_ERR(dentry))
+ if (IS_ERR(dentry)) {
+ if (is_delegated(&delegated_inode)) {
+ int error = break_deleg_wait(&delegated_inode);
+
+ if (!error)
+ goto retry;
+ return ERR_PTR(error);
+ }
return ERR_CAST(dentry);
+ }
if (file->f_mode & (FMODE_OPENED | FMODE_CREATED)) {
dput(nd->path.dentry);
@@ -3873,8 +4673,8 @@ int vfs_tmpfile(struct mnt_idmap *idmap,
child = d_alloc(parentpath->dentry, &slash_name);
if (unlikely(!child))
return -ENOMEM;
- file->f_path.mnt = parentpath->mnt;
- file->f_path.dentry = child;
+ file->__f_path.mnt = parentpath->mnt;
+ file->__f_path.dentry = child;
mode = vfs_prepare_mode(idmap, dir, mode, mode, mode);
error = dir->i_op->tmpfile(idmap, dir, file, mode);
dput(child);
@@ -3889,7 +4689,7 @@ int vfs_tmpfile(struct mnt_idmap *idmap,
inode = file_inode(file);
if (!(open_flag & O_EXCL)) {
spin_lock(&inode->i_lock);
- inode->i_state |= I_LINKABLE;
+ inode_state_set(inode, I_LINKABLE);
spin_unlock(&inode->i_lock);
}
security_inode_post_create_tmpfile(idmap, inode);
@@ -3993,7 +4793,7 @@ static struct file *path_openat(struct nameidata *nd,
WARN_ON(1);
error = -EINVAL;
}
- fput(file);
+ fput_close(file);
if (error == -EOPENSTALE) {
if (flags & LOOKUP_RCU)
error = -ECHILD;
@@ -4055,7 +4855,6 @@ static struct dentry *filename_create(int dfd, struct filename *name,
unsigned int reval_flag = lookup_flags & LOOKUP_REVAL;
unsigned int create_flags = LOOKUP_CREATE | LOOKUP_EXCL;
int type;
- int err2;
int error;
error = filename_parentat(dfd, name, reval_flag, path, &last, &type);
@@ -4070,52 +4869,34 @@ static struct dentry *filename_create(int dfd, struct filename *name,
goto out;
/* don't fail immediately if it's r/o, at least try to report other errors */
- err2 = mnt_want_write(path->mnt);
+ error = mnt_want_write(path->mnt);
/*
* Do the final lookup. Suppress 'create' if there is a trailing
* '/', and a directory wasn't requested.
*/
if (last.name[last.len] && !want_dir)
- create_flags = 0;
- inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
- dentry = lookup_one_qstr_excl(&last, path->dentry,
- reval_flag | create_flags);
+ create_flags &= ~LOOKUP_CREATE;
+ dentry = start_dirop(path->dentry, &last, reval_flag | create_flags);
if (IS_ERR(dentry))
- goto unlock;
+ goto out_drop_write;
- error = -EEXIST;
- if (d_is_positive(dentry))
+ if (unlikely(error))
goto fail;
- /*
- * Special case - lookup gave negative, but... we had foo/bar/
- * From the vfs_mknod() POV we just have a negative dentry -
- * all is fine. Let's be bastards - you had / on the end, you've
- * been asking for (non-existent) directory. -ENOENT for you.
- */
- if (unlikely(!create_flags)) {
- error = -ENOENT;
- goto fail;
- }
- if (unlikely(err2)) {
- error = err2;
- goto fail;
- }
return dentry;
fail:
- dput(dentry);
+ end_dirop(dentry);
dentry = ERR_PTR(error);
-unlock:
- inode_unlock(path->dentry->d_inode);
- if (!err2)
+out_drop_write:
+ if (!error)
mnt_drop_write(path->mnt);
out:
path_put(path);
return dentry;
}
-struct dentry *kern_path_create(int dfd, const char *pathname,
- struct path *path, unsigned int lookup_flags)
+struct dentry *start_creating_path(int dfd, const char *pathname,
+ struct path *path, unsigned int lookup_flags)
{
struct filename *filename = getname_kernel(pathname);
struct dentry *res = filename_create(dfd, filename, path, lookup_flags);
@@ -4123,19 +4904,30 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
putname(filename);
return res;
}
-EXPORT_SYMBOL(kern_path_create);
+EXPORT_SYMBOL(start_creating_path);
-void done_path_create(struct path *path, struct dentry *dentry)
+/**
+ * end_creating_path - finish a code section started by start_creating_path()
+ * @path: the path instantiated by start_creating_path()
+ * @dentry: the dentry returned by start_creating_path()
+ *
+ * end_creating_path() will unlock any locks taken by start_creating_path()
+ * and drop any references that were taken. It should only be called
+ * if start_creating_path() returned a non-error.
+ * If vfs_mkdir() was called and it returned an error, that error *should*
+ * be passed to end_creating_path() together with the path.
+ */
+void end_creating_path(const struct path *path, struct dentry *dentry)
{
- dput(dentry);
- inode_unlock(path->dentry->d_inode);
+ end_creating(dentry);
mnt_drop_write(path->mnt);
path_put(path);
}
-EXPORT_SYMBOL(done_path_create);
+EXPORT_SYMBOL(end_creating_path);
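
[Editor's note: the bracket around path-based creation now looks like the sketch below, which mirrors the do_mkdirat() hunk further down. The helper name example_mkdir is hypothetical; vfs_mkdir()'s new dentry-returning contract (see its kernel-doc later in this diff) is assumed.]

static int example_mkdir(const char *pathname, umode_t mode)
{
	struct delegated_inode di = { };
	struct path path;
	struct dentry *dentry;
	int err = 0;

retry:
	dentry = start_creating_path(AT_FDCWD, pathname, &path, LOOKUP_DIRECTORY);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* consumes @dentry; returns the in-use (possibly spliced) dentry or ERR_PTR */
	dentry = vfs_mkdir(mnt_idmap(path.mnt), d_inode(path.dentry),
			   dentry, mode, &di);
	if (IS_ERR(dentry))
		err = PTR_ERR(dentry);

	/* an error dentry is deliberately passed through, as noted above */
	end_creating_path(&path, dentry);
	if (is_delegated(&di)) {
		err = break_deleg_wait(&di);
		if (!err)
			goto retry;
	}
	return err;
}
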
-inline struct dentry *user_path_create(int dfd, const char __user *pathname,
- struct path *path, unsigned int lookup_flags)
+inline struct dentry *start_creating_user_path(
+ int dfd, const char __user *pathname,
+ struct path *path, unsigned int lookup_flags)
{
struct filename *filename = getname(pathname);
struct dentry *res = filename_create(dfd, filename, path, lookup_flags);
@@ -4143,15 +4935,17 @@ inline struct dentry *user_path_create(int dfd, const char __user *pathname,
putname(filename);
return res;
}
-EXPORT_SYMBOL(user_path_create);
+EXPORT_SYMBOL(start_creating_user_path);
+
/**
* vfs_mknod - create device node or file
- * @idmap: idmap of the mount the inode was found from
- * @dir: inode of the parent directory
- * @dentry: dentry of the child device node
- * @mode: mode of the child device node
- * @dev: device number of device to create
+ * @idmap: idmap of the mount the inode was found from
+ * @dir: inode of the parent directory
+ * @dentry: dentry of the child device node
+ * @mode: mode of the child device node
+ * @dev: device number of device to create
+ * @delegated_inode: returns parent inode, if the inode is delegated.
*
* Create a device node or file.
*
@@ -4162,7 +4956,8 @@ EXPORT_SYMBOL(user_path_create);
* raw inode simply pass @nop_mnt_idmap.
*/
int vfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode, dev_t dev)
+ struct dentry *dentry, umode_t mode, dev_t dev,
+ struct delegated_inode *delegated_inode)
{
bool is_whiteout = S_ISCHR(mode) && dev == WHITEOUT_DEV;
int error = may_create(idmap, dir, dentry);
@@ -4186,6 +4981,10 @@ int vfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
if (error)
return error;
+ error = try_break_deleg(dir, delegated_inode);
+ if (error)
+ return error;
+
error = dir->i_op->mknod(idmap, dir, dentry, mode, dev);
if (!error)
fsnotify_create(dir, dentry);
@@ -4213,6 +5012,7 @@ static int may_mknod(umode_t mode)
static int do_mknodat(int dfd, struct filename *name, umode_t mode,
unsigned int dev)
{
+ struct delegated_inode di = { };
struct mnt_idmap *idmap;
struct dentry *dentry;
struct path path;
@@ -4236,22 +5036,26 @@ retry:
idmap = mnt_idmap(path.mnt);
switch (mode & S_IFMT) {
case 0: case S_IFREG:
- error = vfs_create(idmap, path.dentry->d_inode,
- dentry, mode, true);
+ error = vfs_create(idmap, dentry, mode, &di);
if (!error)
security_path_post_mknod(idmap, dentry);
break;
case S_IFCHR: case S_IFBLK:
error = vfs_mknod(idmap, path.dentry->d_inode,
- dentry, mode, new_decode_dev(dev));
+ dentry, mode, new_decode_dev(dev), &di);
break;
case S_IFIFO: case S_IFSOCK:
error = vfs_mknod(idmap, path.dentry->d_inode,
- dentry, mode, 0);
+ dentry, mode, 0, &di);
break;
}
out2:
- done_path_create(&path, dentry);
+ end_creating_path(&path, dentry);
+ if (is_delegated(&di)) {
+ error = break_deleg_wait(&di);
+ if (!error)
+ goto retry;
+ }
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
@@ -4273,11 +5077,12 @@ SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, d
}
/**
- * vfs_mkdir - create directory
- * @idmap: idmap of the mount the inode was found from
- * @dir: inode of the parent directory
- * @dentry: dentry of the child directory
- * @mode: mode of the child directory
+ * vfs_mkdir - create directory returning correct dentry if possible
+ * @idmap: idmap of the mount the inode was found from
+ * @dir: inode of the parent directory
+ * @dentry: dentry of the child directory
+ * @mode: mode of the child directory
+ * @delegated_inode: returns parent inode, if the inode is delegated.
*
* Create a directory.
*
@@ -4286,32 +5091,56 @@ SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, d
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
* raw inode simply pass @nop_mnt_idmap.
+ *
+ * In the event that the filesystem does not use @dentry, but leaves it
+ * negative or unhashes it and possibly splices in a different one which it
+ * returns, the original dentry is dput() and the alternate is returned.
+ *
+ * In case of an error the dentry is dput() and an ERR_PTR() is returned.
*/
-int vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+struct dentry *vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode,
+ struct delegated_inode *delegated_inode)
{
int error;
unsigned max_links = dir->i_sb->s_max_links;
+ struct dentry *de;
error = may_create(idmap, dir, dentry);
if (error)
- return error;
+ goto err;
+ error = -EPERM;
if (!dir->i_op->mkdir)
- return -EPERM;
+ goto err;
mode = vfs_prepare_mode(idmap, dir, mode, S_IRWXUGO | S_ISVTX, 0);
error = security_inode_mkdir(dir, dentry, mode);
if (error)
- return error;
+ goto err;
+ error = -EMLINK;
if (max_links && dir->i_nlink >= max_links)
- return -EMLINK;
+ goto err;
- error = dir->i_op->mkdir(idmap, dir, dentry, mode);
- if (!error)
- fsnotify_mkdir(dir, dentry);
- return error;
+ error = try_break_deleg(dir, delegated_inode);
+ if (error)
+ goto err;
+
+ de = dir->i_op->mkdir(idmap, dir, dentry, mode);
+ error = PTR_ERR(de);
+ if (IS_ERR(de))
+ goto err;
+ if (de) {
+ dput(dentry);
+ dentry = de;
+ }
+ fsnotify_mkdir(dir, dentry);
+ return dentry;
+
+err:
+ end_creating(dentry);
+ return ERR_PTR(error);
}
EXPORT_SYMBOL(vfs_mkdir);
@@ -4321,6 +5150,7 @@ int do_mkdirat(int dfd, struct filename *name, umode_t mode)
struct path path;
int error;
unsigned int lookup_flags = LOOKUP_DIRECTORY;
+ struct delegated_inode delegated_inode = { };
retry:
dentry = filename_create(dfd, name, &path, lookup_flags);
@@ -4331,10 +5161,17 @@ retry:
error = security_path_mkdir(&path, dentry,
mode_strip_umask(path.dentry->d_inode, mode));
if (!error) {
- error = vfs_mkdir(mnt_idmap(path.mnt), path.dentry->d_inode,
- dentry, mode);
+ dentry = vfs_mkdir(mnt_idmap(path.mnt), path.dentry->d_inode,
+ dentry, mode, &delegated_inode);
+ if (IS_ERR(dentry))
+ error = PTR_ERR(dentry);
+ }
+ end_creating_path(&path, dentry);
+ if (is_delegated(&delegated_inode)) {
+ error = break_deleg_wait(&delegated_inode);
+ if (!error)
+ goto retry;
}
- done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
@@ -4356,9 +5193,10 @@ SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
/**
* vfs_rmdir - remove directory
- * @idmap: idmap of the mount the inode was found from
- * @dir: inode of the parent directory
- * @dentry: dentry of the child directory
+ * @idmap: idmap of the mount the inode was found from
+ * @dir: inode of the parent directory
+ * @dentry: dentry of the child directory
+ * @delegated_inode: returns parent inode, if it's delegated.
*
* Remove a directory.
*
@@ -4369,7 +5207,7 @@ SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
* raw inode simply pass @nop_mnt_idmap.
*/
int vfs_rmdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry)
+ struct dentry *dentry, struct delegated_inode *delegated_inode)
{
int error = may_delete(idmap, dir, dentry, 1);
@@ -4391,6 +5229,10 @@ int vfs_rmdir(struct mnt_idmap *idmap, struct inode *dir,
if (error)
goto out;
+ error = try_break_deleg(dir, delegated_inode);
+ if (error)
+ goto out;
+
error = dir->i_op->rmdir(dir, dentry);
if (error)
goto out;
@@ -4417,6 +5259,7 @@ int do_rmdir(int dfd, struct filename *name)
struct qstr last;
int type;
unsigned int lookup_flags = 0;
+ struct delegated_inode delegated_inode = { };
retry:
error = filename_parentat(dfd, name, lookup_flags, &path, &last, &type);
if (error)
@@ -4438,26 +5281,26 @@ retry:
if (error)
goto exit2;
- inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
- dentry = lookup_one_qstr_excl(&last, path.dentry, lookup_flags);
+ dentry = start_dirop(path.dentry, &last, lookup_flags);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto exit3;
- if (!dentry->d_inode) {
- error = -ENOENT;
- goto exit4;
- }
error = security_path_rmdir(&path, dentry);
if (error)
goto exit4;
- error = vfs_rmdir(mnt_idmap(path.mnt), path.dentry->d_inode, dentry);
+ error = vfs_rmdir(mnt_idmap(path.mnt), path.dentry->d_inode,
+ dentry, &delegated_inode);
exit4:
- dput(dentry);
+ end_dirop(dentry);
exit3:
- inode_unlock(path.dentry->d_inode);
mnt_drop_write(path.mnt);
exit2:
path_put(&path);
+ if (is_delegated(&delegated_inode)) {
+ error = break_deleg_wait(&delegated_inode);
+ if (!error)
+ goto retry;
+ }
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
@@ -4479,13 +5322,13 @@ SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
* @dentry: victim
* @delegated_inode: returns victim inode, if the inode is delegated.
*
- * The caller must hold dir->i_mutex.
+ * The caller must hold dir->i_rwsem exclusively.
*
* If vfs_unlink discovers a delegation, it will return -EWOULDBLOCK and
* return a reference to the inode in delegated_inode. The caller
* should then break the delegation on that inode and retry. Because
* breaking a delegation may take a long time, the caller should drop
- * dir->i_mutex before doing so.
+ * dir->i_rwsem before doing so.
*
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
@@ -4498,7 +5341,7 @@ SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
* raw inode simply pass @nop_mnt_idmap.
*/
int vfs_unlink(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, struct inode **delegated_inode)
+ struct dentry *dentry, struct delegated_inode *delegated_inode)
{
struct inode *target = dentry->d_inode;
int error = may_delete(idmap, dir, dentry, 0);
@@ -4517,6 +5360,9 @@ int vfs_unlink(struct mnt_idmap *idmap, struct inode *dir,
else {
error = security_inode_unlink(dir, dentry);
if (!error) {
+ error = try_break_deleg(dir, delegated_inode);
+ if (error)
+ goto out;
error = try_break_deleg(target, delegated_inode);
if (error)
goto out;
@@ -4544,7 +5390,7 @@ EXPORT_SYMBOL(vfs_unlink);
/*
* Make sure that the actual truncation of the file will occur outside its
- * directory's i_mutex. Truncate can take a long time if there is a lot of
+ * directory's i_rwsem. Truncate can take a long time if there is a lot of
* writeout happening, and we don't want to prevent access to the directory
* while waiting on the I/O.
*/
@@ -4555,69 +5401,62 @@ int do_unlinkat(int dfd, struct filename *name)
struct path path;
struct qstr last;
int type;
- struct inode *inode = NULL;
- struct inode *delegated_inode = NULL;
+ struct inode *inode;
+ struct delegated_inode delegated_inode = { };
unsigned int lookup_flags = 0;
retry:
error = filename_parentat(dfd, name, lookup_flags, &path, &last, &type);
if (error)
- goto exit1;
+ goto exit_putname;
error = -EISDIR;
if (type != LAST_NORM)
- goto exit2;
+ goto exit_path_put;
error = mnt_want_write(path.mnt);
if (error)
- goto exit2;
+ goto exit_path_put;
retry_deleg:
- inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
- dentry = lookup_one_qstr_excl(&last, path.dentry, lookup_flags);
+ dentry = start_dirop(path.dentry, &last, lookup_flags);
error = PTR_ERR(dentry);
- if (!IS_ERR(dentry)) {
+ if (IS_ERR(dentry))
+ goto exit_drop_write;
- /* Why not before? Because we want correct error value */
- if (last.name[last.len] || d_is_negative(dentry))
- goto slashes;
- inode = dentry->d_inode;
- ihold(inode);
- error = security_path_unlink(&path, dentry);
- if (error)
- goto exit3;
- error = vfs_unlink(mnt_idmap(path.mnt), path.dentry->d_inode,
- dentry, &delegated_inode);
-exit3:
- dput(dentry);
+ /* Why not before? Because we want correct error value */
+ if (unlikely(last.name[last.len])) {
+ if (d_is_dir(dentry))
+ error = -EISDIR;
+ else
+ error = -ENOTDIR;
+ end_dirop(dentry);
+ goto exit_drop_write;
}
- inode_unlock(path.dentry->d_inode);
- if (inode)
- iput(inode); /* truncate the inode here */
- inode = NULL;
- if (delegated_inode) {
+ inode = dentry->d_inode;
+ ihold(inode);
+ error = security_path_unlink(&path, dentry);
+ if (error)
+ goto exit_end_dirop;
+ error = vfs_unlink(mnt_idmap(path.mnt), path.dentry->d_inode,
+ dentry, &delegated_inode);
+exit_end_dirop:
+ end_dirop(dentry);
+ iput(inode); /* truncate the inode here */
+ if (is_delegated(&delegated_inode)) {
error = break_deleg_wait(&delegated_inode);
if (!error)
goto retry_deleg;
}
+exit_drop_write:
mnt_drop_write(path.mnt);
-exit2:
+exit_path_put:
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
- inode = NULL;
goto retry;
}
-exit1:
+exit_putname:
putname(name);
return error;
-
-slashes:
- if (d_is_negative(dentry))
- error = -ENOENT;
- else if (d_is_dir(dentry))
- error = -EISDIR;
- else
- error = -ENOTDIR;
- goto exit3;
}
SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
@@ -4641,6 +5480,7 @@ SYSCALL_DEFINE1(unlink, const char __user *, pathname)
* @dir: inode of the parent directory
* @dentry: dentry of the child symlink file
* @oldname: name of the file to link to
+ * @delegated_inode: returns parent inode, if the inode is delegated.
*
* Create a symlink.
*
@@ -4651,7 +5491,8 @@ SYSCALL_DEFINE1(unlink, const char __user *, pathname)
* raw inode simply pass @nop_mnt_idmap.
*/
int vfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, const char *oldname)
+ struct dentry *dentry, const char *oldname,
+ struct delegated_inode *delegated_inode)
{
int error;
@@ -4666,6 +5507,10 @@ int vfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
if (error)
return error;
+ error = try_break_deleg(dir, delegated_inode);
+ if (error)
+ return error;
+
error = dir->i_op->symlink(idmap, dir, dentry, oldname);
if (!error)
fsnotify_create(dir, dentry);
@@ -4679,6 +5524,7 @@ int do_symlinkat(struct filename *from, int newdfd, struct filename *to)
struct dentry *dentry;
struct path path;
unsigned int lookup_flags = 0;
+ struct delegated_inode delegated_inode = { };
if (IS_ERR(from)) {
error = PTR_ERR(from);
@@ -4693,8 +5539,13 @@ retry:
error = security_path_symlink(&path, dentry, from->name);
if (!error)
error = vfs_symlink(mnt_idmap(path.mnt), path.dentry->d_inode,
- dentry, from->name);
- done_path_create(&path, dentry);
+ dentry, from->name, &delegated_inode);
+ end_creating_path(&path, dentry);
+ if (is_delegated(&delegated_inode)) {
+ error = break_deleg_wait(&delegated_inode);
+ if (!error)
+ goto retry;
+ }
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
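Editor's note: the retry_estale()/LOOKUP_REVAL dance at the end of this hunk is the standard idiom for coping with stale file handles (NFS and friends): if the first pass fails with -ESTALE, the whole operation is repeated once with forced revalidation. A compressed sketch of the idiom, where do_something() is a hypothetical placeholder for the actual operation:

```c
/* Sketch of the ESTALE retry idiom used throughout these syscall helpers. */
static int example_with_estale_retry(int dfd, struct filename *name)
{
	unsigned int lookup_flags = 0;
	int error;

retry:
	error = do_something(dfd, name, lookup_flags);	/* hypothetical op */
	if (retry_estale(error, lookup_flags)) {
		/* Retries at most once: LOOKUP_REVAL is set on the 2nd pass. */
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}
```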
@@ -4724,13 +5575,13 @@ SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newn
* @new_dentry: where to create the new link
* @delegated_inode: returns inode needing a delegation break
*
- * The caller must hold dir->i_mutex
+ * The caller must hold dir->i_rwsem exclusively.
*
* If vfs_link discovers a delegation on the to-be-linked file in need
* of breaking, it will return -EWOULDBLOCK and return a reference to the
* inode in delegated_inode. The caller should then break the delegation
* and retry. Because breaking a delegation may take a long time, the
- * caller should drop the i_mutex before doing so.
+ * caller should drop the i_rwsem before doing so.
*
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
@@ -4744,7 +5595,7 @@ SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newn
*/
int vfs_link(struct dentry *old_dentry, struct mnt_idmap *idmap,
struct inode *dir, struct dentry *new_dentry,
- struct inode **delegated_inode)
+ struct delegated_inode *delegated_inode)
{
struct inode *inode = old_dentry->d_inode;
unsigned max_links = dir->i_sb->s_max_links;
@@ -4767,7 +5618,7 @@ int vfs_link(struct dentry *old_dentry, struct mnt_idmap *idmap,
return -EPERM;
/*
* Updating the link count will likely cause i_uid and i_gid to
- * be writen back improperly if their true value is unknown to
+ * be written back improperly if their true value is unknown to
* the vfs.
*/
if (HAS_UNMAPPED_ID(idmap, inode))
@@ -4783,19 +5634,21 @@ int vfs_link(struct dentry *old_dentry, struct mnt_idmap *idmap,
inode_lock(inode);
/* Make sure we don't allow creating hardlink to an unlinked file */
- if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE))
+ if (inode->i_nlink == 0 && !(inode_state_read_once(inode) & I_LINKABLE))
error = -ENOENT;
else if (max_links && inode->i_nlink >= max_links)
error = -EMLINK;
else {
- error = try_break_deleg(inode, delegated_inode);
+ error = try_break_deleg(dir, delegated_inode);
+ if (!error)
+ error = try_break_deleg(inode, delegated_inode);
if (!error)
error = dir->i_op->link(old_dentry, dir, new_dentry);
}
- if (!error && (inode->i_state & I_LINKABLE)) {
+ if (!error && (inode_state_read_once(inode) & I_LINKABLE)) {
spin_lock(&inode->i_lock);
- inode->i_state &= ~I_LINKABLE;
+ inode_state_clear(inode, I_LINKABLE);
spin_unlock(&inode->i_lock);
}
inode_unlock(inode);
@@ -4820,7 +5673,7 @@ int do_linkat(int olddfd, struct filename *old, int newdfd,
struct mnt_idmap *idmap;
struct dentry *new_dentry;
struct path old_path, new_path;
- struct inode *delegated_inode = NULL;
+ struct delegated_inode delegated_inode = { };
int how = 0;
int error;
@@ -4863,8 +5716,8 @@ retry:
error = vfs_link(old_path.dentry, idmap, new_path.dentry->d_inode,
new_dentry, &delegated_inode);
out_dput:
- done_path_create(&new_path, new_dentry);
- if (delegated_inode) {
+ end_creating_path(&new_path, new_dentry);
+ if (is_delegated(&delegated_inode)) {
error = break_deleg_wait(&delegated_inode);
if (!error) {
path_put(&old_path);
@@ -4926,7 +5779,7 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname
* c) we may have to lock up to _four_ objects - parents and victim (if it exists),
* and source (if it's a non-directory or a subdirectory that moves to
* different parent).
- * And that - after we got ->i_mutex on parents (until then we don't know
+ * And that - after we got ->i_rwsem on parents (until then we don't know
* whether the target exists). Solution: try to be smart with locking
* order for inodes. We rely on the fact that tree topology may change
* only under ->s_vfs_rename_mutex _and_ that parent of the object we
@@ -4938,18 +5791,19 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname
* has no more than 1 dentry. If "hybrid" objects will ever appear,
* we'd better make sure that there's no link(2) for them.
* d) conversion from fhandle to dentry may come in the wrong moment - when
- * we are removing the target. Solution: we will have to grab ->i_mutex
+ * we are removing the target. Solution: we will have to grab ->i_rwsem
* in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
- * ->i_mutex on parents, which works but leads to some truly excessive
+ * ->i_rwsem on parents, which works but leads to some truly excessive
* locking].
*/
int vfs_rename(struct renamedata *rd)
{
int error;
- struct inode *old_dir = rd->old_dir, *new_dir = rd->new_dir;
+ struct inode *old_dir = d_inode(rd->old_parent);
+ struct inode *new_dir = d_inode(rd->new_parent);
struct dentry *old_dentry = rd->old_dentry;
struct dentry *new_dentry = rd->new_dentry;
- struct inode **delegated_inode = rd->delegated_inode;
+ struct delegated_inode *delegated_inode = rd->delegated_inode;
unsigned int flags = rd->flags;
bool is_dir = d_is_dir(old_dentry);
struct inode *source = old_dentry->d_inode;
@@ -4962,20 +5816,20 @@ int vfs_rename(struct renamedata *rd)
if (source == target)
return 0;
- error = may_delete(rd->old_mnt_idmap, old_dir, old_dentry, is_dir);
+ error = may_delete(rd->mnt_idmap, old_dir, old_dentry, is_dir);
if (error)
return error;
if (!target) {
- error = may_create(rd->new_mnt_idmap, new_dir, new_dentry);
+ error = may_create(rd->mnt_idmap, new_dir, new_dentry);
} else {
new_is_dir = d_is_dir(new_dentry);
if (!(flags & RENAME_EXCHANGE))
- error = may_delete(rd->new_mnt_idmap, new_dir,
+ error = may_delete(rd->mnt_idmap, new_dir,
new_dentry, is_dir);
else
- error = may_delete(rd->new_mnt_idmap, new_dir,
+ error = may_delete(rd->mnt_idmap, new_dir,
new_dentry, new_is_dir);
}
if (error)
@@ -4990,13 +5844,13 @@ int vfs_rename(struct renamedata *rd)
*/
if (new_dir != old_dir) {
if (is_dir) {
- error = inode_permission(rd->old_mnt_idmap, source,
+ error = inode_permission(rd->mnt_idmap, source,
MAY_WRITE);
if (error)
return error;
}
if ((flags & RENAME_EXCHANGE) && new_is_dir) {
- error = inode_permission(rd->new_mnt_idmap, target,
+ error = inode_permission(rd->mnt_idmap, target,
MAY_WRITE);
if (error)
return error;
@@ -5054,6 +5908,14 @@ int vfs_rename(struct renamedata *rd)
old_dir->i_nlink >= max_links)
goto out;
}
+ error = try_break_deleg(old_dir, delegated_inode);
+ if (error)
+ goto out;
+ if (new_dir != old_dir) {
+ error = try_break_deleg(new_dir, delegated_inode);
+ if (error)
+ goto out;
+ }
if (!is_dir) {
error = try_break_deleg(source, delegated_inode);
if (error)
@@ -5064,7 +5926,7 @@ int vfs_rename(struct renamedata *rd)
if (error)
goto out;
}
- error = old_dir->i_op->rename(rd->new_mnt_idmap, old_dir, old_dentry,
+ error = old_dir->i_op->rename(rd->mnt_idmap, old_dir, old_dentry,
new_dir, new_dentry, flags);
if (error)
goto out;
@@ -5107,13 +5969,11 @@ int do_renameat2(int olddfd, struct filename *from, int newdfd,
struct filename *to, unsigned int flags)
{
struct renamedata rd;
- struct dentry *old_dentry, *new_dentry;
- struct dentry *trap;
struct path old_path, new_path;
struct qstr old_last, new_last;
int old_type, new_type;
- struct inode *delegated_inode = NULL;
- unsigned int lookup_flags = 0, target_flags = LOOKUP_RENAME_TARGET;
+ struct delegated_inode delegated_inode = { };
+ unsigned int lookup_flags = 0;
bool should_retry = false;
int error = -EINVAL;
@@ -5124,9 +5984,6 @@ int do_renameat2(int olddfd, struct filename *from, int newdfd,
(flags & RENAME_EXCHANGE))
goto put_names;
- if (flags & RENAME_EXCHANGE)
- target_flags = 0;
-
retry:
error = filename_parentat(olddfd, from, lookup_flags, &old_path,
&old_last, &old_type);
@@ -5156,80 +6013,42 @@ retry:
goto exit2;
retry_deleg:
- trap = lock_rename(new_path.dentry, old_path.dentry);
- if (IS_ERR(trap)) {
- error = PTR_ERR(trap);
+ rd.old_parent = old_path.dentry;
+ rd.mnt_idmap = mnt_idmap(old_path.mnt);
+ rd.new_parent = new_path.dentry;
+ rd.delegated_inode = &delegated_inode;
+ rd.flags = flags;
+
+ error = __start_renaming(&rd, lookup_flags, &old_last, &new_last);
+ if (error)
goto exit_lock_rename;
- }
- old_dentry = lookup_one_qstr_excl(&old_last, old_path.dentry,
- lookup_flags);
- error = PTR_ERR(old_dentry);
- if (IS_ERR(old_dentry))
- goto exit3;
- /* source must exist */
- error = -ENOENT;
- if (d_is_negative(old_dentry))
- goto exit4;
- new_dentry = lookup_one_qstr_excl(&new_last, new_path.dentry,
- lookup_flags | target_flags);
- error = PTR_ERR(new_dentry);
- if (IS_ERR(new_dentry))
- goto exit4;
- error = -EEXIST;
- if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry))
- goto exit5;
if (flags & RENAME_EXCHANGE) {
- error = -ENOENT;
- if (d_is_negative(new_dentry))
- goto exit5;
-
- if (!d_is_dir(new_dentry)) {
+ if (!d_is_dir(rd.new_dentry)) {
error = -ENOTDIR;
if (new_last.name[new_last.len])
- goto exit5;
+ goto exit_unlock;
}
}
/* unless the source is a directory trailing slashes give -ENOTDIR */
- if (!d_is_dir(old_dentry)) {
+ if (!d_is_dir(rd.old_dentry)) {
error = -ENOTDIR;
if (old_last.name[old_last.len])
- goto exit5;
+ goto exit_unlock;
if (!(flags & RENAME_EXCHANGE) && new_last.name[new_last.len])
- goto exit5;
+ goto exit_unlock;
}
- /* source should not be ancestor of target */
- error = -EINVAL;
- if (old_dentry == trap)
- goto exit5;
- /* target should not be an ancestor of source */
- if (!(flags & RENAME_EXCHANGE))
- error = -ENOTEMPTY;
- if (new_dentry == trap)
- goto exit5;
- error = security_path_rename(&old_path, old_dentry,
- &new_path, new_dentry, flags);
+ error = security_path_rename(&old_path, rd.old_dentry,
+ &new_path, rd.new_dentry, flags);
if (error)
- goto exit5;
-
- rd.old_dir = old_path.dentry->d_inode;
- rd.old_dentry = old_dentry;
- rd.old_mnt_idmap = mnt_idmap(old_path.mnt);
- rd.new_dir = new_path.dentry->d_inode;
- rd.new_dentry = new_dentry;
- rd.new_mnt_idmap = mnt_idmap(new_path.mnt);
- rd.delegated_inode = &delegated_inode;
- rd.flags = flags;
+ goto exit_unlock;
+
error = vfs_rename(&rd);
-exit5:
- dput(new_dentry);
-exit4:
- dput(old_dentry);
-exit3:
- unlock_rename(new_path.dentry, old_path.dentry);
+exit_unlock:
+ end_renaming(&rd);
exit_lock_rename:
- if (delegated_inode) {
+ if (is_delegated(&delegated_inode)) {
error = break_deleg_wait(&delegated_inode);
if (!error)
goto retry_deleg;
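Editor's note: do_renameat2() now fills struct renamedata with the parent *dentries* (old_parent/new_parent) and a single mnt_idmap, replacing the old old_dir/new_dir inode pair and the per-side idmaps. A hedged sketch of what an in-kernel caller of vfs_rename() looks like under the field names visible in this hunk; __start_renaming()/end_renaming() are fs-internal, so an external caller would still lock the parents itself (e.g. via lock_rename()). Whether NULL remains acceptable for delegated_inode is not shown here; the vfs_link() kerneldoc above suggests it is, for filesystems that never hand out delegations.

```c
/* Sketch: calling vfs_rename() with the reworked struct renamedata. */
static int example_rename(struct mnt_idmap *idmap,
			  struct dentry *old_parent, struct dentry *old_dentry,
			  struct dentry *new_parent, struct dentry *new_dentry)
{
	struct renamedata rd = {
		.mnt_idmap	 = idmap,
		.old_parent	 = old_parent,
		.old_dentry	 = old_dentry,
		.new_parent	 = new_parent,
		.new_dentry	 = new_dentry,
		.delegated_inode = NULL,	/* caller ignores delegations */
		.flags		 = 0,
	};

	/* Both parents must already be locked, e.g. via lock_rename(). */
	return vfs_rename(&rd);
}
```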
@@ -5272,19 +6091,16 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
getname(newname), 0);
}
-int readlink_copy(char __user *buffer, int buflen, const char *link)
+int readlink_copy(char __user *buffer, int buflen, const char *link, int linklen)
{
- int len = PTR_ERR(link);
- if (IS_ERR(link))
- goto out;
+ int copylen;
- len = strlen(link);
- if (len > (unsigned) buflen)
- len = buflen;
- if (copy_to_user(buffer, link, len))
- len = -EFAULT;
-out:
- return len;
+ copylen = linklen;
+ if (unlikely(copylen > (unsigned) buflen))
+ copylen = buflen;
+ if (copy_to_user(buffer, link, copylen))
+ copylen = -EFAULT;
+ return copylen;
}
/**
@@ -5304,6 +6120,9 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
const char *link;
int res;
+ if (inode->i_opflags & IOP_CACHED_LINK)
+ return readlink_copy(buffer, buflen, inode->i_link, inode->i_linklen);
+
if (unlikely(!(inode->i_opflags & IOP_DEFAULT_READLINK))) {
if (unlikely(inode->i_op->readlink))
return inode->i_op->readlink(dentry, buffer, buflen);
@@ -5322,7 +6141,7 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
if (IS_ERR(link))
return PTR_ERR(link);
}
- res = readlink_copy(buffer, buflen, link);
+ res = readlink_copy(buffer, buflen, link, strlen(link));
do_delayed_call(&done);
return res;
}
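Editor's note: the new IOP_CACHED_LINK branch lets vfs_readlink() copy the symlink body straight out of the inode, without going through ->get_link() at all, using the cached i_link/i_linklen pair. A hedged sketch of how a filesystem might populate that cache while the inode is being set up (the helper name below is made up; only the flag and the two fields are visible in this hunk, and the cached buffer must stay valid and NUL-terminated for the inode's lifetime):

```c
/* Sketch: caching a symlink body so vfs_readlink() can take the fast path. */
static void example_set_cached_link(struct inode *inode, char *link, int linklen)
{
	/* Assumption: called before the inode becomes visible to lookups. */
	inode->i_link = link;
	inode->i_linklen = linklen;
	inode->i_opflags |= IOP_CACHED_LINK;
}
```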
@@ -5354,47 +6173,89 @@ const char *vfs_get_link(struct dentry *dentry, struct delayed_call *done)
EXPORT_SYMBOL(vfs_get_link);
/* get the link contents into pagecache */
-const char *page_get_link(struct dentry *dentry, struct inode *inode,
- struct delayed_call *callback)
+static char *__page_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
{
- char *kaddr;
- struct page *page;
+ struct folio *folio;
struct address_space *mapping = inode->i_mapping;
if (!dentry) {
- page = find_get_page(mapping, 0);
- if (!page)
+ folio = filemap_get_folio(mapping, 0);
+ if (IS_ERR(folio))
return ERR_PTR(-ECHILD);
- if (!PageUptodate(page)) {
- put_page(page);
+ if (!folio_test_uptodate(folio)) {
+ folio_put(folio);
return ERR_PTR(-ECHILD);
}
} else {
- page = read_mapping_page(mapping, 0, NULL);
- if (IS_ERR(page))
- return (char*)page;
+ folio = read_mapping_folio(mapping, 0, NULL);
+ if (IS_ERR(folio))
+ return ERR_CAST(folio);
}
- set_delayed_call(callback, page_put_link, page);
+ set_delayed_call(callback, page_put_link, folio);
BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM);
- kaddr = page_address(page);
- nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1);
- return kaddr;
+ return folio_address(folio);
+}
+
+const char *page_get_link_raw(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
+{
+ return __page_get_link(dentry, inode, callback);
}
+EXPORT_SYMBOL_GPL(page_get_link_raw);
+/**
+ * page_get_link() - An implementation of the get_link inode_operation.
+ * @dentry: The directory entry which is the symlink.
+ * @inode: The inode for the symlink.
+ * @callback: Used to drop the reference to the symlink.
+ *
+ * Filesystems which store their symlinks in the page cache should use
+ * this to implement the get_link() member of their inode_operations.
+ *
+ * Return: A pointer to the NUL-terminated symlink.
+ */
+const char *page_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
+{
+ char *kaddr = __page_get_link(dentry, inode, callback);
+
+ if (!IS_ERR(kaddr))
+ nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1);
+ return kaddr;
+}
EXPORT_SYMBOL(page_get_link);
+/**
+ * page_put_link() - Drop the reference to the symlink.
+ * @arg: The folio which contains the symlink.
+ *
+ * This is used internally by page_get_link(). It is exported for use
+ * by filesystems which need to implement a variant of page_get_link()
+ * themselves. Despite the apparent symmetry, filesystems which use
+ * page_get_link() do not need to call page_put_link().
+ *
+ * The argument, while it has a void pointer type, must be a pointer to
+ * the folio which was retrieved from the page cache. The delayed_call
+ * infrastructure is used to drop the reference count once the caller
+ * is done with the symlink.
+ */
void page_put_link(void *arg)
{
- put_page(arg);
+ folio_put(arg);
}
EXPORT_SYMBOL(page_put_link);
int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
+ const char *link;
+ int res;
+
DEFINE_DELAYED_CALL(done);
- int res = readlink_copy(buffer, buflen,
- page_get_link(dentry, d_inode(dentry),
- &done));
+ link = page_get_link(dentry, d_inode(dentry), &done);
+ res = PTR_ERR(link);
+ if (!IS_ERR(link))
+ res = readlink_copy(buffer, buflen, link, strlen(link));
do_delayed_call(&done);
return res;
}
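Editor's note: despite the folio conversion, the way filesystems consume page_get_link() is unchanged: it is plugged into inode_operations as the ->get_link() implementation for symlinks whose body lives in the page cache, while the new page_get_link_raw() variant is for callers that need the untreated buffer. A minimal sketch; in practice most filesystems just point at the stock page_symlink_inode_operations, which amounts to the same thing.

```c
/* Sketch: typical inode_operations for a page-cache backed symlink. */
static const struct inode_operations example_symlink_iops = {
	.get_link = page_get_link,
	/* ->readlink is optional; the VFS falls back to ->get_link(). */
};
```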
diff --git a/fs/namespace.c b/fs/namespace.c
index 23e81c2a1e3f..c58674a20cad 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -32,7 +32,8 @@
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
#include <linux/mnt_idmapping.h>
-#include <linux/nospec.h>
+#include <linux/pidfs.h>
+#include <linux/nstree.h>
#include "pnode.h"
#include "internal.h"
@@ -65,13 +66,22 @@ static int __init set_mphash_entries(char *str)
}
__setup("mphash_entries=", set_mphash_entries);
+static char * __initdata initramfs_options;
+static int __init initramfs_options_setup(char *str)
+{
+ initramfs_options = str;
+ return 1;
+}
+
+__setup("initramfs_options=", initramfs_options_setup);
+
static u64 event;
-static DEFINE_IDA(mnt_id_ida);
+static DEFINE_XARRAY_FLAGS(mnt_id_xa, XA_FLAGS_ALLOC);
static DEFINE_IDA(mnt_group_ida);
/* Don't allow confusion with old 32bit mount ID */
#define MNT_UNIQUE_ID_OFFSET (1ULL << 31)
-static atomic64_t mnt_id_ctr = ATOMIC64_INIT(MNT_UNIQUE_ID_OFFSET);
+static u64 mnt_id_ctr = MNT_UNIQUE_ID_OFFSET;
static struct hlist_head *mount_hashtable __ro_after_init;
static struct hlist_head *mountpoint_hashtable __ro_after_init;
@@ -79,15 +89,31 @@ static struct kmem_cache *mnt_cache __ro_after_init;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted); /* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
-static DEFINE_RWLOCK(mnt_ns_tree_lock);
-static struct rb_root mnt_ns_tree = RB_ROOT; /* protected by mnt_ns_tree_lock */
+static struct mnt_namespace *emptied_ns; /* protected by namespace_sem */
+
+static inline void namespace_lock(void);
+static void namespace_unlock(void);
+DEFINE_LOCK_GUARD_0(namespace_excl, namespace_lock(), namespace_unlock())
+DEFINE_LOCK_GUARD_0(namespace_shared, down_read(&namespace_sem),
+ up_read(&namespace_sem))
+
+DEFINE_FREE(mntput, struct vfsmount *, if (!IS_ERR(_T)) mntput(_T))
+
+#ifdef CONFIG_FSNOTIFY
+LIST_HEAD(notify_list); /* protected by namespace_sem */
+#endif
+
+enum mount_kattr_flags_t {
+ MOUNT_KATTR_RECURSE = (1 << 0),
+ MOUNT_KATTR_IDMAP_REPLACE = (1 << 1),
+};
struct mount_kattr {
unsigned int attr_set;
unsigned int attr_clr;
unsigned int propagation;
unsigned int lookup_flags;
- bool recurse;
+ enum mount_kattr_flags_t kflags;
struct user_namespace *mnt_userns;
struct mnt_idmap *mnt_idmap;
};
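Editor's note: the two DEFINE_LOCK_GUARD_0() lines above generate scope-based lock classes, so later hunks can write guard(namespace_excl)() or guard(namespace_shared)() instead of pairing namespace_lock()/namespace_unlock() (or down_read()/up_read()) by hand; the lock is released automatically when the enclosing scope ends. A hedged illustration of the resulting style, using a hypothetical helper (the tree itself provides mnt_ns_empty() for this particular check):

```c
/* Sketch: scope-based locking with the guards defined above. */
static bool example_ns_is_empty(struct mnt_namespace *ns)
{
	guard(namespace_shared)();	/* down_read(&namespace_sem) ... */

	return RB_EMPTY_ROOT(&ns->mounts);
}					/* ... up_read() runs automatically here */
```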
@@ -106,86 +132,30 @@ EXPORT_SYMBOL_GPL(fs_kobj);
*/
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
-static int mnt_ns_cmp(u64 seq, const struct mnt_namespace *ns)
-{
- u64 seq_b = ns->seq;
-
- if (seq < seq_b)
- return -1;
- if (seq > seq_b)
- return 1;
- return 0;
-}
-
-static inline struct mnt_namespace *node_to_mnt_ns(const struct rb_node *node)
-{
- if (!node)
- return NULL;
- return rb_entry(node, struct mnt_namespace, mnt_ns_tree_node);
-}
-
-static bool mnt_ns_less(struct rb_node *a, const struct rb_node *b)
-{
- struct mnt_namespace *ns_a = node_to_mnt_ns(a);
- struct mnt_namespace *ns_b = node_to_mnt_ns(b);
- u64 seq_a = ns_a->seq;
-
- return mnt_ns_cmp(seq_a, ns_b) < 0;
-}
-
-static void mnt_ns_tree_add(struct mnt_namespace *ns)
-{
- guard(write_lock)(&mnt_ns_tree_lock);
- rb_add(&ns->mnt_ns_tree_node, &mnt_ns_tree, mnt_ns_less);
-}
-
static void mnt_ns_release(struct mnt_namespace *ns)
{
- lockdep_assert_not_held(&mnt_ns_tree_lock);
-
/* keep alive for {list,stat}mount() */
- if (refcount_dec_and_test(&ns->passive)) {
+ if (ns && refcount_dec_and_test(&ns->passive)) {
+ fsnotify_mntns_delete(ns);
put_user_ns(ns->user_ns);
kfree(ns);
}
}
-DEFINE_FREE(mnt_ns_release, struct mnt_namespace *, if (_T) mnt_ns_release(_T))
+DEFINE_FREE(mnt_ns_release, struct mnt_namespace *,
+ if (!IS_ERR(_T)) mnt_ns_release(_T))
-static void mnt_ns_tree_remove(struct mnt_namespace *ns)
+static void mnt_ns_release_rcu(struct rcu_head *rcu)
{
- /* remove from global mount namespace list */
- if (!is_anon_ns(ns)) {
- guard(write_lock)(&mnt_ns_tree_lock);
- rb_erase(&ns->mnt_ns_tree_node, &mnt_ns_tree);
- }
-
- mnt_ns_release(ns);
+ mnt_ns_release(container_of(rcu, struct mnt_namespace, ns.ns_rcu));
}
-/*
- * Returns the mount namespace which either has the specified id, or has the
- * next smallest id afer the specified one.
- */
-static struct mnt_namespace *mnt_ns_find_id_at(u64 mnt_ns_id)
+static void mnt_ns_tree_remove(struct mnt_namespace *ns)
{
- struct rb_node *node = mnt_ns_tree.rb_node;
- struct mnt_namespace *ret = NULL;
-
- lockdep_assert_held(&mnt_ns_tree_lock);
-
- while (node) {
- struct mnt_namespace *n = node_to_mnt_ns(node);
+ /* remove from global mount namespace list */
+ if (ns_tree_active(ns))
+ ns_tree_remove(ns);
- if (mnt_ns_id <= n->seq) {
- ret = node_to_mnt_ns(node);
- if (mnt_ns_id == n->seq)
- break;
- node = node->rb_left;
- } else {
- node = node->rb_right;
- }
- }
- return ret;
+ call_rcu(&ns->ns.ns_rcu, mnt_ns_release_rcu);
}
/*
@@ -195,18 +165,30 @@ static struct mnt_namespace *mnt_ns_find_id_at(u64 mnt_ns_id)
* namespace the @namespace_sem must first be acquired. If the namespace has
* already shut down before acquiring @namespace_sem, {list,stat}mount() will
* see that the mount rbtree of the namespace is empty.
+ *
+ * Note the lookup is lockless, protected by a sequence counter. We only
+ * need to guard against false negatives, as false positives aren't
+ * possible. So if we didn't find a mount namespace and the sequence
+ * counter has changed, we need to retry. If the sequence counter is
+ * still the same, we know the search actually failed.
*/
static struct mnt_namespace *lookup_mnt_ns(u64 mnt_ns_id)
{
- struct mnt_namespace *ns;
+ struct mnt_namespace *mnt_ns;
+ struct ns_common *ns;
- guard(read_lock)(&mnt_ns_tree_lock);
- ns = mnt_ns_find_id_at(mnt_ns_id);
- if (!ns || ns->seq != mnt_ns_id)
- return NULL;
+ guard(rcu)();
+ ns = ns_tree_lookup_rcu(mnt_ns_id, CLONE_NEWNS);
+ if (!ns)
+ return NULL;
- refcount_inc(&ns->passive);
- return ns;
+ /*
+ * The last reference is dropped with an RCU delay, so we can
+ * unconditionally acquire a reference here.
+ */
+ mnt_ns = container_of(ns, struct mnt_namespace, ns);
+ refcount_inc(&mnt_ns->passive);
+ return mnt_ns;
}
static inline void lock_mount_hash(void)
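Editor's note: the rewritten lookup_mnt_ns() relies on the structure's own reference only being dropped after an RCU grace period (see mnt_ns_tree_remove() above), so anything still reachable under rcu_read_lock() has a non-zero passive count and can be pinned with a plain refcount_inc(). The general shape of that pattern, sketched with a hypothetical object and a hypothetical example_tree_lookup_rcu() helper:

```c
/* Sketch of the RCU lookup + deferred-drop pattern used by lookup_mnt_ns(). */
struct example_obj {
	refcount_t passive;
	struct rcu_head rcu;
};

static void example_drop_rcu(struct rcu_head *rcu)
{
	struct example_obj *obj = container_of(rcu, struct example_obj, rcu);

	if (refcount_dec_and_test(&obj->passive))
		kfree(obj);
}

static void example_remove(struct example_obj *obj)
{
	/* unlink from the lookup structure, then defer its reference drop */
	call_rcu(&obj->rcu, example_drop_rcu);
}

static struct example_obj *example_lookup(u64 id)
{
	struct example_obj *obj;

	guard(rcu)();
	obj = example_tree_lookup_rcu(id);	/* hypothetical lookup helper */
	if (!obj)
		return NULL;
	refcount_inc(&obj->passive);		/* safe: cannot be zero here */
	return obj;
}
```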
@@ -236,18 +218,19 @@ static inline struct hlist_head *mp_hash(struct dentry *dentry)
static int mnt_alloc_id(struct mount *mnt)
{
- int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);
+ int res;
- if (res < 0)
- return res;
- mnt->mnt_id = res;
- mnt->mnt_id_unique = atomic64_inc_return(&mnt_id_ctr);
- return 0;
+ xa_lock(&mnt_id_xa);
+ res = __xa_alloc(&mnt_id_xa, &mnt->mnt_id, mnt, XA_LIMIT(1, INT_MAX), GFP_KERNEL);
+ if (!res)
+ mnt->mnt_id_unique = ++mnt_id_ctr;
+ xa_unlock(&mnt_id_xa);
+ return res;
}
static void mnt_free_id(struct mount *mnt)
{
- ida_free(&mnt_id_ida, mnt->mnt_id);
+ xa_erase(&mnt_id_xa, mnt->mnt_id);
}
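Editor's note: mnt_alloc_id() now draws the small mount ID from an XArray instead of an IDA, which gives an ID-to-struct-mount lookup for free and lets the 64-bit unique counter be bumped under the same xa_lock (hence __xa_alloc() inside the lock above). The allocate/release pairing is the stock XArray idiom; a generic sketch:

```c
/* Sketch: XArray-based ID allocation, mirroring mnt_alloc_id()/mnt_free_id(). */
static DEFINE_XARRAY_FLAGS(example_ids, XA_FLAGS_ALLOC);

struct example_obj {
	u32 id;
	/* ... */
};

static int example_alloc_id(struct example_obj *obj)
{
	/* Stores obj at the allocated index, so xa_load(&example_ids, id)
	 * can later map the ID back to the object. */
	return xa_alloc(&example_ids, &obj->id, obj,
			XA_LIMIT(1, INT_MAX), GFP_KERNEL);
}

static void example_free_id(struct example_obj *obj)
{
	xa_erase(&example_ids, obj->id);
}
```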
/*
@@ -315,12 +298,13 @@ static struct mount *alloc_vfsmnt(const char *name)
if (err)
goto out_free_cache;
- if (name) {
+ if (name)
mnt->mnt_devname = kstrdup_const(name,
GFP_KERNEL_ACCOUNT);
- if (!mnt->mnt_devname)
- goto out_free_id;
- }
+ else
+ mnt->mnt_devname = "none";
+ if (!mnt->mnt_devname)
+ goto out_free_id;
#ifdef CONFIG_SMP
mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
@@ -339,11 +323,11 @@ static struct mount *alloc_vfsmnt(const char *name)
INIT_LIST_HEAD(&mnt->mnt_list);
INIT_LIST_HEAD(&mnt->mnt_expire);
INIT_LIST_HEAD(&mnt->mnt_share);
- INIT_LIST_HEAD(&mnt->mnt_slave_list);
- INIT_LIST_HEAD(&mnt->mnt_slave);
+ INIT_HLIST_HEAD(&mnt->mnt_slave_list);
+ INIT_HLIST_NODE(&mnt->mnt_slave);
INIT_HLIST_NODE(&mnt->mnt_mp_list);
- INIT_LIST_HEAD(&mnt->mnt_umounting);
INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
+ RB_CLEAR_NODE(&mnt->mnt_node);
mnt->mnt.mnt_idmap = &nop_mnt_idmap;
}
return mnt;
@@ -378,7 +362,7 @@ out_free_cache:
* mnt_want/drop_write() will _keep_ the filesystem
* r/w.
*/
-bool __mnt_is_readonly(struct vfsmount *mnt)
+bool __mnt_is_readonly(const struct vfsmount *mnt)
{
return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
@@ -418,7 +402,7 @@ static unsigned int mnt_get_writers(struct mount *mnt)
#endif
}
-static int mnt_is_readonly(struct vfsmount *mnt)
+static int mnt_is_readonly(const struct vfsmount *mnt)
{
if (READ_ONCE(mnt->mnt_sb->s_readonly_remount))
return 1;
@@ -459,31 +443,31 @@ int mnt_get_write_access(struct vfsmount *m)
mnt_inc_writers(mnt);
/*
* The store to mnt_inc_writers must be visible before we pass
- * MNT_WRITE_HOLD loop below, so that the slowpath can see our
- * incremented count after it has set MNT_WRITE_HOLD.
+ * WRITE_HOLD loop below, so that the slowpath can see our
+ * incremented count after it has set WRITE_HOLD.
*/
smp_mb();
might_lock(&mount_lock.lock);
- while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
+ while (__test_write_hold(READ_ONCE(mnt->mnt_pprev_for_sb))) {
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
cpu_relax();
} else {
/*
* This prevents priority inversion, if the task
- * setting MNT_WRITE_HOLD got preempted on a remote
+ * setting WRITE_HOLD got preempted on a remote
* CPU, and it prevents livelock if the task setting
- * MNT_WRITE_HOLD has a lower priority and is bound to
+ * WRITE_HOLD has a lower priority and is bound to
* the same CPU as the task that is spinning here.
*/
preempt_enable();
- lock_mount_hash();
- unlock_mount_hash();
+ read_seqlock_excl(&mount_lock);
+ read_sequnlock_excl(&mount_lock);
preempt_disable();
}
}
/*
* The barrier pairs with the barrier sb_start_ro_state_change() making
- * sure that if we see MNT_WRITE_HOLD cleared, we will also see
+ * sure that if we see WRITE_HOLD cleared, we will also see
* s_readonly_remount set (or even SB_RDONLY / MNT_READONLY flags) in
* mnt_is_readonly() and bail in case we are racing with remount
* read-only.
@@ -621,16 +605,16 @@ EXPORT_SYMBOL(mnt_drop_write_file);
* a call to mnt_unhold_writers() in order to stop preventing write access to
* @mnt.
*
- * Context: This function expects lock_mount_hash() to be held serializing
- * setting MNT_WRITE_HOLD.
+ * Context: This function expects to be called within a mount_locked_reader
+ * scope, which serializes setting WRITE_HOLD.
* Return: On success 0 is returned.
* On error, -EBUSY is returned.
*/
static inline int mnt_hold_writers(struct mount *mnt)
{
- mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
+ set_write_hold(mnt);
/*
- * After storing MNT_WRITE_HOLD, we'll read the counters. This store
+ * After storing WRITE_HOLD, we'll read the counters. This store
* should be visible before we do.
*/
smp_mb();
@@ -646,9 +630,9 @@ static inline int mnt_hold_writers(struct mount *mnt)
* sum up each counter, if we read a counter before it is incremented,
* but then read another CPU's count which it has been subsequently
* decremented from -- we would see more decrements than we should.
- * MNT_WRITE_HOLD protects against this scenario, because
+ * WRITE_HOLD protects against this scenario, because
* mnt_want_write first increments count, then smp_mb, then spins on
- * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
+ * WRITE_HOLD, so it can't be decremented by another CPU while
* we're counting up here.
*/
if (mnt_get_writers(mnt) > 0)
@@ -664,19 +648,42 @@ static inline int mnt_hold_writers(struct mount *mnt)
* Stop preventing write access to @mnt allowing callers to gain write access
* to @mnt again.
*
- * This function can only be called after a successful call to
- * mnt_hold_writers().
+ * This function can only be called after a call to mnt_hold_writers().
*
- * Context: This function expects lock_mount_hash() to be held.
+ * Context: This function expects to be in the same mount_locked_reader scope
+ * as the matching mnt_hold_writers().
*/
static inline void mnt_unhold_writers(struct mount *mnt)
{
+ if (!test_write_hold(mnt))
+ return;
/*
- * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
+ * MNT_READONLY must become visible before ~WRITE_HOLD, so writers
* that become unheld will see MNT_READONLY.
*/
smp_wmb();
- mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
+ clear_write_hold(mnt);
+}
+
+static inline void mnt_del_instance(struct mount *m)
+{
+ struct mount **p = m->mnt_pprev_for_sb;
+ struct mount *next = m->mnt_next_for_sb;
+
+ if (next)
+ next->mnt_pprev_for_sb = p;
+ *p = next;
+}
+
+static inline void mnt_add_instance(struct mount *m, struct super_block *s)
+{
+ struct mount *first = s->s_mounts;
+
+ if (first)
+ first->mnt_pprev_for_sb = &m->mnt_next_for_sb;
+ m->mnt_next_for_sb = first;
+ m->mnt_pprev_for_sb = &s->s_mounts;
+ s->s_mounts = m;
}
static int mnt_make_readonly(struct mount *mnt)
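Editor's note: mnt_add_instance()/mnt_del_instance() above turn sb->s_mounts into an hlist-style intrusive singly linked list. Each mount stores a pointer to whatever points at it (either the superblock's s_mounts field or the previous mount's mnt_next_for_sb), which is what makes removal O(1) without knowing the list head. A generic sketch of that pprev trick, stripped of the mount-specific names:

```c
/* Sketch: the pprev-style intrusive list used for sb->s_mounts above. */
struct node {
	struct node *next;
	struct node **pprev;	/* &head, or &prev->next */
};

static void node_add(struct node *n, struct node **head)
{
	if (*head)
		(*head)->pprev = &n->next;
	n->next = *head;
	n->pprev = head;
	*head = n;
}

static void node_del(struct node *n)
{
	*n->pprev = n->next;			/* unhook from predecessor */
	if (n->next)
		n->next->pprev = n->pprev;	/* fix successor's back link */
}
```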
@@ -692,17 +699,17 @@ static int mnt_make_readonly(struct mount *mnt)
int sb_prepare_remount_readonly(struct super_block *sb)
{
- struct mount *mnt;
int err = 0;
- /* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */
+ /* Racy optimization. Recheck the counter under WRITE_HOLD */
if (atomic_long_read(&sb->s_remove_count))
return -EBUSY;
- lock_mount_hash();
- list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
- if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
- err = mnt_hold_writers(mnt);
+ guard(mount_locked_reader)();
+
+ for (struct mount *m = sb->s_mounts; m; m = m->mnt_next_for_sb) {
+ if (!(m->mnt.mnt_flags & MNT_READONLY)) {
+ err = mnt_hold_writers(m);
if (err)
break;
}
@@ -712,11 +719,10 @@ int sb_prepare_remount_readonly(struct super_block *sb)
if (!err)
sb_start_ro_state_change(sb);
- list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
- if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
- mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
+ for (struct mount *m = sb->s_mounts; m; m = m->mnt_next_for_sb) {
+ if (test_write_hold(m))
+ clear_write_hold(m);
}
- unlock_mount_hash();
return err;
}
@@ -746,15 +752,11 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
return 0;
mnt = real_mount(bastard);
mnt_add_count(mnt, 1);
- smp_mb(); // see mntput_no_expire()
+ smp_mb(); // see mntput_no_expire() and do_umount()
if (likely(!read_seqretry(&mount_lock, seq)))
return 0;
- if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
- mnt_add_count(mnt, -1);
- return 1;
- }
lock_mount_hash();
- if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
+ if (unlikely(bastard->mnt_flags & (MNT_SYNC_UMOUNT | MNT_DOOMED))) {
mnt_add_count(mnt, -1);
unlock_mount_hash();
return 1;
@@ -779,24 +781,16 @@ static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
}
/**
- * __lookup_mnt - find first child mount
+ * __lookup_mnt - mount hash lookup
* @mnt: parent mount
- * @dentry: mountpoint
- *
- * If @mnt has a child mount @c mounted @dentry find and return it.
+ * @dentry: dentry of mountpoint
*
- * Note that the child mount @c need not be unique. There are cases
- * where shadow mounts are created. For example, during mount
- * propagation when a source mount @mnt whose root got overmounted by a
- * mount @o after path lookup but before @namespace_sem could be
- * acquired gets copied and propagated. So @mnt gets copied including
- * @o. When @mnt is propagated to a destination mount @d that already
- * has another mount @n mounted at the same mountpoint then the source
- * mount @mnt will be tucked beneath @n, i.e., @n will be mounted on
- * @mnt and @mnt mounted on @d. Now both @n and @o are mounted at @mnt
- * on @dentry.
+ * If @mnt has a child mount @c mounted on @dentry find and return it.
+ * Caller must either hold the spinlock component of @mount_lock or
+ * hold rcu_read_lock(), sample the seqcount component before the call
+ * and recheck it afterwards.
*
- * Return: The first child of @mnt mounted @dentry or NULL.
+ * Return: The child of @mnt mounted on @dentry or %NULL.
*/
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
@@ -809,21 +803,12 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
return NULL;
}
-/*
- * lookup_mnt - Return the first child mount mounted at path
- *
- * "First" means first mounted chronologically. If you create the
- * following mounts:
- *
- * mount /dev/sda1 /mnt
- * mount /dev/sda2 /mnt
- * mount /dev/sda3 /mnt
- *
- * Then lookup_mnt() on the base /mnt dentry in the root mount will
- * return successively the root dentry and vfsmount of /dev/sda1, then
- * /dev/sda2, then /dev/sda3, then NULL.
+/**
+ * lookup_mnt - Return the child mount mounted at given location
+ * @path: location in the namespace
*
- * lookup_mnt takes a reference to the found vfsmount.
+ * Acquires and returns a new reference to the mount at the given location
+ * or %NULL if nothing is mounted there.
*/
struct vfsmount *lookup_mnt(const struct path *path)
{
@@ -856,59 +841,63 @@ struct vfsmount *lookup_mnt(const struct path *path)
* namespace not just a mount that happens to have some specified
* parent mount.
*/
-bool __is_local_mountpoint(struct dentry *dentry)
+bool __is_local_mountpoint(const struct dentry *dentry)
{
struct mnt_namespace *ns = current->nsproxy->mnt_ns;
struct mount *mnt, *n;
- bool is_covered = false;
- down_read(&namespace_sem);
- rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
- is_covered = (mnt->mnt_mountpoint == dentry);
- if (is_covered)
- break;
- }
- up_read(&namespace_sem);
+ guard(namespace_shared)();
+
+ rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node)
+ if (mnt->mnt_mountpoint == dentry)
+ return true;
- return is_covered;
+ return false;
}
-static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
+struct pinned_mountpoint {
+ struct hlist_node node;
+ struct mountpoint *mp;
+ struct mount *parent;
+};
+
+static bool lookup_mountpoint(struct dentry *dentry, struct pinned_mountpoint *m)
{
struct hlist_head *chain = mp_hash(dentry);
struct mountpoint *mp;
hlist_for_each_entry(mp, chain, m_hash) {
if (mp->m_dentry == dentry) {
- mp->m_count++;
- return mp;
+ hlist_add_head(&m->node, &mp->m_list);
+ m->mp = mp;
+ return true;
}
}
- return NULL;
+ return false;
}
-static struct mountpoint *get_mountpoint(struct dentry *dentry)
+static int get_mountpoint(struct dentry *dentry, struct pinned_mountpoint *m)
{
- struct mountpoint *mp, *new = NULL;
+ struct mountpoint *mp __free(kfree) = NULL;
+ bool found;
int ret;
if (d_mountpoint(dentry)) {
/* might be worth a WARN_ON() */
if (d_unlinked(dentry))
- return ERR_PTR(-ENOENT);
+ return -ENOENT;
mountpoint:
read_seqlock_excl(&mount_lock);
- mp = lookup_mountpoint(dentry);
+ found = lookup_mountpoint(dentry, m);
read_sequnlock_excl(&mount_lock);
- if (mp)
- goto done;
+ if (found)
+ return 0;
}
- if (!new)
- new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
- if (!new)
- return ERR_PTR(-ENOMEM);
-
+ if (!mp)
+ mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
+ if (!mp)
+ return -ENOMEM;
/* Exactly one processes may set d_mounted */
ret = d_set_mounted(dentry);
@@ -918,34 +907,28 @@ mountpoint:
goto mountpoint;
/* The dentry is not available as a mountpoint? */
- mp = ERR_PTR(ret);
if (ret)
- goto done;
+ return ret;
/* Add the new mountpoint to the hash table */
read_seqlock_excl(&mount_lock);
- new->m_dentry = dget(dentry);
- new->m_count = 1;
- hlist_add_head(&new->m_hash, mp_hash(dentry));
- INIT_HLIST_HEAD(&new->m_list);
+ mp->m_dentry = dget(dentry);
+ hlist_add_head(&mp->m_hash, mp_hash(dentry));
+ INIT_HLIST_HEAD(&mp->m_list);
+ hlist_add_head(&m->node, &mp->m_list);
+ m->mp = no_free_ptr(mp);
read_sequnlock_excl(&mount_lock);
-
- mp = new;
- new = NULL;
-done:
- kfree(new);
- return mp;
+ return 0;
}
/*
* vfsmount lock must be held. Additionally, the caller is responsible
* for serializing calls for given disposal list.
*/
-static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
+static void maybe_free_mountpoint(struct mountpoint *mp, struct list_head *list)
{
- if (!--mp->m_count) {
+ if (hlist_empty(&mp->m_list)) {
struct dentry *dentry = mp->m_dentry;
- BUG_ON(!hlist_empty(&mp->m_list));
spin_lock(&dentry->d_lock);
dentry->d_flags &= ~DCACHE_MOUNTED;
spin_unlock(&dentry->d_lock);
@@ -955,17 +938,33 @@ static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
}
}
-/* called with namespace_lock and vfsmount lock */
-static void put_mountpoint(struct mountpoint *mp)
+/*
+ * locks: mount_lock [read_seqlock_excl], namespace_sem [excl]
+ */
+static void unpin_mountpoint(struct pinned_mountpoint *m)
{
- __put_mountpoint(mp, &ex_mountpoints);
+ if (m->mp) {
+ hlist_del(&m->node);
+ maybe_free_mountpoint(m->mp, &ex_mountpoints);
+ }
}
-static inline int check_mnt(struct mount *mnt)
+static inline int check_mnt(const struct mount *mnt)
{
return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
+static inline bool check_anonymous_mnt(struct mount *mnt)
+{
+ u64 seq;
+
+ if (!is_anon_ns(mnt->mnt_ns))
+ return false;
+
+ seq = mnt->mnt_ns->seq_origin;
+ return !seq || (seq == current->nsproxy->mnt_ns->ns.ns_id);
+}
+
/*
* vfsmount lock must be held for write
*/
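Editor's note: get_mountpoint() and unpin_mountpoint() above replace the old m_count-based scheme: a caller pins a mountpoint by linking its on-stack struct pinned_mountpoint onto mp->m_list, and the mountpoint is only freed once that hlist drains. A hedged sketch of the resulting calling convention; the locking shown matches what the comments above require (namespace_sem exclusive, plus the seqlock side of mount_lock around unpin), but a real caller's lock scope may be wider, as in __detach_mounts() further down.

```c
/* Sketch: pinning and releasing a mountpoint with the new helpers. */
static void example_use_mountpoint(struct dentry *dentry)
{
	struct pinned_mountpoint mp = {};
	int err;

	guard(namespace_excl)();		/* namespace_sem, exclusive */

	err = get_mountpoint(dentry, &mp);	/* pins mp.mp on success */
	if (err)
		return;

	/* ... mount something on mp.mp ... */

	read_seqlock_excl(&mount_lock);
	unpin_mountpoint(&mp);			/* may free the mountpoint */
	read_sequnlock_excl(&mount_lock);
}
```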
@@ -989,11 +988,14 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
}
/*
- * vfsmount lock must be held for write
+ * locks: mount_lock[write_seqlock]
*/
-static struct mountpoint *unhash_mnt(struct mount *mnt)
+static void __umount_mnt(struct mount *mnt, struct list_head *shrink_list)
{
struct mountpoint *mp;
+ struct mount *parent = mnt->mnt_parent;
+ if (unlikely(parent->overmount == mnt))
+ parent->overmount = NULL;
mnt->mnt_parent = mnt;
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
list_del_init(&mnt->mnt_child);
@@ -1001,15 +1003,15 @@ static struct mountpoint *unhash_mnt(struct mount *mnt)
hlist_del_init(&mnt->mnt_mp_list);
mp = mnt->mnt_mp;
mnt->mnt_mp = NULL;
- return mp;
+ maybe_free_mountpoint(mp, shrink_list);
}
/*
- * vfsmount lock must be held for write
+ * locks: mount_lock[write_seqlock], namespace_sem[excl] (for ex_mountpoints)
*/
static void umount_mnt(struct mount *mnt)
{
- put_mountpoint(unhash_mnt(mnt));
+ __umount_mnt(mnt, &ex_mountpoints);
}
/*
@@ -1019,43 +1021,17 @@ void mnt_set_mountpoint(struct mount *mnt,
struct mountpoint *mp,
struct mount *child_mnt)
{
- mp->m_count++;
- mnt_add_count(mnt, 1); /* essentially, that's mntget */
child_mnt->mnt_mountpoint = mp->m_dentry;
child_mnt->mnt_parent = mnt;
child_mnt->mnt_mp = mp;
hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}
-/**
- * mnt_set_mountpoint_beneath - mount a mount beneath another one
- *
- * @new_parent: the source mount
- * @top_mnt: the mount beneath which @new_parent is mounted
- * @new_mp: the new mountpoint of @top_mnt on @new_parent
- *
- * Remove @top_mnt from its current mountpoint @top_mnt->mnt_mp and
- * parent @top_mnt->mnt_parent and mount it on top of @new_parent at
- * @new_mp. And mount @new_parent on the old parent and old
- * mountpoint of @top_mnt.
- *
- * Context: This function expects namespace_lock() and lock_mount_hash()
- * to have been acquired in that order.
- */
-static void mnt_set_mountpoint_beneath(struct mount *new_parent,
- struct mount *top_mnt,
- struct mountpoint *new_mp)
-{
- struct mount *old_top_parent = top_mnt->mnt_parent;
- struct mountpoint *old_top_mp = top_mnt->mnt_mp;
-
- mnt_set_mountpoint(old_top_parent, old_top_mp, new_parent);
- mnt_change_mountpoint(new_parent, new_mp, top_mnt);
-}
-
-
-static void __attach_mnt(struct mount *mnt, struct mount *parent)
+static void make_visible(struct mount *mnt)
{
+ struct mount *parent = mnt->mnt_parent;
+ if (unlikely(mnt->mnt_mountpoint == parent->mnt.mnt_root))
+ parent->overmount = mnt;
hlist_add_head_rcu(&mnt->mnt_hash,
m_hash(&parent->mnt, mnt->mnt_mountpoint));
list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
@@ -1067,51 +1043,34 @@ static void __attach_mnt(struct mount *mnt, struct mount *parent)
* @parent: the parent
* @mnt: the new mount
* @mp: the new mountpoint
- * @beneath: whether to mount @mnt beneath or on top of @parent
*
- * If @beneath is false, mount @mnt at @mp on @parent. Then attach @mnt
+ * Mount @mnt at @mp on @parent. Then attach @mnt
* to @parent's child mount list and to @mount_hashtable.
*
- * If @beneath is true, remove @mnt from its current parent and
- * mountpoint and mount it on @mp on @parent, and mount @parent on the
- * old parent and old mountpoint of @mnt. Finally, attach @parent to
- * @mnt_hashtable and @parent->mnt_parent->mnt_mounts.
- *
- * Note, when __attach_mnt() is called @mnt->mnt_parent already points
+ * Note, when make_visible() is called @mnt->mnt_parent already points
* to the correct parent.
*
* Context: This function expects namespace_lock() and lock_mount_hash()
* to have been acquired in that order.
*/
static void attach_mnt(struct mount *mnt, struct mount *parent,
- struct mountpoint *mp, bool beneath)
+ struct mountpoint *mp)
{
- if (beneath)
- mnt_set_mountpoint_beneath(mnt, parent, mp);
- else
- mnt_set_mountpoint(parent, mp, mnt);
- /*
- * Note, @mnt->mnt_parent has to be used. If @mnt was mounted
- * beneath @parent then @mnt will need to be attached to
- * @parent's old parent, not @parent. IOW, @mnt->mnt_parent
- * isn't the same mount as @parent.
- */
- __attach_mnt(mnt, mnt->mnt_parent);
+ mnt_set_mountpoint(parent, mp, mnt);
+ make_visible(mnt);
}
void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
struct mountpoint *old_mp = mnt->mnt_mp;
- struct mount *old_parent = mnt->mnt_parent;
list_del_init(&mnt->mnt_child);
hlist_del_init(&mnt->mnt_mp_list);
hlist_del_init_rcu(&mnt->mnt_hash);
- attach_mnt(mnt, parent, mp, false);
+ attach_mnt(mnt, parent, mp);
- put_mountpoint(old_mp);
- mnt_add_count(old_parent, -1);
+ maybe_free_mountpoint(old_mp, &ex_mountpoints);
}
static inline struct mount *node_to_mount(struct rb_node *node)
@@ -1123,45 +1082,29 @@ static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
{
struct rb_node **link = &ns->mounts.rb_node;
struct rb_node *parent = NULL;
+ bool mnt_first_node = true, mnt_last_node = true;
- WARN_ON(mnt->mnt.mnt_flags & MNT_ONRB);
+ WARN_ON(mnt_ns_attached(mnt));
mnt->mnt_ns = ns;
while (*link) {
parent = *link;
- if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique)
+ if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique) {
link = &parent->rb_left;
- else
+ mnt_last_node = false;
+ } else {
link = &parent->rb_right;
+ mnt_first_node = false;
+ }
}
+
+ if (mnt_last_node)
+ ns->mnt_last_node = &mnt->mnt_node;
+ if (mnt_first_node)
+ ns->mnt_first_node = &mnt->mnt_node;
rb_link_node(&mnt->mnt_node, parent, link);
rb_insert_color(&mnt->mnt_node, &ns->mounts);
- mnt->mnt.mnt_flags |= MNT_ONRB;
-}
-/*
- * vfsmount lock must be held for write
- */
-static void commit_tree(struct mount *mnt)
-{
- struct mount *parent = mnt->mnt_parent;
- struct mount *m;
- LIST_HEAD(head);
- struct mnt_namespace *n = parent->mnt_ns;
-
- BUG_ON(parent == mnt);
-
- list_add_tail(&head, &mnt->mnt_list);
- while (!list_empty(&head)) {
- m = list_first_entry(&head, typeof(*m), mnt_list);
- list_del(&m->mnt_list);
-
- mnt_add_to_ns(n, m);
- }
- n->nr_mounts += n->pending_mounts;
- n->pending_mounts = 0;
-
- __attach_mnt(mnt, parent);
- touch_mnt_namespace(n);
+ mnt_notify_add(mnt);
}
static struct mount *next_mnt(struct mount *p, struct mount *root)
@@ -1190,6 +1133,38 @@ static struct mount *skip_mnt_tree(struct mount *p)
return p;
}
+/*
+ * vfsmount lock must be held for write
+ */
+static void commit_tree(struct mount *mnt)
+{
+ struct mnt_namespace *n = mnt->mnt_parent->mnt_ns;
+
+ if (!mnt_ns_attached(mnt)) {
+ for (struct mount *m = mnt; m; m = next_mnt(m, mnt))
+ mnt_add_to_ns(n, m);
+ n->nr_mounts += n->pending_mounts;
+ n->pending_mounts = 0;
+ }
+
+ make_visible(mnt);
+ touch_mnt_namespace(n);
+}
+
+static void setup_mnt(struct mount *m, struct dentry *root)
+{
+ struct super_block *s = root->d_sb;
+
+ atomic_inc(&s->s_active);
+ m->mnt.mnt_sb = s;
+ m->mnt.mnt_root = dget(root);
+ m->mnt_mountpoint = m->mnt.mnt_root;
+ m->mnt_parent = m;
+
+ guard(mount_locked_reader)();
+ mnt_add_instance(m, s);
+}
+
/**
* vfs_create_mount - Create a mount for a configured superblock
* @fc: The configuration context with the superblock attached
@@ -1206,22 +1181,15 @@ struct vfsmount *vfs_create_mount(struct fs_context *fc)
if (!fc->root)
return ERR_PTR(-EINVAL);
- mnt = alloc_vfsmnt(fc->source ?: "none");
+ mnt = alloc_vfsmnt(fc->source);
if (!mnt)
return ERR_PTR(-ENOMEM);
if (fc->sb_flags & SB_KERNMOUNT)
mnt->mnt.mnt_flags = MNT_INTERNAL;
- atomic_inc(&fc->root->d_sb->s_active);
- mnt->mnt.mnt_sb = fc->root->d_sb;
- mnt->mnt.mnt_root = dget(fc->root);
- mnt->mnt_mountpoint = mnt->mnt.mnt_root;
- mnt->mnt_parent = mnt;
+ setup_mnt(mnt, fc->root);
- lock_mount_hash();
- list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
- unlock_mount_hash();
return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);
@@ -1237,6 +1205,15 @@ struct vfsmount *fc_mount(struct fs_context *fc)
}
EXPORT_SYMBOL(fc_mount);
+struct vfsmount *fc_mount_longterm(struct fs_context *fc)
+{
+ struct vfsmount *mnt = fc_mount(fc);
+ if (!IS_ERR(mnt))
+ real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
+ return mnt;
+}
+EXPORT_SYMBOL(fc_mount_longterm);
+
struct vfsmount *vfs_kern_mount(struct file_system_type *type,
int flags, const char *name,
void *data)
@@ -1253,8 +1230,7 @@ struct vfsmount *vfs_kern_mount(struct file_system_type *type,
return ERR_CAST(fc);
if (name)
- ret = vfs_parse_fs_string(fc, "source",
- name, strlen(name));
+ ret = vfs_parse_fs_string(fc, "source", name);
if (!ret)
ret = parse_monolithic_mount_data(fc, data);
if (!ret)
@@ -1267,25 +1243,9 @@ struct vfsmount *vfs_kern_mount(struct file_system_type *type,
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
-struct vfsmount *
-vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
- const char *name, void *data)
-{
- /* Until it is worked out how to pass the user namespace
- * through from the parent mount to the submount don't support
- * unprivileged mounts with submounts.
- */
- if (mountpoint->d_sb->s_user_ns != &init_user_ns)
- return ERR_PTR(-EPERM);
-
- return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
-}
-EXPORT_SYMBOL_GPL(vfs_submount);
-
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
int flag)
{
- struct super_block *sb = old->mnt.mnt_sb;
struct mount *mnt;
int err;
@@ -1293,7 +1253,10 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
if (!mnt)
return ERR_PTR(-ENOMEM);
- if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
+ mnt->mnt.mnt_flags = READ_ONCE(old->mnt.mnt_flags) &
+ ~MNT_INTERNAL_FLAGS;
+
+ if (flag & (CL_SLAVE | CL_PRIVATE))
mnt->mnt_group_id = 0; /* not a peer of original */
else
mnt->mnt_group_id = old->mnt_group_id;
@@ -1304,44 +1267,26 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
goto out_free;
}
- mnt->mnt.mnt_flags = old->mnt.mnt_flags;
- mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL|MNT_ONRB);
+ if (mnt->mnt_group_id)
+ set_mnt_shared(mnt);
- atomic_inc(&sb->s_active);
mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));
- mnt->mnt.mnt_sb = sb;
- mnt->mnt.mnt_root = dget(root);
- mnt->mnt_mountpoint = mnt->mnt.mnt_root;
- mnt->mnt_parent = mnt;
- lock_mount_hash();
- list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
- unlock_mount_hash();
+ setup_mnt(mnt, root);
- if ((flag & CL_SLAVE) ||
- ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
- list_add(&mnt->mnt_slave, &old->mnt_slave_list);
+ if (flag & CL_PRIVATE) // we are done with it
+ return mnt;
+
+ if (peers(mnt, old))
+ list_add(&mnt->mnt_share, &old->mnt_share);
+
+ if ((flag & CL_SLAVE) && old->mnt_group_id) {
+ hlist_add_head(&mnt->mnt_slave, &old->mnt_slave_list);
mnt->mnt_master = old;
- CLEAR_MNT_SHARED(mnt);
- } else if (!(flag & CL_PRIVATE)) {
- if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
- list_add(&mnt->mnt_share, &old->mnt_share);
- if (IS_MNT_SLAVE(old))
- list_add(&mnt->mnt_slave, &old->mnt_slave);
+ } else if (IS_MNT_SLAVE(old)) {
+ hlist_add_behind(&mnt->mnt_slave, &old->mnt_slave);
mnt->mnt_master = old->mnt_master;
- } else {
- CLEAR_MNT_SHARED(mnt);
}
- if (flag & CL_MAKE_SHARED)
- set_mnt_shared(mnt);
-
- /* stick the duplicate mount on the same expiry list
- * as the original if that was on one */
- if (flag & CL_EXPIRE) {
- if (!list_empty(&old->mnt_expire))
- list_add(&mnt->mnt_expire, &old->mnt_expire);
- }
-
return mnt;
out_free:
@@ -1391,26 +1336,12 @@ static void delayed_mntput(struct work_struct *unused)
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
-static void mntput_no_expire(struct mount *mnt)
+static void noinline mntput_no_expire_slowpath(struct mount *mnt)
{
LIST_HEAD(list);
int count;
- rcu_read_lock();
- if (likely(READ_ONCE(mnt->mnt_ns))) {
- /*
- * Since we don't do lock_mount_hash() here,
- * ->mnt_ns can change under us. However, if it's
- * non-NULL, then there's a reference that won't
- * be dropped until after an RCU delay done after
- * turning ->mnt_ns NULL. So if we observe it
- * non-NULL under rcu_read_lock(), the reference
- * we are dropping is not the final one.
- */
- mnt_add_count(mnt, -1);
- rcu_read_unlock();
- return;
- }
+ VFS_BUG_ON(mnt->mnt_ns);
lock_mount_hash();
/*
* make sure that if __legitimize_mnt() has not seen us grab
@@ -1433,12 +1364,14 @@ static void mntput_no_expire(struct mount *mnt)
mnt->mnt.mnt_flags |= MNT_DOOMED;
rcu_read_unlock();
- list_del(&mnt->mnt_instance);
+ mnt_del_instance(mnt);
+ if (unlikely(!list_empty(&mnt->mnt_expire)))
+ list_del(&mnt->mnt_expire);
if (unlikely(!list_empty(&mnt->mnt_mounts))) {
struct mount *p, *tmp;
list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
- __put_mountpoint(unhash_mnt(p), &list);
+ __umount_mnt(p, &list);
hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
}
}
@@ -1459,6 +1392,26 @@ static void mntput_no_expire(struct mount *mnt)
cleanup_mnt(mnt);
}
+static void mntput_no_expire(struct mount *mnt)
+{
+ rcu_read_lock();
+ if (likely(READ_ONCE(mnt->mnt_ns))) {
+ /*
+ * Since we don't do lock_mount_hash() here,
+ * ->mnt_ns can change under us. However, if it's
+ * non-NULL, then there's a reference that won't
+ * be dropped until after an RCU delay done after
+ * turning ->mnt_ns NULL. So if we observe it
+ * non-NULL under rcu_read_lock(), the reference
+ * we are dropping is not the final one.
+ */
+ mnt_add_count(mnt, -1);
+ rcu_read_unlock();
+ return;
+ }
+ mntput_no_expire_slowpath(mnt);
+}
+
void mntput(struct vfsmount *mnt)
{
if (mnt) {
@@ -1635,23 +1588,19 @@ const struct seq_operations mounts_op = {
int may_umount_tree(struct vfsmount *m)
{
struct mount *mnt = real_mount(m);
- int actual_refs = 0;
- int minimum_refs = 0;
- struct mount *p;
- BUG_ON(!m);
+ bool busy = false;
/* write lock needed for mnt_get_count */
lock_mount_hash();
- for (p = mnt; p; p = next_mnt(p, mnt)) {
- actual_refs += mnt_get_count(p);
- minimum_refs += 2;
+ for (struct mount *p = mnt; p; p = next_mnt(p, mnt)) {
+ if (mnt_get_count(p) > (p == mnt ? 2 : 1)) {
+ busy = true;
+ break;
+ }
}
unlock_mount_hash();
- if (actual_refs > minimum_refs)
- return 0;
-
- return 1;
+ return !busy;
}
EXPORT_SYMBOL(may_umount_tree);
@@ -1683,17 +1632,80 @@ int may_umount(struct vfsmount *mnt)
EXPORT_SYMBOL(may_umount);
+#ifdef CONFIG_FSNOTIFY
+static void mnt_notify(struct mount *p)
+{
+ if (!p->prev_ns && p->mnt_ns) {
+ fsnotify_mnt_attach(p->mnt_ns, &p->mnt);
+ } else if (p->prev_ns && !p->mnt_ns) {
+ fsnotify_mnt_detach(p->prev_ns, &p->mnt);
+ } else if (p->prev_ns == p->mnt_ns) {
+ fsnotify_mnt_move(p->mnt_ns, &p->mnt);
+ } else {
+ fsnotify_mnt_detach(p->prev_ns, &p->mnt);
+ fsnotify_mnt_attach(p->mnt_ns, &p->mnt);
+ }
+ p->prev_ns = p->mnt_ns;
+}
+
+static void notify_mnt_list(void)
+{
+ struct mount *m, *tmp;
+ /*
+ * Notify about mounts that were added, reparented, detached, or remain
+ * connected after unmount.
+ */
+ list_for_each_entry_safe(m, tmp, &notify_list, to_notify) {
+ mnt_notify(m);
+ list_del_init(&m->to_notify);
+ }
+}
+
+static bool need_notify_mnt_list(void)
+{
+ return !list_empty(&notify_list);
+}
+#else
+static void notify_mnt_list(void)
+{
+}
+
+static bool need_notify_mnt_list(void)
+{
+ return false;
+}
+#endif
+
+static void free_mnt_ns(struct mnt_namespace *);
static void namespace_unlock(void)
{
struct hlist_head head;
struct hlist_node *p;
struct mount *m;
+ struct mnt_namespace *ns = emptied_ns;
LIST_HEAD(list);
hlist_move_list(&unmounted, &head);
list_splice_init(&ex_mountpoints, &list);
+ emptied_ns = NULL;
- up_write(&namespace_sem);
+ if (need_notify_mnt_list()) {
+ /*
+ * No point blocking out concurrent readers while notifications
+ * are sent. This will also allow statmount()/listmount() to run
+ * concurrently.
+ */
+ downgrade_write(&namespace_sem);
+ notify_mnt_list();
+ up_read(&namespace_sem);
+ } else {
+ up_write(&namespace_sem);
+ }
+ if (unlikely(ns)) {
+ /* Make sure we notice when we leak mounts. */
+ VFS_WARN_ON_ONCE(!mnt_ns_empty(ns));
+ free_mnt_ns(ns);
+ }
shrink_dentry_list(&list);
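Editor's note: namespace_unlock() now downgrades namespace_sem instead of dropping it outright when fsnotify events are pending, so readers such as statmount()/listmount() can run while the notifications are sent. The rwsem downgrade idiom in isolation, as a hedged sketch:

```c
/* Sketch: write-to-read downgrade on an rw_semaphore, as used above. */
static DECLARE_RWSEM(example_sem);

static void example_publish_then_notify(void)
{
	down_write(&example_sem);
	/* ... mutate state that readers must not observe half-done ... */

	downgrade_write(&example_sem);	/* writers stay excluded, readers may enter */
	/* ... slow work (notifications) that only needs read protection ... */
	up_read(&example_sem);
}
```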
@@ -1763,10 +1775,9 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
/* Gather the mounts to umount */
for (p = mnt; p; p = next_mnt(p, mnt)) {
p->mnt.mnt_flags |= MNT_UMOUNT;
- if (p->mnt.mnt_flags & MNT_ONRB)
- move_from_ns(p, &tmp_list);
- else
- list_move(&p->mnt_list, &tmp_list);
+ if (mnt_ns_attached(p))
+ move_from_ns(p);
+ list_add_tail(&p->mnt_list, &tmp_list);
}
/* Hide the mounts from mnt_mounts */
@@ -1778,6 +1789,8 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
if (how & UMOUNT_PROPAGATE)
propagate_umount(&tmp_list);
+ bulk_make_private(&tmp_list);
+
while (!list_empty(&tmp_list)) {
struct mnt_namespace *ns;
bool disconnect;
@@ -1795,7 +1808,6 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
disconnect = disconnect_mount(p, how);
if (mnt_has_parent(p)) {
- mnt_add_count(p->mnt_parent, -1);
if (!disconnect) {
/* Don't forget about p */
list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
@@ -1803,9 +1815,21 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
umount_mnt(p);
}
}
- change_mnt_propagation(p, MS_PRIVATE);
if (disconnect)
hlist_add_head(&p->mnt_umount, &unmounted);
+
+ /*
+ * At this point p->mnt_ns is NULL, notification will be queued
+ * only if
+ *
+ * - p->prev_ns is non-NULL *and*
+ * - p->prev_ns->n_fsnotify_marks is non-NULL
+ *
+ * This will preclude queuing the mount if this is a cleanup
+ * after a failed copy_tree() or destruction of an anonymous
+ * namespace, etc.
+ */
+ mnt_notify_add(p);
}
}
@@ -1859,7 +1883,7 @@ static int do_umount(struct mount *mnt, int flags)
* all race cases, but it's a slowpath.
*/
lock_mount_hash();
- if (mnt_get_count(mnt) != 2) {
+ if (!list_empty(&mnt->mnt_mounts) || mnt_get_count(mnt) != 2) {
unlock_mount_hash();
return -EBUSY;
}
@@ -1905,24 +1929,27 @@ static int do_umount(struct mount *mnt, int flags)
namespace_lock();
lock_mount_hash();
- /* Recheck MNT_LOCKED with the locks held */
+ /* Repeat the earlier racy checks, now that we are holding the locks */
retval = -EINVAL;
+ if (!check_mnt(mnt))
+ goto out;
+
if (mnt->mnt.mnt_flags & MNT_LOCKED)
goto out;
+ if (!mnt_has_parent(mnt)) /* not the absolute root */
+ goto out;
+
event++;
if (flags & MNT_DETACH) {
- if (mnt->mnt.mnt_flags & MNT_ONRB ||
- !list_empty(&mnt->mnt_list))
- umount_tree(mnt, UMOUNT_PROPAGATE);
+ umount_tree(mnt, UMOUNT_PROPAGATE);
retval = 0;
} else {
+ smp_mb(); // paired with __legitimize_mnt()
shrink_submounts(mnt);
retval = -EBUSY;
if (!propagate_mount_busy(mnt, 2)) {
- if (mnt->mnt.mnt_flags & MNT_ONRB ||
- !list_empty(&mnt->mnt_list))
- umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
+ umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
retval = 0;
}
}
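Editor's note: the two branches above correspond to the two user-visible umount flavours: a plain umount(2), which fails with -EBUSY while the tree is in use, and a lazy umount2(..., MNT_DETACH), which always detaches and lets the mounts go away once the last user drops them. A minimal userspace illustration using the glibc wrappers (the mountpoint path is, of course, hypothetical):

```c
/* Userspace sketch: fall back to a lazy unmount when a plain umount is busy. */
#include <stdio.h>
#include <errno.h>
#include <sys/mount.h>

int main(void)
{
	const char *target = "/mnt/example";	/* hypothetical mountpoint */

	if (umount(target) == 0)
		return 0;
	if (errno == EBUSY && umount2(target, MNT_DETACH) == 0) {
		fprintf(stderr, "%s: busy, detached lazily\n", target);
		return 0;
	}
	perror(target);
	return 1;
}
```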
@@ -1940,32 +1967,29 @@ out:
* detach_mounts allows lazily unmounting those mounts instead of
* leaking them.
*
- * The caller may hold dentry->d_inode->i_mutex.
+ * The caller may hold dentry->d_inode->i_rwsem.
*/
void __detach_mounts(struct dentry *dentry)
{
- struct mountpoint *mp;
+ struct pinned_mountpoint mp = {};
struct mount *mnt;
- namespace_lock();
- lock_mount_hash();
- mp = lookup_mountpoint(dentry);
- if (!mp)
- goto out_unlock;
+ guard(namespace_excl)();
+ guard(mount_writer)();
+
+ if (!lookup_mountpoint(dentry, &mp))
+ return;
event++;
- while (!hlist_empty(&mp->m_list)) {
- mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
+ while (mp.node.next) {
+ mnt = hlist_entry(mp.node.next, struct mount, mnt_mp_list);
if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
umount_mnt(mnt);
hlist_add_head(&mnt->mnt_umount, &unmounted);
}
else umount_tree(mnt, UMOUNT_CONNECTED);
}
- put_mountpoint(mp);
-out_unlock:
- unlock_mount_hash();
- namespace_unlock();
+ unpin_mountpoint(&mp);
}
/*
@@ -1988,6 +2012,7 @@ static void warn_mandlock(void)
static int can_umount(const struct path *path, int flags)
{
struct mount *mnt = real_mount(path->mnt);
+ struct super_block *sb = path->dentry->d_sb;
if (!may_mount())
return -EPERM;
@@ -1997,13 +2022,13 @@ static int can_umount(const struct path *path, int flags)
return -EINVAL;
if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
return -EINVAL;
- if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
+ if (flags & MNT_FORCE && !ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
// caller is responsible for flags being sane
-int path_umount(struct path *path, int flags)
+int path_umount(const struct path *path, int flags)
{
struct mount *mnt = real_mount(path->mnt);
int ret;
@@ -2055,9 +2080,15 @@ SYSCALL_DEFINE1(oldumount, char __user *, name)
static bool is_mnt_ns_file(struct dentry *dentry)
{
+ struct ns_common *ns;
+
/* Is this a proxy for a mount namespace? */
- return dentry->d_op == &ns_dentry_operations &&
- dentry->d_fsdata == &mntns_operations;
+ if (dentry->d_op != &ns_dentry_operations)
+ return false;
+
+ ns = d_inode(dentry)->i_private;
+
+ return ns->ops == &mntns_operations;
}
struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
@@ -2065,49 +2096,58 @@ struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
return &mnt->ns;
}
-struct mnt_namespace *__lookup_next_mnt_ns(struct mnt_namespace *mntns, bool previous)
+struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mntns, bool previous)
{
- guard(read_lock)(&mnt_ns_tree_lock);
- for (;;) {
- struct rb_node *node;
+ struct ns_common *ns;
- if (previous)
- node = rb_prev(&mntns->mnt_ns_tree_node);
- else
- node = rb_next(&mntns->mnt_ns_tree_node);
- if (!node)
- return ERR_PTR(-ENOENT);
+ guard(rcu)();
+
+ for (;;) {
+ ns = ns_tree_adjoined_rcu(mntns, previous);
+ if (IS_ERR(ns))
+ return ERR_CAST(ns);
- mntns = node_to_mnt_ns(node);
- node = &mntns->mnt_ns_tree_node;
+ mntns = to_mnt_ns(ns);
+ /*
+ * The last passive reference count is put with RCU
+ * delay so accessing the mount namespace is not just
+ * safe but all relevant members are still valid.
+ */
if (!ns_capable_noaudit(mntns->user_ns, CAP_SYS_ADMIN))
continue;
/*
- * Holding mnt_ns_tree_lock prevents the mount namespace from
- * being freed but it may well be on it's deathbed. We want an
- * active reference, not just a passive one here as we're
- * persisting the mount namespace.
+ * We need an active reference count as we're persisting
+ * the mount namespace and it might already be on its
+ * deathbed.
*/
- if (!refcount_inc_not_zero(&mntns->ns.count))
+ if (!ns_ref_get(mntns))
continue;
return mntns;
}
}
+struct mnt_namespace *mnt_ns_from_dentry(struct dentry *dentry)
+{
+ if (!is_mnt_ns_file(dentry))
+ return NULL;
+
+ return to_mnt_ns(get_proc_ns(dentry->d_inode));
+}
+
static bool mnt_ns_loop(struct dentry *dentry)
{
/* Could bind mounting the mount namespace inode cause a
* mount namespace loop?
*/
- struct mnt_namespace *mnt_ns;
- if (!is_mnt_ns_file(dentry))
+ struct mnt_namespace *mnt_ns = mnt_ns_from_dentry(dentry);
+
+ if (!mnt_ns)
return false;
- mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
- return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
+ return current->nsproxy->mnt_ns->ns.ns_id >= mnt_ns->ns.ns_id;
}
struct mount *copy_tree(struct mount *src_root, struct dentry *dentry,
@@ -2127,7 +2167,6 @@ struct mount *copy_tree(struct mount *src_root, struct dentry *dentry,
return dst_mnt;
src_parent = src_root;
- dst_mnt->mnt_mountpoint = src_root->mnt_mountpoint;
list_for_each_entry(src_root_child, &src_root->mnt_mounts, mnt_child) {
if (!is_subdir(src_root_child->mnt_mountpoint, dentry))
@@ -2162,8 +2201,16 @@ struct mount *copy_tree(struct mount *src_root, struct dentry *dentry,
if (IS_ERR(dst_mnt))
goto out;
lock_mount_hash();
- list_add_tail(&dst_mnt->mnt_list, &res->mnt_list);
- attach_mnt(dst_mnt, dst_parent, src_parent->mnt_mp, false);
+ if (src_mnt->mnt.mnt_flags & MNT_LOCKED)
+ dst_mnt->mnt.mnt_flags |= MNT_LOCKED;
+ if (unlikely(flag & CL_EXPIRE)) {
+ /* stick the duplicate mount on the same expiry
+ * list as the original if that was on one */
+ if (!list_empty(&src_mnt->mnt_expire))
+ list_add(&dst_mnt->mnt_expire,
+ &src_mnt->mnt_expire);
+ }
+ attach_mnt(dst_mnt, dst_parent, src_parent->mnt_mp);
unlock_mount_hash();
}
}
@@ -2178,54 +2225,98 @@ out:
return dst_mnt;
}
-/* Caller should check returned pointer for errors */
+static inline bool extend_array(struct path **res, struct path **to_free,
+ unsigned n, unsigned *count, unsigned new_count)
+{
+ struct path *p;
+
+ if (likely(n < *count))
+ return true;
+ p = kmalloc_array(new_count, sizeof(struct path), GFP_KERNEL);
+ if (p && *count)
+ memcpy(p, *res, *count * sizeof(struct path));
+ *count = new_count;
+ kfree(*to_free);
+ *to_free = *res = p;
+ return p;
+}
-struct vfsmount *collect_mounts(const struct path *path)
+const struct path *collect_paths(const struct path *path,
+ struct path *prealloc, unsigned count)
{
- struct mount *tree;
- namespace_lock();
- if (!check_mnt(real_mount(path->mnt)))
- tree = ERR_PTR(-EINVAL);
- else
- tree = copy_tree(real_mount(path->mnt), path->dentry,
- CL_COPY_ALL | CL_PRIVATE);
- namespace_unlock();
- if (IS_ERR(tree))
- return ERR_CAST(tree);
- return &tree->mnt;
+ struct mount *root = real_mount(path->mnt);
+ struct mount *child;
+ struct path *res = prealloc, *to_free = NULL;
+ unsigned n = 0;
+
+ guard(namespace_shared)();
+
+ if (!check_mnt(root))
+ return ERR_PTR(-EINVAL);
+ if (!extend_array(&res, &to_free, 0, &count, 32))
+ return ERR_PTR(-ENOMEM);
+ res[n++] = *path;
+ list_for_each_entry(child, &root->mnt_mounts, mnt_child) {
+ if (!is_subdir(child->mnt_mountpoint, path->dentry))
+ continue;
+ for (struct mount *m = child; m; m = next_mnt(m, child)) {
+ if (!extend_array(&res, &to_free, n, &count, 2 * count))
+ return ERR_PTR(-ENOMEM);
+ res[n].mnt = &m->mnt;
+ res[n].dentry = m->mnt.mnt_root;
+ n++;
+ }
+ }
+ if (!extend_array(&res, &to_free, n, &count, count + 1))
+ return ERR_PTR(-ENOMEM);
+ memset(res + n, 0, (count - n) * sizeof(struct path));
+ for (struct path *p = res; p->mnt; p++)
+ path_get(p);
+ return res;
+}
+
+void drop_collected_paths(const struct path *paths, const struct path *prealloc)
+{
+ for (const struct path *p = paths; p->mnt; p++)
+ path_put(p);
+ if (paths != prealloc)
+ kfree(paths);
}
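
For reference, the calling convention of collect_paths()/drop_collected_paths(): the returned array is terminated by an entry with a NULL ->mnt, every live entry holds a path reference, and the preallocated buffer is only freed if it had to be outgrown. A minimal hypothetical caller (names are illustrative, not part of this patch):

	static int walk_collected_mounts(const struct path *root)
	{
		struct path prealloc[16];
		const struct path *paths, *p;

		paths = collect_paths(root, prealloc, ARRAY_SIZE(prealloc));
		if (IS_ERR(paths))
			return PTR_ERR(paths);
		for (p = paths; p->mnt; p++)	/* NULL ->mnt terminates the array */
			pr_debug("collected %pd\n", p->dentry);
		drop_collected_paths(paths, prealloc);
		return 0;
	}
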
-static void free_mnt_ns(struct mnt_namespace *);
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool);
void dissolve_on_fput(struct vfsmount *mnt)
{
- struct mnt_namespace *ns;
- namespace_lock();
- lock_mount_hash();
- ns = real_mount(mnt)->mnt_ns;
- if (ns) {
- if (is_anon_ns(ns))
- umount_tree(real_mount(mnt), UMOUNT_CONNECTED);
- else
- ns = NULL;
+ struct mount *m = real_mount(mnt);
+
+ /*
+ * m used to be the root of anon namespace; if it still is one,
+ * we need to dissolve the mount tree and free that namespace.
+ * Let's try to avoid taking namespace_sem if we can determine
+ * that there's nothing to do without it - rcu_read_lock() is
+ * enough to make anon_ns_root() memory-safe and once m has
+ * left its namespace, it's no longer our concern, since it will
+ * never become a root of anon ns again.
+ */
+
+ scoped_guard(rcu) {
+ if (!anon_ns_root(m))
+ return;
}
- unlock_mount_hash();
- namespace_unlock();
- if (ns)
- free_mnt_ns(ns);
-}
-void drop_collected_mounts(struct vfsmount *mnt)
-{
- namespace_lock();
- lock_mount_hash();
- umount_tree(real_mount(mnt), 0);
- unlock_mount_hash();
- namespace_unlock();
+ scoped_guard(namespace_excl) {
+ if (!anon_ns_root(m))
+ return;
+
+ emptied_ns = m->mnt_ns;
+ lock_mount_hash();
+ umount_tree(m, UMOUNT_CONNECTED);
+ unlock_mount_hash();
+ }
}
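
anon_ns_root() is not part of this hunk; per the comment above it only needs to be memory-safe under RCU, so it presumably just checks that the mount's namespace is an anonymous one with this mount as its root. A plausible sketch (an assumption; the real helper lives elsewhere, likely fs/mount.h):

	static inline bool anon_ns_root(const struct mount *m)
	{
		struct mnt_namespace *ns = READ_ONCE(m->mnt_ns);

		return !IS_ERR_OR_NULL(ns) && is_anon_ns(ns) && m == ns->root;
	}

The double check - once under rcu_read_lock(), then again under namespace_sem - is the usual "cheap optimistic test, authoritative retest under the real lock" pattern; only the second check is allowed to start the teardown.
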
-bool has_locked_children(struct mount *mnt, struct dentry *dentry)
+/* locks: namespace_shared && pinned(mnt) || mount_locked_reader */
+static bool __has_locked_children(struct mount *mnt, struct dentry *dentry)
{
struct mount *child;
@@ -2239,6 +2330,28 @@ bool has_locked_children(struct mount *mnt, struct dentry *dentry)
return false;
}
+bool has_locked_children(struct mount *mnt, struct dentry *dentry)
+{
+ guard(mount_locked_reader)();
+ return __has_locked_children(mnt, dentry);
+}
+
+/*
+ * Check that there aren't references to earlier/same mount namespaces in the
+ * specified subtree. Such references can act as pins for mount namespaces
+ * that aren't checked by the mount-cycle checking code, thereby allowing
+ * cycles to be made.
+ *
+ * locks: mount_locked_reader || namespace_shared && pinned(subtree)
+ */
+static bool check_for_nsfs_mounts(struct mount *subtree)
+{
+ for (struct mount *p = subtree; p; p = next_mnt(p, subtree))
+ if (mnt_ns_loop(p->mnt.mnt_root))
+ return false;
+ return true;
+}
+
/**
* clone_private_mount - create a private clone of a path
* @path: path to clone
@@ -2247,6 +2360,8 @@ bool has_locked_children(struct mount *mnt, struct dentry *dentry)
* will not be attached anywhere in the namespace and will be private (i.e.
* changes to the originating mount won't be propagated into this).
*
+ * This assumes caller has called or done the equivalent of may_mount().
+ *
* Release with mntput().
*/
struct vfsmount *clone_private_mount(const struct path *path)
@@ -2254,48 +2369,42 @@ struct vfsmount *clone_private_mount(const struct path *path)
struct mount *old_mnt = real_mount(path->mnt);
struct mount *new_mnt;
- down_read(&namespace_sem);
+ guard(namespace_shared)();
+
if (IS_MNT_UNBINDABLE(old_mnt))
- goto invalid;
+ return ERR_PTR(-EINVAL);
- if (!check_mnt(old_mnt))
- goto invalid;
+ /*
+ * Make sure the source mount is acceptable.
+ * Anything mounted in our mount namespace is allowed.
+ * Otherwise, it must be the root of an anonymous mount
+ * namespace, and we need to make sure no namespace
+ * loops get created.
+ */
+ if (!check_mnt(old_mnt)) {
+ if (!anon_ns_root(old_mnt))
+ return ERR_PTR(-EINVAL);
- if (has_locked_children(old_mnt, path->dentry))
- goto invalid;
+ if (!check_for_nsfs_mounts(old_mnt))
+ return ERR_PTR(-EINVAL);
+ }
- new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
- up_read(&namespace_sem);
+ if (!ns_capable(old_mnt->mnt_ns->user_ns, CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
+
+ if (__has_locked_children(old_mnt, path->dentry))
+ return ERR_PTR(-EINVAL);
+ new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
if (IS_ERR(new_mnt))
- return ERR_CAST(new_mnt);
+ return ERR_PTR(-EINVAL);
/* Longterm mount to be removed by kern_unmount*() */
new_mnt->mnt_ns = MNT_NS_INTERNAL;
-
return &new_mnt->mnt;
-
-invalid:
- up_read(&namespace_sem);
- return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(clone_private_mount);
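
A usage note (not part of this patch): in-kernel consumers such as overlayfs resolve a path themselves, clone it privately, and - because the clone is marked as a long-term MNT_NS_INTERNAL mount per the note above - tear it down with kern_unmount() rather than a bare mntput(). A hypothetical sketch, error handling trimmed:

	static struct vfsmount *example_grab_layer(const char *name)
	{
		struct path path;
		struct vfsmount *mnt;
		int err;

		err = kern_path(name, LOOKUP_FOLLOW, &path);
		if (err)
			return ERR_PTR(err);
		mnt = clone_private_mount(&path);	/* private, unattached copy */
		path_put(&path);
		return mnt;
	}

	static void example_drop_layer(struct vfsmount *mnt)
	{
		if (!IS_ERR_OR_NULL(mnt))
			kern_unmount(mnt);	/* per the "Longterm mount" note above */
	}
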
-int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
- struct vfsmount *root)
-{
- struct mount *mnt;
- int res = f(root, arg);
- if (res)
- return res;
- list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
- res = f(&mnt->mnt, arg);
- if (res)
- return res;
- }
- return 0;
-}
-
static void lock_mnt_tree(struct mount *mnt)
{
struct mount *p;
@@ -2317,7 +2426,7 @@ static void lock_mnt_tree(struct mount *mnt)
if (flags & MNT_NOEXEC)
flags |= MNT_LOCK_NOEXEC;
/* Don't allow unprivileged users to reveal what is under a mount */
- if (list_empty(&p->mnt_expire))
+ if (list_empty(&p->mnt_expire) && p != mnt)
flags |= MNT_LOCKED;
p->mnt.mnt_flags = flags;
}
@@ -2338,7 +2447,7 @@ static int invent_group_ids(struct mount *mnt, bool recurse)
struct mount *p;
for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
- if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
+ if (!p->mnt_group_id) {
int err = mnt_alloc_group_id(p);
if (err) {
cleanup_group_ids(mnt, p);
@@ -2374,16 +2483,14 @@ int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
}
enum mnt_tree_flags_t {
- MNT_TREE_MOVE = BIT(0),
- MNT_TREE_BENEATH = BIT(1),
+ MNT_TREE_BENEATH = BIT(0),
+ MNT_TREE_PROPAGATION = BIT(1),
};
/**
* attach_recursive_mnt - attach a source mount tree
* @source_mnt: mount tree to be attached
- * @top_mnt: mount that @source_mnt will be mounted on or mounted beneath
- * @dest_mp: the mountpoint @source_mnt will be mounted at
- * @flags: modify how @source_mnt is supposed to be attached
+ * @dest: the context for mounting at the place where the tree should go
*
* NOTE: in the table below explains the semantics when a source mount
* of a given type is attached to a destination mount of a given type.
@@ -2446,26 +2553,32 @@ enum mnt_tree_flags_t {
* Otherwise a negative error code is returned.
*/
static int attach_recursive_mnt(struct mount *source_mnt,
- struct mount *top_mnt,
- struct mountpoint *dest_mp,
- enum mnt_tree_flags_t flags)
+ const struct pinned_mountpoint *dest)
{
struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
+ struct mount *dest_mnt = dest->parent;
+ struct mountpoint *dest_mp = dest->mp;
HLIST_HEAD(tree_list);
- struct mnt_namespace *ns = top_mnt->mnt_ns;
- struct mountpoint *smp;
- struct mount *child, *dest_mnt, *p;
+ struct mnt_namespace *ns = dest_mnt->mnt_ns;
+ struct pinned_mountpoint root = {};
+ struct mountpoint *shorter = NULL;
+ struct mount *child, *p;
+ struct mount *top;
struct hlist_node *n;
int err = 0;
- bool moving = flags & MNT_TREE_MOVE, beneath = flags & MNT_TREE_BENEATH;
+ bool moving = mnt_has_parent(source_mnt);
/*
* Preallocate a mountpoint in case the new mounts need to be
* mounted beneath mounts on the same mountpoint.
*/
- smp = get_mountpoint(source_mnt->mnt.mnt_root);
- if (IS_ERR(smp))
- return PTR_ERR(smp);
+ for (top = source_mnt; unlikely(top->overmount); top = top->overmount) {
+ if (!shorter && is_mnt_ns_file(top->mnt.mnt_root))
+ shorter = top->mnt_mp;
+ }
+ err = get_mountpoint(top->mnt.mnt_root, &root);
+ if (err)
+ return err;
/* Is there space to add these mounts to the mount namespace? */
if (!moving) {
@@ -2474,11 +2587,6 @@ static int attach_recursive_mnt(struct mount *source_mnt,
goto out;
}
- if (beneath)
- dest_mnt = top_mnt->mnt_parent;
- else
- dest_mnt = top_mnt;
-
if (IS_MNT_SHARED(dest_mnt)) {
err = invent_group_ids(source_mnt, true);
if (err)
@@ -2495,41 +2603,49 @@ static int attach_recursive_mnt(struct mount *source_mnt,
}
if (moving) {
- if (beneath)
- dest_mp = smp;
- unhash_mnt(source_mnt);
- attach_mnt(source_mnt, top_mnt, dest_mp, beneath);
- touch_mnt_namespace(source_mnt->mnt_ns);
+ umount_mnt(source_mnt);
+ mnt_notify_add(source_mnt);
+ /* if the mount is moved, it should no longer be expired
+ * automatically */
+ list_del_init(&source_mnt->mnt_expire);
} else {
if (source_mnt->mnt_ns) {
- LIST_HEAD(head);
-
/* move from anon - the caller will destroy */
+ emptied_ns = source_mnt->mnt_ns;
for (p = source_mnt; p; p = next_mnt(p, source_mnt))
- move_from_ns(p, &head);
- list_del_init(&head);
+ move_from_ns(p);
}
- if (beneath)
- mnt_set_mountpoint_beneath(source_mnt, top_mnt, smp);
- else
- mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
- commit_tree(source_mnt);
}
+ mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
+ /*
+ * Now the original copy is in the same state as the secondaries -
+ * its root attached to mountpoint, but not hashed and all mounts
+ * in it are either in our namespace or in no namespace at all.
+ * Add the original to the list of copies and deal with the
+	 * rest of the work for all of them uniformly.
+ */
+ hlist_add_head(&source_mnt->mnt_hash, &tree_list);
+
hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
struct mount *q;
hlist_del_init(&child->mnt_hash);
- q = __lookup_mnt(&child->mnt_parent->mnt,
- child->mnt_mountpoint);
- if (q)
- mnt_change_mountpoint(child, smp, q);
/* Notice when we are propagating across user namespaces */
if (child->mnt_parent->mnt_ns->user_ns != user_ns)
lock_mnt_tree(child);
- child->mnt.mnt_flags &= ~MNT_LOCKED;
+ q = __lookup_mnt(&child->mnt_parent->mnt,
+ child->mnt_mountpoint);
commit_tree(child);
+ if (q) {
+ struct mount *r = topmost_overmount(child);
+ struct mountpoint *mp = root.mp;
+
+ if (unlikely(shorter) && child != source_mnt)
+ mp = shorter;
+ mnt_change_mountpoint(r, mp, q);
+ }
}
- put_mountpoint(smp);
+ unpin_mountpoint(&root);
unlock_mount_hash();
return 0;
@@ -2546,131 +2662,171 @@ static int attach_recursive_mnt(struct mount *source_mnt,
ns->pending_mounts = 0;
read_seqlock_excl(&mount_lock);
- put_mountpoint(smp);
+ unpin_mountpoint(&root);
read_sequnlock_excl(&mount_lock);
return err;
}
+static inline struct mount *where_to_mount(const struct path *path,
+ struct dentry **dentry,
+ bool beneath)
+{
+ struct mount *m;
+
+ if (unlikely(beneath)) {
+ m = topmost_overmount(real_mount(path->mnt));
+ *dentry = m->mnt_mountpoint;
+ return m->mnt_parent;
+ }
+ m = __lookup_mnt(path->mnt, path->dentry);
+ if (unlikely(m)) {
+ m = topmost_overmount(m);
+ *dentry = m->mnt.mnt_root;
+ return m;
+ }
+ *dentry = path->dentry;
+ return real_mount(path->mnt);
+}
+
/**
- * do_lock_mount - lock mount and mountpoint
- * @path: target path
- * @beneath: whether the intention is to mount beneath @path
- *
- * Follow the mount stack on @path until the top mount @mnt is found. If
- * the initial @path->{mnt,dentry} is a mountpoint lookup the first
- * mount stacked on top of it. Then simply follow @{mnt,mnt->mnt_root}
- * until nothing is stacked on top of it anymore.
+ * do_lock_mount - acquire environment for mounting
+ * @path: target path
+ * @res: context to set up
+ * @beneath: whether the intention is to mount beneath @path
*
- * Acquire the inode_lock() on the top mount's ->mnt_root to protect
- * against concurrent removal of the new mountpoint from another mount
- * namespace.
+ * To mount something at given location, we need
+ * namespace_sem locked exclusive
+ * inode of dentry we are mounting on locked exclusive
+ * struct mountpoint for that dentry
+ * struct mount we are mounting on
*
- * If @beneath is requested, acquire inode_lock() on @mnt's mountpoint
- * @mp on @mnt->mnt_parent must be acquired. This protects against a
- * concurrent unlink of @mp->mnt_dentry from another mount namespace
- * where @mnt doesn't have a child mount mounted @mp. A concurrent
- * removal of @mnt->mnt_root doesn't matter as nothing will be mounted
- * on top of it for @beneath.
+ * Results are stored in caller-supplied context (pinned_mountpoint);
+ * on success we have res->parent and res->mp pointing to parent and
+ * mountpoint respectively and res->node inserted into the ->m_list
+ * of the mountpoint, making sure the mountpoint won't disappear.
+ * On failure we have res->parent set to ERR_PTR(-E...), res->mp
+ * left NULL, res->node - empty.
+ * In case of success do_lock_mount returns with locks acquired (in
+ * proper order - inode lock nests outside of namespace_sem).
*
- * In addition, @beneath needs to make sure that @mnt hasn't been
- * unmounted or moved from its current mountpoint in between dropping
- * @mount_lock and acquiring @namespace_sem. For the !@beneath case @mnt
- * being unmounted would be detected later by e.g., calling
- * check_mnt(mnt) in the function it's called from. For the @beneath
- * case however, it's useful to detect it directly in do_lock_mount().
- * If @mnt hasn't been unmounted then @mnt->mnt_mountpoint still points
- * to @mnt->mnt_mp->m_dentry. But if @mnt has been unmounted it will
- * point to @mnt->mnt_root and @mnt->mnt_mp will be NULL.
+ * A request to mount on an overmounted location is treated as "mount
+ * on top of whatever's overmounting it"; a request to mount beneath
+ * a location as "mount immediately beneath the topmost mount at that
+ * place".
*
- * Return: Either the target mountpoint on the top mount or the top
- * mount's mountpoint.
+ * In all cases the location must not have been unmounted and the
+ * chosen mountpoint must be allowed to be mounted on. For "beneath"
+ * case we also require the location to be at the root of a mount
+ * that has a parent (i.e. is not a root of some namespace).
*/
-static struct mountpoint *do_lock_mount(struct path *path, bool beneath)
+static void do_lock_mount(const struct path *path,
+ struct pinned_mountpoint *res,
+ bool beneath)
{
- struct vfsmount *mnt = path->mnt;
- struct dentry *dentry;
- struct mountpoint *mp = ERR_PTR(-ENOENT);
+ int err;
- for (;;) {
- struct mount *m;
+ if (unlikely(beneath) && !path_mounted(path)) {
+ res->parent = ERR_PTR(-EINVAL);
+ return;
+ }
- if (beneath) {
- m = real_mount(mnt);
- read_seqlock_excl(&mount_lock);
- dentry = dget(m->mnt_mountpoint);
- read_sequnlock_excl(&mount_lock);
- } else {
- dentry = path->dentry;
+ do {
+ struct dentry *dentry, *d;
+ struct mount *m, *n;
+
+ scoped_guard(mount_locked_reader) {
+ m = where_to_mount(path, &dentry, beneath);
+ if (&m->mnt != path->mnt) {
+ mntget(&m->mnt);
+ dget(dentry);
+ }
}
inode_lock(dentry->d_inode);
- if (unlikely(cant_mount(dentry))) {
- inode_unlock(dentry->d_inode);
- goto out;
- }
-
namespace_lock();
- if (beneath && (!is_mounted(mnt) || m->mnt_mountpoint != dentry)) {
+ // check if the chain of mounts (if any) has changed.
+ scoped_guard(mount_locked_reader)
+ n = where_to_mount(path, &d, beneath);
+
+ if (unlikely(n != m || dentry != d))
+ err = -EAGAIN; // something moved, retry
+ else if (unlikely(cant_mount(dentry) || !is_mounted(path->mnt)))
+ err = -ENOENT; // not to be mounted on
+ else if (beneath && &m->mnt == path->mnt && !m->overmount)
+ err = -EINVAL;
+ else
+ err = get_mountpoint(dentry, res);
+
+ if (unlikely(err)) {
+ res->parent = ERR_PTR(err);
namespace_unlock();
inode_unlock(dentry->d_inode);
- goto out;
+ } else {
+ res->parent = m;
}
-
- mnt = lookup_mnt(path);
- if (likely(!mnt))
- break;
-
- namespace_unlock();
- inode_unlock(dentry->d_inode);
- if (beneath)
+ /*
+ * Drop the temporary references. This is subtle - on success
+ * we are doing that under namespace_sem, which would normally
+ * be forbidden. However, in that case we are guaranteed that
+ * refcounts won't reach zero, since we know that path->mnt
+ * is mounted and thus all mounts reachable from it are pinned
+ * and stable, along with their mountpoints and roots.
+ */
+ if (&m->mnt != path->mnt) {
dput(dentry);
- path_put(path);
- path->mnt = mnt;
- path->dentry = dget(mnt->mnt_root);
- }
-
- mp = get_mountpoint(dentry);
- if (IS_ERR(mp)) {
- namespace_unlock();
- inode_unlock(dentry->d_inode);
- }
-
-out:
- if (beneath)
- dput(dentry);
-
- return mp;
-}
-
-static inline struct mountpoint *lock_mount(struct path *path)
-{
- return do_lock_mount(path, false);
+ mntput(&m->mnt);
+ }
+ } while (err == -EAGAIN);
}
-static void unlock_mount(struct mountpoint *where)
+static void __unlock_mount(struct pinned_mountpoint *m)
{
- struct dentry *dentry = where->m_dentry;
-
+ inode_unlock(m->mp->m_dentry->d_inode);
read_seqlock_excl(&mount_lock);
- put_mountpoint(where);
+ unpin_mountpoint(m);
read_sequnlock_excl(&mount_lock);
-
namespace_unlock();
- inode_unlock(dentry->d_inode);
}
-static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
+static inline void unlock_mount(struct pinned_mountpoint *m)
+{
+ if (!IS_ERR(m->parent))
+ __unlock_mount(m);
+}
+
+#define LOCK_MOUNT_MAYBE_BENEATH(mp, path, beneath) \
+ struct pinned_mountpoint mp __cleanup(unlock_mount) = {}; \
+ do_lock_mount((path), &mp, (beneath))
+#define LOCK_MOUNT(mp, path) LOCK_MOUNT_MAYBE_BENEATH(mp, (path), false)
+#define LOCK_MOUNT_EXACT(mp, path) \
+ struct pinned_mountpoint mp __cleanup(unlock_mount) = {}; \
+ lock_mount_exact((path), &mp)
+
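
The LOCK_MOUNT*() macros bundle the declaration of a __cleanup-managed pinned_mountpoint with the call that fills it, so a caller only needs to test mp.parent; do_loopback() further below follows exactly this shape. Schematically (hypothetical caller, error handling trimmed):

	static int example_mount_here(const struct path *where, struct mount *newmnt)
	{
		LOCK_MOUNT(mp, where);		/* declares mp and, on success, takes the locks */

		if (IS_ERR(mp.parent))		/* nothing is held on failure */
			return PTR_ERR(mp.parent);

		/* namespace_sem and the mountpoint's inode lock are held here;
		 * mp.parent is the mount to attach to, mp.mp the mountpoint */
		return graft_tree(newmnt, &mp);
	}	/* unlock_mount() runs via __cleanup() on every return path */
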
+static int graft_tree(struct mount *mnt, const struct pinned_mountpoint *mp)
{
if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
return -EINVAL;
- if (d_is_dir(mp->m_dentry) !=
+ if (d_is_dir(mp->mp->m_dentry) !=
d_is_dir(mnt->mnt.mnt_root))
return -ENOTDIR;
- return attach_recursive_mnt(mnt, p, mp, 0);
+ return attach_recursive_mnt(mnt, mp);
+}
+
+static int may_change_propagation(const struct mount *m)
+{
+ struct mnt_namespace *ns = m->mnt_ns;
+
+ // it must be mounted in some namespace
+ if (IS_ERR_OR_NULL(ns)) // is_mounted()
+ return -EINVAL;
+ // and the caller must be admin in userns of that namespace
+ if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
+ return -EPERM;
+ return 0;
}
/*
@@ -2693,13 +2849,13 @@ static int flags_to_propagation_type(int ms_flags)
/*
* recursively change the type of the mountpoint.
*/
-static int do_change_type(struct path *path, int ms_flags)
+static int do_change_type(const struct path *path, int ms_flags)
{
struct mount *m;
struct mount *mnt = real_mount(path->mnt);
int recurse = ms_flags & MS_REC;
int type;
- int err = 0;
+ int err;
if (!path_mounted(path))
return -EINVAL;
@@ -2708,56 +2864,116 @@ static int do_change_type(struct path *path, int ms_flags)
if (!type)
return -EINVAL;
- namespace_lock();
+ guard(namespace_excl)();
+
+ err = may_change_propagation(mnt);
+ if (err)
+ return err;
+
if (type == MS_SHARED) {
err = invent_group_ids(mnt, recurse);
if (err)
- goto out_unlock;
+ return err;
}
- lock_mount_hash();
for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
change_mnt_propagation(m, type);
- unlock_mount_hash();
- out_unlock:
- namespace_unlock();
- return err;
+ return 0;
}
-static struct mount *__do_loopback(struct path *old_path, int recurse)
+/* may_copy_tree() - check if a mount tree can be copied
+ * @path: path to the mount tree to be copied
+ *
+ * This helper checks if the caller may copy the mount tree starting
+ * from @path->mnt. The caller may copy the mount tree under the
+ * following circumstances:
+ *
+ * (1) The caller is located in the mount namespace of the mount tree.
+ * This also implies that the mount does not belong to an anonymous
+ * mount namespace.
+ * (2) The caller tries to copy an nsfs mount referring to a mount
+ * namespace, i.e., the caller is trying to copy a mount namespace
+ * entry from nsfs.
+ * (3) The caller tries to copy a pidfs mount referring to a pidfd.
+ * (4) The caller is trying to copy a mount tree that belongs to an
+ * anonymous mount namespace.
+ *
+ * For that to be safe, this helper enforces that the origin mount
+ * namespace the anonymous mount namespace was created from is the
+ * same as the caller's mount namespace by comparing the sequence
+ * numbers.
+ *
+ * This is not strictly necessary. The current semantics of the new
+ * mount api enforce that the caller must be located in the same
+ * mount namespace as the mount tree it interacts with. Using the
+ * origin sequence number preserves these semantics even for
+ * anonymous mount namespaces. However, one could envision extending
+ * the api to directly operate across mount namespaces if needed.
+ *
+ * The ownership of a non-anonymous mount namespace such as the
+ * caller's cannot change.
+ * => We know that the caller's mount namespace is stable.
+ *
+ * If the origin sequence number of the anonymous mount namespace is
+ * the same as the sequence number of the caller's mount namespace.
+ * => The owning namespaces are the same.
+ *
+ * ==> The earlier capability check on the owning namespace of the
+ * caller's mount namespace ensures that the caller has the
+ * ability to copy the mount tree.
+ *
+ * Returns true if the mount tree can be copied, false otherwise.
+ */
+static inline bool may_copy_tree(const struct path *path)
{
- struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
+ struct mount *mnt = real_mount(path->mnt);
+ const struct dentry_operations *d_op;
+
+ if (check_mnt(mnt))
+ return true;
+
+ d_op = path->dentry->d_op;
+ if (d_op == &ns_dentry_operations)
+ return true;
+
+ if (d_op == &pidfs_dentry_operations)
+ return true;
+
+ if (!is_mounted(path->mnt))
+ return false;
+
+ return check_anonymous_mnt(mnt);
+}
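
check_anonymous_mnt() is defined outside this hunk; going by the description above and by get_detached_copy() further down (which records either the source namespace's ns_id or its seq_origin), the check boils down to "anonymous namespace whose origin is the caller's namespace". A plausible sketch, not the authoritative definition:

	static inline bool check_anonymous_mnt(struct mount *mnt)
	{
		u64 seq;

		if (!is_anon_ns(mnt->mnt_ns))
			return false;

		seq = mnt->mnt_ns->seq_origin;
		/* either never derived from anywhere, or derived from our own ns */
		return !seq || seq == current->nsproxy->mnt_ns->ns.ns_id;
	}
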
+
+
+static struct mount *__do_loopback(const struct path *old_path, int recurse)
+{
+ struct mount *old = real_mount(old_path->mnt);
if (IS_MNT_UNBINDABLE(old))
- return mnt;
+ return ERR_PTR(-EINVAL);
- if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations)
- return mnt;
+ if (!may_copy_tree(old_path))
+ return ERR_PTR(-EINVAL);
- if (!recurse && has_locked_children(old, old_path->dentry))
- return mnt;
+ if (!recurse && __has_locked_children(old, old_path->dentry))
+ return ERR_PTR(-EINVAL);
if (recurse)
- mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE);
+ return copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE);
else
- mnt = clone_mnt(old, old_path->dentry, 0);
-
- if (!IS_ERR(mnt))
- mnt->mnt.mnt_flags &= ~MNT_LOCKED;
-
- return mnt;
+ return clone_mnt(old, old_path->dentry, 0);
}
/*
* do loopback mount.
*/
-static int do_loopback(struct path *path, const char *old_name,
- int recurse)
+static int do_loopback(const struct path *path, const char *old_name,
+ int recurse)
{
- struct path old_path;
- struct mount *mnt = NULL, *parent;
- struct mountpoint *mp;
+ struct path old_path __free(path_put) = {};
+ struct mount *mnt = NULL;
int err;
if (!old_name || !*old_name)
return -EINVAL;
@@ -2765,69 +2981,78 @@ static int do_loopback(struct path *path, const char *old_name,
if (err)
return err;
- err = -EINVAL;
if (mnt_ns_loop(old_path.dentry))
- goto out;
+ return -EINVAL;
- mp = lock_mount(path);
- if (IS_ERR(mp)) {
- err = PTR_ERR(mp);
- goto out;
- }
+ LOCK_MOUNT(mp, path);
+ if (IS_ERR(mp.parent))
+ return PTR_ERR(mp.parent);
- parent = real_mount(path->mnt);
- if (!check_mnt(parent))
- goto out2;
+ if (!check_mnt(mp.parent))
+ return -EINVAL;
mnt = __do_loopback(&old_path, recurse);
- if (IS_ERR(mnt)) {
- err = PTR_ERR(mnt);
- goto out2;
- }
+ if (IS_ERR(mnt))
+ return PTR_ERR(mnt);
- err = graft_tree(mnt, parent, mp);
+ err = graft_tree(mnt, &mp);
if (err) {
lock_mount_hash();
umount_tree(mnt, UMOUNT_SYNC);
unlock_mount_hash();
}
-out2:
- unlock_mount(mp);
-out:
- path_put(&old_path);
return err;
}
-static struct file *open_detached_copy(struct path *path, bool recursive)
+static struct mnt_namespace *get_detached_copy(const struct path *path, bool recursive)
{
- struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
- struct mnt_namespace *ns = alloc_mnt_ns(user_ns, true);
+ struct mnt_namespace *ns, *mnt_ns = current->nsproxy->mnt_ns, *src_mnt_ns;
+ struct user_namespace *user_ns = mnt_ns->user_ns;
struct mount *mnt, *p;
- struct file *file;
+ ns = alloc_mnt_ns(user_ns, true);
if (IS_ERR(ns))
- return ERR_CAST(ns);
+ return ns;
+
+ guard(namespace_excl)();
+
+ /*
+ * Record the sequence number of the source mount namespace.
+ * This needs to hold namespace_sem to ensure that the mount
+ * doesn't get attached.
+ */
+ if (is_mounted(path->mnt)) {
+ src_mnt_ns = real_mount(path->mnt)->mnt_ns;
+ if (is_anon_ns(src_mnt_ns))
+ ns->seq_origin = src_mnt_ns->seq_origin;
+ else
+ ns->seq_origin = src_mnt_ns->ns.ns_id;
+ }
- namespace_lock();
mnt = __do_loopback(path, recursive);
if (IS_ERR(mnt)) {
- namespace_unlock();
- free_mnt_ns(ns);
+ emptied_ns = ns;
return ERR_CAST(mnt);
}
- lock_mount_hash();
for (p = mnt; p; p = next_mnt(p, mnt)) {
mnt_add_to_ns(ns, p);
ns->nr_mounts++;
}
ns->root = mnt;
- mntget(&mnt->mnt);
- unlock_mount_hash();
- namespace_unlock();
+ return ns;
+}
+
+static struct file *open_detached_copy(struct path *path, bool recursive)
+{
+ struct mnt_namespace *ns = get_detached_copy(path, recursive);
+ struct file *file;
+
+ if (IS_ERR(ns))
+ return ERR_CAST(ns);
mntput(path->mnt);
- path->mnt = &mnt->mnt;
+ path->mnt = mntget(&ns->root->mnt);
file = dentry_open(path, O_PATH, current_cred());
if (IS_ERR(file))
dissolve_on_fput(path->mnt);
@@ -2836,24 +3061,22 @@ static struct file *open_detached_copy(struct path *path, bool recursive)
return file;
}
-SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags)
+static struct file *vfs_open_tree(int dfd, const char __user *filename, unsigned int flags)
{
- struct file *file;
- struct path path;
+ int ret;
+ struct path path __free(path_put) = {};
int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
bool detached = flags & OPEN_TREE_CLONE;
- int error;
- int fd;
BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC);
if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE |
AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE |
OPEN_TREE_CLOEXEC))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
if (flags & AT_NO_AUTOMOUNT)
lookup_flags &= ~LOOKUP_AUTOMOUNT;
@@ -2863,28 +3086,21 @@ SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, fl
lookup_flags |= LOOKUP_EMPTY;
if (detached && !may_mount())
- return -EPERM;
+ return ERR_PTR(-EPERM);
- fd = get_unused_fd_flags(flags & O_CLOEXEC);
- if (fd < 0)
- return fd;
+ ret = user_path_at(dfd, filename, lookup_flags, &path);
+ if (unlikely(ret))
+ return ERR_PTR(ret);
- error = user_path_at(dfd, filename, lookup_flags, &path);
- if (unlikely(error)) {
- file = ERR_PTR(error);
- } else {
- if (detached)
- file = open_detached_copy(&path, flags & AT_RECURSIVE);
- else
- file = dentry_open(&path, O_PATH, current_cred());
- path_put(&path);
- }
- if (IS_ERR(file)) {
- put_unused_fd(fd);
- return PTR_ERR(file);
- }
- fd_install(fd, file);
- return fd;
+ if (detached)
+ return open_detached_copy(&path, flags & AT_RECURSIVE);
+
+ return dentry_open(&path, O_PATH, current_cred());
+}
+
+SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags)
+{
+ return FD_ADD(flags, vfs_open_tree(dfd, filename, flags));
}
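
From userspace, OPEN_TREE_CLONE is what reaches open_detached_copy() above: the returned O_PATH descriptor carries a detached copy of the subtree, typically attached elsewhere with move_mount(2). A minimal sketch (recent glibc ships open_tree()/move_mount() wrappers; on older systems use syscall(2) with SYS_open_tree/SYS_move_mount and the uapi headers):

	#include <sys/mount.h>
	#include <fcntl.h>
	#include <unistd.h>

	static int clone_subtree(const char *src, const char *dst)
	{
		int fd = open_tree(AT_FDCWD, src,
				   OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC | AT_RECURSIVE);
		if (fd < 0)
			return -1;
		if (move_mount(fd, "", AT_FDCWD, dst, MOVE_MOUNT_F_EMPTY_PATH) < 0) {
			close(fd);
			return -1;
		}
		close(fd);
		return 0;
	}
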
/*
@@ -2941,7 +3157,8 @@ static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
touch_mnt_namespace(mnt->mnt_ns);
}
-static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt)
+static void mnt_warn_timestamp_expiry(const struct path *mountpoint,
+ struct vfsmount *mnt)
{
struct super_block *sb = mnt->mnt_sb;
@@ -2975,7 +3192,7 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *
* superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND
* to mount(2).
*/
-static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags)
+static int do_reconfigure_mnt(const struct path *path, unsigned int mnt_flags)
{
struct super_block *sb = path->mnt->mnt_sb;
struct mount *mnt = real_mount(path->mnt);
@@ -3012,7 +3229,7 @@ static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags)
* If you've mounted a non-root directory somewhere and want to do remount
* on it - tough luck.
*/
-static int do_remount(struct path *path, int ms_flags, int sb_flags,
+static int do_remount(const struct path *path, int sb_flags,
int mnt_flags, void *data)
{
int err;
@@ -3070,98 +3287,58 @@ static inline int tree_contains_unbindable(struct mount *mnt)
return 0;
}
-/*
- * Check that there aren't references to earlier/same mount namespaces in the
- * specified subtree. Such references can act as pins for mount namespaces
- * that aren't checked by the mount-cycle checking code, thereby allowing
- * cycles to be made.
- */
-static bool check_for_nsfs_mounts(struct mount *subtree)
-{
- struct mount *p;
- bool ret = false;
-
- lock_mount_hash();
- for (p = subtree; p; p = next_mnt(p, subtree))
- if (mnt_ns_loop(p->mnt.mnt_root))
- goto out;
-
- ret = true;
-out:
- unlock_mount_hash();
- return ret;
-}
-
-static int do_set_group(struct path *from_path, struct path *to_path)
+static int do_set_group(const struct path *from_path, const struct path *to_path)
{
- struct mount *from, *to;
+ struct mount *from = real_mount(from_path->mnt);
+ struct mount *to = real_mount(to_path->mnt);
int err;
- from = real_mount(from_path->mnt);
- to = real_mount(to_path->mnt);
+ guard(namespace_excl)();
- namespace_lock();
-
- err = -EINVAL;
- /* To and From must be mounted */
- if (!is_mounted(&from->mnt))
- goto out;
- if (!is_mounted(&to->mnt))
- goto out;
-
- err = -EPERM;
- /* We should be allowed to modify mount namespaces of both mounts */
- if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN))
- goto out;
- if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN))
- goto out;
+ err = may_change_propagation(from);
+ if (err)
+ return err;
+ err = may_change_propagation(to);
+ if (err)
+ return err;
- err = -EINVAL;
/* To and From paths should be mount roots */
if (!path_mounted(from_path))
- goto out;
+ return -EINVAL;
if (!path_mounted(to_path))
- goto out;
+ return -EINVAL;
/* Setting sharing groups is only allowed across same superblock */
if (from->mnt.mnt_sb != to->mnt.mnt_sb)
- goto out;
+ return -EINVAL;
/* From mount root should be wider than To mount root */
if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root))
- goto out;
+ return -EINVAL;
/* From mount should not have locked children in place of To's root */
- if (has_locked_children(from, to->mnt.mnt_root))
- goto out;
+ if (__has_locked_children(from, to->mnt.mnt_root))
+ return -EINVAL;
/* Setting sharing groups is only allowed on private mounts */
if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to))
- goto out;
+ return -EINVAL;
/* From should not be private */
if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from))
- goto out;
+ return -EINVAL;
if (IS_MNT_SLAVE(from)) {
- struct mount *m = from->mnt_master;
-
- list_add(&to->mnt_slave, &m->mnt_slave_list);
- to->mnt_master = m;
+ hlist_add_behind(&to->mnt_slave, &from->mnt_slave);
+ to->mnt_master = from->mnt_master;
}
if (IS_MNT_SHARED(from)) {
to->mnt_group_id = from->mnt_group_id;
list_add(&to->mnt_share, &from->mnt_share);
- lock_mount_hash();
set_mnt_shared(to);
- unlock_mount_hash();
}
-
- err = 0;
-out:
- namespace_unlock();
- return err;
+ return 0;
}
/**
@@ -3171,33 +3348,49 @@ out:
* Check if path is overmounted, i.e., if there's a mount on top of
* @path->mnt with @path->dentry as mountpoint.
*
- * Context: This function expects namespace_lock() to be held.
+ * Context: namespace_sem must be held at least shared.
+ * MUST NOT be called under lock_mount_hash() (there one should just
+ * call __lookup_mnt() and check if it returns NULL).
* Return: If path is overmounted true is returned, false if not.
*/
static inline bool path_overmounted(const struct path *path)
{
+ unsigned seq = read_seqbegin(&mount_lock);
+ bool no_child;
+
rcu_read_lock();
- if (unlikely(__lookup_mnt(path->mnt, path->dentry))) {
- rcu_read_unlock();
- return true;
- }
+ no_child = !__lookup_mnt(path->mnt, path->dentry);
rcu_read_unlock();
- return false;
+ if (need_seqretry(&mount_lock, seq)) {
+ read_seqlock_excl(&mount_lock);
+ no_child = !__lookup_mnt(path->mnt, path->dentry);
+ read_sequnlock_excl(&mount_lock);
+ }
+ return unlikely(!no_child);
+}
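
The shape of path_overmounted() - an optimistic lockless pass validated against mount_lock's sequence count, falling back to the exclusive side only when a writer raced - is the standard seqlock read idiom. A generic sketch of the same pattern (example_flag is made up, not from this patch):

	static bool example_flag;

	static bool example_read_flag(void)
	{
		unsigned seq = read_seqbegin(&mount_lock);
		bool val = READ_ONCE(example_flag);

		if (read_seqretry(&mount_lock, seq)) {
			/* lost the race with a writer: take the lock and re-read */
			read_seqlock_excl(&mount_lock);
			val = example_flag;
			read_sequnlock_excl(&mount_lock);
		}
		return val;
	}
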
+
+/*
+ * Check if there is a possibly empty chain of descent from p1 to p2.
+ * Locks: namespace_sem (shared) or mount_lock (read_seqlock_excl).
+ */
+static bool mount_is_ancestor(const struct mount *p1, const struct mount *p2)
+{
+ while (p2 != p1 && mnt_has_parent(p2))
+ p2 = p2->mnt_parent;
+ return p2 == p1;
}
/**
* can_move_mount_beneath - check that we can mount beneath the top mount
- * @from: mount to mount beneath
- * @to: mount under which to mount
- * @mp: mountpoint of @to
+ * @mnt_from: mount we are trying to move
+ * @mnt_to: mount under which to mount
+ * @mp: mountpoint of @mnt_to
*
- * - Make sure that @to->dentry is actually the root of a mount under
- * which we can mount another mount.
* - Make sure that nothing can be mounted beneath the caller's current
* root or the rootfs of the namespace.
* - Make sure that the caller can unmount the topmost mount ensuring
* that the caller could reveal the underlying mountpoint.
- * - Ensure that nothing has been mounted on top of @from before we
+ * - Ensure that nothing has been mounted on top of @mnt_from before we
* grabbed @namespace_sem to avoid creating pointless shadow mounts.
* - Prevent mounting beneath a mount if the propagation relationship
* between the source mount, parent mount, and top mount would lead to
@@ -3206,25 +3399,17 @@ static inline bool path_overmounted(const struct path *path)
* Context: This function expects namespace_lock() to be held.
* Return: On success 0, and on error a negative error code is returned.
*/
-static int can_move_mount_beneath(const struct path *from,
- const struct path *to,
+static int can_move_mount_beneath(const struct mount *mnt_from,
+ const struct mount *mnt_to,
const struct mountpoint *mp)
{
- struct mount *mnt_from = real_mount(from->mnt),
- *mnt_to = real_mount(to->mnt),
- *parent_mnt_to = mnt_to->mnt_parent;
-
- if (!mnt_has_parent(mnt_to))
- return -EINVAL;
-
- if (!path_mounted(to))
- return -EINVAL;
+ struct mount *parent_mnt_to = mnt_to->mnt_parent;
if (IS_MNT_LOCKED(mnt_to))
return -EINVAL;
/* Avoid creating shadow mounts during mount propagation. */
- if (path_overmounted(from))
+ if (mnt_from->overmount)
return -EINVAL;
/*
@@ -3236,9 +3421,8 @@ static int can_move_mount_beneath(const struct path *from,
if (parent_mnt_to == current->nsproxy->mnt_ns->root)
return -EINVAL;
- for (struct mount *p = mnt_from; mnt_has_parent(p); p = p->mnt_parent)
- if (p == mnt_to)
- return -EINVAL;
+ if (mount_is_ancestor(mnt_to, mnt_from))
+ return -EINVAL;
/*
* If the parent mount propagates to the child mount this would
@@ -3261,111 +3445,138 @@ static int can_move_mount_beneath(const struct path *from,
* @mnt_from itself. This defeats the whole purpose of mounting
* @mnt_from beneath @mnt_to.
*/
- if (propagation_would_overmount(parent_mnt_to, mnt_from, mp))
+ if (check_mnt(mnt_from) &&
+ propagation_would_overmount(parent_mnt_to, mnt_from, mp))
return -EINVAL;
return 0;
}
-static int do_move_mount(struct path *old_path, struct path *new_path,
- bool beneath)
+/* may_use_mount() - check if a mount tree can be used
+ * @mnt: vfsmount to be used
+ *
+ * This helper checks if the caller may use the mount tree starting
+ * from @path->mnt. The caller may use the mount tree under the
+ * following circumstances:
+ *
+ * (1) The caller is located in the mount namespace of the mount tree.
+ * This also implies that the mount does not belong to an anonymous
+ * mount namespace.
+ * (2) The caller is trying to use a mount tree that belongs to an
+ * anonymous mount namespace.
+ *
+ * For that to be safe, this helper enforces that the origin mount
+ * namespace the anonymous mount namespace was created from is the
+ * same as the caller's mount namespace by comparing the sequence
+ * numbers.
+ *
+ * The ownership of a non-anonymous mount namespace such as the
+ * caller's cannot change.
+ * => We know that the caller's mount namespace is stable.
+ *
+ * If the origin sequence number of the anonymous mount namespace is
+ * the same as the sequence number of the caller's mount namespace.
+ * => The owning namespaces are the same.
+ *
+ * ==> The earlier capability check on the owning namespace of the
+ * caller's mount namespace ensures that the caller has the
+ * ability to use the mount tree.
+ *
+ * Returns true if the mount tree can be used, false otherwise.
+ */
+static inline bool may_use_mount(struct mount *mnt)
{
- struct mnt_namespace *ns;
- struct mount *p;
- struct mount *old;
- struct mount *parent;
- struct mountpoint *mp, *old_mp;
- int err;
- bool attached;
- enum mnt_tree_flags_t flags = 0;
-
- mp = do_lock_mount(new_path, beneath);
- if (IS_ERR(mp))
- return PTR_ERR(mp);
-
- old = real_mount(old_path->mnt);
- p = real_mount(new_path->mnt);
- parent = old->mnt_parent;
- attached = mnt_has_parent(old);
- if (attached)
- flags |= MNT_TREE_MOVE;
- old_mp = old->mnt_mp;
- ns = old->mnt_ns;
-
- err = -EINVAL;
- /* The mountpoint must be in our namespace. */
- if (!check_mnt(p))
- goto out;
+ if (check_mnt(mnt))
+ return true;
- /* The thing moved must be mounted... */
- if (!is_mounted(&old->mnt))
- goto out;
+ /*
+	 * Make sure that no one unmounted the target path or somehow
+ * managed to get their hands on something purely kernel
+ * internal.
+ */
+ if (!is_mounted(&mnt->mnt))
+ return false;
- /* ... and either ours or the root of anon namespace */
- if (!(attached ? check_mnt(old) : is_anon_ns(ns)))
- goto out;
+ return check_anonymous_mnt(mnt);
+}
- if (old->mnt.mnt_flags & MNT_LOCKED)
- goto out;
+static int do_move_mount(const struct path *old_path,
+ const struct path *new_path,
+ enum mnt_tree_flags_t flags)
+{
+ struct mount *old = real_mount(old_path->mnt);
+ int err;
+ bool beneath = flags & MNT_TREE_BENEATH;
if (!path_mounted(old_path))
- goto out;
+ return -EINVAL;
- if (d_is_dir(new_path->dentry) !=
- d_is_dir(old_path->dentry))
- goto out;
- /*
- * Don't move a mount residing in a shared parent.
- */
- if (attached && IS_MNT_SHARED(parent))
- goto out;
+ if (d_is_dir(new_path->dentry) != d_is_dir(old_path->dentry))
+ return -EINVAL;
+
+ LOCK_MOUNT_MAYBE_BENEATH(mp, new_path, beneath);
+ if (IS_ERR(mp.parent))
+ return PTR_ERR(mp.parent);
+
+ if (check_mnt(old)) {
+ /* if the source is in our namespace... */
+ /* ... it should be detachable from parent */
+ if (!mnt_has_parent(old) || IS_MNT_LOCKED(old))
+ return -EINVAL;
+ /* ... which should not be shared */
+ if (IS_MNT_SHARED(old->mnt_parent))
+ return -EINVAL;
+ /* ... and the target should be in our namespace */
+ if (!check_mnt(mp.parent))
+ return -EINVAL;
+ } else {
+ /*
+ * otherwise the source must be the root of some anon namespace.
+ */
+ if (!anon_ns_root(old))
+ return -EINVAL;
+ /*
+ * Bail out early if the target is within the same namespace -
+ * subsequent checks would've rejected that, but they lose
+ * some corner cases if we check it early.
+ */
+ if (old->mnt_ns == mp.parent->mnt_ns)
+ return -EINVAL;
+ /*
+ * Target should be either in our namespace or in an acceptable
+ * anon namespace, sensu check_anonymous_mnt().
+ */
+ if (!may_use_mount(mp.parent))
+ return -EINVAL;
+ }
if (beneath) {
- err = can_move_mount_beneath(old_path, new_path, mp);
- if (err)
- goto out;
+ struct mount *over = real_mount(new_path->mnt);
- err = -EINVAL;
- p = p->mnt_parent;
- flags |= MNT_TREE_BENEATH;
+ if (mp.parent != over->mnt_parent)
+ over = mp.parent->overmount;
+ err = can_move_mount_beneath(old, over, mp.mp);
+ if (err)
+ return err;
}
/*
* Don't move a mount tree containing unbindable mounts to a destination
* mount which is shared.
*/
- if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
- goto out;
- err = -ELOOP;
+ if (IS_MNT_SHARED(mp.parent) && tree_contains_unbindable(old))
+ return -EINVAL;
if (!check_for_nsfs_mounts(old))
- goto out;
- for (; mnt_has_parent(p); p = p->mnt_parent)
- if (p == old)
- goto out;
+ return -ELOOP;
+ if (mount_is_ancestor(old, mp.parent))
+ return -ELOOP;
- err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp, flags);
- if (err)
- goto out;
-
- /* if the mount is moved, it should no longer be expire
- * automatically */
- list_del_init(&old->mnt_expire);
- if (attached)
- put_mountpoint(old_mp);
-out:
- unlock_mount(mp);
- if (!err) {
- if (attached)
- mntput_no_expire(parent);
- else
- free_mnt_ns(ns);
- }
- return err;
+ return attach_recursive_mnt(old, &mp);
}
-static int do_move_mount_old(struct path *path, const char *old_name)
+static int do_move_mount_old(const struct path *path, const char *old_name)
{
- struct path old_path;
+ struct path old_path __free(path_put) = {};
int err;
if (!old_name || !*old_name)
@@ -3375,18 +3586,19 @@ static int do_move_mount_old(struct path *path, const char *old_name)
if (err)
return err;
- err = do_move_mount(&old_path, path, false);
- path_put(&old_path);
- return err;
+ return do_move_mount(&old_path, path, 0);
}
/*
* add a mount into a namespace's mount tree
*/
-static int do_add_mount(struct mount *newmnt, struct mountpoint *mp,
- const struct path *path, int mnt_flags)
+static int do_add_mount(struct mount *newmnt, const struct pinned_mountpoint *mp,
+ int mnt_flags)
{
- struct mount *parent = real_mount(path->mnt);
+ struct mount *parent = mp->parent;
+
+ if (IS_ERR(parent))
+ return PTR_ERR(parent);
mnt_flags &= ~MNT_INTERNAL_FLAGS;
@@ -3400,14 +3612,15 @@ static int do_add_mount(struct mount *newmnt, struct mountpoint *mp,
}
/* Refuse the same filesystem on the same mount point */
- if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && path_mounted(path))
+ if (parent->mnt.mnt_sb == newmnt->mnt.mnt_sb &&
+ parent->mnt.mnt_root == mp->mp->m_dentry)
return -EBUSY;
if (d_is_symlink(newmnt->mnt.mnt_root))
return -EINVAL;
newmnt->mnt.mnt_flags = mnt_flags;
- return graft_tree(newmnt, parent, mp);
+ return graft_tree(newmnt, mp);
}
static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags);
@@ -3416,40 +3629,32 @@ static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags
* Create a new mount using a superblock configuration and request it
* be added to the namespace tree.
*/
-static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
+static int do_new_mount_fc(struct fs_context *fc, const struct path *mountpoint,
unsigned int mnt_flags)
{
- struct vfsmount *mnt;
- struct mountpoint *mp;
- struct super_block *sb = fc->root->d_sb;
+ struct super_block *sb;
+ struct vfsmount *mnt __free(mntput) = fc_mount(fc);
int error;
- error = security_sb_kern_mount(sb);
- if (!error && mount_too_revealing(sb, &mnt_flags))
- error = -EPERM;
+ if (IS_ERR(mnt))
+ return PTR_ERR(mnt);
- if (unlikely(error)) {
- fc_drop_locked(fc);
+ sb = fc->root->d_sb;
+ error = security_sb_kern_mount(sb);
+ if (unlikely(error))
return error;
- }
- up_write(&sb->s_umount);
-
- mnt = vfs_create_mount(fc);
- if (IS_ERR(mnt))
- return PTR_ERR(mnt);
+ if (unlikely(mount_too_revealing(sb, &mnt_flags))) {
+ errorfcp(fc, "VFS", "Mount too revealing");
+ return -EPERM;
+ }
mnt_warn_timestamp_expiry(mountpoint, mnt);
- mp = lock_mount(mountpoint);
- if (IS_ERR(mp)) {
- mntput(mnt);
- return PTR_ERR(mp);
- }
- error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags);
- unlock_mount(mp);
- if (error < 0)
- mntput(mnt);
+ LOCK_MOUNT(mp, mountpoint);
+ error = do_add_mount(real_mount(mnt), &mp, mnt_flags);
+ if (!error)
+ retain_and_null_ptr(mnt); // consumed on success
return error;
}
@@ -3457,8 +3662,9 @@ static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
* create a new mount for userspace and request it to be added into the
* namespace's tree
*/
-static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
- int mnt_flags, const char *name, void *data)
+static int do_new_mount(const struct path *path, const char *fstype,
+ int sb_flags, int mnt_flags,
+ const char *name, void *data)
{
struct file_system_type *type;
struct fs_context *fc;
@@ -3495,27 +3701,46 @@ static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
fc->oldapi = true;
if (subtype)
- err = vfs_parse_fs_string(fc, "subtype",
- subtype, strlen(subtype));
+ err = vfs_parse_fs_string(fc, "subtype", subtype);
if (!err && name)
- err = vfs_parse_fs_string(fc, "source", name, strlen(name));
+ err = vfs_parse_fs_string(fc, "source", name);
if (!err)
err = parse_monolithic_mount_data(fc, data);
if (!err && !mount_capable(fc))
err = -EPERM;
if (!err)
- err = vfs_get_tree(fc);
- if (!err)
err = do_new_mount_fc(fc, path, mnt_flags);
put_fs_context(fc);
return err;
}
-int finish_automount(struct vfsmount *m, const struct path *path)
+static void lock_mount_exact(const struct path *path,
+ struct pinned_mountpoint *mp)
{
struct dentry *dentry = path->dentry;
- struct mountpoint *mp;
+ int err;
+
+ inode_lock(dentry->d_inode);
+ namespace_lock();
+ if (unlikely(cant_mount(dentry)))
+ err = -ENOENT;
+ else if (path_overmounted(path))
+ err = -EBUSY;
+ else
+ err = get_mountpoint(dentry, mp);
+ if (unlikely(err)) {
+ namespace_unlock();
+ inode_unlock(dentry->d_inode);
+ mp->parent = ERR_PTR(err);
+ } else {
+ mp->parent = real_mount(path->mnt);
+ }
+}
+
+int finish_automount(struct vfsmount *__m, const struct path *path)
+{
+ struct vfsmount *m __free(mntput) = __m;
struct mount *mnt;
int err;
@@ -3525,57 +3750,22 @@ int finish_automount(struct vfsmount *m, const struct path *path)
return PTR_ERR(m);
mnt = real_mount(m);
- /* The new mount record should have at least 2 refs to prevent it being
- * expired before we get a chance to add it
- */
- BUG_ON(mnt_get_count(mnt) < 2);
- if (m->mnt_sb == path->mnt->mnt_sb &&
- m->mnt_root == dentry) {
- err = -ELOOP;
- goto discard;
- }
+ if (m->mnt_root == path->dentry)
+ return -ELOOP;
/*
- * we don't want to use lock_mount() - in this case finding something
+ * we don't want to use LOCK_MOUNT() - in this case finding something
	 * that overmounts our mountpoint means "quietly drop what we've
	 * got", not "try to mount it on top".
*/
- inode_lock(dentry->d_inode);
- namespace_lock();
- if (unlikely(cant_mount(dentry))) {
- err = -ENOENT;
- goto discard_locked;
- }
- if (path_overmounted(path)) {
- err = 0;
- goto discard_locked;
- }
- mp = get_mountpoint(dentry);
- if (IS_ERR(mp)) {
- err = PTR_ERR(mp);
- goto discard_locked;
- }
-
- err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
- unlock_mount(mp);
- if (unlikely(err))
- goto discard;
- mntput(m);
- return 0;
+ LOCK_MOUNT_EXACT(mp, path);
+ if (mp.parent == ERR_PTR(-EBUSY))
+ return 0;
-discard_locked:
- namespace_unlock();
- inode_unlock(dentry->d_inode);
-discard:
- /* remove m from any expiration list it may be on */
- if (!list_empty(&mnt->mnt_expire)) {
- namespace_lock();
- list_del_init(&mnt->mnt_expire);
- namespace_unlock();
- }
- mntput(m);
- mntput(m);
+ err = do_add_mount(mnt, &mp, path->mnt->mnt_flags | MNT_SHRINKABLE);
+ if (likely(!err))
+ retain_and_null_ptr(m);
return err;
}
@@ -3586,11 +3776,8 @@ discard:
*/
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
- namespace_lock();
-
+ guard(mount_locked_reader)();
list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
-
- namespace_unlock();
}
EXPORT_SYMBOL(mnt_set_expiry);
@@ -3607,16 +3794,19 @@ void mark_mounts_for_expiry(struct list_head *mounts)
if (list_empty(mounts))
return;
- namespace_lock();
- lock_mount_hash();
+ guard(namespace_excl)();
+ guard(mount_writer)();
/* extract from the expiration list every vfsmount that matches the
* following criteria:
+ * - already mounted
* - only referenced by its parent vfsmount
* - still marked for expiry (marked on the last call here; marks are
* cleared by mntput())
*/
list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
+ if (!is_mounted(&mnt->mnt))
+ continue;
if (!xchg(&mnt->mnt_expiry_mark, 1) ||
propagate_mount_busy(mnt, 1))
continue;
@@ -3627,8 +3817,6 @@ void mark_mounts_for_expiry(struct list_head *mounts)
touch_mnt_namespace(mnt->mnt_ns);
umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
}
- unlock_mount_hash();
- namespace_unlock();
}
EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
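
mnt_set_expiry()/mark_mounts_for_expiry() serve automount-heavy filesystems (NFS, AFS and the like): each automounted submount goes on a per-fs expiry list and a periodic worker lets the core reap whatever nobody has touched since the previous pass. A hypothetical wiring of that pattern (names are illustrative):

	static LIST_HEAD(example_expiry_list);
	static void example_expiry_worker(struct work_struct *work);
	static DECLARE_DELAYED_WORK(example_expiry_work, example_expiry_worker);

	static void example_expiry_worker(struct work_struct *work)
	{
		mark_mounts_for_expiry(&example_expiry_list);
		if (!list_empty(&example_expiry_list))
			schedule_delayed_work(&example_expiry_work, 30 * HZ);
	}

	/* called from the filesystem's ->d_automount() once the mount is set up */
	static void example_arm_expiry(struct vfsmount *mnt)
	{
		mnt_set_expiry(mnt, &example_expiry_list);
		schedule_delayed_work(&example_expiry_work, 30 * HZ);
	}
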
@@ -3756,7 +3944,7 @@ static char *copy_mount_string(const void __user *data)
* Therefore, if this magic number is present, it carries no information
* and must be discarded.
*/
-int path_mount(const char *dev_name, struct path *path,
+int path_mount(const char *dev_name, const struct path *path,
const char *type_page, unsigned long flags, void *data_page)
{
unsigned int mnt_flags = 0, sb_flags;
@@ -3823,7 +4011,7 @@ int path_mount(const char *dev_name, struct path *path,
if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND))
return do_reconfigure_mnt(path, mnt_flags);
if (flags & MS_REMOUNT)
- return do_remount(path, flags, sb_flags, mnt_flags, data_page);
+ return do_remount(path, sb_flags, mnt_flags, data_page);
if (flags & MS_BIND)
return do_loopback(path, dev_name, flags & MS_REC);
if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
@@ -3835,18 +4023,16 @@ int path_mount(const char *dev_name, struct path *path,
data_page);
}
-long do_mount(const char *dev_name, const char __user *dir_name,
+int do_mount(const char *dev_name, const char __user *dir_name,
const char *type_page, unsigned long flags, void *data_page)
{
- struct path path;
+ struct path path __free(path_put) = {};
int ret;
ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
if (ret)
return ret;
- ret = path_mount(dev_name, &path, type_page, flags, data_page);
- path_put(&path);
- return ret;
+ return path_mount(dev_name, &path, type_page, flags, data_page);
}
static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
@@ -3862,20 +4048,11 @@ static void dec_mnt_namespaces(struct ucounts *ucounts)
static void free_mnt_ns(struct mnt_namespace *ns)
{
if (!is_anon_ns(ns))
- ns_free_inum(&ns->ns);
+ ns_common_free(ns);
dec_mnt_namespaces(ns->ucounts);
mnt_ns_tree_remove(ns);
}
-/*
- * Assign a sequence number so we can detect when we attempt to bind
- * mount a reference to an older mount namespace into the current
- * mount namespace, preventing reference counting loops. A 64bit
- * number incrementing at 10Ghz will take 12,427 years to wrap which
- * is effectively never, so we can ignore the possibility.
- */
-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
-
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon)
{
struct mnt_namespace *new_ns;
@@ -3891,21 +4068,21 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool a
dec_mnt_namespaces(ucounts);
return ERR_PTR(-ENOMEM);
}
- if (!anon) {
- ret = ns_alloc_inum(&new_ns->ns);
- if (ret) {
- kfree(new_ns);
- dec_mnt_namespaces(ucounts);
- return ERR_PTR(ret);
- }
+
+ if (anon)
+ ret = ns_common_init_inum(new_ns, MNT_NS_ANON_INO);
+ else
+ ret = ns_common_init(new_ns);
+ if (ret) {
+ kfree(new_ns);
+ dec_mnt_namespaces(ucounts);
+ return ERR_PTR(ret);
}
- new_ns->ns.ops = &mntns_operations;
- if (!anon)
- new_ns->seq = atomic64_inc_return(&mnt_ns_seq);
- refcount_set(&new_ns->ns.count, 1);
+ ns_tree_gen_id(new_ns);
+
+ new_ns->is_anon = anon;
refcount_set(&new_ns->passive, 1);
new_ns->mounts = RB_ROOT;
- RB_CLEAR_NODE(&new_ns->mnt_ns_tree_node);
init_waitqueue_head(&new_ns->poll);
new_ns->user_ns = get_user_ns(user_ns);
new_ns->ucounts = ucounts;
@@ -3913,11 +4090,12 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool a
}
__latent_entropy
-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
+struct mnt_namespace *copy_mnt_ns(u64 flags, struct mnt_namespace *ns,
struct user_namespace *user_ns, struct fs_struct *new_fs)
{
struct mnt_namespace *new_ns;
- struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
+ struct vfsmount *rootmnt __free(mntput) = NULL;
+ struct vfsmount *pwdmnt __free(mntput) = NULL;
struct mount *p, *q;
struct mount *old;
struct mount *new;
@@ -3936,23 +4114,19 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
if (IS_ERR(new_ns))
return new_ns;
- namespace_lock();
+ guard(namespace_excl)();
/* First pass: copy the tree topology */
copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
if (user_ns != ns->user_ns)
- copy_flags |= CL_SHARED_TO_SLAVE;
+ copy_flags |= CL_SLAVE;
new = copy_tree(old, old->mnt.mnt_root, copy_flags);
if (IS_ERR(new)) {
- namespace_unlock();
- ns_free_inum(&new_ns->ns);
- dec_mnt_namespaces(new_ns->ucounts);
- mnt_ns_release(new_ns);
+ emptied_ns = new_ns;
return ERR_CAST(new);
}
if (user_ns != ns->user_ns) {
- lock_mount_hash();
+ guard(mount_writer)();
lock_mnt_tree(new);
- unlock_mount_hash();
}
new_ns->root = new;
@@ -3984,14 +4158,7 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
while (p->mnt.mnt_root != q->mnt.mnt_root)
p = next_mnt(skip_mnt_tree(p), old);
}
- mnt_ns_tree_add(new_ns);
- namespace_unlock();
-
- if (rootmnt)
- mntput(rootmnt);
- if (pwdmnt)
- mntput(pwdmnt);
-
+ ns_tree_add_raw(new_ns);
return new_ns;
}
@@ -4102,10 +4269,10 @@ static unsigned int attr_flags_to_mnt_flags(u64 attr_flags)
SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
unsigned int, attr_flags)
{
+ struct path new_path __free(path_put) = {};
struct mnt_namespace *ns;
struct fs_context *fc;
- struct file *file;
- struct path newmount;
+ struct vfsmount *new_mnt;
struct mount *mnt;
unsigned int mnt_flags = 0;
long ret;
@@ -4143,35 +4310,36 @@ SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
fc = fd_file(f)->private_data;
- ret = mutex_lock_interruptible(&fc->uapi_mutex);
- if (ret < 0)
+ ACQUIRE(mutex_intr, uapi_mutex)(&fc->uapi_mutex);
+ ret = ACQUIRE_ERR(mutex_intr, &uapi_mutex);
+ if (ret)
return ret;
/* There must be a valid superblock or we can't mount it */
ret = -EINVAL;
if (!fc->root)
- goto err_unlock;
+ return ret;
ret = -EPERM;
if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) {
- pr_warn("VFS: Mount too revealing\n");
- goto err_unlock;
+ errorfcp(fc, "VFS", "Mount too revealing");
+ return ret;
}
ret = -EBUSY;
if (fc->phase != FS_CONTEXT_AWAITING_MOUNT)
- goto err_unlock;
+ return ret;
if (fc->sb_flags & SB_MANDLOCK)
warn_mandlock();
- newmount.mnt = vfs_create_mount(fc);
- if (IS_ERR(newmount.mnt)) {
- ret = PTR_ERR(newmount.mnt);
- goto err_unlock;
- }
- newmount.dentry = dget(fc->root);
- newmount.mnt->mnt_flags = mnt_flags;
+ new_mnt = vfs_create_mount(fc);
+ if (IS_ERR(new_mnt))
+ return PTR_ERR(new_mnt);
+ new_mnt->mnt_flags = mnt_flags;
+
+ new_path.dentry = dget(fc->root);
+ new_path.mnt = new_mnt;
/* We've done the mount bit - now move the file context into more or
* less the same state as if we'd done an fspick(). We don't want to
@@ -4181,38 +4349,43 @@ SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
vfs_clean_context(fc);
ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true);
- if (IS_ERR(ns)) {
- ret = PTR_ERR(ns);
- goto err_path;
- }
- mnt = real_mount(newmount.mnt);
+ if (IS_ERR(ns))
+ return PTR_ERR(ns);
+ mnt = real_mount(new_path.mnt);
ns->root = mnt;
ns->nr_mounts = 1;
mnt_add_to_ns(ns, mnt);
- mntget(newmount.mnt);
+ mntget(new_path.mnt);
- /* Attach to an apparent O_PATH fd with a note that we need to unmount
- * it, not just simply put it.
- */
- file = dentry_open(&newmount, O_PATH, fc->cred);
- if (IS_ERR(file)) {
- dissolve_on_fput(newmount.mnt);
- ret = PTR_ERR(file);
- goto err_path;
+ FD_PREPARE(fdf, (flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0,
+ dentry_open(&new_path, O_PATH, fc->cred));
+ if (fdf.err) {
+ dissolve_on_fput(new_path.mnt);
+ return fdf.err;
}
- file->f_mode |= FMODE_NEED_UNMOUNT;
- ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0);
- if (ret >= 0)
- fd_install(ret, file);
- else
- fput(file);
+ /*
+ * Attach to an apparent O_PATH fd with a note that we
+ * need to unmount it, not just simply put it.
+ */
+ fd_prepare_file(fdf)->f_mode |= FMODE_NEED_UNMOUNT;
+ return fd_publish(fdf);
+}
-err_path:
- path_put(&newmount);
-err_unlock:
- mutex_unlock(&fc->uapi_mutex);
- return ret;
+static inline int vfs_move_mount(const struct path *from_path,
+ const struct path *to_path,
+ enum mnt_tree_flags_t mflags)
+{
+ int ret;
+
+ ret = security_move_mount(from_path, to_path);
+ if (ret)
+ return ret;
+
+ if (mflags & MNT_TREE_PROPAGATION)
+ return do_set_group(from_path, to_path);
+
+ return do_move_mount(from_path, to_path, mflags);
}
/*
@@ -4228,8 +4401,12 @@ SYSCALL_DEFINE5(move_mount,
int, to_dfd, const char __user *, to_pathname,
unsigned int, flags)
{
- struct path from_path, to_path;
- unsigned int lflags;
+ struct path to_path __free(path_put) = {};
+ struct path from_path __free(path_put) = {};
+ struct filename *to_name __free(putname) = NULL;
+ struct filename *from_name __free(putname) = NULL;
+ unsigned int lflags, uflags;
+ enum mnt_tree_flags_t mflags = 0;
int ret = 0;
if (!may_mount())
@@ -4242,49 +4419,67 @@ SYSCALL_DEFINE5(move_mount,
(MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP))
return -EINVAL;
- /* If someone gives a pathname, they aren't permitted to move
- * from an fd that requires unmount as we can't get at the flag
- * to clear it afterwards.
- */
- lflags = 0;
- if (flags & MOVE_MOUNT_F_SYMLINKS) lflags |= LOOKUP_FOLLOW;
- if (flags & MOVE_MOUNT_F_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
- if (flags & MOVE_MOUNT_F_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
+ if (flags & MOVE_MOUNT_SET_GROUP) mflags |= MNT_TREE_PROPAGATION;
+ if (flags & MOVE_MOUNT_BENEATH) mflags |= MNT_TREE_BENEATH;
- ret = user_path_at(from_dfd, from_pathname, lflags, &from_path);
- if (ret < 0)
- return ret;
+ uflags = 0;
+ if (flags & MOVE_MOUNT_T_EMPTY_PATH)
+ uflags = AT_EMPTY_PATH;
- lflags = 0;
- if (flags & MOVE_MOUNT_T_SYMLINKS) lflags |= LOOKUP_FOLLOW;
- if (flags & MOVE_MOUNT_T_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
- if (flags & MOVE_MOUNT_T_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
+ to_name = getname_maybe_null(to_pathname, uflags);
+ if (IS_ERR(to_name))
+ return PTR_ERR(to_name);
- ret = user_path_at(to_dfd, to_pathname, lflags, &to_path);
- if (ret < 0)
- goto out_from;
+ if (!to_name && to_dfd >= 0) {
+ CLASS(fd_raw, f_to)(to_dfd);
+ if (fd_empty(f_to))
+ return -EBADF;
- ret = security_move_mount(&from_path, &to_path);
- if (ret < 0)
- goto out_to;
+ to_path = fd_file(f_to)->f_path;
+ path_get(&to_path);
+ } else {
+ lflags = 0;
+ if (flags & MOVE_MOUNT_T_SYMLINKS)
+ lflags |= LOOKUP_FOLLOW;
+ if (flags & MOVE_MOUNT_T_AUTOMOUNTS)
+ lflags |= LOOKUP_AUTOMOUNT;
+ ret = filename_lookup(to_dfd, to_name, lflags, &to_path, NULL);
+ if (ret)
+ return ret;
+ }
- if (flags & MOVE_MOUNT_SET_GROUP)
- ret = do_set_group(&from_path, &to_path);
- else
- ret = do_move_mount(&from_path, &to_path,
- (flags & MOVE_MOUNT_BENEATH));
+ uflags = 0;
+ if (flags & MOVE_MOUNT_F_EMPTY_PATH)
+ uflags = AT_EMPTY_PATH;
-out_to:
- path_put(&to_path);
-out_from:
- path_put(&from_path);
- return ret;
+ from_name = getname_maybe_null(from_pathname, uflags);
+ if (IS_ERR(from_name))
+ return PTR_ERR(from_name);
+
+ if (!from_name && from_dfd >= 0) {
+ CLASS(fd_raw, f_from)(from_dfd);
+ if (fd_empty(f_from))
+ return -EBADF;
+
+ return vfs_move_mount(&fd_file(f_from)->f_path, &to_path, mflags);
+ }
+
+ lflags = 0;
+ if (flags & MOVE_MOUNT_F_SYMLINKS)
+ lflags |= LOOKUP_FOLLOW;
+ if (flags & MOVE_MOUNT_F_AUTOMOUNTS)
+ lflags |= LOOKUP_AUTOMOUNT;
+ ret = filename_lookup(from_dfd, from_name, lflags, &from_path, NULL);
+ if (ret)
+ return ret;
+
+ return vfs_move_mount(&from_path, &to_path, mflags);
}
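As a userspace-side illustration of the flow that fsmount() and move_mount() implement (not part of the patch), assuming a libc that exposes the new mount API wrappers (glibc 2.36 or later); error handling is elided and the target path is an example:

#include <sys/mount.h>
#include <fcntl.h>
#include <unistd.h>

static int attach_tmpfs(const char *target)
{
	int fsfd, mntfd;

	fsfd = fsopen("tmpfs", FSOPEN_CLOEXEC);
	fsconfig(fsfd, FSCONFIG_SET_STRING, "size", "16M", 0);
	fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

	/* fsmount() hands back the detached mount as an O_PATH-like fd. */
	mntfd = fsmount(fsfd, FSMOUNT_CLOEXEC, MOUNT_ATTR_NODEV);

	/* move_mount() attaches it at @target. */
	move_mount(mntfd, "", AT_FDCWD, target, MOVE_MOUNT_F_EMPTY_PATH);

	close(mntfd);
	close(fsfd);
	return 0;
}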
/*
* Return true if path is reachable from root
*
- * namespace_sem or mount_lock is held
+ * locks: mount_locked_reader || namespace_shared && is_mounted(mnt)
*/
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
const struct path *root)
@@ -4298,11 +4493,8 @@ bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
bool path_is_under(const struct path *path1, const struct path *path2)
{
- bool res;
- read_seqlock_excl(&mount_lock);
- res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
- read_sequnlock_excl(&mount_lock);
- return res;
+ guard(mount_locked_reader)();
+ return is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
}
EXPORT_SYMBOL(path_is_under);
@@ -4334,9 +4526,10 @@ EXPORT_SYMBOL(path_is_under);
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
const char __user *, put_old)
{
- struct path new, old, root;
+ struct path new __free(path_put) = {};
+ struct path old __free(path_put) = {};
+ struct path root __free(path_put) = {};
struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent;
- struct mountpoint *old_mp, *root_mp;
int error;
if (!may_mount())
@@ -4345,89 +4538,73 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
error = user_path_at(AT_FDCWD, new_root,
LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new);
if (error)
- goto out0;
+ return error;
error = user_path_at(AT_FDCWD, put_old,
LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old);
if (error)
- goto out1;
+ return error;
error = security_sb_pivotroot(&old, &new);
if (error)
- goto out2;
+ return error;
get_fs_root(current->fs, &root);
- old_mp = lock_mount(&old);
- error = PTR_ERR(old_mp);
- if (IS_ERR(old_mp))
- goto out3;
- error = -EINVAL;
+ LOCK_MOUNT(old_mp, &old);
+ old_mnt = old_mp.parent;
+ if (IS_ERR(old_mnt))
+ return PTR_ERR(old_mnt);
+
new_mnt = real_mount(new.mnt);
root_mnt = real_mount(root.mnt);
- old_mnt = real_mount(old.mnt);
ex_parent = new_mnt->mnt_parent;
root_parent = root_mnt->mnt_parent;
if (IS_MNT_SHARED(old_mnt) ||
IS_MNT_SHARED(ex_parent) ||
IS_MNT_SHARED(root_parent))
- goto out4;
+ return -EINVAL;
if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
- goto out4;
+ return -EINVAL;
if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
- goto out4;
- error = -ENOENT;
+ return -EINVAL;
if (d_unlinked(new.dentry))
- goto out4;
- error = -EBUSY;
+ return -ENOENT;
if (new_mnt == root_mnt || old_mnt == root_mnt)
- goto out4; /* loop, on the same file system */
- error = -EINVAL;
+ return -EBUSY; /* loop, on the same file system */
if (!path_mounted(&root))
- goto out4; /* not a mountpoint */
+ return -EINVAL; /* not a mountpoint */
if (!mnt_has_parent(root_mnt))
- goto out4; /* not attached */
+ return -EINVAL; /* absolute root */
if (!path_mounted(&new))
- goto out4; /* not a mountpoint */
+ return -EINVAL; /* not a mountpoint */
if (!mnt_has_parent(new_mnt))
- goto out4; /* not attached */
+ return -EINVAL; /* absolute root */
/* make sure we can reach put_old from new_root */
- if (!is_path_reachable(old_mnt, old.dentry, &new))
- goto out4;
+ if (!is_path_reachable(old_mnt, old_mp.mp->m_dentry, &new))
+ return -EINVAL;
/* make certain new is below the root */
if (!is_path_reachable(new_mnt, new.dentry, &root))
- goto out4;
+ return -EINVAL;
lock_mount_hash();
umount_mnt(new_mnt);
- root_mp = unhash_mnt(root_mnt); /* we'll need its mountpoint */
if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
new_mnt->mnt.mnt_flags |= MNT_LOCKED;
root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
}
- /* mount old root on put_old */
- attach_mnt(root_mnt, old_mnt, old_mp, false);
/* mount new_root on / */
- attach_mnt(new_mnt, root_parent, root_mp, false);
- mnt_add_count(root_parent, -1);
+ attach_mnt(new_mnt, root_parent, root_mnt->mnt_mp);
+ umount_mnt(root_mnt);
+ /* mount old root on put_old */
+ attach_mnt(root_mnt, old_mnt, old_mp.mp);
touch_mnt_namespace(current->nsproxy->mnt_ns);
/* A moved mount should not expire automatically */
list_del_init(&new_mnt->mnt_expire);
- put_mountpoint(root_mp);
unlock_mount_hash();
+ mnt_notify_add(root_mnt);
+ mnt_notify_add(new_mnt);
chroot_fs_refs(&root, &new);
- error = 0;
-out4:
- unlock_mount(old_mp);
- if (!error)
- mntput_no_expire(ex_parent);
-out3:
- path_put(&root);
-out2:
- path_put(&old);
-out1:
- path_put(&new);
-out0:
- return error;
+ return 0;
}
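A userspace sketch (not part of the patch) of the classic container-setup sequence this syscall serves. There is no libc wrapper, so syscall(SYS_pivot_root, ...) is used, the new root is bind-mounted onto itself first so it satisfies the mount-point checks above, and the relevant mounts are assumed not to be shared.

#include <sys/mount.h>
#include <sys/syscall.h>
#include <unistd.h>

static int enter_new_root(const char *new_root)
{
	/* new_root must itself be a mount point. */
	mount(new_root, new_root, NULL, MS_BIND | MS_REC, NULL);
	chdir(new_root);

	/* Stack the old root on "." and immediately detach it. */
	syscall(SYS_pivot_root, ".", ".");
	umount2(".", MNT_DETACH);
	chdir("/");
	return 0;
}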
static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt)
@@ -4458,11 +4635,10 @@ static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
return -EINVAL;
/*
- * Once a mount has been idmapped we don't allow it to change its
- * mapping. It makes things simpler and callers can just create
- * another bind-mount they can idmap if they want to.
+ * We only allow a mount to change its idmapping if it has
+ * never been accessible to userspace.
*/
- if (is_idmapped_mnt(m))
+ if (!(kattr->kflags & MOUNT_KATTR_IDMAP_REPLACE) && is_idmapped_mnt(m))
return -EPERM;
/* The underlying filesystem doesn't support idmapped mounts yet. */
@@ -4518,52 +4694,36 @@ static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
if (!mnt_allow_writers(kattr, m)) {
err = mnt_hold_writers(m);
- if (err)
+ if (err) {
+ m = next_mnt(m, mnt);
break;
+ }
}
- if (!kattr->recurse)
+ if (!(kattr->kflags & MOUNT_KATTR_RECURSE))
return 0;
}
if (err) {
- struct mount *p;
-
- /*
- * If we had to call mnt_hold_writers() MNT_WRITE_HOLD will
- * be set in @mnt_flags. The loop unsets MNT_WRITE_HOLD for all
- * mounts and needs to take care to include the first mount.
- */
- for (p = mnt; p; p = next_mnt(p, mnt)) {
- /* If we had to hold writers unblock them. */
- if (p->mnt.mnt_flags & MNT_WRITE_HOLD)
- mnt_unhold_writers(p);
-
- /*
- * We're done once the first mount we changed got
- * MNT_WRITE_HOLD unset.
- */
- if (p == m)
- break;
- }
+ /* undo all mnt_hold_writers() we'd done */
+ for (struct mount *p = mnt; p != m; p = next_mnt(p, mnt))
+ mnt_unhold_writers(p);
}
return err;
}
static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
{
+ struct mnt_idmap *old_idmap;
+
if (!kattr->mnt_idmap)
return;
- /*
- * Pairs with smp_load_acquire() in mnt_idmap().
- *
- * Since we only allow a mount to change the idmapping once and
- * verified this in can_idmap_mount() we know that the mount has
- * @nop_mnt_idmap attached to it. So there's no need to drop any
- * references.
- */
+ old_idmap = mnt_idmap(&mnt->mnt);
+
+ /* Pairs with smp_load_acquire() in mnt_idmap(). */
smp_store_release(&mnt->mnt.mnt_idmap, mnt_idmap_get(kattr->mnt_idmap));
+ mnt_idmap_put(old_idmap);
}
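The userspace flow that ends up in do_idmap_mount() usually goes through a detached copy of the tree, which is why the never-exposed-to-userspace rule above is workable. A sketch (not part of the patch) assuming the glibc 2.36+ wrappers; userns_fd is expected to refer to something like /proc/<pid>/ns/user:

#include <sys/mount.h>
#include <fcntl.h>
#include <unistd.h>

static int make_idmapped_mount(const char *src, const char *dst, int userns_fd)
{
	struct mount_attr attr = {
		.attr_set  = MOUNT_ATTR_IDMAP,
		.userns_fd = userns_fd,
	};
	int fd;

	fd = open_tree(AT_FDCWD, src, OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
	if (fd < 0)
		return -1;

	/* The detached copy was never visible, so setting an idmap is allowed. */
	if (mount_setattr(fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)) < 0 ||
	    move_mount(fd, "", AT_FDCWD, dst, MOVE_MOUNT_F_EMPTY_PATH) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}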
static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt)
@@ -4578,18 +4738,17 @@ static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt)
WRITE_ONCE(m->mnt.mnt_flags, flags);
/* If we had to hold writers unblock them. */
- if (m->mnt.mnt_flags & MNT_WRITE_HOLD)
- mnt_unhold_writers(m);
+ mnt_unhold_writers(m);
if (kattr->propagation)
change_mnt_propagation(m, kattr->propagation);
- if (!kattr->recurse)
+ if (!(kattr->kflags & MOUNT_KATTR_RECURSE))
break;
}
touch_mnt_namespace(mnt->mnt_ns);
}
-static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
+static int do_mount_setattr(const struct path *path, struct mount_kattr *kattr)
{
struct mount *mnt = real_mount(path->mnt);
int err = 0;
@@ -4613,7 +4772,7 @@ static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
*/
namespace_lock();
if (kattr->propagation == MS_SHARED) {
- err = invent_group_ids(mnt, kattr->recurse);
+ err = invent_group_ids(mnt, kattr->kflags & MOUNT_KATTR_RECURSE);
if (err) {
namespace_unlock();
return err;
@@ -4624,22 +4783,7 @@ static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
err = -EINVAL;
lock_mount_hash();
- /* Ensure that this isn't anything purely vfs internal. */
- if (!is_mounted(&mnt->mnt))
- goto out;
-
- /*
- * If this is an attached mount make sure it's located in the callers
- * mount namespace. If it's not don't let the caller interact with it.
- *
- * If this mount doesn't have a parent it's most often simply a
- * detached mount with an anonymous mount namespace. IOW, something
- * that's simply not attached yet. But there are apparently also users
- * that do change mount properties on the rootfs itself. That obviously
- * neither has a parent nor is it a detached mount so we cannot
- * unconditionally check for detached mounts.
- */
- if ((mnt_has_parent(mnt) || !is_anon_ns(mnt->mnt_ns)) && !check_mnt(mnt))
+ if (!anon_ns_root(mnt) && !check_mnt(mnt))
goto out;
/*
@@ -4664,7 +4808,7 @@ out:
}
static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
- struct mount_kattr *kattr, unsigned int flags)
+ struct mount_kattr *kattr)
{
struct ns_common *ns;
struct user_namespace *mnt_userns;
@@ -4672,13 +4816,23 @@ static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP))
return 0;
- /*
- * We currently do not support clearing an idmapped mount. If this ever
- * is a use-case we can revisit this but for now let's keep it simple
- * and not allow it.
- */
- if (attr->attr_clr & MOUNT_ATTR_IDMAP)
- return -EINVAL;
+ if (attr->attr_clr & MOUNT_ATTR_IDMAP) {
+ /*
+ * We can only remove an idmapping if it's never been
+ * exposed to userspace.
+ */
+ if (!(kattr->kflags & MOUNT_KATTR_IDMAP_REPLACE))
+ return -EINVAL;
+
+ /*
+ * Removal of idmappings is equivalent to setting
+ * nop_mnt_idmap.
+ */
+ if (!(attr->attr_set & MOUNT_ATTR_IDMAP)) {
+ kattr->mnt_idmap = &nop_mnt_idmap;
+ return 0;
+ }
+ }
if (attr->userns_fd > INT_MAX)
return -EINVAL;
@@ -4691,7 +4845,7 @@ static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
return -EINVAL;
ns = get_proc_ns(file_inode(fd_file(f)));
- if (ns->ops->type != CLONE_NEWUSER)
+ if (ns->ns_type != CLONE_NEWUSER)
return -EINVAL;
/*
@@ -4715,22 +4869,8 @@ static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
}
static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
- struct mount_kattr *kattr, unsigned int flags)
+ struct mount_kattr *kattr)
{
- unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
-
- if (flags & AT_NO_AUTOMOUNT)
- lookup_flags &= ~LOOKUP_AUTOMOUNT;
- if (flags & AT_SYMLINK_NOFOLLOW)
- lookup_flags &= ~LOOKUP_FOLLOW;
- if (flags & AT_EMPTY_PATH)
- lookup_flags |= LOOKUP_EMPTY;
-
- *kattr = (struct mount_kattr) {
- .lookup_flags = lookup_flags,
- .recurse = !!(flags & AT_RECURSIVE),
- };
-
if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS)
return -EINVAL;
if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1)
@@ -4778,35 +4918,28 @@ static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
return -EINVAL;
}
- return build_mount_idmapped(attr, usize, kattr, flags);
+ return build_mount_idmapped(attr, usize, kattr);
}
static void finish_mount_kattr(struct mount_kattr *kattr)
{
- put_user_ns(kattr->mnt_userns);
- kattr->mnt_userns = NULL;
+ if (kattr->mnt_userns) {
+ put_user_ns(kattr->mnt_userns);
+ kattr->mnt_userns = NULL;
+ }
if (kattr->mnt_idmap)
mnt_idmap_put(kattr->mnt_idmap);
}
-SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
- unsigned int, flags, struct mount_attr __user *, uattr,
- size_t, usize)
+static int wants_mount_setattr(struct mount_attr __user *uattr, size_t usize,
+ struct mount_kattr *kattr)
{
- int err;
- struct path target;
+ int ret;
struct mount_attr attr;
- struct mount_kattr kattr;
BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0);
- if (flags & ~(AT_EMPTY_PATH |
- AT_RECURSIVE |
- AT_SYMLINK_NOFOLLOW |
- AT_NO_AUTOMOUNT))
- return -EINVAL;
-
if (unlikely(usize > PAGE_SIZE))
return -E2BIG;
if (unlikely(usize < MOUNT_ATTR_SIZE_VER0))
@@ -4815,18 +4948,54 @@ SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
if (!may_mount())
return -EPERM;
- err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
- if (err)
- return err;
+ ret = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
+ if (ret)
+ return ret;
/* Don't bother walking through the mounts if this is a nop. */
if (attr.attr_set == 0 &&
attr.attr_clr == 0 &&
attr.propagation == 0)
- return 0;
+ return 0; /* Tell caller to not bother. */
- err = build_mount_kattr(&attr, usize, &kattr, flags);
- if (err)
+ ret = build_mount_kattr(&attr, usize, kattr);
+ if (ret < 0)
+ return ret;
+
+ return 1;
+}
+
+SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
+ unsigned int, flags, struct mount_attr __user *, uattr,
+ size_t, usize)
+{
+ int err;
+ struct path target;
+ struct mount_kattr kattr;
+ unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
+
+ if (flags & ~(AT_EMPTY_PATH |
+ AT_RECURSIVE |
+ AT_SYMLINK_NOFOLLOW |
+ AT_NO_AUTOMOUNT))
+ return -EINVAL;
+
+ if (flags & AT_NO_AUTOMOUNT)
+ lookup_flags &= ~LOOKUP_AUTOMOUNT;
+ if (flags & AT_SYMLINK_NOFOLLOW)
+ lookup_flags &= ~LOOKUP_FOLLOW;
+ if (flags & AT_EMPTY_PATH)
+ lookup_flags |= LOOKUP_EMPTY;
+
+ kattr = (struct mount_kattr) {
+ .lookup_flags = lookup_flags,
+ };
+
+ if (flags & AT_RECURSIVE)
+ kattr.kflags |= MOUNT_KATTR_RECURSE;
+
+ err = wants_mount_setattr(uattr, usize, &kattr);
+ if (err <= 0)
return err;
err = user_path_at(dfd, path, kattr.lookup_flags, &target);
@@ -4838,6 +5007,39 @@ SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
return err;
}
+SYSCALL_DEFINE5(open_tree_attr, int, dfd, const char __user *, filename,
+ unsigned, flags, struct mount_attr __user *, uattr,
+ size_t, usize)
+{
+ if (!uattr && usize)
+ return -EINVAL;
+
+ FD_PREPARE(fdf, flags, vfs_open_tree(dfd, filename, flags));
+ if (fdf.err)
+ return fdf.err;
+
+ if (uattr) {
+ struct mount_kattr kattr = {};
+ struct file *file = fd_prepare_file(fdf);
+ int ret;
+
+ if (flags & OPEN_TREE_CLONE)
+ kattr.kflags = MOUNT_KATTR_IDMAP_REPLACE;
+ if (flags & AT_RECURSIVE)
+ kattr.kflags |= MOUNT_KATTR_RECURSE;
+
+ ret = wants_mount_setattr(uattr, usize, &kattr);
+ if (ret > 0) {
+ ret = do_mount_setattr(&file->f_path, &kattr);
+ finish_mount_kattr(&kattr);
+ }
+ if (ret)
+ return ret;
+ }
+
+ return fd_publish(fdf);
+}
+
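For reference, the shape of a userspace mount_setattr() call that exercises the MOUNT_KATTR_RECURSE path above (not part of the patch; assumes the glibc 2.36+ wrapper, with a fallback definition of AT_RECURSIVE for older headers):

#include <sys/mount.h>
#include <fcntl.h>

#ifndef AT_RECURSIVE
#define AT_RECURSIVE 0x8000
#endif

static int harden_subtree(const char *path)
{
	struct mount_attr attr = {
		.attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID,
	};

	/* Applies to @path and, with AT_RECURSIVE, to everything mounted below it. */
	return mount_setattr(AT_FDCWD, path, AT_RECURSIVE, &attr, sizeof(attr));
}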
int show_path(struct seq_file *m, struct dentry *root)
{
if (root->d_sb->s_op->show_path)
@@ -4861,10 +5063,13 @@ struct kstatmount {
struct statmount __user *buf;
size_t bufsize;
struct vfsmount *mnt;
+ struct mnt_idmap *idmap;
u64 mask;
struct path root;
- struct statmount sm;
struct seq_file seq;
+
+ /* Must be last -- ends in a flexible-array member. */
+ struct statmount sm;
};
static u64 mnt_to_attr_flags(struct vfsmount *mnt)
@@ -4914,6 +5119,12 @@ static u64 mnt_to_propagation_flags(struct mount *m)
return propagation;
}
+u64 vfsmount_to_propagation_flags(struct vfsmount *mnt)
+{
+ return mnt_to_propagation_flags(real_mount(mnt));
+}
+EXPORT_SYMBOL_GPL(vfsmount_to_propagation_flags);
+
static void statmount_sb_basic(struct kstatmount *s)
{
struct super_block *sb = s->mnt->mnt_sb;
@@ -4936,7 +5147,7 @@ static void statmount_mnt_basic(struct kstatmount *s)
s->sm.mnt_parent_id_old = m->mnt_parent->mnt_id;
s->sm.mnt_attr = mnt_to_attr_flags(&m->mnt);
s->sm.mnt_propagation = mnt_to_propagation_flags(m);
- s->sm.mnt_peer_group = IS_MNT_SHARED(m) ? m->mnt_group_id : 0;
+ s->sm.mnt_peer_group = m->mnt_group_id;
s->sm.mnt_master = IS_MNT_SLAVE(m) ? m->mnt_master->mnt_group_id : 0;
}
@@ -5017,7 +5228,7 @@ static int statmount_sb_source(struct kstatmount *s, struct seq_file *seq)
seq->buf[seq->count] = '\0';
seq->count = start;
seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL));
- } else if (r->mnt_devname) {
+ } else {
seq_puts(seq, r->mnt_devname);
}
return 0;
@@ -5026,33 +5237,36 @@ static int statmount_sb_source(struct kstatmount *s, struct seq_file *seq)
static void statmount_mnt_ns_id(struct kstatmount *s, struct mnt_namespace *ns)
{
s->sm.mask |= STATMOUNT_MNT_NS_ID;
- s->sm.mnt_ns_id = ns->seq;
+ s->sm.mnt_ns_id = ns->ns.ns_id;
}
static int statmount_mnt_opts(struct kstatmount *s, struct seq_file *seq)
{
struct vfsmount *mnt = s->mnt;
struct super_block *sb = mnt->mnt_sb;
+ size_t start = seq->count;
int err;
- if (sb->s_op->show_options) {
- size_t start = seq->count;
+ err = security_sb_show_options(seq, sb);
+ if (err)
+ return err;
+ if (sb->s_op->show_options) {
err = sb->s_op->show_options(seq, mnt->mnt_root);
if (err)
return err;
+ }
- if (unlikely(seq_has_overflowed(seq)))
- return -EAGAIN;
+ if (unlikely(seq_has_overflowed(seq)))
+ return -EAGAIN;
- if (seq->count == start)
- return 0;
+ if (seq->count == start)
+ return 0;
- /* skip leading comma */
- memmove(seq->buf + start, seq->buf + start + 1,
- seq->count - start - 1);
- seq->count--;
- }
+ /* skip leading comma */
+ memmove(seq->buf + start, seq->buf + start + 1,
+ seq->count - start - 1);
+ seq->count--;
return 0;
}
@@ -5127,47 +5341,101 @@ static int statmount_opt_sec_array(struct kstatmount *s, struct seq_file *seq)
return 0;
}
+static inline int statmount_mnt_uidmap(struct kstatmount *s, struct seq_file *seq)
+{
+ int ret;
+
+ ret = statmount_mnt_idmap(s->idmap, seq, true);
+ if (ret < 0)
+ return ret;
+
+ s->sm.mnt_uidmap_num = ret;
+ /*
+ * Always raise STATMOUNT_MNT_UIDMAP even if there are no valid
+ * mappings. This allows userspace to distinguish between a
+ * non-idmapped mount and an idmapped mount where none of the
+ * individual mappings are valid in the caller's idmapping.
+ */
+ if (is_valid_mnt_idmap(s->idmap))
+ s->sm.mask |= STATMOUNT_MNT_UIDMAP;
+ return 0;
+}
+
+static inline int statmount_mnt_gidmap(struct kstatmount *s, struct seq_file *seq)
+{
+ int ret;
+
+ ret = statmount_mnt_idmap(s->idmap, seq, false);
+ if (ret < 0)
+ return ret;
+
+ s->sm.mnt_gidmap_num = ret;
+ /*
+ * Always raise STATMOUNT_MNT_GIDMAP even if there are no valid
+ * mappings. This allows userspace to distinguish between a
+ * non-idmapped mount and an idmapped mount where none of the
+ * individual mappings are valid in the caller's idmapping.
+ */
+ if (is_valid_mnt_idmap(s->idmap))
+ s->sm.mask |= STATMOUNT_MNT_GIDMAP;
+ return 0;
+}
+
static int statmount_string(struct kstatmount *s, u64 flag)
{
int ret = 0;
size_t kbufsize;
struct seq_file *seq = &s->seq;
struct statmount *sm = &s->sm;
- u32 start = seq->count;
+ u32 start, *offp;
+
+ /* Reserve an empty string at the beginning for any unset offsets */
+ if (!seq->count)
+ seq_putc(seq, 0);
+
+ start = seq->count;
switch (flag) {
case STATMOUNT_FS_TYPE:
- sm->fs_type = start;
+ offp = &sm->fs_type;
ret = statmount_fs_type(s, seq);
break;
case STATMOUNT_MNT_ROOT:
- sm->mnt_root = start;
+ offp = &sm->mnt_root;
ret = statmount_mnt_root(s, seq);
break;
case STATMOUNT_MNT_POINT:
- sm->mnt_point = start;
+ offp = &sm->mnt_point;
ret = statmount_mnt_point(s, seq);
break;
case STATMOUNT_MNT_OPTS:
- sm->mnt_opts = start;
+ offp = &sm->mnt_opts;
ret = statmount_mnt_opts(s, seq);
break;
case STATMOUNT_OPT_ARRAY:
- sm->opt_array = start;
+ offp = &sm->opt_array;
ret = statmount_opt_array(s, seq);
break;
case STATMOUNT_OPT_SEC_ARRAY:
- sm->opt_sec_array = start;
+ offp = &sm->opt_sec_array;
ret = statmount_opt_sec_array(s, seq);
break;
case STATMOUNT_FS_SUBTYPE:
- sm->fs_subtype = start;
+ offp = &sm->fs_subtype;
statmount_fs_subtype(s, seq);
break;
case STATMOUNT_SB_SOURCE:
- sm->sb_source = start;
+ offp = &sm->sb_source;
ret = statmount_sb_source(s, seq);
break;
+ case STATMOUNT_MNT_UIDMAP:
+ offp = &sm->mnt_uidmap;
+ ret = statmount_mnt_uidmap(s, seq);
+ break;
+ case STATMOUNT_MNT_GIDMAP:
+ offp = &sm->mnt_gidmap;
+ ret = statmount_mnt_gidmap(s, seq);
+ break;
default:
WARN_ON_ONCE(true);
return -EINVAL;
@@ -5193,6 +5461,7 @@ static int statmount_string(struct kstatmount *s, u64 flag)
seq->buf[seq->count++] = '\0';
sm->mask |= flag;
+ *offp = start;
return 0;
}
@@ -5242,7 +5511,7 @@ static int grab_requested_root(struct mnt_namespace *ns, struct path *root)
* We have to find the first mount in our ns and use that, however it
* may not exist, so handle that properly.
*/
- if (RB_EMPTY_ROOT(&ns->mounts))
+ if (mnt_ns_empty(ns))
return -ENOENT;
first = child = ns->root;
@@ -5259,22 +5528,39 @@ static int grab_requested_root(struct mnt_namespace *ns, struct path *root)
return 0;
}
+/* This must be updated whenever a new flag is added */
+#define STATMOUNT_SUPPORTED (STATMOUNT_SB_BASIC | \
+ STATMOUNT_MNT_BASIC | \
+ STATMOUNT_PROPAGATE_FROM | \
+ STATMOUNT_MNT_ROOT | \
+ STATMOUNT_MNT_POINT | \
+ STATMOUNT_FS_TYPE | \
+ STATMOUNT_MNT_NS_ID | \
+ STATMOUNT_MNT_OPTS | \
+ STATMOUNT_FS_SUBTYPE | \
+ STATMOUNT_SB_SOURCE | \
+ STATMOUNT_OPT_ARRAY | \
+ STATMOUNT_OPT_SEC_ARRAY | \
+ STATMOUNT_SUPPORTED_MASK | \
+ STATMOUNT_MNT_UIDMAP | \
+ STATMOUNT_MNT_GIDMAP)
+
+/* locks: namespace_shared */
static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
struct mnt_namespace *ns)
{
- struct path root __free(path_put) = {};
struct mount *m;
int err;
/* Has the namespace already been emptied? */
- if (mnt_ns_id && RB_EMPTY_ROOT(&ns->mounts))
+ if (mnt_ns_id && mnt_ns_empty(ns))
return -ENOENT;
s->mnt = lookup_mnt_in_ns(mnt_id, ns);
if (!s->mnt)
return -ENOENT;
- err = grab_requested_root(ns, &root);
+ err = grab_requested_root(ns, &s->root);
if (err)
return err;
@@ -5283,7 +5569,7 @@ static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
* mounts to show users.
*/
m = real_mount(s->mnt);
- if (!is_path_reachable(m, m->mnt.mnt_root, &root) &&
+ if (!is_path_reachable(m, m->mnt.mnt_root, &s->root) &&
!ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
return -EPERM;
@@ -5291,13 +5577,28 @@ static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
if (err)
return err;
- s->root = root;
- if (s->mask & STATMOUNT_SB_BASIC)
- statmount_sb_basic(s);
-
+ /*
+ * Note that mount properties in mnt->mnt_flags, mnt->mnt_idmap
+ * can change concurrently as we only hold the read-side of the
+ * namespace semaphore and mount properties may change with only
+ * the mount lock held.
+ *
+ * We could sample the mount lock sequence counter to detect
+ * those changes and retry. But it's not worth it. Worst that
+ * happens is that the mnt->mnt_idmap pointer is already changed
+ * while mnt->mnt_flags isn't or vice versa. So what.
+ *
+ * Both mnt->mnt_flags and mnt->mnt_idmap are set and retrieved
+ * via READ_ONCE()/WRITE_ONCE() and guard against theoretical
+ * torn read/write. That's all we care about right now.
+ */
+ s->idmap = mnt_idmap(s->mnt);
if (s->mask & STATMOUNT_MNT_BASIC)
statmount_mnt_basic(s);
+ if (s->mask & STATMOUNT_SB_BASIC)
+ statmount_sb_basic(s);
+
if (s->mask & STATMOUNT_PROPAGATE_FROM)
statmount_propagate_from(s);
@@ -5325,12 +5626,26 @@ static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
if (!err && s->mask & STATMOUNT_SB_SOURCE)
err = statmount_string(s, STATMOUNT_SB_SOURCE);
+ if (!err && s->mask & STATMOUNT_MNT_UIDMAP)
+ err = statmount_string(s, STATMOUNT_MNT_UIDMAP);
+
+ if (!err && s->mask & STATMOUNT_MNT_GIDMAP)
+ err = statmount_string(s, STATMOUNT_MNT_GIDMAP);
+
if (!err && s->mask & STATMOUNT_MNT_NS_ID)
statmount_mnt_ns_id(s, ns);
+ if (!err && s->mask & STATMOUNT_SUPPORTED_MASK) {
+ s->sm.mask |= STATMOUNT_SUPPORTED_MASK;
+ s->sm.supported_mask = STATMOUNT_SUPPORTED;
+ }
+
if (err)
return err;
+ /* Are there bits in the return mask not present in STATMOUNT_SUPPORTED? */
+ WARN_ON_ONCE(~STATMOUNT_SUPPORTED & s->sm.mask);
+
return 0;
}
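From userspace, do_statmount() is reached via the statmount() syscall; a too-small buffer surfaces as EOVERFLOW, so callers grow and retry. A sketch only (not part of the patch): no libc wrapper is assumed, the syscall-number fallback (457) matches the generic table, and mnt_id is a unique mount ID as returned by listmount() or statx(STATX_MNT_ID_UNIQUE):

#include <linux/mount.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>

#ifndef __NR_statmount
#define __NR_statmount 457
#endif

static struct statmount *query_mount(uint64_t mnt_id, uint64_t mask)
{
	struct mnt_id_req req = {
		.size	= MNT_ID_REQ_SIZE_VER0,
		.mnt_id	= mnt_id,
		.param	= mask,		/* STATMOUNT_* items requested */
	};
	size_t bufsize = 4096;
	struct statmount *buf = NULL;

	for (;;) {
		struct statmount *tmp = realloc(buf, bufsize);

		if (!tmp) {
			free(buf);
			return NULL;
		}
		buf = tmp;
		if (syscall(__NR_statmount, &req, buf, bufsize, 0) == 0)
			return buf;
		if (errno != EOVERFLOW) {
			free(buf);
			return NULL;
		}
		bufsize *= 2;	/* strings did not fit -- grow and retry */
	}
}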
@@ -5348,7 +5663,8 @@ static inline bool retry_statmount(const long ret, size_t *seq_size)
#define STATMOUNT_STRING_REQ (STATMOUNT_MNT_ROOT | STATMOUNT_MNT_POINT | \
STATMOUNT_FS_TYPE | STATMOUNT_MNT_OPTS | \
STATMOUNT_FS_SUBTYPE | STATMOUNT_SB_SOURCE | \
- STATMOUNT_OPT_ARRAY | STATMOUNT_OPT_SEC_ARRAY)
+ STATMOUNT_OPT_ARRAY | STATMOUNT_OPT_SEC_ARRAY | \
+ STATMOUNT_MNT_UIDMAP | STATMOUNT_MNT_GIDMAP)
static int prepare_kstatmount(struct kstatmount *ks, struct mnt_id_req *kreq,
struct statmount __user *buf, size_t bufsize,
@@ -5395,7 +5711,7 @@ static int copy_mnt_id_req(const struct mnt_id_req __user *req,
ret = copy_struct_from_user(kreq, sizeof(*kreq), req, usize);
if (ret)
return ret;
- if (kreq->spare != 0)
+ if (kreq->mnt_ns_fd != 0 && kreq->mnt_ns_id)
return -EINVAL;
/* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
if (kreq->mnt_id <= MNT_UNIQUE_ID_OFFSET)
@@ -5412,16 +5728,14 @@ static struct mnt_namespace *grab_requested_mnt_ns(const struct mnt_id_req *kreq
{
struct mnt_namespace *mnt_ns;
- if (kreq->mnt_ns_id && kreq->spare)
- return ERR_PTR(-EINVAL);
-
- if (kreq->mnt_ns_id)
- return lookup_mnt_ns(kreq->mnt_ns_id);
-
- if (kreq->spare) {
+ if (kreq->mnt_ns_id) {
+ mnt_ns = lookup_mnt_ns(kreq->mnt_ns_id);
+ if (!mnt_ns)
+ return ERR_PTR(-ENOENT);
+ } else if (kreq->mnt_ns_fd) {
struct ns_common *ns;
- CLASS(fd, f)(kreq->spare);
+ CLASS(fd, f)(kreq->mnt_ns_fd);
if (fd_empty(f))
return ERR_PTR(-EBADF);
@@ -5429,15 +5743,16 @@ static struct mnt_namespace *grab_requested_mnt_ns(const struct mnt_id_req *kreq
return ERR_PTR(-EINVAL);
ns = get_proc_ns(file_inode(fd_file(f)));
- if (ns->ops->type != CLONE_NEWNS)
+ if (ns->ns_type != CLONE_NEWNS)
return ERR_PTR(-EINVAL);
mnt_ns = to_mnt_ns(ns);
+ refcount_inc(&mnt_ns->passive);
} else {
mnt_ns = current->nsproxy->mnt_ns;
+ refcount_inc(&mnt_ns->passive);
}
- refcount_inc(&mnt_ns->passive);
return mnt_ns;
}
@@ -5460,8 +5775,8 @@ SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req,
return ret;
ns = grab_requested_mnt_ns(&kreq);
- if (!ns)
- return -ENOENT;
+ if (IS_ERR(ns))
+ return PTR_ERR(ns);
if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) &&
!ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
@@ -5476,34 +5791,47 @@ retry:
if (ret)
return ret;
- scoped_guard(rwsem_read, &namespace_sem)
+ scoped_guard(namespace_shared)
ret = do_statmount(ks, kreq.mnt_id, kreq.mnt_ns_id, ns);
if (!ret)
ret = copy_statmount_to_user(ks);
kvfree(ks->seq.buf);
+ path_put(&ks->root);
if (retry_statmount(ret, &seq_size))
goto retry;
return ret;
}
-static ssize_t do_listmount(struct mnt_namespace *ns, u64 mnt_parent_id,
- u64 last_mnt_id, u64 *mnt_ids, size_t nr_mnt_ids,
- bool reverse)
+struct klistmount {
+ u64 last_mnt_id;
+ u64 mnt_parent_id;
+ u64 *kmnt_ids;
+ u32 nr_mnt_ids;
+ struct mnt_namespace *ns;
+ struct path root;
+};
+
+/* locks: namespace_shared */
+static ssize_t do_listmount(struct klistmount *kls, bool reverse)
{
- struct path root __free(path_put) = {};
+ struct mnt_namespace *ns = kls->ns;
+ u64 mnt_parent_id = kls->mnt_parent_id;
+ u64 last_mnt_id = kls->last_mnt_id;
+ u64 *mnt_ids = kls->kmnt_ids;
+ size_t nr_mnt_ids = kls->nr_mnt_ids;
struct path orig;
struct mount *r, *first;
ssize_t ret;
rwsem_assert_held(&namespace_sem);
- ret = grab_requested_root(ns, &root);
+ ret = grab_requested_root(ns, &kls->root);
if (ret)
return ret;
if (mnt_parent_id == LSMT_ROOT) {
- orig = root;
+ orig = kls->root;
} else {
orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns);
if (!orig.mnt)
@@ -5515,7 +5843,7 @@ static ssize_t do_listmount(struct mnt_namespace *ns, u64 mnt_parent_id,
* Don't trigger audit denials. We just want to determine what
* mounts to show users.
*/
- if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &root) &&
+ if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &kls->root) &&
!ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
return -EPERM;
@@ -5525,9 +5853,9 @@ static ssize_t do_listmount(struct mnt_namespace *ns, u64 mnt_parent_id,
if (!last_mnt_id) {
if (reverse)
- first = node_to_mount(rb_last(&ns->mounts));
+ first = node_to_mount(ns->mnt_last_node);
else
- first = node_to_mount(rb_first(&ns->mounts));
+ first = node_to_mount(ns->mnt_first_node);
} else {
if (reverse)
first = mnt_find_id_at_reverse(ns, last_mnt_id - 1);
@@ -5548,14 +5876,46 @@ static ssize_t do_listmount(struct mnt_namespace *ns, u64 mnt_parent_id,
return ret;
}
+static void __free_klistmount_free(const struct klistmount *kls)
+{
+ path_put(&kls->root);
+ kvfree(kls->kmnt_ids);
+ mnt_ns_release(kls->ns);
+}
+
+static inline int prepare_klistmount(struct klistmount *kls, struct mnt_id_req *kreq,
+ size_t nr_mnt_ids)
+{
+ u64 last_mnt_id = kreq->param;
+ struct mnt_namespace *ns;
+
+ /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
+ if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET)
+ return -EINVAL;
+
+ kls->last_mnt_id = last_mnt_id;
+
+ kls->nr_mnt_ids = nr_mnt_ids;
+ kls->kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kls->kmnt_ids),
+ GFP_KERNEL_ACCOUNT);
+ if (!kls->kmnt_ids)
+ return -ENOMEM;
+
+ ns = grab_requested_mnt_ns(kreq);
+ if (IS_ERR(ns))
+ return PTR_ERR(ns);
+ kls->ns = ns;
+
+ kls->mnt_parent_id = kreq->mnt_id;
+ return 0;
+}
+
SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req,
u64 __user *, mnt_ids, size_t, nr_mnt_ids, unsigned int, flags)
{
- u64 *kmnt_ids __free(kvfree) = NULL;
+ struct klistmount kls __free(klistmount_free) = {};
const size_t maxcount = 1000000;
- struct mnt_namespace *ns __free(mnt_ns_release) = NULL;
struct mnt_id_req kreq;
- u64 last_mnt_id;
ssize_t ret;
if (flags & ~LISTMOUNT_REVERSE)
@@ -5576,65 +5936,61 @@ SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req,
if (ret)
return ret;
- last_mnt_id = kreq.param;
- /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
- if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET)
- return -EINVAL;
-
- kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kmnt_ids),
- GFP_KERNEL_ACCOUNT);
- if (!kmnt_ids)
- return -ENOMEM;
-
- ns = grab_requested_mnt_ns(&kreq);
- if (!ns)
- return -ENOENT;
+ ret = prepare_klistmount(&kls, &kreq, nr_mnt_ids);
+ if (ret)
+ return ret;
- if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) &&
- !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
+ if (kreq.mnt_ns_id && (kls.ns != current->nsproxy->mnt_ns) &&
+ !ns_capable_noaudit(kls.ns->user_ns, CAP_SYS_ADMIN))
return -ENOENT;
- scoped_guard(rwsem_read, &namespace_sem)
- ret = do_listmount(ns, kreq.mnt_id, last_mnt_id, kmnt_ids,
- nr_mnt_ids, (flags & LISTMOUNT_REVERSE));
+ /*
+ * We only need to guard against mount topology changes as
+ * listmount() doesn't care about any mount properties.
+ */
+ scoped_guard(namespace_shared)
+ ret = do_listmount(&kls, (flags & LISTMOUNT_REVERSE));
if (ret <= 0)
return ret;
- if (copy_to_user(mnt_ids, kmnt_ids, ret * sizeof(*mnt_ids)))
+ if (copy_to_user(mnt_ids, kls.kmnt_ids, ret * sizeof(*mnt_ids)))
return -EFAULT;
return ret;
}
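And the corresponding userspace view of listmount(): requesting the caller's whole namespace with LSMT_ROOT and resuming via req.param, which do_listmount() above treats as the last mount ID already seen. A sketch only; no libc wrapper is assumed and 458 is the syscall number from the generic table:

#include <linux/mount.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

#ifndef __NR_listmount
#define __NR_listmount 458
#endif

int main(void)
{
	uint64_t ids[256];
	struct mnt_id_req req = {
		.size	= MNT_ID_REQ_SIZE_VER0,
		.mnt_id	= LSMT_ROOT,	/* list the whole namespace */
	};

	for (;;) {
		long n = syscall(__NR_listmount, &req, ids, 256, 0);

		if (n < 0)
			return 1;
		for (long i = 0; i < n; i++)
			printf("mount id %llu\n", (unsigned long long)ids[i]);
		if (n < 256)
			return 0;
		req.param = ids[n - 1];	/* resume after the last ID returned */
	}
}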
+struct mnt_namespace init_mnt_ns = {
+ .ns = NS_COMMON_INIT(init_mnt_ns),
+ .user_ns = &init_user_ns,
+ .passive = REFCOUNT_INIT(1),
+ .mounts = RB_ROOT,
+ .poll = __WAIT_QUEUE_HEAD_INITIALIZER(init_mnt_ns.poll),
+};
+
static void __init init_mount_tree(void)
{
struct vfsmount *mnt;
struct mount *m;
- struct mnt_namespace *ns;
struct path root;
- mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
+ mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", initramfs_options);
if (IS_ERR(mnt))
panic("Can't create rootfs");
- ns = alloc_mnt_ns(&init_user_ns, false);
- if (IS_ERR(ns))
- panic("Can't allocate initial namespace");
m = real_mount(mnt);
- ns->root = m;
- ns->nr_mounts = 1;
- mnt_add_to_ns(ns, m);
- init_task.nsproxy->mnt_ns = ns;
- get_mnt_ns(ns);
+ init_mnt_ns.root = m;
+ init_mnt_ns.nr_mounts = 1;
+ mnt_add_to_ns(&init_mnt_ns, m);
+ init_task.nsproxy->mnt_ns = &init_mnt_ns;
+ get_mnt_ns(&init_mnt_ns);
root.mnt = mnt;
root.dentry = mnt->mnt_root;
- mnt->mnt_flags |= MNT_LOCKED;
set_fs_pwd(current->fs, &root);
set_fs_root(current->fs, &root);
- mnt_ns_tree_add(ns);
+ ns_tree_add(&init_mnt_ns);
}
void __init mnt_init(void)
@@ -5674,10 +6030,12 @@ void __init mnt_init(void)
void put_mnt_ns(struct mnt_namespace *ns)
{
- if (!refcount_dec_and_test(&ns->ns.count))
+ if (!ns_ref_put(ns))
return;
- drop_collected_mounts(&ns->root->mnt);
- free_mnt_ns(ns);
+ guard(namespace_excl)();
+ emptied_ns = ns;
+ guard(mount_writer)();
+ umount_tree(ns->root, 0);
}
struct vfsmount *kern_mount(struct file_system_type *type)
@@ -5726,25 +6084,18 @@ bool our_mnt(struct vfsmount *mnt)
bool current_chrooted(void)
{
/* Does the current process have a non-standard root */
- struct path ns_root;
- struct path fs_root;
- bool chrooted;
-
- /* Find the namespace root */
- ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
- ns_root.dentry = ns_root.mnt->mnt_root;
- path_get(&ns_root);
- while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
- ;
+ struct path fs_root __free(path_put) = {};
+ struct mount *root;
get_fs_root(current->fs, &fs_root);
- chrooted = !path_equal(&fs_root, &ns_root);
+ /* Find the namespace root */
+
+ guard(mount_locked_reader)();
- path_put(&fs_root);
- path_put(&ns_root);
+ root = topmost_overmount(current->nsproxy->mnt_ns->root);
- return chrooted;
+ return fs_root.mnt != &root->mnt || !path_mounted(&fs_root);
}
static bool mnt_already_visible(struct mnt_namespace *ns,
@@ -5753,9 +6104,8 @@ static bool mnt_already_visible(struct mnt_namespace *ns,
{
int new_flags = *new_mnt_flags;
struct mount *mnt, *n;
- bool visible = false;
- down_read(&namespace_sem);
+ guard(namespace_shared)();
rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
struct mount *child;
int mnt_flags;
@@ -5802,13 +6152,10 @@ static bool mnt_already_visible(struct mnt_namespace *ns,
/* Preserve the locked attributes */
*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
MNT_LOCK_ATIME);
- visible = true;
- goto found;
+ return true;
next: ;
}
-found:
- up_read(&namespace_sem);
- return visible;
+ return false;
}
static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
@@ -5919,7 +6266,6 @@ static struct user_namespace *mntns_owner(struct ns_common *ns)
const struct proc_ns_operations mntns_operations = {
.name = "mnt",
- .type = CLONE_NEWNS,
.get = mntns_get,
.put = mntns_put,
.install = mntns_install,
@@ -5927,7 +6273,7 @@ const struct proc_ns_operations mntns_operations = {
};
#ifdef CONFIG_SYSCTL
-static struct ctl_table fs_namespace_sysctls[] = {
+static const struct ctl_table fs_namespace_sysctls[] = {
{
.procname = "mount-max",
.data = &sysctl_mount_max,
diff --git a/fs/netfs/Makefile b/fs/netfs/Makefile
index d08b0bfb6756..b43188d64bd8 100644
--- a/fs/netfs/Makefile
+++ b/fs/netfs/Makefile
@@ -13,8 +13,11 @@ netfs-y := \
read_collect.o \
read_pgpriv2.o \
read_retry.o \
+ read_single.o \
+ rolling_buffer.o \
write_collect.o \
- write_issue.o
+ write_issue.o \
+ write_retry.o
netfs-$(CONFIG_NETFS_STATS) += stats.o
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 7ac34550c403..37ab6f28b5ad 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -64,37 +64,6 @@ static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_in
}
/*
- * Decant the list of folios to read into a rolling buffer.
- */
-static size_t netfs_load_buffer_from_ra(struct netfs_io_request *rreq,
- struct folio_queue *folioq,
- struct folio_batch *put_batch)
-{
- unsigned int order, nr;
- size_t size = 0;
-
- nr = __readahead_batch(rreq->ractl, (struct page **)folioq->vec.folios,
- ARRAY_SIZE(folioq->vec.folios));
- folioq->vec.nr = nr;
- for (int i = 0; i < nr; i++) {
- struct folio *folio = folioq_folio(folioq, i);
-
- trace_netfs_folio(folio, netfs_folio_trace_read);
- order = folio_order(folio);
- folioq->orders[i] = order;
- size += PAGE_SIZE << order;
-
- if (!folio_batch_add(put_batch, folio))
- folio_batch_release(put_batch);
- }
-
- for (int i = nr; i < folioq_nr_slots(folioq); i++)
- folioq_clear(folioq, i);
-
- return size;
-}
-
-/*
* netfs_prepare_read_iterator - Prepare the subreq iterator for I/O
* @subreq: The subrequest to be set up
*
@@ -109,7 +78,8 @@ static size_t netfs_load_buffer_from_ra(struct netfs_io_request *rreq,
* [!] NOTE: This must be run in the same thread as ->issue_read() was called
* in as we access the readahead_control struct.
*/
-static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
+static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq,
+ struct readahead_control *ractl)
{
struct netfs_io_request *rreq = subreq->rreq;
size_t rsize = subreq->len;
@@ -117,7 +87,7 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
if (subreq->source == NETFS_DOWNLOAD_FROM_SERVER)
rsize = umin(rsize, rreq->io_streams[0].sreq_max_len);
- if (rreq->ractl) {
+ if (ractl) {
/* If we don't have sufficient folios in the rolling buffer,
* extract a folioq's worth from the readahead region at a time
* into the buffer. Note that this acquires a ref on each page
@@ -128,19 +98,12 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
folio_batch_init(&put_batch);
while (rreq->submitted < subreq->start + rsize) {
- struct folio_queue *tail = rreq->buffer_tail, *new;
- size_t added;
-
- new = kmalloc(sizeof(*new), GFP_NOFS);
- if (!new)
- return -ENOMEM;
- netfs_stat(&netfs_n_folioq);
- folioq_init(new);
- new->prev = tail;
- tail->next = new;
- rreq->buffer_tail = new;
- added = netfs_load_buffer_from_ra(rreq, new, &put_batch);
- rreq->iter.count += added;
+ ssize_t added;
+
+ added = rolling_buffer_load_from_ra(&rreq->buffer, ractl,
+ &put_batch);
+ if (added < 0)
+ return added;
rreq->submitted += added;
}
folio_batch_release(&put_batch);
@@ -148,7 +111,7 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
subreq->len = rsize;
if (unlikely(rreq->io_streams[0].sreq_max_segs)) {
- size_t limit = netfs_limit_iter(&rreq->iter, 0, rsize,
+ size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize,
rreq->io_streams[0].sreq_max_segs);
if (limit < rsize) {
@@ -157,20 +120,10 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
}
}
- subreq->io_iter = rreq->iter;
-
- if (iov_iter_is_folioq(&subreq->io_iter)) {
- if (subreq->io_iter.folioq_slot >= folioq_nr_slots(subreq->io_iter.folioq)) {
- subreq->io_iter.folioq = subreq->io_iter.folioq->next;
- subreq->io_iter.folioq_slot = 0;
- }
- subreq->curr_folioq = (struct folio_queue *)subreq->io_iter.folioq;
- subreq->curr_folioq_slot = subreq->io_iter.folioq_slot;
- subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot];
- }
+ subreq->io_iter = rreq->buffer.iter;
iov_iter_truncate(&subreq->io_iter, subreq->len);
- iov_iter_advance(&rreq->iter, subreq->len);
+ rolling_buffer_advance(&rreq->buffer, subreq->len);
return subreq->len;
}
@@ -179,25 +132,14 @@ static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_request *rr
loff_t i_size)
{
struct netfs_cache_resources *cres = &rreq->cache_resources;
+ enum netfs_io_source source;
if (!cres->ops)
return NETFS_DOWNLOAD_FROM_SERVER;
- return cres->ops->prepare_read(subreq, i_size);
-}
-
-static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
- bool was_async)
-{
- struct netfs_io_subrequest *subreq = priv;
-
- if (transferred_or_error < 0) {
- netfs_read_subreq_terminated(subreq, transferred_or_error, was_async);
- return;
- }
+ source = cres->ops->prepare_read(subreq, i_size);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+ return source;
- if (transferred_or_error > 0)
- subreq->transferred += transferred_or_error;
- netfs_read_subreq_terminated(subreq, 0, was_async);
}
/*
@@ -214,23 +156,73 @@ static void netfs_read_cache_to_pagecache(struct netfs_io_request *rreq,
netfs_cache_read_terminated, subreq);
}
+static void netfs_queue_read(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq,
+ bool last_subreq)
+{
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+
+ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+
+ /* We add to the end of the list whilst the collector may be walking
+ * the list. The collector only goes forwards and uses the lock to
+ * remove entries off the front.
+ */
+ spin_lock(&rreq->lock);
+ list_add_tail(&subreq->rreq_link, &stream->subrequests);
+ if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
+ stream->front = subreq;
+ if (!stream->active) {
+ stream->collected_to = stream->front->start;
+ /* Store list pointers before active flag */
+ smp_store_release(&stream->active, true);
+ }
+ }
+
+ if (last_subreq) {
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
+ }
+
+ spin_unlock(&rreq->lock);
+}
+
+static void netfs_issue_read(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ switch (subreq->source) {
+ case NETFS_DOWNLOAD_FROM_SERVER:
+ rreq->netfs_ops->issue_read(subreq);
+ break;
+ case NETFS_READ_FROM_CACHE:
+ netfs_read_cache_to_pagecache(rreq, subreq);
+ break;
+ default:
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ subreq->error = 0;
+ iov_iter_zero(subreq->len, &subreq->io_iter);
+ subreq->transferred = subreq->len;
+ netfs_read_subreq_terminated(subreq);
+ break;
+ }
+}
+
/*
* Perform a read to the pagecache from a series of sources of different types,
* slicing up the region to be read according to available cache blocks and
* network rsize.
*/
-static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
+static void netfs_read_to_pagecache(struct netfs_io_request *rreq,
+ struct readahead_control *ractl)
{
struct netfs_inode *ictx = netfs_inode(rreq->inode);
unsigned long long start = rreq->start;
ssize_t size = rreq->len;
int ret = 0;
- atomic_inc(&rreq->nr_outstanding);
-
do {
struct netfs_io_subrequest *subreq;
- enum netfs_io_source source = NETFS_DOWNLOAD_FROM_SERVER;
+ enum netfs_io_source source = NETFS_SOURCE_UNKNOWN;
ssize_t slice;
subreq = netfs_alloc_subrequest(rreq);
@@ -242,20 +234,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
subreq->start = start;
subreq->len = size;
- atomic_inc(&rreq->nr_outstanding);
- spin_lock_bh(&rreq->lock);
- list_add_tail(&subreq->rreq_link, &rreq->subrequests);
- subreq->prev_donated = rreq->prev_donated;
- rreq->prev_donated = 0;
- trace_netfs_sreq(subreq, netfs_sreq_trace_added);
- spin_unlock_bh(&rreq->lock);
-
source = netfs_cache_prepare_read(rreq, subreq, rreq->i_size);
subreq->source = source;
if (source == NETFS_DOWNLOAD_FROM_SERVER) {
unsigned long long zp = umin(ictx->zero_point, rreq->i_size);
size_t len = subreq->len;
+ if (unlikely(rreq->origin == NETFS_READ_SINGLE))
+ zp = rreq->i_size;
if (subreq->start >= zp) {
subreq->source = source = NETFS_FILL_WITH_ZEROES;
goto fill_with_zeroes;
@@ -276,24 +262,17 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
if (rreq->netfs_ops->prepare_read) {
ret = rreq->netfs_ops->prepare_read(subreq);
if (ret < 0) {
- atomic_dec(&rreq->nr_outstanding);
- netfs_put_subrequest(subreq, false,
+ subreq->error = ret;
+ /* Not queued - release both refs. */
+ netfs_put_subrequest(subreq,
+ netfs_sreq_trace_put_cancel);
+ netfs_put_subrequest(subreq,
netfs_sreq_trace_put_cancel);
break;
}
trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
}
-
- slice = netfs_prepare_read_iterator(subreq);
- if (slice < 0) {
- atomic_dec(&rreq->nr_outstanding);
- netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
- ret = slice;
- break;
- }
-
- rreq->netfs_ops->issue_read(subreq);
- goto done;
+ goto issue;
}
fill_with_zeroes:
@@ -301,82 +280,47 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
subreq->source = NETFS_FILL_WITH_ZEROES;
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
netfs_stat(&netfs_n_rh_zero);
- slice = netfs_prepare_read_iterator(subreq);
- __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
- netfs_read_subreq_terminated(subreq, 0, false);
- goto done;
+ goto issue;
}
if (source == NETFS_READ_FROM_CACHE) {
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
- slice = netfs_prepare_read_iterator(subreq);
- netfs_read_cache_to_pagecache(rreq, subreq);
- goto done;
+ goto issue;
}
pr_err("Unexpected read source %u\n", source);
WARN_ON_ONCE(1);
break;
- done:
+ issue:
+ slice = netfs_prepare_read_iterator(subreq, ractl);
+ if (slice < 0) {
+ ret = slice;
+ subreq->error = ret;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_cancel);
+ /* Not queued - release both refs. */
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
+ break;
+ }
size -= slice;
start += slice;
+
+ netfs_queue_read(rreq, subreq, size <= 0);
+ netfs_issue_read(rreq, subreq);
cond_resched();
} while (size > 0);
- if (atomic_dec_and_test(&rreq->nr_outstanding))
- netfs_rreq_terminated(rreq, false);
+ if (unlikely(size > 0)) {
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
+ netfs_wake_collector(rreq);
+ }
/* Defer error return as we may need to wait for outstanding I/O. */
cmpxchg(&rreq->error, 0, ret);
}
-/*
- * Wait for the read operation to complete, successfully or otherwise.
- */
-static int netfs_wait_for_read(struct netfs_io_request *rreq)
-{
- int ret;
-
- trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
- wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE);
- ret = rreq->error;
- if (ret == 0 && rreq->submitted < rreq->len) {
- trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
- ret = -EIO;
- }
-
- return ret;
-}
-
-/*
- * Set up the initial folioq of buffer folios in the rolling buffer and set the
- * iterator to refer to it.
- */
-static int netfs_prime_buffer(struct netfs_io_request *rreq)
-{
- struct folio_queue *folioq;
- struct folio_batch put_batch;
- size_t added;
-
- folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
- if (!folioq)
- return -ENOMEM;
- netfs_stat(&netfs_n_folioq);
- folioq_init(folioq);
- rreq->buffer = folioq;
- rreq->buffer_tail = folioq;
- rreq->submitted = rreq->start;
- iov_iter_folio_queue(&rreq->iter, ITER_DEST, folioq, 0, 0, 0);
-
- folio_batch_init(&put_batch);
- added = netfs_load_buffer_from_ra(rreq, folioq, &put_batch);
- folio_batch_release(&put_batch);
- rreq->iter.count += added;
- rreq->submitted += added;
- return 0;
-}
-
/**
* netfs_readahead - Helper to manage a read request
* @ractl: The description of the readahead request
@@ -405,6 +349,8 @@ void netfs_readahead(struct readahead_control *ractl)
if (IS_ERR(rreq))
return;
+ __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags);
+
ret = netfs_begin_cache_read(rreq, ictx);
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
goto cleanup_free;
@@ -415,41 +361,33 @@ void netfs_readahead(struct readahead_control *ractl)
netfs_rreq_expand(rreq, ractl);
- rreq->ractl = ractl;
- if (netfs_prime_buffer(rreq) < 0)
+ rreq->submitted = rreq->start;
+ if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0)
goto cleanup_free;
- netfs_read_to_pagecache(rreq);
+ netfs_read_to_pagecache(rreq, ractl);
- netfs_put_request(rreq, true, netfs_rreq_trace_put_return);
- return;
+ return netfs_put_request(rreq, netfs_rreq_trace_put_return);
cleanup_free:
- netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
- return;
+ return netfs_put_failed_request(rreq);
}
EXPORT_SYMBOL(netfs_readahead);
/*
* Create a rolling buffer with a single occupying folio.
*/
-static int netfs_create_singular_buffer(struct netfs_io_request *rreq, struct folio *folio)
+static int netfs_create_singular_buffer(struct netfs_io_request *rreq, struct folio *folio,
+ unsigned int rollbuf_flags)
{
- struct folio_queue *folioq;
+ ssize_t added;
- folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
- if (!folioq)
+ if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0)
return -ENOMEM;
- netfs_stat(&netfs_n_folioq);
- folioq_init(folioq);
- folioq_append(folioq, folio);
- BUG_ON(folioq_folio(folioq, 0) != folio);
- BUG_ON(folioq_folio_order(folioq, 0) != folio_order(folio));
- rreq->buffer = folioq;
- rreq->buffer_tail = folioq;
- rreq->submitted = rreq->start + rreq->len;
- iov_iter_folio_queue(&rreq->iter, ITER_DEST, folioq, 0, 0, rreq->len);
- rreq->ractl = (struct readahead_control *)1UL;
+ added = rolling_buffer_append(&rreq->buffer, folio, rollbuf_flags);
+ if (added < 0)
+ return added;
+ rreq->submitted = rreq->start + added;
return 0;
}
@@ -516,25 +454,25 @@ static int netfs_read_gaps(struct file *file, struct folio *folio)
}
if (to < flen)
bvec_set_folio(&bvec[i++], folio, flen - to, to);
- iov_iter_bvec(&rreq->iter, ITER_DEST, bvec, i, rreq->len);
+ iov_iter_bvec(&rreq->buffer.iter, ITER_DEST, bvec, i, rreq->len);
rreq->submitted = rreq->start + flen;
- netfs_read_to_pagecache(rreq);
+ netfs_read_to_pagecache(rreq, NULL);
if (sink)
folio_put(sink);
ret = netfs_wait_for_read(rreq);
- if (ret == 0) {
+ if (ret >= 0) {
flush_dcache_folio(folio);
folio_mark_uptodate(folio);
}
folio_unlock(folio);
- netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+ netfs_put_request(rreq, netfs_rreq_trace_put_return);
return ret < 0 ? ret : 0;
discard:
- netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
+ netfs_put_failed_request(rreq);
alloc_error:
folio_unlock(folio);
return ret;
@@ -584,17 +522,17 @@ int netfs_read_folio(struct file *file, struct folio *folio)
trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
/* Set up the output buffer */
- ret = netfs_create_singular_buffer(rreq, folio);
+ ret = netfs_create_singular_buffer(rreq, folio, 0);
if (ret < 0)
goto discard;
- netfs_read_to_pagecache(rreq);
+ netfs_read_to_pagecache(rreq, NULL);
ret = netfs_wait_for_read(rreq);
- netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+ netfs_put_request(rreq, netfs_rreq_trace_put_return);
return ret < 0 ? ret : 0;
discard:
- netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
+ netfs_put_failed_request(rreq);
alloc_error:
folio_unlock(folio);
return ret;
@@ -741,15 +679,15 @@ retry:
trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
/* Set up the output buffer */
- ret = netfs_create_singular_buffer(rreq, folio);
+ ret = netfs_create_singular_buffer(rreq, folio, 0);
if (ret < 0)
goto error_put;
- netfs_read_to_pagecache(rreq);
+ netfs_read_to_pagecache(rreq, NULL);
ret = netfs_wait_for_read(rreq);
if (ret < 0)
goto error;
- netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+ netfs_put_request(rreq, netfs_rreq_trace_put_return);
have_folio:
ret = folio_wait_private_2_killable(folio);
@@ -761,7 +699,7 @@ have_folio_no_wait:
return 0;
error_put:
- netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
+ netfs_put_failed_request(rreq);
error:
if (folio) {
folio_unlock(folio);
@@ -806,18 +744,17 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
trace_netfs_read(rreq, start, flen, netfs_read_trace_prefetch_for_write);
/* Set up the output buffer */
- ret = netfs_create_singular_buffer(rreq, folio);
+ ret = netfs_create_singular_buffer(rreq, folio, NETFS_ROLLBUF_PAGECACHE_MARK);
if (ret < 0)
goto error_put;
- folioq_mark2(rreq->buffer, 0);
- netfs_read_to_pagecache(rreq);
+ netfs_read_to_pagecache(rreq, NULL);
ret = netfs_wait_for_read(rreq);
- netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
- return ret;
+ netfs_put_request(rreq, netfs_rreq_trace_put_return);
+ return ret < 0 ? ret : 0;
error_put:
- netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
+ netfs_put_failed_request(rreq);
error:
_leave(" = %d", ret);
return ret;
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index b4826360a411..f9d62abef2ac 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -53,30 +53,40 @@ static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
* data written into the pagecache until we can find out from the server what
* the values actually are.
*/
-static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
- loff_t i_size, loff_t pos, size_t copied)
+void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
+ loff_t pos, size_t copied)
{
+ loff_t i_size, end = pos + copied;
blkcnt_t add;
size_t gap;
+ if (end <= i_size_read(inode))
+ return;
+
if (ctx->ops->update_i_size) {
- ctx->ops->update_i_size(inode, pos);
+ ctx->ops->update_i_size(inode, end);
return;
}
- i_size_write(inode, pos);
+ spin_lock(&inode->i_lock);
+
+ i_size = i_size_read(inode);
+ if (end > i_size) {
+ i_size_write(inode, end);
#if IS_ENABLED(CONFIG_FSCACHE)
- fscache_update_cookie(ctx->cache, NULL, &pos);
+ fscache_update_cookie(ctx->cache, NULL, &end);
#endif
- gap = SECTOR_SIZE - (i_size & (SECTOR_SIZE - 1));
- if (copied > gap) {
- add = DIV_ROUND_UP(copied - gap, SECTOR_SIZE);
+ gap = SECTOR_SIZE - (i_size & (SECTOR_SIZE - 1));
+ if (copied > gap) {
+ add = DIV_ROUND_UP(copied - gap, SECTOR_SIZE);
- inode->i_blocks = min_t(blkcnt_t,
- DIV_ROUND_UP(pos, SECTOR_SIZE),
- inode->i_blocks + add);
+ inode->i_blocks = min_t(blkcnt_t,
+ DIV_ROUND_UP(end, SECTOR_SIZE),
+ inode->i_blocks + add);
+ }
}
+ spin_unlock(&inode->i_lock);
}
/**
@@ -111,12 +121,11 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
struct folio *folio = NULL, *writethrough = NULL;
unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0;
ssize_t written = 0, ret, ret2;
- loff_t i_size, pos = iocb->ki_pos;
+ loff_t pos = iocb->ki_pos;
size_t max_chunk = mapping_max_folio_size(mapping);
bool maybe_trouble = false;
- if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
- iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
+ if (unlikely(iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
) {
wbc_attach_fdatawrite_inode(&wbc, mapping->host);
@@ -338,17 +347,15 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
folio_put(folio);
ret = filemap_write_and_wait_range(mapping, fpos, fpos + flen - 1);
if (ret < 0)
- goto error_folio_unlock;
+ goto out;
continue;
copied:
flush_dcache_folio(folio);
/* Update the inode size if we moved the EOF marker */
+ netfs_update_i_size(ctx, inode, pos, copied);
pos += copied;
- i_size = i_size_read(inode);
- if (pos > i_size)
- netfs_update_i_size(ctx, inode, i_size, pos, copied);
written += copied;
if (likely(!wreq)) {
@@ -386,7 +393,7 @@ out:
wbc_detach_inode(&wbc);
if (ret2 == -EIOCBQUEUED)
return ret2;
- if (ret == 0)
+ if (ret == 0 && ret2 < 0)
ret = ret2;
}
@@ -528,7 +535,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
folio_unlock(folio);
err = filemap_fdatawrite_range(mapping,
folio_pos(folio),
- folio_pos(folio) + folio_size(folio));
+ folio_next_pos(folio));
switch (err) {
case 0:
ret = VM_FAULT_RETRY;
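
The reworked netfs_update_i_size() above only charges i_blocks for the bytes that land past the old EOF, rounded up to whole sectors and capped by the sector count implied by the new file size. A minimal userspace sketch of just that arithmetic, assuming the kernel's 512-byte SECTOR_SIZE; the i_lock, the re-check under the lock and the ->update_i_size() hook are left out:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE 512ULL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Mirror of the i_blocks adjustment in netfs_update_i_size(): only the part
 * of the copy that extends past the old EOF adds sectors, and the result is
 * capped at the sector count implied by the new EOF. */
static uint64_t new_i_blocks(uint64_t i_size, uint64_t i_blocks,
			     uint64_t pos, uint64_t copied)
{
	uint64_t end = pos + copied;
	uint64_t gap = SECTOR_SIZE - (i_size & (SECTOR_SIZE - 1));

	if (end <= i_size || copied <= gap)
		return i_blocks;
	i_blocks += DIV_ROUND_UP(copied - gap, SECTOR_SIZE);
	if (i_blocks > DIV_ROUND_UP(end, SECTOR_SIZE))
		i_blocks = DIV_ROUND_UP(end, SECTOR_SIZE);
	return i_blocks;
}

int main(void)
{
	/* EOF at 1000 bytes (2 sectors in use), append 2000 bytes at EOF:
	 * the file grows to 3000 bytes, i.e. 6 sectors. */
	printf("%llu\n",
	       (unsigned long long)new_i_blocks(1000, 2, 1000, 2000));
	return 0;
}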
diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
index b1a66a6e6bc2..a498ee8d6674 100644
--- a/fs/netfs/direct_read.c
+++ b/fs/netfs/direct_read.c
@@ -25,7 +25,7 @@ static void netfs_prepare_dio_read_iterator(struct netfs_io_subrequest *subreq)
subreq->len = rsize;
if (unlikely(rreq->io_streams[0].sreq_max_segs)) {
- size_t limit = netfs_limit_iter(&rreq->iter, 0, rsize,
+ size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize,
rreq->io_streams[0].sreq_max_segs);
if (limit < rsize) {
@@ -36,9 +36,9 @@ static void netfs_prepare_dio_read_iterator(struct netfs_io_subrequest *subreq)
trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
- subreq->io_iter = rreq->iter;
+ subreq->io_iter = rreq->buffer.iter;
iov_iter_truncate(&subreq->io_iter, subreq->len);
- iov_iter_advance(&rreq->iter, subreq->len);
+ iov_iter_advance(&rreq->buffer.iter, subreq->len);
}
/*
@@ -47,12 +47,11 @@ static void netfs_prepare_dio_read_iterator(struct netfs_io_subrequest *subreq)
*/
static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
{
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
unsigned long long start = rreq->start;
ssize_t size = rreq->len;
int ret = 0;
- atomic_set(&rreq->nr_outstanding, 1);
-
do {
struct netfs_io_subrequest *subreq;
ssize_t slice;
@@ -67,40 +66,55 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
subreq->start = start;
subreq->len = size;
- atomic_inc(&rreq->nr_outstanding);
- spin_lock_bh(&rreq->lock);
- list_add_tail(&subreq->rreq_link, &rreq->subrequests);
- subreq->prev_donated = rreq->prev_donated;
- rreq->prev_donated = 0;
+ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+
+ spin_lock(&rreq->lock);
+ list_add_tail(&subreq->rreq_link, &stream->subrequests);
+ if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
+ stream->front = subreq;
+ if (!stream->active) {
+ stream->collected_to = stream->front->start;
+ /* Store list pointers before active flag */
+ smp_store_release(&stream->active, true);
+ }
+ }
trace_netfs_sreq(subreq, netfs_sreq_trace_added);
- spin_unlock_bh(&rreq->lock);
+ spin_unlock(&rreq->lock);
netfs_stat(&netfs_n_rh_download);
if (rreq->netfs_ops->prepare_read) {
ret = rreq->netfs_ops->prepare_read(subreq);
if (ret < 0) {
- atomic_dec(&rreq->nr_outstanding);
- netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
break;
}
}
netfs_prepare_dio_read_iterator(subreq);
slice = subreq->len;
- rreq->netfs_ops->issue_read(subreq);
-
size -= slice;
start += slice;
rreq->submitted += slice;
+ if (size <= 0) {
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
+ }
- if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
- test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
+ rreq->netfs_ops->issue_read(subreq);
+
+ if (test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
+ netfs_wait_for_paused_read(rreq);
+ if (test_bit(NETFS_RREQ_FAILED, &rreq->flags))
break;
cond_resched();
} while (size > 0);
- if (atomic_dec_and_test(&rreq->nr_outstanding))
- netfs_rreq_terminated(rreq, false);
+ if (unlikely(size > 0)) {
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
+ netfs_wake_collector(rreq);
+ }
+
return ret;
}
@@ -108,15 +122,16 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
* Perform a read to an application buffer, bypassing the pagecache and the
* local disk cache.
*/
-static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
+static ssize_t netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
{
- int ret;
+ ssize_t ret;
_enter("R=%x %llx-%llx",
rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
if (rreq->len == 0) {
pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
+ netfs_put_request(rreq, netfs_rreq_trace_put_discard);
return -EIO;
}
@@ -127,29 +142,18 @@ static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
ret = netfs_dispatch_unbuffered_reads(rreq);
if (!rreq->submitted) {
- netfs_put_request(rreq, false, netfs_rreq_trace_put_no_submit);
+ netfs_put_request(rreq, netfs_rreq_trace_put_no_submit);
inode_dio_end(rreq->inode);
ret = 0;
goto out;
}
- if (sync) {
- trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
- wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS,
- TASK_UNINTERRUPTIBLE);
-
- ret = rreq->error;
- if (ret == 0 && rreq->submitted < rreq->len &&
- rreq->origin != NETFS_DIO_READ) {
- trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
- ret = -EIO;
- }
- } else {
+ if (sync)
+ ret = netfs_wait_for_read(rreq);
+ else
ret = -EIOCBQUEUED;
- }
-
out:
- _leave(" = %d", ret);
+ _leave(" = %zd", ret);
return ret;
}
@@ -182,7 +186,8 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
rreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
iocb->ki_pos, orig_count,
- NETFS_DIO_READ);
+ iocb->ki_flags & IOCB_DIRECT ?
+ NETFS_DIO_READ : NETFS_UNBUFFERED_READ);
if (IS_ERR(rreq))
return PTR_ERR(rreq);
@@ -199,15 +204,15 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
* the request.
*/
if (user_backed_iter(iter)) {
- ret = netfs_extract_user_iter(iter, rreq->len, &rreq->iter, 0);
+ ret = netfs_extract_user_iter(iter, rreq->len, &rreq->buffer.iter, 0);
if (ret < 0)
- goto out;
- rreq->direct_bv = (struct bio_vec *)rreq->iter.bvec;
+ goto error_put;
+ rreq->direct_bv = (struct bio_vec *)rreq->buffer.iter.bvec;
rreq->direct_bv_count = ret;
rreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
- rreq->len = iov_iter_count(&rreq->iter);
+ rreq->len = iov_iter_count(&rreq->buffer.iter);
} else {
- rreq->iter = *iter;
+ rreq->buffer.iter = *iter;
rreq->len = orig_count;
rreq->direct_bv_unpin = false;
iov_iter_advance(iter, orig_count);
@@ -215,8 +220,10 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
// TODO: Set up bounce buffer if needed
- if (!sync)
+ if (!sync) {
rreq->iocb = iocb;
+ __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags);
+ }
ret = netfs_unbuffered_read(rreq, sync);
if (ret < 0)
@@ -228,10 +235,14 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
}
out:
- netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+ netfs_put_request(rreq, netfs_rreq_trace_put_return);
if (ret > 0)
orig_count -= ret;
return ret;
+
+error_put:
+ netfs_put_failed_request(rreq);
+ return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_read_iter_locked);
diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
index 88f2adfab75e..a9d1c3b2c084 100644
--- a/fs/netfs/direct_write.c
+++ b/fs/netfs/direct_write.c
@@ -9,20 +9,6 @@
#include <linux/uio.h>
#include "internal.h"
-static void netfs_cleanup_dio_write(struct netfs_io_request *wreq)
-{
- struct inode *inode = wreq->inode;
- unsigned long long end = wreq->start + wreq->transferred;
-
- if (!wreq->error &&
- i_size_read(inode) < end) {
- if (wreq->netfs_ops->update_i_size)
- wreq->netfs_ops->update_i_size(inode, end);
- else
- i_size_write(inode, end);
- }
-}
-
/*
* Perform an unbuffered write where we may have to do an RMW operation on an
* encrypted file. This can also be used for direct I/O writes.
@@ -67,23 +53,28 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
* allocate a sufficiently large bvec array and may shorten the
* request.
*/
- if (async || user_backed_iter(iter)) {
- n = netfs_extract_user_iter(iter, len, &wreq->iter, 0);
+ if (user_backed_iter(iter)) {
+ n = netfs_extract_user_iter(iter, len, &wreq->buffer.iter, 0);
if (n < 0) {
ret = n;
- goto out;
+ goto error_put;
}
- wreq->direct_bv = (struct bio_vec *)wreq->iter.bvec;
+ wreq->direct_bv = (struct bio_vec *)wreq->buffer.iter.bvec;
wreq->direct_bv_count = n;
wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
} else {
- wreq->iter = *iter;
+ /* If this is a kernel-generated async DIO request,
+ * assume that any resources the iterator points to
+ * (eg. a bio_vec array) will persist till the end of
+ * the op.
+ */
+ wreq->buffer.iter = *iter;
}
-
- wreq->io_iter = wreq->iter;
}
__set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);
+ if (async)
+ __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
/* Copy the data into the bounce buffer and encrypt it. */
// TODO
@@ -92,8 +83,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
if (async)
wreq->iocb = iocb;
- wreq->len = iov_iter_count(&wreq->io_iter);
- wreq->cleanup = netfs_cleanup_dio_write;
+ wreq->len = iov_iter_count(&wreq->buffer.iter);
ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len);
if (ret < 0) {
_debug("begin = %zd", ret);
@@ -101,21 +91,19 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
}
if (!async) {
- trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
- wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
- TASK_UNINTERRUPTIBLE);
- smp_rmb(); /* Read error/transferred after RIP flag */
- ret = wreq->error;
- if (ret == 0) {
- ret = wreq->transferred;
+ ret = netfs_wait_for_write(wreq);
+ if (ret > 0)
iocb->ki_pos += ret;
- }
} else {
ret = -EIOCBQUEUED;
}
out:
- netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+ netfs_put_request(wreq, netfs_rreq_trace_put_return);
+ return ret;
+
+error_put:
+ netfs_put_failed_request(wreq);
return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_write_iter_locked);
diff --git a/fs/netfs/fscache_cache.c b/fs/netfs/fscache_cache.c
index 9397ed39b0b4..8f70f8da064b 100644
--- a/fs/netfs/fscache_cache.c
+++ b/fs/netfs/fscache_cache.c
@@ -372,7 +372,7 @@ void fscache_withdraw_cache(struct fscache_cache *cache)
EXPORT_SYMBOL(fscache_withdraw_cache);
#ifdef CONFIG_PROC_FS
-static const char fscache_cache_states[NR__FSCACHE_CACHE_STATE] = "-PAEW";
+static const char fscache_cache_states[NR__FSCACHE_CACHE_STATE] __nonstring = "-PAEW";
/*
* Generate a list of caches in /proc/fs/fscache/caches
diff --git a/fs/netfs/fscache_cookie.c b/fs/netfs/fscache_cookie.c
index d4d4b3a8b106..3d56fc73435f 100644
--- a/fs/netfs/fscache_cookie.c
+++ b/fs/netfs/fscache_cookie.c
@@ -29,7 +29,7 @@ static LIST_HEAD(fscache_cookie_lru);
static DEFINE_SPINLOCK(fscache_cookie_lru_lock);
DEFINE_TIMER(fscache_cookie_lru_timer, fscache_cookie_lru_timed_out);
static DECLARE_WORK(fscache_cookie_lru_work, fscache_cookie_lru_worker);
-static const char fscache_cookie_states[FSCACHE_COOKIE_STATE__NR] = "-LCAIFUWRD";
+static const char fscache_cookie_states[FSCACHE_COOKIE_STATE__NR] __nonstring = "-LCAIFUWRD";
static unsigned int fscache_lru_cookie_timeout = 10 * HZ;
void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
diff --git a/fs/netfs/fscache_io.c b/fs/netfs/fscache_io.c
index b1722a82c03d..e4308457633c 100644
--- a/fs/netfs/fscache_io.c
+++ b/fs/netfs/fscache_io.c
@@ -192,8 +192,7 @@ EXPORT_SYMBOL(__fscache_clear_page_bits);
/*
* Deal with the completion of writing the data to the cache.
*/
-static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
- bool was_async)
+static void fscache_wreq_done(void *priv, ssize_t transferred_or_error)
{
struct fscache_write_request *wreq = priv;
@@ -202,8 +201,7 @@ static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
wreq->set_bits);
if (wreq->term_func)
- wreq->term_func(wreq->term_func_priv, transferred_or_error,
- was_async);
+ wreq->term_func(wreq->term_func_priv, transferred_or_error);
fscache_end_operation(&wreq->cache_resources);
kfree(wreq);
}
@@ -255,14 +253,14 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie,
return;
abandon_end:
- return fscache_wreq_done(wreq, ret, false);
+ return fscache_wreq_done(wreq, ret);
abandon_free:
kfree(wreq);
abandon:
if (using_pgpriv2)
fscache_clear_page_bits(mapping, start, len, cond);
if (term_func)
- term_func(term_func_priv, ret, false);
+ term_func(term_func_priv, ret);
}
EXPORT_SYMBOL(__fscache_write_to_cache);
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index c562aec3b483..4319611f5354 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -23,10 +23,17 @@
/*
* buffered_read.c
*/
+void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);
int netfs_prefetch_for_write(struct file *file, struct folio *folio,
size_t offset, size_t len);
/*
+ * buffered_write.c
+ */
+void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
+ loff_t pos, size_t copied);
+
+/*
* main.c
*/
extern unsigned int netfs_debug;
@@ -58,12 +65,17 @@ static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
/*
* misc.c
*/
-struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq);
-int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
- bool needs_put);
-struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq);
-void netfs_clear_buffer(struct netfs_io_request *rreq);
+struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq,
+ enum netfs_folioq_trace trace);
void netfs_reset_iter(struct netfs_io_subrequest *subreq);
+void netfs_wake_collector(struct netfs_io_request *rreq);
+void netfs_subreq_clear_in_progress(struct netfs_io_subrequest *subreq);
+void netfs_wait_for_in_progress_stream(struct netfs_io_request *rreq,
+ struct netfs_io_stream *stream);
+ssize_t netfs_wait_for_read(struct netfs_io_request *rreq);
+ssize_t netfs_wait_for_write(struct netfs_io_request *rreq);
+void netfs_wait_for_paused_read(struct netfs_io_request *rreq);
+void netfs_wait_for_paused_write(struct netfs_io_request *rreq);
/*
* objects.c
@@ -73,9 +85,9 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
loff_t start, size_t len,
enum netfs_io_origin origin);
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
-void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
-void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
- enum netfs_rreq_ref_trace what);
+void netfs_clear_subrequests(struct netfs_io_request *rreq);
+void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
+void netfs_put_failed_request(struct netfs_io_request *rreq);
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);
static inline void netfs_see_request(struct netfs_io_request *rreq,
@@ -84,20 +96,25 @@ static inline void netfs_see_request(struct netfs_io_request *rreq,
trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
}
+static inline void netfs_see_subrequest(struct netfs_io_subrequest *subreq,
+ enum netfs_sreq_ref_trace what)
+{
+ trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index,
+ refcount_read(&subreq->ref), what);
+}
+
/*
* read_collect.c
*/
-void netfs_read_termination_worker(struct work_struct *work);
-void netfs_rreq_terminated(struct netfs_io_request *rreq, bool was_async);
+bool netfs_read_collection(struct netfs_io_request *rreq);
+void netfs_read_collection_worker(struct work_struct *work);
+void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);
/*
* read_pgpriv2.c
*/
-void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
- struct netfs_io_request *rreq,
- struct folio_queue *folioq,
- int slot);
-void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq);
+void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio);
+void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq);
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq);
/*
@@ -113,6 +130,7 @@ void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq);
extern atomic_t netfs_n_rh_dio_read;
extern atomic_t netfs_n_rh_readahead;
extern atomic_t netfs_n_rh_read_folio;
+extern atomic_t netfs_n_rh_read_single;
extern atomic_t netfs_n_rh_rreq;
extern atomic_t netfs_n_rh_sreq;
extern atomic_t netfs_n_rh_download;
@@ -129,6 +147,8 @@ extern atomic_t netfs_n_rh_write_begin;
extern atomic_t netfs_n_rh_write_done;
extern atomic_t netfs_n_rh_write_failed;
extern atomic_t netfs_n_rh_write_zskip;
+extern atomic_t netfs_n_rh_retry_read_req;
+extern atomic_t netfs_n_rh_retry_read_subreq;
extern atomic_t netfs_n_wh_buffered_write;
extern atomic_t netfs_n_wh_writethrough;
extern atomic_t netfs_n_wh_dio_write;
@@ -141,6 +161,8 @@ extern atomic_t netfs_n_wh_upload_failed;
extern atomic_t netfs_n_wh_write;
extern atomic_t netfs_n_wh_write_done;
extern atomic_t netfs_n_wh_write_failed;
+extern atomic_t netfs_n_wh_retry_write_req;
+extern atomic_t netfs_n_wh_retry_write_subreq;
extern atomic_t netfs_n_wb_lock_skip;
extern atomic_t netfs_n_wb_lock_wait;
extern atomic_t netfs_n_folioq;
@@ -166,8 +188,8 @@ static inline void netfs_stat_d(atomic_t *stat)
* write_collect.c
*/
int netfs_folio_written_back(struct folio *folio);
+bool netfs_write_collection(struct netfs_io_request *wreq);
void netfs_write_collection_worker(struct work_struct *work);
-void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async);
/*
* write_issue.c
@@ -181,18 +203,23 @@ void netfs_reissue_write(struct netfs_io_stream *stream,
struct iov_iter *source);
void netfs_issue_write(struct netfs_io_request *wreq,
struct netfs_io_stream *stream);
-int netfs_advance_write(struct netfs_io_request *wreq,
- struct netfs_io_stream *stream,
- loff_t start, size_t len, bool to_eof);
+size_t netfs_advance_write(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream,
+ loff_t start, size_t len, bool to_eof);
struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
struct folio *folio, size_t copied, bool to_page_end,
struct folio **writethrough_cache);
-int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
- struct folio *writethrough_cache);
+ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+ struct folio *writethrough_cache);
int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
/*
+ * write_retry.c
+ */
+void netfs_retry_writes(struct netfs_io_request *wreq);
+
+/*
* Miscellaneous functions.
*/
static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
@@ -240,6 +267,39 @@ static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
}
/*
+ * Clear and wake up a NETFS_RREQ_* flag bit on a request.
+ */
+static inline void netfs_wake_rreq_flag(struct netfs_io_request *rreq,
+ unsigned int rreq_flag,
+ enum netfs_rreq_trace trace)
+{
+ if (test_bit(rreq_flag, &rreq->flags)) {
+ clear_bit_unlock(rreq_flag, &rreq->flags);
+ smp_mb__after_atomic(); /* Set flag before task state */
+ trace_netfs_rreq(rreq, trace);
+ wake_up(&rreq->waitq);
+ }
+}
+
+/*
+ * Test the NETFS_RREQ_IN_PROGRESS flag, inserting an appropriate barrier.
+ */
+static inline bool netfs_check_rreq_in_progress(const struct netfs_io_request *rreq)
+{
+ /* Order read of flags before read of anything else, such as error. */
+ return test_bit_acquire(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+}
+
+/*
+ * Test the NETFS_SREQ_IN_PROGRESS flag, inserting an appropriate barrier.
+ */
+static inline bool netfs_check_subreq_in_progress(const struct netfs_io_subrequest *subreq)
+{
+ /* Order read of flags before read of anything else, such as error. */
+ return test_bit_acquire(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+}
+
+/*
* fscache-cache.c
*/
#ifdef CONFIG_PROC_FS
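
The new netfs_wake_rreq_flag() and netfs_check_*_in_progress() helpers above pair a release-ordered clear with an acquire-ordered test, so that error and transferred counts written before the flag is cleared are guaranteed visible to the waiter that observes it clear. A minimal userspace analogue of that handshake, using C11 atomics and a pthread condition variable in place of clear_bit_unlock()/test_bit_acquire() and the waitqueue; the names here are illustrative, not netfs API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Results are published before the in_progress flag is cleared with release
 * ordering; the waiter reads the flag with acquire ordering before looking
 * at the results. */
struct req {
	long transferred;
	int error;
	atomic_bool in_progress;
	pthread_mutex_t lock;
	pthread_cond_t waitq;
};

static void *worker(void *p)
{
	struct req *rq = p;

	rq->transferred = 4096;		/* publish results first */
	rq->error = 0;
	atomic_store_explicit(&rq->in_progress, false, memory_order_release);
	pthread_mutex_lock(&rq->lock);
	pthread_cond_signal(&rq->waitq);	/* wake the waiter */
	pthread_mutex_unlock(&rq->lock);
	return NULL;
}

int main(void)
{
	struct req rq = { .in_progress = true,
			  .lock = PTHREAD_MUTEX_INITIALIZER,
			  .waitq = PTHREAD_COND_INITIALIZER };
	pthread_t t;

	pthread_create(&t, NULL, worker, &rq);
	pthread_mutex_lock(&rq.lock);
	while (atomic_load_explicit(&rq.in_progress, memory_order_acquire))
		pthread_cond_wait(&rq.waitq, &rq.lock);
	pthread_mutex_unlock(&rq.lock);
	pthread_join(t, NULL);
	printf("err=%d transferred=%ld\n", rq.error, rq.transferred);
	return 0;
}

The kernel helpers additionally issue smp_mb__after_atomic() because the wake-up path inspects waiter state without a lock; the condition variable hides that detail in this sketch.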
diff --git a/fs/netfs/main.c b/fs/netfs/main.c
index 6c7be1377ee0..73da6c9f5777 100644
--- a/fs/netfs/main.c
+++ b/fs/netfs/main.c
@@ -37,9 +37,12 @@ static const char *netfs_origins[nr__netfs_io_origin] = {
[NETFS_READAHEAD] = "RA",
[NETFS_READPAGE] = "RP",
[NETFS_READ_GAPS] = "RG",
+ [NETFS_READ_SINGLE] = "R1",
[NETFS_READ_FOR_WRITE] = "RW",
+ [NETFS_UNBUFFERED_READ] = "UR",
[NETFS_DIO_READ] = "DR",
[NETFS_WRITEBACK] = "WB",
+ [NETFS_WRITEBACK_SINGLE] = "W1",
[NETFS_WRITETHROUGH] = "WT",
[NETFS_UNBUFFERED_WRITE] = "UW",
[NETFS_DIO_WRITE] = "DW",
@@ -55,21 +58,21 @@ static int netfs_requests_seq_show(struct seq_file *m, void *v)
if (v == &netfs_io_requests) {
seq_puts(m,
- "REQUEST OR REF FL ERR OPS COVERAGE\n"
- "======== == === == ==== === =========\n"
+ "REQUEST OR REF FLAG ERR OPS COVERAGE\n"
+ "======== == === ==== ==== === =========\n"
);
return 0;
}
rreq = list_entry(v, struct netfs_io_request, proc_link);
seq_printf(m,
- "%08x %s %3d %2lx %4ld %3d @%04llx %llx/%llx",
+ "%08x %s %3d %4lx %4ld %3d @%04llx %llx/%llx",
rreq->debug_id,
netfs_origins[rreq->origin],
refcount_read(&rreq->ref),
rreq->flags,
rreq->error,
- atomic_read(&rreq->nr_outstanding),
+ 0,
rreq->start, rreq->submitted, rreq->len);
seq_putc(m, '\n');
return 0;
@@ -116,7 +119,7 @@ static int __init netfs_init(void)
goto error_reqpool;
netfs_subrequest_slab = kmem_cache_create("netfs_subrequest",
- sizeof(struct netfs_io_subrequest), 0,
+ sizeof(struct netfs_io_subrequest) + 16, 0,
SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
NULL);
if (!netfs_subrequest_slab)
@@ -125,11 +128,13 @@ static int __init netfs_init(void)
if (mempool_init_slab_pool(&netfs_subrequest_pool, 100, netfs_subrequest_slab) < 0)
goto error_subreqpool;
+#ifdef CONFIG_PROC_FS
if (!proc_mkdir("fs/netfs", NULL))
goto error_proc;
if (!proc_create_seq("fs/netfs/requests", S_IFREG | 0444, NULL,
&netfs_requests_seq_ops))
goto error_procfile;
+#endif
#ifdef CONFIG_FSCACHE_STATS
if (!proc_create_single("fs/netfs/stats", S_IFREG | 0444, NULL,
netfs_stats_show))
@@ -142,9 +147,11 @@ static int __init netfs_init(void)
return 0;
error_fscache:
+#ifdef CONFIG_PROC_FS
error_procfile:
remove_proc_subtree("fs/netfs", NULL);
error_proc:
+#endif
mempool_exit(&netfs_subrequest_pool);
error_subreqpool:
kmem_cache_destroy(netfs_subrequest_slab);
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 78fe5796b2b2..6df89c92b10b 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -8,113 +8,101 @@
#include <linux/swap.h>
#include "internal.h"
-/*
- * Make sure there's space in the rolling queue.
+/**
+ * netfs_alloc_folioq_buffer - Allocate buffer space into a folio queue
+ * @mapping: Address space to set on the folio (or NULL).
+ * @_buffer: Pointer to the folio queue to add to (may point to a NULL; updated).
+ * @_cur_size: Current size of the buffer (updated).
+ * @size: Target size of the buffer.
+ * @gfp: The allocation constraints.
*/
-struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq)
+int netfs_alloc_folioq_buffer(struct address_space *mapping,
+ struct folio_queue **_buffer,
+ size_t *_cur_size, ssize_t size, gfp_t gfp)
{
- struct folio_queue *tail = rreq->buffer_tail, *prev;
- unsigned int prev_nr_slots = 0;
-
- if (WARN_ON_ONCE(!rreq->buffer && tail) ||
- WARN_ON_ONCE(rreq->buffer && !tail))
- return ERR_PTR(-EIO);
-
- prev = tail;
- if (prev) {
- if (!folioq_full(tail))
- return tail;
- prev_nr_slots = folioq_nr_slots(tail);
- }
-
- tail = kmalloc(sizeof(*tail), GFP_NOFS);
- if (!tail)
- return ERR_PTR(-ENOMEM);
- netfs_stat(&netfs_n_folioq);
- folioq_init(tail);
- tail->prev = prev;
- if (prev)
- /* [!] NOTE: After we set prev->next, the consumer is entirely
- * at liberty to delete prev.
- */
- WRITE_ONCE(prev->next, tail);
-
- rreq->buffer_tail = tail;
- if (!rreq->buffer) {
- rreq->buffer = tail;
- iov_iter_folio_queue(&rreq->io_iter, ITER_SOURCE, tail, 0, 0, 0);
- } else {
- /* Make sure we don't leave the master iterator pointing to a
- * block that might get immediately consumed.
- */
- if (rreq->io_iter.folioq == prev &&
- rreq->io_iter.folioq_slot == prev_nr_slots) {
- rreq->io_iter.folioq = tail;
- rreq->io_iter.folioq_slot = 0;
+ struct folio_queue *tail = *_buffer, *p;
+
+ size = round_up(size, PAGE_SIZE);
+ if (*_cur_size >= size)
+ return 0;
+
+ if (tail)
+ while (tail->next)
+ tail = tail->next;
+
+ do {
+ struct folio *folio;
+ int order = 0, slot;
+
+ if (!tail || folioq_full(tail)) {
+ p = netfs_folioq_alloc(0, GFP_NOFS, netfs_trace_folioq_alloc_buffer);
+ if (!p)
+ return -ENOMEM;
+ if (tail) {
+ tail->next = p;
+ p->prev = tail;
+ } else {
+ *_buffer = p;
+ }
+ tail = p;
}
- }
- rreq->buffer_tail_slot = 0;
- return tail;
-}
-/*
- * Append a folio to the rolling queue.
- */
-int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
- bool needs_put)
-{
- struct folio_queue *tail;
- unsigned int slot, order = folio_order(folio);
+ if (size - *_cur_size > PAGE_SIZE)
+ order = umin(ilog2(size - *_cur_size) - PAGE_SHIFT,
+ MAX_PAGECACHE_ORDER);
- tail = netfs_buffer_make_space(rreq);
- if (IS_ERR(tail))
- return PTR_ERR(tail);
+ folio = folio_alloc(gfp, order);
+ if (!folio && order > 0)
+ folio = folio_alloc(gfp, 0);
+ if (!folio)
+ return -ENOMEM;
- rreq->io_iter.count += PAGE_SIZE << order;
+ folio->mapping = mapping;
+ folio->index = *_cur_size / PAGE_SIZE;
+ trace_netfs_folio(folio, netfs_folio_trace_alloc_buffer);
+ slot = folioq_append_mark(tail, folio);
+ *_cur_size += folioq_folio_size(tail, slot);
+ } while (*_cur_size < size);
- slot = folioq_append(tail, folio);
- /* Store the counter after setting the slot. */
- smp_store_release(&rreq->buffer_tail_slot, slot);
return 0;
}
+EXPORT_SYMBOL(netfs_alloc_folioq_buffer);
-/*
- * Delete the head of a rolling queue.
+/**
+ * netfs_free_folioq_buffer - Free a folio queue.
+ * @fq: The start of the folio queue to free
+ *
+ * Free up a chain of folio_queues and, if marked, the marked folios they point
+ * to.
*/
-struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq)
+void netfs_free_folioq_buffer(struct folio_queue *fq)
{
- struct folio_queue *head = wreq->buffer, *next = head->next;
-
- if (next)
- next->prev = NULL;
- netfs_stat_d(&netfs_n_folioq);
- kfree(head);
- wreq->buffer = next;
- return next;
-}
+ struct folio_queue *next;
+ struct folio_batch fbatch;
-/*
- * Clear out a rolling queue.
- */
-void netfs_clear_buffer(struct netfs_io_request *rreq)
-{
- struct folio_queue *p;
+ folio_batch_init(&fbatch);
- while ((p = rreq->buffer)) {
- rreq->buffer = p->next;
- for (int slot = 0; slot < folioq_count(p); slot++) {
- struct folio *folio = folioq_folio(p, slot);
- if (!folio)
+ for (; fq; fq = next) {
+ for (int slot = 0; slot < folioq_count(fq); slot++) {
+ struct folio *folio = folioq_folio(fq, slot);
+
+ if (!folio ||
+ !folioq_is_marked(fq, slot))
continue;
- if (folioq_is_marked(p, slot)) {
- trace_netfs_folio(folio, netfs_folio_trace_put);
- folio_put(folio);
- }
+
+ trace_netfs_folio(folio, netfs_folio_trace_put);
+ if (folio_batch_add(&fbatch, folio))
+ folio_batch_release(&fbatch);
}
+
netfs_stat_d(&netfs_n_folioq);
- kfree(p);
+ next = fq->next;
+ kfree(fq);
}
+
+ folio_batch_release(&fbatch);
}
+EXPORT_SYMBOL(netfs_free_folioq_buffer);
/*
* Reset the subrequest iterator to refer just to the region remaining to be
@@ -159,10 +147,10 @@ bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
if (!fscache_cookie_valid(cookie))
return true;
- if (!(inode->i_state & I_PINNING_NETFS_WB)) {
+ if (!(inode_state_read_once(inode) & I_PINNING_NETFS_WB)) {
spin_lock(&inode->i_lock);
- if (!(inode->i_state & I_PINNING_NETFS_WB)) {
- inode->i_state |= I_PINNING_NETFS_WB;
+ if (!(inode_state_read(inode) & I_PINNING_NETFS_WB)) {
+ inode_state_set(inode, I_PINNING_NETFS_WB);
need_use = true;
}
spin_unlock(&inode->i_lock);
@@ -204,7 +192,7 @@ void netfs_clear_inode_writeback(struct inode *inode, const void *aux)
{
struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
- if (inode->i_state & I_PINNING_NETFS_WB) {
+ if (inode_state_read_once(inode) & I_PINNING_NETFS_WB) {
loff_t i_size = i_size_read(inode);
fscache_unuse_cookie(cookie, aux, &i_size);
}
@@ -310,7 +298,7 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp)
if (folio_test_dirty(folio))
return false;
- end = umin(folio_pos(folio) + folio_size(folio), i_size_read(&ctx->inode));
+ end = umin(folio_next_pos(folio), i_size_read(&ctx->inode));
if (end > ctx->zero_point)
ctx->zero_point = end;
@@ -325,3 +313,234 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp)
return true;
}
EXPORT_SYMBOL(netfs_release_folio);
+
+/*
+ * Wake the collection work item.
+ */
+void netfs_wake_collector(struct netfs_io_request *rreq)
+{
+ if (test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags) &&
+ !test_bit(NETFS_RREQ_RETRYING, &rreq->flags)) {
+ queue_work(system_dfl_wq, &rreq->work);
+ } else {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_wake_queue);
+ wake_up(&rreq->waitq);
+ }
+}
+
+/*
+ * Mark a subrequest as no longer being in progress and, if need be, wake the
+ * collector.
+ */
+void netfs_subreq_clear_in_progress(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+ struct netfs_io_stream *stream = &rreq->io_streams[subreq->stream_nr];
+
+ clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+ smp_mb__after_atomic(); /* Clear IN_PROGRESS before task state */
+
+ /* If we are at the head of the queue, wake up the collector. */
+ if (list_is_first(&subreq->rreq_link, &stream->subrequests) ||
+ test_bit(NETFS_RREQ_RETRYING, &rreq->flags))
+ netfs_wake_collector(rreq);
+}
+
+/*
+ * Wait for all outstanding I/O in a stream to quiesce.
+ */
+void netfs_wait_for_in_progress_stream(struct netfs_io_request *rreq,
+ struct netfs_io_stream *stream)
+{
+ struct netfs_io_subrequest *subreq;
+ DEFINE_WAIT(myself);
+
+ list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
+ if (!netfs_check_subreq_in_progress(subreq))
+ continue;
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_wait_quiesce);
+ for (;;) {
+ prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
+
+ if (!netfs_check_subreq_in_progress(subreq))
+ break;
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_wait_for);
+ schedule();
+ }
+ }
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_waited_quiesce);
+ finish_wait(&rreq->waitq, &myself);
+}
+
+/*
+ * Perform collection in app thread if not offloaded to workqueue.
+ */
+static int netfs_collect_in_app(struct netfs_io_request *rreq,
+ bool (*collector)(struct netfs_io_request *rreq))
+{
+ bool need_collect = false, inactive = true, done = true;
+
+ if (!netfs_check_rreq_in_progress(rreq)) {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_recollect);
+ return 1; /* Done */
+ }
+
+ for (int i = 0; i < NR_IO_STREAMS; i++) {
+ struct netfs_io_subrequest *subreq;
+ struct netfs_io_stream *stream = &rreq->io_streams[i];
+
+ if (!stream->active)
+ continue;
+ inactive = false;
+ trace_netfs_collect_stream(rreq, stream);
+ subreq = list_first_entry_or_null(&stream->subrequests,
+ struct netfs_io_subrequest,
+ rreq_link);
+ if (subreq &&
+ (!netfs_check_subreq_in_progress(subreq) ||
+ test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
+ need_collect = true;
+ break;
+ }
+ if (subreq || !test_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags))
+ done = false;
+ }
+
+ if (!need_collect && !inactive && !done)
+ return 0; /* Sleep */
+
+ __set_current_state(TASK_RUNNING);
+ if (collector(rreq)) {
+ /* Drop the ref from the NETFS_RREQ_IN_PROGRESS flag. */
+ netfs_put_request(rreq, netfs_rreq_trace_put_work_ip);
+ return 1; /* Done */
+ }
+
+ if (inactive) {
+ WARN(true, "Failed to collect inactive req R=%08x\n",
+ rreq->debug_id);
+ cond_resched();
+ }
+ return 2; /* Again */
+}
+
+/*
+ * Wait for a request to complete, successfully or otherwise.
+ */
+static ssize_t netfs_wait_for_in_progress(struct netfs_io_request *rreq,
+ bool (*collector)(struct netfs_io_request *rreq))
+{
+ DEFINE_WAIT(myself);
+ ssize_t ret;
+
+ for (;;) {
+ prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
+
+ if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
+ switch (netfs_collect_in_app(rreq, collector)) {
+ case 0:
+ break;
+ case 1:
+ goto all_collected;
+ case 2:
+ if (!netfs_check_rreq_in_progress(rreq))
+ break;
+ cond_resched();
+ continue;
+ }
+ }
+
+ if (!netfs_check_rreq_in_progress(rreq))
+ break;
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
+ schedule();
+ }
+
+all_collected:
+ trace_netfs_rreq(rreq, netfs_rreq_trace_waited_ip);
+ finish_wait(&rreq->waitq, &myself);
+
+ ret = rreq->error;
+ if (ret == 0) {
+ ret = rreq->transferred;
+ switch (rreq->origin) {
+ case NETFS_DIO_READ:
+ case NETFS_DIO_WRITE:
+ case NETFS_READ_SINGLE:
+ case NETFS_UNBUFFERED_READ:
+ case NETFS_UNBUFFERED_WRITE:
+ break;
+ default:
+ if (rreq->submitted < rreq->len) {
+ trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+ ret = -EIO;
+ }
+ break;
+ }
+ }
+
+ return ret;
+}
+
+ssize_t netfs_wait_for_read(struct netfs_io_request *rreq)
+{
+ return netfs_wait_for_in_progress(rreq, netfs_read_collection);
+}
+
+ssize_t netfs_wait_for_write(struct netfs_io_request *rreq)
+{
+ return netfs_wait_for_in_progress(rreq, netfs_write_collection);
+}
+
+/*
+ * Wait for a paused operation to unpause or complete in some manner.
+ */
+static void netfs_wait_for_pause(struct netfs_io_request *rreq,
+ bool (*collector)(struct netfs_io_request *rreq))
+{
+ DEFINE_WAIT(myself);
+
+ for (;;) {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_wait_pause);
+ prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
+
+ if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
+ switch (netfs_collect_in_app(rreq, collector)) {
+ case 0:
+ break;
+ case 1:
+ goto all_collected;
+ case 2:
+ if (!netfs_check_rreq_in_progress(rreq) ||
+ !test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
+ break;
+ cond_resched();
+ continue;
+ }
+ }
+
+ if (!netfs_check_rreq_in_progress(rreq) ||
+ !test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
+ break;
+
+ schedule();
+ }
+
+all_collected:
+ trace_netfs_rreq(rreq, netfs_rreq_trace_waited_pause);
+ finish_wait(&rreq->waitq, &myself);
+}
+
+void netfs_wait_for_paused_read(struct netfs_io_request *rreq)
+{
+ return netfs_wait_for_pause(rreq, netfs_read_collection);
+}
+
+void netfs_wait_for_paused_write(struct netfs_io_request *rreq)
+{
+ return netfs_wait_for_pause(rreq, netfs_write_collection);
+}
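
netfs_alloc_folioq_buffer() above sizes each folio from the remaining shortfall: the largest power-of-two folio that does not overshoot the target, capped at MAX_PAGECACHE_ORDER, with a fallback to an order-0 allocation if the large one fails. A small sketch of the order calculation, assuming 4KiB pages and an illustrative cap of 8 (the real MAX_PAGECACHE_ORDER is architecture-dependent):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT		12		/* assumed: 4KiB pages */
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define MAX_PAGECACHE_ORDER	8		/* illustrative value only */

static unsigned int ilog2_ul(unsigned long v)
{
	return (8 * sizeof(v) - 1) - __builtin_clzl(v);
}

/* Mirror of the order choice in netfs_alloc_folioq_buffer(): pick the
 * largest folio that still fits in the remaining shortfall, capped at the
 * pagecache maximum; single pages for anything smaller. */
static unsigned int pick_order(size_t cur_size, size_t target)
{
	size_t remaining = target - cur_size;
	unsigned int order = 0;

	if (remaining > PAGE_SIZE)
		order = ilog2_ul(remaining) - PAGE_SHIFT;
	if (order > MAX_PAGECACHE_ORDER)
		order = MAX_PAGECACHE_ORDER;
	return order;
}

int main(void)
{
	size_t sizes[] = { 4096, 8192, 65536, 1 << 20, 8 << 20 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("remaining %8zu -> order %u (folio %lu bytes)\n",
		       sizes[i], pick_order(0, sizes[i]),
		       PAGE_SIZE << pick_order(0, sizes[i]));
	return 0;
}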
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index 31e388ec6e48..b8c4918d3dcd 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -10,6 +10,8 @@
#include <linux/delay.h>
#include "internal.h"
+static void netfs_free_request(struct work_struct *work);
+
/*
* Allocate an I/O request and initialise it.
*/
@@ -34,6 +36,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
}
memset(rreq, 0, kmem_cache_size(cache));
+ INIT_WORK(&rreq->cleanup_work, netfs_free_request);
rreq->start = start;
rreq->len = len;
rreq->origin = origin;
@@ -48,21 +51,23 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
spin_lock_init(&rreq->lock);
INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
- INIT_LIST_HEAD(&rreq->subrequests);
- refcount_set(&rreq->ref, 1);
+ init_waitqueue_head(&rreq->waitq);
+ refcount_set(&rreq->ref, 2);
if (origin == NETFS_READAHEAD ||
origin == NETFS_READPAGE ||
origin == NETFS_READ_GAPS ||
+ origin == NETFS_READ_SINGLE ||
origin == NETFS_READ_FOR_WRITE ||
- origin == NETFS_DIO_READ)
- INIT_WORK(&rreq->work, netfs_read_termination_worker);
- else
+ origin == NETFS_UNBUFFERED_READ ||
+ origin == NETFS_DIO_READ) {
+ INIT_WORK(&rreq->work, netfs_read_collection_worker);
+ rreq->io_streams[0].avail = true;
+ } else {
INIT_WORK(&rreq->work, netfs_write_collection_worker);
+ }
__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
- if (file && file->f_flags & O_NONBLOCK)
- __set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
if (rreq->netfs_ops->init_request) {
ret = rreq->netfs_ops->init_request(rreq, file);
if (ret < 0) {
@@ -72,7 +77,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
}
atomic_inc(&ctx->io_count);
- trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new);
+ trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), netfs_rreq_trace_new);
netfs_proc_add_rreq(rreq);
netfs_stat(&netfs_n_rh_rreq);
return rreq;
@@ -86,28 +91,19 @@ void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace
trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
}
-void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
+void netfs_clear_subrequests(struct netfs_io_request *rreq)
{
struct netfs_io_subrequest *subreq;
struct netfs_io_stream *stream;
int s;
- while (!list_empty(&rreq->subrequests)) {
- subreq = list_first_entry(&rreq->subrequests,
- struct netfs_io_subrequest, rreq_link);
- list_del(&subreq->rreq_link);
- netfs_put_subrequest(subreq, was_async,
- netfs_sreq_trace_put_clear);
- }
-
for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) {
stream = &rreq->io_streams[s];
while (!list_empty(&stream->subrequests)) {
subreq = list_first_entry(&stream->subrequests,
struct netfs_io_subrequest, rreq_link);
list_del(&subreq->rreq_link);
- netfs_put_subrequest(subreq, was_async,
- netfs_sreq_trace_put_clear);
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_clear);
}
}
}
@@ -120,16 +116,20 @@ static void netfs_free_request_rcu(struct rcu_head *rcu)
netfs_stat_d(&netfs_n_rh_rreq);
}
-static void netfs_free_request(struct work_struct *work)
+static void netfs_deinit_request(struct netfs_io_request *rreq)
{
- struct netfs_io_request *rreq =
- container_of(work, struct netfs_io_request, work);
struct netfs_inode *ictx = netfs_inode(rreq->inode);
unsigned int i;
trace_netfs_rreq(rreq, netfs_rreq_trace_free);
+
+ /* Cancel/flush the result collection worker. That does not carry a
+ * ref of its own, so we must wait for it somewhere.
+ */
+ cancel_work_sync(&rreq->work);
+
netfs_proc_del_rreq(rreq);
- netfs_clear_subrequests(rreq, false);
+ netfs_clear_subrequests(rreq);
if (rreq->netfs_ops->free_request)
rreq->netfs_ops->free_request(rreq);
if (rreq->cache_resources.ops)
@@ -143,15 +143,22 @@ static void netfs_free_request(struct work_struct *work)
}
kvfree(rreq->direct_bv);
}
- netfs_clear_buffer(rreq);
+ rolling_buffer_clear(&rreq->buffer);
if (atomic_dec_and_test(&ictx->io_count))
wake_up_var(&ictx->io_count);
+}
+
+static void netfs_free_request(struct work_struct *work)
+{
+ struct netfs_io_request *rreq =
+ container_of(work, struct netfs_io_request, cleanup_work);
+
+ netfs_deinit_request(rreq);
call_rcu(&rreq->rcu, netfs_free_request_rcu);
}
-void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
- enum netfs_rreq_ref_trace what)
+void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
{
unsigned int debug_id;
bool dead;
@@ -161,19 +168,30 @@ void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
debug_id = rreq->debug_id;
dead = __refcount_dec_and_test(&rreq->ref, &r);
trace_netfs_rreq_ref(debug_id, r - 1, what);
- if (dead) {
- if (was_async) {
- rreq->work.func = netfs_free_request;
- if (!queue_work(system_unbound_wq, &rreq->work))
- WARN_ON(1);
- } else {
- netfs_free_request(&rreq->work);
- }
- }
+ if (dead)
+ WARN_ON(!queue_work(system_dfl_wq, &rreq->cleanup_work));
}
}
/*
+ * Free a request (synchronously) that was just allocated but has
+ * failed before it could be submitted.
+ */
+void netfs_put_failed_request(struct netfs_io_request *rreq)
+{
+ int r = refcount_read(&rreq->ref);
+
+ /* New requests carry two references (see netfs_alloc_request());
+ * this function may only be used on a request that has not yet
+ * been submitted.
+ */
+ WARN_ON_ONCE(r != 2);
+
+ trace_netfs_rreq_ref(rreq->debug_id, r, netfs_rreq_trace_put_failed);
+ netfs_free_request(&rreq->cleanup_work);
+}
+
+/*
* Allocate and partially initialise an I/O request structure.
*/
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
@@ -211,8 +229,7 @@ void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
what);
}
-static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
- bool was_async)
+static void netfs_free_subrequest(struct netfs_io_subrequest *subreq)
{
struct netfs_io_request *rreq = subreq->rreq;
@@ -221,10 +238,10 @@ static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
rreq->netfs_ops->free_subrequest(subreq);
mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool);
netfs_stat_d(&netfs_n_rh_sreq);
- netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
+ netfs_put_request(rreq, netfs_rreq_trace_put_subreq);
}
-void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
+void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
enum netfs_sreq_ref_trace what)
{
unsigned int debug_index = subreq->debug_index;
@@ -235,5 +252,5 @@ void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
dead = __refcount_dec_and_test(&subreq->ref, &r);
trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
if (dead)
- netfs_free_subrequest(subreq, was_async);
+ netfs_free_subrequest(subreq);
}
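
Requests are now allocated with a reference count of 2: one reference belongs to the caller and one is pinned by NETFS_RREQ_IN_PROGRESS and dropped at collection time, while netfs_put_failed_request() covers the case where a request fails before submission and both references are still local. A toy userspace sketch of that two-reference lifecycle, with all names hypothetical:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy request following the same two-reference contract: one ref for the
 * caller, one pinned by the in-progress state and dropped on collection. */
struct toy_req {
	atomic_int ref;
	int in_progress;
};

static struct toy_req *toy_alloc(void)
{
	struct toy_req *rq = calloc(1, sizeof(*rq));

	atomic_init(&rq->ref, 2);	/* caller ref + in-progress ref */
	rq->in_progress = 1;
	return rq;
}

static void toy_put(struct toy_req *rq)
{
	if (atomic_fetch_sub(&rq->ref, 1) == 1) {
		printf("freeing request\n");
		free(rq);
	}
}

/* Failure before submission: both refs are still ours, so free directly. */
static void toy_put_failed(struct toy_req *rq)
{
	assert(atomic_load(&rq->ref) == 2);
	free(rq);
}

int main(void)
{
	struct toy_req *failed = toy_alloc();
	struct toy_req *ok = toy_alloc();

	/* Failure before submission (e.g. an init error). */
	toy_put_failed(failed);

	/* Normal lifecycle: collection drops the in-progress ref, then the
	 * caller drops its own. */
	ok->in_progress = 0;
	toy_put(ok);	/* collector's ref */
	toy_put(ok);	/* caller's ref */
	return 0;
}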
diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
index 3cbb289535a8..a95e7aadafd0 100644
--- a/fs/netfs/read_collect.c
+++ b/fs/netfs/read_collect.c
@@ -14,6 +14,14 @@
#include <linux/task_io_accounting_ops.h>
#include "internal.h"
+/* Notes made in the collector */
+#define HIT_PENDING 0x01 /* A front op was still pending */
+#define MADE_PROGRESS 0x04 /* Made progress cleaning up a stream or the folio set */
+#define BUFFERED 0x08 /* The pagecache needs cleaning up */
+#define NEED_RETRY 0x10 /* A front op requests retrying */
+#define COPY_TO_CACHE 0x40 /* Need to copy subrequest to cache */
+#define ABANDON_SREQ 0x80 /* Need to abandon untransferred part of subrequest */
+
/*
* Clear the unread part of an I/O request.
*/
@@ -31,14 +39,18 @@ static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
* cache the folio, we set the group to NETFS_FOLIO_COPY_TO_CACHE, mark it
* dirty and let writeback handle it.
*/
-static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
- struct netfs_io_request *rreq,
+static void netfs_unlock_read_folio(struct netfs_io_request *rreq,
struct folio_queue *folioq,
int slot)
{
struct netfs_folio *finfo;
struct folio *folio = folioq_folio(folioq, slot);
+ if (unlikely(folio_pos(folio) < rreq->abandon_to)) {
+ trace_netfs_folio(folio, netfs_folio_trace_abandon);
+ goto just_unlock;
+ }
+
flush_dcache_folio(folio);
folio_mark_uptodate(folio);
@@ -53,7 +65,7 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
kfree(finfo);
}
- if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+ if (test_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags)) {
if (!WARN_ON_ONCE(folio_get_private(folio) != NULL)) {
trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE);
@@ -62,254 +74,267 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
} else {
trace_netfs_folio(folio, netfs_folio_trace_read_done);
}
+
+ folioq_clear(folioq, slot);
} else {
// TODO: Use of PG_private_2 is deprecated.
- if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
- netfs_pgpriv2_mark_copy_to_cache(subreq, rreq, folioq, slot);
+ if (test_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags))
+ netfs_pgpriv2_copy_to_cache(rreq, folio);
}
- if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
- if (folio->index == rreq->no_unlock_folio &&
- test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags)) {
- _debug("no unlock");
- } else {
- trace_netfs_folio(folio, netfs_folio_trace_read_unlock);
- folio_unlock(folio);
- }
+just_unlock:
+ if (folio->index == rreq->no_unlock_folio &&
+ test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags)) {
+ _debug("no unlock");
+ } else {
+ trace_netfs_folio(folio, netfs_folio_trace_read_unlock);
+ folio_unlock(folio);
}
folioq_clear(folioq, slot);
}
/*
- * Unlock any folios that are now completely read. Returns true if the
- * subrequest is removed from the list.
+ * Unlock any folios we've finished with.
*/
-static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq, bool was_async)
+static void netfs_read_unlock_folios(struct netfs_io_request *rreq,
+ unsigned int *notes)
{
- struct netfs_io_subrequest *prev, *next;
- struct netfs_io_request *rreq = subreq->rreq;
- struct folio_queue *folioq = subreq->curr_folioq;
- size_t avail, prev_donated, next_donated, fsize, part, excess;
- loff_t fpos, start;
- loff_t fend;
- int slot = subreq->curr_folioq_slot;
-
- if (WARN(subreq->transferred > subreq->len,
- "Subreq overread: R%x[%x] %zu > %zu",
- rreq->debug_id, subreq->debug_index,
- subreq->transferred, subreq->len))
- subreq->transferred = subreq->len;
-
-next_folio:
- fsize = PAGE_SIZE << subreq->curr_folio_order;
- fpos = round_down(subreq->start + subreq->consumed, fsize);
- fend = fpos + fsize;
-
- if (WARN_ON_ONCE(!folioq) ||
- WARN_ON_ONCE(!folioq_folio(folioq, slot)) ||
- WARN_ON_ONCE(folioq_folio(folioq, slot)->index != fpos / PAGE_SIZE)) {
- pr_err("R=%08x[%x] s=%llx-%llx ctl=%zx/%zx/%zx sl=%u\n",
- rreq->debug_id, subreq->debug_index,
- subreq->start, subreq->start + subreq->transferred - 1,
- subreq->consumed, subreq->transferred, subreq->len,
- slot);
- if (folioq) {
- struct folio *folio = folioq_folio(folioq, slot);
-
- pr_err("folioq: orders=%02x%02x%02x%02x\n",
- folioq->orders[0], folioq->orders[1],
- folioq->orders[2], folioq->orders[3]);
- if (folio)
- pr_err("folio: %llx-%llx ix=%llx o=%u qo=%u\n",
- fpos, fend - 1, folio_pos(folio), folio_order(folio),
- folioq_folio_order(folioq, slot));
- }
- }
+ struct folio_queue *folioq = rreq->buffer.tail;
+ unsigned long long collected_to = rreq->collected_to;
+ unsigned int slot = rreq->buffer.first_tail_slot;
-donation_changed:
- /* Try to consume the current folio if we've hit or passed the end of
- * it. There's a possibility that this subreq doesn't start at the
- * beginning of the folio, in which case we need to donate to/from the
- * preceding subreq.
- *
- * We also need to include any potential donation back from the
- * following subreq.
- */
- prev_donated = READ_ONCE(subreq->prev_donated);
- next_donated = READ_ONCE(subreq->next_donated);
- if (prev_donated || next_donated) {
- spin_lock_bh(&rreq->lock);
- prev_donated = subreq->prev_donated;
- next_donated = subreq->next_donated;
- subreq->start -= prev_donated;
- subreq->len += prev_donated;
- subreq->transferred += prev_donated;
- prev_donated = subreq->prev_donated = 0;
- if (subreq->transferred == subreq->len) {
- subreq->len += next_donated;
- subreq->transferred += next_donated;
- next_donated = subreq->next_donated = 0;
+ if (rreq->cleaned_to >= rreq->collected_to)
+ return;
+
+ // TODO: Begin decryption
+
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = rolling_buffer_delete_spent(&rreq->buffer);
+ if (!folioq) {
+ rreq->front_folio_order = 0;
+ return;
}
- trace_netfs_sreq(subreq, netfs_sreq_trace_add_donations);
- spin_unlock_bh(&rreq->lock);
+ slot = 0;
}
- avail = subreq->transferred;
- if (avail == subreq->len)
- avail += next_donated;
- start = subreq->start;
- if (subreq->consumed == 0) {
- start -= prev_donated;
- avail += prev_donated;
- } else {
- start += subreq->consumed;
- avail -= subreq->consumed;
- }
- part = umin(avail, fsize);
-
- trace_netfs_progress(subreq, start, avail, part);
-
- if (start + avail >= fend) {
- if (fpos == start) {
- /* Flush, unlock and mark for caching any folio we've just read. */
- subreq->consumed = fend - subreq->start;
- netfs_unlock_read_folio(subreq, rreq, folioq, slot);
- folioq_mark2(folioq, slot);
- if (subreq->consumed >= subreq->len)
- goto remove_subreq;
- } else if (fpos < start) {
- excess = fend - subreq->start;
-
- spin_lock_bh(&rreq->lock);
- /* If we complete first on a folio split with the
- * preceding subreq, donate to that subreq - otherwise
- * we get the responsibility.
- */
- if (subreq->prev_donated != prev_donated) {
- spin_unlock_bh(&rreq->lock);
- goto donation_changed;
- }
+ for (;;) {
+ struct folio *folio;
+ unsigned long long fpos, fend;
+ unsigned int order;
+ size_t fsize;
- if (list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
- spin_unlock_bh(&rreq->lock);
- pr_err("Can't donate prior to front\n");
- goto bad;
- }
+ if (*notes & COPY_TO_CACHE)
+ set_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
- prev = list_prev_entry(subreq, rreq_link);
- WRITE_ONCE(prev->next_donated, prev->next_donated + excess);
- subreq->start += excess;
- subreq->len -= excess;
- subreq->transferred -= excess;
- trace_netfs_donate(rreq, subreq, prev, excess,
- netfs_trace_donate_tail_to_prev);
- trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev);
-
- if (subreq->consumed >= subreq->len)
- goto remove_subreq_locked;
- spin_unlock_bh(&rreq->lock);
- } else {
- pr_err("fpos > start\n");
- goto bad;
- }
+ folio = folioq_folio(folioq, slot);
+ if (WARN_ONCE(!folio_test_locked(folio),
+ "R=%08x: folio %lx is not locked\n",
+ rreq->debug_id, folio->index))
+ trace_netfs_folio(folio, netfs_folio_trace_not_locked);
+
+ order = folioq_folio_order(folioq, slot);
+ rreq->front_folio_order = order;
+ fsize = PAGE_SIZE << order;
+ fpos = folio_pos(folio);
+ fend = umin(fpos + fsize, rreq->i_size);
+
+ trace_netfs_collect_folio(rreq, folio, fend, collected_to);
+
+ /* Unlock any folio we've transferred all of. */
+ if (collected_to < fend)
+ break;
- /* Advance the rolling buffer to the next folio. */
+ netfs_unlock_read_folio(rreq, folioq, slot);
+ WRITE_ONCE(rreq->cleaned_to, fpos + fsize);
+ *notes |= MADE_PROGRESS;
+
+ clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
+
+ /* Clean up the head folioq. If we clear an entire folioq, then
+ * we can get rid of it provided it's not also the tail folioq
+ * being filled by the issuer.
+ */
+ folioq_clear(folioq, slot);
slot++;
if (slot >= folioq_nr_slots(folioq)) {
+ folioq = rolling_buffer_delete_spent(&rreq->buffer);
+ if (!folioq)
+ goto done;
slot = 0;
- folioq = folioq->next;
- subreq->curr_folioq = folioq;
+ trace_netfs_folioq(folioq, netfs_trace_folioq_read_progress);
}
- subreq->curr_folioq_slot = slot;
- if (folioq && folioq_folio(folioq, slot))
- subreq->curr_folio_order = folioq->orders[slot];
- if (!was_async)
- cond_resched();
- goto next_folio;
+
+ if (fpos + fsize >= collected_to)
+ break;
}
- /* Deal with partial progress. */
- if (subreq->transferred < subreq->len)
- return false;
+ rreq->buffer.tail = folioq;
+done:
+ rreq->buffer.first_tail_slot = slot;
+}
- /* Donate the remaining downloaded data to one of the neighbouring
- * subrequests. Note that we may race with them doing the same thing.
+/*
+ * Collect and assess the results of various read subrequests. We may need to
+ * retry some of the results.
+ *
+ * Note that we have a sequence of subrequests, which may be drawing on
+ * different sources and may or may not be the same size or starting position
+ * and may not even correspond in boundary alignment.
+ */
+static void netfs_collect_read_results(struct netfs_io_request *rreq)
+{
+ struct netfs_io_subrequest *front, *remove;
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+ unsigned int notes;
+
+ _enter("%llx-%llx", rreq->start, rreq->start + rreq->len);
+ trace_netfs_rreq(rreq, netfs_rreq_trace_collect);
+ trace_netfs_collect(rreq);
+
+reassess:
+ if (rreq->origin == NETFS_READAHEAD ||
+ rreq->origin == NETFS_READPAGE ||
+ rreq->origin == NETFS_READ_FOR_WRITE)
+ notes = BUFFERED;
+ else
+ notes = 0;
+
+ /* Remove completed subrequests from the front of the stream and
+ * advance the completion point. We stop when we hit something that's
+ * in progress. The issuer thread may be adding stuff to the tail
+ * whilst we're doing this.
*/
- spin_lock_bh(&rreq->lock);
+ front = READ_ONCE(stream->front);
+ while (front) {
+ size_t transferred;
- if (subreq->prev_donated != prev_donated ||
- subreq->next_donated != next_donated) {
- spin_unlock_bh(&rreq->lock);
- cond_resched();
- goto donation_changed;
- }
+ trace_netfs_collect_sreq(rreq, front);
+ _debug("sreq [%x] %llx %zx/%zx",
+ front->debug_index, front->start, front->transferred, front->len);
- /* Deal with the trickiest case: that this subreq is in the middle of a
- * folio, not touching either edge, but finishes first. In such a
- * case, we donate to the previous subreq, if there is one, so that the
- * donation is only handled when that completes - and remove this
- * subreq from the list.
- *
- * If the previous subreq finished first, we will have acquired their
- * donation and should be able to unlock folios and/or donate nextwards.
- */
- if (!subreq->consumed &&
- !prev_donated &&
- !list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
- prev = list_prev_entry(subreq, rreq_link);
- WRITE_ONCE(prev->next_donated, prev->next_donated + subreq->len);
- subreq->start += subreq->len;
- subreq->len = 0;
- subreq->transferred = 0;
- trace_netfs_donate(rreq, subreq, prev, subreq->len,
- netfs_trace_donate_to_prev);
- trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev);
- goto remove_subreq_locked;
+ if (stream->collected_to < front->start) {
+ trace_netfs_collect_gap(rreq, stream, front->start, 'F');
+ stream->collected_to = front->start;
+ }
+
+ if (netfs_check_subreq_in_progress(front))
+ notes |= HIT_PENDING;
+ smp_rmb(); /* Read counters after IN_PROGRESS flag. */
+ transferred = READ_ONCE(front->transferred);
+
+ /* If we can now collect the next folio, do so. We don't want
+ * to defer this as we have to decide whether we need to copy
+ * to the cache or not, and that may differ between adjacent
+ * subreqs.
+ */
+ if (notes & BUFFERED) {
+ size_t fsize = PAGE_SIZE << rreq->front_folio_order;
+
+ /* Clear the tail of a short read. */
+ if (!(notes & HIT_PENDING) &&
+ front->error == 0 &&
+ transferred < front->len &&
+ (test_bit(NETFS_SREQ_HIT_EOF, &front->flags) ||
+ test_bit(NETFS_SREQ_CLEAR_TAIL, &front->flags))) {
+ netfs_clear_unread(front);
+ transferred = front->transferred = front->len;
+ trace_netfs_sreq(front, netfs_sreq_trace_clear);
+ }
+
+ stream->collected_to = front->start + transferred;
+ rreq->collected_to = stream->collected_to;
+
+ if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &front->flags))
+ notes |= COPY_TO_CACHE;
+
+ if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
+ rreq->abandon_to = front->start + front->len;
+ front->transferred = front->len;
+ transferred = front->len;
+ trace_netfs_rreq(rreq, netfs_rreq_trace_set_abandon);
+ }
+ if (front->start + transferred >= rreq->cleaned_to + fsize ||
+ test_bit(NETFS_SREQ_HIT_EOF, &front->flags))
+ netfs_read_unlock_folios(rreq, &notes);
+ } else {
+ stream->collected_to = front->start + transferred;
+ rreq->collected_to = stream->collected_to;
+ }
+
+ /* Stall if the front is still undergoing I/O. */
+ if (notes & HIT_PENDING)
+ break;
+
+ if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
+ if (!stream->failed) {
+ stream->error = front->error;
+ rreq->error = front->error;
+ set_bit(NETFS_RREQ_FAILED, &rreq->flags);
+ stream->failed = true;
+ }
+ notes |= MADE_PROGRESS | ABANDON_SREQ;
+ } else if (test_bit(NETFS_SREQ_NEED_RETRY, &front->flags)) {
+ stream->need_retry = true;
+ notes |= NEED_RETRY | MADE_PROGRESS;
+ break;
+ } else if (test_bit(NETFS_RREQ_SHORT_TRANSFER, &rreq->flags)) {
+ notes |= MADE_PROGRESS;
+ } else {
+ if (!stream->failed) {
+ stream->transferred += transferred;
+ stream->transferred_valid = true;
+ }
+ if (front->transferred < front->len)
+ set_bit(NETFS_RREQ_SHORT_TRANSFER, &rreq->flags);
+ notes |= MADE_PROGRESS;
+ }
+
+ /* Remove if completely consumed. */
+ stream->source = front->source;
+ spin_lock(&rreq->lock);
+
+ remove = front;
+ trace_netfs_sreq(front,
+ notes & ABANDON_SREQ ?
+ netfs_sreq_trace_abandoned : netfs_sreq_trace_consumed);
+ list_del_init(&front->rreq_link);
+ front = list_first_entry_or_null(&stream->subrequests,
+ struct netfs_io_subrequest, rreq_link);
+ stream->front = front;
+ spin_unlock(&rreq->lock);
+ netfs_put_subrequest(remove,
+ notes & ABANDON_SREQ ?
+ netfs_sreq_trace_put_abandon :
+ netfs_sreq_trace_put_done);
}
- /* If we can't donate down the chain, donate up the chain instead. */
- excess = subreq->len - subreq->consumed + next_donated;
+ trace_netfs_collect_stream(rreq, stream);
+ trace_netfs_collect_state(rreq, rreq->collected_to, notes);
- if (!subreq->consumed)
- excess += prev_donated;
+ if (!(notes & BUFFERED))
+ rreq->cleaned_to = rreq->collected_to;
- if (list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
- rreq->prev_donated = excess;
- trace_netfs_donate(rreq, subreq, NULL, excess,
- netfs_trace_donate_to_deferred_next);
- } else {
- next = list_next_entry(subreq, rreq_link);
- WRITE_ONCE(next->prev_donated, excess);
- trace_netfs_donate(rreq, subreq, next, excess,
- netfs_trace_donate_to_next);
+ if (notes & NEED_RETRY)
+ goto need_retry;
+ if (notes & MADE_PROGRESS) {
+ netfs_wake_rreq_flag(rreq, NETFS_RREQ_PAUSE, netfs_rreq_trace_unpause);
+ //cond_resched();
+ goto reassess;
}
- trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_next);
- subreq->len = subreq->consumed;
- subreq->transferred = subreq->consumed;
- goto remove_subreq_locked;
-
-remove_subreq:
- spin_lock_bh(&rreq->lock);
-remove_subreq_locked:
- subreq->consumed = subreq->len;
- list_del(&subreq->rreq_link);
- spin_unlock_bh(&rreq->lock);
- netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_consumed);
- return true;
-bad:
- /* Errr... prev and next both donated to us, but insufficient to finish
- * the folio.
+out:
+ _leave(" = %x", notes);
+ return;
+
+need_retry:
+ /* Okay... We're going to have to retry parts of the stream. Note
+ * that any partially completed op will have had any wholly transferred
+ * folios removed from it.
*/
- printk("R=%08x[%x] s=%llx-%llx %zx/%zx/%zx\n",
- rreq->debug_id, subreq->debug_index,
- subreq->start, subreq->start + subreq->transferred - 1,
- subreq->consumed, subreq->transferred, subreq->len);
- printk("folio: %llx-%llx\n", fpos, fend - 1);
- printk("donated: prev=%zx next=%zx\n", prev_donated, next_donated);
- printk("s=%llx av=%zx part=%zx\n", start, avail, part);
- BUG();
+ _debug("retry");
+ netfs_retry_reads(rreq);
+ goto out;
}
/*
@@ -317,23 +342,10 @@ bad:
*/
static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
{
- struct netfs_io_subrequest *subreq;
unsigned int i;
- /* Collect unbuffered reads and direct reads, adding up the transfer
- * sizes until we find the first short or failed subrequest.
- */
- list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
- rreq->transferred += subreq->transferred;
-
- if (subreq->transferred < subreq->len ||
- test_bit(NETFS_SREQ_FAILED, &subreq->flags)) {
- rreq->error = subreq->error;
- break;
- }
- }
-
- if (rreq->origin == NETFS_DIO_READ) {
+ if (rreq->origin == NETFS_UNBUFFERED_READ ||
+ rreq->origin == NETFS_DIO_READ) {
for (i = 0; i < rreq->direct_bv_count; i++) {
flush_dcache_page(rreq->direct_bv[i].bv_page);
// TODO: cifs marks pages in the destination buffer
@@ -345,100 +357,140 @@ static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
if (rreq->iocb) {
rreq->iocb->ki_pos += rreq->transferred;
- if (rreq->iocb->ki_complete)
+ if (rreq->iocb->ki_complete) {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_ki_complete);
rreq->iocb->ki_complete(
rreq->iocb, rreq->error ? rreq->error : rreq->transferred);
+ }
}
if (rreq->netfs_ops->done)
rreq->netfs_ops->done(rreq);
- if (rreq->origin == NETFS_DIO_READ)
+ if (rreq->origin == NETFS_UNBUFFERED_READ ||
+ rreq->origin == NETFS_DIO_READ)
inode_dio_end(rreq->inode);
}
/*
- * Assess the state of a read request and decide what to do next.
+ * Do processing after reading a monolithic single object.
+ */
+static void netfs_rreq_assess_single(struct netfs_io_request *rreq)
+{
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+
+ if (!rreq->error && stream->source == NETFS_DOWNLOAD_FROM_SERVER &&
+ fscache_resources_valid(&rreq->cache_resources)) {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_dirty);
+ netfs_single_mark_inode_dirty(rreq->inode);
+ }
+
+ if (rreq->iocb) {
+ rreq->iocb->ki_pos += rreq->transferred;
+ if (rreq->iocb->ki_complete) {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_ki_complete);
+ rreq->iocb->ki_complete(
+ rreq->iocb, rreq->error ? rreq->error : rreq->transferred);
+ }
+ }
+ if (rreq->netfs_ops->done)
+ rreq->netfs_ops->done(rreq);
+}
+
+/*
+ * Perform the collection of subrequests and folios.
*
* Note that we're in normal kernel thread context at this point, possibly
* running on a workqueue.
*/
-static void netfs_rreq_assess(struct netfs_io_request *rreq)
+bool netfs_read_collection(struct netfs_io_request *rreq)
{
- trace_netfs_rreq(rreq, netfs_rreq_trace_assess);
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
- //netfs_rreq_is_still_valid(rreq);
+ netfs_collect_read_results(rreq);
- if (test_and_clear_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags)) {
- netfs_retry_reads(rreq);
- return;
- }
+ /* We're done when the app thread has finished posting subreqs and the
+ * queue is empty.
+ */
+ if (!test_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags))
+ return false;
+ smp_rmb(); /* Read ALL_QUEUED before subreq lists. */
- if (rreq->origin == NETFS_DIO_READ ||
- rreq->origin == NETFS_READ_GAPS)
+ if (!list_empty(&stream->subrequests))
+ return false;
+
+ /* Okay, declare that all I/O is complete. */
+ rreq->transferred = stream->transferred;
+ trace_netfs_rreq(rreq, netfs_rreq_trace_complete);
+
+ //netfs_rreq_is_still_valid(rreq);
+
+ switch (rreq->origin) {
+ case NETFS_UNBUFFERED_READ:
+ case NETFS_DIO_READ:
+ case NETFS_READ_GAPS:
netfs_rreq_assess_dio(rreq);
+ break;
+ case NETFS_READ_SINGLE:
+ netfs_rreq_assess_single(rreq);
+ break;
+ default:
+ break;
+ }
task_io_account_read(rreq->transferred);
- trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
- clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
- wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
+ netfs_wake_rreq_flag(rreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
+ /* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */
trace_netfs_rreq(rreq, netfs_rreq_trace_done);
- netfs_clear_subrequests(rreq, false);
+ netfs_clear_subrequests(rreq);
netfs_unlock_abandoned_read_pages(rreq);
- if (unlikely(test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)))
- netfs_pgpriv2_write_to_the_cache(rreq);
+ if (unlikely(rreq->copy_to_cache))
+ netfs_pgpriv2_end_copy_to_cache(rreq);
+ return true;
}
-void netfs_read_termination_worker(struct work_struct *work)
+void netfs_read_collection_worker(struct work_struct *work)
{
- struct netfs_io_request *rreq =
- container_of(work, struct netfs_io_request, work);
- netfs_see_request(rreq, netfs_rreq_trace_see_work);
- netfs_rreq_assess(rreq);
- netfs_put_request(rreq, false, netfs_rreq_trace_put_work_complete);
-}
+ struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work);
-/*
- * Handle the completion of all outstanding I/O operations on a read request.
- * We inherit a ref from the caller.
- */
-void netfs_rreq_terminated(struct netfs_io_request *rreq, bool was_async)
-{
- if (!was_async)
- return netfs_rreq_assess(rreq);
- if (!work_pending(&rreq->work)) {
- netfs_get_request(rreq, netfs_rreq_trace_get_work);
- if (!queue_work(system_unbound_wq, &rreq->work))
- netfs_put_request(rreq, was_async, netfs_rreq_trace_put_work_nq);
+ netfs_see_request(rreq, netfs_rreq_trace_see_work);
+ if (netfs_check_rreq_in_progress(rreq)) {
+ if (netfs_read_collection(rreq))
+ /* Drop the ref from the IN_PROGRESS flag. */
+ netfs_put_request(rreq, netfs_rreq_trace_put_work_ip);
+ else
+ netfs_see_request(rreq, netfs_rreq_trace_see_work_complete);
}
}
/**
* netfs_read_subreq_progress - Note progress of a read operation.
* @subreq: The read request that has terminated.
- * @was_async: True if we're in an asynchronous context.
*
* This tells the read side of netfs lib that a contributory I/O operation has
* made some progress and that it may be possible to unlock some folios.
*
* Before calling, the filesystem should update subreq->transferred to track
* the amount of data copied into the output buffer.
- *
- * If @was_async is true, the caller might be running in softirq or interrupt
- * context and we can't sleep.
*/
-void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq,
- bool was_async)
+void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq)
{
struct netfs_io_request *rreq = subreq->rreq;
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+ size_t fsize = PAGE_SIZE << rreq->front_folio_order;
trace_netfs_sreq(subreq, netfs_sreq_trace_progress);
- if (subreq->transferred > subreq->consumed &&
+ /* If we are at the head of the queue, wake up the collector,
+ * getting a ref to it if we were the ones to do so.
+ */
+ if (subreq->start + subreq->transferred > rreq->cleaned_to + fsize &&
(rreq->origin == NETFS_READAHEAD ||
rreq->origin == NETFS_READPAGE ||
- rreq->origin == NETFS_READ_FOR_WRITE)) {
- netfs_consume_read_data(subreq, was_async);
- __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+ rreq->origin == NETFS_READ_FOR_WRITE) &&
+ list_is_first(&subreq->rreq_link, &stream->subrequests)
+ ) {
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ netfs_wake_collector(rreq);
}
}
EXPORT_SYMBOL(netfs_read_subreq_progress);
@@ -446,25 +498,20 @@ EXPORT_SYMBOL(netfs_read_subreq_progress);
/**
* netfs_read_subreq_terminated - Note the termination of an I/O operation.
* @subreq: The I/O request that has terminated.
- * @error: Error code indicating type of completion.
- * @was_async: The termination was asynchronous
*
* This tells the read helper that a contributory I/O operation has terminated,
* one way or another, and that it should integrate the results.
*
- * The caller indicates the outcome of the operation through @error, supplying
- * 0 to indicate a successful or retryable transfer (if NETFS_SREQ_NEED_RETRY
- * is set) or a negative error code. The helper will look after reissuing I/O
- * operations as appropriate and writing downloaded data to the cache.
+ * The caller indicates the outcome of the operation through @subreq->error,
+ * supplying 0 to indicate a successful or retryable transfer (if
+ * NETFS_SREQ_NEED_RETRY is set) or a negative error code. The helper will
+ * look after reissuing I/O operations as appropriate and writing downloaded
+ * data to the cache.
*
* Before calling, the filesystem should update subreq->transferred to track
* the amount of data copied into the output buffer.
- *
- * If @was_async is true, the caller might be running in softirq or interrupt
- * context and we can't sleep.
*/
-void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
- int error, bool was_async)
+void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq)
{
struct netfs_io_request *rreq = subreq->rreq;
@@ -479,68 +526,60 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
break;
}
- if (rreq->origin != NETFS_DIO_READ) {
- /* Collect buffered reads.
- *
- * If the read completed validly short, then we can clear the
- * tail before going on to unlock the folios.
- */
- if (error == 0 && subreq->transferred < subreq->len &&
- (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags) ||
- test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags))) {
- netfs_clear_unread(subreq);
- subreq->transferred = subreq->len;
- trace_netfs_sreq(subreq, netfs_sreq_trace_clear);
- }
- if (subreq->transferred > subreq->consumed &&
- (rreq->origin == NETFS_READAHEAD ||
- rreq->origin == NETFS_READPAGE ||
- rreq->origin == NETFS_READ_FOR_WRITE)) {
- netfs_consume_read_data(subreq, was_async);
- __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
- }
- rreq->transferred += subreq->transferred;
- }
-
/* Deal with retry requests, short reads and errors. If we retry
* but don't make progress, we abandon the attempt.
*/
- if (!error && subreq->transferred < subreq->len) {
+ if (!subreq->error && subreq->transferred < subreq->len) {
if (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags)) {
trace_netfs_sreq(subreq, netfs_sreq_trace_hit_eof);
+ } else if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_need_clear);
+ } else if (test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_need_retry);
+ } else if (test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags)) {
+ __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_partial_read);
} else {
+ __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ subreq->error = -ENODATA;
trace_netfs_sreq(subreq, netfs_sreq_trace_short);
- if (subreq->transferred > subreq->consumed) {
- __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
- set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
- } else if (!__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
- __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
- } else {
- __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
- error = -ENODATA;
- }
}
}
- subreq->error = error;
- trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
-
- if (unlikely(error < 0)) {
- trace_netfs_failure(rreq, subreq, error, netfs_fail_read);
+ if (unlikely(subreq->error < 0)) {
+ trace_netfs_failure(rreq, subreq, subreq->error, netfs_fail_read);
if (subreq->source == NETFS_READ_FROM_CACHE) {
netfs_stat(&netfs_n_rh_read_failed);
+ __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
} else {
netfs_stat(&netfs_n_rh_download_failed);
- set_bit(NETFS_RREQ_FAILED, &rreq->flags);
- rreq->error = subreq->error;
+ __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
}
+ trace_netfs_rreq(rreq, netfs_rreq_trace_set_pause);
+ set_bit(NETFS_RREQ_PAUSE, &rreq->flags);
}
- if (atomic_dec_and_test(&rreq->nr_outstanding))
- netfs_rreq_terminated(rreq, was_async);
-
- netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
+ netfs_subreq_clear_in_progress(subreq);
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_terminated);
}
EXPORT_SYMBOL(netfs_read_subreq_terminated);
+
+/*
+ * Handle termination of a read from the cache.
+ */
+void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error)
+{
+ struct netfs_io_subrequest *subreq = priv;
+
+ if (transferred_or_error > 0) {
+ subreq->error = 0;
+ if (transferred_or_error > 0) {
+ subreq->transferred += transferred_or_error;
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ }
+ } else {
+ subreq->error = transferred_or_error;
+ }
+ netfs_read_subreq_terminated(subreq);
+}
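As a minimal illustration of the new termination contract documented above (not part of the patch; "myfs_req_done" and its calling convention are hypothetical), a filesystem completion callback now records the outcome in the subrequest itself before handing it back:

static void myfs_req_done(struct netfs_io_subrequest *subreq, ssize_t result)
{
	if (result >= 0) {
		/* Record progress before handing the subreq back. */
		subreq->transferred += result;
		subreq->error = 0;
	} else {
		subreq->error = result;
	}
	/* Previously: netfs_read_subreq_terminated(subreq, error, was_async). */
	netfs_read_subreq_terminated(subreq);
}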
diff --git a/fs/netfs/read_pgpriv2.c b/fs/netfs/read_pgpriv2.c
index ba5af89d37fa..a1489aa29f78 100644
--- a/fs/netfs/read_pgpriv2.c
+++ b/fs/netfs/read_pgpriv2.c
@@ -14,52 +14,11 @@
#include "internal.h"
/*
- * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2. The
- * third mark in the folio queue is used to indicate that this folio needs
- * writing.
- */
-void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
- struct netfs_io_request *rreq,
- struct folio_queue *folioq,
- int slot)
-{
- struct folio *folio = folioq_folio(folioq, slot);
-
- trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
- folio_start_private_2(folio);
- folioq_mark3(folioq, slot);
-}
-
-/*
- * [DEPRECATED] Cancel PG_private_2 on all marked folios in the event of an
- * unrecoverable error.
- */
-static void netfs_pgpriv2_cancel(struct folio_queue *folioq)
-{
- struct folio *folio;
- int slot;
-
- while (folioq) {
- if (!folioq->marks3) {
- folioq = folioq->next;
- continue;
- }
-
- slot = __ffs(folioq->marks3);
- folio = folioq_folio(folioq, slot);
-
- trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
- folio_end_private_2(folio);
- folioq_unmark3(folioq, slot);
- }
-}
-
-/*
* [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
*/
-static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio *folio)
+static void netfs_pgpriv2_copy_folio(struct netfs_io_request *creq, struct folio *folio)
{
- struct netfs_io_stream *cache = &wreq->io_streams[1];
+ struct netfs_io_stream *cache = &creq->io_streams[1];
size_t fsize = folio_size(folio), flen = fsize;
loff_t fpos = folio_pos(folio), i_size;
bool to_eof = false;
@@ -70,17 +29,17 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio
* of the page to beyond it, but cannot move i_size into or through the
* page since we have it locked.
*/
- i_size = i_size_read(wreq->inode);
+ i_size = i_size_read(creq->inode);
if (fpos >= i_size) {
/* mmap beyond eof. */
_debug("beyond eof");
folio_end_private_2(folio);
- return 0;
+ return;
}
- if (fpos + fsize > wreq->i_size)
- wreq->i_size = i_size;
+ if (fpos + fsize > creq->i_size)
+ creq->i_size = i_size;
if (flen > i_size - fpos) {
flen = i_size - fpos;
@@ -94,8 +53,10 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio
trace_netfs_folio(folio, netfs_folio_trace_store_copy);
/* Attach the folio to the rolling buffer. */
- if (netfs_buffer_append_folio(wreq, folio, false) < 0)
- return -ENOMEM;
+ if (rolling_buffer_append(&creq->buffer, folio, 0) < 0) {
+ clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &creq->flags);
+ return;
+ }
cache->submit_extendable_to = fsize;
cache->submit_off = 0;
@@ -109,11 +70,11 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio
do {
ssize_t part;
- wreq->io_iter.iov_offset = cache->submit_off;
+ creq->buffer.iter.iov_offset = cache->submit_off;
- atomic64_set(&wreq->issued_to, fpos + cache->submit_off);
+ atomic64_set(&creq->issued_to, fpos + cache->submit_off);
cache->submit_extendable_to = fsize - cache->submit_off;
- part = netfs_advance_write(wreq, cache, fpos + cache->submit_off,
+ part = netfs_advance_write(creq, cache, fpos + cache->submit_off,
cache->submit_len, to_eof);
cache->submit_off += part;
if (part > cache->submit_len)
@@ -122,94 +83,100 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio
cache->submit_len -= part;
} while (cache->submit_len > 0);
- wreq->io_iter.iov_offset = 0;
- iov_iter_advance(&wreq->io_iter, fsize);
- atomic64_set(&wreq->issued_to, fpos + fsize);
+ creq->buffer.iter.iov_offset = 0;
+ rolling_buffer_advance(&creq->buffer, fsize);
+ atomic64_set(&creq->issued_to, fpos + fsize);
if (flen < fsize)
- netfs_issue_write(wreq, cache);
-
- _leave(" = 0");
- return 0;
+ netfs_issue_write(creq, cache);
}
/*
- * [DEPRECATED] Go through the buffer and write any folios that are marked with
- * the third mark to the cache.
+ * [DEPRECATED] Set up copying to the cache.
*/
-void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
+static struct netfs_io_request *netfs_pgpriv2_begin_copy_to_cache(
+ struct netfs_io_request *rreq, struct folio *folio)
{
- struct netfs_io_request *wreq;
- struct folio_queue *folioq;
- struct folio *folio;
- int error = 0;
- int slot = 0;
-
- _enter("");
+ struct netfs_io_request *creq;
if (!fscache_resources_valid(&rreq->cache_resources))
- goto couldnt_start;
-
- /* Need the first folio to be able to set up the op. */
- for (folioq = rreq->buffer; folioq; folioq = folioq->next) {
- if (folioq->marks3) {
- slot = __ffs(folioq->marks3);
- break;
- }
- }
- if (!folioq)
- return;
- folio = folioq_folio(folioq, slot);
+ goto cancel;
- wreq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
+ creq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
NETFS_PGPRIV2_COPY_TO_CACHE);
- if (IS_ERR(wreq)) {
- kleave(" [create %ld]", PTR_ERR(wreq));
- goto couldnt_start;
- }
+ if (IS_ERR(creq))
+ goto cancel;
+
+ if (!creq->io_streams[1].avail)
+ goto cancel_put;
- trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
+ __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &creq->flags);
+ trace_netfs_copy2cache(rreq, creq);
+ trace_netfs_write(creq, netfs_write_trace_copy_to_cache);
netfs_stat(&netfs_n_wh_copy_to_cache);
+ rreq->copy_to_cache = creq;
+ return creq;
+
+cancel_put:
+ netfs_put_failed_request(creq);
+cancel:
+ rreq->copy_to_cache = ERR_PTR(-ENOBUFS);
+ clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
+ return ERR_PTR(-ENOBUFS);
+}
- for (;;) {
- error = netfs_pgpriv2_copy_folio(wreq, folio);
- if (error < 0)
- break;
+/*
+ * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2 and add
+ * it to the copy write request.
+ */
+void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio)
+{
+ struct netfs_io_request *creq = rreq->copy_to_cache;
- folioq_unmark3(folioq, slot);
- if (!folioq->marks3) {
- folioq = folioq->next;
- if (!folioq)
- break;
- }
+ if (!creq)
+ creq = netfs_pgpriv2_begin_copy_to_cache(rreq, folio);
+ if (IS_ERR(creq))
+ return;
- slot = __ffs(folioq->marks3);
- folio = folioq_folio(folioq, slot);
- }
+ trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
+ folio_start_private_2(folio);
+ netfs_pgpriv2_copy_folio(creq, folio);
+}
- netfs_issue_write(wreq, &wreq->io_streams[1]);
+/*
+ * [DEPRECATED] End writing to the cache, flushing out any outstanding writes.
+ */
+void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq)
+{
+ struct netfs_io_request *creq = rreq->copy_to_cache;
+
+ if (IS_ERR_OR_NULL(creq))
+ return;
+
+ netfs_issue_write(creq, &creq->io_streams[1]);
smp_wmb(); /* Write lists before ALL_QUEUED. */
- set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+ set_bit(NETFS_RREQ_ALL_QUEUED, &creq->flags);
+ trace_netfs_rreq(rreq, netfs_rreq_trace_end_copy_to_cache);
+ if (list_empty_careful(&creq->io_streams[1].subrequests))
+ netfs_wake_collector(creq);
- netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
- _leave(" = %d", error);
-couldnt_start:
- netfs_pgpriv2_cancel(rreq->buffer);
+ netfs_put_request(creq, netfs_rreq_trace_put_return);
+ creq->copy_to_cache = NULL;
}
/*
* [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
* copying.
*/
-bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
+bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *creq)
{
- struct folio_queue *folioq = wreq->buffer;
- unsigned long long collected_to = wreq->collected_to;
- unsigned int slot = wreq->buffer_head_slot;
+ struct folio_queue *folioq = creq->buffer.tail;
+ unsigned long long collected_to = creq->collected_to;
+ unsigned int slot = creq->buffer.first_tail_slot;
bool made_progress = false;
if (slot >= folioq_nr_slots(folioq)) {
- folioq = netfs_delete_buffer_head(wreq);
+ folioq = rolling_buffer_delete_spent(&creq->buffer);
slot = 0;
}
@@ -221,16 +188,16 @@ bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
folio = folioq_folio(folioq, slot);
if (WARN_ONCE(!folio_test_private_2(folio),
"R=%08x: folio %lx is not marked private_2\n",
- wreq->debug_id, folio->index))
+ creq->debug_id, folio->index))
trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);
fpos = folio_pos(folio);
fsize = folio_size(folio);
flen = fsize;
- fend = min_t(unsigned long long, fpos + flen, wreq->i_size);
+ fend = min_t(unsigned long long, fpos + flen, creq->i_size);
- trace_netfs_collect_folio(wreq, folio, fend, collected_to);
+ trace_netfs_collect_folio(creq, folio, fend, collected_to);
/* Unlock any folio we've transferred all of. */
if (collected_to < fend)
@@ -238,7 +205,7 @@ bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
trace_netfs_folio(folio, netfs_folio_trace_end_copy);
folio_end_private_2(folio);
- wreq->cleaned_to = fpos + fsize;
+ creq->cleaned_to = fpos + fsize;
made_progress = true;
/* Clean up the head folioq. If we clear an entire folioq, then
@@ -248,9 +215,9 @@ bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
folioq_clear(folioq, slot);
slot++;
if (slot >= folioq_nr_slots(folioq)) {
- if (READ_ONCE(wreq->buffer_tail) == folioq)
- break;
- folioq = netfs_delete_buffer_head(wreq);
+ folioq = rolling_buffer_delete_spent(&creq->buffer);
+ if (!folioq)
+ goto done;
slot = 0;
}
@@ -258,7 +225,8 @@ bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
break;
}
- wreq->buffer = folioq;
- wreq->buffer_head_slot = slot;
+ creq->buffer.tail = folioq;
+done:
+ creq->buffer.first_tail_slot = slot;
return made_progress;
}
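For orientation, a sketch of the reworked PG_private_2 flow, assuming it is driven from the read collector as in read_collect.c ("example_" names are hypothetical): mark and copy each folio as it is collected, then flush the copy request once collection finishes.

static void example_collect_folio(struct netfs_io_request *rreq,
				  struct folio *folio, bool copy_to_cache)
{
	if (copy_to_cache)
		netfs_pgpriv2_copy_to_cache(rreq, folio); /* lazily creates rreq->copy_to_cache */
	folio_unlock(folio);
}

static void example_collection_done(struct netfs_io_request *rreq)
{
	netfs_pgpriv2_end_copy_to_cache(rreq); /* issue remaining writes and mark ALL_QUEUED */
}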
diff --git a/fs/netfs/read_retry.c b/fs/netfs/read_retry.c
index 0350592ea804..b99e84a8170a 100644
--- a/fs/netfs/read_retry.c
+++ b/fs/netfs/read_retry.c
@@ -12,17 +12,9 @@
static void netfs_reissue_read(struct netfs_io_request *rreq,
struct netfs_io_subrequest *subreq)
{
- struct iov_iter *io_iter = &subreq->io_iter;
-
- if (iov_iter_is_folioq(io_iter)) {
- subreq->curr_folioq = (struct folio_queue *)io_iter->folioq;
- subreq->curr_folioq_slot = io_iter->folioq_slot;
- subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot];
- }
-
- atomic_inc(&rreq->nr_outstanding);
+ __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
- netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+ netfs_stat(&netfs_n_rh_retry_read_subreq);
subreq->rreq->netfs_ops->issue_read(subreq);
}
@@ -33,13 +25,12 @@ static void netfs_reissue_read(struct netfs_io_request *rreq,
static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
{
struct netfs_io_subrequest *subreq;
- struct netfs_io_stream *stream0 = &rreq->io_streams[0];
- LIST_HEAD(sublist);
- LIST_HEAD(queue);
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+ struct list_head *next;
_enter("R=%x", rreq->debug_id);
- if (list_empty(&rreq->subrequests))
+ if (list_empty(&stream->subrequests))
return;
if (rreq->netfs_ops->retry_request)
@@ -49,14 +40,15 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
* up to the first permanently failed one.
*/
if (!rreq->netfs_ops->prepare_read &&
- !test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags)) {
- struct netfs_io_subrequest *subreq;
-
- list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+ !rreq->cache_resources.ops) {
+ list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
break;
if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+ __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ subreq->retry_count++;
netfs_reset_iter(subreq);
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
netfs_reissue_read(rreq, subreq);
}
}
@@ -73,48 +65,44 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
* populating with smaller subrequests. In the event that the subreq
* we just launched finishes before we insert the next subreq, it'll
* fill in rreq->prev_donated instead.
-
+ *
* Note: Alternatively, we could split the tail subrequest right before
* we reissue it and fix up the donations under lock.
*/
- list_splice_init(&rreq->subrequests, &queue);
+ next = stream->subrequests.next;
do {
- struct netfs_io_subrequest *from;
+ struct netfs_io_subrequest *from, *to, *tmp;
struct iov_iter source;
unsigned long long start, len;
- size_t part, deferred_next_donated = 0;
- bool boundary = false;
+ size_t part;
+ bool boundary = false, subreq_superfluous = false;
/* Go through the subreqs and find the next span of contiguous
* buffer that we then rejig (cifs, for example, needs the
* rsize renegotiating) and reissue.
*/
- from = list_first_entry(&queue, struct netfs_io_subrequest, rreq_link);
- list_move_tail(&from->rreq_link, &sublist);
+ from = list_entry(next, struct netfs_io_subrequest, rreq_link);
+ to = from;
start = from->start + from->transferred;
len = from->len - from->transferred;
- _debug("from R=%08x[%x] s=%llx ctl=%zx/%zx/%zx",
+ _debug("from R=%08x[%x] s=%llx ctl=%zx/%zx",
rreq->debug_id, from->debug_index,
- from->start, from->consumed, from->transferred, from->len);
+ from->start, from->transferred, from->len);
if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
!test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
goto abandon;
- deferred_next_donated = from->next_donated;
- while ((subreq = list_first_entry_or_null(
- &queue, struct netfs_io_subrequest, rreq_link))) {
- if (subreq->start != start + len ||
- subreq->transferred > 0 ||
+ list_for_each_continue(next, &stream->subrequests) {
+ subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
+ if (subreq->start + subreq->transferred != start + len ||
+ test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
!test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
break;
- list_move_tail(&subreq->rreq_link, &sublist);
- len += subreq->len;
- deferred_next_donated = subreq->next_donated;
- if (test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags))
- break;
+ to = subreq;
+ len += to->len;
}
_debug(" - range: %llx-%llx %llx", start, start + len - 1, len);
@@ -127,36 +115,33 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
source.count = len;
/* Work through the sublist. */
- while ((subreq = list_first_entry_or_null(
- &sublist, struct netfs_io_subrequest, rreq_link))) {
- list_del(&subreq->rreq_link);
-
+ subreq = from;
+ list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
+ if (!len) {
+ subreq_superfluous = true;
+ break;
+ }
subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
subreq->start = start - subreq->transferred;
subreq->len = len + subreq->transferred;
- stream0->sreq_max_len = subreq->len;
-
__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+ __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ subreq->retry_count++;
- spin_lock_bh(&rreq->lock);
- list_add_tail(&subreq->rreq_link, &rreq->subrequests);
- subreq->prev_donated += rreq->prev_donated;
- rreq->prev_donated = 0;
trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
- spin_unlock_bh(&rreq->lock);
-
- BUG_ON(!len);
/* Renegotiate max_len (rsize) */
- if (rreq->netfs_ops->prepare_read(subreq) < 0) {
+ stream->sreq_max_len = subreq->len;
+ if (rreq->netfs_ops->prepare_read &&
+ rreq->netfs_ops->prepare_read(subreq) < 0) {
trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ goto abandon;
}
- part = umin(len, stream0->sreq_max_len);
- if (unlikely(rreq->io_streams[0].sreq_max_segs))
- part = netfs_limit_iter(&source, 0, part, stream0->sreq_max_segs);
+ part = umin(len, stream->sreq_max_len);
+ if (unlikely(stream->sreq_max_segs))
+ part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
subreq->len = subreq->transferred + part;
subreq->io_iter = source;
iov_iter_truncate(&subreq->io_iter, part);
@@ -166,58 +151,105 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
if (!len) {
if (boundary)
__set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
- subreq->next_donated = deferred_next_donated;
} else {
__clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
- subreq->next_donated = 0;
}
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
netfs_reissue_read(rreq, subreq);
- if (!len)
+ if (subreq == to) {
+ subreq_superfluous = false;
break;
-
- /* If we ran out of subrequests, allocate another. */
- if (list_empty(&sublist)) {
- subreq = netfs_alloc_subrequest(rreq);
- if (!subreq)
- goto abandon;
- subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
- subreq->start = start;
-
- /* We get two refs, but need just one. */
- netfs_put_subrequest(subreq, false, netfs_sreq_trace_new);
- trace_netfs_sreq(subreq, netfs_sreq_trace_split);
- list_add_tail(&subreq->rreq_link, &sublist);
}
}
/* If we managed to use fewer subreqs, we can discard the
- * excess.
+ * excess; if we used the same number, then we're done.
*/
- while ((subreq = list_first_entry_or_null(
- &sublist, struct netfs_io_subrequest, rreq_link))) {
- trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
- list_del(&subreq->rreq_link);
- netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
+ if (!len) {
+ if (!subreq_superfluous)
+ continue;
+ list_for_each_entry_safe_from(subreq, tmp,
+ &stream->subrequests, rreq_link) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_superfluous);
+ list_del(&subreq->rreq_link);
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
+ if (subreq == to)
+ break;
+ }
+ continue;
}
- } while (!list_empty(&queue));
+ /* We ran out of subrequests, so we need to allocate some more
+ * and insert them after.
+ */
+ do {
+ subreq = netfs_alloc_subrequest(rreq);
+ if (!subreq) {
+ subreq = to;
+ goto abandon_after;
+ }
+ subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
+ subreq->start = start;
+ subreq->len = len;
+ subreq->stream_nr = stream->stream_nr;
+ subreq->retry_count = 1;
+
+ trace_netfs_sreq_ref(rreq->debug_id, subreq->debug_index,
+ refcount_read(&subreq->ref),
+ netfs_sreq_trace_new);
+
+ list_add(&subreq->rreq_link, &to->rreq_link);
+ to = list_next_entry(to, rreq_link);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+
+ stream->sreq_max_len = umin(len, rreq->rsize);
+ stream->sreq_max_segs = 0;
+ if (unlikely(stream->sreq_max_segs))
+ part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
+
+ netfs_stat(&netfs_n_rh_download);
+ if (rreq->netfs_ops->prepare_read(subreq) < 0) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
+ __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ goto abandon;
+ }
+
+ part = umin(len, stream->sreq_max_len);
+ subreq->len = subreq->transferred + part;
+ subreq->io_iter = source;
+ iov_iter_truncate(&subreq->io_iter, part);
+ iov_iter_advance(&source, part);
+
+ len -= part;
+ start += part;
+ if (!len && boundary) {
+ __set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
+ boundary = false;
+ }
+
+ netfs_reissue_read(rreq, subreq);
+ } while (len);
+
+ } while (!list_is_head(next, &stream->subrequests));
return;
- /* If we hit ENOMEM, fail all remaining subrequests */
+ /* If we hit an error, fail all remaining incomplete subrequests */
+abandon_after:
+ if (list_is_last(&subreq->rreq_link, &stream->subrequests))
+ return;
+ subreq = list_next_entry(subreq, rreq_link);
abandon:
- list_splice_init(&sublist, &queue);
- list_for_each_entry(subreq, &queue, rreq_link) {
- if (!subreq->error)
- subreq->error = -ENOMEM;
- __clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
+ if (!subreq->error &&
+ !test_bit(NETFS_SREQ_FAILED, &subreq->flags) &&
+ !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
+ continue;
+ subreq->error = -ENOMEM;
+ __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- __clear_bit(NETFS_SREQ_RETRYING, &subreq->flags);
}
- spin_lock_bh(&rreq->lock);
- list_splice_tail_init(&queue, &rreq->subrequests);
- spin_unlock_bh(&rreq->lock);
}
/*
@@ -225,14 +257,19 @@ abandon:
*/
void netfs_retry_reads(struct netfs_io_request *rreq)
{
- trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
- atomic_inc(&rreq->nr_outstanding);
+ netfs_stat(&netfs_n_rh_retry_read_req);
- netfs_retry_read_subrequests(rreq);
+ /* Wait for all outstanding I/O to quiesce before performing retries as
+ * we may need to renegotiate the I/O sizes.
+ */
+ set_bit(NETFS_RREQ_RETRYING, &rreq->flags);
+ netfs_wait_for_in_progress_stream(rreq, stream);
+ clear_bit(NETFS_RREQ_RETRYING, &rreq->flags);
- if (atomic_dec_and_test(&rreq->nr_outstanding))
- netfs_rreq_terminated(rreq, false);
+ trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
+ netfs_retry_read_subrequests(rreq);
}
/*
@@ -243,7 +280,7 @@ void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq)
{
struct folio_queue *p;
- for (p = rreq->buffer; p; p = p->next) {
+ for (p = rreq->buffer.tail; p; p = p->next) {
for (int slot = 0; slot < folioq_count(p); slot++) {
struct folio *folio = folioq_folio(p, slot);
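The span-coalescing step earlier in this file can be summarised by the following sketch (a simplified mirror of the loop in netfs_retry_read_subrequests(), assuming the same NEED_RETRY/BOUNDARY semantics; "example_find_retry_span" is hypothetical and not part of the patch):

static void example_find_retry_span(struct netfs_io_stream *stream,
				    struct netfs_io_subrequest *from,
				    unsigned long long *_start,
				    unsigned long long *_len)
{
	struct netfs_io_subrequest *subreq = from;
	unsigned long long start = from->start + from->transferred;
	unsigned long long len = from->len - from->transferred;

	/* Extend the span while the next subreq is contiguous, retryable and
	 * doesn't start a new boundary.
	 */
	list_for_each_entry_continue(subreq, &stream->subrequests, rreq_link) {
		if (subreq->start + subreq->transferred != start + len ||
		    test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
			break;
		len += subreq->len;
	}
	*_start = start;
	*_len = len;
}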
diff --git a/fs/netfs/read_single.c b/fs/netfs/read_single.c
new file mode 100644
index 000000000000..8e6264f62a8f
--- /dev/null
+++ b/fs/netfs/read_single.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Single, monolithic object support (e.g. AFS directory).
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/sched/mm.h>
+#include <linux/task_io_accounting_ops.h>
+#include <linux/netfs.h>
+#include "internal.h"
+
+/**
+ * netfs_single_mark_inode_dirty - Mark a single, monolithic object inode dirty
+ * @inode: The inode to mark
+ *
+ * Mark an inode that contains a single, monolithic object as dirty so that its
+ * writepages op will get called. If set, the SINGLE_NO_UPLOAD flag indicates
+ * that the object will only be written to the cache and not uploaded (e.g. AFS
+ * directory contents).
+ */
+void netfs_single_mark_inode_dirty(struct inode *inode)
+{
+ struct netfs_inode *ictx = netfs_inode(inode);
+ bool cache_only = test_bit(NETFS_ICTX_SINGLE_NO_UPLOAD, &ictx->flags);
+ bool caching = fscache_cookie_enabled(netfs_i_cookie(netfs_inode(inode)));
+
+ if (cache_only && !caching)
+ return;
+
+ mark_inode_dirty(inode);
+
+ if (caching && !(inode_state_read_once(inode) & I_PINNING_NETFS_WB)) {
+ bool need_use = false;
+
+ spin_lock(&inode->i_lock);
+ if (!(inode_state_read(inode) & I_PINNING_NETFS_WB)) {
+ inode_state_set(inode, I_PINNING_NETFS_WB);
+ need_use = true;
+ }
+ spin_unlock(&inode->i_lock);
+
+ if (need_use)
+ fscache_use_cookie(netfs_i_cookie(ictx), true);
+ }
+
+}
+EXPORT_SYMBOL(netfs_single_mark_inode_dirty);
+
+static int netfs_single_begin_cache_read(struct netfs_io_request *rreq, struct netfs_inode *ctx)
+{
+ return fscache_begin_read_operation(&rreq->cache_resources, netfs_i_cookie(ctx));
+}
+
+static void netfs_single_cache_prepare_read(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+ if (!cres->ops) {
+ subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
+ return;
+ }
+ subreq->source = cres->ops->prepare_read(subreq, rreq->i_size);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+
+}
+
+static void netfs_single_read_cache(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+ _enter("R=%08x[%x]", rreq->debug_id, subreq->debug_index);
+ netfs_stat(&netfs_n_rh_read);
+ cres->ops->read(cres, subreq->start, &subreq->io_iter, NETFS_READ_HOLE_FAIL,
+ netfs_cache_read_terminated, subreq);
+}
+
+/*
+ * Perform a read to a buffer from the cache or the server. Only a single
+ * subreq is permitted as the object must be fetched in a single transaction.
+ */
+static int netfs_single_dispatch_read(struct netfs_io_request *rreq)
+{
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+ struct netfs_io_subrequest *subreq;
+ int ret = 0;
+
+ subreq = netfs_alloc_subrequest(rreq);
+ if (!subreq)
+ return -ENOMEM;
+
+ subreq->source = NETFS_SOURCE_UNKNOWN;
+ subreq->start = 0;
+ subreq->len = rreq->len;
+ subreq->io_iter = rreq->buffer.iter;
+
+ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+
+ spin_lock(&rreq->lock);
+ list_add_tail(&subreq->rreq_link, &stream->subrequests);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_added);
+ stream->front = subreq;
+ /* Store list pointers before active flag */
+ smp_store_release(&stream->active, true);
+ spin_unlock(&rreq->lock);
+
+ netfs_single_cache_prepare_read(rreq, subreq);
+ switch (subreq->source) {
+ case NETFS_DOWNLOAD_FROM_SERVER:
+ netfs_stat(&netfs_n_rh_download);
+ if (rreq->netfs_ops->prepare_read) {
+ ret = rreq->netfs_ops->prepare_read(subreq);
+ if (ret < 0)
+ goto cancel;
+ }
+
+ rreq->netfs_ops->issue_read(subreq);
+ rreq->submitted += subreq->len;
+ break;
+ case NETFS_READ_FROM_CACHE:
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+ netfs_single_read_cache(rreq, subreq);
+ rreq->submitted += subreq->len;
+ ret = 0;
+ break;
+ default:
+ pr_warn("Unexpected single-read source %u\n", subreq->source);
+ WARN_ON_ONCE(true);
+ ret = -EIO;
+ break;
+ }
+
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
+ return ret;
+cancel:
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
+ return ret;
+}
+
+/**
+ * netfs_read_single - Synchronously read a single blob of pages.
+ * @inode: The inode to read from.
+ * @file: The file we're using to read or NULL.
+ * @iter: The buffer we're reading into.
+ *
+ * Fulfil a read request for a single monolithic object by drawing data from
+ * the cache if possible, or the netfs if not. The buffer may be larger than
+ * the file content; unused space beyond the EOF will be zero-filled.  The content
+ * will be read with a single I/O request (though this may be retried).
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled. If caching is enabled,
+ * the data will be stored as a single object into the cache.
+ */
+ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_iter *iter)
+{
+ struct netfs_io_request *rreq;
+ struct netfs_inode *ictx = netfs_inode(inode);
+ ssize_t ret;
+
+ rreq = netfs_alloc_request(inode->i_mapping, file, 0, iov_iter_count(iter),
+ NETFS_READ_SINGLE);
+ if (IS_ERR(rreq))
+ return PTR_ERR(rreq);
+
+ ret = netfs_single_begin_cache_read(rreq, ictx);
+ if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+ goto cleanup_free;
+
+ netfs_stat(&netfs_n_rh_read_single);
+ trace_netfs_read(rreq, 0, rreq->len, netfs_read_trace_read_single);
+
+ rreq->buffer.iter = *iter;
+ netfs_single_dispatch_read(rreq);
+
+ ret = netfs_wait_for_read(rreq);
+ netfs_put_request(rreq, netfs_rreq_trace_put_return);
+ return ret;
+
+cleanup_free:
+ netfs_put_failed_request(rreq);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_read_single);
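A hypothetical caller of netfs_read_single() might look like this (a sketch only; the kvec-backed iterator is just one possible buffer arrangement, and "example_read_blob" is not from the patch):

static ssize_t example_read_blob(struct inode *inode, struct file *file,
				 void *buf, size_t size)
{
	struct kvec kv = { .iov_base = buf, .iov_len = size };
	struct iov_iter iter;

	/* Read the whole monolithic object into the kernel buffer. */
	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, size);
	return netfs_read_single(inode, file, &iter);
}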
diff --git a/fs/netfs/rolling_buffer.c b/fs/netfs/rolling_buffer.c
new file mode 100644
index 000000000000..207b6a326651
--- /dev/null
+++ b/fs/netfs/rolling_buffer.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Rolling buffer helpers
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/bitops.h>
+#include <linux/pagemap.h>
+#include <linux/rolling_buffer.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+static atomic_t debug_ids;
+
+/**
+ * netfs_folioq_alloc - Allocate a folio_queue struct
+ * @rreq_id: Associated debugging ID for tracing purposes
+ * @gfp: Allocation constraints
+ * @trace: Trace tag to indicate the purpose of the allocation
+ *
+ * Allocate, initialise and account the folio_queue struct and log a trace line
+ * to mark the allocation.
+ */
+struct folio_queue *netfs_folioq_alloc(unsigned int rreq_id, gfp_t gfp,
+ unsigned int /*enum netfs_folioq_trace*/ trace)
+{
+ struct folio_queue *fq;
+
+ fq = kmalloc(sizeof(*fq), gfp);
+ if (fq) {
+ netfs_stat(&netfs_n_folioq);
+ folioq_init(fq, rreq_id);
+ fq->debug_id = atomic_inc_return(&debug_ids);
+ trace_netfs_folioq(fq, trace);
+ }
+ return fq;
+}
+EXPORT_SYMBOL(netfs_folioq_alloc);
+
+/**
+ * netfs_folioq_free - Free a folio_queue struct
+ * @folioq: The object to free
+ * @trace: Trace tag to indicate which free
+ *
+ * Free and unaccount the folio_queue struct.
+ */
+void netfs_folioq_free(struct folio_queue *folioq,
+ unsigned int /*enum netfs_trace_folioq*/ trace)
+{
+ trace_netfs_folioq(folioq, trace);
+ netfs_stat_d(&netfs_n_folioq);
+ kfree(folioq);
+}
+EXPORT_SYMBOL(netfs_folioq_free);
+
+/*
+ * Initialise a rolling buffer.  We allocate an empty folio queue struct so
+ * that the pointers can be independently driven by the producer and the
+ * consumer.
+ */
+int rolling_buffer_init(struct rolling_buffer *roll, unsigned int rreq_id,
+ unsigned int direction)
+{
+ struct folio_queue *fq;
+
+ fq = netfs_folioq_alloc(rreq_id, GFP_NOFS, netfs_trace_folioq_rollbuf_init);
+ if (!fq)
+ return -ENOMEM;
+
+ roll->head = fq;
+ roll->tail = fq;
+ iov_iter_folio_queue(&roll->iter, direction, fq, 0, 0, 0);
+ return 0;
+}
+
+/*
+ * Add another folio_queue to a rolling buffer if there's no space left.
+ */
+int rolling_buffer_make_space(struct rolling_buffer *roll)
+{
+ struct folio_queue *fq, *head = roll->head;
+
+ if (!folioq_full(head))
+ return 0;
+
+ fq = netfs_folioq_alloc(head->rreq_id, GFP_NOFS, netfs_trace_folioq_make_space);
+ if (!fq)
+ return -ENOMEM;
+ fq->prev = head;
+
+ roll->head = fq;
+ if (folioq_full(head)) {
+ /* Make sure we don't leave the master iterator pointing to a
+ * block that might get immediately consumed.
+ */
+ if (roll->iter.folioq == head &&
+ roll->iter.folioq_slot == folioq_nr_slots(head)) {
+ roll->iter.folioq = fq;
+ roll->iter.folioq_slot = 0;
+ }
+ }
+
+ /* Make sure the initialisation is stored before the next pointer.
+ *
+ * [!] NOTE: After we set head->next, the consumer is at liberty to
+ * immediately delete the old head.
+ */
+ smp_store_release(&head->next, fq);
+ return 0;
+}
+
+/*
+ * Decant the list of folios to read into a rolling buffer.
+ */
+ssize_t rolling_buffer_load_from_ra(struct rolling_buffer *roll,
+ struct readahead_control *ractl,
+ struct folio_batch *put_batch)
+{
+ struct folio_queue *fq;
+ struct page **vec;
+ int nr, ix, to;
+ ssize_t size = 0;
+
+ if (rolling_buffer_make_space(roll) < 0)
+ return -ENOMEM;
+
+ fq = roll->head;
+ vec = (struct page **)fq->vec.folios;
+ nr = __readahead_batch(ractl, vec + folio_batch_count(&fq->vec),
+ folio_batch_space(&fq->vec));
+ ix = fq->vec.nr;
+ to = ix + nr;
+ fq->vec.nr = to;
+ for (; ix < to; ix++) {
+ struct folio *folio = folioq_folio(fq, ix);
+ unsigned int order = folio_order(folio);
+
+ fq->orders[ix] = order;
+ size += PAGE_SIZE << order;
+ trace_netfs_folio(folio, netfs_folio_trace_read);
+ if (!folio_batch_add(put_batch, folio))
+ folio_batch_release(put_batch);
+ }
+ WRITE_ONCE(roll->iter.count, roll->iter.count + size);
+
+ /* Store the counter after setting the slot. */
+ smp_store_release(&roll->next_head_slot, to);
+ return size;
+}
+
+/*
+ * Append a folio to the rolling buffer.
+ */
+ssize_t rolling_buffer_append(struct rolling_buffer *roll, struct folio *folio,
+ unsigned int flags)
+{
+ ssize_t size = folio_size(folio);
+ int slot;
+
+ if (rolling_buffer_make_space(roll) < 0)
+ return -ENOMEM;
+
+ slot = folioq_append(roll->head, folio);
+ if (flags & ROLLBUF_MARK_1)
+ folioq_mark(roll->head, slot);
+ if (flags & ROLLBUF_MARK_2)
+ folioq_mark2(roll->head, slot);
+
+ WRITE_ONCE(roll->iter.count, roll->iter.count + size);
+
+ /* Store the counter after setting the slot. */
+ smp_store_release(&roll->next_head_slot, slot);
+ return size;
+}
+
+/*
+ * Delete a spent buffer from a rolling queue and return the next in line. We
+ * don't return the last buffer to keep the pointers independent, but return
+ * NULL instead.
+ */
+struct folio_queue *rolling_buffer_delete_spent(struct rolling_buffer *roll)
+{
+ struct folio_queue *spent = roll->tail, *next = READ_ONCE(spent->next);
+
+ if (!next)
+ return NULL;
+ next->prev = NULL;
+ netfs_folioq_free(spent, netfs_trace_folioq_delete);
+ roll->tail = next;
+ return next;
+}
+
+/*
+ * Clear out a rolling queue. Folios that have mark 1 set are put.
+ */
+void rolling_buffer_clear(struct rolling_buffer *roll)
+{
+ struct folio_batch fbatch;
+ struct folio_queue *p;
+
+ folio_batch_init(&fbatch);
+
+ while ((p = roll->tail)) {
+ roll->tail = p->next;
+ for (int slot = 0; slot < folioq_count(p); slot++) {
+ struct folio *folio = folioq_folio(p, slot);
+
+ if (!folio)
+ continue;
+ if (folioq_is_marked(p, slot)) {
+ trace_netfs_folio(folio, netfs_folio_trace_put);
+ if (!folio_batch_add(&fbatch, folio))
+ folio_batch_release(&fbatch);
+ }
+ }
+
+ netfs_folioq_free(p, netfs_trace_folioq_clear);
+ }
+
+ folio_batch_release(&fbatch);
+}
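Taken together, the producer/consumer discipline of the rolling buffer API above can be sketched as follows (illustrative only; locking, error paths and the "example_" names are not from the patch):

static int example_produce(struct rolling_buffer *roll, unsigned int rreq_id,
			   struct folio *folio)
{
	int ret = rolling_buffer_init(roll, rreq_id, ITER_DEST);

	if (ret < 0)
		return ret;
	/* Producer side: append folios to the head, marking them for putting. */
	if (rolling_buffer_append(roll, folio, ROLLBUF_MARK_1) < 0)
		return -ENOMEM;
	return 0;
}

static void example_consume(struct rolling_buffer *roll)
{
	/* Consumer side: free fully-consumed segments from the tail; NULL
	 * means only the segment still being filled remains.  Then tear the
	 * whole buffer down.
	 */
	while (rolling_buffer_delete_spent(roll))
		;
	rolling_buffer_clear(roll);
}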
diff --git a/fs/netfs/stats.c b/fs/netfs/stats.c
index 8e63516b40f6..ab6b916addc4 100644
--- a/fs/netfs/stats.c
+++ b/fs/netfs/stats.c
@@ -12,6 +12,7 @@
atomic_t netfs_n_rh_dio_read;
atomic_t netfs_n_rh_readahead;
atomic_t netfs_n_rh_read_folio;
+atomic_t netfs_n_rh_read_single;
atomic_t netfs_n_rh_rreq;
atomic_t netfs_n_rh_sreq;
atomic_t netfs_n_rh_download;
@@ -28,6 +29,8 @@ atomic_t netfs_n_rh_write_begin;
atomic_t netfs_n_rh_write_done;
atomic_t netfs_n_rh_write_failed;
atomic_t netfs_n_rh_write_zskip;
+atomic_t netfs_n_rh_retry_read_req;
+atomic_t netfs_n_rh_retry_read_subreq;
atomic_t netfs_n_wh_buffered_write;
atomic_t netfs_n_wh_writethrough;
atomic_t netfs_n_wh_dio_write;
@@ -40,16 +43,19 @@ atomic_t netfs_n_wh_upload_failed;
atomic_t netfs_n_wh_write;
atomic_t netfs_n_wh_write_done;
atomic_t netfs_n_wh_write_failed;
+atomic_t netfs_n_wh_retry_write_req;
+atomic_t netfs_n_wh_retry_write_subreq;
atomic_t netfs_n_wb_lock_skip;
atomic_t netfs_n_wb_lock_wait;
atomic_t netfs_n_folioq;
int netfs_stats_show(struct seq_file *m, void *v)
{
- seq_printf(m, "Reads : DR=%u RA=%u RF=%u WB=%u WBZ=%u\n",
+ seq_printf(m, "Reads : DR=%u RA=%u RF=%u RS=%u WB=%u WBZ=%u\n",
atomic_read(&netfs_n_rh_dio_read),
atomic_read(&netfs_n_rh_readahead),
atomic_read(&netfs_n_rh_read_folio),
+ atomic_read(&netfs_n_rh_read_single),
atomic_read(&netfs_n_rh_write_begin),
atomic_read(&netfs_n_rh_write_zskip));
seq_printf(m, "Writes : BW=%u WT=%u DW=%u WP=%u 2C=%u\n",
@@ -79,6 +85,11 @@ int netfs_stats_show(struct seq_file *m, void *v)
atomic_read(&netfs_n_wh_write),
atomic_read(&netfs_n_wh_write_done),
atomic_read(&netfs_n_wh_write_failed));
+ seq_printf(m, "Retries: rq=%u rs=%u wq=%u ws=%u\n",
+ atomic_read(&netfs_n_rh_retry_read_req),
+ atomic_read(&netfs_n_rh_retry_read_subreq),
+ atomic_read(&netfs_n_wh_retry_write_req),
+ atomic_read(&netfs_n_wh_retry_write_subreq));
seq_printf(m, "Objs : rr=%u sr=%u foq=%u wsc=%u\n",
atomic_read(&netfs_n_rh_rreq),
atomic_read(&netfs_n_rh_sreq),
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index 1d438be2e1b4..cbf3d9194c7b 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -17,10 +17,38 @@
#define HIT_PENDING 0x01 /* A front op was still pending */
#define NEED_REASSESS 0x02 /* Need to loop round and reassess */
#define MADE_PROGRESS 0x04 /* Made progress cleaning up a stream or the folio set */
-#define BUFFERED 0x08 /* The pagecache needs cleaning up */
+#define NEED_UNLOCK 0x08 /* The pagecache needs unlocking */
#define NEED_RETRY 0x10 /* A front op requests retrying */
#define SAW_FAILURE	0x20	/* One or more streams hit a permanent failure */
+static void netfs_dump_request(const struct netfs_io_request *rreq)
+{
+ pr_err("Request R=%08x r=%d fl=%lx or=%x e=%ld\n",
+ rreq->debug_id, refcount_read(&rreq->ref), rreq->flags,
+ rreq->origin, rreq->error);
+ pr_err(" st=%llx tsl=%zx/%llx/%llx\n",
+ rreq->start, rreq->transferred, rreq->submitted, rreq->len);
+ pr_err(" cci=%llx/%llx/%llx\n",
+ rreq->cleaned_to, rreq->collected_to, atomic64_read(&rreq->issued_to));
+ pr_err(" iw=%pSR\n", rreq->netfs_ops->issue_write);
+ for (int i = 0; i < NR_IO_STREAMS; i++) {
+ const struct netfs_io_subrequest *sreq;
+ const struct netfs_io_stream *s = &rreq->io_streams[i];
+
+ pr_err(" str[%x] s=%x e=%d acnf=%u,%u,%u,%u\n",
+ s->stream_nr, s->source, s->error,
+ s->avail, s->active, s->need_retry, s->failed);
+ pr_err(" str[%x] ct=%llx t=%zx\n",
+ s->stream_nr, s->collected_to, s->transferred);
+ list_for_each_entry(sreq, &s->subrequests, rreq_link) {
+ pr_err(" sreq[%x:%x] sc=%u s=%llx t=%zx/%zx r=%d f=%lx\n",
+ sreq->stream_nr, sreq->debug_index, sreq->source,
+ sreq->start, sreq->transferred, sreq->len,
+ refcount_read(&sreq->ref), sreq->flags);
+ }
+ }
+}
+
/*
* Successful completion of write of a folio to the server and/or cache. Note
* that we are not allowed to lock the folio here on pain of deadlocking with
@@ -83,9 +111,15 @@ end_wb:
static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
unsigned int *notes)
{
- struct folio_queue *folioq = wreq->buffer;
+ struct folio_queue *folioq = wreq->buffer.tail;
unsigned long long collected_to = wreq->collected_to;
- unsigned int slot = wreq->buffer_head_slot;
+ unsigned int slot = wreq->buffer.first_tail_slot;
+
+ if (WARN_ON_ONCE(!folioq)) {
+ pr_err("[!] Writeback unlock found empty rolling buffer!\n");
+ netfs_dump_request(wreq);
+ return;
+ }
if (wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE) {
if (netfs_pgpriv2_unlock_copied_folios(wreq))
@@ -94,7 +128,9 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
}
if (slot >= folioq_nr_slots(folioq)) {
- folioq = netfs_delete_buffer_head(wreq);
+ folioq = rolling_buffer_delete_spent(&wreq->buffer);
+ if (!folioq)
+ return;
slot = 0;
}
@@ -134,9 +170,9 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
folioq_clear(folioq, slot);
slot++;
if (slot >= folioq_nr_slots(folioq)) {
- if (READ_ONCE(wreq->buffer_tail) == folioq)
- break;
- folioq = netfs_delete_buffer_head(wreq);
+ folioq = rolling_buffer_delete_spent(&wreq->buffer);
+ if (!folioq)
+ goto done;
slot = 0;
}
@@ -144,223 +180,9 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
break;
}
- wreq->buffer = folioq;
- wreq->buffer_head_slot = slot;
-}
-
-/*
- * Perform retries on the streams that need it.
- */
-static void netfs_retry_write_stream(struct netfs_io_request *wreq,
- struct netfs_io_stream *stream)
-{
- struct list_head *next;
-
- _enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);
-
- if (list_empty(&stream->subrequests))
- return;
-
- if (stream->source == NETFS_UPLOAD_TO_SERVER &&
- wreq->netfs_ops->retry_request)
- wreq->netfs_ops->retry_request(wreq, stream);
-
- if (unlikely(stream->failed))
- return;
-
- /* If there's no renegotiation to do, just resend each failed subreq. */
- if (!stream->prepare_write) {
- struct netfs_io_subrequest *subreq;
-
- list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
- if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
- break;
- if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
- struct iov_iter source = subreq->io_iter;
-
- iov_iter_revert(&source, subreq->len - source.count);
- __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
- netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
- netfs_reissue_write(stream, subreq, &source);
- }
- }
- return;
- }
-
- next = stream->subrequests.next;
-
- do {
- struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
- struct iov_iter source;
- unsigned long long start, len;
- size_t part;
- bool boundary = false;
-
- /* Go through the stream and find the next span of contiguous
- * data that we then rejig (cifs, for example, needs the wsize
- * renegotiating) and reissue.
- */
- from = list_entry(next, struct netfs_io_subrequest, rreq_link);
- to = from;
- start = from->start + from->transferred;
- len = from->len - from->transferred;
-
- if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
- !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
- return;
-
- list_for_each_continue(next, &stream->subrequests) {
- subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
- if (subreq->start + subreq->transferred != start + len ||
- test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
- !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
- break;
- to = subreq;
- len += to->len;
- }
-
- /* Determine the set of buffers we're going to use. Each
- * subreq gets a subset of a single overall contiguous buffer.
- */
- netfs_reset_iter(from);
- source = from->io_iter;
- source.count = len;
-
- /* Work through the sublist. */
- subreq = from;
- list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
- if (!len)
- break;
- /* Renegotiate max_len (wsize) */
- trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
- __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
- stream->prepare_write(subreq);
-
- part = min(len, stream->sreq_max_len);
- subreq->len = part;
- subreq->start = start;
- subreq->transferred = 0;
- len -= part;
- start += part;
- if (len && subreq == to &&
- __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags))
- boundary = true;
-
- netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
- netfs_reissue_write(stream, subreq, &source);
- if (subreq == to)
- break;
- }
-
- /* If we managed to use fewer subreqs, we can discard the
- * excess; if we used the same number, then we're done.
- */
- if (!len) {
- if (subreq == to)
- continue;
- list_for_each_entry_safe_from(subreq, tmp,
- &stream->subrequests, rreq_link) {
- trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
- list_del(&subreq->rreq_link);
- netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
- if (subreq == to)
- break;
- }
- continue;
- }
-
- /* We ran out of subrequests, so we need to allocate some more
- * and insert them after.
- */
- do {
- subreq = netfs_alloc_subrequest(wreq);
- subreq->source = to->source;
- subreq->start = start;
- subreq->debug_index = atomic_inc_return(&wreq->subreq_counter);
- subreq->stream_nr = to->stream_nr;
- __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
-
- trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
- refcount_read(&subreq->ref),
- netfs_sreq_trace_new);
- netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
-
- list_add(&subreq->rreq_link, &to->rreq_link);
- to = list_next_entry(to, rreq_link);
- trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
-
- stream->sreq_max_len = len;
- stream->sreq_max_segs = INT_MAX;
- switch (stream->source) {
- case NETFS_UPLOAD_TO_SERVER:
- netfs_stat(&netfs_n_wh_upload);
- stream->sreq_max_len = umin(len, wreq->wsize);
- break;
- case NETFS_WRITE_TO_CACHE:
- netfs_stat(&netfs_n_wh_write);
- break;
- default:
- WARN_ON_ONCE(1);
- }
-
- stream->prepare_write(subreq);
-
- part = umin(len, stream->sreq_max_len);
- subreq->len = subreq->transferred + part;
- len -= part;
- start += part;
- if (!len && boundary) {
- __set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
- boundary = false;
- }
-
- netfs_reissue_write(stream, subreq, &source);
- if (!len)
- break;
-
- } while (len);
-
- } while (!list_is_head(next, &stream->subrequests));
-}
-
-/*
- * Perform retries on the streams that need it. If we're doing content
- * encryption and the server copy changed due to a third-party write, we may
- * need to do an RMW cycle and also rewrite the data to the cache.
- */
-static void netfs_retry_writes(struct netfs_io_request *wreq)
-{
- struct netfs_io_subrequest *subreq;
- struct netfs_io_stream *stream;
- int s;
-
- /* Wait for all outstanding I/O to quiesce before performing retries as
- * we may need to renegotiate the I/O sizes.
- */
- for (s = 0; s < NR_IO_STREAMS; s++) {
- stream = &wreq->io_streams[s];
- if (!stream->active)
- continue;
-
- list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
- wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
- TASK_UNINTERRUPTIBLE);
- }
- }
-
- // TODO: Enc: Fetch changed partial pages
- // TODO: Enc: Reencrypt content if needed.
- // TODO: Enc: Wind back transferred point.
- // TODO: Enc: Mark cache pages for retry.
-
- for (s = 0; s < NR_IO_STREAMS; s++) {
- stream = &wreq->io_streams[s];
- if (stream->need_retry) {
- stream->need_retry = false;
- netfs_retry_write_stream(wreq, stream);
- }
- }
+ wreq->buffer.tail = folioq;
+done:
+ wreq->buffer.first_tail_slot = slot;
}
/*
@@ -391,7 +213,7 @@ reassess_streams:
if (wreq->origin == NETFS_WRITEBACK ||
wreq->origin == NETFS_WRITETHROUGH ||
wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE)
- notes = BUFFERED;
+ notes = NEED_UNLOCK;
else
notes = 0;
@@ -418,7 +240,7 @@ reassess_streams:
}
/* Stall if the front is still undergoing I/O. */
- if (test_bit(NETFS_SREQ_IN_PROGRESS, &front->flags)) {
+ if (netfs_check_subreq_in_progress(front)) {
notes |= HIT_PENDING;
break;
}
@@ -432,6 +254,7 @@ reassess_streams:
if (front->start + front->transferred > stream->collected_to) {
stream->collected_to = front->start + front->transferred;
stream->transferred = stream->collected_to - wreq->start;
+ stream->transferred_valid = true;
notes |= MADE_PROGRESS;
}
if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
@@ -450,15 +273,15 @@ reassess_streams:
cancel:
/* Remove if completely consumed. */
- spin_lock_bh(&wreq->lock);
+ spin_lock(&wreq->lock);
remove = front;
list_del_init(&front->rreq_link);
front = list_first_entry_or_null(&stream->subrequests,
struct netfs_io_subrequest, rreq_link);
stream->front = front;
- spin_unlock_bh(&wreq->lock);
- netfs_put_subrequest(remove, false,
+ spin_unlock(&wreq->lock);
+ netfs_put_subrequest(remove,
notes & SAW_FAILURE ?
netfs_sreq_trace_put_cancel :
netfs_sreq_trace_put_done);
@@ -488,7 +311,7 @@ reassess_streams:
trace_netfs_collect_state(wreq, wreq->collected_to, notes);
/* Unlock any folios that we have now finished with. */
- if (notes & BUFFERED) {
+ if (notes & NEED_UNLOCK) {
if (wreq->cleaned_to < wreq->collected_to)
netfs_writeback_unlock_folios(wreq, &notes);
} else {
@@ -499,17 +322,14 @@ reassess_streams:
if (notes & NEED_RETRY)
goto need_retry;
- if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
- trace_netfs_rreq(wreq, netfs_rreq_trace_unpause);
- clear_bit_unlock(NETFS_RREQ_PAUSE, &wreq->flags);
- wake_up_bit(&wreq->flags, NETFS_RREQ_PAUSE);
- }
- if (notes & NEED_REASSESS) {
+ if (notes & MADE_PROGRESS) {
+ netfs_wake_rreq_flag(wreq, NETFS_RREQ_PAUSE, netfs_rreq_trace_unpause);
//cond_resched();
goto reassess_streams;
}
- if (notes & MADE_PROGRESS) {
+
+ if (notes & NEED_REASSESS) {
//cond_resched();
goto reassess_streams;
}
@@ -533,30 +353,22 @@ need_retry:
/*
* Perform the collection of subrequests, folios and encryption buffers.
*/
-void netfs_write_collection_worker(struct work_struct *work)
+bool netfs_write_collection(struct netfs_io_request *wreq)
{
- struct netfs_io_request *wreq = container_of(work, struct netfs_io_request, work);
struct netfs_inode *ictx = netfs_inode(wreq->inode);
size_t transferred;
+ bool transferred_valid = false;
int s;
_enter("R=%x", wreq->debug_id);
- netfs_see_request(wreq, netfs_rreq_trace_see_work);
- if (!test_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags)) {
- netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
- return;
- }
-
netfs_collect_write_results(wreq);
/* We're done when the app thread has finished posting subreqs and all
* the queues in all the streams are empty.
*/
- if (!test_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags)) {
- netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
- return;
- }
+ if (!test_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags))
+ return false;
smp_rmb(); /* Read ALL_QUEUED before lists. */
transferred = LONG_MAX;
@@ -564,28 +376,33 @@ void netfs_write_collection_worker(struct work_struct *work)
struct netfs_io_stream *stream = &wreq->io_streams[s];
if (!stream->active)
continue;
- if (!list_empty(&stream->subrequests)) {
- netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
- return;
- }
- if (stream->transferred < transferred)
+ if (!list_empty(&stream->subrequests))
+ return false;
+ if (stream->transferred_valid &&
+ stream->transferred < transferred) {
transferred = stream->transferred;
+ transferred_valid = true;
+ }
}
/* Okay, declare that all I/O is complete. */
- wreq->transferred = transferred;
+ if (transferred_valid)
+ wreq->transferred = transferred;
trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
if (wreq->io_streams[1].active &&
- wreq->io_streams[1].failed) {
+ wreq->io_streams[1].failed &&
+ ictx->ops->invalidate_cache) {
/* Cache write failure doesn't prevent writeback completion
* unless we're in disconnected mode.
*/
ictx->ops->invalidate_cache(wreq);
}
- if (wreq->cleanup)
- wreq->cleanup(wreq);
+ if ((wreq->origin == NETFS_UNBUFFERED_WRITE ||
+ wreq->origin == NETFS_DIO_WRITE) &&
+ !wreq->error)
+ netfs_update_i_size(ictx, &ictx->inode, wreq->start, wreq->transferred);
if (wreq->origin == NETFS_DIO_WRITE &&
wreq->mapping->nrpages) {
@@ -604,32 +421,35 @@ void netfs_write_collection_worker(struct work_struct *work)
inode_dio_end(wreq->inode);
_debug("finished");
- trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
- clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
- wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);
+ netfs_wake_rreq_flag(wreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
+ /* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */
if (wreq->iocb) {
size_t written = min(wreq->transferred, wreq->len);
wreq->iocb->ki_pos += written;
- if (wreq->iocb->ki_complete)
+ if (wreq->iocb->ki_complete) {
+ trace_netfs_rreq(wreq, netfs_rreq_trace_ki_complete);
wreq->iocb->ki_complete(
wreq->iocb, wreq->error ? wreq->error : written);
+ }
wreq->iocb = VFS_PTR_POISON;
}
- netfs_clear_subrequests(wreq, false);
- netfs_put_request(wreq, false, netfs_rreq_trace_put_work_complete);
+ netfs_clear_subrequests(wreq);
+ return true;
}
-/*
- * Wake the collection work item.
- */
-void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
+void netfs_write_collection_worker(struct work_struct *work)
{
- if (!work_pending(&wreq->work)) {
- netfs_get_request(wreq, netfs_rreq_trace_get_work);
- if (!queue_work(system_unbound_wq, &wreq->work))
- netfs_put_request(wreq, was_async, netfs_rreq_trace_put_work_nq);
+ struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work);
+
+ netfs_see_request(rreq, netfs_rreq_trace_see_work);
+ if (netfs_check_rreq_in_progress(rreq)) {
+ if (netfs_write_collection(rreq))
+ /* Drop the ref from the IN_PROGRESS flag. */
+ netfs_put_request(rreq, netfs_rreq_trace_put_work_ip);
+ else
+ netfs_see_request(rreq, netfs_rreq_trace_see_work_complete);
}
}
@@ -637,7 +457,6 @@ void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
* netfs_write_subrequest_terminated - Note the termination of a write operation.
* @_op: The I/O subrequest that has terminated.
* @transferred_or_error: The amount of data transferred or an error code.
- * @was_async: The termination was asynchronous
*
* This tells the library that a contributory write I/O operation has
* terminated, one way or another, and that it should collect the results.
@@ -647,21 +466,16 @@ void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
* negative error code. The library will look after reissuing I/O operations
* as appropriate and writing downloaded data to the cache.
*
- * If @was_async is true, the caller might be running in softirq or interrupt
- * context and we can't sleep.
- *
* When this is called, ownership of the subrequest is transferred back to the
* library, along with a ref.
*
* Note that %_op is a void* so that the function can be passed to
* kiocb::term_func without the need for a casting wrapper.
*/
-void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
- bool was_async)
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error)
{
struct netfs_io_subrequest *subreq = _op;
struct netfs_io_request *wreq = subreq->rreq;
- struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];
_enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
@@ -672,8 +486,6 @@ void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
case NETFS_WRITE_TO_CACHE:
netfs_stat(&netfs_n_wh_write_done);
break;
- case NETFS_INVALID_WRITE:
- break;
default:
BUG();
}
@@ -713,16 +525,7 @@ void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
}
trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
-
- clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
- wake_up_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS);
-
- /* If we are at the head of the queue, wake up the collector,
- * transferring a ref to it if we were the ones to do so.
- */
- if (list_is_first(&subreq->rreq_link, &stream->subrequests))
- netfs_wake_write_collector(wreq, was_async);
-
- netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+ netfs_subreq_clear_in_progress(subreq);
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_terminated);
}
EXPORT_SYMBOL(netfs_write_subrequest_terminated);
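
As a minimal usage sketch of the new two-argument form (not taken from any in-tree filesystem; my_write_done and priv are illustrative names), a filesystem's write-completion callback now simply hands the subrequest back:

static void my_write_done(void *priv, ssize_t transferred_or_error)
{
	struct netfs_io_subrequest *subreq = priv;

	/* Ownership of the subrequest (plus a ref) passes back to netfs here;
	 * the library collects the result and handles any retries.
	 */
	netfs_write_subrequest_terminated(subreq, transferred_or_error);
}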
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index bf6d507578e5..dd8743bc8d7f 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -94,9 +94,10 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
{
struct netfs_io_request *wreq;
struct netfs_inode *ictx;
- bool is_buffered = (origin == NETFS_WRITEBACK ||
- origin == NETFS_WRITETHROUGH ||
- origin == NETFS_PGPRIV2_COPY_TO_CACHE);
+ bool is_cacheable = (origin == NETFS_WRITEBACK ||
+ origin == NETFS_WRITEBACK_SINGLE ||
+ origin == NETFS_WRITETHROUGH ||
+ origin == NETFS_PGPRIV2_COPY_TO_CACHE);
wreq = netfs_alloc_request(mapping, file, start, 0, origin);
if (IS_ERR(wreq))
@@ -105,8 +106,10 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
_enter("R=%x", wreq->debug_id);
ictx = netfs_inode(wreq->inode);
- if (is_buffered && netfs_is_cache_enabled(ictx))
+ if (is_cacheable && netfs_is_cache_enabled(ictx))
fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx));
+ if (rolling_buffer_init(&wreq->buffer, wreq->debug_id, ITER_SOURCE) < 0)
+ goto nomem;
wreq->cleaned_to = wreq->start;
@@ -115,12 +118,12 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
wreq->io_streams[0].prepare_write = ictx->ops->prepare_write;
wreq->io_streams[0].issue_write = ictx->ops->issue_write;
wreq->io_streams[0].collected_to = start;
- wreq->io_streams[0].transferred = LONG_MAX;
+ wreq->io_streams[0].transferred = 0;
wreq->io_streams[1].stream_nr = 1;
wreq->io_streams[1].source = NETFS_WRITE_TO_CACHE;
wreq->io_streams[1].collected_to = start;
- wreq->io_streams[1].transferred = LONG_MAX;
+ wreq->io_streams[1].transferred = 0;
if (fscache_resources_valid(&wreq->cache_resources)) {
wreq->io_streams[1].avail = true;
wreq->io_streams[1].active = true;
@@ -129,6 +132,9 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
}
return wreq;
+nomem:
+ netfs_put_failed_request(wreq);
+ return ERR_PTR(-ENOMEM);
}
/**
@@ -153,16 +159,15 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
loff_t start)
{
struct netfs_io_subrequest *subreq;
- struct iov_iter *wreq_iter = &wreq->io_iter;
+ struct iov_iter *wreq_iter = &wreq->buffer.iter;
/* Make sure we don't point the iterator at a used-up folio_queue
* struct being used as a placeholder to prevent the queue from
* collapsing. In such a case, extend the queue.
*/
if (iov_iter_is_folioq(wreq_iter) &&
- wreq_iter->folioq_slot >= folioq_nr_slots(wreq_iter->folioq)) {
- netfs_buffer_make_space(wreq);
- }
+ wreq_iter->folioq_slot >= folioq_nr_slots(wreq_iter->folioq))
+ rolling_buffer_make_space(&wreq->buffer);
subreq = netfs_alloc_subrequest(wreq);
subreq->source = stream->source;
@@ -198,7 +203,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
* the list. The collector only goes nextwards and uses the lock to
* remove entries off the front.
*/
- spin_lock_bh(&wreq->lock);
+ spin_lock(&wreq->lock);
list_add_tail(&subreq->rreq_link, &stream->subrequests);
if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
stream->front = subreq;
@@ -209,7 +214,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
}
}
- spin_unlock_bh(&wreq->lock);
+ spin_unlock(&wreq->lock);
stream->construct = subreq;
}
@@ -227,7 +232,7 @@ static void netfs_do_issue_write(struct netfs_io_stream *stream,
_enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);
if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
- return netfs_write_subrequest_terminated(subreq, subreq->error, false);
+ return netfs_write_subrequest_terminated(subreq, subreq->error);
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
stream->issue_write(subreq);
@@ -244,7 +249,10 @@ void netfs_reissue_write(struct netfs_io_stream *stream,
iov_iter_advance(source, size);
iov_iter_truncate(&subreq->io_iter, size);
+ subreq->retry_count++;
+ __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+ netfs_stat(&netfs_n_wh_retry_write_subreq);
netfs_do_issue_write(stream, subreq);
}
@@ -266,9 +274,9 @@ void netfs_issue_write(struct netfs_io_request *wreq,
* we can avoid overrunning the credits obtained (cifs) and try to parallelise
* content-crypto preparation with network writes.
*/
-int netfs_advance_write(struct netfs_io_request *wreq,
- struct netfs_io_stream *stream,
- loff_t start, size_t len, bool to_eof)
+size_t netfs_advance_write(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream,
+ loff_t start, size_t len, bool to_eof)
{
struct netfs_io_subrequest *subreq = stream->construct;
size_t part;
@@ -325,6 +333,9 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
_enter("");
+ if (rolling_buffer_make_space(&wreq->buffer) < 0)
+ return -ENOMEM;
+
/* netfs_perform_write() may shift i_size around the page or from out
* of the page to beyond it, but cannot move i_size into or through the
* page since we have it locked.
@@ -429,7 +440,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
}
/* Attach the folio to the rolling buffer. */
- netfs_buffer_append_folio(wreq, folio, false);
+ rolling_buffer_append(&wreq->buffer, folio, 0);
/* Move the submission point forward to allow for write-streaming data
* not starting at the front of the page. We don't do write-streaming
@@ -442,7 +453,8 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
stream = &wreq->io_streams[s];
stream->submit_off = foff;
stream->submit_len = flen;
- if ((stream->source == NETFS_WRITE_TO_CACHE && streamw) ||
+ if (!stream->avail ||
+ (stream->source == NETFS_WRITE_TO_CACHE && streamw) ||
(stream->source == NETFS_UPLOAD_TO_SERVER &&
fgroup == NETFS_FOLIO_COPY_TO_CACHE)) {
stream->submit_off = UINT_MAX;
@@ -476,7 +488,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
/* Advance the iterator(s). */
if (stream->submit_off > iter_off) {
- iov_iter_advance(&wreq->io_iter, stream->submit_off - iter_off);
+ rolling_buffer_advance(&wreq->buffer, stream->submit_off - iter_off);
iter_off = stream->submit_off;
}
@@ -494,7 +506,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
}
if (fsize > iter_off)
- iov_iter_advance(&wreq->io_iter, fsize - iter_off);
+ rolling_buffer_advance(&wreq->buffer, fsize - iter_off);
atomic64_set(&wreq->issued_to, fpos + fsize);
if (!debug)
@@ -529,7 +541,7 @@ static void netfs_end_issue_write(struct netfs_io_request *wreq)
}
if (needs_poke)
- netfs_wake_write_collector(wreq, false);
+ netfs_wake_collector(wreq);
}
/*
@@ -563,6 +575,7 @@ int netfs_writepages(struct address_space *mapping,
goto couldnt_start;
}
+ __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
trace_netfs_write(wreq, netfs_write_trace_writeback);
netfs_stat(&netfs_n_wh_writepages);
@@ -586,8 +599,9 @@ int netfs_writepages(struct address_space *mapping,
netfs_end_issue_write(wreq);
mutex_unlock(&ictx->wb_lock);
+ netfs_wake_collector(wreq);
- netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+ netfs_put_request(wreq, netfs_rreq_trace_put_return);
_leave(" = %d", error);
return error;
@@ -633,7 +647,7 @@ int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_c
struct folio **writethrough_cache)
{
_enter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
- wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end);
+ wreq->debug_id, wreq->buffer.iter.count, wreq->wsize, copied, to_page_end);
if (!*writethrough_cache) {
if (folio_test_dirty(folio))
@@ -660,11 +674,11 @@ int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_c
/*
* End a write operation used when writing through the pagecache.
*/
-int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
- struct folio *writethrough_cache)
+ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+ struct folio *writethrough_cache)
{
struct netfs_inode *ictx = netfs_inode(wreq->inode);
- int ret;
+ ssize_t ret;
_enter("R=%x", wreq->debug_id);
@@ -675,13 +689,11 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr
mutex_unlock(&ictx->wb_lock);
- if (wreq->iocb) {
+ if (wreq->iocb)
ret = -EIOCBQUEUED;
- } else {
- wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE);
- ret = wreq->error;
- }
- netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+ else
+ ret = netfs_wait_for_write(wreq);
+ netfs_put_request(wreq, netfs_rreq_trace_put_return);
return ret;
}
@@ -708,11 +720,9 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
part = netfs_advance_write(wreq, upload, start, len, false);
start += part;
len -= part;
- iov_iter_advance(&wreq->io_iter, part);
- if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
- trace_netfs_rreq(wreq, netfs_rreq_trace_wait_pause);
- wait_on_bit(&wreq->flags, NETFS_RREQ_PAUSE, TASK_UNINTERRUPTIBLE);
- }
+ rolling_buffer_advance(&wreq->buffer, part);
+ if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags))
+ netfs_wait_for_paused_write(wreq);
if (test_bit(NETFS_RREQ_FAILED, &wreq->flags))
break;
}
@@ -721,3 +731,196 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
_leave(" = %d", error);
return error;
}
+
+/*
+ * Write some of a pending folio's data back to the server and/or the cache.
+ */
+static int netfs_write_folio_single(struct netfs_io_request *wreq,
+ struct folio *folio)
+{
+ struct netfs_io_stream *upload = &wreq->io_streams[0];
+ struct netfs_io_stream *cache = &wreq->io_streams[1];
+ struct netfs_io_stream *stream;
+ size_t iter_off = 0;
+ size_t fsize = folio_size(folio), flen;
+ loff_t fpos = folio_pos(folio);
+ bool to_eof = false;
+ bool no_debug = false;
+
+ _enter("");
+
+ flen = folio_size(folio);
+ if (flen > wreq->i_size - fpos) {
+ flen = wreq->i_size - fpos;
+ folio_zero_segment(folio, flen, fsize);
+ to_eof = true;
+ } else if (flen == wreq->i_size - fpos) {
+ to_eof = true;
+ }
+
+ _debug("folio %zx/%zx", flen, fsize);
+
+ if (!upload->avail && !cache->avail) {
+ trace_netfs_folio(folio, netfs_folio_trace_cancel_store);
+ return 0;
+ }
+
+ if (!upload->construct)
+ trace_netfs_folio(folio, netfs_folio_trace_store);
+ else
+ trace_netfs_folio(folio, netfs_folio_trace_store_plus);
+
+ /* Attach the folio to the rolling buffer. */
+ folio_get(folio);
+ rolling_buffer_append(&wreq->buffer, folio, NETFS_ROLLBUF_PUT_MARK);
+
+ /* Move the submission point forward to allow for write-streaming data
+ * not starting at the front of the page. We don't do write-streaming
+ * with the cache as the cache requires DIO alignment.
+ *
+ * Also skip uploading for data that's been read and just needs copying
+ * to the cache.
+ */
+ for (int s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ stream->submit_off = 0;
+ stream->submit_len = flen;
+ if (!stream->avail) {
+ stream->submit_off = UINT_MAX;
+ stream->submit_len = 0;
+ }
+ }
+
+ /* Attach the folio to one or more subrequests. For a big folio, we
+ * could end up with thousands of subrequests if the wsize is small -
+ * but we might need to wait during the creation of subrequests for
+ * network resources (eg. SMB credits).
+ */
+ for (;;) {
+ ssize_t part;
+ size_t lowest_off = ULONG_MAX;
+ int choose_s = -1;
+
+ /* Always add to the lowest-submitted stream first. */
+ for (int s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (stream->submit_len > 0 &&
+ stream->submit_off < lowest_off) {
+ lowest_off = stream->submit_off;
+ choose_s = s;
+ }
+ }
+
+ if (choose_s < 0)
+ break;
+ stream = &wreq->io_streams[choose_s];
+
+ /* Advance the iterator(s). */
+ if (stream->submit_off > iter_off) {
+ rolling_buffer_advance(&wreq->buffer, stream->submit_off - iter_off);
+ iter_off = stream->submit_off;
+ }
+
+ atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
+ stream->submit_extendable_to = fsize - stream->submit_off;
+ part = netfs_advance_write(wreq, stream, fpos + stream->submit_off,
+ stream->submit_len, to_eof);
+ stream->submit_off += part;
+ if (part > stream->submit_len)
+ stream->submit_len = 0;
+ else
+ stream->submit_len -= part;
+ if (part > 0)
+ no_debug = true;
+ }
+
+ wreq->buffer.iter.iov_offset = 0;
+ if (fsize > iter_off)
+ rolling_buffer_advance(&wreq->buffer, fsize - iter_off);
+ atomic64_set(&wreq->issued_to, fpos + fsize);
+
+ if (!no_debug)
+ kdebug("R=%x: No submit", wreq->debug_id);
+ _leave(" = 0");
+ return 0;
+}
+
+/**
+ * netfs_writeback_single - Write back a monolithic payload
+ * @mapping: The mapping to write from
+ * @wbc: Hints from the VM
+ * @iter: Data to write, must be ITER_FOLIOQ.
+ *
+ * Write a monolithic, non-pagecache object back to the server and/or
+ * the cache.
+ */
+int netfs_writeback_single(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct iov_iter *iter)
+{
+ struct netfs_io_request *wreq;
+ struct netfs_inode *ictx = netfs_inode(mapping->host);
+ struct folio_queue *fq;
+ size_t size = iov_iter_count(iter);
+ int ret;
+
+ if (WARN_ON_ONCE(!iov_iter_is_folioq(iter)))
+ return -EIO;
+
+ if (!mutex_trylock(&ictx->wb_lock)) {
+ if (wbc->sync_mode == WB_SYNC_NONE) {
+ netfs_stat(&netfs_n_wb_lock_skip);
+ return 0;
+ }
+ netfs_stat(&netfs_n_wb_lock_wait);
+ mutex_lock(&ictx->wb_lock);
+ }
+
+ wreq = netfs_create_write_req(mapping, NULL, 0, NETFS_WRITEBACK_SINGLE);
+ if (IS_ERR(wreq)) {
+ ret = PTR_ERR(wreq);
+ goto couldnt_start;
+ }
+
+ __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
+ trace_netfs_write(wreq, netfs_write_trace_writeback_single);
+ netfs_stat(&netfs_n_wh_writepages);
+
+ if (__test_and_set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))
+ wreq->netfs_ops->begin_writeback(wreq);
+
+ for (fq = (struct folio_queue *)iter->folioq; fq; fq = fq->next) {
+ for (int slot = 0; slot < folioq_count(fq); slot++) {
+ struct folio *folio = folioq_folio(fq, slot);
+ size_t part = umin(folioq_folio_size(fq, slot), size);
+
+ _debug("wbiter %lx %llx", folio->index, atomic64_read(&wreq->issued_to));
+
+ ret = netfs_write_folio_single(wreq, folio);
+ if (ret < 0)
+ goto stop;
+ size -= part;
+ if (size <= 0)
+ goto stop;
+ }
+ }
+
+stop:
+ for (int s = 0; s < NR_IO_STREAMS; s++)
+ netfs_issue_write(wreq, &wreq->io_streams[s]);
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+
+ mutex_unlock(&ictx->wb_lock);
+ netfs_wake_collector(wreq);
+
+ netfs_put_request(wreq, netfs_rreq_trace_put_return);
+ _leave(" = %d", ret);
+ return ret;
+
+couldnt_start:
+ mutex_unlock(&ictx->wb_lock);
+ _leave(" = %d", ret);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_writeback_single);
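
A minimal caller sketch, assuming the monolithic payload is already held in a folio_queue; my_flush_single_object and its parameters are hypothetical, and iov_iter_folio_queue() is used only to satisfy the ITER_FOLIOQ requirement stated in the kernel-doc above:

static int my_flush_single_object(struct address_space *mapping,
				  struct folio_queue *fq, size_t size)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
	};
	struct iov_iter iter;

	/* netfs_writeback_single() refuses anything but an ITER_FOLIOQ source. */
	iov_iter_folio_queue(&iter, ITER_SOURCE, fq, 0, 0, size);
	return netfs_writeback_single(mapping, &wbc, &iter);
}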
diff --git a/fs/netfs/write_retry.c b/fs/netfs/write_retry.c
new file mode 100644
index 000000000000..fc9c3e0d34d8
--- /dev/null
+++ b/fs/netfs/write_retry.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Network filesystem write retrying.
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+/*
+ * Perform retries on the streams that need it.
+ */
+static void netfs_retry_write_stream(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream)
+{
+ struct list_head *next;
+
+ _enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);
+
+ if (list_empty(&stream->subrequests))
+ return;
+
+ if (stream->source == NETFS_UPLOAD_TO_SERVER &&
+ wreq->netfs_ops->retry_request)
+ wreq->netfs_ops->retry_request(wreq, stream);
+
+ if (unlikely(stream->failed))
+ return;
+
+ /* If there's no renegotiation to do, just resend each failed subreq. */
+ if (!stream->prepare_write) {
+ struct netfs_io_subrequest *subreq;
+
+ list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
+ if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
+ break;
+ if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+ struct iov_iter source;
+
+ netfs_reset_iter(subreq);
+ source = subreq->io_iter;
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+ netfs_reissue_write(stream, subreq, &source);
+ }
+ }
+ return;
+ }
+
+ next = stream->subrequests.next;
+
+ do {
+ struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
+ struct iov_iter source;
+ unsigned long long start, len;
+ size_t part;
+ bool boundary = false;
+
+ /* Go through the stream and find the next span of contiguous
+ * data that we then rejig (cifs, for example, needs the wsize
+ * renegotiating) and reissue.
+ */
+ from = list_entry(next, struct netfs_io_subrequest, rreq_link);
+ to = from;
+ start = from->start + from->transferred;
+ len = from->len - from->transferred;
+
+ if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
+ !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
+ return;
+
+ list_for_each_continue(next, &stream->subrequests) {
+ subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
+ if (subreq->start + subreq->transferred != start + len ||
+ test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
+ !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
+ break;
+ to = subreq;
+ len += to->len;
+ }
+
+ /* Determine the set of buffers we're going to use. Each
+ * subreq gets a subset of a single overall contiguous buffer.
+ */
+ netfs_reset_iter(from);
+ source = from->io_iter;
+ source.count = len;
+
+ /* Work through the sublist. */
+ subreq = from;
+ list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
+ if (!len)
+ break;
+
+ subreq->start = start;
+ subreq->len = len;
+ __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ subreq->retry_count++;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+
+ /* Renegotiate max_len (wsize) */
+ stream->sreq_max_len = len;
+ stream->prepare_write(subreq);
+
+ part = umin(len, stream->sreq_max_len);
+ if (unlikely(stream->sreq_max_segs))
+ part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
+ subreq->len = part;
+ subreq->transferred = 0;
+ len -= part;
+ start += part;
+ if (len && subreq == to &&
+ __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags))
+ boundary = true;
+
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+ netfs_reissue_write(stream, subreq, &source);
+ if (subreq == to)
+ break;
+ }
+
+ /* If we managed to use fewer subreqs, we can discard the
+ * excess; if we used the same number, then we're done.
+ */
+ if (!len) {
+ if (subreq == to)
+ continue;
+ list_for_each_entry_safe_from(subreq, tmp,
+ &stream->subrequests, rreq_link) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
+ list_del(&subreq->rreq_link);
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
+ if (subreq == to)
+ break;
+ }
+ continue;
+ }
+
+ /* We ran out of subrequests, so we need to allocate some more
+ * and insert them after.
+ */
+ do {
+ subreq = netfs_alloc_subrequest(wreq);
+ subreq->source = to->source;
+ subreq->start = start;
+ subreq->stream_nr = to->stream_nr;
+ subreq->retry_count = 1;
+
+ trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
+ refcount_read(&subreq->ref),
+ netfs_sreq_trace_new);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_split);
+
+ list_add(&subreq->rreq_link, &to->rreq_link);
+ to = list_next_entry(to, rreq_link);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+
+ stream->sreq_max_len = len;
+ stream->sreq_max_segs = INT_MAX;
+ switch (stream->source) {
+ case NETFS_UPLOAD_TO_SERVER:
+ netfs_stat(&netfs_n_wh_upload);
+ stream->sreq_max_len = umin(len, wreq->wsize);
+ break;
+ case NETFS_WRITE_TO_CACHE:
+ netfs_stat(&netfs_n_wh_write);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ stream->prepare_write(subreq);
+
+ part = umin(len, stream->sreq_max_len);
+ subreq->len = subreq->transferred + part;
+ len -= part;
+ start += part;
+ if (!len && boundary) {
+ __set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
+ boundary = false;
+ }
+
+ netfs_reissue_write(stream, subreq, &source);
+ if (!len)
+ break;
+
+ } while (len);
+
+ } while (!list_is_head(next, &stream->subrequests));
+}
+
+/*
+ * Perform retries on the streams that need it. If we're doing content
+ * encryption and the server copy changed due to a third-party write, we may
+ * need to do an RMW cycle and also rewrite the data to the cache.
+ */
+void netfs_retry_writes(struct netfs_io_request *wreq)
+{
+ struct netfs_io_stream *stream;
+ int s;
+
+ netfs_stat(&netfs_n_wh_retry_write_req);
+
+ /* Wait for all outstanding I/O to quiesce before performing retries as
+ * we may need to renegotiate the I/O sizes.
+ */
+ set_bit(NETFS_RREQ_RETRYING, &wreq->flags);
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (stream->active)
+ netfs_wait_for_in_progress_stream(wreq, stream);
+ }
+ clear_bit(NETFS_RREQ_RETRYING, &wreq->flags);
+
+ // TODO: Enc: Fetch changed partial pages
+ // TODO: Enc: Reencrypt content if needed.
+ // TODO: Enc: Wind back transferred point.
+ // TODO: Enc: Mark cache pages for retry.
+
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (stream->need_retry) {
+ stream->need_retry = false;
+ netfs_retry_write_stream(wreq, stream);
+ }
+ }
+}
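
To make the span-splitting arithmetic in netfs_retry_write_stream() above concrete, here is a standalone sketch (plain userspace C, not kernel code; the numbers are invented) of how a failed 256KiB span is reissued once prepare_write() caps each subrequest at a renegotiated 64KiB wsize:

#include <stdio.h>

int main(void)
{
	unsigned long long start = 0x100000, len = 0x40000;	/* failed contiguous span */
	unsigned long long sreq_max_len = 0x10000;		/* renegotiated wsize */

	/* Mirrors part = umin(len, stream->sreq_max_len); start/len advance by part. */
	while (len) {
		unsigned long long part = len < sreq_max_len ? len : sreq_max_len;

		printf("reissue subreq: start=%llx len=%llx\n", start, part);
		start += part;
		len -= part;
	}
	return 0;
}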
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 0eb20012792f..07932ce9246c 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -2,6 +2,7 @@
config NFS_FS
tristate "NFS client support"
depends on INET && FILE_LOCKING && MULTIUSER
+ select CRC32
select LOCKD
select SUNRPC
select NFS_COMMON
@@ -170,7 +171,8 @@ config ROOT_NFS
config NFS_FSCACHE
bool "Provide NFS client caching support"
- depends on NFS_FS=m && NETFS_SUPPORT || NFS_FS=y && NETFS_SUPPORT=y
+ depends on NFS_FS
+ select NETFS_SUPPORT
select FSCACHE
help
Say Y here if you want NFS data to be cached locally on disc through
@@ -195,7 +197,6 @@ config NFS_USE_KERNEL_DNS
config NFS_DEBUG
bool
depends on NFS_FS && SUNRPC_DEBUG
- select CRC32
default y
config NFS_DISABLE_UDP_SUPPORT
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 47189476b553..0e4c67373e4f 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -149,8 +149,8 @@ do_add_page_to_bio(struct bio *bio, int npg, enum req_op op, sector_t isect,
/* limit length to what the device mapping allows */
end = disk_addr + *len;
- if (end >= map->start + map->len)
- *len = map->start + map->len - disk_addr;
+ if (end >= map->disk_offset + map->len)
+ *len = map->disk_offset + map->len - disk_addr;
retry:
if (!bio) {
@@ -676,7 +676,7 @@ bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,
struct pnfs_layout_segment *lseg;
struct xdr_buf buf;
struct xdr_stream xdr;
- struct page *scratch;
+ struct folio *scratch;
int status, i;
uint32_t count;
__be32 *p;
@@ -689,13 +689,13 @@ bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,
return ERR_PTR(-ENOMEM);
status = -ENOMEM;
- scratch = alloc_page(gfp_mask);
+ scratch = folio_alloc(gfp_mask, 0);
if (!scratch)
goto out;
xdr_init_decode_pages(&xdr, &buf,
lgr->layoutp->pages, lgr->layoutp->len);
- xdr_set_scratch_page(&xdr, scratch);
+ xdr_set_scratch_folio(&xdr, scratch);
status = -EIO;
p = xdr_inline_decode(&xdr, 4);
@@ -744,7 +744,7 @@ process_extents:
}
out_free_scratch:
- __free_page(scratch);
+ folio_put(scratch);
out:
dprintk("%s returns %d\n", __func__, status);
switch (status) {
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index cab8809f0e0f..ab76120705e2 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -257,10 +257,11 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
struct pnfs_block_dev *child;
u64 chunk;
u32 chunk_idx;
+ u64 disk_chunk;
u64 disk_offset;
chunk = div_u64(offset, dev->chunk_size);
- div_u64_rem(chunk, dev->nr_children, &chunk_idx);
+ disk_chunk = div_u64_rem(chunk, dev->nr_children, &chunk_idx);
if (chunk_idx >= dev->nr_children) {
dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
@@ -273,7 +274,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
offset = chunk * dev->chunk_size;
/* disk offset of the stripe */
- disk_offset = div_u64(offset, dev->nr_children);
+ disk_offset = disk_chunk * dev->chunk_size;
child = &dev->children[chunk_idx];
child->map(child, disk_offset, map);
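
A standalone illustration of the stripe-mapping fix above (userspace C with invented numbers, not kernel code): with chunk_size = 0x100000 and nr_children = 3, logical chunk 5 lives on child 2 and is that child's chunk 1, so the disk offset handed to the child must be chunk-aligned:

#include <stdio.h>

int main(void)
{
	unsigned long long chunk_size = 0x100000, nr_children = 3;
	unsigned long long offset = 5 * chunk_size + 0x2000;
	unsigned long long chunk = offset / chunk_size;		/* 5 */
	unsigned long long chunk_idx = chunk % nr_children;	/* child 2 */
	unsigned long long disk_chunk = chunk / nr_children;	/* child's chunk 1 */

	offset = chunk * chunk_size;		/* round down to the stripe start */
	printf("old disk_offset=%llx (not chunk-aligned)\n", offset / nr_children);
	printf("new disk_offset=%llx\n", disk_chunk * chunk_size);
	printf("child=%llu\n", chunk_idx);
	return 0;
}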
@@ -540,16 +541,16 @@ bl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
struct pnfs_block_dev *top;
struct xdr_stream xdr;
struct xdr_buf buf;
- struct page *scratch;
+ struct folio *scratch;
int nr_volumes, ret, i;
__be32 *p;
- scratch = alloc_page(gfp_mask);
+ scratch = folio_alloc(gfp_mask, 0);
if (!scratch)
goto out;
xdr_init_decode_pages(&xdr, &buf, pdev->pages, pdev->pglen);
- xdr_set_scratch_page(&xdr, scratch);
+ xdr_set_scratch_folio(&xdr, scratch);
p = xdr_inline_decode(&xdr, sizeof(__be32));
if (!p)
@@ -581,7 +582,7 @@ bl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
out_free_volumes:
kfree(volumes);
out_free_scratch:
- __free_page(scratch);
+ folio_put(scratch);
out:
return node;
}
diff --git a/fs/nfs/blocklayout/extent_tree.c b/fs/nfs/blocklayout/extent_tree.c
index 8f7cff7a4293..315949a7e92d 100644
--- a/fs/nfs/blocklayout/extent_tree.c
+++ b/fs/nfs/blocklayout/extent_tree.c
@@ -6,6 +6,7 @@
#include <linux/vmalloc.h>
#include "blocklayout.h"
+#include "../nfs4trace.h"
#define NFSDBG_FACILITY NFSDBG_PNFS_LD
@@ -520,10 +521,71 @@ static __be32 *encode_scsi_range(struct pnfs_block_extent *be, __be32 *p)
return xdr_encode_hyper(p, be->be_length << SECTOR_SHIFT);
}
-static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
+/**
+ * ext_tree_try_encode_commit - try to encode all extents into the buffer
+ * @bl: pointer to the layout
+ * @p: pointer to the output buffer
+ * @buffer_size: size of the output buffer
+ * @count: output pointer to the number of encoded extents
+ * @lastbyte: output pointer to the last written byte
+ *
+ * Return values:
+ * %0: Success, all required extents encoded, outputs are valid
+ * %-ENOSPC: Buffer too small, nothing encoded, outputs are invalid
+ */
+static int
+ext_tree_try_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
size_t buffer_size, size_t *count, __u64 *lastbyte)
{
struct pnfs_block_extent *be;
+
+ spin_lock(&bl->bl_ext_lock);
+ for (be = ext_tree_first(&bl->bl_ext_rw); be; be = ext_tree_next(be)) {
+ if (be->be_state != PNFS_BLOCK_INVALID_DATA ||
+ be->be_tag != EXTENT_WRITTEN)
+ continue;
+
+ (*count)++;
+ if (ext_tree_layoutupdate_size(bl, *count) > buffer_size) {
+ spin_unlock(&bl->bl_ext_lock);
+ return -ENOSPC;
+ }
+ }
+ for (be = ext_tree_first(&bl->bl_ext_rw); be; be = ext_tree_next(be)) {
+ if (be->be_state != PNFS_BLOCK_INVALID_DATA ||
+ be->be_tag != EXTENT_WRITTEN)
+ continue;
+
+ if (bl->bl_scsi_layout)
+ p = encode_scsi_range(be, p);
+ else
+ p = encode_block_extent(be, p);
+ be->be_tag = EXTENT_COMMITTING;
+ }
+ *lastbyte = (bl->bl_lwb != 0) ? bl->bl_lwb - 1 : U64_MAX;
+ bl->bl_lwb = 0;
+ spin_unlock(&bl->bl_ext_lock);
+
+ return 0;
+}
+
+/**
+ * ext_tree_encode_commit - encode as many extents as possible into the buffer
+ * @bl: pointer to the layout
+ * @p: pointer to the output buffer
+ * @buffer_size: size of the output buffer
+ * @count: output pointer to the number of encoded extents
+ * @lastbyte: output pointer to the last written byte
+ *
+ * Return values:
+ * %0: Success, all required extents encoded, outputs are valid
+ * %-ENOSPC: Buffer too small, some extents are encoded, outputs are valid
+ */
+static int
+ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
+ size_t buffer_size, size_t *count, __u64 *lastbyte)
+{
+ struct pnfs_block_extent *be, *be_prev;
int ret = 0;
spin_lock(&bl->bl_ext_lock);
@@ -534,9 +596,9 @@ static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
(*count)++;
if (ext_tree_layoutupdate_size(bl, *count) > buffer_size) {
- /* keep counting.. */
+ (*count)--;
ret = -ENOSPC;
- continue;
+ break;
}
if (bl->bl_scsi_layout)
@@ -544,14 +606,30 @@ static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
else
p = encode_block_extent(be, p);
be->be_tag = EXTENT_COMMITTING;
+ be_prev = be;
+ }
+ if (!ret) {
+ *lastbyte = (bl->bl_lwb != 0) ? bl->bl_lwb - 1 : U64_MAX;
+ bl->bl_lwb = 0;
+ } else {
+ *lastbyte = be_prev->be_f_offset + be_prev->be_length;
+ *lastbyte <<= SECTOR_SHIFT;
+ *lastbyte -= 1;
}
- *lastbyte = bl->bl_lwb - 1;
- bl->bl_lwb = 0;
spin_unlock(&bl->bl_ext_lock);
return ret;
}
+/**
+ * ext_tree_prepare_commit - encode extents that need to be committed
+ * @arg: layout commit data
+ *
+ * Return values:
+ * %0: Success, all required extents are encoded
+ * %-ENOSPC: Some extents are encoded, but not all, due to the RPC size limit
+ * %-ENOMEM: Out of memory, extents not encoded
+ */
int
ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg)
{
@@ -560,20 +638,18 @@ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg)
__be32 *start_p;
int ret;
- dprintk("%s enter\n", __func__);
-
arg->layoutupdate_page = alloc_page(GFP_NOFS);
if (!arg->layoutupdate_page)
return -ENOMEM;
start_p = page_address(arg->layoutupdate_page);
arg->layoutupdate_pages = &arg->layoutupdate_page;
-retry:
- ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count, &arg->lastbytewritten);
+ ret = ext_tree_try_encode_commit(bl, start_p + 1, buffer_size,
+ &count, &arg->lastbytewritten);
if (unlikely(ret)) {
ext_tree_free_commitdata(arg, buffer_size);
- buffer_size = ext_tree_layoutupdate_size(bl, count);
+ buffer_size = NFS_SERVER(arg->inode)->wsize;
count = 0;
arg->layoutupdate_pages =
@@ -588,7 +664,8 @@ retry:
return -ENOMEM;
}
- goto retry;
+ ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size,
+ &count, &arg->lastbytewritten);
}
*start_p = cpu_to_be32(count);
@@ -607,8 +684,9 @@ retry:
}
}
- dprintk("%s found %zu ranges\n", __func__, count);
- return 0;
+ trace_bl_ext_tree_prepare_commit(ret, count,
+ arg->lastbytewritten, !!ret);
+ return ret;
}
void
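
To illustrate the new lastbytewritten calculation for a partial encode (standalone userspace C with invented sector values; SECTOR_SHIFT is 9, i.e. 512-byte sectors): if the last extent that fitted starts at sector 0x100 and covers 0x10 sectors, the reported last byte is ((0x100 + 0x10) << 9) - 1:

#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
	unsigned long long be_f_offset = 0x100, be_length = 0x10;	/* sectors */
	unsigned long long lastbyte =
		((be_f_offset + be_length) << SECTOR_SHIFT) - 1;

	printf("lastbytewritten=%llx\n", lastbyte);	/* 0x21fff */
	return 0;
}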
diff --git a/fs/nfs/blocklayout/rpc_pipefs.c b/fs/nfs/blocklayout/rpc_pipefs.c
index d8d50a88de04..d526f5ba7887 100644
--- a/fs/nfs/blocklayout/rpc_pipefs.c
+++ b/fs/nfs/blocklayout/rpc_pipefs.c
@@ -141,24 +141,18 @@ static const struct rpc_pipe_ops bl_upcall_ops = {
.destroy_msg = bl_pipe_destroy_msg,
};
-static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
+static int nfs4blocklayout_register_sb(struct super_block *sb,
struct rpc_pipe *pipe)
{
- struct dentry *dir, *dentry;
+ struct dentry *dir;
+ int err;
dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
if (dir == NULL)
- return ERR_PTR(-ENOENT);
- dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
+ return -ENOENT;
+ err = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
dput(dir);
- return dentry;
-}
-
-static void nfs4blocklayout_unregister_sb(struct super_block *sb,
- struct rpc_pipe *pipe)
-{
- if (pipe->dentry)
- rpc_unlink(pipe->dentry);
+ return err;
}
static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
@@ -167,7 +161,6 @@ static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
struct super_block *sb = ptr;
struct net *net = sb->s_fs_info;
struct nfs_net *nn = net_generic(net, nfs_net_id);
- struct dentry *dentry;
int ret = 0;
if (!try_module_get(THIS_MODULE))
@@ -180,16 +173,10 @@ static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
switch (event) {
case RPC_PIPEFS_MOUNT:
- dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
- if (IS_ERR(dentry)) {
- ret = PTR_ERR(dentry);
- break;
- }
- nn->bl_device_pipe->dentry = dentry;
+ ret = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
break;
case RPC_PIPEFS_UMOUNT:
- if (nn->bl_device_pipe->dentry)
- nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
+ rpc_unlink(nn->bl_device_pipe);
break;
default:
ret = -ENOTSUPP;
@@ -203,18 +190,17 @@ static struct notifier_block nfs4blocklayout_block = {
.notifier_call = rpc_pipefs_event,
};
-static struct dentry *nfs4blocklayout_register_net(struct net *net,
- struct rpc_pipe *pipe)
+static int nfs4blocklayout_register_net(struct net *net, struct rpc_pipe *pipe)
{
struct super_block *pipefs_sb;
- struct dentry *dentry;
+ int ret;
pipefs_sb = rpc_get_sb_net(net);
if (!pipefs_sb)
- return NULL;
- dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
+ return 0;
+ ret = nfs4blocklayout_register_sb(pipefs_sb, pipe);
rpc_put_sb_net(net);
- return dentry;
+ return ret;
}
static void nfs4blocklayout_unregister_net(struct net *net,
@@ -224,7 +210,7 @@ static void nfs4blocklayout_unregister_net(struct net *net,
pipefs_sb = rpc_get_sb_net(net);
if (pipefs_sb) {
- nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
+ rpc_unlink(pipe);
rpc_put_sb_net(net);
}
}
@@ -232,20 +218,17 @@ static void nfs4blocklayout_unregister_net(struct net *net,
static int nfs4blocklayout_net_init(struct net *net)
{
struct nfs_net *nn = net_generic(net, nfs_net_id);
- struct dentry *dentry;
+ int err;
mutex_init(&nn->bl_mutex);
init_waitqueue_head(&nn->bl_wq);
nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
if (IS_ERR(nn->bl_device_pipe))
return PTR_ERR(nn->bl_device_pipe);
- dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
- if (IS_ERR(dentry)) {
+ err = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
+ if (unlikely(err))
rpc_destroy_pipe_data(nn->bl_device_pipe);
- return PTR_ERR(dentry);
- }
- nn->bl_device_pipe->dentry = dentry;
- return 0;
+ return err;
}
static void nfs4blocklayout_net_exit(struct net *net)
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 6cf92498a5ac..c8b837006bb2 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -136,7 +136,7 @@ static void nfs_callback_down_net(u32 minorversion, struct svc_serv *serv, struc
return;
dprintk("NFS: destroy per-net callback data; net=%x\n", net->ns.inum);
- svc_xprt_destroy_all(serv, net);
+ svc_xprt_destroy_all(serv, net, false);
}
static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
@@ -153,7 +153,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
ret = svc_bind(serv, net);
if (ret < 0) {
printk(KERN_WARNING "NFS: bind callback service failed\n");
- goto err_bind;
+ goto err;
}
ret = 0;
@@ -166,13 +166,11 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
if (ret < 0) {
printk(KERN_ERR "NFS: callback service start failed\n");
- goto err_socks;
+ goto err;
}
return 0;
-err_socks:
- svc_rpcb_cleanup(serv, net);
-err_bind:
+err:
nn->cb_users[minorversion]--;
dprintk("NFS: Couldn't create callback socket: err = %d; "
"net = %x\n", ret, net->ns.inum);
@@ -211,10 +209,6 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion)
return ERR_PTR(-ENOMEM);
}
cb_info->serv = serv;
- /* As there is only one thread we need to over-ride the
- * default maximum of 80 connections
- */
- serv->sv_maxconn = 1024;
dprintk("nfs_callback_create_svc: service created\n");
return serv;
}
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 7832fb0369a1..8397c43358bd 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -718,7 +718,7 @@ __be32 nfs4_callback_offload(void *data, void *dummy,
copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
if (!copy)
- return htonl(NFS4ERR_SERVERFAULT);
+ return cpu_to_be32(NFS4ERR_DELAY);
spin_lock(&cps->clp->cl_lock);
rcu_read_lock();
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index fdeb0b34a3d3..4254ba3ee7c5 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -984,6 +984,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
nfs_put_client(cps.clp);
goto out_invalidcred;
}
+ svc_xprt_set_valid(rqstp->rq_xprt);
}
cps.minorversion = hdr_arg.minorversion;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 550ca934c9cf..54699299d5b1 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -38,7 +38,7 @@
#include <linux/sunrpc/bc_xprt.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
-
+#include <linux/nfslocalio.h>
#include "nfs4_fs.h"
#include "callback.h"
@@ -180,13 +180,13 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
clp->cl_proto = cl_init->proto;
clp->cl_nconnect = cl_init->nconnect;
clp->cl_max_connect = cl_init->max_connect ? cl_init->max_connect : 1;
- clp->cl_net = get_net(cl_init->net);
+ clp->cl_net = get_net_track(cl_init->net, &clp->cl_ns_tracker, GFP_KERNEL);
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
seqlock_init(&clp->cl_boot_lock);
ktime_get_real_ts64(&clp->cl_nfssvc_boot);
nfs_uuid_init(&clp->cl_uuid);
- spin_lock_init(&clp->cl_localio_lock);
+ INIT_WORK(&clp->cl_local_probe_work, nfs_local_probe_async_work);
#endif /* CONFIG_NFS_LOCALIO */
clp->cl_principal = "*";
@@ -244,13 +244,13 @@ static void pnfs_init_server(struct nfs_server *server)
*/
void nfs_free_client(struct nfs_client *clp)
{
- nfs_local_disable(clp);
+ nfs_localio_disable_client(clp);
/* -EIO all pending I/O */
if (!IS_ERR(clp->cl_rpcclient))
rpc_shutdown_client(clp->cl_rpcclient);
- put_net(clp->cl_net);
+ put_net_track(clp->cl_net, &clp->cl_ns_tracker);
put_nfs_version(clp->cl_nfs_mod);
kfree(clp->cl_hostname);
kfree(clp->cl_acceptor);
@@ -338,6 +338,14 @@ again:
/* Match the xprt security policy */
if (clp->cl_xprtsec.policy != data->xprtsec.policy)
continue;
+ if (clp->cl_xprtsec.policy == RPC_XPRTSEC_TLS_X509) {
+ if (clp->cl_xprtsec.cert_serial !=
+ data->xprtsec.cert_serial)
+ continue;
+ if (clp->cl_xprtsec.privkey_serial !=
+ data->xprtsec.privkey_serial)
+ continue;
+ }
refcount_inc(&clp->cl_count);
return clp;
@@ -439,7 +447,7 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
spin_unlock(&nn->nfs_client_lock);
new = rpc_ops->init_client(new, cl_init);
if (!IS_ERR(new))
- nfs_local_probe(new);
+ nfs_local_probe_async(new);
return new;
}
@@ -546,6 +554,8 @@ int nfs_create_rpc_client(struct nfs_client *clp,
args.flags |= RPC_CLNT_CREATE_NOPING;
if (test_bit(NFS_CS_REUSEPORT, &clp->cl_flags))
args.flags |= RPC_CLNT_CREATE_REUSEPORT;
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
+ args.flags |= RPC_CLNT_CREATE_NETUNREACH_FATAL;
if (!IS_ERR(clp->cl_rpcclient))
return 0;
@@ -680,6 +690,44 @@ struct nfs_client *nfs_init_client(struct nfs_client *clp,
}
EXPORT_SYMBOL_GPL(nfs_init_client);
+static void nfs4_server_set_init_caps(struct nfs_server *server)
+{
+#if IS_ENABLED(CONFIG_NFS_V4)
+ /* Set the basic capabilities */
+ server->caps = server->nfs_client->cl_mvops->init_caps;
+ if (server->flags & NFS_MOUNT_NORDIRPLUS)
+ server->caps &= ~NFS_CAP_READDIRPLUS;
+ if (server->nfs_client->cl_proto == XPRT_TRANSPORT_RDMA)
+ server->caps &= ~NFS_CAP_READ_PLUS;
+
+ /*
+ * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower
+ * authentication.
+ */
+ if (nfs4_disable_idmapping &&
+ server->client->cl_auth->au_flavor == RPC_AUTH_UNIX)
+ server->caps |= NFS_CAP_UIDGID_NOMAP;
+#endif
+}
+
+void nfs_server_set_init_caps(struct nfs_server *server)
+{
+ switch (server->nfs_client->rpc_ops->version) {
+ case 2:
+ server->caps = NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS;
+ break;
+ case 3:
+ server->caps = NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS;
+ if (!(server->flags & NFS_MOUNT_NORDIRPLUS))
+ server->caps |= NFS_CAP_READDIRPLUS;
+ break;
+ default:
+ nfs4_server_set_init_caps(server);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(nfs_server_set_init_caps);
+
/*
* Create a version 2 or 3 client
*/
@@ -709,6 +757,9 @@ static int nfs_init_server(struct nfs_server *server,
if (ctx->flags & NFS_MOUNT_NORESVPORT)
set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ if (ctx->flags & NFS_MOUNT_NETUNREACH_FATAL)
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
+
/* Allocate or find a client reference we can use */
clp = nfs_get_client(&cl_init);
if (IS_ERR(clp))
@@ -721,7 +772,6 @@ static int nfs_init_server(struct nfs_server *server,
/* Initialise the client representation from the mount data */
server->flags = ctx->flags;
server->options = ctx->options;
- server->caps |= NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS;
switch (clp->rpc_ops->version) {
case 2:
@@ -757,6 +807,8 @@ static int nfs_init_server(struct nfs_server *server,
if (error < 0)
goto error;
+ nfs_server_set_init_caps(server);
+
/* Preserve the values of mount_server-related mount options */
if (ctx->mount_server.addrlen) {
memcpy(&server->mountd_address, &ctx->mount_server.address,
@@ -809,7 +861,6 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
server->wsize = max_rpc_payload;
if (server->wsize > NFS_MAX_FILE_IO_SIZE)
server->wsize = NFS_MAX_FILE_IO_SIZE;
- server->wpages = (server->wsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);
@@ -826,7 +877,6 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
server->maxfilesize = fsinfo->maxfilesize;
- server->time_delta = fsinfo->time_delta;
server->change_attr_type = fsinfo->change_attr_type;
server->clone_blksize = fsinfo->clone_blksize;
@@ -846,6 +896,8 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
if (fsinfo->xattr_support)
server->caps |= NFS_CAP_XATTR;
+ else
+ server->caps &= ~NFS_CAP_XATTR;
#endif
}
@@ -931,7 +983,6 @@ void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *sour
target->acregmax = source->acregmax;
target->acdirmin = source->acdirmin;
target->acdirmax = source->acdirmax;
- target->caps = source->caps;
target->options = source->options;
target->auth_info = source->auth_info;
target->port = source->port;
@@ -1002,6 +1053,7 @@ struct nfs_server *nfs_alloc_server(void)
INIT_LIST_HEAD(&server->ss_src_copies);
atomic_set(&server->active, 0);
+ atomic_long_set(&server->nr_active_delegations, 0);
server->io_stats = nfs_alloc_iostats();
if (!server->io_stats) {
@@ -1100,6 +1152,8 @@ struct nfs_server *nfs_create_server(struct fs_context *fc)
if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
server->namelen = NFS2_MAXNAMLEN;
}
+ /* Linux 'subtree_check' borkenness mandates this setting */
+ server->fh_expire_type = NFS_FH_VOL_RENAME;
if (!(fattr->valid & NFS_ATTR_FATTR)) {
error = ctx->nfs_mod->rpc_ops->getattr(server, ctx->mntfh,
@@ -1163,6 +1217,8 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
if (error < 0)
goto out_free_server;
+ nfs_server_set_init_caps(server);
+
/* probe the filesystem info for this server filesystem */
error = nfs_probe_server(server, fh);
if (error < 0)
@@ -1195,6 +1251,10 @@ void nfs_clients_init(struct net *net)
#if IS_ENABLED(CONFIG_NFS_V4)
idr_init(&nn->cb_ident_idr);
#endif
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+ INIT_LIST_HEAD(&nn->nfs4_data_server_cache);
+ spin_lock_init(&nn->nfs4_data_server_lock);
+#endif
spin_lock_init(&nn->nfs_client_lock);
nn->boot_time = ktime_get_real();
memset(&nn->rpcstats, 0, sizeof(nn->rpcstats));
@@ -1211,6 +1271,9 @@ void nfs_clients_exit(struct net *net)
nfs_cleanup_cb_ident_idr(net);
WARN_ON_ONCE(!list_empty(&nn->nfs_client_list));
WARN_ON_ONCE(!list_empty(&nn->nfs_volume_list));
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+ WARN_ON_ONCE(!list_empty(&nn->nfs4_data_server_cache));
+#endif
}
#ifdef CONFIG_PROC_FS
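
Note: the nfs_server_set_init_caps() hunks above recompute server->caps from scratch on each (re)probe rather than OR-ing bits into whatever was there, which is why the "|= NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS" in nfs_init_server() and the caps copy in nfs_server_copy_userdata() are dropped. A small userspace mock of the v2/v3 selection logic only (the NFS_CAP_* values below are arbitrary stand-ins, not the kernel's bit definitions):

	#include <stdio.h>

	#define NFS_CAP_HARDLINKS	0x1
	#define NFS_CAP_SYMLINKS	0x2
	#define NFS_CAP_READDIRPLUS	0x4
	#define NFS_MOUNT_NORDIRPLUS	0x8	/* mount flag, not a cap */

	static unsigned int v2_v3_caps(int version, unsigned int mount_flags)
	{
		unsigned int caps = NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS;

		if (version == 3 && !(mount_flags & NFS_MOUNT_NORDIRPLUS))
			caps |= NFS_CAP_READDIRPLUS;
		return caps;	/* v4 instead starts from cl_mvops->init_caps */
	}

	int main(void)
	{
		printf("v3 default:    %#x\n", v2_v3_caps(3, 0));			/* 0x7 */
		printf("v3 nordirplus: %#x\n", v2_v3_caps(3, NFS_MOUNT_NORDIRPLUS));	/* 0x3 */
		printf("v2:            %#x\n", v2_v3_caps(2, 0));			/* 0x3 */
		return 0;
	}
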
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 035ba52742a5..9d3a5f29f17f 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -27,8 +27,15 @@
#define NFS_DEFAULT_DELEGATION_WATERMARK (5000U)
-static atomic_long_t nfs_active_delegations;
static unsigned nfs_delegation_watermark = NFS_DEFAULT_DELEGATION_WATERMARK;
+module_param_named(delegation_watermark, nfs_delegation_watermark, uint, 0644);
+
+static struct hlist_head *nfs_delegation_hash(struct nfs_server *server,
+ const struct nfs_fh *fhandle)
+{
+ return server->delegation_hash_table +
+ (nfs_fhandle_hash(fhandle) & server->delegation_hash_mask);
+}
static void __nfs_free_delegation(struct nfs_delegation *delegation)
{
@@ -37,11 +44,12 @@ static void __nfs_free_delegation(struct nfs_delegation *delegation)
kfree_rcu(delegation, rcu);
}
-static void nfs_mark_delegation_revoked(struct nfs_delegation *delegation)
+static void nfs_mark_delegation_revoked(struct nfs_server *server,
+ struct nfs_delegation *delegation)
{
if (!test_and_set_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
delegation->stateid.type = NFS4_INVALID_STATEID_TYPE;
- atomic_long_dec(&nfs_active_delegations);
+ atomic_long_dec(&server->nr_active_delegations);
if (!test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
nfs_clear_verifier_delegated(delegation->inode);
}
@@ -59,9 +67,10 @@ static void nfs_put_delegation(struct nfs_delegation *delegation)
__nfs_free_delegation(delegation);
}
-static void nfs_free_delegation(struct nfs_delegation *delegation)
+static void nfs_free_delegation(struct nfs_server *server,
+ struct nfs_delegation *delegation)
{
- nfs_mark_delegation_revoked(delegation);
+ nfs_mark_delegation_revoked(server, delegation);
nfs_put_delegation(delegation);
}
@@ -79,6 +88,7 @@ static void nfs_mark_return_delegation(struct nfs_server *server,
struct nfs_delegation *delegation)
{
set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
+ set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags);
set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}
@@ -236,34 +246,34 @@ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
- if (delegation != NULL) {
- spin_lock(&delegation->lock);
- nfs4_stateid_copy(&delegation->stateid, stateid);
- delegation->type = type;
- delegation->pagemod_limit = pagemod_limit;
- oldcred = delegation->cred;
- delegation->cred = get_cred(cred);
- switch (deleg_type) {
- case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG:
- case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG:
- set_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags);
- break;
- default:
- clear_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags);
- }
- clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
- if (test_and_clear_bit(NFS_DELEGATION_REVOKED,
- &delegation->flags))
- atomic_long_inc(&nfs_active_delegations);
- spin_unlock(&delegation->lock);
- rcu_read_unlock();
- put_cred(oldcred);
- trace_nfs4_reclaim_delegation(inode, type);
- } else {
+ if (!delegation) {
rcu_read_unlock();
nfs_inode_set_delegation(inode, cred, type, stateid,
pagemod_limit, deleg_type);
+ return;
}
+
+ spin_lock(&delegation->lock);
+ nfs4_stateid_copy(&delegation->stateid, stateid);
+ delegation->type = type;
+ delegation->pagemod_limit = pagemod_limit;
+ oldcred = delegation->cred;
+ delegation->cred = get_cred(cred);
+ switch (deleg_type) {
+ case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG:
+ case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG:
+ set_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags);
+ break;
+ default:
+ clear_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags);
+ }
+ clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
+ if (test_and_clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
+ atomic_long_inc(&NFS_SERVER(inode)->nr_active_delegations);
+ spin_unlock(&delegation->lock);
+ rcu_read_unlock();
+ put_cred(oldcred);
+ trace_nfs4_reclaim_delegation(inode, type);
}
static int nfs_do_return_delegation(struct inode *inode,
@@ -306,7 +316,8 @@ nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
if (delegation == NULL)
goto out;
spin_lock(&delegation->lock);
- if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+ if (delegation->inode &&
+ !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
/* Refcount matched in nfs_end_delegation_return() */
ret = nfs_get_delegation(delegation);
@@ -330,14 +341,16 @@ nfs_start_delegation_return(struct nfs_inode *nfsi)
}
static void nfs_abort_delegation_return(struct nfs_delegation *delegation,
- struct nfs_client *clp, int err)
+ struct nfs_server *server, int err)
{
-
spin_lock(&delegation->lock);
clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
if (err == -EAGAIN) {
set_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
- set_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state);
+ set_bit(NFS4SERV_DELEGRETURN_DELAYED,
+ &server->delegation_flags);
+ set_bit(NFS4CLNT_DELEGRETURN_DELAYED,
+ &server->nfs_client->cl_state);
}
spin_unlock(&delegation->lock);
}
@@ -351,6 +364,8 @@ nfs_detach_delegation_locked(struct nfs_inode *nfsi,
rcu_dereference_protected(nfsi->delegation,
lockdep_is_held(&clp->cl_lock));
+ trace_nfs4_detach_delegation(&nfsi->vfs_inode, delegation->type);
+
if (deleg_cur == NULL || delegation != deleg_cur)
return NULL;
@@ -359,6 +374,7 @@ nfs_detach_delegation_locked(struct nfs_inode *nfsi,
spin_unlock(&delegation->lock);
return NULL;
}
+ hlist_del_init_rcu(&delegation->hash);
list_del_rcu(&delegation->super_list);
delegation->inode = NULL;
rcu_assign_pointer(nfsi->delegation, NULL);
@@ -406,7 +422,8 @@ nfs_update_delegation_cred(struct nfs_delegation *delegation,
}
static void
-nfs_update_inplace_delegation(struct nfs_delegation *delegation,
+nfs_update_inplace_delegation(struct nfs_server *server,
+ struct nfs_delegation *delegation,
const struct nfs_delegation *update)
{
if (nfs4_stateid_is_newer(&update->stateid, &delegation->stateid)) {
@@ -419,7 +436,7 @@ nfs_update_inplace_delegation(struct nfs_delegation *delegation,
nfs_update_delegation_cred(delegation, update->cred);
/* smp_mb__before_atomic() is implicit due to xchg() */
clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
- atomic_long_inc(&nfs_active_delegations);
+ atomic_long_inc(&server->nr_active_delegations);
}
}
}
@@ -474,7 +491,7 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
if (nfs4_stateid_match_other(&old_delegation->stateid,
&delegation->stateid)) {
spin_lock(&old_delegation->lock);
- nfs_update_inplace_delegation(old_delegation,
+ nfs_update_inplace_delegation(server, old_delegation,
delegation);
spin_unlock(&old_delegation->lock);
goto out;
@@ -520,10 +537,12 @@ add_new:
spin_unlock(&inode->i_lock);
list_add_tail_rcu(&delegation->super_list, &server->delegations);
+ hlist_add_head_rcu(&delegation->hash,
+ nfs_delegation_hash(server, &NFS_I(inode)->fh));
rcu_assign_pointer(nfsi->delegation, delegation);
delegation = NULL;
- atomic_long_inc(&nfs_active_delegations);
+ atomic_long_inc(&server->nr_active_delegations);
trace_nfs4_set_delegation(inode, type);
@@ -537,7 +556,7 @@ out:
__nfs_free_delegation(delegation);
if (freeme != NULL) {
nfs_do_return_delegation(inode, freeme, 0);
- nfs_free_delegation(freeme);
+ nfs_free_delegation(server, freeme);
}
return status;
}
@@ -547,7 +566,7 @@ out:
*/
static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
- struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ struct nfs_server *server = NFS_SERVER(inode);
unsigned int mode = O_WRONLY | O_RDWR;
int err = 0;
@@ -569,11 +588,11 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
/*
* Guard against state recovery
*/
- err = nfs4_wait_clnt_recover(clp);
+ err = nfs4_wait_clnt_recover(server->nfs_client);
}
if (err) {
- nfs_abort_delegation_return(delegation, clp, err);
+ nfs_abort_delegation_return(delegation, server, err);
goto out;
}
@@ -588,19 +607,10 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
{
bool ret = false;
+ trace_nfs_delegation_need_return(delegation);
+
if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
ret = true;
- else if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) {
- struct inode *inode;
-
- spin_lock(&delegation->lock);
- inode = delegation->inode;
- if (inode && list_empty(&NFS_I(inode)->open_files))
- ret = true;
- spin_unlock(&delegation->lock);
- }
- if (ret)
- clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) ||
test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) ||
test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
@@ -619,6 +629,9 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server,
struct nfs_delegation *place_holder_deleg = NULL;
int err = 0;
+ if (!test_and_clear_bit(NFS4SERV_DELEGRETURN,
+ &server->delegation_flags))
+ return 0;
restart:
/*
* To avoid quadratic looping we hold a reference
@@ -670,6 +683,7 @@ restart:
cond_resched();
if (!err)
goto restart;
+ set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags);
set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
goto out;
}
@@ -684,6 +698,9 @@ static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
struct nfs_delegation *d;
bool ret = false;
+ if (!test_and_clear_bit(NFS4SERV_DELEGRETURN_DELAYED,
+ &server->delegation_flags))
+ goto out;
list_for_each_entry_rcu (d, &server->delegations, super_list) {
if (!test_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags))
continue;
@@ -691,6 +708,7 @@ static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
clear_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags);
ret = true;
}
+out:
return ret;
}
@@ -750,7 +768,7 @@ void nfs_inode_evict_delegation(struct inode *inode)
set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
set_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags);
nfs_do_return_delegation(inode, delegation, 1);
- nfs_free_delegation(delegation);
+ nfs_free_delegation(NFS_SERVER(inode), delegation);
}
}
@@ -781,6 +799,43 @@ int nfs4_inode_return_delegation(struct inode *inode)
}
/**
+ * nfs4_inode_set_return_delegation_on_close - asynchronously return a delegation
+ * @inode: inode to process
+ *
+ * This routine is called to request that the delegation be returned as soon
+ * as the file is closed. If the file is already closed, the delegation is
+ * immediately returned.
+ */
+void nfs4_inode_set_return_delegation_on_close(struct inode *inode)
+{
+ struct nfs_delegation *delegation;
+ struct nfs_delegation *ret = NULL;
+
+ if (!inode)
+ return;
+ rcu_read_lock();
+ delegation = nfs4_get_valid_delegation(inode);
+ if (!delegation)
+ goto out;
+ spin_lock(&delegation->lock);
+ if (!delegation->inode)
+ goto out_unlock;
+ if (list_empty(&NFS_I(inode)->open_files) &&
+ !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+ /* Refcount matched in nfs_end_delegation_return() */
+ ret = nfs_get_delegation(delegation);
+ } else
+ set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
+out_unlock:
+ spin_unlock(&delegation->lock);
+ if (ret)
+ nfs_clear_verifier_delegated(inode);
+out:
+ rcu_read_unlock();
+ nfs_end_delegation_return(inode, ret, 0);
+}
+
+/**
* nfs4_inode_return_delegation_on_close - asynchronously return a delegation
* @inode: inode to process
*
@@ -799,7 +854,8 @@ void nfs4_inode_return_delegation_on_close(struct inode *inode)
if (!delegation)
goto out;
if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) ||
- atomic_long_read(&nfs_active_delegations) >= nfs_delegation_watermark) {
+ atomic_long_read(&NFS_SERVER(inode)->nr_active_delegations) >=
+ nfs_delegation_watermark) {
spin_lock(&delegation->lock);
if (delegation->inode &&
list_empty(&NFS_I(inode)->open_files) &&
@@ -841,11 +897,25 @@ int nfs4_inode_make_writeable(struct inode *inode)
return nfs4_inode_return_delegation(inode);
}
-static void nfs_mark_return_if_closed_delegation(struct nfs_server *server,
- struct nfs_delegation *delegation)
+static void
+nfs_mark_return_if_closed_delegation(struct nfs_server *server,
+ struct nfs_delegation *delegation)
{
- set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
- set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
+ struct inode *inode;
+
+ if (test_bit(NFS_DELEGATION_RETURN, &delegation->flags) ||
+ test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags))
+ return;
+ spin_lock(&delegation->lock);
+ inode = delegation->inode;
+ if (!inode)
+ goto out;
+ if (list_empty(&NFS_I(inode)->open_files))
+ nfs_mark_return_delegation(server, delegation);
+ else
+ set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
+out:
+ spin_unlock(&delegation->lock);
}
static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
@@ -961,7 +1031,7 @@ static void nfs_revoke_delegation(struct inode *inode,
}
spin_unlock(&delegation->lock);
}
- nfs_mark_delegation_revoked(delegation);
+ nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation);
ret = true;
out:
rcu_read_unlock();
@@ -969,13 +1039,6 @@ out:
nfs_inode_find_state_and_recover(inode, stateid);
}
-void nfs_remove_bad_delegation(struct inode *inode,
- const nfs4_stateid *stateid)
-{
- nfs_revoke_delegation(inode, stateid);
-}
-EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);
-
void nfs_delegation_mark_returned(struct inode *inode,
const nfs4_stateid *stateid)
{
@@ -1000,7 +1063,7 @@ void nfs_delegation_mark_returned(struct inode *inode,
delegation->stateid.seqid = stateid->seqid;
}
- nfs_mark_delegation_revoked(delegation);
+ nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation);
clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
spin_unlock(&delegation->lock);
if (nfs_detach_delegation(NFS_I(inode), delegation, NFS_SERVER(inode)))
@@ -1018,6 +1081,24 @@ out_rcu_unlock:
}
/**
+ * nfs_remove_bad_delegation - handle delegations that are unusable
+ * @inode: inode to process
+ * @stateid: the delegation's stateid
+ *
+ * If the server ACK-ed our FREE_STATEID then clean
+ * up the delegation, else mark and keep the revoked state.
+ */
+void nfs_remove_bad_delegation(struct inode *inode,
+ const nfs4_stateid *stateid)
+{
+ if (stateid && stateid->type == NFS4_FREED_STATEID_TYPE)
+ nfs_delegation_mark_returned(inode, stateid);
+ else
+ nfs_revoke_delegation(inode, stateid);
+}
+EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);
+
+/**
* nfs_expire_unused_delegation_types
* @clp: client to process
* @flags: delegation types to expire
@@ -1095,11 +1176,12 @@ static struct inode *
nfs_delegation_find_inode_server(struct nfs_server *server,
const struct nfs_fh *fhandle)
{
+ struct hlist_head *head = nfs_delegation_hash(server, fhandle);
struct nfs_delegation *delegation;
struct super_block *freeme = NULL;
struct inode *res = NULL;
- list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+ hlist_for_each_entry_rcu(delegation, head, hash) {
spin_lock(&delegation->lock);
if (delegation->inode != NULL &&
!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
@@ -1202,7 +1284,7 @@ restart:
if (delegation != NULL) {
if (nfs_detach_delegation(NFS_I(inode), delegation,
server) != NULL)
- nfs_free_delegation(delegation);
+ nfs_free_delegation(server, delegation);
/* Match nfs_start_delegation_return_locked */
nfs_put_delegation(delegation);
}
@@ -1239,6 +1321,7 @@ static void nfs_mark_test_expired_delegation(struct nfs_server *server,
return;
clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
set_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
+ set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags);
set_bit(NFS4CLNT_DELEGATION_EXPIRED, &server->nfs_client->cl_state);
}
@@ -1317,6 +1400,9 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server,
nfs4_stateid stateid;
unsigned long gen = ++server->delegation_gen;
+ if (!test_and_clear_bit(NFS4SERV_DELEGATION_EXPIRED,
+ &server->delegation_flags))
+ return 0;
restart:
rcu_read_lock();
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
@@ -1346,6 +1432,9 @@ restart:
goto restart;
}
nfs_inode_mark_test_expired_delegation(server,inode);
+ set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags);
+ set_bit(NFS4CLNT_DELEGATION_EXPIRED,
+ &server->nfs_client->cl_state);
iput(inode);
return -EAGAIN;
}
@@ -1500,4 +1589,17 @@ out:
return ret;
}
-module_param_named(delegation_watermark, nfs_delegation_watermark, uint, 0644);
+int nfs4_delegation_hash_alloc(struct nfs_server *server)
+{
+ int delegation_buckets, i;
+
+ delegation_buckets = roundup_pow_of_two(nfs_delegation_watermark / 16);
+ server->delegation_hash_mask = delegation_buckets - 1;
+ server->delegation_hash_table = kmalloc_array(delegation_buckets,
+ sizeof(*server->delegation_hash_table), GFP_KERNEL);
+ if (!server->delegation_hash_table)
+ return -ENOMEM;
+ for (i = 0; i < delegation_buckets; i++)
+ INIT_HLIST_HEAD(&server->delegation_hash_table[i]);
+ return 0;
+}
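
For scale: nfs4_delegation_hash_alloc() sizes the per-server table from the delegation_watermark module parameter, so at the default NFS_DEFAULT_DELEGATION_WATERMARK of 5000 it allocates roundup_pow_of_two(5000 / 16) = 512 buckets, and nfs_delegation_hash() masks the filehandle hash with 511. A stand-alone sketch of that arithmetic (userspace, with roundup_pow_of_two() and the filehandle hash mocked for illustration):

	#include <stdio.h>

	/* mock of the kernel roundup_pow_of_two() helper, for n >= 1 */
	static unsigned int roundup_pow_of_two_mock(unsigned int n)
	{
		unsigned int p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		unsigned int watermark = 5000;		/* NFS_DEFAULT_DELEGATION_WATERMARK */
		unsigned int buckets = roundup_pow_of_two_mock(watermark / 16);
		unsigned int mask = buckets - 1;
		unsigned int fh_hash = 0xdeadbeef;	/* stand-in for nfs_fhandle_hash(fhandle) */

		/* prints: buckets=512 mask=511 bucket=239 */
		printf("buckets=%u mask=%u bucket=%u\n", buckets, mask, fh_hash & mask);
		return 0;
	}
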
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 71524d34ed20..08ec2e9c68a4 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -14,6 +14,7 @@
* NFSv4 delegation
*/
struct nfs_delegation {
+ struct hlist_node hash;
struct list_head super_list;
const struct cred *cred;
struct inode *inode;
@@ -49,6 +50,7 @@ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
unsigned long pagemod_limit, u32 deleg_type);
int nfs4_inode_return_delegation(struct inode *inode);
void nfs4_inode_return_delegation_on_close(struct inode *inode);
+void nfs4_inode_set_return_delegation_on_close(struct inode *inode);
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
void nfs_inode_evict_delegation(struct inode *inode);
@@ -122,4 +124,6 @@ static inline int nfs_have_delegated_mtime(struct inode *inode)
NFS_DELEGATION_FLAG_TIME);
}
+int nfs4_delegation_hash_alloc(struct nfs_server *server);
+
#endif
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 492cffd9d3d8..ea9f6ca8f30f 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -666,6 +666,8 @@ static bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx,
{
if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS))
return false;
+ if (NFS_SERVER(dir)->flags & NFS_MOUNT_FORCE_RDIRPLUS)
+ return true;
if (ctx->pos == 0 ||
cache_hits + cache_misses > NFS_READDIR_CACHE_USAGE_THRESHOLD)
return true;
@@ -827,17 +829,17 @@ static int nfs_readdir_folio_filler(struct nfs_readdir_descriptor *desc,
struct address_space *mapping = desc->file->f_mapping;
struct folio *new, *folio = *arrays;
struct xdr_stream stream;
- struct page *scratch;
+ struct folio *scratch;
struct xdr_buf buf;
u64 cookie;
int status;
- scratch = alloc_page(GFP_KERNEL);
+ scratch = folio_alloc(GFP_KERNEL, 0);
if (scratch == NULL)
return -ENOMEM;
xdr_init_decode_pages(&stream, &buf, xdr_pages, buflen);
- xdr_set_scratch_page(&stream, scratch);
+ xdr_set_scratch_folio(&stream, scratch);
do {
status = nfs_readdir_entry_decode(desc, entry, &stream);
@@ -889,7 +891,7 @@ static int nfs_readdir_folio_filler(struct nfs_readdir_descriptor *desc,
if (folio != *arrays)
nfs_readdir_folio_unlock_and_put(folio);
- put_page(scratch);
+ folio_put(scratch);
return status;
}
@@ -1532,7 +1534,8 @@ static int nfs_is_exclusive_create(struct inode *dir, unsigned int flags)
{
if (NFS_PROTO(dir)->version == 2)
return 0;
- return flags & LOOKUP_EXCL;
+ return (flags & (LOOKUP_CREATE | LOOKUP_EXCL)) ==
+ (LOOKUP_CREATE | LOOKUP_EXCL);
}
/*
@@ -1672,7 +1675,7 @@ nfs_lookup_revalidate_delegated(struct inode *dir, struct dentry *dentry,
return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
}
-static int nfs_lookup_revalidate_dentry(struct inode *dir,
+static int nfs_lookup_revalidate_dentry(struct inode *dir, const struct qstr *name,
struct dentry *dentry,
struct inode *inode, unsigned int flags)
{
@@ -1690,7 +1693,7 @@ static int nfs_lookup_revalidate_dentry(struct inode *dir,
goto out;
dir_verifier = nfs_save_change_attribute(dir);
- ret = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
+ ret = NFS_PROTO(dir)->lookup(dir, dentry, name, fhandle, fattr);
if (ret < 0)
goto out;
@@ -1732,8 +1735,8 @@ out:
* cached dentry and do a new lookup.
*/
static int
-nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
- unsigned int flags)
+nfs_do_lookup_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
int error = 0;
@@ -1775,7 +1778,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
if (NFS_STALE(inode))
goto out_bad;
- return nfs_lookup_revalidate_dentry(dir, dentry, inode, flags);
+ return nfs_lookup_revalidate_dentry(dir, name, dentry, inode, flags);
out_valid:
return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
out_bad:
@@ -1785,38 +1788,26 @@ out_bad:
}
static int
-__nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
- int (*reval)(struct inode *, struct dentry *, unsigned int))
+__nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
{
- struct dentry *parent;
- struct inode *dir;
- int ret;
-
if (flags & LOOKUP_RCU) {
if (dentry->d_fsdata == NFS_FSDATA_BLOCKED)
return -ECHILD;
- parent = READ_ONCE(dentry->d_parent);
- dir = d_inode_rcu(parent);
- if (!dir)
- return -ECHILD;
- ret = reval(dir, dentry, flags);
- if (parent != READ_ONCE(dentry->d_parent))
- return -ECHILD;
} else {
/* Wait for unlink to complete - see unblock_revalidate() */
wait_var_event(&dentry->d_fsdata,
smp_load_acquire(&dentry->d_fsdata)
!= NFS_FSDATA_BLOCKED);
- parent = dget_parent(dentry);
- ret = reval(d_inode(parent), dentry, flags);
- dput(parent);
}
- return ret;
+ return 0;
}
-static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+static int nfs_lookup_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
- return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
+ if (__nfs_lookup_revalidate(dentry, flags))
+ return -ECHILD;
+ return nfs_do_lookup_revalidate(dir, name, dentry, flags);
}
static void block_revalidate(struct dentry *dentry)
@@ -1837,9 +1828,7 @@ static void block_revalidate(struct dentry *dentry)
static void unblock_revalidate(struct dentry *dentry)
{
- /* store_release ensures wait_var_event() sees the update */
- smp_store_release(&dentry->d_fsdata, NULL);
- wake_up_var(&dentry->d_fsdata);
+ store_release_wake_up(&dentry->d_fsdata, NULL);
}
/*
@@ -1982,7 +1971,8 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
dir_verifier = nfs_save_change_attribute(dir);
trace_nfs_lookup_enter(dir, dentry, flags);
- error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
+ error = NFS_PROTO(dir)->lookup(dir, dentry, &dentry->d_name,
+ fhandle, fattr);
if (error == -ENOENT) {
if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
dir_verifier = inode_peek_iversion_raw(dir);
@@ -2025,7 +2015,8 @@ void nfs_d_prune_case_insensitive_aliases(struct inode *inode)
EXPORT_SYMBOL_GPL(nfs_d_prune_case_insensitive_aliases);
#if IS_ENABLED(CONFIG_NFS_V4)
-static int nfs4_lookup_revalidate(struct dentry *, unsigned int);
+static int nfs4_lookup_revalidate(struct inode *, const struct qstr *,
+ struct dentry *, unsigned int);
const struct dentry_operations nfs4_dentry_operations = {
.d_revalidate = nfs4_lookup_revalidate,
@@ -2207,18 +2198,19 @@ no_open:
else
dput(dentry);
}
- if (IS_ERR(res))
- return PTR_ERR(res);
return finish_no_open(file, res);
}
EXPORT_SYMBOL_GPL(nfs_atomic_open);
static int
-nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
- unsigned int flags)
+nfs4_lookup_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
+ if (__nfs_lookup_revalidate(dentry, flags))
+ return -ECHILD;
+
trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY))
@@ -2254,16 +2246,10 @@ nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
reval_dentry:
if (flags & LOOKUP_RCU)
return -ECHILD;
- return nfs_lookup_revalidate_dentry(dir, dentry, inode, flags);
+ return nfs_lookup_revalidate_dentry(dir, name, dentry, inode, flags);
full_reval:
- return nfs_do_lookup_revalidate(dir, dentry, flags);
-}
-
-static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
-{
- return __nfs_lookup_revalidate(dentry, flags,
- nfs4_do_lookup_revalidate);
+ return nfs_do_lookup_revalidate(dir, name, dentry, flags);
}
#endif /* CONFIG_NFSV4 */
@@ -2272,7 +2258,7 @@ int nfs_atomic_open_v23(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned int open_flags,
umode_t mode)
{
-
+ struct dentry *res = NULL;
/* Same as look+open from lookup_open(), but with different O_TRUNC
* handling.
*/
@@ -2282,26 +2268,21 @@ int nfs_atomic_open_v23(struct inode *dir, struct dentry *dentry,
return -ENAMETOOLONG;
if (open_flags & O_CREAT) {
- file->f_mode |= FMODE_CREATED;
error = nfs_do_create(dir, dentry, mode, open_flags);
- if (error)
+ if (!error) {
+ file->f_mode |= FMODE_CREATED;
+ return finish_open(file, dentry, NULL);
+ } else if (error != -EEXIST || open_flags & O_EXCL)
return error;
- return finish_open(file, dentry, NULL);
- } else if (d_in_lookup(dentry)) {
+ }
+ if (d_in_lookup(dentry)) {
/* The only flags nfs_lookup considers are
* LOOKUP_EXCL and LOOKUP_RENAME_TARGET, and
* we want those to be zero so the lookup isn't skipped.
*/
- struct dentry *res = nfs_lookup(dir, dentry, 0);
-
- d_lookup_done(dentry);
- if (unlikely(res)) {
- if (IS_ERR(res))
- return PTR_ERR(res);
- return finish_no_open(file, res);
- }
+ res = nfs_lookup(dir, dentry, 0);
}
- return finish_no_open(file, NULL);
+ return finish_no_open(file, res);
}
EXPORT_SYMBOL_GPL(nfs_atomic_open_v23);
@@ -2319,7 +2300,8 @@ nfs_add_or_obtain(struct dentry *dentry, struct nfs_fh *fhandle,
d_drop(dentry);
if (fhandle->size == 0) {
- error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
+ error = NFS_PROTO(dir)->lookup(dir, dentry, &dentry->d_name,
+ fhandle, fattr);
if (error)
goto out_error;
}
@@ -2433,11 +2415,11 @@ EXPORT_SYMBOL_GPL(nfs_mknod);
/*
* See comments for nfs_proc_create regarding failed operations.
*/
-int nfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+struct dentry *nfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct iattr attr;
- int error;
+ struct dentry *ret;
dfprintk(VFS, "NFS: mkdir(%s/%lu), %pd\n",
dir->i_sb->s_id, dir->i_ino, dentry);
@@ -2446,14 +2428,9 @@ int nfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
attr.ia_mode = mode | S_IFDIR;
trace_nfs_mkdir_enter(dir, dentry);
- error = NFS_PROTO(dir)->mkdir(dir, dentry, &attr);
- trace_nfs_mkdir_exit(dir, dentry, error);
- if (error != 0)
- goto out_err;
- return 0;
-out_err:
- d_drop(dentry);
- return error;
+ ret = NFS_PROTO(dir)->mkdir(dir, dentry, &attr);
+ trace_nfs_mkdir_exit(dir, dentry, PTR_ERR_OR_ZERO(ret));
+ return ret;
}
EXPORT_SYMBOL_GPL(nfs_mkdir);
@@ -2690,6 +2667,18 @@ nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data)
unblock_revalidate(new_dentry);
}
+static bool nfs_rename_is_unsafe_cross_dir(struct dentry *old_dentry,
+ struct dentry *new_dentry)
+{
+ struct nfs_server *server = NFS_SB(old_dentry->d_sb);
+
+ if (old_dentry->d_parent != new_dentry->d_parent)
+ return false;
+ if (server->fh_expire_type & NFS_FH_RENAME_UNSAFE)
+ return !(server->fh_expire_type & NFS_FH_NOEXPIRE_WITH_OPEN);
+ return true;
+}
+
/*
* RENAME
* FIXME: Some nfsds, like the Linux user space nfsd, may generate a
@@ -2777,7 +2766,8 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
}
- if (S_ISREG(old_inode->i_mode))
+ if (S_ISREG(old_inode->i_mode) &&
+ nfs_rename_is_unsafe_cross_dir(old_dentry, new_dentry))
nfs_sync_inode(old_inode);
task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry,
must_unblock ? nfs_unblock_rename : NULL);
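
Much of the dir.c churn above tracks the ->d_revalidate signature change: the op now receives the parent directory inode and the name being validated directly, so __nfs_lookup_revalidate() shrinks to the RCU/blocked-unlink gate and the dir/name plumbing comes from the caller. A minimal sketch of an op with the new shape, for a hypothetical examplefs (not part of this patch, shown only to illustrate the prototype the hunks above adopt):

	static int examplefs_d_revalidate(struct inode *dir, const struct qstr *name,
					  struct dentry *dentry, unsigned int flags)
	{
		if (flags & LOOKUP_RCU)
			return -ECHILD;	/* can't sleep in RCU walk; ask for ref-walk */

		/* ... revalidate @name in @dir against @dentry's inode here ... */
		return 1;		/* 1: still valid, 0: drop and re-look-up */
	}

	static const struct dentry_operations examplefs_dentry_operations = {
		.d_revalidate	= examplefs_d_revalidate,
	};
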
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index b08dbe96bc57..48d89716193a 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -56,6 +56,7 @@
#include <linux/uaccess.h>
#include <linux/atomic.h>
+#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "pnfs.h"
@@ -130,6 +131,20 @@ static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
dreq->count = req_start;
}
+static void nfs_direct_file_adjust_size_locked(struct inode *inode,
+ loff_t offset, size_t count)
+{
+ loff_t newsize = offset + (loff_t)count;
+ loff_t oldsize = i_size_read(inode);
+
+ if (newsize > oldsize) {
+ i_size_write(inode, newsize);
+ NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
+ trace_nfs_size_grow(inode, newsize);
+ nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
+ }
+}
+
/**
* nfs_swap_rw - NFS address space operation for swap I/O
* @iocb: target I/O control block
@@ -272,6 +287,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
nfs_direct_count_bytes(dreq, hdr);
spin_unlock(&dreq->lock);
+ nfs_update_delegated_atime(dreq->inode);
+
while (!list_empty(&hdr->pages)) {
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
struct page *page = req->wb_page;
@@ -303,6 +320,7 @@ static void nfs_read_sync_pgio_error(struct list_head *head, int error)
static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
get_dreq(hdr->dreq);
+ set_bit(NFS_IOHDR_ODIRECT, &hdr->flags);
}
static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
@@ -739,7 +757,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
struct nfs_direct_req *dreq = hdr->dreq;
struct nfs_commit_info cinfo;
- struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+ struct inode *inode = dreq->inode;
int flags = NFS_ODIRECT_DONE;
trace_nfs_direct_write_completion(dreq);
@@ -761,7 +779,13 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
}
spin_unlock(&dreq->lock);
+ spin_lock(&inode->i_lock);
+ nfs_direct_file_adjust_size_locked(inode, dreq->io_start, dreq->count);
+ nfs_update_delegated_mtime_locked(dreq->inode);
+ spin_unlock(&inode->i_lock);
+
while (!list_empty(&hdr->pages)) {
+ struct nfs_page *req;
req = nfs_list_entry(hdr->pages.next);
nfs_list_remove_request(req);
diff --git a/fs/nfs/export.c b/fs/nfs/export.c
index be686b8e0c54..a10dd5f9d078 100644
--- a/fs/nfs/export.c
+++ b/fs/nfs/export.c
@@ -66,14 +66,21 @@ nfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
{
struct nfs_fattr *fattr = NULL;
struct nfs_fh *server_fh = nfs_exp_embedfh(fid->raw);
- size_t fh_size = offsetof(struct nfs_fh, data) + server_fh->size;
+ size_t fh_size = offsetof(struct nfs_fh, data);
const struct nfs_rpc_ops *rpc_ops;
struct dentry *dentry;
struct inode *inode;
- int len = EMBED_FH_OFF + XDR_QUADLEN(fh_size);
+ int len = EMBED_FH_OFF;
u32 *p = fid->raw;
int ret;
+ /* Initial check of bounds */
+ if (fh_len < len + XDR_QUADLEN(fh_size) ||
+ fh_len > XDR_QUADLEN(NFS_MAXFHSIZE))
+ return NULL;
+ /* Calculate embedded filehandle size */
+ fh_size += server_fh->size;
+ len += XDR_QUADLEN(fh_size);
/* NULL translates to ESTALE */
if (fh_len < len || fh_type != len)
return NULL;
@@ -154,5 +161,6 @@ const struct export_operations nfs_export_ops = {
EXPORT_OP_CLOSE_BEFORE_UNLINK |
EXPORT_OP_REMOTE_FS |
EXPORT_OP_NOATOMIC_ATTR |
- EXPORT_OP_FLUSH_ON_CLOSE,
+ EXPORT_OP_FLUSH_ON_CLOSE |
+ EXPORT_OP_NOLOCKS,
};
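
The nfs_fh_to_dentry() fix above is two-staged: it first rejects fh_len values that cannot even hold the EMBED_FH_OFF header words plus the nfs_fh size field, and only then trusts server_fh->size to compute the full embedded length. In 4-byte XDR words, XDR_QUADLEN(offsetof(struct nfs_fh, data)) is XDR_QUADLEN(2) = 1 and the cap XDR_QUADLEN(NFS_MAXFHSIZE) is XDR_QUADLEN(128) = 32. Stand-alone sketch of that arithmetic (EMBED_FH_OFF = 3 header words is an assumption about the surrounding file, not taken from this hunk):

	#include <stdio.h>

	#define XDR_QUADLEN(l)	(((l) + 3) >> 2)	/* bytes -> 4-byte XDR words */
	#define NFS_MAXFHSIZE	128
	#define EMBED_FH_OFF	3	/* assumed header word count before the embedded fh */

	int main(void)
	{
		unsigned int nfs_fh_hdr = 2;	/* offsetof(struct nfs_fh, data): the u16 size field */
		unsigned int min_words = EMBED_FH_OFF + XDR_QUADLEN(nfs_fh_hdr);
		unsigned int max_words = XDR_QUADLEN(NFS_MAXFHSIZE);

		/* prints: min fh_len 4 words, max fh_len 32 words */
		printf("min fh_len %u words, max fh_len %u words\n", min_words, max_words);
		return 0;
	}
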
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 1bb646752e46..d020aab40c64 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -28,7 +28,9 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>
+#include <linux/rmap.h>
#include <linux/swap.h>
+#include <linux/compaction.h>
#include <linux/uaccess.h>
#include <linux/filelock.h>
@@ -159,6 +161,8 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
struct inode *inode = file_inode(iocb->ki_filp);
ssize_t result;
+ trace_nfs_file_read(iocb, to);
+
if (iocb->ki_flags & IOCB_DIRECT)
return nfs_file_direct_read(iocb, to, false);
@@ -206,24 +210,25 @@ nfs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe
EXPORT_SYMBOL_GPL(nfs_file_splice_read);
int
-nfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+nfs_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
struct inode *inode = file_inode(file);
int status;
dprintk("NFS: mmap(%pD2)\n", file);
- /* Note: generic_file_mmap() returns ENOSYS on nommu systems
+ /* Note: generic_file_mmap_prepare() returns ENOSYS on nommu systems
* so we call that before revalidating the mapping
*/
- status = generic_file_mmap(file, vma);
+ status = generic_file_mmap_prepare(desc);
if (!status) {
- vma->vm_ops = &nfs_file_vm_ops;
+ desc->vm_ops = &nfs_file_vm_ops;
status = nfs_revalidate_mapping(inode, file->f_mapping);
}
return status;
}
-EXPORT_SYMBOL_GPL(nfs_file_mmap);
+EXPORT_SYMBOL_GPL(nfs_file_mmap_prepare);
/*
* Flush any dirty pages for this process, and check for write errors.
@@ -278,6 +283,37 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
}
EXPORT_SYMBOL_GPL(nfs_file_fsync);
+void nfs_truncate_last_folio(struct address_space *mapping, loff_t from,
+ loff_t to)
+{
+ struct folio *folio;
+
+ if (from >= to)
+ return;
+
+ folio = filemap_lock_folio(mapping, from >> PAGE_SHIFT);
+ if (IS_ERR(folio))
+ return;
+
+ if (folio_mkclean(folio))
+ folio_mark_dirty(folio);
+
+ if (folio_test_uptodate(folio)) {
+ loff_t fpos = folio_pos(folio);
+ size_t offset = from - fpos;
+ size_t end = folio_size(folio);
+
+ if (to - fpos < end)
+ end = to - fpos;
+ folio_zero_segment(folio, offset, end);
+ trace_nfs_size_truncate_folio(mapping->host, to);
+ }
+
+ folio_unlock(folio);
+ folio_put(folio);
+}
+EXPORT_SYMBOL_GPL(nfs_truncate_last_folio);
+
/*
* Decide whether a read/modify/write cycle may be more efficient
* then a modify/write/read cycle when writing to a page in the
@@ -327,6 +363,8 @@ static bool nfs_want_read_modify_write(struct file *file, struct folio *folio,
if (pnfs_ld_read_whole_page(file_inode(file)))
return true;
+ if (folio_test_dropbehind(folio))
+ return false;
/* Open for reading too? */
if (file->f_mode & FMODE_READ)
return true;
@@ -341,24 +379,28 @@ static bool nfs_want_read_modify_write(struct file *file, struct folio *folio,
* If the writer ends up delaying the write, the writer needs to
* increment the page use counts until he is done with the page.
*/
-static int nfs_write_begin(struct file *file, struct address_space *mapping,
+static int nfs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
loff_t pos, unsigned len, struct folio **foliop,
void **fsdata)
{
- fgf_t fgp = FGP_WRITEBEGIN;
struct folio *folio;
+ struct file *file = iocb->ki_filp;
int once_thru = 0;
int ret;
+ trace_nfs_write_begin(file_inode(file), pos, len);
+
dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%lu), %u@%lld)\n",
file, mapping->host->i_ino, len, (long long) pos);
+ nfs_truncate_last_folio(mapping, i_size_read(mapping->host), pos);
- fgp |= fgf_set_order(len);
start:
- folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp,
- mapping_gfp_mask(mapping));
- if (IS_ERR(folio))
- return PTR_ERR(folio);
+ folio = write_begin_get_folio(iocb, mapping, pos >> PAGE_SHIFT, len);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
+ goto out;
+ }
*foliop = folio;
ret = nfs_flush_incompatible(file, folio);
@@ -368,22 +410,28 @@ start:
} else if (!once_thru &&
nfs_want_read_modify_write(file, folio, pos, len)) {
once_thru = 1;
+ folio_clear_dropbehind(folio);
ret = nfs_read_folio(file, folio);
folio_put(folio);
if (!ret)
goto start;
}
+out:
+ trace_nfs_write_begin_done(file_inode(file), pos, len, ret);
return ret;
}
-static int nfs_write_end(struct file *file, struct address_space *mapping,
+static int nfs_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct folio *folio, void *fsdata)
{
+ struct file *file = iocb->ki_filp;
struct nfs_open_context *ctx = nfs_file_open_context(file);
unsigned offset = offset_in_folio(folio, pos);
int status;
+ trace_nfs_write_end(file_inode(file), pos, len);
dfprintk(PAGECACHE, "NFS: write_end(%pD2(%lu), %u@%lld)\n",
file, mapping->host->i_ino, len, (long long) pos);
@@ -412,13 +460,16 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
folio_unlock(folio);
folio_put(folio);
- if (status < 0)
+ if (status < 0) {
+ trace_nfs_write_end_done(file_inode(file), pos, len, status);
return status;
+ }
NFS_I(mapping->host)->write_io += copied;
if (nfs_ctx_key_to_expire(ctx, mapping->host))
nfs_wb_all(mapping->host);
+ trace_nfs_write_end_done(file_inode(file), pos, len, copied);
return copied;
}
@@ -436,10 +487,11 @@ static void nfs_invalidate_folio(struct folio *folio, size_t offset,
dfprintk(PAGECACHE, "NFS: invalidate_folio(%lu, %zu, %zu)\n",
folio->index, offset, length);
- if (offset != 0 || length < folio_size(folio))
- return;
/* Cancel any unstarted writes on this page */
- nfs_wb_folio_cancel(inode, folio);
+ if (offset != 0 || length < folio_size(folio))
+ nfs_wb_folio(inode, folio);
+ else
+ nfs_wb_folio_cancel(inode, folio);
folio_wait_private_2(folio); /* [DEPRECATED] */
trace_nfs_invalidate_folio(inode, folio_pos(folio) + offset, length);
}
@@ -457,7 +509,7 @@ static bool nfs_release_folio(struct folio *folio, gfp_t gfp)
/* If the private flag is set, then the folio is not freeable */
if (folio_test_private(folio)) {
if ((current_gfp_context(gfp) & GFP_KERNEL) != GFP_KERNEL ||
- current_is_kswapd())
+ current_is_kswapd() || current_is_kcompactd())
return false;
if (nfs_wb_folio(folio->mapping->host, folio) < 0)
return false;
@@ -650,6 +702,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
errseq_t since;
int error;
+ trace_nfs_file_write(iocb, from);
+
result = nfs_key_timeout_notify(file, inode);
if (result)
return result;
@@ -898,7 +952,7 @@ const struct file_operations nfs_file_operations = {
.llseek = nfs_file_llseek,
.read_iter = nfs_file_read,
.write_iter = nfs_file_write,
- .mmap = nfs_file_mmap,
+ .mmap_prepare = nfs_file_mmap_prepare,
.open = nfs_file_open,
.flush = nfs_file_flush,
.release = nfs_file_release,
@@ -909,5 +963,6 @@ const struct file_operations nfs_file_operations = {
.splice_write = iter_file_splice_write,
.check_flags = nfs_check_flags,
.setlease = simple_nosetlease,
+ .fop_flags = FOP_DONTCACHE,
};
EXPORT_SYMBOL_GPL(nfs_file_operations);
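
nfs_truncate_last_folio(), called from nfs_write_begin() with from = i_size and to = the write position, returns early unless the write begins past the current EOF, and then zeroes the uptodate tail of the folio holding the old EOF so stale bytes beyond it never become visible. Worked example of the offsets it computes (plain userspace arithmetic, assuming an order-0 4 KiB folio at file position 0):

	#include <stdio.h>

	#define FOLIO_SIZE 4096UL	/* assumed folio_size(folio) */

	/* mirrors the offset/end computation done on the uptodate folio */
	static void zero_range(unsigned long from, unsigned long to)
	{
		unsigned long fpos = 0;			/* folio_pos(folio) */
		unsigned long offset = from - fpos;
		unsigned long end = FOLIO_SIZE;

		if (to - fpos < end)
			end = to - fpos;
		printf("zero [%lu, %lu) in the folio\n", offset, end);
	}

	int main(void)
	{
		zero_range(1000, 3000);		/* write lands in the same folio: zero [1000, 3000) */
		zero_range(1000, 10000);	/* write starts in a later folio: zero [1000, 4096) */
		return 0;
	}
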
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index d39a1f58e18d..5c4551117c58 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -646,19 +646,19 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
{
struct xdr_stream stream;
struct xdr_buf buf;
- struct page *scratch;
+ struct folio *scratch;
__be32 *p;
uint32_t nfl_util;
int i;
dprintk("%s: set_layout_map Begin\n", __func__);
- scratch = alloc_page(gfp_flags);
+ scratch = folio_alloc(gfp_flags, 0);
if (!scratch)
return -ENOMEM;
xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len);
- xdr_set_scratch_page(&stream, scratch);
+ xdr_set_scratch_folio(&stream, scratch);
/* 20 = ufl_util (4), first_stripe_index (4), pattern_offset (8),
* num_fh (4) */
@@ -724,11 +724,11 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
fl->fh_array[i]->size);
}
- __free_page(scratch);
+ folio_put(scratch);
return 0;
out_err:
- __free_page(scratch);
+ folio_put(scratch);
return -EIO;
}
diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c
index 4fa304fa5bc4..df79aeb68db4 100644
--- a/fs/nfs/filelayout/filelayoutdev.c
+++ b/fs/nfs/filelayout/filelayoutdev.c
@@ -73,17 +73,18 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
struct nfs4_file_layout_dsaddr *dsaddr = NULL;
struct xdr_stream stream;
struct xdr_buf buf;
- struct page *scratch;
+ struct folio *scratch;
struct list_head dsaddrs;
struct nfs4_pnfs_ds_addr *da;
+ struct net *net = server->nfs_client->cl_net;
/* set up xdr stream */
- scratch = alloc_page(gfp_flags);
+ scratch = folio_alloc(gfp_flags, 0);
if (!scratch)
goto out_err;
xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen);
- xdr_set_scratch_page(&stream, scratch);
+ xdr_set_scratch_folio(&stream, scratch);
/* Get the stripe count (number of stripe index) */
p = xdr_inline_decode(&stream, 4);
@@ -159,8 +160,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
mp_count = be32_to_cpup(p); /* multipath count */
for (j = 0; j < mp_count; j++) {
- da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net,
- &stream, gfp_flags);
+ da = nfs4_decode_mp_ds_addr(net, &stream, gfp_flags);
if (da)
list_add_tail(&da->da_node, &dsaddrs);
}
@@ -170,7 +170,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
goto out_err_free_deviceid;
}
- dsaddr->ds_list[i] = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
+ dsaddr->ds_list[i] = nfs4_pnfs_ds_add(net, &dsaddrs, gfp_flags);
if (!dsaddr->ds_list[i])
goto out_err_drain_dsaddrs;
trace_fl_getdevinfo(server, &pdev->dev_id, dsaddr->ds_list[i]->ds_remotestr);
@@ -186,7 +186,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
}
}
- __free_page(scratch);
+ folio_put(scratch);
return dsaddr;
out_err_drain_dsaddrs:
@@ -204,7 +204,7 @@ out_err_free_deviceid:
out_err_free_stripe_indices:
kfree(stripe_indices);
out_err_free_scratch:
- __free_page(scratch);
+ folio_put(scratch);
out_err:
dprintk("%s ERROR: returning NULL\n", __func__);
return NULL;
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index f78115c6c2c1..9056f05a67dc 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -47,7 +47,7 @@ ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
int dev_limit, enum nfs4_ff_op_type type);
static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
const struct nfs42_layoutstat_devinfo *devinfo,
- struct nfs4_ff_layout_mirror *mirror);
+ struct nfs4_ff_layout_ds_stripe *dss_info);
static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
@@ -164,32 +164,32 @@ decode_name(struct xdr_stream *xdr, u32 *id)
}
static struct nfsd_file *
-ff_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ff_local_open_fh(struct pnfs_layout_segment *lseg, u32 ds_idx, u32 dss_id,
+ struct nfs_client *clp, const struct cred *cred,
struct nfs_fh *fh, fmode_t mode)
{
- if (mode & FMODE_WRITE) {
- /*
- * Always request read and write access since this corresponds
- * to a rw layout.
- */
- mode |= FMODE_READ;
- }
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
- return nfs_local_open_fh(clp, cred, fh, mode);
+ return nfs_local_open_fh(clp, cred, fh, &mirror->dss[dss_id].nfl, mode);
+#else
+ return NULL;
+#endif
}
-static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
- const struct nfs4_ff_layout_mirror *m2)
+static bool ff_dss_match_fh(const struct nfs4_ff_layout_ds_stripe *dss1,
+ const struct nfs4_ff_layout_ds_stripe *dss2)
{
int i, j;
- if (m1->fh_versions_cnt != m2->fh_versions_cnt)
+ if (dss1->fh_versions_cnt != dss2->fh_versions_cnt)
return false;
- for (i = 0; i < m1->fh_versions_cnt; i++) {
+
+ for (i = 0; i < dss1->fh_versions_cnt; i++) {
bool found_fh = false;
- for (j = 0; j < m2->fh_versions_cnt; j++) {
- if (nfs_compare_fh(&m1->fh_versions[i],
- &m2->fh_versions[j]) == 0) {
+ for (j = 0; j < dss2->fh_versions_cnt; j++) {
+ if (nfs_compare_fh(&dss1->fh_versions[i],
+ &dss2->fh_versions[j]) == 0) {
found_fh = true;
break;
}
@@ -200,6 +200,38 @@ static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
return true;
}
+static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
+ const struct nfs4_ff_layout_mirror *m2)
+{
+ u32 dss_id;
+
+ if (m1->dss_count != m2->dss_count)
+ return false;
+
+ for (dss_id = 0; dss_id < m1->dss_count; dss_id++)
+ if (!ff_dss_match_fh(&m1->dss[dss_id], &m2->dss[dss_id]))
+ return false;
+
+ return true;
+}
+
+static bool ff_mirror_match_devid(const struct nfs4_ff_layout_mirror *m1,
+ const struct nfs4_ff_layout_mirror *m2)
+{
+ u32 dss_id;
+
+ if (m1->dss_count != m2->dss_count)
+ return false;
+
+ for (dss_id = 0; dss_id < m1->dss_count; dss_id++)
+ if (memcmp(&m1->dss[dss_id].devid,
+ &m2->dss[dss_id].devid,
+ sizeof(m1->dss[dss_id].devid)) != 0)
+ return false;
+
+ return true;
+}
+
static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
struct nfs4_ff_layout_mirror *mirror)
@@ -210,7 +242,7 @@ ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
spin_lock(&inode->i_lock);
list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
- if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
+ if (!ff_mirror_match_devid(mirror, pos))
continue;
if (!ff_mirror_match_fh(mirror, pos))
continue;
@@ -238,30 +270,52 @@ ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
mirror->layout = NULL;
}
-static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
+static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(u32 dss_count,
+ gfp_t gfp_flags)
{
struct nfs4_ff_layout_mirror *mirror;
mirror = kzalloc(sizeof(*mirror), gfp_flags);
- if (mirror != NULL) {
- spin_lock_init(&mirror->lock);
- refcount_set(&mirror->ref, 1);
- INIT_LIST_HEAD(&mirror->mirrors);
+ if (mirror == NULL)
+ return NULL;
+
+ spin_lock_init(&mirror->lock);
+ refcount_set(&mirror->ref, 1);
+ INIT_LIST_HEAD(&mirror->mirrors);
+
+ mirror->dss_count = dss_count;
+ mirror->dss =
+ kcalloc(dss_count, sizeof(struct nfs4_ff_layout_ds_stripe),
+ gfp_flags);
+ if (mirror->dss == NULL) {
+ kfree(mirror);
+ return NULL;
}
+
+ for (u32 dss_id = 0; dss_id < mirror->dss_count; dss_id++)
+ nfs_localio_file_init(&mirror->dss[dss_id].nfl);
+
return mirror;
}
static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
- const struct cred *cred;
+ const struct cred *cred;
+ u32 dss_id;
ff_layout_remove_mirror(mirror);
- kfree(mirror->fh_versions);
- cred = rcu_access_pointer(mirror->ro_cred);
- put_cred(cred);
- cred = rcu_access_pointer(mirror->rw_cred);
- put_cred(cred);
- nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
+
+ for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
+ kfree(mirror->dss[dss_id].fh_versions);
+ cred = rcu_access_pointer(mirror->dss[dss_id].ro_cred);
+ put_cred(cred);
+ cred = rcu_access_pointer(mirror->dss[dss_id].rw_cred);
+ put_cred(cred);
+ nfs_close_local_fh(&mirror->dss[dss_id].nfl);
+ nfs4_ff_layout_put_deviceid(mirror->dss[dss_id].mirror_ds);
+ }
+
+ kfree(mirror->dss);
kfree(mirror);
}
@@ -292,7 +346,7 @@ ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
struct pnfs_layout_segment *l2)
{
const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
- const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l1);
+ const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
u32 i;
if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
@@ -365,14 +419,24 @@ ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
free_me);
}
+static u32 ff_mirror_efficiency_sum(const struct nfs4_ff_layout_mirror *mirror)
+{
+ u32 dss_id, sum = 0;
+
+ for (dss_id = 0; dss_id < mirror->dss_count; dss_id++)
+ sum += mirror->dss[dss_id].efficiency;
+
+ return sum;
+}
+
static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
int i, j;
for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
for (j = i + 1; j < fls->mirror_array_cnt; j++)
- if (fls->mirror_array[i]->efficiency <
- fls->mirror_array[j]->efficiency)
+ if (ff_mirror_efficiency_sum(fls->mirror_array[i]) <
+ ff_mirror_efficiency_sum(fls->mirror_array[j]))
swap(fls->mirror_array[i],
fls->mirror_array[j]);
}
@@ -387,20 +451,21 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
struct nfs4_ff_layout_segment *fls = NULL;
struct xdr_stream stream;
struct xdr_buf buf;
- struct page *scratch;
+ struct folio *scratch;
u64 stripe_unit;
u32 mirror_array_cnt;
__be32 *p;
int i, rc;
+ struct nfs4_ff_layout_ds_stripe *dss_info;
dprintk("--> %s\n", __func__);
- scratch = alloc_page(gfp_flags);
+ scratch = folio_alloc(gfp_flags, 0);
if (!scratch)
return ERR_PTR(-ENOMEM);
xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
lgr->layoutp->len);
- xdr_set_scratch_page(&stream, scratch);
+ xdr_set_scratch_folio(&stream, scratch);
/* stripe unit and mirror_array_cnt */
rc = -EIO;
@@ -426,116 +491,134 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
fls->mirror_array_cnt = mirror_array_cnt;
fls->stripe_unit = stripe_unit;
+ u32 dss_count = 0;
for (i = 0; i < fls->mirror_array_cnt; i++) {
struct nfs4_ff_layout_mirror *mirror;
struct cred *kcred;
const struct cred __rcu *cred;
kuid_t uid;
kgid_t gid;
- u32 ds_count, fh_count, id;
- int j;
+ u32 fh_count, id;
+ int j, dss_id;
rc = -EIO;
p = xdr_inline_decode(&stream, 4);
if (!p)
goto out_err_free;
- ds_count = be32_to_cpup(p);
- /* FIXME: allow for striping? */
- if (ds_count != 1)
+ // Ensure all mirrors have same stripe count.
+ if (dss_count == 0)
+ dss_count = be32_to_cpup(p);
+ else if (dss_count != be32_to_cpup(p))
goto out_err_free;
- fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
+ if (dss_count > NFS4_FLEXFILE_LAYOUT_MAX_STRIPE_CNT ||
+ dss_count == 0)
+ goto out_err_free;
+
+ if (dss_count > 1 && stripe_unit == 0)
+ goto out_err_free;
+
+ fls->mirror_array[i] = ff_layout_alloc_mirror(dss_count, gfp_flags);
if (fls->mirror_array[i] == NULL) {
rc = -ENOMEM;
goto out_err_free;
}
- fls->mirror_array[i]->ds_count = ds_count;
+ for (dss_id = 0; dss_id < dss_count; dss_id++) {
+ dss_info = &fls->mirror_array[i]->dss[dss_id];
+ dss_info->mirror = fls->mirror_array[i];
- /* deviceid */
- rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
- if (rc)
- goto out_err_free;
+ /* deviceid */
+ rc = decode_deviceid(&stream, &dss_info->devid);
+ if (rc)
+ goto out_err_free;
- /* efficiency */
- rc = -EIO;
- p = xdr_inline_decode(&stream, 4);
- if (!p)
- goto out_err_free;
- fls->mirror_array[i]->efficiency = be32_to_cpup(p);
+ /* efficiency */
+ rc = -EIO;
+ p = xdr_inline_decode(&stream, 4);
+ if (!p)
+ goto out_err_free;
+ dss_info->efficiency = be32_to_cpup(p);
- /* stateid */
- rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
- if (rc)
- goto out_err_free;
+ /* stateid */
+ rc = decode_pnfs_stateid(&stream, &dss_info->stateid);
+ if (rc)
+ goto out_err_free;
- /* fh */
- rc = -EIO;
- p = xdr_inline_decode(&stream, 4);
- if (!p)
- goto out_err_free;
- fh_count = be32_to_cpup(p);
+ /* fh */
+ rc = -EIO;
+ p = xdr_inline_decode(&stream, 4);
+ if (!p)
+ goto out_err_free;
+ fh_count = be32_to_cpup(p);
- fls->mirror_array[i]->fh_versions =
- kcalloc(fh_count, sizeof(struct nfs_fh),
- gfp_flags);
- if (fls->mirror_array[i]->fh_versions == NULL) {
- rc = -ENOMEM;
- goto out_err_free;
- }
+ dss_info->fh_versions =
+ kcalloc(fh_count, sizeof(struct nfs_fh),
+ gfp_flags);
+ if (dss_info->fh_versions == NULL) {
+ rc = -ENOMEM;
+ goto out_err_free;
+ }
+
+ for (j = 0; j < fh_count; j++) {
+ rc = decode_nfs_fh(&stream,
+ &dss_info->fh_versions[j]);
+ if (rc)
+ goto out_err_free;
+ }
- for (j = 0; j < fh_count; j++) {
- rc = decode_nfs_fh(&stream,
- &fls->mirror_array[i]->fh_versions[j]);
+ dss_info->fh_versions_cnt = fh_count;
+
+ /* user */
+ rc = decode_name(&stream, &id);
if (rc)
goto out_err_free;
- }
-
- fls->mirror_array[i]->fh_versions_cnt = fh_count;
- /* user */
- rc = decode_name(&stream, &id);
- if (rc)
- goto out_err_free;
+ uid = make_kuid(&init_user_ns, id);
- uid = make_kuid(&init_user_ns, id);
+ /* group */
+ rc = decode_name(&stream, &id);
+ if (rc)
+ goto out_err_free;
- /* group */
- rc = decode_name(&stream, &id);
- if (rc)
- goto out_err_free;
+ gid = make_kgid(&init_user_ns, id);
- gid = make_kgid(&init_user_ns, id);
+ if (gfp_flags & __GFP_FS)
+ kcred = prepare_kernel_cred(&init_task);
+ else {
+ unsigned int nofs_flags = memalloc_nofs_save();
- if (gfp_flags & __GFP_FS)
- kcred = prepare_kernel_cred(&init_task);
- else {
- unsigned int nofs_flags = memalloc_nofs_save();
- kcred = prepare_kernel_cred(&init_task);
- memalloc_nofs_restore(nofs_flags);
+ kcred = prepare_kernel_cred(&init_task);
+ memalloc_nofs_restore(nofs_flags);
+ }
+ rc = -ENOMEM;
+ if (!kcred)
+ goto out_err_free;
+ kcred->fsuid = uid;
+ kcred->fsgid = gid;
+ cred = RCU_INITIALIZER(kcred);
+
+ if (lgr->range.iomode == IOMODE_READ)
+ rcu_assign_pointer(dss_info->ro_cred, cred);
+ else
+ rcu_assign_pointer(dss_info->rw_cred, cred);
}
- rc = -ENOMEM;
- if (!kcred)
- goto out_err_free;
- kcred->fsuid = uid;
- kcred->fsgid = gid;
- cred = RCU_INITIALIZER(kcred);
-
- if (lgr->range.iomode == IOMODE_READ)
- rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
- else
- rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
if (mirror != fls->mirror_array[i]) {
- /* swap cred ptrs so free_mirror will clean up old */
- if (lgr->range.iomode == IOMODE_READ) {
- cred = xchg(&mirror->ro_cred, cred);
- rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
- } else {
- cred = xchg(&mirror->rw_cred, cred);
- rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
+ for (dss_id = 0; dss_id < dss_count; dss_id++) {
+ dss_info = &fls->mirror_array[i]->dss[dss_id];
+ /* swap cred ptrs so free_mirror will clean up old */
+ if (lgr->range.iomode == IOMODE_READ) {
+ cred = xchg(&mirror->dss[dss_id].ro_cred,
+ dss_info->ro_cred);
+ rcu_assign_pointer(dss_info->ro_cred, cred);
+ } else {
+ cred = xchg(&mirror->dss[dss_id].rw_cred,
+ dss_info->rw_cred);
+ rcu_assign_pointer(dss_info->rw_cred, cred);
+ }
}
ff_layout_free_mirror(fls->mirror_array[i]);
fls->mirror_array[i] = mirror;
@@ -563,7 +646,7 @@ out_sort_mirrors:
ret = &fls->generic_hdr;
dprintk("<-- %s (success)\n", __func__);
out_free_page:
- __free_page(scratch);
+ folio_put(scratch);
return ret;
out_err_free:
_ff_layout_free_lseg(fls);
@@ -592,6 +675,26 @@ ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
_ff_layout_free_lseg(fls);
}
+static u32 calc_commit_idx(struct pnfs_layout_segment *lseg,
+ u32 mirror_idx, u32 dss_id)
+{
+ struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
+
+ return (mirror_idx * flseg->mirror_array[0]->dss_count) + dss_id;
+}
+
+static u32 calc_mirror_idx_from_commit(struct pnfs_layout_segment *lseg,
+ u32 commit_index)
+{
+ return commit_index / FF_LAYOUT_LSEG(lseg)->mirror_array[0]->dss_count;
+}
+
+static u32 calc_dss_id_from_commit(struct pnfs_layout_segment *lseg,
+ u32 commit_index)
+{
+ return commit_index % FF_LAYOUT_LSEG(lseg)->mirror_array[0]->dss_count;
+}
+
static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
@@ -616,6 +719,7 @@ nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id,
struct nfs4_ff_layoutstat *layoutstat,
ktime_t now)
{
@@ -623,8 +727,8 @@ nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
- if (!mirror->start_time)
- mirror->start_time = now;
+ if (!mirror->dss[dss_id].start_time)
+ mirror->dss[dss_id].start_time = now;
if (mirror->report_interval != 0)
report_interval = (s64)mirror->report_interval * 1000LL;
else if (layoutstats_timer != 0)
@@ -674,13 +778,16 @@ nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id,
__u64 requested, ktime_t now)
{
bool report;
spin_lock(&mirror->lock);
- report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
- nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
+ report = nfs4_ff_layoutstat_start_io(
+ mirror, dss_id, &mirror->dss[dss_id].read_stat, now);
+ nfs4_ff_layout_stat_io_update_requested(
+ &mirror->dss[dss_id].read_stat, requested);
set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
spin_unlock(&mirror->lock);
@@ -691,11 +798,12 @@ nfs4_ff_layout_stat_io_start_read(struct inode *inode,
static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id,
__u64 requested,
__u64 completed)
{
spin_lock(&mirror->lock);
- nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
+ nfs4_ff_layout_stat_io_update_completed(&mirror->dss[dss_id].read_stat,
requested, completed,
ktime_get(), task->tk_start);
set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
@@ -705,13 +813,20 @@ nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id,
__u64 requested, ktime_t now)
{
bool report;
spin_lock(&mirror->lock);
- report = nfs4_ff_layoutstat_start_io(mirror , &mirror->write_stat, now);
- nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
+ report = nfs4_ff_layoutstat_start_io(
+ mirror,
+ dss_id,
+ &mirror->dss[dss_id].write_stat,
+ now);
+ nfs4_ff_layout_stat_io_update_requested(
+ &mirror->dss[dss_id].write_stat,
+ requested);
set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
spin_unlock(&mirror->lock);
@@ -722,6 +837,7 @@ nfs4_ff_layout_stat_io_start_write(struct inode *inode,
static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id,
__u64 requested,
__u64 completed,
enum nfs3_stable_how committed)
@@ -730,25 +846,25 @@ nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
requested = completed = 0;
spin_lock(&mirror->lock);
- nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
+ nfs4_ff_layout_stat_io_update_completed(&mirror->dss[dss_id].write_stat,
requested, completed, ktime_get(), task->tk_start);
set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
spin_unlock(&mirror->lock);
}
static void
-ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
+ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx, u32 dss_id)
{
- struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+ struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);
if (devid)
nfs4_mark_deviceid_unavailable(devid);
}
static void
-ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
+ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx, u32 dss_id)
{
- struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+ struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);
if (devid)
nfs4_mark_deviceid_available(devid);
@@ -757,69 +873,87 @@ ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
static struct nfs4_pnfs_ds *
ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
u32 start_idx, u32 *best_idx,
+ u32 offset, u32 *dss_id,
bool check_device)
{
struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
struct nfs4_ff_layout_mirror *mirror;
- struct nfs4_pnfs_ds *ds;
+ struct nfs4_pnfs_ds *ds = ERR_PTR(-EAGAIN);
u32 idx;
/* mirrors are initially sorted by efficiency */
for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
mirror = FF_LAYOUT_COMP(lseg, idx);
- ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
- if (!ds)
+ *dss_id = nfs4_ff_layout_calc_dss_id(
+ fls->stripe_unit,
+ fls->mirror_array[idx]->dss_count,
+ offset);
+ ds = nfs4_ff_layout_prepare_ds(lseg, mirror, *dss_id, false);
+ if (IS_ERR(ds))
continue;
if (check_device &&
- nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
+ nfs4_test_deviceid_unavailable(&mirror->dss[*dss_id].mirror_ds->id_node)) {
+ /* reinitialize the error state in case this is the last iteration */
+ ds = ERR_PTR(-EINVAL);
continue;
+ }
*best_idx = idx;
- return ds;
+ break;
}
- return NULL;
+ return ds;
}
static struct nfs4_pnfs_ds *
ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
- u32 start_idx, u32 *best_idx)
+ u32 start_idx, u32 *best_idx,
+ u32 offset, u32 *dss_id)
{
- return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
+ return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx,
+ offset, dss_id, false);
}
static struct nfs4_pnfs_ds *
ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
- u32 start_idx, u32 *best_idx)
+ u32 start_idx, u32 *best_idx,
+ u32 offset, u32 *dss_id)
{
- return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
+ return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx,
+ offset, dss_id, true);
}
static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
- u32 start_idx, u32 *best_idx)
+ u32 start_idx, u32 *best_idx,
+ u32 offset, u32 *dss_id)
{
struct nfs4_pnfs_ds *ds;
- ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
- if (ds)
+ ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx,
+ offset, dss_id);
+ if (!IS_ERR(ds))
return ds;
- return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
+ return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx,
+ offset, dss_id);
}
static struct nfs4_pnfs_ds *
ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
- u32 *best_idx)
+ u32 *best_idx,
+ u32 offset,
+ u32 *dss_id)
{
struct pnfs_layout_segment *lseg = pgio->pg_lseg;
struct nfs4_pnfs_ds *ds;
ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
- best_idx);
- if (ds || !pgio->pg_mirror_idx)
+ best_idx, offset, dss_id);
+ if (!IS_ERR(ds) || !pgio->pg_mirror_idx)
return ds;
- return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
+ return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx,
+ offset, dss_id);
}
static void
@@ -838,6 +972,56 @@ ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
}
}
+static bool
+ff_layout_lseg_is_striped(const struct nfs4_ff_layout_segment *fls)
+{
+ return fls->mirror_array[0]->dss_count > 1;
+}
+
+/*
+ * ff_layout_pg_test(). Called by nfs_can_coalesce_requests()
+ *
+ * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
+ * of bytes (maximum @req->wb_bytes) that can be coalesced.
+ */
+static size_t
+ff_layout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
+ struct nfs_page *req)
+{
+ unsigned int size;
+ u64 p_stripe, r_stripe;
+ u32 stripe_offset;
+ u64 segment_offset = pgio->pg_lseg->pls_range.offset;
+ u32 stripe_unit = FF_LAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;
+
+ /* calls nfs_generic_pg_test */
+ size = pnfs_generic_pg_test(pgio, prev, req);
+ if (!size)
+ return 0;
+ else if (!ff_layout_lseg_is_striped(FF_LAYOUT_LSEG(pgio->pg_lseg)))
+ return size;
+
+ /* see if req and prev are in the same stripe */
+ if (prev) {
+ p_stripe = (u64)req_offset(prev) - segment_offset;
+ r_stripe = (u64)req_offset(req) - segment_offset;
+ do_div(p_stripe, stripe_unit);
+ do_div(r_stripe, stripe_unit);
+
+ if (p_stripe != r_stripe)
+ return 0;
+ }
+
+ /* calculate remaining bytes in the current stripe */
+ div_u64_rem((u64)req_offset(req) - segment_offset,
+ stripe_unit,
+ &stripe_offset);
+ WARN_ON_ONCE(stripe_offset > stripe_unit);
+ if (stripe_offset >= stripe_unit)
+ return 0;
+ return min(stripe_unit - (unsigned int)stripe_offset, size);
+}
+
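
For reference, a minimal userspace sketch of the stripe-boundary arithmetic performed by ff_layout_pg_test() above; the 1 MiB stripe_unit and raw byte offsets are assumptions standing in for req_offset() and the layout segment:

    #include <stdint.h>
    #include <stdio.h>

    /* Bytes that may still be coalesced starting at 'offset' without crossing
     * a stripe boundary; mirrors the div_u64_rem() step in ff_layout_pg_test(). */
    static uint32_t bytes_left_in_stripe(uint64_t offset, uint64_t segment_offset,
                                         uint32_t stripe_unit)
    {
            uint32_t stripe_offset = (uint32_t)((offset - segment_offset) % stripe_unit);

            return stripe_unit - stripe_offset;
    }

    int main(void)
    {
            uint32_t stripe_unit = 1U << 20;        /* assumed 1 MiB stripes */

            /* 4 KiB before a stripe boundary: only 4 KiB can be coalesced. */
            printf("%u\n", bytes_left_in_stripe((1U << 20) - 4096, 0, stripe_unit));
            /* Exactly on a stripe boundary: a full stripe_unit is available. */
            printf("%u\n", bytes_left_in_stripe(2ULL << 20, 0, stripe_unit));
            return 0;
    }
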
static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req)
@@ -845,8 +1029,11 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
struct nfs_pgio_mirror *pgm;
struct nfs4_ff_layout_mirror *mirror;
struct nfs4_pnfs_ds *ds;
- u32 ds_idx;
+ u32 ds_idx, dss_id;
+ if (NFS_SERVER(pgio->pg_inode)->flags &
+ (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
+ pgio->pg_maxretrans = io_maxretrans;
retry:
pnfs_generic_pg_check_layout(pgio, req);
/* Use full layout for now */
@@ -860,9 +1047,12 @@ retry:
if (!pgio->pg_lseg)
goto out_nolseg;
}
+ /* Reset wb_nio, since getting layout segment was successful */
+ req->wb_nio = 0;
- ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
- if (!ds) {
+ ds = ff_layout_get_ds_for_read(pgio, &ds_idx,
+ req_offset(req), &dss_id);
+ if (IS_ERR(ds)) {
if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
goto out_mds;
pnfs_generic_pg_cleanup(pgio);
@@ -873,17 +1063,27 @@ retry:
mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
pgm = &pgio->pg_mirrors[0];
- pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
+ pgm->pg_bsize = mirror->dss[dss_id].mirror_ds->ds_versions[0].rsize;
pgio->pg_mirror_idx = ds_idx;
-
- if (NFS_SERVER(pgio->pg_inode)->flags &
- (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
- pgio->pg_maxretrans = io_maxretrans;
return;
out_nolseg:
- if (pgio->pg_error < 0)
- return;
+ if (pgio->pg_error < 0) {
+ if (pgio->pg_error != -EAGAIN)
+ return;
+ /* Retry getting layout segment if lower layer returned -EAGAIN */
+ if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) {
+ if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR)
+ pgio->pg_error = -ETIMEDOUT;
+ else
+ pgio->pg_error = -EIO;
+ return;
+ }
+ pgio->pg_error = 0;
+ /* Sleep for 1 second before retrying */
+ ssleep(1);
+ goto retry;
+ }
out_mds:
trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
0, NFS4_MAX_UINT64, IOMODE_READ,
@@ -900,7 +1100,7 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
struct nfs4_ff_layout_mirror *mirror;
struct nfs_pgio_mirror *pgm;
struct nfs4_pnfs_ds *ds;
- u32 i;
+ u32 i, dss_id;
retry:
pnfs_generic_pg_check_layout(pgio, req);
@@ -925,8 +1125,13 @@ retry:
for (i = 0; i < pgio->pg_mirror_count; i++) {
mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
- ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
- if (!ds) {
+ dss_id = nfs4_ff_layout_calc_dss_id(
+ FF_LAYOUT_LSEG(pgio->pg_lseg)->stripe_unit,
+ mirror->dss_count,
+ req_offset(req));
+ ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror,
+ dss_id, true);
+ if (IS_ERR(ds)) {
if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
goto out_mds;
pnfs_generic_pg_cleanup(pgio);
@@ -935,7 +1140,7 @@ retry:
goto retry;
}
pgm = &pgio->pg_mirrors[i];
- pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
+ pgm->pg_bsize = mirror->dss[dss_id].mirror_ds->ds_versions[0].wsize;
}
if (NFS_SERVER(pgio->pg_inode)->flags &
@@ -1001,14 +1206,14 @@ ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
.pg_init = ff_layout_pg_init_read,
- .pg_test = pnfs_generic_pg_test,
+ .pg_test = ff_layout_pg_test,
.pg_doio = pnfs_generic_pg_readpages,
.pg_cleanup = pnfs_generic_pg_cleanup,
};
static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
.pg_init = ff_layout_pg_init_write,
- .pg_test = pnfs_generic_pg_test,
+ .pg_test = ff_layout_pg_test,
.pg_doio = pnfs_generic_pg_writepages,
.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
.pg_cleanup = pnfs_generic_pg_cleanup,
@@ -1056,11 +1261,15 @@ static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
{
u32 idx = hdr->pgio_mirror_idx + 1;
u32 new_idx = 0;
+ u32 dss_id = 0;
+ struct nfs4_pnfs_ds *ds;
- if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
- ff_layout_send_layouterror(hdr->lseg);
- else
+ ds = ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx,
+ hdr->args.offset, &dss_id);
+ if (IS_ERR(ds))
pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
+ else
+ ff_layout_send_layouterror(hdr->lseg);
pnfs_read_resend_pnfs(hdr, new_idx);
}
@@ -1089,42 +1298,53 @@ static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
}
static int ff_layout_async_handle_error_v4(struct rpc_task *task,
+ u32 op_status,
struct nfs4_state *state,
struct nfs_client *clp,
struct pnfs_layout_segment *lseg,
- u32 idx)
+ u32 idx, u32 dss_id)
{
struct pnfs_layout_hdr *lo = lseg->pls_layout;
struct inode *inode = lo->plh_inode;
- struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+ struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);
struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
- switch (task->tk_status) {
- case -NFS4ERR_BADSESSION:
- case -NFS4ERR_BADSLOT:
- case -NFS4ERR_BAD_HIGH_SLOT:
- case -NFS4ERR_DEADSESSION:
- case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
- case -NFS4ERR_SEQ_FALSE_RETRY:
- case -NFS4ERR_SEQ_MISORDERED:
+ switch (op_status) {
+ case NFS4_OK:
+ case NFS4ERR_NXIO:
+ break;
+ case NFSERR_PERM:
+ if (!task->tk_xprt)
+ break;
+ xprt_force_disconnect(task->tk_xprt);
+ goto out_retry;
+ case NFS4ERR_BADSESSION:
+ case NFS4ERR_BADSLOT:
+ case NFS4ERR_BAD_HIGH_SLOT:
+ case NFS4ERR_DEADSESSION:
+ case NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case NFS4ERR_SEQ_FALSE_RETRY:
+ case NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR %d, Reset session. Exchangeid "
"flags 0x%x\n", __func__, task->tk_status,
clp->cl_exchange_flags);
nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
- break;
- case -NFS4ERR_DELAY:
- case -NFS4ERR_GRACE:
+ goto out_retry;
+ case NFS4ERR_DELAY:
+ nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
+ fallthrough;
+ case NFS4ERR_GRACE:
rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
- break;
- case -NFS4ERR_RETRY_UNCACHED_REP:
- break;
+ goto out_retry;
+ case NFS4ERR_RETRY_UNCACHED_REP:
+ goto out_retry;
/* Invalidate Layout errors */
- case -NFS4ERR_PNFS_NO_LAYOUT:
- case -ESTALE: /* mapped NFS4ERR_STALE */
- case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
- case -EISDIR: /* mapped NFS4ERR_ISDIR */
- case -NFS4ERR_FHEXPIRED:
- case -NFS4ERR_WRONG_TYPE:
+ case NFS4ERR_PNFS_NO_LAYOUT:
+ case NFS4ERR_STALE:
+ case NFS4ERR_BADHANDLE:
+ case NFS4ERR_ISDIR:
+ case NFS4ERR_FHEXPIRED:
+ case NFS4ERR_WRONG_TYPE:
dprintk("%s Invalid layout error %d\n", __func__,
task->tk_status);
/*
@@ -1137,11 +1357,20 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
pnfs_destroy_layout(NFS_I(inode));
rpc_wake_up(&tbl->slot_tbl_waitq);
goto reset;
+ default:
+ break;
+ }
+
+ switch (task->tk_status) {
/* RPC connection errors */
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
+ return -NFS4ERR_FATAL_IOERROR;
+ fallthrough;
case -ECONNREFUSED:
case -EHOSTDOWN:
case -EHOSTUNREACH:
- case -ENETUNREACH:
case -EIO:
case -ETIMEDOUT:
case -EPIPE:
@@ -1152,25 +1381,55 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
nfs4_delete_deviceid(devid->ld, devid->nfs_client,
&devid->deviceid);
rpc_wake_up(&tbl->slot_tbl_waitq);
- fallthrough;
+ break;
default:
- if (ff_layout_avoid_mds_available_ds(lseg))
- return -NFS4ERR_RESET_TO_PNFS;
-reset:
- dprintk("%s Retry through MDS. Error %d\n", __func__,
- task->tk_status);
- return -NFS4ERR_RESET_TO_MDS;
+ break;
}
+
+ if (ff_layout_avoid_mds_available_ds(lseg))
+ return -NFS4ERR_RESET_TO_PNFS;
+reset:
+ dprintk("%s Retry through MDS. Error %d\n", __func__,
+ task->tk_status);
+ return -NFS4ERR_RESET_TO_MDS;
+
+out_retry:
task->tk_status = 0;
return -EAGAIN;
}
/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
+ u32 op_status,
+ struct nfs_client *clp,
struct pnfs_layout_segment *lseg,
- u32 idx)
+ u32 idx, u32 dss_id)
{
- struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+ struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);
+
+ switch (op_status) {
+ case NFS_OK:
+ case NFSERR_NXIO:
+ break;
+ case NFSERR_PERM:
+ if (!task->tk_xprt)
+ break;
+ xprt_force_disconnect(task->tk_xprt);
+ goto out_retry;
+ case NFSERR_ACCES:
+ case NFSERR_BADHANDLE:
+ case NFSERR_FBIG:
+ case NFSERR_IO:
+ case NFSERR_NOSPC:
+ case NFSERR_ROFS:
+ case NFSERR_STALE:
+ goto out_reset_to_pnfs;
+ case NFSERR_JUKEBOX:
+ nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
+ goto out_retry;
+ default:
+ break;
+ }
switch (task->tk_status) {
/* File access problems. Don't mark the device as unavailable */
@@ -1184,12 +1443,18 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
case -EJUKEBOX:
nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
goto out_retry;
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
+ return -NFS4ERR_FATAL_IOERROR;
+ fallthrough;
default:
dprintk("%s DS connection error %d\n", __func__,
task->tk_status);
nfs4_delete_deviceid(devid->ld, devid->nfs_client,
&devid->deviceid);
}
+out_reset_to_pnfs:
/* FIXME: Need to prevent infinite looping here. */
return -NFS4ERR_RESET_TO_PNFS;
out_retry:
@@ -1200,15 +1465,16 @@ out_retry:
}
static int ff_layout_async_handle_error(struct rpc_task *task,
+ u32 op_status,
struct nfs4_state *state,
struct nfs_client *clp,
struct pnfs_layout_segment *lseg,
- u32 idx)
+ u32 idx, u32 dss_id)
{
int vers = clp->cl_nfs_mod->rpc_vers->number;
if (task->tk_status >= 0) {
- ff_layout_mark_ds_reachable(lseg, idx);
+ ff_layout_mark_ds_reachable(lseg, idx, dss_id);
return 0;
}
@@ -1218,10 +1484,11 @@ static int ff_layout_async_handle_error(struct rpc_task *task,
switch (vers) {
case 3:
- return ff_layout_async_handle_error_v3(task, lseg, idx);
+ return ff_layout_async_handle_error_v3(task, op_status, clp,
+ lseg, idx, dss_id);
case 4:
- return ff_layout_async_handle_error_v4(task, state, clp,
- lseg, idx);
+ return ff_layout_async_handle_error_v4(task, op_status, state,
+ clp, lseg, idx, dss_id);
default:
/* should never happen */
WARN_ON_ONCE(1);
@@ -1230,7 +1497,7 @@ static int ff_layout_async_handle_error(struct rpc_task *task,
}
static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
- u32 idx, u64 offset, u64 length,
+ u32 idx, u32 dss_id, u64 offset, u64 length,
u32 *op_status, int opnum, int error)
{
struct nfs4_ff_layout_mirror *mirror;
@@ -1248,6 +1515,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
case -ECONNRESET:
case -EHOSTDOWN:
case -EHOSTUNREACH:
+ case -ENETDOWN:
case -ENETUNREACH:
case -EADDRINUSE:
case -ENOBUFS:
@@ -1267,15 +1535,16 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
mirror = FF_LAYOUT_COMP(lseg, idx);
err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
- mirror, offset, length, status, opnum,
+ mirror, dss_id, offset, length, status, opnum,
nfs_io_gfp_mask());
switch (status) {
case NFS4ERR_DELAY:
case NFS4ERR_GRACE:
+ case NFS4ERR_PERM:
break;
case NFS4ERR_NXIO:
- ff_layout_mark_ds_unreachable(lseg, idx);
+ ff_layout_mark_ds_unreachable(lseg, idx, dss_id);
/*
* Don't return the layout if this is a read and we still
* have layouts to try
@@ -1295,19 +1564,27 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
static int ff_layout_read_done_cb(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
+ struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(hdr->lseg);
+ u32 dss_id = nfs4_ff_layout_calc_dss_id(
+ flseg->stripe_unit,
+ flseg->mirror_array[hdr->pgio_mirror_idx]->dss_count,
+ hdr->args.offset);
int err;
if (task->tk_status < 0) {
- ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
+ ff_layout_io_track_ds_error(hdr->lseg,
+ hdr->pgio_mirror_idx, dss_id,
hdr->args.offset, hdr->args.count,
&hdr->res.op_status, OP_READ,
task->tk_status);
- trace_ff_layout_read_error(hdr);
+ trace_ff_layout_read_error(hdr, task->tk_status);
}
- err = ff_layout_async_handle_error(task, hdr->args.context->state,
+ err = ff_layout_async_handle_error(task, hdr->res.op_status,
+ hdr->args.context->state,
hdr->ds_clp, hdr->lseg,
- hdr->pgio_mirror_idx);
+ hdr->pgio_mirror_idx,
+ dss_id);
trace_nfs4_pnfs_read(hdr, err);
clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
@@ -1321,6 +1598,9 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
return task->tk_status;
case -EAGAIN:
goto out_eagain;
+ case -NFS4ERR_FATAL_IOERROR:
+ task->tk_status = -EIO;
+ return 0;
}
return 0;
@@ -1360,23 +1640,47 @@ ff_layout_set_layoutcommit(struct inode *inode,
static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
+ struct nfs4_ff_layout_mirror *mirror;
+ u32 dss_id;
+
if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
return;
- nfs4_ff_layout_stat_io_start_read(hdr->inode,
- FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
- hdr->args.count,
- task->tk_start);
+
+ mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
+ dss_id = nfs4_ff_layout_calc_dss_id(
+ FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
+ mirror->dss_count,
+ hdr->args.offset);
+
+ nfs4_ff_layout_stat_io_start_read(
+ hdr->inode,
+ mirror,
+ dss_id,
+ hdr->args.count,
+ task->tk_start);
}
static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
+ struct nfs4_ff_layout_mirror *mirror;
+ u32 dss_id;
+
if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
return;
- nfs4_ff_layout_stat_io_end_read(task,
- FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
- hdr->args.count,
- hdr->res.count);
+
+ mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
+ dss_id = nfs4_ff_layout_calc_dss_id(
+ FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
+ mirror->dss_count,
+ hdr->args.offset);
+
+ nfs4_ff_layout_stat_io_end_read(
+ task,
+ mirror,
+ dss_id,
+ hdr->args.count,
+ hdr->res.count);
set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
}
@@ -1464,20 +1768,28 @@ static void ff_layout_read_release(void *data)
static int ff_layout_write_done_cb(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
+ struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(hdr->lseg);
+ u32 dss_id = nfs4_ff_layout_calc_dss_id(
+ flseg->stripe_unit,
+ flseg->mirror_array[hdr->pgio_mirror_idx]->dss_count,
+ hdr->args.offset);
loff_t end_offs = 0;
int err;
if (task->tk_status < 0) {
- ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
+ ff_layout_io_track_ds_error(hdr->lseg,
+ hdr->pgio_mirror_idx, dss_id,
hdr->args.offset, hdr->args.count,
&hdr->res.op_status, OP_WRITE,
task->tk_status);
- trace_ff_layout_write_error(hdr);
+ trace_ff_layout_write_error(hdr, task->tk_status);
}
- err = ff_layout_async_handle_error(task, hdr->args.context->state,
+ err = ff_layout_async_handle_error(task, hdr->res.op_status,
+ hdr->args.context->state,
hdr->ds_clp, hdr->lseg,
- hdr->pgio_mirror_idx);
+ hdr->pgio_mirror_idx,
+ dss_id);
trace_nfs4_pnfs_write(hdr, err);
clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
@@ -1491,6 +1803,9 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
return task->tk_status;
case -EAGAIN:
return -EAGAIN;
+ case -NFS4ERR_FATAL_IOERROR:
+ task->tk_status = -EIO;
+ return 0;
}
if (hdr->res.verf->committed == NFS_FILE_SYNC ||
@@ -1512,17 +1827,20 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
struct nfs_commit_data *data)
{
int err;
+ u32 idx = calc_mirror_idx_from_commit(data->lseg, data->ds_commit_index);
+ u32 dss_id = calc_dss_id_from_commit(data->lseg, data->ds_commit_index);
if (task->tk_status < 0) {
- ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
+ ff_layout_io_track_ds_error(data->lseg, idx, dss_id,
data->args.offset, data->args.count,
&data->res.op_status, OP_COMMIT,
task->tk_status);
- trace_ff_layout_commit_error(data);
+ trace_ff_layout_commit_error(data, task->tk_status);
}
- err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
- data->lseg, data->ds_commit_index);
+ err = ff_layout_async_handle_error(task, data->res.op_status,
+ NULL, data->ds_clp, data->lseg, idx,
+ dss_id);
trace_nfs4_pnfs_commit_ds(data, err);
switch (err) {
@@ -1535,33 +1853,60 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
case -EAGAIN:
rpc_restart_call_prepare(task);
return -EAGAIN;
+ case -NFS4ERR_FATAL_IOERROR:
+ task->tk_status = -EIO;
+ return 0;
}
ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
-
return 0;
}
static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
+ struct nfs4_ff_layout_mirror *mirror;
+ u32 dss_id;
+
if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
return;
- nfs4_ff_layout_stat_io_start_write(hdr->inode,
- FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
- hdr->args.count,
- task->tk_start);
+
+ mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
+ dss_id = nfs4_ff_layout_calc_dss_id(
+ FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
+ mirror->dss_count,
+ hdr->args.offset);
+
+ nfs4_ff_layout_stat_io_start_write(
+ hdr->inode,
+ mirror,
+ dss_id,
+ hdr->args.count,
+ task->tk_start);
}
static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
+ struct nfs4_ff_layout_mirror *mirror;
+ u32 dss_id;
+
if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
return;
- nfs4_ff_layout_stat_io_end_write(task,
- FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
- hdr->args.count, hdr->res.count,
- hdr->res.verf->committed);
+
+ mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
+ dss_id = nfs4_ff_layout_calc_dss_id(
+ FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
+ mirror->dss_count,
+ hdr->args.offset);
+
+ nfs4_ff_layout_stat_io_end_write(
+ task,
+ mirror,
+ dss_id,
+ hdr->args.count,
+ hdr->res.count,
+ hdr->res.verf->committed);
set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
}
@@ -1644,10 +1989,16 @@ static void ff_layout_write_release(void *data)
static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
struct nfs_commit_data *cdata)
{
+ u32 idx, dss_id;
+
if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
return;
+
+ idx = calc_mirror_idx_from_commit(cdata->lseg, cdata->ds_commit_index);
+ dss_id = calc_dss_id_from_commit(cdata->lseg, cdata->ds_commit_index);
nfs4_ff_layout_stat_io_start_write(cdata->inode,
- FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
+ FF_LAYOUT_COMP(cdata->lseg, idx),
+ dss_id,
0, task->tk_start);
}
@@ -1656,6 +2007,7 @@ static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
{
struct nfs_page *req;
__u64 count = 0;
+ u32 idx, dss_id;
if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
return;
@@ -1664,8 +2016,12 @@ static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
list_for_each_entry(req, &cdata->pages, wb_list)
count += req->wb_bytes;
}
+
+ idx = calc_mirror_idx_from_commit(cdata->lseg, cdata->ds_commit_index);
+ dss_id = calc_dss_id_from_commit(cdata->lseg, cdata->ds_commit_index);
nfs4_ff_layout_stat_io_end_write(task,
- FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
+ FF_LAYOUT_COMP(cdata->lseg, idx),
+ dss_id,
count, count, NFS_FILE_SYNC);
set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
}
@@ -1779,26 +2135,34 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
u32 idx = hdr->pgio_mirror_idx;
int vers;
struct nfs_fh *fh;
+ u32 dss_id;
+ bool ds_fatal_error = false;
dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
__func__, hdr->inode->i_ino,
hdr->args.pgbase, (size_t)hdr->args.count, offset);
mirror = FF_LAYOUT_COMP(lseg, idx);
- ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
- if (!ds)
+ dss_id = nfs4_ff_layout_calc_dss_id(
+ FF_LAYOUT_LSEG(lseg)->stripe_unit,
+ mirror->dss_count,
+ offset);
+ ds = nfs4_ff_layout_prepare_ds(lseg, mirror, dss_id, false);
+ if (IS_ERR(ds)) {
+ ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
goto out_failed;
+ }
ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
- hdr->inode);
+ hdr->inode, dss_id);
if (IS_ERR(ds_clnt))
goto out_failed;
- ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
+ ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred, dss_id);
if (!ds_cred)
goto out_failed;
- vers = nfs4_ff_layout_ds_version(mirror);
+ vers = nfs4_ff_layout_ds_version(mirror, dss_id);
dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
@@ -1806,11 +2170,11 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
hdr->pgio_done_cb = ff_layout_read_done_cb;
refcount_inc(&ds->ds_clp->cl_count);
hdr->ds_clp = ds->ds_clp;
- fh = nfs4_ff_layout_select_ds_fh(mirror);
+ fh = nfs4_ff_layout_select_ds_fh(mirror, dss_id);
if (fh)
hdr->args.fh = fh;
- nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
+ nfs4_ff_layout_select_ds_stateid(mirror, dss_id, &hdr->args.stateid);
/*
* Note that if we ever decide to split across DSes,
@@ -1820,7 +2184,8 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
hdr->mds_offset = offset;
/* Start IO accounting for local read */
- localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh, FMODE_READ);
+ localio = ff_local_open_fh(lseg, idx, dss_id, ds->ds_clp, ds_cred, fh,
+ FMODE_READ);
if (localio) {
hdr->task.tk_start = ktime_get();
ff_layout_read_record_layoutstats_start(&hdr->task, hdr);
@@ -1835,7 +2200,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
return PNFS_ATTEMPTED;
out_failed:
- if (ff_layout_avoid_mds_available_ds(lseg))
+ if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
return PNFS_TRY_AGAIN;
trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
hdr->args.offset, hdr->args.count,
@@ -1857,22 +2222,30 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
int vers;
struct nfs_fh *fh;
u32 idx = hdr->pgio_mirror_idx;
+ u32 dss_id;
+ bool ds_fatal_error = false;
mirror = FF_LAYOUT_COMP(lseg, idx);
- ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
- if (!ds)
+ dss_id = nfs4_ff_layout_calc_dss_id(
+ FF_LAYOUT_LSEG(lseg)->stripe_unit,
+ mirror->dss_count,
+ offset);
+ ds = nfs4_ff_layout_prepare_ds(lseg, mirror, dss_id, true);
+ if (IS_ERR(ds)) {
+ ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
goto out_failed;
+ }
ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
- hdr->inode);
+ hdr->inode, dss_id);
if (IS_ERR(ds_clnt))
goto out_failed;
- ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
+ ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred, dss_id);
if (!ds_cred)
goto out_failed;
- vers = nfs4_ff_layout_ds_version(mirror);
+ vers = nfs4_ff_layout_ds_version(mirror, dss_id);
dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
@@ -1882,12 +2255,12 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
hdr->pgio_done_cb = ff_layout_write_done_cb;
refcount_inc(&ds->ds_clp->cl_count);
hdr->ds_clp = ds->ds_clp;
- hdr->ds_commit_idx = idx;
- fh = nfs4_ff_layout_select_ds_fh(mirror);
+ hdr->ds_commit_idx = calc_commit_idx(lseg, idx, dss_id);
+ fh = nfs4_ff_layout_select_ds_fh(mirror, dss_id);
if (fh)
hdr->args.fh = fh;
- nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
+ nfs4_ff_layout_select_ds_stateid(mirror, dss_id, &hdr->args.stateid);
/*
* Note that if we ever decide to split across DSes,
@@ -1896,7 +2269,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
hdr->args.offset = offset;
/* Start IO accounting for local write */
- localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh,
+ localio = ff_local_open_fh(lseg, idx, dss_id, ds->ds_clp, ds_cred, fh,
FMODE_READ|FMODE_WRITE);
if (localio) {
hdr->task.tk_start = ktime_get();
@@ -1912,7 +2285,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
return PNFS_ATTEMPTED;
out_failed:
- if (ff_layout_avoid_mds_available_ds(lseg))
+ if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
return PNFS_TRY_AGAIN;
trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
hdr->args.offset, hdr->args.count,
@@ -1920,20 +2293,15 @@ out_failed:
return PNFS_NOT_ATTEMPTED;
}
-static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
-{
- return i;
-}
-
static struct nfs_fh *
-select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
+select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i, u32 dss_id)
{
struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
/* FIXME: Assume that there is only one NFS version available
* for the DS.
*/
- return &flseg->mirror_array[i]->fh_versions[0];
+ return &flseg->mirror_array[i]->dss[dss_id].fh_versions[0];
}
static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
@@ -1944,7 +2312,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
struct nfsd_file *localio;
struct nfs4_ff_layout_mirror *mirror;
const struct cred *ds_cred;
- u32 idx;
+ u32 idx, dss_id;
int vers, ret;
struct nfs_fh *fh;
@@ -1952,22 +2320,23 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
goto out_err;
- idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
+ idx = calc_mirror_idx_from_commit(lseg, data->ds_commit_index);
mirror = FF_LAYOUT_COMP(lseg, idx);
- ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
- if (!ds)
+ dss_id = calc_dss_id_from_commit(lseg, data->ds_commit_index);
+ ds = nfs4_ff_layout_prepare_ds(lseg, mirror, dss_id, true);
+ if (IS_ERR(ds))
goto out_err;
ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
- data->inode);
+ data->inode, dss_id);
if (IS_ERR(ds_clnt))
goto out_err;
- ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
+ ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred, dss_id);
if (!ds_cred)
goto out_err;
- vers = nfs4_ff_layout_ds_version(mirror);
+ vers = nfs4_ff_layout_ds_version(mirror, dss_id);
dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
@@ -1976,12 +2345,12 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
data->cred = ds_cred;
refcount_inc(&ds->ds_clp->cl_count);
data->ds_clp = ds->ds_clp;
- fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
+ fh = select_ds_fh_from_commit(lseg, idx, dss_id);
if (fh)
data->args.fh = fh;
/* Start IO accounting for local commit */
- localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh,
+ localio = ff_local_open_fh(lseg, idx, dss_id, ds->ds_clp, ds_cred, fh,
FMODE_READ|FMODE_WRITE);
if (localio) {
data->task.tk_start = ktime_get();
@@ -2045,25 +2414,28 @@ static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
struct nfs4_pnfs_ds *ds;
struct nfs_client *ds_clp;
struct rpc_clnt *clnt;
- u32 idx;
+ u32 idx, dss_id;
for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
mirror = flseg->mirror_array[idx];
- mirror_ds = mirror->mirror_ds;
- if (IS_ERR_OR_NULL(mirror_ds))
- continue;
- ds = mirror->mirror_ds->ds;
- if (!ds)
- continue;
- ds_clp = ds->ds_clp;
- if (!ds_clp)
- continue;
- clnt = ds_clp->cl_rpcclient;
- if (!clnt)
- continue;
- if (!rpc_cancel_tasks(clnt, -EAGAIN, ff_layout_match_io, lseg))
- continue;
- rpc_clnt_disconnect(clnt);
+ for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
+ mirror_ds = mirror->dss[dss_id].mirror_ds;
+ if (IS_ERR_OR_NULL(mirror_ds))
+ continue;
+ ds = mirror->dss[dss_id].mirror_ds->ds;
+ if (!ds)
+ continue;
+ ds_clp = ds->ds_clp;
+ if (!ds_clp)
+ continue;
+ clnt = ds_clp->cl_rpcclient;
+ if (!clnt)
+ continue;
+ if (!rpc_cancel_tasks(clnt, -EAGAIN,
+ ff_layout_match_io, lseg))
+ continue;
+ rpc_clnt_disconnect(clnt);
+ }
}
}
@@ -2085,8 +2457,9 @@ ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
struct inode *inode = lseg->pls_layout->plh_inode;
struct pnfs_commit_array *array, *new;
+ u32 size = flseg->mirror_array_cnt * flseg->mirror_array[0]->dss_count;
- new = pnfs_alloc_commit_array(flseg->mirror_array_cnt,
+ new = pnfs_alloc_commit_array(size,
nfs_io_gfp_mask());
if (new) {
spin_lock(&inode->i_lock);
@@ -2450,11 +2823,11 @@ ff_layout_encode_io_latency(struct xdr_stream *xdr,
static void
ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
const struct nfs42_layoutstat_devinfo *devinfo,
- struct nfs4_ff_layout_mirror *mirror)
+ struct nfs4_ff_layout_ds_stripe *dss_info)
{
struct nfs4_pnfs_ds_addr *da;
- struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
- struct nfs_fh *fh = &mirror->fh_versions[0];
+ struct nfs4_pnfs_ds *ds = dss_info->mirror_ds->ds;
+ struct nfs_fh *fh = &dss_info->fh_versions[0];
__be32 *p;
da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
@@ -2466,13 +2839,17 @@ ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
p = xdr_reserve_space(xdr, 4 + fh->size);
xdr_encode_opaque(p, fh->data, fh->size);
/* ff_io_latency4 read */
- spin_lock(&mirror->lock);
- ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
+ spin_lock(&dss_info->mirror->lock);
+ ff_layout_encode_io_latency(xdr,
+ &dss_info->read_stat.io_stat);
/* ff_io_latency4 write */
- ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
- spin_unlock(&mirror->lock);
+ ff_layout_encode_io_latency(xdr,
+ &dss_info->write_stat.io_stat);
+ spin_unlock(&dss_info->mirror->lock);
/* nfstime4 */
- ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
+ ff_layout_encode_nfstime(xdr,
+ ktime_sub(ktime_get(),
+ dss_info->start_time));
/* bool */
p = xdr_reserve_space(xdr, 4);
*p = cpu_to_be32(false);
@@ -2496,7 +2873,8 @@ ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
static void
ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
{
- struct nfs4_ff_layout_mirror *mirror = opaque->data;
+ struct nfs4_ff_layout_ds_stripe *dss_info = opaque->data;
+ struct nfs4_ff_layout_mirror *mirror = dss_info->mirror;
ff_layout_put_mirror(mirror);
}
@@ -2513,37 +2891,47 @@ ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
{
struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
struct nfs4_ff_layout_mirror *mirror;
+ struct nfs4_ff_layout_ds_stripe *dss_info;
struct nfs4_deviceid_node *dev;
- int i = 0;
+ int i = 0, dss_id;
list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
- if (i >= dev_limit)
- break;
- if (IS_ERR_OR_NULL(mirror->mirror_ds))
- continue;
- if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
- &mirror->flags) &&
- type != NFS4_FF_OP_LAYOUTRETURN)
- continue;
- /* mirror refcount put in cleanup_layoutstats */
- if (!refcount_inc_not_zero(&mirror->ref))
- continue;
- dev = &mirror->mirror_ds->id_node;
- memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
- devinfo->offset = 0;
- devinfo->length = NFS4_MAX_UINT64;
- spin_lock(&mirror->lock);
- devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
- devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
- devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
- devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
- spin_unlock(&mirror->lock);
- devinfo->layout_type = LAYOUT_FLEX_FILES;
- devinfo->ld_private.ops = &layoutstat_ops;
- devinfo->ld_private.data = mirror;
-
- devinfo++;
- i++;
+ for (dss_id = 0; dss_id < mirror->dss_count; ++dss_id) {
+ dss_info = &mirror->dss[dss_id];
+ if (i >= dev_limit)
+ break;
+ if (IS_ERR_OR_NULL(dss_info->mirror_ds))
+ continue;
+ if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
+ &mirror->flags) &&
+ type != NFS4_FF_OP_LAYOUTRETURN)
+ continue;
+ /* mirror refcount put in cleanup_layoutstats */
+ if (!refcount_inc_not_zero(&mirror->ref))
+ continue;
+ dev = &dss_info->mirror_ds->id_node;
+ memcpy(&devinfo->dev_id,
+ &dev->deviceid,
+ NFS4_DEVICEID4_SIZE);
+ devinfo->offset = 0;
+ devinfo->length = NFS4_MAX_UINT64;
+ spin_lock(&mirror->lock);
+ devinfo->read_count =
+ dss_info->read_stat.io_stat.ops_completed;
+ devinfo->read_bytes =
+ dss_info->read_stat.io_stat.bytes_completed;
+ devinfo->write_count =
+ dss_info->write_stat.io_stat.ops_completed;
+ devinfo->write_bytes =
+ dss_info->write_stat.io_stat.bytes_completed;
+ spin_unlock(&mirror->lock);
+ devinfo->layout_type = LAYOUT_FLEX_FILES;
+ devinfo->ld_private.ops = &layoutstat_ops;
+ devinfo->ld_private.data = &mirror->dss[dss_id];
+
+ devinfo++;
+ i++;
+ }
}
return i;
}
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index f84b3fb0dddd..17a008c8e97c 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -21,6 +21,8 @@
* due to network error etc. */
#define NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT 4096
+#define NFS4_FLEXFILE_LAYOUT_MAX_STRIPE_CNT 4096
+
/* LAYOUTSTATS report interval in ms */
#define FF_LAYOUTSTATS_REPORT_INTERVAL (60000L)
#define FF_LAYOUTSTATS_MAXDEV 4
@@ -71,24 +73,32 @@ struct nfs4_ff_layoutstat {
struct nfs4_ff_busy_timer busy_timer;
};
-struct nfs4_ff_layout_mirror {
- struct pnfs_layout_hdr *layout;
- struct list_head mirrors;
- u32 ds_count;
- u32 efficiency;
+struct nfs4_ff_layout_mirror;
+
+struct nfs4_ff_layout_ds_stripe {
+ struct nfs4_ff_layout_mirror *mirror;
struct nfs4_deviceid devid;
+ u32 efficiency;
struct nfs4_ff_layout_ds *mirror_ds;
u32 fh_versions_cnt;
struct nfs_fh *fh_versions;
nfs4_stateid stateid;
const struct cred __rcu *ro_cred;
const struct cred __rcu *rw_cred;
- refcount_t ref;
- spinlock_t lock;
- unsigned long flags;
+ struct nfs_file_localio nfl;
struct nfs4_ff_layoutstat read_stat;
struct nfs4_ff_layoutstat write_stat;
ktime_t start_time;
+};
+
+struct nfs4_ff_layout_mirror {
+ struct pnfs_layout_hdr *layout;
+ struct list_head mirrors;
+ u32 dss_count;
+ struct nfs4_ff_layout_ds_stripe *dss;
+ refcount_t ref;
+ spinlock_t lock;
+ unsigned long flags;
u32 report_interval;
};
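
Illustrative sketch only, not part of the patch: one way a caller might size and wire up the per-stripe array declared above. The field names match the header; the helper name, allocation flags, and error handling are assumptions (kcalloc() comes from <linux/slab.h>).

    static int example_alloc_stripes(struct nfs4_ff_layout_mirror *mirror,
                                     u32 dss_count, gfp_t gfp_flags)
    {
            u32 dss_id;

            mirror->dss = kcalloc(dss_count, sizeof(*mirror->dss), gfp_flags);
            if (!mirror->dss)
                    return -ENOMEM;
            mirror->dss_count = dss_count;

            /* each stripe keeps a back-pointer used by the layoutstats code */
            for (dss_id = 0; dss_id < dss_count; dss_id++)
                    mirror->dss[dss_id].mirror = mirror;
            return 0;
    }
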
@@ -149,12 +159,12 @@ FF_LAYOUT_COMP(struct pnfs_layout_segment *lseg, u32 idx)
}
static inline struct nfs4_deviceid_node *
-FF_LAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg, u32 idx)
+FF_LAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg, u32 idx, u32 dss_id)
{
struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, idx);
if (mirror != NULL) {
- struct nfs4_ff_layout_ds *mirror_ds = mirror->mirror_ds;
+ struct nfs4_ff_layout_ds *mirror_ds = mirror->dss[dss_id].mirror_ds;
if (!IS_ERR_OR_NULL(mirror_ds))
return &mirror_ds->id_node;
@@ -181,9 +191,22 @@ ff_layout_no_read_on_rw(struct pnfs_layout_segment *lseg)
}
static inline int
-nfs4_ff_layout_ds_version(const struct nfs4_ff_layout_mirror *mirror)
+nfs4_ff_layout_ds_version(const struct nfs4_ff_layout_mirror *mirror, u32 dss_id)
+{
+ return mirror->dss[dss_id].mirror_ds->ds_versions[0].version;
+}
+
+static inline u32
+nfs4_ff_layout_calc_dss_id(const u64 stripe_unit, const u32 dss_count, const loff_t offset)
{
- return mirror->mirror_ds->ds_versions[0].version;
+ u64 tmp = offset;
+
+ if (dss_count == 1 || stripe_unit == 0)
+ return 0;
+
+ do_div(tmp, stripe_unit);
+
+ return do_div(tmp, dss_count);
}
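
As a worked example (userspace equivalent; the stripe_unit and dss_count values are assumptions): do_div() divides its first argument in place and returns the remainder, so the helper above evaluates to (offset / stripe_unit) % dss_count.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t calc_dss_id(uint64_t stripe_unit, uint32_t dss_count,
                                uint64_t offset)
    {
            if (dss_count == 1 || stripe_unit == 0)
                    return 0;
            return (uint32_t)((offset / stripe_unit) % dss_count);
    }

    int main(void)
    {
            /* 1 MiB stripes spread over 3 DS stripes: ids rotate 0, 1, 2, 0, ... */
            printf("%u\n", calc_dss_id(1U << 20, 3, 0));            /* 0 */
            printf("%u\n", calc_dss_id(1U << 20, 3, 1U << 20));     /* 1 */
            printf("%u\n", calc_dss_id(1U << 20, 3, 3ULL << 20));   /* 0 */
            return 0;
    }
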
struct nfs4_ff_layout_ds *
@@ -192,9 +215,9 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds);
void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds);
int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
- struct nfs4_ff_layout_mirror *mirror, u64 offset,
- u64 length, int status, enum nfs_opnum4 opnum,
- gfp_t gfp_flags);
+ struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id, u64 offset, u64 length, int status,
+ enum nfs_opnum4 opnum, gfp_t gfp_flags);
void ff_layout_send_layouterror(struct pnfs_layout_segment *lseg);
int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head);
void ff_layout_free_ds_ioerr(struct list_head *head);
@@ -203,23 +226,27 @@ unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
struct list_head *head,
unsigned int maxnum);
struct nfs_fh *
-nfs4_ff_layout_select_ds_fh(struct nfs4_ff_layout_mirror *mirror);
+nfs4_ff_layout_select_ds_fh(struct nfs4_ff_layout_mirror *mirror, u32 dss_id);
void
nfs4_ff_layout_select_ds_stateid(const struct nfs4_ff_layout_mirror *mirror,
- nfs4_stateid *stateid);
+ u32 dss_id,
+ nfs4_stateid *stateid);
struct nfs4_pnfs_ds *
nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id,
bool fail_return);
struct rpc_clnt *
nfs4_ff_find_or_create_ds_client(struct nfs4_ff_layout_mirror *mirror,
struct nfs_client *ds_clp,
- struct inode *inode);
+ struct inode *inode,
+ u32 dss_id);
const struct cred *ff_layout_get_ds_cred(struct nfs4_ff_layout_mirror *mirror,
const struct pnfs_layout_range *range,
- const struct cred *mdscred);
+ const struct cred *mdscred,
+ u32 dss_id);
bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg);
bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg);
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index e58bedfb1dcc..c55ea8fa3bfa 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -44,18 +44,19 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
{
struct xdr_stream stream;
struct xdr_buf buf;
- struct page *scratch;
+ struct folio *scratch;
struct list_head dsaddrs;
struct nfs4_pnfs_ds_addr *da;
struct nfs4_ff_layout_ds *new_ds = NULL;
struct nfs4_ff_ds_version *ds_versions = NULL;
+ struct net *net = server->nfs_client->cl_net;
u32 mp_count;
u32 version_count;
__be32 *p;
int i, ret = -ENOMEM;
/* set up xdr stream */
- scratch = alloc_page(gfp_flags);
+ scratch = folio_alloc(gfp_flags, 0);
if (!scratch)
goto out_err;
@@ -69,7 +70,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
INIT_LIST_HEAD(&dsaddrs);
xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen);
- xdr_set_scratch_page(&stream, scratch);
+ xdr_set_scratch_folio(&stream, scratch);
/* multipath count */
p = xdr_inline_decode(&stream, 4);
@@ -80,8 +81,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
for (i = 0; i < mp_count; i++) {
/* multipath ds */
- da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net,
- &stream, gfp_flags);
+ da = nfs4_decode_mp_ds_addr(net, &stream, gfp_flags);
if (da)
list_add_tail(&da->da_node, &dsaddrs);
}
@@ -149,7 +149,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
new_ds->ds_versions = ds_versions;
new_ds->ds_versions_cnt = version_count;
- new_ds->ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
+ new_ds->ds = nfs4_pnfs_ds_add(net, &dsaddrs, gfp_flags);
if (!new_ds->ds)
goto out_err_drain_dsaddrs;
@@ -163,7 +163,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
kfree(da);
}
- __free_page(scratch);
+ folio_put(scratch);
return new_ds;
out_err_drain_dsaddrs:
@@ -177,7 +177,7 @@ out_err_drain_dsaddrs:
kfree(ds_versions);
out_scratch:
- __free_page(scratch);
+ folio_put(scratch);
out_err:
kfree(new_ds);
@@ -250,16 +250,16 @@ ff_layout_add_ds_error_locked(struct nfs4_flexfile_layout *flo,
}
int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
- struct nfs4_ff_layout_mirror *mirror, u64 offset,
- u64 length, int status, enum nfs_opnum4 opnum,
- gfp_t gfp_flags)
+ struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id, u64 offset, u64 length, int status,
+ enum nfs_opnum4 opnum, gfp_t gfp_flags)
{
struct nfs4_ff_layout_ds_err *dserr;
if (status == 0)
return 0;
- if (IS_ERR_OR_NULL(mirror->mirror_ds))
+ if (IS_ERR_OR_NULL(mirror->dss[dss_id].mirror_ds))
return -EINVAL;
dserr = kmalloc(sizeof(*dserr), gfp_flags);
@@ -271,8 +271,8 @@ int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
dserr->length = length;
dserr->status = status;
dserr->opnum = opnum;
- nfs4_stateid_copy(&dserr->stateid, &mirror->stateid);
- memcpy(&dserr->deviceid, &mirror->mirror_ds->id_node.deviceid,
+ nfs4_stateid_copy(&dserr->stateid, &mirror->dss[dss_id].stateid);
+ memcpy(&dserr->deviceid, &mirror->dss[dss_id].mirror_ds->id_node.deviceid,
NFS4_DEVICEID4_SIZE);
spin_lock(&flo->generic_hdr.plh_inode->i_lock);
@@ -282,14 +282,14 @@ int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
}
static const struct cred *
-ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode)
+ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode, u32 dss_id)
{
const struct cred *cred, __rcu **pcred;
if (iomode == IOMODE_READ)
- pcred = &mirror->ro_cred;
+ pcred = &mirror->dss[dss_id].ro_cred;
else
- pcred = &mirror->rw_cred;
+ pcred = &mirror->dss[dss_id].rw_cred;
rcu_read_lock();
do {
@@ -304,43 +304,45 @@ ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode)
}
struct nfs_fh *
-nfs4_ff_layout_select_ds_fh(struct nfs4_ff_layout_mirror *mirror)
+nfs4_ff_layout_select_ds_fh(struct nfs4_ff_layout_mirror *mirror, u32 dss_id)
{
/* FIXME: For now assume there is only 1 version available for the DS */
- return &mirror->fh_versions[0];
+ return &mirror->dss[dss_id].fh_versions[0];
}
void
nfs4_ff_layout_select_ds_stateid(const struct nfs4_ff_layout_mirror *mirror,
- nfs4_stateid *stateid)
+ u32 dss_id,
+ nfs4_stateid *stateid)
{
- if (nfs4_ff_layout_ds_version(mirror) == 4)
- nfs4_stateid_copy(stateid, &mirror->stateid);
+ if (nfs4_ff_layout_ds_version(mirror, dss_id) == 4)
+ nfs4_stateid_copy(stateid, &mirror->dss[dss_id].stateid);
}
static bool
ff_layout_init_mirror_ds(struct pnfs_layout_hdr *lo,
- struct nfs4_ff_layout_mirror *mirror)
+ struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id)
{
if (mirror == NULL)
goto outerr;
- if (mirror->mirror_ds == NULL) {
+ if (mirror->dss[dss_id].mirror_ds == NULL) {
struct nfs4_deviceid_node *node;
struct nfs4_ff_layout_ds *mirror_ds = ERR_PTR(-ENODEV);
node = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode),
- &mirror->devid, lo->plh_lc_cred,
+ &mirror->dss[dss_id].devid, lo->plh_lc_cred,
GFP_KERNEL);
if (node)
mirror_ds = FF_LAYOUT_MIRROR_DS(node);
/* check for race with another call to this function */
- if (cmpxchg(&mirror->mirror_ds, NULL, mirror_ds) &&
+ if (cmpxchg(&mirror->dss[dss_id].mirror_ds, NULL, mirror_ds) &&
mirror_ds != ERR_PTR(-ENODEV))
nfs4_put_deviceid_node(node);
}
- if (IS_ERR(mirror->mirror_ds))
+ if (IS_ERR(mirror->dss[dss_id].mirror_ds))
goto outerr;
return true;
@@ -352,6 +354,7 @@ outerr:
* nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
* @lseg: the layout segment we're operating on
* @mirror: layout mirror describing the DS to use
+ * @dss_id: id of the DS stripe to use
* @fail_return: return layout on connect failure?
*
* Try to prepare a DS connection to accept an RPC call. This involves
@@ -368,18 +371,19 @@ outerr:
struct nfs4_pnfs_ds *
nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id,
bool fail_return)
{
- struct nfs4_pnfs_ds *ds = NULL;
+ struct nfs4_pnfs_ds *ds;
struct inode *ino = lseg->pls_layout->plh_inode;
struct nfs_server *s = NFS_SERVER(ino);
unsigned int max_payload;
- int status;
+ int status = -EAGAIN;
- if (!ff_layout_init_mirror_ds(lseg->pls_layout, mirror))
+ if (!ff_layout_init_mirror_ds(lseg->pls_layout, mirror, dss_id))
goto noconnect;
- ds = mirror->mirror_ds->ds;
+ ds = mirror->dss[dss_id].mirror_ds->ds;
if (READ_ONCE(ds->ds_clp))
goto out;
/* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
@@ -388,10 +392,10 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
/* FIXME: For now we assume the server sent only one version of NFS
* to use for the DS.
*/
- status = nfs4_pnfs_ds_connect(s, ds, &mirror->mirror_ds->id_node,
+ status = nfs4_pnfs_ds_connect(s, ds, &mirror->dss[dss_id].mirror_ds->id_node,
dataserver_timeo, dataserver_retrans,
- mirror->mirror_ds->ds_versions[0].version,
- mirror->mirror_ds->ds_versions[0].minor_version);
+ mirror->dss[dss_id].mirror_ds->ds_versions[0].version,
+ mirror->dss[dss_id].mirror_ds->ds_versions[0].minor_version);
/* connect success, check rsize/wsize limit */
if (!status) {
@@ -400,25 +404,25 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
* keep ds_clp even if DS is local, so that if local IO cannot
* proceed somehow, we can fall back to NFS whenever we want.
*/
- nfs_local_probe(ds->ds_clp);
+ nfs_local_probe_async(ds->ds_clp);
max_payload =
nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient),
NULL);
- if (mirror->mirror_ds->ds_versions[0].rsize > max_payload)
- mirror->mirror_ds->ds_versions[0].rsize = max_payload;
- if (mirror->mirror_ds->ds_versions[0].wsize > max_payload)
- mirror->mirror_ds->ds_versions[0].wsize = max_payload;
+ if (mirror->dss[dss_id].mirror_ds->ds_versions[0].rsize > max_payload)
+ mirror->dss[dss_id].mirror_ds->ds_versions[0].rsize = max_payload;
+ if (mirror->dss[dss_id].mirror_ds->ds_versions[0].wsize > max_payload)
+ mirror->dss[dss_id].mirror_ds->ds_versions[0].wsize = max_payload;
goto out;
}
noconnect:
ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
- mirror, lseg->pls_range.offset,
+ mirror, dss_id, lseg->pls_range.offset,
lseg->pls_range.length, NFS4ERR_NXIO,
OP_ILLEGAL, GFP_NOIO);
ff_layout_send_layouterror(lseg);
if (fail_return || !ff_layout_has_available_ds(lseg))
pnfs_error_mark_layout_for_return(ino, lseg);
- ds = NULL;
+ ds = ERR_PTR(status);
out:
return ds;
}
@@ -426,12 +430,13 @@ out:
const struct cred *
ff_layout_get_ds_cred(struct nfs4_ff_layout_mirror *mirror,
const struct pnfs_layout_range *range,
- const struct cred *mdscred)
+ const struct cred *mdscred,
+ u32 dss_id)
{
const struct cred *cred;
- if (mirror && !mirror->mirror_ds->ds_versions[0].tightly_coupled) {
- cred = ff_layout_get_mirror_cred(mirror, range->iomode);
+ if (mirror && !mirror->dss[dss_id].mirror_ds->ds_versions[0].tightly_coupled) {
+ cred = ff_layout_get_mirror_cred(mirror, range->iomode, dss_id);
if (!cred)
cred = get_cred(mdscred);
} else {
@@ -445,15 +450,17 @@ ff_layout_get_ds_cred(struct nfs4_ff_layout_mirror *mirror,
* @mirror: pointer to the mirror
* @ds_clp: nfs_client for the DS
* @inode: pointer to inode
+ * @dss_id: DS stripe id
*
* Find or create a DS rpc client with the MDS server rpc client auth flavor
* in the nfs_client cl_ds_clients list.
*/
struct rpc_clnt *
nfs4_ff_find_or_create_ds_client(struct nfs4_ff_layout_mirror *mirror,
- struct nfs_client *ds_clp, struct inode *inode)
+ struct nfs_client *ds_clp, struct inode *inode,
+ u32 dss_id)
{
- switch (mirror->mirror_ds->ds_versions[0].version) {
+ switch (mirror->dss[dss_id].mirror_ds->ds_versions[0].version) {
case 3:
/* For NFSv3 DS, flavor is set when creating DS connections */
return ds_clp->cl_rpcclient;
@@ -559,16 +566,18 @@ static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
struct nfs4_ff_layout_mirror *mirror;
struct nfs4_deviceid_node *devid;
- u32 idx;
+ u32 idx, dss_id;
for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
mirror = FF_LAYOUT_COMP(lseg, idx);
- if (mirror) {
- if (!mirror->mirror_ds)
+ if (!mirror)
+ continue;
+ for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
+ if (!mirror->dss[dss_id].mirror_ds)
return true;
- if (IS_ERR(mirror->mirror_ds))
+ if (IS_ERR(mirror->dss[dss_id].mirror_ds))
continue;
- devid = &mirror->mirror_ds->id_node;
+ devid = &mirror->dss[dss_id].mirror_ds->id_node;
if (!nfs4_test_deviceid_unavailable(devid))
return true;
}
@@ -581,17 +590,21 @@ static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
struct nfs4_ff_layout_mirror *mirror;
struct nfs4_deviceid_node *devid;
- u32 idx;
+ u32 idx, dss_id;
for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
mirror = FF_LAYOUT_COMP(lseg, idx);
- if (!mirror || IS_ERR(mirror->mirror_ds))
- return false;
- if (!mirror->mirror_ds)
- continue;
- devid = &mirror->mirror_ds->id_node;
- if (nfs4_test_deviceid_unavailable(devid))
+ if (!mirror)
return false;
+ for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
+ if (IS_ERR(mirror->dss[dss_id].mirror_ds))
+ return false;
+ if (!mirror->dss[dss_id].mirror_ds)
+ continue;
+ devid = &mirror->dss[dss_id].mirror_ds->id_node;
+ if (nfs4_test_deviceid_unavailable(devid))
+ return false;
+ }
}
return FF_LAYOUT_MIRROR_COUNT(lseg) != 0;
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
index b069385eea17..b4679b7161b0 100644
--- a/fs/nfs/fs_context.c
+++ b/fs/nfs/fs_context.c
@@ -50,6 +50,7 @@ enum nfs_param {
Opt_clientaddr,
Opt_cto,
Opt_alignwrite,
+ Opt_fatal_neterrors,
Opt_fg,
Opt_fscache,
Opt_fscache_flag,
@@ -72,6 +73,8 @@ enum nfs_param {
Opt_posix,
Opt_proto,
Opt_rdirplus,
+ Opt_rdirplus_none,
+ Opt_rdirplus_force,
Opt_rdma,
Opt_resvport,
Opt_retrans,
@@ -93,6 +96,22 @@ enum nfs_param {
Opt_wsize,
Opt_write,
Opt_xprtsec,
+ Opt_cert_serial,
+ Opt_privkey_serial,
+};
+
+enum {
+ Opt_fatal_neterrors_default,
+ Opt_fatal_neterrors_enetunreach,
+ Opt_fatal_neterrors_none,
+};
+
+static const struct constant_table nfs_param_enums_fatal_neterrors[] = {
+ { "default", Opt_fatal_neterrors_default },
+ { "ENETDOWN:ENETUNREACH", Opt_fatal_neterrors_enetunreach },
+ { "ENETUNREACH:ENETDOWN", Opt_fatal_neterrors_enetunreach },
+ { "none", Opt_fatal_neterrors_none },
+ {}
};
enum {
@@ -151,6 +170,8 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_string("clientaddr", Opt_clientaddr),
fsparam_flag_no("cto", Opt_cto),
fsparam_flag_no("alignwrite", Opt_alignwrite),
+ fsparam_enum("fatal_neterrors", Opt_fatal_neterrors,
+ nfs_param_enums_fatal_neterrors),
fsparam_flag ("fg", Opt_fg),
fsparam_flag_no("fsc", Opt_fscache_flag),
fsparam_string("fsc", Opt_fscache),
@@ -174,7 +195,8 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_u32 ("port", Opt_port),
fsparam_flag_no("posix", Opt_posix),
fsparam_string("proto", Opt_proto),
- fsparam_flag_no("rdirplus", Opt_rdirplus),
+ fsparam_flag_no("rdirplus", Opt_rdirplus), // rdirplus|nordirplus
+ fsparam_string("rdirplus", Opt_rdirplus), // rdirplus=...
fsparam_flag ("rdma", Opt_rdma),
fsparam_flag_no("resvport", Opt_resvport),
fsparam_u32 ("retrans", Opt_retrans),
@@ -201,6 +223,8 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_enum ("write", Opt_write, nfs_param_enums_write),
fsparam_u32 ("wsize", Opt_wsize),
fsparam_string("xprtsec", Opt_xprtsec),
+ fsparam_s32("cert_serial", Opt_cert_serial),
+ fsparam_s32("privkey_serial", Opt_privkey_serial),
{}
};
@@ -288,6 +312,12 @@ static const struct constant_table nfs_xprtsec_policies[] = {
{}
};
+static const struct constant_table nfs_rdirplus_tokens[] = {
+ { "none", Opt_rdirplus_none },
+ { "force", Opt_rdirplus_force },
+ {}
+};
+
/*
* Sanity-check a server address provided by the mount command.
*
@@ -525,6 +555,32 @@ static int nfs_parse_version_string(struct fs_context *fc,
return 0;
}
+#ifdef CONFIG_KEYS
+static int nfs_tls_key_verify(key_serial_t key_id)
+{
+ struct key *key = key_lookup(key_id);
+ int error = 0;
+
+ if (IS_ERR(key)) {
+ pr_err("key id %08x not found\n", key_id);
+ return PTR_ERR(key);
+ }
+ if (test_bit(KEY_FLAG_REVOKED, &key->flags) ||
+ test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
+ pr_err("key id %08x revoked\n", key_id);
+ error = -EKEYREVOKED;
+ }
+
+ key_put(key);
+ return error;
+}
+#else
+static inline int nfs_tls_key_verify(key_serial_t key_id)
+{
+ return -ENOENT;
+}
+#endif /* CONFIG_KEYS */
+
/*
* Parse a single mount parameter.
*/
@@ -636,10 +692,25 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
ctx->flags &= ~NFS_MOUNT_NOACL;
break;
case Opt_rdirplus:
- if (result.negated)
+ if (result.negated) {
+ ctx->flags &= ~NFS_MOUNT_FORCE_RDIRPLUS;
ctx->flags |= NFS_MOUNT_NORDIRPLUS;
- else
- ctx->flags &= ~NFS_MOUNT_NORDIRPLUS;
+ } else if (!param->string) {
+ ctx->flags &= ~(NFS_MOUNT_NORDIRPLUS | NFS_MOUNT_FORCE_RDIRPLUS);
+ } else {
+ switch (lookup_constant(nfs_rdirplus_tokens, param->string, -1)) {
+ case Opt_rdirplus_none:
+ ctx->flags &= ~NFS_MOUNT_FORCE_RDIRPLUS;
+ ctx->flags |= NFS_MOUNT_NORDIRPLUS;
+ break;
+ case Opt_rdirplus_force:
+ ctx->flags &= ~NFS_MOUNT_NORDIRPLUS;
+ ctx->flags |= NFS_MOUNT_FORCE_RDIRPLUS;
+ break;
+ default:
+ goto out_invalid_value;
+ }
+ }
break;
case Opt_sharecache:
if (result.negated)
@@ -766,6 +837,18 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
if (ret < 0)
return ret;
break;
+ case Opt_cert_serial:
+ ret = nfs_tls_key_verify(result.int_32);
+ if (ret < 0)
+ return ret;
+ ctx->xprtsec.cert_serial = result.int_32;
+ break;
+ case Opt_privkey_serial:
+ ret = nfs_tls_key_verify(result.int_32);
+ if (ret < 0)
+ return ret;
+ ctx->xprtsec.privkey_serial = result.int_32;
+ break;
case Opt_proto:
if (!param->string)
@@ -872,6 +955,25 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
goto out_of_bounds;
ctx->nfs_server.max_connect = result.uint_32;
break;
+ case Opt_fatal_neterrors:
+ trace_nfs_mount_assign(param->key, param->string);
+ switch (result.uint_32) {
+ case Opt_fatal_neterrors_default:
+ if (fc->net_ns != &init_net)
+ ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;
+ else
+ ctx->flags &= ~NFS_MOUNT_NETUNREACH_FATAL;
+ break;
+ case Opt_fatal_neterrors_enetunreach:
+ ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;
+ break;
+ case Opt_fatal_neterrors_none:
+ ctx->flags &= ~NFS_MOUNT_NETUNREACH_FATAL;
+ break;
+ default:
+ goto out_invalid_value;
+ }
+ break;
case Opt_lookupcache:
trace_nfs_mount_assign(param->key, param->string);
switch (result.uint_32) {
@@ -1167,8 +1269,7 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
int ret;
data->context[NFS_MAX_CONTEXT_LEN] = '\0';
- ret = vfs_parse_fs_string(fc, "context",
- data->context, strlen(data->context));
+ ret = vfs_parse_fs_string(fc, "context", data->context);
if (ret < 0)
return ret;
#else
@@ -1651,6 +1752,9 @@ static int nfs_init_fs_context(struct fs_context *fc)
ctx->xprtsec.cert_serial = TLS_NO_CERT;
ctx->xprtsec.privkey_serial = TLS_NO_PRIVKEY;
+ if (fc->net_ns != &init_net)
+ ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;
+
fc->s_iflags |= SB_I_STABLE_WRITES;
}
fc->fs_private = ctx;
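The parsing above leaves "rdirplus" with three effective states (heuristic default, disabled, forced), encoded in two mount flags, and makes ENETDOWN/ENETUNREACH fatal by default for mounts created outside the initial network namespace. A minimal sketch of the flag encoding follows; the helper itself is illustrative only and not part of the patch, while the NFS_MOUNT_* flags are the ones manipulated above.

/* Illustrative only: map the flags set by the Opt_rdirplus case above
 * back to the mount-option spelling that selects them.
 */
static const char *rdirplus_mode(unsigned int flags)
{
	if (flags & NFS_MOUNT_FORCE_RDIRPLUS)
		return "rdirplus=force";	/* always use READDIRPLUS */
	if (flags & NFS_MOUNT_NORDIRPLUS)
		return "rdirplus=none";		/* same effect as nordirplus */
	return "rdirplus";			/* heuristic default */
}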
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 810269ee0a50..8b0785178731 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -263,6 +263,12 @@ int nfs_netfs_readahead(struct readahead_control *ractl)
static atomic_t nfs_netfs_debug_id;
static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
{
+ if (!file) {
+ if (WARN_ON_ONCE(rreq->origin != NETFS_PGPRIV2_COPY_TO_CACHE))
+ return -EIO;
+ return 0;
+ }
+
rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
@@ -274,7 +280,8 @@ static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *fi
static void nfs_netfs_free_request(struct netfs_io_request *rreq)
{
- put_nfs_open_context(rreq->netfs_priv);
+ if (rreq->netfs_priv)
+ put_nfs_open_context(rreq->netfs_priv);
}
static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
@@ -307,8 +314,10 @@ static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
&nfs_async_read_completion_ops);
netfs = nfs_netfs_alloc(sreq);
- if (!netfs)
- return netfs_read_subreq_terminated(sreq, -ENOMEM, false);
+ if (!netfs) {
+ sreq->error = -ENOMEM;
+ return netfs_read_subreq_terminated(sreq);
+ }
pgio.pg_netfs = netfs; /* used in completion */
@@ -358,6 +367,7 @@ void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
sreq = netfs->sreq;
if (test_bit(NFS_IOHDR_EOF, &hdr->flags) &&
+ sreq->rreq->origin != NETFS_UNBUFFERED_READ &&
sreq->rreq->origin != NETFS_DIO_READ)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags);
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 772d485e96d3..9d86868f4998 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -74,7 +74,8 @@ static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
*/
netfs->sreq->transferred = min_t(s64, netfs->sreq->len,
atomic64_read(&netfs->transferred));
- netfs_read_subreq_terminated(netfs->sreq, netfs->error, false);
+ netfs->sreq->error = netfs->error;
+ netfs_read_subreq_terminated(netfs->sreq);
kfree(netfs);
}
static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 596f35170137..f76fe406937a 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -74,6 +74,8 @@ nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
int nfs_wait_bit_killable(struct wait_bit_key *key, int mode)
{
+ if (unlikely(nfs_current_task_exiting()))
+ return -EINTR;
schedule();
if (signal_pending_state(mode, current))
return -ERESTARTSYS;
@@ -106,7 +108,7 @@ u64 nfs_compat_user_ino64(u64 fileid)
int nfs_drop_inode(struct inode *inode)
{
- return NFS_STALE(inode) || generic_drop_inode(inode);
+ return NFS_STALE(inode) || inode_generic_drop(inode);
}
EXPORT_SYMBOL_GPL(nfs_drop_inode);
@@ -195,6 +197,7 @@ void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
if (!(flags & NFS_INO_REVAL_FORCED))
flags &= ~(NFS_INO_INVALID_MODE |
NFS_INO_INVALID_OTHER |
+ NFS_INO_INVALID_BTIME |
NFS_INO_INVALID_XATTR);
flags &= ~(NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE);
}
@@ -472,7 +475,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
goto out_no_inode;
}
- if (inode->i_state & I_NEW) {
+ if (inode_state_read_once(inode) & I_NEW) {
struct nfs_inode *nfsi = NFS_I(inode);
unsigned long now = jiffies;
@@ -520,6 +523,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
inode_set_atime(inode, 0, 0);
inode_set_mtime(inode, 0, 0);
inode_set_ctime(inode, 0, 0);
+ memset(&nfsi->btime, 0, sizeof(nfsi->btime));
inode_set_iversion_raw(inode, 0);
inode->i_size = 0;
clear_nlink(inode);
@@ -543,6 +547,10 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
inode_set_ctime_to_ts(inode, fattr->ctime);
else if (fattr_supported & NFS_ATTR_FATTR_CTIME)
nfs_set_cache_invalid(inode, NFS_INO_INVALID_CTIME);
+ if (fattr->valid & NFS_ATTR_FATTR_BTIME)
+ nfsi->btime = fattr->btime;
+ else if (fattr_supported & NFS_ATTR_FATTR_BTIME)
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_BTIME);
if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
inode_set_iversion_raw(inode, fattr->change_attr);
else
@@ -555,6 +563,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
set_nlink(inode, fattr->nlink);
else if (fattr_supported & NFS_ATTR_FATTR_NLINK)
nfs_set_cache_invalid(inode, NFS_INO_INVALID_NLINK);
+ else
+ set_nlink(inode, 1);
if (fattr->valid & NFS_ATTR_FATTR_OWNER)
inode->i_uid = fattr->uid;
else if (fattr_supported & NFS_ATTR_FATTR_OWNER)
@@ -598,7 +608,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
inode->i_sb->s_id,
(unsigned long long)NFS_FILEID(inode),
nfs_display_fhandle_hash(fh),
- atomic_read(&inode->i_count));
+ icount_read(inode));
out:
return inode;
@@ -631,6 +641,34 @@ nfs_fattr_fixup_delegated(struct inode *inode, struct nfs_fattr *fattr)
}
}
+static void nfs_set_timestamps_to_ts(struct inode *inode, struct iattr *attr)
+{
+ unsigned int cache_flags = 0;
+
+ if (attr->ia_valid & ATTR_MTIME_SET) {
+ struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 mtime = inode_get_mtime(inode);
+ struct timespec64 now;
+ int updated = 0;
+
+ now = inode_set_ctime_current(inode);
+ if (!timespec64_equal(&now, &ctime))
+ updated |= S_CTIME;
+
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
+ if (!timespec64_equal(&now, &mtime))
+ updated |= S_MTIME;
+
+ inode_maybe_inc_iversion(inode, updated);
+ cache_flags |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
+ }
+ if (attr->ia_valid & ATTR_ATIME_SET) {
+ inode_set_atime_to_ts(inode, attr->ia_atime);
+ cache_flags |= NFS_INO_INVALID_ATIME;
+ }
+ NFS_I(inode)->cache_validity &= ~cache_flags;
+}
+
static void nfs_update_timestamps(struct inode *inode, unsigned int ia_valid)
{
enum file_time_flags time_flags = 0;
@@ -678,7 +716,10 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
{
struct inode *inode = d_inode(dentry);
struct nfs_fattr *fattr;
+ loff_t oldsize = i_size_read(inode);
int error = 0;
+ kuid_t task_uid = current_fsuid();
+ kuid_t owner_uid = inode->i_uid;
nfs_inc_stats(inode, NFSIOS_VFSSETATTR);
@@ -693,20 +734,37 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (error)
return error;
- if (attr->ia_size == i_size_read(inode))
+ if (attr->ia_size == oldsize)
attr->ia_valid &= ~ATTR_SIZE;
}
if (nfs_have_delegated_mtime(inode) && attr->ia_valid & ATTR_MTIME) {
spin_lock(&inode->i_lock);
- nfs_update_timestamps(inode, attr->ia_valid);
+ if (attr->ia_valid & ATTR_MTIME_SET) {
+ if (uid_eq(task_uid, owner_uid)) {
+ nfs_set_timestamps_to_ts(inode, attr);
+ attr->ia_valid &= ~(ATTR_MTIME|ATTR_MTIME_SET|
+ ATTR_ATIME|ATTR_ATIME_SET);
+ }
+ } else {
+ nfs_update_timestamps(inode, attr->ia_valid);
+ attr->ia_valid &= ~(ATTR_MTIME|ATTR_ATIME);
+ }
spin_unlock(&inode->i_lock);
- attr->ia_valid &= ~(ATTR_MTIME | ATTR_ATIME);
} else if (nfs_have_delegated_atime(inode) &&
attr->ia_valid & ATTR_ATIME &&
!(attr->ia_valid & ATTR_MTIME)) {
- nfs_update_delegated_atime(inode);
- attr->ia_valid &= ~ATTR_ATIME;
+ if (attr->ia_valid & ATTR_ATIME_SET) {
+ if (uid_eq(task_uid, owner_uid)) {
+ spin_lock(&inode->i_lock);
+ nfs_set_timestamps_to_ts(inode, attr);
+ spin_unlock(&inode->i_lock);
+ attr->ia_valid &= ~(ATTR_ATIME|ATTR_ATIME_SET);
+ }
+ } else {
+ nfs_update_delegated_atime(inode);
+ attr->ia_valid &= ~ATTR_ATIME;
+ }
}
/* Optimization: if the end result is no change, don't RPC */
@@ -716,8 +774,10 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
trace_nfs_setattr_enter(inode);
/* Write all dirty data */
- if (S_ISREG(inode->i_mode))
+ if (S_ISREG(inode->i_mode)) {
+ nfs_file_block_o_direct(NFS_I(inode));
nfs_sync_inode(inode);
+ }
fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
if (fattr == NULL) {
@@ -726,8 +786,12 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
}
error = NFS_PROTO(inode)->setattr(dentry, fattr, attr);
- if (error == 0)
+ if (error == 0) {
+ if (attr->ia_valid & ATTR_SIZE)
+ nfs_truncate_last_folio(inode->i_mapping, oldsize,
+ attr->ia_size);
error = nfs_refresh_inode(inode, fattr);
+ }
nfs_free_fattr(fattr);
out:
trace_nfs_setattr_exit(inode, error);
@@ -886,6 +950,7 @@ static void nfs_readdirplus_parent_cache_hit(struct dentry *dentry)
static u32 nfs_get_valid_attrmask(struct inode *inode)
{
+ u64 fattr_valid = NFS_SERVER(inode)->fattr_valid;
unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
u32 reply_mask = STATX_INO | STATX_TYPE;
@@ -905,6 +970,9 @@ static u32 nfs_get_valid_attrmask(struct inode *inode)
reply_mask |= STATX_UID | STATX_GID;
if (!(cache_validity & NFS_INO_INVALID_BLOCKS))
reply_mask |= STATX_BLOCKS;
+ if (!(cache_validity & NFS_INO_INVALID_BTIME) &&
+ (fattr_valid & NFS_ATTR_FATTR_BTIME))
+ reply_mask |= STATX_BTIME;
if (!(cache_validity & NFS_INO_INVALID_CHANGE))
reply_mask |= STATX_CHANGE_COOKIE;
return reply_mask;
@@ -915,6 +983,7 @@ int nfs_getattr(struct mnt_idmap *idmap, const struct path *path,
{
struct inode *inode = d_inode(path->dentry);
struct nfs_server *server = NFS_SERVER(inode);
+ u64 fattr_valid = server->fattr_valid;
unsigned long cache_validity;
int err = 0;
bool force_sync = query_flags & AT_STATX_FORCE_SYNC;
@@ -925,9 +994,12 @@ int nfs_getattr(struct mnt_idmap *idmap, const struct path *path,
request_mask &= STATX_TYPE | STATX_MODE | STATX_NLINK | STATX_UID |
STATX_GID | STATX_ATIME | STATX_MTIME | STATX_CTIME |
- STATX_INO | STATX_SIZE | STATX_BLOCKS |
+ STATX_INO | STATX_SIZE | STATX_BLOCKS | STATX_BTIME |
STATX_CHANGE_COOKIE;
+ if (!(fattr_valid & NFS_ATTR_FATTR_BTIME))
+ request_mask &= ~STATX_BTIME;
+
if ((query_flags & AT_STATX_DONT_SYNC) && !force_sync) {
if (readdirplus_enabled)
nfs_readdirplus_parent_cache_hit(path->dentry);
@@ -959,7 +1031,7 @@ int nfs_getattr(struct mnt_idmap *idmap, const struct path *path,
/* Is the user requesting attributes that might need revalidation? */
if (!(request_mask & (STATX_MODE|STATX_NLINK|STATX_ATIME|STATX_CTIME|
STATX_MTIME|STATX_UID|STATX_GID|
- STATX_SIZE|STATX_BLOCKS|
+ STATX_SIZE|STATX_BLOCKS|STATX_BTIME|
STATX_CHANGE_COOKIE)))
goto out_no_revalidate;
@@ -983,6 +1055,8 @@ int nfs_getattr(struct mnt_idmap *idmap, const struct path *path,
do_update |= cache_validity & NFS_INO_INVALID_OTHER;
if (request_mask & STATX_BLOCKS)
do_update |= cache_validity & NFS_INO_INVALID_BLOCKS;
+ if (request_mask & STATX_BTIME)
+ do_update |= cache_validity & NFS_INO_INVALID_BTIME;
if (do_update) {
if (readdirplus_enabled)
@@ -1004,6 +1078,22 @@ out_no_revalidate:
stat->attributes |= STATX_ATTR_CHANGE_MONOTONIC;
if (S_ISDIR(inode->i_mode))
stat->blksize = NFS_SERVER(inode)->dtsize;
+ stat->btime = NFS_I(inode)->btime;
+
+ /* Special handling for STATX_DIOALIGN and STATX_DIO_READ_ALIGN
+ * - NFS doesn't have DIO alignment constraints, avoid getting
+ * these DIO attrs from remote and just respond with most
+ * accommodating limits (so client will issue supported DIO).
+ * - this is unintuitive, but the most coarse-grained
+ * dio_offset_align is the most accommodating.
+ */
+ if ((request_mask & (STATX_DIOALIGN | STATX_DIO_READ_ALIGN)) &&
+ S_ISREG(inode->i_mode)) {
+ stat->result_mask |= STATX_DIOALIGN | STATX_DIO_READ_ALIGN;
+ stat->dio_mem_align = 4; /* 4-byte alignment */
+ stat->dio_offset_align = PAGE_SIZE;
+ stat->dio_read_offset_align = stat->dio_offset_align;
+ }
out:
trace_nfs_getattr_exit(inode, err);
return err;
@@ -1137,6 +1227,8 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry,
ctx->lock_context.open_context = ctx;
INIT_LIST_HEAD(&ctx->list);
ctx->mdsthreshold = NULL;
+ nfs_localio_file_init(&ctx->nfl);
+
return ctx;
}
EXPORT_SYMBOL_GPL(alloc_nfs_open_context);
@@ -1168,6 +1260,7 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
nfs_sb_deactive(sb);
put_rpccred(rcu_dereference_protected(ctx->ll_cred, 1));
kfree(ctx->mdsthreshold);
+ nfs_close_local_fh(&ctx->nfl);
kfree_rcu(ctx, rcu_head);
}
@@ -1895,7 +1988,7 @@ static int nfs_inode_finish_partial_attr_update(const struct nfs_fattr *fattr,
NFS_INO_INVALID_ATIME | NFS_INO_INVALID_CTIME |
NFS_INO_INVALID_MTIME | NFS_INO_INVALID_SIZE |
NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_OTHER |
- NFS_INO_INVALID_NLINK;
+ NFS_INO_INVALID_NLINK | NFS_INO_INVALID_BTIME;
unsigned long cache_validity = NFS_I(inode)->cache_validity;
enum nfs4_change_attr_type ctype = NFS_SERVER(inode)->change_attr_type;
@@ -2161,10 +2254,10 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
bool attr_changed = false;
bool have_delegation;
- dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%x)\n",
+ dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%llx)\n",
__func__, inode->i_sb->s_id, inode->i_ino,
nfs_display_fhandle_hash(NFS_FH(inode)),
- atomic_read(&inode->i_count), fattr->valid);
+ icount_read(inode), fattr->valid);
if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
/* Only a mounted-on-fileid? Just exit */
@@ -2256,7 +2349,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
| NFS_INO_INVALID_BLOCKS
| NFS_INO_INVALID_NLINK
| NFS_INO_INVALID_MODE
- | NFS_INO_INVALID_OTHER;
+ | NFS_INO_INVALID_OTHER
+ | NFS_INO_INVALID_BTIME;
if (S_ISDIR(inode->i_mode))
nfs_force_lookup_revalidate(inode);
attr_changed = true;
@@ -2290,6 +2384,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_CTIME;
+ if (fattr->valid & NFS_ATTR_FATTR_BTIME)
+ nfsi->btime = fattr->btime;
+ else if (fattr_supported & NFS_ATTR_FATTR_BTIME)
+ nfsi->cache_validity |=
+ save_cache_validity & NFS_INO_INVALID_BTIME;
+
/* Check if our cached file size is stale */
if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
new_isize = nfs_size_to_loff_t(fattr->size);
@@ -2541,15 +2641,26 @@ EXPORT_SYMBOL_GPL(nfs_net_id);
static int nfs_net_init(struct net *net)
{
struct nfs_net *nn = net_generic(net, nfs_net_id);
+ int err;
nfs_clients_init(net);
if (!rpc_proc_register(net, &nn->rpcstats)) {
- nfs_clients_exit(net);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_proc_rpc;
}
- return nfs_fs_proc_net_init(net);
+ err = nfs_fs_proc_net_init(net);
+ if (err)
+ goto err_proc_nfs;
+
+ return 0;
+
+err_proc_nfs:
+ rpc_proc_unregister(net, "nfs");
+err_proc_rpc:
+ nfs_clients_exit(net);
+ return err;
}
static void nfs_net_exit(struct net *net)
@@ -2566,6 +2677,35 @@ static struct pernet_operations nfs_net_ops = {
.size = sizeof(struct nfs_net),
};
+#ifdef CONFIG_KEYS
+static struct key *nfs_keyring;
+
+static int __init nfs_init_keyring(void)
+{
+ nfs_keyring = keyring_alloc(".nfs",
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+ current_cred(),
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ (KEY_USR_ALL & ~KEY_USR_SETATTR),
+ KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
+ return PTR_ERR_OR_ZERO(nfs_keyring);
+}
+
+static void nfs_exit_keyring(void)
+{
+ key_put(nfs_keyring);
+}
+#else
+static inline int nfs_init_keyring(void)
+{
+ return 0;
+}
+
+static inline void nfs_exit_keyring(void)
+{
+}
+#endif /* CONFIG_KEYS */
+
/*
* Initialize NFS
*/
@@ -2573,6 +2713,10 @@ static int __init init_nfs_fs(void)
{
int err;
+ err = nfs_init_keyring();
+ if (err)
+ return err;
+
err = nfs_sysfs_init();
if (err < 0)
goto out10;
@@ -2633,6 +2777,7 @@ out7:
out9:
nfs_sysfs_exit();
out10:
+ nfs_exit_keyring();
return err;
}
@@ -2648,6 +2793,7 @@ static void __exit exit_nfs_fs(void)
nfs_fs_proc_exit();
nfsiod_stop();
nfs_sysfs_exit();
+ nfs_exit_keyring();
}
/* Not quite true; I just maintain it */
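With the inode.c changes above, nfs_getattr() can report the server-provided creation time and advertises permissive direct-I/O alignment limits. A minimal userspace sketch of querying those attributes with statx(2) follows; it assumes a libc that provides the statx() wrapper and headers that define STATX_DIO_READ_ALIGN (drop that bit on older headers).

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;
	unsigned int mask = STATX_BTIME | STATX_DIOALIGN | STATX_DIO_READ_ALIGN;

	if (argc < 2 || statx(AT_FDCWD, argv[1], 0, mask, &stx) != 0)
		return 1;

	if (stx.stx_mask & STATX_BTIME)		/* filled from nfsi->btime */
		printf("btime: %lld.%09u\n",
		       (long long)stx.stx_btime.tv_sec, stx.stx_btime.tv_nsec);
	if (stx.stx_mask & STATX_DIOALIGN)	/* 4 / PAGE_SIZE per the hunk above */
		printf("dio mem align %u, offset align %u\n",
		       stx.stx_dio_mem_align, stx.stx_dio_offset_align);
	return 0;
}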
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e564bd11ba60..2ecd38e1d17a 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -207,7 +207,6 @@ struct nfs_mount_request {
};
extern int nfs_mount(struct nfs_mount_request *info, int timeo, int retrans);
-extern void nfs_umount(const struct nfs_mount_request *info);
/* client.c */
extern const struct rpc_program nfs_program;
@@ -232,7 +231,7 @@ extern struct nfs_client *
nfs4_find_client_sessionid(struct net *, const struct sockaddr *,
struct nfs4_sessionid *, u32);
extern struct nfs_server *nfs_create_server(struct fs_context *);
-extern void nfs4_server_set_init_caps(struct nfs_server *);
+extern void nfs_server_set_init_caps(struct nfs_server *);
extern struct nfs_server *nfs4_create_server(struct fs_context *);
extern struct nfs_server *nfs4_create_referral_server(struct fs_context *);
extern int nfs4_update_server(struct nfs_server *server, const char *hostname,
@@ -400,8 +399,8 @@ struct dentry *nfs_lookup(struct inode *, struct dentry *, unsigned int);
void nfs_d_prune_case_insensitive_aliases(struct inode *inode);
int nfs_create(struct mnt_idmap *, struct inode *, struct dentry *,
umode_t, bool);
-int nfs_mkdir(struct mnt_idmap *, struct inode *, struct dentry *,
- umode_t);
+struct dentry *nfs_mkdir(struct mnt_idmap *, struct inode *, struct dentry *,
+ umode_t);
int nfs_rmdir(struct inode *, struct dentry *);
int nfs_unlink(struct inode *, struct dentry *);
int nfs_symlink(struct mnt_idmap *, struct inode *, struct dentry *,
@@ -432,12 +431,14 @@ loff_t nfs_file_llseek(struct file *, loff_t, int);
ssize_t nfs_file_read(struct kiocb *, struct iov_iter *);
ssize_t nfs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
-int nfs_file_mmap(struct file *, struct vm_area_struct *);
+int nfs_file_mmap_prepare(struct vm_area_desc *);
ssize_t nfs_file_write(struct kiocb *, struct iov_iter *);
int nfs_file_release(struct inode *, struct file *);
int nfs_lock(struct file *, int, struct file_lock *);
int nfs_flock(struct file *, int, struct file_lock *);
int nfs_check_flags(int);
+void nfs_truncate_last_folio(struct address_space *mapping, loff_t from,
+ loff_t to);
/* inode.c */
extern struct workqueue_struct *nfsiod_workqueue;
@@ -455,11 +456,22 @@ extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
/* localio.c */
-extern void nfs_local_disable(struct nfs_client *);
-extern void nfs_local_probe(struct nfs_client *);
+struct nfs_local_dio {
+ u32 mem_align;
+ u32 offset_align;
+ loff_t middle_offset;
+ loff_t end_offset;
+ ssize_t start_len; /* Length for misaligned first extent */
+ ssize_t middle_len; /* Length for DIO-aligned middle extent */
+ ssize_t end_len; /* Length for misaligned last extent */
+};
+
+extern void nfs_local_probe_async(struct nfs_client *);
+extern void nfs_local_probe_async_work(struct work_struct *);
extern struct nfsd_file *nfs_local_open_fh(struct nfs_client *,
const struct cred *,
struct nfs_fh *,
+ struct nfs_file_localio *,
const fmode_t);
extern int nfs_local_doio(struct nfs_client *,
struct nfsd_file *,
@@ -471,11 +483,12 @@ extern int nfs_local_commit(struct nfsd_file *,
extern bool nfs_server_is_local(const struct nfs_client *clp);
#else /* CONFIG_NFS_LOCALIO */
-static inline void nfs_local_disable(struct nfs_client *clp) {}
static inline void nfs_local_probe(struct nfs_client *clp) {}
+static inline void nfs_local_probe_async(struct nfs_client *clp) {}
static inline struct nfsd_file *
nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
- struct nfs_fh *fh, const fmode_t mode)
+ struct nfs_fh *fh, struct nfs_file_localio *nfl,
+ const fmode_t mode)
{
return NULL;
}
@@ -529,6 +542,16 @@ static inline bool nfs_file_io_is_buffered(struct nfs_inode *nfsi)
return test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0;
}
+/* Must be called with exclusively locked inode->i_rwsem */
+static inline void nfs_file_block_o_direct(struct nfs_inode *nfsi)
+{
+ if (test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
+ clear_bit(NFS_INO_ODIRECT, &nfsi->flags);
+ inode_dio_wait(&nfsi->vfs_inode);
+ }
+}
+
+
/* namespace.c */
#define NFS_PATH_CANONICAL 1
extern char *nfs_path(char **p, struct dentry *dentry,
@@ -669,9 +692,12 @@ nfs_write_match_verf(const struct nfs_writeverf *verf,
static inline gfp_t nfs_io_gfp_mask(void)
{
- if (current->flags & PF_WQ_WORKER)
- return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
- return GFP_KERNEL;
+ gfp_t ret = current_gfp_context(GFP_KERNEL);
+
+ /* For workers __GFP_NORETRY only with __GFP_IO or __GFP_FS */
+ if ((current->flags & PF_WQ_WORKER) && ret == GFP_KERNEL)
+ ret |= __GFP_NORETRY | __GFP_NOWARN;
+ return ret;
}
/*
@@ -896,18 +922,16 @@ u64 nfs_timespec_to_change_attr(const struct timespec64 *ts)
return ((u64)ts->tv_sec << 30) + ts->tv_nsec;
}
-#ifdef CONFIG_CRC32
static inline u32 nfs_stateid_hash(const nfs4_stateid *stateid)
{
return ~crc32_le(0xFFFFFFFF, &stateid->other[0],
NFS4_STATEID_OTHER_SIZE);
}
-#else
-static inline u32 nfs_stateid_hash(nfs4_stateid *stateid)
+
+static inline bool nfs_current_task_exiting(void)
{
- return 0;
+ return (current->flags & PF_EXITING) != 0;
}
-#endif
static inline bool nfs_error_is_fatal(int err)
{
diff --git a/fs/nfs/io.c b/fs/nfs/io.c
index 3388faf2acb9..d275b0a250bf 100644
--- a/fs/nfs/io.c
+++ b/fs/nfs/io.c
@@ -14,15 +14,6 @@
#include "internal.h"
-/* Call with exclusively locked inode->i_rwsem */
-static void nfs_block_o_direct(struct nfs_inode *nfsi, struct inode *inode)
-{
- if (test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
- clear_bit(NFS_INO_ODIRECT, &nfsi->flags);
- inode_dio_wait(inode);
- }
-}
-
/**
* nfs_start_io_read - declare the file is being used for buffered reads
* @inode: file inode
@@ -57,7 +48,7 @@ nfs_start_io_read(struct inode *inode)
err = down_write_killable(&inode->i_rwsem);
if (err)
return err;
- nfs_block_o_direct(nfsi, inode);
+ nfs_file_block_o_direct(nfsi);
downgrade_write(&inode->i_rwsem);
return 0;
@@ -90,7 +81,7 @@ nfs_start_io_write(struct inode *inode)
err = down_write_killable(&inode->i_rwsem);
if (!err)
- nfs_block_o_direct(NFS_I(inode), inode);
+ nfs_file_block_o_direct(NFS_I(inode));
return err;
}
diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c
index 4b8618cf114c..f33bfa7b58e6 100644
--- a/fs/nfs/localio.c
+++ b/fs/nfs/localio.c
@@ -30,12 +30,22 @@
#define NFSDBG_FACILITY NFSDBG_VFS
+#define NFSLOCAL_MAX_IOS 3
+
struct nfs_local_kiocb {
struct kiocb kiocb;
struct bio_vec *bvec;
struct nfs_pgio_header *hdr;
struct work_struct work;
+ void (*aio_complete_work)(struct work_struct *);
struct nfsd_file *localio;
+ /* Begin mostly DIO-specific members */
+ size_t end_len;
+ short int end_iter_index;
+ atomic_t n_iters;
+ bool iter_is_dio_aligned[NFSLOCAL_MAX_IOS];
+ struct iov_iter iters[NFSLOCAL_MAX_IOS] ____cacheline_aligned;
+ /* End mostly DIO-specific members */
};
struct nfs_local_fsync_ctx {
@@ -50,7 +60,7 @@ module_param(localio_enabled, bool, 0644);
static inline bool nfs_client_is_local(const struct nfs_client *clp)
{
- return !!test_bit(NFS_CS_LOCAL_IO, &clp->cl_flags);
+ return !!rcu_access_pointer(clp->cl_uuid.net);
}
bool nfs_server_is_local(const struct nfs_client *clp)
@@ -116,30 +126,6 @@ const struct rpc_program nfslocalio_program = {
};
/*
- * nfs_local_enable - enable local i/o for an nfs_client
- */
-static void nfs_local_enable(struct nfs_client *clp)
-{
- spin_lock(&clp->cl_localio_lock);
- set_bit(NFS_CS_LOCAL_IO, &clp->cl_flags);
- trace_nfs_local_enable(clp);
- spin_unlock(&clp->cl_localio_lock);
-}
-
-/*
- * nfs_local_disable - disable local i/o for an nfs_client
- */
-void nfs_local_disable(struct nfs_client *clp)
-{
- spin_lock(&clp->cl_localio_lock);
- if (test_and_clear_bit(NFS_CS_LOCAL_IO, &clp->cl_flags)) {
- trace_nfs_local_disable(clp);
- nfs_uuid_invalidate_one_client(&clp->cl_uuid);
- }
- spin_unlock(&clp->cl_localio_lock);
-}
-
-/*
* nfs_init_localioclient - Initialise an NFS localio client connection
*/
static struct rpc_clnt *nfs_init_localioclient(struct nfs_client *clp)
@@ -178,7 +164,7 @@ static bool nfs_server_uuid_is_local(struct nfs_client *clp)
rpc_shutdown_client(rpcclient_localio);
/* Server is only local if it initialized required struct members */
- if (status || !clp->cl_uuid.net || !clp->cl_uuid.dom)
+ if (status || !rcu_access_pointer(clp->cl_uuid.net) || !clp->cl_uuid.dom)
return false;
return true;
@@ -189,79 +175,116 @@ static bool nfs_server_uuid_is_local(struct nfs_client *clp)
* - called after alloc_client and init_client (so cl_rpcclient exists)
* - this function is idempotent, it can be called for old or new clients
*/
-void nfs_local_probe(struct nfs_client *clp)
+static void nfs_local_probe(struct nfs_client *clp)
{
/* Disallow localio if disabled via sysfs or AUTH_SYS isn't used */
if (!localio_enabled ||
clp->cl_rpcclient->cl_auth->au_flavor != RPC_AUTH_UNIX) {
- nfs_local_disable(clp);
+ nfs_localio_disable_client(clp);
return;
}
- if (nfs_client_is_local(clp)) {
- /* If already enabled, disable and re-enable */
- nfs_local_disable(clp);
- }
+ if (nfs_client_is_local(clp))
+ return;
if (!nfs_uuid_begin(&clp->cl_uuid))
return;
if (nfs_server_uuid_is_local(clp))
- nfs_local_enable(clp);
+ nfs_localio_enable_client(clp);
nfs_uuid_end(&clp->cl_uuid);
}
-EXPORT_SYMBOL_GPL(nfs_local_probe);
+
+void nfs_local_probe_async_work(struct work_struct *work)
+{
+ struct nfs_client *clp =
+ container_of(work, struct nfs_client, cl_local_probe_work);
+
+ if (!refcount_inc_not_zero(&clp->cl_count))
+ return;
+ nfs_local_probe(clp);
+ nfs_put_client(clp);
+}
+
+void nfs_local_probe_async(struct nfs_client *clp)
+{
+ queue_work(nfsiod_workqueue, &clp->cl_local_probe_work);
+}
+EXPORT_SYMBOL_GPL(nfs_local_probe_async);
+
+static inline void nfs_local_file_put(struct nfsd_file *localio)
+{
+ /* nfs_to_nfsd_file_put_local() expects an __rcu pointer
+ * but we have a __kernel pointer. It is always safe
+ * to cast a __kernel pointer to an __rcu pointer
+ * because the cast only weakens what is known about the pointer.
+ */
+ struct nfsd_file __rcu *nf = (struct nfsd_file __rcu*) localio;
+
+ nfs_to_nfsd_file_put_local(&nf);
+}
/*
- * nfs_local_open_fh - open a local filehandle in terms of nfsd_file
+ * __nfs_local_open_fh - open a local filehandle in terms of nfsd_file.
*
- * Returns a pointer to a struct nfsd_file or NULL
+ * Returns a pointer to a struct nfsd_file or ERR_PTR.
+ * Caller must release returned nfsd_file with nfs_to_nfsd_file_put_local().
*/
-struct nfsd_file *
-nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
- struct nfs_fh *fh, const fmode_t mode)
+static struct nfsd_file *
+__nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ struct nfs_fh *fh, struct nfs_file_localio *nfl,
+ struct nfsd_file __rcu **pnf,
+ const fmode_t mode)
{
+ int status = 0;
struct nfsd_file *localio;
- int status;
-
- if (!nfs_server_is_local(clp))
- return NULL;
- if (mode & ~(FMODE_READ | FMODE_WRITE))
- return NULL;
localio = nfs_open_local_fh(&clp->cl_uuid, clp->cl_rpcclient,
- cred, fh, mode);
+ cred, fh, nfl, pnf, mode);
if (IS_ERR(localio)) {
status = PTR_ERR(localio);
- trace_nfs_local_open_fh(fh, mode, status);
switch (status) {
case -ENOMEM:
case -ENXIO:
case -ENOENT:
- /* Revalidate localio, will disable if unsupported */
+ /* Revalidate localio */
+ nfs_localio_disable_client(clp);
nfs_local_probe(clp);
}
- return NULL;
}
+ trace_nfs_local_open_fh(fh, mode, status);
return localio;
}
-EXPORT_SYMBOL_GPL(nfs_local_open_fh);
-static struct bio_vec *
-nfs_bvec_alloc_and_import_pagevec(struct page **pagevec,
- unsigned int npages, gfp_t flags)
+/*
+ * nfs_local_open_fh - open a local filehandle in terms of nfsd_file.
+ * First check whether an open nfsd_file is already cached; otherwise
+ * call __nfs_local_open_fh() and cache the result in nfs_file_localio.
+ *
+ * Returns a pointer to a struct nfsd_file or NULL.
+ */
+struct nfsd_file *
+nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ struct nfs_fh *fh, struct nfs_file_localio *nfl,
+ const fmode_t mode)
{
- struct bio_vec *bvec, *p;
+ struct nfsd_file *nf, __rcu **pnf;
- bvec = kmalloc_array(npages, sizeof(*bvec), flags);
- if (bvec != NULL) {
- for (p = bvec; npages > 0; p++, pagevec++, npages--) {
- p->bv_page = *pagevec;
- p->bv_len = PAGE_SIZE;
- p->bv_offset = 0;
- }
- }
- return bvec;
+ if (!nfs_server_is_local(clp))
+ return NULL;
+ if (mode & ~(FMODE_READ | FMODE_WRITE))
+ return NULL;
+
+ if (mode & FMODE_WRITE)
+ pnf = &nfl->rw_file;
+ else
+ pnf = &nfl->ro_file;
+
+ nf = __nfs_local_open_fh(clp, cred, fh, nfl, pnf, mode);
+ if (IS_ERR(nf))
+ return NULL;
+ return nf;
}
+EXPORT_SYMBOL_GPL(nfs_local_open_fh);
static void
nfs_local_iocb_free(struct nfs_local_kiocb *iocb)
@@ -276,31 +299,200 @@ nfs_local_iocb_alloc(struct nfs_pgio_header *hdr,
{
struct nfs_local_kiocb *iocb;
- iocb = kmalloc(sizeof(*iocb), flags);
+ iocb = kzalloc(sizeof(*iocb), flags);
if (iocb == NULL)
return NULL;
- iocb->bvec = nfs_bvec_alloc_and_import_pagevec(hdr->page_array.pagevec,
- hdr->page_array.npages, flags);
+
+ iocb->bvec = kmalloc_array(hdr->page_array.npages,
+ sizeof(struct bio_vec), flags);
if (iocb->bvec == NULL) {
kfree(iocb);
return NULL;
}
+
init_sync_kiocb(&iocb->kiocb, file);
- iocb->kiocb.ki_pos = hdr->args.offset;
+
iocb->hdr = hdr;
+ iocb->kiocb.ki_pos = hdr->args.offset;
iocb->kiocb.ki_flags &= ~IOCB_APPEND;
+ iocb->kiocb.ki_complete = NULL;
+ iocb->aio_complete_work = NULL;
+
+ iocb->end_iter_index = -1;
+
return iocb;
}
+static bool
+nfs_is_local_dio_possible(struct nfs_local_kiocb *iocb, int rw,
+ size_t len, struct nfs_local_dio *local_dio)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+ loff_t offset = hdr->args.offset;
+ u32 nf_dio_mem_align, nf_dio_offset_align, nf_dio_read_offset_align;
+ loff_t start_end, orig_end, middle_end;
+
+ nfs_to->nfsd_file_dio_alignment(iocb->localio, &nf_dio_mem_align,
+ &nf_dio_offset_align, &nf_dio_read_offset_align);
+ if (rw == ITER_DEST)
+ nf_dio_offset_align = nf_dio_read_offset_align;
+
+ if (unlikely(!nf_dio_mem_align || !nf_dio_offset_align))
+ return false;
+ if (unlikely(nf_dio_offset_align > PAGE_SIZE))
+ return false;
+ if (unlikely(len < nf_dio_offset_align))
+ return false;
+
+ local_dio->mem_align = nf_dio_mem_align;
+ local_dio->offset_align = nf_dio_offset_align;
+
+ start_end = round_up(offset, nf_dio_offset_align);
+ orig_end = offset + len;
+ middle_end = round_down(orig_end, nf_dio_offset_align);
+
+ local_dio->middle_offset = start_end;
+ local_dio->end_offset = middle_end;
+
+ local_dio->start_len = start_end - offset;
+ local_dio->middle_len = middle_end - start_end;
+ local_dio->end_len = orig_end - middle_end;
+
+ if (rw == ITER_DEST)
+ trace_nfs_local_dio_read(hdr->inode, offset, len, local_dio);
+ else
+ trace_nfs_local_dio_write(hdr->inode, offset, len, local_dio);
+ return true;
+}
+
+static bool nfs_iov_iter_aligned_bvec(const struct iov_iter *i,
+ unsigned int addr_mask, unsigned int len_mask)
+{
+ const struct bio_vec *bvec = i->bvec;
+ size_t skip = i->iov_offset;
+ size_t size = i->count;
+
+ if (size & len_mask)
+ return false;
+ do {
+ size_t len = bvec->bv_len;
+
+ if (len > size)
+ len = size;
+ if ((unsigned long)(bvec->bv_offset + skip) & addr_mask)
+ return false;
+ bvec++;
+ size -= len;
+ skip = 0;
+ } while (size);
+
+ return true;
+}
+
static void
-nfs_local_iter_init(struct iov_iter *i, struct nfs_local_kiocb *iocb, int dir)
+nfs_local_iter_setup(struct iov_iter *iter, int rw, struct bio_vec *bvec,
+ unsigned int nvecs, unsigned long total,
+ size_t start, size_t len)
+{
+ iov_iter_bvec(iter, rw, bvec, nvecs, total);
+ if (start)
+ iov_iter_advance(iter, start);
+ iov_iter_truncate(iter, len);
+}
+
+/*
+ * Set up as many as 3 iov_iter based on the extents described by @local_dio.
+ * Returns the number of iov_iter that were set up.
+ */
+static int
+nfs_local_iters_setup_dio(struct nfs_local_kiocb *iocb, int rw,
+ unsigned int nvecs, unsigned long total,
+ struct nfs_local_dio *local_dio)
+{
+ int n_iters = 0;
+ struct iov_iter *iters = iocb->iters;
+
+ /* Setup misaligned start? */
+ if (local_dio->start_len) {
+ nfs_local_iter_setup(&iters[n_iters], rw, iocb->bvec,
+ nvecs, total, 0, local_dio->start_len);
+ ++n_iters;
+ }
+
+ /*
+ * Set up the DIO-aligned middle. If there is no misaligned end (below),
+ * AIO completion is used; see nfs_local_call_{read,write}
+ */
+ nfs_local_iter_setup(&iters[n_iters], rw, iocb->bvec, nvecs,
+ total, local_dio->start_len, local_dio->middle_len);
+
+ iocb->iter_is_dio_aligned[n_iters] =
+ nfs_iov_iter_aligned_bvec(&iters[n_iters],
+ local_dio->mem_align-1, local_dio->offset_align-1);
+
+ if (unlikely(!iocb->iter_is_dio_aligned[n_iters])) {
+ trace_nfs_local_dio_misaligned(iocb->hdr->inode,
+ local_dio->start_len, local_dio->middle_len, local_dio);
+ return 0; /* no DIO-aligned IO possible */
+ }
+ iocb->end_iter_index = n_iters;
+ ++n_iters;
+
+ /* Setup misaligned end? */
+ if (local_dio->end_len) {
+ nfs_local_iter_setup(&iters[n_iters], rw, iocb->bvec,
+ nvecs, total, local_dio->start_len +
+ local_dio->middle_len, local_dio->end_len);
+ iocb->end_iter_index = n_iters;
+ ++n_iters;
+ }
+
+ atomic_set(&iocb->n_iters, n_iters);
+ return n_iters;
+}
+
+static noinline_for_stack void
+nfs_local_iters_init(struct nfs_local_kiocb *iocb, int rw)
{
struct nfs_pgio_header *hdr = iocb->hdr;
+ struct page **pagevec = hdr->page_array.pagevec;
+ unsigned long v, total;
+ unsigned int base;
+ size_t len;
+
+ v = 0;
+ total = hdr->args.count;
+ base = hdr->args.pgbase;
+ while (total && v < hdr->page_array.npages) {
+ len = min_t(size_t, total, PAGE_SIZE - base);
+ bvec_set_page(&iocb->bvec[v], *pagevec, len, base);
+ total -= len;
+ ++pagevec;
+ ++v;
+ base = 0;
+ }
+ len = hdr->args.count - total;
+
+ /*
+ * For each iocb, iocb->n_iters is always at least 1; unless the request
+ * was split for misaligned DIO, I/O ends after the first nfs_local_pgio_done() call.
+ */
+ atomic_set(&iocb->n_iters, 1);
- iov_iter_bvec(i, dir, iocb->bvec, hdr->page_array.npages,
- hdr->args.count + hdr->args.pgbase);
- if (hdr->args.pgbase != 0)
- iov_iter_advance(i, hdr->args.pgbase);
+ if (test_bit(NFS_IOHDR_ODIRECT, &hdr->flags)) {
+ struct nfs_local_dio local_dio;
+
+ if (nfs_is_local_dio_possible(iocb, rw, len, &local_dio) &&
+ nfs_local_iters_setup_dio(iocb, rw, v, len, &local_dio) != 0) {
+ /* Ensure DIO WRITE's IO on stable storage upon completion */
+ if (rw == ITER_SOURCE)
+ iocb->kiocb.ki_flags |= IOCB_DSYNC|IOCB_SYNC;
+ return; /* is DIO-aligned */
+ }
+ }
+
+ /* Use buffered IO */
+ iov_iter_bvec(&iocb->iters[0], rw, iocb->bvec, v, len);
}
static void
@@ -320,17 +512,34 @@ nfs_local_pgio_init(struct nfs_pgio_header *hdr,
hdr->task.tk_start = ktime_get();
}
-static void
-nfs_local_pgio_done(struct nfs_pgio_header *hdr, long status)
+static bool
+nfs_local_pgio_done(struct nfs_local_kiocb *iocb, long status, bool force)
{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+
+ /* Must handle partial completions */
if (status >= 0) {
- hdr->res.count = status;
- hdr->res.op_status = NFS4_OK;
- hdr->task.tk_status = 0;
+ hdr->res.count += status;
+ /* @hdr was initialized to 0 (zeroed during allocation) */
+ if (hdr->task.tk_status == 0)
+ hdr->res.op_status = NFS4_OK;
} else {
- hdr->res.op_status = nfs4_stat_to_errno(status);
+ hdr->res.op_status = nfs_localio_errno_to_nfs4_stat(status);
hdr->task.tk_status = status;
}
+
+ if (force)
+ return true;
+
+ BUG_ON(atomic_read(&iocb->n_iters) <= 0);
+ return atomic_dec_and_test(&iocb->n_iters);
+}
+
+static void
+nfs_local_iocb_release(struct nfs_local_kiocb *iocb)
+{
+ nfs_local_file_put(iocb->localio);
+ nfs_local_iocb_free(iocb);
}
static void
@@ -338,18 +547,32 @@ nfs_local_pgio_release(struct nfs_local_kiocb *iocb)
{
struct nfs_pgio_header *hdr = iocb->hdr;
- nfs_to_nfsd_file_put_local(iocb->localio);
- nfs_local_iocb_free(iocb);
+ nfs_local_iocb_release(iocb);
nfs_local_hdr_release(hdr, hdr->task.tk_ops);
}
-static void
-nfs_local_read_done(struct nfs_local_kiocb *iocb, long status)
+/*
+ * Complete the I/O from iocb->kiocb.ki_complete()
+ *
+ * Note that this function can be called from a bottom-half context,
+ * hence we need to queue rpc_call_done() etc. to a workqueue
+ */
+static inline void nfs_local_pgio_aio_complete(struct nfs_local_kiocb *iocb)
+{
+ INIT_WORK(&iocb->work, iocb->aio_complete_work);
+ queue_work(nfsiod_workqueue, &iocb->work);
+}
+
+static void nfs_local_read_done(struct nfs_local_kiocb *iocb)
{
struct nfs_pgio_header *hdr = iocb->hdr;
struct file *filp = iocb->kiocb.ki_filp;
+ long status = hdr->task.tk_status;
- nfs_local_pgio_done(hdr, status);
+ if ((iocb->kiocb.ki_flags & IOCB_DIRECT) && status == -EINVAL) {
+ /* Underlying FS will return -EINVAL if misaligned DIO is attempted. */
+ pr_info_ratelimited("nfs: Unexpected direct I/O read alignment failure\n");
+ }
/*
* Must clear replen otherwise NFSv3 data corruption will occur
@@ -357,56 +580,85 @@ nfs_local_read_done(struct nfs_local_kiocb *iocb, long status)
*/
hdr->res.replen = 0;
- if (hdr->res.count != hdr->args.count ||
- hdr->args.offset + hdr->res.count >= i_size_read(file_inode(filp)))
+ /* nfs_readpage_result() handles short read */
+
+ if (hdr->args.offset + hdr->res.count >= i_size_read(file_inode(filp)))
hdr->res.eof = true;
dprintk("%s: read %ld bytes eof %d.\n", __func__,
status > 0 ? status : 0, hdr->res.eof);
}
-static void nfs_local_call_read(struct work_struct *work)
+static inline void nfs_local_read_iocb_done(struct nfs_local_kiocb *iocb)
+{
+ nfs_local_read_done(iocb);
+ nfs_local_pgio_release(iocb);
+}
+
+static void nfs_local_read_aio_complete_work(struct work_struct *work)
{
struct nfs_local_kiocb *iocb =
container_of(work, struct nfs_local_kiocb, work);
- struct file *filp = iocb->kiocb.ki_filp;
- const struct cred *save_cred;
- struct iov_iter iter;
- ssize_t status;
- save_cred = override_creds(filp->f_cred);
+ nfs_local_read_iocb_done(iocb);
+}
- nfs_local_iter_init(&iter, iocb, READ);
+static void nfs_local_read_aio_complete(struct kiocb *kiocb, long ret)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(kiocb, struct nfs_local_kiocb, kiocb);
- status = filp->f_op->read_iter(&iocb->kiocb, &iter);
- WARN_ON_ONCE(status == -EIOCBQUEUED);
+ /* AIO completion of DIO read should always be last to complete */
+ if (unlikely(!nfs_local_pgio_done(iocb, ret, false)))
+ return;
- nfs_local_read_done(iocb, status);
- nfs_local_pgio_release(iocb);
+ nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_read_aio_complete_work */
+}
- revert_creds(save_cred);
+static void nfs_local_call_read(struct work_struct *work)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(work, struct nfs_local_kiocb, work);
+ struct file *filp = iocb->kiocb.ki_filp;
+ bool force_done = false;
+ ssize_t status;
+ int n_iters;
+
+ n_iters = atomic_read(&iocb->n_iters);
+ for (int i = 0; i < n_iters ; i++) {
+ if (iocb->iter_is_dio_aligned[i]) {
+ iocb->kiocb.ki_flags |= IOCB_DIRECT;
+ /* Only use AIO completion if DIO-aligned segment is last */
+ if (i == iocb->end_iter_index) {
+ iocb->kiocb.ki_complete = nfs_local_read_aio_complete;
+ iocb->aio_complete_work = nfs_local_read_aio_complete_work;
+ }
+ } else
+ iocb->kiocb.ki_flags &= ~IOCB_DIRECT;
+
+ scoped_with_creds(filp->f_cred)
+ status = filp->f_op->read_iter(&iocb->kiocb, &iocb->iters[i]);
+
+ if (status != -EIOCBQUEUED) {
+ if (unlikely(status >= 0 && status < iocb->iters[i].count))
+ force_done = true; /* Partial read */
+ if (nfs_local_pgio_done(iocb, status, force_done)) {
+ nfs_local_read_iocb_done(iocb);
+ break;
+ }
+ }
+ }
}
static int
-nfs_do_local_read(struct nfs_pgio_header *hdr,
- struct nfsd_file *localio,
+nfs_local_do_read(struct nfs_local_kiocb *iocb,
const struct rpc_call_ops *call_ops)
{
- struct nfs_local_kiocb *iocb;
- struct file *file = nfs_to->nfsd_file_file(localio);
-
- /* Don't support filesystems without read_iter */
- if (!file->f_op->read_iter)
- return -EAGAIN;
+ struct nfs_pgio_header *hdr = iocb->hdr;
dprintk("%s: vfs_read count=%u pos=%llu\n",
__func__, hdr->args.count, hdr->args.offset);
- iocb = nfs_local_iocb_alloc(hdr, file, GFP_KERNEL);
- if (iocb == NULL)
- return -ENOMEM;
- iocb->localio = localio;
-
nfs_local_pgio_init(hdr, call_ops);
hdr->res.eof = false;
@@ -421,14 +673,13 @@ nfs_copy_boot_verifier(struct nfs_write_verifier *verifier, struct inode *inode)
{
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
u32 *verf = (u32 *)verifier->data;
- int seq = 0;
+ unsigned int seq;
do {
- read_seqbegin_or_lock(&clp->cl_boot_lock, &seq);
+ seq = read_seqbegin(&clp->cl_boot_lock);
verf[0] = (u32)clp->cl_nfssvc_boot.tv_sec;
verf[1] = (u32)clp->cl_nfssvc_boot.tv_nsec;
- } while (need_seqretry(&clp->cl_boot_lock, seq));
- done_seqretry(&clp->cl_boot_lock, seq);
+ } while (read_seqretry(&clp->cl_boot_lock, seq));
}
static void
@@ -451,7 +702,7 @@ nfs_set_local_verifier(struct inode *inode,
}
/* Factored out from fs/nfsd/vfs.h:fh_getattr() */
-static int __vfs_getattr(struct path *p, struct kstat *stat, int version)
+static int __vfs_getattr(const struct path *p, struct kstat *stat, int version)
{
u32 request_mask = STATX_BASIC_STATS;
@@ -511,15 +762,20 @@ static void nfs_local_vfs_getattr(struct nfs_local_kiocb *iocb)
fattr->du.nfs3.used = stat.blocks << 9;
}
-static void
-nfs_local_write_done(struct nfs_local_kiocb *iocb, long status)
+static void nfs_local_write_done(struct nfs_local_kiocb *iocb)
{
struct nfs_pgio_header *hdr = iocb->hdr;
- struct inode *inode = hdr->inode;
+ long status = hdr->task.tk_status;
dprintk("%s: wrote %ld bytes.\n", __func__, status > 0 ? status : 0);
+ if ((iocb->kiocb.ki_flags & IOCB_DIRECT) && status == -EINVAL) {
+ /* Underlying FS will return -EINVAL if misaligned DIO is attempted. */
+ pr_info_ratelimited("nfs: Unexpected direct I/O write alignment failure\n");
+ }
+
/* Handle short writes as if they are ENOSPC */
+ status = hdr->res.count;
if (status > 0 && status < hdr->args.count) {
hdr->mds_offset += status;
hdr->args.offset += status;
@@ -527,11 +783,38 @@ nfs_local_write_done(struct nfs_local_kiocb *iocb, long status)
hdr->args.count -= status;
nfs_set_pgio_error(hdr, -ENOSPC, hdr->args.offset);
status = -ENOSPC;
+ /* record -ENOSPC in terms of nfs_local_pgio_done */
+ (void) nfs_local_pgio_done(iocb, status, true);
}
- if (status < 0)
- nfs_reset_boot_verifier(inode);
+ if (hdr->task.tk_status < 0)
+ nfs_reset_boot_verifier(hdr->inode);
+}
+
+static inline void nfs_local_write_iocb_done(struct nfs_local_kiocb *iocb)
+{
+ nfs_local_write_done(iocb);
+ nfs_local_vfs_getattr(iocb);
+ nfs_local_pgio_release(iocb);
+}
+
+static void nfs_local_write_aio_complete_work(struct work_struct *work)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(work, struct nfs_local_kiocb, work);
- nfs_local_pgio_done(hdr, status);
+ nfs_local_write_iocb_done(iocb);
+}
+
+static void nfs_local_write_aio_complete(struct kiocb *kiocb, long ret)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(kiocb, struct nfs_local_kiocb, kiocb);
+
+ /* AIO completion of DIO write should always be last to complete */
+ if (unlikely(!nfs_local_pgio_done(iocb, ret, false)))
+ return;
+
+ nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_write_aio_complete_work */
}
static void nfs_local_call_write(struct work_struct *work)
@@ -540,49 +823,52 @@ static void nfs_local_call_write(struct work_struct *work)
container_of(work, struct nfs_local_kiocb, work);
struct file *filp = iocb->kiocb.ki_filp;
unsigned long old_flags = current->flags;
- const struct cred *save_cred;
- struct iov_iter iter;
+ bool force_done = false;
ssize_t status;
+ int n_iters;
current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
- save_cred = override_creds(filp->f_cred);
-
- nfs_local_iter_init(&iter, iocb, WRITE);
file_start_write(filp);
- status = filp->f_op->write_iter(&iocb->kiocb, &iter);
+ n_iters = atomic_read(&iocb->n_iters);
+ for (int i = 0; i < n_iters ; i++) {
+ if (iocb->iter_is_dio_aligned[i]) {
+ iocb->kiocb.ki_flags |= IOCB_DIRECT;
+ /* Only use AIO completion if DIO-aligned segment is last */
+ if (i == iocb->end_iter_index) {
+ iocb->kiocb.ki_complete = nfs_local_write_aio_complete;
+ iocb->aio_complete_work = nfs_local_write_aio_complete_work;
+ }
+ } else
+ iocb->kiocb.ki_flags &= ~IOCB_DIRECT;
+
+ scoped_with_creds(filp->f_cred)
+ status = filp->f_op->write_iter(&iocb->kiocb, &iocb->iters[i]);
+
+ if (status != -EIOCBQUEUED) {
+ if (unlikely(status >= 0 && status < iocb->iters[i].count))
+ force_done = true; /* Partial write */
+ if (nfs_local_pgio_done(iocb, status, force_done)) {
+ nfs_local_write_iocb_done(iocb);
+ break;
+ }
+ }
+ }
file_end_write(filp);
- WARN_ON_ONCE(status == -EIOCBQUEUED);
-
- nfs_local_write_done(iocb, status);
- nfs_local_vfs_getattr(iocb);
- nfs_local_pgio_release(iocb);
- revert_creds(save_cred);
current->flags = old_flags;
}
static int
-nfs_do_local_write(struct nfs_pgio_header *hdr,
- struct nfsd_file *localio,
+nfs_local_do_write(struct nfs_local_kiocb *iocb,
const struct rpc_call_ops *call_ops)
{
- struct nfs_local_kiocb *iocb;
- struct file *file = nfs_to->nfsd_file_file(localio);
-
- /* Don't support filesystems without write_iter */
- if (!file->f_op->write_iter)
- return -EAGAIN;
+ struct nfs_pgio_header *hdr = iocb->hdr;
dprintk("%s: vfs_write count=%u pos=%llu %s\n",
__func__, hdr->args.count, hdr->args.offset,
(hdr->args.stable == NFS_UNSTABLE) ? "unstable" : "stable");
- iocb = nfs_local_iocb_alloc(hdr, file, GFP_NOIO);
- if (iocb == NULL)
- return -ENOMEM;
- iocb->localio = localio;
-
switch (hdr->args.stable) {
default:
break;
@@ -592,6 +878,7 @@ nfs_do_local_write(struct nfs_pgio_header *hdr,
case NFS_FILE_SYNC:
iocb->kiocb.ki_flags |= IOCB_DSYNC|IOCB_SYNC;
}
+
nfs_local_pgio_init(hdr, call_ops);
nfs_set_local_verifier(hdr->inode, hdr->res.verf, hdr->args.stable);
@@ -602,32 +889,68 @@ nfs_do_local_write(struct nfs_pgio_header *hdr,
return 0;
}
+static struct nfs_local_kiocb *
+nfs_local_iocb_init(struct nfs_pgio_header *hdr, struct nfsd_file *localio)
+{
+ struct file *file = nfs_to->nfsd_file_file(localio);
+ struct nfs_local_kiocb *iocb;
+ gfp_t gfp_mask;
+ int rw;
+
+ if (hdr->rw_mode & FMODE_READ) {
+ if (!file->f_op->read_iter)
+ return ERR_PTR(-EOPNOTSUPP);
+ gfp_mask = GFP_KERNEL;
+ rw = ITER_DEST;
+ } else {
+ if (!file->f_op->write_iter)
+ return ERR_PTR(-EOPNOTSUPP);
+ gfp_mask = GFP_NOIO;
+ rw = ITER_SOURCE;
+ }
+
+ iocb = nfs_local_iocb_alloc(hdr, file, gfp_mask);
+ if (iocb == NULL)
+ return ERR_PTR(-ENOMEM);
+ iocb->hdr = hdr;
+ iocb->localio = localio;
+
+ nfs_local_iters_init(iocb, rw);
+
+ return iocb;
+}
+
int nfs_local_doio(struct nfs_client *clp, struct nfsd_file *localio,
struct nfs_pgio_header *hdr,
const struct rpc_call_ops *call_ops)
{
+ struct nfs_local_kiocb *iocb;
int status = 0;
if (!hdr->args.count)
return 0;
+ iocb = nfs_local_iocb_init(hdr, localio);
+ if (IS_ERR(iocb))
+ return PTR_ERR(iocb);
+
switch (hdr->rw_mode) {
case FMODE_READ:
- status = nfs_do_local_read(hdr, localio, call_ops);
+ status = nfs_local_do_read(iocb, call_ops);
break;
case FMODE_WRITE:
- status = nfs_do_local_write(hdr, localio, call_ops);
+ status = nfs_local_do_write(iocb, call_ops);
break;
default:
dprintk("%s: invalid mode: %d\n", __func__,
hdr->rw_mode);
- status = -EINVAL;
+ status = -EOPNOTSUPP;
}
if (status != 0) {
if (status == -EAGAIN)
- nfs_local_disable(clp);
- nfs_to_nfsd_file_put_local(localio);
+ nfs_localio_disable_client(clp);
+ nfs_local_iocb_release(iocb);
hdr->task.tk_status = status;
nfs_local_hdr_release(hdr, call_ops);
}
@@ -668,7 +991,7 @@ nfs_local_commit_done(struct nfs_commit_data *data, int status)
data->task.tk_status = 0;
} else {
nfs_reset_boot_verifier(data->inode);
- data->res.op_status = nfs4_stat_to_errno(status);
+ data->res.op_status = nfs_localio_errno_to_nfs4_stat(status);
data->task.tk_status = status;
}
}
@@ -678,7 +1001,7 @@ nfs_local_release_commit_data(struct nfsd_file *localio,
struct nfs_commit_data *data,
const struct rpc_call_ops *call_ops)
{
- nfs_to_nfsd_file_put_local(localio);
+ nfs_local_file_put(localio);
call_ops->rpc_call_done(&data->task, data);
call_ops->rpc_release(data);
}
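For reference, the extent split computed by nfs_is_local_dio_possible() above carves one request into a buffered head up to the first aligned offset, an O_DIRECT middle, and a buffered tail. The standalone sketch below (userspace, not kernel code) reproduces the same round-up/round-down arithmetic for a concrete request, assuming a power-of-two alignment.

#include <stdio.h>

/* Same arithmetic as the kernel's round_up()/round_down() for power-of-two a */
#define ROUND_UP(x, a)   (((x) + (a) - 1) & ~((long long)(a) - 1))
#define ROUND_DOWN(x, a) ((x) & ~((long long)(a) - 1))

int main(void)
{
	long long offset = 1000, len = 10000, align = 512;
	long long start_end  = ROUND_UP(offset, align);         /* 1024  */
	long long middle_end = ROUND_DOWN(offset + len, align); /* 10752 */

	/* Prints start_len=24 (buffered head), middle_len=9728 (DIO),
	 * end_len=248 (buffered tail); the three sum to len.
	 */
	printf("start_len=%lld middle_len=%lld end_len=%lld\n",
	       start_end - offset, middle_end - start_end,
	       offset + len - middle_end);
	return 0;
}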
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 57c9dd700b58..db8dfb920394 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -223,74 +223,6 @@ out_mnt_err:
goto out;
}
-/**
- * nfs_umount - Notify a server that we have unmounted this export
- * @info: pointer to umount request arguments
- *
- * MOUNTPROC_UMNT is advisory, so we set a short timeout, and always
- * use UDP.
- */
-void nfs_umount(const struct nfs_mount_request *info)
-{
- static const struct rpc_timeout nfs_umnt_timeout = {
- .to_initval = 1 * HZ,
- .to_maxval = 3 * HZ,
- .to_retries = 2,
- };
- struct rpc_create_args args = {
- .net = info->net,
- .protocol = IPPROTO_UDP,
- .address = (struct sockaddr *)info->sap,
- .addrsize = info->salen,
- .timeout = &nfs_umnt_timeout,
- .servername = info->hostname,
- .program = &mnt_program,
- .version = info->version,
- .authflavor = RPC_AUTH_UNIX,
- .flags = RPC_CLNT_CREATE_NOPING,
- .cred = current_cred(),
- };
- struct rpc_message msg = {
- .rpc_argp = info->dirpath,
- };
- struct rpc_clnt *clnt;
- int status;
-
- if (strlen(info->dirpath) > MNTPATHLEN)
- return;
-
- if (info->noresvport)
- args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
-
- clnt = rpc_create(&args);
- if (IS_ERR(clnt))
- goto out_clnt_err;
-
- dprintk("NFS: sending UMNT request for %s:%s\n",
- (info->hostname ? info->hostname : "server"), info->dirpath);
-
- if (info->version == NFS_MNT3_VERSION)
- msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC3_UMNT];
- else
- msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC_UMNT];
-
- status = rpc_call_sync(clnt, &msg, 0);
- rpc_shutdown_client(clnt);
-
- if (unlikely(status < 0))
- goto out_call_err;
-
- return;
-
-out_clnt_err:
- dprintk("NFS: failed to create UMNT RPC client, status=%ld\n",
- PTR_ERR(clnt));
- return;
-
-out_call_err:
- dprintk("NFS: UMNT request failed, status=%d\n", status);
-}
-
/*
* XDR encode/decode functions for MOUNT
*/
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 2d53574da605..5a4d193da1a9 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -195,7 +195,6 @@ struct vfsmount *nfs_d_automount(struct path *path)
if (IS_ERR(mnt))
goto out_fc;
- mntget(mnt); /* prevent immediate expiration */
if (timeout <= 0)
goto out_fc;
@@ -291,7 +290,8 @@ int nfs_do_submount(struct fs_context *fc)
nfs_errorf(fc, "NFS: Couldn't determine submount pathname");
ret = PTR_ERR(p);
} else {
- ret = vfs_parse_fs_string(fc, "source", p, buffer + 4096 - p);
+ ret = vfs_parse_fs_qstr(fc, "source",
+ &QSTR_LEN(p, buffer + 4096 - p));
if (!ret)
ret = vfs_get_tree(fc);
}
@@ -308,7 +308,7 @@ int nfs_submount(struct fs_context *fc, struct nfs_server *server)
int err;
/* Look it up again to get its attributes */
- err = server->nfs_client->rpc_ops->lookup(d_inode(parent), dentry,
+ err = server->nfs_client->rpc_ops->lookup(d_inode(parent), dentry, &dentry->d_name,
ctx->mntfh, ctx->clone_data.fattr);
dput(parent);
if (err != 0)
@@ -336,7 +336,7 @@ static int param_set_nfs_timeout(const char *val, const struct kernel_param *kp)
num *= HZ;
*((int *)kp->arg) = num;
if (!list_empty(&nfs_automount_list))
- mod_delayed_work(system_wq, &nfs_automount_task, num);
+ mod_delayed_work(system_percpu_wq, &nfs_automount_task, num);
} else {
*((int *)kp->arg) = -1*HZ;
cancel_delayed_work(&nfs_automount_task);
diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h
index a68b21603ea9..6ba3ea39e928 100644
--- a/fs/nfs/netns.h
+++ b/fs/nfs/netns.h
@@ -31,7 +31,11 @@ struct nfs_net {
unsigned short nfs_callback_tcpport;
unsigned short nfs_callback_tcpport6;
int cb_users[NFS4_MAX_MINOR_VERSION + 1];
-#endif
+#endif /* CONFIG_NFS_V4 */
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+ struct list_head nfs4_data_server_cache;
+ spinlock_t nfs4_data_server_lock;
+#endif /* CONFIG_NFS_V4_1 */
struct nfs_netns_client *nfs_client;
spinlock_t nfs_client_lock;
ktime_t boot_time;
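
Editorial sketch (not part of the patch): the two fields added to struct nfs_net give each network namespace its own cache of pNFS data servers, guarded by a dedicated spinlock. A minimal userspace analogue of that shape, with a pthread mutex and a hand-rolled singly linked list standing in for spinlock_t and list_head (all names illustrative):

#include <pthread.h>
#include <stdio.h>

struct data_server { struct data_server *next; const char *addr; };

struct nfs_netns {
        struct data_server *ds_cache;   /* stands in for nfs4_data_server_cache */
        pthread_mutex_t ds_lock;        /* stands in for nfs4_data_server_lock */
};

static void ds_cache_add(struct nfs_netns *nn, struct data_server *ds)
{
        pthread_mutex_lock(&nn->ds_lock);
        ds->next = nn->ds_cache;        /* newest entry at the head */
        nn->ds_cache = ds;
        pthread_mutex_unlock(&nn->ds_lock);
}

int main(void)
{
        struct nfs_netns nn = { NULL, PTHREAD_MUTEX_INITIALIZER };
        struct data_server ds = { NULL, "203.0.113.7" };

        ds_cache_add(&nn, &ds);
        printf("cached data server %s\n", nn.ds_cache->addr);
        return 0;
}
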
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 6e75c6c2d234..9eff09158518 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -23,8 +23,8 @@
#include <linux/nfs2.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_common.h>
-#include "nfstrace.h"
#include "internal.h"
+#include "nfstrace.h"
#define NFSDBG_FACILITY NFSDBG_XDR
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 18d8f6529f61..a126eb31f62f 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -104,7 +104,7 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type, bool rcu)
switch (status) {
case 0:
- status = nfs_refresh_inode(inode, res.fattr);
+ nfs_refresh_inode(inode, res.fattr);
break;
case -EPFNOSUPPORT:
case -EPROTONOSUPPORT:
diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c
index b0c8a39c2bbd..5d97c1d38bb6 100644
--- a/fs/nfs/nfs3client.c
+++ b/fs/nfs/nfs3client.c
@@ -2,6 +2,7 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/sunrpc/addr.h>
+#include <net/handshake.h>
#include "internal.h"
#include "nfs3_fs.h"
#include "netns.h"
@@ -98,7 +99,11 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
.net = mds_clp->cl_net,
.timeparms = &ds_timeout,
.cred = mds_srv->cred,
- .xprtsec = mds_clp->cl_xprtsec,
+ .xprtsec = {
+ .policy = RPC_XPRTSEC_NONE,
+ .cert_serial = TLS_NO_CERT,
+ .privkey_serial = TLS_NO_PRIVKEY,
+ },
.connect_timeout = connect_timeout,
.reconnect_timeout = connect_timeout,
};
@@ -111,15 +116,22 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
cl_init.hostname = buf;
switch (ds_proto) {
+ case XPRT_TRANSPORT_TCP_TLS:
+ if (mds_clp->cl_xprtsec.policy != RPC_XPRTSEC_NONE)
+ cl_init.xprtsec = mds_clp->cl_xprtsec;
+ else
+ ds_proto = XPRT_TRANSPORT_TCP;
+ fallthrough;
case XPRT_TRANSPORT_RDMA:
case XPRT_TRANSPORT_TCP:
- case XPRT_TRANSPORT_TCP_TLS:
if (mds_clp->cl_nconnect > 1)
cl_init.nconnect = mds_clp->cl_nconnect;
}
if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &mds_clp->cl_flags))
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
__set_bit(NFS_CS_DS, &cl_init.init_flags);
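
Editorial note on the reordered switch above: the data-server client now starts from a "no transport security" default and only inherits the MDS client's xprtsec parameters when TLS was requested and the MDS connection actually negotiated a policy; otherwise the request quietly falls back to plain TCP before sharing the TCP/RDMA nconnect handling. A small standalone sketch of that decision (enum values and the helper name are illustrative):

#include <stdio.h>

enum xprt { XPRT_TCP, XPRT_TCP_TLS, XPRT_RDMA };
enum xprtsec { XPRTSEC_NONE, XPRTSEC_TLS_ANON, XPRTSEC_TLS_X509 };

/* Use TLS for the data server only if the MDS connection has a policy
 * to inherit; otherwise quietly fall back to plain TCP. */
static enum xprt pick_ds_transport(enum xprt requested, enum xprtsec mds_policy)
{
        if (requested == XPRT_TCP_TLS && mds_policy == XPRTSEC_NONE)
                return XPRT_TCP;
        return requested;
}

int main(void)
{
        printf("%d\n", pick_ds_transport(XPRT_TCP_TLS, XPRTSEC_NONE));     /* 0 = TCP */
        printf("%d\n", pick_ds_transport(XPRT_TCP_TLS, XPRTSEC_TLS_X509)); /* 1 = TCP+TLS */
        return 0;
}
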
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 1566163c6d85..a4cb67573aa7 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -39,7 +39,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
schedule_timeout(NFS_JUKEBOX_RETRY_TIME);
res = -ERESTARTSYS;
- } while (!fatal_signal_pending(current));
+ } while (!fatal_signal_pending(current) && !nfs_current_task_exiting());
return res;
}
@@ -192,7 +192,7 @@ __nfs3_proc_lookup(struct inode *dir, const char *name, size_t len,
}
static int
-nfs3_proc_lookup(struct inode *dir, struct dentry *dentry,
+nfs3_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
unsigned short task_flags = 0;
@@ -202,8 +202,7 @@ nfs3_proc_lookup(struct inode *dir, struct dentry *dentry,
task_flags |= RPC_TASK_TIMEOUT;
dprintk("NFS call lookup %pd2\n", dentry);
- return __nfs3_proc_lookup(dir, dentry->d_name.name,
- dentry->d_name.len, fhandle, fattr,
+ return __nfs3_proc_lookup(dir, name->name, name->len, fhandle, fattr,
task_flags);
}
@@ -579,13 +578,13 @@ out:
return status;
}
-static int
+static struct dentry *
nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
{
struct posix_acl *default_acl, *acl;
struct nfs3_createdata *data;
- struct dentry *d_alias;
- int status = -ENOMEM;
+ struct dentry *ret = ERR_PTR(-ENOMEM);
+ int status;
dprintk("NFS call mkdir %pd\n", dentry);
@@ -593,8 +592,9 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
if (data == NULL)
goto out;
- status = posix_acl_create(dir, &sattr->ia_mode, &default_acl, &acl);
- if (status)
+ ret = ERR_PTR(posix_acl_create(dir, &sattr->ia_mode,
+ &default_acl, &acl));
+ if (IS_ERR(ret))
goto out;
data->msg.rpc_proc = &nfs3_procedures[NFS3PROC_MKDIR];
@@ -603,25 +603,27 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
data->arg.mkdir.len = dentry->d_name.len;
data->arg.mkdir.sattr = sattr;
- d_alias = nfs3_do_create(dir, dentry, data);
- status = PTR_ERR_OR_ZERO(d_alias);
+ ret = nfs3_do_create(dir, dentry, data);
- if (status != 0)
+ if (IS_ERR(ret))
goto out_release_acls;
- if (d_alias)
- dentry = d_alias;
+ if (ret)
+ dentry = ret;
status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl);
+ if (status) {
+ dput(ret);
+ ret = ERR_PTR(status);
+ }
- dput(d_alias);
out_release_acls:
posix_acl_release(acl);
posix_acl_release(default_acl);
out:
nfs3_free_createdata(data);
- dprintk("NFS reply mkdir: %d\n", status);
- return status;
+ dprintk("NFS reply mkdir: %d\n", PTR_ERR_OR_ZERO(ret));
+ return ret;
}
static int
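
Editorial sketch: the mkdir conversion above folds the error code into the returned dentry pointer using the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention. For readers less familiar with that idiom, here is a self-contained userspace re-creation (MAX_ERRNO matches the kernel's value; the stand-in object and helper are illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

/* Stand-in for nfs3_do_create(): return either an object or an error. */
static void *make_dentry(int fail)
{
        static int dentry = 42;
        return fail ? ERR_PTR(-ENOMEM) : &dentry;
}

int main(void)
{
        void *d = make_dentry(1);

        if (IS_ERR(d))
                printf("mkdir failed: %ld\n", PTR_ERR(d));
        else
                printf("got dentry %p\n", d);
        return 0;
}
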
@@ -844,6 +846,41 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
return status;
}
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+
+static unsigned nfs3_localio_probe_throttle __read_mostly = 0;
+module_param(nfs3_localio_probe_throttle, uint, 0644);
+MODULE_PARM_DESC(nfs3_localio_probe_throttle,
+ "Probe for NFSv3 LOCALIO every N IO requests. Must be power-of-2, defaults to 0 (probing disabled).");
+
+static void nfs3_localio_probe(struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+
+ /* Throttled to reduce nfs_local_probe_async() frequency */
+ if (!nfs3_localio_probe_throttle || nfs_server_is_local(clp))
+ return;
+
+ /*
+ * Try (re)enabling LOCALIO if it isn't enabled -- the admin deems
+ * it worthwhile to periodically check whether LOCALIO is possible by
+ * setting the 'nfs3_localio_probe_throttle' module parameter.
+ *
+ * This is useful if LOCALIO was previously enabled, but was
+ * disabled due to server restart, and IO has successfully
+ * completed via normal RPC.
+ */
+ if ((clp->cl_uuid.nfs3_localio_probe_count++ &
+ (nfs3_localio_probe_throttle - 1)) == 0) {
+ if (!nfs_server_is_local(clp))
+ nfs_local_probe_async(clp);
+ }
+}
+
+#else
+static void nfs3_localio_probe(struct nfs_server *server) {}
+#endif
+
static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
struct inode *inode = hdr->inode;
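
Editorial sketch: because nfs3_localio_probe_throttle is documented as a power of two, the probe condition (count++ & (throttle - 1)) == 0 used above is a cheap equivalent of count % throttle == 0, so a probe fires once every N completed reads or writes (the probe is invoked from the read and write completion paths below). A compact userspace version of that throttle, with illustrative names:

#include <stdio.h>

static unsigned int probe_throttle = 8;   /* must be a power of two; 0 disables */
static unsigned int probe_count;

static int should_probe(void)
{
        if (!probe_throttle)
                return 0;
        /* (count++ & (throttle - 1)) == 0  is  count % throttle == 0 */
        return (probe_count++ & (probe_throttle - 1)) == 0;
}

int main(void)
{
        for (int i = 0; i < 24; i++)
                if (should_probe())
                        printf("probe fires at completion %d\n", i);
        return 0;
}
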
@@ -855,8 +892,11 @@ static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
if (nfs3_async_handle_jukebox(task, inode))
return -EAGAIN;
- if (task->tk_status >= 0 && !server->read_hdrsize)
- cmpxchg(&server->read_hdrsize, 0, hdr->res.replen);
+ if (task->tk_status >= 0) {
+ if (!server->read_hdrsize)
+ cmpxchg(&server->read_hdrsize, 0, hdr->res.replen);
+ nfs3_localio_probe(server);
+ }
nfs_invalidate_atime(inode);
nfs_refresh_inode(inode, &hdr->fattr);
@@ -886,8 +926,10 @@ static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
if (nfs3_async_handle_jukebox(task, inode))
return -EAGAIN;
- if (task->tk_status >= 0)
+ if (task->tk_status >= 0) {
nfs_writeback_update_inode(hdr);
+ nfs3_localio_probe(NFS_SERVER(inode));
+ }
return 0;
}
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 4ae01c10b7e2..e17d72908412 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -23,8 +23,8 @@
#include <linux/nfsacl.h>
#include <linux/nfs_common.h>
-#include "nfstrace.h"
#include "internal.h"
+#include "nfstrace.h"
#define NFSDBG_FACILITY NFSDBG_XDR
diff --git a/fs/nfs/nfs42.h b/fs/nfs/nfs42.h
index 0282d93c8bcc..aafd15a4afce 100644
--- a/fs/nfs/nfs42.h
+++ b/fs/nfs/nfs42.h
@@ -21,6 +21,7 @@ int nfs42_proc_allocate(struct file *, loff_t, loff_t);
ssize_t nfs42_proc_copy(struct file *, loff_t, struct file *, loff_t, size_t,
struct nl4_server *, nfs4_stateid *, bool);
int nfs42_proc_deallocate(struct file *, loff_t, loff_t);
+int nfs42_proc_zero_range(struct file *, loff_t, loff_t);
loff_t nfs42_proc_llseek(struct file *, loff_t, int);
int nfs42_proc_layoutstats_generic(struct nfs_server *,
struct nfs42_layoutstat_data *);
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 531c9c20ef1d..d537fb0c230e 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -21,6 +21,8 @@
#define NFSDBG_FACILITY NFSDBG_PROC
static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);
+static int nfs42_proc_offload_status(struct file *file, nfs4_stateid *stateid,
+ u64 *copied);
static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
{
@@ -112,6 +114,7 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
exception.inode = inode;
exception.state = lock->open_context->state;
+ nfs_file_block_o_direct(NFS_I(inode));
err = nfs_sync_inode(inode);
if (err)
goto out;
@@ -135,6 +138,7 @@ int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
};
struct inode *inode = file_inode(filep);
+ loff_t oldsize = i_size_read(inode);
int err;
if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
@@ -143,8 +147,13 @@ int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
inode_lock(inode);
err = nfs42_proc_fallocate(&msg, filep, offset, len);
- if (err == -EOPNOTSUPP)
- NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;
+
+ if (err == 0)
+ nfs_truncate_last_folio(inode->i_mapping, oldsize,
+ offset + len);
+ else if (err == -EOPNOTSUPP)
+ NFS_SERVER(inode)->caps &= ~(NFS_CAP_ALLOCATE |
+ NFS_CAP_ZERO_RANGE);
inode_unlock(inode);
return err;
@@ -167,12 +176,53 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
if (err == 0)
truncate_pagecache_range(inode, offset, (offset + len) -1);
if (err == -EOPNOTSUPP)
- NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
+ NFS_SERVER(inode)->caps &= ~(NFS_CAP_DEALLOCATE |
+ NFS_CAP_ZERO_RANGE);
inode_unlock(inode);
return err;
}
+int nfs42_proc_zero_range(struct file *filep, loff_t offset, loff_t len)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ZERO_RANGE],
+ };
+ struct inode *inode = file_inode(filep);
+ loff_t oldsize = i_size_read(inode);
+ int err;
+
+ if (!nfs_server_capable(inode, NFS_CAP_ZERO_RANGE))
+ return -EOPNOTSUPP;
+
+ inode_lock(inode);
+
+ err = nfs42_proc_fallocate(&msg, filep, offset, len);
+ if (err == 0) {
+ nfs_truncate_last_folio(inode->i_mapping, oldsize,
+ offset + len);
+ truncate_pagecache_range(inode, offset, (offset + len) -1);
+ } else if (err == -EOPNOTSUPP)
+ NFS_SERVER(inode)->caps &= ~NFS_CAP_ZERO_RANGE;
+
+ inode_unlock(inode);
+ return err;
+}
+
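
Editorial note: from user space, the new nfs42_proc_zero_range() path is reached through the ordinary fallocate(2) interface once FALLOC_FL_ZERO_RANGE is accepted by nfs42_fallocate() (see the nfs4file.c hunk later in this diff). A minimal caller, assuming the file lives on an NFSv4.2 mount whose server advertises the capability:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "testfile";
        int fd = open(path, O_RDWR | O_CREAT, 0644);

        if (fd < 0 || fallocate(fd, FALLOC_FL_ZERO_RANGE, 0, 1 << 20) < 0)
                perror("zero range");
        else
                printf("zeroed first 1MiB of %s on the server\n", path);
        close(fd);
        return 0;
}
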
+static void nfs4_copy_dequeue_callback(struct nfs_server *dst_server,
+ struct nfs_server *src_server,
+ struct nfs4_copy_state *copy)
+{
+ spin_lock(&dst_server->nfs_client->cl_lock);
+ list_del_init(&copy->copies);
+ spin_unlock(&dst_server->nfs_client->cl_lock);
+ if (dst_server != src_server) {
+ spin_lock(&src_server->nfs_client->cl_lock);
+ list_del_init(&copy->src_copies);
+ spin_unlock(&src_server->nfs_client->cl_lock);
+ }
+}
+
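
Editorial sketch of the locking shape in the helper above: the copy state is always unhooked from the destination client's list, and the source client's list (and its lock) is touched only for inter-server copies. A userspace analogue with pthread mutexes, purely to illustrate the conditional second lock (fields and counters are loose stand-ins for the kernel's lists):

#include <pthread.h>
#include <stdio.h>

struct client { pthread_mutex_t lock; int pending; };

static void dequeue_copy(struct client *dst, struct client *src)
{
        pthread_mutex_lock(&dst->lock);
        dst->pending--;
        pthread_mutex_unlock(&dst->lock);

        if (dst != src) {               /* inter-server copy only */
                pthread_mutex_lock(&src->lock);
                src->pending--;
                pthread_mutex_unlock(&src->lock);
        }
}

int main(void)
{
        struct client a = { PTHREAD_MUTEX_INITIALIZER, 2 };
        struct client b = { PTHREAD_MUTEX_INITIALIZER, 1 };

        dequeue_copy(&a, &b);           /* inter-server: both locks taken */
        dequeue_copy(&a, &a);           /* intra-server: one lock only */
        printf("a.pending=%d b.pending=%d\n", a.pending, b.pending);
        return 0;
}
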
static int handle_async_copy(struct nfs42_copy_res *res,
struct nfs_server *dst_server,
struct nfs_server *src_server,
@@ -182,9 +232,12 @@ static int handle_async_copy(struct nfs42_copy_res *res,
bool *restart)
{
struct nfs4_copy_state *copy, *tmp_copy = NULL, *iter;
- int status = NFS4_OK;
struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
struct nfs_open_context *src_ctx = nfs_file_open_context(src);
+ struct nfs_client *clp = dst_server->nfs_client;
+ unsigned long timeout = 3 * HZ;
+ int status = NFS4_OK;
+ u64 copied;
copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
if (!copy)
@@ -222,15 +275,12 @@ static int handle_async_copy(struct nfs42_copy_res *res,
spin_unlock(&src_server->nfs_client->cl_lock);
}
- status = wait_for_completion_interruptible(&copy->completion);
- spin_lock(&dst_server->nfs_client->cl_lock);
- list_del_init(&copy->copies);
- spin_unlock(&dst_server->nfs_client->cl_lock);
- if (dst_server != src_server) {
- spin_lock(&src_server->nfs_client->cl_lock);
- list_del_init(&copy->src_copies);
- spin_unlock(&src_server->nfs_client->cl_lock);
- }
+wait:
+ status = wait_for_completion_interruptible_timeout(&copy->completion,
+ timeout);
+ if (!status)
+ goto timeout;
+ nfs4_copy_dequeue_callback(dst_server, src_server, copy);
if (status == -ERESTARTSYS) {
goto out_cancel;
} else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
@@ -240,6 +290,7 @@ static int handle_async_copy(struct nfs42_copy_res *res,
}
out:
res->write_res.count = copy->count;
+ /* Copy out the updated write verifier provided by CB_OFFLOAD. */
memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
status = -copy->error;
@@ -251,6 +302,39 @@ out_cancel:
if (!nfs42_files_from_same_server(src, dst))
nfs42_do_offload_cancel_async(src, src_stateid);
goto out_free;
+timeout:
+ timeout <<= 1;
+ if (timeout > (clp->cl_lease_time >> 1))
+ timeout = clp->cl_lease_time >> 1;
+ status = nfs42_proc_offload_status(dst, &copy->stateid, &copied);
+ if (status == -EINPROGRESS)
+ goto wait;
+ nfs4_copy_dequeue_callback(dst_server, src_server, copy);
+ switch (status) {
+ case 0:
+ /* The server recognized the copy stateid, so it hasn't
+ * rebooted. Don't overwrite the verifier returned in the
+ * COPY result. */
+ res->write_res.count = copied;
+ goto out_free;
+ case -EREMOTEIO:
+ /* COPY operation failed on the server. */
+ status = -EOPNOTSUPP;
+ res->write_res.count = copied;
+ goto out_free;
+ case -EBADF:
+ /* Server did not recognize the copy stateid. It has
+ * probably restarted and lost the plot. */
+ res->write_res.count = 0;
+ status = -EOPNOTSUPP;
+ break;
+ case -EOPNOTSUPP:
+ /* RFC 7862 REQUIREs server to support OFFLOAD_STATUS when
+ * it has signed up for an async COPY, so server is not
+ * spec-compliant. */
+ res->write_res.count = 0;
+ }
+ goto out_free;
}
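
Editorial sketch of the polling cadence introduced above: instead of waiting indefinitely for CB_OFFLOAD, the client waits 3 seconds, queries OFFLOAD_STATUS, then doubles the wait after every poll while capping it at half the client's lease time. The numbers below are illustrative (a 90-second lease, time in seconds rather than jiffies):

#include <stdio.h>

int main(void)
{
        unsigned long timeout = 3;      /* kernel starts at 3 * HZ jiffies */
        unsigned long lease = 90;       /* illustrative lease time */

        for (int poll = 1; poll <= 8; poll++) {
                printf("poll %d issued after a %lus wait\n", poll, timeout);
                timeout <<= 1;
                if (timeout > lease / 2)
                        timeout = lease / 2;
        }
        return 0;
}
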
static int process_copy_commit(struct file *dst, loff_t pos_dst,
@@ -279,22 +363,27 @@ out:
/**
* nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
- * @inode: pointer to destination inode
+ * @file: pointer to destination file
* @pos: destination offset
* @len: copy length
+ * @oldsize: length of the file prior to clone/copy
*
* Punch a hole in the inode page cache, so that the NFS client will
* know to retrieve new data.
* Update the file size if necessary, and then mark the inode as having
* invalid cached values for change attribute, ctime, mtime and space used.
*/
-static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
+static void nfs42_copy_dest_done(struct file *file, loff_t pos, loff_t len,
+ loff_t oldsize)
{
+ struct inode *inode = file_inode(file);
+ struct address_space *mapping = file->f_mapping;
loff_t newsize = pos + len;
loff_t end = newsize - 1;
- WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping,
- pos >> PAGE_SHIFT, end >> PAGE_SHIFT));
+ nfs_truncate_last_folio(mapping, oldsize, pos);
+ WARN_ON_ONCE(invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
+ end >> PAGE_SHIFT));
spin_lock(&inode->i_lock);
if (newsize > i_size_read(inode))
@@ -327,6 +416,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
struct nfs_server *src_server = NFS_SERVER(src_inode);
loff_t pos_src = args->src_pos;
loff_t pos_dst = args->dst_pos;
+ loff_t oldsize_dst = i_size_read(dst_inode);
size_t count = args->count;
ssize_t status;
@@ -355,6 +445,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
return status;
}
+ nfs_file_block_o_direct(NFS_I(dst_inode));
status = nfs_sync_inode(dst_inode);
if (status)
return status;
@@ -400,7 +491,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
goto out;
}
- nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count);
+ nfs42_copy_dest_done(dst, pos_dst, res->write_res.count, oldsize_dst);
nfs_invalidate_atime(src_inode);
status = res->write_res.count;
out:
@@ -498,15 +589,15 @@ out_put_src_lock:
return err;
}
-struct nfs42_offloadcancel_data {
+struct nfs42_offload_data {
struct nfs_server *seq_server;
struct nfs42_offload_status_args args;
struct nfs42_offload_status_res res;
};
-static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
+static void nfs42_offload_prepare(struct rpc_task *task, void *calldata)
{
- struct nfs42_offloadcancel_data *data = calldata;
+ struct nfs42_offload_data *data = calldata;
nfs4_setup_sequence(data->seq_server->nfs_client,
&data->args.osa_seq_args,
@@ -515,7 +606,7 @@ static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
{
- struct nfs42_offloadcancel_data *data = calldata;
+ struct nfs42_offload_data *data = calldata;
trace_nfs4_offload_cancel(&data->args, task->tk_status);
nfs41_sequence_done(task, &data->res.osr_seq_res);
@@ -525,22 +616,22 @@ static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
rpc_restart_call_prepare(task);
}
-static void nfs42_free_offloadcancel_data(void *data)
+static void nfs42_offload_release(void *data)
{
kfree(data);
}
static const struct rpc_call_ops nfs42_offload_cancel_ops = {
- .rpc_call_prepare = nfs42_offload_cancel_prepare,
+ .rpc_call_prepare = nfs42_offload_prepare,
.rpc_call_done = nfs42_offload_cancel_done,
- .rpc_release = nfs42_free_offloadcancel_data,
+ .rpc_release = nfs42_offload_release,
};
static int nfs42_do_offload_cancel_async(struct file *dst,
nfs4_stateid *stateid)
{
struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
- struct nfs42_offloadcancel_data *data = NULL;
+ struct nfs42_offload_data *data = NULL;
struct nfs_open_context *ctx = nfs_file_open_context(dst);
struct rpc_task *task;
struct rpc_message msg = {
@@ -552,14 +643,14 @@ static int nfs42_do_offload_cancel_async(struct file *dst,
.rpc_message = &msg,
.callback_ops = &nfs42_offload_cancel_ops,
.workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
};
int status;
if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
return -EOPNOTSUPP;
- data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_KERNEL);
+ data = kzalloc(sizeof(struct nfs42_offload_data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -582,6 +673,108 @@ static int nfs42_do_offload_cancel_async(struct file *dst,
return status;
}
+static int
+_nfs42_proc_offload_status(struct nfs_server *server, struct file *file,
+ struct nfs42_offload_data *data)
+{
+ struct nfs_open_context *ctx = nfs_file_open_context(file);
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_STATUS],
+ .rpc_argp = &data->args,
+ .rpc_resp = &data->res,
+ .rpc_cred = ctx->cred,
+ };
+ int status;
+
+ status = nfs4_call_sync(server->client, server, &msg,
+ &data->args.osa_seq_args,
+ &data->res.osr_seq_res, 1);
+ trace_nfs4_offload_status(&data->args, status);
+ switch (status) {
+ case 0:
+ break;
+
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_OLD_STATEID:
+ /*
+ * Server does not recognize the COPY stateid. CB_OFFLOAD
+ * could have purged it, or server might have rebooted.
+ * Since COPY stateids don't have an associated inode,
+ * avoid triggering state recovery.
+ */
+ status = -EBADF;
+ break;
+ case -NFS4ERR_NOTSUPP:
+ case -ENOTSUPP:
+ case -EOPNOTSUPP:
+ server->caps &= ~NFS_CAP_OFFLOAD_STATUS;
+ status = -EOPNOTSUPP;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * nfs42_proc_offload_status - Poll completion status of an async copy operation
+ * @dst: handle of file being copied into
+ * @stateid: copy stateid (from async COPY result)
+ * @copied: OUT: number of bytes copied so far
+ *
+ * Return values:
+ * %0: Server returned an NFS4_OK completion status
+ * %-EINPROGRESS: Server returned no completion status
+ * %-EREMOTEIO: Server returned an error completion status
+ * %-EBADF: Server did not recognize the copy stateid
+ * %-EOPNOTSUPP: Server does not support OFFLOAD_STATUS
+ * %-ERESTARTSYS: Wait interrupted by signal
+ *
+ * Other negative errnos indicate the client could not complete the
+ * request.
+ */
+static int
+nfs42_proc_offload_status(struct file *dst, nfs4_stateid *stateid, u64 *copied)
+{
+ struct inode *inode = file_inode(dst);
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs4_exception exception = {
+ .inode = inode,
+ };
+ struct nfs42_offload_data *data;
+ int status;
+
+ if (!(server->caps & NFS_CAP_OFFLOAD_STATUS))
+ return -EOPNOTSUPP;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->seq_server = server;
+ data->args.osa_src_fh = NFS_FH(inode);
+ memcpy(&data->args.osa_stateid, stateid,
+ sizeof(data->args.osa_stateid));
+ exception.stateid = &data->args.osa_stateid;
+ do {
+ status = _nfs42_proc_offload_status(server, dst, data);
+ if (status == -EOPNOTSUPP)
+ goto out;
+ status = nfs4_handle_exception(server, status, &exception);
+ } while (exception.retry);
+ if (status)
+ goto out;
+
+ *copied = data->res.osr_count;
+ if (!data->res.complete_count)
+ status = -EINPROGRESS;
+ else if (data->res.osr_complete != NFS_OK)
+ status = -EREMOTEIO;
+
+out:
+ kfree(data);
+ return status;
+}
+
static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
struct nfs42_copy_notify_args *args,
struct nfs42_copy_notify_res *res)
@@ -861,7 +1054,7 @@ int nfs42_proc_layoutstats_generic(struct nfs_server *server,
.rpc_message = &msg,
.callback_ops = &nfs42_layoutstat_ops,
.callback_data = data,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
};
struct rpc_task *task;
@@ -1016,7 +1209,7 @@ int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
struct rpc_task_setup task_setup = {
.rpc_message = &msg,
.callback_ops = &nfs42_layouterror_ops,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
};
unsigned int i;
@@ -1065,6 +1258,7 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
struct nfs42_clone_res res = {
.server = server,
};
+ loff_t oldsize_dst = i_size_read(dst_inode);
int status;
msg->rpc_argp = &args;
@@ -1099,7 +1293,7 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
/* a zero-length count means clone to EOF in src */
if (count == 0 && res.dst_fattr->valid & NFS_ATTR_FATTR_SIZE)
count = nfs_size_to_loff_t(res.dst_fattr->size) - dst_offset;
- nfs42_copy_dest_done(dst_inode, dst_offset, count);
+ nfs42_copy_dest_done(dst_f, dst_offset, count, oldsize_dst);
status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
}
@@ -1320,7 +1514,7 @@ static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
ret = -ENOMEM;
- res.scratch = alloc_page(GFP_KERNEL);
+ res.scratch = folio_alloc(GFP_KERNEL, 0);
if (!res.scratch)
goto out;
@@ -1358,7 +1552,7 @@ out_free_pages:
}
kfree(pages);
out_free_scratch:
- __free_page(res.scratch);
+ folio_put(res.scratch);
out:
return ret;
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index 9e3ae53e2205..e10d83ba835e 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -35,6 +35,11 @@
#define encode_offload_cancel_maxsz (op_encode_hdr_maxsz + \
XDR_QUADLEN(NFS4_STATEID_SIZE))
#define decode_offload_cancel_maxsz (op_decode_hdr_maxsz)
+#define encode_offload_status_maxsz (op_encode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_STATEID_SIZE))
+#define decode_offload_status_maxsz (op_decode_hdr_maxsz + \
+ 2 /* osr_count */ + \
+ 2 /* osr_complete */)
#define encode_copy_notify_maxsz (op_encode_hdr_maxsz + \
XDR_QUADLEN(NFS4_STATEID_SIZE) + \
1 + /* nl4_type */ \
@@ -143,10 +148,20 @@
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_offload_cancel_maxsz)
+#define NFS4_enc_offload_status_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_offload_status_maxsz)
+#define NFS4_dec_offload_status_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_offload_status_maxsz)
#define NFS4_enc_copy_notify_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_copy_notify_maxsz)
#define NFS4_dec_copy_notify_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_copy_notify_maxsz)
#define NFS4_enc_deallocate_sz (compound_encode_hdr_maxsz + \
@@ -159,6 +174,18 @@
decode_putfh_maxsz + \
decode_deallocate_maxsz + \
decode_getattr_maxsz)
+#define NFS4_enc_zero_range_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_deallocate_maxsz + \
+ encode_allocate_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_zero_range_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_deallocate_maxsz + \
+ decode_allocate_maxsz + \
+ decode_getattr_maxsz)
#define NFS4_enc_read_plus_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
@@ -343,6 +370,14 @@ static void encode_offload_cancel(struct xdr_stream *xdr,
encode_nfs4_stateid(xdr, &args->osa_stateid);
}
+static void encode_offload_status(struct xdr_stream *xdr,
+ const struct nfs42_offload_status_args *args,
+ struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_OFFLOAD_STATUS, decode_offload_status_maxsz, hdr);
+ encode_nfs4_stateid(xdr, &args->osa_stateid);
+}
+
static void encode_copy_notify(struct xdr_stream *xdr,
const struct nfs42_copy_notify_args *args,
struct compound_hdr *hdr)
@@ -549,7 +584,7 @@ static void nfs4_xdr_enc_copy(struct rpc_rqst *req,
}
/*
- * Encode OFFLOAD_CANEL request
+ * Encode OFFLOAD_CANCEL request
*/
static void nfs4_xdr_enc_offload_cancel(struct rpc_rqst *req,
struct xdr_stream *xdr,
@@ -568,6 +603,25 @@ static void nfs4_xdr_enc_offload_cancel(struct rpc_rqst *req,
}
/*
+ * Encode OFFLOAD_STATUS request
+ */
+static void nfs4_xdr_enc_offload_status(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const void *data)
+{
+ const struct nfs42_offload_status_args *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->osa_seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->osa_seq_args, &hdr);
+ encode_putfh(xdr, args->osa_src_fh, &hdr);
+ encode_offload_status(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
* Encode COPY_NOTIFY request
*/
static void nfs4_xdr_enc_copy_notify(struct rpc_rqst *req,
@@ -607,6 +661,27 @@ static void nfs4_xdr_enc_deallocate(struct rpc_rqst *req,
}
/*
+ * Encode ZERO_RANGE request
+ */
+static void nfs4_xdr_enc_zero_range(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const void *data)
+{
+ const struct nfs42_falloc_args *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->falloc_fh, &hdr);
+ encode_deallocate(xdr, args, &hdr);
+ encode_allocate(xdr, args, &hdr);
+ encode_getfattr(xdr, args->falloc_bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
* Encode READ_PLUS request
*/
static void nfs4_xdr_enc_read_plus(struct rpc_rqst *req,
@@ -919,6 +994,26 @@ static int decode_offload_cancel(struct xdr_stream *xdr,
return decode_op_hdr(xdr, OP_OFFLOAD_CANCEL);
}
+static int decode_offload_status(struct xdr_stream *xdr,
+ struct nfs42_offload_status_res *res)
+{
+ ssize_t result;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_OFFLOAD_STATUS);
+ if (status)
+ return status;
+ /* osr_count */
+ if (xdr_stream_decode_u64(xdr, &res->osr_count) < 0)
+ return -EIO;
+ /* osr_complete<1> */
+ result = xdr_stream_decode_uint32_array(xdr, &res->osr_complete, 1);
+ if (result < 0)
+ return -EIO;
+ res->complete_count = result;
+ return 0;
+}
+
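
Editorial sketch: decode_offload_status() above follows the OFFLOAD_STATUS4resok layout from RFC 7862, an 8-byte osr_count followed by an osr_complete array holding at most one status word, whose presence is recorded in complete_count. A standalone decoder for that wire format using plain big-endian reads (buffer contents are made up):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct offload_status { uint64_t count; uint32_t ncomplete, complete; };

static int decode_offload_status(const uint8_t *p, size_t len,
                                 struct offload_status *os)
{
        uint32_t w[4];

        if (len < 12)
                return -1;
        memcpy(w, p, 12);
        os->count = ((uint64_t)ntohl(w[0]) << 32) | ntohl(w[1]); /* osr_count */
        os->ncomplete = ntohl(w[2]);                             /* array length, 0 or 1 */
        if (os->ncomplete > 1)
                return -1;
        if (os->ncomplete) {
                if (len < 16)
                        return -1;
                memcpy(&w[3], p + 12, 4);
                os->complete = ntohl(w[3]);                      /* osr_complete[0] */
        }
        return 0;
}

int main(void)
{
        /* 4096 bytes copied, completion status present and NFS4_OK (0) */
        uint8_t wire[16] = { 0,0,0,0, 0,0,0x10,0, 0,0,0,1, 0,0,0,0 };
        struct offload_status os = { 0 };

        if (!decode_offload_status(wire, sizeof(wire), &os))
                printf("count=%llu complete_count=%u status=%u\n",
                       (unsigned long long)os.count, os.ncomplete, os.complete);
        return 0;
}
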
static int decode_copy_notify(struct xdr_stream *xdr,
struct nfs42_copy_notify_res *res)
{
@@ -1369,6 +1464,32 @@ out:
}
/*
+ * Decode OFFLOAD_STATUS response
+ */
+static int nfs4_xdr_dec_offload_status(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *data)
+{
+ struct nfs42_offload_status_res *res = data;
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->osr_seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_offload_status(xdr, res);
+
+out:
+ return status;
+}
+
+/*
* Decode COPY_NOTIFY response
*/
static int nfs4_xdr_dec_copy_notify(struct rpc_rqst *rqstp,
@@ -1423,6 +1544,37 @@ out:
}
/*
+ * Decode ZERO_RANGE response
+ */
+static int nfs4_xdr_dec_zero_range(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *data)
+{
+ struct nfs42_falloc_res *res = data;
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_deallocate(xdr, res);
+ if (status)
+ goto out;
+ status = decode_allocate(xdr, res);
+ if (status)
+ goto out;
+ decode_getfattr(xdr, res->falloc_fattr, res->falloc_server);
+out:
+ return status;
+}
+
+/*
* Decode READ_PLUS request
*/
static int nfs4_xdr_dec_read_plus(struct rpc_rqst *rqstp,
@@ -1629,7 +1781,7 @@ static int nfs4_xdr_dec_listxattrs(struct rpc_rqst *rqstp,
struct compound_hdr hdr;
int status;
- xdr_set_scratch_page(xdr, res->scratch);
+ xdr_set_scratch_folio(xdr, res->scratch);
status = decode_compound_hdr(xdr, &hdr);
if (status)
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 7d383d29a995..c34c89af9c7d 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -63,12 +63,11 @@ struct nfs4_minor_version_ops {
bool (*match_stateid)(const nfs4_stateid *,
const nfs4_stateid *);
int (*find_root_sec)(struct nfs_server *, struct nfs_fh *,
- struct nfs_fsinfo *);
+ struct nfs_fattr *);
void (*free_lock_state)(struct nfs_server *,
struct nfs4_lock_state *);
int (*test_and_free_expired)(struct nfs_server *,
- const nfs4_stateid *,
- const struct cred *);
+ nfs4_stateid *, const struct cred *);
struct nfs_seqid *
(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
void (*session_trunk)(struct rpc_clnt *clnt,
@@ -297,7 +296,8 @@ extern int nfs4_call_sync(struct rpc_clnt *, struct nfs_server *,
extern void nfs4_init_sequence(struct nfs4_sequence_args *, struct nfs4_sequence_res *, int, int);
extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, const struct cred *, struct nfs4_setclientid_res *);
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, const struct cred *);
-extern int nfs4_proc_get_rootfh(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *, bool);
+extern int nfs4_proc_get_rootfh(struct nfs_server *, struct nfs_fh *,
+ struct nfs_fattr *, bool);
extern int nfs4_proc_bind_conn_to_session(struct nfs_client *, const struct cred *cred);
extern int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred);
extern int nfs4_destroy_clientid(struct nfs_client *clp);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 83378f69b35e..3a4baed993c9 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -11,6 +11,7 @@
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
+#include <net/handshake.h>
#include "internal.h"
#include "callback.h"
#include "delegation.h"
@@ -222,6 +223,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
clp->cl_mig_gen = 1;
+ clp->cl_last_renewal = jiffies;
#if IS_ENABLED(CONFIG_NFS_V4_1)
init_waitqueue_head(&clp->cl_lock_waitq);
#endif
@@ -233,6 +235,8 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
__set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
if (test_bit(NFS_CS_PNFS, &cl_init->init_flags))
__set_bit(NFS_CS_PNFS, &clp->cl_flags);
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &cl_init->init_flags))
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags);
/*
* Set up the connection to the server before we add it to the
* global list.
@@ -800,6 +804,7 @@ static void nfs4_destroy_server(struct nfs_server *server)
unset_pnfs_layoutdriver(server);
nfs4_purge_state_owners(server, &freeme);
nfs4_free_state_owners(&freeme);
+ kfree(server->delegation_hash_table);
}
/*
@@ -893,52 +898,40 @@ nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr,
* Set up an NFS4 client
*/
static int nfs4_set_client(struct nfs_server *server,
- const char *hostname,
- const struct sockaddr_storage *addr,
- const size_t addrlen,
- const char *ip_addr,
- int proto, const struct rpc_timeout *timeparms,
- u32 minorversion, unsigned int nconnect,
- unsigned int max_connect,
- struct net *net,
- struct xprtsec_parms *xprtsec)
+ struct nfs_client_initdata *cl_init)
{
- struct nfs_client_initdata cl_init = {
- .hostname = hostname,
- .addr = addr,
- .addrlen = addrlen,
- .ip_addr = ip_addr,
- .nfs_mod = &nfs_v4,
- .proto = proto,
- .minorversion = minorversion,
- .net = net,
- .timeparms = timeparms,
- .cred = server->cred,
- .xprtsec = *xprtsec,
- };
struct nfs_client *clp;
- if (minorversion == 0)
- __set_bit(NFS_CS_REUSEPORT, &cl_init.init_flags);
- else
- cl_init.max_connect = max_connect;
- switch (proto) {
+ cl_init->nfs_mod = &nfs_v4;
+ cl_init->cred = server->cred;
+
+ if (cl_init->minorversion == 0) {
+ __set_bit(NFS_CS_REUSEPORT, &cl_init->init_flags);
+ cl_init->max_connect = 0;
+ }
+
+ switch (cl_init->proto) {
case XPRT_TRANSPORT_RDMA:
case XPRT_TRANSPORT_TCP:
case XPRT_TRANSPORT_TCP_TLS:
- cl_init.nconnect = nconnect;
+ break;
+ default:
+ cl_init->nconnect = 0;
}
if (server->flags & NFS_MOUNT_NORESVPORT)
- __set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ __set_bit(NFS_CS_NORESVPORT, &cl_init->init_flags);
if (server->options & NFS_OPTION_MIGRATION)
- __set_bit(NFS_CS_MIGRATION, &cl_init.init_flags);
+ __set_bit(NFS_CS_MIGRATION, &cl_init->init_flags);
if (test_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status))
- __set_bit(NFS_CS_TSM_POSSIBLE, &cl_init.init_flags);
- server->port = rpc_get_port((struct sockaddr *)addr);
+ __set_bit(NFS_CS_TSM_POSSIBLE, &cl_init->init_flags);
+ server->port = rpc_get_port((struct sockaddr *)cl_init->addr);
+
+ if (server->flags & NFS_MOUNT_NETUNREACH_FATAL)
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init->init_flags);
/* Allocate or find a client reference we can use */
- clp = nfs_get_client(&cl_init);
+ clp = nfs_get_client(cl_init);
if (IS_ERR(clp))
return PTR_ERR(clp);
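
Editorial sketch: the rewrite above replaces nfs4_set_client()'s long positional argument list with a single nfs_client_initdata that callers fill via designated initializers, which is what the later hunks in nfs4_init_server(), nfs4_create_referral_server() and nfs4_update_server() do. A stripped-down userspace version of the same parameter-object refactor (struct fields and values are illustrative):

#include <stdio.h>

struct client_initdata {
        const char *hostname;
        int proto;
        unsigned int minorversion;
        unsigned int nconnect;
        unsigned int max_connect;
};

static int set_client(const struct client_initdata *init)
{
        printf("client for %s: proto=%d minor=%u nconnect=%u max=%u\n",
               init->hostname, init->proto, init->minorversion,
               init->nconnect, init->max_connect);
        return 0;
}

int main(void)
{
        struct client_initdata init = {
                .hostname = "server.example",
                .proto = 6,             /* illustrative transport id */
                .minorversion = 2,
                .nconnect = 2,
                .max_connect = 16,
        };

        return set_client(&init);
}
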
@@ -991,7 +984,11 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
.net = mds_clp->cl_net,
.timeparms = &ds_timeout,
.cred = mds_srv->cred,
- .xprtsec = mds_srv->nfs_client->cl_xprtsec,
+ .xprtsec = {
+ .policy = RPC_XPRTSEC_NONE,
+ .cert_serial = TLS_NO_CERT,
+ .privkey_serial = TLS_NO_PRIVKEY,
+ },
};
char buf[INET6_ADDRSTRLEN + 1];
@@ -1000,9 +997,14 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
cl_init.hostname = buf;
switch (ds_proto) {
+ case XPRT_TRANSPORT_TCP_TLS:
+ if (mds_srv->nfs_client->cl_xprtsec.policy != RPC_XPRTSEC_NONE)
+ cl_init.xprtsec = mds_srv->nfs_client->cl_xprtsec;
+ else
+ ds_proto = XPRT_TRANSPORT_TCP;
+ fallthrough;
case XPRT_TRANSPORT_RDMA:
case XPRT_TRANSPORT_TCP:
- case XPRT_TRANSPORT_TCP_TLS:
if (mds_clp->cl_nconnect > 1) {
cl_init.nconnect = mds_clp->cl_nconnect;
cl_init.max_connect = NFS_MAX_TRANSPORTS;
@@ -1011,6 +1013,8 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &mds_clp->cl_flags))
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
__set_bit(NFS_CS_PNFS, &cl_init.init_flags);
cl_init.max_connect = NFS_MAX_TRANSPORTS;
@@ -1081,29 +1085,15 @@ static void nfs4_session_limit_xasize(struct nfs_server *server)
#endif
}
-void nfs4_server_set_init_caps(struct nfs_server *server)
-{
- /* Set the basic capabilities */
- server->caps |= server->nfs_client->cl_mvops->init_caps;
- if (server->flags & NFS_MOUNT_NORDIRPLUS)
- server->caps &= ~NFS_CAP_READDIRPLUS;
- if (server->nfs_client->cl_proto == XPRT_TRANSPORT_RDMA)
- server->caps &= ~NFS_CAP_READ_PLUS;
-
- /*
- * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower
- * authentication.
- */
- if (nfs4_disable_idmapping &&
- server->client->cl_auth->au_flavor == RPC_AUTH_UNIX)
- server->caps |= NFS_CAP_UIDGID_NOMAP;
-}
-
static int nfs4_server_common_setup(struct nfs_server *server,
struct nfs_fh *mntfh, bool auth_probe)
{
int error;
+ error = nfs4_delegation_hash_alloc(server);
+ if (error)
+ return error;
+
/* data servers support only a subset of NFSv4.1 */
if (is_ds_only_client(server->nfs_client))
return -EPROTONOSUPPORT;
@@ -1111,14 +1101,14 @@ static int nfs4_server_common_setup(struct nfs_server *server,
/* We must ensure the session is initialised first */
error = nfs4_init_session(server->nfs_client);
if (error < 0)
- goto out;
+ return error;
- nfs4_server_set_init_caps(server);
+ nfs_server_set_init_caps(server);
/* Probe the root fh to retrieve its FSID and filehandle */
error = nfs4_get_rootfh(server, mntfh, auth_probe);
if (error < 0)
- goto out;
+ return error;
dprintk("Server FSID: %llx:%llx\n",
(unsigned long long) server->fsid.major,
@@ -1127,7 +1117,7 @@ static int nfs4_server_common_setup(struct nfs_server *server,
error = nfs_probe_server(server, mntfh);
if (error < 0)
- goto out;
+ return error;
nfs4_session_limit_rwsize(server);
nfs4_session_limit_xasize(server);
@@ -1138,8 +1128,7 @@ static int nfs4_server_common_setup(struct nfs_server *server,
nfs_server_insert_lists(server);
server->mount_time = jiffies;
server->destroy = nfs4_destroy_server;
-out:
- return error;
+ return 0;
}
/*
@@ -1149,6 +1138,19 @@ static int nfs4_init_server(struct nfs_server *server, struct fs_context *fc)
{
struct nfs_fs_context *ctx = nfs_fc2context(fc);
struct rpc_timeout timeparms;
+ struct nfs_client_initdata cl_init = {
+ .hostname = ctx->nfs_server.hostname,
+ .addr = &ctx->nfs_server._address,
+ .addrlen = ctx->nfs_server.addrlen,
+ .ip_addr = ctx->client_address,
+ .proto = ctx->nfs_server.protocol,
+ .minorversion = ctx->minorversion,
+ .net = fc->net_ns,
+ .timeparms = &timeparms,
+ .xprtsec = ctx->xprtsec,
+ .nconnect = ctx->nfs_server.nconnect,
+ .max_connect = ctx->nfs_server.max_connect,
+ };
int error;
nfs_init_timeout_values(&timeparms, ctx->nfs_server.protocol,
@@ -1168,18 +1170,7 @@ static int nfs4_init_server(struct nfs_server *server, struct fs_context *fc)
ctx->selected_flavor = RPC_AUTH_UNIX;
/* Get a client record */
- error = nfs4_set_client(server,
- ctx->nfs_server.hostname,
- &ctx->nfs_server._address,
- ctx->nfs_server.addrlen,
- ctx->client_address,
- ctx->nfs_server.protocol,
- &timeparms,
- ctx->minorversion,
- ctx->nfs_server.nconnect,
- ctx->nfs_server.max_connect,
- fc->net_ns,
- &ctx->xprtsec);
+ error = nfs4_set_client(server, &cl_init);
if (error < 0)
return error;
@@ -1239,18 +1230,28 @@ error:
struct nfs_server *nfs4_create_referral_server(struct fs_context *fc)
{
struct nfs_fs_context *ctx = nfs_fc2context(fc);
- struct nfs_client *parent_client;
- struct nfs_server *server, *parent_server;
- int proto, error;
+ struct nfs_server *parent_server = NFS_SB(ctx->clone_data.sb);
+ struct nfs_client *parent_client = parent_server->nfs_client;
+ struct nfs_client_initdata cl_init = {
+ .hostname = ctx->nfs_server.hostname,
+ .addr = &ctx->nfs_server._address,
+ .addrlen = ctx->nfs_server.addrlen,
+ .ip_addr = parent_client->cl_ipaddr,
+ .minorversion = parent_client->cl_mvops->minor_version,
+ .net = parent_client->cl_net,
+ .timeparms = parent_server->client->cl_timeout,
+ .xprtsec = parent_client->cl_xprtsec,
+ .nconnect = parent_client->cl_nconnect,
+ .max_connect = parent_client->cl_max_connect,
+ };
+ struct nfs_server *server;
bool auth_probe;
+ int error;
server = nfs_alloc_server();
if (!server)
return ERR_PTR(-ENOMEM);
- parent_server = NFS_SB(ctx->clone_data.sb);
- parent_client = parent_server->nfs_client;
-
server->cred = get_cred(parent_server->cred);
/* Initialise the client representation from the parent server */
@@ -1259,38 +1260,17 @@ struct nfs_server *nfs4_create_referral_server(struct fs_context *fc)
/* Get a client representation */
#if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA)
rpc_set_port(&ctx->nfs_server.address, NFS_RDMA_PORT);
- error = nfs4_set_client(server,
- ctx->nfs_server.hostname,
- &ctx->nfs_server._address,
- ctx->nfs_server.addrlen,
- parent_client->cl_ipaddr,
- XPRT_TRANSPORT_RDMA,
- parent_server->client->cl_timeout,
- parent_client->cl_mvops->minor_version,
- parent_client->cl_nconnect,
- parent_client->cl_max_connect,
- parent_client->cl_net,
- &parent_client->cl_xprtsec);
+ cl_init.proto = XPRT_TRANSPORT_RDMA;
+ error = nfs4_set_client(server, &cl_init);
if (!error)
goto init_server;
#endif /* IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA) */
- proto = XPRT_TRANSPORT_TCP;
+ cl_init.proto = XPRT_TRANSPORT_TCP;
if (parent_client->cl_xprtsec.policy != RPC_XPRTSEC_NONE)
- proto = XPRT_TRANSPORT_TCP_TLS;
+ cl_init.proto = XPRT_TRANSPORT_TCP_TLS;
rpc_set_port(&ctx->nfs_server.address, NFS_PORT);
- error = nfs4_set_client(server,
- ctx->nfs_server.hostname,
- &ctx->nfs_server._address,
- ctx->nfs_server.addrlen,
- parent_client->cl_ipaddr,
- proto,
- parent_server->client->cl_timeout,
- parent_client->cl_mvops->minor_version,
- parent_client->cl_nconnect,
- parent_client->cl_max_connect,
- parent_client->cl_net,
- &parent_client->cl_xprtsec);
+ error = nfs4_set_client(server, &cl_init);
if (error < 0)
goto error;
@@ -1346,6 +1326,19 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
char buf[INET6_ADDRSTRLEN + 1];
struct sockaddr_storage address;
struct sockaddr *localaddr = (struct sockaddr *)&address;
+ struct nfs_client_initdata cl_init = {
+ .hostname = hostname,
+ .addr = sap,
+ .addrlen = salen,
+ .ip_addr = buf,
+ .proto = clp->cl_proto,
+ .minorversion = clp->cl_minorversion,
+ .net = net,
+ .timeparms = clnt->cl_timeout,
+ .xprtsec = clp->cl_xprtsec,
+ .nconnect = clp->cl_nconnect,
+ .max_connect = clp->cl_max_connect,
+ };
int error;
error = rpc_switch_client_transport(clnt, &xargs, clnt->cl_timeout);
@@ -1361,11 +1354,7 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
nfs_server_remove_lists(server);
set_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status);
- error = nfs4_set_client(server, hostname, sap, salen, buf,
- clp->cl_proto, clnt->cl_timeout,
- clp->cl_minorversion,
- clp->cl_nconnect, clp->cl_max_connect,
- net, &clp->cl_xprtsec);
+ error = nfs4_set_client(server, &cl_init);
clear_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status);
if (error != 0) {
nfs_server_insert_lists(server);
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 1cd9652f3c28..7317f26892c5 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -225,8 +225,14 @@ static long nfs42_fallocate(struct file *filep, int mode, loff_t offset, loff_t
if (!S_ISREG(inode->i_mode))
return -EOPNOTSUPP;
- if ((mode != 0) && (mode != (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)))
+ switch (mode) {
+ case 0:
+ case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
+ case FALLOC_FL_ZERO_RANGE:
+ break;
+ default:
return -EOPNOTSUPP;
+ }
ret = inode_newsize_ok(inode, offset + len);
if (ret < 0)
@@ -234,6 +240,8 @@ static long nfs42_fallocate(struct file *filep, int mode, loff_t offset, loff_t
if (mode & FALLOC_FL_PUNCH_HOLE)
return nfs42_proc_deallocate(filep, offset, len);
+ else if (mode & FALLOC_FL_ZERO_RANGE)
+		return nfs42_proc_zero_range(filep, offset, len);
return nfs42_proc_allocate(filep, offset, len);
}
@@ -245,7 +253,6 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
struct nfs_server *server = NFS_SERVER(dst_inode);
struct inode *src_inode = file_inode(src_file);
unsigned int bs = server->clone_blksize;
- bool same_inode = false;
int ret;
/* NFS does not support deduplication. */
@@ -267,25 +274,15 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
goto out;
}
- if (src_inode == dst_inode)
- same_inode = true;
-
/* XXX: do we lock at all? what if server needs CB_RECALL_LAYOUT? */
- if (same_inode) {
- inode_lock(src_inode);
- } else if (dst_inode < src_inode) {
- inode_lock_nested(dst_inode, I_MUTEX_PARENT);
- inode_lock_nested(src_inode, I_MUTEX_CHILD);
- } else {
- inode_lock_nested(src_inode, I_MUTEX_PARENT);
- inode_lock_nested(dst_inode, I_MUTEX_CHILD);
- }
-
+ lock_two_nondirectories(src_inode, dst_inode);
/* flush all pending writes on both src and dst so that server
* has the latest data */
+ nfs_file_block_o_direct(NFS_I(src_inode));
ret = nfs_sync_inode(src_inode);
if (ret)
goto out_unlock;
+ nfs_file_block_o_direct(NFS_I(dst_inode));
ret = nfs_sync_inode(dst_inode);
if (ret)
goto out_unlock;
@@ -298,15 +295,7 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
truncate_inode_pages_range(&dst_inode->i_data, dst_off, dst_off + count - 1);
out_unlock:
- if (same_inode) {
- inode_unlock(src_inode);
- } else if (dst_inode < src_inode) {
- inode_unlock(src_inode);
- inode_unlock(dst_inode);
- } else {
- inode_unlock(dst_inode);
- inode_unlock(src_inode);
- }
+ unlock_two_nondirectories(src_inode, dst_inode);
out:
return ret < 0 ? ret : count;
}
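
Editorial sketch: lock_two_nondirectories()/unlock_two_nondirectories() replace the open-coded same-inode and parent/child ordering removed above; the helpers take the two inode locks in a fixed order and take only one lock when both files are the same inode, which is what rules out an ABBA deadlock. A userspace analogue with pthread mutexes (helper names are made up; the kernel helpers also guard against directory inodes):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static void lock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
        if (a == b) {
                pthread_mutex_lock(a);  /* same file: take the lock once */
                return;
        }
        if ((uintptr_t)a > (uintptr_t)b) {
                pthread_mutex_t *tmp = a;
                a = b;
                b = tmp;
        }
        pthread_mutex_lock(a);          /* fixed order prevents ABBA deadlock */
        pthread_mutex_lock(b);
}

static void unlock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
        pthread_mutex_unlock(a);
        if (a != b)
                pthread_mutex_unlock(b);
}

int main(void)
{
        pthread_mutex_t src = PTHREAD_MUTEX_INITIALIZER;
        pthread_mutex_t dst = PTHREAD_MUTEX_INITIALIZER;

        lock_two(&src, &dst);
        puts("both inodes locked in a stable order");
        unlock_two(&src, &dst);
        return 0;
}
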
@@ -442,13 +431,15 @@ void nfs42_ssc_unregister_ops(void)
static int nfs4_setlease(struct file *file, int arg, struct file_lease **lease,
void **priv)
{
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return -EINVAL;
return nfs4_proc_setlease(file, arg, lease, priv);
}
const struct file_operations nfs4_file_operations = {
.read_iter = nfs_file_read,
.write_iter = nfs_file_write,
- .mmap = nfs_file_mmap,
+ .mmap_prepare = nfs_file_mmap_prepare,
.open = nfs4_file_open,
.flush = nfs4_file_flush,
.release = nfs_file_release,
@@ -467,4 +458,5 @@ const struct file_operations nfs4_file_operations = {
#else
.llseek = nfs_file_llseek,
#endif
+ .fop_flags = FOP_DONTCACHE,
};
diff --git a/fs/nfs/nfs4getroot.c b/fs/nfs/nfs4getroot.c
index 1a69479a3a59..e67ea345de69 100644
--- a/fs/nfs/nfs4getroot.c
+++ b/fs/nfs/nfs4getroot.c
@@ -12,30 +12,28 @@
int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh, bool auth_probe)
{
- struct nfs_fsinfo fsinfo;
+ struct nfs_fattr *fattr = nfs_alloc_fattr();
int ret = -ENOMEM;
- fsinfo.fattr = nfs_alloc_fattr();
- if (fsinfo.fattr == NULL)
+ if (fattr == NULL)
goto out;
/* Start by getting the root filehandle from the server */
- ret = nfs4_proc_get_rootfh(server, mntfh, &fsinfo, auth_probe);
+ ret = nfs4_proc_get_rootfh(server, mntfh, fattr, auth_probe);
if (ret < 0) {
dprintk("nfs4_get_rootfh: getroot error = %d\n", -ret);
goto out;
}
- if (!(fsinfo.fattr->valid & NFS_ATTR_FATTR_TYPE)
- || !S_ISDIR(fsinfo.fattr->mode)) {
+ if (!(fattr->valid & NFS_ATTR_FATTR_TYPE) || !S_ISDIR(fattr->mode)) {
printk(KERN_ERR "nfs4_get_rootfh:"
" getroot encountered non-directory\n");
ret = -ENOTDIR;
goto out;
}
- memcpy(&server->fsid, &fsinfo.fattr->fsid, sizeof(server->fsid));
+ memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
out:
- nfs_free_fattr(fsinfo.fattr);
+ nfs_free_fattr(fattr);
return ret;
}
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index 25a7c771cfd8..9e1c48c5c0b8 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -306,15 +306,12 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
const char *type, void *data,
size_t data_size, struct idmap *idmap)
{
- const struct cred *saved_cred;
struct key *rkey;
const struct user_key_payload *payload;
ssize_t ret;
- saved_cred = override_creds(id_resolver_cache);
- rkey = nfs_idmap_request_key(name, namelen, type, idmap);
- revert_creds(saved_cred);
-
+ scoped_with_creds(id_resolver_cache)
+ rkey = nfs_idmap_request_key(name, namelen, type, idmap);
if (IS_ERR(rkey)) {
ret = PTR_ERR(rkey);
goto out;
@@ -424,26 +421,16 @@ static void nfs_idmap_pipe_destroy(struct dentry *dir,
struct rpc_pipe_dir_object *pdo)
{
struct idmap *idmap = pdo->pdo_data;
- struct rpc_pipe *pipe = idmap->idmap_pipe;
- if (pipe->dentry) {
- rpc_unlink(pipe->dentry);
- pipe->dentry = NULL;
- }
+ rpc_unlink(idmap->idmap_pipe);
}
static int nfs_idmap_pipe_create(struct dentry *dir,
struct rpc_pipe_dir_object *pdo)
{
struct idmap *idmap = pdo->pdo_data;
- struct rpc_pipe *pipe = idmap->idmap_pipe;
- struct dentry *dentry;
- dentry = rpc_mkpipe_dentry(dir, "idmap", idmap, pipe);
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
- pipe->dentry = dentry;
- return 0;
+ return rpc_mkpipe_dentry(dir, "idmap", idmap, idmap->idmap_pipe);
}
static const struct rpc_pipe_dir_object_ops nfs_idmap_pipe_dir_object_ops = {
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 405f17e6e0b4..93c6ce04332b 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -105,7 +105,7 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
bool is_privileged);
static int nfs41_test_stateid(struct nfs_server *, const nfs4_stateid *,
const struct cred *);
-static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
+static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
const struct cred *, bool);
#endif
@@ -114,6 +114,7 @@ static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
struct iattr *sattr, struct nfs4_label *label)
{
+ struct lsm_context shim;
int err;
if (label == NULL)
@@ -128,18 +129,26 @@ nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
label->label = NULL;
err = security_dentry_init_security(dentry, sattr->ia_mode,
- &dentry->d_name, NULL,
- (void **)&label->label, &label->len);
- if (err == 0)
- return label;
+ &dentry->d_name, NULL, &shim);
+ if (err)
+ return NULL;
- return NULL;
+ label->lsmid = shim.id;
+ label->label = shim.context;
+ label->len = shim.len;
+ return label;
}
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
- if (label)
- security_release_secctx(label->label, label->len);
+ struct lsm_context shim;
+
+ if (label) {
+ shim.context = label->label;
+ shim.len = label->len;
+ shim.id = label->lsmid;
+ security_release_secctx(&shim);
+ }
}
static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
@@ -186,6 +195,9 @@ static int nfs4_map_errors(int err)
return -EBUSY;
case -NFS4ERR_NOT_SAME:
return -ENOTSYNC;
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ break;
default:
dprintk("%s could not handle NFSv4 error %d\n",
__func__, -err);
@@ -210,6 +222,7 @@ const u32 nfs4_fattr_bitmap[3] = {
| FATTR4_WORD1_RAWDEV
| FATTR4_WORD1_SPACE_USED
| FATTR4_WORD1_TIME_ACCESS
+ | FATTR4_WORD1_TIME_CREATE
| FATTR4_WORD1_TIME_METADATA
| FATTR4_WORD1_TIME_MODIFY
| FATTR4_WORD1_MOUNTED_ON_FILEID,
@@ -231,6 +244,7 @@ static const u32 nfs4_pnfs_open_bitmap[3] = {
| FATTR4_WORD1_RAWDEV
| FATTR4_WORD1_SPACE_USED
| FATTR4_WORD1_TIME_ACCESS
+ | FATTR4_WORD1_TIME_CREATE
| FATTR4_WORD1_TIME_METADATA
| FATTR4_WORD1_TIME_MODIFY,
FATTR4_WORD2_MDSTHRESHOLD
@@ -311,16 +325,19 @@ static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
if (!(cache_validity & NFS_INO_INVALID_OTHER))
dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);
+ if (!(cache_validity & NFS_INO_INVALID_BTIME))
+ dst[1] &= ~FATTR4_WORD1_TIME_CREATE;
+
if (nfs_have_delegated_mtime(inode)) {
if (!(cache_validity & NFS_INO_INVALID_ATIME))
- dst[1] &= ~FATTR4_WORD1_TIME_ACCESS;
+ dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
if (!(cache_validity & NFS_INO_INVALID_MTIME))
- dst[1] &= ~FATTR4_WORD1_TIME_MODIFY;
+ dst[1] &= ~(FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET);
if (!(cache_validity & NFS_INO_INVALID_CTIME))
- dst[1] &= ~FATTR4_WORD1_TIME_METADATA;
+ dst[1] &= ~(FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY_SET);
} else if (nfs_have_delegated_atime(inode)) {
if (!(cache_validity & NFS_INO_INVALID_ATIME))
- dst[1] &= ~FATTR4_WORD1_TIME_ACCESS;
+ dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
}
}
@@ -374,7 +391,9 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
*p++ = htonl(attrs); /* bitmap */
*p++ = htonl(12); /* attribute buffer length */
*p++ = htonl(NF4DIR);
+ spin_lock(&dentry->d_lock);
p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
+ spin_unlock(&dentry->d_lock);
readdir->pgbase = (char *)p - (char *)start;
readdir->count -= readdir->pgbase;
@@ -434,6 +453,8 @@ static int nfs4_delay_killable(long *timeout)
{
might_sleep();
+ if (unlikely(nfs_current_task_exiting()))
+ return -EINTR;
__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
schedule_timeout(nfs4_update_delay(timeout));
if (!__fatal_signal_pending(current))
@@ -445,6 +466,8 @@ static int nfs4_delay_interruptible(long *timeout)
{
might_sleep();
+ if (unlikely(nfs_current_task_exiting()))
+ return -EINTR;
__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
schedule_timeout(nfs4_update_delay(timeout));
if (!signal_pending(current))
@@ -655,6 +678,15 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
struct nfs_client *clp = server->nfs_client;
int ret;
+ if ((task->tk_rpc_status == -ENETDOWN ||
+ task->tk_rpc_status == -ENETUNREACH) &&
+ task->tk_flags & RPC_TASK_NETUNREACH_FATAL) {
+ exception->delay = 0;
+ exception->recovering = 0;
+ exception->retry = 0;
+ return -EIO;
+ }
+
ret = nfs4_do_handle_exception(server, errorcode, exception);
if (exception->delay) {
int ret2 = nfs4_exception_should_retrans(server, exception);
@@ -1282,7 +1314,8 @@ nfs4_update_changeattr_locked(struct inode *inode,
NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
- NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
+ NFS_INO_INVALID_MODE | NFS_INO_INVALID_BTIME |
+ NFS_INO_INVALID_XATTR;
nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
}
nfsi->attrtimeo_timestamp = jiffies;
@@ -1765,7 +1798,8 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state,
rcu_read_unlock();
trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
- if (!fatal_signal_pending(current)) {
+ if (!fatal_signal_pending(current) &&
+ !nfs_current_task_exiting()) {
if (schedule_timeout(5*HZ) == 0)
status = -EAGAIN;
else
@@ -2877,16 +2911,14 @@ static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
}
static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
- const nfs4_stateid *stateid,
- const struct cred *cred)
+ nfs4_stateid *stateid, const struct cred *cred)
{
return -NFS4ERR_BAD_STATEID;
}
#if defined(CONFIG_NFS_V4_1)
static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
- const nfs4_stateid *stateid,
- const struct cred *cred)
+ nfs4_stateid *stateid, const struct cred *cred)
{
int status;
@@ -2895,6 +2927,7 @@ static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
break;
case NFS4_INVALID_STATEID_TYPE:
case NFS4_SPECIAL_STATEID_TYPE:
+ case NFS4_FREED_STATEID_TYPE:
return -NFS4ERR_BAD_STATEID;
case NFS4_REVOKED_STATEID_TYPE:
goto out_free;
@@ -3145,9 +3178,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
if (d_really_is_negative(dentry)) {
struct dentry *alias;
d_drop(dentry);
- alias = d_exact_alias(dentry, state->inode);
- if (!alias)
- alias = d_splice_alias(igrab(state->inode), dentry);
+ alias = d_splice_alias(igrab(state->inode), dentry);
/* d_splice_alias() can't fail here - it's a non-directory */
if (alias) {
dput(ctx->dentry);
@@ -3569,7 +3600,7 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
write_sequnlock(&state->seqlock);
trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
- if (fatal_signal_pending(current))
+ if (fatal_signal_pending(current) || nfs_current_task_exiting())
status = -EINTR;
else
if (schedule_timeout(5*HZ) != 0)
@@ -3605,6 +3636,7 @@ struct nfs4_closedata {
} lr;
struct nfs_fattr fattr;
unsigned long timestamp;
+ unsigned short retrans;
};
static void nfs4_free_closedata(void *data)
@@ -3633,6 +3665,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
.state = state,
.inode = calldata->inode,
.stateid = &calldata->arg.stateid,
+ .retrans = calldata->retrans,
};
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
@@ -3680,6 +3713,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
default:
task->tk_status = nfs4_async_handle_exception(task,
server, task->tk_status, &exception);
+ calldata->retrans = exception.retrans;
if (exception.retry)
goto out_restart;
}
@@ -3898,8 +3932,11 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
{
+ struct dentry *dentry = ctx->dentry;
if (ctx->state == NULL)
return;
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
+ nfs4_inode_set_return_delegation_on_close(d_inode(dentry));
if (is_sync)
nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
else
@@ -3949,8 +3986,9 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
FATTR4_WORD0_CASE_INSENSITIVE |
FATTR4_WORD0_CASE_PRESERVING;
if (minorversion)
- bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT |
- FATTR4_WORD2_OPEN_ARGUMENTS;
+ bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
+ if (minorversion > 1)
+ bitmask[2] |= FATTR4_WORD2_OPEN_ARGUMENTS;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
if (status == 0) {
@@ -3980,8 +4018,10 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
res.attr_bitmask[2];
}
memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
- server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS |
- NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL);
+ server->caps &=
+ ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS |
+ NFS_CAP_SECURITY_LABEL | NFS_CAP_FS_LOCATIONS |
+ NFS_CAP_OPEN_XOR | NFS_CAP_DELEGTIME);
server->fattr_valid = NFS_ATTR_FATTR_V4;
if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
@@ -4020,6 +4060,10 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
+ if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
+ server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
+ if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_CREATE))
+ server->fattr_valid &= ~NFS_ATTR_FATTR_BTIME;
memcpy(server->attr_bitmask_nl, res.attr_bitmask,
sizeof(server->attr_bitmask));
server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
@@ -4055,7 +4099,6 @@ int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
};
int err;
- nfs4_server_set_init_caps(server);
do {
err = nfs4_handle_exception(server,
_nfs4_server_capabilities(server, fhandle),
@@ -4203,15 +4246,18 @@ out:
}
static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info)
+ struct nfs_fattr *fattr)
{
- u32 bitmask[3];
+ u32 bitmask[3] = {
+ [0] = FATTR4_WORD0_TYPE | FATTR4_WORD0_CHANGE |
+ FATTR4_WORD0_SIZE | FATTR4_WORD0_FSID,
+ };
struct nfs4_lookup_root_arg args = {
.bitmask = bitmask,
};
struct nfs4_lookup_res res = {
.server = server,
- .fattr = info->fattr,
+ .fattr = fattr,
.fh = fhandle,
};
struct rpc_message msg = {
@@ -4220,27 +4266,20 @@ static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_resp = &res,
};
- bitmask[0] = nfs4_fattr_bitmap[0];
- bitmask[1] = nfs4_fattr_bitmap[1];
- /*
- * Process the label in the upcoming getfattr
- */
- bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
-
- nfs_fattr_init(info->fattr);
+ nfs_fattr_init(fattr);
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info)
+ struct nfs_fattr *fattr)
{
struct nfs4_exception exception = {
.interruptible = true,
};
int err;
do {
- err = _nfs4_lookup_root(server, fhandle, info);
- trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
+ err = _nfs4_lookup_root(server, fhandle, fattr);
+ trace_nfs4_lookup_root(server, fhandle, fattr, err);
switch (err) {
case 0:
case -NFS4ERR_WRONGSEC:
@@ -4253,8 +4292,9 @@ out:
return err;
}
-static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info, rpc_authflavor_t flavor)
+static int nfs4_lookup_root_sec(struct nfs_server *server,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr,
+ rpc_authflavor_t flavor)
{
struct rpc_auth_create_args auth_args = {
.pseudoflavor = flavor,
@@ -4264,7 +4304,7 @@ static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandl
auth = rpcauth_create(&auth_args, server->client);
if (IS_ERR(auth))
return -EACCES;
- return nfs4_lookup_root(server, fhandle, info);
+ return nfs4_lookup_root(server, fhandle, fattr);
}
/*
@@ -4277,7 +4317,7 @@ static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandl
* negative errno value.
*/
static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info)
+ struct nfs_fattr *fattr)
{
/* Per 3530bis 15.33.5 */
static const rpc_authflavor_t flav_array[] = {
@@ -4293,8 +4333,9 @@ static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
if (server->auth_info.flavor_len > 0) {
/* try each flavor specified by user */
for (i = 0; i < server->auth_info.flavor_len; i++) {
- status = nfs4_lookup_root_sec(server, fhandle, info,
- server->auth_info.flavors[i]);
+ status = nfs4_lookup_root_sec(
+ server, fhandle, fattr,
+ server->auth_info.flavors[i]);
if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
continue;
break;
@@ -4302,7 +4343,7 @@ static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
} else {
/* no flavors specified by user, try default list */
for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
- status = nfs4_lookup_root_sec(server, fhandle, info,
+ status = nfs4_lookup_root_sec(server, fhandle, fattr,
flav_array[i]);
if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
continue;
@@ -4326,28 +4367,22 @@ static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
* nfs4_proc_get_rootfh - get file handle for server's pseudoroot
* @server: initialized nfs_server handle
* @fhandle: we fill in the pseudo-fs root file handle
- * @info: we fill in an FSINFO struct
+ * @fattr: we fill in a bare bones struct fattr
* @auth_probe: probe the auth flavours
*
* Returns zero on success, or a negative errno.
*/
int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info,
- bool auth_probe)
+ struct nfs_fattr *fattr, bool auth_probe)
{
int status = 0;
if (!auth_probe)
- status = nfs4_lookup_root(server, fhandle, info);
+ status = nfs4_lookup_root(server, fhandle, fattr);
if (auth_probe || status == NFS4ERR_WRONGSEC)
- status = server->nfs_client->cl_mvops->find_root_sec(server,
- fhandle, info);
-
- if (status == 0)
- status = nfs4_server_capabilities(server, fhandle);
- if (status == 0)
- status = nfs4_do_fsinfo(server, fhandle, info);
+ status = server->nfs_client->cl_mvops->find_root_sec(
+ server, fhandle, fattr);
return nfs4_map_errors(status);
}
@@ -4536,15 +4571,15 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
- struct dentry *dentry, struct nfs_fh *fhandle,
- struct nfs_fattr *fattr)
+ struct dentry *dentry, const struct qstr *name,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs_server *server = NFS_SERVER(dir);
int status;
struct nfs4_lookup_arg args = {
.bitmask = server->attr_bitmask,
.dir_fh = NFS_FH(dir),
- .name = &dentry->d_name,
+ .name = name,
};
struct nfs4_lookup_res res = {
.server = server,
@@ -4586,17 +4621,16 @@ static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
}
static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
- struct dentry *dentry, struct nfs_fh *fhandle,
- struct nfs_fattr *fattr)
+ struct dentry *dentry, const struct qstr *name,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs4_exception exception = {
.interruptible = true,
};
struct rpc_clnt *client = *clnt;
- const struct qstr *name = &dentry->d_name;
int err;
do {
- err = _nfs4_proc_lookup(client, dir, dentry, fhandle, fattr);
+ err = _nfs4_proc_lookup(client, dir, dentry, name, fhandle, fattr);
trace_nfs4_lookup(dir, name, err);
switch (err) {
case -NFS4ERR_BADNAME:
@@ -4631,13 +4665,13 @@ out:
return err;
}
-static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry,
+static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
int status;
struct rpc_clnt *client = NFS_CLIENT(dir);
- status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr);
+ status = nfs4_proc_lookup_common(&client, dir, dentry, name, fhandle, fattr);
if (client != NFS_CLIENT(dir)) {
rpc_shutdown_client(client);
nfs_fixup_secinfo_attributes(fattr);
@@ -4652,7 +4686,8 @@ nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry,
struct rpc_clnt *client = NFS_CLIENT(dir);
int status;
- status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr);
+ status = nfs4_proc_lookup_common(&client, dir, dentry, &dentry->d_name,
+ fhandle, fattr);
if (status < 0)
return ERR_PTR(status);
return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
@@ -4680,16 +4715,19 @@ static int _nfs4_proc_lookupp(struct inode *inode,
};
unsigned short task_flags = 0;
- if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
+ if (server->flags & NFS_MOUNT_SOFTREVAL)
task_flags |= RPC_TASK_TIMEOUT;
+ if (server->caps & NFS_CAP_MOVEABLE)
+ task_flags |= RPC_TASK_MOVEABLE;
args.bitmask = nfs4_bitmask(server, fattr->label);
nfs_fattr_init(fattr);
+ nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino);
- status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
- &res.seq_res, task_flags);
+ status = nfs4_do_call_sync(clnt, server, &msg, &args.seq_args,
+ &res.seq_res, task_flags);
dprintk("NFS reply lookupp: %d\n", status);
return status;
}
@@ -5127,9 +5165,6 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_
&data->arg.seq_args, &data->res.seq_res, 1);
if (status == 0) {
spin_lock(&dir->i_lock);
- /* Creating a directory bumps nlink in the parent */
- if (data->arg.ftype == NF4DIR)
- nfs4_inc_nlink_locked(dir);
nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
data->res.fattr->time_start,
NFS_INO_INVALID_DATA);
@@ -5139,6 +5174,31 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_
return status;
}
+static struct dentry *nfs4_do_mkdir(struct inode *dir, struct dentry *dentry,
+ struct nfs4_createdata *data, int *statusp)
+{
+ struct dentry *ret;
+
+ *statusp = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
+ &data->arg.seq_args, &data->res.seq_res, 1);
+
+ if (*statusp)
+ return NULL;
+
+ spin_lock(&dir->i_lock);
+ /* Creating a directory bumps nlink in the parent */
+ nfs4_inc_nlink_locked(dir);
+ nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
+ data->res.fattr->time_start,
+ NFS_INO_INVALID_DATA);
+ spin_unlock(&dir->i_lock);
+ ret = nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr);
+ if (!IS_ERR(ret))
+ return ret;
+ *statusp = PTR_ERR(ret);
+ return NULL;
+}
+
static void nfs4_free_createdata(struct nfs4_createdata *data)
{
nfs4_label_free(data->fattr.label);
@@ -5195,32 +5255,35 @@ static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
return err;
}
-static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
- struct iattr *sattr, struct nfs4_label *label)
+static struct dentry *_nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
+ struct iattr *sattr,
+ struct nfs4_label *label, int *statusp)
{
struct nfs4_createdata *data;
- int status = -ENOMEM;
+ struct dentry *ret = NULL;
+ *statusp = -ENOMEM;
data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
if (data == NULL)
goto out;
data->arg.label = label;
- status = nfs4_do_create(dir, dentry, data);
+ ret = nfs4_do_mkdir(dir, dentry, data, statusp);
nfs4_free_createdata(data);
out:
- return status;
+ return ret;
}
-static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
- struct iattr *sattr)
+static struct dentry *nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
+ struct iattr *sattr)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_exception exception = {
.interruptible = true,
};
struct nfs4_label l, *label;
+ struct dentry *alias;
int err;
label = nfs4_label_init_security(dir, dentry, sattr, &l);
@@ -5228,14 +5291,16 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
sattr->ia_mode &= ~current_umask();
do {
- err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
+ alias = _nfs4_proc_mkdir(dir, dentry, sattr, label, &err);
trace_nfs4_mkdir(dir, &dentry->d_name, err);
- err = nfs4_handle_exception(NFS_SERVER(dir), err,
- &exception);
+ if (err)
+ alias = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
+ err,
+ &exception));
} while (exception.retry);
nfs4_label_release_security(label);
- return err;
+ return alias;
}
static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg,
@@ -5534,9 +5599,11 @@ static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
.inode = hdr->inode,
.state = hdr->args.context->state,
.stateid = &hdr->args.stateid,
+ .retrans = hdr->retrans,
};
task->tk_status = nfs4_async_handle_exception(task,
server, task->tk_status, &exception);
+ hdr->retrans = exception.retrans;
if (exception.retry) {
rpc_restart_call_prepare(task);
return -EAGAIN;
@@ -5650,10 +5717,12 @@ static int nfs4_write_done_cb(struct rpc_task *task,
.inode = hdr->inode,
.state = hdr->args.context->state,
.stateid = &hdr->args.stateid,
+ .retrans = hdr->retrans,
};
task->tk_status = nfs4_async_handle_exception(task,
NFS_SERVER(inode), task->tk_status,
&exception);
+ hdr->retrans = exception.retrans;
if (exception.retry) {
rpc_restart_call_prepare(task);
return -EAGAIN;
@@ -5727,6 +5796,8 @@ void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[],
bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
if (cache_validity & NFS_INO_INVALID_BLOCKS)
bitmask[1] |= FATTR4_WORD1_SPACE_USED;
+ if (cache_validity & NFS_INO_INVALID_BTIME)
+ bitmask[1] |= FATTR4_WORD1_TIME_CREATE;
if (cache_validity & NFS_INO_INVALID_SIZE)
bitmask[0] |= FATTR4_WORD0_SIZE;
@@ -6101,7 +6172,7 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf,
}
/* for decoding across pages */
- res.acl_scratch = alloc_page(GFP_KERNEL);
+ res.acl_scratch = folio_alloc(GFP_KERNEL, 0);
if (!res.acl_scratch)
goto out_free;
@@ -6137,7 +6208,7 @@ out_free:
while (--i >= 0)
__free_page(pages[i]);
if (res.acl_scratch)
- __free_page(res.acl_scratch);
+ folio_put(res.acl_scratch);
kfree(pages);
return ret;
}
@@ -6165,6 +6236,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen,
struct nfs_server *server = NFS_SERVER(inode);
int ret;
+ if (unlikely(NFS_FH(inode)->size == 0))
+ return -ENODATA;
if (!nfs4_server_supports_acls(server, type))
return -EOPNOTSUPP;
ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
@@ -6239,6 +6312,9 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf,
{
struct nfs4_exception exception = { };
int err;
+
+ if (unlikely(NFS_FH(inode)->size == 0))
+ return -ENODATA;
do {
err = __nfs4_proc_set_acl(inode, buf, buflen, type);
trace_nfs4_set_acl(inode, err);
@@ -6261,7 +6337,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
size_t buflen)
{
struct nfs_server *server = NFS_SERVER(inode);
- struct nfs4_label label = {0, 0, buflen, buf};
+ struct nfs4_label label = {0, 0, 0, buflen, buf};
u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
struct nfs_fattr fattr = {
@@ -6366,7 +6442,7 @@ static int nfs4_do_set_security_label(struct inode *inode,
static int
nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
{
- struct nfs4_label ilabel = {0, 0, buflen, (char *)buf };
+ struct nfs4_label ilabel = {0, 0, 0, buflen, (char *)buf };
struct nfs_fattr *fattr;
int status;
@@ -6660,6 +6736,7 @@ struct nfs4_delegreturndata {
struct nfs_fh fh;
nfs4_stateid stateid;
unsigned long timestamp;
+ unsigned short retrans;
struct {
struct nfs4_layoutreturn_args arg;
struct nfs4_layoutreturn_res res;
@@ -6680,6 +6757,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
.inode = data->inode,
.stateid = &data->stateid,
.task_is_privileged = data->args.seq_args.sa_privileged,
+ .retrans = data->retrans,
};
if (!nfs4_sequence_done(task, &data->res.seq_res))
@@ -6751,6 +6829,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
task->tk_status = nfs4_async_handle_exception(task,
data->res.server, task->tk_status,
&exception);
+ data->retrans = exception.retrans;
if (exception.retry)
goto out_restart;
}
@@ -7027,6 +7106,7 @@ struct nfs4_unlockdata {
struct file_lock fl;
struct nfs_server *server;
unsigned long timestamp;
+ unsigned short retrans;
};
static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
@@ -7037,10 +7117,18 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
struct nfs4_unlockdata *p;
struct nfs4_state *state = lsp->ls_state;
struct inode *inode = state->inode;
+ struct nfs_lock_context *l_ctx;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL)
return NULL;
+ l_ctx = nfs_get_lock_context(ctx);
+ if (!IS_ERR(l_ctx)) {
+ p->l_ctx = l_ctx;
+ } else {
+ kfree(p);
+ return NULL;
+ }
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.seqid = seqid;
@@ -7048,7 +7136,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
p->lsp = lsp;
/* Ensure we don't close file until we're done freeing locks! */
p->ctx = get_nfs_open_context(ctx);
- p->l_ctx = nfs_get_lock_context(ctx);
locks_init_lock(&p->fl);
locks_copy_lock(&p->fl, fl);
p->server = NFS_SERVER(inode);
@@ -7074,6 +7161,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
struct nfs4_exception exception = {
.inode = calldata->lsp->ls_state->inode,
.stateid = &calldata->arg.stateid,
+ .retrans = calldata->retrans,
};
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
@@ -7107,6 +7195,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
task->tk_status = nfs4_async_handle_exception(task,
calldata->server, task->tk_status,
&exception);
+ calldata->retrans = exception.retrans;
if (exception.retry)
rpc_restart_call_prepare(task);
}
@@ -7801,10 +7890,10 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state,
return err;
do {
err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
- if (err != -NFS4ERR_DELAY)
+ if (err != -NFS4ERR_DELAY && err != -NFS4ERR_GRACE)
break;
ssleep(1);
- } while (err == -NFS4ERR_DELAY);
+ } while (err == -NFS4ERR_DELAY || err == -NFS4ERR_GRACE);
return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
}
@@ -9371,7 +9460,7 @@ static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args
goto out;
if (rcvd->max_rqst_sz > sent->max_rqst_sz)
return -EINVAL;
- if (rcvd->max_resp_sz < sent->max_resp_sz)
+ if (rcvd->max_resp_sz > sent->max_resp_sz)
return -EINVAL;
if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
return -EINVAL;
@@ -9565,7 +9654,7 @@ static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
return;
trace_nfs4_sequence(clp, task->tk_status);
- if (task->tk_status < 0 && !task->tk_client->cl_shutdown) {
+ if (task->tk_status < 0 && clp->cl_cons_state >= 0) {
dprintk("%s ERROR %d\n", __func__, task->tk_status);
if (refcount_read(&clp->cl_count) == 1)
return;
@@ -10273,10 +10362,10 @@ nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
* Use the state management nfs_client cl_rpcclient, which uses krb5i (if
* possible) as per RFC3530bis and RFC5661 Security Considerations sections
*/
-static int
-_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info,
- struct nfs4_secinfo_flavors *flavors, bool use_integrity)
+static int _nfs41_proc_secinfo_no_name(struct nfs_server *server,
+ struct nfs_fh *fhandle,
+ struct nfs4_secinfo_flavors *flavors,
+ bool use_integrity)
{
struct nfs41_secinfo_no_name_args args = {
.style = SECINFO_STYLE_CURRENT_FH,
@@ -10320,9 +10409,9 @@ _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
return status;
}
-static int
-nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
+static int nfs41_proc_secinfo_no_name(struct nfs_server *server,
+ struct nfs_fh *fhandle,
+ struct nfs4_secinfo_flavors *flavors)
{
struct nfs4_exception exception = {
.interruptible = true,
@@ -10334,7 +10423,7 @@ nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
/* try to use integrity protection with machine cred */
if (_nfs4_is_integrity_protected(server->nfs_client))
- err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
+ err = _nfs41_proc_secinfo_no_name(server, fhandle,
flavors, true);
/*
@@ -10344,7 +10433,7 @@ nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
* the current filesystem's rpc_client and the user cred.
*/
if (err == -NFS4ERR_WRONGSEC)
- err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
+ err = _nfs41_proc_secinfo_no_name(server, fhandle,
flavors, false);
switch (err) {
@@ -10360,9 +10449,8 @@ out:
return err;
}
-static int
-nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info)
+static int nfs41_find_root_sec(struct nfs_server *server,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
int err;
struct page *page;
@@ -10378,14 +10466,14 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
}
flavors = page_address(page);
- err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
+ err = nfs41_proc_secinfo_no_name(server, fhandle, flavors);
/*
* Fall back on "guess and check" method if
* the server doesn't support SECINFO_NO_NAME
*/
if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
- err = nfs4_find_root_sec(server, fhandle, info);
+ err = nfs4_find_root_sec(server, fhandle, fattr);
goto out_freepage;
}
if (err)
@@ -10410,8 +10498,8 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
flavor = RPC_AUTH_MAXFLAVOR;
if (flavor != RPC_AUTH_MAXFLAVOR) {
- err = nfs4_lookup_root_sec(server, fhandle,
- info, flavor);
+ err = nfs4_lookup_root_sec(server, fhandle, fattr,
+ flavor);
if (!err)
break;
}
@@ -10558,7 +10646,7 @@ static const struct rpc_call_ops nfs41_free_stateid_ops = {
* Note: this function is always asynchronous.
*/
static int nfs41_free_stateid(struct nfs_server *server,
- const nfs4_stateid *stateid,
+ nfs4_stateid *stateid,
const struct cred *cred,
bool privileged)
{
@@ -10598,6 +10686,7 @@ static int nfs41_free_stateid(struct nfs_server *server,
if (IS_ERR(task))
return PTR_ERR(task);
rpc_put_task(task);
+ stateid->type = NFS4_FREED_STATEID_TYPE;
return 0;
}
@@ -10613,6 +10702,8 @@ nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
static bool nfs41_match_stateid(const nfs4_stateid *s1,
const nfs4_stateid *s2)
{
+ trace_nfs41_match_stateid(s1, s2);
+
if (s1->type != s2->type)
return false;
@@ -10630,6 +10721,8 @@ static bool nfs41_match_stateid(const nfs4_stateid *s1,
static bool nfs4_match_stateid(const nfs4_stateid *s1,
const nfs4_stateid *s2)
{
+ trace_nfs4_match_stateid(s1, s2);
+
return nfs4_stateid_match(s1, s2);
}
@@ -10764,12 +10857,14 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
| NFS_CAP_OFFLOAD_CANCEL
| NFS_CAP_COPY_NOTIFY
| NFS_CAP_DEALLOCATE
+ | NFS_CAP_ZERO_RANGE
| NFS_CAP_SEEK
| NFS_CAP_LAYOUTSTATS
| NFS_CAP_CLONE
| NFS_CAP_LAYOUTERROR
| NFS_CAP_READ_PLUS
- | NFS_CAP_MOVEABLE,
+ | NFS_CAP_MOVEABLE
+ | NFS_CAP_OFFLOAD_STATUS,
.init_client = nfs41_init_client,
.shutdown_client = nfs41_shutdown_client,
.match_stateid = nfs41_match_stateid,
@@ -10798,7 +10893,7 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
{
- ssize_t error, error2, error3;
+ ssize_t error, error2, error3, error4 = 0;
size_t left = size;
error = generic_listxattr(dentry, list, left);
@@ -10821,8 +10916,18 @@ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left);
if (error3 < 0)
return error3;
+ if (list) {
+ list += error3;
+ left -= error3;
+ }
+
+ if (!nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) {
+ error4 = security_inode_listsecurity(d_inode(dentry), list, left);
+ if (error4 < 0)
+ return error4;
+ }
- error += error2 + error3;
+ error += error2 + error3 + error4;
if (size && error > size)
return -ERANGE;
return error;
@@ -10874,6 +10979,26 @@ static const struct inode_operations nfs4_file_inode_operations = {
.listxattr = nfs4_listxattr,
};
+static struct nfs_server *nfs4_clone_server(struct nfs_server *source,
+ struct nfs_fh *fh, struct nfs_fattr *fattr,
+ rpc_authflavor_t flavor)
+{
+ struct nfs_server *server;
+ int error;
+
+ server = nfs_clone_server(source, fh, fattr, flavor);
+ if (IS_ERR(server))
+ return server;
+
+ error = nfs4_delegation_hash_alloc(server);
+ if (error) {
+ nfs_free_server(server);
+ return ERR_PTR(error);
+ }
+
+ return server;
+}
+
const struct nfs_rpc_ops nfs_v4_clientops = {
.version = 4, /* protocol version */
.dentry_ops = &nfs4_dentry_operations,
@@ -10926,7 +11051,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.init_client = nfs4_init_client,
.free_client = nfs4_free_client,
.create_server = nfs4_create_server,
- .clone_server = nfs_clone_server,
+ .clone_server = nfs4_clone_server,
.discover_trunking = nfs4_discover_trunking,
.enable_swap = nfs4_enable_swap,
.disable_swap = nfs4_disable_swap,
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index db3811af0796..18ae614e5a6c 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -122,7 +122,7 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
timeout = 5 * HZ;
dprintk("%s: requeueing work. Lease period = %ld\n",
__func__, (timeout + HZ - 1) / HZ);
- mod_delayed_work(system_wq, &clp->cl_renewd, timeout);
+ mod_delayed_work(system_percpu_wq, &clp->cl_renewd, timeout);
set_bit(NFS_CS_RENEWD, &clp->cl_res_state);
spin_unlock(&clp->cl_lock);
}
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
index 351616c61df5..f9c291e2165c 100644
--- a/fs/nfs/nfs4session.h
+++ b/fs/nfs/nfs4session.h
@@ -148,16 +148,12 @@ static inline void nfs4_copy_sessionid(struct nfs4_sessionid *dst,
memcpy(dst->data, src->data, NFS4_MAX_SESSIONID_LEN);
}
-#ifdef CONFIG_CRC32
/*
* nfs_session_id_hash - calculate the crc32 hash for the session id
* @session - pointer to session
*/
#define nfs_session_id_hash(sess_id) \
(~crc32_le(0xFFFFFFFF, &(sess_id)->data[0], sizeof((sess_id)->data)))
-#else
-#define nfs_session_id_hash(session) (0)
-#endif
#else /* defined(CONFIG_NFS_V4_1) */
static inline int nfs4_init_session(struct nfs_client *clp)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 9a9f60a2291b..01179f7de322 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1198,7 +1198,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
struct rpc_clnt *clnt = clp->cl_rpcclient;
bool swapon = false;
- if (clnt->cl_shutdown)
+ if (clp->cl_cons_state < 0)
return;
set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
@@ -1403,7 +1403,7 @@ int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_
dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
clp->cl_hostname);
nfs4_schedule_state_manager(clp);
- return 0;
+ return clp->cl_cons_state < 0 ? clp->cl_cons_state : 0;
}
EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
@@ -1955,6 +1955,7 @@ restart:
}
rcu_read_unlock();
nfs4_free_state_owners(&freeme);
+ nfs_local_probe_async(clp);
if (lost_locks)
pr_warn("NFS: %s: lost %d locks\n",
clp->cl_hostname, lost_locks);
@@ -2738,7 +2739,18 @@ out_error:
pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s"
" with error %d\n", section_sep, section,
clp->cl_hostname, -status);
- ssleep(1);
+ switch (status) {
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ nfs_mark_client_ready(clp, -EIO);
+ break;
+ case -EINVAL:
+ nfs_mark_client_ready(clp, status);
+ break;
+ default:
+ ssleep(1);
+ break;
+ }
out_drain:
memalloc_nofs_restore(memflags);
nfs4_end_drain_session(clp);
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index b29a26923ce0..5ec9c83f1ef0 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -149,21 +149,9 @@ static int do_nfs4_mount(struct nfs_server *server,
struct fs_context *root_fc;
struct vfsmount *root_mnt;
struct dentry *dentry;
- size_t len;
+ char *source;
int ret;
- struct fs_parameter param = {
- .key = "source",
- .type = fs_value_is_string,
- .dirfd = -1,
- };
-
- struct fs_parameter param_fsc = {
- .key = "fsc",
- .type = fs_value_is_string,
- .dirfd = -1,
- };
-
if (IS_ERR(server))
return PTR_ERR(server);
@@ -181,15 +169,7 @@ static int do_nfs4_mount(struct nfs_server *server,
root_ctx->server = server;
if (ctx->fscache_uniq) {
- len = strlen(ctx->fscache_uniq);
- param_fsc.size = len;
- param_fsc.string = kmemdup_nul(ctx->fscache_uniq, len, GFP_KERNEL);
- if (param_fsc.string == NULL) {
- put_fs_context(root_fc);
- return -ENOMEM;
- }
- ret = vfs_parse_fs_param(root_fc, &param_fsc);
- kfree(param_fsc.string);
+ ret = vfs_parse_fs_string(root_fc, "fsc", ctx->fscache_uniq);
if (ret < 0) {
put_fs_context(root_fc);
return ret;
@@ -197,20 +177,18 @@ static int do_nfs4_mount(struct nfs_server *server,
}
/* We leave export_path unset as it's not used to find the root. */
- len = strlen(hostname) + 5;
- param.string = kmalloc(len, GFP_KERNEL);
- if (param.string == NULL) {
- put_fs_context(root_fc);
- return -ENOMEM;
- }
-
/* Does hostname need to be enclosed in brackets? */
if (strchr(hostname, ':'))
- param.size = snprintf(param.string, len, "[%s]:/", hostname);
+ source = kasprintf(GFP_KERNEL, "[%s]:/", hostname);
else
- param.size = snprintf(param.string, len, "%s:/", hostname);
- ret = vfs_parse_fs_param(root_fc, &param);
- kfree(param.string);
+ source = kasprintf(GFP_KERNEL, "%s:/", hostname);
+
+ if (!source) {
+ put_fs_context(root_fc);
+ return -ENOMEM;
+ }
+ ret = vfs_parse_fs_string(root_fc, "source", source);
+ kfree(source);
if (ret < 0) {
put_fs_context(root_fc);
return ret;
diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c
index 886a7c4c60b3..d1a92d8f8ba4 100644
--- a/fs/nfs/nfs4sysctl.c
+++ b/fs/nfs/nfs4sysctl.c
@@ -17,7 +17,7 @@ static const int nfs_set_port_min;
static const int nfs_set_port_max = 65535;
static struct ctl_table_header *nfs4_callback_sysctl_table;
-static struct ctl_table nfs4_cb_sysctls[] = {
+static const struct ctl_table nfs4_cb_sysctls[] = {
{
.procname = "nfs_callback_tcpport",
.data = &nfs_callback_set_tcpport,
diff --git a/fs/nfs/nfs4trace.c b/fs/nfs/nfs4trace.c
index 389941ccc9c9..987c92d6364b 100644
--- a/fs/nfs/nfs4trace.c
+++ b/fs/nfs/nfs4trace.c
@@ -26,11 +26,13 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_read_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_write_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_read_pagelist);
EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_write_pagelist);
+EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_ds_connect);
EXPORT_TRACEPOINT_SYMBOL_GPL(ff_layout_read_error);
EXPORT_TRACEPOINT_SYMBOL_GPL(ff_layout_write_error);
EXPORT_TRACEPOINT_SYMBOL_GPL(ff_layout_commit_error);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bl_ext_tree_prepare_commit);
EXPORT_TRACEPOINT_SYMBOL_GPL(bl_pr_key_reg);
EXPORT_TRACEPOINT_SYMBOL_GPL(bl_pr_key_reg_err);
EXPORT_TRACEPOINT_SYMBOL_GPL(bl_pr_key_unreg);
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 22c973316f0b..9776d220cec3 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -14,6 +14,8 @@
#include <trace/misc/fs.h>
#include <trace/misc/nfs.h>
+#include "delegation.h"
+
#define show_nfs_fattr_flags(valid) \
__print_flags((unsigned long)valid, "|", \
{ NFS_ATTR_FATTR_TYPE, "TYPE" }, \
@@ -30,7 +32,8 @@
{ NFS_ATTR_FATTR_CTIME, "CTIME" }, \
{ NFS_ATTR_FATTR_CHANGE, "CHANGE" }, \
{ NFS_ATTR_FATTR_OWNER_NAME, "OWNER_NAME" }, \
- { NFS_ATTR_FATTR_GROUP_NAME, "GROUP_NAME" })
+ { NFS_ATTR_FATTR_GROUP_NAME, "GROUP_NAME" }, \
+ { NFS_ATTR_FATTR_BTIME, "BTIME" })
DECLARE_EVENT_CLASS(nfs4_clientid_event,
TP_PROTO(
@@ -273,6 +276,32 @@ TRACE_EVENT(nfs4_cb_offload,
show_nfs_stable_how(__entry->cb_how)
)
);
+
+TRACE_EVENT(pnfs_ds_connect,
+ TP_PROTO(
+ char *ds_remotestr,
+ int status
+ ),
+
+ TP_ARGS(ds_remotestr, status),
+
+ TP_STRUCT__entry(
+ __string(ds_ips, ds_remotestr)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __assign_str(ds_ips);
+ __entry->status = status;
+ ),
+
+ TP_printk(
+ "ds_ips=%s, status=%d",
+ __get_str(ds_ips),
+ __entry->status
+ )
+);
+
#endif /* CONFIG_NFS_V4_1 */
TRACE_EVENT(nfs4_setup_sequence,
@@ -956,6 +985,52 @@ DECLARE_EVENT_CLASS(nfs4_set_delegation_event,
TP_ARGS(inode, fmode))
DEFINE_NFS4_SET_DELEGATION_EVENT(nfs4_set_delegation);
DEFINE_NFS4_SET_DELEGATION_EVENT(nfs4_reclaim_delegation);
+DEFINE_NFS4_SET_DELEGATION_EVENT(nfs4_detach_delegation);
+
+#define show_delegation_flags(flags) \
+ __print_flags(flags, "|", \
+ { BIT(NFS_DELEGATION_NEED_RECLAIM), "NEED_RECLAIM" }, \
+ { BIT(NFS_DELEGATION_RETURN), "RETURN" }, \
+ { BIT(NFS_DELEGATION_RETURN_IF_CLOSED), "RETURN_IF_CLOSED" }, \
+ { BIT(NFS_DELEGATION_REFERENCED), "REFERENCED" }, \
+ { BIT(NFS_DELEGATION_RETURNING), "RETURNING" }, \
+ { BIT(NFS_DELEGATION_REVOKED), "REVOKED" }, \
+ { BIT(NFS_DELEGATION_TEST_EXPIRED), "TEST_EXPIRED" }, \
+ { BIT(NFS_DELEGATION_INODE_FREEING), "INODE_FREEING" }, \
+ { BIT(NFS_DELEGATION_RETURN_DELAYED), "RETURN_DELAYED" })
+
+DECLARE_EVENT_CLASS(nfs4_delegation_event,
+ TP_PROTO(
+ const struct nfs_delegation *delegation
+ ),
+
+ TP_ARGS(delegation),
+
+ TP_STRUCT__entry(
+ __field(u32, fhandle)
+ __field(unsigned int, fmode)
+ __field(unsigned long, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->fhandle = nfs_fhandle_hash(NFS_FH(delegation->inode));
+ __entry->fmode = delegation->type;
+ __entry->flags = delegation->flags;
+ ),
+
+ TP_printk(
+ "fhandle=0x%08x fmode=%s flags=%s",
+ __entry->fhandle, show_fs_fmode_flags(__entry->fmode),
+ show_delegation_flags(__entry->flags)
+ )
+);
+#define DEFINE_NFS4_DELEGATION_EVENT(name) \
+ DEFINE_EVENT(nfs4_delegation_event, name, \
+ TP_PROTO( \
+ const struct nfs_delegation *delegation \
+ ), \
+ TP_ARGS(delegation))
+DEFINE_NFS4_DELEGATION_EVENT(nfs_delegation_need_return);
TRACE_EVENT(nfs4_delegreturn_exit,
TP_PROTO(
@@ -1449,6 +1524,63 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
DEFINE_NFS4_INODE_STATEID_CALLBACK_EVENT(nfs4_cb_recall);
DEFINE_NFS4_INODE_STATEID_CALLBACK_EVENT(nfs4_cb_layoutrecall_file);
+#define show_stateid_type(type) \
+ __print_symbolic(type, \
+ { NFS4_INVALID_STATEID_TYPE, "INVALID" }, \
+ { NFS4_SPECIAL_STATEID_TYPE, "SPECIAL" }, \
+ { NFS4_OPEN_STATEID_TYPE, "OPEN" }, \
+ { NFS4_LOCK_STATEID_TYPE, "LOCK" }, \
+ { NFS4_DELEGATION_STATEID_TYPE, "DELEGATION" }, \
+ { NFS4_LAYOUT_STATEID_TYPE, "LAYOUT" }, \
+ { NFS4_PNFS_DS_STATEID_TYPE, "PNFS_DS" }, \
+ { NFS4_REVOKED_STATEID_TYPE, "REVOKED" }, \
+ { NFS4_FREED_STATEID_TYPE, "FREED" })
+
+DECLARE_EVENT_CLASS(nfs4_match_stateid_event,
+ TP_PROTO(
+ const nfs4_stateid *s1,
+ const nfs4_stateid *s2
+ ),
+
+ TP_ARGS(s1, s2),
+
+ TP_STRUCT__entry(
+ __field(int, s1_seq)
+ __field(int, s2_seq)
+ __field(u32, s1_hash)
+ __field(u32, s2_hash)
+ __field(int, s1_type)
+ __field(int, s2_type)
+ ),
+
+ TP_fast_assign(
+ __entry->s1_seq = s1->seqid;
+ __entry->s1_hash = nfs_stateid_hash(s1);
+ __entry->s1_type = s1->type;
+ __entry->s2_seq = s2->seqid;
+ __entry->s2_hash = nfs_stateid_hash(s2);
+ __entry->s2_type = s2->type;
+ ),
+
+ TP_printk(
+ "s1=%s:%x:%u s2=%s:%x:%u",
+ show_stateid_type(__entry->s1_type),
+ __entry->s1_hash, __entry->s1_seq,
+ show_stateid_type(__entry->s2_type),
+ __entry->s2_hash, __entry->s2_seq
+ )
+);
+
+#define DEFINE_NFS4_MATCH_STATEID_EVENT(name) \
+ DEFINE_EVENT(nfs4_match_stateid_event, name, \
+ TP_PROTO( \
+ const nfs4_stateid *s1, \
+ const nfs4_stateid *s2 \
+ ), \
+ TP_ARGS(s1, s2))
+DEFINE_NFS4_MATCH_STATEID_EVENT(nfs41_match_stateid);
+DEFINE_NFS4_MATCH_STATEID_EVENT(nfs4_match_stateid);
+
DECLARE_EVENT_CLASS(nfs4_idmap_event,
TP_PROTO(
const char *name,
@@ -2051,13 +2183,15 @@ TRACE_EVENT(fl_getdevinfo,
DECLARE_EVENT_CLASS(nfs4_flexfiles_io_event,
TP_PROTO(
- const struct nfs_pgio_header *hdr
+ const struct nfs_pgio_header *hdr,
+ int error
),
- TP_ARGS(hdr),
+ TP_ARGS(hdr, error),
TP_STRUCT__entry(
__field(unsigned long, error)
+ __field(unsigned long, nfs_error)
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
@@ -2073,7 +2207,8 @@ DECLARE_EVENT_CLASS(nfs4_flexfiles_io_event,
TP_fast_assign(
const struct inode *inode = hdr->inode;
- __entry->error = hdr->res.op_status;
+ __entry->error = -error;
+ __entry->nfs_error = hdr->res.op_status;
__entry->fhandle = nfs_fhandle_hash(hdr->args.fh);
__entry->fileid = NFS_FILEID(inode);
__entry->dev = inode->i_sb->s_dev;
@@ -2088,7 +2223,8 @@ DECLARE_EVENT_CLASS(nfs4_flexfiles_io_event,
TP_printk(
"error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%llu count=%u stateid=%d:0x%08x dstaddr=%s",
+ "offset=%llu count=%u stateid=%d:0x%08x dstaddr=%s "
+ "nfs_error=%lu (%s)",
-__entry->error,
show_nfs4_status(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
@@ -2096,28 +2232,32 @@ DECLARE_EVENT_CLASS(nfs4_flexfiles_io_event,
__entry->fhandle,
__entry->offset, __entry->count,
__entry->stateid_seq, __entry->stateid_hash,
- __get_str(dstaddr)
+ __get_str(dstaddr), __entry->nfs_error,
+ show_nfs4_status(__entry->nfs_error)
)
);
#define DEFINE_NFS4_FLEXFILES_IO_EVENT(name) \
DEFINE_EVENT(nfs4_flexfiles_io_event, name, \
TP_PROTO( \
- const struct nfs_pgio_header *hdr \
+ const struct nfs_pgio_header *hdr, \
+ int error \
), \
- TP_ARGS(hdr))
+ TP_ARGS(hdr, error))
DEFINE_NFS4_FLEXFILES_IO_EVENT(ff_layout_read_error);
DEFINE_NFS4_FLEXFILES_IO_EVENT(ff_layout_write_error);
TRACE_EVENT(ff_layout_commit_error,
TP_PROTO(
- const struct nfs_commit_data *data
+ const struct nfs_commit_data *data,
+ int error
),
- TP_ARGS(data),
+ TP_ARGS(data, error),
TP_STRUCT__entry(
__field(unsigned long, error)
+ __field(unsigned long, nfs_error)
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
@@ -2131,7 +2271,8 @@ TRACE_EVENT(ff_layout_commit_error,
TP_fast_assign(
const struct inode *inode = data->inode;
- __entry->error = data->res.op_status;
+ __entry->error = -error;
+ __entry->nfs_error = data->res.op_status;
__entry->fhandle = nfs_fhandle_hash(data->args.fh);
__entry->fileid = NFS_FILEID(inode);
__entry->dev = inode->i_sb->s_dev;
@@ -2142,14 +2283,49 @@ TRACE_EVENT(ff_layout_commit_error,
TP_printk(
"error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%llu count=%u dstaddr=%s",
+ "offset=%llu count=%u dstaddr=%s nfs_error=%lu (%s)",
-__entry->error,
show_nfs4_status(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
__entry->offset, __entry->count,
- __get_str(dstaddr)
+ __get_str(dstaddr), __entry->nfs_error,
+ show_nfs4_status(__entry->nfs_error)
+ )
+);
+
+TRACE_EVENT(bl_ext_tree_prepare_commit,
+ TP_PROTO(
+ int ret,
+ size_t count,
+ u64 lwb,
+ bool not_all_ranges
+ ),
+
+ TP_ARGS(ret, count, lwb, not_all_ranges),
+
+ TP_STRUCT__entry(
+ __field(int, ret)
+ __field(size_t, count)
+ __field(u64, lwb)
+ __field(bool, not_all_ranges)
+ ),
+
+ TP_fast_assign(
+ __entry->ret = ret;
+ __entry->count = count;
+ __entry->lwb = lwb;
+ __entry->not_all_ranges = not_all_ranges;
+ ),
+
+ TP_printk(
+ "ret=%d, found %zu ranges, lwb=%llu%s",
+ __entry->ret,
+ __entry->count,
+ __entry->lwb,
+ __entry->not_all_ranges ? ", not all ranges encoded" :
+ ""
)
);
@@ -2608,7 +2784,7 @@ TRACE_EVENT(nfs4_copy_notify,
)
);
-TRACE_EVENT(nfs4_offload_cancel,
+DECLARE_EVENT_CLASS(nfs4_offload_class,
TP_PROTO(
const struct nfs42_offload_status_args *args,
int error
@@ -2640,6 +2816,15 @@ TRACE_EVENT(nfs4_offload_cancel,
__entry->stateid_seq, __entry->stateid_hash
)
);
+#define DEFINE_NFS4_OFFLOAD_EVENT(name) \
+ DEFINE_EVENT(nfs4_offload_class, name, \
+ TP_PROTO( \
+ const struct nfs42_offload_status_args *args, \
+ int error \
+ ), \
+ TP_ARGS(args, error))
+DEFINE_NFS4_OFFLOAD_EVENT(nfs4_offload_cancel);
+DEFINE_NFS4_OFFLOAD_EVENT(nfs4_offload_status);
DECLARE_EVENT_CLASS(nfs4_xattr_event,
TP_PROTO(
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index e8ac3f615f93..1d0e6c10f921 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -82,9 +82,8 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
* we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2)
*/
#define pagepad_maxsz (1)
-#define open_owner_id_maxsz (1 + 2 + 1 + 1 + 2)
-#define lock_owner_id_maxsz (1 + 1 + 4)
-#define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
+#define open_owner_id_maxsz (2 + 1 + 2 + 2)
+#define lock_owner_id_maxsz (2 + 1 + 2)
#define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
#define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
#define op_encode_hdr_maxsz (1)
@@ -185,7 +184,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
#define encode_claim_null_maxsz (1 + nfs4_name_maxsz)
#define encode_open_maxsz (op_encode_hdr_maxsz + \
2 + encode_share_access_maxsz + 2 + \
- open_owner_id_maxsz + \
+ 1 + open_owner_id_maxsz + \
encode_opentype_maxsz + \
encode_claim_null_maxsz)
#define decode_space_limit_maxsz (3)
@@ -255,13 +254,14 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
#define encode_link_maxsz (op_encode_hdr_maxsz + \
nfs4_name_maxsz)
#define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz)
-#define encode_lockowner_maxsz (7)
+#define encode_lockowner_maxsz (2 + 1 + lock_owner_id_maxsz)
+
#define encode_lock_maxsz (op_encode_hdr_maxsz + \
7 + \
1 + encode_stateid_maxsz + 1 + \
encode_lockowner_maxsz)
#define decode_lock_denied_maxsz \
- (8 + decode_lockowner_maxsz)
+ (2 + 2 + 1 + 2 + 1 + lock_owner_id_maxsz)
#define decode_lock_maxsz (op_decode_hdr_maxsz + \
decode_lock_denied_maxsz)
#define encode_lockt_maxsz (op_encode_hdr_maxsz + 5 + \
@@ -617,7 +617,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
encode_lockowner_maxsz)
#define NFS4_dec_release_lockowner_sz \
(compound_decode_hdr_maxsz + \
- decode_lockowner_maxsz)
+ decode_release_lockowner_maxsz)
#define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
@@ -1412,7 +1412,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
__be32 *p;
/*
* opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4,
- * owner 4 = 32
+ * owner 28
*/
encode_nfs4_seqid(xdr, arg->seqid);
encode_share_access(xdr, arg->share_access);
@@ -1623,6 +1623,7 @@ static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg
| FATTR4_WORD1_RAWDEV
| FATTR4_WORD1_SPACE_USED
| FATTR4_WORD1_TIME_ACCESS
+ | FATTR4_WORD1_TIME_CREATE
| FATTR4_WORD1_TIME_METADATA
| FATTR4_WORD1_TIME_MODIFY;
attrs[2] |= FATTR4_WORD2_SECURITY_LABEL;
@@ -4207,6 +4208,24 @@ static int decode_attr_time_access(struct xdr_stream *xdr, uint32_t *bitmap, str
return status;
}
+static int decode_attr_time_create(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec64 *time)
+{
+ int status = 0;
+
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_CREATE - 1U)))
+ return -EIO;
+ if (likely(bitmap[1] & FATTR4_WORD1_TIME_CREATE)) {
+ status = decode_attr_time(xdr, time);
+ if (status == 0)
+ status = NFS_ATTR_FATTR_BTIME;
+ bitmap[1] &= ~FATTR4_WORD1_TIME_CREATE;
+ }
+ dprintk("%s: btime=%lld\n", __func__, time->tv_sec);
+ return status;
+}
+
static int decode_attr_time_metadata(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec64 *time)
{
int status = 0;
@@ -4781,6 +4800,11 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
goto xdr_error;
fattr->valid |= status;
+ status = decode_attr_time_create(xdr, bitmap, &fattr->btime);
+ if (status < 0)
+ goto xdr_error;
+ fattr->valid |= status;
+
status = decode_attr_time_metadata(xdr, bitmap, &fattr->ctime);
if (status < 0)
goto xdr_error;
@@ -4906,7 +4930,7 @@ static int decode_attr_pnfstype(struct xdr_stream *xdr, uint32_t *bitmap,
}
/*
- * The prefered block size for layout directed io
+ * The preferred block size for layout directed io
*/
static int decode_attr_layout_blksize(struct xdr_stream *xdr, uint32_t *bitmap,
uint32_t *res)
@@ -5077,7 +5101,7 @@ static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
/*
* We create the owner, so we know a proper owner.id length is 4.
*/
-static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
+static int decode_lock_denied(struct xdr_stream *xdr, struct file_lock *fl)
{
uint64_t offset, length, clientid;
__be32 *p;
@@ -6561,7 +6585,7 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
int status;
if (res->acl_scratch != NULL)
- xdr_set_scratch_page(xdr, res->acl_scratch);
+ xdr_set_scratch_folio(xdr, res->acl_scratch);
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
@@ -7702,6 +7726,7 @@ const struct rpc_procinfo nfs4_procedures[] = {
PROC42(CLONE, enc_clone, dec_clone),
PROC42(COPY, enc_copy, dec_copy),
PROC42(OFFLOAD_CANCEL, enc_offload_cancel, dec_offload_cancel),
+ PROC42(OFFLOAD_STATUS, enc_offload_status, dec_offload_status),
PROC42(COPY_NOTIFY, enc_copy_notify, dec_copy_notify),
PROC(LOOKUPP, enc_lookupp, dec_lookupp),
PROC42(LAYOUTERROR, enc_layouterror, dec_layouterror),
@@ -7710,6 +7735,7 @@ const struct rpc_procinfo nfs4_procedures[] = {
PROC42(LISTXATTRS, enc_listxattrs, dec_listxattrs),
PROC42(REMOVEXATTR, enc_removexattr, dec_removexattr),
PROC42(READ_PLUS, enc_read_plus, dec_read_plus),
+ PROC42(ZERO_RANGE, enc_zero_range, dec_zero_range),
};
static unsigned int nfs_version4_counts[ARRAY_SIZE(nfs4_procedures)];
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 1eab98c277fa..6ce55e8e6b67 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -32,7 +32,8 @@
{ NFS_INO_INVALID_BLOCKS, "INVALID_BLOCKS" }, \
{ NFS_INO_INVALID_XATTR, "INVALID_XATTR" }, \
{ NFS_INO_INVALID_NLINK, "INVALID_NLINK" }, \
- { NFS_INO_INVALID_MODE, "INVALID_MODE" })
+ { NFS_INO_INVALID_MODE, "INVALID_MODE" }, \
+ { NFS_INO_INVALID_BTIME, "INVALID_BTIME" })
#define nfs_show_nfsi_flags(v) \
__print_flags(v, "|", \
@@ -44,6 +45,23 @@
{ BIT(NFS_INO_LAYOUTSTATS), "LAYOUTSTATS" }, \
{ BIT(NFS_INO_ODIRECT), "ODIRECT" })
+#define nfs_show_wb_flags(v) \
+ __print_flags(v, "|", \
+ { BIT(PG_BUSY), "BUSY" }, \
+ { BIT(PG_MAPPED), "MAPPED" }, \
+ { BIT(PG_FOLIO), "FOLIO" }, \
+ { BIT(PG_CLEAN), "CLEAN" }, \
+ { BIT(PG_COMMIT_TO_DS), "COMMIT_TO_DS" }, \
+ { BIT(PG_INODE_REF), "INODE_REF" }, \
+ { BIT(PG_HEADLOCK), "HEADLOCK" }, \
+ { BIT(PG_TEARDOWN), "TEARDOWN" }, \
+ { BIT(PG_UNLOCKPAGE), "UNLOCKPAGE" }, \
+ { BIT(PG_UPTODATE), "UPTODATE" }, \
+ { BIT(PG_WB_END), "WB_END" }, \
+ { BIT(PG_REMOVE), "REMOVE" }, \
+ { BIT(PG_CONTENDED1), "CONTENDED1" }, \
+ { BIT(PG_CONTENDED2), "CONTENDED2" })
+
DECLARE_EVENT_CLASS(nfs_inode_event,
TP_PROTO(
const struct inode *inode
@@ -56,6 +74,7 @@ DECLARE_EVENT_CLASS(nfs_inode_event,
__field(u32, fhandle)
__field(u64, fileid)
__field(u64, version)
+ __field(unsigned long, cache_validity)
),
TP_fast_assign(
@@ -64,14 +83,17 @@ DECLARE_EVENT_CLASS(nfs_inode_event,
__entry->fileid = nfsi->fileid;
__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
__entry->version = inode_peek_iversion_raw(inode);
+ __entry->cache_validity = nfsi->cache_validity;
),
TP_printk(
- "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu ",
+ "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu cache_validity=0x%lx (%s)",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
- (unsigned long long)__entry->version
+ (unsigned long long)__entry->version,
+ __entry->cache_validity,
+ nfs_show_cache_validity(__entry->cache_validity)
)
);
@@ -267,6 +289,7 @@ DECLARE_EVENT_CLASS(nfs_update_size_class,
TP_ARGS(inode, new_size))
DEFINE_NFS_UPDATE_SIZE_EVENT(truncate);
+DEFINE_NFS_UPDATE_SIZE_EVENT(truncate_folio);
DEFINE_NFS_UPDATE_SIZE_EVENT(wcc);
DEFINE_NFS_UPDATE_SIZE_EVENT(update);
DEFINE_NFS_UPDATE_SIZE_EVENT(grow);
@@ -961,7 +984,7 @@ DECLARE_EVENT_CLASS(nfs_folio_event,
__entry->fileid = nfsi->fileid;
__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
__entry->version = inode_peek_iversion_raw(inode);
- __entry->offset = offset,
+ __entry->offset = offset;
__entry->count = count;
),
@@ -1011,8 +1034,8 @@ DECLARE_EVENT_CLASS(nfs_folio_event_done,
__entry->fileid = nfsi->fileid;
__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
__entry->version = inode_peek_iversion_raw(inode);
- __entry->offset = offset,
- __entry->count = count,
+ __entry->offset = offset;
+ __entry->count = count;
__entry->ret = ret;
),
@@ -1045,6 +1068,73 @@ DEFINE_NFS_FOLIO_EVENT_DONE(nfs_writeback_folio_done);
DEFINE_NFS_FOLIO_EVENT(nfs_invalidate_folio);
DEFINE_NFS_FOLIO_EVENT_DONE(nfs_launder_folio_done);
+DEFINE_NFS_FOLIO_EVENT(nfs_try_to_update_request);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_try_to_update_request_done);
+
+DEFINE_NFS_FOLIO_EVENT(nfs_update_folio);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_update_folio_done);
+
+DEFINE_NFS_FOLIO_EVENT(nfs_write_begin);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_write_begin_done);
+
+DEFINE_NFS_FOLIO_EVENT(nfs_write_end);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_write_end_done);
+
+DEFINE_NFS_FOLIO_EVENT(nfs_writepages);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_writepages_done);
+
+DECLARE_EVENT_CLASS(nfs_kiocb_event,
+ TP_PROTO(
+ const struct kiocb *iocb,
+ const struct iov_iter *iter
+ ),
+
+ TP_ARGS(iocb, iter),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u32, fhandle)
+ __field(u64, fileid)
+ __field(u64, version)
+ __field(loff_t, offset)
+ __field(size_t, count)
+ __field(int, flags)
+ ),
+
+ TP_fast_assign(
+ const struct inode *inode = file_inode(iocb->ki_filp);
+ const struct nfs_inode *nfsi = NFS_I(inode);
+
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+ __entry->version = inode_peek_iversion_raw(inode);
+ __entry->offset = iocb->ki_pos;
+ __entry->count = iov_iter_count(iter);
+ __entry->flags = iocb->ki_flags;
+ ),
+
+ TP_printk(
+ "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu offset=%lld count=%zu ki_flags=%s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid,
+ __entry->fhandle, __entry->version,
+ __entry->offset, __entry->count,
+ __print_flags(__entry->flags, "|", TRACE_IOCB_STRINGS)
+ )
+);
+
+#define DEFINE_NFS_KIOCB_EVENT(name) \
+ DEFINE_EVENT(nfs_kiocb_event, name, \
+ TP_PROTO( \
+ const struct kiocb *iocb, \
+ const struct iov_iter *iter \
+ ), \
+ TP_ARGS(iocb, iter))
+
+DEFINE_NFS_KIOCB_EVENT(nfs_file_read);
+DEFINE_NFS_KIOCB_EVENT(nfs_file_write);
+
TRACE_EVENT(nfs_aop_readahead,
TP_PROTO(
const struct inode *inode,
@@ -1392,6 +1482,55 @@ TRACE_EVENT(nfs_writeback_done,
)
);
+DECLARE_EVENT_CLASS(nfs_page_class,
+ TP_PROTO(
+ const struct nfs_page *req
+ ),
+
+ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u32, fhandle)
+ __field(u64, fileid)
+ __field(const struct nfs_page *__private, req)
+ __field(loff_t, offset)
+ __field(unsigned int, count)
+ __field(unsigned long, flags)
+ ),
+
+ TP_fast_assign(
+ const struct inode *inode = folio_inode(req->wb_folio);
+ const struct nfs_inode *nfsi = NFS_I(inode);
+
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+ __entry->req = req;
+ __entry->offset = req_offset(req);
+ __entry->count = req->wb_bytes;
+ __entry->flags = req->wb_flags;
+ ),
+
+ TP_printk(
+ "fileid=%02x:%02x:%llu fhandle=0x%08x req=%p offset=%lld count=%u flags=%s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid, __entry->fhandle,
+ __entry->req, __entry->offset, __entry->count,
+ nfs_show_wb_flags(__entry->flags)
+ )
+);
+
+#define DEFINE_NFS_PAGE_EVENT(name) \
+ DEFINE_EVENT(nfs_page_class, name, \
+ TP_PROTO( \
+ const struct nfs_page *req \
+ ), \
+ TP_ARGS(req))
+
+DEFINE_NFS_PAGE_EVENT(nfs_writepage_setup);
+DEFINE_NFS_PAGE_EVENT(nfs_do_writepage);
+
DECLARE_EVENT_CLASS(nfs_page_error_class,
TP_PROTO(
const struct inode *inode,
@@ -1593,6 +1732,76 @@ DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_write_completion);
DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_write_schedule_iovec);
DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_write_reschedule_io);
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+
+DECLARE_EVENT_CLASS(nfs_local_dio_class,
+ TP_PROTO(
+ const struct inode *inode,
+ loff_t offset,
+ ssize_t count,
+ const struct nfs_local_dio *local_dio
+ ),
+ TP_ARGS(inode, offset, count, local_dio),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u64, fileid)
+ __field(u32, fhandle)
+ __field(loff_t, offset)
+ __field(ssize_t, count)
+ __field(u32, mem_align)
+ __field(u32, offset_align)
+ __field(loff_t, start)
+ __field(ssize_t, start_len)
+ __field(loff_t, middle)
+ __field(ssize_t, middle_len)
+ __field(loff_t, end)
+ __field(ssize_t, end_len)
+ ),
+ TP_fast_assign(
+ const struct nfs_inode *nfsi = NFS_I(inode);
+ const struct nfs_fh *fh = &nfsi->fh;
+
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(fh);
+ __entry->offset = offset;
+ __entry->count = count;
+ __entry->mem_align = local_dio->mem_align;
+ __entry->offset_align = local_dio->offset_align;
+ __entry->start = offset;
+ __entry->start_len = local_dio->start_len;
+ __entry->middle = local_dio->middle_offset;
+ __entry->middle_len = local_dio->middle_len;
+ __entry->end = local_dio->end_offset;
+ __entry->end_len = local_dio->end_len;
+ ),
+ TP_printk("fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%zd "
+ "mem_align=%u offset_align=%u "
+ "start=%llu+%zd middle=%llu+%zd end=%llu+%zd",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid,
+ __entry->fhandle, __entry->offset, __entry->count,
+ __entry->mem_align, __entry->offset_align,
+ __entry->start, __entry->start_len,
+ __entry->middle, __entry->middle_len,
+ __entry->end, __entry->end_len)
+)
+
+#define DEFINE_NFS_LOCAL_DIO_EVENT(name) \
+DEFINE_EVENT(nfs_local_dio_class, nfs_local_dio_##name, \
+ TP_PROTO(const struct inode *inode, \
+ loff_t offset, \
+ ssize_t count, \
+ const struct nfs_local_dio *local_dio),\
+ TP_ARGS(inode, offset, count, local_dio))
+
+DEFINE_NFS_LOCAL_DIO_EVENT(read);
+DEFINE_NFS_LOCAL_DIO_EVENT(write);
+DEFINE_NFS_LOCAL_DIO_EVENT(misaligned);
+
+#endif /* CONFIG_NFS_LOCALIO */
+
TRACE_EVENT(nfs_fh_to_dentry,
TP_PROTO(
const struct super_block *sb,
@@ -1707,45 +1916,13 @@ TRACE_EVENT(nfs_local_open_fh,
),
TP_printk(
- "error=%d fhandle=0x%08x mode=%s",
- __entry->error,
+ "fhandle=0x%08x mode=%s result=%d",
__entry->fhandle,
- show_fs_fmode_flags(__entry->fmode)
- )
-);
-
-DECLARE_EVENT_CLASS(nfs_local_client_event,
- TP_PROTO(
- const struct nfs_client *clp
- ),
-
- TP_ARGS(clp),
-
- TP_STRUCT__entry(
- __field(unsigned int, protocol)
- __string(server, clp->cl_hostname)
- ),
-
- TP_fast_assign(
- __entry->protocol = clp->rpc_ops->version;
- __assign_str(server);
- ),
-
- TP_printk(
- "server=%s NFSv%u", __get_str(server), __entry->protocol
+ show_fs_fmode_flags(__entry->fmode),
+ __entry->error
)
);
-#define DEFINE_NFS_LOCAL_CLIENT_EVENT(name) \
- DEFINE_EVENT(nfs_local_client_event, name, \
- TP_PROTO( \
- const struct nfs_client *clp \
- ), \
- TP_ARGS(clp))
-
-DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_local_enable);
-DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_local_disable);
-
DECLARE_EVENT_CLASS(nfs_xdr_event,
TP_PROTO(
const struct xdr_stream *xdr,
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index e27c07bd8929..6e69ce43a13f 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -253,13 +253,14 @@ nfs_page_group_unlock(struct nfs_page *req)
nfs_page_clear_headlock(req);
}
-/*
- * nfs_page_group_sync_on_bit_locked
+/**
+ * nfs_page_group_sync_on_bit_locked - Test if all requests have @bit set
+ * @req: request in page group
+ * @bit: PG_* bit that is used to sync page group
*
* must be called with page group lock held
*/
-static bool
-nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
+bool nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
struct nfs_page *head = req->wb_head;
struct nfs_page *tmp;
@@ -961,8 +962,9 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
struct nfs_client *clp = NFS_SERVER(hdr->inode)->nfs_client;
struct nfsd_file *localio =
- nfs_local_open_fh(clp, hdr->cred,
- hdr->args.fh, hdr->args.context->mode);
+ nfs_local_open_fh(clp, hdr->cred, hdr->args.fh,
+ &hdr->args.context->nfl,
+ hdr->args.context->mode);
if (NFS_SERVER(hdr->inode)->nfs_client->cl_minorversion)
task_flags = RPC_TASK_MOVEABLE;
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 0d16b383a452..f157d43d1312 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -306,7 +306,6 @@ void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
struct inode *inode;
- unsigned long i_state;
if (!lo)
return;
@@ -317,12 +316,11 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
if (!list_empty(&lo->plh_segs))
WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
pnfs_detach_layout_hdr(lo);
- i_state = inode->i_state;
+ /* Notify pnfs_destroy_layout_final() that we're done */
+ if (inode_state_read(inode) & (I_FREEING | I_CLEAR))
+ wake_up_var_locked(lo, &inode->i_lock);
spin_unlock(&inode->i_lock);
pnfs_free_layout_hdr(lo);
- /* Notify pnfs_destroy_layout_final() that we're done */
- if (i_state & (I_FREEING | I_CLEAR))
- wake_up_var(lo);
}
}
@@ -745,6 +743,14 @@ pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
return remaining;
}
+static void pnfs_reset_return_info(struct pnfs_layout_hdr *lo)
+{
+ struct pnfs_layout_segment *lseg;
+
+ list_for_each_entry(lseg, &lo->plh_return_segs, pls_list)
+ pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
+}
+
static void
pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
struct list_head *free_me,
@@ -801,23 +807,17 @@ void pnfs_destroy_layout(struct nfs_inode *nfsi)
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
-static bool pnfs_layout_removed(struct nfs_inode *nfsi,
- struct pnfs_layout_hdr *lo)
-{
- bool ret;
-
- spin_lock(&nfsi->vfs_inode.i_lock);
- ret = nfsi->layout != lo;
- spin_unlock(&nfsi->vfs_inode.i_lock);
- return ret;
-}
-
void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
{
struct pnfs_layout_hdr *lo = __pnfs_destroy_layout(nfsi);
+ struct inode *inode = &nfsi->vfs_inode;
- if (lo)
- wait_var_event(lo, pnfs_layout_removed(nfsi, lo));
+ if (lo) {
+ spin_lock(&inode->i_lock);
+ wait_var_event_spinlock(lo, nfsi->layout != lo,
+ &inode->i_lock);
+ spin_unlock(&inode->i_lock);
+ }
}
static bool
@@ -1246,21 +1246,15 @@ static void pnfs_clear_layoutcommit(struct inode *inode,
static void
pnfs_layoutreturn_retry_later_locked(struct pnfs_layout_hdr *lo,
const nfs4_stateid *arg_stateid,
- const struct pnfs_layout_range *range)
+ const struct pnfs_layout_range *range,
+ struct list_head *freeme)
{
- const struct pnfs_layout_segment *lseg;
- u32 seq = be32_to_cpu(arg_stateid->seqid);
-
if (pnfs_layout_is_valid(lo) &&
- nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid)) {
- list_for_each_entry(lseg, &lo->plh_return_segs, pls_list) {
- if (pnfs_seqid_is_newer(lseg->pls_seq, seq) ||
- !pnfs_should_free_range(&lseg->pls_range, range))
- continue;
- pnfs_set_plh_return_info(lo, range->iomode, seq);
- break;
- }
- }
+ nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
+ pnfs_reset_return_info(lo);
+ else
+ pnfs_mark_layout_stateid_invalid(lo, freeme);
+ pnfs_clear_layoutreturn_waitbit(lo);
}
void pnfs_layoutreturn_retry_later(struct pnfs_layout_hdr *lo,
@@ -1268,11 +1262,12 @@ void pnfs_layoutreturn_retry_later(struct pnfs_layout_hdr *lo,
const struct pnfs_layout_range *range)
{
struct inode *inode = lo->plh_inode;
+ LIST_HEAD(freeme);
spin_lock(&inode->i_lock);
- pnfs_layoutreturn_retry_later_locked(lo, arg_stateid, range);
- pnfs_clear_layoutreturn_waitbit(lo);
+ pnfs_layoutreturn_retry_later_locked(lo, arg_stateid, range, &freeme);
spin_unlock(&inode->i_lock);
+ pnfs_free_lseg_list(&freeme);
}
void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
@@ -1292,6 +1287,7 @@ void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
pnfs_free_returned_lsegs(lo, &freeme, range, seq);
pnfs_set_layout_stateid(lo, stateid, NULL, true);
+ pnfs_reset_return_info(lo);
} else
pnfs_mark_layout_stateid_invalid(lo, &freeme);
out_unlock:
@@ -1308,7 +1304,7 @@ pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
enum pnfs_iomode *iomode)
{
/* Serialise LAYOUTGET/LAYOUTRETURN */
- if (atomic_read(&lo->plh_outstanding) != 0)
+ if (atomic_read(&lo->plh_outstanding) != 0 && lo->plh_return_seq == 0)
return false;
if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
return false;
@@ -1661,6 +1657,18 @@ int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
/* Was there an RPC level error? If not, retry */
if (task->tk_rpc_status == 0)
break;
+ /*
+	 * Is there a fatal network-level error?
+	 * If so, release the layout, but flag the error.
+ */
+ if ((task->tk_rpc_status == -ENETDOWN ||
+ task->tk_rpc_status == -ENETUNREACH) &&
+ task->tk_flags & RPC_TASK_NETUNREACH_FATAL) {
+ *ret = 0;
+ (*respp)->lrs_present = 0;
+ retval = -EIO;
+ break;
+ }
/* If the call was not sent, let caller handle it */
if (!RPC_WAS_SENT(task))
return 0;
@@ -1695,6 +1703,7 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
struct inode *inode = args->inode;
const nfs4_stateid *res_stateid = NULL;
struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
+ LIST_HEAD(freeme);
switch (ret) {
case -NFS4ERR_BADSESSION:
@@ -1703,9 +1712,9 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
case -NFS4ERR_NOMATCHING_LAYOUT:
spin_lock(&inode->i_lock);
pnfs_layoutreturn_retry_later_locked(lo, &args->stateid,
- &args->range);
- pnfs_clear_layoutreturn_waitbit(lo);
+ &args->range, &freeme);
spin_unlock(&inode->i_lock);
+ pnfs_free_lseg_list(&freeme);
break;
case 0:
if (res->lrs_present)
@@ -2042,8 +2051,10 @@ static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
{
if (atomic_dec_and_test(&lo->plh_outstanding) &&
- test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags))
+ test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags)) {
+ smp_mb__after_atomic();
wake_up_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN);
+ }
}
static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo)
@@ -3321,6 +3332,7 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
struct nfs_inode *nfsi = NFS_I(inode);
loff_t end_pos;
int status;
+ bool mark_as_dirty = false;
if (!pnfs_layoutcommit_outstanding(inode))
return 0;
@@ -3372,19 +3384,23 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
if (ld->prepare_layoutcommit) {
status = ld->prepare_layoutcommit(&data->args);
if (status) {
- put_cred(data->cred);
+ if (status != -ENOSPC)
+ put_cred(data->cred);
spin_lock(&inode->i_lock);
set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
if (end_pos > nfsi->layout->plh_lwb)
nfsi->layout->plh_lwb = end_pos;
- goto out_unlock;
+ if (status != -ENOSPC)
+ goto out_unlock;
+ spin_unlock(&inode->i_lock);
+ mark_as_dirty = true;
}
}
status = nfs4_proc_layoutcommit(data, sync);
out:
- if (status)
+ if (status || mark_as_dirty)
mark_inode_dirty_sync(inode);
dprintk("<-- %s status %d\n", __func__, status);
return status;
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 30d2613e912b..91ff877185c8 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -60,6 +60,7 @@ struct nfs4_pnfs_ds {
struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */
char *ds_remotestr; /* comma sep list of addrs */
struct list_head ds_addrs;
+ const struct net *ds_net;
struct nfs_client *ds_clp;
refcount_t ds_count;
unsigned long ds_state;
@@ -415,7 +416,8 @@ int pnfs_generic_commit_pagelist(struct inode *inode,
int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max);
void pnfs_generic_write_commit_done(struct rpc_task *task, void *data);
void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds);
-struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs,
+struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(const struct net *net,
+ struct list_head *dsaddrs,
gfp_t gfp_flags);
void nfs4_pnfs_v3_ds_connect_unload(void);
int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index dbef837e871a..9976cc16b689 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -16,6 +16,8 @@
#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"
+#include "netns.h"
+#include "nfs4trace.h"
#define NFSDBG_FACILITY NFSDBG_PNFS
@@ -504,14 +506,14 @@ EXPORT_SYMBOL_GPL(pnfs_generic_commit_pagelist);
/*
* Data server cache
*
- * Data servers can be mapped to different device ids.
- * nfs4_pnfs_ds reference counting
+ * Data servers can be mapped to different device ids, but should
+ * never be shared between net namespaces.
+ *
+ * nfs4_pnfs_ds reference counting:
* - set to 1 on allocation
* - incremented when a device id maps a data server already in the cache.
* - decremented when deviceid is removed from the cache.
*/
-static DEFINE_SPINLOCK(nfs4_ds_cache_lock);
-static LIST_HEAD(nfs4_data_server_cache);
/* Debug routines */
static void
@@ -604,11 +606,11 @@ _same_data_server_addrs_locked(const struct list_head *dsaddrs1,
* Lookup DS by addresses. nfs4_ds_cache_lock is held
*/
static struct nfs4_pnfs_ds *
-_data_server_lookup_locked(const struct list_head *dsaddrs)
+_data_server_lookup_locked(const struct nfs_net *nn, const struct list_head *dsaddrs)
{
struct nfs4_pnfs_ds *ds;
- list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
+ list_for_each_entry(ds, &nn->nfs4_data_server_cache, ds_node)
if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
return ds;
return NULL;
@@ -653,10 +655,11 @@ static void destroy_ds(struct nfs4_pnfs_ds *ds)
void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds)
{
- if (refcount_dec_and_lock(&ds->ds_count,
- &nfs4_ds_cache_lock)) {
+ struct nfs_net *nn = net_generic(ds->ds_net, nfs_net_id);
+
+ if (refcount_dec_and_lock(&ds->ds_count, &nn->nfs4_data_server_lock)) {
list_del_init(&ds->ds_node);
- spin_unlock(&nfs4_ds_cache_lock);
+ spin_unlock(&nn->nfs4_data_server_lock);
destroy_ds(ds);
}
}
@@ -716,8 +719,9 @@ out_err:
* uncached and return cached struct nfs4_pnfs_ds.
*/
struct nfs4_pnfs_ds *
-nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
+nfs4_pnfs_ds_add(const struct net *net, struct list_head *dsaddrs, gfp_t gfp_flags)
{
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
char *remotestr;
@@ -733,16 +737,17 @@ nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
/* this is only used for debugging, so it's ok if its NULL */
remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);
- spin_lock(&nfs4_ds_cache_lock);
- tmp_ds = _data_server_lookup_locked(dsaddrs);
+ spin_lock(&nn->nfs4_data_server_lock);
+ tmp_ds = _data_server_lookup_locked(nn, dsaddrs);
if (tmp_ds == NULL) {
INIT_LIST_HEAD(&ds->ds_addrs);
list_splice_init(dsaddrs, &ds->ds_addrs);
ds->ds_remotestr = remotestr;
refcount_set(&ds->ds_count, 1);
INIT_LIST_HEAD(&ds->ds_node);
+ ds->ds_net = net;
ds->ds_clp = NULL;
- list_add(&ds->ds_node, &nfs4_data_server_cache);
+ list_add(&ds->ds_node, &nn->nfs4_data_server_cache);
dprintk("%s add new data server %s\n", __func__,
ds->ds_remotestr);
} else {
@@ -754,7 +759,7 @@ nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
refcount_read(&tmp_ds->ds_count));
ds = tmp_ds;
}
- spin_unlock(&nfs4_ds_cache_lock);
+ spin_unlock(&nn->nfs4_data_server_lock);
out:
return ds;
}
@@ -804,8 +809,11 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
unsigned int retrans)
{
struct nfs_client *clp = ERR_PTR(-EIO);
+ struct nfs_client *mds_clp = mds_srv->nfs_client;
+ enum xprtsec_policies xprtsec_policy = mds_clp->cl_xprtsec.policy;
struct nfs4_pnfs_ds_addr *da;
unsigned long connect_timeout = timeo * (retrans + 1) * HZ / 10;
+ int ds_proto;
int status = 0;
dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);
@@ -826,21 +834,31 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
.servername = clp->cl_hostname,
.connect_timeout = connect_timeout,
.reconnect_timeout = connect_timeout,
+ .xprtsec = clp->cl_xprtsec,
};
- if (da->da_transport != clp->cl_proto)
+ if (xprt_args.ident == XPRT_TRANSPORT_TCP &&
+ clp->cl_proto == XPRT_TRANSPORT_TCP_TLS)
+ xprt_args.ident = XPRT_TRANSPORT_TCP_TLS;
+
+ if (xprt_args.ident != clp->cl_proto)
continue;
- if (da->da_addr.ss_family != clp->cl_addr.ss_family)
+ if (xprt_args.dstaddr->sa_family !=
+ clp->cl_addr.ss_family)
continue;
/* Add this address as an alias */
rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
- rpc_clnt_test_and_add_xprt, NULL);
+ rpc_clnt_test_and_add_xprt, NULL);
continue;
}
- clp = get_v3_ds_connect(mds_srv,
- &da->da_addr,
- da->da_addrlen, da->da_transport,
- timeo, retrans);
+
+ ds_proto = da->da_transport;
+ if (ds_proto == XPRT_TRANSPORT_TCP &&
+ xprtsec_policy != RPC_XPRTSEC_NONE)
+ ds_proto = XPRT_TRANSPORT_TCP_TLS;
+
+ clp = get_v3_ds_connect(mds_srv, &da->da_addr, da->da_addrlen,
+ ds_proto, timeo, retrans);
if (IS_ERR(clp))
continue;
clp->cl_rpcclient->cl_softerr = 0;
@@ -866,7 +884,10 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
u32 minor_version)
{
struct nfs_client *clp = ERR_PTR(-EIO);
+ struct nfs_client *mds_clp = mds_srv->nfs_client;
+ enum xprtsec_policies xprtsec_policy = mds_clp->cl_xprtsec.policy;
struct nfs4_pnfs_ds_addr *da;
+ int ds_proto;
int status = 0;
dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);
@@ -894,12 +915,8 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
.data = &xprtdata,
};
- if (da->da_transport != clp->cl_proto &&
- clp->cl_proto != XPRT_TRANSPORT_TCP_TLS)
- continue;
- if (da->da_transport == XPRT_TRANSPORT_TCP &&
- mds_srv->nfs_client->cl_proto ==
- XPRT_TRANSPORT_TCP_TLS) {
+ if (xprt_args.ident == XPRT_TRANSPORT_TCP &&
+ clp->cl_proto == XPRT_TRANSPORT_TCP_TLS) {
struct sockaddr *addr =
(struct sockaddr *)&da->da_addr;
struct sockaddr_in *sin =
@@ -930,7 +947,10 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
xprt_args.ident = XPRT_TRANSPORT_TCP_TLS;
xprt_args.servername = servername;
}
- if (da->da_addr.ss_family != clp->cl_addr.ss_family)
+ if (xprt_args.ident != clp->cl_proto)
+ continue;
+ if (xprt_args.dstaddr->sa_family !=
+ clp->cl_addr.ss_family)
continue;
/**
@@ -944,15 +964,14 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
if (xprtdata.cred)
put_cred(xprtdata.cred);
} else {
- if (da->da_transport == XPRT_TRANSPORT_TCP &&
- mds_srv->nfs_client->cl_proto ==
- XPRT_TRANSPORT_TCP_TLS)
- da->da_transport = XPRT_TRANSPORT_TCP_TLS;
- clp = nfs4_set_ds_client(mds_srv,
- &da->da_addr,
- da->da_addrlen,
- da->da_transport, timeo,
- retrans, minor_version);
+ ds_proto = da->da_transport;
+ if (ds_proto == XPRT_TRANSPORT_TCP &&
+ xprtsec_policy != RPC_XPRTSEC_NONE)
+ ds_proto = XPRT_TRANSPORT_TCP_TLS;
+
+ clp = nfs4_set_ds_client(mds_srv, &da->da_addr,
+ da->da_addrlen, ds_proto,
+ timeo, retrans, minor_version);
if (IS_ERR(clp))
continue;
@@ -963,7 +982,6 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
clp = ERR_PTR(-EIO);
continue;
}
-
}
}
@@ -994,8 +1012,10 @@ int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
err = nfs4_wait_ds_connect(ds);
if (err || ds->ds_clp)
goto out;
- if (nfs4_test_deviceid_unavailable(devid))
- return -ENODEV;
+ if (nfs4_test_deviceid_unavailable(devid)) {
+ err = -ENODEV;
+ goto out;
+ }
} while (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) != 0);
if (ds->ds_clp)
@@ -1025,11 +1045,12 @@ out:
if (!ds->ds_clp || !nfs_client_init_is_complete(ds->ds_clp)) {
WARN_ON_ONCE(ds->ds_clp ||
!nfs4_test_deviceid_unavailable(devid));
- return -EINVAL;
- }
- err = nfs_client_init_status(ds->ds_clp);
+ err = -EINVAL;
+ } else
+ err = nfs_client_init_status(ds->ds_clp);
}
+ trace_pnfs_ds_connect(ds->ds_remotestr, err);
return err;
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 6c09cd090c34..63e71310b9f6 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -153,13 +153,13 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int
-nfs_proc_lookup(struct inode *dir, struct dentry *dentry,
+nfs_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs_diropargs arg = {
.fh = NFS_FH(dir),
- .name = dentry->d_name.name,
- .len = dentry->d_name.len
+ .name = name->name,
+ .len = name->len
};
struct nfs_diropok res = {
.fh = fhandle,
@@ -446,13 +446,14 @@ out:
return status;
}
-static int
+static struct dentry *
nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
{
struct nfs_createdata *data;
struct rpc_message msg = {
.rpc_proc = &nfs_procedures[NFSPROC_MKDIR],
};
+ struct dentry *alias = NULL;
int status = -ENOMEM;
dprintk("NFS call mkdir %pd\n", dentry);
@@ -464,12 +465,15 @@ nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
nfs_mark_for_revalidate(dir);
- if (status == 0)
- status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
+ if (status == 0) {
+ alias = nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr);
+ status = PTR_ERR_OR_ZERO(alias);
+ } else
+ alias = ERR_PTR(status);
nfs_free_createdata(data);
out:
dprintk("NFS reply mkdir: %d\n", status);
- return status;
+ return alias;
}
static int
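
A note on the return-value convention used by the nfs_proc_mkdir() conversion above: the new code returns a struct dentry pointer and folds any failure into it via PTR_ERR_OR_ZERO(). The pointer-encoded-errno trick behind that can be modelled in a few lines of user-space C. This is a simplified sketch of the <linux/err.h> convention only; lookup_thing() is an invented helper, not kernel API.

    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Encode a small negative errno in an otherwise invalid pointer value. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }
    static inline int PTR_ERR_OR_ZERO(const void *ptr)
    {
            return IS_ERR(ptr) ? (int)PTR_ERR(ptr) : 0;
    }

    /* Invented stand-in for a lookup that may fail. */
    static void *lookup_thing(int fail)
    {
            static int thing = 42;

            return fail ? ERR_PTR(-2 /* -ENOENT */) : &thing;
    }

    int main(void)
    {
            void *ok = lookup_thing(0);
            void *bad = lookup_thing(1);

            printf("ok: status=%d  bad: status=%d\n",
                   PTR_ERR_OR_ZERO(ok), PTR_ERR_OR_ZERO(bad));
            return 0;
    }

A single return value can thus carry either a valid object or a negative errno, which is what lets the mkdir path hand back the dentry obtained from nfs_add_or_obtain() directly.
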
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 81bd1b9aba17..3c1fa320b3f1 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -56,7 +56,8 @@ static int nfs_return_empty_folio(struct folio *folio)
{
folio_zero_segment(folio, 0, folio_size(folio));
folio_mark_uptodate(folio);
- folio_unlock(folio);
+ if (nfs_netfs_folio_unlock(folio))
+ folio_unlock(folio);
return 0;
}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ae5c5e39afa0..72dee6f3050e 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -73,6 +73,7 @@
#include "nfs.h"
#include "netns.h"
#include "sysfs.h"
+#include "nfs4idmap.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -453,8 +454,12 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
{ NFS_MOUNT_NONLM, ",nolock", "" },
{ NFS_MOUNT_NOACL, ",noacl", "" },
{ NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" },
+ { NFS_MOUNT_FORCE_RDIRPLUS, ",rdirplus=force", "" },
{ NFS_MOUNT_UNSHARED, ",nosharecache", "" },
{ NFS_MOUNT_NORESVPORT, ",noresvport", "" },
+ { NFS_MOUNT_NETUNREACH_FATAL,
+ ",fatal_neterrors=ENETDOWN:ENETUNREACH",
+ ",fatal_neterrors=none" },
{ 0, NULL, NULL }
};
const struct proc_nfs_info *nfs_infop;
@@ -1047,6 +1052,16 @@ int nfs_reconfigure(struct fs_context *fc)
sync_filesystem(sb);
/*
+ * The SB_RDONLY flag has been removed from the superblock during
+ * mounts to prevent interference between different filesystems.
+	 * Similarly, the SB_RDONLY flag must be ignored during
+	 * reconfiguration; otherwise, mounting a directory multiple times
+	 * with different rw and ro flags may create redundant superblocks.
+ */
+ fc->sb_flags_mask &= ~SB_RDONLY;
+
+ /*
* Userspace mount programs that send binary options generally send
* them populated with default values. We have no way to know which
* ones were explicitly specified. Fall back to legacy behavior and
@@ -1168,7 +1183,7 @@ static int nfs_set_super(struct super_block *s, struct fs_context *fc)
struct nfs_server *server = fc->s_fs_info;
int ret;
- s->s_d_op = server->nfs_client->rpc_ops->dentry_ops;
+ set_default_d_op(s, server->nfs_client->rpc_ops->dentry_ops);
ret = set_anon_super(s, server);
if (ret == 0)
server->s_dev = s->s_dev;
@@ -1303,8 +1318,17 @@ int nfs_get_tree_common(struct fs_context *fc)
if (IS_ERR(server))
return PTR_ERR(server);
+ /*
+	 * When NFS_MOUNT_UNSHARED is not set, NFS shares a single
+	 * superblock across all mounts of sub-directories belonging to
+	 * the same exported root path.
+ * To prevent interference between different filesystems, the
+ * SB_RDONLY flag should be removed from the superblock.
+ */
if (server->flags & NFS_MOUNT_UNSHARED)
compare_super = NULL;
+ else
+ fc->sb_flags &= ~SB_RDONLY;
/* -o noac implies -o sync */
if (server->flags & NFS_MOUNT_NOAC)
diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
index 1c62a5a9f51d..58146e935402 100644
--- a/fs/nfs/symlink.c
+++ b/fs/nfs/symlink.c
@@ -40,31 +40,31 @@ static const char *nfs_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
- struct page *page;
+ struct folio *folio;
void *err;
if (!dentry) {
err = ERR_PTR(nfs_revalidate_mapping_rcu(inode));
if (err)
return err;
- page = find_get_page(inode->i_mapping, 0);
- if (!page)
+ folio = filemap_get_folio(inode->i_mapping, 0);
+ if (IS_ERR(folio))
return ERR_PTR(-ECHILD);
- if (!PageUptodate(page)) {
- put_page(page);
+ if (!folio_test_uptodate(folio)) {
+ folio_put(folio);
return ERR_PTR(-ECHILD);
}
} else {
err = ERR_PTR(nfs_revalidate_mapping(inode, inode->i_mapping));
if (err)
return err;
- page = read_cache_page(&inode->i_data, 0, nfs_symlink_filler,
+ folio = read_cache_folio(&inode->i_data, 0, nfs_symlink_filler,
NULL);
- if (IS_ERR(page))
- return ERR_CAST(page);
+ if (IS_ERR(folio))
+ return ERR_CAST(folio);
}
- set_delayed_call(done, page_put_link, page);
- return page_address(page);
+ set_delayed_call(done, page_put_link, folio);
+ return folio_address(folio);
}
/*
diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c
index e645be1a3381..f579df0e8d67 100644
--- a/fs/nfs/sysctl.c
+++ b/fs/nfs/sysctl.c
@@ -14,7 +14,7 @@
static struct ctl_table_header *nfs_callback_sysctl_table;
-static struct ctl_table nfs_cb_sysctls[] = {
+static const struct ctl_table nfs_cb_sysctls[] = {
{
.procname = "nfs_mountpoint_timeout",
.data = &nfs_mountpoint_expiry_timeout,
diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c
index bf378ecd5d9f..ea6e6168092b 100644
--- a/fs/nfs/sysfs.c
+++ b/fs/nfs/sysfs.c
@@ -14,6 +14,7 @@
#include <linux/rcupdate.h>
#include <linux/lockd/lockd.h>
+#include "internal.h"
#include "nfs4_fs.h"
#include "netns.h"
#include "sysfs.h"
@@ -188,6 +189,7 @@ static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent,
return p;
kobject_put(&p->kobject);
+ kobject_put(&p->nfs_net_kobj);
}
return NULL;
}
@@ -228,6 +230,25 @@ static void shutdown_client(struct rpc_clnt *clnt)
rpc_cancel_tasks(clnt, -EIO, shutdown_match_client, NULL);
}
+/*
+ * Shut down the nfs_client only once all the superblocks
+ * have been shut down.
+ */
+static void shutdown_nfs_client(struct nfs_client *clp)
+{
+ struct nfs_server *server;
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ if (!(server->flags & NFS_MOUNT_SHUTDOWN)) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+ nfs_mark_client_ready(clp, -EIO);
+ shutdown_client(clp->cl_rpcclient);
+}
+
static ssize_t
shutdown_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
@@ -259,7 +280,6 @@ shutdown_store(struct kobject *kobj, struct kobj_attribute *attr,
server->flags |= NFS_MOUNT_SHUTDOWN;
shutdown_client(server->client);
- shutdown_client(server->nfs_client->cl_rpcclient);
if (!IS_ERR(server->client_acl))
shutdown_client(server->client_acl);
@@ -267,11 +287,44 @@ shutdown_store(struct kobject *kobj, struct kobj_attribute *attr,
if (server->nlm_host)
shutdown_client(server->nlm_host->h_rpcclnt);
out:
+ shutdown_nfs_client(server->nfs_client);
return count;
}
static struct kobj_attribute nfs_sysfs_attr_shutdown = __ATTR_RW(shutdown);
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+static ssize_t
+implid_domain_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct nfs_server *server = container_of(kobj, struct nfs_server, kobj);
+ struct nfs41_impl_id *impl_id = server->nfs_client->cl_implid;
+
+ if (!impl_id || strlen(impl_id->domain) == 0)
+ return 0; //sysfs_emit(buf, "");
+ return sysfs_emit(buf, "%s\n", impl_id->domain);
+}
+
+static struct kobj_attribute nfs_sysfs_attr_implid_domain = __ATTR_RO(implid_domain);
+
+
+static ssize_t
+implid_name_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct nfs_server *server = container_of(kobj, struct nfs_server, kobj);
+ struct nfs41_impl_id *impl_id = server->nfs_client->cl_implid;
+
+ if (!impl_id || strlen(impl_id->name) == 0)
+ return 0; //sysfs_emit(buf, "");
+ return sysfs_emit(buf, "%s\n", impl_id->name);
+}
+
+static struct kobj_attribute nfs_sysfs_attr_implid_name = __ATTR_RO(implid_name);
+
+#endif /* IS_ENABLED(CONFIG_NFS_V4_1) */
+
#define RPC_CLIENT_NAME_SIZE 64
void nfs_sysfs_link_rpc_client(struct nfs_server *server,
@@ -280,9 +333,9 @@ void nfs_sysfs_link_rpc_client(struct nfs_server *server,
char name[RPC_CLIENT_NAME_SIZE];
int ret;
- strcpy(name, clnt->cl_program->name);
- strcat(name, uniq ? uniq : "");
- strcat(name, "_client");
+ strscpy(name, clnt->cl_program->name, sizeof(name));
+ strncat(name, uniq ? uniq : "", sizeof(name) - strlen(name) - 1);
+ strncat(name, "_client", sizeof(name) - strlen(name) - 1);
ret = sysfs_create_link_nowarn(&server->kobj,
&clnt->cl_sysfs->kobject, name);
@@ -309,6 +362,59 @@ static struct kobj_type nfs_sb_ktype = {
.child_ns_type = nfs_netns_object_child_ns_type,
};
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+static void nfs_sysfs_add_nfsv41_server(struct nfs_server *server)
+{
+ int ret;
+
+ if (!server->nfs_client->cl_implid)
+ return;
+
+ ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_implid_domain.attr,
+ nfs_netns_server_namespace(&server->kobj));
+ if (ret < 0)
+ pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
+ server->s_sysfs_id, ret);
+
+ ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_implid_name.attr,
+ nfs_netns_server_namespace(&server->kobj));
+ if (ret < 0)
+ pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
+ server->s_sysfs_id, ret);
+}
+#else /* CONFIG_NFS_V4_1 */
+static inline void nfs_sysfs_add_nfsv41_server(struct nfs_server *server)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+
+static ssize_t
+localio_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct nfs_server *server = container_of(kobj, struct nfs_server, kobj);
+ bool localio = nfs_server_is_local(server->nfs_client);
+ return sysfs_emit(buf, "%d\n", localio);
+}
+
+static struct kobj_attribute nfs_sysfs_attr_localio = __ATTR_RO(localio);
+
+static void nfs_sysfs_add_nfs_localio_server(struct nfs_server *server)
+{
+ int ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_localio.attr,
+ nfs_netns_server_namespace(&server->kobj));
+ if (ret < 0)
+ pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
+ server->s_sysfs_id, ret);
+}
+#else
+static inline void nfs_sysfs_add_nfs_localio_server(struct nfs_server *server)
+{
+}
+#endif /* IS_ENABLED(CONFIG_NFS_LOCALIO) */
+
void nfs_sysfs_add_server(struct nfs_server *server)
{
int ret;
@@ -325,6 +431,9 @@ void nfs_sysfs_add_server(struct nfs_server *server)
if (ret < 0)
pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
server->s_sysfs_id, ret);
+
+ nfs_sysfs_add_nfsv41_server(server);
+ nfs_sysfs_add_nfs_localio_server(server);
}
EXPORT_SYMBOL_GPL(nfs_sysfs_add_server);
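
Each of the sysfs show routines added above recovers its struct nfs_server from the embedded kobject with container_of(). A minimal user-space rendition of that member-to-container step, using an illustrative struct server rather than the real NFS types, could look like this:

    #include <stddef.h>
    #include <stdio.h>

    /* User-space rendition of the kernel macro: given a pointer to a member,
     * recover a pointer to the enclosing structure. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kobject { int refcount; };

    struct server {
            const char *name;
            struct kobject kobj;        /* embedded, like nfs_server::kobj */
    };

    /* Analogue of a sysfs ->show() callback: only the kobject is passed in. */
    static void show(struct kobject *kobj)
    {
            struct server *srv = container_of(kobj, struct server, kobj);

            printf("server %s\n", srv->name);
    }

    int main(void)
    {
            struct server srv = { .name = "example", .kobj = { .refcount = 1 } };

            show(&srv.kobj);
            return 0;
    }

Because the kobject lives inside the server structure, subtracting the member offset is all the callback needs to get back to the per-server data it reports through sysfs_emit().
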
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index bf77399696a7..b55467911648 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -464,18 +464,17 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
sdentry = NULL;
do {
- int slen;
dput(sdentry);
sillycounter++;
- slen = scnprintf(silly, sizeof(silly),
- SILLYNAME_PREFIX "%0*llx%0*x",
- SILLYNAME_FILEID_LEN, fileid,
- SILLYNAME_COUNTER_LEN, sillycounter);
+ scnprintf(silly, sizeof(silly),
+ SILLYNAME_PREFIX "%0*llx%0*x",
+ SILLYNAME_FILEID_LEN, fileid,
+ SILLYNAME_COUNTER_LEN, sillycounter);
dfprintk(VFS, "NFS: trying to rename %pd to %s\n",
dentry, silly);
- sdentry = lookup_one_len(silly, dentry->d_parent, slen);
+ sdentry = lookup_noperm(&QSTR(silly), dentry->d_parent);
/*
* N.B. Better to return EBUSY here ... it could be
* dangerous to delete the file while it's in use.
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 50fa539611f5..336c510f3750 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -153,20 +153,10 @@ nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
}
}
-static int
-nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
+static void nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
{
- int ret;
-
- if (!test_bit(PG_REMOVE, &req->wb_flags))
- return 0;
- ret = nfs_page_group_lock(req);
- if (ret)
- return ret;
if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
nfs_page_set_inode_ref(req, inode);
- nfs_page_group_unlock(req);
- return 0;
}
/**
@@ -247,59 +237,17 @@ static void nfs_mapping_set_error(struct folio *folio, int error)
}
/*
- * nfs_page_group_search_locked
- * @head - head request of page group
- * @page_offset - offset into page
- *
- * Search page group with head @head to find a request that contains the
- * page offset @page_offset.
- *
- * Returns a pointer to the first matching nfs request, or NULL if no
- * match is found.
- *
- * Must be called with the page group lock held
- */
-static struct nfs_page *
-nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
-{
- struct nfs_page *req;
-
- req = head;
- do {
- if (page_offset >= req->wb_pgbase &&
- page_offset < (req->wb_pgbase + req->wb_bytes))
- return req;
-
- req = req->wb_this_page;
- } while (req != head);
-
- return NULL;
-}
-
-/*
- * nfs_page_group_covers_page
- * @head - head request of page group
+ * nfs_page_covers_folio
+ * @req: struct nfs_page
*
- * Return true if the page group with head @head covers the whole page,
- * returns false otherwise
+ * Return true if the request covers the whole folio.
+ * Note that the caller should ensure all subrequests have been joined
*/
static bool nfs_page_group_covers_page(struct nfs_page *req)
{
unsigned int len = nfs_folio_length(nfs_page_to_folio(req));
- struct nfs_page *tmp;
- unsigned int pos = 0;
-
- nfs_page_group_lock(req);
- for (;;) {
- tmp = nfs_page_group_search_locked(req->wb_head, pos);
- if (!tmp)
- break;
- pos = tmp->wb_pgbase + tmp->wb_bytes;
- }
-
- nfs_page_group_unlock(req);
- return pos >= len;
+ return req->wb_pgbase == 0 && req->wb_bytes == len;
}
/* We can set the PG_uptodate flag if we see that a write request
@@ -348,7 +296,7 @@ static void nfs_folio_end_writeback(struct folio *folio)
{
struct nfs_server *nfss = NFS_SERVER(folio->mapping->host);
- folio_end_writeback(folio);
+ folio_end_writeback_no_dropbehind(folio);
if (atomic_long_dec_return(&nfss->writeback) <
NFS_CONGESTION_OFF_THRESH) {
nfss->write_congested = 0;
@@ -579,23 +527,24 @@ retry:
while (!nfs_lock_request(head)) {
ret = nfs_wait_on_request(head);
- if (ret < 0)
+ if (ret < 0) {
+ nfs_release_request(head);
return ERR_PTR(ret);
+ }
}
+ ret = nfs_page_group_lock(head);
+ if (ret < 0)
+ goto out_unlock;
+
/* Ensure that nobody removed the request before we locked it */
if (head != folio->private) {
+ nfs_page_group_unlock(head);
nfs_unlock_and_release_request(head);
goto retry;
}
- ret = nfs_cancel_remove_inode(head, inode);
- if (ret < 0)
- goto out_unlock;
-
- ret = nfs_page_group_lock(head);
- if (ret < 0)
- goto out_unlock;
+ nfs_cancel_remove_inode(head, inode);
/* lock each request in the page group */
for (subreq = head->wb_this_page;
@@ -630,20 +579,21 @@ static void nfs_write_error(struct nfs_page *req, int error)
* Find an associated nfs write request, and prepare to flush it out
* May return an error if the user signalled nfs_wait_on_request().
*/
-static int nfs_page_async_flush(struct folio *folio,
- struct writeback_control *wbc,
- struct nfs_pageio_descriptor *pgio)
+static int nfs_do_writepage(struct folio *folio, struct writeback_control *wbc,
+ struct nfs_pageio_descriptor *pgio)
{
struct nfs_page *req;
- int ret = 0;
+ int ret;
+
+ nfs_pageio_cond_complete(pgio, folio->index);
req = nfs_lock_and_join_requests(folio);
if (!req)
- goto out;
- ret = PTR_ERR(req);
+ return 0;
if (IS_ERR(req))
- goto out;
+ return PTR_ERR(req);
+ trace_nfs_do_writepage(req);
nfs_folio_set_writeback(folio);
WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
@@ -652,7 +602,6 @@ static int nfs_page_async_flush(struct folio *folio,
if (nfs_error_is_fatal_on_server(ret))
goto out_launder;
- ret = 0;
if (!nfs_pageio_add_request(pgio, req)) {
ret = pgio->pg_error;
/*
@@ -660,28 +609,20 @@ static int nfs_page_async_flush(struct folio *folio,
*/
if (nfs_error_is_fatal_on_server(ret))
goto out_launder;
- if (wbc->sync_mode == WB_SYNC_NONE)
- ret = AOP_WRITEPAGE_ACTIVATE;
folio_redirty_for_writepage(wbc, folio);
nfs_redirty_request(req);
pgio->pg_error = 0;
- } else
- nfs_add_stats(folio->mapping->host,
- NFSIOS_WRITEPAGES, 1);
-out:
- return ret;
+ return ret;
+ }
+
+ nfs_add_stats(folio->mapping->host, NFSIOS_WRITEPAGES, 1);
+ return 0;
+
out_launder:
nfs_write_error(req, ret);
return 0;
}
-static int nfs_do_writepage(struct folio *folio, struct writeback_control *wbc,
- struct nfs_pageio_descriptor *pgio)
-{
- nfs_pageio_cond_complete(pgio, folio->index);
- return nfs_page_async_flush(folio, wbc, pgio);
-}
-
/*
* Write an mmapped page to the server.
*/
@@ -701,17 +642,6 @@ static int nfs_writepage_locked(struct folio *folio,
return err;
}
-static int nfs_writepages_callback(struct folio *folio,
- struct writeback_control *wbc, void *data)
-{
- int ret;
-
- ret = nfs_do_writepage(folio, wbc, data);
- if (ret != AOP_WRITEPAGE_ACTIVATE)
- folio_unlock(folio);
- return ret;
-}
-
static void nfs_io_completion_commit(void *inode)
{
nfs_commit_inode(inode, 0);
@@ -727,18 +657,20 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
int priority = 0;
int err;
+ trace_nfs_writepages(inode, wbc->range_start, wbc->range_end - wbc->range_start);
+
/* Wait with writeback until write congestion eases */
if (wbc->sync_mode == WB_SYNC_NONE && nfss->write_congested) {
err = wait_event_killable(nfss->write_congestion_wait,
nfss->write_congested == 0);
if (err)
- return err;
+ goto out_err;
}
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
if (!(mntflags & NFS_MOUNT_WRITE_EAGER) || wbc->for_kupdate ||
- wbc->for_background || wbc->for_sync || wbc->for_reclaim) {
+ wbc->for_background || wbc->for_sync) {
ioc = nfs_io_completion_alloc(GFP_KERNEL);
if (ioc)
nfs_io_completion_init(ioc, nfs_io_completion_commit,
@@ -747,11 +679,15 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
}
do {
+ struct folio *folio = NULL;
+
nfs_pageio_init_write(&pgio, inode, priority, false,
&nfs_async_write_completion_ops);
pgio.pg_io_completion = ioc;
- err = write_cache_pages(mapping, wbc, nfs_writepages_callback,
- &pgio);
+ while ((folio = writeback_iter(mapping, wbc, folio, &err))) {
+ err = nfs_do_writepage(folio, wbc, &pgio);
+ folio_unlock(folio);
+ }
pgio.pg_error = 0;
nfs_pageio_complete(&pgio);
if (err == -EAGAIN && mntflags & NFS_MOUNT_SOFTERR)
@@ -759,10 +695,10 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
} while (err < 0 && !nfs_error_is_fatal(err));
nfs_io_completion_put(ioc);
- if (err < 0)
- goto out_err;
- return 0;
+ if (err > 0)
+ err = 0;
out_err:
+ trace_nfs_writepages_done(inode, wbc->range_start, wbc->range_end - wbc->range_start, err);
return err;
}
@@ -800,7 +736,8 @@ static void nfs_inode_remove_request(struct nfs_page *req)
{
struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));
- if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
+ nfs_page_group_lock(req);
+ if (nfs_page_group_sync_on_bit_locked(req, PG_REMOVE)) {
struct folio *folio = nfs_page_to_folio(req->wb_head);
struct address_space *mapping = folio->mapping;
@@ -811,7 +748,10 @@ static void nfs_inode_remove_request(struct nfs_page *req)
clear_bit(PG_MAPPED, &req->wb_head->wb_flags);
}
spin_unlock(&mapping->i_private_lock);
+
+ folio_end_dropbehind(folio);
}
+ nfs_page_group_unlock(req);
if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
atomic_long_dec(&nfsi->nrequests);
@@ -991,7 +931,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
req->wb_nio = 0;
memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
nfs_mark_request_commit(req, hdr->lseg, &cinfo,
- hdr->pgio_mirror_idx);
+ hdr->ds_commit_idx);
goto next;
}
remove_req:
@@ -1082,11 +1022,12 @@ static struct nfs_page *nfs_try_to_update_request(struct folio *folio,
unsigned int end;
int error;
+ trace_nfs_try_to_update_request(folio_inode(folio), offset, bytes);
end = offset + bytes;
req = nfs_lock_and_join_requests(folio);
if (IS_ERR_OR_NULL(req))
- return req;
+ goto out;
rqend = req->wb_offset + req->wb_bytes;
/*
@@ -1108,6 +1049,9 @@ static struct nfs_page *nfs_try_to_update_request(struct folio *folio,
else
req->wb_bytes = rqend - req->wb_offset;
req->wb_nio = 0;
+out:
+ trace_nfs_try_to_update_request_done(folio_inode(folio), offset, bytes,
+ PTR_ERR_OR_ZERO(req));
return req;
out_flushme:
/*
@@ -1118,6 +1062,7 @@ out_flushme:
nfs_mark_request_dirty(req);
nfs_unlock_and_release_request(req);
error = nfs_wb_folio(folio->mapping->host, folio);
+ trace_nfs_try_to_update_request_done(folio_inode(folio), offset, bytes, error);
return (error < 0) ? ERR_PTR(error) : NULL;
}
@@ -1155,6 +1100,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx,
req = nfs_setup_write_request(ctx, folio, offset, count);
if (IS_ERR(req))
return PTR_ERR(req);
+ trace_nfs_writepage_setup(req);
/* Update file length */
nfs_grow_file(folio, offset, count);
nfs_mark_uptodate(req);
@@ -1355,6 +1301,8 @@ int nfs_update_folio(struct file *file, struct folio *folio,
nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
+ trace_nfs_update_folio(inode, offset, count);
+
dprintk("NFS: nfs_update_folio(%pD2 %d@%lld)\n", file, count,
(long long)(folio_pos(folio) + offset));
@@ -1374,6 +1322,7 @@ int nfs_update_folio(struct file *file, struct folio *folio,
if (status < 0)
nfs_set_pageerror(mapping);
out:
+ trace_nfs_update_folio_done(inode, offset, count, status);
dprintk("NFS: nfs_update_folio returns %d (isize %lld)\n",
status, (long long)i_size_read(inode));
return status;
@@ -1586,7 +1535,8 @@ static int nfs_writeback_done(struct rpc_task *task,
/* Deal with the suid/sgid bit corner case */
if (nfs_should_remove_suid(inode)) {
spin_lock(&inode->i_lock);
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE
+ | NFS_INO_REVAL_FORCED);
spin_unlock(&inode->i_lock);
}
return 0;
@@ -1826,7 +1776,8 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
task_flags = RPC_TASK_MOVEABLE;
localio = nfs_local_open_fh(NFS_SERVER(inode)->nfs_client, data->cred,
- data->args.fh, data->context->mode);
+ data->args.fh, &data->context->nfl,
+ data->context->mode);
return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
data->mds_ops, how,
RPC_TASK_CRED_NOREF | task_flags, localio);
@@ -1870,7 +1821,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
nfs_mapping_set_error(folio, status);
nfs_inode_remove_request(req);
}
- dprintk_cont(", error = %d\n", status);
+ dprintk(", error = %d\n", status);
goto next;
}
@@ -1880,11 +1831,11 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
/* We have a match */
if (folio)
nfs_inode_remove_request(req);
- dprintk_cont(" OK\n");
+ dprintk(" OK\n");
goto next;
}
/* We have a mismatch. Write the page again */
- dprintk_cont(" mismatch\n");
+ dprintk(" mismatch\n");
nfs_mark_request_dirty(req);
atomic_long_inc(&NFS_I(data->inode)->redirtied_pages);
next:
@@ -2067,6 +2018,7 @@ int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio)
* release it */
nfs_inode_remove_request(req);
nfs_unlock_and_release_request(req);
+ folio_cancel_dirty(folio);
}
return ret;
@@ -2126,8 +2078,12 @@ int nfs_migrate_folio(struct address_space *mapping, struct folio *dst,
* that we can safely release the inode reference while holding
* the folio lock.
*/
- if (folio_test_private(src))
- return -EBUSY;
+ if (folio_test_private(src)) {
+ if (mode == MIGRATE_SYNC)
+ nfs_wb_folio(src->mapping->host, src);
+ if (folio_test_private(src))
+ return -EBUSY;
+ }
if (folio_test_private_2(src)) { /* [DEPRECATED] */
if (mode == MIGRATE_ASYNC)
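
The nfs_writepages() hunk above replaces the write_cache_pages() callback with an open-coded writeback_iter() loop, so per-folio tracing and error handling can sit inline in the caller instead of being threaded through a callback. The shape of that callback-to-iterator conversion can be sketched with a toy collection in plain C; every name below is invented for illustration and none of it is kernel API:

    #include <stdio.h>

    /* Toy "collection" standing in for an address_space of dirty folios. */
    static int items[] = { 1, 2, 3 };
    #define NITEMS (sizeof(items) / sizeof(items[0]))

    static int process(int item) { printf("write %d\n", item); return 0; }

    /* Callback style: the library owns the loop and calls back per item. */
    static int for_each_item(int (*cb)(int item, void *data), void *data)
    {
            int err = 0;

            for (size_t i = 0; i < NITEMS && !err; i++)
                    err = cb(items[i], data);
            return err;
    }

    static int write_cb(int item, void *data) { (void)data; return process(item); }

    /* Iterator style: the caller owns the loop and advances an iterator,
     * stopping early once *err becomes non-zero. */
    static int *iter_next(int *cur, const int *err)
    {
            if (*err)
                    return NULL;
            if (!cur)
                    return &items[0];
            return (cur + 1 < items + NITEMS) ? cur + 1 : NULL;
    }

    int main(void)
    {
            int err = 0;
            int *it = NULL;

            for_each_item(write_cb, NULL);           /* old shape */

            while ((it = iter_next(it, &err)))       /* new shape */
                    err = process(*it);
            return err;
    }
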
diff --git a/fs/nfs_common/Makefile b/fs/nfs_common/Makefile
index a5e54809701e..c10ead273ff2 100644
--- a/fs/nfs_common/Makefile
+++ b/fs/nfs_common/Makefile
@@ -6,8 +6,9 @@
obj-$(CONFIG_NFS_ACL_SUPPORT) += nfs_acl.o
nfs_acl-objs := nfsacl.o
+CFLAGS_localio_trace.o += -I$(src)
obj-$(CONFIG_NFS_COMMON_LOCALIO_SUPPORT) += nfs_localio.o
-nfs_localio-objs := nfslocalio.o
+nfs_localio-objs := nfslocalio.o localio_trace.o
obj-$(CONFIG_GRACE_PERIOD) += grace.o
obj-$(CONFIG_NFS_V4_2_SSC_HELPER) += nfs_ssc.o
diff --git a/fs/nfs_common/common.c b/fs/nfs_common/common.c
index 34a115176f97..af09aed09fd2 100644
--- a/fs/nfs_common/common.c
+++ b/fs/nfs_common/common.c
@@ -15,7 +15,7 @@ static const struct {
{ NFS_OK, 0 },
{ NFSERR_PERM, -EPERM },
{ NFSERR_NOENT, -ENOENT },
- { NFSERR_IO, -errno_NFSERR_IO},
+ { NFSERR_IO, -EIO },
{ NFSERR_NXIO, -ENXIO },
/* { NFSERR_EAGAIN, -EAGAIN }, */
{ NFSERR_ACCES, -EACCES },
@@ -45,7 +45,6 @@ static const struct {
{ NFSERR_SERVERFAULT, -EREMOTEIO },
{ NFSERR_BADTYPE, -EBADTYPE },
{ NFSERR_JUKEBOX, -EJUKEBOX },
- { -1, -EIO }
};
/**
@@ -59,26 +58,29 @@ int nfs_stat_to_errno(enum nfs_stat status)
{
int i;
- for (i = 0; nfs_errtbl[i].stat != -1; i++) {
+ for (i = 0; i < ARRAY_SIZE(nfs_errtbl); i++) {
if (nfs_errtbl[i].stat == (int)status)
return nfs_errtbl[i].errno;
}
- return nfs_errtbl[i].errno;
+ return -EIO;
}
EXPORT_SYMBOL_GPL(nfs_stat_to_errno);
/*
* We need to translate between nfs v4 status return values and
* the local errno values which may not be the same.
+ *
+ * nfs4_errtbl_common[] is consulted before the more specialized
+ * mappings in nfs4_errtbl[] or nfs4_errtbl_localio[].
*/
static const struct {
int stat;
int errno;
-} nfs4_errtbl[] = {
+} nfs4_errtbl_common[] = {
{ NFS4_OK, 0 },
{ NFS4ERR_PERM, -EPERM },
{ NFS4ERR_NOENT, -ENOENT },
- { NFS4ERR_IO, -errno_NFSERR_IO},
+ { NFS4ERR_IO, -EIO },
{ NFS4ERR_NXIO, -ENXIO },
{ NFS4ERR_ACCESS, -EACCES },
{ NFS4ERR_EXIST, -EEXIST },
@@ -98,15 +100,20 @@ static const struct {
{ NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
{ NFS4ERR_NOTSUPP, -ENOTSUPP },
{ NFS4ERR_TOOSMALL, -ETOOSMALL },
- { NFS4ERR_SERVERFAULT, -EREMOTEIO },
{ NFS4ERR_BADTYPE, -EBADTYPE },
- { NFS4ERR_LOCKED, -EAGAIN },
{ NFS4ERR_SYMLINK, -ELOOP },
- { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
{ NFS4ERR_DEADLOCK, -EDEADLK },
+};
+
+static const struct {
+ int stat;
+ int errno;
+} nfs4_errtbl[] = {
+ { NFS4ERR_SERVERFAULT, -EREMOTEIO },
+ { NFS4ERR_LOCKED, -EAGAIN },
+ { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
{ NFS4ERR_NOXATTR, -ENODATA },
{ NFS4ERR_XATTR2BIG, -E2BIG },
- { -1, -EIO }
};
/*
@@ -116,7 +123,14 @@ static const struct {
int nfs4_stat_to_errno(int stat)
{
int i;
- for (i = 0; nfs4_errtbl[i].stat != -1; i++) {
+
+ /* First check nfs4_errtbl_common */
+ for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_common); i++) {
+ if (nfs4_errtbl_common[i].stat == stat)
+ return nfs4_errtbl_common[i].errno;
+ }
+ /* Then check nfs4_errtbl */
+ for (i = 0; i < ARRAY_SIZE(nfs4_errtbl); i++) {
if (nfs4_errtbl[i].stat == stat)
return nfs4_errtbl[i].errno;
}
@@ -132,3 +146,56 @@ int nfs4_stat_to_errno(int stat)
return -stat;
}
EXPORT_SYMBOL_GPL(nfs4_stat_to_errno);
+
+/*
+ * This table is useful for conversion from local errno to NFS error.
+ * It provides more logically correct mappings for use with LOCALIO
+ * (which is focused on converting from errno to NFS status).
+ */
+static const struct {
+ int stat;
+ int errno;
+} nfs4_errtbl_localio[] = {
+ /* Map errors differently than nfs4_errtbl */
+ { NFS4ERR_IO, -EREMOTEIO },
+ { NFS4ERR_DELAY, -EAGAIN },
+ { NFS4ERR_FBIG, -E2BIG },
+ /* Map errors not handled by nfs4_errtbl */
+ { NFS4ERR_STALE, -EBADF },
+ { NFS4ERR_STALE, -EOPENSTALE },
+ { NFS4ERR_DELAY, -ETIMEDOUT },
+ { NFS4ERR_DELAY, -ERESTARTSYS },
+ { NFS4ERR_DELAY, -ENOMEM },
+ { NFS4ERR_IO, -ETXTBSY },
+ { NFS4ERR_IO, -EBUSY },
+ { NFS4ERR_SERVERFAULT, -ESERVERFAULT },
+ { NFS4ERR_SERVERFAULT, -ENFILE },
+ { NFS4ERR_IO, -EUCLEAN },
+ { NFS4ERR_PERM, -ENOKEY },
+};
+
+/*
+ * Convert an errno to an NFS error code for LOCALIO.
+ */
+__u32 nfs_localio_errno_to_nfs4_stat(int errno)
+{
+ int i;
+
+ /* First check nfs4_errtbl_common */
+ for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_common); i++) {
+ if (nfs4_errtbl_common[i].errno == errno)
+ return nfs4_errtbl_common[i].stat;
+ }
+ /* Then check nfs4_errtbl_localio */
+ for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_localio); i++) {
+ if (nfs4_errtbl_localio[i].errno == errno)
+ return nfs4_errtbl_localio[i].stat;
+ }
+ /* If we cannot translate the error, the recovery routines should
+ * handle it.
+ * Note: remaining NFSv4 error codes have values > 10000, so should
+ * not conflict with native Linux error codes.
+ */
+ return NFS4ERR_SERVERFAULT;
+}
+EXPORT_SYMBOL_GPL(nfs_localio_errno_to_nfs4_stat);
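
The helper added here performs a two-stage table lookup: nfs4_errtbl_common[] is tried first, then the LOCALIO-specific table, and anything unmatched falls back to NFS4ERR_SERVERFAULT. A minimal user-space model of that lookup, with invented table contents standing in for the real entries, is:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct errmap { int stat; int err; };

    /* Illustrative contents only; the real tables live in fs/nfs_common/common.c. */
    static const struct errmap tbl_common[]  = { { 1, -1 }, { 2, -2 } };
    static const struct errmap tbl_localio[] = { { 5, -70 }, { 10008, -11 } };

    /* Stand-in for NFS4ERR_SERVERFAULT, returned when nothing matches. */
    #define STAT_FALLBACK 10006

    static int errno_to_stat(int err)
    {
            size_t i;

            for (i = 0; i < ARRAY_SIZE(tbl_common); i++)    /* shared mappings first */
                    if (tbl_common[i].err == err)
                            return tbl_common[i].stat;

            for (i = 0; i < ARRAY_SIZE(tbl_localio); i++)   /* then the specialized table */
                    if (tbl_localio[i].err == err)
                            return tbl_localio[i].stat;

            return STAT_FALLBACK;
    }

    int main(void)
    {
            printf("%d %d %d\n", errno_to_stat(-2), errno_to_stat(-70),
                   errno_to_stat(-99));
            return 0;
    }

Iterating with ARRAY_SIZE() rather than a -1 sentinel is the same change the patch makes to nfs_stat_to_errno() and nfs4_stat_to_errno() above.
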
diff --git a/fs/nfs_common/localio_trace.c b/fs/nfs_common/localio_trace.c
new file mode 100644
index 000000000000..7decfe57abeb
--- /dev/null
+++ b/fs/nfs_common/localio_trace.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Trond Myklebust <trond.myklebust@hammerspace.com>
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ */
+#include <linux/nfs_fs.h>
+#include <linux/namei.h>
+
+#define CREATE_TRACE_POINTS
+#include "localio_trace.h"
diff --git a/fs/nfs_common/localio_trace.h b/fs/nfs_common/localio_trace.h
new file mode 100644
index 000000000000..4055aec9ff8d
--- /dev/null
+++ b/fs/nfs_common/localio_trace.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Trond Myklebust <trond.myklebust@hammerspace.com>
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nfs_localio
+
+#if !defined(_TRACE_NFS_COMMON_LOCALIO_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NFS_COMMON_LOCALIO_H
+
+#include <linux/tracepoint.h>
+
+#include <trace/misc/fs.h>
+#include <trace/misc/nfs.h>
+#include <trace/misc/sunrpc.h>
+
+DECLARE_EVENT_CLASS(nfs_local_client_event,
+ TP_PROTO(
+ const struct nfs_client *clp
+ ),
+
+ TP_ARGS(clp),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, protocol)
+ __string(server, clp->cl_hostname)
+ ),
+
+ TP_fast_assign(
+ __entry->protocol = clp->rpc_ops->version;
+ __assign_str(server);
+ ),
+
+ TP_printk(
+ "server=%s NFSv%u", __get_str(server), __entry->protocol
+ )
+);
+
+#define DEFINE_NFS_LOCAL_CLIENT_EVENT(name) \
+ DEFINE_EVENT(nfs_local_client_event, name, \
+ TP_PROTO( \
+ const struct nfs_client *clp \
+ ), \
+ TP_ARGS(clp))
+
+DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_localio_enable_client);
+DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_localio_disable_client);
+
+#endif /* _TRACE_NFS_COMMON_LOCALIO_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE localio_trace
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c
index ea382b75b26c..e2eaac14fd8e 100644
--- a/fs/nfs_common/nfsacl.c
+++ b/fs/nfs_common/nfsacl.c
@@ -42,7 +42,7 @@ struct nfsacl_encode_desc {
};
struct nfsacl_simple_acl {
- struct posix_acl acl;
+ struct posix_acl_hdr acl;
struct posix_acl_entry ace[4];
};
@@ -112,7 +112,8 @@ int nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
xdr_encode_word(buf, base, entries))
return -EINVAL;
if (encode_entries && acl && acl->a_count == 3) {
- struct posix_acl *acl2 = &aclbuf.acl;
+ struct posix_acl *acl2 =
+ container_of(&aclbuf.acl, struct posix_acl, hdr);
/* Avoid the use of posix_acl_alloc(). nfsacl_encode() is
* invoked in contexts where a memory allocation failure is
@@ -177,7 +178,8 @@ bool nfs_stream_encode_acl(struct xdr_stream *xdr, struct inode *inode,
return false;
if (encode_entries && acl && acl->a_count == 3) {
- struct posix_acl *acl2 = &aclbuf.acl;
+ struct posix_acl *acl2 =
+ container_of(&aclbuf.acl, struct posix_acl, hdr);
/* Avoid the use of posix_acl_alloc(). nfsacl_encode() is
* invoked in contexts where a memory allocation failure is
diff --git a/fs/nfs_common/nfslocalio.c b/fs/nfs_common/nfslocalio.c
index a74ec08f6c96..dd715cdb6c04 100644
--- a/fs/nfs_common/nfslocalio.c
+++ b/fs/nfs_common/nfslocalio.c
@@ -7,38 +7,67 @@
#include <linux/module.h>
#include <linux/list.h>
#include <linux/nfslocalio.h>
+#include <linux/nfs3.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
#include <net/netns/generic.h>
+#include "localio_trace.h"
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("NFS localio protocol bypass support");
-static DEFINE_SPINLOCK(nfs_uuid_lock);
+static DEFINE_SPINLOCK(nfs_uuids_lock);
/*
* Global list of nfs_uuid_t instances
- * that is protected by nfs_uuid_lock.
+ * that is protected by nfs_uuids_lock.
*/
static LIST_HEAD(nfs_uuids);
+/*
+ * Lock ordering:
+ * 1: nfs_uuid->lock
+ * 2: nfs_uuids_lock
+ * 3: nfs_uuid->list_lock (aka nn->local_clients_lock)
+ *
+ * May skip locks in select cases, but never hold multiple
+ * locks out of order.
+ */
+
void nfs_uuid_init(nfs_uuid_t *nfs_uuid)
{
- nfs_uuid->net = NULL;
+ RCU_INIT_POINTER(nfs_uuid->net, NULL);
nfs_uuid->dom = NULL;
+ nfs_uuid->list_lock = NULL;
INIT_LIST_HEAD(&nfs_uuid->list);
+ INIT_LIST_HEAD(&nfs_uuid->files);
+ spin_lock_init(&nfs_uuid->lock);
+ nfs_uuid->nfs3_localio_probe_count = 0;
}
EXPORT_SYMBOL_GPL(nfs_uuid_init);
bool nfs_uuid_begin(nfs_uuid_t *nfs_uuid)
{
- spin_lock(&nfs_uuid_lock);
- /* Is this nfs_uuid already in use? */
+ spin_lock(&nfs_uuid->lock);
+ if (rcu_access_pointer(nfs_uuid->net)) {
+ /* This nfs_uuid is already in use */
+ spin_unlock(&nfs_uuid->lock);
+ return false;
+ }
+
+ spin_lock(&nfs_uuids_lock);
if (!list_empty(&nfs_uuid->list)) {
- spin_unlock(&nfs_uuid_lock);
+ /* This nfs_uuid is already in use */
+ spin_unlock(&nfs_uuids_lock);
+ spin_unlock(&nfs_uuid->lock);
return false;
}
- uuid_gen(&nfs_uuid->uuid);
list_add_tail(&nfs_uuid->list, &nfs_uuids);
- spin_unlock(&nfs_uuid_lock);
+ spin_unlock(&nfs_uuids_lock);
+
+ uuid_gen(&nfs_uuid->uuid);
+ spin_unlock(&nfs_uuid->lock);
return true;
}
@@ -46,12 +75,16 @@ EXPORT_SYMBOL_GPL(nfs_uuid_begin);
void nfs_uuid_end(nfs_uuid_t *nfs_uuid)
{
- if (nfs_uuid->net == NULL) {
- spin_lock(&nfs_uuid_lock);
- if (nfs_uuid->net == NULL)
+ if (!rcu_access_pointer(nfs_uuid->net)) {
+ spin_lock(&nfs_uuid->lock);
+ if (!rcu_access_pointer(nfs_uuid->net)) {
+ /* Not local, remove from nfs_uuids */
+ spin_lock(&nfs_uuids_lock);
list_del_init(&nfs_uuid->list);
- spin_unlock(&nfs_uuid_lock);
- }
+ spin_unlock(&nfs_uuids_lock);
+ }
+ spin_unlock(&nfs_uuid->lock);
+ }
}
EXPORT_SYMBOL_GPL(nfs_uuid_end);
@@ -69,68 +102,171 @@ static nfs_uuid_t * nfs_uuid_lookup_locked(const uuid_t *uuid)
static struct module *nfsd_mod;
void nfs_uuid_is_local(const uuid_t *uuid, struct list_head *list,
- struct net *net, struct auth_domain *dom,
- struct module *mod)
+ spinlock_t *list_lock, struct net *net,
+ struct auth_domain *dom, struct module *mod)
{
nfs_uuid_t *nfs_uuid;
- spin_lock(&nfs_uuid_lock);
+ spin_lock(&nfs_uuids_lock);
nfs_uuid = nfs_uuid_lookup_locked(uuid);
- if (nfs_uuid) {
- kref_get(&dom->ref);
- nfs_uuid->dom = dom;
- /*
- * We don't hold a ref on the net, but instead put
- * ourselves on a list so the net pointer can be
- * invalidated.
- */
- list_move(&nfs_uuid->list, list);
- rcu_assign_pointer(nfs_uuid->net, net);
-
- __module_get(mod);
- nfsd_mod = mod;
+ if (!nfs_uuid) {
+ spin_unlock(&nfs_uuids_lock);
+ return;
}
- spin_unlock(&nfs_uuid_lock);
+
+ /*
+ * We don't hold a ref on the net, but instead put
+ * ourselves on @list (nn->local_clients) so the net
+ * pointer can be invalidated.
+ */
+ spin_lock(list_lock); /* list_lock is nn->local_clients_lock */
+ list_move(&nfs_uuid->list, list);
+ spin_unlock(list_lock);
+
+ spin_unlock(&nfs_uuids_lock);
+ /* Once nfs_uuid is parented to @list, avoid global nfs_uuids_lock */
+ spin_lock(&nfs_uuid->lock);
+
+ __module_get(mod);
+ nfsd_mod = mod;
+
+ nfs_uuid->list_lock = list_lock;
+ kref_get(&dom->ref);
+ nfs_uuid->dom = dom;
+ rcu_assign_pointer(nfs_uuid->net, net);
+ spin_unlock(&nfs_uuid->lock);
}
EXPORT_SYMBOL_GPL(nfs_uuid_is_local);
-static void nfs_uuid_put_locked(nfs_uuid_t *nfs_uuid)
+void nfs_localio_enable_client(struct nfs_client *clp)
{
- if (nfs_uuid->net) {
- module_put(nfsd_mod);
- nfs_uuid->net = NULL;
+ /* nfs_uuid_is_local() does the actual enablement */
+ trace_nfs_localio_enable_client(clp);
+}
+EXPORT_SYMBOL_GPL(nfs_localio_enable_client);
+
+/*
+ * Cleanup the nfs_uuid_t embedded in an nfs_client.
+ * This is the long-form of nfs_uuid_init().
+ */
+static bool nfs_uuid_put(nfs_uuid_t *nfs_uuid)
+{
+ struct nfs_file_localio *nfl;
+
+ spin_lock(&nfs_uuid->lock);
+ if (unlikely(!rcu_access_pointer(nfs_uuid->net))) {
+ spin_unlock(&nfs_uuid->lock);
+ return false;
}
+ RCU_INIT_POINTER(nfs_uuid->net, NULL);
+
if (nfs_uuid->dom) {
auth_domain_put(nfs_uuid->dom);
nfs_uuid->dom = NULL;
}
- list_del_init(&nfs_uuid->list);
+
+	/* Walk the list of files and ensure their last references are dropped */
+
+ while ((nfl = list_first_entry_or_null(&nfs_uuid->files,
+ struct nfs_file_localio,
+ list)) != NULL) {
+ /* If nfs_uuid is already NULL, nfs_close_local_fh is
+ * closing and we must wait, else we unlink and close.
+ */
+ if (rcu_access_pointer(nfl->nfs_uuid) == NULL) {
+ /* nfs_close_local_fh() is doing the
+			 * close and we must wait until it unlinks.
+ */
+ wait_var_event_spinlock(nfs_uuid,
+ list_first_entry_or_null(
+ &nfs_uuid->files,
+ struct nfs_file_localio,
+ list) != nfl,
+ &nfs_uuid->lock);
+ continue;
+ }
+
+ /* Remove nfl from nfs_uuid->files list */
+ list_del_init(&nfl->list);
+ spin_unlock(&nfs_uuid->lock);
+
+ nfs_to_nfsd_file_put_local(&nfl->ro_file);
+ nfs_to_nfsd_file_put_local(&nfl->rw_file);
+ cond_resched();
+
+ spin_lock(&nfs_uuid->lock);
+ /* Now we can allow racing nfs_close_local_fh() to
+ * skip the locking.
+ */
+ store_release_wake_up(&nfl->nfs_uuid, RCU_INITIALIZER(NULL));
+ }
+
+ /* Remove client from nn->local_clients */
+ if (nfs_uuid->list_lock) {
+ spin_lock(nfs_uuid->list_lock);
+ BUG_ON(list_empty(&nfs_uuid->list));
+ list_del_init(&nfs_uuid->list);
+ spin_unlock(nfs_uuid->list_lock);
+ nfs_uuid->list_lock = NULL;
+ }
+
+ module_put(nfsd_mod);
+ spin_unlock(&nfs_uuid->lock);
+
+ return true;
}
-void nfs_uuid_invalidate_clients(struct list_head *list)
+void nfs_localio_disable_client(struct nfs_client *clp)
{
+ if (nfs_uuid_put(&clp->cl_uuid))
+ trace_nfs_localio_disable_client(clp);
+}
+EXPORT_SYMBOL_GPL(nfs_localio_disable_client);
+
+void nfs_localio_invalidate_clients(struct list_head *nn_local_clients,
+ spinlock_t *nn_local_clients_lock)
+{
+ LIST_HEAD(local_clients);
nfs_uuid_t *nfs_uuid, *tmp;
+ struct nfs_client *clp;
- spin_lock(&nfs_uuid_lock);
- list_for_each_entry_safe(nfs_uuid, tmp, list, list)
- nfs_uuid_put_locked(nfs_uuid);
- spin_unlock(&nfs_uuid_lock);
+ spin_lock(nn_local_clients_lock);
+ list_splice_init(nn_local_clients, &local_clients);
+ spin_unlock(nn_local_clients_lock);
+ list_for_each_entry_safe(nfs_uuid, tmp, &local_clients, list) {
+ if (WARN_ON(nfs_uuid->list_lock != nn_local_clients_lock))
+ break;
+ clp = container_of(nfs_uuid, struct nfs_client, cl_uuid);
+ nfs_localio_disable_client(clp);
+ }
}
-EXPORT_SYMBOL_GPL(nfs_uuid_invalidate_clients);
+EXPORT_SYMBOL_GPL(nfs_localio_invalidate_clients);
-void nfs_uuid_invalidate_one_client(nfs_uuid_t *nfs_uuid)
+static int nfs_uuid_add_file(nfs_uuid_t *nfs_uuid, struct nfs_file_localio *nfl)
{
- if (nfs_uuid->net) {
- spin_lock(&nfs_uuid_lock);
- nfs_uuid_put_locked(nfs_uuid);
- spin_unlock(&nfs_uuid_lock);
+ int ret = 0;
+
+ /* Add nfl to nfs_uuid->files if it isn't already */
+ spin_lock(&nfs_uuid->lock);
+ if (rcu_access_pointer(nfs_uuid->net) == NULL) {
+ ret = -ENXIO;
+ } else if (list_empty(&nfl->list)) {
+ rcu_assign_pointer(nfl->nfs_uuid, nfs_uuid);
+ list_add_tail(&nfl->list, &nfs_uuid->files);
}
+ spin_unlock(&nfs_uuid->lock);
+ return ret;
}
-EXPORT_SYMBOL_GPL(nfs_uuid_invalidate_one_client);
+/*
+ * Caller is responsible for calling nfsd_net_put and
+ * nfsd_file_put (via nfs_to_nfsd_file_put_local).
+ */
struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid,
struct rpc_clnt *rpc_clnt, const struct cred *cred,
- const struct nfs_fh *nfs_fh, const fmode_t fmode)
+ const struct nfs_fh *nfs_fh, struct nfs_file_localio *nfl,
+ struct nfsd_file __rcu **pnf,
+ const fmode_t fmode)
{
struct net *net;
struct nfsd_file *localio;
@@ -139,7 +275,7 @@ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid,
* Not running in nfsd context, so must safely get reference on nfsd_serv.
* But the server may already be shutting down, if so disallow new localio.
* uuid->net is NOT a counted reference, but rcu_read_lock() ensures that
- * if uuid->net is not NULL, then calling nfsd_serv_try_get() is safe
+ * if uuid->net is not NULL, then calling nfsd_net_try_get() is safe
* and if it succeeds we will have an implied reference to the net.
*
* Otherwise NFS may not have ref on NFSD and therefore cannot safely
@@ -147,21 +283,72 @@ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid,
*/
rcu_read_lock();
net = rcu_dereference(uuid->net);
- if (!net || !nfs_to->nfsd_serv_try_get(net)) {
+ if (!net || !nfs_to->nfsd_net_try_get(net)) {
rcu_read_unlock();
return ERR_PTR(-ENXIO);
}
rcu_read_unlock();
- /* We have an implied reference to net thanks to nfsd_serv_try_get */
- localio = nfs_to->nfsd_open_local_fh(net, uuid->dom, rpc_clnt,
- cred, nfs_fh, fmode);
- if (IS_ERR(localio))
- nfs_to_nfsd_net_put(net);
+ /* We have an implied reference to net thanks to nfsd_net_try_get */
+ localio = nfs_to->nfsd_open_local_fh(net, uuid->dom, rpc_clnt, cred,
+ nfs_fh, pnf, fmode);
+ if (!IS_ERR(localio) && nfs_uuid_add_file(uuid, nfl) < 0) {
+ /* Delete the cached file when racing with nfs_uuid_put() */
+ nfs_to_nfsd_file_put_local(pnf);
+ }
+ nfs_to_nfsd_net_put(net);
return localio;
}
EXPORT_SYMBOL_GPL(nfs_open_local_fh);
+void nfs_close_local_fh(struct nfs_file_localio *nfl)
+{
+ nfs_uuid_t *nfs_uuid;
+
+ rcu_read_lock();
+ nfs_uuid = rcu_dereference(nfl->nfs_uuid);
+ if (!nfs_uuid) {
+ /* regular (non-LOCALIO) NFS will hammer this */
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock(&nfs_uuid->lock);
+ if (!rcu_access_pointer(nfl->nfs_uuid)) {
+ /* nfs_uuid_put has finished here */
+ spin_unlock(&nfs_uuid->lock);
+ rcu_read_unlock();
+ return;
+ }
+ if (list_empty(&nfl->list)) {
+ /* nfs_uuid_put() has started closing files, wait for it
+ * to finish
+ */
+ spin_unlock(&nfs_uuid->lock);
+ rcu_read_unlock();
+ wait_var_event(&nfl->nfs_uuid,
+ rcu_access_pointer(nfl->nfs_uuid) == NULL);
+ return;
+ }
+ /* tell nfs_uuid_put() to wait for us */
+ RCU_INIT_POINTER(nfl->nfs_uuid, NULL);
+ spin_unlock(&nfs_uuid->lock);
+ rcu_read_unlock();
+
+ nfs_to_nfsd_file_put_local(&nfl->ro_file);
+ nfs_to_nfsd_file_put_local(&nfl->rw_file);
+
+ /* Remove nfl from nfs_uuid->files list and signal nfs_uuid_put()
+ * that we are done. The moment we drop the spinlock the
+ * nfs_uuid could be freed.
+ */
+ spin_lock(&nfs_uuid->lock);
+ list_del_init(&nfl->list);
+ wake_up_var_locked(nfs_uuid, &nfs_uuid->lock);
+ spin_unlock(&nfs_uuid->lock);
+}
+EXPORT_SYMBOL_GPL(nfs_close_local_fh);
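nfs_close_local_fh() and nfs_uuid_put() coordinate through a wake-up keyed on &nfl->nfs_uuid: one side publishes NULL with release semantics and wakes, the other waits for the pointer to become NULL. A stripped-down sketch of that handshake, using plain wake_up_var() and hypothetical names rather than the locked and store-release helpers above:

/* Sketch of the wait/wake handshake; not the nfs_localio code itself. */
#include <linux/wait_bit.h>
#include <linux/atomic.h>

struct handle {
	void *owner;	/* non-NULL while teardown may still touch this handle */
};

static void owner_finished(struct handle *h)
{
	/* release ordering: prior teardown stores are visible to the waiter */
	smp_store_release(&h->owner, NULL);
	wake_up_var(&h->owner);
}

static void wait_for_owner(struct handle *h)
{
	wait_var_event(&h->owner, smp_load_acquire(&h->owner) == NULL);
}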
+
/*
* The NFS LOCALIO code needs to call into NFSD using various symbols,
* but cannot be statically linked, because that will make the NFS
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index c0bd1509ccd4..0b5c1a0bf1cf 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -4,6 +4,9 @@ config NFSD
depends on INET
depends on FILE_LOCKING
depends on FSNOTIFY
+ select CRC32
+ select CRYPTO_LIB_MD5 if NFSD_LEGACY_CLIENT_TRACKING
+ select CRYPTO_LIB_SHA256 if NFSD_V4
select LOCKD
select SUNRPC
select EXPORTFS
@@ -75,9 +78,7 @@ config NFSD_V4
depends on NFSD && PROC_FS
select FS_POSIX_ACL
select RPCSEC_GSS_KRB5
- select CRYPTO
- select CRYPTO_MD5
- select CRYPTO_SHA256
+ select CRYPTO # required by RPCSEC_GSS_KRB5
select GRACE_PERIOD
select NFS_V4_2_SSC_HELPER if NFS_V4_2
help
@@ -163,7 +164,7 @@ config NFSD_V4_SECURITY_LABEL
config NFSD_LEGACY_CLIENT_TRACKING
bool "Support legacy NFSv4 client tracking methods (DEPRECATED)"
depends on NFSD_V4
- default y
+ default n
help
The NFSv4 server needs to store a small amount of information on
stable storage in order to handle state recovery after reboot. Most
@@ -172,6 +173,16 @@ config NFSD_LEGACY_CLIENT_TRACKING
recoverydir, or spawn a process directly using a usermodehelper
upcall.
- These legacy client tracking methods have proven to be probelmatic
+ These legacy client tracking methods have proven to be problematic
and will be removed in the future. Say Y here if you need support
for them in the interim.
+
+config NFSD_V4_DELEG_TIMESTAMPS
+ bool "Support delegated timestamps"
+ depends on NFSD_V4
+ default n
+ help
+ NFSD implements delegated timestamps according to
+ draft-ietf-nfsv4-delstid-08 "Extending the Opening of Files". This
+ is currently an experimental feature and is therefore left disabled
+ by default.
diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile
index 18cbd3fa7691..55744bb786c9 100644
--- a/fs/nfsd/Makefile
+++ b/fs/nfsd/Makefile
@@ -18,9 +18,24 @@ nfsd-$(CONFIG_NFSD_V2) += nfsproc.o nfsxdr.o
nfsd-$(CONFIG_NFSD_V2_ACL) += nfs2acl.o
nfsd-$(CONFIG_NFSD_V3_ACL) += nfs3acl.o
nfsd-$(CONFIG_NFSD_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \
- nfs4acl.o nfs4callback.o nfs4recover.o
+ nfs4acl.o nfs4callback.o nfs4recover.o nfs4xdr_gen.o
nfsd-$(CONFIG_NFSD_PNFS) += nfs4layouts.o
nfsd-$(CONFIG_NFSD_BLOCKLAYOUT) += blocklayout.o blocklayoutxdr.o
nfsd-$(CONFIG_NFSD_SCSILAYOUT) += blocklayout.o blocklayoutxdr.o
nfsd-$(CONFIG_NFSD_FLEXFILELAYOUT) += flexfilelayout.o flexfilelayoutxdr.o
nfsd-$(CONFIG_NFS_LOCALIO) += localio.o
+nfsd-$(CONFIG_DEBUG_FS) += debugfs.o
+
+
+.PHONY: xdrgen
+
+xdrgen: ../../include/linux/sunrpc/xdrgen/nfs4_1.h nfs4xdr_gen.h nfs4xdr_gen.c
+
+../../include/linux/sunrpc/xdrgen/nfs4_1.h: ../../Documentation/sunrpc/xdr/nfs4_1.x
+ ../../tools/net/sunrpc/xdrgen/xdrgen definitions $< > $@
+
+nfs4xdr_gen.h: ../../Documentation/sunrpc/xdr/nfs4_1.x
+ ../../tools/net/sunrpc/xdrgen/xdrgen declarations $< > $@
+
+nfs4xdr_gen.c: ../../Documentation/sunrpc/xdr/nfs4_1.x
+ ../../tools/net/sunrpc/xdrgen/xdrgen source $< > $@
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 93e33d1ee891..4dc327e02456 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -27,7 +27,7 @@ int nfsd_setuser(struct svc_cred *cred, struct svc_export *exp)
int flags = nfsexp_flags(cred, exp);
/* discard any old override before preparing the new set */
- revert_creds(get_cred(current_real_cred()));
+ put_cred(revert_creds(get_cred(current_real_cred())));
new = prepare_creds();
if (!new)
return -ENOMEM;
@@ -80,7 +80,6 @@ int nfsd_setuser(struct svc_cred *cred, struct svc_export *exp)
new->cap_effective = cap_raise_nfsd_set(new->cap_effective,
new->cap_permitted);
put_cred(override_creds(new));
- put_cred(new);
return 0;
oom:
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index 08a20e5bcf7f..afa16d7a8013 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -13,67 +13,49 @@
#include "pnfs.h"
#include "filecache.h"
#include "vfs.h"
+#include "trace.h"
#define NFSDDBG_FACILITY NFSDDBG_PNFS
+/*
+ * Get an extent from the file system that starts at offset or below
+ * and may be shorter than the requested length.
+ */
static __be32
-nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
- struct nfsd4_layoutget *args)
+nfsd4_block_map_extent(struct inode *inode, const struct svc_fh *fhp,
+ u64 offset, u64 length, u32 iomode, u64 minlength,
+ struct pnfs_block_extent *bex)
{
- struct nfsd4_layout_seg *seg = &args->lg_seg;
struct super_block *sb = inode->i_sb;
- u32 block_size = i_blocksize(inode);
- struct pnfs_block_extent *bex;
struct iomap iomap;
u32 device_generation = 0;
int error;
- if (seg->offset & (block_size - 1)) {
- dprintk("pnfsd: I/O misaligned\n");
- goto out_layoutunavailable;
- }
-
- /*
- * Some clients barf on non-zero block numbers for NONE or INVALID
- * layouts, so make sure to zero the whole structure.
- */
- error = -ENOMEM;
- bex = kzalloc(sizeof(*bex), GFP_KERNEL);
- if (!bex)
- goto out_error;
- args->lg_content = bex;
-
- error = sb->s_export_op->map_blocks(inode, seg->offset, seg->length,
- &iomap, seg->iomode != IOMODE_READ,
- &device_generation);
+ error = sb->s_export_op->map_blocks(inode, offset, length, &iomap,
+ iomode != IOMODE_READ, &device_generation);
if (error) {
if (error == -ENXIO)
- goto out_layoutunavailable;
- goto out_error;
- }
-
- if (iomap.length < args->lg_minlength) {
- dprintk("pnfsd: extent smaller than minlength\n");
- goto out_layoutunavailable;
+ return nfserr_layoutunavailable;
+ return nfserrno(error);
}
switch (iomap.type) {
case IOMAP_MAPPED:
- if (seg->iomode == IOMODE_READ)
+ if (iomode == IOMODE_READ)
bex->es = PNFS_BLOCK_READ_DATA;
else
bex->es = PNFS_BLOCK_READWRITE_DATA;
bex->soff = iomap.addr;
break;
case IOMAP_UNWRITTEN:
- if (seg->iomode & IOMODE_RW) {
+ if (iomode & IOMODE_RW) {
/*
* Crack monkey special case from section 2.3.1.
*/
- if (args->lg_minlength == 0) {
+ if (minlength == 0) {
dprintk("pnfsd: no soup for you!\n");
- goto out_layoutunavailable;
+ return nfserr_layoutunavailable;
}
bex->es = PNFS_BLOCK_INVALID_DATA;
@@ -82,7 +64,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
}
fallthrough;
case IOMAP_HOLE:
- if (seg->iomode == IOMODE_READ) {
+ if (iomode == IOMODE_READ) {
bex->es = PNFS_BLOCK_NONE_DATA;
break;
}
@@ -90,27 +72,107 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
case IOMAP_DELALLOC:
default:
WARN(1, "pnfsd: filesystem returned %d extent\n", iomap.type);
- goto out_layoutunavailable;
+ return nfserr_layoutunavailable;
}
error = nfsd4_set_deviceid(&bex->vol_id, fhp, device_generation);
if (error)
- goto out_error;
+ return nfserrno(error);
+
bex->foff = iomap.offset;
bex->len = iomap.length;
+ return nfs_ok;
+}
- seg->offset = iomap.offset;
- seg->length = iomap.length;
+static __be32
+nfsd4_block_proc_layoutget(struct svc_rqst *rqstp, struct inode *inode,
+ const struct svc_fh *fhp, struct nfsd4_layoutget *args)
+{
+ struct nfsd4_layout_seg *seg = &args->lg_seg;
+ struct pnfs_block_layout *bl;
+ struct pnfs_block_extent *first_bex, *last_bex;
+ u64 offset = seg->offset, length = seg->length;
+ u32 i, nr_extents_max, block_size = i_blocksize(inode);
+ __be32 nfserr;
- dprintk("GET: 0x%llx:0x%llx %d\n", bex->foff, bex->len, bex->es);
- return 0;
+ if (locks_in_grace(SVC_NET(rqstp)))
+ return nfserr_grace;
+
+ nfserr = nfserr_layoutunavailable;
+ if (seg->offset & (block_size - 1)) {
+ dprintk("pnfsd: I/O misaligned\n");
+ goto out_error;
+ }
+
+ /*
+ * RFC 8881, section 3.3.17:
+ * The layout4 data type defines a layout for a file.
+ *
+ * RFC 8881, section 18.43.3:
+ * The loga_maxcount field specifies the maximum layout size
+ * (in bytes) that the client can handle. If the size of the
+ * layout structure exceeds the size specified by maxcount,
+ * the metadata server will return the NFS4ERR_TOOSMALL error.
+ */
+ nfserr = nfserr_toosmall;
+ if (args->lg_maxcount < PNFS_BLOCK_LAYOUT4_SIZE +
+ PNFS_BLOCK_EXTENT_SIZE)
+ goto out_error;
+
+ /*
+ * Limit the maximum layout size to avoid allocating
+ * a large buffer on the server for each layout request.
+ */
+ nr_extents_max = (min(args->lg_maxcount, PAGE_SIZE) -
+ PNFS_BLOCK_LAYOUT4_SIZE) / PNFS_BLOCK_EXTENT_SIZE;
+
+ /*
+ * Some clients barf on non-zero block numbers for NONE or INVALID
+ * layouts, so make sure to zero the whole structure.
+ */
+ nfserr = nfserrno(-ENOMEM);
+ bl = kzalloc(struct_size(bl, extents, nr_extents_max), GFP_KERNEL);
+ if (!bl)
+ goto out_error;
+ bl->nr_extents = nr_extents_max;
+ args->lg_content = bl;
+
+ for (i = 0; i < bl->nr_extents; i++) {
+ struct pnfs_block_extent *bex = bl->extents + i;
+ u64 bex_length;
+
+ nfserr = nfsd4_block_map_extent(inode, fhp, offset, length,
+ seg->iomode, args->lg_minlength, bex);
+ if (nfserr != nfs_ok)
+ goto out_error;
+
+ bex_length = bex->len - (offset - bex->foff);
+ if (bex_length >= length) {
+ bl->nr_extents = i + 1;
+ break;
+ }
+
+ offset = bex->foff + bex->len;
+ length -= bex_length;
+ }
+
+ first_bex = bl->extents;
+ last_bex = bl->extents + bl->nr_extents - 1;
+
+ nfserr = nfserr_layoutunavailable;
+ length = last_bex->foff + last_bex->len - seg->offset;
+ if (length < args->lg_minlength) {
+ dprintk("pnfsd: extent smaller than minlength\n");
+ goto out_error;
+ }
+
+ seg->offset = first_bex->foff;
+ seg->length = last_bex->foff - first_bex->foff + last_bex->len;
+ return nfs_ok;
out_error:
seg->length = 0;
- return nfserrno(error);
-out_layoutunavailable:
- seg->length = 0;
- return nfserr_layoutunavailable;
+ return nfserr;
}
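The layoutget loop above stitches together multiple extents: each call may return an extent that starts at or below the requested offset, only part of which covers the still-missing range. The arithmetic on its own, as a small runnable userspace sketch with hypothetical types:

/* Sketch of the extent-walk arithmetic only; hypothetical types, not nfsd code. */
#include <stdint.h>
#include <stdio.h>

struct extent { uint64_t foff, len; };	/* may start at or below the requested offset */

static void walk(uint64_t offset, uint64_t length, const struct extent *ext, int n)
{
	for (int i = 0; i < n; i++) {
		/* bytes of this extent that cover the still-missing range */
		uint64_t useful = ext[i].len - (offset - ext[i].foff);

		printf("extent %d: [%llu, %llu)\n", i,
		       (unsigned long long)ext[i].foff,
		       (unsigned long long)(ext[i].foff + ext[i].len));
		if (useful >= length)
			break;			/* request fully covered */
		offset = ext[i].foff + ext[i].len;	/* resume just past this extent */
		length -= useful;
	}
}

int main(void)
{
	const struct extent map[] = { { 0, 4096 }, { 4096, 8192 } };

	walk(1024, 10000, map, 2);	/* ask for 10000 bytes starting at offset 1024 */
	return 0;
}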
static __be32
@@ -118,7 +180,6 @@ nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp,
struct iomap *iomaps, int nr_iomaps)
{
struct timespec64 mtime = inode_get_mtime(inode);
- loff_t new_size = lcp->lc_last_wr + 1;
struct iattr iattr = { .ia_valid = 0 };
int error;
@@ -128,9 +189,9 @@ nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp,
iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;
- if (new_size > i_size_read(inode)) {
+ if (lcp->lc_size_chg) {
iattr.ia_valid |= ATTR_SIZE;
- iattr.ia_size = new_size;
+ iattr.ia_size = lcp->lc_newsize;
}
error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps,
@@ -173,16 +234,20 @@ nfsd4_block_proc_getdeviceinfo(struct super_block *sb,
}
static __be32
-nfsd4_block_proc_layoutcommit(struct inode *inode,
+nfsd4_block_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp,
struct nfsd4_layoutcommit *lcp)
{
struct iomap *iomaps;
int nr_iomaps;
+ __be32 nfserr;
+
+ rqstp->rq_arg = lcp->lc_up_layout;
+ svcxdr_init_decode(rqstp);
- nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout,
- lcp->lc_up_len, &iomaps, i_blocksize(inode));
- if (nr_iomaps < 0)
- return nfserrno(nr_iomaps);
+ nfserr = nfsd4_block_decode_layoutupdate(&rqstp->rq_arg_stream,
+ &iomaps, &nr_iomaps, i_blocksize(inode));
+ if (nfserr != nfs_ok)
+ return nfserr;
return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps);
}
@@ -311,16 +376,20 @@ nfsd4_scsi_proc_getdeviceinfo(struct super_block *sb,
return nfserrno(nfsd4_block_get_device_info_scsi(sb, clp, gdp));
}
static __be32
-nfsd4_scsi_proc_layoutcommit(struct inode *inode,
+nfsd4_scsi_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp,
struct nfsd4_layoutcommit *lcp)
{
struct iomap *iomaps;
int nr_iomaps;
+ __be32 nfserr;
+
+ rqstp->rq_arg = lcp->lc_up_layout;
+ svcxdr_init_decode(rqstp);
- nr_iomaps = nfsd4_scsi_decode_layoutupdate(lcp->lc_up_layout,
- lcp->lc_up_len, &iomaps, i_blocksize(inode));
- if (nr_iomaps < 0)
- return nfserrno(nr_iomaps);
+ nfserr = nfsd4_scsi_decode_layoutupdate(&rqstp->rq_arg_stream,
+ &iomaps, &nr_iomaps, i_blocksize(inode));
+ if (nfserr != nfs_ok)
+ return nfserr;
return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps);
}
@@ -330,9 +399,12 @@ nfsd4_scsi_fence_client(struct nfs4_layout_stateid *ls, struct nfsd_file *file)
{
struct nfs4_client *clp = ls->ls_stid.sc_client;
struct block_device *bdev = file->nf_file->f_path.mnt->mnt_sb->s_bdev;
+ int status;
- bdev->bd_disk->fops->pr_ops->pr_preempt(bdev, NFSD_MDS_PR_KEY,
- nfsd4_scsi_pr_key(clp), 0, true);
+ status = bdev->bd_disk->fops->pr_ops->pr_preempt(bdev, NFSD_MDS_PR_KEY,
+ nfsd4_scsi_pr_key(clp),
+ PR_EXCLUSIVE_ACCESS_REG_ONLY, true);
+ trace_nfsd_pnfs_fence(clp, bdev->bd_disk->disk_name, status);
}
const struct nfsd4_layout_ops scsi_layout_ops = {
diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
index ce78f74715ee..196ef4245604 100644
--- a/fs/nfsd/blocklayoutxdr.c
+++ b/fs/nfsd/blocklayoutxdr.c
@@ -14,12 +14,25 @@
#define NFSDDBG_FACILITY NFSDDBG_PNFS
+/**
+ * nfsd4_block_encode_layoutget - encode block/scsi layout extent array
+ * @xdr: stream for data encoding
+ * @lgp: layoutget content, actually an array of extents to encode
+ *
+ * Encode the opaque loc_body field in the layoutget response. Since the
+ * pnfs_block_layout4 and pnfs_scsi_layout4 structures on the wire are
+ * the same, this function is used by both layout drivers.
+ *
+ * Return values:
+ * %nfs_ok: Success, all extents encoded into @xdr
+ * %nfserr_toosmall: Not enough space in @xdr to encode all the data
+ */
__be32
nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
const struct nfsd4_layoutget *lgp)
{
- const struct pnfs_block_extent *b = lgp->lg_content;
- int len = sizeof(__be32) + 5 * sizeof(__be64) + sizeof(__be32);
+ const struct pnfs_block_layout *bl = lgp->lg_content;
+ u32 i, len = sizeof(__be32) + bl->nr_extents * PNFS_BLOCK_EXTENT_SIZE;
__be32 *p;
p = xdr_reserve_space(xdr, sizeof(__be32) + len);
@@ -27,15 +40,19 @@ nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
return nfserr_toosmall;
*p++ = cpu_to_be32(len);
- *p++ = cpu_to_be32(1); /* we always return a single extent */
-
- p = xdr_encode_opaque_fixed(p, &b->vol_id,
- sizeof(struct nfsd4_deviceid));
- p = xdr_encode_hyper(p, b->foff);
- p = xdr_encode_hyper(p, b->len);
- p = xdr_encode_hyper(p, b->soff);
- *p++ = cpu_to_be32(b->es);
- return 0;
+ *p++ = cpu_to_be32(bl->nr_extents);
+
+ for (i = 0; i < bl->nr_extents; i++) {
+ const struct pnfs_block_extent *bex = bl->extents + i;
+
+ p = svcxdr_encode_deviceid4(p, &bex->vol_id);
+ p = xdr_encode_hyper(p, bex->foff);
+ p = xdr_encode_hyper(p, bex->len);
+ p = xdr_encode_hyper(p, bex->soff);
+ *p++ = cpu_to_be32(bex->es);
+ }
+
+ return nfs_ok;
}
static int
@@ -112,64 +129,86 @@ nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
return 0;
}
-int
-nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
- u32 block_size)
+/**
+ * nfsd4_block_decode_layoutupdate - decode the block layout extent array
+ * @xdr: subbuf set to the encoded array
+ * @iomapp: pointer to store the decoded extent array
+ * @nr_iomapsp: pointer to store the number of extents
+ * @block_size: alignment of extent offset and length
+ *
+ * This function decodes the opaque field of the layoutupdate4 structure
+ * in a layoutcommit request for the block layout driver. The field is
+ * actually an array of extents sent by the client. It also checks that
+ * the file offset, storage offset and length of each extent are aligned
+ * by @block_size.
+ *
+ * Return values:
+ * %nfs_ok: Successful decoding, @iomapp and @nr_iomapsp are valid
+ * %nfserr_bad_xdr: The encoded array in @xdr is invalid
+ * %nfserr_inval: An unaligned extent found
+ * %nfserr_delay: Failed to allocate memory for @iomapp
+ */
+__be32
+nfsd4_block_decode_layoutupdate(struct xdr_stream *xdr, struct iomap **iomapp,
+ int *nr_iomapsp, u32 block_size)
{
struct iomap *iomaps;
- u32 nr_iomaps, i;
+ u32 nr_iomaps, expected, len, i;
+ __be32 nfserr;
- if (len < sizeof(u32)) {
- dprintk("%s: extent array too small: %u\n", __func__, len);
- return -EINVAL;
- }
- len -= sizeof(u32);
- if (len % PNFS_BLOCK_EXTENT_SIZE) {
- dprintk("%s: extent array invalid: %u\n", __func__, len);
- return -EINVAL;
- }
+ if (xdr_stream_decode_u32(xdr, &nr_iomaps))
+ return nfserr_bad_xdr;
- nr_iomaps = be32_to_cpup(p++);
- if (nr_iomaps != len / PNFS_BLOCK_EXTENT_SIZE) {
- dprintk("%s: extent array size mismatch: %u/%u\n",
- __func__, len, nr_iomaps);
- return -EINVAL;
- }
+ len = sizeof(__be32) + xdr_stream_remaining(xdr);
+ expected = sizeof(__be32) + nr_iomaps * PNFS_BLOCK_EXTENT_SIZE;
+ if (len != expected)
+ return nfserr_bad_xdr;
iomaps = kcalloc(nr_iomaps, sizeof(*iomaps), GFP_KERNEL);
- if (!iomaps) {
- dprintk("%s: failed to allocate extent array\n", __func__);
- return -ENOMEM;
- }
+ if (!iomaps)
+ return nfserr_delay;
for (i = 0; i < nr_iomaps; i++) {
struct pnfs_block_extent bex;
- memcpy(&bex.vol_id, p, sizeof(struct nfsd4_deviceid));
- p += XDR_QUADLEN(sizeof(struct nfsd4_deviceid));
+ if (nfsd4_decode_deviceid4(xdr, &bex.vol_id)) {
+ nfserr = nfserr_bad_xdr;
+ goto fail;
+ }
- p = xdr_decode_hyper(p, &bex.foff);
+ if (xdr_stream_decode_u64(xdr, &bex.foff)) {
+ nfserr = nfserr_bad_xdr;
+ goto fail;
+ }
if (bex.foff & (block_size - 1)) {
- dprintk("%s: unaligned offset 0x%llx\n",
- __func__, bex.foff);
+ nfserr = nfserr_inval;
+ goto fail;
+ }
+
+ if (xdr_stream_decode_u64(xdr, &bex.len)) {
+ nfserr = nfserr_bad_xdr;
goto fail;
}
- p = xdr_decode_hyper(p, &bex.len);
if (bex.len & (block_size - 1)) {
- dprintk("%s: unaligned length 0x%llx\n",
- __func__, bex.foff);
+ nfserr = nfserr_inval;
+ goto fail;
+ }
+
+ if (xdr_stream_decode_u64(xdr, &bex.soff)) {
+ nfserr = nfserr_bad_xdr;
goto fail;
}
- p = xdr_decode_hyper(p, &bex.soff);
if (bex.soff & (block_size - 1)) {
- dprintk("%s: unaligned disk offset 0x%llx\n",
- __func__, bex.soff);
+ nfserr = nfserr_inval;
+ goto fail;
+ }
+
+ if (xdr_stream_decode_u32(xdr, &bex.es)) {
+ nfserr = nfserr_bad_xdr;
goto fail;
}
- bex.es = be32_to_cpup(p++);
if (bex.es != PNFS_BLOCK_READWRITE_DATA) {
- dprintk("%s: incorrect extent state %d\n",
- __func__, bex.es);
+ nfserr = nfserr_inval;
goto fail;
}
@@ -178,59 +217,79 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
}
*iomapp = iomaps;
- return nr_iomaps;
+ *nr_iomapsp = nr_iomaps;
+ return nfs_ok;
fail:
kfree(iomaps);
- return -EINVAL;
+ return nfserr;
}
-int
-nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
- u32 block_size)
+/**
+ * nfsd4_scsi_decode_layoutupdate - decode the scsi layout extent array
+ * @xdr: subbuf set to the encoded array
+ * @iomapp: pointer to store the decoded extent array
+ * @nr_iomapsp: pointer to store the number of extents
+ * @block_size: alignment of extent offset and length
+ *
+ * This function decodes the opaque field of the layoutupdate4 structure
+ * in a layoutcommit request for the scsi layout driver. The field is
+ * actually an array of extents sent by the client. It also checks that
+ * the offset and length of each extent are aligned by @block_size.
+ *
+ * Return values:
+ * %nfs_ok: Successful decoding, @iomapp and @nr_iomapsp are valid
+ * %nfserr_bad_xdr: The encoded array in @xdr is invalid
+ * %nfserr_inval: An unaligned extent found
+ * %nfserr_delay: Failed to allocate memory for @iomapp
+ */
+__be32
+nfsd4_scsi_decode_layoutupdate(struct xdr_stream *xdr, struct iomap **iomapp,
+ int *nr_iomapsp, u32 block_size)
{
struct iomap *iomaps;
- u32 nr_iomaps, expected, i;
+ u32 nr_iomaps, expected, len, i;
+ __be32 nfserr;
- if (len < sizeof(u32)) {
- dprintk("%s: extent array too small: %u\n", __func__, len);
- return -EINVAL;
- }
+ if (xdr_stream_decode_u32(xdr, &nr_iomaps))
+ return nfserr_bad_xdr;
- nr_iomaps = be32_to_cpup(p++);
+ len = sizeof(__be32) + xdr_stream_remaining(xdr);
expected = sizeof(__be32) + nr_iomaps * PNFS_SCSI_RANGE_SIZE;
- if (len != expected) {
- dprintk("%s: extent array size mismatch: %u/%u\n",
- __func__, len, expected);
- return -EINVAL;
- }
+ if (len != expected)
+ return nfserr_bad_xdr;
iomaps = kcalloc(nr_iomaps, sizeof(*iomaps), GFP_KERNEL);
- if (!iomaps) {
- dprintk("%s: failed to allocate extent array\n", __func__);
- return -ENOMEM;
- }
+ if (!iomaps)
+ return nfserr_delay;
for (i = 0; i < nr_iomaps; i++) {
u64 val;
- p = xdr_decode_hyper(p, &val);
+ if (xdr_stream_decode_u64(xdr, &val)) {
+ nfserr = nfserr_bad_xdr;
+ goto fail;
+ }
if (val & (block_size - 1)) {
- dprintk("%s: unaligned offset 0x%llx\n", __func__, val);
+ nfserr = nfserr_inval;
goto fail;
}
iomaps[i].offset = val;
- p = xdr_decode_hyper(p, &val);
+ if (xdr_stream_decode_u64(xdr, &val)) {
+ nfserr = nfserr_bad_xdr;
+ goto fail;
+ }
if (val & (block_size - 1)) {
- dprintk("%s: unaligned length 0x%llx\n", __func__, val);
+ nfserr = nfserr_inval;
goto fail;
}
iomaps[i].length = val;
}
*iomapp = iomaps;
- return nr_iomaps;
+ *nr_iomapsp = nr_iomaps;
+ return nfs_ok;
fail:
kfree(iomaps);
- return -EINVAL;
+ return nfserr;
}
diff --git a/fs/nfsd/blocklayoutxdr.h b/fs/nfsd/blocklayoutxdr.h
index 4e28ac8f1127..2e0c6c7d2b42 100644
--- a/fs/nfsd/blocklayoutxdr.h
+++ b/fs/nfsd/blocklayoutxdr.h
@@ -8,6 +8,15 @@
struct iomap;
struct xdr_stream;
+/* On the wire size of the layout4 struct with zero number of extents */
+#define PNFS_BLOCK_LAYOUT4_SIZE \
+ (sizeof(__be32) * 2 + /* offset4 */ \
+ sizeof(__be32) * 2 + /* length4 */ \
+ sizeof(__be32) + /* layoutiomode4 */ \
+ sizeof(__be32) + /* layouttype4 */ \
+ sizeof(__be32) + /* number of bytes */ \
+ sizeof(__be32)) /* number of extents */
+
struct pnfs_block_extent {
struct nfsd4_deviceid vol_id;
u64 foff;
@@ -21,6 +30,11 @@ struct pnfs_block_range {
u64 len;
};
+struct pnfs_block_layout {
+ u32 nr_extents;
+ struct pnfs_block_extent extents[] __counted_by(nr_extents);
+};
+
/*
* Random upper cap for the uuid length to avoid unbounded allocation.
* Not actually limited by the protocol.
@@ -54,9 +68,9 @@ __be32 nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
const struct nfsd4_getdeviceinfo *gdp);
__be32 nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
const struct nfsd4_layoutget *lgp);
-int nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
- u32 block_size);
-int nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
- u32 block_size);
+__be32 nfsd4_block_decode_layoutupdate(struct xdr_stream *xdr,
+ struct iomap **iomapp, int *nr_iomapsp, u32 block_size);
+__be32 nfsd4_scsi_decode_layoutupdate(struct xdr_stream *xdr,
+ struct iomap **iomapp, int *nr_iomapsp, u32 block_size);
#endif /* _NFSD_BLOCKLAYOUTXDR_H */
diff --git a/fs/nfsd/debugfs.c b/fs/nfsd/debugfs.c
new file mode 100644
index 000000000000..7f44689e0a53
--- /dev/null
+++ b/fs/nfsd/debugfs.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/debugfs.h>
+
+#include "nfsd.h"
+
+static struct dentry *nfsd_top_dir __read_mostly;
+
+/*
+ * /sys/kernel/debug/nfsd/disable-splice-read
+ *
+ * Contents:
+ * %0: NFS READ is allowed to use page splicing
+ * %1: NFS READ uses only iov iter read
+ *
+ * The default value of this setting is zero (page splicing is
+ * allowed). This setting takes immediate effect for all NFS
+ * versions, all exports, and in all NFSD net namespaces.
+ */
+
+static int nfsd_dsr_get(void *data, u64 *val)
+{
+ *val = nfsd_disable_splice_read ? 1 : 0;
+ return 0;
+}
+
+static int nfsd_dsr_set(void *data, u64 val)
+{
+ nfsd_disable_splice_read = (val > 0);
+ if (!nfsd_disable_splice_read) {
+ /*
+ * Must use buffered I/O if splice_read is enabled.
+ */
+ nfsd_io_cache_read = NFSD_IO_BUFFERED;
+ }
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(nfsd_dsr_fops, nfsd_dsr_get, nfsd_dsr_set, "%llu\n");
+
+/*
+ * /sys/kernel/debug/nfsd/io_cache_read
+ *
+ * Contents:
+ * %0: NFS READ will use buffered IO
+ * %1: NFS READ will use dontcache (buffered IO w/ dropbehind)
+ * %2: NFS READ will use direct IO
+ *
+ * This setting takes immediate effect for all NFS versions,
+ * all exports, and in all NFSD net namespaces.
+ */
+
+static int nfsd_io_cache_read_get(void *data, u64 *val)
+{
+ *val = nfsd_io_cache_read;
+ return 0;
+}
+
+static int nfsd_io_cache_read_set(void *data, u64 val)
+{
+ int ret = 0;
+
+ switch (val) {
+ case NFSD_IO_BUFFERED:
+ nfsd_io_cache_read = NFSD_IO_BUFFERED;
+ break;
+ case NFSD_IO_DONTCACHE:
+ case NFSD_IO_DIRECT:
+ /*
+ * Must disable splice_read when enabling
+ * NFSD_IO_DONTCACHE or NFSD_IO_DIRECT.
+ */
+ nfsd_disable_splice_read = true;
+ nfsd_io_cache_read = val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(nfsd_io_cache_read_fops, nfsd_io_cache_read_get,
+ nfsd_io_cache_read_set, "%llu\n");
+
+/*
+ * /sys/kernel/debug/nfsd/io_cache_write
+ *
+ * Contents:
+ * %0: NFS WRITE will use buffered IO
+ * %1: NFS WRITE will use dontcache (buffered IO w/ dropbehind)
+ *
+ * This setting takes immediate effect for all NFS versions,
+ * all exports, and in all NFSD net namespaces.
+ */
+
+static int nfsd_io_cache_write_get(void *data, u64 *val)
+{
+ *val = nfsd_io_cache_write;
+ return 0;
+}
+
+static int nfsd_io_cache_write_set(void *data, u64 val)
+{
+ int ret = 0;
+
+ switch (val) {
+ case NFSD_IO_BUFFERED:
+ case NFSD_IO_DONTCACHE:
+ case NFSD_IO_DIRECT:
+ nfsd_io_cache_write = val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(nfsd_io_cache_write_fops, nfsd_io_cache_write_get,
+ nfsd_io_cache_write_set, "%llu\n");
+
+void nfsd_debugfs_exit(void)
+{
+ debugfs_remove_recursive(nfsd_top_dir);
+ nfsd_top_dir = NULL;
+}
+
+void nfsd_debugfs_init(void)
+{
+ nfsd_top_dir = debugfs_create_dir("nfsd", NULL);
+
+ debugfs_create_file("disable-splice-read", S_IWUSR | S_IRUGO,
+ nfsd_top_dir, NULL, &nfsd_dsr_fops);
+
+ debugfs_create_file("io_cache_read", 0644, nfsd_top_dir, NULL,
+ &nfsd_io_cache_read_fops);
+
+ debugfs_create_file("io_cache_write", 0644, nfsd_top_dir, NULL,
+ &nfsd_io_cache_write_fops);
+}
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index eacafe46e3b6..9d55512d0cc9 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -40,24 +40,15 @@
#define EXPKEY_HASHMAX (1 << EXPKEY_HASHBITS)
#define EXPKEY_HASHMASK (EXPKEY_HASHMAX -1)
-static void expkey_put_work(struct work_struct *work)
+static void expkey_put(struct kref *ref)
{
- struct svc_expkey *key =
- container_of(to_rcu_work(work), struct svc_expkey, ek_rcu_work);
+ struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
if (test_bit(CACHE_VALID, &key->h.flags) &&
!test_bit(CACHE_NEGATIVE, &key->h.flags))
path_put(&key->ek_path);
auth_domain_put(key->ek_client);
- kfree(key);
-}
-
-static void expkey_put(struct kref *ref)
-{
- struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
-
- INIT_RCU_WORK(&key->ek_rcu_work, expkey_put_work);
- queue_rcu_work(system_wq, &key->ek_rcu_work);
+ kfree_rcu(key, ek_rcu);
}
static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
@@ -91,8 +82,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
int len;
struct auth_domain *dom = NULL;
int err;
- int fsidtype;
- char *ep;
+ u8 fsidtype;
struct svc_expkey key;
struct svc_expkey *ek = NULL;
@@ -118,10 +108,9 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
err = -EINVAL;
if (qword_get(&mesg, buf, PAGE_SIZE) <= 0)
goto out;
- fsidtype = simple_strtoul(buf, &ep, 10);
- if (*ep)
+ if (kstrtou8(buf, 10, &fsidtype))
goto out;
- dprintk("found fsidtype %d\n", fsidtype);
+ dprintk("found fsidtype %u\n", fsidtype);
if (key_len(fsidtype)==0) /* invalid type */
goto out;
if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0)
@@ -364,13 +353,11 @@ static void export_stats_destroy(struct export_stats *stats)
EXP_STATS_COUNTERS_NUM);
}
-static void svc_export_put_work(struct work_struct *work)
+static void svc_export_release(struct rcu_head *rcu_head)
{
- struct svc_export *exp =
- container_of(to_rcu_work(work), struct svc_export, ex_rcu_work);
+ struct svc_export *exp = container_of(rcu_head, struct svc_export,
+ ex_rcu);
- path_put(&exp->ex_path);
- auth_domain_put(exp->ex_client);
nfsd4_fslocs_free(&exp->ex_fslocs);
export_stats_destroy(exp->ex_stats);
kfree(exp->ex_stats);
@@ -382,8 +369,9 @@ static void svc_export_put(struct kref *ref)
{
struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
- INIT_RCU_WORK(&exp->ex_rcu_work, svc_export_put_work);
- queue_rcu_work(system_wq, &exp->ex_rcu_work);
+ path_put(&exp->ex_path);
+ auth_domain_put(exp->ex_client);
+ call_rcu(&exp->ex_rcu, svc_export_release);
}
static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h)
@@ -414,7 +402,7 @@ static struct svc_export *svc_export_update(struct svc_export *new,
struct svc_export *old);
static struct svc_export *svc_export_lookup(struct svc_export *);
-static int check_export(struct path *path, int *flags, unsigned char *uuid)
+static int check_export(const struct path *path, int *flags, unsigned char *uuid)
{
struct inode *inode = d_inode(path->dentry);
@@ -1094,49 +1082,62 @@ static struct svc_export *exp_find(struct cache_detail *cd,
}
/**
- * check_nfsd_access - check if access to export is allowed.
+ * check_xprtsec_policy - check if access to export is allowed by the
+ * xprtsec policy
* @exp: svc_export that is being accessed.
- * @rqstp: svc_rqst attempting to access @exp (will be NULL for LOCALIO).
- * @may_bypass_gss: reduce strictness of authorization check
+ * @rqstp: svc_rqst attempting to access @exp.
+ *
+ * Helper function for check_nfsd_access(). Note that callers should be
+ * using check_nfsd_access() instead of calling this function directly. The
+ * one exception is __fh_verify() since it has logic that may result in one
+ * or both of the helpers being skipped.
*
* Return values:
* %nfs_ok if access is granted, or
* %nfserr_wrongsec if access is denied
*/
-__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp,
- bool may_bypass_gss)
+__be32 check_xprtsec_policy(struct svc_export *exp, struct svc_rqst *rqstp)
{
- struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors;
- struct svc_xprt *xprt;
-
- /*
- * If rqstp is NULL, this is a LOCALIO request which will only
- * ever use a filehandle/credential pair for which access has
- * been affirmed (by ACCESS or OPEN NFS requests) over the
- * wire. So there is no need for further checks here.
- */
- if (!rqstp)
- return nfs_ok;
-
- xprt = rqstp->rq_xprt;
+ struct svc_xprt *xprt = rqstp->rq_xprt;
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_NONE) {
if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags))
- goto ok;
+ return nfs_ok;
}
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_TLS) {
if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) &&
!test_bit(XPT_PEER_AUTH, &xprt->xpt_flags))
- goto ok;
+ return nfs_ok;
}
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_MTLS) {
if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) &&
test_bit(XPT_PEER_AUTH, &xprt->xpt_flags))
- goto ok;
+ return nfs_ok;
}
- goto denied;
+ return nfserr_wrongsec;
+}
+
+/**
+ * check_security_flavor - check if access to export is allowed by the
+ * security flavor
+ * @exp: svc_export that is being accessed.
+ * @rqstp: svc_rqst attempting to access @exp.
+ * @may_bypass_gss: reduce strictness of authorization check
+ *
+ * Helper function for check_nfsd_access(). Note that callers should be
+ * using check_nfsd_access() instead of calling this function directly. The
+ * one exception is __fh_verify() since it has logic that may result in one
+ * or both of the helpers being skipped.
+ *
+ * Return values:
+ * %nfs_ok if access is granted, or
+ * %nfserr_wrongsec if access is denied
+ */
+__be32 check_security_flavor(struct svc_export *exp, struct svc_rqst *rqstp,
+ bool may_bypass_gss)
+{
+ struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors;
-ok:
/* legacy gss-only clients are always OK: */
if (exp->ex_client == rqstp->rq_gssclient)
return nfs_ok;
@@ -1178,10 +1179,30 @@ ok:
}
}
-denied:
return nfserr_wrongsec;
}
+/**
+ * check_nfsd_access - check if access to export is allowed.
+ * @exp: svc_export that is being accessed.
+ * @rqstp: svc_rqst attempting to access @exp.
+ * @may_bypass_gss: reduce strictness of authorization check
+ *
+ * Return values:
+ * %nfs_ok if access is granted, or
+ * %nfserr_wrongsec if access is denied
+ */
+__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp,
+ bool may_bypass_gss)
+{
+ __be32 status;
+
+ status = check_xprtsec_policy(exp, rqstp);
+ if (status != nfs_ok)
+ return status;
+ return check_security_flavor(exp, rqstp, may_bypass_gss);
+}
+
/*
* Uses rq_client and rq_gssclient to find an export; uses rq_client (an
* auth_unix client) if it's available and has secinfo information;
@@ -1192,7 +1213,7 @@ denied:
* use exp_get_by_name() or exp_find().
*/
struct svc_export *
-rqst_exp_get_by_name(struct svc_rqst *rqstp, struct path *path)
+rqst_exp_get_by_name(struct svc_rqst *rqstp, const struct path *path)
{
struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
@@ -1444,13 +1465,9 @@ static int e_show(struct seq_file *m, void *p)
return 0;
}
- if (!cache_get_rcu(&exp->h))
+ if (cache_check_rcu(cd, &exp->h, NULL))
return 0;
- if (cache_check(cd, &exp->h, NULL))
- return 0;
-
- exp_put(exp);
return svc_export_show(m, cd, cp);
}
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index 6f2fbaae01fa..d2b09cd76145 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -75,7 +75,7 @@ struct svc_export {
u32 ex_layout_types;
struct nfsd4_deviceid_map *ex_devid_map;
struct cache_detail *cd;
- struct rcu_work ex_rcu_work;
+ struct rcu_head ex_rcu;
unsigned long ex_xprtsec_modes;
struct export_stats *ex_stats;
};
@@ -88,11 +88,11 @@ struct svc_expkey {
struct cache_head h;
struct auth_domain * ek_client;
- int ek_fsidtype;
+ u8 ek_fsidtype;
u32 ek_fsid[6];
struct path ek_path;
- struct rcu_work ek_rcu_work;
+ struct rcu_head ek_rcu;
};
#define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC))
@@ -101,6 +101,9 @@ struct svc_expkey {
struct svc_cred;
int nfsexp_flags(struct svc_cred *cred, struct svc_export *exp);
+__be32 check_xprtsec_policy(struct svc_export *exp, struct svc_rqst *rqstp);
+__be32 check_security_flavor(struct svc_export *exp, struct svc_rqst *rqstp,
+ bool may_bypass_gss);
__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp,
bool may_bypass_gss);
@@ -111,7 +114,7 @@ int nfsd_export_init(struct net *);
void nfsd_export_shutdown(struct net *);
void nfsd_export_flush(struct net *);
struct svc_export * rqst_exp_get_by_name(struct svc_rqst *,
- struct path *);
+ const struct path *);
struct svc_export * rqst_exp_parent(struct svc_rqst *,
struct path *);
struct svc_export * rqst_find_fsidzero_export(struct svc_rqst *);
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index a1cdba42c4fa..93798575b807 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -39,6 +39,7 @@
#include <linux/fsnotify.h>
#include <linux/seq_file.h>
#include <linux/rhashtable.h>
+#include <linux/nfslocalio.h>
#include "vfs.h"
#include "nfsd.h"
@@ -112,7 +113,7 @@ static void
nfsd_file_schedule_laundrette(void)
{
if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags))
- queue_delayed_work(system_unbound_wq, &nfsd_filecache_laundrette,
+ queue_delayed_work(system_dfl_wq, &nfsd_filecache_laundrette,
NFSD_LAUNDRETTE_DELAY);
}
@@ -230,6 +231,9 @@ nfsd_file_alloc(struct net *net, struct inode *inode, unsigned char need,
refcount_set(&nf->nf_ref, 1);
nf->nf_may = need;
nf->nf_mark = NULL;
+ nf->nf_dio_mem_align = 0;
+ nf->nf_dio_offset_align = 0;
+ nf->nf_dio_read_offset_align = 0;
return nf;
}
@@ -318,15 +322,14 @@ nfsd_file_check_writeback(struct nfsd_file *nf)
mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}
-
-static bool nfsd_file_lru_add(struct nfsd_file *nf)
+static void nfsd_file_lru_add(struct nfsd_file *nf)
{
- set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
- if (list_lru_add_obj(&nfsd_file_lru, &nf->nf_lru)) {
+ refcount_inc(&nf->nf_ref);
+ if (list_lru_add_obj(&nfsd_file_lru, &nf->nf_lru))
trace_nfsd_file_lru_add(nf);
- return true;
- }
- return false;
+ else
+ WARN_ON(1);
+ nfsd_file_schedule_laundrette();
}
static bool nfsd_file_lru_remove(struct nfsd_file *nf)
@@ -362,47 +365,32 @@ nfsd_file_put(struct nfsd_file *nf)
if (test_bit(NFSD_FILE_GC, &nf->nf_flags) &&
test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
- /*
- * If this is the last reference (nf_ref == 1), then try to
- * transfer it to the LRU.
- */
- if (refcount_dec_not_one(&nf->nf_ref))
- return;
-
- /* Try to add it to the LRU. If that fails, decrement. */
- if (nfsd_file_lru_add(nf)) {
- /* If it's still hashed, we're done */
- if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
- nfsd_file_schedule_laundrette();
- return;
- }
-
- /*
- * We're racing with unhashing, so try to remove it from
- * the LRU. If removal fails, then someone else already
- * has our reference.
- */
- if (!nfsd_file_lru_remove(nf))
- return;
- }
+ set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
+ set_bit(NFSD_FILE_RECENT, &nf->nf_flags);
}
+
if (refcount_dec_and_test(&nf->nf_ref))
nfsd_file_free(nf);
}
/**
- * nfsd_file_put_local - put nfsd_file reference and arm nfsd_serv_put in caller
- * @nf: nfsd_file of which to put the reference
+ * nfsd_file_put_local - put nfsd_file reference and arm nfsd_net_put in caller
+ * @pnf: nfsd_file of which to put the reference
*
* First save the associated net to return to caller, then put
* the reference of the nfsd_file.
*/
struct net *
-nfsd_file_put_local(struct nfsd_file *nf)
+nfsd_file_put_local(struct nfsd_file __rcu **pnf)
{
- struct net *net = nf->nf_net;
+ struct nfsd_file *nf;
+ struct net *net = NULL;
- nfsd_file_put(nf);
+ nf = unrcu_pointer(xchg(pnf, NULL));
+ if (nf) {
+ net = nf->nf_net;
+ nfsd_file_put(nf);
+ }
return net;
}
@@ -445,11 +433,20 @@ nfsd_file_dispose_list_delayed(struct list_head *dispose)
struct nfsd_file, nf_gc);
struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id);
struct nfsd_fcache_disposal *l = nn->fcache_disposal;
+ struct svc_serv *serv;
spin_lock(&l->lock);
list_move_tail(&nf->nf_gc, &l->freeme);
spin_unlock(&l->lock);
- svc_wake_up(nn->nfsd_serv);
+
+ /*
+ * The filecache laundrette is shut down after the
+ * nn->nfsd_serv pointer is cleared, but before the
+ * svc_serv is freed.
+ */
+ serv = nn->nfsd_serv;
+ if (serv)
+ svc_wake_up(serv);
}
}
@@ -520,13 +517,12 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
}
/*
- * Put the reference held on behalf of the LRU. If it wasn't the last
- * one, then just remove it from the LRU and ignore it.
+ * Put the reference held on behalf of the LRU if it is the last
+ * reference, else rotate.
*/
- if (!refcount_dec_and_test(&nf->nf_ref)) {
+ if (!refcount_dec_if_one(&nf->nf_ref)) {
trace_nfsd_file_gc_in_use(nf);
- list_lru_isolate(lru, &nf->nf_lru);
- return LRU_REMOVED;
+ return LRU_ROTATE;
}
/* Refcount went to zero. Unhash it and queue it to the dispose list */
@@ -538,14 +534,54 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
return LRU_REMOVED;
}
+static enum lru_status
+nfsd_file_gc_cb(struct list_head *item, struct list_lru_one *lru,
+ void *arg)
+{
+ struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);
+
+ if (test_and_clear_bit(NFSD_FILE_RECENT, &nf->nf_flags)) {
+ /*
+ * "REFERENCED" really means "should be at the end of the
+ * LRU. As we are putting it there we can clear the flag.
+ */
+ clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
+ trace_nfsd_file_gc_aged(nf);
+ return LRU_ROTATE;
+ }
+ return nfsd_file_lru_cb(item, lru, arg);
+}
+
+/* If the shrinker runs between calls to list_lru_walk_node() in
+ * nfsd_file_gc(), the "remaining" count will be wrong. This could
+ * result in premature freeing of some files. This may not matter much
+ * but is easy to fix with this spinlock which temporarily disables
+ * the shrinker.
+ */
+static DEFINE_SPINLOCK(nfsd_gc_lock);
static void
nfsd_file_gc(void)
{
+ unsigned long ret = 0;
LIST_HEAD(dispose);
- unsigned long ret;
+ int nid;
+
+ spin_lock(&nfsd_gc_lock);
+ for_each_node_state(nid, N_NORMAL_MEMORY) {
+ unsigned long remaining = list_lru_count_node(&nfsd_file_lru, nid);
+
+ while (remaining > 0) {
+ unsigned long nr = min(remaining, NFSD_FILE_GC_BATCH);
- ret = list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb,
- &dispose, list_lru_count(&nfsd_file_lru));
+ remaining -= nr;
+ ret += list_lru_walk_node(&nfsd_file_lru, nid, nfsd_file_gc_cb,
+ &dispose, &nr);
+ if (nr)
+ /* walk aborted early */
+ remaining = 0;
+ }
+ }
+ spin_unlock(&nfsd_gc_lock);
trace_nfsd_file_gc_removed(ret, list_lru_count(&nfsd_file_lru));
nfsd_file_dispose_list_delayed(&dispose);
}
@@ -553,9 +589,9 @@ nfsd_file_gc(void)
static void
nfsd_file_gc_worker(struct work_struct *work)
{
- nfsd_file_gc();
if (list_lru_count(&nfsd_file_lru))
- nfsd_file_schedule_laundrette();
+ nfsd_file_gc();
+ nfsd_file_schedule_laundrette();
}
static unsigned long
@@ -570,8 +606,12 @@ nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
LIST_HEAD(dispose);
unsigned long ret;
+ if (!spin_trylock(&nfsd_gc_lock))
+ return SHRINK_STOP;
+
ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
nfsd_file_lru_cb, &dispose);
+ spin_unlock(&nfsd_gc_lock);
trace_nfsd_file_shrinker_removed(ret, list_lru_count(&nfsd_file_lru));
nfsd_file_dispose_list_delayed(&dispose);
return ret;
@@ -676,17 +716,12 @@ nfsd_file_close_inode(struct inode *inode)
void
nfsd_file_close_inode_sync(struct inode *inode)
{
- struct nfsd_file *nf;
LIST_HEAD(dispose);
trace_nfsd_file_close(inode);
nfsd_file_queue_for_close(inode, &dispose);
- while (!list_empty(&dispose)) {
- nf = list_first_entry(&dispose, struct nfsd_file, nf_gc);
- list_del_init(&nf->nf_gc);
- nfsd_file_free(nf);
- }
+ nfsd_file_dispose_list(&dispose);
}
static int
@@ -833,6 +868,14 @@ __nfsd_file_cache_purge(struct net *net)
struct nfsd_file *nf;
LIST_HEAD(dispose);
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ if (net) {
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ nfs_localio_invalidate_clients(&nn->local_clients,
+ &nn->local_clients_lock);
+ }
+#endif
+
rhltable_walk_enter(&nfsd_file_rhltable, &iter);
do {
rhashtable_walk_start(&iter);
@@ -1009,12 +1052,41 @@ nfsd_file_is_cached(struct inode *inode)
}
static __be32
+nfsd_file_get_dio_attrs(const struct svc_fh *fhp, struct nfsd_file *nf)
+{
+ struct inode *inode = file_inode(nf->nf_file);
+ struct kstat stat;
+ __be32 status;
+
+ /* Currently only need to get DIO alignment info for regular files */
+ if (!S_ISREG(inode->i_mode))
+ return nfs_ok;
+
+ status = fh_getattr(fhp, &stat);
+ if (status != nfs_ok)
+ return status;
+
+ trace_nfsd_file_get_dio_attrs(inode, &stat);
+
+ if (stat.result_mask & STATX_DIOALIGN) {
+ nf->nf_dio_mem_align = stat.dio_mem_align;
+ nf->nf_dio_offset_align = stat.dio_offset_align;
+ }
+ if (stat.result_mask & STATX_DIO_READ_ALIGN)
+ nf->nf_dio_read_offset_align = stat.dio_read_offset_align;
+ else
+ nf->nf_dio_read_offset_align = nf->nf_dio_offset_align;
+
+ return nfs_ok;
+}
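nfsd_file_get_dio_attrs() consumes the same statx attributes that userspace can query directly. A sketch of the equivalent userspace probe, assuming kernel and libc headers recent enough to expose STATX_DIOALIGN (the newer STATX_DIO_READ_ALIGN mask used above may not be available everywhere):

/* Sketch: query the DIO alignment attributes that the filecache caches. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/stat.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2 || statx(AT_FDCWD, argv[1], 0, STATX_DIOALIGN, &stx) != 0) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_DIOALIGN)
		printf("dio mem align %u, dio offset align %u\n",
		       stx.stx_dio_mem_align, stx.stx_dio_offset_align);
	else
		printf("no DIO alignment reported for this file\n");
	return 0;
}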
+
+static __be32
nfsd_file_do_acquire(struct svc_rqst *rqstp, struct net *net,
struct svc_cred *cred,
struct auth_domain *client,
struct svc_fh *fhp,
unsigned int may_flags, struct file *file,
- struct nfsd_file **pnf, bool want_gc)
+ umode_t type, bool want_gc, struct nfsd_file **pnf)
{
unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
struct nfsd_file *new, *nf;
@@ -1025,13 +1097,13 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct net *net,
int ret;
retry:
- if (rqstp) {
- status = fh_verify(rqstp, fhp, S_IFREG,
+ if (rqstp)
+ status = fh_verify(rqstp, fhp, type,
may_flags|NFSD_MAY_OWNER_OVERRIDE);
- } else {
- status = fh_verify_local(net, cred, client, fhp, S_IFREG,
+ else
+ status = fh_verify_local(net, cred, client, fhp, type,
may_flags|NFSD_MAY_OWNER_OVERRIDE);
- }
+
if (status != nfs_ok)
return status;
inode = d_inode(fhp->fh_dentry);
@@ -1040,16 +1112,8 @@ retry:
nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
rcu_read_unlock();
- if (nf) {
- /*
- * If the nf is on the LRU then it holds an extra reference
- * that must be put if it's removed. It had better not be
- * the last one however, since we should hold another.
- */
- if (nfsd_file_lru_remove(nf))
- refcount_dec(&nf->nf_ref);
+ if (nf)
goto wait_for_construction;
- }
new = nfsd_file_alloc(net, inode, need, want_gc);
if (!new) {
@@ -1112,15 +1176,18 @@ out:
open_file:
trace_nfsd_file_alloc(nf);
- nf->nf_mark = nfsd_file_mark_find_or_create(inode);
- if (nf->nf_mark) {
+
+ if (type == S_IFREG)
+ nf->nf_mark = nfsd_file_mark_find_or_create(inode);
+
+ if (type != S_IFREG || nf->nf_mark) {
if (file) {
get_file(file);
nf->nf_file = file;
status = nfs_ok;
trace_nfsd_file_opened(nf, status);
} else {
- ret = nfsd_open_verified(fhp, may_flags, &nf->nf_file);
+ ret = nfsd_open_verified(fhp, type, may_flags, &nf->nf_file);
if (ret == -EOPENSTALE && stale_retry) {
stale_retry = false;
nfsd_file_unhash(nf);
@@ -1134,6 +1201,8 @@ open_file:
}
status = nfserrno(ret);
trace_nfsd_file_open(nf, status);
+ if (status == nfs_ok)
+ status = nfsd_file_get_dio_attrs(fhp, nf);
}
} else
status = nfserr_jukebox;
@@ -1143,6 +1212,9 @@ open_file:
*/
if (status != nfs_ok || inode->i_nlink == 0)
nfsd_file_unhash(nf);
+ else if (want_gc)
+ nfsd_file_lru_add(nf);
+
clear_and_wake_up_bit(NFSD_FILE_PENDING, &nf->nf_flags);
if (status == nfs_ok)
goto out;
@@ -1177,7 +1249,7 @@ nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct nfsd_file **pnf)
{
return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
- fhp, may_flags, NULL, pnf, true);
+ fhp, may_flags, NULL, S_IFREG, true, pnf);
}
/**
@@ -1202,7 +1274,7 @@ nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct nfsd_file **pnf)
{
return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
- fhp, may_flags, NULL, pnf, false);
+ fhp, may_flags, NULL, S_IFREG, false, pnf);
}
/**
@@ -1222,10 +1294,9 @@ nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
* a file. The security implications of this should be carefully
* considered before use.
*
- * The nfsd_file object returned by this API is reference-counted
- * and garbage-collected. The object is retained for a few
- * seconds after the final nfsd_file_put() in case the caller
- * wants to re-use it.
+ * The nfsd_file object returned by this API is reference-counted
+ * but not garbage-collected. The object is unhashed after the
+ * final nfsd_file_put().
*
* Return values:
* %nfs_ok - @pnf points to an nfsd_file with its reference
@@ -1246,9 +1317,9 @@ nfsd_file_acquire_local(struct net *net, struct svc_cred *cred,
const struct cred *save_cred = get_current_cred();
__be32 beres;
- beres = nfsd_file_do_acquire(NULL, net, cred, client,
- fhp, may_flags, NULL, pnf, true);
- revert_creds(save_cred);
+ beres = nfsd_file_do_acquire(NULL, net, cred, client, fhp, may_flags,
+ NULL, S_IFREG, false, pnf);
+ put_cred(revert_creds(save_cred));
return beres;
}
@@ -1276,7 +1347,33 @@ nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct nfsd_file **pnf)
{
return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
- fhp, may_flags, file, pnf, false);
+ fhp, may_flags, file, S_IFREG, false, pnf);
+}
+
+/**
+ * nfsd_file_acquire_dir - Get a struct nfsd_file with an open directory
+ * @rqstp: the RPC transaction being executed
+ * @fhp: the NFS filehandle of the file to be opened
+ * @pnf: OUT: new or found "struct nfsd_file" object
+ *
+ * The nfsd_file object returned by this API is reference-counted
+ * but not garbage-collected. The object is unhashed after the
+ * final nfsd_file_put(). This opens directories only, and only
+ * in O_RDONLY mode.
+ *
+ * Return values:
+ * %nfs_ok - @pnf points to an nfsd_file with its reference
+ * count boosted.
+ *
+ * On error, an nfsstat value in network byte order is returned.
+ */
+__be32
+nfsd_file_acquire_dir(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ struct nfsd_file **pnf)
+{
+ return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL, fhp,
+ NFSD_MAY_READ|NFSD_MAY_64BIT_COOKIE,
+ NULL, S_IFDIR, false, pnf);
}
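A caller of nfsd_file_acquire_dir() follows the usual acquire/use/put pattern; since the object is not garbage-collected, the final put unhashes it. A sketch of a hypothetical caller (not code from this patch):

/* Hypothetical caller sketch for nfsd_file_acquire_dir(). */
static __be32 example_with_open_dir(struct svc_rqst *rqstp, struct svc_fh *fhp)
{
	struct nfsd_file *nf;
	__be32 status;

	status = nfsd_file_acquire_dir(rqstp, fhp, &nf);
	if (status != nfs_ok)
		return status;

	/* nf->nf_file is an O_RDONLY struct file opened on the directory */

	nfsd_file_put(nf);	/* last put unhashes; no GC retention */
	return nfs_ok;
}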
/*
diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h
index d5db6b34ba30..b383dbc5b921 100644
--- a/fs/nfsd/filecache.h
+++ b/fs/nfsd/filecache.h
@@ -4,6 +4,12 @@
#include <linux/fsnotify_backend.h>
/*
+ * Limit the time that the list_lru_one lock is held during
+ * an LRU scan.
+ */
+#define NFSD_FILE_GC_BATCH (16UL)
+
+/*
* This is the fsnotify_mark container that nfsd attaches to the files that it
* is holding open. Note that we have a separate refcount here aside from the
* one in the fsnotify_mark. We only want a single fsnotify_mark attached to
@@ -38,6 +44,7 @@ struct nfsd_file {
#define NFSD_FILE_PENDING (1)
#define NFSD_FILE_REFERENCED (2)
#define NFSD_FILE_GC (3)
+#define NFSD_FILE_RECENT (4)
unsigned long nf_flags;
refcount_t nf_ref;
unsigned char nf_may;
@@ -47,6 +54,10 @@ struct nfsd_file {
struct list_head nf_gc;
struct rcu_head nf_rcu;
ktime_t nf_birthtime;
+
+ u32 nf_dio_mem_align;
+ u32 nf_dio_offset_align;
+ u32 nf_dio_read_offset_align;
};
int nfsd_file_cache_init(void);
@@ -55,7 +66,7 @@ void nfsd_file_cache_shutdown(void);
int nfsd_file_cache_start_net(struct net *net);
void nfsd_file_cache_shutdown_net(struct net *net);
void nfsd_file_put(struct nfsd_file *nf);
-struct net *nfsd_file_put_local(struct nfsd_file *nf);
+struct net *nfsd_file_put_local(struct nfsd_file __rcu **nf);
struct nfsd_file *nfsd_file_get(struct nfsd_file *nf);
struct file *nfsd_file_file(struct nfsd_file *nf);
void nfsd_file_close_inode_sync(struct inode *inode);
@@ -71,5 +82,7 @@ __be32 nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
__be32 nfsd_file_acquire_local(struct net *net, struct svc_cred *cred,
struct auth_domain *client, struct svc_fh *fhp,
unsigned int may_flags, struct nfsd_file **pnf);
+__be32 nfsd_file_acquire_dir(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ struct nfsd_file **pnf);
int nfsd_file_cache_stats_show(struct seq_file *m, void *v);
#endif /* _FS_NFSD_FILECACHE_H */
diff --git a/fs/nfsd/flexfilelayout.c b/fs/nfsd/flexfilelayout.c
index 3ca5304440ff..0f1a35400cd5 100644
--- a/fs/nfsd/flexfilelayout.c
+++ b/fs/nfsd/flexfilelayout.c
@@ -20,8 +20,8 @@
#define NFSDDBG_FACILITY NFSDDBG_PNFS
static __be32
-nfsd4_ff_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
- struct nfsd4_layoutget *args)
+nfsd4_ff_proc_layoutget(struct svc_rqst *rqstp, struct inode *inode,
+ const struct svc_fh *fhp, struct nfsd4_layoutget *args)
{
struct nfsd4_layout_seg *seg = &args->lg_seg;
u32 device_generation = 0;
@@ -125,6 +125,13 @@ nfsd4_ff_proc_getdeviceinfo(struct super_block *sb, struct svc_rqst *rqstp,
return 0;
}
+static __be32
+nfsd4_ff_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp,
+ struct nfsd4_layoutcommit *lcp)
+{
+ return nfs_ok;
+}
+
const struct nfsd4_layout_ops ff_layout_ops = {
.notify_types =
NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
@@ -133,4 +140,5 @@ const struct nfsd4_layout_ops ff_layout_ops = {
.encode_getdeviceinfo = nfsd4_ff_encode_getdeviceinfo,
.proc_layoutget = nfsd4_ff_proc_layoutget,
.encode_layoutget = nfsd4_ff_encode_layoutget,
+ .proc_layoutcommit = nfsd4_ff_proc_layoutcommit,
};
diff --git a/fs/nfsd/flexfilelayoutxdr.c b/fs/nfsd/flexfilelayoutxdr.c
index aeb71c10ff1b..f9f7e38cba13 100644
--- a/fs/nfsd/flexfilelayoutxdr.c
+++ b/fs/nfsd/flexfilelayoutxdr.c
@@ -54,8 +54,7 @@ nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
*p++ = cpu_to_be32(1); /* single mirror */
*p++ = cpu_to_be32(1); /* single data server */
- p = xdr_encode_opaque_fixed(p, &fl->deviceid,
- sizeof(struct nfsd4_deviceid));
+ p = svcxdr_encode_deviceid4(p, &fl->deviceid);
*p++ = cpu_to_be32(1); /* efficiency */
diff --git a/fs/nfsd/localio.c b/fs/nfsd/localio.c
index f441cb9f74d5..be710d809a3b 100644
--- a/fs/nfsd/localio.c
+++ b/fs/nfsd/localio.c
@@ -24,19 +24,6 @@
#include "filecache.h"
#include "cache.h"
-static const struct nfsd_localio_operations nfsd_localio_ops = {
- .nfsd_serv_try_get = nfsd_serv_try_get,
- .nfsd_serv_put = nfsd_serv_put,
- .nfsd_open_local_fh = nfsd_open_local_fh,
- .nfsd_file_put_local = nfsd_file_put_local,
- .nfsd_file_file = nfsd_file_file,
-};
-
-void nfsd_localio_ops_init(void)
-{
- nfs_to = &nfsd_localio_ops;
-}
-
/**
* nfsd_open_local_fh - lookup a local filehandle @nfs_fh and map to nfsd_file
*
@@ -45,6 +32,7 @@ void nfsd_localio_ops_init(void)
* @rpc_clnt: rpc_clnt that the client established
* @cred: cred that the client established
* @nfs_fh: filehandle to lookup
+ * @pnf: place to find a cached nfsd_file, or to store a newly opened one
* @fmode: fmode_t to use for open
*
* This function maps a local fh to a path on a local filesystem.
@@ -52,13 +40,14 @@ void nfsd_localio_ops_init(void)
* avoid all the NFS overhead with reads, writes and commits.
*
* On successful return, returned nfsd_file will have its nf_net member
- * set. Caller (NFS client) is responsible for calling nfsd_serv_put and
+ * set. Caller (NFS client) is responsible for calling nfsd_net_put and
* nfsd_file_put (via nfs_to_nfsd_file_put_local).
*/
-struct nfsd_file *
+static struct nfsd_file *
nfsd_open_local_fh(struct net *net, struct auth_domain *dom,
struct rpc_clnt *rpc_clnt, const struct cred *cred,
- const struct nfs_fh *nfs_fh, const fmode_t fmode)
+ const struct nfs_fh *nfs_fh, struct nfsd_file __rcu **pnf,
+ const fmode_t fmode)
{
int mayflags = NFSD_MAY_LOCALIO;
struct svc_cred rq_cred;
@@ -69,6 +58,15 @@ nfsd_open_local_fh(struct net *net, struct auth_domain *dom,
if (nfs_fh->size > NFS4_FHSIZE)
return ERR_PTR(-EINVAL);
+ if (!nfsd_net_try_get(net))
+ return ERR_PTR(-ENXIO);
+
+ rcu_read_lock();
+ localio = nfsd_file_get(rcu_dereference(*pnf));
+ rcu_read_unlock();
+ if (localio)
+ return localio;
+
/* nfs_fh -> svc_fh */
fh_init(&fh, NFS4_FHSIZE);
fh.fh_handle.fh_size = nfs_fh->size;
@@ -90,9 +88,58 @@ nfsd_open_local_fh(struct net *net, struct auth_domain *dom,
if (rq_cred.cr_group_info)
put_group_info(rq_cred.cr_group_info);
+ if (!IS_ERR(localio)) {
+ struct nfsd_file *new;
+ if (!nfsd_net_try_get(net)) {
+ nfsd_file_put(localio);
+ nfsd_net_put(net);
+ return ERR_PTR(-ENXIO);
+ }
+ nfsd_file_get(localio);
+ again:
+ new = unrcu_pointer(cmpxchg(pnf, NULL, RCU_INITIALIZER(localio)));
+ if (new) {
+ /* Some other thread installed an nfsd_file */
+ if (nfsd_file_get(new) == NULL)
+ goto again;
+ /*
+ * Drop the ref we were going to install (both file and
+ * net) and the one we were going to return (only file).
+ */
+ nfsd_file_put(localio);
+ nfsd_net_put(net);
+ nfsd_file_put(localio);
+ localio = new;
+ }
+ } else
+ nfsd_net_put(net);
+
return localio;
}
-EXPORT_SYMBOL_GPL(nfsd_open_local_fh);
+
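The caching added to nfsd_open_local_fh() installs the freshly opened nfsd_file into *pnf with cmpxchg() and backs off if another task won the race, retrying only when the winner's file is already being torn down. The bare pattern, with hypothetical object_tryget()/object_put() helpers standing in for the nfsd_file refcounting:

/* Sketch of install-or-reuse via cmpxchg(); hypothetical helpers, not localio code. */
static struct object *install_or_reuse(struct object **slot, struct object *fresh)
{
	struct object *old;

again:
	old = cmpxchg(slot, NULL, fresh);	/* try to publish our object */
	if (!old)
		return fresh;			/* we won: slot now holds 'fresh' */

	if (!object_tryget(old))		/* winner's object already dying? */
		goto again;			/* then retry the install */

	object_put(fresh);			/* lost the race: drop ours, reuse theirs */
	return old;
}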
+static void nfsd_file_dio_alignment(struct nfsd_file *nf,
+ u32 *nf_dio_mem_align,
+ u32 *nf_dio_offset_align,
+ u32 *nf_dio_read_offset_align)
+{
+ *nf_dio_mem_align = nf->nf_dio_mem_align;
+ *nf_dio_offset_align = nf->nf_dio_offset_align;
+ *nf_dio_read_offset_align = nf->nf_dio_read_offset_align;
+}
+
+static const struct nfsd_localio_operations nfsd_localio_ops = {
+ .nfsd_net_try_get = nfsd_net_try_get,
+ .nfsd_net_put = nfsd_net_put,
+ .nfsd_open_local_fh = nfsd_open_local_fh,
+ .nfsd_file_put_local = nfsd_file_put_local,
+ .nfsd_file_file = nfsd_file_file,
+ .nfsd_file_dio_alignment = nfsd_file_dio_alignment,
+};
+
+void nfsd_localio_ops_init(void)
+{
+ nfs_to = &nfsd_localio_ops;
+}
/*
* UUID_IS_LOCAL XDR functions
@@ -114,6 +161,7 @@ static __be32 localio_proc_uuid_is_local(struct svc_rqst *rqstp)
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
nfs_uuid_is_local(&argp->uuid, &nn->local_clients,
+ &nn->local_clients_lock,
net, rqstp->rq_client, THIS_MODULE);
return rpc_success;
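
The reworked nfsd_open_local_fh() above caches the nfsd_file it resolves in the
caller-supplied @pnf slot, so repeated LOCALIO opens of the same filehandle can
skip the lookup and open. Below is a minimal, hedged sketch of the lock-free
"install once, reuse thereafter" idiom it relies on (an RCU-published pointer
plus cmpxchg()); struct obj, obj_get() and obj_put() are illustrative
placeholders rather than nfsd symbols, and obj_get() is assumed to behave like
nfsd_file_get(), i.e. it tolerates NULL and returns NULL.

#include <linux/rcupdate.h>

/* Illustrative sketch of the single-install cache used by nfsd_open_local_fh(). */
static struct obj *cache_lookup_or_install(struct obj __rcu **slot,
                                           struct obj *newobj)
{
        struct obj *cur;

        /* Fast path: another caller already installed an object. */
        rcu_read_lock();
        cur = obj_get(rcu_dereference(*slot));
        rcu_read_unlock();
        if (cur)
                return cur;

        /*
         * Take an extra reference for the cache slot, then try to publish.
         * A NULL old value means we won the race: the slot keeps the extra
         * reference and the original one goes back to the caller.
         */
        obj_get(newobj);
        cur = unrcu_pointer(cmpxchg(slot, NULL, RCU_INITIALIZER(newobj)));
        if (!cur)
                return newobj;

        /* Lost the race: drop both of our references, use the winner's. */
        obj_put(newobj);
        obj_put(newobj);
        /* The nfsd code additionally retries if the cached file is already dying. */
        return obj_get(cur);
}
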
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
index edc9f75dc75c..c774ce9aa296 100644
--- a/fs/nfsd/lockd.c
+++ b/fs/nfsd/lockd.c
@@ -57,7 +57,20 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp,
switch (nfserr) {
case nfs_ok:
return 0;
- case nfserr_dropit:
+ case nfserr_jukebox:
+ /* This error can indicate that a delegation conflicts with
+ * the NLM lock request. Options are:
+ * (1) For now, drop this request and make the client
+ * retry. When the delegation is returned, the client's lock
+ * retry will complete.
+ * (2) Per the "spec", NLM4_DENIED signals to the client
+ * that the lock is unavailable now but the client can retry.
+ * The Linux client implementation does not retry; it treats
+ * NLM4_DENIED the same as NLM4_FAILED and fails the request.
+ * (3) In the future, treat this as a blocked lock and issue
+ * a callback when the delegation is returned, though there
+ * might not be a proper lock request to block on.
+ */
return nlm_drop_reply;
case nfserr_stale:
return nlm_stale_fh;
diff --git a/fs/nfsd/netlink.c b/fs/nfsd/netlink.c
index ca54aa583530..ac51a44e1065 100644
--- a/fs/nfsd/netlink.c
+++ b/fs/nfsd/netlink.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/nfsd.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
diff --git a/fs/nfsd/netlink.h b/fs/nfsd/netlink.h
index 8eb903f24c41..478117ff6b8c 100644
--- a/fs/nfsd/netlink.h
+++ b/fs/nfsd/netlink.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/nfsd.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_NFSD_GEN_H
#define _LINUX_NFSD_GEN_H
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 26f7b34d1a03..3e2d0fde80a7 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -128,21 +128,16 @@ struct nfsd_net {
seqlock_t writeverf_lock;
unsigned char writeverf[8];
- /*
- * Max number of connections this nfsd container will allow. Defaults
- * to '0' which is means that it bases this on the number of threads.
- */
- unsigned int max_connections;
-
u32 clientid_base;
u32 clientid_counter;
u32 clverifier_counter;
struct svc_info nfsd_info;
#define nfsd_serv nfsd_info.serv
- struct percpu_ref nfsd_serv_ref;
- struct completion nfsd_serv_confirm_done;
- struct completion nfsd_serv_free_done;
+
+ struct percpu_ref nfsd_net_ref;
+ struct completion nfsd_net_confirm_done;
+ struct completion nfsd_net_free_done;
/*
* clientid and stateid data for construction of net unique COPY
@@ -219,6 +214,7 @@ struct nfsd_net {
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
/* Local clients to be invalidated when net is shut down */
+ spinlock_t local_clients_lock;
struct list_head local_clients;
#endif
};
@@ -229,8 +225,8 @@ struct nfsd_net {
extern bool nfsd_support_version(int vers);
extern unsigned int nfsd_net_id;
-bool nfsd_serv_try_get(struct net *net);
-void nfsd_serv_put(struct net *net);
+bool nfsd_net_try_get(struct net *net);
+void nfsd_net_put(struct net *net);
void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn);
void nfsd_reset_write_verifier(struct nfsd_net *nn);
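
The netns.h hunk above renames the per-server percpu_ref into a per-net one
(nfsd_net_ref), which is what the nfsd_net_try_get()/nfsd_net_put() pair used
by LOCALIO now pins. As a rough, hedged sketch of how such a pair is usually
built on a percpu_ref (percpu_ref_tryget_live() and percpu_ref_put() are the
real kernel primitives; the my_net wrapper below is invented for illustration):

#include <linux/percpu-refcount.h>
#include <linux/completion.h>

/* Illustrative wrapper only; mirrors the shape of nfsd_net_try_get()/put(). */
struct my_net {
        struct percpu_ref ref;          /* like nfsd_net_ref */
        struct completion free_done;    /* like nfsd_net_free_done */
};

static bool my_net_try_get(struct my_net *mn)
{
        /* Fails once the ref has been killed during shutdown. */
        return percpu_ref_tryget_live(&mn->ref);
}

static void my_net_put(struct my_net *mn)
{
        percpu_ref_put(&mn->ref);
}
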
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index 4e3be7201b1c..5fb202acb0fd 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -84,6 +84,8 @@ out:
fail:
posix_acl_release(resp->acl_access);
posix_acl_release(resp->acl_default);
+ resp->acl_access = NULL;
+ resp->acl_default = NULL;
goto out;
}
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
index 5e34e98db969..7b5433bd3019 100644
--- a/fs/nfsd/nfs3acl.c
+++ b/fs/nfsd/nfs3acl.c
@@ -76,6 +76,8 @@ out:
fail:
posix_acl_release(resp->acl_access);
posix_acl_release(resp->acl_default);
+ resp->acl_access = NULL;
+ resp->acl_default = NULL;
goto out;
}
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 372bdcf5e07a..42adc5461db0 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -14,6 +14,7 @@
#include "xdr3.h"
#include "vfs.h"
#include "filecache.h"
+#include "trace.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -69,8 +70,7 @@ nfsd3_proc_getattr(struct svc_rqst *rqstp)
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd3_attrstat *resp = rqstp->rq_resp;
- dprintk("nfsd: GETATTR(3) %s\n",
- SVCFH_fmt(&argp->fh));
+ trace_nfsd_vfs_getattr(rqstp, &argp->fh);
fh_copy(&resp->fh, &argp->fh);
resp->status = fh_verify(rqstp, &resp->fh, 0,
@@ -220,7 +220,6 @@ nfsd3_proc_write(struct svc_rqst *rqstp)
struct nfsd3_writeargs *argp = rqstp->rq_argp;
struct nfsd3_writeres *resp = rqstp->rq_resp;
unsigned long cnt = argp->len;
- unsigned int nvecs;
dprintk("nfsd: WRITE(3) %s %d bytes at %Lu%s\n",
SVCFH_fmt(&argp->fh),
@@ -235,10 +234,8 @@ nfsd3_proc_write(struct svc_rqst *rqstp)
fh_copy(&resp->fh, &argp->fh);
resp->committed = argp->stable;
- nvecs = svc_fill_write_vector(rqstp, &argp->payload);
-
resp->status = nfsd_write(rqstp, &resp->fh, argp->offset,
- rqstp->rq_vec, nvecs, &cnt,
+ &argp->payload, &cnt,
resp->committed, resp->verf);
resp->count = cnt;
resp->status = nfsd3_map_status(resp->status);
@@ -266,6 +263,8 @@ nfsd3_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
__be32 status;
int host_err;
+ trace_nfsd_vfs_create(rqstp, fhp, S_IFREG, argp->name, argp->len);
+
if (isdotent(argp->name, argp->len))
return nfserr_exist;
if (!(iap->ia_valid & ATTR_MODE))
@@ -282,12 +281,11 @@ nfsd3_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (host_err)
return nfserrno(host_err);
- inode_lock_nested(inode, I_MUTEX_PARENT);
-
- child = lookup_one_len(argp->name, parent, argp->len);
+ child = start_creating(&nop_mnt_idmap, parent,
+ &QSTR_LEN(argp->name, argp->len));
if (IS_ERR(child)) {
status = nfserrno(PTR_ERR(child));
- goto out;
+ goto out_write;
}
if (d_really_is_negative(child)) {
@@ -343,7 +341,7 @@ nfsd3_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
status = fh_fill_pre_attrs(fhp);
if (status != nfs_ok)
goto out;
- host_err = vfs_create(&nop_mnt_idmap, inode, child, iap->ia_mode, true);
+ host_err = vfs_create(&nop_mnt_idmap, child, iap->ia_mode, NULL);
if (host_err < 0) {
status = nfserrno(host_err);
goto out;
@@ -366,9 +364,8 @@ set_attr:
status = nfsd_create_setattr(rqstp, fhp, resfhp, &attrs);
out:
- inode_unlock(inode);
- if (child && !IS_ERR(child))
- dput(child);
+ end_creating(child);
+out_write:
fh_drop_write(fhp);
return status;
}
@@ -380,11 +377,6 @@ nfsd3_proc_create(struct svc_rqst *rqstp)
struct nfsd3_diropres *resp = rqstp->rq_resp;
svc_fh *dirfhp, *newfhp;
- dprintk("nfsd: CREATE(3) %s %.*s\n",
- SVCFH_fmt(&argp->fh),
- argp->len,
- argp->name);
-
dirfhp = fh_copy(&resp->dirfh, &argp->fh);
newfhp = fh_init(&resp->fh, NFS3_FHSIZE);
@@ -405,11 +397,6 @@ nfsd3_proc_mkdir(struct svc_rqst *rqstp)
.na_iattr = &argp->attrs,
};
- dprintk("nfsd: MKDIR(3) %s %.*s\n",
- SVCFH_fmt(&argp->fh),
- argp->len,
- argp->name);
-
argp->attrs.ia_valid &= ~ATTR_SIZE;
fh_copy(&resp->dirfh, &argp->fh);
fh_init(&resp->fh, NFS3_FHSIZE);
@@ -445,11 +432,6 @@ nfsd3_proc_symlink(struct svc_rqst *rqstp)
goto out;
}
- dprintk("nfsd: SYMLINK(3) %s %.*s -> %.*s\n",
- SVCFH_fmt(&argp->ffh),
- argp->flen, argp->fname,
- argp->tlen, argp->tname);
-
fh_copy(&resp->dirfh, &argp->ffh);
fh_init(&resp->fh, NFS3_FHSIZE);
resp->status = nfsd_symlink(rqstp, &resp->dirfh, argp->fname,
@@ -474,11 +456,6 @@ nfsd3_proc_mknod(struct svc_rqst *rqstp)
int type;
dev_t rdev = 0;
- dprintk("nfsd: MKNOD(3) %s %.*s\n",
- SVCFH_fmt(&argp->fh),
- argp->len,
- argp->name);
-
fh_copy(&resp->dirfh, &argp->fh);
fh_init(&resp->fh, NFS3_FHSIZE);
@@ -511,11 +488,6 @@ nfsd3_proc_remove(struct svc_rqst *rqstp)
struct nfsd3_diropargs *argp = rqstp->rq_argp;
struct nfsd3_attrstat *resp = rqstp->rq_resp;
- dprintk("nfsd: REMOVE(3) %s %.*s\n",
- SVCFH_fmt(&argp->fh),
- argp->len,
- argp->name);
-
/* Unlink. -S_IFDIR means file must not be a directory */
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_unlink(rqstp, &resp->fh, -S_IFDIR,
@@ -533,11 +505,6 @@ nfsd3_proc_rmdir(struct svc_rqst *rqstp)
struct nfsd3_diropargs *argp = rqstp->rq_argp;
struct nfsd3_attrstat *resp = rqstp->rq_resp;
- dprintk("nfsd: RMDIR(3) %s %.*s\n",
- SVCFH_fmt(&argp->fh),
- argp->len,
- argp->name);
-
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_unlink(rqstp, &resp->fh, S_IFDIR,
argp->name, argp->len);
@@ -551,15 +518,6 @@ nfsd3_proc_rename(struct svc_rqst *rqstp)
struct nfsd3_renameargs *argp = rqstp->rq_argp;
struct nfsd3_renameres *resp = rqstp->rq_resp;
- dprintk("nfsd: RENAME(3) %s %.*s ->\n",
- SVCFH_fmt(&argp->ffh),
- argp->flen,
- argp->fname);
- dprintk("nfsd: -> %s %.*s\n",
- SVCFH_fmt(&argp->tfh),
- argp->tlen,
- argp->tname);
-
fh_copy(&resp->ffh, &argp->ffh);
fh_copy(&resp->tfh, &argp->tfh);
resp->status = nfsd_rename(rqstp, &resp->ffh, argp->fname, argp->flen,
@@ -574,13 +532,6 @@ nfsd3_proc_link(struct svc_rqst *rqstp)
struct nfsd3_linkargs *argp = rqstp->rq_argp;
struct nfsd3_linkres *resp = rqstp->rq_resp;
- dprintk("nfsd: LINK(3) %s ->\n",
- SVCFH_fmt(&argp->ffh));
- dprintk("nfsd: -> %s %.*s\n",
- SVCFH_fmt(&argp->tfh),
- argp->tlen,
- argp->tname);
-
fh_copy(&resp->fh, &argp->ffh);
fh_copy(&resp->tfh, &argp->tfh);
resp->status = nfsd_link(rqstp, &resp->tfh, argp->tname, argp->tlen,
@@ -606,7 +557,7 @@ static void nfsd3_init_dirlist_pages(struct svc_rqst *rqstp,
buf->pages = rqstp->rq_next_page;
rqstp->rq_next_page += (buf->buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
- xdr_init_encode_pages(xdr, buf, buf->pages, NULL);
+ xdr_init_encode_pages(xdr, buf);
}
/*
@@ -619,9 +570,7 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
struct nfsd3_readdirres *resp = rqstp->rq_resp;
loff_t offset;
- dprintk("nfsd: READDIR(3) %s %d bytes at %d\n",
- SVCFH_fmt(&argp->fh),
- argp->count, (u32) argp->cookie);
+ trace_nfsd_vfs_readdir(rqstp, &argp->fh, argp->count, argp->cookie);
nfsd3_init_dirlist_pages(rqstp, resp, argp->count);
@@ -653,9 +602,7 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp)
struct nfsd3_readdirres *resp = rqstp->rq_resp;
loff_t offset;
- dprintk("nfsd: READDIR+(3) %s %d bytes at %d\n",
- SVCFH_fmt(&argp->fh),
- argp->count, (u32) argp->cookie);
+ trace_nfsd_vfs_readdir(rqstp, &argp->fh, argp->count, argp->cookie);
nfsd3_init_dirlist_pages(rqstp, resp, argp->count);
@@ -696,9 +643,6 @@ nfsd3_proc_fsstat(struct svc_rqst *rqstp)
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd3_fsstatres *resp = rqstp->rq_resp;
- dprintk("nfsd: FSSTAT(3) %s\n",
- SVCFH_fmt(&argp->fh));
-
resp->status = nfsd_statfs(rqstp, &argp->fh, &resp->stats, 0);
fh_put(&argp->fh);
resp->status = nfsd3_map_status(resp->status);
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index a7a07470c1f8..ef4971d71ac4 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -1001,7 +1001,9 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
} else
dchild = dget(dparent);
} else
- dchild = lookup_positive_unlocked(name, dparent, namlen);
+ dchild = lookup_one_positive_unlocked(&nop_mnt_idmap,
+ &QSTR_LEN(name, namlen),
+ dparent);
if (IS_ERR(dchild))
return rv;
if (d_mountpoint(dchild))
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 3877b53e429f..e00b2aea8da2 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -42,11 +42,10 @@
#include "trace.h"
#include "xdr4cb.h"
#include "xdr4.h"
+#include "nfs4xdr_gen.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
-static void nfsd4_mark_cb_fault(struct nfs4_client *clp);
-
#define NFSPROC4_CB_NULL 0
#define NFSPROC4_CB_COMPOUND 1
@@ -93,12 +92,35 @@ static int decode_cb_fattr4(struct xdr_stream *xdr, uint32_t *bitmap,
{
fattr->ncf_cb_change = 0;
fattr->ncf_cb_fsize = 0;
+ fattr->ncf_cb_atime.tv_sec = 0;
+ fattr->ncf_cb_atime.tv_nsec = 0;
+ fattr->ncf_cb_mtime.tv_sec = 0;
+ fattr->ncf_cb_mtime.tv_nsec = 0;
+
if (bitmap[0] & FATTR4_WORD0_CHANGE)
if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_change) < 0)
- return -NFSERR_BAD_XDR;
+ return -EIO;
if (bitmap[0] & FATTR4_WORD0_SIZE)
if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_fsize) < 0)
- return -NFSERR_BAD_XDR;
+ return -EIO;
+ if (bitmap[2] & FATTR4_WORD2_TIME_DELEG_ACCESS) {
+ fattr4_time_deleg_access access;
+
+ if (!xdrgen_decode_fattr4_time_deleg_access(xdr, &access))
+ return -EIO;
+ fattr->ncf_cb_atime.tv_sec = access.seconds;
+ fattr->ncf_cb_atime.tv_nsec = access.nseconds;
+
+ }
+ if (bitmap[2] & FATTR4_WORD2_TIME_DELEG_MODIFY) {
+ fattr4_time_deleg_modify modify;
+
+ if (!xdrgen_decode_fattr4_time_deleg_modify(xdr, &modify))
+ return -EIO;
+ fattr->ncf_cb_mtime.tv_sec = modify.seconds;
+ fattr->ncf_cb_mtime.tv_nsec = modify.nseconds;
+
+ }
return 0;
}
@@ -361,16 +383,24 @@ static void
encode_cb_getattr4args(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr,
struct nfs4_cb_fattr *fattr)
{
- struct nfs4_delegation *dp =
- container_of(fattr, struct nfs4_delegation, dl_cb_fattr);
+ struct nfs4_delegation *dp = container_of(fattr, struct nfs4_delegation, dl_cb_fattr);
struct knfsd_fh *fh = &dp->dl_stid.sc_file->fi_fhandle;
- u32 bmap[1];
-
- bmap[0] = FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE;
-
+ struct nfs4_cb_fattr *ncf = &dp->dl_cb_fattr;
+ u32 bmap_size = 1;
+ u32 bmap[3];
+
+ bmap[0] = FATTR4_WORD0_SIZE;
+ if (!ncf->ncf_file_modified)
+ bmap[0] |= FATTR4_WORD0_CHANGE;
+
+ if (deleg_attrs_deleg(dp->dl_type)) {
+ bmap[1] = 0;
+ bmap[2] = FATTR4_WORD2_TIME_DELEG_ACCESS | FATTR4_WORD2_TIME_DELEG_MODIFY;
+ bmap_size = 3;
+ }
encode_nfs_cb_opnum4(xdr, OP_CB_GETATTR);
encode_nfs_fh4(xdr, fh);
- encode_bitmap4(xdr, bmap, ARRAY_SIZE(bmap));
+ encode_bitmap4(xdr, bmap, bmap_size);
hdr->nops++;
}
@@ -387,6 +417,29 @@ static u32 highest_slotid(struct nfsd4_session *ses)
return idx;
}
+static void
+encode_referring_call4(struct xdr_stream *xdr,
+ const struct nfsd4_referring_call *rc)
+{
+ encode_uint32(xdr, rc->rc_sequenceid);
+ encode_uint32(xdr, rc->rc_slotid);
+}
+
+static void
+encode_referring_call_list4(struct xdr_stream *xdr,
+ const struct nfsd4_referring_call_list *rcl)
+{
+ struct nfsd4_referring_call *rc;
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
+ xdr_encode_opaque_fixed(p, rcl->rcl_sessionid.data,
+ NFS4_MAX_SESSIONID_LEN);
+ encode_uint32(xdr, rcl->__nr_referring_calls);
+ list_for_each_entry(rc, &rcl->rcl_referring_calls, __list)
+ encode_referring_call4(xdr, rc);
+}
+
/*
* CB_SEQUENCE4args
*
@@ -404,6 +457,7 @@ static void encode_cb_sequence4args(struct xdr_stream *xdr,
struct nfs4_cb_compound_hdr *hdr)
{
struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
+ struct nfsd4_referring_call_list *rcl;
__be32 *p;
if (hdr->minorversion == 0)
@@ -412,12 +466,16 @@ static void encode_cb_sequence4args(struct xdr_stream *xdr,
encode_nfs_cb_opnum4(xdr, OP_CB_SEQUENCE);
encode_sessionid4(xdr, session);
- p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4 + 4);
+ p = xdr_reserve_space(xdr, XDR_UNIT * 4);
*p++ = cpu_to_be32(session->se_cb_seq_nr[cb->cb_held_slot]); /* csa_sequenceid */
*p++ = cpu_to_be32(cb->cb_held_slot); /* csa_slotid */
*p++ = cpu_to_be32(highest_slotid(session)); /* csa_highest_slotid */
*p++ = xdr_zero; /* csa_cachethis */
- xdr_encode_empty_array(p); /* csa_referring_call_lists */
+
+ /* csa_referring_call_lists */
+ encode_uint32(xdr, cb->cb_nr_referring_call_list);
+ list_for_each_entry(rcl, &cb->cb_referring_call_list, __list)
+ encode_referring_call_list4(xdr, rcl);
hdr->nops++;
}
@@ -634,7 +692,7 @@ static int nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp,
struct nfs4_cb_compound_hdr hdr;
int status;
u32 bitmap[3] = {0};
- u32 attrlen;
+ u32 attrlen, maxlen;
struct nfs4_cb_fattr *ncf =
container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
@@ -647,14 +705,18 @@ static int nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp,
return status;
status = decode_cb_op_status(xdr, OP_CB_GETATTR, &cb->cb_status);
- if (status)
+ if (unlikely(status || cb->cb_status))
return status;
if (xdr_stream_decode_uint32_array(xdr, bitmap, 3) < 0)
- return -NFSERR_BAD_XDR;
+ return -EIO;
if (xdr_stream_decode_u32(xdr, &attrlen) < 0)
- return -NFSERR_BAD_XDR;
- if (attrlen > (sizeof(ncf->ncf_cb_change) + sizeof(ncf->ncf_cb_fsize)))
- return -NFSERR_BAD_XDR;
+ return -EIO;
+ maxlen = sizeof(ncf->ncf_cb_change) + sizeof(ncf->ncf_cb_fsize);
+ if (bitmap[2] != 0)
+ maxlen += (sizeof(ncf->ncf_cb_mtime.tv_sec) +
+ sizeof(ncf->ncf_cb_mtime.tv_nsec)) * 2;
+ if (attrlen > maxlen)
+ return -EIO;
status = decode_cb_fattr4(xdr, bitmap, ncf);
return status;
}
@@ -1028,6 +1090,17 @@ static bool nfsd4_queue_cb(struct nfsd4_callback *cb)
return queue_work(clp->cl_callback_wq, &cb->cb_work);
}
+static void nfsd4_requeue_cb(struct rpc_task *task, struct nfsd4_callback *cb)
+{
+ struct nfs4_client *clp = cb->cb_clp;
+
+ if (!test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags)) {
+ trace_nfsd_cb_restart(clp, cb);
+ task->tk_status = 0;
+ set_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags);
+ }
+}
+
static void nfsd41_cb_inflight_begin(struct nfs4_client *clp)
{
atomic_inc(&clp->cl_cb_inflight);
@@ -1036,8 +1109,7 @@ static void nfsd41_cb_inflight_begin(struct nfs4_client *clp)
static void nfsd41_cb_inflight_end(struct nfs4_client *clp)
{
- if (atomic_dec_and_test(&clp->cl_cb_inflight))
- wake_up_var(&clp->cl_cb_inflight);
+ atomic_dec_and_wake_up(&clp->cl_cb_inflight);
}
static void nfsd41_cb_inflight_wait_complete(struct nfs4_client *clp)
@@ -1100,7 +1172,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
args.authflavor = clp->cl_cred.cr_flavor;
clp->cl_cb_ident = conn->cb_ident;
} else {
- if (!conn->cb_xprt)
+ if (!conn->cb_xprt || !ses)
return -EINVAL;
clp->cl_cb_session = ses;
args.bc_xprt = conn->cb_xprt;
@@ -1266,15 +1338,113 @@ static void nfsd41_destroy_cb(struct nfsd4_callback *cb)
trace_nfsd_cb_destroy(clp, cb);
nfsd41_cb_release_slot(cb);
+ if (test_bit(NFSD4_CALLBACK_WAKE, &cb->cb_flags))
+ clear_and_wake_up_bit(NFSD4_CALLBACK_RUNNING, &cb->cb_flags);
+ else
+ clear_bit(NFSD4_CALLBACK_RUNNING, &cb->cb_flags);
+
if (cb->cb_ops && cb->cb_ops->release)
cb->cb_ops->release(cb);
nfsd41_cb_inflight_end(clp);
}
-/*
- * TODO: cb_sequence should support referring call lists, cachethis,
- * and mark callback channel down on communication errors.
+/**
+ * nfsd41_cb_referring_call - add a referring call to a callback operation
+ * @cb: context of callback to add the rc to
+ * @sessionid: referring call's session ID
+ * @slotid: referring call's session slot index
+ * @seqno: referring call's slot sequence number
+ *
+ * Caller serializes access to @cb.
+ *
+ * NB: If memory allocation fails, the referring call is not added.
+ */
+void nfsd41_cb_referring_call(struct nfsd4_callback *cb,
+ struct nfs4_sessionid *sessionid,
+ u32 slotid, u32 seqno)
+{
+ struct nfsd4_referring_call_list *rcl;
+ struct nfsd4_referring_call *rc;
+ bool found;
+
+ might_sleep();
+
+ found = false;
+ list_for_each_entry(rcl, &cb->cb_referring_call_list, __list) {
+ if (!memcmp(rcl->rcl_sessionid.data, sessionid->data,
+ NFS4_MAX_SESSIONID_LEN)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ rcl = kmalloc(sizeof(*rcl), GFP_KERNEL);
+ if (!rcl)
+ return;
+ memcpy(rcl->rcl_sessionid.data, sessionid->data,
+ NFS4_MAX_SESSIONID_LEN);
+ rcl->__nr_referring_calls = 0;
+ INIT_LIST_HEAD(&rcl->rcl_referring_calls);
+ list_add(&rcl->__list, &cb->cb_referring_call_list);
+ cb->cb_nr_referring_call_list++;
+ }
+
+ found = false;
+ list_for_each_entry(rc, &rcl->rcl_referring_calls, __list) {
+ if (rc->rc_sequenceid == seqno && rc->rc_slotid == slotid) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ rc = kmalloc(sizeof(*rc), GFP_KERNEL);
+ if (!rc)
+ goto out;
+ rc->rc_sequenceid = seqno;
+ rc->rc_slotid = slotid;
+ rcl->__nr_referring_calls++;
+ list_add(&rc->__list, &rcl->rcl_referring_calls);
+ }
+
+out:
+ if (!rcl->__nr_referring_calls) {
+ cb->cb_nr_referring_call_list--;
+ list_del(&rcl->__list);
+ kfree(rcl);
+ }
+}
+
+/**
+ * nfsd41_cb_destroy_referring_call_list - release referring call info
+ * @cb: context of a callback that has completed
+ *
+ * Callers who allocate referring calls using nfsd41_cb_referring_call() must
+ * release those resources by calling nfsd41_cb_destroy_referring_call_list.
+ *
+ * Caller serializes access to @cb.
*/
+void nfsd41_cb_destroy_referring_call_list(struct nfsd4_callback *cb)
+{
+ struct nfsd4_referring_call_list *rcl;
+ struct nfsd4_referring_call *rc;
+
+ while (!list_empty(&cb->cb_referring_call_list)) {
+ rcl = list_first_entry(&cb->cb_referring_call_list,
+ struct nfsd4_referring_call_list,
+ __list);
+
+ while (!list_empty(&rcl->rcl_referring_calls)) {
+ rc = list_first_entry(&rcl->rcl_referring_calls,
+ struct nfsd4_referring_call,
+ __list);
+ list_del(&rc->__list);
+ kfree(rc);
+ }
+ list_del(&rcl->__list);
+ kfree(rcl);
+ }
+}
+
static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
{
struct nfsd4_callback *cb = calldata;
@@ -1293,30 +1463,14 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
rpc_call_start(task);
}
+/* Returns true if CB_COMPOUND processing should continue */
static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback *cb)
{
- struct nfs4_client *clp = cb->cb_clp;
- struct nfsd4_session *session = clp->cl_cb_session;
- bool ret = true;
-
- if (!clp->cl_minorversion) {
- /*
- * If the backchannel connection was shut down while this
- * task was queued, we need to resubmit it after setting up
- * a new backchannel connection.
- *
- * Note that if we lost our callback connection permanently
- * the submission code will error out, so we don't need to
- * handle that case here.
- */
- if (RPC_SIGNALLED(task))
- goto need_restart;
-
- return true;
- }
+ struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
+ bool ret = false;
if (cb->cb_held_slot < 0)
- goto need_restart;
+ goto requeue;
/* This is the operation status code for CB_SEQUENCE */
trace_nfsd_cb_seq_status(task, cb);
@@ -1330,11 +1484,16 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
* (sequence ID, cached reply) MUST NOT change.
*/
++session->se_cb_seq_nr[cb->cb_held_slot];
+ ret = true;
break;
case -ESERVERFAULT:
- ++session->se_cb_seq_nr[cb->cb_held_slot];
+ /*
+ * Call succeeded, but the session, slot index, or slot
+ * sequence number in the response do not match those
+ * in the server's call. The sequence information is thus
+ * untrustworthy.
+ */
nfsd4_mark_cb_fault(cb->cb_clp);
- ret = false;
break;
case 1:
/*
@@ -1346,43 +1505,42 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
fallthrough;
case -NFS4ERR_BADSESSION:
nfsd4_mark_cb_fault(cb->cb_clp);
- ret = false;
- goto need_restart;
+ goto requeue;
case -NFS4ERR_DELAY:
cb->cb_seq_status = 1;
- if (!rpc_restart_call(task))
- goto out;
-
+ if (RPC_SIGNALLED(task) || !rpc_restart_call(task))
+ goto requeue;
rpc_delay(task, 2 * HZ);
return false;
+ case -NFS4ERR_SEQ_MISORDERED:
case -NFS4ERR_BADSLOT:
+ /*
+ * A SEQ_MISORDERED or BADSLOT error means that the client and
+ * server are out of sync as to the backchannel parameters. Mark
+ * the backchannel faulty and restart the RPC, but leak the slot
+ * so that it's no longer used.
+ */
+ nfsd4_mark_cb_fault(cb->cb_clp);
+ cb->cb_held_slot = -1;
goto retry_nowait;
- case -NFS4ERR_SEQ_MISORDERED:
- if (session->se_cb_seq_nr[cb->cb_held_slot] != 1) {
- session->se_cb_seq_nr[cb->cb_held_slot] = 1;
- goto retry_nowait;
- }
- break;
default:
nfsd4_mark_cb_fault(cb->cb_clp);
}
trace_nfsd_cb_free_slot(task, cb);
nfsd41_cb_release_slot(cb);
-
- if (RPC_SIGNALLED(task))
- goto need_restart;
-out:
return ret;
retry_nowait:
- if (rpc_restart_call_prepare(task))
- ret = false;
- goto out;
-need_restart:
- if (!test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags)) {
- trace_nfsd_cb_restart(clp, cb);
- task->tk_status = 0;
- cb->cb_need_restart = true;
+ /*
+ * RPC_SIGNALLED() means that the rpc_client is being torn down and
+ * (possibly) recreated. Requeue the call in that case.
+ */
+ if (!RPC_SIGNALLED(task)) {
+ if (rpc_restart_call_prepare(task))
+ return false;
}
+requeue:
+ nfsd41_cb_release_slot(cb);
+ nfsd4_requeue_cb(task, cb);
return false;
}
@@ -1393,12 +1551,26 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
trace_nfsd_cb_rpc_done(clp);
- if (!nfsd4_cb_sequence_done(task, cb))
+ if (!clp->cl_minorversion) {
+ /*
+ * If the backchannel connection was shut down while this
+ * task was queued, we need to resubmit it after setting up
+ * a new backchannel connection.
+ *
+ * Note that if we lost our callback connection permanently
+ * the submission code will error out, so we don't need to
+ * handle that case here.
+ */
+ if (RPC_SIGNALLED(task))
+ nfsd4_requeue_cb(task, cb);
+ } else if (!nfsd4_cb_sequence_done(task, cb)) {
return;
+ }
if (cb->cb_status) {
- WARN_ONCE(task->tk_status, "cb_status=%d tk_status=%d",
- cb->cb_status, task->tk_status);
+ WARN_ONCE(task->tk_status,
+ "cb_status=%d tk_status=%d cb_opcode=%d",
+ cb->cb_status, task->tk_status, cb->cb_ops->opcode);
task->tk_status = cb->cb_status;
}
@@ -1426,7 +1598,7 @@ static void nfsd4_cb_release(void *calldata)
trace_nfsd_cb_rpc_release(cb->cb_clp);
- if (cb->cb_need_restart)
+ if (test_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags))
nfsd4_queue_cb(cb);
else
nfsd41_destroy_cb(cb);
@@ -1522,8 +1694,6 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
ses = c->cn_session;
}
spin_unlock(&clp->cl_lock);
- if (!c)
- return;
err = setup_callback_client(clp, &conn, ses);
if (err) {
@@ -1541,7 +1711,7 @@ nfsd4_run_cb_work(struct work_struct *work)
container_of(work, struct nfsd4_callback, cb_work);
struct nfs4_client *clp = cb->cb_clp;
struct rpc_clnt *clnt;
- int flags;
+ int flags, ret;
trace_nfsd_cb_start(clp);
@@ -1549,8 +1719,11 @@ nfsd4_run_cb_work(struct work_struct *work)
nfsd4_process_cb_update(cb);
clnt = clp->cl_cb_client;
- if (!clnt) {
- /* Callback channel broken, or client killed; give up: */
+ if (!clnt || clp->cl_state == NFSD4_COURTESY) {
+ /*
+ * Callback channel broken, client killed or
+ * nfs4_client in courtesy state; give up.
+ */
nfsd41_destroy_cb(cb);
return;
}
@@ -1564,16 +1737,19 @@ nfsd4_run_cb_work(struct work_struct *work)
return;
}
- if (cb->cb_need_restart) {
- cb->cb_need_restart = false;
- } else {
+ if (!test_and_clear_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags)) {
if (cb->cb_ops && cb->cb_ops->prepare)
cb->cb_ops->prepare(cb);
}
+
cb->cb_msg.rpc_cred = clp->cl_cb_cred;
flags = clp->cl_minorversion ? RPC_TASK_NOCONNECT : RPC_TASK_SOFTCONN;
- rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | flags,
- cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
+ ret = rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | flags,
+ cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
+ if (ret != 0) {
+ set_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags);
+ nfsd4_queue_cb(cb);
+ }
}
void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
@@ -1583,11 +1759,13 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
cb->cb_msg.rpc_proc = &nfs4_cb_procedures[op];
cb->cb_msg.rpc_argp = cb;
cb->cb_msg.rpc_resp = cb;
+ cb->cb_flags = 0;
cb->cb_ops = ops;
INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
cb->cb_status = 0;
- cb->cb_need_restart = false;
cb->cb_held_slot = -1;
+ cb->cb_nr_referring_call_list = 0;
+ INIT_LIST_HEAD(&cb->cb_referring_call_list);
}
/**
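
nfsd41_cb_referring_call() above builds a two-level structure: one
nfsd4_referring_call_list per referring session ID, each holding deduplicated
(slot, sequence) pairs, which encode_cb_sequence4args() then emits as
csa_referring_call_lists. A compact sketch of the same dedup-then-append shape
with invented generic types (rc_bucket/rc_entry and the 16-byte session ID are
assumptions for illustration; NFS4_MAX_SESSIONID_LEN is the real constant used
in the hunk):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative two-level list keyed by session ID, then (slot, seqno). */
struct rc_entry {
        struct list_head list;
        u32 slotid, seqno;
};

struct rc_bucket {
        struct list_head list;
        u8 sessionid[16];               /* stands in for nfs4_sessionid */
        struct list_head calls;
};

static void rc_add(struct list_head *buckets, const u8 *sessionid,
                   u32 slotid, u32 seqno)
{
        struct rc_bucket *b;
        struct rc_entry *e;

        list_for_each_entry(b, buckets, list)
                if (!memcmp(b->sessionid, sessionid, sizeof(b->sessionid)))
                        goto have_bucket;

        b = kmalloc(sizeof(*b), GFP_KERNEL);
        if (!b)
                return;                 /* best effort, as in the nfsd code */
        memcpy(b->sessionid, sessionid, sizeof(b->sessionid));
        INIT_LIST_HEAD(&b->calls);
        list_add(&b->list, buckets);

have_bucket:
        list_for_each_entry(e, &b->calls, list)
                if (e->slotid == slotid && e->seqno == seqno)
                        return;         /* already recorded */

        e = kmalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return;                 /* nfsd also discards a bucket left empty here */
        e->slotid = slotid;
        e->seqno = seqno;
        list_add(&e->list, &b->calls);
}
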
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index fbfddd3c4c94..683bd1130afe 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -65,7 +65,7 @@ nfsd4_alloc_devid_map(const struct svc_fh *fhp)
return;
map->fsid_type = fh->fh_fsid_type;
- memcpy(&map->fsid, fh->fh_fsid, fsid_len);
+ memcpy(&map->fsid, fh_fsid(fh), fsid_len);
spin_lock(&nfsd_devid_lock);
if (fhp->fh_export->ex_devid_map)
@@ -75,7 +75,7 @@ nfsd4_alloc_devid_map(const struct svc_fh *fhp)
list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
if (old->fsid_type != fh->fh_fsid_type)
continue;
- if (memcmp(old->fsid, fh->fh_fsid,
+ if (memcmp(old->fsid, fh_fsid(fh),
key_len(old->fsid_type)))
continue;
@@ -120,7 +120,6 @@ nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
id->generation = device_generation;
- id->pad = 0;
return 0;
}
@@ -344,9 +343,10 @@ nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
trace_nfsd_layout_recall(&ls->ls_stid.sc_stateid);
- refcount_inc(&ls->ls_stid.sc_count);
- nfsd4_run_cb(&ls->ls_recall);
-
+ if (!test_and_set_bit(NFSD4_CALLBACK_RUNNING, &ls->ls_recall.cb_flags)) {
+ refcount_inc(&ls->ls_stid.sc_count);
+ nfsd4_run_cb(&ls->ls_recall);
+ }
out_unlock:
spin_unlock(&ls->ls_lock);
}
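
The nfs4layouts.c hunk above only queues a layout recall when it wins
test_and_set_bit() on NFSD4_CALLBACK_RUNNING, so the same nfsd4_callback can
never be in flight twice. A minimal sketch of that "single outstanding work
item" idiom follows, with invented names (MY_CB_RUNNING, struct my_cb, a plain
schedule_work() instead of nfsd's own queueing); clear_and_wake_up_bit() and
the bitop helpers are real kernel APIs:

#include <linux/bitops.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>

#define MY_CB_RUNNING 0                 /* stands in for NFSD4_CALLBACK_RUNNING */

struct my_cb {
        unsigned long flags;
        struct work_struct work;
};

/* Queue the callback only if it is not already in flight. */
static void my_cb_kick(struct my_cb *cb)
{
        if (!test_and_set_bit(MY_CB_RUNNING, &cb->flags))
                schedule_work(&cb->work);
}

/* Called once the callback finishes; lets any waiter proceed. */
static void my_cb_done(struct my_cb *cb)
{
        clear_and_wake_up_bit(MY_CB_RUNNING, &cb->flags);
}
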
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index f8a10f90bc7a..b74800917583 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -264,12 +264,11 @@ nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (is_create_with_attrs(open))
nfsd4_acl_to_attr(NF4REG, open->op_acl, &attrs);
- inode_lock_nested(inode, I_MUTEX_PARENT);
-
- child = lookup_one_len(open->op_fname, parent, open->op_fnamelen);
+ child = start_creating(&nop_mnt_idmap, parent,
+ &QSTR_LEN(open->op_fname, open->op_fnamelen));
if (IS_ERR(child)) {
status = nfserrno(PTR_ERR(child));
- goto out;
+ goto out_write;
}
if (d_really_is_negative(child)) {
@@ -377,10 +376,9 @@ set_attr:
if (attrs.na_aclerr)
open->op_bmval[0] &= ~FATTR4_WORD0_ACL;
out:
- inode_unlock(inode);
+ end_creating(child);
nfsd_attrs_free(&attrs);
- if (child && !IS_ERR(child))
- dput(child);
+out_write:
fh_drop_write(fhp);
return status;
}
@@ -876,6 +874,8 @@ nfsd4_getattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_getattr *getattr = &u->getattr;
__be32 status;
+ trace_nfsd_vfs_getattr(rqstp, &cstate->current_fh);
+
status = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_NOP);
if (status)
return status;
@@ -984,10 +984,11 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
static void
nfsd4_read_release(union nfsd4_op_u *u)
{
- if (u->read.rd_nf)
+ if (u->read.rd_nf) {
+ trace_nfsd_read_done(u->read.rd_rqstp, u->read.rd_fhp,
+ u->read.rd_offset, u->read.rd_length);
nfsd_file_put(u->read.rd_nf);
- trace_nfsd_read_done(u->read.rd_rqstp, u->read.rd_fhp,
- u->read.rd_offset, u->read.rd_length);
+ }
}
static __be32
@@ -998,6 +999,9 @@ nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
u64 cookie = readdir->rd_cookie;
static const nfs4_verifier zeroverf;
+ trace_nfsd_vfs_readdir(rqstp, &cstate->current_fh,
+ readdir->rd_maxcount, readdir->rd_cookie);
+
/* no need to check permission - this will be done in nfsd_readdir() */
if (readdir->rd_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1)
@@ -1126,6 +1130,35 @@ nfsd4_secinfo_no_name_release(union nfsd4_op_u *u)
exp_put(u->secinfo_no_name.sin_exp);
}
+/*
+ * Validate that the requested timestamps are within the acceptable range. If
+ * timestamp appears to be in the future, then it will be clamped to
+ * current_time().
+ */
+static void
+vet_deleg_attrs(struct nfsd4_setattr *setattr, struct nfs4_delegation *dp)
+{
+ struct timespec64 now = current_time(dp->dl_stid.sc_file->fi_inode);
+ struct iattr *iattr = &setattr->sa_iattr;
+
+ if ((setattr->sa_bmval[2] & FATTR4_WORD2_TIME_DELEG_ACCESS) &&
+ !nfsd4_vet_deleg_time(&iattr->ia_atime, &dp->dl_atime, &now))
+ iattr->ia_valid &= ~(ATTR_ATIME | ATTR_ATIME_SET);
+
+ if (setattr->sa_bmval[2] & FATTR4_WORD2_TIME_DELEG_MODIFY) {
+ if (nfsd4_vet_deleg_time(&iattr->ia_mtime, &dp->dl_mtime, &now)) {
+ iattr->ia_ctime = iattr->ia_mtime;
+ if (nfsd4_vet_deleg_time(&iattr->ia_ctime, &dp->dl_ctime, &now))
+ dp->dl_setattr = true;
+ else
+ iattr->ia_valid &= ~(ATTR_CTIME | ATTR_CTIME_SET);
+ } else {
+ iattr->ia_valid &= ~(ATTR_CTIME | ATTR_CTIME_SET |
+ ATTR_MTIME | ATTR_MTIME_SET);
+ }
+ }
+}
+
static __be32
nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
@@ -1135,18 +1168,45 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
.na_iattr = &setattr->sa_iattr,
.na_seclabel = &setattr->sa_label,
};
+ bool save_no_wcc, deleg_attrs;
+ struct nfs4_stid *st = NULL;
struct inode *inode;
__be32 status = nfs_ok;
- bool save_no_wcc;
int err;
- if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
+ deleg_attrs = setattr->sa_bmval[2] & (FATTR4_WORD2_TIME_DELEG_ACCESS |
+ FATTR4_WORD2_TIME_DELEG_MODIFY);
+
+ if (deleg_attrs || (setattr->sa_iattr.ia_valid & ATTR_SIZE)) {
+ int flags = WR_STATE;
+
+ if (setattr->sa_bmval[2] & FATTR4_WORD2_TIME_DELEG_ACCESS)
+ flags |= RD_STATE;
+
status = nfs4_preprocess_stateid_op(rqstp, cstate,
&cstate->current_fh, &setattr->sa_stateid,
- WR_STATE, NULL, NULL);
+ flags, NULL, &st);
if (status)
return status;
}
+
+ if (deleg_attrs) {
+ status = nfserr_bad_stateid;
+ if (st->sc_type & SC_TYPE_DELEG) {
+ struct nfs4_delegation *dp = delegstateid(st);
+
+ /* Only for *_ATTRS_DELEG flavors */
+ if (deleg_attrs_deleg(dp->dl_type)) {
+ vet_deleg_attrs(setattr, dp);
+ status = nfs_ok;
+ }
+ }
+ }
+ if (st)
+ nfs4_put_stid(st);
+ if (status)
+ return status;
+
err = fh_want_write(&cstate->current_fh);
if (err)
return nfserrno(err);
@@ -1177,16 +1237,29 @@ out:
return status;
}
+static void nfsd4_file_mark_deleg_written(struct nfs4_file *fi)
+{
+ spin_lock(&fi->fi_lock);
+ if (!list_empty(&fi->fi_delegations)) {
+ struct nfs4_delegation *dp = list_first_entry(&fi->fi_delegations,
+ struct nfs4_delegation, dl_perfile);
+
+ if (dp->dl_type == OPEN_DELEGATE_WRITE_ATTRS_DELEG)
+ dp->dl_written = true;
+ }
+ spin_unlock(&fi->fi_lock);
+}
+
static __be32
nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_write *write = &u->write;
stateid_t *stateid = &write->wr_stateid;
+ struct nfs4_stid *stid = NULL;
struct nfsd_file *nf = NULL;
__be32 status = nfs_ok;
unsigned long cnt;
- int nvecs;
if (write->wr_offset > (u64)OFFSET_MAX ||
write->wr_offset + write->wr_buflen > (u64)OFFSET_MAX)
@@ -1196,18 +1269,19 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
trace_nfsd_write_start(rqstp, &cstate->current_fh,
write->wr_offset, cnt);
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
- stateid, WR_STATE, &nf, NULL);
+ stateid, WR_STATE, &nf, &stid);
if (status)
return status;
- write->wr_how_written = write->wr_stable_how;
-
- nvecs = svc_fill_write_vector(rqstp, &write->wr_payload);
- WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
+ if (stid) {
+ nfsd4_file_mark_deleg_written(stid->sc_file);
+ nfs4_put_stid(stid);
+ }
+ write->wr_how_written = write->wr_stable_how;
status = nfsd_vfs_write(rqstp, &cstate->current_fh, nf,
- write->wr_offset, rqstp->rq_vec, nvecs, &cnt,
- write->wr_how_written,
+ write->wr_offset, &write->wr_payload,
+ &cnt, write->wr_how_written,
(__be32 *)write->wr_verifier.data);
nfsd_file_put(nf);
@@ -1347,7 +1421,6 @@ static void nfs4_put_copy(struct nfsd4_copy *copy)
{
if (!refcount_dec_and_test(&copy->refcount))
return;
- atomic_dec(&copy->cp_nn->pending_async_copies);
kfree(copy->cp_src);
kfree(copy);
}
@@ -1355,8 +1428,11 @@ static void nfs4_put_copy(struct nfsd4_copy *copy)
static void nfsd4_stop_copy(struct nfsd4_copy *copy)
{
trace_nfsd_copy_async_cancel(copy);
- if (!test_and_set_bit(NFSD4_COPY_F_STOPPED, &copy->cp_flags))
+ if (!test_and_set_bit(NFSD4_COPY_F_STOPPED, &copy->cp_flags)) {
kthread_stop(copy->copy_task);
+ copy->nfserr = nfs_ok;
+ set_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags);
+ }
nfs4_put_copy(copy);
}
@@ -1440,7 +1516,7 @@ try_again:
return 0;
}
if (work) {
- strscpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr) - 1);
+ strscpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr));
refcount_set(&work->nsui_refcnt, 2);
work->nsui_busy = true;
list_add_tail(&work->nsui_list, &nn->nfsd_ssc_mount_list);
@@ -1685,10 +1761,11 @@ static int nfsd4_cb_offload_done(struct nfsd4_callback *cb,
switch (task->tk_status) {
case -NFS4ERR_DELAY:
if (cbo->co_retries--) {
- rpc_delay(task, 1 * HZ);
+ rpc_delay(task, HZ / 5);
return 0;
}
}
+ nfsd41_cb_destroy_referring_call_list(cb);
return 1;
}
@@ -1821,9 +1898,12 @@ static void nfsd4_send_cb_offload(struct nfsd4_copy *copy)
nfsd4_init_cb(&cbo->co_cb, copy->cp_clp, &nfsd4_cb_offload_ops,
NFSPROC4_CLNT_CB_OFFLOAD);
+ nfsd41_cb_referring_call(&cbo->co_cb, &cbo->co_referring_sessionid,
+ cbo->co_referring_slotid,
+ cbo->co_referring_seqno);
trace_nfsd_cb_offload(copy->cp_clp, &cbo->co_res.cb_stateid,
&cbo->co_fh, copy->cp_count, copy->nfserr);
- nfsd4_run_cb(&cbo->co_cb);
+ nfsd4_try_run_cb(&cbo->co_cb);
}
/**
@@ -1870,6 +1950,7 @@ do_callback:
set_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags);
trace_nfsd_copy_async_done(copy);
nfsd4_send_cb_offload(copy);
+ atomic_dec(&copy->cp_nn->pending_async_copies);
return 0;
}
@@ -1883,13 +1964,6 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd42_write_res *result;
__be32 status;
- /*
- * Currently, async COPY is not reliable. Force all COPY
- * requests to be synchronous to avoid client application
- * hangs waiting for COPY completion.
- */
- nfsd4_copy_set_sync(copy, true);
-
result = &copy->cp_res;
nfsd_copy_write_verifier((__be32 *)&result->wr_verifier.data, nn);
@@ -1927,19 +2001,24 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
/* Arbitrary cap on number of pending async copy operations */
if (atomic_inc_return(&nn->pending_async_copies) >
(int)rqstp->rq_pool->sp_nrthreads)
- goto out_err;
+ goto out_dec_async_copy_err;
async_copy->cp_src = kmalloc(sizeof(*async_copy->cp_src), GFP_KERNEL);
if (!async_copy->cp_src)
- goto out_err;
+ goto out_dec_async_copy_err;
if (!nfs4_init_copy_state(nn, copy))
- goto out_err;
+ goto out_dec_async_copy_err;
memcpy(&result->cb_stateid, &copy->cp_stateid.cs_stid,
sizeof(result->cb_stateid));
dup_copy_fields(copy, async_copy);
+ memcpy(async_copy->cp_cb_offload.co_referring_sessionid.data,
+ cstate->session->se_sessionid.data,
+ NFS4_MAX_SESSIONID_LEN);
+ async_copy->cp_cb_offload.co_referring_slotid = cstate->slot->sl_index;
+ async_copy->cp_cb_offload.co_referring_seqno = cstate->slot->sl_seqid;
async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
async_copy, "%s", "copy thread");
if (IS_ERR(async_copy->copy_task))
- goto out_err;
+ goto out_dec_async_copy_err;
spin_lock(&async_copy->cp_clp->async_lock);
list_add(&async_copy->copies,
&async_copy->cp_clp->async_copies);
@@ -1954,6 +2033,9 @@ out:
trace_nfsd_copy_done(copy, status);
release_copy_files(copy);
return status;
+out_dec_async_copy_err:
+ if (async_copy)
+ atomic_dec(&nn->pending_async_copies);
out_err:
if (nfsd4_ssc_is_inter(copy)) {
/*
@@ -2256,6 +2338,13 @@ nfsd4_get_dir_delegation(struct svc_rqst *rqstp,
union nfsd4_op_u *u)
{
struct nfsd4_get_dir_delegation *gdd = &u->get_dir_delegation;
+ struct nfs4_delegation *dd;
+ struct nfsd_file *nf;
+ __be32 status;
+
+ status = nfsd_file_acquire_dir(rqstp, &cstate->current_fh, &nf);
+ if (status != nfs_ok)
+ return status;
/*
* RFC 8881, section 18.39.3 says:
@@ -2269,7 +2358,20 @@ nfsd4_get_dir_delegation(struct svc_rqst *rqstp,
* return NFS4_OK with a non-fatal status of GDD4_UNAVAIL in this
* situation.
*/
- gdd->gddrnf_status = GDD4_UNAVAIL;
+ dd = nfsd_get_dir_deleg(cstate, gdd, nf);
+ nfsd_file_put(nf);
+ if (IS_ERR(dd)) {
+ int err = PTR_ERR(dd);
+
+ if (err != -EAGAIN)
+ return nfserrno(err);
+ gdd->gddrnf_status = GDD4_UNAVAIL;
+ return nfs_ok;
+ }
+
+ gdd->gddrnf_status = GDD4_OK;
+ memcpy(&gdd->gddr_stateid, &dd->dl_stid.sc_stateid, sizeof(gdd->gddr_stateid));
+ nfs4_put_stid(&dd->dl_stid);
return nfs_ok;
}
@@ -2412,7 +2514,7 @@ nfsd4_layoutget(struct svc_rqst *rqstp,
if (atomic_read(&ls->ls_stid.sc_file->fi_lo_recalls))
goto out_put_stid;
- nfserr = ops->proc_layoutget(d_inode(current_fh->fh_dentry),
+ nfserr = ops->proc_layoutget(rqstp, d_inode(current_fh->fh_dentry),
current_fh, lgp);
if (nfserr)
goto out_put_stid;
@@ -2436,11 +2538,11 @@ static __be32
nfsd4_layoutcommit(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
+ struct net *net = SVC_NET(rqstp);
struct nfsd4_layoutcommit *lcp = &u->layoutcommit;
const struct nfsd4_layout_seg *seg = &lcp->lc_seg;
struct svc_fh *current_fh = &cstate->current_fh;
const struct nfsd4_layout_ops *ops;
- loff_t new_size = lcp->lc_last_wr + 1;
struct inode *inode;
struct nfs4_layout_stateid *ls;
__be32 nfserr;
@@ -2456,43 +2558,50 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp,
goto out;
inode = d_inode(current_fh->fh_dentry);
- nfserr = nfserr_inval;
- if (new_size <= seg->offset) {
- dprintk("pnfsd: last write before layout segment\n");
- goto out;
+ lcp->lc_size_chg = false;
+ if (lcp->lc_newoffset) {
+ loff_t new_size = lcp->lc_last_wr + 1;
+
+ nfserr = nfserr_inval;
+ if (new_size <= seg->offset)
+ goto out;
+ if (new_size > seg->offset + seg->length)
+ goto out;
+
+ if (new_size > i_size_read(inode)) {
+ lcp->lc_size_chg = true;
+ lcp->lc_newsize = new_size;
+ }
}
- if (new_size > seg->offset + seg->length) {
- dprintk("pnfsd: last write beyond layout segment\n");
+
+ nfserr = nfserr_grace;
+ if (locks_in_grace(net) && !lcp->lc_reclaim)
goto out;
- }
- if (!lcp->lc_newoffset && new_size > i_size_read(inode)) {
- dprintk("pnfsd: layoutcommit beyond EOF\n");
+ nfserr = nfserr_no_grace;
+ if (!locks_in_grace(net) && lcp->lc_reclaim)
goto out;
- }
- nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lcp->lc_sid,
- false, lcp->lc_layout_type,
- &ls);
- if (nfserr) {
- trace_nfsd_layout_commit_lookup_fail(&lcp->lc_sid);
- /* fixup error code as per RFC5661 */
- if (nfserr == nfserr_bad_stateid)
- nfserr = nfserr_badlayout;
- goto out;
+ if (!lcp->lc_reclaim) {
+ nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate,
+ &lcp->lc_sid, false, lcp->lc_layout_type, &ls);
+ if (nfserr) {
+ trace_nfsd_layout_commit_lookup_fail(&lcp->lc_sid);
+ /* fixup error code as per RFC5661 */
+ if (nfserr == nfserr_bad_stateid)
+ nfserr = nfserr_badlayout;
+ goto out;
+ }
+
+ /* LAYOUTCOMMIT does not require any serialization */
+ mutex_unlock(&ls->ls_mutex);
}
- /* LAYOUTCOMMIT does not require any serialization */
- mutex_unlock(&ls->ls_mutex);
+ nfserr = ops->proc_layoutcommit(inode, rqstp, lcp);
- if (new_size > i_size_read(inode)) {
- lcp->lc_size_chg = true;
- lcp->lc_newsize = new_size;
- } else {
- lcp->lc_size_chg = false;
+ if (!lcp->lc_reclaim) {
+ nfsd4_file_mark_deleg_written(ls->ls_stid.sc_file);
+ nfs4_put_stid(&ls->ls_stid);
}
-
- nfserr = ops->proc_layoutcommit(inode, lcp);
- nfs4_put_stid(&ls->ls_stid);
out:
return nfserr;
}
@@ -3738,7 +3847,8 @@ bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
struct nfs4_op_map *allow = &cstate->clp->cl_spo_must_allow;
u32 opiter;
- if (!cstate->minorversion)
+ if (rqstp->rq_procinfo != &nfsd_version4.vs_proc[NFSPROC4_COMPOUND] ||
+ cstate->minorversion == 0)
return false;
if (cstate->spo_must_allowed)
@@ -3804,7 +3914,7 @@ static const struct svc_procedure nfsd_procedures4[2] = {
.pc_ressize = sizeof(struct nfsd4_compoundres),
.pc_release = nfsd4_release_compoundargs,
.pc_cachetype = RC_NOCACHE,
- .pc_xdrressize = NFSD_BUFSIZE/4,
+ .pc_xdrressize = 3+NFSSVC_MAXBLKSIZE/4,
.pc_name = "COMPOUND",
},
};
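
In the nfs4proc.c changes above, nfsd4_copy() charges each async COPY against
nn->pending_async_copies up front, rolls the charge back through the new
out_dec_async_copy_err label on failure, and the successful path now drops it
only when nfsd4_do_async_copy() completes. The underlying bounded
in-flight-work idiom, isolated here with invented helper names:

#include <linux/atomic.h>

/* Admit a new async job only while fewer than @limit are pending. */
static bool async_job_admit(atomic_t *pending, int limit)
{
        if (atomic_inc_return(pending) > limit) {
                atomic_dec(pending);    /* roll back the optimistic charge */
                return false;
        }
        return true;
}

/* Call when the admitted job completes (or is torn down on an error path). */
static void async_job_done(atomic_t *pending)
{
        atomic_dec(pending);
}
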
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 4a765555bf84..441dfbfe2d2b 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -32,7 +32,8 @@
*
*/
-#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/sha2.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/namei.h>
@@ -82,110 +83,38 @@ nfs4_save_creds(const struct cred **original_creds)
new->fsuid = GLOBAL_ROOT_UID;
new->fsgid = GLOBAL_ROOT_GID;
*original_creds = override_creds(new);
- put_cred(new);
return 0;
}
static void
nfs4_reset_creds(const struct cred *original)
{
- revert_creds(original);
+ put_cred(revert_creds(original));
}
static void
-md5_to_hex(char *out, char *md5)
+nfs4_make_rec_clidname(char dname[HEXDIR_LEN], const struct xdr_netobj *clname)
{
- int i;
-
- for (i=0; i<16; i++) {
- unsigned char c = md5[i];
-
- *out++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
- *out++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
- }
- *out = '\0';
-}
-
-static int
-nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname)
-{
- struct xdr_netobj cksum;
- struct crypto_shash *tfm;
- int status;
+ u8 digest[MD5_DIGEST_SIZE];
dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
clname->len, clname->data);
- tfm = crypto_alloc_shash("md5", 0, 0);
- if (IS_ERR(tfm)) {
- status = PTR_ERR(tfm);
- goto out_no_tfm;
- }
- cksum.len = crypto_shash_digestsize(tfm);
- cksum.data = kmalloc(cksum.len, GFP_KERNEL);
- if (cksum.data == NULL) {
- status = -ENOMEM;
- goto out;
- }
-
- status = crypto_shash_tfm_digest(tfm, clname->data, clname->len,
- cksum.data);
- if (status)
- goto out;
+ md5(clname->data, clname->len, digest);
- md5_to_hex(dname, cksum.data);
-
- status = 0;
-out:
- kfree(cksum.data);
- crypto_free_shash(tfm);
-out_no_tfm:
- return status;
-}
-
-/*
- * If we had an error generating the recdir name for the legacy tracker
- * then warn the admin. If the error doesn't appear to be transient,
- * then disable recovery tracking.
- */
-static void
-legacy_recdir_name_error(struct nfs4_client *clp, int error)
-{
- printk(KERN_ERR "NFSD: unable to generate recoverydir "
- "name (%d).\n", error);
-
- /*
- * if the algorithm just doesn't exist, then disable the recovery
- * tracker altogether. The crypto libs will generally return this if
- * FIPS is enabled as well.
- */
- if (error == -ENOENT) {
- printk(KERN_ERR "NFSD: disabling legacy clientid tracking. "
- "Reboot recovery will not function correctly!\n");
- nfsd4_client_tracking_exit(clp->net);
- }
+ static_assert(HEXDIR_LEN == 2 * MD5_DIGEST_SIZE + 1);
+ sprintf(dname, "%*phN", MD5_DIGEST_SIZE, digest);
}
static void
__nfsd4_create_reclaim_record_grace(struct nfs4_client *clp,
- const char *dname, int len, struct nfsd_net *nn)
+ char *dname, struct nfsd_net *nn)
{
- struct xdr_netobj name;
+ struct xdr_netobj name = { .len = strlen(dname), .data = dname };
struct xdr_netobj princhash = { .len = 0, .data = NULL };
struct nfs4_client_reclaim *crp;
- name.data = kmemdup(dname, len, GFP_KERNEL);
- if (!name.data) {
- dprintk("%s: failed to allocate memory for name.data!\n",
- __func__);
- return;
- }
- name.len = len;
crp = nfs4_client_to_reclaim(name, princhash, nn);
- if (!crp) {
- kfree(name.data);
- return;
- }
crp->cr_clp = clp;
}
@@ -203,9 +132,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
if (!nn->rec_file)
return;
- status = nfs4_make_rec_clidname(dname, &clp->cl_name);
- if (status)
- return legacy_recdir_name_error(clp, status);
+ nfs4_make_rec_clidname(dname, &clp->cl_name);
status = nfs4_save_creds(&original_cred);
if (status < 0)
@@ -216,13 +143,11 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
goto out_creds;
dir = nn->rec_file->f_path.dentry;
- /* lock the parent */
- inode_lock(d_inode(dir));
- dentry = lookup_one_len(dname, dir, HEXDIR_LEN-1);
+ dentry = start_creating(&nop_mnt_idmap, dir, &QSTR(dname));
if (IS_ERR(dentry)) {
status = PTR_ERR(dentry);
- goto out_unlock;
+ goto out;
}
if (d_really_is_positive(dentry))
/*
@@ -233,16 +158,16 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
* In the 4.0 case, we should never get here; but we may
* as well be forgiving and just succeed silently.
*/
- goto out_put;
- status = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), dentry, S_IRWXU);
-out_put:
- dput(dentry);
-out_unlock:
- inode_unlock(d_inode(dir));
+ goto out_end;
+ dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), dentry, 0700, NULL);
+ if (IS_ERR(dentry))
+ status = PTR_ERR(dentry);
+out_end:
+ end_creating(dentry);
+out:
if (status == 0) {
if (nn->in_grace)
- __nfsd4_create_reclaim_record_grace(clp, dname,
- HEXDIR_LEN, nn);
+ __nfsd4_create_reclaim_record_grace(clp, dname, nn);
vfs_fsync(nn->rec_file, 0);
} else {
printk(KERN_ERR "NFSD: failed to write recovery record"
@@ -255,7 +180,7 @@ out_creds:
nfs4_reset_creds(original_cred);
}
-typedef int (recdir_func)(struct dentry *, struct dentry *, struct nfsd_net *);
+typedef int (recdir_func)(struct dentry *, char *, struct nfsd_net *);
struct name_list {
char name[HEXDIR_LEN];
@@ -309,23 +234,14 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn)
}
status = iterate_dir(nn->rec_file, &ctx.ctx);
- inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
list_for_each_entry_safe(entry, tmp, &ctx.names, list) {
- if (!status) {
- struct dentry *dentry;
- dentry = lookup_one_len(entry->name, dir, HEXDIR_LEN-1);
- if (IS_ERR(dentry)) {
- status = PTR_ERR(dentry);
- break;
- }
- status = f(dir, dentry, nn);
- dput(dentry);
- }
+ if (!status)
+ status = f(dir, entry->name, nn);
+
list_del(&entry->list);
kfree(entry);
}
- inode_unlock(d_inode(dir));
nfs4_reset_creds(original_cred);
list_for_each_entry_safe(entry, tmp, &ctx.names, list) {
@@ -337,28 +253,20 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn)
}
static int
-nfsd4_unlink_clid_dir(char *name, int namlen, struct nfsd_net *nn)
+nfsd4_unlink_clid_dir(char *name, struct nfsd_net *nn)
{
struct dentry *dir, *dentry;
int status;
- dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
+ dprintk("NFSD: nfsd4_unlink_clid_dir. name %s\n", name);
dir = nn->rec_file->f_path.dentry;
- inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
- dentry = lookup_one_len(name, dir, namlen);
- if (IS_ERR(dentry)) {
- status = PTR_ERR(dentry);
- goto out_unlock;
- }
- status = -ENOENT;
- if (d_really_is_negative(dentry))
- goto out;
- status = vfs_rmdir(&nop_mnt_idmap, d_inode(dir), dentry);
-out:
- dput(dentry);
-out_unlock:
- inode_unlock(d_inode(dir));
+ dentry = start_removing(&nop_mnt_idmap, dir, &QSTR(name));
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ status = vfs_rmdir(&nop_mnt_idmap, d_inode(dir), dentry, NULL);
+ end_removing(dentry);
return status;
}
@@ -393,9 +301,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
if (!nn->rec_file || !test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return;
- status = nfs4_make_rec_clidname(dname, &clp->cl_name);
- if (status)
- return legacy_recdir_name_error(clp, status);
+ nfs4_make_rec_clidname(dname, &clp->cl_name);
status = mnt_want_write_file(nn->rec_file);
if (status)
@@ -406,7 +312,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
if (status < 0)
goto out_drop_write;
- status = nfsd4_unlink_clid_dir(dname, HEXDIR_LEN-1, nn);
+ status = nfsd4_unlink_clid_dir(dname, nn);
nfs4_reset_creds(original_cred);
if (status == 0) {
vfs_fsync(nn->rec_file, 0);
@@ -423,18 +329,19 @@ out:
}
static int
-purge_old(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
+purge_old(struct dentry *parent, char *cname, struct nfsd_net *nn)
{
int status;
+ struct dentry *child;
struct xdr_netobj name;
- if (child->d_name.len != HEXDIR_LEN - 1) {
- printk("%s: illegal name %pd in recovery directory\n",
- __func__, child);
+ if (strlen(cname) != HEXDIR_LEN - 1) {
+ printk("%s: illegal name %s in recovery directory\n",
+ __func__, cname);
/* Keep trying; maybe the others are OK: */
return 0;
}
- name.data = kmemdup_nul(child->d_name.name, child->d_name.len, GFP_KERNEL);
+ name.data = kstrdup(cname, GFP_KERNEL);
if (!name.data) {
dprintk("%s: failed to allocate memory for name.data!\n",
__func__);
@@ -444,10 +351,17 @@ purge_old(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
if (nfs4_has_reclaimed_state(name, nn))
goto out_free;
- status = vfs_rmdir(&nop_mnt_idmap, d_inode(parent), child);
- if (status)
- printk("failed to remove client recovery directory %pd\n",
- child);
+ inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
+ child = lookup_one(&nop_mnt_idmap, &QSTR(cname), parent);
+ if (!IS_ERR(child)) {
+ status = vfs_rmdir(&nop_mnt_idmap, d_inode(parent), child, NULL);
+ if (status)
+ printk("failed to remove client recovery directory %pd\n",
+ child);
+ dput(child);
+ }
+ inode_unlock(d_inode(parent));
+
out_free:
kfree(name.data);
out:
@@ -478,27 +392,18 @@ out:
}
static int
-load_recdir(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
+load_recdir(struct dentry *parent, char *cname, struct nfsd_net *nn)
{
- struct xdr_netobj name;
+ struct xdr_netobj name = { .len = HEXDIR_LEN, .data = cname };
struct xdr_netobj princhash = { .len = 0, .data = NULL };
- if (child->d_name.len != HEXDIR_LEN - 1) {
- printk("%s: illegal name %pd in recovery directory\n",
- __func__, child);
+ if (strlen(cname) != HEXDIR_LEN - 1) {
+ printk("%s: illegal name %s in recovery directory\n",
+ __func__, cname);
/* Keep trying; maybe the others are OK: */
return 0;
}
- name.data = kmemdup_nul(child->d_name.name, child->d_name.len, GFP_KERNEL);
- if (!name.data) {
- dprintk("%s: failed to allocate memory for name.data!\n",
- __func__);
- goto out;
- }
- name.len = HEXDIR_LEN;
- if (!nfs4_client_to_reclaim(name, princhash, nn))
- kfree(name.data);
-out:
+ nfs4_client_to_reclaim(name, princhash, nn);
return 0;
}
@@ -676,7 +581,6 @@ nfs4_recoverydir(void)
static int
nfsd4_check_legacy_client(struct nfs4_client *clp)
{
- int status;
char dname[HEXDIR_LEN];
struct nfs4_client_reclaim *crp;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
@@ -686,11 +590,7 @@ nfsd4_check_legacy_client(struct nfs4_client *clp)
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return 0;
- status = nfs4_make_rec_clidname(dname, &clp->cl_name);
- if (status) {
- legacy_recdir_name_error(clp, status);
- return status;
- }
+ nfs4_make_rec_clidname(dname, &clp->cl_name);
/* look for it in the reclaim hashtable otherwise */
name.data = kmemdup(dname, HEXDIR_LEN, GFP_KERNEL);
@@ -734,7 +634,6 @@ struct cld_net {
spinlock_t cn_lock;
struct list_head cn_list;
unsigned int cn_xid;
- struct crypto_shash *cn_tfm;
#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
bool cn_has_legacy;
#endif
@@ -797,6 +696,8 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
{
uint8_t cmd, princhashlen;
struct xdr_netobj name, princhash = { .len = 0, .data = NULL };
+ char *namecopy __free(kfree) = NULL;
+ char *princhashcopy __free(kfree) = NULL;
uint16_t namelen;
if (get_user(cmd, &cmsg->cm_cmd)) {
@@ -814,19 +715,19 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
dprintk("%s: invalid namelen (%u)", __func__, namelen);
return -EINVAL;
}
- name.data = memdup_user(&ci->cc_name.cn_id, namelen);
- if (IS_ERR(name.data))
- return PTR_ERR(name.data);
+ namecopy = memdup_user(&ci->cc_name.cn_id, namelen);
+ if (IS_ERR(namecopy))
+ return PTR_ERR(namecopy);
+ name.data = namecopy;
name.len = namelen;
get_user(princhashlen, &ci->cc_princhash.cp_len);
if (princhashlen > 0) {
- princhash.data = memdup_user(
- &ci->cc_princhash.cp_data,
- princhashlen);
- if (IS_ERR(princhash.data)) {
- kfree(name.data);
- return PTR_ERR(princhash.data);
- }
+ princhashcopy = memdup_user(
+ &ci->cc_princhash.cp_data,
+ princhashlen);
+ if (IS_ERR(princhashcopy))
+ return PTR_ERR(princhashcopy);
+ princhash.data = princhashcopy;
princhash.len = princhashlen;
} else
princhash.len = 0;
@@ -840,9 +741,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
dprintk("%s: invalid namelen (%u)", __func__, namelen);
return -EINVAL;
}
- name.data = memdup_user(&cnm->cn_id, namelen);
- if (IS_ERR(name.data))
- return PTR_ERR(name.data);
+ namecopy = memdup_user(&cnm->cn_id, namelen);
+ if (IS_ERR(namecopy))
+ return PTR_ERR(namecopy);
+ name.data = namecopy;
name.len = namelen;
}
#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
@@ -850,15 +752,12 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
struct cld_net *cn = nn->cld_net;
name.len = name.len - 5;
- memmove(name.data, name.data + 5, name.len);
+ name.data = name.data + 5;
cn->cn_has_legacy = true;
}
#endif
- if (!nfs4_client_to_reclaim(name, princhash, nn)) {
- kfree(name.data);
- kfree(princhash.data);
+ if (!nfs4_client_to_reclaim(name, princhash, nn))
return -EFAULT;
- }
return nn->client_tracking_ops->msglen;
}
return -EFAULT;
@@ -947,38 +846,32 @@ static const struct rpc_pipe_ops cld_upcall_ops = {
.destroy_msg = cld_pipe_destroy_msg,
};
-static struct dentry *
+static int
nfsd4_cld_register_sb(struct super_block *sb, struct rpc_pipe *pipe)
{
- struct dentry *dir, *dentry;
+ struct dentry *dir;
+ int err;
dir = rpc_d_lookup_sb(sb, NFSD_PIPE_DIR);
if (dir == NULL)
- return ERR_PTR(-ENOENT);
- dentry = rpc_mkpipe_dentry(dir, NFSD_CLD_PIPE, NULL, pipe);
+ return -ENOENT;
+ err = rpc_mkpipe_dentry(dir, NFSD_CLD_PIPE, NULL, pipe);
dput(dir);
- return dentry;
+ return err;
}
-static void
-nfsd4_cld_unregister_sb(struct rpc_pipe *pipe)
-{
- if (pipe->dentry)
- rpc_unlink(pipe->dentry);
-}
-
-static struct dentry *
+static int
nfsd4_cld_register_net(struct net *net, struct rpc_pipe *pipe)
{
struct super_block *sb;
- struct dentry *dentry;
+ int err;
sb = rpc_get_sb_net(net);
if (!sb)
- return NULL;
- dentry = nfsd4_cld_register_sb(sb, pipe);
+ return 0;
+ err = nfsd4_cld_register_sb(sb, pipe);
rpc_put_sb_net(net);
- return dentry;
+ return err;
}
static void
@@ -988,7 +881,7 @@ nfsd4_cld_unregister_net(struct net *net, struct rpc_pipe *pipe)
sb = rpc_get_sb_net(net);
if (sb) {
- nfsd4_cld_unregister_sb(pipe);
+ rpc_unlink(pipe);
rpc_put_sb_net(net);
}
}
@@ -998,7 +891,6 @@ static int
__nfsd4_init_cld_pipe(struct net *net)
{
int ret;
- struct dentry *dentry;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct cld_net *cn;
@@ -1019,13 +911,10 @@ __nfsd4_init_cld_pipe(struct net *net)
spin_lock_init(&cn->cn_lock);
INIT_LIST_HEAD(&cn->cn_list);
- dentry = nfsd4_cld_register_net(net, cn->cn_pipe);
- if (IS_ERR(dentry)) {
- ret = PTR_ERR(dentry);
+ ret = nfsd4_cld_register_net(net, cn->cn_pipe);
+ if (unlikely(ret))
goto err_destroy_data;
- }
- cn->cn_pipe->dentry = dentry;
#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
cn->cn_has_legacy = false;
#endif
@@ -1060,8 +949,6 @@ nfsd4_remove_cld_pipe(struct net *net)
nfsd4_cld_unregister_net(net, cn->cn_pipe);
rpc_destroy_pipe_data(cn->cn_pipe);
- if (cn->cn_tfm)
- crypto_free_shash(cn->cn_tfm);
kfree(nn->cld_net);
nn->cld_net = NULL;
}
@@ -1155,8 +1042,6 @@ nfsd4_cld_create_v2(struct nfs4_client *clp)
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
struct cld_net *cn = nn->cld_net;
struct cld_msg_v2 *cmsg;
- struct crypto_shash *tfm = cn->cn_tfm;
- struct xdr_netobj cksum;
char *principal = NULL;
/* Don't upcall if it's already stored */
@@ -1179,22 +1064,9 @@ nfsd4_cld_create_v2(struct nfs4_client *clp)
else if (clp->cl_cred.cr_principal)
principal = clp->cl_cred.cr_principal;
if (principal) {
- cksum.len = crypto_shash_digestsize(tfm);
- cksum.data = kmalloc(cksum.len, GFP_KERNEL);
- if (cksum.data == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- ret = crypto_shash_tfm_digest(tfm, principal, strlen(principal),
- cksum.data);
- if (ret) {
- kfree(cksum.data);
- goto out;
- }
- cmsg->cm_u.cm_clntinfo.cc_princhash.cp_len = cksum.len;
- memcpy(cmsg->cm_u.cm_clntinfo.cc_princhash.cp_data,
- cksum.data, cksum.len);
- kfree(cksum.data);
+ sha256(principal, strlen(principal),
+ cmsg->cm_u.cm_clntinfo.cc_princhash.cp_data);
+ cmsg->cm_u.cm_clntinfo.cc_princhash.cp_len = SHA256_DIGEST_SIZE;
} else
cmsg->cm_u.cm_clntinfo.cc_princhash.cp_len = 0;
@@ -1204,7 +1076,6 @@ nfsd4_cld_create_v2(struct nfs4_client *clp)
set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
}
-out:
free_cld_upcall(cup);
out_err:
if (ret)
@@ -1312,13 +1183,10 @@ nfsd4_cld_check(struct nfs4_client *clp)
#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
if (nn->cld_net->cn_has_legacy) {
- int status;
char dname[HEXDIR_LEN];
struct xdr_netobj name;
- status = nfs4_make_rec_clidname(dname, &clp->cl_name);
- if (status)
- return -ENOENT;
+ nfs4_make_rec_clidname(dname, &clp->cl_name);
name.data = kmemdup(dname, HEXDIR_LEN, GFP_KERNEL);
if (!name.data) {
@@ -1343,12 +1211,11 @@ found:
static int
nfsd4_cld_check_v2(struct nfs4_client *clp)
{
- struct nfs4_client_reclaim *crp;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
struct cld_net *cn = nn->cld_net;
- int status;
- struct crypto_shash *tfm = cn->cn_tfm;
- struct xdr_netobj cksum;
+#endif
+ struct nfs4_client_reclaim *crp;
char *principal = NULL;
/* did we already find that this client is stable? */
@@ -1365,9 +1232,7 @@ nfsd4_cld_check_v2(struct nfs4_client *clp)
struct xdr_netobj name;
char dname[HEXDIR_LEN];
- status = nfs4_make_rec_clidname(dname, &clp->cl_name);
- if (status)
- return -ENOENT;
+ nfs4_make_rec_clidname(dname, &clp->cl_name);
name.data = kmemdup(dname, HEXDIR_LEN, GFP_KERNEL);
if (!name.data) {
@@ -1386,28 +1251,18 @@ nfsd4_cld_check_v2(struct nfs4_client *clp)
return -ENOENT;
found:
if (crp->cr_princhash.len) {
+ u8 digest[SHA256_DIGEST_SIZE];
+
if (clp->cl_cred.cr_raw_principal)
principal = clp->cl_cred.cr_raw_principal;
else if (clp->cl_cred.cr_principal)
principal = clp->cl_cred.cr_principal;
if (principal == NULL)
return -ENOENT;
- cksum.len = crypto_shash_digestsize(tfm);
- cksum.data = kmalloc(cksum.len, GFP_KERNEL);
- if (cksum.data == NULL)
- return -ENOENT;
- status = crypto_shash_tfm_digest(tfm, principal,
- strlen(principal), cksum.data);
- if (status) {
- kfree(cksum.data);
- return -ENOENT;
- }
- if (memcmp(crp->cr_princhash.data, cksum.data,
- crp->cr_princhash.len)) {
- kfree(cksum.data);
+ sha256(principal, strlen(principal), digest);
+ if (memcmp(crp->cr_princhash.data, digest,
+ crp->cr_princhash.len))
return -ENOENT;
- }
- kfree(cksum.data);
}
crp->cr_clp = clp;
return 0;
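
The hunks above drop the per-net crypto_shash transform and hash the principal with a one-shot sha256() call before comparing it against the recorded value. A rough userspace equivalent of that hash-and-compare step, using OpenSSL's one-shot SHA256() as a stand-in for the kernel helper; function and principal names are illustrative:

/*
 * Userspace sketch of the one-shot hash-and-compare above; OpenSSL's
 * SHA256() stands in for the kernel's sha256() helper.
 * Build with: cc sketch.c -lcrypto
 */
#include <openssl/sha.h>
#include <stdio.h>
#include <string.h>

/* Return 0 if the stored hash matches the principal, -1 otherwise. */
static int check_principal(const char *principal,
			   const unsigned char *stored, size_t stored_len)
{
	unsigned char digest[SHA256_DIGEST_LENGTH];

	SHA256((const unsigned char *)principal, strlen(principal), digest);
	if (stored_len == 0 || stored_len > sizeof(digest))
		return -1;
	return memcmp(stored, digest, stored_len) ? -1 : 0;
}

int main(void)
{
	const char *principal = "nfs/server.example@REALM";
	unsigned char stored[SHA256_DIGEST_LENGTH];

	SHA256((const unsigned char *)principal, strlen(principal), stored);
	printf("match: %d\n", check_principal(principal, stored, sizeof(stored)) == 0);
	return 0;
}
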
@@ -1587,7 +1442,6 @@ nfsd4_cld_tracking_init(struct net *net)
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
bool running;
int retries = 10;
- struct crypto_shash *tfm;
status = nfs4_cld_state_init(net);
if (status)
@@ -1612,12 +1466,6 @@ nfsd4_cld_tracking_init(struct net *net)
status = -ETIMEDOUT;
goto err_remove;
}
- tfm = crypto_alloc_shash("sha256", 0, 0);
- if (IS_ERR(tfm)) {
- status = PTR_ERR(tfm);
- goto err_remove;
- }
- nn->cld_net->cn_tfm = tfm;
status = nfsd4_cld_get_version(nn);
if (status == -EOPNOTSUPP)
@@ -1757,11 +1605,7 @@ nfsd4_cltrack_legacy_recdir(const struct xdr_netobj *name)
return NULL;
}
- copied = nfs4_make_rec_clidname(result + copied, name);
- if (copied) {
- kfree(result);
- return NULL;
- }
+ nfs4_make_rec_clidname(result + copied, name);
return result;
}
@@ -2052,7 +1896,6 @@ static inline int check_for_legacy_methods(int status, struct net *net)
path_put(&path);
if (status)
return -ENOTDIR;
- status = nn->client_tracking_ops->init(net);
}
return status;
}
@@ -2154,7 +1997,6 @@ rpc_pipefs_event(struct notifier_block *nb, unsigned long event, void *ptr)
struct net *net = sb->s_fs_info;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct cld_net *cn = nn->cld_net;
- struct dentry *dentry;
int ret = 0;
if (!try_module_get(THIS_MODULE))
@@ -2167,16 +2009,10 @@ rpc_pipefs_event(struct notifier_block *nb, unsigned long event, void *ptr)
switch (event) {
case RPC_PIPEFS_MOUNT:
- dentry = nfsd4_cld_register_sb(sb, cn->cn_pipe);
- if (IS_ERR(dentry)) {
- ret = PTR_ERR(dentry);
- break;
- }
- cn->cn_pipe->dentry = dentry;
+ ret = nfsd4_cld_register_sb(sb, cn->cn_pipe);
break;
case RPC_PIPEFS_UMOUNT:
- if (cn->cn_pipe->dentry)
- nfsd4_cld_unregister_sb(cn->cn_pipe);
+ rpc_unlink(cn->cn_pipe);
break;
default:
ret = -ENOTSUPP;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 741b9449f727..808c24fb5c9a 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -633,18 +633,6 @@ find_readable_file(struct nfs4_file *f)
return ret;
}
-static struct nfsd_file *
-find_rw_file(struct nfs4_file *f)
-{
- struct nfsd_file *ret;
-
- spin_lock(&f->fi_lock);
- ret = nfsd_file_get(f->fi_fds[O_RDWR]);
- spin_unlock(&f->fi_lock);
-
- return ret;
-}
-
struct nfsd_file *
find_any_file(struct nfs4_file *f)
{
@@ -946,15 +934,6 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *sla
spin_lock_init(&stid->sc_lock);
INIT_LIST_HEAD(&stid->sc_cp_list);
- /*
- * It shouldn't be a problem to reuse an opaque stateid value.
- * I don't think it is for 4.1. But with 4.0 I worry that, for
- * example, a stray write retransmission could be accepted by
- * the server when it should have been rejected. Therefore,
- * adopt a trick from the sctp code to attempt to maximize the
- * amount of time until an id is reused, by ensuring they always
- * "increase" (mod INT_MAX):
- */
return stid;
out_free:
kmem_cache_free(slab, stid);
@@ -1050,6 +1029,12 @@ static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
return openlockstateid(stid);
}
+/*
+ * As the sc_free callback of a delegation stateid, this may be called by
+ * nfs4_put_stid() from nfsd_break_one_deleg().
+ * Since nfsd_break_one_deleg() runs with the flc->flc_lock held, this
+ * function must never sleep.
+ */
static void nfs4_free_deleg(struct nfs4_stid *stid)
{
struct nfs4_delegation *dp = delegstateid(stid);
@@ -1221,15 +1206,56 @@ nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
static void put_deleg_file(struct nfs4_file *fp)
{
+ struct nfsd_file *rnf = NULL;
struct nfsd_file *nf = NULL;
spin_lock(&fp->fi_lock);
- if (--fp->fi_delegees == 0)
+ if (--fp->fi_delegees == 0) {
swap(nf, fp->fi_deleg_file);
+ swap(rnf, fp->fi_rdeleg_file);
+ }
spin_unlock(&fp->fi_lock);
if (nf)
nfsd_file_put(nf);
+ if (rnf)
+ nfs4_file_put_access(fp, NFS4_SHARE_ACCESS_READ);
+}
+
+static void nfsd4_finalize_deleg_timestamps(struct nfs4_delegation *dp, struct file *f)
+{
+ struct iattr ia = { .ia_valid = ATTR_ATIME | ATTR_CTIME | ATTR_MTIME };
+ struct inode *inode = file_inode(f);
+ int ret;
+
+ /* don't do anything if FMODE_NOCMTIME isn't set */
+ if ((READ_ONCE(f->f_mode) & FMODE_NOCMTIME) == 0)
+ return;
+
+ spin_lock(&f->f_lock);
+ f->f_mode &= ~FMODE_NOCMTIME;
+ spin_unlock(&f->f_lock);
+
+ /* was it never written? */
+ if (!dp->dl_written)
+ return;
+
+ /* did it get a setattr for the timestamps at some point? */
+ if (dp->dl_setattr)
+ return;
+
+ /* Stamp everything to "now" */
+ inode_lock(inode);
+ ret = notify_change(&nop_mnt_idmap, f->f_path.dentry, &ia, NULL);
+ inode_unlock(inode);
+ if (ret) {
+ pr_notice_ratelimited("Unable to update timestamps on inode %02x:%02x:%lu: %d\n",
+ MAJOR(inode->i_sb->s_dev),
+ MINOR(inode->i_sb->s_dev),
+ inode->i_ino, ret);
+ }
}
static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
@@ -1239,6 +1265,7 @@ static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
WARN_ON_ONCE(!fp->fi_delegees);
+ nfsd4_finalize_deleg_timestamps(dp, nf->nf_file);
kernel_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
put_deleg_file(fp);
}
@@ -1378,7 +1405,8 @@ static void revoke_delegation(struct nfs4_delegation *dp)
struct nfs4_client *clp = dp->dl_stid.sc_client;
WARN_ON(!list_empty(&dp->dl_recall_lru));
- WARN_ON_ONCE(!(dp->dl_stid.sc_status &
+ WARN_ON_ONCE(dp->dl_stid.sc_client->cl_minorversion > 0 &&
+ !(dp->dl_stid.sc_status &
(SC_STATUS_REVOKED | SC_STATUS_ADMIN_REVOKED)));
trace_nfsd_stid_revoke(&dp->dl_stid);
@@ -1514,7 +1542,8 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
release_all_access(stp);
if (stp->st_stateowner)
nfs4_put_stateowner(stp->st_stateowner);
- WARN_ON(!list_empty(&stid->sc_cp_list));
+ if (!list_empty(&stid->sc_cp_list))
+ nfs4_free_cpntf_statelist(stid->sc_client->net, stid);
kmem_cache_free(stateid_slab, stid);
}
@@ -1909,115 +1938,145 @@ gen_sessionid(struct nfsd4_session *ses)
*/
#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
+static struct shrinker *nfsd_slot_shrinker;
+static DEFINE_SPINLOCK(nfsd_session_list_lock);
+static LIST_HEAD(nfsd_session_list);
+/* The sum of "target_slots-1" on every session. The shrinker can push this
+ * down, though it can take a little while for the memory to actually
+ * be freed. The "-1" is because we can never free slot 0 while the
+ * session is active.
+ */
+static atomic_t nfsd_total_target_slots = ATOMIC_INIT(0);
+
static void
-free_session_slots(struct nfsd4_session *ses)
+free_session_slots(struct nfsd4_session *ses, int from)
{
int i;
- for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
- free_svc_cred(&ses->se_slots[i]->sl_cred);
- kfree(ses->se_slots[i]);
+ if (from >= ses->se_fchannel.maxreqs)
+ return;
+
+ for (i = from; i < ses->se_fchannel.maxreqs; i++) {
+ struct nfsd4_slot *slot = xa_load(&ses->se_slots, i);
+
+ /*
+ * Save the seqid in case we reactivate this slot.
+ * This will never require a memory allocation, so the GFP
+ * flag is irrelevant.
+ */
+ xa_store(&ses->se_slots, i, xa_mk_value(slot->sl_seqid), 0);
+ free_svc_cred(&slot->sl_cred);
+ kfree(slot);
+ }
+ ses->se_fchannel.maxreqs = from;
+ if (ses->se_target_maxslots > from) {
+ int new_target = from ?: 1;
+ atomic_sub(ses->se_target_maxslots - new_target, &nfsd_total_target_slots);
+ ses->se_target_maxslots = new_target;
}
}
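
free_session_slots() does not leave empty xarray entries behind: each freed slot is replaced by xa_mk_value(slot->sl_seqid), so the slot's last sequence id survives as a tagged small integer and can be restored if the slot is later reallocated (the NFSD4_SLOT_REUSED path further down). A tiny standalone sketch of that tagged-integer encoding, assuming the usual low-bit tagging scheme rather than the real xarray API:

/*
 * Sketch of the tagged-integer trick behind xa_mk_value()/xa_to_value():
 * a small integer is kept where a slot pointer used to live by setting the
 * low bit, which a real (aligned) pointer never has set. Standalone C,
 * not the kernel implementation.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void *mk_value(unsigned long v)		 { return (void *)((v << 1) | 1); }
static int is_value(const void *entry)		 { return (uintptr_t)entry & 1; }
static unsigned long to_value(const void *entry) { return (uintptr_t)entry >> 1; }

int main(void)
{
	unsigned long saved_seqid = 42;		/* seqid of a freed slot */
	void *entry = mk_value(saved_seqid);	/* stored in place of the slot pointer */

	assert(is_value(entry));		/* distinguishes it from a real pointer */
	printf("restored seqid: %lu\n", to_value(entry));
	return 0;
}
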
-/*
- * We don't actually need to cache the rpc and session headers, so we
- * can allocate a little less for each slot:
+/**
+ * reduce_session_slots - reduce the target max-slots of a session if possible
+ * @ses: The session to affect
+ * @dec: how much to decrease the target by
+ *
+ * This interface can be used by a shrinker to reduce the target max-slots
+ * for a session so that some slots can eventually be freed.
+ * It uses spin_trylock() as it may be called in a context where another
+ * spinlock is held that has a dependency on client_lock. As shrinkers are
+ * best-effort, skipping a session if client_lock is already held has no
+ * great cost.
+ *
+ * Return value:
+ * The number of slots that the target was reduced by.
*/
-static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
+static int
+reduce_session_slots(struct nfsd4_session *ses, int dec)
{
- u32 size;
+ struct nfsd_net *nn = net_generic(ses->se_client->net,
+ nfsd_net_id);
+ int ret = 0;
- if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
- size = 0;
- else
- size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
- return size + sizeof(struct nfsd4_slot);
+ if (ses->se_target_maxslots <= 1)
+ return ret;
+ if (!spin_trylock(&nn->client_lock))
+ return ret;
+ ret = min(dec, ses->se_target_maxslots-1);
+ ses->se_target_maxslots -= ret;
+ atomic_sub(ret, &nfsd_total_target_slots);
+ ses->se_slot_gen += 1;
+ if (ses->se_slot_gen == 0) {
+ int i;
+ ses->se_slot_gen = 1;
+ for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
+ struct nfsd4_slot *slot = xa_load(&ses->se_slots, i);
+ slot->sl_generation = 0;
+ }
+ }
+ spin_unlock(&nn->client_lock);
+ return ret;
}
-/*
- * XXX: If we run out of reserved DRC memory we could (up to a point)
- * re-negotiate active sessions and reduce their slot usage to make
- * room for new connections. For now we just fail the create session.
- */
-static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
+static struct nfsd4_slot *nfsd4_alloc_slot(struct nfsd4_channel_attrs *fattrs,
+ int index, gfp_t gfp)
{
- u32 slotsize = slot_bytes(ca);
- u32 num = ca->maxreqs;
- unsigned long avail, total_avail;
- unsigned int scale_factor;
+ struct nfsd4_slot *slot;
+ size_t size;
- spin_lock(&nfsd_drc_lock);
- if (nfsd_drc_max_mem > nfsd_drc_mem_used)
- total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
- else
- /* We have handed out more space than we chose in
- * set_max_drc() to allow. That isn't really a
- * problem as long as that doesn't make us think we
- * have lots more due to integer overflow.
- */
- total_avail = 0;
- avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
/*
- * Never use more than a fraction of the remaining memory,
- * unless it's the only way to give this client a slot.
- * The chosen fraction is either 1/8 or 1/number of threads,
- * whichever is smaller. This ensures there are adequate
- * slots to support multiple clients per thread.
- * Give the client one slot even if that would require
- * over-allocation--it is better than failure.
+ * The RPC and NFS session headers are never saved in
+ * the slot reply cache buffer.
*/
- scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
-
- avail = clamp_t(unsigned long, avail, slotsize,
- total_avail/scale_factor);
- num = min_t(int, num, avail / slotsize);
- num = max_t(int, num, 1);
- nfsd_drc_mem_used += num * slotsize;
- spin_unlock(&nfsd_drc_lock);
+ size = fattrs->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ ?
+ 0 : fattrs->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
- return num;
-}
-
-static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
-{
- int slotsize = slot_bytes(ca);
-
- spin_lock(&nfsd_drc_lock);
- nfsd_drc_mem_used -= slotsize * ca->maxreqs;
- spin_unlock(&nfsd_drc_lock);
+ slot = kzalloc(struct_size(slot, sl_data, size), gfp);
+ if (!slot)
+ return NULL;
+ slot->sl_index = index;
+ return slot;
}
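
nfsd4_alloc_slot() sizes each slot with struct_size(): the reply-cache buffer is a flexible array at the end of the slot, and it only needs maxresp_cached minus the fixed header bytes (NFSD_MIN_HDR_SEQ_SZ, i.e. 24 + 12 + 44 from the hunk above), since the RPC and SEQUENCE headers are never cached. A small userspace sketch of that sizing, with illustrative struct and macro names rather than the kernel's own:

/*
 * Userspace sketch of the flexible-array slot sizing above. The struct,
 * macro and constant names are illustrative stand-ins, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct slot {
	unsigned int seqid;
	size_t datalen;
	unsigned char data[];			/* cached reply body */
};

/* One-piece allocation of header plus n trailing array elements. */
#define STRUCT_SIZE(type, member, n) \
	(sizeof(type) + (n) * sizeof(((type *)0)->member[0]))

#define MIN_HDR_SEQ_SZ (24 + 12 + 44)		/* mirrors NFSD_MIN_HDR_SEQ_SZ */

static struct slot *alloc_slot(size_t maxresp_cached)
{
	size_t size = maxresp_cached < MIN_HDR_SEQ_SZ ?
			0 : maxresp_cached - MIN_HDR_SEQ_SZ;
	struct slot *s = calloc(1, STRUCT_SIZE(struct slot, data, size));

	if (s)
		s->datalen = size;
	return s;
}

int main(void)
{
	struct slot *s = alloc_slot(1024);

	if (!s)
		return 1;
	printf("reply cache buffer: %zu bytes\n", s->datalen);
	free(s);
	return 0;
}
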
static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
struct nfsd4_channel_attrs *battrs)
{
int numslots = fattrs->maxreqs;
- int slotsize = slot_bytes(fattrs);
struct nfsd4_session *new;
+ struct nfsd4_slot *slot;
int i;
- BUILD_BUG_ON(struct_size(new, se_slots, NFSD_MAX_SLOTS_PER_SESSION)
- > PAGE_SIZE);
-
- new = kzalloc(struct_size(new, se_slots, numslots), GFP_KERNEL);
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
- /* allocate each struct nfsd4_slot and data cache in one piece */
- for (i = 0; i < numslots; i++) {
- new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
- if (!new->se_slots[i])
- goto out_free;
- }
+ xa_init(&new->se_slots);
+
+ slot = nfsd4_alloc_slot(fattrs, 0, GFP_KERNEL);
+ if (!slot || xa_is_err(xa_store(&new->se_slots, 0, slot, GFP_KERNEL)))
+ goto out_free;
+ for (i = 1; i < numslots; i++) {
+ const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
+ slot = nfsd4_alloc_slot(fattrs, i, gfp);
+ if (!slot)
+ break;
+ if (xa_is_err(xa_store(&new->se_slots, i, slot, gfp))) {
+ kfree(slot);
+ break;
+ }
+ }
+ fattrs->maxreqs = i;
memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
+ new->se_target_maxslots = i;
+ atomic_add(i - 1, &nfsd_total_target_slots);
new->se_cb_slot_avail = ~0U;
new->se_cb_highest_slot = min(battrs->maxreqs - 1,
NFSD_BC_SLOT_TABLE_SIZE - 1);
spin_lock_init(&new->se_lock);
return new;
out_free:
- while (i--)
- kfree(new->se_slots[i]);
+ kfree(slot);
+ xa_destroy(&new->se_slots);
kfree(new);
return NULL;
}
@@ -2123,17 +2182,47 @@ static void nfsd4_del_conns(struct nfsd4_session *s)
static void __free_session(struct nfsd4_session *ses)
{
- free_session_slots(ses);
+ free_session_slots(ses, 0);
+ xa_destroy(&ses->se_slots);
kfree(ses);
}
static void free_session(struct nfsd4_session *ses)
{
nfsd4_del_conns(ses);
- nfsd4_put_drc_mem(&ses->se_fchannel);
__free_session(ses);
}
+static unsigned long
+nfsd_slot_count(struct shrinker *s, struct shrink_control *sc)
+{
+ unsigned long cnt = atomic_read(&nfsd_total_target_slots);
+
+ return cnt ? cnt : SHRINK_EMPTY;
+}
+
+static unsigned long
+nfsd_slot_scan(struct shrinker *s, struct shrink_control *sc)
+{
+ struct nfsd4_session *ses;
+ unsigned long scanned = 0;
+ unsigned long freed = 0;
+
+ spin_lock(&nfsd_session_list_lock);
+ list_for_each_entry(ses, &nfsd_session_list, se_all_sessions) {
+ freed += reduce_session_slots(ses, 1);
+ scanned += 1;
+ if (scanned >= sc->nr_to_scan) {
+ /* Move starting point for next scan */
+ list_move(&nfsd_session_list, &ses->se_all_sessions);
+ break;
+ }
+ }
+ spin_unlock(&nfsd_session_list_lock);
+ sc->nr_scanned = scanned;
+ return freed;
+}
+
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
int idx;
@@ -2158,6 +2247,10 @@ static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, stru
list_add(&new->se_perclnt, &clp->cl_sessions);
spin_unlock(&clp->cl_lock);
+ spin_lock(&nfsd_session_list_lock);
+ list_add_tail(&new->se_all_sessions, &nfsd_session_list);
+ spin_unlock(&nfsd_session_list_lock);
+
{
struct sockaddr *sa = svc_addr(rqstp);
/*
@@ -2227,6 +2320,9 @@ unhash_session(struct nfsd4_session *ses)
spin_lock(&ses->se_client->cl_lock);
list_del(&ses->se_perclnt);
spin_unlock(&ses->se_client->cl_lock);
+ spin_lock(&nfsd_session_list_lock);
+ list_del(&ses->se_all_sessions);
+ spin_unlock(&nfsd_session_list_lock);
}
/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
@@ -2362,8 +2458,12 @@ unhash_client_locked(struct nfs4_client *clp)
}
list_del_init(&clp->cl_lru);
spin_lock(&clp->cl_lock);
- list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
+ spin_lock(&nfsd_session_list_lock);
+ list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) {
list_del_init(&ses->se_hash);
+ list_del_init(&ses->se_all_sessions);
+ }
+ spin_unlock(&nfsd_session_list_lock);
spin_unlock(&clp->cl_lock);
}
@@ -2685,6 +2785,7 @@ static const char *cb_state2str(int state)
static int client_info_show(struct seq_file *m, void *v)
{
struct inode *inode = file_inode(m->file);
+ struct nfsd4_session *ses;
struct nfs4_client *clp;
u64 clid;
@@ -2721,6 +2822,16 @@ static int client_info_show(struct seq_file *m, void *v)
seq_printf(m, "callback address: \"%pISpc\"\n", &clp->cl_cb_conn.cb_addr);
seq_printf(m, "admin-revoked states: %d\n",
atomic_read(&clp->cl_admin_revoked));
+ spin_lock(&clp->cl_lock);
+ seq_printf(m, "session slots:");
+ list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
+ seq_printf(m, " %u", ses->se_fchannel.maxreqs);
+ seq_printf(m, "\nsession target slots:");
+ list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
+ seq_printf(m, " %u", ses->se_target_maxslots);
+ spin_unlock(&clp->cl_lock);
+ seq_puts(m, "\n");
+
drop_client(clp);
return 0;
@@ -2873,6 +2984,21 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
return 0;
}
+static char *nfs4_show_deleg_type(u32 dl_type)
+{
+ switch (dl_type) {
+ case OPEN_DELEGATE_READ:
+ return "r";
+ case OPEN_DELEGATE_WRITE:
+ return "w";
+ case OPEN_DELEGATE_READ_ATTRS_DELEG:
+ return "ra";
+ case OPEN_DELEGATE_WRITE_ATTRS_DELEG:
+ return "wa";
+ }
+ return "?";
+}
+
static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
{
struct nfs4_delegation *ds;
@@ -2886,8 +3012,7 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
nfs4_show_stateid(s, &st->sc_stateid);
seq_puts(s, ": { type: deleg, ");
- seq_printf(s, "access: %s",
- ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
+ seq_printf(s, "access: %s", nfs4_show_deleg_type(ds->dl_type));
/* XXX: lease time, whether it's being recalled. */
@@ -3076,7 +3201,6 @@ nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
{
struct nfs4_client *clp = cb->cb_clp;
- clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
drop_client(clp);
}
@@ -3107,7 +3231,6 @@ nfsd4_cb_getattr_release(struct nfsd4_callback *cb)
struct nfs4_delegation *dp =
container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
- clear_and_wake_up_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags);
nfs4_put_stid(&dp->dl_stid);
}
@@ -3128,11 +3251,15 @@ static void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf)
struct nfs4_delegation *dp =
container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
- if (test_and_set_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags))
+ if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &ncf->ncf_getattr.cb_flags))
return;
+
/* set to proper status when nfsd4_cb_getattr_done runs */
ncf->ncf_cb_status = NFS4ERR_IO;
+ /* ensure that wake_bit is done when RUNNING is cleared */
+ set_bit(NFSD4_CALLBACK_WAKE, &ncf->ncf_getattr.cb_flags);
+
refcount_inc(&dp->dl_stid.sc_count);
nfsd4_run_cb(&ncf->ncf_getattr);
}
@@ -3360,7 +3487,20 @@ nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
struct nfsd4_slot *slot = resp->cstate.slot;
unsigned int base;
- dprintk("--> %s slot %p\n", __func__, slot);
+ /*
+ * RFC 5661 Section 2.10.6.1.2:
+ *
+ * Any time SEQUENCE ... returns an error ... [t]he replier MUST NOT
+ * modify the reply cache entry for the slot whenever an error is
+ * returned from SEQUENCE ...
+ *
+ * Because nfsd4_store_cache_entry() is called only by
+ * nfsd4_sequence_done(), it runs only when a SEQUENCE
+ * operation was part of the COMPOUND.
+ * nfs41_check_op_ordering() ensures SEQUENCE is the first op.
+ */
+ if (resp->opcnt == 1 && resp->cstate.status != nfs_ok)
+ return;
slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
slot->sl_opcnt = resp->opcnt;
@@ -3368,7 +3508,7 @@ nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
free_svc_cred(&slot->sl_cred);
copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
- if (!nfsd4_cache_this(resp)) {
+ if (!(resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
slot->sl_flags &= ~NFSD4_SLOT_CACHED;
return;
}
@@ -3383,41 +3523,6 @@ nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
}
/*
- * Encode the replay sequence operation from the slot values.
- * If cachethis is FALSE encode the uncached rep error on the next
- * operation which sets resp->p and increments resp->opcnt for
- * nfs4svc_encode_compoundres.
- *
- */
-static __be32
-nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
- struct nfsd4_compoundres *resp)
-{
- struct nfsd4_op *op;
- struct nfsd4_slot *slot = resp->cstate.slot;
-
- /* Encode the replayed sequence operation */
- op = &args->ops[resp->opcnt - 1];
- nfsd4_encode_operation(resp, op);
-
- if (slot->sl_flags & NFSD4_SLOT_CACHED)
- return op->status;
- if (args->opcnt == 1) {
- /*
- * The original operation wasn't a solo sequence--we
- * always cache those--so this retry must not match the
- * original:
- */
- op->status = nfserr_seq_false_retry;
- } else {
- op = &args->ops[resp->opcnt++];
- op->status = nfserr_retry_uncached_rep;
- nfsd4_encode_operation(resp, op);
- }
- return op->status;
-}
-
-/*
* The sequence operation is not cached because we can use the slot and
* session values.
*/
@@ -3425,17 +3530,30 @@ static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
struct nfsd4_sequence *seq)
{
+ struct nfsd4_compoundargs *args = resp->rqstp->rq_argp;
struct nfsd4_slot *slot = resp->cstate.slot;
struct xdr_stream *xdr = resp->xdr;
__be32 *p;
- __be32 status;
dprintk("--> %s slot %p\n", __func__, slot);
- status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
- if (status)
- return status;
+ /* Always encode the SEQUENCE response. */
+ nfsd4_encode_operation(resp, &args->ops[0]);
+ if (args->opcnt == 1)
+ /* A solo SEQUENCE - nothing was cached */
+ return args->ops[0].status;
+
+ if (!(slot->sl_flags & NFSD4_SLOT_CACHED)) {
+ /* We weren't asked to cache this. */
+ struct nfsd4_op *op;
+
+ op = &args->ops[resp->opcnt++];
+ op->status = nfserr_retry_uncached_rep;
+ nfsd4_encode_operation(resp, op);
+ return op->status;
+ }
+ /* return reply from cache */
p = xdr_reserve_space(xdr, slot->sl_datalen);
if (!p) {
WARN_ON_ONCE(1);
@@ -3708,10 +3826,10 @@ nfsd4_exchange_id_release(union nfsd4_op_u *u)
kfree(exid->server_impl_name);
}
-static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, bool slot_inuse)
+static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, u8 flags)
{
/* The slot is in use, and no response has been sent. */
- if (slot_inuse) {
+ if (flags & NFSD4_SLOT_INUSE) {
if (seqid == slot_seqid)
return nfserr_jukebox;
else
@@ -3720,6 +3838,8 @@ static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, bool slot_inuse)
/* Note unsigned 32-bit arithmetic handles wraparound: */
if (likely(seqid == slot_seqid + 1))
return nfs_ok;
+ if ((flags & NFSD4_SLOT_REUSED) && seqid == 1)
+ return nfs_ok;
if (seqid == slot_seqid)
return nfserr_replay_cache;
return nfserr_seq_misordered;
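
The "unsigned 32-bit arithmetic handles wraparound" note above is easy to verify: with u32 sequence ids, UINT32_MAX + 1 wraps to 0, so the in-order test keeps working across the wrap. A quick standalone check, with the error names shortened for the sketch:

/*
 * Standalone demonstration of the wraparound note in check_slot_seqid():
 * with unsigned 32-bit ids, "seqid == slot_seqid + 1" still matches when
 * slot_seqid is UINT32_MAX and the next seqid wraps to 0.
 */
#include <stdint.h>
#include <stdio.h>

static const char *classify(uint32_t seqid, uint32_t slot_seqid)
{
	if (seqid == slot_seqid + 1)	/* next request, in order */
		return "ok";
	if (seqid == slot_seqid)	/* same request retransmitted */
		return "replay";
	return "misordered";
}

int main(void)
{
	printf("%s\n", classify(0, UINT32_MAX));		/* ok: wraps cleanly */
	printf("%s\n", classify(UINT32_MAX, UINT32_MAX));	/* replay */
	printf("%s\n", classify(5, 3));				/* misordered */
	return 0;
}
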
@@ -3778,17 +3898,6 @@ static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfs
ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
- /*
- * Note decreasing slot size below client's request may make it
- * difficult for client to function correctly, whereas
- * decreasing the number of slots will (just?) affect
- * performance. When short on memory we therefore prefer to
- * decrease number of slots instead of their size. Clients that
- * request larger slots than they need will get poor results:
- * Note that we always allow at least one slot, because our
- * accounting is soft and provides no guarantees either way.
- */
- ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
return nfs_ok;
}
@@ -3866,11 +3975,11 @@ nfsd4_create_session(struct svc_rqst *rqstp,
return status;
status = check_backchannel_attrs(&cr_ses->back_channel);
if (status)
- goto out_release_drc_mem;
+ goto out_err;
status = nfserr_jukebox;
new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
if (!new)
- goto out_release_drc_mem;
+ goto out_err;
conn = alloc_conn_from_crses(rqstp, cr_ses);
if (!conn)
goto out_free_session;
@@ -3979,8 +4088,7 @@ out_free_conn:
free_conn(conn);
out_free_session:
__free_session(new);
-out_release_drc_mem:
- nfsd4_put_drc_mem(&cr_ses->fore_channel);
+out_err:
return status;
}
@@ -4233,6 +4341,36 @@ static bool replay_matches_cache(struct svc_rqst *rqstp,
return true;
}
+/*
+ * Note that the response is constructed here both for the case
+ * of a new SEQUENCE request and for a replayed SEQUENCE request.
+ * We do not cache SEQUENCE responses as SEQUENCE is idempotent.
+ */
+static void nfsd4_construct_sequence_response(struct nfsd4_session *session,
+ struct nfsd4_sequence *seq)
+{
+ struct nfs4_client *clp = session->se_client;
+
+ seq->maxslots_response = max(session->se_target_maxslots,
+ seq->maxslots);
+ seq->target_maxslots = session->se_target_maxslots;
+
+ switch (clp->cl_cb_state) {
+ case NFSD4_CB_DOWN:
+ seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
+ break;
+ case NFSD4_CB_FAULT:
+ seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
+ break;
+ default:
+ seq->status_flags = 0;
+ }
+ if (!list_empty(&clp->cl_revoked))
+ seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
+ if (atomic_read(&clp->cl_admin_revoked))
+ seq->status_flags |= SEQ4_STATUS_ADMIN_STATE_REVOKED;
+}
+
__be32
nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
@@ -4278,17 +4416,14 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (seq->slotid >= session->se_fchannel.maxreqs)
goto out_put_session;
- slot = session->se_slots[seq->slotid];
+ slot = xa_load(&session->se_slots, seq->slotid);
dprintk("%s: slotid %d\n", __func__, seq->slotid);
- /* We do not negotiate the number of slots yet, so set the
- * maxslots to the session maxreqs which is used to encode
- * sr_highest_slotid and the sr_target_slot id to maxslots */
- seq->maxslots = session->se_fchannel.maxreqs;
-
trace_nfsd_slot_seqid_sequence(clp, seq, slot);
- status = check_slot_seqid(seq->seqid, slot->sl_seqid,
- slot->sl_flags & NFSD4_SLOT_INUSE);
+
+ nfsd4_construct_sequence_response(session, seq);
+
+ status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_flags);
if (status == nfserr_replay_cache) {
status = nfserr_seq_misordered;
if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
@@ -4313,6 +4448,12 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto out_put_session;
+ if (session->se_target_maxslots < session->se_fchannel.maxreqs &&
+ slot->sl_generation == session->se_slot_gen &&
+ seq->maxslots <= session->se_target_maxslots)
+ /* Client acknowledged our reduced maxreqs */
+ free_session_slots(session, session->se_target_maxslots);
+
buflen = (seq->cachethis) ?
session->se_fchannel.maxresp_cached :
session->se_fchannel.maxresp_sz;
@@ -4320,12 +4461,14 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfserr_rep_too_big;
if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
goto out_put_session;
- svc_reserve(rqstp, buflen);
+ svc_reserve_auth(rqstp, buflen);
status = nfs_ok;
- /* Success! bump slot seqid */
+ /* Success! accept new slot seqid */
slot->sl_seqid = seq->seqid;
+ slot->sl_flags &= ~NFSD4_SLOT_REUSED;
slot->sl_flags |= NFSD4_SLOT_INUSE;
+ slot->sl_generation = session->se_slot_gen;
if (seq->cachethis)
slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
else
@@ -4335,21 +4478,48 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
cstate->session = session;
cstate->clp = clp;
-out:
- switch (clp->cl_cb_state) {
- case NFSD4_CB_DOWN:
- seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
- break;
- case NFSD4_CB_FAULT:
- seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
- break;
- default:
- seq->status_flags = 0;
+ /*
+ * If the client ever uses the highest available slot,
+ * gently try to allocate another 20%. This allows
+ * fairly quick growth without grossly over-shooting what
+ * the client might use.
+ */
+ if (seq->slotid == session->se_fchannel.maxreqs - 1 &&
+ session->se_target_maxslots >= session->se_fchannel.maxreqs &&
+ session->se_fchannel.maxreqs < NFSD_MAX_SLOTS_PER_SESSION) {
+ int s = session->se_fchannel.maxreqs;
+ int cnt = DIV_ROUND_UP(s, 5);
+ void *prev_slot;
+
+ do {
+ /*
+ * GFP_NOWAIT both allows allocation under a
+ * spinlock, and only succeeds if there is
+ * plenty of memory.
+ */
+ slot = nfsd4_alloc_slot(&session->se_fchannel, s,
+ GFP_NOWAIT);
+ prev_slot = xa_load(&session->se_slots, s);
+ if (xa_is_value(prev_slot) && slot) {
+ slot->sl_seqid = xa_to_value(prev_slot);
+ slot->sl_flags |= NFSD4_SLOT_REUSED;
+ }
+ if (slot &&
+ !xa_is_err(xa_store(&session->se_slots, s, slot,
+ GFP_NOWAIT))) {
+ s += 1;
+ session->se_fchannel.maxreqs = s;
+ atomic_add(s - session->se_target_maxslots,
+ &nfsd_total_target_slots);
+ session->se_target_maxslots = s;
+ } else {
+ kfree(slot);
+ slot = NULL;
+ }
+ } while (slot && --cnt > 0);
}
- if (!list_empty(&clp->cl_revoked))
- seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
- if (atomic_read(&clp->cl_admin_revoked))
- seq->status_flags |= SEQ4_STATUS_ADMIN_STATE_REVOKED;
+
+out:
trace_nfsd_seq4_status(rqstp, seq);
out_no_session:
if (conn)
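
The new growth path above adds roughly 20% more slots, DIV_ROUND_UP(maxreqs, 5) at a time, whenever the client touches its highest slot; GFP_NOWAIT keeps the allocations safe under the spinlock and simply stops the growth early under memory pressure. A trivial sketch of how that schedule ramps up; the cap below is an illustrative stand-in for NFSD_MAX_SLOTS_PER_SESSION:

/*
 * Sketch of the ~20% growth schedule above. MAX_SLOTS is an illustrative
 * stand-in for NFSD_MAX_SLOTS_PER_SESSION; a failed allocation in the real
 * code simply ends a round early.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MAX_SLOTS		2048

int main(void)
{
	int maxreqs = 8;	/* slots handed out at CREATE_SESSION time */
	int round;

	for (round = 1; round <= 12 && maxreqs < MAX_SLOTS; round++) {
		int add = DIV_ROUND_UP(maxreqs, 5);

		if (maxreqs + add > MAX_SLOTS)
			add = MAX_SLOTS - maxreqs;
		maxreqs += add;
		printf("round %2d: %4d slots\n", round, maxreqs);
	}
	return 0;
}
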
@@ -4565,10 +4735,16 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
}
status = nfs_ok;
if (conf) {
- old = unconf;
- unhash_client_locked(old);
- nfsd4_change_callback(conf, &unconf->cl_cb_conn);
- } else {
+ if (get_client_locked(conf) == nfs_ok) {
+ old = unconf;
+ unhash_client_locked(old);
+ nfsd4_change_callback(conf, &unconf->cl_cb_conn);
+ } else {
+ conf = NULL;
+ }
+ }
+
+ if (!conf) {
old = find_confirmed_client_by_name(&unconf->cl_name, nn);
if (old) {
status = nfserr_clid_inuse;
@@ -4585,10 +4761,14 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
}
trace_nfsd_clid_replaced(&old->cl_clientid);
}
+ status = get_client_locked(unconf);
+ if (status != nfs_ok) {
+ old = NULL;
+ goto out;
+ }
move_to_confirmed(unconf);
conf = unconf;
}
- get_client_locked(conf);
spin_unlock(&nn->client_lock);
if (conf == unconf)
fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
@@ -4618,6 +4798,7 @@ static void nfsd4_file_init(const struct svc_fh *fh, struct nfs4_file *fp)
INIT_LIST_HEAD(&fp->fi_clnt_odstate);
fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle);
fp->fi_deleg_file = NULL;
+ fp->fi_rdeleg_file = NULL;
fp->fi_had_conflict = false;
fp->fi_share_deny = 0;
memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
@@ -4687,8 +4868,8 @@ out:
static unsigned long
nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
{
- int count;
struct nfsd_net *nn = shrink->private_data;
+ long count;
count = atomic_read(&nn->nfsd_courtesy_clients);
if (!count)
@@ -4739,7 +4920,7 @@ static void init_nfs4_replay(struct nfs4_replay *rp)
rp->rp_status = nfserr_serverfault;
rp->rp_buflen = 0;
rp->rp_buf = rp->rp_ibuf;
- atomic_set(&rp->rp_locked, RP_UNLOCKED);
+ rp->rp_locked = RP_UNLOCKED;
}
static int nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
@@ -4747,9 +4928,9 @@ static int nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
{
if (!nfsd4_has_session(cstate)) {
wait_var_event(&so->so_replay.rp_locked,
- atomic_cmpxchg(&so->so_replay.rp_locked,
- RP_UNLOCKED, RP_LOCKED) != RP_LOCKED);
- if (atomic_read(&so->so_replay.rp_locked) == RP_UNHASHED)
+ cmpxchg(&so->so_replay.rp_locked,
+ RP_UNLOCKED, RP_LOCKED) != RP_LOCKED);
+ if (so->so_replay.rp_locked == RP_UNHASHED)
return -EAGAIN;
cstate->replay_owner = nfs4_get_stateowner(so);
}
@@ -4762,9 +4943,7 @@ void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
if (so != NULL) {
cstate->replay_owner = NULL;
- atomic_set(&so->so_replay.rp_locked, RP_UNLOCKED);
- smp_mb__after_atomic();
- wake_up_var(&so->so_replay.rp_locked);
+ store_release_wake_up(&so->so_replay.rp_locked, RP_UNLOCKED);
nfs4_put_stateowner(so);
}
}
@@ -5069,9 +5248,7 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
* Some threads with a reference might be waiting for rp_locked,
* so tell them to stop waiting.
*/
- atomic_set(&oo->oo_owner.so_replay.rp_locked, RP_UNHASHED);
- smp_mb__after_atomic();
- wake_up_var(&oo->oo_owner.so_replay.rp_locked);
+ store_release_wake_up(&oo->oo_owner.so_replay.rp_locked, RP_UNHASHED);
wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
release_all_access(s);
@@ -5290,6 +5467,11 @@ static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
+ bool queued;
+
+ if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &dp->dl_recall.cb_flags))
+ return;
+
/*
* We're assuming the state code never drops its reference
* without first removing the lease. Since we're in this lease
@@ -5298,7 +5480,10 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
* we know it's safe to take a reference.
*/
refcount_inc(&dp->dl_stid.sc_count);
- WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
+ queued = nfsd4_run_cb(&dp->dl_recall);
+ WARN_ON_ONCE(!queued);
+ if (!queued)
+ refcount_dec(&dp->dl_stid.sc_count);
}
/* Called from break_lease() with flc_lock held. */
@@ -5472,7 +5657,7 @@ retry:
static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
- if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
+ if (!(flags & RD_STATE) && deleg_is_read(dp->dl_type))
return nfserr_openmode;
else
return nfs_ok;
@@ -5704,8 +5889,7 @@ static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}
-static struct file_lease *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
- int flag)
+static struct file_lease *nfs4_alloc_init_lease(struct nfs4_delegation *dp)
{
struct file_lease *fl;
@@ -5714,7 +5898,7 @@ static struct file_lease *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
return NULL;
fl->fl_lmops = &nfsd_lease_mng_ops;
fl->c.flc_flags = FL_DELEG;
- fl->c.flc_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
+ fl->c.flc_type = deleg_is_read(dp->dl_type) ? F_RDLCK : F_WRLCK;
fl->c.flc_owner = (fl_owner_t)dp;
fl->c.flc_pid = current->tgid;
fl->c.flc_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
@@ -5825,17 +6009,30 @@ nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf)
return 0;
}
+#ifdef CONFIG_NFSD_V4_DELEG_TIMESTAMPS
+static bool nfsd4_want_deleg_timestamps(const struct nfsd4_open *open)
+{
+ return open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS;
+}
+#else /* CONFIG_NFSD_V4_DELEG_TIMESTAMPS */
+static bool nfsd4_want_deleg_timestamps(const struct nfsd4_open *open)
+{
+ return false;
+}
+#endif /* CONFIG_NFSD_V4_DELEG_TIMESTAMPS */
+
static struct nfs4_delegation *
nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
struct svc_fh *parent)
{
- int status = 0;
+ bool deleg_ts = nfsd4_want_deleg_timestamps(open);
struct nfs4_client *clp = stp->st_stid.sc_client;
struct nfs4_file *fp = stp->st_stid.sc_file;
struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
struct nfs4_delegation *dp;
struct nfsd_file *nf = NULL;
struct file_lease *fl;
+ int status = 0;
u32 dl_type;
/*
@@ -5852,15 +6049,20 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
* "An OPEN_DELEGATE_WRITE delegation allows the client to handle,
* on its own, all opens."
*
- * Furthermore the client can use a write delegation for most READ
- * operations as well, so we require a O_RDWR file here.
+ * Furthermore, section 9.1.2 says:
*
- * Offer a write delegation in the case of a BOTH open, and ensure
- * we get the O_RDWR descriptor.
+ * "In the case of READ, the server may perform the corresponding
+ * check on the access mode, or it may choose to allow READ for
+ * OPEN4_SHARE_ACCESS_WRITE, to accommodate clients whose WRITE
+ * implementation may unavoidably do reads (e.g., due to buffer
+ * cache constraints)."
+ *
+ * We choose to offer a write delegation for OPEN with the
+ * OPEN4_SHARE_ACCESS_WRITE access mode to accommodate such clients.
*/
- if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == NFS4_SHARE_ACCESS_BOTH) {
- nf = find_rw_file(fp);
- dl_type = NFS4_OPEN_DELEGATE_WRITE;
+ if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
+ nf = find_writeable_file(fp);
+ dl_type = deleg_ts ? OPEN_DELEGATE_WRITE_ATTRS_DELEG : OPEN_DELEGATE_WRITE;
}
/*
@@ -5869,12 +6071,21 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
*/
if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) {
nf = find_readable_file(fp);
- dl_type = NFS4_OPEN_DELEGATE_READ;
+ dl_type = deleg_ts ? OPEN_DELEGATE_READ_ATTRS_DELEG : OPEN_DELEGATE_READ;
}
if (!nf)
return ERR_PTR(-EAGAIN);
+ /*
+ * File delegations and associated locks cannot be recovered if the
+ * export is from an NFS proxy server.
+ */
+ if (exportfs_cannot_lock(nf->nf_file->f_path.mnt->mnt_sb->s_export_op)) {
+ nfsd_file_put(nf);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
spin_lock(&state_lock);
spin_lock(&fp->fi_lock);
if (nfs4_delegation_exists(clp, fp))
@@ -5901,7 +6112,7 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
if (!dp)
goto out_delegees;
- fl = nfs4_alloc_init_lease(dp, dl_type);
+ fl = nfs4_alloc_init_lease(dp);
if (!fl)
goto out_clnt_odstate;
@@ -5958,20 +6169,20 @@ out_delegees:
static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
- open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
+ open->op_delegate_type = OPEN_DELEGATE_NONE_EXT;
if (status == -EAGAIN)
open->op_why_no_deleg = WND4_CONTENTION;
else {
open->op_why_no_deleg = WND4_RESOURCE;
switch (open->op_deleg_want) {
- case NFS4_SHARE_WANT_READ_DELEG:
- case NFS4_SHARE_WANT_WRITE_DELEG:
- case NFS4_SHARE_WANT_ANY_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_READ_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_ANY_DELEG:
break;
- case NFS4_SHARE_WANT_CANCEL:
+ case OPEN4_SHARE_ACCESS_WANT_CANCEL:
open->op_why_no_deleg = WND4_CANCELLED;
break;
- case NFS4_SHARE_WANT_NO_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_NO_DELEG:
WARN_ON_ONCE(1);
}
}
@@ -5981,7 +6192,7 @@ static bool
nfs4_delegation_stat(struct nfs4_delegation *dp, struct svc_fh *currentfh,
struct kstat *stat)
{
- struct nfsd_file *nf = find_rw_file(dp->dl_stid.sc_file);
+ struct nfsd_file *nf = find_writeable_file(dp->dl_stid.sc_file);
struct path path;
int rc;
@@ -5992,7 +6203,8 @@ nfs4_delegation_stat(struct nfs4_delegation *dp, struct svc_fh *currentfh,
path.dentry = file_dentry(nf->nf_file);
rc = vfs_getattr(&path, stat,
- (STATX_MODE | STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE),
+ STATX_MODE | STATX_SIZE | STATX_ATIME |
+ STATX_MTIME | STATX_CTIME | STATX_CHANGE_COOKIE,
AT_STATX_SYNC_AS_STAT);
nfsd_file_put(nf);
@@ -6000,6 +6212,34 @@ nfs4_delegation_stat(struct nfs4_delegation *dp, struct svc_fh *currentfh,
}
/*
+ * Add NFS4_SHARE_ACCESS_READ to the write delegation granted on OPEN
+ * with NFS4_SHARE_ACCESS_WRITE by allocating a separate nfsd_file and
+ * struct file to be used for reads with the delegation stateid.
+ */
+static bool
+nfsd4_add_rdaccess_to_wrdeleg(struct svc_rqst *rqstp, struct nfsd4_open *open,
+ struct svc_fh *fh, struct nfs4_ol_stateid *stp)
+{
+ struct nfs4_file *fp;
+ struct nfsd_file *nf = NULL;
+
+ if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) ==
+ NFS4_SHARE_ACCESS_WRITE) {
+ if (nfsd_file_acquire_opened(rqstp, fh, NFSD_MAY_READ, NULL, &nf))
+ return false;
+ fp = stp->st_stid.sc_file;
+ spin_lock(&fp->fi_lock);
+ __nfs4_file_get_access(fp, NFS4_SHARE_ACCESS_READ);
+ fp = stp->st_stid.sc_file;
+ fp->fi_fds[O_RDONLY] = nf;
+ fp->fi_rdeleg_file = nf;
+ spin_unlock(&fp->fi_lock);
+ }
+ return true;
+}
+
+/*
* The Linux NFS server does not offer write delegations to NFSv4.0
* clients in order to avoid conflicts between write delegations and
* GETATTRs requesting CHANGE or SIZE attributes.
@@ -6024,16 +6264,18 @@ nfs4_delegation_stat(struct nfs4_delegation *dp, struct svc_fh *currentfh,
* open or lock state.
*/
static void
-nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
- struct svc_fh *currentfh)
+nfs4_open_delegation(struct svc_rqst *rqstp, struct nfsd4_open *open,
+ struct nfs4_ol_stateid *stp, struct svc_fh *currentfh,
+ struct svc_fh *fh)
{
- struct nfs4_delegation *dp;
struct nfs4_openowner *oo = openowner(stp->st_stateowner);
+ bool deleg_ts = nfsd4_want_deleg_timestamps(open);
struct nfs4_client *clp = stp->st_stid.sc_client;
struct svc_fh *parent = NULL;
- int cb_up;
- int status = 0;
+ struct nfs4_delegation *dp;
struct kstat stat;
+ int status = 0;
+ int cb_up;
cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
open->op_recall = false;
@@ -6069,28 +6311,35 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
- if (!nfs4_delegation_stat(dp, currentfh, &stat)) {
+ struct file *f = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
+
+ if (!nfsd4_add_rdaccess_to_wrdeleg(rqstp, open, fh, stp) ||
+ !nfs4_delegation_stat(dp, currentfh, &stat)) {
nfs4_put_stid(&dp->dl_stid);
destroy_delegation(dp);
goto out_no_deleg;
}
- open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
+ open->op_delegate_type = deleg_ts ? OPEN_DELEGATE_WRITE_ATTRS_DELEG :
+ OPEN_DELEGATE_WRITE;
dp->dl_cb_fattr.ncf_cur_fsize = stat.size;
dp->dl_cb_fattr.ncf_initial_cinfo = nfsd4_change_attribute(&stat);
+ dp->dl_atime = stat.atime;
+ dp->dl_ctime = stat.ctime;
+ dp->dl_mtime = stat.mtime;
+ spin_lock(&f->f_lock);
+ f->f_mode |= FMODE_NOCMTIME;
+ spin_unlock(&f->f_lock);
trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
} else {
- open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
+ open->op_delegate_type = deleg_ts && nfs4_delegation_stat(dp, currentfh, &stat) ?
+ OPEN_DELEGATE_READ_ATTRS_DELEG : OPEN_DELEGATE_READ;
+ dp->dl_atime = stat.atime;
trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
}
nfs4_put_stid(&dp->dl_stid);
return;
out_no_deleg:
- open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
- if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
- open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
- dprintk("NFSD: WARNING: refusing delegation reclaim\n");
- open->op_recall = true;
- }
+ open->op_delegate_type = OPEN_DELEGATE_NONE;
/* 4.1 client asking for a delegation? */
if (open->op_deleg_want)
@@ -6101,21 +6350,32 @@ out_no_deleg:
static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
struct nfs4_delegation *dp)
{
- if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
- dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
- open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
- open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
- } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
- dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
- open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
- open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
+ if (deleg_is_write(dp->dl_type)) {
+ if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_READ_DELEG) {
+ open->op_delegate_type = OPEN_DELEGATE_NONE_EXT;
+ open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
+ } else if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG) {
+ open->op_delegate_type = OPEN_DELEGATE_NONE_EXT;
+ open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
+ }
}
/* Otherwise the client must be confused wanting a delegation
* it already has, therefore we don't return
- * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
+ * OPEN_DELEGATE_NONE_EXT and reason.
*/
}
+/* Are we returning only a delegation stateid? */
+static bool open_xor_delegation(struct nfsd4_open *open)
+{
+ if (!(open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION))
+ return false;
+ /* Did we actually get a delegation? */
+ if (!deleg_is_read(open->op_delegate_type) && !deleg_is_write(open->op_delegate_type))
+ return false;
+ return true;
+}
+
/**
* nfsd4_process_open2 - finish open processing
* @rqstp: the RPC transaction being executed
@@ -6151,6 +6411,20 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
status = nfs4_check_deleg(cl, open, &dp);
if (status)
goto out;
+ if (dp && nfsd4_is_deleg_cur(open) &&
+ (dp->dl_stid.sc_file != fp)) {
+ /*
+ * RFC8881 section 8.2.4 mandates the server to return
+ * NFS4ERR_BAD_STATEID if the selected table entry does
+ * not match the current filehandle. However, returning
+ * NFS4ERR_BAD_STATEID in the OPEN can cause the client
+ * to repeatedly retry the operation with the same
+ * stateid, since the stateid itself is valid. To avoid
+ * this situation, NFSD returns NFS4ERR_INVAL instead.
+ */
+ status = nfserr_inval;
+ goto out;
+ }
stp = nfsd4_find_and_lock_existing_open(fp, open);
} else {
open->op_file = NULL;
@@ -6201,8 +6475,8 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
mutex_unlock(&stp->st_mutex);
if (nfsd4_has_session(&resp->cstate)) {
- if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
- open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
+ if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_NO_DELEG) {
+ open->op_delegate_type = OPEN_DELEGATE_NONE_EXT;
open->op_why_no_deleg = WND4_NOT_WANTED;
goto nodeleg;
}
@@ -6212,13 +6486,25 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
* Attempt to hand out a delegation. No error return, because the
* OPEN succeeds even if we fail.
*/
- nfs4_open_delegation(open, stp, &resp->cstate.current_fh);
+ nfs4_open_delegation(rqstp, open, stp,
+ &resp->cstate.current_fh, current_fh);
+
+ /*
+ * If there is an existing open stateid, it must be updated and
+ * returned. Only respect WANT_OPEN_XOR_DELEGATION when a new
+ * open stateid would have to be created.
+ */
+ if (new_stp && open_xor_delegation(open)) {
+ memcpy(&open->op_stateid, &zero_stateid, sizeof(open->op_stateid));
+ open->op_rflags |= OPEN4_RESULT_NO_OPEN_STATEID;
+ release_open_stateid(stp);
+ }
nodeleg:
status = nfs_ok;
trace_nfsd_open(&stp->st_stid.sc_stateid);
out:
/* 4.1 client trying to upgrade/downgrade delegation? */
- if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
+ if (open->op_delegate_type == OPEN_DELEGATE_NONE && dp &&
open->op_deleg_want)
nfsd4_deleg_xgrade_none_ext(open, dp);
@@ -6229,7 +6515,7 @@ out:
/*
* To finish the open response, we just need to set the rflags.
*/
- open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
+ open->op_rflags |= NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
if (nfsd4_has_session(&resp->cstate))
open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
@@ -6706,38 +6992,34 @@ deleg_reaper(struct nfsd_net *nn)
{
struct list_head *pos, *next;
struct nfs4_client *clp;
- LIST_HEAD(cblist);
spin_lock(&nn->client_lock);
list_for_each_safe(pos, next, &nn->client_lru) {
clp = list_entry(pos, struct nfs4_client, cl_lru);
- if (clp->cl_state != NFSD4_ACTIVE ||
- list_empty(&clp->cl_delegations) ||
- atomic_read(&clp->cl_delegs_in_recall) ||
- test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) ||
- (ktime_get_boottime_seconds() -
- clp->cl_ra_time < 5)) {
+
+ if (clp->cl_state != NFSD4_ACTIVE)
+ continue;
+ if (list_empty(&clp->cl_delegations))
+ continue;
+ if (atomic_read(&clp->cl_delegs_in_recall))
+ continue;
+ if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &clp->cl_ra->ra_cb.cb_flags))
+ continue;
+ if (ktime_get_boottime_seconds() - clp->cl_ra_time < 5)
+ continue;
+ if (clp->cl_cb_state != NFSD4_CB_UP)
continue;
- }
- list_add(&clp->cl_ra_cblist, &cblist);
/* release in nfsd4_cb_recall_any_release */
kref_get(&clp->cl_nfsdfs.cl_ref);
- set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
clp->cl_ra_time = ktime_get_boottime_seconds();
- }
- spin_unlock(&nn->client_lock);
-
- while (!list_empty(&cblist)) {
- clp = list_first_entry(&cblist, struct nfs4_client,
- cl_ra_cblist);
- list_del_init(&clp->cl_ra_cblist);
clp->cl_ra->ra_keep = 0;
clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG) |
BIT(RCA4_TYPE_MASK_WDATA_DLG);
trace_nfsd_cb_recall_any(clp->cl_ra);
nfsd4_run_cb(&clp->cl_ra->ra_cb);
}
+ spin_unlock(&nn->client_lock);
}
static void
@@ -6898,11 +7180,11 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
return_revoked = true;
if (typemask & SC_TYPE_DELEG)
/* Always allow REVOKED for DELEG so we can
- * retturn the appropriate error.
+ * return the appropriate error.
*/
statusmask |= SC_STATUS_REVOKED;
- statusmask |= SC_STATUS_ADMIN_REVOKED;
+ statusmask |= SC_STATUS_ADMIN_REVOKED | SC_STATUS_FREEABLE;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
CLOSE_STATEID(stateid))
@@ -6941,10 +7223,6 @@ nfs4_find_file(struct nfs4_stid *s, int flags)
switch (s->sc_type) {
case SC_TYPE_DELEG:
- spin_lock(&s->sc_file->fi_lock);
- ret = nfsd_file_get(s->sc_file->fi_deleg_file);
- spin_unlock(&s->sc_file->fi_lock);
- break;
case SC_TYPE_OPEN:
case SC_TYPE_LOCK:
if (flags & RD_STATE)
@@ -7554,12 +7832,11 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
+ status = fh_verify(rqstp, &cstate->current_fh, 0, 0);
+ if (status)
return status;
- status = nfsd4_lookup_stateid(cstate, stateid, SC_TYPE_DELEG,
- SC_STATUS_REVOKED | SC_STATUS_FREEABLE,
- &s, nn);
+ status = nfsd4_lookup_stateid(cstate, stateid, SC_TYPE_DELEG, SC_STATUS_REVOKED, &s, nn);
if (status)
goto out;
dp = delegstateid(s);
@@ -7667,7 +7944,7 @@ nfsd4_lm_notify(struct file_lock *fl)
if (queue) {
trace_nfsd_cb_notify_lock(lo, nbl);
- nfsd4_run_cb(&nbl->nbl_cb);
+ nfsd4_try_run_cb(&nbl->nbl_cb);
}
}
@@ -7966,7 +8243,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_blocked_lock *nbl = NULL;
struct file_lock *file_lock = NULL;
struct file_lock *conflock = NULL;
- struct super_block *sb;
__be32 status = 0;
int lkflg;
int err;
@@ -7986,7 +8262,10 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
if (status != nfs_ok)
return status;
- sb = cstate->current_fh.fh_dentry->d_sb;
+ if (exportfs_cannot_lock(cstate->current_fh.fh_dentry->d_sb->s_export_op)) {
+ status = nfserr_notsupp;
+ goto out;
+ }
if (lock->lk_is_new) {
if (nfsd4_has_session(cstate))
@@ -8326,6 +8605,11 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfserr_lock_range;
goto put_stateid;
}
+ if (exportfs_cannot_lock(nf->nf_file->f_path.mnt->mnt_sb->s_export_op)) {
+ status = nfserr_notsupp;
+ goto put_file;
+ }
+
file_lock = locks_alloc_lock();
if (!file_lock) {
dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
@@ -8491,9 +8775,6 @@ nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
/*
* failure => all reset bets are off, nfserr_no_grace...
- *
- * The caller is responsible for freeing name.data if NULL is returned (it
- * will be freed in nfs4_remove_reclaim_record in the normal case).
*/
struct nfs4_client_reclaim *
nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
@@ -8502,6 +8783,22 @@ nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
unsigned int strhashval;
struct nfs4_client_reclaim *crp;
+ name.data = kmemdup(name.data, name.len, GFP_KERNEL);
+ if (!name.data) {
+ dprintk("%s: failed to allocate memory for name.data!\n",
+ __func__);
+ return NULL;
+ }
+ if (princhash.len) {
+ princhash.data = kmemdup(princhash.data, princhash.len, GFP_KERNEL);
+ if (!princhash.data) {
+ dprintk("%s: failed to allocate memory for princhash.data!\n",
+ __func__);
+ kfree(name.data);
+ return NULL;
+ }
+ } else
+ princhash.data = NULL;
crp = alloc_reclaim();
if (crp) {
strhashval = clientstr_hashval(name);
@@ -8513,6 +8810,9 @@ nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
crp->cr_princhash.len = princhash.len;
crp->cr_clp = NULL;
nn->reclaim_str_hashtbl_size++;
+ } else {
+ kfree(name.data);
+ kfree(princhash.data);
}
return crp;
}
@@ -8721,7 +9021,6 @@ skip_grace:
}
/* initialization to perform when the nfsd service is started: */
-
int
nfs4_state_start(void)
{
@@ -8731,6 +9030,15 @@ nfs4_state_start(void)
if (ret)
return ret;
+ nfsd_slot_shrinker = shrinker_alloc(0, "nfsd-DRC-slot");
+ if (!nfsd_slot_shrinker) {
+ rhltable_destroy(&nfs4_file_rhltable);
+ return -ENOMEM;
+ }
+ nfsd_slot_shrinker->count_objects = nfsd_slot_count;
+ nfsd_slot_shrinker->scan_objects = nfsd_slot_scan;
+ shrinker_register(nfsd_slot_shrinker);
+
set_max_delegations();
return 0;
}
@@ -8772,6 +9080,7 @@ void
nfs4_state_shutdown(void)
{
rhltable_destroy(&nfs4_file_rhltable);
+ shrinker_free(nfsd_slot_shrinker);
}
static void
@@ -8889,6 +9198,75 @@ nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
}
/**
+ * nfsd4_vet_deleg_time - vet and set the timespec for a delegated timestamp update
+ * @req: timestamp from the client
+ * @orig: original timestamp in the inode
+ * @now: current time
+ *
+ * Given a timestamp from the client response, check it against the
+ * current timestamp in the inode and the current time. Returns true
+ * if the inode's timestamp needs to be updated, and false otherwise.
+ * @req may also be changed if the timestamp needs to be clamped.
+ */
+bool nfsd4_vet_deleg_time(struct timespec64 *req, const struct timespec64 *orig,
+ const struct timespec64 *now)
+{
+ /*
+ * "When the time presented is before the original time, then the
+ * update is ignored." Also no need to update if there is no change.
+ */
+ if (timespec64_compare(req, orig) <= 0)
+ return false;
+
+ /*
+ * "When the time presented is in the future, the server can either
+ * clamp the new time to the current time, or it may
+ * return NFS4ERR_DELAY to the client, allowing it to retry."
+ */
+ if (timespec64_compare(req, now) > 0)
+ *req = *now;
+
+ return true;
+}
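/*
 * Editorial sketch (not part of the patch): a userspace model of the
 * clamping rules implemented above, using plain struct timespec and a
 * local comparison helper instead of the kernel's timespec64 API.
 * Requests that go backwards (or are unchanged) are ignored; requests in
 * the future are clamped to "now".
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static int ts_cmp(const struct timespec *a, const struct timespec *b)
{
	if (a->tv_sec != b->tv_sec)
		return a->tv_sec < b->tv_sec ? -1 : 1;
	if (a->tv_nsec != b->tv_nsec)
		return a->tv_nsec < b->tv_nsec ? -1 : 1;
	return 0;
}

static bool vet_deleg_time(struct timespec *req, const struct timespec *orig,
			   const struct timespec *now)
{
	if (ts_cmp(req, orig) <= 0)	/* before original, or no change: ignore */
		return false;
	if (ts_cmp(req, now) > 0)	/* in the future: clamp to current time */
		*req = *now;
	return true;
}

int main(void)
{
	struct timespec orig = { .tv_sec = 100 };
	struct timespec now = { .tv_sec = 200 };
	struct timespec req = { .tv_sec = 500 };	/* client timestamp in the future */

	if (vet_deleg_time(&req, &orig, &now))
		printf("apply update, clamped to %ld\n", (long)req.tv_sec);	/* prints 200 */
	return 0;
}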
+
+static int cb_getattr_update_times(struct dentry *dentry, struct nfs4_delegation *dp)
+{
+ struct inode *inode = d_inode(dentry);
+ struct nfs4_cb_fattr *ncf = &dp->dl_cb_fattr;
+ struct iattr attrs = { };
+ int ret;
+
+ if (deleg_attrs_deleg(dp->dl_type)) {
+ struct timespec64 now = current_time(inode);
+
+ attrs.ia_atime = ncf->ncf_cb_atime;
+ attrs.ia_mtime = ncf->ncf_cb_mtime;
+
+ if (nfsd4_vet_deleg_time(&attrs.ia_atime, &dp->dl_atime, &now))
+ attrs.ia_valid |= ATTR_ATIME | ATTR_ATIME_SET;
+
+ if (nfsd4_vet_deleg_time(&attrs.ia_mtime, &dp->dl_mtime, &now)) {
+ attrs.ia_valid |= ATTR_MTIME | ATTR_MTIME_SET;
+ attrs.ia_ctime = attrs.ia_mtime;
+ if (nfsd4_vet_deleg_time(&attrs.ia_ctime, &dp->dl_ctime, &now))
+ attrs.ia_valid |= ATTR_CTIME | ATTR_CTIME_SET;
+ }
+ } else {
+ attrs.ia_valid |= ATTR_MTIME | ATTR_CTIME;
+ }
+
+ if (!attrs.ia_valid)
+ return 0;
+
+ attrs.ia_valid |= ATTR_DELEG;
+ inode_lock(inode);
+ ret = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL);
+ inode_unlock(inode);
+ return ret;
+}
+
+/**
* nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
* @rqstp: RPC transaction context
* @dentry: dentry of inode to be checked for a conflict
@@ -8914,7 +9292,6 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry,
struct file_lock_context *ctx;
struct nfs4_delegation *dp = NULL;
struct file_lease *fl;
- struct iattr attrs;
struct nfs4_cb_fattr *ncf;
struct inode *inode = d_inode(dentry);
@@ -8955,8 +9332,8 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry,
nfs4_cb_getattr(&dp->dl_cb_fattr);
spin_unlock(&ctx->flc_lock);
- wait_on_bit_timeout(&ncf->ncf_cb_flags, CB_GETATTR_BUSY,
- TASK_INTERRUPTIBLE, NFSD_CB_GETATTR_TIMEOUT);
+ wait_on_bit_timeout(&ncf->ncf_getattr.cb_flags, NFSD4_CALLBACK_RUNNING,
+ TASK_UNINTERRUPTIBLE, NFSD_CB_GETATTR_TIMEOUT);
if (ncf->ncf_cb_status) {
/* Recall delegation only if client didn't respond */
status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
@@ -8976,11 +9353,7 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry,
* not update the file's metadata with the client's
* modified size
*/
- attrs.ia_mtime = attrs.ia_ctime = current_time(inode);
- attrs.ia_valid = ATTR_MTIME | ATTR_CTIME | ATTR_DELEG;
- inode_lock(inode);
- err = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL);
- inode_unlock(inode);
+ err = cb_getattr_update_times(dentry, dp);
if (err) {
status = nfserrno(err);
goto out_status;
@@ -8994,3 +9367,103 @@ out_status:
nfs4_put_stid(&dp->dl_stid);
return status;
}
+
+/**
+ * nfsd_get_dir_deleg - attempt to get a directory delegation
+ * @cstate: compound state
+ * @gdd: GET_DIR_DELEGATION arg/resp structure
+ * @nf: nfsd_file opened on the directory
+ *
+ * Given a GET_DIR_DELEGATION request @gdd, attempt to acquire a delegation
+ * on the directory to which @nf refers. Note that this does not set up any
+ * sort of async notifications for the delegation.
+ */
+struct nfs4_delegation *
+nfsd_get_dir_deleg(struct nfsd4_compound_state *cstate,
+ struct nfsd4_get_dir_delegation *gdd,
+ struct nfsd_file *nf)
+{
+ struct nfs4_client *clp = cstate->clp;
+ struct nfs4_delegation *dp;
+ struct file_lease *fl;
+ struct nfs4_file *fp, *rfp;
+ int status = 0;
+
+ fp = nfsd4_alloc_file();
+ if (!fp)
+ return ERR_PTR(-ENOMEM);
+
+ nfsd4_file_init(&cstate->current_fh, fp);
+
+ rfp = nfsd4_file_hash_insert(fp, &cstate->current_fh);
+ if (unlikely(!rfp)) {
+ put_nfs4_file(fp);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (rfp != fp) {
+ put_nfs4_file(fp);
+ fp = rfp;
+ }
+
+ /* if this client already has one, return that it's unavailable */
+ spin_lock(&state_lock);
+ spin_lock(&fp->fi_lock);
+ /* existing delegation? */
+ if (nfs4_delegation_exists(clp, fp)) {
+ status = -EAGAIN;
+ } else if (!fp->fi_deleg_file) {
+ fp->fi_deleg_file = nfsd_file_get(nf);
+ fp->fi_delegees = 1;
+ } else {
+ ++fp->fi_delegees;
+ }
+ spin_unlock(&fp->fi_lock);
+ spin_unlock(&state_lock);
+
+ if (status) {
+ put_nfs4_file(fp);
+ return ERR_PTR(status);
+ }
+
+ /* Try to set up the lease */
+ status = -ENOMEM;
+ dp = alloc_init_deleg(clp, fp, NULL, NFS4_OPEN_DELEGATE_READ);
+ if (!dp)
+ goto out_delegees;
+
+ fl = nfs4_alloc_init_lease(dp);
+ if (!fl)
+ goto out_put_stid;
+
+ status = kernel_setlease(nf->nf_file,
+ fl->c.flc_type, &fl, NULL);
+ if (fl)
+ locks_free_lease(fl);
+ if (status)
+ goto out_put_stid;
+
+ /*
+ * Now, try to hash it. This can fail if we race another nfsd task
+ * trying to set a delegation on the same file. If that happens,
+ * then just say UNAVAIL.
+ */
+ spin_lock(&state_lock);
+ spin_lock(&clp->cl_lock);
+ spin_lock(&fp->fi_lock);
+ status = hash_delegation_locked(dp, fp);
+ spin_unlock(&fp->fi_lock);
+ spin_unlock(&clp->cl_lock);
+ spin_unlock(&state_lock);
+
+ if (!status)
+ return dp;
+
+ /* Something failed. Drop the lease and clean up the stid */
+ kernel_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
+out_put_stid:
+ nfs4_put_stid(&dp->dl_stid);
+out_delegees:
+ put_deleg_file(fp);
+ return ERR_PTR(status);
+}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 53fac037611c..30ce5851fe4c 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -55,6 +55,7 @@
#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
+#include "nfs4xdr_gen.h"
#include "trace.h"
@@ -520,6 +521,27 @@ nfsd4_decode_fattr4(struct nfsd4_compoundargs *argp, u32 *bmval, u32 bmlen,
*umask = mask & S_IRWXUGO;
iattr->ia_valid |= ATTR_MODE;
}
+ if (bmval[2] & FATTR4_WORD2_TIME_DELEG_ACCESS) {
+ fattr4_time_deleg_access access;
+
+ if (!xdrgen_decode_fattr4_time_deleg_access(argp->xdr, &access))
+ return nfserr_bad_xdr;
+ iattr->ia_atime.tv_sec = access.seconds;
+ iattr->ia_atime.tv_nsec = access.nseconds;
+ iattr->ia_valid |= ATTR_ATIME | ATTR_ATIME_SET | ATTR_DELEG;
+ }
+ if (bmval[2] & FATTR4_WORD2_TIME_DELEG_MODIFY) {
+ fattr4_time_deleg_modify modify;
+
+ if (!xdrgen_decode_fattr4_time_deleg_modify(argp->xdr, &modify))
+ return nfserr_bad_xdr;
+ iattr->ia_mtime.tv_sec = modify.seconds;
+ iattr->ia_mtime.tv_nsec = modify.nseconds;
+ iattr->ia_ctime.tv_sec = modify.seconds;
+ iattr->ia_ctime.tv_nsec = modify.nseconds;
+ iattr->ia_valid |= ATTR_CTIME | ATTR_CTIME_SET |
+ ATTR_MTIME | ATTR_MTIME_SET | ATTR_DELEG;
+ }
/* request sanity: did attrlist4 contain the expected number of words? */
if (attrlist4_count != xdr_stream_pos(argp->xdr) - starting_pos)
@@ -566,23 +588,13 @@ nfsd4_decode_state_owner4(struct nfsd4_compoundargs *argp,
}
#ifdef CONFIG_NFSD_PNFS
-static __be32
-nfsd4_decode_deviceid4(struct nfsd4_compoundargs *argp,
- struct nfsd4_deviceid *devid)
-{
- __be32 *p;
-
- p = xdr_inline_decode(argp->xdr, NFS4_DEVICEID4_SIZE);
- if (!p)
- return nfserr_bad_xdr;
- memcpy(devid, p, sizeof(*devid));
- return nfs_ok;
-}
static __be32
nfsd4_decode_layoutupdate4(struct nfsd4_compoundargs *argp,
struct nfsd4_layoutcommit *lcp)
{
+ u32 len;
+
if (xdr_stream_decode_u32(argp->xdr, &lcp->lc_layout_type) < 0)
return nfserr_bad_xdr;
if (lcp->lc_layout_type < LAYOUT_NFSV4_1_FILES)
@@ -590,13 +602,10 @@ nfsd4_decode_layoutupdate4(struct nfsd4_compoundargs *argp,
if (lcp->lc_layout_type >= LAYOUT_TYPE_MAX)
return nfserr_bad_xdr;
- if (xdr_stream_decode_u32(argp->xdr, &lcp->lc_up_len) < 0)
+ if (xdr_stream_decode_u32(argp->xdr, &len) < 0)
+ return nfserr_bad_xdr;
+ if (!xdr_stream_subsegment(argp->xdr, &lcp->lc_up_layout, len))
return nfserr_bad_xdr;
- if (lcp->lc_up_len > 0) {
- lcp->lc_up_layout = xdr_inline_decode(argp->xdr, lcp->lc_up_len);
- if (!lcp->lc_up_layout)
- return nfserr_bad_xdr;
- }
return nfs_ok;
}
@@ -1066,13 +1075,13 @@ static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *sh
return nfs_ok;
if (!argp->minorversion)
return nfserr_bad_xdr;
- switch (w & NFS4_SHARE_WANT_MASK) {
- case NFS4_SHARE_WANT_NO_PREFERENCE:
- case NFS4_SHARE_WANT_READ_DELEG:
- case NFS4_SHARE_WANT_WRITE_DELEG:
- case NFS4_SHARE_WANT_ANY_DELEG:
- case NFS4_SHARE_WANT_NO_DELEG:
- case NFS4_SHARE_WANT_CANCEL:
+ switch (w & NFS4_SHARE_WANT_TYPE_MASK) {
+ case OPEN4_SHARE_ACCESS_WANT_NO_PREFERENCE:
+ case OPEN4_SHARE_ACCESS_WANT_READ_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_ANY_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_NO_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_CANCEL:
break;
default:
return nfserr_bad_xdr;
@@ -1762,7 +1771,7 @@ nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
__be32 status;
memset(gdev, 0, sizeof(*gdev));
- status = nfsd4_decode_deviceid4(argp, &gdev->gd_devid);
+ status = nfsd4_decode_deviceid4(argp->xdr, &gdev->gd_devid);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &gdev->gd_layout_type) < 0)
@@ -1793,7 +1802,7 @@ nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
status = nfsd4_decode_stateid4(argp, &lcp->lc_sid);
if (status)
return status;
- if (xdr_stream_decode_u32(argp->xdr, &lcp->lc_newoffset) < 0)
+ if (xdr_stream_decode_bool(argp->xdr, &lcp->lc_newoffset) < 0)
return nfserr_bad_xdr;
if (lcp->lc_newoffset) {
if (xdr_stream_decode_u64(argp->xdr, &lcp->lc_last_wr) < 0)
@@ -1884,7 +1893,8 @@ nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
return nfserr_bad_xdr;
seq->seqid = be32_to_cpup(p++);
seq->slotid = be32_to_cpup(p++);
- seq->maxslots = be32_to_cpup(p++);
+ /* sa_highest_slotid counts from 0 but maxslots counts from 1 ... */
+ seq->maxslots = be32_to_cpup(p++) + 1;
seq->cachethis = be32_to_cpup(p);
seq->status_flags = 0;
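/*
 * Editorial sketch (not part of the patch): the off-by-one handled in the
 * hunk above. A client reporting sa_highest_slotid = N is saying it can use
 * slots 0..N, i.e. N + 1 slots in total, which is what maxslots stores.
 */
#include <assert.h>

static unsigned int slots_from_highest_slotid(unsigned int sa_highest_slotid)
{
	/* slot ids count from 0, slot totals count from 1 */
	return sa_highest_slotid + 1;
}

int main(void)
{
	assert(slots_from_highest_slotid(0) == 1);	/* only slot 0 usable */
	assert(slots_from_highest_slotid(9) == 10);	/* slots 0..9 usable */
	return 0;
}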
@@ -2542,7 +2552,7 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
/* Sessions make the DRC unnecessary: */
if (argp->minorversion)
cachethis = false;
- svc_reserve(argp->rqstp, max_reply + readbytes);
+ svc_reserve_auth(argp->rqstp, max_reply + readbytes);
argp->rqstp->rq_cachetype = cachethis ? RC_REPLBUFF : RC_NOCACHE;
argp->splice_ok = nfsd_read_splice_ok(argp->rqstp);
@@ -2620,10 +2630,8 @@ static __be32 nfsd4_encode_components_esc(struct xdr_stream *xdr, char sep,
__be32 *p;
__be32 pathlen;
int pathlen_offset;
- int strlen, count=0;
char *str, *end, *next;
-
- dprintk("nfsd4_encode_components(%s)\n", components);
+ int count = 0;
pathlen_offset = xdr->buf->len;
p = xdr_reserve_space(xdr, 4);
@@ -2650,9 +2658,8 @@ static __be32 nfsd4_encode_components_esc(struct xdr_stream *xdr, char sep,
for (; *end && (*end != sep); end++)
/* find sep or end of string */;
- strlen = end - str;
- if (strlen) {
- if (xdr_stream_encode_opaque(xdr, str, strlen) < 0)
+ if (end > str) {
+ if (xdr_stream_encode_opaque(xdr, str, end - str) < 0)
return nfserr_resource;
count++;
} else
@@ -2818,11 +2825,11 @@ static __be32 nfsd4_encode_nfsace4(struct xdr_stream *xdr, struct svc_rqst *rqst
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
static inline __be32
nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
- void *context, int len)
+ const struct lsm_context *context)
{
__be32 *p;
- p = xdr_reserve_space(xdr, len + 4 + 4 + 4);
+ p = xdr_reserve_space(xdr, context->len + 4 + 4 + 4);
if (!p)
return nfserr_resource;
@@ -2832,13 +2839,13 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
*/
*p++ = cpu_to_be32(0); /* lfs */
*p++ = cpu_to_be32(0); /* pi */
- p = xdr_encode_opaque(p, context, len);
+ p = xdr_encode_opaque(p, context->context, context->len);
return 0;
}
#else
static inline __be32
nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
- void *context, int len)
+ struct lsm_context *context)
{ return 0; }
#endif
@@ -2919,9 +2926,9 @@ struct nfsd4_fattr_args {
struct kstat stat;
struct kstatfs statfs;
struct nfs4_acl *acl;
+ u64 change_attr;
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- void *context;
- int contextlen;
+ struct lsm_context context;
#endif
u32 rdattr_err;
bool contextsupport;
@@ -2931,6 +2938,12 @@ struct nfsd4_fattr_args {
typedef __be32(*nfsd4_enc_attr)(struct xdr_stream *xdr,
const struct nfsd4_fattr_args *args);
+static __be32 nfsd4_encode_fattr4__inval(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfserr_inval;
+}
+
static __be32 nfsd4_encode_fattr4__noop(struct xdr_stream *xdr,
const struct nfsd4_fattr_args *args)
{
@@ -3018,7 +3031,6 @@ static __be32 nfsd4_encode_fattr4_change(struct xdr_stream *xdr,
const struct nfsd4_fattr_args *args)
{
const struct svc_export *exp = args->exp;
- u64 c;
if (unlikely(exp->ex_flags & NFSEXP_V4ROOT)) {
u32 flush_time = convert_to_wallclock(exp->cd->flush_time);
@@ -3029,9 +3041,7 @@ static __be32 nfsd4_encode_fattr4_change(struct xdr_stream *xdr,
return nfserr_resource;
return nfs_ok;
}
-
- c = nfsd4_change_attribute(&args->stat);
- return nfsd4_encode_changeid4(xdr, c);
+ return nfsd4_encode_changeid4(xdr, args->change_attr);
}
static __be32 nfsd4_encode_fattr4_size(struct xdr_stream *xdr,
@@ -3372,12 +3382,28 @@ static __be32 nfsd4_encode_fattr4_suppattr_exclcreat(struct xdr_stream *xdr,
return nfsd4_encode_bitmap4(xdr, supp[0], supp[1], supp[2]);
}
+/*
+ * Copied from generic_remap_checks/generic_remap_file_range_prep.
+ *
+ * These generic functions use the file system's s_blocksize, but
+ * individual file systems aren't required to use
+ * generic_remap_file_range_prep. Until there is a mechanism for
+ * determining a particular file system's (or file's) clone block
+ * size, this is the best NFSD can do.
+ */
+static __be32 nfsd4_encode_fattr4_clone_blksize(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ struct inode *inode = d_inode(args->dentry);
+
+ return nfsd4_encode_uint32_t(xdr, inode->i_sb->s_blocksize);
+}
+
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
static __be32 nfsd4_encode_fattr4_sec_label(struct xdr_stream *xdr,
const struct nfsd4_fattr_args *args)
{
- return nfsd4_encode_security_label(xdr, args->rqstp,
- args->context, args->contextlen);
+ return nfsd4_encode_security_label(xdr, args->rqstp, &args->context);
}
#endif
@@ -3389,6 +3415,56 @@ static __be32 nfsd4_encode_fattr4_xattr_support(struct xdr_stream *xdr,
return nfsd4_encode_bool(xdr, err == 0);
}
+#define NFSD_OA_SHARE_ACCESS (BIT(OPEN_ARGS_SHARE_ACCESS_READ) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_WRITE) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_BOTH))
+
+#define NFSD_OA_SHARE_DENY (BIT(OPEN_ARGS_SHARE_DENY_NONE) | \
+ BIT(OPEN_ARGS_SHARE_DENY_READ) | \
+ BIT(OPEN_ARGS_SHARE_DENY_WRITE) | \
+ BIT(OPEN_ARGS_SHARE_DENY_BOTH))
+
+#define NFSD_OA_SHARE_ACCESS_WANT (BIT(OPEN_ARGS_SHARE_ACCESS_WANT_ANY_DELEG) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_WANT_NO_DELEG) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_WANT_CANCEL) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION))
+
+#define NFSD_OA_OPEN_CLAIM (BIT(OPEN_ARGS_OPEN_CLAIM_NULL) | \
+ BIT(OPEN_ARGS_OPEN_CLAIM_PREVIOUS) | \
+ BIT(OPEN_ARGS_OPEN_CLAIM_DELEGATE_CUR) | \
+ BIT(OPEN_ARGS_OPEN_CLAIM_DELEGATE_PREV)| \
+ BIT(OPEN_ARGS_OPEN_CLAIM_FH) | \
+ BIT(OPEN_ARGS_OPEN_CLAIM_DELEG_CUR_FH) | \
+ BIT(OPEN_ARGS_OPEN_CLAIM_DELEG_PREV_FH))
+
+#define NFSD_OA_CREATE_MODE (BIT(OPEN_ARGS_CREATEMODE_UNCHECKED4) | \
+ BIT(OPEN_ARGS_CREATE_MODE_GUARDED) | \
+ BIT(OPEN_ARGS_CREATEMODE_EXCLUSIVE4) | \
+ BIT(OPEN_ARGS_CREATE_MODE_EXCLUSIVE4_1))
+
+static uint32_t oa_share_access = NFSD_OA_SHARE_ACCESS;
+static uint32_t oa_share_deny = NFSD_OA_SHARE_DENY;
+static uint32_t oa_share_access_want = NFSD_OA_SHARE_ACCESS_WANT;
+static uint32_t oa_open_claim = NFSD_OA_OPEN_CLAIM;
+static uint32_t oa_create_mode = NFSD_OA_CREATE_MODE;
+
+static const struct open_arguments4 nfsd_open_arguments = {
+ .oa_share_access = { .count = 1, .element = &oa_share_access },
+ .oa_share_deny = { .count = 1, .element = &oa_share_deny },
+ .oa_share_access_want = { .count = 1, .element = &oa_share_access_want },
+ .oa_open_claim = { .count = 1, .element = &oa_open_claim },
+ .oa_create_mode = { .count = 1, .element = &oa_create_mode },
+};
+
+static __be32 nfsd4_encode_fattr4_open_arguments(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ if (!xdrgen_encode_fattr4_open_arguments(xdr, &nfsd_open_arguments))
+ return nfserr_resource;
+ return nfs_ok;
+}
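/*
 * Editorial sketch (not part of the patch): how a single-word bitmap4 such
 * as the oa_share_access field above lays out on the wire, assuming
 * standard XDR array rules -- a 4-byte element count followed by each
 * 32-bit word in network byte order (the same shape xdrgen_encode_bitmap4
 * produces). The bit values used below are hypothetical; the real masks
 * depend on the OPEN_ARGS_* bit positions defined elsewhere.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t encode_bitmap4(uint8_t *buf, const uint32_t *words, uint32_t count)
{
	uint32_t be = htonl(count);
	size_t off = 0;

	memcpy(buf + off, &be, 4);		/* element count */
	off += 4;
	for (uint32_t i = 0; i < count; i++) {	/* each mask word, big-endian */
		be = htonl(words[i]);
		memcpy(buf + off, &be, 4);
		off += 4;
	}
	return off;				/* 4 * (count + 1) bytes */
}

int main(void)
{
	uint32_t mask = (1u << 1) | (1u << 2) | (1u << 3);	/* hypothetical bits */
	uint8_t wire[8];
	size_t len = encode_bitmap4(wire, &mask, 1);

	printf("encoded %zu bytes: count=1, word0=0x%08x\n", len, mask);
	return 0;
}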
+
static const nfsd4_enc_attr nfsd4_enc_fattr4_encode_ops[] = {
[FATTR4_SUPPORTED_ATTRS] = nfsd4_encode_fattr4_supported_attrs,
[FATTR4_TYPE] = nfsd4_encode_fattr4_type,
@@ -3477,7 +3553,7 @@ static const nfsd4_enc_attr nfsd4_enc_fattr4_encode_ops[] = {
[FATTR4_MODE_SET_MASKED] = nfsd4_encode_fattr4__noop,
[FATTR4_SUPPATTR_EXCLCREAT] = nfsd4_encode_fattr4_suppattr_exclcreat,
[FATTR4_FS_CHARSET_CAP] = nfsd4_encode_fattr4__noop,
- [FATTR4_CLONE_BLKSIZE] = nfsd4_encode_fattr4__noop,
+ [FATTR4_CLONE_BLKSIZE] = nfsd4_encode_fattr4_clone_blksize,
[FATTR4_SPACE_FREED] = nfsd4_encode_fattr4__noop,
[FATTR4_CHANGE_ATTR_TYPE] = nfsd4_encode_fattr4__noop,
@@ -3489,6 +3565,9 @@ static const nfsd4_enc_attr nfsd4_enc_fattr4_encode_ops[] = {
[FATTR4_MODE_UMASK] = nfsd4_encode_fattr4__noop,
[FATTR4_XATTR_SUPPORT] = nfsd4_encode_fattr4_xattr_support,
+ [FATTR4_TIME_DELEG_ACCESS] = nfsd4_encode_fattr4__inval,
+ [FATTR4_TIME_DELEG_MODIFY] = nfsd4_encode_fattr4__inval,
+ [FATTR4_OPEN_ARGUMENTS] = nfsd4_encode_fattr4_open_arguments,
};
/*
@@ -3506,8 +3585,8 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
struct nfsd4_fattr_args args;
struct svc_fh *tempfh = NULL;
int starting_len = xdr->buf->len;
- __be32 *attrlen_p, status;
- int attrlen_offset;
+ unsigned int attrlen_offset;
+ __be32 attrlen, status;
u32 attrmask[3];
int err;
struct nfsd4_compoundres *resp = rqstp->rq_resp;
@@ -3527,7 +3606,7 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
args.ignore_crossmnt = (ignore_crossmnt != 0);
args.acl = NULL;
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- args.context = NULL;
+ args.context.context = NULL;
#endif
/*
@@ -3544,7 +3623,11 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
if (status)
goto out;
}
- if (attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
+ if ((attrmask[0] & (FATTR4_WORD0_CHANGE |
+ FATTR4_WORD0_SIZE)) ||
+ (attrmask[1] & (FATTR4_WORD1_TIME_ACCESS |
+ FATTR4_WORD1_TIME_MODIFY |
+ FATTR4_WORD1_TIME_METADATA))) {
status = nfsd4_deleg_getattr_conflict(rqstp, dentry, &dp);
if (status)
goto out;
@@ -3556,11 +3639,22 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
if (dp) {
struct nfs4_cb_fattr *ncf = &dp->dl_cb_fattr;
- if (ncf->ncf_file_modified)
+ if (ncf->ncf_file_modified) {
+ ++ncf->ncf_initial_cinfo;
args.stat.size = ncf->ncf_cur_fsize;
+ if (!timespec64_is_epoch(&ncf->ncf_cb_mtime))
+ args.stat.mtime = ncf->ncf_cb_mtime;
+ }
+ args.change_attr = ncf->ncf_initial_cinfo;
+
+ if (!timespec64_is_epoch(&ncf->ncf_cb_atime))
+ args.stat.atime = ncf->ncf_cb_atime;
nfs4_put_stid(&dp->dl_stid);
+ } else {
+ args.change_attr = nfsd4_change_attribute(&args.stat);
}
+
if (err)
goto out_nfserr;
@@ -3607,7 +3701,7 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
attrmask[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
if (exp->ex_flags & NFSEXP_SECURITY_LABEL)
err = security_inode_getsecctx(d_inode(dentry),
- &args.context, &args.contextlen);
+ &args.context);
else
err = -EOPNOTSUPP;
args.contextsupport = (err == 0);
@@ -3628,8 +3722,7 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
/* attr_vals */
attrlen_offset = xdr->buf->len;
- attrlen_p = xdr_reserve_space(xdr, XDR_UNIT);
- if (!attrlen_p)
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT)))
goto out_resource;
bitmap_from_arr32(attr_bitmap, attrmask,
ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops));
@@ -3639,13 +3732,14 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
if (status != nfs_ok)
goto out;
}
- *attrlen_p = cpu_to_be32(xdr->buf->len - attrlen_offset - XDR_UNIT);
+ attrlen = cpu_to_be32(xdr->buf->len - attrlen_offset - XDR_UNIT);
+ write_bytes_to_xdr_buf(xdr->buf, attrlen_offset, &attrlen, XDR_UNIT);
status = nfs_ok;
out:
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- if (args.context)
- security_release_secctx(args.context, args.contextlen);
+ if (args.context.context)
+ security_release_secctx(&args.context);
#endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
kfree(args.acl);
if (tempfh) {
@@ -3728,7 +3822,9 @@ nfsd4_encode_entry4_fattr(struct nfsd4_readdir *cd, const char *name,
__be32 nfserr;
int ignore_crossmnt = 0;
- dentry = lookup_positive_unlocked(name, cd->rd_fhp->fh_dentry, namlen);
+ dentry = lookup_one_positive_unlocked(&nop_mnt_idmap,
+ &QSTR_LEN(name, namlen),
+ cd->rd_fhp->fh_dentry);
if (IS_ERR(dentry))
return nfserrno(PTR_ERR(dentry));
@@ -4229,18 +4325,20 @@ nfsd4_encode_open_delegation4(struct xdr_stream *xdr, struct nfsd4_open *open)
if (xdr_stream_encode_u32(xdr, open->op_delegate_type) != XDR_UNIT)
return nfserr_resource;
switch (open->op_delegate_type) {
- case NFS4_OPEN_DELEGATE_NONE:
+ case OPEN_DELEGATE_NONE:
status = nfs_ok;
break;
- case NFS4_OPEN_DELEGATE_READ:
+ case OPEN_DELEGATE_READ:
+ case OPEN_DELEGATE_READ_ATTRS_DELEG:
/* read */
status = nfsd4_encode_open_read_delegation4(xdr, open);
break;
- case NFS4_OPEN_DELEGATE_WRITE:
+ case OPEN_DELEGATE_WRITE:
+ case OPEN_DELEGATE_WRITE_ATTRS_DELEG:
/* write */
status = nfsd4_encode_open_write_delegation4(xdr, open);
break;
- case NFS4_OPEN_DELEGATE_NONE_EXT:
+ case OPEN_DELEGATE_NONE_EXT:
/* od_whynone */
status = nfsd4_encode_open_none_delegation4(xdr, open);
break;
@@ -4317,6 +4415,15 @@ static __be32 nfsd4_encode_splice_read(
__be32 nfserr;
/*
+ * Splice read doesn't work if encoding has already wandered
+ * into the XDR buf's page array.
+ */
+ if (unlikely(xdr->buf->page_len)) {
+ WARN_ON_ONCE(1);
+ return nfserr_serverfault;
+ }
+
+ /*
* Make sure there is room at the end of buf->head for
* svcxdr_encode_opaque_pages() to create a tail buffer
* to XDR-pad the payload.
@@ -4365,7 +4472,7 @@ out_err:
static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
struct nfsd4_read *read,
- struct file *file, unsigned long maxcount)
+ unsigned long maxcount)
{
struct xdr_stream *xdr = resp->xdr;
unsigned int base = xdr->buf->page_len & ~PAGE_MASK;
@@ -4373,18 +4480,30 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
__be32 zero = xdr_zero;
__be32 nfserr;
- if (xdr_reserve_space_vec(xdr, maxcount) < 0)
- return nfserr_resource;
-
- nfserr = nfsd_iter_read(resp->rqstp, read->rd_fhp, file,
+ nfserr = nfsd_iter_read(resp->rqstp, read->rd_fhp, read->rd_nf,
read->rd_offset, &maxcount, base,
&read->rd_eof);
read->rd_length = maxcount;
if (nfserr)
return nfserr;
+
+ /*
+ * svcxdr_encode_opaque_pages() is not used here because
+ * we don't want to encode subsequent results in this
+ * COMPOUND into the xdr->buf's tail, but rather those
+ * results should follow the NFS READ payload in the
+ * buf's pages.
+ */
+ if (xdr_reserve_space_vec(xdr, maxcount) < 0)
+ return nfserr_resource;
+
+ /*
+ * Mark the buffer location of the NFS READ payload so that
+ * direct placement-capable transports send only the
+ * payload bytes out-of-band.
+ */
if (svc_encode_result_payload(resp->rqstp, starting_len, maxcount))
return nfserr_io;
- xdr_truncate_encode(xdr, starting_len + xdr_align_size(maxcount));
write_bytes_to_xdr_buf(xdr->buf, starting_len + maxcount, &zero,
xdr_pad_size(maxcount));
@@ -4398,25 +4517,23 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_compoundargs *argp = resp->rqstp->rq_argp;
struct nfsd4_read *read = &u->read;
struct xdr_stream *xdr = resp->xdr;
- int starting_len = xdr->buf->len;
bool splice_ok = argp->splice_ok;
+ unsigned int eof_offset;
unsigned long maxcount;
+ __be32 wire_data[2];
struct file *file;
- __be32 *p;
if (nfserr)
return nfserr;
+
+ eof_offset = xdr->buf->len;
file = read->rd_nf->nf_file;
- p = xdr_reserve_space(xdr, 8); /* eof flag and byte count */
- if (!p) {
+ /* Reserve space for the eof flag and byte count */
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT * 2))) {
WARN_ON_ONCE(splice_ok);
return nfserr_resource;
}
- if (resp->xdr->buf->page_len && splice_ok) {
- WARN_ON_ONCE(1);
- return nfserr_serverfault;
- }
xdr_commit_encode(xdr);
maxcount = min_t(unsigned long, read->rd_length,
@@ -4425,14 +4542,15 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
if (file->f_op->splice_read && splice_ok)
nfserr = nfsd4_encode_splice_read(resp, read, file, maxcount);
else
- nfserr = nfsd4_encode_readv(resp, read, file, maxcount);
+ nfserr = nfsd4_encode_readv(resp, read, maxcount);
if (nfserr) {
- xdr_truncate_encode(xdr, starting_len);
+ xdr_truncate_encode(xdr, eof_offset);
return nfserr;
}
- p = xdr_encode_bool(p, read->rd_eof);
- *p = cpu_to_be32(read->rd_length);
+ wire_data[0] = read->rd_eof ? xdr_one : xdr_zero;
+ wire_data[1] = cpu_to_be32(read->rd_length);
+ write_bytes_to_xdr_buf(xdr->buf, eof_offset, &wire_data, XDR_UNIT * 2);
return nfs_ok;
}
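/*
 * Editorial sketch (not part of the patch): the reserve-then-backfill
 * pattern the READ encoder above switches to, modelled with a flat byte
 * buffer instead of the kernel's xdr_stream/xdr_buf API. A fixed-size
 * header (eof flag plus byte count) is reserved first, the variable-length
 * payload is encoded, and the header words are then written back by offset
 * rather than through a pointer saved before the payload was encoded. The
 * same offset-based backfill appears in the attrlen, READLINK, SECINFO and
 * operation-status encoders elsewhere in this patch.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct enc {
	uint8_t buf[256];
	size_t len;
};

static size_t reserve(struct enc *e, size_t n)
{
	size_t off = e->len;

	memset(e->buf + off, 0, n);
	e->len += n;
	return off;			/* caller backfills at this offset later */
}

static void put_u32_at(struct enc *e, size_t off, uint32_t val)
{
	uint32_t be = htonl(val);

	memcpy(e->buf + off, &be, sizeof(be));
}

int main(void)
{
	struct enc e = { .len = 0 };
	const char payload[] = "hello";
	size_t eof_offset;

	eof_offset = reserve(&e, 8);			/* eof flag + byte count */
	memcpy(e.buf + e.len, payload, sizeof(payload) - 1);
	e.len += sizeof(payload) - 1;

	put_u32_at(&e, eof_offset, 1);			/* eof = true */
	put_u32_at(&e, eof_offset + 4, sizeof(payload) - 1);	/* length */
	printf("encoded %zu bytes, length patched to %zu\n",
	       e.len, sizeof(payload) - 1);
	return 0;
}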
@@ -4441,25 +4559,21 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_readlink *readlink = &u->readlink;
- __be32 *p, *maxcount_p, zero = xdr_zero;
+ __be32 *p, wire_count, zero = xdr_zero;
struct xdr_stream *xdr = resp->xdr;
- int length_offset = xdr->buf->len;
+ unsigned int length_offset;
int maxcount, status;
- maxcount_p = xdr_reserve_space(xdr, XDR_UNIT);
- if (!maxcount_p)
+ /* linktext4.count */
+ length_offset = xdr->buf->len;
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT)))
return nfserr_resource;
- maxcount = PAGE_SIZE;
+ /* linktext4.data */
+ maxcount = PAGE_SIZE;
p = xdr_reserve_space(xdr, maxcount);
if (!p)
return nfserr_resource;
- /*
- * XXX: By default, vfs_readlink() will truncate symlinks if they
- * would overflow the buffer. Is this kosher in NFSv4? If not, one
- * easy fix is: if vfs_readlink() precisely fills the buffer, assume
- * that truncation occurred, and return NFS4ERR_RESOURCE.
- */
nfserr = nfsd_readlink(readlink->rl_rqstp, readlink->rl_fhp,
(char *)p, &maxcount);
if (nfserr == nfserr_isdir)
@@ -4472,7 +4586,9 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr,
nfserr = nfserrno(status);
goto out_err;
}
- *maxcount_p = cpu_to_be32(maxcount);
+
+ wire_count = cpu_to_be32(maxcount);
+ write_bytes_to_xdr_buf(xdr->buf, length_offset, &wire_count, XDR_UNIT);
xdr_truncate_encode(xdr, length_offset + 4 + xdr_align_size(maxcount));
write_bytes_to_xdr_buf(xdr->buf, length_offset + 4 + maxcount, &zero,
xdr_pad_size(maxcount));
@@ -4607,14 +4723,42 @@ nfsd4_encode_rpcsec_gss_info(struct xdr_stream *xdr,
}
static __be32
-nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp)
+nfsd4_encode_secinfo4(struct xdr_stream *xdr, rpc_authflavor_t pf,
+ u32 *supported)
+{
+ struct rpcsec_gss_info info;
+ __be32 status;
+
+ if (rpcauth_get_gssinfo(pf, &info) == 0) {
+ (*supported)++;
+
+ /* flavor */
+ status = nfsd4_encode_uint32_t(xdr, RPC_AUTH_GSS);
+ if (status != nfs_ok)
+ return status;
+ /* flavor_info */
+ status = nfsd4_encode_rpcsec_gss_info(xdr, &info);
+ if (status != nfs_ok)
+ return status;
+ } else if (pf < RPC_AUTH_MAXFLAVOR) {
+ (*supported)++;
+
+ /* flavor */
+ status = nfsd4_encode_uint32_t(xdr, pf);
+ if (status != nfs_ok)
+ return status;
+ }
+ return nfs_ok;
+}
+
+static __be32
+nfsd4_encode_SECINFO4resok(struct xdr_stream *xdr, struct svc_export *exp)
{
u32 i, nflavs, supported;
struct exp_flavor_info *flavs;
struct exp_flavor_info def_flavs[2];
- static bool report = true;
- __be32 *flavorsp;
- __be32 status;
+ unsigned int count_offset;
+ __be32 status, wire_count;
if (exp->ex_nflavors) {
flavs = exp->ex_flavors;
@@ -4636,43 +4780,20 @@ nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp)
}
}
- supported = 0;
- flavorsp = xdr_reserve_space(xdr, XDR_UNIT);
- if (!flavorsp)
+ count_offset = xdr->buf->len;
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT)))
return nfserr_resource;
- for (i = 0; i < nflavs; i++) {
- rpc_authflavor_t pf = flavs[i].pseudoflavor;
- struct rpcsec_gss_info info;
-
- if (rpcauth_get_gssinfo(pf, &info) == 0) {
- supported++;
-
- /* flavor */
- status = nfsd4_encode_uint32_t(xdr, RPC_AUTH_GSS);
- if (status != nfs_ok)
- return status;
- /* flavor_info */
- status = nfsd4_encode_rpcsec_gss_info(xdr, &info);
- if (status != nfs_ok)
- return status;
- } else if (pf < RPC_AUTH_MAXFLAVOR) {
- supported++;
-
- /* flavor */
- status = nfsd4_encode_uint32_t(xdr, pf);
- if (status != nfs_ok)
- return status;
- } else {
- if (report)
- pr_warn("NFS: SECINFO: security flavor %u "
- "is not supported\n", pf);
- }
+ for (i = 0, supported = 0; i < nflavs; i++) {
+ status = nfsd4_encode_secinfo4(xdr, flavs[i].pseudoflavor,
+ &supported);
+ if (status != nfs_ok)
+ return status;
}
- if (nflavs != supported)
- report = false;
- *flavorsp = cpu_to_be32(supported);
+ wire_count = cpu_to_be32(supported);
+ write_bytes_to_xdr_buf(xdr->buf, count_offset, &wire_count,
+ XDR_UNIT);
return 0;
}
@@ -4683,7 +4804,7 @@ nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_secinfo *secinfo = &u->secinfo;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_do_encode_secinfo(xdr, secinfo->si_exp);
+ return nfsd4_encode_SECINFO4resok(xdr, secinfo->si_exp);
}
static __be32
@@ -4693,7 +4814,7 @@ nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_secinfo_no_name *secinfo = &u->secinfo_no_name;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_do_encode_secinfo(xdr, secinfo->sin_exp);
+ return nfsd4_encode_SECINFO4resok(xdr, secinfo->sin_exp);
}
static __be32
@@ -4964,11 +5085,11 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
return nfserr;
/* Note slotid's are numbered from zero: */
/* sr_highest_slotid */
- nfserr = nfsd4_encode_slotid4(xdr, seq->maxslots - 1);
+ nfserr = nfsd4_encode_slotid4(xdr, seq->maxslots_response - 1);
if (nfserr != nfs_ok)
return nfserr;
/* sr_target_highest_slotid */
- nfserr = nfsd4_encode_slotid4(xdr, seq->maxslots - 1);
+ nfserr = nfsd4_encode_slotid4(xdr, seq->target_maxslots - 1);
if (nfserr != nfs_ok)
return nfserr;
/* sr_status_flags */
@@ -5296,17 +5417,20 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
struct file *file = read->rd_nf->nf_file;
struct xdr_stream *xdr = resp->xdr;
bool splice_ok = argp->splice_ok;
+ unsigned int offset_offset;
+ __be32 nfserr, wire_count;
unsigned long maxcount;
- __be32 nfserr, *p;
+ __be64 wire_offset;
- /* Content type, offset, byte count */
- p = xdr_reserve_space(xdr, 4 + 8 + 4);
- if (!p)
+ if (xdr_stream_encode_u32(xdr, NFS4_CONTENT_DATA) != XDR_UNIT)
return nfserr_io;
- if (resp->xdr->buf->page_len && splice_ok) {
- WARN_ON_ONCE(splice_ok);
- return nfserr_serverfault;
- }
+
+ offset_offset = xdr->buf->len;
+
+ /* Reserve space for the byte offset and count */
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT * 3)))
+ return nfserr_io;
+ xdr_commit_encode(xdr);
maxcount = min_t(unsigned long, read->rd_length,
(xdr->buf->buflen - xdr->buf->len));
@@ -5314,14 +5438,16 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
if (file->f_op->splice_read && splice_ok)
nfserr = nfsd4_encode_splice_read(resp, read, file, maxcount);
else
- nfserr = nfsd4_encode_readv(resp, read, file, maxcount);
+ nfserr = nfsd4_encode_readv(resp, read, maxcount);
if (nfserr)
return nfserr;
- *p++ = cpu_to_be32(NFS4_CONTENT_DATA);
- p = xdr_encode_hyper(p, read->rd_offset);
- *p = cpu_to_be32(read->rd_length);
-
+ wire_offset = cpu_to_be64(read->rd_offset);
+ write_bytes_to_xdr_buf(xdr->buf, offset_offset, &wire_offset,
+ XDR_UNIT * 2);
+ wire_count = cpu_to_be32(read->rd_length);
+ write_bytes_to_xdr_buf(xdr->buf, offset_offset + XDR_UNIT * 2,
+ &wire_count, XDR_UNIT);
return nfs_ok;
}
@@ -5332,16 +5458,17 @@ nfsd4_encode_read_plus(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_read *read = &u->read;
struct file *file = read->rd_nf->nf_file;
struct xdr_stream *xdr = resp->xdr;
- int starting_len = xdr->buf->len;
+ unsigned int eof_offset;
+ __be32 wire_data[2];
u32 segments = 0;
- __be32 *p;
if (nfserr)
return nfserr;
- /* eof flag, segment count */
- p = xdr_reserve_space(xdr, 4 + 4);
- if (!p)
+ eof_offset = xdr->buf->len;
+
+ /* Reserve space for the eof flag and segment count */
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT * 2)))
return nfserr_io;
xdr_commit_encode(xdr);
@@ -5351,15 +5478,16 @@ nfsd4_encode_read_plus(struct nfsd4_compoundres *resp, __be32 nfserr,
nfserr = nfsd4_encode_read_plus_data(resp, read);
if (nfserr) {
- xdr_truncate_encode(xdr, starting_len);
+ xdr_truncate_encode(xdr, eof_offset);
return nfserr;
}
segments++;
out:
- p = xdr_encode_bool(p, read->rd_eof);
- *p = cpu_to_be32(segments);
+ wire_data[0] = read->rd_eof ? xdr_one : xdr_zero;
+ wire_data[1] = cpu_to_be32(segments);
+ write_bytes_to_xdr_buf(xdr->buf, eof_offset, &wire_data, XDR_UNIT * 2);
return nfserr;
}
@@ -5760,15 +5888,14 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
struct nfs4_stateowner *so = resp->cstate.replay_owner;
struct svc_rqst *rqstp = resp->rqstp;
const struct nfsd4_operation *opdesc = op->opdesc;
- int post_err_offset;
+ unsigned int op_status_offset;
nfsd4_enc encoder;
- __be32 *p;
- p = xdr_reserve_space(xdr, 8);
- if (!p)
+ if (xdr_stream_encode_u32(xdr, op->opnum) != XDR_UNIT)
+ goto release;
+ op_status_offset = xdr->buf->len;
+ if (!xdr_reserve_space(xdr, XDR_UNIT))
goto release;
- *p++ = cpu_to_be32(op->opnum);
- post_err_offset = xdr->buf->len;
if (op->opnum == OP_ILLEGAL)
goto status;
@@ -5809,20 +5936,20 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
* bug if we had to do this on a non-idempotent op:
*/
warn_on_nonidempotent_op(op);
- xdr_truncate_encode(xdr, post_err_offset);
- }
- if (so) {
- int len = xdr->buf->len - post_err_offset;
+ xdr_truncate_encode(xdr, op_status_offset + XDR_UNIT);
+ } else if (so) {
+ int len = xdr->buf->len - (op_status_offset + XDR_UNIT);
so->so_replay.rp_status = op->status;
so->so_replay.rp_buflen = len;
- read_bytes_from_xdr_buf(xdr->buf, post_err_offset,
+ read_bytes_from_xdr_buf(xdr->buf, op_status_offset + XDR_UNIT,
so->so_replay.rp_buf, len);
}
status:
op->status = nfsd4_map_status(op->status,
resp->cstate.minorversion);
- *p = op->status;
+ write_bytes_to_xdr_buf(xdr->buf, op_status_offset,
+ &op->status, XDR_UNIT);
release:
if (opdesc && opdesc->op_release)
opdesc->op_release(&op->u);
diff --git a/fs/nfsd/nfs4xdr_gen.c b/fs/nfsd/nfs4xdr_gen.c
new file mode 100644
index 000000000000..a17b5d8e60b3
--- /dev/null
+++ b/fs/nfsd/nfs4xdr_gen.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+// Generated by xdrgen. Manual edits will be lost.
+// XDR specification file: ../../Documentation/sunrpc/xdr/nfs4_1.x
+// XDR specification modification time: Mon Oct 14 09:10:13 2024
+
+#include <linux/sunrpc/svc.h>
+
+#include "nfs4xdr_gen.h"
+
+static bool __maybe_unused
+xdrgen_decode_int64_t(struct xdr_stream *xdr, int64_t *ptr)
+{
+ return xdrgen_decode_hyper(xdr, ptr);
+};
+
+static bool __maybe_unused
+xdrgen_decode_uint32_t(struct xdr_stream *xdr, uint32_t *ptr)
+{
+ return xdrgen_decode_unsigned_int(xdr, ptr);
+};
+
+static bool __maybe_unused
+xdrgen_decode_bitmap4(struct xdr_stream *xdr, bitmap4 *ptr)
+{
+ if (xdr_stream_decode_u32(xdr, &ptr->count) < 0)
+ return false;
+ for (u32 i = 0; i < ptr->count; i++)
+ if (!xdrgen_decode_uint32_t(xdr, &ptr->element[i]))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_decode_nfstime4(struct xdr_stream *xdr, struct nfstime4 *ptr)
+{
+ if (!xdrgen_decode_int64_t(xdr, &ptr->seconds))
+ return false;
+ if (!xdrgen_decode_uint32_t(xdr, &ptr->nseconds))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_decode_fattr4_offline(struct xdr_stream *xdr, fattr4_offline *ptr)
+{
+ return xdrgen_decode_bool(xdr, ptr);
+};
+
+static bool __maybe_unused
+xdrgen_decode_open_arguments4(struct xdr_stream *xdr, struct open_arguments4 *ptr)
+{
+ if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_share_access))
+ return false;
+ if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_share_deny))
+ return false;
+ if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_share_access_want))
+ return false;
+ if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_open_claim))
+ return false;
+ if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_create_mode))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_decode_open_args_share_access4(struct xdr_stream *xdr, open_args_share_access4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+static bool __maybe_unused
+xdrgen_decode_open_args_share_deny4(struct xdr_stream *xdr, open_args_share_deny4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+static bool __maybe_unused
+xdrgen_decode_open_args_share_access_want4(struct xdr_stream *xdr, open_args_share_access_want4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+static bool __maybe_unused
+xdrgen_decode_open_args_open_claim4(struct xdr_stream *xdr, open_args_open_claim4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+static bool __maybe_unused
+xdrgen_decode_open_args_createmode4(struct xdr_stream *xdr, open_args_createmode4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+bool
+xdrgen_decode_fattr4_open_arguments(struct xdr_stream *xdr, fattr4_open_arguments *ptr)
+{
+ return xdrgen_decode_open_arguments4(xdr, ptr);
+};
+
+bool
+xdrgen_decode_fattr4_time_deleg_access(struct xdr_stream *xdr, fattr4_time_deleg_access *ptr)
+{
+ return xdrgen_decode_nfstime4(xdr, ptr);
+};
+
+bool
+xdrgen_decode_fattr4_time_deleg_modify(struct xdr_stream *xdr, fattr4_time_deleg_modify *ptr)
+{
+ return xdrgen_decode_nfstime4(xdr, ptr);
+};
+
+static bool __maybe_unused
+xdrgen_decode_open_delegation_type4(struct xdr_stream *xdr, open_delegation_type4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+static bool __maybe_unused
+xdrgen_encode_int64_t(struct xdr_stream *xdr, const int64_t value)
+{
+ return xdrgen_encode_hyper(xdr, value);
+};
+
+static bool __maybe_unused
+xdrgen_encode_uint32_t(struct xdr_stream *xdr, const uint32_t value)
+{
+ return xdrgen_encode_unsigned_int(xdr, value);
+};
+
+static bool __maybe_unused
+xdrgen_encode_bitmap4(struct xdr_stream *xdr, const bitmap4 value)
+{
+ if (xdr_stream_encode_u32(xdr, value.count) != XDR_UNIT)
+ return false;
+ for (u32 i = 0; i < value.count; i++)
+ if (!xdrgen_encode_uint32_t(xdr, value.element[i]))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_encode_nfstime4(struct xdr_stream *xdr, const struct nfstime4 *value)
+{
+ if (!xdrgen_encode_int64_t(xdr, value->seconds))
+ return false;
+ if (!xdrgen_encode_uint32_t(xdr, value->nseconds))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_encode_fattr4_offline(struct xdr_stream *xdr, const fattr4_offline value)
+{
+ return xdrgen_encode_bool(xdr, value);
+};
+
+static bool __maybe_unused
+xdrgen_encode_open_arguments4(struct xdr_stream *xdr, const struct open_arguments4 *value)
+{
+ if (!xdrgen_encode_bitmap4(xdr, value->oa_share_access))
+ return false;
+ if (!xdrgen_encode_bitmap4(xdr, value->oa_share_deny))
+ return false;
+ if (!xdrgen_encode_bitmap4(xdr, value->oa_share_access_want))
+ return false;
+ if (!xdrgen_encode_bitmap4(xdr, value->oa_open_claim))
+ return false;
+ if (!xdrgen_encode_bitmap4(xdr, value->oa_create_mode))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_encode_open_args_share_access4(struct xdr_stream *xdr, open_args_share_access4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
+
+static bool __maybe_unused
+xdrgen_encode_open_args_share_deny4(struct xdr_stream *xdr, open_args_share_deny4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
+
+static bool __maybe_unused
+xdrgen_encode_open_args_share_access_want4(struct xdr_stream *xdr, open_args_share_access_want4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
+
+static bool __maybe_unused
+xdrgen_encode_open_args_open_claim4(struct xdr_stream *xdr, open_args_open_claim4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
+
+static bool __maybe_unused
+xdrgen_encode_open_args_createmode4(struct xdr_stream *xdr, open_args_createmode4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
+
+bool
+xdrgen_encode_fattr4_open_arguments(struct xdr_stream *xdr, const fattr4_open_arguments *value)
+{
+ return xdrgen_encode_open_arguments4(xdr, value);
+};
+
+bool
+xdrgen_encode_fattr4_time_deleg_access(struct xdr_stream *xdr, const fattr4_time_deleg_access *value)
+{
+ return xdrgen_encode_nfstime4(xdr, value);
+};
+
+bool
+xdrgen_encode_fattr4_time_deleg_modify(struct xdr_stream *xdr, const fattr4_time_deleg_modify *value)
+{
+ return xdrgen_encode_nfstime4(xdr, value);
+};
+
+static bool __maybe_unused
+xdrgen_encode_open_delegation_type4(struct xdr_stream *xdr, open_delegation_type4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
diff --git a/fs/nfsd/nfs4xdr_gen.h b/fs/nfsd/nfs4xdr_gen.h
new file mode 100644
index 000000000000..41a0033b7256
--- /dev/null
+++ b/fs/nfsd/nfs4xdr_gen.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Generated by xdrgen. Manual edits will be lost. */
+/* XDR specification file: ../../Documentation/sunrpc/xdr/nfs4_1.x */
+/* XDR specification modification time: Mon Oct 14 09:10:13 2024 */
+
+#ifndef _LINUX_XDRGEN_NFS4_1_DECL_H
+#define _LINUX_XDRGEN_NFS4_1_DECL_H
+
+#include <linux/types.h>
+
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/xdrgen/_defs.h>
+#include <linux/sunrpc/xdrgen/_builtins.h>
+#include <linux/sunrpc/xdrgen/nfs4_1.h>
+
+bool xdrgen_decode_fattr4_open_arguments(struct xdr_stream *xdr, fattr4_open_arguments *ptr);
+bool xdrgen_encode_fattr4_open_arguments(struct xdr_stream *xdr, const fattr4_open_arguments *value);
+
+bool xdrgen_decode_fattr4_time_deleg_access(struct xdr_stream *xdr, fattr4_time_deleg_access *ptr);
+bool xdrgen_encode_fattr4_time_deleg_access(struct xdr_stream *xdr, const fattr4_time_deleg_access *value);
+
+bool xdrgen_decode_fattr4_time_deleg_modify(struct xdr_stream *xdr, fattr4_time_deleg_modify *ptr);
+bool xdrgen_encode_fattr4_time_deleg_modify(struct xdr_stream *xdr, const fattr4_time_deleg_modify *value);
+
+#endif /* _LINUX_XDRGEN_NFS4_1_DECL_H */
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index ba9d326b3de6..ab13ee9c7fd8 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -27,7 +27,7 @@
* cache size, the idea being that when the cache is at its maximum number
* of entries, then this should be the average number of entries per bucket.
*/
-#define TARGET_BUCKET_SIZE 64
+#define TARGET_BUCKET_SIZE 8
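/*
 * Editorial sketch (not part of the patch): what the comment above implies
 * for table sizing. Assuming the bucket count is derived as roughly
 * max_entries / TARGET_BUCKET_SIZE, as the comment suggests, dropping the
 * target from 64 to 8 means about eight times as many, and therefore
 * shorter, hash chains for the same entry limit.
 */
#include <stdio.h>

int main(void)
{
	unsigned int max_entries = 4096;	/* hypothetical DRC limit */

	printf("target 64 -> ~%u buckets\n", max_entries / 64);	/* ~64  */
	printf("target 8  -> ~%u buckets\n", max_entries / 8);	/* ~512 */
	return 0;
}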
struct nfsd_drc_bucket {
struct rb_root rb_head;
@@ -237,10 +237,6 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
}
-/*
- * Move cache entry to end of LRU list, and queue the cleaner to run if it's
- * not already scheduled.
- */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp)
{
@@ -272,13 +268,6 @@ nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
/* The bucket LRU is ordered oldest-first. */
list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
- /*
- * Don't free entries attached to calls that are still
- * in-progress, but do keep scanning the list.
- */
- if (rp->c_state == RC_INPROG)
- continue;
-
if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
time_before(expiry, rp->c_timestamp))
break;
@@ -453,8 +442,6 @@ out:
nn->longest_chain_cachesize,
atomic_read(&nn->num_drc_entries));
}
-
- lru_put_end(b, ret);
return ret;
}
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 3adbc05ebaac..5ce9a49e76ba 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -48,7 +48,6 @@ enum {
NFSD_Versions,
NFSD_Ports,
NFSD_MaxBlkSize,
- NFSD_MaxConnections,
NFSD_Filecache,
NFSD_Leasetime,
NFSD_Gracetime,
@@ -68,7 +67,6 @@ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size);
static ssize_t write_versions(struct file *file, char *buf, size_t size);
static ssize_t write_ports(struct file *file, char *buf, size_t size);
static ssize_t write_maxblksize(struct file *file, char *buf, size_t size);
-static ssize_t write_maxconn(struct file *file, char *buf, size_t size);
#ifdef CONFIG_NFSD_V4
static ssize_t write_leasetime(struct file *file, char *buf, size_t size);
static ssize_t write_gracetime(struct file *file, char *buf, size_t size);
@@ -87,7 +85,6 @@ static ssize_t (*const write_op[])(struct file *, char *, size_t) = {
[NFSD_Versions] = write_versions,
[NFSD_Ports] = write_ports,
[NFSD_MaxBlkSize] = write_maxblksize,
- [NFSD_MaxConnections] = write_maxconn,
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = write_leasetime,
[NFSD_Gracetime] = write_gracetime,
@@ -902,44 +899,6 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
nfsd_max_blksize);
}
-/*
- * write_maxconn - Set or report the current max number of connections
- *
- * Input:
- * buf: ignored
- * size: zero
- * OR
- *
- * Input:
- * buf: C string containing an unsigned
- * integer value representing the new
- * number of max connections
- * size: non-zero length of C string in @buf
- * Output:
- * On success: passed-in buffer filled with '\n'-terminated C string
- * containing numeric value of max_connections setting
- * for this net namespace;
- * return code is the size in bytes of the string
- * On error: return code is zero or a negative errno value
- */
-static ssize_t write_maxconn(struct file *file, char *buf, size_t size)
-{
- char *mesg = buf;
- struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id);
- unsigned int maxconn = nn->max_connections;
-
- if (size > 0) {
- int rv = get_uint(&mesg, &maxconn);
-
- if (rv)
- return rv;
- trace_nfsd_ctl_maxconn(netns(file), maxconn);
- nn->max_connections = maxconn;
- }
-
- return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%u\n", maxconn);
-}
-
#ifdef CONFIG_NFSD_V4
static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size,
time64_t *time, struct nfsd_net *nn)
@@ -1144,89 +1103,48 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size)
* populating the filesystem.
*/
-/* Basically copying rpc_get_inode. */
static struct inode *nfsd_get_inode(struct super_block *sb, umode_t mode)
{
struct inode *inode = new_inode(sb);
- if (!inode)
- return NULL;
- /* Following advice from simple_fill_super documentation: */
- inode->i_ino = iunique(sb, NFSD_MaxReserved);
- inode->i_mode = mode;
- simple_inode_init_ts(inode);
- switch (mode & S_IFMT) {
- case S_IFDIR:
- inode->i_fop = &simple_dir_operations;
- inode->i_op = &simple_dir_inode_operations;
- inc_nlink(inode);
- break;
- case S_IFLNK:
- inode->i_op = &simple_symlink_inode_operations;
- break;
- default:
- break;
+ if (inode) {
+ /* Following advice from simple_fill_super documentation: */
+ inode->i_ino = iunique(sb, NFSD_MaxReserved);
+ inode->i_mode = mode;
+ simple_inode_init_ts(inode);
}
return inode;
}
-static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode, struct nfsdfs_client *ncl)
+static struct dentry *nfsd_mkdir(struct dentry *parent, struct nfsdfs_client *ncl, char *name)
{
+ struct inode *dir = parent->d_inode;
+ struct dentry *dentry;
struct inode *inode;
- inode = nfsd_get_inode(dir->i_sb, mode);
+ inode = nfsd_get_inode(parent->d_sb, S_IFDIR | 0600);
if (!inode)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
+
+ dentry = simple_start_creating(parent, name);
+ if (IS_ERR(dentry)) {
+ iput(inode);
+ return dentry;
+ }
+ inode->i_fop = &simple_dir_operations;
+ inode->i_op = &simple_dir_inode_operations;
+ inc_nlink(inode);
if (ncl) {
inode->i_private = ncl;
kref_get(&ncl->cl_ref);
}
- d_add(dentry, inode);
+ d_make_persistent(dentry, inode);
inc_nlink(dir);
fsnotify_mkdir(dir, dentry);
- return 0;
-}
-
-static struct dentry *nfsd_mkdir(struct dentry *parent, struct nfsdfs_client *ncl, char *name)
-{
- struct inode *dir = parent->d_inode;
- struct dentry *dentry;
- int ret = -ENOMEM;
-
- inode_lock(dir);
- dentry = d_alloc_name(parent, name);
- if (!dentry)
- goto out_err;
- ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600, ncl);
- if (ret)
- goto out_err;
-out:
- inode_unlock(dir);
- return dentry;
-out_err:
- dput(dentry);
- dentry = ERR_PTR(ret);
- goto out;
+ simple_done_creating(dentry);
+ return dentry; // borrowed
}
#if IS_ENABLED(CONFIG_SUNRPC_GSS)
-static int __nfsd_symlink(struct inode *dir, struct dentry *dentry,
- umode_t mode, const char *content)
-{
- struct inode *inode;
-
- inode = nfsd_get_inode(dir->i_sb, mode);
- if (!inode)
- return -ENOMEM;
-
- inode->i_link = (char *)content;
- inode->i_size = strlen(content);
-
- d_add(dentry, inode);
- inc_nlink(dir);
- fsnotify_create(dir, dentry);
- return 0;
-}
-
/*
* @content is assumed to be a NUL-terminated string that lives
* longer than the symlink itself.
@@ -1235,18 +1153,26 @@ static void _nfsd_symlink(struct dentry *parent, const char *name,
const char *content)
{
struct inode *dir = parent->d_inode;
+ struct inode *inode;
struct dentry *dentry;
- int ret;
- inode_lock(dir);
- dentry = d_alloc_name(parent, name);
- if (!dentry)
- goto out;
- ret = __nfsd_symlink(d_inode(parent), dentry, S_IFLNK | 0777, content);
- if (ret)
- dput(dentry);
-out:
- inode_unlock(dir);
+ inode = nfsd_get_inode(dir->i_sb, S_IFLNK | 0777);
+ if (!inode)
+ return;
+
+ dentry = simple_start_creating(parent, name);
+ if (IS_ERR(dentry)) {
+ iput(inode);
+ return;
+ }
+
+ inode->i_op = &simple_symlink_inode_operations;
+ inode->i_link = (char *)content;
+ inode->i_size = strlen(content);
+
+ d_make_persistent(dentry, inode);
+ fsnotify_create(dir, dentry);
+ simple_done_creating(dentry);
}
#else
static inline void _nfsd_symlink(struct dentry *parent, const char *name,
@@ -1281,40 +1207,34 @@ struct nfsdfs_client *get_nfsdfs_client(struct inode *inode)
/* XXX: cut'n'paste from simple_fill_super; figure out if we could share
* code instead. */
-static int nfsdfs_create_files(struct dentry *root,
+static int nfsdfs_create_files(struct dentry *root,
const struct tree_descr *files,
struct nfsdfs_client *ncl,
struct dentry **fdentries)
{
struct inode *dir = d_inode(root);
- struct inode *inode;
struct dentry *dentry;
- int i;
- inode_lock(dir);
- for (i = 0; files->name && files->name[0]; i++, files++) {
- dentry = d_alloc_name(root, files->name);
- if (!dentry)
- goto out;
- inode = nfsd_get_inode(d_inode(root)->i_sb,
- S_IFREG | files->mode);
- if (!inode) {
- dput(dentry);
- goto out;
+ for (int i = 0; files->name && files->name[0]; i++, files++) {
+ struct inode *inode = nfsd_get_inode(root->d_sb,
+ S_IFREG | files->mode);
+ if (!inode)
+ return -ENOMEM;
+ dentry = simple_start_creating(root, files->name);
+ if (IS_ERR(dentry)) {
+ iput(inode);
+ return PTR_ERR(dentry);
}
kref_get(&ncl->cl_ref);
inode->i_fop = files->ops;
inode->i_private = ncl;
- d_add(dentry, inode);
+ d_make_persistent(dentry, inode);
fsnotify_create(dir, dentry);
if (fdentries)
- fdentries[i] = dentry;
+ fdentries[i] = dentry; // borrowed
+ simple_done_creating(dentry);
}
- inode_unlock(dir);
return 0;
-out:
- inode_unlock(dir);
- return -ENOMEM;
}
/* on success, returns positive number unique to that client. */
@@ -1372,7 +1292,6 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
[NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO},
[NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
- [NFSD_MaxConnections] = {"max_connections", &transaction_ops, S_IWUSR|S_IRUGO},
[NFSD_Filecache] = {"filecache", &nfsd_file_cache_stats_fops, S_IRUGO},
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
@@ -1427,7 +1346,7 @@ static void nfsd_umount(struct super_block *sb)
nfsd_shutdown_threads(net);
- kill_litter_super(sb);
+ kill_anon_super(sb);
put_net(net);
}
@@ -1478,7 +1397,7 @@ unsigned int nfsd_net_id;
static int nfsd_genl_rpc_status_compose_msg(struct sk_buff *skb,
struct netlink_callback *cb,
- struct nfsd_genl_rqstp *rqstp)
+ struct nfsd_genl_rqstp *genl_rqstp)
{
void *hdr;
u32 i;
@@ -1488,22 +1407,22 @@ static int nfsd_genl_rpc_status_compose_msg(struct sk_buff *skb,
if (!hdr)
return -ENOBUFS;
- if (nla_put_be32(skb, NFSD_A_RPC_STATUS_XID, rqstp->rq_xid) ||
- nla_put_u32(skb, NFSD_A_RPC_STATUS_FLAGS, rqstp->rq_flags) ||
- nla_put_u32(skb, NFSD_A_RPC_STATUS_PROG, rqstp->rq_prog) ||
- nla_put_u32(skb, NFSD_A_RPC_STATUS_PROC, rqstp->rq_proc) ||
- nla_put_u8(skb, NFSD_A_RPC_STATUS_VERSION, rqstp->rq_vers) ||
+ if (nla_put_be32(skb, NFSD_A_RPC_STATUS_XID, genl_rqstp->rq_xid) ||
+ nla_put_u32(skb, NFSD_A_RPC_STATUS_FLAGS, genl_rqstp->rq_flags) ||
+ nla_put_u32(skb, NFSD_A_RPC_STATUS_PROG, genl_rqstp->rq_prog) ||
+ nla_put_u32(skb, NFSD_A_RPC_STATUS_PROC, genl_rqstp->rq_proc) ||
+ nla_put_u8(skb, NFSD_A_RPC_STATUS_VERSION, genl_rqstp->rq_vers) ||
nla_put_s64(skb, NFSD_A_RPC_STATUS_SERVICE_TIME,
- ktime_to_us(rqstp->rq_stime),
+ ktime_to_us(genl_rqstp->rq_stime),
NFSD_A_RPC_STATUS_PAD))
return -ENOBUFS;
- switch (rqstp->rq_saddr.sa_family) {
+ switch (genl_rqstp->rq_saddr.sa_family) {
case AF_INET: {
const struct sockaddr_in *s_in, *d_in;
- s_in = (const struct sockaddr_in *)&rqstp->rq_saddr;
- d_in = (const struct sockaddr_in *)&rqstp->rq_daddr;
+ s_in = (const struct sockaddr_in *)&genl_rqstp->rq_saddr;
+ d_in = (const struct sockaddr_in *)&genl_rqstp->rq_daddr;
if (nla_put_in_addr(skb, NFSD_A_RPC_STATUS_SADDR4,
s_in->sin_addr.s_addr) ||
nla_put_in_addr(skb, NFSD_A_RPC_STATUS_DADDR4,
@@ -1518,8 +1437,8 @@ static int nfsd_genl_rpc_status_compose_msg(struct sk_buff *skb,
case AF_INET6: {
const struct sockaddr_in6 *s_in, *d_in;
- s_in = (const struct sockaddr_in6 *)&rqstp->rq_saddr;
- d_in = (const struct sockaddr_in6 *)&rqstp->rq_daddr;
+ s_in = (const struct sockaddr_in6 *)&genl_rqstp->rq_saddr;
+ d_in = (const struct sockaddr_in6 *)&genl_rqstp->rq_daddr;
if (nla_put_in6_addr(skb, NFSD_A_RPC_STATUS_SADDR6,
&s_in->sin6_addr) ||
nla_put_in6_addr(skb, NFSD_A_RPC_STATUS_DADDR6,
@@ -1533,9 +1452,9 @@ static int nfsd_genl_rpc_status_compose_msg(struct sk_buff *skb,
}
}
- for (i = 0; i < rqstp->rq_opcnt; i++)
+ for (i = 0; i < genl_rqstp->rq_opcnt; i++)
if (nla_put_u32(skb, NFSD_A_RPC_STATUS_COMPOUND_OPS,
- rqstp->rq_opnum[i]))
+ genl_rqstp->rq_opnum[i]))
return -ENOBUFS;
genlmsg_end(skb, hdr);
@@ -1611,7 +1530,8 @@ int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb,
int j;
args = rqstp->rq_argp;
- genl_rqstp.rq_opcnt = args->opcnt;
+ genl_rqstp.rq_opcnt = min_t(u32, args->opcnt,
+ ARRAY_SIZE(genl_rqstp.rq_opnum));
for (j = 0; j < genl_rqstp.rq_opcnt; j++)
genl_rqstp.rq_opnum[j] =
args->ops[j].opnum;
@@ -1653,7 +1573,7 @@ out_unlock:
*/
int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info)
{
- int *nthreads, count = 0, nrpools, i, ret = -EOPNOTSUPP, rem;
+ int *nthreads, nrpools = 0, i, ret = -EOPNOTSUPP, rem;
struct net *net = genl_info_net(info);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
const struct nlattr *attr;
@@ -1663,14 +1583,12 @@ int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
/* count number of SERVER_THREADS values */
- nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
- if (nla_type(attr) == NFSD_A_SERVER_THREADS)
- count++;
- }
+ nlmsg_for_each_attr_type(attr, NFSD_A_SERVER_THREADS, info->nlhdr,
+ GENL_HDRLEN, rem)
+ nrpools++;
mutex_lock(&nfsd_mutex);
- nrpools = max(count, nfsd_nrpools(net));
nthreads = kcalloc(nrpools, sizeof(int), GFP_KERNEL);
if (!nthreads) {
ret = -ENOMEM;
@@ -1678,12 +1596,11 @@ int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info)
}
i = 0;
- nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
- if (nla_type(attr) == NFSD_A_SERVER_THREADS) {
- nthreads[i++] = nla_get_u32(attr);
- if (i >= nrpools)
- break;
- }
+ nlmsg_for_each_attr_type(attr, NFSD_A_SERVER_THREADS, info->nlhdr,
+ GENL_HDRLEN, rem) {
+ nthreads[i++] = nla_get_u32(attr);
+ if (i >= nrpools)
+ break;
}
if (info->attrs[NFSD_A_SERVER_GRACETIME] ||
@@ -1824,14 +1741,12 @@ int nfsd_nl_version_set_doit(struct sk_buff *skb, struct genl_info *info)
for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++)
nfsd_minorversion(nn, i, NFSD_CLEAR);
- nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
+ nlmsg_for_each_attr_type(attr, NFSD_A_SERVER_PROTO_VERSION, info->nlhdr,
+ GENL_HDRLEN, rem) {
struct nlattr *tb[NFSD_A_VERSION_MAX + 1];
u32 major, minor = 0;
bool enabled;
- if (nla_type(attr) != NFSD_A_SERVER_PROTO_VERSION)
- continue;
-
if (nla_parse_nested(tb, NFSD_A_VERSION_MAX, attr,
nfsd_version_nl_policy, info->extack) < 0)
continue;
@@ -1959,6 +1874,7 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info)
struct svc_serv *serv;
LIST_HEAD(permsocks);
struct nfsd_net *nn;
+ bool delete = false;
int err, rem;
mutex_lock(&nfsd_mutex);
@@ -1981,14 +1897,12 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info)
* Walk the list of server_socks from userland and move any that match
* back to sv_permsocks
*/
- nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
+ nlmsg_for_each_attr_type(attr, NFSD_A_SERVER_SOCK_ADDR, info->nlhdr,
+ GENL_HDRLEN, rem) {
struct nlattr *tb[NFSD_A_SOCK_MAX + 1];
const char *xcl_name;
struct sockaddr *sa;
- if (nla_type(attr) != NFSD_A_SERVER_SOCK_ADDR)
- continue;
-
if (nla_parse_nested(tb, NFSD_A_SOCK_MAX, attr,
nfsd_sock_nl_policy, info->extack) < 0)
continue;
@@ -2019,45 +1933,37 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info)
}
}
- /* For now, no removing old sockets while server is running */
- if (serv->sv_nrthreads && !list_empty(&permsocks)) {
+ /*
+ * If there are listener transports remaining on the permsocks list,
+ * it means we were asked to remove a listener.
+ */
+ if (!list_empty(&permsocks)) {
list_splice_init(&permsocks, &serv->sv_permsocks);
- spin_unlock_bh(&serv->sv_lock);
- err = -EBUSY;
- goto out_unlock_mtx;
+ delete = true;
}
+ spin_unlock_bh(&serv->sv_lock);
- /* Close the remaining sockets on the permsocks list */
- while (!list_empty(&permsocks)) {
- xprt = list_first_entry(&permsocks, struct svc_xprt, xpt_list);
- list_move(&xprt->xpt_list, &serv->sv_permsocks);
-
- /*
- * Newly-created sockets are born with the BUSY bit set. Clear
- * it if there are no threads, since nothing can pick it up
- * in that case.
- */
- if (!serv->sv_nrthreads)
- clear_bit(XPT_BUSY, &xprt->xpt_flags);
-
- set_bit(XPT_CLOSE, &xprt->xpt_flags);
- spin_unlock_bh(&serv->sv_lock);
- svc_xprt_close(xprt);
- spin_lock_bh(&serv->sv_lock);
+ /* Do not remove listeners while there are active threads. */
+ if (serv->sv_nrthreads) {
+ err = -EBUSY;
+ goto out_unlock_mtx;
}
- spin_unlock_bh(&serv->sv_lock);
+ /*
+ * Since we can't delete an arbitrary llist entry, destroy the
+ * remaining listeners and recreate the list.
+ */
+ if (delete)
+ svc_xprt_destroy_all(serv, net, false);
/* walk list of addrs again, open any that still don't exist */
- nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
+ nlmsg_for_each_attr_type(attr, NFSD_A_SERVER_SOCK_ADDR, info->nlhdr,
+ GENL_HDRLEN, rem) {
struct nlattr *tb[NFSD_A_SOCK_MAX + 1];
const char *xcl_name;
struct sockaddr *sa;
int ret;
- if (nla_type(attr) != NFSD_A_SERVER_SOCK_ADDR)
- continue;
-
if (nla_parse_nested(tb, NFSD_A_SOCK_MAX, attr,
nfsd_sock_nl_policy, info->extack) < 0)
continue;
@@ -2073,6 +1979,9 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info)
xprt = svc_find_listener(serv, xcl_name, net, sa);
if (xprt) {
+ if (delete)
+ WARN_ONCE(1, "Transport type=%s already exists\n",
+ xcl_name);
svc_xprt_put(xprt);
continue;
}
@@ -2246,8 +2155,14 @@ static __net_init int nfsd_net_init(struct net *net)
NFSD_STATS_COUNTERS_NUM);
if (retval)
goto out_repcache_error;
+
memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats));
nn->nfsd_svcstats.program = &nfsd_programs[0];
+ if (!nfsd_proc_stat_init(net)) {
+ retval = -ENOMEM;
+ goto out_proc_error;
+ }
+
for (i = 0; i < sizeof(nn->nfsd_versions); i++)
nn->nfsd_versions[i] = nfsd_support_version(i);
for (i = 0; i < sizeof(nn->nfsd4_minorversions); i++)
@@ -2257,12 +2172,14 @@ static __net_init int nfsd_net_init(struct net *net)
nfsd4_init_leases_net(nn);
get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
seqlock_init(&nn->writeverf_lock);
- nfsd_proc_stat_init(net);
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ spin_lock_init(&nn->local_clients_lock);
INIT_LIST_HEAD(&nn->local_clients);
#endif
return 0;
+out_proc_error:
+ percpu_counter_destroy_many(nn->counter, NFSD_STATS_COUNTERS_NUM);
out_repcache_error:
nfsd_idmap_shutdown(net);
out_idmap_error:
@@ -2276,14 +2193,15 @@ out_export_error:
* nfsd_net_pre_exit - Disconnect localio clients from net namespace
* @net: a network namespace that is about to be destroyed
*
- * This invalidated ->net pointers held by localio clients
+ * This invalidates ->net pointers held by localio clients
* while they can still safely access nn->counter.
*/
static __net_exit void nfsd_net_pre_exit(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- nfs_uuid_invalidate_clients(&nn->local_clients);
+ nfs_localio_invalidate_clients(&nn->local_clients,
+ &nn->local_clients_lock);
}
#endif
@@ -2316,6 +2234,8 @@ static int __init init_nfsd(void)
{
int retval;
+ nfsd_debugfs_init();
+
retval = nfsd4_init_slabs();
if (retval)
return retval;
@@ -2326,12 +2246,9 @@ static int __init init_nfsd(void)
if (retval)
goto out_free_pnfs;
nfsd_lockd_init(); /* lockd->nfsd callbacks */
- retval = create_proc_exports_entry();
- if (retval)
- goto out_free_lockd;
retval = register_pernet_subsys(&nfsd_net_ops);
if (retval < 0)
- goto out_free_exports;
+ goto out_free_lockd;
retval = register_cld_notifier();
if (retval)
goto out_free_subsys;
@@ -2340,22 +2257,26 @@ static int __init init_nfsd(void)
goto out_free_cld;
retval = register_filesystem(&nfsd_fs_type);
if (retval)
- goto out_free_all;
+ goto out_free_nfsd4;
retval = genl_register_family(&nfsd_nl_family);
if (retval)
+ goto out_free_filesystem;
+ retval = create_proc_exports_entry();
+ if (retval)
goto out_free_all;
nfsd_localio_ops_init();
return 0;
out_free_all:
+ genl_unregister_family(&nfsd_nl_family);
+out_free_filesystem:
+ unregister_filesystem(&nfsd_fs_type);
+out_free_nfsd4:
nfsd4_destroy_laundry_wq();
out_free_cld:
unregister_cld_notifier();
out_free_subsys:
unregister_pernet_subsys(&nfsd_net_ops);
-out_free_exports:
- remove_proc_entry("fs/nfs/exports", NULL);
- remove_proc_entry("fs/nfs", NULL);
out_free_lockd:
nfsd_lockd_shutdown();
nfsd_drc_slab_free();
@@ -2363,22 +2284,24 @@ out_free_pnfs:
nfsd4_exit_pnfs();
out_free_slabs:
nfsd4_free_slabs();
+ nfsd_debugfs_exit();
return retval;
}
static void __exit exit_nfsd(void)
{
+ remove_proc_entry("fs/nfs/exports", NULL);
+ remove_proc_entry("fs/nfs", NULL);
genl_unregister_family(&nfsd_nl_family);
unregister_filesystem(&nfsd_fs_type);
nfsd4_destroy_laundry_wq();
unregister_cld_notifier();
unregister_pernet_subsys(&nfsd_net_ops);
nfsd_drc_slab_free();
- remove_proc_entry("fs/nfs/exports", NULL);
- remove_proc_entry("fs/nfs", NULL);
nfsd_lockd_shutdown();
nfsd4_free_slabs();
nfsd4_exit_pnfs();
+ nfsd_debugfs_exit();
}
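Editor's note: create_proc_exports_entry() is now the last step of init_nfsd() and the proc entries are the first things exit_nfsd() removes, so construction and teardown stay strictly last-in, first-out. A minimal sketch of that goto-ladder convention, using made-up step names that are not part of this patch:

/* Sketch only: LIFO unwind -- whatever registered last is undone first. */
extern int register_step_a(void), register_step_b(void), register_step_c(void);
extern void unregister_step_a(void), unregister_step_b(void);

static int example_init(void)
{
	int ret;

	ret = register_step_a();
	if (ret)
		return ret;
	ret = register_step_b();
	if (ret)
		goto out_a;
	ret = register_step_c();	/* registered last ... */
	if (ret)
		goto out_b;
	return 0;

out_b:
	unregister_step_b();
out_a:
	unregister_step_a();		/* ... undone last */
	return ret;
}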
MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 4b56ba1e8e48..e4263326ca4a 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -44,31 +44,21 @@ bool nfsd_support_version(int vers);
#include "stats.h"
/*
- * Maximum blocksizes supported by daemon under various circumstances.
+ * Default and maximum payload size (NFS READ or WRITE), in bytes.
+ * The default is historical, and the maximum is an implementation
+ * limit.
*/
-#define NFSSVC_MAXBLKSIZE RPCSVC_MAXPAYLOAD
-/* NFSv2 is limited by the protocol specification, see RFC 1094 */
-#define NFSSVC_MAXBLKSIZE_V2 (8*1024)
-
-
-/*
- * Largest number of bytes we need to allocate for an NFS
- * call or reply. Used to control buffer sizes. We use
- * the length of v3 WRITE, READDIR and READDIR replies
- * which are an RPC header, up to 26 XDR units of reply
- * data, and some page data.
- *
- * Note that accuracy here doesn't matter too much as the
- * size is rounded up to a page size when allocating space.
- */
-#define NFSD_BUFSIZE ((RPC_MAX_HEADER_WITH_AUTH+26)*XDR_UNIT + NFSSVC_MAXBLKSIZE)
+enum {
+ NFSSVC_DEFBLKSIZE = 1 * 1024 * 1024,
+ NFSSVC_MAXBLKSIZE = RPCSVC_MAXPAYLOAD,
+};
struct readdir_cd {
__be32 err; /* 0, nfserr, or nfserr_eof */
};
/* Maximum number of operations per session compound */
-#define NFSD_MAX_OPS_PER_COMPOUND 50
+#define NFSD_MAX_OPS_PER_COMPOUND 200
struct nfsd_genl_rqstp {
struct sockaddr rq_daddr;
@@ -82,15 +72,12 @@ struct nfsd_genl_rqstp {
/* NFSv4 compound */
u32 rq_opcnt;
- u32 rq_opnum[NFSD_MAX_OPS_PER_COMPOUND];
+ u32 rq_opnum[16];
};
extern struct svc_program nfsd_programs[];
extern const struct svc_version nfsd_version2, nfsd_version3, nfsd_version4;
extern struct mutex nfsd_mutex;
-extern spinlock_t nfsd_drc_lock;
-extern unsigned long nfsd_drc_max_mem;
-extern unsigned long nfsd_drc_mem_used;
extern atomic_t nfsd_th_cnt; /* number of available threads */
extern const struct seq_operations nfs_exports_op;
@@ -159,6 +146,26 @@ void nfsd_reset_versions(struct nfsd_net *nn);
int nfsd_create_serv(struct net *net);
void nfsd_destroy_serv(struct net *net);
+#ifdef CONFIG_DEBUG_FS
+void nfsd_debugfs_init(void);
+void nfsd_debugfs_exit(void);
+#else
+static inline void nfsd_debugfs_init(void) {}
+static inline void nfsd_debugfs_exit(void) {}
+#endif
+
+extern bool nfsd_disable_splice_read __read_mostly;
+
+enum {
+ /* Any new NFSD_IO enum value must be added at the end */
+ NFSD_IO_BUFFERED,
+ NFSD_IO_DONTCACHE,
+ NFSD_IO_DIRECT,
+};
+
+extern u64 nfsd_io_cache_read __read_mostly;
+extern u64 nfsd_io_cache_write __read_mostly;
+
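Editor's note: nfsd_io_cache_read and nfsd_io_cache_write hold one of the NFSD_IO_* values; the trace.h hunk later in this patch adds matching read_direct/write_direct events. The helper below is purely illustrative of how an I/O path might branch on such a knob; it is not the actual vfs.c logic:

/* Sketch only: pick an I/O style from a cache-mode knob (hypothetical helper). */
static bool example_use_direct_io(u64 cache_mode)
{
	switch (cache_mode) {
	case NFSD_IO_DIRECT:
		return true;
	case NFSD_IO_DONTCACHE:
	case NFSD_IO_BUFFERED:
	default:
		return false;
	}
}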
extern int nfsd_max_blksize;
static inline int nfsd_v4client(struct svc_rqst *rq)
@@ -286,6 +293,7 @@ void nfsd_lockd_shutdown(void);
#define nfserr_cb_path_down cpu_to_be32(NFSERR_CB_PATH_DOWN)
#define nfserr_locked cpu_to_be32(NFSERR_LOCKED)
#define nfserr_wrongsec cpu_to_be32(NFSERR_WRONGSEC)
+#define nfserr_delay cpu_to_be32(NFS4ERR_DELAY)
#define nfserr_badiomode cpu_to_be32(NFS4ERR_BADIOMODE)
#define nfserr_badlayout cpu_to_be32(NFS4ERR_BADLAYOUT)
#define nfserr_bad_session_digest cpu_to_be32(NFS4ERR_BAD_SESSION_DIGEST)
@@ -340,14 +348,8 @@ void nfsd_lockd_shutdown(void);
* cannot conflict with any existing be32 nfserr value.
*/
enum {
- NFSERR_DROPIT = NFS4ERR_FIRST_FREE,
-/* if a request fails due to kmalloc failure, it gets dropped.
- * Client should resend eventually
- */
-#define nfserr_dropit cpu_to_be32(NFSERR_DROPIT)
-
/* end-of-file indicator in readdir */
- NFSERR_EOF,
+ NFSERR_EOF = NFS4ERR_FIRST_FREE,
#define nfserr_eof cpu_to_be32(NFSERR_EOF)
/* replay detected */
@@ -396,14 +398,13 @@ enum {
#define NFSD_CB_GETATTR_TIMEOUT NFSD_DELEGRETURN_TIMEOUT
/*
- * The following attributes are currently not supported by the NFSv4 server:
+ * The following attributes are not implemented by NFSD:
* ARCHIVE (deprecated anyway)
* HIDDEN (unlikely to be supported any time soon)
* MIMETYPE (unlikely to be supported any time soon)
* QUOTA_* (will be supported in a forthcoming patch)
* SYSTEM (unlikely to be supported any time soon)
* TIME_BACKUP (unlikely to be supported any time soon)
- * TIME_CREATE (unlikely to be supported any time soon)
*/
#define NFSD4_SUPPORTED_ATTRS_WORD0 \
(FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \
@@ -457,8 +458,12 @@ enum {
#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
(NFSD4_1_SUPPORTED_ATTRS_WORD2 | \
FATTR4_WORD2_MODE_UMASK | \
+ FATTR4_WORD2_CLONE_BLKSIZE | \
NFSD4_2_SECURITY_ATTRS | \
- FATTR4_WORD2_XATTR_SUPPORT)
+ FATTR4_WORD2_XATTR_SUPPORT | \
+ FATTR4_WORD2_TIME_DELEG_ACCESS | \
+ FATTR4_WORD2_TIME_DELEG_MODIFY | \
+ FATTR4_WORD2_OPEN_ARGUMENTS)
extern const u32 nfsd_suppattrs[3][3];
@@ -528,7 +533,10 @@ static inline bool nfsd_attrs_supported(u32 minorversion, const u32 *bmval)
#endif
#define NFSD_WRITEABLE_ATTRS_WORD2 \
(FATTR4_WORD2_MODE_UMASK \
- | MAYBE_FATTR4_WORD2_SECURITY_LABEL)
+ | MAYBE_FATTR4_WORD2_SECURITY_LABEL \
+ | FATTR4_WORD2_TIME_DELEG_ACCESS \
+ | FATTR4_WORD2_TIME_DELEG_MODIFY \
+ )
#define NFSD_SUPPATTR_EXCLCREAT_WORD0 \
NFSD_WRITEABLE_ATTRS_WORD0
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 6a831cb242df..ed85dd43da18 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -172,6 +172,8 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net,
if (len == 0)
return error;
if (fh->fh_fsid_type == FSID_MAJOR_MINOR) {
+ u32 *fsid = fh_fsid(fh);
+
/* deprecated, convert to type 3 */
len = key_len(FSID_ENCODE_DEV)/4;
fh->fh_fsid_type = FSID_ENCODE_DEV;
@@ -181,17 +183,17 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net,
* confuses sparse, so we must use __force here to
* keep it from complaining.
*/
- fh->fh_fsid[0] = new_encode_dev(MKDEV(ntohl((__force __be32)fh->fh_fsid[0]),
- ntohl((__force __be32)fh->fh_fsid[1])));
- fh->fh_fsid[1] = fh->fh_fsid[2];
+ fsid[0] = new_encode_dev(MKDEV(ntohl((__force __be32)fsid[0]),
+ ntohl((__force __be32)fsid[1])));
+ fsid[1] = fsid[2];
}
data_left -= len;
if (data_left < 0)
return error;
exp = rqst_exp_find(rqstp ? &rqstp->rq_chandle : NULL,
net, client, gssclient,
- fh->fh_fsid_type, fh->fh_fsid);
- fid = (struct fid *)(fh->fh_fsid + len);
+ fh->fh_fsid_type, fh_fsid(fh));
+ fid = (struct fid *)(fh_fsid(fh) + len);
error = nfserr_stale;
if (IS_ERR(exp)) {
@@ -222,7 +224,6 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net,
cap_raise_nfsd_set(new->cap_effective,
new->cap_permitted);
put_cred(override_creds(new));
- put_cred(new);
} else {
error = nfsd_setuser_and_check_port(rqstp, cred, exp);
if (error)
@@ -268,9 +269,6 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net,
dentry);
}
- fhp->fh_dentry = dentry;
- fhp->fh_export = exp;
-
switch (fhp->fh_maxsize) {
case NFS4_FHSIZE:
if (dentry->d_sb->s_export_op->flags & EXPORT_OP_NOATOMIC_ATTR)
@@ -292,6 +290,9 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net,
goto out;
}
+ fhp->fh_dentry = dentry;
+ fhp->fh_export = exp;
+
return 0;
out:
exp_put(exp);
@@ -363,10 +364,30 @@ __fh_verify(struct svc_rqst *rqstp,
if (error)
goto out;
+ /*
+ * If rqstp is NULL, this is a LOCALIO request which will only
+ * ever use a filehandle/credential pair for which access has
+ * been affirmed (by ACCESS or OPEN NFS requests) over the
+ * wire. Skip both the xprtsec policy and the security flavor
+ * checks.
+ */
+ if (!rqstp)
+ goto check_permissions;
+
if ((access & NFSD_MAY_NLM) && (exp->ex_flags & NFSEXP_NOAUTHNLM))
/* NLM is allowed to fully bypass authentication */
goto out;
+ /*
+ * NLM is allowed to bypass the xprtsec policy check because lockd
+ * doesn't support xprtsec.
+ */
+ if (!(access & NFSD_MAY_NLM)) {
+ error = check_xprtsec_policy(exp, rqstp);
+ if (error)
+ goto out;
+ }
+
if (access & NFSD_MAY_BYPASS_GSS)
may_bypass_gss = true;
/*
@@ -378,10 +399,13 @@ __fh_verify(struct svc_rqst *rqstp,
&& exp->ex_path.dentry == dentry)
may_bypass_gss = true;
- error = check_nfsd_access(exp, rqstp, may_bypass_gss);
+ error = check_security_flavor(exp, rqstp, may_bypass_gss);
if (error)
goto out;
+ svc_xprt_set_valid(rqstp->rq_xprt);
+
+check_permissions:
/* Finally, check access permissions. */
error = nfsd_permission(cred, exp, dentry, access);
out:
@@ -461,7 +485,7 @@ static void _fh_update(struct svc_fh *fhp, struct svc_export *exp,
{
if (dentry != exp->ex_path.dentry) {
struct fid *fid = (struct fid *)
- (fhp->fh_handle.fh_fsid + fhp->fh_handle.fh_size/4 - 1);
+ (fh_fsid(&fhp->fh_handle) + fhp->fh_handle.fh_size/4 - 1);
int maxsize = (fhp->fh_maxsize - fhp->fh_handle.fh_size)/4;
int fh_flags = (exp->ex_flags & NFSEXP_NOSUBTREECHECK) ? 0 :
EXPORT_FH_CONNECTABLE;
@@ -612,7 +636,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
fhp->fh_handle.fh_auth_type = 0;
mk_fsid(fhp->fh_handle.fh_fsid_type,
- fhp->fh_handle.fh_fsid,
+ fh_fsid(&fhp->fh_handle),
ex_dev,
d_inode(exp->ex_path.dentry)->i_ino,
exp->ex_fsid, exp->ex_uuid);
@@ -659,6 +683,33 @@ out_negative:
}
/**
+ * fh_getattr - Retrieve attributes on a local file
+ * @fhp: File handle of target file
+ * @stat: Caller-supplied kstat buffer to be filled in
+ *
+ * Returns nfs_ok on success, otherwise an NFS status code is
+ * returned.
+ */
+__be32 fh_getattr(const struct svc_fh *fhp, struct kstat *stat)
+{
+ struct path p = {
+ .mnt = fhp->fh_export->ex_path.mnt,
+ .dentry = fhp->fh_dentry,
+ };
+ struct inode *inode = d_inode(p.dentry);
+ u32 request_mask = STATX_BASIC_STATS;
+
+ if (S_ISREG(inode->i_mode))
+ request_mask |= (STATX_DIOALIGN | STATX_DIO_READ_ALIGN);
+
+ if (fhp->fh_maxsize == NFS4_FHSIZE)
+ request_mask |= (STATX_BTIME | STATX_CHANGE_COOKIE);
+
+ return nfserrno(vfs_getattr(&p, stat, request_mask,
+ AT_STATX_SYNC_AS_STAT));
+}
+
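Editor's note: fh_getattr() expects a file handle that fh_verify() has already populated; the nfsproc.c hunks below use it that way to fill the post-op kstat after GETATTR and WRITE. A minimal usage sketch under that assumption, with a hypothetical wrapper name and the usual nfsd.h/nfsfh.h context assumed:

/* Sketch: fetch post-operation attributes for a verified file handle. */
static __be32 example_post_op_attrs(struct svc_rqst *rqstp, struct svc_fh *fhp,
				    struct kstat *stat)
{
	__be32 status;

	status = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP);
	if (status != nfs_ok)
		return status;
	return fh_getattr(fhp, stat);	/* nfs_ok or an NFS status code */
}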
+/**
* fh_fill_pre_attrs - Fill in pre-op attributes
* @fhp: file handle to be updated
*
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index 876152a91f12..5ef7191f8ad8 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -14,6 +14,8 @@
#include <linux/exportfs.h>
#include <linux/nfs4.h>
+#include "export.h"
+
/*
* The file handle starts with a sequence of four-byte words.
* The first word contains a version number (1) and three descriptor bytes
@@ -49,18 +51,19 @@ struct knfsd_fh {
* Points to the current size while
* building a new file handle.
*/
- union {
- char fh_raw[NFS4_FHSIZE];
- struct {
- u8 fh_version; /* == 1 */
- u8 fh_auth_type; /* deprecated */
- u8 fh_fsid_type;
- u8 fh_fileid_type;
- u32 fh_fsid[]; /* flexible-array member */
- };
- };
+ u8 fh_raw[NFS4_FHSIZE];
};
+#define fh_version fh_raw[0]
+#define fh_auth_type fh_raw[1]
+#define fh_fsid_type fh_raw[2]
+#define fh_fileid_type fh_raw[3]
+
+static inline u32 *fh_fsid(const struct knfsd_fh *fh)
+{
+ return (u32 *)&fh->fh_raw[4];
+}
+
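Editor's note: with the union gone, the fsid words live at byte offset 4 of fh_raw and are reached through fh_fsid(); the nfsfh.c hunks above convert the FSID_MAJOR_MINOR decode and the mk_fsid() call to the accessor. A minimal sketch of reading the first fsid word, assuming the handle has already passed the usual length checks:

/* Sketch: read the first fsid word of a validated handle. */
static u32 example_first_fsid_word(const struct knfsd_fh *fh)
{
	const u32 *fsid = fh_fsid(fh);

	return fsid[0];	/* meaning depends on fh->fh_fsid_type */
}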
static inline __u32 ino_t_to_u32(ino_t ino)
{
return (__u32) ino;
@@ -219,6 +222,7 @@ extern char * SVCFH_fmt(struct svc_fh *fhp);
__be32 fh_verify(struct svc_rqst *, struct svc_fh *, umode_t, int);
__be32 fh_verify_local(struct net *, struct svc_cred *, struct auth_domain *,
struct svc_fh *, umode_t, int);
+__be32 fh_getattr(const struct svc_fh *fhp, struct kstat *stat);
__be32 fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *);
__be32 fh_update(struct svc_fh *);
void fh_put(struct svc_fh *);
@@ -260,14 +264,51 @@ static inline bool fh_match(const struct knfsd_fh *fh1,
static inline bool fh_fsid_match(const struct knfsd_fh *fh1,
const struct knfsd_fh *fh2)
{
+ u32 *fsid1 = fh_fsid(fh1);
+ u32 *fsid2 = fh_fsid(fh2);
+
if (fh1->fh_fsid_type != fh2->fh_fsid_type)
return false;
- if (memcmp(fh1->fh_fsid, fh2->fh_fsid, key_len(fh1->fh_fsid_type)) != 0)
+ if (memcmp(fsid1, fsid2, key_len(fh1->fh_fsid_type)) != 0)
return false;
return true;
}
-#ifdef CONFIG_CRC32
+/**
+ * fh_want_write - Get write access to an export
+ * @fhp: File handle of file to be written
+ *
+ * Caller must invoke fh_drop_write() when its write operation
+ * is complete.
+ *
+ * Returns 0 if the file handle's export can be written to. Otherwise
+ * the export is not prepared for updates, and the returned negative
+ * errno value reflects the reason for the failure.
+ */
+static inline int fh_want_write(struct svc_fh *fhp)
+{
+ int ret;
+
+ if (fhp->fh_want_write)
+ return 0;
+ ret = mnt_want_write(fhp->fh_export->ex_path.mnt);
+ if (!ret)
+ fhp->fh_want_write = true;
+ return ret;
+}
+
+/**
+ * fh_drop_write - Release write access on an export
+ * @fhp: File handle of file on which fh_want_write() was previously called
+ */
+static inline void fh_drop_write(struct svc_fh *fhp)
+{
+ if (fhp->fh_want_write) {
+ fhp->fh_want_write = false;
+ mnt_drop_write(fhp->fh_export->ex_path.mnt);
+ }
+}
+
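Editor's note: the fh_want_write()/fh_drop_write() pair added here wraps mnt_want_write()/mnt_drop_write() and tracks fhp->fh_want_write, so write access on the export is taken and released at most once per handle. A minimal usage sketch around a mutation, with a hypothetical caller name:

/* Sketch: bracket a namespace-modifying operation with export write access. */
static __be32 example_mutate(struct svc_fh *fhp)
{
	int ret = fh_want_write(fhp);

	if (ret)
		return nfserrno(ret);

	/* ... perform the VFS mutation on fhp->fh_dentry ... */

	fh_drop_write(fhp);
	return nfs_ok;
}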
/**
* knfsd_fh_hash - calculate the crc32 hash for the filehandle
* @fh - pointer to filehandle
@@ -279,12 +320,6 @@ static inline u32 knfsd_fh_hash(const struct knfsd_fh *fh)
{
return ~crc32_le(0xFFFFFFFF, fh->fh_raw, fh->fh_size);
}
-#else
-static inline u32 knfsd_fh_hash(const struct knfsd_fh *fh)
-{
- return 0;
-}
-#endif
/**
* fh_clear_pre_post_attrs - Reset pre/post attributes
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 6dda081eb24c..481e789a7697 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -10,6 +10,7 @@
#include "cache.h"
#include "xdr.h"
#include "vfs.h"
+#include "trace.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -54,7 +55,7 @@ nfsd_proc_getattr(struct svc_rqst *rqstp)
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd_attrstat *resp = rqstp->rq_resp;
- dprintk("nfsd: GETATTR %s\n", SVCFH_fmt(&argp->fh));
+ trace_nfsd_vfs_getattr(rqstp, &argp->fh);
fh_copy(&resp->fh, &argp->fh);
resp->status = fh_verify(rqstp, &resp->fh, 0,
@@ -211,7 +212,7 @@ nfsd_proc_read(struct svc_rqst *rqstp)
SVCFH_fmt(&argp->fh),
argp->count, argp->offset);
- argp->count = min_t(u32, argp->count, NFSSVC_MAXBLKSIZE_V2);
+ argp->count = min_t(u32, argp->count, NFS_MAXDATA);
argp->count = min_t(u32, argp->count, rqstp->rq_res.buflen);
resp->pages = rqstp->rq_next_page;
@@ -250,17 +251,14 @@ nfsd_proc_write(struct svc_rqst *rqstp)
struct nfsd_writeargs *argp = rqstp->rq_argp;
struct nfsd_attrstat *resp = rqstp->rq_resp;
unsigned long cnt = argp->len;
- unsigned int nvecs;
dprintk("nfsd: WRITE %s %u bytes at %d\n",
SVCFH_fmt(&argp->fh),
argp->len, argp->offset);
- nvecs = svc_fill_write_vector(rqstp, &argp->payload);
-
- resp->status = nfsd_write(rqstp, fh_copy(&resp->fh, &argp->fh),
- argp->offset, rqstp->rq_vec, nvecs,
- &cnt, NFS_DATA_SYNC, NULL);
+ fh_copy(&resp->fh, &argp->fh);
+ resp->status = nfsd_write(rqstp, &resp->fh, argp->offset,
+ &argp->payload, &cnt, NFS_DATA_SYNC, NULL);
if (resp->status == nfs_ok)
resp->status = fh_getattr(&resp->fh, &resp->stat);
else if (resp->status == nfserr_jukebox)
@@ -292,9 +290,6 @@ nfsd_proc_create(struct svc_rqst *rqstp)
int hosterr;
dev_t rdev = 0, wanted = new_decode_dev(attr->ia_size);
- dprintk("nfsd: CREATE %s %.*s\n",
- SVCFH_fmt(dirfhp), argp->len, argp->name);
-
/* First verify the parent file handle */
resp->status = fh_verify(rqstp, dirfhp, S_IFDIR, NFSD_MAY_EXEC);
if (resp->status != nfs_ok)
@@ -311,17 +306,16 @@ nfsd_proc_create(struct svc_rqst *rqstp)
goto done;
}
- inode_lock_nested(dirfhp->fh_dentry->d_inode, I_MUTEX_PARENT);
- dchild = lookup_one_len(argp->name, dirfhp->fh_dentry, argp->len);
+ dchild = start_creating(&nop_mnt_idmap, dirfhp->fh_dentry,
+ &QSTR_LEN(argp->name, argp->len));
if (IS_ERR(dchild)) {
resp->status = nfserrno(PTR_ERR(dchild));
- goto out_unlock;
+ goto out_write;
}
fh_init(newfhp, NFS_FHSIZE);
resp->status = fh_compose(newfhp, dirfhp->fh_export, dchild, dirfhp);
if (!resp->status && d_really_is_negative(dchild))
resp->status = nfserr_noent;
- dput(dchild);
if (resp->status) {
if (resp->status != nfserr_noent)
goto out_unlock;
@@ -331,7 +325,7 @@ nfsd_proc_create(struct svc_rqst *rqstp)
*/
resp->status = nfserr_acces;
if (!newfhp->fh_dentry) {
- printk(KERN_WARNING
+ printk(KERN_WARNING
"nfsd_proc_create: file handle not verified\n");
goto out_unlock;
}
@@ -413,6 +407,9 @@ nfsd_proc_create(struct svc_rqst *rqstp)
/* File doesn't exist. Create it and set attrs */
resp->status = nfsd_create_locked(rqstp, dirfhp, &attrs, type,
rdev, newfhp);
+ /* nfsd_create_locked() unlocked the parent */
+ dput(dchild);
+ goto out_write;
} else if (type == S_IFREG) {
dprintk("nfsd: existing %s, valid=%x, size=%ld\n",
argp->name, attr->ia_valid, (long) attr->ia_size);
@@ -427,7 +424,8 @@ nfsd_proc_create(struct svc_rqst *rqstp)
}
out_unlock:
- inode_unlock(dirfhp->fh_dentry->d_inode);
+ end_creating(dchild);
+out_write:
fh_drop_write(dirfhp);
done:
fh_put(dirfhp);
@@ -445,9 +443,6 @@ nfsd_proc_remove(struct svc_rqst *rqstp)
struct nfsd_diropargs *argp = rqstp->rq_argp;
struct nfsd_stat *resp = rqstp->rq_resp;
- dprintk("nfsd: REMOVE %s %.*s\n", SVCFH_fmt(&argp->fh),
- argp->len, argp->name);
-
/* Unlink. -SIFDIR means file must not be a directory */
resp->status = nfsd_unlink(rqstp, &argp->fh, -S_IFDIR,
argp->name, argp->len);
@@ -462,11 +457,6 @@ nfsd_proc_rename(struct svc_rqst *rqstp)
struct nfsd_renameargs *argp = rqstp->rq_argp;
struct nfsd_stat *resp = rqstp->rq_resp;
- dprintk("nfsd: RENAME %s %.*s -> \n",
- SVCFH_fmt(&argp->ffh), argp->flen, argp->fname);
- dprintk("nfsd: -> %s %.*s\n",
- SVCFH_fmt(&argp->tfh), argp->tlen, argp->tname);
-
resp->status = nfsd_rename(rqstp, &argp->ffh, argp->fname, argp->flen,
&argp->tfh, argp->tname, argp->tlen);
fh_put(&argp->ffh);
@@ -481,13 +471,6 @@ nfsd_proc_link(struct svc_rqst *rqstp)
struct nfsd_linkargs *argp = rqstp->rq_argp;
struct nfsd_stat *resp = rqstp->rq_resp;
- dprintk("nfsd: LINK %s ->\n",
- SVCFH_fmt(&argp->ffh));
- dprintk("nfsd: %s %.*s\n",
- SVCFH_fmt(&argp->tfh),
- argp->tlen,
- argp->tname);
-
resp->status = nfsd_link(rqstp, &argp->tfh, argp->tname, argp->tlen,
&argp->ffh);
fh_put(&argp->ffh);
@@ -519,10 +502,6 @@ nfsd_proc_symlink(struct svc_rqst *rqstp)
goto out;
}
- dprintk("nfsd: SYMLINK %s %.*s -> %.*s\n",
- SVCFH_fmt(&argp->ffh), argp->flen, argp->fname,
- argp->tlen, argp->tname);
-
fh_init(&newfh, NFS_FHSIZE);
resp->status = nfsd_symlink(rqstp, &argp->ffh, argp->fname, argp->flen,
argp->tname, &attrs, &newfh);
@@ -548,8 +527,6 @@ nfsd_proc_mkdir(struct svc_rqst *rqstp)
.na_iattr = &argp->attrs,
};
- dprintk("nfsd: MKDIR %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name);
-
if (resp->fh.fh_dentry) {
printk(KERN_WARNING
"nfsd_proc_mkdir: response already verified??\n");
@@ -578,8 +555,6 @@ nfsd_proc_rmdir(struct svc_rqst *rqstp)
struct nfsd_diropargs *argp = rqstp->rq_argp;
struct nfsd_stat *resp = rqstp->rq_resp;
- dprintk("nfsd: RMDIR %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name);
-
resp->status = nfsd_unlink(rqstp, &argp->fh, S_IFDIR,
argp->name, argp->len);
fh_put(&argp->fh);
@@ -602,7 +577,7 @@ static void nfsd_init_dirlist_pages(struct svc_rqst *rqstp,
buf->pages = rqstp->rq_next_page;
rqstp->rq_next_page++;
- xdr_init_encode_pages(xdr, buf, buf->pages, NULL);
+ xdr_init_encode_pages(xdr, buf);
}
/*
@@ -615,9 +590,7 @@ nfsd_proc_readdir(struct svc_rqst *rqstp)
struct nfsd_readdirres *resp = rqstp->rq_resp;
loff_t offset;
- dprintk("nfsd: READDIR %s %d bytes at %d\n",
- SVCFH_fmt(&argp->fh),
- argp->count, argp->cookie);
+ trace_nfsd_vfs_readdir(rqstp, &argp->fh, argp->count, argp->cookie);
nfsd_init_dirlist_pages(rqstp, resp, argp->count);
@@ -642,8 +615,6 @@ nfsd_proc_statfs(struct svc_rqst *rqstp)
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd_statfsres *resp = rqstp->rq_resp;
- dprintk("nfsd: STATFS %s\n", SVCFH_fmt(&argp->fh));
-
resp->status = nfsd_statfs(rqstp, &argp->fh, &resp->stats,
NFSD_MAY_BYPASS_GSS_ON_ROOT);
fh_put(&argp->fh);
@@ -739,7 +710,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_argzero = sizeof(struct nfsd_readargs),
.pc_ressize = sizeof(struct nfsd_readres),
.pc_cachetype = RC_NOCACHE,
- .pc_xdrressize = ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4,
+ .pc_xdrressize = ST+AT+1+NFS_MAXDATA/4,
.pc_name = "READ",
},
[NFSPROC_WRITECACHE] = {
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 49e2f32102ab..b08ae85d53ef 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -70,16 +70,6 @@ static __be32 nfsd_init_request(struct svc_rqst *,
*/
DEFINE_MUTEX(nfsd_mutex);
-/*
- * nfsd_drc_lock protects nfsd_drc_max_pages and nfsd_drc_pages_used.
- * nfsd_drc_max_pages limits the total amount of memory available for
- * version 4.1 DRC caches.
- * nfsd_drc_pages_used tracks the current version 4.1 DRC memory usage.
- */
-DEFINE_SPINLOCK(nfsd_drc_lock);
-unsigned long nfsd_drc_max_mem;
-unsigned long nfsd_drc_mem_used;
-
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
static const struct svc_version *localio_versions[] = {
[1] = &localio_version1,
@@ -214,32 +204,32 @@ int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change
return 0;
}
-bool nfsd_serv_try_get(struct net *net) __must_hold(rcu)
+bool nfsd_net_try_get(struct net *net) __must_hold(rcu)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- return (nn && percpu_ref_tryget_live(&nn->nfsd_serv_ref));
+ return (nn && percpu_ref_tryget_live(&nn->nfsd_net_ref));
}
-void nfsd_serv_put(struct net *net) __must_hold(rcu)
+void nfsd_net_put(struct net *net) __must_hold(rcu)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- percpu_ref_put(&nn->nfsd_serv_ref);
+ percpu_ref_put(&nn->nfsd_net_ref);
}
-static void nfsd_serv_done(struct percpu_ref *ref)
+static void nfsd_net_done(struct percpu_ref *ref)
{
- struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref);
+ struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_net_ref);
- complete(&nn->nfsd_serv_confirm_done);
+ complete(&nn->nfsd_net_confirm_done);
}
-static void nfsd_serv_free(struct percpu_ref *ref)
+static void nfsd_net_free(struct percpu_ref *ref)
{
- struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref);
+ struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_net_ref);
- complete(&nn->nfsd_serv_free_done);
+ complete(&nn->nfsd_net_free_done);
}
/*
@@ -259,27 +249,6 @@ int nfsd_nrthreads(struct net *net)
return rv;
}
-static int nfsd_init_socks(struct net *net, const struct cred *cred)
-{
- int error;
- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
-
- if (!list_empty(&nn->nfsd_serv->sv_permsocks))
- return 0;
-
- error = svc_xprt_create(nn->nfsd_serv, "udp", net, PF_INET, NFS_PORT,
- SVC_SOCK_DEFAULTS, cred);
- if (error < 0)
- return error;
-
- error = svc_xprt_create(nn->nfsd_serv, "tcp", net, PF_INET, NFS_PORT,
- SVC_SOCK_DEFAULTS, cred);
- if (error < 0)
- return error;
-
- return 0;
-}
-
static int nfsd_users = 0;
static int nfsd_startup_generic(void)
@@ -387,9 +356,12 @@ static int nfsd_startup_net(struct net *net, const struct cred *cred)
ret = nfsd_startup_generic();
if (ret)
return ret;
- ret = nfsd_init_socks(net, cred);
- if (ret)
+
+ if (list_empty(&nn->nfsd_serv->sv_permsocks)) {
+ pr_warn("NFSD: Failed to start, no listeners configured.\n");
+ ret = -EIO;
goto out_socks;
+ }
if (nfsd_needs_lockd(nn) && !nn->lockd_up) {
ret = lockd_up(net, cred);
@@ -406,13 +378,13 @@ static int nfsd_startup_net(struct net *net, const struct cred *cred)
if (ret)
goto out_filecache;
+#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+ nfsd4_ssc_init_umount_work(nn);
+#endif
ret = nfs4_state_start_net(net);
if (ret)
goto out_reply_cache;
-#ifdef CONFIG_NFSD_V4_2_INTER_SSC
- nfsd4_ssc_init_umount_work(nn);
-#endif
nn->nfsd_net_up = true;
return 0;
@@ -436,6 +408,10 @@ static void nfsd_shutdown_net(struct net *net)
if (!nn->nfsd_net_up)
return;
+
+ percpu_ref_kill_and_confirm(&nn->nfsd_net_ref, nfsd_net_done);
+ wait_for_completion(&nn->nfsd_net_confirm_done);
+
nfsd_export_flush(net);
nfs4_state_shutdown_net(net);
nfsd_reply_cache_shutdown(nn);
@@ -444,7 +420,10 @@ static void nfsd_shutdown_net(struct net *net)
lockd_down(net);
nn->lockd_up = false;
}
- percpu_ref_exit(&nn->nfsd_serv_ref);
+
+ wait_for_completion(&nn->nfsd_net_free_done);
+ percpu_ref_exit(&nn->nfsd_net_ref);
+
nn->nfsd_net_up = false;
nfsd_shutdown_generic();
}
@@ -526,11 +505,6 @@ void nfsd_destroy_serv(struct net *net)
lockdep_assert_held(&nfsd_mutex);
- percpu_ref_kill_and_confirm(&nn->nfsd_serv_ref, nfsd_serv_done);
- wait_for_completion(&nn->nfsd_serv_confirm_done);
- wait_for_completion(&nn->nfsd_serv_free_done);
- /* percpu_ref_exit is called in nfsd_shutdown_net */
-
spin_lock(&nfsd_notifier_lock);
nn->nfsd_serv = NULL;
spin_unlock(&nfsd_notifier_lock);
@@ -543,16 +517,13 @@ void nfsd_destroy_serv(struct net *net)
#endif
}
- svc_xprt_destroy_all(serv, net);
-
/*
* write_ports can create the server without actually starting
- * any threads--if we get shut down before any threads are
+ * any threads. If we get shut down before any threads are
* started, then nfsd_destroy_serv will be run before any of this
* other initialization has been done except the rpcb information.
*/
- svc_rpcb_cleanup(serv, net);
-
+ svc_xprt_destroy_all(serv, net, true);
nfsd_shutdown_net(net);
svc_destroy(&serv);
}
@@ -575,27 +546,6 @@ void nfsd_reset_versions(struct nfsd_net *nn)
}
}
-/*
- * Each session guarantees a negotiated per slot memory cache for replies
- * which in turn consumes memory beyond the v2/v3/v4.0 server. A dedicated
- * NFSv4.1 server might want to use more memory for a DRC than a machine
- * with mutiple services.
- *
- * Impose a hard limit on the number of pages for the DRC which varies
- * according to the machines free pages. This is of course only a default.
- *
- * For now this is a #defined shift which could be under admin control
- * in the future.
- */
-static void set_max_drc(void)
-{
- #define NFSD_DRC_SIZE_SHIFT 7
- nfsd_drc_max_mem = (nr_free_buffer_pages()
- >> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE;
- nfsd_drc_mem_used = 0;
- dprintk("%s nfsd_drc_max_mem %lu \n", __func__, nfsd_drc_max_mem);
-}
-
static int nfsd_get_default_max_blksize(void)
{
struct sysinfo i;
@@ -611,7 +561,7 @@ static int nfsd_get_default_max_blksize(void)
*/
target >>= 12;
- ret = NFSSVC_MAXBLKSIZE;
+ ret = NFSSVC_DEFBLKSIZE;
while (ret > target && ret >= 8*1024*2)
ret /= 2;
return ret;
@@ -652,12 +602,12 @@ int nfsd_create_serv(struct net *net)
if (nn->nfsd_serv)
return 0;
- error = percpu_ref_init(&nn->nfsd_serv_ref, nfsd_serv_free,
+ error = percpu_ref_init(&nn->nfsd_net_ref, nfsd_net_free,
0, GFP_KERNEL);
if (error)
return error;
- init_completion(&nn->nfsd_serv_free_done);
- init_completion(&nn->nfsd_serv_confirm_done);
+ init_completion(&nn->nfsd_net_free_done);
+ init_completion(&nn->nfsd_net_confirm_done);
if (nfsd_max_blksize == 0)
nfsd_max_blksize = nfsd_get_default_max_blksize();
@@ -668,7 +618,6 @@ int nfsd_create_serv(struct net *net)
if (serv == NULL)
return -ENOMEM;
- serv->sv_maxconn = nn->max_connections;
error = svc_bind(serv, net);
if (error < 0) {
svc_destroy(&serv);
@@ -678,7 +627,6 @@ int nfsd_create_serv(struct net *net)
nn->nfsd_serv = serv;
spin_unlock(&nfsd_notifier_lock);
- set_max_drc();
/* check if the notifier is already set */
if (atomic_inc_return(&nfsd_notifier_refcount) == 1) {
register_inetaddr_notifier(&nfsd_inetaddr_notifier);
@@ -954,11 +902,7 @@ nfsd(void *vrqstp)
* The main request loop
*/
while (!svc_thread_should_stop(rqstp)) {
- /* Update sv_maxconn if it has changed */
- rqstp->rq_server->sv_maxconn = nn->max_connections;
-
svc_recv(rqstp);
-
nfsd_file_net_dispose(nn);
}
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 5777f40c7353..fc262ceafca9 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -336,7 +336,7 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
/* opaque data */
if (xdr_stream_decode_u32(xdr, &args->len) < 0)
return false;
- if (args->len > NFSSVC_MAXBLKSIZE_V2)
+ if (args->len > NFS_MAXDATA)
return false;
return xdr_stream_subsegment(xdr, &args->payload, args->len);
@@ -540,7 +540,7 @@ nfssvc_encode_statfsres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
p = xdr_reserve_space(xdr, XDR_UNIT * 5);
if (!p)
return false;
- *p++ = cpu_to_be32(NFSSVC_MAXBLKSIZE_V2);
+ *p++ = cpu_to_be32(NFS_MAXDATA);
*p++ = cpu_to_be32(stat->f_bsize);
*p++ = cpu_to_be32(stat->f_blocks);
*p++ = cpu_to_be32(stat->f_bfree);
diff --git a/fs/nfsd/pnfs.h b/fs/nfsd/pnfs.h
index 925817f66917..db9af780438b 100644
--- a/fs/nfsd/pnfs.h
+++ b/fs/nfsd/pnfs.h
@@ -29,12 +29,13 @@ struct nfsd4_layout_ops {
__be32 (*encode_getdeviceinfo)(struct xdr_stream *xdr,
const struct nfsd4_getdeviceinfo *gdevp);
- __be32 (*proc_layoutget)(struct inode *, const struct svc_fh *fhp,
- struct nfsd4_layoutget *lgp);
+ __be32 (*proc_layoutget)(struct svc_rqst *rqstp, struct inode *inode,
+ const struct svc_fh *fhp, struct nfsd4_layoutget *lgp);
__be32 (*encode_layoutget)(struct xdr_stream *xdr,
const struct nfsd4_layoutget *lgp);
__be32 (*proc_layoutcommit)(struct inode *inode,
+ struct svc_rqst *rqstp,
struct nfsd4_layoutcommit *lcp);
void (*fence_client)(struct nfs4_layout_stateid *ls,
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index e16bb3717fb9..b052c1effdc5 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -35,6 +35,7 @@
#ifndef _NFSD4_STATE_H
#define _NFSD4_STATE_H
+#include <crypto/md5.h>
#include <linux/idr.h>
#include <linux/refcount.h>
#include <linux/sunrpc/svc_xprt.h>
@@ -64,15 +65,36 @@ typedef struct {
refcount_t cs_count;
} copy_stateid_t;
+struct nfsd4_referring_call {
+ struct list_head __list;
+
+ u32 rc_sequenceid;
+ u32 rc_slotid;
+};
+
+struct nfsd4_referring_call_list {
+ struct list_head __list;
+
+ struct nfs4_sessionid rcl_sessionid;
+ int __nr_referring_calls;
+ struct list_head rcl_referring_calls;
+};
+
struct nfsd4_callback {
struct nfs4_client *cb_clp;
struct rpc_message cb_msg;
+#define NFSD4_CALLBACK_RUNNING (0)
+#define NFSD4_CALLBACK_WAKE (1)
+#define NFSD4_CALLBACK_REQUEUE (2)
+ unsigned long cb_flags;
const struct nfsd4_callback_ops *cb_ops;
struct work_struct cb_work;
int cb_seq_status;
int cb_status;
int cb_held_slot;
- bool cb_need_restart;
+
+ int cb_nr_referring_call_list;
+ struct list_head cb_referring_call_list;
};
struct nfsd4_callback_ops {
@@ -159,16 +181,14 @@ struct nfs4_cb_fattr {
/* from CB_GETATTR reply */
u64 ncf_cb_change;
u64 ncf_cb_fsize;
+ struct timespec64 ncf_cb_mtime;
+ struct timespec64 ncf_cb_atime;
- unsigned long ncf_cb_flags;
bool ncf_file_modified;
u64 ncf_initial_cinfo;
u64 ncf_cur_fsize;
};
-/* bits for ncf_cb_flags */
-#define CB_GETATTR_BUSY 0
-
/*
* Represents a delegation stateid. The nfs4_client holds references to these
* and they are put when it is being destroyed or when the delegation is
@@ -196,17 +216,43 @@ struct nfs4_delegation {
struct list_head dl_perclnt;
struct list_head dl_recall_lru; /* delegation recalled */
struct nfs4_clnt_odstate *dl_clnt_odstate;
- u32 dl_type;
time64_t dl_time;
-/* For recall: */
+ u32 dl_type;
+ /* For recall: */
int dl_retries;
struct nfsd4_callback dl_recall;
bool dl_recalled;
+ bool dl_written;
+ bool dl_setattr;
/* for CB_GETATTR */
struct nfs4_cb_fattr dl_cb_fattr;
+
+ /* For delegated timestamps */
+ struct timespec64 dl_atime;
+ struct timespec64 dl_mtime;
+ struct timespec64 dl_ctime;
};
+static inline bool deleg_is_read(u32 dl_type)
+{
+ return (dl_type == OPEN_DELEGATE_READ || dl_type == OPEN_DELEGATE_READ_ATTRS_DELEG);
+}
+
+static inline bool deleg_is_write(u32 dl_type)
+{
+ return (dl_type == OPEN_DELEGATE_WRITE || dl_type == OPEN_DELEGATE_WRITE_ATTRS_DELEG);
+}
+
+static inline bool deleg_attrs_deleg(u32 dl_type)
+{
+ return dl_type == OPEN_DELEGATE_READ_ATTRS_DELEG ||
+ dl_type == OPEN_DELEGATE_WRITE_ATTRS_DELEG;
+}
+
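Editor's note: deleg_is_read(), deleg_is_write() and deleg_attrs_deleg() let callers test the delegation class instead of comparing dl_type against all four OPEN_DELEGATE_* values now that the *_ATTRS_DELEG variants exist. A minimal sketch of branching on them (hypothetical function, pr_debug output only):

/* Sketch: classify a delegation via the type helpers rather than raw values. */
static void example_classify(const struct nfs4_delegation *dp)
{
	if (deleg_is_write(dp->dl_type))
		pr_debug("write delegation%s\n",
			 deleg_attrs_deleg(dp->dl_type) ? " with attr delegation" : "");
	else if (deleg_is_read(dp->dl_type))
		pr_debug("read delegation%s\n",
			 deleg_attrs_deleg(dp->dl_type) ? " with attr delegation" : "");
}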
+bool nfsd4_vet_deleg_time(struct timespec64 *cb, const struct timespec64 *orig,
+ const struct timespec64 *now);
+
#define cb_to_delegation(cb) \
container_of(cb, struct nfs4_delegation, dl_recall)
@@ -227,8 +273,11 @@ static inline struct nfs4_delegation *delegstateid(struct nfs4_stid *s)
return container_of(s, struct nfs4_delegation, dl_stid);
}
-/* Maximum number of slots per session. 160 is useful for long haul TCP */
-#define NFSD_MAX_SLOTS_PER_SESSION 160
+/* Maximum number of slots per session. This is for sanity-check only.
+ * It could be increased if we had a mechanism to shutdown misbehaving clients.
+ * A large number can be needed to get good throughput on high-latency servers.
+ */
+#define NFSD_MAX_SLOTS_PER_SESSION 2048
/* Maximum session per slot cache size */
#define NFSD_SLOT_CACHE_SIZE 2048
/* Maximum number of NFSD_SLOT_CACHE_SIZE slots per session */
@@ -240,12 +289,15 @@ struct nfsd4_slot {
u32 sl_seqid;
__be32 sl_status;
struct svc_cred sl_cred;
+ u32 sl_index;
u32 sl_datalen;
u16 sl_opcnt;
+ u16 sl_generation;
#define NFSD4_SLOT_INUSE (1 << 0)
#define NFSD4_SLOT_CACHETHIS (1 << 1)
#define NFSD4_SLOT_INITIALIZED (1 << 2)
#define NFSD4_SLOT_CACHED (1 << 3)
+#define NFSD4_SLOT_REUSED (1 << 4)
u8 sl_flags;
char sl_data[];
};
@@ -318,16 +370,19 @@ struct nfsd4_session {
u32 se_cb_slot_avail; /* bitmap of available slots */
u32 se_cb_highest_slot; /* highest slot client wants */
u32 se_cb_prog;
- bool se_dead;
struct list_head se_hash; /* hash by sessionid */
struct list_head se_perclnt;
+ struct list_head se_all_sessions;/* global list of sessions */
struct nfs4_client *se_client;
struct nfs4_sessionid se_sessionid;
struct nfsd4_channel_attrs se_fchannel;
struct nfsd4_cb_sec se_cb_sec;
struct list_head se_conns;
u32 se_cb_seq_nr[NFSD_BC_SLOT_TABLE_SIZE];
- struct nfsd4_slot *se_slots[]; /* forward channel slots */
+ struct xarray se_slots; /* forward channel slots */
+ u16 se_slot_gen;
+ bool se_dead;
+ u32 se_target_maxslots;
};
/* formatted contents of nfs4_sessionid */
@@ -337,7 +392,8 @@ struct nfsd4_sessionid {
u32 reserved;
};
-#define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */
+/* Length of MD5 digest as hex, plus terminating '\0' */
+#define HEXDIR_LEN (2 * MD5_DIGEST_SIZE + 1)
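Editor's note: the rewritten constant is equivalent to the old literal: MD5_DIGEST_SIZE is 16 bytes, each byte encodes as two hex characters, and one byte is reserved for the terminating '\0', so 2 * 16 + 1 = 33. A build-time check along these lines (my own sketch, assuming state.h is already in scope; not part of the patch) would catch any drift:

#include <crypto/md5.h>
#include <linux/build_bug.h>

/* Sketch: assert the rewritten constant still matches the historical 33. */
static inline void example_hexdir_len_check(void)
{
	BUILD_BUG_ON(HEXDIR_LEN != 33);
}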
/*
* State Meaning Where set
@@ -426,7 +482,6 @@ struct nfs4_client {
#define NFSD4_CLIENT_UPCALL_LOCK (5) /* upcall serialization */
#define NFSD4_CLIENT_CB_FLAG_MASK (1 << NFSD4_CLIENT_CB_UPDATE | \
1 << NFSD4_CLIENT_CB_KILL)
-#define NFSD4_CLIENT_CB_RECALL_ANY (6)
unsigned long cl_flags;
struct workqueue_struct *cl_callback_wq;
@@ -472,7 +527,6 @@ struct nfs4_client {
struct nfsd4_cb_recall_any *cl_ra;
time64_t cl_ra_time;
- struct list_head cl_ra_cblist;
};
/* struct nfs4_client_reset
@@ -505,7 +559,7 @@ struct nfs4_replay {
unsigned int rp_buflen;
char *rp_buf;
struct knfsd_fh rp_openfh;
- atomic_t rp_locked;
+ int rp_locked;
char rp_ibuf[NFSD4_REPLAY_ISIZE];
};
@@ -623,6 +677,7 @@ struct nfs4_file {
atomic_t fi_access[2];
u32 fi_share_deny;
struct nfsd_file *fi_deleg_file;
+ struct nfsd_file *fi_rdeleg_file;
int fi_delegees;
struct knfsd_fh fi_fhandle;
bool fi_had_conflict;
@@ -751,9 +806,20 @@ extern __be32 nfs4_check_open_reclaim(struct nfs4_client *);
extern void nfsd4_probe_callback(struct nfs4_client *clp);
extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
+extern void nfsd41_cb_referring_call(struct nfsd4_callback *cb,
+ struct nfs4_sessionid *sessionid,
+ u32 slotid, u32 seqno);
+extern void nfsd41_cb_destroy_referring_call_list(struct nfsd4_callback *cb);
extern void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op);
extern bool nfsd4_run_cb(struct nfsd4_callback *cb);
+
+static inline void nfsd4_try_run_cb(struct nfsd4_callback *cb)
+{
+ if (!test_and_set_bit(NFSD4_CALLBACK_RUNNING, &cb->cb_flags))
+ WARN_ON_ONCE(!nfsd4_run_cb(cb));
+}
+
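Editor's note: nfsd4_try_run_cb() queues the callback only when it atomically wins the NFSD4_CALLBACK_RUNNING bit, so concurrent callers cannot double-queue the same nfsd4_callback. A minimal sketch of the same test_and_set_bit gating, with hypothetical names:

#include <linux/bitops.h>
#include <linux/workqueue.h>

#define EXAMPLE_RUNNING	0	/* hypothetical flag bit */

/* Sketch: run-once gate -- only the caller that flips the bit queues work. */
static void example_try_queue(unsigned long *flags, struct work_struct *work)
{
	if (!test_and_set_bit(EXAMPLE_RUNNING, flags))
		schedule_work(work);
}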
extern void nfsd4_shutdown_callback(struct nfs4_client *);
extern void nfsd4_shutdown_copy(struct nfs4_client *clp);
void nfsd4_async_copy_reaper(struct nfsd_net *nn);
@@ -801,4 +867,9 @@ static inline bool try_to_expire_client(struct nfs4_client *clp)
extern __be32 nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp,
struct dentry *dentry, struct nfs4_delegation **pdp);
+
+struct nfsd4_get_dir_delegation;
+struct nfs4_delegation *nfsd_get_dir_deleg(struct nfsd4_compound_state *cstate,
+ struct nfsd4_get_dir_delegation *gdd,
+ struct nfsd_file *nf);
#endif /* NFSD4_STATE_H */
diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
index bb22893f1157..f7eaf95e20fc 100644
--- a/fs/nfsd/stats.c
+++ b/fs/nfsd/stats.c
@@ -73,11 +73,11 @@ static int nfsd_show(struct seq_file *seq, void *v)
DEFINE_PROC_SHOW_ATTRIBUTE(nfsd);
-void nfsd_proc_stat_init(struct net *net)
+struct proc_dir_entry *nfsd_proc_stat_init(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops);
+ return svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops);
}
void nfsd_proc_stat_shutdown(struct net *net)
diff --git a/fs/nfsd/stats.h b/fs/nfsd/stats.h
index 04aacb6c36e2..e4efb0e4e56d 100644
--- a/fs/nfsd/stats.h
+++ b/fs/nfsd/stats.h
@@ -10,7 +10,7 @@
#include <uapi/linux/nfsd/stats.h>
#include <linux/percpu_counter.h>
-void nfsd_proc_stat_init(struct net *net);
+struct proc_dir_entry *nfsd_proc_stat_init(struct net *net);
void nfsd_proc_stat_shutdown(struct net *net);
static inline void nfsd_stats_rc_hits_inc(struct nfsd_net *nn)
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index 696c89f68a9e..5ae2a611e57f 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -11,6 +11,7 @@
#include <linux/tracepoint.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>
+#include <trace/misc/fs.h>
#include <trace/misc/nfs.h>
#include <trace/misc/sunrpc.h>
@@ -18,22 +19,40 @@
#include "nfsfh.h"
#include "xdr4.h"
-#define NFSD_TRACE_PROC_RES_FIELDS \
+#define NFSD_TRACE_PROC_CALL_FIELDS(r) \
+ __field(unsigned int, netns_ino) \
+ __field(u32, xid) \
+ __sockaddr(server, (r)->rq_xprt->xpt_locallen) \
+ __sockaddr(client, (r)->rq_xprt->xpt_remotelen)
+
+#define NFSD_TRACE_PROC_CALL_ASSIGNMENTS(r) \
+ do { \
+ struct svc_xprt *xprt = (r)->rq_xprt; \
+ __entry->netns_ino = SVC_NET(r)->ns.inum; \
+ __entry->xid = be32_to_cpu((r)->rq_xid); \
+ __assign_sockaddr(server, &xprt->xpt_local, \
+ xprt->xpt_locallen); \
+ __assign_sockaddr(client, &xprt->xpt_remote, \
+ xprt->xpt_remotelen); \
+ } while (0)
+
+#define NFSD_TRACE_PROC_RES_FIELDS(r) \
__field(unsigned int, netns_ino) \
__field(u32, xid) \
__field(unsigned long, status) \
- __array(unsigned char, server, sizeof(struct sockaddr_in6)) \
- __array(unsigned char, client, sizeof(struct sockaddr_in6))
+ __sockaddr(server, (r)->rq_xprt->xpt_locallen) \
+ __sockaddr(client, (r)->rq_xprt->xpt_remotelen)
-#define NFSD_TRACE_PROC_RES_ASSIGNMENTS(error) \
+#define NFSD_TRACE_PROC_RES_ASSIGNMENTS(r, error) \
do { \
- __entry->netns_ino = SVC_NET(rqstp)->ns.inum; \
- __entry->xid = be32_to_cpu(rqstp->rq_xid); \
+ struct svc_xprt *xprt = (r)->rq_xprt; \
+ __entry->netns_ino = SVC_NET(r)->ns.inum; \
+ __entry->xid = be32_to_cpu((r)->rq_xid); \
__entry->status = be32_to_cpu(error); \
- memcpy(__entry->server, &rqstp->rq_xprt->xpt_local, \
- rqstp->rq_xprt->xpt_locallen); \
- memcpy(__entry->client, &rqstp->rq_xprt->xpt_remote, \
- rqstp->rq_xprt->xpt_remotelen); \
+ __assign_sockaddr(server, &xprt->xpt_local, \
+ xprt->xpt_locallen); \
+ __assign_sockaddr(client, &xprt->xpt_remote, \
+ xprt->xpt_remotelen); \
} while (0);
DECLARE_EVENT_CLASS(nfsd_xdr_err_class,
@@ -145,14 +164,14 @@ TRACE_EVENT(nfsd_compound_decode_err,
),
TP_ARGS(rqstp, args_opcnt, resp_opcnt, opnum, status),
TP_STRUCT__entry(
- NFSD_TRACE_PROC_RES_FIELDS
+ NFSD_TRACE_PROC_RES_FIELDS(rqstp)
__field(u32, args_opcnt)
__field(u32, resp_opcnt)
__field(u32, opnum)
),
TP_fast_assign(
- NFSD_TRACE_PROC_RES_ASSIGNMENTS(status)
+ NFSD_TRACE_PROC_RES_ASSIGNMENTS(rqstp, status)
__entry->args_opcnt = args_opcnt;
__entry->resp_opcnt = resp_opcnt;
@@ -171,12 +190,12 @@ DECLARE_EVENT_CLASS(nfsd_compound_err_class,
),
TP_ARGS(rqstp, opnum, status),
TP_STRUCT__entry(
- NFSD_TRACE_PROC_RES_FIELDS
+ NFSD_TRACE_PROC_RES_FIELDS(rqstp)
__field(u32, opnum)
),
TP_fast_assign(
- NFSD_TRACE_PROC_RES_ASSIGNMENTS(status)
+ NFSD_TRACE_PROC_RES_ASSIGNMENTS(rqstp, status)
__entry->opnum = opnum;
),
@@ -325,7 +344,7 @@ TRACE_EVENT(nfsd_exp_find_key,
int status),
TP_ARGS(key, status),
TP_STRUCT__entry(
- __field(int, fsidtype)
+ __field(u8, fsidtype)
__array(u32, fsid, 6)
__string(auth_domain, key->ek_client->name)
__field(int, status)
@@ -348,7 +367,7 @@ TRACE_EVENT(nfsd_expkey_update,
TP_PROTO(const struct svc_expkey *key, const char *exp_path),
TP_ARGS(key, exp_path),
TP_STRUCT__entry(
- __field(int, fsidtype)
+ __field(u8, fsidtype)
__array(u32, fsid, 6)
__string(auth_domain, key->ek_client->name)
__string(path, exp_path)
@@ -445,12 +464,17 @@ DEFINE_EVENT(nfsd_io_class, nfsd_##name, \
DEFINE_NFSD_IO_EVENT(read_start);
DEFINE_NFSD_IO_EVENT(read_splice);
DEFINE_NFSD_IO_EVENT(read_vector);
+DEFINE_NFSD_IO_EVENT(read_direct);
DEFINE_NFSD_IO_EVENT(read_io_done);
DEFINE_NFSD_IO_EVENT(read_done);
DEFINE_NFSD_IO_EVENT(write_start);
DEFINE_NFSD_IO_EVENT(write_opened);
+DEFINE_NFSD_IO_EVENT(write_direct);
+DEFINE_NFSD_IO_EVENT(write_vector);
DEFINE_NFSD_IO_EVENT(write_io_done);
DEFINE_NFSD_IO_EVENT(write_done);
+DEFINE_NFSD_IO_EVENT(commit_start);
+DEFINE_NFSD_IO_EVENT(commit_done);
DECLARE_EVENT_CLASS(nfsd_err_class,
TP_PROTO(struct svc_rqst *rqstp,
@@ -626,7 +650,6 @@ DEFINE_STATEID_EVENT(open);
DEFINE_STATEID_EVENT(deleg_read);
DEFINE_STATEID_EVENT(deleg_write);
DEFINE_STATEID_EVENT(deleg_return);
-DEFINE_STATEID_EVENT(deleg_recall);
DECLARE_EVENT_CLASS(nfsd_stateseqid_class,
TP_PROTO(u32 seqid, const stateid_t *stp),
@@ -804,6 +827,14 @@ DEFINE_EVENT(nfsd_cs_slot_class, nfsd_##name, \
DEFINE_CS_SLOT_EVENT(slot_seqid_conf);
DEFINE_CS_SLOT_EVENT(slot_seqid_unconf);
+#define show_nfs_slot_flags(val) \
+ __print_flags(val, "|", \
+ { NFSD4_SLOT_INUSE, "INUSE" }, \
+ { NFSD4_SLOT_CACHETHIS, "CACHETHIS" }, \
+ { NFSD4_SLOT_INITIALIZED, "INITIALIZED" }, \
+ { NFSD4_SLOT_CACHED, "CACHED" }, \
+ { NFSD4_SLOT_REUSED, "REUSED" })
+
TRACE_EVENT(nfsd_slot_seqid_sequence,
TP_PROTO(
const struct nfs4_client *clp,
@@ -814,10 +845,11 @@ TRACE_EVENT(nfsd_slot_seqid_sequence,
TP_STRUCT__entry(
__field(u32, seqid)
__field(u32, slot_seqid)
+ __field(u32, slot_index)
+ __field(unsigned long, slot_flags)
__field(u32, cl_boot)
__field(u32, cl_id)
__sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
- __field(bool, in_use)
),
TP_fast_assign(
__entry->cl_boot = clp->cl_clientid.cl_boot;
@@ -826,11 +858,13 @@ TRACE_EVENT(nfsd_slot_seqid_sequence,
clp->cl_cb_conn.cb_addrlen);
__entry->seqid = seq->seqid;
__entry->slot_seqid = slot->sl_seqid;
+ __entry->slot_index = seq->slotid;
+ __entry->slot_flags = slot->sl_flags;
),
- TP_printk("addr=%pISpc client %08x:%08x seqid=%u slot_seqid=%u (%sin use)",
+ TP_printk("addr=%pISpc client %08x:%08x idx=%u seqid=%u slot_seqid=%u flags=%s",
__get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
- __entry->seqid, __entry->slot_seqid,
- __entry->in_use ? "" : "not "
+ __entry->slot_index, __entry->seqid, __entry->slot_seqid,
+ show_nfs_slot_flags(__entry->slot_flags)
)
);
@@ -1040,6 +1074,7 @@ DEFINE_CLID_EVENT(confirmed_r);
{ 1 << NFSD_FILE_HASHED, "HASHED" }, \
{ 1 << NFSD_FILE_PENDING, "PENDING" }, \
{ 1 << NFSD_FILE_REFERENCED, "REFERENCED" }, \
+ { 1 << NFSD_FILE_RECENT, "RECENT" }, \
{ 1 << NFSD_FILE_GC, "GC" })
DECLARE_EVENT_CLASS(nfsd_file_class,
@@ -1076,7 +1111,6 @@ DEFINE_NFSD_FILE_EVENT(nfsd_file_free);
DEFINE_NFSD_FILE_EVENT(nfsd_file_unhash);
DEFINE_NFSD_FILE_EVENT(nfsd_file_put);
DEFINE_NFSD_FILE_EVENT(nfsd_file_closing);
-DEFINE_NFSD_FILE_EVENT(nfsd_file_unhash_and_queue);
TRACE_EVENT(nfsd_file_alloc,
TP_PROTO(
@@ -1102,6 +1136,33 @@ TRACE_EVENT(nfsd_file_alloc,
)
);
+TRACE_EVENT(nfsd_file_get_dio_attrs,
+ TP_PROTO(
+ const struct inode *inode,
+ const struct kstat *stat
+ ),
+ TP_ARGS(inode, stat),
+ TP_STRUCT__entry(
+ __field(const void *, inode)
+ __field(unsigned long, mask)
+ __field(u32, mem_align)
+ __field(u32, offset_align)
+ __field(u32, read_offset_align)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->mask = stat->result_mask;
+ __entry->mem_align = stat->dio_mem_align;
+ __entry->offset_align = stat->dio_offset_align;
+ __entry->read_offset_align = stat->dio_read_offset_align;
+ ),
+ TP_printk("inode=%p flags=%s mem_align=%u offset_align=%u read_offset_align=%u",
+ __entry->inode, show_statx_mask(__entry->mask),
+ __entry->mem_align, __entry->offset_align,
+ __entry->read_offset_align
+ )
+);
+
TRACE_EVENT(nfsd_file_acquire,
TP_PROTO(
const struct svc_rqst *rqstp,
@@ -1312,12 +1373,11 @@ DEFINE_EVENT(nfsd_file_gc_class, name, \
TP_ARGS(nf))
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_add);
-DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_add_disposed);
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del);
-DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del_disposed);
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_in_use);
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_writeback);
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_referenced);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_aged);
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_disposed);
DECLARE_EVENT_CLASS(nfsd_file_lruwalk_class,
@@ -1603,7 +1663,7 @@ DECLARE_EVENT_CLASS(nfsd_cb_lifetime_class,
__entry->cl_id = clp->cl_clientid.cl_id;
__entry->cb = cb;
__entry->opcode = cb->cb_ops ? cb->cb_ops->opcode : _CB_NULL;
- __entry->need_restart = cb->cb_need_restart;
+ __entry->need_restart = test_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags);
__assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
clp->cl_cb_conn.cb_addrlen)
),
@@ -2069,25 +2129,6 @@ TRACE_EVENT(nfsd_ctl_maxblksize,
)
);
-TRACE_EVENT(nfsd_ctl_maxconn,
- TP_PROTO(
- const struct net *net,
- int maxconn
- ),
- TP_ARGS(net, maxconn),
- TP_STRUCT__entry(
- __field(unsigned int, netns_ino)
- __field(int, maxconn)
- ),
- TP_fast_assign(
- __entry->netns_ino = net->ns.inum;
- __entry->maxconn = maxconn;
- ),
- TP_printk("maxconn=%d",
- __entry->maxconn
- )
-);
-
TRACE_EVENT(nfsd_ctl_time,
TP_PROTO(
const struct net *net,
@@ -2322,6 +2363,297 @@ DEFINE_EVENT(nfsd_copy_async_done_class, \
DEFINE_COPY_ASYNC_DONE_EVENT(done);
DEFINE_COPY_ASYNC_DONE_EVENT(cancel);
+TRACE_EVENT(nfsd_vfs_setattr,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct svc_fh *fhp,
+ const struct iattr *iap,
+ const struct timespec64 *guardtime
+ ),
+ TP_ARGS(rqstp, fhp, iap, guardtime),
+ TP_STRUCT__entry(
+ NFSD_TRACE_PROC_CALL_FIELDS(rqstp)
+ __field(u32, fh_hash)
+ __field(s64, gtime_tv_sec)
+ __field(u32, gtime_tv_nsec)
+ __field(unsigned int, ia_valid)
+ __field(loff_t, ia_size)
+ __field(uid_t, ia_uid)
+ __field(gid_t, ia_gid)
+ __field(umode_t, ia_mode)
+ ),
+ TP_fast_assign(
+ NFSD_TRACE_PROC_CALL_ASSIGNMENTS(rqstp);
+ __entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
+ __entry->gtime_tv_sec = guardtime ? guardtime->tv_sec : 0;
+ __entry->gtime_tv_nsec = guardtime ? guardtime->tv_nsec : 0;
+ __entry->ia_valid = iap->ia_valid;
+ __entry->ia_size = iap->ia_size;
+ __entry->ia_uid = __kuid_val(iap->ia_uid);
+ __entry->ia_gid = __kgid_val(iap->ia_gid);
+ __entry->ia_mode = iap->ia_mode;
+ ),
+ TP_printk(
+ "xid=0x%08x fh_hash=0x%08x ia_valid=%s ia_size=%llu ia_mode=0%o ia_uid=%u ia_gid=%u guard_time=%lld.%u",
+ __entry->xid, __entry->fh_hash, show_ia_valid_flags(__entry->ia_valid),
+ __entry->ia_size, __entry->ia_mode, __entry->ia_uid, __entry->ia_gid,
+ __entry->gtime_tv_sec, __entry->gtime_tv_nsec
+ )
+)
+
+TRACE_EVENT(nfsd_vfs_lookup,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct svc_fh *fhp,
+ const char *name,
+ unsigned int len
+ ),
+ TP_ARGS(rqstp, fhp, name, len),
+ TP_STRUCT__entry(
+ NFSD_TRACE_PROC_CALL_FIELDS(rqstp)
+ __field(u32, fh_hash)
+ __string_len(name, name, len)
+ ),
+ TP_fast_assign(
+ NFSD_TRACE_PROC_CALL_ASSIGNMENTS(rqstp);
+ __entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
+ __assign_str(name);
+ ),
+ TP_printk("xid=0x%08x fh_hash=0x%08x name=%s",
+ __entry->xid, __entry->fh_hash, __get_str(name)
+ )
+);
+
+TRACE_EVENT(nfsd_vfs_create,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct svc_fh *fhp,
+ umode_t type,
+ const char *name,
+ unsigned int len
+ ),
+ TP_ARGS(rqstp, fhp, type, name, len),
+ TP_STRUCT__entry(
+ NFSD_TRACE_PROC_CALL_FIELDS(rqstp)
+ __field(u32, fh_hash)
+ __field(umode_t, type)
+ __string_len(name, name, len)
+ ),
+ TP_fast_assign(
+ NFSD_TRACE_PROC_CALL_ASSIGNMENTS(rqstp);
+ __entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
+ __entry->type = type;
+ __assign_str(name);
+ ),
+ TP_printk("xid=0x%08x fh_hash=0x%08x type=%s name=%s",
+ __entry->xid, __entry->fh_hash,
+ show_fs_file_type(__entry->type), __get_str(name)
+ )
+);
+
+TRACE_EVENT(nfsd_vfs_symlink,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct svc_fh *fhp,
+ const char *name,
+ unsigned int namelen,
+ const char *target
+ ),
+ TP_ARGS(rqstp, fhp, name, namelen, target),
+ TP_STRUCT__entry(
+ NFSD_TRACE_PROC_CALL_FIELDS(rqstp)
+ __field(u32, fh_hash)
+ __string_len(name, name, namelen)
+ __string(target, target)
+ ),
+ TP_fast_assign(
+ NFSD_TRACE_PROC_CALL_ASSIGNMENTS(rqstp);
+ __entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
+ __assign_str(name);
+ __assign_str(target);
+ ),
+ TP_printk("xid=0x%08x fh_hash=0x%08x name=%s target=%s",
+ __entry->xid, __entry->fh_hash,
+ __get_str(name), __get_str(target)
+ )
+);
+
+TRACE_EVENT(nfsd_vfs_link,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct svc_fh *sfhp,
+ const struct svc_fh *tfhp,
+ const char *name,
+ unsigned int namelen
+ ),
+ TP_ARGS(rqstp, sfhp, tfhp, name, namelen),
+ TP_STRUCT__entry(
+ NFSD_TRACE_PROC_CALL_FIELDS(rqstp)
+ __field(u32, sfh_hash)
+ __field(u32, tfh_hash)
+ __string_len(name, name, namelen)
+ ),
+ TP_fast_assign(
+ NFSD_TRACE_PROC_CALL_ASSIGNMENTS(rqstp);
+ __entry->sfh_hash = knfsd_fh_hash(&sfhp->fh_handle);
+ __entry->tfh_hash = knfsd_fh_hash(&tfhp->fh_handle);
+ __assign_str(name);
+ ),
+ TP_printk("xid=0x%08x src_fh=0x%08x tgt_fh=0x%08x name=%s",
+ __entry->xid, __entry->sfh_hash, __entry->tfh_hash,
+ __get_str(name)
+ )
+);
+
+TRACE_EVENT(nfsd_vfs_unlink,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct svc_fh *fhp,
+ const char *name,
+ unsigned int len
+ ),
+ TP_ARGS(rqstp, fhp, name, len),
+ TP_STRUCT__entry(
+ NFSD_TRACE_PROC_CALL_FIELDS(rqstp)
+ __field(u32, fh_hash)
+ __string_len(name, name, len)
+ ),
+ TP_fast_assign(
+ NFSD_TRACE_PROC_CALL_ASSIGNMENTS(rqstp);
+ __entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
+ __assign_str(name);
+ ),
+ TP_printk("xid=0x%08x fh_hash=0x%08x name=%s",
+ __entry->xid, __entry->fh_hash,
+ __get_str(name)
+ )
+);
+
+TRACE_EVENT(nfsd_vfs_rename,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct svc_fh *sfhp,
+ const struct svc_fh *tfhp,
+ const char *source,
+ unsigned int sourcelen,
+ const char *target,
+ unsigned int targetlen
+ ),
+ TP_ARGS(rqstp, sfhp, tfhp, source, sourcelen, target, targetlen),
+ TP_STRUCT__entry(
+ NFSD_TRACE_PROC_CALL_FIELDS(rqstp)
+ __field(u32, sfh_hash)
+ __field(u32, tfh_hash)
+ __string_len(source, source, sourcelen)
+ __string_len(target, target, targetlen)
+ ),
+ TP_fast_assign(
+ NFSD_TRACE_PROC_CALL_ASSIGNMENTS(rqstp);
+ __entry->sfh_hash = knfsd_fh_hash(&sfhp->fh_handle);
+ __entry->tfh_hash = knfsd_fh_hash(&tfhp->fh_handle);
+ __assign_str(source);
+ __assign_str(target);
+ ),
+ TP_printk("xid=0x%08x sfh_hash=0x%08x tfh_hash=0x%08x source=%s target=%s",
+ __entry->xid, __entry->sfh_hash, __entry->tfh_hash,
+ __get_str(source), __get_str(target)
+ )
+);
+
+TRACE_EVENT(nfsd_vfs_readdir,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct svc_fh *fhp,
+ u32 count,
+ u64 offset
+ ),
+ TP_ARGS(rqstp, fhp, count, offset),
+ TP_STRUCT__entry(
+ NFSD_TRACE_PROC_CALL_FIELDS(rqstp)
+ __field(u32, fh_hash)
+ __field(u32, count)
+ __field(u64, offset)
+ ),
+ TP_fast_assign(
+ NFSD_TRACE_PROC_CALL_ASSIGNMENTS(rqstp);
+ __entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
+ __entry->count = count;
+ __entry->offset = offset;
+ ),
+ TP_printk("xid=0x%08x fh_hash=0x%08x offset=%llu count=%u",
+ __entry->xid, __entry->fh_hash,
+ __entry->offset, __entry->count
+ )
+);
+
+DECLARE_EVENT_CLASS(nfsd_vfs_getattr_class,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct svc_fh *fhp
+ ),
+ TP_ARGS(rqstp, fhp),
+ TP_STRUCT__entry(
+ NFSD_TRACE_PROC_CALL_FIELDS(rqstp)
+ __field(u32, fh_hash)
+ ),
+ TP_fast_assign(
+ NFSD_TRACE_PROC_CALL_ASSIGNMENTS(rqstp);
+ __entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
+ ),
+ TP_printk("xid=0x%08x fh_hash=0x%08x",
+ __entry->xid, __entry->fh_hash
+ )
+);
+
+#define DEFINE_NFSD_VFS_GETATTR_EVENT(__name) \
+DEFINE_EVENT(nfsd_vfs_getattr_class, __name, \
+ TP_PROTO( \
+ const struct svc_rqst *rqstp, \
+ const struct svc_fh *fhp \
+ ), \
+ TP_ARGS(rqstp, fhp))
+
+DEFINE_NFSD_VFS_GETATTR_EVENT(nfsd_vfs_getattr);
+DEFINE_NFSD_VFS_GETATTR_EVENT(nfsd_vfs_statfs);
+
+DECLARE_EVENT_CLASS(nfsd_pnfs_class,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ const char *dev,
+ int error
+ ),
+ TP_ARGS(clp, dev, error),
+ TP_STRUCT__entry(
+ __sockaddr(addr, sizeof(struct sockaddr_in6))
+ __field(unsigned int, netns_ino)
+ __string(dev, dev)
+ __field(int, error)
+ ),
+ TP_fast_assign(
+ __assign_sockaddr(addr, &clp->cl_addr,
+ sizeof(struct sockaddr_in6));
+ __entry->netns_ino = clp->net->ns.inum;
+ __assign_str(dev);
+ __entry->error = error;
+ ),
+ TP_printk("client=%pISpc nn=%d dev=%s error=%d",
+ __get_sockaddr(addr),
+ __entry->netns_ino,
+ __get_str(dev),
+ __entry->error
+ )
+);
+
+#define DEFINE_NFSD_PNFS_ERR_EVENT(name) \
+DEFINE_EVENT(nfsd_pnfs_class, nfsd_pnfs_##name, \
+ TP_PROTO( \
+ const struct nfs4_client *clp, \
+ const char *dev, \
+ int error \
+ ), \
+ TP_ARGS(clp, dev, error))
+
+DEFINE_NFSD_PNFS_ERR_EVENT(fence);
#endif /* _NFSD_TRACE_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 29cb7b812d71..964cf922ad83 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -31,6 +31,7 @@
#include <linux/exportfs.h>
#include <linux/writeback.h>
#include <linux/security.h>
+#include <linux/sunrpc/xdr.h>
#include "xdr3.h"
@@ -47,6 +48,10 @@
#define NFSDDBG_FACILITY NFSDDBG_FILEOP
+bool nfsd_disable_splice_read __read_mostly;
+u64 nfsd_io_cache_read __read_mostly = NFSD_IO_BUFFERED;
+u64 nfsd_io_cache_write __read_mostly = NFSD_IO_BUFFERED;
+
/**
* nfserrno - Map Linux errnos to NFS errnos
* @errno: POSIX(-ish) error code to be mapped
@@ -71,7 +76,6 @@ nfserrno (int errno)
{ nfserr_acces, -EACCES },
{ nfserr_exist, -EEXIST },
{ nfserr_xdev, -EXDEV },
- { nfserr_mlink, -EMLINK },
{ nfserr_nodev, -ENODEV },
{ nfserr_notdir, -ENOTDIR },
{ nfserr_isdir, -EISDIR },
@@ -245,7 +249,7 @@ nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct dentry *dentry;
int host_err;
- dprintk("nfsd: nfsd_lookup(fh %s, %.*s)\n", SVCFH_fmt(fhp), len,name);
+ trace_nfsd_vfs_lookup(rqstp, fhp, name, len);
dparent = fhp->fh_dentry;
exp = exp_get(fhp->fh_export);
@@ -265,7 +269,8 @@ nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out_nfserr;
}
} else {
- dentry = lookup_one_len_unlocked(name, dparent, len);
+ dentry = lookup_one_unlocked(&nop_mnt_idmap,
+ &QSTR_LEN(name, len), dparent);
host_err = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_nfserr;
@@ -464,10 +469,18 @@ static int __nfsd_setattr(struct dentry *dentry, struct iattr *iap)
return 0;
}
- if (!iap->ia_valid)
+ if ((iap->ia_valid & ~ATTR_DELEG) == 0)
return 0;
- iap->ia_valid |= ATTR_CTIME;
+ /*
+ * If ATTR_DELEG is set, then this is an update from a client that
+ * holds a delegation. If this is an update for only the atime, the
+ * ctime should not be changed. If the update contains the mtime
+ * too, then ATTR_CTIME should already be set.
+ */
+ if (!(iap->ia_valid & ATTR_DELEG))
+ iap->ia_valid |= ATTR_CTIME;
+
return notify_change(&nop_mnt_idmap, dentry, iap, NULL);
}
@@ -500,6 +513,8 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
bool size_change = (iap->ia_valid & ATTR_SIZE);
int retries;
+ trace_nfsd_vfs_setattr(rqstp, fhp, iap, guardtime);
+
if (iap->ia_valid & ATTR_SIZE) {
accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
ftype = S_IFREG;
@@ -923,7 +938,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
* directories, but we never have and it doesn't seem to have
* caused anyone a problem. If we were to change this, note
* also that our filldir callbacks would need a variant of
- * lookup_one_len that doesn't check permissions.
+ * lookup_one_positive_unlocked() that doesn't check permissions.
*/
if (type == S_IFREG)
may_flags |= NFSD_MAY_OWNER_OVERRIDE;
@@ -944,15 +959,16 @@ retry:
/**
* nfsd_open_verified - Open a regular file for the filecache
* @fhp: NFS filehandle of the file to open
+ * @type: S_IFMT inode type allowed (0 means any type is allowed)
* @may_flags: internal permission flags
* @filp: OUT: open "struct file *"
*
* Returns zero on success, or a negative errno value.
*/
int
-nfsd_open_verified(struct svc_fh *fhp, int may_flags, struct file **filp)
+nfsd_open_verified(struct svc_fh *fhp, umode_t type, int may_flags, struct file **filp)
{
- return __nfsd_open(fhp, S_IFREG, may_flags, filp);
+ return __nfsd_open(fhp, type, may_flags, filp);
}
/*
@@ -1059,11 +1075,88 @@ __be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
}
+/*
+ * The byte range of the client's READ request is expanded on both ends
+ * until it meets the underlying file system's direct I/O alignment
+ * requirements. After the internal read is complete, the byte range of
+ * the NFS READ payload is reduced to the byte range that was originally
+ * requested.
+ *
+ * Note that a direct read can be done only when the xdr_buf containing
+ * the NFS READ reply does not already have contents in its .pages array.
+ * This is due to potentially restrictive alignment requirements on the
+ * read buffer. When .page_len and @base are zero, the .pages array is
+ * guaranteed to be page-aligned.
+ */
+static noinline_for_stack __be32
+nfsd_direct_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ struct nfsd_file *nf, loff_t offset, unsigned long *count,
+ u32 *eof)
+{
+ u64 dio_start, dio_end;
+ unsigned long v, total;
+ struct iov_iter iter;
+ struct kiocb kiocb;
+ ssize_t host_err;
+ size_t len;
+
+ init_sync_kiocb(&kiocb, nf->nf_file);
+ kiocb.ki_flags |= IOCB_DIRECT;
+
+ /* Read a properly-aligned region of bytes into rq_bvec */
+ dio_start = round_down(offset, nf->nf_dio_read_offset_align);
+ dio_end = round_up((u64)offset + *count, nf->nf_dio_read_offset_align);
+
+ kiocb.ki_pos = dio_start;
+
+ v = 0;
+ total = dio_end - dio_start;
+ while (total && v < rqstp->rq_maxpages &&
+ rqstp->rq_next_page < rqstp->rq_page_end) {
+ len = min_t(size_t, total, PAGE_SIZE);
+ bvec_set_page(&rqstp->rq_bvec[v], *rqstp->rq_next_page,
+ len, 0);
+
+ total -= len;
+ ++rqstp->rq_next_page;
+ ++v;
+ }
+
+ trace_nfsd_read_direct(rqstp, fhp, offset, *count - total);
+ iov_iter_bvec(&iter, ITER_DEST, rqstp->rq_bvec, v,
+ dio_end - dio_start - total);
+
+ host_err = vfs_iocb_iter_read(nf->nf_file, &kiocb, &iter);
+ if (host_err >= 0) {
+ unsigned int pad = offset - dio_start;
+
+ /* The returned payload starts after the pad */
+ rqstp->rq_res.page_base = pad;
+
+ /* Compute the count of bytes to be returned */
+ if (host_err > pad + *count)
+ host_err = *count;
+ else if (host_err > pad)
+ host_err -= pad;
+ else
+ host_err = 0;
+ } else if (unlikely(host_err == -EINVAL)) {
+ struct inode *inode = d_inode(fhp->fh_dentry);
+
+ pr_info_ratelimited("nfsd: Direct I/O alignment failure on %s/%ld\n",
+ inode->i_sb->s_id, inode->i_ino);
+ host_err = -ESERVERFAULT;
+ }
+
+ return nfsd_finish_read(rqstp, fhp, nf->nf_file, offset, count,
+ eof, host_err);
+}
+
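For reference, the alignment expansion performed at the top of nfsd_direct_read() can be reproduced with ordinary integer arithmetic. The stand-alone C sketch below uses a hypothetical 4096-byte nf_dio_read_offset_align and a made-up READ request; it only illustrates the round_down/round_up/pad computation, not the kernel I/O path.

#include <stdint.h>
#include <stdio.h>

/* Both helpers assume a power-of-two alignment, as direct I/O does. */
#define ROUND_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))
#define ROUND_UP(x, a)		ROUND_DOWN((x) + (a) - 1, (a))

int main(void)
{
	uint64_t align = 4096;		/* hypothetical nf_dio_read_offset_align */
	uint64_t offset = 10000;	/* client's READ offset (example) */
	uint64_t count = 3000;		/* client's READ count (example) */

	uint64_t dio_start = ROUND_DOWN(offset, align);
	uint64_t dio_end = ROUND_UP(offset + count, align);
	uint64_t pad = offset - dio_start;

	/* Prints: dio range [8192, 16384) len=8192 pad=1808 */
	printf("dio range [%llu, %llu) len=%llu pad=%llu\n",
	       (unsigned long long)dio_start, (unsigned long long)dio_end,
	       (unsigned long long)(dio_end - dio_start),
	       (unsigned long long)pad);
	return 0;
}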
/**
* nfsd_iter_read - Perform a VFS read using an iterator
* @rqstp: RPC transaction context
* @fhp: file handle of file to be read
- * @file: opened struct file of file to be read
+ * @nf: opened struct nfsd_file of file to be read
* @offset: starting byte offset
* @count: IN: requested number of bytes; OUT: number of bytes read
* @base: offset in first page of read buffer
@@ -1076,30 +1169,52 @@ __be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
* returned.
*/
__be32 nfsd_iter_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
- struct file *file, loff_t offset, unsigned long *count,
+ struct nfsd_file *nf, loff_t offset, unsigned long *count,
unsigned int base, u32 *eof)
{
+ struct file *file = nf->nf_file;
unsigned long v, total;
struct iov_iter iter;
- loff_t ppos = offset;
- struct page *page;
+ struct kiocb kiocb;
ssize_t host_err;
+ size_t len;
+
+ init_sync_kiocb(&kiocb, file);
+
+ switch (nfsd_io_cache_read) {
+ case NFSD_IO_BUFFERED:
+ break;
+ case NFSD_IO_DIRECT:
+ /* When dio_read_offset_align is zero, dio is not supported */
+ if (nf->nf_dio_read_offset_align && !rqstp->rq_res.page_len)
+ return nfsd_direct_read(rqstp, fhp, nf, offset,
+ count, eof);
+ fallthrough;
+ case NFSD_IO_DONTCACHE:
+ if (file->f_op->fop_flags & FOP_DONTCACHE)
+ kiocb.ki_flags |= IOCB_DONTCACHE;
+ break;
+ }
+
+ kiocb.ki_pos = offset;
v = 0;
total = *count;
- while (total) {
- page = *(rqstp->rq_next_page++);
- rqstp->rq_vec[v].iov_base = page_address(page) + base;
- rqstp->rq_vec[v].iov_len = min_t(size_t, total, PAGE_SIZE - base);
- total -= rqstp->rq_vec[v].iov_len;
+ while (total && v < rqstp->rq_maxpages &&
+ rqstp->rq_next_page < rqstp->rq_page_end) {
+ len = min_t(size_t, total, PAGE_SIZE - base);
+ bvec_set_page(&rqstp->rq_bvec[v], *rqstp->rq_next_page,
+ len, base);
+
+ total -= len;
+ ++rqstp->rq_next_page;
++v;
base = 0;
}
- WARN_ON_ONCE(v > ARRAY_SIZE(rqstp->rq_vec));
- trace_nfsd_read_vector(rqstp, fhp, offset, *count);
- iov_iter_kvec(&iter, ITER_DEST, rqstp->rq_vec, v, *count);
- host_err = vfs_iter_read(file, &iter, &ppos, 0);
+ trace_nfsd_read_vector(rqstp, fhp, offset, *count - total);
+ iov_iter_bvec(&iter, ITER_DEST, rqstp->rq_bvec, v, *count - total);
+ host_err = vfs_iocb_iter_read(file, &kiocb, &iter);
return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
}
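The loop above carves the requested byte range into page-sized bvec entries, with only the first entry carrying a non-zero page offset. A minimal user-space sketch of that carving follows; EX_PAGE_SIZE and the sample range are illustrative, and no real pages are involved.

#include <stddef.h>
#include <stdio.h>

#define EX_PAGE_SIZE 4096u	/* stand-in for PAGE_SIZE */

int main(void)
{
	size_t base = 100;	/* offset into the first page (example) */
	size_t total = 10000;	/* requested byte count (example) */
	unsigned int v = 0;

	while (total) {
		size_t len = total < EX_PAGE_SIZE - base ?
			     total : EX_PAGE_SIZE - base;

		printf("bvec[%u]: page_offset=%zu len=%zu\n", v, base, len);
		total -= len;
		base = 0;	/* only the first entry has a non-zero offset */
		v++;
	}
	/* Prints three entries: (100, 3996), (0, 4096), (0, 1908) */
	return 0;
}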
@@ -1131,7 +1246,7 @@ static int wait_for_concurrent_writes(struct file *file)
dprintk("nfsd: write resume %d\n", task_pid_nr(current));
}
- if (inode->i_state & I_DIRTY) {
+ if (inode_state_read_once(inode) & I_DIRTY) {
dprintk("nfsd: write sync %d\n", task_pid_nr(current));
err = vfs_fsync(file, 0);
}
@@ -1140,25 +1255,171 @@ static int wait_for_concurrent_writes(struct file *file)
return err;
}
+struct nfsd_write_dio_seg {
+ struct iov_iter iter;
+ int flags;
+};
+
+static unsigned long
+iov_iter_bvec_offset(const struct iov_iter *iter)
+{
+ return (unsigned long)(iter->bvec->bv_offset + iter->iov_offset);
+}
+
+static void
+nfsd_write_dio_seg_init(struct nfsd_write_dio_seg *segment,
+ struct bio_vec *bvec, unsigned int nvecs,
+ unsigned long total, size_t start, size_t len,
+ struct kiocb *iocb)
+{
+ iov_iter_bvec(&segment->iter, ITER_SOURCE, bvec, nvecs, total);
+ if (start)
+ iov_iter_advance(&segment->iter, start);
+ iov_iter_truncate(&segment->iter, len);
+ segment->flags = iocb->ki_flags;
+}
+
+static unsigned int
+nfsd_write_dio_iters_init(struct nfsd_file *nf, struct bio_vec *bvec,
+ unsigned int nvecs, struct kiocb *iocb,
+ unsigned long total,
+ struct nfsd_write_dio_seg segments[3])
+{
+ u32 offset_align = nf->nf_dio_offset_align;
+ loff_t prefix_end, orig_end, middle_end;
+ u32 mem_align = nf->nf_dio_mem_align;
+ size_t prefix, middle, suffix;
+ loff_t offset = iocb->ki_pos;
+ unsigned int nsegs = 0;
+
+ /*
+ * Check if direct I/O is feasible for this write request.
+ * If alignments are not available, the write is too small,
+ * or no alignment can be found, fall back to buffered I/O.
+ */
+ if (unlikely(!mem_align || !offset_align) ||
+ unlikely(total < max(offset_align, mem_align)))
+ goto no_dio;
+
+ prefix_end = round_up(offset, offset_align);
+ orig_end = offset + total;
+ middle_end = round_down(orig_end, offset_align);
+
+ prefix = prefix_end - offset;
+ middle = middle_end - prefix_end;
+ suffix = orig_end - middle_end;
+
+ if (!middle)
+ goto no_dio;
+
+ if (prefix)
+ nfsd_write_dio_seg_init(&segments[nsegs++], bvec,
+ nvecs, total, 0, prefix, iocb);
+
+ nfsd_write_dio_seg_init(&segments[nsegs], bvec, nvecs,
+ total, prefix, middle, iocb);
+
+ /*
+ * Check if the bvec iterator is aligned for direct I/O.
+ *
+ * bvecs generated from RPC receive buffers are contiguous: After
+ * the first bvec, all subsequent bvecs start at bv_offset zero
+ * (page-aligned). Therefore, only the first bvec is checked.
+ */
+ if (iov_iter_bvec_offset(&segments[nsegs].iter) & (mem_align - 1))
+ goto no_dio;
+ segments[nsegs].flags |= IOCB_DIRECT;
+ nsegs++;
+
+ if (suffix)
+ nfsd_write_dio_seg_init(&segments[nsegs++], bvec, nvecs, total,
+ prefix + middle, suffix, iocb);
+
+ return nsegs;
+
+no_dio:
+ /* No DIO alignment possible - pack into single non-DIO segment. */
+ nfsd_write_dio_seg_init(&segments[0], bvec, nvecs, total, 0,
+ total, iocb);
+ return 1;
+}
+
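The prefix/middle/suffix split computed by nfsd_write_dio_iters_init() can be illustrated with plain arithmetic. In this sketch the 4096-byte offset alignment and the WRITE range are invented values; only the aligned middle segment would be a candidate for IOCB_DIRECT, while the unaligned edges stay buffered.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t offset_align = 4096;	/* hypothetical nf_dio_offset_align */
	uint64_t offset = 5000;		/* WRITE offset (example) */
	uint64_t total = 20000;		/* WRITE length (example) */

	uint64_t prefix_end = (offset + offset_align - 1) & ~(offset_align - 1);
	uint64_t orig_end = offset + total;
	uint64_t middle_end = orig_end & ~(offset_align - 1);

	uint64_t prefix = prefix_end - offset;		/* buffered */
	uint64_t middle = middle_end - prefix_end;	/* direct I/O candidate */
	uint64_t suffix = orig_end - middle_end;	/* buffered */

	/* Prints: prefix=3192 middle=16384 suffix=424 (sums to 20000) */
	printf("prefix=%llu middle=%llu suffix=%llu\n",
	       (unsigned long long)prefix, (unsigned long long)middle,
	       (unsigned long long)suffix);
	return 0;
}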
+static noinline_for_stack int
+nfsd_direct_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ struct nfsd_file *nf, unsigned int nvecs,
+ unsigned long *cnt, struct kiocb *kiocb)
+{
+ struct nfsd_write_dio_seg segments[3];
+ struct file *file = nf->nf_file;
+ unsigned int nsegs, i;
+ ssize_t host_err;
+
+ nsegs = nfsd_write_dio_iters_init(nf, rqstp->rq_bvec, nvecs,
+ kiocb, *cnt, segments);
+
+ *cnt = 0;
+ for (i = 0; i < nsegs; i++) {
+ kiocb->ki_flags = segments[i].flags;
+ if (kiocb->ki_flags & IOCB_DIRECT) {
+ trace_nfsd_write_direct(rqstp, fhp, kiocb->ki_pos,
+ segments[i].iter.count);
+ } else {
+ trace_nfsd_write_vector(rqstp, fhp, kiocb->ki_pos,
+ segments[i].iter.count);
+ /*
+ * Mark the I/O buffer as evict-able to reduce
+ * memory contention.
+ */
+ if (nf->nf_file->f_op->fop_flags & FOP_DONTCACHE)
+ kiocb->ki_flags |= IOCB_DONTCACHE;
+ }
+
+ host_err = vfs_iocb_iter_write(file, kiocb, &segments[i].iter);
+ if (host_err < 0)
+ return host_err;
+ *cnt += host_err;
+ if (host_err < segments[i].iter.count)
+ break; /* partial write */
+ }
+
+ return 0;
+}
+
+/**
+ * nfsd_vfs_write - write data to an already-open file
+ * @rqstp: RPC execution context
+ * @fhp: File handle of file to write into
+ * @nf: An open file matching @fhp
+ * @offset: Byte offset of start
+ * @payload: xdr_buf containing the write payload
+ * @cnt: IN: number of bytes to write, OUT: number of bytes actually written
+ * @stable: An NFS stable_how value
+ * @verf: NFS WRITE verifier
+ *
+ * Upon return, caller must invoke fh_put on @fhp.
+ *
+ * Return values:
+ * An nfsstat value in network byte order.
+ */
__be32
-nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
- loff_t offset, struct kvec *vec, int vlen,
- unsigned long *cnt, int stable,
- __be32 *verf)
+nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ struct nfsd_file *nf, loff_t offset,
+ const struct xdr_buf *payload, unsigned long *cnt,
+ int stable, __be32 *verf)
{
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct file *file = nf->nf_file;
struct super_block *sb = file_inode(file)->i_sb;
+ struct kiocb kiocb;
struct svc_export *exp;
struct iov_iter iter;
errseq_t since;
__be32 nfserr;
int host_err;
- loff_t pos = offset;
unsigned long exp_op_flags = 0;
unsigned int pflags = current->flags;
- rwf_t flags = 0;
bool restore_flags = false;
+ unsigned int nvecs;
trace_nfsd_write_opened(rqstp, fhp, offset, *cnt);
@@ -1182,20 +1443,48 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
if (!EX_ISSYNC(exp))
stable = NFS_UNSTABLE;
+ init_sync_kiocb(&kiocb, file);
+ kiocb.ki_pos = offset;
+ if (likely(!fhp->fh_use_wgather)) {
+ switch (stable) {
+ case NFS_FILE_SYNC:
+ /* persist data and timestamps */
+ kiocb.ki_flags |= IOCB_DSYNC | IOCB_SYNC;
+ break;
+ case NFS_DATA_SYNC:
+ /* persist data only */
+ kiocb.ki_flags |= IOCB_DSYNC;
+ break;
+ }
+ }
- if (stable && !fhp->fh_use_wgather)
- flags |= RWF_SYNC;
+ nvecs = xdr_buf_to_bvec(rqstp->rq_bvec, rqstp->rq_maxpages, payload);
- iov_iter_kvec(&iter, ITER_SOURCE, vec, vlen, *cnt);
since = READ_ONCE(file->f_wb_err);
if (verf)
nfsd_copy_write_verifier(verf, nn);
- host_err = vfs_iter_write(file, &iter, &pos, flags);
+
+ switch (nfsd_io_cache_write) {
+ case NFSD_IO_DIRECT:
+ host_err = nfsd_direct_write(rqstp, fhp, nf, nvecs,
+ cnt, &kiocb);
+ break;
+ case NFSD_IO_DONTCACHE:
+ if (file->f_op->fop_flags & FOP_DONTCACHE)
+ kiocb.ki_flags |= IOCB_DONTCACHE;
+ fallthrough;
+ case NFSD_IO_BUFFERED:
+ iov_iter_bvec(&iter, ITER_SOURCE, rqstp->rq_bvec, nvecs, *cnt);
+ host_err = vfs_iocb_iter_write(file, &kiocb, &iter);
+ if (host_err < 0)
+ break;
+ *cnt = host_err;
+ break;
+ }
if (host_err < 0) {
commit_reset_write_verifier(nn, rqstp, host_err);
goto out_nfserr;
}
- *cnt = host_err;
nfsd_stats_io_write_add(nn, exp, *cnt);
fsnotify_modify(file);
host_err = filemap_check_wb_err(file->f_mapping, since);
@@ -1237,6 +1526,8 @@ out_nfserr:
*/
bool nfsd_read_splice_ok(struct svc_rqst *rqstp)
{
+ if (nfsd_disable_splice_read)
+ return false;
switch (svc_auth_flavor(rqstp)) {
case RPC_AUTH_GSS_KRB5I:
case RPC_AUTH_GSS_KRB5P:
@@ -1277,21 +1568,31 @@ __be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (file->f_op->splice_read && nfsd_read_splice_ok(rqstp))
err = nfsd_splice_read(rqstp, fhp, file, offset, count, eof);
else
- err = nfsd_iter_read(rqstp, fhp, file, offset, count, 0, eof);
+ err = nfsd_iter_read(rqstp, fhp, nf, offset, count, 0, eof);
nfsd_file_put(nf);
trace_nfsd_read_done(rqstp, fhp, offset, *count);
return err;
}
-/*
- * Write data to a file.
- * The stable flag requests synchronous writes.
- * N.B. After this call fhp needs an fh_put
+/**
+ * nfsd_write - open a file and write data to it
+ * @rqstp: RPC execution context
+ * @fhp: File handle of file to write into; nfsd_write() may modify it
+ * @offset: Byte offset of start
+ * @payload: xdr_buf containing the write payload
+ * @cnt: IN: number of bytes to write, OUT: number of bytes actually written
+ * @stable: An NFS stable_how value
+ * @verf: NFS WRITE verifier
+ *
+ * Upon return, caller must invoke fh_put on @fhp.
+ *
+ * Return values:
+ * An nfsstat value in network byte order.
*/
__be32
nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
- struct kvec *vec, int vlen, unsigned long *cnt, int stable,
+ const struct xdr_buf *payload, unsigned long *cnt, int stable,
__be32 *verf)
{
struct nfsd_file *nf;
@@ -1303,8 +1604,8 @@ nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
if (err)
goto out;
- err = nfsd_vfs_write(rqstp, fhp, nf, offset, vec,
- vlen, cnt, stable, verf);
+ err = nfsd_vfs_write(rqstp, fhp, nf, offset, payload, cnt,
+ stable, verf);
nfsd_file_put(nf);
out:
trace_nfsd_write_done(rqstp, fhp, offset, *cnt);
@@ -1340,6 +1641,8 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
loff_t start, end;
struct nfsd_net *nn;
+ trace_nfsd_commit_start(rqstp, fhp, offset, count);
+
/*
* Convert the client-provided (offset, count) range to a
* (start, end) range. If the client-provided range falls
@@ -1378,6 +1681,7 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
} else
nfsd_copy_write_verifier(verf, nn);
+ trace_nfsd_commit_done(rqstp, fhp, offset, count);
return err;
}
@@ -1451,7 +1755,7 @@ nfsd_check_ignore_resizing(struct iattr *iap)
iap->ia_valid &= ~ATTR_SIZE;
}
-/* The parent directory should already be locked: */
+/* The parent directory should already be locked - we will unlock */
__be32
nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct nfsd_attrs *attrs,
@@ -1461,7 +1765,7 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct inode *dirp;
struct iattr *iap = attrs->na_iattr;
__be32 err;
- int host_err;
+ int host_err = 0;
dentry = fhp->fh_dentry;
dirp = d_inode(dentry);
@@ -1482,34 +1786,20 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
err = 0;
switch (type) {
case S_IFREG:
- host_err = vfs_create(&nop_mnt_idmap, dirp, dchild,
- iap->ia_mode, true);
+ host_err = vfs_create(&nop_mnt_idmap, dchild, iap->ia_mode, NULL);
if (!host_err)
nfsd_check_ignore_resizing(iap);
break;
case S_IFDIR:
- host_err = vfs_mkdir(&nop_mnt_idmap, dirp, dchild, iap->ia_mode);
- if (!host_err && unlikely(d_unhashed(dchild))) {
- struct dentry *d;
- d = lookup_one_len(dchild->d_name.name,
- dchild->d_parent,
- dchild->d_name.len);
- if (IS_ERR(d)) {
- host_err = PTR_ERR(d);
- break;
- }
- if (unlikely(d_is_negative(d))) {
- dput(d);
- err = nfserr_serverfault;
- goto out;
- }
+ dchild = vfs_mkdir(&nop_mnt_idmap, dirp, dchild, iap->ia_mode, NULL);
+ if (IS_ERR(dchild)) {
+ host_err = PTR_ERR(dchild);
+ } else if (d_is_negative(dchild)) {
+ err = nfserr_serverfault;
+ goto out;
+ } else if (unlikely(dchild != resfhp->fh_dentry)) {
dput(resfhp->fh_dentry);
- resfhp->fh_dentry = dget(d);
- err = fh_update(resfhp);
- dput(dchild);
- dchild = d;
- if (err)
- goto out;
+ resfhp->fh_dentry = dget(dchild);
}
break;
case S_IFCHR:
@@ -1517,7 +1807,7 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
case S_IFIFO:
case S_IFSOCK:
host_err = vfs_mknod(&nop_mnt_idmap, dirp, dchild,
- iap->ia_mode, rdev);
+ iap->ia_mode, rdev, NULL);
break;
default:
printk(KERN_WARNING "nfsd: bad file type %o in nfsd_create\n",
@@ -1530,7 +1820,9 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
err = nfsd_create_setattr(rqstp, fhp, resfhp, attrs);
out:
- dput(dchild);
+ if (!err)
+ fh_fill_post_attrs(fhp);
+ end_creating(dchild);
return err;
out_nfserr:
@@ -1553,6 +1845,8 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
__be32 err;
int host_err;
+ trace_nfsd_vfs_create(rqstp, fhp, type, fname, flen);
+
if (isdotent(fname, flen))
return nfserr_exist;
@@ -1566,28 +1860,24 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (host_err)
return nfserrno(host_err);
- inode_lock_nested(dentry->d_inode, I_MUTEX_PARENT);
- dchild = lookup_one_len(fname, dentry, flen);
+ dchild = start_creating(&nop_mnt_idmap, dentry, &QSTR_LEN(fname, flen));
host_err = PTR_ERR(dchild);
- if (IS_ERR(dchild)) {
- err = nfserrno(host_err);
- goto out_unlock;
- }
+ if (IS_ERR(dchild))
+ return nfserrno(host_err);
+
err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
- /*
- * We unconditionally drop our ref to dchild as fh_compose will have
- * already grabbed its own ref for it.
- */
- dput(dchild);
if (err)
goto out_unlock;
err = fh_fill_pre_attrs(fhp);
if (err != nfs_ok)
goto out_unlock;
err = nfsd_create_locked(rqstp, fhp, attrs, type, rdev, resfhp);
- fh_fill_post_attrs(fhp);
+ /* nfsd_create_locked() unlocked the parent */
+ dput(dchild);
+ return err;
+
out_unlock:
- inode_unlock(dentry->d_inode);
+ end_creating(dchild);
return err;
}
@@ -1653,6 +1943,8 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
__be32 err, cerr;
int host_err;
+ trace_nfsd_vfs_symlink(rqstp, fhp, fname, flen, path);
+
err = nfserr_noent;
if (!flen || path[0] == '\0')
goto out;
@@ -1671,37 +1963,43 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
}
dentry = fhp->fh_dentry;
- inode_lock_nested(dentry->d_inode, I_MUTEX_PARENT);
- dnew = lookup_one_len(fname, dentry, flen);
+ dnew = start_creating(&nop_mnt_idmap, dentry, &QSTR_LEN(fname, flen));
if (IS_ERR(dnew)) {
err = nfserrno(PTR_ERR(dnew));
- inode_unlock(dentry->d_inode);
goto out_drop_write;
}
err = fh_fill_pre_attrs(fhp);
if (err != nfs_ok)
goto out_unlock;
- host_err = vfs_symlink(&nop_mnt_idmap, d_inode(dentry), dnew, path);
+ host_err = vfs_symlink(&nop_mnt_idmap, d_inode(dentry), dnew, path, NULL);
err = nfserrno(host_err);
cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
if (!err)
nfsd_create_setattr(rqstp, fhp, resfhp, attrs);
fh_fill_post_attrs(fhp);
out_unlock:
- inode_unlock(dentry->d_inode);
+ end_creating(dnew);
if (!err)
err = nfserrno(commit_metadata(fhp));
- dput(dnew);
- if (err==0) err = cerr;
+ if (!err)
+ err = cerr;
out_drop_write:
fh_drop_write(fhp);
out:
return err;
}
-/*
- * Create a hardlink
- * N.B. After this call _both_ ffhp and tfhp need an fh_put
+/**
+ * nfsd_link - create a link
+ * @rqstp: RPC transaction context
+ * @ffhp: the file handle of the directory where the new link is to be created
+ * @name: the filename of the new link
+ * @len: the length of @name in octets
+ * @tfhp: the file handle of an existing file object
+ *
+ * After this call _both_ ffhp and tfhp need an fh_put.
+ *
+ * Returns a generic NFS status code in network byte-order.
*/
__be32
nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
@@ -1709,9 +2007,12 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
{
struct dentry *ddir, *dnew, *dold;
struct inode *dirp;
+ int type;
__be32 err;
int host_err;
+ trace_nfsd_vfs_link(rqstp, ffhp, tfhp, name, len);
+
err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_CREATE);
if (err)
goto out;
@@ -1728,51 +2029,53 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
if (isdotent(name, len))
goto out;
+ err = nfs_ok;
+ type = d_inode(tfhp->fh_dentry)->i_mode & S_IFMT;
host_err = fh_want_write(tfhp);
- if (host_err) {
- err = nfserrno(host_err);
+ if (host_err)
goto out;
- }
ddir = ffhp->fh_dentry;
dirp = d_inode(ddir);
- inode_lock_nested(dirp, I_MUTEX_PARENT);
+ dnew = start_creating(&nop_mnt_idmap, ddir, &QSTR_LEN(name, len));
- dnew = lookup_one_len(name, ddir, len);
if (IS_ERR(dnew)) {
- err = nfserrno(PTR_ERR(dnew));
- goto out_unlock;
+ host_err = PTR_ERR(dnew);
+ goto out_drop_write;
}
dold = tfhp->fh_dentry;
err = nfserr_noent;
if (d_really_is_negative(dold))
- goto out_dput;
+ goto out_unlock;
err = fh_fill_pre_attrs(ffhp);
if (err != nfs_ok)
- goto out_dput;
+ goto out_unlock;
host_err = vfs_link(dold, &nop_mnt_idmap, dirp, dnew, NULL);
fh_fill_post_attrs(ffhp);
- inode_unlock(dirp);
+out_unlock:
+ end_creating(dnew);
if (!host_err) {
- err = nfserrno(commit_metadata(ffhp));
- if (!err)
- err = nfserrno(commit_metadata(tfhp));
- } else {
- err = nfserrno(host_err);
+ host_err = commit_metadata(ffhp);
+ if (!host_err)
+ host_err = commit_metadata(tfhp);
}
- dput(dnew);
+
out_drop_write:
fh_drop_write(tfhp);
+ if (host_err == -EBUSY) {
+ /*
+ * See RFC 8881 Section 18.9.4 para 1-2: NFSv4 LINK
+ * wants a status unique to the object type.
+ */
+ if (type != S_IFDIR)
+ err = nfserr_file_open;
+ else
+ err = nfserr_acces;
+ }
out:
- return err;
-
-out_dput:
- dput(dnew);
-out_unlock:
- inode_unlock(dirp);
- goto out_drop_write;
+ return err != nfs_ok ? err : nfserrno(host_err);
}
static void
@@ -1795,19 +2098,32 @@ nfsd_has_cached_files(struct dentry *dentry)
return ret;
}
-/*
- * Rename a file
- * N.B. After this call _both_ ffhp and tfhp need an fh_put
+/**
+ * nfsd_rename - rename a directory entry
+ * @rqstp: RPC transaction context
+ * @ffhp: the file handle of parent directory containing the entry to be renamed
+ * @fname: the filename of directory entry to be renamed
+ * @flen: the length of @fname in octets
+ * @tfhp: the file handle of parent directory to contain the renamed entry
+ * @tname: the filename of the new entry
+ * @tlen: the length of @tname in octets
+ *
+ * After this call _both_ ffhp and tfhp need an fh_put.
+ *
+ * Returns a generic NFS status code in network byte-order.
*/
__be32
nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
struct svc_fh *tfhp, char *tname, int tlen)
{
- struct dentry *fdentry, *tdentry, *odentry, *ndentry, *trap;
- struct inode *fdir, *tdir;
+ struct dentry *fdentry, *tdentry;
+ int type = S_IFDIR;
+ struct renamedata rd = {};
__be32 err;
int host_err;
- bool close_cached = false;
+ struct dentry *close_cached;
+
+ trace_nfsd_vfs_rename(rqstp, ffhp, tfhp, fname, flen, tname, tlen);
err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_REMOVE);
if (err)
@@ -1817,10 +2133,8 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
goto out;
fdentry = ffhp->fh_dentry;
- fdir = d_inode(fdentry);
tdentry = tfhp->fh_dentry;
- tdir = d_inode(tdentry);
err = nfserr_perm;
if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
@@ -1833,15 +2147,22 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
goto out;
retry:
+ close_cached = NULL;
host_err = fh_want_write(ffhp);
if (host_err) {
err = nfserrno(host_err);
goto out;
}
- trap = lock_rename(tdentry, fdentry);
- if (IS_ERR(trap)) {
- err = nfserr_xdev;
+ rd.mnt_idmap = &nop_mnt_idmap;
+ rd.old_parent = fdentry;
+ rd.new_parent = tdentry;
+
+ host_err = start_renaming(&rd, 0, &QSTR_LEN(fname, flen),
+ &QSTR_LEN(tname, tlen));
+
+ if (host_err) {
+ err = nfserrno(host_err);
goto out_want_write;
}
err = fh_fill_pre_attrs(ffhp);
@@ -1851,46 +2172,23 @@ retry:
if (err != nfs_ok)
goto out_unlock;
- odentry = lookup_one_len(fname, fdentry, flen);
- host_err = PTR_ERR(odentry);
- if (IS_ERR(odentry))
- goto out_nfserr;
+ type = d_inode(rd.old_dentry)->i_mode & S_IFMT;
+
+ if (d_inode(rd.new_dentry))
+ type = d_inode(rd.new_dentry)->i_mode & S_IFMT;
- host_err = -ENOENT;
- if (d_really_is_negative(odentry))
- goto out_dput_old;
- host_err = -EINVAL;
- if (odentry == trap)
- goto out_dput_old;
-
- ndentry = lookup_one_len(tname, tdentry, tlen);
- host_err = PTR_ERR(ndentry);
- if (IS_ERR(ndentry))
- goto out_dput_old;
- host_err = -ENOTEMPTY;
- if (ndentry == trap)
- goto out_dput_new;
-
- if ((ndentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK) &&
- nfsd_has_cached_files(ndentry)) {
- close_cached = true;
- goto out_dput_old;
+ if ((rd.new_dentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK) &&
+ nfsd_has_cached_files(rd.new_dentry)) {
+ close_cached = dget(rd.new_dentry);
+ goto out_unlock;
} else {
- struct renamedata rd = {
- .old_mnt_idmap = &nop_mnt_idmap,
- .old_dir = fdir,
- .old_dentry = odentry,
- .new_mnt_idmap = &nop_mnt_idmap,
- .new_dir = tdir,
- .new_dentry = ndentry,
- };
int retries;
for (retries = 1;;) {
host_err = vfs_rename(&rd);
if (host_err != -EAGAIN || !retries--)
break;
- if (!nfsd_wait_for_delegreturn(rqstp, d_inode(odentry)))
+ if (!nfsd_wait_for_delegreturn(rqstp, d_inode(rd.old_dentry)))
break;
}
if (!host_err) {
@@ -1899,19 +2197,25 @@ retry:
host_err = commit_metadata(ffhp);
}
}
- out_dput_new:
- dput(ndentry);
- out_dput_old:
- dput(odentry);
- out_nfserr:
- err = nfserrno(host_err);
+ if (host_err == -EBUSY) {
+ /*
+ * See RFC 8881 Section 18.26.4 para 1-3: NFSv4 RENAME
+ * wants a status unique to the object type.
+ */
+ if (type != S_IFDIR)
+ err = nfserr_file_open;
+ else
+ err = nfserr_acces;
+ } else {
+ err = nfserrno(host_err);
+ }
if (!close_cached) {
fh_fill_post_attrs(ffhp);
fh_fill_post_attrs(tfhp);
}
out_unlock:
- unlock_rename(tdentry, fdentry);
+ end_renaming(&rd);
out_want_write:
fh_drop_write(ffhp);
@@ -1922,18 +2226,25 @@ out_want_write:
* until this point and then reattempt the whole shebang.
*/
if (close_cached) {
- close_cached = false;
- nfsd_close_cached_files(ndentry);
- dput(ndentry);
+ nfsd_close_cached_files(close_cached);
+ dput(close_cached);
goto retry;
}
out:
return err;
}
-/*
- * Unlink a file or directory
- * N.B. After this call fhp needs an fh_put
+/**
+ * nfsd_unlink - remove a directory entry
+ * @rqstp: RPC transaction context
+ * @fhp: the file handle of the parent directory to be modified
+ * @type: enforced file type of the object to be removed
+ * @fname: the name of directory entry to be removed
+ * @flen: length of @fname in octets
+ *
+ * After this call fhp needs an fh_put.
+ *
+ * Returns a generic NFS status code in network byte-order.
*/
__be32
nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
@@ -1941,10 +2252,12 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
{
struct dentry *dentry, *rdentry;
struct inode *dirp;
- struct inode *rinode;
+ struct inode *rinode = NULL;
__be32 err;
int host_err;
+ trace_nfsd_vfs_unlink(rqstp, fhp, fname, flen);
+
err = nfserr_acces;
if (!flen || isdotent(fname, flen))
goto out;
@@ -1958,24 +2271,21 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
dentry = fhp->fh_dentry;
dirp = d_inode(dentry);
- inode_lock_nested(dirp, I_MUTEX_PARENT);
- rdentry = lookup_one_len(fname, dentry, flen);
+ rdentry = start_removing(&nop_mnt_idmap, dentry, &QSTR_LEN(fname, flen));
+
host_err = PTR_ERR(rdentry);
if (IS_ERR(rdentry))
- goto out_unlock;
+ goto out_drop_write;
- if (d_really_is_negative(rdentry)) {
- dput(rdentry);
- host_err = -ENOENT;
- goto out_unlock;
- }
- rinode = d_inode(rdentry);
err = fh_fill_pre_attrs(fhp);
if (err != nfs_ok)
goto out_unlock;
+ rinode = d_inode(rdentry);
+ /* Prevent truncation until after locks dropped */
ihold(rinode);
+
if (!type)
type = d_inode(rdentry)->i_mode & S_IFMT;
@@ -1993,32 +2303,31 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
break;
}
} else {
- host_err = vfs_rmdir(&nop_mnt_idmap, dirp, rdentry);
+ host_err = vfs_rmdir(&nop_mnt_idmap, dirp, rdentry, NULL);
}
fh_fill_post_attrs(fhp);
- inode_unlock(dirp);
- if (!host_err)
+out_unlock:
+ end_removing(rdentry);
+ if (!err && !host_err)
host_err = commit_metadata(fhp);
- dput(rdentry);
iput(rinode); /* truncate the inode here */
out_drop_write:
fh_drop_write(fhp);
out_nfserr:
if (host_err == -EBUSY) {
- /* name is mounted-on. There is no perfect
- * error status.
+ /*
+ * See RFC 8881 Section 18.25.4 para 4: NFSv4 REMOVE
+ * wants a status unique to the object type.
*/
- err = nfserr_file_open;
- } else {
- err = nfserrno(host_err);
+ if (type != S_IFDIR)
+ err = nfserr_file_open;
+ else
+ err = nfserr_acces;
}
out:
- return err;
-out_unlock:
- inode_unlock(dirp);
- goto out_drop_write;
+ return err != nfs_ok ? err : nfserrno(host_err);
}
/*
@@ -2231,6 +2540,8 @@ nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, in
{
__be32 err;
+ trace_nfsd_vfs_statfs(rqstp, fhp);
+
err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access);
if (!err) {
struct path path = {
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index f9b09b842856..ded2900d423f 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -114,27 +114,27 @@ __be32 nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
int nfsd_open_break_lease(struct inode *, int);
__be32 nfsd_open(struct svc_rqst *, struct svc_fh *, umode_t,
int, struct file **);
-int nfsd_open_verified(struct svc_fh *fhp, int may_flags,
+int nfsd_open_verified(struct svc_fh *fhp, umode_t type, int may_flags,
struct file **filp);
__be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct file *file, loff_t offset,
unsigned long *count,
u32 *eof);
__be32 nfsd_iter_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
- struct file *file, loff_t offset,
+ struct nfsd_file *nf, loff_t offset,
unsigned long *count, unsigned int base,
u32 *eof);
bool nfsd_read_splice_ok(struct svc_rqst *rqstp);
__be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
loff_t offset, unsigned long *count,
u32 *eof);
-__be32 nfsd_write(struct svc_rqst *, struct svc_fh *, loff_t,
- struct kvec *, int, unsigned long *,
- int stable, __be32 *verf);
+__be32 nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ loff_t offset, const struct xdr_buf *payload,
+ unsigned long *cnt, int stable, __be32 *verf);
__be32 nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct nfsd_file *nf, loff_t offset,
- struct kvec *vec, int vlen, unsigned long *cnt,
- int stable, __be32 *verf);
+ const struct xdr_buf *payload,
+ unsigned long *cnt, int stable, __be32 *verf);
__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *,
char *, int *);
__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *,
@@ -160,37 +160,4 @@ __be32 nfsd_permission(struct svc_cred *cred, struct svc_export *exp,
void nfsd_filp_close(struct file *fp);
-static inline int fh_want_write(struct svc_fh *fh)
-{
- int ret;
-
- if (fh->fh_want_write)
- return 0;
- ret = mnt_want_write(fh->fh_export->ex_path.mnt);
- if (!ret)
- fh->fh_want_write = true;
- return ret;
-}
-
-static inline void fh_drop_write(struct svc_fh *fh)
-{
- if (fh->fh_want_write) {
- fh->fh_want_write = false;
- mnt_drop_write(fh->fh_export->ex_path.mnt);
- }
-}
-
-static inline __be32 fh_getattr(const struct svc_fh *fh, struct kstat *stat)
-{
- u32 request_mask = STATX_BASIC_STATS;
- struct path p = {.mnt = fh->fh_export->ex_path.mnt,
- .dentry = fh->fh_dentry};
-
- if (fh->fh_maxsize == NFS4_FHSIZE)
- request_mask |= (STATX_BTIME | STATX_CHANGE_COOKIE);
-
- return nfserrno(vfs_getattr(&p, stat, request_mask,
- AT_STATX_SYNC_AS_STAT));
-}
-
#endif /* LINUX_NFSD_VFS_H */
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 382cc1389396..ae75846b3cd7 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -574,11 +574,10 @@ struct nfsd4_sequence {
struct nfs4_sessionid sessionid; /* request/response */
u32 seqid; /* request/response */
u32 slotid; /* request/response */
- u32 maxslots; /* request/response */
+ u32 maxslots; /* request */
u32 cachethis; /* request */
-#if 0
+ u32 maxslots_response; /* response */
u32 target_maxslots; /* response */
-#endif /* not yet */
u32 status_flags; /* response */
};
@@ -597,9 +596,43 @@ struct nfsd4_reclaim_complete {
struct nfsd4_deviceid {
u64 fsid_idx;
u32 generation;
- u32 pad;
};
+static inline __be32 *
+svcxdr_encode_deviceid4(__be32 *p, const struct nfsd4_deviceid *devid)
+{
+ __be64 *q = (__be64 *)p;
+
+ *q = (__force __be64)devid->fsid_idx;
+ p += 2;
+ *p++ = (__force __be32)devid->generation;
+ *p++ = xdr_zero;
+ return p;
+}
+
+static inline __be32 *
+svcxdr_decode_deviceid4(__be32 *p, struct nfsd4_deviceid *devid)
+{
+ __be64 *q = (__be64 *)p;
+
+ devid->fsid_idx = (__force u64)(*q);
+ p += 2;
+ devid->generation = (__force u32)(*p++);
+ p++; /* NFSD does not use the remaining octets */
+ return p;
+}
+
+static inline __be32
+nfsd4_decode_deviceid4(struct xdr_stream *xdr, struct nfsd4_deviceid *devid)
+{
+ __be32 *p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
+
+ if (unlikely(!p))
+ return nfserr_bad_xdr;
+ svcxdr_decode_deviceid4(p, devid);
+ return nfs_ok;
+}
+
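The helpers above treat the NFSv4 deviceid4 as 16 opaque octets: a 64-bit fsid index, a 32-bit generation, and four unused octets. Because the field is opaque to clients, byte order does not matter on the wire. A stand-alone sketch of that layout follows; the struct and function names here are illustrative, not kernel API.

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct deviceid4_example {
	uint64_t fsid_idx;
	uint32_t generation;
};

static void pack_deviceid4(uint8_t buf[16], const struct deviceid4_example *d)
{
	memcpy(buf, &d->fsid_idx, sizeof(d->fsid_idx));
	memcpy(buf + 8, &d->generation, sizeof(d->generation));
	memset(buf + 12, 0, 4);		/* remaining octets unused */
}

static void unpack_deviceid4(struct deviceid4_example *d, const uint8_t buf[16])
{
	memcpy(&d->fsid_idx, buf, sizeof(d->fsid_idx));
	memcpy(&d->generation, buf + 8, sizeof(d->generation));
}

int main(void)
{
	struct deviceid4_example in = { .fsid_idx = 42, .generation = 7 }, out;
	uint8_t buf[16];

	pack_deviceid4(buf, &in);
	unpack_deviceid4(&out, buf);
	assert(out.fsid_idx == in.fsid_idx && out.generation == in.generation);
	return 0;
}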
struct nfsd4_layout_seg {
u32 iomode;
u64 offset;
@@ -632,8 +665,7 @@ struct nfsd4_layoutcommit {
u64 lc_last_wr; /* request */
struct timespec64 lc_mtime; /* request */
u32 lc_layout_type; /* request */
- u32 lc_up_len; /* layout length */
- void *lc_up_layout; /* decoded by callback */
+ struct xdr_buf lc_up_layout; /* decoded by callback */
bool lc_size_chg; /* response */
u64 lc_newsize; /* response */
};
@@ -678,6 +710,10 @@ struct nfsd4_cb_offload {
__be32 co_nfserr;
unsigned int co_retries;
struct knfsd_fh co_fh;
+
+ struct nfs4_sessionid co_referring_sessionid;
+ u32 co_referring_slotid;
+ u32 co_referring_seqno;
};
struct nfsd4_copy {
@@ -888,27 +924,6 @@ struct nfsd4_compoundres {
struct nfsd4_compound_state cstate;
};
-static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp)
-{
- struct nfsd4_compoundargs *args = resp->rqstp->rq_argp;
- return resp->opcnt == 1 && args->ops[0].opnum == OP_SEQUENCE;
-}
-
-/*
- * The session reply cache only needs to cache replies that the client
- * actually asked us to. But it's almost free for us to cache compounds
- * consisting of only a SEQUENCE op, so we may as well cache those too.
- * Also, the protocol doesn't give us a convenient response in the case
- * of a replay of a solo SEQUENCE op that wasn't cached
- * (RETRY_UNCACHED_REP can only be returned in the second op of a
- * compound).
- */
-static inline bool nfsd4_cache_this(struct nfsd4_compoundres *resp)
-{
- return (resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS)
- || nfsd4_is_solo_sequence(resp);
-}
-
static inline bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
{
struct nfsd4_compoundres *resp = rqstp->rq_resp;
diff --git a/fs/nfsd/xdr4cb.h b/fs/nfsd/xdr4cb.h
index e8b00309c449..f4e29c0c701c 100644
--- a/fs/nfsd/xdr4cb.h
+++ b/fs/nfsd/xdr4cb.h
@@ -6,8 +6,11 @@
#define cb_compound_enc_hdr_sz 4
#define cb_compound_dec_hdr_sz (3 + (NFS4_MAXTAGLEN >> 2))
#define sessionid_sz (NFS4_MAX_SESSIONID_LEN >> 2)
+#define enc_referring_call4_sz (1 + 1)
+#define enc_referring_call_list4_sz (sessionid_sz + 1 + \
+ enc_referring_call4_sz)
#define cb_sequence_enc_sz (sessionid_sz + 4 + \
- 1 /* no referring calls list yet */)
+ enc_referring_call_list4_sz)
#define cb_sequence_dec_sz (op_dec_sz + sessionid_sz + 4)
#define op_enc_sz 1
@@ -59,16 +62,20 @@
* 1: CB_GETATTR opcode (32-bit)
* N: file_handle
* 1: number of entry in attribute array (32-bit)
- * 1: entry 0 in attribute array (32-bit)
+ * 3: entry 0-2 in attribute array (32-bit * 3)
*/
#define NFS4_enc_cb_getattr_sz (cb_compound_enc_hdr_sz + \
cb_sequence_enc_sz + \
- 1 + enc_nfs4_fh_sz + 1 + 1)
+ 1 + enc_nfs4_fh_sz + 1 + 3)
/*
* 4: fattr_bitmap_maxsz
* 1: attribute array len
* 2: change attr (64-bit)
* 2: size (64-bit)
+ * 2: atime.seconds (64-bit)
+ * 1: atime.nanoseconds (32-bit)
+ * 2: mtime.seconds (64-bit)
+ * 1: mtime.nanoseconds (32-bit)
*/
#define NFS4_dec_cb_getattr_sz (cb_compound_dec_hdr_sz + \
- cb_sequence_dec_sz + 4 + 1 + 2 + 2 + op_dec_sz)
+ cb_sequence_dec_sz + 4 + 1 + 2 + 2 + 2 + 1 + 2 + 1 + op_dec_sz)
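The sizes in these macros are counted in 32-bit XDR words, so the enlarged CB_GETATTR reply adds three words for each timestamp (64-bit seconds plus 32-bit nanoseconds). A small compile-time check of the attribute-body arithmetic, using illustrative macro names rather than the kernel's:

/* Word counts per field; names are illustrative, not the kernel's. */
#define EX_FATTR_BITMAP_SZ	4	/* as counted by fattr_bitmap_maxsz above */
#define EX_ATTR_ARRAY_LEN_SZ	1	/* attribute array length */
#define EX_CHANGE_SZ		2	/* 64-bit change attribute */
#define EX_SIZE_SZ		2	/* 64-bit size */
#define EX_TIME_SZ		(2 + 1)	/* 64-bit seconds + 32-bit nanoseconds */

_Static_assert(EX_FATTR_BITMAP_SZ + EX_ATTR_ARRAY_LEN_SZ + EX_CHANGE_SZ +
	       EX_SIZE_SZ + EX_TIME_SZ + EX_TIME_SZ == 15,
	       "attribute body of the CB_GETATTR reply is 15 XDR words");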
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index ba3e1f591f36..6b506995818d 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -21,6 +21,8 @@
* nilfs_palloc_groups_per_desc_block - get the number of groups that a group
* descriptor block can maintain
* @inode: inode of metadata file using this allocator
+ *
+ * Return: Number of groups that a group descriptor block can maintain.
*/
static inline unsigned long
nilfs_palloc_groups_per_desc_block(const struct inode *inode)
@@ -32,6 +34,8 @@ nilfs_palloc_groups_per_desc_block(const struct inode *inode)
/**
* nilfs_palloc_groups_count - get maximum number of groups
* @inode: inode of metadata file using this allocator
+ *
+ * Return: Maximum number of groups.
*/
static inline unsigned long
nilfs_palloc_groups_count(const struct inode *inode)
@@ -43,6 +47,8 @@ nilfs_palloc_groups_count(const struct inode *inode)
* nilfs_palloc_init_blockgroup - initialize private variables for allocator
* @inode: inode of metadata file using this allocator
* @entry_size: size of the persistent object
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned int entry_size)
{
@@ -78,6 +84,9 @@ int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned int entry_size)
* @inode: inode of metadata file using this allocator
* @nr: serial number of the entry (e.g. inode number)
* @offset: pointer to store offset number in the group
+ *
+ * Return: Number of the group that contains the entry with the index
+ * specified by @nr.
*/
static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr,
unsigned long *offset)
@@ -93,8 +102,8 @@ static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr,
* @inode: inode of metadata file using this allocator
* @group: group number
*
- * nilfs_palloc_desc_blkoff() returns block offset of the descriptor
- * block which contains a descriptor of the specified group.
+ * Return: Index number in the metadata file of the descriptor block of
+ * the group specified by @group.
*/
static unsigned long
nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group)
@@ -111,6 +120,9 @@ nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group)
*
* nilfs_palloc_bitmap_blkoff() returns block offset of the bitmap
* block used to allocate/deallocate entries in the specified group.
+ *
+ * Return: Index number in the metadata file of the bitmap block of
+ * the group specified by @group.
*/
static unsigned long
nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group)
@@ -125,6 +137,8 @@ nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group)
* nilfs_palloc_group_desc_nfrees - get the number of free entries in a group
* @desc: pointer to descriptor structure for the group
* @lock: spin lock protecting @desc
+ *
+ * Return: Number of free entries written in the group descriptor @desc.
*/
static unsigned long
nilfs_palloc_group_desc_nfrees(const struct nilfs_palloc_group_desc *desc,
@@ -143,6 +157,9 @@ nilfs_palloc_group_desc_nfrees(const struct nilfs_palloc_group_desc *desc,
* @desc: pointer to descriptor structure for the group
* @lock: spin lock protecting @desc
* @n: delta to be added
+ *
+ * Return: Number of free entries after adjusting the group descriptor
+ * @desc.
*/
static u32
nilfs_palloc_group_desc_add_entries(struct nilfs_palloc_group_desc *desc,
@@ -161,6 +178,9 @@ nilfs_palloc_group_desc_add_entries(struct nilfs_palloc_group_desc *desc,
* nilfs_palloc_entry_blkoff - get block offset of an entry block
* @inode: inode of metadata file using this allocator
* @nr: serial number of the entry (e.g. inode number)
+ *
+ * Return: Index number in the metadata file of the block containing
+ * the entry specified by @nr.
*/
static unsigned long
nilfs_palloc_entry_blkoff(const struct inode *inode, __u64 nr)
@@ -238,6 +258,12 @@ static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
* @blkoff: block offset
* @prev: nilfs_bh_assoc struct of the last used buffer
* @lock: spin lock protecting @prev
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - Non-existent block.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_palloc_delete_block(struct inode *inode, unsigned long blkoff,
struct nilfs_bh_assoc *prev,
@@ -258,6 +284,8 @@ static int nilfs_palloc_delete_block(struct inode *inode, unsigned long blkoff,
* @group: group number
* @create: create flag
* @bhp: pointer to store the resultant buffer head
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_palloc_get_desc_block(struct inode *inode,
unsigned long group,
@@ -277,6 +305,8 @@ static int nilfs_palloc_get_desc_block(struct inode *inode,
* @group: group number
* @create: create flag
* @bhp: pointer to store the resultant buffer head
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_palloc_get_bitmap_block(struct inode *inode,
unsigned long group,
@@ -294,6 +324,8 @@ static int nilfs_palloc_get_bitmap_block(struct inode *inode,
* nilfs_palloc_delete_bitmap_block - delete a bitmap block
* @inode: inode of metadata file using this allocator
* @group: group number
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_palloc_delete_bitmap_block(struct inode *inode,
unsigned long group)
@@ -312,6 +344,8 @@ static int nilfs_palloc_delete_bitmap_block(struct inode *inode,
* @nr: serial number of the entry (e.g. inode number)
* @create: create flag
* @bhp: pointer to store the resultant buffer head
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
int create, struct buffer_head **bhp)
@@ -328,6 +362,8 @@ int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
* nilfs_palloc_delete_entry_block - delete an entry block
* @inode: inode of metadata file using this allocator
* @nr: serial number of the entry
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_palloc_delete_entry_block(struct inode *inode, __u64 nr)
{
@@ -397,6 +433,9 @@ size_t nilfs_palloc_entry_offset(const struct inode *inode, __u64 nr,
* @bsize: size in bits
* @lock: spin lock protecting @bitmap
* @wrap: whether to wrap around
+ *
+ * Return: Offset number within the group of the found free entry, or
+ * %-ENOSPC if not found.
*/
static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
unsigned long target,
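As the kernel-doc above notes, this helper scans a bitmap for a free slot starting at a target offset, optionally wrapping around, and reports %-ENOSPC when nothing is free. A simplified user-space sketch of that idea (bit-by-bit scan, LSB-first within each byte, returning -1 instead of -ENOSPC; not the nilfs implementation):

#include <stdbool.h>
#include <stddef.h>

/* Returns a free bit index in [0, bsize), or -1 if none is found. */
static int find_available_slot(const unsigned char *bitmap, size_t target,
			       size_t bsize, bool wrap)
{
	size_t scanned, i = target;

	for (scanned = 0; scanned < bsize; scanned++, i++) {
		if (i == bsize) {
			if (!wrap)
				break;
			i = 0;	/* wrap around to the start */
		}
		if (!(bitmap[i >> 3] & (1u << (i & 7))))
			return (int)i;
	}
	return -1;
}

int main(void)
{
	/* Bits 0..4 in use, bits 5..7 free: 0x1f */
	const unsigned char bitmap[] = { 0x1f };

	return find_available_slot(bitmap, 2, 8, true) == 5 ? 0 : 1;
}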
@@ -438,6 +477,9 @@ static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
* @inode: inode of metadata file using this allocator
* @curr: current group number
* @max: maximum number of groups
+ *
+ * Return: Number of remaining descriptors (= groups) managed by the descriptor
+ * block.
*/
static unsigned long
nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode,
@@ -453,6 +495,8 @@ nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode,
* nilfs_palloc_count_desc_blocks - count descriptor blocks number
* @inode: inode of metadata file using this allocator
* @desc_blocks: descriptor blocks number [out]
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_palloc_count_desc_blocks(struct inode *inode,
unsigned long *desc_blocks)
@@ -473,6 +517,8 @@ static int nilfs_palloc_count_desc_blocks(struct inode *inode,
* MDT file growing
* @inode: inode of metadata file using this allocator
* @desc_blocks: known current descriptor blocks count
+ *
+ * Return: true if a group can be added in the metadata file, false if not.
*/
static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode,
unsigned long desc_blocks)
@@ -487,6 +533,12 @@ static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode,
* @inode: inode of metadata file using this allocator
* @nused: current number of used entries
* @nmaxp: max number of entries [out]
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ERANGE - Number of entries in use is out of range.
*/
int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
{
@@ -518,6 +570,13 @@ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
* @inode: inode of metadata file using this allocator
* @req: nilfs_palloc_req structure exchanged for the allocation
* @wrap: whether to wrap around
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - Entries exhausted (No entries available for allocation).
+ * * %-EROFS - Read only filesystem.
*/
int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
struct nilfs_palloc_req *req, bool wrap)
@@ -710,6 +769,8 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
* nilfs_palloc_prepare_free_entry - prepare to deallocate a persistent object
* @inode: inode of metadata file using this allocator
* @req: nilfs_palloc_req structure exchanged for the removal
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_palloc_prepare_free_entry(struct inode *inode,
struct nilfs_palloc_req *req)
@@ -754,6 +815,8 @@ void nilfs_palloc_abort_free_entry(struct inode *inode,
* @inode: inode of metadata file using this allocator
* @entry_nrs: array of entry numbers to be deallocated
* @nitems: number of entries stored in @entry_nrs
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
{
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index 3f115ab7e9a7..046d876ea3e0 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -21,6 +21,8 @@
*
* The number of entries per group is defined by the number of bits
* that a bitmap block can maintain.
+ *
+ * Return: Number of entries per group.
*/
static inline unsigned long
nilfs_palloc_entries_per_group(const struct inode *inode)
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index c9e8d9a7d820..ccc1a7aa52d2 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -47,17 +47,14 @@ static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap,
* @ptrp: place to store the value associated to @key
*
* Description: nilfs_bmap_lookup_at_level() finds a record whose key
- * matches @key in the block at @level of the bmap.
- *
- * Return Value: On success, 0 is returned and the record associated with @key
- * is stored in the place pointed by @ptrp. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - A record associated with @key does not exist.
+ * matches @key in the block at @level of the bmap. The record associated
+ * with @key is stored in the place pointed to by @ptrp.
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - A record associated with @key does not exist.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,
__u64 *ptrp)
@@ -138,14 +135,11 @@ static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
* Description: nilfs_bmap_insert() inserts the new key-record pair specified
* by @key and @rec into @bmap.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EEXIST - A record associated with @key already exist.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EEXIST - A record associated with @key already exists.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_insert(struct nilfs_bmap *bmap, __u64 key, unsigned long rec)
{
@@ -193,14 +187,11 @@ static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key)
* Description: nilfs_bmap_seek_key() seeks a valid key on @bmap
* starting from @start, and stores it to @keyp if found.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - No valid entry was found
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - No valid entry was found.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp)
{
@@ -236,14 +227,11 @@ int nilfs_bmap_last_key(struct nilfs_bmap *bmap, __u64 *keyp)
* Description: nilfs_bmap_delete() deletes the key-record pair specified by
* @key from @bmap.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - A record associated with @key does not exist.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - A record associated with @key does not exist.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_delete(struct nilfs_bmap *bmap, __u64 key)
{
@@ -290,12 +278,10 @@ static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, __u64 key)
* Description: nilfs_bmap_truncate() removes key-record pairs whose keys are
* greater than or equal to @key from @bmap.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_truncate(struct nilfs_bmap *bmap, __u64 key)
{
@@ -330,12 +316,10 @@ void nilfs_bmap_clear(struct nilfs_bmap *bmap)
* Description: nilfs_bmap_propagate() marks the buffers that directly or
* indirectly refer to the block specified by @bh dirty.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh)
{
@@ -362,22 +346,22 @@ void nilfs_bmap_lookup_dirty_buffers(struct nilfs_bmap *bmap,
/**
* nilfs_bmap_assign - assign a new block number to a block
- * @bmap: bmap
- * @bh: pointer to buffer head
+ * @bmap: bmap
+ * @bh: place to store a pointer to the buffer head to which a block
+ * address is assigned (in/out)
* @blocknr: block number
- * @binfo: block information
+ * @binfo: block information
*
* Description: nilfs_bmap_assign() assigns the block number @blocknr to the
- * buffer specified by @bh.
- *
- * Return Value: On success, 0 is returned and the buffer head of a newly
- * create buffer and the block information associated with the buffer are
- * stored in the place pointed by @bh and @binfo, respectively. On error, one
- * of the following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * buffer specified by @bh. The block information is stored in the memory
+ * pointed to by @binfo, and the buffer head may be replaced as a block
+ * address is assigned, in which case a pointer to the new buffer head is
+ * stored in the memory pointed to by @bh.
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_assign(struct nilfs_bmap *bmap,
struct buffer_head **bh,
@@ -402,12 +386,10 @@ int nilfs_bmap_assign(struct nilfs_bmap *bmap,
* Description: nilfs_bmap_mark() marks the block specified by @key and @level
* as dirty.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level)
{
@@ -430,7 +412,7 @@ int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level)
* Description: nilfs_test_and_clear() is the atomic operation to test and
* clear the dirty state of @bmap.
*
- * Return Value: 1 is returned if @bmap is dirty, or 0 if clear.
+ * Return: 1 if @bmap is dirty, or 0 if clear.
*/
int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap)
{
@@ -490,10 +472,10 @@ static struct lock_class_key nilfs_bmap_mdt_lock_key;
*
* Description: nilfs_bmap_read() initializes the bmap @bmap.
*
- * Return Value: On success, 0 is returned. On error, the following negative
- * error code is returned.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (corrupted bmap).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode)
{
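The reworked comment above spells out which negative codes nilfs_bmap_lookup_at_level() can return and that the result lands in *@ptrp. A minimal caller-side sketch, assuming only the signature visible in the hunk; the helper name and the grouping of the codes are illustrative, not part of the patch:

    /* Sketch only; in-tree this would sit next to the other bmap callers. */
    static int example_bmap_lookup(struct nilfs_bmap *bmap, __u64 key,
                                   int level, __u64 *ptrp)
    {
            int ret = nilfs_bmap_lookup_at_level(bmap, key, level, ptrp);

            if (ret == -ENOENT)
                    return ret;     /* no record for @key at this level */
            if (ret < 0)
                    return ret;     /* -EIO or -ENOMEM: propagate as-is */

            /* Success: *ptrp now holds the value associated with @key. */
            return 0;
    }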
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 501ad7be5174..568367129092 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -35,6 +35,7 @@ void nilfs_init_btnc_inode(struct inode *btnc_inode)
ii->i_flags = 0;
memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
+ btnc_inode->i_mapping->a_ops = &nilfs_buffer_cache_aops;
}
void nilfs_btnode_cache_clear(struct address_space *btnc)
@@ -200,7 +201,8 @@ void nilfs_btnode_delete(struct buffer_head *bh)
* Note that the current implementation does not support folio sizes larger
* than the page size.
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-EIO - I/O error (metadata corruption).
* * %-ENOMEM - Insufficient memory available.
*/
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index ef5061bb56da..dd0c8e560ef6 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -334,7 +334,7 @@ static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node,
* @inode: host inode of btree
* @blocknr: block number
*
- * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
+ * Return: 0 if normal, 1 if the node is broken.
*/
static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
size_t size, struct inode *inode,
@@ -366,7 +366,7 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
* @node: btree root node to be examined
* @inode: host inode of btree
*
- * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
+ * Return: 0 if normal, 1 if the root node is broken.
*/
static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
struct inode *inode)
@@ -652,8 +652,7 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree,
* @minlevel: start level
* @nextkey: place to store the next valid key
*
- * Return Value: If a next key was found, 0 is returned. Otherwise,
- * -ENOENT is returned.
+ * Return: 0 if the next key was found, %-ENOENT if not found.
*/
static int nilfs_btree_get_next_key(const struct nilfs_bmap *btree,
const struct nilfs_btree_path *path,
@@ -2103,11 +2102,13 @@ static int nilfs_btree_propagate(struct nilfs_bmap *btree,
ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0);
if (ret < 0) {
- if (unlikely(ret == -ENOENT))
+ if (unlikely(ret == -ENOENT)) {
nilfs_crit(btree->b_inode->i_sb,
"writing node/leaf block does not appear in b-tree (ino=%lu) at key=%llu, level=%d",
btree->b_inode->i_ino,
(unsigned long long)key, level);
+ ret = -EINVAL;
+ }
goto out;
}
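Besides the log message, the hunk above also changes what escapes to the caller: an -ENOENT from the internal lookup, which would normally mean "no such record", is remapped to -EINVAL because a block being written back must appear in the tree. A generic sketch of that remapping pattern; the tree type and lookup helper are hypothetical, not nilfs code:

    struct example_tree;                                    /* hypothetical */
    int example_lookup_parent(struct example_tree *t, unsigned long long key);

    static int example_propagate(struct example_tree *tree,
                                 unsigned long long key)
    {
            int ret = example_lookup_parent(tree, key);     /* hypothetical */

            if (ret < 0) {
                    if (unlikely(ret == -ENOENT)) {
                            pr_crit("dirty block missing from tree at key=%llu\n",
                                    key);
                            ret = -EINVAL;  /* inconsistency, not a plain miss */
                    }
                    return ret;
            }
            return 0;
    }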
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index c20207d7a989..4bbdc832d7f2 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -191,14 +191,11 @@ static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
* @cnop: place to store the next checkpoint number
* @bhp: place to store a pointer to buffer_head struct
*
- * Return Value: On success, it returns 0. On error, the following negative
- * error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
- *
- * %-EIO - I/O error
- *
- * %-ENOENT - no block exists in the range.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - No block exists in the range.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_cpfile_find_checkpoint_block(struct inode *cpfile,
__u64 start_cno, __u64 end_cno,
@@ -239,7 +236,8 @@ static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
* stores it to the inode file given by @ifile and the nilfs root object
* given by @root.
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-EINVAL - Invalid checkpoint.
* * %-ENOMEM - Insufficient memory available.
* * %-EIO - I/O error (including metadata corruption).
@@ -307,7 +305,8 @@ out_sem:
* In either case, the buffer of the block containing the checkpoint entry
* and the cpfile inode are made dirty for inclusion in the write log.
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-ENOMEM - Insufficient memory available.
* * %-EIO - I/O error (including metadata corruption).
* * %-EROFS - Read only filesystem
@@ -376,7 +375,8 @@ out_sem:
* cpfile with the data given by the arguments @root, @blkinc, @ctime, and
* @minor.
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-ENOMEM - Insufficient memory available.
* * %-EIO - I/O error (including metadata corruption).
*/
@@ -447,14 +447,11 @@ error:
* the period from @start to @end, excluding @end itself. The checkpoints
* which have been already deleted are ignored.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - invalid checkpoints.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - Invalid checkpoints.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
__u64 start,
@@ -718,7 +715,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
* number to continue searching.
*
* Return: Count of checkpoint info items stored in the output buffer on
- * success, or the following negative error code on failure.
+ * success, or one of the following negative error codes on failure:
* * %-EINVAL - Invalid checkpoint mode.
* * %-ENOMEM - Insufficient memory available.
* * %-EIO - I/O error (including metadata corruption).
@@ -743,7 +740,8 @@ ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
* @cpfile: checkpoint file inode
* @cno: checkpoint number to delete
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-EBUSY - Checkpoint in use (snapshot specified).
* * %-EIO - I/O error (including metadata corruption).
* * %-ENOENT - No valid checkpoint found.
@@ -1011,7 +1009,7 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
* @cno: checkpoint number
*
* Return: 1 if the checkpoint specified by @cno is a snapshot, 0 if not, or
- * the following negative error code on failure.
+ * one of the following negative error codes on failure:
* * %-EIO - I/O error (including metadata corruption).
* * %-ENOENT - No such checkpoint.
* * %-ENOMEM - Insufficient memory available.
@@ -1058,14 +1056,11 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
* Description: nilfs_change_cpmode() changes the mode of the checkpoint
* specified by @cno. The mode @mode is NILFS_CHECKPOINT or NILFS_SNAPSHOT.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - No such checkpoint.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - No such checkpoint.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
{
@@ -1097,14 +1092,12 @@ int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
* @cpstat: pointer to a structure of checkpoint statistics
*
* Description: nilfs_cpfile_get_stat() returns information about checkpoints.
+ * The checkpoint statistics are stored in the location pointed to by @cpstat.
*
- * Return Value: On success, 0 is returned, and checkpoints information is
- * stored in the place pointed by @cpstat. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
{
@@ -1135,6 +1128,8 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
* @cpsize: size of a checkpoint entry
* @raw_inode: on-disk cpfile inode
* @inodep: buffer to store the inode
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
struct nilfs_inode *raw_inode, struct inode **inodep)
@@ -1153,7 +1148,7 @@ int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
cpfile = nilfs_iget_locked(sb, NULL, NILFS_CPFILE_INO);
if (unlikely(!cpfile))
return -ENOMEM;
- if (!(cpfile->i_state & I_NEW))
+ if (!(inode_state_read_once(cpfile) & I_NEW))
goto out;
err = nilfs_mdt_init(cpfile, NILFS_MDT_GFP, 0);
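The conversions in this file, like the rest of the hunks in this series, converge on the same kernel-doc layout: a one-line success case after "Return:", then a bulleted, alphabetized list of the negative codes. A condensed template with placeholder names, for reference while reading the hunks:

    /**
     * example_op - one-line summary (all names here are placeholders)
     * @arg: argument description
     *
     * Description: what the operation does, including where any result is
     * stored (e.g. "the result is stored in the location pointed to by @arg").
     *
     * Return: 0 on success, or one of the following negative error codes on
     * failure:
     * * %-EIO - I/O error (including metadata corruption).
     * * %-ENOENT - No such entry.
     * * %-ENOMEM - Insufficient memory available.
     */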
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index e220dcb08aa6..674380837ab9 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -276,7 +276,8 @@ void nilfs_dat_abort_update(struct inode *dat,
* @dat: DAT file inode
* @vblocknr: virtual block number
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-EINVAL - Invalid DAT entry (internal code).
* * %-EIO - I/O error (including metadata corruption).
* * %-ENOMEM - Insufficient memory available.
@@ -302,14 +303,11 @@ int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
* Description: nilfs_dat_freev() frees the virtual block numbers specified by
* @vblocknrs and @nitems.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - The virtual block number have not been allocated.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - The virtual block numbers have not been allocated.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
@@ -325,12 +323,10 @@ int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
* Description: nilfs_dat_move() changes the block number associated with
* @vblocknr to @blocknr.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
@@ -390,17 +386,14 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
* @blocknrp: pointer to a block number
*
* Description: nilfs_dat_translate() maps the virtual block number @vblocknr
- * to the corresponding block number.
- *
- * Return Value: On success, 0 is returned and the block number associated
- * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
- * of the following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * to the corresponding block number. The block number associated with
+ * @vblocknr is stored in the place pointed to by @blocknrp.
*
- * %-ENOENT - A block number associated with @vblocknr does not exist.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - A block number associated with @vblocknr does not exist.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
@@ -489,6 +482,8 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz,
* @entry_size: size of a dat entry
* @raw_inode: on-disk dat inode
* @inodep: buffer to store the inode
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_dat_read(struct super_block *sb, size_t entry_size,
struct nilfs_inode *raw_inode, struct inode **inodep)
@@ -511,7 +506,7 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size,
dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO);
if (unlikely(!dat))
return -ENOMEM;
- if (!(dat->i_state & I_NEW))
+ if (!(inode_state_read_once(dat) & I_NEW))
goto out;
err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));
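The rewritten nilfs_dat_translate() comment now states where the result goes and which codes distinguish "no mapping" from real failures. A minimal caller-side sketch, assuming only the signature visible in the hunk; the helper name is illustrative:

    static int example_map_vblocknr(struct inode *dat, __u64 vblocknr,
                                    sector_t *pblocknr)
    {
            int ret = nilfs_dat_translate(dat, vblocknr, pblocknr);

            if (ret == -ENOENT)
                    return ret;     /* @vblocknr has no block mapped yet */
            if (ret < 0)
                    return ret;     /* -EIO or -ENOMEM */

            /* Success: *pblocknr holds the on-disk block number. */
            return 0;
    }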
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 14e8d82f8629..6ca3d74be1e1 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -70,7 +70,7 @@ static inline unsigned int nilfs_chunk_size(struct inode *inode)
*/
static unsigned int nilfs_last_byte(struct inode *inode, unsigned long page_nr)
{
- unsigned int last_byte = inode->i_size;
+ u64 last_byte = inode->i_size;
last_byte -= page_nr << PAGE_SHIFT;
if (last_byte > PAGE_SIZE)
@@ -96,7 +96,7 @@ static void nilfs_commit_chunk(struct folio *folio,
int err;
nr_dirty = nilfs_page_count_clean_buffers(folio, from, to);
- copied = block_write_end(NULL, mapping, pos, len, len, folio, NULL);
+ copied = block_write_end(pos, len, len, folio);
if (pos + copied > dir->i_size)
i_size_write(dir, pos + copied);
if (IS_DIRSYNC(dir))
@@ -400,7 +400,7 @@ int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino)
return 0;
}
-void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
+int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
struct folio *folio, struct inode *inode)
{
size_t from = offset_in_folio(folio, de);
@@ -410,11 +410,15 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
folio_lock(folio);
err = nilfs_prepare_chunk(folio, from, to);
- BUG_ON(err);
+ if (unlikely(err)) {
+ folio_unlock(folio);
+ return err;
+ }
de->inode = cpu_to_le64(inode->i_ino);
de->file_type = fs_umode_to_ftype(inode->i_mode);
nilfs_commit_chunk(folio, mapping, from, to);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
+ return 0;
}
/*
@@ -543,7 +547,10 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct folio *folio)
from = (char *)pde - kaddr;
folio_lock(folio);
err = nilfs_prepare_chunk(folio, from, to);
- BUG_ON(err);
+ if (unlikely(err)) {
+ folio_unlock(folio);
+ goto out;
+ }
if (pde)
pde->rec_len = nilfs_rec_len_to_disk(to - from);
dir->inode = 0;
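The first dir.c hunk widens last_byte from unsigned int to u64 before the page-offset arithmetic. A standalone demonstration of the truncation a 32-bit intermediate would cause, assuming the point of the change is to keep a large i_size intact; the values below are examples, not from the patch:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t i_size = 5ULL << 30;                   /* 5 GiB, example value */
            unsigned int as_u32 = (unsigned int)i_size;     /* truncates */
            uint64_t as_u64 = i_size;                       /* preserved */

            printf("32-bit copy: %u\n", as_u32);            /* 1 GiB: wrong */
            printf("64-bit copy: %llu\n", (unsigned long long)as_u64);
            return 0;
    }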
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
index 893ab36824cc..2d8dc6b35b54 100644
--- a/fs/nilfs2/direct.c
+++ b/fs/nilfs2/direct.c
@@ -273,6 +273,9 @@ static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
dat = nilfs_bmap_get_dat(bmap);
key = nilfs_bmap_data_get_key(bmap, bh);
ptr = nilfs_direct_get_ptr(bmap, key);
+ if (ptr == NILFS_BMAP_INVALID_PTR)
+ return -EINVAL;
+
if (!buffer_nilfs_volatile(bh)) {
oldreq.pr_entry_nr = ptr;
newreq.pr_entry_nr = ptr;
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 0e3fc5ba33c7..1b8d754db44d 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -125,10 +125,10 @@ static const struct vm_operations_struct nilfs_file_vm_ops = {
.page_mkwrite = nilfs_page_mkwrite,
};
-static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int nilfs_file_mmap_prepare(struct vm_area_desc *desc)
{
- file_accessed(file);
- vma->vm_ops = &nilfs_file_vm_ops;
+ file_accessed(desc->file);
+ desc->vm_ops = &nilfs_file_vm_ops;
return 0;
}
@@ -144,7 +144,7 @@ const struct file_operations nilfs_file_operations = {
#ifdef CONFIG_COMPAT
.compat_ioctl = nilfs_compat_ioctl,
#endif /* CONFIG_COMPAT */
- .mmap = nilfs_file_mmap,
+ .mmap_prepare = nilfs_file_mmap_prepare,
.open = generic_file_open,
/* .release = nilfs_release_file, */
.fsync = nilfs_sync_file,
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index ace22253fed0..561c220799c7 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -46,14 +46,11 @@
* specified by @pbn to the GC pagecache with the key @blkoff.
* This function sets @vbn (@pbn if @vbn is zero) in b_blocknr of the buffer.
*
- * Return Value: On success, 0 is returned. On Error, one of the following
- * negative error code is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - The block specified with @pbn does not exist.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - The block specified with @pbn does not exist.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
sector_t pbn, __u64 vbn,
@@ -114,12 +111,11 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
* specified by @vbn to the GC pagecache. @pbn can be supplied by the
* caller to avoid translation of the disk block address.
*
- * Return Value: On success, 0 is returned. On Error, one of the following
- * negative error code is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - Invalid virtual block address.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
__u64 vbn, struct buffer_head **out_bh)
@@ -163,7 +159,7 @@ int nilfs_init_gcinode(struct inode *inode)
inode->i_mode = S_IFREG;
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
- inode->i_mapping->a_ops = &empty_aops;
+ inode->i_mapping->a_ops = &nilfs_buffer_cache_aops;
ii->i_flags = 0;
nilfs_bmap_init_gc(ii->i_bmap);
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index e7339eb3c08a..99eb8a59009e 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -38,17 +38,16 @@ static inline struct nilfs_ifile_info *NILFS_IFILE_I(struct inode *ifile)
* @out_ino: pointer to a variable to store inode number
* @out_bh: buffer_head contains newly allocated disk inode
*
- * Return Value: On success, 0 is returned and the newly allocated inode
- * number is stored in the place pointed by @ino, and buffer_head pointer
- * that contains newly allocated disk inode structure is stored in the
- * place pointed by @out_bh
- * On error, one of the following negative error codes is returned.
+ * nilfs_ifile_create_inode() allocates a new inode in the ifile metadata
+ * file and stores the inode number in the variable pointed to by @out_ino,
+ * as well as storing the ifile's buffer with the disk inode in the location
+ * pointed to by @out_bh.
*
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOSPC - No inode left.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - No inode left.
*/
int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
struct buffer_head **out_bh)
@@ -83,14 +82,11 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
* @ifile: ifile inode
* @ino: inode number
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - The inode number @ino have not been allocated.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - Inode number unallocated.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino)
{
@@ -150,6 +146,8 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino,
* @ifile: ifile inode
* @nmaxinodes: current maximum of available inodes count [out]
* @nfreeinodes: free inodes count [out]
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_ifile_count_free_inodes(struct inode *ifile,
u64 *nmaxinodes, u64 *nfreeinodes)
@@ -174,7 +172,8 @@ int nilfs_ifile_count_free_inodes(struct inode *ifile,
* @cno: number of checkpoint entry to read
* @inode_size: size of an inode
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-EINVAL - Invalid checkpoint.
* * %-ENOMEM - Insufficient memory available.
* * %-EIO - I/O error (including metadata corruption).
@@ -189,7 +188,7 @@ int nilfs_ifile_read(struct super_block *sb, struct nilfs_root *root,
ifile = nilfs_iget_locked(sb, root, NILFS_IFILE_INO);
if (unlikely(!ifile))
return -ENOMEM;
- if (!(ifile->i_state & I_NEW))
+ if (!(inode_state_read_once(ifile) & I_NEW))
goto out;
err = nilfs_mdt_init(ifile, NILFS_MDT_GFP,
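The rewritten comment above describes the two outputs of nilfs_ifile_create_inode(). A caller-side sketch, assuming only the signature and behaviour stated there; the brelse() pairing is the usual buffer_head convention rather than something this patch adds, and the helper name is illustrative:

    static int example_alloc_disk_inode(struct inode *ifile)
    {
            struct buffer_head *bh;
            ino_t ino;
            int err;

            err = nilfs_ifile_create_inode(ifile, &ino, &bh);
            if (err)
                    return err;     /* -EIO, -ENOMEM or -ENOSPC */

            /* ... fill in the on-disk inode carried by bh, mark it dirty ... */

            brelse(bh);             /* release the reference handed to us */
            return 0;
    }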
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index cf9ba481ae37..51bde45d5865 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -68,6 +68,8 @@ void nilfs_inode_sub_blocks(struct inode *inode, int n)
*
* This function does not issue actual read request of the specified data
* block. It is done by VFS.
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_get_block(struct inode *inode, sector_t blkoff,
struct buffer_head *bh_result, int create)
@@ -141,6 +143,8 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
* address_space_operations.
* @file: file struct of the file to be read
* @folio: the folio to be read
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_read_folio(struct file *file, struct folio *folio)
{
@@ -214,7 +218,8 @@ void nilfs_write_failed(struct address_space *mapping, loff_t to)
}
}
-static int nilfs_write_begin(struct file *file, struct address_space *mapping,
+static int nilfs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
loff_t pos, unsigned len,
struct folio **foliop, void **fsdata)
@@ -233,7 +238,8 @@ static int nilfs_write_begin(struct file *file, struct address_space *mapping,
return err;
}
-static int nilfs_write_end(struct file *file, struct address_space *mapping,
+static int nilfs_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct folio *folio, void *fsdata)
{
@@ -244,7 +250,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
nr_dirty = nilfs_page_count_clean_buffers(folio, start,
start + copied);
- copied = generic_write_end(file, mapping, pos, len, copied, folio,
+ copied = generic_write_end(iocb, mapping, pos, len, copied, folio,
fsdata);
nilfs_set_file_dirty(inode, nr_dirty);
err = nilfs_transaction_commit(inode->i_sb);
@@ -276,6 +282,10 @@ const struct address_space_operations nilfs_aops = {
.is_partially_uptodate = block_is_partially_uptodate,
};
+const struct address_space_operations nilfs_buffer_cache_aops = {
+ .invalidate_folio = block_invalidate_folio,
+};
+
static int nilfs_insert_inode_locked(struct inode *inode,
struct nilfs_root *root,
unsigned long ino)
@@ -355,7 +365,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
failed_after_creation:
clear_nlink(inode);
- if (inode->i_state & I_NEW)
+ if (inode_state_read_once(inode) & I_NEW)
unlock_new_inode(inode);
iput(inode); /*
* raw_inode will be deleted through
@@ -464,11 +474,18 @@ static int __nilfs_read_inode(struct super_block *sb,
inode->i_op = &nilfs_symlink_inode_operations;
inode_nohighmem(inode);
inode->i_mapping->a_ops = &nilfs_aops;
- } else {
+ } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+ S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
inode->i_op = &nilfs_special_inode_operations;
init_special_inode(
inode, inode->i_mode,
huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
+ } else {
+ nilfs_error(sb,
+ "invalid file type bits in mode 0%o for inode %lu",
+ inode->i_mode, ino);
+ err = -EIO;
+ goto failed_unmap;
}
nilfs_ifile_unmap_inode(raw_inode);
brelse(bh);
@@ -544,8 +561,14 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
inode = nilfs_iget_locked(sb, root, ino);
if (unlikely(!inode))
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+
+ if (!(inode_state_read_once(inode) & I_NEW)) {
+ if (!inode->i_nlink) {
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
return inode;
+ }
err = __nilfs_read_inode(sb, root, ino, inode);
if (unlikely(err)) {
@@ -568,7 +591,7 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
if (unlikely(!inode))
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
err = nilfs_init_gcinode(inode);
@@ -588,10 +611,7 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
* or does nothing if the inode already has it. This function allocates
* an additional inode to maintain page cache of B-tree nodes one-on-one.
*
- * Return Value: On success, 0 is returned. On errors, one of the following
- * negative error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: 0 on success, or %-ENOMEM if memory is insufficient.
*/
int nilfs_attach_btree_node_cache(struct inode *inode)
{
@@ -611,7 +631,7 @@ int nilfs_attach_btree_node_cache(struct inode *inode)
nilfs_iget_set, &args);
if (unlikely(!btnc_inode))
return -ENOMEM;
- if (btnc_inode->i_state & I_NEW) {
+ if (inode_state_read_once(btnc_inode) & I_NEW) {
nilfs_init_btnc_inode(btnc_inode);
unlock_new_inode(btnc_inode);
}
@@ -650,11 +670,8 @@ void nilfs_detach_btree_node_cache(struct inode *inode)
* in one inode and the one for b-tree node pages is set up in the
* other inode, which is attached to the former inode.
*
- * Return Value: On success, a pointer to the inode for data pages is
- * returned. On errors, one of the following negative error code is returned
- * in a pointer type.
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: A pointer to the inode for data pages on success, or an
+ * %-ENOMEM error pointer if memory is insufficient.
*/
struct inode *nilfs_iget_for_shadow(struct inode *inode)
{
@@ -669,12 +686,13 @@ struct inode *nilfs_iget_for_shadow(struct inode *inode)
nilfs_iget_set, &args);
if (unlikely(!s_inode))
return ERR_PTR(-ENOMEM);
- if (!(s_inode->i_state & I_NEW))
+ if (!(inode_state_read_once(s_inode) & I_NEW))
return inode;
NILFS_I(s_inode)->i_flags = 0;
memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
+ s_inode->i_mapping->a_ops = &nilfs_buffer_cache_aops;
err = nilfs_attach_btree_node_cache(s_inode);
if (unlikely(err)) {
@@ -1177,7 +1195,7 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (size) {
if (phys && blkphy << blkbits == phys + size) {
/* The current extent goes on */
- size += n << blkbits;
+ size += (u64)n << blkbits;
} else {
/* Terminate the current extent */
ret = fiemap_fill_next_extent(
@@ -1190,14 +1208,14 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
flags = FIEMAP_EXTENT_MERGED;
logical = blkoff << blkbits;
phys = blkphy << blkbits;
- size = n << blkbits;
+ size = (u64)n << blkbits;
}
} else {
/* Start a new extent */
flags = FIEMAP_EXTENT_MERGED;
logical = blkoff << blkbits;
phys = blkphy << blkbits;
- size = n << blkbits;
+ size = (u64)n << blkbits;
}
blkoff += n;
}
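The three fiemap hunks cast the block count to u64 before shifting by blkbits. A standalone demonstration of the overflow those casts avoid; the count and shift below are example values, not taken from the patch:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned int n = 1u << 22;      /* 4M blocks, example value */
            unsigned int blkbits = 12;      /* 4 KiB block size, example value */

            uint64_t wrong = n << blkbits;                  /* 32-bit shift wraps to 0 */
            uint64_t right = (uint64_t)n << blkbits;        /* 16 GiB as intended */

            printf("without cast: %llu\n", (unsigned long long)wrong);
            printf("with cast:    %llu\n", (unsigned long long)right);
            return 0;
    }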
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index fa77f78df681..e17b8da66491 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -33,17 +33,14 @@
* @dofunc: concrete function of get/set metadata info
*
* Description: nilfs_ioctl_wrap_copy() gets/sets metadata info by means of
- * calling dofunc() function on the basis of @argv argument.
- *
- * Return Value: On success, 0 is returned and requested metadata info
- * is copied into userspace. On error, one of the following
- * negative error codes is returned.
- *
- * %-EINVAL - Invalid arguments from userspace.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EFAULT - Failure during execution of requested operation.
+ * calling dofunc() function on the basis of @argv argument. If successful,
+ * the requested metadata information is copied to userspace memory.
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during execution of requested operation.
+ * * %-EINVAL - Invalid arguments from userspace.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
struct nilfs_argv *argv, int dir,
@@ -52,7 +49,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
void *, size_t, size_t))
{
void *buf;
- void __user *base = (void __user *)(unsigned long)argv->v_base;
+ void __user *base = u64_to_user_ptr(argv->v_base);
size_t maxmembs, total, n;
ssize_t nr;
int ret, i;
@@ -121,7 +118,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
*
* Return: always 0 as success.
*/
-int nilfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int nilfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
@@ -139,7 +136,7 @@ int nilfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
* Return: 0 on success, or a negative error code on failure.
*/
int nilfs_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa)
+ struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct nilfs_transaction_info ti;
@@ -190,13 +187,10 @@ static int nilfs_ioctl_getversion(struct inode *inode, void __user *argp)
* given checkpoint between checkpoint and snapshot state. This ioctl
* is used in chcp and mkcp utilities.
*
- * Return Value: On success, 0 is returned and mode of a checkpoint is
- * changed. On error, one of the following negative error codes
- * is returned.
- *
- * %-EPERM - Operation not permitted.
- *
- * %-EFAULT - Failure during checkpoint mode changing.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during checkpoint mode changing.
+ * * %-EPERM - Operation not permitted.
*/
static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
@@ -244,13 +238,10 @@ out:
* checkpoint from NILFS2 file system. This ioctl is used in rmcp
* utility.
*
- * Return Value: On success, 0 is returned and a checkpoint is
- * removed. On error, one of the following negative error codes
- * is returned.
- *
- * %-EPERM - Operation not permitted.
- *
- * %-EFAULT - Failure during checkpoint removing.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during checkpoint removing.
+ * * %-EPERM - Operation not permitted.
*/
static int
nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp,
@@ -296,7 +287,7 @@ out:
* requested checkpoints. The NILFS_IOCTL_GET_CPINFO ioctl is used in
* lscp utility and by nilfs_cleanerd daemon.
*
- * Return value: count of nilfs_cpinfo structures in output buffer.
+ * Return: Count of nilfs_cpinfo structures in output buffer.
*/
static ssize_t
nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
@@ -320,17 +311,14 @@ nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
*
* Description: nilfs_ioctl_get_cpstat() returns information about checkpoints.
* The NILFS_IOCTL_GET_CPSTAT ioctl is used by lscp, rmcp utilities
- * and by nilfs_cleanerd daemon.
- *
- * Return Value: On success, 0 is returned, and checkpoints information is
- * copied into userspace pointer @argp. On error, one of the following
- * negative error codes is returned.
+ * and by nilfs_cleanerd daemon. The checkpoint statistics are copied to
+ * the userspace memory pointed to by @argp.
*
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EFAULT - Failure during getting checkpoints statistics.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during getting checkpoints statistics.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
@@ -363,7 +351,8 @@ static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp,
* info about requested segments. The NILFS_IOCTL_GET_SUINFO ioctl is used
* in lssu, nilfs_resize utilities and by nilfs_cleanerd daemon.
*
- * Return value: count of nilfs_suinfo structures in output buffer.
+ * Return: Count of nilfs_suinfo structures in output buffer on success,
+ * or a negative error code on failure.
*/
static ssize_t
nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
@@ -387,17 +376,14 @@ nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
*
* Description: nilfs_ioctl_get_sustat() returns segment usage statistics.
* The NILFS_IOCTL_GET_SUSTAT ioctl is used in lssu, nilfs_resize utilities
- * and by nilfs_cleanerd daemon.
- *
- * Return Value: On success, 0 is returned, and segment usage information is
- * copied into userspace pointer @argp. On error, one of the following
- * negative error codes is returned.
+ * and by nilfs_cleanerd daemon. The requested segment usage information is
+ * copied to the userspace memory pointed to by @argp.
*
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EFAULT - Failure during getting segment usage statistics.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during getting segment usage statistics.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
@@ -430,7 +416,8 @@ static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp,
* on virtual block addresses. The NILFS_IOCTL_GET_VINFO ioctl is used
* by nilfs_cleanerd daemon.
*
- * Return value: count of nilfs_vinfo structures in output buffer.
+ * Return: Count of nilfs_vinfo structures in output buffer on success, or
+ * a negative error code on failure.
*/
static ssize_t
nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
@@ -457,7 +444,8 @@ nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
* about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl
* is used by nilfs_cleanerd daemon.
*
- * Return value: count of nilfs_bdescs structures in output buffer.
+ * Return: Count of nilfs_bdescs structures in output buffer on success, or
+ * a negative error code on failure.
*/
static ssize_t
nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags,
@@ -494,19 +482,15 @@ nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags,
*
* Description: nilfs_ioctl_do_get_bdescs() function returns information
* about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl
- * is used by nilfs_cleanerd daemon.
+ * is used by nilfs_cleanerd daemon. If successful, disk block descriptors
+ * are copied to userspace pointer @argp.
*
- * Return Value: On success, 0 is returned, and disk block descriptors are
- * copied into userspace pointer @argp. On error, one of the following
- * negative error codes is returned.
- *
- * %-EINVAL - Invalid arguments from userspace.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EFAULT - Failure during getting disk block descriptors.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during getting disk block descriptors.
+ * * %-EINVAL - Invalid arguments from userspace.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
@@ -540,16 +524,12 @@ static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp,
* Description: nilfs_ioctl_move_inode_block() function registers data/node
* buffer in the GC pagecache and submit read request.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - Requested block doesn't exist.
- *
- * %-EEXIST - Blocks conflict is detected.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EEXIST - Block conflict detected.
+ * * %-EIO - I/O error.
+ * * %-ENOENT - Requested block doesn't exist.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_move_inode_block(struct inode *inode,
struct nilfs_vdesc *vdesc,
@@ -604,8 +584,8 @@ static int nilfs_ioctl_move_inode_block(struct inode *inode,
* blocks that garbage collector specified with the array of nilfs_vdesc
* structures and stores them into page caches of GC inodes.
*
- * Return Value: Number of processed nilfs_vdesc structures or
- * error code, otherwise.
+ * Return: Number of processed nilfs_vdesc structures on success, or
+ * a negative error code on failure.
*/
static int nilfs_ioctl_move_blocks(struct super_block *sb,
struct nilfs_argv *argv, void *buf)
@@ -682,14 +662,11 @@ static int nilfs_ioctl_move_blocks(struct super_block *sb,
* in the period from p_start to p_end, excluding p_end itself. The checkpoints
* which have been already deleted are ignored.
*
- * Return Value: Number of processed nilfs_period structures or
- * error code, otherwise.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - invalid checkpoints.
+ * Return: Number of processed nilfs_period structures on success, or one of
+ * the following negative error codes on failure:
+ * * %-EINVAL - Invalid checkpoints.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs,
struct nilfs_argv *argv, void *buf)
@@ -717,14 +694,11 @@ static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs,
* Description: nilfs_ioctl_free_vblocknrs() function frees
* the virtual block numbers specified by @buf and @argv->v_nmembs.
*
- * Return Value: Number of processed virtual block numbers or
- * error code, otherwise.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - The virtual block number have not been allocated.
+ * Return: Number of processed virtual block numbers on success, or one of the
+ * following negative error codes on failure:
+ * * %-EIO - I/O error.
+ * * %-ENOENT - Unallocated virtual block number.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs,
struct nilfs_argv *argv, void *buf)
@@ -746,14 +720,11 @@ static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs,
* Description: nilfs_ioctl_mark_blocks_dirty() function marks
* metadata file or data blocks as dirty.
*
- * Return Value: Number of processed block descriptors or
- * error code, otherwise.
- *
- * %-ENOMEM - Insufficient memory available.
- *
- * %-EIO - I/O error
- *
- * %-ENOENT - the specified block does not exist (hole block)
+ * Return: Number of processed block descriptors on success, or one of the
+ * following negative error codes on failure:
+ * * %-EIO - I/O error.
+ * * %-ENOENT - Non-existent block (hole block).
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs,
struct nilfs_argv *argv, void *buf)
@@ -852,7 +823,7 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs,
* from userspace. The NILFS_IOCTL_CLEAN_SEGMENTS ioctl is used by
* nilfs_cleanerd daemon.
*
- * Return Value: On success, 0 is returned or error code, otherwise.
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
@@ -865,7 +836,6 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
sizeof(struct nilfs_bdesc),
sizeof(__u64),
};
- void __user *base;
void *kbufs[5];
struct the_nilfs *nilfs;
size_t len, nsegs;
@@ -892,7 +862,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
* use kmalloc() for its buffer because the memory used for the
* segment numbers is small enough.
*/
- kbufs[4] = memdup_array_user((void __user *)(unsigned long)argv[4].v_base,
+ kbufs[4] = memdup_array_user(u64_to_user_ptr(argv[4].v_base),
nsegs, sizeof(__u64));
if (IS_ERR(kbufs[4])) {
ret = PTR_ERR(kbufs[4]);
@@ -912,20 +882,14 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
goto out_free;
len = argv[n].v_size * argv[n].v_nmembs;
- base = (void __user *)(unsigned long)argv[n].v_base;
if (len == 0) {
kbufs[n] = NULL;
continue;
}
- kbufs[n] = vmalloc(len);
- if (!kbufs[n]) {
- ret = -ENOMEM;
- goto out_free;
- }
- if (copy_from_user(kbufs[n], base, len)) {
- ret = -EFAULT;
- vfree(kbufs[n]);
+ kbufs[n] = vmemdup_user(u64_to_user_ptr(argv[n].v_base), len);
+ if (IS_ERR(kbufs[n])) {
+ ret = PTR_ERR(kbufs[n]);
goto out_free;
}
}
@@ -957,7 +921,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
out_free:
while (--n >= 0)
- vfree(kbufs[n]);
+ kvfree(kbufs[n]);
kfree(kbufs[4]);
out:
mnt_drop_write_file(filp);
@@ -976,20 +940,14 @@ out:
* and metadata are written out to the device when it successfully
* returned.
*
- * Return Value: On success, 0 is retured. On errors, one of the following
- * negative error code is returned.
- *
- * %-EROFS - Read only filesystem.
- *
- * %-EIO - I/O error
- *
- * %-ENOSPC - No space left on device (only in a panic state).
- *
- * %-ERESTARTSYS - Interrupted.
- *
- * %-ENOMEM - Insufficient memory available.
- *
- * %-EFAULT - Failure during execution of requested operation.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during execution of requested operation.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - No space left on device (only in a panic state).
+ * * %-ERESTARTSYS - Interrupted.
+ * * %-EROFS - Read only filesystem.
*/
static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
@@ -1023,7 +981,7 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
* @filp: file object
* @argp: pointer on argument from userspace
*
- * Return Value: On success, 0 is returned or error code, otherwise.
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_ioctl_resize(struct inode *inode, struct file *filp,
void __user *argp)
@@ -1059,7 +1017,7 @@ out:
* checks the arguments from userspace and calls nilfs_sufile_trim_fs, which
* performs the actual trim operation.
*
- * Return Value: On success, 0 is returned or negative error code, otherwise.
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp)
{
@@ -1101,7 +1059,7 @@ static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp)
* of segments in bytes and upper limit of segments in bytes.
* The NILFS_IOCTL_SET_ALLOC_RANGE is used by nilfs_resize utility.
*
- * Return Value: On success, 0 is returned or error code, otherwise.
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp)
{
@@ -1152,17 +1110,15 @@ out:
* @dofunc: concrete function of getting metadata info
*
* Description: nilfs_ioctl_get_info() gets metadata info by means of
- * calling dofunc() function.
- *
- * Return Value: On success, 0 is returned and requested metadata info
- * is copied into userspace. On error, one of the following
- * negative error codes is returned.
- *
- * %-EINVAL - Invalid arguments from userspace.
+ * calling dofunc() function. The requested metadata information is copied
+ * to the userspace memory pointed to by @argp.
*
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EFAULT - Failure during execution of requested operation.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during execution of requested operation.
+ * * %-EINVAL - Invalid arguments from userspace.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp,
@@ -1202,18 +1158,14 @@ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp,
* encapsulated in nilfs_argv and updates the segment usage info
* according to the flags in nilfs_suinfo_update.
*
- * Return Value: On success, 0 is returned. On error, one of the
- * following negative error codes is returned.
- *
- * %-EPERM - Not enough permissions
- *
- * %-EFAULT - Error copying input data
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EEXIST - Block conflict detected.
+ * * %-EFAULT - Error copying input data.
+ * * %-EINVAL - Invalid values in input (segment number, flags or nblocks).
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-EPERM - Not enough permissions.
*/
static int nilfs_ioctl_set_suinfo(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
@@ -1222,7 +1174,6 @@ static int nilfs_ioctl_set_suinfo(struct inode *inode, struct file *filp,
struct nilfs_transaction_info ti;
struct nilfs_argv argv;
size_t len;
- void __user *base;
void *kbuf;
int ret;
@@ -1253,18 +1204,12 @@ static int nilfs_ioctl_set_suinfo(struct inode *inode, struct file *filp,
goto out;
}
- base = (void __user *)(unsigned long)argv.v_base;
- kbuf = vmalloc(len);
- if (!kbuf) {
- ret = -ENOMEM;
+ kbuf = vmemdup_user(u64_to_user_ptr(argv.v_base), len);
+ if (IS_ERR(kbuf)) {
+ ret = PTR_ERR(kbuf);
goto out;
}
- if (copy_from_user(kbuf, base, len)) {
- ret = -EFAULT;
- goto out_free;
- }
-
nilfs_transaction_begin(inode->i_sb, &ti, 0);
ret = nilfs_sufile_set_suinfo(nilfs->ns_sufile, kbuf, argv.v_size,
argv.v_nmembs);
@@ -1273,8 +1218,7 @@ static int nilfs_ioctl_set_suinfo(struct inode *inode, struct file *filp,
else
nilfs_transaction_commit(inode->i_sb); /* never fails */
-out_free:
- vfree(kbuf);
+ kvfree(kbuf);
out:
mnt_drop_write_file(filp);
return ret;
@@ -1309,7 +1253,8 @@ static int nilfs_ioctl_get_fslabel(struct super_block *sb, void __user *argp)
* @filp: file object
* @argp: pointer to userspace memory that contains the volume name
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-EFAULT - Error copying input data.
* * %-EINVAL - Label length exceeds record size in superblock.
* * %-EIO - I/O error.
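Several of the ioctl hunks replace the open-coded vmalloc() plus copy_from_user() sequence with vmemdup_user() and a matching kvfree(), and swap the manual (void __user *)(unsigned long) casts for u64_to_user_ptr(). A condensed sketch of that pattern in isolation; the helper and parameter names are stand-ins for the nilfs_argv plumbing in the real code:

    /* Sketch only: copy in a user buffer described by a 64-bit address. */
    static void *example_copy_in(__u64 v_base, size_t len)
    {
            void *kbuf;

            kbuf = vmemdup_user(u64_to_user_ptr(v_base), len);
            if (IS_ERR(kbuf))
                    return kbuf;    /* -ENOMEM or -EFAULT as an ERR_PTR */

            /* ... caller uses kbuf and eventually frees it with kvfree() ... */
            return kbuf;
    }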
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 965b5ad1c0df..946b0d3534a5 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -226,20 +226,21 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
* @out_bh: output of a pointer to the buffer_head
*
* nilfs_mdt_get_block() looks up the specified buffer and tries to create
- * a new buffer if @create is not zero. On success, the returned buffer is
- * assured to be either existing or formatted using a buffer lock on success.
- * @out_bh is substituted only when zero is returned.
+ * a new buffer if @create is not zero. If (and only if) this function
+ * succeeds, it stores a pointer to the retrieved buffer head in the location
+ * pointed to by @out_bh.
*
- * Return Value: On success, it returns 0. On error, the following negative
- * error code is returned.
+ * The retrieved buffer may be either an existing one or a newly allocated one.
+ * For a newly created buffer, if the callback function argument @init_block
+ * is non-NULL, the callback will be called with the buffer locked to format
+ * the block.
*
- * %-ENOMEM - Insufficient memory available.
- *
- * %-EIO - I/O error
- *
- * %-ENOENT - the specified block does not exist (hole block)
- *
- * %-EROFS - Read only filesystem (for create mode)
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - The specified block does not exist (hole block).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-EROFS - Read only filesystem (for create mode).
*/
int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
void (*init_block)(struct inode *,
@@ -275,14 +276,11 @@ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
* @out_bh, and block offset to @blkoff, respectively. @out_bh and
* @blkoff are substituted only when zero is returned.
*
- * Return Value: On success, it returns 0. On error, the following negative
- * error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
- *
- * %-EIO - I/O error
- *
- * %-ENOENT - no block was found in the range
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - No block was found in the range.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_mdt_find_block(struct inode *inode, unsigned long start,
unsigned long end, unsigned long *blkoff,
@@ -321,12 +319,11 @@ out:
* @inode: inode of the meta data file
* @block: block offset
*
- * Return Value: On success, zero is returned.
- * On error, one of the following negative error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
- *
- * %-EIO - I/O error
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - Non-existent block.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
{
@@ -349,12 +346,10 @@ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
* nilfs_mdt_forget_block() clears a dirty flag of the specified buffer, and
* tries to release the page including the buffer from a page cache.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error code is returned.
- *
- * %-EBUSY - page has an active buffer.
- *
- * %-ENOENT - page cache has no page addressed by the offset.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EBUSY - Page has an active buffer.
+ * * %-ENOENT - Page cache has no page addressed by the offset.
*/
int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
{
@@ -427,8 +422,6 @@ static int nilfs_mdt_write_folio(struct folio *folio,
if (wbc->sync_mode == WB_SYNC_ALL)
err = nilfs_construct_segment(sb);
- else if (wbc->for_reclaim)
- nilfs_flush_segment(sb, inode->i_ino);
return err;
}
@@ -524,6 +517,8 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned int entry_size,
* nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
* @inode: inode of the metadata file
* @shadow: shadow mapping
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_mdt_setup_shadow_map(struct inode *inode,
struct nilfs_shadow_map *shadow)
@@ -545,6 +540,8 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
/**
* nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map
* @inode: inode of the metadata file
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_mdt_save_to_shadow_map(struct inode *inode)
{
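
A short illustration of the nilfs_mdt_get_block() contract documented above (a minimal sketch, not part of the patch; the helper name is hypothetical and it assumes @out_bh is the final parameter, as the kernel-doc suggests):

	/* Read-only lookup that treats a hole block as "no data". */
	static int example_peek_mdt_block(struct inode *mdt_inode,
					  unsigned long blkoff)
	{
		struct buffer_head *bh;	/* untouched unless 0 is returned */
		int err;

		/* create == 0 and no init_block: never allocates a new block */
		err = nilfs_mdt_get_block(mdt_inode, blkoff, 0, NULL, &bh);
		if (err == -ENOENT)
			return 0;	/* hole block, nothing to read */
		if (err)
			return err;	/* -EIO or -ENOMEM per the kernel-doc */

		/* ... inspect bh->b_data ... */
		brelse(bh);
		return 0;
	}

Since @out_bh is substituted only on success, the caller can safely leave bh uninitialized on the error paths.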
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 9b108052d9f7..40f4b1a28705 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -67,6 +67,11 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
inode = NULL;
} else {
inode = nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino);
+ if (inode == ERR_PTR(-ESTALE)) {
+ nilfs_error(dir->i_sb,
+ "deleted inode referenced: %lu", ino);
+ return ERR_PTR(-EIO);
+ }
}
return d_splice_alias(inode, dentry);
@@ -213,8 +218,8 @@ static int nilfs_link(struct dentry *old_dentry, struct inode *dir,
return err;
}
-static int nilfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *nilfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode *inode;
struct nilfs_transaction_info ti;
@@ -222,7 +227,7 @@ static int nilfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
if (err)
- return err;
+ return ERR_PTR(err);
inc_nlink(dir);
@@ -253,7 +258,7 @@ out:
else
nilfs_transaction_abort(dir->i_sb);
- return err;
+ return ERR_PTR(err);
out_fail:
drop_nlink(inode);
@@ -365,6 +370,7 @@ static int nilfs_rename(struct mnt_idmap *idmap,
struct folio *old_folio;
struct nilfs_dir_entry *old_de;
struct nilfs_transaction_info ti;
+ bool old_is_dir = S_ISDIR(old_inode->i_mode);
int err;
if (flags & ~RENAME_NOREPLACE)
@@ -380,7 +386,7 @@ static int nilfs_rename(struct mnt_idmap *idmap,
goto out;
}
- if (S_ISDIR(old_inode->i_mode)) {
+ if (old_is_dir && old_dir != new_dir) {
err = -EIO;
dir_de = nilfs_dotdot(old_inode, &dir_folio);
if (!dir_de)
@@ -392,7 +398,7 @@ static int nilfs_rename(struct mnt_idmap *idmap,
struct nilfs_dir_entry *new_de;
err = -ENOTEMPTY;
- if (dir_de && !nilfs_empty_dir(new_inode))
+ if (old_is_dir && !nilfs_empty_dir(new_inode))
goto out_dir;
new_de = nilfs_find_entry(new_dir, &new_dentry->d_name,
@@ -401,11 +407,13 @@ static int nilfs_rename(struct mnt_idmap *idmap,
err = PTR_ERR(new_de);
goto out_dir;
}
- nilfs_set_link(new_dir, new_de, new_folio, old_inode);
+ err = nilfs_set_link(new_dir, new_de, new_folio, old_inode);
folio_release_kmap(new_folio, new_de);
+ if (unlikely(err))
+ goto out_dir;
nilfs_mark_inode_dirty(new_dir);
inode_set_ctime_current(new_inode);
- if (dir_de)
+ if (old_is_dir)
drop_nlink(new_inode);
drop_nlink(new_inode);
nilfs_mark_inode_dirty(new_inode);
@@ -413,7 +421,7 @@ static int nilfs_rename(struct mnt_idmap *idmap,
err = nilfs_add_link(new_dentry, old_inode);
if (err)
goto out_dir;
- if (dir_de) {
+ if (old_is_dir) {
inc_nlink(new_dir);
nilfs_mark_inode_dirty(new_dir);
}
@@ -425,28 +433,28 @@ static int nilfs_rename(struct mnt_idmap *idmap,
*/
inode_set_ctime_current(old_inode);
- nilfs_delete_entry(old_de, old_folio);
-
- if (dir_de) {
- nilfs_set_link(old_inode, dir_de, dir_folio, new_dir);
- folio_release_kmap(dir_folio, dir_de);
- drop_nlink(old_dir);
+ err = nilfs_delete_entry(old_de, old_folio);
+ if (likely(!err)) {
+ if (old_is_dir) {
+ if (old_dir != new_dir)
+ err = nilfs_set_link(old_inode, dir_de,
+ dir_folio, new_dir);
+ drop_nlink(old_dir);
+ }
+ nilfs_mark_inode_dirty(old_dir);
}
- folio_release_kmap(old_folio, old_de);
-
- nilfs_mark_inode_dirty(old_dir);
nilfs_mark_inode_dirty(old_inode);
- err = nilfs_transaction_commit(old_dir->i_sb);
- return err;
-
out_dir:
if (dir_de)
folio_release_kmap(dir_folio, dir_de);
out_old:
folio_release_kmap(old_folio, old_de);
out:
- nilfs_transaction_abort(old_dir->i_sb);
+ if (likely(!err))
+ err = nilfs_transaction_commit(old_dir->i_sb);
+ else
+ nilfs_transaction_abort(old_dir->i_sb);
return err;
}
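
For context on the ->mkdir conversion above, which now reports errors through the returned dentry pointer: a minimal caller-side sketch of the ERR_PTR() convention (illustrative only, not part of the patch). Note that ERR_PTR(0) is NULL, so "return ERR_PTR(err)" with err == 0 still signals success.

	#include <linux/err.h>

	/* Unpacking a pointer-or-errno return such as nilfs_mkdir()'s. */
	static int example_unpack_dentry(struct dentry *d)
	{
		if (IS_ERR(d))
			return PTR_ERR(d);	/* negative errno carried in the pointer */
		/* NULL or a valid dentry both mean success here */
		return 0;
	}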
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 45d03826eaf1..b7e3d91b6243 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -14,6 +14,7 @@
#include <linux/buffer_head.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
+#include <linux/fs_struct.h>
#include <linux/nilfs2_api.h>
#include <linux/nilfs2_ondisk.h>
#include "the_nilfs.h"
@@ -261,16 +262,16 @@ struct nilfs_dir_entry *nilfs_find_entry(struct inode *, const struct qstr *,
int nilfs_delete_entry(struct nilfs_dir_entry *, struct folio *);
int nilfs_empty_dir(struct inode *);
struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct folio **);
-void nilfs_set_link(struct inode *, struct nilfs_dir_entry *,
- struct folio *, struct inode *);
+int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
+ struct folio *folio, struct inode *inode);
/* file.c */
extern int nilfs_sync_file(struct file *, loff_t, loff_t, int);
/* ioctl.c */
-int nilfs_fileattr_get(struct dentry *dentry, struct fileattr *m);
+int nilfs_fileattr_get(struct dentry *dentry, struct file_kattr *m);
int nilfs_fileattr_set(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa);
+ struct dentry *dentry, struct file_kattr *fa);
long nilfs_ioctl(struct file *, unsigned int, unsigned long);
long nilfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *, struct nilfs_argv *,
@@ -401,6 +402,7 @@ extern const struct file_operations nilfs_dir_operations;
extern const struct inode_operations nilfs_file_inode_operations;
extern const struct file_operations nilfs_file_operations;
extern const struct address_space_operations nilfs_aops;
+extern const struct address_space_operations nilfs_buffer_cache_aops;
extern const struct inode_operations nilfs_dir_inode_operations;
extern const struct inode_operations nilfs_special_inode_operations;
extern const struct inode_operations nilfs_symlink_inode_operations;
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 9de2a494a069..56c4da417b6a 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -135,8 +135,7 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
* nilfs_folio_buffers_clean - Check if a folio has dirty buffers or not.
* @folio: Folio to be checked.
*
- * nilfs_folio_buffers_clean() returns false if the folio has dirty buffers.
- * Otherwise, it returns true.
+ * Return: false if the folio has dirty buffers, true otherwise.
*/
bool nilfs_folio_buffers_clean(struct folio *folio)
{
@@ -168,7 +167,7 @@ void nilfs_folio_bug(struct folio *folio)
printk(KERN_CRIT "NILFS_FOLIO_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
"mapping=%p ino=%lu\n",
folio, folio_ref_count(folio),
- (unsigned long long)folio->index, folio->flags, m, ino);
+ (unsigned long long)folio->index, folio->flags.f, m, ino);
head = folio_buffers(folio);
if (head) {
@@ -392,6 +391,11 @@ void nilfs_clear_dirty_pages(struct address_space *mapping)
/**
* nilfs_clear_folio_dirty - discard dirty folio
* @folio: dirty folio that will be discarded
+ *
+ * nilfs_clear_folio_dirty() clears the working states of the folio and its
+ * buffers, including the dirty state. If the folio has buffers, the states
+ * are cleared only after confirming that none of the buffer heads is busy
+ * (none has active references and none is locked).
*/
void nilfs_clear_folio_dirty(struct folio *folio)
{
@@ -399,10 +403,6 @@ void nilfs_clear_folio_dirty(struct folio *folio)
BUG_ON(!folio_test_locked(folio));
- folio_clear_uptodate(folio);
- folio_clear_mappedtodisk(folio);
- folio_clear_checked(folio);
-
head = folio_buffers(folio);
if (head) {
const unsigned long clear_bits =
@@ -410,6 +410,25 @@ void nilfs_clear_folio_dirty(struct folio *folio)
BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) |
BIT(BH_Delay));
+ bool busy, invalidated = false;
+
+recheck_buffers:
+ busy = false;
+ bh = head;
+ do {
+ if (atomic_read(&bh->b_count) | buffer_locked(bh)) {
+ busy = true;
+ break;
+ }
+ } while (bh = bh->b_this_page, bh != head);
+
+ if (busy) {
+ if (invalidated)
+ return;
+ invalidate_bh_lrus();
+ invalidated = true;
+ goto recheck_buffers;
+ }
bh = head;
do {
@@ -419,6 +438,9 @@ void nilfs_clear_folio_dirty(struct folio *folio)
} while (bh = bh->b_this_page, bh != head);
}
+ folio_clear_uptodate(folio);
+ folio_clear_mappedtodisk(folio);
+ folio_clear_checked(folio);
__nilfs_clear_folio_dirty(folio);
}
@@ -477,8 +499,9 @@ void __nilfs_clear_folio_dirty(struct folio *folio)
* This function searches an extent of buffers marked "delayed" which
* starts from a block offset equal to or larger than @start_blk. If
* such an extent was found, this will store the start offset in
- * @blkoff and return its length in blocks. Otherwise, zero is
- * returned.
+ * @blkoff and return its length in blocks.
+ *
+ * Return: Length in blocks of found extent, 0 otherwise.
*/
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
sector_t start_blk,
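
For reference, the nilfs_find_uncommitted_extent() contract restated above can be exercised as in the following sketch (illustrative only; the walker is hypothetical and assumes @blkoff is a sector_t output pointer, as the parameter description indicates):

	/* Walk all extents of delayed (uncommitted) blocks in a file. */
	static void example_walk_delayed_extents(struct inode *inode)
	{
		sector_t blkoff, start = 0;
		unsigned long len;

		/* a return of 0 means no further delayed extent at or after start */
		while ((len = nilfs_find_uncommitted_extent(inode, start, &blkoff))) {
			/* delayed blocks cover [blkoff, blkoff + len) */
			start = blkoff + len;
		}
	}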
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index e43405bf521e..a9c61d0492cb 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -88,6 +88,8 @@ static int nilfs_warn_segment_error(struct super_block *sb, int err)
* @check_bytes: number of bytes to be checked
* @start: DBN of start block
* @nblock: number of blocks to be checked
+ *
+ * Return: 0 on success, or %-EIO if an I/O error occurs.
*/
static int nilfs_compute_checksum(struct the_nilfs *nilfs,
struct buffer_head *bhs, u32 *sum,
@@ -126,6 +128,11 @@ static int nilfs_compute_checksum(struct the_nilfs *nilfs,
* @sr_block: disk block number of the super root block
* @pbh: address of a buffer_head pointer to return super root buffer
* @check: CRC check flag
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - Super root block corrupted.
+ * * %-EIO - I/O error.
*/
int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
struct buffer_head **pbh, int check)
@@ -176,6 +183,8 @@ int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
* @nilfs: nilfs object
* @start_blocknr: start block number of the log
* @sum: pointer to return segment summary structure
+ *
+ * Return: Buffer head pointer, or NULL if an I/O error occurs.
*/
static struct buffer_head *
nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr,
@@ -195,6 +204,13 @@ nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr,
* @seg_seq: sequence number of segment
* @bh_sum: buffer head of summary block
* @sum: segment summary struct
+ *
+ * Return: 0 on success, or one of the following internal codes on failure:
+ * * %NILFS_SEG_FAIL_MAGIC - Magic number mismatch.
+ * * %NILFS_SEG_FAIL_SEQ - Sequence number mismatch.
+ * * %NILFS_SEG_FAIL_CONSISTENCY - Block count out of range.
+ * * %NILFS_SEG_FAIL_IO - I/O error.
+ * * %NILFS_SEG_FAIL_CHECKSUM_FULL - Full log checksum verification failed.
*/
static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq,
struct buffer_head *bh_sum,
@@ -238,6 +254,9 @@ out:
* @pbh: the current buffer head on summary blocks [in, out]
* @offset: the current byte offset on summary blocks [in, out]
* @bytes: byte size of the item to be read
+ *
+ * Return: Kernel space address of current segment summary entry, or
+ * NULL if an I/O error occurs.
*/
static void *nilfs_read_summary_info(struct the_nilfs *nilfs,
struct buffer_head **pbh,
@@ -300,6 +319,11 @@ static void nilfs_skip_summary_info(struct the_nilfs *nilfs,
* @start_blocknr: start block number of the log
* @sum: log summary information
* @head: list head to add nilfs_recovery_block struct
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr,
struct nilfs_segment_summary *sum,
@@ -536,8 +560,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
if (unlikely(err))
goto failed_folio;
- block_write_end(NULL, inode->i_mapping, pos, blocksize,
- blocksize, folio, NULL);
+ block_write_end(pos, blocksize, blocksize, folio);
folio_unlock(folio);
folio_put(folio);
@@ -571,6 +594,12 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
* @sb: super block instance
* @root: NILFS root instance
* @ri: pointer to a nilfs_recovery_info
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - Log format error.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
struct super_block *sb,
@@ -754,18 +783,13 @@ static void nilfs_abort_roll_forward(struct the_nilfs *nilfs)
* @sb: super block instance
* @ri: pointer to a nilfs_recovery_info struct to store search results.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error code is returned.
- *
- * %-EINVAL - Inconsistent filesystem state.
- *
- * %-EIO - I/O error
- *
- * %-ENOSPC - No space left on device (only in a panic state).
- *
- * %-ERESTARTSYS - Interrupted.
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - Inconsistent filesystem state.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - No space left on device (only in a panic state).
+ * * %-ERESTARTSYS - Interrupted.
*/
int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
struct super_block *sb,
@@ -830,14 +854,11 @@ failed:
* segment pointed by the superblock. It sets up struct the_nilfs through
* this search. It fills nilfs_recovery_info (ri) required for recovery.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error code is returned.
- *
- * %-EINVAL - No valid segment found
- *
- * %-EIO - I/O error
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - No valid segment found.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_search_super_root(struct the_nilfs *nilfs,
struct nilfs_recovery_info *ri)
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index e08cab03366b..a8bdf3d318ea 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -406,12 +406,7 @@ static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
* @segbuf: buffer storing a log to be written
* @nilfs: nilfs object
*
- * Return Value: On Success, 0 is returned. On Error, one of the following
- * negative error code is returned.
- *
- * %-EIO - I/O error
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: Always 0.
*/
static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
struct the_nilfs *nilfs)
@@ -452,10 +447,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
* nilfs_segbuf_wait - wait for completion of requested BIOs
* @segbuf: segment buffer
*
- * Return Value: On Success, 0 is returned. On Error, one of the following
- * negative error code is returned.
- *
- * %-EIO - I/O error
+ * Return: 0 on success, or %-EIO if I/O error is detected.
*/
static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
{
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 587251830897..deee16bc9d4e 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -191,12 +191,10 @@ static int nilfs_prepare_segment_lock(struct super_block *sb,
* When @vacancy_check flag is set, this function will check the amount of
* free space, and will wait for the GC to reclaim disk space if low capacity.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
- *
- * %-ENOSPC - No space left on device
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - No space left on device (if checking free space).
*/
int nilfs_transaction_begin(struct super_block *sb,
struct nilfs_transaction_info *ti,
@@ -252,6 +250,8 @@ int nilfs_transaction_begin(struct super_block *sb,
* nilfs_transaction_commit() sets a timer to start the segment
* constructor. If a sync flag is set, it starts construction
* directly.
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_transaction_commit(struct super_block *sb)
{
@@ -407,6 +407,8 @@ static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
/**
* nilfs_segctor_reset_segment_buffer - reset the current segment buffer
* @sci: nilfs_sc_info
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
@@ -734,7 +736,6 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
if (!head)
head = create_empty_buffers(folio,
i_blocksize(inode), 0);
- folio_unlock(folio);
bh = head;
do {
@@ -744,11 +745,14 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
list_add_tail(&bh->b_assoc_buffers, listp);
ndirties++;
if (unlikely(ndirties >= nlimit)) {
+ folio_unlock(folio);
folio_batch_release(&fbatch);
cond_resched();
return ndirties;
}
} while (bh = bh->b_this_page, bh != head);
+
+ folio_unlock(folio);
}
folio_batch_release(&fbatch);
cond_resched();
@@ -1118,7 +1122,8 @@ static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
* a super root block containing this sufile change is complete, and it can
* be canceled with nilfs_sufile_cancel_freev() until then.
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-EINVAL - Invalid segment number.
* * %-EIO - I/O error (including metadata corruption).
* * %-ENOMEM - Insufficient memory available.
@@ -1315,6 +1320,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
* nilfs_segctor_begin_construction - setup segment buffer to make a new log
* @sci: nilfs_sc_info
* @nilfs: nilfs object
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
struct the_nilfs *nilfs)
@@ -2214,22 +2221,6 @@ static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
spin_unlock(&sci->sc_state_lock);
}
-/**
- * nilfs_flush_segment - trigger a segment construction for resource control
- * @sb: super block
- * @ino: inode number of the file to be flushed out.
- */
-void nilfs_flush_segment(struct super_block *sb, ino_t ino)
-{
- struct the_nilfs *nilfs = sb->s_fs_info;
- struct nilfs_sc_info *sci = nilfs->ns_writer;
-
- if (!sci || nilfs_doing_construction())
- return;
- nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
- /* assign bit 0 to data files */
-}
-
struct nilfs_segctor_wait_request {
wait_queue_entry_t wq;
__u32 seq;
@@ -2312,18 +2303,13 @@ static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force)
* nilfs_construct_segment - construct a logical segment
* @sb: super block
*
- * Return Value: On success, 0 is returned. On errors, one of the following
- * negative error code is returned.
- *
- * %-EROFS - Read only filesystem.
- *
- * %-EIO - I/O error
- *
- * %-ENOSPC - No space left on device (only in a panic state).
- *
- * %-ERESTARTSYS - Interrupted.
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - No space left on device (only in a panic state).
+ * * %-ERESTARTSYS - Interrupted.
+ * * %-EROFS - Read only filesystem.
*/
int nilfs_construct_segment(struct super_block *sb)
{
@@ -2347,18 +2333,13 @@ int nilfs_construct_segment(struct super_block *sb)
* @start: start byte offset
* @end: end byte offset (inclusive)
*
- * Return Value: On success, 0 is returned. On errors, one of the following
- * negative error code is returned.
- *
- * %-EROFS - Read only filesystem.
- *
- * %-EIO - I/O error
- *
- * %-ENOSPC - No space left on device (only in a panic state).
- *
- * %-ERESTARTSYS - Interrupted.
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - No space left on device (only in a panic state).
+ * * %-ERESTARTSYS - Interrupted.
+ * * %-EROFS - Read only filesystem.
*/
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
loff_t start, loff_t end)
@@ -2427,7 +2408,7 @@ static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
* the area protected by sc_state_lock.
*/
if (thread_is_alive)
- del_timer_sync(&sci->sc_timer);
+ timer_delete_sync(&sci->sc_timer);
}
/**
@@ -2464,6 +2445,8 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
* nilfs_segctor_construct - form logs and write them to disk
* @sci: segment constructor object
* @mode: mode of log forming
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
@@ -2502,7 +2485,7 @@ static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
static void nilfs_construction_timeout(struct timer_list *t)
{
- struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);
+ struct nilfs_sc_info *sci = timer_container_of(sci, t, sc_timer);
wake_up_process(sci->sc_task);
}
@@ -2785,7 +2768,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
if (sci->sc_task) {
wake_up(&sci->sc_wait_daemon);
- kthread_stop(sci->sc_task);
+ if (kthread_stop(sci->sc_task)) {
+ spin_lock(&sci->sc_state_lock);
+ sci->sc_task = NULL;
+ timer_shutdown_sync(&sci->sc_timer);
+ spin_unlock(&sci->sc_state_lock);
+ }
}
spin_lock(&sci->sc_state_lock);
@@ -2836,7 +2824,8 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
* This allocates a log writer object, initializes it, and starts the
* log writer.
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-EINTR - Log writer thread creation failed due to interruption.
* * %-ENOMEM - Insufficient memory available.
*/
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index f723f47ddc4e..4b39ed43ae72 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -226,7 +226,6 @@ extern void nilfs_relax_pressure_in_lock(struct super_block *);
extern int nilfs_construct_segment(struct super_block *);
extern int nilfs_construct_dsync_segment(struct super_block *, struct inode *,
loff_t, loff_t);
-extern void nilfs_flush_segment(struct super_block *, ino_t);
extern int nilfs_clean_segments(struct super_block *, struct nilfs_argv *,
void **);
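
The transaction interface documented in segment.c above is used in the commit-or-abort pattern that the reworked nilfs_rename() follows; a minimal sketch (not part of the patch; the metadata update in the middle is a placeholder):

	static int example_update_under_transaction(struct super_block *sb)
	{
		struct nilfs_transaction_info ti;
		int err;

		err = nilfs_transaction_begin(sb, &ti, 1);	/* 1 = vacancy check */
		if (err)
			return err;	/* -ENOMEM or -ENOSPC per the kernel-doc */

		err = 0;	/* placeholder for the actual metadata update */

		if (likely(!err))
			err = nilfs_transaction_commit(sb);
		else
			nilfs_transaction_abort(sb);
		return err;
	}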
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index d3ecc813d633..83f93337c01b 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -133,6 +133,8 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
/**
* nilfs_sufile_get_ncleansegs - return the number of clean segments
* @sufile: inode of segment usage file
+ *
+ * Return: Number of clean segments.
*/
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
@@ -155,17 +157,13 @@ unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
* of successfully modified segments from the head is stored in the
* place @ndone points to.
*
- * Return Value: On success, zero is returned. On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - Given segment usage is in hole block (may be returned if
- * @create is zero)
- *
- * %-EINVAL - Invalid segment usage number
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - Invalid segment usage number.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - Given segment usage is in hole block (may be returned if
+ * @create is zero).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
int create, size_t *ndone,
@@ -272,10 +270,7 @@ int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
* @start: minimum segment number of allocatable region (inclusive)
* @end: maximum segment number of allocatable region (inclusive)
*
- * Return Value: On success, 0 is returned. On error, one of the
- * following negative error codes is returned.
- *
- * %-ERANGE - invalid segment region
+ * Return: 0 on success, or %-ERANGE if segment range is invalid.
*/
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
@@ -300,17 +295,14 @@ int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
* @sufile: inode of segment usage file
* @segnump: pointer to segment number
*
- * Description: nilfs_sufile_alloc() allocates a clean segment.
- *
- * Return Value: On success, 0 is returned and the segment number of the
- * allocated segment is stored in the place pointed by @segnump. On error, one
- * of the following negative error codes is returned.
- *
- * %-EIO - I/O error.
+ * Description: nilfs_sufile_alloc() allocates a clean segment, and stores
+ * its segment number in the place pointed to by @segnump.
*
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOSPC - No clean segment left.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - No clean segment left.
*/
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
@@ -510,6 +502,8 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
* nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
* @sufile: inode of segment usage file
* @segnum: segment number
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
@@ -569,6 +563,8 @@ out_sem:
* @segnum: segment number
* @nblocks: number of live blocks in the segment
* @modtime: modification time (option)
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
unsigned long nblocks, time64_t modtime)
@@ -610,16 +606,13 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
* @sufile: inode of segment usage file
* @sustat: pointer to a structure of segment usage statistics
*
- * Description: nilfs_sufile_get_stat() returns information about segment
- * usage.
+ * Description: nilfs_sufile_get_stat() retrieves segment usage statistics
+ * and stores them in the location pointed to by @sustat.
*
- * Return Value: On success, 0 is returned, and segment usage information is
- * stored in the place pointed by @sustat. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
@@ -683,16 +676,12 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
* @start: start segment number (inclusive)
* @end: end segment number (inclusive)
*
- * Return Value: On success, 0 is returned. On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid number of segments specified
- *
- * %-EBUSY - Dirty or active segments are present in the range
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EBUSY - Dirty or active segments are present in the range.
+ * * %-EINVAL - Invalid number of segments specified.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_sufile_truncate_range(struct inode *sufile,
__u64 start, __u64 end)
@@ -787,16 +776,12 @@ out:
* @sufile: inode of segment usage file
* @newnsegs: new number of segments
*
- * Return Value: On success, 0 is returned. On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOSPC - Enough free space is not left for shrinking
- *
- * %-EBUSY - Dirty or active segments exist in the region to be truncated
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EBUSY - Dirty or active segments exist in the region to be truncated.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - Not enough free space left for shrinking.
*/
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
@@ -865,7 +850,7 @@ out:
* @nsi: size of suinfo array
*
* Return: Count of segment usage info items stored in the output buffer on
- * success, or the following negative error code on failure.
+ * success, or one of the following negative error codes on failure:
* * %-EIO - I/O error (including metadata corruption).
* * %-ENOMEM - Insufficient memory available.
*/
@@ -939,14 +924,11 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
* segment usage accordingly. Only the fields indicated by the sup_flags
* are updated.
*
- * Return Value: On success, 0 is returned. On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - Invalid values in input (segment number, flags or nblocks).
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
unsigned int supsz, size_t nsup)
@@ -1073,7 +1055,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
* and start+len is rounded down. For each clean segment blkdev_issue_discard
* function is invoked.
*
- * Return Value: On success, 0 is returned or negative error code, otherwise.
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
{
@@ -1219,6 +1201,8 @@ out_sem:
* @susize: size of a segment usage entry
* @raw_inode: on-disk sufile inode
* @inodep: buffer to store the inode
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_sufile_read(struct super_block *sb, size_t susize,
struct nilfs_inode *raw_inode, struct inode **inodep)
@@ -1242,7 +1226,7 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
if (unlikely(!sufile))
return -ENOMEM;
- if (!(sufile->i_state & I_NEW))
+ if (!(inode_state_read_once(sufile) & I_NEW))
goto out;
err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h
index 8e8a1a5a0402..cd6f28ab3521 100644
--- a/fs/nilfs2/sufile.h
+++ b/fs/nilfs2/sufile.h
@@ -58,6 +58,8 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range);
* nilfs_sufile_scrap - make a segment garbage
* @sufile: inode of segment usage file
* @segnum: segment number to be freed
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static inline int nilfs_sufile_scrap(struct inode *sufile, __u64 segnum)
{
@@ -68,6 +70,8 @@ static inline int nilfs_sufile_scrap(struct inode *sufile, __u64 segnum)
* nilfs_sufile_free - free segment
* @sufile: inode of segment usage file
* @segnum: segment number to be freed
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum)
{
@@ -80,6 +84,8 @@ static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum)
* @segnumv: array of segment numbers
* @nsegs: size of @segnumv array
* @ndone: place to store the number of freed segments
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static inline int nilfs_sufile_freev(struct inode *sufile, __u64 *segnumv,
size_t nsegs, size_t *ndone)
@@ -95,8 +101,7 @@ static inline int nilfs_sufile_freev(struct inode *sufile, __u64 *segnumv,
* @nsegs: size of @segnumv array
* @ndone: place to store the number of cancelled segments
*
- * Return Value: On success, 0 is returned. On error, a negative error codes
- * is returned.
+ * Return: 0 on success, or a negative error code on failure.
*/
static inline int nilfs_sufile_cancel_freev(struct inode *sufile,
__u64 *segnumv, size_t nsegs,
@@ -114,14 +119,11 @@ static inline int nilfs_sufile_cancel_freev(struct inode *sufile,
* Description: nilfs_sufile_set_error() marks the segment specified by
* @segnum as erroneous. The error segment will never be used again.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid segment usage number.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - Invalid segment usage number.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
static inline int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum)
{
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index eca79cca3803..badc2cbc895e 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -309,6 +309,8 @@ int nilfs_commit_super(struct super_block *sb, int flag)
* This function restores state flags in the on-disk super block.
* This will set "clean" flag (i.e. NILFS_VALID_FS) unless the
* filesystem was not clean previously.
+ *
+ * Return: 0 on success, or %-EIO on I/O error or superblock corruption.
*/
int nilfs_cleanup_super(struct super_block *sb)
{
@@ -339,6 +341,8 @@ int nilfs_cleanup_super(struct super_block *sb)
* nilfs_move_2nd_super - relocate secondary super block
* @sb: super block instance
* @sb2off: new offset of the secondary super block (in bytes)
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off)
{
@@ -420,6 +424,8 @@ out:
* nilfs_resize_fs - resize the filesystem
* @sb: super block instance
* @newsize: new size of the filesystem (in bytes)
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
{
@@ -987,7 +993,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
* nilfs_tree_is_busy() - try to shrink dentries of a checkpoint
* @root_dentry: root dentry of the tree to be shrunk
*
- * This function returns true if the tree was in-use.
+ * Return: true if the tree was in-use, false otherwise.
*/
static bool nilfs_tree_is_busy(struct dentry *root_dentry)
{
@@ -1033,6 +1039,8 @@ int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno)
*
* This function is called exclusively by nilfs->ns_mount_mutex.
* So, the recovery process is protected from other simultaneous mounts.
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int
nilfs_fill_super(struct super_block *sb, struct fs_context *fc)
diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
index 14868a3dd592..bc52afbfc5c7 100644
--- a/fs/nilfs2/sysfs.c
+++ b/fs/nilfs2/sysfs.c
@@ -1075,7 +1075,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
************************************************************************/
static ssize_t nilfs_feature_revision_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d.%d\n",
NILFS_CURRENT_REV, NILFS_MINOR_REV);
@@ -1087,7 +1087,7 @@ static const char features_readme_str[] =
"(1) revision\n\tshow current revision of NILFS file system driver.\n";
static ssize_t nilfs_feature_README_show(struct kobject *kobj,
- struct attribute *attr,
+ struct kobj_attribute *attr,
char *buf)
{
return sysfs_emit(buf, features_readme_str);
diff --git a/fs/nilfs2/sysfs.h b/fs/nilfs2/sysfs.h
index 78a87a016928..d370cd5cce3f 100644
--- a/fs/nilfs2/sysfs.h
+++ b/fs/nilfs2/sysfs.h
@@ -50,16 +50,16 @@ struct nilfs_sysfs_dev_subgroups {
struct completion sg_segments_kobj_unregister;
};
-#define NILFS_COMMON_ATTR_STRUCT(name) \
+#define NILFS_KOBJ_ATTR_STRUCT(name) \
struct nilfs_##name##_attr { \
struct attribute attr; \
- ssize_t (*show)(struct kobject *, struct attribute *, \
+ ssize_t (*show)(struct kobject *, struct kobj_attribute *, \
char *); \
- ssize_t (*store)(struct kobject *, struct attribute *, \
+ ssize_t (*store)(struct kobject *, struct kobj_attribute *, \
const char *, size_t); \
}
-NILFS_COMMON_ATTR_STRUCT(feature);
+NILFS_KOBJ_ATTR_STRUCT(feature);
#define NILFS_DEV_ATTR_STRUCT(name) \
struct nilfs_##name##_attr { \
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index ac03fd3c330c..d0bcf744c553 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -49,8 +49,8 @@ void nilfs_set_last_segment(struct the_nilfs *nilfs,
* alloc_nilfs - allocate a nilfs object
* @sb: super block instance
*
- * Return Value: On success, pointer to the_nilfs is returned.
- * On error, NULL is returned.
+ * Return: a pointer to the allocated nilfs object on success, or NULL on
+ * failure.
*/
struct the_nilfs *alloc_nilfs(struct super_block *sb)
{
@@ -165,6 +165,9 @@ static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri)
* containing a super root from a given super block, and initializes
* relevant information on the nilfs object preparatory for log
* scanning and recovery.
+ *
+ * Return: 0 on success, or %-EINVAL if current segment number is out
+ * of range.
*/
static int nilfs_store_log_cursor(struct the_nilfs *nilfs,
struct nilfs_super_block *sbp)
@@ -200,8 +203,7 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs,
* exponent information written in @sbp and stores it in @blocksize,
* or aborts with an error message if it's too large.
*
- * Return Value: On success, 0 is returned. If the block size is too
- * large, -EINVAL is returned.
+ * Return: 0 on success, or %-EINVAL if the block size is too large.
*/
static int nilfs_get_blocksize(struct super_block *sb,
struct nilfs_super_block *sbp, int *blocksize)
@@ -226,6 +228,13 @@ static int nilfs_get_blocksize(struct super_block *sb,
* load_nilfs() searches and load the latest super root,
* attaches the last segment, and does recovery if needed.
* The caller must call this exclusively for simultaneous mounts.
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - No valid segment found.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-EROFS - Read only device or RO compat mode (if recovery is required).
*/
int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
{
@@ -395,6 +404,8 @@ static unsigned long long nilfs_max_size(unsigned int blkbits)
* nilfs_nrsvsegs - calculate the number of reserved segments
* @nilfs: nilfs object
* @nsegs: total number of segments
+ *
+ * Return: Number of reserved segments.
*/
unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs)
{
@@ -406,6 +417,8 @@ unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs)
/**
* nilfs_max_segment_count - calculate the maximum number of segments
* @nilfs: nilfs object
+ *
+ * Return: Maximum number of segments.
*/
static u64 nilfs_max_segment_count(struct the_nilfs *nilfs)
{
@@ -538,7 +551,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
* area, or if the parameters themselves are not normal, it is
* determined to be invalid.
*
- * Return Value: true if invalid, false if valid.
+ * Return: true if invalid, false if valid.
*/
static bool nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
{
@@ -684,8 +697,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
* reading the super block, getting disk layout information, initializing
* shared fields in the_nilfs).
*
- * Return Value: On success, 0 is returned. On error, a negative error
- * code is returned.
+ * Return: 0 on success, or a negative error code on failure.
*/
int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
{
@@ -693,8 +705,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
int blocksize;
int err;
- down_write(&nilfs->ns_sem);
-
blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE);
if (!blocksize) {
nilfs_err(sb, "unable to set blocksize");
@@ -767,7 +777,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
set_nilfs_init(nilfs);
err = 0;
out:
- up_write(&nilfs->ns_sem);
return err;
failed_sbh:
diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
index 18d597e49a19..a5c3a9f1b8dc 100644
--- a/fs/nls/nls_base.c
+++ b/fs/nls/nls_base.c
@@ -67,19 +67,22 @@ int utf8_to_utf32(const u8 *s, int inlen, unicode_t *pu)
l &= t->lmask;
if (l < t->lval || l > UNICODE_MAX ||
(l & SURROGATE_MASK) == SURROGATE_PAIR)
- return -1;
+ return -EILSEQ;
+
*pu = (unicode_t) l;
return nc;
}
if (inlen <= nc)
- return -1;
+ return -EOVERFLOW;
+
s++;
c = (*s ^ 0x80) & 0xFF;
if (c & 0xC0)
- return -1;
+ return -EILSEQ;
+
l = (l << 6) | c;
}
- return -1;
+ return -EILSEQ;
}
EXPORT_SYMBOL(utf8_to_utf32);
@@ -94,7 +97,7 @@ int utf32_to_utf8(unicode_t u, u8 *s, int maxout)
l = u;
if (l > UNICODE_MAX || (l & SURROGATE_MASK) == SURROGATE_PAIR)
- return -1;
+ return -EILSEQ;
nc = 0;
for (t = utf8_table; t->cmask && maxout; t++, maxout--) {
@@ -110,7 +113,7 @@ int utf32_to_utf8(unicode_t u, u8 *s, int maxout)
return nc;
}
}
- return -1;
+ return -EOVERFLOW;
}
EXPORT_SYMBOL(utf32_to_utf8);
@@ -217,8 +220,16 @@ int utf16s_to_utf8s(const wchar_t *pwcs, int inlen, enum utf16_endian endian,
inlen--;
}
size = utf32_to_utf8(u, op, maxout);
- if (size == -1) {
- /* Ignore character and move on */
+ if (size < 0) {
+ if (size == -EILSEQ) {
+ /* Ignore character and move on */
+ continue;
+ }
+ /*
+ * Stop filling the buffer with data once a character
+ * does not fit anymore.
+ */
+ break;
} else {
op += size;
maxout -= size;
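
With the change above, callers of utf8_to_utf32() can distinguish a malformed sequence (-EILSEQ) from truncated input (-EOVERFLOW) instead of a bare -1. A small sketch of a decode loop built on that distinction (illustrative only; the counting helper is hypothetical):

	#include <linux/nls.h>

	/* Count code points, skipping invalid bytes the way utf16s_to_utf8s()
	 * above skips characters it cannot convert.
	 */
	static int example_count_codepoints(const u8 *s, int len)
	{
		unicode_t u;
		int n = 0, nc;

		while (len > 0) {
			nc = utf8_to_utf32(s, len, &u);
			if (nc == -EOVERFLOW)
				break;		/* truncated trailing sequence */
			if (nc == -EILSEQ)
				nc = 1;		/* skip one byte and resync */
			else
				n++;
			s += nc;
			len -= nc;
		}
		return n;
	}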
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 6004dfdfdf0f..9fb73bafd41d 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -20,7 +20,7 @@
static int dir_notify_enable __read_mostly = 1;
#ifdef CONFIG_SYSCTL
-static struct ctl_table dnotify_sysctls[] = {
+static const struct ctl_table dnotify_sysctls[] = {
{
.procname = "dir-notify-enable",
.data = &dir_notify_enable,
@@ -308,6 +308,10 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
goto out_err;
}
+ error = file_f_owner_allocate(filp);
+ if (error)
+ goto out_err;
+
/* new fsnotify mark, we expect most fcntl calls to add a new mark */
new_dn_mark = kmem_cache_alloc(dnotify_mark_cache, GFP_KERNEL);
if (!new_dn_mark) {
@@ -315,10 +319,6 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
goto out_err;
}
- error = file_f_owner_allocate(filp);
- if (error)
- goto out_err;
-
/* set up the new_fsn_mark and new_dn_mark */
new_fsn_mark = &new_dn_mark->fsn_mark;
fsnotify_init_mark(new_fsn_mark, dnotify_group);
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 24c7c5df4998..bfe884d624e7 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -166,6 +166,8 @@ static bool fanotify_should_merge(struct fanotify_event *old,
case FANOTIFY_EVENT_TYPE_FS_ERROR:
return fanotify_error_event_equal(FANOTIFY_EE(old),
FANOTIFY_EE(new));
+ case FANOTIFY_EVENT_TYPE_MNT:
+ return false;
default:
WARN_ON_ONCE(1);
}
@@ -223,7 +225,7 @@ static int fanotify_get_response(struct fsnotify_group *group,
struct fanotify_perm_event *event,
struct fsnotify_iter_info *iter_info)
{
- int ret;
+ int ret, errno;
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
@@ -262,14 +264,23 @@ static int fanotify_get_response(struct fsnotify_group *group,
ret = 0;
break;
case FAN_DENY:
+ /* Check custom errno from pre-content events */
+ errno = fanotify_get_response_errno(event->response);
+ if (errno) {
+ ret = -errno;
+ break;
+ }
+ fallthrough;
default:
ret = -EPERM;
}
/* Check if the response should be audited */
- if (event->response & FAN_AUDIT)
- audit_fanotify(event->response & ~FAN_AUDIT,
- &event->audit_rule);
+ if (event->response & FAN_AUDIT) {
+ u32 response = event->response &
+ (FANOTIFY_RESPONSE_ACCESS | FANOTIFY_RESPONSE_FLAGS);
+ audit_fanotify(response & ~FAN_AUDIT, &event->audit_rule);
+ }
pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
group, event, ret);
@@ -303,7 +314,10 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
__func__, iter_info->report_mask, event_mask, data, data_type);
- if (!fid_mode) {
+ if (FAN_GROUP_FLAG(group, FAN_REPORT_MNT)) {
+ if (data_type != FSNOTIFY_EVENT_MNT)
+ return 0;
+ } else if (!fid_mode) {
/* Do we have path to open a file descriptor? */
if (!path)
return 0;
@@ -401,7 +415,7 @@ static int fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode,
{
int dwords, type = 0;
char *ext_buf = NULL;
- void *buf = fh->buf;
+ void *buf = fh + 1;
int err;
fh->type = FILEID_ROOT;
@@ -440,7 +454,13 @@ static int fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode,
dwords = fh_len >> 2;
type = exportfs_encode_fid(inode, buf, &dwords);
err = -EINVAL;
- if (type <= 0 || type == FILEID_INVALID || fh_len != dwords << 2)
+ /*
+ * Unlike file_handle, type and len of struct fanotify_fh are u8.
+ * Traditionally, filesystems return handle_type < 0xff, but there
+ * is no enforcement for that in the vfs.
+ */
+ BUILD_BUG_ON(MAX_HANDLE_SZ > 0xff || FILEID_INVALID > 0xff);
+ if (type <= 0 || type >= FILEID_INVALID || fh_len != dwords << 2)
goto out_err;
fh->type = type;
@@ -548,9 +568,27 @@ static struct fanotify_event *fanotify_alloc_path_event(const struct path *path,
return &pevent->fae;
}
-static struct fanotify_event *fanotify_alloc_perm_event(const struct path *path,
+static struct fanotify_event *fanotify_alloc_mnt_event(u64 mnt_id, gfp_t gfp)
+{
+ struct fanotify_mnt_event *pevent;
+
+ pevent = kmem_cache_alloc(fanotify_mnt_event_cachep, gfp);
+ if (!pevent)
+ return NULL;
+
+ pevent->fae.type = FANOTIFY_EVENT_TYPE_MNT;
+ pevent->mnt_id = mnt_id;
+
+ return &pevent->fae;
+}
+
+static struct fanotify_event *fanotify_alloc_perm_event(const void *data,
+ int data_type,
gfp_t gfp)
{
+ const struct path *path = fsnotify_data_path(data, data_type);
+ const struct file_range *range =
+ fsnotify_data_file_range(data, data_type);
struct fanotify_perm_event *pevent;
pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
@@ -564,6 +602,9 @@ static struct fanotify_event *fanotify_alloc_perm_event(const struct path *path,
pevent->hdr.len = 0;
pevent->state = FAN_EVENT_INIT;
pevent->path = *path;
+ /* NULL ppos means no range info */
+ pevent->ppos = range ? &range->pos : NULL;
+ pevent->count = range ? range->count : 0;
path_get(path);
return &pevent->fae;
@@ -715,6 +756,7 @@ static struct fanotify_event *fanotify_alloc_event(
fid_mode);
struct inode *dirid = fanotify_dfid_inode(mask, data, data_type, dir);
const struct path *path = fsnotify_data_path(data, data_type);
+ u64 mnt_id = fsnotify_data_mnt_id(data, data_type);
struct mem_cgroup *old_memcg;
struct dentry *moved = NULL;
struct inode *child = NULL;
@@ -801,7 +843,7 @@ static struct fanotify_event *fanotify_alloc_event(
old_memcg = set_active_memcg(group->memcg);
if (fanotify_is_perm_event(mask)) {
- event = fanotify_alloc_perm_event(path, gfp);
+ event = fanotify_alloc_perm_event(data, data_type, gfp);
} else if (fanotify_is_error_event(mask)) {
event = fanotify_alloc_error_event(group, fsid, data,
data_type, &hash);
@@ -810,8 +852,12 @@ static struct fanotify_event *fanotify_alloc_event(
moved, &hash, gfp);
} else if (fid_mode) {
event = fanotify_alloc_fid_event(id, fsid, &hash, gfp);
- } else {
+ } else if (path) {
event = fanotify_alloc_path_event(path, &hash, gfp);
+ } else if (mnt_id) {
+ event = fanotify_alloc_mnt_event(mnt_id, gfp);
+ } else {
+ WARN_ON_ONCE(1);
}
if (!event)
@@ -909,8 +955,9 @@ static int fanotify_handle_event(struct fsnotify_group *group, u32 mask,
BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM);
BUILD_BUG_ON(FAN_FS_ERROR != FS_ERROR);
BUILD_BUG_ON(FAN_RENAME != FS_RENAME);
+ BUILD_BUG_ON(FAN_PRE_ACCESS != FS_PRE_ACCESS);
- BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 21);
+ BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 24);
mask = fanotify_group_event_mask(group, iter_info, &match_mask,
mask, data, data_type, dir);
@@ -968,6 +1015,7 @@ finish:
static void fanotify_free_group_priv(struct fsnotify_group *group)
{
+ put_user_ns(group->user_ns);
kfree(group->fanotify_data.merge_hash);
if (group->fanotify_data.ucounts)
dec_ucount(group->fanotify_data.ucounts,
@@ -1011,6 +1059,11 @@ static void fanotify_free_error_event(struct fsnotify_group *group,
mempool_free(fee, &group->fanotify_data.error_events_pool);
}
+static void fanotify_free_mnt_event(struct fanotify_event *event)
+{
+ kmem_cache_free(fanotify_mnt_event_cachep, FANOTIFY_ME(event));
+}
+
static void fanotify_free_event(struct fsnotify_group *group,
struct fsnotify_event *fsn_event)
{
@@ -1037,6 +1090,9 @@ static void fanotify_free_event(struct fsnotify_group *group,
case FANOTIFY_EVENT_TYPE_FS_ERROR:
fanotify_free_error_event(group, event);
break;
+ case FANOTIFY_EVENT_TYPE_MNT:
+ fanotify_free_mnt_event(event);
+ break;
default:
WARN_ON_ONCE(1);
}
diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h
index e5ab33cae6a7..39e60218df7c 100644
--- a/fs/notify/fanotify/fanotify.h
+++ b/fs/notify/fanotify/fanotify.h
@@ -9,6 +9,7 @@ extern struct kmem_cache *fanotify_mark_cache;
extern struct kmem_cache *fanotify_fid_event_cachep;
extern struct kmem_cache *fanotify_path_event_cachep;
extern struct kmem_cache *fanotify_perm_event_cachep;
+extern struct kmem_cache *fanotify_mnt_event_cachep;
/* Possible states of the permission event */
enum {
@@ -24,7 +25,7 @@ enum {
* stored in either the first or last 2 dwords.
*/
#define FANOTIFY_INLINE_FH_LEN (3 << 2)
-#define FANOTIFY_FH_HDR_LEN offsetof(struct fanotify_fh, buf)
+#define FANOTIFY_FH_HDR_LEN sizeof(struct fanotify_fh)
/* Fixed size struct for file handle */
struct fanotify_fh {
@@ -33,7 +34,6 @@ struct fanotify_fh {
#define FANOTIFY_FH_FLAG_EXT_BUF 1
u8 flags;
u8 pad;
- unsigned char buf[];
} __aligned(4);
/* Variable size struct for dir file handle + child file handle + name */
@@ -91,7 +91,7 @@ static inline char **fanotify_fh_ext_buf_ptr(struct fanotify_fh *fh)
BUILD_BUG_ON(FANOTIFY_FH_HDR_LEN % 4);
BUILD_BUG_ON(__alignof__(char *) - 4 + sizeof(char *) >
FANOTIFY_INLINE_FH_LEN);
- return (char **)ALIGN((unsigned long)(fh->buf), __alignof__(char *));
+ return (char **)ALIGN((unsigned long)(fh + 1), __alignof__(char *));
}
static inline void *fanotify_fh_ext_buf(struct fanotify_fh *fh)
@@ -101,7 +101,7 @@ static inline void *fanotify_fh_ext_buf(struct fanotify_fh *fh)
static inline void *fanotify_fh_buf(struct fanotify_fh *fh)
{
- return fanotify_fh_has_ext_buf(fh) ? fanotify_fh_ext_buf(fh) : fh->buf;
+ return fanotify_fh_has_ext_buf(fh) ? fanotify_fh_ext_buf(fh) : fh + 1;
}
static inline int fanotify_info_dir_fh_len(struct fanotify_info *info)
@@ -244,6 +244,7 @@ enum fanotify_event_type {
FANOTIFY_EVENT_TYPE_PATH_PERM,
FANOTIFY_EVENT_TYPE_OVERFLOW, /* struct fanotify_event */
FANOTIFY_EVENT_TYPE_FS_ERROR, /* struct fanotify_error_event */
+ FANOTIFY_EVENT_TYPE_MNT,
__FANOTIFY_EVENT_TYPE_NUM
};
@@ -276,7 +277,7 @@ static inline void fanotify_init_event(struct fanotify_event *event,
#define FANOTIFY_INLINE_FH(name, size) \
struct { \
struct fanotify_fh name; \
- /* Space for object_fh.buf[] - access with fanotify_fh_buf() */ \
+ /* Space for filehandle - access with fanotify_fh_buf() */ \
unsigned char _inline_fh_buf[size]; \
}
@@ -409,12 +410,23 @@ struct fanotify_path_event {
struct path path;
};
+struct fanotify_mnt_event {
+ struct fanotify_event fae;
+ u64 mnt_id;
+};
+
static inline struct fanotify_path_event *
FANOTIFY_PE(struct fanotify_event *event)
{
return container_of(event, struct fanotify_path_event, fae);
}
+static inline struct fanotify_mnt_event *
+FANOTIFY_ME(struct fanotify_event *event)
+{
+ return container_of(event, struct fanotify_mnt_event, fae);
+}
+
/*
* Structure for permission fanotify events. It gets allocated and freed in
* fanotify_handle_event() since we wait there for user response. When the
@@ -425,9 +437,13 @@ FANOTIFY_PE(struct fanotify_event *event)
struct fanotify_perm_event {
struct fanotify_event fae;
struct path path;
+ const loff_t *ppos; /* optional file range info */
+ size_t count;
u32 response; /* userspace answer to the event */
unsigned short state; /* state of the event */
+ unsigned short watchdog_cnt; /* already scanned by watchdog? */
int fd; /* fd we passed to userspace for this event */
+ pid_t recv_pid; /* pid of task receiving the event */
union {
struct fanotify_response_info_header hdr;
struct fanotify_response_info_audit_rule audit_rule;
@@ -446,6 +462,14 @@ static inline bool fanotify_is_perm_event(u32 mask)
mask & FANOTIFY_PERM_EVENTS;
}
+static inline bool fanotify_event_has_access_range(struct fanotify_event *event)
+{
+ if (!(event->mask & FANOTIFY_PRE_CONTENT_EVENTS))
+ return false;
+
+ return FANOTIFY_PERM(event)->ppos;
+}
+
static inline struct fanotify_event *FANOTIFY_E(struct fsnotify_event *fse)
{
return container_of(fse, struct fanotify_event, fse);
@@ -456,6 +480,11 @@ static inline bool fanotify_is_error_event(u32 mask)
return mask & FAN_FS_ERROR;
}
+static inline bool fanotify_is_mnt_event(u32 mask)
+{
+ return mask & (FAN_MNT_ATTACH | FAN_MNT_DETACH);
+}
+
static inline const struct path *fanotify_event_path(struct fanotify_event *event)
{
if (event->type == FANOTIFY_EVENT_TYPE_PATH)
@@ -518,3 +547,8 @@ static inline unsigned int fanotify_mark_user_flags(struct fsnotify_mark *mark)
return mflags;
}
+
+static inline u32 fanotify_get_response_errno(int res)
+{
+ return (res >> FAN_ERRNO_SHIFT) & FAN_ERRNO_MASK;
+}
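
To complement fanotify_get_response_errno() above, the inverse operation a responder performs when denying a pre-content event with a custom error looks roughly as follows (illustrative sketch only; FAN_ERRNO_SHIFT and FAN_ERRNO_MASK are assumed to be the UAPI constants introduced elsewhere in this series):

	/* Pack a positive errno into a FAN_DENY response word. */
	static inline u32 example_deny_response(u32 err)
	{
		return FAN_DENY | ((err & FAN_ERRNO_MASK) << FAN_ERRNO_SHIFT);
	}

	/*
	 * The kernel recovers it exactly as fanotify_get_response_errno() does:
	 * (response >> FAN_ERRNO_SHIFT) & FAN_ERRNO_MASK.
	 */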
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 2d85c71717d6..d0b9b984002f 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -50,6 +50,7 @@
/* configurable via /proc/sys/fs/fanotify/ */
static int fanotify_max_queued_events __read_mostly;
+static int perm_group_timeout __read_mostly;
#ifdef CONFIG_SYSCTL
@@ -58,7 +59,7 @@ static int fanotify_max_queued_events __read_mostly;
static long ft_zero = 0;
static long ft_int_max = INT_MAX;
-static struct ctl_table fanotify_table[] = {
+static const struct ctl_table fanotify_table[] = {
{
.procname = "max_user_groups",
.data = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS],
@@ -85,6 +86,14 @@ static struct ctl_table fanotify_table[] = {
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO
},
+ {
+ .procname = "watchdog_timeout",
+ .data = &perm_group_timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ },
};
static void __init fanotify_sysctls_init(void)
@@ -95,13 +104,97 @@ static void __init fanotify_sysctls_init(void)
#define fanotify_sysctls_init() do { } while (0)
#endif /* CONFIG_SYSCTL */
+static LIST_HEAD(perm_group_list);
+static DEFINE_SPINLOCK(perm_group_lock);
+static void perm_group_watchdog(struct work_struct *work);
+static DECLARE_DELAYED_WORK(perm_group_work, perm_group_watchdog);
+
+static void perm_group_watchdog_schedule(void)
+{
+ schedule_delayed_work(&perm_group_work, secs_to_jiffies(perm_group_timeout));
+}
+
+static void perm_group_watchdog(struct work_struct *work)
+{
+ struct fsnotify_group *group;
+ struct fanotify_perm_event *event;
+ struct task_struct *task;
+ pid_t failed_pid = 0;
+
+ guard(spinlock)(&perm_group_lock);
+ if (list_empty(&perm_group_list))
+ return;
+
+ list_for_each_entry(group, &perm_group_list,
+ fanotify_data.perm_grp_list) {
+ /*
+		 * OK to test without the lock; racing with an addition is
+		 * fine, we will deal with it on the next round
+ */
+ if (list_empty(&group->fanotify_data.access_list))
+ continue;
+
+ spin_lock(&group->notification_lock);
+ list_for_each_entry(event, &group->fanotify_data.access_list,
+ fae.fse.list) {
+ if (likely(event->watchdog_cnt == 0)) {
+ event->watchdog_cnt = 1;
+ } else if (event->watchdog_cnt == 1) {
+ /* Report on event only once */
+ event->watchdog_cnt = 2;
+
+ /* Do not report same pid repeatedly */
+ if (event->recv_pid == failed_pid)
+ continue;
+
+ failed_pid = event->recv_pid;
+ rcu_read_lock();
+ task = find_task_by_pid_ns(event->recv_pid,
+ &init_pid_ns);
+ pr_warn_ratelimited(
+ "PID %u (%s) failed to respond to fanotify queue for more than %d seconds\n",
+ event->recv_pid,
+ task ? task->comm : NULL,
+ perm_group_timeout);
+ rcu_read_unlock();
+ }
+ }
+ spin_unlock(&group->notification_lock);
+ }
+ perm_group_watchdog_schedule();
+}
+
+static void fanotify_perm_watchdog_group_remove(struct fsnotify_group *group)
+{
+ if (!list_empty(&group->fanotify_data.perm_grp_list)) {
+ /* Perm event watchdog can no longer scan this group. */
+ spin_lock(&perm_group_lock);
+ list_del_init(&group->fanotify_data.perm_grp_list);
+ spin_unlock(&perm_group_lock);
+ }
+}
+
+static void fanotify_perm_watchdog_group_add(struct fsnotify_group *group)
+{
+ if (!perm_group_timeout)
+ return;
+
+ spin_lock(&perm_group_lock);
+ if (list_empty(&group->fanotify_data.perm_grp_list)) {
+ /* Add to perm_group_list for monitoring by watchdog. */
+ if (list_empty(&perm_group_list))
+ perm_group_watchdog_schedule();
+ list_add_tail(&group->fanotify_data.perm_grp_list, &perm_group_list);
+ }
+ spin_unlock(&perm_group_lock);
+}
+
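Taken together, the helpers above implement a soft watchdog for permission events: a group joins perm_group_list when its first permission-event mark is added, perm_group_watchdog() rescans every perm_group_timeout seconds, and an event that is still unanswered on its second scan produces one ratelimited warning naming the pid that read it (recv_pid). A minimal sketch of enabling it from userspace, assuming the knob shows up as /proc/sys/fs/fanotify/watchdog_timeout (the procname registered above, under the sysctl directory noted at the top of this file):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Set the fanotify permission-event watchdog timeout in seconds. */
static int set_fanotify_watchdog(int seconds)
{
	char buf[16];
	int fd, n, ret = -1;

	fd = open("/proc/sys/fs/fanotify/watchdog_timeout", O_WRONLY);
	if (fd < 0)
		return -1;

	n = snprintf(buf, sizeof(buf), "%d\n", seconds);
	if (write(fd, buf, n) == n)
		ret = 0;
	close(fd);
	return ret;
}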
/*
* All flags that may be specified in parameter event_f_flags of fanotify_init.
*
* Internal and external open flags are stored together in field f_flags of
* struct file. Only external open flags shall be allowed in event_f_flags.
- * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be
- * excluded.
+ * Internal flags like FMODE_EXEC shall be excluded.
*/
#define FANOTIFY_INIT_ALL_EVENT_F_BITS ( \
O_ACCMODE | O_APPEND | O_NONBLOCK | \
@@ -114,14 +207,19 @@ struct kmem_cache *fanotify_mark_cache __ro_after_init;
struct kmem_cache *fanotify_fid_event_cachep __ro_after_init;
struct kmem_cache *fanotify_path_event_cachep __ro_after_init;
struct kmem_cache *fanotify_perm_event_cachep __ro_after_init;
+struct kmem_cache *fanotify_mnt_event_cachep __ro_after_init;
#define FANOTIFY_EVENT_ALIGN 4
#define FANOTIFY_FID_INFO_HDR_LEN \
(sizeof(struct fanotify_event_info_fid) + sizeof(struct file_handle))
-#define FANOTIFY_PIDFD_INFO_HDR_LEN \
+#define FANOTIFY_PIDFD_INFO_LEN \
sizeof(struct fanotify_event_info_pidfd)
#define FANOTIFY_ERROR_INFO_LEN \
(sizeof(struct fanotify_event_info_error))
+#define FANOTIFY_RANGE_INFO_LEN \
+ (sizeof(struct fanotify_event_info_range))
+#define FANOTIFY_MNT_INFO_LEN \
+ (sizeof(struct fanotify_event_info_mnt))
static int fanotify_fid_info_len(int fh_len, int name_len)
{
@@ -159,9 +257,6 @@ static size_t fanotify_event_len(unsigned int info_mode,
int fh_len;
int dot_len = 0;
- if (!info_mode)
- return event_len;
-
if (fanotify_is_error_event(event->mask))
event_len += FANOTIFY_ERROR_INFO_LEN;
@@ -176,13 +271,18 @@ static size_t fanotify_event_len(unsigned int info_mode,
dot_len = 1;
}
- if (info_mode & FAN_REPORT_PIDFD)
- event_len += FANOTIFY_PIDFD_INFO_HDR_LEN;
-
if (fanotify_event_has_object_fh(event)) {
fh_len = fanotify_event_object_fh_len(event);
event_len += fanotify_fid_info_len(fh_len, dot_len);
}
+ if (fanotify_is_mnt_event(event->mask))
+ event_len += FANOTIFY_MNT_INFO_LEN;
+
+ if (info_mode & FAN_REPORT_PIDFD)
+ event_len += FANOTIFY_PIDFD_INFO_LEN;
+
+ if (fanotify_event_has_access_range(event))
+ event_len += FANOTIFY_RANGE_INFO_LEN;
return event_len;
}
@@ -258,12 +358,11 @@ static int create_fd(struct fsnotify_group *group, const struct path *path,
return client_fd;
/*
- * we need a new file handle for the userspace program so it can read even if it was
- * originally opened O_WRONLY.
+	 * We provide an fd to the userspace program so it can access the
+ * file without generating fanotify events itself.
*/
- new_file = dentry_open(path,
- group->fanotify_data.f_flags | __FMODE_NONOTIFY,
- current_cred());
+ new_file = dentry_open_nonotify(path, group->fanotify_data.f_flags,
+ current_cred());
if (IS_ERR(new_file)) {
put_unused_fd(client_fd);
client_fd = PTR_ERR(new_file);
@@ -327,11 +426,12 @@ static int process_access_response(struct fsnotify_group *group,
struct fanotify_perm_event *event;
int fd = response_struct->fd;
u32 response = response_struct->response;
+ int errno = fanotify_get_response_errno(response);
int ret = info_len;
struct fanotify_response_info_audit_rule friar;
- pr_debug("%s: group=%p fd=%d response=%u buf=%p size=%zu\n", __func__,
- group, fd, response, info, info_len);
+ pr_debug("%s: group=%p fd=%d response=%x errno=%d buf=%p size=%zu\n",
+ __func__, group, fd, response, errno, info, info_len);
/*
* make sure the response is valid, if invalid we do nothing and either
* userspace can send a valid response or we will clean it up after the
@@ -342,7 +442,31 @@ static int process_access_response(struct fsnotify_group *group,
switch (response & FANOTIFY_RESPONSE_ACCESS) {
case FAN_ALLOW:
+ if (errno)
+ return -EINVAL;
+ break;
case FAN_DENY:
+ /* Custom errno is supported only for pre-content groups */
+ if (errno && group->priority != FSNOTIFY_PRIO_PRE_CONTENT)
+ return -EINVAL;
+
+ /*
+ * Limit errno to values expected on open(2)/read(2)/write(2)
+ * of regular files.
+ */
+ switch (errno) {
+ case 0:
+ case EIO:
+ case EPERM:
+ case EBUSY:
+ case ETXTBSY:
+ case EAGAIN:
+ case ENOSPC:
+ case EDQUOT:
+ break;
+ default:
+ return -EINVAL;
+ }
break;
default:
return -EINVAL;
@@ -380,6 +504,25 @@ static int process_access_response(struct fsnotify_group *group,
return -ENOENT;
}
+static size_t copy_mnt_info_to_user(struct fanotify_event *event,
+ char __user *buf, int count)
+{
+ struct fanotify_event_info_mnt info = { };
+
+ info.hdr.info_type = FAN_EVENT_INFO_TYPE_MNT;
+ info.hdr.len = FANOTIFY_MNT_INFO_LEN;
+
+ if (WARN_ON(count < info.hdr.len))
+ return -EFAULT;
+
+ info.mnt_id = FANOTIFY_ME(event)->mnt_id;
+
+ if (copy_to_user(buf, &info, sizeof(info)))
+ return -EFAULT;
+
+ return info.hdr.len;
+}
+
static size_t copy_error_info_to_user(struct fanotify_event *event,
char __user *buf, int count)
{
@@ -506,7 +649,7 @@ static int copy_pidfd_info_to_user(int pidfd,
size_t count)
{
struct fanotify_event_info_pidfd info = { };
- size_t info_len = FANOTIFY_PIDFD_INFO_HDR_LEN;
+ size_t info_len = FANOTIFY_PIDFD_INFO_LEN;
if (WARN_ON_ONCE(info_len > count))
return -EFAULT;
@@ -521,6 +664,30 @@ static int copy_pidfd_info_to_user(int pidfd,
return info_len;
}
+static size_t copy_range_info_to_user(struct fanotify_event *event,
+ char __user *buf, int count)
+{
+ struct fanotify_perm_event *pevent = FANOTIFY_PERM(event);
+ struct fanotify_event_info_range info = { };
+ size_t info_len = FANOTIFY_RANGE_INFO_LEN;
+
+ if (WARN_ON_ONCE(info_len > count))
+ return -EFAULT;
+
+ if (WARN_ON_ONCE(!pevent->ppos))
+ return -EINVAL;
+
+ info.hdr.info_type = FAN_EVENT_INFO_TYPE_RANGE;
+ info.hdr.len = info_len;
+ info.offset = *(pevent->ppos);
+ info.count = pevent->count;
+
+ if (copy_to_user(buf, &info, info_len))
+ return -EFAULT;
+
+ return info_len;
+}
+
static int copy_info_records_to_user(struct fanotify_event *event,
struct fanotify_info *info,
unsigned int info_mode, int pidfd,
@@ -642,6 +809,24 @@ static int copy_info_records_to_user(struct fanotify_event *event,
total_bytes += ret;
}
+ if (fanotify_event_has_access_range(event)) {
+ ret = copy_range_info_to_user(event, buf, count);
+ if (ret < 0)
+ return ret;
+ buf += ret;
+ count -= ret;
+ total_bytes += ret;
+ }
+
+ if (fanotify_is_mnt_event(event->mask)) {
+ ret = copy_mnt_info_to_user(event, buf, count);
+ if (ret < 0)
+ return ret;
+ buf += ret;
+ count -= ret;
+ total_bytes += ret;
+ }
+
return total_bytes;
}
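
On the read side, every event is a struct fanotify_event_metadata followed by the variable-length info records accounted for in fanotify_event_len(); below is a minimal userspace sketch of walking them and picking out the new mount and range records. The field layouts of fanotify_event_info_mnt and fanotify_event_info_range are assumed to match what copy_mnt_info_to_user() and copy_range_info_to_user() fill in above (mnt_id, and offset/count).

#include <stdio.h>
#include <unistd.h>
#include <sys/fanotify.h>

static void drain_events(int fan_fd)
{
	char buf[8192];
	ssize_t len = read(fan_fd, buf, sizeof(buf));
	struct fanotify_event_metadata *md = (void *)buf;

	if (len <= 0)
		return;

	for (; FAN_EVENT_OK(md, len); md = FAN_EVENT_NEXT(md, len)) {
		/* Info records start right after the fixed-size metadata. */
		char *p = (char *)md + md->metadata_len;

		while (p < (char *)md + md->event_len) {
			struct fanotify_event_info_header *hdr = (void *)p;

			if (hdr->info_type == FAN_EVENT_INFO_TYPE_MNT) {
				struct fanotify_event_info_mnt *m = (void *)hdr;

				printf("mount event, mnt_id=%llu\n",
				       (unsigned long long)m->mnt_id);
			} else if (hdr->info_type == FAN_EVENT_INFO_TYPE_RANGE) {
				struct fanotify_event_info_range *r = (void *)hdr;

				printf("pre-content access, offset=%llu count=%llu\n",
				       (unsigned long long)r->offset,
				       (unsigned long long)r->count);
			}
			p += hdr->len;
		}
	}
}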
@@ -756,12 +941,10 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
buf += FAN_EVENT_METADATA_LEN;
count -= FAN_EVENT_METADATA_LEN;
- if (info_mode) {
- ret = copy_info_records_to_user(event, info, info_mode, pidfd,
- buf, count);
- if (ret < 0)
- goto out_close_fd;
- }
+ ret = copy_info_records_to_user(event, info, info_mode, pidfd,
+ buf, count);
+ if (ret < 0)
+ goto out_close_fd;
if (f)
fd_install(fd, f);
@@ -864,6 +1047,7 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
spin_lock(&group->notification_lock);
list_add_tail(&event->fse.list,
&group->fanotify_data.access_list);
+ FANOTIFY_PERM(event)->recv_pid = current->pid;
spin_unlock(&group->notification_lock);
}
}
@@ -923,6 +1107,8 @@ static int fanotify_release(struct inode *ignored, struct file *file)
*/
fsnotify_group_stop_queueing(group);
+ fanotify_perm_watchdog_group_remove(group);
+
/*
* Process all permission events on access_list and notification queue
* and simulate reply from userspace.
@@ -1245,6 +1431,7 @@ static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
* A group with FAN_UNLIMITED_MARKS does not contribute to mark count
* in the limited groups account.
*/
+ BUILD_BUG_ON(!(FANOTIFY_ADMIN_INIT_FLAGS & FAN_UNLIMITED_MARKS));
if (!FAN_GROUP_FLAG(group, FAN_UNLIMITED_MARKS) &&
!inc_ucount(ucounts->ns, ucounts->uid, UCOUNT_FANOTIFY_MARKS))
return ERR_PTR(-ENOSPC);
@@ -1294,7 +1481,7 @@ static int fanotify_group_init_error_pool(struct fsnotify_group *group)
}
static int fanotify_may_update_existing_mark(struct fsnotify_mark *fsn_mark,
- unsigned int fan_flags)
+ __u32 mask, unsigned int fan_flags)
{
/*
* Non evictable mark cannot be downgraded to evictable mark.
@@ -1321,6 +1508,11 @@ static int fanotify_may_update_existing_mark(struct fsnotify_mark *fsn_mark,
fsn_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)
return -EEXIST;
+ /* For now pre-content events are not generated for directories */
+ mask |= fsn_mark->mask;
+ if (mask & FANOTIFY_PRE_CONTENT_EVENTS && mask & FAN_ONDIR)
+ return -EEXIST;
+
return 0;
}
@@ -1347,7 +1539,7 @@ static int fanotify_add_mark(struct fsnotify_group *group,
/*
* Check if requested mark flags conflict with an existing mark flags.
*/
- ret = fanotify_may_update_existing_mark(fsn_mark, fan_flags);
+ ret = fanotify_may_update_existing_mark(fsn_mark, mask, fan_flags);
if (ret)
goto out;
@@ -1370,6 +1562,10 @@ out:
fsnotify_group_unlock(group);
fsnotify_put_mark(fsn_mark);
+
+ if (!ret && (mask & FANOTIFY_PERM_EVENTS))
+ fanotify_perm_watchdog_group_add(group);
+
return ret;
}
@@ -1401,10 +1597,16 @@ static struct hlist_head *fanotify_alloc_merge_hash(void)
return hash;
}
+DEFINE_CLASS(fsnotify_group,
+ struct fsnotify_group *,
+ if (!IS_ERR_OR_NULL(_T)) fsnotify_destroy_group(_T),
+ fsnotify_alloc_group(ops, flags),
+ const struct fsnotify_ops *ops, int flags)
+
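The DEFINE_CLASS() above ties the group into the scope-based cleanup helpers from <linux/cleanup.h>: in the rewritten fanotify_init() below every error path simply returns and the class destructor runs fsnotify_destroy_group(), while the success path disarms the destructor with retain_and_null_ptr(). A minimal sketch of the same pattern with a hypothetical resource (struct foo, foo_alloc(), foo_destroy() and foo_setup() are stand-ins, not real kernel APIs):

#include <linux/cleanup.h>
#include <linux/err.h>

DEFINE_CLASS(foo, struct foo *,
	     if (!IS_ERR_OR_NULL(_T)) foo_destroy(_T),	/* destructor */
	     foo_alloc(flags),				/* constructor */
	     int flags)

static int foo_create(int flags)
{
	CLASS(foo, f)(flags);		/* destructor is armed from here on */

	if (IS_ERR(f))
		return PTR_ERR(f);	/* nothing allocated, nothing to undo */
	if (foo_setup(f))
		return -EINVAL;		/* f is destroyed automatically */

	retain_and_null_ptr(f);		/* success: keep f alive, disarm cleanup */
	return 0;
}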
/* fanotify syscalls */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
- struct fsnotify_group *group;
+ struct user_namespace *user_ns = current_user_ns();
int f_flags, fd;
unsigned int fid_mode = flags & FANOTIFY_FID_BITS;
unsigned int class = flags & FANOTIFY_CLASS_BITS;
@@ -1417,10 +1619,11 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
/*
* An unprivileged user can setup an fanotify group with
* limited functionality - an unprivileged group is limited to
- * notification events with file handles and it cannot use
- * unlimited queue/marks.
+ * notification events with file handles or mount ids and it
+ * cannot use unlimited queue/marks.
*/
- if ((flags & FANOTIFY_ADMIN_INIT_FLAGS) || !fid_mode)
+ if ((flags & FANOTIFY_ADMIN_INIT_FLAGS) ||
+ !(flags & (FANOTIFY_FID_BITS | FAN_REPORT_MNT)))
return -EPERM;
/*
@@ -1446,6 +1649,14 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
if ((flags & FAN_REPORT_PIDFD) && (flags & FAN_REPORT_TID))
return -EINVAL;
+ /* Don't allow mixing mnt events with inode events for now */
+ if (flags & FAN_REPORT_MNT) {
+ if (class != FAN_CLASS_NOTIF)
+ return -EINVAL;
+ if (flags & (FANOTIFY_FID_BITS | FAN_REPORT_FD_ERROR))
+ return -EINVAL;
+ }
+
if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
return -EINVAL;
@@ -1477,48 +1688,42 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
(!(fid_mode & FAN_REPORT_NAME) || !(fid_mode & FAN_REPORT_FID)))
return -EINVAL;
- f_flags = O_RDWR | __FMODE_NONOTIFY;
+ f_flags = O_RDWR;
if (flags & FAN_CLOEXEC)
f_flags |= O_CLOEXEC;
if (flags & FAN_NONBLOCK)
f_flags |= O_NONBLOCK;
- /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
- group = fsnotify_alloc_group(&fanotify_fsnotify_ops,
+ CLASS(fsnotify_group, group)(&fanotify_fsnotify_ops,
FSNOTIFY_GROUP_USER);
- if (IS_ERR(group)) {
+ /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
+ if (IS_ERR(group))
return PTR_ERR(group);
- }
/* Enforce groups limits per user in all containing user ns */
- group->fanotify_data.ucounts = inc_ucount(current_user_ns(),
- current_euid(),
+ group->fanotify_data.ucounts = inc_ucount(user_ns, current_euid(),
UCOUNT_FANOTIFY_GROUPS);
- if (!group->fanotify_data.ucounts) {
- fd = -EMFILE;
- goto out_destroy_group;
- }
+ if (!group->fanotify_data.ucounts)
+ return -EMFILE;
group->fanotify_data.flags = flags | internal_flags;
group->memcg = get_mem_cgroup_from_mm(current->mm);
+ group->user_ns = get_user_ns(user_ns);
group->fanotify_data.merge_hash = fanotify_alloc_merge_hash();
- if (!group->fanotify_data.merge_hash) {
- fd = -ENOMEM;
- goto out_destroy_group;
- }
+ if (!group->fanotify_data.merge_hash)
+ return -ENOMEM;
group->overflow_event = fanotify_alloc_overflow_event();
- if (unlikely(!group->overflow_event)) {
- fd = -ENOMEM;
- goto out_destroy_group;
- }
+ if (unlikely(!group->overflow_event))
+ return -ENOMEM;
if (force_o_largefile())
event_f_flags |= O_LARGEFILE;
group->fanotify_data.f_flags = event_f_flags;
init_waitqueue_head(&group->fanotify_data.access_waitq);
INIT_LIST_HEAD(&group->fanotify_data.access_list);
+ INIT_LIST_HEAD(&group->fanotify_data.perm_grp_list);
switch (class) {
case FAN_CLASS_NOTIF:
group->priority = FSNOTIFY_PRIO_NORMAL;
@@ -1530,39 +1735,26 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
group->priority = FSNOTIFY_PRIO_PRE_CONTENT;
break;
default:
- fd = -EINVAL;
- goto out_destroy_group;
+ return -EINVAL;
}
+ BUILD_BUG_ON(!(FANOTIFY_ADMIN_INIT_FLAGS & FAN_UNLIMITED_QUEUE));
if (flags & FAN_UNLIMITED_QUEUE) {
- fd = -EPERM;
- if (!capable(CAP_SYS_ADMIN))
- goto out_destroy_group;
group->max_events = UINT_MAX;
} else {
group->max_events = fanotify_max_queued_events;
}
- if (flags & FAN_UNLIMITED_MARKS) {
- fd = -EPERM;
- if (!capable(CAP_SYS_ADMIN))
- goto out_destroy_group;
- }
-
if (flags & FAN_ENABLE_AUDIT) {
- fd = -EPERM;
if (!capable(CAP_AUDIT_WRITE))
- goto out_destroy_group;
+ return -EPERM;
}
- fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
- if (fd < 0)
- goto out_destroy_group;
-
- return fd;
-
-out_destroy_group:
- fsnotify_destroy_group(group);
+ fd = FD_ADD(f_flags,
+ anon_inode_getfile_fmode("[fanotify]", &fanotify_fops,
+ group, f_flags, FMODE_NONOTIFY));
+ if (fd >= 0)
+ retain_and_null_ptr(group);
return fd;
}
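
Given the checks above, a FAN_REPORT_MNT group must be plain FAN_CLASS_NOTIF and cannot mix in the FID reporting flags; mount attach/detach notifications are then requested with a FAN_MARK_MNTNS mark (see do_fanotify_mark() below). A minimal userspace sketch, assuming the mount namespace to watch is identified by marking its /proc/<pid>/ns/mnt file, as mnt_ns_from_dentry() implies:

#include <fcntl.h>
#include <sys/fanotify.h>
#include <unistd.h>

/* Watch the caller's own mount namespace for mount attach/detach events. */
static int watch_mount_changes(void)
{
	int fan_fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_MNT, 0);

	if (fan_fd < 0)
		return -1;

	if (fanotify_mark(fan_fd, FAN_MARK_ADD | FAN_MARK_MNTNS,
			  FAN_MNT_ATTACH | FAN_MNT_DETACH,
			  AT_FDCWD, "/proc/self/ns/mnt") < 0) {
		close(fan_fd);
		return -1;
	}
	return fan_fd;	/* read fanotify_event_metadata records from it */
}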
@@ -1638,12 +1830,24 @@ static int fanotify_events_supported(struct fsnotify_group *group,
unsigned int flags)
{
unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
+ bool is_dir = d_is_dir(path->dentry);
/* Strict validation of events in non-dir inode mask with v5.17+ APIs */
bool strict_dir_events = FAN_GROUP_FLAG(group, FAN_REPORT_TARGET_FID) ||
(mask & FAN_RENAME) ||
(flags & FAN_MARK_IGNORE);
/*
+	 * Filesystems need to opt into pre-content events (a.k.a. HSM)
+ * and they are only supported on regular files and directories.
+ */
+ if (mask & FANOTIFY_PRE_CONTENT_EVENTS) {
+ if (!(path->mnt->mnt_sb->s_iflags & SB_I_ALLOW_HSM))
+ return -EOPNOTSUPP;
+ if (!is_dir && !d_is_reg(path->dentry))
+ return -EINVAL;
+ }
+
+ /*
* Some filesystems such as 'proc' acquire unusual locks when opening
* files. For them fanotify permission events have high chances of
* deadlocking the system - open done when reporting fanotify event
@@ -1675,7 +1879,7 @@ static int fanotify_events_supported(struct fsnotify_group *group,
* but because we always allowed it, error only when using new APIs.
*/
if (strict_dir_events && mark_type == FAN_MARK_INODE &&
- !d_is_dir(path->dentry) && (mask & FANOTIFY_DIRONLY_EVENT_BITS))
+ !is_dir && (mask & FANOTIFY_DIRONLY_EVENT_BITS))
return -ENOTDIR;
return 0;
@@ -1685,16 +1889,17 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
int dfd, const char __user *pathname)
{
struct inode *inode = NULL;
- struct vfsmount *mnt = NULL;
struct fsnotify_group *group;
struct path path;
struct fan_fsid __fsid, *fsid = NULL;
+ struct user_namespace *user_ns = NULL;
+ struct mnt_namespace *mntns;
u32 valid_mask = FANOTIFY_EVENTS | FANOTIFY_EVENT_FLAGS;
unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
unsigned int mark_cmd = flags & FANOTIFY_MARK_CMD_BITS;
unsigned int ignore = flags & FANOTIFY_MARK_IGNORE_BITS;
unsigned int obj_type, fid_mode;
- void *obj;
+ void *obj = NULL;
u32 umask = 0;
int ret;
@@ -1718,6 +1923,9 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
case FAN_MARK_FILESYSTEM:
obj_type = FSNOTIFY_OBJ_TYPE_SB;
break;
+ case FAN_MARK_MNTNS:
+ obj_type = FSNOTIFY_OBJ_TYPE_MNTNS;
+ break;
default:
return -EINVAL;
}
@@ -1765,21 +1973,36 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
return -EINVAL;
group = fd_file(f)->private_data;
+	/* Mount events are only reported on mnt namespace marks */
+ if (FAN_GROUP_FLAG(group, FAN_REPORT_MNT)) {
+ if (mask & ~FANOTIFY_MOUNT_EVENTS)
+ return -EINVAL;
+ if (mark_type != FAN_MARK_MNTNS)
+ return -EINVAL;
+ } else {
+ if (mask & FANOTIFY_MOUNT_EVENTS)
+ return -EINVAL;
+ if (mark_type == FAN_MARK_MNTNS)
+ return -EINVAL;
+ }
+
/*
- * An unprivileged user is not allowed to setup mount nor filesystem
- * marks. This also includes setting up such marks by a group that
- * was initialized by an unprivileged user.
+	 * A user is allowed to set up sb/mount/mntns marks only if it is
+ * capable in the user ns where the group was created.
*/
- if ((!capable(CAP_SYS_ADMIN) ||
- FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV)) &&
+ if (!ns_capable(group->user_ns, CAP_SYS_ADMIN) &&
mark_type != FAN_MARK_INODE)
return -EPERM;
/*
- * Permission events require minimum priority FAN_CLASS_CONTENT.
+ * Permission events are not allowed for FAN_CLASS_NOTIF.
+ * Pre-content permission events are not allowed for FAN_CLASS_CONTENT.
*/
if (mask & FANOTIFY_PERM_EVENTS &&
- group->priority < FSNOTIFY_PRIO_CONTENT)
+ group->priority == FSNOTIFY_PRIO_NORMAL)
+ return -EINVAL;
+ else if (mask & FANOTIFY_PRE_CONTENT_EVENTS &&
+ group->priority == FSNOTIFY_PRIO_CONTENT)
return -EINVAL;
if (mask & FAN_FS_ERROR &&
@@ -1802,7 +2025,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
* point.
*/
fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
- if (mask & ~(FANOTIFY_FD_EVENTS|FANOTIFY_EVENT_FLAGS) &&
+ if (mask & ~(FANOTIFY_FD_EVENTS|FANOTIFY_MOUNT_EVENTS|FANOTIFY_EVENT_FLAGS) &&
(!fid_mode || mark_type == FAN_MARK_MOUNT))
return -EINVAL;
@@ -1814,13 +2037,12 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
if (mask & FAN_RENAME && !(fid_mode & FAN_REPORT_NAME))
return -EINVAL;
+ /* Pre-content events are not currently generated for directories. */
+ if (mask & FANOTIFY_PRE_CONTENT_EVENTS && mask & FAN_ONDIR)
+ return -EINVAL;
+
if (mark_cmd == FAN_MARK_FLUSH) {
- if (mark_type == FAN_MARK_MOUNT)
- fsnotify_clear_vfsmount_marks_by_group(group);
- else if (mark_type == FAN_MARK_FILESYSTEM)
- fsnotify_clear_sb_marks_by_group(group);
- else
- fsnotify_clear_inode_marks_by_group(group);
+ fsnotify_clear_marks_by_group(group, obj_type);
return 0;
}
@@ -1847,18 +2069,38 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
fsid = &__fsid;
}
- /* inode held in place by reference to path; group by fget on fd */
- if (mark_type == FAN_MARK_INODE) {
+ /*
+	 * In addition to being capable in the user ns where the group was created,
+ * the user also needs to be capable in the user ns associated with
+ * the filesystem or in the user ns associated with the mntns
+ * (when marking mntns).
+ */
+ if (obj_type == FSNOTIFY_OBJ_TYPE_INODE) {
inode = path.dentry->d_inode;
obj = inode;
- } else {
- mnt = path.mnt;
- if (mark_type == FAN_MARK_MOUNT)
- obj = mnt;
- else
- obj = mnt->mnt_sb;
+ } else if (obj_type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
+ user_ns = path.mnt->mnt_sb->s_user_ns;
+ obj = path.mnt;
+ } else if (obj_type == FSNOTIFY_OBJ_TYPE_SB) {
+ user_ns = path.mnt->mnt_sb->s_user_ns;
+ obj = path.mnt->mnt_sb;
+ } else if (obj_type == FSNOTIFY_OBJ_TYPE_MNTNS) {
+ ret = -EINVAL;
+ mntns = mnt_ns_from_dentry(path.dentry);
+ if (!mntns)
+ goto path_put_and_out;
+ user_ns = mntns->user_ns;
+ obj = mntns;
}
+ ret = -EPERM;
+ if (user_ns && !ns_capable(user_ns, CAP_SYS_ADMIN))
+ goto path_put_and_out;
+
+ ret = -EINVAL;
+ if (!obj)
+ goto path_put_and_out;
+
/*
* If some other task has this inode open for write we should not add
* an ignore mask, unless that ignore mask is supposed to survive
@@ -1866,10 +2108,10 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
*/
if (mark_cmd == FAN_MARK_ADD && (flags & FANOTIFY_MARK_IGNORE_BITS) &&
!(flags & FAN_MARK_IGNORED_SURV_MODIFY)) {
- ret = mnt ? -EINVAL : -EISDIR;
+ ret = !inode ? -EINVAL : -EISDIR;
/* FAN_MARK_IGNORE requires SURV_MODIFY for sb/mount/dir marks */
if (ignore == FAN_MARK_IGNORE &&
- (mnt || S_ISDIR(inode->i_mode)))
+ (!inode || S_ISDIR(inode->i_mode)))
goto path_put_and_out;
ret = 0;
@@ -1878,7 +2120,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
}
/* Mask out FAN_EVENT_ON_CHILD flag for sb/mount/non-dir marks */
- if (mnt || !S_ISDIR(inode->i_mode)) {
+ if (!inode || !S_ISDIR(inode->i_mode)) {
mask &= ~FAN_EVENT_ON_CHILD;
umask = FAN_EVENT_ON_CHILD;
/*
@@ -1952,7 +2194,7 @@ static int __init fanotify_user_setup(void)
FANOTIFY_DEFAULT_MAX_USER_MARKS);
BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS);
- BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 13);
+ BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 14);
BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 11);
fanotify_mark_cache = KMEM_CACHE(fanotify_mark,
@@ -1965,6 +2207,7 @@ static int __init fanotify_user_setup(void)
fanotify_perm_event_cachep =
KMEM_CACHE(fanotify_perm_event, SLAB_PANIC);
}
+ fanotify_mnt_event_cachep = KMEM_CACHE(fanotify_mnt_event, SLAB_PANIC);
fanotify_max_queued_events = FANOTIFY_DEFAULT_MAX_EVENTS;
init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS] =
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index dec553034027..9cc7eb863643 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -17,6 +17,7 @@
#include "fanotify/fanotify.h"
#include "fdinfo.h"
#include "fsnotify.h"
+#include "../internal.h"
#if defined(CONFIG_PROC_FS)
@@ -46,11 +47,14 @@ static void show_mark_fhandle(struct seq_file *m, struct inode *inode)
size = f->handle_bytes >> 2;
+ if (!super_trylock_shared(inode->i_sb))
+ return;
+
ret = exportfs_encode_fid(inode, (struct fid *)f->f_handle, &size);
- if ((ret == FILEID_INVALID) || (ret < 0)) {
- WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
+ up_read(&inode->i_sb->s_umount);
+
+ if ((ret == FILEID_INVALID) || (ret < 0))
return;
- }
f->handle_type = ret;
f->handle_bytes = size * sizeof(u32);
@@ -123,6 +127,11 @@ static void fanotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
seq_printf(m, "fanotify sdev:%x mflags:%x mask:%x ignored_mask:%x\n",
sb->s_dev, mflags, mark->mask, mark->ignore_mask);
+ } else if (mark->connector->type == FSNOTIFY_OBJ_TYPE_MNTNS) {
+ struct mnt_namespace *mnt_ns = fsnotify_conn_mntns(mark->connector);
+
+ seq_printf(m, "fanotify mnt_ns:%u mflags:%x mask:%x ignored_mask:%x\n",
+ mnt_ns->ns.inum, mflags, mark->mask, mark->ignore_mask);
}
}
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index f976949d2634..d27ff5e5f165 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -28,6 +28,11 @@ void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
fsnotify_clear_marks_by_mount(mnt);
}
+void __fsnotify_mntns_delete(struct mnt_namespace *mntns)
+{
+ fsnotify_clear_marks_by_mntns(mntns);
+}
+
/**
* fsnotify_unmount_inodes - an sb is unmounting. handle any watched inodes.
* @sb: superblock being unmounted.
@@ -47,7 +52,7 @@ static void fsnotify_unmount_inodes(struct super_block *sb)
* the inode cannot have any associated watches.
*/
spin_lock(&inode->i_lock);
- if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
+ if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) {
spin_unlock(&inode->i_lock);
continue;
}
@@ -61,7 +66,7 @@ static void fsnotify_unmount_inodes(struct super_block *sb)
* removed all zero refcount inodes, in any case. Test to
* be sure.
*/
- if (!atomic_read(&inode->i_count)) {
+ if (!icount_read(inode)) {
spin_unlock(&inode->i_lock);
continue;
}
@@ -193,9 +198,9 @@ static bool fsnotify_event_needs_parent(struct inode *inode, __u32 mnt_mask,
return mask & marks_mask;
}
-/* Are there any inode/mount/sb objects that are interested in this event? */
-static inline bool fsnotify_object_watched(struct inode *inode, __u32 mnt_mask,
- __u32 mask)
+/* Are there any inode/mount/sb objects that watch for these events? */
+static inline __u32 fsnotify_object_watched(struct inode *inode, __u32 mnt_mask,
+ __u32 mask)
{
__u32 marks_mask = READ_ONCE(inode->i_fsnotify_mask) | mnt_mask |
READ_ONCE(inode->i_sb->s_fsnotify_mask);
@@ -203,6 +208,24 @@ static inline bool fsnotify_object_watched(struct inode *inode, __u32 mnt_mask,
return mask & marks_mask & ALL_FSNOTIFY_EVENTS;
}
+/* Report pre-content event with optional range info */
+int fsnotify_pre_content(const struct path *path, const loff_t *ppos,
+ size_t count)
+{
+ struct file_range range;
+
+	/* Report a page-aligned range only when pos is known */
+ if (!ppos)
+ return fsnotify_path(path, FS_PRE_ACCESS);
+
+ range.path = path;
+ range.pos = PAGE_ALIGN_DOWN(*ppos);
+ range.count = PAGE_ALIGN(*ppos + count) - range.pos;
+
+ return fsnotify_parent(path->dentry, FS_PRE_ACCESS, &range,
+ FSNOTIFY_EVENT_FILE_RANGE);
+}
+
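A quick worked example of the rounding above, assuming 4 KiB pages: a read at *ppos = 5000 for count = 3000 bytes is reported as the enclosing page-aligned range.

	range.pos   = PAGE_ALIGN_DOWN(5000)          = 4096
	range.count = PAGE_ALIGN(5000 + 3000) - 4096 = 8192 - 4096 = 4096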
/*
* Notify this dentry's parent about a child's events with child name info
* if parent is watching or if inode/sb/mount are interested in events with
@@ -402,7 +425,7 @@ static int send_to_group(__u32 mask, const void *data, int data_type,
file_name, cookie, iter_info);
}
-static struct fsnotify_mark *fsnotify_first_mark(struct fsnotify_mark_connector **connp)
+static struct fsnotify_mark *fsnotify_first_mark(struct fsnotify_mark_connector *const *connp)
{
struct fsnotify_mark_connector *conn;
struct hlist_node *node = NULL;
@@ -520,14 +543,15 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir,
{
const struct path *path = fsnotify_data_path(data, data_type);
struct super_block *sb = fsnotify_data_sb(data, data_type);
- struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb);
+ const struct fsnotify_mnt *mnt_data = fsnotify_data_mnt(data, data_type);
+ struct fsnotify_sb_info *sbinfo = sb ? fsnotify_sb_info(sb) : NULL;
struct fsnotify_iter_info iter_info = {};
struct mount *mnt = NULL;
struct inode *inode2 = NULL;
struct dentry *moved;
int inode2_type;
int ret = 0;
- __u32 test_mask, marks_mask;
+ __u32 test_mask, marks_mask = 0;
if (path)
mnt = real_mount(path->mnt);
@@ -560,17 +584,20 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir,
if ((!sbinfo || !sbinfo->sb_marks) &&
(!mnt || !mnt->mnt_fsnotify_marks) &&
(!inode || !inode->i_fsnotify_marks) &&
- (!inode2 || !inode2->i_fsnotify_marks))
+ (!inode2 || !inode2->i_fsnotify_marks) &&
+ (!mnt_data || !mnt_data->ns->n_fsnotify_marks))
return 0;
- marks_mask = READ_ONCE(sb->s_fsnotify_mask);
+ if (sb)
+ marks_mask |= READ_ONCE(sb->s_fsnotify_mask);
if (mnt)
marks_mask |= READ_ONCE(mnt->mnt_fsnotify_mask);
if (inode)
marks_mask |= READ_ONCE(inode->i_fsnotify_mask);
if (inode2)
marks_mask |= READ_ONCE(inode2->i_fsnotify_mask);
-
+ if (mnt_data)
+ marks_mask |= READ_ONCE(mnt_data->ns->n_fsnotify_mask);
/*
* If this is a modify event we may need to clear some ignore masks.
@@ -600,6 +627,10 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir,
iter_info.marks[inode2_type] =
fsnotify_first_mark(&inode2->i_fsnotify_marks);
}
+ if (mnt_data) {
+ iter_info.marks[FSNOTIFY_ITER_TYPE_MNTNS] =
+ fsnotify_first_mark(&mnt_data->ns->n_fsnotify_marks);
+ }
/*
* We need to merge inode/vfsmount/sb mark lists so that e.g. inode mark
@@ -623,11 +654,117 @@ out:
}
EXPORT_SYMBOL_GPL(fsnotify);
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+/*
+ * At open time we check fsnotify_sb_has_priority_watchers(), call the open perm
+ * hook and set the FMODE_NONOTIFY_ mode bits accordingly.
+ * Later, fsnotify permission hooks do not check whether there are permission
+ * event watchers now, only whether there were any at open time.
+ */
+int fsnotify_open_perm_and_set_mode(struct file *file)
+{
+ struct dentry *dentry = file->f_path.dentry, *parent;
+ struct super_block *sb = dentry->d_sb;
+ __u32 mnt_mask, p_mask = 0;
+
+ /* Is it a file opened by fanotify? */
+ if (FMODE_FSNOTIFY_NONE(file->f_mode))
+ return 0;
+
+ /*
+	 * Permission events are a superset of pre-content events, so if there
+	 * are no permission event watchers, there are also no pre-content event
+	 * watchers and this is implied by the single FMODE_NONOTIFY_PERM bit.
+ */
+ if (likely(!fsnotify_sb_has_priority_watchers(sb,
+ FSNOTIFY_PRIO_CONTENT))) {
+ file_set_fsnotify_mode(file, FMODE_NONOTIFY_PERM);
+ return 0;
+ }
+
+ /*
+ * OK, there are some permission event watchers. Check if anybody is
+ * watching for permission events on *this* file.
+ */
+ mnt_mask = READ_ONCE(real_mount(file->f_path.mnt)->mnt_fsnotify_mask);
+ p_mask = fsnotify_object_watched(d_inode(dentry), mnt_mask,
+ ALL_FSNOTIFY_PERM_EVENTS);
+ if (dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED) {
+ parent = dget_parent(dentry);
+ p_mask |= fsnotify_inode_watches_children(d_inode(parent));
+ dput(parent);
+ }
+
+ /*
+ * Legacy FAN_ACCESS_PERM events have very high performance overhead,
+	 * so they are unlikely to be used in the wild. If they are used, there
+	 * will be no optimizations at all.
+ */
+ if (unlikely(p_mask & FS_ACCESS_PERM)) {
+ /* Enable all permission and pre-content events */
+ file_set_fsnotify_mode(file, 0);
+ goto open_perm;
+ }
+
+ /*
+ * Pre-content events are only supported on regular files.
+ * If there are pre-content event watchers and no permission access
+ * watchers, set FMODE_NONOTIFY | FMODE_NONOTIFY_PERM to indicate that.
+ * That is the common case with HSM service.
+	 * That is the common case with an HSM service.
+ if (d_is_reg(dentry) && (p_mask & FSNOTIFY_PRE_CONTENT_EVENTS)) {
+ file_set_fsnotify_mode(file, FMODE_NONOTIFY |
+ FMODE_NONOTIFY_PERM);
+ goto open_perm;
+ }
+
+	/* Nobody is watching permission or pre-content events on this file */
+ file_set_fsnotify_mode(file, FMODE_NONOTIFY_PERM);
+
+open_perm:
+ /*
+ * Send open perm events depending on object masks and regardless of
+ * FMODE_NONOTIFY_PERM.
+ */
+ if (file->f_flags & __FMODE_EXEC && p_mask & FS_OPEN_EXEC_PERM) {
+ int ret = fsnotify_path(&file->f_path, FS_OPEN_EXEC_PERM);
+
+ if (ret)
+ return ret;
+ }
+
+ if (p_mask & FS_OPEN_PERM)
+ return fsnotify_path(&file->f_path, FS_OPEN_PERM);
+
+ return 0;
+}
+#endif
+
+void fsnotify_mnt(__u32 mask, struct mnt_namespace *ns, struct vfsmount *mnt)
+{
+ struct fsnotify_mnt data = {
+ .ns = ns,
+ .mnt_id = real_mount(mnt)->mnt_id_unique,
+ };
+
+ if (WARN_ON_ONCE(!ns))
+ return;
+
+ /*
+ * This is an optimization as well as making sure fsnotify_init() has
+ * been called.
+ */
+ if (!ns->n_fsnotify_marks)
+ return;
+
+ fsnotify(mask, &data, FSNOTIFY_EVENT_MNT, NULL, NULL, NULL, 0);
+}
+
static __init int fsnotify_init(void)
{
int ret;
- BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 23);
+ BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 26);
ret = init_srcu_struct(&fsnotify_mark_srcu);
if (ret)
diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
index 663759ed6fbc..5950c7a67f41 100644
--- a/fs/notify/fsnotify.h
+++ b/fs/notify/fsnotify.h
@@ -33,6 +33,12 @@ static inline struct super_block *fsnotify_conn_sb(
return conn->obj;
}
+static inline struct mnt_namespace *fsnotify_conn_mntns(
+ struct fsnotify_mark_connector *conn)
+{
+ return conn->obj;
+}
+
static inline struct super_block *fsnotify_object_sb(void *obj,
enum fsnotify_obj_type obj_type)
{
@@ -89,6 +95,11 @@ static inline void fsnotify_clear_marks_by_sb(struct super_block *sb)
fsnotify_destroy_marks(fsnotify_sb_marks(sb));
}
+static inline void fsnotify_clear_marks_by_mntns(struct mnt_namespace *mntns)
+{
+ fsnotify_destroy_marks(&mntns->n_fsnotify_marks);
+}
+
/*
* update the dentry->d_flags of all of inode's children to indicate if inode cares
* about events that happen to its children.
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index 993375f0db67..7c326ec2e8a8 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -10,7 +10,7 @@
* Copyright 2006 Hewlett-Packard Development Company, L.P.
*
* Copyright (C) 2009 Eric Paris <Red Hat Inc>
- * inotify was largely rewriten to make use of the fsnotify infrastructure
+ * inotify was largely rewritten to make use of the fsnotify infrastructure
*/
#include <linux/dcache.h> /* d_unlinked */
@@ -121,7 +121,7 @@ int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask,
event->sync_cookie = cookie;
event->name_len = len;
if (len)
- strcpy(event->name, name->name);
+ strscpy(event->name, name->name, event->name_len + 1);
ret = fsnotify_add_event(group, fsn_event, inotify_merge);
if (ret) {
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index e0c48956608a..b372fb2c56bd 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -58,7 +58,7 @@ struct kmem_cache *inotify_inode_mark_cachep __ro_after_init;
static long it_zero = 0;
static long it_int_max = INT_MAX;
-static struct ctl_table inotify_table[] = {
+static const struct ctl_table inotify_table[] = {
{
.procname = "max_user_instances",
.data = &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 4981439e6209..55a03bb05aa1 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -107,6 +107,8 @@ static fsnotify_connp_t *fsnotify_object_connp(void *obj,
return &real_mount(obj)->mnt_fsnotify_marks;
case FSNOTIFY_OBJ_TYPE_SB:
return fsnotify_sb_marks(obj);
+ case FSNOTIFY_OBJ_TYPE_MNTNS:
+ return &((struct mnt_namespace *)obj)->n_fsnotify_marks;
default:
return NULL;
}
@@ -120,6 +122,8 @@ static __u32 *fsnotify_conn_mask_p(struct fsnotify_mark_connector *conn)
return &fsnotify_conn_mount(conn)->mnt_fsnotify_mask;
else if (conn->type == FSNOTIFY_OBJ_TYPE_SB)
return &fsnotify_conn_sb(conn)->s_fsnotify_mask;
+ else if (conn->type == FSNOTIFY_OBJ_TYPE_MNTNS)
+ return &fsnotify_conn_mntns(conn)->n_fsnotify_mask;
return NULL;
}
@@ -346,12 +350,15 @@ static void *fsnotify_detach_connector_from_object(
fsnotify_conn_mount(conn)->mnt_fsnotify_mask = 0;
} else if (conn->type == FSNOTIFY_OBJ_TYPE_SB) {
fsnotify_conn_sb(conn)->s_fsnotify_mask = 0;
+ } else if (conn->type == FSNOTIFY_OBJ_TYPE_MNTNS) {
+ fsnotify_conn_mntns(conn)->n_fsnotify_mask = 0;
}
rcu_assign_pointer(*connp, NULL);
conn->obj = NULL;
conn->type = FSNOTIFY_OBJ_TYPE_DETACHED;
- fsnotify_update_sb_watchers(sb, conn);
+ if (sb)
+ fsnotify_update_sb_watchers(sb, conn);
return inode;
}
@@ -421,7 +428,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
conn->destroy_next = connector_destroy_list;
connector_destroy_list = conn;
spin_unlock(&destroy_lock);
- queue_work(system_unbound_wq, &connector_reaper_work);
+ queue_work(system_dfl_wq, &connector_reaper_work);
}
/*
* Note that we didn't update flags telling whether inode cares about
@@ -432,7 +439,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
spin_lock(&destroy_lock);
list_add(&mark->g_list, &destroy_list);
spin_unlock(&destroy_lock);
- queue_delayed_work(system_unbound_wq, &reaper_work,
+ queue_delayed_work(system_dfl_wq, &reaper_work,
FSNOTIFY_REAPER_DELAY);
}
EXPORT_SYMBOL_GPL(fsnotify_put_mark);
@@ -724,7 +731,7 @@ static int fsnotify_add_mark_list(struct fsnotify_mark *mark, void *obj,
* Attach the sb info before attaching a connector to any object on sb.
* The sb info will remain attached as long as sb lives.
*/
- if (!fsnotify_sb_info(sb)) {
+ if (sb && !fsnotify_sb_info(sb)) {
err = fsnotify_attach_info_to_sb(sb);
if (err)
return err;
@@ -770,7 +777,8 @@ restart:
/* mark should be the last entry. last is the current last entry */
hlist_add_behind_rcu(&mark->obj_list, &last->obj_list);
added:
- fsnotify_update_sb_watchers(sb, conn);
+ if (sb)
+ fsnotify_update_sb_watchers(sb, conn);
/*
* Since connector is attached to object using cmpxchg() we are
* guaranteed that connector initialization is fully visible by anyone
diff --git a/fs/nsfs.c b/fs/nsfs.c
index c675fc40ce2d..bf27d5da91f1 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -13,12 +13,26 @@
#include <linux/nsfs.h>
#include <linux/uaccess.h>
#include <linux/mnt_namespace.h>
+#include <linux/ipc_namespace.h>
+#include <linux/time_namespace.h>
+#include <linux/utsname.h>
+#include <linux/exportfs.h>
+#include <linux/nstree.h>
+#include <net/net_namespace.h>
#include "mount.h"
#include "internal.h"
static struct vfsmount *nsfs_mnt;
+static struct path nsfs_root_path = {};
+
+void nsfs_get_root(struct path *path)
+{
+ *path = nsfs_root_path;
+ path_get(path);
+}
+
static long ns_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg);
static const struct file_operations ns_file_operations = {
@@ -37,7 +51,6 @@ static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)
}
const struct dentry_operations ns_dentry_operations = {
- .d_delete = always_delete_dentry,
.d_dname = ns_dname,
.d_prune = stashed_dentry_prune,
};
@@ -45,6 +58,8 @@ const struct dentry_operations ns_dentry_operations = {
static void nsfs_evict(struct inode *inode)
{
struct ns_common *ns = inode->i_private;
+
+ __ns_ref_active_put(ns);
clear_inode(inode);
ns->ops->put(ns);
}
@@ -95,7 +110,6 @@ int ns_get_path(struct path *path, struct task_struct *task,
int open_namespace(struct ns_common *ns)
{
struct path path __free(path_put) = {};
- struct file *f;
int err;
/* call first to consume reference */
@@ -103,16 +117,7 @@ int open_namespace(struct ns_common *ns)
if (err < 0)
return err;
- CLASS(get_unused_fd, fd)(O_CLOEXEC);
- if (fd < 0)
- return fd;
-
- f = dentry_open(&path, O_RDONLY, current_cred());
- if (IS_ERR(f))
- return PTR_ERR(f);
-
- fd_install(fd, f);
- return take_fd(fd);
+ return FD_ADD(O_CLOEXEC, dentry_open(&path, O_RDONLY, current_cred()));
}
int open_related_ns(struct ns_common *ns,
@@ -140,7 +145,7 @@ static int copy_ns_info_to_user(const struct mnt_namespace *mnt_ns,
* the size value will be set to the size the kernel knows about.
*/
kinfo->size = min(usize, sizeof(*kinfo));
- kinfo->mnt_ns_id = mnt_ns->seq;
+ kinfo->mnt_ns_id = mnt_ns->ns.ns_id;
kinfo->nr_mounts = READ_ONCE(mnt_ns->nr_mounts);
/* Subtract the root mount of the mount namespace. */
if (kinfo->nr_mounts)
@@ -152,19 +157,52 @@ static int copy_ns_info_to_user(const struct mnt_namespace *mnt_ns,
return 0;
}
+static bool nsfs_ioctl_valid(unsigned int cmd)
+{
+ switch (cmd) {
+ case NS_GET_USERNS:
+ case NS_GET_PARENT:
+ case NS_GET_NSTYPE:
+ case NS_GET_OWNER_UID:
+ case NS_GET_MNTNS_ID:
+ case NS_GET_PID_FROM_PIDNS:
+ case NS_GET_TGID_FROM_PIDNS:
+ case NS_GET_PID_IN_PIDNS:
+ case NS_GET_TGID_IN_PIDNS:
+ case NS_GET_ID:
+ return true;
+ }
+
+ /* Extensible ioctls require some extra handling. */
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(NS_MNT_GET_INFO):
+ return extensible_ioctl_valid(cmd, NS_MNT_GET_INFO, MNT_NS_INFO_SIZE_VER0);
+ case _IOC_NR(NS_MNT_GET_NEXT):
+ return extensible_ioctl_valid(cmd, NS_MNT_GET_NEXT, MNT_NS_INFO_SIZE_VER0);
+ case _IOC_NR(NS_MNT_GET_PREV):
+ return extensible_ioctl_valid(cmd, NS_MNT_GET_PREV, MNT_NS_INFO_SIZE_VER0);
+ }
+
+ return false;
+}
+
static long ns_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
struct user_namespace *user_ns;
struct pid_namespace *pid_ns;
struct task_struct *tsk;
- struct ns_common *ns = get_proc_ns(file_inode(filp));
+ struct ns_common *ns;
struct mnt_namespace *mnt_ns;
bool previous = false;
uid_t __user *argp;
uid_t uid;
int ret;
+ if (!nsfs_ioctl_valid(ioctl))
+ return -ENOIOCTLCMD;
+
+ ns = get_proc_ns(file_inode(filp));
switch (ioctl) {
case NS_GET_USERNS:
return open_related_ns(ns, ns_get_owner);
@@ -173,26 +211,14 @@ static long ns_ioctl(struct file *filp, unsigned int ioctl,
return -EINVAL;
return open_related_ns(ns, ns->ops->get_parent);
case NS_GET_NSTYPE:
- return ns->ops->type;
+ return ns->ns_type;
case NS_GET_OWNER_UID:
- if (ns->ops->type != CLONE_NEWUSER)
+ if (ns->ns_type != CLONE_NEWUSER)
return -EINVAL;
user_ns = container_of(ns, struct user_namespace, ns);
argp = (uid_t __user *) arg;
uid = from_kuid_munged(current_user_ns(), user_ns->owner);
return put_user(uid, argp);
- case NS_GET_MNTNS_ID: {
- __u64 __user *idp;
- __u64 id;
-
- if (ns->ops->type != CLONE_NEWNS)
- return -EINVAL;
-
- mnt_ns = container_of(ns, struct mnt_namespace, ns);
- idp = (__u64 __user *)arg;
- id = mnt_ns->seq;
- return put_user(id, idp);
- }
case NS_GET_PID_FROM_PIDNS:
fallthrough;
case NS_GET_TGID_FROM_PIDNS:
@@ -200,7 +226,7 @@ static long ns_ioctl(struct file *filp, unsigned int ioctl,
case NS_GET_PID_IN_PIDNS:
fallthrough;
case NS_GET_TGID_IN_PIDNS: {
- if (ns->ops->type != CLONE_NEWPID)
+ if (ns->ns_type != CLONE_NEWPID)
return -EINVAL;
ret = -ESRCH;
@@ -238,6 +264,18 @@ static long ns_ioctl(struct file *filp, unsigned int ioctl,
ret = -ESRCH;
return ret;
}
+ case NS_GET_MNTNS_ID:
+ if (ns->ns_type != CLONE_NEWNS)
+ return -EINVAL;
+ fallthrough;
+ case NS_GET_ID: {
+ __u64 __user *idp;
+ __u64 id;
+
+ idp = (__u64 __user *)arg;
+ id = ns->ns_id;
+ return put_user(id, idp);
+ }
}
/* extensible ioctls */
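
The new NS_GET_ID ioctl returns the 64-bit namespace id for any nsfs file, with NS_GET_MNTNS_ID kept as a mount-namespace-only alias for it. A minimal userspace sketch, assuming NS_GET_ID is exported through <linux/nsfs.h> next to the existing NS_GET_* ioctls:

#include <fcntl.h>
#include <linux/nsfs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int print_ns_id(const char *ns_path)
{
	__u64 id;
	int fd = open(ns_path, O_RDONLY);	/* e.g. "/proc/self/ns/net" */

	if (fd < 0)
		return -1;
	if (ioctl(fd, NS_GET_ID, &id) < 0) {
		close(fd);
		return -1;
	}
	printf("%s: ns id %llu\n", ns_path, (unsigned long long)id);
	return close(fd);
}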
@@ -247,7 +285,7 @@ static long ns_ioctl(struct file *filp, unsigned int ioctl,
struct mnt_ns_info __user *uinfo = (struct mnt_ns_info __user *)arg;
size_t usize = _IOC_SIZE(ioctl);
- if (ns->ops->type != CLONE_NEWNS)
+ if (ns->ns_type != CLONE_NEWNS)
return -EINVAL;
if (!uinfo)
@@ -265,19 +303,15 @@ static long ns_ioctl(struct file *filp, unsigned int ioctl,
struct mnt_ns_info kinfo = {};
struct mnt_ns_info __user *uinfo = (struct mnt_ns_info __user *)arg;
struct path path __free(path_put) = {};
- struct file *f __free(fput) = NULL;
size_t usize = _IOC_SIZE(ioctl);
- if (ns->ops->type != CLONE_NEWNS)
+ if (ns->ns_type != CLONE_NEWNS)
return -EINVAL;
if (usize < MNT_NS_INFO_SIZE_VER0)
return -EINVAL;
- if (previous)
- mnt_ns = lookup_prev_mnt_ns(to_mnt_ns(ns));
- else
- mnt_ns = lookup_next_mnt_ns(to_mnt_ns(ns));
+ mnt_ns = get_sequential_mnt_ns(to_mnt_ns(ns), previous);
if (IS_ERR(mnt_ns))
return PTR_ERR(mnt_ns);
@@ -287,28 +321,18 @@ static long ns_ioctl(struct file *filp, unsigned int ioctl,
if (ret)
return ret;
- CLASS(get_unused_fd, fd)(O_CLOEXEC);
- if (fd < 0)
- return fd;
-
- f = dentry_open(&path, O_RDONLY, current_cred());
- if (IS_ERR(f))
- return PTR_ERR(f);
-
- if (uinfo) {
- /*
- * If @uinfo is passed return all information about the
- * mount namespace as well.
- */
- ret = copy_ns_info_to_user(to_mnt_ns(ns), uinfo, usize, &kinfo);
- if (ret)
- return ret;
- }
-
- /* Transfer reference of @f to caller's fdtable. */
- fd_install(fd, no_free_ptr(f));
- /* File descriptor is live so hand it off to the caller. */
- return take_fd(fd);
+ FD_PREPARE(fdf, O_CLOEXEC, dentry_open(&path, O_RDONLY, current_cred()));
+ if (fdf.err)
+ return fdf.err;
+ /*
+ * If @uinfo is passed return all information about the
+ * mount namespace as well.
+ */
+ ret = copy_ns_info_to_user(to_mnt_ns(ns), uinfo, usize, &kinfo);
+ if (ret)
+ return ret;
+ ret = fd_publish(fdf);
+ break;
}
default:
ret = -ENOTTY;
@@ -365,6 +389,7 @@ static const struct super_operations nsfs_ops = {
.statfs = simple_statfs,
.evict_inode = nsfs_evict,
.show_path = nsfs_show_path,
+ .drop_inode = inode_just_drop,
};
static int nsfs_init_inode(struct inode *inode, void *data)
@@ -375,6 +400,16 @@ static int nsfs_init_inode(struct inode *inode, void *data)
inode->i_mode |= S_IRUGO;
inode->i_fop = &ns_file_operations;
inode->i_ino = ns->inum;
+
+ /*
+ * Bring the namespace subtree back to life if we have to. This
+ * can happen when e.g., all processes using a network namespace
+ * and all namespace files or namespace file bind-mounts have
+ * died but there are still sockets pinning it. The SIOCGSKNS
+ * ioctl on such a socket will resurrect the relevant namespace
+ * subtree.
+ */
+ __ns_ref_active_get(ns);
return 0;
}
@@ -389,12 +424,224 @@ static const struct stashed_operations nsfs_stashed_ops = {
.put_data = nsfs_put_data,
};
+#define NSFS_FID_SIZE_U32_VER0 (NSFS_FILE_HANDLE_SIZE_VER0 / sizeof(u32))
+#define NSFS_FID_SIZE_U32_LATEST (NSFS_FILE_HANDLE_SIZE_LATEST / sizeof(u32))
+
+static int nsfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+ struct inode *parent)
+{
+ struct nsfs_file_handle *fid = (struct nsfs_file_handle *)fh;
+ struct ns_common *ns = inode->i_private;
+ int len = *max_len;
+
+ if (parent)
+ return FILEID_INVALID;
+
+ if (len < NSFS_FID_SIZE_U32_VER0) {
+ *max_len = NSFS_FID_SIZE_U32_LATEST;
+ return FILEID_INVALID;
+ } else if (len > NSFS_FID_SIZE_U32_LATEST) {
+ *max_len = NSFS_FID_SIZE_U32_LATEST;
+ }
+
+ fid->ns_id = ns->ns_id;
+ fid->ns_type = ns->ns_type;
+ fid->ns_inum = inode->i_ino;
+ return FILEID_NSFS;
+}
+
+bool is_current_namespace(struct ns_common *ns)
+{
+ switch (ns->ns_type) {
+#ifdef CONFIG_CGROUPS
+ case CLONE_NEWCGROUP:
+ return current_in_namespace(to_cg_ns(ns));
+#endif
+#ifdef CONFIG_IPC_NS
+ case CLONE_NEWIPC:
+ return current_in_namespace(to_ipc_ns(ns));
+#endif
+ case CLONE_NEWNS:
+ return current_in_namespace(to_mnt_ns(ns));
+#ifdef CONFIG_NET_NS
+ case CLONE_NEWNET:
+ return current_in_namespace(to_net_ns(ns));
+#endif
+#ifdef CONFIG_PID_NS
+ case CLONE_NEWPID:
+ return current_in_namespace(to_pid_ns(ns));
+#endif
+#ifdef CONFIG_TIME_NS
+ case CLONE_NEWTIME:
+ return current_in_namespace(to_time_ns(ns));
+#endif
+#ifdef CONFIG_USER_NS
+ case CLONE_NEWUSER:
+ return current_in_namespace(to_user_ns(ns));
+#endif
+#ifdef CONFIG_UTS_NS
+ case CLONE_NEWUTS:
+ return current_in_namespace(to_uts_ns(ns));
+#endif
+ default:
+ VFS_WARN_ON_ONCE(true);
+ return false;
+ }
+}
+
+static struct dentry *nsfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
+ int fh_len, int fh_type)
+{
+ struct path path __free(path_put) = {};
+ struct nsfs_file_handle *fid = (struct nsfs_file_handle *)fh;
+ struct user_namespace *owning_ns = NULL;
+ struct ns_common *ns;
+ int ret;
+
+ if (fh_len < NSFS_FID_SIZE_U32_VER0)
+ return NULL;
+
+ /* Check that any trailing bytes are zero. */
+ if ((fh_len > NSFS_FID_SIZE_U32_LATEST) &&
+ memchr_inv((void *)fid + NSFS_FID_SIZE_U32_LATEST, 0,
+ fh_len - NSFS_FID_SIZE_U32_LATEST))
+ return NULL;
+
+ switch (fh_type) {
+ case FILEID_NSFS:
+ break;
+ default:
+ return NULL;
+ }
+
+ if (!fid->ns_id)
+ return NULL;
+ /* Either both are set or both are unset. */
+ if (!fid->ns_inum != !fid->ns_type)
+ return NULL;
+
+ scoped_guard(rcu) {
+ ns = ns_tree_lookup_rcu(fid->ns_id, fid->ns_type);
+ if (!ns)
+ return NULL;
+
+ VFS_WARN_ON_ONCE(ns->ns_id != fid->ns_id);
+
+ if (fid->ns_inum && (fid->ns_inum != ns->inum))
+ return NULL;
+ if (fid->ns_type && (fid->ns_type != ns->ns_type))
+ return NULL;
+
+ /*
+ * This is racy because we're not actually taking an
+ * active reference. IOW, it could happen that the
+ * namespace becomes inactive after this check.
+ * We don't care because nsfs_init_inode() will just
+ * resurrect the relevant namespace tree for us. If it
+		 * was active here we simply allow its resurrection.
+ * We could try to take an active reference here and
+ * then drop it again. But really, why bother.
+ */
+ if (!ns_get_unless_inactive(ns))
+ return NULL;
+ }
+
+ switch (ns->ns_type) {
+#ifdef CONFIG_CGROUPS
+ case CLONE_NEWCGROUP:
+ if (!current_in_namespace(to_cg_ns(ns)))
+ owning_ns = to_cg_ns(ns)->user_ns;
+ break;
+#endif
+#ifdef CONFIG_IPC_NS
+ case CLONE_NEWIPC:
+ if (!current_in_namespace(to_ipc_ns(ns)))
+ owning_ns = to_ipc_ns(ns)->user_ns;
+ break;
+#endif
+ case CLONE_NEWNS:
+ if (!current_in_namespace(to_mnt_ns(ns)))
+ owning_ns = to_mnt_ns(ns)->user_ns;
+ break;
+#ifdef CONFIG_NET_NS
+ case CLONE_NEWNET:
+ if (!current_in_namespace(to_net_ns(ns)))
+ owning_ns = to_net_ns(ns)->user_ns;
+ break;
+#endif
+#ifdef CONFIG_PID_NS
+ case CLONE_NEWPID:
+ if (!current_in_namespace(to_pid_ns(ns))) {
+ owning_ns = to_pid_ns(ns)->user_ns;
+ } else if (!READ_ONCE(to_pid_ns(ns)->child_reaper)) {
+ ns->ops->put(ns);
+ return ERR_PTR(-EPERM);
+ }
+ break;
+#endif
+#ifdef CONFIG_TIME_NS
+ case CLONE_NEWTIME:
+ if (!current_in_namespace(to_time_ns(ns)))
+ owning_ns = to_time_ns(ns)->user_ns;
+ break;
+#endif
+#ifdef CONFIG_USER_NS
+ case CLONE_NEWUSER:
+ if (!current_in_namespace(to_user_ns(ns)))
+ owning_ns = to_user_ns(ns);
+ break;
+#endif
+#ifdef CONFIG_UTS_NS
+ case CLONE_NEWUTS:
+ if (!current_in_namespace(to_uts_ns(ns)))
+ owning_ns = to_uts_ns(ns)->user_ns;
+ break;
+#endif
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ if (owning_ns && !ns_capable(owning_ns, CAP_SYS_ADMIN)) {
+ ns->ops->put(ns);
+ return ERR_PTR(-EPERM);
+ }
+
+ /* path_from_stashed() unconditionally consumes the reference. */
+ ret = path_from_stashed(&ns->stashed, nsfs_mnt, ns, &path);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return no_free_ptr(path.dentry);
+}
+
+static int nsfs_export_permission(struct handle_to_path_ctx *ctx,
+ unsigned int oflags)
+{
+ /* nsfs_fh_to_dentry() performs all permission checks. */
+ return 0;
+}
+
+static struct file *nsfs_export_open(const struct path *path, unsigned int oflags)
+{
+ return file_open_root(path, "", oflags, 0);
+}
+
+static const struct export_operations nsfs_export_operations = {
+ .encode_fh = nsfs_encode_fh,
+ .fh_to_dentry = nsfs_fh_to_dentry,
+ .open = nsfs_export_open,
+ .permission = nsfs_export_permission,
+};
+
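With export_operations wired up, a namespace file can now be encoded into a handle whose payload is the ns_id/ns_type/ns_inum triple filled in by nsfs_encode_fh() above. A minimal encode-side sketch using name_to_handle_at(); the payload struct here mirrors the kernel layout and is an assumption, and the reopen side (open_by_handle_at() and its mount_fd convention for nsfs) is not shown:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

/* Assumed to mirror the kernel's struct nsfs_file_handle payload. */
struct nsfs_handle_payload {
	unsigned long long ns_id;
	unsigned int ns_type;
	unsigned int ns_inum;
};

static int encode_ns_handle(const char *ns_path)
{
	struct file_handle *fh;
	struct nsfs_handle_payload *p;
	int mount_id;

	fh = calloc(1, sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return -1;
	fh->handle_bytes = MAX_HANDLE_SZ;

	if (name_to_handle_at(AT_FDCWD, ns_path, fh, &mount_id, 0) < 0) {
		free(fh);
		return -1;
	}

	p = (struct nsfs_handle_payload *)fh->f_handle;
	printf("%s: ns_id=%llu type=%u inum=%u\n",
	       ns_path, p->ns_id, p->ns_type, p->ns_inum);
	free(fh);
	return 0;
}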
static int nsfs_init_fs_context(struct fs_context *fc)
{
struct pseudo_fs_context *ctx = init_pseudo(fc, NSFS_MAGIC);
if (!ctx)
return -ENOMEM;
+ fc->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
+ ctx->s_d_flags |= DCACHE_DONTCACHE;
ctx->ops = &nsfs_ops;
+ ctx->eops = &nsfs_export_operations;
ctx->dops = &ns_dentry_operations;
fc->s_fs_info = (void *)&nsfs_stashed_ops;
return 0;
@@ -412,4 +659,30 @@ void __init nsfs_init(void)
if (IS_ERR(nsfs_mnt))
panic("can't set nsfs up\n");
nsfs_mnt->mnt_sb->s_flags &= ~SB_NOUSER;
+ nsfs_root_path.mnt = nsfs_mnt;
+ nsfs_root_path.dentry = nsfs_mnt->mnt_root;
+}
+
+void nsproxy_ns_active_get(struct nsproxy *ns)
+{
+ ns_ref_active_get(ns->mnt_ns);
+ ns_ref_active_get(ns->uts_ns);
+ ns_ref_active_get(ns->ipc_ns);
+ ns_ref_active_get(ns->pid_ns_for_children);
+ ns_ref_active_get(ns->cgroup_ns);
+ ns_ref_active_get(ns->net_ns);
+ ns_ref_active_get(ns->time_ns);
+ ns_ref_active_get(ns->time_ns_for_children);
+}
+
+void nsproxy_ns_active_put(struct nsproxy *ns)
+{
+ ns_ref_active_put(ns->mnt_ns);
+ ns_ref_active_put(ns->uts_ns);
+ ns_ref_active_put(ns->ipc_ns);
+ ns_ref_active_put(ns->pid_ns_for_children);
+ ns_ref_active_put(ns->cgroup_ns);
+ ns_ref_active_put(ns->net_ns);
+ ns_ref_active_put(ns->time_ns);
+ ns_ref_active_put(ns->time_ns_for_children);
}
diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
index 8d789b017fa9..980ae9157248 100644
--- a/fs/ntfs3/attrib.c
+++ b/fs/ntfs3/attrib.c
@@ -787,7 +787,8 @@ pack_runs:
if (err)
goto out;
- attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
+ attr = mi_find_attr(ni, mi, NULL, type, name, name_len,
+ &le->id);
if (!attr) {
err = -EINVAL;
goto bad_inode;
@@ -1181,7 +1182,7 @@ repack:
goto out;
}
- attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id);
+ attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0, &le->id);
if (!attr) {
err = -EINVAL;
goto out;
@@ -1406,7 +1407,7 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
*/
if (!attr->non_res) {
if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
- ntfs_inode_err(&ni->vfs_inode, "is corrupted");
+ _ntfs_bad_inode(&ni->vfs_inode);
return -EINVAL;
}
addr = resident_data(attr);
@@ -1456,7 +1457,6 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
pgoff_t index = vbo[i] >> PAGE_SHIFT;
if (index != folio->index) {
- struct page *page = &folio->page;
u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
u64 to = min(from + PAGE_SIZE, wof_size);
@@ -1466,8 +1466,7 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
if (err)
goto out1;
- err = ntfs_bio_pages(sbi, run, &page, 1, from,
- to - from, REQ_OP_READ);
+ err = ntfs_read_run(sbi, run, addr, from, to - from);
if (err) {
folio->index = -1;
goto out1;
@@ -1796,7 +1795,7 @@ repack:
goto out;
}
- attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
+ attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0,
&le->id);
if (!attr) {
err = -EINVAL;
@@ -1861,7 +1860,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
struct ATTRIB *attr = NULL, *attr_b;
struct ATTR_LIST_ENTRY *le, *le_b;
struct mft_inode *mi, *mi_b;
- CLST svcn, evcn1, len, dealloc, alen;
+ CLST svcn, evcn1, len, dealloc, alen, done;
CLST vcn, end;
u64 valid_size, data_size, alloc_size, total_size;
u32 mask;
@@ -1924,6 +1923,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
len = bytes >> sbi->cluster_bits;
end = vcn + len;
dealloc = 0;
+ done = 0;
svcn = le64_to_cpu(attr_b->nres.svcn);
evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
@@ -1932,23 +1932,28 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
attr = attr_b;
le = le_b;
mi = mi_b;
- } else if (!le_b) {
+ goto check_seg;
+ }
+
+ if (!le_b) {
err = -EINVAL;
goto out;
- } else {
- le = le_b;
- attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
- &mi);
- if (!attr) {
- err = -EINVAL;
- goto out;
- }
+ }
- svcn = le64_to_cpu(attr->nres.svcn);
- evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
+ le = le_b;
+ attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, &mi);
+ if (!attr) {
+ err = -EINVAL;
+ goto out;
}
for (;;) {
+ CLST vcn1, eat, next_svcn;
+
+ svcn = le64_to_cpu(attr->nres.svcn);
+ evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
+
+check_seg:
if (svcn >= end) {
/* Shift VCN- */
attr->nres.svcn = cpu_to_le64(svcn - len);
@@ -1958,22 +1963,25 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
ni->attr_list.dirty = true;
}
mi->dirty = true;
- } else if (svcn < vcn || end < evcn1) {
- CLST vcn1, eat, next_svcn;
+ goto next_attr;
+ }
- /* Collapse a part of this attribute segment. */
- err = attr_load_runs(attr, ni, run, &svcn);
- if (err)
- goto out;
- vcn1 = max(vcn, svcn);
- eat = min(end, evcn1) - vcn1;
+ run_truncate(run, 0);
+ err = attr_load_runs(attr, ni, run, &svcn);
+ if (err)
+ goto out;
- err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
- true);
- if (err)
- goto out;
+ vcn1 = vcn + done; /* original vcn in attr/run. */
+ eat = min(end, evcn1) - vcn1;
- if (!run_collapse_range(run, vcn1, eat)) {
+ err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc, true);
+ if (err)
+ goto out;
+
+ if (svcn + eat < evcn1) {
+ /* Collapse a part of this attribute segment. */
+
+ if (!run_collapse_range(run, vcn1, eat, done)) {
err = -ENOMEM;
goto out;
}
@@ -1981,7 +1989,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
if (svcn >= vcn) {
/* Shift VCN */
attr->nres.svcn = cpu_to_le64(vcn);
- if (le) {
+ if (le && attr->nres.svcn != le->vcn) {
le->vcn = attr->nres.svcn;
ni->attr_list.dirty = true;
}
@@ -1992,7 +2000,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
goto out;
next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
- if (next_svcn + eat < evcn1) {
+ if (next_svcn + eat + done < evcn1) {
err = ni_insert_nonresident(
ni, ATTR_DATA, NULL, 0, run, next_svcn,
evcn1 - eat - next_svcn, a_flags, &attr,
@@ -2006,18 +2014,9 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
/* Free all allocated memory. */
run_truncate(run, 0);
+ done += eat;
} else {
u16 le_sz;
- u16 roff = le16_to_cpu(attr->nres.run_off);
-
- if (roff > le32_to_cpu(attr->size)) {
- err = -EINVAL;
- goto out;
- }
-
- run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
- evcn1 - 1, svcn, Add2Ptr(attr, roff),
- le32_to_cpu(attr->size) - roff);
/* Delete this attribute segment. */
mi_remove_attr(NULL, mi, attr);
@@ -2030,6 +2029,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
goto out;
}
+ done += evcn1 - svcn;
if (evcn1 >= alen)
break;
@@ -2041,17 +2041,18 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
}
/* Look for required attribute. */
- attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
- 0, &le->id);
+ attr = mi_find_attr(ni, mi, NULL, ATTR_DATA,
+ NULL, 0, &le->id);
if (!attr) {
err = -EINVAL;
goto out;
}
- goto next_attr;
+ continue;
}
le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
}
+next_attr:
if (evcn1 >= alen)
break;
@@ -2060,10 +2061,6 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
err = -EINVAL;
goto out;
}
-
-next_attr:
- svcn = le64_to_cpu(attr->nres.svcn);
- evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
}
if (!attr_b) {
@@ -2553,7 +2550,7 @@ undo_insert_range:
if (attr_load_runs(attr, ni, run, NULL))
goto bad_inode;
- if (!run_collapse_range(run, vcn, len))
+ if (!run_collapse_range(run, vcn, len, 0))
goto bad_inode;
if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
@@ -2587,7 +2584,7 @@ int attr_force_nonresident(struct ntfs_inode *ni)
attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi);
if (!attr) {
- ntfs_bad_inode(&ni->vfs_inode, "no data attribute");
+ _ntfs_bad_inode(&ni->vfs_inode);
return -ENOENT;
}
@@ -2604,74 +2601,3 @@ int attr_force_nonresident(struct ntfs_inode *ni)
return err;
}
-
-/*
- * Change the compression of data attribute
- */
-int attr_set_compress(struct ntfs_inode *ni, bool compr)
-{
- struct ATTRIB *attr;
- struct mft_inode *mi;
-
- attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
- if (!attr)
- return -ENOENT;
-
- if (is_attr_compressed(attr) == !!compr) {
- /* Already required compressed state. */
- return 0;
- }
-
- if (attr->non_res) {
- u16 run_off;
- u32 run_size;
- char *run;
-
- if (attr->nres.data_size) {
- /*
- * There are rare cases when it possible to change
- * compress state without big changes.
- * TODO: Process these cases.
- */
- return -EOPNOTSUPP;
- }
-
- run_off = le16_to_cpu(attr->nres.run_off);
- run_size = le32_to_cpu(attr->size) - run_off;
- run = Add2Ptr(attr, run_off);
-
- if (!compr) {
- /* remove field 'attr->nres.total_size'. */
- memmove(run - 8, run, run_size);
- run_off -= 8;
- }
-
- if (!mi_resize_attr(mi, attr, compr ? +8 : -8)) {
- /*
- * Ignore rare case when there are no 8 bytes in record with attr.
- * TODO: split attribute.
- */
- return -EOPNOTSUPP;
- }
-
- if (compr) {
- /* Make a gap for 'attr->nres.total_size'. */
- memmove(run + 8, run, run_size);
- run_off += 8;
- attr->nres.total_size = attr->nres.alloc_size;
- }
- attr->nres.run_off = cpu_to_le16(run_off);
- }
-
- /* Update data attribute flags. */
- if (compr) {
- attr->flags |= ATTR_FLAG_COMPRESSED;
- attr->nres.c_unit = NTFS_LZNT_CUNIT;
- } else {
- attr->flags &= ~ATTR_FLAG_COMPRESSED;
- attr->nres.c_unit = 0;
- }
- mi->dirty = true;
-
- return 0;
-}
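The attr_collapse_range() rework above threads a new `done` counter through the segment loop: it counts clusters already removed from earlier attribute segments, so the collapse window inside the current, not-yet-shifted segment starts at `vcn + done` in that segment's original coordinates. A minimal userspace model of just that bookkeeping, with a made-up segment layout and without the VCN shifting of trailing segments:

#include <stdio.h>

int main(void)
{
	/* Collapse 50 clusters starting at VCN 100. */
	unsigned long long vcn = 100, len = 50, done = 0;
	/* Invented segment layout: [0,120), [120,200), [200,300). */
	unsigned long long svcn[] = { 0, 120, 200 };
	unsigned long long evcn1[] = { 120, 200, 300 };

	for (int i = 0; i < 3 && done < len; i++) {
		unsigned long long end = vcn + len;
		unsigned long long vcn1, eat;

		if (svcn[i] >= end)
			continue; /* segment lies entirely behind the hole */

		vcn1 = vcn + done; /* original VCN inside this segment */
		eat = (end < evcn1[i] ? end : evcn1[i]) - vcn1;
		done += eat;
		printf("segment %d: remove %llu clusters at vcn %llu (done=%llu)\n",
		       i, eat, vcn1, done);
	}
	return 0;
}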
diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
index 04107b950717..65d05e6a0566 100644
--- a/fs/ntfs3/bitmap.c
+++ b/fs/ntfs3/bitmap.c
@@ -1371,6 +1371,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
mark_buffer_dirty(bh);
unlock_buffer(bh);
/* err = sync_dirty_buffer(bh); */
+ put_bh(bh);
b0 = 0;
bits -= op;
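The single bitmap.c change adds the put_bh() that releases the reference taken when the buffer was obtained in wnd_extend(), once the buffer has been dirtied. A short sketch of the usual get/dirty/put pairing, using an illustrative helper of my own rather than code from the patch:

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/string.h>

/* Illustrative helper (not from the patch): zero one block and release the bh. */
static int demo_zero_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_bread(sb, block); /* takes a reference */

	if (!bh)
		return -EIO;

	lock_buffer(bh);
	memset(bh->b_data, 0, sb->s_blocksize);
	set_buffer_uptodate(bh);
	mark_buffer_dirty(bh);
	unlock_buffer(bh);

	put_bh(bh); /* drop the reference taken by sb_bread() */
	return 0;
}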
diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
index fc6a8aa29e3a..b98e95d6b4d9 100644
--- a/fs/ntfs3/dir.c
+++ b/fs/ntfs3/dir.c
@@ -304,6 +304,9 @@ static inline bool ntfs_dir_emit(struct ntfs_sb_info *sbi,
if (sbi->options->nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN))
return true;
+ if (fname->name_len + sizeof(struct NTFS_DE) > le16_to_cpu(e->size))
+ return true;
+
name_len = ntfs_utf16_to_nls(sbi, fname->name, fname->name_len, name,
PATH_MAX);
if (name_len <= 0) {
@@ -329,9 +332,7 @@ static inline bool ntfs_dir_emit(struct ntfs_sb_info *sbi,
* It does additional locks/reads just to get the type of name.
* Should we use additional mount option to enable branch below?
*/
- if (((fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) ||
- fname->dup.ea_size) &&
- ino != ni->mi.rno) {
+ if (fname->dup.extend_data && ino != ni->mi.rno) {
struct inode *inode = ntfs_iget5(sbi->sb, &e->ref, NULL);
if (!IS_ERR_OR_NULL(inode)) {
dt_type = fs_umode_to_dtype(inode->i_mode);
@@ -512,7 +513,7 @@ out:
ctx->pos = pos;
} else if (err < 0) {
if (err == -EINVAL)
- ntfs_inode_err(dir, "directory corrupted");
+ _ntfs_bad_inode(dir);
ctx->pos = eod;
}
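The new length check in ntfs_dir_emit() refuses to emit a name whose declared length would run past the directory entry that contains it. The same validate-before-dereference pattern in isolation, using an invented record layout rather than the real NTFS_DE/ATTR_FILE_NAME structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Invented on-disk record for illustration only. */
struct demo_entry {
	uint16_t size;    /* total bytes occupied by this entry */
	uint8_t name_len; /* number of 2-byte name characters */
	uint8_t pad;
	uint16_t name[];  /* name_len UTF-16 code units follow */
};

static bool demo_name_fits(const struct demo_entry *e)
{
	/* Reject entries whose declared name would overflow the entry itself. */
	return sizeof(*e) + (size_t)e->name_len * sizeof(e->name[0]) <= e->size;
}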
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 3f96a11804c9..2e7b2e566ebe 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -19,6 +19,12 @@
#include "ntfs.h"
#include "ntfs_fs.h"
+/*
+ * cifs, btrfs, exfat, ext4 and f2fs use this constant.
+ * Hopefully this value will become common to all filesystems.
+ */
+#define NTFS3_IOC_SHUTDOWN _IOR('X', 125, __u32)
+
static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
struct fstrim_range __user *user_range;
@@ -49,70 +55,61 @@ static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
return 0;
}
-/*
- * ntfs_fileattr_get - inode_operations::fileattr_get
- */
-int ntfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+static int ntfs_ioctl_get_volume_label(struct ntfs_sb_info *sbi, u8 __user *buf)
{
- struct inode *inode = d_inode(dentry);
- struct ntfs_inode *ni = ntfs_i(inode);
- u32 flags = 0;
+ if (copy_to_user(buf, sbi->volume.label, FSLABEL_MAX))
+ return -EFAULT;
- if (inode->i_flags & S_IMMUTABLE)
- flags |= FS_IMMUTABLE_FL;
+ return 0;
+}
- if (inode->i_flags & S_APPEND)
- flags |= FS_APPEND_FL;
+static int ntfs_ioctl_set_volume_label(struct ntfs_sb_info *sbi, u8 __user *buf)
+{
+ u8 user[FSLABEL_MAX] = { 0 };
+ int len;
- if (is_compressed(ni))
- flags |= FS_COMPR_FL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
- if (is_encrypted(ni))
- flags |= FS_ENCRYPT_FL;
+ if (copy_from_user(user, buf, FSLABEL_MAX))
+ return -EFAULT;
- fileattr_fill_flags(fa, flags);
+ len = strnlen(user, FSLABEL_MAX);
- return 0;
+ return ntfs_set_label(sbi, user, len);
}
/*
- * ntfs_fileattr_set - inode_operations::fileattr_set
+ * ntfs_force_shutdown - Helper called from the shutdown ioctl.
*/
-int ntfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
- struct fileattr *fa)
+static int ntfs_force_shutdown(struct super_block *sb, u32 flags)
{
- struct inode *inode = d_inode(dentry);
- struct ntfs_inode *ni = ntfs_i(inode);
- u32 flags = fa->flags;
- unsigned int new_fl = 0;
-
- if (fileattr_has_fsx(fa))
- return -EOPNOTSUPP;
-
- if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_COMPR_FL))
- return -EOPNOTSUPP;
+ int err;
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
- if (flags & FS_IMMUTABLE_FL)
- new_fl |= S_IMMUTABLE;
+ if (unlikely(ntfs3_forced_shutdown(sb)))
+ return 0;
- if (flags & FS_APPEND_FL)
- new_fl |= S_APPEND;
+ /* No additional options yet (flags). */
+ err = bdev_freeze(sb->s_bdev);
+ if (err)
+ return err;
+ set_bit(NTFS_FLAGS_SHUTDOWN_BIT, &sbi->flags);
+ bdev_thaw(sb->s_bdev);
+ return 0;
+}
- /* Allowed to change compression for empty files and for directories only. */
- if (!is_dedup(ni) && !is_encrypted(ni) &&
- (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
- /* Change compress state. */
- int err = ni_set_compress(inode, flags & FS_COMPR_FL);
- if (err)
- return err;
- }
+static int ntfs_ioctl_shutdown(struct super_block *sb, unsigned long arg)
+{
+ u32 flags;
- inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND);
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
- inode_set_ctime_current(inode);
- mark_inode_dirty(inode);
+ if (get_user(flags, (__u32 __user *)arg))
+ return -EFAULT;
- return 0;
+ return ntfs_force_shutdown(sb, flags);
}
/*
@@ -121,11 +118,22 @@ int ntfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
- struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
+ struct super_block *sb = inode->i_sb;
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ntfs_i(inode))))
+ return -EINVAL;
switch (cmd) {
case FITRIM:
return ntfs_ioctl_fitrim(sbi, arg);
+ case FS_IOC_GETFSLABEL:
+ return ntfs_ioctl_get_volume_label(sbi, (u8 __user *)arg);
+ case FS_IOC_SETFSLABEL:
+ return ntfs_ioctl_set_volume_label(sbi, (u8 __user *)arg);
+ case NTFS3_IOC_SHUTDOWN:
+ return ntfs_ioctl_shutdown(sb, arg);
}
return -ENOTTY; /* Inappropriate ioctl for device. */
}
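From userspace the new ioctls can be exercised as follows; a minimal sketch that assumes an NTFS3 mount at the given path, and that copies the NTFS3_IOC_SHUTDOWN value from the hunk above rather than from a UAPI header (label changes and shutdown additionally require CAP_SYS_ADMIN):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>    /* FS_IOC_GETFSLABEL, FS_IOC_SETFSLABEL, FSLABEL_MAX */
#include <linux/types.h>

/* Same value as the definition added above; not taken from a UAPI header. */
#define NTFS3_IOC_SHUTDOWN _IOR('X', 125, __u32)

int main(int argc, char **argv)
{
	char label[FSLABEL_MAX] = { 0 };
	__u32 flags = 0; /* the driver currently ignores the shutdown flags */
	int fd = open(argc > 1 ? argv[1] : "/mnt/ntfs", O_RDONLY);

	if (fd < 0)
		return 1;

	if (ioctl(fd, FS_IOC_GETFSLABEL, label) == 0)
		printf("label: %s\n", label);

	/* Uncomment to force-shutdown the mounted filesystem (CAP_SYS_ADMIN). */
	/* ioctl(fd, NTFS3_IOC_SHUTDOWN, &flags); */
	(void)flags;

	close(fd);
	return 0;
}

Any open file or directory on the mount works as the ioctl target, matching how the other filesystems listed in the comment handle the same command.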
@@ -147,6 +155,10 @@ int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct inode *inode = d_inode(path->dentry);
struct ntfs_inode *ni = ntfs_i(inode);
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ni)))
+ return -EINVAL;
+
stat->result_mask |= STATX_BTIME;
stat->btime = ni->i_crtime;
stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */
@@ -220,13 +232,13 @@ static int ntfs_extend_initialized_size(struct file *file,
if (pos + len > new_valid)
len = new_valid - pos;
- err = ntfs_write_begin(file, mapping, pos, len, &folio, NULL);
+ err = ntfs_write_begin(NULL, mapping, pos, len, &folio, NULL);
if (err)
goto out;
folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom);
- err = ntfs_write_end(file, mapping, pos, len, len, folio, NULL);
+ err = ntfs_write_end(NULL, mapping, pos, len, len, folio, NULL);
if (err < 0)
goto out;
pos += len;
@@ -327,16 +339,21 @@ out:
}
/*
- * ntfs_file_mmap - file_operations::mmap
+ * ntfs_file_mmap_prepare - file_operations::mmap_prepare
*/
-static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int ntfs_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
struct inode *inode = file_inode(file);
struct ntfs_inode *ni = ntfs_i(inode);
- u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
- bool rw = vma->vm_flags & VM_WRITE;
+ u64 from = ((u64)desc->pgoff << PAGE_SHIFT);
+ bool rw = desc->vm_flags & VM_WRITE;
int err;
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ni)))
+ return -EINVAL;
+
if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
return -EIO;
@@ -350,14 +367,19 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
return -EOPNOTSUPP;
}
- if (is_compressed(ni) && rw) {
- ntfs_inode_warn(inode, "mmap(write) compressed not supported");
- return -EOPNOTSUPP;
+ if (is_compressed(ni)) {
+ if (rw) {
+ ntfs_inode_warn(inode,
+ "mmap(write) compressed not supported");
+ return -EOPNOTSUPP;
+ }
+ /* Turn off readahead for compressed files. */
+ file->f_ra.ra_pages = 0;
}
if (rw) {
u64 to = min_t(loff_t, i_size_read(inode),
- from + vma->vm_end - vma->vm_start);
+ from + vma_desc_size(desc));
if (is_sparsed(ni)) {
/* Allocate clusters for rw map. */
@@ -376,7 +398,10 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
}
if (ni->i_valid < to) {
- inode_lock(inode);
+ if (!inode_trylock(inode)) {
+ err = -EAGAIN;
+ goto out;
+ }
err = ntfs_extend_initialized_size(file, ni,
ni->i_valid, to);
inode_unlock(inode);
@@ -385,7 +410,7 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
}
}
- err = generic_file_mmap(file, vma);
+ err = generic_file_mmap_prepare(desc);
out:
return err;
}
@@ -525,8 +550,6 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
if (dirty)
mark_inode_dirty(inode);
- /*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/
-
return 0;
}
@@ -801,6 +824,10 @@ int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
umode_t mode = inode->i_mode;
int err;
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ni)))
+ return -EINVAL;
+
if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
return -EIO;
@@ -861,6 +888,10 @@ static int check_read_restriction(struct inode *inode)
{
struct ntfs_inode *ni = ntfs_i(inode);
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ni)))
+ return -EINVAL;
+
if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
return -EIO;
@@ -900,9 +931,24 @@ static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
if (err)
return err;
- if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
- ntfs_inode_warn(inode, "direct i/o + compressed not supported");
- return -EOPNOTSUPP;
+ if (is_compressed(ni)) {
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ ntfs_inode_warn(
+ inode, "direct i/o + compressed not supported");
+ return -EOPNOTSUPP;
+ }
+ /* Turn off readahead for compressed files. */
+ file->f_ra.ra_pages = 0;
+ }
+
+ /* Check minimum alignment for dio. */
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ struct super_block *sb = inode->i_sb;
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+ if ((iocb->ki_pos | iov_iter_alignment(iter)) &
+ sbi->bdev_blocksize_mask) {
+ iocb->ki_flags &= ~IOCB_DIRECT;
+ }
}
return generic_file_read_iter(iocb, iter);
@@ -922,6 +968,11 @@ static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
if (err)
return err;
+ if (is_compressed(ntfs_i(inode))) {
+ /* Turn off readahead for compressed files. */
+ in->f_ra.ra_pages = 0;
+ }
+
return filemap_splice_read(in, ppos, pipe, len, flags);
}
@@ -979,7 +1030,8 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
struct ntfs_inode *ni = ntfs_i(inode);
u64 valid = ni->i_valid;
struct ntfs_sb_info *sbi = ni->mi.sbi;
- struct page *page, **pages = NULL;
+ struct page **pages = NULL;
+ struct folio *folio;
size_t written = 0;
u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
u32 frame_size = 1u << frame_bits;
@@ -989,7 +1041,6 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
u64 frame_vbo;
pgoff_t index;
bool frame_uptodate;
- struct folio *folio;
if (frame_size < PAGE_SIZE) {
/*
@@ -1040,11 +1091,10 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
if (!frame_uptodate && off) {
err = ni_read_frame(ni, frame_vbo, pages,
- pages_per_frame);
+ pages_per_frame, 0);
if (err) {
for (ip = 0; ip < pages_per_frame; ip++) {
- page = pages[ip];
- folio = page_folio(page);
+ folio = page_folio(pages[ip]);
folio_unlock(folio);
folio_put(folio);
}
@@ -1055,10 +1105,9 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
ip = off >> PAGE_SHIFT;
off = offset_in_page(valid);
for (; ip < pages_per_frame; ip++, off = 0) {
- page = pages[ip];
- folio = page_folio(page);
- zero_user_segment(page, off, PAGE_SIZE);
- flush_dcache_page(page);
+ folio = page_folio(pages[ip]);
+ folio_zero_segment(folio, off, PAGE_SIZE);
+ flush_dcache_folio(folio);
folio_mark_uptodate(folio);
}
@@ -1067,8 +1116,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
ni_unlock(ni);
for (ip = 0; ip < pages_per_frame; ip++) {
- page = pages[ip];
- folio = page_folio(page);
+ folio = page_folio(pages[ip]);
folio_mark_uptodate(folio);
folio_unlock(folio);
folio_put(folio);
@@ -1108,12 +1156,11 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
if (off || (to < i_size && (to & (frame_size - 1)))) {
err = ni_read_frame(ni, frame_vbo, pages,
- pages_per_frame);
+ pages_per_frame, 0);
if (err) {
for (ip = 0; ip < pages_per_frame;
ip++) {
- page = pages[ip];
- folio = page_folio(page);
+ folio = page_folio(pages[ip]);
folio_unlock(folio);
folio_put(folio);
}
@@ -1131,10 +1178,10 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
for (;;) {
size_t cp, tail = PAGE_SIZE - off;
- page = pages[ip];
- cp = copy_page_from_iter_atomic(page, off,
- min(tail, bytes), from);
- flush_dcache_page(page);
+ folio = page_folio(pages[ip]);
+ cp = copy_folio_from_iter_atomic(
+ folio, off, min(tail, bytes), from);
+ flush_dcache_folio(folio);
copied += cp;
bytes -= cp;
@@ -1154,9 +1201,8 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
ni_unlock(ni);
for (ip = 0; ip < pages_per_frame; ip++) {
- page = pages[ip];
- ClearPageDirty(page);
- folio = page_folio(page);
+ folio = page_folio(pages[ip]);
+ folio_clear_dirty(folio);
folio_mark_uptodate(folio);
folio_unlock(folio);
folio_put(folio);
@@ -1201,6 +1247,10 @@ static int check_write_restriction(struct inode *inode)
{
struct ntfs_inode *ni = ntfs_i(inode);
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ni)))
+ return -EINVAL;
+
if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
return -EIO;
@@ -1228,21 +1278,22 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
ssize_t ret;
int err;
- err = check_write_restriction(inode);
- if (err)
- return err;
-
- if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
- ntfs_inode_warn(inode, "direct i/o + compressed not supported");
- return -EOPNOTSUPP;
- }
-
if (!inode_trylock(inode)) {
if (iocb->ki_flags & IOCB_NOWAIT)
return -EAGAIN;
inode_lock(inode);
}
+ ret = check_write_restriction(inode);
+ if (ret)
+ goto out;
+
+ if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
+ ntfs_inode_warn(inode, "direct i/o + compressed not supported");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
ret = generic_write_checks(iocb, from);
if (ret <= 0)
goto out;
@@ -1282,6 +1333,10 @@ int ntfs_file_open(struct inode *inode, struct file *file)
{
struct ntfs_inode *ni = ntfs_i(inode);
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ni)))
+ return -EINVAL;
+
if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
return -EIO;
@@ -1322,7 +1377,7 @@ static int ntfs_file_release(struct inode *inode, struct file *file)
if (sbi->options->prealloc &&
((file->f_mode & FMODE_WRITE) &&
atomic_read(&inode->i_writecount) == 1)
- /*
+ /*
* The only file when inode->i_fop = &ntfs_file_operations and
* init_rwsem(&ni->file.run_lock) is not called explicitly is MFT.
*
@@ -1351,6 +1406,10 @@ int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
int err;
struct ntfs_inode *ni = ntfs_i(inode);
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ni)))
+ return -EINVAL;
+
err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
if (err)
return err;
@@ -1381,6 +1440,18 @@ static ssize_t ntfs_file_splice_write(struct pipe_inode_info *pipe,
return iter_file_splice_write(pipe, file, ppos, len, flags);
}
+/*
+ * ntfs_file_fsync - file_operations::fsync
+ */
+static int ntfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ struct inode *inode = file_inode(file);
+ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
+ return generic_file_fsync(file, start, end, datasync);
+}
+
// clang-format off
const struct inode_operations ntfs_file_inode_operations = {
.getattr = ntfs_getattr,
@@ -1389,8 +1460,6 @@ const struct inode_operations ntfs_file_inode_operations = {
.get_acl = ntfs_get_acl,
.set_acl = ntfs_set_acl,
.fiemap = ntfs_fiemap,
- .fileattr_get = ntfs_fileattr_get,
- .fileattr_set = ntfs_fileattr_set,
};
const struct file_operations ntfs_file_operations = {
@@ -1403,9 +1472,9 @@ const struct file_operations ntfs_file_operations = {
#endif
.splice_read = ntfs_file_splice_read,
.splice_write = ntfs_file_splice_write,
- .mmap = ntfs_file_mmap,
+ .mmap_prepare = ntfs_file_mmap_prepare,
.open = ntfs_file_open,
- .fsync = generic_file_fsync,
+ .fsync = ntfs_file_fsync,
.fallocate = ntfs_fallocate,
.release = ntfs_file_release,
};
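The reworked read path above quietly drops IOCB_DIRECT when either the file position or the iterator alignment is not a multiple of the device block size; OR-ing the two values and masking tests both in one operation. A tiny standalone illustration, assuming a 512-byte logical block size (the real mask comes from sbi->bdev_blocksize_mask):

#include <stdio.h>

int main(void)
{
	unsigned long long mask = 512 - 1; /* assumed logical block size of 512 */
	unsigned long long pos[]   = { 0, 4096, 4608, 1000 };
	unsigned long long align[] = { 0,  512,  256,  512 };

	for (int i = 0; i < 4; i++) {
		int misaligned = ((pos[i] | align[i]) & mask) != 0;

		printf("pos=%llu align=%llu -> %s\n", pos[i], align[i],
		       misaligned ? "buffered fallback" : "direct I/O ok");
	}
	return 0;
}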
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 8b39d0ce5f28..641ddaf8d4a0 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -75,7 +75,7 @@ struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni)
{
const struct ATTRIB *attr;
- attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
+ attr = mi_find_attr(ni, &ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO)) :
NULL;
}
@@ -89,7 +89,7 @@ struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni)
{
const struct ATTRIB *attr;
- attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
+ attr = mi_find_attr(ni, &ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO5)) :
NULL;
@@ -148,8 +148,10 @@ int ni_load_mi_ex(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
goto out;
err = mi_get(ni->mi.sbi, rno, &r);
- if (err)
+ if (err) {
+ _ntfs_bad_inode(&ni->vfs_inode);
return err;
+ }
ni_add_mi(ni, r);
@@ -201,7 +203,8 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
*mi = &ni->mi;
/* Look for required attribute in primary record. */
- return mi_find_attr(&ni->mi, attr, type, name, name_len, NULL);
+ return mi_find_attr(ni, &ni->mi, attr, type, name, name_len,
+ NULL);
}
/* First look for list entry of required type. */
@@ -217,7 +220,7 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
return NULL;
/* Look for required attribute. */
- attr = mi_find_attr(m, NULL, type, name, name_len, &le->id);
+ attr = mi_find_attr(ni, m, NULL, type, name, name_len, &le->id);
if (!attr)
goto out;
@@ -238,8 +241,7 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
return attr;
out:
- ntfs_inode_err(&ni->vfs_inode, "failed to parse mft record");
- ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
+ _ntfs_bad_inode(&ni->vfs_inode);
return NULL;
}
@@ -259,7 +261,7 @@ struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
if (mi)
*mi = &ni->mi;
/* Enum attributes in primary record. */
- return mi_enum_attr(&ni->mi, attr);
+ return mi_enum_attr(ni, &ni->mi, attr);
}
/* Get next list entry. */
@@ -275,62 +277,7 @@ struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
*mi = mi2;
/* Find attribute in loaded record. */
- return rec_find_attr_le(mi2, le2);
-}
-
-/*
- * ni_load_attr - Load attribute that contains given VCN.
- */
-struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
- const __le16 *name, u8 name_len, CLST vcn,
- struct mft_inode **pmi)
-{
- struct ATTR_LIST_ENTRY *le;
- struct ATTRIB *attr;
- struct mft_inode *mi;
- struct ATTR_LIST_ENTRY *next;
-
- if (!ni->attr_list.size) {
- if (pmi)
- *pmi = &ni->mi;
- return mi_find_attr(&ni->mi, NULL, type, name, name_len, NULL);
- }
-
- le = al_find_ex(ni, NULL, type, name, name_len, NULL);
- if (!le)
- return NULL;
-
- /*
- * Unfortunately ATTR_LIST_ENTRY contains only start VCN.
- * So to find the ATTRIB segment that contains 'vcn' we should
- * enumerate some entries.
- */
- if (vcn) {
- for (;; le = next) {
- next = al_find_ex(ni, le, type, name, name_len, NULL);
- if (!next || le64_to_cpu(next->vcn) > vcn)
- break;
- }
- }
-
- if (ni_load_mi(ni, le, &mi))
- return NULL;
-
- if (pmi)
- *pmi = mi;
-
- attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
- if (!attr)
- return NULL;
-
- if (!attr->non_res)
- return attr;
-
- if (le64_to_cpu(attr->nres.svcn) <= vcn &&
- vcn <= le64_to_cpu(attr->nres.evcn))
- return attr;
-
- return NULL;
+ return rec_find_attr_le(ni, mi2, le2);
}
/*
@@ -378,8 +325,10 @@ bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
mi_get_ref(&ni->mi, &m->mrec->parent_ref);
- ni_add_mi(ni, m);
- *mi = m;
+ *mi = ni_ins_mi(ni, &ni->mi_tree, m->rno, &m->node);
+ if (*mi != m)
+ mi_put(m);
+
return true;
}
@@ -398,7 +347,8 @@ int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
int diff;
if (base_only || type == ATTR_LIST || !ni->attr_list.size) {
- attr = mi_find_attr(&ni->mi, NULL, type, name, name_len, id);
+ attr = mi_find_attr(ni, &ni->mi, NULL, type, name, name_len,
+ id);
if (!attr)
return -ENOENT;
@@ -437,7 +387,7 @@ next_le2:
al_remove_le(ni, le);
- attr = mi_find_attr(mi, NULL, type, name, name_len, id);
+ attr = mi_find_attr(ni, mi, NULL, type, name, name_len, id);
if (!attr)
return -ENOENT;
@@ -485,7 +435,7 @@ ni_ins_new_attr(struct ntfs_inode *ni, struct mft_inode *mi,
name = le->name;
}
- attr = mi_insert_attr(mi, type, name, name_len, asize, name_off);
+ attr = mi_insert_attr(ni, mi, type, name, name_len, asize, name_off);
if (!attr) {
if (le_added)
al_remove_le(ni, le);
@@ -673,7 +623,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
if (err)
return err;
- attr_list = mi_find_attr(&ni->mi, NULL, ATTR_LIST, NULL, 0, NULL);
+ attr_list = mi_find_attr(ni, &ni->mi, NULL, ATTR_LIST, NULL, 0, NULL);
if (!attr_list)
return 0;
@@ -695,7 +645,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
if (!mi)
return 0;
- attr = mi_find_attr(mi, NULL, le->type, le_name(le),
+ attr = mi_find_attr(ni, mi, NULL, le->type, le_name(le),
le->name_len, &le->id);
if (!attr)
return 0;
@@ -731,7 +681,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
goto out;
}
- attr = mi_find_attr(mi, NULL, le->type, le_name(le),
+ attr = mi_find_attr(ni, mi, NULL, le->type, le_name(le),
le->name_len, &le->id);
if (!attr) {
/* Should never happened, 'cause already checked. */
@@ -740,7 +690,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
asize = le32_to_cpu(attr->size);
/* Insert into primary record. */
- attr_ins = mi_insert_attr(&ni->mi, le->type, le_name(le),
+ attr_ins = mi_insert_attr(ni, &ni->mi, le->type, le_name(le),
le->name_len, asize,
le16_to_cpu(attr->name_off));
if (!attr_ins) {
@@ -768,7 +718,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
if (!mi)
continue;
- attr = mi_find_attr(mi, NULL, le->type, le_name(le),
+ attr = mi_find_attr(ni, mi, NULL, le->type, le_name(le),
le->name_len, &le->id);
if (!attr)
continue;
@@ -819,7 +769,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
* Skip estimating exact memory requirement.
* Looks like one record_size is always enough.
*/
- le = kmalloc(al_aligned(rs), GFP_NOFS);
+ le = kzalloc(al_aligned(rs), GFP_NOFS);
if (!le)
return -ENOMEM;
@@ -831,7 +781,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
free_b = 0;
attr = NULL;
- for (; (attr = mi_enum_attr(&ni->mi, attr)); le = Add2Ptr(le, sz)) {
+ for (; (attr = mi_enum_attr(ni, &ni->mi, attr)); le = Add2Ptr(le, sz)) {
sz = le_size(attr->name_len);
le->type = attr->type;
le->size = cpu_to_le16(sz);
@@ -886,7 +836,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
u32 asize = le32_to_cpu(b->size);
u16 name_off = le16_to_cpu(b->name_off);
- attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off),
+ attr = mi_insert_attr(ni, mi, b->type, Add2Ptr(b, name_off),
b->name_len, asize, name_off);
if (!attr)
goto out;
@@ -909,7 +859,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
goto out;
}
- attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0,
+ attr = mi_insert_attr(ni, &ni->mi, ATTR_LIST, NULL, 0,
lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT);
if (!attr)
goto out;
@@ -993,13 +943,13 @@ static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
mi = rb_entry(node, struct mft_inode, node);
if (is_mft_data &&
- (mi_enum_attr(mi, NULL) ||
+ (mi_enum_attr(ni, mi, NULL) ||
vbo <= ((u64)mi->rno << sbi->record_bits))) {
/* We can't accept this record 'cause MFT's bootstrapping. */
continue;
}
if (is_mft &&
- mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, NULL)) {
+ mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0, NULL)) {
/*
* This child record already has a ATTR_DATA.
* So it can't accept any other records.
@@ -1008,7 +958,7 @@ static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
}
if ((type != ATTR_NAME || name_len) &&
- mi_find_attr(mi, NULL, type, name, name_len, NULL)) {
+ mi_find_attr(ni, mi, NULL, type, name, name_len, NULL)) {
/* Only indexed attributes can share same record. */
continue;
}
@@ -1067,9 +1017,9 @@ insert_ext:
out2:
ni_remove_mi(ni, mi);
- mi_put(mi);
out1:
+ mi_put(mi);
ntfs_mark_rec_free(sbi, rno, is_mft);
out:
@@ -1157,7 +1107,7 @@ static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
/* Estimate the result of moving all possible attributes away. */
attr = NULL;
- while ((attr = mi_enum_attr(&ni->mi, attr))) {
+ while ((attr = mi_enum_attr(ni, &ni->mi, attr))) {
if (attr->type == ATTR_STD)
continue;
if (attr->type == ATTR_LIST)
@@ -1175,7 +1125,7 @@ static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
attr = NULL;
for (;;) {
- attr = mi_enum_attr(&ni->mi, attr);
+ attr = mi_enum_attr(ni, &ni->mi, attr);
if (!attr) {
/* We should never be here 'cause we have already check this case. */
err = -EINVAL;
@@ -1259,7 +1209,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
mi = rb_entry(node, struct mft_inode, node);
- attr = mi_enum_attr(mi, NULL);
+ attr = mi_enum_attr(ni, mi, NULL);
if (!attr) {
mft_min = mi->rno;
@@ -1280,7 +1230,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
ni_remove_mi(ni, mi_new);
}
- attr = mi_find_attr(&ni->mi, NULL, ATTR_DATA, NULL, 0, NULL);
+ attr = mi_find_attr(ni, &ni->mi, NULL, ATTR_DATA, NULL, 0, NULL);
if (!attr) {
err = -EINVAL;
goto out;
@@ -1397,7 +1347,7 @@ int ni_expand_list(struct ntfs_inode *ni)
continue;
/* Find attribute in primary record. */
- attr = rec_find_attr_le(&ni->mi, le);
+ attr = rec_find_attr_le(ni, &ni->mi, le);
if (!attr) {
err = -EINVAL;
goto out;
@@ -1604,8 +1554,8 @@ int ni_delete_all(struct ntfs_inode *ni)
roff = le16_to_cpu(attr->nres.run_off);
if (roff > asize) {
- _ntfs_bad_inode(&ni->vfs_inode);
- return -EINVAL;
+ /* ni_enum_attr_ex checks this case. */
+ continue;
}
/* run==1 means unpack and deallocate. */
@@ -2072,6 +2022,29 @@ out:
return err;
}
+static struct page *ntfs_lock_new_page(struct address_space *mapping,
+ pgoff_t index, gfp_t gfp)
+{
+ struct folio *folio = __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+ struct page *page;
+
+ if (IS_ERR(folio))
+ return ERR_CAST(folio);
+
+ if (!folio_test_uptodate(folio))
+ return folio_file_page(folio, index);
+
+ /* Use a temporary page to avoid data corruption */
+ folio_unlock(folio);
+ folio_put(folio);
+ page = alloc_page(gfp);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+ __SetPageLocked(page);
+ return page;
+}
+
/*
* ni_readpage_cmpr
*
@@ -2126,15 +2099,15 @@ int ni_readpage_cmpr(struct ntfs_inode *ni, struct folio *folio)
if (i == idx)
continue;
- pg = find_or_create_page(mapping, index, gfp_mask);
- if (!pg) {
- err = -ENOMEM;
+ pg = ntfs_lock_new_page(mapping, index, gfp_mask);
+ if (IS_ERR(pg)) {
+ err = PTR_ERR(pg);
goto out1;
}
pages[i] = pg;
}
- err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame);
+ err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame, 0);
out1:
for (i = 0; i < pages_per_frame; i++) {
@@ -2204,17 +2177,9 @@ int ni_decompress_file(struct ntfs_inode *ni)
*/
index = 0;
for (vbo = 0; vbo < i_size; vbo += bytes) {
- u32 nr_pages;
bool new;
- if (vbo + frame_size > i_size) {
- bytes = i_size - vbo;
- nr_pages = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
- } else {
- nr_pages = pages_per_frame;
- bytes = frame_size;
- }
-
+ bytes = vbo + frame_size > i_size ? (i_size - vbo) : frame_size;
end = bytes_to_cluster(sbi, vbo + bytes);
for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) {
@@ -2227,27 +2192,19 @@ int ni_decompress_file(struct ntfs_inode *ni)
for (i = 0; i < pages_per_frame; i++, index++) {
struct page *pg;
- pg = find_or_create_page(mapping, index, gfp_mask);
- if (!pg) {
+ pg = ntfs_lock_new_page(mapping, index, gfp_mask);
+ if (IS_ERR(pg)) {
while (i--) {
unlock_page(pages[i]);
put_page(pages[i]);
}
- err = -ENOMEM;
+ err = PTR_ERR(pg);
goto out;
}
pages[i] = pg;
}
- err = ni_read_frame(ni, vbo, pages, pages_per_frame);
-
- if (!err) {
- down_read(&ni->file.run_lock);
- err = ntfs_bio_pages(sbi, &ni->file.run, pages,
- nr_pages, vbo, bytes,
- REQ_OP_WRITE);
- up_read(&ni->file.run_lock);
- }
+ err = ni_read_frame(ni, vbo, pages, pages_per_frame, 1);
for (i = 0; i < pages_per_frame; i++) {
unlock_page(pages[i]);
@@ -2437,20 +2394,19 @@ out2:
* Pages - Array of locked pages.
*/
int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
- u32 pages_per_frame)
+ u32 pages_per_frame, int copy)
{
int err;
struct ntfs_sb_info *sbi = ni->mi.sbi;
u8 cluster_bits = sbi->cluster_bits;
char *frame_ondisk = NULL;
char *frame_mem = NULL;
- struct page **pages_disk = NULL;
struct ATTR_LIST_ENTRY *le = NULL;
struct runs_tree *run = &ni->file.run;
u64 valid_size = ni->i_valid;
u64 vbo_disk;
size_t unc_size;
- u32 frame_size, i, npages_disk, ondisk_size;
+ u32 frame_size, i, ondisk_size;
struct page *pg;
struct ATTRIB *attr;
CLST frame, clst_data;
@@ -2459,9 +2415,6 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
* To simplify decompress algorithm do vmap for source
* and target pages.
*/
- for (i = 0; i < pages_per_frame; i++)
- kmap(pages[i]);
-
frame_size = pages_per_frame << PAGE_SHIFT;
frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL);
if (!frame_mem) {
@@ -2545,7 +2498,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
err = attr_wof_frame_info(ni, attr, run, frame64, frames,
frame_bits, &ondisk_size, &vbo_data);
if (err)
- goto out2;
+ goto out1;
if (frame64 == frames) {
unc_size = 1 + ((i_size - 1) & (frame_size - 1));
@@ -2556,7 +2509,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
if (ondisk_size > frame_size) {
err = -EINVAL;
- goto out2;
+ goto out1;
}
if (!attr->non_res) {
@@ -2577,10 +2530,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
ARRAY_SIZE(WOF_NAME), run, vbo_disk,
vbo_data + ondisk_size);
if (err)
- goto out2;
- npages_disk = (ondisk_size + (vbo_disk & (PAGE_SIZE - 1)) +
- PAGE_SIZE - 1) >>
- PAGE_SHIFT;
+ goto out1;
#endif
} else if (is_attr_compressed(attr)) {
/* LZNT compression. */
@@ -2614,61 +2564,37 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
if (clst_data >= NTFS_LZNT_CLUSTERS) {
/* Frame is not compressed. */
down_read(&ni->file.run_lock);
- err = ntfs_bio_pages(sbi, run, pages, pages_per_frame,
- frame_vbo, ondisk_size,
- REQ_OP_READ);
+ err = ntfs_read_run(sbi, run, frame_mem, frame_vbo,
+ ondisk_size);
up_read(&ni->file.run_lock);
goto out1;
}
vbo_disk = frame_vbo;
- npages_disk = (ondisk_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
} else {
__builtin_unreachable();
err = -EINVAL;
goto out1;
}
- pages_disk = kcalloc(npages_disk, sizeof(*pages_disk), GFP_NOFS);
- if (!pages_disk) {
+ /* Allocate memory to read compressed data into. */
+ frame_ondisk = kvmalloc(ondisk_size, GFP_KERNEL);
+ if (!frame_ondisk) {
err = -ENOMEM;
- goto out2;
- }
-
- for (i = 0; i < npages_disk; i++) {
- pg = alloc_page(GFP_KERNEL);
- if (!pg) {
- err = -ENOMEM;
- goto out3;
- }
- pages_disk[i] = pg;
- lock_page(pg);
- kmap(pg);
+ goto out1;
}
/* Read 'ondisk_size' bytes from disk. */
down_read(&ni->file.run_lock);
- err = ntfs_bio_pages(sbi, run, pages_disk, npages_disk, vbo_disk,
- ondisk_size, REQ_OP_READ);
+ err = ntfs_read_run(sbi, run, frame_ondisk, vbo_disk, ondisk_size);
up_read(&ni->file.run_lock);
if (err)
- goto out3;
-
- /*
- * To simplify decompress algorithm do vmap for source and target pages.
- */
- frame_ondisk = vmap(pages_disk, npages_disk, VM_MAP, PAGE_KERNEL_RO);
- if (!frame_ondisk) {
- err = -ENOMEM;
- goto out3;
- }
+ goto out2;
- /* Decompress: Frame_ondisk -> frame_mem. */
#ifdef CONFIG_NTFS3_LZX_XPRESS
if (run != &ni->file.run) {
/* LZX or XPRESS */
- err = decompress_lzx_xpress(
- sbi, frame_ondisk + (vbo_disk & (PAGE_SIZE - 1)),
- ondisk_size, frame_mem, unc_size, frame_size);
+ err = decompress_lzx_xpress(sbi, frame_ondisk, ondisk_size,
+ frame_mem, unc_size, frame_size);
} else
#endif
{
@@ -2686,30 +2612,25 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
memset(frame_mem + ok, 0, frame_size - ok);
}
- vunmap(frame_ondisk);
-
-out3:
- for (i = 0; i < npages_disk; i++) {
- pg = pages_disk[i];
- if (pg) {
- kunmap(pg);
- unlock_page(pg);
- put_page(pg);
- }
- }
- kfree(pages_disk);
-
out2:
+ kvfree(frame_ondisk);
+out1:
#ifdef CONFIG_NTFS3_LZX_XPRESS
if (run != &ni->file.run)
run_free(run);
+ if (!err && copy) {
+ /* We are called from 'ni_decompress_file'. */
+ /* Copy the decompressed LZX or XPRESS data into its new place. */
+ down_read(&ni->file.run_lock);
+ err = ntfs_write_run(sbi, &ni->file.run, frame_mem, frame_vbo,
+ frame_size);
+ up_read(&ni->file.run_lock);
+ }
#endif
-out1:
vunmap(frame_mem);
out:
for (i = 0; i < pages_per_frame; i++) {
pg = pages[i];
- kunmap(pg);
SetPageUptodate(pg);
}
@@ -2726,18 +2647,16 @@ int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
{
int err;
struct ntfs_sb_info *sbi = ni->mi.sbi;
+ struct folio *folio = page_folio(pages[0]);
u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
u32 frame_size = sbi->cluster_size << NTFS_LZNT_CUNIT;
- u64 frame_vbo = (u64)pages[0]->index << PAGE_SHIFT;
+ u64 frame_vbo = folio_pos(folio);
CLST frame = frame_vbo >> frame_bits;
char *frame_ondisk = NULL;
- struct page **pages_disk = NULL;
struct ATTR_LIST_ENTRY *le = NULL;
char *frame_mem;
struct ATTRIB *attr;
struct mft_inode *mi;
- u32 i;
- struct page *pg;
size_t compr_size, ondisk_size;
struct lznt *lznt;
@@ -2772,38 +2691,18 @@ int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
goto out;
}
- pages_disk = kcalloc(pages_per_frame, sizeof(struct page *), GFP_NOFS);
- if (!pages_disk) {
- err = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < pages_per_frame; i++) {
- pg = alloc_page(GFP_KERNEL);
- if (!pg) {
- err = -ENOMEM;
- goto out1;
- }
- pages_disk[i] = pg;
- lock_page(pg);
- kmap(pg);
- }
-
- /* To simplify compress algorithm do vmap for source and target pages. */
- frame_ondisk = vmap(pages_disk, pages_per_frame, VM_MAP, PAGE_KERNEL);
+ /* Allocate memory to write compressed data into. */
+ frame_ondisk = kvmalloc(frame_size, GFP_KERNEL);
if (!frame_ondisk) {
err = -ENOMEM;
- goto out1;
+ goto out;
}
- for (i = 0; i < pages_per_frame; i++)
- kmap(pages[i]);
-
/* Map in-memory frame for read-only. */
frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL_RO);
if (!frame_mem) {
err = -ENOMEM;
- goto out2;
+ goto out1;
}
mutex_lock(&sbi->compress.mtx_lznt);
@@ -2819,7 +2718,7 @@ int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
if (!lznt) {
mutex_unlock(&sbi->compress.mtx_lznt);
err = -ENOMEM;
- goto out3;
+ goto out2;
}
sbi->compress.lznt = lznt;
@@ -2856,30 +2755,16 @@ int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
goto out2;
down_read(&ni->file.run_lock);
- err = ntfs_bio_pages(sbi, &ni->file.run,
- ondisk_size < frame_size ? pages_disk : pages,
- pages_per_frame, frame_vbo, ondisk_size,
- REQ_OP_WRITE);
+ err = ntfs_write_run(sbi, &ni->file.run,
+ ondisk_size < frame_size ? frame_ondisk :
+ frame_mem,
+ frame_vbo, ondisk_size);
up_read(&ni->file.run_lock);
-out3:
- vunmap(frame_mem);
-
out2:
- for (i = 0; i < pages_per_frame; i++)
- kunmap(pages[i]);
-
- vunmap(frame_ondisk);
+ vunmap(frame_mem);
out1:
- for (i = 0; i < pages_per_frame; i++) {
- pg = pages_disk[i];
- if (pg) {
- kunmap(pg);
- unlock_page(pg);
- put_page(pg);
- }
- }
- kfree(pages_disk);
+ kvfree(frame_ondisk);
out:
return err;
}
@@ -3054,8 +2939,7 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
* ni_rename - Remove one name and insert new name.
*/
int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
- struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de,
- bool *is_bad)
+ struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de)
{
int err;
struct NTFS_DE *de2 = NULL;
@@ -3078,8 +2962,8 @@ int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
err = ni_add_name(new_dir_ni, ni, new_de);
if (!err) {
err = ni_remove_name(dir_ni, ni, de, &de2, &undo);
- if (err && ni_remove_name(new_dir_ni, ni, new_de, &de2, &undo))
- *is_bad = true;
+ WARN_ON(err &&
+ ni_remove_name(new_dir_ni, ni, new_de, &de2, &undo));
}
/*
@@ -3170,11 +3054,22 @@ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
}
}
- /* TODO: Fill reparse info. */
- dup->reparse = 0;
- dup->ea_size = 0;
+ dup->extend_data = 0;
- if (ni->ni_flags & NI_FLAG_EA) {
+ if (dup->fa & FILE_ATTRIBUTE_REPARSE_POINT) {
+ attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL,
+ NULL);
+
+ if (attr) {
+ const struct REPARSE_POINT *rp;
+
+ rp = resident_data_ex(attr,
+ sizeof(struct REPARSE_POINT));
+ /* If ATTR_REPARSE exists 'rp' can't be NULL. */
+ if (rp)
+ dup->extend_data = rp->ReparseTag;
+ }
+ } else if (ni->ni_flags & NI_FLAG_EA) {
attr = ni_find_attr(ni, attr, &le, ATTR_EA_INFO, NULL, 0, NULL,
NULL);
if (attr) {
@@ -3183,7 +3078,7 @@ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
info = resident_data_ex(attr, sizeof(struct EA_INFO));
/* If ATTR_EA_INFO exists 'info' can't be NULL. */
if (info)
- dup->ea_size = info->size_pack;
+ dup->extend_data = info->size;
}
}
@@ -3250,6 +3145,10 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
if (is_bad_inode(inode) || sb_rdonly(sb))
return 0;
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ni)))
+ return -EINVAL;
+
if (unlikely(ntfs3_forced_shutdown(sb)))
return -EIO;
@@ -3343,7 +3242,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
if (!mi->dirty)
continue;
- is_empty = !mi_enum_attr(mi, NULL);
+ is_empty = !mi_enum_attr(ni, mi, NULL);
if (is_empty)
clear_rec_inuse(mi->mrec);
@@ -3378,75 +3277,3 @@ out:
return 0;
}
-
-/*
- * ni_set_compress
- *
- * Helper for 'ntfs_fileattr_set'.
- * Changes compression for empty files and directories only.
- */
-int ni_set_compress(struct inode *inode, bool compr)
-{
- int err;
- struct ntfs_inode *ni = ntfs_i(inode);
- struct ATTR_STD_INFO *std;
- const char *bad_inode;
-
- if (is_compressed(ni) == !!compr)
- return 0;
-
- if (is_sparsed(ni)) {
- /* sparse and compress not compatible. */
- return -EOPNOTSUPP;
- }
-
- if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) {
- /*Skip other inodes. (symlink,fifo,...) */
- return -EOPNOTSUPP;
- }
-
- bad_inode = NULL;
-
- ni_lock(ni);
-
- std = ni_std(ni);
- if (!std) {
- bad_inode = "no std";
- goto out;
- }
-
- if (S_ISREG(inode->i_mode)) {
- err = attr_set_compress(ni, compr);
- if (err) {
- if (err == -ENOENT) {
- /* Fix on the fly? */
- /* Each file must contain data attribute. */
- bad_inode = "no data attribute";
- }
- goto out;
- }
- }
-
- ni->std_fa = std->fa;
- if (compr)
- std->fa |= FILE_ATTRIBUTE_COMPRESSED;
- else
- std->fa &= ~FILE_ATTRIBUTE_COMPRESSED;
-
- if (ni->std_fa != std->fa) {
- ni->std_fa = std->fa;
- ni->mi.dirty = true;
- }
- /* update duplicate information and directory entries in ni_write_inode.*/
- ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
- err = 0;
-
-out:
- ni_unlock(ni);
- if (bad_inode) {
- ntfs_bad_inode(inode, bad_inode);
- err = -EINVAL;
- }
-
- return err;
-}
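The ni_read_frame()/ni_write_frame() rework above replaces the temporary pages_disk array with a single kvmalloc() buffer for the on-disk (compressed) side, while the in-memory side of the frame is still vmap()ed from the caller's page array. A stripped-down sketch of that shape, with read_bytes() standing in for ntfs_read_run() and decompress_one_frame() for the real LZNT/LZX/XPRESS call (both are placeholders of mine, not functions from the patch):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Placeholders for this sketch only. */
int read_bytes(void *buf, loff_t off, size_t len);
int decompress_one_frame(const void *src, size_t src_len, void *dst, size_t dst_len);

static int read_compressed_frame(struct page **pages, u32 nr_pages,
				 loff_t ondisk_off, size_t ondisk_size)
{
	size_t frame_size = (size_t)nr_pages << PAGE_SHIFT;
	void *frame_mem, *frame_ondisk;
	int err;

	/* Destination side: map the caller's locked pages as one linear buffer. */
	frame_mem = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!frame_mem)
		return -ENOMEM;

	/* Source side: one contiguous allocation instead of a second page array. */
	frame_ondisk = kvmalloc(ondisk_size, GFP_KERNEL);
	if (!frame_ondisk) {
		err = -ENOMEM;
		goto out;
	}

	err = read_bytes(frame_ondisk, ondisk_off, ondisk_size);
	if (!err)
		err = decompress_one_frame(frame_ondisk, ondisk_size,
					   frame_mem, frame_size);

	kvfree(frame_ondisk);
out:
	vunmap(frame_mem);
	return err;
}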
diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
index d0d530f4e2b9..38934e6978ec 100644
--- a/fs/ntfs3/fslog.c
+++ b/fs/ntfs3/fslog.c
@@ -3091,16 +3091,16 @@ static int do_action(struct ntfs_log *log, struct OPEN_ATTR_ENRTY *oe,
inode = ilookup(sbi->sb, rno);
if (inode) {
mi = &ntfs_i(inode)->mi;
- } else if (op == InitializeFileRecordSegment) {
- mi = kzalloc(sizeof(struct mft_inode), GFP_NOFS);
- if (!mi)
- return -ENOMEM;
- err = mi_format_new(mi, sbi, rno, 0, false);
- if (err)
- goto out;
} else {
/* Read from disk. */
err = mi_get(sbi, rno, &mi);
+ if (err && op == InitializeFileRecordSegment) {
+ mi = kzalloc(sizeof(struct mft_inode),
+ GFP_NOFS);
+ if (!mi)
+ return -ENOMEM;
+ err = mi_format_new(mi, sbi, rno, 0, false);
+ }
if (err)
return err;
}
@@ -3109,15 +3109,13 @@ static int do_action(struct ntfs_log *log, struct OPEN_ATTR_ENRTY *oe,
if (op == DeallocateFileRecordSegment)
goto skip_load_parent;
- if (InitializeFileRecordSegment != op) {
- if (rec->rhdr.sign == NTFS_BAAD_SIGNATURE)
- goto dirty_vol;
- if (!check_lsn(&rec->rhdr, rlsn))
- goto out;
- if (!check_file_record(rec, NULL, sbi))
- goto dirty_vol;
- attr = Add2Ptr(rec, roff);
- }
+ if (rec->rhdr.sign == NTFS_BAAD_SIGNATURE)
+ goto dirty_vol;
+ if (!check_lsn(&rec->rhdr, rlsn))
+ goto out;
+ if (!check_file_record(rec, NULL, sbi))
+ goto dirty_vol;
+ attr = Add2Ptr(rec, roff);
if (is_rec_base(rec) || InitializeFileRecordSegment == op) {
rno_base = rno;
@@ -3143,7 +3141,7 @@ static int do_action(struct ntfs_log *log, struct OPEN_ATTR_ENRTY *oe,
if (inode)
iput(inode);
- else if (mi)
+ else
mi_put(mi);
inode = inode_parent;
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index 03471bc9371c..5f138f715835 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -905,10 +905,18 @@ void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
void ntfs_bad_inode(struct inode *inode, const char *hint)
{
struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
+ struct ntfs_inode *ni = ntfs_i(inode);
ntfs_inode_err(inode, "%s", hint);
- make_bad_inode(inode);
- ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+
+ /* Do not call make_bad_inode()! */
+ ni->ni_bad = true;
+
+ /* Avoid recursion if bad inode is $Volume. */
+ if (inode->i_ino != MFT_REC_VOL &&
+ !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING)) {
+ ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ }
}
/*
@@ -1031,34 +1039,6 @@ struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
return NULL;
}
-int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
-{
- struct block_device *bdev = sb->s_bdev;
- u32 blocksize = sb->s_blocksize;
- u64 block = lbo >> sb->s_blocksize_bits;
- u32 off = lbo & (blocksize - 1);
- u32 op = blocksize - off;
-
- for (; bytes; block += 1, off = 0, op = blocksize) {
- struct buffer_head *bh = __bread(bdev, block, blocksize);
-
- if (!bh)
- return -EIO;
-
- if (op > bytes)
- op = bytes;
-
- memcpy(buffer, bh->b_data + off, op);
-
- put_bh(bh);
-
- bytes -= op;
- buffer = Add2Ptr(buffer, op);
- }
-
- return 0;
-}
-
int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
const void *buf, int wait)
{
@@ -1369,7 +1349,14 @@ int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
}
if (buffer_locked(bh))
__wait_on_buffer(bh);
- set_buffer_uptodate(bh);
+
+ lock_buffer(bh);
+ if (!buffer_uptodate(bh))
+ {
+ memset(bh->b_data, 0, blocksize);
+ set_buffer_uptodate(bh);
+ }
+ unlock_buffer(bh);
} else {
bh = ntfs_bread(sb, block);
if (!bh) {
@@ -1492,99 +1479,86 @@ int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
}
/*
- * ntfs_bio_pages - Read/write pages from/to disk.
+ * ntfs_read_write_run - Read/Write disk's page cache.
*/
-int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
- struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
- enum req_op op)
+int ntfs_read_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
+ void *buf, u64 vbo, size_t bytes, int wr)
{
- int err = 0;
- struct bio *new, *bio = NULL;
struct super_block *sb = sbi->sb;
- struct block_device *bdev = sb->s_bdev;
- struct page *page;
+ struct address_space *mapping = sb->s_bdev->bd_mapping;
u8 cluster_bits = sbi->cluster_bits;
- CLST lcn, clen, vcn, vcn_next;
- u32 add, off, page_idx;
+ CLST vcn_next, vcn = vbo >> cluster_bits;
+ CLST lcn, clen;
u64 lbo, len;
- size_t run_idx;
- struct blk_plug plug;
+ size_t idx;
+ u32 off, op;
+ struct folio *folio;
+ char *kaddr;
if (!bytes)
return 0;
- blk_start_plug(&plug);
+ if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
+ return -ENOENT;
- /* Align vbo and bytes to be 512 bytes aligned. */
- lbo = (vbo + bytes + 511) & ~511ull;
- vbo = vbo & ~511ull;
- bytes = lbo - vbo;
+ if (lcn == SPARSE_LCN)
+ return -EINVAL;
- vcn = vbo >> cluster_bits;
- if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
- err = -ENOENT;
- goto out;
- }
off = vbo & sbi->cluster_mask;
- page_idx = 0;
- page = pages[0];
+ lbo = ((u64)lcn << cluster_bits) + off;
+ len = ((u64)clen << cluster_bits) - off;
for (;;) {
- lbo = ((u64)lcn << cluster_bits) + off;
- len = ((u64)clen << cluster_bits) - off;
-new_bio:
- new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
- if (bio) {
- bio_chain(bio, new);
- submit_bio(bio);
- }
- bio = new;
- bio->bi_iter.bi_sector = lbo >> 9;
+ /* Read range [lbo, lbo+len). */
+ folio = read_mapping_folio(mapping, lbo >> PAGE_SHIFT, NULL);
- while (len) {
- off = vbo & (PAGE_SIZE - 1);
- add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- if (bio_add_page(bio, page, add, off) < add)
- goto new_bio;
+ off = offset_in_page(lbo);
+ op = PAGE_SIZE - off;
- if (bytes <= add)
- goto out;
- bytes -= add;
- vbo += add;
+ if (op > len)
+ op = len;
+ if (op > bytes)
+ op = bytes;
- if (add + off == PAGE_SIZE) {
- page_idx += 1;
- if (WARN_ON(page_idx >= nr_pages)) {
- err = -EINVAL;
- goto out;
- }
- page = pages[page_idx];
- }
+ kaddr = kmap_local_folio(folio, 0);
+ if (wr) {
+ memcpy(kaddr + off, buf, op);
+ folio_mark_dirty(folio);
+ } else {
+ memcpy(buf, kaddr + off, op);
+ flush_dcache_folio(folio);
+ }
+ kunmap_local(kaddr);
+ folio_put(folio);
- if (len <= add)
- break;
- len -= add;
- lbo += add;
+ bytes -= op;
+ if (!bytes)
+ return 0;
+
+ buf += op;
+ len -= op;
+ if (len) {
+ /* Next page of the volume. */
+ lbo += op;
+ continue;
}
+ /* Get the next run entry. */
vcn_next = vcn + clen;
- if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
+ if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
vcn != vcn_next) {
- err = -ENOENT;
- goto out;
+ return -ENOENT;
}
- off = 0;
- }
-out:
- if (bio) {
- if (!err)
- err = submit_bio_wait(bio);
- bio_put(bio);
- }
- blk_finish_plug(&plug);
- return err;
+ if (lcn == SPARSE_LCN)
+ return -EINVAL;
+
+ lbo = ((u64)lcn << cluster_bits);
+ len = ((u64)clen << cluster_bits);
+ }
}
/*
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index 9089c58a005c..7157cfd70fdc 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -618,7 +618,7 @@ static bool index_hdr_check(const struct INDEX_HDR *hdr, u32 bytes)
u32 off = le32_to_cpu(hdr->de_off);
if (!IS_ALIGNED(off, 8) || tot > bytes || end > tot ||
- off + sizeof(struct NTFS_DE) > end) {
+ size_add(off, sizeof(struct NTFS_DE)) > end) {
/* incorrect index buffer. */
return false;
}
@@ -736,7 +736,7 @@ fill_table:
if (end > total)
return NULL;
- if (off + sizeof(struct NTFS_DE) > end)
+ if (size_add(off, sizeof(struct NTFS_DE)) > end)
return NULL;
e = Add2Ptr(hdr, off);
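Both size_add() conversions above matter mostly on 32-bit builds, where a corrupted de_off near the top of the address space can wrap the plain addition and slip past the `> end` test; size_add() from <linux/overflow.h> saturates at SIZE_MAX on overflow, so the comparison still rejects the entry. A userspace illustration with a stand-in for the saturating helper:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's saturating size_add(). */
static size_t sat_add(size_t a, size_t b)
{
	size_t r = a + b;

	return r < a ? SIZE_MAX : r;
}

int main(void)
{
	size_t end = 4096, entry = 16;
	size_t off = SIZE_MAX - 8; /* corrupted on-disk offset */

	printf("plain add:      %s\n",
	       off + entry > end ? "rejected" : "slips past the check");
	printf("saturating add: %s\n",
	       sat_add(off, entry) > end ? "rejected" : "slips past the check");
	return 0;
}

The same reasoning applies to the second hunk's `off + sizeof(struct NTFS_DE) > end` check in the directory-entry walker.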
@@ -1094,8 +1094,7 @@ int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn,
ok:
if (!index_buf_check(ib, bytes, &vbn)) {
- ntfs_inode_err(&ni->vfs_inode, "directory corrupted");
- ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
+ _ntfs_bad_inode(&ni->vfs_inode);
err = -EINVAL;
goto out;
}
@@ -1117,8 +1116,7 @@ ok:
out:
if (err == -E_NTFS_CORRUPT) {
- ntfs_inode_err(&ni->vfs_inode, "directory corrupted");
- ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
+ _ntfs_bad_inode(&ni->vfs_inode);
err = -EINVAL;
}
@@ -1510,6 +1508,16 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
bmp_size = bmp_size_v = le32_to_cpu(bmp->res.data_size);
}
+ /*
+ * Index blocks exist, but $BITMAP has zero valid bits.
+ * This indicates on-disk corruption and must be rejected.
+ */
+ if (in->name == I30_NAME &&
+ unlikely(bmp_size_v == 0 && indx->alloc_run.count)) {
+ err = -EINVAL;
+ goto out1;
+ }
+
bit = bmp_size << 3;
}
@@ -1916,7 +1924,8 @@ indx_insert_into_buffer(struct ntfs_index *indx, struct ntfs_inode *ni,
* Undo critical operations.
*/
indx_mark_free(indx, ni, new_vbn >> indx->idx2vbn_bits);
- memcpy(hdr1, hdr1_saved, used1);
+ unsafe_memcpy(hdr1, hdr1_saved, used1,
+ "There are entries after the structure");
indx_write(indx, ni, n1, 0);
}
@@ -2184,6 +2193,10 @@ static int indx_get_entry_to_replace(struct ntfs_index *indx,
e = hdr_first_de(&n->index->ihdr);
fnd_push(fnd, n, e);
+ if (!e) {
+ err = -EINVAL;
+ goto out;
+ }
if (!de_is_last(e)) {
/*
@@ -2205,6 +2218,10 @@ static int indx_get_entry_to_replace(struct ntfs_index *indx,
n = fnd->nodes[level];
te = hdr_first_de(&n->index->ihdr);
+ if (!te) {
+ err = -EINVAL;
+ goto out;
+ }
/* Copy the candidate entry into the replacement entry buffer. */
re = kmalloc(le16_to_cpu(te->size) + sizeof(u64), GFP_NOFS);
if (!re) {
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index be04d2845bb7..0a9ac5efeb67 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -410,6 +410,9 @@ end_enum:
if (!std5)
goto out;
+ if (is_bad_inode(inode))
+ goto out;
+
if (!is_match && name) {
err = -ENOENT;
goto out;
@@ -468,6 +471,8 @@ end_enum:
fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
/* Records in $Extend are not a files or general directories. */
inode->i_op = &ntfs_file_inode_operations;
+ mode = S_IFREG;
+ init_rwsem(&ni->file.run_lock);
} else {
err = -EINVAL;
goto out;
@@ -533,7 +538,7 @@ struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
return ERR_PTR(-ENOMEM);
/* If this is a freshly allocated inode, need to read it now. */
- if (inode->i_state & I_NEW)
+ if (inode_state_read_once(inode) & I_NEW)
inode = ntfs_read_mft(inode, name, ref);
else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
/*
@@ -802,6 +807,10 @@ static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
ret = 0;
goto out;
}
+ if (is_compressed(ni)) {
+ ret = 0;
+ goto out;
+ }
ret = blockdev_direct_IO(iocb, inode, iter,
wr ? ntfs_get_block_direct_IO_W :
@@ -864,13 +873,17 @@ out:
}
static int ntfs_resident_writepage(struct folio *folio,
- struct writeback_control *wbc, void *data)
+ struct writeback_control *wbc)
{
- struct address_space *mapping = data;
+ struct address_space *mapping = folio->mapping;
struct inode *inode = mapping->host;
struct ntfs_inode *ni = ntfs_i(inode);
int ret;
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ni)))
+ return -EINVAL;
+
if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
return -EIO;
@@ -889,12 +902,21 @@ static int ntfs_writepages(struct address_space *mapping,
{
struct inode *inode = mapping->host;
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ntfs_i(inode))))
+ return -EINVAL;
+
if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
return -EIO;
- if (is_resident(ntfs_i(inode)))
- return write_cache_pages(mapping, wbc, ntfs_resident_writepage,
- mapping);
+ if (is_resident(ntfs_i(inode))) {
+ struct folio *folio = NULL;
+ int error;
+
+ while ((folio = writeback_iter(mapping, wbc, folio, &error)))
+ error = ntfs_resident_writepage(folio, wbc);
+ return error;
+ }
return mpage_writepages(mapping, wbc, ntfs_get_block);
}
@@ -905,13 +927,17 @@ static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
bh_result, create, GET_BLOCK_WRITE_BEGIN);
}
-int ntfs_write_begin(struct file *file, struct address_space *mapping,
+int ntfs_write_begin(const struct kiocb *iocb, struct address_space *mapping,
loff_t pos, u32 len, struct folio **foliop, void **fsdata)
{
int err;
struct inode *inode = mapping->host;
struct ntfs_inode *ni = ntfs_i(inode);
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ni)))
+ return -EINVAL;
+
if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
return -EIO;
@@ -950,8 +976,9 @@ out:
/*
* ntfs_write_end - Address_space_operations::write_end.
*/
-int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
- u32 len, u32 copied, struct folio *folio, void *fsdata)
+int ntfs_write_end(const struct kiocb *iocb, struct address_space *mapping,
+ loff_t pos, u32 len, u32 copied, struct folio *folio,
+ void *fsdata)
{
struct inode *inode = mapping->host;
struct ntfs_inode *ni = ntfs_i(inode);
@@ -982,7 +1009,7 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
folio_unlock(folio);
folio_put(folio);
} else {
- err = generic_write_end(file, mapping, pos, len, copied, folio,
+ err = generic_write_end(iocb, mapping, pos, len, copied, folio,
fsdata);
}
@@ -1022,46 +1049,6 @@ int ntfs_sync_inode(struct inode *inode)
}
/*
- * writeback_inode - Helper function for ntfs_flush_inodes().
- *
- * This writes both the inode and the file data blocks, waiting
- * for in flight data blocks before the start of the call. It
- * does not wait for any io started during the call.
- */
-static int writeback_inode(struct inode *inode)
-{
- int ret = sync_inode_metadata(inode, 0);
-
- if (!ret)
- ret = filemap_fdatawrite(inode->i_mapping);
- return ret;
-}
-
-/*
- * ntfs_flush_inodes
- *
- * Write data and metadata corresponding to i1 and i2. The io is
- * started but we do not wait for any of it to finish.
- *
- * filemap_flush() is used for the block device, so if there is a dirty
- * page for a block already in flight, we will not wait and start the
- * io over again.
- */
-int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
- struct inode *i2)
-{
- int ret = 0;
-
- if (i1)
- ret = writeback_inode(i1);
- if (!ret && i2)
- ret = writeback_inode(i2);
- if (!ret)
- ret = filemap_flush(sb->s_bdev_file->f_mapping);
- return ret;
-}
-
-/*
* Helper function to read file.
*/
int inode_read_data(struct inode *inode, void *data, size_t bytes)
@@ -1095,10 +1082,10 @@ int inode_read_data(struct inode *inode, void *data, size_t bytes)
* Number of bytes for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK)
* for unicode string of @uni_len length.
*/
-static inline u32 ntfs_reparse_bytes(u32 uni_len)
+static inline u32 ntfs_reparse_bytes(u32 uni_len, bool is_absolute)
{
/* Header + unicode string + decorated unicode string. */
- return sizeof(short) * (2 * uni_len + 4) +
+ return sizeof(short) * (2 * uni_len + (is_absolute ? 4 : 0)) +
offsetof(struct REPARSE_DATA_BUFFER,
SymbolicLinkReparseBuffer.PathBuffer);
}
@@ -1111,8 +1098,11 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
struct REPARSE_DATA_BUFFER *rp;
__le16 *rp_name;
typeof(rp->SymbolicLinkReparseBuffer) *rs;
+ bool is_absolute;
- rp = kzalloc(ntfs_reparse_bytes(2 * size + 2), GFP_NOFS);
+ is_absolute = symname[0] && symname[1] == ':';
+
+ rp = kzalloc(ntfs_reparse_bytes(2 * size + 2, is_absolute), GFP_NOFS);
if (!rp)
return ERR_PTR(-ENOMEM);
@@ -1127,7 +1117,7 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
goto out;
/* err = the length of unicode name of symlink. */
- *nsize = ntfs_reparse_bytes(err);
+ *nsize = ntfs_reparse_bytes(err, is_absolute);
if (*nsize > sbi->reparse.max_size) {
err = -EFBIG;
@@ -1147,24 +1137,28 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
/* PrintName + SubstituteName. */
rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
- rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8);
+ rs->SubstituteNameLength =
+ cpu_to_le16(sizeof(short) * err + (is_absolute ? 8 : 0));
rs->PrintNameLength = rs->SubstituteNameOffset;
/*
* TODO: Use relative path if possible to allow Windows to
* parse this path.
- * 0-absolute path 1- relative path (SYMLINK_FLAG_RELATIVE).
+ * 0-absolute path, 1- relative path (SYMLINK_FLAG_RELATIVE).
*/
- rs->Flags = 0;
-
- memmove(rp_name + err + 4, rp_name, sizeof(short) * err);
-
- /* Decorate SubstituteName. */
- rp_name += err;
- rp_name[0] = cpu_to_le16('\\');
- rp_name[1] = cpu_to_le16('?');
- rp_name[2] = cpu_to_le16('?');
- rp_name[3] = cpu_to_le16('\\');
+ rs->Flags = cpu_to_le32(is_absolute ? 0 : SYMLINK_FLAG_RELATIVE);
+
+ memmove(rp_name + err + (is_absolute ? 4 : 0), rp_name,
+ sizeof(short) * err);
+
+ if (is_absolute) {
+ /* Decorate SubstituteName. */
+ rp_name += err;
+ rp_name[0] = cpu_to_le16('\\');
+ rp_name[1] = cpu_to_le16('?');
+ rp_name[2] = cpu_to_le16('?');
+ rp_name[3] = cpu_to_le16('\\');
+ }
return rp;
out:
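
A minimal standalone sketch of the size arithmetic changed above: both PrintName and SubstituteName carry the UTF-16 target, and only absolute targets pay for the four extra wide characters of the "\??\" prefix. REPARSE_HDR_BYTES is an illustrative stand-in for the offsetof() header term, not the real value.

#include <stdio.h>

/* Stand-in for offsetof(struct REPARSE_DATA_BUFFER,
 * SymbolicLinkReparseBuffer.PathBuffer); illustrative value only. */
#define REPARSE_HDR_BYTES 20u

/* Mirrors the patched helper: two copies of the UTF-16 name
 * (PrintName + SubstituteName), plus four extra wide characters for
 * the "\??\" decoration only when the target is absolute. */
static unsigned int reparse_bytes(unsigned int uni_len, int is_absolute)
{
        return sizeof(short) * (2 * uni_len + (is_absolute ? 4 : 0)) +
               REPARSE_HDR_BYTES;
}

int main(void)
{
        /* A 10-character target: 40 name bytes when relative,
         * 48 when absolute (the "\??\" prefix adds 8). */
        printf("relative: %u\n", reparse_bytes(10, 0) - REPARSE_HDR_BYTES);
        printf("absolute: %u\n", reparse_bytes(10, 1) - REPARSE_HDR_BYTES);
        return 0;
}
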
@@ -1287,12 +1281,18 @@ int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
fa |= FILE_ATTRIBUTE_READONLY;
/* Allocate PATH_MAX bytes. */
- new_de = __getname();
+ new_de = kmem_cache_zalloc(names_cachep, GFP_KERNEL);
if (!new_de) {
err = -ENOMEM;
goto out1;
}
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(dir_ni))) {
+ err = -EINVAL;
+ goto out2;
+ }
+
if (unlikely(ntfs3_forced_shutdown(sb))) {
err = -EIO;
goto out2;
@@ -1383,7 +1383,7 @@ int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
fname->dup.a_time = std5->cr_time;
fname->dup.alloc_size = fname->dup.data_size = 0;
fname->dup.fa = std5->fa;
- fname->dup.ea_size = fname->dup.reparse = 0;
+ fname->dup.extend_data = S_ISLNK(mode) ? IO_REPARSE_TAG_SYMLINK : 0;
dsize = le16_to_cpu(new_de->key_size);
asize = ALIGN(SIZEOF_RESIDENT + dsize, 8);
@@ -1623,27 +1623,30 @@ int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
inode->i_flags |= S_NOSEC;
}
- /*
- * ntfs_init_acl and ntfs_save_wsl_perm update extended attribute.
- * The packed size of extended attribute is stored in direntry too.
- * 'fname' here points to inside new_de.
- */
- err = ntfs_save_wsl_perm(inode, &fname->dup.ea_size);
- if (err)
- goto out6;
-
- /*
- * update ea_size in file_name attribute too.
- * Use ni_find_attr cause layout of MFT record may be changed
- * in ntfs_init_acl and ntfs_save_wsl_perm.
- */
- attr = ni_find_attr(ni, NULL, NULL, ATTR_NAME, NULL, 0, NULL, NULL);
- if (attr) {
- struct ATTR_FILE_NAME *fn;
+ if (!S_ISLNK(mode)) {
+ /*
+ * ntfs_init_acl and ntfs_save_wsl_perm update extended attribute.
+ * The packed size of extended attribute is stored in direntry too.
+ * 'fname' here points to inside new_de.
+ */
+ err = ntfs_save_wsl_perm(inode, &fname->dup.extend_data);
+ if (err)
+ goto out6;
- fn = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
- if (fn)
- fn->dup.ea_size = fname->dup.ea_size;
+ /*
+	 * Update extend_data in the file_name attribute too.
+	 * Use ni_find_attr() because the layout of the MFT record may be
+	 * changed in ntfs_init_acl and ntfs_save_wsl_perm.
+ */
+ attr = ni_find_attr(ni, NULL, NULL, ATTR_NAME, NULL, 0, NULL,
+ NULL);
+ if (attr) {
+ struct ATTR_FILE_NAME *fn;
+
+ fn = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
+ if (fn)
+ fn->dup.extend_data = fname->dup.extend_data;
+ }
}
/* We do not need to update parent directory later */
@@ -1720,7 +1723,7 @@ int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
struct NTFS_DE *de;
/* Allocate PATH_MAX bytes. */
- de = __getname();
+ de = kmem_cache_zalloc(names_cachep, GFP_KERNEL);
if (!de)
return -ENOMEM;
@@ -1758,7 +1761,7 @@ int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
return -EINVAL;
/* Allocate PATH_MAX bytes. */
- de = __getname();
+ de = kmem_cache_zalloc(names_cachep, GFP_KERNEL);
if (!de)
return -ENOMEM;
@@ -2103,7 +2106,7 @@ const struct address_space_operations ntfs_aops = {
const struct address_space_operations ntfs_aops_cmpr = {
.read_folio = ntfs_read_folio,
- .readahead = ntfs_readahead,
.dirty_folio = block_dirty_folio,
+ .direct_IO = ntfs_direct_IO,
};
// clang-format on
diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
index abf7e81584a9..3b24ca02de61 100644
--- a/fs/ntfs3/namei.c
+++ b/fs/ntfs3/namei.c
@@ -171,6 +171,10 @@ static int ntfs_unlink(struct inode *dir, struct dentry *dentry)
struct ntfs_inode *ni = ntfs_i(dir);
int err;
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ni)))
+ return -EINVAL;
+
if (unlikely(ntfs3_forced_shutdown(dir->i_sb)))
return -EIO;
@@ -191,6 +195,10 @@ static int ntfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
{
u32 size = strlen(symname);
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ntfs_i(dir))))
+ return -EINVAL;
+
if (unlikely(ntfs3_forced_shutdown(dir->i_sb)))
return -EIO;
@@ -199,13 +207,13 @@ static int ntfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
}
/*
- * ntfs_mkdir- inode_operations::mkdir
+ * ntfs_mkdir - inode_operations::mkdir
*/
-static int ntfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *ntfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- return ntfs_create_inode(idmap, dir, dentry, NULL, S_IFDIR | mode, 0,
- NULL, 0, NULL);
+ return ERR_PTR(ntfs_create_inode(idmap, dir, dentry, NULL,
+ S_IFDIR | mode, 0, NULL, 0, NULL));
}
/*
@@ -216,6 +224,10 @@ static int ntfs_rmdir(struct inode *dir, struct dentry *dentry)
struct ntfs_inode *ni = ntfs_i(dir);
int err;
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ni)))
+ return -EINVAL;
+
if (unlikely(ntfs3_forced_shutdown(dir->i_sb)))
return -EIO;
@@ -244,7 +256,7 @@ static int ntfs_rename(struct mnt_idmap *idmap, struct inode *dir,
struct ntfs_inode *ni = ntfs_i(inode);
struct inode *new_inode = d_inode(new_dentry);
struct NTFS_DE *de, *new_de;
- bool is_same, is_bad;
+ bool is_same;
/*
* de - memory of PATH_MAX bytes:
* [0-1024) - original name (dentry->d_name)
@@ -256,6 +268,10 @@ static int ntfs_rename(struct mnt_idmap *idmap, struct inode *dir,
1024);
static_assert(PATH_MAX >= 4 * 1024);
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ni)))
+ return -EINVAL;
+
if (unlikely(ntfs3_forced_shutdown(sb)))
return -EIO;
@@ -313,12 +329,8 @@ static int ntfs_rename(struct mnt_idmap *idmap, struct inode *dir,
if (dir_ni != new_dir_ni)
ni_lock_dir2(new_dir_ni);
- is_bad = false;
- err = ni_rename(dir_ni, new_dir_ni, ni, de, new_de, &is_bad);
- if (is_bad) {
- /* Restore after failed rename failed too. */
- _ntfs_bad_inode(inode);
- } else if (!err) {
+ err = ni_rename(dir_ni, new_dir_ni, ni, de, new_de);
+ if (!err) {
simple_rename_timestamp(dir, dentry, new_dir, new_dentry);
mark_inode_dirty(inode);
mark_inode_dirty(dir);
@@ -507,8 +519,6 @@ const struct inode_operations ntfs_dir_inode_operations = {
.getattr = ntfs_getattr,
.listxattr = ntfs_listxattr,
.fiemap = ntfs_fiemap,
- .fileattr_get = ntfs_fileattr_get,
- .fileattr_set = ntfs_fileattr_set,
};
const struct inode_operations ntfs_special_inode_operations = {
diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
index 241f2ffdd920..552b97905813 100644
--- a/fs/ntfs3/ntfs.h
+++ b/fs/ntfs3/ntfs.h
@@ -561,8 +561,7 @@ struct NTFS_DUP_INFO {
__le64 alloc_size; // 0x20: Data attribute allocated size, multiple of cluster size.
__le64 data_size; // 0x28: Data attribute size <= Dataalloc_size.
enum FILE_ATTRIBUTE fa; // 0x30: Standard DOS attributes & more.
- __le16 ea_size; // 0x34: Packed EAs.
- __le16 reparse; // 0x36: Used by Reparse.
+ __le32 extend_data; // 0x34: Extended data.
}; // 0x38
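
For orientation, a compile-time sketch of why the duplicated-info block stays 0x38 bytes after merging the two 16-bit fields at 0x34 into one 32-bit field. The leading timestamp area and the plain integer types are stand-ins for the kernel's __le64/__le32 layout, not the real definition.

#include <assert.h>
#include <stdint.h>

struct dup_info_sketch {
        uint64_t times[4];      /* 0x00..0x1f: timestamps (names elided here) */
        uint64_t alloc_size;    /* 0x20 */
        uint64_t data_size;     /* 0x28 */
        uint32_t fa;            /* 0x30: FILE_ATTRIBUTE bits */
        uint32_t extend_data;   /* 0x34: was ea_size (__le16) + reparse (__le16) */
};

static_assert(sizeof(struct dup_info_sketch) == 0x38,
              "duplicated info block is still 0x38 bytes");
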
@@ -717,7 +716,7 @@ static inline struct NTFS_DE *hdr_first_de(const struct INDEX_HDR *hdr)
struct NTFS_DE *e;
u16 esize;
- if (de_off >= used || de_off + sizeof(struct NTFS_DE) > used )
+ if (de_off >= used || size_add(de_off, sizeof(struct NTFS_DE)) > used)
return NULL;
e = Add2Ptr(hdr, de_off);
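
A small userspace mirror of the size_add() idiom adopted here, assuming (as in linux/overflow.h) that the helper saturates at SIZE_MAX instead of wrapping, so the bounds check cannot be defeated by an overflowing offset.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Saturating add: on overflow return SIZE_MAX, so a later
 * "off + entry_size > used" comparison always trips. */
static size_t size_add_sat(size_t a, size_t b)
{
        size_t sum;

        return __builtin_add_overflow(a, b, &sum) ? SIZE_MAX : sum;
}

int main(void)
{
        size_t used = 4096;

        /* A crafted de_off near SIZE_MAX can no longer wrap past 'used'. */
        printf("rejected: %d\n", size_add_sat(SIZE_MAX - 4, 16) > used);
        return 0;
}
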
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index cd8e8374bb5a..a4559c9f64e6 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -212,6 +212,7 @@ struct ntfs_sb_info {
u32 discard_granularity;
u64 discard_granularity_mask_inv; // ~(discard_granularity_mask_inv-1)
+ u32 bdev_blocksize_mask; // bdev_logical_block_size(bdev) - 1;
u32 cluster_size; // bytes per cluster
u32 cluster_mask; // == cluster_size - 1
@@ -280,7 +281,7 @@ struct ntfs_sb_info {
__le16 flags; // Cached current VOLUME_INFO::flags, VOLUME_FLAG_DIRTY.
u8 major_ver;
u8 minor_ver;
- char label[256];
+ char label[FSLABEL_MAX];
bool real_dirty; // Real fs state.
} volume;
@@ -377,6 +378,13 @@ struct ntfs_inode {
*/
u8 mi_loaded;
+ /*
+	 * Use this field to avoid any write(s).
+	 * If the inode goes bad during initialization, use make_bad_inode().
+	 * If it goes bad during later operations, set this field instead.
+ */
+ u8 ni_bad;
+
union {
struct ntfs_index dir;
struct {
@@ -454,7 +462,6 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size);
int attr_force_nonresident(struct ntfs_inode *ni);
-int attr_set_compress(struct ntfs_inode *ni, bool compr);
/* Functions from attrlist.c */
void al_destroy(struct ntfs_inode *ni);
@@ -497,9 +504,6 @@ extern const struct file_operations ntfs_dir_operations;
extern const struct file_operations ntfs_legacy_dir_operations;
/* Globals from file.c */
-int ntfs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
-int ntfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
- struct fileattr *fa);
int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, u32 flags);
int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
@@ -530,9 +534,6 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
struct ATTR_LIST_ENTRY **le,
struct mft_inode **mi);
-struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
- const __le16 *name, u8 name_len, CLST vcn,
- struct mft_inode **pmi);
int ni_load_all_mi(struct ntfs_inode *ni);
bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi);
int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
@@ -570,7 +571,7 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
int ni_readpage_cmpr(struct ntfs_inode *ni, struct folio *folio);
int ni_decompress_file(struct ntfs_inode *ni);
int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
- u32 pages_per_frame);
+ u32 pages_per_frame, int copy);
int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
u32 pages_per_frame);
int ni_remove_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
@@ -584,11 +585,10 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
struct NTFS_DE *de);
int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
- struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de,
- bool *is_bad);
+ struct ntfs_inode *ni, struct NTFS_DE *de,
+ struct NTFS_DE *new_de);
bool ni_is_dirty(struct inode *inode);
-int ni_set_compress(struct inode *inode, bool compr);
/* Globals from fslog.c */
bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes);
@@ -619,7 +619,6 @@ enum NTFS_DIRTY_FLAGS {
NTFS_DIRTY_ERROR = 2,
};
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty);
-int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer);
int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
const void *buffer, int wait);
int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
@@ -635,9 +634,21 @@ int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
u32 bytes, struct ntfs_buffers *nb);
int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
struct ntfs_buffers *nb, int sync);
-int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
- struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
- enum req_op op);
+int ntfs_read_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
+ void *buf, u64 vbo, size_t bytes, int wr);
+static inline int ntfs_read_run(struct ntfs_sb_info *sbi,
+ const struct runs_tree *run, void *buf, u64 vbo,
+ size_t bytes)
+{
+ return ntfs_read_write_run(sbi, run, buf, vbo, bytes, 0);
+}
+static inline int ntfs_write_run(struct ntfs_sb_info *sbi,
+ const struct runs_tree *run, void *buf,
+ u64 vbo, size_t bytes)
+{
+ return ntfs_read_write_run(sbi, run, buf, vbo, bytes, 1);
+}
+
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run);
int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
u64 vbo, u64 *lbo, u64 *bytes);
@@ -711,14 +722,13 @@ struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
int ntfs_set_size(struct inode *inode, u64 new_size);
int ntfs_get_block(struct inode *inode, sector_t vbn,
struct buffer_head *bh_result, int create);
-int ntfs_write_begin(struct file *file, struct address_space *mapping,
+int ntfs_write_begin(const struct kiocb *iocb, struct address_space *mapping,
loff_t pos, u32 len, struct folio **foliop, void **fsdata);
-int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
- u32 len, u32 copied, struct folio *folio, void *fsdata);
+int ntfs_write_end(const struct kiocb *iocb, struct address_space *mapping,
+ loff_t pos, u32 len, u32 copied, struct folio *folio,
+ void *fsdata);
int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc);
int ntfs_sync_inode(struct inode *inode);
-int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
- struct inode *i2);
int inode_read_data(struct inode *inode, void *data, size_t bytes);
int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const struct cpu_str *uni,
@@ -745,29 +755,30 @@ int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi);
void mi_put(struct mft_inode *mi);
int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno);
int mi_read(struct mft_inode *mi, bool is_mft);
-struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr);
-// TODO: id?
-struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
- enum ATTR_TYPE type, const __le16 *name,
- u8 name_len, const __le16 *id);
-static inline struct ATTRIB *rec_find_attr_le(struct mft_inode *rec,
+struct ATTRIB *mi_enum_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ struct ATTRIB *attr);
+struct ATTRIB *mi_find_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ struct ATTRIB *attr, enum ATTR_TYPE type,
+ const __le16 *name, u8 name_len, const __le16 *id);
+static inline struct ATTRIB *rec_find_attr_le(struct ntfs_inode *ni,
+ struct mft_inode *rec,
struct ATTR_LIST_ENTRY *le)
{
- return mi_find_attr(rec, NULL, le->type, le_name(le), le->name_len,
+ return mi_find_attr(ni, rec, NULL, le->type, le_name(le), le->name_len,
&le->id);
}
int mi_write(struct mft_inode *mi, int wait);
int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
__le16 flags, bool is_mft);
-struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
- const __le16 *name, u8 name_len, u32 asize,
- u16 name_off);
+struct ATTRIB *mi_insert_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ enum ATTR_TYPE type, const __le16 *name,
+ u8 name_len, u32 asize, u16 name_off);
bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
struct ATTRIB *attr);
bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes);
int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
- struct runs_tree *run, CLST len);
+ const struct runs_tree *run, CLST len);
static inline bool mi_is_ref(const struct mft_inode *mi,
const struct MFT_REF *ref)
{
@@ -802,7 +813,7 @@ void run_truncate_head(struct runs_tree *run, CLST vcn);
void run_truncate_around(struct runs_tree *run, CLST vcn);
bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
bool is_mft);
-bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len);
+bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len, CLST sub);
bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len);
bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
CLST *lcn, CLST *len);
@@ -884,7 +895,7 @@ int ntfs_acl_chmod(struct mnt_idmap *idmap, struct dentry *dentry);
ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
extern const struct xattr_handler *const ntfs_xattr_handlers[];
-int ntfs_save_wsl_perm(struct inode *inode, __le16 *ea_size);
+int ntfs_save_wsl_perm(struct inode *inode, __le32 *ea_size);
void ntfs_get_wsl_perm(struct inode *inode);
/* globals from lznt.c */
@@ -981,11 +992,12 @@ static inline __le64 kernel2nt(const struct timespec64 *ts)
*/
static inline void nt2kernel(const __le64 tm, struct timespec64 *ts)
{
- u64 t = le64_to_cpu(tm) - _100ns2seconds * SecondsToStartOf1970;
+ s32 t32;
+	/* Use signed 64-bit math to support timestamps prior to the epoch (xfstest 258). */
+ s64 t = le64_to_cpu(tm) - _100ns2seconds * SecondsToStartOf1970;
- // WARNING: do_div changes its first argument(!)
- ts->tv_nsec = do_div(t, _100ns2seconds) * 100;
- ts->tv_sec = t;
+ ts->tv_sec = div_s64_rem(t, _100ns2seconds, &t32);
+ ts->tv_nsec = t32 * 100;
}
static inline struct ntfs_sb_info *ntfs_sb(struct super_block *sb)
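
A userspace sketch of the conversion made signed above, assuming the usual NTFS constants (100-ns ticks, 11644473600 seconds between 1601 and 1970); the plain C division mirrors div_s64_rem(), which truncates toward zero.

#include <stdint.h>
#include <stdio.h>

#define TICKS_PER_SEC     10000000LL      /* 100-ns intervals per second */
#define SECS_1601_TO_1970 11644473600LL

/* Mirror of the patched nt2kernel(): with signed 64-bit math, NT times
 * before the Unix epoch come out as negative tv_sec instead of garbage. */
static void nt2unix(int64_t nt_100ns, int64_t *sec, int32_t *nsec)
{
        int64_t t = nt_100ns - TICKS_PER_SEC * SECS_1601_TO_1970;

        *sec  = t / TICKS_PER_SEC;                  /* truncates toward zero */
        *nsec = (int32_t)(t % TICKS_PER_SEC) * 100;
}

int main(void)
{
        int64_t sec;
        int32_t nsec;

        nt2unix(0, &sec, &nsec);        /* 1601-01-01, well before the epoch */
        printf("%lld.%09d\n", (long long)sec, nsec);
        return 0;
}
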
@@ -1035,6 +1047,11 @@ static inline bool is_compressed(const struct ntfs_inode *ni)
(ni->ni_flags & NI_FLAG_COMPRESSED_MASK);
}
+static inline bool is_bad_ni(const struct ntfs_inode *ni)
+{
+ return ni->ni_bad;
+}
+
static inline int ni_ext_compress_bits(const struct ntfs_inode *ni)
{
return 0xb + (ni->ni_flags & NI_FLAG_COMPRESSED_MASK);
diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
index 61d53d39f3b9..167093e8d287 100644
--- a/fs/ntfs3/record.c
+++ b/fs/ntfs3/record.c
@@ -31,7 +31,7 @@ static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
*
* Return: Unused attribute id that is less than mrec->next_attr_id.
*/
-static __le16 mi_new_attt_id(struct mft_inode *mi)
+static __le16 mi_new_attt_id(struct ntfs_inode *ni, struct mft_inode *mi)
{
u16 free_id, max_id, t16;
struct MFT_REC *rec = mi->mrec;
@@ -52,7 +52,7 @@ static __le16 mi_new_attt_id(struct mft_inode *mi)
attr = NULL;
for (;;) {
- attr = mi_enum_attr(mi, attr);
+ attr = mi_enum_attr(ni, mi, attr);
if (!attr) {
rec->next_attr_id = cpu_to_le16(max_id + 1);
mi->dirty = true;
@@ -195,7 +195,8 @@ out:
* NOTE: mi->mrec - memory of size sbi->record_size
* here we sure that mi->mrec->total == sbi->record_size (see mi_read)
*/
-struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+struct ATTRIB *mi_enum_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ struct ATTRIB *attr)
{
const struct MFT_REC *rec = mi->mrec;
u32 used = le32_to_cpu(rec->used);
@@ -209,11 +210,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
off = le16_to_cpu(rec->attr_off);
if (used > total)
- return NULL;
+ goto out;
if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 ||
!IS_ALIGNED(off, 8)) {
- return NULL;
+ goto out;
}
/* Skip non-resident records. */
@@ -243,7 +244,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
*/
if (off + 8 > used) {
static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8);
- return NULL;
+ goto out;
}
if (attr->type == ATTR_END) {
@@ -254,112 +255,116 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
/* 0x100 is last known attribute for now. */
t32 = le32_to_cpu(attr->type);
if (!t32 || (t32 & 0xf) || (t32 > 0x100))
- return NULL;
+ goto out;
/* attributes in record must be ordered by type */
if (t32 < prev_type)
- return NULL;
+ goto out;
asize = le32_to_cpu(attr->size);
if (!IS_ALIGNED(asize, 8))
- return NULL;
+ goto out;
/* Check overflow and boundary. */
if (off + asize < off || off + asize > used)
- return NULL;
+ goto out;
/* Can we use the field attr->non_res. */
if (off + 9 > used)
- return NULL;
+ goto out;
/* Check size of attribute. */
if (!attr->non_res) {
/* Check resident fields. */
if (asize < SIZEOF_RESIDENT)
- return NULL;
+ goto out;
t16 = le16_to_cpu(attr->res.data_off);
if (t16 > asize)
- return NULL;
+ goto out;
if (le32_to_cpu(attr->res.data_size) > asize - t16)
- return NULL;
+ goto out;
t32 = sizeof(short) * attr->name_len;
if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
- return NULL;
+ goto out;
return attr;
}
/* Check nonresident fields. */
if (attr->non_res != 1)
- return NULL;
+ goto out;
/* Can we use memory including attr->nres.valid_size? */
if (asize < SIZEOF_NONRESIDENT)
- return NULL;
+ goto out;
t16 = le16_to_cpu(attr->nres.run_off);
if (t16 > asize)
- return NULL;
+ goto out;
t32 = sizeof(short) * attr->name_len;
if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
- return NULL;
+ goto out;
/* Check start/end vcn. */
if (le64_to_cpu(attr->nres.svcn) > le64_to_cpu(attr->nres.evcn) + 1)
- return NULL;
+ goto out;
data_size = le64_to_cpu(attr->nres.data_size);
if (le64_to_cpu(attr->nres.valid_size) > data_size)
- return NULL;
+ goto out;
alloc_size = le64_to_cpu(attr->nres.alloc_size);
if (data_size > alloc_size)
- return NULL;
+ goto out;
t32 = mi->sbi->cluster_mask;
if (alloc_size & t32)
- return NULL;
+ goto out;
if (!attr->nres.svcn && is_attr_ext(attr)) {
/* First segment of sparse/compressed attribute */
/* Can we use memory including attr->nres.total_size? */
if (asize < SIZEOF_NONRESIDENT_EX)
- return NULL;
+ goto out;
tot_size = le64_to_cpu(attr->nres.total_size);
if (tot_size & t32)
- return NULL;
+ goto out;
if (tot_size > alloc_size)
- return NULL;
+ goto out;
} else {
if (attr->nres.c_unit)
- return NULL;
+ goto out;
if (alloc_size > mi->sbi->volume.size)
- return NULL;
+ goto out;
}
return attr;
+
+out:
+ _ntfs_bad_inode(&ni->vfs_inode);
+ return NULL;
}
/*
* mi_find_attr - Find the attribute by type and name and id.
*/
-struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
- enum ATTR_TYPE type, const __le16 *name,
- u8 name_len, const __le16 *id)
+struct ATTRIB *mi_find_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ struct ATTRIB *attr, enum ATTR_TYPE type,
+ const __le16 *name, u8 name_len, const __le16 *id)
{
u32 type_in = le32_to_cpu(type);
u32 atype;
next_attr:
- attr = mi_enum_attr(mi, attr);
+ attr = mi_enum_attr(ni, mi, attr);
if (!attr)
return NULL;
@@ -467,9 +472,9 @@ int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
*
* Return: Not full constructed attribute or NULL if not possible to create.
*/
-struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
- const __le16 *name, u8 name_len, u32 asize,
- u16 name_off)
+struct ATTRIB *mi_insert_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ enum ATTR_TYPE type, const __le16 *name,
+ u8 name_len, u32 asize, u16 name_off)
{
size_t tail;
struct ATTRIB *attr;
@@ -488,7 +493,7 @@ struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
* at which we should insert it.
*/
attr = NULL;
- while ((attr = mi_enum_attr(mi, attr))) {
+ while ((attr = mi_enum_attr(ni, mi, attr))) {
int diff = compare_attr(attr, type, name, name_len, upcase);
if (diff < 0)
@@ -508,7 +513,7 @@ struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
tail = used - PtrOffset(rec, attr);
}
- id = mi_new_attt_id(mi);
+ id = mi_new_attt_id(ni, mi);
memmove(Add2Ptr(attr, asize), attr, tail);
memset(attr, 0, asize);
@@ -616,7 +621,7 @@ bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
* If failed record is not changed.
*/
int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
- struct runs_tree *run, CLST len)
+ const struct runs_tree *run, CLST len)
{
int err = 0;
struct ntfs_sb_info *sbi = mi->sbi;
diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c
index 6e86d66197ef..395b20492525 100644
--- a/fs/ntfs3/run.c
+++ b/fs/ntfs3/run.c
@@ -9,6 +9,7 @@
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/log2.h>
+#include <linux/overflow.h>
#include "debug.h"
#include "ntfs.h"
@@ -486,7 +487,7 @@ requires_new_range:
* Helper for attr_collapse_range(),
* which is helper for fallocate(collapse_range).
*/
-bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
+bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len, CLST sub)
{
size_t index, eat;
struct ntfs_run *r, *e, *eat_start, *eat_end;
@@ -510,7 +511,7 @@ bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
/* Collapse a middle part of normal run, split. */
if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
return false;
- return run_collapse_range(run, vcn, len);
+ return run_collapse_range(run, vcn, len, sub);
}
r += 1;
@@ -544,6 +545,13 @@ bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
run->count -= eat;
+ if (sub) {
+ e -= eat;
+ for (r = run->runs; r < e; r++) {
+ r->vcn -= sub;
+ }
+ }
+
return true;
}
@@ -982,14 +990,22 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
if (!dlcn)
return -EINVAL;
- lcn = prev_lcn + dlcn;
+
+ /* Check special combination: 0 + SPARSE_LCN64. */
+ if (!prev_lcn && dlcn == SPARSE_LCN64) {
+ lcn = SPARSE_LCN64;
+ } else if (check_add_overflow(prev_lcn, dlcn, &lcn)) {
+ return -EINVAL;
+ }
prev_lcn = lcn;
} else {
/* The size of 'dlcn' can't be > 8. */
return -EINVAL;
}
- next_vcn = vcn64 + len;
+ if (check_add_overflow(vcn64, len, &next_vcn))
+ return -EINVAL;
+
/* Check boundary. */
if (next_vcn > evcn + 1)
return -EINVAL;
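
A loose userspace mimic of the overflow-checked delta decode added above, not the kernel routine itself: each mapping pair stores the LCN as a signed delta from the previous one, and an accumulated value that would wrap is rejected. The SPARSE_LCN64 value is a stand-in for the kernel's hole marker.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPARSE_LCN64 ((uint64_t)-1)     /* stand-in for the "hole" marker */

static bool decode_lcn(uint64_t prev_lcn, int64_t dlcn, uint64_t *lcn)
{
        /* Special combination "0 + sparse": a run with no clusters. */
        if (!prev_lcn && (uint64_t)dlcn == SPARSE_LCN64) {
                *lcn = SPARSE_LCN64;
                return true;
        }
        /* __builtin_add_overflow() plays the role of check_add_overflow(). */
        return !__builtin_add_overflow(prev_lcn, dlcn, lcn);
}

int main(void)
{
        uint64_t lcn;
        bool ok = decode_lcn(100, -42, &lcn);

        printf("ok=%d lcn=%llu\n", ok, (unsigned long long)lcn); /* ok=1 lcn=58 */
        return 0;
}
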
@@ -1153,7 +1169,8 @@ int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
return -EINVAL;
run_buf += size_size + offset_size;
- vcn64 += len;
+ if (check_add_overflow(vcn64, len, &vcn64))
+ return -EINVAL;
#ifndef CONFIG_NTFS3_64BIT_CLUSTER
if (vcn64 > 0x100000000ull)
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 6a0f6b0a3ab2..8b0cf0ed4f72 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -16,6 +16,13 @@
* mi - MFT inode - One MFT record(usually 1024 bytes or 4K), consists of attributes.
* ni - NTFS inode - Extends linux inode. consists of one or more mft inodes.
* index - unit inside directory - 2K, 4K, <=page size, does not depend on cluster size.
+ * resident attribute - Attribute with content stored directly in the MFT record
+ * non-resident attribute - Attribute with content stored in clusters
+ * data_size - Size of attribute content in bytes. Equal to inode->i_size
+ * valid_size - Number of bytes written to the non-resident attribute
+ * allocated_size - Total size of clusters allocated for non-resident content
+ * total_size - Actual size of allocated clusters for sparse or compressed attributes
+ * - Constraint: valid_size <= data_size <= allocated_size
*
* WSL - Windows Subsystem for Linux
* https://docs.microsoft.com/en-us/windows/wsl/file-permissions
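
A minimal illustration of the size constraint spelled out above; the struct and field names are stand-ins for this sketch, not the kernel's attribute layout.

#include <stdbool.h>
#include <stdint.h>

struct nres_sizes {
        uint64_t valid_size;    /* bytes actually written so far */
        uint64_t data_size;     /* attribute content size, i.e. i_size */
        uint64_t alloc_size;    /* bytes covered by allocated clusters */
};

/* The constraint from the comment above: valid <= data <= allocated. */
static bool nres_sizes_ok(const struct nres_sizes *s)
{
        return s->valid_size <= s->data_size &&
               s->data_size <= s->alloc_size;
}
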
@@ -51,6 +58,7 @@
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/fs.h>
+#include <linux/fs_struct.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/log2.h>
@@ -277,9 +285,9 @@ static const struct fs_parameter_spec ntfs_fs_parameters[] = {
fsparam_flag("hide_dot_files", Opt_hide_dot_files),
fsparam_flag("windows_names", Opt_windows_names),
fsparam_flag("showmeta", Opt_showmeta),
- fsparam_flag("acl", Opt_acl),
+ fsparam_flag_no("acl", Opt_acl),
fsparam_string("iocharset", Opt_iocharset),
- fsparam_flag("prealloc", Opt_prealloc),
+ fsparam_flag_no("prealloc", Opt_prealloc),
fsparam_flag("nocase", Opt_nocase),
{}
};
@@ -288,10 +296,8 @@ static const struct fs_parameter_spec ntfs_fs_parameters[] = {
/*
* Load nls table or if @nls is utf8 then return NULL.
*
- * It is good idea to use here "const char *nls".
- * But load_nls accepts "char*".
*/
-static struct nls_table *ntfs_load_nls(char *nls)
+static struct nls_table *ntfs_load_nls(const char *nls)
{
struct nls_table *ret;
@@ -390,7 +396,7 @@ static int ntfs_fs_parse_param(struct fs_context *fc,
param->string = NULL;
break;
case Opt_prealloc:
- opts->prealloc = 1;
+ opts->prealloc = !result.negated;
break;
case Opt_nocase:
opts->nocase = 1;
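
A rough userspace mimic of the fsparam_flag_no() behaviour relied on here, not the kernel parser: a negatable flag accepts both "prealloc" and "noprealloc", with the negation reported the way result.negated is; plain flags such as "nocase" are unaffected because they are not registered as negatable.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Only options registered as negatable get the "no" prefix treatment. */
static bool parse_prealloc(const char *opt, bool current)
{
        bool negated = strncmp(opt, "no", 2) == 0;
        const char *name = negated ? opt + 2 : opt;

        if (strcmp(name, "prealloc") == 0)
                return !negated;        /* like opts->prealloc = !result.negated */
        return current;                 /* unrelated option: flag unchanged */
}

int main(void)
{
        bool prealloc = true;           /* the new default set by this patch */

        prealloc = parse_prealloc("noprealloc", prealloc);
        printf("prealloc=%d\n", prealloc);      /* 0 */
        return 0;
}
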
@@ -555,6 +561,55 @@ static const struct proc_ops ntfs3_label_fops = {
.proc_write = ntfs3_label_write,
};
+static void ntfs_create_procdir(struct super_block *sb)
+{
+ struct proc_dir_entry *e;
+
+ if (!proc_info_root)
+ return;
+
+ e = proc_mkdir(sb->s_id, proc_info_root);
+ if (e) {
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+ proc_create_data("volinfo", 0444, e, &ntfs3_volinfo_fops, sb);
+ proc_create_data("label", 0644, e, &ntfs3_label_fops, sb);
+ sbi->procdir = e;
+ }
+}
+
+static void ntfs_remove_procdir(struct super_block *sb)
+{
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+ if (!sbi->procdir)
+ return;
+
+ remove_proc_entry("label", sbi->procdir);
+ remove_proc_entry("volinfo", sbi->procdir);
+ remove_proc_entry(sb->s_id, proc_info_root);
+ sbi->procdir = NULL;
+}
+
+static void ntfs_create_proc_root(void)
+{
+ proc_info_root = proc_mkdir("fs/ntfs3", NULL);
+}
+
+static void ntfs_remove_proc_root(void)
+{
+ if (proc_info_root) {
+ remove_proc_entry("fs/ntfs3", NULL);
+ proc_info_root = NULL;
+ }
+}
+#else
+// clang-format off
+static void ntfs_create_procdir(struct super_block *sb){}
+static void ntfs_remove_procdir(struct super_block *sb){}
+static void ntfs_create_proc_root(void){}
+static void ntfs_remove_proc_root(void){}
+// clang-format on
#endif
static struct kmem_cache *ntfs_inode_cachep;
@@ -644,18 +699,18 @@ static void ntfs_put_super(struct super_block *sb)
{
struct ntfs_sb_info *sbi = sb->s_fs_info;
-#ifdef CONFIG_PROC_FS
- // Remove /proc/fs/ntfs3/..
- if (sbi->procdir) {
- remove_proc_entry("label", sbi->procdir);
- remove_proc_entry("volinfo", sbi->procdir);
- remove_proc_entry(sb->s_id, proc_info_root);
- sbi->procdir = NULL;
- }
-#endif
+ ntfs_remove_procdir(sb);
/* Mark rw ntfs as clear, if possible. */
ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
+
+ if (sbi->options) {
+ unload_nls(sbi->options->nls);
+ kfree(sbi->options->nls_name);
+ kfree(sbi->options);
+ sbi->options = NULL;
+ }
+
ntfs3_put_sbi(sbi);
}
@@ -892,6 +947,11 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
sbi->volume.blocks = dev_size >> PAGE_SHIFT;
+ /* Set dummy blocksize to read boot_block. */
+ if (!sb_min_blocksize(sb, PAGE_SIZE)) {
+ return -EINVAL;
+ }
+
read_boot:
bh = ntfs_bread(sb, boot_block);
if (!bh)
@@ -1016,6 +1076,7 @@ read_boot:
dev_size += sector_size - 1;
}
+ sbi->bdev_blocksize_mask = max(boot_sector_size, sector_size) - 1;
sbi->mft.lbo = mlcn << cluster_bits;
sbi->mft.lbo2 = mlcn2 << cluster_bits;
@@ -1157,7 +1218,8 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
int err;
struct ntfs_sb_info *sbi = sb->s_fs_info;
struct block_device *bdev = sb->s_bdev;
- struct ntfs_mount_options *options;
+ struct ntfs_mount_options *fc_opts;
+ struct ntfs_mount_options *options = NULL;
struct inode *inode;
struct ntfs_inode *ni;
size_t i, tt, bad_len, bad_frags;
@@ -1174,7 +1236,23 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
ref.high = 0;
sbi->sb = sb;
- sbi->options = options = fc->fs_private;
+ fc_opts = fc->fs_private;
+ if (!fc_opts) {
+ errorf(fc, "missing mount options");
+ return -EINVAL;
+ }
+ options = kmemdup(fc_opts, sizeof(*fc_opts), GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ if (fc_opts->nls_name) {
+ options->nls_name = kstrdup(fc_opts->nls_name, GFP_KERNEL);
+ if (!options->nls_name) {
+ kfree(options);
+ return -ENOMEM;
+ }
+ }
+ sbi->options = options;
fc->fs_private = NULL;
sb->s_flags |= SB_NODIRATIME;
sb->s_magic = 0x7366746e; // "ntfs"
@@ -1182,7 +1260,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_export_op = &ntfs_export_ops;
sb->s_time_gran = NTFS_TIME_GRAN; // 100 nsec
sb->s_xattr = ntfs_xattr_handlers;
- sb->s_d_op = options->nocase ? &ntfs_dentry_ops : NULL;
+ set_default_d_op(sb, options->nocase ? &ntfs_dentry_ops : NULL);
options->nls = ntfs_load_nls(options->nls_name);
if (IS_ERR(options->nls)) {
@@ -1252,7 +1330,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
sbi->volume.ni = ni;
if (info->flags & VOLUME_FLAG_DIRTY) {
sbi->volume.real_dirty = true;
- ntfs_info(sb, "It is recommened to use chkdsk.");
+ ntfs_info(sb, "It is recommended to use chkdsk.");
}
/* Load $MFTMirr to estimate recs_mirr. */
@@ -1590,20 +1668,7 @@ load_root:
kfree(boot2);
}
-#ifdef CONFIG_PROC_FS
- /* Create /proc/fs/ntfs3/.. */
- if (proc_info_root) {
- struct proc_dir_entry *e = proc_mkdir(sb->s_id, proc_info_root);
- static_assert((S_IRUGO | S_IWUSR) == 0644);
- if (e) {
- proc_create_data("volinfo", S_IRUGO, e,
- &ntfs3_volinfo_fops, sb);
- proc_create_data("label", S_IRUGO | S_IWUSR, e,
- &ntfs3_label_fops, sb);
- sbi->procdir = e;
- }
- }
-#endif
+ ntfs_create_procdir(sb);
if (is_legacy_ntfs(sb))
sb->s_flags |= SB_RDONLY;
@@ -1612,9 +1677,16 @@ load_root:
put_inode_out:
iput(inode);
out:
+	/* sbi->options and 'options' refer to the same allocation. */
+ if (options) {
+ unload_nls(options->nls);
+ kfree(options->nls_name);
+ kfree(options);
+ sbi->options = NULL;
+ }
+
ntfs3_put_sbi(sbi);
kfree(boot2);
- ntfs3_put_sbi(sbi);
return err;
}
@@ -1738,6 +1810,12 @@ static int __ntfs_init_fs_context(struct fs_context *fc)
opts->fs_gid = current_gid();
opts->fs_fmask_inv = ~current_umask();
opts->fs_dmask_inv = ~current_umask();
+ opts->prealloc = 1;
+
+#ifdef CONFIG_NTFS3_FS_POSIX_ACL
+	/* Enable the 'acl' mount option by default. */
+ fc->sb_flags |= SB_POSIXACL;
+#endif
if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE)
goto ok;
@@ -1853,14 +1931,11 @@ static int __init init_ntfs_fs(void)
if (IS_ENABLED(CONFIG_NTFS3_LZX_XPRESS))
pr_info("ntfs3: Read-only LZX/Xpress compression included\n");
-#ifdef CONFIG_PROC_FS
- /* Create "/proc/fs/ntfs3" */
- proc_info_root = proc_mkdir("fs/ntfs3", NULL);
-#endif
+ ntfs_create_proc_root();
err = ntfs3_init_bitmap();
if (err)
- return err;
+ goto out2;
ntfs_inode_cachep = kmem_cache_create(
"ntfs_inode_cache", sizeof(struct ntfs_inode), 0,
@@ -1880,6 +1955,8 @@ out:
kmem_cache_destroy(ntfs_inode_cachep);
out1:
ntfs3_exit_bitmap();
+out2:
+ ntfs_remove_proc_root();
return err;
}
@@ -1890,11 +1967,7 @@ static void __exit exit_ntfs_fs(void)
unregister_filesystem(&ntfs_fs_type);
unregister_as_ntfs_legacy();
ntfs3_exit_bitmap();
-
-#ifdef CONFIG_PROC_FS
- if (proc_info_root)
- remove_proc_entry("fs/ntfs3", NULL);
-#endif
+ ntfs_remove_proc_root();
}
MODULE_LICENSE("GPL");
diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
index e0055dcf8fe3..c93df55e98d0 100644
--- a/fs/ntfs3/xattr.c
+++ b/fs/ntfs3/xattr.c
@@ -313,7 +313,7 @@ out:
static noinline int ntfs_set_ea(struct inode *inode, const char *name,
size_t name_len, const void *value,
size_t val_size, int flags, bool locked,
- __le16 *ea_size)
+ __le32 *ea_size)
{
struct ntfs_inode *ni = ntfs_i(inode);
struct ntfs_sb_info *sbi = ni->mi.sbi;
@@ -522,7 +522,7 @@ update_ea:
if (ea_info.size_pack != size_pack)
ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
if (ea_size)
- *ea_size = ea_info.size_pack;
+ *ea_size = ea_info.size;
mark_inode_dirty(&ni->vfs_inode);
out:
@@ -552,6 +552,10 @@ struct posix_acl *ntfs_get_acl(struct mnt_idmap *idmap, struct dentry *dentry,
int err;
void *buf;
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ni)))
+ return ERR_PTR(-EINVAL);
+
/* Allocate PATH_MAX bytes. */
buf = __getname();
if (!buf)
@@ -600,6 +604,10 @@ static noinline int ntfs_set_acl_ex(struct mnt_idmap *idmap,
int flags;
umode_t mode;
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ntfs_i(inode))))
+ return -EINVAL;
+
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
@@ -646,12 +654,22 @@ static noinline int ntfs_set_acl_ex(struct mnt_idmap *idmap,
err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0, NULL);
if (err == -ENODATA && !size)
err = 0; /* Removing non existed xattr. */
- if (!err) {
- set_cached_acl(inode, type, acl);
+ if (err)
+ goto out;
+
+ if (inode->i_mode != mode) {
+ umode_t old_mode = inode->i_mode;
+ inode->i_mode = mode;
+ err = ntfs_save_wsl_perm(inode, NULL);
+ if (err) {
+ inode->i_mode = old_mode;
+ goto out;
+ }
inode->i_mode = mode;
- inode_set_ctime_current(inode);
- mark_inode_dirty(inode);
}
+ set_cached_acl(inode, type, acl);
+ inode_set_ctime_current(inode);
+ mark_inode_dirty(inode);
out:
kfree(value);
@@ -730,6 +748,10 @@ ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
struct ntfs_inode *ni = ntfs_i(inode);
ssize_t ret;
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ni)))
+ return -EINVAL;
+
if (!(ni->ni_flags & NI_FLAG_EA)) {
/* no xattr in file */
return 0;
@@ -751,6 +773,10 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
int err;
struct ntfs_inode *ni = ntfs_i(inode);
+ /* Avoid any operation if inode is bad. */
+ if (unlikely(is_bad_ni(ni)))
+ return -EINVAL;
+
if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
return -EIO;
@@ -950,7 +976,7 @@ out:
*
* save uid/gid/mode in xattr
*/
-int ntfs_save_wsl_perm(struct inode *inode, __le16 *ea_size)
+int ntfs_save_wsl_perm(struct inode *inode, __le32 *ea_size)
{
int err;
__le32 value;
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 62464d194da3..af1e2cedb217 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/fs_struct.h>
#include <cluster/masklog.h>
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 395e23920632..b267ec580da9 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -566,7 +566,7 @@ static void ocfs2_adjust_rightmost_records(handle_t *handle,
struct ocfs2_path *path,
struct ocfs2_extent_rec *insert_rec);
/*
- * Reset the actual path elements so that we can re-use the structure
+ * Reset the actual path elements so that we can reuse the structure
* to build another path. Generally, this involves freeing the buffer
* heads.
*/
@@ -1182,7 +1182,7 @@ static int ocfs2_add_branch(handle_t *handle,
/*
* If there is a gap before the root end and the real end
- * of the righmost leaf block, we need to remove the gap
+ * of the rightmost leaf block, we need to remove the gap
* between new_cpos and root_end first so that the tree
* is consistent after we add a new branch(it will start
* from new_cpos).
@@ -1238,7 +1238,7 @@ static int ocfs2_add_branch(handle_t *handle,
/* Note: new_eb_bhs[new_blocks - 1] is the guy which will be
* linked with the rest of the tree.
- * conversly, new_eb_bhs[0] is the new bottommost leaf.
+ * conversely, new_eb_bhs[0] is the new bottommost leaf.
*
* when we leave the loop, new_last_eb_blk will point to the
* newest leaf, and next_blkno will point to the topmost extent
@@ -1803,6 +1803,14 @@ static int __ocfs2_find_path(struct ocfs2_caching_info *ci,
el = root_el;
while (el->l_tree_depth) {
+ if (unlikely(le16_to_cpu(el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH)) {
+ ocfs2_error(ocfs2_metadata_cache_get_super(ci),
+ "Owner %llu has invalid tree depth %u in extent list\n",
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ le16_to_cpu(el->l_tree_depth));
+ ret = -EROFS;
+ goto out;
+ }
if (le16_to_cpu(el->l_next_free_rec) == 0) {
ocfs2_error(ocfs2_metadata_cache_get_super(ci),
"Owner %llu has empty extent list at depth %u\n",
@@ -3712,7 +3720,7 @@ static int ocfs2_try_to_merge_extent(handle_t *handle,
* update split_index here.
*
* When the split_index is zero, we need to merge it to the
- * prevoius extent block. It is more efficient and easier
+ * previous extent block. It is more efficient and easier
* if we do merge_right first and merge_left later.
*/
ret = ocfs2_merge_rec_right(path, handle, et, split_rec,
@@ -4517,7 +4525,7 @@ static void ocfs2_figure_contig_type(struct ocfs2_extent_tree *et,
}
/*
- * This should only be called against the righmost leaf extent list.
+ * This should only be called against the rightmost leaf extent list.
*
* ocfs2_figure_appending_type() will figure out whether we'll have to
* insert at the tail of the rightmost leaf.
@@ -6154,6 +6162,9 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
int status;
struct inode *inode = NULL;
struct buffer_head *bh = NULL;
+ struct ocfs2_dinode *di;
+ struct ocfs2_truncate_log *tl;
+ unsigned int tl_count;
inode = ocfs2_get_system_file_inode(osb,
TRUNCATE_LOG_SYSTEM_INODE,
@@ -6171,6 +6182,18 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
goto bail;
}
+ di = (struct ocfs2_dinode *)bh->b_data;
+ tl = &di->id2.i_dealloc;
+ tl_count = le16_to_cpu(tl->tl_count);
+ if (unlikely(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) ||
+ tl_count == 0)) {
+ status = -EFSCORRUPTED;
+ iput(inode);
+ brelse(bh);
+ mlog_errno(status);
+ goto bail;
+ }
+
*tl_inode = inode;
*tl_bh = bh;
bail:
@@ -6808,27 +6831,27 @@ static int ocfs2_zero_func(handle_t *handle, struct buffer_head *bh)
return 0;
}
-void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
- unsigned int from, unsigned int to,
- struct page *page, int zero, u64 *phys)
+void ocfs2_map_and_dirty_folio(struct inode *inode, handle_t *handle,
+ size_t from, size_t to, struct folio *folio, int zero,
+ u64 *phys)
{
int ret, partial = 0;
- loff_t start_byte = ((loff_t)page->index << PAGE_SHIFT) + from;
+ loff_t start_byte = folio_pos(folio) + from;
loff_t length = to - from;
- ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0);
+ ret = ocfs2_map_folio_blocks(folio, phys, inode, from, to, 0);
if (ret)
mlog_errno(ret);
if (zero)
- zero_user_segment(page, from, to);
+ folio_zero_segment(folio, from, to);
/*
* Need to set the buffers we zero'd into uptodate
* here if they aren't - ocfs2_map_page_blocks()
* might've skipped some
*/
- ret = walk_page_buffers(handle, page_buffers(page),
+ ret = walk_page_buffers(handle, folio_buffers(folio),
from, to, &partial,
ocfs2_zero_func);
if (ret < 0)
@@ -6841,92 +6864,88 @@ void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
}
if (!partial)
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
- flush_dcache_page(page);
+ flush_dcache_folio(folio);
}
-static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
- loff_t end, struct page **pages,
- int numpages, u64 phys, handle_t *handle)
+static void ocfs2_zero_cluster_folios(struct inode *inode, loff_t start,
+ loff_t end, struct folio **folios, int numfolios,
+ u64 phys, handle_t *handle)
{
int i;
- struct page *page;
- unsigned int from, to = PAGE_SIZE;
struct super_block *sb = inode->i_sb;
BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
- if (numpages == 0)
+ if (numfolios == 0)
goto out;
- to = PAGE_SIZE;
- for(i = 0; i < numpages; i++) {
- page = pages[i];
+ for (i = 0; i < numfolios; i++) {
+ struct folio *folio = folios[i];
+ size_t to = folio_size(folio);
+ size_t from = offset_in_folio(folio, start);
- from = start & (PAGE_SIZE - 1);
- if ((end >> PAGE_SHIFT) == page->index)
- to = end & (PAGE_SIZE - 1);
+ if (to > end - folio_pos(folio))
+ to = end - folio_pos(folio);
- BUG_ON(from > PAGE_SIZE);
- BUG_ON(to > PAGE_SIZE);
+ ocfs2_map_and_dirty_folio(inode, handle, from, to, folio, 1,
+ &phys);
- ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
- &phys);
-
- start = (page->index + 1) << PAGE_SHIFT;
+ start = folio_next_pos(folio);
}
out:
- if (pages)
- ocfs2_unlock_and_free_pages(pages, numpages);
+ if (folios)
+ ocfs2_unlock_and_free_folios(folios, numfolios);
}
-int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
- struct page **pages, int *num)
+static int ocfs2_grab_folios(struct inode *inode, loff_t start, loff_t end,
+ struct folio **folios, int *num)
{
- int numpages, ret = 0;
+ int numfolios, ret = 0;
struct address_space *mapping = inode->i_mapping;
unsigned long index;
loff_t last_page_bytes;
BUG_ON(start > end);
- numpages = 0;
+ numfolios = 0;
last_page_bytes = PAGE_ALIGN(end);
index = start >> PAGE_SHIFT;
do {
- pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
- if (!pages[numpages]) {
- ret = -ENOMEM;
+ folios[numfolios] = __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
+ if (IS_ERR(folios[numfolios])) {
+ ret = PTR_ERR(folios[numfolios]);
mlog_errno(ret);
+ folios[numfolios] = NULL;
goto out;
}
- numpages++;
- index++;
+ index = folio_next_index(folios[numfolios]);
+ numfolios++;
} while (index < (last_page_bytes >> PAGE_SHIFT));
out:
if (ret != 0) {
- if (pages)
- ocfs2_unlock_and_free_pages(pages, numpages);
- numpages = 0;
+ ocfs2_unlock_and_free_folios(folios, numfolios);
+ numfolios = 0;
}
- *num = numpages;
+ *num = numfolios;
return ret;
}
-static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
- struct page **pages, int *num)
+static int ocfs2_grab_eof_folios(struct inode *inode, loff_t start, loff_t end,
+ struct folio **folios, int *num)
{
struct super_block *sb = inode->i_sb;
BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits !=
(end - 1) >> OCFS2_SB(sb)->s_clustersize_bits);
- return ocfs2_grab_pages(inode, start, end, pages, num);
+ return ocfs2_grab_folios(inode, start, end, folios, num);
}
/*
@@ -6940,8 +6959,8 @@ static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
u64 range_start, u64 range_end)
{
- int ret = 0, numpages;
- struct page **pages = NULL;
+ int ret = 0, numfolios;
+ struct folio **folios = NULL;
u64 phys;
unsigned int ext_flags;
struct super_block *sb = inode->i_sb;
@@ -6954,17 +6973,17 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
return 0;
/*
- * Avoid zeroing pages fully beyond current i_size. It is pointless as
- * underlying blocks of those pages should be already zeroed out and
+ * Avoid zeroing folios fully beyond current i_size. It is pointless as
+ * underlying blocks of those folios should be already zeroed out and
* page writeback will skip them anyway.
*/
range_end = min_t(u64, range_end, i_size_read(inode));
if (range_start >= range_end)
return 0;
- pages = kcalloc(ocfs2_pages_per_cluster(sb),
- sizeof(struct page *), GFP_NOFS);
- if (pages == NULL) {
+ folios = kcalloc(ocfs2_pages_per_cluster(sb),
+ sizeof(struct folio *), GFP_NOFS);
+ if (folios == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
@@ -6985,18 +7004,18 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN)
goto out;
- ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages,
- &numpages);
+ ret = ocfs2_grab_eof_folios(inode, range_start, range_end, folios,
+ &numfolios);
if (ret) {
mlog_errno(ret);
goto out;
}
- ocfs2_zero_cluster_pages(inode, range_start, range_end, pages,
- numpages, phys, handle);
+ ocfs2_zero_cluster_folios(inode, range_start, range_end, folios,
+ numfolios, phys, handle);
/*
- * Initiate writeout of the pages we zero'd here. We don't
+ * Initiate writeout of the folios we zero'd here. We don't
* wait on them - the truncate_inode_pages() call later will
* do that for us.
*/
@@ -7006,7 +7025,7 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
mlog_errno(ret);
out:
- kfree(pages);
+ kfree(folios);
return ret;
}
@@ -7059,7 +7078,7 @@ void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
int ocfs2_convert_inline_data_to_extents(struct inode *inode,
struct buffer_head *di_bh)
{
- int ret, has_data, num_pages = 0;
+ int ret, has_data, num_folios = 0;
int need_free = 0;
u32 bit_off, num;
handle_t *handle;
@@ -7068,7 +7087,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_alloc_context *data_ac = NULL;
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct ocfs2_extent_tree et;
int did_quota = 0;
@@ -7119,12 +7138,12 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
/*
* Save two copies, one for insert, and one that can
- * be changed by ocfs2_map_and_dirty_page() below.
+ * be changed by ocfs2_map_and_dirty_folio() below.
*/
block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
- ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page,
- &num_pages);
+ ret = ocfs2_grab_eof_folios(inode, 0, page_end, &folio,
+ &num_folios);
if (ret) {
mlog_errno(ret);
need_free = 1;
@@ -7135,15 +7154,15 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
* This should populate the 1st page for us and mark
* it up to date.
*/
- ret = ocfs2_read_inline_data(inode, page, di_bh);
+ ret = ocfs2_read_inline_data(inode, folio, di_bh);
if (ret) {
mlog_errno(ret);
need_free = 1;
goto out_unlock;
}
- ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0,
- &phys);
+ ocfs2_map_and_dirty_folio(inode, handle, 0, page_end, folio, 0,
+ &phys);
}
spin_lock(&oi->ip_lock);
@@ -7174,8 +7193,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
}
out_unlock:
- if (page)
- ocfs2_unlock_and_free_pages(&page, num_pages);
+ if (folio)
+ ocfs2_unlock_and_free_folios(&folio, num_folios);
out_commit:
if (ret < 0 && did_quota)
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index 4af7abaa6e40..1c0c83362904 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -254,11 +254,9 @@ static inline int ocfs2_is_empty_extent(struct ocfs2_extent_rec *rec)
return !rec->e_leaf_clusters;
}
-int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
- struct page **pages, int *num);
-void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
- unsigned int from, unsigned int to,
- struct page *page, int zero, u64 *phys);
+void ocfs2_map_and_dirty_folio(struct inode *inode, handle_t *handle,
+ size_t from, size_t to, struct folio *folio, int zero,
+ u64 *phys);
/*
* Structures which describe a path through a btree, and functions to
* manipulate them.
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index db72b3e924b3..76c86f1c2b1c 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -46,7 +46,6 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh = NULL;
struct buffer_head *buffer_cache_bh = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- void *kaddr;
trace_ocfs2_symlink_get_block(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -91,17 +90,11 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
* could've happened. Since we've got a reference on
* the bh, even if it commits while we're doing the
* copy, the data is still good. */
- if (buffer_jbd(buffer_cache_bh)
- && ocfs2_inode_is_new(inode)) {
- kaddr = kmap_atomic(bh_result->b_page);
- if (!kaddr) {
- mlog(ML_ERROR, "couldn't kmap!\n");
- goto bail;
- }
- memcpy(kaddr + (bh_result->b_size * iblock),
- buffer_cache_bh->b_data,
- bh_result->b_size);
- kunmap_atomic(kaddr);
+ if (buffer_jbd(buffer_cache_bh) && ocfs2_inode_is_new(inode)) {
+ memcpy_to_folio(bh_result->b_folio,
+ bh_result->b_size * iblock,
+ buffer_cache_bh->b_data,
+ bh_result->b_size);
set_buffer_uptodate(bh_result);
}
brelse(buffer_cache_bh);
@@ -215,10 +208,9 @@ bail:
return err;
}
-int ocfs2_read_inline_data(struct inode *inode, struct page *page,
+int ocfs2_read_inline_data(struct inode *inode, struct folio *folio,
struct buffer_head *di_bh)
{
- void *kaddr;
loff_t size;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
@@ -230,7 +222,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
size = i_size_read(inode);
- if (size > PAGE_SIZE ||
+ if (size > folio_size(folio) ||
size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
ocfs2_error(inode->i_sb,
"Inode %llu has with inline data has bad size: %Lu\n",
@@ -239,25 +231,18 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
return -EROFS;
}
- kaddr = kmap_atomic(page);
- if (size)
- memcpy(kaddr, di->id2.i_data.id_data, size);
- /* Clear the remaining part of the page */
- memset(kaddr + size, 0, PAGE_SIZE - size);
- flush_dcache_page(page);
- kunmap_atomic(kaddr);
-
- SetPageUptodate(page);
+ folio_fill_tail(folio, 0, di->id2.i_data.id_data, size);
+ folio_mark_uptodate(folio);
return 0;
}
-static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
+static int ocfs2_readpage_inline(struct inode *inode, struct folio *folio)
{
int ret;
struct buffer_head *di_bh = NULL;
- BUG_ON(!PageLocked(page));
+ BUG_ON(!folio_test_locked(folio));
BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
ret = ocfs2_read_inode_block(inode, &di_bh);
@@ -266,9 +251,9 @@ static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
goto out;
}
- ret = ocfs2_read_inline_data(inode, page, di_bh);
+ ret = ocfs2_read_inline_data(inode, folio, di_bh);
out:
- unlock_page(page);
+ folio_unlock(folio);
brelse(di_bh);
return ret;
@@ -283,7 +268,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio)
trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, folio->index);
- ret = ocfs2_inode_lock_with_page(inode, NULL, 0, &folio->page);
+ ret = ocfs2_inode_lock_with_folio(inode, NULL, 0, folio);
if (ret != 0) {
if (ret == AOP_TRUNCATED_PAGE)
unlock = 0;
@@ -305,7 +290,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio)
}
/*
- * i_size might have just been updated as we grabed the meta lock. We
+ * i_size might have just been updated as we grabbed the meta lock. We
* might now be discovering a truncate that hit on another node.
* block_read_full_folio->get_block freaks out if it is asked to read
* beyond the end of a file, so we check here. Callers
@@ -322,7 +307,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio)
}
if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
- ret = ocfs2_readpage_inline(inode, &folio->page);
+ ret = ocfs2_readpage_inline(inode, folio);
else
ret = block_read_full_folio(folio, ocfs2_get_block);
unlock = 0;
@@ -534,7 +519,7 @@ static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
*
* from == to == 0 is code for "zero the entire cluster region"
*/
-static void ocfs2_clear_page_regions(struct page *page,
+static void ocfs2_clear_folio_regions(struct folio *folio,
struct ocfs2_super *osb, u32 cpos,
unsigned from, unsigned to)
{
@@ -543,7 +528,7 @@ static void ocfs2_clear_page_regions(struct page *page,
ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);
- kaddr = kmap_atomic(page);
+ kaddr = kmap_local_folio(folio, 0);
if (from || to) {
if (from > cluster_start)
@@ -554,13 +539,13 @@ static void ocfs2_clear_page_regions(struct page *page,
memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
}
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
}
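/*
 * The mapping change above in isolation: kmap_atomic()/kunmap_atomic()
 * become kmap_local_folio()/kunmap_local().  Mappings remain strictly
 * nested, but preemption and page faults stay enabled.  Hypothetical
 * helper; it assumes the byte range does not cross a page boundary,
 * since kmap_local_folio() maps only the page containing @start.
 */
static void zero_folio_range_local(struct folio *folio, size_t start,
				   size_t end)
{
	char *kaddr = kmap_local_folio(folio, start);

	memset(kaddr, 0, end - start);
	kunmap_local(kaddr);
}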
/*
* Nonsparse file systems fully allocate before we get to the write
* code. This prevents ocfs2_write() from tagging the write as an
- * allocating one, which means ocfs2_map_page_blocks() might try to
+ * allocating one, which means ocfs2_map_folio_blocks() might try to
* read-in the blocks at the tail of our file. Avoid reading them by
* testing i_size against each block offset.
*/
@@ -585,11 +570,10 @@ static int ocfs2_should_read_blk(struct inode *inode, struct folio *folio,
*
* This will also skip zeroing, which is handled externally.
*/
-int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
+int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno,
struct inode *inode, unsigned int from,
unsigned int to, int new)
{
- struct folio *folio = page_folio(page);
int ret = 0;
struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
unsigned int block_end, block_start;
@@ -729,24 +713,24 @@ struct ocfs2_write_ctxt {
unsigned int w_large_pages;
/*
- * Pages involved in this write.
+ * Folios involved in this write.
*
- * w_target_page is the page being written to by the user.
+ * w_target_folio is the folio being written to by the user.
*
- * w_pages is an array of pages which always contains
- * w_target_page, and in the case of an allocating write with
+ * w_folios is an array of folios which always contains
+ * w_target_folio, and in the case of an allocating write with
* page_size < cluster size, it will contain zero'd and mapped
- * pages adjacent to w_target_page which need to be written
+ * pages adjacent to w_target_folio which need to be written
* out so that future reads from that region will get
* zeros.
*/
- unsigned int w_num_pages;
- struct page *w_pages[OCFS2_MAX_CTXT_PAGES];
- struct page *w_target_page;
+ unsigned int w_num_folios;
+ struct folio *w_folios[OCFS2_MAX_CTXT_PAGES];
+ struct folio *w_target_folio;
/*
* w_target_locked is used for page_mkwrite path indicating no unlocking
- * against w_target_page in ocfs2_write_end_nolock.
+ * against w_target_folio in ocfs2_write_end_nolock.
*/
unsigned int w_target_locked:1;
@@ -771,40 +755,40 @@ struct ocfs2_write_ctxt {
unsigned int w_unwritten_count;
};
-void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
+void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios)
{
int i;
- for(i = 0; i < num_pages; i++) {
- if (pages[i]) {
- unlock_page(pages[i]);
- mark_page_accessed(pages[i]);
- put_page(pages[i]);
- }
+ for(i = 0; i < num_folios; i++) {
+ if (!folios[i])
+ continue;
+ folio_unlock(folios[i]);
+ folio_mark_accessed(folios[i]);
+ folio_put(folios[i]);
}
}
-static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
+static void ocfs2_unlock_folios(struct ocfs2_write_ctxt *wc)
{
int i;
/*
* w_target_locked is only set to true in the page_mkwrite() case.
* The intent is to allow us to lock the target page from write_begin()
- * to write_end(). The caller must hold a ref on w_target_page.
+ * to write_end(). The caller must hold a ref on w_target_folio.
*/
if (wc->w_target_locked) {
- BUG_ON(!wc->w_target_page);
- for (i = 0; i < wc->w_num_pages; i++) {
- if (wc->w_target_page == wc->w_pages[i]) {
- wc->w_pages[i] = NULL;
+ BUG_ON(!wc->w_target_folio);
+ for (i = 0; i < wc->w_num_folios; i++) {
+ if (wc->w_target_folio == wc->w_folios[i]) {
+ wc->w_folios[i] = NULL;
break;
}
}
- mark_page_accessed(wc->w_target_page);
- put_page(wc->w_target_page);
+ folio_mark_accessed(wc->w_target_folio);
+ folio_put(wc->w_target_folio);
}
- ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
+ ocfs2_unlock_and_free_folios(wc->w_folios, wc->w_num_folios);
}
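/*
 * Sketch of the pairing the two helpers above rely on: a folio obtained
 * with FGP_LOCK | FGP_CREAT comes back locked and with a reference held,
 * so the release path is folio_unlock(), optionally folio_mark_accessed(),
 * then folio_put().  Hypothetical helper for illustration.
 */
static void release_locked_folio(struct folio *folio)
{
	folio_unlock(folio);
	folio_mark_accessed(folio);
	folio_put(folio);
}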
static void ocfs2_free_unwritten_list(struct inode *inode,
@@ -826,7 +810,7 @@ static void ocfs2_free_write_ctxt(struct inode *inode,
struct ocfs2_write_ctxt *wc)
{
ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list);
- ocfs2_unlock_pages(wc);
+ ocfs2_unlock_folios(wc);
brelse(wc->w_di_bh);
kfree(wc);
}
@@ -869,29 +853,30 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
* and dirty so they'll be written out (in order to prevent uninitialised
* block data from leaking). And clear the new bit.
*/
-static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
+static void ocfs2_zero_new_buffers(struct folio *folio, size_t from, size_t to)
{
unsigned int block_start, block_end;
struct buffer_head *head, *bh;
- BUG_ON(!PageLocked(page));
- if (!page_has_buffers(page))
+ BUG_ON(!folio_test_locked(folio));
+ head = folio_buffers(folio);
+ if (!head)
return;
- bh = head = page_buffers(page);
+ bh = head;
block_start = 0;
do {
block_end = block_start + bh->b_size;
if (buffer_new(bh)) {
if (block_end > from && block_start < to) {
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
unsigned start, end;
start = max(from, block_start);
end = min(to, block_end);
- zero_user_segment(page, start, end);
+ folio_zero_segment(folio, start, end);
set_buffer_uptodate(bh);
}
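/*
 * Minimal sketch of the buffer walk used above: folio_buffers() replaces
 * page_has_buffers()/page_buffers(), and folio_zero_segment() zeroes a
 * byte range of the folio.  Hypothetical helper that zeroes every
 * buffer_new() buffer of a locked folio.
 */
static void zero_all_new_buffers(struct folio *folio)
{
	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh = head;
	size_t start = 0;

	if (!head)
		return;
	do {
		if (buffer_new(bh)) {
			folio_zero_segment(folio, start, start + bh->b_size);
			set_buffer_uptodate(bh);
		}
		start += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
}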
@@ -916,29 +901,26 @@ static void ocfs2_write_failure(struct inode *inode,
int i;
unsigned from = user_pos & (PAGE_SIZE - 1),
to = user_pos + user_len;
- struct page *tmppage;
- if (wc->w_target_page)
- ocfs2_zero_new_buffers(wc->w_target_page, from, to);
+ if (wc->w_target_folio)
+ ocfs2_zero_new_buffers(wc->w_target_folio, from, to);
- for(i = 0; i < wc->w_num_pages; i++) {
- tmppage = wc->w_pages[i];
+ for (i = 0; i < wc->w_num_folios; i++) {
+ struct folio *folio = wc->w_folios[i];
- if (tmppage && page_has_buffers(tmppage)) {
+ if (folio && folio_buffers(folio)) {
if (ocfs2_should_order_data(inode))
ocfs2_jbd2_inode_add_write(wc->w_handle, inode,
user_pos, user_len);
- block_commit_write(tmppage, from, to);
+ block_commit_write(folio, from, to);
}
}
}
-static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
- struct ocfs2_write_ctxt *wc,
- struct page *page, u32 cpos,
- loff_t user_pos, unsigned user_len,
- int new)
+static int ocfs2_prepare_folio_for_write(struct inode *inode, u64 *p_blkno,
+ struct ocfs2_write_ctxt *wc, struct folio *folio, u32 cpos,
+ loff_t user_pos, unsigned user_len, int new)
{
int ret;
unsigned int map_from = 0, map_to = 0;
@@ -951,20 +933,19 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
/* treat the write as new if a hole/lseek spanned across
* the page boundary.
*/
- new = new | ((i_size_read(inode) <= page_offset(page)) &&
- (page_offset(page) <= user_pos));
+ new = new | ((i_size_read(inode) <= folio_pos(folio)) &&
+ (folio_pos(folio) <= user_pos));
- if (page == wc->w_target_page) {
+ if (folio == wc->w_target_folio) {
map_from = user_pos & (PAGE_SIZE - 1);
map_to = map_from + user_len;
if (new)
- ret = ocfs2_map_page_blocks(page, p_blkno, inode,
- cluster_start, cluster_end,
- new);
+ ret = ocfs2_map_folio_blocks(folio, p_blkno, inode,
+ cluster_start, cluster_end, new);
else
- ret = ocfs2_map_page_blocks(page, p_blkno, inode,
- map_from, map_to, new);
+ ret = ocfs2_map_folio_blocks(folio, p_blkno, inode,
+ map_from, map_to, new);
if (ret) {
mlog_errno(ret);
goto out;
@@ -978,7 +959,7 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
}
} else {
/*
- * If we haven't allocated the new page yet, we
+ * If we haven't allocated the new folio yet, we
* shouldn't be writing it out without copying user
* data. This is likely a math error from the caller.
*/
@@ -987,8 +968,8 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
map_from = cluster_start;
map_to = cluster_end;
- ret = ocfs2_map_page_blocks(page, p_blkno, inode,
- cluster_start, cluster_end, new);
+ ret = ocfs2_map_folio_blocks(folio, p_blkno, inode,
+ cluster_start, cluster_end, new);
if (ret) {
mlog_errno(ret);
goto out;
@@ -996,20 +977,20 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
}
/*
- * Parts of newly allocated pages need to be zero'd.
+ * Parts of newly allocated folios need to be zero'd.
*
* Above, we have also rewritten 'to' and 'from' - as far as
* the rest of the function is concerned, the entire cluster
- * range inside of a page needs to be written.
+ * range inside of a folio needs to be written.
*
- * We can skip this if the page is up to date - it's already
+ * We can skip this if the folio is uptodate - it's already
* been zero'd from being read in as a hole.
*/
- if (new && !PageUptodate(page))
- ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
+ if (new && !folio_test_uptodate(folio))
+ ocfs2_clear_folio_regions(folio, OCFS2_SB(inode->i_sb),
cpos, user_data_from, user_data_to);
- flush_dcache_page(page);
+ flush_dcache_folio(folio);
out:
return ret;
@@ -1018,11 +999,9 @@ out:
/*
* This function will only grab one clusters worth of pages.
*/
-static int ocfs2_grab_pages_for_write(struct address_space *mapping,
- struct ocfs2_write_ctxt *wc,
- u32 cpos, loff_t user_pos,
- unsigned user_len, int new,
- struct page *mmap_page)
+static int ocfs2_grab_folios_for_write(struct address_space *mapping,
+ struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos,
+ unsigned user_len, int new, struct folio *mmap_folio)
{
int ret = 0, i;
unsigned long start, target_index, end_index, index;
@@ -1039,7 +1018,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
* last page of the write.
*/
if (new) {
- wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
+ wc->w_num_folios = ocfs2_pages_per_cluster(inode->i_sb);
start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
/*
* We need the index *past* the last page we could possibly
@@ -1049,15 +1028,15 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
last_byte = max(user_pos + user_len, i_size_read(inode));
BUG_ON(last_byte < 1);
end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
- if ((start + wc->w_num_pages) > end_index)
- wc->w_num_pages = end_index - start;
+ if ((start + wc->w_num_folios) > end_index)
+ wc->w_num_folios = end_index - start;
} else {
- wc->w_num_pages = 1;
+ wc->w_num_folios = 1;
start = target_index;
}
end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;
- for(i = 0; i < wc->w_num_pages; i++) {
+ for(i = 0; i < wc->w_num_folios; i++) {
index = start + i;
if (index >= target_index && index <= end_index &&
@@ -1067,37 +1046,39 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
* and wants us to directly use the page
* passed in.
*/
- lock_page(mmap_page);
+ folio_lock(mmap_folio);
/* Exit and let the caller retry */
- if (mmap_page->mapping != mapping) {
- WARN_ON(mmap_page->mapping);
- unlock_page(mmap_page);
+ if (mmap_folio->mapping != mapping) {
+ WARN_ON(mmap_folio->mapping);
+ folio_unlock(mmap_folio);
ret = -EAGAIN;
goto out;
}
- get_page(mmap_page);
- wc->w_pages[i] = mmap_page;
+ folio_get(mmap_folio);
+ wc->w_folios[i] = mmap_folio;
wc->w_target_locked = true;
} else if (index >= target_index && index <= end_index &&
wc->w_type == OCFS2_WRITE_DIRECT) {
/* Direct write has no mapping page. */
- wc->w_pages[i] = NULL;
+ wc->w_folios[i] = NULL;
continue;
} else {
- wc->w_pages[i] = find_or_create_page(mapping, index,
- GFP_NOFS);
- if (!wc->w_pages[i]) {
- ret = -ENOMEM;
+ wc->w_folios[i] = __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ GFP_NOFS);
+ if (IS_ERR(wc->w_folios[i])) {
+ ret = PTR_ERR(wc->w_folios[i]);
mlog_errno(ret);
+ wc->w_folios[i] = NULL;
goto out;
}
}
- wait_for_stable_page(wc->w_pages[i]);
+ folio_wait_stable(wc->w_folios[i]);
if (index == target_index)
- wc->w_target_page = wc->w_pages[i];
+ wc->w_target_folio = wc->w_folios[i];
}
out:
if (ret)
@@ -1181,19 +1162,18 @@ static int ocfs2_write_cluster(struct address_space *mapping,
if (!should_zero)
p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1);
- for(i = 0; i < wc->w_num_pages; i++) {
+ for (i = 0; i < wc->w_num_folios; i++) {
int tmpret;
/* This is the direct io target page. */
- if (wc->w_pages[i] == NULL) {
+ if (wc->w_folios[i] == NULL) {
p_blkno += (1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits));
continue;
}
- tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
- wc->w_pages[i], cpos,
- user_pos, user_len,
- should_zero);
+ tmpret = ocfs2_prepare_folio_for_write(inode, &p_blkno, wc,
+ wc->w_folios[i], cpos, user_pos, user_len,
+ should_zero);
if (tmpret) {
mlog_errno(tmpret);
if (ret == 0)
@@ -1472,7 +1452,7 @@ static int ocfs2_write_begin_inline(struct address_space *mapping,
{
int ret;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- struct page *page;
+ struct folio *folio;
handle_t *handle;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
@@ -1483,19 +1463,21 @@ static int ocfs2_write_begin_inline(struct address_space *mapping,
goto out;
}
- page = find_or_create_page(mapping, 0, GFP_NOFS);
- if (!page) {
+ folio = __filemap_get_folio(mapping, 0,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
+ if (IS_ERR(folio)) {
ocfs2_commit_trans(osb, handle);
- ret = -ENOMEM;
+ ret = PTR_ERR(folio);
mlog_errno(ret);
goto out;
}
/*
- * If we don't set w_num_pages then this page won't get unlocked
+ * If we don't set w_num_folios then this folio won't get unlocked
* and freed on cleanup of the write context.
*/
- wc->w_pages[0] = wc->w_target_page = page;
- wc->w_num_pages = 1;
+ wc->w_target_folio = folio;
+ wc->w_folios[0] = folio;
+ wc->w_num_folios = 1;
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
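/*
 * The page-cache lookup pattern introduced above, in isolation: unlike
 * find_or_create_page(), __filemap_get_folio() never returns NULL; on
 * failure it returns an ERR_PTR (typically -ENOMEM with FGP_CREAT), so
 * callers test IS_ERR() and propagate PTR_ERR().  Hypothetical wrapper
 * for illustration.
 */
static struct folio *example_get_locked_folio(struct address_space *mapping,
					      pgoff_t index)
{
	struct folio *folio = __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);

	if (IS_ERR(folio))
		return folio;	/* caller uses PTR_ERR(folio) as errno */

	/* on success the folio is locked and a reference is held */
	return folio;
}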
@@ -1509,8 +1491,8 @@ static int ocfs2_write_begin_inline(struct address_space *mapping,
if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
ocfs2_set_inode_data_inline(inode, di);
- if (!PageUptodate(page)) {
- ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
+ if (!folio_test_uptodate(folio)) {
+ ret = ocfs2_read_inline_data(inode, folio, wc->w_di_bh);
if (ret) {
ocfs2_commit_trans(osb, handle);
@@ -1533,9 +1515,8 @@ int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
}
static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
- struct inode *inode, loff_t pos,
- unsigned len, struct page *mmap_page,
- struct ocfs2_write_ctxt *wc)
+ struct inode *inode, loff_t pos, size_t len,
+ struct folio *mmap_folio, struct ocfs2_write_ctxt *wc)
{
int ret, written = 0;
loff_t end = pos + len;
@@ -1550,7 +1531,7 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
* Handle inodes which already have inline data 1st.
*/
if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
- if (mmap_page == NULL &&
+ if (mmap_folio == NULL &&
ocfs2_size_fits_inline_data(wc->w_di_bh, end))
goto do_inline_write;
@@ -1574,7 +1555,7 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
* Check whether the write can fit.
*/
di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
- if (mmap_page ||
+ if (mmap_folio ||
end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
return 0;
@@ -1641,9 +1622,9 @@ static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
}
int ocfs2_write_begin_nolock(struct address_space *mapping,
- loff_t pos, unsigned len, ocfs2_write_type_t type,
- struct folio **foliop, void **fsdata,
- struct buffer_head *di_bh, struct page *mmap_page)
+ loff_t pos, unsigned len, ocfs2_write_type_t type,
+ struct folio **foliop, void **fsdata,
+ struct buffer_head *di_bh, struct folio *mmap_folio)
{
int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
@@ -1666,7 +1647,7 @@ try_again:
if (ocfs2_supports_inline_data(osb)) {
ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
- mmap_page, wc);
+ mmap_folio, wc);
if (ret == 1) {
ret = 0;
goto success;
@@ -1718,7 +1699,7 @@ try_again:
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(long long)i_size_read(inode),
le32_to_cpu(di->i_clusters),
- pos, len, type, mmap_page,
+ pos, len, type, mmap_folio,
clusters_to_alloc, extents_to_split);
/*
@@ -1789,21 +1770,21 @@ try_again:
}
/*
- * Fill our page array first. That way we've grabbed enough so
+ * Fill our folio array first. That way we've grabbed enough so
* that we can zero and flush if we error after adding the
* extent.
*/
- ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
- cluster_of_pages, mmap_page);
+ ret = ocfs2_grab_folios_for_write(mapping, wc, wc->w_cpos, pos, len,
+ cluster_of_pages, mmap_folio);
if (ret) {
/*
- * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
- * the target page. In this case, we exit with no error and no target
- * page. This will trigger the caller, page_mkwrite(), to re-try
- * the operation.
+ * ocfs2_grab_folios_for_write() returns -EAGAIN if it
+ * could not lock the target folio. In this case, we exit
+ * with no error and no target folio. This will trigger
+ * the caller, page_mkwrite(), to re-try the operation.
*/
if (type == OCFS2_WRITE_MMAP && ret == -EAGAIN) {
- BUG_ON(wc->w_target_page);
+ BUG_ON(wc->w_target_folio);
ret = 0;
goto out_quota;
}
@@ -1826,7 +1807,7 @@ try_again:
success:
if (foliop)
- *foliop = page_folio(wc->w_target_page);
+ *foliop = wc->w_target_folio;
*fsdata = wc;
return 0;
out_quota:
@@ -1845,7 +1826,7 @@ out:
* to VM code.
*/
if (wc->w_target_locked)
- unlock_page(mmap_page);
+ folio_unlock(mmap_folio);
ocfs2_free_write_ctxt(inode, wc);
@@ -1876,7 +1857,8 @@ out:
return ret;
}
-static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
+static int ocfs2_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
loff_t pos, unsigned len,
struct folio **foliop, void **fsdata)
{
@@ -1924,18 +1906,15 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
struct ocfs2_dinode *di,
struct ocfs2_write_ctxt *wc)
{
- void *kaddr;
-
if (unlikely(*copied < len)) {
- if (!PageUptodate(wc->w_target_page)) {
+ if (!folio_test_uptodate(wc->w_target_folio)) {
*copied = 0;
return;
}
}
- kaddr = kmap_atomic(wc->w_target_page);
- memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
- kunmap_atomic(kaddr);
+ memcpy_from_folio(di->id2.i_data.id_data + pos, wc->w_target_folio,
+ pos, *copied);
trace_ocfs2_write_end_inline(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -1944,17 +1923,16 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
le16_to_cpu(di->i_dyn_features));
}
-int ocfs2_write_end_nolock(struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied, void *fsdata)
+int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos,
+ unsigned len, unsigned copied, void *fsdata)
{
int i, ret;
- unsigned from, to, start = pos & (PAGE_SIZE - 1);
+ size_t from, to, start = pos & (PAGE_SIZE - 1);
struct inode *inode = mapping->host;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_write_ctxt *wc = fsdata;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
handle_t *handle = wc->w_handle;
- struct page *tmppage;
BUG_ON(!list_empty(&wc->w_unwritten_list));
@@ -1973,44 +1951,44 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
goto out_write_size;
}
- if (unlikely(copied < len) && wc->w_target_page) {
+ if (unlikely(copied < len) && wc->w_target_folio) {
loff_t new_isize;
- if (!PageUptodate(wc->w_target_page))
+ if (!folio_test_uptodate(wc->w_target_folio))
copied = 0;
new_isize = max_t(loff_t, i_size_read(inode), pos + copied);
- if (new_isize > page_offset(wc->w_target_page))
- ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
+ if (new_isize > folio_pos(wc->w_target_folio))
+ ocfs2_zero_new_buffers(wc->w_target_folio, start+copied,
start+len);
else {
/*
- * When page is fully beyond new isize (data copy
- * failed), do not bother zeroing the page. Invalidate
+ * When folio is fully beyond new isize (data copy
+ * failed), do not bother zeroing the folio. Invalidate
* it instead so that writeback does not get confused
* put page & buffer dirty bits into inconsistent
* state.
*/
- block_invalidate_folio(page_folio(wc->w_target_page),
- 0, PAGE_SIZE);
+ block_invalidate_folio(wc->w_target_folio, 0,
+ folio_size(wc->w_target_folio));
}
}
- if (wc->w_target_page)
- flush_dcache_page(wc->w_target_page);
+ if (wc->w_target_folio)
+ flush_dcache_folio(wc->w_target_folio);
- for(i = 0; i < wc->w_num_pages; i++) {
- tmppage = wc->w_pages[i];
+ for (i = 0; i < wc->w_num_folios; i++) {
+ struct folio *folio = wc->w_folios[i];
- /* This is the direct io target page. */
- if (tmppage == NULL)
+ /* This is the direct io target folio */
+ if (folio == NULL)
continue;
- if (tmppage == wc->w_target_page) {
+ if (folio == wc->w_target_folio) {
from = wc->w_target_from;
to = wc->w_target_to;
- BUG_ON(from > PAGE_SIZE ||
- to > PAGE_SIZE ||
+ BUG_ON(from > folio_size(folio) ||
+ to > folio_size(folio) ||
to < from);
} else {
/*
@@ -2019,19 +1997,17 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
* to flush their entire range.
*/
from = 0;
- to = PAGE_SIZE;
+ to = folio_size(folio);
}
- if (page_has_buffers(tmppage)) {
+ if (folio_buffers(folio)) {
if (handle && ocfs2_should_order_data(inode)) {
- loff_t start_byte =
- ((loff_t)tmppage->index << PAGE_SHIFT) +
- from;
+ loff_t start_byte = folio_pos(folio) + from;
loff_t length = to - from;
ocfs2_jbd2_inode_add_write(handle, inode,
start_byte, length);
}
- block_commit_write(tmppage, from, to);
+ block_commit_write(folio, from, to);
}
}
@@ -2060,7 +2036,7 @@ out:
* this lock and will ask for the page lock when flushing the data.
* put it here to preserve the unlock order.
*/
- ocfs2_unlock_pages(wc);
+ ocfs2_unlock_folios(wc);
if (handle)
ocfs2_commit_trans(osb, handle);
@@ -2073,7 +2049,8 @@ out:
return copied;
}
-static int ocfs2_write_end(struct file *file, struct address_space *mapping,
+static int ocfs2_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct folio *folio, void *fsdata)
{
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index 1d1b4b7edba0..114efc9111e4 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -8,16 +8,11 @@
#include <linux/fs.h>
-handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
- struct page *page,
- unsigned from,
- unsigned to);
-
-int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
+int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno,
struct inode *inode, unsigned int from,
unsigned int to, int new);
-void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages);
+void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios);
int walk_page_buffers( handle_t *handle,
struct buffer_head *head,
@@ -37,11 +32,11 @@ typedef enum {
} ocfs2_write_type_t;
int ocfs2_write_begin_nolock(struct address_space *mapping,
- loff_t pos, unsigned len, ocfs2_write_type_t type,
- struct folio **foliop, void **fsdata,
- struct buffer_head *di_bh, struct page *mmap_page);
+ loff_t pos, unsigned len, ocfs2_write_type_t type,
+ struct folio **foliop, void **fsdata,
+ struct buffer_head *di_bh, struct folio *mmap_folio);
-int ocfs2_read_inline_data(struct inode *inode, struct page *page,
+int ocfs2_read_inline_data(struct inode *inode, struct folio *folio,
struct buffer_head *di_bh);
int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 4200a0341343..724350925aff 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -3,6 +3,7 @@
* Copyright (C) 2004, 2005 Oracle. All rights reserved.
*/
+#include "linux/kstrtox.h"
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
@@ -1020,7 +1021,7 @@ fire_callbacks:
if (list_empty(&slot->ds_live_item))
goto out;
- /* live nodes only go dead after enough consequtive missed
+ /* live nodes only go dead after enough consecutive missed
* samples.. reset the missed counter whenever we see
* activity */
if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) {
@@ -1535,10 +1536,11 @@ static int o2hb_read_block_input(struct o2hb_region *reg,
{
unsigned long bytes;
char *p = (char *)page;
+ int ret;
- bytes = simple_strtoul(p, &p, 0);
- if (!p || (*p && (*p != '\n')))
- return -EINVAL;
+ ret = kstrtoul(p, 0, &bytes);
+ if (ret)
+ return ret;
/* Heartbeat and fs min / max block sizes are the same. */
if (bytes > 4096 || bytes < 512)
@@ -1622,13 +1624,14 @@ static ssize_t o2hb_region_blocks_store(struct config_item *item,
struct o2hb_region *reg = to_o2hb_region(item);
unsigned long tmp;
char *p = (char *)page;
+ int ret;
if (reg->hr_bdev_file)
return -EINVAL;
- tmp = simple_strtoul(p, &p, 0);
- if (!p || (*p && (*p != '\n')))
- return -EINVAL;
+ ret = kstrtoul(p, 0, &tmp);
+ if (ret)
+ return ret;
if (tmp > O2NM_MAX_NODES || tmp == 0)
return -ERANGE;
@@ -1776,8 +1779,8 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
if (o2nm_this_node() == O2NM_MAX_NODES)
return -EINVAL;
- fd = simple_strtol(p, &p, 0);
- if (!p || (*p && (*p != '\n')))
+ ret = kstrtol(p, 0, &fd);
+ if (ret < 0)
return -EINVAL;
if (fd < 0 || fd >= INT_MAX)
@@ -2136,10 +2139,11 @@ static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *ite
{
unsigned long tmp;
char *p = (char *)page;
+ int ret;
- tmp = simple_strtoul(p, &p, 10);
- if (!p || (*p && (*p != '\n')))
- return -EINVAL;
+ ret = kstrtoul(p, 10, &tmp);
+ if (ret)
+ return ret;
/* this will validate ranges for us. */
o2hb_dead_threshold_set((unsigned int) tmp);
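/*
 * The parsing pattern the converted store methods above share: kstrtoul()
 * returns 0 or a negative errno, tolerates a single trailing newline,
 * rejects other trailing garbage, and detects overflow, none of which
 * simple_strtoul() guaranteed.  Hypothetical configfs store method with a
 * hypothetical range check.
 */
static ssize_t example_threshold_store(struct config_item *item,
				       const char *page, size_t count)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 10, &val);
	if (ret)
		return ret;
	if (val == 0 || val > 4096)
		return -ERANGE;

	/* ... apply val to the object backing @item ... */
	return count;
}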
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index b73fc42e46ff..630bd5a3dd0d 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -29,7 +29,7 @@
* just calling printk() so that this can eventually make its way through
* relayfs along with the debugging messages. Everything else gets KERN_DEBUG.
* The inline tests and macro dance give GCC the opportunity to quite cleverly
- * only emit the appropriage printk() when the caller passes in a constant
+ * only emit the appropriate printk() when the caller passes in a constant
* mask, as is almost always the case.
*
* All this bitmask nonsense is managed from the files under
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index 8bf17231d7b7..bfb8b456876c 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -23,7 +23,7 @@
* race between when we see a node start heartbeating and when we connect
* to it.
*
- * So nodes that are in this transtion put a hold on the quorum decision
+ * So nodes that are in this transition put a hold on the quorum decision
* with a counter. As they fall out of this transition they drop the count
* and if they're the last, they fire off the decision.
*/
@@ -189,7 +189,7 @@ static void o2quo_clear_hold(struct o2quo_state *qs, u8 node)
}
/* as a node comes up we delay the quorum decision until we know the fate of
- * the connection. the hold will be droped in conn_up or hb_down. it might be
+ * the connection. the hold will be dropped in conn_up or hb_down. it might be
* perpetuated by con_err until hb_down. if we already have a conn, we might
* be dropping a hold that conn_up got. */
void o2quo_hb_up(u8 node)
@@ -256,7 +256,7 @@ void o2quo_hb_still_up(u8 node)
}
/* This is analogous to hb_up. as a node's connection comes up we delay the
- * quorum decision until we see it heartbeating. the hold will be droped in
+ * quorum decision until we see it heartbeating. the hold will be dropped in
* hb_up or hb_down. it might be perpetuated by con_err until hb_down. if
* it's already heartbeating we might be dropping a hold that conn_up got.
* */
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 2b8fa3e782fb..79b281e32f4c 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -5,13 +5,13 @@
*
* ----
*
- * Callers for this were originally written against a very simple synchronus
+ * Callers for this were originally written against a very simple synchronous
* API. This implementation reflects those simple callers. Some day I'm sure
* we'll need to move to a more robust posting/callback mechanism.
*
* Transmit calls pass in kernel virtual addresses and block copying this into
* the socket's tx buffers via a usual blocking sendmsg. They'll block waiting
- * for a failed socket to timeout. TX callers can also pass in a poniter to an
+ * for a failed socket to timeout. TX callers can also pass in a pointer to an
* 'int' which gets filled with an errno off the wire in response to the
* message they send.
*
@@ -101,7 +101,7 @@ static struct socket *o2net_listen_sock;
* o2net_wq. teardown detaches the callbacks before destroying the workqueue.
* quorum work is queued as sock containers are shutdown.. stop_listening
* tears down all the node's sock containers, preventing future shutdowns
- * and queued quroum work, before canceling delayed quorum work and
+ * and queued quorum work, before canceling delayed quorum work and
* destroying the work queue.
*/
static struct workqueue_struct *o2net_wq;
@@ -724,7 +724,7 @@ static void o2net_shutdown_sc(struct work_struct *work)
if (o2net_unregister_callbacks(sc->sc_sock->sk, sc)) {
/* we shouldn't flush as we're in the thread, the
* races with pending sc work structs are harmless */
- del_timer_sync(&sc->sc_idle_timeout);
+ timer_delete_sync(&sc->sc_idle_timeout);
o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
sc_put(sc);
kernel_sock_shutdown(sc->sc_sock, SHUT_RDWR);
@@ -1419,7 +1419,7 @@ out:
return ret;
}
-/* this work func is triggerd by data ready. it reads until it can read no
+/* this work func is triggered by data ready. it reads until it can read no
* more. it interprets 0, eof, as fatal. if data_ready hits while we're doing
* our work the work struct will be marked and we'll be called again. */
static void o2net_rx_until_empty(struct work_struct *work)
@@ -1483,12 +1483,13 @@ static void o2net_sc_send_keep_req(struct work_struct *work)
sc_put(sc);
}
-/* socket shutdown does a del_timer_sync against this as it tears down.
+/* socket shutdown does a timer_delete_sync against this as it tears down.
* we can't start this timer until we've got to the point in sc buildup
* where shutdown is going to be involved */
static void o2net_idle_timer(struct timer_list *t)
{
- struct o2net_sock_container *sc = from_timer(sc, t, sc_idle_timeout);
+ struct o2net_sock_container *sc = timer_container_of(sc, t,
+ sc_idle_timeout);
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
#ifdef CONFIG_DEBUG_FS
unsigned long msecs = ktime_to_ms(ktime_get()) -
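/*
 * The timer API renames used above, shown on a hypothetical structure:
 * timer_delete_sync() is the new name of del_timer_sync() and
 * timer_container_of() the new name of from_timer(); arguments and
 * semantics are unchanged.
 */
struct example_conn {
	struct timer_list idle_timer;
	/* ... */
};

static void example_idle_timer_fn(struct timer_list *t)
{
	struct example_conn *conn = timer_container_of(conn, t, idle_timer);

	pr_debug("connection %p went idle\n", conn);
}

static void example_conn_shutdown(struct example_conn *conn)
{
	/* waits for a running callback to finish before tearing down */
	timer_delete_sync(&conn->idle_timer);
}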
@@ -1614,7 +1615,7 @@ static void o2net_start_connect(struct work_struct *work)
myaddr.sin_addr.s_addr = mynode->nd_ipv4_address;
myaddr.sin_port = htons(0); /* any port */
- ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr,
+ ret = sock->ops->bind(sock, (struct sockaddr_unsized *)&myaddr,
sizeof(myaddr));
if (ret) {
mlog(ML_ERROR, "bind failed with %d at address %pI4\n",
@@ -1637,7 +1638,7 @@ static void o2net_start_connect(struct work_struct *work)
remoteaddr.sin_port = node->nd_ipv4_port;
ret = sc->sc_sock->ops->connect(sc->sc_sock,
- (struct sockaddr *)&remoteaddr,
+ (struct sockaddr_unsized *)&remoteaddr,
sizeof(remoteaddr),
O_NONBLOCK);
if (ret == -EINPROGRESS)
@@ -2001,7 +2002,7 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port)
INIT_WORK(&o2net_listen_work, o2net_accept_many);
sock->sk->sk_reuse = SK_CAN_REUSE;
- ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
+ ret = sock->ops->bind(sock, (struct sockaddr_unsized *)&sin, sizeof(sin));
if (ret < 0) {
printk(KERN_ERR "o2net: Error %d while binding socket at "
"%pI4:%u\n", ret, &addr, ntohs(port));
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index a9b8688aaf30..1873bbbb7e5b 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -32,7 +32,8 @@ void ocfs2_dentry_attach_gen(struct dentry *dentry)
}
-static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
+static int ocfs2_dentry_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
int ret = 0; /* if all else fails, just return false */
@@ -44,8 +45,7 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
inode = d_inode(dentry);
osb = OCFS2_SB(dentry->d_sb);
- trace_ocfs2_dentry_revalidate(dentry, dentry->d_name.len,
- dentry->d_name.name);
+ trace_ocfs2_dentry_revalidate(dentry, name->len, name->name);
/* For a negative dentry -
* check the generation number of the parent and compare with the
@@ -53,12 +53,8 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
*/
if (inode == NULL) {
unsigned long gen = (unsigned long) dentry->d_fsdata;
- unsigned long pgen;
- spin_lock(&dentry->d_lock);
- pgen = OCFS2_I(d_inode(dentry->d_parent))->ip_dir_lock_gen;
- spin_unlock(&dentry->d_lock);
- trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len,
- dentry->d_name.name,
+ unsigned long pgen = OCFS2_I(dir)->ip_dir_lock_gen;
+ trace_ocfs2_dentry_revalidate_negative(name->len, name->name,
pgen, gen);
if (gen != pgen)
goto bail;
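/*
 * Shape of the updated ->d_revalidate() hook used above: the parent
 * directory inode and the looked-up name now arrive as arguments, so the
 * d_lock/d_parent dance for negative dentries goes away.  The prototype
 * is the real one; the filesystem and example_dir_gen() are hypothetical.
 */
static int examplefs_d_revalidate(struct inode *dir, const struct qstr *name,
				  struct dentry *dentry, unsigned int flags)
{
	if (flags & LOOKUP_RCU)
		return -ECHILD;

	/*
	 * Compare a generation cached at dentry creation time against the
	 * parent directory's current one, without touching dentry->d_parent.
	 */
	if ((unsigned long)dentry->d_fsdata != example_dir_gen(dir))
		return 0;	/* stale: force a fresh lookup */

	return 1;		/* still valid */
}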
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 213206ebdd58..2785ff245e79 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -302,8 +302,21 @@ static int ocfs2_check_dir_entry(struct inode *dir,
unsigned long offset)
{
const char *error_msg = NULL;
- const int rlen = le16_to_cpu(de->rec_len);
- const unsigned long next_offset = ((char *) de - buf) + rlen;
+ unsigned long next_offset;
+ int rlen;
+
+ if (offset > size - OCFS2_DIR_REC_LEN(1)) {
+ /* Dirent is (maybe partially) beyond the buffer
+ * boundaries so touching 'de' members is unsafe.
+ */
+ mlog(ML_ERROR, "directory entry (#%llu: offset=%lu) "
+ "too close to end or out-of-bounds",
+ (unsigned long long)OCFS2_I(dir)->ip_blkno, offset);
+ return 0;
+ }
+
+ rlen = le16_to_cpu(de->rec_len);
+ next_offset = ((char *) de - buf) + rlen;
if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
error_msg = "rec_len is smaller than minimal";
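/*
 * The validation order introduced above, reduced to its essentials: make
 * sure the fixed part of the record lies inside the buffer before reading
 * any of its fields, and only then trust rec_len to locate the next
 * record.  The record type and helper are hypothetical stand-ins.
 */
struct example_dirent {
	__le16 rec_len;
	/* name etc. follow */
};

static bool example_dirent_in_bounds(const char *buf, unsigned long size,
				     unsigned long offset)
{
	const struct example_dirent *de;
	unsigned int rlen;

	/* header would run past the end of the buffer: don't touch it */
	if (size < sizeof(*de) || offset > size - sizeof(*de))
		return false;

	de = (const struct example_dirent *)(buf + offset);
	rlen = le16_to_cpu(de->rec_len);

	/* rec_len itself must keep the record inside the buffer */
	return rlen >= sizeof(*de) && offset + rlen <= size;
}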
@@ -778,6 +791,14 @@ static int ocfs2_dx_dir_lookup_rec(struct inode *inode,
struct ocfs2_extent_block *eb;
struct ocfs2_extent_rec *rec = NULL;
+ if (le16_to_cpu(el->l_count) !=
+ ocfs2_extent_recs_per_dx_root(inode->i_sb)) {
+ ret = ocfs2_error(inode->i_sb,
+ "Inode %lu has invalid extent list length %u\n",
+ inode->i_ino, le16_to_cpu(el->l_count));
+ goto out;
+ }
+
if (el->l_tree_depth) {
ret = ocfs2_find_leaf(INODE_CACHE(inode), el, major_hash,
&eb_bh);
@@ -798,6 +819,14 @@ static int ocfs2_dx_dir_lookup_rec(struct inode *inode,
}
}
+ if (le16_to_cpu(el->l_next_free_rec) == 0) {
+ ret = ocfs2_error(inode->i_sb,
+ "Inode %lu has empty extent list at depth %u\n",
+ inode->i_ino,
+ le16_to_cpu(el->l_tree_depth));
+ goto out;
+ }
+
found = 0;
for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
rec = &el->l_recs[i];
@@ -1065,26 +1094,39 @@ int ocfs2_find_entry(const char *name, int namelen,
{
struct buffer_head *bh;
struct ocfs2_dir_entry *res_dir = NULL;
+ int ret = 0;
if (ocfs2_dir_indexed(dir))
return ocfs2_find_entry_dx(name, namelen, dir, lookup);
+ if (unlikely(i_size_read(dir) <= 0)) {
+ ret = -EFSCORRUPTED;
+ mlog_errno(ret);
+ goto out;
+ }
/*
* The unindexed dir code only uses part of the lookup
* structure, so there's no reason to push it down further
* than this.
*/
- if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+ if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ if (unlikely(i_size_read(dir) > dir->i_sb->s_blocksize)) {
+ ret = -EFSCORRUPTED;
+ mlog_errno(ret);
+ goto out;
+ }
bh = ocfs2_find_entry_id(name, namelen, dir, &res_dir);
- else
+ } else {
bh = ocfs2_find_entry_el(name, namelen, dir, &res_dir);
+ }
if (bh == NULL)
return -ENOENT;
lookup->dl_leaf_bh = bh;
lookup->dl_entry = res_dir;
- return 0;
+out:
+ return ret;
}
/*
@@ -2010,6 +2052,7 @@ int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
*
* Return 0 if the name does not exist
* Return -EEXIST if the directory contains the name
+ * Return -EFSCORRUPTED if found corruption
*
* Callers should have i_rwsem + a cluster lock on dir
*/
@@ -2023,9 +2066,12 @@ int ocfs2_check_dir_for_entry(struct inode *dir,
trace_ocfs2_check_dir_for_entry(
(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
- if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) {
+ ret = ocfs2_find_entry(name, namelen, dir, &lookup);
+ if (ret == 0) {
ret = -EEXIST;
mlog_errno(ret);
+ } else if (ret == -ENOENT) {
+ ret = 0;
}
ocfs2_free_dir_lookup_result(&lookup);
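/*
 * How a caller consumes the contract documented above: 0 means the name
 * is free, -EEXIST means it already exists, and anything else (such as
 * -EFSCORRUPTED) is a hard error to propagate rather than treat as
 * "name not found".  Hypothetical caller.
 */
static int example_may_create(struct inode *dir, const char *name, int namelen)
{
	int ret = ocfs2_check_dir_for_entry(dir, name, namelen);

	if (ret == -EEXIST)
		return -EEXIST;		/* name already present */
	if (ret < 0)
		return ret;		/* corruption or I/O error */
	return 0;			/* safe to create */
}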
@@ -3398,6 +3444,14 @@ static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
offset += le16_to_cpu(de->rec_len);
}
+ if (!last_de) {
+ ret = ocfs2_error(sb, "Directory entry (#%llu: size=%lld) "
+ "is unexpectedly short",
+ (unsigned long long)OCFS2_I(dir)->ip_blkno,
+ i_size_read(dir));
+ goto out;
+ }
+
/*
* We're going to require expansion of the directory - figure
* out how many blocks we'll need so that a place for the
@@ -4079,10 +4133,15 @@ static int ocfs2_expand_inline_dx_root(struct inode *dir,
}
dx_root->dr_flags &= ~OCFS2_DX_FLAG_INLINE;
- memset(&dx_root->dr_list, 0, osb->sb->s_blocksize -
- offsetof(struct ocfs2_dx_root_block, dr_list));
+
+ dx_root->dr_list.l_tree_depth = 0;
dx_root->dr_list.l_count =
cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
+ dx_root->dr_list.l_next_free_rec = 0;
+ memset(&dx_root->dr_list.l_recs, 0,
+ osb->sb->s_blocksize -
+ (offsetof(struct ocfs2_dx_root_block, dr_list) +
+ offsetof(struct ocfs2_extent_list, l_recs)));
/* This should never fail considering we start with an empty
* dx_root. */
diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h
index 847a52dcbe7d..1969db8ffa9c 100644
--- a/fs/ocfs2/dlm/dlmapi.h
+++ b/fs/ocfs2/dlm/dlmapi.h
@@ -118,7 +118,7 @@ struct dlm_lockstatus {
#define LKM_VALBLK 0x00000100 /* lock value block request */
#define LKM_NOQUEUE 0x00000200 /* non blocking request */
#define LKM_CONVERT 0x00000400 /* conversion request */
-#define LKM_NODLCKWT 0x00000800 /* this lock wont deadlock (U) */
+#define LKM_NODLCKWT 0x00000800 /* this lock won't deadlock (U) */
#define LKM_UNLOCK 0x00001000 /* deallocate this lock */
#define LKM_CANCEL 0x00002000 /* cancel conversion request */
#define LKM_DEQALL 0x00004000 /* remove all locks held by proc (U) */
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index e9ef4e2b0e75..fe4fdd09bae3 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -14,6 +14,7 @@
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/export.h>
+#include <linux/string_choices.h>
#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
@@ -90,12 +91,12 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
buf, res->owner, res->state);
printk(" last used: %lu, refcnt: %u, on purge list: %s\n",
res->last_used, kref_read(&res->refs),
- list_empty(&res->purge) ? "no" : "yes");
+ str_no_yes(list_empty(&res->purge)));
printk(" on dirty list: %s, on reco list: %s, "
"migrating pending: %s\n",
- list_empty(&res->dirty) ? "no" : "yes",
- list_empty(&res->recovering) ? "no" : "yes",
- res->migration_pending ? "yes" : "no");
+ str_no_yes(list_empty(&res->dirty)),
+ str_no_yes(list_empty(&res->recovering)),
+ str_yes_no(res->migration_pending));
printk(" inflight locks: %d, asts reserved: %d\n",
res->inflight_locks, atomic_read(&res->asts_reserved));
dlm_print_lockres_refmap(res);
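/*
 * The helpers used above come from <linux/string_choices.h>: str_yes_no()
 * maps a boolean to "yes"/"no" and str_no_yes() to the inverse, which
 * reads naturally when the tested condition is itself inverted, as with
 * list_empty().  Hypothetical debug print.
 */
static void example_print_state(struct dlm_lock_resource *res)
{
	printk(KERN_INFO "on purge list: %s, migrating pending: %s\n",
	       str_no_yes(list_empty(&res->purge)),
	       str_yes_no(res->migration_pending));
}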
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 2018501b2249..2347a50f079b 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c