35 files changed, 60 insertions, 62 deletions
diff --git a/fs/btrfs/accessors.c b/fs/btrfs/accessors.c
index 861c7d92c437..1248aa2535d3 100644
--- a/fs/btrfs/accessors.c
+++ b/fs/btrfs/accessors.c
@@ -44,7 +44,7 @@ static __always_inline void memcpy_split_src(char *dest, const char *src1,
 * gives us all the type checking.
 *
 * The extent buffer pages stored in the array folios may not form a contiguous
- * phyusical range, but the API functions assume the linear offset to the range
+ * physical range, but the API functions assume the linear offset to the range
 * from 0 to metadata node size.
 */
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 6a450be293b1..c6573e845e43 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1690,7 +1690,7 @@ out:
 * @ctx->bytenr and @ctx->extent_item_pos. The bytenr of the found leaves are
 * added to the ulist at @ctx->refs, and that ulist is allocated by this
 * function. The caller should free the ulist with free_leaf_list() if
- * @ctx->ignore_extent_item_pos is false, otherwise a fimple ulist_free() is
+ * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is
 * enough.
 *
 * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 34b0193a181c..25d51c246070 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -190,7 +190,7 @@ struct btrfs_backref_share_check_ctx {
 * It's very common to have several file extent items that point to the
 * same extent (bytenr) but with different offsets and lengths. This
 * typically happens for COW writes, partial writes into prealloc
- * extents, NOCOW writes after snapshoting a root, hole punching or
+ * extents, NOCOW writes after snapshotting a root, hole punching or
 * reflinking within the same file (less common perhaps).
 * So keep a small cache with the lookup results for the extent pointed
 * by the last few file extent items. This cache is checked, with a
@@ -414,7 +414,7 @@ struct btrfs_backref_cache {
 /*
 * Whether this cache is for relocation
 *
- * Reloction backref cache require more info for reloc root compared
+ * Relocation backref cache require more info for reloc root compared
 * to generic backref cache.
 */
 bool is_reloc;
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index b6328d50ffe7..548483a84466 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1971,7 +1971,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 * called, which is where we will transfer a reserved extent's
 * size from the "reserved" counter to the "used" counter - this
 * happens when running delayed references. When we relocate the
- * chunk below, relocation first flushes dellaloc, waits for
+ * chunk below, relocation first flushes delalloc, waits for
 * ordered extent completion (which is where we create delayed
 * references for data extents) and commits the current
 * transaction (which runs delayed references), and only after
@@ -2839,7 +2839,7 @@ next:
 * space or none at all (due to no need to COW, extent buffers
 * were already COWed in the current transaction and still
 * unwritten, tree heights lower than the maximum possible
- * height, etc). For data we generally reserve the axact amount
+ * height, etc). For data we generally reserve the exact amount
 * of space we are going to allocate later, the exception is
 * when using compression, as we must reserve space based on the
 * uncompressed data size, because the compression is only done
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index a8bb8429c966..9172104a5889 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -63,7 +63,7 @@ enum btrfs_discard_state {
 * CHUNK_ALLOC_FORCE means it must try to allocate one
 *
 * CHUNK_ALLOC_FORCE_FOR_EXTENT like CHUNK_ALLOC_FORCE but called from
- * find_free_extent() that also activaes the zone
+ * find_free_extent() that also activates the zone
 */
 enum btrfs_chunk_alloc_enum {
 CHUNK_ALLOC_NO_FORCE,
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d1de0151a154..c865e0f2a7e8 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1290,7 +1290,7 @@ int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
 #define ENTROPY_LVL_HIGH (80)
 /*
- * For increasead precision in shannon_entropy calculation,
+ * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after comma:
 *
 * - maximum int bit length is 64
diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index 738179a5e170..84ba9906f0d8 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -153,7 +153,7 @@ void btrfs_add_inode_defrag(struct btrfs_inode *inode, u32 extent_thresh)
 }
 /*
- * Pick the defragable inode that we want, if it doesn't exist, we will get the
+ * Pick the defraggable inode that we want, if it doesn't exist, we will get the
 * next one.
 */
 static struct inode_defrag *btrfs_pick_defrag_inode(
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 5d8361bac497..6170803d8a1b 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -895,7 +895,7 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
 }
 /*
- * Initialize the structure which represents a modification to a an extent.
+ * Initialize the structure which represents a modification to an extent.
 *
 * @fs_info: Internal to the mounted filesystem mount structure.
 *
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 4675bcd5f92e..ed3b07fdaab8 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -637,7 +637,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
 break;
 case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
 case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
- DEBUG_WARN("unexpected STARTED ot SUSPENDED dev-replace state");
+ DEBUG_WARN("unexpected STARTED or SUSPENDED dev-replace state");
 ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED;
 up_write(&dev_replace->rwsem);
 goto leave;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index eb55ecf4bf25..7b06bbc40898 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3245,7 +3245,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
 /*
 * Subpage runtime limitation on v1 cache.
 *
- * V1 space cache still has some hard codeed PAGE_SIZE usage, while
+ * V1 space cache still has some hard coded PAGE_SIZE usage, while
 * we're already defaulting to v2 cache, no need to bother v1 as it's
 * going to be deprecated anyway.
 */
diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
index 0c58342c6125..bb2ca1c9c7b0 100644
--- a/fs/btrfs/extent-io-tree.c
+++ b/fs/btrfs/extent-io-tree.c
@@ -1237,7 +1237,7 @@ hit_next:
 state = next_search_state(inserted_state, end);
 /*
 * If there's a next state, whether contiguous or not, we don't
- * need to unlock and start search agian. If it's not contiguous
+ * need to unlock and start search again. If it's not contiguous
 * we will end up here and try to allocate a prealloc state and insert.
 */
 if (state)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e117b5cbefae..a4416c451b25 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -325,7 +325,7 @@ search_again:
 /*
 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
- * is_data == BTRFS_REF_TYPE_DATA, data type is requiried,
+ * is_data == BTRFS_REF_TYPE_DATA, data type is required,
 * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
 */
 int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
@@ -4316,7 +4316,7 @@ static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
 spin_lock(&fs_info->zone_active_bgs_lock);
 list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
 /*
- * No lock is OK here because avail is monotinically
+ * No lock is OK here because avail is monotonically
 * decreasing, and this is just a hint.
 */
 u64 avail = block_group->zone_capacity - block_group->alloc_offset;
@@ -5613,7 +5613,7 @@ static int check_next_block_uptodate(struct btrfs_trans_handle *trans,
 * If we are UPDATE_BACKREF then we will not, we need to update our backrefs.
 *
 * If we are DROP_REFERENCE this will figure out if we need to drop our current
- * reference, skipping it if we dropped it from a previous incompleted drop, or
+ * reference, skipping it if we dropped it from a previous uncompleted drop, or
 * dropping it if we still have a reference to it.
 */
 static int maybe_drop_reference(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -5760,7 +5760,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 /*
 * We have to walk down into this node, and if we're currently at the
- * DROP_REFERNCE stage and this block is shared then we need to switch
+ * DROP_REFERENCE stage and this block is shared then we need to switch
 * to the UPDATE_BACKREF stage in order to convert to FULL_BACKREF.
 */
 if (wc->stage == DROP_REFERENCE && wc->refs[level - 1] > 1) {
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 152c9d3e8cce..ca7174fa0240 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -418,7 +418,7 @@ again:
 if (delalloc_end + 1 - delalloc_start > max_bytes)
 delalloc_end = delalloc_start + max_bytes - 1;
- /* step two, lock all the folioss after the folios that has start */
+ /* step two, lock all the folios after the folios that has start */
 ret = lock_delalloc_folios(inode, locked_folio, delalloc_start,
 delalloc_end);
 ASSERT(!ret || ret == -EAGAIN);
@@ -772,7 +772,7 @@ static void alloc_new_bio(struct btrfs_inode *inode,
 *
 * The will either add the page into the existing @bio_ctrl->bbio, or allocate a
 * new one in @bio_ctrl->bbio.
- * The mirror number for this IO should already be initizlied in
+ * The mirror number for this IO should already be initialized in
 * @bio_ctrl->mirror_num.
 */
 static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
@@ -2225,7 +2225,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
 * @fs_info: The fs_info for this file system.
 * @start: The offset of the range to start waiting on writeback.
 * @end: The end of the range, inclusive. This is meant to be used in
- * conjuction with wait_marked_extents, so this will usually be
+ * conjunction with wait_marked_extents, so this will usually be
 * the_next_eb->start - 1.
 */
 void btrfs_btree_wait_writeback_range(struct btrfs_fs_info *fs_info, u64 start,
@@ -2495,7 +2495,7 @@ retry:
 * In above case, [32K, 96K) is asynchronously submitted
 * for compression, and [124K, 128K) needs to be written back.
 *
- * If we didn't wait wrtiteback for page 64K, [128K, 128K)
+ * If we didn't wait writeback for page 64K, [128K, 128K)
 * won't be submitted as the page still has writeback flag
 * and will be skipped in the next check.
 *
@@ -2979,7 +2979,7 @@ static void cleanup_extent_buffer_folios(struct extent_buffer *eb)
 {
 const int num_folios = num_extent_folios(eb);
- /* We canont use num_extent_folios() as loop bound as eb->folios changes. */
+ /* We cannot use num_extent_folios() as loop bound as eb->folios changes. */
 for (int i = 0; i < num_folios; i++) {
 ASSERT(eb->folios[i]);
 detach_extent_buffer_folio(eb, eb->folios[i]);
diff --git a/fs/btrfs/fiemap.c b/fs/btrfs/fiemap.c
index 7935586a9dbd..f2eaaef8422b 100644
--- a/fs/btrfs/fiemap.c
+++ b/fs/btrfs/fiemap.c
@@ -153,7 +153,7 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
 if (cache_end > offset) {
 if (offset == cache->offset) {
 /*
- * We cached a dealloc range (found in the io tree) for
+ * We cached a delalloc range (found in the io tree) for
 * a hole or prealloc extent and we have now found a
 * file extent item for the same offset. What we have
 * now is more recent and up to date, so discard what
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 204674934795..4daec404fec6 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -970,7 +970,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio,
 * Return:
 * > 0 If we can nocow, and updates @write_bytes.
 * 0 If we can't do a nocow write.
- * -EAGAIN If we can't do a nocow write because snapshoting of the inode's
+ * -EAGAIN If we can't do a nocow write because snapshotting of the inode's
 * root is in progress or because we are in a non-blocking IO
 * context and need to block (@nowait is true).
 * < 0 If an error happened.
@@ -3345,7 +3345,7 @@ static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end
 * We could also use the extent map tree to find such delalloc that is
 * being flushed, but using the ordered extents tree is more efficient
 * because it's usually much smaller as ordered extents are removed from
- * the tree once they complete. With the extent maps, we mau have them
+ * the tree once they complete. With the extent maps, we may have them
 * in the extent map tree for a very long time, and they were either
 * created by previous writes or loaded by read operations.
 */
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 5d8d1570a5c9..c2730740d928 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -2282,7 +2282,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 * If this block group has some small extents we don't want to
 * use up all of our free slots in the cache with them, we want
 * to reserve them to larger extents, however if we have plenty
- * of cache left then go ahead an dadd them, no sense in adding
+ * of cache left then go ahead and add them, no sense in adding
 * the overhead of a bitmap if we don't have to.
 */
 if (info->bytes <= fs_info->sectorsize * 8) {
@@ -3829,7 +3829,7 @@ out_unlock:
 /*
 * If we break out of trimming a bitmap prematurely, we should reset the
- * trimming bit. In a rather contrieved case, it's possible to race here so
+ * trimming bit. In a rather contrived case, it's possible to race here so
 * reset the state to BTRFS_TRIM_STATE_UNTRIMMED.
 *
 * start = start of bitmap
diff --git a/fs/btrfs/fs.c b/fs/btrfs/fs.c
index 8b118c03cdb8..335209fe3734 100644
--- a/fs/btrfs/fs.c
+++ b/fs/btrfs/fs.c
@@ -58,7 +58,7 @@ size_t __attribute_const__ btrfs_get_num_csums(void)
 * We support the following block sizes for all systems:
 *
 * - 4K
- * This is the most common block size. For PAGE SIZE > 4K cases the subage
+ * This is the most common block size. For PAGE SIZE > 4K cases the subpage
 * mode is used.
 *
 * - PAGE_SIZE
diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
index 2ccba95af060..5f0b185a7f21 100644
--- a/fs/btrfs/fs.h
+++ b/fs/btrfs/fs.h
@@ -283,7 +283,7 @@ enum {
 #ifdef CONFIG_BTRFS_EXPERIMENTAL
 /*
- * Features under developmen like Extent tree v2 support is enabled
+ * Features under development like Extent tree v2 support is enabled
 * only under CONFIG_BTRFS_EXPERIMENTAL
 */
 #define BTRFS_FEATURE_INCOMPAT_SUPP \
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 33cabe0a54a4..dd503dba33cf 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -370,7 +370,7 @@ int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
 }
 /*
- * Unock inode i_rwsem.
+ * Unlock inode i_rwsem.
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
@@ -1990,7 +1990,7 @@ error:
 }
 /*
- * when nowcow writeback call back. This checks for snapshots or COW copies
+ * When nocow writeback calls back. This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
@@ -2233,7 +2233,7 @@ error:
 * | OE cleanup | Skip | Untouched |
 *
 * nocow_one_range() failed, the range [cur_offset, nocow_end] is
- * alread cleaned up.
+ * already cleaned up.
 */
 oe_cleanup_start = start;
 oe_cleanup_len = cur_offset - start;
@@ -2986,7 +2986,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
 * If we dropped an inline extent here, we know the range where it is
 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
 * number of bytes only for that range containing the inline extent.
- * The remaining of the range will be processed when clearning the
+ * The remaining of the range will be processed when clearing the
 * EXTENT_DELALLOC_BIT bit through the ordered extent completion.
 */
 if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
@@ -4905,7 +4905,7 @@ int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 e
 goto out;
 /*
- * Skip the truncatioin if the range in the target block is already aligned.
+ * Skip the truncation if the range in the target block is already aligned.
 * The seemingly complex check will also handle the same block case.
 */
 if (in_head_block && !IS_ALIGNED(start, blocksize))
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index e10daf6631af..063291519b36 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -957,7 +957,7 @@ static noinline int btrfs_mksnapshot(struct dentry *parent,
 /*
 * Force new buffered writes to reserve space even when NOCOW is
- * possible. This is to avoid later writeback (running dealloc) to
+ * possible. This is to avoid later writeback (running delalloc) to
 * fallback to COW mode and unexpectedly fail with ENOSPC.
 */
 btrfs_drew_read_lock(&root->snapshot_lock);
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index a3e6d9616e60..0035851d72b0 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -361,7 +361,7 @@ void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
 atomic_inc(&lock->readers);
 /*
- * Ensure the pending reader count is perceieved BEFORE this reader
+ * Ensure the pending reader count is perceived BEFORE this reader
 * goes to sleep in case of active writers. This guarantees new writers
 * won't be allowed and that the current reader will be woken up when
 * the last active writer finishes its jobs.
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index af29df98ac14..a4673e7d95d7 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -74,7 +74,7 @@ enum btrfs_lock_nesting {
 BTRFS_NESTING_NEW_ROOT,
 /*
- * We are limited to MAX_LOCKDEP_SUBLCLASSES number of subclasses, so
+ * We are limited to MAX_LOCKDEP_SUBCLASSES number of subclasses, so
 * add this in here and add a static_assert to keep us from going over
 * the limit. As of this writing we're limited to 8, and we're
 * definitely using 8, hence this check to keep us from messing up in
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index ce5f6732bfb5..d86020ace69c 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -113,7 +113,7 @@ enum {
 /* Which blocks are covered by extent items. */
 scrub_bitmap_nr_has_extent = 0,
- /* Which blocks are meteadata. */
+ /* Which blocks are metadata. */
 scrub_bitmap_nr_is_metadata,
 /*
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index faa3710fa074..c5771df3a2c7 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1738,7 +1738,7 @@ static int read_symlink(struct btrfs_root *root,
 * An empty symlink inode. Can happen in rare error paths when
 * creating a symlink (transaction committed before the inode
 * eviction handler removed the symlink inode items and a crash
- * happened in between or the subvol was snapshoted in between).
+ * happened in between or the subvol was snapshotted in between).
 * Print an informative message to dmesg/syslog so that the user
 * can delete the symlink.
 */
@@ -2768,7 +2768,7 @@ out:
 * processing an inode that is a directory and it just got renamed, and existing
 * entries in the cache may refer to inodes that have the directory in their
 * full path - in which case we would generate outdated paths (pre-rename)
- * for the inodes that the cache entries point to. Instead of prunning the
+ * for the inodes that the cache entries point to. Instead of pruning the
 * cache when inserting, do it after we finish processing each inode at
 * finish_inode_if_needed().
 */
@@ -7984,7 +7984,7 @@ static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
 }
 /*
- * Make sure any existing dellaloc is flushed for any root used by a send
+ * Make sure any existing delalloc is flushed for any root used by a send
 * operation so that we do not miss any data and we do not race with writeback
 * finishing and changing a tree while send is using the tree. This could
 * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 0481c693ac2e..0e5c0c80e0fe 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -479,7 +479,7 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
 /*
 * On the zoned mode, we always allocate one zone as one chunk.
- * Returning non-zone size alingned bytes here will result in
+ * Returning non-zone size aligned bytes here will result in
 * less pressure for the async metadata reclaim process, and it
 * will over-commit too much leading to ENOSPC. Align down to the
 * zone size to avoid that.
@@ -1528,7 +1528,7 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
 * turned into error mode due to a transaction abort when flushing space
 * above, in that case fail with the abort error instead of returning
 * success to the caller if we can steal from the global rsv - this is
- * just to have caller fail immeditelly instead of later when trying to
+ * just to have caller fail immediately instead of later when trying to
 * modify the fs, making it easier to debug -ENOSPC problems.
 */
 if (BTRFS_FS_ERROR(fs_info)) {
diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
index cb4f97833dc3..5ca8d4db6722 100644
--- a/fs/btrfs/subpage.c
+++ b/fs/btrfs/subpage.c
@@ -690,7 +690,7 @@ IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked, \
 GET_SUBPAGE_BITMAP(fs_info, folio, name, &bitmap); \
 btrfs_warn(fs_info, \
- "dumpping bitmap start=%llu len=%u folio=%llu " #name "_bitmap=%*pbl", \
+ "dumping bitmap start=%llu len=%u folio=%llu " #name "_bitmap=%*pbl", \
 start, len, folio_pos(folio), \
 blocks_per_folio, &bitmap); \
 }
diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h
index ee0710eb13fd..ad0552db7c7d 100644
--- a/fs/btrfs/subpage.h
+++ b/fs/btrfs/subpage.h
@@ -13,7 +13,7 @@ struct address_space;
 struct folio;
 /*
- * Extra info for subpapge bitmap.
+ * Extra info for subpage bitmap.
 *
 * For subpage we pack all uptodate/dirty/writeback/ordered bitmaps into
 * one larger bitmap.
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 4951f50e9823..49893c885855 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1283,7 +1283,7 @@ static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
 const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
 /*
- * We need to cleanup all defragable inodes if the autodefragment is
+ * We need to cleanup all defraggable inodes if the autodefragment is
 * close or the filesystem is read only.
 */
 if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
diff --git a/fs/btrfs/tests/delayed-refs-tests.c b/fs/btrfs/tests/delayed-refs-tests.c
index 265370e79a54..e2248acb906b 100644
--- a/fs/btrfs/tests/delayed-refs-tests.c
+++ b/fs/btrfs/tests/delayed-refs-tests.c
@@ -997,12 +997,12 @@ int btrfs_test_delayed_refs(u32 sectorsize, u32 nodesize)
 ret = simple_tests(&trans);
 if (!ret) {
- test_msg("running delayed refs merg tests on metadata refs");
+ test_msg("running delayed refs merge tests on metadata refs");
 ret = merge_tests(&trans, BTRFS_REF_METADATA);
 }
 if (!ret) {
- test_msg("running delayed refs merg tests on data refs");
+ test_msg("running delayed refs merge tests on data refs");
 ret = merge_tests(&trans, BTRFS_REF_DATA);
 }
diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
index 3a86534c116f..42af6c737c6e 100644
--- a/fs/btrfs/tests/extent-map-tests.c
+++ b/fs/btrfs/tests/extent-map-tests.c
@@ -1095,7 +1095,7 @@ int btrfs_test_extent_map(void)
 /*
 * Test a chunk with 2 data stripes one of which
 * intersects the physical address of the super block
- * is correctly recognised.
+ * is correctly recognized.
 */
 .raid_type = BTRFS_BLOCK_GROUP_RAID1,
 .physical_start = SZ_64M - SZ_4M,
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 25ee0183e178..d04fa6ce8390 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -103,7 +103,7 @@ static struct kmem_cache *btrfs_trans_handle_cachep;
 * | attached to transid N+1. |
 * | |
 * | To next stage: |
- * | Until all tree blocks are super blocks are |
+ * | Until all tree blocks and super blocks are |
 * | written to block devices |
 * V |
 * Transaction N [[TRANS_STATE_COMPLETED]] V
@@ -2423,7 +2423,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
 * them.
 *
 * We needn't worry that this operation will corrupt the snapshots,
- * because all the tree which are snapshoted will be forced to COW
+ * because all the tree which are snapshotted will be forced to COW
 * the nodes and leaves.
 */
 ret = btrfs_run_delayed_items(trans);
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index a997c7cc35a2..c2aac08055fb 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -1209,7 +1209,7 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
 /*
 * For legacy root item, the members starting at generation_v2 will be
 * all filled with 0.
- * And since we allow geneartion_v2 as 0, it will still pass the check.
+ * And since we allow generation_v2 as 0, it will still pass the check.
 */
 read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot),
 btrfs_item_size(leaf, slot));
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index c3cfffc21008..861f96ef28cf 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1816,7 +1816,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
 /*
 * fixup on a directory may create new entries,
- * make sure we always look for the highset possible
+ * make sure we always look for the highest possible
 * offset
 */
 key.offset = (u64)-1;
@@ -3619,7 +3619,7 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
 /*
 * The inode was previously logged and then evicted, set logged_trans to
- * the current transacion's ID, to avoid future tree searches as long as
+ * the current transaction's ID, to avoid future tree searches as long as
 * the inode is not evicted again.
 */
 spin_lock(&inode->lock);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index cc75b4e49662..4f20c25a39c8 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1377,8 +1377,8 @@ struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
 }
 /*
- * Make sure the last byte of label is properly NUL termiated. We use
- * '%s' to print the label, if not properly NUL termiated we can access
+ * Make sure the last byte of label is properly NUL terminated. We use
+ * '%s' to print the label, if not properly NUL terminated we can access
 * beyond the label.
 */
 if (super->label[0] && super->label[BTRFS_LABEL_SIZE - 1])
@@ -4463,7 +4463,7 @@ out_overflow:
 }
 /*
- * Should be called with balance mutexe held
+ * Should be called with balance mutex held
 */
 int btrfs_balance(struct btrfs_fs_info *fs_info,
 struct btrfs_balance_control *bctl,
@@ -7486,7 +7486,7 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
 /*
 * Lockdep complains about possible circular locking dependency between
 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
- * used for freeze procection of a fs (struct super_block.s_writers),
+ * used for freeze protection of a fs (struct super_block.s_writers),
 * which we take when starting a transaction, and extent buffers of the
 * chunk tree if we call read_one_dev() while holding a lock on an
 * extent buffer of the chunk tree. Since we are mounting the filesystem
@@ -7919,8 +7919,6 @@ int btrfs_bg_type_to_factor(u64 flags)
 return btrfs_raid_array[index].ncopies;
 }
-
-
 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
 u64 chunk_offset, u64 devid,
 u64 physical_offset, u64 physical_len)
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index a56e873a3029..2cbf8080eade 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -34,7 +34,7 @@ struct btrfs_zoned_device_info;
 #define BTRFS_MAX_DATA_CHUNK_SIZE (10ULL * SZ_1G)
 /*
- * Arbitratry maximum size of one discard request to limit potentially long time
+ * Arbitrary maximum size of one discard request to limit potentially long time
 * spent in blkdev_issue_discard().
 */
 #define BTRFS_MAX_DISCARD_CHUNK_SIZE (SZ_1G)
@@ -495,7 +495,7 @@ struct btrfs_discard_stripe {
 };
 /*
- * Context for IO subsmission for device stripe.
+ * Context for IO submission for device stripe.
 *
 * - Track the unfinished mirrors for mirror based profiles
 * Mirror based profiles are SINGLE/DUP/RAID1/RAID10.