diff options
Diffstat (limited to 'fs/btrfs/inode.c')
| -rw-r--r-- | fs/btrfs/inode.c | 139 | 
1 file changed, 86 insertions, 53 deletions
| diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c41db6d45ab6..02bb099845fd 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -379,7 +379,8 @@ again:  	 * change at any time if we discover bad compression ratios.  	 */  	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) && -	    btrfs_test_opt(root, COMPRESS)) { +	    (btrfs_test_opt(root, COMPRESS) || +	     (BTRFS_I(inode)->force_compress))) {  		WARN_ON(pages);  		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); @@ -483,8 +484,10 @@ again:  		nr_pages_ret = 0;  		/* flag the file so we don't compress in the future */ -		if (!btrfs_test_opt(root, FORCE_COMPRESS)) +		if (!btrfs_test_opt(root, FORCE_COMPRESS) && +		    !(BTRFS_I(inode)->force_compress)) {  			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; +		}  	}  	if (will_compress) {  		*num_added += 1; @@ -570,8 +573,8 @@ retry:  			unsigned long nr_written = 0;  			lock_extent(io_tree, async_extent->start, -				    async_extent->start + -				    async_extent->ram_size - 1, GFP_NOFS); +					 async_extent->start + +					 async_extent->ram_size - 1, GFP_NOFS);  			/* allocate blocks */  			ret = cow_file_range(inode, async_cow->locked_page, @@ -1211,7 +1214,8 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,  	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)  		ret = run_delalloc_nocow(inode, locked_page, start, end,  					 page_started, 0, nr_written); -	else if (!btrfs_test_opt(root, COMPRESS)) +	else if (!btrfs_test_opt(root, COMPRESS) && +		 !(BTRFS_I(inode)->force_compress))  		ret = cow_file_range(inode, locked_page, start, end,  				      page_started, nr_written, 1);  	else @@ -1508,12 +1512,13 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,  	return 0;  } -int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end) +int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, +			      struct extent_state **cached_state)  {  	if ((end & 
(PAGE_CACHE_SIZE - 1)) == 0)  		WARN_ON(1);  	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, -				   GFP_NOFS); +				   cached_state, GFP_NOFS);  }  /* see btrfs_writepage_start_hook for details on why this is required */ @@ -1526,6 +1531,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)  {  	struct btrfs_writepage_fixup *fixup;  	struct btrfs_ordered_extent *ordered; +	struct extent_state *cached_state = NULL;  	struct page *page;  	struct inode *inode;  	u64 page_start; @@ -1544,7 +1550,8 @@ again:  	page_start = page_offset(page);  	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; -	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS); +	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0, +			 &cached_state, GFP_NOFS);  	/* already ordered? We're done */  	if (PagePrivate2(page)) @@ -1552,17 +1559,18 @@ again:  	ordered = btrfs_lookup_ordered_extent(inode, page_start);  	if (ordered) { -		unlock_extent(&BTRFS_I(inode)->io_tree, page_start, -			      page_end, GFP_NOFS); +		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, +				     page_end, &cached_state, GFP_NOFS);  		unlock_page(page);  		btrfs_start_ordered_extent(inode, ordered, 1);  		goto again;  	} -	btrfs_set_extent_delalloc(inode, page_start, page_end); +	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);  	ClearPageChecked(page);  out: -	unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS); +	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, +			     &cached_state, GFP_NOFS);  out_page:  	unlock_page(page);  	page_cache_release(page); @@ -1691,14 +1699,14 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)  	struct btrfs_trans_handle *trans;  	struct btrfs_ordered_extent *ordered_extent = NULL;  	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; +	struct extent_state *cached_state = NULL;  	int compressed = 0;  	int ret; -	ret 
= btrfs_dec_test_ordered_pending(inode, start, end - start + 1); +	ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, +					     end - start + 1);  	if (!ret)  		return 0; - -	ordered_extent = btrfs_lookup_ordered_extent(inode, start);  	BUG_ON(!ordered_extent);  	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { @@ -1713,9 +1721,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)  		goto out;  	} -	lock_extent(io_tree, ordered_extent->file_offset, -		    ordered_extent->file_offset + ordered_extent->len - 1, -		    GFP_NOFS); +	lock_extent_bits(io_tree, ordered_extent->file_offset, +			 ordered_extent->file_offset + ordered_extent->len - 1, +			 0, &cached_state, GFP_NOFS);  	trans = btrfs_join_transaction(root, 1); @@ -1742,9 +1750,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)  				   ordered_extent->len);  		BUG_ON(ret);  	} -	unlock_extent(io_tree, ordered_extent->file_offset, -		    ordered_extent->file_offset + ordered_extent->len - 1, -		    GFP_NOFS); +	unlock_extent_cached(io_tree, ordered_extent->file_offset, +			     ordered_extent->file_offset + +			     ordered_extent->len - 1, &cached_state, GFP_NOFS); +  	add_pending_csums(trans, inode, ordered_extent->file_offset,  			  &ordered_extent->list); @@ -2153,7 +2162,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)  		found_key.objectid = found_key.offset;  		found_key.type = BTRFS_INODE_ITEM_KEY;  		found_key.offset = 0; -		inode = btrfs_iget(root->fs_info->sb, &found_key, root); +		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);  		if (IS_ERR(inode))  			break; @@ -3081,6 +3090,7 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from)  	struct btrfs_root *root = BTRFS_I(inode)->root;  	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;  	struct btrfs_ordered_extent *ordered; +	struct extent_state *cached_state = NULL;  	char *kaddr;  	u32 blocksize = 
root->sectorsize;  	pgoff_t index = from >> PAGE_CACHE_SHIFT; @@ -3127,12 +3137,14 @@ again:  	}  	wait_on_page_writeback(page); -	lock_extent(io_tree, page_start, page_end, GFP_NOFS); +	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, +			 GFP_NOFS);  	set_page_extent_mapped(page);  	ordered = btrfs_lookup_ordered_extent(inode, page_start);  	if (ordered) { -		unlock_extent(io_tree, page_start, page_end, GFP_NOFS); +		unlock_extent_cached(io_tree, page_start, page_end, +				     &cached_state, GFP_NOFS);  		unlock_page(page);  		page_cache_release(page);  		btrfs_start_ordered_extent(inode, ordered, 1); @@ -3140,13 +3152,15 @@ again:  		goto again;  	} -	clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, +	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,  			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, -			  GFP_NOFS); +			  0, 0, &cached_state, GFP_NOFS); -	ret = btrfs_set_extent_delalloc(inode, page_start, page_end); +	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, +					&cached_state);  	if (ret) { -		unlock_extent(io_tree, page_start, page_end, GFP_NOFS); +		unlock_extent_cached(io_tree, page_start, page_end, +				     &cached_state, GFP_NOFS);  		goto out_unlock;  	} @@ -3159,7 +3173,8 @@ again:  	}  	ClearPageChecked(page);  	set_page_dirty(page); -	unlock_extent(io_tree, page_start, page_end, GFP_NOFS); +	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, +			     GFP_NOFS);  out_unlock:  	if (ret) @@ -3177,6 +3192,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)  	struct btrfs_root *root = BTRFS_I(inode)->root;  	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;  	struct extent_map *em; +	struct extent_state *cached_state = NULL;  	u64 mask = root->sectorsize - 1;  	u64 hole_start = (inode->i_size + mask) & ~mask;  	u64 block_end = (size + mask) & ~mask; @@ -3192,11 +3208,13 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)  		struct 
btrfs_ordered_extent *ordered;  		btrfs_wait_ordered_range(inode, hole_start,  					 block_end - hole_start); -		lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); +		lock_extent_bits(io_tree, hole_start, block_end - 1, 0, +				 &cached_state, GFP_NOFS);  		ordered = btrfs_lookup_ordered_extent(inode, hole_start);  		if (!ordered)  			break; -		unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); +		unlock_extent_cached(io_tree, hole_start, block_end - 1, +				     &cached_state, GFP_NOFS);  		btrfs_put_ordered_extent(ordered);  	} @@ -3241,7 +3259,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)  			break;  	} -	unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); +	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state, +			     GFP_NOFS);  	return err;  } @@ -3639,6 +3658,7 @@ static noinline void init_btrfs_i(struct inode *inode)  	bi->index_cnt = (u64)-1;  	bi->last_unlink_trans = 0;  	bi->ordered_data_close = 0; +	bi->force_compress = 0;  	extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);  	extent_io_tree_init(&BTRFS_I(inode)->io_tree,  			     inode->i_mapping, GFP_NOFS); @@ -3687,7 +3707,7 @@ static struct inode *btrfs_iget_locked(struct super_block *s,   * Returns in *is_new if the inode was read from disk   */  struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, -			 struct btrfs_root *root) +			 struct btrfs_root *root, int *new)  {  	struct inode *inode; @@ -3702,6 +3722,8 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,  		inode_tree_add(inode);  		unlock_new_inode(inode); +		if (new) +			*new = 1;  	}  	return inode; @@ -3754,7 +3776,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)  		return NULL;  	if (location.type == BTRFS_INODE_ITEM_KEY) { -		inode = btrfs_iget(dir->i_sb, &location, root); +		inode = btrfs_iget(dir->i_sb, &location, root, NULL);  		return inode;  	} @@ -3769,7 +3791,7 @@ struct inode 
*btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)  		else  			inode = new_simple_dir(dir->i_sb, &location, sub_root);  	} else { -		inode = btrfs_iget(dir->i_sb, &location, sub_root); +		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);  	}  	srcu_read_unlock(&root->fs_info->subvol_srcu, index); @@ -4501,7 +4523,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)  	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);  	if (err) {  		err = -ENOSPC; -		goto out_unlock; +		goto out_fail;  	}  	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, @@ -4979,6 +5001,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)  {  	struct extent_io_tree *tree;  	struct btrfs_ordered_extent *ordered; +	struct extent_state *cached_state = NULL;  	u64 page_start = page_offset(page);  	u64 page_end = page_start + PAGE_CACHE_SIZE - 1; @@ -4997,7 +5020,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)  		btrfs_releasepage(page, GFP_NOFS);  		return;  	} -	lock_extent(tree, page_start, page_end, GFP_NOFS); +	lock_extent_bits(tree, page_start, page_end, 0, &cached_state, +			 GFP_NOFS);  	ordered = btrfs_lookup_ordered_extent(page->mapping->host,  					   page_offset(page));  	if (ordered) { @@ -5008,7 +5032,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)  		clear_extent_bit(tree, page_start, page_end,  				 EXTENT_DIRTY | EXTENT_DELALLOC |  				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0, -				 NULL, GFP_NOFS); +				 &cached_state, GFP_NOFS);  		/*  		 * whoever cleared the private bit is responsible  		 * for the finish_ordered_io @@ -5018,11 +5042,13 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)  						page_start, page_end);  		}  		btrfs_put_ordered_extent(ordered); -		lock_extent(tree, page_start, page_end, GFP_NOFS); +		cached_state = NULL; +		lock_extent_bits(tree, page_start, page_end, 0, 
&cached_state, +				 GFP_NOFS);  	}  	clear_extent_bit(tree, page_start, page_end,  		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | -		 EXTENT_DO_ACCOUNTING, 1, 1, NULL, GFP_NOFS); +		 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);  	__btrfs_releasepage(page, GFP_NOFS);  	ClearPageChecked(page); @@ -5055,6 +5081,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)  	struct btrfs_root *root = BTRFS_I(inode)->root;  	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;  	struct btrfs_ordered_extent *ordered; +	struct extent_state *cached_state = NULL;  	char *kaddr;  	unsigned long zero_start;  	loff_t size; @@ -5093,7 +5120,8 @@ again:  	}  	wait_on_page_writeback(page); -	lock_extent(io_tree, page_start, page_end, GFP_NOFS); +	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, +			 GFP_NOFS);  	set_page_extent_mapped(page);  	/* @@ -5102,7 +5130,8 @@ again:  	 */  	ordered = btrfs_lookup_ordered_extent(inode, page_start);  	if (ordered) { -		unlock_extent(io_tree, page_start, page_end, GFP_NOFS); +		unlock_extent_cached(io_tree, page_start, page_end, +				     &cached_state, GFP_NOFS);  		unlock_page(page);  		btrfs_start_ordered_extent(inode, ordered, 1);  		btrfs_put_ordered_extent(ordered); @@ -5116,13 +5145,15 @@ again:  	 * is probably a better way to do this, but for now keep consistent with  	 * prepare_pages in the normal write path.  	 
*/ -	clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, +	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,  			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, -			  GFP_NOFS); +			  0, 0, &cached_state, GFP_NOFS); -	ret = btrfs_set_extent_delalloc(inode, page_start, page_end); +	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, +					&cached_state);  	if (ret) { -		unlock_extent(io_tree, page_start, page_end, GFP_NOFS); +		unlock_extent_cached(io_tree, page_start, page_end, +				     &cached_state, GFP_NOFS);  		ret = VM_FAULT_SIGBUS;  		btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);  		goto out_unlock; @@ -5148,7 +5179,7 @@ again:  	BTRFS_I(inode)->last_trans = root->fs_info->generation;  	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; -	unlock_extent(io_tree, page_start, page_end, GFP_NOFS); +	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);  out_unlock:  	btrfs_unreserve_metadata_for_delalloc(root, inode, 1); @@ -5827,6 +5858,7 @@ stop_trans:  static long btrfs_fallocate(struct inode *inode, int mode,  			    loff_t offset, loff_t len)  { +	struct extent_state *cached_state = NULL;  	u64 cur_offset;  	u64 last_byte;  	u64 alloc_start; @@ -5865,16 +5897,17 @@ static long btrfs_fallocate(struct inode *inode, int mode,  		/* the extent lock is ordered inside the running  		 * transaction  		 */ -		lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, -			    GFP_NOFS); +		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, +				 locked_end, 0, &cached_state, GFP_NOFS);  		ordered = btrfs_lookup_first_ordered_extent(inode,  							    alloc_end - 1);  		if (ordered &&  		    ordered->file_offset + ordered->len > alloc_start &&  		    ordered->file_offset < alloc_end) {  			btrfs_put_ordered_extent(ordered); -			unlock_extent(&BTRFS_I(inode)->io_tree, -				      alloc_start, locked_end, GFP_NOFS); +			
unlock_extent_cached(&BTRFS_I(inode)->io_tree, +					     alloc_start, locked_end, +					     &cached_state, GFP_NOFS);  			/*  			 * we can't wait on the range with the transaction  			 * running or with the extent lock held @@ -5916,8 +5949,8 @@ static long btrfs_fallocate(struct inode *inode, int mode,  			break;  		}  	} -	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, -		      GFP_NOFS); +	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, +			     &cached_state, GFP_NOFS);  	btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode,  				       alloc_end - alloc_start); | 
