```diff
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index fc6401a..6016500 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -120,30 +120,26 @@ static int btrfs_dirty_inode(struct inode *inode);
 
 static void __endio_write_update_ordered(struct inode *inode,
                                          u64 offset, u64 bytes,
-                                         bool uptodate, bool cleanup);
-
-static inline void btrfs_endio_direct_write_update_ordered(struct inode *inode,
-                                                           u64 offset,
-                                                           u64 bytes,
-                                                           bool uptodate)
-{
-        return __endio_write_update_ordered(inode, offset, bytes, uptodate, false);
-}
+                                         bool uptodate);
 
 /*
- * Cleanup all submitted ordered extents in specified range to handle error
- * in cow_file_range() and run_delalloc_nocow().
- * Compression handles error and ordered extent submission by itself,
- * so no need to call this function.
+ * Cleanup all submitted ordered extents in specified range to handle errors
+ * from the fill_dellaloc() callback.
  *
- * NOTE: caller must ensure extent_clear_unlock_delalloc() in error handler
- * doesn't cover any range of submitted ordered extent.
- * Or we will double free metadata for submitted ordered extent.
+ * NOTE: caller must ensure that when an error happens, it can not call
+ * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
+ * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
+ * to be released, which we want to happen only when finishing the ordered
+ * extent (btrfs_finish_ordered_io()). Also note that the caller of the
+ * fill_dealloc() callback already does proper cleanup for the first page of
+ * the range, that is, it invokes the callback writepage_end_io_hook() for the
+ * range of the first page.
  */
 static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
                                                  u64 offset, u64 bytes)
 {
-        return __endio_write_update_ordered(inode, offset, bytes, false, true);
+        return __endio_write_update_ordered(inode, offset + PAGE_SIZE,
+                                            bytes - PAGE_SIZE, false);
 }
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
@@ -958,7 +954,6 @@ static noinline int cow_file_range(struct inode *inode,
         u64 disk_num_bytes;
         u64 cur_alloc_size;
         u64 blocksize = fs_info->sectorsize;
-        u64 orig_start = start;
         struct btrfs_key ins;
         struct extent_map *em;
         int ret = 0;
@@ -1100,7 +1095,6 @@ static noinline int cow_file_range(struct inode *inode,
                                      EXTENT_DELALLOC | EXTENT_DEFRAG,
                                      PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                      PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
-        btrfs_cleanup_ordered_extents(inode, orig_start, end - orig_start + 1);
         goto out;
 }
 
@@ -1526,8 +1520,6 @@ static noinline int run_delalloc_nocow(struct inode *inode,
                                              PAGE_CLEAR_DIRTY |
                                              PAGE_SET_WRITEBACK |
                                              PAGE_END_WRITEBACK);
-        if (ret)
-                btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
         btrfs_free_path(path);
         return ret;
 }
@@ -1577,6 +1569,8 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
                 ret = cow_file_range_async(inode, locked_page, start, end,
                                            page_started, nr_written);
         }
+        if (ret)
+                btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
         return ret;
 }
 
@@ -8176,7 +8170,7 @@ static void btrfs_endio_direct_read(struct bio *bio)
 
 static void __endio_write_update_ordered(struct inode *inode,
                                          u64 offset, u64 bytes,
-                                         bool uptodate, bool cleanup)
+                                         bool uptodate)
 {
         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
         struct btrfs_ordered_extent *ordered = NULL;
@@ -8194,16 +8188,6 @@ static void __endio_write_update_ordered(struct inode *inode,
                 func = btrfs_endio_write_helper;
         }
 
-        /*
-         * In cleanup case, the first page of the range will be handled
-         * by end_extent_writepage() when called from __extent_writepage()
-         *
-         * So we must skip first page, or we will underflow ordered->bytes_left
-         */
-        if (cleanup) {
-                ordered_offset += PAGE_SIZE;
-                ordered_bytes -= PAGE_SIZE;
-        }
 again:
         ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
                                                    &ordered_offset,
@@ -8231,10 +8215,10 @@ static void btrfs_endio_direct_write(struct bio *bio)
         struct btrfs_dio_private *dip = bio->bi_private;
         struct bio *dio_bio = dip->dio_bio;
 
-        btrfs_endio_direct_write_update_ordered(dip->inode,
-                                                dip->logical_offset,
-                                                dip->bytes,
-                                                !bio->bi_error);
+        __endio_write_update_ordered(dip->inode,
+                                     dip->logical_offset,
+                                     dip->bytes,
+                                     !bio->bi_error);
 
         kfree(dip);
 
@@ -8595,10 +8579,10 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
                 io_bio = NULL;
         } else {
                 if (write)
-                        btrfs_endio_direct_write_update_ordered(inode,
+                        __endio_write_update_ordered(inode,
                                                 file_offset,
                                                 dio_bio->bi_iter.bi_size,
-                                                0);
+                                                false);
                 else
                         unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
                                       file_offset + dio_bio->bi_iter.bi_size - 1);
@@ -8733,11 +8717,11 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
                          */
                         if (dio_data.unsubmitted_oe_range_start <
                             dio_data.unsubmitted_oe_range_end)
-                                btrfs_endio_direct_write_update_ordered(inode,
+                                __endio_write_update_ordered(inode,
                                         dio_data.unsubmitted_oe_range_start,
                                         dio_data.unsubmitted_oe_range_end -
                                         dio_data.unsubmitted_oe_range_start,
-                                        0);
+                                        false);
                 } else if (ret >= 0 && (size_t)ret < count)
                         btrfs_delalloc_release_space(inode, offset,
                                                      count - (size_t)ret);
```
...
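For context, here is a minimal user-space sketch of the range adjustment the patched `btrfs_cleanup_ordered_extents()` performs: on a delalloc error, the first page of the failed range is skipped because the caller of the fill_delalloc callback has already cleaned it up via `writepage_end_io_hook()`, so accounting it again would be a double release. This sketch is not part of the patch and is not kernel code; the `PAGE_SIZE` value of 4096, the `struct range` type, and the `cleanup_range()` helper are assumptions made purely for illustration.

```c
/*
 * Stand-alone illustration (not kernel code) of the offset/length
 * adjustment done by btrfs_cleanup_ordered_extents() in this patch:
 * skip the first page of the failed delalloc range, since it was
 * already handled for the caller, and clean up only the remainder.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL   /* assumed page size for the example */

struct range {
	uint64_t offset;   /* start of the range to clean up, in bytes */
	uint64_t bytes;    /* length of the range to clean up, in bytes */
};

/* Mirror of the arithmetic in the patched helper: drop the first page. */
static struct range cleanup_range(uint64_t offset, uint64_t bytes)
{
	struct range r = {
		.offset = offset + PAGE_SIZE,
		.bytes  = bytes - PAGE_SIZE,
	};
	return r;
}

int main(void)
{
	/* e.g. a failed 1 MiB delalloc run starting at file offset 0 */
	struct range r = cleanup_range(0, 1024 * 1024);

	printf("clean up ordered extents in [%llu, %llu)\n",
	       (unsigned long long)r.offset,
	       (unsigned long long)(r.offset + r.bytes));
	return 0;
}
```

This mirrors why the cleanup call now lives in `run_delalloc_range()`: that caller sees the whole range and its error code once, and hands the helper a range that excludes the already-finalized first page.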