Commit 855a85f7 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Quoth Chris:
 "This is later than I wanted because I got backed up running through
  btrfs bugs from the Oracle QA teams.  But they are all bug fixes that
  we've queued and tested since rc1.

  Nothing in particular stands out, this just reflects bug fixing and QA
  done in parallel by all the btrfs developers.  The most user visible
  of these is:

    Btrfs: clear the extent uptodate bits during parent transid failures

  Because that helps deal with out of date drives (say an iscsi disk
  that has gone away and come back).  The old code wasn't always
  properly retrying the other mirror for this type of failure."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (24 commits)
  Btrfs: fix compiler warnings on 32 bit systems
  Btrfs: increase the global block reserve estimates
  Btrfs: clear the extent uptodate bits during parent transid failures
  Btrfs: add extra sanity checks on the path names in btrfs_mksubvol
  Btrfs: make sure we update latest_bdev
  Btrfs: improve error handling for btrfs_insert_dir_item callers
  Btrfs: be less strict on finding next node in clear_extent_bit
  Btrfs: fix a bug on overcommit stuff
  Btrfs: kick out redundant stuff in convert_extent_bit
  Btrfs: skip states when they does not contain bits to clear
  Btrfs: check return value of lookup_extent_mapping() correctly
  Btrfs: fix deadlock on page lock when doing auto-defragment
  Btrfs: fix return value check of extent_io_ops
  btrfs: honor umask when creating subvol root
  btrfs: silence warning in raid array setup
  btrfs: fix structs where bitfields and spinlock/atomic share 8B word
  btrfs: delalloc for page dirtied out-of-band in fixup worker
  Btrfs: fix memory leak in load_free_space_cache()
  btrfs: don't check DUP chunks twice
  Btrfs: fix trim 0 bytes after a device delete
  ...
parents ee325324 e77266e4
...@@ -892,6 +892,8 @@ static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, ...@@ -892,6 +892,8 @@ static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
if (eb != eb_in) if (eb != eb_in)
free_extent_buffer(eb); free_extent_buffer(eb);
ret = inode_ref_info(parent, 0, fs_root, path, &found_key); ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
if (ret > 0)
ret = -ENOENT;
if (ret) if (ret)
break; break;
next_inum = found_key.offset; next_inum = found_key.offset;
......
...@@ -644,7 +644,7 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup( ...@@ -644,7 +644,7 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
static int btrfsic_process_superblock(struct btrfsic_state *state, static int btrfsic_process_superblock(struct btrfsic_state *state,
struct btrfs_fs_devices *fs_devices) struct btrfs_fs_devices *fs_devices)
{ {
int ret; int ret = 0;
struct btrfs_super_block *selected_super; struct btrfs_super_block *selected_super;
struct list_head *dev_head = &fs_devices->devices; struct list_head *dev_head = &fs_devices->devices;
struct btrfs_device *device; struct btrfs_device *device;
......
...@@ -588,6 +588,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, ...@@ -588,6 +588,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
page_offset(bio->bi_io_vec->bv_page), page_offset(bio->bi_io_vec->bv_page),
PAGE_CACHE_SIZE); PAGE_CACHE_SIZE);
read_unlock(&em_tree->lock); read_unlock(&em_tree->lock);
if (!em)
return -EIO;
compressed_len = em->block_len; compressed_len = em->block_len;
cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
......
...@@ -886,7 +886,7 @@ struct btrfs_block_rsv { ...@@ -886,7 +886,7 @@ struct btrfs_block_rsv {
u64 reserved; u64 reserved;
struct btrfs_space_info *space_info; struct btrfs_space_info *space_info;
spinlock_t lock; spinlock_t lock;
unsigned int full:1; unsigned int full;
}; };
/* /*
......
...@@ -2260,6 +2260,12 @@ int open_ctree(struct super_block *sb, ...@@ -2260,6 +2260,12 @@ int open_ctree(struct super_block *sb,
goto fail_sb_buffer; goto fail_sb_buffer;
} }
if (sectorsize < PAGE_SIZE) {
printk(KERN_WARNING "btrfs: Incompatible sector size "
"found on %s\n", sb->s_id);
goto fail_sb_buffer;
}
mutex_lock(&fs_info->chunk_mutex); mutex_lock(&fs_info->chunk_mutex);
ret = btrfs_read_sys_array(tree_root); ret = btrfs_read_sys_array(tree_root);
mutex_unlock(&fs_info->chunk_mutex); mutex_unlock(&fs_info->chunk_mutex);
...@@ -2301,6 +2307,12 @@ int open_ctree(struct super_block *sb, ...@@ -2301,6 +2307,12 @@ int open_ctree(struct super_block *sb,
btrfs_close_extra_devices(fs_devices); btrfs_close_extra_devices(fs_devices);
if (!fs_devices->latest_bdev) {
printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
sb->s_id);
goto fail_tree_roots;
}
retry_root_backup: retry_root_backup:
blocksize = btrfs_level_size(tree_root, blocksize = btrfs_level_size(tree_root,
btrfs_super_root_level(disk_super)); btrfs_super_root_level(disk_super));
......
...@@ -3312,7 +3312,8 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes) ...@@ -3312,7 +3312,8 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
} }
data_sinfo->bytes_may_use += bytes; data_sinfo->bytes_may_use += bytes;
trace_btrfs_space_reservation(root->fs_info, "space_info", trace_btrfs_space_reservation(root->fs_info, "space_info",
(u64)data_sinfo, bytes, 1); (u64)(unsigned long)data_sinfo,
bytes, 1);
spin_unlock(&data_sinfo->lock); spin_unlock(&data_sinfo->lock);
return 0; return 0;
...@@ -3333,7 +3334,8 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes) ...@@ -3333,7 +3334,8 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
spin_lock(&data_sinfo->lock); spin_lock(&data_sinfo->lock);
data_sinfo->bytes_may_use -= bytes; data_sinfo->bytes_may_use -= bytes;
trace_btrfs_space_reservation(root->fs_info, "space_info", trace_btrfs_space_reservation(root->fs_info, "space_info",
(u64)data_sinfo, bytes, 0); (u64)(unsigned long)data_sinfo,
bytes, 0);
spin_unlock(&data_sinfo->lock); spin_unlock(&data_sinfo->lock);
} }
...@@ -3611,12 +3613,15 @@ static int may_commit_transaction(struct btrfs_root *root, ...@@ -3611,12 +3613,15 @@ static int may_commit_transaction(struct btrfs_root *root,
if (space_info != delayed_rsv->space_info) if (space_info != delayed_rsv->space_info)
return -ENOSPC; return -ENOSPC;
spin_lock(&space_info->lock);
spin_lock(&delayed_rsv->lock); spin_lock(&delayed_rsv->lock);
if (delayed_rsv->size < bytes) { if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
spin_unlock(&delayed_rsv->lock); spin_unlock(&delayed_rsv->lock);
spin_unlock(&space_info->lock);
return -ENOSPC; return -ENOSPC;
} }
spin_unlock(&delayed_rsv->lock); spin_unlock(&delayed_rsv->lock);
spin_unlock(&space_info->lock);
commit: commit:
trans = btrfs_join_transaction(root); trans = btrfs_join_transaction(root);
...@@ -3695,9 +3700,9 @@ static int reserve_metadata_bytes(struct btrfs_root *root, ...@@ -3695,9 +3700,9 @@ static int reserve_metadata_bytes(struct btrfs_root *root,
if (used + orig_bytes <= space_info->total_bytes) { if (used + orig_bytes <= space_info->total_bytes) {
space_info->bytes_may_use += orig_bytes; space_info->bytes_may_use += orig_bytes;
trace_btrfs_space_reservation(root->fs_info, trace_btrfs_space_reservation(root->fs_info,
"space_info", "space_info",
(u64)space_info, (u64)(unsigned long)space_info,
orig_bytes, 1); orig_bytes, 1);
ret = 0; ret = 0;
} else { } else {
/* /*
...@@ -3766,9 +3771,9 @@ static int reserve_metadata_bytes(struct btrfs_root *root, ...@@ -3766,9 +3771,9 @@ static int reserve_metadata_bytes(struct btrfs_root *root,
if (used + num_bytes < space_info->total_bytes + avail) { if (used + num_bytes < space_info->total_bytes + avail) {
space_info->bytes_may_use += orig_bytes; space_info->bytes_may_use += orig_bytes;
trace_btrfs_space_reservation(root->fs_info, trace_btrfs_space_reservation(root->fs_info,
"space_info", "space_info",
(u64)space_info, (u64)(unsigned long)space_info,
orig_bytes, 1); orig_bytes, 1);
ret = 0; ret = 0;
} else { } else {
wait_ordered = true; wait_ordered = true;
...@@ -3913,8 +3918,8 @@ static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info, ...@@ -3913,8 +3918,8 @@ static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
spin_lock(&space_info->lock); spin_lock(&space_info->lock);
space_info->bytes_may_use -= num_bytes; space_info->bytes_may_use -= num_bytes;
trace_btrfs_space_reservation(fs_info, "space_info", trace_btrfs_space_reservation(fs_info, "space_info",
(u64)space_info, (u64)(unsigned long)space_info,
num_bytes, 0); num_bytes, 0);
space_info->reservation_progress++; space_info->reservation_progress++;
spin_unlock(&space_info->lock); spin_unlock(&space_info->lock);
} }
...@@ -4105,7 +4110,7 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info) ...@@ -4105,7 +4110,7 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
num_bytes += div64_u64(data_used + meta_used, 50); num_bytes += div64_u64(data_used + meta_used, 50);
if (num_bytes * 3 > meta_used) if (num_bytes * 3 > meta_used)
num_bytes = div64_u64(meta_used, 3); num_bytes = div64_u64(meta_used, 3) * 2;
return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10); return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
} }
...@@ -4132,14 +4137,14 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info) ...@@ -4132,14 +4137,14 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
block_rsv->reserved += num_bytes; block_rsv->reserved += num_bytes;
sinfo->bytes_may_use += num_bytes; sinfo->bytes_may_use += num_bytes;
trace_btrfs_space_reservation(fs_info, "space_info", trace_btrfs_space_reservation(fs_info, "space_info",
(u64)sinfo, num_bytes, 1); (u64)(unsigned long)sinfo, num_bytes, 1);
} }
if (block_rsv->reserved >= block_rsv->size) { if (block_rsv->reserved >= block_rsv->size) {
num_bytes = block_rsv->reserved - block_rsv->size; num_bytes = block_rsv->reserved - block_rsv->size;
sinfo->bytes_may_use -= num_bytes; sinfo->bytes_may_use -= num_bytes;
trace_btrfs_space_reservation(fs_info, "space_info", trace_btrfs_space_reservation(fs_info, "space_info",
(u64)sinfo, num_bytes, 0); (u64)(unsigned long)sinfo, num_bytes, 0);
sinfo->reservation_progress++; sinfo->reservation_progress++;
block_rsv->reserved = block_rsv->size; block_rsv->reserved = block_rsv->size;
block_rsv->full = 1; block_rsv->full = 1;
...@@ -4192,7 +4197,8 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans, ...@@ -4192,7 +4197,8 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
if (!trans->bytes_reserved) if (!trans->bytes_reserved)
return; return;
trace_btrfs_space_reservation(root->fs_info, "transaction", (u64)trans, trace_btrfs_space_reservation(root->fs_info, "transaction",
(u64)(unsigned long)trans,
trans->bytes_reserved, 0); trans->bytes_reserved, 0);
btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved); btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
trans->bytes_reserved = 0; trans->bytes_reserved = 0;
...@@ -4710,9 +4716,9 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, ...@@ -4710,9 +4716,9 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
space_info->bytes_reserved += num_bytes; space_info->bytes_reserved += num_bytes;
if (reserve == RESERVE_ALLOC) { if (reserve == RESERVE_ALLOC) {
trace_btrfs_space_reservation(cache->fs_info, trace_btrfs_space_reservation(cache->fs_info,
"space_info", "space_info",
(u64)space_info, (u64)(unsigned long)space_info,
num_bytes, 0); num_bytes, 0);
space_info->bytes_may_use -= num_bytes; space_info->bytes_may_use -= num_bytes;
} }
} }
...@@ -7886,9 +7892,16 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range) ...@@ -7886,9 +7892,16 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
u64 start; u64 start;
u64 end; u64 end;
u64 trimmed = 0; u64 trimmed = 0;
u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
int ret = 0; int ret = 0;
cache = btrfs_lookup_block_group(fs_info, range->start); /*
* try to trim all FS space, our block group may start from non-zero.
*/
if (range->len == total_bytes)
cache = btrfs_lookup_first_block_group(fs_info, range->start);
else
cache = btrfs_lookup_block_group(fs_info, range->start);
while (cache) { while (cache) {
if (cache->key.objectid >= (range->start + range->len)) { if (cache->key.objectid >= (range->start + range->len)) {
......
...@@ -513,6 +513,15 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, ...@@ -513,6 +513,15 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
WARN_ON(state->end < start); WARN_ON(state->end < start);
last_end = state->end; last_end = state->end;
if (state->end < end && !need_resched())
next_node = rb_next(&state->rb_node);
else
next_node = NULL;
/* the state doesn't have the wanted bits, go ahead */
if (!(state->state & bits))
goto next;
/* /*
* | ---- desired range ---- | * | ---- desired range ---- |
* | state | or * | state | or
...@@ -565,20 +574,15 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, ...@@ -565,20 +574,15 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
goto out; goto out;
} }
if (state->end < end && prealloc && !need_resched())
next_node = rb_next(&state->rb_node);
else
next_node = NULL;
set |= clear_state_bit(tree, state, &bits, wake); set |= clear_state_bit(tree, state, &bits, wake);
next:
if (last_end == (u64)-1) if (last_end == (u64)-1)
goto out; goto out;
start = last_end + 1; start = last_end + 1;
if (start <= end && next_node) { if (start <= end && next_node) {
state = rb_entry(next_node, struct extent_state, state = rb_entry(next_node, struct extent_state,
rb_node); rb_node);
if (state->start == start) goto hit_next;
goto hit_next;
} }
goto search_again; goto search_again;
...@@ -961,8 +965,6 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, ...@@ -961,8 +965,6 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
set_state_bits(tree, state, &bits); set_state_bits(tree, state, &bits);
clear_state_bit(tree, state, &clear_bits, 0); clear_state_bit(tree, state, &clear_bits, 0);
merge_state(tree, state);
if (last_end == (u64)-1) if (last_end == (u64)-1)
goto out; goto out;
...@@ -1007,7 +1009,6 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, ...@@ -1007,7 +1009,6 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
if (state->end <= end) { if (state->end <= end) {
set_state_bits(tree, state, &bits); set_state_bits(tree, state, &bits);
clear_state_bit(tree, state, &clear_bits, 0); clear_state_bit(tree, state, &clear_bits, 0);
merge_state(tree, state);
if (last_end == (u64)-1) if (last_end == (u64)-1)
goto out; goto out;
start = last_end + 1; start = last_end + 1;
...@@ -1068,8 +1069,6 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, ...@@ -1068,8 +1069,6 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
set_state_bits(tree, prealloc, &bits); set_state_bits(tree, prealloc, &bits);
clear_state_bit(tree, prealloc, &clear_bits, 0); clear_state_bit(tree, prealloc, &clear_bits, 0);
merge_state(tree, prealloc);
prealloc = NULL; prealloc = NULL;
goto out; goto out;
} }
...@@ -2154,13 +2153,46 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page, ...@@ -2154,13 +2153,46 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page,
"this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode, "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
failrec->this_mirror, num_copies, failrec->in_validation); failrec->this_mirror, num_copies, failrec->in_validation);
tree->ops->submit_bio_hook(inode, read_mode, bio, failrec->this_mirror, ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
failrec->bio_flags, 0); failrec->this_mirror,
return 0; failrec->bio_flags, 0);
return ret;
} }
/* lots and lots of room for performance fixes in the end_bio funcs */ /* lots and lots of room for performance fixes in the end_bio funcs */
int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
{
int uptodate = (err == 0);
struct extent_io_tree *tree;
int ret;
tree = &BTRFS_I(page->mapping->host)->io_tree;
if (tree->ops && tree->ops->writepage_end_io_hook) {
ret = tree->ops->writepage_end_io_hook(page, start,
end, NULL, uptodate);
if (ret)
uptodate = 0;
}
if (!uptodate && tree->ops &&
tree->ops->writepage_io_failed_hook) {
ret = tree->ops->writepage_io_failed_hook(NULL, page,
start, end, NULL);
/* Writeback already completed */
if (ret == 0)
return 1;
}
if (!uptodate) {
clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
ClearPageUptodate(page);
SetPageError(page);
}
return 0;
}
/* /*
* after a writepage IO is done, we need to: * after a writepage IO is done, we need to:
* clear the uptodate bits on error * clear the uptodate bits on error
...@@ -2172,13 +2204,11 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page, ...@@ -2172,13 +2204,11 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page,
*/ */
static void end_bio_extent_writepage(struct bio *bio, int err) static void end_bio_extent_writepage(struct bio *bio, int err)
{ {
int uptodate = err == 0;
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
struct extent_io_tree *tree; struct extent_io_tree *tree;
u64 start; u64 start;
u64 end; u64 end;
int whole_page; int whole_page;
int ret;
do { do {
struct page *page = bvec->bv_page; struct page *page = bvec->bv_page;
...@@ -2195,28 +2225,9 @@ static void end_bio_extent_writepage(struct bio *bio, int err) ...@@ -2195,28 +2225,9 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
if (--bvec >= bio->bi_io_vec) if (--bvec >= bio->bi_io_vec)
prefetchw(&bvec->bv_page->flags); prefetchw(&bvec->bv_page->flags);
if (tree->ops && tree->ops->writepage_end_io_hook) {
ret = tree->ops->writepage_end_io_hook(page, start,
end, NULL, uptodate);
if (ret)
uptodate = 0;
}
if (!uptodate && tree->ops &&
tree->ops->writepage_io_failed_hook) {
ret = tree->ops->writepage_io_failed_hook(bio, page,
start, end, NULL);
if (ret == 0) {
uptodate = (err == 0);
continue;
}
}
if (!uptodate) { if (end_extent_writepage(page, err, start, end))
clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS); continue;
ClearPageUptodate(page);
SetPageError(page);
}
if (whole_page) if (whole_page)
end_page_writeback(page); end_page_writeback(page);
...@@ -2779,9 +2790,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, ...@@ -2779,9 +2790,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
delalloc_start = delalloc_end + 1; delalloc_start = delalloc_end + 1;
continue; continue;
} }
tree->ops->fill_delalloc(inode, page, delalloc_start, ret = tree->ops->fill_delalloc(inode, page,
delalloc_end, &page_started, delalloc_start,
&nr_written); delalloc_end,
&page_started,
&nr_written);
BUG_ON(ret);
/* /*
* delalloc_end is already one less than the total * delalloc_end is already one less than the total
* length, so we don't subtract one from * length, so we don't subtract one from
...@@ -2818,8 +2832,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, ...@@ -2818,8 +2832,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
if (tree->ops && tree->ops->writepage_start_hook) { if (tree->ops && tree->ops->writepage_start_hook) {
ret = tree->ops->writepage_start_hook(page, start, ret = tree->ops->writepage_start_hook(page, start,
page_end); page_end);
if (ret == -EAGAIN) { if (ret) {
redirty_page_for_writepage(wbc, page); /* Fixup worker will requeue */
if (ret == -EBUSY)
wbc->pages_skipped++;
else
redirty_page_for_writepage(wbc, page);
update_nr_written(page, wbc, nr_written); update_nr_written(page, wbc, nr_written);
unlock_page(page); unlock_page(page);
ret = 0; ret = 0;
...@@ -3289,7 +3307,7 @@ int try_release_extent_mapping(struct extent_map_tree *map, ...@@ -3289,7 +3307,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
len = end - start + 1; len = end - start + 1;
write_lock(&map->lock); write_lock(&map->lock);
em = lookup_extent_mapping(map, start, len); em = lookup_extent_mapping(map, start, len);
if (IS_ERR_OR_NULL(em)) { if (!em) {
write_unlock(&map->lock); write_unlock(&map->lock);
break; break;
} }
...@@ -3853,10 +3871,9 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree, ...@@ -3853,10 +3871,9 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
num_pages = num_extent_pages(eb->start, eb->len); num_pages = num_extent_pages(eb->start, eb->len);
clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
if (eb_straddles_pages(eb)) { clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, cached_state, GFP_NOFS);
cached_state, GFP_NOFS);
}
for (i = 0; i < num_pages; i++) { for (i = 0; i < num_pages; i++) {
page = extent_buffer_page(eb, i); page = extent_buffer_page(eb, i);
if (page) if (page)
......
...@@ -319,4 +319,5 @@ struct btrfs_mapping_tree; ...@@ -319,4 +319,5 @@ struct btrfs_mapping_tree;
int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start, int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
u64 length, u64 logical, struct page *page, u64 length, u64 logical, struct page *page,
int mirror_num); int mirror_num);
int end_extent_writepage(struct page *page, int err, u64 start, u64 end);
#endif #endif
...@@ -26,8 +26,8 @@ struct extent_map { ...@@ -26,8 +26,8 @@ struct extent_map {
unsigned long flags; unsigned long flags;
struct block_device *bdev; struct block_device *bdev;
atomic_t refs; atomic_t refs;
unsigned int in_tree:1; unsigned int in_tree;
unsigned int compress_type:4; unsigned int compress_type;
}; };
struct extent_map_tree { struct extent_map_tree {
......
...@@ -1604,6 +1604,14 @@ static long btrfs_fallocate(struct file *file, int mode, ...@@ -1604,6 +1604,14 @@ static long btrfs_fallocate(struct file *file, int mode,
if (mode & ~FALLOC_FL_KEEP_SIZE) if (mode & ~FALLOC_FL_KEEP_SIZE)
return -EOPNOTSUPP; return -EOPNOTSUPP;
/*
* Make sure we have enough space before we do the
* allocation.
*/
ret = btrfs_check_data_free_space(inode, len);
if (ret)
return ret;
/* /*