Commit ed8f3737 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable: (31 commits)
  Btrfs: don't call writepages from within write_full_page
  Btrfs: Remove unused variable 'last_index' in file.c
  Btrfs: clean up for find_first_extent_bit()
  Btrfs: clean up for wait_extent_bit()
  Btrfs: clean up for insert_state()
  Btrfs: remove unused members from struct extent_state
  Btrfs: clean up code for merging extent maps
  Btrfs: clean up code for extent_map lookup
  Btrfs: clean up search_extent_mapping()
  Btrfs: remove redundant code for dir item lookup
  Btrfs: make acl functions really no-op if acl is not enabled
  Btrfs: remove remaining ref-cache code
  Btrfs: remove a BUG_ON() in btrfs_commit_transaction()
  Btrfs: use wait_event()
  Btrfs: check the nodatasum flag when writing compressed files
  Btrfs: copy string correctly in INO_LOOKUP ioctl
  Btrfs: don't print the leaf if we had an error
  btrfs: make btrfs_set_root_node void
  Btrfs: fix oops while writing data to SSD partitions
  Btrfs: Protect the readonly flag of block group
  ...

Fix up trivial conflicts (due to acl and writeback cleanups) in
 - fs/btrfs/acl.c
 - fs/btrfs/ctree.h
 - fs/btrfs/extent_io.c
parents a6b11f53 0d10ee2e
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -6,5 +6,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
transaction.o inode.o file.o tree-defrag.o \
extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
-export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \
+export.o tree-log.o free-space-cache.o zlib.o lzo.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o
+btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -28,8 +28,6 @@
#include "btrfs_inode.h"
#include "xattr.h"
-#ifdef CONFIG_BTRFS_FS_POSIX_ACL
struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
{
int size;
@@ -276,18 +274,3 @@ const struct xattr_handler btrfs_xattr_acl_access_handler = {
.get = btrfs_xattr_acl_get,
.set = btrfs_xattr_acl_set,
};
-#else /* CONFIG_BTRFS_FS_POSIX_ACL */
-int btrfs_acl_chmod(struct inode *inode)
-{
-return 0;
-}
-int btrfs_init_acl(struct btrfs_trans_handle *trans,
-struct inode *inode, struct inode *dir)
-{
-return 0;
-}
-#endif /* CONFIG_BTRFS_FS_POSIX_ACL */
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -338,6 +338,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
u64 first_byte = disk_start;
struct block_device *bdev;
int ret;
+int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
@@ -392,8 +393,11 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
BUG_ON(ret);
-ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
-BUG_ON(ret);
+if (!skip_sum) {
+ret = btrfs_csum_one_bio(root, inode, bio,
+start, 1);
+BUG_ON(ret);
+}
ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
BUG_ON(ret);
@@ -418,8 +422,10 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
BUG_ON(ret);
-ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
-BUG_ON(ret);
+if (!skip_sum) {
+ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
+BUG_ON(ret);
+}
ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
BUG_ON(ret);
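
The two compression.c hunks above (from "Btrfs: check the nodatasum flag when
writing compressed files") read the per-inode NODATASUM flag once into
skip_sum and gate the checksum step on it. A minimal userspace sketch of the
same flag-gating shape -- the types, flag bit, and helper names below are
illustrative stand-ins, not the btrfs API:

#include <stdint.h>
#include <stdio.h>

#define INODE_NODATASUM (1u << 0)	/* illustrative flag bit */

struct fake_inode {
	uint32_t flags;
};

/* Stand-in for btrfs_csum_one_bio(): pretend to checksum a range. */
static int csum_range(uint64_t start, uint64_t len)
{
	printf("csum %llu..%llu\n", (unsigned long long)start,
	       (unsigned long long)(start + len));
	return 0;
}

static int submit_write(struct fake_inode *inode, uint64_t start, uint64_t len)
{
	/* Read the flag once up front, as the patch does with skip_sum. */
	int skip_sum = inode->flags & INODE_NODATASUM;

	if (!skip_sum) {
		int ret = csum_range(start, len);
		if (ret)
			return ret;
	}
	return 0;	/* map and submit the bio here */
}

int main(void)
{
	struct fake_inode plain = { 0 };
	struct fake_inode nosum = { INODE_NODATASUM };

	submit_write(&plain, 0, 4096);
	submit_write(&nosum, 0, 4096);	/* checksum step skipped */
	return 0;
}
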
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2406,8 +2406,8 @@ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
btrfs_root_item *item, struct btrfs_key *key);
int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
-int btrfs_set_root_node(struct btrfs_root_item *item,
-struct extent_buffer *node);
+void btrfs_set_root_node(struct btrfs_root_item *item,
+struct extent_buffer *node);
void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
/* dir-item.c */
@@ -2523,6 +2523,14 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag
#define PageChecked PageFsMisc
#endif
+/* This forces readahead on a given range of bytes in an inode */
+static inline void btrfs_force_ra(struct address_space *mapping,
+struct file_ra_state *ra, struct file *file,
+pgoff_t offset, unsigned long req_size)
+{
+page_cache_sync_readahead(mapping, ra, file, offset, req_size);
+}
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
int btrfs_set_inode_index(struct inode *dir, u64 *index);
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
@@ -2551,9 +2559,6 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
size_t size, struct bio *bio, unsigned long bio_flags);
-unsigned long btrfs_force_ra(struct address_space *mapping,
-struct file_ra_state *ra, struct file *file,
-pgoff_t offset, pgoff_t last_index);
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
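
The two ctree.h hunks above shrink btrfs_force_ra() from an out-of-line
function to a static inline in the header that simply forwards to
page_cache_sync_readahead(). A sketch of that header-wrapper pattern, with
stand-in types in place of the kernel's address_space and file_ra_state:

#include <stdio.h>

struct mapping {			/* stand-in for struct address_space */
	const char *name;
};

/* Stand-in for page_cache_sync_readahead(). */
static void sync_readahead(struct mapping *m, unsigned long offset,
			   unsigned long req_size)
{
	printf("readahead %s: %lu pages at %lu\n", m->name, req_size, offset);
}

/*
 * Header-style wrapper: the compiler inlines the forwarding call, so the
 * old out-of-line definition can be deleted.
 */
static inline void force_ra(struct mapping *m, unsigned long offset,
			    unsigned long req_size)
{
	sync_readahead(m, offset, req_size);
}

int main(void)
{
	struct mapping m = { "file" };

	force_ra(&m, 0, 64);
	return 0;
}
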
@@ -2648,12 +2653,21 @@ do { \
/* acl.c */
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
struct posix_acl *btrfs_get_acl(struct inode *inode, int type);
-#else
-#define btrfs_get_acl NULL
-#endif
int btrfs_init_acl(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *dir);
int btrfs_acl_chmod(struct inode *inode);
+#else
+#define btrfs_get_acl NULL
+static inline int btrfs_init_acl(struct btrfs_trans_handle *trans,
+struct inode *inode, struct inode *dir)
+{
+return 0;
+}
+static inline int btrfs_acl_chmod(struct inode *inode)
+{
+return 0;
+}
#endif
/* relocation.c */
int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start);
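
With the CONFIG_BTRFS_FS_POSIX_ACL stubs expressed as static inlines in the
header, a build without ACL support compiles the call sites down to nothing
and acl.o drops out of the link entirely (see the Makefile and acl.c hunks
above). The same pattern in self-contained form, with CONFIG_FEATURE standing
in for the real config option:

#include <stdio.h>

/* #define CONFIG_FEATURE 1 -- uncomment to use the out-of-line version */

#ifdef CONFIG_FEATURE
int feature_init(void);		/* real definition would live in feature.c */
#else
/*
 * Compiled-out build: callers still type-check and link, and the
 * optimizer folds the call away entirely.
 */
static inline int feature_init(void)
{
	return 0;
}
#endif

int main(void)
{
	printf("feature_init -> %d\n", feature_init());
	return 0;
}
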
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -198,8 +198,6 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_key key;
int ins_len = mod < 0 ? -1 : 0;
int cow = mod != 0;
-struct btrfs_key found_key;
-struct extent_buffer *leaf;
key.objectid = dir;
btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
@@ -209,18 +207,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
if (ret < 0)
return ERR_PTR(ret);
-if (ret > 0) {
-if (path->slots[0] == 0)
-return NULL;
-path->slots[0]--;
-}
-leaf = path->nodes[0];
-btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-if (found_key.objectid != dir ||
-btrfs_key_type(&found_key) != BTRFS_DIR_ITEM_KEY ||
-found_key.offset != key.offset)
+if (ret > 0)
return NULL;
return btrfs_match_dir_item_name(root, path, name, name_len);
@@ -315,8 +302,6 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
struct btrfs_key key;
int ins_len = mod < 0 ? -1 : 0;
int cow = mod != 0;
-struct btrfs_key found_key;
-struct extent_buffer *leaf;
key.objectid = dir;
btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
@@ -324,18 +309,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
if (ret < 0)
return ERR_PTR(ret);
-if (ret > 0) {
-if (path->slots[0] == 0)
-return NULL;
-path->slots[0]--;
-}
-leaf = path->nodes[0];
-btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-if (found_key.objectid != dir ||
-btrfs_key_type(&found_key) != BTRFS_XATTR_ITEM_KEY ||
-found_key.offset != key.offset)
+if (ret > 0)
return NULL;
return btrfs_match_dir_item_name(root, path, name, name_len);
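
Both dir-item.c cleanups above lean on the btrfs_search_slot() convention that
a positive return means the exact key is absent, which makes the old
found_key re-check redundant. The convention sketched against a plain sorted
array -- search() here is an illustrative stand-in, not the btrfs function:

#include <stdio.h>

/*
 * Returns 0 on an exact match (*slot = the entry) and > 0 when the key
 * is absent (*slot = the insertion point), mirroring the
 * btrfs_search_slot() convention the cleanups rely on.
 */
static int search(const int *keys, int n, int key, int *slot)
{
	for (int i = 0; i < n; i++) {
		if (keys[i] == key) {
			*slot = i;
			return 0;
		}
		if (keys[i] > key) {
			*slot = i;
			return 1;
		}
	}
	*slot = n;
	return 1;
}

int main(void)
{
	const int keys[] = { 3, 7, 9 };
	int slot;

	/* The simplified caller: a positive return just means "not found". */
	if (search(keys, 3, 8, &slot) > 0)
		printf("not found\n");
	else
		printf("found at slot %d\n", slot);
	return 0;
}
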
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -663,7 +663,9 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
struct btrfs_path *path;
path = btrfs_alloc_path();
-BUG_ON(!path);
+if (!path)
+return -ENOMEM;
key.objectid = start;
key.offset = len;
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
@@ -3272,6 +3274,9 @@ again:
}
ret = btrfs_alloc_chunk(trans, extent_root, flags);
+if (ret < 0 && ret != -ENOSPC)
+goto out;
spin_lock(&space_info->lock);
if (ret)
space_info->full = 1;
@@ -3281,6 +3286,7 @@ again:
space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
space_info->chunk_alloc = 0;
spin_unlock(&space_info->lock);
+out:
mutex_unlock(&extent_root->fs_info->chunk_mutex);
return ret;
}
@@ -4456,7 +4462,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
printk(KERN_ERR "umm, got %d back from search"
", was looking for %llu\n", ret,
(unsigned long long)bytenr);
-btrfs_print_leaf(extent_root, path->nodes[0]);
+if (ret > 0)
+btrfs_print_leaf(extent_root,
+path->nodes[0]);
}
BUG_ON(ret);
extent_slot = path->slots[0];
@@ -5073,7 +5081,9 @@ have_block_group:
* group is does point to and try again
*/
if (!last_ptr_loop && last_ptr->block_group &&
-last_ptr->block_group != block_group) {
+last_ptr->block_group != block_group &&
+index <=
+get_block_group_index(last_ptr->block_group)) {
btrfs_put_block_group(block_group);
block_group = last_ptr->block_group;
@@ -5501,7 +5511,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
path = btrfs_alloc_path();
-BUG_ON(!path);
+if (!path)
+return -ENOMEM;
path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
@@ -6272,10 +6283,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
int level;
path = btrfs_alloc_path();
-BUG_ON(!path);
+if (!path)
+return -ENOMEM;
wc = kzalloc(sizeof(*wc), GFP_NOFS);
-BUG_ON(!wc);
+if (!wc) {
+btrfs_free_path(path);
+return -ENOMEM;
+}
trans = btrfs_start_transaction(tree_root, 0);
BUG_ON(IS_ERR(trans));
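
Instead of crashing on allocation failure with BUG_ON(), btrfs_drop_snapshot()
above now frees the path before returning -ENOMEM when the second allocation
fails. The unwind shape in miniature, with stand-in types:

#include <errno.h>
#include <stdlib.h>

struct path { int dummy; };
struct walk_control { int dummy; };

static int drop_snapshot(void)
{
	struct path *path;
	struct walk_control *wc;

	path = malloc(sizeof(*path));
	if (!path)
		return -ENOMEM;

	wc = calloc(1, sizeof(*wc));
	if (!wc) {
		free(path);		/* unwind what we already hold */
		return -ENOMEM;
	}

	/* ... the actual work would happen here ... */

	free(wc);
	free(path);
	return 0;
}

int main(void)
{
	return drop_snapshot() ? 1 : 0;
}
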
@@ -6538,8 +6553,6 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
u64 min_allocable_bytes;
int ret = -ENOSPC;
-if (cache->ro)
-return 0;
/*
* We need some metadata space and system metadata space for
@@ -6555,6 +6568,12 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
spin_lock(&sinfo->lock);
spin_lock(&cache->lock);
+if (cache->ro) {
+ret = 0;
+goto out;
+}
num_bytes = cache->key.offset - cache->reserved - cache->pinned -
cache->bytes_super - btrfs_block_group_used(&cache->item);
@@ -6568,7 +6587,7 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
cache->ro = 1;
ret = 0;
}
+out:
spin_unlock(&cache->lock);
spin_unlock(&sinfo->lock);
return ret;
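
These three hunks ("Btrfs: Protect the readonly flag of block group") move the
cache->ro test under cache->lock, closing the window in which another CPU
could flip the flag between an unlocked check and the update. A pthread
sketch of the check-under-lock pattern; the error value mirrors the kernel
function's -ENOSPC convention:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct group {
	pthread_mutex_t lock;
	int ro;
};

static int set_ro(struct group *g)
{
	int ret = -ENOSPC;

	pthread_mutex_lock(&g->lock);
	if (g->ro) {		/* flag is only tested under the lock */
		ret = 0;
		goto out;
	}
	/* ... space accounting checks would go here ... */
	g->ro = 1;
	ret = 0;
out:
	pthread_mutex_unlock(&g->lock);
	return ret;
}

int main(void)
{
	struct group g = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("first: %d, second: %d\n", set_ro(&g), set_ro(&g));
	return 0;
}
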
@@ -7183,11 +7202,15 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&cluster->refill_lock);
path = btrfs_alloc_path();
-BUG_ON(!path);
+if (!path) {
+ret = -ENOMEM;
+goto out;
+}
inode = lookup_free_space_inode(root, block_group, path);
if (!IS_ERR(inode)) {
-btrfs_orphan_add(trans, inode);
+ret = btrfs_orphan_add(trans, inode);
+BUG_ON(ret);
clear_nlink(inode);
/* One for the block groups ref */
spin_lock(&block_group->lock);
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -254,14 +254,14 @@ static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
*
* This should be called with the tree lock held.
*/
-static int merge_state(struct extent_io_tree *tree,
-struct extent_state *state)
+static void merge_state(struct extent_io_tree *tree,
+struct extent_state *state)
{
struct extent_state *other;
struct rb_node *other_node;
if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
-return 0;
+return;
other_node = rb_prev(&state->rb_node);
if (other_node) {
@@ -287,19 +287,13 @@ static int merge_state(struct extent_io_tree *tree,
free_extent_state(other);
}
}
-return 0;
}
-static int set_state_cb(struct extent_io_tree *tree,
+static void set_state_cb(struct extent_io_tree *tree,
struct extent_state *state, int *bits)
{
-if (tree->ops && tree->ops->set_bit_hook) {
-return tree->ops->set_bit_hook(tree->mapping->host,
-state, bits);
-}
-return 0;
+if (tree->ops && tree->ops->set_bit_hook)
+tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}
static void clear_state_cb(struct extent_io_tree *tree,
@@ -309,6 +303,9 @@ static void clear_state_cb(struct extent_io_tree *tree,
tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}
+static void set_state_bits(struct extent_io_tree *tree,
+struct extent_state *state, int *bits);
/*
* insert an extent_state struct into the tree. 'bits' are set on the
* struct before it is inserted.
@@ -324,8 +321,6 @@ static int insert_state(struct extent_io_tree *tree,
int *bits)
{
struct rb_node *node;
-int bits_to_set = *bits & ~EXTENT_CTLBITS;
-int ret;
if (end < start) {
printk(KERN_ERR "btrfs end < start %llu %llu\n",
@@ -335,13 +330,9 @@ static int insert_state(struct extent_io_tree *tree,
}
state->start = start;
state->end = end;
-ret = set_state_cb(tree, state, bits);
-if (ret)
-return ret;
-if (bits_to_set & EXTENT_DIRTY)
-tree->dirty_bytes += end - start + 1;
-state->state |= bits_to_set;
+set_state_bits(tree, state, bits);
node = tree_insert(&tree->state, end, &state->rb_node);
if (node) {
struct extent_state *found;
@@ -357,13 +348,11 @@ static int insert_state(struct extent_io_tree *tree,
return 0;
}
-static int split_cb(struct extent_io_tree *tree, struct extent_state *orig,
+static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
u64 split)
{
if (tree->ops && tree->ops->split_extent_hook)
-return tree->ops->split_extent_hook(tree->mapping->host,
-orig, split);
-return 0;
+tree->ops->split_extent_hook(tree->mapping->host, orig, split);
}
/*
@@ -659,34 +648,25 @@ again:
if (start > end)
break;
-if (need_resched()) {
-spin_unlock(&tree->lock);
-cond_resched();
-spin_lock(&tree->lock);
-}
+cond_resched_lock(&tree->lock);
}
out:
spin_unlock(&tree->lock);
return 0;
}
-static int set_state_bits(struct extent_io_tree *tree,
+static void set_state_bits(struct extent_io_tree *tree,
struct extent_state *state,
int *bits)
{
-int ret;
int bits_to_set = *bits & ~EXTENT_CTLBITS;
-ret = set_state_cb(tree, state, bits);
-if (ret)
-return ret;
+set_state_cb(tree, state, bits);
if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
u64 range = state->end - state->start + 1;
tree->dirty_bytes += range;
}
state->state |= bits_to_set;
-return 0;
}
static void cache_state(struct extent_state *state,
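
The need_resched() dance removed earlier in this hunk collapses into
cond_resched_lock(), which drops the spinlock, reschedules if needed, and
retakes it. A rough userspace analogue using a pthread mutex and
sched_yield(); note the kernel primitive only yields when need_resched() is
set, whereas this sketch yields unconditionally:

#include <pthread.h>
#include <sched.h>

/*
 * Userspace stand-in for cond_resched_lock(): drop the lock, yield the
 * CPU, retake the lock.
 */
static void cond_resched_lock_ish(pthread_mutex_t *lock)
{
	pthread_mutex_unlock(lock);
	sched_yield();
	pthread_mutex_lock(lock);
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_lock(&lock);
	for (int i = 0; i < 3; i++)	/* a long walk under the lock */
		cond_resched_lock_ish(&lock);
	pthread_mutex_unlock(&lock);
	return 0;
}
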
@@ -779,9 +759,7 @@ hit_next:
goto out;
}
-err = set_state_bits(tree, state, &bits);
-if (err)
-goto out;
+set_state_bits(tree, state, &bits);
cache_state(state, cached_state);
merge_state(tree, state);
@@ -830,9 +808,7 @@ hit_next:
if (err)
goto out;
if (state->end <= end) {
-err = set_state_bits(tree, state, &bits);
-if (err)
-goto out;
+set_state_bits(tree, state, &bits);
cache_state(state, cached_state);
merge_state(tree, state);
if (last_end == (u64)-1)
@@ -893,11 +869,7 @@ hit_next:
err = split_state(tree, state, prealloc, end + 1);
BUG_ON(err == -EEXIST);
-err = set_state_bits(tree, prealloc, &bits);
-if (err) {
-prealloc = NULL;
-goto out;
-}
+set_state_bits(tree, prealloc, &bits);
cache_state(prealloc, cached_state);
merge_state(tree, prealloc);
prealloc = NULL;
@@ -1059,46 +1031,6 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
return 0;
}
-/*
-* find the first offset in the io tree with 'bits' set. zero is
-* returned if we find something, and *start_ret and *end_ret are
-* set to reflect the state struct that was found.
-*
-* If nothing was found, 1 is returned, < 0 on error
-*/
-int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
-u64 *start_ret, u64 *end_ret, int bits)
-{
-struct rb_node *node;
-struct extent_state *state;
-int ret = 1;
-spin_lock(&tree->lock);
-/*
-* this search will find all the extents that end after
-* our range starts.
-*/
-node = tree_search(tree, start);
-if (!node)
-goto out;
-while (1) {
-state = rb_entry(node, struct extent_state, rb_node);
-if (state->end >= start && (state->state & bits)) {
-*start_ret = state->start;
-*end_ret = state->end;
-ret = 0;
-break;
-}
-node = rb_next(node);
-if (!node)
-break;
-}
-out:
-spin_unlock(&tree->lock);
-return ret;
-}
/* find the first state struct with 'bits' set after 'start', and
* return it. tree->lock must be held. NULL will returned if
* nothing was found after 'start'
@@ -1130,6 +1062,30 @@ out:
return NULL;
}
+/*
+* find the first offset in the io tree with 'bits' set. zero is
+* returned if we find something, and *start_ret and *end_ret are
+* set to reflect the state struct that was found.
+*
+* If nothing was found, 1 is returned, < 0 on error
+*/
+int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
+u64 *start_ret, u64 *end_ret, int bits)
+{
+struct extent_state *state;
+int ret = 1;
+spin_lock(&tree->lock);
+state = find_first_extent_bit_state(tree, start, bits);
+if (state) {
+*start_ret = state->start;
+*end_ret = state->end;
+ret = 0;
+}
+spin_unlock(&tree->lock);
+return ret;
+}
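
After this cleanup, find_first_extent_bit() is a thin locking wrapper around
find_first_extent_bit_state(), so the tree walk lives in exactly one place. A
self-contained sketch of that helper-plus-wrapper split, using a sorted
linked list in place of the rb-tree:

#include <pthread.h>
#include <stdio.h>

struct state {
	unsigned long start, end;
	int bits;
	struct state *next;
};

struct tree {
	pthread_mutex_t lock;
	struct state *head;	/* sorted by start */
};

/* Core walk; the caller must hold tree->lock, like the _state helper. */
static struct state *first_state(struct tree *t, unsigned long start, int bits)
{
	for (struct state *s = t->head; s; s = s->next)
		if (s->end >= start && (s->bits & bits))
			return s;
	return NULL;
}

/* Public wrapper: takes the lock, calls the helper, copies results out. */
static int first_bit(struct tree *t, unsigned long start, int bits,
		     unsigned long *start_ret, unsigned long *end_ret)
{
	int ret = 1;

	pthread_mutex_lock(&t->lock);
	struct state *s = first_state(t, start, bits);
	if (s) {
		*start_ret = s->start;
		*end_ret = s->end;
		ret = 0;
	}
	pthread_mutex_unlock(&t->lock);
	return ret;
}

int main(void)
{
	struct state s = { 10, 20, 0x1, NULL };
	struct tree t = { PTHREAD_MUTEX_INITIALIZER, &s };
	unsigned long a, b;

	if (!first_bit(&t, 0, 0x1, &a, &b))
		printf("found [%lu, %lu]\n", a, b);
	return 0;
}
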
/*
* find a contiguous range of bytes in the file marked as delalloc, not
* more than 'max_bytes'. start and end are used to return the range,
@@ -2546,7 +2502,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
struct writeback_control *wbc)
{
int ret;
-struct address_space *mapping = page->mapping;
struct extent_page_data epd = {
.bio = NULL,
.tree = tree,
@@ -2554,17 +2509,9 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
.extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
};
-struct writeback_control wbc_writepages = {
-.sync_mode = wbc->sync_mode,
-.nr_to_write = 64,
-.range_start = page_offset(page) + PAGE_CACHE_SIZE,
-.range_end = (loff_t)-1,
-};
ret = __extent_writepage(page, wbc, &epd);
-extent_write_cache_pages(tree, mapping, &wbc_writepages,
-__extent_writepage, &epd, flush_write_bio);
flush_epd_write_bio(&epd);
return ret;
}
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -76,15 +76,15 @@ struct extent_io_ops {
struct extent_state *state);
int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
struct extent_state *state, int uptodate);
-int (*set_bit_hook)(struct inode *inode, struct extent_state *state,
-int *bits);
-int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
-int *bits);
-int (*merge_extent_hook)(struct inode *inode,
-struct extent_state *new,
-struct extent_state *other);
-int (*split_extent_hook)(struct inode *inode,
-struct extent_state *orig, u64 split);
+void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
+int *bits);
+void (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
+int *bits);
+void (*merge_extent_hook)(struct inode *inode,
+struct extent_state *new,
+struct extent_state *other);
+void (*split_extent_hook)(struct inode *inode,
+struct extent_state *orig, u64 split);
int (*write_cache_pages_lock_hook)(struct page *page);
};
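
Because no caller ever acted on these hooks' return values, the ops table now
declares them void, matching the extent_io.c changes above. A compact sketch
of a void hook table with a guarded call site; the struct and function names
here are illustrative:

#include <stdio.h>

struct ops {
	/* Hooks that cannot fail are declared void, as in the patch. */
	void (*set_bit_hook)(int *bits);
};

static void my_set_bit(int *bits)
{
	*bits |= 0x4;
}

/* The guarded call site stays a one-liner; nothing to propagate. */
static void set_state_cb(const struct ops *ops, int *bits)
{
	if (ops && ops->set_bit_hook)
		ops->set_bit_hook(bits);
}

int main(void)
{
	struct ops o = { my_set_bit };
	int bits = 0;

	set_state_cb(&o, &bits);
	printf("bits = %#x\n", bits);
	return 0;
}
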
@@ -108,8 +108,6 @@ struct extent_state {
wait_queue_head_t wq;
atomic_t refs;
unsigned long state;
-u64 split_start;
-u64 split_end;
/* for use by the FS */
u64 private;
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -183,22 +183,10 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
return 0;
}
-int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
+static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
-int ret = 0;
struct extent_map *merge = NULL;
struct rb_node *rb;
-struct extent_map *em;
-write_lock(&tree->lock);
-em = lookup_extent_mapping(tree, start, len);