Commit a35afb83 authored by Christoph Lameter, committed by Linus Torvalds

Remove SLAB_CTOR_CONSTRUCTOR



SLAB_CTOR_CONSTRUCTOR is always specified. No point in checking it.
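Context: in this era a slab constructor had the signature ctor(void *obj, struct kmem_cache *cachep, unsigned long flags) and was registered through kmem_cache_create(); by this point the slab allocators passed SLAB_CTOR_CONSTRUCTOR on every invocation, so guarding on it was dead code. A minimal sketch of the pattern being removed (the "foo" cache, its field, and foo_ctor are hypothetical, for illustration only):

static struct kmem_cache *foo_cachep;

struct foo {
	spinlock_t lock;	/* hypothetical per-object state */
};

static void foo_ctor(void *obj, struct kmem_cache *cachep,
		     unsigned long flags)
{
	struct foo *f = obj;

	/* Dead guard: the allocators always set SLAB_CTOR_CONSTRUCTOR. */
	if (flags & SLAB_CTOR_CONSTRUCTOR)
		spin_lock_init(&f->lock);
}

static int __init foo_init(void)
{
	/* 2.6.21-era six-argument form: ctor plus an (unused) dtor. */
	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
				       SLAB_MEM_SPREAD, foo_ctor, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

Each hunk below simply drops the guard so the initialization body runs unconditionally.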
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Steven French <sfrench@us.ibm.com>
Cc: Michael Halcrow <mhalcrow@us.ibm.com>
Cc: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
Cc: Miklos Szeredi <miklos@szeredi.hu>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Dave Kleikamp <shaggy@austin.ibm.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Cc: "J. Bruce Fields" <bfields@fieldses.org>
Cc: Anton Altaparmakov <aia21@cantab.net>
Cc: Mark Fasheh <mark.fasheh@oracle.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jan Kara <jack@ucw.cz>
Cc: David Chinner <dgc@sgi.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5577bd8a
@@ -71,9 +71,7 @@ spufs_init_once(void *p, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct spufs_inode_info *ei = p;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		inode_init_once(&ei->vfs_inode);
-	}
+	inode_init_once(&ei->vfs_inode);
 }
 
 static struct inode *
......
@@ -940,9 +940,6 @@ static void ltree_entry_ctor(void *obj, struct kmem_cache *cache,
 {
 	struct ltree_entry *le = obj;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		return;
-
 	le->users = 0;
 	init_rwsem(&le->mutex);
 }
......
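Worth noting: the ltree_entry hunk above (from UBI) is the one place this patch is more than cosmetic. Unlike every other constructor below, its test was inverted, returning early when SLAB_CTOR_CONSTRUCTOR was set, so with the flag always specified the two initialization lines never ran. Dropping the check means le->users and le->mutex are now genuinely initialized by the constructor.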
@@ -232,8 +232,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct adfs_inode_info *ei = (struct adfs_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
......
@@ -87,11 +87,9 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct affs_inode_info *ei = (struct affs_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		init_MUTEX(&ei->i_link_lock);
-		init_MUTEX(&ei->i_ext_lock);
-		inode_init_once(&ei->vfs_inode);
-	}
+	init_MUTEX(&ei->i_link_lock);
+	init_MUTEX(&ei->i_ext_lock);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
......
@@ -451,17 +451,15 @@ static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep,
 {
 	struct afs_vnode *vnode = _vnode;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		memset(vnode, 0, sizeof(*vnode));
-		inode_init_once(&vnode->vfs_inode);
-		init_waitqueue_head(&vnode->update_waitq);
-		mutex_init(&vnode->permits_lock);
-		mutex_init(&vnode->validate_lock);
-		spin_lock_init(&vnode->writeback_lock);
-		spin_lock_init(&vnode->lock);
-		INIT_LIST_HEAD(&vnode->writebacks);
-		INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work);
-	}
+	memset(vnode, 0, sizeof(*vnode));
+	inode_init_once(&vnode->vfs_inode);
+	init_waitqueue_head(&vnode->update_waitq);
+	mutex_init(&vnode->permits_lock);
+	mutex_init(&vnode->validate_lock);
+	spin_lock_init(&vnode->writeback_lock);
+	spin_lock_init(&vnode->lock);
+	INIT_LIST_HEAD(&vnode->writebacks);
+	INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work);
 }
 
 /*
......
@@ -292,10 +292,8 @@ befs_destroy_inode(struct inode *inode)
 static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct befs_inode_info *bi = (struct befs_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		inode_init_once(&bi->vfs_inode);
-	}
+	inode_init_once(&bi->vfs_inode);
 }
 
 static void
......
@@ -248,8 +248,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct bfs_inode_info *bi = foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&bi->vfs_inode);
+	inode_init_once(&bi->vfs_inode);
 }
 
 static int init_inodecache(void)
......
@@ -458,17 +458,15 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 	struct bdev_inode *ei = (struct bdev_inode *) foo;
 	struct block_device *bdev = &ei->bdev;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		memset(bdev, 0, sizeof(*bdev));
-		mutex_init(&bdev->bd_mutex);
-		sema_init(&bdev->bd_mount_sem, 1);
-		INIT_LIST_HEAD(&bdev->bd_inodes);
-		INIT_LIST_HEAD(&bdev->bd_list);
+	memset(bdev, 0, sizeof(*bdev));
+	mutex_init(&bdev->bd_mutex);
+	sema_init(&bdev->bd_mount_sem, 1);
+	INIT_LIST_HEAD(&bdev->bd_inodes);
+	INIT_LIST_HEAD(&bdev->bd_list);
 #ifdef CONFIG_SYSFS
-		INIT_LIST_HEAD(&bdev->bd_holder_list);
+	INIT_LIST_HEAD(&bdev->bd_holder_list);
 #endif
-		inode_init_once(&ei->vfs_inode);
-	}
+	inode_init_once(&ei->vfs_inode);
 }
 
 static inline void __bd_forget(struct inode *inode)
......
@@ -2898,8 +2898,9 @@ static void recalc_bh_state(void)
 
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
+	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
 	if (ret) {
+		INIT_LIST_HEAD(&ret->b_assoc_buffers);
 		get_cpu_var(bh_accounting).nr++;
 		recalc_bh_state();
 		put_cpu_var(bh_accounting);
@@ -2918,17 +2919,6 @@ void free_buffer_head(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(free_buffer_head);
 
-static void
-init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
-{
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		struct buffer_head * bh = (struct buffer_head *)data;
-
-		memset(bh, 0, sizeof(*bh));
-		INIT_LIST_HEAD(&bh->b_assoc_buffers);
-	}
-}
-
 static void buffer_exit_cpu(int cpu)
 {
 	int i;
@@ -2955,12 +2945,8 @@ void __init buffer_init(void)
 {
 	int nrpages;
 
-	bh_cachep = kmem_cache_create("buffer_head",
-			sizeof(struct buffer_head), 0,
-			(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
-			SLAB_MEM_SPREAD),
-			init_buffer_head,
-			NULL);
+	bh_cachep = KMEM_CACHE(buffer_head,
+			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
 
 	/*
 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
......
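The buffer_head conversion above goes further than deleting the flag test: the constructor is removed outright, its memset() is subsumed by switching alloc_buffer_head() to kmem_cache_zalloc(), the list-head setup moves into the allocation path, and cache creation uses the then-new KMEM_CACHE() helper, which derives the cache name, object size, and alignment from the struct itself. Roughly, from the slab header of this era (shown for context, not part of this patch):

#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,	\
		sizeof(struct __struct), __alignof__(struct __struct),	\
		(__flags), NULL, NULL)

The visible trade-off is that buffer heads are now zeroed on every allocation rather than initialized once per slab page; presumably that cost was judged acceptable against keeping a constructor just for a memset() and one INIT_LIST_HEAD().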
@@ -701,10 +701,8 @@ cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct cifsInodeInfo *cifsi = inode;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		inode_init_once(&cifsi->vfs_inode);
-		INIT_LIST_HEAD(&cifsi->lockList);
-	}
+	inode_init_once(&cifsi->vfs_inode);
+	INIT_LIST_HEAD(&cifsi->lockList);
 }
 
 static int
......
@@ -62,8 +62,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct coda_inode_info *ei = (struct coda_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 int coda_init_inodecache(void)
......
@@ -583,8 +583,7 @@ inode_info_init_once(void *vptr, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static struct ecryptfs_cache_info {
......
@@ -72,8 +72,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct efs_inode_info *ei = (struct efs_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
......
@@ -160,13 +160,11 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		rwlock_init(&ei->i_meta_lock);
+	rwlock_init(&ei->i_meta_lock);
 #ifdef CONFIG_EXT2_FS_XATTR
-		init_rwsem(&ei->xattr_sem);
+	init_rwsem(&ei->xattr_sem);
 #endif
-		inode_init_once(&ei->vfs_inode);
-	}
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
......
@@ -466,14 +466,12 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		INIT_LIST_HEAD(&ei->i_orphan);
+	INIT_LIST_HEAD(&ei->i_orphan);
 #ifdef CONFIG_EXT3_FS_XATTR
-		init_rwsem(&ei->xattr_sem);
+	init_rwsem(&ei->xattr_sem);
 #endif
-		mutex_init(&ei->truncate_mutex);
-		inode_init_once(&ei->vfs_inode);
-	}
+	mutex_init(&ei->truncate_mutex);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
......
@@ -517,14 +517,12 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		INIT_LIST_HEAD(&ei->i_orphan);
+	INIT_LIST_HEAD(&ei->i_orphan);
 #ifdef CONFIG_EXT4DEV_FS_XATTR
-		init_rwsem(&ei->xattr_sem);
+	init_rwsem(&ei->xattr_sem);
 #endif
-		mutex_init(&ei->truncate_mutex);
-		inode_init_once(&ei->vfs_inode);
-	}
+	mutex_init(&ei->truncate_mutex);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
......
@@ -40,8 +40,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct fat_cache *cache = (struct fat_cache *)foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		INIT_LIST_HEAD(&cache->cache_list);
+	INIT_LIST_HEAD(&cache->cache_list);
 }
 
 int __init fat_cache_init(void)
......
@@ -500,14 +500,12 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct msdos_inode_info *ei = (struct msdos_inode_info *)foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		spin_lock_init(&ei->cache_lru_lock);
-		ei->nr_caches = 0;
-		ei->cache_valid_id = FAT_CACHE_VALID + 1;
-		INIT_LIST_HEAD(&ei->cache_lru);
-		INIT_HLIST_NODE(&ei->i_fat_hash);
-		inode_init_once(&ei->vfs_inode);
-	}
+	spin_lock_init(&ei->cache_lru_lock);
+	ei->nr_caches = 0;
+	ei->cache_valid_id = FAT_CACHE_VALID + 1;
+	INIT_LIST_HEAD(&ei->cache_lru);
+	INIT_HLIST_NODE(&ei->i_fat_hash);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int __init fat_init_inodecache(void)
......
@@ -687,8 +687,7 @@ static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep,
 {
 	struct inode * inode = foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(inode);
+	inode_init_once(inode);
 }
 
 static int __init fuse_fs_init(void)
......
@@ -27,29 +27,27 @@
 static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct gfs2_inode *ip = foo;
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		inode_init_once(&ip->i_inode);
-		spin_lock_init(&ip->i_spin);
-		init_rwsem(&ip->i_rw_mutex);
-		memset(ip->i_cache, 0, sizeof(ip->i_cache));
-	}
+
+	inode_init_once(&ip->i_inode);
+	spin_lock_init(&ip->i_spin);
+	init_rwsem(&ip->i_rw_mutex);
+	memset(ip->i_cache, 0, sizeof(ip->i_cache));
 }
 
 static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct gfs2_glock *gl = foo;
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		INIT_HLIST_NODE(&gl->gl_list);
-		spin_lock_init(&gl->gl_spin);
-		INIT_LIST_HEAD(&gl->gl_holders);
-		INIT_LIST_HEAD(&gl->gl_waiters1);
-		INIT_LIST_HEAD(&gl->gl_waiters3);
-		gl->gl_lvb = NULL;
-		atomic_set(&gl->gl_lvb_count, 0);
-		INIT_LIST_HEAD(&gl->gl_reclaim);
-		INIT_LIST_HEAD(&gl->gl_ail_list);
-		atomic_set(&gl->gl_ail_count, 0);
-	}
+
+	INIT_HLIST_NODE(&gl->gl_list);
+	spin_lock_init(&gl->gl_spin);
+	INIT_LIST_HEAD(&gl->gl_holders);
+	INIT_LIST_HEAD(&gl->gl_waiters1);
+	INIT_LIST_HEAD(&gl->gl_waiters3);
+	gl->gl_lvb = NULL;
+	atomic_set(&gl->gl_lvb_count, 0);
+	INIT_LIST_HEAD(&gl->gl_reclaim);
+	INIT_LIST_HEAD(&gl->gl_ail_list);
+	atomic_set(&gl->gl_ail_count, 0);
 }
 
 /**
......