Commit c3762229 authored by Robert P. J. Day, committed by Linus Torvalds

[PATCH] Transform kmem_cache_alloc()+memset(0) -> kmem_cache_zalloc().



Replace appropriate pairs of "kmem_cache_alloc()" + "memset(0)" with the
corresponding "kmem_cache_zalloc()" call.
Signed-off-by: Robert P. J. Day <rpjday@mindspring.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Roland McGrath <roland@redhat.com>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Greg KH <greg@kroah.com>
Acked-by: Joel Becker <Joel.Becker@oracle.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Jan Kara <jack@ucw.cz>
Cc: Michael Halcrow <mhalcrow@us.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Stephen Smalley <sds@tycho.nsa.gov>
Cc: James Morris <jmorris@namei.org>
Cc: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1b135431
@@ -91,9 +91,8 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
 	 * tables. IA-32 segmentation is used to protect against IA-32 accesses to them.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = IA32_GDT_OFFSET;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -117,9 +116,8 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * code is locked in specific gate page, which is pointed by pretcode
 	 * when setup_frame_ia32
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = IA32_GATE_OFFSET;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -142,9 +140,8 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * Install LDT as anonymous memory. This gives us all-zero segment descriptors
 	 * until a task modifies them via modify_ldt().
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = IA32_LDT_OFFSET;
 		vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
@@ -214,12 +211,10 @@ ia64_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
 	bprm->loader += stack_base;
 	bprm->exec += stack_base;
 
-	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt)
 		return -ENOMEM;
 
-	memset(mpnt, 0, sizeof(*mpnt));
-
 	down_write(&current->mm->mmap_sem);
 	{
 		mpnt->vm_mm = current->mm;
......
@@ -2301,12 +2301,11 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
 	DPRINT(("smpl_buf @%p\n", smpl_buf));
 
 	/* allocate vma */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		DPRINT(("Cannot allocate vma\n"));
 		goto error_kmem;
 	}
-	memset(vma, 0, sizeof(*vma));
 
 	/*
 	 * partially initialize the vma for the sampling buffer
......
@@ -176,9 +176,8 @@ ia64_init_addr_space (void)
 	 * the problem. When the process attempts to write to the register backing store
 	 * for the first time, it will get a SEGFAULT in this case.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -195,9 +194,8 @@ ia64_init_addr_space (void)
 
 	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
 	if (!(current->personality & MMAP_PAGE_ZERO)) {
-		vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 		if (vma) {
-			memset(vma, 0, sizeof(*vma));
 			vma->vm_mm = current->mm;
 			vma->vm_end = PAGE_SIZE;
 			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
......
@@ -300,12 +300,10 @@ int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top,
 	bprm->loader += stack_base;
 	bprm->exec += stack_base;
 
-	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt)
 		return -ENOMEM;
 
-	memset(mpnt, 0, sizeof(*mpnt));
-
 	down_write(&mm->mmap_sem);
 	{
 		mpnt->vm_mm = mm;
......
@@ -134,14 +134,13 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
 	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
 		return ERR_PTR(-EINVAL);
 
-	my_cq = kmem_cache_alloc(cq_cache, GFP_KERNEL);
+	my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
 	if (!my_cq) {
 		ehca_err(device, "Out of memory for ehca_cq struct device=%p",
 			 device);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	memset(my_cq, 0, sizeof(struct ehca_cq));
 	memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));
 
 	spin_lock_init(&my_cq->spinlock);
......
@@ -53,9 +53,8 @@ static struct ehca_mr *ehca_mr_new(void)
 {
 	struct ehca_mr *me;
 
-	me = kmem_cache_alloc(mr_cache, GFP_KERNEL);
+	me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
 	if (me) {
-		memset(me, 0, sizeof(struct ehca_mr));
 		spin_lock_init(&me->mrlock);
 	} else
 		ehca_gen_err("alloc failed");
@@ -72,9 +71,8 @@ static struct ehca_mw *ehca_mw_new(void)
 {
 	struct ehca_mw *me;
 
-	me = kmem_cache_alloc(mw_cache, GFP_KERNEL);
+	me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
 	if (me) {
-		memset(me, 0, sizeof(struct ehca_mw));
 		spin_lock_init(&me->mwlock);
 	} else
 		ehca_gen_err("alloc failed");
......
@@ -50,14 +50,13 @@ struct ib_pd *ehca_alloc_pd(struct ib_device *device,
 {
 	struct ehca_pd *pd;
 
-	pd = kmem_cache_alloc(pd_cache, GFP_KERNEL);
+	pd = kmem_cache_zalloc(pd_cache, GFP_KERNEL);
 	if (!pd) {
 		ehca_err(device, "device=%p context=%p out of memory",
 			 device, context);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	memset(pd, 0, sizeof(struct ehca_pd));
 	pd->ownpid = current->tgid;
 
 	/*
......
@@ -450,13 +450,12 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 	if (pd->uobject && udata)
 		context = pd->uobject->context;
 
-	my_qp = kmem_cache_alloc(qp_cache, GFP_KERNEL);
+	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
 	if (!my_qp) {
 		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	memset(my_qp, 0, sizeof(struct ehca_qp));
 	memset (&parms, 0, sizeof(struct ehca_alloc_qp_parms));
 	spin_lock_init(&my_qp->spinlock_s);
 	spin_lock_init(&my_qp->spinlock_r);
......
@@ -1052,10 +1052,9 @@ static inline struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
 	struct asd_ascb *ascb;
 	unsigned long flags;
 
-	ascb = kmem_cache_alloc(asd_ascb_cache, gfp_flags);
+	ascb = kmem_cache_zalloc(asd_ascb_cache, gfp_flags);
 
 	if (ascb) {
-		memset(ascb, 0, sizeof(*ascb));
 		ascb->dma_scb.size = sizeof(struct scb);
 		ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool,
 						     gfp_flags,
......
@@ -388,10 +388,9 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
 	int err = 0;
 	int write = (data_direction == DMA_TO_DEVICE);
 
-	sioc = kmem_cache_alloc(scsi_io_context_cache, gfp);
+	sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
 	if (!sioc)
 		return DRIVER_ERROR << 24;
-	memset(sioc, 0, sizeof(*sioc));
 
 	req = blk_get_request(sdev->request_queue, write, gfp);
 	if (!req)
......
@@ -2163,9 +2163,8 @@ static void etrax_usb_add_to_bulk_sb_list(struct urb *urb, int epid)
 
 	maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
 
-	sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
+	sb_desc = kmem_cache_zalloc(usb_desc_cache, SLAB_FLAG);
 	assert(sb_desc != NULL);
-	memset(sb_desc, 0, sizeof(USB_SB_Desc_t));
 
 
 	if (usb_pipeout(urb->pipe)) {
......
@@ -624,12 +624,10 @@ static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
 {
 	struct urb_priv *urbp;
 
-	urbp = kmem_cache_alloc(uhci_up_cachep, GFP_ATOMIC);
+	urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
 	if (!urbp)
 		return NULL;
 
-	memset((void *)urbp, 0, sizeof(*urbp));
-
 	urbp->urb = urb;
 	urb->hcpriv = urbp;
 
......
@@ -211,11 +211,10 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	if ((unsigned long)nr_events > aio_max_nr)
 		return ERR_PTR(-EAGAIN);
 
-	ctx = kmem_cache_alloc(kioctx_cachep, GFP_KERNEL);
+	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
 	if (!ctx)
 		return ERR_PTR(-ENOMEM);
 
-	memset(ctx, 0, sizeof(*ctx));
 	ctx->max_reqs = nr_events;
 	mm = ctx->mm = current->mm;
 	atomic_inc(&mm->mm_count);
......
@@ -72,11 +72,10 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent * pare
 {
 	struct configfs_dirent * sd;
 
-	sd = kmem_cache_alloc(configfs_dir_cachep, GFP_KERNEL);
+	sd = kmem_cache_zalloc(configfs_dir_cachep, GFP_KERNEL);
 	if (!sd)
 		return NULL;
 
-	memset(sd, 0, sizeof(*sd));
 	atomic_set(&sd->s_count, 1);
 	INIT_LIST_HEAD(&sd->s_links);
 	INIT_LIST_HEAD(&sd->s_children);
......
@@ -76,9 +76,7 @@ struct dlm_lkb *allocate_lkb(struct dlm_ls *ls)
 {
 	struct dlm_lkb *lkb;
 
-	lkb = kmem_cache_alloc(lkb_cache, GFP_KERNEL);
-	if (lkb)
-		memset(lkb, 0, sizeof(*lkb));
+	lkb = kmem_cache_zalloc(lkb_cache, GFP_KERNEL);
 
 	return lkb;
 }
......
@@ -600,11 +600,10 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
 {
 	struct dquot *dquot;
 
-	dquot = kmem_cache_alloc(dquot_cachep, GFP_NOFS);
+	dquot = kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
 	if(!dquot)
 		return NODQUOT;
 
-	memset((caddr_t)dquot, 0, sizeof(struct dquot));
 	mutex_init(&dquot->dq_lock);
 	INIT_LIST_HEAD(&dquot->dq_free);
 	INIT_LIST_HEAD(&dquot->dq_inuse);
......
@@ -1332,13 +1332,13 @@ int ecryptfs_write_headers(struct dentry *ecryptfs_dentry,
 		goto out;
 	}
 	/* Released in this function */
-	page_virt = kmem_cache_alloc(ecryptfs_header_cache_0, GFP_USER);
+	page_virt = kmem_cache_zalloc(ecryptfs_header_cache_0, GFP_USER);
 	if (!page_virt) {
 		ecryptfs_printk(KERN_ERR, "Out of memory\n");
 		rc = -ENOMEM;
 		goto out;
 	}
-	memset(page_virt, 0, PAGE_CACHE_SIZE);
+
 	rc = ecryptfs_write_headers_virt(page_virt, crypt_stat,
 					 ecryptfs_dentry);
 	if (unlikely(rc)) {
......
@@ -251,7 +251,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
 	int lower_flags;
 
 	/* Released in ecryptfs_release or end of function if failure */
-	file_info = kmem_cache_alloc(ecryptfs_file_info_cache, GFP_KERNEL);
+	file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
 	ecryptfs_set_file_private(file, file_info);
 	if (!file_info) {
 		ecryptfs_printk(KERN_ERR,
@@ -259,7 +259,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
 		rc = -ENOMEM;
 		goto out;
 	}
-	memset(file_info, 0, sizeof(*file_info));
 	lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
 	crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
 	mount_crypt_stat = &ecryptfs_superblock_to_private(
......
@@ -361,8 +361,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
 		goto out;
 	}
 	/* Released in this function */
-	page_virt =
-		(char *)kmem_cache_alloc(ecryptfs_header_cache_2,
+	page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2,
 					 GFP_USER);
 	if (!page_virt) {
 		rc = -ENOMEM;
@@ -370,7 +369,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
 			"Cannot ecryptfs_kmalloc a page\n");
 		goto out_dput;
 	}
-	memset(page_virt, 0, PAGE_CACHE_SIZE);
+
 	rc = ecryptfs_read_header_region(page_virt, lower_dentry, nd->mnt);
 	crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
 	if (!ECRYPTFS_CHECK_FLAG(crypt_stat->flags, ECRYPTFS_POLICY_APPLIED))
......
@@ -207,14 +207,12 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat,
 	/* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or
 	 * at end of function upon failure */
 	auth_tok_list_item =
-		kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL);
+		kmem_cache_zalloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL);
 	if (!auth_tok_list_item) {
 		ecryptfs_printk(KERN_ERR, "Unable to allocate memory\n");
 		rc = -ENOMEM;
 		goto out;
 	}
-	memset(auth_tok_list_item, 0,
-	       sizeof(struct ecryptfs_auth_tok_list_item));
 	(*new_auth_tok) = &auth_tok_list_item->auth_tok;
 
 	/* check for body size - one to two bytes */
......