Commit 77d04bd9 authored by Andrew Morton, committed by David S. Miller

[NET]: More kzalloc conversions.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 31380de9
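
The pattern in every hunk below is the same: a kmalloc() followed by a memset() to zero collapses into a single kzalloc() call, which returns memory that is already zeroed. A minimal sketch of the before and after, where struct foo is a hypothetical stand-in for the objects converted in this commit:

	#include <linux/slab.h>
	#include <linux/string.h>

	struct foo {
		int a;
		void *b;
	};

	/* Before the conversion: allocate, then zero by hand. */
	static struct foo *foo_alloc_old(void)
	{
		struct foo *f = kmalloc(sizeof(struct foo), GFP_KERNEL);

		if (f == NULL)
			return NULL;
		memset(f, 0, sizeof(struct foo));
		return f;
	}

	/* After: kzalloc() does the allocation and zeroing in one call. */
	static struct foo *foo_alloc_new(void)
	{
		return kzalloc(sizeof(struct foo), GFP_KERNEL);
	}

Besides saving a line, this closes the window in which the object holds uninitialized memory and lets the error path stay a single check.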
@@ -55,15 +55,12 @@ int alloc_divert_blk(struct net_device *dev)
 	dev->divert = NULL;
 	if (dev->type == ARPHRD_ETHER) {
-		dev->divert = (struct divert_blk *)
-			kmalloc(alloc_size, GFP_KERNEL);
+		dev->divert = kzalloc(alloc_size, GFP_KERNEL);
 		if (dev->divert == NULL) {
 			printk(KERN_INFO "divert: unable to allocate divert_blk for %s\n",
 			       dev->name);
 			return -ENOMEM;
 		}
-		memset(dev->divert, 0, sizeof(struct divert_blk));
 		dev_hold(dev);
 	}
...
@@ -318,12 +318,10 @@ static void __devinit flow_cache_cpu_prepare(int cpu)
 		/* NOTHING */;
 	flow_table(cpu) = (struct flow_cache_entry **)
-		__get_free_pages(GFP_KERNEL, order);
+		__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
 	if (!flow_table(cpu))
 		panic("NET: failed to allocate flow cache order %lu\n", order);
-	memset(flow_table(cpu), 0, PAGE_SIZE << order);
 	flow_hash_rnd_recalc(cpu) = 1;
 	flow_count(cpu) = 0;
...
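
kzalloc() only covers slab-sized objects; the flow cache table above is a multi-page allocation, so the zeroing moves into the gfp mask instead: __GFP_ZERO makes __get_free_pages() hand back pages that are already cleared, replacing the memset() over PAGE_SIZE << order bytes. A sketch of the same idea in isolation (the helper name is illustrative, not from the patch):

	#include <linux/gfp.h>

	/* Allocate 2^order zeroed pages; the caller releases them
	 * with free_pages(addr, order). */
	static unsigned long alloc_zeroed_pages(unsigned int order)
	{
		return __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	}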
@@ -159,11 +159,10 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
 	if (parm->interval < -2 || parm->interval > 3)
 		return -EINVAL;
-	est = kmalloc(sizeof(*est), GFP_KERNEL);
+	est = kzalloc(sizeof(*est), GFP_KERNEL);
 	if (est == NULL)
 		return -ENOBUFS;
-	memset(est, 0, sizeof(*est));
 	est->interval = parm->interval + 2;
 	est->bstats = bstats;
 	est->rate_est = rate_est;
...
@@ -284,14 +284,11 @@ static struct neighbour **neigh_hash_alloc(unsigned int entries)
 	struct neighbour **ret;
 	if (size <= PAGE_SIZE) {
-		ret = kmalloc(size, GFP_ATOMIC);
+		ret = kzalloc(size, GFP_ATOMIC);
 	} else {
 		ret = (struct neighbour **)
-			__get_free_pages(GFP_ATOMIC, get_order(size));
+			__get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
 	}
-	if (ret)
-		memset(ret, 0, size);
 	return ret;
 }
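
neigh_hash_alloc() shows both conversions at once: allocations that fit in a page use kzalloc(), larger ones fall back to the page allocator with __GFP_ZERO, and the shared memset() tail disappears because both paths now return zeroed memory. A generic sketch of that size-threshold pattern (zeroed_table_alloc() is a hypothetical name):

	#include <linux/slab.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	static void *zeroed_table_alloc(size_t size)
	{
		if (size <= PAGE_SIZE)
			return kzalloc(size, GFP_ATOMIC);
		/* get_order() rounds size up to a power-of-two page count. */
		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
						get_order(size));
	}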
@@ -1089,8 +1086,7 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
 		if (hh->hh_type == protocol)
 			break;
-	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
-		memset(hh, 0, sizeof(struct hh_cache));
+	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
 		rwlock_init(&hh->hh_lock);
 		hh->hh_type = protocol;
 		atomic_set(&hh->hh_refcnt, 0);
@@ -1366,13 +1362,11 @@ void neigh_table_init(struct neigh_table *tbl)
 	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
-	tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);
+	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
 	if (!tbl->hash_buckets || !tbl->phash_buckets)
 		panic("cannot allocate neighbour cache hashes");
-	memset(tbl->phash_buckets, 0, phsize);
 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
 	rwlock_init(&tbl->lock);
...
@@ -38,13 +38,11 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 {
 	const int lopt_size = sizeof(struct listen_sock) +
 			      nr_table_entries * sizeof(struct request_sock *);
-	struct listen_sock *lopt = kmalloc(lopt_size, GFP_KERNEL);
+	struct listen_sock *lopt = kzalloc(lopt_size, GFP_KERNEL);
 	if (lopt == NULL)
 		return -ENOMEM;
-	memset(lopt, 0, lopt_size);
 	for (lopt->max_qlen_log = 6;
 	     (1 << lopt->max_qlen_log) < sysctl_max_syn_backlog;
 	     lopt->max_qlen_log++);
...
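
The last hunk zeroes a variable-length object: struct listen_sock plus its trailing table of request_sock pointers is sized at run time, and a single kzalloc() of the combined size clears header and table together. A sketch of that header-plus-trailing-array shape (struct table and table_alloc() are hypothetical, not from the patch):

	#include <linux/slab.h>

	struct table {
		unsigned int entries;
		void *slot[0];	/* trailing array, sized at allocation time */
	};

	static struct table *table_alloc(unsigned int entries)
	{
		struct table *t = kzalloc(sizeof(*t) + entries * sizeof(void *),
					  GFP_KERNEL);

		if (t)
			t->entries = entries;	/* slot[] is already all zeroes */
		return t;
	}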